max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
DiabeticRetinopathy/Features/cold_palette.py | fierval/retina | 3 | 6614451 | <reponame>fierval/retina
# Generated with GIMP Palette Export
# Based on the palette Cool Colors
colors={
'Color0': 0x112ac6,
'Color1': 0x539be2,
'Color2': 0x161066,
'Color3': 0x40234c,
'Color4': 0x073f93,
'Color5': 0x2c6ccc,
'Color6': 0x265121,
'Color7': 0x04422c} | # Generated with GIMP Palette Export
# Based on the palette Cool Colors
colors={
'Color0': 0x112ac6,
'Color1': 0x539be2,
'Color2': 0x161066,
'Color3': 0x40234c,
'Color4': 0x073f93,
'Color5': 0x2c6ccc,
'Color6': 0x265121,
'Color7': 0x04422c} | en | 0.699521 | # Generated with GIMP Palette Export # Based on the palette Cool Colors | 1.714449 | 2 |
game-utils/match-viewer/serve_games_assets.py | matjazp/planet-lia | 13 | 6614452 | #!/usr/bin/env python
import argparse
import os
import sys
parser = argparse.ArgumentParser()
parser.add_argument("dir", help="Root dir for the HTTP server", nargs=1)
parser.add_argument("-p", "--port", type=int, help="Port to bind the HTTP server", required=False, default=3333)
parser.add_argument("-b", "--bind", help="Bind address of HTTP server", required=False, default="127.0.0.1")
cli_args = parser.parse_args()
os.chdir(cli_args.dir[0])
try:
# try to use Python 3
from http.server import HTTPServer, SimpleHTTPRequestHandler, test as test_orig
def test (*args):
test_orig(*args, bind=cli_args.bind, port=cli_args.port)
except ImportError:
# fall back to Python 2
from BaseHTTPServer import HTTPServer, test
from SimpleHTTPServer import SimpleHTTPRequestHandler
class CORSRequestHandler(SimpleHTTPRequestHandler):
def end_headers (self):
self.send_header('Access-Control-Allow-Origin', '*')
SimpleHTTPRequestHandler.end_headers(self)
if __name__ == '__main__':
test(CORSRequestHandler, HTTPServer) | #!/usr/bin/env python
import argparse
import os
import sys
parser = argparse.ArgumentParser()
parser.add_argument("dir", help="Root dir for the HTTP server", nargs=1)
parser.add_argument("-p", "--port", type=int, help="Port to bind the HTTP server", required=False, default=3333)
parser.add_argument("-b", "--bind", help="Bind address of HTTP server", required=False, default="127.0.0.1")
cli_args = parser.parse_args()
os.chdir(cli_args.dir[0])
try:
# try to use Python 3
from http.server import HTTPServer, SimpleHTTPRequestHandler, test as test_orig
def test (*args):
test_orig(*args, bind=cli_args.bind, port=cli_args.port)
except ImportError:
# fall back to Python 2
from BaseHTTPServer import HTTPServer, test
from SimpleHTTPServer import SimpleHTTPRequestHandler
class CORSRequestHandler(SimpleHTTPRequestHandler):
def end_headers (self):
self.send_header('Access-Control-Allow-Origin', '*')
SimpleHTTPRequestHandler.end_headers(self)
if __name__ == '__main__':
test(CORSRequestHandler, HTTPServer) | en | 0.424712 | #!/usr/bin/env python # try to use Python 3 # fall back to Python 2 | 2.875532 | 3 |
py/sqlutilpy/sqlutil.py | segasai/sqlutilpy | 6 | 6614453 | """Sqlutilpy module to access SQL databases
"""
from __future__ import print_function
import numpy
import numpy as np
import psycopg2
import threading
import collections
import warnings
from numpy.core import numeric as sb
from select import select
from psycopg2.extensions import POLL_OK, POLL_READ, POLL_WRITE
try:
import astropy.table as atpy
except ImportError:
# astropy is not installed
atpy = None
try:
import pandas
except ImportError:
# pandas is not installed
pandas = None
from io import BytesIO as StringIO
import queue
_WAIT_SELECT_TIMEOUT = 10
class config:
arraysize = 100000
class SqlUtilException(Exception):
pass
def __wait_select_inter(conn):
""" Make the queries interruptable by Ctrl-C
Taken from http://initd.org/psycopg/articles/2014/07/20/cancelling-postgresql-statements-python/ # noqa
"""
while True:
try:
state = conn.poll()
if state == POLL_OK:
break
elif state == POLL_READ:
select([conn.fileno()], [], [], _WAIT_SELECT_TIMEOUT)
elif state == POLL_WRITE:
select([], [conn.fileno()], [], _WAIT_SELECT_TIMEOUT)
else:
raise conn.OperationalError("bad state from poll: %s" % state)
except KeyboardInterrupt:
conn.cancel()
# the loop will be broken by a server error
continue
psycopg2.extensions.set_wait_callback(__wait_select_inter)
def getConnection(db=None,
driver=None,
user=None,
password=None,
host=None,
port=5432,
timeout=None):
""" Retrieve the connection to the DB object
Parameters
----------
db : string
The name of the database (in case of Postgresql) or filename in
case of sqlite db
driver : string
The db driver (either 'psycopg2' or 'sqlite3')
user : string, optional
Username
password: string, optional
Password
host : string, optional
Host-name
port : integer
Connection port (by default 5432 for PostgreSQL)
timeout : integer
Connection timeout for sqlite
Returns
-------
conn : object
Database Connection
"""
if driver == 'psycopg2':
conn_str = "dbname=%s host=%s port=%d" % (db, host, (port or 5432))
if user is not None:
conn_str = conn_str + ' user=%s' % user
if password is not None:
conn_str = conn_str + ' password=%s' % password
conn = psycopg2.connect(conn_str)
elif driver == 'sqlite3':
import sqlite3
if timeout is None:
timeout = 5
conn = sqlite3.connect(db, timeout=timeout)
else:
raise Exception("Unknown driver")
return conn
def getCursor(conn, driver=None, preamb=None, notNamed=False):
"""Retrieve the cursor"""
if driver == 'psycopg2':
cur = conn.cursor()
if preamb is not None:
cur.execute(preamb)
else:
cur.execute('set cursor_tuple_fraction TO 1')
# this is required because otherwise PG may decide to execute a
# different plan
if notNamed:
return cur
cur = conn.cursor(name='sqlutilcursor')
cur.arraysize = config.arraysize
elif driver == 'sqlite3':
cur = conn.cursor()
if preamb is not None:
cur.execute(preamb)
return cur
def __fromrecords(recList, dtype=None, intNullVal=None):
""" This function was taken from np.core.records and updated to
support conversion null integers to intNullVal
"""
shape = None
descr = sb.dtype((np.core.records.record, dtype))
try:
retval = sb.array(recList, dtype=descr)
except TypeError: # list of lists instead of list of tuples
shape = (len(recList), )
_array = np.core.records.recarray(shape, descr)
try:
for k in range(_array.size):
_array[k] = tuple(recList[k])
except TypeError:
convs = []
ncols = len(dtype.fields)
for _k in dtype.names:
_v = dtype.fields[_k]
if _v[0] in [np.int16, np.int32, np.int64]:
convs.append(lambda x: intNullVal if x is None else x)
else:
convs.append(lambda x: x)
convs = tuple(convs)
def convF(x):
return [convs[_](x[_]) for _ in range(ncols)]
for k in range(k, _array.size):
try:
_array[k] = tuple(recList[k])
except TypeError:
_array[k] = tuple(convF(recList[k]))
return _array
else:
if shape is not None and retval.shape != shape:
retval.shape = shape
res = retval.view(numpy.core.records.recarray)
return res
def __converter(qIn, qOut, endEvent, dtype, intNullVal):
""" Convert the input stream of tuples into numpy arrays """
while (not endEvent.is_set()):
try:
tups = qIn.get(True, 0.1)
except queue.Empty:
continue
try:
res = __fromrecords(tups, dtype=dtype, intNullVal=intNullVal)
except Exception:
print('Failed to convert input data into array')
endEvent.set()
raise
qOut.put(res)
def __getDType(row, typeCodes, strLength):
pgTypeHash = {
16: bool,
18: str,
20: 'i8',
21: 'i2',
23: 'i4',
1007: 'i4',
25: '|U%d',
700: 'f4',
701: 'f8',
1000: bool,
1005: 'i2',
1007: 'i4',
1016: 'i8',
1021: 'f4',
1022: 'f8',
1042: '|U%d', # character()
1043: '|U%d', # varchar
1700: 'f8', # numeric
1114: '<M8[us]', # timestamp
1082: '<M8[us]' # date
}
strTypes = [25, 1042, 1043]
pgTypes = []
for i, (curv, curt) in enumerate(zip(row, typeCodes)):
if curt not in pgTypeHash:
raise SqlUtilException('Unknown PG type %d' % curt)
pgType = pgTypeHash[curt]
if curt in strTypes:
if curv is not None:
# if the first string value is longer than
# strLength use that as a maximum
curmax = max(strLength, len(curv))
else:
# if the first value is null
# just use strLength
curmax = strLength
pgType = pgType % (curmax, )
if curt not in strTypes:
try:
len(curv)
pgType = 'O'
except TypeError:
pass
pgTypes.append(('a%d' % i, pgType))
dtype = numpy.dtype(pgTypes)
return dtype
def get(query,
params=None,
db="wsdb",
driver="psycopg2",
user=None,
password=<PASSWORD>,
host='localhost',
preamb=None,
conn=None,
port=5432,
strLength=10,
timeout=None,
notNamed=False,
asDict=False,
intNullVal=-9999):
'''Executes the sql query and returns the tuple or dictionary
with the numpy arrays.
Parameters
----------
query : string
Query you want to execute, can include question
marks to refer to query parameters
params : tuple
Query parameters
conn : object
The connection object to the DB (optional) to avoid reconnecting
asDict : boolean
Flag whether to retrieve the results as a dictionary with column
names as keys
strLength : integer
The maximum length of the string.
Strings will be truncated to this length
intNullVal : integer, optional
All the integer columns with nulls will have null replaced by
this value
db : string
The name of the database
driver : string, optional
The sql driver to be used (psycopg2 or sqlite3)
user : string, optional
user name for the DB connection
password : string, optional
DB connection password
host : string, optional
Hostname of the database
port : integer, optional
Port of the database
preamb : string
SQL code to be executed before the query
Returns
-------
ret : Tuple or dictionary
By default you get a tuple with numpy arrays for each column
in your query.
If you specified asDict keyword then you get an ordered dictionary with
your columns.
Examples
--------
>>> a, b, c = sqlutil.get('select ra,dec,d25 from rc3')
You can also use the parameters in your query:
>>> a, b = squlil.get('select ra,dec from rc3 where name=?',"NGC 3166")
'''
connSupplied = (conn is not None)
if not connSupplied:
conn = getConnection(db=db,
driver=driver,
user=user,
password=password,
host=host,
port=port,
timeout=timeout)
try:
cur = getCursor(conn, driver=driver, preamb=preamb, notNamed=notNamed)
if params is None:
res = cur.execute(query)
else:
res = cur.execute(query, params)
qIn = queue.Queue(1)
qOut = queue.Queue()
endEvent = threading.Event()
nrec = 0 # keeps the number of arrays sent to the other thread
# minus number received
reslist = []
proc = None
colNames = []
if driver == 'psycopg2':
try:
while (True):
# Iterating over the cursor, retrieving batches of results
# and then sending them for conversion
tups = cur.fetchmany()
desc = cur.description
# No more data
if tups == []:
break
# Send the new batch for conversion
qIn.put(tups)
# If the is just the start we need to launch the
# thread doing the conversion
if nrec == 0:
typeCodes = [_tmp.type_code for _tmp in desc]
colNames = [_tmp.name for _tmp in cur.description]
dtype = __getDType(tups[0], typeCodes, strLength)
proc = threading.Thread(target=__converter,
args=(qIn, qOut, endEvent,
dtype, intNullVal))
proc.start()
# nrec is the number of batches in conversion currently
nrec += 1
# Try to retrieve one processed batch without waiting
# on it
try:
reslist.append(qOut.get(False))
nrec -= 1
except queue.Empty:
pass
# Now we are done fetching the data from the DB, we
# just need to assemble the converted results
while (nrec != 0):
try:
reslist.append(qOut.get(True, 0.1))
nrec -= 1
except queue.Empty:
# continue looping unless the endEvent was set
# which should happen in the case of the crash
# of the converter thread
if endEvent.is_set():
raise Exception('Child thread failed')
endEvent.set()
except BaseException:
endEvent.set()
if proc is not None:
# notice that here the timeout is larger than the timeout
proc.join(0.2)
# in the converter process
if proc.is_alive():
proc.terminate()
raise
if proc is not None:
proc.join()
if reslist == []:
nCols = len(desc)
res = numpy.array([],
dtype=numpy.dtype([('a%d' % i, 'f')
for i in range(nCols)]))
else:
res = numpy.concatenate(reslist)
elif driver == 'sqlite3':
tups = cur.fetchall()
colNames = [_tmp[0] for _tmp in cur.description]
if len(tups) > 0:
res = numpy.core.records.array(tups)
else:
return [[]] * len(cur.description)
res = [res[tmp] for tmp in res.dtype.names]
except BaseException:
failure_cleanup(conn, connSupplied)
raise
cur.close()
if not connSupplied:
conn.close()
if asDict:
resDict = collections.OrderedDict()
repeats = {}
for _n, _v in zip(colNames, res):
if _n in resDict:
curn = _n + '_%d' % (repeats[_n])
repeats[_n] += 1
warnings.warn(('Column name %s is repeated in the output, ' +
'new name %s assigned') % (_n, curn))
else:
repeats[_n] = 1
curn = _n
resDict[curn] = _v
res = resDict
return res
def execute(query,
params=None,
db='wsdb',
driver="psycopg2",
user=None,
password=<PASSWORD>,
host='localhost',
conn=None,
preamb=None,
timeout=None,
noCommit=False):
"""Execute a given SQL command without returning the results
Parameters
----------
query: string
The query or command you are executing
params: tuple, optional
Optional parameters of your query
db : string
Database name
driver : string
Driver for the DB connection ('psucopg2' or 'sqlite3')
user : string, optional
user name for the DB connection
password : string, optional
DB connection password
host : string, optional
Hostname of the database
port : integer, optional
Port of the database
noCommit: bool
By default execute() will commit your command.
If you say noCommit, the commit won't be issued.
"""
connSupplied = (conn is not None)
if not connSupplied:
conn = getConnection(db=db,
driver=driver,
user=user,
password=password,
host=host,
timeout=timeout)
try:
cur = getCursor(conn, driver=driver, preamb=preamb, notNamed=True)
if params is not None:
cur.execute(query, params)
else:
# sqlite3 doesn't like params here...
cur.execute(query)
except BaseException:
failure_cleanup(conn, connSupplied)
raise
cur.close()
if not noCommit:
conn.commit()
if not connSupplied:
conn.close() # do not close if we were given the connection
def __create_schema(tableName, arrays, names, temp=False):
hash = dict([(np.int32, 'integer'), (np.int64, 'bigint'),
(np.uint64, 'bigint'), (np.int16, 'smallint'),
(np.uint8, 'bigint'), (np.float32, 'real'),
(np.float64, 'double precision'), (np.string_, 'varchar'),
(np.str_, 'varchar'), (np.bool_, 'boolean'),
(np.datetime64, 'timestamp')])
if temp:
temp = 'temporary'
else:
temp = ''
outp = 'create %s table %s ' % (temp, tableName)
outp1 = []
for arr, name in zip(arrays, names):
outp1.append('"' + name + '" ' + hash[arr.dtype.type])
return outp + '(' + ','.join(outp1) + ')'
def __print_arrays(arrays, f, sep=' '):
hash = dict([(np.int32, '%d'), (np.int64, '%d'), (np.int16, '%d'),
(np.uint8, '%d'), (np.float32, '%.18e'),
(np.float64, '%.18e'), (np.string_, '%s'), (np.str_, '%s'),
(np.datetime64, '%s'), (np.bool_, '%d')])
fmt = [hash[x.dtype.type] for x in arrays]
recarr = np.rec.fromarrays(arrays)
np.savetxt(f, recarr, fmt=fmt, delimiter=sep)
def failure_cleanup(conn, connSupplied):
try:
conn.rollback()
except Exception:
pass
if not connSupplied:
try:
conn.close() # do not close if we were given the connection
except Exception:
pass
def upload(tableName,
arrays,
names=None,
db="wsdb",
driver="psycopg2",
user=None,
password=<PASSWORD>,
host='locahost',
conn=None,
preamb=None,
timeout=None,
noCommit=False,
temp=False,
analyze=False,
createTable=True):
""" Upload the data stored in the tuple of arrays in the DB
Parameters
----------
tableName : string
The name of the table where the data will be uploaded
arrays_or_table : tuple
Tuple of arrays thar will be columns of the new table
If names are not specified, I this parameter can be pandas or
astropy table
names : tuple
Tuple of strings with column names
Examples
--------
>>> x = np.arange(10)
>>> y = x**.5
>>> sqlutil.upload('mytable',(x,y),('xcol','ycol'))
"""
connSupplied = (conn is not None)
sep = '|'
if not connSupplied:
conn = getConnection(db=db,
driver=driver,
user=user,
password=password,
host=host,
timeout=timeout)
if names is None:
for i in range(1):
# we assume that we were given a table
if atpy is not None:
if isinstance(arrays, atpy.Table):
names = arrays.columns
arrays = [arrays[_] for _ in names]
break
if pandas is not None:
if isinstance(arrays, pandas.DataFrame):
names = arrays.columns
arrays = [arrays[_] for _ in names]
break
if isinstance(arrays, dict):
names = arrays.keys()
arrays = [arrays[_] for _ in names]
break
if names is None:
raise Exception('you either need to give astropy \
table/pandas/dictionary or provide a separate list of arrays and their names')
arrays = [np.asarray(_) for _ in arrays]
repl_char = {
' ': '_',
'-': '_',
'(': '_',
')': '_',
'[': '_',
']': '_',
'<': '_',
'>': '_'
}
fixed_names = []
for name in names:
fixed_name = name + ''
for k in repl_char.keys():
fixed_name = fixed_name.replace(k, repl_char[k])
if fixed_name != name:
warnings.warn('''Renamed column '%s' to '%s' ''' %
(name, fixed_name))
fixed_names.append(fixed_name)
names = fixed_names
try:
cur = getCursor(conn, driver=driver, preamb=preamb, notNamed=True)
if createTable:
query1 = __create_schema(tableName, arrays, names, temp=temp)
cur.execute(query1)
nsplit = 100000
N = len(arrays[0])
for i in range(0, N, nsplit):
f = StringIO()
__print_arrays([_[i:i + nsplit] for _ in arrays], f, sep=sep)
f.seek(0)
try:
thread = psycopg2.extensions.get_wait_callback()
psycopg2.extensions.set_wait_callback(None)
cur.copy_from(f, tableName, sep=sep, columns=names)
finally:
psycopg2.extensions.set_wait_callback(thread)
except BaseException:
failure_cleanup(conn, connSupplied)
raise
if analyze:
cur.execute('analyze %s' % tableName)
cur.close()
if not noCommit:
conn.commit()
if not connSupplied:
conn.close() # do not close if we were given the connection
def local_join(query,
tableName,
arrays,
names,
db=None,
driver="psycopg2",
user=None,
password=<PASSWORD>,
host='locahost',
port=5432,
conn=None,
preamb=None,
timeout=None,
strLength=20,
asDict=False):
""" Join the data from python with the data in the database
This command first uploads the data in the DB and then runs a
user specified query.
Parameters
----------
query : String with the query to be executed
tableName : The name of the temporary table that is going to be created
arrays : The tuple with list of arrays with the data to be loaded in the DB
names : The tuple with the column names for the user table
Examples
--------
>>> x = np.arange(10)
>>> y = x**.5
>>> sqlutil.local_join('select * from mytable as m, sometable as s
where s.id=m.xcol',
'mytable',(x,y),('xcol','ycol'))
"""
connSupplied = (conn is not None)
if not connSupplied:
conn = getConnection(db=db,
driver=driver,
user=user,
password=password,
host=host,
timeout=timeout,
port=port)
upload(tableName,
arrays,
names,
conn=conn,
noCommit=True,
temp=True,
analyze=True)
res = get(query,
conn=conn,
preamb=preamb,
strLength=strLength,
asDict=asDict)
conn.rollback()
if not connSupplied:
conn.close()
return res
| """Sqlutilpy module to access SQL databases
"""
from __future__ import print_function
import numpy
import numpy as np
import psycopg2
import threading
import collections
import warnings
from numpy.core import numeric as sb
from select import select
from psycopg2.extensions import POLL_OK, POLL_READ, POLL_WRITE
try:
import astropy.table as atpy
except ImportError:
# astropy is not installed
atpy = None
try:
import pandas
except ImportError:
# pandas is not installed
pandas = None
from io import BytesIO as StringIO
import queue
_WAIT_SELECT_TIMEOUT = 10
class config:
arraysize = 100000
class SqlUtilException(Exception):
pass
def __wait_select_inter(conn):
""" Make the queries interruptable by Ctrl-C
Taken from http://initd.org/psycopg/articles/2014/07/20/cancelling-postgresql-statements-python/ # noqa
"""
while True:
try:
state = conn.poll()
if state == POLL_OK:
break
elif state == POLL_READ:
select([conn.fileno()], [], [], _WAIT_SELECT_TIMEOUT)
elif state == POLL_WRITE:
select([], [conn.fileno()], [], _WAIT_SELECT_TIMEOUT)
else:
raise conn.OperationalError("bad state from poll: %s" % state)
except KeyboardInterrupt:
conn.cancel()
# the loop will be broken by a server error
continue
psycopg2.extensions.set_wait_callback(__wait_select_inter)
def getConnection(db=None,
driver=None,
user=None,
password=None,
host=None,
port=5432,
timeout=None):
""" Retrieve the connection to the DB object
Parameters
----------
db : string
The name of the database (in case of Postgresql) or filename in
case of sqlite db
driver : string
The db driver (either 'psycopg2' or 'sqlite3')
user : string, optional
Username
password: string, optional
Password
host : string, optional
Host-name
port : integer
Connection port (by default 5432 for PostgreSQL)
timeout : integer
Connection timeout for sqlite
Returns
-------
conn : object
Database Connection
"""
if driver == 'psycopg2':
conn_str = "dbname=%s host=%s port=%d" % (db, host, (port or 5432))
if user is not None:
conn_str = conn_str + ' user=%s' % user
if password is not None:
conn_str = conn_str + ' password=%s' % password
conn = psycopg2.connect(conn_str)
elif driver == 'sqlite3':
import sqlite3
if timeout is None:
timeout = 5
conn = sqlite3.connect(db, timeout=timeout)
else:
raise Exception("Unknown driver")
return conn
def getCursor(conn, driver=None, preamb=None, notNamed=False):
"""Retrieve the cursor"""
if driver == 'psycopg2':
cur = conn.cursor()
if preamb is not None:
cur.execute(preamb)
else:
cur.execute('set cursor_tuple_fraction TO 1')
# this is required because otherwise PG may decide to execute a
# different plan
if notNamed:
return cur
cur = conn.cursor(name='sqlutilcursor')
cur.arraysize = config.arraysize
elif driver == 'sqlite3':
cur = conn.cursor()
if preamb is not None:
cur.execute(preamb)
return cur
def __fromrecords(recList, dtype=None, intNullVal=None):
""" This function was taken from np.core.records and updated to
support conversion null integers to intNullVal
"""
shape = None
descr = sb.dtype((np.core.records.record, dtype))
try:
retval = sb.array(recList, dtype=descr)
except TypeError: # list of lists instead of list of tuples
shape = (len(recList), )
_array = np.core.records.recarray(shape, descr)
try:
for k in range(_array.size):
_array[k] = tuple(recList[k])
except TypeError:
convs = []
ncols = len(dtype.fields)
for _k in dtype.names:
_v = dtype.fields[_k]
if _v[0] in [np.int16, np.int32, np.int64]:
convs.append(lambda x: intNullVal if x is None else x)
else:
convs.append(lambda x: x)
convs = tuple(convs)
def convF(x):
return [convs[_](x[_]) for _ in range(ncols)]
for k in range(k, _array.size):
try:
_array[k] = tuple(recList[k])
except TypeError:
_array[k] = tuple(convF(recList[k]))
return _array
else:
if shape is not None and retval.shape != shape:
retval.shape = shape
res = retval.view(numpy.core.records.recarray)
return res
def __converter(qIn, qOut, endEvent, dtype, intNullVal):
""" Convert the input stream of tuples into numpy arrays """
while (not endEvent.is_set()):
try:
tups = qIn.get(True, 0.1)
except queue.Empty:
continue
try:
res = __fromrecords(tups, dtype=dtype, intNullVal=intNullVal)
except Exception:
print('Failed to convert input data into array')
endEvent.set()
raise
qOut.put(res)
def __getDType(row, typeCodes, strLength):
pgTypeHash = {
16: bool,
18: str,
20: 'i8',
21: 'i2',
23: 'i4',
1007: 'i4',
25: '|U%d',
700: 'f4',
701: 'f8',
1000: bool,
1005: 'i2',
1007: 'i4',
1016: 'i8',
1021: 'f4',
1022: 'f8',
1042: '|U%d', # character()
1043: '|U%d', # varchar
1700: 'f8', # numeric
1114: '<M8[us]', # timestamp
1082: '<M8[us]' # date
}
strTypes = [25, 1042, 1043]
pgTypes = []
for i, (curv, curt) in enumerate(zip(row, typeCodes)):
if curt not in pgTypeHash:
raise SqlUtilException('Unknown PG type %d' % curt)
pgType = pgTypeHash[curt]
if curt in strTypes:
if curv is not None:
# if the first string value is longer than
# strLength use that as a maximum
curmax = max(strLength, len(curv))
else:
# if the first value is null
# just use strLength
curmax = strLength
pgType = pgType % (curmax, )
if curt not in strTypes:
try:
len(curv)
pgType = 'O'
except TypeError:
pass
pgTypes.append(('a%d' % i, pgType))
dtype = numpy.dtype(pgTypes)
return dtype
def get(query,
params=None,
db="wsdb",
driver="psycopg2",
user=None,
password=<PASSWORD>,
host='localhost',
preamb=None,
conn=None,
port=5432,
strLength=10,
timeout=None,
notNamed=False,
asDict=False,
intNullVal=-9999):
'''Executes the sql query and returns the tuple or dictionary
with the numpy arrays.
Parameters
----------
query : string
Query you want to execute, can include question
marks to refer to query parameters
params : tuple
Query parameters
conn : object
The connection object to the DB (optional) to avoid reconnecting
asDict : boolean
Flag whether to retrieve the results as a dictionary with column
names as keys
strLength : integer
The maximum length of the string.
Strings will be truncated to this length
intNullVal : integer, optional
All the integer columns with nulls will have null replaced by
this value
db : string
The name of the database
driver : string, optional
The sql driver to be used (psycopg2 or sqlite3)
user : string, optional
user name for the DB connection
password : string, optional
DB connection password
host : string, optional
Hostname of the database
port : integer, optional
Port of the database
preamb : string
SQL code to be executed before the query
Returns
-------
ret : Tuple or dictionary
By default you get a tuple with numpy arrays for each column
in your query.
If you specified asDict keyword then you get an ordered dictionary with
your columns.
Examples
--------
>>> a, b, c = sqlutil.get('select ra,dec,d25 from rc3')
You can also use the parameters in your query:
>>> a, b = squlil.get('select ra,dec from rc3 where name=?',"NGC 3166")
'''
connSupplied = (conn is not None)
if not connSupplied:
conn = getConnection(db=db,
driver=driver,
user=user,
password=password,
host=host,
port=port,
timeout=timeout)
try:
cur = getCursor(conn, driver=driver, preamb=preamb, notNamed=notNamed)
if params is None:
res = cur.execute(query)
else:
res = cur.execute(query, params)
qIn = queue.Queue(1)
qOut = queue.Queue()
endEvent = threading.Event()
nrec = 0 # keeps the number of arrays sent to the other thread
# minus number received
reslist = []
proc = None
colNames = []
if driver == 'psycopg2':
try:
while (True):
# Iterating over the cursor, retrieving batches of results
# and then sending them for conversion
tups = cur.fetchmany()
desc = cur.description
# No more data
if tups == []:
break
# Send the new batch for conversion
qIn.put(tups)
# If the is just the start we need to launch the
# thread doing the conversion
if nrec == 0:
typeCodes = [_tmp.type_code for _tmp in desc]
colNames = [_tmp.name for _tmp in cur.description]
dtype = __getDType(tups[0], typeCodes, strLength)
proc = threading.Thread(target=__converter,
args=(qIn, qOut, endEvent,
dtype, intNullVal))
proc.start()
# nrec is the number of batches in conversion currently
nrec += 1
# Try to retrieve one processed batch without waiting
# on it
try:
reslist.append(qOut.get(False))
nrec -= 1
except queue.Empty:
pass
# Now we are done fetching the data from the DB, we
# just need to assemble the converted results
while (nrec != 0):
try:
reslist.append(qOut.get(True, 0.1))
nrec -= 1
except queue.Empty:
# continue looping unless the endEvent was set
# which should happen in the case of the crash
# of the converter thread
if endEvent.is_set():
raise Exception('Child thread failed')
endEvent.set()
except BaseException:
endEvent.set()
if proc is not None:
# notice that here the timeout is larger than the timeout
proc.join(0.2)
# in the converter process
if proc.is_alive():
proc.terminate()
raise
if proc is not None:
proc.join()
if reslist == []:
nCols = len(desc)
res = numpy.array([],
dtype=numpy.dtype([('a%d' % i, 'f')
for i in range(nCols)]))
else:
res = numpy.concatenate(reslist)
elif driver == 'sqlite3':
tups = cur.fetchall()
colNames = [_tmp[0] for _tmp in cur.description]
if len(tups) > 0:
res = numpy.core.records.array(tups)
else:
return [[]] * len(cur.description)
res = [res[tmp] for tmp in res.dtype.names]
except BaseException:
failure_cleanup(conn, connSupplied)
raise
cur.close()
if not connSupplied:
conn.close()
if asDict:
resDict = collections.OrderedDict()
repeats = {}
for _n, _v in zip(colNames, res):
if _n in resDict:
curn = _n + '_%d' % (repeats[_n])
repeats[_n] += 1
warnings.warn(('Column name %s is repeated in the output, ' +
'new name %s assigned') % (_n, curn))
else:
repeats[_n] = 1
curn = _n
resDict[curn] = _v
res = resDict
return res
def execute(query,
params=None,
db='wsdb',
driver="psycopg2",
user=None,
password=<PASSWORD>,
host='localhost',
conn=None,
preamb=None,
timeout=None,
noCommit=False):
"""Execute a given SQL command without returning the results
Parameters
----------
query: string
The query or command you are executing
params: tuple, optional
Optional parameters of your query
db : string
Database name
driver : string
Driver for the DB connection ('psucopg2' or 'sqlite3')
user : string, optional
user name for the DB connection
password : string, optional
DB connection password
host : string, optional
Hostname of the database
port : integer, optional
Port of the database
noCommit: bool
By default execute() will commit your command.
If you say noCommit, the commit won't be issued.
"""
connSupplied = (conn is not None)
if not connSupplied:
conn = getConnection(db=db,
driver=driver,
user=user,
password=password,
host=host,
timeout=timeout)
try:
cur = getCursor(conn, driver=driver, preamb=preamb, notNamed=True)
if params is not None:
cur.execute(query, params)
else:
# sqlite3 doesn't like params here...
cur.execute(query)
except BaseException:
failure_cleanup(conn, connSupplied)
raise
cur.close()
if not noCommit:
conn.commit()
if not connSupplied:
conn.close() # do not close if we were given the connection
def __create_schema(tableName, arrays, names, temp=False):
hash = dict([(np.int32, 'integer'), (np.int64, 'bigint'),
(np.uint64, 'bigint'), (np.int16, 'smallint'),
(np.uint8, 'bigint'), (np.float32, 'real'),
(np.float64, 'double precision'), (np.string_, 'varchar'),
(np.str_, 'varchar'), (np.bool_, 'boolean'),
(np.datetime64, 'timestamp')])
if temp:
temp = 'temporary'
else:
temp = ''
outp = 'create %s table %s ' % (temp, tableName)
outp1 = []
for arr, name in zip(arrays, names):
outp1.append('"' + name + '" ' + hash[arr.dtype.type])
return outp + '(' + ','.join(outp1) + ')'
def __print_arrays(arrays, f, sep=' '):
hash = dict([(np.int32, '%d'), (np.int64, '%d'), (np.int16, '%d'),
(np.uint8, '%d'), (np.float32, '%.18e'),
(np.float64, '%.18e'), (np.string_, '%s'), (np.str_, '%s'),
(np.datetime64, '%s'), (np.bool_, '%d')])
fmt = [hash[x.dtype.type] for x in arrays]
recarr = np.rec.fromarrays(arrays)
np.savetxt(f, recarr, fmt=fmt, delimiter=sep)
def failure_cleanup(conn, connSupplied):
try:
conn.rollback()
except Exception:
pass
if not connSupplied:
try:
conn.close() # do not close if we were given the connection
except Exception:
pass
def upload(tableName,
           arrays,
           names=None,
           db="wsdb",
           driver="psycopg2",
           user=None,
           password=None,
           host='localhost',
           conn=None,
           preamb=None,
           timeout=None,
           noCommit=False,
           temp=False,
           analyze=False,
           createTable=True):
    """ Upload the data stored in the tuple of arrays into a DB table

    Parameters
    ----------
    tableName : string
        The name of the table where the data will be uploaded
    arrays : tuple
        Tuple of arrays that will become the columns of the new table.
        If names is not given, this argument may instead be an astropy
        Table, a pandas DataFrame or a dictionary of arrays.
    names : tuple, optional
        Tuple of strings with the column names
    db : string, optional
        Database name
    driver : string, optional
        Database driver (the COPY upload path requires 'psycopg2')
    user : string, optional
        User name for the DB connection
    password : string, optional
        DB connection password
    host : string, optional
        Hostname of the database
    conn : object, optional
        An existing DB connection to reuse (it will not be closed)
    preamb : string, optional
        SQL executed before the upload
    timeout : integer, optional
        Connection timeout
    noCommit : bool
        If True, do not commit after the upload
    temp : bool
        If True, create a temporary table
    analyze : bool
        If True, run ANALYZE on the table after the upload
    createTable : bool
        If False, assume the table already exists

    Examples
    --------
    >>> x = np.arange(10)
    >>> y = x**.5
    >>> sqlutil.upload('mytable',(x,y),('xcol','ycol'))
    """
    connSupplied = (conn is not None)
    sep = '|'
    if not connSupplied:
        conn = getConnection(db=db,
                             driver=driver,
                             user=user,
                             password=password,
                             host=host,
                             timeout=timeout)
    if names is None:
        # No explicit names: accept a table-like container instead of a
        # plain tuple of arrays.
        if atpy is not None and isinstance(arrays, atpy.Table):
            names = arrays.columns
            arrays = [arrays[_] for _ in names]
        elif pandas is not None and isinstance(arrays, pandas.DataFrame):
            names = arrays.columns
            arrays = [arrays[_] for _ in names]
        elif isinstance(arrays, dict):
            names = arrays.keys()
            arrays = [arrays[_] for _ in names]
    if names is None:
        raise Exception('you either need to give astropy \
table/pandas/dictionary or provide a separate list of arrays and their names')
    arrays = [np.asarray(_) for _ in arrays]
    # Characters that must not appear in unquoted SQL identifiers.
    repl_char = {
        ' ': '_',
        '-': '_',
        '(': '_',
        ')': '_',
        '[': '_',
        ']': '_',
        '<': '_',
        '>': '_'
    }
    fixed_names = []
    for name in names:
        fixed_name = name + ''
        for k, v in repl_char.items():
            fixed_name = fixed_name.replace(k, v)
        if fixed_name != name:
            warnings.warn('''Renamed column '%s' to '%s' ''' %
                          (name, fixed_name))
        fixed_names.append(fixed_name)
    names = fixed_names
    try:
        cur = getCursor(conn, driver=driver, preamb=preamb, notNamed=True)
        if createTable:
            query1 = __create_schema(tableName, arrays, names, temp=temp)
            cur.execute(query1)
        nsplit = 100000  # upload in chunks of at most 100k rows
        N = len(arrays[0])
        for i in range(0, N, nsplit):
            f = StringIO()
            __print_arrays([_[i:i + nsplit] for _ in arrays], f, sep=sep)
            f.seek(0)
            try:
                # COPY blocks; temporarily disable the psycopg2 wait
                # callback (installed for Ctrl-C support) around it.
                thread = psycopg2.extensions.get_wait_callback()
                psycopg2.extensions.set_wait_callback(None)
                cur.copy_from(f, tableName, sep=sep, columns=names)
            finally:
                psycopg2.extensions.set_wait_callback(thread)
    except BaseException:
        failure_cleanup(conn, connSupplied)
        raise
    if analyze:
        cur.execute('analyze %s' % tableName)
    cur.close()
    if not noCommit:
        conn.commit()
    if not connSupplied:
        conn.close()  # do not close if we were given the connection
def local_join(query,
               tableName,
               arrays,
               names,
               db=None,
               driver="psycopg2",
               user=None,
               password=None,
               host='localhost',
               port=5432,
               conn=None,
               preamb=None,
               timeout=None,
               strLength=20,
               asDict=False):
    """ Join the data from python with the data in the database

    The arrays are first uploaded into a temporary table and the
    user-specified query is then executed (so it can join against that
    table).  Nothing is committed: the temporary data are discarded when
    the transaction is rolled back at the end.

    Parameters
    ----------
    query : string
        The query to be executed
    tableName : string
        The name of the temporary table that is going to be created
    arrays : tuple
        The tuple of arrays with the data to be loaded in the DB
    names : tuple
        The tuple with the column names for the user table
    strLength : integer, optional
        The maximum length of returned strings
    asDict : boolean, optional
        If True, return the result as an ordered dictionary

    Examples
    --------
    >>> x = np.arange(10)
    >>> y = x**.5
    >>> sqlutil.local_join('select * from mytable as m, sometable as s
            where s.id=m.xcol',
            'mytable',(x,y),('xcol','ycol'))
    """
    connSupplied = (conn is not None)
    if not connSupplied:
        conn = getConnection(db=db,
                             driver=driver,
                             user=user,
                             password=password,
                             host=host,
                             timeout=timeout,
                             port=port)
    upload(tableName,
           arrays,
           names,
           conn=conn,
           noCommit=True,
           temp=True,
           analyze=True)
    res = get(query,
              conn=conn,
              preamb=preamb,
              strLength=strLength,
              asDict=asDict)
    # Discard the uncommitted temporary upload.
    conn.rollback()
    if not connSupplied:
        conn.close()
    return res
| en | 0.717884 | Sqlutilpy module to access SQL databases # astropy is not installed # pandas is not installed Make the queries interruptable by Ctrl-C Taken from http://initd.org/psycopg/articles/2014/07/20/cancelling-postgresql-statements-python/ # noqa # the loop will be broken by a server error Retrieve the connection to the DB object Parameters ---------- db : string The name of the database (in case of Postgresql) or filename in case of sqlite db driver : string The db driver (either 'psycopg2' or 'sqlite3') user : string, optional Username password: string, optional Password host : string, optional Host-name port : integer Connection port (by default 5432 for PostgreSQL) timeout : integer Connection timeout for sqlite Returns ------- conn : object Database Connection Retrieve the cursor # this is required because otherwise PG may decide to execute a # different plan This function was taken from np.core.records and updated to support conversion null integers to intNullVal # list of lists instead of list of tuples Convert the input stream of tuples into numpy arrays # character() # varchar # numeric # timestamp # date # if the first string value is longer than # strLength use that as a maximum # if the first value is null # just use strLength Executes the sql query and returns the tuple or dictionary with the numpy arrays. Parameters ---------- query : string Query you want to execute, can include question marks to refer to query parameters params : tuple Query parameters conn : object The connection object to the DB (optional) to avoid reconnecting asDict : boolean Flag whether to retrieve the results as a dictionary with column names as keys strLength : integer The maximum length of the string. 
Strings will be truncated to this length intNullVal : integer, optional All the integer columns with nulls will have null replaced by this value db : string The name of the database driver : string, optional The sql driver to be used (psycopg2 or sqlite3) user : string, optional user name for the DB connection password : string, optional DB connection password host : string, optional Hostname of the database port : integer, optional Port of the database preamb : string SQL code to be executed before the query Returns ------- ret : Tuple or dictionary By default you get a tuple with numpy arrays for each column in your query. If you specified asDict keyword then you get an ordered dictionary with your columns. Examples -------- >>> a, b, c = sqlutil.get('select ra,dec,d25 from rc3') You can also use the parameters in your query: >>> a, b = squlil.get('select ra,dec from rc3 where name=?',"NGC 3166") # keeps the number of arrays sent to the other thread # minus number received # Iterating over the cursor, retrieving batches of results # and then sending them for conversion # No more data # Send the new batch for conversion # If the is just the start we need to launch the # thread doing the conversion # nrec is the number of batches in conversion currently # Try to retrieve one processed batch without waiting # on it # Now we are done fetching the data from the DB, we # just need to assemble the converted results # continue looping unless the endEvent was set # which should happen in the case of the crash # of the converter thread # notice that here the timeout is larger than the timeout # in the converter process Execute a given SQL command without returning the results Parameters ---------- query: string The query or command you are executing params: tuple, optional Optional parameters of your query db : string Database name driver : string Driver for the DB connection ('psucopg2' or 'sqlite3') user : string, optional user name for the DB connection password : 
string, optional DB connection password host : string, optional Hostname of the database port : integer, optional Port of the database noCommit: bool By default execute() will commit your command. If you say noCommit, the commit won't be issued. # sqlite3 doesn't like params here... # do not close if we were given the connection # do not close if we were given the connection Upload the data stored in the tuple of arrays in the DB Parameters ---------- tableName : string The name of the table where the data will be uploaded arrays_or_table : tuple Tuple of arrays thar will be columns of the new table If names are not specified, I this parameter can be pandas or astropy table names : tuple Tuple of strings with column names Examples -------- >>> x = np.arange(10) >>> y = x**.5 >>> sqlutil.upload('mytable',(x,y),('xcol','ycol')) # we assume that we were given a table Renamed column '%s' to '%s' # do not close if we were given the connection Join the data from python with the data in the database This command first uploads the data in the DB and then runs a user specified query. Parameters ---------- query : String with the query to be executed tableName : The name of the temporary table that is going to be created arrays : The tuple with list of arrays with the data to be loaded in the DB names : The tuple with the column names for the user table Examples -------- >>> x = np.arange(10) >>> y = x**.5 >>> sqlutil.local_join('select * from mytable as m, sometable as s where s.id=m.xcol', 'mytable',(x,y),('xcol','ycol')) | 2.491103 | 2 |
lifemonitor/auth/serializers.py | ilveroluca/life_monitor | 0 | 6614454 | # Copyright (c) 2020-2021 CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
from lifemonitor.serializers import BaseSchema, ma
from marshmallow import fields
from . import models
class ProviderSchema(BaseSchema):
    """Serializes an identity provider (OAuth2 provider or registry)."""
    uuid = fields.String()
    name = fields.String()
    type = fields.Method('get_type')
    uri = fields.String(attribute="api_base_url")
    userinfo_endpoint = fields.String()

    def get_type(self, obj):
        # Parameter renamed from 'object' to avoid shadowing the builtin;
        # marshmallow's fields.Method passes the serialized object
        # positionally, so the rename is interface-safe.  Anything that is
        # not a plain OAuth2 identity provider is exposed as 'registry'.
        return obj.type \
            if obj.type == 'oauth2_identity_provider' \
            else 'registry'
class IdentitySchema(BaseSchema):
    """Serializes a user identity as OIDC-like claims.

    ``sub`` is mapped from the provider-assigned user id, the remaining
    claims come from the cached ``user_info`` record, and ``provider``
    embeds the identity provider itself.
    """
    sub = fields.String(attribute="provider_user_id")
    name = fields.String(attribute="user_info.name")
    username = fields.String(attribute="user_info.preferred_username")
    email = fields.String(attribute="user_info.email")
    mbox_sha1sum = fields.String(attribute="user_info.mbox_sha1sum")
    profile = fields.String(attribute="user_info.profile")
    picture = fields.String(attribute="user_info.picture")
    provider = fields.Nested(ProviderSchema())
class UserSchema(BaseSchema):
    """Serializes a User; many-item results are wrapped in an ``items``
    envelope (single items are returned bare)."""
    __envelope__ = {"single": None, "many": "items"}
    __model__ = models.User

    class Meta:
        model = models.User

    id = ma.auto_field()
    username = ma.auto_field()
    # Serializes only the user's current identity, keyed per provider
    # (attribute="current_identity").  NOTE(review): the original comment
    # read "Uncomment to include all identities" — presumably pointing the
    # attribute at the full identities mapping would include them all;
    # confirm against models.User before changing.
    identities = fields.Dict(attribute="current_identity",
                             keys=fields.String(),
                             values=fields.Nested(IdentitySchema()))
| # Copyright (c) 2020-2021 CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
from lifemonitor.serializers import BaseSchema, ma
from marshmallow import fields
from . import models
class ProviderSchema(BaseSchema):
uuid = fields.String()
name = fields.String()
type = fields.Method('get_type')
uri = fields.String(attribute="api_base_url")
userinfo_endpoint = fields.String()
def get_type(self, object):
return object.type \
if object.type == 'oauth2_identity_provider' \
else 'registry'
class IdentitySchema(BaseSchema):
sub = fields.String(attribute="provider_user_id")
name = fields.String(attribute="user_info.name")
username = fields.String(attribute="user_info.preferred_username")
email = fields.String(attribute="user_info.email")
mbox_sha1sum = fields.String(attribute="user_info.mbox_sha1sum")
profile = fields.String(attribute="user_info.profile")
picture = fields.String(attribute="user_info.picture")
provider = fields.Nested(ProviderSchema())
class UserSchema(BaseSchema):
__envelope__ = {"single": None, "many": "items"}
__model__ = models.User
class Meta:
model = models.User
id = ma.auto_field()
username = ma.auto_field()
# Uncomment to include all identities
identities = fields.Dict(attribute="current_identity",
keys=fields.String(),
values=fields.Nested(IdentitySchema()))
| en | 0.781715 | # Copyright (c) 2020-2021 CRS4 # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # Uncomment to include all identities | 1.824046 | 2 |
perimeter_of_a_fibonnacci_square.py | julzhk/codekata | 0 | 6614455 | <gh_stars>0
# Shared memoization cache for fibonacci() (filled in by the memo_ize
# decorator below).  The original file shipped a ~230-entry precomputed
# Fibonacci table here; that table is redundant: fibonacci() computes any
# missing value iteratively in O(n) and memo_ize stores the result back
# into this dict on first use, so seeding only the base cases preserves
# all observable behavior while removing a large unmaintainable literal.
FIB_MEMO = {0: 0, 1: 1}
def memo_ize(func):
    """Decorator caching ``func(n)`` results in the module-level FIB_MEMO.

    The pointless ``global FIB_MEMO`` statement was removed: the wrapper
    only reads/mutates the dict (never rebinds the name), so no global
    declaration is needed.
    """
    def func_wrapper(n):
        if n not in FIB_MEMO:
            FIB_MEMO[n] = func(n)
        return FIB_MEMO[n]
    return func_wrapper


@memo_ize
def fibonacci(n):
    """Return the n-th Fibonacci number iteratively (F(0)=0, F(1)=1)."""
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
def perimeter(n):
    """Perimeter of the rectangle tiled by Fibonacci squares.

    The rectangle built from squares with sides fib(1)..fib(n+1) has
    perimeter 4 * sum of those sides, and since the partial Fibonacci sum
    fib(1)+...+fib(k) equals fib(k+2) - 1, that is 4 * (fib(n+3) - 1)
    (e.g. perimeter(5) == 4 * (fib(8) - 1) == 80).
    """
    return 4 * (fibonacci(n + 3) - 1)
# perimeter(77911)
# perimeter(5)
# for i in range(0, 1000):
# print '%d : %d,' % (i , fibonacci(i))
import unittest
class TestFirst(unittest.TestCase):
    # Regression tests pinning perimeter() against known values
    # (Codewars kata "Perimeter of squares in a rectangle").
    def test_first(self):
        # Alias Codewars-style `assert_equals` (both `test` and `Test`
        # spellings) onto unittest's assertEqual so kata assertion
        # snippets can be pasted verbatim below.
        test = self
        test.assert_equals = self.assertEqual
        Test = self
        Test.assert_equals = self.assertEqual
        test.assert_equals(perimeter(5), 80)
        test.assert_equals(perimeter(7), 216)
        test.assert_equals(perimeter(20), 114624)
        test.assert_equals(perimeter(30), 14098308)
test.assert_equals(perimeter(100), 6002082144827584333104) | FIB_MEMO = {0: 0, 1: 1, 2: 1, 3: 2, 4: 3, 5: 5, 6: 8, 7: 13, 8: 21, 9: 34, 10: 55, 11: 89, 12: 144, 13: 233, 14: 377,
15: 610, 16: 987, 17: 1597, 18: 2584, 19: 4181, 20: 6765, 21: 10946, 22: 17711, 23: 28657, 24: 46368,
25: 75025, 26: 121393, 27: 196418, 28: 317811, 29: 514229, 30: 832040, 31: 1346269, 32: 2178309,
33: 3524578, 34: 5702887, 35: 9227465, 36: 14930352, 37: 24157817, 38: 39088169, 39: 63245986,
40: 102334155, 41: 165580141, 42: 267914296, 43: 433494437, 44: 701408733, 45: 1134903170, 46: 1836311903,
47: 2971215073, 48: 4807526976, 49: 7778742049, 50: 12586269025, 51: 20365011074, 52: 32951280099,
53: 53316291173, 54: 86267571272, 55: 139583862445, 56: 225851433717, 57: 365435296162, 58: 591286729879,
59: 956722026041, 60: 1548008755920, 61: 2504730781961, 62: 4052739537881, 63: 6557470319842,
64: 10610209857723, 65: 17167680177565, 66: 27777890035288, 67: 44945570212853, 68: 72723460248141,
69: 117669030460994, 70: 190392490709135, 71: 308061521170129, 72: 498454011879264, 73: 806515533049393,
74: 1304969544928657, 75: 2111485077978050, 76: 3416454622906707, 77: 5527939700884757,
78: 8944394323791464, 79: 14472334024676221, 80: 23416728348467685, 81: 37889062373143906,
82: 61305790721611591, 83: 99194853094755497, 84: 160500643816367088, 85: 259695496911122585,
86: 420196140727489673, 87: 679891637638612258, 88: 1100087778366101931, 89: 1779979416004714189,
90: 2880067194370816120, 91: 4660046610375530309, 92: 7540113804746346429, 93: 12200160415121876738,
94: 19740274219868223167, 95: 31940434634990099905, 96: 51680708854858323072, 97: 83621143489848422977,
98: 135301852344706746049, 99: 218922995834555169026, 100: 354224848179261915075,
101: 573147844013817084101, 102: 927372692193078999176, 103: 1500520536206896083277,
104: 2427893228399975082453, 105: 3928413764606871165730, 106: 6356306993006846248183,
107: 10284720757613717413913, 108: 16641027750620563662096, 109: 26925748508234281076009,
110: 43566776258854844738105, 111: 70492524767089125814114, 112: 114059301025943970552219,
113: 184551825793033096366333, 114: 298611126818977066918552, 115: 483162952612010163284885,
116: 781774079430987230203437, 117: 1264937032042997393488322, 118: 2046711111473984623691759,
119: 3311648143516982017180081, 120: 5358359254990966640871840, 121: 8670007398507948658051921,
122: 14028366653498915298923761, 123: 22698374052006863956975682, 124: 36726740705505779255899443,
125: 59425114757512643212875125, 126: 96151855463018422468774568, 127: 155576970220531065681649693,
128: 251728825683549488150424261, 129: 407305795904080553832073954, 130: 659034621587630041982498215,
131: 1066340417491710595814572169, 132: 1725375039079340637797070384, 133: 2791715456571051233611642553,
134: 4517090495650391871408712937, 135: 7308805952221443105020355490, 136: 11825896447871834976429068427,
137: 19134702400093278081449423917, 138: 30960598847965113057878492344, 139: 50095301248058391139327916261,
140: 81055900096023504197206408605, 141: 131151201344081895336534324866,
142: 212207101440105399533740733471, 143: 343358302784187294870275058337,
144: 555565404224292694404015791808, 145: 898923707008479989274290850145,
146: 1454489111232772683678306641953, 147: 2353412818241252672952597492098,
148: 3807901929474025356630904134051, 149: 6161314747715278029583501626149,
150: 9969216677189303386214405760200, 151: 16130531424904581415797907386349,
152: 26099748102093884802012313146549, 153: 42230279526998466217810220532898,
154: 68330027629092351019822533679447, 155: 110560307156090817237632754212345,
156: 178890334785183168257455287891792, 157: 289450641941273985495088042104137,
158: 468340976726457153752543329995929, 159: 757791618667731139247631372100066,
160: 1226132595394188293000174702095995, 161: 1983924214061919432247806074196061,
162: 3210056809456107725247980776292056, 163: 5193981023518027157495786850488117,
164: 8404037832974134882743767626780173, 165: 13598018856492162040239554477268290,
166: 22002056689466296922983322104048463, 167: 35600075545958458963222876581316753,
168: 57602132235424755886206198685365216, 169: 93202207781383214849429075266681969,
170: 150804340016807970735635273952047185, 171: 244006547798191185585064349218729154,
172: 394810887814999156320699623170776339, 173: 638817435613190341905763972389505493,
174: 1033628323428189498226463595560281832, 175: 1672445759041379840132227567949787325,
176: 2706074082469569338358691163510069157, 177: 4378519841510949178490918731459856482,
178: 7084593923980518516849609894969925639, 179: 11463113765491467695340528626429782121,
180: 18547707689471986212190138521399707760, 181: 30010821454963453907530667147829489881,
182: 48558529144435440119720805669229197641, 183: 78569350599398894027251472817058687522,
184: 127127879743834334146972278486287885163, 185: 205697230343233228174223751303346572685,
186: 332825110087067562321196029789634457848, 187: 538522340430300790495419781092981030533,
188: 871347450517368352816615810882615488381, 189: 1409869790947669143312035591975596518914,
190: 2281217241465037496128651402858212007295, 191: 3691087032412706639440686994833808526209,
192: 5972304273877744135569338397692020533504, 193: 9663391306290450775010025392525829059713,
194: 15635695580168194910579363790217849593217, 195: 25299086886458645685589389182743678652930,
196: 40934782466626840596168752972961528246147, 197: 66233869353085486281758142155705206899077,
198: 107168651819712326877926895128666735145224, 199: 173402521172797813159685037284371942044301,
200: 280571172992510140037611932413038677189525, 201: 453973694165307953197296969697410619233826,
202: 734544867157818093234908902110449296423351, 203: 1188518561323126046432205871807859915657177,
204: 1923063428480944139667114773918309212080528, 205: 3111581989804070186099320645726169127737705,
206: 5034645418285014325766435419644478339818233, 207: 8146227408089084511865756065370647467555938,
208: 13180872826374098837632191485015125807374171, 209: 21327100234463183349497947550385773274930109,
210: 34507973060837282187130139035400899082304280, 211: 55835073295300465536628086585786672357234389,
212: 90343046356137747723758225621187571439538669, 213: 146178119651438213260386312206974243796773058,
214: 236521166007575960984144537828161815236311727, 215: 382699285659014174244530850035136059033084785,
216: 619220451666590135228675387863297874269396512, 217: 1001919737325604309473206237898433933302481297,
218: 1621140188992194444701881625761731807571877809, 219: 2623059926317798754175087863660165740874359106,
220: 4244200115309993198876969489421897548446236915, 221: 6867260041627791953052057353082063289320596021,
222: 11111460156937785151929026842503960837766832936, 223: 17978720198565577104981084195586024127087428957,
224: 29090180355503362256910111038089984964854261893, 225: 47068900554068939361891195233676009091941690850,
226: 76159080909572301618801306271765994056795952743, 227: 123227981463641240980692501505442003148737643593 }
def memo_ize(func):
global FIB_MEMO
def func_wrapper(n):
if n in FIB_MEMO:
return FIB_MEMO[n]
FIB_MEMO[n] = func(n)
return FIB_MEMO[n]
return func_wrapper
@memo_ize
def fibonacci(n):
# get the n'th fibonacci
# 1, 1, 2, 3, 5, 8, 13,
a, b = 0, 1
for i in range(0, n):
a, b = b, a + b
return a
def perimeter(n):
return 4 * (fibonacci(n + 3) - 1)
# perimeter(77911)
# perimeter(5)
# for i in range(0, 1000):
# print '%d : %d,' % (i , fibonacci(i))
import unittest
class TestFirst(unittest.TestCase):
def test_first(self):
test = self
test.assert_equals = self.assertEqual
Test = self
Test.assert_equals = self.assertEqual
test.assert_equals(perimeter(5), 80)
test.assert_equals(perimeter(7), 216)
test.assert_equals(perimeter(20), 114624)
test.assert_equals(perimeter(30), 14098308)
test.assert_equals(perimeter(100), 6002082144827584333104) | en | 0.289466 | # get the n'th fibonacci # 1, 1, 2, 3, 5, 8, 13, # perimeter(77911) # perimeter(5) # for i in range(0, 1000): # print '%d : %d,' % (i , fibonacci(i)) | 0.739778 | 1 |
source/db_api/crud/crud_users.py | JungeAlexander/kbase_db_api | 1 | 6614456 | <filename>source/db_api/crud/crud_users.py
from typing import Iterable
from sqlalchemy.orm import Session
from db_api import models, schemas
from db_api.core import security
def create_user(db: Session, user: schemas.UserCreate) -> models.User:
    """Create and persist a new user, hashing the plaintext password.

    Fixes the garbled ``hashed_password=<PASSWORD>`` placeholder: the hash
    computed on the first line is what must be stored (the same
    ``hashed_password`` column is later read by ``authenticate_user``).
    """
    hashed_password = security.get_password_hash(user.password)
    db_user = models.User(
        email=user.email,
        username=user.username,
        hashed_password=hashed_password,
        is_superuser=user.is_superuser,
    )
    db.add(db_user)
    db.commit()
    db.refresh(db_user)  # reload server-generated fields (e.g. id)
    return db_user
def update_user(db: Session, user: schemas.UserUpdate) -> models.User:
    """Replace the stored user (looked up by email) with the given data.

    Implemented as delete-and-recreate while preserving the original
    primary key.  NOTE(review): assumes ``user.dict()`` yields exactly the
    models.User column values (including an already-hashed password, if
    present) and that a user with this email exists -- confirm against
    schemas.UserUpdate and the callers before relying on this.
    """
    old_user = get_user_by_email(db, user.email)
    new_user = models.User(id=old_user.id, **user.dict())
    db.delete(old_user)
    db.add(new_user)
    db.commit()
    db.refresh(new_user)
    return new_user
def get_user(db: Session, user_id: int) -> models.User:
    """Return the user with the given primary key, or None if absent."""
    by_id = db.query(models.User).filter(models.User.id == user_id)
    return by_id.first()
def get_user_by_email(db: Session, email: str) -> models.User:
    """Return the user with the given email address, or None if absent."""
    by_email = db.query(models.User).filter(models.User.email == email)
    return by_email.first()
def get_user_by_username(db: Session, username: str) -> models.User:
    """Return the user with the given username, or None if absent."""
    by_name = db.query(models.User).filter(models.User.username == username)
    return by_name.first()
def get_users(db: Session, skip: int = 0, limit: int = 100) -> Iterable[models.User]:
    """Return one page of users, skipping `skip` rows, at most `limit` rows."""
    page = db.query(models.User).offset(skip).limit(limit)
    return page.all()
def authenticate_user(db: Session, username: str, password: str) -> models.User:
    """Return the matching user when the password verifies; otherwise None."""
    candidate = get_user_by_username(db, username=username)
    if candidate and security.verify_password(password, candidate.hashed_password):
        return candidate
    return None
| <filename>source/db_api/crud/crud_users.py
from typing import Iterable
from sqlalchemy.orm import Session
from db_api import models, schemas
from db_api.core import security
def create_user(db: Session, user: schemas.UserCreate) -> models.User:
    """Create a new user row from a UserCreate payload.

    The plaintext password is hashed before storage; the refreshed ORM
    instance (with its database-generated fields) is returned.
    """
    hashed_password = security.get_password_hash(user.password)
    db_user = models.User(
        email=user.email,
        username=user.username,
        # Store only the hash, never the plaintext password.
        hashed_password=hashed_password,
        is_superuser=user.is_superuser,
    )
    db.add(db_user)
    db.commit()
    db.refresh(db_user)  # pull server-generated values (id, defaults)
    return db_user
def update_user(db: Session, user: schemas.UserUpdate) -> models.User:
    # Replaces the stored row wholesale: the existing user (looked up by
    # email) is deleted and a new row is inserted under the same primary key.
    # NOTE(review): delete-then-add resets any column not present in
    # UserUpdate and briefly removes the row within the transaction; an
    # in-place UPDATE may be safer -- confirm this is intentional.
    old_user = get_user_by_email(db, user.email)
    # NOTE(review): old_user is None for an unknown email; the attribute
    # access below would then raise AttributeError.
    new_user = models.User(id=old_user.id, **user.dict())
    db.delete(old_user)
    db.add(new_user)
    db.commit()
    db.refresh(new_user)
    return new_user
def get_user(db: Session, user_id: int) -> models.User:
    """Fetch a single user by primary key; None when absent."""
    query = db.query(models.User).filter(models.User.id == user_id)
    return query.first()
def get_user_by_email(db: Session, email: str) -> models.User:
    """Fetch a single user by e-mail address; None when absent."""
    query = db.query(models.User).filter(models.User.email == email)
    return query.first()
def get_user_by_username(db: Session, username: str) -> models.User:
    """Fetch a single user by username; None when absent."""
    query = db.query(models.User).filter(models.User.username == username)
    return query.first()
def get_users(db: Session, skip: int = 0, limit: int = 100) -> Iterable[models.User]:
    """Return one page of users, skipping `skip` rows, at most `limit` rows."""
    page = db.query(models.User).offset(skip).limit(limit)
    return page.all()
def authenticate_user(db: Session, username: str, password: str) -> models.User:
    """Return the matching user when the password verifies; otherwise None."""
    candidate = get_user_by_username(db, username=username)
    if candidate and security.verify_password(password, candidate.hashed_password):
        return candidate
    return None
| none | 1 | 2.756544 | 3 | |
ch08/paramref.py | stoneflyop1/fluent_py | 0 | 6614457 | <reponame>stoneflyop1/fluent_py
def f(a,b):
    # Demonstrates augmented assignment on a parameter: for mutable
    # arguments (lists) `a += b` extends the caller's object in place,
    # while for immutable ones (ints, tuples) it only rebinds the local.
    a += b
    return a
# Call f() with each argument kind and print before/after:
# ints stay unchanged, the list `a` is extended in place, tuples stay unchanged.
x = 1; y = 2; print('x','y'); print(x,y); f(x,y); print(x,y)
a = [1,2]; b = [3,4]; print('a','b'); print(a,b); f(a,b); print(a,b)
t = (10, 20); u = (30,40); print('t','u'); print(t,u); f(t,u); print(t,u)
##########################################
# demo with empty list as function parameter
class HauntedBus:
    """A bus model haunted by ghost passengers.

    The mutable default argument is kept ON PURPOSE: every instance built
    without an explicit list aliases the single default list object, which
    is exactly the pitfall the demo script below exercises.
    """
    def __init__(self, passengers=[]):  # deliberate shared default (see class doc)
        # Aliases the caller's (or the shared default) list; list(passengers)
        # would create an independent copy instead.
        self.passengers = passengers
    def pick(self, name):
        """Board `name`; return the (possibly shared) passenger list."""
        self.passengers.append(name)
        return self.passengers
    def drop(self, name):
        """Remove `name`; return the (possibly shared) passenger list."""
        self.passengers.remove(name)
        return self.passengers
# bus1 gets its own explicit list, so it behaves normally.
bus1 = HauntedBus(['Alice', 'Bill'])
print('bus1:', bus1.passengers)
bus1.pick('Charlie')
bus1.drop('Alice')
print('bus1:', bus1.passengers)
# bus2 and bus3 both fall back to the shared default list ...
bus2 = HauntedBus()
print('bus2 with default empty passenger list')
print('bus2:', bus2.pick('Carrie'))
bus3 = HauntedBus()
print('bus3 with default empty passenger list')
print('bus3:', bus3.pick('Dave'))
# ... so bus3's pick leaks into bus2: they alias the same list object.
print('bus2:', bus2.passengers)
print('bus2.passengers is bus3.passengers: ', bus2.passengers is bus3.passengers)
print('bus1:', bus1.passengers)
# all HauntedBus instances share the same empty list instance
print(HauntedBus.__init__.__defaults__[0] is HauntedBus().passengers)
# empty list ids
# Two transient [] literals may reuse the same memory address; keeping
# references alive (xx, yy) forces distinct ids.
print(id([]), id([]))
xx = []; yy = []
print(id([]), id(xx), id(yy)) | def f(a,b):
a += b
return a
x = 1; y = 2; print('x','y'); print(x,y); f(x,y); print(x,y)
a = [1,2]; b = [3,4]; print('a','b'); print(a,b); f(a,b); print(a,b)
t = (10, 20); u = (30,40); print('t','u'); print(t,u); f(t,u); print(t,u)
##########################################
# demo with empty list as function parameter
class HauntedBus:
    """A bus model haunted by ghost passengers.

    The mutable default argument is kept ON PURPOSE: every instance built
    without an explicit list aliases the single default list object, which
    is exactly the pitfall the demo script below exercises.
    """
    def __init__(self, passengers=[]):  # deliberate shared default (see class doc)
        # Aliases the caller's (or the shared default) list; list(passengers)
        # would create an independent copy instead.
        self.passengers = passengers
    def pick(self, name):
        """Board `name`; return the (possibly shared) passenger list."""
        self.passengers.append(name)
        return self.passengers
    def drop(self, name):
        """Remove `name`; return the (possibly shared) passenger list."""
        self.passengers.remove(name)
        return self.passengers
bus1 = HauntedBus(['Alice', 'Bill'])
print('bus1:', bus1.passengers)
bus1.pick('Charlie')
bus1.drop('Alice')
print('bus1:', bus1.passengers)
bus2 = HauntedBus()
print('bus2 with default empty passenger list')
print('bus2:', bus2.pick('Carrie'))
bus3 = HauntedBus()
print('bus3 with default empty passenger list')
print('bus3:', bus3.pick('Dave'))
print('bus2:', bus2.passengers)
print('bus2.passengers is bus3.passengers: ', bus2.passengers is bus3.passengers)
print('bus1:', bus1.passengers)
# all HauntedBus instances share the same empty list instance
print(HauntedBus.__init__.__defaults__[0] is HauntedBus().passengers)
# empty list ids
print(id([]), id([]))
xx = []; yy = []
print(id([]), id(xx), id(yy)) | en | 0.431027 | ########################################## # demo with empty list as function parameter A bus model haunted by ghost passengers # can use list(passengers) to get a different ref # all HauntedBus instances share the same empty list instance # empty list ids | 4.256817 | 4 |
app.py | jamessandy/twilio-chatbot | 1 | 6614458 | # coding=utf-8
import tensorflow as tf
import numpy as np
import keras
import os
import time
# SQLite for information
import sqlite3
# Keras
from keras.models import load_model, model_from_json
from keras.preprocessing import image
from PIL import Image
# Flask utils
from flask import Flask, url_for, render_template, request,current_app,send_from_directory,redirect
from werkzeug.utils import secure_filename
#twilio stuffs
from twilio.twiml.messaging_response import MessagingResponse
import requests
from twilio.rest import Client
# Define a flask app
app = Flask(__name__)
# load json file before weights
loaded_json = open("models/crop.json", "r")
# read json architecture into variable
loaded_json_read = loaded_json.read()
# close file
loaded_json.close()
# retreive model from json
loaded_model = model_from_json(loaded_json_read)
# load weights
loaded_model.load_weights("models/crop_weights.h5")
model1 = load_model("models/one-class.h5")
global graph
graph = tf.get_default_graph()
def info():
    """Return every row of the `crop` lookup table from the bundled SQLite DB.

    Returns:
        list[tuple]: one tuple per crop-disease record, in table order.
    """
    conn = sqlite3.connect("models/crop.sqlite")
    try:
        # Connection.execute() creates the cursor implicitly.
        return conn.execute("SELECT * FROM crop").fetchall()
    finally:
        # The original leaked one connection per call; always release it.
        conn.close()
# Per-sender cache of received media URLs.
img_dict = {}
# NOTE(review): `entry` is never defined anywhere in this file -- this line
# raises NameError at import time; it looks like leftover scratch code.
img_path = img_dict.get(entry)
#sending replies
# NOTE(review): a single module-level MessagingResponse shares TwiML state
# across all requests; a fresh response should be built per request.
resp = MessagingResponse()
msg = resp.message()
def leaf_predict(img_path):
    """Decide whether the image at `img_path` shows a single leaf.

    Runs `model1` (loaded from one-class.h5) on the image and compares the
    model output to the input tensor, so the model is presumably an
    autoencoder-style one-class detector -- confirm.

    Returns:
        str: "leaf" when the L2 reconstruction error is <= 20, else "not leaf".
    """
    # load image with target size
    img = image.load_img(img_path, target_size=(256, 256))
    # convert to array
    img = image.img_to_array(img)
    # normalize the array
    img /= 255
    # expand dimensions for keras convention
    img = np.expand_dims(img, axis=0)
    # TF1-style: predictions must run inside the graph captured at load time.
    with graph.as_default():
        # NOTE(review): compiling `loaded_model` here looks unnecessary --
        # only `model1` is used below; presumably leftover code, confirm.
        opt = keras.optimizers.Adam(lr=0.001)
        loaded_model.compile(optimizer=opt, loss='mse')
        preds = model1.predict(img)
        # L2 distance between the input and its reconstruction.
        dist = np.linalg.norm(img - preds)
        if dist <= 20:  # threshold 20 -- presumably tuned offline, not derived here
            return "leaf"
        else:
            return "not leaf"
def model_predict(img_path):
    """Predict the disease class index for the leaf image at `img_path`.

    Returns:
        int: class index, used to select a row from info().
    """
    # load image with target size
    img = image.load_img(img_path, target_size=(256, 256))
    # convert to array
    img = image.img_to_array(img)
    # normalize the array
    img /= 255
    # expand dimensions for keras convention
    img = np.expand_dims(img, axis=0)
    # TF1-style: predictions must run inside the graph captured at load time.
    with graph.as_default():
        # NOTE(review): compiling before predict_classes() looks unnecessary
        # for inference -- presumably leftover training code, confirm.
        opt = keras.optimizers.Adam(lr=0.001)
        loaded_model.compile(
            optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
        preds = loaded_model.predict_classes(img)
        return int(preds)
#flask app for bot
@app.route('/test', methods=['POST', 'GET'])
def upload():
    """Twilio webhook: classify an incoming MMS leaf image and reply via TwiML.

    Reads Twilio's form fields (`From`, `NumMedia`, `MediaUrl0`) and returns
    a TwiML <Response> string for Twilio to deliver back to the sender.
    """
    # receiving message
    sender = request.form.get('From')
    # Build a fresh TwiML response per request; the module-level `resp`/`msg`
    # pair would accumulate <Message> elements across requests, and `msg` is
    # a Message object, not a callable (the original `msg(...)` raised).
    response = MessagingResponse()
    if int(request.values['NumMedia']) > 0:
        media_url = request.values['MediaUrl0']
        # Remember the latest media URL per sender (module-level dict).
        img_dict[sender] = media_url
        # NOTE(review): MediaUrl0 is an HTTP URL while image.load_img()
        # expects a local path -- the media likely needs downloading first.
        if leaf_predict(media_url) == "leaf":
            # Make prediction
            preds = model_predict(media_url)
            rows = info()
            res = np.asarray(rows[preds])
            if preds == int(res[0]):
                disease, pathogen, symptoms, management = [i for i in res]
                response.message(
                    "Result: {}\nPathogen: {}\nSymptoms: {}\nManagement: {}".format(
                        disease, pathogen, symptoms, management
                    )
                )
            else:
                response.message("ERROR: no details found for the predicted class")
        else:
            response.message(
                "ERROR: UPLOADED IMAGE IS NOT A LEAF (OR) MORE LEAVES IN ONE IMAGE"
            )
    # Always return valid TwiML (the original could return None, which
    # Flask rejects with a TypeError).
    return str(response)
if __name__ == '__main__':
app.run()
| # coding=utf-8
import tensorflow as tf
import numpy as np
import keras
import os
import time
# SQLite for information
import sqlite3
# Keras
from keras.models import load_model, model_from_json
from keras.preprocessing import image
from PIL import Image
# Flask utils
from flask import Flask, url_for, render_template, request,current_app,send_from_directory,redirect
from werkzeug.utils import secure_filename
#twilio stuffs
from twilio.twiml.messaging_response import MessagingResponse
import requests
from twilio.rest import Client
# Define a flask app
app = Flask(__name__)
# load json file before weights
loaded_json = open("models/crop.json", "r")
# read json architecture into variable
loaded_json_read = loaded_json.read()
# close file
loaded_json.close()
# retreive model from json
loaded_model = model_from_json(loaded_json_read)
# load weights
loaded_model.load_weights("models/crop_weights.h5")
model1 = load_model("models/one-class.h5")
global graph
graph = tf.get_default_graph()
def info():
    """Return every row of the `crop` lookup table from the bundled SQLite DB.

    Returns:
        list[tuple]: one tuple per crop-disease record, in table order.
    """
    conn = sqlite3.connect("models/crop.sqlite")
    try:
        # Connection.execute() creates the cursor implicitly.
        return conn.execute("SELECT * FROM crop").fetchall()
    finally:
        # The original leaked one connection per call; always release it.
        conn.close()
# Per-sender cache of received media URLs.
img_dict = {}
# NOTE(review): `entry` is never defined anywhere in this file -- this line
# raises NameError at import time; it looks like leftover scratch code.
img_path = img_dict.get(entry)
#sending replies
# NOTE(review): a single module-level MessagingResponse shares TwiML state
# across all requests; a fresh response should be built per request.
resp = MessagingResponse()
msg = resp.message()
def leaf_predict(img_path):
# load image with target size
img = image.load_img(img_path, target_size=(256, 256))
# convert to array
img = image.img_to_array(img)
# normalize the array
img /= 255
# expand dimensions for keras convention
img = np.expand_dims(img, axis=0)
with graph.as_default():
opt = keras.optimizers.Adam(lr=0.001)
loaded_model.compile(optimizer=opt, loss='mse')
preds = model1.predict(img)
dist = np.linalg.norm(img - preds)
if dist <= 20:
return "leaf"
else:
return "not leaf"
def model_predict(img_path):
# load image with target size
img = image.load_img(img_path, target_size=(256, 256))
# convert to array
img = image.img_to_array(img)
# normalize the array
img /= 255
# expand dimensions for keras convention
img = np.expand_dims(img, axis=0)
with graph.as_default():
opt = keras.optimizers.Adam(lr=0.001)
loaded_model.compile(
optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
preds = loaded_model.predict_classes(img)
return int(preds)
#flask app for bot
@app.route('/test', methods=['POST', 'GET'])
def upload():
    """Twilio webhook: classify an incoming MMS leaf image and reply via TwiML.

    Reads Twilio's form fields (`From`, `NumMedia`, `MediaUrl0`) and returns
    a TwiML <Response> string for Twilio to deliver back to the sender.
    """
    # receiving message
    sender = request.form.get('From')
    # Build a fresh TwiML response per request; the module-level `resp`/`msg`
    # pair would accumulate <Message> elements across requests, and `msg` is
    # a Message object, not a callable (the original `msg(...)` raised).
    response = MessagingResponse()
    if int(request.values['NumMedia']) > 0:
        media_url = request.values['MediaUrl0']
        # Remember the latest media URL per sender (module-level dict).
        img_dict[sender] = media_url
        # NOTE(review): MediaUrl0 is an HTTP URL while image.load_img()
        # expects a local path -- the media likely needs downloading first.
        if leaf_predict(media_url) == "leaf":
            # Make prediction
            preds = model_predict(media_url)
            rows = info()
            res = np.asarray(rows[preds])
            if preds == int(res[0]):
                disease, pathogen, symptoms, management = [i for i in res]
                response.message(
                    "Result: {}\nPathogen: {}\nSymptoms: {}\nManagement: {}".format(
                        disease, pathogen, symptoms, management
                    )
                )
            else:
                response.message("ERROR: no details found for the predicted class")
        else:
            response.message(
                "ERROR: UPLOADED IMAGE IS NOT A LEAF (OR) MORE LEAVES IN ONE IMAGE"
            )
    # Always return valid TwiML (the original could return None, which
    # Flask rejects with a TypeError).
    return str(response)
if __name__ == '__main__':
app.run()
| en | 0.628682 | # coding=utf-8 # SQLite for information # Keras # Flask utils #twilio stuffs # Define a flask app # load json file before weights # read json architecture into variable # close file # retreive model from json # load weights #sending replies # load image with target size # convert to array # normalize the array # expand dimensions for keras convention # load image with target size # convert to array # normalize the array # expand dimensions for keras convention #flask app for bot #recieveing messeage #Make prediction | 2.602491 | 3 |
btc_api/app/wallet/coin_select.py | krebernisak/btc-payments | 0 | 6614459 | import math
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Dict, List, Tuple
from functools import partial
from bit.wallet import Unspent
from app.wallet.transaction import (
TxContext,
Output,
address_to_output_size,
estimate_tx_fee_kb,
)
from app.wallet.exceptions import InsufficientFunds
DUST_THRESHOLD = 5430
@dataclass(frozen=True)
class SelectedCoins:
    """Class represents result of a successful coin selection.

    Amounts are integers -- presumably satoshi, per the `bit` library; confirm.
    """
    # UTXOs chosen to fund the transaction.
    inputs: List[Unspent]
    # Payment outputs, plus a change output when change was added.
    outputs: List[Output]
    # Total paid to the requested outputs.
    out_amount: int
    # Amount returned to the change address (0 when folded into the fee).
    change_amount: int
    # Miner fee.
    fee_amount: int
class UnspentCoinSelector(ABC):
    """
    The Strategy interface declaring the common interface for all supported
    coin-selection algorithms.
    The Context uses this interface to call the algorithm defined by concrete
    strategies (the Greedy* subclasses below).
    """
    @abstractmethod
    def select(self, context: TxContext) -> SelectedCoins:
        """
        Select coins from the context's unspent inputs.
        Returns the result of a successful coin selection; implementations
        raise InsufficientFunds when the inputs cannot cover outputs + fee.
        """
        pass
class Greedy(UnspentCoinSelector):
    def select(self, context: TxContext) -> SelectedCoins:
        """
        Selects coins from unspent inputs using greedy algorithm
        until enough are selected from input list.
        Returns a result of a successful coin selection.
        """
        if not context.inputs:
            raise InsufficientFunds(context.address, 0)
        # Work on a copy so the caller's output list is never mutated.
        outputs = context.outputs[:]
        estimate_tx_fee = partial(estimate_tx_fee_kb, fee_kb=context.fee_kb)
        n_out = len(outputs)
        # True once the change output has been counted into n_out below.
        def change_included():
            return n_out == len(outputs) + 1
        out_amount = sum(out.amount for out in outputs)
        out_size = sum(address_to_output_size(out.address) for out in outputs)
        in_size = 0
        in_amount = 0
        change_amount = 0
        # Greedily add inputs until they cover outputs + fee (+ change).
        for n_in, utxo in enumerate(context.inputs, 1):
            in_size += utxo.vsize
            fee = estimate_tx_fee(in_size, n_in, out_size, n_out)
            in_amount += utxo.amount
            change_amount = max(0, in_amount - (out_amount + fee))
            if 0 < change_amount < DUST_THRESHOLD:
                # Change too small to spend later ("dust"): fold it into the fee.
                fee += change_amount
                change_amount = 0
            elif change_amount >= DUST_THRESHOLD and not change_included():
                # Calculate new change_amount with fee including the change address output
                # and add it to tx if new estimate gives us change_amount >= DUST_THRESHOLD
                change_out_size = address_to_output_size(context.change_address)
                fee_with_change = estimate_tx_fee(
                    in_size, n_in, out_size + change_out_size, n_out + 1
                )
                change_amount_with_fee = in_amount - (out_amount + fee_with_change)
                if change_amount_with_fee < DUST_THRESHOLD:
                    # The larger fee would shrink change below dust: drop the
                    # change and give the surplus to the miner instead.
                    fee += change_amount
                    change_amount = 0
                else:
                    # Commit the change output into the size/fee bookkeeping.
                    n_out += 1
                    out_size += change_out_size
                    fee, change_amount = fee_with_change, change_amount_with_fee
            if out_amount + fee + change_amount <= in_amount:
                # Invariants: change is never dust, and the value equation balances.
                assert change_amount == 0 or change_amount >= DUST_THRESHOLD
                assert in_amount - (out_amount + fee + change_amount) == 0
                break
            elif n_in == len(context.inputs):
                raise InsufficientFunds.forAmount(
                    context.address, in_amount, out_amount, fee
                )
        selected_inputs = context.inputs[:n_in]
        if change_amount:
            outputs.append(Output(context.change_address, change_amount))
        return SelectedCoins(selected_inputs, outputs, out_amount, change_amount, fee)
class GreedyMaxSecure(Greedy):
    def select(self, context: TxContext) -> SelectedCoins:
        """Greedy selection that spends the most-confirmed (oldest) coins first."""
        by_age = sorted(
            context.inputs, key=lambda coin: coin.confirmations, reverse=True
        )
        return super().select(context.copy(inputs=by_age))
class GreedyMaxCoins(Greedy):
    def select(self, context: TxContext) -> SelectedCoins:
        """Greedy selection that spends the smallest coins first.

        Draining low-value UTXOs first maximises the number of coins used.
        """
        ascending = sorted(context.inputs, key=lambda coin: coin.amount)
        return super().select(context.copy(inputs=ascending))
class GreedyMinCoins(Greedy):
    def select(self, context: TxContext) -> SelectedCoins:
        """Greedy selection that spends the largest coins first.

        Using high-value UTXOs first minimises the number of coins used.
        """
        descending = sorted(
            context.inputs, key=lambda coin: coin.amount, reverse=True
        )
        return super().select(context.copy(inputs=descending))
class GreedyRandom(Greedy):
    """Greedy selection over a randomly shuffled view of the inputs."""
    def __init__(self, random):
        # Injected RNG (the `random` module or a random.Random instance).
        self.random = random
    def select(self, context: TxContext) -> SelectedCoins:
        """Shuffle the candidate UTXOs with the injected RNG, then pick greedily."""
        candidates = list(context.inputs)
        self.random.shuffle(candidates)
        return super().select(context.copy(inputs=candidates))
| import math
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Dict, List, Tuple
from functools import partial
from bit.wallet import Unspent
from app.wallet.transaction import (
TxContext,
Output,
address_to_output_size,
estimate_tx_fee_kb,
)
from app.wallet.exceptions import InsufficientFunds
DUST_THRESHOLD = 5430
@dataclass(frozen=True)
class SelectedCoins:
"""Class represents result of a successfull coin selection."""
inputs: List[Unspent]
outputs: List[Output]
out_amount: int
change_amount: int
fee_amount: int
class UnspentCoinSelector(ABC):
"""
The Strategy interface declares common interface for all supported coin
select algorithms.
The Context uses this interface to call the algorithm defined by Concrete
Strategies.
"""
@abstractmethod
def select(self, context: TxContext) -> SelectedCoins:
"""
Selects coins from unspent inputs.
Returns a result of a successfull coin selection.
"""
pass
class Greedy(UnspentCoinSelector):
def select(self, context: TxContext) -> SelectedCoins:
"""
Selects coins from unspent inputs using greedy algorithm
until enough are selected from input list.
Returns a result of a successfull coin selection.
"""
if not context.inputs:
raise InsufficientFunds(context.address, 0)
outputs = context.outputs[:]
estimate_tx_fee = partial(estimate_tx_fee_kb, fee_kb=context.fee_kb)
n_out = len(outputs)
def change_included():
return n_out == len(outputs) + 1
out_amount = sum(out.amount for out in outputs)
out_size = sum(address_to_output_size(out.address) for out in outputs)
in_size = 0
in_amount = 0
change_amount = 0
for n_in, utxo in enumerate(context.inputs, 1):
in_size += utxo.vsize
fee = estimate_tx_fee(in_size, n_in, out_size, n_out)
in_amount += utxo.amount
change_amount = max(0, in_amount - (out_amount + fee))
if 0 < change_amount < DUST_THRESHOLD:
fee += change_amount
change_amount = 0
elif change_amount >= DUST_THRESHOLD and not change_included():
# Calculate new change_amount with fee including the change address output
# and add it to tx if new estimate gives us change_amount >= DUST_THRESHOLD
change_out_size = address_to_output_size(context.change_address)
fee_with_change = estimate_tx_fee(
in_size, n_in, out_size + change_out_size, n_out + 1
)
change_amount_with_fee = in_amount - (out_amount + fee_with_change)
if change_amount_with_fee < DUST_THRESHOLD:
fee += change_amount
change_amount = 0
else:
n_out += 1
out_size += change_out_size
fee, change_amount = fee_with_change, change_amount_with_fee
if out_amount + fee + change_amount <= in_amount:
assert change_amount == 0 or change_amount >= DUST_THRESHOLD
assert in_amount - (out_amount + fee + change_amount) == 0
break
elif n_in == len(context.inputs):
raise InsufficientFunds.forAmount(
context.address, in_amount, out_amount, fee
)
selected_inputs = context.inputs[:n_in]
if change_amount:
outputs.append(Output(context.change_address, change_amount))
return SelectedCoins(selected_inputs, outputs, out_amount, change_amount, fee)
class GreedyMaxSecure(Greedy):
    def select(self, context: TxContext) -> SelectedCoins:
        """Greedy selection that spends the most-confirmed (oldest) coins first."""
        by_age = sorted(
            context.inputs, key=lambda coin: coin.confirmations, reverse=True
        )
        return super().select(context.copy(inputs=by_age))
class GreedyMaxCoins(Greedy):
    def select(self, context: TxContext) -> SelectedCoins:
        """Greedy selection that spends the smallest coins first.

        Draining low-value UTXOs first maximises the number of coins used.
        """
        ascending = sorted(context.inputs, key=lambda coin: coin.amount)
        return super().select(context.copy(inputs=ascending))
class GreedyMinCoins(Greedy):
    def select(self, context: TxContext) -> SelectedCoins:
        """Greedy selection that spends the largest coins first.

        Using high-value UTXOs first minimises the number of coins used.
        """
        descending = sorted(
            context.inputs, key=lambda coin: coin.amount, reverse=True
        )
        return super().select(context.copy(inputs=descending))
class GreedyRandom(Greedy):
    """Greedy selection over a randomly shuffled view of the inputs."""
    def __init__(self, random):
        # Injected RNG (the `random` module or a random.Random instance).
        self.random = random
    def select(self, context: TxContext) -> SelectedCoins:
        """Shuffle the candidate UTXOs with the injected RNG, then pick greedily."""
        candidates = list(context.inputs)
        self.random.shuffle(candidates)
        return super().select(context.copy(inputs=candidates))
| en | 0.852289 | Class represents result of a successfull coin selection. The Strategy interface declares common interface for all supported coin select algorithms. The Context uses this interface to call the algorithm defined by Concrete Strategies. Selects coins from unspent inputs. Returns a result of a successfull coin selection. Selects coins from unspent inputs using greedy algorithm until enough are selected from input list. Returns a result of a successfull coin selection. # Calculate new change_amount with fee including the change address output # and add it to tx if new estimate gives us change_amount >= DUST_THRESHOLD Selects coins from unspent inputs using oldest coins first. Returns a result of a successfull coin selection. Selects coins from unspent inputs using coins with min amount first. Try to spend MAX number of coins. Returns a result of a successfull coin selection. Selects coins from unspent inputs using coins with max amount first. Try to spend MIN number of coins. Returns a result of a successfull coin selection. Selects coins from unspent inputs on random. Returns a result of a successfull coin selection. | 2.906438 | 3 |
forecast_dataset_to_stories.py | Cyntwikip/PreSumm | 0 | 6614460 | <reponame>Cyntwikip/PreSumm<gh_stars>0
import pandas as pd
import re, os
import click
from tqdm import tqdm
def split_lines(text):
    """Break `text` into paragraphs at blank lines.

    Internal whitespace within each paragraph is collapsed to single
    spaces; empty paragraphs are dropped.
    """
    paragraphs = re.split(r'\n{2,}', text)
    cleaned = (' '.join(chunk.split()) for chunk in paragraphs)
    return [p for p in cleaned if p]
def preprocess(file, forecast_folder):
    """Convert lesson rows from the CSV `file` into CNN/DM-style .story files.

    Each lesson's paragraphs become the story body and a synthetic title is
    appended after an @highlight marker; files are written under
    ./raw_data/<forecast_folder>/.
    """
    # file = '~/notebooks/Cognitive_Search/sash/data/feb_20/ulm_forecasts.csv'
    df = pd.read_csv(file, usecols=[2,4,5])
    # x != x is the NaN test: missing reference ids become 0 before the cast.
    df['reference_id'] = df['reference_id'].apply(lambda x: 0 if x!=x else x).astype(int)
    # Keep only rows flagged as lessons.
    df = df.where(df['isLesson']==1).dropna()
    df.drop('isLesson', axis=1, inplace=True)
    df['paragraph'] = df['paragraph'].apply(split_lines)
    df = df.reset_index(drop=True)
    df['reference_id'] = df['reference_id'].astype(int)
    # Synthetic summary target built from the row index and reference id.
    df['title'] = df[['reference_id']].apply(lambda x: f'dummy lesson number {x.name} - {x[0]}', axis=1)
    # path_story = '../Presumm2/PreSumm/raw_data/eva_forecast_02_21_2020/'
    path_story = './raw_data/{}/'.format(forecast_folder)
    if not os.path.isdir(path_story):
        print('Path does not exist...')
        print('Creating folder...')
        # NOTE(review): os.mkdir fails if ./raw_data is missing; os.makedirs
        # would create intermediate directories -- confirm ./raw_data exists.
        os.mkdir(path_story)
    for idx, rows in tqdm(df.iterrows()):
        fn = '{:05} - {}.story'.format(idx, rows['reference_id'])
        # Story format: paragraphs, then @highlight + title as the summary.
        content = rows['paragraph'] + ['@highlight', rows['title']]
        content = '\n\n'.join(content)
        with open(path_story+fn, 'w+') as f:
            f.write(content)
    return
# Click command group: subcommands (e.g. `convert`) attach via @cli.command().
# Kept empty on purpose -- adding a docstring would change the CLI help text.
@click.group()
def cli():
    pass
@cli.command()
@click.argument('filename')
@click.argument('forecast_folder')
def convert(filename, forecast_folder):
    """Convert the lessons CSV FILENAME into .story files under FORECAST_FOLDER."""
    # Echo both the source file and the target folder; the original message
    # printed a "(unknown)" placeholder instead of the filename.
    print(f'Converting {filename} to {forecast_folder}')
    preprocess(filename, forecast_folder)
if __name__=='__main__':
cli()
| import pandas as pd
import re, os
import click
from tqdm import tqdm
def split_lines(text):
    """Break `text` into paragraphs at blank lines.

    Internal whitespace within each paragraph is collapsed to single
    spaces; empty paragraphs are dropped.
    """
    paragraphs = re.split(r'\n{2,}', text)
    cleaned = (' '.join(chunk.split()) for chunk in paragraphs)
    return [p for p in cleaned if p]
def preprocess(file, forecast_folder):
# file = '~/notebooks/Cognitive_Search/sash/data/feb_20/ulm_forecasts.csv'
df = pd.read_csv(file, usecols=[2,4,5])
df['reference_id'] = df['reference_id'].apply(lambda x: 0 if x!=x else x).astype(int)
df = df.where(df['isLesson']==1).dropna()
df.drop('isLesson', axis=1, inplace=True)
df['paragraph'] = df['paragraph'].apply(split_lines)
df = df.reset_index(drop=True)
df['reference_id'] = df['reference_id'].astype(int)
df['title'] = df[['reference_id']].apply(lambda x: f'dummy lesson number {x.name} - {x[0]}', axis=1)
# path_story = '../Presumm2/PreSumm/raw_data/eva_forecast_02_21_2020/'
path_story = './raw_data/{}/'.format(forecast_folder)
if not os.path.isdir(path_story):
print('Path does not exist...')
print('Creating folder...')
os.mkdir(path_story)
for idx, rows in tqdm(df.iterrows()):
fn = '{:05} - {}.story'.format(idx, rows['reference_id'])
content = rows['paragraph'] + ['@highlight', rows['title']]
content = '\n\n'.join(content)
with open(path_story+fn, 'w+') as f:
f.write(content)
return
@click.group()
def cli():
pass
@cli.command()
@click.argument('filename')
@click.argument('forecast_folder')
def convert(filename, forecast_folder):
    """Convert the lessons CSV FILENAME into .story files under FORECAST_FOLDER."""
    # Echo both the source file and the target folder; the original message
    # printed a "(unknown)" placeholder instead of the filename.
    print(f'Converting {filename} to {forecast_folder}')
    preprocess(filename, forecast_folder)
if __name__=='__main__':
cli() | en | 0.586317 | # file = '~/notebooks/Cognitive_Search/sash/data/feb_20/ulm_forecasts.csv' # path_story = '../Presumm2/PreSumm/raw_data/eva_forecast_02_21_2020/' | 2.881356 | 3 |
simpleportscanner.py | rishabhacking/Simple-Port-Scanner | 1 | 6614461 | #! /usr/bin/python
import socket
sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = "127.0.0.1" #Enter the HOST
port = 1234 #Enter the PORT
def port_scanner(port):
    """Print whether `port` on the module-level `host` accepts a TCP connection."""
    # connect_ex() returns 0 on success and an errno on failure, so a truthy
    # result means the port did not accept the connection.
    if sock.connect_ex((host, port)):
        # Format inside print(): the original `print("...") % (port)` only
        # worked as a Python 2 print statement and raises TypeError on Python 3.
        print("The port %d is closed" % (port))
    else:
        print("The port %d is opened" % (port))
port_scanner(port)
| #! /usr/bin/python
import socket
sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = "127.0.0.1" #Enter the HOST
port = 1234 #Enter the PORT
def port_scanner(port):
    """Print whether `port` on the module-level `host` accepts a TCP connection."""
    # connect_ex() returns 0 on success and an errno on failure, so a truthy
    # result means the port did not accept the connection.
    if sock.connect_ex((host, port)):
        # Format inside print(): the original `print("...") % (port)` only
        # worked as a Python 2 print statement and raises TypeError on Python 3.
        print("The port %d is closed" % (port))
    else:
        print("The port %d is opened" % (port))
port_scanner(port)
| en | 0.279606 | #! /usr/bin/python #Enter the HOST #Enter the PORT | 3.664809 | 4 |
datacamp/improving_data_vis_in_python/showing_uncertainty.py | Katsute/Baruch-CIS-4170-Assignments | 0 | 6614462 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# Construct CI bounds for averages
average_ests['lower'] = average_ests['mean'] - 1.96*average_ests['std_err']
average_ests['upper'] = average_ests['mean'] + 1.96*average_ests['std_err']
# Setup a grid of plots, with non-shared x axes limits
g = sns.FacetGrid(average_ests, row = 'pollutant', sharex = False)
# Plot CI for average estimate
g.map(plt.hlines, 'y', 'lower', 'upper')
# Plot observed values for comparison and remove axes labels
g.map(plt.scatter, 'seen', 'y', color = 'orangered').set_ylabels('').set_xlabels('')
plt.show()
# In[ ]:
# Set start and ends according to intervals
# Make intervals thicker
plt.hlines(y = 'year', xmin = 'lower', xmax = 'upper',
linewidth = 5, color = 'steelblue', alpha = 0.7,
data = diffs_by_year)
# Point estimates
plt.plot('mean', 'year', 'k|', data = diffs_by_year)
# Add a 'null' reference line at 0 and color orangered
plt.axvline(x = 0, color = 'orangered', linestyle = '--')
# Set descriptive axis labels and title
plt.xlabel('95% CI')
plt.title('Avg SO2 differences between Cincinnati and Indianapolis')
plt.show()
# In[ ]:
# Draw 99% inverval bands for average NO2
vandenberg_NO2['lower'] = vandenberg_NO2['mean'] - 2.58*vandenberg_NO2['std_err']
vandenberg_NO2['upper'] = vandenberg_NO2['mean'] + 2.58*vandenberg_NO2['std_err']
# Plot mean estimate as a white semi-transparent line
plt.plot('day', 'mean', data = vandenberg_NO2,
color = 'white', alpha = 0.4)
# Fill between the upper and lower confidence band values
plt.fill_between(x = 'day',
y1 = 'lower', y2 = 'upper',
data = vandenberg_NO2)
plt.show()
# In[ ]:
# Setup a grid of plots with columns divided by location
g = sns.FacetGrid(eastern_SO2, col = 'city', col_wrap = 2)
# Map interval plots to each cities data with corol colored ribbons
g.map(plt.fill_between, 'day', 'lower', 'upper', color = 'coral')
# Map overlaid mean plots with white line
g.map(plt.plot, 'day', 'mean', color = 'white')
plt.show()
# In[ ]:
for city, color in [('Denver',"#66c2a5"), ('Long Beach', "#fc8d62")]:
# Filter data to desired city
city_data = SO2_compare[SO2_compare.city == city]
# Set city interval color to desired and lower opacity
plt.fill_between(x = 'day', y1 = 'lower', y2 = 'upper', data = city_data,
color = color, alpha = 0.4)
# Draw a faint mean line for reference and give a label for legend
plt.plot('day','mean', data = city_data, label = city,
color = color, alpha = 0.25)
plt.legend()
plt.show()
# In[ ]:
# Add interval percent widths
alphas = [ 0.01, 0.05, 0.1]
widths = [ '99% CI', '95%', '90%']
colors = ['#fee08b','#fc8d59','#d53e4f']
for alpha, color, width in zip(alphas, colors, widths):
# Grab confidence interval
conf_ints = pollution_model.conf_int(alpha)
# Pass current interval color and legend label to plot
plt.hlines(y = conf_ints.index, xmin = conf_ints[0], xmax = conf_ints[1],
colors = color, label = width, linewidth = 10)
# Draw point estimates
plt.plot(pollution_model.params, pollution_model.params.index, 'wo', label = 'Point Estimate')
plt.legend()
plt.show()
# In[ ]:
int_widths = ['90%', '99%']
z_scores = [1.67, 2.58]
colors = ['#fc8d59', '#fee08b']
for percent, Z, color in zip(int_widths, z_scores, colors):
# Pass lower and upper confidence bounds and lower opacity
plt.fill_between(
x = cinci_13_no2.day, alpha = 0.4, color = color,
y1 = cinci_13_no2['mean'] - 2*cinci_13_no2['std_err'],
y2 = cinci_13_no2['mean'] + 2*cinci_13_no2['std_err'],
label = percent)
plt.legend()
plt.show()
# In[ ]:
# Decrase interval thickness as interval widens
sizes = [ 15, 10, 5]
int_widths = ['90% CI', '95%', '99%']
z_scores = [ 1.67, 1.96, 2.58]
for percent, Z, size in zip(int_widths, z_scores, sizes):
plt.hlines(y = rocket_model.pollutant,
xmin = rocket_model['est'] - Z*rocket_model['std_err'],
xmax = rocket_model['est'] + Z*rocket_model['std_err'],
label = percent,
# Resize lines and color them gray
linewidth = sizes,
color = 'gray')
# Add point estimate
plt.plot('est', 'pollutant', 'wo', data = rocket_model, label = 'Point Estimate')
plt.legend(loc = 'center left', bbox_to_anchor = (1, 0.5))
plt.show()
# In[ ]:
cinci_may_NO2 = pollution.query("city == 'Cincinnati' & month == 5").NO2
# Generate bootstrap samples
boot_means = bootstrap(cinci_may_NO2, 1000)
# Get lower and upper 95% interval bounds
lower, upper = np.percentile(boot_means, [2.5, 97.5])
# Plot shaded area for interval
plt.axvspan(lower, upper, color = 'gray', alpha = 0.2)
# Draw histogram of bootstrap samples
sns.distplot(boot_means, bins = 100, kde = False)
plt.show()
# In[ ]:
sns.lmplot('NO2', 'SO2', data = no2_so2_boot,
# Tell seaborn to a regression line for each sample
hue = 'sample',
# Make lines blue and transparent
line_kws = {'color': 'steelblue', 'alpha': 0.2},
# Disable built-in confidence intervals
ci = None, legend = False, scatter = False)
# Draw scatter of all points
plt.scatter('NO2', 'SO2', data = no2_so2)
plt.show()
# In[ ]:
# Initialize a holder DataFrame for bootstrap results
city_boots = pd.DataFrame()
for city in ['Cincinnati', 'Des Moines', 'Indianapolis', 'Houston']:
# Filter to city
city_NO2 = pollution_may[pollution_may.city == city].NO2
# Bootstrap city data & put in DataFrame
cur_boot = pd.DataFrame({'NO2_avg': bootstrap(city_NO2, 100), 'city': city})
# Append to other city's bootstraps
city_boots = pd.concat([city_boots,cur_boot])
# Beeswarm plot of averages with citys on y axis
sns.swarmplot(y = "city", x = "NO2_avg", data = city_boots, color = 'coral')
plt.show()
| #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# Construct CI bounds for averages
average_ests['lower'] = average_ests['mean'] - 1.96*average_ests['std_err']
average_ests['upper'] = average_ests['mean'] + 1.96*average_ests['std_err']
# Setup a grid of plots, with non-shared x axes limits
g = sns.FacetGrid(average_ests, row = 'pollutant', sharex = False)
# Plot CI for average estimate
g.map(plt.hlines, 'y', 'lower', 'upper')
# Plot observed values for comparison and remove axes labels
g.map(plt.scatter, 'seen', 'y', color = 'orangered').set_ylabels('').set_xlabels('')
plt.show()
# In[ ]:
# One thick horizontal CI bar per year, spanning [lower, upper].
plt.hlines(
    y='year', xmin='lower', xmax='upper', data=diffs_by_year,
    linewidth=5, color='steelblue', alpha=0.7)
# Black tick marks ('k|') at the point estimates
plt.plot('mean', 'year', 'k|', data=diffs_by_year)
# Dashed orange-red reference line at zero difference (the "null" value)
plt.axvline(x=0, color='orangered', linestyle='--')
# Descriptive labeling
plt.xlabel('95% CI')
plt.title('Avg SO2 differences between Cincinnati and Indianapolis')
plt.show()
# In[ ]:
# 99% interval band for daily average NO2 (z = 2.58)
band = 2.58 * vandenberg_NO2['std_err']
vandenberg_NO2['lower'] = vandenberg_NO2['mean'] - band
vandenberg_NO2['upper'] = vandenberg_NO2['mean'] + band
# Semi-transparent white line tracing the mean estimate
plt.plot('day', 'mean', data=vandenberg_NO2, color='white', alpha=0.4)
# Ribbon filling the space between the lower and upper bounds
plt.fill_between(x='day', y1='lower', y2='upper', data=vandenberg_NO2)
plt.show()
# In[ ]:
# One facet per eastern city, wrapped into two columns
grid = sns.FacetGrid(eastern_SO2, col='city', col_wrap=2)
# Coral-colored confidence ribbon in each city's panel
grid.map(plt.fill_between, 'day', 'lower', 'upper', color='coral')
# White mean line drawn over each ribbon
grid.map(plt.plot, 'day', 'mean', color='white')
plt.show()
# In[ ]:
# Overlay both cities' SO2 confidence bands in contrasting colors so the
# intervals can be compared directly on one axes.
for city, color in [('Denver',"#66c2a5"), ('Long Beach', "#fc8d62")]:
    # Rows for the current city only
    city_data = SO2_compare[SO2_compare.city == city]
    # Translucent ribbon between the interval bounds
    plt.fill_between(x='day', y1='lower', y2='upper', data=city_data,
                     color=color, alpha=0.4)
    # Very faint mean line, labeled so the legend names the city
    plt.plot('day', 'mean', data=city_data, label=city,
             color=color, alpha=0.25)
plt.legend()
plt.show()
# In[ ]:
# Nested confidence intervals for the model coefficients: the widest (99%)
# bar is drawn first, with narrower levels layered on top in darker colors.
alphas = [ 0.01, 0.05, 0.1]
widths = [ '99% CI', '95%', '90%']
colors = ['#fee08b','#fc8d59','#d53e4f']
for alpha, color, width in zip(alphas, colors, widths):
    # Interval bounds for every coefficient at this alpha level
    conf_ints = pollution_model.conf_int(alpha)
    # Thick horizontal bar from the lower (col 0) to upper (col 1) bound
    plt.hlines(y=conf_ints.index, xmin=conf_ints[0], xmax=conf_ints[1],
               colors=color, label=width, linewidth=10)
# White dots mark the point estimates over the bars
plt.plot(pollution_model.params, pollution_model.params.index, 'wo', label='Point Estimate')
plt.legend()
plt.show()
# In[ ]:
# Draw 90% and 99% confidence ribbons around the daily NO2 mean. Both are
# translucent, so the overlap reads as the narrower interval nested inside
# the wider one.
int_widths = ['90%', '99%']
z_scores = [1.67, 2.58]
colors = ['#fc8d59', '#fee08b']
for percent, Z, color in zip(int_widths, z_scores, colors):
    # BUG FIX: the band half-width must scale with this interval's own
    # z-score `Z`; the original hardcoded 2*std_err, which drew two
    # identical bands and left `Z` unused.
    plt.fill_between(
        x = cinci_13_no2.day, alpha = 0.4, color = color,
        y1 = cinci_13_no2['mean'] - Z*cinci_13_no2['std_err'],
        y2 = cinci_13_no2['mean'] + Z*cinci_13_no2['std_err'],
        label = percent)
plt.legend()
plt.show()
# In[ ]:
# Nested CIs for each pollutant coefficient: thicker line = narrower
# interval, so the 90% CI reads as the boldest band.
sizes = [ 15, 10, 5]
int_widths = ['90% CI', '95%', '99%']
z_scores = [ 1.67, 1.96, 2.58]
for percent, Z, size in zip(int_widths, z_scores, sizes):
    plt.hlines(y = rocket_model.pollutant,
               xmin = rocket_model['est'] - Z*rocket_model['std_err'],
               xmax = rocket_model['est'] + Z*rocket_model['std_err'],
               label = percent,
               # BUG FIX: use this iteration's `size`, not the whole
               # `sizes` list, so each interval gets its own thickness.
               linewidth = size,
               color = 'gray')
# White dots at the point estimates
plt.plot('est', 'pollutant', 'wo', data = rocket_model, label = 'Point Estimate')
plt.legend(loc = 'center left', bbox_to_anchor = (1, 0.5))
plt.show()
# In[ ]:
# Bootstrap CI for Cincinnati's May NO2 average.
cinci_may_NO2 = pollution.query("city == 'Cincinnati' & month == 5").NO2
# 1000 resampled means of the observed readings
boot_means = bootstrap(cinci_may_NO2, 1000)
# Central 95% of the bootstrap distribution
lower, upper = np.percentile(boot_means, q=[2.5, 97.5])
# Gray band over the interval, drawn behind the histogram
plt.axvspan(lower, upper, color='gray', alpha=0.2)
# Histogram of the bootstrap means, without a KDE curve
sns.distplot(boot_means, bins=100, kde=False)
plt.show()
# In[ ]:
# One regression line per bootstrap sample, colored by the 'sample' column;
# seaborn's built-in CI, scatter, and legend are all turned off.
sns.lmplot('NO2', 'SO2', data=no2_so2_boot, hue='sample',
           # Faint steel-blue lines: dense overlap shows where fits agree
           line_kws=dict(color='steelblue', alpha=0.2),
           ci=None, legend=False, scatter=False)
# The raw observations go on top of the fitted lines
plt.scatter('NO2', 'SO2', data=no2_so2)
plt.show()
# In[ ]:
# Compare bootstrapped average May NO2 across four cities via a beeswarm.
city_boots = pd.DataFrame()
for city in ['Cincinnati', 'Des Moines', 'Indianapolis', 'Houston']:
    # Observations belonging to the current city
    observed = pollution_may[pollution_may.city == city].NO2
    # 100 bootstrap means, labeled with the city for grouping
    boot_df = pd.DataFrame({'NO2_avg': bootstrap(observed, 100), 'city': city})
    # Fold into the accumulated results
    city_boots = pd.concat([city_boots, boot_df])
# Each dot is one bootstrap mean; cities run down the y axis
sns.swarmplot(y="city", x="NO2_avg", data=city_boots, color='coral')
plt.show()
| en | 0.752332 | #!/usr/bin/env python # coding: utf-8 # In[ ]: # Construct CI bounds for averages # Setup a grid of plots, with non-shared x axes limits # Plot CI for average estimate # Plot observed values for comparison and remove axes labels # In[ ]: # Set start and ends according to intervals # Make intervals thicker # Point estimates # Add a 'null' reference line at 0 and color orangered # Set descriptive axis labels and title # In[ ]: # Draw 99% inverval bands for average NO2 # Plot mean estimate as a white semi-transparent line # Fill between the upper and lower confidence band values # In[ ]: # Setup a grid of plots with columns divided by location # Map interval plots to each cities data with corol colored ribbons # Map overlaid mean plots with white line # In[ ]: # Filter data to desired city # Set city interval color to desired and lower opacity # Draw a faint mean line for reference and give a label for legend # In[ ]: # Add interval percent widths # Grab confidence interval # Pass current interval color and legend label to plot # Draw point estimates # In[ ]: # Pass lower and upper confidence bounds and lower opacity # In[ ]: # Decrase interval thickness as interval widens # Resize lines and color them gray # Add point estimate # In[ ]: # Generate bootstrap samples # Get lower and upper 95% interval bounds # Plot shaded area for interval # Draw histogram of bootstrap samples # In[ ]: # Tell seaborn to a regression line for each sample # Make lines blue and transparent # Disable built-in confidence intervals # Draw scatter of all points # In[ ]: # Initialize a holder DataFrame for bootstrap results # Filter to city # Bootstrap city data & put in DataFrame # Append to other city's bootstraps # Beeswarm plot of averages with citys on y axis | 2.704546 | 3 |
tpdatasrc/tpgamefiles/scr/Spell515 - Vampiric Touch.py | edoipi/TemplePlus | 69 | 6614463 | from toee import *
def OnBeginSpellCast( spell ):
print "Vampiric Touch OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
game.particles( "sp-necromancy-conjure", spell.caster )
def OnSpellEffect( spell ):
print "Vampiric Touch OnSpellEffect"
dice = dice_new("1d6")
dice.number = min(10, (spell.caster_level) / 2)
spell.duration = 600
target = spell.target_list[0]
if not (target.obj == spell.caster):
attack_successful = spell.caster.perform_touch_attack( target.obj , 1)
if attack_successful & D20CAF_HIT:
old_hp = target.obj.stat_level_get( stat_hp_current )
target.obj.spell_damage_weaponlike( spell.caster, D20DT_NEGATIVE_ENERGY, dice, D20DAP_UNSPECIFIED, 100, D20A_CAST_SPELL, spell.id, attack_successful, 0 )
new_hp = target.obj.stat_level_get( stat_hp_current )
damage = old_hp - new_hp
if damage > (old_hp + 10):
damage = old_hp + 10
#spell.caster.condition_add_with_args( 'Temporary_Hit_Points', spell.id, spell.duration, damage )
spell.caster.condition_add_with_args( 'sp-Vampiric Touch', spell.id, spell.duration, damage )
spell.caster.float_mesfile_line( 'mes\\spell.mes', 20005, 0 )
else:
#target.obj.float_mesfile_line( 'mes\\spell.mes', 30021 )
game.particles( 'Fizzle', target.obj )
spell.target_list.remove_target( target.obj )
game.particles( 'sp-Vampiric Touch', spell.caster )
def OnBeginRound( spell ):
print "Vampiric Touch OnBeginRound"
def OnEndSpellCast( spell ):
print "Vampiric Touch OnEndSpellCast" | from toee import *
def OnBeginSpellCast( spell ):
print "Vampiric Touch OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
game.particles( "sp-necromancy-conjure", spell.caster )
def OnSpellEffect( spell ):
print "Vampiric Touch OnSpellEffect"
dice = dice_new("1d6")
dice.number = min(10, (spell.caster_level) / 2)
spell.duration = 600
target = spell.target_list[0]
if not (target.obj == spell.caster):
attack_successful = spell.caster.perform_touch_attack( target.obj , 1)
if attack_successful & D20CAF_HIT:
old_hp = target.obj.stat_level_get( stat_hp_current )
target.obj.spell_damage_weaponlike( spell.caster, D20DT_NEGATIVE_ENERGY, dice, D20DAP_UNSPECIFIED, 100, D20A_CAST_SPELL, spell.id, attack_successful, 0 )
new_hp = target.obj.stat_level_get( stat_hp_current )
damage = old_hp - new_hp
if damage > (old_hp + 10):
damage = old_hp + 10
#spell.caster.condition_add_with_args( 'Temporary_Hit_Points', spell.id, spell.duration, damage )
spell.caster.condition_add_with_args( 'sp-Vampiric Touch', spell.id, spell.duration, damage )
spell.caster.float_mesfile_line( 'mes\\spell.mes', 20005, 0 )
else:
#target.obj.float_mesfile_line( 'mes\\spell.mes', 30021 )
game.particles( 'Fizzle', target.obj )
spell.target_list.remove_target( target.obj )
game.particles( 'sp-Vampiric Touch', spell.caster )
def OnBeginRound( spell ):
print "Vampiric Touch OnBeginRound"
def OnEndSpellCast( spell ):
print "Vampiric Touch OnEndSpellCast" | en | 0.181632 | #spell.caster.condition_add_with_args( 'Temporary_Hit_Points', spell.id, spell.duration, damage ) #target.obj.float_mesfile_line( 'mes\\spell.mes', 30021 ) | 2.273762 | 2 |
objetto/_history.py | brunonicko/objetto | 8 | 6614464 | # -*- coding: utf-8 -*-
from typing import TYPE_CHECKING
from ._bases import final
from ._changes import BaseAtomicChange, Batch
from ._exceptions import BaseObjettoException
from .factories import Integer
from .objects import (
Object,
attribute,
protected_attribute_pair,
protected_list_attribute_pair,
)
if TYPE_CHECKING:
from typing import Any, Optional, TypeVar, Union
from ._objects import (
DictObject,
ListObject,
MutableDictObject,
MutableListObject,
MutableSetObject,
ProxyDictObject,
ProxyListObject,
ProxySetObject,
SetObject,
)
T = TypeVar("T") # Any type.
KT = TypeVar("KT") # Any key type.
VT = TypeVar("VT") # Any value type.
MDA = MutableDictObject[KT, VT]
MLA = MutableListObject[T]
MSA = MutableSetObject[T]
DA = DictObject[KT, VT]
LA = ListObject[T]
SA = SetObject[T]
PDA = ProxyDictObject[KT, VT]
PLA = ProxyListObject[T]
PSA = ProxySetObject[T]
CT = Union[BaseAtomicChange, "BatchChanges"]
__all__ = ["HistoryError", "BatchChanges", "HistoryObject"]
class HistoryError(BaseObjettoException):
"""
History failed to execute.
Inherits from:
- :class:`objetto.bases.BaseObjettoException`
"""
# noinspection PyAbstractClass
@final
class BatchChanges(Object):
"""
Batch changes.
Inherits from:
- :class:`objetto.objects.Object`
"""
__slots__ = ()
change = attribute(Batch, checked=False, changeable=False) # type: Batch
"""
Batch change with name and metadata.
:type: objetto.changes.Batch
"""
name = attribute(str, checked=False, changeable=False) # type: str
"""
The batch change name.
:type: str
"""
_changes, changes = protected_list_attribute_pair(
(BaseAtomicChange, "BatchChanges"), subtypes=True, checked=False
) # type: PLA[CT], LA[CT]
"""
Changes executed during the batch.
:type: objetto.objects.ListObject[objetto.history.BatchChanges or \
objetto.bases.BaseAtomicChange]
"""
_closed, closed = protected_attribute_pair(
bool, checked=False, default=False
) # type: bool, bool
"""
Whether the batch has already completed or is still running.
:type: bool
"""
def format_changes(self):
# type: () -> str
"""
Format changes into readable string.
:return: Formatted changes.
:rtype: str
"""
with self.app.read_context():
parts = []
# noinspection PyTypeChecker
for change in self.changes:
if isinstance(change, BatchChanges):
parts.append(
"{} (Batch) ({})".format(
change.name, type(change.change.obj).__name__
)
)
for part in change.format_changes().split("\n"):
parts.append(" {}".format(part))
else:
parts.append(
"{} ({})".format(change.name, type(change.obj).__name__)
)
return "\n".join(parts)
def __undo__(self, _):
# type: (Any) -> None
"""Undo."""
# noinspection PyTypeChecker
for change in reversed(self.changes):
change.__undo__(change)
def __redo__(self, _):
# type: (Any) -> None
"""Redo."""
# noinspection PyTypeChecker
for change in self.changes:
change.__redo__(change)
# noinspection PyAbstractClass
@final
class HistoryObject(Object):
"""
History object.
Inherits from:
- :class:`objetto.objects.Object`
"""
__slots__ = ()
size = attribute(
(int, None),
checked=False,
default=None,
factory=Integer(minimum=0, accepts_none=True),
changeable=False,
) # type: int
"""
How many changes to remember.
:type: int
"""
__executing, executing = protected_attribute_pair(
bool, checked=False, default=False
) # type: bool, bool
"""
Whether the history is undoing or redoing.
:type: bool
"""
__undoing, undoing = protected_attribute_pair(
bool, checked=False, default=False
) # type: bool, bool
"""
Whether the history is undoing.
:type: bool
"""
__redoing, redoing = protected_attribute_pair(
bool, checked=False, default=False
) # type: bool, bool
"""
Whether the history is redoing.
:type: bool
"""
__index, index = protected_attribute_pair(
int, checked=False, default=0
) # type: int, int
"""
The index of the current change.
:type: int
"""
__changes, changes = protected_list_attribute_pair(
(BatchChanges, None),
subtypes=True,
checked=False,
default=(None,),
) # type: PLA[Optional[BatchChanges]], LA[Optional[BatchChanges]]
"""
List of batch changes. The first one is always `None`.
:type: objetto.objects.ListObject[objetto.history.BatchChanges or None]
"""
_current_batches, current_batches = protected_list_attribute_pair(
BatchChanges,
subtypes=False,
checked=False,
child=False,
) # type: PLA[BatchChanges], LA[BatchChanges]
"""
Open batches.
:type: objetto.objects.ListObject[objetto.history.BatchChanges]
"""
def set_index(self, index):
# type: (int) -> None
"""
Undo/redo until we reach the desired index.
:param index: Index.
:type index: int
:raise IndexError: Invalid index.
"""
with self.app.write_context():
if self.__executing:
error = "can't set index while executing"
raise HistoryError(error)
if 0 <= index <= len(self.changes) - 1:
if index > self.__index:
with self._batch_context("Multiple Redo"):
while index > self.__index:
self.redo()
elif index < self.__index:
with self._batch_context("Multiple Undo"):
while index < self.__index:
self.undo()
else:
raise IndexError(index)
def undo_all(self):
# type: () -> None
"""
Undo all.
:raises HistoryError: Can't undo all while executing.
"""
with self.app.write_context():
if self.__executing:
error = "can't undo all while executing"
raise HistoryError(error)
with self._batch_context("Undo All"):
while self.__index > 0:
self.undo()
def redo_all(self):
# type: () -> None
"""
Redo all.
:raises HistoryError: Can't redo all while executing.
"""
with self.app.write_context():
if self.__executing:
error = "can't redo all while executing"
raise HistoryError(error)
with self._batch_context("Redo All"):
while self.__index < len(self.changes) - 1:
self.redo()
# noinspection PyTypeChecker
def redo(self):
# type: () -> None
"""
Redo.
:raises HistoryError: Can't redo while executing.
"""
with self.app.write_context():
if self.__executing:
error = "can't redo while executing"
raise HistoryError(error)
if self.__index < len(self.changes) - 1:
change = self.changes[self.__index + 1]
assert change is not None
with self._batch_context("Redo", change=change):
self.__executing = True
self.__redoing = True
try:
change.__redo__(change)
finally:
self.__executing = False
self.__redoing = False
self.__index += 1
else:
error = "can't redo any further"
raise HistoryError(error)
# noinspection PyTypeChecker
def undo(self):
# type: () -> None
"""
Undo.
:raises HistoryError: Can't undo while executing.
"""
with self.app.write_context():
if self.__executing:
error = "can't undo while executing"
raise HistoryError(error)
if self.__index > 0:
change = self.changes[self.index]
assert change is not None
with self._batch_context("Undo", change=change):
self.__executing = True
self.__undoing = True
try:
change.__undo__(change)
finally:
self.__executing = False
self.__undoing = False
self.__index -= 1
else:
error = "can't undo any further"
raise HistoryError(error)
def flush(self):
# type: () -> None
"""
Flush all changes.
:raises HistoryError: Can't flush while executing.
"""
with self.app.write_context():
if self.__executing:
error = "can't flush history while executing"
raise HistoryError(error)
if len(self.changes) > 1:
with self._batch_context("Flush"):
# noinspection PyTypeChecker
self.__index = 0
del self.__changes[1:]
def flush_redo(self):
# type: () -> None
"""
Flush changes ahead of the current index.
:raises HistoryError: Can't flush while executing.
"""
with self.app.write_context():
if self.__executing:
error = "can't flush history while executing"
raise HistoryError(error)
if len(self.changes) > 1:
if self.__index < len(self.changes) - 1:
with self._batch_context("Flush Redo"):
del self.__changes[self.__index + 1 :]
def in_batch(self):
# type: () -> bool
"""
Get whether history is currently in an open batch.
:return: True if currently in an open batch.
:rtype: bool
:raises HistoryError: Can't check while executing.
"""
with self.app.read_context():
if self.__executing:
error = "can't check if in a batch while executing"
raise HistoryError(error)
return bool(
len(self.changes) > 1
and isinstance(self.changes[-1], BatchChanges)
and not self.changes[-1].closed
)
def format_changes(self):
# type: () -> str
"""
Format changes into readable string.
:return: Formatted changes.
:rtype: str
"""
with self.app.read_context():
parts = ["--- <-" if self.index == 0 else "---"]
for i, change in enumerate(self.changes):
if i == 0:
continue
if isinstance(change, BatchChanges):
parts.append(
"{} (Batch) ({}) <-".format(
change.name, type(change.change.obj).__name__
)
if self.index == i
else "{} (Batch) ({})".format(
change.name, type(change.change.obj).__name__
)
)
for part in change.format_changes().split("\n"):
parts.append(" {}".format(part))
elif change is not None:
parts.append(
"{} ({}) <-".format(change.name, type(change.obj).__name__)
if self.index == i
else "{} ({})".format(change.name, type(change.obj).__name__)
)
return "\n".join(parts)
def __enter_batch__(self, batch):
# type: (Batch) -> None
"""
Enter batch context.
:param batch: Batch.
"""
with self.app.write_context():
if self.__executing:
return
with self._batch_context("Enter Batch", batch=batch):
self.flush_redo()
topmost = not self._current_batches
batch_changes = BatchChanges(self.app, change=batch, name=batch.name)
if topmost:
self.__changes.append(batch_changes)
if self.size is not None and len(self.changes) > self.size + 1:
del self.__changes[1:2]
else:
self.__index += 1
else:
self._current_batches[-1]._changes.append(batch_changes)
self._current_batches.append(batch_changes)
def __push_change__(self, change):
# type: (BaseAtomicChange) -> None
"""
Push change to the current batch.
:param change: Change.
:raises RuntimeError: Reaction triggered during history redo/undo.
"""
with self.app.write_context():
# Check for inconsistent changes triggered during reactions while executing.
if self.__executing:
if isinstance(change, BaseAtomicChange) and change.history is not self:
error = "reaction triggered during history redo/undo in {}".format(
change.obj
)
raise RuntimeError(error)
return
# We should always be in a batch (the application should make sure of it).
assert self._current_batches
# Add to batch.
with self._batch_context("Push Change", change=change):
self.flush_redo()
self._current_batches[-1]._changes.append(change)
def __exit_batch__(self, batch):
# type: (Batch) -> None
"""
Exit batch context.
:param batch: Batch.
"""
with self.app.write_context():
if self.__executing:
return
with self._batch_context("Exit Batch", batch=batch):
assert batch is self._current_batches[-1].change
self._current_batches[-1]._closed = True
self._current_batches.pop()
| # -*- coding: utf-8 -*-
from typing import TYPE_CHECKING
from ._bases import final
from ._changes import BaseAtomicChange, Batch
from ._exceptions import BaseObjettoException
from .factories import Integer
from .objects import (
Object,
attribute,
protected_attribute_pair,
protected_list_attribute_pair,
)
if TYPE_CHECKING:
from typing import Any, Optional, TypeVar, Union
from ._objects import (
DictObject,
ListObject,
MutableDictObject,
MutableListObject,
MutableSetObject,
ProxyDictObject,
ProxyListObject,
ProxySetObject,
SetObject,
)
T = TypeVar("T") # Any type.
KT = TypeVar("KT") # Any key type.
VT = TypeVar("VT") # Any value type.
MDA = MutableDictObject[KT, VT]
MLA = MutableListObject[T]
MSA = MutableSetObject[T]
DA = DictObject[KT, VT]
LA = ListObject[T]
SA = SetObject[T]
PDA = ProxyDictObject[KT, VT]
PLA = ProxyListObject[T]
PSA = ProxySetObject[T]
CT = Union[BaseAtomicChange, "BatchChanges"]
__all__ = ["HistoryError", "BatchChanges", "HistoryObject"]
class HistoryError(BaseObjettoException):
"""
History failed to execute.
Inherits from:
- :class:`objetto.bases.BaseObjettoException`
"""
# noinspection PyAbstractClass
@final
class BatchChanges(Object):
"""
Batch changes.
Inherits from:
- :class:`objetto.objects.Object`
"""
__slots__ = ()
change = attribute(Batch, checked=False, changeable=False) # type: Batch
"""
Batch change with name and metadata.
:type: objetto.changes.Batch
"""
name = attribute(str, checked=False, changeable=False) # type: str
"""
The batch change name.
:type: str
"""
_changes, changes = protected_list_attribute_pair(
(BaseAtomicChange, "BatchChanges"), subtypes=True, checked=False
) # type: PLA[CT], LA[CT]
"""
Changes executed during the batch.
:type: objetto.objects.ListObject[objetto.history.BatchChanges or \
objetto.bases.BaseAtomicChange]
"""
_closed, closed = protected_attribute_pair(
bool, checked=False, default=False
) # type: bool, bool
"""
Whether the batch has already completed or is still running.
:type: bool
"""
def format_changes(self):
# type: () -> str
"""
Format changes into readable string.
:return: Formatted changes.
:rtype: str
"""
with self.app.read_context():
parts = []
# noinspection PyTypeChecker
for change in self.changes:
if isinstance(change, BatchChanges):
parts.append(
"{} (Batch) ({})".format(
change.name, type(change.change.obj).__name__
)
)
for part in change.format_changes().split("\n"):
parts.append(" {}".format(part))
else:
parts.append(
"{} ({})".format(change.name, type(change.obj).__name__)
)
return "\n".join(parts)
def __undo__(self, _):
# type: (Any) -> None
"""Undo."""
# noinspection PyTypeChecker
for change in reversed(self.changes):
change.__undo__(change)
def __redo__(self, _):
# type: (Any) -> None
"""Redo."""
# noinspection PyTypeChecker
for change in self.changes:
change.__redo__(change)
# noinspection PyAbstractClass
@final
class HistoryObject(Object):
"""
History object.
Inherits from:
- :class:`objetto.objects.Object`
"""
__slots__ = ()
size = attribute(
(int, None),
checked=False,
default=None,
factory=Integer(minimum=0, accepts_none=True),
changeable=False,
) # type: int
"""
How many changes to remember.
:type: int
"""
__executing, executing = protected_attribute_pair(
bool, checked=False, default=False
) # type: bool, bool
"""
Whether the history is undoing or redoing.
:type: bool
"""
__undoing, undoing = protected_attribute_pair(
bool, checked=False, default=False
) # type: bool, bool
"""
Whether the history is undoing.
:type: bool
"""
__redoing, redoing = protected_attribute_pair(
bool, checked=False, default=False
) # type: bool, bool
"""
Whether the history is redoing.
:type: bool
"""
__index, index = protected_attribute_pair(
int, checked=False, default=0
) # type: int, int
"""
The index of the current change.
:type: int
"""
__changes, changes = protected_list_attribute_pair(
(BatchChanges, None),
subtypes=True,
checked=False,
default=(None,),
) # type: PLA[Optional[BatchChanges]], LA[Optional[BatchChanges]]
"""
List of batch changes. The first one is always `None`.
:type: objetto.objects.ListObject[objetto.history.BatchChanges or None]
"""
_current_batches, current_batches = protected_list_attribute_pair(
BatchChanges,
subtypes=False,
checked=False,
child=False,
) # type: PLA[BatchChanges], LA[BatchChanges]
"""
Open batches.
:type: objetto.objects.ListObject[objetto.history.BatchChanges]
"""
def set_index(self, index):
# type: (int) -> None
"""
Undo/redo until we reach the desired index.
:param index: Index.
:type index: int
:raise IndexError: Invalid index.
"""
with self.app.write_context():
if self.__executing:
error = "can't set index while executing"
raise HistoryError(error)
if 0 <= index <= len(self.changes) - 1:
if index > self.__index:
with self._batch_context("Multiple Redo"):
while index > self.__index:
self.redo()
elif index < self.__index:
with self._batch_context("Multiple Undo"):
while index < self.__index:
self.undo()
else:
raise IndexError(index)
def undo_all(self):
# type: () -> None
"""
Undo all.
:raises HistoryError: Can't undo all while executing.
"""
with self.app.write_context():
if self.__executing:
error = "can't undo all while executing"
raise HistoryError(error)
with self._batch_context("Undo All"):
while self.__index > 0:
self.undo()
def redo_all(self):
# type: () -> None
"""
Redo all.
:raises HistoryError: Can't redo all while executing.
"""
with self.app.write_context():
if self.__executing:
error = "can't redo all while executing"
raise HistoryError(error)
with self._batch_context("Redo All"):
while self.__index < len(self.changes) - 1:
self.redo()
# noinspection PyTypeChecker
def redo(self):
# type: () -> None
"""
Redo.
:raises HistoryError: Can't redo while executing.
"""
with self.app.write_context():
if self.__executing:
error = "can't redo while executing"
raise HistoryError(error)
if self.__index < len(self.changes) - 1:
change = self.changes[self.__index + 1]
assert change is not None
with self._batch_context("Redo", change=change):
self.__executing = True
self.__redoing = True
try:
change.__redo__(change)
finally:
self.__executing = False
self.__redoing = False
self.__index += 1
else:
error = "can't redo any further"
raise HistoryError(error)
# noinspection PyTypeChecker
def undo(self):
# type: () -> None
"""
Undo.
:raises HistoryError: Can't undo while executing.
"""
with self.app.write_context():
if self.__executing:
error = "can't undo while executing"
raise HistoryError(error)
if self.__index > 0:
change = self.changes[self.index]
assert change is not None
with self._batch_context("Undo", change=change):
self.__executing = True
self.__undoing = True
try:
change.__undo__(change)
finally:
self.__executing = False
self.__undoing = False
self.__index -= 1
else:
error = "can't undo any further"
raise HistoryError(error)
def flush(self):
# type: () -> None
"""
Flush all changes.
:raises HistoryError: Can't flush while executing.
"""
with self.app.write_context():
if self.__executing:
error = "can't flush history while executing"
raise HistoryError(error)
if len(self.changes) > 1:
with self._batch_context("Flush"):
# noinspection PyTypeChecker
self.__index = 0
del self.__changes[1:]
def flush_redo(self):
# type: () -> None
"""
Flush changes ahead of the current index.
:raises HistoryError: Can't flush while executing.
"""
with self.app.write_context():
if self.__executing:
error = "can't flush history while executing"
raise HistoryError(error)
if len(self.changes) > 1:
if self.__index < len(self.changes) - 1:
with self._batch_context("Flush Redo"):
del self.__changes[self.__index + 1 :]
def in_batch(self):
# type: () -> bool
"""
Get whether history is currently in an open batch.
:return: True if currently in an open batch.
:rtype: bool
:raises HistoryError: Can't check while executing.
"""
with self.app.read_context():
if self.__executing:
error = "can't check if in a batch while executing"
raise HistoryError(error)
return bool(
len(self.changes) > 1
and isinstance(self.changes[-1], BatchChanges)
and not self.changes[-1].closed
)
def format_changes(self):
# type: () -> str
"""
Format changes into readable string.
:return: Formatted changes.
:rtype: str
"""
with self.app.read_context():
parts = ["--- <-" if self.index == 0 else "---"]
for i, change in enumerate(self.changes):
if i == 0:
continue
if isinstance(change, BatchChanges):
parts.append(
"{} (Batch) ({}) <-".format(
change.name, type(change.change.obj).__name__
)
if self.index == i
else "{} (Batch) ({})".format(
change.name, type(change.change.obj).__name__
)
)
for part in change.format_changes().split("\n"):
parts.append(" {}".format(part))
elif change is not None:
parts.append(
"{} ({}) <-".format(change.name, type(change.obj).__name__)
if self.index == i
else "{} ({})".format(change.name, type(change.obj).__name__)
)
return "\n".join(parts)
def __enter_batch__(self, batch):
# type: (Batch) -> None
"""
Enter batch context.
:param batch: Batch.
"""
with self.app.write_context():
if self.__executing:
return
with self._batch_context("Enter Batch", batch=batch):
self.flush_redo()
topmost = not self._current_batches
batch_changes = BatchChanges(self.app, change=batch, name=batch.name)
if topmost:
self.__changes.append(batch_changes)
if self.size is not None and len(self.changes) > self.size + 1:
del self.__changes[1:2]
else:
self.__index += 1
else:
self._current_batches[-1]._changes.append(batch_changes)
self._current_batches.append(batch_changes)
def __push_change__(self, change):
# type: (BaseAtomicChange) -> None
"""
Push change to the current batch.
:param change: Change.
:raises RuntimeError: Reaction triggered during history redo/undo.
"""
with self.app.write_context():
# Check for inconsistent changes triggered during reactions while executing.
if self.__executing:
if isinstance(change, BaseAtomicChange) and change.history is not self:
error = "reaction triggered during history redo/undo in {}".format(
change.obj
)
raise RuntimeError(error)
return
# We should always be in a batch (the application should make sure of it).
assert self._current_batches
# Add to batch.
with self._batch_context("Push Change", change=change):
self.flush_redo()
self._current_batches[-1]._changes.append(change)
def __exit_batch__(self, batch):
    # type: (Batch) -> None
    """
    Exit batch context.

    Marks the innermost open :class:`BatchChanges` as closed and pops it off
    the open-batch stack.

    :param batch: Batch.
    """
    with self.app.write_context():
        # Nothing was recorded while executing, so there is nothing to close.
        if self.__executing:
            return
        with self._batch_context("Exit Batch", batch=batch):
            # Batches must be exited innermost-first.
            assert batch is self._current_batches[-1].change
            self._current_batches[-1]._closed = True
            self._current_batches.pop()
| en | 0.659281 | # -*- coding: utf-8 -*- # Any type. # Any key type. # Any value type. History failed to execute. Inherits from: - :class:`objetto.bases.BaseObjettoException` # noinspection PyAbstractClass Batch changes. Inherits from: - :class:`objetto.objects.Object` # type: Batch Batch change with name and metadata. :type: objetto.changes.Batch # type: str The batch change name. :type: str # type: PLA[CT], LA[CT] Changes executed during the batch. :type: objetto.objects.ListObject[objetto.history.BatchChanges or \ objetto.bases.BaseAtomicChange] # type: bool, bool Whether the batch has already completed or is still running. :type: bool # type: () -> str Format changes into readable string. :return: Formatted changes. :rtype: str # noinspection PyTypeChecker # type: (Any) -> None Undo. # noinspection PyTypeChecker # type: (Any) -> None Redo. # noinspection PyTypeChecker # noinspection PyAbstractClass History object. Inherits from: - :class:`objetto.objects.Object` # type: int How many changes to remember. :type: int # type: bool, bool Whether the history is undoing or redoing. :type: bool # type: bool, bool Whether the history is undoing. :type: bool # type: bool, bool Whether the history is redoing. :type: bool # type: int, int The index of the current change. :type: int # type: PLA[Optional[BatchChanges]], LA[Optional[BatchChanges]] List of batch changes. The first one is always `None`. :type: objetto.objects.ListObject[objetto.history.BatchChanges or None] # type: PLA[BatchChanges], LA[BatchChanges] Open batches. :type: objetto.objects.ListObject[objetto.history.BatchChanges] # type: (int) -> None Undo/redo until we reach the desired index. :param index: Index. :type index: int :raise IndexError: Invalid index. # type: () -> None Undo all. :raises HistoryError: Can't undo all while executing. # type: () -> None Redo all. :raises HistoryError: Can't redo all while executing. # noinspection PyTypeChecker # type: () -> None Redo. 
:raises HistoryError: Can't redo while executing. # noinspection PyTypeChecker # type: () -> None Undo. :raises HistoryError: Can't undo while executing. # type: () -> None Flush all changes. :raises HistoryError: Can't flush while executing. # noinspection PyTypeChecker # type: () -> None Flush changes ahead of the current index. :raises HistoryError: Can't flush while executing. # type: () -> bool Get whether history is currently in an open batch. :return: True if currently in an open batch. :rtype: bool :raises HistoryError: Can't check while executing. # type: () -> str Format changes into readable string. :return: Formatted changes. :rtype: str # type: (Batch) -> None Enter batch context. :param batch: Batch. # type: (BaseAtomicChange) -> None Push change to the current batch. :param change: Change. :raises RuntimeError: Reaction triggered during history redo/undo. # Check for inconsistent changes triggered during reactions while executing. # We should always be in a batch (the application should make sure of it). # Add to batch. # type: (Batch) -> None Exit batch context. :param batch: Batch. | 2.031887 | 2 |
train.py | xingruiy/DH3D | 125 | 6614465 | <filename>train.py
# Copyright (C) 2020 <NAME> (Technical University of Munich)
# For more information see <https://vision.in.tum.de/research/vslam/dh3d>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from tensorpack import *
from core.datasets import *
from core.model import DH3D
from core.configs import ConfigFactory
from core.utils import log_config_info
def get_data(cfg=None):
    """Build the training dataflow for the given experiment configuration.

    :param cfg: Configuration object exposing ``training_local``; required.
        The old default was a mutable ``{}`` (a shared-state anti-pattern),
        which had no ``training_local`` attribute and failed anyway.
    :return: A tensorpack dataflow for local-descriptor or global-descriptor
        training, depending on ``cfg.training_local``.
    :raises ValueError: If no configuration is supplied.
    """
    if cfg is None:
        raise ValueError("a configuration object is required")
    if cfg.training_local:
        return get_train_local_selfpair(cfg)
    return get_train_global_triplet(cfg)
def get_config(model, config):
    """Assemble the tensorpack ``TrainConfig`` for training *model*.

    :param model: Model class (e.g. ``DH3D``); instantiated with *config*.
    :param config: Experiment configuration; must expose
        ``savemodel_every_k_steps`` and ``loadpath``.
    :return: A fully populated ``TrainConfig`` ready for
        ``launch_train_with_config``.
    """
    callbacks = [
        # Save up to 100 checkpoints, one every `savemodel_every_k_steps` steps.
        PeriodicTrigger(ModelSaver(max_to_keep=100),
                        every_k_steps=config.savemodel_every_k_steps),
        ModelSaver(),
    ]
    train_configs = TrainConfig(
        model=model(config),
        dataflow=get_data(cfg=config),
        callbacks=callbacks,
        extra_callbacks=[
            MovingAverageSummary(),
            MergeAllSummaries(),
            ProgressBar(['total_cost']),
            RunUpdateOps()
        ],
        max_epoch=50,
    )
    if config.loadpath is not None:
        # BUG FIX: previously read the *global* ``configs`` (only defined when
        # run as a script, NameError when imported); use the parameter instead.
        train_configs.session_init = SmartInit(config.loadpath, ignore_mismatch=True)
    return train_configs
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.', default='0')
    parser.add_argument('--logdir', help='log directory', default='logs')
    parser.add_argument('--logact', type=str, help='action to log directory', default='k')
    parser.add_argument('--cfg', type=str, default='basic_config')
    args = parser.parse_args()
    # Restrict visible GPUs before any TF/tensorpack device initialization.
    # NOTE(review): `os` is not imported explicitly here; presumably it comes
    # in via `from tensorpack import *` -- confirm.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    configs = ConfigFactory(args.cfg).getconfig()
    logger.set_logger_dir(args.logdir, action=args.logact)
    log_config_info(configs)
    train_configs = get_config(DH3D, configs)
    # launch training
    launch_train_with_config(train_configs, SimpleTrainer())
| <filename>train.py
# Copyright (C) 2020 <NAME> (Technical University of Munich)
# For more information see <https://vision.in.tum.de/research/vslam/dh3d>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from tensorpack import *
from core.datasets import *
from core.model import DH3D
from core.configs import ConfigFactory
from core.utils import log_config_info
def get_data(cfg=None):
    """Build the training dataflow for the given experiment configuration.

    :param cfg: Configuration object exposing ``training_local``; required.
        The old default was a mutable ``{}`` (a shared-state anti-pattern),
        which had no ``training_local`` attribute and failed anyway.
    :return: A tensorpack dataflow for local-descriptor or global-descriptor
        training, depending on ``cfg.training_local``.
    :raises ValueError: If no configuration is supplied.
    """
    if cfg is None:
        raise ValueError("a configuration object is required")
    if cfg.training_local:
        return get_train_local_selfpair(cfg)
    return get_train_global_triplet(cfg)
def get_config(model, config):
    """Assemble the tensorpack ``TrainConfig`` for training *model*.

    :param model: Model class (e.g. ``DH3D``); instantiated with *config*.
    :param config: Experiment configuration; must expose
        ``savemodel_every_k_steps`` and ``loadpath``.
    :return: A fully populated ``TrainConfig`` ready for
        ``launch_train_with_config``.
    """
    callbacks = [
        # Save up to 100 checkpoints, one every `savemodel_every_k_steps` steps.
        PeriodicTrigger(ModelSaver(max_to_keep=100),
                        every_k_steps=config.savemodel_every_k_steps),
        ModelSaver(),
    ]
    train_configs = TrainConfig(
        model=model(config),
        dataflow=get_data(cfg=config),
        callbacks=callbacks,
        extra_callbacks=[
            MovingAverageSummary(),
            MergeAllSummaries(),
            ProgressBar(['total_cost']),
            RunUpdateOps()
        ],
        max_epoch=50,
    )
    if config.loadpath is not None:
        # BUG FIX: previously read the *global* ``configs`` (only defined when
        # run as a script, NameError when imported); use the parameter instead.
        train_configs.session_init = SmartInit(config.loadpath, ignore_mismatch=True)
    return train_configs
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.', default='0')
    parser.add_argument('--logdir', help='log directory', default='logs')
    parser.add_argument('--logact', type=str, help='action to log directory', default='k')
    parser.add_argument('--cfg', type=str, default='basic_config')
    args = parser.parse_args()
    # Restrict visible GPUs before any TF/tensorpack device initialization.
    # NOTE(review): `os` is not imported explicitly here; presumably it comes
    # in via `from tensorpack import *` -- confirm.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    configs = ConfigFactory(args.cfg).getconfig()
    logger.set_logger_dir(args.logdir, action=args.logact)
    log_config_info(configs)
    train_configs = get_config(DH3D, configs)
    # launch training
    launch_train_with_config(train_configs, SimpleTrainer())
| en | 0.842742 | # Copyright (C) 2020 <NAME> (Technical University of Munich) # For more information see <https://vision.in.tum.de/research/vslam/dh3d> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # lauch training | 1.967346 | 2 |
CustomNum.py | L0RD-ZER0/CustomNum | 0 | 6614466 | """
MIT License (MIT)
Copyright (c) 2021-Present L0RD-ZER0
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import Any, Iterable, Callable, Union, Mapping
from types import MappingProxyType
from collections.abc import Mapping as Map
__all__ = [
'CustomNum',
'CustomNumMeta',
'FormatNotMatched'
]
class FormatNotMatched(Exception):
    """Raised when a raw member's value does not satisfy the declared format."""

    def __init__(self, format_used, raw_member) -> None:
        msg = "Format and Raw-Member Value does not match"
        self.format_used = format_used
        self.raw_member = raw_member
        self.message = msg
        super().__init__(msg)

    def __repr__(self) -> str:
        # Summarize both sides of the mismatch for debugging.
        return f"Format: {self.format_used} | Raw Member: {self.raw_member} -> {self.message}"
def __is_dunder__(name: str):
    """Return True for dunder-style names: '__x__' with no extra underscores
    adjacent to the delimiters (e.g. '___x__' does not qualify)."""
    if len(name) <= 5:
        return False
    return (
        name.startswith("__")
        and name.endswith("__")
        and name[2] != "_"
        and name[-3] != "_"
    )
def __is_sunder__(name: str):
    """Return True for sunder-style names: '_x_' with no extra underscores
    adjacent to the delimiters (e.g. '__x_' does not qualify)."""
    if len(name) <= 3:
        return False
    return (
        name.startswith("_")
        and name.endswith("_")
        and name[1] != "_"
        and name[-2] != "_"
    )
class CustomNumMeta(type):
    """
    Metaclass that converts qualifying class attributes into read-only,
    enum-like "members".

    On class creation, every attribute that is not a dunder/sunder name, not
    callable, and not listed in ``ignore`` is removed from the class namespace
    and re-exposed through ``__members__`` (optionally validated/transformed
    via ``format``), similar in spirit to ``enum.EnumMeta``.
    """

    def __call__(self, value, *args: Any, create = False, format: dict = None, operator_field_field: str = None, **kwargs: Any):
        # NOTE(review): the body is just ``pass``, so calling the class (normal
        # instantiation, e.g. ``CustomNum(x)``) returns ``None``. The sketch
        # below suggests instance creation was planned but never implemented.
        # ``operator_field_field`` also looks like a typo for ``operator_field``
        # -- confirm before relying on this signature.
        #
        # Rough python equivalent of how type.__call__ would look:
        #   obj = self.__new__(*args, **kwargs)
        #   if obj is not None and isinstance(obj, self) and hasattr(obj, '__init__'):
        #       init_return = obj.__init__(*args, **kwargs)
        #       if init_return is not None:   # __init__ must return None
        #           raise TypeError(...)
        #   return obj
        pass

    def __new__(
            cls,                # the metaclass itself (__new__ is implicitly static)
            name: str,          # name of the class being created (e.g. 'CustomNum')
            bases: tuple,       # base classes of the class being created
            namespace: dict,    # class body contents (the "classdict")
            format: Union[Mapping, Callable] = None,  # member validator/transformer
            operator_field: str = None,  # field backing <, > and == comparisons
            ignore: list = None,         # attribute names exempt from member conversion
            *args, **kwargs              # absorbs extras; unused
    ):
        """
        Create the class, harvesting qualifying attributes as members.

        :raises KeyError: If ``operator_field`` is not a key of ``format``.
        :raises FormatNotMatched: If a raw member does not satisfy ``format``.
        :raises TypeError: If ``__init__`` of the created class returns non-None.
        """
        # TODO: Add __ignore__ ; __format__ ; __operator_field__ implementation
        # (in-namespace configuration hooks, sketched in the original author's
        # notes but never built).
        if format and operator_field and (operator_field not in format.keys()):  # operator field must be part of the format
            raise KeyError(f"'{operator_field}' was not found in format")
        new_namespace = namespace.copy()
        raw_members = {}
        for k in namespace:
            if not (__is_sunder__(k) or __is_dunder__(k) or callable(namespace[k]) or ((k in ignore) if ignore else None)):
                new_namespace.pop(k, None)
        # Qualifying names (plain data attributes) are removed from the new
        # class's namespace; they become members instead.
        for base in reversed(bases):
            if isinstance(base, cls):
                raw_members.update(dict(base.__members__))
        # Inherit member values from parents that are CustomNumMeta instances.
        raw_members.update({k:namespace[k] for k in namespace if k not in new_namespace})
        # Add the qualifying values defined directly in this class's body.
        self = super().__new__(cls, name, bases, new_namespace)  # create the class itself
        member_map = {}
        for raw_member_key in raw_members:
            raw_member = raw_members[raw_member_key]
            if format is not None:
                if callable(format):  # callable format: member is the transformed value
                    member = format(raw_member)
                elif isinstance(format, Map) and isinstance(raw_member, Map):
                    try:
                        assert format.keys() == raw_member.keys()
                        member = object.__new__(self)
                        super(self, member).__setattr__('__name__', raw_member_key)
                        for k in format:
                            assert isinstance(raw_member[k], format[k])  # checking here is only on first level
                            super(self, member).__setattr__(k, raw_member[k])
                    except AssertionError:
                        raise FormatNotMatched(format, raw_member)
                else:
                    raise FormatNotMatched(format, raw_member)
            else:  # no format: store the raw value as-is
                member = raw_member
            member_map[raw_member_key] = member
            super().__setattr__(self, raw_member_key, member)
        # Expose read-only member views and the format metadata on the class.
        super().__setattr__(self, '__members__', MappingProxyType(member_map))
        super().__setattr__(self, '__raw_members__', MappingProxyType(raw_members))
        super().__setattr__(self, '__format_used__', format if format else None)
        super().__setattr__(self, '__operator_field__', operator_field if format and operator_field else None)
        if self is not None and isinstance(self, cls) and hasattr(self, '__init__'):  # call __init__ manually
            init_return = self.__init__(name, bases, namespace, format=format, ignore=ignore, *args, **kwargs)
            if init_return is not None:  # __init__ must return None
                raise TypeError("__init__() should return `None`, not '{}'".format(init_return.__class__))
        return self  # the finished class

    def __bool__(self):
        """
        Classes always return `True`
        """
        return True

    def __contains__(self, obj) -> bool:
        """
        Check if an object is one of the member values.
        """
        return (obj in self.__members__.values())

    def __getattr__(self, name: str) -> Any:
        """
        Return the member with a matching name.

        Only consulted when normal attribute lookup fails; dunder names are
        rejected outright so protocol lookups fail fast.
        """
        if __is_dunder__(name):
            raise AttributeError(name)
        try:
            return self.__members__[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name: str, value: Any) -> None:
        """
        Block attempts to reassign members; other attributes set normally.
        """
        if name in self.__members__:
            raise AttributeError("Can not re-assign members.")
        return super().__setattr__(name, value)

    def __delattr__(self, name: str) -> None:
        """
        Block attempts to delete members; other attributes delete normally.
        """
        # NOTE(review): "detele" typo below is a runtime string, left untouched.
        if name in self.__members__:
            raise AttributeError("Can not detele members.")
        return super().__delattr__(name)

    def __dir__(self) -> Iterable[str]:
        # Advertise the class's metadata attributes plus all member names.
        return ['__class__', '__doc__', '__members__', '__raw_members__', '__module__', '__qualname__', '__format_used__', '__operator_field__', *self.__members__.keys()]

    def __getitem__(self, name):
        """
        Return the member associated with *name*, else raise `KeyError`.
        """
        return self.__members__[name]

    def __setitem__(self, name, value):
        """
        Block item assignment on the class.
        """
        raise ValueError("Can not reassign items")

    def __delitem__(self, name):
        """
        Block item deletion on the class.
        """
        raise ValueError("Can not delete items")

    def __iter__(self):
        """
        Iterate over member names, like a dict.
        """
        return (key for key in self.__members__.keys())

    def __len__(self) -> int:
        # Number of members.
        return len(self.__members__)

    def __repr__(self) -> str:
        return f"<CustomNum: '{self.__name__}'>"

    def __str__(self) -> str:
        return repr(self)

    def __reversed__(self):
        """
        Iterate over member names in reverse order.
        """
        return reversed(self.__members__.keys())
class CustomNum(metaclass=CustomNumMeta):
    """Base class for CustomNum member objects.

    Members are created by :class:`CustomNumMeta` (via ``object.__new__``)
    when a mapping ``format`` is supplied; each member carries ``__name__``
    plus one attribute per format key. Comparisons operate on the class's
    configured ``__operator_field__``.
    """

    def __repr__(self) -> str:
        # BUG FIX: the template was a plain string literal missing the ``f``
        # prefix, so repr() printed the raw placeholders verbatim.
        return f"<{self.__class__.__name__}: '{self.__name__}'>"

    def __dir__(self) -> Iterable[str]:
        base = ['__class__', '__doc__', '__module__']
        fmt = self.__class__.__format_used__
        # ``__format_used__`` is a mapping when a dict-style format was used.
        return base + (list(fmt.keys()) if fmt else [])

    def __str__(self) -> str:
        return self.__name__

    def __hash__(self) -> int:
        # Hash by member name; kept explicit since defining __eq__ would
        # otherwise set __hash__ to None.
        return hash(self.__name__)

    def _comparison_values(self, other):
        """Return (self_value, other_value) for the operator field, or raise
        NotImplementedError when the comparison is unsupported."""
        cls = self.__class__
        if isinstance(other, cls) and cls.__format_used__ and cls.__operator_field__:
            field = cls.__operator_field__
            return getattr(self, field), getattr(other, field)
        raise NotImplementedError(f"Operation is not supported between operands of type '{type(self)}' and '{type(other)}'")

    def __gt__(self, other):
        """Greater-than on the configured operator field."""
        a, b = self._comparison_values(other)
        return a > b

    def __lt__(self, other):
        # BUG FIX: was mistakenly defined as a second ``__gt__`` (the inline
        # comment read "Defining Less-Than"), silently clobbering the first.
        """Less-than on the configured operator field."""
        a, b = self._comparison_values(other)
        return a < b

    def __eq__(self, other):
        # BUG FIX: was mistakenly defined as a third ``__gt__`` (the inline
        # comment read "Defining Equal-To"); __eq__ never actually existed.
        """Equality on the configured operator field."""
        a, b = self._comparison_values(other)
        return a == b
MIT License (MIT)
Copyright (c) 2021-Present L0RD-ZER0
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import Any, Iterable, Callable, Union, Mapping
from types import MappingProxyType
from collections.abc import Mapping as Map
__all__ = [
'CustomNum',
'CustomNumMeta',
'FormatNotMatched'
]
class FormatNotMatched(Exception):
def __init__(self, format_used, raw_member) -> None:
self.format_used = format_used
self.raw_member = raw_member
self.message = "Format and Raw-Member Value does not match"
super().__init__(self.message)
def __repr__(self) -> str:
return f"Format: {self.format_used} | Raw Member: {self.raw_member} -> {self.message}"
def __is_dunder__(name: str): return (len(name) > 5) and (name[:2] == name [-2:] == '__') and name[2] != "_" and name[-3] != '_'
def __is_sunder__(name: str): return (len(name) > 3) and (name[:1] == name [-1:] == '_') and name[1] != "_" and name[-2] != '_'
class CustomNumMeta(type):
def __call__(self, value, *args: Any, create = False, format: dict = None, operator_field_field: str = None, **kwargs: Any):
# Rough python equivalent of how type.__call__ would look as per my understanding
# obj = self.__new__(*args, **kwargs) # Making instance of self by calling it's `__new__` method
# if obj is not None and isinstance(obj, self) and hasattr(obj, '__init__'): # Checking if our returned instance isn't none and calling `__init__` on it if `__init__` exists
# init_return = obj.__init__(*args, **kwargs) # Calling `__init__` here
# if init_return is not None: # raising error if `__init__` returns something
# raise TypeError("__init__() should return None, not '{}'".format(type(init_return)))
# return obj # Returning the object created
pass
def __new__(
cls, # `__new__` is a static-method by default so it's value is equal to `CustomNumMeta` || This is usually refered-to as `metacls`
name: str, # Name of the class which is formed by `CustomNumMeta`, here it's `'CustomNum'` (a string) || This is usually refered to `cls`
bases: tuple, # Tuple of all the classes it 'inherits' from, is a tuple containing different classes to inherit from
namespace: dict, # The stuff we defined inside the class, also refered to as `classdict` sometimes, is a dictionary
format: Union[Mapping, Callable] = None, # Custom kwarg we can pass during creation of class
operator_field: str = None, # Field for applying `greater-than', 'less-than', and 'equal-to' operations if format is given
ignore: list = None, # Ignore to ignore given values so they function like they would do in any regular class
*args, **kwargs # To keep out the extra stuff from interfering, is not used anwhere
):
# namespace.setdefault("__ignore__", []).append('__ignore__') # adding a key called ignore if it does not exists. After that, adding '__ignore__' to the list associated with __ignore__ key in the dict
# ignore = namespace['__ignore__'] # Grabbing the ignore list and storing it in a variable
# TODO: Add __ignore__ ; __format__ ; __operator_field__ implimentation
if format and operator_field and (operator_field not in format.keys()): # Validation for operator-field being in format
raise KeyError(f"'{operator_field}' was not found in format")
new_namespace = namespace.copy()
raw_members = {}
for k in namespace:
if not (__is_sunder__(k) or __is_dunder__(k) or callable(namespace[k]) or ((k in ignore) if ignore else None)):
new_namespace.pop(k, None)
# checking keys and removing all Qualified Names from namespace of new class
for base in reversed(bases):
if isinstance(base, cls):
raw_members.update(dict(base.__members__))
# Inheriting member values from parents if they are an instance of CustomNumMeta
raw_members.update({k:namespace[k] for k in namespace if k not in new_namespace})
# updating in raw_members for qualified values defined in class's namespace
self = super().__new__(cls, name, bases, new_namespace) # Creating the CustomNum class
# Making a new class of type `cls`, name `name`, which inherits from all classes in `bases`
# and which contains all things defined in `new_namespace`
member_map = {}
for raw_member_key in raw_members:
raw_member = raw_members[raw_member_key]
if format is not None:
if callable(format): # If callable, set value equal to returned value from format
member = format(raw_member)
elif isinstance(format, Map) and isinstance(raw_member, Map):
try:
assert format.keys() == raw_member.keys()
member = object.__new__(self)
super(self, member).__setattr__('__name__', raw_member_key)
for k in format:
assert isinstance(raw_member[k], format[k]) # Checking here is only on first level
super(self, member).__setattr__(k, raw_member[k])
except AssertionError:
raise FormatNotMatched(format, raw_member)
else:
raise FormatNotMatched(format, raw_member)
else: # If format not none, add as is
member = raw_member
member_map[raw_member_key] = member
super().__setattr__(self, raw_member_key, member)
# Format matching and applying before setting it as a class attribute
super().__setattr__(self, '__members__', MappingProxyType(member_map))
super().__setattr__(self, '__raw_members__', MappingProxyType(raw_members))
super().__setattr__(self, '__format_used__', format if format else None)
super().__setattr__(self, '__operator_field__', operator_field if format and operator_field else None)
if self is not None and isinstance(self, cls) and hasattr(self, '__init__'): # Checking if our returned instance isn't none and calling `__init__` on it if `__init__` exists
init_return = self.__init__(name, bases, namespace, format=format, ignore=ignore, *args, **kwargs) # Calling `__init__` here
if init_return is not None: # raising error if `__init__` returns something
raise TypeError("__init__() should return `None`, not '{}'".format(init_return.__class__))
return self # Returning the object created
# def __init__(self, name, bases, namespace, *args, **kwargs) -> None:
# pass
def __bool__(self): # Defines how it's conversion to a boolean will work and whether it'll be `True` or `False`
"""
Classes always return `True`
"""
return True
def __contains__(self, obj) -> bool: # Determine if an object is contained inside this bj using `in` or similar methods
"""
Check if an object is in member-values
"""
return (obj in self.__members__.values())
def __getattr__(self, name: str) -> Any: # Get an attribute from the CustomNum Class
"""
Returns the member with a matching name
"""
if __is_dunder__(name):
raise AttributeError(name)
try:
return self.__members__[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name: str, value: Any) -> None: # Method called for setting an attribute
"""
Block attempts to reassign members
"""
if name in self.__members__:
raise AttributeError("Can not re-assign members.")
return super().__setattr__(name, value)
def __delattr__(self, name: str) -> None: # Method called for deleting an attribute
"""
Block attempts to delete members
"""
if name in self.__members__:
raise AttributeError("Can not detele members.")
return super().__delattr__(name)
def __dir__(self) -> Iterable[str]: # Returns all available attributes of an object
return ['__class__', '__doc__', '__members__', '__raw_members__', '__module__', '__qualname__', '__format_used__', '__operator_field__', *self.__members__.keys()]
def __getitem__(self, name): # method used for item lookup of an object by `obj[name]`
"""
Returns a member associated with name, else raises `KeyError`
"""
return self.__members__[name]
def __setitem__(self, name, value): # Method defining reassignment of an item
"""
Blocks and raises error for reassigning items
"""
raise ValueError("Can not reassign items")
def __delitem__(self, name): # Method defining how deletion of an item works
"""
Blocks and raises error for deleting items
"""
raise ValueError("Can not delete items")
def __iter__(self): # Fefines how iteration of this object world work, returns a generator object
"""
Returns an iterator of member names for iteration,
similar to how a dict would work
"""
return (key for key in self.__members__.keys())
def __len__(self) -> int: # Defines the result of `len(obj)`
return len(self.__members__)
def __repr__(self) -> str: # Defines how an object would be represented
return f"<CustomNum: '{self.__name__}'>"
def __str__(self) -> str: # Defines how string conversion of an object (`str(obj)`) would work
return repr(self)
def __reversed__(self): # Defines the reverse iterator for class using `reversed`
"""
Return member names in a reversed order
"""
return reversed(self.__members__.keys())
class CustomNum(metaclass=CustomNumMeta):
    """Base class for CustomNum member objects.

    Members are created by :class:`CustomNumMeta` (via ``object.__new__``)
    when a mapping ``format`` is supplied; each member carries ``__name__``
    plus one attribute per format key. Comparisons operate on the class's
    configured ``__operator_field__``.
    """

    def __repr__(self) -> str:
        # BUG FIX: the template was a plain string literal missing the ``f``
        # prefix, so repr() printed the raw placeholders verbatim.
        return f"<{self.__class__.__name__}: '{self.__name__}'>"

    def __dir__(self) -> Iterable[str]:
        base = ['__class__', '__doc__', '__module__']
        fmt = self.__class__.__format_used__
        # ``__format_used__`` is a mapping when a dict-style format was used.
        return base + (list(fmt.keys()) if fmt else [])

    def __str__(self) -> str:
        return self.__name__

    def __hash__(self) -> int:
        # Hash by member name; kept explicit since defining __eq__ would
        # otherwise set __hash__ to None.
        return hash(self.__name__)

    def _comparison_values(self, other):
        """Return (self_value, other_value) for the operator field, or raise
        NotImplementedError when the comparison is unsupported."""
        cls = self.__class__
        if isinstance(other, cls) and cls.__format_used__ and cls.__operator_field__:
            field = cls.__operator_field__
            return getattr(self, field), getattr(other, field)
        raise NotImplementedError(f"Operation is not supported between operands of type '{type(self)}' and '{type(other)}'")

    def __gt__(self, other):
        """Greater-than on the configured operator field."""
        a, b = self._comparison_values(other)
        return a > b

    def __lt__(self, other):
        # BUG FIX: was mistakenly defined as a second ``__gt__`` (the inline
        # comment read "Defining Less-Than"), silently clobbering the first.
        """Less-than on the configured operator field."""
        a, b = self._comparison_values(other)
        return a < b

    def __eq__(self, other):
        # BUG FIX: was mistakenly defined as a third ``__gt__`` (the inline
        # comment read "Defining Equal-To"); __eq__ never actually existed.
        """Equality on the configured operator field."""
        a, b = self._comparison_values(other)
        return a == b
Copyright (c) 2021-Present L0RD-ZER0
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. # Rough python equivalent of how type.__call__ would look as per my understanding # obj = self.__new__(*args, **kwargs) # Making instance of self by calling it's `__new__` method # if obj is not None and isinstance(obj, self) and hasattr(obj, '__init__'): # Checking if our returned instance isn't none and calling `__init__` on it if `__init__` exists # init_return = obj.__init__(*args, **kwargs) # Calling `__init__` here # if init_return is not None: # raising error if `__init__` returns something # raise TypeError("__init__() should return None, not '{}'".format(type(init_return))) # return obj # Returning the object created # `__new__` is a static-method by default so it's value is equal to `CustomNumMeta` || This is usually refered-to as `metacls` # Name of the class which is formed by `CustomNumMeta`, here it's `'CustomNum'` (a string) || This is usually refered to `cls` # Tuple of all the classes it 'inherits' from, is a tuple containing different classes to inherit from # The stuff we defined inside the class, also refered to as `classdict` sometimes, is a dictionary # Custom kwarg we can pass during creation of class # Field for applying `greater-than', 'less-than', and 'equal-to' operations if format is given # Ignore to ignore given values so they function like they would do in any regular class # To keep out the extra stuff from interfering, is not used anwhere # namespace.setdefault("__ignore__", []).append('__ignore__') # adding a key called ignore if it does not exists. 
After that, adding '__ignore__' to the list associated with __ignore__ key in the dict # ignore = namespace['__ignore__'] # Grabbing the ignore list and storing it in a variable # TODO: Add __ignore__ ; __format__ ; __operator_field__ implimentation # Validation for operator-field being in format # checking keys and removing all Qualified Names from namespace of new class # Inheriting member values from parents if they are an instance of CustomNumMeta # updating in raw_members for qualified values defined in class's namespace # Creating the CustomNum class # Making a new class of type `cls`, name `name`, which inherits from all classes in `bases` # and which contains all things defined in `new_namespace` # If callable, set value equal to returned value from format # Checking here is only on first level # If format not none, add as is # Format matching and applying before setting it as a class attribute # Checking if our returned instance isn't none and calling `__init__` on it if `__init__` exists # Calling `__init__` here # raising error if `__init__` returns something # Returning the object created # def __init__(self, name, bases, namespace, *args, **kwargs) -> None: # pass # Defines how it's conversion to a boolean will work and whether it'll be `True` or `False` Classes always return `True` # Determine if an object is contained inside this bj using `in` or similar methods Check if an object is in member-values # Get an attribute from the CustomNum Class Returns the member with a matching name # Method called for setting an attribute Block attempts to reassign members # Method called for deleting an attribute Block attempts to delete members # Returns all available attributes of an object # method used for item lookup of an object by `obj[name]` Returns a member associated with name, else raises `KeyError` # Method defining reassignment of an item Blocks and raises error for reassigning items # Method defining how deletion of an item works Blocks and raises 
error for deleting items # Defines how iteration of this object would work, returns a generator object Returns an iterator of member names for iteration,
similar to how a dict would work # Defines the result of `len(obj)` # Defines how an object would be represented # Defines how string conversion of an object (`str(obj)`) would work # Defines the reverse iterator for class using `reversed` Return member names in a reversed order # Result of hashing the object # Defining Greater-Than # Both have same class, class has non-None format_used and operator_field # Defining Less-Than # Defining Equal-To | 2.26518 | 2 |
access-control/user-role-can-be-modified-in-user-profile.py | brandonaltermatt/penetration-testing-scripts | 0 | 6614467 | """
https://portswigger.net/web-security/access-control/lab-user-role-can-be-modified-in-user-profile
"""
import sys
import requests
from bs4 import BeautifulSoup
site = sys.argv[1]
# BUG FIX: str.lstrip('https://') removes *characters* from the set
# {h, t, p, s, :, /}, not the literal prefix, so a hostname beginning with
# any of those letters would be mangled. Strip the scheme as a prefix.
if site.startswith('https://'):
    site = site[len('https://'):]
site = site.rstrip('/')

s = requests.Session()

# Step 1: log in as the low-privilege user "wiener".
login_url = f'https://{site}/login'
login_data = {'password': '<PASSWORD>', 'username': 'wiener'}
resp = s.post(login_url, data=login_data)

# Step 2: the change-email endpoint trusts a client-supplied "roleid"
# field, so submit roleid=2 (admin) alongside the new email address.
change_url = f'https://{site}/my-account/change-email'
json_data = {'email': '<EMAIL>', 'roleid': 2}
resp = s.post(change_url, json=json_data, allow_redirects=False)

# Step 3: with admin rights, find carlos's delete link in the admin panel
# and follow it to complete the lab.
url = f'https://{site}/admin'
resp = s.get(url)
soup = BeautifulSoup(resp.text, 'html.parser')
carlos_delete_link = [link for link in soup.find_all('a') if 'carlos' in link.get('href')]
delete_uri = carlos_delete_link[0]['href']
s.get(f'https://{site}{delete_uri}')
| """
https://portswigger.net/web-security/access-control/lab-user-role-can-be-modified-in-user-profile
"""
import sys
import requests
from bs4 import BeautifulSoup
site = sys.argv[1]
if 'https://' in site:
site = site.rstrip('/').lstrip('https://')
s = requests.Session()
login_url = f'https://{site}/login'
login_data = { 'password' : '<PASSWORD>', 'username' : 'wiener'}
resp = s.post(login_url, data=login_data)
change_url = f'https://{site}/my-account/change-email'
json_data = {'email' : '<EMAIL>', 'roleid': 2}
resp = s.post(change_url,json=json_data, allow_redirects = False)
url = f'https://{site}/admin'
resp = s.get(url)
soup = BeautifulSoup(resp.text,'html.parser')
carlos_delete_link = [link for link in soup.find_all('a') if 'carlos' in link.get('href')]
delete_uri = carlos_delete_link[0]['href']
s.get(f'https://{site}{delete_uri}')
| en | 0.536437 | https://portswigger.net/web-security/access-control/lab-user-role-can-be-modified-in-user-profile | 2.672944 | 3 |
model.py | tr7200/CBNN_SEM_loss_convergence | 4 | 6614468 | """
2021 ASA SDSS paper code
Code structure follows DAG structure,
2 V-structures nested together with immoralities
"""
import os
import math
import random
import logging
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.keras.layers import Input, Lambda
from tensorflow.keras import Model
from tensorflow.keras.layers import concatenate
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from sklearn.model_selection import RepeatedKFold
from sklearn.preprocessing import MinMaxScaler
logging.disable(logging.WARNING)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow C++ log spam

# Seed the RNGs for reproducibility.
SEED = 1387
random.seed(SEED)
np.random.seed(SEED)

# Input file names.
# BUG FIX: the constant was misspelled "INDPENDENT_VARS" while main()'s
# default argument referenced INDEPENDENT_VARS, raising a NameError when
# the function definition was evaluated.
INDEPENDENT_VARS = 'independent_variables.csv'
DEPENDENT_VAR = 'firm_perf_total.csv'

tf.reset_default_graph()  # TF1-style graph reset (file targets TF1 APIs)
def main(independent_vars: str = INDEPENDENT_VARS,
         dependent_var: str = DEPENDENT_VAR):
    """Training loop for main result - Figure 1.

    Builds two nested Bayesian V-structures (DenseFlipout layers with a
    Normal output distribution) and trains them under repeated 10x10
    K-fold cross validation.

    Args:
        independent_vars: path to the CSV of independent variables.
        dependent_var: path to the CSV of the firm-performance outcome.

    Returns:
        The Keras ``History`` object of the last cross-validation fold
        (``None`` if the fold loop never runs).
    """
    # BUG FIX: both read_csv calls previously had an EMPTY first positional
    # argument (``pd.read_csv(, ...)``) -- a syntax error -- and ignored the
    # function parameters entirely. Pass the configured file paths.
    independent_variables = pd.read_csv(independent_vars, sep=',', header=0).astype('float32')
    firm_performance = pd.read_csv(dependent_var, sep=',', header=0)

    kfold = RepeatedKFold(n_splits=10, n_repeats=10, random_state=12345)

    # NOTE(review): these accumulators are never filled below -- presumably
    # intended to collect per-fold losses for the plots; confirm intent.
    val_losses_df = pd.DataFrame()
    losses_df = pd.DataFrame()

    result = None
    for i, (train_index, test_index) in enumerate(kfold.split(independent_variables)):
        X_train, X_test = independent_variables.iloc[train_index,], independent_variables.iloc[test_index,]
        y_train, y_test = firm_performance.iloc[train_index,], firm_performance.iloc[test_index,]

        X_train = MinMaxScaler(copy=False).fit_transform(X_train)
        X_train = pd.DataFrame(data=X_train)

        # Split the scaled features into the three DAG input groups:
        # columns [0:9) -> SM, [21:) -> BD, [9:21) -> AS.
        SM = np.array(X_train.iloc[:, :9])
        BD = np.array(X_train.iloc[:, 21:])
        AS = np.array(X_train.iloc[:, 9:21])

        sm_input = Input(shape=(9,))
        bd_input = Input(shape=(39,))
        as_input = Input(shape=(12,))

        # KL divergence scaled by 323 -- presumably the training-set size;
        # TODO confirm against the data.
        krnl_dvrgnc_fn = lambda q, p, _: tfp.distributions.kl_divergence(q, p) / (323 * 1.0)

        # outer chevron (first V-structure): SM and BD collide into the
        # hidden layer, then combine with AS.
        sm_bd_combined = concatenate([sm_input, bd_input])
        sm_bd_combined_out = tfp.layers.DenseFlipout(48, activation='relu', kernel_divergence_fn=krnl_dvrgnc_fn)(sm_bd_combined)
        as_and_sm_bd_combined = concatenate([sm_bd_combined_out, as_input])
        as_and_sm_bd_combined_out = tfp.layers.DenseFlipout(3, activation='relu', kernel_divergence_fn=krnl_dvrgnc_fn)(as_and_sm_bd_combined)
        V_struct_1_out = tfp.layers.DistributionLambda(lambda t: tfp.distributions.Normal(loc=25 + t[..., :1],
                                                       validate_args=True,
                                                       allow_nan_stats=False,
                                                       scale=2 + tf.math.softplus(0.01 * t[..., 1:])))(as_and_sm_bd_combined_out)
        V_struct_1 = Model([sm_input, bd_input, as_input], V_struct_1_out)
        V_struct_1_kl = sum(V_struct_1.losses)
        # Negative log-likelihood plus the accumulated KL term (ELBO-style loss).
        negloglik_V_struct_1 = lambda y, p_y: -p_y.log_prob(y) + V_struct_1_kl
        V_struct_1.compile(optimizer=tf.keras.optimizers.Adam(lr=0.01),
                           loss=negloglik_V_struct_1)

        # inner chevron (second V-structure): the first structure's output
        # collides with BD and SM again.
        V1_out = V_struct_1([sm_input, bd_input, as_input])
        V1_SM_BD_combined = concatenate([V1_out, bd_input, sm_input])
        V1_SM_BD_combined_out1 = tfp.layers.DenseFlipout(37, activation='relu', kernel_divergence_fn=krnl_dvrgnc_fn)(V1_SM_BD_combined)
        V1_SM_BD_combined_out2 = tfp.layers.DenseFlipout(10, activation='relu', kernel_divergence_fn=krnl_dvrgnc_fn)(V1_SM_BD_combined_out1)
        V2_out = tfp.layers.DistributionLambda(lambda t: tfp.distributions.Normal(loc=25 + t[..., :1],
                                               validate_args=True,
                                               allow_nan_stats=False,
                                               scale=2 + tf.math.softplus(0.01 * t[..., 1:])))(V1_SM_BD_combined_out2)
        V_struct_2 = Model([sm_input, bd_input, as_input], V2_out)
        V_struct_2_kl = sum(V_struct_2.losses)
        negloglik_V_struct_2 = lambda y, p_y: -p_y.log_prob(y) + V_struct_2_kl
        # Joint SEM-style loss: sum of both structures' losses.
        losses = lambda y, p_y: negloglik_V_struct_1(y, p_y) + negloglik_V_struct_2(y, p_y)
        V_struct_2.compile(optimizer=tf.keras.optimizers.Adam(lr=0.01),
                           loss=losses)

        result = V_struct_2.fit([SM, BD, AS],
                                y_train,
                                epochs=50,
                                batch_size=4,
                                verbose=1,
                                validation_split=0.05)
    return result
if __name__ == '__main__':
    # Run the full cross-validated training loop on the default CSV inputs.
    result = main(independent_vars=INDEPENDENT_VARS,
                  dependent_var=DEPENDENT_VAR)
    # ... plots
| """
2021 ASA SDSS paper code
Code structure follows DAG structure,
2 V-structures nested together with immoralities
"""
import os
import math
import random
import logging
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.keras.layers import Input, Lambda
from tensorflow.keras import Model
from tensorflow.keras.layers import concatenate
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from sklearn.model_selection import RepeatedKFold
from sklearn.preprocessing import MinMaxScaler
logging.disable(logging.WARNING)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
SEED = 1387
random.seed(SEED)
np.random.seed(SEED)
INDPENDENT_VARS = 'independent_variables.csv'
DEPENDENT_VAR = 'firm_perf_total.csv'
tf.reset_default_graph()
def main(independent_vars: str=INDEPENDENT_VARS,
dependent_var: str=DEPENDENT_VAR):
"""Training loop for main result - Figure 1"""
independent_variables = pd.read_csv(, sep=',', header=0).astype('float32')
firm_performance = pd.read_csv(, sep=',', header=0)
kfold = RepeatedKFold(n_splits=10, n_repeats=10, random_state=12345)
val_losses_df = pd.DataFrame()
losses_df = pd.DataFrame()
for i, (train_index, test_index) in enumerate(kfold.split(independent_variables)):
X_train, X_test = independent_variables.iloc[train_index,], independent_variables.iloc[test_index,]
y_train, y_test = firm_performance.iloc[train_index,], firm_performance.iloc[test_index,]
X_train = MinMaxScaler(copy=False).fit_transform(X_train)
X_train = pd.DataFrame(data=X_train)
SM = np.array(X_train.iloc[:,:9])
BD = np.array(X_train.iloc[:,21:])
AS = np.array(X_train.iloc[:,9:21])
sm_input = Input(shape=(9,))
bd_input = Input(shape=(39,))
as_input = Input(shape=(12,))
krnl_dvrgnc_fn = lambda q, p, _: tfp.distributions.kl_divergence(q, p) / (323 * 1.0)
# outer chevron
sm_bd_combined = concatenate([sm_input, bd_input])
sm_bd_combined_out = tfp.layers.DenseFlipout(48, activation='relu', kernel_divergence_fn=krnl_dvrgnc_fn)(sm_bd_combined)
as_and_sm_bd_combined = concatenate([sm_bd_combined_out, as_input])
as_and_sm_bd_combined_out = tfp.layers.DenseFlipout(3, activation='relu', kernel_divergence_fn=krnl_dvrgnc_fn)(as_and_sm_bd_combined)
V_struct_1_out = tfp.layers.DistributionLambda(lambda t: tfp.distributions.Normal(loc=25 + t[..., :1],
validate_args=True,
allow_nan_stats=False,
scale=2 + tf.math.softplus(0.01 * t[..., 1:])))(as_and_sm_bd_combined_out)
V_struct_1 = Model([sm_input, bd_input, as_input], V_struct_1_out)
V_struct_1_kl = sum(V_struct_1.losses)
negloglik_V_struct_1 = lambda y, p_y: -p_y.log_prob(y) + V_struct_1_kl
V_struct_1.compile(optimizer=tf.keras.optimizers.Adam(lr=0.01),
loss=negloglik_V_struct_1)
# inner chevron
V1_out = V_struct_1([sm_input, bd_input, as_input])
V1_SM_BD_combined = concatenate([V1_out, bd_input, sm_input])
V1_SM_BD_combined_out1 = tfp.layers.DenseFlipout(37, activation='relu', kernel_divergence_fn=krnl_dvrgnc_fn)(V1_SM_BD_combined)
V1_SM_BD_combined_out2 = tfp.layers.DenseFlipout(10, activation='relu', kernel_divergence_fn=krnl_dvrgnc_fn)(V1_SM_BD_combined_out1)
V2_out = tfp.layers.DistributionLambda(lambda t: tfp.distributions.Normal(loc=25 + t[..., :1],
validate_args=True,
allow_nan_stats=False,
scale=2 + tf.math.softplus(0.01 * t[..., 1:])))(V1_SM_BD_combined_out2)
V_struct_2 = Model([sm_input, bd_input, as_input], V2_out)
V_struct_2_kl = sum(V_struct_2.losses)
negloglik_V_struct_2 = lambda y, p_y: -p_y.log_prob(y) + V_struct_2_kl
losses = lambda y, p_y: negloglik_V_struct_1(y, p_y) + negloglik_V_struct_2(y, p_y)
V_struct_2.compile(optimizer=tf.keras.optimizers.Adam(lr=0.01),
loss=losses)
result = V_struct_2.fit([SM, BD, AS],
y_train,
epochs=50,
batch_size=4,
verbose=1,
validation_split=0.05)
if __name__ == '__main__':
result = main(independent_vars=INDEPENDENT_VARS,
dependent_var=DEPENDENT_VAR)
# ... plots
| en | 0.713003 | 2021 ASA SDSS paper code Code structure follows DAG structure, 2 V-structures nested together with immoralities Training loop for main result - Figure 1 # outer chevron # inner chevron # ... plots | 2.058738 | 2 |
basic_algorithm/fft/fft.py | Quanfita/ImageProcessing | 0 | 6614469 | <filename>basic_algorithm/fft/fft.py
import cv2
import numpy as np
def np_fft(img):
    """Return the centred 2-D FFT of ``img`` and its log-magnitude image.

    Args:
        img: 2-D array-like (grayscale image).

    Returns:
        (fshift, fimg): the fft-shifted complex spectrum, and the
        log-magnitude spectrum min-max normalised to [0, 255].

    A tiny epsilon guards ``np.log`` against zero-magnitude bins (e.g. an
    all-zero image, or spectra with exact cancellations), which previously
    produced -inf and NaNs after normalisation.
    """
    f = np.fft.fft2(img)
    fshift = np.fft.fftshift(f)
    eps = np.finfo(np.float64).tiny  # smallest positive normal float
    fimg = np.log(np.abs(fshift) + eps)
    fmax, fmin = np.max(fimg), np.min(fimg)
    span = fmax - fmin
    if span == 0:
        # Perfectly flat spectrum: avoid 0/0 in the normalisation.
        return fshift, np.zeros_like(fimg)
    fimg = (fimg - fmin) / span
    return fshift, fimg * 255
def np_fft_inv(fshift):
    """Invert a centred spectrum back to a spatial-domain magnitude image."""
    spectrum = np.fft.ifftshift(fshift)   # undo the centre shift
    spatial = np.fft.ifft2(spectrum)      # back to the spatial domain
    return np.abs(spatial)                # drop residual imaginary parts
def cv_fft(img):
    """OpenCV variant: return the centred DFT and its normalised log-magnitude.

    Returns (dft_shift, fimg) where fimg is scaled to [0, 255].
    """
    dft = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
    dft_shift = np.fft.fftshift(dft)
    # Magnitude from the (real, imaginary) channel pair, then log-compress.
    fimg = np.log(cv2.magnitude(dft_shift[:, :, 0], dft_shift[:, :, 1]))
    lo, hi = np.min(fimg), np.max(fimg)
    fimg = (fimg - lo) / (hi - lo)
    return dft_shift, fimg * 255
def cv_fft_inv(dftshift):
    """Invert an OpenCV DFT spectrum; return the magnitude rescaled to [0, 255]."""
    spectrum = np.fft.ifftshift(dftshift)  # undo the centre shift
    spatial = cv2.idft(spectrum)
    res = cv2.magnitude(spatial[:, :, 0], spatial[:, :, 1])
    lo, hi = np.min(res), np.max(res)
    res = (res - lo) / (hi - lo)
    return res * 255
if __name__ == '__main__':
    # Demo driver: load a grayscale image and write the forward/inverse
    # transforms from both the NumPy and OpenCV implementations.
    img = cv2.imread('test.jpg', 0)  # flag 0 = read as grayscale
    fshift, fft = np_fft(img)
    cv2.imwrite('np_fft.png',fft)
    cv2.imwrite('np_fft_inv.png',np_fft_inv(fshift))
    fshift, fft = cv_fft(img)
    cv2.imwrite('cv_fft.png',fft)
    cv2.imwrite('cv_fft_inv.png',cv_fft_inv(fshift))
| <filename>basic_algorithm/fft/fft.py
import cv2
import numpy as np
def np_fft(img):
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
fimg = np.log(np.abs(fshift))
fmax, fmin = np.max(fimg),np.min(fimg)
fimg = (fimg - fmin) / (fmax - fmin)
return fshift, fimg*255
def np_fft_inv(fshift):
ishift = np.fft.ifftshift(fshift)
iimg = np.fft.ifft2(ishift)
iimg = np.abs(iimg)
return iimg
def cv_fft(img):
dft = cv2.dft(np.float32(img), flags = cv2.DFT_COMPLEX_OUTPUT)
dft_shift = np.fft.fftshift(dft)
fimg = np.log(cv2.magnitude(dft_shift[:,:,0], dft_shift[:,:,1]))
fmax, fmin = np.max(fimg),np.min(fimg)
fimg = (fimg - fmin) / (fmax - fmin)
return dft_shift, fimg*255
def cv_fft_inv(dftshift):
ishift = np.fft.ifftshift(dftshift)
iimg = cv2.idft(ishift)
res = cv2.magnitude(iimg[:,:,0], iimg[:,:,1])
fmax, fmin = np.max(res),np.min(res)
res = (res - fmin) / (fmax - fmin)
return res * 255
if __name__ == '__main__':
img = cv2.imread('test.jpg', 0)
fshift, fft = np_fft(img)
cv2.imwrite('np_fft.png',fft)
cv2.imwrite('np_fft_inv.png',np_fft_inv(fshift))
fshift, fft = cv_fft(img)
cv2.imwrite('cv_fft.png',fft)
cv2.imwrite('cv_fft_inv.png',cv_fft_inv(fshift))
| none | 1 | 2.84146 | 3 | |
tests/StellarEvol/test_StellarEvol.py | jrlivesey/vplanet | 0 | 6614470 | <reponame>jrlivesey/vplanet
import astropy.units as u
import pytest
from benchmark import Benchmark, benchmark
@benchmark(
    {
        # ---- Initial state (t = 0): system-level totals -----------------
        "log.initial.system.Age": {"value": 6.311520e13, "unit": u.sec},
        "log.initial.system.Time": {"value": 0.000000, "unit": u.sec},
        "log.initial.system.TotAngMom": {
            "value": 5.357909e43,
            "unit": (u.kg * u.m ** 2) / u.sec,
        },
        "log.initial.system.TotEnergy": {"value": -1.192378e41, "unit": u.erg},
        "log.initial.system.PotEnergy": {"value": -2.556201e39, "unit": u.Joule},
        "log.initial.system.KinEnergy": {"value": 4.054947e37, "unit": u.Joule},
        "log.initial.system.DeltaTime": {"value": 0.000000, "unit": u.sec},
        # ---- Initial state: body "a" ------------------------------------
        "log.initial.a.Mass": {"value": 1.988416e29, "unit": u.kg},
        "log.initial.a.Radius": {"value": 97.114438, "unit": u.Rearth},
        "log.initial.a.RadGyra": {"value": 0.448345},
        "log.initial.a.RotAngMom": {
            "value": 1.115191e42,
            "unit": (u.kg * u.m ** 2) / u.sec,
        },
        "log.initial.a.RotVel": {"value": 4.504445e04, "unit": u.m / u.sec},
        "log.initial.a.BodyType": {"value": 0.000000},
        "log.initial.a.RotRate": {"value": 7.272205e-05, "unit": 1 / u.sec},
        "log.initial.a.RotPer": {"value": 1.000000, "unit": u.day},
        "log.initial.a.Density": {"value": 199.752981, "unit": u.kg / u.m ** 3},
        "log.initial.a.HZLimitDryRunaway": {"value": 3.200490e10, "unit": u.m},
        "log.initial.a.HZLimRecVenus": {"value": 1.360577e11, "unit": u.m},
        "log.initial.a.HZLimRunaway": {"value": 1.790817e11, "unit": u.m},
        "log.initial.a.HZLimMoistGreenhouse": {"value": 1.800268e11, "unit": u.m},
        "log.initial.a.HZLimMaxGreenhouse": {"value": 3.452485e11, "unit": u.m},
        "log.initial.a.HZLimEarlyMars": {"value": 3.765288e11, "unit": u.m},
        "log.initial.a.Instellation": {"value": -1.000000, "unit": u.kg / u.sec ** 3},
        "log.initial.a.CriticalSemiMajorAxis": {"value": -1.000000, "unit": u.m},
        "log.initial.a.LXUVTot": {"value": 2.136736e22, "unit": u.kg / u.sec ** 3},
        "log.initial.a.LostEnergy": {"value": 5.562685e-309, "unit": u.Joule},
        "log.initial.a.LostAngMom": {
            "value": 5.562685e-309,
            "unit": (u.kg * u.m ** 2) / u.sec,
        },
        "log.initial.a.Luminosity": {"value": 0.055557, "unit": u.LSUN},
        "log.initial.a.LXUVStellar": {"value": 2.136736e22, "unit": u.W},
        "log.initial.a.Temperature": {"value": 2971.232396, "unit": u.K},
        "log.initial.a.LXUVFrac": {"value": 0.001000},
        "log.initial.a.RossbyNumber": {"value": 0.014575},
        "log.initial.a.DRotPerDtStellar": {"value": 4.420158e-10},
        # ---- Initial state: body "b" ------------------------------------
        "log.initial.b.Mass": {"value": 1.988416e30, "unit": u.kg},
        "log.initial.b.Radius": {"value": 209.259428, "unit": u.Rearth},
        "log.initial.b.RadGyra": {"value": 0.451302},
        "log.initial.b.RotAngMom": {
            "value": 5.246390e43,
            "unit": (u.kg * u.m ** 2) / u.sec,
        },
        "log.initial.b.RotVel": {"value": 9.706049e04, "unit": u.m / u.sec},
        "log.initial.b.BodyType": {"value": 0.000000},
        "log.initial.b.RotRate": {"value": 7.272205e-05, "unit": 1 / u.sec},
        "log.initial.b.RotPer": {"value": 1.000000, "unit": u.day},
        "log.initial.b.Density": {"value": 199.659310, "unit": u.kg / u.m ** 3},
        "log.initial.b.HZLimitDryRunaway": {"value": 3.200490e10, "unit": u.m},
        "log.initial.b.HZLimRecVenus": {"value": 1.360577e11, "unit": u.m},
        "log.initial.b.HZLimRunaway": {"value": 1.790817e11, "unit": u.m},
        "log.initial.b.HZLimMoistGreenhouse": {"value": 1.800268e11, "unit": u.m},
        "log.initial.b.HZLimMaxGreenhouse": {"value": 3.452485e11, "unit": u.m},
        "log.initial.b.HZLimEarlyMars": {"value": 3.765288e11, "unit": u.m},
        "log.initial.b.Instellation": {"value": 75.978415, "unit": u.kg / u.sec ** 3},
        "log.initial.b.CriticalSemiMajorAxis": {"value": -1.000000, "unit": u.m},
        "log.initial.b.LXUVTot": {"value": 4.556110e23, "unit": u.kg / u.sec ** 3},
        "log.initial.b.LostEnergy": {"value": 5.562685e-309, "unit": u.Joule},
        "log.initial.b.LostAngMom": {
            "value": 5.562685e-309,
            "unit": (u.kg * u.m ** 2) / u.sec,
        },
        "log.initial.b.Luminosity": {"value": 1.184636, "unit": u.LSUN},
        "log.initial.b.LXUVStellar": {"value": 4.556110e23, "unit": u.W},
        "log.initial.b.Temperature": {"value": 4349.796199, "unit": u.K},
        "log.initial.b.LXUVFrac": {"value": 0.001000},
        "log.initial.b.RossbyNumber": {"value": 0.029572},
        "log.initial.b.DRotPerDtStellar": {"value": -4.686689e-10},
        # ---- Final state: evolved values carry a relative tolerance -----
        "log.final.system.Age": {"value": 3.218875e15, "unit": u.sec, "rtol": 1e-4},
        "log.final.system.Time": {"value": 3.155760e15, "unit": u.sec, "rtol": 1e-4},
        "log.final.system.TotAngMom": {
            "value": 5.362484e43,
            "unit": (u.kg * u.m ** 2) / u.sec,
            "rtol": 1e-4,
        },
        "log.final.system.TotEnergy": {
            "value": -1.191823e41,
            "unit": u.erg,
            "rtol": 1e-4,
        },
        "log.final.system.PotEnergy": {
            "value": -1.234478e40,
            "unit": u.Joule,
            "rtol": 1e-4,
        },
        "log.final.system.KinEnergy": {
            "value": 2.087487e37,
            "unit": u.Joule,
            "rtol": 1e-4,
        },
        # "log.final.system.DeltaTime": {"value": 2.677436e+10, "unit": u.sec, "rtol": 1e-4},
        # ---- Final state: body "a" --------------------------------------
        "log.final.a.Mass": {"value": 1.988416e29, "unit": u.kg, "rtol": 1e-4},
        "log.final.a.Radius": {"value": 20.109235, "unit": u.Rearth, "rtol": 1e-4},
        "log.final.a.RadGyra": {"value": 0.464900, "rtol": 1e-4},
        "log.final.a.RotAngMom": {
            "value": 1.718013e41,
            "unit": (u.kg * u.m ** 2) / u.sec,
            "rtol": 1e-4,
        },
        "log.final.a.RotVel": {"value": 3.116837e04, "unit": u.m / u.sec, "rtol": 1e-4},
        "log.final.a.BodyType": {"value": 0.000000, "rtol": 1e-4},
        "log.final.a.RotRate": {"value": 0.000243, "unit": 1 / u.sec, "rtol": 1e-4},
        "log.final.a.RotPer": {"value": 0.299253, "unit": u.day, "rtol": 1e-4},
        "log.final.a.Density": {
            "value": 2.249873e04,
            "unit": u.kg / u.m ** 3,
            "rtol": 1e-4,
        },
        "log.final.a.HZLimitDryRunaway": {
            "value": 6.713518e09,
            "unit": u.m,
            "rtol": 1e-4,
        },
        "log.final.a.HZLimRecVenus": {"value": 1.015610e11, "unit": u.m, "rtol": 1e-4},
        "log.final.a.HZLimRunaway": {"value": 1.336981e11, "unit": u.m, "rtol": 1e-4},
        "log.final.a.HZLimMoistGreenhouse": {
            "value": 1.343821e11,
            "unit": u.m,
            "rtol": 1e-4,
        },
        "log.final.a.HZLimMaxGreenhouse": {
            "value": 2.575080e11,
            "unit": u.m,
            "rtol": 1e-4,
        },
        "log.final.a.HZLimEarlyMars": {"value": 2.808388e11, "unit": u.m, "rtol": 1e-4},
        "log.final.a.Instellation": {
            "value": -1.000000,
            "unit": u.kg / u.sec ** 3,
            "rtol": 1e-4,
        },
        "log.final.a.CriticalSemiMajorAxis": {
            "value": -1.000000,
            "unit": u.m,
            "rtol": 1e-4,
        },
        "log.final.a.LXUVTot": {
            "value": 9.175718e20,
            "unit": u.kg / u.sec ** 3,
            "rtol": 1e-4,
        },
        "log.final.a.LostEnergy": {"value": 9.808344e39, "unit": u.Joule, "rtol": 1e-4},
        "log.final.a.LostAngMom": {
            "value": 9.438649e41,
            "unit": (u.kg * u.m ** 2) / u.sec,
            "rtol": 1e-4,
        },
        "log.final.a.Luminosity": {"value": 0.002445, "unit": u.LSUN, "rtol": 1e-4},
        "log.final.a.LXUVStellar": {"value": 9.175718e20, "unit": u.W, "rtol": 1e-4},
        "log.final.a.Temperature": {"value": 2992.329951, "unit": u.K, "rtol": 1e-4},
        "log.final.a.LXUVFrac": {"value": 0.000976, "rtol": 1e-4},
        "log.final.a.RossbyNumber": {"value": 0.004409, "rtol": 1e-4},
        "log.final.a.DRotPerDtStellar": {"value": -3.016858e-12, "rtol": 1e-4},
        # ---- Final state: body "b" --------------------------------------
        "log.final.b.Mass": {"value": 1.988416e30, "unit": u.kg, "rtol": 1e-4},
        "log.final.b.Radius": {"value": 98.456448, "unit": u.Rearth, "rtol": 1e-4},
        "log.final.b.RadGyra": {"value": 0.298250, "rtol": 1e-4},
        "log.final.b.RotAngMom": {
            "value": 1.649165e42,
            "unit": (u.kg * u.m ** 2) / u.sec,
            "rtol": 1e-4,
        },
        "log.final.b.RotVel": {"value": 1.484777e04, "unit": u.m / u.sec, "rtol": 1e-4},
        "log.final.b.BodyType": {"value": 0.000000, "rtol": 1e-4},
        "log.final.b.RotRate": {"value": 2.364427e-05, "unit": 1 / u.sec, "rtol": 1e-4},
        "log.final.b.RotPer": {"value": 3.075674, "unit": u.day, "rtol": 1e-4},
        "log.final.b.Density": {
            "value": 1916.956147,
            "unit": u.kg / u.m ** 3,
            "rtol": 1e-4,
        },
        "log.final.b.HZLimitDryRunaway": {
            "value": 6.713518e09,
            "unit": u.m,
            "rtol": 1e-4,
        },
        "log.final.b.HZLimRecVenus": {"value": 1.015610e11, "unit": u.m, "rtol": 1e-4},
        "log.final.b.HZLimRunaway": {"value": 1.336981e11, "unit": u.m, "rtol": 1e-4},
        "log.final.b.HZLimMoistGreenhouse": {
            "value": 1.343821e11,
            "unit": u.m,
            "rtol": 1e-4,
        },
        "log.final.b.HZLimMaxGreenhouse": {
            "value": 2.575080e11,
            "unit": u.m,
            "rtol": 1e-4,
        },
        "log.final.b.HZLimEarlyMars": {"value": 2.808388e11, "unit": u.m, "rtol": 1e-4},
        "log.final.b.Instellation": {
            "value": 3.343163,
            "unit": u.kg / u.sec ** 3,
            "rtol": 1e-4,
        },
        "log.final.b.CriticalSemiMajorAxis": {
            "value": -1.000000,
            "unit": u.m,
            "rtol": 1e-4,
        },
        "log.final.b.LXUVTot": {
            "value": 2.586461e23,
            "unit": u.kg / u.sec ** 3,
            "rtol": 1e-4,
        },
        "log.final.b.LostEnergy": {"value": 1.354496e41, "unit": u.Joule, "rtol": 1e-4},
        "log.final.b.LostAngMom": {
            "value": 5.086001e43,
            "unit": (u.kg * u.m ** 2) / u.sec,
            "rtol": 1e-4,
        },
        "log.final.b.Luminosity": {"value": 0.689088, "unit": u.LSUN, "rtol": 1e-4},
        "log.final.b.LXUVStellar": {"value": 2.586461e23, "unit": u.W, "rtol": 1e-4},
        "log.final.b.Temperature": {"value": 5539.190297, "unit": u.K, "rtol": 1e-4},
        "log.final.b.LXUVFrac": {"value": 0.000976, "rtol": 1e-4},
        "log.final.b.RossbyNumber": {"value": 0.187127, "rtol": 1e-4},
        "log.final.b.DRotPerDtStellar": {"value": 2.479928e-10, "rtol": 1e-4},
    }
)
class TestStellarEvol(Benchmark):
    """Regression benchmark: checks logged stellar-evolution output against
    the reference values above (initial entries exact, final entries to the
    given relative tolerance)."""

    pass
| import astropy.units as u
import pytest
from benchmark import Benchmark, benchmark
@benchmark(
{
"log.initial.system.Age": {"value": 6.311520e13, "unit": u.sec},
"log.initial.system.Time": {"value": 0.000000, "unit": u.sec},
"log.initial.system.TotAngMom": {
"value": 5.357909e43,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.system.TotEnergy": {"value": -1.192378e41, "unit": u.erg},
"log.initial.system.PotEnergy": {"value": -2.556201e39, "unit": u.Joule},
"log.initial.system.KinEnergy": {"value": 4.054947e37, "unit": u.Joule},
"log.initial.system.DeltaTime": {"value": 0.000000, "unit": u.sec},
"log.initial.a.Mass": {"value": 1.988416e29, "unit": u.kg},
"log.initial.a.Radius": {"value": 97.114438, "unit": u.Rearth},
"log.initial.a.RadGyra": {"value": 0.448345},
"log.initial.a.RotAngMom": {
"value": 1.115191e42,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.a.RotVel": {"value": 4.504445e04, "unit": u.m / u.sec},
"log.initial.a.BodyType": {"value": 0.000000},
"log.initial.a.RotRate": {"value": 7.272205e-05, "unit": 1 / u.sec},
"log.initial.a.RotPer": {"value": 1.000000, "unit": u.day},
"log.initial.a.Density": {"value": 199.752981, "unit": u.kg / u.m ** 3},
"log.initial.a.HZLimitDryRunaway": {"value": 3.200490e10, "unit": u.m},
"log.initial.a.HZLimRecVenus": {"value": 1.360577e11, "unit": u.m},
"log.initial.a.HZLimRunaway": {"value": 1.790817e11, "unit": u.m},
"log.initial.a.HZLimMoistGreenhouse": {"value": 1.800268e11, "unit": u.m},
"log.initial.a.HZLimMaxGreenhouse": {"value": 3.452485e11, "unit": u.m},
"log.initial.a.HZLimEarlyMars": {"value": 3.765288e11, "unit": u.m},
"log.initial.a.Instellation": {"value": -1.000000, "unit": u.kg / u.sec ** 3},
"log.initial.a.CriticalSemiMajorAxis": {"value": -1.000000, "unit": u.m},
"log.initial.a.LXUVTot": {"value": 2.136736e22, "unit": u.kg / u.sec ** 3},
"log.initial.a.LostEnergy": {"value": 5.562685e-309, "unit": u.Joule},
"log.initial.a.LostAngMom": {
"value": 5.562685e-309,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.a.Luminosity": {"value": 0.055557, "unit": u.LSUN},
"log.initial.a.LXUVStellar": {"value": 2.136736e22, "unit": u.W},
"log.initial.a.Temperature": {"value": 2971.232396, "unit": u.K},
"log.initial.a.LXUVFrac": {"value": 0.001000},
"log.initial.a.RossbyNumber": {"value": 0.014575},
"log.initial.a.DRotPerDtStellar": {"value": 4.420158e-10},
"log.initial.b.Mass": {"value": 1.988416e30, "unit": u.kg},
"log.initial.b.Radius": {"value": 209.259428, "unit": u.Rearth},
"log.initial.b.RadGyra": {"value": 0.451302},
"log.initial.b.RotAngMom": {
"value": 5.246390e43,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.b.RotVel": {"value": 9.706049e04, "unit": u.m / u.sec},
"log.initial.b.BodyType": {"value": 0.000000},
"log.initial.b.RotRate": {"value": 7.272205e-05, "unit": 1 / u.sec},
"log.initial.b.RotPer": {"value": 1.000000, "unit": u.day},
"log.initial.b.Density": {"value": 199.659310, "unit": u.kg / u.m ** 3},
"log.initial.b.HZLimitDryRunaway": {"value": 3.200490e10, "unit": u.m},
"log.initial.b.HZLimRecVenus": {"value": 1.360577e11, "unit": u.m},
"log.initial.b.HZLimRunaway": {"value": 1.790817e11, "unit": u.m},
"log.initial.b.HZLimMoistGreenhouse": {"value": 1.800268e11, "unit": u.m},
"log.initial.b.HZLimMaxGreenhouse": {"value": 3.452485e11, "unit": u.m},
"log.initial.b.HZLimEarlyMars": {"value": 3.765288e11, "unit": u.m},
"log.initial.b.Instellation": {"value": 75.978415, "unit": u.kg / u.sec ** 3},
"log.initial.b.CriticalSemiMajorAxis": {"value": -1.000000, "unit": u.m},
"log.initial.b.LXUVTot": {"value": 4.556110e23, "unit": u.kg / u.sec ** 3},
"log.initial.b.LostEnergy": {"value": 5.562685e-309, "unit": u.Joule},
"log.initial.b.LostAngMom": {
"value": 5.562685e-309,
"unit": (u.kg * u.m ** 2) / u.sec,
},
"log.initial.b.Luminosity": {"value": 1.184636, "unit": u.LSUN},
"log.initial.b.LXUVStellar": {"value": 4.556110e23, "unit": u.W},
"log.initial.b.Temperature": {"value": 4349.796199, "unit": u.K},
"log.initial.b.LXUVFrac": {"value": 0.001000},
"log.initial.b.RossbyNumber": {"value": 0.029572},
"log.initial.b.DRotPerDtStellar": {"value": -4.686689e-10},
"log.final.system.Age": {"value": 3.218875e15, "unit": u.sec, "rtol": 1e-4},
"log.final.system.Time": {"value": 3.155760e15, "unit": u.sec, "rtol": 1e-4},
"log.final.system.TotAngMom": {
"value": 5.362484e43,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.system.TotEnergy": {
"value": -1.191823e41,
"unit": u.erg,
"rtol": 1e-4,
},
"log.final.system.PotEnergy": {
"value": -1.234478e40,
"unit": u.Joule,
"rtol": 1e-4,
},
"log.final.system.KinEnergy": {
"value": 2.087487e37,
"unit": u.Joule,
"rtol": 1e-4,
},
# "log.final.system.DeltaTime": {"value": 2.677436e+10, "unit": u.sec, "rtol": 1e-4},
"log.final.a.Mass": {"value": 1.988416e29, "unit": u.kg, "rtol": 1e-4},
"log.final.a.Radius": {"value": 20.109235, "unit": u.Rearth, "rtol": 1e-4},
"log.final.a.RadGyra": {"value": 0.464900, "rtol": 1e-4},
"log.final.a.RotAngMom": {
"value": 1.718013e41,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.a.RotVel": {"value": 3.116837e04, "unit": u.m / u.sec, "rtol": 1e-4},
"log.final.a.BodyType": {"value": 0.000000, "rtol": 1e-4},
"log.final.a.RotRate": {"value": 0.000243, "unit": 1 / u.sec, "rtol": 1e-4},
"log.final.a.RotPer": {"value": 0.299253, "unit": u.day, "rtol": 1e-4},
"log.final.a.Density": {
"value": 2.249873e04,
"unit": u.kg / u.m ** 3,
"rtol": 1e-4,
},
"log.final.a.HZLimitDryRunaway": {
"value": 6.713518e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.a.HZLimRecVenus": {"value": 1.015610e11, "unit": u.m, "rtol": 1e-4},
"log.final.a.HZLimRunaway": {"value": 1.336981e11, "unit": u.m, "rtol": 1e-4},
"log.final.a.HZLimMoistGreenhouse": {
"value": 1.343821e11,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.a.HZLimMaxGreenhouse": {
"value": 2.575080e11,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.a.HZLimEarlyMars": {"value": 2.808388e11, "unit": u.m, "rtol": 1e-4},
"log.final.a.Instellation": {
"value": -1.000000,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.a.CriticalSemiMajorAxis": {
"value": -1.000000,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.a.LXUVTot": {
"value": 9.175718e20,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.a.LostEnergy": {"value": 9.808344e39, "unit": u.Joule, "rtol": 1e-4},
"log.final.a.LostAngMom": {
"value": 9.438649e41,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.a.Luminosity": {"value": 0.002445, "unit": u.LSUN, "rtol": 1e-4},
"log.final.a.LXUVStellar": {"value": 9.175718e20, "unit": u.W, "rtol": 1e-4},
"log.final.a.Temperature": {"value": 2992.329951, "unit": u.K, "rtol": 1e-4},
"log.final.a.LXUVFrac": {"value": 0.000976, "rtol": 1e-4},
"log.final.a.RossbyNumber": {"value": 0.004409, "rtol": 1e-4},
"log.final.a.DRotPerDtStellar": {"value": -3.016858e-12, "rtol": 1e-4},
"log.final.b.Mass": {"value": 1.988416e30, "unit": u.kg, "rtol": 1e-4},
"log.final.b.Radius": {"value": 98.456448, "unit": u.Rearth, "rtol": 1e-4},
"log.final.b.RadGyra": {"value": 0.298250, "rtol": 1e-4},
"log.final.b.RotAngMom": {
"value": 1.649165e42,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.b.RotVel": {"value": 1.484777e04, "unit": u.m / u.sec, "rtol": 1e-4},
"log.final.b.BodyType": {"value": 0.000000, "rtol": 1e-4},
"log.final.b.RotRate": {"value": 2.364427e-05, "unit": 1 / u.sec, "rtol": 1e-4},
"log.final.b.RotPer": {"value": 3.075674, "unit": u.day, "rtol": 1e-4},
"log.final.b.Density": {
"value": 1916.956147,
"unit": u.kg / u.m ** 3,
"rtol": 1e-4,
},
"log.final.b.HZLimitDryRunaway": {
"value": 6.713518e09,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.b.HZLimRecVenus": {"value": 1.015610e11, "unit": u.m, "rtol": 1e-4},
"log.final.b.HZLimRunaway": {"value": 1.336981e11, "unit": u.m, "rtol": 1e-4},
"log.final.b.HZLimMoistGreenhouse": {
"value": 1.343821e11,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.b.HZLimMaxGreenhouse": {
"value": 2.575080e11,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.b.HZLimEarlyMars": {"value": 2.808388e11, "unit": u.m, "rtol": 1e-4},
"log.final.b.Instellation": {
"value": 3.343163,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.b.CriticalSemiMajorAxis": {
"value": -1.000000,
"unit": u.m,
"rtol": 1e-4,
},
"log.final.b.LXUVTot": {
"value": 2.586461e23,
"unit": u.kg / u.sec ** 3,
"rtol": 1e-4,
},
"log.final.b.LostEnergy": {"value": 1.354496e41, "unit": u.Joule, "rtol": 1e-4},
"log.final.b.LostAngMom": {
"value": 5.086001e43,
"unit": (u.kg * u.m ** 2) / u.sec,
"rtol": 1e-4,
},
"log.final.b.Luminosity": {"value": 0.689088, "unit": u.LSUN, "rtol": 1e-4},
"log.final.b.LXUVStellar": {"value": 2.586461e23, "unit": u.W, "rtol": 1e-4},
"log.final.b.Temperature": {"value": 5539.190297, "unit": u.K, "rtol": 1e-4},
"log.final.b.LXUVFrac": {"value": 0.000976, "rtol": 1e-4},
"log.final.b.RossbyNumber": {"value": 0.187127, "rtol": 1e-4},
"log.final.b.DRotPerDtStellar": {"value": 2.479928e-10, "rtol": 1e-4},
}
)
class TestStellarEvol(Benchmark):
    """Stellar-evolution benchmark regression case.

    NOTE(review): the expected values appear to be supplied by the decorator
    call whose closing parenthesis sits just above this class — confirm. The
    body is intentionally empty; ``Benchmark`` presumably drives the checks.
    """
    pass
plugins/custom/testdata/connections.py | cgs3238/amonagent | 56 | 6614471 | print "connections.active:100|gauge"
print "connections.error:500|gauge"
| print "connections.active:100|gauge"
print "connections.error:500|gauge"
| none | 1 | 1.207482 | 1 | |
mypythontools/property.py | Malachov/mybuildtools | 0 | 6614472 | """Module contains MyProperty class that edit normal python property to add new features.
There is default setter, it's possible to auto init values on class init and values in setter can be
validated.
It's possible to set function as a value and it's evaluated during call.
Example of how can it be used is in module config.
Examples:
=========
>>> class MyClass:
... # Init all default values of defined properties
... def __init__(self):
... for j in vars(type(self)).values():
... if type(j) is MyProperty:
... setattr(self, j.private_name, j.init_function)
...
... @MyProperty(int) # New value will be validated whether it's int
... def var() -> int: # This is for type hints in IDE. Self is not necessary, but better for code inspection tools to avoid errors.
... '''
... Type:
... int
...
... Default:
... 123
...
... This is docstrings (also visible in IDE, because not defined dynamically).'''
...
... return 123 # This is initial value that can be edited.
...
... @MyProperty() # Even if you don't need any params, use empty brackets
... def var2(self):
... return 111
...
>>> myobject = MyClass()
>>> myobject.var
123
>>> myobject.var = 124
>>> myobject.var
124
"""
from __future__ import annotations
import types as types_lib
from typing import Any
from .misc import validate
import mylogging
class MyProperty(property):
    """Python property on steroids. Check module docstrings for more info.

    Differences from a plain ``property``:

    - a default getter/setter is provided, backed by a ``_<name>`` attribute,
    - values assigned through the setter are validated against ``types`` and
      ``options`` (via ``validate``),
    - a callable can be stored instead of a plain value; it is evaluated on
      every read (receiving the owner instance when it takes arguments).
    """

    def __init__(
        self,
        types=None,
        options=None,
        fget=None,
        fset=None,
        doc=None,
    ) -> None:
        # Guard against ``@MyProperty`` used without parentheses: the decorated
        # function would then arrive here as ``types``.
        if isinstance(types, types_lib.FunctionType):
            raise SyntaxError(
                mylogging.return_str("@MyProperty decorator has to be called (parentheses at the end).")
            )

        self.fget_new = fget if fget else self.default_fget
        self.fset_new = fset if fset else self.default_fset
        self.__doc__ = doc
        self.types = types
        self.options = options

    def __call__(self, init_function) -> MyProperty:
        """Capture the decorated function as the initial-value factory."""
        self.init_function = init_function
        self.__doc__ = init_function.__doc__
        return self

    def default_fget(self, object) -> Any:
        return getattr(object, self.private_name)

    def default_fset(self, object, content) -> None:
        setattr(object, self.private_name, content)

    def __set_name__(self, _, name):
        self.public_name = name
        self.private_name = "_" + name

    @staticmethod
    def _evaluate(content, object):
        """Return ``content`` itself, or its result when it is callable.

        Uses ``co_argcount`` (not ``co_varnames``, which also counts local
        variables and therefore miscounted zero-argument functions that use
        any local) to decide whether the callable expects the owner instance.
        Callables without ``__code__`` (e.g. objects with ``__call__``) are
        invoked with the owner instance.
        """
        if not callable(content):
            return content
        code = getattr(content, "__code__", None)
        if code is not None and code.co_argcount == 0:
            return content()
        return content(object)

    def __get__(self, object, objtype=None):
        # Descriptor protocol: ``object`` is None exactly when accessed on the
        # class itself — return the descriptor. ``is None`` is deliberate; a
        # falsy owner instance (e.g. defining __bool__/__len__) must still go
        # through the normal path, which the previous ``if not object`` broke.
        if object is None:
            return self

        # Stored content can be a nominal value or a function producing it.
        return self._evaluate(self.fget_new(object), object)

    def __set__(self, object, content):
        # A value or a function returning the value can be assigned; only the
        # resulting value is validated — the original content is stored as-is
        # so it keeps being re-evaluated on reads.
        result = self._evaluate(content, object)

        validate(
            result,
            self.types,
            self.options,
            self.public_name,
        )

        # A user-supplied fset may be an unbound function expecting the
        # descriptor as its first argument; fall back to that convention.
        try:
            self.fset_new(object, content)
        except TypeError:
            self.fset_new(self, object, content)
| """Module contains MyProperty class that edit normal python property to add new features.
There is default setter, it's possible to auto init values on class init and values in setter can be
validated.
It's possible to set function as a value and it's evaluated during call.
Example of how can it be used is in module config.
Examples:
=========
>>> class MyClass:
... # Init all default values of defined properties
... def __init__(self):
... for j in vars(type(self)).values():
... if type(j) is MyProperty:
... setattr(self, j.private_name, j.init_function)
...
... @MyProperty(int) # New value will be validated whether it's int
... def var() -> int: # This is for type hints in IDE. Self is not necessary, but better for code inspection tools to avoid errors.
... '''
... Type:
... int
...
... Default:
... 123
...
... This is docstrings (also visible in IDE, because not defined dynamically).'''
...
... return 123 # This is initial value that can be edited.
...
... @MyProperty() # Even if you don't need any params, use empty brackets
... def var2(self):
... return 111
...
>>> myobject = MyClass()
>>> myobject.var
123
>>> myobject.var = 124
>>> myobject.var
124
"""
from __future__ import annotations
import types as types_lib
from typing import Any
from .misc import validate
import mylogging
class MyProperty(property):
"""Python property on steroids. Check module docstrings for more info."""
def __init__(
self,
types=None,
options=None,
fget=None,
fset=None,
doc=None,
) -> None:
if isinstance(types, types_lib.FunctionType):
raise SyntaxError(
mylogging.return_str("@MyProperty decorator has to be called (parentheses at the end).")
)
self.fget_new = fget if fget else self.default_fget
self.fset_new = fset if fset else self.default_fset
self.__doc__ = doc
self.types = types
self.options = options
def __call__(self, init_function) -> MyProperty:
self.init_function = init_function
self.__doc__ = init_function.__doc__
return self
def default_fget(self, object) -> Any:
return getattr(object, self.private_name)
def default_fset(self, object, content) -> None:
setattr(object, self.private_name, content)
def __set_name__(self, _, name):
self.public_name = name
self.private_name = "_" + name
def __get__(self, object, objtype=None):
# If getting MyProperty class, not object, return MyProperty itself
if not object:
return self
# Expected value can be nominal value or function, that return that value
content = self.fget_new(object)
if callable(content):
if not len(content.__code__.co_varnames):
value = content()
else:
value = content(object)
else:
value = content
return value
def __set__(self, object, content):
# You can setup value or function, that return that value
if callable(content):
result = content(object)
else:
result = content
validate(
result,
self.types,
self.options,
self.public_name,
)
# Method defined can be pass as static method
try:
self.fset_new(object, content)
except TypeError:
self.fset_new(self, object, content)
| en | 0.703152 | Module contains MyProperty class that edit normal python property to add new features. There is default setter, it's possible to auto init values on class init and values in setter can be validated. It's possible to set function as a value and it's evaluated during call. Example of how can it be used is in module config. Examples: ========= >>> class MyClass: ... # Init all default values of defined properties ... def __init__(self): ... for j in vars(type(self)).values(): ... if type(j) is MyProperty: ... setattr(self, j.private_name, j.init_function) ... ... @MyProperty(int) # New value will be validated whether it's int ... def var() -> int: # This is for type hints in IDE. Self is not necessary, but better for code inspection tools to avoid errors. ... ''' ... Type: ... int ... ... Default: ... 123 ... ... This is docstrings (also visible in IDE, because not defined dynamically).''' ... ... return 123 # This is initial value that can be edited. ... ... @MyProperty() # Even if you don't need any params, use empty brackets ... def var2(self): ... return 111 ... >>> myobject = MyClass() >>> myobject.var 123 >>> myobject.var = 124 >>> myobject.var 124 Python property on steroids. Check module docstrings for more info. # If getting MyProperty class, not object, return MyProperty itself # Expected value can be nominal value or function, that return that value # You can setup value or function, that return that value # Method defined can be pass as static method | 3.875272 | 4 |
Packs/Arcanna/Integrations/ArcannaAI/ArcannaAI.py | diCagri/content | 799 | 6614473 | <reponame>diCagri/content
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
from CommonServerUserPython import * # noqa: F401
import json
import urllib3
import traceback
import requests
from typing import Any, Dict
# Disable insecure warnings
urllib3.disable_warnings()
''' CLIENT CLASS '''
class Client:
    """ Implements Arcanna API

    Thin wrapper over the Arcanna.AI REST endpoints (health, jobs, events,
    feedback and bulk ingestion). Non-success HTTP statuses are raised as
    plain ``Exception`` so the integration's top-level handler reports them.
    """

    def __init__(self, api_key, base_url, verify=True, proxy=False, default_job_id=-1):
        # NOTE(review): ``proxy`` is stored but never forwarded to ``requests``;
        # presumably environment proxies are relied upon — confirm.
        self.base_url = base_url
        self.verify = verify
        self.proxy = proxy
        self.api_key = api_key
        # Fallback job used by commands that omit an explicit job_id.
        self.default_job_id = default_job_id

    def get_headers(self):
        """ Adds header

        Builds the accept/authentication headers sent with every request.
        """
        headers = {
            'accept': 'application/json',
            'x-arcanna-api-key': self.api_key
        }
        return headers

    def set_default_job_id(self, job_id):
        self.default_job_id = job_id

    def get_default_job_id(self):
        return self.default_job_id

    def test_arcanna(self):
        """Call the health endpoint; returns parsed JSON (no status check)."""
        url_suffix = 'api/v1/health'
        raw_response = requests.get(url=self.base_url + url_suffix, headers=self.get_headers(), verify=self.verify)
        return raw_response.json()

    def list_jobs(self):
        """Return all Arcanna jobs; raises on any non-200 response."""
        url_suffix = 'api/v1/jobs'
        raw_response = requests.get(url=self.base_url + url_suffix, headers=self.get_headers(), verify=self.verify)
        if raw_response.status_code != 200:
            raise Exception(f"Error in API call [{raw_response.status_code}]. Reason: {raw_response.reason}")
        return raw_response.json()

    def send_raw_event(self, job_id, severity, title, raw_body):
        """Send one event for inference; the API answers 201 on success."""
        url_suffix = 'api/v1/events/'
        # raw_body arrives as a JSON string; decode it before embedding.
        raw = json.loads(raw_body)
        body = self.map_to_arcanna_raw_event(job_id, raw, severity, title)
        raw_response = requests.post(url=self.base_url + url_suffix, headers=self.get_headers(), verify=self.verify,
                                     json=body)
        if raw_response.status_code != 201:
            raise Exception(f"Error HttpCode={raw_response.status_code} text={raw_response.text}")
        return raw_response.json()

    def map_to_arcanna_raw_event(self, job_id, raw, severity, title):
        """Shape the event payload; ``severity`` is included only when set."""
        body = {
            "job_id": job_id,
            "title": title,
            "raw_body": raw
        }
        if severity is not None:
            body["severity"] = severity
        return body

    def get_event_status(self, job_id, event_id):
        """Fetch inference status/result of a single event (expects 200)."""
        url_suffix = f"api/v1/events/{job_id}/{event_id}"
        raw_response = requests.get(url=self.base_url + url_suffix, headers=self.get_headers(), verify=self.verify)
        if raw_response.status_code != 200:
            raise Exception(f"Error HttpCode={raw_response.status_code}")
        return raw_response.json()

    def send_feedback(self, job_id, event_id, username, arcanna_label, closing_notes, indicators):
        """PUT analyst feedback; ``indicators`` is an optional JSON string."""
        url_suffix = f"api/v1/events/{job_id}/{event_id}/feedback"
        body = self.map_to_arcanna_label(arcanna_label, closing_notes, username)
        if indicators:
            body["indicators"] = json.loads(indicators)
        raw_response = requests.put(url=self.base_url + url_suffix, headers=self.get_headers(), verify=self.verify,
                                    json=body)
        if raw_response.status_code != 200:
            raise Exception(f"Arcanna Error HttpCode={raw_response.status_code} body={raw_response.text}")
        return raw_response.json()

    @staticmethod
    def map_to_arcanna_label(arcanna_label, closing_notes, username):
        """Shape the feedback/label payload attached to events."""
        body = {
            "cortex_user": username,
            "feedback": arcanna_label,
            "closing_notes": closing_notes
        }
        return body

    def send_bulk(self, job_id, events):
        """POST a pre-mapped batch of events (expects 201)."""
        url_suffix = f"api/v1/bulk/{job_id}"
        body = {
            "count": len(events),
            "events": events
        }
        raw_response = requests.post(url=self.base_url + url_suffix, headers=self.get_headers(), verify=self.verify,
                                     json=body)
        if raw_response.status_code != 201:
            raise Exception(f"Arcanna Error HttpCode={raw_response.status_code} body={raw_response.text}")
        return raw_response.json()
''' COMMAND FUNCTIONS '''
def test_module(client: Client, feature_mapping_field: str) -> str:
    """Validate the feature mapping and Arcanna connectivity.

    Returns "ok" on success, otherwise a human-readable error string.
    """
    mappings = parse_mappings(feature_mapping_field)
    if len(mappings) < 2:
        return "Arcanna Mapping Error. Please check your feature_mapping field"
    try:
        health = client.test_arcanna()
        demisto.info(f'test_module response={health}')
        if health["connected"]:
            return "ok"
        return "Authentication Error. Please check the API Key you provided."
    except DemistoException as e:
        raise e
def get_jobs(client: Client) -> CommandResults:
    """List all Arcanna jobs as a markdown table plus context entries."""
    jobs = client.list_jobs()
    table_headers = ["job_id", "title", "data_type", "status"]
    markdown = tableToMarkdown(name="Arcanna Jobs", headers=table_headers, t=jobs)
    context = {
        'Arcanna.Jobs(val.job_id && val.job_id === obj.job_id)': createContext(jobs)
    }
    return CommandResults(
        readable_output=markdown,
        outputs=context,
        raw_response=jobs
    )
def post_event(client: Client, args: Dict[str, Any]) -> CommandResults:
    """Forward a raw event payload to Arcanna for inference."""
    job_id = args.get("job_id", None) or client.get_default_job_id()
    title = args.get("title")
    event_json = args.get("event_json")
    severity = args.get("severity")
    response = client.send_raw_event(job_id=job_id, severity=severity, title=title, raw_body=event_json)
    return CommandResults(
        readable_output=f'## {response}',
        outputs_prefix='Arcanna.Event',
        outputs_key_field='event_id',
        outputs=response
    )
def get_event_status(client: Client, args: Dict[str, Any]) -> CommandResults:
    """Fetch the inference status/result of a previously sent event."""
    job_id = args.get("job_id", None) or client.get_default_job_id()
    event_id = args.get("event_id")
    response = client.get_event_status(job_id, event_id)
    return CommandResults(
        readable_output=f'## {response}',
        outputs_prefix='Arcanna.Event',
        outputs_key_field='event_id',
        outputs=response
    )
def get_default_job_id(client: Client) -> CommandResults:
    """Report the job id used when commands omit an explicit one."""
    default_id = client.get_default_job_id()
    return CommandResults(
        readable_output=f'## {default_id}',
        outputs_prefix='Arcanna.Default_Job_Id',
        outputs=default_id
    )
def get_feedback_field(params: Dict[str, Any]) -> CommandResults:
    """Expose the configured closing-reason (feedback) field name."""
    field_name = params.get("closing_reason_field")
    return CommandResults(
        readable_output=f' ## Get feedback returned results: {field_name}',
        outputs_prefix='Arcanna.FeedbackField',
        outputs=field_name
    )
def set_default_job_id(client: Client, args: Dict[str, Any]) -> CommandResults:
    """Persist a new default job id on the client, then echo it back."""
    client.set_default_job_id(args.get("job_id"))
    return get_default_job_id(client)
''' MAIN FUNCTION '''
def send_event_feedback(client: Client, feature_mapping_field: str, args: Dict[str, Any]) -> CommandResults:
    """Translate the XSOAR closing label to an Arcanna label and push feedback."""
    job_id = args.get("job_id", None) or client.get_default_job_id()
    event_id = args.get("event_id")
    username = args.get("username")
    label = args.get("label")
    closing_notes = args.get("closing_notes", "")
    indicators = args.get("indicators", None)
    arcanna_label = parse_mappings(feature_mapping_field).get(label, None)
    if arcanna_label is None:
        raise Exception(f"Error in arcanna-send-feedback.Wrong label={label}")
    response = client.send_feedback(job_id, event_id, username, arcanna_label, closing_notes, indicators)
    return CommandResults(
        readable_output=f' ## Arcanna send event feedback results: {response}',
        outputs_prefix='Arcanna.Event',
        outputs_key_field='feedback_status',
        outputs=response
    )
def send_bulk_events(client: Client, feature_mapping_field: str, args: Dict[str, Any]) -> CommandResults:
    """Map a batch of closed incidents to labeled Arcanna events and send them."""
    job_id = args.get("job_id")
    incidents = argToList(args.get("events"))
    mappings = parse_mappings(feature_mapping_field)
    payload = []
    for incident in incidents:
        arcanna_label = mappings.get(incident.get("closingReason"))
        entry = client.map_to_arcanna_raw_event(
            job_id, incident, incident.get("severity"), incident.get("name")
        )
        entry["label"] = client.map_to_arcanna_label(
            arcanna_label, incident.get("closeNotes"), incident.get("closeUser")
        )
        payload.append(entry)
    response = client.send_bulk(job_id, payload)
    return CommandResults(
        readable_output=f' ## Arcanna send bulk results: {response}',
        outputs_prefix='Arcanna.Bulk',
        outputs_key_field='status',
        outputs=response
    )
def parse_mappings(mapping: str) -> dict:
    """Parse a comma-separated ``"closing reason"=arcanna_label`` mapping.

    Surrounding whitespace and double quotes are stripped from both sides.

    :param mapping: e.g. ``'"False Positive"=arcanna_fp, Resolved=arcanna_ok'``
    :return: dict of XSOAR closing reason -> Arcanna label
    :raises ValueError: when an entry is not a single ``key=value`` pair
    """
    result = {}
    pairs = mapping.split(",")
    for pair in pairs:
        parts = pair.split("=")
        if len(parts) != 2:
            # ValueError, not BaseException: BaseException escapes the
            # ``except Exception`` handler in main() and would crash the
            # integration instead of returning a readable error.
            raise ValueError("Arcanna: Error while parsing mapping fields")
        demisto_closing_reason = parts[0].strip().replace("\"", "")
        arcanna_label = parts[1].strip().replace("\"", "")
        result[demisto_closing_reason] = arcanna_label
    return result
def main() -> None:
    """main function, parses params and runs command functions

    Reads the integration parameters, builds a single Client and dispatches
    the invoked command to its handler; all errors are reported through
    ``return_error``.

    :return:
    :rtype:
    """

    api_key = demisto.params().get('apikey')

    # get the service API url
    base_url = urljoin(demisto.params()['url'])
    verify_certificate = not demisto.params().get('insecure', False)
    # Comma-separated "closing reason"=arcanna_label pairs (see parse_mappings).
    feature_mapping = demisto.params().get('feature_mapping')
    proxy = demisto.params().get('proxy', False)
    default_job_id = demisto.params().get('default_job_id', -1)
    demisto.debug(f'Command being called is {demisto.command()}')
    try:
        # One client instance is shared by every command handler below.
        client = Client(
            api_key=api_key,
            base_url=base_url,
            verify=verify_certificate,
            proxy=proxy,
            default_job_id=default_job_id
        )

        if demisto.command() == 'test-module':
            # This is the call made when pressing the integration Test button.
            result_test = test_module(client, feature_mapping)
            return_results(result_test)
        elif demisto.command() == "arcanna-get-jobs":
            result_get_jobs = get_jobs(client)
            return_results(result_get_jobs)
        elif demisto.command() == "arcanna-send-event":
            result_send_event = post_event(client, demisto.args())
            return_results(result_send_event)
        elif demisto.command() == "arcanna-get-event-status":
            result_get_event = get_event_status(client, demisto.args())
            return_results(result_get_event)
        elif demisto.command() == "arcanna-get-default-job-id":
            result_get_default_id = get_default_job_id(client)
            return_results(result_get_default_id)
        elif demisto.command() == "arcanna-set-default-job-id":
            result_set_default_id = set_default_job_id(client, demisto.args())
            return_results(result_set_default_id)
        elif demisto.command() == "arcanna-send-event-feedback":
            result_send_feedback = send_event_feedback(client, feature_mapping, demisto.args())
            return_results(result_send_feedback)
        elif demisto.command() == "arcanna-send-bulk-events":
            result_bulk = send_bulk_events(client, feature_mapping, demisto.args())
            return_results(result_bulk)
        elif demisto.command() == "arcanna-get-feedback-field":
            result_feedback_field = get_feedback_field(demisto.params())
            return_results(result_feedback_field)

    # Log exceptions and return errors
    except Exception as e:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
from CommonServerUserPython import * # noqa: F401
import json
import urllib3
import traceback
import requests
from typing import Any, Dict
# Disable insecure warnings
urllib3.disable_warnings()
''' CLIENT CLASS '''
class Client:
""" Implements Arcanna API
"""
def __init__(self, api_key, base_url, verify=True, proxy=False, default_job_id=-1):
self.base_url = base_url
self.verify = verify
self.proxy = proxy
self.api_key = api_key
self.default_job_id = default_job_id
def get_headers(self):
""" Adds header
"""
headers = {
'accept': 'application/json',
'x-arcanna-api-key': self.api_key
}
return headers
def set_default_job_id(self, job_id):
self.default_job_id = job_id
def get_default_job_id(self):
return self.default_job_id
def test_arcanna(self):
url_suffix = 'api/v1/health'
raw_response = requests.get(url=self.base_url + url_suffix, headers=self.get_headers(), verify=self.verify)
return raw_response.json()
def list_jobs(self):
url_suffix = 'api/v1/jobs'
raw_response = requests.get(url=self.base_url + url_suffix, headers=self.get_headers(), verify=self.verify)
if raw_response.status_code != 200:
raise Exception(f"Error in API call [{raw_response.status_code}]. Reason: {raw_response.reason}")
return raw_response.json()
def send_raw_event(self, job_id, severity, title, raw_body):
url_suffix = 'api/v1/events/'
raw = json.loads(raw_body)
body = self.map_to_arcanna_raw_event(job_id, raw, severity, title)
raw_response = requests.post(url=self.base_url + url_suffix, headers=self.get_headers(), verify=self.verify,
json=body)
if raw_response.status_code != 201:
raise Exception(f"Error HttpCode={raw_response.status_code} text={raw_response.text}")
return raw_response.json()
def map_to_arcanna_raw_event(self, job_id, raw, severity, title):
body = {
"job_id": job_id,
"title": title,
"raw_body": raw
}
if severity is not None:
body["severity"] = severity
return body
def get_event_status(self, job_id, event_id):
url_suffix = f"api/v1/events/{job_id}/{event_id}"
raw_response = requests.get(url=self.base_url + url_suffix, headers=self.get_headers(), verify=self.verify)
if raw_response.status_code != 200:
raise Exception(f"Error HttpCode={raw_response.status_code}")
return raw_response.json()
def send_feedback(self, job_id, event_id, username, arcanna_label, closing_notes, indicators):
url_suffix = f"api/v1/events/{job_id}/{event_id}/feedback"
body = self.map_to_arcanna_label(arcanna_label, closing_notes, username)
if indicators:
body["indicators"] = json.loads(indicators)
raw_response = requests.put(url=self.base_url + url_suffix, headers=self.get_headers(), verify=self.verify,
json=body)
if raw_response.status_code != 200:
raise Exception(f"Arcanna Error HttpCode={raw_response.status_code} body={raw_response.text}")
return raw_response.json()
@staticmethod
def map_to_arcanna_label(arcanna_label, closing_notes, username):
body = {
"cortex_user": username,
"feedback": arcanna_label,
"closing_notes": closing_notes
}
return body
def send_bulk(self, job_id, events):
url_suffix = f"api/v1/bulk/{job_id}"
body = {
"count": len(events),
"events": events
}
raw_response = requests.post(url=self.base_url + url_suffix, headers=self.get_headers(), verify=self.verify,
json=body)
if raw_response.status_code != 201:
raise Exception(f"Arcanna Error HttpCode={raw_response.status_code} body={raw_response.text}")
return raw_response.json()
''' COMMAND FUNCTIONS '''
def test_module(client: Client, feature_mapping_field: str) -> str:
result = parse_mappings(feature_mapping_field)
if len(result) < 2:
return "Arcanna Mapping Error. Please check your feature_mapping field"
try:
response = client.test_arcanna()
demisto.info(f'test_module response={response}')
if not response["connected"]:
return "Authentication Error. Please check the API Key you provided."
else:
return "ok"
except DemistoException as e:
raise e
def get_jobs(client: Client) -> CommandResults:
result = client.list_jobs()
headers = ["job_id", "title", "data_type", "status"]
readable_output = tableToMarkdown(name="Arcanna Jobs", headers=headers, t=result)
outputs = {
'Arcanna.Jobs(val.job_id && val.job_id === obj.job_id)': createContext(result)
}
return CommandResults(
readable_output=readable_output,
outputs=outputs,
raw_response=result
)
def post_event(client: Client, args: Dict[str, Any]) -> CommandResults:
title = args.get("title")
job_id = args.get("job_id", None)
if not job_id:
job_id = client.get_default_job_id()
raw_payload = args.get("event_json")
severity = args.get("severity")
response = client.send_raw_event(job_id=job_id, severity=severity, title=title, raw_body=raw_payload)
readable_output = f'## {response}'
return CommandResults(
readable_output=readable_output,
outputs_prefix='Arcanna.Event',
outputs_key_field='event_id',
outputs=response
)
def get_event_status(client: Client, args: Dict[str, Any]) -> CommandResults:
job_id = args.get("job_id", None)
if not job_id:
job_id = client.get_default_job_id()
event_id = args.get("event_id")
response = client.get_event_status(job_id, event_id)
readable_output = f'## {response}'
return CommandResults(
readable_output=readable_output,
outputs_prefix='Arcanna.Event',
outputs_key_field='event_id',
outputs=response
)
def get_default_job_id(client: Client) -> CommandResults:
response = client.get_default_job_id()
readable_output = f'## {response}'
return CommandResults(
readable_output=readable_output,
outputs_prefix='Arcanna.Default_Job_Id',
outputs=response
)
def get_feedback_field(params: Dict[str, Any]) -> CommandResults:
response = params.get("closing_reason_field")
readable_output = f' ## Get feedback returned results: {response}'
return CommandResults(
readable_output=readable_output,
outputs_prefix='Arcanna.FeedbackField',
outputs=response
)
def set_default_job_id(client: Client, args: Dict[str, Any]) -> CommandResults:
job_id = args.get("job_id")
client.set_default_job_id(job_id)
return get_default_job_id(client)
''' MAIN FUNCTION '''
def send_event_feedback(client: Client, feature_mapping_field: str, args: Dict[str, Any]) -> CommandResults:
job_id = args.get("job_id", None)
if not job_id:
job_id = client.get_default_job_id()
event_id = args.get("event_id")
mappings = parse_mappings(feature_mapping_field)
username = args.get("username")
label = args.get("label")
closing_notes = args.get("closing_notes", "")
indicators = args.get("indicators", None)
arcanna_label = mappings.get(label, None)
if arcanna_label is None:
raise Exception(f"Error in arcanna-send-feedback.Wrong label={label}")
response = client.send_feedback(job_id, event_id, username, arcanna_label, closing_notes, indicators)
readable_output = f' ## Arcanna send event feedback results: {response}'
return CommandResults(
readable_output=readable_output,
outputs_prefix='Arcanna.Event',
outputs_key_field='feedback_status',
outputs=response
)
def send_bulk_events(client: Client, feature_mapping_field: str, args: Dict[str, Any]) -> CommandResults:
job_id = args.get("job_id")
events = argToList(args.get("events"))
mappings = parse_mappings(feature_mapping_field)
mapped_events = []
for event in events:
closing_status = event.get("closingReason")
closing_notes = event.get("closeNotes")
closing_user = event.get("closeUser")
arcanna_label = mappings.get(closing_status)
title = event.get("name")
severity = event.get("severity")
body = client.map_to_arcanna_raw_event(job_id, event, severity, title)
body["label"] = client.map_to_arcanna_label(arcanna_label, closing_notes, closing_user)
mapped_events.append(body)
response = client.send_bulk(job_id, mapped_events)
readable_output = f' ## Arcanna send bulk results: {response}'
return CommandResults(
readable_output=readable_output,
outputs_prefix='Arcanna.Bulk',
outputs_key_field='status',
outputs=response
)
def parse_mappings(mapping: str) -> dict:
result = {}
pairs = mapping.split(",")
for pair in pairs:
parts = pair.split("=")
if len(parts) != 2:
raise BaseException("Arcanna: Error while parsing mapping fields")
demisto_closing_reason = parts[0].strip().replace("\"", "")
arcanna_label = parts[1].strip().replace("\"", "")
result[demisto_closing_reason] = arcanna_label
return result
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
api_key = demisto.params().get('apikey')
# get the service API url
base_url = urljoin(demisto.params()['url'])
verify_certificate = not demisto.params().get('insecure', False)
feature_mapping = demisto.params().get('feature_mapping')
proxy = demisto.params().get('proxy', False)
default_job_id = demisto.params().get('default_job_id', -1)
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
api_key=api_key,
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
default_job_id=default_job_id
)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result_test = test_module(client, feature_mapping)
return_results(result_test)
elif demisto.command() == "arcanna-get-jobs":
result_get_jobs = get_jobs(client)
return_results(result_get_jobs)
elif demisto.command() == "arcanna-send-event":
result_send_event = post_event(client, demisto.args())
return_results(result_send_event)
elif demisto.command() == "arcanna-get-event-status":
result_get_event = get_event_status(client, demisto.args())
return_results(result_get_event)
elif demisto.command() == "arcanna-get-default-job-id":
result_get_default_id = get_default_job_id(client)
return_results(result_get_default_id)
elif demisto.command() == "arcanna-set-default-job-id":
result_set_default_id = set_default_job_id(client, demisto.args())
return_results(result_set_default_id)
elif demisto.command() == "arcanna-send-event-feedback":
result_send_feedback = send_event_feedback(client, feature_mapping, demisto.args())
return_results(result_send_feedback)
elif demisto.command() == "arcanna-send-bulk-events":
result_bulk = send_bulk_events(client, feature_mapping, demisto.args())
return_results(result_bulk)
elif demisto.command() == "arcanna-get-feedback-field":
result_feedback_field = get_feedback_field(demisto.params())
return_results(result_feedback_field)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main() | en | 0.589347 | # noqa: F401 # noqa: F401 # noqa: F401 # Disable insecure warnings CLIENT CLASS Implements Arcanna API Adds header COMMAND FUNCTIONS # {response}' # {response}' # {response}' ## Get feedback returned results: {response}' MAIN FUNCTION ## Arcanna send event feedback results: {response}' ## Arcanna send bulk results: {response}' main function, parses params and runs command functions :return: :rtype: # get the service API url # This is the call made when pressing the integration Test button. # Log exceptions and return errors # print the traceback ENTRY POINT | 2.04459 | 2 |
tgtypes/models/input_contact_message_content.py | autogram/tgtypes | 0 | 6614474 | from __future__ import annotations
from typing import Optional
from .input_message_content import InputMessageContent
class InputContactMessageContent(InputMessageContent):
    """
    Represents the content of a contact message to be sent as the result of an inline query.

    Source: https://core.telegram.org/bots/api#inputcontactmessagecontent
    """

    # NOTE(review): the base class is presumably a pydantic model; the bare
    # string literals below follow the repo's field-docstring convention.
    phone_number: str
    """Contact's phone number"""
    first_name: str
    """Contact's first name"""
    last_name: Optional[str] = None
    """Contact's last name"""
    vcard: Optional[str] = None
    """Additional data about the contact in the form of a vCard, 0-2048 bytes"""
| from __future__ import annotations
from typing import Optional
from .input_message_content import InputMessageContent
class InputContactMessageContent(InputMessageContent):
"""
Represents the content of a contact message to be sent as the result of an inline query.
Source: https://core.telegram.org/bots/api#inputcontactmessagecontent
"""
phone_number: str
"""Contact's phone number"""
first_name: str
"""Contact's first name"""
last_name: Optional[str] = None
"""Contact's last name"""
vcard: Optional[str] = None
"""Additional data about the contact in the form of a vCard, 0-2048 bytes"""
| en | 0.744485 | Represents the content of a contact message to be sent as the result of an inline query. Source: https://core.telegram.org/bots/api#inputcontactmessagecontent Contact's phone number Contact's first name Contact's last name Additional data about the contact in the form of a vCard, 0-2048 bytes | 2.666493 | 3 |
src/utils/transactions.py | AdityaSidharta/saltedge_python | 0 | 6614475 | import os
import pandas as pd
from src.utils.directory import RAW_PATH
def get_accounts(year, month):
    """Return account-name prefixes of all raw CSV exports for a month.

    Files in RAW_PATH are expected to be named
    ``"<account ...>_<year>_<month>.csv"``; the account is the first
    space-separated token before the ``_<year>_<month>.csv`` suffix.
    """
    accounts = []
    for filename in os.listdir(RAW_PATH):
        if filename.endswith("_{}_{}.csv".format(year, month)):
            index = filename.index("_{}_{}.csv".format(year, month))
            # first whitespace-separated token of the stem is the account name
            account = filename[:index].split(" ")[0]
            accounts.append(account)
    return accounts
def load_transaction(year, month):
    """Concatenate every raw CSV for the given year/month into one DataFrame.

    The index is regenerated (``ignore_index=True``) so rows from different
    account files do not collide. NOTE: ``pd.concat`` raises ValueError when
    no file matches the period.
    """
    list_df = []
    for filename in os.listdir(RAW_PATH):
        if filename.endswith("_{}_{}.csv".format(year, month)):
            list_df.append(pd.read_csv(os.path.join(RAW_PATH, filename)))
    return pd.concat(list_df, axis=0, ignore_index=True)
def remove_accounts(input_df, accounts):
    """Drop every transaction whose description mentions one of *accounts*."""
    filtered = input_df.copy()
    for name in accounts:
        mentions = filtered.description.apply(lambda text: name in text)
        filtered = filtered[~mentions]
    return filtered.reset_index(drop=True)
def remove_keywords(input_df, keywords):
df = input_df.copy()
for keyword in keywords:
df = df[~df.description.apply(lambda x: keyword in x)]
return df.reset_index(drop=True)
def get_filename(account_name, year, month):
return "{}_{}_{}.csv".format(account_name, year, month)
def map_type(input_df):
df = input_df.copy()
df.loc[df["type"] == "INT", "true_category"] = "INTEREST"
df.loc[df["type"] == "ATINT", "true_category"] = "INTEREST"
df.loc[df["type"] == "AWL", "true_category"] = "CASH"
return df
def map_category(input_df, transaction_dict):
df = input_df.copy()
for key, items in transaction_dict.items():
true_category = key
categories = items["mapping"]
for category in categories:
df.loc[df["category"] == category, "true_category"] = true_category
return df
def map_description(input_df, transaction_dict):
df = input_df.copy()
for key, items in transaction_dict.items():
true_category = key
keywords = items["description"]
for keyword in keywords:
df.loc[df["description"].apply(lambda x: keyword in x), "true_category"] = true_category
return df
def get_emoji(input_df, transaction_dict):
df = input_df.copy()
df["emoji"] = ""
for key, items in transaction_dict.items():
true_category = key
emoji = items["emoji"]
df.loc[df["true_category"] == true_category, "emoji"] = emoji
return df
| import os
import pandas as pd
from src.utils.directory import RAW_PATH
def get_accounts(year, month):
accounts = []
for filename in os.listdir(RAW_PATH):
if filename.endswith("_{}_{}.csv".format(year, month)):
index = filename.index("_{}_{}.csv".format(year, month))
account = filename[:index].split(" ")[0]
accounts.append(account)
return accounts
def load_transaction(year, month):
list_df = []
for filename in os.listdir(RAW_PATH):
if filename.endswith("_{}_{}.csv".format(year, month)):
list_df.append(pd.read_csv(os.path.join(RAW_PATH, filename)))
return pd.concat(list_df, axis=0, ignore_index=True)
def remove_accounts(input_df, accounts):
df = input_df.copy()
for account in accounts:
df = df[~df.description.apply(lambda x: account in x)]
return df.reset_index(drop=True)
def remove_keywords(input_df, keywords):
df = input_df.copy()
for keyword in keywords:
df = df[~df.description.apply(lambda x: keyword in x)]
return df.reset_index(drop=True)
def get_filename(account_name, year, month):
return "{}_{}_{}.csv".format(account_name, year, month)
def map_type(input_df):
df = input_df.copy()
df.loc[df["type"] == "INT", "true_category"] = "INTEREST"
df.loc[df["type"] == "ATINT", "true_category"] = "INTEREST"
df.loc[df["type"] == "AWL", "true_category"] = "CASH"
return df
def map_category(input_df, transaction_dict):
df = input_df.copy()
for key, items in transaction_dict.items():
true_category = key
categories = items["mapping"]
for category in categories:
df.loc[df["category"] == category, "true_category"] = true_category
return df
def map_description(input_df, transaction_dict):
df = input_df.copy()
for key, items in transaction_dict.items():
true_category = key
keywords = items["description"]
for keyword in keywords:
df.loc[df["description"].apply(lambda x: keyword in x), "true_category"] = true_category
return df
def get_emoji(input_df, transaction_dict):
df = input_df.copy()
df["emoji"] = ""
for key, items in transaction_dict.items():
true_category = key
emoji = items["emoji"]
df.loc[df["true_category"] == true_category, "emoji"] = emoji
return df
| none | 1 | 3.064219 | 3 | |
polrev/offices/migrations/0006_alter_office_options.py | polrev-github/polrev-django | 1 | 6614476 | <gh_stars>1-10
# Generated by Django 3.2.13 on 2022-05-02 15:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('offices', '0005_office_number'),
]
operations = [
migrations.AlterModelOptions(
name='office',
options={'ordering': ['state_fips', 'number']},
),
]
| # Generated by Django 3.2.13 on 2022-05-02 15:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('offices', '0005_office_number'),
]
operations = [
migrations.AlterModelOptions(
name='office',
options={'ordering': ['state_fips', 'number']},
),
] | en | 0.81816 | # Generated by Django 3.2.13 on 2022-05-02 15:01 | 1.487206 | 1 |
snippets/dp/lcs.py | KATO-Hiro/Somen-Soupy | 1 | 6614477 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from typing import List
def lcs(a: List, b: List) -> int:
"""Get LCS (Longest Common Subsequence).
Args:
a: List of numbers.
b: List of numbers.
Returns:
LCS
Landau notation: O(a_count * b_count).
"""
n = len(a)
m = len(b)
dp = [[0 for _ in range(m + 1)] for _ in range(n + 1)]
for i, ai in enumerate(a):
for j, bj in enumerate(b):
if ai == bj:
dp[i + 1][j + 1] = dp[i][j] + 1
else:
dp[i + 1][j + 1] = max(dp[i + 1][j], dp[i][j + 1])
return dp[n][m]
| # -*- coding: utf-8 -*-
from typing import List
def lcs(a: List, b: List) -> int:
"""Get LCS (Longest Common Subsequence).
Args:
a: List of numbers.
b: List of numbers.
Returns:
LCS
Landau notation: O(a_count * b_count).
"""
n = len(a)
m = len(b)
dp = [[0 for _ in range(m + 1)] for _ in range(n + 1)]
for i, ai in enumerate(a):
for j, bj in enumerate(b):
if ai == bj:
dp[i + 1][j + 1] = dp[i][j] + 1
else:
dp[i + 1][j + 1] = max(dp[i + 1][j], dp[i][j + 1])
return dp[n][m] | en | 0.738919 | # -*- coding: utf-8 -*- Get LCS (Longest Common Subsequence). Args: a: List of numbers. b: List of numbers. Returns: LCS Landau notation: O(a_count * b_count). | 3.501848 | 4 |
bin/features/steps/testutil.py | raulpush/monitorizare-site | 0 | 6614478 | # -*- coding: UTF-8 -*-
"""
Test utility support.
"""
# @mark.test_support
# ----------------------------------------------------------------------------
# TEST SUPPORT:
# ----------------------------------------------------------------------------
class NamedNumber(object):
"""Map named numbers into numbers."""
MAP = {
"one": 1,
"two": 2,
"three": 3,
"four": 4,
"five": 5,
"six": 6,
}
@classmethod
def from_string(cls, named_number):
name = named_number.strip().lower()
return cls.MAP[name]
| # -*- coding: UTF-8 -*-
"""
Test utility support.
"""
# @mark.test_support
# ----------------------------------------------------------------------------
# TEST SUPPORT:
# ----------------------------------------------------------------------------
class NamedNumber(object):
"""Map named numbers into numbers."""
MAP = {
"one": 1,
"two": 2,
"three": 3,
"four": 4,
"five": 5,
"six": 6,
}
@classmethod
def from_string(cls, named_number):
name = named_number.strip().lower()
return cls.MAP[name]
| en | 0.306726 | # -*- coding: UTF-8 -*- Test utility support. # @mark.test_support # ---------------------------------------------------------------------------- # TEST SUPPORT: # ---------------------------------------------------------------------------- Map named numbers into numbers. | 3.054428 | 3 |
dojo/management/commands/sla_notifications.py | mtcolman/django-DefectDojo | 1,772 | 6614479 | from django.core.management.base import BaseCommand
from dojo.utils import sla_compute_and_notify
"""
This command will iterate over findings and send SLA notifications as appropriate
"""
class Command(BaseCommand):
help = 'Launch with no argument.'
def handle(self, *args, **options):
sla_compute_and_notify()
| from django.core.management.base import BaseCommand
from dojo.utils import sla_compute_and_notify
"""
This command will iterate over findings and send SLA notifications as appropriate
"""
class Command(BaseCommand):
help = 'Launch with no argument.'
def handle(self, *args, **options):
sla_compute_and_notify()
| en | 0.904862 | This command will iterate over findings and send SLA notifications as appropriate | 1.959669 | 2 |
setup.py | artcom-net/pytgram | 0 | 6614480 | import os
from setuptools import setup
with open(os.path.join('pytgram', '__init__.py'), 'r') as init_file:
init_data = {}
for line in init_file:
if line.startswith('__'):
meta, value = line.split('=')
init_data[meta.strip()] = value.strip().replace("'", '')
setup(
name='pytgram',
version=init_data['__version__'],
packages=['pytgram', 'tests'],
url='https://github.com/artcom-net/pytgram',
license=init_data['__license__'],
author=init_data['__author__'],
author_email=init_data['__email__'],
description='Library to create Telegram Bot based on Twisted',
long_description=open('README.rst').read(),
install_requires=open('requirements.txt').read().split(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Twisted',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Software Development :: Libraries',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
]
)
| import os
from setuptools import setup
with open(os.path.join('pytgram', '__init__.py'), 'r') as init_file:
init_data = {}
for line in init_file:
if line.startswith('__'):
meta, value = line.split('=')
init_data[meta.strip()] = value.strip().replace("'", '')
setup(
name='pytgram',
version=init_data['__version__'],
packages=['pytgram', 'tests'],
url='https://github.com/artcom-net/pytgram',
license=init_data['__license__'],
author=init_data['__author__'],
author_email=init_data['__email__'],
description='Library to create Telegram Bot based on Twisted',
long_description=open('README.rst').read(),
install_requires=open('requirements.txt').read().split(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Twisted',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Software Development :: Libraries',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
]
)
| none | 1 | 1.669329 | 2 | |
general/gather_all_data.py | j-lazo/lumen_segmentation | 0 | 6614481 | import os
import random
import shutil
def gather_all_data(source_folder, destination_folder, exceptions):
"""
This function gathers all data from different folders and put it all together in a single folder called "all"
:param source_folders:
:param destination_folder:
:param exceptions:
:return:
"""
folder_list = set(os.listdir(source_folder)) - set(exceptions)
folder_list = sorted([element for element in folder_list if
os.path.isdir(''.join([source_folder, element]))])
for folder in folder_list[:]:
print(folder)
files_path_images = "".join([source_folder, folder, '/image/'])
files_path_labels = "".join([source_folder, folder, '/label/'])
images_list = os.listdir(files_path_images)
labels_list = os.listdir(files_path_labels)
#image_subfolder = sorted([element for element in images_list if os.path.isdir(''.join([source_folder, files_path_images]))])
labels_subfolder = sorted([element for element in labels_list if
os.path.isdir(''.join([source_folder, files_path_labels]))])
if not(labels_subfolder):
destination_image_folder = "".join([destination_folder, 'image/'])
destination_label_folder = "".join([destination_folder, 'label/'])
if not (os.path.isdir(destination_image_folder)):
os.mkdir(destination_image_folder)
if not (os.path.isdir(destination_label_folder)):
os.mkdir(destination_label_folder)
for counter, image in enumerate(images_list[:]):
shutil.copy(files_path_images + image, destination_image_folder + image)
shutil.copy(files_path_labels + image[:-4] + '.png', destination_label_folder + image[:-4] + '.png')
else:
for sub_folder in labels_subfolder:
#2Do complete this option and the funciotn copy_images_and_label
copy_images_and_label(source_folder, destination_folder, sub_folder)
def copy_images_and_label(source_folder, destination_folder, folder=''):
"""
Copy tuples of images and labels in 1 step
:param original_folder:
:param destination_folder:
:return:
"""
source_folder = "".join([source_folder, '/', folder, '/'])
destination_folder = "".join([destination_folder, '/', folder, '/'])
files_path_images = "".join([source_folder, '/image/'])
files_path_labels = "".join([source_folder, '/label/'])
images_list = os.listdir(files_path_images)
labels_list = os.listdir(files_path_labels)
destination_image_folder = "".join([destination_folder, 'image/'])
destination_label_folder = "".join([destination_folder, 'label/'])
if not (os.path.isdir(destination_image_folder)):
os.mkdir(destination_image_folder)
if not (os.path.isdir(destination_label_folder)):
os.mkdir(destination_label_folder)
for counter, image in enumerate(images_list):
shutil.copy(files_path_images + image, destination_image_folder + image)
shutil.copy(files_path_labels + image, destination_label_folder + image)
return 0
def main():
source_folders = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/3x3_rgb_dataset/all/patients_cases/'
destination_folder = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/3x3_rgb_dataset/all/all/'
exceptions = ['all']
gather_all_data(source_folders, destination_folder, exceptions)
if __name__ == '__main__':
main()
| import os
import random
import shutil
def gather_all_data(source_folder, destination_folder, exceptions):
"""
This function gathers all data from different folders and put it all together in a single folder called "all"
:param source_folders:
:param destination_folder:
:param exceptions:
:return:
"""
folder_list = set(os.listdir(source_folder)) - set(exceptions)
folder_list = sorted([element for element in folder_list if
os.path.isdir(''.join([source_folder, element]))])
for folder in folder_list[:]:
print(folder)
files_path_images = "".join([source_folder, folder, '/image/'])
files_path_labels = "".join([source_folder, folder, '/label/'])
images_list = os.listdir(files_path_images)
labels_list = os.listdir(files_path_labels)
#image_subfolder = sorted([element for element in images_list if os.path.isdir(''.join([source_folder, files_path_images]))])
labels_subfolder = sorted([element for element in labels_list if
os.path.isdir(''.join([source_folder, files_path_labels]))])
if not(labels_subfolder):
destination_image_folder = "".join([destination_folder, 'image/'])
destination_label_folder = "".join([destination_folder, 'label/'])
if not (os.path.isdir(destination_image_folder)):
os.mkdir(destination_image_folder)
if not (os.path.isdir(destination_label_folder)):
os.mkdir(destination_label_folder)
for counter, image in enumerate(images_list[:]):
shutil.copy(files_path_images + image, destination_image_folder + image)
shutil.copy(files_path_labels + image[:-4] + '.png', destination_label_folder + image[:-4] + '.png')
else:
for sub_folder in labels_subfolder:
#2Do complete this option and the funciotn copy_images_and_label
copy_images_and_label(source_folder, destination_folder, sub_folder)
def copy_images_and_label(source_folder, destination_folder, folder=''):
"""
Copy tuples of images and labels in 1 step
:param original_folder:
:param destination_folder:
:return:
"""
source_folder = "".join([source_folder, '/', folder, '/'])
destination_folder = "".join([destination_folder, '/', folder, '/'])
files_path_images = "".join([source_folder, '/image/'])
files_path_labels = "".join([source_folder, '/label/'])
images_list = os.listdir(files_path_images)
labels_list = os.listdir(files_path_labels)
destination_image_folder = "".join([destination_folder, 'image/'])
destination_label_folder = "".join([destination_folder, 'label/'])
if not (os.path.isdir(destination_image_folder)):
os.mkdir(destination_image_folder)
if not (os.path.isdir(destination_label_folder)):
os.mkdir(destination_label_folder)
for counter, image in enumerate(images_list):
shutil.copy(files_path_images + image, destination_image_folder + image)
shutil.copy(files_path_labels + image, destination_label_folder + image)
return 0
def main():
source_folders = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/3x3_rgb_dataset/all/patients_cases/'
destination_folder = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/3x3_rgb_dataset/all/all/'
exceptions = ['all']
gather_all_data(source_folders, destination_folder, exceptions)
if __name__ == '__main__':
main()
| en | 0.638823 | This function gathers all data from different folders and put it all together in a single folder called "all" :param source_folders: :param destination_folder: :param exceptions: :return: #image_subfolder = sorted([element for element in images_list if os.path.isdir(''.join([source_folder, files_path_images]))]) #2Do complete this option and the funciotn copy_images_and_label Copy tuples of images and labels in 1 step :param original_folder: :param destination_folder: :return: | 3.078713 | 3 |
sorting/selection_sort.py | mgawlinska/basic-algorithms-python | 0 | 6614482 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
""" Selection sort
1. Find a maximum value in the array using linear search
2. Swap the maximum value with the last unsorted element
3. Consider the last element sorted
4. Repeat for the unsorted elements
Time complexity:
* best O(n^2)
* average O(n^2)
* worst O(n^2)
"""
__author__ = "<NAME>"
__copyright__ = ""
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = ""
__status__ = "Prototype"
from random import randint
def linear_search(array):
max_element = array[0]
idx = 0
for i in range(len(array)):
if array[i] > max_element:
max_element = array[i]
idx = i
return idx
def selection_sort(array):
for i in range(len(array)):
if i == 0:
idx = linear_search(array[None:None])
else:
idx = linear_search(array[:-i])
array[-i-1], array[idx] = (array[idx], array[-i-1])
print(f"Selection sort. Pass: {i + 1} list: {array}")
list_length = 10
factor = 10
to_sort = [randint(0, list_length * factor) for x in range(list_length)]
print(f"Array: {to_sort}")
selection_sort(to_sort) | #!/usr/bin/python3
# -*- coding: utf-8 -*-
""" Selection sort
1. Find a maximum value in the array using linear search
2. Swap the maximum value with the last unsorted element
3. Consider the last element sorted
4. Repeat for the unsorted elements
Time complexity:
* best O(n^2)
* average O(n^2)
* worst O(n^2)
"""
__author__ = "<NAME>"
__copyright__ = ""
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = ""
__status__ = "Prototype"
from random import randint
def linear_search(array):
max_element = array[0]
idx = 0
for i in range(len(array)):
if array[i] > max_element:
max_element = array[i]
idx = i
return idx
def selection_sort(array):
for i in range(len(array)):
if i == 0:
idx = linear_search(array[None:None])
else:
idx = linear_search(array[:-i])
array[-i-1], array[idx] = (array[idx], array[-i-1])
print(f"Selection sort. Pass: {i + 1} list: {array}")
list_length = 10
factor = 10
to_sort = [randint(0, list_length * factor) for x in range(list_length)]
print(f"Array: {to_sort}")
selection_sort(to_sort) | en | 0.556918 | #!/usr/bin/python3 # -*- coding: utf-8 -*- Selection sort 1. Find a maximum value in the array using linear search 2. Swap the maximum value with the last unsorted element 3. Consider the last element sorted 4. Repeat for the unsorted elements Time complexity: * best O(n^2) * average O(n^2) * worst O(n^2) | 4.014147 | 4 |
tests/test_spheres.py | 0xF4D3C0D3/ray-tracer-challenge-with-python | 0 | 6614483 | <filename>tests/test_spheres.py
import numpy as np
from src.grid import Point, Vector
from src.light import Ray
from src.material import Material
from src.matrix import Rotation, Scaling, Translation
from src.shape.sphere import Sphere
def test_ray_intersects_sphere_at_two_points():
r = Ray(Point(0, 0, -5), Vector(0, 0, 1))
s = Sphere()
xs = s.intersect(r)
assert xs.count == 1
assert xs == [4, 6]
def test_ray_intersects_sphere_at_tangent():
r = Ray(Point(0, 1, -5), Vector(0, 0, 1))
s = Sphere()
xs = s.intersect(r)
assert xs.count == 1
assert xs == [5, 5]
def test_ray_misses_sphere():
r = Ray(Point(0, 2, -5), Vector(0, 0, 1))
s = Sphere()
xs = s.intersect(r)
assert xs.count == 0
def test_ray_originates_inside_sphere():
r = Ray(Point(0, 0, 0), Vector(0, 0, 1))
s = Sphere()
xs = s.intersect(r)
assert xs.count == 1
assert xs == [-1, 1]
def test_sphere_is_behind_ray():
r = Ray(Point(0, 0, 5), Vector(0, 0, 1))
s = Sphere()
xs = s.intersect(r)
assert xs.count == 1
assert xs == [-6, -4]
def test_intersect_sets_the_object_on_intersection():
r = Ray(Point(0, 0, -5), Vector(0, 0, 1))
s = Sphere()
xs = s.intersect(r)
assert xs.count == 1
assert xs.obj is s
def test_sphere_default_transformation():
s = Sphere()
assert np.allclose(s.transform, np.eye(4))
def test_changing_sphere_transformation():
s = Sphere()
t = Translation(2, 3, 4)
s = s.set_transform(t)
assert np.allclose(s.transform, t)
def test_intersecting_scaled_sphere_with_ray():
r = Ray(Point(0, 0, -5), Vector(0, 0, 1))
s = Sphere()
s = s.set_transform(Scaling(2, 2, 2))
xs = s.intersect(r)
assert xs.count == 1
assert xs == [3, 7]
def test_intersecting_translated_sphere_with_ray():
r = Ray(Point(0, 0, -5), Vector(0, 0, 1))
s = Sphere()
s = s.set_transform(Translation(5, 0, 0))
xs = s.intersect(r)
assert xs.count == 0
def test_normal_on_sphere_at_point_xaxis():
s = Sphere()
n = s.normal_at(Point(1, 0, 0))
assert n == Vector(1, 0, 0)
def test_normal_on_sphere_at_point_yaxis():
s = Sphere()
n = s.normal_at(Point(0, 1, 0))
assert n == Vector(0, 1, 0)
def test_normal_on_sphere_at_point_zaxis():
s = Sphere()
n = s.normal_at(Point(0, 0, 1))
assert n == Vector(0, 0, 1)
def test_normal_on_sphere_at_nonaxial_point():
s = Sphere()
a = 3 ** 0.5 / 3
n = s.normal_at(Point(a, a, a))
assert n == Vector(a, a, a)
def test_normal_is_normalized_vector():
s = Sphere()
a = 3 ** 0.5 / 3
n = s.normal_at(Point(a, a, a))
assert n == n.normalize()
def test_computing_normal_on_translated_sphere():
s = Sphere()
s = s.set_transform(Translation(0, 1, 0))
n = s.normal_at(Point(0, 1.70711, -0.70711))
assert n == Vector(0, 0.70711, -0.70711)
def test_computing_normal_on_transformed_sphere():
s = Sphere()
m = Scaling(1, 0.5, 1) @ Rotation(0, 0, np.pi / 5)
s = s.set_transform(m)
n = s.normal_at(Point(0, 2 ** 0.5 / 2, -(2 ** 0.5) / 2))
assert np.allclose(n, Vector(0, 0.97014, -0.24254), 1e-03, 1e-03)
def test_sphere_has_default_material():
s = Sphere()
m = s.material
assert m == Material()
def test_sphere_may_be_assigned_material():
s = Sphere()
m = Material(ambient=1)
s = s.set_material(m)
assert s.material == m
| <filename>tests/test_spheres.py
import numpy as np
from src.grid import Point, Vector
from src.light import Ray
from src.material import Material
from src.matrix import Rotation, Scaling, Translation
from src.shape.sphere import Sphere
def test_ray_intersects_sphere_at_two_points():
r = Ray(Point(0, 0, -5), Vector(0, 0, 1))
s = Sphere()
xs = s.intersect(r)
assert xs.count == 1
assert xs == [4, 6]
def test_ray_intersects_sphere_at_tangent():
r = Ray(Point(0, 1, -5), Vector(0, 0, 1))
s = Sphere()
xs = s.intersect(r)
assert xs.count == 1
assert xs == [5, 5]
def test_ray_misses_sphere():
r = Ray(Point(0, 2, -5), Vector(0, 0, 1))
s = Sphere()
xs = s.intersect(r)
assert xs.count == 0
def test_ray_originates_inside_sphere():
r = Ray(Point(0, 0, 0), Vector(0, 0, 1))
s = Sphere()
xs = s.intersect(r)
assert xs.count == 1
assert xs == [-1, 1]
def test_sphere_is_behind_ray():
r = Ray(Point(0, 0, 5), Vector(0, 0, 1))
s = Sphere()
xs = s.intersect(r)
assert xs.count == 1
assert xs == [-6, -4]
def test_intersect_sets_the_object_on_intersection():
r = Ray(Point(0, 0, -5), Vector(0, 0, 1))
s = Sphere()
xs = s.intersect(r)
assert xs.count == 1
assert xs.obj is s
def test_sphere_default_transformation():
s = Sphere()
assert np.allclose(s.transform, np.eye(4))
def test_changing_sphere_transformation():
s = Sphere()
t = Translation(2, 3, 4)
s = s.set_transform(t)
assert np.allclose(s.transform, t)
def test_intersecting_scaled_sphere_with_ray():
r = Ray(Point(0, 0, -5), Vector(0, 0, 1))
s = Sphere()
s = s.set_transform(Scaling(2, 2, 2))
xs = s.intersect(r)
assert xs.count == 1
assert xs == [3, 7]
def test_intersecting_translated_sphere_with_ray():
r = Ray(Point(0, 0, -5), Vector(0, 0, 1))
s = Sphere()
s = s.set_transform(Translation(5, 0, 0))
xs = s.intersect(r)
assert xs.count == 0
def test_normal_on_sphere_at_point_xaxis():
s = Sphere()
n = s.normal_at(Point(1, 0, 0))
assert n == Vector(1, 0, 0)
def test_normal_on_sphere_at_point_yaxis():
s = Sphere()
n = s.normal_at(Point(0, 1, 0))
assert n == Vector(0, 1, 0)
def test_normal_on_sphere_at_point_zaxis():
s = Sphere()
n = s.normal_at(Point(0, 0, 1))
assert n == Vector(0, 0, 1)
def test_normal_on_sphere_at_nonaxial_point():
s = Sphere()
a = 3 ** 0.5 / 3
n = s.normal_at(Point(a, a, a))
assert n == Vector(a, a, a)
def test_normal_is_normalized_vector():
s = Sphere()
a = 3 ** 0.5 / 3
n = s.normal_at(Point(a, a, a))
assert n == n.normalize()
def test_computing_normal_on_translated_sphere():
s = Sphere()
s = s.set_transform(Translation(0, 1, 0))
n = s.normal_at(Point(0, 1.70711, -0.70711))
assert n == Vector(0, 0.70711, -0.70711)
def test_computing_normal_on_transformed_sphere():
s = Sphere()
m = Scaling(1, 0.5, 1) @ Rotation(0, 0, np.pi / 5)
s = s.set_transform(m)
n = s.normal_at(Point(0, 2 ** 0.5 / 2, -(2 ** 0.5) / 2))
assert np.allclose(n, Vector(0, 0.97014, -0.24254), 1e-03, 1e-03)
def test_sphere_has_default_material():
s = Sphere()
m = s.material
assert m == Material()
def test_sphere_may_be_assigned_material():
s = Sphere()
m = Material(ambient=1)
s = s.set_material(m)
assert s.material == m
| none | 1 | 2.681896 | 3 | |
tmp/mturk_batch_batches.py | kcarnold/sentiment-slant-gi18 | 0 | 6614484 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 7 13:43:07 2017
@author: kcarnold
"""
# Batch MTurk batches
# Created with the help of the following on the MTurk Manage screen:
# Array.from(document.querySelectorAll('a[id*="batch_status"]')).forEach(x => {let f = document.createElement('iframe'); f.setAttribute('width', '600px'); f.setAttribute('height', '600px'); f.setAttribute('src', x.getAttribute('href')+'/download'); document.body.appendChild(f);})
# or just
# batches.forEach(batch => { let f = document.createElement('iframe'); f.setAttribute('width', '600px'); f.setAttribute('height', '600px'); f.setAttribute('src', `https://requester.mturk.com/batches/${batch}/download`); document.body.appendChild(f);})
#%%
import pandas as pd
import glob
#%%
csvs = sorted(glob.glob('*.csv'))
dfs = [pd.read_csv(csv) for csv in csvs]
#%%
full_concat = pd.concat(dfs, axis=0).drop_duplicates(subset='AssignmentId', keep='first')
concats = pd.concat(dfs, axis=0, join='inner').drop_duplicates(subset='AssignmentId', keep='first')
other_axis = pd.Index(concats.columns.tolist() + ['Answer.code'])
concats = pd.concat(dfs, axis=0, join_axes=[other_axis]).drop_duplicates('AssignmentId', keep='first')
concats.to_csv('all_assignments.csv', index=False)
# You'll also find this helpful:
# copy(Array.from(document.querySelectorAll('#batches_reviewable a[id*="batch_status"]')).map(x => (`${x.textContent},${x.getAttribute('href').slice('/batches/'.length)}`)).join('\n'))
# copy(Array.from(document.querySelectorAll('#batches_reviewed a[id*="batch_status"]')).map(x => (`${x.textContent},${x.getAttribute('href').slice('/batches/'.length)}`)).join('\n'))
| # -*- coding: utf-8 -*-
"""
Created on Fri Apr 7 13:43:07 2017
@author: kcarnold
"""
# Batch MTurk batches
# Created with the help of the following on the MTurk Manage screen:
# Array.from(document.querySelectorAll('a[id*="batch_status"]')).forEach(x => {let f = document.createElement('iframe'); f.setAttribute('width', '600px'); f.setAttribute('height', '600px'); f.setAttribute('src', x.getAttribute('href')+'/download'); document.body.appendChild(f);})
# or just
# batches.forEach(batch => { let f = document.createElement('iframe'); f.setAttribute('width', '600px'); f.setAttribute('height', '600px'); f.setAttribute('src', `https://requester.mturk.com/batches/${batch}/download`); document.body.appendChild(f);})
#%%
import pandas as pd
import glob
#%%
csvs = sorted(glob.glob('*.csv'))
dfs = [pd.read_csv(csv) for csv in csvs]
#%%
full_concat = pd.concat(dfs, axis=0).drop_duplicates(subset='AssignmentId', keep='first')
concats = pd.concat(dfs, axis=0, join='inner').drop_duplicates(subset='AssignmentId', keep='first')
other_axis = pd.Index(concats.columns.tolist() + ['Answer.code'])
concats = pd.concat(dfs, axis=0, join_axes=[other_axis]).drop_duplicates('AssignmentId', keep='first')
concats.to_csv('all_assignments.csv', index=False)
# You'll also find this helpful:
# copy(Array.from(document.querySelectorAll('#batches_reviewable a[id*="batch_status"]')).map(x => (`${x.textContent},${x.getAttribute('href').slice('/batches/'.length)}`)).join('\n'))
# copy(Array.from(document.querySelectorAll('#batches_reviewed a[id*="batch_status"]')).map(x => (`${x.textContent},${x.getAttribute('href').slice('/batches/'.length)}`)).join('\n'))
| en | 0.279489 | # -*- coding: utf-8 -*- Created on Fri Apr 7 13:43:07 2017 @author: kcarnold # Batch MTurk batches # Created with the help of the following on the MTurk Manage screen: # Array.from(document.querySelectorAll('a[id*="batch_status"]')).forEach(x => {let f = document.createElement('iframe'); f.setAttribute('width', '600px'); f.setAttribute('height', '600px'); f.setAttribute('src', x.getAttribute('href')+'/download'); document.body.appendChild(f);}) # or just # batches.forEach(batch => { let f = document.createElement('iframe'); f.setAttribute('width', '600px'); f.setAttribute('height', '600px'); f.setAttribute('src', `https://requester.mturk.com/batches/${batch}/download`); document.body.appendChild(f);}) #%% #%% #%% # You'll also find this helpful: # copy(Array.from(document.querySelectorAll('#batches_reviewable a[id*="batch_status"]')).map(x => (`${x.textContent},${x.getAttribute('href').slice('/batches/'.length)}`)).join('\n')) # copy(Array.from(document.querySelectorAll('#batches_reviewed a[id*="batch_status"]')).map(x => (`${x.textContent},${x.getAttribute('href').slice('/batches/'.length)}`)).join('\n')) | 2.664891 | 3 |
gobenchplot/plot.py | ShawnROGrady/gobenchplot | 3 | 6614485 | import copy
import matplotlib.pyplot as plt
import numpy as np
import typing
import gobenchplot.benchmark as benchmark
import gobenchplot.inputs as inputs
# String identifiers for the supported plot styles (see the plot_* functions
# below): bar of per-x means, raw scatter points, mean-per-x line, and a
# first-degree least-squares fit line.
BAR_TYPE = 'bar'
SCATTER_TYPE = 'scatter'
AVG_LINE_TYPE = 'avg_line'
BEST_FIT_LINE_TYPE = 'best_fit_line'
class PlotData(typing.NamedTuple):
    """Parallel x/y sample arrays for one plottable benchmark series."""

    x: np.ndarray
    y: np.ndarray

    def x_type(self):
        # dtype of the x samples; assumes self.x is non-empty.
        return self.x[0].dtype

    def y_type(self):
        # dtype of the y samples; assumes self.y is non-empty.
        return self.y[0].dtype

    def __eq__(self, other):
        # Element-wise comparison of both arrays; anything that is not a
        # PlotData compares unequal (deliberately False, not NotImplemented).
        if not isinstance(other, PlotData):
            return False
        return (
            np.array_equal(self.x, other.x) and
            np.array_equal(self.y, other.y))

    def avg_over_x(self) -> 'PlotData':
        """Collapse duplicate x values, averaging the y samples at each one.

        Returns a new PlotData whose x holds the sorted unique x values and
        whose y holds the mean of the y samples observed at each of them.
        The result keeps the original y dtype, so means of integer y values
        are truncated toward zero on assignment.
        """
        uniq_x = np.unique(self.x)
        y_means = np.empty(len(uniq_x), dtype=self.y_type())
        for i, uniq_x_val in enumerate(uniq_x):
            # Boolean mask selects every y sample recorded at this x value.
            y_means[i] = np.mean(self.y[self.x == uniq_x_val])
        return PlotData(x=uniq_x, y=y_means)
def bench_res_data(bench_results: typing.List[benchmark.SplitRes]) -> PlotData:
    """Convert a list of split benchmark results into a PlotData pair.

    The arrays are built from Python lists so numpy can infer an adequate
    dtype for the data.  The previous pre-allocation with
    ``np.empty(n, dtype=type(first.x))`` produced a fixed-width ``<U``
    dtype for string x values, silently truncating every assigned string.
    An empty input now yields empty arrays instead of raising IndexError.
    """
    x = np.array([res.x for res in bench_results])
    y = np.array([res.y for res in bench_results])
    return PlotData(x=x, y=y)
def plot_scatter(data: typing.Dict[str, PlotData], include_label):
    """Draw each series in *data* as raw (x, y) points on the current axes.

    When *include_label* is truthy the series key is attached as the
    legend label for its points.
    """
    for label, series in data.items():
        extra = {'label': label} if include_label else {}
        plt.plot(series.x, series.y, '.', **extra)
def plot_avg_line(data: typing.Dict[str, PlotData], include_label):
    """Draw one line per series through the mean y value at each unique x.

    When *include_label* is truthy the series key is attached as the
    legend label for its line.
    """
    for label, series in data.items():
        averaged = series.avg_over_x()
        extra = {'label': label} if include_label else {}
        plt.plot(averaged.x, averaged.y, **extra)
def plot_best_fit_line(data: typing.Dict[str, PlotData], include_label):
    """Draw a first-order least-squares fit line for each series."""
    for label, series in data.items():
        xs = np.unique(series.x)
        fit = np.poly1d(np.polyfit(series.x, series.y, 1))
        if include_label:
            plt.plot(xs, fit(xs), label=label)
        else:
            plt.plot(xs, fit(xs))
def get_bar_spacing_adjustment(
        plotnum: int,
        num_plots: int) -> typing.Union[int, float]:
    """Return the offset factor spreading bar group `plotnum` of `num_plots`.

    Offsets are symmetric around zero (e.g. two plots -> -0.5 and +0.5) so
    the bar groups are evenly spaced without colliding.
    """
    if num_plots == 0:
        return 0
    return (2 * plotnum - (num_plots - 1)) / num_plots
def get_bar_widths(
        uniq_x: np.ndarray,
        num_plots: int) -> typing.Union[float, np.ndarray]:
    """Compute bar widths so `num_plots` bar groups fit without overlap.

    For non-numeric x values matplotlib controls the spacing, so the
    default width (0.8) is returned.  For numeric x, each width is scaled
    by the gap to the neighbouring unique x values so adjacent groups do
    not collide.

    NOTE: assumes each of the num_plots series shares the same uniq_x.
    """
    if non_numeric_dtype(uniq_x[0].dtype):
        return 0.8
    base_width = 0.8 / num_plots
    count = len(uniq_x)
    if count == 1:
        return base_width
    widths = np.empty(count, dtype=np.float64)
    for i in range(1, count):
        gap = base_width * (uniq_x[i] - uniq_x[i - 1])
        if i >= 2:
            # take the tighter of the two neighbouring gaps
            prev_gap = base_width * (uniq_x[i - 1] - uniq_x[i - 2])
            widths[i - 1] = min(gap, prev_gap)
        else:
            widths[i - 1] = gap
    # the last slot has no right-hand neighbour; reuse the previous width
    widths[count - 1] = widths[count - 2]
    return widths
def plot_bar(data: typing.Dict[str, PlotData], include_label):
    """Render the series as bar charts of mean y values.

    Non-numeric x: one bar per series (mean of all its y samples), with the
    series names as x-axis labels.  Numeric x: one bar group per unique x
    value, each series' bar offset so groups do not overlap.
    """
    x_type = list(data.values())[0].x_type()
    ax = plt.gca()
    if non_numeric_dtype(x_type):
        # one bar per series at evenly spaced integer positions
        x = np.arange(len(data))
        y_means = np.empty(len(data))
        for i, plot_data in enumerate(data.values()):
            y_means[i] = np.mean(plot_data.y)
        if include_label:
            # TODO come up with an actual label
            # just doing this to prevent legend() error
            plt.bar(x, y_means, label='')
        else:
            plt.bar(x, y_means)
        ax.set_xticks(x)
        ax.set_xticklabels(data.keys())
        return
    else:
        num_plots = len(data)
        # TODO: this is a guestimate, should be determined programatically
        i = 0
        for label, plot_data in data.items():
            uniq_x, y_means = plot_data.avg_over_x()
            widths = get_bar_widths(uniq_x, num_plots)
            adjustment = get_bar_spacing_adjustment(i, num_plots)
            if include_label:
                plt.bar(uniq_x-widths*adjustment, y_means, widths, label=label)
            else:
                plt.bar(uniq_x-widths*adjustment, y_means, widths)
            i += 1
        # NOTE(review): uniq_x below is the last series' unique x values —
        # presumably all series share the same uniq_x; confirm with callers.
        ax.set_xticks(uniq_x)
        ax.set_xticklabels(uniq_x)
SpecifiedPlots = typing.Optional[typing.Union[typing.List[str], str]]


def plot_fn_from_type(plots: SpecifiedPlots):
    """Resolve plot-type name(s) to the matching plotting function(s).

    Accepts None (returned unchanged), a single type string, or a list of
    type strings (mapped recursively to a list of functions).

    Raises:
        inputs.InvalidInputError: for an unrecognised plot type string.
    """
    if plots is None:
        return None
    if isinstance(plots, list):
        return [plot_fn_from_type(p) for p in plots]
    dispatch = {
        BAR_TYPE: plot_bar,
        SCATTER_TYPE: plot_scatter,
        AVG_LINE_TYPE: plot_avg_line,
        BEST_FIT_LINE_TYPE: plot_best_fit_line,
    }
    fn = dispatch.get(plots)
    if fn is None:
        raise inputs.InvalidInputError(
            'unknown plot type',
            inputs.PLOTS_NAME,
            input_val=plots)
    # NOTE: the original had an unreachable `return` after the raise; the
    # dispatch-table form above removes that dead code.
    return fn
def non_numeric_dtype(dtype) -> bool:
    """Return True when the numpy dtype holds strings, bools, or bytes."""
    return any(tag in dtype.name for tag in ('str', 'bool', 'bytes'))
def build_plot_fn(
        data: typing.Dict[str, PlotData],
        x_name: str, y_name: str = 'time',
        plots=None):
    """Choose plotting function(s) appropriate for the data's dtypes.

    Non-numeric y values cannot be plotted at all; non-numeric x values
    only support bar charts.  With `plots=None`, sensible defaults are
    chosen (bar for categorical x, scatter + average line for numeric x).

    Raises:
        inputs.InvalidInputError: for unplottable dtype/plot combinations.
    """
    x_type = list(data.values())[0].x_type()
    y_type = list(data.values())[0].y_type()
    if non_numeric_dtype(y_type):
        raise inputs.InvalidInputError(
            "unsupported data type '%s'" % (y_type.name),
            inputs.Y_NAME,
            input_val=y_name)
    if non_numeric_dtype(x_type):
        if plots is None or plots == BAR_TYPE:
            return plot_fn_from_type(BAR_TYPE)
        # NOTE(review): the string half of the elif below looks unreachable —
        # `plots == BAR_TYPE` is already handled above; confirm before
        # simplifying.
        elif (
                (isinstance(plots, str) and plots == BAR_TYPE)
                or (isinstance(plots, list) and BAR_TYPE in plots)):
            return plot_fn_from_type(plots)
        else:
            raise inputs.InvalidInputError(
                "unsupported data type '%s' for plot type '%s'" % (
                    x_type.name, plots),
                inputs.X_NAME,
                input_val=x_name)
    else:
        if plots is None:
            return plot_fn_from_type([SCATTER_TYPE, AVG_LINE_TYPE])
        else:
            return plot_fn_from_type(plots)
def run_plot_fns(data: typing.Dict[str, PlotData], plot_fns):
    """Render each requested plot function onto the current figure.

    A bar chart cannot share axes with the line/scatter plots, so when
    both kinds are requested the figure is split into two subplots (bar
    below, everything else above).  Only the first non-bar plot carries
    labels so the legend lists each series once.
    """
    line_fns = [fn for fn in plot_fns if fn.__name__ != plot_bar.__name__]
    has_bar = len(line_fns) != len(plot_fns)
    if has_bar:
        if line_fns:
            plt.subplot(212)
        for fn in plot_fns:
            if fn.__name__ == plot_bar.__name__:
                fn(data, include_label=True)
                break
    if line_fns:
        plt.subplot(211)
        ax = plt.gca()
        for i, fn in enumerate(line_fns):
            # restart the colour cycle so overlaid plot types use the same
            # colour for the same series
            ax.set_prop_cycle(None)
            fn(data, include_label=(i == 0))
def plot_data(
        data: typing.Dict[str, PlotData],
        x_name: str,
        y_name: str = 'time',
        plots=None):
    """Label the axes and draw `data` with the requested plot type(s).

    `plots` may be None (defaults chosen from the data's dtypes), a single
    plot-type string, or a list of plot-type strings.
    """
    plot_fn = build_plot_fn(data, x_name, y_name=y_name, plots=plots)
    # NOTE: for now assuming all plots can be shown on figure
    plt.xlabel(x_name)
    y_label = y_name
    y_units = benchmark.bench_output_units(y_name)
    if y_units != '':
        # e.g. "time (ns/op)" when the metric has known units
        y_label = '%s (%s)' % (y_name, y_units)
    plt.ylabel(y_label)
    if isinstance(plot_fn, list):
        run_plot_fns(data, plot_fn)
    else:
        plot_fn(data, include_label=True)
def plot_bench(
        bench: benchmark.Benchmark,
        group_by: typing.Union[typing.List[str], str],
        x_name: str, y_name: str = 'time',
        subs: typing.List = None,
        filter_vars: typing.List[str] = None,
        plots=None):
    """Filter, group, and plot one benchmark's results, then show the figure.

    Results are filtered by `subs`/`filter_vars`, grouped by `group_by`,
    split into (x, y) pairs, and plotted one labelled series per group.

    Raises:
        inputs.InvalidInputError: when the filters remove every result.
    """
    filter_exprs = benchmark.build_filter_exprs(subs, filter_vars)
    # deep-copy so filtering never mutates the caller's benchmark results
    filtered: benchmark.BenchResults = copy.deepcopy(bench.results)
    for expr in filter_exprs:
        filtered = expr(filtered)
    if len(filtered) == 0:
        raise inputs.InvalidInputError(
            "no results remain",
            [inputs.FILTER_BY_NAME, inputs.SUBS_NAME],
            [filter_vars, subs])
    split_res: benchmark.SplitResults = filtered.group_by(
        group_by).split_to(x_name, y_name)
    data: typing.Dict[str, PlotData] = {}
    for label, res in split_res.items():
        data[label] = bench_res_data(res)
    if subs is None or len(subs) == 0:
        plt.title(bench.name)
    else:
        # include sub-benchmark names in the title, e.g. "BenchFoo/sub1"
        plt.title("%s/%s" % (bench.name, "/".join(subs)))
    plot_data(data, x_name, y_name=y_name, plots=plots)
    plt.legend()
    plt.show()
| import copy
import matplotlib.pyplot as plt
import numpy as np
import typing
import gobenchplot.benchmark as benchmark
import gobenchplot.inputs as inputs
BAR_TYPE = 'bar'
SCATTER_TYPE = 'scatter'
AVG_LINE_TYPE = 'avg_line'
BEST_FIT_LINE_TYPE = 'best_fit_line'
class PlotData(typing.NamedTuple):
    """One benchmark series: parallel numpy arrays of x and y samples."""
    x: np.ndarray
    y: np.ndarray

    def x_type(self):
        """Return the numpy dtype of the x values (assumes non-empty x)."""
        return self.x[0].dtype

    def y_type(self):
        """Return the numpy dtype of the y values (assumes non-empty y)."""
        return self.y[0].dtype

    def __eq__(self, other):
        # NamedTuple equality would compare the arrays with `==`, which is
        # ambiguous for numpy arrays; compare element-wise instead.
        if not isinstance(other, PlotData):
            return False
        return (
            np.array_equal(self.x, other.x) and
            np.array_equal(self.y, other.y))

    def avg_over_x(self) -> 'PlotData':
        """Collapse duplicate x values, averaging their y samples.

        Returns a new PlotData whose x holds the sorted unique x values and
        whose y holds the mean of the y samples observed at each x (cast to
        the original y dtype).
        """
        uniq_x = np.unique(self.x)
        y_means = np.empty(len(uniq_x), dtype=self.y_type())
        for i, x_val in enumerate(uniq_x):
            # Boolean-mask selection replaces the original per-value index
            # scan (filter/map over enumerate) and temporary index lists.
            y_means[i] = np.mean(self.y[self.x == x_val])
        return PlotData(x=uniq_x, y=y_means)
def bench_res_data(bench_results: typing.List[benchmark.SplitRes]) -> PlotData:
order = len(bench_results)
x = np.empty(order, dtype=type(bench_results[0].x))
y = np.empty(order, dtype=type(bench_results[0].y))
for i, res in enumerate(bench_results):
x[i] = res.x
y[i] = res.y
return PlotData(x=x, y=y)
def plot_scatter(data: typing.Dict[str, PlotData], include_label):
for label, plot_data in data.items():
if include_label:
plt.plot(plot_data.x, plot_data.y, '.', label=label)
else:
plt.plot(plot_data.x, plot_data.y, '.')
def plot_avg_line(data: typing.Dict[str, PlotData], include_label):
for label, plot_data in data.items():
uniq_x, y_means = plot_data.avg_over_x()
if include_label:
plt.plot(uniq_x, y_means, label=label)
else:
plt.plot(uniq_x, y_means)
def plot_best_fit_line(data: typing.Dict[str, PlotData], include_label):
for label, plot_data in data.items():
uniq_x = np.unique(plot_data.x)
best_fit_fn = np.poly1d(np.polyfit(plot_data.x, plot_data.y, 1))
if include_label:
plt.plot(
uniq_x,
best_fit_fn(uniq_x),
label=label)
else:
plt.plot(
uniq_x,
best_fit_fn(uniq_x))
def get_bar_spacing_adjustment(
plotnum: int,
num_plots: int) -> typing.Union[int, float]:
if num_plots == 0:
return 0
# produce even spacing without colliding with others
return (1/num_plots) * (2*plotnum - (num_plots-1))
def get_bar_widths(
uniq_x: np.ndarray,
num_plots: int) -> typing.Union[float, np.ndarray]:
if non_numeric_dtype(uniq_x[0].dtype):
# for non numeric plots we can control the spacing
# just use default width
return 0.8
# NOTE: this is assuming each of the num_plots has the same uniq_x
base_width = 0.8/num_plots
if len(uniq_x) == 1:
return base_width
widths = np.empty(len(uniq_x), dtype=np.float64)
for index, x in np.ndenumerate(uniq_x):
i = index[0] # just dealing w/ 1D arrays
if i == 0:
continue
# just enough spacing
if i >= 2:
widths[i-1] = min(base_width * (x - uniq_x[i-1]),
base_width * (uniq_x[i-1] - uniq_x[i-2]))
else:
widths[i-1] = base_width * (x - uniq_x[i-1])
widths[len(uniq_x)-1] = widths[len(uniq_x)-2]
return widths
def plot_bar(data: typing.Dict[str, PlotData], include_label):
x_type = list(data.values())[0].x_type()
ax = plt.gca()
if non_numeric_dtype(x_type):
x = np.arange(len(data))
y_means = np.empty(len(data))
for i, plot_data in enumerate(data.values()):
y_means[i] = np.mean(plot_data.y)
if include_label:
# TODO come up with an actual label
# just doing this to prevent legend() error
plt.bar(x, y_means, label='')
else:
plt.bar(x, y_means)
ax.set_xticks(x)
ax.set_xticklabels(data.keys())
return
else:
num_plots = len(data)
# TODO: this is a guestimate, should be determined programatically
i = 0
for label, plot_data in data.items():
uniq_x, y_means = plot_data.avg_over_x()
widths = get_bar_widths(uniq_x, num_plots)
adjustment = get_bar_spacing_adjustment(i, num_plots)
if include_label:
plt.bar(uniq_x-widths*adjustment, y_means, widths, label=label)
else:
plt.bar(uniq_x-widths*adjustment, y_means, widths)
i += 1
ax.set_xticks(uniq_x)
ax.set_xticklabels(uniq_x)
SpecifiedPlots = typing.Optional[typing.Union[typing.List[str], str]]
def plot_fn_from_type(plots: SpecifiedPlots):
if plots is None:
return None
if isinstance(plots, list):
fn = list(map(lambda x: plot_fn_from_type(x), plots))
return fn
if plots == BAR_TYPE:
return plot_bar
elif plots == SCATTER_TYPE:
return plot_scatter
elif plots == AVG_LINE_TYPE:
return plot_avg_line
elif plots == BEST_FIT_LINE_TYPE:
return plot_best_fit_line
else:
raise inputs.InvalidInputError(
'unknown plot type',
inputs.PLOTS_NAME,
input_val=plots)
return
def non_numeric_dtype(dtype) -> bool:
if (
('str' in dtype.name) or
('bool' in dtype.name) or
('bytes' in dtype.name)):
return True
return False
def build_plot_fn(
data: typing.Dict[str, PlotData],
x_name: str, y_name: str = 'time',
plots=None):
x_type = list(data.values())[0].x_type()
y_type = list(data.values())[0].y_type()
if non_numeric_dtype(y_type):
raise inputs.InvalidInputError(
"unsupported data type '%s'" % (y_type.name),
inputs.Y_NAME,
input_val=y_name)
if non_numeric_dtype(x_type):
if plots is None or plots == BAR_TYPE:
return plot_fn_from_type(BAR_TYPE)
elif (
(isinstance(plots, str) and plots == BAR_TYPE)
or (isinstance(plots, list) and BAR_TYPE in plots)):
return plot_fn_from_type(plots)
else:
raise inputs.InvalidInputError(
"unsupported data type '%s' for plot type '%s'" % (
x_type.name, plots),
inputs.X_NAME,
input_val=x_name)
else:
if plots is None:
return plot_fn_from_type([SCATTER_TYPE, AVG_LINE_TYPE])
else:
return plot_fn_from_type(plots)
def run_plot_fns(data: typing.Dict[str, PlotData], plot_fns):
# can't show average bar on same figure as others
non_avg_bar_fns = list(
filter(lambda x: x.__name__ != plot_bar.__name__, plot_fns))
if len(non_avg_bar_fns) != len(plot_fns):
if len(non_avg_bar_fns) != 0:
plt.subplot(212)
for plot_fn in plot_fns:
if plot_fn.__name__ == plot_bar.__name__:
plot_fn(data, include_label=True)
break
if len(non_avg_bar_fns) != 0:
plt.subplot(211)
ax = plt.gca()
for i, fn in enumerate(non_avg_bar_fns):
ax.set_prop_cycle(None)
if i == 0:
fn(data, include_label=True)
else:
fn(data, include_label=False)
def plot_data(
data: typing.Dict[str, PlotData],
x_name: str,
y_name: str = 'time',
plots=None):
plot_fn = build_plot_fn(data, x_name, y_name=y_name, plots=plots)
# NOTE: for now assuming all plots can be shown on figure
plt.xlabel(x_name)
y_label = y_name
y_units = benchmark.bench_output_units(y_name)
if y_units != '':
y_label = '%s (%s)' % (y_name, y_units)
plt.ylabel(y_label)
if isinstance(plot_fn, list):
run_plot_fns(data, plot_fn)
else:
plot_fn(data, include_label=True)
def plot_bench(
bench: benchmark.Benchmark,
group_by: typing.Union[typing.List[str], str],
x_name: str, y_name: str = 'time',
subs: typing.List = None,
filter_vars: typing.List[str] = None,
plots=None):
filter_exprs = benchmark.build_filter_exprs(subs, filter_vars)
filtered: benchmark.BenchResults = copy.deepcopy(bench.results)
for expr in filter_exprs:
filtered = expr(filtered)
if len(filtered) == 0:
raise inputs.InvalidInputError(
"no results remain",
[inputs.FILTER_BY_NAME, inputs.SUBS_NAME],
[filter_vars, subs])
split_res: benchmark.SplitResults = filtered.group_by(
group_by).split_to(x_name, y_name)
data: typing.Dict[str, PlotData] = {}
for label, res in split_res.items():
data[label] = bench_res_data(res)
if subs is None or len(subs) == 0:
plt.title(bench.name)
else:
plt.title("%s/%s" % (bench.name, "/".join(subs)))
plot_data(data, x_name, y_name=y_name, plots=plots)
plt.legend()
plt.show()
| en | 0.867903 | # produce even spacing without colliding with others # for non numeric plots we can control the spacing # just use default width # NOTE: this is assuming each of the num_plots has the same uniq_x # just dealing w/ 1D arrays # just enough spacing # TODO come up with an actual label # just doing this to prevent legend() error # TODO: this is a guestimate, should be determined programatically # can't show average bar on same figure as others # NOTE: for now assuming all plots can be shown on figure | 2.419985 | 2 |
InterviewBit/backtracking/sudoku.py | shrey199325/LeetCodeSolution | 0 | 6614486 | class Solution:
# @param A : list of list of chars
def solveSudoku(self, A):
self.row, self.col, self.grid = (
{1: set(), 2: set(), 3: set(), 4: set(), 5: set(), 6: set(), 7: set(), 8: set(), 9: set()},
{1: set(), 2: set(), 3: set(), 4: set(), 5: set(), 6: set(), 7: set(), 8: set(), 9: set()},
{1: set(), 2: set(), 3: set(), 4: set(), 5: set(), 6: set(), 7: set(), 8: set(), 9: set()}
)
self.grid_first_element = {
(0, 0): 1, (0, 3): 2, (0, 6): 3, (3, 0): 4, (3, 3): 5, (3, 6): 6, (6, 0): 7, (6, 3): 8, (6, 6): 9
}
A = [list(A[_]) for _ in range(len(A))]
for i in range(len(A)):
for j in range(len(A[0])):
if A[i][j] == ".":
A[i][j] = 0
else:
A[i][j] = int(A[i][j])
self.row[i + 1].add(A[i][j])
self.col[j + 1].add(A[i][j])
self.grid[self.calc_grid(i, j)].add(A[i][j])
return self.recur(A, 0, 0)
def recur(self, A, x, y):
x, y = x+1, y+1
for i in range(1, 10):
if self.check(A, x, y, i):
A[x][y] = i
self.row[x].add(i)
self.col[y].add(i)
self.grid[self.calc_grid(x, y)].add(i)
if x == y == 8:
return A
if x < 8:
x_ = x + 1
y_ = y
elif x == 8 and y < 8:
x_ = x
y_ = y + 1
else: continue
A = self.recur(A, x_, y_)
return A
def check(self, A, x, y, val):
return val not in self.row[x] and val not in self.col[y] and val not in self.grid[self.calc_grid(x-1, y-1)]
def calc_grid(self, x, y):
top_left_row, top_left_col = (3 * (x // 3), 3 * (y // 3))
return self.grid_first_element[(top_left_row, top_left_col)]
# Smoke test: solve the classic example puzzle and print the result.
A = [
    "53..7....", "6..195...", ".98....6.", "8...6...3", "4..8.3..1", "7...2...6", ".6....28.", "...419..5", "....8..79"
]
print(Solution().solveSudoku(A))
class Solution:
    """Backtracking sudoku solver (InterviewBit interface).

    The original implementation started its traversal at cell (1, 1),
    only walked one column and one row of the board, overwrote given
    clues, and never undid a failed placement — it could not solve a
    general puzzle.  This version is a standard backtracking search.
    """

    # @param A : list of list of chars (9 rows of 9 chars, '.' = blank)
    def solveSudoku(self, A):
        """Solve the puzzle and return the grid as a list of lists of ints."""
        grid = [[0 if ch == "." else int(ch) for ch in row] for row in A]
        self.rows = [set() for _ in range(9)]
        self.cols = [set() for _ in range(9)]
        self.boxes = [set() for _ in range(9)]
        for r in range(9):
            for c in range(9):
                value = grid[r][c]
                if value:
                    self.rows[r].add(value)
                    self.cols[c].add(value)
                    self.boxes[3 * (r // 3) + c // 3].add(value)
        self._fill(grid, 0)
        return grid

    def _fill(self, grid, pos):
        """Try to complete `grid` from linear cell `pos`; True on success."""
        # skip over cells that already hold a clue or placed digit
        while pos < 81 and grid[pos // 9][pos % 9]:
            pos += 1
        if pos == 81:
            return True
        r, c = divmod(pos, 9)
        b = 3 * (r // 3) + c // 3
        for value in range(1, 10):
            if (value in self.rows[r] or value in self.cols[c]
                    or value in self.boxes[b]):
                continue
            grid[r][c] = value
            self.rows[r].add(value)
            self.cols[c].add(value)
            self.boxes[b].add(value)
            if self._fill(grid, pos + 1):
                return True
            # undo the failed placement before trying the next candidate
            grid[r][c] = 0
            self.rows[r].remove(value)
            self.cols[c].remove(value)
            self.boxes[b].remove(value)
        return False
# Smoke test: solve the classic example puzzle and print the result.
A = [
    "53..7....", "6..195...", ".98....6.", "8...6...3", "4..8.3..1", "7...2...6", ".6....28.", "...419..5", "....8..79"
]
print(Solution().solveSudoku(A))
| en | 0.315284 | # @param A : list of list of chars | 3.415001 | 3 |
kafka_consumer/cli.py | dls-controls/kafka_consumer | 0 | 6614487 | <gh_stars>0
import logging
from argparse import ArgumentParser
from pathlib import Path
from kafka_consumer import KafkaConsumer, __version__
from kafka_consumer.utils import profile
def main(args=None):
    """Parse CLI arguments and consume Kafka messages into an HDF5 file.

    Args:
        args: Optional argument list (argparse falls back to sys.argv);
            passing a list explicitly is useful for testing.
    """
    parser = ArgumentParser()
    parser.add_argument("--version", action="version", version=__version__)
    parser.add_argument("brokers", type=str, help="List of brokers", nargs="+")
    parser.add_argument("group", type=str, help="Group")
    parser.add_argument("topic", type=str, help="Topic")
    # A start timestamp and explicit offsets are mutually exclusive ways of
    # choosing where consumption begins.
    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        "-t",
        "--timestamp",
        type=int,
        help="Timestamp as secs since epoch to start consuming from",
        required=False,
    )
    group.add_argument(
        "-o",
        "--offsets",
        type=int,
        nargs="+",
        help="Offsets to start consuming from - must be one for each partition",
        required=False,
    )
    parser.add_argument(
        "-i", "--array_id", type=int, help="ID of first array to write", required=False
    )
    parser.add_argument(
        "-d",
        "--directory",
        type=Path,
        default=Path.cwd(),
        help="Output file directory, default is cwd",
    )
    parser.add_argument(
        "-f",
        "--filename",
        type=str,
        default="data.h5",
        help="Name of output file, default is data.h5",
    )
    parser.add_argument(
        "-n", "--num_arrays", type=int, default=100, help="Number of arrays to write",
    )
    parser.add_argument(
        "--log_level",
        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
        default="WARNING",
        type=str,
        help="Log level",
    )
    parser.add_argument(
        "--prof_directory",
        type=Path,
        default=Path.cwd(),
        help="Profiling results directory, default is cwd",
    )
    parser.add_argument(
        "--prof_filename",
        type=str,
        default="profile_results",
        # typo fix: "filenaem" -> "filename"
        help="Stem of profiling results filename, default is profile_results",
    )
    args = parser.parse_args(args)
    logging.basicConfig(level=getattr(logging, args.log_level.upper()))

    @profile(args.prof_directory, args.prof_filename)
    def main_inner(args):
        # Consume from Kafka and write the arrays to the output HDF5 file,
        # profiling the whole run via the decorator above.
        kafka_consumer = KafkaConsumer(args.brokers, args.group, args.topic)
        kafka_consumer.consume_and_write(
            args.directory,
            args.filename,
            args.num_arrays,
            start_offsets=args.offsets,
            secs_since_epoch=args.timestamp,
            first_array_id=args.array_id,
        )

    main_inner(args)
| import logging
from argparse import ArgumentParser
from pathlib import Path
from kafka_consumer import KafkaConsumer, __version__
from kafka_consumer.utils import profile
def main(args=None):
parser = ArgumentParser()
parser.add_argument("--version", action="version", version=__version__)
parser.add_argument("brokers", type=str, help="List of brokers", nargs="+")
parser.add_argument("group", type=str, help="Group")
parser.add_argument("topic", type=str, help="Topic")
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-t",
"--timestamp",
type=int,
help="Timestamp as secs since epoch to start consuming from",
required=False,
)
group.add_argument(
"-o",
"--offsets",
type=int,
nargs="+",
help="Offsets to start consuming from - must be one for each partition",
required=False,
)
parser.add_argument(
"-i", "--array_id", type=int, help="ID of first array to write", required=False
)
parser.add_argument(
"-d",
"--directory",
type=Path,
default=Path.cwd(),
help="Output file directory, default is cwd",
)
parser.add_argument(
"-f",
"--filename",
type=str,
default="data.h5",
help="Name of output file, default is data.h5",
)
parser.add_argument(
"-n", "--num_arrays", type=int, default=100, help="Number of arrays to write",
)
parser.add_argument(
"--log_level",
choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
default="WARNING",
type=str,
help="Log level",
)
parser.add_argument(
"--prof_directory",
type=Path,
default=Path.cwd(),
help="Profiling results directory, default is cwd",
)
parser.add_argument(
"--prof_filename",
type=str,
default="profile_results",
help="Stem of profiling results filenaem, default is profile_results",
)
args = parser.parse_args(args)
logging.basicConfig(level=getattr(logging, args.log_level.upper()))
@profile(args.prof_directory, args.prof_filename)
def main_inner(args):
kafka_consumer = KafkaConsumer(args.brokers, args.group, args.topic)
kafka_consumer.consume_and_write(
args.directory,
args.filename,
args.num_arrays,
start_offsets=args.offsets,
secs_since_epoch=args.timestamp,
first_array_id=args.array_id,
)
main_inner(args) | none | 1 | 2.496624 | 2 | |
HOWMANY.py | abphilip-codes/Codechef_Practice | 2 | 6614488 | # https://www.codechef.com/problems/HOWMANY
N = input()
print(len(N)) if(len(N)<=3) else print("More than 3 digits") | # https://www.codechef.com/problems/HOWMANY
N = input()
print(len(N)) if(len(N)<=3) else print("More than 3 digits") | en | 0.607987 | # https://www.codechef.com/problems/HOWMANY | 3.896765 | 4 |
full_prediction.py | HDWilliams/Live_Emotion_Detection | 0 | 6614489 | import torch
import torch.nn.functional as F
from torchvision import transforms
from emotion_cnn import CNN
import numpy as np
from torch_utils import predict_emotion
import cv2
from PIL import Image
# load cascade module
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
def get_full_prediction(img_file):
    """Detect faces in an image file and annotate each with an emotion.

    Args:
        img_file: Path of the image file to read.

    Returns:
        Tuple of (image, status message).  When at least one face is found
        the image is a PIL Image with labelled rectangles drawn on it;
        otherwise the undecorated RGB numpy array is returned.
    """
    # read image (np.fromfile + imdecode also handles non-ASCII paths)
    img = np.fromfile(img_file, np.uint8)
    img = cv2.imdecode(img, cv2.IMREAD_COLOR)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # the Haar cascade operates on grayscale input
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Detect the faces (leftover debug print of `faces` removed)
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    if len(faces) == 0:
        return img, 'No faces detected...'
    for (x, y, w, h) in faces:
        crop_img = gray[y:y+h, x:x+w]
        # classify the cropped face; prob is the softmaxed confidence
        label, prob = predict_emotion(crop_img)
        full_label = str(label) + str(np.around(prob.item(), 3))
        # draw a rectangle around the face, labelled with emotion + probability
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        cv2.putText(img, full_label, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (36,255,12), 2)
    img = Image.fromarray(img)
    return img, 'Faces detected...'
| import torch
import torch.nn.functional as F
from torchvision import transforms
from emotion_cnn import CNN
import numpy as np
from torch_utils import predict_emotion
import cv2
from PIL import Image
# load cascade module
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
def get_full_prediction(img_file):
    """Detect faces in an image file and annotate each with an emotion.

    Args:
        img_file: Path of the image file to read.

    Returns:
        Tuple of (image, status message).  When at least one face is found
        the image is a PIL Image with labelled rectangles drawn on it;
        otherwise the undecorated RGB numpy array is returned.
    """
    # read image (np.fromfile + imdecode also handles non-ASCII paths)
    img = np.fromfile(img_file, np.uint8)
    img = cv2.imdecode(img, cv2.IMREAD_COLOR)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # the Haar cascade operates on grayscale input
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Detect the faces (leftover debug print of `faces` removed)
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    if len(faces) == 0:
        return img, 'No faces detected...'
    for (x, y, w, h) in faces:
        crop_img = gray[y:y+h, x:x+w]
        # classify the cropped face; prob is the softmaxed confidence
        label, prob = predict_emotion(crop_img)
        full_label = str(label) + str(np.around(prob.item(), 3))
        # draw a rectangle around the face, labelled with emotion + probability
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        cv2.putText(img, full_label, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (36,255,12), 2)
    img = Image.fromarray(img)
    return img, 'Faces detected...'
| en | 0.829946 | # load cascade module takes in img_file as string, outputs a prediction if a face is detected #read image #turn image to grayscale # Detect the faces # Draw the rectangle around each face #detect emotion and softmax the output #label with emotion and probability | 3.239148 | 3 |
run_docker.py | SelfDriveGuard/carla-autoware | 0 | 6614490 | # coding=UTF-8
import docker
import os
from os.path import abspath, join, dirname
def main():
    # Entry point: delegate to run(), which starts the container.
    run()
def run():
    """Start the carla-autoware container with GPU, host network and mounts."""
    client = docker.from_env()
    root = abspath(dirname(__file__))
    mounts = {
        join(root, "autoware-contents"): {
            'bind': '/home/autoware/autoware-contents', 'mode': 'ro'},
        join(root, "scripts"): {
            'bind': '/home/autoware/my_scripts', 'mode': 'rw'},
    }
    ros_container = client.containers.run(
        "registry.cn-beijing.aliyuncs.com/ad-test/carla-autoware-extern:1.0.1",
        detach=True,
        volumes=mounts,
        runtime='nvidia',
        network='host',
        privileged=True,
        environment=["DISPLAY={}".format(os.getenv('DISPLAY'))],
        tty=True,
    )
    print("Container id:{}".format(ros_container.short_id))
    print("Command to enter container:\n docker exec -it --user autoware {} bash".format(ros_container.short_id))
if __name__ == '__main__':
main()
| # coding=UTF-8
import docker
import os
from os.path import abspath, join, dirname
def main():
run()
def run():
client = docker.from_env()
ROOT_PATH = abspath(dirname(__file__))
CONTENTS_PATH = join(ROOT_PATH, "autoware-contents")
SCRIPTS_PATH = join(ROOT_PATH, "scripts")
ros_container = client.containers.run("registry.cn-beijing.aliyuncs.com/ad-test/carla-autoware-extern:1.0.1",
detach=True,
volumes={CONTENTS_PATH: {'bind': '/home/autoware/autoware-contents', 'mode': 'ro'},
SCRIPTS_PATH: {'bind': '/home/autoware/my_scripts', 'mode': 'rw'}},
runtime='nvidia',
network='host',
privileged=True,
environment=["DISPLAY={}".format(
os.getenv('DISPLAY'))],
tty=True
)
print("Container id:{}".format(ros_container.short_id))
print("Command to enter container:\n docker exec -it --user autoware {} bash".format(ros_container.short_id))
if __name__ == '__main__':
main()
| ca | 0.242498 | # coding=UTF-8 | 2.018209 | 2 |
unit2_lesson_05_understanding_functions_part2.py | AbhishekKumar1277/Mission-RnD-Python- | 0 | 6614491 | __author__ = 'Kalyan'
from placeholders import *
notes = """
This lesson explores some advanced features of functions. This will help you make sense
of a lot of standard library functions when you use them. In particular, the following are covered
- positional and keyword arguments
- defining and passing variable number of positional and keyword arguments
- unpacking arguments
Be sure to understand the difference between a parameter and an argument. Keep these links open and refer to them
as you do the lesson!
https://docs.python.org/3/glossary.html#term-parameter
https://docs.python.org/3/glossary.html#term-argument
https://docs.python.org/3/reference/expressions.html#calls
"""
def demo(first, second, third=3, fourth=4):
    """Teaching helper: return the four bound arguments as a list.

    The tests below use it to observe how positional, keyword, and
    default arguments are bound at call time.
    """
    bound = [first, second]
    bound += [third, fourth]
    return bound
# parameters with defaults allows you to write one api without having a large number
# of overloads for various scenarios.
# define the above function in console and play with each of the invocations and see the error messages carefully.
# NOTE: add extra arguments where necessary. First note what error you get and then fix the function invocations.
def test_function_call_with_keyword_arguments():
assert [10,10,3,4] == demo(10,10)
assert [10,20,3,4] == demo(10, 20)
assert [10,20,30,4] == demo(10, 20, 30)
assert [5,20,3,4] == demo(5,second=20)
assert [4,20,30,4] == demo(4,second=20, third=30)
assert [10,2,30,4] == demo(first=10,second=2,third=30)
assert [10,2,30,4] == demo(10, 2,third=30)
assert [8,20,10,4] == demo(8,second=20,third=10) # is this allowed? correct and uncomment
# The *args syntax is used to specify that you can pass variable number of arguments to a function.
# args is a var-positional parameter -> variable number of positional arguments can be bound to it at runtime.
def demo_variable_args(first, *args):
    # `args` collects any extra positional arguments into a tuple.
    return args #just return args so we can inspect its nature in the test.
# assumes args are all strings
def my_merge(separator, *args):
    # Join the var-positional string arguments with `separator`.
    return separator.join(args)
def test_function_with_variable_args():
result = demo_variable_args("hello", "world")
assert "tuple" == type(result).__name__ #this is the type of args
assert ("world",) == result #this is the value of args
assert (1,2,3) == demo_variable_args("hello", 1, 2, 3)
assert "one.two.three"== my_merge(".", "one", "two", "three")
assert "one,two,three"== my_merge(",", "one", "two", "three")
# **kwargs can be used to pass additional named arguments in addition to the specified parameters
# kwargs is a var-keyword parameter that can be bound to a variable number of named keyword arguments that don't map to any other
# function parameter at runtime.
def demo_with_keyword_args(name, *args, **kwargs):
    # `kwargs` collects keyword arguments that match no named parameter.
    return kwargs # return kwargs to inspect it.
def test_function_with_keyword_args():
result = demo_with_keyword_args("jack", age=10, height=100)
assert "dict"== type(result).__name__
assert {"age":10,"height":100} == result
assert {"age":10,"height":100} == demo_with_keyword_args("jack", "address", age=10, height=100)
assert {"address":"address","age":10,"height":100} == demo_with_keyword_args("jack", address="address", age=10, height=100)
assert { } == demo_with_keyword_args(name="jack") # what do you observe here?
# this is function which accepts a variable number of positional arguments
# and variable number of keyword arguments. Note what comes into args and kwargs based on how you call.
def demo_var_kw(*args, **kwargs):
    # Echo back both the positional tuple and the keyword dict.
    return args, kwargs
def demo_unpacking(name, *args, **kwargs):
    # `*`/`**` re-unpack the collected arguments when forwarding.
    return demo_var_kw(*args, **kwargs)
def demo_no_unpacking(name, *args, **kwargs):
    # Without unpacking, the tuple and dict arrive as two positionals.
    return demo_var_kw(args, kwargs)
# Unpacking sequences into arguments is useful when you are calling other
# functions which take variable/kw args. Also read
# Walk through the visualizer, first read the code a couple of times and then step through in full screen mode :)
# https://goo.gl/KqTnJv
def test_function_unpacking():
result = demo_unpacking("jack", 1, 2, k1="v1", k2="v2")
assert ((1,2,),{"k1":"v1","k2":"v2"}) == result
result = demo_no_unpacking("jack", 1, 2, k1="v1", k2="v2")
assert (((1,2),{"k1":"v1","k2":"v2"}),{}) == result
result = demo_var_kw(1,2, k1="v1")
assert ((1,2),{"k1":"v1"}) == result
result = demo_var_kw((1,2), {"k1" :"v1"})
assert (((1,2),{"k1":"v1"}),{}) == result
result = demo_var_kw(*(1,2), **{"k1": "v1"})
assert ((1,2),{"k1":"v1"}) == result
#you can unpack lists as well
result = demo_var_kw(*[1,2], **{"k1":"v1"})
assert ((1,2),{"k1":"v1"}) == result
# Apply what you learnt:
# This function shows how variable arguments can be useful to define certain kinds of functions
def simple_format(format, *args):
    """
    Returns a formatted string by replacing all instances of %X with Xth argument in args (0...len(args))
    e.g. "%0 says hello", "ted" should return "ted says hello"
    "%1 says hello to %0", ("ted", "jack") should return jack says hello to ted etc.
    If %X is used and X > len(args) it is returned as is.
    """
    import re

    def _replace(match):
        # %X -> str(args[X]) when X is a valid index, else leave it as-is
        index = int(match.group(1))
        return str(args[index]) if index < len(args) else match.group(0)

    # A single regex pass handles multi-digit indices (%10) correctly and
    # cannot be confused by '{' or '}' in the format string, both of which
    # broke the original replace-then-str.format approach.
    return re.sub(r"%(\d+)", _replace, format)
def test_simple_format():
    """Exercise simple_format: repeated placeholders, reordering, and out-of-range %X."""
    cases = [
        ("hello %0", ("hari",), "hello hari"),
        ("%0 says %0", ("hari",), "hari says hari"),
        ("%1 calls %0", ("ashok", "hari"), "hari calls ashok"),
        ("%1 calls %0 and %2", ("ashok", "hari"), "hari calls ashok and %2"),
    ]
    for template, arguments, expected in cases:
        assert expected == simple_format(template, *arguments)
# Closing notes for the lesson: further reading for the student.
note2 = '''
Go through this link: https://docs.python.org/3/tutorial/controlflow.html#defining-functions
All of that should make sense now and there should be no surprises.
'''
# Filled in by the student: key takeaways from this lesson.
three_things_i_learnt = """
-about *args
-about *kwargs
-about unpacking functions
"""
| __author__ = 'Kalyan'
from placeholders import *
notes = """
This lesson explores some advanced features of functions. This will help you make sense
of a lot of standard library functions when you use them. In particular, the following are covered
- positional and keyword arguments
- defining and passing variable number of positional and keyword arguments
- unpacking arguments
Be sure to understand the difference between a parameter and an argument. Keep these links open and refer to them
as you do the lesson!
https://docs.python.org/3/glossary.html#term-parameter
https://docs.python.org/3/glossary.html#term-argument
https://docs.python.org/3/reference/expressions.html#calls
"""
def demo(first, second, third=3, fourth=4):
    '''
    Demo function mixing required and defaulted parameters. The test below shows every
    valid way of binding arguments to it (positional, keyword, and mixed calls).
    '''
    bound_values = [first, second, third, fourth]
    return bound_values
# parameters with defaults allows you to write one api without having a large number
# of overloads for various scenarios.
# define the above function in console and play with each of the invocations and see the error messages carefully.
# NOTE: add extra arguments where necessary. First note what error you get and then fix the function invocations.
def test_function_call_with_keyword_arguments():
    """Every valid mix of positional and keyword arguments when calling demo()."""
    # purely positional calls; omitted parameters fall back to their defaults
    assert [10, 10, 3, 4] == demo(10, 10)
    assert [10, 20, 3, 4] == demo(10, 20)
    assert [10, 20, 30, 4] == demo(10, 20, 30)
    # keyword arguments may follow positional ones
    assert [5, 20, 3, 4] == demo(5, second=20)
    assert [4, 20, 30, 4] == demo(4, second=20, third=30)
    # all-keyword and mixed forms bind to the same parameters
    assert [10, 2, 30, 4] == demo(first=10, second=2, third=30)
    assert [10, 2, 30, 4] == demo(10, 2, third=30)
    assert [8, 20, 10, 4] == demo(8, second=20, third=10)  # keyword args may come in any order
# The *args syntax is used to specify that you can pass variable number of arguments to a function.
# args is a var-positional parameter -> variable number of positional arguments can be bound to it at runtime.
def demo_variable_args(first, *args):
    """Return only the var-positional bundle so the test can inspect its type and value."""
    extras = args
    return extras
# assumes args are all strings
def my_merge(separator, *args):
    """Join an arbitrary number of string arguments with the given separator."""
    merged = separator.join(args)
    return merged
def test_function_with_variable_args():
    """The var-positional parameter always arrives as a tuple of the extra arguments."""
    extras = demo_variable_args("hello", "world")
    assert "tuple" == type(extras).__name__  # args is a tuple...
    assert ("world",) == extras              # ...holding every extra positional argument
    assert (1, 2, 3) == demo_variable_args("hello", 1, 2, 3)
    assert "one.two.three" == my_merge(".", "one", "two", "three")
    assert "one,two,three" == my_merge(",", "one", "two", "three")
# **kwargs can be used to pass additional named arguments in addition to the specified parameters
# kwargs is a var-keyword parameter that can be bound to a variable number of named keyword arguments that don't map to any other
# function parameter at runtime.
def demo_with_keyword_args(name, *args, **kwargs):
    """Return only the var-keyword bundle so the test can inspect what lands in it."""
    keyword_bundle = kwargs
    return keyword_bundle
def test_function_with_keyword_args():
    """kwargs is a dict of the keyword arguments that match no named parameter."""
    bundle = demo_with_keyword_args("jack", age=10, height=100)
    assert "dict" == type(bundle).__name__
    assert {"age": 10, "height": 100} == bundle
    # a positional "address" is swallowed by *args, not **kwargs
    assert {"age": 10, "height": 100} == demo_with_keyword_args("jack", "address", age=10, height=100)
    assert {"address": "address", "age": 10, "height": 100} == demo_with_keyword_args("jack", address="address", age=10, height=100)
    # name= binds to the named parameter, so kwargs stays empty
    assert {} == demo_with_keyword_args(name="jack")
# this is function which accepts a variable number of positional arguments
# and variable number of keyword arguments. Note what comes into args and kwargs based on how you call.
def demo_var_kw(*args, **kwargs):
    """Collect any positional args (a tuple) and keyword args (a dict) and hand both back."""
    collected = (args, kwargs)
    return collected
def demo_unpacking(name, *args, **kwargs):
    """Spread the collected args/kwargs back out when delegating to demo_var_kw."""
    forwarded = demo_var_kw(*args, **kwargs)
    return forwarded
def demo_no_unpacking(name, *args, **kwargs):
    """Pass the args tuple and the kwargs dict as two plain positional values (no unpacking)."""
    wrapped = demo_var_kw(args, kwargs)
    return wrapped
# Unpacking sequences into arguments is useful when you are calling other
# functions which take variable/kw args. Also read
# Walk through the visualizer, first read the code a couple of times and then step through in full screen mode :)
# https://goo.gl/KqTnJv
def test_function_unpacking():
    """Contrast unpacked (*, **) forwarding with passing the containers as plain values."""
    observed = demo_unpacking("jack", 1, 2, k1="v1", k2="v2")
    assert ((1, 2,), {"k1": "v1", "k2": "v2"}) == observed
    observed = demo_no_unpacking("jack", 1, 2, k1="v1", k2="v2")
    assert (((1, 2), {"k1": "v1", "k2": "v2"}), {}) == observed
    # loose arguments land in args / kwargs directly
    observed = demo_var_kw(1, 2, k1="v1")
    assert ((1, 2), {"k1": "v1"}) == observed
    # a tuple and a dict passed as-is are just two positional values
    observed = demo_var_kw((1, 2), {"k1": "v1"})
    assert (((1, 2), {"k1": "v1"}), {}) == observed
    # * and ** at the call site spread the containers back into loose arguments
    observed = demo_var_kw(*(1, 2), **{"k1": "v1"})
    assert ((1, 2), {"k1": "v1"}) == observed
    # lists unpack with * just like tuples
    observed = demo_var_kw(*[1, 2], **{"k1": "v1"})
    assert ((1, 2), {"k1": "v1"}) == observed
# Apply what you learnt:
# This function shows how variable arguments can be useful to define certain kinds of functions
def simple_format(format, *args):
    """
    Returns a formatted string by replacing all instances of %X with Xth argument in args (0...len(args))
    e.g. "%0 says hello", "ted" should return "ted says hello"
    "%1 says hello to %0", ("ted", "jack") should return jack says hello to ted etc.
    If %X is used and X >= len(args) it is returned as is.
    """
    import re  # local import keeps this teaching file's top-level imports untouched

    def _substitute(match):
        # greedy digit match: "%10" is parsed as index 10, never as "%1" followed by "0"
        index = int(match.group(1))
        return str(args[index]) if index < len(args) else match.group(0)

    # Fixes over the previous implementation:
    # 1. str.replace("%1", ...) also corrupted "%10", "%11", ... when several args were given.
    # 2. Routing through str.format() raised on literal "{" / "}" in the template.
    # 3. Dead `pass` after `return` removed.
    return re.sub(r"%(\d+)", _substitute, format)
def test_simple_format():
    """Exercise simple_format: repeated placeholders, reordering, and out-of-range %X."""
    cases = [
        ("hello %0", ("hari",), "hello hari"),
        ("%0 says %0", ("hari",), "hari says hari"),
        ("%1 calls %0", ("ashok", "hari"), "hari calls ashok"),
        ("%1 calls %0 and %2", ("ashok", "hari"), "hari calls ashok and %2"),
    ]
    for template, arguments, expected in cases:
        assert expected == simple_format(template, *arguments)
# Closing notes for the lesson: further reading for the student.
note2 = '''
Go through this link: https://docs.python.org/3/tutorial/controlflow.html#defining-functions
All of that should make sense now and there should be no surprises.
'''
# Filled in by the student: key takeaways from this lesson.
three_things_i_learnt = """
-about *args
-about *kwargs
-about unpacking functions
"""
| en | 0.793087 | This lesson explores some advanced features of functions. This will help you make sense
of a lot of standard library functions when you use them. In particular, the following are covered
- positional and keyword arguments
- defining and passing variable number of positional and keyword arguments
- unpacking arguments
Be sure to understand the difference between a parameter and an argument. Keep these links open and refer to them
as you do the lesson!
https://docs.python.org/3/glossary.html#term-parameter
https://docs.python.org/3/glossary.html#term-argument
https://docs.python.org/3/reference/expressions.html#calls This is a test function that has some default and non-default parameters. Use the test below to study how the
arguments are bound at runtime and the various ways in which the function can be invoked. # parameters with defaults allows you to write one api without having a large number # of overloads for various scenarios. # define the above function in console and play with each of the invocations and see the error messages carefully. # NOTE: add extra arguments where necessary. First note what error you get and then fix the function invocations. # is this allowed? correct and uncomment # The *args syntax is used to specify that you can pass variable number of arguments to a function. # args is a var-positional parameter -> variable number of positional arguments can be bound to it at runtime. #just return args so we can inspect its nature in the test. # assumes args are all strings #this is the type of args #this is the value of args # **kwargs can be used to pass additional named arguments in addition to the specified parameters # kwargs is a var-keyword parameter that can be bound to a variable number of named keyword arguments that don't map to any other # function parameter at runtime. # return kwargs to inspect it. # what do you observe here? # this is function which accepts a variable number of positional arguments # and variable number of keyword arguments. Note what comes into args and kwargs based on how you call. # Unpacking sequences into arguments is useful when you are calling other # functions which take variable/kw args. Also read # Walk through the visualizer, first read the code a couple of times and then step through in full screen mode :) # https://goo.gl/KqTnJv #you can unpack lists as well # Apply what you learnt: # This function shows how variable arguments can be useful to define certain kinds of functions Returns a formatted string by replacing all instances of %X with Xth argument in args (0...len(args))
e.g. "%0 says hello", "ted" should return "ted says hello"
"%1 says hello to %0", ("ted", "jack") should return jack says hello to ted etc.
If %X is used and X > len(args) it is returned as is. Go through this link: https://docs.python.org/3/tutorial/controlflow.html#defining-functions
All of that should make sense now and there should be no surprises. -about *args
-about *kwargs
-about unpacking functions | 4.515362 | 5 |
Graphical-Interface-Tkinter/exemple6.py | benjazor/high-school | 0 | 6614492 | from tkinter import *
import tkinter.messagebox
import tkinter.filedialog
def Ouvrir():
    """Ask the user for a GIF file and display it on the canvas (menu Fichier > Ouvrir)."""
    Canevas.delete(ALL) # clear the drawing area first
    filename = tkinter.filedialog.askopenfilename(title="Ouvrir une image",filetypes=[('gif files','.gif'),('all files','.*')])
    photo = PhotoImage(file=filename)
    gifdict[filename] = photo # keep a reference so the image is not garbage-collected
    Canevas.create_image(0,0,anchor=NW,image=photo)
    Canevas.config(height=photo.height(),width=photo.width())
    Mafenetre.title("Image "+str(photo.width())+" x "+str(photo.height()))
def Fermer():
    """Clear the canvas and reset the window title (menu Fichier > Fermer)."""
    Canevas.delete(ALL)
    Mafenetre.title("Image")
def Apropos():
    """Show the 'About' message box (menu Aide > A propos)."""
    tkinter.messagebox.showinfo("A propos","Exemple de messagebox")
def Clic(event):
    """Left-click handler: draw a 20x20 black ball centred on the mouse position."""
    X = event.x #X = x coordinate of the mouse when the event fired
    Y = event.y #Y = y coordinate of the mouse when the event fired
    Balle = Canevas.create_oval(X-10,Y-10,X+10,Y+10,width=1,fill='black')#black oval, 20x20, centred on the pointer
    Listeballe.append(Balle)#remember the ball's canvas id so it can be undone later
def Annuler():
    """Undo handler: remove the most recently drawn ball, if any."""
    n=len(Listeballe)# n = number of balls currently drawn
    if n>0:
        Canevas.delete(Listeballe[n-1]) # remove the last ball from the canvas
        del(Listeballe[n-1]) # and drop its id from the undo list
# Dictionary keeping a reference to each loaded image so it survives after Ouvrir() returns
gifdict={}
# List keeping the canvas ids of the drawn balls (for undo)
Listeballe=[]
# Create the main window
Mafenetre = Tk()
Mafenetre.title("Image")
# Create a Menu widget attached to the main window
menubar = Menu(Mafenetre)
menufichier = Menu(menubar,tearoff=0)
menufichier.add_command(label="Ouvrir une image",command=Ouvrir)
menufichier.add_command(label="Fermer l'image",command=Fermer)
menufichier.add_separator()
menufichier.add_command(label="Quitter",command=Mafenetre.destroy)
menubar.add_cascade(label="Fichier", menu=menufichier)
menuaide = Menu(menubar,tearoff=0)
menuaide.add_command(label="A propos",command=Apropos)
menubar.add_cascade(label="Aide", menu=menuaide)
# Install the menu bar
Mafenetre.config(menu=menubar)
# Create a Canvas widget in the main window
Canevas = Canvas(Mafenetre)
Canevas.pack(padx=5,pady=5)
# bind() ties an event to a callback:
# a left click on the drawing area will invoke the user function Clic()
Canevas.bind('<Button-1>', Clic)
# Create a Button widget (the undo button)
BoutonAnnuler = Button(Mafenetre, text ='Annuler clic', command = Annuler)
BoutonAnnuler.pack(side = LEFT, padx = 5, pady = 5)
# Start the event loop
Mafenetre.mainloop()
| from tkinter import *
import tkinter.messagebox
import tkinter.filedialog
def Ouvrir():
    """Ask the user for a GIF file and display it on the canvas (menu Fichier > Ouvrir)."""
    Canevas.delete(ALL) # clear the drawing area first
    filename = tkinter.filedialog.askopenfilename(title="Ouvrir une image",filetypes=[('gif files','.gif'),('all files','.*')])
    photo = PhotoImage(file=filename)
    gifdict[filename] = photo # keep a reference so the image is not garbage-collected
    Canevas.create_image(0,0,anchor=NW,image=photo)
    Canevas.config(height=photo.height(),width=photo.width())
    Mafenetre.title("Image "+str(photo.width())+" x "+str(photo.height()))
def Fermer():
    """Clear the canvas and reset the window title (menu Fichier > Fermer)."""
    Canevas.delete(ALL)
    Mafenetre.title("Image")
def Apropos():
    """Show the 'About' message box (menu Aide > A propos)."""
    tkinter.messagebox.showinfo("A propos","Exemple de messagebox")
def Clic(event):
    """Left-click handler: draw a 20x20 black ball centred on the mouse position."""
    X = event.x #X = x coordinate of the mouse when the event fired
    Y = event.y #Y = y coordinate of the mouse when the event fired
    Balle = Canevas.create_oval(X-10,Y-10,X+10,Y+10,width=1,fill='black')#black oval, 20x20, centred on the pointer
    Listeballe.append(Balle)#remember the ball's canvas id so it can be undone later
def Annuler():
    """Undo handler: remove the most recently drawn ball, if any."""
    n=len(Listeballe)# n = number of balls currently drawn
    if n>0:
        Canevas.delete(Listeballe[n-1]) # remove the last ball from the canvas
        del(Listeballe[n-1]) # and drop its id from the undo list
# Dictionary keeping a reference to each loaded image so it survives after Ouvrir() returns
gifdict={}
# List keeping the canvas ids of the drawn balls (for undo)
Listeballe=[]
# Create the main window
Mafenetre = Tk()
Mafenetre.title("Image")
# Create a Menu widget attached to the main window
menubar = Menu(Mafenetre)
menufichier = Menu(menubar,tearoff=0)
menufichier.add_command(label="Ouvrir une image",command=Ouvrir)
menufichier.add_command(label="Fermer l'image",command=Fermer)
menufichier.add_separator()
menufichier.add_command(label="Quitter",command=Mafenetre.destroy)
menubar.add_cascade(label="Fichier", menu=menufichier)
menuaide = Menu(menubar,tearoff=0)
menuaide.add_command(label="A propos",command=Apropos)
menubar.add_cascade(label="Aide", menu=menuaide)
# Install the menu bar
Mafenetre.config(menu=menubar)
# Create a Canvas widget in the main window
Canevas = Canvas(Mafenetre)
Canevas.pack(padx=5,pady=5)
# bind() ties an event to a callback:
# a left click on the drawing area will invoke the user function Clic()
Canevas.bind('<Button-1>', Clic)
# Create a Button widget (the undo button)
BoutonAnnuler = Button(Mafenetre, text ='Annuler clic', command = Annuler)
BoutonAnnuler.pack(side = LEFT, padx = 5, pady = 5)
# Start the event loop
Mafenetre.mainloop()
| fr | 0.986617 | # on efface la zone graphique # référence #X = le x de la souris lors de l'evenement #Y = le y de la souris lors de l'evenement #Crée un oval noir d'une taille 20x20 centré sur la souris #Enregistre la balle dans une liste #définir n, tel que n soit égal à la longeur de la liste Listeball #Supprime le dernier objet de la Listeballe #Supprime la dernière valeur de la Listeballe # Utilisation d'un dictionnaire pour conserver la référence de l'image afin qu'elle existe en dehors de la fonction ouvrir # Utilisation d'une liste pour conserver les références des items de balle # Création de la fenetre principale # Création d'un widget Menu associé à la fenetre principale # Affichage du menu # Création d'un widget Canvas dans la fenetre principale # La méthode bind() permet de lier un événement avec une fonction : # un clic gauche sur la zone graphique provoquera l'appel de la fonction utilisateur Clic() # Création d'un widget Button (bouton Annuler) # On lance le gestionnaire d'évènement | 3.164559 | 3 |
marsi/cobra/flux_analysis/manipulation.py | biosustain/marsi | 0 | 6614493 | # Copyright 2017 <NAME> and The Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
from cameo.flux_analysis.simulation import FluxDistributionResult
from cobra.core.dictlist import DictList
from cobra.core.model import Model
from cobra.core.reaction import Reaction
from pandas import Series
__all__ = ["compete_metabolite", "inhibit_metabolite", "knockout_metabolite", "apply_anti_metabolite"]
logger = logging.getLogger(__name__)
def compete_metabolite(model, metabolite, reference_dist, fraction=0.5, allow_accumulation=True, constant=1e4):
    """
    Increases the usage of a metabolite based on a reference flux distribution.

    For every reversible single-compartment reaction touching the metabolite a binary
    indicator y and an auxiliary variable u are added (big-M formulation, M = `constant`)
    so that u equals the metabolite-producing part of the flux (S*v when S*v > 0, else 0).
    The summed production turnover is then forced to be at least (1 + fraction) times half
    of the reference turnover.

    Parameters
    ----------
    model : Model
        A constraint-based model.
    metabolite : cobra.Metabolite
        A metabolite.
    reference_dist : dict or FluxDistributionResult
        The result of a FBA like simulation. Alternative can be dictionaries of reaction.id -> flux.
    fraction : float
        Relative increase required on the reference turnover (e.g. 0.5 -> at least +50%).
    allow_accumulation : bool
        Allow to accumulate the metabolite (add a exchange reaction).
    constant : float
        A large number (like 10000) used as big-M; must exceed any feasible |S*v|.

    Returns
    -------
    cobra.core.Reaction
        If allow accumulation returns the exchange reaction associated with the metabolite.

    Raises
    ------
    ValueError
        If `reference_dist` is neither a dict, a pandas.Series nor a FluxDistributionResult.
    """
    assert isinstance(model, Model)
    # only single-compartment reactions; transport reactions are left untouched
    reactions = [r for r in metabolite.reactions if len(set(m.compartment for m in r.metabolites)) == 1]
    if isinstance(reference_dist, FluxDistributionResult):
        reference_dist = reference_dist.fluxes.to_dict()
    elif isinstance(reference_dist, Series):
        reference_dist = reference_dist.to_dict()
    if not isinstance(reference_dist, dict):
        raise ValueError("'reference_dist' must be a dict or FluxDistributionResult")
    exchanges = DictList(model.exchanges)
    exchange = None
    if allow_accumulation:
        # assumes ids end with '_<compartment>' (e.g. 'glc__D_c' -> 'glc__D') -- TODO confirm
        species_id = metabolite.id[:-2]
        if "EX_%s_e" % species_id in exchanges:
            exchange = model.reactions.get_by_id("EX_%s_e" % species_id)
        elif "DM_%s_e" % species_id in exchanges:
            exchange = model.reactions.get_by_id("DM_%s_e" % species_id)
        else:
            reaction_id = "COMPETE_%s" % metabolite.id
            if reaction_id in model.reactions:
                exchange = model.reactions.get_by_id(reaction_id)
            else:
                exchange = model.add_boundary(metabolite, type="compete sink", reaction_id=reaction_id, lb=0)
    aux_variables = {}
    ind_variables = {}
    # total absolute reference flux through the metabolite (production + consumption);
    # at steady state each side equals turnover / 2 (used for min_production_turnover below)
    turnover = sum(abs(r.metabolites[metabolite] * reference_dist.get(r.id, 0)) for r in metabolite.reactions)
    for reaction in reactions:
        coefficient = reaction.metabolites[metabolite]
        # Optimization to reduce y variables and problem complexity:
        # An irreversible reaction (v >= 0) with coefficient < 0 can only consume the
        # metabolite, so it never contributes to the production turnover and is skipped.
        # With coefficient > 0 it always produces, so S*v goes straight into the sum.
        # Only reversible reactions need the indicator/auxiliary variable pair.
        if not reaction.reversibility:
            if coefficient < 0:  # irreversible consumer: cannot produce, skip
                continue
            else:  # irreversible producer: S*v is its production contribution
                aux_variables[reaction.id] = reaction.flux_expression * coefficient
                continue
        to_add = []
        ind_var_id = "y_%s" % reaction.id
        aux_var_id = "u_%s" % reaction.id
        try:
            ind_var = model.solver.variables[ind_var_id]
            aux_var = model.solver.variables[aux_var_id]
        except KeyError:
            ind_var = model.solver.interface.Variable(ind_var_id, type='binary')
            aux_var = model.solver.interface.Variable(aux_var_id, lb=0)
            to_add += [ind_var, aux_var]
        aux_variables[reaction.id] = aux_var
        ind_variables[reaction.id] = ind_var
        upper_indicator_constraint_name = "ind_%s_u" % reaction.id
        lower_indicator_constraint_name = "ind_%s_l" % reaction.id
        auxiliary_constraint_a_name = "aux_%s_a" % reaction.id
        auxiliary_constraint_b_name = "aux_%s_b" % reaction.id
        auxiliary_constraint_c_name = "aux_%s_c" % reaction.id
        auxiliary_constraint_d_name = "aux_%s_d" % reaction.id
        try:
            model.solver.constraints[upper_indicator_constraint_name]
        except KeyError:
            # force y = 1 whenever S*v > 0 (production), where S*v = coefficient * flux
            #               -M             0             M
            #     S*v  <-------------|---------------->
            #             y = 0      |      y = 1
            # S*v - M*y <= 0
            # if y = 1 then S*v <= M
            # if y = 0 then S*v <= 0
            upper_indicator_expression = coefficient * reaction.flux_expression - ind_var * constant
            ind_constraint_u = model.solver.interface.Constraint(upper_indicator_expression,
                                                                 name=upper_indicator_constraint_name,
                                                                 ub=0)
            # S*v + M*(1-y) >= 0
            # if y = 1 then S*v >= 0
            # if y = 0 then S*v >= -M
            lower_indicator_expression = coefficient * reaction.flux_expression + constant - ind_var * constant
            ind_constraint_l = model.solver.interface.Constraint(lower_indicator_expression,
                                                                 name=lower_indicator_constraint_name,
                                                                 lb=0)
            # a) -M*y + u <= 0
            # b)  M*y + u >= 0
            # if y = 0, u = 0
            # if y = 1, -M <= u <= M (u >= 0 from its variable bound)
            aux_indicator_expression_a = -constant * ind_var + aux_var
            aux_constraint_a = model.solver.interface.Constraint(aux_indicator_expression_a,
                                                                 name=auxiliary_constraint_a_name,
                                                                 ub=0)
            aux_indicator_expression_b = constant * ind_var + aux_var
            aux_constraint_b = model.solver.interface.Constraint(aux_indicator_expression_b,
                                                                 name=auxiliary_constraint_b_name,
                                                                 lb=0)
            # c) -M*(1-y) + u - S*v <= 0
            # d)  M*(1-y) + u - S*v >= 0
            # if y = 1 then u = S*v (the production flux)
            # if y = 0 then c/d are slack (u is pinned to 0 by a/b)
            aux_indicator_expression_c = -constant * (1 - ind_var) + aux_var - reaction.flux_expression * coefficient
            aux_constraint_c = model.solver.interface.Constraint(aux_indicator_expression_c,
                                                                 name=auxiliary_constraint_c_name,
                                                                 ub=0)
            aux_indicator_expression_d = constant * (1 - ind_var) + aux_var - reaction.flux_expression * coefficient
            aux_constraint_d = model.solver.interface.Constraint(aux_indicator_expression_d,
                                                                 name=auxiliary_constraint_d_name,
                                                                 lb=0)
            to_add += [ind_constraint_l, ind_constraint_u, aux_constraint_a,
                       aux_constraint_b, aux_constraint_c, aux_constraint_d]
        model.add_cons_vars(to_add)
    # reference production side is turnover / 2 (production == consumption at steady state)
    min_production_turnover = (1 + fraction) * (turnover / 2)
    # sum(u) >= (1 + fraction) * uWT
    # NOTE(review): the constraint id says "take_less" although it forces the turnover UP;
    # inhibit_metabolite uses "take_more" for the opposite direction -- the ids look swapped.
    constrain_name = "take_less_%s" % metabolite.id
    if constrain_name not in model.constraints:
        increase_turnover_constraint = model.solver.interface.Constraint(sum(aux_variables.values()),
                                                                         name="take_less_%s" % metabolite.id,
                                                                         lb=min_production_turnover)
        model.add_cons_vars(increase_turnover_constraint)
    else:
        increase_turnover_constraint = model.constraints[constrain_name]
        increase_turnover_constraint.lb = min_production_turnover
    return exchange
def inhibit_metabolite(model, metabolite, reference_dist, fraction=0.5, allow_accumulation=True, constant=1e4):
    """
    Inhibits the usage of a metabolite based on a reference flux distribution.

    For every reversible single-compartment reaction touching the metabolite a binary
    indicator y and an auxiliary variable u are added (big-M formulation, M = `constant`)
    so that u equals the metabolite-consuming part of the flux (-S*v when S*v < 0, else 0).
    The summed consumption turnover is then capped at (1 - fraction) times half of the
    reference turnover.

    Parameters
    ----------
    model : Model
        A constraint-based model.
    metabolite : cobra.Metabolite
        A metabolite.
    reference_dist : dict, FluxDistributionResult
        The result of a FBA like simulation. Alternative can be dictionaries of reaction.id -> flux.
    fraction : float
        Fraction by which the reference consumption turnover is reduced (e.g. 0.5 halves it).
    allow_accumulation : bool
        Allow to accumulate the metabolite (add a exchange reaction).
    constant : float
        A large number (like 10000) used as big-M; must exceed any feasible |S*v|.

    Returns
    -------
    cameo.core.Reaction, None
        If allow accumulation returns the exchange reaction associated with the metabolite.

    Raises
    ------
    ValueError
        If `reference_dist` is neither a dict, a pandas.Series nor a FluxDistributionResult.
    """
    # only single-compartment reactions; transport reactions are left untouched
    reactions = [r for r in metabolite.reactions if len(set(m.compartment for m in r.metabolites)) == 1]
    if isinstance(reference_dist, FluxDistributionResult):
        reference_dist = reference_dist.fluxes.to_dict()
    elif isinstance(reference_dist, Series):
        reference_dist = reference_dist.to_dict()
    if not isinstance(reference_dist, dict):
        raise ValueError("'reference_dist' must be a dict or FluxDistributionResult")
    exchanges = DictList(model.exchanges)
    exchange = None
    if allow_accumulation:
        # assumes ids end with '_<compartment>' (e.g. 'glc__D_c' -> 'glc__D') -- TODO confirm
        species_id = metabolite.id[:-2]
        if "EX_%s_e" % species_id in exchanges:
            exchange = model.reactions.get_by_id("EX_%s_e" % species_id)
        elif "DM_%s_e" % species_id in exchanges:
            exchange = model.reactions.get_by_id("DM_%s_e" % species_id)
        else:
            reaction_id = "INHIBIT_%s" % metabolite.id
            if reaction_id in model.reactions:
                exchange = model.reactions.get_by_id(reaction_id)
            else:
                exchange = model.add_boundary(metabolite, type="inhibit sink", reaction_id=reaction_id, lb=0)
    aux_variables = {}
    ind_variables = {}
    # total absolute reference flux through the metabolite (production + consumption);
    # at steady state each side equals turnover / 2 (used for max_production_turnover below)
    turnover = sum(abs(r.metabolites[metabolite] * reference_dist.get(r.id, 0)) for r in metabolite.reactions)
    for reaction in reactions:
        coefficient = reaction.metabolites[metabolite]
        # Optimization to reduce y variables and problem complexity:
        # An irreversible reaction (v >= 0) with coefficient > 0 can only produce the
        # metabolite, so it never contributes to the consumption turnover and is skipped.
        # With coefficient < 0 it always consumes, so -S*v goes straight into the sum.
        # Only reversible reactions need the indicator/auxiliary variable pair.
        if not reaction.reversibility:
            if coefficient > 0:  # irreversible producer: cannot consume, skip
                continue
            else:  # irreversible consumer: -S*v is its (positive) consumption contribution
                aux_variables[reaction.id] = - reaction.flux_expression * coefficient
                continue
        to_add = []
        ind_var_id = "y_%s" % reaction.id
        aux_var_id = "u_%s" % reaction.id
        try:
            ind_var = model.solver.variables[ind_var_id]
            aux_var = model.solver.variables[aux_var_id]
        except KeyError:
            ind_var = model.solver.interface.Variable(ind_var_id, type='binary')
            aux_var = model.solver.interface.Variable(aux_var_id, lb=0)
            to_add += [ind_var, aux_var]
        aux_variables[reaction.id] = aux_var
        ind_variables[reaction.id] = ind_var
        upper_indicator_constraint_name = "ind_%s_u" % reaction.id
        lower_indicator_constraint_name = "ind_%s_l" % reaction.id
        auxiliary_constraint_a_name = "aux_%s_a" % reaction.id
        auxiliary_constraint_b_name = "aux_%s_b" % reaction.id
        auxiliary_constraint_c_name = "aux_%s_c" % reaction.id
        auxiliary_constraint_d_name = "aux_%s_d" % reaction.id
        try:
            model.solver.constraints[upper_indicator_constraint_name]
        except KeyError:
            # force y = 1 whenever S*v < 0 (consumption), where S*v = coefficient * flux
            #               -M             0             M
            #     S*v  <-------------|---------------->
            #             y = 1      |      y = 0
            # -S*v - M*y <= 0
            # if y = 1 then S*v >= -M
            # if y = 0 then S*v >= 0
            upper_indicator_expression = - coefficient * reaction.flux_expression - ind_var * constant
            ind_constraint_u = model.solver.interface.Constraint(upper_indicator_expression,
                                                                 name=upper_indicator_constraint_name,
                                                                 ub=0)
            # -S*v + M*(1-y) >= 0
            # if y = 1 then S*v <= 0
            # if y = 0 then S*v <= M
            lower_indicator_expression = - coefficient * reaction.flux_expression + constant - ind_var * constant
            ind_constraint_l = model.solver.interface.Constraint(lower_indicator_expression,
                                                                 name=lower_indicator_constraint_name,
                                                                 lb=0)
            # a) -M*y + u <= 0
            # b)  M*y + u >= 0
            # if y = 0, u = 0
            # if y = 1, -M <= u <= M (u >= 0 from its variable bound)
            aux_indicator_expression_a = -constant * ind_var + aux_var
            aux_constraint_a = model.solver.interface.Constraint(aux_indicator_expression_a,
                                                                 name=auxiliary_constraint_a_name,
                                                                 ub=0)
            aux_indicator_expression_b = constant * ind_var + aux_var
            aux_constraint_b = model.solver.interface.Constraint(aux_indicator_expression_b,
                                                                 name=auxiliary_constraint_b_name,
                                                                 lb=0)
            # c) -M*(1-y) + u + S*v <= 0
            # d)  M*(1-y) + u + S*v >= 0
            # if y = 1 then u = -S*v (the consumption flux, positive)
            # if y = 0 then c/d are slack (u is pinned to 0 by a/b)
            aux_indicator_expression_c = -constant * (1 - ind_var) + aux_var + reaction.flux_expression * coefficient
            aux_constraint_c = model.solver.interface.Constraint(aux_indicator_expression_c,
                                                                 name=auxiliary_constraint_c_name,
                                                                 ub=0)
            aux_indicator_expression_d = constant * (1 - ind_var) + aux_var + reaction.flux_expression * coefficient
            aux_constraint_d = model.solver.interface.Constraint(aux_indicator_expression_d,
                                                                 name=auxiliary_constraint_d_name,
                                                                 lb=0)
            to_add += [ind_constraint_l, ind_constraint_u, aux_constraint_a,
                       aux_constraint_b, aux_constraint_c, aux_constraint_d]
        model.add_cons_vars(to_add)
    # reference consumption side is turnover / 2 (production == consumption at steady state)
    max_production_turnover = (1 - fraction) * (turnover / 2)
    # sum(u) <= (1-fraction) * uWT
    # NOTE(review): the constraint id says "take_more" although it caps the turnover DOWN;
    # compete_metabolite uses "take_less" for the opposite direction -- the ids look swapped.
    constraint_name = "take_more_%s" % metabolite.id
    if constraint_name not in model.constraints:
        decrease_turnover_constraint = model.solver.interface.Constraint(sum(aux_variables.values()),
                                                                         name=constraint_name,
                                                                         ub=max_production_turnover)
        model.add_cons_vars(decrease_turnover_constraint)
    else:
        decrease_turnover_constraint = model.constraints[constraint_name]
        decrease_turnover_constraint.ub = max_production_turnover
    return exchange
def knockout_metabolite(model, metabolite, ignore_transport=True, allow_accumulation=True):
    """
    Completely blocks the usage of a metabolite (a full knockout, no reference needed).

    Reversible (non-transport, non-exchange) reactions touching the metabolite are closed
    in both directions; irreversible consumers are closed in their forward direction.

    Parameters
    ----------
    model : Model
        A constraint-based model.
    metabolite: cobra.Metabolite
        A metabolite.
    ignore_transport : bool
        Choose to ignore transport reactions.
    allow_accumulation : bool
        Allow to accumulate the metabolite (add a exchange reaction).

    Returns
    -------
    cobra.Reaction, None
        If allow accumulation returns the exchange reaction associated with the metabolite.
    """
    assert isinstance(model, Model)

    reactions = metabolite.reactions
    if ignore_transport:
        # keep only single-compartment reactions (transport spans more than one compartment)
        reactions = [r for r in reactions if not len(set(m.compartment for m in r.metabolites)) > 1]

    # DictList supports membership tests by reaction id; a plain list compared a string
    # against Reaction objects, so the EX_/DM_ lookups below could never match.
    # This also matches how compete_metabolite/inhibit_metabolite resolve exchanges.
    exchanges = DictList(model.exchanges)
    for reaction in reactions:
        assert isinstance(reaction, Reaction)
        if reaction in exchanges:
            continue

        if reaction.reversibility:
            reaction.bounds = (0, 0)
        elif reaction.metabolites[metabolite] < 0:
            # irreversible consumer: close the upper bound so it cannot carry flux.
            # (The previous `lower_bound = 0` left consumption fully enabled.)
            reaction.upper_bound = 0

    exchange = None
    if allow_accumulation:
        # assumes ids end with '_<compartment>' (e.g. 'glc__D_c' -> 'glc__D') -- TODO confirm
        species_id = metabolite.id[:-2]
        if "EX_%s_e" % species_id in exchanges:
            exchange = model.reactions.get_by_id("EX_%s_e" % species_id)
        elif "DM_%s_e" % species_id in exchanges:
            exchange = model.reactions.get_by_id("DM_%s_e" % species_id)
        else:
            reaction_id = "KO_%s" % metabolite.id
            if reaction_id in model.reactions:
                exchange = model.reactions.get_by_id(reaction_id)
            else:
                exchange = model.add_boundary(metabolite, type="ko sink", reaction_id=reaction_id, lb=0)

    return exchange
def apply_anti_metabolite(model, metabolites, essential_metabolites, reference, inhibition_fraction=.0,
                          competition_fraction=.0, allow_accumulation=True):
    """
    Apply a metabolite in the context of a model without knowing if it is activating or inhibiting.

    If any member of the species is essential, every member is treated with
    compete_metabolite; otherwise every member is treated with inhibit_metabolite.

    Parameters
    ----------
    model : cobra.Model
        A constraint-based model.
    metabolites : list
        Metabolites of the same species (e.g. the same compound in several compartments).
    essential_metabolites : list
        A list of essential metabolites.
    reference : dict, cameo.core.FluxDistributionResult
        A flux distribution.
    inhibition_fraction : float
        How much a metabolite inhibits.
    competition_fraction : float
        How much a metabolite competes.
    allow_accumulation : bool
        Allow accumulation of the metabolite.

    Returns
    -------
    set
        Exchange reactions added for accumulation (may contain None when none was added).
    """
    exchanges = set()
    # one essential member switches the whole species group to competition mode
    if any(met in essential_metabolites for met in metabolites):
        for metabolite in metabolites:
            exchanges.add(compete_metabolite(model,
                                             metabolite,
                                             reference,
                                             allow_accumulation=allow_accumulation,
                                             fraction=competition_fraction))
    else:
        for metabolite in metabolites:
            exchanges.add(inhibit_metabolite(model,
                                             metabolite,
                                             reference,
                                             allow_accumulation=allow_accumulation,
                                             fraction=inhibition_fraction))
    return exchanges
| # Copyright 2017 <NAME> and The Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
from cameo.flux_analysis.simulation import FluxDistributionResult
from cobra.core.dictlist import DictList
from cobra.core.model import Model
from cobra.core.reaction import Reaction
from pandas import Series
__all__ = ["compete_metabolite", "inhibit_metabolite", "knockout_metabolite", "apply_anti_metabolite"]
logger = logging.getLogger(__name__)
def compete_metabolite(model, metabolite, reference_dist, fraction=0.5, allow_accumulation=True, constant=1e4):
    """
    Increases the usage of a metabolite based on a reference flux distribution.

    Parameters
    ----------
    model : Model
        A constraint-based model.
    metabolite : cobra.Metabolite
        A metabolite.
    reference_dist : dict or FluxDistributionResult
        The result of a FBA like simulation. Alternatively a dictionary of
        reaction.id -> flux.
    fraction : float
        How much the consumption turnover is required to go up.
    allow_accumulation : bool
        Allow the metabolite to accumulate (adds an exchange reaction).
    constant : float
        A large number (like 10000) used as the big-M in indicator constraints.

    Returns
    -------
    cobra.core.Reaction
        If allow_accumulation, returns the exchange reaction associated with
        the metabolite (otherwise None).
    """
    assert isinstance(model, Model)
    # Only constrain single-compartment reactions (transporters are skipped).
    reactions = [r for r in metabolite.reactions if len(set(m.compartment for m in r.metabolites)) == 1]
    if isinstance(reference_dist, FluxDistributionResult):
        reference_dist = reference_dist.fluxes.to_dict()
    elif isinstance(reference_dist, Series):
        reference_dist = reference_dist.to_dict()
    if not isinstance(reference_dist, dict):
        raise ValueError("'reference_dist' must be a dict or FluxDistributionResult")

    exchanges = DictList(model.exchanges)
    exchange = None
    if allow_accumulation:
        # Reuse an existing exchange/demand reaction for the species if present.
        species_id = metabolite.id[:-2]
        if "EX_%s_e" % species_id in exchanges:
            exchange = model.reactions.get_by_id("EX_%s_e" % species_id)
        elif "DM_%s_e" % species_id in exchanges:
            exchange = model.reactions.get_by_id("DM_%s_e" % species_id)
        else:
            reaction_id = "COMPETE_%s" % metabolite.id
            if reaction_id in model.reactions:
                exchange = model.reactions.get_by_id(reaction_id)
            else:
                exchange = model.add_boundary(metabolite, type="compete sink", reaction_id=reaction_id, lb=0)

    aux_variables = {}
    ind_variables = {}

    # Total absolute flux through the metabolite in the reference distribution;
    # half of it is the consumption (== production) turnover at steady state.
    turnover = sum(abs(r.metabolites[metabolite] * reference_dist.get(r.id, 0)) for r in metabolite.reactions)

    for reaction in reactions:
        coefficient = reaction.metabolites[metabolite]
        # Optimization to reduce y variables and problem complexity:
        # Irreversible reactions that only produce the metabolite can be ignored because they will not contribute
        # to the consumption turnover. Reactions that only consume the metabolite can be added directly into the
        # sum constraint. This allows for a less complex problem with less variables.
        if not reaction.reversibility:
            if coefficient < 0:  # skip reactions that can only produce the metabolite
                continue
            else:  # keep the v*coefficient value for reactions that can only consume the metabolite
                aux_variables[reaction.id] = reaction.flux_expression * coefficient
                continue
        to_add = []
        ind_var_id = "y_%s" % reaction.id
        aux_var_id = "u_%s" % reaction.id
        try:
            ind_var = model.solver.variables[ind_var_id]
            aux_var = model.solver.variables[aux_var_id]
        except KeyError:
            ind_var = model.solver.interface.Variable(ind_var_id, type='binary')
            aux_var = model.solver.interface.Variable(aux_var_id, lb=0)
            to_add += [ind_var, aux_var]

        aux_variables[reaction.id] = aux_var
        ind_variables[reaction.id] = ind_var

        upper_indicator_constraint_name = "ind_%s_u" % reaction.id
        lower_indicator_constraint_name = "ind_%s_l" % reaction.id
        auxiliary_constraint_a_name = "aux_%s_a" % reaction.id
        auxiliary_constraint_b_name = "aux_%s_b" % reaction.id
        auxiliary_constraint_c_name = "aux_%s_c" % reaction.id
        auxiliary_constraint_d_name = "aux_%s_d" % reaction.id

        try:
            model.solver.constraints[upper_indicator_constraint_name]
        except KeyError:
            # constrain y to be 0 if Sv >= 0 (production)
            #      -M        0        M
            # Sv <-------------|---------------->
            #          y=0    |    y=1

            # Sv - My <= 0
            # if y = 1 then Sv <= M
            # if y = 0 then Sv <= 0
            upper_indicator_expression = coefficient * reaction.flux_expression - ind_var * constant
            ind_constraint_u = model.solver.interface.Constraint(upper_indicator_expression,
                                                                 name=upper_indicator_constraint_name,
                                                                 ub=0)
            # Sv + M(1-y) >= 0
            # if y = 1 then Sv >= 0
            # if y = 0 then Sv >= -M
            lower_indicator_expression = coefficient * reaction.flux_expression + constant - ind_var * constant
            ind_constraint_l = model.solver.interface.Constraint(lower_indicator_expression,
                                                                 name=lower_indicator_constraint_name,
                                                                 lb=0)
            # a) -My + u <= 0
            # b)  My + u >= 0
            # if y = 0, u = 0
            # if y = 1, -M <= u <= M
            aux_indicator_expression_a = -constant * ind_var + aux_var
            aux_constraint_a = model.solver.interface.Constraint(aux_indicator_expression_a,
                                                                 name=auxiliary_constraint_a_name,
                                                                 ub=0)
            aux_indicator_expression_b = constant * ind_var + aux_var
            aux_constraint_b = model.solver.interface.Constraint(aux_indicator_expression_b,
                                                                 name=auxiliary_constraint_b_name,
                                                                 lb=0)
            # c) -M(1-y) + u - viSi <= 0
            # d)  M(1-y) + u - viSi >= 0
            # if y = 1 then 0 <= u - viSi <= 0 (u tracks the consumption flux)
            # if y = 0 then -M <= u - viSi <= M
            aux_indicator_expression_c = -constant * (1 - ind_var) + aux_var - reaction.flux_expression * coefficient
            aux_constraint_c = model.solver.interface.Constraint(aux_indicator_expression_c,
                                                                 name=auxiliary_constraint_c_name,
                                                                 ub=0)
            aux_indicator_expression_d = constant * (1 - ind_var) + aux_var - reaction.flux_expression * coefficient
            aux_constraint_d = model.solver.interface.Constraint(aux_indicator_expression_d,
                                                                 name=auxiliary_constraint_d_name,
                                                                 lb=0)
            to_add += [ind_constraint_l, ind_constraint_u, aux_constraint_a,
                       aux_constraint_b, aux_constraint_c, aux_constraint_d]

        model.add_cons_vars(to_add)

    min_production_turnover = (1 + fraction) * (turnover / 2)

    # sum(u) >= (1 + fraction) * uWT
    # (typo `constrain_name` fixed and the variable reused as the constraint
    # name, matching inhibit_metabolite)
    constraint_name = "take_less_%s" % metabolite.id
    if constraint_name not in model.constraints:
        increase_turnover_constraint = model.solver.interface.Constraint(sum(aux_variables.values()),
                                                                         name=constraint_name,
                                                                         lb=min_production_turnover)
        model.add_cons_vars(increase_turnover_constraint)
    else:
        increase_turnover_constraint = model.constraints[constraint_name]
        increase_turnover_constraint.lb = min_production_turnover

    return exchange
def inhibit_metabolite(model, metabolite, reference_dist, fraction=0.5, allow_accumulation=True, constant=1e4):
    """
    Inhibits the usage of a metabolite based on a reference flux distribution.

    Parameters
    ----------
    model : Model
        A constraint-based model.
    metabolite : cobra.Metabolite
        A metabolite.
    reference_dist : dict, FluxDistributionResult
        The result of a FBA like simulation. Alternative can be dictionaries of reaction.id -> flux.
    fraction : float
        How much does it inhibits the reactions. A float applies the same amount of inhibition. A dictionary must
        contain an inhibition percentage to all reactions associated with the metabolite.
    allow_accumulation : bool
        Allow to accumulate the metabolite (add a exchange reaction).
    constant : float
        A large number (like 10000) used as the big-M in indicator constraints.

    Returns
    -------
    cameo.core.Reaction, None
        If allow accumulation returns the exchange reaction associated with the metabolite.
    """
    # Only constrain single-compartment reactions (transporters are skipped).
    reactions = [r for r in metabolite.reactions if len(set(m.compartment for m in r.metabolites)) == 1]
    if isinstance(reference_dist, FluxDistributionResult):
        reference_dist = reference_dist.fluxes.to_dict()
    elif isinstance(reference_dist, Series):
        reference_dist = reference_dist.to_dict()
    if not isinstance(reference_dist, dict):
        raise ValueError("'reference_dist' must be a dict or FluxDistributionResult")

    exchanges = DictList(model.exchanges)
    exchange = None
    if allow_accumulation:
        # Reuse an existing exchange/demand reaction for the species if present.
        species_id = metabolite.id[:-2]
        if "EX_%s_e" % species_id in exchanges:
            exchange = model.reactions.get_by_id("EX_%s_e" % species_id)
        elif "DM_%s_e" % species_id in exchanges:
            exchange = model.reactions.get_by_id("DM_%s_e" % species_id)
        else:
            reaction_id = "INHIBIT_%s" % metabolite.id
            if reaction_id in model.reactions:
                exchange = model.reactions.get_by_id(reaction_id)
            else:
                exchange = model.add_boundary(metabolite, type="inhibit sink", reaction_id=reaction_id, lb=0)

    aux_variables = {}
    ind_variables = {}

    # Total absolute flux through the metabolite in the reference distribution;
    # half of it is the consumption (== production) turnover at steady state.
    turnover = sum(abs(r.metabolites[metabolite] * reference_dist.get(r.id, 0)) for r in metabolite.reactions)

    for reaction in reactions:
        coefficient = reaction.metabolites[metabolite]
        # Optimization to reduce y variables and problem complexity:
        # Irreversible reactions that only produce the metabolite can be ignored because they will not contribute
        # to the consumption turnover. Reactions that only consume the metabolite can be added directly into the
        # sum constraint. This allows for a less complex problem with less variables.
        if not reaction.reversibility:
            if coefficient > 0:  # skip reactions that can only produce the metabolite
                continue
            else:  # keep the v*coefficient value for reactions that can only consume the metabolite
                aux_variables[reaction.id] = - reaction.flux_expression * coefficient
                continue
        to_add = []
        ind_var_id = "y_%s" % reaction.id
        aux_var_id = "u_%s" % reaction.id
        try:
            ind_var = model.solver.variables[ind_var_id]
            aux_var = model.solver.variables[aux_var_id]
        except KeyError:
            ind_var = model.solver.interface.Variable(ind_var_id, type='binary')
            aux_var = model.solver.interface.Variable(aux_var_id, lb=0)
            to_add += [ind_var, aux_var]

        aux_variables[reaction.id] = aux_var
        ind_variables[reaction.id] = ind_var

        upper_indicator_constraint_name = "ind_%s_u" % reaction.id
        lower_indicator_constraint_name = "ind_%s_l" % reaction.id
        auxiliary_constraint_a_name = "aux_%s_a" % reaction.id
        auxiliary_constraint_b_name = "aux_%s_b" % reaction.id
        auxiliary_constraint_c_name = "aux_%s_c" % reaction.id
        auxiliary_constraint_d_name = "aux_%s_d" % reaction.id

        try:
            model.solver.constraints[upper_indicator_constraint_name]
        except KeyError:
            # constrain y to be 0 if Sv >= 0 (production); here y = 1 marks
            # consumption, i.e. the *negative* side of Sv:
            #      -M        0        M
            # Sv <-------------|---------------->
            #          y=1    |    y=0

            # -Sv - My <= 0
            # if y = 1 then Sv >= -M
            # if y = 0 then Sv >= 0
            upper_indicator_expression = - coefficient * reaction.flux_expression - ind_var * constant
            ind_constraint_u = model.solver.interface.Constraint(upper_indicator_expression,
                                                                 name=upper_indicator_constraint_name,
                                                                 ub=0)
            # -Sv + M(1-y) >= 0
            # if y = 1 then Sv <= 0
            # if y = 0 then Sv <= M
            lower_indicator_expression = - coefficient * reaction.flux_expression + constant - ind_var * constant
            ind_constraint_l = model.solver.interface.Constraint(lower_indicator_expression,
                                                                 name=lower_indicator_constraint_name,
                                                                 lb=0)
            # a) -My + u <= 0
            # b)  My + u >= 0
            # if y = 0, u = 0
            # if y = 1, -M <= u <= M
            aux_indicator_expression_a = -constant * ind_var + aux_var
            aux_constraint_a = model.solver.interface.Constraint(aux_indicator_expression_a,
                                                                 name=auxiliary_constraint_a_name,
                                                                 ub=0)
            aux_indicator_expression_b = constant * ind_var + aux_var
            aux_constraint_b = model.solver.interface.Constraint(aux_indicator_expression_b,
                                                                 name=auxiliary_constraint_b_name,
                                                                 lb=0)
            # c) -M(1-y) + u + viSi <= 0
            # d)  M(1-y) + u + viSi >= 0
            # if y = 1 then 0 <= u + viSi <= 0 (u tracks the consumption flux)
            # if y = 0 then -M <= u + viSi <= M
            aux_indicator_expression_c = -constant * (1 - ind_var) + aux_var + reaction.flux_expression * coefficient
            aux_constraint_c = model.solver.interface.Constraint(aux_indicator_expression_c,
                                                                 name=auxiliary_constraint_c_name,
                                                                 ub=0)
            aux_indicator_expression_d = constant * (1 - ind_var) + aux_var + reaction.flux_expression * coefficient
            aux_constraint_d = model.solver.interface.Constraint(aux_indicator_expression_d,
                                                                 name=auxiliary_constraint_d_name,
                                                                 lb=0)
            to_add += [ind_constraint_l, ind_constraint_u, aux_constraint_a,
                       aux_constraint_b, aux_constraint_c, aux_constraint_d]

        model.add_cons_vars(to_add)

    max_production_turnover = (1 - fraction) * (turnover / 2)

    # sum(u) <= (1-fraction) * uWT
    # NOTE(review): the constraint id "take_more_..." looks swapped with
    # compete_metabolite's "take_less_..." -- verify the intended naming.
    constraint_name = "take_more_%s" % metabolite.id
    if constraint_name not in model.constraints:
        decrease_turnover_constraint = model.solver.interface.Constraint(sum(aux_variables.values()),
                                                                         name=constraint_name,
                                                                         ub=max_production_turnover)
        model.add_cons_vars(decrease_turnover_constraint)
    else:
        decrease_turnover_constraint = model.constraints[constraint_name]
        decrease_turnover_constraint.ub = max_production_turnover
    return exchange
def knockout_metabolite(model, metabolite, ignore_transport=True, allow_accumulation=True):
    """
    Knocks out the usage of a metabolite by blocking the reactions that use it.

    Reversible reactions involving the metabolite are closed completely, while
    irreversible consumers have their lower bound set to 0. Exchange reactions
    are left untouched and, optionally, a sink is added so the metabolite can
    accumulate.

    Parameters
    ----------
    model : Model
        A constraint-based model.
    metabolite: cobra.Metabolite
        A metabolite.
    ignore_transport : bool
        Choose to ignore transport reactions.
    allow_accumulation : bool
        Allow to accumulate the metabolite (add a exchange reaction).

    Returns
    -------
    cameo.core.Reaction, None
        If allow accumulation returns the exchange reaction associated with the metabolite.
    """
    assert isinstance(model, Model)
    reactions = metabolite.reactions
    if ignore_transport:
        # Keep only single-compartment reactions (i.e. drop transporters).
        reactions = [r for r in reactions if len(set(m.compartment for m in r.metabolites)) == 1]

    # Wrap in a DictList so the "EX_%s_e"/"DM_%s_e" *string* lookups below
    # actually work (a plain list would never match a str against a Reaction);
    # this mirrors compete_metabolite/inhibit_metabolite.
    exchanges = DictList(model.exchanges)
    for reaction in reactions:
        assert isinstance(reaction, Reaction)
        if reaction in exchanges:
            continue
        if reaction.reversibility:
            reaction.bounds = (0, 0)
        elif reaction.metabolites[metabolite] < 0:
            # Irreversible consumer: block the (forward) consuming direction.
            reaction.lower_bound = 0

    exchange = None
    if allow_accumulation:
        species_id = metabolite.id[:-2]
        if "EX_%s_e" % species_id in exchanges:
            exchange = model.reactions.get_by_id("EX_%s_e" % species_id)
        elif "DM_%s_e" % species_id in exchanges:
            exchange = model.reactions.get_by_id("DM_%s_e" % species_id)
        else:
            reaction_id = "KO_%s" % metabolite.id
            if reaction_id in model.reactions:
                exchange = model.reactions.get_by_id(reaction_id)
            else:
                exchange = model.add_boundary(metabolite, type="ko sink", reaction_id=reaction_id, lb=0)
    return exchange
def apply_anti_metabolite(model, metabolites, essential_metabolites, reference, inhibition_fraction=.0,
                          competition_fraction=.0, allow_accumulation=True):
    """
    Apply an anti-metabolite in the context of a model without knowing whether
    it is activating or inhibiting.

    Parameters
    ----------
    model : cameo.core.SolverBasedModel
        A constraint-based model.
    metabolites : list
        Metabolites of the same species.
    essential_metabolites : list
        A list of essential metabolites.
    reference : dict, cameo.core.FluxDistributionResult
        A flux distribution.
    inhibition_fraction : float
        How much a metabolite inhibits.
    competition_fraction : float
        How much a metabolite competes.
    allow_accumulation : bool
        Allow accumulation of the metabolite.

    Returns
    -------
    set
        Exchange reactions added for accumulation.
    """
    exchanges = set()
    # Essential species are competed for; everything else is inhibited.
    if any(met in essential_metabolites for met in metabolites):
        action, fraction = compete_metabolite, competition_fraction
    else:
        action, fraction = inhibit_metabolite, inhibition_fraction
    for metabolite in metabolites:
        exchanges.add(action(model, metabolite, reference,
                             allow_accumulation=allow_accumulation,
                             fraction=fraction))
    return exchanges
| en | 0.754344 | # Copyright 2017 <NAME> and The Novo Nordisk Foundation Center for Biosustainability, DTU. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Increases the usage of a metabolite based on a reference flux distributions. Parameters ---------- model : Model A constraint-based model. metabolite : cobra.Metabolite A metabolite. reference_dist : dict or FluxDistributionResult The result of a FBA like simulation. Alternative can be dictionaries of reaction.id -> flux. fraction : float How much does it requires the reactions to go up. allow_accumulation : bool Allow to accumulate the metabolite (add a exchange reaction). constant : float A large number (like 10000). Returns ------- cobra.core.Reaction If allow accumulation returns the exchange reaction associated with the metabolite. # Optimization to reduce y variables and problem complexity: # Irreversible reactions that only produce the metabolite can be ignored because they will not contribute # to the consumption turnover. Reactions that only consume the metabolite can be added directly into the # sum constraint. This allows for a less complex problem with less variables. 
# skip reactions that can only produce the metabolite # keep the v*coefficient value for reactions that can only consume the metabolite # constraint y to be 0 if Sv >= 0 (production) # -M 0 M # Sv <-------------|----------------> # y=0 | y=1 # Sv - My <= 0 # if y = 1 then Sv <= M # if y = 0 then Sv <= 0 # Sv + M(1-y) >= 0 # if y = 1 then Sv >= 0 # if y = 0 then Sv >= -M # a) -My + u <= 0 # b) My + u >= 0 # if y = 0, u = 0 # if y = 1, -M <= u <= M # # # c) -M(1-y) + u - viSi <= 0 # # d) M(1-y) + u - viSi >= 0 # # # if y = 1 then 0 <= u - viSi <= 0 # # if y = 0 then -M <= u - viSi <= M # sum(u) >= (1 + fraction) * uWT Inhibits the usage of a metabolite based on a reference flux distributions. Parameters ---------- model : Model A constraint-based model. metabolite : cobra.Metabolite A metabolite. reference_dist : dict, FluxDistributionResult The result of a FBA like simulation. Alternative can be dictionaries of reaction.id -> flux. fraction : float How much does it inhibits the reactions. A float applies the same amount of inhibition. A dictionary must contain an inhibition percentage to all reactions associated with the metabolite. allow_accumulation : bool Allow to accumulate the metabolite (add a exchange reaction). constant : float A large number (like 10000). Returns ------- cameo.core.Reaction, None If allow accumulation returns the exchange reaction associated with the metabolite. # Optimization to reduce y variables and problem complexity: # Irreversible reactions that only produce the metabolite can be ignored because they will not contribute # to the consumption turnover. Reactions that only consume the metabolite can be added directly into the # sum constraint. This allows for a less complex problem with less variables. 
# skip reactions that can only produce the metabolite # keep the v*coefficient value for reactions that can only consume the metabolite # constraint y to be 0 if Sv >= 0 (production) # -M 0 M # Sv <-------------|----------------> # y=0 | y=1 # -Sv - My <= 0 # if y = 1 then Sv <= M # if y = 0 then Sv >= 0 # -Sv + M(1-y) >= 0 # if y = 1 then Sv <= 0 # if y = 0 then Sv <= M # a) -My + u <= 0 # b) My + u >= 0 # if y = 0, u = 0 # if y = 1, -M <= u <= M # # # c) -M(1-y) + u + viSi <= 0 # # d) M(1-y) + u + viSi >= 0 # # # if y = 1 then 0 <= u + viSi <= 0 # # if y = 0 then -M <= u + viSi <= M # sum(u) <= (1-fraction) * uWT Inhibits the usage of a metabolite based on a reference flux distributions. Parameters ---------- model : Model A constraint-based model. metabolite: cobra.Metabolite A metabolite. ignore_transport : bool Choose to ignore transport reactions. allow_accumulation : bool Allow to accumulate the metabolite (add a exchange reaction). Returns ------- cameo.core.Reaction, None If allow accumulation returns the exchange reaction associated with the metabolite. Apply a metabolite in the context of a model without knowing if it is activating or inhibiting. Parameters ---------- model : cameo.core.SolverBasedModel A constraint-based model. metabolites : list Metabolites of the same species. essential_metabolites : list A list of essential metabolites. reference : dict, cameo.core.FluxDistributionResult A flux distribution. inhibition_fraction : float How much a metabolite inhibits. competition_fraction : float How much a metabolite competes. allow_accumulation : bool Allow accumulation of the metabolite. Returns ------- set Exchange reactions added for accumulation. | 2.155912 | 2 |
inkcut-master/inkcut/device/filters/overcut.py | ilnanny/Inkscape-addons | 3 | 6614494 | """
Copyright (c) 2018, <NAME>.
Distributed under the terms of the GPL v3 License.
The full license is in the file LICENSE, distributed with this software.
Created on Dec 14, 2018
@author: jrm
"""
from atom.api import Float, Enum, Instance
from inkcut.device.plugin import DeviceFilter, Model
from inkcut.core.utils import unit_conversions
from enaml.qt.QtGui import QPainterPath
class OvercutConfig(Model):
    #: Overcut distance in user units (0 or negative disables the filter)
    overcut = Float(strict=False).tag(config=True)

    #: Units used to display the overcut distance in the UI
    overcut_units = Enum(*unit_conversions.keys()).tag(config=True)

    def _default_overcut_units(self):
        # Millimeters are the default display unit.
        return 'mm'
class OvercutFilter(DeviceFilter):
    #: Filter configuration (overcut distance and display units)
    config = Instance(OvercutConfig, ()).tag(config=True)

    def apply_to_polypath(self, polypath):
        """ Apply the filter to the polypath. It's much easier doing this
        after conversion to polypaths.

        Parameters
        ----------
        polypath: List of QPolygon
            List of polygons to process

        Returns
        -------
        polypath: List of QPolygon
            List of polygons with the filter applied

        """
        d = self.config.overcut
        if d <= 0:
            # Overcut disabled: pass the path through unchanged.
            return polypath
        result = []
        for poly in polypath:
            # Only closed shapes need an overcut; open paths are left as-is.
            if poly.isClosed():
                self.apply_overcut(poly, d)
            result.append(poly)
        return result

    def apply_overcut(self, poly, overcut):
        """ Apply overcut to the given polygon by going "past" by overcut
        distance.

        Points are appended to `poly` *while* iterating it: because the polygon
        is closed (last point == first point), the distance along the outline
        from the start equals the distance past the closing point, so each
        point within `overcut` of the start is re-appended after the closure
        (retracing the outline's beginning) and the final point is interpolated
        exactly at `overcut`. The grow-while-iterating trick also lets the
        retrace wrap around more than once if `overcut` exceeds the perimeter.
        """
        # Use a QPainterPath to track the distance in c++
        path = QPainterPath()
        for i, p in enumerate(poly):
            if i == 0:
                path.moveTo(p)
                continue  # Don't add a double point
            path.lineTo(p)
            # Check if that point is past the distance we need to go
            if path.length() > overcut:
                # Interpolate the exact point at `overcut` distance and stop.
                t = path.percentAtLength(overcut)
                poly.append(path.pointAtPercent(t))
                return  # Done!
            else:
                # Add the point and go to the next
                poly.append(p)
| """
Copyright (c) 2018, <NAME>.
Distributed under the terms of the GPL v3 License.
The full license is in the file LICENSE, distributed with this software.
Created on Dec 14, 2018
@author: jrm
"""
from atom.api import Float, Enum, Instance
from inkcut.device.plugin import DeviceFilter, Model
from inkcut.core.utils import unit_conversions
from enaml.qt.QtGui import QPainterPath
class OvercutConfig(Model):
    #: Overcut length, expressed in user units; values <= 0 turn the filter off
    overcut = Float(strict=False).tag(config=True)

    #: Display units for the overcut setting
    overcut_units = Enum(*unit_conversions.keys()).tag(config=True)

    def _default_overcut_units(self):
        # Default to millimeters.
        return 'mm'
class OvercutFilter(DeviceFilter):
    #: Configuration holding the overcut distance and its display units
    config = Instance(OvercutConfig, ()).tag(config=True)

    def apply_to_polypath(self, polypath):
        """ Apply the filter to the polypath. It's much easier doing this
        after conversion to polypaths.

        Parameters
        ----------
        polypath: List of QPolygon
            List of polygons to process

        Returns
        -------
        polypath: List of QPolygon
            List of polygons with the filter applied

        """
        d = self.config.overcut
        if d <= 0:
            # Nothing to do when the overcut distance is zero or negative.
            return polypath
        result = []
        for poly in polypath:
            # Open paths have no closure to overcut, so they pass through.
            if poly.isClosed():
                self.apply_overcut(poly, d)
            result.append(poly)
        return result

    def apply_overcut(self, poly, overcut):
        """ Apply overcut to the given polygon by going "past" by overcut
        distance.

        NOTE: `poly` is deliberately extended while it is being iterated.
        Since the polygon is closed, arc length measured from the first point
        equals the distance traveled past the closing point, so the points
        within `overcut` of the start are re-appended after the closure and
        the last point is interpolated exactly at `overcut`; iterating the
        growing polygon allows the retrace to wrap around if needed.
        """
        # Use a QPainterPath to track the distance in c++
        path = QPainterPath()
        for i, p in enumerate(poly):
            if i == 0:
                path.moveTo(p)
                continue  # Don't add a double point
            path.lineTo(p)
            # Check if that point is past the distance we need to go
            if path.length() > overcut:
                # Clip the final point exactly at the overcut distance.
                t = path.percentAtLength(overcut)
                poly.append(path.pointAtPercent(t))
                return  # Done!
            else:
                # Add the point and go to the next
                poly.append(p)
| en | 0.867385 | Copyright (c) 2018, <NAME>. Distributed under the terms of the GPL v3 License. The full license is in the file LICENSE, distributed with this software. Created on Dec 14, 2018 @author: jrm #: Overcut in user units #: Units for display #: Change config Apply the filter to the polypath. It's much easier doing this after conversion to polypaths. Parameters ---------- polypath: List of QPolygon List of polygons to process Returns ------- polypath: List of QPolygon List of polygons with the filter applied Apply overcut to the given polygon by going "past" by overcut distance. # Use a QPainterPath to track the distance in c++ # Don't add a double point # Check if that point is past the distance we need to go # Done! # Add the point and go to the next | 2.50013 | 3 |
appyter/ext/subprocess.py | MaayanLab/jupyter-template | 0 | 6614495 | import os
import signal
import multiprocessing as mp
def interrupt(proc: mp.Process):
    """Stop *proc* gently with SIGINT, falling back to terminate().

    SIGINT lets the child run its KeyboardInterrupt/cleanup handlers. If the
    process is already gone the lookup error is ignored; a KeyboardInterrupt
    in the *parent* is propagated; any other failure falls back to a hard
    ``Process.terminate()``.
    """
    try:
        os.kill(proc.pid, signal.SIGINT)
    except ProcessLookupError:
        # The process already exited -- nothing to do.
        pass
    except KeyboardInterrupt:
        raise
    except BaseException:  # explicit spelling of the original bare `except:`
        proc.terminate()
| import os
import signal
import multiprocessing as mp
def interrupt(proc: mp.Process):
    """Send SIGINT to *proc*; escalate to terminate() if that fails.

    A ProcessLookupError (process already dead) is swallowed and a
    KeyboardInterrupt in the calling process is re-raised. Anything else
    triggers a hard ``terminate()`` as a last resort.
    """
    try:
        os.kill(proc.pid, signal.SIGINT)
    except ProcessLookupError:
        # Already exited; ignore.
        pass
    except KeyboardInterrupt:
        raise
    except BaseException:  # was a bare `except:`; made explicit, same behavior
        proc.terminate()
| none | 1 | 2.681216 | 3 | |
Models/DeBruijn/Graph.py | SownBanana/DNA-Decoder-Simulator | 0 | 6614496 | <gh_stars>0
import networkx as nx
import matplotlib.pyplot as plt
from Models.DeBruijn.Node import Node
import numpy as np
import math
class Graph:
    def __init__(self, data, data_length=200, kmer_size=4, head=None, tail=None, prune=0):
        # edge key "src+dst" -> id of the read that last traversed that edge
        self.build_visited = {}
        # networkx mirrors of the graph, used only for visualisation
        self.g_vis = nx.DiGraph()
        # same visualisation graph, but only nodes heavier than `prune`
        self.g_vis_pruned = nx.DiGraph()
        # adjacency: kmer -> head of a linked list of successor Nodes
        self.graph = {}
        # kmer -> Node tracking in/out edge weights
        self.vertexes = {}
        self.is_2_way = False
        self.phase = 'build'
        self.data_length = data_length
        self.head = head
        self.tail = tail
        self.datas = data
        self.kmer_size = kmer_size
        # assert data_length % kmer_size == 0, f'data_length={data_length} is not divisible by kmer_size={kmer_size}'
        # number of kmers (vertices) expected per read of `data_length`
        self.v_num = data_length - kmer_size + 1
        self.prune = prune
def set_phase(self, phase='traversal'):
self.phase = phase
def config(self, kmer_size=4, is_2_way=False, head=None, tail=None):
self.kmer_size = kmer_size
self.is_2_way = is_2_way
self.head = head
self.tail = tail
def get_vertexes(self):
return [kmer for kmer, _ in self.vertexes.items()]
def b_visit(self, src, dst, key):
i = src+dst
if not i in self.build_visited:
self.build_visited.update({i: key})
else:
self.build_visited[i] = key
    def _update_edge_weight(self, src, dst):
        # Bump the weight counters on both endpoints of the edge.
        self.vertexes[src].edge_out()
        self.vertexes[dst].edge_in()
        # Mirror the edge into the visualisation graph with its current weight.
        self.g_vis.add_edge(src, dst, weight=self.vertexes[dst].w_in)
        # Only edges whose destination is heavy enough survive pruning.
        if self.vertexes[dst].weight() > self.prune:
            self.g_vis_pruned.add_edge(src, dst, weight=self.vertexes[dst].w_in)
def _add_new_vertex(self, src, dst, dst_node):
self.graph.update({src: dst_node})
if not src in self.vertexes:
src_node = Node(src)
self.vertexes.update({src: src_node})
else:
src_node = self.vertexes[src]
if not dst in self.vertexes:
dst_node = Node(dst)
self.vertexes.update({dst: dst_node})
else:
dst_node = self.vertexes[dst]
    def add_edge(self, src, dst, key=0):
        """Insert edge src->dst for read `key`, updating weights and bookkeeping."""
        dst_node = Node(dst)
        if src in self.graph:
            # Prepend a new successor node unless the edge already exists --
            # except when the *same* read traverses the edge again, in which
            # case it is re-inserted (presumably to count repeats within one
            # read; TODO confirm intent).
            if not self.graph[src].has(dst) \
                    or ((src+dst) in self.build_visited and self.build_visited[src+dst] == key):
                dst_node.next = self.graph[src]
                self.graph[src] = dst_node
                if not dst in self.vertexes:
                    self.vertexes.update({dst: dst_node})
        else:
            # First time we see `src` as a source: create its adjacency entry.
            self._add_new_vertex(src, dst, dst_node)
        # Edge weights are bumped on every call, even for duplicate edges.
        self._update_edge_weight(src, dst)
        self.b_visit(src, dst, key)
def build(self):
key = 1
for data in self.datas:
kmers = [data[i:i+self.kmer_size]
for i in range(0, len(data) - self.kmer_size + 1)]
for i in range(0, len(kmers) - 1):
self.add_edge(kmers[i], kmers[i+1], key=key)
key += 1
self.build_visited = {}
def get_next_vertex(self, v):
if type(v) is str:
if v in self.graph:
return self.graph[v]
return None
if v.vertex in self.graph:
return self.graph[v.vertex]
return None
def get_vertex_with_weight(self, v):
if type(v) is str:
if v in self.vertexes:
return self.vertexes[v]
return None
if v.vertex in self.vertexes:
return self.vertexes[v.vertex]
return None
def __repr__(self):
s = ""
for kmer, node in self.vertexes.items():
if kmer in self.graph:
node = self.graph[kmer]
else:
node = Node(None)
s += "Vertex " + str(kmer) + " - " + \
str(self.vertexes[kmer].weight(
)) + f"({self.vertexes[kmer].w_in}, {self.vertexes[kmer].w_out})" + ": "
s += str(node)
s += " \n"
return s
    def draw_de_bruijn_graph(self, weight_on=False, thickness=True, minimize_edge=False, font_color='k', node_size=800, weight_scale=1, font_size=6, pruned=False,figsize=(15,15)):
        """Render the graph with networkx/matplotlib.

        weight_on    -- draw numeric weight labels on edges
        thickness    -- scale edge width by edge weight
        minimize_edge-- normalise widths by the average weight first
        pruned       -- draw the pruned visualisation graph instead
        """
        # Choose the full graph or the pruned variant for display.
        g = self.g_vis
        if pruned:
            g = self.g_vis_pruned
        weights = None
        if thickness:
            edges = g.edges()
            weights = [g[u][v]['weight'] for u,v in edges]
            weights = np.array(weights)
            if minimize_edge:
                # Normalise so the average edge has width 1 before scaling.
                weights = weights / np.average(weights)
            weights = weights*weight_scale
        plt.figure(figsize=figsize)
        #555555
        #9ED0FD - light blue
        nx.draw_networkx(
            g, pos=nx.kamada_kawai_layout(g),
            node_shape='o', node_size=node_size, font_size=font_size,
            edge_color='#555555', width=weights, font_color=font_color
        )
        if weight_on:
            # Overlay numeric weight labels at the middle of each edge.
            nx.draw_networkx_edge_labels(
                g, pos=nx.kamada_kawai_layout(g),
                edge_labels=nx.get_edge_attributes(g, 'weight'),
                font_size=font_size+2, label_pos=0.5, rotate=False,
            )
        plt.axis('off')
plt.show() | import networkx as nx
import matplotlib.pyplot as plt
from Models.DeBruijn.Node import Node
import numpy as np
import math
class Graph:
    def __init__(self, data, data_length=200, kmer_size=4, head=None, tail=None, prune=0):
        # "src+dst" -> id of the read that last used that edge (build-time only)
        self.build_visited = {}
        # visualisation-only networkx graphs (full and pruned variants)
        self.g_vis = nx.DiGraph()
        self.g_vis_pruned = nx.DiGraph()
        # adjacency map: kmer -> linked list of successor Nodes
        self.graph = {}
        # weight map: kmer -> Node with in/out counters
        self.vertexes = {}
        self.is_2_way = False
        self.phase = 'build'
        self.data_length = data_length
        self.head = head
        self.tail = tail
        self.datas = data
        self.kmer_size = kmer_size
        # assert data_length % kmer_size == 0, f'data_length={data_length} is not divisible by kmer_size={kmer_size}'
        # expected number of kmers per read of `data_length`
        self.v_num = data_length - kmer_size + 1
        self.prune = prune
def set_phase(self, phase='traversal'):
self.phase = phase
def config(self, kmer_size=4, is_2_way=False, head=None, tail=None):
self.kmer_size = kmer_size
self.is_2_way = is_2_way
self.head = head
self.tail = tail
def get_vertexes(self):
return [kmer for kmer, _ in self.vertexes.items()]
def b_visit(self, src, dst, key):
i = src+dst
if not i in self.build_visited:
self.build_visited.update({i: key})
else:
self.build_visited[i] = key
    def _update_edge_weight(self, src, dst):
        # Increment the out-counter on the source and the in-counter on the
        # destination weight nodes.
        self.vertexes[src].edge_out()
        self.vertexes[dst].edge_in()
        # Keep the visualisation graphs in sync with the current weight.
        self.g_vis.add_edge(src, dst, weight=self.vertexes[dst].w_in)
        # The pruned view only keeps edges into sufficiently heavy nodes.
        if self.vertexes[dst].weight() > self.prune:
            self.g_vis_pruned.add_edge(src, dst, weight=self.vertexes[dst].w_in)
def _add_new_vertex(self, src, dst, dst_node):
self.graph.update({src: dst_node})
if not src in self.vertexes:
src_node = Node(src)
self.vertexes.update({src: src_node})
else:
src_node = self.vertexes[src]
if not dst in self.vertexes:
dst_node = Node(dst)
self.vertexes.update({dst: dst_node})
else:
dst_node = self.vertexes[dst]
    def add_edge(self, src, dst, key=0):
        """Add edge src->dst for read `key` and update weights/bookkeeping."""
        dst_node = Node(dst)
        if src in self.graph:
            # Insert a new successor unless the edge already exists; a repeat
            # by the *same* read is allowed through (presumably to record
            # repeats inside one read -- TODO confirm).
            if not self.graph[src].has(dst) \
                    or ((src+dst) in self.build_visited and self.build_visited[src+dst] == key):
                dst_node.next = self.graph[src]
                self.graph[src] = dst_node
                if not dst in self.vertexes:
                    self.vertexes.update({dst: dst_node})
        else:
            # New source kmer: create its adjacency entry and weight nodes.
            self._add_new_vertex(src, dst, dst_node)
        # Weights are updated for every traversal, including duplicates.
        self._update_edge_weight(src, dst)
        self.b_visit(src, dst, key)
def build(self):
key = 1
for data in self.datas:
kmers = [data[i:i+self.kmer_size]
for i in range(0, len(data) - self.kmer_size + 1)]
for i in range(0, len(kmers) - 1):
self.add_edge(kmers[i], kmers[i+1], key=key)
key += 1
self.build_visited = {}
def get_next_vertex(self, v):
if type(v) is str:
if v in self.graph:
return self.graph[v]
return None
if v.vertex in self.graph:
return self.graph[v.vertex]
return None
def get_vertex_with_weight(self, v):
if type(v) is str:
if v in self.vertexes:
return self.vertexes[v]
return None
if v.vertex in self.vertexes:
return self.vertexes[v.vertex]
return None
def __repr__(self):
s = ""
for kmer, node in self.vertexes.items():
if kmer in self.graph:
node = self.graph[kmer]
else:
node = Node(None)
s += "Vertex " + str(kmer) + " - " + \
str(self.vertexes[kmer].weight(
)) + f"({self.vertexes[kmer].w_in}, {self.vertexes[kmer].w_out})" + ": "
s += str(node)
s += " \n"
return s
def draw_de_bruijn_graph(self, weight_on=False, thickness=True, minimize_edge=False, font_color='k', node_size=800, weight_scale=1, font_size=6, pruned=False,figsize=(15,15)):
g = self.g_vis
if pruned:
g = self.g_vis_pruned
weights = None
if thickness:
edges = g.edges()
weights = [g[u][v]['weight'] for u,v in edges]
weights = np.array(weights)
if minimize_edge:
weights = weights / np.average(weights)
weights = weights*weight_scale
plt.figure(figsize=figsize)
#555555
#9ED0FD - light blue
nx.draw_networkx(
g, pos=nx.kamada_kawai_layout(g),
node_shape='o', node_size=node_size, font_size=font_size,
edge_color='#555555', width=weights, font_color=font_color
)
if weight_on:
nx.draw_networkx_edge_labels(
g, pos=nx.kamada_kawai_layout(g),
edge_labels=nx.get_edge_attributes(g, 'weight'),
font_size=font_size+2, label_pos=0.5, rotate=False,
)
plt.axis('off')
plt.show() | en | 0.747062 | # assert data_length % kmer_size == 0, f'data_length={data_length} is not divisible by kmer_size={kmer_size}' #555555 #9ED0FD - light blue | 2.857227 | 3 |
examples/waterpump_driver.py | GreenPonik/GreenPonik_WaterPump_Driver | 1 | 6614497 | <gh_stars>1-10
import time
from GreenPonik_WaterPump_Driver.WaterPumpDriver import WaterPumpDriver
if __name__ == "__main__":
    # Demo: run pump 1 for two seconds, then switch it off again.
    try:
        # The context manager guarantees the bus is released even on error.
        with WaterPumpDriver() as driver:  # default bus=1, default address=0x01
            print("My UUID is: %s" % driver.get_uuid())
            driver.set_pump_command(
                driver.I2C_REGISTERS["PUMP_1_STATE"],
                driver.I2C_COMMANDS["ON"],
            )
            time.sleep(2)  # keep the pump running for 2 seconds
            driver.set_pump_command(
                driver.I2C_REGISTERS["PUMP_1_STATE"],
                driver.I2C_COMMANDS["OFF"],
            )
    except Exception as e:
        # Broad catch is acceptable in a demo script: report and exit cleanly.
        print("Exception occurred", e)
| import time
from GreenPonik_WaterPump_Driver.WaterPumpDriver import WaterPumpDriver
if __name__ == "__main__":
# run pump one during 2sec
try:
with WaterPumpDriver() as driver: # default bus=1, default address=0x01
print("My UUIDis : %s" % driver.get_uuid())
driver.set_pump_command(
driver.I2C_REGISTERS["PUMP_1_STATE"],
driver.I2C_COMMANDS["ON"],
)
time.sleep(2)
driver.set_pump_command(
driver.I2C_REGISTERS["PUMP_1_STATE"],
driver.I2C_COMMANDS["OFF"],
)
except Exception as e:
print("Exception occured", e) | en | 0.7057 | # run pump one during 2sec # default bus=1, default address=0x01 | 2.413404 | 2 |
src/api/dataflow/flow/tasks/custom_calculate/__init__.py | Chromico/bk-base | 84 | 6614498 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class BaseCustomCalculateTaskHandler(object):
    """Base class for custom-calculate task handlers.

    Subclasses implement the context builders and start_inner/stop_inner;
    this base class wires them to the owning flow task handler, persists the
    task context, and tracks completion.
    """

    task_type = None   # subclass-specific task identifier
    _is_done = False   # completion flag backing the is_done property

    def __init__(self, flow_task_handler, op_type):
        """*op_type* selects which build_<op_type>_custom_calculate_context
        method produces the initial context."""
        self.flow_task_handler = flow_task_handler
        build_method = "build_%s_custom_calculate_context" % op_type
        # NOTE(review): this name does not match the abstract methods below
        # (build_*_custom_cal_context) -- confirm subclasses define the longer
        # "*_custom_calculate_context" names.
        self.context = getattr(self, build_method)(flow_task_handler)

    def get_geog_area_code(self):
        """Return the flow's (single) geographic area code."""
        return self.flow_task_handler.flow.geog_area_codes[0]

    def build_start_custom_cal_context(self, flow_task_handler):
        raise NotImplementedError

    def build_stop_custom_cal_context(self, flow_task_handler):
        raise NotImplementedError

    def add_context(self, context):
        """Merge extra key/values into the context and persist them."""
        self.context.update(context)
        self.update_handler_context()

    def get_context(self, key=None, default=None):
        """Read a value from the persisted task context.

        Falls back to *default* (when given) if the stored value is falsy.
        """
        value = self.flow_task_handler.get_task_context(self.task_type, key)
        if default is not None and not value:
            return default
        return value

    def update_handler_context(self):
        """Persist the in-memory context onto the flow task handler."""
        self.flow_task_handler.set_task_context(self.task_type, self.context)

    def start(self):
        """Run start_inner, invoking the matching ok/fail callback."""
        try:
            self.start_inner()
            self.start_ok_callback()
        except Exception:
            self.start_fail_callback()
            raise  # bare raise keeps the original traceback intact

    def stop(self):
        """Run stop_inner, invoking the matching ok/fail callback."""
        try:
            self.stop_inner()
            self.stop_ok_callback()
        except Exception:
            self.stop_fail_callback()
            raise  # bare raise keeps the original traceback intact

    def start_inner(self):
        raise NotImplementedError

    def stop_inner(self):
        raise NotImplementedError

    def check(self):
        """Optional periodic check hook; no-op by default."""
        pass

    @property
    def is_done(self):
        return self._is_done

    @is_done.setter
    def is_done(self, status):
        self._is_done = status

    def start_ok_callback(self):
        pass

    def start_fail_callback(self):
        pass

    def stop_ok_callback(self):
        pass

    def stop_fail_callback(self):
        pass

    def log(self, msg, level="INFO", time=None):
        """Proxy a single log line to the flow task handler."""
        self.flow_task_handler.log(msg, level=level, time=time)

    def logs(self, data):
        """Proxy a batch of log entries.

        @param data:
            [
                {
                    'msg': 'xxx',
                    'level': 'level',
                    'time': 'time',
                    'progress': 1.0
                }
            ]
        @return:
        """
        self.flow_task_handler.logs(data)

    def show_finished_jobs(self):
        pass
| # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class BaseCustomCalculateTaskHandler(object):
task_type = None
_is_done = False
def __init__(self, flow_task_handler, op_type):
self.flow_task_handler = flow_task_handler
build_method = "build_%s_custom_calculate_context" % op_type
self.context = getattr(self, build_method)(flow_task_handler)
def get_geog_area_code(self):
return self.flow_task_handler.flow.geog_area_codes[0]
def build_start_custom_cal_context(self, flow_task_handler):
raise NotImplementedError
def build_stop_custom_cal_context(self, flow_task_handler):
raise NotImplementedError
def add_context(self, context):
"""
添加额外 context
"""
self.context.update(context)
self.update_handler_context()
def get_context(self, key=None, default=None):
value = self.flow_task_handler.get_task_context(self.task_type, key)
if default is not None and not value:
return default
else:
return value
def update_handler_context(self):
"""
添加额外 context
"""
self.flow_task_handler.set_task_context(self.task_type, self.context)
def start(self):
try:
self.start_inner()
self.start_ok_callback()
except Exception as e:
self.start_fail_callback()
raise e
def stop(self):
try:
self.stop_inner()
self.stop_ok_callback()
except Exception as e:
self.stop_fail_callback()
raise e
def start_inner(self):
raise NotImplementedError
def stop_inner(self):
raise NotImplementedError
def check(self):
pass
@property
def is_done(self):
return self._is_done
@is_done.setter
def is_done(self, status):
self._is_done = status
def start_ok_callback(self):
pass
def start_fail_callback(self):
pass
def stop_ok_callback(self):
pass
def stop_fail_callback(self):
pass
def log(self, msg, level="INFO", time=None):
self.flow_task_handler.log(msg, level=level, time=time)
def logs(self, data):
"""
@param data:
[
{
'msg': 'xxx',
'level': 'level',
'time': 'time',
'progress': 1.0
}
]
@return:
"""
self.flow_task_handler.logs(data)
def show_finished_jobs(self):
pass
| en | 0.693955 | # -*- coding: utf-8 -*- Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available. Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. BK-BASE 蓝鲸基础平台 is licensed under the MIT License. License for BK-BASE 蓝鲸基础平台: -------------------------------------------------------------------- Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 添加额外 context 添加额外 context @param data: [ { 'msg': 'xxx', 'level': 'level', 'time': 'time', 'progress': 1.0 } ] @return: | 1.449495 | 1 |
Algorithms/Flow/FlowGraph/FlowGraphEdge.py | Yarintop/Data-Structures-And-Algorithms-In-Python | 0 | 6614499 | <gh_stars>0
class FlowGraphEdge:
    """Directed edge of a flow network, carrying a capacity and current flow."""

    def __init__(self, s, t, capacity, flow=0) -> None:
        # Endpoints of the edge (source and target vertices).
        self.s = s
        self.t = t
        self.capacity = capacity
        self.flow = flow

    def addFlow(self, flow):
        """Increase the edge flow, rejecting amounts that would exceed capacity."""
        updated = self.flow + flow
        if updated > self.capacity:
            raise ValueError("Flow can't be greater than capacity.")
        self.flow = updated
| class FlowGraphEdge:
def __init__(self, s, t, capacity, flow=0) -> None:
self.s = s
self.t = t
self.capacity = capacity
self.flow = flow
def addFlow(self, flow):
if self.flow + flow > self.capacity:
raise ValueError("Flow can't be greater than capacity.")
self.flow += flow | none | 1 | 3.61229 | 4 | |
XLM/color_print.py | harold-ogden-walmart/xlmulator | 18 | 6614500 | <reponame>harold-ogden-walmart/xlmulator
"""@package color_print
Print colored text to stdout.
"""
from __future__ import print_function
quiet = False
###########################################################################
def safe_print(text):
    """
    Print *text*, swallowing any exception printing may raise.

    Printing very large strings inside some containers (e.g. Docker) can
    throw; this wrapper reports the failure briefly instead of crashing.

    @param text (str) The text to print.
    """
    try:
        print(text)
        return
    except Exception as e:
        report = "ERROR: Printing text failed (len text = " + str(len(text)) + ". " + str(e)
    # Keep the error report short before attempting to print it.
    if len(report) > 100:
        report = report[:100]
    try:
        print(report)
    except:
        # At this point output is so broken we cannot report anything at all.
        pass
# Used to colorize printed text.
# ANSI escape sequences: 'g' = green, 'y' = yellow, 'r' = red.
colors = {
    'g' : '\033[92m',
    'y' : '\033[93m',
    'r' : '\033[91m'
}
# Resets the terminal color back to the default.
ENDC = '\033[0m'
###########################################################################
def output(color, text):
    """
    Print colored text to stdout.

    color - (str) The color to use. 'g' = green, 'r' = red, 'y' = yellow.
    text - (str) The text to print.
    """
    # Honor the module-wide mute switch.
    if quiet:
        return
    # Normalise and validate the requested color.
    normalized = str(color).lower()
    if normalized not in colors:
        raise ValueError("Color '" + normalized + "' not known.")
    safe_print(colors[normalized] + str(text) + ENDC)
| """@package color_print
Print colored text to stdout.
"""
from __future__ import print_function
quiet = False
###########################################################################
def safe_print(text):
"""
Sometimes printing large strings when running in a Docker container triggers exceptions.
This function just wraps a print in a try/except block to not crash when this happens.
@param text (str) The text to print.
"""
try:
print(text)
except Exception as e:
msg = "ERROR: Printing text failed (len text = " + str(len(text)) + ". " + str(e)
if (len(msg) > 100):
msg = msg[:100]
try:
print(msg)
except:
# At this point output is so messed up we can't do anything.
pass
# Used to colorize printed text.
colors = {
'g' : '\033[92m',
'y' : '\033[93m',
'r' : '\033[91m'
}
ENDC = '\033[0m'
###########################################################################
def output(color, text):
"""
Print colored text to stdout.
color - (str) The color to use. 'g' = green, 'r' = red, 'y' = yellow.
text - (str) The text to print.
"""
# Are we skipping all output?
if quiet:
return
# Is this a color we handle?
color = str(color).lower()
if (color not in colors):
raise ValueError("Color '" + color + "' not known.")
# Print the text with the color.
safe_print(colors[color] + str(text) + ENDC) | en | 0.516017 | @package color_print Print colored text to stdout. ########################################################################### Sometimes printing large strings when running in a Docker container triggers exceptions. This function just wraps a print in a try/except block to not crash when this happens. @param text (str) The text to print. # At this point output is so messed up we can't do anything. # Used to colorize printed text. ########################################################################### Print colored text to stdout. color - (str) The color to use. 'g' = green, 'r' = red, 'y' = yellow. text - (str) The text to print. # Are we skipping all output? # Is this a color we handle? # Print the text with the color. | 3.68831 | 4 |
Data Science With Python/05-importing-data-in-python-(part-1)/1-introduction-and-flat-files/pop-quiz-what-exactly-are-flat-files_.py | aimanahmedmoin1997/DataCamp | 3 | 6614501 | <gh_stars>1-10
'''
Pop quiz: what exactly are flat files?
50xp
Which of the following statements about flat files is incorrect?
Possible Answers
-Flat files consist of rows and each row is called a record.
-Flat files consist of multiple tables with structured relationships between the tables.
-A record in a flat file is composed of fields or attributes, each of which contains at
most one item of information.
-Flat files are pervasive in data science.
'''
# Flat files consist of multiple tables with structured relationships between the tables. | '''
Pop quiz: what exactly are flat files?
50xp
Which of the following statements about flat files is incorrect?
Possible Answers
-Flat files consist of rows and each row is called a record.
-Flat files consist of multiple tables with structured relationships between the tables.
-A record in a flat file is composed of fields or attributes, each of which contains at
most one item of information.
-Flat files are pervasive in data science.
'''
# Flat files consist of multiple tables with structured relationships between the tables. | en | 0.936262 | Pop quiz: what exactly are flat files? 50xp Which of the following statements about flat files is incorrect? Possible Answers -Flat files consist of rows and each row is called a record. -Flat files consist of multiple tables with structured relationships between the tables. -A record in a flat file is composed of fields or attributes, each of which contains at most one item of information. -Flat files are pervasive in data science. # Flat files consist of multiple tables with structured relationships between the tables. | 2.474715 | 2 |
rmb.py | RoseoxHu/rmb | 0 | 6614502 | # -*- coding: utf-8 -*-
import os, sys
import logging
'''
人民币大写转换
'''
'''数字映射'''
# Digit map: Arabic digit -> formal ("banker's") Chinese numeral.
num_map = {
    0: '零',
    1: '壹',
    2: '贰',
    3: '叁',
    4: '肆',
    5: '伍',
    6: '陆',
    7: '柒',
    8: '捌',
    9: '玖',
}
'''单位映射'''
# Unit map: decimal position (starting at fen) -> Chinese monetary unit.
unit_map = {
    0: '分',
    1: '角',
    2: '元',
    3: '拾',
    4: '佰',
    5: '仟',
    6: '萬',
    7: '拾',
    8: '佰',
    9: '仟',
    10: '亿',
    11: '拾',
    12: '佰',
    13: '仟',
}


def _group_to_rmb(group):
    """Convert one 4-digit group (0..9999) with 仟/佰/拾, collapsing runs of
    zeros into a single 零 placed before the next non-zero digit."""
    small_units = ['', unit_map[3], unit_map[4], unit_map[5]]  # '', 拾, 佰, 仟
    text = ''
    zero_pending = False
    for pos in range(3, -1, -1):
        digit = group // 10 ** pos % 10
        if digit == 0:
            if text:
                zero_pending = True  # only emit 零 if a digit follows
        else:
            if zero_pending:
                text += num_map[0]
                zero_pending = False
            text += num_map[digit] + small_units[pos]
    return text


def to_rmb(amount):
    ''' Convert a monetary amount in yuan to formal Chinese (RMB) writing.

    BUG FIX: the original loop dropped units when consecutive digits were
    non-zero (e.g. 1234 lost 拾/佰/仟) and emitted spurious 零分/零角 for
    whole-yuan amounts.  This implementation follows the standard rules:
    grouped by 10^4 with 萬/亿 markers, runs of zeros collapse to one 零,
    and 整 terminates amounts with no 角/分 part.
    '''
    total_fen = round(amount * 100)  # work in fen to avoid float issues
    logging.debug("total_fen: %s", total_fen)
    if total_fen == 0:
        result = '零元整'
        print(result)
        return result
    yuan, cents = divmod(total_fen, 100)
    jiao, fen = divmod(cents, 10)
    result = ''
    if yuan > 0:
        big_units = ['', unit_map[6], unit_map[10]]  # '', 萬, 亿
        groups = []
        n = yuan
        while n > 0:
            groups.append(n % 10000)
            n //= 10000
        for idx in range(len(groups) - 1, -1, -1):
            g = groups[idx]
            if g == 0:
                continue  # skipped group; the 零 is added before the next one
            piece = _group_to_rmb(g)
            # A group shorter than 4 digits following higher groups needs 零.
            if result and g < 1000:
                piece = num_map[0] + piece
            result += piece + big_units[idx]
        result += unit_map[2]  # 元
    if jiao == 0 and fen == 0:
        result += '整'
    else:
        if yuan > 0 and jiao == 0 and fen > 0:
            result += num_map[0]  # e.g. 5.05 -> 伍元零伍分
        if jiao > 0:
            result += num_map[jiao] + unit_map[1]
        if fen > 0:
            result += num_map[fen] + unit_map[0]
    print(result)
    return result
if __name__ == "__main__":
    # BUG FIX: argv[0] is always present, so the original `< 1` check could
    # never fire; fewer than 2 entries means no amount was supplied.
    if len(sys.argv) < 2:
        print('Usage: %s <金额>' % os.path.abspath(sys.argv[0]))
        sys.exit(0)
    # Configure log level and format for the whole run.
    logging.basicConfig(level=logging.NOTSET,
                        format='%(asctime)s - %(filename)s[line:%(lineno)d/%(thread)d] - %(levelname)s: %(message)s')
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    logging.debug("os.getcwd() = %s" % os.getcwd())
    try:
        # Round the input to whole fen, then cap at the supported range.
        amount = round(float(sys.argv[1]) * 100) / 100.0
        if amount >= 1000000000000:
            print('Usage: %s 金额超出范围[0, 1000000000000)' % os.path.abspath(sys.argv[0]))
            sys.exit(0)
    except Exception:
        # Non-numeric input: show usage and exit (SystemExit is not caught here).
        print('Usage: %s <金额>' % os.path.abspath(sys.argv[0]))
        sys.exit(0)
    logging.debug("amount: %s" % amount)
    logging.debug("num_map: %s" % num_map)
    logging.debug("unit_map: %s" % unit_map)
    to_rmb(amount)
| # -*- coding: utf-8 -*-
import os, sys
import logging
'''
人民币大写转换
'''
'''数字映射'''
num_map = {
0: '零',
1: '壹',
2: '贰',
3: '叁',
4: '肆',
5: '伍',
6: '陆',
7: '柒',
8: '捌',
9: '玖',
}
'''单位映射'''
unit_map = {
0: '分',
1: '角',
2: '元',
3: '拾',
4: '佰',
5: '仟',
6: '萬',
7: '拾',
8: '佰',
9: '仟',
10: '亿',
11: '拾',
12: '佰',
13: '仟',
}
def to_rmb(amount):
''' 转人民币大写 '''
amount = round(amount * 100) # 金额转为分
result = ''
pointer = 0 # 单位指针
last_remainder = 0 # 上一个余数
while (amount > 0):
remainder = int(amount % 10) # 余数
amount = int(amount / 10) # 缩小10倍
logging.debug("amount: %s, remainder: %s, pointer: %s" % (amount, remainder, pointer))
if remainder == 0:
if (pointer == 2 or pointer == 6 or pointer == 10):
# 余数为0, 元、萬、亿单位需要保留
result = '%s%s' % (unit_map[pointer], result)
else:
if last_remainder > 0:
# 余数为0, 上一个余数也为0, 避免零仟零佰
result = '%s%s' % (num_map[remainder], result)
else:
result = '%s%s%s' % (num_map[remainder], unit_map[pointer], result)
last_remainder = remainder
pointer += 1 # 从右往左
if pointer == 2 and result == '': # 无角、分尾数
result = '整'
print(result)
return result
if __name__ == "__main__":
if len(sys.argv) < 1:
print('Usage: %s <金额>' % os.path.abspath(sys.argv[0]))
sys.exit(0)
logging.basicConfig(level=logging.NOTSET,
format='%(asctime)s - %(filename)s[line:%(lineno)d/%(thread)d] - %(levelname)s: %(message)s') # 设置日志级别
os.chdir(os.path.dirname(os.path.abspath(__file__)))
logging.debug("os.getcwd() = %s" % os.getcwd())
try:
amount = round(float(sys.argv[1]) * 100) / 100.0
if amount >= 1000000000000:
print('Usage: %s 金额超出范围[0, 1000000000000)' % os.path.abspath(sys.argv[0]))
sys.exit(0)
except Exception:
print('Usage: %s <金额>' % os.path.abspath(sys.argv[0]))
sys.exit(0)
logging.debug("amount: %s" % amount)
logging.debug("num_map: %s" % num_map)
logging.debug("unit_map: %s" % unit_map)
to_rmb(amount)
| zh | 0.979585 | # -*- coding: utf-8 -*- 人民币大写转换 数字映射 单位映射 转人民币大写 # 金额转为分 # 单位指针 # 上一个余数 # 余数 # 缩小10倍 # 余数为0, 元、萬、亿单位需要保留 # 余数为0, 上一个余数也为0, 避免零仟零佰 # 从右往左 # 无角、分尾数 # 设置日志级别 | 3.057708 | 3 |
libfuturize/test_scripts/py2/implicit_relative_import.py | kojoidrissa/python-future | 1 | 6614503 | <filename>libfuturize/test_scripts/py2/implicit_relative_import.py
'''
Tests whether implicit relative imports are turned into explicit ones.
'''
from __future__ import absolute_import
from future.builtins import *
from . import xrange
| <filename>libfuturize/test_scripts/py2/implicit_relative_import.py
'''
Tests whether implicit relative imports are turned into explicit ones.
'''
from __future__ import absolute_import
from future.builtins import *
from . import xrange
| en | 0.843985 | Tests whether implicit relative imports are turned into explicit ones. | 1.225085 | 1 |
Services/UserProfiling/api_calls.py | akaeme/TeamUp | 0 | 6614504 | <reponame>akaeme/TeamUp
import json
import requests
import requests
#r = requests.post('http://127.0.0.1:5007/userProfiling/v1.0/userProfile/create', json={'user_id': 1234, 'username':'ruizz' ,'mail': '<EMAIL>', 'tlm': 123455,'access_token':'<PASSWORD>' })
#r = requests.post('http://127.0.0.1:5007/userProfiling/v1.0/userProfile/update', json={'user_id': 1234, 'username':'ruioliveirazz' , 'tlm': 455, 'access_token':'<PASSWORD>' })
#r = requests.get('http://127.0.0.1:5007/userProfiling/v1.0/userProfile/profile', params={'user_id':1234, 'access_token':'<PASSWORD>'})
#r = requests.get('http://127.0.0.1:5007/userProfiling/v1.0/userProfile/mobile', params={'user_id':1234, 'access_token':'<PASSWORD>'})
#r = requests.delete('http://127.0.0.1:5007/userProfiling/v1.0/userProfile/', params={'user_id': 1234, 'access_token':'<PASSWORD>'})
# NOTE(review): every assignment to `r` above is commented out, so the print
# below raises NameError when the script runs -- uncomment one request first.
print(r.json())
| import json
import requests
import requests
#r = requests.post('http://127.0.0.1:5007/userProfiling/v1.0/userProfile/create', json={'user_id': 1234, 'username':'ruizz' ,'mail': '<EMAIL>', 'tlm': 123455,'access_token':'<PASSWORD>' })
#r = requests.post('http://127.0.0.1:5007/userProfiling/v1.0/userProfile/update', json={'user_id': 1234, 'username':'ruioliveirazz' , 'tlm': 455, 'access_token':'<PASSWORD>' })
#r = requests.get('http://127.0.0.1:5007/userProfiling/v1.0/userProfile/profile', params={'user_id':1234, 'access_token':'<PASSWORD>'})
#r = requests.get('http://127.0.0.1:5007/userProfiling/v1.0/userProfile/mobile', params={'user_id':1234, 'access_token':'<PASSWORD>'})
#r = requests.delete('http://127.0.0.1:5007/userProfiling/v1.0/userProfile/', params={'user_id': 1234, 'access_token':'<PASSWORD>'})
print(r.json()) | en | 0.212306 | #r = requests.post('http://127.0.0.1:5007/userProfiling/v1.0/userProfile/create', json={'user_id': 1234, 'username':'ruizz' ,'mail': '<EMAIL>', 'tlm': 123455,'access_token':'<PASSWORD>' }) #r = requests.post('http://127.0.0.1:5007/userProfiling/v1.0/userProfile/update', json={'user_id': 1234, 'username':'ruioliveirazz' , 'tlm': 455, 'access_token':'<PASSWORD>' }) #r = requests.get('http://127.0.0.1:5007/userProfiling/v1.0/userProfile/profile', params={'user_id':1234, 'access_token':'<PASSWORD>'}) #r = requests.get('http://127.0.0.1:5007/userProfiling/v1.0/userProfile/mobile', params={'user_id':1234, 'access_token':'<PASSWORD>'}) #r = requests.delete('http://127.0.0.1:5007/userProfiling/v1.0/userProfile/', params={'user_id': 1234, 'access_token':'<PASSWORD>'}) | 2.366884 | 2 |
settings-template.py | andoniaf/python-zionsparbot | 1 | 6614505 | <gh_stars>1-10
TOKEN = '' # bot token
USERS = "" # whitelisted chat IDs
# Log directory and file name
LOGDIR = ""
LOGFILE = "zionsparbot.log"
# Directory where the bot lives (used by uptime.log_size)
path = ""
| TOKEN = '' # token del bot
USERS = "" # Chat ID permitidos
# Carpeta y directorio del log
LOGDIR = ""
LOGFILE = "zionsparbot.log"
# Directorio donde se encuentra el bot (para la funcion uptime.log_size)
path = "" | es | 0.89091 | # token del bot # Chat ID permitidos # Carpeta y directorio del log # Directorio donde se encuentra el bot (para la funcion uptime.log_size) | 1.201622 | 1 |
analysis/bias_variance_analysis.py | dsysoev/fun-with-tensorflow | 0 | 6614506 | <gh_stars>0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tempfile
import argparse
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
import pandas as pd
import matplotlib.pyplot as plt
from normal_equation import linear_regression_normal_equation
import matplotlib
matplotlib.style.use('seaborn')
def plot_data():
    """ plot chart from prepared data """
    # Fail loudly if the results CSV has not been produced yet.
    if not os.path.isfile(FLAGS.results_file):
        raise IOError("No such file '{}'".format(FLAGS.results_file))
    # Load the cached scores, indexed by dataset size.
    frame = pd.read_csv(FLAGS.results_file, index_col='num_objects')
    # Every row shares one regularization strength; surface it in the title.
    lambda_value = frame['lambda'].unique()[0]
    frame = frame.drop('lambda', axis=1)
    ax = frame.plot(alpha=1)
    ax.set_title("""California housing with Linear Regression
    L2 regularization lambda = {}""".format(lambda_value))
    ax.set_xlabel('number of objects')
    ax.set_ylabel('MSLE')
    ax.set_xscale('log')
    ax.legend()
    plt.show()
def main():
    # Recompute results only when forced or when no cached CSV exists yet.
    if FLAGS.force or not os.path.isfile(FLAGS.results_file):
        os.makedirs(os.path.dirname(FLAGS.results_file), exist_ok=True)
        # get data
        housing = fetch_california_housing()
        # dataset sizes to sweep over (used for both train and test splits)
        num_objects_list = [50, 100, 500, 1000, 5000, 10000]
        lambda_value = FLAGS.lambda_value
        # collect train/test scores for each dataset size
        train_score_list, test_score_list, lambda_list = [], [], []
        for i in num_objects_list:
            # draw i training and i test objects with a fixed seed
            trainx, testx, trainy, testy = train_test_split(
                housing.data, housing.target, test_size=i, train_size=i,
                random_state=100)
            # fit the normal-equation linear regression and score both splits
            train_score, test_score = linear_regression_normal_equation(
                trainx, testx, trainy, testy, lambda_value)
            train_score_list.append(train_score[0])
            test_score_list.append(test_score[0])
            lambda_list.append(lambda_value)
        # assemble a DataFrame of scores indexed by the number of objects
        data = pd.DataFrame({'lambda': lambda_list,
                             'train_score': train_score_list,
                             'test_score': test_score_list},
                            index=num_objects_list)
        # set num_objects as index name
        data.index.name = 'num_objects'
        # cache results so later runs can skip the computation
        data.to_csv(FLAGS.results_file, header=True)
    plot_data()
if __name__ == '__main__':
    # the results file is named after this script, minus the extension
    filename, _ = os.path.splitext(os.path.basename(__file__))
    parser = argparse.ArgumentParser()
    parser.add_argument('--force', action='store_true')
    # NOTE(review): --test_size is parsed but never read by main() -- confirm
    # whether it should feed train_test_split or be removed.
    parser.add_argument('--test_size', type=float, default=0.5)
    parser.add_argument('--lambda_value', type=float, default=0.35)
    parser.add_argument(
        '--results_file',
        type=str,
        default=os.path.join(tempfile.gettempdir(),
                             'fun-with-machine-learning',
                             filename + '.csv'),  # output data has the same name
        help='File with results')
    FLAGS, unparsed = parser.parse_known_args()
    main()
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tempfile
import argparse
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
import pandas as pd
import matplotlib.pyplot as plt
from normal_equation import linear_regression_normal_equation
import matplotlib
matplotlib.style.use('seaborn')
def plot_data():
""" plot chart from prepared data """
# check results file
if not os.path.isfile(FLAGS.results_file):
raise IOError("No such file '{}'".format(FLAGS.results_file))
# read DataFrame from results file
results = pd.read_csv(FLAGS.results_file, index_col='num_objects')
lambda_value = results['lambda'].unique()[0]
results = results.drop('lambda', axis=1)
# plot results
ax = results.plot(alpha=1)
ax.set_title("""California housing with Linear Regression
L2 regularization lambda = {}""".format(lambda_value))
ax.set_xlabel('number of objects')
ax.set_ylabel('MSLE')
ax.set_xscale('log')
ax.legend()
plt.show()
def main():
# create results file if it does not exist
if FLAGS.force or not os.path.isfile(FLAGS.results_file):
os.makedirs(os.path.dirname(FLAGS.results_file), exist_ok=True)
# get data
housing = fetch_california_housing()
# create list of number of object
num_objects_list = [50, 100, 500, 1000, 5000, 10000]
lambda_value = FLAGS.lambda_value
# collect data with different count of objects
train_score_list, test_score_list, lambda_list = [], [], []
for i in num_objects_list:
# split data
trainx, testx, trainy, testy = train_test_split(
housing.data, housing.target, test_size=i, train_size=i,
random_state=100)
# get score
train_score, test_score = linear_regression_normal_equation(
trainx, testx, trainy, testy, lambda_value)
train_score_list.append(train_score[0])
test_score_list.append(test_score[0])
lambda_list.append(lambda_value)
# create DataFrame object
data = pd.DataFrame({'lambda': lambda_list,
'train_score': train_score_list,
'test_score': test_score_list},
index=num_objects_list)
# set num_objects as index
data.index.name = 'num_objects'
# save data to csv file
data.to_csv(FLAGS.results_file, header=True)
plot_data()
if __name__ == '__main__':
# eval filename without extention
filename, _ = os.path.splitext(os.path.basename(__file__))
parser = argparse.ArgumentParser()
parser.add_argument('--force', action='store_true')
parser.add_argument('--test_size', type=float, default=0.5)
parser.add_argument('--lambda_value', type=float, default=0.35)
parser.add_argument(
'--results_file',
type=str,
default=os.path.join(tempfile.gettempdir(),
'fun-with-machine-learning',
filename + '.csv'), # output data has the same name
help='File with results')
FLAGS, unparsed = parser.parse_known_args()
main() | en | 0.824468 | plot chart from prepared data # check results file # read DataFrame from results file # plot results California housing with Linear Regression L2 regularization lambda = {} # create results file if it does not exist # get data # create list of number of object # collect data with different count of objects # split data # get score # create DataFrame object # set num_objects as index # save data to csv file # eval filename without extention # output data has the same name | 2.715805 | 3 |
one/contrib/sites/settings/models.py | riso-tech/one-platform | 5 | 6614507 | <reponame>riso-tech/one-platform
from django.contrib.sites.models import Site
from django.db.models import CASCADE, ImageField, Model, OneToOneField
from django.utils.translation import gettext_lazy as _
from .utils import setting_images_directory_path
class Setting(Model):
    """Per-site branding assets; one-to-one companion to Django's Site model."""

    site = OneToOneField(
        Site,
        on_delete=CASCADE,
        primary_key=True,
        related_name="setting",
        verbose_name="site",
    )
    favicon = ImageField(
        _("Favicon of site"),
        blank=True,
        null=True,
        upload_to=setting_images_directory_path,
    )
    logo = ImageField(
        _("Logo of site"),
        blank=True,
        null=True,
        upload_to=setting_images_directory_path,
    )
    mobile_logo = ImageField(
        _("Mobile logo of site"),
        blank=True,
        null=True,
        upload_to=setting_images_directory_path,
    )

    class Meta:
        app_label = "sites"

    def __str__(self):
        return self.site.name

    @staticmethod
    def _image_url(image, fallback):
        """Return the uploaded image's URL, or *fallback* when no file is set.

        Shared by the two *_url properties below (the original duplicated
        this conditional in each property).
        """
        return fallback if not image else getattr(image, "url")

    @property
    def logo_url(self):
        """Return logo url"""
        return self._image_url(self.logo, "/static/metronic/media/logos/logo-1-dark.svg")

    @property
    def mobile_logo_url(self):
        """Return mobile_logo url"""
        return self._image_url(self.mobile_logo, "/static/metronic/media/logos/logo-2.svg")
| from django.contrib.sites.models import Site
from django.db.models import CASCADE, ImageField, Model, OneToOneField
from django.utils.translation import gettext_lazy as _
from .utils import setting_images_directory_path
class Setting(Model):
"""Setting model is OneToOne related to Site model."""
site = OneToOneField(
Site,
on_delete=CASCADE,
primary_key=True,
related_name="setting",
verbose_name="site",
)
favicon = ImageField(
_("Favicon of site"),
blank=True,
null=True,
upload_to=setting_images_directory_path,
)
logo = ImageField(
_("Logo of site"),
blank=True,
null=True,
upload_to=setting_images_directory_path,
)
mobile_logo = ImageField(
_("Mobile logo of site"),
blank=True,
null=True,
upload_to=setting_images_directory_path,
)
def __str__(self):
return self.site.name
class Meta:
app_label = "sites"
@property
def logo_url(self):
"""Return logo url"""
return (
"/static/metronic/media/logos/logo-1-dark.svg"
if not self.logo
else getattr(self.logo, "url")
)
@property
def mobile_logo_url(self):
"""Return mobile_logo url"""
return (
"/static/metronic/media/logos/logo-2.svg"
if not self.mobile_logo
else getattr(self.mobile_logo, "url")
) | en | 0.758064 | Setting model is OneToOne related to Site model. Return logo url Return mobile_logo url | 2.143277 | 2 |
extract_image_from_video.py | rishabhjainfinal/opencv | 0 | 6614508 | <reponame>rishabhjainfinal/opencv
import cv2,os
def extract_image(video:str,fps:int,direction):
# 1. get videos frames
if not os.path.exists(direction) : os.mkdir(direction)
vidcap = cv2.VideoCapture('a.mp4')
default_fps = round(vidcap.get(cv2.CAP_PROP_FPS))
print("default fps of video is --> ",default_fps)
if fps < default_fps : steps = round(default_fps/fps)
else : steps = 1
print("new fps of video is --> ",int(default_fps/steps))
folder_path = os.path.join(direction,'image%s.jpg')
success = True
while success:
count = int(vidcap.get(1))
success,frame = vidcap.read()
if count%steps == 0 :
try : cv2.imwrite(folder_path.replace("%s",str(count)),frame) # save file
except : pass # last frame is none
if __name__=='__main__':
video = 'a.mp4'
fps = 5
image_folder = os.path.join(os.getcwd(),'images')
extract_image(video,fps,image_folder) | import cv2,os
def extract_image(video:str,fps:int,direction):
# 1. get videos frames
if not os.path.exists(direction) : os.mkdir(direction)
vidcap = cv2.VideoCapture('a.mp4')
default_fps = round(vidcap.get(cv2.CAP_PROP_FPS))
print("default fps of video is --> ",default_fps)
if fps < default_fps : steps = round(default_fps/fps)
else : steps = 1
print("new fps of video is --> ",int(default_fps/steps))
folder_path = os.path.join(direction,'image%s.jpg')
success = True
while success:
count = int(vidcap.get(1))
success,frame = vidcap.read()
if count%steps == 0 :
try : cv2.imwrite(folder_path.replace("%s",str(count)),frame) # save file
except : pass # last frame is none
if __name__=='__main__':
video = 'a.mp4'
fps = 5
image_folder = os.path.join(os.getcwd(),'images')
extract_image(video,fps,image_folder) | en | 0.662273 | # 1. get videos frames # save file # last frame is none | 2.835184 | 3 |
hippocampus/scripts/s16_cortex_testSLM.py | CNG-LAB/cng-open | 0 | 6614509 | <reponame>CNG-LAB/cng-open
"""
SLM test for the cortex-to-hippocampus connectivity for individual subfields
usage: $ python s16_cortex_testSLM.py LSUB
"""
import os, sys
import h5py
import numpy as np
from numpy import genfromtxt
# definde data directories
ddir = '../data/' # data dir
cordir = '../data/tout_cortex/'
odir = '../data/tout_group'
# final subject list after QC
subjlist = os.path.join(ddir, 'subjectListS900_QC_gr.txt'); # 709 subjects
f = open(subjlist); mylist = f.read().split("\n"); f.close()
mylist = mylist[:-1]
totnum = len(mylist)
labeling_file = '../data/tout_group/glasser.csv'
mylabel = genfromtxt(labeling_file)
print('We have now %i subjects... ' % totnum)
# subfield = 'LSUB'
subfield = sys.argv[1]
# here we go
C360_all = np.zeros((len(mylist), 360))
i = 0
for subjID in mylist:
subjsub= os.path.join(cordir, subjID + '_cortex_%s.h5' % (subfield))
with h5py.File(subjsub, "r") as f:
subjdata = np.array(f[subjID])
C360_all[i, :] = subjdata.T
i +=1
print(C360_all.shape, C360_all.mean(axis=0).max())
# labeling from 360 to 64k points
C64k_all = np.zeros((len(mylist), 64984))
for i in range(0, len(mylist)):
for j in range(1,360+1):
C64k_all[i, np.where(mylabel == j)] = C360_all[i,(j-1)]
print(C64k_all.shape, C64k_all.mean(axis=0).max())
from brainspace.datasets import load_conte69
from brainspace.mesh import mesh_elements
# load poly data for 64k surface (for the test & plotting)
surf_lh, surf_rh = load_conte69()
# write surface coordinates and triangles in a dictionary
lh_coord = np.array(mesh_elements.get_points(surf_lh)).T
rh_coord = np.array(mesh_elements.get_points(surf_rh)).T
lh_tri = np.array(mesh_elements.get_cells(surf_lh))
rh_tri = np.array(mesh_elements.get_cells(surf_rh))
D = {}
D['coord'] = np.concatenate((lh_coord, rh_coord), axis=1) # (3, 64984)
D['tri'] = np.concatenate((lh_tri, rh_tri + lh_coord.shape[1])) # (129960, 3)
# run slm
from brainstat.stats.terms import FixedEffect
from brainstat.stats.SLM import SLM
Y = C64k_all
contrast = np.ones((len(mylist),1))
term_ = FixedEffect(contrast)
model_ = 1 + term_
slm = SLM(model_, contrast = contrast)
slm.fit(Y)
Tvals = slm.t
Tvals.shape
h = h5py.File(os.path.join(odir, 'Tvals_cortex709_%s.h5' % (subfield)), 'w')
h.create_dataset('data', data = Tvals)
h.close()
| """
SLM test for the cortex-to-hippocampus connectivity for individual subfields
usage: $ python s16_cortex_testSLM.py LSUB
"""
import os, sys
import h5py
import numpy as np
from numpy import genfromtxt
# definde data directories
ddir = '../data/' # data dir
cordir = '../data/tout_cortex/'
odir = '../data/tout_group'
# final subject list after QC
subjlist = os.path.join(ddir, 'subjectListS900_QC_gr.txt'); # 709 subjects
f = open(subjlist); mylist = f.read().split("\n"); f.close()
mylist = mylist[:-1]
totnum = len(mylist)
labeling_file = '../data/tout_group/glasser.csv'
mylabel = genfromtxt(labeling_file)
print('We have now %i subjects... ' % totnum)
# subfield = 'LSUB'
subfield = sys.argv[1]
# here we go
C360_all = np.zeros((len(mylist), 360))
i = 0
for subjID in mylist:
subjsub= os.path.join(cordir, subjID + '_cortex_%s.h5' % (subfield))
with h5py.File(subjsub, "r") as f:
subjdata = np.array(f[subjID])
C360_all[i, :] = subjdata.T
i +=1
print(C360_all.shape, C360_all.mean(axis=0).max())
# labeling from 360 to 64k points
C64k_all = np.zeros((len(mylist), 64984))
for i in range(0, len(mylist)):
for j in range(1,360+1):
C64k_all[i, np.where(mylabel == j)] = C360_all[i,(j-1)]
print(C64k_all.shape, C64k_all.mean(axis=0).max())
from brainspace.datasets import load_conte69
from brainspace.mesh import mesh_elements
# load poly data for 64k surface (for the test & plotting)
surf_lh, surf_rh = load_conte69()
# write surface coordinates and triangles in a dictionary
lh_coord = np.array(mesh_elements.get_points(surf_lh)).T
rh_coord = np.array(mesh_elements.get_points(surf_rh)).T
lh_tri = np.array(mesh_elements.get_cells(surf_lh))
rh_tri = np.array(mesh_elements.get_cells(surf_rh))
D = {}
D['coord'] = np.concatenate((lh_coord, rh_coord), axis=1) # (3, 64984)
D['tri'] = np.concatenate((lh_tri, rh_tri + lh_coord.shape[1])) # (129960, 3)
# run slm
from brainstat.stats.terms import FixedEffect
from brainstat.stats.SLM import SLM
Y = C64k_all
contrast = np.ones((len(mylist),1))
term_ = FixedEffect(contrast)
model_ = 1 + term_
slm = SLM(model_, contrast = contrast)
slm.fit(Y)
Tvals = slm.t
Tvals.shape
h = h5py.File(os.path.join(odir, 'Tvals_cortex709_%s.h5' % (subfield)), 'w')
h.create_dataset('data', data = Tvals)
h.close() | en | 0.690075 | SLM test for the cortex-to-hippocampus connectivity for individual subfields usage: $ python s16_cortex_testSLM.py LSUB # definde data directories # data dir # final subject list after QC # 709 subjects # subfield = 'LSUB' # here we go # labeling from 360 to 64k points # load poly data for 64k surface (for the test & plotting) # write surface coordinates and triangles in a dictionary # (3, 64984) # (129960, 3) # run slm | 2.131557 | 2 |
tests/migrations/0017_correctmodeltypes.py | gasman/wagtail-transfer | 55 | 6614510 | # Generated by Django 3.0.11 on 2020-12-22 22:53
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('tests', '0016_advert_tags'),
]
operations = [
migrations.AddField(
model_name='advert',
name='run_until',
field=models.DateTimeField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='advert',
name='run_from',
field=models.DateField(blank=True, null=True),
),
]
| # Generated by Django 3.0.11 on 2020-12-22 22:53
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('tests', '0016_advert_tags'),
]
operations = [
migrations.AddField(
model_name='advert',
name='run_until',
field=models.DateTimeField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='advert',
name='run_from',
field=models.DateField(blank=True, null=True),
),
]
| en | 0.854056 | # Generated by Django 3.0.11 on 2020-12-22 22:53 | 1.734576 | 2 |
app/api/blueprints/foo.py | bcgov/openshift-launchpad-be-python | 0 | 6614511 | """
Defines the routes for a single REST resource called 'foo'
Blueprints are the most flexible and powerful way to define
routes in Flask. They are easy to read and they gather all
the routing logic together in one place. A common practice
is to have one blueprint per REST resource.+
A good practice is to handle all the http stuff (requests,
responses, error codes, etc.) in the blueprint file but
to keep application logic out as much as possible (single
responsibility principle). In this example, the blueprint
does not have direct access to the database but calls model
object methods that execute business functionality.
If the models start to get really complicated, it's a good idea
to put a service layer between the blueprint and the model
so that the business logic is easily tracked all in one spot.
"""
# pylint: disable=blacklisted-name; delete Foo entity
from flask import Blueprint, jsonify, request
from app.api.models.foo import Foo
FOO_BLUEPRINT = Blueprint('foo', __name__)
@FOO_BLUEPRINT.route('/api/foo', methods=['POST'], strict_slashes=False)
def post():
"""
A route to handle a request to create a new instance of the resource.
Parameters:
foo_id (int): Unique identifier for a foo
Returns:
json: Newly created Foo record
"""
post_data = request.get_json()
if not post_data:
return jsonify({'errors': ['Invalid request.']}), 400
string_field = post_data.get('string_field')
# Validate request data
if len(Foo.find_all_by_string_field(string_field)) > 0:
response_object = {
'errors': [f'String "{string_field}" already exists']
}
return jsonify(response_object), 400
record = Foo(string_field=string_field)
record.save()
return jsonify(record.to_json()), 201
@FOO_BLUEPRINT.route('/api/foo', methods=['GET'], strict_slashes=False)
def get_all():
'''A route to handle a request for a list of all instances of the resource.'''
response_object = {
'records': [foos.to_json() for foos in Foo.find_all()]
}
return jsonify(response_object), 200
@FOO_BLUEPRINT.route('/api/foo/<int:foo_id>', methods=['GET'], strict_slashes=False)
def get(foo_id):
"""
A route to handle a request for a single Foo record (looked up by its id).
Parameters:
foo_id (int): Unique identifier for a foo
Returns:
json: Corresponding Foo record
"""
record = Foo.find_by_id(foo_id)
if not record:
return jsonify({
'errors': [f'No record with id={foo_id} found.']
}), 404
return jsonify(record.to_json()), 200
@FOO_BLUEPRINT.route('/api/foo/<int:foo_id>', methods=['PUT'], strict_slashes=False)
def put(foo_id):
"""
A route to handle a request to update single existing Foo record (looked up by its id).
Parameters:
foo_id (int): Unique identifier for a foo
Returns:
json: Corresponding Foo record
"""
put_data = request.get_json()
if not put_data:
return jsonify({'errors': ['Invalid request.']}), 400
new_string_field = put_data.get('string_field')
# Validate request data
record = Foo.find_by_id(foo_id)
if not record:
response_object = {
'errors': [f'No record with id={foo_id} found.']
}
return jsonify(response_object), 404
if not isinstance(new_string_field, str):
response_object = {
'errors': ['The string_field must be a string.']
}
return jsonify(response_object), 400
record.string_field = new_string_field
record.update()
return jsonify(record.to_json()), 200
@FOO_BLUEPRINT.route('/api/foo/<int:foo_id>', methods=['DELETE'], strict_slashes=False)
def delete(foo_id):
"""
A route to handle a request to delete an existing Foo record (looked up by its id).
Parameters:
foo_id (int): Unique identifier for a foo
Returns:
json: A response code
"""
# Validate request data
record = Foo.query.filter_by(id=foo_id).first()
if not record:
response_object = {
'errors': [f'No record with id={foo_id} found.']
}
return jsonify(response_object), 404
record.delete()
return jsonify({}), 200
| """
Defines the routes for a single REST resource called 'foo'
Blueprints are the most flexible and powerful way to define
routes in Flask. They are easy to read and they gather all
the routing logic together in one place. A common practice
is to have one blueprint per REST resource.+
A good practice is to handle all the http stuff (requests,
responses, error codes, etc.) in the blueprint file but
to keep application logic out as much as possible (single
responsibility principle). In this example, the blueprint
does not have direct access to the database but calls model
object methods that execute business functionality.
If the models start to get really complicated, it's a good idea
to put a service layer between the blueprint and the model
so that the business logic is easily tracked all in one spot.
"""
# pylint: disable=blacklisted-name; delete Foo entity
from flask import Blueprint, jsonify, request
from app.api.models.foo import Foo
FOO_BLUEPRINT = Blueprint('foo', __name__)
@FOO_BLUEPRINT.route('/api/foo', methods=['POST'], strict_slashes=False)
def post():
"""
A route to handle a request to create a new instance of the resource.
Parameters:
foo_id (int): Unique identifier for a foo
Returns:
json: Newly created Foo record
"""
post_data = request.get_json()
if not post_data:
return jsonify({'errors': ['Invalid request.']}), 400
string_field = post_data.get('string_field')
# Validate request data
if len(Foo.find_all_by_string_field(string_field)) > 0:
response_object = {
'errors': [f'String "{string_field}" already exists']
}
return jsonify(response_object), 400
record = Foo(string_field=string_field)
record.save()
return jsonify(record.to_json()), 201
@FOO_BLUEPRINT.route('/api/foo', methods=['GET'], strict_slashes=False)
def get_all():
'''A route to handle a request for a list of all instances of the resource.'''
response_object = {
'records': [foos.to_json() for foos in Foo.find_all()]
}
return jsonify(response_object), 200
@FOO_BLUEPRINT.route('/api/foo/<int:foo_id>', methods=['GET'], strict_slashes=False)
def get(foo_id):
"""
A route to handle a request for a single Foo record (looked up by its id).
Parameters:
foo_id (int): Unique identifier for a foo
Returns:
json: Corresponding Foo record
"""
record = Foo.find_by_id(foo_id)
if not record:
return jsonify({
'errors': [f'No record with id={foo_id} found.']
}), 404
return jsonify(record.to_json()), 200
@FOO_BLUEPRINT.route('/api/foo/<int:foo_id>', methods=['PUT'], strict_slashes=False)
def put(foo_id):
"""
A route to handle a request to update single existing Foo record (looked up by its id).
Parameters:
foo_id (int): Unique identifier for a foo
Returns:
json: Corresponding Foo record
"""
put_data = request.get_json()
if not put_data:
return jsonify({'errors': ['Invalid request.']}), 400
new_string_field = put_data.get('string_field')
# Validate request data
record = Foo.find_by_id(foo_id)
if not record:
response_object = {
'errors': [f'No record with id={foo_id} found.']
}
return jsonify(response_object), 404
if not isinstance(new_string_field, str):
response_object = {
'errors': ['The string_field must be a string.']
}
return jsonify(response_object), 400
record.string_field = new_string_field
record.update()
return jsonify(record.to_json()), 200
@FOO_BLUEPRINT.route('/api/foo/<int:foo_id>', methods=['DELETE'], strict_slashes=False)
def delete(foo_id):
"""
A route to handle a request to delete an existing Foo record (looked up by its id).
Parameters:
foo_id (int): Unique identifier for a foo
Returns:
json: A response code
"""
# Validate request data
record = Foo.query.filter_by(id=foo_id).first()
if not record:
response_object = {
'errors': [f'No record with id={foo_id} found.']
}
return jsonify(response_object), 404
record.delete()
return jsonify({}), 200
| en | 0.881978 | Defines the routes for a single REST resource called 'foo' Blueprints are the most flexible and powerful way to define routes in Flask. They are easy to read and they gather all the routing logic together in one place. A common practice is to have one blueprint per REST resource.+ A good practice is to handle all the http stuff (requests, responses, error codes, etc.) in the blueprint file but to keep application logic out as much as possible (single responsibility principle). In this example, the blueprint does not have direct access to the database but calls model object methods that execute business functionality. If the models start to get really complicated, it's a good idea to put a service layer between the blueprint and the model so that the business logic is easily tracked all in one spot. # pylint: disable=blacklisted-name; delete Foo entity A route to handle a request to create a new instance of the resource. Parameters: foo_id (int): Unique identifier for a foo Returns: json: Newly created Foo record # Validate request data A route to handle a request for a list of all instances of the resource. A route to handle a request for a single Foo record (looked up by its id). Parameters: foo_id (int): Unique identifier for a foo Returns: json: Corresponding Foo record A route to handle a request to update single existing Foo record (looked up by its id). Parameters: foo_id (int): Unique identifier for a foo Returns: json: Corresponding Foo record # Validate request data A route to handle a request to delete an existing Foo record (looked up by its id). Parameters: foo_id (int): Unique identifier for a foo Returns: json: A response code # Validate request data | 3.581386 | 4 |
tests/__init__.py | Omarzintan/bumblebee-ai | 3 | 6614512 | <filename>tests/__init__.py
from utils.speech import BumbleSpeech
from utils.bumblebee_internal_api import BUMBLEBEEAPI
from utils import config_builder
from helpers import bumblebee_root
class MockBee():
def __init__(self,
name: str = 'mock bumblebee',
):
self.name = name
self.speech = BumbleSpeech(speech_mode="silent")
self.bumblebee_dir = bumblebee_root
self.bumblebee_api = BUMBLEBEEAPI(self)
self.thread_failsafes = []
self.global_store = {}
self.config = config_builder.create_fake_config()
def run_feature(self, feature, input):
return feature.action
def get_speech(self):
return self.speech
def get_config(self):
return self.config
def get_internal_state(self):
return {
'global_store': self.global_store,
'thread_failsafes': self.thread_failsafes
}
def load_internal_state(self, state={}):
if not isinstance(state, dict):
raise Exception('Invalid argument, state. Must be a dict')
| <filename>tests/__init__.py
from utils.speech import BumbleSpeech
from utils.bumblebee_internal_api import BUMBLEBEEAPI
from utils import config_builder
from helpers import bumblebee_root
class MockBee():
def __init__(self,
name: str = 'mock bumblebee',
):
self.name = name
self.speech = BumbleSpeech(speech_mode="silent")
self.bumblebee_dir = bumblebee_root
self.bumblebee_api = BUMBLEBEEAPI(self)
self.thread_failsafes = []
self.global_store = {}
self.config = config_builder.create_fake_config()
def run_feature(self, feature, input):
return feature.action
def get_speech(self):
return self.speech
def get_config(self):
return self.config
def get_internal_state(self):
return {
'global_store': self.global_store,
'thread_failsafes': self.thread_failsafes
}
def load_internal_state(self, state={}):
if not isinstance(state, dict):
raise Exception('Invalid argument, state. Must be a dict')
| none | 1 | 2.449159 | 2 | |
tests/test_wps_raven_multi_model.py | fossabot/raven | 29 | 6614513 | <reponame>fossabot/raven
import datetime as dt
import pytest
from pywps import Service
from pywps.tests import assert_response_success
from ravenpy.utilities.testdata import get_local_testdata
from raven.processes import (
GraphEnsUncertaintyProcess,
GraphSingleHydrographProcess,
RavenMultiModelProcess,
)
from .common import CFG_FILE, client_for, get_output
def test_raven_multi_model_process(request):
client = client_for(
Service(
processes=[RavenMultiModelProcess()],
cfgfiles=CFG_FILE,
)
)
gr4jcn = "0.529, -3.396, 407.29, 1.072, 16.9, 0.947"
hmets = (
"9.5019, 0.2774, 6.3942, 0.6884, 1.2875, 5.4134, 2.3641, 0.0973, 0.0464, 0.1998, 0.0222, -1.0919, "
"2.6851, 0.3740, 1.0000, 0.4739, 0.0114, 0.0243, 0.0069, 310.7211, 916.1947"
)
datainputs = (
"ts=files@xlink:href=file://{ts};"
"gr4jcn={gr4jcn};"
"hmets={hmets};"
"start_date={start_date};"
"end_date={end_date};"
"name={name};"
"run_name={run_name};"
"area={area};"
"latitude={latitude};"
"longitude={longitude};"
"elevation={elevation};".format(
ts=get_local_testdata(
"raven-gr4j-cemaneige/Salmon-River-Near-Prince-George_meteo_daily.nc",
),
gr4jcn=gr4jcn,
hmets=hmets,
start_date=dt.datetime(2000, 1, 1),
end_date=dt.datetime(2002, 1, 1),
name="Salmon",
run_name="test",
area="4250.6",
elevation="843.0",
latitude=54.4848,
longitude=-123.3659,
)
)
resp = client.get(
service="WPS",
request="Execute",
version="1.0.0",
identifier="raven-multi-model",
datainputs=datainputs,
)
assert_response_success(resp)
out = get_output(resp.xml)
assert out["hydrograph"].endswith(".zip")
request.config.cache.set("zipfn", out["hydrograph"])
# @pytest.mark.dependency(depends=['test_raven_multi_model_process'])
@pytest.mark.skip
def test_graph_ensemble_uncertainty(request):
client = client_for(
Service(
processes=[
GraphEnsUncertaintyProcess(),
],
cfgfiles=CFG_FILE,
)
)
zipfn = request.config.cache.get("zipfn", None)
resp = client.get(
service="WPS",
request="Execute",
version="1.0.0",
identifier="graph_ensemble_uncertainty",
datainputs="sims=files@xlink:href=file://{};".format(zipfn),
)
assert_response_success(resp)
@pytest.mark.skip
def test_graph_single_hydrograph(request):
client = client_for(
Service(
processes=[
GraphSingleHydrographProcess(),
],
cfgfiles=CFG_FILE,
)
)
datainputs = "sim=files@xlink:href=file://{sim};".format(
sim=get_local_testdata(
"hydro_simulations/raven-gr4j-cemaneige-sim_hmets-0_Hydrographs.nc"
)
)
resp = client.get(
service="WPS",
request="Execute",
version="1.0.0",
identifier="graph_single_hydrograph",
datainputs=datainputs,
)
assert_response_success(resp)
| import datetime as dt
import pytest
from pywps import Service
from pywps.tests import assert_response_success
from ravenpy.utilities.testdata import get_local_testdata
from raven.processes import (
GraphEnsUncertaintyProcess,
GraphSingleHydrographProcess,
RavenMultiModelProcess,
)
from .common import CFG_FILE, client_for, get_output
def test_raven_multi_model_process(request):
client = client_for(
Service(
processes=[RavenMultiModelProcess()],
cfgfiles=CFG_FILE,
)
)
gr4jcn = "0.529, -3.396, 407.29, 1.072, 16.9, 0.947"
hmets = (
"9.5019, 0.2774, 6.3942, 0.6884, 1.2875, 5.4134, 2.3641, 0.0973, 0.0464, 0.1998, 0.0222, -1.0919, "
"2.6851, 0.3740, 1.0000, 0.4739, 0.0114, 0.0243, 0.0069, 310.7211, 916.1947"
)
datainputs = (
"ts=files@xlink:href=file://{ts};"
"gr4jcn={gr4jcn};"
"hmets={hmets};"
"start_date={start_date};"
"end_date={end_date};"
"name={name};"
"run_name={run_name};"
"area={area};"
"latitude={latitude};"
"longitude={longitude};"
"elevation={elevation};".format(
ts=get_local_testdata(
"raven-gr4j-cemaneige/Salmon-River-Near-Prince-George_meteo_daily.nc",
),
gr4jcn=gr4jcn,
hmets=hmets,
start_date=dt.datetime(2000, 1, 1),
end_date=dt.datetime(2002, 1, 1),
name="Salmon",
run_name="test",
area="4250.6",
elevation="843.0",
latitude=54.4848,
longitude=-123.3659,
)
)
resp = client.get(
service="WPS",
request="Execute",
version="1.0.0",
identifier="raven-multi-model",
datainputs=datainputs,
)
assert_response_success(resp)
out = get_output(resp.xml)
assert out["hydrograph"].endswith(".zip")
request.config.cache.set("zipfn", out["hydrograph"])
# @pytest.mark.dependency(depends=['test_raven_multi_model_process'])
@pytest.mark.skip
def test_graph_ensemble_uncertainty(request):
client = client_for(
Service(
processes=[
GraphEnsUncertaintyProcess(),
],
cfgfiles=CFG_FILE,
)
)
zipfn = request.config.cache.get("zipfn", None)
resp = client.get(
service="WPS",
request="Execute",
version="1.0.0",
identifier="graph_ensemble_uncertainty",
datainputs="sims=files@xlink:href=file://{};".format(zipfn),
)
assert_response_success(resp)
@pytest.mark.skip
def test_graph_single_hydrograph(request):
client = client_for(
Service(
processes=[
GraphSingleHydrographProcess(),
],
cfgfiles=CFG_FILE,
)
)
datainputs = "sim=files@xlink:href=file://{sim};".format(
sim=get_local_testdata(
"hydro_simulations/raven-gr4j-cemaneige-sim_hmets-0_Hydrographs.nc"
)
)
resp = client.get(
service="WPS",
request="Execute",
version="1.0.0",
identifier="graph_single_hydrograph",
datainputs=datainputs,
)
assert_response_success(resp) | en | 0.247329 | # @pytest.mark.dependency(depends=['test_raven_multi_model_process']) | 2.22731 | 2 |
src/mds/api/contrib/smslib/znis.py | m-socha/sana.mds | 2 | 6614514 | <reponame>m-socha/sana.mds
'''
Created on Aug 11, 2012
:author: Sana Development Team
:version: 2.0
'''
try:
import json as simplejson
except ImportError, e:
import simplejson
import logging
import urllib
from django.conf import settings
from .messages import format_sms
def send_znisms_notification(message_body, phoneId, formatter=None):
return ZnisOpener().open(message_body, phoneId, formatter=formatter)
class ZnisOpener:
def __init__(self):
pass
def open(self, message_body, phoneId, formatter=None):
"""Sends an SMS message to ZniSMS http interface
ZniSMS API documentation: http://www.znisms.com/api.pdf
ZniSMS url: http://api.znisms.com/post/smsv3.asp?userid=joinus&apikey=xxx&
message=Your+Message&senderid=9123123456&sendto=9123123457
ZniSMS Request params
userid
ZniSMS username
apikey
ZniSMS API key
message
SMS message body to send
senderid
Sender ID (should be alphanumeric)
sendto
Destination number (no +91, 91 or 0 in front)
Parameters:
message_body
Message body
phoneId
Recipient
"""
result = False
try:
messages = formatter(message_body) if formatter else message_body
for message in messages:
params = urllib.urlencode({
'userid': settings.ZNISMS_USER,
'apikey': settings.ZNISMS_APIKEY,
'senderid': settings.ZNISMS_SENDERID,
'sendto': phoneId,
'message': message
})
logging.info("Sending ZniSMS notification %s to %s" %
(message, phoneId))
response = urllib.urlopen(settings.ZNISMS_URL % params).read()
logging.info("ZniSMS response: %s" % response)
result = True
except Exception, e:
logging.error("Couldn't submit ZniSMS notification for %s: %s" % (phoneId, e))
return result | '''
Created on Aug 11, 2012
:author: Sana Development Team
:version: 2.0
'''
try:
import json as simplejson
except ImportError, e:
import simplejson
import logging
import urllib
from django.conf import settings
from .messages import format_sms
def send_znisms_notification(message_body, phoneId, formatter=None):
return ZnisOpener().open(message_body, phoneId, formatter=formatter)
class ZnisOpener:
def __init__(self):
pass
def open(self, message_body, phoneId, formatter=None):
"""Sends an SMS message to ZniSMS http interface
ZniSMS API documentation: http://www.znisms.com/api.pdf
ZniSMS url: http://api.znisms.com/post/smsv3.asp?userid=joinus&apikey=xxx&
message=Your+Message&senderid=9123123456&sendto=9123123457
ZniSMS Request params
userid
ZniSMS username
apikey
ZniSMS API key
message
SMS message body to send
senderid
Sender ID (should be alphanumeric)
sendto
Destination number (no +91, 91 or 0 in front)
Parameters:
message_body
Message body
phoneId
Recipient
"""
result = False
try:
messages = formatter(message_body) if formatter else message_body
for message in messages:
params = urllib.urlencode({
'userid': settings.ZNISMS_USER,
'apikey': settings.ZNISMS_APIKEY,
'senderid': settings.ZNISMS_SENDERID,
'sendto': phoneId,
'message': message
})
logging.info("Sending ZniSMS notification %s to %s" %
(message, phoneId))
response = urllib.urlopen(settings.ZNISMS_URL % params).read()
logging.info("ZniSMS response: %s" % response)
result = True
except Exception, e:
logging.error("Couldn't submit ZniSMS notification for %s: %s" % (phoneId, e))
return result | en | 0.394866 | Created on Aug 11, 2012 :author: Sana Development Team :version: 2.0 Sends an SMS message to ZniSMS http interface ZniSMS API documentation: http://www.znisms.com/api.pdf ZniSMS url: http://api.znisms.com/post/smsv3.asp?userid=joinus&apikey=xxx& message=Your+Message&senderid=9123123456&sendto=9123123457 ZniSMS Request params userid ZniSMS username apikey ZniSMS API key message SMS message body to send senderid Sender ID (should be alphanumeric) sendto Destination number (no +91, 91 or 0 in front) Parameters: message_body Message body phoneId Recipient | 1.971674 | 2 |
lib/__init__.py | mach1el/pmaping | 6 | 6614515 | import os
import sys
import os.path
import pcapy
import xml.etree.ElementTree
from ping import Ping
from random import *
from core.printf import *
from lib.packetHandle import *
from lib.socketHandle import *
from urllib.parse import urlparse
from os.path import dirname,abspath
from core import Exceptions as exce
sys.dont_write_bytecode=True
def parseURL(url):
    """Reduce *url* to its host part.

    Strings beginning with ``http``/``https``/``ftp`` are parsed and only
    the network location (``host[:port]``, possibly with credentials) is
    returned; anything else is handed back unchanged.
    """
    if not url.startswith(('http', 'ftp')):
        return url
    return urlparse(url).netloc
def get_ports():
    """Return the absolute path of the bundled TCP ports XML database."""
    here = abspath(dirname(__file__))
    return os.path.join(here, 'portsDB', 'tcpports.xml')
def portHandler(port, port_list=None):
    """Expand a port specification into an iterable of ports.

    Parameters
    ----------
    port : str or None
        ``None``  -> load the default service ports from the bundled XML
        database (see ``get_ports()``).
        ``"a,b"`` -> the listed ports, returned as a tuple of strings.
        ``"a-b"`` -> the inclusive range a..b, returned as a list of strings.
        ``"a"``   -> a single port, returned as a one-element list.
    port_list : list, optional
        Accumulator; a fresh list is created per call when omitted.
        (The previous mutable-default signature ``port_list=[]`` leaked
        results between successive calls.)

    Malformed numeric values terminate the process via ``msgStat``/
    ``sys.exit``, matching the original behaviour.
    """
    if port_list is None:
        port_list = []
    if port is None:
        database = get_ports()
        parse_database = xml.etree.ElementTree.parse(database).getroot()
        for scaninfo in parse_database.findall('scaninfo'):
            services = scaninfo.get('services').split(',')
            for port1 in services:
                if port1.count('-') == 1:
                    low, high = port1.split('-')
                    # NOTE: database ranges are appended as ints while single
                    # entries stay strings -- preserved from the original.
                    for p in range(int(low), int(high) + 1):
                        port_list.append(p)
                else:
                    port_list.append(port1)
        return port_list
    if ',' in port:
        ports = tuple(port.split(','))
        for _ in ports:
            try:
                int(_)
            except ValueError:  # was a bare except; only bad numbers can occur
                sys.exit(msgStat(exce.ValuesError(), err=True)())
        return ports
    if '-' in port:
        pieces = port.split('-')
        for _ in pieces:
            try:
                int(_)
            except ValueError:
                sys.exit(msgStat(exce.ValuesError(), err=True)())
        # min/max so a reversed spec like "25-20" still works.
        _min = min(int(x) for x in pieces)
        _max = max(int(x) for x in pieces)
        for p in range(_min, _max + 1):
            port_list.append(str(p))
        return port_list
    # Plain single-port spec.
    return [port]
class portResult(object):
    """De-duplicates scan results.

    ``_handle`` returns a port the first time it is seen (remembering it)
    and ``None`` on every later occurrence, so callers can suppress
    duplicate reports.
    """

    def __init__(self):
        # Ports already handed back to the caller.
        self.skipports = []

    def _handle(self, port):
        """Return *port* if unseen, else ``None``."""
        if port in self.skipports:
            return None
        self.skipports.append(port)
        return port
class Capture(object):
    """Open live pcapy capture handles filtered to traffic from *tgt*.

    The capture device is auto-detected from the interfaces that
    ``ip link`` reports as UP (Linux-specific shell pipeline).
    """

    def __init__(self, tgt):
        # Target host whose replies we want to sniff.
        self.tgt = tgt
        self.dev = self._get_online_device()

    def _set_tcp(self):
        """Return a live capture handle for TCP scans (snaplen 65535)."""
        p = pcapy.open_live(self.dev, 65535, 0, 1500)
        p.setfilter(('src host ') + str(self.tgt))
        return p

    def _set_udp(self):
        """Return a live capture handle for UDP scans.

        NOTE(review): the 99999 snaplen exceeds the usual 65535 pcap
        maximum -- confirm pcapy accepts it on all platforms.
        """
        p = pcapy.open_live(self.dev, 99999, False, 1500)
        p.setfilter(('src host ') + str(self.tgt))
        return p

    @staticmethod
    def _get_online_device():
        """Return the name of the last interface ``ip link`` lists as UP.

        Exits the process (``msgStat``/``exce.NoOnlineDev``) when no
        interface is up.  Fixed: ``_iface`` is now initialised before the
        loop -- previously it was only assigned inside the loop, so the
        "no online device" path crashed with a NameError instead of
        reaching the intended exit.
        """
        _iface = None  # guarantee the name exists when nothing is UP
        output = os.popen('ip link | grep \"state\" | awk {\'print $2 $9\'}').read()
        ifaces = output.split('\n')
        ifaces.pop()  # drop the empty entry after the trailing newline
        for line in ifaces:
            if "UP" in line:
                # awk printed "<name>:<state>", e.g. "eth0:UP"
                _iface = line.split(":")[0]
        if _iface is None:
            sys.exit(msgStat(
                exce.NoOnlineDev(), err=True
            )())
        return _iface
class Header(object):
    """Per-scan state container for the port-scanner front-end.

    Construction is side-effectful: it resolves the target (forward,
    reverse and IPv6), pings it once to measure latency, and opens a
    pcapy capture handle -- building a Header already touches the
    network.  ``_start`` prints the scan banner and summary lines
    (``Banner``/``msgStat``/``init``/``timed`` come from the
    ``core.printf`` star import).
    """
    def __init__(self,scan_type,*args):
        # Positional args: (target, ports, threads, timeout, quite).
        super(Header,self).__init__()
        self.target = args[0]        # host name or IP supplied by the user
        self.ports = args[1]         # expanded port list (see portHandler)
        self.threads = args[2]
        self.timeout = args[3]
        self.quite = args[4]         # quiet-mode flag (name kept: public attr)
        self.opened = []             # result buckets filled during the scan
        self.refused = []
        self.filtered = []
        self.packets = []
        self.conn_type = ""
        self.data = '\x00' * 20      # 20-byte zero payload for probe packets
        self.scan_type = scan_type   # "conn", "syn" or "udp"
        self.sip = get_our_addr()    # local source IP (socketHandle helper)
        self.portresult = portResult()
        self.sport = randrange(1,65535)   # random source port for this run
        self.udp_packet = UDPPacket()
        # NOTE(review): one less than the actual port count -- off-by-one
        # in the "[N ports]" banner below?  Confirm intent.
        self.port_len = len(self.ports)-1
        self.ip = domain_resolver(self.target,True)
        # One ICMP ping up front to measure latency (None if unreachable).
        self.response_time = Ping(self.target,self.timeout)._start_icmp()
        if self.scan_type == "conn" or self.scan_type == "syn":
            self.conn_type += "tcp"
        elif self.scan_type == "udp":
            self.conn_type = self.scan_type
        try:
            self.ipv6 = [str(ip) for ip in resolve_ipv6(self.target)]
        except:
            # NOTE(review): bare except maps any IPv6 resolution failure
            # (not just "no AAAA record") to None.
            self.ipv6 = None
        self.rdns = resolve_PTR(self.ip)
        if self.rdns != None:
            self.rdns = self.rdns[0]   # keep only the primary PTR name
        self.resolved_ips = [str(ip) for ip in resolve_ips(self.target)]
        if len(self.resolved_ips) > 1:
            # Drop the address we actually scan from the "other IPs" list.
            if self.ip in self.resolved_ips:
                self.resolved_ips.remove(self.ip)
        # Open the capture handle matching the scan type (see Capture).
        if self.scan_type == "udp":
            self.udp_packet_capture = Capture(self.ip)._set_udp()
        else:
            self.tcp_packet_capture = Capture(self.ip)._set_tcp()
    def _start(self):
        """Print the scan banner, host status and "initiating scan" lines."""
        Banner.portscanner()
        msgStat('[{0}] Started port scan process.'.format(timed()),warn=True)()
        msgStat('|--- Warning: host name {0} resolves to {1} IPs. Using {2}'.format(
            self.target,\
            len(self.resolved_ips),\
            self.ip),warn=True)()
        if self.response_time != None:
            init('|--- Host is up ({}s latency).'.format(str(self.response_time)[:5]))
        init('|--- rDNS record for {0}: {1}'.format(self.ip,self.rdns))
        init('[{0}] Initiating scan.'.format(timed()))
        init('[{0}] Scanning {1} ({2}) [{3} ports]'.format(timed(),\
            self.target,\
            self.ip,\
            str(self.port_len)
            )
        )
import sys
import os.path
import pcapy
import xml.etree.ElementTree
from ping import Ping
from random import *
from core.printf import *
from lib.packetHandle import *
from lib.socketHandle import *
from urllib.parse import urlparse
from os.path import dirname,abspath
from core import Exceptions as exce
sys.dont_write_bytecode=True
def parseURL(url):
    """Reduce an http(s)/ftp(s) URL to its bare host (netloc).

    Anything that does not parse as such a URL is returned unchanged.
    Bug fix: the original prefix test (``url.startswith('http')``) also
    matched plain strings such as ``'httpd.conf'`` and collapsed them to
    ``''``; we now require a real scheme *and* a non-empty netloc.
    """
    parsed_uri = urlparse(url)
    if parsed_uri.scheme in ('http', 'https', 'ftp', 'ftps') and parsed_uri.netloc:
        return parsed_uri.netloc
    return url
def get_ports():
    """Absolute path of the bundled ``portsDB/tcpports.xml`` service
    database shipped next to this module."""
    here = abspath(dirname(__file__))
    db_path = os.path.join(here, 'portsDB', 'tcpports.xml')
    return db_path
def portHandler(port, port_list=None):
    """Expand a port specification into concrete port values.

    port:
        ``None``      -> every service port listed in the bundled XML database
        ``"a,b,..."`` -> tuple of the comma-separated port strings
        ``"a-b"``     -> list of every port string in the inclusive range
        ``"a"``       -> single-element list ``[a]``
    port_list:
        optional accumulator list; a fresh list is created per call.

    Invalid (non-integer) ports abort the process via ``sys.exit``.
    """
    # Bug fix: the original signature used a mutable default (port_list=[])
    # that was shared between calls, so successive range expansions kept
    # accumulating into one ever-growing list.
    if port_list is None:
        port_list = []
    if port is None:
        # Expand the 'services' attribute of every <scaninfo> entry; entries
        # may be single ports or "lo-hi" ranges.
        database = get_ports()
        parse_database = xml.etree.ElementTree.parse(database).getroot()
        for scaninfo in parse_database.findall('scaninfo'):
            services = scaninfo.get('services').split(',')
            for entry in services:
                if entry.count('-') == 1:
                    lo, hi = entry.split('-')
                    for expanded in range(int(lo), int(hi) + 1):
                        port_list.append(expanded)
                else:
                    port_list.append(entry)
        return port_list
    if port.count(',') != 0:
        # "80,443,8080" -> ('80', '443', '8080'); validate every element.
        ports = tuple(port.split(','))
        for piece in ports:
            try:
                int(piece)
            except ValueError:
                sys.exit(msgStat(exce.ValuesError(), err=True)())
    elif port.count('-') != 0:
        # "20-25" -> ['20', ..., '25']; order-insensitive (min/max are used).
        bounds = port.split('-')
        for piece in bounds:
            try:
                int(piece)
            except ValueError:
                sys.exit(msgStat(exce.ValuesError(), err=True)())
        _min = min(int(x) for x in bounds)
        _max = max(int(x) for x in bounds)
        for value in range(_min, _max + 1):
            port_list.append(str(value))
        ports = port_list
    else:
        # Plain single port, e.g. "80".
        ports = [port]
    return ports
class portResult(object):
    """De-duplicates scan results: each port value is reported only once."""

    def __init__(self):
        # Ports already handed out once.
        self.skipports = []

    def _handle(self, port):
        """First call with *port* returns it; repeats yield ``None``."""
        if port not in self.skipports:
            self.skipports.append(port)
            return port
        return None
class Capture(object):
    """Live pcapy capture bound to the first network interface that is UP,
    filtered to packets whose source address is the scan target."""

    def __init__(self, tgt):
        self.tgt = tgt                        # scan target address/host
        self.dev = self._get_online_device()  # interface to sniff on

    def _set_tcp(self):
        """Open a live capture for TCP-scan replies (snaplen 65535)."""
        p = pcapy.open_live(self.dev, 65535, 0, 1500)
        p.setfilter('src host ' + str(self.tgt))
        return p

    def _set_udp(self):
        """Open a live capture for UDP-scan replies (snaplen 99999)."""
        p = pcapy.open_live(self.dev, 99999, False, 1500)
        p.setfilter('src host ' + str(self.tgt))
        return p

    @staticmethod
    def _get_online_device():
        """Return the name of the first interface `ip link` reports as UP.

        Exits via ``sys.exit(NoOnlineDev)`` when no interface is up.  (The
        original only dead-checked ``dev[0] == None`` -- a value str.split
        can never produce -- and silently returned None when nothing was
        UP, which made pcapy fail later with a confusing error.)
        """
        raw = os.popen('ip link | grep "state" | awk {\'print $2 $9\'}').read()
        for line in raw.split('\n'):
            if line and "UP" in line:
                return line.split(":")[0]
        sys.exit(msgStat(exce.NoOnlineDev(), err=True)())
class Header(object):
    """Per-scan state holder: resolves the target, opens the packet-capture
    handle and prints the scan header/banner lines.

    Helper names (get_our_addr, domain_resolver, resolve_ipv6, resolve_PTR,
    resolve_ips, UDPPacket, msgStat, init, timed, Banner) arrive via the
    star imports at the top of the module -- presumably lib.socketHandle /
    lib.packetHandle / core.printf; confirm against those modules.
    """
    def __init__(self,scan_type,*args):
        # args is a positional bundle: (target, ports, threads, timeout, quite)
        super(Header,self).__init__()
        self.target = args[0]
        self.ports = args[1]
        self.threads = args[2]
        self.timeout = args[3]
        self.quite = args[4]
        # Result buckets filled while scanning.
        self.opened = []
        self.refused = []
        self.filtered = []
        self.packets = []
        self.conn_type = ""
        self.data = '\x00' * 20  # 20-byte zero probe payload
        self.scan_type = scan_type
        self.sip = get_our_addr()  # local source address (per helper name)
        self.portresult = portResult()  # de-duplicates reported ports
        self.sport = randrange(1,65535)  # random source port for probes
        self.udp_packet = UDPPacket()
        self.port_len = len(self.ports)-1
        self.ip = domain_resolver(self.target,True)
        # Up-front ICMP ping so the header can report latency.
        self.response_time = Ping(self.target,self.timeout)._start_icmp()
        # conn/syn scans are TCP; udp keeps its own name.
        if self.scan_type == "conn" or self.scan_type == "syn":
            self.conn_type += "tcp"
        elif self.scan_type == "udp":
            self.conn_type = self.scan_type
        try:
            self.ipv6 = [str(ip) for ip in resolve_ipv6(self.target)]
        except:
            # Bare except: any resolution failure simply means "no IPv6 info".
            self.ipv6 = None
        self.rdns = resolve_PTR(self.ip)
        if self.rdns != None:
            self.rdns = self.rdns[0]  # keep only the primary PTR name
        self.resolved_ips = [str(ip) for ip in resolve_ips(self.target)]
        # Remove the IP actually used from the "also resolves to" list.
        if len(self.resolved_ips) > 1:
            if self.ip in self.resolved_ips:
                self.resolved_ips.remove(self.ip)
        # Pre-open the pcap handle matching the scan protocol.
        if self.scan_type == "udp":
            self.udp_packet_capture = Capture(self.ip)._set_udp()
        else:
            self.tcp_packet_capture = Capture(self.ip)._set_tcp()
    def _start(self):
        """Print the banner and the nmap-like scan header lines."""
        Banner.portscanner()
        msgStat('[{0}] Started port scan process.'.format(timed()),warn=True)()
        msgStat('|--- Warning: host name {0} resolves to {1} IPs. Using {2}'.format(
            self.target,\
            len(self.resolved_ips),\
            self.ip),warn=True)()
        if self.response_time != None:
            init('|--- Host is up ({}s latency).'.format(str(self.response_time)[:5]))
        init('|--- rDNS record for {0}: {1}'.format(self.ip,self.rdns))
        init('[{0}] Initiating scan.'.format(timed()))
        init('[{0}] Scanning {1} ({2}) [{3} ports]'.format(timed(),\
            self.target,\
            self.ip,\
            str(self.port_len)
            )
) | none | 1 | 2.661402 | 3 | |
scripts/keras_market_evaluation_v2.py | JLivingston01/py_research | 1 | 6614516 | <gh_stars>1-10
import yfinance as yf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
import datetime as dt
import tensorflow
from tensorflow import keras
from tensorflow import losses
# Exploratory market-timing study: predict the S&P 500's 15-day-forward
# relative change from lagged momentum / volatility / volume features, and
# compare a pseudo-inverse linear regression, XGBoost, and a small Keras
# feed-forward network on a chronological train/validation/test split.

# --- Data acquisition --------------------------------------------------
# Full daily history for VIX, Dow Jones, S&P 500 and the 10-year Treasury.
symbols = ['^VIX','^DJI','^GSPC','^TNX']
dat = yf.download(tickers = " ".join(symbols),
                  period = "max",
                  interval = "1d",
                  group_by = 'ticker')
dat.columns.values  # NOTE(review): no-op outside an interactive session
# Keep the Close and Volume panels and flatten the (ticker, field)
# MultiIndex into names such as "GSPC_Close" / "GSPC_Volume".
M = dat[[(i,'Close') for i in symbols]].copy()
N = dat[[(i,'Volume') for i in symbols]].copy()
M=M.merge(N,left_index=True,right_index=True,how='left')
M.columns = [i[0].replace("^","")+"_"+i[1] for i in M.columns]
# Patch NaNs with a short 4-day rolling mean of the same column.
for i in M.columns:
    rm = M[i].rolling(4,min_periods=1).mean()
    M[i]=np.where(M[i].isna(),rm,M[i])
#KPI is SP500 % change over 15 days
M['GSPC_Close_change15'] = M['GSPC_Close'].shift(-15)/M['GSPC_Close']
#SP500 rolling momentum, shifted 2 days
# momentum = price / rolling-mean - 1; the trailing .shift(2) lags each
# feature by 2 trading days (presumably to rule out look-ahead -- confirm).
M['GSPC_Close_rolling125'] = M['GSPC_Close'].rolling(window=125).mean()
M['GSPC_Close_momentum125'] = M['GSPC_Close']/M['GSPC_Close_rolling125']-1
M['GSPC_Close_momentum125_shift2']=M['GSPC_Close_momentum125'].shift(2)
M['GSPC_Close_rolling30'] = M['GSPC_Close'].rolling(window=30).mean()
M['GSPC_Close_momentum30'] = M['GSPC_Close']/M['GSPC_Close_rolling30']-1
M['GSPC_Close_momentum30_shift2']=M['GSPC_Close_momentum30'].shift(2)
M['GSPC_Close_rolling15'] = M['GSPC_Close'].rolling(window=15).mean()
M['GSPC_Close_momentum15'] = M['GSPC_Close']/M['GSPC_Close_rolling15']-1
M['GSPC_Close_momentum15_shift2']=M['GSPC_Close_momentum15'].shift(2)
#DJI rolling momentum shifted 2 days
M['DJI_Close_rolling125'] = M['DJI_Close'].rolling(window=125).mean()
M['DJI_Close_momentum125'] = M['DJI_Close']/M['DJI_Close_rolling125']-1
M['DJI_Close_momentum125_shift2']=M['DJI_Close_momentum125'].shift(2)
M['DJI_Close_rolling30'] = M['DJI_Close'].rolling(window=30).mean()
M['DJI_Close_momentum30'] = M['DJI_Close']/M['DJI_Close_rolling30']-1
M['DJI_Close_momentum30_shift2']=M['DJI_Close_momentum30'].shift(2)
M['DJI_Close_rolling15'] = M['DJI_Close'].rolling(window=15).mean()
M['DJI_Close_momentum15'] = M['DJI_Close']/M['DJI_Close_rolling15']-1
M['DJI_Close_momentum15_shift2']=M['DJI_Close_momentum15'].shift(2)
# VIX change, shifted 2 days
M["VIX_Close_change1_shift2"]=(M["VIX_Close"]/M["VIX_Close"].shift(1)-1).shift(2)
M["VIX_Close_change1_shift2_rolling10dev"]=M["VIX_Close_change1_shift2"].rolling(window=10).std()
M["VIX_Close_change1_shift2_rolling10mean"]=M["VIX_Close_change1_shift2"].rolling(window=10).mean()
# SP500 Change shifted 2 days
M["GSPC_Close_change1_shift2"]=(M['GSPC_Close']/M['GSPC_Close'].shift(1)-1).shift(2)
M["GSPC_Close_change8_shift2"]=(M['GSPC_Close']/M['GSPC_Close'].shift(8)-1).shift(2)
M["GSPC_Close_change20_shift2"]=(M['GSPC_Close']/M['GSPC_Close'].shift(20)-1).shift(2)
M["DJI_Close_change1_shift2"]=(M['DJI_Close']/M['DJI_Close'].shift(1)-1).shift(2)
M["DJI_Close_change8_shift2"]=(M['DJI_Close']/M['DJI_Close'].shift(8)-1).shift(2)
M["DJI_Close_change20_shift2"]=(M['DJI_Close']/M['DJI_Close'].shift(20)-1).shift(2)
#SP500 volume change, shifted 2 days
M['GSPC_Volume_change1_shift2']=(M["GSPC_Volume"]/M["GSPC_Volume"].shift(1)-1).shift(2)
M['GSPC_Volume_change10_shift2']=(M["GSPC_Volume"]/M["GSPC_Volume"].shift(10)-1).shift(2)
M['GSPC_Volume_change1_shift2_rolling10']=M['GSPC_Volume_change1_shift2'].rolling(window=10).mean()
M['GSPC_Volume_change10_shift2_rolling10']=M['GSPC_Volume_change10_shift2'].rolling(window=10).mean()
# 10-day smoothed versions of the six price-change features.
for i in ['GSPC_Close_change1_shift2',
          'GSPC_Close_change8_shift2',
          'GSPC_Close_change20_shift2',
          'DJI_Close_change1_shift2',
          'DJI_Close_change8_shift2',
          'DJI_Close_change20_shift2']:
    M[i+"_rolling10"] = M[i].rolling(window=10).mean()
M.columns  # NOTE(review): no-op outside an interactive session
#Intercept
M['int'] = 1
M.dropna(inplace=True)
# Design matrix for the linear model (includes the explicit intercept).
features = [
        'GSPC_Close_momentum125_shift2',
        'GSPC_Close_momentum30_shift2',
        'DJI_Close_momentum125_shift2',
        'DJI_Close_momentum30_shift2',
        'GSPC_Volume_change1_shift2',
        'GSPC_Volume_change10_shift2',
        'GSPC_Volume_change1_shift2_rolling10',
        'GSPC_Volume_change10_shift2_rolling10',
        'GSPC_Close_change1_shift2',
        'GSPC_Close_change8_shift2',
        'GSPC_Close_change20_shift2',
        'DJI_Close_change1_shift2',
        'DJI_Close_change8_shift2',
        'DJI_Close_change20_shift2',
        'VIX_Close_change1_shift2',
        'GSPC_Close_change1_shift2_rolling10',
        'GSPC_Close_change8_shift2_rolling10',
        'GSPC_Close_change20_shift2_rolling10',
        'DJI_Close_change1_shift2_rolling10',
        'DJI_Close_change8_shift2_rolling10',
        'DJI_Close_change20_shift2_rolling10',
        'VIX_Close_change1_shift2_rolling10dev',
        'VIX_Close_change1_shift2_rolling10mean',
        'int']
kpi = 'GSPC_Close_change15'
M.corr()[kpi]  # NOTE(review): no-op outside an interactive session
# Chronological split: train up to 60 days ago, validate the following
# ~35 days, keep the last 25 days as a held-out test set.
split_train = dt.datetime.strftime(dt.datetime.today()-dt.timedelta(60),'%Y-%m-%d')
split_test = dt.datetime.strftime(dt.datetime.today()-dt.timedelta(25),'%Y-%m-%d')
xt = M[M.index<=split_train][features].copy()
yt = M[M.index<=split_train][kpi]
xv = M[(M.index>split_train)&(M.index<=split_test)][features].copy()
yv = M[(M.index>split_train)&(M.index<=split_test)][kpi]
xtest = M[M.index>split_test][features].copy()
ytest = M[M.index>split_test][kpi]
#Linear Model
# Ordinary least squares via the Moore-Penrose pseudo-inverse; the `expon`
# switch fits log(y) instead and exponentiates the predictions.
expon =False
if expon ==True:
    coefs= np.linalg.pinv(xt.T@xt)@(xt.T@np.log(yt))
    yfit=np.exp(xt@coefs)
    ypred=np.exp(xv@coefs)
else:
    coefs= np.linalg.pinv(xt.T@xt)@(xt.T@(yt))
    yfit=xt@coefs
    ypred=xv@coefs
plt.plot(yt)
plt.plot(yfit)
plt.title('regression fit')
plt.xticks(rotation=90)
plt.show()
plt.plot(yv)
plt.plot(ypred)
plt.title('regression pred')
plt.xticks(rotation=90)
plt.show()
pd.DataFrame({'y':yv,'pred':ypred}).corr()  # NOTE(review): result discarded
# Dual-axis view of validation target vs. prediction.
fig,ax1=plt.subplots()
ax1.plot(yv)
plt.xticks(rotation=90)
ax2=ax1.twinx()
ax2.plot(ypred,color='orange')
plt.title('regression pred')
plt.xticks(rotation=90)
plt.show()
#xgboost
import xgboost  # NOTE(review): mid-script import
xgb = xgboost.XGBRegressor(n_estimators=100,
                           max_depth =9,
                           colsample_bytree=1 ,
                           colsample_bylevel=.9,
                           colsample_bynode =.9,
                           n_jobs=4
                           ).fit(xt,yt)
yfit=xgb.predict(xt)
ypred=xgb.predict(xv)
plt.plot(yt)
plt.plot(pd.Series(yfit,index=yt.index))
plt.title('regression fit')
plt.xticks(rotation=90)
plt.show()
plt.plot(yv)
plt.plot(pd.Series(ypred,index=yv.index))
plt.title('regression pred')
plt.xticks(rotation=90)
plt.show()
print(pd.DataFrame({'y':yv,'pred':ypred}).corr())
print(round(np.mean((yv-ypred)**2),5))
#Keras nN
# Same feature list as above, but without the explicit 'int' intercept
# column (the Dense layers carry their own bias terms).
features = [
        'GSPC_Close_momentum125_shift2',
        'GSPC_Close_momentum30_shift2',
        'DJI_Close_momentum125_shift2',
        'DJI_Close_momentum30_shift2',
        'GSPC_Volume_change1_shift2',
        'GSPC_Volume_change10_shift2',
        'GSPC_Volume_change1_shift2_rolling10',
        'GSPC_Volume_change10_shift2_rolling10',
        'GSPC_Close_change1_shift2',
        'GSPC_Close_change8_shift2',
        'GSPC_Close_change20_shift2',
        'DJI_Close_change1_shift2',
        'DJI_Close_change8_shift2',
        'DJI_Close_change20_shift2',
        'VIX_Close_change1_shift2',
        'GSPC_Close_change1_shift2_rolling10',
        'GSPC_Close_change8_shift2_rolling10',
        'GSPC_Close_change20_shift2_rolling10',
        'DJI_Close_change1_shift2_rolling10',
        'DJI_Close_change8_shift2_rolling10',
        'DJI_Close_change20_shift2_rolling10',
        'VIX_Close_change1_shift2_rolling10dev',
        'VIX_Close_change1_shift2_rolling10mean',
        ]
split_train = dt.datetime.strftime(dt.datetime.today()-dt.timedelta(60),'%Y-%m-%d')
split_test = dt.datetime.strftime(dt.datetime.today()-dt.timedelta(25),'%Y-%m-%d')
xt = M[M.index<=split_train][features].copy()
yt = M[M.index<=split_train][kpi]
xv = M[(M.index>split_train)&(M.index<=split_test)][features].copy()
yv = M[(M.index>split_train)&(M.index<=split_test)][kpi]
xtest = M[M.index>split_test][features].copy()
ytest = M[M.index>split_test][kpi]
# Small MLP: three 15-unit hidden layers (linear / relu / sigmoid) with
# dropout and a linear output; commented lines are earlier experiments.
model = keras.Sequential([
        #keras.layers.LSTM(units=10),
        #keras.layers.Dense(len(xt.columns.values), activation=keras.activations.relu),
        keras.layers.Dense(15, activation=keras.activations.linear),
        keras.layers.Dropout(.15),
        keras.layers.Dense(15, activation=keras.activations.relu),
        keras.layers.Dropout(.15),
        keras.layers.Dense(15, activation=keras.activations.sigmoid),
        #keras.layers.Dense(len(xt.columns.values), activation=keras.activations.linear),
        #keras.layers.Dense(15, activation=keras.activations.linear),
        #keras.layers.Dense(1,activation = keras.activations.linear),
        keras.layers.Dense(1,activation = keras.activations.linear)
        #keras.layers.Dense(1,activation = keras.activations.exponential)
        ])
model.compile(optimizer=tensorflow.optimizers.Adam(),
              loss=losses.mean_squared_error,
              #batch_size=32,
              #loss=losses.categorical_crossentropy,
              metrics=[keras.metrics.MeanSquaredError()])
# Staged training with alternating batch sizes / epoch counts (experimental).
model.fit(
        #x=np.array(xt).reshape((xt.shape[0],xt.shape[1],1)),
        x=np.array(xt).reshape(xt.shape[0],xt.shape[1],),
        y=np.array(yt),
        batch_size = 2**10,
        validation_data=(np.array(xv).reshape(xv.shape[0],xv.shape[1],),np.array(yv)),
        epochs=24000)
model.fit(
        #x=np.array(xt).reshape((xt.shape[0],xt.shape[1],1)),
        x=np.array(xt).reshape(xt.shape[0],xt.shape[1],),
        y=np.array(yt),
        #batch_size = 2**9,
        validation_data=(np.array(xv).reshape(xv.shape[0],xv.shape[1],),np.array(yv)),
        epochs=1800)
model.fit(
        #x=np.array(xt).reshape((xt.shape[0],xt.shape[1],1)),
        x=np.array(xt).reshape(xt.shape[0],xt.shape[1],),
        y=np.array(yt),
        batch_size = 2**6,
        validation_data=(np.array(xv).reshape(xv.shape[0],xv.shape[1],),np.array(yv)),
        epochs=10000)
model.fit(
        #x=np.array(xt).reshape((xt.shape[0],xt.shape[1],1)),
        x=np.array(xt).reshape(xt.shape[0],xt.shape[1],),
        y=np.array(yt),
        #batch_size = 2**9,
        validation_data=(np.array(xv).reshape(xv.shape[0],xv.shape[1],),np.array(yv)),
        epochs=1800)
# Flatten the (n, 1) prediction matrices into 1-D arrays.
ypred = model.predict(np.array(xv))
ypred = ypred.reshape(ypred.shape[0])
yfit = model.predict(np.array(xt))
yfit = yfit.reshape(yfit.shape[0])
plt.plot(yt)
plt.plot(pd.Series(yfit,index=yt.index))
plt.title('nn fit')
plt.xticks(rotation=90)
plt.show()
plt.plot(yv)
plt.plot(pd.Series(ypred,index=yv.index))
plt.title('nn pred')
plt.xticks(rotation=90)
plt.show()
# Fit-vs-actual scatter for train (small dots) and validation (larger).
fig = plt.figure(figsize=(8,8))
plt.scatter(yfit,yt,s=.5)
plt.scatter(ypred,yv,s=3)
plt.xlim(.8,1.2)
plt.ylim(.8,1.2)
plt.show()
print(pd.DataFrame({'y':yv,'pred':ypred}).corr())
print(round(np.mean((yv-ypred)**2),5))
| import yfinance as yf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
import datetime as dt
import tensorflow
from tensorflow import keras
from tensorflow import losses
# Exploratory market-timing study: predict the S&P 500's 15-day-forward
# relative change from lagged momentum / volatility / volume features, and
# compare a pseudo-inverse linear regression, XGBoost, and a small Keras
# feed-forward network on a chronological train/validation/test split.

# --- Data acquisition --------------------------------------------------
# Full daily history for VIX, Dow Jones, S&P 500 and the 10-year Treasury.
symbols = ['^VIX','^DJI','^GSPC','^TNX']
dat = yf.download(tickers = " ".join(symbols),
                  period = "max",
                  interval = "1d",
                  group_by = 'ticker')
dat.columns.values  # NOTE(review): no-op outside an interactive session
# Keep the Close and Volume panels and flatten the (ticker, field)
# MultiIndex into names such as "GSPC_Close" / "GSPC_Volume".
M = dat[[(i,'Close') for i in symbols]].copy()
N = dat[[(i,'Volume') for i in symbols]].copy()
M=M.merge(N,left_index=True,right_index=True,how='left')
M.columns = [i[0].replace("^","")+"_"+i[1] for i in M.columns]
# Patch NaNs with a short 4-day rolling mean of the same column.
for i in M.columns:
    rm = M[i].rolling(4,min_periods=1).mean()
    M[i]=np.where(M[i].isna(),rm,M[i])
#KPI is SP500 % change over 15 days
M['GSPC_Close_change15'] = M['GSPC_Close'].shift(-15)/M['GSPC_Close']
#SP500 rolling momentum, shifted 2 days
# momentum = price / rolling-mean - 1; the trailing .shift(2) lags each
# feature by 2 trading days (presumably to rule out look-ahead -- confirm).
M['GSPC_Close_rolling125'] = M['GSPC_Close'].rolling(window=125).mean()
M['GSPC_Close_momentum125'] = M['GSPC_Close']/M['GSPC_Close_rolling125']-1
M['GSPC_Close_momentum125_shift2']=M['GSPC_Close_momentum125'].shift(2)
M['GSPC_Close_rolling30'] = M['GSPC_Close'].rolling(window=30).mean()
M['GSPC_Close_momentum30'] = M['GSPC_Close']/M['GSPC_Close_rolling30']-1
M['GSPC_Close_momentum30_shift2']=M['GSPC_Close_momentum30'].shift(2)
M['GSPC_Close_rolling15'] = M['GSPC_Close'].rolling(window=15).mean()
M['GSPC_Close_momentum15'] = M['GSPC_Close']/M['GSPC_Close_rolling15']-1
M['GSPC_Close_momentum15_shift2']=M['GSPC_Close_momentum15'].shift(2)
#DJI rolling momentum shifted 2 days
M['DJI_Close_rolling125'] = M['DJI_Close'].rolling(window=125).mean()
M['DJI_Close_momentum125'] = M['DJI_Close']/M['DJI_Close_rolling125']-1
M['DJI_Close_momentum125_shift2']=M['DJI_Close_momentum125'].shift(2)
M['DJI_Close_rolling30'] = M['DJI_Close'].rolling(window=30).mean()
M['DJI_Close_momentum30'] = M['DJI_Close']/M['DJI_Close_rolling30']-1
M['DJI_Close_momentum30_shift2']=M['DJI_Close_momentum30'].shift(2)
M['DJI_Close_rolling15'] = M['DJI_Close'].rolling(window=15).mean()
M['DJI_Close_momentum15'] = M['DJI_Close']/M['DJI_Close_rolling15']-1
M['DJI_Close_momentum15_shift2']=M['DJI_Close_momentum15'].shift(2)
# VIX change, shifted 2 days
M["VIX_Close_change1_shift2"]=(M["VIX_Close"]/M["VIX_Close"].shift(1)-1).shift(2)
M["VIX_Close_change1_shift2_rolling10dev"]=M["VIX_Close_change1_shift2"].rolling(window=10).std()
M["VIX_Close_change1_shift2_rolling10mean"]=M["VIX_Close_change1_shift2"].rolling(window=10).mean()
# SP500 Change shifted 2 days
M["GSPC_Close_change1_shift2"]=(M['GSPC_Close']/M['GSPC_Close'].shift(1)-1).shift(2)
M["GSPC_Close_change8_shift2"]=(M['GSPC_Close']/M['GSPC_Close'].shift(8)-1).shift(2)
M["GSPC_Close_change20_shift2"]=(M['GSPC_Close']/M['GSPC_Close'].shift(20)-1).shift(2)
M["DJI_Close_change1_shift2"]=(M['DJI_Close']/M['DJI_Close'].shift(1)-1).shift(2)
M["DJI_Close_change8_shift2"]=(M['DJI_Close']/M['DJI_Close'].shift(8)-1).shift(2)
M["DJI_Close_change20_shift2"]=(M['DJI_Close']/M['DJI_Close'].shift(20)-1).shift(2)
#SP500 volume change, shifted 2 days
M['GSPC_Volume_change1_shift2']=(M["GSPC_Volume"]/M["GSPC_Volume"].shift(1)-1).shift(2)
M['GSPC_Volume_change10_shift2']=(M["GSPC_Volume"]/M["GSPC_Volume"].shift(10)-1).shift(2)
M['GSPC_Volume_change1_shift2_rolling10']=M['GSPC_Volume_change1_shift2'].rolling(window=10).mean()
M['GSPC_Volume_change10_shift2_rolling10']=M['GSPC_Volume_change10_shift2'].rolling(window=10).mean()
# 10-day smoothed versions of the six price-change features.
for i in ['GSPC_Close_change1_shift2',
          'GSPC_Close_change8_shift2',
          'GSPC_Close_change20_shift2',
          'DJI_Close_change1_shift2',
          'DJI_Close_change8_shift2',
          'DJI_Close_change20_shift2']:
    M[i+"_rolling10"] = M[i].rolling(window=10).mean()
M.columns  # NOTE(review): no-op outside an interactive session
#Intercept
M['int'] = 1
M.dropna(inplace=True)
# Design matrix for the linear model (includes the explicit intercept).
features = [
        'GSPC_Close_momentum125_shift2',
        'GSPC_Close_momentum30_shift2',
        'DJI_Close_momentum125_shift2',
        'DJI_Close_momentum30_shift2',
        'GSPC_Volume_change1_shift2',
        'GSPC_Volume_change10_shift2',
        'GSPC_Volume_change1_shift2_rolling10',
        'GSPC_Volume_change10_shift2_rolling10',
        'GSPC_Close_change1_shift2',
        'GSPC_Close_change8_shift2',
        'GSPC_Close_change20_shift2',
        'DJI_Close_change1_shift2',
        'DJI_Close_change8_shift2',
        'DJI_Close_change20_shift2',
        'VIX_Close_change1_shift2',
        'GSPC_Close_change1_shift2_rolling10',
        'GSPC_Close_change8_shift2_rolling10',
        'GSPC_Close_change20_shift2_rolling10',
        'DJI_Close_change1_shift2_rolling10',
        'DJI_Close_change8_shift2_rolling10',
        'DJI_Close_change20_shift2_rolling10',
        'VIX_Close_change1_shift2_rolling10dev',
        'VIX_Close_change1_shift2_rolling10mean',
        'int']
kpi = 'GSPC_Close_change15'
M.corr()[kpi]  # NOTE(review): no-op outside an interactive session
# Chronological split: train up to 60 days ago, validate the following
# ~35 days, keep the last 25 days as a held-out test set.
split_train = dt.datetime.strftime(dt.datetime.today()-dt.timedelta(60),'%Y-%m-%d')
split_test = dt.datetime.strftime(dt.datetime.today()-dt.timedelta(25),'%Y-%m-%d')
xt = M[M.index<=split_train][features].copy()
yt = M[M.index<=split_train][kpi]
xv = M[(M.index>split_train)&(M.index<=split_test)][features].copy()
yv = M[(M.index>split_train)&(M.index<=split_test)][kpi]
xtest = M[M.index>split_test][features].copy()
ytest = M[M.index>split_test][kpi]
#Linear Model
# Ordinary least squares via the Moore-Penrose pseudo-inverse; the `expon`
# switch fits log(y) instead and exponentiates the predictions.
expon =False
if expon ==True:
    coefs= np.linalg.pinv(xt.T@xt)@(xt.T@np.log(yt))
    yfit=np.exp(xt@coefs)
    ypred=np.exp(xv@coefs)
else:
    coefs= np.linalg.pinv(xt.T@xt)@(xt.T@(yt))
    yfit=xt@coefs
    ypred=xv@coefs
plt.plot(yt)
plt.plot(yfit)
plt.title('regression fit')
plt.xticks(rotation=90)
plt.show()
plt.plot(yv)
plt.plot(ypred)
plt.title('regression pred')
plt.xticks(rotation=90)
plt.show()
pd.DataFrame({'y':yv,'pred':ypred}).corr()  # NOTE(review): result discarded
# Dual-axis view of validation target vs. prediction.
fig,ax1=plt.subplots()
ax1.plot(yv)
plt.xticks(rotation=90)
ax2=ax1.twinx()
ax2.plot(ypred,color='orange')
plt.title('regression pred')
plt.xticks(rotation=90)
plt.show()
#xgboost
import xgboost  # NOTE(review): mid-script import
xgb = xgboost.XGBRegressor(n_estimators=100,
                           max_depth =9,
                           colsample_bytree=1 ,
                           colsample_bylevel=.9,
                           colsample_bynode =.9,
                           n_jobs=4
                           ).fit(xt,yt)
yfit=xgb.predict(xt)
ypred=xgb.predict(xv)
plt.plot(yt)
plt.plot(pd.Series(yfit,index=yt.index))
plt.title('regression fit')
plt.xticks(rotation=90)
plt.show()
plt.plot(yv)
plt.plot(pd.Series(ypred,index=yv.index))
plt.title('regression pred')
plt.xticks(rotation=90)
plt.show()
print(pd.DataFrame({'y':yv,'pred':ypred}).corr())
print(round(np.mean((yv-ypred)**2),5))
#Keras nN
# Same feature list as above, but without the explicit 'int' intercept
# column (the Dense layers carry their own bias terms).
features = [
        'GSPC_Close_momentum125_shift2',
        'GSPC_Close_momentum30_shift2',
        'DJI_Close_momentum125_shift2',
        'DJI_Close_momentum30_shift2',
        'GSPC_Volume_change1_shift2',
        'GSPC_Volume_change10_shift2',
        'GSPC_Volume_change1_shift2_rolling10',
        'GSPC_Volume_change10_shift2_rolling10',
        'GSPC_Close_change1_shift2',
        'GSPC_Close_change8_shift2',
        'GSPC_Close_change20_shift2',
        'DJI_Close_change1_shift2',
        'DJI_Close_change8_shift2',
        'DJI_Close_change20_shift2',
        'VIX_Close_change1_shift2',
        'GSPC_Close_change1_shift2_rolling10',
        'GSPC_Close_change8_shift2_rolling10',
        'GSPC_Close_change20_shift2_rolling10',
        'DJI_Close_change1_shift2_rolling10',
        'DJI_Close_change8_shift2_rolling10',
        'DJI_Close_change20_shift2_rolling10',
        'VIX_Close_change1_shift2_rolling10dev',
        'VIX_Close_change1_shift2_rolling10mean',
        ]
split_train = dt.datetime.strftime(dt.datetime.today()-dt.timedelta(60),'%Y-%m-%d')
split_test = dt.datetime.strftime(dt.datetime.today()-dt.timedelta(25),'%Y-%m-%d')
xt = M[M.index<=split_train][features].copy()
yt = M[M.index<=split_train][kpi]
xv = M[(M.index>split_train)&(M.index<=split_test)][features].copy()
yv = M[(M.index>split_train)&(M.index<=split_test)][kpi]
xtest = M[M.index>split_test][features].copy()
ytest = M[M.index>split_test][kpi]
# Small MLP: three 15-unit hidden layers (linear / relu / sigmoid) with
# dropout and a linear output; commented lines are earlier experiments.
model = keras.Sequential([
        #keras.layers.LSTM(units=10),
        #keras.layers.Dense(len(xt.columns.values), activation=keras.activations.relu),
        keras.layers.Dense(15, activation=keras.activations.linear),
        keras.layers.Dropout(.15),
        keras.layers.Dense(15, activation=keras.activations.relu),
        keras.layers.Dropout(.15),
        keras.layers.Dense(15, activation=keras.activations.sigmoid),
        #keras.layers.Dense(len(xt.columns.values), activation=keras.activations.linear),
        #keras.layers.Dense(15, activation=keras.activations.linear),
        #keras.layers.Dense(1,activation = keras.activations.linear),
        keras.layers.Dense(1,activation = keras.activations.linear)
        #keras.layers.Dense(1,activation = keras.activations.exponential)
        ])
model.compile(optimizer=tensorflow.optimizers.Adam(),
              loss=losses.mean_squared_error,
              #batch_size=32,
              #loss=losses.categorical_crossentropy,
              metrics=[keras.metrics.MeanSquaredError()])
# Staged training with alternating batch sizes / epoch counts (experimental).
model.fit(
        #x=np.array(xt).reshape((xt.shape[0],xt.shape[1],1)),
        x=np.array(xt).reshape(xt.shape[0],xt.shape[1],),
        y=np.array(yt),
        batch_size = 2**10,
        validation_data=(np.array(xv).reshape(xv.shape[0],xv.shape[1],),np.array(yv)),
        epochs=24000)
model.fit(
        #x=np.array(xt).reshape((xt.shape[0],xt.shape[1],1)),
        x=np.array(xt).reshape(xt.shape[0],xt.shape[1],),
        y=np.array(yt),
        #batch_size = 2**9,
        validation_data=(np.array(xv).reshape(xv.shape[0],xv.shape[1],),np.array(yv)),
        epochs=1800)
model.fit(
        #x=np.array(xt).reshape((xt.shape[0],xt.shape[1],1)),
        x=np.array(xt).reshape(xt.shape[0],xt.shape[1],),
        y=np.array(yt),
        batch_size = 2**6,
        validation_data=(np.array(xv).reshape(xv.shape[0],xv.shape[1],),np.array(yv)),
        epochs=10000)
model.fit(
        #x=np.array(xt).reshape((xt.shape[0],xt.shape[1],1)),
        x=np.array(xt).reshape(xt.shape[0],xt.shape[1],),
        y=np.array(yt),
        #batch_size = 2**9,
        validation_data=(np.array(xv).reshape(xv.shape[0],xv.shape[1],),np.array(yv)),
        epochs=1800)
# Flatten the (n, 1) prediction matrices into 1-D arrays.
ypred = model.predict(np.array(xv))
ypred = ypred.reshape(ypred.shape[0])
yfit = model.predict(np.array(xt))
yfit = yfit.reshape(yfit.shape[0])
plt.plot(yt)
plt.plot(pd.Series(yfit,index=yt.index))
plt.title('nn fit')
plt.xticks(rotation=90)
plt.show()
plt.plot(yv)
plt.plot(pd.Series(ypred,index=yv.index))
plt.title('nn pred')
plt.xticks(rotation=90)
plt.show()
# Fit-vs-actual scatter for train (small dots) and validation (larger).
fig = plt.figure(figsize=(8,8))
plt.scatter(yfit,yt,s=.5)
plt.scatter(ypred,yv,s=3)
plt.xlim(.8,1.2)
plt.ylim(.8,1.2)
plt.show()
print(pd.DataFrame({'y':yv,'pred':ypred}).corr())
print(round(np.mean((yv-ypred)**2),5)) | en | 0.334925 | #KPI is SP500 % change over 15 days #SP500 rolling momentum, shifted 2 days #DJI rolling momentum shifted 2 days # VIX change, shifted 2 days # SP500 Change shifted 2 days #SP500 volume change, shifted 2 days #Intercept #Linear Model #xgboost #Keras nN #keras.layers.LSTM(units=10), #keras.layers.Dense(len(xt.columns.values), activation=keras.activations.relu), #keras.layers.Dense(len(xt.columns.values), activation=keras.activations.linear), #keras.layers.Dense(15, activation=keras.activations.linear), #keras.layers.Dense(1,activation = keras.activations.linear), #keras.layers.Dense(1,activation = keras.activations.exponential) #batch_size=32, #loss=losses.categorical_crossentropy, #x=np.array(xt).reshape((xt.shape[0],xt.shape[1],1)), #x=np.array(xt).reshape((xt.shape[0],xt.shape[1],1)), #batch_size = 2**9, #x=np.array(xt).reshape((xt.shape[0],xt.shape[1],1)), #x=np.array(xt).reshape((xt.shape[0],xt.shape[1],1)), #batch_size = 2**9, | 2.113347 | 2 |
project_root/traffic_source/apps.py | saharisrael31/marketing-service-api | 0 | 6614517 | from django.apps import AppConfig
class TrafficSourceConfig(AppConfig):
    """Django AppConfig for the ``traffic_source`` application."""
    # App label/path under which Django registers this application.
    name = 'traffic_source'
| from django.apps import AppConfig
class TrafficSourceConfig(AppConfig):
name = 'traffic_source'
| none | 1 | 1.072973 | 1 | |
baseline.py | XiaowanYi/Attention_vgg16 | 3 | 6614518 | <reponame>XiaowanYi/Attention_vgg16<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 1 13:58:47 2019
@author: yixiaowan
To get VGG16 baseline performance for the chosen classes.

For each class grouping, the pretrained VGG16 is evaluated on ImageNet
validation images split into "in context" (the grouping's wnids) and
"out of context" (all remaining wnids) sets; per-class accuracies are
collected into a CSV.
"""
import os
# Pin the process to GPU 0 before any CUDA library is loaded.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import matplotlib
matplotlib.use("Agg")  # headless backend -- no display required
import keras
from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.layers import Dense, Flatten, Reshape, Concatenate, Lambda
from keras import optimizers
from keras.models import Sequential, Model
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from keras import backend as K
from keras.layers import Layer
import numpy as np
import pandas as pd
import h5py
import random
import math
from custom_layer_constraints import CustomConstraint, SinglyConnected
#Forked from Ken's
import keras_custom_objects as KO
from custom_generator import create_good_generator
# Evaluation batch size and the 224x224 input size expected by VGG16.
bs = 64
img_rows = 224
img_cols = 224
classes_list = ['ave', 'canidae', 'cloth', 'felidae', 'kitchen', 'land_trans']
imagenet_test = '/mnt/fast-data16/datasets/ILSVRC/2012/clsloc/val/'
# Pretrained ImageNet VGG16 with its classification head.
model = VGG16(weights = 'imagenet', include_top=True, input_shape = (img_rows, img_cols, 3))
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
baseline_df = pd.DataFrame(columns = ['class_name', 'ic_acc_baseline', 'oc_acc_baseline'])
for i in range(len(classes_list)):
    class_name = classes_list[i]
    # wnids belonging to this grouping come from its groupings-csv file.
    class_csv_path = 'groupings-csv/' + class_name + '_Imagenet.csv'
    df_classes = pd.read_csv(class_csv_path, usecols=['wnid'])
    # NOTE(review): the comprehension variable `i` shadows the outer loop
    # index (harmless in Py3 comprehension scope, but confusing).
    classes = sorted([i for i in df_classes['wnid']])
    whole_list = os.listdir(imagenet_test)
    # Out-of-context classes = every validation wnid not in the grouping.
    oc_classes = sorted([i for i in whole_list if i not in classes])
    ImageGen = ImageDataGenerator(fill_mode='nearest',
                                  horizontal_flip=False,
                                  rescale=None,
                                  preprocessing_function=preprocess_input,
                                  data_format="channels_last",
                                  )
    in_context_generator, in_context_steps = create_good_generator(ImageGen,
                                                                   imagenet_test,
                                                                   batch_size=bs,
                                                                   target_size = (img_rows, img_cols),
                                                                   class_mode='sparse',
                                                                   AlextNetAug=False,
                                                                   classes=classes)
    out_context_generator, out_context_steps = create_good_generator(ImageGen,
                                                                     imagenet_test,
                                                                     batch_size=bs,
                                                                     target_size = (img_rows, img_cols),
                                                                     class_mode='sparse',
                                                                     AlextNetAug=False,
                                                                     classes=oc_classes)
    # Baseline accuracy on in-context vs. out-of-context images.
    ic_loss, ic_acc = model.evaluate_generator(in_context_generator, in_context_steps, verbose=1)
    oc_loss, oc_acc = model.evaluate_generator(out_context_generator, out_context_steps, verbose=1)
    baseline_df.loc[i] = {'class_name': class_name,
                          'ic_acc_baseline': ic_acc,
                          'oc_acc_baseline': oc_acc}
save_path = 'single_att_results/' + 'baseline.csv'
baseline_df.to_csv(save_path)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 1 13:58:47 2019
@author: yixiaowan
To get VGG16 baseline performance for the chosen classes.

For each class grouping, the pretrained VGG16 is evaluated on ImageNet
validation images split into "in context" (the grouping's wnids) and
"out of context" (all remaining wnids) sets; per-class accuracies are
collected into a CSV.
"""
import os
# Pin the process to GPU 0 before any CUDA library is loaded.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import matplotlib
matplotlib.use("Agg")  # headless backend -- no display required
import keras
from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.layers import Dense, Flatten, Reshape, Concatenate, Lambda
from keras import optimizers
from keras.models import Sequential, Model
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from keras import backend as K
from keras.layers import Layer
import numpy as np
import pandas as pd
import h5py
import random
import math
from custom_layer_constraints import CustomConstraint, SinglyConnected
#Forked from Ken's
import keras_custom_objects as KO
from custom_generator import create_good_generator
# Evaluation batch size and the 224x224 input size expected by VGG16.
bs = 64
img_rows = 224
img_cols = 224
classes_list = ['ave', 'canidae', 'cloth', 'felidae', 'kitchen', 'land_trans']
imagenet_test = '/mnt/fast-data16/datasets/ILSVRC/2012/clsloc/val/'
# Pretrained ImageNet VGG16 with its classification head.
model = VGG16(weights = 'imagenet', include_top=True, input_shape = (img_rows, img_cols, 3))
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
baseline_df = pd.DataFrame(columns = ['class_name', 'ic_acc_baseline', 'oc_acc_baseline'])
for i in range(len(classes_list)):
    class_name = classes_list[i]
    # wnids belonging to this grouping come from its groupings-csv file.
    class_csv_path = 'groupings-csv/' + class_name + '_Imagenet.csv'
    df_classes = pd.read_csv(class_csv_path, usecols=['wnid'])
    # NOTE(review): the comprehension variable `i` shadows the outer loop
    # index (harmless in Py3 comprehension scope, but confusing).
    classes = sorted([i for i in df_classes['wnid']])
    whole_list = os.listdir(imagenet_test)
    # Out-of-context classes = every validation wnid not in the grouping.
    oc_classes = sorted([i for i in whole_list if i not in classes])
    ImageGen = ImageDataGenerator(fill_mode='nearest',
                                  horizontal_flip=False,
                                  rescale=None,
                                  preprocessing_function=preprocess_input,
                                  data_format="channels_last",
                                  )
    in_context_generator, in_context_steps = create_good_generator(ImageGen,
                                                                   imagenet_test,
                                                                   batch_size=bs,
                                                                   target_size = (img_rows, img_cols),
                                                                   class_mode='sparse',
                                                                   AlextNetAug=False,
                                                                   classes=classes)
    out_context_generator, out_context_steps = create_good_generator(ImageGen,
                                                                     imagenet_test,
                                                                     batch_size=bs,
                                                                     target_size = (img_rows, img_cols),
                                                                     class_mode='sparse',
                                                                     AlextNetAug=False,
                                                                     classes=oc_classes)
    # Baseline accuracy on in-context vs. out-of-context images.
    ic_loss, ic_acc = model.evaluate_generator(in_context_generator, in_context_steps, verbose=1)
    oc_loss, oc_acc = model.evaluate_generator(out_context_generator, out_context_steps, verbose=1)
    baseline_df.loc[i] = {'class_name': class_name,
                          'ic_acc_baseline': ic_acc,
                          'oc_acc_baseline': oc_acc}
save_path = 'single_att_results/' + 'baseline.csv'
baseline_df.to_csv(save_path) | en | 0.729264 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Thu Aug 1 13:58:47 2019 @author: yixiaowan To get VGG16 baseline performance for the chosen classes. #Forked from Ken's | 2.095703 | 2 |
deliverable1/test_case_02/test_case_02.py | TrackerSB/IEEEAITestChallenge2021 | 1 | 6614519 | from unittest import TestCase
from common import SimConnection, CarControl
from common.scene import load_ego, load_npc, spawn_state
class TestCase02(TestCase):
def test_EGO_following_NPC_without_crash(self):
simConnection = SimConnection()
sim = simConnection.connect()
# Placing the suv - 10m ahead from the starting point
state = spawn_state(sim)
truck_state = CarControl.place_car_from_the_point(dimension="vertical", distance=10, state=state)
truck = load_npc(sim, "BoxTruck", truck_state)
# Driving the truck - speed 5m/s from the starting point
truck.follow_closest_lane(True, 5)
# Driving the ego - speed 1m/s from the starting point
state = spawn_state(sim)
ego_state = CarControl.drive_ego_car(state=state, directions=[("vertical", 4.5)])
ego = load_ego(sim, "Lincoln2017MKZ (Apollo 5.0)", ego_state)
# Run the simulator for 10 seconds with debug mode
simConnection.execute(timeout=10)
self.assertEqual(True, True)
simConnection.sim.close()
| from unittest import TestCase
from common import SimConnection, CarControl
from common.scene import load_ego, load_npc, spawn_state
class TestCase02(TestCase):
def test_EGO_following_NPC_without_crash(self):
simConnection = SimConnection()
sim = simConnection.connect()
# Placing the suv - 10m ahead from the starting point
state = spawn_state(sim)
truck_state = CarControl.place_car_from_the_point(dimension="vertical", distance=10, state=state)
truck = load_npc(sim, "BoxTruck", truck_state)
# Driving the truck - speed 5m/s from the starting point
truck.follow_closest_lane(True, 5)
# Driving the ego - speed 1m/s from the starting point
state = spawn_state(sim)
ego_state = CarControl.drive_ego_car(state=state, directions=[("vertical", 4.5)])
ego = load_ego(sim, "Lincoln2017MKZ (Apollo 5.0)", ego_state)
# Run the simulator for 10 seconds with debug mode
simConnection.execute(timeout=10)
self.assertEqual(True, True)
simConnection.sim.close()
| en | 0.821324 | # Placing the suv - 10m ahead from the starting point # Driving the truck - speed 5m/s from the starting point # Driving the ego - speed 1m/s from the starting point # Run the simulator for 10 seconds with debug mode | 2.819034 | 3 |
Simulate.py | henrymanthorpe/RandomWalks | 0 | 6614520 | <reponame>henrymanthorpe/RandomWalks<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 17 16:12:45 2020
@author: henry
"""
import numpy as np
import quaternion
from Input import Variables
from numpy.random import Generator, PCG64, SeedSequence
vec_z = np.array([0, 0, 1])
pi = np.pi
# %% Tumble Functions
def Normalise(vec):
return vec/np.linalg.norm(vec)
def MakeRotationQuaternion(angle, vec):
a = np.cos(angle/2)
b = np.sin(angle/2)
axis = vec*b
axis = np.append(a, axis)
quat = quaternion.from_float_array(axis)
return quat
def Tumble(diff_angle, spin_angle, vec_int):
diff_vec = Normalise(np.cross(vec_int, vec_z))
diff_quat = MakeRotationQuaternion(diff_angle, diff_vec)
vec_mid = quaternion.rotate_vectors(diff_quat, vec_int)
spin_vec = Normalise(vec_int)
spin_quat = MakeRotationQuaternion(spin_angle, spin_vec)
vec_final = quaternion.rotate_vectors(spin_quat, vec_mid)
return vec_final
# %% Bacterium Class Initialisation
class Bacterium:
def __init__(self, fname):
self.vars = Variables(fname)
self.seed = SeedSequence()
self.rand_gen = Generator(PCG64(self.seed))
self.vector_initial = Normalise(self.rand_gen.uniform(-1,1,3))
self.pos_initial = np.array([0, 0, 0])
self.time = np.full(self.vars.sample_total, self.vars.base_time)
self.time = np.append([0], self.time)
self.time = np.cumsum(self.time)
self.time = np.reshape(self.time, (self.time.size, 1))
# %% Extra Functions
def ReSeed(self, entropy):
self.seed = SeedSequence(entropy)
self.rand_gen = Generator(PCG64(self.seed))
def Linear(self):
self.std_dev_linear = np.sqrt(2*self.vars.diffusion_constant_linear
* self.vars.base_time)
self.linear_diffusion = np.random.normal(
0.0, self.std_dev_linear, (self.vars.sample_total, 3))
def Rotational(self):
self.std_dev_rotational\
= np.sqrt(2*self.vars.diffusion_constant_rotational
* self.vars.base_time)
self.rotational_sample = self.rand_gen.normal(
0.0, self.std_dev_rotational, (2, self.vars.sample_total))
self.diffusion_sample = np.sqrt(np.square(self.rotational_sample[0])
+ np.square(self.rotational_sample[1]))
self.spin_sample = self.rand_gen.random(self.vars.sample_total)*2*pi
self.vectors_cartesian_diffusion\
= np.zeros((self.vars.sample_total, 3))
self.vectors_cartesian_diffusion[0] = Tumble(self.diffusion_sample[0],
self.spin_sample[0],
self.vector_initial)
for i in range(1, self.vars.sample_total):
self.vectors_cartesian_diffusion[i]\
= Tumble(self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian_diffusion[i-1])
# %% Simulation Code Path
def Complete(self):
# Diffusion Sampling
if self.vars.diffusive:
self.std_dev_linear = np.sqrt(2*self.vars.diffusion_constant_linear
* self.vars.base_time)
self.linear_diffusion = self.rand_gen.normal(
0.0, self.std_dev_linear, (self.vars.sample_total, 3))
self.std_dev_rotational\
= np.sqrt(2*self.vars.diffusion_constant_rotational
* self.vars.base_time)
self.rotational_sample = self.rand_gen.normal(
0.0, self.std_dev_rotational, (2, self.vars.sample_total))
self.diffusion_sample = np.sqrt(np.square(self.rotational_sample[0])
+ np.square(self.rotational_sample[1]))
self.spin_sample = self.rand_gen.uniform(
0.0, 2*pi, self.vars.sample_total)
else:
self.linear_diffusion = np.zeros((self.vars.sample_total, 3))
self.diffusion_sample = np.zeros(self.vars.sample_total)
self.spin_sample = np.zeros(self.vars.sample_total)
# Data Array Initialisation
self.vectors_cartesian = np.zeros((self.vars.sample_total, 3))
self.vectors_cartesian = np.vstack(
(self.vector_initial, self.vectors_cartesian))
self.displacement = np.zeros((self.vars.sample_total, 3))
# Linear Diffusion
self.displacement += self.linear_diffusion
self.displacement = np.vstack((self.pos_initial, self.displacement))
if self.vars.chemotactic:
self.state = 'run_chemotactic'
self.chemotactic_memory\
= [0 for x in range(self.vars.chem_mem_size)]
else:
self.state = 'run'
self.elapsed_time = 0
self.run_log = []
self.tumble_log = [] # Logs Run&Tumble Behaviour
self.run_run_cosines = []
if self.vars.run_behaviour is True:
while self.elapsed_time < self.vars.sample_total:
# %% Run Mode - Non Chemotactic
if self.state == 'run':
if self.vars.run_variation is True:
current_run_length = int(np.ceil(
self.rand_gen.exponential(
self.vars.run_length_mean)))
else:
current_run_length = self.vars.run_length_mean
elapsed_run_length = 0
while elapsed_run_length < current_run_length:
if self.elapsed_time >= self.vars.sample_total:
break
i = self.elapsed_time
if self.vars.diffusive:
self.vectors_cartesian[i+1]\
= Tumble(self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
else:
self.vectors_cartesian[i+1] = self.vectors_cartesian[i]
self.displacement[i+1]\
+= self.vectors_cartesian[i+1]\
* self.vars.run_step
self.elapsed_time += 1
elapsed_run_length += 1
self.run_log.append(elapsed_run_length)
if self.vars.archaea_mode:
self.state = 'reverse'
else:
self.state = self.vars.tumble_type
# %% Run Mode - Chemotactic
elif self.state == 'run_chemotactic':
if self.vars.run_variation is True:
current_run_length = int(np.ceil(
self.rand_gen.exponential(
self.vars.run_length_mean)))
else:
current_run_length = self.vars.run_length_mean
elapsed_run_length = 0
chemotactic_factor = np.mean(self.chemotactic_memory)\
* self.vars.chem_factor
if chemotactic_factor < 0:
chemotactic_factor = 0
chemotactic_run_length = current_run_length\
* (chemotactic_factor + 1)
while elapsed_run_length < chemotactic_run_length:
if self.elapsed_time >= self.vars.sample_total:
break
i = self.elapsed_time
if self.vars.diffusive:
self.vectors_cartesian[i + 1] \
= Tumble(self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
else:
self.vectors_cartesian[i + 1] = self.vectors_cartesian[i]
self.displacement[i+1]\
+= self.vectors_cartesian[i+1]\
* self.vars.run_step
self.elapsed_time += 1
elapsed_run_length += 1
chemotactic_value = np.dot(
self.vectors_cartesian[i+1],
self.vars.chem_source)
self.chemotactic_memory.pop(0)
self.chemotactic_memory.append(chemotactic_value)
chemotactic_factor = np.mean(self.chemotactic_memory)\
* self.vars.chem_factor
if chemotactic_factor < 0:
chemotactic_factor = 0
chemotactic_run_length = current_run_length\
* (chemotactic_factor + 1)
self.run_log.append(elapsed_run_length)
if self.vars.archaea_mode:
self.state = 'reverse_chemotactic'
else:
self.state = self.vars.tumble_type
# %% Reverse Mode - For Archaea
elif self.state == 'reverse':
if self.vars.run_variation is True:
current_run_length = int(np.ceil(
self.rand_gen.exponential(
self.vars.run_length_mean)))
else:
current_run_length = self.vars.run_length_mean
elapsed_run_length = 0
while elapsed_run_length < current_run_length:
if self.elapsed_time >= self.vars.sample_total:
break
i = self.elapsed_time
if self.vars.diffusive:
self.vectors_cartesian[i + 1] \
= Tumble(self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
else:
self.vectors_cartesian[i + 1] = self.vectors_cartesian[i]
self.displacement[i+1]\
+= self.vectors_cartesian[i+1]\
* -self.vars.run_step
self.elapsed_time += 1
elapsed_run_length += 1
self.run_log.append(elapsed_run_length)
self.state = 'run'
# %% Reverse Mode - For Chemotactic archaea
elif self.state == 'reverse_chemotactic':
if self.vars.run_variation is True:
current_run_length = int(np.ceil(
self.rand_gen.exponential(
self.vars.run_length_mean)))
else:
current_run_length = self.vars.run_length_mean
elapsed_run_length = 0
chemotactic_factor = np.mean(self.chemotactic_memory)\
* self.vars.chem_factor
if chemotactic_factor < 0:
chemotactic_factor = 0
chemotactic_run_length = current_run_length\
* (chemotactic_factor + 1)
while elapsed_run_length < chemotactic_run_length:
if self.elapsed_time >= self.vars.sample_total:
break
i = self.elapsed_time
if self.vars.diffusive:
self.vectors_cartesian[i + 1] \
= Tumble(self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
else:
self.vectors_cartesian[i + 1] = self.vectors_cartesian[i]
self.displacement[i+1]\
+= self.vectors_cartesian[i+1]\
* -self.vars.run_step
self.elapsed_time += 1
elapsed_run_length += 1
chemotactic_value = np.dot(-self.vectors_cartesian[i+1],
self.vars.chem_source)
self.chemotactic_memory.pop(0)
self.chemotactic_memory.append(chemotactic_value)
chemotactic_factor = np.mean(self.chemotactic_memory)\
* self.vars.chem_factor
if chemotactic_factor < 0:
chemotactic_factor = 0
chemotactic_run_length = current_run_length\
* (chemotactic_factor + 1)
self.run_log.append(elapsed_run_length)
self.state = 'run_chemotactic'
# %% Erratic Tumble Mode
elif self.state == 'erratic':
if self.vars.tumble_duration_mean == 0:
current_tumble_length = 0
elif self.vars.tumble_duration_variation is True:
current_tumble_length\
= int(np.ceil(self.rand_gen.exponential(
self.vars.tumble_length_mean)))
else:
current_tumble_length\
= self.vars.tumble_length_mean
if self.elapsed_time + current_tumble_length\
< self.vars.sample_total:
start_vec = self.vectors_cartesian[self.elapsed_time]
for i in range(self.elapsed_time,
self.elapsed_time
+ current_tumble_length):
if self.vars.diffusive:
self.vectors_cartesian[i + 1] \
= Tumble(self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
else:
self.vectors_cartesian[i + 1] = self.vectors_cartesian[i]
self.vectors_cartesian[i+1]\
= Tumble(self.vars.tumble_ang_step,
self.rand_gen.uniform(0, 2*np.pi),
self.vectors_cartesian[i+1])
self.elapsed_time += current_tumble_length
end_vec = self.vectors_cartesian[self.elapsed_time]
self.run_run_cosines.append(np.dot(start_vec, end_vec))
self.tumble_log.append(current_tumble_length)
if self.vars.chemotactic:
self.state = 'run_chemotactic'
else:
self.state = 'run'
else:
for i in range(self.elapsed_time,
self.vars.sample_total):
self.vectors_cartesian[i+1] = Tumble(
self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
self.tumble_log.append(
self.vars.sample_total-self.elapsed_time)
break
# %% Smooth Tumble Mode
elif self.state == 'smooth':
if self.vars.tumble_duration_mean == 0:
current_tumble_length = 0
elif self.vars.tumble_duration_variation is True:
current_tumble_length\
= int(np.ceil(self.rand_gen.exponential(
self.vars.tumble_length_mean)))
else:
current_tumble_length\
= self.vars.tumble_length_mean
if self.elapsed_time + current_tumble_length\
< self.vars.sample_total:
start_vec = self.vectors_cartesian[self.elapsed_time]
for i in range(self.elapsed_time,
self.elapsed_time
+ current_tumble_length):
if self.vars.diffusive:
self.vectors_cartesian[i + 1] \
= Tumble(self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
else:
self.vectors_cartesian[i + 1] = self.vectors_cartesian[i]
self.elapsed_time += current_tumble_length
tumble_angle = self.vars.tumble_ang_step\
* current_tumble_length
spin_angle = self.rand_gen.uniform(0, 2*np.pi)
self.vectors_cartesian[self.elapsed_time]\
= Tumble(tumble_angle, spin_angle,
self.vectors_cartesian[self.elapsed_time])
end_vec = self.vectors_cartesian[self.elapsed_time]
self.run_run_cosines.append(np.dot(start_vec, end_vec))
self.tumble_log.append(current_tumble_length)
if self.vars.chemotactic:
self.state = 'run_chemotactic'
else:
self.state = 'run'
else:
for i in range(self.elapsed_time,
self.vars.sample_total):
self.vectors_cartesian[i+1] = Tumble(
self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
self.tumble_log.append(
self.vars.sample_total-self.elapsed_time)
break
# %% Pause Tumble Mode
elif self.state == 'pause':
if self.vars.tumble_duration_mean == 0:
current_tumble_length = 0
elif self.vars.tumble_duration_variation is True:
current_tumble_length\
= int(np.ceil(self.rand_gen.exponential(
self.vars.tumble_length_mean)))
else:
current_tumble_length\
= self.vars.tumble_length_mean
if self.elapsed_time + current_tumble_length\
< self.vars.sample_total:
start_vec = self.vectors_cartesian[self.elapsed_time]
for i in range(self.elapsed_time,
self.elapsed_time
+ current_tumble_length):
if self.vars.diffusive:
self.vectors_cartesian[i + 1] \
= Tumble(self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
else:
self.vectors_cartesian[i + 1] = self.vectors_cartesian[i]
self.elapsed_time += current_tumble_length
end_vec = self.vectors_cartesian[self.elapsed_time]
self.run_run_cosines.append(np.dot(start_vec, end_vec))
self.tumble_log.append(current_tumble_length)
if self.vars.chemotactic:
self.state = 'run_chemotactic'
else:
self.state = 'run'
else:
for i in range(self.elapsed_time,
self.vars.sample_total):
self.vectors_cartesian[i+1] = Tumble(
self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
self.tumble_log.append(
self.vars.sample_total-self.elapsed_time)
break
# %% If self.state is unknown
else:
print('Unknown state %s occurred at sim_time %d '
% (self.state, self.elapsed_time))
break
# %% Rotational Diffusion simulation for non-motile samples
else:
for i in range(self.elapsed_time,
self.vars.sample_total):
if self.vars.diffusive:
self.vectors_cartesian[i + 1] \
= Tumble(self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
else:
self.vectors_cartesian[i + 1] = self.vectors_cartesian[i]
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 17 16:12:45 2020
@author: henry
"""
import numpy as np
import quaternion
from Input import Variables
from numpy.random import Generator, PCG64, SeedSequence
vec_z = np.array([0, 0, 1])
pi = np.pi
# %% Tumble Functions
def Normalise(vec):
return vec/np.linalg.norm(vec)
def MakeRotationQuaternion(angle, vec):
a = np.cos(angle/2)
b = np.sin(angle/2)
axis = vec*b
axis = np.append(a, axis)
quat = quaternion.from_float_array(axis)
return quat
def Tumble(diff_angle, spin_angle, vec_int):
diff_vec = Normalise(np.cross(vec_int, vec_z))
diff_quat = MakeRotationQuaternion(diff_angle, diff_vec)
vec_mid = quaternion.rotate_vectors(diff_quat, vec_int)
spin_vec = Normalise(vec_int)
spin_quat = MakeRotationQuaternion(spin_angle, spin_vec)
vec_final = quaternion.rotate_vectors(spin_quat, vec_mid)
return vec_final
# %% Bacterium Class Initialisation
class Bacterium:
def __init__(self, fname):
self.vars = Variables(fname)
self.seed = SeedSequence()
self.rand_gen = Generator(PCG64(self.seed))
self.vector_initial = Normalise(self.rand_gen.uniform(-1,1,3))
self.pos_initial = np.array([0, 0, 0])
self.time = np.full(self.vars.sample_total, self.vars.base_time)
self.time = np.append([0], self.time)
self.time = np.cumsum(self.time)
self.time = np.reshape(self.time, (self.time.size, 1))
# %% Extra Functions
def ReSeed(self, entropy):
self.seed = SeedSequence(entropy)
self.rand_gen = Generator(PCG64(self.seed))
def Linear(self):
self.std_dev_linear = np.sqrt(2*self.vars.diffusion_constant_linear
* self.vars.base_time)
self.linear_diffusion = np.random.normal(
0.0, self.std_dev_linear, (self.vars.sample_total, 3))
def Rotational(self):
self.std_dev_rotational\
= np.sqrt(2*self.vars.diffusion_constant_rotational
* self.vars.base_time)
self.rotational_sample = self.rand_gen.normal(
0.0, self.std_dev_rotational, (2, self.vars.sample_total))
self.diffusion_sample = np.sqrt(np.square(self.rotational_sample[0])
+ np.square(self.rotational_sample[1]))
self.spin_sample = self.rand_gen.random(self.vars.sample_total)*2*pi
self.vectors_cartesian_diffusion\
= np.zeros((self.vars.sample_total, 3))
self.vectors_cartesian_diffusion[0] = Tumble(self.diffusion_sample[0],
self.spin_sample[0],
self.vector_initial)
for i in range(1, self.vars.sample_total):
self.vectors_cartesian_diffusion[i]\
= Tumble(self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian_diffusion[i-1])
# %% Simulation Code Path
def Complete(self):
# Diffusion Sampling
if self.vars.diffusive:
self.std_dev_linear = np.sqrt(2*self.vars.diffusion_constant_linear
* self.vars.base_time)
self.linear_diffusion = self.rand_gen.normal(
0.0, self.std_dev_linear, (self.vars.sample_total, 3))
self.std_dev_rotational\
= np.sqrt(2*self.vars.diffusion_constant_rotational
* self.vars.base_time)
self.rotational_sample = self.rand_gen.normal(
0.0, self.std_dev_rotational, (2, self.vars.sample_total))
self.diffusion_sample = np.sqrt(np.square(self.rotational_sample[0])
+ np.square(self.rotational_sample[1]))
self.spin_sample = self.rand_gen.uniform(
0.0, 2*pi, self.vars.sample_total)
else:
self.linear_diffusion = np.zeros((self.vars.sample_total, 3))
self.diffusion_sample = np.zeros(self.vars.sample_total)
self.spin_sample = np.zeros(self.vars.sample_total)
# Data Array Initialisation
self.vectors_cartesian = np.zeros((self.vars.sample_total, 3))
self.vectors_cartesian = np.vstack(
(self.vector_initial, self.vectors_cartesian))
self.displacement = np.zeros((self.vars.sample_total, 3))
# Linear Diffusion
self.displacement += self.linear_diffusion
self.displacement = np.vstack((self.pos_initial, self.displacement))
if self.vars.chemotactic:
self.state = 'run_chemotactic'
self.chemotactic_memory\
= [0 for x in range(self.vars.chem_mem_size)]
else:
self.state = 'run'
self.elapsed_time = 0
self.run_log = []
self.tumble_log = [] # Logs Run&Tumble Behaviour
self.run_run_cosines = []
if self.vars.run_behaviour is True:
while self.elapsed_time < self.vars.sample_total:
# %% Run Mode - Non Chemotactic
if self.state == 'run':
if self.vars.run_variation is True:
current_run_length = int(np.ceil(
self.rand_gen.exponential(
self.vars.run_length_mean)))
else:
current_run_length = self.vars.run_length_mean
elapsed_run_length = 0
while elapsed_run_length < current_run_length:
if self.elapsed_time >= self.vars.sample_total:
break
i = self.elapsed_time
if self.vars.diffusive:
self.vectors_cartesian[i+1]\
= Tumble(self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
else:
self.vectors_cartesian[i+1] = self.vectors_cartesian[i]
self.displacement[i+1]\
+= self.vectors_cartesian[i+1]\
* self.vars.run_step
self.elapsed_time += 1
elapsed_run_length += 1
self.run_log.append(elapsed_run_length)
if self.vars.archaea_mode:
self.state = 'reverse'
else:
self.state = self.vars.tumble_type
# %% Run Mode - Chemotactic
elif self.state == 'run_chemotactic':
if self.vars.run_variation is True:
current_run_length = int(np.ceil(
self.rand_gen.exponential(
self.vars.run_length_mean)))
else:
current_run_length = self.vars.run_length_mean
elapsed_run_length = 0
chemotactic_factor = np.mean(self.chemotactic_memory)\
* self.vars.chem_factor
if chemotactic_factor < 0:
chemotactic_factor = 0
chemotactic_run_length = current_run_length\
* (chemotactic_factor + 1)
while elapsed_run_length < chemotactic_run_length:
if self.elapsed_time >= self.vars.sample_total:
break
i = self.elapsed_time
if self.vars.diffusive:
self.vectors_cartesian[i + 1] \
= Tumble(self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
else:
self.vectors_cartesian[i + 1] = self.vectors_cartesian[i]
self.displacement[i+1]\
+= self.vectors_cartesian[i+1]\
* self.vars.run_step
self.elapsed_time += 1
elapsed_run_length += 1
chemotactic_value = np.dot(
self.vectors_cartesian[i+1],
self.vars.chem_source)
self.chemotactic_memory.pop(0)
self.chemotactic_memory.append(chemotactic_value)
chemotactic_factor = np.mean(self.chemotactic_memory)\
* self.vars.chem_factor
if chemotactic_factor < 0:
chemotactic_factor = 0
chemotactic_run_length = current_run_length\
* (chemotactic_factor + 1)
self.run_log.append(elapsed_run_length)
if self.vars.archaea_mode:
self.state = 'reverse_chemotactic'
else:
self.state = self.vars.tumble_type
# %% Reverse Mode - For Archaea
elif self.state == 'reverse':
if self.vars.run_variation is True:
current_run_length = int(np.ceil(
self.rand_gen.exponential(
self.vars.run_length_mean)))
else:
current_run_length = self.vars.run_length_mean
elapsed_run_length = 0
while elapsed_run_length < current_run_length:
if self.elapsed_time >= self.vars.sample_total:
break
i = self.elapsed_time
if self.vars.diffusive:
self.vectors_cartesian[i + 1] \
= Tumble(self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
else:
self.vectors_cartesian[i + 1] = self.vectors_cartesian[i]
self.displacement[i+1]\
+= self.vectors_cartesian[i+1]\
* -self.vars.run_step
self.elapsed_time += 1
elapsed_run_length += 1
self.run_log.append(elapsed_run_length)
self.state = 'run'
# %% Reverse Mode - For Chemotactic archaea
elif self.state == 'reverse_chemotactic':
if self.vars.run_variation is True:
current_run_length = int(np.ceil(
self.rand_gen.exponential(
self.vars.run_length_mean)))
else:
current_run_length = self.vars.run_length_mean
elapsed_run_length = 0
chemotactic_factor = np.mean(self.chemotactic_memory)\
* self.vars.chem_factor
if chemotactic_factor < 0:
chemotactic_factor = 0
chemotactic_run_length = current_run_length\
* (chemotactic_factor + 1)
while elapsed_run_length < chemotactic_run_length:
if self.elapsed_time >= self.vars.sample_total:
break
i = self.elapsed_time
if self.vars.diffusive:
self.vectors_cartesian[i + 1] \
= Tumble(self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
else:
self.vectors_cartesian[i + 1] = self.vectors_cartesian[i]
self.displacement[i+1]\
+= self.vectors_cartesian[i+1]\
* -self.vars.run_step
self.elapsed_time += 1
elapsed_run_length += 1
chemotactic_value = np.dot(-self.vectors_cartesian[i+1],
self.vars.chem_source)
self.chemotactic_memory.pop(0)
self.chemotactic_memory.append(chemotactic_value)
chemotactic_factor = np.mean(self.chemotactic_memory)\
* self.vars.chem_factor
if chemotactic_factor < 0:
chemotactic_factor = 0
chemotactic_run_length = current_run_length\
* (chemotactic_factor + 1)
self.run_log.append(elapsed_run_length)
self.state = 'run_chemotactic'
# %% Erratic Tumble Mode
elif self.state == 'erratic':
if self.vars.tumble_duration_mean == 0:
current_tumble_length = 0
elif self.vars.tumble_duration_variation is True:
current_tumble_length\
= int(np.ceil(self.rand_gen.exponential(
self.vars.tumble_length_mean)))
else:
current_tumble_length\
= self.vars.tumble_length_mean
if self.elapsed_time + current_tumble_length\
< self.vars.sample_total:
start_vec = self.vectors_cartesian[self.elapsed_time]
for i in range(self.elapsed_time,
self.elapsed_time
+ current_tumble_length):
if self.vars.diffusive:
self.vectors_cartesian[i + 1] \
= Tumble(self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
else:
self.vectors_cartesian[i + 1] = self.vectors_cartesian[i]
self.vectors_cartesian[i+1]\
= Tumble(self.vars.tumble_ang_step,
self.rand_gen.uniform(0, 2*np.pi),
self.vectors_cartesian[i+1])
self.elapsed_time += current_tumble_length
end_vec = self.vectors_cartesian[self.elapsed_time]
self.run_run_cosines.append(np.dot(start_vec, end_vec))
self.tumble_log.append(current_tumble_length)
if self.vars.chemotactic:
self.state = 'run_chemotactic'
else:
self.state = 'run'
else:
for i in range(self.elapsed_time,
self.vars.sample_total):
self.vectors_cartesian[i+1] = Tumble(
self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
self.tumble_log.append(
self.vars.sample_total-self.elapsed_time)
break
# %% Smooth Tumble Mode
elif self.state == 'smooth':
if self.vars.tumble_duration_mean == 0:
current_tumble_length = 0
elif self.vars.tumble_duration_variation is True:
current_tumble_length\
= int(np.ceil(self.rand_gen.exponential(
self.vars.tumble_length_mean)))
else:
current_tumble_length\
= self.vars.tumble_length_mean
if self.elapsed_time + current_tumble_length\
< self.vars.sample_total:
start_vec = self.vectors_cartesian[self.elapsed_time]
for i in range(self.elapsed_time,
self.elapsed_time
+ current_tumble_length):
if self.vars.diffusive:
self.vectors_cartesian[i + 1] \
= Tumble(self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
else:
self.vectors_cartesian[i + 1] = self.vectors_cartesian[i]
self.elapsed_time += current_tumble_length
tumble_angle = self.vars.tumble_ang_step\
* current_tumble_length
spin_angle = self.rand_gen.uniform(0, 2*np.pi)
self.vectors_cartesian[self.elapsed_time]\
= Tumble(tumble_angle, spin_angle,
self.vectors_cartesian[self.elapsed_time])
end_vec = self.vectors_cartesian[self.elapsed_time]
self.run_run_cosines.append(np.dot(start_vec, end_vec))
self.tumble_log.append(current_tumble_length)
if self.vars.chemotactic:
self.state = 'run_chemotactic'
else:
self.state = 'run'
else:
for i in range(self.elapsed_time,
self.vars.sample_total):
self.vectors_cartesian[i+1] = Tumble(
self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
self.tumble_log.append(
self.vars.sample_total-self.elapsed_time)
break
# %% Pause Tumble Mode
elif self.state == 'pause':
if self.vars.tumble_duration_mean == 0:
current_tumble_length = 0
elif self.vars.tumble_duration_variation is True:
current_tumble_length\
= int(np.ceil(self.rand_gen.exponential(
self.vars.tumble_length_mean)))
else:
current_tumble_length\
= self.vars.tumble_length_mean
if self.elapsed_time + current_tumble_length\
< self.vars.sample_total:
start_vec = self.vectors_cartesian[self.elapsed_time]
for i in range(self.elapsed_time,
self.elapsed_time
+ current_tumble_length):
if self.vars.diffusive:
self.vectors_cartesian[i + 1] \
= Tumble(self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
else:
self.vectors_cartesian[i + 1] = self.vectors_cartesian[i]
self.elapsed_time += current_tumble_length
end_vec = self.vectors_cartesian[self.elapsed_time]
self.run_run_cosines.append(np.dot(start_vec, end_vec))
self.tumble_log.append(current_tumble_length)
if self.vars.chemotactic:
self.state = 'run_chemotactic'
else:
self.state = 'run'
else:
for i in range(self.elapsed_time,
self.vars.sample_total):
self.vectors_cartesian[i+1] = Tumble(
self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
self.tumble_log.append(
self.vars.sample_total-self.elapsed_time)
break
# %% If self.state is unknown
else:
print('Unknown state %s occurred at sim_time %d '
% (self.state, self.elapsed_time))
break
# %% Rotational Diffusion simulation for non-motile samples
else:
for i in range(self.elapsed_time,
self.vars.sample_total):
if self.vars.diffusive:
self.vectors_cartesian[i + 1] \
= Tumble(self.diffusion_sample[i],
self.spin_sample[i],
self.vectors_cartesian[i])
else:
self.vectors_cartesian[i + 1] = self.vectors_cartesian[i] | en | 0.385685 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Mon Feb 17 16:12:45 2020 @author: henry # %% Tumble Functions # %% Bacterium Class Initialisation # %% Extra Functions # %% Simulation Code Path # Diffusion Sampling # Data Array Initialisation # Linear Diffusion # Logs Run&Tumble Behaviour # %% Run Mode - Non Chemotactic # %% Run Mode - Chemotactic # %% Reverse Mode - For Archaea # %% Reverse Mode - For Chemotactic archaea # %% Erratic Tumble Mode # %% Smooth Tumble Mode # %% Pause Tumble Mode # %% If self.state is unknown # %% Rotational Diffusion simulation for non-motile samples | 2.924198 | 3 |
python-cim/samples/dump_class_instance.py | dnides/flare-wmi | 0 | 6614521 | import logging
import traceback
import hexdump
from cim import CIM
from cim import Index
from cim.objects import InstanceKey
from cim.objects import ObjectResolver
from cim.formatters import dump_instance
# this is surprising... what happens to unicode data?
ENCODING = "ascii"
def compute_instance_hash(index, instance):
keys = instance.class_layout.class_definition.keys
key = instance.key
print(key)
parts = []
for k in keys:
print(k, key[k])
parts.append(key[k].encode(ENCODING) + "\x00".encode(ENCODING))
import itertools
for u in itertools.permutations(parts):
hexdump.hexdump(b"\xff\xff".join(u))
print(" -->" + index.hash(b"\xff\xff".join(u)))
print(" -->" + index.hash(b"\xff".join(u)))
print(" -->" + index.hash(b"".join(u)))
print(str(keys))
return ""
def main(type_, path, namespaceName, className, key_specifier=None):
if type_ not in ("xp", "win7"):
raise RuntimeError("Invalid mapping type: {:s}".format(type_))
c = CIM(type_, path)
index = Index(c.cim_type, c.logical_index_store)
o = ObjectResolver(c, index)
cd = o.get_cd(namespaceName, className)
cl = o.get_cl(namespaceName, className)
instances = []
if key_specifier:
key_values = key_specifier.split(",")
key = InstanceKey()
for key_value in key_values:
if "=" not in key_value:
raise RuntimeError("Invalid key specifier: " + str(key_value))
k, _, v = key_value.partition("=")
key[k] = v
print(str(key))
ci = o.get_ci(namespaceName, className, key)
instances.append(ci)
else:
for instance in o.get_cd_children_ci(namespaceName, className):
ci = o.get_ci(namespaceName, className, instance.instance_key)
instances.append(ci)
for instance in instances:
print("%s" % "=" * 80)
#print(compute_instance_hash(index, instance))
try:
print(dump_instance(instance, encoding='ascii', encoding_errors='ignore'))
except:
print("ERROR: failed to dump class instance!")
print(traceback.format_exc())
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
import sys
main(*sys.argv[1:])
| import logging
import traceback
import hexdump
from cim import CIM
from cim import Index
from cim.objects import InstanceKey
from cim.objects import ObjectResolver
from cim.formatters import dump_instance
# this is surprising... what happens to unicode data?
ENCODING = "ascii"
def compute_instance_hash(index, instance):
keys = instance.class_layout.class_definition.keys
key = instance.key
print(key)
parts = []
for k in keys:
print(k, key[k])
parts.append(key[k].encode(ENCODING) + "\x00".encode(ENCODING))
import itertools
for u in itertools.permutations(parts):
hexdump.hexdump(b"\xff\xff".join(u))
print(" -->" + index.hash(b"\xff\xff".join(u)))
print(" -->" + index.hash(b"\xff".join(u)))
print(" -->" + index.hash(b"".join(u)))
print(str(keys))
return ""
def main(type_, path, namespaceName, className, key_specifier=None):
if type_ not in ("xp", "win7"):
raise RuntimeError("Invalid mapping type: {:s}".format(type_))
c = CIM(type_, path)
index = Index(c.cim_type, c.logical_index_store)
o = ObjectResolver(c, index)
cd = o.get_cd(namespaceName, className)
cl = o.get_cl(namespaceName, className)
instances = []
if key_specifier:
key_values = key_specifier.split(",")
key = InstanceKey()
for key_value in key_values:
if "=" not in key_value:
raise RuntimeError("Invalid key specifier: " + str(key_value))
k, _, v = key_value.partition("=")
key[k] = v
print(str(key))
ci = o.get_ci(namespaceName, className, key)
instances.append(ci)
else:
for instance in o.get_cd_children_ci(namespaceName, className):
ci = o.get_ci(namespaceName, className, instance.instance_key)
instances.append(ci)
for instance in instances:
print("%s" % "=" * 80)
#print(compute_instance_hash(index, instance))
try:
print(dump_instance(instance, encoding='ascii', encoding_errors='ignore'))
except:
print("ERROR: failed to dump class instance!")
print(traceback.format_exc())
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
import sys
main(*sys.argv[1:])
| en | 0.69268 | # this is surprising... what happens to unicode data? #print(compute_instance_hash(index, instance)) | 2.303893 | 2 |
metagenscope_cli/cli/get_cli.py | LongTailBio/python-metagenscope | 0 | 6614522 | <gh_stars>0
"""CLI to get data from a MetaGenScope Server."""
from sys import stderr
import click
from requests.exceptions import HTTPError
from .utils import add_authorization
@click.group()
def get():
"""Get data from the server."""
pass
@get.command(name='orgs')
@add_authorization()
def get_orgs(uploader):
    """List the organizations known to the server."""
    try:
        organizations = uploader.knex.get('/api/v1/organizations')
    except HTTPError as err:
        # Report the HTTP failure on stderr so stdout stays clean.
        print(str(err), file=stderr)
    else:
        click.echo(organizations)
@get.group()
def uuids():
"""Get UUIDs from the server."""
pass
def report_uuid(name, uuid):
"""Report a uuid to the user."""
click.echo(f'{name}\t{uuid}')
@uuids.command(name='samples')
@add_authorization()
@click.argument('sample_names', nargs=-1)
def sample_uuids(uploader, sample_names):
    """Get UUIDs for the given sample names."""
    for sample_name in sample_names:
        try:
            response = uploader.knex.get(f'/api/v1/samples/getid/{sample_name}')
            report_uuid(response['data']['sample_name'],
                        response['data']['sample_uuid'])
        except Exception:  # pylint: disable=broad-except
            # Consistency fix: the sibling `groups`/`orgs` commands treat each
            # lookup as best-effort; previously one bad sample name aborted
            # every remaining lookup here.
            print(f'Failed to get uuid for {sample_name}', file=stderr)
@uuids.command(name='groups')
@add_authorization()
@click.argument('sample_group_names', nargs=-1)
def sample_group_uuids(uploader, sample_group_names):
    """Look up and print the UUID of each named sample group."""
    for name in sample_group_names:
        url = f'/api/v1/sample_groups/getid/{name}'
        try:
            payload = uploader.knex.get(url)['data']
            report_uuid(payload['sample_group_name'], payload['sample_group_uuid'])
        except Exception:  # pylint: disable=broad-except
            # Best effort: report the failure and continue with the rest.
            print(f'Failed to get uuid for {name}', file=stderr)
@uuids.command(name='orgs')
@add_authorization()
@click.argument('org_names', nargs=-1)
def org_uuids(uploader, org_names):
    """Look up and print the UUID of each named organization."""
    for name in org_names:
        url = f'/api/v1/organizations/getid/{name}'
        try:
            payload = uploader.knex.get(url)['data']
            report_uuid(payload['organization_name'], payload['organization_uuid'])
        except Exception:  # pylint: disable=broad-except
            # Best effort: report the failure and continue with the rest.
            print(f'Failed to get uuid for {name}', file=stderr)
| """CLI to get data from a MetaGenScope Server."""
from sys import stderr
import click
from requests.exceptions import HTTPError
from .utils import add_authorization
@click.group()
def get():
"""Get data from the server."""
pass
@get.command(name='orgs')
@add_authorization()
def get_orgs(uploader):
"""Get a list of organizations."""
try:
response = uploader.knex.get('/api/v1/organizations')
click.echo(response)
except HTTPError as exc:
print(f'{exc}', file=stderr)
@get.group()
def uuids():
"""Get UUIDs from the server."""
pass
def report_uuid(name, uuid):
"""Report a uuid to the user."""
click.echo(f'{name}\t{uuid}')
@uuids.command(name='samples')
@add_authorization()
@click.argument('sample_names', nargs=-1)
def sample_uuids(uploader, sample_names):
"""Get UUIDs for the given sample names."""
for sample_name in sample_names:
response = uploader.knex.get(f'/api/v1/samples/getid/{sample_name}')
report_uuid(response['data']['sample_name'],
response['data']['sample_uuid'])
@uuids.command(name='groups')
@add_authorization()
@click.argument('sample_group_names', nargs=-1)
def sample_group_uuids(uploader, sample_group_names):
"""Get UUIDs for the given sample groups."""
for sample_group_name in sample_group_names:
try:
response = uploader.knex.get(f'/api/v1/sample_groups/getid/{sample_group_name}')
report_uuid(response['data']['sample_group_name'],
response['data']['sample_group_uuid'])
except Exception: # pylint: disable=broad-except
print(f'Failed to get uuid for {sample_group_name}', file=stderr)
@uuids.command(name='orgs')
@add_authorization()
@click.argument('org_names', nargs=-1)
def org_uuids(uploader, org_names):
"""Get UUIDs for the given sample groups."""
for org_name in org_names:
try:
response = uploader.knex.get(f'/api/v1/organizations/getid/{org_name}')
report_uuid(response['data']['organization_name'],
response['data']['organization_uuid'])
except Exception: # pylint: disable=broad-except
print(f'Failed to get uuid for {org_name}', file=stderr) | en | 0.811655 | CLI to get data from a MetaGenScope Server. Get data from the server. Get a list of organizations. Get UUIDs from the server. Report a uuid to the user. Get UUIDs for the given sample names. Get UUIDs for the given sample groups. # pylint: disable=broad-except Get UUIDs for the given sample groups. # pylint: disable=broad-except | 2.733287 | 3 |
app/test_can_delete/apps.py | J0hnLee/pharmXbackend | 0 | 6614523 | from django.apps import AppConfig
class TestCanDeleteConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'test_can_delete'
| from django.apps import AppConfig
class TestCanDeleteConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'test_can_delete'
| none | 1 | 1.335424 | 1 | |
gradient_debug.py | mrluin/ESFNet-Pytorch | 39 | 6614524 | import torch
def get_printer(msg):
"""
returns a printer function, that prints information about a tensor's gradient
Used by register_hook in the backward pass.
:param msg:
:return: printer function
"""
def printer(tensor):
if tensor.nelement == 1:
print("{} {}".format(msg, tensor))
else:
print("{} shape: {}"
"max: {} min: {}"
"mean: {}"
.format(msg, tensor.shape, tensor.max(), tensor.min(), tensor.mean()))
return printer
def register_hook(tensor, msg):
"""
Utility function to call retain_grad and register_hook in a single line
:param tensor:
:param msg:
:return:
"""
tensor.retain_grad()
tensor.register_hook(get_printer(msg))
if __name__ == '__main__':
    # Demo: z = y**2 with y = 3x, so dz/dy = 2y; the hook prints y's
    # gradient while backward() runs.
    x = torch.randn((1,1), requires_grad=True)
    y = 3*x
    z = y**2
    register_hook(y, 'y')
    z.backward()
| import torch
def get_printer(msg):
"""
returns a printer function, that prints information about a tensor's gradient
Used by register_hook in the backward pass.
:param msg:
:return: printer function
"""
def printer(tensor):
if tensor.nelement == 1:
print("{} {}".format(msg, tensor))
else:
print("{} shape: {}"
"max: {} min: {}"
"mean: {}"
.format(msg, tensor.shape, tensor.max(), tensor.min(), tensor.mean()))
return printer
def register_hook(tensor, msg):
"""
Utility function to call retain_grad and register_hook in a single line
:param tensor:
:param msg:
:return:
"""
tensor.retain_grad()
tensor.register_hook(get_printer(msg))
if __name__ == '__main__':
x = torch.randn((1,1), requires_grad=True)
y = 3*x
z = y**2
register_hook(y, 'y')
z.backward()
| en | 0.700906 | returns a printer function, that prints information about a tensor's gradient
Used by register_hook in the backward pass.
:param msg:
:return: printer function Utility function to call retain_grad and register_hook in a single line
:param tensor:
:param msg:
:return: | 3.296411 | 3 |
examples/example_peaksearch.py | mauricioAyllon/NASA-gamma | 5 | 6614525 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 27 14:24:22 2020
@author: mauricio
Example of peaksearch functionality
"""
from nasagamma import spectrum as sp
import numpy as np
import pandas as pd
from nasagamma import peaksearch as ps
# dataset 1
file = "data/SSR-mcnp.hdf"
df = pd.read_hdf(file, key="data")
# delete first (large) bin
df = df.iloc[1:, :]
cts_np = df.cts.to_numpy() * 1e8
erg = np.array(df.index)
# instantiate a Spectrum object
spect = sp.Spectrum(counts=cts_np, energies=erg)
# Required input parameters (in channels)
fwhm_at_0 = 1
ref_fwhm = 35
ref_x = 1220
# instantiate a peaksearch object
search = ps.PeakSearch(spect, ref_x, ref_fwhm, fwhm_at_0, min_snr=0.1)
search.plot_kernel()
search.plot_peaks()
search.plot_components()
| # -*- coding: utf-8 -*-
"""
Created on Tue Oct 27 14:24:22 2020
@author: mauricio
Example of peaksearch functionality
"""
from nasagamma import spectrum as sp
import numpy as np
import pandas as pd
from nasagamma import peaksearch as ps
# dataset 1
file = "data/SSR-mcnp.hdf"
df = pd.read_hdf(file, key="data")
# delete first (large) bin
df = df.iloc[1:, :]
cts_np = df.cts.to_numpy() * 1e8
erg = np.array(df.index)
# instantiate a Spectrum object
spect = sp.Spectrum(counts=cts_np, energies=erg)
# Required input parameters (in channels)
fwhm_at_0 = 1
ref_fwhm = 35
ref_x = 1220
# instantiate a peaksearch object
search = ps.PeakSearch(spect, ref_x, ref_fwhm, fwhm_at_0, min_snr=0.1)
search.plot_kernel()
search.plot_peaks()
search.plot_components()
| en | 0.70037 | # -*- coding: utf-8 -*- Created on Tue Oct 27 14:24:22 2020 @author: mauricio Example of peaksearch functionality # dataset 1 # delete first (large) bin # instantiate a Spectrum object # Required input parameters (in channels) # instantiate a peaksearch object | 2.743626 | 3 |
aula#13/desafio047.py | daramariabs/exercicios-python | 0 | 6614526 | """ CRIE UM PROGRAMA QUE MOSTRE NA TELA TODOS OS NUMEROS PARES QUE
ESTÃO NO INTERVALO ENTRE 1 E 50. """
for i in range(0,50+1,2):
if i != 0:
print(i, end=' ')
print('FIM') | """ CRIE UM PROGRAMA QUE MOSTRE NA TELA TODOS OS NUMEROS PARES QUE
ESTÃO NO INTERVALO ENTRE 1 E 50. """
for i in range(0,50+1,2):
if i != 0:
print(i, end=' ')
print('FIM') | en | 0.155613 | CRIE UM PROGRAMA QUE MOSTRE NA TELA TODOS OS NUMEROS PARES QUE ESTÃO NO INTERVALO ENTRE 1 E 50. | 3.881284 | 4 |
train.py | kibernetika-ai/first-order-model | 0 | 6614527 | import sys
import cv2
import numpy as np
import tensorboardX
import torch
from tqdm import trange
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data import DataLoader
from logger import Logger
from modules.model import GeneratorFullModel, DiscriminatorFullModel
from sync_batchnorm import DataParallelWithCallback
from frames_dataset import DatasetRepeater
def print_fun(s):
print(s)
sys.stdout.flush()
def train(config, generator, discriminator, kp_detector, checkpoint, log_dir, dataset, device_ids):
train_params = config['train_params']
optimizer_generator = torch.optim.Adam(generator.parameters(), lr=train_params['lr_generator'], betas=(0.5, 0.999))
optimizer_discriminator = torch.optim.Adam(discriminator.parameters(), lr=train_params['lr_discriminator'], betas=(0.5, 0.999))
optimizer_kp_detector = torch.optim.Adam(kp_detector.parameters(), lr=train_params['lr_kp_detector'], betas=(0.5, 0.999))
if checkpoint is not None:
start_epoch = Logger.load_cpk(checkpoint, generator, discriminator, kp_detector,
optimizer_generator, optimizer_discriminator,
None if train_params['lr_kp_detector'] == 0 else optimizer_kp_detector)
else:
start_epoch = 0
scheduler_generator = MultiStepLR(optimizer_generator, train_params['epoch_milestones'], gamma=0.1,
last_epoch=start_epoch - 1)
scheduler_discriminator = MultiStepLR(optimizer_discriminator, train_params['epoch_milestones'], gamma=0.1,
last_epoch=start_epoch - 1)
scheduler_kp_detector = MultiStepLR(optimizer_kp_detector, train_params['epoch_milestones'], gamma=0.1,
last_epoch=-1 + start_epoch * (train_params['lr_kp_detector'] != 0))
if 'num_repeats' in train_params or train_params['num_repeats'] != 1:
dataset = DatasetRepeater(dataset, train_params['num_repeats'])
dataloader = DataLoader(
dataset,
batch_size=train_params['batch_size'],
shuffle=True,
drop_last=True,
num_workers=4
)
print_fun(f'Full dataset length (with repeats): {len(dataset)}')
generator_full = GeneratorFullModel(kp_detector, generator, discriminator, train_params)
discriminator_full = DiscriminatorFullModel(kp_detector, generator, discriminator, train_params)
if torch.cuda.is_available():
generator_full = DataParallelWithCallback(generator_full, device_ids=device_ids)
discriminator_full = DataParallelWithCallback(discriminator_full, device_ids=device_ids)
writer = tensorboardX.SummaryWriter(log_dir, flush_secs=60)
with Logger(log_dir=log_dir, visualizer_params=config['visualizer_params'], checkpoint_freq=train_params['checkpoint_freq']) as logger:
for epoch in trange(start_epoch, train_params['num_epochs'], disable=None):
for i, x in enumerate(dataloader):
losses_generator, generated = generator_full(x)
loss_values = [val.mean() for val in losses_generator.values()]
loss = sum(loss_values)
loss.backward()
optimizer_generator.step()
optimizer_generator.zero_grad()
optimizer_kp_detector.step()
optimizer_kp_detector.zero_grad()
if train_params['loss_weights']['generator_gan'] != 0:
optimizer_discriminator.zero_grad()
losses_discriminator = discriminator_full(x, generated)
loss_values = [val.mean() for val in losses_discriminator.values()]
loss = sum(loss_values)
loss.backward()
optimizer_discriminator.step()
optimizer_discriminator.zero_grad()
else:
losses_discriminator = {}
losses_generator.update(losses_discriminator)
losses = {key: value.mean().detach().data.cpu().numpy() for key, value in losses_generator.items()}
logger.log_iter(losses=losses)
step = i + int(epoch * len(dataset) / dataloader.batch_size)
if step % 20 == 0:
print_fun(f'Epoch {epoch + 1}, global step {step}: {", ".join([f"{k}={v}" for k, v in losses.items()])}')
if step != 0 and step % 50 == 0:
for k, loss in losses.items():
writer.add_scalar(k, float(loss), global_step=step)
# add images
source = x['source'][0].detach().cpu().numpy().transpose([1, 2, 0])
driving = x['driving'][0].detach().cpu().numpy().transpose([1, 2, 0])
kp_source = generated['kp_source']['value'][0].detach().cpu().numpy()
kp_driving = generated['kp_driving']['value'][0].detach().cpu().numpy()
pred = generated['prediction'][0].detach().cpu().numpy().transpose([1, 2, 0])
kp_source = kp_source * 127.5 + 127.5
kp_driving = kp_driving * 127.5 + 127.5
source = cv2.UMat((source * 255.).clip(0, 255).astype(np.uint8)).get()
driving = cv2.UMat((driving * 255.).clip(0, 255).astype(np.uint8)).get()
pred = (pred * 255.).clip(0, 255).astype(np.uint8)
for x1, y1 in kp_source:
cv2.circle(source, (int(x1), int(y1)), 2, (250, 250, 250), thickness=cv2.FILLED)
for x1, y1 in kp_driving:
cv2.circle(driving, (int(x1), int(y1)), 2, (250, 250, 250), thickness=cv2.FILLED)
writer.add_image(
'SourceDrivingPred', np.hstack((source, driving, pred)),
global_step=step,
dataformats='HWC'
)
writer.flush()
scheduler_generator.step()
scheduler_discriminator.step()
scheduler_kp_detector.step()
logger.log_epoch(epoch, {'generator': generator,
'discriminator': discriminator,
'kp_detector': kp_detector,
'optimizer_generator': optimizer_generator,
'optimizer_discriminator': optimizer_discriminator,
'optimizer_kp_detector': optimizer_kp_detector})
| import sys
import cv2
import numpy as np
import tensorboardX
import torch
from tqdm import trange
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data import DataLoader
from logger import Logger
from modules.model import GeneratorFullModel, DiscriminatorFullModel
from sync_batchnorm import DataParallelWithCallback
from frames_dataset import DatasetRepeater
def print_fun(s):
print(s)
sys.stdout.flush()
def train(config, generator, discriminator, kp_detector, checkpoint, log_dir, dataset, device_ids):
train_params = config['train_params']
optimizer_generator = torch.optim.Adam(generator.parameters(), lr=train_params['lr_generator'], betas=(0.5, 0.999))
optimizer_discriminator = torch.optim.Adam(discriminator.parameters(), lr=train_params['lr_discriminator'], betas=(0.5, 0.999))
optimizer_kp_detector = torch.optim.Adam(kp_detector.parameters(), lr=train_params['lr_kp_detector'], betas=(0.5, 0.999))
if checkpoint is not None:
start_epoch = Logger.load_cpk(checkpoint, generator, discriminator, kp_detector,
optimizer_generator, optimizer_discriminator,
None if train_params['lr_kp_detector'] == 0 else optimizer_kp_detector)
else:
start_epoch = 0
scheduler_generator = MultiStepLR(optimizer_generator, train_params['epoch_milestones'], gamma=0.1,
last_epoch=start_epoch - 1)
scheduler_discriminator = MultiStepLR(optimizer_discriminator, train_params['epoch_milestones'], gamma=0.1,
last_epoch=start_epoch - 1)
scheduler_kp_detector = MultiStepLR(optimizer_kp_detector, train_params['epoch_milestones'], gamma=0.1,
last_epoch=-1 + start_epoch * (train_params['lr_kp_detector'] != 0))
if 'num_repeats' in train_params or train_params['num_repeats'] != 1:
dataset = DatasetRepeater(dataset, train_params['num_repeats'])
dataloader = DataLoader(
dataset,
batch_size=train_params['batch_size'],
shuffle=True,
drop_last=True,
num_workers=4
)
print_fun(f'Full dataset length (with repeats): {len(dataset)}')
generator_full = GeneratorFullModel(kp_detector, generator, discriminator, train_params)
discriminator_full = DiscriminatorFullModel(kp_detector, generator, discriminator, train_params)
if torch.cuda.is_available():
generator_full = DataParallelWithCallback(generator_full, device_ids=device_ids)
discriminator_full = DataParallelWithCallback(discriminator_full, device_ids=device_ids)
writer = tensorboardX.SummaryWriter(log_dir, flush_secs=60)
with Logger(log_dir=log_dir, visualizer_params=config['visualizer_params'], checkpoint_freq=train_params['checkpoint_freq']) as logger:
for epoch in trange(start_epoch, train_params['num_epochs'], disable=None):
for i, x in enumerate(dataloader):
losses_generator, generated = generator_full(x)
loss_values = [val.mean() for val in losses_generator.values()]
loss = sum(loss_values)
loss.backward()
optimizer_generator.step()
optimizer_generator.zero_grad()
optimizer_kp_detector.step()
optimizer_kp_detector.zero_grad()
if train_params['loss_weights']['generator_gan'] != 0:
optimizer_discriminator.zero_grad()
losses_discriminator = discriminator_full(x, generated)
loss_values = [val.mean() for val in losses_discriminator.values()]
loss = sum(loss_values)
loss.backward()
optimizer_discriminator.step()
optimizer_discriminator.zero_grad()
else:
losses_discriminator = {}
losses_generator.update(losses_discriminator)
losses = {key: value.mean().detach().data.cpu().numpy() for key, value in losses_generator.items()}
logger.log_iter(losses=losses)
step = i + int(epoch * len(dataset) / dataloader.batch_size)
if step % 20 == 0:
print_fun(f'Epoch {epoch + 1}, global step {step}: {", ".join([f"{k}={v}" for k, v in losses.items()])}')
if step != 0 and step % 50 == 0:
for k, loss in losses.items():
writer.add_scalar(k, float(loss), global_step=step)
# add images
source = x['source'][0].detach().cpu().numpy().transpose([1, 2, 0])
driving = x['driving'][0].detach().cpu().numpy().transpose([1, 2, 0])
kp_source = generated['kp_source']['value'][0].detach().cpu().numpy()
kp_driving = generated['kp_driving']['value'][0].detach().cpu().numpy()
pred = generated['prediction'][0].detach().cpu().numpy().transpose([1, 2, 0])
kp_source = kp_source * 127.5 + 127.5
kp_driving = kp_driving * 127.5 + 127.5
source = cv2.UMat((source * 255.).clip(0, 255).astype(np.uint8)).get()
driving = cv2.UMat((driving * 255.).clip(0, 255).astype(np.uint8)).get()
pred = (pred * 255.).clip(0, 255).astype(np.uint8)
for x1, y1 in kp_source:
cv2.circle(source, (int(x1), int(y1)), 2, (250, 250, 250), thickness=cv2.FILLED)
for x1, y1 in kp_driving:
cv2.circle(driving, (int(x1), int(y1)), 2, (250, 250, 250), thickness=cv2.FILLED)
writer.add_image(
'SourceDrivingPred', np.hstack((source, driving, pred)),
global_step=step,
dataformats='HWC'
)
writer.flush()
scheduler_generator.step()
scheduler_discriminator.step()
scheduler_kp_detector.step()
logger.log_epoch(epoch, {'generator': generator,
'discriminator': discriminator,
'kp_detector': kp_detector,
'optimizer_generator': optimizer_generator,
'optimizer_discriminator': optimizer_discriminator,
'optimizer_kp_detector': optimizer_kp_detector})
| ru | 0.19392 | # add images | 2.090656 | 2 |
ImageProcess/AlignImagesRGB.py | soybase/DroneImageScripts | 3 | 6614528 |
# Works with Micasense 5 band images. Outputs orthophotomosaic images of each bandself.
# Required cpp/stitching.cpp to be compiled and executable as 'stitching_multi' . Use g++ stitching.cpp -u /usr/bin/stitching_multi `pkg-config opencv4 --cflags --libs`
# stitching_multi program will use CUDA GPU if opencv was installed with CUDA support
def run():
import sys
import cv2
import numpy as np
import matplotlib.pyplot as plt
import argparse
import os, glob
from multiprocessing import Process, freeze_support
import imutils
import statistics
import matplotlib.pyplot as plt
import csv
freeze_support()
ap = argparse.ArgumentParser()
ap.add_argument("-l", "--log_file_path", required=False, help="file path to write log to. useful for using from the web interface")
ap.add_argument("-a", "--image_path", required=False, help="image path to directory with all images inside of it. useful for using from command line. e.g. /home/nmorales/MicasenseTest/000")
ap.add_argument("-b", "--file_with_image_paths", required=False, help="file path to file that has all image file names and temporary file names for each image in it, comma separated and separated by a newline. useful for using from the web interface. e.g. /home/nmorales/myfilewithnames.txt")
ap.add_argument("-o", "--output_path", required=True, help="output path to directory in which all resulting files will be placed. useful for using from the command line")
ap.add_argument("-y", "--final_rgb_output_path", required=True, help="output file path for stitched RGB image")
ap.add_argument("-w", "--work_megapix", required=False, default=0.6, help="Resolution for image registration step. The default is 0.6 Mpx")
args = vars(ap.parse_args())
log_file_path = args["log_file_path"]
image_path = args["image_path"]
file_with_image_paths = args["file_with_image_paths"]
output_path = args["output_path"]
final_rgb_output_path = args["final_rgb_output_path"]
work_megapix = args["work_megapix"]
if log_file_path is not None and log_file_path != '':
sys.stderr = open(log_file_path, 'a')
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
#Must supply either image_path or file_with_image_paths as a source of images
imageNamesAll = []
imageTempNames = []
if image_path is not None:
imageNamesAll = glob.glob(os.path.join(image_path,'*.tif'))
for i in imageNamesAll:
imageTempNames.append(os.path.join(output_path,i+'temp.tif'))
elif file_with_image_paths is not None:
with open(file_with_image_paths) as fp:
for line in fp:
imageName, tempImageName = line.strip().split(",")
imageNamesAll.append(imageName)
imageTempNames.append(tempImageName)
else:
if log_file_path is not None:
eprint("No input images given. use image_path OR file_with_image_paths args")
else:
print("No input images given. use image_path OR file_with_image_paths args")
os._exit
img_type = "reflectance"
match_index = 0 # Index of the band
max_alignment_iterations = 1000
warp_mode = cv2.MOTION_HOMOGRAPHY # MOTION_HOMOGRAPHY or MOTION_AFFINE. For Altum images only use HOMOGRAPHY
pyramid_levels = None # for images with RigRelatives, setting this to 0 or 1 may improve alignment
sep = " ";
images_string1 = sep.join(imageNamesAll)
log_file_path_string = ''
if log_file_path is not None and log_file_path != '':
log_file_path_string = " --log_file '"+log_file_path+"'"
stitchCmd = "stitching_single "+images_string1+" --result1 '"+final_rgb_output_path+"' "+log_file_path_string
# stitchCmd = "stitching_single "+images_string1+" --result1 '"+final_rgb_output_path+"' --try_cuda yes --log_file "+log_file_path+" --work_megapix "+work_megapix
if log_file_path is not None:
eprint(stitchCmd)
eprint(len(stitchCmd))
else:
print(stitchCmd)
print(len(stitchCmd))
os.system(stitchCmd)
# {
# OK = 0,
# ERR_NEED_MORE_IMGS = 1,
# ERR_HOMOGRAPHY_EST_FAIL = 2,
# ERR_CAMERA_PARAMS_ADJUST_FAIL = 3
# };
if __name__ == '__main__':
    # Script entry point; all argument parsing and work happens inside run().
    run()
# Works with Micasense 5 band images. Outputs orthophotomosaic images of each bandself.
# Required cpp/stitching.cpp to be compiled and executable as 'stitching_multi' . Use g++ stitching.cpp -u /usr/bin/stitching_multi `pkg-config opencv4 --cflags --libs`
# stitching_multi program will use CUDA GPU if opencv was installed with CUDA support
def run():
import sys
import cv2
import numpy as np
import matplotlib.pyplot as plt
import argparse
import os, glob
from multiprocessing import Process, freeze_support
import imutils
import statistics
import matplotlib.pyplot as plt
import csv
freeze_support()
ap = argparse.ArgumentParser()
ap.add_argument("-l", "--log_file_path", required=False, help="file path to write log to. useful for using from the web interface")
ap.add_argument("-a", "--image_path", required=False, help="image path to directory with all images inside of it. useful for using from command line. e.g. /home/nmorales/MicasenseTest/000")
ap.add_argument("-b", "--file_with_image_paths", required=False, help="file path to file that has all image file names and temporary file names for each image in it, comma separated and separated by a newline. useful for using from the web interface. e.g. /home/nmorales/myfilewithnames.txt")
ap.add_argument("-o", "--output_path", required=True, help="output path to directory in which all resulting files will be placed. useful for using from the command line")
ap.add_argument("-y", "--final_rgb_output_path", required=True, help="output file path for stitched RGB image")
ap.add_argument("-w", "--work_megapix", required=False, default=0.6, help="Resolution for image registration step. The default is 0.6 Mpx")
args = vars(ap.parse_args())
log_file_path = args["log_file_path"]
image_path = args["image_path"]
file_with_image_paths = args["file_with_image_paths"]
output_path = args["output_path"]
final_rgb_output_path = args["final_rgb_output_path"]
work_megapix = args["work_megapix"]
if log_file_path is not None and log_file_path != '':
sys.stderr = open(log_file_path, 'a')
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
#Must supply either image_path or file_with_image_paths as a source of images
imageNamesAll = []
imageTempNames = []
if image_path is not None:
imageNamesAll = glob.glob(os.path.join(image_path,'*.tif'))
for i in imageNamesAll:
imageTempNames.append(os.path.join(output_path,i+'temp.tif'))
elif file_with_image_paths is not None:
with open(file_with_image_paths) as fp:
for line in fp:
imageName, tempImageName = line.strip().split(",")
imageNamesAll.append(imageName)
imageTempNames.append(tempImageName)
else:
if log_file_path is not None:
eprint("No input images given. use image_path OR file_with_image_paths args")
else:
print("No input images given. use image_path OR file_with_image_paths args")
os._exit
img_type = "reflectance"
match_index = 0 # Index of the band
max_alignment_iterations = 1000
warp_mode = cv2.MOTION_HOMOGRAPHY # MOTION_HOMOGRAPHY or MOTION_AFFINE. For Altum images only use HOMOGRAPHY
pyramid_levels = None # for images with RigRelatives, setting this to 0 or 1 may improve alignment
sep = " ";
images_string1 = sep.join(imageNamesAll)
log_file_path_string = ''
if log_file_path is not None and log_file_path != '':
log_file_path_string = " --log_file '"+log_file_path+"'"
stitchCmd = "stitching_single "+images_string1+" --result1 '"+final_rgb_output_path+"' "+log_file_path_string
# stitchCmd = "stitching_single "+images_string1+" --result1 '"+final_rgb_output_path+"' --try_cuda yes --log_file "+log_file_path+" --work_megapix "+work_megapix
if log_file_path is not None:
eprint(stitchCmd)
eprint(len(stitchCmd))
else:
print(stitchCmd)
print(len(stitchCmd))
os.system(stitchCmd)
# {
# OK = 0,
# ERR_NEED_MORE_IMGS = 1,
# ERR_HOMOGRAPHY_EST_FAIL = 2,
# ERR_CAMERA_PARAMS_ADJUST_FAIL = 3
# };
if __name__ == '__main__':
run() | en | 0.642458 | # Works with Micasense 5 band images. Outputs orthophotomosaic images of each bandself. # Required cpp/stitching.cpp to be compiled and executable as 'stitching_multi' . Use g++ stitching.cpp -u /usr/bin/stitching_multi `pkg-config opencv4 --cflags --libs` # stitching_multi program will use CUDA GPU if opencv was installed with CUDA support #Must supply either image_path or file_with_image_paths as a source of images # Index of the band # MOTION_HOMOGRAPHY or MOTION_AFFINE. For Altum images only use HOMOGRAPHY # for images with RigRelatives, setting this to 0 or 1 may improve alignment # stitchCmd = "stitching_single "+images_string1+" --result1 '"+final_rgb_output_path+"' --try_cuda yes --log_file "+log_file_path+" --work_megapix "+work_megapix # { # OK = 0, # ERR_NEED_MORE_IMGS = 1, # ERR_HOMOGRAPHY_EST_FAIL = 2, # ERR_CAMERA_PARAMS_ADJUST_FAIL = 3 # }; | 2.254357 | 2 |
app/config.py | 0x30c4/FastOCR | 10 | 6614529 | <reponame>0x30c4/FastOCR
from os import environ
# Database connection settings, all read from the process environment.
db_host = environ.get("DATABASE_HOST")
# NOTE(review): the env key really is spelled "DATABSE_PORT" (missing an 'A');
# it presumably matches the deployment .env -- confirm before renaming it.
# int() raises TypeError if the variable is unset.
db_port = int(environ.get("DATABSE_PORT"))
db_user = environ.get("POSTGRES_USER")
db_pass = environ.get("POSTGRES_PASSWORD")
db_name = environ.get("POSTGRES_DB")
# File extensions accepted for uploads.
ALLOWED_FILE_EXT = ("jpeg", "png", "gif", "bmp", "tiff", "jpg")
# SQLAlchemy/psycopg2 connection string assembled from the pieces above.
DATABASE_URL = f"postgresql+psycopg2://{db_user}" \
               f":{db_pass}@{db_host}:{db_port}/{db_name}"
# Container-side directory for uploaded files (empty string when unset).
UPLOAD_DIR = environ.get("UPLOAD_DIR_CONT", default='')
# HTTP server settings; int() raises TypeError when a variable is missing.
PORT = int(environ.get("API_PORT"))
WORKERS = int(environ.get("WORKERS"))
HOST = environ.get("HOST")
LOG_LEVEL = environ.get("LOG_LEVEL")
RELOAD = int(environ.get("RELOAD"))
LOG_INI = environ.get("APP_LOG_INI")
| from os import environ
db_host = environ.get("DATABASE_HOST")
db_port = int(environ.get("DATABSE_PORT"))
db_user = environ.get("POSTGRES_USER")
db_pass = environ.get("POSTGRES_PASSWORD")
db_name = environ.get("POSTGRES_DB")
ALLOWED_FILE_EXT = ("jpeg", "png", "gif", "bmp", "tiff", "jpg")
DATABASE_URL = f"postgresql+psycopg2://{db_user}" \
f":{db_pass}@{db_host}:{db_port}/{db_name}"
UPLOAD_DIR = environ.get("UPLOAD_DIR_CONT", default='')
PORT = int(environ.get("API_PORT"))
WORKERS = int(environ.get("WORKERS"))
HOST = environ.get("HOST")
LOG_LEVEL = environ.get("LOG_LEVEL")
RELOAD = int(environ.get("RELOAD"))
LOG_INI = environ.get("APP_LOG_INI") | none | 1 | 2.247323 | 2 | |
Model.py | obinnaeye/addMore | 0 | 6614530 | from flask import Flask
from marshmallow import Schema, fields, pre_load, validate
from flask_marshmallow import Marshmallow
from sqlalchemy import UniqueConstraint
from flask_sqlalchemy import SQLAlchemy
ma = Marshmallow()
db = SQLAlchemy()
class Client(db.Model):
    """SQLAlchemy model for a client that can file feature requests."""
    __tablename__ = 'clients'
    id = db.Column(db.Integer, primary_key=True)
    ClientName = db.Column(db.String(250), nullable=False)
    # Filled in by the database with the row's insertion timestamp.
    creation_date = db.Column(db.TIMESTAMP, server_default=db.func.current_timestamp(), nullable=False)
    def __init__(self, ClientName):
        self.ClientName = ClientName
class FeatureRequest(db.Model):
    """SQLAlchemy model for a single feature request raised by a client."""
    __tablename__ = 'featurerequests'
    id = db.Column(db.Integer, primary_key=True)
    Title = db.Column(db.String(250), nullable=False)
    Description = db.Column(db.String(250), nullable=False)
    TargetDate = db.Column(db.DateTime, nullable=False)
    # Priority rank assigned by the owning client.
    ClientPriority = db.Column(db.Integer, nullable=False)
    ProductArea = db.Column(db.String(250), nullable=False)
    # Owning client; deleting the client cascades to its feature requests.
    ClientID = db.Column(db.Integer, db.ForeignKey('clients.id', ondelete='CASCADE'), nullable=False)
    # Reverse accessor Client.featurerequests returns a lazy query object.
    client = db.relationship('Client', backref=db.backref('featurerequests', lazy='dynamic' ))
    def __init__(self, Title, Description, TargetDate, ClientPriority, ProductArea, ClientID):
        self.Title = Title
        self.Description = Description
        self.TargetDate = TargetDate
        self.ClientPriority = ClientPriority
        self.ProductArea = ProductArea
        self.ClientID = ClientID
class ClientSchema(ma.Schema):
    """Marshmallow (de)serialization schema for Client rows."""
    id = fields.Integer(dump_only=True)
    ClientName = fields.String(required=True, validate=validate.Length(1))
    creation_date = fields.DateTime()
class FeatureRequestSchema(ma.Schema):
    """Marshmallow (de)serialization schema for FeatureRequest rows."""
    id = fields.Integer(dump_only=True)
    Title = fields.String(required=True, validate=validate.Length(1))
    Description = fields.String(required=True, validate=validate.Length(1))
    # NOTE(review): declared as String although the model column is DateTime;
    # presumably the API exchanges dates as raw strings -- confirm.
    TargetDate = fields.String(required=True, validate=validate.Length(1))
    ClientPriority = fields.Integer(required=True)
    ProductArea = fields.String(required=True, validate=validate.Length(1))
    ClientID = fields.Integer(required=True)
    creation_date = fields.DateTime()
from marshmallow import Schema, fields, pre_load, validate
from flask_marshmallow import Marshmallow
from sqlalchemy import UniqueConstraint
from flask_sqlalchemy import SQLAlchemy
ma = Marshmallow()
db = SQLAlchemy()
class Client(db.Model):
__tablename__ = 'clients'
id = db.Column(db.Integer, primary_key=True)
ClientName = db.Column(db.String(250), nullable=False)
creation_date = db.Column(db.TIMESTAMP, server_default=db.func.current_timestamp(), nullable=False)
def __init__(self, ClientName):
self.ClientName = ClientName
class FeatureRequest(db.Model):
__tablename__ = 'featurerequests'
id = db.Column(db.Integer, primary_key=True)
Title = db.Column(db.String(250), nullable=False)
Description = db.Column(db.String(250), nullable=False)
TargetDate = db.Column(db.DateTime, nullable=False)
ClientPriority = db.Column(db.Integer, nullable=False)
ProductArea = db.Column(db.String(250), nullable=False)
ClientID = db.Column(db.Integer, db.ForeignKey('clients.id', ondelete='CASCADE'), nullable=False)
client = db.relationship('Client', backref=db.backref('featurerequests', lazy='dynamic' ))
def __init__(self, Title, Description, TargetDate, ClientPriority, ProductArea, ClientID):
self.Title = Title
self.Description = Description
self.TargetDate = TargetDate
self.ClientPriority = ClientPriority
self.ProductArea = ProductArea
self.ClientID = ClientID
class ClientSchema(ma.Schema):
id = fields.Integer(dump_only=True)
ClientName = fields.String(required=True, validate=validate.Length(1))
creation_date = fields.DateTime()
class FeatureRequestSchema(ma.Schema):
id = fields.Integer(dump_only=True)
Title = fields.String(required=True, validate=validate.Length(1))
Description = fields.String(required=True, validate=validate.Length(1))
TargetDate = fields.String(required=True, validate=validate.Length(1))
ClientPriority = fields.Integer(required=True)
ProductArea = fields.String(required=True, validate=validate.Length(1))
ClientID = fields.Integer(required=True)
creation_date = fields.DateTime() | none | 1 | 2.444701 | 2 | |
miniMonitor.py | jmmnn/MiniMonitor | 0 | 6614531 | import pandas as pd
import smtplib
import time
#### Mailer Config####
# Open a single module-level SMTP connection and authenticate it up front.
server = smtplib.SMTP('smtp.gmail.com', 587) #can use 'localhost' without port or authentication
server.starttls()
# NOTE(review): placeholder credentials are hard-coded; load them from the
# environment or a config file before deploying.
server.login("<EMAIL>", "YourPassword") #enter your gmail credentials
##### Monitoring task
def myMonitor (csvLogFile):
    """Scan a tab-separated scheduler log and e-mail an alert if any task failed.

    Reads ``csvLogFile`` (tab-separated, ISO-8859-1 encoded, with at least the
    columns ``Status`` and ``TaskName``), collects every row whose ``Status``
    equals ``"FinishedFail"`` and, when there is at least one, sends the failed
    task names through the module-level ``server`` SMTP connection.

    :param csvLogFile: path of the log file to inspect.
    :return: None
    """
    try:
        df = pd.read_csv(csvLogFile, sep='\t', encoding="ISO-8859-1")  # csv to dataframe
    except Exception:
        # Bug fix: the original bare except printed and then fell through to
        # `df[...]` below, crashing with a NameError. Report and bail out.
        print("Error reading the file")
        return
    errors = df[df['Status'] == "FinishedFail"]
    if errors.empty:
        print('No errors found, no message sent.')
        return
    print('these are the # of errors: ', len(errors.index))
    messageBody = str(errors.TaskName)
    try:
        # NOTE(review): `server` is a module-level SMTP connection, and
        # server.quit() closes it, so a second alert in the same run would
        # fail -- confirm whether one-alert-per-connection is intended.
        server.sendmail("<EMAIL>", "<EMAIL>", messageBody)
        server.quit()
        print('Message sent!')
    except Exception:
        print('failure to connect to mail server')
#### Execute the monitor every 60 seconds.
# Poll the scheduler log forever, re-checking once per minute.
while True:
    myMonitor('NYVM0571_TaskExecution_Scheduler.txt')
    time.sleep(60)
| import pandas as pd
import smtplib
import time
#### Mailer Config####
server = smtplib.SMTP('smtp.gmail.com', 587) #can use 'localhost' without port or authentication
server.starttls()
server.login("<EMAIL>", "YourPassword") #enter your gmail credentials
##### Monitoring task
def myMonitor (csvLogFile):
try:
df = pd.read_csv(csvLogFile, sep='\t', encoding = "ISO-8859-1") #csv to dataframe
except:
print("Error reading the file")
errors = df[df['Status'] == "FinishedFail"] ###For testing: #FinishedSuccess #FinishedFail #randomMessage
#print(df[df['Status'] == "FinishedFail"])
if len(errors.index) > 0:
print ('these are the # of errors: ' , len(errors.index))
messageBody = str(errors.TaskName)
try:
server.sendmail("<EMAIL>", "<EMAIL>", messageBody)
server.quit()
print('Message sent!')
except:
print('failure to connect to mail server')
else:
print('No errors found, no message sent.')
#### Execute the monitor every 60 seconds.
while True:
myMonitor('NYVM0571_TaskExecution_Scheduler.txt')
time.sleep(60)
| en | 0.457116 | #### Mailer Config#### #can use 'localhost' without port or authentication #enter your gmail credentials ##### Monitoring task #csv to dataframe ###For testing: #FinishedSuccess #FinishedFail #randomMessage #print(df[df['Status'] == "FinishedFail"]) # of errors: ' , len(errors.index)) #### Execute the monitor every 60 seconds. | 3.100309 | 3 |
tests/plotting/config.py | nur-azhar/chia-blockchain | 1 | 6614532 | <filename>tests/plotting/config.py<gh_stars>1-10
# Flags presumably consumed by the repository's test runner -- TODO confirm:
# run in parallel, skip the timelord install, fetch block/plot fixtures.
parallel = True
install_timelord = False
checkout_blocks_and_plots = True
| <filename>tests/plotting/config.py<gh_stars>1-10
parallel = True
install_timelord = False
checkout_blocks_and_plots = True
| none | 1 | 1.036439 | 1 | |
tests.py | dlukeomalley/hue-sunrise | 0 | 6614533 | <gh_stars>0
import unittest
import config
import datetime
from dateutil.parser import parse
import sunrise
import requests
class Test(unittest.TestCase):
    """Unit tests for sunrise-time lookup and Hue schedule updates."""
    # TODO (dlukeomalley): get schedule and save to rewrite it later
    testScheduleId = config.SCHEDULE_ID
    # (latitude, longitude) pair used for all lookups below.
    testLocation = (37.7749, -122.4194)
    testDate = datetime.date(2020, 4, 27)
    # Known sunrise for the date/location above, in UTC.
    testSunriseDt = parse("2020-04-27 13:16:53+00:00")
    localtimeString = "/schedules/{}/localtime".format(testScheduleId)
    def test_get_sunrise_sunset(self):
        """Sunrise lookup returns the expected UTC datetime."""
        self.assertEqual(self.testSunriseDt, sunrise.getSunriseDatetime(self.testDate, self.testLocation))
    def test_timezone_change(self):
        """Passing a timezone shifts the result (UTC-7 for US/Pacific in April)."""
        timezone = "US/Pacific"
        sunriseTime = self.testSunriseDt - datetime.timedelta(hours=7)
        self.assertEqual(sunriseTime.ctime(), sunrise.getSunriseDatetime(self.testDate, self.testLocation, timezone).ctime())
    def test_one_time_alarm(self):
        """Setting a one-off schedule reports success with an ISO local time."""
        success = {"success": {self.localtimeString: "2020-04-27T13:16:53"}}
        self.assertIn(success, sunrise.setHueSchedule(self.testScheduleId, self.testSunriseDt))
    def test_recurring_alarm(self):
        """An all-days recurrence bitmask yields a 'W127/T...' local time."""
        recurrence = 0b01111111 # 127
        success = {"success": {self.localtimeString: "W127/T13:16:53"}}
        self.assertIn(success, sunrise.setHueSchedule(self.testScheduleId, self.testSunriseDt, recurrence))
if __name__ == '__main__':
unittest.main() | import unittest
import config
import datetime
from dateutil.parser import parse
import sunrise
import requests
class Test(unittest.TestCase):
# TODO (dlukeomalley): get schedule and save to rewrite it later
testScheduleId = config.SCHEDULE_ID
testLocation = (37.7749, -122.4194)
testDate = datetime.date(2020, 4, 27)
testSunriseDt = parse("2020-04-27 13:16:53+00:00")
localtimeString = "/schedules/{}/localtime".format(testScheduleId)
def test_get_sunrise_sunset(self):
self.assertEqual(self.testSunriseDt, sunrise.getSunriseDatetime(self.testDate, self.testLocation))
def test_timezone_change(self):
timezone = "US/Pacific"
sunriseTime = self.testSunriseDt - datetime.timedelta(hours=7)
self.assertEqual(sunriseTime.ctime(), sunrise.getSunriseDatetime(self.testDate, self.testLocation, timezone).ctime())
def test_one_time_alarm(self):
success = {"success": {self.localtimeString: "2020-04-27T13:16:53"}}
self.assertIn(success, sunrise.setHueSchedule(self.testScheduleId, self.testSunriseDt))
def test_recurring_alarm(self):
recurrence = 0b01111111 # 127
success = {"success": {self.localtimeString: "W127/T13:16:53"}}
self.assertIn(success, sunrise.setHueSchedule(self.testScheduleId, self.testSunriseDt, recurrence))
if __name__ == '__main__':
unittest.main() | en | 0.873589 | # TODO (dlukeomalley): get schedule and save to rewrite it later # 127 | 2.943556 | 3 |
mlp.py | Wigder/inns | 5 | 6614534 | from ann_visualizer.visualize import ann_viz
from keras import Sequential
from keras.layers import Dense, Dropout
from load_data import x, y, dimensions
from mccv_keras import mccv
# Total number of Dense hidden layers: the first is added explicitly below,
# the loop adds the remaining hidden_layers - 1.
hidden_layers = 5
plot = False  # Switch to True to output the architecture as a .png file.
name = ""  # If the above is set to True, this will be the name of the output file.
title = ""  # If the above is set to True, this will be the title of the graph.
# Binary classifier: ReLU hidden layers, a sigmoid output, binary cross-entropy.
model = Sequential()
model.add(Dense(16, input_dim=dimensions, dtype="float32", activation="relu"))
for i in range(hidden_layers - 1):
    model.add(Dense(16, activation="relu"))
# Dropout is applied once, after the last hidden layer only.
model.add(Dropout(0.5))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.summary()
if plot:
    ann_viz(model, filename="{}.gv".format(name), title=title)
# Evaluate with Monte Carlo cross-validation (see mccv_keras).
mccv(x, y, model)
| from ann_visualizer.visualize import ann_viz
from keras import Sequential
from keras.layers import Dense, Dropout
from load_data import x, y, dimensions
from mccv_keras import mccv
hidden_layers = 5
plot = False # Switch to True to output the architecture as a .png file.
name = "" # If the above is set to True, this will be the name of the output file.
title = "" # If the above is set to True, this will be the title of the graph.
model = Sequential()
model.add(Dense(16, input_dim=dimensions, dtype="float32", activation="relu"))
for i in range(hidden_layers - 1):
model.add(Dense(16, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.summary()
if plot:
ann_viz(model, filename="{}.gv".format(name), title=title)
mccv(x, y, model)
| en | 0.872689 | # Switch to True to output the architecture as a .png file. # If the above is set to True, this will be the name of the output file. # If the above is set to True, this will be the title of the graph. | 3.207402 | 3 |
services/scheduling/tests/intra/test_operational.py | rtubio/server | 4 | 6614535 | """
Copyright 2013, 2014 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = '<EMAIL>'
import datetime
import logging
from django import test
from services.common import misc, simulation, helpers as db_tools
from services.configuration.jrpc.serializers import \
channels as channel_serializers
from services.configuration.jrpc.views import rules as jrpc_rules_if
from services.configuration.jrpc.views.channels import \
groundstations as jrpc_gs_ch_if
from services.configuration.jrpc.views.channels import \
spacecraft as jrpc_sc_ch_if
from services.configuration.models import rules as rule_models
from services.scheduling.models import availability as availability_models
from services.scheduling.models import operational as operational_models
from services.simulation.models import passes as pass_models
class OperationalModels(test.TestCase):
    """Integration tests for the generation/removal of OperationalSlots."""
    def setUp(self):
        """
        This method populates the database with some information to be used
        only for this test.
        """
        self.__verbose_testing = False
        if not self.__verbose_testing:
            logging.getLogger('configuration').setLevel(level=logging.CRITICAL)
            logging.getLogger('scheduling').setLevel(level=logging.CRITICAL)
        # Daily availability rule covering 08:00-23:55 UTC.
        self.__rule_1_cfg = db_tools.create_jrpc_daily_rule(
            starting_time=misc.localize_time_utc(datetime.time(
                hour=8, minute=0, second=0
            )),
            ending_time=misc.localize_time_utc(datetime.time(
                hour=23, minute=55, second=0
            ))
        )
        self.__sc_1_id = 'xatcobeo-sc'
        self.__sc_1_tle_id = 'HUMSAT-D'
        self.__sc_1_ch_1_id = 'xatcobeo-fm'
        # Spacecraft channel configured to be compatible with gs channel 1.
        self.__sc_1_ch_1_cfg = {
            channel_serializers.FREQUENCY_K: '437000000',
            channel_serializers.MODULATION_K: 'FM',
            channel_serializers.POLARIZATION_K: 'LHCP',
            channel_serializers.BITRATE_K: '300',
            channel_serializers.BANDWIDTH_K: '12.500000000'
        }
        self.__gs_1_id = 'gs-la'
        self.__gs_1_ch_1_id = 'gs-la-fm'
        self.__gs_1_ch_1_cfg = {
            channel_serializers.BAND_K:
                'UHF / U / 435000000.000000 / 438000000.000000',
            channel_serializers.AUTOMATED_K: False,
            channel_serializers.MODULATIONS_K: ['FM'],
            channel_serializers.POLARIZATIONS_K: ['LHCP'],
            channel_serializers.BITRATES_K: [300, 600, 900],
            channel_serializers.BANDWIDTHS_K: [12.500000000, 25.000000000]
        }
        self.__gs_1_ch_2_id = 'gs-la-fm-2'
        self.__gs_1_ch_2_cfg = {
            channel_serializers.BAND_K:
                'UHF / U / 435000000.000000 / 438000000.000000',
            channel_serializers.AUTOMATED_K: False,
            channel_serializers.MODULATIONS_K: ['FM'],
            channel_serializers.POLARIZATIONS_K: ['LHCP'],
            channel_serializers.BITRATES_K: [300, 600, 900],
            channel_serializers.BANDWIDTHS_K: [12.500000000, 25.000000000]
        }
        # Imported for their side effects (presumably registering the
        # scheduling signal receivers) -- the names themselves are unused.
        # noinspection PyUnresolvedReferences
        from services.scheduling.signals import availability
        # noinspection PyUnresolvedReferences
        from services.scheduling.signals import compatibility
        # noinspection PyUnresolvedReferences
        from services.scheduling.signals import operational
        self.__band = db_tools.create_band()
        self.__user_profile = db_tools.create_user_profile()
        self.__gs_1 = db_tools.create_gs(
            user_profile=self.__user_profile, identifier=self.__gs_1_id,
        )
    def test_1_compatibility_sc_channel_added_deleted(self):
        """INTR test: compatibility changed generates operational slots
        1) +GS_CH
        2) +RULE
        3) +SC_CH
        4) -SC_CH
        5) -RULE
        6) -GS_CH
        OperationalSlots should be available only in between steps 3 and 4.
        """
        if self.__verbose_testing:
            print('##### test_add_slots: no rules')
        self.__sc_1 = db_tools.create_sc(
            user_profile=self.__user_profile,
            identifier=self.__sc_1_id,
            tle_id=self.__sc_1_tle_id,
        )
        self.assertTrue(
            jrpc_gs_ch_if.gs_channel_create(
                groundstation_id=self.__gs_1_id,
                channel_id=self.__gs_1_ch_1_id,
                configuration=self.__gs_1_ch_1_cfg
            ),
            'Channel should have been created!'
        )
        r_1_id = jrpc_rules_if.add_rule(self.__gs_1_id, self.__rule_1_cfg)
        self.assertIsNot(r_1_id, 0, 'Rule should have been added!')
        self.assertTrue(
            jrpc_sc_ch_if.sc_channel_create(
                spacecraft_id=self.__sc_1_id,
                channel_id=self.__sc_1_ch_1_id,
                configuration=self.__sc_1_ch_1_cfg
            ), 'Channel should have been created!'
        )
        a_slots = availability_models.AvailabilitySlot.objects.get_applicable(
            groundstation=self.__gs_1
        )
        if self.__verbose_testing:
            misc.print_list(a_slots, 'AvailabilitySlots')
            misc.print_list(
                operational_models.OperationalSlot.objects.all(),
                'OperationalSlots'
            )
        # With compatible channels and an active rule, slots must now exist.
        self.assertGreaterEqual(
            len(operational_models.OperationalSlot.objects.all()), 2
        )
        self.assertTrue(
            jrpc_sc_ch_if.sc_channel_delete(
                spacecraft_id=self.__sc_1_id, channel_id=self.__sc_1_ch_1_id
            )
        )
        # Removing the spacecraft channel must leave no FREE slots behind.
        expected = []
        actual = list(
            operational_models.OperationalSlot.objects.filter(
                state=operational_models.STATE_FREE
            ).values_list('state')
        )
        if self.__verbose_testing:
            print('>>> window = ' + str(
                simulation.OrbitalSimulator.get_simulation_window()
            ))
            misc.print_list(rule_models.AvailabilityRule.objects.all())
            misc.print_list(availability_models.AvailabilitySlot.objects.all())
            misc.print_list(operational_models.OperationalSlot.objects.all())
            misc.print_list(actual)
            misc.print_list(expected)
        self.assertEqual(actual, expected)
        self.assertTrue(
            jrpc_rules_if.remove_rule(self.__gs_1_id, r_1_id),
            'Rule should have been removed!'
        )
        # Still no FREE slots after the rule is gone.
        expected = []
        actual = list(
            operational_models.OperationalSlot.objects.filter(
                state=operational_models.STATE_FREE
            ).values_list('state')
        )
        self.assertEqual(actual, expected)
        self.assertTrue(
            jrpc_gs_ch_if.gs_channel_delete(
                groundstation_id=self.__gs_1_id,
                channel_id=self.__gs_1_ch_1_id
            ),
            'Could not delete GroundStationChannel = ' + str(
                self.__gs_1_ch_1_id
            )
        )
        # And none either once the ground station channel is deleted.
        expected = []
        actual = list(
            operational_models.OperationalSlot.objects.filter(
                state=operational_models.STATE_FREE
            ).values_list('state')
        )
        self.assertEqual(actual, expected)
    def test_2_no_compatibility_no_slots(self):
        """INTR test: no compatible channels, no slots
        (INITIAL): 1 GS, no channels, no rules
        (1A - STEP) : +SC
        (1B - STEP) : +rule
        (1C - CHECK): no operational slots
        """
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(
            pass_models.PassSlots.objects.filter(
                spacecraft__identifier=self.__sc_1_id
            ).count(),
            0
        )
        r_1_id = jrpc_rules_if.add_rule(self.__gs_1_id, self.__rule_1_cfg)
        self.assertIsNot(r_1_id, 0, 'Rule should have been added!')
        self.assertEquals(
            len(operational_models.OperationalSlot.objects.all()), 0
        )
        self.__sc_1 = db_tools.create_sc(
            user_profile=self.__user_profile,
            identifier=self.__sc_1_id,
            tle_id=self.__sc_1_tle_id,
        )
        self.assertEquals(
            len(operational_models.OperationalSlot.objects.all()), 0
        )
| """
Copyright 2013, 2014 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = '<EMAIL>'
import datetime
import logging
from django import test
from services.common import misc, simulation, helpers as db_tools
from services.configuration.jrpc.serializers import \
channels as channel_serializers
from services.configuration.jrpc.views import rules as jrpc_rules_if
from services.configuration.jrpc.views.channels import \
groundstations as jrpc_gs_ch_if
from services.configuration.jrpc.views.channels import \
spacecraft as jrpc_sc_ch_if
from services.configuration.models import rules as rule_models
from services.scheduling.models import availability as availability_models
from services.scheduling.models import operational as operational_models
from services.simulation.models import passes as pass_models
class OperationalModels(test.TestCase):
def setUp(self):
"""
This method populates the database with some information to be used
only for this test.
"""
self.__verbose_testing = False
if not self.__verbose_testing:
logging.getLogger('configuration').setLevel(level=logging.CRITICAL)
logging.getLogger('scheduling').setLevel(level=logging.CRITICAL)
self.__rule_1_cfg = db_tools.create_jrpc_daily_rule(
starting_time=misc.localize_time_utc(datetime.time(
hour=8, minute=0, second=0
)),
ending_time=misc.localize_time_utc(datetime.time(
hour=23, minute=55, second=0
))
)
self.__sc_1_id = 'xatcobeo-sc'
self.__sc_1_tle_id = 'HUMSAT-D'
self.__sc_1_ch_1_id = 'xatcobeo-fm'
self.__sc_1_ch_1_cfg = {
channel_serializers.FREQUENCY_K: '437000000',
channel_serializers.MODULATION_K: 'FM',
channel_serializers.POLARIZATION_K: 'LHCP',
channel_serializers.BITRATE_K: '300',
channel_serializers.BANDWIDTH_K: '12.500000000'
}
self.__gs_1_id = 'gs-la'
self.__gs_1_ch_1_id = 'gs-la-fm'
self.__gs_1_ch_1_cfg = {
channel_serializers.BAND_K:
'UHF / U / 435000000.000000 / 438000000.000000',
channel_serializers.AUTOMATED_K: False,
channel_serializers.MODULATIONS_K: ['FM'],
channel_serializers.POLARIZATIONS_K: ['LHCP'],
channel_serializers.BITRATES_K: [300, 600, 900],
channel_serializers.BANDWIDTHS_K: [12.500000000, 25.000000000]
}
self.__gs_1_ch_2_id = 'gs-la-fm-2'
self.__gs_1_ch_2_cfg = {
channel_serializers.BAND_K:
'UHF / U / 435000000.000000 / 438000000.000000',
channel_serializers.AUTOMATED_K: False,
channel_serializers.MODULATIONS_K: ['FM'],
channel_serializers.POLARIZATIONS_K: ['LHCP'],
channel_serializers.BITRATES_K: [300, 600, 900],
channel_serializers.BANDWIDTHS_K: [12.500000000, 25.000000000]
}
# noinspection PyUnresolvedReferences
from services.scheduling.signals import availability
# noinspection PyUnresolvedReferences
from services.scheduling.signals import compatibility
# noinspection PyUnresolvedReferences
from services.scheduling.signals import operational
self.__band = db_tools.create_band()
self.__user_profile = db_tools.create_user_profile()
self.__gs_1 = db_tools.create_gs(
user_profile=self.__user_profile, identifier=self.__gs_1_id,
)
def test_1_compatibility_sc_channel_added_deleted(self):
"""INTR test: compatibility changed generates operational slots
1) +GS_CH
2) +RULE
3) +SC_CH
4) -SC_CH
5) -RULE
6) -GS_CH
OperationalSlots should be available only in bewteen steps 3 and 4.
"""
if self.__verbose_testing:
print('##### test_add_slots: no rules')
self.__sc_1 = db_tools.create_sc(
user_profile=self.__user_profile,
identifier=self.__sc_1_id,
tle_id=self.__sc_1_tle_id,
)
self.assertTrue(
jrpc_gs_ch_if.gs_channel_create(
groundstation_id=self.__gs_1_id,
channel_id=self.__gs_1_ch_1_id,
configuration=self.__gs_1_ch_1_cfg
),
'Channel should have been created!'
)
r_1_id = jrpc_rules_if.add_rule(self.__gs_1_id, self.__rule_1_cfg)
self.assertIsNot(r_1_id, 0, 'Rule should have been added!')
self.assertTrue(
jrpc_sc_ch_if.sc_channel_create(
spacecraft_id=self.__sc_1_id,
channel_id=self.__sc_1_ch_1_id,
configuration=self.__sc_1_ch_1_cfg
), 'Channel should have been created!'
)
a_slots = availability_models.AvailabilitySlot.objects.get_applicable(
groundstation=self.__gs_1
)
if self.__verbose_testing:
misc.print_list(a_slots, 'AvailabilitySlots')
misc.print_list(
operational_models.OperationalSlot.objects.all(),
'OperationalSlots'
)
self.assertGreaterEqual(
len(operational_models.OperationalSlot.objects.all()), 2
)
self.assertTrue(
jrpc_sc_ch_if.sc_channel_delete(
spacecraft_id=self.__sc_1_id, channel_id=self.__sc_1_ch_1_id
)
)
expected = []
actual = list(
operational_models.OperationalSlot.objects.filter(
state=operational_models.STATE_FREE
).values_list('state')
)
if self.__verbose_testing:
print('>>> window = ' + str(
simulation.OrbitalSimulator.get_simulation_window()
))
misc.print_list(rule_models.AvailabilityRule.objects.all())
misc.print_list(availability_models.AvailabilitySlot.objects.all())
misc.print_list(operational_models.OperationalSlot.objects.all())
misc.print_list(actual)
misc.print_list(expected)
self.assertEqual(actual, expected)
self.assertTrue(
jrpc_rules_if.remove_rule(self.__gs_1_id, r_1_id),
'Rule should have been removed!'
)
expected = []
actual = list(
operational_models.OperationalSlot.objects.filter(
state=operational_models.STATE_FREE
).values_list('state')
)
self.assertEqual(actual, expected)
self.assertTrue(
jrpc_gs_ch_if.gs_channel_delete(
groundstation_id=self.__gs_1_id,
channel_id=self.__gs_1_ch_1_id
),
'Could not delete GroundStationChannel = ' + str(
self.__gs_1_ch_1_id
)
)
expected = []
actual = list(
operational_models.OperationalSlot.objects.filter(
state=operational_models.STATE_FREE
).values_list('state')
)
self.assertEqual(actual, expected)
def test_2_no_compatibility_no_slots(self):
"""INTR test: no compatible channels, no slots
(INITIAL): 1 GS, no channels, no rules
(1A - STEP) : +SC
(1B - STEP) : +rule
(1C - CHECK): no operational slots
"""
self.assertEquals(
pass_models.PassSlots.objects.filter(
spacecraft__identifier=self.__sc_1_id
).count(),
0
)
r_1_id = jrpc_rules_if.add_rule(self.__gs_1_id, self.__rule_1_cfg)
self.assertIsNot(r_1_id, 0, 'Rule should have been added!')
self.assertEquals(
len(operational_models.OperationalSlot.objects.all()), 0
)
self.__sc_1 = db_tools.create_sc(
user_profile=self.__user_profile,
identifier=self.__sc_1_id,
tle_id=self.__sc_1_tle_id,
)
self.assertEquals(
len(operational_models.OperationalSlot.objects.all()), 0
)
| en | 0.754477 | Copyright 2013, 2014 <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. This method populates the database with some information to be used only for this test. # noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences INTR test: compatibility changed generates operational slots 1) +GS_CH 2) +RULE 3) +SC_CH 4) -SC_CH 5) -RULE 6) -GS_CH OperationalSlots should be available only in bewteen steps 3 and 4. #### test_add_slots: no rules') INTR test: no compatible channels, no slots (INITIAL): 1 GS, no channels, no rules (1A - STEP) : +SC (1B - STEP) : +rule (1C - CHECK): no operational slots | 1.785465 | 2 |
tests/test_gdrive.py | rdmolony/streamlit-cloud-uploader | 0 | 6614536 | <filename>tests/test_gdrive.py
from pathlib import Path
from streamlit_cloud_uploader import gdrive
def test_extract_id_from_google_drive_link():
    """The file ID embedded in a Drive share link is extracted correctly."""
    share_link = "https://drive.google.com/file/d/1mzxpZS_nKx8pOLNLDO2SXzboVTE4rlV-/view?usp=sharing"
    file_id = gdrive._extract_id_from_google_drive_link(url=share_link)
    assert file_id == "1mzxpZS_nKx8pOLNLDO2SXzboVTE4rlV-"
def test_download_file_from_google_drive(tmp_path: Path):
    """Downloading a small shared file lands it at the requested path (network test)."""
    target = tmp_path / "small-file.csv"
    gdrive.download_file_from_google_drive(
        url="https://drive.google.com/file/d/14rV7E90MgXUd9pxhdas7TyYsDXbPmP2-/view?usp=sharing",
        filepath=target,
    )
    assert target.exists()
| <filename>tests/test_gdrive.py
from pathlib import Path
from streamlit_cloud_uploader import gdrive
def test_extract_id_from_google_drive_link():
input_url = "https://drive.google.com/file/d/1mzxpZS_nKx8pOLNLDO2SXzboVTE4rlV-/view?usp=sharing"
expected_output = "1mzxpZS_nKx8pOLNLDO2SXzboVTE4rlV-"
output = gdrive._extract_id_from_google_drive_link(url=input_url)
assert output == expected_output
def test_download_file_from_google_drive(tmp_path: Path):
filepath = tmp_path / "small-file.csv"
gdrive.download_file_from_google_drive(
url="https://drive.google.com/file/d/14rV7E90MgXUd9pxhdas7TyYsDXbPmP2-/view?usp=sharing",
filepath=filepath,
)
assert filepath.exists()
| none | 1 | 3.088139 | 3 | |
disp/fws/__init__.py | zhubonan/disp | 1 | 6614537 | """
Fireworks subpackage
"""
| """
Fireworks subpackage
"""
| en | 0.158084 | Fireworks subpackage | 0.936125 | 1 |
modules/ubot_turtle.py | hu-zza/uBot_driver | 1 | 6614538 | """
uBot_firmware // The firmware of the μBot, the educational floor robot. (A MicroPython port to ESP8266 with additional modules.)
This file is part of uBot_firmware.
[https://zza.hu/uBot_firmware]
[https://git.zza.hu/uBot_firmware]
MIT License
Copyright (c) 2020-2021 <NAME> // hu-zza
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from machine import Pin, Timer
import ubot_config as config
import ubot_buzzer as buzzer
import ubot_logger as logger
import ubot_motor as motor
import ubot_data as data
_powerOns = config.get("system", "power_ons")
_namedFolder = config.get("turtle", "named_folder")
_turtleFolder = config.get("turtle", "turtle_folder")
_moveChars = config.get("turtle", "move_chars")
_turtleChars = config.get("turtle", "turtle_chars")
_savedCount = 0
_clockPin = Pin(13, Pin.OUT) # Advances the decade counter (U3).
_clockPin.off()
# Checks the returning signal from turtle HAT.
_inputPin = Pin(16, Pin.OUT) # FUTURE: _inputPin = Pin(16, Pin.IN)
_inputPin.off() # DEPRECATED: New PCB design (2.1) will resolve this.
_inputPin.init(Pin.IN) # DEPRECATED: New PCB design (2.1) will resolve this.
_checkPeriod = config.get("turtle", "check_period")
_counterPosition = 0 # The position of the decade counter (U3).
_pressLength = config.get("turtle", "press_length")
_maxError = config.get("turtle", "max_error")
_lastPressed = [0, 0] # Inside: [last pressed button, elapsed (button check) cycles]
_firstRepeat = config.get("turtle", "first_repeat")
_loopChecking = config.get("turtle", "loop_checking")
_moveLength = config.get("turtle", "move_length")
_turnLength = config.get("turtle", "turn_length")
_halfTurn = _turnLength // 2
_breathLength = config.get("turtle", "breath_length")
_endSignalEnabled = True
_endSignalSkipCount = 0
_stepSignalEnabled = True
_stepSignalSkipCount = 0
_endSignal = config.get("turtle", "end_signal") # Sound indicates the end of a step during execution: buzzer.keyBeep(_endSignal)
_stepSignal = config.get("turtle", "step_signal") # Sound indicates the end of program execution: buzzer.keyBeep(_stepSignal)
_pressedListIndex = 0
_pressedList = [0] * (_pressLength + _maxError) # Low-level: The last N (_pressLength + _maxError) button check results.
_commandArray = bytearray() # High-level: Abstract commands, result of processed button presses.
_commandPointer = 0 # Pointer for _commandArray.
_programArray = bytearray() # High-level: Result of one or more added _commandArray.
_programParts = [0] # Positions by which _programArray can be split into _commandArray(s).
_temporaryCommandPointer = 0 # For stateless run
_temporaryProgramArray = bytearray() # with the capability of
_temporaryProgramParts = [0] # restore unsaved stuff.
_loopCounter = 0 # At loop creation this holds iteration count.
_functionPosition = [-1, -1, -1] # -1 : not defined, -0.1 : under definition, 0+ : defined
# If defined, this index the first command of the function,
# refer to the first command of the function, instead of its curly brace "{".
_blockStartIndex = 0 # At block (loop, fn declaration) creation, this holds block start position.
_blockStartStack = []
_mappingsStack = []
_processingProgram = False
_runningProgram = False
_timer = Timer(-1) # Executes the repeated button checks.
_blockBoundaries = ((40, 41), (123, 125), (126, 126)) # (("(", ")"), ("{", "}"), ("~", "~"))
################################
## PUBLIC METHODS
def isBusy():
return _processingProgram or _runningProgram
def getValidMoveChars():
return _moveChars
def getValidTurtleChars():
return _turtleChars
def checkButtons(timer: Timer = None):
_addCommand(_getValidatedPressedButton())
def press(pressed): # pressed = 1<<buttonOrdinal
if isinstance(pressed, str):
pressed = int(pressed)
_logLastPressed(pressed)
_addCommand(pressed)
def move(direction):
if isinstance(direction, str):
direction = ord(direction)
movementTuple = _moveCharMapping.get(direction)
if movementTuple is not None:
motor.add(movementTuple)
def skipSignal(stepCount: int = 1, endCount: int = 0) -> None:
global _stepSignalSkipCount, _endSignalSkipCount
_stepSignalSkipCount += stepCount
_endSignalSkipCount += endCount
def getProgramsCount():
return sum(len(getProgramListOf(folder)) for folder in getProgramFolders())
def getProgramFolders():
return data.getFoldersOf(data.PROGRAM)
def doesFolderExist(folder: str) -> bool:
path = getPathOf(folder)
return path.isExist and path.isFolder
def createFolder(folder: str) -> bool:
return data.createFolder(getPathOf("program", folder))
def getProgramListOf(folder: str) -> tuple:
return data.getFileNameListOf("program", folder, "txt")
def doesProgramExist(folder, title):
path = getPathOf(folder, title)
return path.isExist and path.isFile
def getProgramCode(folder: str, title: str) -> str:
return "".join(data.getFile(getPathOf(folder, title), False))
def getPathOf(folder: str, title = "") -> data.Path:
return data.createPathOf("program", folder, normalizeProgramTitleFromFolder(title, folder))
def getLastTurtleProgramTitle() -> str:
return sorted(getProgramListOf(_turtleFolder))[-1]
def normalizeProgramTitleFromFolder(title: str, folder: str) -> str:
return normalizeProgramTitle(title, folder == _turtleFolder)
def normalizeProgramTitle(title: object, isTurtle: bool = True) -> str:
turtleTuple = data.extractIntTuple(title, 2) if isTurtle else ()
if 0 < len(turtleTuple):
if 1 == len(turtleTuple):
return "{:010d}_{:05d}.txt".format(_powerOns, turtleTuple[0])
else:
return "{:010d}_{:05d}.txt".format(turtleTuple[0], turtleTuple[1])
else:
return title if isinstance(title, str) and title.endswith(".txt") else "{}.txt".format(title)
def runProgram(folder, title):
if doesProgramExist(folder, title):
retainInTemporary()
loadProgram(folder, title)
press(64)
loadFromTemporary()
return True
else:
return False
def loadProgram(folder, title):
clearMemory()
if doesProgramExist(folder, title):
loadProgramFromString(getProgramCode(folder, title))
return True
else:
return False
def loadProgramFromString(turtleCode):
global _programArray, _programParts
clearMemory()
try:
array = turtleCode.encode()
_programArray = array
_programParts = [len(array)]
return True
except Exception as e:
logger.append(e)
return False
def saveLoadedProgram(folder = "", title = ""):
return saveProgram(folder if data.isStringWithContent(folder) else _namedFolder, title, getProgramArray())
def saveProgram(folder: str = "", title: str = "", program: str = "") -> data.Path:
global _savedCount
folder = folder if data.isStringWithContent(folder) else _namedFolder
isTitleValid = data.isStringWithContent(title)
path = data.createPathOf("program", folder, title) if isTitleValid else _generateFullPathForAutoSave()
result = data.saveFile(path, program, False, True)
if not result and not isTitleValid:
_savedCount -= 1
return path if result else data.INVALID_PATH
def _generateFullPathForAutoSave() -> data.Path:
global _savedCount
_savedCount += 1
return data.createPathOf("program", _turtleFolder, "{:010d}_{:05d}.txt".format(_powerOns, _savedCount))
def deleteProgram(folder: str = "", title: str = "") -> bool:
pass
def _unavailableProgramAction(*args) -> bool:
return False
def _unavailableProgramResultSupplier(*args) -> dict:
return {}
def doProgramAction(folder: str = "", title: str = "", action: str = "run") -> tuple:
folder = folder if data.isStringWithContent(folder) else _turtleFolder
title = title if data.isStringWithContent(title) else getLastTurtleProgramTitle()
action = action.lower()
try:
return _programActions.setdefault(action, _unavailableProgramAction)(folder, title), \
_programResultSupplier.setdefault(action, _unavailableProgramResultSupplier)(folder, title)
except Exception as e:
logger.append(e)
return False, {}
_programActions = {
"run" : runProgram,
"load" : loadProgram,
"delete": deleteProgram
}
_programResultSupplier = {
}
def getCommandArray():
return _commandArray[:_commandPointer].decode()
def getProgramArray():
return _programArray[:_programParts[-1]].decode()
def isMemoryEmpty():
return isCommandMemoryEmpty() and isProgramMemoryEmpty()
def isCommandMemoryEmpty():
return _commandPointer == 0
def isProgramMemoryEmpty():
return _programParts == [0]
def clearMemory():
clearCommandMemory()
clearProgramMemory()
def clearProgramMemory():
global _programParts
_programParts = [0]
def clearCommandMemory():
global _commandPointer
_commandPointer = 0
def retainInTemporary():
global _temporaryCommandPointer, _temporaryProgramParts, _temporaryProgramArray
_temporaryCommandPointer = _commandPointer
_temporaryProgramParts = _programParts
_temporaryProgramArray = _programArray
def loadFromTemporary():
global _commandPointer, _programParts, _programArray
_commandPointer = _temporaryCommandPointer
_programParts = _temporaryProgramParts
_programArray = _temporaryProgramArray
################################################################
################################################################
##########
########## PRIVATE, CLASS-LEVEL METHODS
##########
################################
## BUTTON PRESS PROCESSING
def _startButtonChecking():
_timer.init(
period = _checkPeriod,
mode = Timer.PERIODIC,
callback = checkButtons
)
def _stopButtonChecking():
_timer.deinit()
def _getValidatedPressedButton():
global _lastPressed
pressed = _getPressedButton()
_logLastPressed(pressed)
if _lastPressed[1] == 1 or _firstRepeat < _lastPressed[1]: # Lack of pressing returns same like a button press.
_lastPressed[1] = 1 # In this case the returning value is 0.
return pressed
else:
return 0 # If validation is in progress, returns 0.
def _logLastPressed(pressed):
global _lastPressed
if pressed == _lastPressed[0]:
_lastPressed[1] += 1
else:
_lastPressed = [pressed, 1]
def _getPressedButton():
global _pressedList, _pressedListIndex
pressed = 0
for i in range(10):
# pseudo pull-down # DEPRECATED: New PCB design (2.1) will resolve this.
if _inputPin.value() == 1: # DEPRECATED: New PCB design (2.1) will resolve this.
_inputPin.init(Pin.OUT) # DEPRECATED: New PCB design (2.1) will resolve this.
_inputPin.off() # DEPRECATED: New PCB design (2.1) will resolve this.
_inputPin.init(Pin.IN) # DEPRECATED: New PCB design (2.1) will resolve this.
if _inputPin.value() == 1:
pressed += 1 << _counterPosition # pow(2, _counterPosition)
_advanceCounter()
# shift counter's "resting position" to the closest pressed button to eliminate BTN LED flashing
if 0 < pressed:
while bin(1024 + pressed)[12 - _counterPosition] != "1":
_advanceCounter()
_pressedList[_pressedListIndex] = pressed
_pressedListIndex += 1
if len(_pressedList) <= _pressedListIndex:
_pressedListIndex = 0
errorCount = 0
for pressed in _pressedList:
count = _pressedList.count(pressed)
if _pressLength <= count:
return pressed
errorCount += count
if _maxError < errorCount:
return 0
def _advanceCounter():
global _counterPosition
_clockPin.on()
if 9 <= _counterPosition:
_counterPosition = 0
else:
_counterPosition += 1
_clockPin.off()
################################
## BUTTON PRESS INTERPRETATION
def _addCommand(pressed):
global _processingProgram, _runningProgram
try:
if pressed == 0: # result = 0 means, there is nothing to save to _commandArray.
result = 0 # Not only lack of buttonpress (pressed == 0) returns 0.
elif _runningProgram:
motor.stop() # Stop commands / program execution.
_processingProgram = False
_runningProgram = False
result = _beepAndReturn(("processed", 0)) # Beep and skip the (result) processing.
else:
tupleWithCallable = _currentMapping.get(pressed) # Dictionary based switch...case
if tupleWithCallable is None: # Default branch
result = 0 # Skip the (result) processing.
else:
if tupleWithCallable[1] == ():
result = tupleWithCallable[0]()
else:
result = tupleWithCallable[0](tupleWithCallable[1])
if result != 0:
if isinstance(result, int):
_addToCommandArray(result)
elif isinstance(result, tuple):
for r in result:
_addToCommandArray(r)
else:
print("Wrong result: {}".format(result))
except Exception as e:
logger.append(e)
def _addToCommandArray(command):
global _commandArray, _commandPointer
if _commandPointer < len(_commandArray):
_commandArray[_commandPointer] = command
else:
_commandArray.append(command)
_commandPointer += 1
################################
## HELPER METHODS FOR BLOCKS
def _blockStarted(newMapping):
global _blockStartIndex, _currentMapping
_blockStartStack.append(_blockStartIndex)
_blockStartIndex = _commandPointer
_mappingsStack.append(_currentMapping)
_currentMapping = newMapping
buzzer.setDefaultState(1)
buzzer.keyBeep("started")
def _blockCompleted(deleteFlag):
global _commandPointer, _blockStartIndex, _currentMapping
if len(_mappingsStack) != 0:
if deleteFlag:
_commandPointer = _blockStartIndex
_blockStartIndex = _blockStartStack.pop()
_currentMapping = _mappingsStack.pop()
if len(_mappingsStack) == 0: # len(_mappingsStack) == 0 means all blocks are closed.
buzzer.setDefaultState(0)
if deleteFlag:
buzzer.keyBeep("deleted")
return True
else:
buzzer.keyBeep("completed")
return False
def _getOppositeBoundary(commandPointer):
boundary = _commandArray[commandPointer]
for boundaryPair in _blockBoundaries:
if boundary == boundaryPair[0]:
return boundaryPair[1]
elif boundary == boundaryPair[1]:
return boundaryPair[0]
return -1
def _isTagBoundary(commandPointer):
return _commandArray[commandPointer] == _getOppositeBoundary(commandPointer)
################################
## STANDARDIZED FUNCTIONS
def _start(arguments): # (blockLevel,)
global _processingProgram, _runningProgram
buzzer.keyBeep("processed")
_processingProgram = True
_runningProgram = True
_stopButtonChecking()
if arguments[0] or 0 < _commandPointer: # Executing the body of a block or the _commandArray
_toPlay = _commandArray
_upperBoundary = _commandPointer
else: # Executing the _programArray
_toPlay = _programArray
_upperBoundary = _programParts[-1]
_pointer = _blockStartIndex + 1 if arguments[0] else 0
_pointerStack = []
_counterStack = []
config.saveDateTime()
logger.logCommandsAndProgram()
motor.setCallback(0, _callbackEnd)
motor.setCallback(1, _callbackStep)
#counter = 0 # Debug
#print("_toPlay[:_pointer]", "_toPlay[_pointer:]", "\t\t\t", "counter", "_pointer", "_toPlay[_pointer]") # Debug
while _processingProgram:
remaining = _upperBoundary - 1 - _pointer # Remaining bytes in _toPlay bytearray. 0 if _toPlay[_pointer] == _toPlay[-1]
checkCounter = False
if remaining < 0: # If everything is executed, exits.
_processingProgram = False
elif _toPlay[_pointer] == 40: # "(" The block-level previews are excluded. (Those starts from first statement.)
_pointerStack.append(_pointer) # Save the position of the loop's starting parentheses: "("
while _pointer < _upperBoundary and _toPlay[_pointer] != 42: # "*" Jump to the end of the loop's body.
_pointer += 1
remaining = _upperBoundary - 1 - _pointer
if 2 <= remaining and _toPlay[_pointer] == 42: # If the loop is complete and the pointer is at the end of its body.
_counterStack.append(_toPlay[_pointer + 1] - 48) # Counter was increased at definition by 48. b'0' == 48
checkCounter = True
else: # Maybe it's an error, so stop execution.
_processingProgram = False
elif _toPlay[_pointer] == 42: # "*" End of the body of the loop.
_counterStack[-1] -= 1 # Decrease the loop counter.
checkCounter = True
elif _toPlay[_pointer] == 123: # "{" Start of a function.
while _pointer < _upperBoundary and _toPlay[_pointer] != 125: # "}" Jump to the function's closing curly brace.
_pointer += 1
if _toPlay[_pointer] != 125: # Means the _pointer < _upperBoundary breaks the while loop.
_processingProgram = False
elif _toPlay[_pointer] == 124: # "|" End of the currently executed function.
_pointer = _pointerStack.pop() # Jump back to where the function call occurred.
elif _toPlay[_pointer] == 126: # "~"
if 2 <= remaining and _toPlay[_pointer + 2] == 126: # Double-check: 1. Enough remaining to close function call; 2. "~"
_pointerStack.append(_pointer + 2) # Save the returning position as the second tilde: "~"
_index = _toPlay[_pointer + 1] - 49 # Not 48! functionId - 1 = array index
_jumpTo = -1 # Declared with -1 because of the check "_pointer != _jumpTo".
if _index < len(_functionPosition): # If the _functionPosition contains the given function index.
_jumpTo = _functionPosition[_index]
if 0 <= _jumpTo: # If the retrieved value from _functionPosition is a real position.
_pointer = _jumpTo
if _pointer != _jumpTo: # This handles both else branch of previous two if statements:
del _pointerStack[-1] # The function call failed, there is no need for "jump back" index.
_pointer += 2 # Jump to the second tilde: "~" (Skip the whole function call.)
else: # Maybe it's an error, so stop execution.
_processingProgram = False
else:
move(_toPlay[_pointer]) # Try to execute the command as move. It can fail without exception.
if checkCounter:
if 0 < _counterStack[-1]: # If the loop counter is greater than 0.
_pointer = _pointerStack[-1] # Jump back to the loop starting position.
else:
del _pointerStack[-1] # Delete the loop's starting position from stack.
del _counterStack[-1] # Delete the loop's counter from stack.
_pointer += 2 # Jump to the loop's closing parentheses: ")"
_pointer += 1
_processingProgram = False
_startButtonChecking()
return 0
# COMMAND AND PROGRAM ARRAY
def _addToProgOrSave():
global _commandPointer, _programArray
if _commandPointer != 0:
for i in range(_commandPointer):
if _programParts[-1] + i < len(_programArray):
_programArray[_programParts[-1] + i] = _commandArray[i]
else:
_programArray.append(_commandArray[i])
_programParts.append(_programParts[-1] + _commandPointer)
_commandPointer = 0
buzzer.keyBeep("added")
elif _programParts[-1] != 0:
saveLoadedProgram()
buzzer.keyBeep("saved")
return 0
# LOOP
def _createLoop(arguments): # (creationState,) 40 [statements...] 42 [iteration count] 41
global _currentMapping, _loopCounter
if arguments[0] == 40:
_blockStarted(_loopBeginMapping)
_loopCounter = 0
return 40
elif arguments[0] == 42:
if _commandPointer - _blockStartIndex < 2: # If the body of the loop is empty,
_blockCompleted(True) # close and delete the complete block.
return 0
else:
_currentMapping = _loopCounterMapping
buzzer.keyBeep("input_needed")
return 42
elif arguments[0] == 41:
# _blockCompleted deletes the loop if counter is zero, and returns with the result of the
# deletion (True if deleted). This returning value is used as index: False == 0, and True == 1
# Increase _loopCounter by 48 = human-friendly bytes: 48 -> "0", 49 -> "1", ...
return ((_loopCounter + 48, 41), 0)[_blockCompleted(_loopCounter == 0)]
def _modifyLoopCounter(arguments): # (value,) Increasing by this value, if value == 0, it resets he counter
global _loopCounter
if _loopCounter + arguments[0] < 0: # Checks lower boundary.
_loopCounter = 0
buzzer.keyBeep("boundary")
elif 255 < _loopCounter + arguments[0]: # Checks upper boundary.
_loopCounter = 255
buzzer.keyBeep("boundary")
elif arguments[0] == 0: # Reset the counter. Use case: forget the exact count and press 'X'.
_loopCounter = 0
buzzer.keyBeep("deleted")
else: # General modification.
_loopCounter += arguments[0]
buzzer.keyBeep("change_count")
return 0
def _checkLoopCounter():
global _loopChecking
if _loopChecking == 2 or (_loopChecking == 1 and _loopCounter <= 20):
buzzer.keyBeep("attention")
buzzer.midiBeep(64, 100, 500, _loopCounter)
else:
buzzer.keyBeep("too_long")
buzzer.rest(1000)
return 0
# FUNCTION
def _manageFunction(arguments): # (functionId, onlyCall) 123 [statements...] 124 [id] 125
global _functionPosition # function call: 126 [id] 126
index = arguments[0] - 1 # functionId - 1 == Index in _functionPosition
if index < 0 or len(_functionPosition) < index: # The given index is out of array.
buzzer.keyBeep("boundary")
return 0 # Ignore and return.
elif len(_functionPosition) == index: # The given index is out of array. However, it's subsequent:
_functionPosition.append(-1) # Extending the array and continue.
# Calling the function if it is defined, or flag 'only call' is True and it is not under definition.
# In the second case, position -1 (undefined) is fine. (lazy initialization)
if 0 <= _functionPosition[index] or (arguments[1] and _functionPosition[index] != -0.1):
buzzer.keyBeep("processed")
return 126, arguments[0] + 48, 126 # Increase by 48 = human-friendly bytes: 48 -> "0", 49 -> "1", ...
elif _functionPosition[index] == -0.1: # End of defining the function
# Save index to _functionPosition, because _blockStartIndex will be destroyed during _blockCompleted().
_functionPosition[index] = len(_programArray) + _blockStartIndex
# If function contains nothing
# (_commandPointer - _blockStartIndex < 2 -> Function start and end are adjacent.),
# delete it by _blockCompleted() which return a boolean (True if deleted).
# If this returning value is True, retain len(_programArray) + _blockStartIndex, else overwrite it with -1.
if _blockCompleted(_commandPointer - _blockStartIndex < 2):
_functionPosition[index] = -1
return (0, (124, arguments[0] + 48, 125))[0 <= _functionPosition[index]] # False == 0, and True == 1 (defined)
else: # Beginning of defining the function
_blockStarted(_functionMapping)
_functionPosition[index] = -0.1 # In progress, so it isn't -1 (undefined) or 0+ (defined).
return 123
# GENERAL
def _beepAndReturn(arguments): # (keyOfBeep, returningValue)
buzzer.keyBeep(arguments[0])
return arguments[1]
def _undo(arguments): # (blockLevel,)
global _commandPointer, _commandArray, _functionPosition
# Sets the maximum range of undo in according to blockLevel flag.
undoLowerBoundary = _blockStartIndex + 1 if arguments[0] else 0
if undoLowerBoundary < _commandPointer: # If there is anything that can be undone.
_commandPointer -= 1
buzzer.keyBeep("undone")
# _getOppositeBoundary returns -1 if byte at _commandPointer is not boundary or its pair.
boundary = _getOppositeBoundary(_commandPointer)
if boundary != -1:
if boundary == 123: # "{" If it undoes a function declaration, unregister:
_functionPosition[_commandArray[_commandPointer - 1] - 49] = -1 # Not 48! functionId - 1 = array index
while True: # General undo decreases the pointer by one, so this
_commandPointer -= 1 # do...while loop can handle identical boundary pairs.
if _commandArray[_commandPointer] == boundary or _commandPointer == undoLowerBoundary:
break
if not _isTagBoundary(_commandPointer): # Tags (like function calling) need no keyBeep().
buzzer.keyBeep("deleted")
if _commandPointer == undoLowerBoundary:
buzzer.keyBeep("boundary")
else:
if arguments[0] or _programParts == [0]: # If block-level undo or no more loadable command from _programArray.
buzzer.keyBeep("boundary")
else:
_commandPointer = _programParts[-1] - _programParts[-2]
_commandArray = _programArray[_programParts[-2] : _programParts[-1]]
del _programParts[-1]
buzzer.keyBeep("loaded")
return 0
def _delete(arguments): # (blockLevel,)
global _commandPointer, _programParts, _functionPosition
if arguments[0]: # Block-level: delete only the unfinished block.
_blockCompleted(True) # buzzer.keyBeep("deleted") is called inside _blockCompleted(True)
for i in range(len(_functionPosition)): # Maybe there are user defined functions, so not range(3).
if _functionPosition[i] == -0.1: # If this function is unfinished.
_functionPosition[i] = -1 # Set as undefined.
else: # Not block-level: the whole array is affected.
if _commandPointer != 0: # _commandArray isn't "empty", so "clear" it.
for i in range(_commandPointer - 3): # Unregister functions defined in deleted range.
if _commandArray[i] == 124 and _commandArray[i + 2] == 125: # "|" and "}"
_functionPosition[_commandArray[i + 1] - 49] = -1 # Not 48! functionId - 1 = array index
_commandPointer = 0 # "Clear" _commandArray.
buzzer.keyBeep("deleted")
elif _programParts != [0]: # _commandArray is "empty", but _programArray is not, "clear" it.
_functionPosition = [-1] * len(_functionPosition) # User may want to use higher ids first (from the
# previously used ones). So it is not [-1, -1, -1]
_programParts = [0] # "Clear" _programArray.
buzzer.keyBeep("deleted")
if _commandPointer == 0 and _programParts == [0]: # If _commandArray and _programArray are "empty".
buzzer.keyBeep("boundary")
return 0
def _customMapping():
buzzer.keyBeep("loaded")
return 0
################################
## CALLBACK FUNCTIONS
def _callbackStep():
global _stepSignalSkipCount
if _stepSignalEnabled and 0 == _stepSignalSkipCount and _stepSignal != "":
buzzer.keyBeep(_stepSignal)
else:
_stepSignalSkipCount -= 1
checkButtons()
def _callbackEnd():
global _endSignalSkipCount, _runningProgram
if _endSignalEnabled and 0 == _endSignalSkipCount and _endSignal != "":
buzzer.keyBeep(_endSignal)
else:
_endSignalSkipCount -= 1
_runningProgram = False
################################
## MAPPINGS
# For turtle hat
_defaultMapping = {
1: (_beepAndReturn, ("processed", 70)), # FORWARD
2: (_beepAndReturn, ("processed", 80)), # PAUSE
4: (_createLoop, (40,)), # REPEAT (start)
6: (_manageFunction, (1, False)), # F1
8: (_addToProgOrSave, ()), # ADD
10: (_manageFunction, (2, False)), # F2
12: (_manageFunction, (3, False)), # F3
16: (_beepAndReturn, ("processed", 82)), # RIGHT
32: (_beepAndReturn, ("processed", 66)), # BACKWARD
64: (_start, (False,)), # START / STOP (start)
128: (_beepAndReturn, ("processed", 76)), # LEFT
256: (_undo, (False,)), # UNDO
512: (_delete, (False,)), # DELETE
1023: (_customMapping, ()) # MAPPING
}
_loopBeginMapping = {
1: (_beepAndReturn, ("processed", 70)), # FORWARD
2: (_beepAndReturn, ("processed", 80)), # PAUSE
4: (_createLoop, (42,)), # REPEAT (*)
6: (_manageFunction, (1, True)), # F1
10: (_manageFunction, (2, True)), # F2
12: (_manageFunction, (3, True)), # F3
16: (_beepAndReturn, ("processed", 82)), # RIGHT
32: (_beepAndReturn, ("processed", 66)), # BACKWARD
64: (_start, (True,)), # START / STOP (block-level start)
128: (_beepAndReturn, ("processed", 76)), # LEFT
256: (_undo, (True,)), # UNDO
512: (_delete, (True,)) # DELETE
}
_loopCounterMapping = {
1: (_modifyLoopCounter, (1,)), # FORWARD
4: (_createLoop, (41,)), # REPEAT (end)
16: (_modifyLoopCounter, (1,)), # RIGHT
32: (_modifyLoopCounter, (-1,)), # BACKWARD
64: (_checkLoopCounter, ()), # START / STOP (check counter)
128: (_modifyLoopCounter, (-1,)), # LEFT
512: (_modifyLoopCounter, (0,)) # DELETE
}
_functionMapping = {
1: (_beepAndReturn, ("processed", 70)), # FORWARD
2: (_beepAndReturn, ("processed", 80)), # PAUSE
4: (_createLoop, (40,)), # REPEAT (start)
6: (_manageFunction, (1, True)), # F1
10: (_manageFunction, (2, True)), # F2
12: (_manageFunction, (3, True)), # F3
16: (_beepAndReturn, ("processed", 82)), # RIGHT
32: (_beepAndReturn, ("processed", 66)), # BACKWARD
64: (_start, (True,)), # START / STOP (block-level start)
128: (_beepAndReturn, ("processed", 76)), # LEFT
256: (_undo, (True,)), # UNDO
512: (_delete, (True,)) # DELETE
}
# For other purpose
_moveCharMapping = {
70: (1, _moveLength), # "F" - FORWARD
66: (4, _moveLength), # "B" - BACKWARD
76: (2, _turnLength), # "L" - LEFT (90°)
108: (2, _halfTurn), # "l" - LEFT (45°)
82: (3, _turnLength), # "R" - RIGHT (90°)
114: (3, _halfTurn), # "r" - RIGHT (45°)
80: (0, _moveLength), # "P" - PAUSE
75: (2, _halfTurn), # "K" - LEFT (45°) alias for URL usage ( L - 1 = K ~ l )
81: (3, _halfTurn) # "Q" - RIGHT (45°) alias for URL usage ( R - 1 = Q ~ r )
}
################################
## LAST PART OF INITIALISATION
_currentMapping = _defaultMapping
_startButtonChecking()
| """
uBot_firmware // The firmware of the μBot, the educational floor robot. (A MicroPython port to ESP8266 with additional modules.)
This file is part of uBot_firmware.
[https://zza.hu/uBot_firmware]
[https://git.zza.hu/uBot_firmware]
MIT License
Copyright (c) 2020-2021 <NAME> // hu-zza
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from machine import Pin, Timer
import ubot_config as config
import ubot_buzzer as buzzer
import ubot_logger as logger
import ubot_motor as motor
import ubot_data as data
_powerOns = config.get("system", "power_ons")
_namedFolder = config.get("turtle", "named_folder")
_turtleFolder = config.get("turtle", "turtle_folder")
_moveChars = config.get("turtle", "move_chars")
_turtleChars = config.get("turtle", "turtle_chars")
_savedCount = 0
_clockPin = Pin(13, Pin.OUT) # Advances the decade counter (U3).
_clockPin.off()
# Checks the returning signal from turtle HAT.
_inputPin = Pin(16, Pin.OUT) # FUTURE: _inputPin = Pin(16, Pin.IN)
_inputPin.off() # DEPRECATED: New PCB design (2.1) will resolve this.
_inputPin.init(Pin.IN) # DEPRECATED: New PCB design (2.1) will resolve this.
_checkPeriod = config.get("turtle", "check_period")
_counterPosition = 0 # The position of the decade counter (U3).
_pressLength = config.get("turtle", "press_length")
_maxError = config.get("turtle", "max_error")
_lastPressed = [0, 0] # Inside: [last pressed button, elapsed (button check) cycles]
_firstRepeat = config.get("turtle", "first_repeat")
_loopChecking = config.get("turtle", "loop_checking")
_moveLength = config.get("turtle", "move_length")
_turnLength = config.get("turtle", "turn_length")
_halfTurn = _turnLength // 2
_breathLength = config.get("turtle", "breath_length")
_endSignalEnabled = True
_endSignalSkipCount = 0
_stepSignalEnabled = True
_stepSignalSkipCount = 0
_endSignal = config.get("turtle", "end_signal") # Sound indicates the end of a step during execution: buzzer.keyBeep(_endSignal)
_stepSignal = config.get("turtle", "step_signal") # Sound indicates the end of program execution: buzzer.keyBeep(_stepSignal)
_pressedListIndex = 0
_pressedList = [0] * (_pressLength + _maxError) # Low-level: The last N (_pressLength + _maxError) button check results.
_commandArray = bytearray() # High-level: Abstract commands, result of processed button presses.
_commandPointer = 0 # Pointer for _commandArray.
_programArray = bytearray() # High-level: Result of one or more added _commandArray.
_programParts = [0] # Positions by which _programArray can be split into _commandArray(s).
_temporaryCommandPointer = 0 # For stateless run
_temporaryProgramArray = bytearray() # with the capability of
_temporaryProgramParts = [0] # restore unsaved stuff.
_loopCounter = 0 # At loop creation this holds iteration count.
_functionPosition = [-1, -1, -1] # -1 : not defined, -0.1 : under definition, 0+ : defined
# If defined, this index the first command of the function,
# refer to the first command of the function, instead of its curly brace "{".
_blockStartIndex = 0 # At block (loop, fn declaration) creation, this holds block start position.
_blockStartStack = []
_mappingsStack = []
_processingProgram = False
_runningProgram = False
_timer = Timer(-1) # Executes the repeated button checks.
_blockBoundaries = ((40, 41), (123, 125), (126, 126)) # (("(", ")"), ("{", "}"), ("~", "~"))
################################
## PUBLIC METHODS
def isBusy():
return _processingProgram or _runningProgram
def getValidMoveChars():
return _moveChars
def getValidTurtleChars():
return _turtleChars
def checkButtons(timer: Timer = None):
_addCommand(_getValidatedPressedButton())
def press(pressed): # pressed = 1<<buttonOrdinal
if isinstance(pressed, str):
pressed = int(pressed)
_logLastPressed(pressed)
_addCommand(pressed)
def move(direction):
if isinstance(direction, str):
direction = ord(direction)
movementTuple = _moveCharMapping.get(direction)
if movementTuple is not None:
motor.add(movementTuple)
def skipSignal(stepCount: int = 1, endCount: int = 0) -> None:
global _stepSignalSkipCount, _endSignalSkipCount
_stepSignalSkipCount += stepCount
_endSignalSkipCount += endCount
def getProgramsCount():
    """Return the total number of saved programs across all program folders."""
    return sum(len(getProgramListOf(folder)) for folder in getProgramFolders())
def getProgramFolders():
    """Return the folder names of the 'program' data area."""
    return data.getFoldersOf(data.PROGRAM)
def doesFolderExist(folder: str) -> bool:
    """Return True if *folder* exists and is a folder in the program area."""
    path = getPathOf(folder)
    return path.isExist and path.isFolder
def createFolder(folder: str) -> bool:
    # NOTE(review): getPathOf("program", folder) passes "program" as the folder
    # argument and *folder* as the title (which then gets a ".txt" suffix from
    # normalizeProgramTitle). Compare doesFolderExist() above, which calls
    # getPathOf(folder). This looks like a bug — verify against the data module.
    return data.createFolder(getPathOf("program", folder))
def getProgramListOf(folder: str) -> tuple:
    """Return the .txt program file names stored in *folder*."""
    return data.getFileNameListOf("program", folder, "txt")
def doesProgramExist(folder, title):
    """Return True if the given program file exists."""
    path = getPathOf(folder, title)
    return path.isExist and path.isFile
def getProgramCode(folder: str, title: str) -> str:
    """Return the full text of the given program file."""
    return "".join(data.getFile(getPathOf(folder, title), False))
def getPathOf(folder: str, title = "") -> data.Path:
    """Build the data.Path of a program file, normalizing *title* for *folder* first."""
    return data.createPathOf("program", folder, normalizeProgramTitleFromFolder(title, folder))
def getLastTurtleProgramTitle() -> str:
    # NOTE(review): raises IndexError when the turtle folder is empty; the known
    # caller (doProgramAction) relies on exception handling — confirm intended.
    return sorted(getProgramListOf(_turtleFolder))[-1]
def normalizeProgramTitleFromFolder(title: str, folder: str) -> str:
    """Normalize *title*; autosave-style numbering applies only in the turtle folder."""
    return normalizeProgramTitle(title, folder == _turtleFolder)
def normalizeProgramTitle(title: object, isTurtle: bool = True) -> str:
    """Return *title* as a canonical .txt file name.

    For turtle autosaves, one extracted integer yields
    "<powerOns>_<number>.txt", two yield "<first>_<second>.txt"
    (zero-padded). Otherwise a ".txt" suffix is appended unless *title*
    is already a string ending with it.
    """
    numbers = data.extractIntTuple(title, 2) if isTurtle else ()
    if len(numbers) == 1:
        return "{:010d}_{:05d}.txt".format(_powerOns, numbers[0])
    if 1 < len(numbers):
        return "{:010d}_{:05d}.txt".format(numbers[0], numbers[1])
    if isinstance(title, str) and title.endswith(".txt"):
        return title
    return "{}.txt".format(title)
def runProgram(folder, title):
    """Load and execute a saved program, preserving the current in-memory state.

    The current command/program memory is stashed before the run and restored
    afterwards. Returns False when the program does not exist.
    """
    if not doesProgramExist(folder, title):
        return False
    retainInTemporary()
    loadProgram(folder, title)
    press(64)  # 64 == START button: kicks off the interpreter
    loadFromTemporary()
    return True
def loadProgram(folder, title):
    """Clear memory and load the given program into _programArray.

    Returns True only when the program exists AND its code was loaded
    successfully. (Previously the result of loadProgramFromString() was
    discarded, so an encoding failure still reported True.)
    """
    clearMemory()
    if not doesProgramExist(folder, title):
        return False
    # Propagate the load result instead of ignoring it.
    return loadProgramFromString(getProgramCode(folder, title))
def loadProgramFromString(turtleCode):
    """Replace program memory with the encoded *turtleCode*; return success.

    The encoded data is copied into a bytearray: str.encode() returns an
    immutable bytes object, but _addToProgOrSave() and _undo() later mutate
    _programArray in place (index assignment / append / slicing into a
    mutable command buffer), which would raise TypeError on bytes.
    """
    global _programArray, _programParts
    clearMemory()
    try:
        array = bytearray(turtleCode.encode())  # mutable copy, see docstring
        _programArray = array
        _programParts = [len(array)]
        return True
    except Exception as e:
        logger.append(e)
        return False
def saveLoadedProgram(folder = "", title = ""):
    """Persist the program currently held in memory (getProgramArray())."""
    return saveProgram(folder if data.isStringWithContent(folder) else _namedFolder, title, getProgramArray())
def saveProgram(folder: str = "", title: str = "", program: str = "") -> data.Path:
    """Save *program* text and return the written path, or data.INVALID_PATH.

    A blank folder falls back to the named-programs folder; a blank title
    triggers an autosave-style generated file name.
    """
    global _savedCount
    folder = folder if data.isStringWithContent(folder) else _namedFolder
    isTitleValid = data.isStringWithContent(title)
    path = data.createPathOf("program", folder, title) if isTitleValid else _generateFullPathForAutoSave()
    result = data.saveFile(path, program, False, True)
    if not result and not isTitleValid:
        _savedCount -= 1  # Roll back the counter bumped by _generateFullPathForAutoSave().
    return path if result else data.INVALID_PATH
def _generateFullPathForAutoSave() -> data.Path:
    """Return the next autosave path: <powerOns>_<savedCount>.txt in the turtle folder."""
    global _savedCount
    _savedCount += 1
    return data.createPathOf("program", _turtleFolder, "{:010d}_{:05d}.txt".format(_powerOns, _savedCount))
def deleteProgram(folder: str = "", title: str = "") -> bool:
    # NOTE(review): unimplemented stub — it returns None, not bool, so
    # doProgramAction()'s "delete" action reports None as its result.
    pass
def _unavailableProgramAction(*args) -> bool:
    """Fallback dispatched for unknown actions: always reports failure."""
    return False
def _unavailableProgramResultSupplier(*args) -> dict:
    """Fallback result supplier for actions without a registered supplier."""
    return {}
def doProgramAction(folder: str = "", title: str = "", action: str = "run") -> tuple:
    """Dispatch *action* (e.g. "run", "load", "delete") on the given program.

    A blank folder falls back to the turtle autosave folder; a blank title to
    the newest turtle program. Returns (actionResult, resultDict); on any
    failure the exception is logged and (False, {}) is returned.
    """
    try:
        folder = folder if data.isStringWithContent(folder) else _turtleFolder
        # Defaulting the title can itself fail (no autosaved program yet:
        # getLastTurtleProgramTitle() raises IndexError), so it is inside the try.
        title = title if data.isStringWithContent(title) else getLastTurtleProgramTitle()
        action = action.lower()
        # .get() instead of .setdefault(): setdefault would permanently insert
        # the fallback entry into the dispatch tables for every unknown action.
        return _programActions.get(action, _unavailableProgramAction)(folder, title), \
               _programResultSupplier.get(action, _unavailableProgramResultSupplier)(folder, title)
    except Exception as e:
        logger.append(e)
        return False, {}
# Dispatch tables for doProgramAction(); keys are lower-case action names.
_programActions = {
    "run"   : runProgram,
    "load"  : loadProgram,
    "delete": deleteProgram
}
# Optional per-action extra-result suppliers (none registered yet).
_programResultSupplier = {
}
def getCommandArray():
    """Return the recorded (not yet added) commands decoded as a str."""
    return _commandArray[:_commandPointer].decode()
def getProgramArray():
    """Return the whole loaded program decoded as a str."""
    return _programArray[:_programParts[-1]].decode()
def isMemoryEmpty():
    """True when neither recorded commands nor a loaded program exist."""
    return isCommandMemoryEmpty() and isProgramMemoryEmpty()
def isCommandMemoryEmpty():
    return _commandPointer == 0
def isProgramMemoryEmpty():
    return _programParts == [0]
def clearMemory():
    """Drop both the recorded commands and the loaded program."""
    clearCommandMemory()
    clearProgramMemory()
def clearProgramMemory():
    # Only the part-index list is reset; _programArray keeps its bytes as scratch.
    global _programParts
    _programParts = [0]
def clearCommandMemory():
    # Only the write pointer is reset; _commandArray keeps its bytes as scratch.
    global _commandPointer
    _commandPointer = 0
def retainInTemporary():
    """Snapshot command/program memory so it can be restored after a stateless run.

    The list and the byte buffer are copied: storing bare references (as the
    original did) aliases the live state, so later in-place mutation
    (_programParts.append() in _addToProgOrSave(), index assignment into
    _programArray) would silently corrupt the snapshot.
    """
    global _temporaryCommandPointer, _temporaryProgramParts, _temporaryProgramArray
    _temporaryCommandPointer = _commandPointer
    _temporaryProgramParts = list(_programParts)
    _temporaryProgramArray = bytearray(_programArray)
def loadFromTemporary():
    """Restore the snapshot taken by retainInTemporary().

    Copies are restored as well, so the snapshot stays intact even when the
    restored state is mutated in place afterwards.
    """
    global _commandPointer, _programParts, _programArray
    _commandPointer = _temporaryCommandPointer
    _programParts = list(_temporaryProgramParts)
    _programArray = bytearray(_temporaryProgramArray)
################################################################
################################################################
##########
########## PRIVATE, CLASS-LEVEL METHODS
##########
################################
## BUTTON PRESS PROCESSING
def _startButtonChecking():
    """(Re)start the periodic timer that samples the button matrix."""
    _timer.init(
        period = _checkPeriod,
        mode = Timer.PERIODIC,
        callback = checkButtons
    )
def _stopButtonChecking():
    """Stop the periodic button sampling (used while a program is interpreted)."""
    _timer.deinit()
def _getValidatedPressedButton():
    """Debounce/auto-repeat filter on top of _getPressedButton().

    A freshly changed value (cycle count 1) or a long-held one
    (cycle count above _firstRepeat, which then resets to 1) is passed
    through; anything still being validated yields 0. Lack of pressing
    behaves like a button press whose value is 0.
    """
    global _lastPressed
    pressed = _getPressedButton()
    _logLastPressed(pressed)
    if _lastPressed[1] == 1 or _firstRepeat < _lastPressed[1]:
        _lastPressed[1] = 1   # Restart the repeat window for a held button.
        return pressed
    else:
        return 0              # Validation in progress.
def _logLastPressed(pressed):
    """Track the most recent button value and how many cycles it persisted."""
    global _lastPressed
    if pressed != _lastPressed[0]:
        _lastPressed = [pressed, 1]   # Value changed: restart the cycle count.
    else:
        _lastPressed[1] += 1          # Same value: one more consecutive cycle.
def _getPressedButton():
    """Scan the 10-button matrix once and return a debounced bitmask.

    One full pass of the decade counter reads every button into *pressed*
    (bit i set = button i down). The raw value is stored into the rolling
    _pressedList; a value is returned only once it occurred at least
    _pressLength times in that window. Returns 0 when too many differing
    samples accumulate (_maxError exceeded); implicitly returns None while
    the window is still inconclusive — callers treat that like 0 via the
    `pressed == 0` / falsy checks. TODO confirm the None case is intended.
    """
    global _pressedList, _pressedListIndex
    pressed = 0
    for i in range(10):
        # pseudo pull-down                               # DEPRECATED: New PCB design (2.1) will resolve this.
        if _inputPin.value() == 1:                       # DEPRECATED: New PCB design (2.1) will resolve this.
            _inputPin.init(Pin.OUT)                      # DEPRECATED: New PCB design (2.1) will resolve this.
            _inputPin.off()                              # DEPRECATED: New PCB design (2.1) will resolve this.
            _inputPin.init(Pin.IN)                       # DEPRECATED: New PCB design (2.1) will resolve this.
        if _inputPin.value() == 1:
            pressed += 1 << _counterPosition             # pow(2, _counterPosition)
        _advanceCounter()
    # Shift counter's "resting position" to the closest pressed button to eliminate BTN LED flashing.
    if 0 < pressed:
        while bin(1024 + pressed)[12 - _counterPosition] != "1":
            _advanceCounter()
    _pressedList[_pressedListIndex] = pressed
    _pressedListIndex += 1
    if len(_pressedList) <= _pressedListIndex:
        _pressedListIndex = 0                            # Rolling buffer wrap-around.
    errorCount = 0
    for pressed in _pressedList:
        count = _pressedList.count(pressed)
        if _pressLength <= count:
            return pressed                               # Stable enough: accept this value.
        errorCount += count
        if _maxError < errorCount:
            return 0                                     # Too noisy: report "nothing pressed".
def _advanceCounter():
    """Clock the decade counter (U3) one step forward, wrapping 9 -> 0."""
    global _counterPosition
    _clockPin.on()
    _counterPosition = (_counterPosition + 1) % 10  # positions 0..9
    _clockPin.off()
################################
## BUTTON PRESS INTERPRETATION
def _addCommand(pressed):
    """Interpret one validated button value and record the resulting command byte(s).

    A value of 0 means "nothing to do". While a program runs, any press stops
    it. Otherwise the value is dispatched through _currentMapping; the handler
    returns 0 (nothing to record), a single byte, or a tuple of bytes.
    """
    global _processingProgram, _runningProgram
    try:
        if pressed == 0:
            return                                  # Nothing to save to _commandArray.
        if _runningProgram:                         # Any press stops execution.
            motor.stop()
            _processingProgram = False
            _runningProgram = False
            _beepAndReturn(("processed", 0))        # Beep; the 0 result needs no processing.
            return
        entry = _currentMapping.get(pressed)        # Dictionary-based switch...case.
        if entry is None:
            return                                  # Unmapped button: ignore.
        handler, handlerArgs = entry
        result = handler() if handlerArgs == () else handler(handlerArgs)
        if result == 0:
            return
        if isinstance(result, int):
            _addToCommandArray(result)
        elif isinstance(result, tuple):
            for commandByte in result:
                _addToCommandArray(commandByte)
        else:
            print("Wrong result: {}".format(result))
    except Exception as e:
        logger.append(e)
def _addToCommandArray(command):
    """Store one command byte at the write pointer, growing the buffer if needed."""
    global _commandArray, _commandPointer
    if len(_commandArray) <= _commandPointer:
        _commandArray.append(command)              # Past the end: grow.
    else:
        _commandArray[_commandPointer] = command   # Reuse old scratch bytes.
    _commandPointer += 1
################################
## HELPER METHODS FOR BLOCKS
def _blockStarted(newMapping):
    """Enter a block (loop / function definition): push state, switch key mapping."""
    global _blockStartIndex, _currentMapping
    _blockStartStack.append(_blockStartIndex)
    _blockStartIndex = _commandPointer          # The block starts at the current write position.
    _mappingsStack.append(_currentMapping)
    _currentMapping = newMapping
    buzzer.setDefaultState(1)                   # Audible "inside a block" indicator.
    buzzer.keyBeep("started")
def _blockCompleted(deleteFlag):
    """Leave the innermost block; with *deleteFlag* its bytes are discarded.

    Returns True when the block was deleted, False when it was kept.
    """
    global _commandPointer, _blockStartIndex, _currentMapping
    if len(_mappingsStack) != 0:
        if deleteFlag:
            _commandPointer = _blockStartIndex  # Rewind: drops the block's bytes.
        _blockStartIndex = _blockStartStack.pop()
        _currentMapping = _mappingsStack.pop()
        if len(_mappingsStack) == 0:            # All blocks are closed again.
            buzzer.setDefaultState(0)
    if deleteFlag:
        buzzer.keyBeep("deleted")
        return True
    else:
        buzzer.keyBeep("completed")
        return False
def _getOppositeBoundary(commandPointer):
    """Return the matching pair of the boundary byte at *commandPointer*, or -1.

    Boundary pairs come from _blockBoundaries: "()" for loops, "{}" for
    function definitions, "~~" for function-call tags.
    """
    value = _commandArray[commandPointer]
    for opening, closing in _blockBoundaries:
        if value == opening:
            return closing
        if value == closing:
            return opening
    return -1
def _isTagBoundary(commandPointer):
    """True for self-paired boundaries ("~"), whose opposite equals itself."""
    return _getOppositeBoundary(commandPointer) == _commandArray[commandPointer]
################################
## STANDARDIZED FUNCTIONS
def _start(arguments):  # (blockLevel,)
    """Interpret and execute the recorded commands or the loaded program.

    arguments[0] (blockLevel) selects the source: True executes the current
    block's body from _commandArray; False executes _commandArray if it has
    content, otherwise the whole _programArray. Byte values: 40/41/42
    "(...*n)" loop, 123/124/125 "{...|n}" function definition, 126 "~n~"
    function call; anything else is handed to move(). Button checking is
    suspended for the duration and restarted at the end. Always returns 0.
    """
    global _processingProgram, _runningProgram
    buzzer.keyBeep("processed")
    _processingProgram = True
    _runningProgram = True
    _stopButtonChecking()
    if arguments[0] or 0 < _commandPointer:     # Executing the body of a block or the _commandArray
        _toPlay = _commandArray
        _upperBoundary = _commandPointer
    else:                                       # Executing the _programArray
        _toPlay = _programArray
        _upperBoundary = _programParts[-1]
    _pointer = _blockStartIndex + 1 if arguments[0] else 0
    _pointerStack = []
    _counterStack = []
    config.saveDateTime()
    logger.logCommandsAndProgram()
    motor.setCallback(0, _callbackEnd)
    motor.setCallback(1, _callbackStep)
    while _processingProgram:
        remaining = _upperBoundary - 1 - _pointer   # Remaining bytes in _toPlay. 0 if at the last byte.
        checkCounter = False
        if remaining < 0:                           # Everything is executed: exit.
            _processingProgram = False
        elif _toPlay[_pointer] == 40:               # "(" Loop start.
            _pointerStack.append(_pointer)          # Save the position of the loop's opening parenthesis.
            while _pointer < _upperBoundary and _toPlay[_pointer] != 42:  # "*" Jump to the end of the loop's body.
                _pointer += 1
            remaining = _upperBoundary - 1 - _pointer
            if 2 <= remaining and _toPlay[_pointer] == 42:  # Loop is complete, pointer at the end of its body.
                _counterStack.append(_toPlay[_pointer + 1] - 48)  # Counter was stored increased by 48. b'0' == 48
                checkCounter = True
            else:                                   # Malformed loop: stop execution.
                _processingProgram = False
        elif _toPlay[_pointer] == 42:               # "*" End of the body of the loop.
            _counterStack[-1] -= 1                  # Decrease the loop counter.
            checkCounter = True
        elif _toPlay[_pointer] == 123:              # "{" Start of a function definition: skip it.
            while _pointer < _upperBoundary and _toPlay[_pointer] != 125:  # "}" Jump past the definition.
                _pointer += 1
            if _toPlay[_pointer] != 125:            # The boundary check broke the while loop: malformed.
                _processingProgram = False
        elif _toPlay[_pointer] == 124:              # "|" End of the currently executed function.
            _pointer = _pointerStack.pop()          # Jump back to the call site.
        elif _toPlay[_pointer] == 126:              # "~" Function call tag.
            if 2 <= remaining and _toPlay[_pointer + 2] == 126:  # Complete "~n~" sequence.
                _pointerStack.append(_pointer + 2)  # Return position: the second tilde.
                _index = _toPlay[_pointer + 1] - 49 # Not 48! functionId - 1 = array index
                _jumpTo = -1                        # -1 so the "_pointer != _jumpTo" check below works.
                if _index < len(_functionPosition):
                    _jumpTo = _functionPosition[_index]
                    if 0 <= _jumpTo:                # Function is actually defined.
                        _pointer = _jumpTo
                if _pointer != _jumpTo:             # Call failed (unknown / undefined function):
                    del _pointerStack[-1]           # no "jump back" index is needed,
                    _pointer += 2                   # skip the whole call tag.
            else:                                   # Malformed call: stop execution.
                _processingProgram = False
        else:
            move(_toPlay[_pointer])                 # Try as a move command; silently ignores unknown bytes.
        if checkCounter:
            if 0 < _counterStack[-1]:               # More iterations to go:
                _pointer = _pointerStack[-1]        # jump back to the loop start.
            else:
                del _pointerStack[-1]               # Loop finished: drop its start position
                del _counterStack[-1]               # and its counter,
                _pointer += 2                       # jump to the closing parenthesis ")".
        _pointer += 1
    _processingProgram = False
    _startButtonChecking()
    return 0
# COMMAND AND PROGRAM ARRAY
def _addToProgOrSave():
    """ADD button: append recorded commands to the program, or save the program.

    With recorded commands present they are copied to the end of
    _programArray (overwriting scratch bytes where possible) and a new part
    boundary is registered. With no recorded commands but a non-empty
    program, the program is persisted instead. Always returns 0.
    """
    global _commandPointer, _programArray
    if _commandPointer != 0:
        for i in range(_commandPointer):
            if _programParts[-1] + i < len(_programArray):
                _programArray[_programParts[-1] + i] = _commandArray[i]   # Reuse scratch bytes.
            else:
                _programArray.append(_commandArray[i])
        _programParts.append(_programParts[-1] + _commandPointer)
        _commandPointer = 0
        buzzer.keyBeep("added")
    elif _programParts[-1] != 0:
        saveLoadedProgram()
        buzzer.keyBeep("saved")
    return 0
# LOOP
def _createLoop(arguments):  # (creationState,)  40 [statements...] 42 [iteration count] 41
    """Three-phase loop builder, keyed by arguments[0]:

    40 "(" opens the loop (switch to the loop mapping, reset the counter),
    42 "*" closes the body (or deletes an empty loop) and asks for a count,
    41 ")" finalizes the loop (or deletes it when the count is zero).
    """
    global _currentMapping, _loopCounter
    if arguments[0] == 40:
        _blockStarted(_loopBeginMapping)
        _loopCounter = 0
        return 40
    elif arguments[0] == 42:
        if _commandPointer - _blockStartIndex < 2:  # Empty loop body:
            _blockCompleted(True)                   # close and delete the whole block.
            return 0
        else:
            _currentMapping = _loopCounterMapping
            buzzer.keyBeep("input_needed")
            return 42
    elif arguments[0] == 41:
        # _blockCompleted deletes the loop if the counter is zero and returns
        # the deletion result (True if deleted), used as index: False == 0, True == 1.
        # The counter is stored increased by 48 = human-friendly bytes: 48 -> "0", 49 -> "1", ...
        return ((_loopCounter + 48, 41), 0)[_blockCompleted(_loopCounter == 0)]
def _modifyLoopCounter(arguments):  # (value,)
    """Adjust the pending loop count by arguments[0], clamped to 0..255.

    A value of 0 resets the counter (use case: forgot the exact count).
    Always returns 0.
    """
    global _loopCounter
    delta = arguments[0]
    target = _loopCounter + delta
    if target < 0:                      # Lower boundary.
        _loopCounter = 0
        buzzer.keyBeep("boundary")
    elif 255 < target:                  # Upper boundary.
        _loopCounter = 255
        buzzer.keyBeep("boundary")
    elif delta == 0:                    # Explicit reset.
        _loopCounter = 0
        buzzer.keyBeep("deleted")
    else:                               # Regular increment / decrement.
        _loopCounter = target
        buzzer.keyBeep("change_count")
    return 0
def _checkLoopCounter():
    """Beep the current loop count back to the user, if the policy allows it.

    _loopChecking: 2 = always read back, 1 = only for counts up to 20
    (longer read-backs are refused with a "too_long" beep). Returns 0.
    """
    global _loopChecking
    if _loopChecking == 2 or (_loopChecking == 1 and _loopCounter <= 20):
        buzzer.keyBeep("attention")
        buzzer.midiBeep(64, 100, 500, _loopCounter)   # One beep per iteration.
    else:
        buzzer.keyBeep("too_long")
        buzzer.rest(1000)
    return 0
# FUNCTION
def _manageFunction(arguments):  # (functionId, onlyCall)  123 [statements...] 124 [id] 125
    """Define or call a function, keyed by its registration state.

    function call byte sequence: 126 [id] 126. _functionPosition[id-1] is
    -1 (undefined), -0.1 (under definition) or 0+ (defined, position of the
    function's first command).
    """
    global _functionPosition
    index = arguments[0] - 1                        # functionId - 1 == index in _functionPosition
    if index < 0 or len(_functionPosition) < index: # Index out of range:
        buzzer.keyBeep("boundary")
        return 0                                    # ignore and return.
    elif len(_functionPosition) == index:           # Out of range, but directly subsequent:
        _functionPosition.append(-1)                # extend the array and continue.
    # Call the function if it is defined, or if the 'only call' flag is set and it is
    # not currently under definition. In the latter case position -1 (undefined) is
    # acceptable (lazy initialization).
    if 0 <= _functionPosition[index] or (arguments[1] and _functionPosition[index] != -0.1):
        buzzer.keyBeep("processed")
        return 126, arguments[0] + 48, 126          # +48 = human-friendly bytes: 48 -> "0", 49 -> "1", ...
    elif _functionPosition[index] == -0.1:          # End of defining the function.
        # Save the position now: _blockStartIndex is destroyed inside _blockCompleted().
        _functionPosition[index] = len(_programArray) + _blockStartIndex
        # An empty function (_commandPointer - _blockStartIndex < 2 -> start and end
        # adjacent) is deleted by _blockCompleted(), which returns True on deletion;
        # in that case the registered position is reverted to -1.
        if _blockCompleted(_commandPointer - _blockStartIndex < 2):
            _functionPosition[index] = -1
        return (0, (124, arguments[0] + 48, 125))[0 <= _functionPosition[index]]  # False == 0, True == 1 (defined)
    else:                                           # Beginning of defining the function.
        _blockStarted(_functionMapping)
        _functionPosition[index] = -0.1             # In progress: neither -1 (undefined) nor 0+ (defined).
        return 123
# GENERAL
def _beepAndReturn(arguments):  # (keyOfBeep, returningValue)
    """Play the named beep, then hand back the prepared result value."""
    beepKey, returningValue = arguments
    buzzer.keyBeep(beepKey)
    return returningValue
def _undo(arguments):  # (blockLevel,)
    """UNDO button: remove the most recent command (or a whole bounded block).

    With blockLevel, undo stops at the current block's start; otherwise, when
    the command buffer is already empty, the most recently added program part
    is loaded back into the command buffer for further editing. Returns 0.
    """
    global _commandPointer, _commandArray, _functionPosition
    # The lower limit of the undo range depends on the blockLevel flag.
    undoLowerBoundary = _blockStartIndex + 1 if arguments[0] else 0
    if undoLowerBoundary < _commandPointer:         # Anything to undo?
        _commandPointer -= 1
        buzzer.keyBeep("undone")
        # _getOppositeBoundary returns -1 when the byte is not a block boundary.
        boundary = _getOppositeBoundary(_commandPointer)
        if boundary != -1:
            if boundary == 123:                     # "{": undoing a function definition, unregister it:
                _functionPosition[_commandArray[_commandPointer - 1] - 49] = -1  # Not 48! functionId - 1 = index
            while True:                             # do...while so identical boundary pairs ("~~") work too.
                _commandPointer -= 1
                if _commandArray[_commandPointer] == boundary or _commandPointer == undoLowerBoundary:
                    break
            if not _isTagBoundary(_commandPointer): # Tags (function calls) need no extra keyBeep().
                buzzer.keyBeep("deleted")
        if _commandPointer == undoLowerBoundary:
            buzzer.keyBeep("boundary")
    else:
        if arguments[0] or _programParts == [0]:    # Block-level undo, or nothing loadable from _programArray.
            buzzer.keyBeep("boundary")
        else:                                       # Pull the last program part back into the command buffer.
            _commandPointer = _programParts[-1] - _programParts[-2]
            _commandArray = _programArray[_programParts[-2] : _programParts[-1]]
            del _programParts[-1]
            buzzer.keyBeep("loaded")
    return 0
def _delete(arguments):  # (blockLevel,)
    """DELETE button: discard the unfinished block, or clear memory stepwise.

    Block-level: abort only the block in progress (and mark its half-defined
    functions undefined). Otherwise: first "clear" the command buffer
    (unregistering functions defined inside it), then on a second press
    "clear" the program memory. Returns 0.
    """
    global _commandPointer, _programParts, _functionPosition
    if arguments[0]:                                # Block-level: delete only the unfinished block.
        _blockCompleted(True)                       # keyBeep("deleted") happens inside _blockCompleted(True).
        for i in range(len(_functionPosition)):     # There may be user defined functions, so not range(3).
            if _functionPosition[i] == -0.1:        # Unfinished definition:
                _functionPosition[i] = -1           # mark as undefined.
    else:                                           # Whole-array deletion.
        if _commandPointer != 0:                    # _commandArray isn't "empty", so "clear" it.
            for i in range(_commandPointer - 3):    # Unregister functions defined in the deleted range.
                if _commandArray[i] == 124 and _commandArray[i + 2] == 125:  # "|" and "}"
                    _functionPosition[_commandArray[i + 1] - 49] = -1        # Not 48! functionId - 1 = index
            _commandPointer = 0                     # "Clear" _commandArray.
            buzzer.keyBeep("deleted")
        elif _programParts != [0]:                  # _commandArray "empty", _programArray not: "clear" it.
            _functionPosition = [-1] * len(_functionPosition)  # Users may reuse higher ids first, so the
                                                               # array keeps its length (not [-1, -1, -1]).
            _programParts = [0]                     # "Clear" _programArray.
            buzzer.keyBeep("deleted")
    if _commandPointer == 0 and _programParts == [0]:  # Both arrays are "empty" now.
        buzzer.keyBeep("boundary")
    return 0
def _customMapping():
    """MAPPING button handler: acknowledge with the "loaded" beep, record nothing."""
    buzzer.keyBeep("loaded")
    return 0
################################
## CALLBACK FUNCTIONS
def _callbackStep():
    """Motor callback after each executed step: optional beep, then a button poll."""
    global _stepSignalSkipCount
    if _stepSignalEnabled and 0 == _stepSignalSkipCount and _stepSignal != "":
        buzzer.keyBeep(_stepSignal)
    else:
        # NOTE(review): this also decrements when the beep is merely disabled or
        # unset (not deliberately skipped), driving the counter negative — confirm
        # this interplay with skipSignal() is intended.
        _stepSignalSkipCount -= 1
    checkButtons()
def _callbackEnd():
    """Motor callback at the end of execution: optional beep, clear the running flag."""
    global _endSignalSkipCount, _runningProgram
    if _endSignalEnabled and 0 == _endSignalSkipCount and _endSignal != "":
        buzzer.keyBeep(_endSignal)
    else:
        _endSignalSkipCount -= 1   # Same caveat as in _callbackStep().
    _runningProgram = False
################################
## MAPPINGS
# For turtle hat
# Key mappings for the turtle hat. Keys are button bitmasks (1 << buttonOrdinal,
# combinations for the F-keys); values are (handler, argumentsTuple) pairs
# dispatched by _addCommand().
_defaultMapping = {
    1:    (_beepAndReturn, ("processed", 70)),   # FORWARD
    2:    (_beepAndReturn, ("processed", 80)),   # PAUSE
    4:    (_createLoop, (40,)),                  # REPEAT (start)
    6:    (_manageFunction, (1, False)),         # F1
    8:    (_addToProgOrSave, ()),                # ADD
    10:   (_manageFunction, (2, False)),         # F2
    12:   (_manageFunction, (3, False)),         # F3
    16:   (_beepAndReturn, ("processed", 82)),   # RIGHT
    32:   (_beepAndReturn, ("processed", 66)),   # BACKWARD
    64:   (_start, (False,)),                    # START / STOP (start)
    128:  (_beepAndReturn, ("processed", 76)),   # LEFT
    256:  (_undo, (False,)),                     # UNDO
    512:  (_delete, (False,)),                   # DELETE
    1023: (_customMapping, ())                   # MAPPING
}
# Active while a loop body is being recorded.
_loopBeginMapping = {
    1:   (_beepAndReturn, ("processed", 70)),    # FORWARD
    2:   (_beepAndReturn, ("processed", 80)),    # PAUSE
    4:   (_createLoop, (42,)),                   # REPEAT (*)
    6:   (_manageFunction, (1, True)),           # F1
    10:  (_manageFunction, (2, True)),           # F2
    12:  (_manageFunction, (3, True)),           # F3
    16:  (_beepAndReturn, ("processed", 82)),    # RIGHT
    32:  (_beepAndReturn, ("processed", 66)),    # BACKWARD
    64:  (_start, (True,)),                      # START / STOP (block-level start)
    128: (_beepAndReturn, ("processed", 76)),    # LEFT
    256: (_undo, (True,)),                       # UNDO
    512: (_delete, (True,))                      # DELETE
}
# Active while the loop's iteration count is being entered.
_loopCounterMapping = {
    1:   (_modifyLoopCounter, (1,)),             # FORWARD
    4:   (_createLoop, (41,)),                   # REPEAT (end)
    16:  (_modifyLoopCounter, (1,)),             # RIGHT
    32:  (_modifyLoopCounter, (-1,)),            # BACKWARD
    64:  (_checkLoopCounter, ()),                # START / STOP (check counter)
    128: (_modifyLoopCounter, (-1,)),            # LEFT
    512: (_modifyLoopCounter, (0,))              # DELETE
}
# Active while a function body is being recorded.
_functionMapping = {
    1:   (_beepAndReturn, ("processed", 70)),    # FORWARD
    2:   (_beepAndReturn, ("processed", 80)),    # PAUSE
    4:   (_createLoop, (40,)),                   # REPEAT (start)
    6:   (_manageFunction, (1, True)),           # F1
    10:  (_manageFunction, (2, True)),           # F2
    12:  (_manageFunction, (3, True)),           # F3
    16:  (_beepAndReturn, ("processed", 82)),    # RIGHT
    32:  (_beepAndReturn, ("processed", 66)),    # BACKWARD
    64:  (_start, (True,)),                      # START / STOP (block-level start)
    128: (_beepAndReturn, ("processed", 76)),    # LEFT
    256: (_undo, (True,)),                       # UNDO
    512: (_delete, (True,))                      # DELETE
}
# For other purposes: maps command bytes to (motorCommand, length) tuples for motor.add().
_moveCharMapping = {
    70:  (1, _moveLength),   # "F" - FORWARD
    66:  (4, _moveLength),   # "B" - BACKWARD
    76:  (2, _turnLength),   # "L" - LEFT (90°)
    108: (2, _halfTurn),     # "l" - LEFT (45°)
    82:  (3, _turnLength),   # "R" - RIGHT (90°)
    114: (3, _halfTurn),     # "r" - RIGHT (45°)
    80:  (0, _moveLength),   # "P" - PAUSE
    75:  (2, _halfTurn),     # "K" - LEFT (45°) alias for URL usage ( L - 1 = K ~ l )
    81:  (3, _halfTurn)      # "Q" - RIGHT (45°) alias for URL usage ( R - 1 = Q ~ r )
}
################################
## LAST PART OF INITIALISATION
# Activate the default (turtle hat) key mapping and start polling the buttons.
_currentMapping = _defaultMapping
_startButtonChecking()
| en | 0.771409 | uBot_firmware // The firmware of the μBot, the educational floor robot. (A MicroPython port to ESP8266 with additional modules.) This file is part of uBot_firmware. [https://zza.hu/uBot_firmware] [https://git.zza.hu/uBot_firmware] MIT License Copyright (c) 2020-2021 <NAME> // hu-zza Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # Advances the decade counter (U3). # Checks the returning signal from turtle HAT. # FUTURE: _inputPin = Pin(16, Pin.IN) # DEPRECATED: New PCB design (2.1) will resolve this. # DEPRECATED: New PCB design (2.1) will resolve this. # The position of the decade counter (U3). # Inside: [last pressed button, elapsed (button check) cycles] # Sound indicates the end of a step during execution: buzzer.keyBeep(_endSignal) # Sound indicates the end of program execution: buzzer.keyBeep(_stepSignal) # Low-level: The last N (_pressLength + _maxError) button check results. # High-level: Abstract commands, result of processed button presses. # Pointer for _commandArray. 
# High-level: Result of one or more added _commandArray. # Positions by which _programArray can be split into _commandArray(s). # For stateless run # with the capability of # restore unsaved stuff. # At loop creation this holds iteration count. # -1 : not defined, -0.1 : under definition, 0+ : defined # If defined, this index the first command of the function, # refer to the first command of the function, instead of its curly brace "{". # At block (loop, fn declaration) creation, this holds block start position. # Executes the repeated button checks. # (("(", ")"), ("{", "}"), ("~", "~")) ################################ ## PUBLIC METHODS # pressed = 1<<buttonOrdinal ################################################################ ################################################################ ########## ########## PRIVATE, CLASS-LEVEL METHODS ########## ################################ ## BUTTON PRESS PROCESSING # Lack of pressing returns same like a button press. # In this case the returning value is 0. # If validation is in progress, returns 0. # pseudo pull-down # DEPRECATED: New PCB design (2.1) will resolve this. # DEPRECATED: New PCB design (2.1) will resolve this. # DEPRECATED: New PCB design (2.1) will resolve this. # DEPRECATED: New PCB design (2.1) will resolve this. # DEPRECATED: New PCB design (2.1) will resolve this. # pow(2, _counterPosition) # shift counter's "resting position" to the closest pressed button to eliminate BTN LED flashing ################################ ## BUTTON PRESS INTERPRETATION # result = 0 means, there is nothing to save to _commandArray. # Not only lack of buttonpress (pressed == 0) returns 0. # Stop commands / program execution. # Beep and skip the (result) processing. # Dictionary based switch...case # Default branch # Skip the (result) processing. ################################ ## HELPER METHODS FOR BLOCKS # len(_mappingsStack) == 0 means all blocks are closed. 
################################ ## STANDARDIZED FUNCTIONS # (blockLevel,) # Executing the body of a block or the _commandArray # Executing the _programArray #counter = 0 # Debug #print("_toPlay[:_pointer]", "_toPlay[_pointer:]", "\t\t\t", "counter", "_pointer", "_toPlay[_pointer]") # Debug # Remaining bytes in _toPlay bytearray. 0 if _toPlay[_pointer] == _toPlay[-1] # If everything is executed, exits. # "(" The block-level previews are excluded. (Those starts from first statement.) # Save the position of the loop's starting parentheses: "(" # "*" Jump to the end of the loop's body. # If the loop is complete and the pointer is at the end of its body. # Counter was increased at definition by 48. b'0' == 48 # Maybe it's an error, so stop execution. # "*" End of the body of the loop. # Decrease the loop counter. # "{" Start of a function. # "}" Jump to the function's closing curly brace. # Means the _pointer < _upperBoundary breaks the while loop. # "|" End of the currently executed function. # Jump back to where the function call occurred. # "~" # Double-check: 1. Enough remaining to close function call; 2. "~" # Save the returning position as the second tilde: "~" # Not 48! functionId - 1 = array index # Declared with -1 because of the check "_pointer != _jumpTo". # If the _functionPosition contains the given function index. # If the retrieved value from _functionPosition is a real position. # This handles both else branch of previous two if statements: # The function call failed, there is no need for "jump back" index. # Jump to the second tilde: "~" (Skip the whole function call.) # Maybe it's an error, so stop execution. # Try to execute the command as move. It can fail without exception. # If the loop counter is greater than 0. # Jump back to the loop starting position. # Delete the loop's starting position from stack. # Delete the loop's counter from stack. 
# Jump to the loop's closing parentheses: ")" # COMMAND AND PROGRAM ARRAY # LOOP # (creationState,) 40 [statements...] 42 [iteration count] 41 # If the body of the loop is empty, # close and delete the complete block. # _blockCompleted deletes the loop if counter is zero, and returns with the result of the # deletion (True if deleted). This returning value is used as index: False == 0, and True == 1 # Increase _loopCounter by 48 = human-friendly bytes: 48 -> "0", 49 -> "1", ... # (value,) Increasing by this value, if value == 0, it resets he counter # Checks lower boundary. # Checks upper boundary. # Reset the counter. Use case: forget the exact count and press 'X'. # General modification. # FUNCTION # (functionId, onlyCall) 123 [statements...] 124 [id] 125 # function call: 126 [id] 126 # functionId - 1 == Index in _functionPosition # The given index is out of array. # Ignore and return. # The given index is out of array. However, it's subsequent: # Extending the array and continue. # Calling the function if it is defined, or flag 'only call' is True and it is not under definition. # In the second case, position -1 (undefined) is fine. (lazy initialization) # Increase by 48 = human-friendly bytes: 48 -> "0", 49 -> "1", ... # End of defining the function # Save index to _functionPosition, because _blockStartIndex will be destroyed during _blockCompleted(). # If function contains nothing # (_commandPointer - _blockStartIndex < 2 -> Function start and end are adjacent.), # delete it by _blockCompleted() which return a boolean (True if deleted). # If this returning value is True, retain len(_programArray) + _blockStartIndex, else overwrite it with -1. # False == 0, and True == 1 (defined) # Beginning of defining the function # In progress, so it isn't -1 (undefined) or 0+ (defined). # GENERAL # (keyOfBeep, returningValue) # (blockLevel,) # Sets the maximum range of undo in according to blockLevel flag. # If there is anything that can be undone. 
# _getOppositeBoundary returns -1 if byte at _commandPointer is not boundary or its pair. # "{" If it undoes a function declaration, unregister: # Not 48! functionId - 1 = array index # General undo decreases the pointer by one, so this # do...while loop can handle identical boundary pairs. # Tags (like function calling) need no keyBeep(). # If block-level undo or no more loadable command from _programArray. # (blockLevel,) # Block-level: delete only the unfinished block. # buzzer.keyBeep("deleted") is called inside _blockCompleted(True) # Maybe there are user defined functions, so not range(3). # If this function is unfinished. # Set as undefined. # Not block-level: the whole array is affected. # _commandArray isn't "empty", so "clear" it. # Unregister functions defined in deleted range. # "|" and "}" # Not 48! functionId - 1 = array index # "Clear" _commandArray. # _commandArray is "empty", but _programArray is not, "clear" it. # User may want to use higher ids first (from the # previously used ones). So it is not [-1, -1, -1] # "Clear" _programArray. # If _commandArray and _programArray are "empty". 
################################ ## CALLBACK FUNCTIONS ################################ ## MAPPINGS # For turtle hat # FORWARD # PAUSE # REPEAT (start) # F1 # ADD # F2 # F3 # RIGHT # BACKWARD # START / STOP (start) # LEFT # UNDO # DELETE # MAPPING # FORWARD # PAUSE # REPEAT (*) # F1 # F2 # F3 # RIGHT # BACKWARD # START / STOP (block-level start) # LEFT # UNDO # DELETE # FORWARD # REPEAT (end) # RIGHT # BACKWARD # START / STOP (check counter) # LEFT # DELETE # FORWARD # PAUSE # REPEAT (start) # F1 # F2 # F3 # RIGHT # BACKWARD # START / STOP (block-level start) # LEFT # UNDO # DELETE # For other purpose # "F" - FORWARD # "B" - BACKWARD # "L" - LEFT (90°) # "l" - LEFT (45°) # "R" - RIGHT (90°) # "r" - RIGHT (45°) # "P" - PAUSE # "K" - LEFT (45°) alias for URL usage ( L - 1 = K ~ l ) # "Q" - RIGHT (45°) alias for URL usage ( R - 1 = Q ~ r ) ################################ ## LAST PART OF INITIALISATION | 2.205459 | 2 |
vegadns_client/release_version.py | shupp/VegaDNS-CLI | 3 | 6614539 | <gh_stars>1-10
from vegadns_client.common import AbstractResourceCollection
from vegadns_client.exceptions import ClientException
class ReleaseVersion(AbstractResourceCollection):
    """Resource collection exposing the VegaDNS ``/release_version`` endpoint."""

    def __call__(self):
        """Fetch and return the API's release version string.

        Raises:
            ClientException: if the endpoint answers with a non-200 status.
        """
        response = self.client.get("/release_version")
        if response.status_code == 200:
            return response.json()['release_version']
        raise ClientException(response.status_code, response.content)
| from vegadns_client.common import AbstractResourceCollection
from vegadns_client.exceptions import ClientException
class ReleaseVersion(AbstractResourceCollection):
def __call__(self):
r = self.client.get("/release_version")
if r.status_code != 200:
raise ClientException(r.status_code, r.content)
decoded = r.json()
return decoded['release_version'] | none | 1 | 2.080978 | 2 | |
flydra_analysis/flydra_analysis/a2/get_2D_image_latency_plot.py | liyi2017/flydra | 0 | 6614540 | <filename>flydra_analysis/flydra_analysis/a2/get_2D_image_latency_plot.py<gh_stars>0
#!/usr/bin/env python
"""Get Flydra Latency.
Usage:
get_2D_image_latency_plot.py FILENAME [options]
Options:
-h --help Show this screen.
--3d Plot the 3D tracking latency
--2d Plot the 2D tracking latency
--end-idx=N Only show this many rows [default: 100000]
"""
from __future__ import print_function
from docopt import docopt
import tables
import matplotlib.pyplot as plt
import pandas as pd
import sys
import numpy as np
import flydra_analysis.analysis.result_utils as result_utils
def plot_latency(fname, do_3d_latency=False, do_2d_latency=False, end_idx=100000):
    """Plot tracking latency from a flydra HDF5 data file.

    Parameters
    ----------
    fname : str
        Path to the flydra .h5 file.
    do_3d_latency : bool
        If True, plot the latency of the 3D (kalman) reconstruction.
    do_2d_latency : bool
        If True, plot per-camera 2D latency, triggerbox timestamps and
        inter-frame intervals (one subplot row per camera).
    end_idx : int
        Only the first ``end_idx`` rows of each table are loaded.
    """
    # Idiomatic guard (was `do_3d_latency==False and do_2d_latency==False`).
    if not (do_3d_latency or do_2d_latency):
        print('hmm, not plotting 3d or 2d data. nothing to do')
        return

    # Read everything we need eagerly, so the file is closed before plotting.
    with tables.open_file(fname, mode='r') as h5:
        if do_2d_latency:
            d2d = h5.root.data2d_distorted[:end_idx]
        if do_3d_latency:
            dk = h5.root.kalman_estimates[:end_idx]
        camn2cam_id, cam_id2camns = result_utils.get_caminfo_dicts(h5)
        time_model = result_utils.get_time_model_from_data(h5)

    if do_2d_latency:
        df2d = pd.DataFrame(d2d)
        camn_list = list(df2d['camn'].unique())
        camn_list.sort()

    if do_3d_latency:
        dfk = pd.DataFrame(dk)
        fig = plt.figure()
        ax = fig.add_subplot(111)
        for obj_id, dfobj in dfk.groupby('obj_id'):
            frame = dfobj['frame'].values
            reconstruct_timestamp = dfobj['timestamp'].values
            # Latency = when the 3D estimate was produced minus when the
            # camera frame was actually triggered.
            trigger_timestamp = time_model.framestamp2timestamp(frame)
            latency = reconstruct_timestamp - trigger_timestamp
            # Mask physically impossible values (uninitialized timestamps).
            latency[latency < -1e8] = np.nan
            ax.plot(frame, latency, 'b.-')
        ax.text(0, 1, '3D reconstruction', va='top', ha='left', transform=ax.transAxes)
        ax.set_xlabel('frame')
        ax.set_ylabel('time (s)')

    if do_2d_latency:
        fig2 = plt.figure()
        axn = None
        fig3 = plt.figure()
        ax3n = None
        fig4 = plt.figure()
        ax4n = None
        for camn, dfcam in df2d.groupby('camn'):
            cam_id = camn2cam_id[camn]
            # Keep only the first detected point of each frame.
            df0 = dfcam[dfcam['frame_pt_idx'] == 0]
            ts0s = df0['timestamp'].values              # triggerbox timestamps
            tss = df0['cam_received_timestamp'].values  # arrival at camnode
            frames = df0['frame'].values
            dts = tss - ts0s                            # camnode latency

            # Figure 2: latency and inter-frame interval per camera.
            axn = fig2.add_subplot(len(camn_list), 1, camn_list.index(camn) + 1, sharex=axn)
            axn.plot(frames, dts, 'r.-', label='camnode latency')
            axn.plot(frames[:-1], ts0s[1:] - ts0s[:-1], 'g.-', label='inter-frame interval')
            axn.set_xlabel('frame')
            axn.set_ylabel('time (s)')
            axn.text(0, 1, cam_id, va='top', ha='left', transform=axn.transAxes)
            if camn_list.index(camn) == 0:
                axn.legend()

            # Figure 3: absolute triggerbox timestamps.
            ax3n = fig3.add_subplot(len(camn_list), 1, camn_list.index(camn) + 1, sharex=ax3n)
            ax3n.plot(frames, ts0s, 'g.-', label='calculated triggerbox timestamp')
            ax3n.set_xlabel('frame')
            ax3n.set_ylabel('time (s)')
            ax3n.text(0, 1, cam_id, va='top', ha='left', transform=ax3n.transAxes)
            if camn_list.index(camn) == 0:
                ax3n.legend()

            # Figure 4: inter-frame intervals alone.
            ax4n = fig4.add_subplot(len(camn_list), 1, camn_list.index(camn) + 1, sharex=ax4n)
            ax4n.plot(frames[:-1], ts0s[1:] - ts0s[:-1], 'g.-')
            ax4n.set_xlabel('frame')
            ax4n.set_ylabel('inter-frame-interval (s)')
            ax4n.text(0, 1, cam_id, va='top', ha='left', transform=ax4n.transAxes)

    plt.show()
def main():
    """CLI entry point: parse docopt arguments and dispatch to plot_latency."""
    options = docopt(__doc__)
    plot_latency(
        options['FILENAME'],
        do_3d_latency=options['--3d'],
        do_2d_latency=options['--2d'],
        end_idx=int(options['--end-idx']),
    )


if __name__=='__main__':
    main()
| <filename>flydra_analysis/flydra_analysis/a2/get_2D_image_latency_plot.py<gh_stars>0
#!/usr/bin/env python
"""Get Flydra Latency.
Usage:
get_2D_image_latency_plot.py FILENAME [options]
Options:
-h --help Show this screen.
--3d Plot the 3D tracking latency
--2d Plot the 2D tracking latency
--end-idx=N Only show this many rows [default: 100000]
"""
from __future__ import print_function
from docopt import docopt
import tables
import matplotlib.pyplot as plt
import pandas as pd
import sys
import numpy as np
import flydra_analysis.analysis.result_utils as result_utils
def plot_latency(fname, do_3d_latency=False, do_2d_latency=False, end_idx=100000):
if do_3d_latency==False and do_2d_latency==False:
print('hmm, not plotting 3d or 2d data. nothing to do')
return
with tables.open_file(fname, mode='r') as h5:
if do_2d_latency:
d2d = h5.root.data2d_distorted[:end_idx]
if do_3d_latency:
dk = h5.root.kalman_estimates[:end_idx]
camn2cam_id, cam_id2camns = result_utils.get_caminfo_dicts(h5)
time_model=result_utils.get_time_model_from_data(h5)
if do_2d_latency:
df2d = pd.DataFrame(d2d)
camn_list = list(df2d['camn'].unique())
camn_list.sort()
if do_3d_latency:
dfk = pd.DataFrame(dk)
fig = plt.figure()
ax = fig.add_subplot(111)
for obj_id, dfobj in dfk.groupby('obj_id'):
frame = dfobj['frame'].values
reconstruct_timestamp = dfobj['timestamp'].values
trigger_timestamp = time_model.framestamp2timestamp(frame)
latency = reconstruct_timestamp-trigger_timestamp
latency[ latency < -1e8 ] = np.nan
ax.plot(frame,latency,'b.-')
ax.text(0,1,'3D reconstruction', va='top', ha='left', transform=ax.transAxes)
ax.set_xlabel('frame')
ax.set_ylabel('time (s)')
if do_2d_latency:
fig2 = plt.figure()
axn=None
fig3 = plt.figure()
ax3n = None
fig4 = plt.figure()
ax4n = None
for camn, dfcam in df2d.groupby('camn'):
cam_id = camn2cam_id[camn]
df0 = dfcam[ dfcam['frame_pt_idx']==0 ]
ts0s = df0['timestamp'].values
tss = df0['cam_received_timestamp'].values
frames = df0['frame'].values
dts = tss-ts0s
axn = fig2.add_subplot( len(camn_list), 1, camn_list.index(camn)+1,sharex=axn)
axn.plot(frames,dts,'r.-',label='camnode latency' )
axn.plot( frames[:-1], ts0s[1:]-ts0s[:-1], 'g.-', label='inter-frame interval' )
axn.set_xlabel('frame')
axn.set_ylabel('time (s)')
axn.text(0,1,cam_id, va='top', ha='left', transform=axn.transAxes)
if camn_list.index(camn)==0:
axn.legend()
ax3n = fig3.add_subplot( len(camn_list), 1, camn_list.index(camn)+1,sharex=ax3n)
ax3n.plot(frames,ts0s,'g.-', label='calculated triggerbox timestamp')
ax3n.set_xlabel('frame')
ax3n.set_ylabel('time (s)')
ax3n.text(0,1,cam_id, va='top', ha='left', transform=ax3n.transAxes)
if camn_list.index(camn)==0:
ax3n.legend()
ax4n = fig4.add_subplot( len(camn_list), 1, camn_list.index(camn)+1,sharex=ax4n)
ax4n.plot(frames[:-1],ts0s[1:]-ts0s[:-1],'g.-')
ax4n.set_xlabel('frame')
ax4n.set_ylabel('inter-frame-interval (s)')
ax4n.text(0,1,cam_id, va='top', ha='left', transform=ax4n.transAxes)
plt.show()
def main():
args = docopt(__doc__)
fname = args['FILENAME']
plot_latency(fname, do_3d_latency=args['--3d'], do_2d_latency=args['--2d'], end_idx=int(args['--end-idx']))
if __name__=='__main__':
main()
| en | 0.361729 | #!/usr/bin/env python Get Flydra Latency. Usage: get_2D_image_latency_plot.py FILENAME [options] Options: -h --help Show this screen. --3d Plot the 3D tracking latency --2d Plot the 2D tracking latency --end-idx=N Only show this many rows [default: 100000] | 2.6271 | 3 |
lang/Python/csv-data-manipulation-3.py | ethansaxenian/RosettaDecode | 1 | 6614541 | import pandas as pd
filepath = 'data.csv'

# Load the table, store each row's total in a new 'SUM' column, and rewrite
# the same file (index=False keeps pandas' row index out of the output).
df = pd.read_csv(filepath)
df['SUM'] = df.sum(axis=1)
df.to_csv(filepath, index=False)
| import pandas as pd
filepath = 'data.csv'
df = pd.read_csv(filepath)
rows_sums = df.sum(axis=1)
df['SUM'] = rows_sums
df.to_csv(filepath, index=False)
| none | 1 | 2.843831 | 3 | |
src/openeo_grass_gis_driver/actinia_processing/__init__.py | marcjansen/openeo-grassgis-driver | 7 | 6614542 | # -*- coding: utf-8 -*-
# Import the actinia_processing to fill the process.PROCESS_DICT with
# actinia_processing
from . import add_dimension_process
# from . import aggregate_spatial_process
# from . import apply_mask_process
from . import apply_process
from . import array_element_process
# from . import bbox_from_raster_process
from . import filter_bands_process
from . import filter_bbox_process
from . import filter_spatial_process
from . import filter_temporal_process
# from . import hants_process
from . import load_collection_process
# from . import mask_invalid_values_process
from . import mask_process
from . import mask_polygon_process
from . import merge_cubes_process
# from . import multilayer_mask_process
from . import ndvi_process
# from . import evi_process
# from . import normalized_difference_process
# from . import map_algebra_process
# from . import percentile_time_process
# from . import reduce_time_process
from . import reduce_dimension_process
from . import rename_labels_process
from . import resample_spatial_process
from . import run_udf_process
# from . import udf_reduce_time
from . import raster_exporter
from . import save_result_process
from . import trim_cube_process
# from . import rgb_raster_exporter
# from . import scale_minmax_process
# from . import zonal_statistics
# from . import temporal_algebra_process
# logical processes
from . import logic_and_process
from . import logic_if_process
from . import logic_not_process
from . import logic_or_process
from . import logic_xor_process
# math processes
from . import math_abs_process
from . import math_add_process
from . import math_clip_process
from . import math_divide_process
from . import math_eq_process
from . import math_exp_process
from . import math_gt_process
from . import math_int_process
from . import math_isnan_process
from . import math_isnodata_process
from . import math_ln_process
from . import math_lt_process
from . import math_lte_process
from . import math_max_process
from . import math_mean_process
from . import math_median_process
from . import math_min_process
from . import math_mod_process
from . import math_multiply_process
from . import math_neq_process
from . import math_normdiff_process
from . import math_power_process
from . import math_product_process
from . import math_quantiles_process
from . import math_sd_process
from . import math_sgn_process
from . import math_sqrt_process
from . import math_subtract_process
from . import math_sum_process
from . import math_variance_process
| # -*- coding: utf-8 -*-
# Import the actinia_processing to fill the process.PROCESS_DICT with
# actinia_processing
from . import add_dimension_process
# from . import aggregate_spatial_process
# from . import apply_mask_process
from . import apply_process
from . import array_element_process
# from . import bbox_from_raster_process
from . import filter_bands_process
from . import filter_bbox_process
from . import filter_spatial_process
from . import filter_temporal_process
# from . import hants_process
from . import load_collection_process
# from . import mask_invalid_values_process
from . import mask_process
from . import mask_polygon_process
from . import merge_cubes_process
# from . import multilayer_mask_process
from . import ndvi_process
# from . import evi_process
# from . import normalized_difference_process
# from . import map_algebra_process
# from . import percentile_time_process
# from . import reduce_time_process
from . import reduce_dimension_process
from . import rename_labels_process
from . import resample_spatial_process
from . import run_udf_process
# from . import udf_reduce_time
from . import raster_exporter
from . import save_result_process
from . import trim_cube_process
# from . import rgb_raster_exporter
# from . import scale_minmax_process
# from . import zonal_statistics
# from . import temporal_algebra_process
# logical processes
from . import logic_and_process
from . import logic_if_process
from . import logic_not_process
from . import logic_or_process
from . import logic_xor_process
# math processes
from . import math_abs_process
from . import math_add_process
from . import math_clip_process
from . import math_divide_process
from . import math_eq_process
from . import math_exp_process
from . import math_gt_process
from . import math_int_process
from . import math_isnan_process
from . import math_isnodata_process
from . import math_ln_process
from . import math_lt_process
from . import math_lte_process
from . import math_max_process
from . import math_mean_process
from . import math_median_process
from . import math_min_process
from . import math_mod_process
from . import math_multiply_process
from . import math_neq_process
from . import math_normdiff_process
from . import math_power_process
from . import math_product_process
from . import math_quantiles_process
from . import math_sd_process
from . import math_sgn_process
from . import math_sqrt_process
from . import math_subtract_process
from . import math_sum_process
from . import math_variance_process
| en | 0.57337 | # -*- coding: utf-8 -*- # Import the actinia_processing to fill the process.PROCESS_DICT with # actinia_processing # from . import aggregate_spatial_process # from . import apply_mask_process # from . import bbox_from_raster_process # from . import hants_process # from . import mask_invalid_values_process # from . import multilayer_mask_process # from . import evi_process # from . import normalized_difference_process # from . import map_algebra_process # from . import percentile_time_process # from . import reduce_time_process # from . import udf_reduce_time # from . import rgb_raster_exporter # from . import scale_minmax_process # from . import zonal_statistics # from . import temporal_algebra_process # logical processes # math processes | 1.426776 | 1 |
tests/pandas/test_pandas.py | lvgig/test-aide | 2 | 6614543 | import pytest
import test_aide
# Probe at collection time whether pandas is importable in this environment;
# the test below is only meaningful (and only runs) when pandas is ABSENT.
try:
    import pandas as pd # noqa

    has_pandas = True
except ModuleNotFoundError:
    has_pandas = False


@pytest.mark.skipif(has_pandas, reason="pandas installed")
def test_no_pandas_module_if_not_installed():
    """Test that test_aide has not pandas module if the library is not installed."""
    # dir() on the package lists its loaded submodules; 'pandas' must not be
    # among them when the optional dependency is missing.
    assert "pandas" not in dir(
        test_aide
    ), "pandas module is available in test_aide when pandas is not installed"
| import pytest
import test_aide
try:
import pandas as pd # noqa
has_pandas = True
except ModuleNotFoundError:
has_pandas = False
@pytest.mark.skipif(has_pandas, reason="pandas installed")
def test_no_pandas_module_if_not_installed():
"""Test that test_aide has not pandas module if the library is not installed."""
assert "pandas" not in dir(
test_aide
), "pandas module is available in test_aide when pandas is not installed"
| en | 0.783296 | # noqa Test that test_aide has not pandas module if the library is not installed. | 2.570917 | 3 |
virtual_machine.py | darbaga/simple_compiler | 0 | 6614544 | <filename>virtual_machine.py
class VirtualMachine:
    """A tiny stack-based VM with a flat RAM dict and memory-mapped devices.

    Addresses from ``devices_start`` upward are handed out to registered
    devices; reads/writes to those addresses are forwarded through a
    DeviceProxy instead of touching raw RAM.
    """

    def __init__(self, ram_size=512, executing=True):
        # RAM is a dict keyed by address, so out-of-bounds access raises KeyError.
        self.data = {i: None for i in range(ram_size)}
        self.stack = []
        self.executing = executing
        self.pc = 0  # program counter: index into self.bytecodes while running
        self.devices_start = 256  # next free address for device registration

    def push(self, value):
        """Push something onto the stack."""
        # Was `self.stack += [value]`; append is the idiomatic single-element push.
        self.stack.append(value)

    def pop(self):
        """Pop something from the stack. Crash if empty."""
        return self.stack.pop()

    def read_memory(self, index):
        """Read from memory, crashing if index is out of bounds.

        Device-mapped cells are forwarded to the owning DeviceProxy.
        """
        cell = self.data[index]
        if isinstance(cell, DeviceProxy):
            return cell.read(index)
        return cell

    def write_memory(self, index, value):
        """Write to memory. Crash if index is out of bounds.

        Device-mapped cells are forwarded to the owning DeviceProxy.
        """
        if isinstance(self.data[index], DeviceProxy):
            self.data[index].write(index, value)
        else:
            self.data[index] = value

    def register_device(self, device, needed_addresses):
        """Given an instantiated device and the number of required addresses, registers it in memory"""
        # If not enough addresses remain above devices_start, error out.
        if self.devices_start + needed_addresses > len(self.data):
            raise Exception('Not enough addresses to allocate')
        proxyed_device = DeviceProxy(device, self.devices_start)
        for address in range(self.devices_start, self.devices_start + needed_addresses):
            self.data[address] = proxyed_device
        self.devices_start += needed_addresses

    def run(self, bytecodes):
        """Execute `bytecodes` until something clears self.executing.

        Each bytecode decides (via .autoincrement) whether the program
        counter advances automatically after it executes.
        """
        self.bytecodes = bytecodes
        while self.executing:
            # Read autoincrement BEFORE executing: the instruction may jump
            # by rewriting self.pc itself.
            increment = self.bytecodes[self.pc].autoincrement
            self.bytecodes[self.pc].execute(self)
            if increment:
                self.pc += 1
class DeviceProxy:
    """Manages address translation between devices.

    A device registered at base address ``pos`` occupying N RAM cells is
    addressed by the device itself with offsets 0..N-1.
    """

    def __init__(self, device, pos):
        self.device = device
        self.pos = pos  # absolute base address of the device's window

    def read(self, index):
        """Read the device cell backing absolute RAM address `index`."""
        # BUG FIX: the offset must grow with the address. The original used
        # `self.pos - index`, which yields 0, -1, -2, ... across the window
        # registered at pos..pos+N-1; the device expects 0, 1, 2, ...
        return self.device.read(index - self.pos)

    def write(self, index, value):
        """Write the device cell backing absolute RAM address `index`."""
        self.device.write(index - self.pos, value)
| <filename>virtual_machine.py
class VirtualMachine:
def __init__(self, ram_size=512, executing=True):
self.data = {i: None for i in range(ram_size)}
self.stack = []
self.executing = executing
self.pc = 0
self.devices_start = 256
def push(self, value):
"""Push something onto the stack."""
self.stack += [value]
def pop(self):
"""Pop something from the stack. Crash if empty."""
return self.stack.pop()
def read_memory(self, index):
"""Read from memory, crashing if index is out of bounds."""
if isinstance(self.data[index], DeviceProxy):
return self.data[index].read(index)
else:
return self.data[index]
def write_memory(self, index, value):
"""Write to memory. Crash if index is out of bounds."""
if isinstance(self.data[index], DeviceProxy):
self.data[index].write(index, value)
else:
self.data[index] = value
def register_device(self, device, needed_addresses):
"""Given an instantiated device and the number of required addresses, registers it in memory"""
# If not enough addresses, just error out
if self.devices_start+needed_addresses > len(self.data):
raise Exception('Not enough addresses to allocate')
proxyed_device = DeviceProxy(device, self.devices_start)
for i in range(self.devices_start, self.devices_start+needed_addresses):
self.data[i] = proxyed_device
self.devices_start += needed_addresses
def run(self, bytecodes):
self.bytecodes = bytecodes
while self.executing:
increment = self.bytecodes[self.pc].autoincrement
self.bytecodes[self.pc].execute(self)
if increment:
self.pc += 1
class DeviceProxy:
"""Manages address translation between devices"""
def __init__(self, device, pos):
self.device = device
self.pos = pos
def read(self, index):
return self.device.read(self.pos-index)
def write(self, index, value):
self.device.write(self.pos-index, value)
| en | 0.918658 | Push something onto the stack. Pop something from the stack. Crash if empty. Read from memory, crashing if index is out of bounds. Write to memory. Crash if index is out of bounds. Given an instantiated device and the number of required addresses, registers it in memory # If not enough addresses, just error out Manages address translation between devices | 3.486701 | 3 |
parser/source/parser.py | rafaatsouza/ese-parser | 1 | 6614545 | import os
from xml.dom import minidom
from enum import Enum
class Tools(Enum):
    # Supported pattern-detection tools; each value is the short CLI token
    # used to select the tool.
    JasperReports = 'jasper'
    JFreeChart = 'jfree'
    JHotDraw = 'jhot'
    JMeter = 'jmeter'
    Struts = 'struts'
class Parser:
    """Reads a pattern-detection XML report and writes a semicolon-separated
    CSV summary (tool;pattern;class) next to the input file."""

    def __init__(self, tool, filepath):
        """Validate inputs.

        `tool` must be a Tools member and `filepath` must point to an
        existing .xml file; anything else raises Exception (kept generic
        for backward compatibility with existing callers).
        """
        if not isinstance(tool, Tools):
            raise Exception('Invalid tool')
        if not filepath:
            raise Exception('Empty filepath')
        if not os.path.isfile(filepath):
            raise Exception('Invalid filepath')
        if not filepath.endswith('.xml'):
            raise Exception('Invalid filepath')
        # Display names written to the CSV for each supported tool.
        tools = {
            Tools.JasperReports: 'JasperReports',
            Tools.JFreeChart: 'JFreeChart',
            Tools.JHotDraw: 'JHotDraw',
            Tools.JMeter: 'JMeter',
            Tools.Struts: 'Struts'
        }
        self.tool = tools[tool]
        self.filepath = filepath

    def printParsedXml(self):
        """Parse self.filepath and write the CSV output file."""
        xml = minidom.parse(self.filepath)
        patternName = xml.getElementsByTagName(
            'PatternName')[0].firstChild.data
        classes = self.getClasses(xml)
        self.printOutput(self.tool, patternName, classes)

    def getOutputFilePath(self):
        """Return the output path: same directory, 'parsed-' prefix, .csv
        extension.

        NOTE(review): .replace('.xml', '.csv') substitutes the FIRST
        occurrence anywhere in the path, so a directory name containing
        '.xml' would be mangled; kept as-is for compatibility.
        """
        parts = self.filepath.split('/')
        parts[-1] = 'parsed-{}'.format(parts[-1])
        return '/'.join(parts).replace('.xml', '.csv')

    def getClasses(self, xml):
        """Return unique class names from AnchorsInstance and RoleInstance
        elements, in first-seen order.

        Uses a companion set for O(1) membership tests instead of the
        original O(n) `value not in list` scan per element.
        """
        classes = []
        seen = set()
        for tag in ('AnchorsInstance', 'RoleInstance'):
            for node in xml.getElementsByTagName(tag):
                value = node.attributes['value'].value
                if value not in seen:
                    seen.add(value)
                    classes.append(value)
        return classes

    def printOutput(self, tool, patternName, classes):
        """Write the CSV: a header row plus one row per class."""
        with open(self.getOutputFilePath(), 'w') as file:
            file.write('tool;pattern;class\n')
            for _class in classes:
                file.write('{};{};{}\n'.format(tool, patternName, _class))
| import os
from xml.dom import minidom
from enum import Enum
class Tools(Enum):
JasperReports = 'jasper'
JFreeChart = 'jfree'
JHotDraw = 'jhot'
JMeter = 'jmeter'
Struts = 'struts'
class Parser:
def __init__(self, tool, filepath):
if not isinstance(tool, Tools):
raise Exception('Invalid tool')
if not filepath:
raise Exception('Empty filepath')
if not os.path.isfile(filepath):
raise Exception('Invalid filepath')
if not filepath.endswith('.xml'):
raise Exception('Invalid filepath')
tools = {
Tools.JasperReports: 'JasperReports',
Tools.JFreeChart: 'JFreeChart',
Tools.JHotDraw: 'JHotDraw',
Tools.JMeter: 'JMeter',
Tools.Struts: 'Struts'
}
self.tool = tools[tool]
self.filepath = filepath
def printParsedXml(self):
xml = minidom.parse(self.filepath)
patternName = xml.getElementsByTagName(
'PatternName')[0].firstChild.data
classes = self.getClasses(xml)
self.printOutput(self.tool, patternName, classes)
def getOutputFilePath(self):
parts = self.filepath.split('/')
parts[-1] = 'parsed-{}'.format(parts[-1])
return '/'.join(parts).replace('.xml', '.csv')
def getClasses(self, xml):
classes = []
anchors = xml.getElementsByTagName('AnchorsInstance')
roles = xml.getElementsByTagName('RoleInstance')
if anchors:
for anchor in anchors:
if anchor.attributes['value'].value not in classes:
classes.append(anchor.attributes['value'].value)
if roles:
for role in roles:
if role.attributes['value'].value not in classes:
classes.append(role.attributes['value'].value)
return classes
def printOutput(self, tool, patternName, classes):
with open(self.getOutputFilePath(), 'w') as file:
file.write('tool;pattern;class\n')
for _class in classes:
file.write('{};{};{}\n'.format(tool, patternName, _class))
| none | 1 | 2.599155 | 3 | |
Desafios/Desafio#001.py | uKaigo/Atlanta-CodeChallenge | 1 | 6614546 | """
Desafio #01 - Números Primos
Jonas estava na sua aula de matemática e o conteúdo do dia era números primos,
ele então decidiu escrever um algoritmo para calcular números primos. Para começar,
ele gostaria de saber de todos os números de 1 até 10000 quais eram primos. Sua tarefa
é escrever um programa que liste todos os números primos de 1 até 10000.
"""
# Por Kaigo. Ganho de 145 pontos
# Todos os números primos obtidos
primos = []
# Pega todos os números entre 1 e 10000 (para determinar se é primo ou não)
for number in range (2, 10000):
div = False # Necessário para resetar o estado
# Pega todos os números até o número atual (para testar divisão)
for inter in range(2, number-1): # Se houver alguma divisão aqui, o número não é primo, inter = intermediario
if number % inter == 0: # Se o resto da divisão do número for 0
div = True
break
if div == True: # Caso tenha ocorrido uma divisão, ele continua o programa
continue
primos.append(str(number)) # Não ocorreu, o número é adicionado à lista.
print('Os números primos de 1 até 10000 são:')
print(', '.join(primos)) # Formata a saída
| """
Desafio #01 - Números Primos
Jonas estava na sua aula de matemática e o conteúdo do dia era números primos,
ele então decidiu escrever um algoritmo para calcular números primos. Para começar,
ele gostaria de saber de todos os números de 1 até 10000 quais eram primos. Sua tarefa
é escrever um programa que liste todos os números primos de 1 até 10000.
"""
# Por Kaigo. Ganho de 145 pontos
# Todos os números primos obtidos
primos = []
# Pega todos os números entre 1 e 10000 (para determinar se é primo ou não)
for number in range (2, 10000):
div = False # Necessário para resetar o estado
# Pega todos os números até o número atual (para testar divisão)
for inter in range(2, number-1): # Se houver alguma divisão aqui, o número não é primo, inter = intermediario
if number % inter == 0: # Se o resto da divisão do número for 0
div = True
break
if div == True: # Caso tenha ocorrido uma divisão, ele continua o programa
continue
primos.append(str(number)) # Não ocorreu, o número é adicionado à lista.
print('Os números primos de 1 até 10000 são:')
print(', '.join(primos)) # Formata a saída
| pt | 0.992893 | Desafio #01 - Números Primos Jonas estava na sua aula de matemática e o conteúdo do dia era números primos, ele então decidiu escrever um algoritmo para calcular números primos. Para começar, ele gostaria de saber de todos os números de 1 até 10000 quais eram primos. Sua tarefa é escrever um programa que liste todos os números primos de 1 até 10000. # Por Kaigo. Ganho de 145 pontos # Todos os números primos obtidos # Pega todos os números entre 1 e 10000 (para determinar se é primo ou não) # Necessário para resetar o estado # Pega todos os números até o número atual (para testar divisão) # Se houver alguma divisão aqui, o número não é primo, inter = intermediario # Se o resto da divisão do número for 0 # Caso tenha ocorrido uma divisão, ele continua o programa # Não ocorreu, o número é adicionado à lista. # Formata a saída | 3.827387 | 4 |
setup.py | htwenning/sanic-jinja | 0 | 6614547 | <reponame>htwenning/sanic-jinja<filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging script for Sanic_Jinja, a minimal Jinja2 renderer for Sanic."""
from setuptools import setup

import sys

# This package targets Python 3 only; fail fast on a Python 2 interpreter.
if sys.version_info[0] == 2:
    raise Exception('python3 required.')

setup(
    name='Sanic_Jinja',
    version='0.0.1',
    url='https://github.com/htwenning/sanic-jinja',
    license='MIT',
    author='wenning',
    author_email='<EMAIL>',
    description='simple jinja2 template renderer for sanic',
    packages=['sanic_jinja'],
    include_package_data=True,
    zip_safe=False,
    platforms='any',
    install_requires=[
        'sanic==18.12.0',
        'Jinja2==2.10',
    ],
)
) | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import sys
if sys.version_info[0] == 2:
raise Exception('python3 required.')
install_requirements = [
'sanic==18.12.0',
'Jinja2==2.10'
]
setup(
name='Sanic_Jinja',
version='0.0.1',
url='https://github.com/htwenning/sanic-jinja',
license='MIT',
author='wenning',
author_email='<EMAIL>',
description='simple jinja2 template renderer for sanic',
packages=['sanic_jinja'],
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=install_requirements,
) | en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 1.503517 | 2 |
tests/test_HTMLFileHandler.py | Sudoblark/w3c-html-python-validator | 0 | 6614548 | from src.HTMLFileHandler import HTMLFileHandler
import random
import string
def test_init():
    """A freshly constructed handler starts with an empty HTML array."""
    html_file_handler = HTMLFileHandler()
    assert (len(html_file_handler.return_html_array()) == 0)
def test_add_to_array():
    """Adding one (random) string grows the array to exactly one entry."""
    html_file_handler = HTMLFileHandler()
    letters = string.ascii_lowercase
    random_string = ''.join(random.choice(letters) for i in range(20))
    html_file_handler.add_to_html_array(random_string)
    assert (len(html_file_handler.return_html_array()) == 1)
def test_reset_html_array():
    """reset_html_array() empties an array that previously held an entry."""
    html_file_handler = HTMLFileHandler()
    letters = string.ascii_lowercase
    random_string = ''.join(random.choice(letters) for i in range(20))
    html_file_handler.add_to_html_array(random_string)
    html_file_handler.reset_html_array()
    assert (len(html_file_handler.return_html_array()) == 0)
def test_return_html_array():
    """return_html_array() always returns a list object, never None."""
    html_file_handler = HTMLFileHandler()
    assert (html_file_handler.return_html_array() is not None)
def test_find_html_files_from_dir():
    """Scanning the ./test_files fixture directory finds its two HTML files."""
    html_file_handler = HTMLFileHandler()
    html_file_handler.find_html_files_from_dir("./test_files")
    assert (len(html_file_handler.return_html_array()) == 2)
# Allow running the checks directly, without the pytest runner.
if __name__ == "__main__":
    test_init()
    test_add_to_array()
    test_find_html_files_from_dir()
    test_return_html_array()
    test_reset_html_array()
| from src.HTMLFileHandler import HTMLFileHandler
import random
import string
def test_init():
html_file_handler = HTMLFileHandler()
assert (len(html_file_handler.return_html_array()) == 0)
def test_add_to_array():
html_file_handler = HTMLFileHandler()
letters = string.ascii_lowercase
random_string = ''.join(random.choice(letters) for i in range(20))
html_file_handler.add_to_html_array(random_string)
assert (len(html_file_handler.return_html_array()) == 1)
def test_reset_html_array():
html_file_handler = HTMLFileHandler()
letters = string.ascii_lowercase
random_string = ''.join(random.choice(letters) for i in range(20))
html_file_handler.add_to_html_array(random_string)
html_file_handler.reset_html_array()
assert (len(html_file_handler.return_html_array()) == 0)
def test_return_html_array():
html_file_handler = HTMLFileHandler()
assert (html_file_handler.return_html_array() is not None)
def test_find_html_files_from_dir():
html_file_handler = HTMLFileHandler()
html_file_handler.find_html_files_from_dir("./test_files")
assert (len(html_file_handler.return_html_array()) == 2)
if __name__ == "__main__":
test_init()
test_add_to_array()
test_find_html_files_from_dir()
test_return_html_array()
test_reset_html_array()
| none | 1 | 2.889652 | 3 | |
src/commands/seek.py | kintrix007/play-next | 0 | 6614549 | <filename>src/commands/seek.py
import os
from src.args import ParsedArgs
from src.config import Config
from src.play_json import load_play_next, overwrite_play_json
cmd_name = "seek"
def run(parsed: ParsedArgs, config: Config) -> None:
    """Move the watched-episode pointer for the series in the current directory.

    The single parameter is either absolute ("7" -> next episode is ep 7)
    or relative when prefixed with "+"/"-" ("+2", "-1"). The result is
    clamped to [0, ep_count] before being written back.
    """
    working_dir = os.getcwd()
    state = load_play_next(working_dir)

    target = parsed.command.params[0]
    if target[0] in ("+", "-"):
        # Relative seek: int() understands the sign prefix directly.
        state.watched += int(target)
    else:
        # Absolute seek: the user counts from 1, internally we count from 0.
        state.watched = int(target) - 1

    # Clamp into the valid range.
    state.watched = max(0, state.watched)
    if state.ep_count:
        state.watched = min(state.ep_count, state.watched)

    overwrite_play_json(working_dir, state)
    print(f"Next episode will be ep '{state.watched + 1}'")
| <filename>src/commands/seek.py
import os
from src.args import ParsedArgs
from src.config import Config
from src.play_json import load_play_next, overwrite_play_json
cmd_name = "seek"
def run(parsed: ParsedArgs, config: Config) -> None:
cwd = os.getcwd()
play_next = load_play_next(cwd)
seek_ep = parsed.command.params[0]
if seek_ep[0] in [ "+", "-" ]:
play_next.watched += int(seek_ep)
else:
play_next.watched = int(seek_ep) - 1
play_next.watched = max(0, play_next.watched)
if play_next.ep_count: play_next.watched = min(play_next.ep_count, play_next.watched)
overwrite_play_json(cwd, play_next)
print(f"Next episode will be ep '{play_next.watched+1}'")
| none | 1 | 2.671555 | 3 | |
maskrcnn_predict.py | Sun-Deep/mask-rcnn | 1 | 6614550 | #import necessary packages
from mrcnn.config import Config
from mrcnn import model as modellib
from mrcnn import visualize
import numpy as np
import colorsys
import argparse
import imutils
import random
import cv2
import os
#construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-w", "--weights", required=True, help="path to Mask R-CNN model weights pre-trained on COCO")
ap.add_argument("-l", "--labels", required=True, help="path to class labels file")
ap.add_argument("-i", "--image", required=True, help="path to input image to apply Mask R-CNN to")
args = vars(ap.parse_args())
#load the class label names from disk, one label per line
CLASS_NAMES = open(args["labels"]).read().strip().split("\n")
#generate random (but visually distinct) colors for each class label
#thanks to matterport mask r-cnn for the method
hsv = [(i/len(CLASS_NAMES), 1, 1.0) for i in range(len(CLASS_NAMES))]
COLORS = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.seed(42)
random.shuffle(COLORS)
# Minimal inference-time configuration: one image at a time on a single GPU.
class SimpleConfig(Config):
	# Recognizable name used by mrcnn for logs/checkpoint directories.
	NAME = "coco_inference"
	# One GPU, one image per GPU -> effective batch size of 1 for inference.
	GPU_COUNT = 1
	IMAGES_PER_GPU = 1
	# Number of classes the model predicts, taken from the labels file
	# (must match the class count the weights were trained with).
	NUM_CLASSES = len(CLASS_NAMES)
# Instantiate the inference configuration defined above.
config = SimpleConfig()
# Build the Mask R-CNN model in inference mode and load the given weights.
print("[INFO] loading Mask R-CNN model....")
model = modellib.MaskRCNN(mode="inference", config=config, model_dir=os.getcwd())
model.load_weights(args["weights"], by_name=True)
# Load the input image, convert BGR (OpenCV's default) -> RGB (what the
# model expects), and resize to a fixed width to bound inference time.
image = cv2.imread(args['image'])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = imutils.resize(image, width=512)
# Forward pass; detect() takes a batch, so wrap/unwrap the single image.
print('[INFO] making predictions with Mask R-CNN...')
r = model.detect([image], verbose=1)[0]
# Blend the pixel-wise mask of every detected instance onto the image.
for i in range(0, r["rois"].shape[0]):
	# Class ID and per-instance mask for detection i, plus its class color.
	# NOTE(review): COLORS holds RGB floats and `image` is RGB here, so the
	# [::-1] reversal applies the color channels in reversed order --
	# confirm this is the intended look.
	classID = r["class_ids"][i]
	mask = r["masks"][:, :, i]
	color = COLORS[classID][::-1]
	# Blend the mask onto the image at 50% opacity.
	image = visualize.apply_mask(image, mask, color, alpha=0.5)
# Back to BGR so OpenCV's drawing/display functions render correctly.
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
# Draw a labeled bounding box for every detection.
for i in range(0, len(r["scores"])):
	# Bounding boxes come back as (startY, startX, endY, endX).
	(startY, startX, endY, endX) = r["rois"][i]
	classID = r["class_ids"][i]
	label = CLASS_NAMES[classID]
	score = r["scores"][i]
	# Scale the [0, 1] float color to the 0-255 ints OpenCV expects.
	color = [int(c) for c in np.array(COLORS[classID])* 255]
	# Draw the box, then the "label:score" text just above it (or just
	# below when the box touches the top edge of the image).
	cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)
	text = "{}:{:.3f}".format(label, score)
	y = startY - 10 if startY - 10 > 10 else startY + 10
	cv2.putText(image, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
# Show the result and block until a key is pressed.
cv2.imshow("Output", image)
cv2.waitKey()
| #import necessary packages
from mrcnn.config import Config
from mrcnn import model as modellib
from mrcnn import visualize
import numpy as np
import colorsys
import argparse
import imutils
import random
import cv2
import os
# Construct the argument parser and parse the command-line arguments.
ap = argparse.ArgumentParser()
ap.add_argument("-w", "--weights", required=True, help="path to Mask R-CNN model weights pre-trained on COCO")
ap.add_argument("-l", "--labels", required=True, help="path to class labels file")
ap.add_argument("-i", "--image", required=True, help="path to input image to apply Mask R-CNN to")
args = vars(ap.parse_args())
# Load the class label names from disk, one label per line; the line index
# doubles as the class ID used by the network's predictions below.
# (Fix: the original `open(...).read()` never closed the file handle.)
with open(args["labels"]) as labels_file:
    CLASS_NAMES = labels_file.read().strip().split("\n")
# Generate visually distinct colors, one per class, by spacing hues evenly
# around the HSV color wheel and converting to RGB (floats in [0, 1]).
# (Method borrowed from the Matterport Mask R-CNN visualization code.)
hsv = [(i/len(CLASS_NAMES), 1, 1.0) for i in range(len(CLASS_NAMES))]
COLORS = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
# Fixed seed so the class -> color assignment is reproducible across runs.
random.seed(42)
random.shuffle(COLORS)
# Minimal inference-time configuration: one image at a time on a single GPU.
class SimpleConfig(Config):
	# Recognizable name used by mrcnn for logs/checkpoint directories.
	NAME = "coco_inference"
	# One GPU, one image per GPU -> effective batch size of 1 for inference.
	GPU_COUNT = 1
	IMAGES_PER_GPU = 1
	# Number of classes the model predicts, taken from the labels file
	# (must match the class count the weights were trained with).
	NUM_CLASSES = len(CLASS_NAMES)
# Instantiate the inference configuration defined above.
config = SimpleConfig()
# Build the Mask R-CNN model in inference mode and load the given weights.
print("[INFO] loading Mask R-CNN model....")
model = modellib.MaskRCNN(mode="inference", config=config, model_dir=os.getcwd())
model.load_weights(args["weights"], by_name=True)
# Load the input image, convert BGR (OpenCV's default) -> RGB (what the
# model expects), and resize to a fixed width to bound inference time.
image = cv2.imread(args['image'])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = imutils.resize(image, width=512)
# Forward pass; detect() takes a batch, so wrap/unwrap the single image.
print('[INFO] making predictions with Mask R-CNN...')
r = model.detect([image], verbose=1)[0]
# Blend the pixel-wise mask of every detected instance onto the image.
for i in range(0, r["rois"].shape[0]):
	# Class ID and per-instance mask for detection i, plus its class color.
	# NOTE(review): COLORS holds RGB floats and `image` is RGB here, so the
	# [::-1] reversal applies the color channels in reversed order --
	# confirm this is the intended look.
	classID = r["class_ids"][i]
	mask = r["masks"][:, :, i]
	color = COLORS[classID][::-1]
	# Blend the mask onto the image at 50% opacity.
	image = visualize.apply_mask(image, mask, color, alpha=0.5)
# Back to BGR so OpenCV's drawing/display functions render correctly.
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
# Draw a labeled bounding box for every detection.
for i in range(0, len(r["scores"])):
	# Bounding boxes come back as (startY, startX, endY, endX).
	(startY, startX, endY, endX) = r["rois"][i]
	classID = r["class_ids"][i]
	label = CLASS_NAMES[classID]
	score = r["scores"][i]
	# Scale the [0, 1] float color to the 0-255 ints OpenCV expects.
	color = [int(c) for c in np.array(COLORS[classID])* 255]
	# Draw the box, then the "label:score" text just above it (or just
	# below when the box touches the top edge of the image).
	cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)
	text = "{}:{:.3f}".format(label, score)
	y = startY - 10 if startY - 10 > 10 else startY + 10
	cv2.putText(image, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
# Show the result and block until a key is pressed.
cv2.imshow("Output", image)
cv2.waitKey()
| en | 0.657345 | #import necessary packages #construct the argument parse and parse the arguments #load the class label names from disk, one label per line #generate random (but visually distinct) colors for each class label #thanks to matterport mask r-cnn for the method #lets construct our simple config class #give the configuration a recognizable name #set the number of GPUs to use along with the number of images per GPU #number of classes #initialize the inference configuration #initialize the mask r-cnn model for inference and then load the weights #load the input image, convert it from BGR to RGB channel ordering and resize the image #perform the forward pass of the network to obtain the results #loop over of the detected object's bounding boxes and masks #extract the class Id and mask for the current detection then grab the color to visualize the mask (in BGR format) #visualize the pixel-wise mask of the object #convert the image back to BGR so we can use OpenCV's drawing functions #loop over the predicted scores and class labels #extract the bounding box information, classId, label, predicted scores, visualization color #draw the bounding box, class labels, and scores of the object #show the output image | 2.535644 | 3 |