code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import mysql.connector
def insertOwnerDataIntoDB(myDB, myCursor, shopName, ownerName, shopType, openingTime, closingTime, timeReqPerUser,
                          address, city, pinCode, phone, email, password):
    """Insert a new owner row, look up its shop_id, and create the per-shop
    `event_<shop_id>` table, all in one transaction.

    :param myDB: open mysql.connector connection
    :param myCursor: unused (a fresh dictionary cursor is created locally)
    :return: True on success, False on any error (transaction rolled back)
    """
    print('end')
    myDB.autocommit = False
    cursor = myDB.cursor(dictionary=True)
    try:
        # Parameterized query: the previous string-concatenated SQL was
        # injectable through every user-supplied field.
        cursor.execute(
            "INSERT INTO `tokendatabase`.`owner` (`shop_name`, `owner_name`, `shop_type`, `opening_time`, "
            "`closing_time`, `time_req_per_user`, `address`, `city`, `pin_code`, `phone`, `email`, `password`) "
            "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);",
            (shopName, ownerName, shopType, openingTime, closingTime, timeReqPerUser,
             address, city, pinCode, phone, email, password),
        )
        print('inserted')
        # get shop_id from 'owner' table
        cursor.execute(
            "SELECT shop_id FROM `tokendatabase`.`owner` WHERE email = %s;",
            (email,),
        )
        print('query executed')
        shop_id = ''
        for x in cursor:
            # str() never returns None, so the first row always wins.
            shop_id = str(x['shop_id'])
            if shop_id is not None:
                break
        print('shop_id: ' + shop_id)
        # Table names cannot be bound parameters; shop_id comes from our own
        # SELECT above (a numeric column), not from caller input.
        cursor.execute(
            "CREATE TABLE `tokendatabase`. `event_" + shop_id + "`(`date` DATE NOT NULL, `user_email` VARCHAR(100) "
            "NOT NULL, `incoming_time` TIME NOT NULL, `outgoing_time` TIME NULL, `is_event_over` CHAR(1) "
            "NULL DEFAULT '0', `status` VARCHAR(100) NULL, PRIMARY KEY(`date`, `incoming_time`, `user_email`)); "
        )
        print('event table created')
        myDB.commit()
        print(True)
        return True
    except mysql.connector.Error as err:
        print("MySQL Error:")
        print(err)
        myDB.rollback()
        return False
    except BaseException as error:
        # Fix: "Error:" + error raised TypeError (str + exception); print
        # them separately instead of concatenating.
        print("Error:")
        print(error)
        myDB.rollback()
        return False
    finally:
        myDB.autocommit = True
def insertUserDataIntoDB(myDB, myCursor, userName, address, city, pinCode, phone, email, password):
    """Insert a new user row.

    :return: True on success, False on a MySQL error (no rollback needed:
             a single failed INSERT leaves nothing to undo)
    """
    print('end')
    try:
        # Parameterized query: the previous string-concatenated SQL was
        # injectable through every user-supplied field.
        myCursor.execute(
            "INSERT INTO `tokendatabase`.`user` (`user_name`, `address`, `city`, `pin_code`, `phone`, `email`,"
            " `password`) VALUES (%s, %s, %s, %s, %s, %s, %s);",
            (userName, address, city, pinCode, phone, email, password),
        )
        myDB.commit()
        return True
    except mysql.connector.Error as error:
        print(error)
        return False
def updatePassword(myDB, myCursor, of, email, password):
    """Update the password of an `owner` or `user` account.

    :param of: target table name; must be exactly 'owner' or 'user'
    :return: True on success, False on any error
    """
    print('end')
    try:
        # A table name cannot be a bound parameter, so whitelist it instead of
        # interpolating caller input directly into the statement (injection risk).
        if of not in ('owner', 'user'):
            raise ValueError('invalid table name: ' + str(of))
        myCursor.execute(
            "UPDATE `tokendatabase`.`" + of + "` SET `password` = %s WHERE(`email` = %s);",
            (password, email),
        )
        myDB.commit()
        return True
    except BaseException as error:
        print('Error in InsertIntoTable.updatePassword: ', error)
        return False
def addBooking(myDB, myCursor, shopId, userEmail, ownerEmail, note, incomingTime, outgoingTime, status):
    """Insert a booking into both `task` and the per-shop `event_<shopId>` table
    as one transaction; returns True on success, False after a rollback.
    """
    # this function inserts user booking details to `task` and `event_<shopId>` table
    # SECURITY(review): both statements build SQL by string concatenation from
    # caller-supplied values -- injectable; should use parameterized queries.
    try:
        myDB.autocommit = False
        # insert in `task`
        myCursor.execute(
            "INSERT INTO `tokendatabase`.`task`(`user_email`, `owner_email`, `note`, `date`, `incoming_time`, "
            "`outgoing_time`, "
            "`status`) VALUES('" + userEmail + "', '" + ownerEmail + "', '" + note + "', CURDATE(),'" + incomingTime + "', "
            "'" + outgoingTime + "', 'booked');"
        )
        # insert in `event_<shopId>`
        # currentDate is done using CURDATE() function of database
        myCursor.execute(
            "INSERT INTO `tokendatabase`.`event_" + shopId + "`(`date`, `user_email`, `incoming_time`, "
            "`outgoing_time`, `status`) VALUES(CURDATE(), '" + userEmail + "', '" + incomingTime + "', "
            "'" + outgoingTime + "', '" + status + "');"
        )
        print('Booking details inserted successfully')
        myDB.commit()
        return True
    except mysql.connector.Error as err:
        print('MySQL Error:')
        print(err)
        myDB.rollback()
        return False
    except BaseException as error:
        print('Error:')
        print(error)
        myDB.rollback()
        return False
    # autocommit is restored on the (fused) final line that follows.
    finally:
myDB.autocommit = True | hackathon_covid_19_Token_System_Server/InsertIntoTable.py | import mysql.connector
def insertOwnerDataIntoDB(myDB, myCursor, shopName, ownerName, shopType, openingTime, closingTime, timeReqPerUser,
address, city, pinCode, phone, email, password):
print('end')
myDB.autocommit = False
cursor = myDB.cursor(dictionary=True)
try:
cursor.execute(
"INSERT INTO `tokendatabase`.`owner` (`shop_name`, `owner_name`, `shop_type`, `opening_time`, "
"`closing_time`, " +
"`time_req_per_user`, `address`, `city`, `pin_code`, `phone`, `email`, `password`) VALUES ('" + shopName +
"', '" + ownerName + "', '" + shopType + "', '" + openingTime + "', '" + closingTime + "', '" + timeReqPerUser + "', '" +
address + "', '" + city + "', '" + pinCode + "', '" + phone + "', '" + email + "', '" + password + "');"
)
print('inserted')
# get shop_id from 'owner' table
cursor.execute(
"SELECT shop_id FROM `tokendatabase`.`owner` WHERE email = '" + email + "';"
)
print('query executed')
shop_id = ''
for x in cursor:
shop_id = str(x['shop_id'])
if shop_id is not None:
break
print('shop_id: ' + shop_id)
# now, create separate table 'event'
cursor.execute(
"CREATE TABLE `tokendatabase`. `event_" + shop_id + "`(`date` DATE NOT NULL, `user_email` VARCHAR(100) " +
"NOT NULL, `incoming_time` TIME NOT NULL, `outgoing_time` TIME NULL, `is_event_over` CHAR(1) " +
"NULL DEFAULT '0', `status` VARCHAR(100) NULL, PRIMARY KEY(`date`, `incoming_time`, `user_email`)); "
)
print('event table created')
myDB.commit()
print(True)
return True
except mysql.connector.Error as err:
print("MySQL Error:")
print(err)
myDB.rollback()
return False
except BaseException as error:
print("Error:" + error)
myDB.rollback()
return False
finally:
myDB.autocommit = True
def insertUserDataIntoDB(myDB, myCursor, userName, address, city, pinCode, phone, email, password):
print('end')
try:
myCursor.execute(
"INSERT INTO `tokendatabase`.`user` (`user_name`, `address`, `city`, `pin_code`, `phone`, `email`,"
" `password`) VALUES ('" + userName + "', '" + address + "', '" + city + "', '" + pinCode + "', '" + phone + "'," +
"'" + email + "', '" + password + "');"
)
myDB.commit()
return True
except mysql.connector.Error as error:
print(error)
return False
def updatePassword(myDB, myCursor, of, email, password):
print('end')
try:
myCursor.execute(
"UPDATE `tokendatabase`.`" + of + "` SET `password` = '" + password + "' WHERE(`email` = '" + email + "');"
)
myDB.commit()
return True
except BaseException as error:
print('Error in InsertIntoTable.updatePassword: ', error)
return False
def addBooking(myDB, myCursor, shopId, userEmail, ownerEmail, note, incomingTime, outgoingTime, status):
# this function inserts user booking details to `task` and `event_<shopId>` table
try:
myDB.autocommit = False
# insert in `task`
myCursor.execute(
"INSERT INTO `tokendatabase`.`task`(`user_email`, `owner_email`, `note`, `date`, `incoming_time`, "
"`outgoing_time`, "
"`status`) VALUES('" + userEmail + "', '" + ownerEmail + "', '" + note + "', CURDATE(),'" + incomingTime + "', "
"'" + outgoingTime + "', 'booked');"
)
# insert in `event_<shopId>`
# currentDate is done using CURDATE() function of database
myCursor.execute(
"INSERT INTO `tokendatabase`.`event_" + shopId + "`(`date`, `user_email`, `incoming_time`, "
"`outgoing_time`, `status`) VALUES(CURDATE(), '" + userEmail + "', '" + incomingTime + "', "
"'" + outgoingTime + "', '" + status + "');"
)
print('Booking details inserted successfully')
myDB.commit()
return True
except mysql.connector.Error as err:
print('MySQL Error:')
print(err)
myDB.rollback()
return False
except BaseException as error:
print('Error:')
print(error)
myDB.rollback()
return False
finally:
myDB.autocommit = True | 0.293 | 0.047581 |
from flask import Flask, redirect
from flask_cors import CORS
import logging
from marshmallow.exceptions import ValidationError
from . import Environment
from . extensions import db, filtr
from . config import Config
from . api import blueprint as api_v1
from . util import error
logger = logging.getLogger(__name__)
def create_app(env='prd') -> Flask:
    """Application factory: build, configure, and fully wire up a Flask app."""
    application = Flask(__name__)
    # Order matters: configuration first, then extensions/DB, then routing.
    configure_app(application, env)
    register_extensions(application)
    setup_db(application)
    register_error_handlers(application)
    register_blueprints(application)
    add_special_routes(application)
    return application
def configure_app(app: Flask, env: str) -> None:
    """Load the config object for `env` and publish the active environment."""
    cfg = Config.get(env)
    app.config.from_object(cfg)
    app.logger.setLevel(logging.INFO)
    Environment.set(cfg.ENV)
def register_blueprints(app: Flask):
    """Mount the v1 API blueprint under /api/v1."""
    app.register_blueprint(api_v1, url_prefix="/api/v1")
def register_extensions(app: Flask) -> None:
    """Bind the shared extension objects (db, filtr, CORS) to this app."""
    db.init_app(app)
    filtr.init_app(app)
    CORS(app)
def setup_db(app: Flask) -> None:
    """Create all tables when CREATE_SCHEMA is enabled (local/dev only)."""
    if app.config.get('CREATE_SCHEMA'):
        logger.warning("Setting up local database for development work!")
        with app.app_context():
            try:
                db.create_all()
                logger.warning(f"CREATE DATABASE @ {app.config.get('SQLALCHEMY_DATABASE_URI')}")
            except Exception:
                # Fix: a bare `except:` also swallowed SystemExit and
                # KeyboardInterrupt; logger.exception keeps the traceback.
                logger.exception("Database creation failed. It probably already exists")
def add_special_routes(app: Flask):
    """Register convenience routes that live outside the API blueprint."""
    @app.route("/swagger")
    def swagger():
        # Shortcut: /swagger redirects to the API root (which serves the docs).
        return redirect("/api/v1")
def register_error_handlers(app: Flask) -> None:
    """Install JSON error responses for validation failures and uncaught exceptions."""
    @app.errorhandler(ValidationError)
    def validation_error_handler(err):
        # NOTE(review): `err` is passed as a %-format argument but the message
        # has no placeholder, so the detail is likely dropped by logging --
        # probably meant logger.error("marshmallow validation error: %s", err).
        logger.error("marshmallow validation error", err)
        return error(422, [{"message": "Validation error", "details": str(err)}])
    @app.errorhandler(Exception)
    def generic_error_handler(err):
        # NOTE(review): same logging-argument issue as above.
        logger.error("unhandled application exception", err)
        # Honor status/message attributes when the exception carries them.
        code = getattr(err, "status_code", 500)
        message = getattr(err, "messages", "uncaught exception")
return error(code, [dict(message=message, details=str(err))]) | api/honeyhole/app.py | from flask import Flask, redirect
from flask_cors import CORS
import logging
from marshmallow.exceptions import ValidationError
from . import Environment
from . extensions import db, filtr
from . config import Config
from . api import blueprint as api_v1
from . util import error
logger = logging.getLogger(__name__)
def create_app(env='prd') -> Flask:
""" application factory creates and configures app """
app = Flask(__name__)
configure_app(app, env)
register_extensions(app)
setup_db(app)
register_error_handlers(app)
register_blueprints(app)
add_special_routes(app)
return app
def configure_app(app: Flask, env: str) -> None:
config = Config.get(env)
app.config.from_object(config)
app.logger.setLevel(logging.INFO)
Environment.set(config.ENV)
def register_blueprints(app: Flask):
app.register_blueprint(api_v1, url_prefix="/api/v1")
def register_extensions(app: Flask) -> None:
db.init_app(app)
filtr.init_app(app)
CORS(app)
def setup_db(app: Flask) -> None:
if app.config.get('CREATE_SCHEMA'):
logger.warning("Setting up local database for development work!")
with app.app_context():
try:
db.create_all()
logger.warning(f"CREATE DATABASE @ {app.config.get('SQLALCHEMY_DATABASE_URI')}")
except:
logger.error(f"Database creation failed. It probably already exists")
def add_special_routes(app: Flask):
@app.route("/swagger")
def swagger():
return redirect("/api/v1")
def register_error_handlers(app: Flask) -> None:
@app.errorhandler(ValidationError)
def validation_error_handler(err):
logger.error("marshmallow validation error", err)
return error(422, [{"message": "Validation error", "details": str(err)}])
@app.errorhandler(Exception)
def generic_error_handler(err):
logger.error("unhandled application exception", err)
code = getattr(err, "status_code", 500)
message = getattr(err, "messages", "uncaught exception")
return error(code, [dict(message=message, details=str(err))]) | 0.506591 | 0.063251 |
import logging
import sys
from abc import ABC, abstractmethod
from filelock import SoftFileLock
from disco.storage.db import create_engine
from disco.storage.ingesters import OutputIngester, dump_storage_index
from disco.storage.outputs import get_simulation_output
from disco.storage.parsers import OutputParser
logger = logging.getLogger(__name__)
class PipelineStep(ABC):
    """Abstract pipeline step; concrete steps implement `execute`."""

    @abstractmethod
    def execute(self, data):
        """Run this step on `data` and return its result.

        Fix: the original declared this as `@staticmethod def execute(self)`,
        which neither matched the `(self, data)` signature every subclass
        implements nor enforced overriding; `@abstractmethod` does both.
        """
class PipelineBase(ABC):
    """Abstract pipeline: holds the target database and defines `run`."""

    def __init__(self, database):
        # Database location shared by the pipeline's steps.
        self.database = database

    @abstractmethod
    def run(self, data):
        """Entry point: execute the pipeline steps against `data`."""
class InitializationStep(PipelineStep):
    """A step for initializing and preparing data required by pipeline."""
    def execute(self, data):
        """Validate the task name and pass `data` through unchanged."""
        logger.info("Initializing data pipeline.")
        if not self.validate_task(data):
            task_name = data["task_name"]
            logger.error(f"Task '{task_name}' already exists, please try another --task-name.")
            # A duplicate task name aborts the whole process, not just this step.
            sys.exit(1)
        return data
    def validate_task(self, data):
        """Return False if task_name already exists in the task table, True otherwise."""
        # No database configured means there is nothing to collide with.
        if not data["database"]:
            return True
        engine = create_engine(data["database"])
        with engine.connect() as conn:
            queryset = conn.execute("SELECT name FROM task")
            existing_names = set([row[0] for row in queryset])
        if data["task_name"] in existing_names:
            return False
        return True
class OutputParsingStep(PipelineStep):
    """A step for parsing output of DISCO simulation/analysis."""
    def __init__(self, output):
        # Resolve the concrete output handler for the given output location.
        self.output = get_simulation_output(output)
    def execute(self, data):
        """Parse the simulation output and return the parsed result."""
        parser = OutputParser(
            task_name=data["task_name"],
            model_inputs=data["model_inputs"],
            notes=data["notes"],
        )
        result = parser.parse(output=self.output)
        return result
class ResultIngestionStep(PipelineStep):
    """A step for ingesting parsed result into database."""
    def __init__(self, database):
        self.database = database
    def execute(self, data):
        """Ingest parsed data into the database; return the storage indexes."""
        logger.info("Ingesting results into database.")
        # Serialize concurrent writers with a lock file next to the database;
        # SoftFileLock raises a timeout error after waiting up to one hour.
        lock_file = self.database + ".lock"
        with SoftFileLock(lock_file=lock_file, timeout=3600):
            ingester = OutputIngester(database=self.database)
            indexes = ingester.ingest(data=data)
        return indexes
class FinalizatonStep(PipelineStep):
    """A step for closing the pipeline.

    NOTE(review): the class name misspells 'Finalization'; kept as-is because
    the name is part of the public API used by callers.
    """
    def __init__(self, output):
        self.output = output
    def execute(self, data):
        """Store indexes of db storage into a JSON file."""
        logger.info("Closing data ingestion pipeline.")
        dump_storage_index(output=self.output, indexes=data)
class StoragePipeline(PipelineBase):
    """Pipeline class for parsing and ingesting DISCO simulation/analysis results."""
    def run(self, data):
        """Execute pipeline steps in sequential order."""
        # 1) validate the task name (exits the process on duplicates)
        step1 = InitializationStep()
        valid_data = step1.execute(data=data)
        # 2) parse the simulation output
        step2 = OutputParsingStep(output=data["output"])
        result = step2.execute(data=valid_data)
        # 3) ingest parsed results under a file lock
        step3 = ResultIngestionStep(database=data["database"])
        indexes = step3.execute(data=result)
        # 4) dump the storage indexes to a JSON file
        step4 = FinalizatonStep(output=data["output"])
        step4.execute(data=indexes)
logger.info(f"Done! Tables were ingested into database - {data['database']}") | disco/storage/core.py | import logging
import sys
from abc import ABC, abstractmethod
from filelock import SoftFileLock
from disco.storage.db import create_engine
from disco.storage.ingesters import OutputIngester, dump_storage_index
from disco.storage.outputs import get_simulation_output
from disco.storage.parsers import OutputParser
logger = logging.getLogger(__name__)
class PipelineStep(ABC):
"""Abstract pipeline step"""
@staticmethod
def execute(self):
"""Execute the pipeline step"""
class PipelineBase(ABC):
"""Abstract pipeline class"""
def __init__(self, database):
self.database = database
@abstractmethod
def run(self, data):
"""Entry point of running pipeline steps"""
class InitializationStep(PipelineStep):
"""A step for initializing and preparing data required by pipeline"""
def execute(self, data):
"""Execute this step to initialize pipeline"""
logger.info("Initializing data pipeline.")
if not self.validate_task(data):
task_name = data["task_name"]
logger.error(f"Task '{task_name}' already exists, please try another --task-name.")
sys.exit(1)
return data
def validate_task(self, data):
"""Return False if task_name already """
if not data["database"]:
return True
engine = create_engine(data["database"])
with engine.connect() as conn:
queryset = conn.execute("SELECT name FROM task")
existing_names = set([row[0] for row in queryset])
if data["task_name"] in existing_names:
return False
return True
class OutputParsingStep(PipelineStep):
"""A step for parsing output of DISCO simulation/analysis """
def __init__(self, output):
self.output = get_simulation_output(output)
def execute(self, data):
"""Parse desired data from output"""
parser = OutputParser(
task_name=data["task_name"],
model_inputs=data["model_inputs"],
notes=data["notes"],
)
result = parser.parse(output=self.output)
return result
class ResultIngestionStep(PipelineStep):
"""A step for ingesting parsed result into database"""
def __init__(self, database):
self.database = database
def execute(self, data):
"""Ingest parsed data into database"""
logger.info("Ingesting results into database.")
lock_file = self.database + ".lock"
with SoftFileLock(lock_file=lock_file, timeout=3600):
ingester = OutputIngester(database=self.database)
indexes = ingester.ingest(data=data)
return indexes
class FinalizatonStep(PipelineStep):
"""A step for closing pipeline"""
def __init__(self, output):
self.output = output
def execute(self, data):
"""Store indexes of db storage into JSON file"""
logger.info("Closing data ingestion pipeline.")
dump_storage_index(output=self.output, indexes=data)
class StoragePipeline(PipelineBase):
"""Pipeline class for parsing and ingesting DISCO simulation/analysis results"""
def run(self, data):
"""Execute pipeline steps in sequential order"""
step1 = InitializationStep()
valid_data = step1.execute(data=data)
step2 = OutputParsingStep(output=data["output"])
result = step2.execute(data=valid_data)
step3 = ResultIngestionStep(database=data["database"])
indexes = step3.execute(data=result)
step4 = FinalizatonStep(output=data["output"])
step4.execute(data=indexes)
logger.info(f"Done! Tables were ingested into database - {data['database']}") | 0.556761 | 0.314314 |
import sys
from wrap_smhi_api import *
from collections import defaultdict

# Fetch SMHI station temperature series and keep only stations with
# near-complete daily coverage of [start_year, end_year]. Selected station
# blocks go to stdout; progress and diagnostics go to stderr.
# Data points are assumed shaped ((year, month, day), temperature) -- as the
# unpacking below requires; produced by wrap_smhi_api.get_data.
start_year = 1961
end_year = 2018
margin = 100 # Allowed missed data points (NOTE(review): appears unused below)
min_no_of_data_points = (end_year - start_year + 1)*365.25 - 200
stations = get_stations()
all_lines = [] # Text lines to output
num_saved_stations = 0
for i, name in stations:
    sys.stderr.write(str(i) + "; " + name + '\n')
    data = list(get_data(i))
    # Stations with fewer than two points cannot be screened at all.
    if len(data)>1:
        sys.stderr.write("First data point: " + str(data[0]) + "\n")
        sys.stderr.write("Last data point: " + str(data[-1]) + "\n")
        sys.stderr.write("Number of data points: " + str(len(data)) + "\n")
        (y1, m1, d1), t1 = data[0]
        (y2, m2, d2), t2 = data[-1]
        datapoints_per_year = len(data)//(y2-y1+1)
        sys.stderr.write("Average data points per year: " + str(datapoints_per_year) + "\n")
        # Skip stations with too few data points overall
        if len(data) < min_no_of_data_points: continue
        # Skip stations that start measuring later than start year
        (y, m, d), t = data[0]
        if y>start_year: continue
        station_lines = [] # Text lines to output for this station
        station_lines.append("{} {}".format(i, name))
        for (y, m, d), t in data:
            # Use only data points between start and end year, inclusive
            if y>=start_year and y<=end_year:
                station_lines.append("{} {} {} {}".format(y, m, d, t))
        (y1, m1, d1, t1) = station_lines[1].split() # First data point
        (y2, m2, d2, t2) = station_lines[-1].split() # Last data point
        # Skip stations that don't start and end at the desired dates
        if (y1, m1, d1)==(str(start_year), "1", "1") and (y2, m2, d2)==(str(end_year), "12", "31"):
            # Skip stations with too few data points in the desired years
            if len(station_lines)>min_no_of_data_points:
                all_lines += station_lines
                all_lines.append('')
                num_saved_stations += 1
                sys.stderr.write("Saved\n")
print('\n'.join(all_lines))
sys.stderr.write("Saved a total of " + str(num_saved_stations) + " stations") | for_developers/smhidata/fetchdata.py | import sys
from wrap_smhi_api import *
from collections import defaultdict
start_year = 1961
end_year = 2018
margin = 100 # Allowed missed data points
min_no_of_data_points = (end_year - start_year + 1)*365.25 - 200
stations = get_stations()
all_lines = [] # Text lines to output
num_saved_stations = 0
for i, name in stations:
sys.stderr.write(str(i) + "; " + name + '\n')
data = list(get_data(i))
if len(data)>1:
sys.stderr.write("First data point: " + str(data[0]) + "\n")
sys.stderr.write("Last data point: " + str(data[-1]) + "\n")
sys.stderr.write("Number of data points: " + str(len(data)) + "\n")
(y1, m1, d1), t1 = data[0]
(y2, m2, d2), t2 = data[-1]
datapoints_per_year = len(data)//(y2-y1+1)
sys.stderr.write("Average data points per year: " + str(datapoints_per_year) + "\n")
# Skip stations with too few data points overall
if len(data) < min_no_of_data_points: continue
# Skip stations that start measuring later than start year
(y, m, d), t = data[0]
if y>start_year: continue
station_lines = [] # Text lines to output for this station
station_lines.append("{} {}".format(i, name))
for (y, m, d), t in data:
# Use only data points between start and end year, inclusive
if y>=start_year and y<=end_year:
station_lines.append("{} {} {} {}".format(y, m, d, t))
(y1, m1, d1, t1) = station_lines[1].split() # First data point
(y2, m2, d2, t2) = station_lines[-1].split() # Last data point
# Skip stations that don't start and end at the desired dates
if (y1, m1, d1)==(str(start_year), "1", "1") and (y2, m2, d2)==(str(end_year), "12", "31"):
# Skip stations with too few data points in the desired years
if len(station_lines)>min_no_of_data_points:
all_lines += station_lines
all_lines.append('')
num_saved_stations += 1
sys.stderr.write("Saved\n")
print('\n'.join(all_lines))
sys.stderr.write("Saved a total of " + str(num_saved_stations) + " stations") | 0.202996 | 0.29419 |
from functools import wraps
from datetime import datetime, date
def _is_period_now(begin, end, **kwargs):
"""
Utility method. Determines if the current moment falls within the given
time period. Works with periods that fall over two days e.g. 10pm - 6am.
:param begin: time period begins in isoformat 'HH:MM:SS'
:param end: time period ends in isoformat 'HH:MM:SS'
:return: True if current moment falls between period_begins and period_ends
"""
# This makes the function testable as you can pass in your desired 'now'
# as a datetime.
if 'now' in kwargs:
now = kwargs.get('now')
else:
now = datetime.utcnow()
today = date.today().isoformat()
begins_today = datetime.fromisoformat(f'{today} {begin}')
ends_today = datetime.fromisoformat(f'{today} {end}')
return (begins_today > ends_today and
(now < ends_today or now > begins_today)) \
or (now > begins_today and now < ends_today)
def modify(timeout=0, begin='00:01:00', end='06:00:00'):
    """
    Use to wrap Flask-Caching @cache.cached decorator. Modifies the cache
    timeout during the specified period every day. At other times, the cache
    timeout will revert to whatever is default or is specified on the
    @cache.cached decorator. Example use case: set a longer timeout at night to
    save unnecessary API calls when people are asleep.
    Example::
        @cache_timeout.modify(timeout=3600, # 1 hour
                              begin='22:00:00', # 10pm
                              end='06:00:00') # 6am
        @cache.cached(timeout=60) # 1 minute
        def myFunc():
            # function body
    :param timeout: cache timeout period in seconds
    :param begin: time period begins in isoformat 'HH:MM:SS'
    :param end: time period ends in isoformat 'HH:MM:SS'
    :return: cached function with modified timeout
    """
    def decorated_function(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # NOTE(review): relies on Flask-Caching reading the
            # `cache_timeout` attribute of the decorated function at call
            # time -- confirm against the Flask-Caching version in use.
            if (_is_period_now(begin, end)):
                func.cache_timeout = timeout
            return func(*args, **kwargs)
        return wrapper
    # decorated_function is returned on the (fused) final line that follows.
return decorated_function | app/main/cache_timeout.py |
from functools import wraps
from datetime import datetime, date
def _is_period_now(begin, end, **kwargs):
"""
Utility method. Determines if the current moment falls within the given
time period. Works with periods that fall over two days e.g. 10pm - 6am.
:param begin: time period begins in isoformat 'HH:MM:SS'
:param end: time period ends in isoformat 'HH:MM:SS'
:return: True if current moment falls between period_begins and period_ends
"""
# This makes the function testable as you can pass in your desired 'now'
# as a datetime.
if 'now' in kwargs:
now = kwargs.get('now')
else:
now = datetime.utcnow()
today = date.today().isoformat()
begins_today = datetime.fromisoformat(f'{today} {begin}')
ends_today = datetime.fromisoformat(f'{today} {end}')
return (begins_today > ends_today and
(now < ends_today or now > begins_today)) \
or (now > begins_today and now < ends_today)
def modify(timeout=0, begin='00:01:00', end='06:00:00'):
"""
Use to wrap Flask-Caching @cache.cached decorator. Modifies the cache
timeout during the specified period every day. At other times, the cache
timeout will revert to whatever is default or is specified on the
@cache.cached decorator. Example use case: set a longer timeout at night to
save unnecessary API calls when people are asleep.
Example::
@cache_timeout.modify(timeout=3600, # 1 hour
begin='22:00:00', # 10pm
end='06:00:00') # 6am
@cache.cached(timeout=60) # 1 minute
def myFunc():
# function body
:param timeout: cache timeout period in seconds
:param begin: time period begins in isoformat 'HH:MM:SS'
:param end: time period ends in isoformat 'HH:MM:SS'
:return: cached function with modified timeout
"""
def decorated_function(func):
@wraps(func)
def wrapper(*args, **kwargs):
if (_is_period_now(begin, end)):
func.cache_timeout = timeout
return func(*args, **kwargs)
return wrapper
return decorated_function | 0.830766 | 0.529811 |
import sys
class HDL_Signal:
    """Holds all the info for an HDL signal."""
    def __init__(self, name, msb, lsb, array_ind):
        # `name` is a dot-separated hierarchical path; split off the leaf name.
        self.local_name = name.split('.')[-1]
        self.hierarchy = name[0:-len(self.local_name + '.')]
        self.lsb = lsb
        self.msb = msb
        self.array_ind = array_ind  # index within an array of signals, or None
        self.isff = False           # set elsewhere when the signal is a flip-flop
        self.isinput = False        # set elsewhere when the signal is an input
        self.conn = []              # connected signals (see add_conn)
        self.vcd_symbol = None      # symbol id in the VCD dump; None if not simulated
        self.tv_index = 0           # cursor into the VCD time-value list (get_time_value)
        self.visited = False        # traversal bookkeeping flag
def width(self):
return (self.msb - self.lsb + 1)
def is_simulated(self):
if self.vcd_symbol:
return True
else:
return False
def basename(self):
return (self.hierarchy + '.' + self.local_name)
def fullname(self):
if self.array_ind is not None:
return (self.basename() + '[' + str(self.array_ind) + ']')
else:
return self.basename()
def sliced_fullname(self):
return self.fullname() + "[" + str(self.msb) + ":" + str(self.lsb) + "]"
    def connections(self):
        """Return the (mutable, shared) list of connected signals."""
        return self.conn
def get_sorted_time_values(self, vcd):
# Check signal was simulated
assert self.is_simulated()
# Get all time values
tvs = vcd[self.vcd_symbol]['tv']
# Sort time values
return sorted(tvs, key=lambda x: x[0])
def get_time_value_at_index(self, vcd, index):
# Check signal was simulated
assert self.is_simulated()
# Get time value at index
tv = vcd[self.vcd_symbol]['tv'][index]
return tv[0], tv[1]
    def get_value_at_time(self, vcd, time):
        """Return the signal's value at exactly `time`, or the most recent
        value before it; exits the process when no value exists for `time`."""
        # Check signal was simulated
        assert self.is_simulated()
        # Linear scan; entries are presumably stored in increasing time order
        # (see get_sorted_time_values) -- TODO confirm.
        curr_time = None
        curr_value = None
        for i in range(len(vcd[self.vcd_symbol]['tv'])):
            curr_time = vcd[self.vcd_symbol]['tv'][i][0]
            curr_value = vcd[self.vcd_symbol]['tv'][i][1]
            if curr_time == time:
                return curr_value
            elif curr_time > time:
                if i > 0:
                    # Hold the last value recorded before `time`.
                    return vcd[self.vcd_symbol]['tv'][i - 1][1]
                else:
                    # `time` precedes the first recorded change.
                    print("ERROR: no value for time (%d)" % time)
                    sys.exit(-1)
        # Ran past the end: the final value holds through `time`.
        # NOTE(review): a falsy final value (e.g. '' or 0) also takes the
        # error path below -- likely intended as a None check.
        if curr_value:
            return curr_value
        else:
            print("ERROR: no value for time (%d)" % time)
            sys.exit(-1)
def get_time_value(self, vcd, time_limit):
# Check signal was simulated
assert self.is_simulated()
# Get all time values
tvs = vcd[self.vcd_symbol]['tv']
# Check if reached last index of time values
if (self.tv_index == len(tvs)):
return None, None
# Get current time value
current_tv = tvs[self.tv_index]
current_time = current_tv[0]
current_value = current_tv[1]
# Increment time value index and return value
if current_time <= time_limit:
self.tv_index += 1
return current_time, current_value
else:
return None, None
    def add_conn(self, c):
        """Record a connection from this signal to signal `c`."""
        self.conn.append(c)
    def debug_print(self):
        """Print a human-readable summary of this signal to stdout."""
        print("\tSignal: %s" % (self.fullname()))
        print("\t\tHierarchy: %s" % (self.hierarchy))
        print("\t\tLocal Name: %s" % (self.local_name))
        print("\t\tLSB: %d" % (self.lsb))
        print("\t\tMSB: %d" % (self.msb))
        print("\t\tWidth: %d" % (self.width()))
        print("\t\tIs Flip-Flop: %s" % (self.isff))
        print("\t\tIs Input: %s" % (self.isinput))
        print("\t\tVCD Symbol: %s" % (self.vcd_symbol))
        print("\t\tTV Index: %d" % (self.tv_index))
    def debug_print_wtvs(self, vcd):
        """Print the signal summary followed by its VCD time/value pairs."""
        self.debug_print()
        if self.vcd_symbol:
            tvs = vcd[self.vcd_symbol]['tv']
            print(" Time Values (%d):" % len(tvs))
            # The per-pair print lives on the (fused) final line that follows.
            for tv in tvs:
print(" %4d -- %s" % (tv[0], tv[1])) | bomberman/hdl_signal.py |
import sys
class HDL_Signal:
"""Holds all the info for an HDL signal."""
def __init__(self, name, msb, lsb, array_ind):
self.local_name = name.split('.')[-1]
self.hierarchy = name[0:-len(self.local_name + '.')]
self.lsb = lsb
self.msb = msb
self.array_ind = array_ind
self.isff = False
self.isinput = False
self.conn = []
self.vcd_symbol = None
self.tv_index = 0
self.visited = False
def width(self):
return (self.msb - self.lsb + 1)
def is_simulated(self):
if self.vcd_symbol:
return True
else:
return False
def basename(self):
return (self.hierarchy + '.' + self.local_name)
def fullname(self):
if self.array_ind is not None:
return (self.basename() + '[' + str(self.array_ind) + ']')
else:
return self.basename()
def sliced_fullname(self):
return self.fullname() + "[" + str(self.msb) + ":" + str(self.lsb) + "]"
def connections(self):
return self.conn
def get_sorted_time_values(self, vcd):
# Check signal was simulated
assert self.is_simulated()
# Get all time values
tvs = vcd[self.vcd_symbol]['tv']
# Sort time values
return sorted(tvs, key=lambda x: x[0])
def get_time_value_at_index(self, vcd, index):
# Check signal was simulated
assert self.is_simulated()
# Get time value at index
tv = vcd[self.vcd_symbol]['tv'][index]
return tv[0], tv[1]
def get_value_at_time(self, vcd, time):
# Check signal was simulated
assert self.is_simulated()
# Iterate over time values
curr_time = None
curr_value = None
for i in range(len(vcd[self.vcd_symbol]['tv'])):
curr_time = vcd[self.vcd_symbol]['tv'][i][0]
curr_value = vcd[self.vcd_symbol]['tv'][i][1]
if curr_time == time:
return curr_value
elif curr_time > time:
if i > 0:
return vcd[self.vcd_symbol]['tv'][i - 1][1]
else:
print("ERROR: no value for time (%d)" % time)
sys.exit(-1)
if curr_value:
return curr_value
else:
print("ERROR: no value for time (%d)" % time)
sys.exit(-1)
def get_time_value(self, vcd, time_limit):
# Check signal was simulated
assert self.is_simulated()
# Get all time values
tvs = vcd[self.vcd_symbol]['tv']
# Check if reached last index of time values
if (self.tv_index == len(tvs)):
return None, None
# Get current time value
current_tv = tvs[self.tv_index]
current_time = current_tv[0]
current_value = current_tv[1]
# Increment time value index and return value
if current_time <= time_limit:
self.tv_index += 1
return current_time, current_value
else:
return None, None
def add_conn(self, c):
self.conn.append(c)
def debug_print(self):
print("\tSignal: %s" % (self.fullname()))
print("\t\tHierarchy: %s" % (self.hierarchy))
print("\t\tLocal Name: %s" % (self.local_name))
print("\t\tLSB: %d" % (self.lsb))
print("\t\tMSB: %d" % (self.msb))
print("\t\tWidth: %d" % (self.width()))
print("\t\tIs Flip-Flop: %s" % (self.isff))
print("\t\tIs Input: %s" % (self.isinput))
print("\t\tVCD Symbol: %s" % (self.vcd_symbol))
print("\t\tTV Index: %d" % (self.tv_index))
def debug_print_wtvs(self, vcd):
self.debug_print()
if self.vcd_symbol:
tvs = vcd[self.vcd_symbol]['tv']
print(" Time Values (%d):" % len(tvs))
for tv in tvs:
print(" %4d -- %s" % (tv[0], tv[1])) | 0.484624 | 0.35407 |
import numpy as np
import sys
import math
class kmean:
    """Simple k-means clustering over a 2-D array of samples."""
    def __init__(self, data) -> None:
        # Samples as a 2-D array: one row per sample, one column per feature.
        self.data = np.array(data)
        self.p1set = []  # NOTE(review): not used anywhere in this file
        self.p2set = []  # NOTE(review): not used anywhere in this file
def normalize(self, data):
norm = np.linalg.norm(data)
if norm == 0:
return data
return data/norm
    def giveVAlue(self, data):
        """Replace the training data (method-name typo kept for API compatibility).

        NOTE(review): unlike __init__, this does not wrap `data` in np.array --
        callers presumably pass an ndarray already; confirm.
        """
        self.data = data
def initalizeRandom(self, kn):
self.point = np.random.random((kn, len(self.data[0])))
for i in range(len(self.data[0])):
self.point[:,i] = (self.point[:,i] * (np.max(self.data[:,i]))-np.min(self.data[:,i])) + np.min(self.data[:,i])
def assignk(self, first, data):
va = False
for i in range(len(data)):
pt = np.array(data[i])
if first:
min = sys.maxsize
else:
min = np.linalg.norm(self.point[self.assig[i]] - pt)
for y in range(len(self.point)):
k = self.point[y]
if np.linalg.norm(k - pt) < min:
va = True
min = np.linalg.norm(k - pt)
self.assig[i] = y
return va
def fit(self, kn):
self.initalizeRandom(kn)
move = True
self.assig = np.zeros((len(self.data)), int)
self.assignk(True, self.data)
while move:
for i in range(len(self.point)):
sum = np.zeros((len(self.data[0])))
num = 0
for y in range(len(self.assig)):
if i == self.assig[y]:
if(not math.isnan(np.sum(self.data[y]))):
sum += self.data[y]
num += 1
if num > 0:
self.point[i] = sum/num
move = self.assignk(False, self.data)
return self.assig
def predic(self, data):
data = np.array(data)
self.assig = np.zeros((len(data)), int)
self.assignk(True, data)
return self.assig | ex2function.py | import numpy as np
import sys
import math
class kmean:
def __init__(self, data) -> None:
self.data = np.array(data)
self.p1set = []
self.p2set = []
def normalize(self, data):
norm = np.linalg.norm(data)
if norm == 0:
return data
return data/norm
def giveVAlue(self, data):
self.data = data
def initalizeRandom(self, kn):
self.point = np.random.random((kn, len(self.data[0])))
for i in range(len(self.data[0])):
self.point[:,i] = (self.point[:,i] * (np.max(self.data[:,i]))-np.min(self.data[:,i])) + np.min(self.data[:,i])
def assignk(self, first, data):
va = False
for i in range(len(data)):
pt = np.array(data[i])
if first:
min = sys.maxsize
else:
min = np.linalg.norm(self.point[self.assig[i]] - pt)
for y in range(len(self.point)):
k = self.point[y]
if np.linalg.norm(k - pt) < min:
va = True
min = np.linalg.norm(k - pt)
self.assig[i] = y
return va
def fit(self, kn):
self.initalizeRandom(kn)
move = True
self.assig = np.zeros((len(self.data)), int)
self.assignk(True, self.data)
while move:
for i in range(len(self.point)):
sum = np.zeros((len(self.data[0])))
num = 0
for y in range(len(self.assig)):
if i == self.assig[y]:
if(not math.isnan(np.sum(self.data[y]))):
sum += self.data[y]
num += 1
if num > 0:
self.point[i] = sum/num
move = self.assignk(False, self.data)
return self.assig
def predic(self, data):
data = np.array(data)
self.assig = np.zeros((len(data)), int)
self.assignk(True, data)
return self.assig | 0.096211 | 0.415432 |
from wake.utils import *
import jshlib
class Store(object):
"""
(#SPC-arch.store): The default pkg and module storage boject.
Stores objects on the local filesystem.
"""
def __init__(self, base, store_dir):
self.store_dir = store_dir
self.defined = pjoin(self.store_dir, "pkgsDefined")
self.pkgs = pjoin(self.store_dir, "pkgs")
self.pkgs_local = path.join(base, ".wake", "pkgsLocal")
self.retrievals = path.join(base, ".wake", "retrieve")
def init_store(self):
os.makedirs(self.pkgs_local, exist_ok=True)
os.makedirs(self.defined, exist_ok=True)
os.makedirs(self.pkgs, exist_ok=True)
def remove_store(self):
rmtree(self.pkgs_local)
rmtree(self.defined)
rmtree(self.pkgs)
if os.path.exists(self.retrievals):
rmtree(self.retrievals)
def get_retrieval_dir(self):
rdir = self.retrievals
if os.path.exists(rdir):
rmtree(rdir)
copy_fsentry(
wakeConstantsPath,
path.join(rdir, DIR_WAKE, FILE_CONSTANTS),
)
return self.retrievals
def add_pkg(self, pkg_config, simple_pkg, local=False):
if local:
pcache = pjoin(self.pkgs_local, simple_pkg.pkg_ver)
else:
pcache = pjoin(self.pkgs, simple_pkg.pkg_ver)
if load_pkg_meta(pcache):
return
os.mkdir(pcache)
for fsentry_rel in simple_pkg.get_fsentries():
copy_fsentry(pkg_config.path_abs(fsentry_rel),
pjoin(pcache, fsentry_rel))
copy_fsentry(pkg_config.pkg_fingerprint, pcache)
meta = StoreMeta(state=S_DECLARED)
jsondumpf(pkg_meta_path(pcache), meta.to_dict())
def get_pkg_path(self, pkg_ver, def_okay=False):
pkg_str = str(pkg_ver)
pkgPath = pjoin(self.pkgs_local, pkg_str)
if load_pkg_meta(pkgPath):
return pkgPath
pkgPath = pjoin(self.pkgs, pkg_str)
if load_pkg_meta(pkgPath):
return pkgPath
if def_okay:
pkgPath = pjoin(self.defined, pkg_str)
if load_pkg_meta(pkgPath):
return pkgPath
return None
class StoreMeta(object):
def __init__(self, state):
self.state = state
@classmethod
def from_dict(cls, dct):
return cls(state=dct['state'], )
def to_dict(self):
return {
'state': self.state,
}
def load_pkg_meta(pcache_path):
if not os.path.exists(pcache_path):
return None
try:
dct = jsonloadf(pkg_meta_path(pcache_path))
return StoreMeta.from_dict(dct)
except (json.decoder.JSONDecodeError, KeyError):
# There was something at the path, but it was a partial pkg. It must be
# removed.
shutil.rmtree(pcache_path)
return None
def pkg_meta_path(pcache_path):
return path.join(pcache_path, DIR_WAKE, FILE_STORE_META) | oldwake/store.py |
from wake.utils import *
import jshlib
class Store(object):
"""
(#SPC-arch.store): The default pkg and module storage boject.
Stores objects on the local filesystem.
"""
def __init__(self, base, store_dir):
self.store_dir = store_dir
self.defined = pjoin(self.store_dir, "pkgsDefined")
self.pkgs = pjoin(self.store_dir, "pkgs")
self.pkgs_local = path.join(base, ".wake", "pkgsLocal")
self.retrievals = path.join(base, ".wake", "retrieve")
def init_store(self):
os.makedirs(self.pkgs_local, exist_ok=True)
os.makedirs(self.defined, exist_ok=True)
os.makedirs(self.pkgs, exist_ok=True)
def remove_store(self):
rmtree(self.pkgs_local)
rmtree(self.defined)
rmtree(self.pkgs)
if os.path.exists(self.retrievals):
rmtree(self.retrievals)
def get_retrieval_dir(self):
rdir = self.retrievals
if os.path.exists(rdir):
rmtree(rdir)
copy_fsentry(
wakeConstantsPath,
path.join(rdir, DIR_WAKE, FILE_CONSTANTS),
)
return self.retrievals
def add_pkg(self, pkg_config, simple_pkg, local=False):
if local:
pcache = pjoin(self.pkgs_local, simple_pkg.pkg_ver)
else:
pcache = pjoin(self.pkgs, simple_pkg.pkg_ver)
if load_pkg_meta(pcache):
return
os.mkdir(pcache)
for fsentry_rel in simple_pkg.get_fsentries():
copy_fsentry(pkg_config.path_abs(fsentry_rel),
pjoin(pcache, fsentry_rel))
copy_fsentry(pkg_config.pkg_fingerprint, pcache)
meta = StoreMeta(state=S_DECLARED)
jsondumpf(pkg_meta_path(pcache), meta.to_dict())
def get_pkg_path(self, pkg_ver, def_okay=False):
pkg_str = str(pkg_ver)
pkgPath = pjoin(self.pkgs_local, pkg_str)
if load_pkg_meta(pkgPath):
return pkgPath
pkgPath = pjoin(self.pkgs, pkg_str)
if load_pkg_meta(pkgPath):
return pkgPath
if def_okay:
pkgPath = pjoin(self.defined, pkg_str)
if load_pkg_meta(pkgPath):
return pkgPath
return None
class StoreMeta(object):
def __init__(self, state):
self.state = state
@classmethod
def from_dict(cls, dct):
return cls(state=dct['state'], )
def to_dict(self):
return {
'state': self.state,
}
def load_pkg_meta(pcache_path):
if not os.path.exists(pcache_path):
return None
try:
dct = jsonloadf(pkg_meta_path(pcache_path))
return StoreMeta.from_dict(dct)
except (json.decoder.JSONDecodeError, KeyError):
# There was something at the path, but it was a partial pkg. It must be
# removed.
shutil.rmtree(pcache_path)
return None
def pkg_meta_path(pcache_path):
return path.join(pcache_path, DIR_WAKE, FILE_STORE_META) | 0.430746 | 0.106505 |
from collections import defaultdict
from functools import lru_cache
from typing import Dict, List
import logging
from hardware.config import Config
from hardware.device import WrappedNode
from hardware.fdt import FdtParser
from hardware.memory import Region
def get_macro_str(macro: str) -> str:
''' Helper function that returns the appropriate C preprocessor line for a given macro '''
if macro is None:
return ''
if macro[0] == '!':
return '#ifndef ' + macro[1:]
return '#ifdef ' + macro
def get_endif(macro: str) -> str:
''' Helper function that returns the appropriate endif line for a given macro '''
if macro is None:
return ''
return '#endif /* {} */'.format(macro)
class KernelRegionGroup:
''' wraps a contiguous region of memory that is mapped into the kernel. '''
def __init__(self, region: Region, kernel_name: str, page_bits: int, max_size: int, condition_macro: str = None, user_ok: bool = False):
self.macro = condition_macro
self.desc = region.owner.path
self.kernel_offset = -1
self.page_bits = page_bits
self.labels = {} # dict of label => offset within region.
self.user_ok = user_ok
region.size = min(max_size, region.size)
aligned = region.align_size(page_bits)
self.size = aligned.size
self.base = aligned.base
self.regions = aligned.make_chunks(1 << page_bits)
self.labels[kernel_name] = region.base - aligned.base
def has_macro(self):
''' True if this group has a macro '''
return self.macro is not None
def take_labels(self, other_group: 'KernelRegionGroup'):
''' Take another group's labels and add them to our own '''
if self != other_group:
raise ValueError('need to have equal size and base to take labels')
for (k, v) in other_group.labels.items():
self.labels[k] = v
self.desc += ', ' + other_group.desc
def get_macro(self):
''' Get the #ifdef line for this region group '''
return get_macro_str(self.macro)
def get_endif(self):
''' Get the #endif line for this region group '''
return get_endif(self.macro)
def set_kernel_offset(self, offset):
''' Set the base offset that this region is mapped at in the kernel.
Returns the next free address in the kernel (i.e. base offset + region size) '''
self.kernel_offset = offset
return offset + self.size
def get_labelled_addresses(self):
''' Get a dict of address -> label for the kernel '''
ret = {}
for (k, v) in self.labels.items():
ret[v + self.kernel_offset] = k
return ret
def get_map_offset(self, reg):
''' Get the offset that the given region is mapped at. '''
index = self.regions.index(reg)
return self.kernel_offset + (index * (1 << self.page_bits))
def get_desc(self):
''' Get this region group's description '''
return self.desc
def __repr__(self):
return 'KernelRegion(reg={},labels={})'.format(self.regions, self.labels)
def __eq__(self, other):
return other.base == self.base and other.size == self.size
class KernelInterrupt:
''' Represents an interrupt that is used by the kernel. '''
def __init__(self, label: str, irq: int, prio: int = 0, sel_macro: str = None, false_irq: int = -1, enable_macro: str = None, desc: str = None):
self.label = label
self.irq = irq
self.prio = prio
self.sel_macro = sel_macro
self.false_irq = false_irq
self.enable_macro = enable_macro
self.desc = desc
def get_enable_macro_str(self):
''' Get the enable macro #ifdef line '''
return get_macro_str(self.enable_macro)
def has_enable(self):
''' True if this interrupt has an enable macro '''
return self.enable_macro is not None
def get_enable_endif(self):
''' Get the enable macro #endif line '''
return get_endif(self.enable_macro)
def get_sel_macro_str(self):
''' Get the select macro #ifdef line '''
return get_macro_str(self.sel_macro)
def has_sel(self):
''' True if this interrupt has a select macro '''
return self.sel_macro is not None
def get_sel_endif(self):
''' Get the select macro #endif line '''
return get_endif(self.sel_macro)
def __repr__(self):
return 'KernelInterrupt(label={},irq={},sel_macro={},false_irq={})'.format(self.label, self.irq, self.sel_macro, self.false_irq)
class DeviceRule:
''' Represents a single rule in hardware.yml '''
def __init__(self, rule: dict, config: Config):
self.rule = rule
self.regions: Dict[int, Dict] = {}
self.interrupts = rule.get('interrupts', {})
self.config = config
for reg in rule.get('regions', []):
self.regions[reg['index']] = reg
@lru_cache()
def get_regions(self, node: WrappedNode) -> List[KernelRegionGroup]:
''' Returns a list of KernelRegionGroups that this rule specifies should be mapped into the kernel for this device. '''
ret = []
regions = node.get_regions()
for (i, rule) in self.regions.items():
if i >= len(regions):
# XXX: skip this rule silently
continue
reg = regions[i]
kernel_name = rule['kernel']
user = rule.get('user', False)
macro = rule.get('macro', None)
max_size = 1 << self.config.get_page_bits()
if 'kernel_size' in rule:
max_size = rule['kernel_size']
elif max_size < reg.size:
logging.warning(
"Only mapping {}/{} bytes from node {}, region {}. Set kernel_size in YAML to silence.".format(max_size, reg.size, node.path, i))
ret.append(KernelRegionGroup(reg, kernel_name,
self.config.get_page_bits(), max_size, macro, user))
return ret
@lru_cache()
def get_interrupts(self, tree: FdtParser, node: WrappedNode) -> List[KernelInterrupt]:
''' Returns a list of KernelInterrupts that this rule says are used by the kernel for this device. '''
ret = []
interrupts = node.get_interrupts(tree)
for name, rule in self.interrupts.items():
irq_desc = '{} generated from {}'.format(name, node.path)
if type(rule) == dict:
en_macro = rule.get('enable_macro', None)
if rule['index'] >= len(interrupts):
# XXX: skip this rule silently.
continue
defaultIrq = interrupts[rule['index']]
sel_macro = rule.get('sel_macro', None)
falseIrq = interrupts[rule['undef_index']] if 'undef_index' in rule else -1
prio = rule.get('priority', 0)
irq = KernelInterrupt(name, defaultIrq, prio, sel_macro,
falseIrq, en_macro, desc=irq_desc)
elif type(rule) == int:
if rule >= len(interrupts):
# XXX: skip this rule silently.
continue
irq = KernelInterrupt(name, interrupts[rule], desc=irq_desc)
else: # rule == 'boot-cpu'
affinities = node.get_interrupt_affinities()
boot_cpu = tree.get_boot_cpu()
idx = affinities.index(boot_cpu)
irq = KernelInterrupt(name, interrupts[idx])
ret.append(irq)
return ret
class HardwareYaml:
''' Represents the hardware configuration file '''
def __init__(self, yaml: dict, config: Config):
self.rules = {}
for dev in yaml['devices']:
rule = DeviceRule(dev, config)
for compat in dev['compatible']:
self.rules[compat] = rule
def get_rule(self, device: WrappedNode) -> DeviceRule:
''' Returns the matching DeviceRule for this device. '''
if not device.has_prop('compatible'):
raise ValueError(
'Not sure what to do with node {} with no compatible!'.format(device.path))
for compat in device.get_prop('compatible').strings:
if compat in self.rules:
return self.rules[compat]
raise ValueError('Failed to match compatibles "{}" for node {}!'.format(
', '.join(device.get_prop('compatible').strings), device.path))
def get_matched_compatible(self, device: WrappedNode) -> str:
''' Returns the best matching compatible string for this device '''
if not device.has_prop('compatible'):
raise ValueError(
'Not sure what to do with node {} with no compatible!'.format(device.path))
for compat in device.get_prop('compatible').strings:
if compat in self.rules:
return compat
return None | kernel/tools/hardware/utils/rule.py |
from collections import defaultdict
from functools import lru_cache
from typing import Dict, List
import logging
from hardware.config import Config
from hardware.device import WrappedNode
from hardware.fdt import FdtParser
from hardware.memory import Region
def get_macro_str(macro: str) -> str:
''' Helper function that returns the appropriate C preprocessor line for a given macro '''
if macro is None:
return ''
if macro[0] == '!':
return '#ifndef ' + macro[1:]
return '#ifdef ' + macro
def get_endif(macro: str) -> str:
''' Helper function that returns the appropriate endif line for a given macro '''
if macro is None:
return ''
return '#endif /* {} */'.format(macro)
class KernelRegionGroup:
''' wraps a contiguous region of memory that is mapped into the kernel. '''
def __init__(self, region: Region, kernel_name: str, page_bits: int, max_size: int, condition_macro: str = None, user_ok: bool = False):
self.macro = condition_macro
self.desc = region.owner.path
self.kernel_offset = -1
self.page_bits = page_bits
self.labels = {} # dict of label => offset within region.
self.user_ok = user_ok
region.size = min(max_size, region.size)
aligned = region.align_size(page_bits)
self.size = aligned.size
self.base = aligned.base
self.regions = aligned.make_chunks(1 << page_bits)
self.labels[kernel_name] = region.base - aligned.base
def has_macro(self):
''' True if this group has a macro '''
return self.macro is not None
def take_labels(self, other_group: 'KernelRegionGroup'):
''' Take another group's labels and add them to our own '''
if self != other_group:
raise ValueError('need to have equal size and base to take labels')
for (k, v) in other_group.labels.items():
self.labels[k] = v
self.desc += ', ' + other_group.desc
def get_macro(self):
''' Get the #ifdef line for this region group '''
return get_macro_str(self.macro)
def get_endif(self):
''' Get the #endif line for this region group '''
return get_endif(self.macro)
def set_kernel_offset(self, offset):
''' Set the base offset that this region is mapped at in the kernel.
Returns the next free address in the kernel (i.e. base offset + region size) '''
self.kernel_offset = offset
return offset + self.size
def get_labelled_addresses(self):
''' Get a dict of address -> label for the kernel '''
ret = {}
for (k, v) in self.labels.items():
ret[v + self.kernel_offset] = k
return ret
def get_map_offset(self, reg):
''' Get the offset that the given region is mapped at. '''
index = self.regions.index(reg)
return self.kernel_offset + (index * (1 << self.page_bits))
def get_desc(self):
''' Get this region group's description '''
return self.desc
def __repr__(self):
return 'KernelRegion(reg={},labels={})'.format(self.regions, self.labels)
def __eq__(self, other):
return other.base == self.base and other.size == self.size
class KernelInterrupt:
''' Represents an interrupt that is used by the kernel. '''
def __init__(self, label: str, irq: int, prio: int = 0, sel_macro: str = None, false_irq: int = -1, enable_macro: str = None, desc: str = None):
self.label = label
self.irq = irq
self.prio = prio
self.sel_macro = sel_macro
self.false_irq = false_irq
self.enable_macro = enable_macro
self.desc = desc
def get_enable_macro_str(self):
''' Get the enable macro #ifdef line '''
return get_macro_str(self.enable_macro)
def has_enable(self):
''' True if this interrupt has an enable macro '''
return self.enable_macro is not None
def get_enable_endif(self):
''' Get the enable macro #endif line '''
return get_endif(self.enable_macro)
def get_sel_macro_str(self):
''' Get the select macro #ifdef line '''
return get_macro_str(self.sel_macro)
def has_sel(self):
''' True if this interrupt has a select macro '''
return self.sel_macro is not None
def get_sel_endif(self):
''' Get the select macro #endif line '''
return get_endif(self.sel_macro)
def __repr__(self):
return 'KernelInterrupt(label={},irq={},sel_macro={},false_irq={})'.format(self.label, self.irq, self.sel_macro, self.false_irq)
class DeviceRule:
''' Represents a single rule in hardware.yml '''
def __init__(self, rule: dict, config: Config):
self.rule = rule
self.regions: Dict[int, Dict] = {}
self.interrupts = rule.get('interrupts', {})
self.config = config
for reg in rule.get('regions', []):
self.regions[reg['index']] = reg
@lru_cache()
def get_regions(self, node: WrappedNode) -> List[KernelRegionGroup]:
''' Returns a list of KernelRegionGroups that this rule specifies should be mapped into the kernel for this device. '''
ret = []
regions = node.get_regions()
for (i, rule) in self.regions.items():
if i >= len(regions):
# XXX: skip this rule silently
continue
reg = regions[i]
kernel_name = rule['kernel']
user = rule.get('user', False)
macro = rule.get('macro', None)
max_size = 1 << self.config.get_page_bits()
if 'kernel_size' in rule:
max_size = rule['kernel_size']
elif max_size < reg.size:
logging.warning(
"Only mapping {}/{} bytes from node {}, region {}. Set kernel_size in YAML to silence.".format(max_size, reg.size, node.path, i))
ret.append(KernelRegionGroup(reg, kernel_name,
self.config.get_page_bits(), max_size, macro, user))
return ret
@lru_cache()
def get_interrupts(self, tree: FdtParser, node: WrappedNode) -> List[KernelInterrupt]:
''' Returns a list of KernelInterrupts that this rule says are used by the kernel for this device. '''
ret = []
interrupts = node.get_interrupts(tree)
for name, rule in self.interrupts.items():
irq_desc = '{} generated from {}'.format(name, node.path)
if type(rule) == dict:
en_macro = rule.get('enable_macro', None)
if rule['index'] >= len(interrupts):
# XXX: skip this rule silently.
continue
defaultIrq = interrupts[rule['index']]
sel_macro = rule.get('sel_macro', None)
falseIrq = interrupts[rule['undef_index']] if 'undef_index' in rule else -1
prio = rule.get('priority', 0)
irq = KernelInterrupt(name, defaultIrq, prio, sel_macro,
falseIrq, en_macro, desc=irq_desc)
elif type(rule) == int:
if rule >= len(interrupts):
# XXX: skip this rule silently.
continue
irq = KernelInterrupt(name, interrupts[rule], desc=irq_desc)
else: # rule == 'boot-cpu'
affinities = node.get_interrupt_affinities()
boot_cpu = tree.get_boot_cpu()
idx = affinities.index(boot_cpu)
irq = KernelInterrupt(name, interrupts[idx])
ret.append(irq)
return ret
class HardwareYaml:
''' Represents the hardware configuration file '''
def __init__(self, yaml: dict, config: Config):
self.rules = {}
for dev in yaml['devices']:
rule = DeviceRule(dev, config)
for compat in dev['compatible']:
self.rules[compat] = rule
def get_rule(self, device: WrappedNode) -> DeviceRule:
''' Returns the matching DeviceRule for this device. '''
if not device.has_prop('compatible'):
raise ValueError(
'Not sure what to do with node {} with no compatible!'.format(device.path))
for compat in device.get_prop('compatible').strings:
if compat in self.rules:
return self.rules[compat]
raise ValueError('Failed to match compatibles "{}" for node {}!'.format(
', '.join(device.get_prop('compatible').strings), device.path))
def get_matched_compatible(self, device: WrappedNode) -> str:
''' Returns the best matching compatible string for this device '''
if not device.has_prop('compatible'):
raise ValueError(
'Not sure what to do with node {} with no compatible!'.format(device.path))
for compat in device.get_prop('compatible').strings:
if compat in self.rules:
return compat
return None | 0.782829 | 0.205994 |
import randmac
import math
import random
FIELDSIZE = 600
uelist = []
class ESP:
def __init__(self,name,outputmode,espnum):
self.name = name
self.pos = (random.randint(16, FIELDSIZE - 16),random.randint(16, FIELDSIZE - 16))
self.outputmode = outputmode
print("Created ESP #{} name: {}, pos: {}".format(name[3:],name,self.pos))
def sniff(self):
returnable = []
for ue in uelist:
uepos = ue.get_pos()
uedist = self.get_distance(uepos)
if uedist < 30:
uename, uemac = ue.get_identifiers()
returnable.append({
"uename": uename,
"macaddr": uemac,
"rssi": round(self.get_RSSI(uedist),3),
"dist": round(self.get_distance(ue.get_pos()),3),
"pos": ue.get_pos()})
return returnable
def get_distance(self, uepos):
return ((uepos[0]-self.pos[0])**2 + (uepos[1]-self.pos[1])**2)**0.5
def get_RSSI(self,distance):
if distance < 25:
"""
distance -> rssi conversion found here:
https://www.ijcaonline.org/research/volume137/number13/jayakody-2016-ijca-909028.pdf
-(10n*log(d)+A) where:
n is path-loss exponent (usually between 1.7-3.0 for indoor spaces
d is distance
A is RSSI value at 1 meter reference distance -- estimated to be 50
from this experiment:
https://github.com/neXenio/BLE-Indoor-Positioning/wiki/RSSI-Measurements
"""
return -((10*(random.uniform(1.7,2))*math.log(distance,10))+(50))
else:
return 80.001
class UE:
def __init__(self, name):
self.name = name
self.macaddr = randmac.RandMac("00:00:00:00:00:00")
self.pos = (random.randint(0,FIELDSIZE),random.randint(0,FIELDSIZE))
self.velocity = random.randint(-5,5)
self.direction = (random.randint(-1,1),random.randint(-1,1))
uelist.append(self)
def get_pos(self):
return self.pos
def get_name(self):
return self.name
def update(self):
if not self.check_bounds():
self.velocity += random.randint(-2,2)
self.direction = (self.direction[0]-random.randint(-1,1),self.direction[1]-random.randint(-1,1))
self.pos = (self.pos[0] + self.velocity*self.direction[0],self.pos[1]+self.velocity*self.direction[1])
def get_identifiers(self):
return self.name, str(self.macaddr)
def check_bounds(self):
if (self.pos[0] < 5) or (self.pos[0] > FIELDSIZE-5) or (self.pos[1] < 5) or (self.pos[1] > FIELDSIZE-5):
self.velocity = 5
self.direction = (0-self.direction[0],0-self.direction[1])
return False
else:
return True | models.py | import randmac
import math
import random
FIELDSIZE = 600
uelist = []
class ESP:
def __init__(self,name,outputmode,espnum):
self.name = name
self.pos = (random.randint(16, FIELDSIZE - 16),random.randint(16, FIELDSIZE - 16))
self.outputmode = outputmode
print("Created ESP #{} name: {}, pos: {}".format(name[3:],name,self.pos))
def sniff(self):
returnable = []
for ue in uelist:
uepos = ue.get_pos()
uedist = self.get_distance(uepos)
if uedist < 30:
uename, uemac = ue.get_identifiers()
returnable.append({
"uename": uename,
"macaddr": uemac,
"rssi": round(self.get_RSSI(uedist),3),
"dist": round(self.get_distance(ue.get_pos()),3),
"pos": ue.get_pos()})
return returnable
def get_distance(self, uepos):
return ((uepos[0]-self.pos[0])**2 + (uepos[1]-self.pos[1])**2)**0.5
def get_RSSI(self,distance):
if distance < 25:
"""
distance -> rssi conversion found here:
https://www.ijcaonline.org/research/volume137/number13/jayakody-2016-ijca-909028.pdf
-(10n*log(d)+A) where:
n is path-loss exponent (usually between 1.7-3.0 for indoor spaces
d is distance
A is RSSI value at 1 meter reference distance -- estimated to be 50
from this experiment:
https://github.com/neXenio/BLE-Indoor-Positioning/wiki/RSSI-Measurements
"""
return -((10*(random.uniform(1.7,2))*math.log(distance,10))+(50))
else:
return 80.001
class UE:
def __init__(self, name):
self.name = name
self.macaddr = randmac.RandMac("00:00:00:00:00:00")
self.pos = (random.randint(0,FIELDSIZE),random.randint(0,FIELDSIZE))
self.velocity = random.randint(-5,5)
self.direction = (random.randint(-1,1),random.randint(-1,1))
uelist.append(self)
def get_pos(self):
return self.pos
def get_name(self):
return self.name
def update(self):
if not self.check_bounds():
self.velocity += random.randint(-2,2)
self.direction = (self.direction[0]-random.randint(-1,1),self.direction[1]-random.randint(-1,1))
self.pos = (self.pos[0] + self.velocity*self.direction[0],self.pos[1]+self.velocity*self.direction[1])
def get_identifiers(self):
return self.name, str(self.macaddr)
def check_bounds(self):
if (self.pos[0] < 5) or (self.pos[0] > FIELDSIZE-5) or (self.pos[1] < 5) or (self.pos[1] > FIELDSIZE-5):
self.velocity = 5
self.direction = (0-self.direction[0],0-self.direction[1])
return False
else:
return True | 0.394201 | 0.304778 |
import pytest
import time
from datetime import datetime, timezone
from coinflow.protocol.structs import *
def test_varint():
uint8_t = Varint(0x10)
uint16_t = Varint(0x1000)
uint32_t = Varint(0x10000000)
uint64_t = Varint(0x1000000000000000)
assert Varint.decode(uint8_t.encode()) == (0x10, 1)
assert Varint.decode(uint16_t.encode()) == (0x1000, 3)
assert Varint.decode(uint32_t.encode()) == (0x10000000, 5)
assert Varint.decode(uint64_t.encode()) == (0x1000000000000000, 7)
def test_varstr():
short = 'Hello world'
rlong = '''
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis dignissim ante id auctor ultrices. Vestibulum vitae nisl nisi. Morbi et pretium elit. Suspendisse eget cursus eros, vitae convallis massa. Suspendisse ullamcorper neque a lacus consectetur, at lobortis dui feugiat. Maecenas sit amet justo finibus, vehicula justo at, porttitor lacus. Integer mollis faucibus urna eu interdum. Maecenas aliquam dolor a eleifend ullamcorper. Nulla imperdiet ipsum eu posuere sagittis. Integer eu ultricies risus, eu faucibus eros. In ac nibh vitae quam varius dictum. Interdum et malesuada fames ac ante ipsum primis in faucibus. In nunc libero, lobortis non metus ac, lobortis tempor lacus. In justo nibh, pretium vel mollis eget, semper at nibh. Pellentesque ac elit metus. Nullam lorem turpis, congue vel sapien et, porta varius lacus.
Sed sed erat at turpis lobortis elementum quis vitae felis. In vitae lacinia lorem. Aenean pulvinar nisl velit, in lobortis justo pellentesque et. Aenean et tellus ac ligula mattis condimentum. Vestibulum efficitur tristique enim, vel rhoncus diam. Aliquam varius gravida augue. Sed suscipit elit in porta consectetur. Morbi blandit viverra consectetur. Fusce congue massa neque. In lobortis est sed congue fermentum.
Suspendisse tristique dui pharetra leo laoreet sodales. Sed scelerisque est orci. Mauris eget diam viverra mi lobortis pulvinar eget nec urna. Quisque tellus leo, ornare quis nisi sed, consequat mollis ligula. Ut porta sapien sed tellus gravida dictum. Proin rutrum tortor sed lacus fermentum, et mollis diam dictum. Pellentesque pulvinar sed nunc et sagittis. Suspendisse fringilla tortor vitae arcu euismod ullamcorper.
Fusce aliquam lectus nibh, non facilisis arcu scelerisque sit amet. Aenean a ante nunc. Integer et ligula a lacus tempus consequat. Sed finibus, neque ut aliquam rutrum, risus tellus euismod magna, eget iaculis sem tellus at elit. Pellentesque condimentum faucibus metus in euismod. Ut porttitor malesuada mi. Nulla eu urna rutrum, vehicula elit vitae, fringilla nulla. Donec dictum facilisis aliquam. Nulla facilisi. Vestibulum vel dolor quis eros eleifend vestibulum in non odio. Aliquam luctus sapien est, sit amet tempus libero imperdiet dignissim. Suspendisse pellentesque enim a diam consectetur, eget euismod risus suscipit. Morbi tempor ex erat, vitae auctor erat cursus a. Nam vitae tincidunt ipsum.
Vestibulum nec leo justo. Nulla ornare efficitur neque, vitae tincidunt justo ultricies id. Fusce congue sapien eu est molestie sodales. Donec facilisis est at augue vehicula blandit. Quisque blandit felis iaculis, vestibulum nisi non, dignissim urna. Quisque ut viverra lorem. Etiam quis elit enim. Curabitur vitae fringilla sapien, eu accumsan metus. Nunc et turpis nec massa maximus scelerisque. Mauris a vestibulum quam. Fusce sit amet leo non urna hendrerit rhoncus eu eget odio. Aliquam vel volutpat magna, nec laoreet tellus. Nullam eros lacus, placerat nec tempus eu, dictum non ipsum. Vestibulum nec rutrum dolor. Donec congue augue purus, in iaculis mauris malesuada sed. Aliquam erat volutpat.
'''
short_enc = Varstr(short)
rlong_enc = Varstr(rlong)
assert Varstr.decode(short_enc.encode()) == (short, Varint(len(short_enc)))
assert Varstr.decode(rlong_enc.encode()) == (rlong, Varint(len(rlong_enc)))
def test_netaddr():
na = Netaddr('127.0.0.1', 8333, 0)
na_c = Netaddr.from_raw(na.encode())
with pytest.raises(TypeError):
na.ip = '192.168.1.1'
with pytest.raises(TypeError):
na.port = '22'
with pytest.raises(TypeError):
na.services = '11'
assert na_c == na
def test_timestamp():
enc_ts = Timestamp(2017, 1, 1, 10, 0, 0)
assert Timestamp.from_raw(enc_ts.encode()) == enc_ts | tests/test_structs.py | import pytest
import time
from datetime import datetime, timezone
from coinflow.protocol.structs import *
def test_varint():
uint8_t = Varint(0x10)
uint16_t = Varint(0x1000)
uint32_t = Varint(0x10000000)
uint64_t = Varint(0x1000000000000000)
assert Varint.decode(uint8_t.encode()) == (0x10, 1)
assert Varint.decode(uint16_t.encode()) == (0x1000, 3)
assert Varint.decode(uint32_t.encode()) == (0x10000000, 5)
assert Varint.decode(uint64_t.encode()) == (0x1000000000000000, 7)
def test_varstr():
short = 'Hello world'
rlong = '''
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis dignissim ante id auctor ultrices. Vestibulum vitae nisl nisi. Morbi et pretium elit. Suspendisse eget cursus eros, vitae convallis massa. Suspendisse ullamcorper neque a lacus consectetur, at lobortis dui feugiat. Maecenas sit amet justo finibus, vehicula justo at, porttitor lacus. Integer mollis faucibus urna eu interdum. Maecenas aliquam dolor a eleifend ullamcorper. Nulla imperdiet ipsum eu posuere sagittis. Integer eu ultricies risus, eu faucibus eros. In ac nibh vitae quam varius dictum. Interdum et malesuada fames ac ante ipsum primis in faucibus. In nunc libero, lobortis non metus ac, lobortis tempor lacus. In justo nibh, pretium vel mollis eget, semper at nibh. Pellentesque ac elit metus. Nullam lorem turpis, congue vel sapien et, porta varius lacus.
Sed sed erat at turpis lobortis elementum quis vitae felis. In vitae lacinia lorem. Aenean pulvinar nisl velit, in lobortis justo pellentesque et. Aenean et tellus ac ligula mattis condimentum. Vestibulum efficitur tristique enim, vel rhoncus diam. Aliquam varius gravida augue. Sed suscipit elit in porta consectetur. Morbi blandit viverra consectetur. Fusce congue massa neque. In lobortis est sed congue fermentum.
Suspendisse tristique dui pharetra leo laoreet sodales. Sed scelerisque est orci. Mauris eget diam viverra mi lobortis pulvinar eget nec urna. Quisque tellus leo, ornare quis nisi sed, consequat mollis ligula. Ut porta sapien sed tellus gravida dictum. Proin rutrum tortor sed lacus fermentum, et mollis diam dictum. Pellentesque pulvinar sed nunc et sagittis. Suspendisse fringilla tortor vitae arcu euismod ullamcorper.
Fusce aliquam lectus nibh, non facilisis arcu scelerisque sit amet. Aenean a ante nunc. Integer et ligula a lacus tempus consequat. Sed finibus, neque ut aliquam rutrum, risus tellus euismod magna, eget iaculis sem tellus at elit. Pellentesque condimentum faucibus metus in euismod. Ut porttitor malesuada mi. Nulla eu urna rutrum, vehicula elit vitae, fringilla nulla. Donec dictum facilisis aliquam. Nulla facilisi. Vestibulum vel dolor quis eros eleifend vestibulum in non odio. Aliquam luctus sapien est, sit amet tempus libero imperdiet dignissim. Suspendisse pellentesque enim a diam consectetur, eget euismod risus suscipit. Morbi tempor ex erat, vitae auctor erat cursus a. Nam vitae tincidunt ipsum.
Vestibulum nec leo justo. Nulla ornare efficitur neque, vitae tincidunt justo ultricies id. Fusce congue sapien eu est molestie sodales. Donec facilisis est at augue vehicula blandit. Quisque blandit felis iaculis, vestibulum nisi non, dignissim urna. Quisque ut viverra lorem. Etiam quis elit enim. Curabitur vitae fringilla sapien, eu accumsan metus. Nunc et turpis nec massa maximus scelerisque. Mauris a vestibulum quam. Fusce sit amet leo non urna hendrerit rhoncus eu eget odio. Aliquam vel volutpat magna, nec laoreet tellus. Nullam eros lacus, placerat nec tempus eu, dictum non ipsum. Vestibulum nec rutrum dolor. Donec congue augue purus, in iaculis mauris malesuada sed. Aliquam erat volutpat.
'''
short_enc = Varstr(short)
rlong_enc = Varstr(rlong)
assert Varstr.decode(short_enc.encode()) == (short, Varint(len(short_enc)))
assert Varstr.decode(rlong_enc.encode()) == (rlong, Varint(len(rlong_enc)))
def test_netaddr():
na = Netaddr('127.0.0.1', 8333, 0)
na_c = Netaddr.from_raw(na.encode())
with pytest.raises(TypeError):
na.ip = '192.168.1.1'
with pytest.raises(TypeError):
na.port = '22'
with pytest.raises(TypeError):
na.services = '11'
assert na_c == na
def test_timestamp():
enc_ts = Timestamp(2017, 1, 1, 10, 0, 0)
assert Timestamp.from_raw(enc_ts.encode()) == enc_ts | 0.336113 | 0.446736 |
import pytest
@pytest.fixture
def toml_adfh() -> str:
"""Fixture for ADFH in toml."""
return """
[adfh]
version = "1.0"
[[adfh.extra]]
type = "metadata"
name = "name"
value = "todo"
tags = ["oas"]
[[adfh.extra]]
type = "metadata"
name = "title"
value = "ToDo API"
tags = ["oas", "user"]
[[adfh.fields]] # It the same as fields in class, database, Rest API, Graphql, etc....
id = "my awesome id" # this id only exist in ADFH file, make unique
name = "id"
type = "unique id"
mandatory = "no"
text = "A unique for a todo"
[[adfh.fields]]
id = "my awesome title" # this id only exist in ADFH file, make unique
name = "title"
type = "text"
mandatory = "yes"
text = "The title of a todo"
[[adfh.fields]]
id = "my second awesome is done" # this id only exist in ADFH file, make unique
name = "is_done"
type = "checkbox"
mandatory = "no"
[[adfh.fields]]
id = "my awesome status" # this id only exist in ADFH file, make unique
name = "status"
type = "choice"
options = [ "rejected", "approved", "deny",]
mandatory = "no"
[[adfh.fields]]
id = "my awesome view" # this id only exist in ADFH file, make unique
name = "view"
type = "number"
mandatory = "no"
text = "count number of poeple that saw the todo"
[[adfh.models]] # It the same as recored, class, database model, etc....
id = "my awesome Item" # this id only exist in ADFH file, make unique
name = "Item"
text = "A todo item"
[[adfh.models.properties]] # It the same as class, rest api fields, etc....
model = "my awesome Item" # Telling that this property is blong to a model X.
assign = "my awesome id" # Assign the field for the model from adfh.fields.
[[adfh.models.properties]]
model = "my awesome Item"
assign = "my awesome title"
[[adfh.models.properties]]
model = "my awesome Item"
assign = "my awesome status"
[[adfh.models]] # It the same as recored, class, database model, etc....
id = "my awesome Company" # this id only exist in ADFH file, make unique
name = "Company"
text = "A Company"
[[adfh.models.properties]] # It the same as class, rest api fields, etc....
model = "my awesome Company"
assign = "my second awesome is done"
[[adfh.models.properties]]
model = "my awesome Company"
assign = "my awesome view"
[[adfh.actions]] # Operations
id = "my awesome list of todo" # this id only exist in ADFH file, make unique
name = "todoList"
type = "show me list" # Telling what kind of operation.
model = "my awesome Item" # Telling that this property is blong to a model X.
text = "list todo items"
tags = ["oas"]
[[adfh.actions]]
id = "my awesome list of company" # this id only exist in ADFH file, make unique
name = "CompanyList"
type = "show me list"
model = "my awesome Company"
text = "list of Company"
tags = ["oas"]
[[adfh.actions]]
id = "my awesome todo" # this id only exist in ADFH file, make unique
name = "todoDetail"
type = "show me a certain item"
model = "my awesome Item"
subject = "my awesome id" # Telling to search with this field or lookup by that field. Note the field should be inside the model # noqa B950
text = "show the detail of a todo."
tags = ["oas"]
[[adfh.actions]]
id = "my awesome add todo" # this id only exist in ADFH file, make unique
name = "todoAdd"
type = "let me add"
model = "my awesome Item"
text = "creating new todo."
tags = ["oas"]
[[adfh.actions.input]]
action = "my awesome add todo"
assign = "my awesome title"
[[adfh.actions]]
id = "my awesome remove company" # this id only exist in ADFH file, make unique
name = "companyRemove"
type = "let me remove"
model = "my awesome Company"
subject = "my awesome id"
text = "removing a company."
tags = ["oas"]
"""
@pytest.fixture
def yaml_adfh() -> str:
"""Fixture for ADFH in yaml."""
return """
adfh:
actions:
- id: my awesome list of todo
model: my awesome Item
name: todoList
tags:
- oas
text: list todo items
type: show me list
- id: my awesome list of company
model: my awesome Company
name: CompanyList
tags:
- oas
text: list of Company
type: show me list
- id: my awesome todo
model: my awesome Item
name: todoDetail
subject: my awesome id
tags:
- oas
text: show the detail of a todo.
type: show me a certain item
- id: my awesome add todo
input:
- action: my awesome add todo
assign: my awesome title
model: my awesome Item
name: todoAdd
tags:
- oas
text: creating new todo.
type: let me add
- id: my awesome remove company
model: my awesome Company
name: companyRemove
subject: my awesome id
tags:
- oas
text: removing a company.
type: let me remove
extra:
- name: name
tags:
- oas
type: metadata
value: todo
- name: title
tags:
- oas
- user
type: metadata
value: ToDo API
fields:
- id: my awesome id
mandatory: 'no'
name: id
text: A unique for a todo
type: unique id
- id: my awesome title
mandatory: 'yes'
name: title
text: The title of a todo
type: text
- id: my second awesome is done
mandatory: 'no'
name: is_done
type: checkbox
- id: my awesome status
mandatory: 'no'
name: status
options:
- rejected
- approved
- deny
type: choice
- id: my awesome view
mandatory: 'no'
name: view
text: count number of poeple that saw the todo
type: number
models:
- id: my awesome Item
name: Item
properties:
- assign: my awesome id
model: my awesome Item
- assign: my awesome title
model: my awesome Item
- assign: my awesome status
model: my awesome Item
text: A todo item
- id: my awesome Company
name: Company
properties:
- assign: my second awesome is done
model: my awesome Company
- assign: my awesome view
model: my awesome Company
text: A Company
version: '1.0'
"""
@pytest.fixture
def toml_adfh_without_choice() -> str:
"""Fixture for ADFH without choice in toml."""
return """
[adfh]
version = "1.0"
[[adfh.extra]]
type = "metadata"
name = "name"
value = "todo"
tags = ["oas"]
[[adfh.extra]]
type = "metadata"
name = "title"
value = "ToDo API"
tags = ["oas", "user"]
[[adfh.fields]] # It the same as fields in class, database, Rest API, Graphql, etc....
id = "my awesome id" # this id only exist in ADFH file, make unique
name = "id"
type = "unique id"
mandatory = "no"
text = "A unique for a todo"
[[adfh.fields]]
id = "my awesome title" # this id only exist in ADFH file, make unique
name = "title"
type = "text"
mandatory = "yes"
text = "The title of a todo"
[[adfh.fields]]
id = "my second awesome is done" # this id only exist in ADFH file, make unique
name = "is_done"
type = "checkbox"
mandatory = "no"
[[adfh.fields]]
id = "my awesome view" # this id only exist in ADFH file, make unique
name = "view"
type = "number"
mandatory = "no"
text = "count number of poeple that saw the todo"
[[adfh.models]] # It the same as recored, class, database model, etc....
id = "my awesome Item" # this id only exist in ADFH file, make unique
name = "Item"
text = "A todo item"
[[adfh.models.properties]] # It the same as class, rest api fields, etc....
model = "my awesome Item" # Telling that this property is blong to a model X.
assign = "my awesome id" # Assign the field for the model from adfh.fields.
[[adfh.models.properties]]
model = "my awesome Item"
assign = "my awesome title"
[[adfh.models]] # It the same as recored, class, database model, etc....
id = "my awesome Company" # this id only exist in ADFH file, make unique
name = "Company"
text = "A Company"
[[adfh.models.properties]] # It the same as class, rest api fields, etc....
model = "my awesome Company"
assign = "my second awesome is done"
[[adfh.models.properties]]
model = "my awesome Company"
assign = "my awesome view"
[[adfh.actions]] # Operations
id = "my awesome list of todo" # this id only exist in ADFH file, make unique
name = "todoList"
type = "show me list" # Telling what kind of operation.
model = "my awesome Item" # Telling that this property is blong to a model X.
text = "list todo items"
tags = ["oas"]
[[adfh.actions]]
id = "my awesome list of company" # this id only exist in ADFH file, make unique
name = "CompanyList"
type = "show me list"
model = "my awesome Company"
text = "list of Company"
tags = ["oas"]
[[adfh.actions]]
id = "my awesome todo" # this id only exist in ADFH file, make unique
name = "todoDetail"
type = "show me a certain item"
model = "my awesome Item"
subject = "my awesome id" # Telling to search with this field or lookup by that field. Note the field should be inside the model # noqa B950
text = "show the detail of a todo."
tags = ["oas"]
[[adfh.actions]]
id = "my awesome add todo" # this id only exist in ADFH file, make unique
name = "todoAdd"
type = "let me add"
model = "my awesome Item"
text = "creating new todo."
tags = ["oas"]
[[adfh.actions.input]]
action = "my awesome add todo"
assign = "my awesome title"
[[adfh.actions]]
id = "my awesome remove company" # this id only exist in ADFH file, make unique
name = "companyRemove"
type = "let me remove"
model = "my awesome Company"
subject = "my awesome id"
text = "removing a company."
tags = ["oas"]
"""
@pytest.fixture
def toml_adfh_with_choice_and_without_options() -> str:
"""Fixture for ADFH in toml."""
return """
[adfh]
version = "1.0"
[[adfh.extra]]
type = "metadata"
name = "name"
value = "todo"
tags = ["oas"]
[[adfh.extra]]
type = "metadata"
name = "title"
value = "ToDo API"
tags = ["oas", "user"]
[[adfh.fields]] # It the same as fields in class, database, Rest API, Graphql, etc....
id = "my awesome id" # this id only exist in ADFH file, make unique
name = "id"
type = "unique id"
mandatory = "no"
text = "A unique for a todo"
[[adfh.fields]]
id = "my awesome title" # this id only exist in ADFH file, make unique
name = "title"
type = "text"
mandatory = "yes"
text = "The title of a todo"
[[adfh.fields]]
id = "my second awesome is done" # this id only exist in ADFH file, make unique
name = "is_done"
type = "checkbox"
mandatory = "no"
[[adfh.fields]]
id = "my awesome status" # this id only exist in ADFH file, make unique
name = "status"
type = "choice"
options = []
mandatory = "no"
[[adfh.fields]]
id = "my awesome view" # this id only exist in ADFH file, make unique
name = "view"
type = "number"
mandatory = "no"
text = "count number of poeple that saw the todo"
[[adfh.models]] # It the same as recored, class, database model, etc....
id = "my awesome Item" # this id only exist in ADFH file, make unique
name = "Item"
text = "A todo item"
[[adfh.models.properties]] # It the same as class, rest api fields, etc....
model = "my awesome Item" # Telling that this property is blong to a model X.
assign = "my awesome id" # Assign the field for the model from adfh.fields.
[[adfh.models.properties]]
model = "my awesome Item"
assign = "my awesome title"
[[adfh.models.properties]]
model = "my awesome Item"
assign = "my awesome status"
[[adfh.models]] # It the same as recored, class, database model, etc....
id = "my awesome Company" # this id only exist in ADFH file, make unique
name = "Company"
text = "A Company"
[[adfh.models.properties]] # It the same as class, rest api fields, etc....
model = "my awesome Company"
assign = "my second awesome is done"
[[adfh.models.properties]]
model = "my awesome Company"
assign = "my awesome view"
[[adfh.actions]] # Operations
id = "my awesome list of todo" # this id only exist in ADFH file, make unique
name = "todoList"
type = "show me list" # Telling what kind of operation.
model = "my awesome Item" # Telling that this property is blong to a model X.
text = "list todo items"
tags = ["oas"]
[[adfh.actions]]
id = "my awesome list of company" # this id only exist in ADFH file, make unique
name = "CompanyList"
type = "show me list"
model = "my awesome Company"
text = "list of Company"
tags = ["oas"]
[[adfh.actions]]
id = "my awesome todo" # this id only exist in ADFH file, make unique
name = "todoDetail"
type = "show me a certain item"
model = "my awesome Item"
subject = "my awesome id" # Telling to search with this field or lookup by that field. Note the field should be inside the model # noqa B950
text = "show the detail of a todo."
tags = ["oas"]
[[adfh.actions]]
id = "my awesome add todo" # this id only exist in ADFH file, make unique
name = "todoAdd"
type = "let me add"
model = "my awesome Item"
text = "creating new todo."
tags = ["oas"]
[[adfh.actions.input]]
action = "my awesome add todo"
assign = "my awesome title"
[[adfh.actions]]
id = "my awesome remove company" # this id only exist in ADFH file, make unique
name = "companyRemove"
type = "let me remove"
model = "my awesome Company"
subject = "my awesome id"
text = "removing a company."
tags = ["oas"]
""" | tests/conftest.py | import pytest
@pytest.fixture
def toml_adfh() -> str:
"""Fixture for ADFH in toml."""
return """
[adfh]
version = "1.0"
[[adfh.extra]]
type = "metadata"
name = "name"
value = "todo"
tags = ["oas"]
[[adfh.extra]]
type = "metadata"
name = "title"
value = "ToDo API"
tags = ["oas", "user"]
[[adfh.fields]] # It the same as fields in class, database, Rest API, Graphql, etc....
id = "my awesome id" # this id only exist in ADFH file, make unique
name = "id"
type = "unique id"
mandatory = "no"
text = "A unique for a todo"
[[adfh.fields]]
id = "my awesome title" # this id only exist in ADFH file, make unique
name = "title"
type = "text"
mandatory = "yes"
text = "The title of a todo"
[[adfh.fields]]
id = "my second awesome is done" # this id only exist in ADFH file, make unique
name = "is_done"
type = "checkbox"
mandatory = "no"
[[adfh.fields]]
id = "my awesome status" # this id only exist in ADFH file, make unique
name = "status"
type = "choice"
options = [ "rejected", "approved", "deny",]
mandatory = "no"
[[adfh.fields]]
id = "my awesome view" # this id only exist in ADFH file, make unique
name = "view"
type = "number"
mandatory = "no"
text = "count number of poeple that saw the todo"
[[adfh.models]] # It the same as recored, class, database model, etc....
id = "my awesome Item" # this id only exist in ADFH file, make unique
name = "Item"
text = "A todo item"
[[adfh.models.properties]] # It the same as class, rest api fields, etc....
model = "my awesome Item" # Telling that this property is blong to a model X.
assign = "my awesome id" # Assign the field for the model from adfh.fields.
[[adfh.models.properties]]
model = "my awesome Item"
assign = "my awesome title"
[[adfh.models.properties]]
model = "my awesome Item"
assign = "my awesome status"
[[adfh.models]] # It the same as recored, class, database model, etc....
id = "my awesome Company" # this id only exist in ADFH file, make unique
name = "Company"
text = "A Company"
[[adfh.models.properties]] # It the same as class, rest api fields, etc....
model = "my awesome Company"
assign = "my second awesome is done"
[[adfh.models.properties]]
model = "my awesome Company"
assign = "my awesome view"
[[adfh.actions]] # Operations
id = "my awesome list of todo" # this id only exist in ADFH file, make unique
name = "todoList"
type = "show me list" # Telling what kind of operation.
model = "my awesome Item" # Telling that this property is blong to a model X.
text = "list todo items"
tags = ["oas"]
[[adfh.actions]]
id = "my awesome list of company" # this id only exist in ADFH file, make unique
name = "CompanyList"
type = "show me list"
model = "my awesome Company"
text = "list of Company"
tags = ["oas"]
[[adfh.actions]]
id = "my awesome todo" # this id only exist in ADFH file, make unique
name = "todoDetail"
type = "show me a certain item"
model = "my awesome Item"
subject = "my awesome id" # Telling to search with this field or lookup by that field. Note the field should be inside the model # noqa B950
text = "show the detail of a todo."
tags = ["oas"]
[[adfh.actions]]
id = "my awesome add todo" # this id only exist in ADFH file, make unique
name = "todoAdd"
type = "let me add"
model = "my awesome Item"
text = "creating new todo."
tags = ["oas"]
[[adfh.actions.input]]
action = "my awesome add todo"
assign = "my awesome title"
[[adfh.actions]]
id = "my awesome remove company" # this id only exist in ADFH file, make unique
name = "companyRemove"
type = "let me remove"
model = "my awesome Company"
subject = "my awesome id"
text = "removing a company."
tags = ["oas"]
"""
@pytest.fixture
def yaml_adfh() -> str:
"""Fixture for ADFH in yaml."""
return """
adfh:
actions:
- id: my awesome list of todo
model: my awesome Item
name: todoList
tags:
- oas
text: list todo items
type: show me list
- id: my awesome list of company
model: my awesome Company
name: CompanyList
tags:
- oas
text: list of Company
type: show me list
- id: my awesome todo
model: my awesome Item
name: todoDetail
subject: my awesome id
tags:
- oas
text: show the detail of a todo.
type: show me a certain item
- id: my awesome add todo
input:
- action: my awesome add todo
assign: my awesome title
model: my awesome Item
name: todoAdd
tags:
- oas
text: creating new todo.
type: let me add
- id: my awesome remove company
model: my awesome Company
name: companyRemove
subject: my awesome id
tags:
- oas
text: removing a company.
type: let me remove
extra:
- name: name
tags:
- oas
type: metadata
value: todo
- name: title
tags:
- oas
- user
type: metadata
value: ToDo API
fields:
- id: my awesome id
mandatory: 'no'
name: id
text: A unique for a todo
type: unique id
- id: my awesome title
mandatory: 'yes'
name: title
text: The title of a todo
type: text
- id: my second awesome is done
mandatory: 'no'
name: is_done
type: checkbox
- id: my awesome status
mandatory: 'no'
name: status
options:
- rejected
- approved
- deny
type: choice
- id: my awesome view
mandatory: 'no'
name: view
text: count number of poeple that saw the todo
type: number
models:
- id: my awesome Item
name: Item
properties:
- assign: my awesome id
model: my awesome Item
- assign: my awesome title
model: my awesome Item
- assign: my awesome status
model: my awesome Item
text: A todo item
- id: my awesome Company
name: Company
properties:
- assign: my second awesome is done
model: my awesome Company
- assign: my awesome view
model: my awesome Company
text: A Company
version: '1.0'
"""
@pytest.fixture
def toml_adfh_without_choice() -> str:
"""Fixture for ADFH without choice in toml."""
return """
[adfh]
version = "1.0"
[[adfh.extra]]
type = "metadata"
name = "name"
value = "todo"
tags = ["oas"]
[[adfh.extra]]
type = "metadata"
name = "title"
value = "ToDo API"
tags = ["oas", "user"]
[[adfh.fields]] # It the same as fields in class, database, Rest API, Graphql, etc....
id = "my awesome id" # this id only exist in ADFH file, make unique
name = "id"
type = "unique id"
mandatory = "no"
text = "A unique for a todo"
[[adfh.fields]]
id = "my awesome title" # this id only exist in ADFH file, make unique
name = "title"
type = "text"
mandatory = "yes"
text = "The title of a todo"
[[adfh.fields]]
id = "my second awesome is done" # this id only exist in ADFH file, make unique
name = "is_done"
type = "checkbox"
mandatory = "no"
[[adfh.fields]]
id = "my awesome view" # this id only exist in ADFH file, make unique
name = "view"
type = "number"
mandatory = "no"
text = "count number of poeple that saw the todo"
[[adfh.models]] # It the same as recored, class, database model, etc....
id = "my awesome Item" # this id only exist in ADFH file, make unique
name = "Item"
text = "A todo item"
[[adfh.models.properties]] # It the same as class, rest api fields, etc....
model = "my awesome Item" # Telling that this property is blong to a model X.
assign = "my awesome id" # Assign the field for the model from adfh.fields.
[[adfh.models.properties]]
model = "my awesome Item"
assign = "my awesome title"
[[adfh.models]] # It the same as recored, class, database model, etc....
id = "my awesome Company" # this id only exist in ADFH file, make unique
name = "Company"
text = "A Company"
[[adfh.models.properties]] # It the same as class, rest api fields, etc....
model = "my awesome Company"
assign = "my second awesome is done"
[[adfh.models.properties]]
model = "my awesome Company"
assign = "my awesome view"
[[adfh.actions]] # Operations
id = "my awesome list of todo" # this id only exist in ADFH file, make unique
name = "todoList"
type = "show me list" # Telling what kind of operation.
model = "my awesome Item" # Telling that this property is blong to a model X.
text = "list todo items"
tags = ["oas"]
[[adfh.actions]]
id = "my awesome list of company" # this id only exist in ADFH file, make unique
name = "CompanyList"
type = "show me list"
model = "my awesome Company"
text = "list of Company"
tags = ["oas"]
[[adfh.actions]]
id = "my awesome todo" # this id only exist in ADFH file, make unique
name = "todoDetail"
type = "show me a certain item"
model = "my awesome Item"
subject = "my awesome id" # Telling to search with this field or lookup by that field. Note the field should be inside the model # noqa B950
text = "show the detail of a todo."
tags = ["oas"]
[[adfh.actions]]
id = "my awesome add todo" # this id only exist in ADFH file, make unique
name = "todoAdd"
type = "let me add"
model = "my awesome Item"
text = "creating new todo."
tags = ["oas"]
[[adfh.actions.input]]
action = "my awesome add todo"
assign = "my awesome title"
[[adfh.actions]]
id = "my awesome remove company" # this id only exist in ADFH file, make unique
name = "companyRemove"
type = "let me remove"
model = "my awesome Company"
subject = "my awesome id"
text = "removing a company."
tags = ["oas"]
"""
@pytest.fixture
def toml_adfh_with_choice_and_without_options() -> str:
"""Fixture for ADFH in toml."""
return """
[adfh]
version = "1.0"
[[adfh.extra]]
type = "metadata"
name = "name"
value = "todo"
tags = ["oas"]
[[adfh.extra]]
type = "metadata"
name = "title"
value = "ToDo API"
tags = ["oas", "user"]
[[adfh.fields]] # It the same as fields in class, database, Rest API, Graphql, etc....
id = "my awesome id" # this id only exist in ADFH file, make unique
name = "id"
type = "unique id"
mandatory = "no"
text = "A unique for a todo"
[[adfh.fields]]
id = "my awesome title" # this id only exist in ADFH file, make unique
name = "title"
type = "text"
mandatory = "yes"
text = "The title of a todo"
[[adfh.fields]]
id = "my second awesome is done" # this id only exist in ADFH file, make unique
name = "is_done"
type = "checkbox"
mandatory = "no"
[[adfh.fields]]
id = "my awesome status" # this id only exist in ADFH file, make unique
name = "status"
type = "choice"
options = []
mandatory = "no"
[[adfh.fields]]
id = "my awesome view" # this id only exist in ADFH file, make unique
name = "view"
type = "number"
mandatory = "no"
text = "count number of poeple that saw the todo"
[[adfh.models]] # It the same as recored, class, database model, etc....
id = "my awesome Item" # this id only exist in ADFH file, make unique
name = "Item"
text = "A todo item"
[[adfh.models.properties]] # It the same as class, rest api fields, etc....
model = "my awesome Item" # Telling that this property is blong to a model X.
assign = "my awesome id" # Assign the field for the model from adfh.fields.
[[adfh.models.properties]]
model = "my awesome Item"
assign = "my awesome title"
[[adfh.models.properties]]
model = "my awesome Item"
assign = "my awesome status"
[[adfh.models]] # It the same as recored, class, database model, etc....
id = "my awesome Company" # this id only exist in ADFH file, make unique
name = "Company"
text = "A Company"
[[adfh.models.properties]] # It the same as class, rest api fields, etc....
model = "my awesome Company"
assign = "my second awesome is done"
[[adfh.models.properties]]
model = "my awesome Company"
assign = "my awesome view"
[[adfh.actions]] # Operations
id = "my awesome list of todo" # this id only exist in ADFH file, make unique
name = "todoList"
type = "show me list" # Telling what kind of operation.
model = "my awesome Item" # Telling that this property is blong to a model X.
text = "list todo items"
tags = ["oas"]
[[adfh.actions]]
id = "my awesome list of company" # this id only exist in ADFH file, make unique
name = "CompanyList"
type = "show me list"
model = "my awesome Company"
text = "list of Company"
tags = ["oas"]
[[adfh.actions]]
id = "my awesome todo" # this id only exist in ADFH file, make unique
name = "todoDetail"
type = "show me a certain item"
model = "my awesome Item"
subject = "my awesome id" # Telling to search with this field or lookup by that field. Note the field should be inside the model # noqa B950
text = "show the detail of a todo."
tags = ["oas"]
[[adfh.actions]]
id = "my awesome add todo" # this id only exist in ADFH file, make unique
name = "todoAdd"
type = "let me add"
model = "my awesome Item"
text = "creating new todo."
tags = ["oas"]
[[adfh.actions.input]]
action = "my awesome add todo"
assign = "my awesome title"
[[adfh.actions]]
id = "my awesome remove company" # this id only exist in ADFH file, make unique
name = "companyRemove"
type = "let me remove"
model = "my awesome Company"
subject = "my awesome id"
text = "removing a company."
tags = ["oas"]
""" | 0.372734 | 0.211274 |
import sys
if '..' not in sys.path:
sys.path.append('..')
import Mesh.ysMeshUtil as ysu
import Math.mmMath as mm
import Implicit.csIMSModel as cmm
def getSpringLengthsFromMesh(mesh, springConfigs):
springLengths = [None]*len(springConfigs)
for i in range(len(springConfigs)):
springLengths[i] = mm.length(mesh.getVertexPosition(springConfigs[i].particleIndex0) - mesh.getVertexPosition(springConfigs[i].particleIndex1))
return springLengths
def getParticleConfigsFromMesh(mesh, massMap, initialMotion, dynamicMu, staticMu):
particleConfigs = [None]*len(mesh.vertices)
vertexMasses = ysu.getDistributedVertexMasses(mesh, massMap)
# initialVelocities = ysu.meshAnimation2PointMotion(mesh, initialMotion).getPointVelocities(0, 1)
initialVelocities = ysu.meshAnimation2PointMotion(mesh, initialMotion).getVelocities(0, 1)
for i in range(len(mesh.vertices)):
particleConfigs[i] = cmm.ParticleConfig(mesh.vertices[i].pos, vertexMasses[i], \
initialVelocities[i], dynamicMu, staticMu)
return particleConfigs
def getSpringConfigsFromMesh(mesh, Ks, Kd):
springSet = set()
for f in mesh.faces:
for j in range(3):
i0 = f.vertexIndex[j]
oj = j+1
if oj == 3: oj = 0
i1 = f.vertexIndex[oj]
if i0 < i1:
s = (i0, i1)
else:
s = (i1, i0)
if s not in springSet:
springSet.add(s)
muscleSpringSet = set()
for i in range(len(mesh.vertices)):
neighbors = []
for s in springSet:
if i == s[0]:
neighbors.append(s[1])
elif i == s[1]:
neighbors.append(s[0])
for k in range(len(neighbors)):
for m in range(len(neighbors)):
if k != m:
if neighbors[k] < neighbors[m]:
s = (neighbors[k], neighbors[m])
else:
s = (neighbors[m], neighbors[k])
if s not in springSet and s not in muscleSpringSet:
muscleSpringSet.add(s)
subspringsNames = ['']*len(springSet) + ['__MUSCLE__']*len(muscleSpringSet)
springs = list(springSet) + list(muscleSpringSet)
springConfigs = [None]*len(springs)
for i in range(len(springs)):
sc = cmm.SpringConfig(springs[i][0], springs[i][1], Ks, Kd)
sc.subspringsName = subspringsNames[i]
springConfigs[i] = sc
return springConfigs | PyCommon/modules/Simulator/ysIMSUtil.py | import sys
if '..' not in sys.path:
sys.path.append('..')
import Mesh.ysMeshUtil as ysu
import Math.mmMath as mm
import Implicit.csIMSModel as cmm
def getSpringLengthsFromMesh(mesh, springConfigs):
springLengths = [None]*len(springConfigs)
for i in range(len(springConfigs)):
springLengths[i] = mm.length(mesh.getVertexPosition(springConfigs[i].particleIndex0) - mesh.getVertexPosition(springConfigs[i].particleIndex1))
return springLengths
def getParticleConfigsFromMesh(mesh, massMap, initialMotion, dynamicMu, staticMu):
particleConfigs = [None]*len(mesh.vertices)
vertexMasses = ysu.getDistributedVertexMasses(mesh, massMap)
# initialVelocities = ysu.meshAnimation2PointMotion(mesh, initialMotion).getPointVelocities(0, 1)
initialVelocities = ysu.meshAnimation2PointMotion(mesh, initialMotion).getVelocities(0, 1)
for i in range(len(mesh.vertices)):
particleConfigs[i] = cmm.ParticleConfig(mesh.vertices[i].pos, vertexMasses[i], \
initialVelocities[i], dynamicMu, staticMu)
return particleConfigs
def getSpringConfigsFromMesh(mesh, Ks, Kd):
springSet = set()
for f in mesh.faces:
for j in range(3):
i0 = f.vertexIndex[j]
oj = j+1
if oj == 3: oj = 0
i1 = f.vertexIndex[oj]
if i0 < i1:
s = (i0, i1)
else:
s = (i1, i0)
if s not in springSet:
springSet.add(s)
muscleSpringSet = set()
for i in range(len(mesh.vertices)):
neighbors = []
for s in springSet:
if i == s[0]:
neighbors.append(s[1])
elif i == s[1]:
neighbors.append(s[0])
for k in range(len(neighbors)):
for m in range(len(neighbors)):
if k != m:
if neighbors[k] < neighbors[m]:
s = (neighbors[k], neighbors[m])
else:
s = (neighbors[m], neighbors[k])
if s not in springSet and s not in muscleSpringSet:
muscleSpringSet.add(s)
subspringsNames = ['']*len(springSet) + ['__MUSCLE__']*len(muscleSpringSet)
springs = list(springSet) + list(muscleSpringSet)
springConfigs = [None]*len(springs)
for i in range(len(springs)):
sc = cmm.SpringConfig(springs[i][0], springs[i][1], Ks, Kd)
sc.subspringsName = subspringsNames[i]
springConfigs[i] = sc
return springConfigs | 0.076127 | 0.278533 |
"""Tests of streamable Keyword Spotting models implemented in Keras."""
import os
import sys
import pathlib
from absl import app
from absl import flags
from absl import logging
import numpy as np
from pyiree.tf.support import tf_test_utils
from pyiree.tf.support import tf_utils
import tensorflow.compat.v2 as tf
from kws_streaming.layers import modes
from kws_streaming.models import model_params
from kws_streaming.models import models
from kws_streaming.models import utils
from kws_streaming.train import model_flags
FLAGS = flags.FLAGS
# All hotword model names known to kws_streaming; used both for the --model
# flag's help text and for validation in main().
ALL_MODELS = list(model_params.HOTWORD_MODEL_PARAMS.keys())
MODELS_HELP = [f"'{name}'" for name in ALL_MODELS]
MODELS_HELP = f'{", ".join(MODELS_HELP[:-1])}, or {MODELS_HELP[-1]}'
# --model: which KWS architecture to build (key into HOTWORD_MODEL_PARAMS).
flags.DEFINE_string(
    'model', 'svdf', f'Name of the model to compile. Either {MODELS_HELP}.\n'
    'See https://github.com/google-research/google-research/blob/master/kws_streaming/models/models.py#L38-L58'
)
flags.DEFINE_enum('mode', 'non_streaming',
                  ['non_streaming', 'internal_streaming'],
                  'Mode to execute the model in.')
# Maps the --mode flag value onto kws_streaming's inference-mode enum.
MODE_ENUM_TO_MODE = {
    'non_streaming': modes.Modes.NON_STREAM_INFERENCE,
    'internal_streaming': modes.Modes.STREAM_INTERNAL_STATE_INFERENCE,
}
# (batch, num_samples) model input per mode — presumably 16 kHz audio: one
# full clip vs. one small streaming chunk.  TODO(review): confirm sample rate.
MODE_TO_INPUT_SHAPE = {
    'non_streaming': (1, 16000),
    'internal_streaming': (1, 320),
}
def get_input_shape():
  """Return the (batch, num_samples) input shape for the selected --mode."""
  shape = MODE_TO_INPUT_SHAPE[FLAGS.mode]
  return shape
def initialize_model():
  """Build the Keras KWS model selected by --model.

  In 'internal_streaming' mode the constructed model is additionally
  converted to a streaming-inference graph with internal state, sized to
  the streaming input shape.
  """
  params = model_flags.update_flags(model_params.HOTWORD_MODEL_PARAMS[FLAGS.model])
  model = models.MODELS[params.model_name](params)
  if FLAGS.mode != 'internal_streaming':
    return model
  batch_size, num_samples = get_input_shape()
  params.batch_size = batch_size
  params.desired_samples = num_samples
  return utils.to_streaming_inference(
      model, flags=params, mode=MODE_ENUM_TO_MODE[FLAGS.mode])
class KeywordSpottingModule(tf.Module):
  """tf.Module exposing the selected KWS model as a traced `predict`."""

  def __init__(self):
    super().__init__()
    self.m = initialize_model()
    # Route predict through `call` (training=False) so inference skips the
    # Keras predict-loop machinery and can be wrapped in a tf.function.
    self.m.predict = lambda x: self.m.call(x, training=False)
    # Fixed input signature: one concrete trace at the mode's input shape.
    self.predict = tf.function(
        input_signature=[tf.TensorSpec(get_input_shape())])(self.m.predict)
class KeywordSpottingTest(tf_test_utils.TracedModuleTestCase):
  """Compares KeywordSpottingModule's `predict` across compiled backends."""

  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self._modules = tf_test_utils.compile_tf_module(KeywordSpottingModule,
                                                    exported_names=['predict'])

  def test_predict(self):
    def predict(module):
      # NOTE(review): `atol` appears to be consumed by the tracing/comparison
      # harness rather than the model itself — confirm against tf_test_utils.
      module.predict(tf_utils.uniform(get_input_shape()), atol=1e-5)
    self.compare_backends(predict, self._modules)
def main(argv):
  """Validates flags, then hands control to the TF test runner."""
  del argv  # Unused.
  if hasattr(tf, 'enable_v2_behavior'):
    tf.enable_v2_behavior()
  if FLAGS.model not in ALL_MODELS:
    raise ValueError(f'Unsupported model: {FLAGS.model}.\n'
                     f'Expected one of {MODELS_HELP}.')
  # Rename the module class — presumably so compiled test artifacts are
  # named per-model.  TODO(review): confirm how the harness uses __name__.
  KeywordSpottingModule.__name__ = f'kws_{FLAGS.model}'
  tf.test.main()
if __name__ == '__main__':
app.run(main) | integrations/tensorflow/e2e/keras/keyword_spotting_streaming_test.py | """Tests of streamable Keyword Spotting models implemented in Keras."""
import os
import sys
import pathlib
from absl import app
from absl import flags
from absl import logging
import numpy as np
from pyiree.tf.support import tf_test_utils
from pyiree.tf.support import tf_utils
import tensorflow.compat.v2 as tf
from kws_streaming.layers import modes
from kws_streaming.models import model_params
from kws_streaming.models import models
from kws_streaming.models import utils
from kws_streaming.train import model_flags
FLAGS = flags.FLAGS
ALL_MODELS = list(model_params.HOTWORD_MODEL_PARAMS.keys())
MODELS_HELP = [f"'{name}'" for name in ALL_MODELS]
MODELS_HELP = f'{", ".join(MODELS_HELP[:-1])}, or {MODELS_HELP[-1]}'
flags.DEFINE_string(
'model', 'svdf', f'Name of the model to compile. Either {MODELS_HELP}.\n'
'See https://github.com/google-research/google-research/blob/master/kws_streaming/models/models.py#L38-L58'
)
flags.DEFINE_enum('mode', 'non_streaming',
['non_streaming', 'internal_streaming'],
'Mode to execute the model in.')
MODE_ENUM_TO_MODE = {
'non_streaming': modes.Modes.NON_STREAM_INFERENCE,
'internal_streaming': modes.Modes.STREAM_INTERNAL_STATE_INFERENCE,
}
MODE_TO_INPUT_SHAPE = {
'non_streaming': (1, 16000),
'internal_streaming': (1, 320),
}
def get_input_shape():
return MODE_TO_INPUT_SHAPE[FLAGS.mode]
def initialize_model():
params = model_params.HOTWORD_MODEL_PARAMS[FLAGS.model]
params = model_flags.update_flags(params)
model = models.MODELS[params.model_name](params)
if FLAGS.mode == 'internal_streaming':
mode = MODE_ENUM_TO_MODE[FLAGS.mode]
input_shape = get_input_shape()
params.batch_size = input_shape[0]
params.desired_samples = input_shape[1]
model = utils.to_streaming_inference(model, flags=params, mode=mode)
return model
class KeywordSpottingModule(tf.Module):
def __init__(self):
super().__init__()
self.m = initialize_model()
self.m.predict = lambda x: self.m.call(x, training=False)
self.predict = tf.function(
input_signature=[tf.TensorSpec(get_input_shape())])(self.m.predict)
class KeywordSpottingTest(tf_test_utils.TracedModuleTestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._modules = tf_test_utils.compile_tf_module(KeywordSpottingModule,
exported_names=['predict'])
def test_predict(self):
def predict(module):
module.predict(tf_utils.uniform(get_input_shape()), atol=1e-5)
self.compare_backends(predict, self._modules)
def main(argv):
del argv # Unused.
if hasattr(tf, 'enable_v2_behavior'):
tf.enable_v2_behavior()
if FLAGS.model not in ALL_MODELS:
raise ValueError(f'Unsupported model: {FLAGS.model}.\n'
f'Expected one of {MODELS_HELP}.')
KeywordSpottingModule.__name__ = f'kws_{FLAGS.model}'
tf.test.main()
if __name__ == '__main__':
app.run(main) | 0.64512 | 0.295344 |
import maproulette
import unittest
from tests.sample_data import test_geojson, test_overpassQL_query
from unittest.mock import patch
class TestProjectAPI(unittest.TestCase):
    """Unit tests for maproulette.Project.

    Every test patches the relevant requests.Session verb (get/post/put/
    delete), stubs the HTTP status code to '200', and asserts that the
    wrapped API call surfaces that status in its response dict.  No network
    traffic is performed.
    NOTE(review): the assertions assume the maproulette server wrapper
    propagates the mocked status_code as response['status'] — confirm.
    """
    config = maproulette.Configuration(api_key="API_KEY")
    api = maproulette.Project(config)
    # The class-level `api` is bound as a default argument so each test can
    # use it without going through self; it is evaluated once at class
    # definition time.
    @patch('maproulette.api.maproulette_server.requests.Session.get')
    def test_get_project_by_id(self, mock_request, api_instance=api):
        test_project_id = '32922'
        mock_request.return_value.status_code = '200'
        response = api_instance.get_project_by_id(test_project_id)
        self.assertEqual(response['status'], '200')
    @patch('maproulette.api.maproulette_server.requests.Session.get')
    def test_get_project_by_name(self, mock_request, api_instance=api):
        test_project_name = 'Maptime!'
        mock_request.return_value.status_code = '200'
        response = api_instance.get_project_by_name(test_project_name)
        self.assertEqual(response['status'], '200')
    @patch('maproulette.api.maproulette_server.requests.Session.get')
    def test_find_project(self, mock_request, api_instance=api):
        test_search = 'Health Facilities in India'
        mock_request.return_value.status_code = '200'
        response = api_instance.find_project(test_search)
        self.assertEqual(response['status'], '200')
    @patch('maproulette.api.maproulette_server.requests.Session.get')
    def test_get_project_challenges(self, mock_request, api_instance=api):
        test_project_id = '12974'
        mock_request.return_value.status_code = '200'
        response = api_instance.get_project_challenges(test_project_id)
        self.assertEqual(response['status'], '200')
    @patch('maproulette.api.maproulette_server.requests.Session.post')
    def test_create_project(self, mock_request, api_instance=api):
        test_project_model = maproulette.ProjectModel(name='Test_Project_Name',
                                                      description='This is a test project')
        mock_request.return_value.status_code = '200'
        response = api_instance.create_project(test_project_model)
        self.assertEqual(response['status'], '200')
    @patch('maproulette.api.maproulette_server.requests.Session.post')
    def test_add_challenge_to_project(self, mock_request, api_instance=api):
        test_virtual_project_model = maproulette.ProjectModel(name='Test Virtual Project Name',
                                                              id=1234)
        test_challenge_model = maproulette.ChallengeModel(name='Test Challenge Name',
                                                          id=246)
        test_virtual_project_id = test_virtual_project_model.id
        test_challenge_id = test_challenge_model.id
        mock_request.return_value.status_code = '200'
        response = api_instance.add_challenge_to_project(test_virtual_project_id, test_challenge_id)
        self.assertEqual(response['status'], '200')
    @patch('maproulette.api.maproulette_server.requests.Session.post')
    def test_remove_challenge_from_project(self, mock_request, api_instance=api):
        test_virtual_project_model = maproulette.ProjectModel(name='Test Virtual Project Name',
                                                              id=1234)
        test_challenge_model = maproulette.ChallengeModel(name='Test Challenge Name', id=246)
        test_virtual_project_id = test_virtual_project_model.id
        test_challenge_id = test_challenge_model.id
        mock_request.return_value.status_code = '200'
        response = api_instance.remove_challenge_from_project(test_virtual_project_id, test_challenge_id)
        self.assertEqual(response['status'], '200')
    @patch('maproulette.api.maproulette_server.requests.Session.delete')
    def test_delete_project(self, mock_request, api_instance=api):
        test_project_model = maproulette.ProjectModel(name='Test Project Name', id=1234)
        test_project_id = test_project_model.id
        mock_request.return_value.status_code = '200'
        response = api_instance.delete_project(test_project_id)
        self.assertEqual(response['status'], '200')
    @patch('maproulette.api.maproulette_server.requests.Session.put')
    def test_update_project(self, mock_request, api_instance=api):
        test_project_model = maproulette.ProjectModel(name='Test Project Name', id=1234)
        test_updated_project_model = maproulette.ProjectModel(name='Test Updated Project Name')
        test_project_model_id = test_project_model.id
        mock_request.return_value.status_code = '200'
        response = api_instance.update_project(test_project_model_id, test_updated_project_model)
        self.assertEqual(response['status'], '200')
    @patch('maproulette.api.maproulette_server.requests.Session.get')
    def test_get_project_by_ids(self, mock_request, api_instance=api):
        test_project_ids = '1234,2468,1356'
        mock_request.return_value.status_code = '200'
        response = api_instance.get_projects_by_ids(test_project_ids)
        self.assertEqual(response['status'], '200')
    @patch('maproulette.api.maproulette_server.requests.Session.get')
    def test_get_random_tasks(self, mock_request, api_instance=api):
        test_project_model = maproulette.ProjectModel(name='Test Project Name',
                                                      id=1234)
        test_project_id = test_project_model.id
        mock_request.return_value.status_code = '200'
        response = api_instance.get_random_tasks(test_project_id)
        self.assertEqual(response['status'], '200') | tests/test_project_api.py | import maproulette
import unittest
from tests.sample_data import test_geojson, test_overpassQL_query
from unittest.mock import patch
class TestProjectAPI(unittest.TestCase):
config = maproulette.Configuration(api_key="API_KEY")
api = maproulette.Project(config)
@patch('maproulette.api.maproulette_server.requests.Session.get')
def test_get_project_by_id(self, mock_request, api_instance=api):
test_project_id = '32922'
mock_request.return_value.status_code = '200'
response = api_instance.get_project_by_id(test_project_id)
self.assertEqual(response['status'], '200')
@patch('maproulette.api.maproulette_server.requests.Session.get')
def test_get_project_by_name(self, mock_request, api_instance=api):
test_project_name = 'Maptime!'
mock_request.return_value.status_code = '200'
response = api_instance.get_project_by_name(test_project_name)
self.assertEqual(response['status'], '200')
@patch('maproulette.api.maproulette_server.requests.Session.get')
def test_find_project(self, mock_request, api_instance=api):
test_search = 'Health Facilities in India'
mock_request.return_value.status_code = '200'
response = api_instance.find_project(test_search)
self.assertEqual(response['status'], '200')
@patch('maproulette.api.maproulette_server.requests.Session.get')
def test_get_project_challenges(self, mock_request, api_instance=api):
test_project_id = '12974'
mock_request.return_value.status_code = '200'
response = api_instance.get_project_challenges(test_project_id)
self.assertEqual(response['status'], '200')
@patch('maproulette.api.maproulette_server.requests.Session.post')
def test_create_project(self, mock_request, api_instance=api):
test_project_model = maproulette.ProjectModel(name='Test_Project_Name',
description='This is a test project')
mock_request.return_value.status_code = '200'
response = api_instance.create_project(test_project_model)
self.assertEqual(response['status'], '200')
@patch('maproulette.api.maproulette_server.requests.Session.post')
def test_add_challenge_to_project(self, mock_request, api_instance=api):
test_virtual_project_model = maproulette.ProjectModel(name='Test Virtual Project Name',
id=1234)
test_challenge_model = maproulette.ChallengeModel(name='Test Challenge Name',
id=246)
test_virtual_project_id = test_virtual_project_model.id
test_challenge_id = test_challenge_model.id
mock_request.return_value.status_code = '200'
response = api_instance.add_challenge_to_project(test_virtual_project_id, test_challenge_id)
self.assertEqual(response['status'], '200')
@patch('maproulette.api.maproulette_server.requests.Session.post')
def test_remove_challenge_from_project(self, mock_request, api_instance=api):
test_virtual_project_model = maproulette.ProjectModel(name='Test Virtual Project Name',
id=1234)
test_challenge_model = maproulette.ChallengeModel(name='Test Challenge Name', id=246)
test_virtual_project_id = test_virtual_project_model.id
test_challenge_id = test_challenge_model.id
mock_request.return_value.status_code = '200'
response = api_instance.remove_challenge_from_project(test_virtual_project_id, test_challenge_id)
self.assertEqual(response['status'], '200')
@patch('maproulette.api.maproulette_server.requests.Session.delete')
def test_delete_project(self, mock_request, api_instance=api):
test_project_model = maproulette.ProjectModel(name='Test Project Name', id=1234)
test_project_id = test_project_model.id
mock_request.return_value.status_code = '200'
response = api_instance.delete_project(test_project_id)
self.assertEqual(response['status'], '200')
@patch('maproulette.api.maproulette_server.requests.Session.put')
def test_update_project(self, mock_request, api_instance=api):
test_project_model = maproulette.ProjectModel(name='Test Project Name', id=1234)
test_updated_project_model = maproulette.ProjectModel(name='Test Updated Project Name')
test_project_model_id = test_project_model.id
mock_request.return_value.status_code = '200'
response = api_instance.update_project(test_project_model_id, test_updated_project_model)
self.assertEqual(response['status'], '200')
@patch('maproulette.api.maproulette_server.requests.Session.get')
def test_get_project_by_ids(self, mock_request, api_instance=api):
test_project_ids = '1234,2468,1356'
mock_request.return_value.status_code = '200'
response = api_instance.get_projects_by_ids(test_project_ids)
self.assertEqual(response['status'], '200')
@patch('maproulette.api.maproulette_server.requests.Session.get')
def test_get_random_tasks(self, mock_request, api_instance=api):
test_project_model = maproulette.ProjectModel(name='Test Project Name',
id=1234)
test_project_id = test_project_model.id
mock_request.return_value.status_code = '200'
response = api_instance.get_random_tasks(test_project_id)
self.assertEqual(response['status'], '200') | 0.550003 | 0.244095 |
from collections import defaultdict
from typing import Union
import pandas as pd
import numpy as np
from remake.schematic.core.transformation import TransformationCore
class SchematicTransformation(TransformationCore):
    """Declarative DataFrame transformation engine.

    Subclasses declare columns, pre/post/split conditions, groups, splits,
    aggregations and parameters (via the TransformationCore machinery);
    an instance then applies the whole pipeline to a DataFrame through
    ``transform()`` / ``__call__``.
    NOTE(review): several names used below (ColumnGenerationError,
    EmptyColumnError, EmptyDataFrameError, TransformDAG, ColumnContainer,
    to_df) must be imported/defined elsewhere in this module — not visible
    in this chunk.
    """

    def __init__(self, **given_params):
        """Validate **given_params against the declared parameters and store them.

        Missing parameters fall back to their declared default (or raise),
        and supplied values are type-checked when the parameter declares a
        dtype.
        """
        self._param_values = {}
        for parameter in self.__parameters__:
            name, identifier, dtype = parameter.name, parameter.identifier, parameter.dtype
            if name not in given_params:
                if parameter.default_value is None:
                    raise ValueError(f"{name} is needed to compute. Reason: {parameter.helper}")
                else:
                    parameter.update(parameter.default_value)
                    self._param_values[identifier] = parameter.value
            else:
                if dtype is not None:
                    if not isinstance(given_params[name], dtype):
                        raise ValueError(f"{name} has type {type(given_params[name])} but {dtype} was expected")
                parameter.update(given_params[name])
                self._param_values[identifier] = parameter.value

    def __new__(cls, *args, **kwargs) -> Union[TransformationCore, pd.DataFrame]:
        """Dual-mode construction.

        Called with keyword params only -> returns a configured instance.
        Called with positional data    -> builds the instance and immediately
        applies it, returning the transformed DataFrame.
        """
        assert not (kwargs and args)
        obj_ref = super(cls.__bases__[-1], cls).__new__(cls)
        # __init__ is invoked explicitly so the positional-call path has a
        # fully configured instance before it is applied below.
        obj_ref.__init__(**kwargs)
        if not args:
            return obj_ref
        else:
            return obj_ref(*args, **kwargs)

    def _apply_pre_conditions(self, df: pd.DataFrame):
        """Filter *df* row-wise through every declared pre-condition."""
        for condition in self._pre_conditions:
            # Split the condition's inputs into DataFrame columns vs. params.
            input_cols, param_cols = self.divide(condition.input_columns, [df.columns, self._param_values.keys()])
            parmeters = {param: self._param_values[param] for param in param_cols}
            if condition.vectorized:
                condition_result = condition.transform(**SchematicTransformation.prepare(df[input_cols]), **parmeters)
            else:
                condition_result = df[input_cols].apply(lambda row: condition.transform(**row, **parmeters), axis=1)
            df = df[condition_result]
        return df

    def _apply_post_conditions(self, transformed_df, df, aggregates, views):
        """Filter the transformed rows through every declared post-condition.

        Post-conditions may draw inputs from the transformed frame, the
        original frame, aggregates, views, or parameters.
        """
        for condition in self._post_conditions:
            target = condition.input_columns
            input_data, extra_params = self._multi_source_extract(target, transformed_df, df, aggregates, views)
            if condition.vectorized:
                condition_result = condition.transform(**SchematicTransformation.prepare(input_data), **extra_params)
            else:
                condition_result = input_data.apply(lambda row: condition.transform(**row, **extra_params), axis=1)
            transformed_df = transformed_df[condition_result]
        return transformed_df

    def _apply_split_conditions(self, split_df):
        """Filter one split group through every declared split-condition."""
        for condition in self._split_conditions:
            condition_result = condition.transform(**split_df[condition.input_columns].to_dict(orient="series"))
            split_df = split_df[condition_result]
        return split_df

    @staticmethod
    def _align_tables(left, right, in_common, unique, names):
        """Join *left* and *right* on their shared key columns, both ways.

        Overlapping non-key columns are prefixed with the table name; the
        two one-sided joins are concatenated, rows missing either `unique`
        column are dropped, and duplicates on the `unique` pair removed.
        """
        # Handle column naming
        name_left, name_right = names
        left_index, right_index = in_common
        column_overlap = (set(left.columns) & set(right.columns)) - set(in_common)
        left = left.rename(columns={column: f"{name_left}_{column}" for column in column_overlap})
        right = right.rename(columns={column: f"{name_right}_{column}" for column in column_overlap})
        left_unique, right_unique = unique
        left_unique = left_unique if left_unique not in column_overlap else f"{name_left}_{left_unique}"
        right_unique = right_unique if right_unique not in column_overlap else f"{name_right}_{right_unique}"
        left.set_index(left_index, inplace=True)
        right.set_index(right_index, inplace=True)
        right_on_left = left.join(right, how="left")
        left_on_right = right.join(left, how="left")
        all_rows = pd.concat([right_on_left, left_on_right], axis=0).dropna(subset=[left_unique, right_unique])
        unique_rows = all_rows.drop_duplicates(subset=[left_unique, right_unique])
        return unique_rows.reset_index()

    @staticmethod
    def prepare(kwargs):
        """Recursively normalise a value for passing into a transform.

        dicts are prepared per entry, lists become Series, DataFrames become
        column dicts, scalar NA becomes None; everything else passes through.
        """
        if isinstance(kwargs, dict):
            return {key: SchematicTransformation.prepare(value) for key, value in kwargs.items()}
        if isinstance(kwargs, list):
            return pd.Series(kwargs)
        if isinstance(kwargs, pd.Series):
            return kwargs
        if isinstance(kwargs, pd.DataFrame):
            return {column: kwargs[column] for column in kwargs}
        if pd.isna(kwargs):
            return None
        return kwargs

    @staticmethod
    def _apply_func(df, transformation, extra_params, is_split):
        """Run one transformation over *df*, dispatching on arity,
        vectorisation and split mode; wraps any failure in
        ColumnGenerationError."""
        # @no:format
        try:
            if len(transformation.input_columns) == 1 and not extra_params:
                # Single-column fast path: feed the raw Series.
                stream = df[transformation.input_columns[0]]
                if transformation.transform is None:
                    return SchematicTransformation.prepare(stream)
                elif is_split:
                    return transformation.transform(stream)
                else:
                    if transformation.vectorized:
                        return transformation.transform(stream)
                    else:
                        return stream.apply(lambda row: transformation.transform(SchematicTransformation.prepare(row)))
            else:
                # Multi-input path: pass columns (and params) as keyword args.
                if is_split:
                    return transformation.transform(**SchematicTransformation.prepare(df.to_dict(orient="series")), **extra_params)
                else:
                    if transformation.vectorized:
                        return transformation.transform(**SchematicTransformation.prepare(df), **extra_params)
                    else:
                        return df.apply(lambda row: transformation.transform(**SchematicTransformation.prepare(row.to_dict()), **extra_params), axis=1)
        except Exception as exc:
            raise ColumnGenerationError(f"Failed to generate column '{transformation.name}'. REASON: {exc}") from exc
        # @do:format

    def _generate_columns(self, transformed_df, result, is_split):
        """Write a multi-column transformation result into transformed_df.

        Split mode receives a dict of column -> data; otherwise *result* is a
        sequence of per-row dicts, padded with None for keys a row lacks.
        """
        if is_split:
            for column_name, data in result.items():
                transformed_df[column_name] = data
            return transformed_df
        else:
            columns = {}
            # First pass: collect the union of keys across all rows.
            for col_info in result:
                for key in col_info.keys():
                    if key not in columns:
                        columns[key] = []
            # Second pass: materialise each column, None-filling gaps.
            for row in result:
                for column in columns.keys():
                    columns[column].append(row.get(column, None))
            for column, values in columns.items():
                transformed_df[column] = values
            return transformed_df

    def _apply_transform(self, transformed_df, input_df, is_split):
        """Execute every transformation in dependency order, routing results
        into columns, views (temporary) or aggregates as declared."""
        aggregates, views = {}, {}
        # @no:format
        for transformation in self._execution_order(input_df.columns):
            reduced_df, extra_params = None, None
            try:
                reduced_df, extra_params = self._multi_source_extract(transformation.input_columns, transformed_df, input_df, aggregates, views)
            except AssertionError as exp:
                raise ColumnGenerationError(f"Could not generate column: {transformation.name}. {exp}")
            if transformation.generates_columns:
                result = self._apply_func(reduced_df, transformation, extra_params, is_split)
                transformed_df = self._generate_columns(transformed_df, result, is_split)
            elif transformation.is_temporary:
                views[transformation.name] = self._apply_func(reduced_df, transformation, extra_params, is_split)
            else:
                result = self._apply_func(reduced_df, transformation, extra_params, is_split)
                if isinstance(result, (np.ndarray, pd.DataFrame, pd.Series)) and len(result) == 0:
                    raise EmptyColumnError(f"Could not generate {transformation.name}, since its transformation returned an empty result ({result})")
                transformed_df[transformation.name] = result
        # @do:format
        return transformed_df, aggregates, views

    def divide(self, target_columns, available_columns):
        """Partition *target_columns* across the given column sources.

        Earlier sources win ties; raises (assert) if any target column is not
        found in any source.
        """
        target_columns = set(target_columns)
        selected_columns = set()
        column_buckets = []
        for columns in available_columns:
            matching_column = (target_columns - selected_columns) & set(columns)
            selected_columns = selected_columns | matching_column
            column_buckets.append(matching_column)
        left_over = target_columns - selected_columns
        assert len(left_over) == 0, f"{list(left_over)} could not be matched with the input data"
        return column_buckets

    def _multi_source_extract(self, target_columns, transformed_df, input_df, aggregates, views):
        """Gather *target_columns* from all available sources.

        Returns a DataFrame of column data (transformed + input + views)
        plus a dict of scalar extras (aggregates + parameters).
        """
        available_columns = list(map(list, [transformed_df.columns, input_df.columns, aggregates.keys(),
                                            views.keys(), self._param_values.keys()]))
        trans_cols, input_cols, agg_cols, view_cols, param_cols = self.divide(target_columns, available_columns)
        reduced_df = pd.concat([transformed_df[trans_cols], input_df[input_cols]], axis=1)
        for column in view_cols:
            reduced_df[column] = views[column]
        extra = {}
        for column in agg_cols:
            extra[column] = aggregates[column]
        for column in param_cols:
            extra[column] = self._param_values[column]
        return reduced_df, extra

    def _execution_order(self, input_columns):
        """
        Calculate all transformations in the correct order
        """
        transformations = TransformDAG(list(input_columns) + list(self._param_values.keys()))
        all_columns = [column for column in self._columns if not column.name == "*"]
        # A "*" column (or no declared columns at all) means: pass through
        # every input column that is not already produced or copied.
        if any([column.name == "*" for column in self._columns]) or not all_columns:
            extra_columns = []
            for input_column in input_columns:
                already_included = False
                for column in all_columns:
                    if input_column == column.name:
                        already_included = True
                        break
                    elif input_column in column.input_columns and column.is_copy:
                        already_included = True
                        break
                if not already_included:
                    new_column = ColumnContainer(name=input_column, input_columns=[input_column], is_temporary=False,
                                                 transform=None, is_copy=True)
                    extra_columns.append(new_column)
            all_columns += extra_columns
        # Add all columns
        for column in all_columns:
            transformations.add(column)
        # Add all aggregates
        for aggregate in self._aggregations:
            transformations.add(aggregate)
        return transformations

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        """Run the full declared pipeline over *df* and return the result.

        Order: pre-conditions -> optional grouping or splitting ->
        transformations -> post-conditions -> optional indexing.
        """
        if df.empty:
            raise EmptyDataFrameError(
                "PandasDB cannot transform an empty dataframe.\nPlease look at either your initialize funtion or the inputs to the transform")
        """ Apply all pre conditions one at a time the dataframe is not grouped, else filter the groups"""
        if not self._groups:
            df = self._apply_pre_conditions(df)
            if df.empty:
                raise EmptyDataFrameError(
                    "The pre_conditions lead to an empty dataframe.\nPlease change the pre_conditions or the inputs to the transform")
        """ Group the data if needed """
        assert not (self._groups and self._splits), "Groups and Splits cannot be combined"
        if self._groups:
            if self._pre_conditions:
                # Pre-conditions run per group when grouping is declared.
                groups = df.groupby(self._groups.columns)
                df = groups.apply(lambda df: self._apply_pre_conditions(df).drop(columns=self._groups.columns))
            df = df.groupby(self._groups.columns).agg(list).reset_index()
        transformed_df = pd.DataFrame({})
        """ Split up the data into groups if needed """
        if self._splits:
            aggregates, views = defaultdict(list), defaultdict(list)
            if self._splits.sort_by.columns:
                _splitted = df.sort_values(self._splits.sort_by.columns).groupby(self._splits.group.columns)
            else:
                _splitted = df.groupby(self._splits.group.columns)
            split_transforms = []
            for _, gdf in _splitted:
                gdf = self._apply_split_conditions(gdf)
                curr_df = pd.DataFrame({column: gdf[column] for column in self._splits.sort_by.columns})
                curr_df, curr_aggregates, curr_views = self._apply_transform(curr_df, gdf, is_split=True)
                split_transforms.append(curr_df)
            transformed_df = pd.concat([transformed_df, *split_transforms], axis=0)
        else:
            transformed_df, aggregates, views = self._apply_transform(transformed_df, df, is_split=False)
        """ Apply all post conditions """
        transformed_df = self._apply_post_conditions(transformed_df, df, aggregates, views)
        """ Finally Index the Dataframe """
        if self._indexes.columns:
            if len(self._indexes.columns) == 1:
                index = self._indexes.columns[0]
            else:
                index = list(self._indexes.columns)
            transformed_df = transformed_df.set_index(index).sort_index()
        return transformed_df

    def __call__(self, *args, **kwargs) -> pd.DataFrame:
        """Apply the transformation: optional `initialize` hook builds the
        input frame, `transform` runs the pipeline, optional `finalize`
        post-processes the result."""
        if hasattr(self, "initialize"):
            df = to_df(type(self).initialize(*args, **kwargs))
        else:
            assert args and not kwargs, "SchematicTransformation only accepts a sigle DataFrame"
            df = to_df(args[0])
        transformed = self.transform(df)
        if hasattr(self, "finalize"):
            return type(self).finalize(transformed)
        else:
            return transformed | src/remake/schematic/transform.py | from collections import defaultdict
from typing import Union
import pandas as pd
import numpy as np
from remake.schematic.core.transformation import TransformationCore
class SchematicTransformation(TransformationCore):
def __init__(self, **given_params):
self._param_values = {}
for parameter in self.__parameters__:
name, identifier, dtype = parameter.name, parameter.identifier, parameter.dtype
if name not in given_params:
if parameter.default_value is None:
raise ValueError(f"{name} is needed to compute. Reason: {parameter.helper}")
else:
parameter.update(parameter.default_value)
self._param_values[identifier] = parameter.value
else:
if dtype is not None:
if not isinstance(given_params[name], dtype):
raise ValueError(f"{name} has type {type(given_params[name])} but {dtype} was expected")
parameter.update(given_params[name])
self._param_values[identifier] = parameter.value
def __new__(cls, *args, **kwargs) -> Union[TransformationCore, pd.DataFrame]:
assert not (kwargs and args)
obj_ref = super(cls.__bases__[-1], cls).__new__(cls)
obj_ref.__init__(**kwargs)
if not args:
return obj_ref
else:
return obj_ref(*args, **kwargs)
def _apply_pre_conditions(self, df: pd.DataFrame):
for condition in self._pre_conditions:
input_cols, param_cols = self.divide(condition.input_columns, [df.columns, self._param_values.keys()])
parmeters = {param: self._param_values[param] for param in param_cols}
if condition.vectorized:
condition_result = condition.transform(**SchematicTransformation.prepare(df[input_cols]), **parmeters)
else:
condition_result = df[input_cols].apply(lambda row: condition.transform(**row, **parmeters), axis=1)
df = df[condition_result]
return df
def _apply_post_conditions(self, transformed_df, df, aggregates, views):
for condition in self._post_conditions:
target = condition.input_columns
input_data, extra_params = self._multi_source_extract(target, transformed_df, df, aggregates, views)
if condition.vectorized:
condition_result = condition.transform(**SchematicTransformation.prepare(input_data), **extra_params)
else:
condition_result = input_data.apply(lambda row: condition.transform(**row, **extra_params), axis=1)
transformed_df = transformed_df[condition_result]
return transformed_df
def _apply_split_conditions(self, split_df):
for condition in self._split_conditions:
condition_result = condition.transform(**split_df[condition.input_columns].to_dict(orient="series"))
split_df = split_df[condition_result]
return split_df
@staticmethod
def _align_tables(left, right, in_common, unique, names):
# Handle column naming
name_left, name_right = names
left_index, right_index = in_common
column_overlap = (set(left.columns) & set(right.columns)) - set(in_common)
left = left.rename(columns={column: f"{name_left}_{column}" for column in column_overlap})
right = right.rename(columns={column: f"{name_right}_{column}" for column in column_overlap})
left_unique, right_unique = unique
left_unique = left_unique if left_unique not in column_overlap else f"{name_left}_{left_unique}"
right_unique = right_unique if right_unique not in column_overlap else f"{name_right}_{right_unique}"
left.set_index(left_index, inplace=True)
right.set_index(right_index, inplace=True)
right_on_left = left.join(right, how="left")
left_on_right = right.join(left, how="left")
all_rows = pd.concat([right_on_left, left_on_right], axis=0).dropna(subset=[left_unique, right_unique])
unique_rows = all_rows.drop_duplicates(subset=[left_unique, right_unique])
return unique_rows.reset_index()
@staticmethod
def prepare(kwargs):
if isinstance(kwargs, dict):
return {key: SchematicTransformation.prepare(value) for key, value in kwargs.items()}
if isinstance(kwargs, list):
return pd.Series(kwargs)
if isinstance(kwargs, pd.Series):
return kwargs
if isinstance(kwargs, pd.DataFrame):
return {column: kwargs[column] for column in kwargs}
if pd.isna(kwargs):
return None
return kwargs
@staticmethod
def _apply_func(df, transformation, extra_params, is_split):
# @no:format
try:
if len(transformation.input_columns) == 1 and not extra_params:
stream = df[transformation.input_columns[0]]
if transformation.transform is None:
return SchematicTransformation.prepare(stream)
elif is_split:
return transformation.transform(stream)
else:
if transformation.vectorized:
return transformation.transform(stream)
else:
return stream.apply(lambda row: transformation.transform(SchematicTransformation.prepare(row)))
else:
if is_split:
return transformation.transform(**SchematicTransformation.prepare(df.to_dict(orient="series")), **extra_params)
else:
if transformation.vectorized:
return transformation.transform(**SchematicTransformation.prepare(df), **extra_params)
else:
return df.apply(lambda row: transformation.transform(**SchematicTransformation.prepare(row.to_dict()), **extra_params), axis=1)
except Exception as exc:
raise ColumnGenerationError(f"Failed to generate column '{transformation.name}'. REASON: {exc}") from exc
# @do:format
def _generate_columns(self, transformed_df, result, is_split):
if is_split:
for column_name, data in result.items():
transformed_df[column_name] = data
return transformed_df
else:
columns = {}
for col_info in result:
for key in col_info.keys():
if key not in columns:
columns[key] = []
for row in result:
for column in columns.keys():
columns[column].append(row.get(column, None))
for column, values in columns.items():
transformed_df[column] = values
return transformed_df
def _apply_transform(self, transformed_df, input_df, is_split):
    """Run every transformation in DAG order, routing each result to the
    output frame, the aggregates dict, or the temporary views dict.

    Returns the tuple (transformed_df, aggregates, views).
    """
    # NOTE(review): aggregates is never populated inside this loop, only
    # read — confirm aggregate results are meant to be filled elsewhere.
    aggregates, views = {}, {}
    # @no:format
    for transformation in self._execution_order(input_df.columns):
        reduced_df, extra_params = None, None
        try:
            # Gather this transformation's inputs from all available sources.
            reduced_df, extra_params = self._multi_source_extract(transformation.input_columns, transformed_df, input_df, aggregates, views)
        except AssertionError as exp:
            raise ColumnGenerationError(f"Could not generate column: {transformation.name}. {exp}")
        if transformation.generates_columns:
            # A single transformation may emit several output columns at once.
            result = self._apply_func(reduced_df, transformation, extra_params, is_split)
            transformed_df = self._generate_columns(transformed_df, result, is_split)
        elif transformation.is_temporary:
            # Temporary results live in `views` and are never materialised
            # as output columns.
            views[transformation.name] = self._apply_func(reduced_df, transformation, extra_params, is_split)
        else:
            result = self._apply_func(reduced_df, transformation, extra_params, is_split)
            # An empty array/frame/series would silently produce a broken
            # column, so fail loudly instead.
            if isinstance(result, (np.ndarray, pd.DataFrame, pd.Series)) and len(result) == 0:
                raise EmptyColumnError(f"Could not generate {transformation.name}, since its transformation returned an empty result ({result})")
            transformed_df[transformation.name] = result
    # @do:format
    return transformed_df, aggregates, views
def divide(self, target_columns, available_columns):
    """Partition *target_columns* into one bucket per source collection.

    Each target column is claimed by the first collection in
    *available_columns* that contains it; buckets are returned as sets in
    the same order as the collections.

    :raises AssertionError: if any target matches no collection.
    """
    remaining = set(target_columns)
    column_buckets = []
    for candidate_columns in available_columns:
        claimed = remaining & set(candidate_columns)
        remaining = remaining - claimed
        column_buckets.append(claimed)
    assert not remaining, f"{list(remaining)} could not be matched with the input data"
    return column_buckets
def _multi_source_extract(self, target_columns, transformed_df, input_df, aggregates, views):
    """Collect each target column from whichever source provides it.

    Column-shaped sources (already-transformed columns, raw input
    columns, views) are merged into one reduced DataFrame; scalar-shaped
    sources (aggregates, declared parameters) are returned as extra
    keyword arguments.

    :raises AssertionError: (from ``divide``) if a target exists in no source.
    """
    # Source priority: transformed > input > aggregates > views > params —
    # the first source that contains a column claims it.
    available_columns = list(map(list, [transformed_df.columns, input_df.columns, aggregates.keys(),
                                        views.keys(), self._param_values.keys()]))
    trans_cols, input_cols, agg_cols, view_cols, param_cols = self.divide(target_columns, available_columns)
    reduced_df = pd.concat([transformed_df[trans_cols], input_df[input_cols]], axis=1)
    for column in view_cols:
        reduced_df[column] = views[column]
    extra = {}
    for column in agg_cols:
        extra[column] = aggregates[column]
    for column in param_cols:
        extra[column] = self._param_values[column]
    return reduced_df, extra
def _execution_order(self, input_columns):
    """
    Calculate all transformations in the correct order.

    Builds a TransformDAG seeded with the raw input columns and declared
    parameters, then registers every declared column and aggregate.
    """
    transformations = TransformDAG(list(input_columns) + list(self._param_values.keys()))
    # "*" is a wildcard pseudo-column, filtered out of the real column list.
    all_columns = [column for column in self._columns if not column.name == "*"]
    # A "*" column (or no declared columns at all) means: pass through every
    # input column that is not already produced or consumed as a copy.
    if any([column.name == "*" for column in self._columns]) or not all_columns:
        extra_columns = []
        for input_column in input_columns:
            already_included = False
            for column in all_columns:
                if input_column == column.name:
                    already_included = True
                    break
                elif input_column in column.input_columns and column.is_copy:
                    already_included = True
                    break
            if not already_included:
                # Synthesise an identity copy column for the untouched input.
                new_column = ColumnContainer(name=input_column, input_columns=[input_column], is_temporary=False,
                                             transform=None, is_copy=True)
                extra_columns.append(new_column)
        all_columns += extra_columns
    # Add all columns
    for column in all_columns:
        transformations.add(column)
    # Add all aggregates
    for aggregate in self._aggregations:
        transformations.add(aggregate)
    return transformations
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
    """Run the full transformation pipeline over *df*.

    Order of operations: pre-conditions, optional grouping or splitting,
    column transformations, post-conditions, and finally indexing.

    :raises EmptyDataFrameError: if *df* (or the pre-filtered data) is empty.
    """
    if df.empty:
        raise EmptyDataFrameError(
            "PandasDB cannot transform an empty dataframe.\nPlease look at either your initialize function or the inputs to the transform")
    # Apply all pre-conditions one at a time if the dataframe is not
    # grouped; grouped data is filtered per group below instead.
    if not self._groups:
        df = self._apply_pre_conditions(df)
        if df.empty:
            raise EmptyDataFrameError(
                "The pre_conditions lead to an empty dataframe.\nPlease change the pre_conditions or the inputs to the transform")
    # Group the data if needed.
    assert not (self._groups and self._splits), "Groups and Splits cannot be combined"
    if self._groups:
        if self._pre_conditions:
            # Filter each group independently before aggregating.
            groups = df.groupby(self._groups.columns)
            df = groups.apply(lambda df: self._apply_pre_conditions(df).drop(columns=self._groups.columns))
        df = df.groupby(self._groups.columns).agg(list).reset_index()
    transformed_df = pd.DataFrame({})
    # Split up the data into groups if needed.
    if self._splits:
        aggregates, views = defaultdict(list), defaultdict(list)
        if self._splits.sort_by.columns:
            _splitted = df.sort_values(self._splits.sort_by.columns).groupby(self._splits.group.columns)
        else:
            _splitted = df.groupby(self._splits.group.columns)
        split_transforms = []
        for _, gdf in _splitted:
            gdf = self._apply_split_conditions(gdf)
            # Seed each split frame with its sort columns so they survive
            # the transform.
            curr_df = pd.DataFrame({column: gdf[column] for column in self._splits.sort_by.columns})
            # NOTE(review): curr_aggregates/curr_views are discarded here —
            # confirm split aggregates should not feed the post-conditions.
            curr_df, curr_aggregates, curr_views = self._apply_transform(curr_df, gdf, is_split=True)
            split_transforms.append(curr_df)
        transformed_df = pd.concat([transformed_df, *split_transforms], axis=0)
    else:
        transformed_df, aggregates, views = self._apply_transform(transformed_df, df, is_split=False)
    # Apply all post-conditions.
    transformed_df = self._apply_post_conditions(transformed_df, df, aggregates, views)
    # Finally index the dataframe.
    if self._indexes.columns:
        if len(self._indexes.columns) == 1:
            index = self._indexes.columns[0]
        else:
            index = list(self._indexes.columns)
        transformed_df = transformed_df.set_index(index).sort_index()
    return transformed_df
def __call__(self, *args, **kwargs) -> pd.DataFrame:
    """Entry point: optionally build the input via a subclass ``initialize``
    hook, transform it, and optionally post-process via ``finalize``."""
    if hasattr(self, "initialize"):
        # Subclass supplies its own data-loading hook.
        df = to_df(type(self).initialize(*args, **kwargs))
    else:
        # Fixed typo in the assertion message ("sigle" -> "single").
        assert args and not kwargs, "SchematicTransformation only accepts a single DataFrame"
        df = to_df(args[0])
    transformed = self.transform(df)
    if hasattr(self, "finalize"):
        return type(self).finalize(transformed)
    # Stray dataset-table residue removed from the final line.
    return transformed
import unittest
from aiohttp.web import HTTPNotFound, HTTPMethodNotAllowed
from freesia.route import Route, Router
async def temp():
    """Do-nothing coroutine used as a placeholder route handler in the tests."""
    return None
class RouterTestCase(unittest.TestCase):
    """Tests for freesia Route/Router: registration, lookup, URL building."""

    def test_add_static_route(self):
        # A parameter-free path is stored in the static URL map.
        r = Route("/", ["GET"], temp, {
            "checking_param": False
        })
        router = Router()
        router.add_route(r)
        self.assertEqual(len(router.static_url_map), 1)

    def test_add_non_static_route(self):
        # A parameterised path is registered under its HTTP method instead.
        r = Route("/<name>", ["GET"], temp, {
            "checking_param": False
        })
        router = Router()
        router.add_route(r)
        self.assertIn("GET", router.method_map)
        self.assertEqual(len(router.method_map["GET"]), 1)

    def test_get_route_from_static(self):
        r = Route("/", ["GET"], temp, {
            "checking_param": False
        })
        router = Router()
        router.add_route(r)
        t, _ = router.get("/", "GET")
        self.assertIsNotNone(t)
        # Unknown static paths must raise 404.
        with self.assertRaises(HTTPNotFound):
            router.get("/notexist", "GET")

    def test_get_from_dyna(self):
        r = Route("/hello/<name>", ["GET"], temp, {
            "checking_param": False
        })
        router = Router()
        router.add_route(r)
        t, _ = router.get("/hello/name", "GET")
        self.assertIsNotNone(t)
        # Extra path segments must not match a single <name> parameter.
        with self.assertRaises(HTTPNotFound):
            router.get("/hello/not/name", "GET")

    def test_method_not_allowed(self):
        r = Route("/", ["GET"], temp, {
            "checking_param": False
        })
        router = Router()
        router.add_route(r)
        # Registered path with an unregistered method yields 405, not 404.
        with self.assertRaises(HTTPMethodNotAllowed):
            router.get("/", "POST")

    def test_build_url(self):
        r = Route("/test/<float:age>", ["GET"], temp, {
            "endpoint": "test",
            "checking_param": False
        })
        router = Router()
        router.add_route(r)
        self.assertEqual("/test/1.0", router.build_url("test", [1.0]))
        # Arguments that cannot be converted to the declared type are rejected.
        with self.assertRaises(ValueError):
            router.build_url("test", ["wrong"])
import unittest
from aiohttp.web import HTTPNotFound, HTTPMethodNotAllowed
from freesia.route import Route, Router
async def temp():
    """Do-nothing coroutine used as a placeholder route handler in the tests."""
    pass
class RouterTestCase(unittest.TestCase):
    """Tests for freesia Route/Router: registration, lookup, URL building."""

    def test_add_static_route(self):
        # A parameter-free path is stored in the static URL map.
        r = Route("/", ["GET"], temp, {
            "checking_param": False
        })
        router = Router()
        router.add_route(r)
        self.assertEqual(len(router.static_url_map), 1)

    def test_add_non_static_route(self):
        # A parameterised path is registered under its HTTP method instead.
        r = Route("/<name>", ["GET"], temp, {
            "checking_param": False
        })
        router = Router()
        router.add_route(r)
        self.assertIn("GET", router.method_map)
        self.assertEqual(len(router.method_map["GET"]), 1)

    def test_get_route_from_static(self):
        r = Route("/", ["GET"], temp, {
            "checking_param": False
        })
        router = Router()
        router.add_route(r)
        t, _ = router.get("/", "GET")
        self.assertIsNotNone(t)
        # Unknown static paths must raise 404.
        with self.assertRaises(HTTPNotFound):
            router.get("/notexist", "GET")

    def test_get_from_dyna(self):
        r = Route("/hello/<name>", ["GET"], temp, {
            "checking_param": False
        })
        router = Router()
        router.add_route(r)
        t, _ = router.get("/hello/name", "GET")
        self.assertIsNotNone(t)
        # Extra path segments must not match a single <name> parameter.
        with self.assertRaises(HTTPNotFound):
            router.get("/hello/not/name", "GET")

    def test_method_not_allowed(self):
        r = Route("/", ["GET"], temp, {
            "checking_param": False
        })
        router = Router()
        router.add_route(r)
        # Registered path with an unregistered method yields 405, not 404.
        with self.assertRaises(HTTPMethodNotAllowed):
            router.get("/", "POST")

    def test_build_url(self):
        r = Route("/test/<float:age>", ["GET"], temp, {
            "endpoint": "test",
            "checking_param": False
        })
        router = Router()
        router.add_route(r)
        self.assertEqual("/test/1.0", router.build_url("test", [1.0]))
        # Arguments that cannot be converted to the declared type are rejected.
        with self.assertRaises(ValueError):
            router.build_url("test", ["wrong"])
__author__ = '<NAME> <<EMAIL>>'
from unittest import TestSuite
from .testcase_create_job_invalid_data import CreateJobInvalidDataTestCase
from .testcase_create_job_incomplete_data import CreateJobIncompleteDataTestCase
from .testcase_create_job import CreateJobTestCase
from .testcase_get_job import GetJobTestCase
from .testcase_get_non_existent_job import GetNonExistentJobTestCase
from .testcase_get_list_jobs import GetJobListTestCase
from .testcase_get_job_status import GetJobStatusTestCase
from .testcase_transfer_job_to_non_existent_output import TransferJobToNonExistentOutputTestCase
from .testcase_transfer_job_ftp import TransferJobToFTPTestCase
from .testcase_transfer_job_s3 import TransferJobToS3TestCase
from .testcase_create_job_widevine_drm_invalid_config import CreateJobWidevineDrmInvalidConfigTestCase
from .testcase_create_job_widevine_drm import CreateJobWidevineDrmTestCase
from .testcase_create_job_playready_drm_invalid import CreateJobPlayreadyDrmInvalidConfigTestCase
from .testcase_create_job_playready_drm import CreateJobPlayreadyDrmTestCase
from .testcase_create_job_playready_widevine_drm_invalid import CreateJobPlayreadyWidevineDrmInvalidConfigTestCase
from .testcase_create_job_playready_widevine_drm import CreateJobPlayreadyWidevineDrmTestCase
from .testcase_create_job_hls_encryption import CreateJobHLSEncryptionTestCase
from .testcase_create_job_multiple_audio_streams import CreateJobWithMultipleAudioStreamsTestCase
from .testcase_create_job_azure_input import CreateJobAzureInputTestCase
from .testcase_create_job_s3_input import CreateJobS3InputTestCase
from .testcase_transfer_job_gcs import TransferJobToGCSTestCase
from .testcase_autotransfer_job_gcs import AutoTransferJobToGCSTestCase
from .testcase_autotransfer_job_ftp import AutoTransferJobToFTPTestCase
from .testcase_autotransfer_job_s3 import AutoTransferJobToS3TestCase
from .testcase_create_job_rotation import CreateJobWithRotationTestCase
from .testcase_create_job_specific_segment_length import CreateJobWithSpecificSegmentLengthTestCase
from .testcase_create_job_specific_video_audio_sample_rates import CreateJobWithSpecificVideoAndAudioSampleRatesTestCase
from .testcase_create_job_watermarked import CreateJobWithWatermarkTestCase
from .testcase_create_job_cropping_video import CreateJobWithVideoCroppingTestCase
from .testcase_transfer_job_azure import TransferJobToAzureTestCase
from .testcase_create_job_deinterlace import CreateJobWithDeinterlacing
from .testcase_create_job_mp3_audio_only import CreateJobAudioOnlyTestCase
from bitcodin.test.job.testcase_create_thumbnail import CreateThumbnailTestCase
from bitcodin.test.job.testcase_create_job_keep_aspect_ratio import CreateJobKeepAspectRatioTestCase
def get_test_suite():
    """Assemble the bitcodin job TestSuite in execution order.

    Covers job creation (plain, DRM, HLS, and input variants), job
    retrieval/status, and transfer/auto-transfer to every supported
    storage backend.
    """
    test_suite = TestSuite()
    test_suite.addTest(CreateJobInvalidDataTestCase())
    test_suite.addTest(CreateJobIncompleteDataTestCase())
    test_suite.addTest(CreateJobTestCase())
    test_suite.addTest(CreateJobWithMultipleAudioStreamsTestCase())
    test_suite.addTest(CreateJobHLSEncryptionTestCase())
    test_suite.addTest(CreateJobWidevineDrmInvalidConfigTestCase())
    test_suite.addTest(CreateJobWidevineDrmTestCase())
    test_suite.addTest(CreateJobPlayreadyDrmInvalidConfigTestCase())
    test_suite.addTest(CreateJobPlayreadyDrmTestCase())
    test_suite.addTest(CreateJobPlayreadyWidevineDrmInvalidConfigTestCase())
    test_suite.addTest(CreateJobPlayreadyWidevineDrmTestCase())
    test_suite.addTest(GetNonExistentJobTestCase())
    test_suite.addTest(GetJobTestCase())
    # TODO: fix test case to look up pages 2,3,4,5.... eventually the job list is so long that the job
    # TODO: is not in the first page anymore
    # test_suite.addTest(GetJobListTestCase())
    test_suite.addTest(GetJobStatusTestCase())
    test_suite.addTest(TransferJobToNonExistentOutputTestCase())
    test_suite.addTest(TransferJobToFTPTestCase())
    test_suite.addTest(TransferJobToS3TestCase())
    test_suite.addTest(CreateJobAzureInputTestCase())
    test_suite.addTest(CreateJobS3InputTestCase())
    test_suite.addTest(TransferJobToGCSTestCase())
    test_suite.addTest(AutoTransferJobToGCSTestCase())
    test_suite.addTest(AutoTransferJobToFTPTestCase())
    test_suite.addTest(AutoTransferJobToS3TestCase())
    test_suite.addTest(CreateJobWithRotationTestCase())
    test_suite.addTest(CreateJobWithSpecificSegmentLengthTestCase())
    test_suite.addTest(CreateJobWithSpecificVideoAndAudioSampleRatesTestCase())
    test_suite.addTest(CreateJobWithWatermarkTestCase())
    test_suite.addTest(CreateJobWithVideoCroppingTestCase())
    test_suite.addTest(TransferJobToAzureTestCase())
    test_suite.addTest(CreateJobWithDeinterlacing())
    test_suite.addTest(CreateJobAudioOnlyTestCase())
    test_suite.addTest(CreateThumbnailTestCase())
    test_suite.addTest(CreateJobKeepAspectRatioTestCase())
    return test_suite
__author__ = '<NAME> <<EMAIL>>'
from unittest import TestSuite
from .testcase_create_job_invalid_data import CreateJobInvalidDataTestCase
from .testcase_create_job_incomplete_data import CreateJobIncompleteDataTestCase
from .testcase_create_job import CreateJobTestCase
from .testcase_get_job import GetJobTestCase
from .testcase_get_non_existent_job import GetNonExistentJobTestCase
from .testcase_get_list_jobs import GetJobListTestCase
from .testcase_get_job_status import GetJobStatusTestCase
from .testcase_transfer_job_to_non_existent_output import TransferJobToNonExistentOutputTestCase
from .testcase_transfer_job_ftp import TransferJobToFTPTestCase
from .testcase_transfer_job_s3 import TransferJobToS3TestCase
from .testcase_create_job_widevine_drm_invalid_config import CreateJobWidevineDrmInvalidConfigTestCase
from .testcase_create_job_widevine_drm import CreateJobWidevineDrmTestCase
from .testcase_create_job_playready_drm_invalid import CreateJobPlayreadyDrmInvalidConfigTestCase
from .testcase_create_job_playready_drm import CreateJobPlayreadyDrmTestCase
from .testcase_create_job_playready_widevine_drm_invalid import CreateJobPlayreadyWidevineDrmInvalidConfigTestCase
from .testcase_create_job_playready_widevine_drm import CreateJobPlayreadyWidevineDrmTestCase
from .testcase_create_job_hls_encryption import CreateJobHLSEncryptionTestCase
from .testcase_create_job_multiple_audio_streams import CreateJobWithMultipleAudioStreamsTestCase
from .testcase_create_job_azure_input import CreateJobAzureInputTestCase
from .testcase_create_job_s3_input import CreateJobS3InputTestCase
from .testcase_transfer_job_gcs import TransferJobToGCSTestCase
from .testcase_autotransfer_job_gcs import AutoTransferJobToGCSTestCase
from .testcase_autotransfer_job_ftp import AutoTransferJobToFTPTestCase
from .testcase_autotransfer_job_s3 import AutoTransferJobToS3TestCase
from .testcase_create_job_rotation import CreateJobWithRotationTestCase
from .testcase_create_job_specific_segment_length import CreateJobWithSpecificSegmentLengthTestCase
from .testcase_create_job_specific_video_audio_sample_rates import CreateJobWithSpecificVideoAndAudioSampleRatesTestCase
from .testcase_create_job_watermarked import CreateJobWithWatermarkTestCase
from .testcase_create_job_cropping_video import CreateJobWithVideoCroppingTestCase
from .testcase_transfer_job_azure import TransferJobToAzureTestCase
from .testcase_create_job_deinterlace import CreateJobWithDeinterlacing
from .testcase_create_job_mp3_audio_only import CreateJobAudioOnlyTestCase
from bitcodin.test.job.testcase_create_thumbnail import CreateThumbnailTestCase
from bitcodin.test.job.testcase_create_job_keep_aspect_ratio import CreateJobKeepAspectRatioTestCase
def get_test_suite():
    """Assemble the bitcodin job TestSuite in execution order.

    Covers job creation (plain, DRM, HLS, and input variants), job
    retrieval/status, and transfer/auto-transfer to every supported
    storage backend.
    """
    test_suite = TestSuite()
    test_suite.addTest(CreateJobInvalidDataTestCase())
    test_suite.addTest(CreateJobIncompleteDataTestCase())
    test_suite.addTest(CreateJobTestCase())
    test_suite.addTest(CreateJobWithMultipleAudioStreamsTestCase())
    test_suite.addTest(CreateJobHLSEncryptionTestCase())
    test_suite.addTest(CreateJobWidevineDrmInvalidConfigTestCase())
    test_suite.addTest(CreateJobWidevineDrmTestCase())
    test_suite.addTest(CreateJobPlayreadyDrmInvalidConfigTestCase())
    test_suite.addTest(CreateJobPlayreadyDrmTestCase())
    test_suite.addTest(CreateJobPlayreadyWidevineDrmInvalidConfigTestCase())
    test_suite.addTest(CreateJobPlayreadyWidevineDrmTestCase())
    test_suite.addTest(GetNonExistentJobTestCase())
    test_suite.addTest(GetJobTestCase())
    # TODO: fix test case to look up pages 2,3,4,5.... eventually the job list is so long that the job
    # TODO: is not in the first page anymore
    # test_suite.addTest(GetJobListTestCase())
    test_suite.addTest(GetJobStatusTestCase())
    test_suite.addTest(TransferJobToNonExistentOutputTestCase())
    test_suite.addTest(TransferJobToFTPTestCase())
    test_suite.addTest(TransferJobToS3TestCase())
    test_suite.addTest(CreateJobAzureInputTestCase())
    test_suite.addTest(CreateJobS3InputTestCase())
    test_suite.addTest(TransferJobToGCSTestCase())
    test_suite.addTest(AutoTransferJobToGCSTestCase())
    test_suite.addTest(AutoTransferJobToFTPTestCase())
    test_suite.addTest(AutoTransferJobToS3TestCase())
    test_suite.addTest(CreateJobWithRotationTestCase())
    test_suite.addTest(CreateJobWithSpecificSegmentLengthTestCase())
    test_suite.addTest(CreateJobWithSpecificVideoAndAudioSampleRatesTestCase())
    test_suite.addTest(CreateJobWithWatermarkTestCase())
    test_suite.addTest(CreateJobWithVideoCroppingTestCase())
    test_suite.addTest(TransferJobToAzureTestCase())
    test_suite.addTest(CreateJobWithDeinterlacing())
    test_suite.addTest(CreateJobAudioOnlyTestCase())
    test_suite.addTest(CreateThumbnailTestCase())
    test_suite.addTest(CreateJobKeepAspectRatioTestCase())
    return test_suite
import cv2
import sys
import json
import pytesseract
def crop(imageFileName, cropXStart, cropXEnd, cropYStart, cropYEnd):
    """Load *imageFileName* and return the sub-image selected by the bounds.

    The X bounds slice the first (row) axis and the Y bounds the second
    (column) axis of the loaded array.
    """
    loaded = cv2.imread(imageFileName)
    return loaded[cropXStart:cropXEnd, cropYStart:cropYEnd]
def ocr(image):
    """Run Tesseract OCR on *image* and return the recognised text.

    The trailing form-feed control character (U+000C) that Tesseract
    appends is stripped from the result.
    """
    # NOTE(review): a custom config (r'--oem 1') was defined here but never
    # passed to pytesseract; the dead variable has been removed. If the
    # LSTM engine is actually wanted, pass config=r'--oem 1' explicitly.
    textDetails = pytesseract.image_to_string(image)
    textDetails = textDetails.replace('\x0c', '')
    return textDetails
def main():
    """Read a COCO-style JSON annotation file (argv[1]), crop every
    'title' annotation out of its image, and OCR the cropped regions."""
    cropped_images = list()
    if len(sys.argv) < 2:
        print('usage: image2text.py jsonFileName.jsonFileName')
    else:
        print("JSON file name",sys.argv[1])
        with open(sys.argv[1]) as f:
            data = json.load(f)
        # NOTE(review): if no 'title' category exists, category_id stays 0
        # and annotations with category_id 0 would be collected — confirm.
        category_id=0
        for category in data['categories']:
            if category['name'] == 'title':
                category_id = category['id']
        # NOTE(review): references the loop variable after the loop — this
        # prints the *last* category, not necessarily 'title'.
        print("extracting text for category "+category['name'])
        annotations = []
        for annotation in data['annotations']:
            if annotation['category_id'] == category_id:
                annotations.append(annotation)
        print('total number of title annotations:'+str(len(annotations)))
        #get images that have title annotations.
        for annotation in annotations:
            image_id = annotation['image_id']
            # imageFileName stays '' if no image record matches image_id.
            imageFileName = ''
            for image in data['images']:
                if image['id'] == image_id:
                    imageFileName = image['file_name']
            boundingBox = annotation['bbox']
            print("cropping image"+str(image))
            # NOTE(review): COCO bboxes are [x, y, width, height]; using
            # bbox[2]/bbox[3] as absolute end coordinates looks wrong —
            # likely should be x+width / y+height. Confirm the format.
            cropYStart = int(boundingBox[0])
            cropYEnd = int(boundingBox[2])
            cropXStart = int(boundingBox[1])
            cropXEnd = int(boundingBox[3])
            #print("bbox coordinates for image are "+str(boundingBox))
            cropped_image = crop(imageFileName,cropXStart,cropXEnd,cropYStart,cropYEnd)
            cropped_images.append(cropped_image)
        print('created total number of cropped images: '+str(len(cropped_images)))
        textDataStrings = []
        #cv2.imwrite('cropped.png',cropped_images[0])
        for cropped_image in cropped_images:
            textData = ocr(cropped_image)
            textDataStrings.append(textData)
        print('OCR results:')
        print('total number of results:'+str(len(textDataStrings)))
        print(textDataStrings)
        print('done OCRing')
        print('exitting')
# Script entry point (stray dataset-table residue removed from this line).
if __name__ == '__main__':
    main()
import cv2
import sys
import json
import pytesseract
def crop(imageFileName, cropXStart,cropXEnd,cropYStart,cropYEnd):
    """crops the given image with the given bounding box coordinates."""
    # Debug aids left disabled:
    #print('cropping')
    #print(type(cropXEnd))
    image = cv2.imread(imageFileName)
    # The X bounds slice the first (row) axis, the Y bounds the second
    # (column) axis of the loaded array.
    croppedImage = image[cropXStart:cropXEnd,cropYStart:cropYEnd]
    #cv2.imshow("cropped "+imageFileName,croppedImage)
    #cv2.waitKey(0)
    return croppedImage
def ocr(image):
    """Run Tesseract OCR on *image* and return the recognised text."""
    # NOTE(review): defined but never passed to pytesseract — confirm
    # whether config=tesseract_config was intended.
    tesseract_config = r'--oem 1'
    textDetails = pytesseract.image_to_string(image)
    #replace '\x0c'
    # Tesseract appends a trailing form-feed control character; strip it.
    textDetails = textDetails.replace('\x0c','')
    #print(textDetails)
    #print('returning from crop')
    return textDetails
def main():
    """Read a COCO-style JSON annotation file (argv[1]), crop every
    'title' annotation out of its image, and OCR the cropped regions."""
    cropped_images = list()
    if len(sys.argv) < 2:
        print('usage: image2text.py jsonFileName.jsonFileName')
    else:
        print("JSON file name",sys.argv[1])
        with open(sys.argv[1]) as f:
            data = json.load(f)
        # NOTE(review): if no 'title' category exists, category_id stays 0
        # and annotations with category_id 0 would be collected — confirm.
        category_id=0
        for category in data['categories']:
            if category['name'] == 'title':
                category_id = category['id']
        # NOTE(review): references the loop variable after the loop — this
        # prints the *last* category, not necessarily 'title'.
        print("extracting text for category "+category['name'])
        annotations = []
        for annotation in data['annotations']:
            if annotation['category_id'] == category_id:
                annotations.append(annotation)
        print('total number of title annotations:'+str(len(annotations)))
        #get images that have title annotations.
        for annotation in annotations:
            image_id = annotation['image_id']
            # imageFileName stays '' if no image record matches image_id.
            imageFileName = ''
            for image in data['images']:
                if image['id'] == image_id:
                    imageFileName = image['file_name']
            boundingBox = annotation['bbox']
            print("cropping image"+str(image))
            # NOTE(review): COCO bboxes are [x, y, width, height]; using
            # bbox[2]/bbox[3] as absolute end coordinates looks wrong —
            # likely should be x+width / y+height. Confirm the format.
            cropYStart = int(boundingBox[0])
            cropYEnd = int(boundingBox[2])
            cropXStart = int(boundingBox[1])
            cropXEnd = int(boundingBox[3])
            #print("bbox coordinates for image are "+str(boundingBox))
            cropped_image = crop(imageFileName,cropXStart,cropXEnd,cropYStart,cropYEnd)
            cropped_images.append(cropped_image)
        print('created total number of cropped images: '+str(len(cropped_images)))
        textDataStrings = []
        #cv2.imwrite('cropped.png',cropped_images[0])
        for cropped_image in cropped_images:
            textData = ocr(cropped_image)
            textDataStrings.append(textData)
        print('OCR results:')
        print('total number of results:'+str(len(textDataStrings)))
        print(textDataStrings)
        print('done OCRing')
        print('exitting')
# Script entry point (stray dataset-table residue removed from this line).
if __name__ == '__main__':
    main()
import numpy as np
import unittest
from day_two import unittest_
# NOTE(review): this whole suite is hard-disabled with `if False:` — flip to
# True (or drop the guard) once GradientBoostingRegressor_ is testable again.
if False:
    class _GradientBoostingRegressorTest(unittest_.TestCase):
        """Unit tests for the custom day_two gradient-boosting regressor."""

        def test_bootstrap_method(self):
            import day_two
            gb_regr = day_two.sklearn_.ensemble._gradient_boosting.GradientBoostingRegressor_()
            n_samples = 100
            train_indices, test_indices = gb_regr\
                ._bootstrap_data_indices(n_samples)
            self.assertEqual(
                len(train_indices) + len(test_indices),
                n_samples,
                "size of train and test data set must sum to n_samples")
            self.assertEqual(
                len(test_indices),
                len(np.unique(test_indices)),
                "test data set is not supposed to be bootstrapped in any way")
            self.assertEqual(
                len(np.intersect1d(train_indices, test_indices)),
                0,
                "test and train data must have zero intersection")

        def test_single_stage(self):
            import day_two
            # generate data
            t = np.linspace(0, 1, 100)
            # NOTE(review): randn() is a single scalar added to every sample —
            # randn(len(t)) was probably intended for per-sample noise.
            y = t + 0.1*np.random.randn()
            X = np.column_stack((t, t, t))
            gb_regr = day_two.sklearn_.ensemble._gradient_boosting.GradientBoostingRegressor_(n_stage_estimators=5)
            residual_rate = 0.1
            estimator, y_pred, score = gb_regr._fit_stage(
                X, y, np.mean(y), residual_rate)
            self.assertTrue(
                score >= 0,
                "estimators with R < 0 should be discarded")
            if estimator is not None:
                selected_features_columns = estimator._wh
                self.assertTrue(
                    len(selected_features_columns) > 0,
                    "num of selected features at each stage must be > 0")
                y_pred_next = np.mean(y) + residual_rate * estimator.predict(X)\
                    .reshape(len(y), )
                np.testing.assert_array_equal(
                    y_pred_next,
                    y_pred,
                    "stage prediction does not represent stagewise addetive model")

        def test_fit(self):
            import day_two
            # generate data
            t = np.linspace(0, 1, 100)
            y = t + 0.1*np.random.randn()
            X = np.column_stack((t, t, t))
            gb_regr = day_two.sklearn_.ensemble._gradient_boosting.GradientBoostingRegressor_(n_max_stages=3)
            gb_regr.fit(X, y)
            self.assertEqual(
                len(gb_regr.estimators_),
                gb_regr.n_stages + 1,
                "number of used estimators should be equal to #stages + 1")

        def test_predict(self):
            import day_two
            # generate data
            t = np.linspace(0, 1, 100)
            y = t + 0.1*np.random.randn()
            X = np.column_stack((t, t, t))
            gb_regr = day_two.sklearn_.ensemble._gradient_boosting.GradientBoostingRegressor_(n_max_stages=3)
            gb_regr.fit(X, y)
            y_pred = gb_regr.predict(X)
            self.assertTrue(
                len(y) == len(y_pred),
                "dimention of y and y_pred must be the same")
# Script entry point (stray dataset-table residue removed from this line).
if __name__ == '__main__':
    unittest_.main()
import numpy as np
import unittest
from day_two import unittest_
# NOTE(review): this whole suite is hard-disabled with `if False:` — flip to
# True (or drop the guard) once GradientBoostingRegressor_ is testable again.
if False:
    class _GradientBoostingRegressorTest(unittest_.TestCase):
        """Unit tests for the custom day_two gradient-boosting regressor."""

        def test_bootstrap_method(self):
            import day_two
            gb_regr = day_two.sklearn_.ensemble._gradient_boosting.GradientBoostingRegressor_()
            n_samples = 100
            train_indices, test_indices = gb_regr\
                ._bootstrap_data_indices(n_samples)
            self.assertEqual(
                len(train_indices) + len(test_indices),
                n_samples,
                "size of train and test data set must sum to n_samples")
            self.assertEqual(
                len(test_indices),
                len(np.unique(test_indices)),
                "test data set is not supposed to be bootstrapped in any way")
            self.assertEqual(
                len(np.intersect1d(train_indices, test_indices)),
                0,
                "test and train data must have zero intersection")

        def test_single_stage(self):
            import day_two
            # generate data
            t = np.linspace(0, 1, 100)
            # NOTE(review): randn() is a single scalar added to every sample —
            # randn(len(t)) was probably intended for per-sample noise.
            y = t + 0.1*np.random.randn()
            X = np.column_stack((t, t, t))
            gb_regr = day_two.sklearn_.ensemble._gradient_boosting.GradientBoostingRegressor_(n_stage_estimators=5)
            residual_rate = 0.1
            estimator, y_pred, score = gb_regr._fit_stage(
                X, y, np.mean(y), residual_rate)
            self.assertTrue(
                score >= 0,
                "estimators with R < 0 should be discarded")
            if estimator is not None:
                selected_features_columns = estimator._wh
                self.assertTrue(
                    len(selected_features_columns) > 0,
                    "num of selected features at each stage must be > 0")
                y_pred_next = np.mean(y) + residual_rate * estimator.predict(X)\
                    .reshape(len(y), )
                np.testing.assert_array_equal(
                    y_pred_next,
                    y_pred,
                    "stage prediction does not represent stagewise addetive model")

        def test_fit(self):
            import day_two
            # generate data
            t = np.linspace(0, 1, 100)
            y = t + 0.1*np.random.randn()
            X = np.column_stack((t, t, t))
            gb_regr = day_two.sklearn_.ensemble._gradient_boosting.GradientBoostingRegressor_(n_max_stages=3)
            gb_regr.fit(X, y)
            self.assertEqual(
                len(gb_regr.estimators_),
                gb_regr.n_stages + 1,
                "number of used estimators should be equal to #stages + 1")

        def test_predict(self):
            import day_two
            # generate data
            t = np.linspace(0, 1, 100)
            y = t + 0.1*np.random.randn()
            X = np.column_stack((t, t, t))
            gb_regr = day_two.sklearn_.ensemble._gradient_boosting.GradientBoostingRegressor_(n_max_stages=3)
            gb_regr.fit(X, y)
            y_pred = gb_regr.predict(X)
            self.assertTrue(
                len(y) == len(y_pred),
                "dimention of y and y_pred must be the same")
# Script entry point (stray dataset-table residue removed from this line).
if __name__ == '__main__':
    unittest_.main()
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Assessment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=250)),
('pub_date', models.DateTimeField(verbose_name=b'date published')),
('abbreviation', models.CharField(help_text=b'Assessment abbreviation', max_length=250)),
('version', models.CharField(help_text=b'version', max_length=10)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CognitiveAtlasConcept',
fields=[
('name', models.CharField(max_length=200)),
('cog_atlas_id', models.CharField(max_length=200, serialize=False, primary_key=True)),
('definition', models.CharField(default=None, max_length=200)),
],
options={
'ordering': ['name'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CognitiveAtlasTask',
fields=[
('name', models.CharField(max_length=200)),
('cog_atlas_id', models.CharField(max_length=200, serialize=False, primary_key=True)),
],
options={
'ordering': ['name'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.CharField(max_length=500)),
('label', models.CharField(help_text=b'question unique label', unique=True, max_length=250)),
('required', models.BooleanField(default=True, verbose_name=b'Required', choices=[(False, b'Not required'), (True, b'Required')])),
('data_type', models.CharField(help_text=b'Data type of the question answer', max_length=200, verbose_name=b'Data Type', choices=[(b'LONGINT', b'Long Integer'), (b'DATETIME', b'Date/Time'), (b'TEXT', b'Text'), (b'INT', b'Integer'), (b'DOUBLE', b'Double')])),
('options', models.CharField(default=None, max_length=500)),
('assessment', models.ForeignKey(to='assessments.Assessment')),
('cognitive_atlas_concept', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, verbose_name=b'Cognitive Atlas Concept', to='assessments.CognitiveAtlasConcept', help_text=b"Concept defined in the <a href='http://www.cognitiveatlas.org/'>Cognitive Atlas</a>", null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='QuestionOption',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('numerical_score', models.IntegerField()),
('text', models.CharField(max_length=250)),
('questions', models.ManyToManyField(to='assessments.Question')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='assessment',
name='cognitive_atlas_task',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, verbose_name=b'Cognitive Atlas Task', to='assessments.CognitiveAtlasTask', help_text=b"Assessment defined in the <a href='http://www.cognitiveatlas.org/'>Cognitive Atlas</a>", null=True),
preserve_default=True,
),
] | cogpheno/apps/assessments/migrations/old/0001_initial.py | from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Assessment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=250)),
('pub_date', models.DateTimeField(verbose_name=b'date published')),
('abbreviation', models.CharField(help_text=b'Assessment abbreviation', max_length=250)),
('version', models.CharField(help_text=b'version', max_length=10)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CognitiveAtlasConcept',
fields=[
('name', models.CharField(max_length=200)),
('cog_atlas_id', models.CharField(max_length=200, serialize=False, primary_key=True)),
('definition', models.CharField(default=None, max_length=200)),
],
options={
'ordering': ['name'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CognitiveAtlasTask',
fields=[
('name', models.CharField(max_length=200)),
('cog_atlas_id', models.CharField(max_length=200, serialize=False, primary_key=True)),
],
options={
'ordering': ['name'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.CharField(max_length=500)),
('label', models.CharField(help_text=b'question unique label', unique=True, max_length=250)),
('required', models.BooleanField(default=True, verbose_name=b'Required', choices=[(False, b'Not required'), (True, b'Required')])),
('data_type', models.CharField(help_text=b'Data type of the question answer', max_length=200, verbose_name=b'Data Type', choices=[(b'LONGINT', b'Long Integer'), (b'DATETIME', b'Date/Time'), (b'TEXT', b'Text'), (b'INT', b'Integer'), (b'DOUBLE', b'Double')])),
('options', models.CharField(default=None, max_length=500)),
('assessment', models.ForeignKey(to='assessments.Assessment')),
('cognitive_atlas_concept', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, verbose_name=b'Cognitive Atlas Concept', to='assessments.CognitiveAtlasConcept', help_text=b"Concept defined in the <a href='http://www.cognitiveatlas.org/'>Cognitive Atlas</a>", null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='QuestionOption',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('numerical_score', models.IntegerField()),
('text', models.CharField(max_length=250)),
('questions', models.ManyToManyField(to='assessments.Question')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='assessment',
name='cognitive_atlas_task',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, verbose_name=b'Cognitive Atlas Task', to='assessments.CognitiveAtlasTask', help_text=b"Assessment defined in the <a href='http://www.cognitiveatlas.org/'>Cognitive Atlas</a>", null=True),
preserve_default=True,
),
] | 0.656548 | 0.164114 |
# Internal module. Internal API may move, disappear or otherwise change at any
# time and without notice.
from __future__ import print_function, unicode_literals
class LatexContextDb(object):
r"""
Store a database of specifications of known macros, environments, and other
latex specials. This might be, e.g., how many arguments a macro accepts, or
how to determine the text representation of a macro or environment.
When used with :py:class:`pylatexenc.latexwalker.LatexWalker`, the
specifications describe mostly rules for parsing arguments of macros and
environments, and which sequences of characters to consider as "latex
specials". Specifications for macros, environments, and other specials are
stored as :py:class:`MacroSpec`, :py:class:`EnvironmentSpec`, and
:py:class:`SpecialsSpec` instances, respectively.
When used with :py:class:`pylatexenc.latex2text.LatexNodes2Text`, the
specifications for macros, environments, and other specials are stored as
:py:class:`pylatexenc.latex2text.MacroTextSpec` ,
:py:class:`pylatexenc.latex2text.EnvironmentTextSpec`, and
:py:class:`pylatexenc.latex2text.SpecialsTextSpec` instances, respectively.
In fact, the objects stored in this database may be of any type, except that
macro specifications must have an attribute `macroname`, environment
specifications must have an attribute `environmentname`, and specials
specification must have an attribute `specials_chars`.
The `LatexContextDb` instance is meant to be (pseudo-)immutable. Once
constructed and all the definitions added with
:py:meth:`add_context_category()`, one should refrain from modifying it
directly after providing it to, e.g., a
:py:class:`~pylatexenc.latexwalker.LatexWalker` object. The reason is that
the latex walker keeps track of what the latex context was when parsing
nodes, and modifying the context will modify that stored information, too.
Instead of being tempted to modify the object, create a new one with
:py:meth:`filter_context()`.
See :py:func:`pylatexenc.latexwalker.get_default_latex_context_db()` for the
default latex context for `latexwalker` with a default collection of known
latex macros and environments.
See :py:func:`pylatexenc.latex2text.get_default_latex_context_db()` for the
default latex context for `latex2text` with a set of text replacements for a
collection of known macros and environments.
"""
def __init__(self, **kwargs):
super(LatexContextDb, self).__init__(**kwargs)
self.category_list = []
self.d = {}
self.unknown_macro_spec = None
self.unknown_environment_spec = None
self.unknown_specials_spec = None
def add_context_category(self, category, macros=[], environments=[], specials=[],
prepend=False, insert_before=None, insert_after=None):
r"""
Register a category of macro and environment specifications in the context
database.
The category name `category` must not already exist in the database.
The argument `macros` is an iterable (e.g., a list) of macro
specification objects. The argument `environments` is an iterable
(e.g., a list) of environment spec objects. Similarly, the `specials`
argument is an iterable of latex specials spec instances.
If you specify `prepend=True`, then macro and environment lookups will
prioritize this category over other categories. Categories are normally
searched for in the order they are registered to the database; if you
specify `prepend=True`, then the new category is prepended to the
existing list so that it is searched first.
If `insert_before` is not `None`, then it must be a string; the
definitions are inserted in the category list immediately before the
given category name, or at the beginning of the list if the given
category doesn't exist. If `insert_after` is not `None`, then it must
be a string; the definitions are inserted in the category list
immediately after the given category name, or at the end of the list if
the given category doesn't exist.
You may only specify one of `prepend=True`, `insert_before='...'` or
`insert_after='...'`.
"""
if category in self.category_list:
raise ValueError("Category {} is already registered in the context database"
.format(category))
# ensure only one of these options is set
if len([ x for x in (prepend, insert_before, insert_after) if x ]) > 1:
raise TypeError("add_context_category(): You may only specify one of "
"prepend=True, insert_before=... or insert_after=...")
if prepend:
self.category_list.insert(0, category)
elif insert_before:
if insert_before in self.category_list:
i = self.category_list.index(insert_before)
else:
i = 0
self.category_list.insert(i, category)
elif insert_after:
if insert_after in self.category_list:
i = self.category_list.index(insert_after) + 1 # insert after found category
else:
i = len(self.category_list)
self.category_list.insert(i, category)
else:
self.category_list.append(category)
self.d[category] = {
'macros': dict( (m.macroname, m) for m in macros ),
'environments': dict( (e.environmentname, e) for e in environments ),
'specials': dict( (s.specials_chars, s) for s in specials ),
}
def set_unknown_macro_spec(self, macrospec):
r"""
Set the macro spec to use when encountering a macro that is not in the
database.
"""
self.unknown_macro_spec = macrospec
def set_unknown_environment_spec(self, environmentspec):
r"""
Set the environment spec to use when encountering a LaTeX environment that
is not in the database.
"""
self.unknown_environment_spec = environmentspec
def set_unknown_specials_spec(self, specialsspec):
r"""
Set the latex specials spec to use when encountering a LaTeX environment
that is not in the database.
"""
self.unknown_specials_spec = specialsspec
def categories(self):
r"""
Return a list of valid category names that are registered in the current
database context.
"""
return list(self.category_list)
def get_macro_spec(self, macroname):
r"""
Look up a macro specification by macro name. The macro name is searched for
in all categories one by one and the first match is returned.
Returns a macro spec instance that matches the given `macroname`. If
the macro name was not found, we return the default macro specification
set by :py:meth:`set_unknown_macro_spec()` or `None` if no such spec was
set.
"""
for cat in self.category_list:
# search categories in the given order
if macroname in self.d[cat]['macros']:
return self.d[cat]['macros'][macroname]
return self.unknown_macro_spec
def get_environment_spec(self, environmentname):
r"""
Look up an environment specification by environment name. The environment
name is searched for in all categories one by one and the first match is
returned.
Returns the environment spec. If the environment name was not found, we
return the default environment specification set by
:py:meth:`set_unknown_environment_spec()` or `None` if no such spec was
set.
"""
for cat in self.category_list:
# search categories in the given order
if environmentname in self.d[cat]['environments']:
return self.d[cat]['environments'][environmentname]
return self.unknown_environment_spec
def get_specials_spec(self, specials_chars):
r"""
Look up a "latex specials" specification by character sequence. The
sequence name is searched for in all categories one by one and the first
match is returned.
If you are parsing a chunk of LaTeX code, you should use
:py:meth:`test_for_specials()` instead. Unlike
:py:meth:`test_for_specials()`, :py:meth:`get_specials_spec()` returns
the first match regardless of matched length. [Rationale: we only need
to worry about matching the longest specials sequence when parsing LaTeX
code. Calling `get_specials_spec()` means one has already parsed the
sequence and one is looking up additional specs on it.]
Returns the specials spec. If the latex specials was not found, we
return the default latex specials specification set by
:py:meth:`set_unknown_specials_spec()` or `None` if no such spec was
set.
"""
for cat in self.category_list:
# search categories in the given order
if specials_chars in self.d[cat]['specials']:
return self.d[cat]['specials'][specials_chars]
return self.unknown_specials_spec
def test_for_specials(self, s, pos, parsing_state=None):
r"""
Test the given position in the string for any LaTeX specials. The lookup
proceeds by searching for in all categories one by one and the first
match is returned, except that the longest match accross all categories
is returned. For instance, a match of '``' in a later category will
take precedence over a match of '`' in a earlier-searched category.
Returns a specials spec instance, or `None` if no specials are detected
at the position `pos`.
"""
best_match_len = 0
best_match_s = None
for cat in self.category_list:
# search categories in the given order
for specials_chars in self.d[cat]['specials'].keys():
if len(specials_chars) > best_match_len and s.startswith(specials_chars, pos):
best_match_s = self.d[cat]['specials'][specials_chars]
best_match_len = len(specials_chars)
return best_match_s # this is None if no match
def iter_macro_specs(self, categories=None):
r"""
Yield the macro specs corresponding to all macros in the given categories.
If `categories` is `None`, then the known macro specs from all
categories are provided in one long iterable sequence. Otherwise,
`categories` should be a list or iterable of category names (e.g.,
'latex-base') of macro specs to return.
The macro specs from the different categories specified are concatenated
into one long sequence which is yielded spec by spec.
"""
if categories is None:
categories = self.category_list
for c in categories:
if c not in self.category_list:
raise ValueError(
"Invalid latex macro spec db category: {!r} (Expected one of {!r})"
.format(c, self.category_list)
)
for spec in self.d[c]['macros'].values():
yield spec
def iter_environment_specs(self, categories=None):
r"""
Yield the environment specs corresponding to all environments in the given
categories.
If `categories` is `None`, then the known environment specs from all
categories are provided in one long iterable sequence. Otherwise,
`categories` should be a list or iterable of category names (e.g.,
'latex-base') of environment specs to return.
The environment specs from the different categories specified are
concatenated into one long sequence which is yielded spec by spec.
"""
if categories is None:
categories = self.category_list
for c in categories:
if c not in self.category_list:
raise ValueError(
"Invalid latex environment spec db category: {!r} (Expected one of {!r})"
.format(c, self.category_list)
)
for spec in self.d[c]['environments'].values():
yield spec
def iter_specials_specs(self, categories=None):
r"""
Yield the specials specs corresponding to all environments in the given
categories.
If `categories` is `None`, then the known specials specs from all
categories are provided in one long iterable sequence. Otherwise,
`categories` should be a list or iterable of category names (e.g.,
'latex-base') of specials specs to return.
The specials specs from the different categories specified are
concatenated into one long sequence which is yielded spec by spec.
"""
if categories is None:
categories = self.category_list
for c in categories:
if c not in self.category_list:
raise ValueError(
"Invalid latex environment spec db category: {!r} (Expected one of {!r})"
.format(c, self.category_list)
)
for spec in self.d[c]['specials'].values():
yield spec
def filter_context(self, keep_categories=[], exclude_categories=[],
keep_which=[]):
r"""
Return a new :py:class:`LatexContextDb` instance where we only keep
certain categories of macro and environment specifications.
If `keep_categories` is set to a nonempty list, then the returned
context will not contain any definitions that do not correspond to the
specified categories.
If `exclude_categories` is set to a nonempty list, then the returned
context will not contain any definitions that correspond to the
specified categories.
It is explicitly fine to have category names in `keep_categories` and
`exclude_categories` that don't exist in the present object
(cf. :py:meth:`categories()`).
The argument `keep_which`, if non-empty, specifies which definitions to
keep. It should be a subset of the list ['macros', 'environments',
'specials'].
The returned context will make a copy of the dictionaries that store the
macro and environment specifications, but the specification classes (and
corresponding argument parsers) might correspond to the same instances.
I.e., the returned context is not a full deep copy.
"""
new_context = LatexContextDb()
new_context.unknown_macro_spec = self.unknown_macro_spec
new_context.unknown_environment_spec = self.unknown_environment_spec
new_context.unknown_specials_spec = self.unknown_specials_spec
keep_macros = not keep_which or 'macros' in keep_which
keep_environments = not keep_which or 'environments' in keep_which
keep_specials = not keep_which or 'specials' in keep_which
for cat in self.category_list:
if keep_categories and cat not in keep_categories:
continue
if exclude_categories and cat in exclude_categories:
continue
# include this category
new_context.add_context_category(
cat,
macros=self.d[cat]['macros'].values() if keep_macros else [],
environments=self.d[cat]['environments'].values() if keep_environments else [],
specials=self.d[cat]['specials'].values() if keep_specials else [],
)
return new_context | pylatexenc/macrospec/_latexcontextdb.py |
# Internal module. Internal API may move, disappear or otherwise change at any
# time and without notice.
from __future__ import print_function, unicode_literals
class LatexContextDb(object):
r"""
Store a database of specifications of known macros, environments, and other
latex specials. This might be, e.g., how many arguments a macro accepts, or
how to determine the text representation of a macro or environment.
When used with :py:class:`pylatexenc.latexwalker.LatexWalker`, the
specifications describe mostly rules for parsing arguments of macros and
environments, and which sequences of characters to consider as "latex
specials". Specifications for macros, environments, and other specials are
stored as :py:class:`MacroSpec`, :py:class:`EnvironmentSpec`, and
:py:class:`SpecialsSpec` instances, respectively.
When used with :py:class:`pylatexenc.latex2text.LatexNodes2Text`, the
specifications for macros, environments, and other specials are stored as
:py:class:`pylatexenc.latex2text.MacroTextSpec` ,
:py:class:`pylatexenc.latex2text.EnvironmentTextSpec`, and
:py:class:`pylatexenc.latex2text.SpecialsTextSpec` instances, respectively.
In fact, the objects stored in this database may be of any type, except that
macro specifications must have an attribute `macroname`, environment
specifications must have an attribute `environmentname`, and specials
specification must have an attribute `specials_chars`.
The `LatexContextDb` instance is meant to be (pseudo-)immutable. Once
constructed and all the definitions added with
:py:meth:`add_context_category()`, one should refrain from modifying it
directly after providing it to, e.g., a
:py:class:`~pylatexenc.latexwalker.LatexWalker` object. The reason is that
the latex walker keeps track of what the latex context was when parsing
nodes, and modifying the context will modify that stored information, too.
Instead of being tempted to modify the object, create a new one with
:py:meth:`filter_context()`.
See :py:func:`pylatexenc.latexwalker.get_default_latex_context_db()` for the
default latex context for `latexwalker` with a default collection of known
latex macros and environments.
See :py:func:`pylatexenc.latex2text.get_default_latex_context_db()` for the
default latex context for `latex2text` with a set of text replacements for a
collection of known macros and environments.
"""
def __init__(self, **kwargs):
super(LatexContextDb, self).__init__(**kwargs)
self.category_list = []
self.d = {}
self.unknown_macro_spec = None
self.unknown_environment_spec = None
self.unknown_specials_spec = None
def add_context_category(self, category, macros=[], environments=[], specials=[],
prepend=False, insert_before=None, insert_after=None):
r"""
Register a category of macro and environment specifications in the context
database.
The category name `category` must not already exist in the database.
The argument `macros` is an iterable (e.g., a list) of macro
specification objects. The argument `environments` is an iterable
(e.g., a list) of environment spec objects. Similarly, the `specials`
argument is an iterable of latex specials spec instances.
If you specify `prepend=True`, then macro and environment lookups will
prioritize this category over other categories. Categories are normally
searched for in the order they are registered to the database; if you
specify `prepend=True`, then the new category is prepended to the
existing list so that it is searched first.
If `insert_before` is not `None`, then it must be a string; the
definitions are inserted in the category list immediately before the
given category name, or at the beginning of the list if the given
category doesn't exist. If `insert_after` is not `None`, then it must
be a string; the definitions are inserted in the category list
immediately after the given category name, or at the end of the list if
the given category doesn't exist.
You may only specify one of `prepend=True`, `insert_before='...'` or
`insert_after='...'`.
"""
if category in self.category_list:
raise ValueError("Category {} is already registered in the context database"
.format(category))
# ensure only one of these options is set
if len([ x for x in (prepend, insert_before, insert_after) if x ]) > 1:
raise TypeError("add_context_category(): You may only specify one of "
"prepend=True, insert_before=... or insert_after=...")
if prepend:
self.category_list.insert(0, category)
elif insert_before:
if insert_before in self.category_list:
i = self.category_list.index(insert_before)
else:
i = 0
self.category_list.insert(i, category)
elif insert_after:
if insert_after in self.category_list:
i = self.category_list.index(insert_after) + 1 # insert after found category
else:
i = len(self.category_list)
self.category_list.insert(i, category)
else:
self.category_list.append(category)
self.d[category] = {
'macros': dict( (m.macroname, m) for m in macros ),
'environments': dict( (e.environmentname, e) for e in environments ),
'specials': dict( (s.specials_chars, s) for s in specials ),
}
def set_unknown_macro_spec(self, macrospec):
r"""
Set the macro spec to use when encountering a macro that is not in the
database.
"""
self.unknown_macro_spec = macrospec
def set_unknown_environment_spec(self, environmentspec):
r"""
Set the environment spec to use when encountering a LaTeX environment that
is not in the database.
"""
self.unknown_environment_spec = environmentspec
def set_unknown_specials_spec(self, specialsspec):
r"""
Set the latex specials spec to use when encountering a LaTeX environment
that is not in the database.
"""
self.unknown_specials_spec = specialsspec
def categories(self):
r"""
Return a list of valid category names that are registered in the current
database context.
"""
return list(self.category_list)
def get_macro_spec(self, macroname):
r"""
Look up a macro specification by macro name. The macro name is searched for
in all categories one by one and the first match is returned.
Returns a macro spec instance that matches the given `macroname`. If
the macro name was not found, we return the default macro specification
set by :py:meth:`set_unknown_macro_spec()` or `None` if no such spec was
set.
"""
for cat in self.category_list:
# search categories in the given order
if macroname in self.d[cat]['macros']:
return self.d[cat]['macros'][macroname]
return self.unknown_macro_spec
def get_environment_spec(self, environmentname):
r"""
Look up an environment specification by environment name. The environment
name is searched for in all categories one by one and the first match is
returned.
Returns the environment spec. If the environment name was not found, we
return the default environment specification set by
:py:meth:`set_unknown_environment_spec()` or `None` if no such spec was
set.
"""
for cat in self.category_list:
# search categories in the given order
if environmentname in self.d[cat]['environments']:
return self.d[cat]['environments'][environmentname]
return self.unknown_environment_spec
def get_specials_spec(self, specials_chars):
r"""
Look up a "latex specials" specification by character sequence. The
sequence name is searched for in all categories one by one and the first
match is returned.
If you are parsing a chunk of LaTeX code, you should use
:py:meth:`test_for_specials()` instead. Unlike
:py:meth:`test_for_specials()`, :py:meth:`get_specials_spec()` returns
the first match regardless of matched length. [Rationale: we only need
to worry about matching the longest specials sequence when parsing LaTeX
code. Calling `get_specials_spec()` means one has already parsed the
sequence and one is looking up additional specs on it.]
Returns the specials spec. If the latex specials was not found, we
return the default latex specials specification set by
:py:meth:`set_unknown_specials_spec()` or `None` if no such spec was
set.
"""
for cat in self.category_list:
# search categories in the given order
if specials_chars in self.d[cat]['specials']:
return self.d[cat]['specials'][specials_chars]
return self.unknown_specials_spec
def test_for_specials(self, s, pos, parsing_state=None):
r"""
Test the given position in the string for any LaTeX specials. The lookup
proceeds by searching for in all categories one by one and the first
match is returned, except that the longest match accross all categories
is returned. For instance, a match of '``' in a later category will
take precedence over a match of '`' in a earlier-searched category.
Returns a specials spec instance, or `None` if no specials are detected
at the position `pos`.
"""
best_match_len = 0
best_match_s = None
for cat in self.category_list:
# search categories in the given order
for specials_chars in self.d[cat]['specials'].keys():
if len(specials_chars) > best_match_len and s.startswith(specials_chars, pos):
best_match_s = self.d[cat]['specials'][specials_chars]
best_match_len = len(specials_chars)
return best_match_s # this is None if no match
def iter_macro_specs(self, categories=None):
r"""
Yield the macro specs corresponding to all macros in the given categories.
If `categories` is `None`, then the known macro specs from all
categories are provided in one long iterable sequence. Otherwise,
`categories` should be a list or iterable of category names (e.g.,
'latex-base') of macro specs to return.
The macro specs from the different categories specified are concatenated
into one long sequence which is yielded spec by spec.
"""
if categories is None:
categories = self.category_list
for c in categories:
if c not in self.category_list:
raise ValueError(
"Invalid latex macro spec db category: {!r} (Expected one of {!r})"
.format(c, self.category_list)
)
for spec in self.d[c]['macros'].values():
yield spec
def iter_environment_specs(self, categories=None):
r"""
Yield the environment specs corresponding to all environments in the given
categories.
If `categories` is `None`, then the known environment specs from all
categories are provided in one long iterable sequence. Otherwise,
`categories` should be a list or iterable of category names (e.g.,
'latex-base') of environment specs to return.
The environment specs from the different categories specified are
concatenated into one long sequence which is yielded spec by spec.
"""
if categories is None:
categories = self.category_list
for c in categories:
if c not in self.category_list:
raise ValueError(
"Invalid latex environment spec db category: {!r} (Expected one of {!r})"
.format(c, self.category_list)
)
for spec in self.d[c]['environments'].values():
yield spec
def iter_specials_specs(self, categories=None):
r"""
Yield the specials specs corresponding to all environments in the given
categories.
If `categories` is `None`, then the known specials specs from all
categories are provided in one long iterable sequence. Otherwise,
`categories` should be a list or iterable of category names (e.g.,
'latex-base') of specials specs to return.
The specials specs from the different categories specified are
concatenated into one long sequence which is yielded spec by spec.
"""
if categories is None:
categories = self.category_list
for c in categories:
if c not in self.category_list:
raise ValueError(
"Invalid latex environment spec db category: {!r} (Expected one of {!r})"
.format(c, self.category_list)
)
for spec in self.d[c]['specials'].values():
yield spec
def filter_context(self, keep_categories=[], exclude_categories=[],
keep_which=[]):
r"""
Return a new :py:class:`LatexContextDb` instance where we only keep
certain categories of macro and environment specifications.
If `keep_categories` is set to a nonempty list, then the returned
context will not contain any definitions that do not correspond to the
specified categories.
If `exclude_categories` is set to a nonempty list, then the returned
context will not contain any definitions that correspond to the
specified categories.
It is explicitly fine to have category names in `keep_categories` and
`exclude_categories` that don't exist in the present object
(cf. :py:meth:`categories()`).
The argument `keep_which`, if non-empty, specifies which definitions to
keep. It should be a subset of the list ['macros', 'environments',
'specials'].
The returned context will make a copy of the dictionaries that store the
macro and environment specifications, but the specification classes (and
corresponding argument parsers) might correspond to the same instances.
I.e., the returned context is not a full deep copy.
"""
new_context = LatexContextDb()
new_context.unknown_macro_spec = self.unknown_macro_spec
new_context.unknown_environment_spec = self.unknown_environment_spec
new_context.unknown_specials_spec = self.unknown_specials_spec
keep_macros = not keep_which or 'macros' in keep_which
keep_environments = not keep_which or 'environments' in keep_which
keep_specials = not keep_which or 'specials' in keep_which
for cat in self.category_list:
if keep_categories and cat not in keep_categories:
continue
if exclude_categories and cat in exclude_categories:
continue
# include this category
new_context.add_context_category(
cat,
macros=self.d[cat]['macros'].values() if keep_macros else [],
environments=self.d[cat]['environments'].values() if keep_environments else [],
specials=self.d[cat]['specials'].values() if keep_specials else [],
)
return new_context | 0.88063 | 0.538923 |
from common import *
from sys import stderr
def main(options):
read_conf()
if "-h" in options or "--help" in options:
show_help()
if len(options) < 1 or not(options[0] in ["-s", "--service"]):
show_help()
# STEP 0. FETCH ALL INSTALLED SERVICES
INSTALLED_SERVICES = get_installed_services()
# STEP 1. CHECK IF SELECTED SERVICE IS AVAILABLE
target_services = None
if options[0] in ["-s", "--service"]:
if len(options) < 2:
show_help("Lost parameter 'service_name'.")
for service in INSTALLED_SERVICES:
if service.instance_name == options[1]:
target_services = [service]
break
if target_services == None:
show_help("Service \"" + options[1] + "\" is not installed.")
else:
show_help("\"" + options[0] + "\" is not a valid option.")
n_lines = 25
if len(options) > 2 and options[2] in ["-n", "--lines"]:
if len(options) < 4:
show_help("Lost parameter 'NUM'.")
else:
n_lines = options[3]
try:
n_lines = int(n_lines)
except Exception:
show_help("Invalid number of lines 'NUM'.")
# STEP 2. CHECK ALL SELECTED SERVICES
for service in target_services:
check_service(service, n_lines)
exit(0)
def show_help(message=""):
# STEP 0. FETCH ALL INSTALLED SERVICES
INSTALLED_SERVICES = get_installed_services()
print message
print "Usage: service_log -s service_name -n NUM"
print " where"
print " -s, --service : Print the last 25 lines of the log for the selected eBioKit service "
print " -n, --lines : Print the last NUM lines instead of the last 25"
services = []
for service in INSTALLED_SERVICES:
services.append(service.instance_name)
print " Available services: [" + ", ".join(services) + "]"
print ""
exit(1)
def check_service(service, lines):
if not service.enabled:
print "SERVICE IS NOT ENABLED"
return
try:
output, error = ebiokit_remote_launcher("service log", str(lines) + "\" \"" + service.instance_name)
print output
print >> sys.stderr, error
except Exception as ex:
print ex.message
print "UNKNOWN"
return
if __name__ == "__main__":
main(sys.argv[1:]) | server/admin_tools/service_tools/service_log.py |
from common import *
from sys import stderr
def main(options):
    """Entry point: parse CLI options and print the log tail for one service.

    options -- sys.argv[1:]; expected shape is
               ["-s"|"--service", service_name [, "-n"|"--lines", NUM]]
    Exits with status 0 on success; show_help() never returns (exits 1).
    """
    read_conf()
    if "-h" in options or "--help" in options:
        show_help()
    # The first option must be the service selector; anything else is usage error.
    if len(options) < 1 or options[0] not in ["-s", "--service"]:
        show_help()
    # STEP 0. FETCH ALL INSTALLED SERVICES
    INSTALLED_SERVICES = get_installed_services()
    # STEP 1. CHECK IF SELECTED SERVICE IS AVAILABLE
    target_services = None
    if options[0] in ["-s", "--service"]:
        if len(options) < 2:
            show_help("Lost parameter 'service_name'.")
        for service in INSTALLED_SERVICES:
            if service.instance_name == options[1]:
                target_services = [service]
                break
        # BUG FIX: identity comparison against None uses 'is', not '=='.
        if target_services is None:
            show_help("Service \"" + options[1] + "\" is not installed.")
    else:
        show_help("\"" + options[0] + "\" is not a valid option.")
    # Optional line count; defaults to the last 25 lines.
    n_lines = 25
    if len(options) > 2 and options[2] in ["-n", "--lines"]:
        if len(options) < 4:
            show_help("Lost parameter 'NUM'.")
        else:
            n_lines = options[3]
            try:
                n_lines = int(n_lines)
            except ValueError:
                # Narrowed from 'except Exception': int() on a CLI string
                # can only fail with ValueError.
                show_help("Invalid number of lines 'NUM'.")
    # STEP 2. CHECK ALL SELECTED SERVICES
    for service in target_services:
        check_service(service, n_lines)
    exit(0)
def show_help(message=""):
    """Print an optional error message plus usage text, then exit(1).

    message -- optional error line printed before the usage block.
    Never returns: terminates the process with exit status 1.
    """
    # STEP 0. FETCH ALL INSTALLED SERVICES
    INSTALLED_SERVICES = get_installed_services()
    print message
    print "Usage: service_log -s service_name -n NUM"
    print " where"
    print " -s, --service : Print the last 25 lines of the log for the selected eBioKit service "
    print " -n, --lines : Print the last NUM lines instead of the last 25"
    # List the installed instance names so the user can retry with a valid one.
    services = []
    for service in INSTALLED_SERVICES:
        services.append(service.instance_name)
    print " Available services: [" + ", ".join(services) + "]"
    print ""
    exit(1)
def check_service(service, lines):
    """Print the last `lines` lines of the remote log for one service.

    service -- installed-service object (uses .enabled and .instance_name).
    lines   -- number of log lines to request.
    Failures from the remote launcher are printed, never raised.
    """
    if not service.enabled:
        print "SERVICE IS NOT ENABLED"
        return
    try:
        # NOTE(review): lines and instance_name are spliced into a quoted
        # argument string; assumes the launcher splits arguments on '" "'
        # -- confirm against ebiokit_remote_launcher's contract.
        output, error = ebiokit_remote_launcher("service log", str(lines) + "\" \"" + service.instance_name)
        print output
        print >> sys.stderr, error
    except Exception as ex:
        # Best-effort reporting: any failure is printed, not propagated.
        print ex.message
        print "UNKNOWN"
        return
if __name__ == "__main__":
main(sys.argv[1:]) | 0.202917 | 0.106737 |
import torch
import torch.nn as nn
import torch.optim as optim
from data import make_font_trainloader
from model import Net
# Pick GPU 0 when CUDA is available, otherwise fall back to the CPU.
use_cuda = torch.cuda.is_available()
device = torch.device('cuda:0' if use_cuda else 'cpu')
# CPU device used to move the model back before saving (see train()).
host = torch.device('cpu')
# Persistent blend buffer shared across create_inputs() calls; None until
# the first batch seeds it with noise.
contexts = None
def create_inputs(model, objects, labels, criterion, iters=8, eps=32):
    """Blend each object with a persistent context buffer, then refine the
    blend with a few gradient-ascent steps on `criterion`.

    Args:
        model: network providing gradients; run in eval mode during the
            refinement and restored to its previous mode before returning.
        objects: input batch with values in [0, 1].
        labels: targets for `criterion`.
        criterion: loss used for the ascent steps.
        iters: number of refinement iterations.
        eps: total step budget in 8-bit pixel units (scaled by 1/255).

    Returns:
        Refined inputs, detached; also stored into the global `contexts`
        buffer for the next call.
    """
    global contexts
    # Re-seed the context for samples where randn > 0.8 (~21% of the batch);
    # the rest keep the context carried over from the previous call.
    noise = torch.rand(objects.shape) / 2
    reset = torch.randn(objects.shape[0])
    if use_cuda:
        noise = noise.to(device)
        reset = reset.to(device)
    if contexts is None:
        contexts = noise
    else:
        contexts[reset > 0.8] = noise[reset > 0.8]
    # Observed input: weighted blend of the clean object and its context.
    inputs = (2 * objects + contexts) / 3
    # Local refinement: convert the budget to [0, 1] pixel scale.
    eps = eps / 255
    inputs.detach_()
    training = model.training
    model.train(False)
    for it in range(iters):
        inputs.requires_grad = True
        outputs = model(inputs)
        model.zero_grad()
        loss = criterion(outputs, labels)
        loss.backward()
        grad = inputs.grad
        with torch.no_grad():
            # Step direction interpolates from the gradient's sign toward
            # the max-normalized gradient as iterations progress.
            sign = grad.sign()
            norm = grad / torch.max(torch.abs(grad))
            step = ((iters - it) * sign + it * norm) / iters
            step = eps / iters * step / 2.5
            inputs = inputs + step
            inputs = torch.clamp(inputs, min=0, max=1).detach_()
    # BUG FIX: the original saved the previous train/eval mode into
    # `training` but never restored it, leaving the model in eval mode for
    # the caller's subsequent forward passes.
    model.train(training)
    contexts = inputs.clone().detach_()
    return inputs
def train(epochs=300, super_batch=5, num_batches=20):
    """Train the font classifier on adversarially refined inputs.

    Args:
        epochs: number of epochs.
        super_batch: dataloader passes accumulated into one optimizer step.
        num_batches: optimizer steps per epoch.

    Side effects: prints per-epoch stats and saves the trained model to
    "checkpoint/net.pth".
    """
    net = Net()
    net.train()
    optimizer = optim.Adam(net.parameters(),
                           lr=1e-4, weight_decay=1e-4)
    criterion = nn.CrossEntropyLoss()
    if use_cuda:
        net.to(device)
    trainloader = make_font_trainloader()
    for epoch in range(epochs):
        print("\nepoch: %d" % (epoch + 1))
        # Dataset alpha ramps linearly from 0 to 1 between epochs 3 and 52.
        factor = min(1, max(0, (epoch - 2) / 50))
        trainloader.dataset.adjust_alpha(factor)
        print("factor: %.3f" % factor)
        correct = 0
        total = 0
        total_loss = 0.0
        for nb in range(num_batches):
            loss = 0
            for sb in range(super_batch):
                for objects, labels in trainloader:
                    if use_cuda:
                        objects = objects.to(device)
                        labels = labels.to(device)
                    inputs = create_inputs(net, objects, labels, criterion)
                    outputs = net(inputs)
                    batch_loss = criterion(outputs, labels)
                    _, predicted = torch.max(outputs, dim=1)
                    correct += predicted.eq(labels).sum().item()
                    total += inputs.size(0)
                    # BUG FIX: accumulate a detached float for reporting;
                    # accumulating the tensor kept every batch's autograd
                    # graph alive for the whole epoch.
                    total_loss += batch_loss.item()
                    loss += batch_loss
            # One optimizer step per accumulated super-batch of losses.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print("train: %.1f%% (%d / %d) | loss: %.3f" % (correct / total * 100,
                                                        correct, total, total_loss))
    net.to(host)
    torch.save(net, "checkpoint/net.pth")
if __name__ == "__main__":
train() | mnist/train.py | import torch
import torch.nn as nn
import torch.optim as optim
from data import make_font_trainloader
from model import Net
# Pick GPU 0 when CUDA is available, otherwise fall back to the CPU.
use_cuda = torch.cuda.is_available()
device = torch.device('cuda:0' if use_cuda else 'cpu')
# CPU device used to move the model back before saving (see train()).
host = torch.device('cpu')
# Persistent blend buffer shared across create_inputs() calls; None until
# the first batch seeds it with noise.
contexts = None
def create_inputs(model, objects, labels, criterion, iters=8, eps=32):
    """Blend each object with a persistent context buffer, then refine the
    blend with a few gradient-ascent steps on `criterion`.

    Args:
        model: network providing gradients; run in eval mode during the
            refinement and restored to its previous mode before returning.
        objects: input batch with values in [0, 1].
        labels: targets for `criterion`.
        criterion: loss used for the ascent steps.
        iters: number of refinement iterations.
        eps: total step budget in 8-bit pixel units (scaled by 1/255).

    Returns:
        Refined inputs, detached; also stored into the global `contexts`
        buffer for the next call.
    """
    global contexts
    # Re-seed the context for samples where randn > 0.8 (~21% of the batch);
    # the rest keep the context carried over from the previous call.
    noise = torch.rand(objects.shape) / 2
    reset = torch.randn(objects.shape[0])
    if use_cuda:
        noise = noise.to(device)
        reset = reset.to(device)
    if contexts is None:
        contexts = noise
    else:
        contexts[reset > 0.8] = noise[reset > 0.8]
    # Observed input: weighted blend of the clean object and its context.
    inputs = (2 * objects + contexts) / 3
    # Local refinement: convert the budget to [0, 1] pixel scale.
    eps = eps / 255
    inputs.detach_()
    training = model.training
    model.train(False)
    for it in range(iters):
        inputs.requires_grad = True
        outputs = model(inputs)
        model.zero_grad()
        loss = criterion(outputs, labels)
        loss.backward()
        grad = inputs.grad
        with torch.no_grad():
            # Step direction interpolates from the gradient's sign toward
            # the max-normalized gradient as iterations progress.
            sign = grad.sign()
            norm = grad / torch.max(torch.abs(grad))
            step = ((iters - it) * sign + it * norm) / iters
            step = eps / iters * step / 2.5
            inputs = inputs + step
            inputs = torch.clamp(inputs, min=0, max=1).detach_()
    # BUG FIX: the original saved the previous train/eval mode into
    # `training` but never restored it, leaving the model in eval mode for
    # the caller's subsequent forward passes.
    model.train(training)
    contexts = inputs.clone().detach_()
    return inputs
def train(epochs=300, super_batch=5, num_batches=20):
    """Train the font classifier on adversarially refined inputs.

    Args:
        epochs: number of epochs.
        super_batch: dataloader passes accumulated into one optimizer step.
        num_batches: optimizer steps per epoch.

    Side effects: prints per-epoch stats and saves the trained model to
    "checkpoint/net.pth".
    """
    net = Net()
    net.train()
    optimizer = optim.Adam(net.parameters(),
                           lr=1e-4, weight_decay=1e-4)
    criterion = nn.CrossEntropyLoss()
    if use_cuda:
        net.to(device)
    trainloader = make_font_trainloader()
    for epoch in range(epochs):
        print("\nepoch: %d" % (epoch + 1))
        # Dataset alpha ramps linearly from 0 to 1 between epochs 3 and 52.
        factor = min(1, max(0, (epoch - 2) / 50))
        trainloader.dataset.adjust_alpha(factor)
        print("factor: %.3f" % factor)
        correct = 0
        total = 0
        total_loss = 0.0
        for nb in range(num_batches):
            loss = 0
            for sb in range(super_batch):
                for objects, labels in trainloader:
                    if use_cuda:
                        objects = objects.to(device)
                        labels = labels.to(device)
                    inputs = create_inputs(net, objects, labels, criterion)
                    outputs = net(inputs)
                    batch_loss = criterion(outputs, labels)
                    _, predicted = torch.max(outputs, dim=1)
                    correct += predicted.eq(labels).sum().item()
                    total += inputs.size(0)
                    # BUG FIX: accumulate a detached float for reporting;
                    # accumulating the tensor kept every batch's autograd
                    # graph alive for the whole epoch.
                    total_loss += batch_loss.item()
                    loss += batch_loss
            # One optimizer step per accumulated super-batch of losses.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print("train: %.1f%% (%d / %d) | loss: %.3f" % (correct / total * 100,
                                                        correct, total, total_loss))
    net.to(host)
    torch.save(net, "checkpoint/net.pth")
if __name__ == "__main__":
train() | 0.850748 | 0.441974 |
from multiprocessing.pool import ThreadPool
import pandas as pd
import requests
import xmltodict
from helper import *
class Data:
    """Download and query 2017 Czech parliamentary election results.

    Results are fetched per NUTS unit from volby.cz as XML and held in a
    single pandas DataFrame with one row per (city, party) pair.
    """

    def __init__(self):
        # One row per (city, party): district_name, city_id, city_name,
        # party, party_votes_percent, total_votes.
        self.df = pd.DataFrame(data={})
        # https://volby.cz/pls/ps2017nss/vysledky_okres?nuts=CZ0806
        self.downloaded = 0
        self.to_download = len(NUTS)

    # another approach would be having an index of what cities/towns are in
    # NUTS and then download only needed data.
    def update(self):
        """
        Overwrite existing data with new ones downloaded from volby.cz by __fetch_data().
        Data is downloaded for all NUTS units separately due to the limitation on server site.
        :return:
        """
        # If there was a change, then without a diff it is cheaper (and much
        # faster) to drop everything and extract again.
        # possible improvement: keep hashes of already downloaded files and
        # skip extraction when a newly downloaded file hashes the same.
        self.df = pd.DataFrame(data={})
        self.downloaded = 0  # reset progress for the new download round
        # Multithreading helps a lot here:
        # ~ 2.7 s with it vs ~ 18.9 s without (still slow overall).
        pool = ThreadPool(processes=32)
        multiple_results = [pool.apply_async(
            Data.__fetch_data, (self, nuts)) for nuts in NUTS]
        # Merge in the main thread; see note in __fetch_data.
        for res in multiple_results:
            self.__add_to_dataframe(res.get(timeout=10))

    def get_progress(self):
        """Return a human-readable download-progress string."""
        return "{} / {} downloaded".format(self.downloaded, self.to_download)

    def __add_to_dataframe(self, x):
        """
        :param x: Array of dictionaries with following keys: 'district_name',
            'city_id', 'city_name', 'party', 'party_votes_percent',
            'total_votes' [optional]
        :return: Updates self.df pandas dataframe
        """
        self.downloaded += 1
        # BUG FIX: DataFrame.append() was deprecated in pandas 1.4 and
        # removed in 2.0; build a frame from the records and concat instead.
        # ignore_index keeps row labels unique across chunks.
        self.df = pd.concat([self.df, pd.DataFrame(x)], ignore_index=True)

    def __fetch_data(self, nuts):
        """
        Download elections data from volby.cz based on selected NUTS
        (something between region and district) and extract useful data.
        :param nuts: e.g. "CZ0412", see nuts_dial.txt
        :return: list[dict] of extracted rows
        """
        url = "https://volby.cz/pls/ps2017nss/vysledky_okres?nuts={}".format(
            nuts)
        r = requests.get(url, allow_redirects=True)
        # The server occasionally refuses under load; retry until it answers.
        while r.status_code != 200:
            r = requests.get(url, allow_redirects=True)
            print('Retrying {}!'.format(nuts))
        filename = '{}.xml'.format(nuts)
        # BUG FIX: close the file deterministically instead of relying on
        # garbage collection of the anonymous file object.
        with open(filename, 'wb') as xml_out:
            xml_out.write(r.content)
        tmp_data = []  # one concat at the end is cheaper than per-row appends
        with open(filename) as xml_file:
            data_dict = xmltodict.parse(xml_file.read())
        res = data_dict.get("VYSLEDKY_OKRES")
        district = res.get("OKRES")
        cities = res.get("OBEC")
        for city in cities:
            votes = city.get("HLASY_STRANA")
            for party in votes:
                tmp_data.append(
                    {
                        'district_name': district["@NAZ_OKRES"],
                        'city_id': city["@CIS_OBEC"],
                        'city_name': city["@NAZ_OBEC"],
                        'party': party["@KSTRANA"],
                        'party_votes_percent': party["@PROC_HLASU"],
                        'total_votes': city["UCAST"]["@PLATNE_HLASY"]
                    }
                )
        os.remove(filename)
        # Rows must be merged into the dataframe in the main thread; doing
        # it here from worker threads caused data loss.
        return tmp_data

    def find_places_by_name(self, qu):
        """
        Find places whose name starts with `qu` (case sensitive, diacritics
        matter).
        :param qu: name prefix, e.g. "<NAME>"
        :return: DataFrame with "city_id", "city_name", "district_name";
            empty when nothing matches.
        """
        # todo: make it case insensitive
        res = self.df.loc[self.df['city_name'].str.startswith(qu)]
        options = res[["city_id", "city_name",
                       "district_name"]].drop_duplicates()
        return options

    def get_votes_by_city_id(self, city_id):
        """Return all per-party rows for one city id (int or str)."""
        return self.df.loc[self.df['city_id'] == str(city_id)]
import pandas as pd
import requests
import xmltodict
from helper import *
class Data:
    """Download and query 2017 Czech parliamentary election results.

    Results are fetched per NUTS unit from volby.cz as XML and held in a
    single pandas DataFrame with one row per (city, party) pair.
    """

    def __init__(self):
        # One row per (city, party): district_name, city_id, city_name,
        # party, party_votes_percent, total_votes.
        self.df = pd.DataFrame(data={})
        # https://volby.cz/pls/ps2017nss/vysledky_okres?nuts=CZ0806
        self.downloaded = 0
        self.to_download = len(NUTS)

    # another approach would be having an index of what cities/towns are in
    # NUTS and then download only needed data.
    def update(self):
        """
        Overwrite existing data with new ones downloaded from volby.cz by __fetch_data().
        Data is downloaded for all NUTS units separately due to the limitation on server site.
        :return:
        """
        # If there was a change, then without a diff it is cheaper (and much
        # faster) to drop everything and extract again.
        # possible improvement: keep hashes of already downloaded files and
        # skip extraction when a newly downloaded file hashes the same.
        self.df = pd.DataFrame(data={})
        self.downloaded = 0  # reset progress for the new download round
        # Multithreading helps a lot here:
        # ~ 2.7 s with it vs ~ 18.9 s without (still slow overall).
        pool = ThreadPool(processes=32)
        multiple_results = [pool.apply_async(
            Data.__fetch_data, (self, nuts)) for nuts in NUTS]
        # Merge in the main thread; see note in __fetch_data.
        for res in multiple_results:
            self.__add_to_dataframe(res.get(timeout=10))

    def get_progress(self):
        """Return a human-readable download-progress string."""
        return "{} / {} downloaded".format(self.downloaded, self.to_download)

    def __add_to_dataframe(self, x):
        """
        :param x: Array of dictionaries with following keys: 'district_name',
            'city_id', 'city_name', 'party', 'party_votes_percent',
            'total_votes' [optional]
        :return: Updates self.df pandas dataframe
        """
        self.downloaded += 1
        # BUG FIX: DataFrame.append() was deprecated in pandas 1.4 and
        # removed in 2.0; build a frame from the records and concat instead.
        # ignore_index keeps row labels unique across chunks.
        self.df = pd.concat([self.df, pd.DataFrame(x)], ignore_index=True)

    def __fetch_data(self, nuts):
        """
        Download elections data from volby.cz based on selected NUTS
        (something between region and district) and extract useful data.
        :param nuts: e.g. "CZ0412", see nuts_dial.txt
        :return: list[dict] of extracted rows
        """
        url = "https://volby.cz/pls/ps2017nss/vysledky_okres?nuts={}".format(
            nuts)
        r = requests.get(url, allow_redirects=True)
        # The server occasionally refuses under load; retry until it answers.
        while r.status_code != 200:
            r = requests.get(url, allow_redirects=True)
            print('Retrying {}!'.format(nuts))
        filename = '{}.xml'.format(nuts)
        # BUG FIX: close the file deterministically instead of relying on
        # garbage collection of the anonymous file object.
        with open(filename, 'wb') as xml_out:
            xml_out.write(r.content)
        tmp_data = []  # one concat at the end is cheaper than per-row appends
        with open(filename) as xml_file:
            data_dict = xmltodict.parse(xml_file.read())
        res = data_dict.get("VYSLEDKY_OKRES")
        district = res.get("OKRES")
        cities = res.get("OBEC")
        for city in cities:
            votes = city.get("HLASY_STRANA")
            for party in votes:
                tmp_data.append(
                    {
                        'district_name': district["@NAZ_OKRES"],
                        'city_id': city["@CIS_OBEC"],
                        'city_name': city["@NAZ_OBEC"],
                        'party': party["@KSTRANA"],
                        'party_votes_percent': party["@PROC_HLASU"],
                        'total_votes': city["UCAST"]["@PLATNE_HLASY"]
                    }
                )
        os.remove(filename)
        # Rows must be merged into the dataframe in the main thread; doing
        # it here from worker threads caused data loss.
        return tmp_data

    def find_places_by_name(self, qu):
        """
        Find places whose name starts with `qu` (case sensitive, diacritics
        matter).
        :param qu: name prefix, e.g. "<NAME>"
        :return: DataFrame with "city_id", "city_name", "district_name";
            empty when nothing matches.
        """
        # todo: make it case insensitive
        res = self.df.loc[self.df['city_name'].str.startswith(qu)]
        options = res[["city_id", "city_name",
                       "district_name"]].drop_duplicates()
        return options

    def get_votes_by_city_id(self, city_id):
        """Return all per-party rows for one city id (int or str)."""
        return self.df.loc[self.df['city_id'] == str(city_id)]
from types import MethodType
import os
import numpy as np
import cv2
from pointsmap import invertTransform, combineTransforms
from h5dataloader.common.structure import *
from h5dataloader.common.create_funcs import NORMALIZE_INF
from h5dataloader.pytorch import HDF5Dataset
from h5dataloader.pytorch.structure import CONVERT_TORCH, DTYPE_TORCH
import torch
from ..model.constant import DATASET_MAP, DATASET_POSE_ERR
class PMOD_Train_Dataset(HDF5Dataset):
    """Training dataset that injects a fresh random pose error per sample.

    Every __getitem__ draws a random translation and rotation error; the
    error is spliced into the map's TF chain (via the empty child_frame_id
    appended in __init__) and also returned under DATASET_POSE_ERR.
    """

    def __init__(self, h5_paths: List[str], config: str, quiet: bool = True, block_size: int = 0, use_mods: Tuple[int, int] = None,
                 visibility_filter_radius: int = 0, visibility_filter_threshold: float = 3.0, tr_err_range: float = 2.0, rot_err_range: float = 10.0) -> None:
        """Same arguments as HDF5Dataset, plus:

        tr_err_range: max absolute translation error per axis.
        rot_err_range: max rotation error magnitude, in degrees.
        """
        super(PMOD_Train_Dataset, self).__init__(h5_paths, config, quiet, block_size,
                                                 use_mods, visibility_filter_radius, visibility_filter_threshold)
        # Random Pose Error
        self.tr_err_range: float = np.array(tr_err_range, dtype=np.float32)
        self.rot_err_range: float = np.deg2rad(rot_err_range)
        # Extra TF hop with empty child_frame_id: it misses the tf table in
        # create_pose_from_pose, which then substitutes the random error.
        self.minibatch[DATASET_MAP][CONFIG_TAG_TF].append(('', False))

    def __getitem__(self, index: int) -> Dict[str, torch.Tensor]:
        # Random Pose Error: uniformly random axis, uniform magnitude.
        rot_vec: np.ndarray = (np.random.rand(3) * 2.0 - 1.0)
        rot_vec /= np.linalg.norm(rot_vec)
        rot_abs: float = np.random.rand() * self.rot_err_range
        self.q_err: np.ndarray = self.__vec2quat(rot_vec, rot_abs)
        self.tr_err: np.ndarray = (np.random.rand(
            3) * 2.0 - 1.0) * self.tr_err_range
        # Get Items (create_pose_from_pose below consumes q_err / tr_err).
        items: Dict[str, torch.Tensor] = super().__getitem__(index)
        # Add Pose Error (translation scaled like the normalized map range).
        tr_norm: float = 1.0
        if self.minibatch[DATASET_MAP][CONFIG_TAG_NORMALIZE] is True:
            tr_norm *= self.minibatch[DATASET_MAP][CONFIG_TAG_RANGE][1]
        items[DATASET_POSE_ERR] = torch.from_numpy(CONVERT_TORCH[TYPE_POSE](
            DTYPE_TORCH[TYPE_POSE](np.concatenate([self.tr_err / tr_norm, self.q_err]))))
        return items

    def __vec2quat(self, vec: np.ndarray, abs: float) -> np.ndarray:
        # Rotation vector to Quaternion. (NOTE: param 'abs' shadows the builtin.)
        xyz: np.ndarray = vec * np.sin(abs * 0.5)
        return np.append(xyz, np.cos(abs * 0.5))

    def depth_common(self, src: np.ndarray, minibatch_config: Dict[str, Union[str, Dict[str, str], List[int], bool, List[float], MethodType]]) -> np.ndarray:
        """Optionally normalize, then resize a depth image per the config."""
        dst = src
        if minibatch_config[CONFIG_TAG_NORMALIZE] is True:
            range_min, range_max = minibatch_config[CONFIG_TAG_RANGE][:2]
            # Values beyond range_max map to the NORMALIZE_INF sentinel.
            dst = np.where(range_max < dst, NORMALIZE_INF,
                           (dst - range_min) / (range_max - range_min))
        shape = minibatch_config[CONFIG_TAG_SHAPE]
        if shape != dst.shape[:2]:
            # Nearest-neighbor so depth values are never blended.
            dst = cv2.resize(dst, dsize=(
                shape[1], shape[0]), interpolation=cv2.INTER_NEAREST)
        return dst

    def create_pose_from_pose(self, key: str, link_idx: int, minibatch_config: Dict[str, Union[str, Dict[str, str], List[int], bool, List[float], MethodType]]) -> np.ndarray:
        """create_pose_from_pose
        Generate a "pose" by composing the configured TF chain.
        Args:
            key (str): key generated by HDF5DatasetNumpy.get_key()
            link_idx (int): link index
            minibatch_config (dict): mini-batch configuration
        Returns:
            np.ndarray: [tx, ty, tz, qx, qy, qz, qw]
        """
        translations = [np.array([0.0, 0.0, 0.0], dtype=np.float32)]
        quaternions = [np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float32)]
        for child_frame_id, invert in minibatch_config[CONFIG_TAG_TF]:
            tf_data: Dict[str, str] = self.tf[CONFIG_TAG_DATA].get(
                child_frame_id)
            if tf_data is None:
                # Random Pose Error (the '' hop appended in __init__).
                trns: np.ndarray = self.tr_err
                qtrn: np.ndarray = self.q_err
            else:
                h5_key = tf_data[CONFIG_TAG_KEY]
                if h5_key[0] == '/':
                    # Absolute key: prefix with the link index.
                    h5_key = str(link_idx) + h5_key
                else:
                    h5_key = os.path.join(key, h5_key)
                trns: np.ndarray = self.h5links[h5_key][SUBTYPE_TRANSLATION][(
                )]
                qtrn: np.ndarray = self.h5links[h5_key][SUBTYPE_ROTATION][()]
            if invert is True:
                trns, qtrn = invertTransform(translation=trns, quaternion=qtrn)
            translations.append(trns)
            quaternions.append(qtrn)
        translation, quaternion = combineTransforms(
            translations=translations, quaternions=quaternions)
        return np.concatenate([translation, quaternion])
class PMOD_Test_Dataset(HDF5Dataset):
    """Test dataset with reproducible pose errors.

    If the config does not already provide a DATASET_POSE_ERR entry, a
    fixed random error is pre-generated per sample index in __init__, so
    the same index always yields the same error across iterations.
    """

    def __init__(self, h5_paths: List[str], config: str, quiet: bool = True, block_size: int = 0, use_mods: Tuple[int, int] = None,
                 visibility_filter_radius: int = 0, visibility_filter_threshold: float = 3.0, tr_err_range: float = 2.0, rot_err_range: float = 10.0) -> None:
        """Same arguments as HDF5Dataset, plus:

        tr_err_range: max absolute translation error per axis.
        rot_err_range: max rotation error magnitude, in degrees.
        """
        super(PMOD_Test_Dataset, self).__init__(h5_paths, config, quiet, block_size,
                                                use_mods, visibility_filter_radius, visibility_filter_threshold)
        if self.minibatch.get(DATASET_POSE_ERR) is None:
            self.random_pose: bool = True
            # Random Pose Error, pre-generated once per sample index.
            self.tr_err_range: float = np.array(tr_err_range, dtype=np.float32)
            self.rot_err_range: float = np.deg2rad(rot_err_range)
            # Extra TF hop with empty child_frame_id: it misses the tf table
            # in create_pose_from_pose, which substitutes the random error.
            self.minibatch[DATASET_MAP][CONFIG_TAG_TF].append(('', False))
            rot_vec: np.ndarray = (np.random.rand(self.length, 3) * 2.0 - 1.0)
            rot_vec /= np.linalg.norm(rot_vec, axis=1, keepdims=True)
            rot_abs: float = np.random.rand(self.length) * self.rot_err_range
            self.q_err_list: np.ndarray = self.__vec2quat(rot_vec, rot_abs)
            self.tr_err_list: np.ndarray = (np.random.rand(
                self.length, 3) * 2.0 - 1.0) * self.tr_err_range
        else:
            # The config supplies the pose error; use it as-is.
            self.random_pose: bool = False

    def __vec2quat(self, vec: np.ndarray, abs: float) -> np.ndarray:
        # Rotation vector to Quaternion, vectorized over the batch axis.
        # (NOTE: param 'abs' shadows the builtin.)
        xyz: np.ndarray = vec * \
            np.sin(np.repeat(abs[:, np.newaxis], 3, axis=1) * 0.5)
        return np.append(xyz, np.cos(abs * 0.5)[:, np.newaxis], axis=1)

    def create_pose_from_pose(self, key: str, link_idx: int, minibatch_config: Dict[str, Union[str, Dict[str, str], List[int], bool, List[float], MethodType]]) -> np.ndarray:
        """create_pose_from_pose
        Generate a "pose" by composing the configured TF chain.
        Args:
            key (str): key generated by HDF5DatasetNumpy.get_key()
            link_idx (int): link index
            minibatch_config (dict): mini-batch configuration
        Returns:
            np.ndarray: [tx, ty, tz, qx, qy, qz, qw]
        """
        translations = [np.array([0.0, 0.0, 0.0], dtype=np.float32)]
        quaternions = [np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float32)]
        for child_frame_id, invert in minibatch_config[CONFIG_TAG_TF]:
            tf_data: Dict[str, str] = self.tf[CONFIG_TAG_DATA].get(
                child_frame_id)
            if tf_data is None:
                # Random Pose Error (the '' hop appended in __init__).
                trns: np.ndarray = self.tr_err
                qtrn: np.ndarray = self.q_err
            else:
                h5_key = tf_data[CONFIG_TAG_KEY]
                if h5_key[0] == '/':
                    # Absolute key: prefix with the link index.
                    h5_key = str(link_idx) + h5_key
                else:
                    h5_key = os.path.join(key, h5_key)
                trns: np.ndarray = self.h5links[h5_key][SUBTYPE_TRANSLATION][(
                )]
                qtrn: np.ndarray = self.h5links[h5_key][SUBTYPE_ROTATION][()]
            if invert is True:
                trns, qtrn = invertTransform(translation=trns, quaternion=qtrn)
            translations.append(trns)
            quaternions.append(qtrn)
        translation, quaternion = combineTransforms(
            translations=translations, quaternions=quaternions)
        return np.concatenate([translation, quaternion])

    def __getitem__(self, index: int) -> dict:
        # Select this index's pre-generated error before the parent builds
        # the sample (create_pose_from_pose reads tr_err / q_err).
        if self.random_pose is True:
            self.tr_err = self.tr_err_list[index]
            self.q_err = self.q_err_list[index]
        items: Dict[str, torch.Tensor] = super().__getitem__(index)
        # Translation is scaled like the normalized map range.
        tr_norm: float = 1.0
        if self.minibatch[DATASET_MAP][CONFIG_TAG_NORMALIZE] is True:
            tr_norm *= self.minibatch[DATASET_MAP][CONFIG_TAG_RANGE][1]
        if self.random_pose is True:
            items[DATASET_POSE_ERR] = torch.from_numpy(CONVERT_TORCH[TYPE_POSE](
                DTYPE_TORCH[TYPE_POSE](np.concatenate([self.tr_err / tr_norm, self.q_err]))))
        else:
            items[DATASET_POSE_ERR][:3] = items[DATASET_POSE_ERR][:3] / tr_norm
        return items
import os
import numpy as np
import cv2
from pointsmap import invertTransform, combineTransforms
from h5dataloader.common.structure import *
from h5dataloader.common.create_funcs import NORMALIZE_INF
from h5dataloader.pytorch import HDF5Dataset
from h5dataloader.pytorch.structure import CONVERT_TORCH, DTYPE_TORCH
import torch
from ..model.constant import DATASET_MAP, DATASET_POSE_ERR
class PMOD_Train_Dataset(HDF5Dataset):
    """Training dataset that injects a fresh random pose error per sample.

    Every __getitem__ draws a random translation and rotation error; the
    error is spliced into the map's TF chain (via the empty child_frame_id
    appended in __init__) and also returned under DATASET_POSE_ERR.
    """

    def __init__(self, h5_paths: List[str], config: str, quiet: bool = True, block_size: int = 0, use_mods: Tuple[int, int] = None,
                 visibility_filter_radius: int = 0, visibility_filter_threshold: float = 3.0, tr_err_range: float = 2.0, rot_err_range: float = 10.0) -> None:
        """Same arguments as HDF5Dataset, plus:

        tr_err_range: max absolute translation error per axis.
        rot_err_range: max rotation error magnitude, in degrees.
        """
        super(PMOD_Train_Dataset, self).__init__(h5_paths, config, quiet, block_size,
                                                 use_mods, visibility_filter_radius, visibility_filter_threshold)
        # Random Pose Error
        self.tr_err_range: float = np.array(tr_err_range, dtype=np.float32)
        self.rot_err_range: float = np.deg2rad(rot_err_range)
        # Extra TF hop with empty child_frame_id: it misses the tf table in
        # create_pose_from_pose, which then substitutes the random error.
        self.minibatch[DATASET_MAP][CONFIG_TAG_TF].append(('', False))

    def __getitem__(self, index: int) -> Dict[str, torch.Tensor]:
        # Random Pose Error: uniformly random axis, uniform magnitude.
        rot_vec: np.ndarray = (np.random.rand(3) * 2.0 - 1.0)
        rot_vec /= np.linalg.norm(rot_vec)
        rot_abs: float = np.random.rand() * self.rot_err_range
        self.q_err: np.ndarray = self.__vec2quat(rot_vec, rot_abs)
        self.tr_err: np.ndarray = (np.random.rand(
            3) * 2.0 - 1.0) * self.tr_err_range
        # Get Items (create_pose_from_pose below consumes q_err / tr_err).
        items: Dict[str, torch.Tensor] = super().__getitem__(index)
        # Add Pose Error (translation scaled like the normalized map range).
        tr_norm: float = 1.0
        if self.minibatch[DATASET_MAP][CONFIG_TAG_NORMALIZE] is True:
            tr_norm *= self.minibatch[DATASET_MAP][CONFIG_TAG_RANGE][1]
        items[DATASET_POSE_ERR] = torch.from_numpy(CONVERT_TORCH[TYPE_POSE](
            DTYPE_TORCH[TYPE_POSE](np.concatenate([self.tr_err / tr_norm, self.q_err]))))
        return items

    def __vec2quat(self, vec: np.ndarray, abs: float) -> np.ndarray:
        # Rotation vector to Quaternion. (NOTE: param 'abs' shadows the builtin.)
        xyz: np.ndarray = vec * np.sin(abs * 0.5)
        return np.append(xyz, np.cos(abs * 0.5))

    def depth_common(self, src: np.ndarray, minibatch_config: Dict[str, Union[str, Dict[str, str], List[int], bool, List[float], MethodType]]) -> np.ndarray:
        """Optionally normalize, then resize a depth image per the config."""
        dst = src
        if minibatch_config[CONFIG_TAG_NORMALIZE] is True:
            range_min, range_max = minibatch_config[CONFIG_TAG_RANGE][:2]
            # Values beyond range_max map to the NORMALIZE_INF sentinel.
            dst = np.where(range_max < dst, NORMALIZE_INF,
                           (dst - range_min) / (range_max - range_min))
        shape = minibatch_config[CONFIG_TAG_SHAPE]
        if shape != dst.shape[:2]:
            # Nearest-neighbor so depth values are never blended.
            dst = cv2.resize(dst, dsize=(
                shape[1], shape[0]), interpolation=cv2.INTER_NEAREST)
        return dst

    def create_pose_from_pose(self, key: str, link_idx: int, minibatch_config: Dict[str, Union[str, Dict[str, str], List[int], bool, List[float], MethodType]]) -> np.ndarray:
        """create_pose_from_pose
        Generate a "pose" by composing the configured TF chain.
        Args:
            key (str): key generated by HDF5DatasetNumpy.get_key()
            link_idx (int): link index
            minibatch_config (dict): mini-batch configuration
        Returns:
            np.ndarray: [tx, ty, tz, qx, qy, qz, qw]
        """
        translations = [np.array([0.0, 0.0, 0.0], dtype=np.float32)]
        quaternions = [np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float32)]
        for child_frame_id, invert in minibatch_config[CONFIG_TAG_TF]:
            tf_data: Dict[str, str] = self.tf[CONFIG_TAG_DATA].get(
                child_frame_id)
            if tf_data is None:
                # Random Pose Error (the '' hop appended in __init__).
                trns: np.ndarray = self.tr_err
                qtrn: np.ndarray = self.q_err
            else:
                h5_key = tf_data[CONFIG_TAG_KEY]
                if h5_key[0] == '/':
                    # Absolute key: prefix with the link index.
                    h5_key = str(link_idx) + h5_key
                else:
                    h5_key = os.path.join(key, h5_key)
                trns: np.ndarray = self.h5links[h5_key][SUBTYPE_TRANSLATION][(
                )]
                qtrn: np.ndarray = self.h5links[h5_key][SUBTYPE_ROTATION][()]
            if invert is True:
                trns, qtrn = invertTransform(translation=trns, quaternion=qtrn)
            translations.append(trns)
            quaternions.append(qtrn)
        translation, quaternion = combineTransforms(
            translations=translations, quaternions=quaternions)
        return np.concatenate([translation, quaternion])
class PMOD_Test_Dataset(HDF5Dataset):
    """Test dataset with reproducible pose errors.

    If the config does not already provide a DATASET_POSE_ERR entry, a
    fixed random error is pre-generated per sample index in __init__, so
    the same index always yields the same error across iterations.
    """

    def __init__(self, h5_paths: List[str], config: str, quiet: bool = True, block_size: int = 0, use_mods: Tuple[int, int] = None,
                 visibility_filter_radius: int = 0, visibility_filter_threshold: float = 3.0, tr_err_range: float = 2.0, rot_err_range: float = 10.0) -> None:
        """Same arguments as HDF5Dataset, plus:

        tr_err_range: max absolute translation error per axis.
        rot_err_range: max rotation error magnitude, in degrees.
        """
        super(PMOD_Test_Dataset, self).__init__(h5_paths, config, quiet, block_size,
                                                use_mods, visibility_filter_radius, visibility_filter_threshold)
        if self.minibatch.get(DATASET_POSE_ERR) is None:
            self.random_pose: bool = True
            # Random Pose Error, pre-generated once per sample index.
            self.tr_err_range: float = np.array(tr_err_range, dtype=np.float32)
            self.rot_err_range: float = np.deg2rad(rot_err_range)
            # Extra TF hop with empty child_frame_id: it misses the tf table
            # in create_pose_from_pose, which substitutes the random error.
            self.minibatch[DATASET_MAP][CONFIG_TAG_TF].append(('', False))
            rot_vec: np.ndarray = (np.random.rand(self.length, 3) * 2.0 - 1.0)
            rot_vec /= np.linalg.norm(rot_vec, axis=1, keepdims=True)
            rot_abs: float = np.random.rand(self.length) * self.rot_err_range
            self.q_err_list: np.ndarray = self.__vec2quat(rot_vec, rot_abs)
            self.tr_err_list: np.ndarray = (np.random.rand(
                self.length, 3) * 2.0 - 1.0) * self.tr_err_range
        else:
            # The config supplies the pose error; use it as-is.
            self.random_pose: bool = False

    def __vec2quat(self, vec: np.ndarray, abs: float) -> np.ndarray:
        # Rotation vector to Quaternion, vectorized over the batch axis.
        # (NOTE: param 'abs' shadows the builtin.)
        xyz: np.ndarray = vec * \
            np.sin(np.repeat(abs[:, np.newaxis], 3, axis=1) * 0.5)
        return np.append(xyz, np.cos(abs * 0.5)[:, np.newaxis], axis=1)

    def create_pose_from_pose(self, key: str, link_idx: int, minibatch_config: Dict[str, Union[str, Dict[str, str], List[int], bool, List[float], MethodType]]) -> np.ndarray:
        """create_pose_from_pose
        Generate a "pose" by composing the configured TF chain.
        Args:
            key (str): key generated by HDF5DatasetNumpy.get_key()
            link_idx (int): link index
            minibatch_config (dict): mini-batch configuration
        Returns:
            np.ndarray: [tx, ty, tz, qx, qy, qz, qw]
        """
        translations = [np.array([0.0, 0.0, 0.0], dtype=np.float32)]
        quaternions = [np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float32)]
        for child_frame_id, invert in minibatch_config[CONFIG_TAG_TF]:
            tf_data: Dict[str, str] = self.tf[CONFIG_TAG_DATA].get(
                child_frame_id)
            if tf_data is None:
                # Random Pose Error (the '' hop appended in __init__).
                trns: np.ndarray = self.tr_err
                qtrn: np.ndarray = self.q_err
            else:
                h5_key = tf_data[CONFIG_TAG_KEY]
                if h5_key[0] == '/':
                    # Absolute key: prefix with the link index.
                    h5_key = str(link_idx) + h5_key
                else:
                    h5_key = os.path.join(key, h5_key)
                trns: np.ndarray = self.h5links[h5_key][SUBTYPE_TRANSLATION][(
                )]
                qtrn: np.ndarray = self.h5links[h5_key][SUBTYPE_ROTATION][()]
            if invert is True:
                trns, qtrn = invertTransform(translation=trns, quaternion=qtrn)
            translations.append(trns)
            quaternions.append(qtrn)
        translation, quaternion = combineTransforms(
            translations=translations, quaternions=quaternions)
        return np.concatenate([translation, quaternion])

    def __getitem__(self, index: int) -> dict:
        # Select this index's pre-generated error before the parent builds
        # the sample (create_pose_from_pose reads tr_err / q_err).
        if self.random_pose is True:
            self.tr_err = self.tr_err_list[index]
            self.q_err = self.q_err_list[index]
        items: Dict[str, torch.Tensor] = super().__getitem__(index)
        # Translation is scaled like the normalized map range.
        tr_norm: float = 1.0
        if self.minibatch[DATASET_MAP][CONFIG_TAG_NORMALIZE] is True:
            tr_norm *= self.minibatch[DATASET_MAP][CONFIG_TAG_RANGE][1]
        if self.random_pose is True:
            items[DATASET_POSE_ERR] = torch.from_numpy(CONVERT_TORCH[TYPE_POSE](
                DTYPE_TORCH[TYPE_POSE](np.concatenate([self.tr_err / tr_norm, self.q_err]))))
        else:
            items[DATASET_POSE_ERR][:3] = items[DATASET_POSE_ERR][:3] / tr_norm
        return items
import gc
import six
import numpy as np
from abc import ABCMeta, abstractmethod
from .base import BaseEstimator
class BaseSVM(six.with_metaclass(ABCMeta, BaseEstimator)):
def __init__(self, kernel='linear', degree=3, C=1.0, epsilon=1e-3, max_iter=100):
self.max_iter = max_iter
self._kernel = kernel
self.degree = degree
self.C = C
self.epsilon = epsilon
# 显式的初始化变量
def init(self, X: np.ndarray, y: np.ndarray):
self.m, self.n = X.shape
self.X = X
self.y = y
self.b = 0.0
self.K = self.kernel_mat(X, X) # 全部计算好的kernel
self.alpha = np.zeros(self.m)
self.g_v = self.g_vec()
self.E = self.g_v - self.y # 将Ei保存在一个列表里
# g(x),输入xi(X[i])
def g(self, i):
in_sigma = self.alpha * self.y * self.K[i]
self.g_v[i] = np.sum(in_sigma) + self.b
return self.g_v[i]
# vec结尾的是函数,v结尾的是缓存向量
def g_vec(self):
before_sigma = self.K * self.alpha * self.y
return np.sum(before_sigma, axis=-1) + self.b
# E(x)为g(x)对输入x的预测值和y的差
def _e(self, i):
return self.g(i) - self.y[i]
# 核函数
def kernel(self, x1: np.ndarray, x2: np.ndarray):
if self._kernel == 'linear':
return np.sum(x1 * x2)
elif self._kernel == 'poly':
return (np.sum(x1 * x2) + 1) ** self.degree
return 0
# 向量化kernel,一次获得xi对所有x的
def kernel_vec(self, x: np.ndarray): # todo: 待添加更多kernel
if self._kernel == 'linear':
return np.sum(self.X * x, axis=-1)
elif self._kernel == 'poly':
return (np.sum(self.X * x, axis=-1) + 1) ** self.degree
return None
# 直接计算所有kernel,以后直接调用
def kernel_mat(self, X1: np.ndarray, X2: np.ndarray):
x1 = X1[np.newaxis, ...]
x2 = X2[:, np.newaxis, :]
if self._kernel == 'linear':
return np.sum(x1 * x2, axis=-1) # 广播,对最后一个维度求和
elif self._kernel == 'poly':
return (np.sum(x1 * x2, axis=-1) + 1) ** self.degree
return None
# 选择α1 α2,返回index
def select_alpha(self):
_a = self.alpha
# 得到mask
con1, con2 = (_a > 0), (_a < self.C)
# yi*g(xi)
ygx = self.y * self.g_v
# αi == 0 and yi*gxi ≥ 1-ε KKT条件
err1 = ygx - 1 + self.epsilon
err1[(con1 & (err1 <= 0)) | (~con1 & (err1 > 0))] = 0 # 不在此类或符合置0
# 0 < αi < C and abs(yi*gxi - 1) ≤ ε
err2 = np.abs(ygx - 1) - self.epsilon
err2[~con1 | ~con2] = 0 # 置 αi ≤ 0 and αi ≥ C 的为0
# αi == C and yi*gxi ≤ 1+ε
err3 = ygx - 1 - self.epsilon
err3[(con2 & (err3 >= 0)) | (~con2 & (err3 < 0))] = 0
# 计算总error,排序获得index
err = err1 ** 2 + err2 ** 2 + err3 ** 2
# α1为违反KKT条件最严重的点
i1 = np.argmax(err)
# 如果E1是+,选择最小的;如果E1是负的,选择最大的
i2 = np.argmin(self.E) if self.E[i1] >= 0 else np.argmax(self.E)
if i2 == i1: # 如果相等,返回第二大or小的
if self.E[i1] >= 0:
i2 = np.argsort(self.E)[1]
else:
i2 = np.argsort(self.E)[-2]
return i1, i2
# 优化α1 α2,更新b
def optimize(self, i1, i2):
a1_old, a2_old, b_old = self.alpha[i1], self.alpha[i2], self.b
_y, _K = self.y, self.K
# 边界
if _y[i1] == _y[i2]:
L = max(0, a2_old + a1_old - self.C)
H = min(self.C, a2_old + a1_old)
else:
L = max(0, a2_old - a1_old)
H = min(self.C, self.C + a2_old - a1_old)
E1, E2 = self.E[i1], self.E[i2]
eta = _K[i1, i1] + _K[i2, i2] - 2 * _K[i1, i2] # 7.107 η = K11 + K22 - 2K12
if eta <= 0:
print('eta <= 0')
return
a2_new_unc = a2_old + _y[i2] * (E1 - E2) / eta # 7.106
def cut_alpha(a, h, l): # 7.108
if a > h:
return h
elif a < l:
return l
else:
return a
a2_new = cut_alpha(a2_new_unc, H, L)
a1_new = a1_old + _y[i1] * _y[i2] * (a2_old - a2_new) # 7.109
b1_new = -E1 - _y[i1] * _K[i1, i1] * (a1_new - a1_old) - \
_y[i2] * _K[i2, i1] * (a2_new - a2_old) + b_old # 7.115
b2_new = -E2 - _y[i1] * _K[i1, i2] * (a1_new - a1_old) - \
_y[i2] * _K[i2, i2] * (a2_new - a2_old) + b_old # 7.116
if 0 < a1_new < self.C:
b_new = b1_new
elif 0 < a2_new < self.C:
b_new = b2_new
else: # 选择中点,否则b1=b2的
b_new = (b1_new + b2_new) / 2
def e_new(i):
in_sigma = self.alpha * self.y * self.K[i]
in_sigma[self.alpha <= 0] = 0
in_sigma[self.alpha > self.C] = 0
return np.sum(in_sigma) + self.b - self.y[i]
# 更新参数
self.alpha[i1], self.alpha[i2] = a1_new, a2_new
self.b = b_new
self.E[i1], self.E[i2] = e_new(i1), e_new(i2)
return None
    # Stopping criterion.
def shutdown(self):
c, eps = self.C, self.epsilon
_a, _y = self.alpha, self.y
if np.any(_a < 0) | np.any(_a > c):
return False
elif np.abs(np.sum(_a * _y)) > eps:
return False
else:
ygx = _y * self.g_v
if np.any(ygx[_a == 0] < 1 - eps):
return False
elif np.any(np.abs(ygx[(0 < _a) & (_a < c)] - 1) > eps):
return False
elif np.any(ygx[_a == c] > 1 + eps):
return False
return True
def fit(self, X: np.ndarray, y: np.ndarray):
self.init(X, y)
for t in range(self.max_iter):
i1, i2 = self.select_alpha() # 选择优化变量
self.optimize(i1, i2) # 求解两个变量的最优化问题
if self.shutdown(): # 停机条件
# print('Early stop')
break
return
def predict(self, X):
fx = self.f(X)
fx[fx >= 0] = 1
fx[fx < 0] = -1
return fx
    # Decision function.
def f(self, X):
k_mat = self.kernel_mat(self.X, X) # shape: n_test, n_train
in_sigma = k_mat * self.alpha * self.y # 广播
return np.sum(in_sigma, axis=-1) + self.b
class SVC(BaseSVM):
    def __init__(self, kernel='linear', degree=3, C=1.0, epsilon=1e-3,
                 max_iter=100, decision_function_shape='ovr'):
        """Multi-class SVM built from binary BaseSVM models.

        :param decision_function_shape: 'ovr' (one-vs-rest) or 'ovo'
            (one-vs-one); selects how the binary sub-models are built and combined.
        Every other parameter is forwarded to BaseSVM unchanged.
        """
        super(SVC, self).__init__(kernel=kernel, degree=degree, C=C,
                                  epsilon=epsilon, max_iter=max_iter)
        # Strategy used to reduce the multi-class problem to binary problems.
        self.decision_function_shape = decision_function_shape
    # Build the data for one binary sub-problem.
def get_xyi(self, X, y, i, j=0):
xi, yi = X.copy(), y.copy()
if self.decision_function_shape == 'ovr':
yi[yi != i] = -1
yi[yi == i] = 1
elif self.decision_function_shape == 'ovo':
pi, pj = yi == i, yi == j
xi, yi = xi[pi | pj], yi[pi | pj]
yi[yi == j] = -1
yi[yi == i] = 1
return xi, yi
    def fit(self, X: np.ndarray, y: np.ndarray):
        """Train one binary SVM per class (ovr) or per class pair (ovo).

        Labels in ``y`` appear to be assumed to be the integers
        0..n_classes-1 (the ovo bookkeeping and predict() rely on that) --
        TODO confirm against callers.
        """
        self.n_classes = len(set(y.tolist()))
        if self.decision_function_shape == 'ovr':
            # Compute the Gram matrix once at the SVC level.
            self.K = self.kernel_mat(X, X)
            self.svms = list()
            for i in range(self.n_classes):
                svm = BaseSVM(self._kernel, self.degree, self.C, self.epsilon, self.max_iter)
                svm.fit(*self.get_xyi(X, y, i))
                # After training, drop the per-model Gram matrix and point the
                # model at the shared one so memory stays flat across models.
                del svm.K
                gc.collect()
                svm.K = self.K
                self.svms.append(svm)
        elif self.decision_function_shape == 'ovo':
            self.svmd = dict()
            for i in range(self.n_classes):
                for j in range(i + 1, self.n_classes):
                    svm = BaseSVM(self._kernel, self.degree, self.C, self.epsilon, self.max_iter)
                    xij, yij = self.get_xyi(X, y, i, j)
                    svm.fit(xij, yij)
                    # NOTE(review): predict() parses this key as k[0]/k[2],
                    # which only works while class indices are single digits.
                    self.svmd[f"{i}-{j}"] = svm
        else:
            raise ValueError("不支持的策略!")
    # Multi-class decision function.
def decision_function(self, predictions: np.ndarray) -> np.ndarray:
pred_y = np.zeros(predictions.shape[0])
if self.decision_function_shape == 'ovr':
pred_y = predictions.argmax(axis=-1)
elif self.decision_function_shape == 'ovo':
pred_y = predictions.argmax(axis=-1)
return pred_y
def predict(self, X):
n_samples = X.shape[0]
predictions = np.zeros([n_samples, self.n_classes])
if self.decision_function_shape == 'ovr':
for i, svm in enumerate(self.svms):
f = svm.f(X) # todo 函数值尺度不一,有偏见
f_max, f_min = f.max(), f.min()
predictions[:, i] = f - f_min / (f_max - f_min)
elif self.decision_function_shape == 'ovo':
for k, svm in self.svmd.items():
i, j = int(k[0]), int(k[2])
pred = svm.predict(X)
pred[pred == -1] = 0
predictions[:, i] += pred
predictions[:, j] += -pred + 1
return self.decision_function(predictions) | hklearn/svm.py | import gc
import six
import numpy as np
from abc import ABCMeta, abstractmethod
from .base import BaseEstimator
class BaseSVM(six.with_metaclass(ABCMeta, BaseEstimator)):
def __init__(self, kernel='linear', degree=3, C=1.0, epsilon=1e-3, max_iter=100):
self.max_iter = max_iter
self._kernel = kernel
self.degree = degree
self.C = C
self.epsilon = epsilon
# 显式的初始化变量
def init(self, X: np.ndarray, y: np.ndarray):
self.m, self.n = X.shape
self.X = X
self.y = y
self.b = 0.0
self.K = self.kernel_mat(X, X) # 全部计算好的kernel
self.alpha = np.zeros(self.m)
self.g_v = self.g_vec()
self.E = self.g_v - self.y # 将Ei保存在一个列表里
# g(x),输入xi(X[i])
def g(self, i):
in_sigma = self.alpha * self.y * self.K[i]
self.g_v[i] = np.sum(in_sigma) + self.b
return self.g_v[i]
# vec结尾的是函数,v结尾的是缓存向量
def g_vec(self):
before_sigma = self.K * self.alpha * self.y
return np.sum(before_sigma, axis=-1) + self.b
# E(x)为g(x)对输入x的预测值和y的差
def _e(self, i):
return self.g(i) - self.y[i]
# 核函数
def kernel(self, x1: np.ndarray, x2: np.ndarray):
if self._kernel == 'linear':
return np.sum(x1 * x2)
elif self._kernel == 'poly':
return (np.sum(x1 * x2) + 1) ** self.degree
return 0
# 向量化kernel,一次获得xi对所有x的
def kernel_vec(self, x: np.ndarray): # todo: 待添加更多kernel
if self._kernel == 'linear':
return np.sum(self.X * x, axis=-1)
elif self._kernel == 'poly':
return (np.sum(self.X * x, axis=-1) + 1) ** self.degree
return None
# 直接计算所有kernel,以后直接调用
def kernel_mat(self, X1: np.ndarray, X2: np.ndarray):
x1 = X1[np.newaxis, ...]
x2 = X2[:, np.newaxis, :]
if self._kernel == 'linear':
return np.sum(x1 * x2, axis=-1) # 广播,对最后一个维度求和
elif self._kernel == 'poly':
return (np.sum(x1 * x2, axis=-1) + 1) ** self.degree
return None
# 选择α1 α2,返回index
def select_alpha(self):
_a = self.alpha
# 得到mask
con1, con2 = (_a > 0), (_a < self.C)
# yi*g(xi)
ygx = self.y * self.g_v
# αi == 0 and yi*gxi ≥ 1-ε KKT条件
err1 = ygx - 1 + self.epsilon
err1[(con1 & (err1 <= 0)) | (~con1 & (err1 > 0))] = 0 # 不在此类或符合置0
# 0 < αi < C and abs(yi*gxi - 1) ≤ ε
err2 = np.abs(ygx - 1) - self.epsilon
err2[~con1 | ~con2] = 0 # 置 αi ≤ 0 and αi ≥ C 的为0
# αi == C and yi*gxi ≤ 1+ε
err3 = ygx - 1 - self.epsilon
err3[(con2 & (err3 >= 0)) | (~con2 & (err3 < 0))] = 0
# 计算总error,排序获得index
err = err1 ** 2 + err2 ** 2 + err3 ** 2
# α1为违反KKT条件最严重的点
i1 = np.argmax(err)
# 如果E1是+,选择最小的;如果E1是负的,选择最大的
i2 = np.argmin(self.E) if self.E[i1] >= 0 else np.argmax(self.E)
if i2 == i1: # 如果相等,返回第二大or小的
if self.E[i1] >= 0:
i2 = np.argsort(self.E)[1]
else:
i2 = np.argsort(self.E)[-2]
return i1, i2
# 优化α1 α2,更新b
def optimize(self, i1, i2):
a1_old, a2_old, b_old = self.alpha[i1], self.alpha[i2], self.b
_y, _K = self.y, self.K
# 边界
if _y[i1] == _y[i2]:
L = max(0, a2_old + a1_old - self.C)
H = min(self.C, a2_old + a1_old)
else:
L = max(0, a2_old - a1_old)
H = min(self.C, self.C + a2_old - a1_old)
E1, E2 = self.E[i1], self.E[i2]
eta = _K[i1, i1] + _K[i2, i2] - 2 * _K[i1, i2] # 7.107 η = K11 + K22 - 2K12
if eta <= 0:
print('eta <= 0')
return
a2_new_unc = a2_old + _y[i2] * (E1 - E2) / eta # 7.106
def cut_alpha(a, h, l): # 7.108
if a > h:
return h
elif a < l:
return l
else:
return a
a2_new = cut_alpha(a2_new_unc, H, L)
a1_new = a1_old + _y[i1] * _y[i2] * (a2_old - a2_new) # 7.109
b1_new = -E1 - _y[i1] * _K[i1, i1] * (a1_new - a1_old) - \
_y[i2] * _K[i2, i1] * (a2_new - a2_old) + b_old # 7.115
b2_new = -E2 - _y[i1] * _K[i1, i2] * (a1_new - a1_old) - \
_y[i2] * _K[i2, i2] * (a2_new - a2_old) + b_old # 7.116
if 0 < a1_new < self.C:
b_new = b1_new
elif 0 < a2_new < self.C:
b_new = b2_new
else: # 选择中点,否则b1=b2的
b_new = (b1_new + b2_new) / 2
def e_new(i):
in_sigma = self.alpha * self.y * self.K[i]
in_sigma[self.alpha <= 0] = 0
in_sigma[self.alpha > self.C] = 0
return np.sum(in_sigma) + self.b - self.y[i]
# 更新参数
self.alpha[i1], self.alpha[i2] = a1_new, a2_new
self.b = b_new
self.E[i1], self.E[i2] = e_new(i1), e_new(i2)
return None
# 停机条件
def shutdown(self):
c, eps = self.C, self.epsilon
_a, _y = self.alpha, self.y
if np.any(_a < 0) | np.any(_a > c):
return False
elif np.abs(np.sum(_a * _y)) > eps:
return False
else:
ygx = _y * self.g_v
if np.any(ygx[_a == 0] < 1 - eps):
return False
elif np.any(np.abs(ygx[(0 < _a) & (_a < c)] - 1) > eps):
return False
elif np.any(ygx[_a == c] > 1 + eps):
return False
return True
def fit(self, X: np.ndarray, y: np.ndarray):
self.init(X, y)
for t in range(self.max_iter):
i1, i2 = self.select_alpha() # 选择优化变量
self.optimize(i1, i2) # 求解两个变量的最优化问题
if self.shutdown(): # 停机条件
# print('Early stop')
break
return
def predict(self, X):
fx = self.f(X)
fx[fx >= 0] = 1
fx[fx < 0] = -1
return fx
# 决策函数
def f(self, X):
k_mat = self.kernel_mat(self.X, X) # shape: n_test, n_train
in_sigma = k_mat * self.alpha * self.y # 广播
return np.sum(in_sigma, axis=-1) + self.b
class SVC(BaseSVM):
def __init__(self, kernel='linear', degree=3, C=1.0, epsilon=1e-3,
max_iter=100, decision_function_shape='ovr'):
super(SVC, self).__init__(kernel=kernel, degree=degree, C=C,
epsilon=epsilon, max_iter=max_iter)
self.decision_function_shape = decision_function_shape
# 生成二分类的数据
def get_xyi(self, X, y, i, j=0):
xi, yi = X.copy(), y.copy()
if self.decision_function_shape == 'ovr':
yi[yi != i] = -1
yi[yi == i] = 1
elif self.decision_function_shape == 'ovo':
pi, pj = yi == i, yi == j
xi, yi = xi[pi | pj], yi[pi | pj]
yi[yi == j] = -1
yi[yi == i] = 1
return xi, yi
def fit(self, X: np.ndarray, y: np.ndarray):
self.n_classes = len(set(y.tolist()))
if self.decision_function_shape == 'ovr':
self.K = self.kernel_mat(X, X)
self.svms = list()
for i in range(self.n_classes):
svm = BaseSVM(self._kernel, self.degree, self.C, self.epsilon, self.max_iter)
svm.fit(*self.get_xyi(X, y, i))
del svm.K
gc.collect()
svm.K = self.K
self.svms.append(svm)
elif self.decision_function_shape == 'ovo':
self.svmd = dict()
for i in range(self.n_classes):
for j in range(i + 1, self.n_classes):
svm = BaseSVM(self._kernel, self.degree, self.C, self.epsilon, self.max_iter)
xij, yij = self.get_xyi(X, y, i, j)
svm.fit(xij, yij)
self.svmd[f"{i}-{j}"] = svm
else:
raise ValueError("不支持的策略!")
# 多分类决策函数
def decision_function(self, predictions: np.ndarray) -> np.ndarray:
pred_y = np.zeros(predictions.shape[0])
if self.decision_function_shape == 'ovr':
pred_y = predictions.argmax(axis=-1)
elif self.decision_function_shape == 'ovo':
pred_y = predictions.argmax(axis=-1)
return pred_y
def predict(self, X):
n_samples = X.shape[0]
predictions = np.zeros([n_samples, self.n_classes])
if self.decision_function_shape == 'ovr':
for i, svm in enumerate(self.svms):
f = svm.f(X) # todo 函数值尺度不一,有偏见
f_max, f_min = f.max(), f.min()
predictions[:, i] = f - f_min / (f_max - f_min)
elif self.decision_function_shape == 'ovo':
for k, svm in self.svmd.items():
i, j = int(k[0]), int(k[2])
pred = svm.predict(X)
pred[pred == -1] = 0
predictions[:, i] += pred
predictions[:, j] += -pred + 1
return self.decision_function(predictions) | 0.237753 | 0.375477 |
import sys
from traceback import format_exception
from tuttle.error import TuttleError
from tuttle.report.dot_repport import create_dot_report
from tuttle.report.html_repport import create_html_report
from pickle import dump, load
from tuttle.workflow_runner import WorkflowRunner, TuttleEnv
from tuttle_directories import TuttleDirectories
from tuttle.log_follower import LogsFollower
from tuttle.version import version
class ProcessDependencyIterator:
""" Provides an iterator on processes according to dependency order"""
def __init__(self, workflow):
self._resources_to_build = {r for r in workflow.iter_resources() if r.creator_process}
self._processes_to_run = {p for p in workflow.iter_processes()}
def all_inputs_built(self, process):
""" Returns True if all inputs of this process where build, ie if the process can be executed """
for input_res in process.iter_inputs():
if input_res in self._resources_to_build:
return False
return True
def pick_a_process(self):
""" Pick an executable process, if there is one
"""
for process in self._processes_to_run:
if self.all_inputs_built(process):
return process
# No more process to pick
return None
def iter_processes(self):
# The idea is to remove the resource from the list as we simulate execution of _processes
p = self.pick_a_process()
while p:
for r in p.iter_outputs():
self._resources_to_build.remove(r)
self._processes_to_run.remove(p)
yield p
p = self.pick_a_process()
def remaining(self):
return self._processes_to_run
class Workflow:
""" A workflow is a dependency tree of processes
"""
    def __init__(self, resources):
        """
        :param resources: mapping url -> resource object
        """
        self._processes = []      # processes declared by the project
        self._preprocesses = []   # preprocesses, run before the workflow itself
        self._resources = resources
        self._signatures = {}     # url -> signature (or a placeholder, eg "DISCOVERED")
        self.tuttle_version = version
    def add_process(self, process):
        """ Adds a process to the workflow
        :param process: the process to register
        :return: None
        """
        self._processes.append(process)
    def add_preprocess(self, preprocess):
        """ Adds a preprocess to the workflow
        :param preprocess: the preprocess to register
        :return: None
        """
        self._preprocesses.append(preprocess)
def iter_processes(self):
for process in self._processes:
yield process
def iter_preprocesses(self):
for preprocess in self._preprocesses:
yield preprocess
    def nb_resources(self):
        # Number of resources (of any kind) referenced by the workflow.
        return len(self._resources)
    def iter_resources(self):
        # Python 2 iterator over the resource objects (not their urls).
        return self._resources.itervalues()
def has_preprocesses(self):
""" Has preprocesses ?
:return: True if the workflow has preprocesses
"""
return len(self._preprocesses) > 0
def primary_inputs_not_available(self):
""" Check that all primary resources (external resources) that are necessary to run the workflow are available
:return: a list of missing resources
:rtype: list
"""
missing = []
for resource in self._resources.itervalues():
if resource.is_primary():
if not self.resource_available(resource.url):
missing.append(resource)
return missing
def circular_references(self):
""" Return a list of processes that won't be able to run according to to dependency graph, because
of circular references, ie when A is produced by B... And B produced by A.
:return: a list of process that won't be able to run. No special indication about circular groups
:rtype: list
"""
process_iterator = ProcessDependencyIterator(self)
for _ in process_iterator.iter_processes():
pass
return process_iterator.remaining()
    def static_check_processes(self):
        """ Runs a pre-check for every process, in order to catch early obvious errors, even before invalidation
        :return: None
        """
        # Delegates to each process; presumably raises on an invalid process
        # definition -- confirm against the processors' static_check().
        for process in self.iter_processes():
            process.static_check()
def check_resources_consistency(self):
resource_classes = { res.__class__ for res in self.iter_resources()}
for resource_class in resource_classes:
resource_class.check_consistency(self)
    def update_signatures(self, signatures):
        """ updates the workflow's signatures after the process has run
        :param signatures: a dictionary of signatures indexed by urls
        :return: None
        """
        self._signatures.update(signatures)
def run_pre_processes(self):
""" Runs all the preprocesses
:return:
:raises ExecutionError if an error occurs
"""
TuttleDirectories.create_tuttle_dirs()
TuttleDirectories.empty_extension_dir()
if not self.has_preprocesses():
return
lt = LogsFollower()
WorkflowRunner.print_preprocesses_header()
for process in self.iter_preprocesses():
TuttleDirectories.prepare_and_assign_paths(process)
lt.follow_process(process.log_stdout, process.log_stderr, None)
with lt.trace_in_background(), TuttleEnv():
for preprocess in self.iter_preprocesses():
WorkflowRunner.print_preprocess_header(preprocess, lt._logger)
success = True
error_msg = None
try:
preprocess.set_start()
preprocess.processor.run(preprocess, preprocess._reserved_path,
preprocess.log_stdout, preprocess.log_stderr)
except TuttleError as e:
success = False
error_msg = str(e)
raise
except Exception:
exc_info = sys.exc_info()
stacktrace = "".join(format_exception(*exc_info))
error_msg = "An unexpected error have happened in tuttle preprocessor {} : \n" \
"{}\n" \
"Preprocess {} will not complete.".format(preprocess._processor.name, stacktrace, preprocess.id)
finally:
preprocess.set_end(success, error_msg)
self.create_reports()
WorkflowRunner.print_preprocesses_footer()
    def create_reports(self):
        """ Write to disk files describing the workflow, with color for states
        :return: None
        """
        # Both an html report for humans and a dot graph for tooling.
        create_html_report(self, TuttleDirectories.tuttle_dir("report.html"))
        create_dot_report(self, TuttleDirectories.tuttle_dir("report.dot"))
def dump(self):
""" Pickles the workflow and writes it to last_workflow.pickle
:return: None
"""
with open(TuttleDirectories.tuttle_dir("last_workflow.pickle"), "w") as f:
dump(self, f)
    def export(self):
        """ Export the workflow for external use : a dump for running tuttle later and a report for human users
        :return: None
        """
        self.dump()
        self.create_reports()
@staticmethod
def load():
try:
with open(TuttleDirectories.tuttle_dir("last_workflow.pickle"), "r") as f:
return load(f)
except:
return None
    def get_extensions(self):
        # Delegates to TuttleDirectories; presumably lists the extension
        # directories of the workspace -- confirm against TuttleDirectories.
        return TuttleDirectories.list_extensions()
def find_process_that_creates(self, url):
"""
:param url: Returns the process that creates this url. this url is supposed to be created by this workflow,
so check creates_url() before calling this method
:return:
"""
if url in self._resources:
return self._resources[url].creator_process
def find_resource(self, url):
if url in self._resources:
return self._resources[url]
else:
return None
def compute_dependencies(self):
""" Feeds the dependant_processes field in every resource
:return: Nothing
"""
for resource in self._resources.itervalues():
resource.dependant_processes = []
for process in self.iter_processes():
for resource in process.iter_inputs():
resource.dependant_processes.append(process)
    def iter_available_signatures(self):
        # Python 2 iterator over (url, signature) pairs.
        return self._signatures.iteritems()
def retrieve_signatures(self, previous):
""" Retrieve the signatures from the former workflow. Useful to detect what has changed.
Returns True if some resources where in previous and no longer exist in self
"""
for url, signature in previous.iter_available_signatures():
if (url in self._signatures) and (self._signatures[url] == "DISCOVERED"):
self._signatures[url] = signature
def pick_a_failing_process(self):
for process in self.iter_processes():
if process.end is not None and process.success is False:
return process
return None
def reset_failures(self):
workflow_changed = False
for process in self._processes:
if process.success is False:
process.reset_execution_info()
workflow_changed = True
return workflow_changed
def all_inputs_available(self, process):
"""
:return: True if all input resources for this process are vailable, False otherwise
"""
for in_res in process.iter_inputs():
if not self.resource_available(in_res.url):
return False
return True
def runnable_processes(self):
""" List processes that can be run (because they have all inputs)
:return:
"""
res = set()
for process in self.iter_processes():
if process.start is None and self.all_inputs_available(process):
res.add(process)
return res
def discover_runnable_processes(self, complete_process):
""" List processes that can be run (because they have all inputs)
:return:
"""
res = set()
for process in self.iter_processes():
if process.start is None:
if process.depends_on_process(complete_process):
if self.all_inputs_available(process):
res.add(process)
return res
    def discover_resources(self):
        """ Record a signature for every resource that already exists.

        Primary (external) resources get their real signature; resources the
        workflow is supposed to produce are only tagged "DISCOVERED", to be
        resolved later (see retrieve_signatures()).
        """
        for resource in self._resources.itervalues():
            if resource.exists():
                if resource.is_primary():
                    self._signatures[resource.url] = resource.signature()
                else:
                    self._signatures[resource.url] = "DISCOVERED"
def signature(self, url):
# TODO simplier with __get__ ?
if url in self._signatures:
return self._signatures[url]
else:
return None
    def resource_available(self, url):
        # A resource counts as available once a signature has been recorded for it.
        return url in self._signatures
def clear_signatures(self, urls):
for url in urls:
if url in self._signatures:
del self._signatures[url]
    def fill_missing_availability(self):
        """ Replace placeholder signatures with real ones.

        A signature stored as the bare value True presumably marks a resource
        whose availability was recorded without computing an actual
        signature -- TODO confirm against the callers that set it.
        """
        for url, signature in self.iter_available_signatures():
            if signature is True:
                print("Filling availability for {}".format(url))
                resource = self.find_resource(url)
                new_signature = resource.signature()
                # NOTE(review): overwriting values while iterating iteritems()
                # is safe in py2 only because no key is added or removed.
                self._signatures[url] = new_signature
def similar_process(self, process_from_other_workflow):
output_resource = process_from_other_workflow.pick_an_output()
if output_resource:
return self.find_process_that_creates(output_resource.url)
else:
other_wf_urls = process_from_other_workflow.input_urls()
for process in self.iter_processes():
if not process.has_outputs() and process.input_urls() == other_wf_urls:
return process
return None
def iter_processes_on_dependency_order(self):
""" returns an iterator on processes according to dependency order"""
process_iterator = ProcessDependencyIterator(self)
return process_iterator.iter_processes()
    def contains_resource(self, resource):
        # Membership is keyed by the resource's url.
        return resource.url in self._resources | tuttle/workflow.py | import sys
from traceback import format_exception
from tuttle.error import TuttleError
from tuttle.report.dot_repport import create_dot_report
from tuttle.report.html_repport import create_html_report
from pickle import dump, load
from tuttle.workflow_runner import WorkflowRunner, TuttleEnv
from tuttle_directories import TuttleDirectories
from tuttle.log_follower import LogsFollower
from tuttle.version import version
class ProcessDependencyIterator:
""" Provides an iterator on processes according to dependency order"""
def __init__(self, workflow):
self._resources_to_build = {r for r in workflow.iter_resources() if r.creator_process}
self._processes_to_run = {p for p in workflow.iter_processes()}
def all_inputs_built(self, process):
""" Returns True if all inputs of this process where build, ie if the process can be executed """
for input_res in process.iter_inputs():
if input_res in self._resources_to_build:
return False
return True
def pick_a_process(self):
""" Pick an executable process, if there is one
"""
for process in self._processes_to_run:
if self.all_inputs_built(process):
return process
# No more process to pick
return None
def iter_processes(self):
# The idea is to remove the resource from the list as we simulate execution of _processes
p = self.pick_a_process()
while p:
for r in p.iter_outputs():
self._resources_to_build.remove(r)
self._processes_to_run.remove(p)
yield p
p = self.pick_a_process()
def remaining(self):
return self._processes_to_run
class Workflow:
""" A workflow is a dependency tree of processes
"""
def __init__(self, resources):
self._processes = []
self._preprocesses = []
self._resources = resources
self._signatures = {}
self.tuttle_version = version
def add_process(self, process):
""" Adds a process
:param process:
:return:
"""
self._processes.append(process)
def add_preprocess(self, preprocess):
""" Adds a preprocess
:param preprocess:
:return:
"""
self._preprocesses.append(preprocess)
def iter_processes(self):
for process in self._processes:
yield process
def iter_preprocesses(self):
for preprocess in self._preprocesses:
yield preprocess
def nb_resources(self):
return len(self._resources)
def iter_resources(self):
return self._resources.itervalues()
def has_preprocesses(self):
""" Has preprocesses ?
:return: True if the workflow has preprocesses
"""
return len(self._preprocesses) > 0
def primary_inputs_not_available(self):
""" Check that all primary resources (external resources) that are necessary to run the workflow are available
:return: a list of missing resources
:rtype: list
"""
missing = []
for resource in self._resources.itervalues():
if resource.is_primary():
if not self.resource_available(resource.url):
missing.append(resource)
return missing
def circular_references(self):
""" Return a list of processes that won't be able to run according to to dependency graph, because
of circular references, ie when A is produced by B... And B produced by A.
:return: a list of process that won't be able to run. No special indication about circular groups
:rtype: list
"""
process_iterator = ProcessDependencyIterator(self)
for _ in process_iterator.iter_processes():
pass
return process_iterator.remaining()
def static_check_processes(self):
""" Runs a pre-check for every process, in order to catch early obvious errors, even before invalidation
:return: None
"""
for process in self.iter_processes():
process.static_check()
def check_resources_consistency(self):
resource_classes = { res.__class__ for res in self.iter_resources()}
for resource_class in resource_classes:
resource_class.check_consistency(self)
def update_signatures(self, signatures):
""" updates the workflow's signatures after the process has run
:param signatures: a dictionary of signatures indexed by urls
"""
self._signatures.update(signatures)
def run_pre_processes(self):
""" Runs all the preprocesses
:return:
:raises ExecutionError if an error occurs
"""
TuttleDirectories.create_tuttle_dirs()
TuttleDirectories.empty_extension_dir()
if not self.has_preprocesses():
return
lt = LogsFollower()
WorkflowRunner.print_preprocesses_header()
for process in self.iter_preprocesses():
TuttleDirectories.prepare_and_assign_paths(process)
lt.follow_process(process.log_stdout, process.log_stderr, None)
with lt.trace_in_background(), TuttleEnv():
for preprocess in self.iter_preprocesses():
WorkflowRunner.print_preprocess_header(preprocess, lt._logger)
success = True
error_msg = None
try:
preprocess.set_start()
preprocess.processor.run(preprocess, preprocess._reserved_path,
preprocess.log_stdout, preprocess.log_stderr)
except TuttleError as e:
success = False
error_msg = str(e)
raise
except Exception:
exc_info = sys.exc_info()
stacktrace = "".join(format_exception(*exc_info))
error_msg = "An unexpected error have happened in tuttle preprocessor {} : \n" \
"{}\n" \
"Preprocess {} will not complete.".format(preprocess._processor.name, stacktrace, preprocess.id)
finally:
preprocess.set_end(success, error_msg)
self.create_reports()
WorkflowRunner.print_preprocesses_footer()
def create_reports(self):
""" Write to disk files describing the workflow, with color for states
:return: None
"""
create_html_report(self, TuttleDirectories.tuttle_dir("report.html"))
create_dot_report(self, TuttleDirectories.tuttle_dir("report.dot"))
def dump(self):
""" Pickles the workflow and writes it to last_workflow.pickle
:return: None
"""
with open(TuttleDirectories.tuttle_dir("last_workflow.pickle"), "w") as f:
dump(self, f)
def export(self):
""" Export the workflow for external use : a dump for running tuttle later and a report for human users
:return: None
"""
self.dump()
self.create_reports()
@staticmethod
def load():
try:
with open(TuttleDirectories.tuttle_dir("last_workflow.pickle"), "r") as f:
return load(f)
except:
return None
def get_extensions(self):
return TuttleDirectories.list_extensions()
def find_process_that_creates(self, url):
"""
:param url: Returns the process that creates this url. this url is supposed to be created by this workflow,
so check creates_url() before calling this method
:return:
"""
if url in self._resources:
return self._resources[url].creator_process
def find_resource(self, url):
if url in self._resources:
return self._resources[url]
else:
return None
def compute_dependencies(self):
""" Feeds the dependant_processes field in every resource
:return: Nothing
"""
for resource in self._resources.itervalues():
resource.dependant_processes = []
for process in self.iter_processes():
for resource in process.iter_inputs():
resource.dependant_processes.append(process)
def iter_available_signatures(self):
return self._signatures.iteritems()
def retrieve_signatures(self, previous):
""" Retrieve the signatures from the former workflow. Useful to detect what has changed.
Returns True if some resources where in previous and no longer exist in self
"""
for url, signature in previous.iter_available_signatures():
if (url in self._signatures) and (self._signatures[url] == "DISCOVERED"):
self._signatures[url] = signature
def pick_a_failing_process(self):
for process in self.iter_processes():
if process.end is not None and process.success is False:
return process
return None
def reset_failures(self):
workflow_changed = False
for process in self._processes:
if process.success is False:
process.reset_execution_info()
workflow_changed = True
return workflow_changed
def all_inputs_available(self, process):
"""
:return: True if all input resources for this process are vailable, False otherwise
"""
for in_res in process.iter_inputs():
if not self.resource_available(in_res.url):
return False
return True
def runnable_processes(self):
""" List processes that can be run (because they have all inputs)
:return:
"""
res = set()
for process in self.iter_processes():
if process.start is None and self.all_inputs_available(process):
res.add(process)
return res
def discover_runnable_processes(self, complete_process):
""" List processes that can be run (because they have all inputs)
:return:
"""
res = set()
for process in self.iter_processes():
if process.start is None:
if process.depends_on_process(complete_process):
if self.all_inputs_available(process):
res.add(process)
return res
def discover_resources(self):
for resource in self._resources.itervalues():
if resource.exists():
if resource.is_primary():
self._signatures[resource.url] = resource.signature()
else:
self._signatures[resource.url] = "DISCOVERED"
def signature(self, url):
# TODO simplier with __get__ ?
if url in self._signatures:
return self._signatures[url]
else:
return None
def resource_available(self, url):
return url in self._signatures
def clear_signatures(self, urls):
for url in urls:
if url in self._signatures:
del self._signatures[url]
def fill_missing_availability(self):
for url, signature in self.iter_available_signatures():
if signature is True:
print("Filling availability for {}".format(url))
resource = self.find_resource(url)
new_signature = resource.signature()
self._signatures[url] = new_signature
def similar_process(self, process_from_other_workflow):
output_resource = process_from_other_workflow.pick_an_output()
if output_resource:
return self.find_process_that_creates(output_resource.url)
else:
other_wf_urls = process_from_other_workflow.input_urls()
for process in self.iter_processes():
if not process.has_outputs() and process.input_urls() == other_wf_urls:
return process
return None
def iter_processes_on_dependency_order(self):
""" returns an iterator on processes according to dependency order"""
process_iterator = ProcessDependencyIterator(self)
return process_iterator.iter_processes()
def contains_resource(self, resource):
return resource.url in self._resources | 0.551091 | 0.117978 |
import unittest
import os, shutil
from cybercaptain.visualization.map import visualization_map
from cybercaptain.utils.exceptions import ValidationError
TESTDATA_FOLDER = os.path.join(os.path.dirname(__file__), '../assets')
TESTDATA_GEN_OUTPUT_FOLDER = os.path.join(TESTDATA_FOLDER, 'output')
# Appends the args every module needs, taken from the root config: projectName / projectRoot / moduleName
def append_needed_args(existing_args):
    """Return a copy of *existing_args* extended with the root-config keys
    (projectRoot / projectName / moduleName) that every module test needs."""
    merged = dict(existing_args)
    merged.update(projectRoot=TESTDATA_FOLDER, projectName="UNITTEST.cckv",
                  moduleName="UNITEST_MODULE")
    return merged
class DataVisualizationMapValidateTest(unittest.TestCase):
"""
Test the visualization map chart png validation.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
arguments = append_needed_args({'src': '.',
'map' : 'world',
'type' : '.',
'colormap' : '.',
'countryCodeAttribute' : '.',
'groupedValueAttribute' : '.',
'displayLegend' : 'yes',
'displayLabels' : 'yes',
'labelsThreshold' : 1,
'title' : '.',
'target': '.'})
self.visualization = visualization_map(**arguments)
def setUp(self):
if not os.path.exists(TESTDATA_GEN_OUTPUT_FOLDER):
os.makedirs(TESTDATA_GEN_OUTPUT_FOLDER)
    def tearDown(self):
        # Remove everything the test produced so runs stay independent.
        shutil.rmtree(TESTDATA_GEN_OUTPUT_FOLDER)
def test_validation_heatmap(self):
"""
Testing the heatmap validation.
"""
# SRC missing
arg1 = append_needed_args({
'map' : 'world',
'type' : 'viridis',
'colormap' : '.',
'countryCodeAttribute' : '.',
'groupedValueAttribute' : '.',
'displayLegend' : 'yes',
'displayLabels' : 'yes',
'labelsThreshold' : 1,
'title' : '.',
'target': '.'})
with self.assertRaises(ValidationError):
self.visualization.validate(arg1)
# map missing
arg1 = append_needed_args({'src': '.',
'type' : '.',
'colormap' : 'viridis',
'countryCodeAttribute' : '.',
'groupedValueAttribute' : '.',
'displayLegend' : 'yes',
'displayLabels' : 'yes',
'labelsThreshold' : 1,
'title' : '.',
'target': '.'})
with self.assertRaises(ValidationError):
self.visualization.validate(arg1)
# type missing
arg1 = append_needed_args({'src': '.',
'map' : 'world',
'colormap' : 'viridis',
'countryCodeAttribute' : '.',
'groupedValueAttribute' : '.',
'displayLegend' : 'yes',
'displayLabels' : 'yes',
'labelsThreshold' : 1,
'title' : '.',
'target': '.'})
with self.assertRaises(ValidationError):
self.visualization.validate(arg1)
# colormap missing
arg1 = append_needed_args({'src': '.',
'map' : 'world',
'countryCodeAttribute' : '.',
'groupedValueAttribute' : '.',
'displayLegend' : 'yes',
'displayLabels' : 'yes',
'labelsThreshold' : 1,
'title' : '.',
'target': '.'})
with self.assertRaises(ValidationError):
self.visualization.validate(arg1)
# colormap not existing
arg1 = append_needed_args({'src': '.',
'map' : 'world',
'colormap' : 'NOTEXISTINGCOLOR',
'countryCodeAttribute' : '.',
'groupedValueAttribute' : '.',
'displayLegend' : 'yes',
'displayLabels' : 'yes',
'labelsThreshold' : 1,
'title' : '.',
'target': '.'})
with self.assertRaises(ValidationError):
self.visualization.validate(arg1)
# Heatmap: countryCodeAttribute missing
arg1 = append_needed_args({'src': '.',
'map' : 'world',
'type' : 'heatmap',
'colormap' : 'viridis',
'groupedValueAttribute' : '.',
'displayLegend' : 'yes',
'displayLabels' : 'yes',
'labelsThreshold' : 1,
'title' : '.',
'target': '.'})
with self.assertRaises(ValidationError):
self.visualization.validate(arg1)
# Heatmap: groupedValueAttribute missing
arg1 = append_needed_args({'src': '.',
'map' : 'world',
'type' : 'heatmap',
'colormap' : 'viridis',
'countryCodeAttribute' : '.',
'displayLegend' : 'yes',
'displayLabels' : 'yes',
'labelsThreshold' : 1,
'title' : '.',
'target': '.'})
with self.assertRaises(ValidationError):
self.visualization.validate(arg1)
# Heatmap: threshold not int
arg1 = append_needed_args({'src': '.',
'map' : 'world',
'type' : 'heatmap',
'colormap' : 'viridis',
'countryCodeAttribute' : '.',
'groupedValueAttribute' : '.',
'displayLegend' : 'yes',
'displayLabels' : 'yes',
'labelsThreshold' : 'not int',
'title' : '.',
'target': '.'})
with self.assertRaises(ValidationError):
self.visualization.validate(arg1) | src/unittest/python/modules/visualization/map_validate_tests.py | import unittest
import os, shutil
from cybercaptain.visualization.map import visualization_map
from cybercaptain.utils.exceptions import ValidationError
# Shared test assets folder (relative to this test module) and the scratch
# folder for generated output (created in setUp, removed in tearDown).
TESTDATA_FOLDER = os.path.join(os.path.dirname(__file__), '../assets')
TESTDATA_GEN_OUTPUT_FOLDER = os.path.join(TESTDATA_FOLDER, 'output')
# Append Needed Args - Related to Root Config projectName / projectRoot / moduleName
def append_needed_args(existing_args):
    """Return *existing_args* extended with the root-config entries
    (projectRoot / projectName / moduleName) every module needs."""
    root_config = {'projectRoot': TESTDATA_FOLDER,
                   'projectName': "UNITTEST.cckv",
                   'moduleName': "UNITEST_MODULE"}
    return {**existing_args, **root_config}
class DataVisualizationMapValidateTest(unittest.TestCase):
    """
    Test the visualization map chart png validation.

    Each case below drops or corrupts one required argument and expects
    validate() to raise a ValidationError.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        base_args = append_needed_args({'src': '.',
                                        'map': 'world',
                                        'type': '.',
                                        'colormap': '.',
                                        'countryCodeAttribute': '.',
                                        'groupedValueAttribute': '.',
                                        'displayLegend': 'yes',
                                        'displayLabels': 'yes',
                                        'labelsThreshold': 1,
                                        'title': '.',
                                        'target': '.'})
        self.visualization = visualization_map(**base_args)

    def setUp(self):
        # Recreate the scratch output folder before every test.
        if not os.path.exists(TESTDATA_GEN_OUTPUT_FOLDER):
            os.makedirs(TESTDATA_GEN_OUTPUT_FOLDER)

    def tearDown(self):
        shutil.rmtree(TESTDATA_GEN_OUTPUT_FOLDER)

    def test_validation_heatmap(self):
        """
        Testing the heatmap validation.
        """
        bad_configs = [
            # SRC missing
            {'map': 'world', 'type': 'viridis', 'colormap': '.',
             'countryCodeAttribute': '.', 'groupedValueAttribute': '.',
             'displayLegend': 'yes', 'displayLabels': 'yes',
             'labelsThreshold': 1, 'title': '.', 'target': '.'},
            # map missing
            {'src': '.', 'type': '.', 'colormap': 'viridis',
             'countryCodeAttribute': '.', 'groupedValueAttribute': '.',
             'displayLegend': 'yes', 'displayLabels': 'yes',
             'labelsThreshold': 1, 'title': '.', 'target': '.'},
            # type missing
            {'src': '.', 'map': 'world', 'colormap': 'viridis',
             'countryCodeAttribute': '.', 'groupedValueAttribute': '.',
             'displayLegend': 'yes', 'displayLabels': 'yes',
             'labelsThreshold': 1, 'title': '.', 'target': '.'},
            # colormap missing
            {'src': '.', 'map': 'world',
             'countryCodeAttribute': '.', 'groupedValueAttribute': '.',
             'displayLegend': 'yes', 'displayLabels': 'yes',
             'labelsThreshold': 1, 'title': '.', 'target': '.'},
            # colormap not existing
            {'src': '.', 'map': 'world', 'colormap': 'NOTEXISTINGCOLOR',
             'countryCodeAttribute': '.', 'groupedValueAttribute': '.',
             'displayLegend': 'yes', 'displayLabels': 'yes',
             'labelsThreshold': 1, 'title': '.', 'target': '.'},
            # Heatmap: countryCodeAttribute missing
            {'src': '.', 'map': 'world', 'type': 'heatmap',
             'colormap': 'viridis', 'groupedValueAttribute': '.',
             'displayLegend': 'yes', 'displayLabels': 'yes',
             'labelsThreshold': 1, 'title': '.', 'target': '.'},
            # Heatmap: groupedValueAttribute missing
            {'src': '.', 'map': 'world', 'type': 'heatmap',
             'colormap': 'viridis', 'countryCodeAttribute': '.',
             'displayLegend': 'yes', 'displayLabels': 'yes',
             'labelsThreshold': 1, 'title': '.', 'target': '.'},
            # Heatmap: threshold not int
            {'src': '.', 'map': 'world', 'type': 'heatmap',
             'colormap': 'viridis', 'countryCodeAttribute': '.',
             'groupedValueAttribute': '.', 'displayLegend': 'yes',
             'displayLabels': 'yes', 'labelsThreshold': 'not int',
             'title': '.', 'target': '.'},
        ]
        for bad in bad_configs:
            with self.assertRaises(ValidationError):
                self.visualization.validate(append_needed_args(bad))
from azext_iot.common.sas_token_auth import SasTokenAuthentication
from azext_iot.common.shared import SdkType
def iot_hub_service_factory(cli_ctx, *_):
    """
    Factory for importing deps and getting service client resources.
    Args:
        cli_ctx (knack.cli.CLI): CLI context.
        *_ : all other args ignored.
    Returns:
        iot_hub_resource (IotHubClient.iot_hub_resource): operational resource for
        working with IoT Hub.
    """
    # Azure SDK imports are kept function-local so they are loaded on demand.
    from azure.cli.core.commands.client_factory import get_mgmt_service_client
    from azure.mgmt.iothub.iot_hub_client import IotHubClient
    return get_mgmt_service_client(cli_ctx, IotHubClient).iot_hub_resource
def iot_service_provisioning_factory(cli_ctx, *_):
    """
    Factory for importing deps and getting service client resources.
    Args:
        cli_ctx (knack.cli.CLI): CLI context.
        *_ : all other args ignored.
    Returns:
        service_client (IotDpsClient): operational resource for
        working with IoT Hub Device Provisioning Service.
    """
    # Azure SDK imports are kept function-local so they are loaded on demand.
    from azure.cli.core.commands.client_factory import get_mgmt_service_client
    from azure.mgmt.iothubprovisioningservices.iot_dps_client import IotDpsClient
    return get_mgmt_service_client(cli_ctx, IotDpsClient)
def _bind_sdk(target, sdk_type, device_id=None, auth=None):
    """Build the SDK client matching *sdk_type* for the given IoT Hub target.

    Returns a (client, exception_module) pair, or None for an unrecognised
    sdk_type.  *auth* is created from the target's shared access policy when
    not supplied; the PnP repository client needs no SAS auth at all.
    """
    from azext_iot.sdk.device.iot_hub_gateway_device_apis import IotHubGatewayDeviceAPIs
    from azext_iot.sdk.service.iot_hub_gateway_service_apis import IotHubGatewayServiceAPIs
    from azext_iot.sdk.custom.custom_api import CustomClient
    from azext_iot.sdk.dps import ProvisioningServiceClient
    from azext_iot.sdk.pnp.digital_twin_repository_service import DigitalTwinRepositoryService

    sas_uri = target['entity']
    endpoint = "https://{}".format(sas_uri)
    if device_id:
        sas_uri = '{}/devices/{}'.format(sas_uri, device_id)

    # The PnP repository client is constructed from the endpoint alone.
    if sdk_type is SdkType.pnp_sdk:
        return (
            DigitalTwinRepositoryService(endpoint),
            _get_sdk_exception_type(sdk_type)
        )

    if not auth:
        auth = SasTokenAuthentication(sas_uri, target['policy'], target['primarykey'])

    # Remaining SDKs all share the (auth, endpoint) constructor signature.
    client_classes = {
        SdkType.device_sdk: IotHubGatewayDeviceAPIs,
        SdkType.service_sdk: IotHubGatewayServiceAPIs,
        SdkType.custom_sdk: CustomClient,
        SdkType.dps_sdk: ProvisioningServiceClient,
    }
    client_class = client_classes.get(sdk_type)
    if client_class is None:
        return None
    return (client_class(auth, endpoint), _get_sdk_exception_type(sdk_type))
def _get_sdk_exception_type(sdk_type):
    """Return the module holding the error type raised by *sdk_type*'s SDK
    (None for an unrecognised sdk_type)."""
    from importlib import import_module
    module_names = (
        (SdkType.custom_sdk, 'azext_iot.sdk.custom.models.error_details'),
        (SdkType.service_sdk, 'msrestazure.azure_exceptions'),
        (SdkType.device_sdk, 'msrestazure.azure_exceptions'),
        (SdkType.dps_sdk, 'azext_iot.sdk.dps.models.provisioning_service_error_details'),
        (SdkType.pnp_sdk, 'msrest.exceptions'),
    )
    # All modules are imported eagerly, matching the original dict literal.
    exception_library = {key: import_module(path) for key, path in module_names}
    return exception_library.get(sdk_type, None)
from azext_iot.common.shared import SdkType
def iot_hub_service_factory(cli_ctx, *_):
    """
    Factory for importing deps and getting service client resources.
    Args:
        cli_ctx (knack.cli.CLI): CLI context.
        *_ : all other args ignored.
    Returns:
        iot_hub_resource (IotHubClient.iot_hub_resource): operational resource for
        working with IoT Hub.
    """
    # Azure SDK imports are kept function-local so they are loaded on demand.
    from azure.cli.core.commands.client_factory import get_mgmt_service_client
    from azure.mgmt.iothub.iot_hub_client import IotHubClient
    return get_mgmt_service_client(cli_ctx, IotHubClient).iot_hub_resource
def iot_service_provisioning_factory(cli_ctx, *_):
    """
    Factory for importing deps and getting service client resources.
    Args:
        cli_ctx (knack.cli.CLI): CLI context.
        *_ : all other args ignored.
    Returns:
        service_client (IotDpsClient): operational resource for
        working with IoT Hub Device Provisioning Service.
    """
    # Azure SDK imports are kept function-local so they are loaded on demand.
    from azure.cli.core.commands.client_factory import get_mgmt_service_client
    from azure.mgmt.iothubprovisioningservices.iot_dps_client import IotDpsClient
    return get_mgmt_service_client(cli_ctx, IotDpsClient)
def _bind_sdk(target, sdk_type, device_id=None, auth=None):
    """Build the SDK client matching *sdk_type* for the given IoT Hub target.

    Returns a (client, exception_module) pair, or None for an unrecognised
    sdk_type.  *auth* is created from the target's shared access policy when
    not supplied; the PnP repository client needs no SAS auth at all.
    """
    from azext_iot.sdk.device.iot_hub_gateway_device_apis import IotHubGatewayDeviceAPIs
    from azext_iot.sdk.service.iot_hub_gateway_service_apis import IotHubGatewayServiceAPIs
    from azext_iot.sdk.custom.custom_api import CustomClient
    from azext_iot.sdk.dps import ProvisioningServiceClient
    from azext_iot.sdk.pnp.digital_twin_repository_service import DigitalTwinRepositoryService

    sas_uri = target['entity']
    endpoint = "https://{}".format(sas_uri)
    if device_id:
        sas_uri = '{}/devices/{}'.format(sas_uri, device_id)

    # The PnP repository client is constructed from the endpoint alone.
    if sdk_type is SdkType.pnp_sdk:
        return (
            DigitalTwinRepositoryService(endpoint),
            _get_sdk_exception_type(sdk_type)
        )

    if not auth:
        auth = SasTokenAuthentication(sas_uri, target['policy'], target['primarykey'])

    # Remaining SDKs all share the (auth, endpoint) constructor signature.
    client_classes = {
        SdkType.device_sdk: IotHubGatewayDeviceAPIs,
        SdkType.service_sdk: IotHubGatewayServiceAPIs,
        SdkType.custom_sdk: CustomClient,
        SdkType.dps_sdk: ProvisioningServiceClient,
    }
    client_class = client_classes.get(sdk_type)
    if client_class is None:
        return None
    return (client_class(auth, endpoint), _get_sdk_exception_type(sdk_type))
def _get_sdk_exception_type(sdk_type):
    """Return the module holding the error type raised by *sdk_type*'s SDK
    (None for an unrecognised sdk_type)."""
    from importlib import import_module
    module_names = (
        (SdkType.custom_sdk, 'azext_iot.sdk.custom.models.error_details'),
        (SdkType.service_sdk, 'msrestazure.azure_exceptions'),
        (SdkType.device_sdk, 'msrestazure.azure_exceptions'),
        (SdkType.dps_sdk, 'azext_iot.sdk.dps.models.provisioning_service_error_details'),
        (SdkType.pnp_sdk, 'msrest.exceptions'),
    )
    # All modules are imported eagerly, matching the original dict literal.
    exception_library = {key: import_module(path) for key, path in module_names}
    return exception_library.get(sdk_type, None)
def parse_data(data, outpath):
    """Render per-run figures into *outpath* and normalise report fields in *data*.

    Expects ``data`` to carry 'df' (per-rank timing DataFrame with rank,
    starttime, endtime, time_comp/send/recv/idle, stats_nb_send/recv columns),
    'world_size', 'df_w'/'data_w' (final weights, parallel vs sequential),
    'unix_timestamp_start' and 'buffer_size'.  Writes fig01.png, fig01b.png
    and fig_weights.png, and stores human-readable 'runtime', 'starttime',
    totals and sizes back into ``data``.
    """
    import matplotlib.pyplot as plt
    import os
    import datetime
    from hurry.filesize import size

    df = data['df']
    world_size = data['world_size']

    def _stacked_time_plot(filename, max_start=None):
        # One stacked subplot per rank showing the comp/send/recv/idle time
        # budget over the run; optionally restricted to starttime <= max_start.
        plt.figure(figsize=(9, 8 / 5 * world_size))
        ax0 = None
        for proc in range(0, world_size):
            X = df[df['rank'] == proc]
            # BUG FIX: 'ax' was previously only assigned for proc > 0, so a
            # single-rank run raised NameError below; it is now always bound.
            ax = plt.subplot(world_size, 1, proc + 1, sharex=ax0)
            if proc == 0:
                ax0 = ax  # first axis anchors the shared x scale
            ind = slice(None) if max_start is None else X['starttime'].values <= max_start
            plt.stackplot(X['starttime'].values[ind],
                          X['time_comp'].values[ind],
                          X['time_send'].values[ind],
                          X['time_recv'].values[ind],
                          X['time_idle'].values[ind],
                          labels=["Comp", "Send", "Recv", "Idle"])
            if proc == world_size - 1:
                # Only the bottom subplot carries legend, labels and spines.
                plt.legend()
                ax.spines['right'].set_visible(False)
                ax.spines['top'].set_visible(False)
                plt.xlabel("Seconds")
                plt.ylabel("Seconds")
            else:
                plt.axis("off")
        plt.savefig(os.path.join(outpath, filename), format='png', dpi=300)

    _stacked_time_plot("fig01.png")
    # Zoom on the first second of the run.
    _stacked_time_plot("fig01b.png", max_start=1)

    # Final importance weights: parallel result vs sequential reference.
    df_w = data['df_w']
    data_w = data['data_w']
    plt.figure()
    plt.plot(df_w['x'], df_w['weight'], '+-', label="parallel")
    plt.plot(data_w[:, 0], data_w[:, 1], 'm--.', label="sequential (reference)")
    plt.title("Weights after Simulation")
    plt.legend()
    plt.savefig(os.path.join(outpath, "fig_weights.png"), format='png', dpi=300)

    # Wall-clock runtime, formatted HH:MM:SS.hh.
    total_seconds = df['endtime'].max() - df['starttime'].min()
    hours, rem = divmod(total_seconds, 60 * 60)
    minutes, rem = divmod(rem, 60)
    seconds, rem = divmod(rem, 1)
    data['runtime'] = "{:02}:{:02}:{:02}.{:02}".format(int(hours),
        int(minutes), int(seconds), int(rem * 100))

    # Human-readable start timestamp.  NOTE(review): '%-d' is a glibc
    # strftime extension and is not portable to Windows.
    starttime = datetime.datetime.fromtimestamp(data['unix_timestamp_start'])
    data["starttime"] = starttime.strftime('%B %-d, %Y %H:%M:%S')

    data['nb_total_send'] = df['stats_nb_send'].sum()
    data['nb_total_recv'] = df['stats_nb_recv'].sum()
    print("total_send = {}".format(data['nb_total_send']))
    print("total_recv = {}".format(data['nb_total_recv']))
    assert (data['nb_total_send'] == data['nb_total_recv']), "Number of Send/Recv should be equal"

    # Sizes rendered with hurry.filesize for the report.
    data["buffer_size"] = size(data["buffer_size"])
    if "max_used_buffer" in data:
        data["max_used_buffer"] = size(data["max_used_buffer"])
    else:
        # BUG FIX: the original called size(data["-"]), which always raised
        # KeyError; report a placeholder when the value is absent.
        data["max_used_buffer"] = "-"
import numpy as np
import matplotlib.pyplot as plt
import os
import datetime
from hurry.filesize import size
df = data['df']
world_size = data['world_size']
plt.figure(figsize=(9,8/5*world_size))
for proc in range(0, world_size):
X = df[df['rank'] == proc]
if proc == 0:
ax0 = plt.subplot(world_size, 1, proc+1)
else:
ax = plt.subplot(world_size, 1, proc+1, sharex=ax0)
plt.stackplot(X['starttime'].values,
X['time_comp'].values,
X['time_send'].values,
X['time_recv'].values,
X['time_idle'].values,
labels=["Comp", "Send", "Recv", "Idle"])
if proc == world_size-1:
plt.legend()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.xlabel("Seconds")
plt.ylabel("Seconds")
else:
plt.axis("off")
plt.savefig(os.path.join(outpath, "fig01.png"), format='png', dpi=300)
plt.figure(figsize=(9,8/5*world_size))
for proc in range(0, world_size):
X = df[df['rank'] == proc]
if proc == 0:
ax0 = plt.subplot(world_size, 1, proc+1)
else:
ax = plt.subplot(world_size, 1, proc+1, sharex=ax0)
ind = X['starttime'].values <= 1;
plt.stackplot(X['starttime'].values[ind],
X['time_comp'].values[ind],
X['time_send'].values[ind],
X['time_recv'].values[ind],
X['time_idle'].values[ind],
labels=["Comp", "Send", "Recv", "Idle"])
if proc == world_size-1:
plt.legend()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.xlabel("Seconds")
plt.ylabel("Seconds")
else:
plt.axis("off")
plt.savefig(os.path.join(outpath, "fig01b.png"), format='png', dpi=300)
df_w = data['df_w']
data_w = data['data_w']
plt.figure()
plt.plot(df_w['x'], df_w['weight'], '+-', label="parallel")
plt.plot(data_w[:, 0], data_w[:, 1], 'm--.', label="sequential (reference)")
plt.title("Weights after Simulation")
plt.legend()
plt.savefig(os.path.join(outpath, "fig_weights.png"), format='png', dpi=300)
# plt.figure()
# plt.hist(np.sum(times[:, 3:], axis=1), 500);
# plt.savefig(os.path.join(outpath, "fig02.png"), format='png', dpi=300)
# plt.figure()
# plt.hist(times[np.sum(times[:, 3:], axis=1) >= 0.012, 6], 500);
# plt.savefig(os.path.join(outpath, "fig03.png"), format='png', dpi=300)
# print("mean = {}".format(np.mean(times[np.sum(times[:, 3:], axis=1) >= 0.012, 6])))
# runtime
total_seconds = df['endtime'].max() - df['starttime'].min()
hours, rem = divmod(total_seconds, 60*60)
minutes, rem = divmod(rem, 60)
seconds, rem = divmod(rem, 1)
data['runtime'] = "{:02}:{:02}:{:02}.{:02}".format(int(hours),
int(minutes), int(seconds), int(rem*100))
# starttime
starttime = datetime.datetime.fromtimestamp(data['unix_timestamp_start'])
data["starttime"] = starttime.strftime('%B %-d, %Y %H:%M:%S')
data['nb_total_send'] = df['stats_nb_send'].sum()
data['nb_total_recv'] = df['stats_nb_recv'].sum()
print("total_send = {}".format(data['nb_total_send']))
print("total_recv = {}".format(data['nb_total_recv']))
assert (data['nb_total_send'] == data['nb_total_recv']), "Number of Send/Recv should be equal"
# Sizes
data["buffer_size"] = size(data["buffer_size"])
if "max_used_buffer" in data:
data["max_used_buffer"] = size(data["max_used_buffer"])
else:
data["max_used_buffer"] = size(data["-"]) | 0.130258 | 0.637609 |
import warnings
warnings.filterwarnings('ignore')
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
class Comparison(object):
    """Cross-validated comparison of a battery of sklearn classifiers.

    Args:
        scoring: a string format score, sklearn.metrics. 'roc_auc', 'average_precision'
    """
    def __init__(self, data, target, features, scoring, record_file):
        super(Comparison, self).__init__()
        self.data = data
        self.target = target
        self.features = features
        self.scoring = scoring
        self.record_file = record_file
        # Mean-impute missing values once, up front.
        self.df_prep = pd.DataFrame(SimpleImputer(strategy='mean').fit_transform(self.data),
                                    columns=self.data.columns)
        self.AmongResults = pd.DataFrame(columns=['algorithm', 'score_mean', 'score_std', 'time'])
        self.results = []
        self.names = []
        self.cost = []
        self.means = []
        self.stds = []

    def AmongModels(self):
        """Run 10-fold CV for every candidate model and return the sorted
        result table (also kept in self.AmongResults)."""
        candidates = [
            ('LR', LogisticRegression()),
            ('SDG', SGDClassifier()),
            ('LDA', LinearDiscriminantAnalysis()),
            ('KNN', KNeighborsClassifier()),
            ('NB', GaussianNB()),
            ('CART', DecisionTreeClassifier()),
            ('BAG', BaggingClassifier(DecisionTreeClassifier(), bootstrap=True, oob_score=True, n_jobs=-1)),
            ('RF', RandomForestClassifier(n_jobs=-1)),
            ('ERT', ExtraTreesClassifier(n_jobs=-1)),
            ('ABDT', AdaBoostClassifier(DecisionTreeClassifier())),
            ('GBDT', GradientBoostingClassifier()),
            ('MLP', MLPClassifier()),
        ]
        with open(self.record_file, 'a') as file:
            file.write('\n' + '=' * 20 + '\n')
        for label, clf in candidates:
            start = time.time()
            kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=0)
            scores = model_selection.cross_val_score(clf,
                                                     self.df_prep[self.features],
                                                     self.df_prep[self.target],
                                                     cv=kfold, scoring=self.scoring)
            elapsed = time.time() - start
            mean_score = scores.mean()
            std_score = scores.std()
            msg = "%s:\t%f (%f)\ttime: %f s" % (label, mean_score, std_score, elapsed)
            # NOTE(review): msg carries no trailing newline, so successive
            # records run together in the file — confirm whether intended.
            with open(self.record_file, 'a') as file:
                file.write(msg)
            print(msg)
            self.results.append(scores)
            self.names.append(label)
            self.means.append(mean_score)
            self.stds.append(std_score)
            self.cost.append(elapsed)
        self.AmongResults['algorithm'] = self.names
        self.AmongResults['score_mean'] = self.means
        self.AmongResults['score_std'] = self.stds
        self.AmongResults['time'] = self.cost
        self.AmongResults['ratio'] = np.power(self.AmongResults.score_mean, 2) * np.power(self.AmongResults.time, -1 / 10)
        self.AmongResults = self.AmongResults.sort_values(by='score_mean', ascending=False)
        return self.AmongResults

    def Visual(self, time=False, figsize=(8, 8)):
        """Plot the comparison: score boxplots, or score-vs-runtime overlay
        when time=True.  (The 'time' flag shadows the time module here.)"""
        fig = plt.figure(figsize=figsize)
        if not time:
            ax = fig.add_subplot(111)
            plt.boxplot(self.results)
            ax.set_xticklabels(self.names)
            plt.title('Algorithm Comparison')
        else:
            fig.suptitle('Algorithm Comparison')
            ax1 = fig.add_subplot(111, label="1")
            ax2 = fig.add_subplot(111, label="2", frame_on=False)
            # Score means with std error bars on the primary axes.
            ax1.errorbar(self.names, self.means, self.stds, color="C0", linestyle='None', marker='o')
            ax1.set_xlabel("algorithm", color="C0")
            ax1.set_ylabel("score mean", color="C0")
            ax1.tick_params(axis="both", colors="C0")
            # Runtime bars on an overlaid axes labelled top/right.
            ax2.bar(self.names, self.cost, color="C1", alpha=0.3, width=0.5)
            ax2.xaxis.tick_top()
            ax2.yaxis.tick_right()
            ax2.set_xlabel('algorithm', color="C1")
            ax2.set_ylabel('time', color="C1")
            ax2.xaxis.set_label_position('top')
            ax2.yaxis.set_label_position('right')
            ax2.tick_params(axis='both', colors="C1")
        plt.grid()
        plt.show()
        return None
warnings.filterwarnings('ignore')
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
class Comparison(object):
    """Cross-validated comparison of a battery of sklearn classifiers.

    Args:
        scoring: a string format score, sklearn.metrics. 'roc_auc', 'average_precision'
    """
    def __init__(self, data, target, features, scoring, record_file):
        super(Comparison, self).__init__()
        self.data = data
        self.target = target
        self.features = features
        self.scoring = scoring
        self.record_file = record_file
        # Mean-impute missing values once, up front.
        self.df_prep = pd.DataFrame(SimpleImputer(strategy='mean').fit_transform(self.data),
                                    columns=self.data.columns)
        self.AmongResults = pd.DataFrame(columns=['algorithm', 'score_mean', 'score_std', 'time'])
        self.results = []
        self.names = []
        self.cost = []
        self.means = []
        self.stds = []

    def AmongModels(self):
        """Run 10-fold CV for every candidate model and return the sorted
        result table (also kept in self.AmongResults)."""
        candidates = [
            ('LR', LogisticRegression()),
            ('SDG', SGDClassifier()),
            ('LDA', LinearDiscriminantAnalysis()),
            ('KNN', KNeighborsClassifier()),
            ('NB', GaussianNB()),
            ('CART', DecisionTreeClassifier()),
            ('BAG', BaggingClassifier(DecisionTreeClassifier(), bootstrap=True, oob_score=True, n_jobs=-1)),
            ('RF', RandomForestClassifier(n_jobs=-1)),
            ('ERT', ExtraTreesClassifier(n_jobs=-1)),
            ('ABDT', AdaBoostClassifier(DecisionTreeClassifier())),
            ('GBDT', GradientBoostingClassifier()),
            ('MLP', MLPClassifier()),
        ]
        with open(self.record_file, 'a') as file:
            file.write('\n' + '=' * 20 + '\n')
        for label, clf in candidates:
            start = time.time()
            kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=0)
            scores = model_selection.cross_val_score(clf,
                                                     self.df_prep[self.features],
                                                     self.df_prep[self.target],
                                                     cv=kfold, scoring=self.scoring)
            elapsed = time.time() - start
            mean_score = scores.mean()
            std_score = scores.std()
            msg = "%s:\t%f (%f)\ttime: %f s" % (label, mean_score, std_score, elapsed)
            # NOTE(review): msg carries no trailing newline, so successive
            # records run together in the file — confirm whether intended.
            with open(self.record_file, 'a') as file:
                file.write(msg)
            print(msg)
            self.results.append(scores)
            self.names.append(label)
            self.means.append(mean_score)
            self.stds.append(std_score)
            self.cost.append(elapsed)
        self.AmongResults['algorithm'] = self.names
        self.AmongResults['score_mean'] = self.means
        self.AmongResults['score_std'] = self.stds
        self.AmongResults['time'] = self.cost
        self.AmongResults['ratio'] = np.power(self.AmongResults.score_mean, 2) * np.power(self.AmongResults.time, -1 / 10)
        self.AmongResults = self.AmongResults.sort_values(by='score_mean', ascending=False)
        return self.AmongResults

    def Visual(self, time=False, figsize=(8, 8)):
        """Plot the comparison: score boxplots, or score-vs-runtime overlay
        when time=True.  (The 'time' flag shadows the time module here.)"""
        fig = plt.figure(figsize=figsize)
        if not time:
            ax = fig.add_subplot(111)
            plt.boxplot(self.results)
            ax.set_xticklabels(self.names)
            plt.title('Algorithm Comparison')
        else:
            fig.suptitle('Algorithm Comparison')
            ax1 = fig.add_subplot(111, label="1")
            ax2 = fig.add_subplot(111, label="2", frame_on=False)
            # Score means with std error bars on the primary axes.
            ax1.errorbar(self.names, self.means, self.stds, color="C0", linestyle='None', marker='o')
            ax1.set_xlabel("algorithm", color="C0")
            ax1.set_ylabel("score mean", color="C0")
            ax1.tick_params(axis="both", colors="C0")
            # Runtime bars on an overlaid axes labelled top/right.
            ax2.bar(self.names, self.cost, color="C1", alpha=0.3, width=0.5)
            ax2.xaxis.tick_top()
            ax2.yaxis.tick_right()
            ax2.set_xlabel('algorithm', color="C1")
            ax2.set_ylabel('time', color="C1")
            ax2.xaxis.set_label_position('top')
            ax2.yaxis.set_label_position('right')
            ax2.tick_params(axis='both', colors="C1")
        plt.grid()
        plt.show()
        return None
import os
import re
from fbs.proc.common_util.util import FileFormatError
from fbs.proc.file_handlers import generic_file
from fbs.proc.file_handlers import netcdf_file
from fbs.proc.file_handlers import nasaames_file
from fbs.proc.file_handlers import pp_file
from fbs.proc.file_handlers import grib_file
from fbs.proc.file_handlers import esasafe_file
from fbs.proc.file_handlers import kmz_file
from fbs.proc.file_handlers import hdf_file
from fbs.proc.file_handlers import badc_csv_file
from fbs.proc.file_handlers import metadata_tags_json_file
import magic as magic_number_reader
import fbs.proc.common_util.util as util
import six
class HandlerPicker(object):
    """
    Returns a file handler for the supplied file.

    Strategies, in order: well-known filename, file extension (with a header
    sniff for BADC-CSV), libmagic file-type detection, the last handler used
    in the same directory, and finally the generic handler.
    """

    # Extension -> handler class lookup used by the extension strategy.
    HANDLER_MAP = {
        '.nc': netcdf_file.NetCdfFile,
        '.na': nasaames_file.NasaAmesFile,
        '.pp': pp_file.PpFile,
        '.grb': grib_file.GribFile,
        '.grib': grib_file.GribFile,
        '.manifest': esasafe_file.EsaSafeFile,
        '.kmz': kmz_file.KmzFile,
        '.hdf': hdf_file.HdfFile
    }

    def __init__(self):
        # Cache of the last handler chosen per directory.
        self.handlers_and_dirs = {}
        # Expected outputs of magic.from_file() for the formats we sniff.
        self.NETCDF_PYTHON_MAGIC_NUM_RES = "NetCDF Data Format data"
        self.ASCII_PYTHON_MAGIC_NUM_RES = "ASCII text"
        self.DATA_PYTHON_MAGIC_NUM_RES = "data"

    def pick_best_handler(self, filename):
        """
        :param filename : the file to be scanned.
        :returns handler: Returns an appropriate handler
                 for the given file.
        """
        file_dir = os.path.dirname(filename)
        file_basename = os.path.basename(filename)

        if file_basename == "metadata_tags.json":
            # BUG FIX: this branch previously fell through to the magic-number
            # detection below, which silently overwrote the chosen handler.
            handler = metadata_tags_json_file.MetadataTagsJsonFile
            self.handlers_and_dirs[file_dir] = handler
            return handler

        # Strategy 1: file extension.
        extension = os.path.splitext(filename)[1].lower()
        if extension == '.csv':
            try:
                # BADC-CSV files declare themselves in the header.
                # NOTE(review): if get_bytes_from_file returns bytes, the
                # str pattern would raise TypeError and land in the except
                # below — confirm against util's return type.
                header = util.get_bytes_from_file(filename, 500)
                if header.find("Conventions,G,BADC-CSV") != -1:
                    handler = badc_csv_file.BadcCsvFile
                else:
                    handler = generic_file.GenericFile
            except Exception:
                # Best effort: any error reading the header -> generic.
                handler = generic_file.GenericFile
        else:
            handler = self.HANDLER_MAP.get(extension, generic_file.GenericFile)
        if handler is not None:
            self.handlers_and_dirs[file_dir] = handler
            return handler

        # Strategy 2: libmagic file-type detection.
        try:
            res = magic_number_reader.from_file(filename)
            if res == self.NETCDF_PYTHON_MAGIC_NUM_RES:
                handler = netcdf_file.NetCdfFile
            elif res == self.ASCII_PYTHON_MAGIC_NUM_RES:
                # ASCII: a leading "<int> <int>" header marks a NASA Ames file.
                first_line = util.get_file_header(filename)
                tokens = first_line.split(" ")
                if len(tokens) >= 2 and tokens[0].isdigit() and tokens[1].isdigit():
                    handler = nasaames_file.NasaAmesFile
                else:
                    handler = generic_file.GenericFile
            elif res == self.DATA_PYTHON_MAGIC_NUM_RES:
                # Raw "data" can still be a GRIB file (magic bytes "GRIB").
                res = util.get_bytes_from_file(filename, 4)
                if res == "GRIB":
                    handler = grib_file.GribFile
                else:
                    handler = generic_file.GenericFile
        except Exception:
            # Best effort: any detection error -> generic.
            handler = generic_file.GenericFile
        if handler is not None:
            self.handlers_and_dirs[file_dir] = handler
            return handler

        # Strategy 3: reuse the last handler chosen in this directory.
        if file_dir in self.handlers_and_dirs:
            handler = self.handlers_and_dirs[file_dir]
            if handler is not None:
                return handler

        # Fallback: the generic handler.
        return generic_file.GenericFile

    def __enter__(self):
        # Context-manager support; no resources to acquire.
        return self

    def __exit__(self, *args):
        # Nothing to release.
        pass
import re
from fbs.proc.common_util.util import FileFormatError
from fbs.proc.file_handlers import generic_file
from fbs.proc.file_handlers import netcdf_file
from fbs.proc.file_handlers import nasaames_file
from fbs.proc.file_handlers import pp_file
from fbs.proc.file_handlers import grib_file
from fbs.proc.file_handlers import esasafe_file
from fbs.proc.file_handlers import kmz_file
from fbs.proc.file_handlers import hdf_file
from fbs.proc.file_handlers import badc_csv_file
from fbs.proc.file_handlers import metadata_tags_json_file
import magic as magic_number_reader
import fbs.proc.common_util.util as util
import six
class HandlerPicker(object):
"""
Returns a file handler for the supplied file.
"""
HANDLER_MAP = {
'.nc': netcdf_file.NetCdfFile,
'.na': nasaames_file.NasaAmesFile,
'.pp': pp_file.PpFile,
'.grb': grib_file.GribFile,
'.grib': grib_file.GribFile,
'.manifest': esasafe_file.EsaSafeFile,
'.kmz': kmz_file.KmzFile,
'.hdf': hdf_file.HdfFile
}
def __init__(self):
self.handlers_and_dirs = {}
self.NETCDF_PYTHON_MAGIC_NUM_RES = "NetCDF Data Format data"
self.ASCII_PYTHON_MAGIC_NUM_RES = "ASCII text"
self.DATA_PYTHON_MAGIC_NUM_RES = "data"
    def pick_best_handler(self, filename):
        """
        :param filename : the file to be scanned.
        :returns handler: Returns an appropriate handler
        for the given file.
        """
        file_dir = os.path.dirname(filename)
        file_basename = os.path.basename(filename)
        # Special case: metadata sidecar files get their own handler.
        if file_basename == "metadata_tags.json":
            handler = metadata_tags_json_file.MetadataTagsJsonFile
        else:
            # Try returning a handler based on file extension.
            extension = os.path.splitext(filename)[1]
            extension = extension.lower()
            if extension == '.csv':
                # A .csv may be a BADC-CSV file; sniff the header for the marker.
                try:
                    header = util.get_bytes_from_file(filename, 500)
                    pattern_to_search = "Conventions,G,BADC-CSV"
                    res = header.find(pattern_to_search)
                    if res != -1:
                        handler = badc_csv_file.BadcCsvFile
                    else:
                        handler = generic_file.GenericFile
                except Exception: # catch everything... if there is an error just return the generic handler.
                    handler = generic_file.GenericFile
            else:
                handler = self.HANDLER_MAP.get(extension, generic_file.GenericFile)
        # NOTE(review): HANDLER_MAP.get() above supplies GenericFile as a
        # default, so `handler` is never None at this point and the function
        # always returns here; the magic-number and directory-cache fallbacks
        # below appear to be unreachable — confirm intent.
        if handler is not None:
            self.handlers_and_dirs[file_dir] = handler
            return handler
        # Try returning a handler based on file's magic number.
        try:
            res = magic_number_reader.from_file(filename)
            if res == self.NETCDF_PYTHON_MAGIC_NUM_RES:
                handler = netcdf_file.NetCdfFile
            elif res == self.ASCII_PYTHON_MAGIC_NUM_RES:
                # ok lets see if it is a na file.
                # NASA-Ames files start with two integers on the first line.
                first_line = util.get_file_header(filename)
                tokens = first_line.split(" ")
                if len(tokens) >= 2:
                    if tokens[0].isdigit() and tokens[1].isdigit():
                        handler = nasaames_file.NasaAmesFile
                else:
                    handler = generic_file.GenericFile
            # This can be a grb file.
            elif res == self.DATA_PYTHON_MAGIC_NUM_RES:
                res = util.get_bytes_from_file(filename, 4)
                if res == "GRIB":
                    handler = grib_file.GribFile
                else:
                    handler = generic_file.GenericFile
        except Exception: # catch everything... if there is an error just return the generic handler.
            handler = generic_file.GenericFile
        if handler is not None:
            self.handlers_and_dirs[file_dir] = handler
            return handler
        # Try to return last handler used in this directory.
        if file_dir in self.handlers_and_dirs.keys():
            handler = self.handlers_and_dirs[file_dir]
            if handler is not None:
                return handler
        # Nothing worked, return the generic handler.
        handler = generic_file.GenericFile
        return handler
    def __enter__(self):
        """Support use as a context manager; no setup is needed."""
        return self
def __exit__(self, *args):
pass | 0.384334 | 0.091748 |
from twitter_sentiment.params import sw_persian
import pandas as pd
import re
def correction(series:pd.Series):
    """Yield each entry of *series* as a list of ints.

    Newlines and dots are stripped before splitting on single spaces,
    so '4.\\n5 6' becomes [45, 6].
    """
    assert isinstance(series, pd.Series)
    for raw in series:
        cleaned = raw.replace('\n', '').replace('.', '')
        yield [int(token) for token in cleaned.split(' ')]
def remove_emoji(text:str) -> str:
    """Return *text* with emoji and pictographic symbols removed."""
    assert isinstance(text, str)
    # Unicode ranges for emoticons, pictographs, transport symbols,
    # flags, dingbats, etc. (some ranges overlap; that is harmless
    # inside a character class).
    emoji_ranges = (
        u"\U0001F600-\U0001F64F"
        u"\U0001F300-\U0001F5FF"
        u"\U0001F680-\U0001F6FF"
        u"\U0001F1E0-\U0001F1FF"
        u"\U00002702-\U000027B0"
        u"\U000024C2-\U0001F251"
        u"\U0001F300-\U0001F5FF"
        u"\U0001F1E6-\U0001F1FF"
        u"\U00002700-\U000027BF"
        u"\U0001F900-\U0001F9FF"
        u"\U0001F600-\U0001F64F"
        u"\U0001F680-\U0001F6FF"
        u"\U00002600-\U000026FF"
    )
    matcher = re.compile("[" + emoji_ranges + "]+", flags=re.UNICODE)
    return str(matcher.sub('', text))
def remove_redundent_characters(text:str) -> str:
    """Strip mentions, links, digits, Latin letters and punctuation from a
    (Persian) tweet, replacing each match with a single space.

    The substitutions are order-sensitive: mentions/links are removed before
    the blanket Latin-letter and digit passes.
    NOTE(review): several patterns are redundant ([0-9] after \\d, the second
    '؟' in the final list) — harmless but could be consolidated.
    """
    assert isinstance(text, str)
    text = re.sub(r'@[A-Za-z0-9]+', ' ', text) # Removed @mentions
    text = re.sub(r'_[A-Za-z0-9]+', ' ', text) # Removed underlines
    text = re.sub(r'/(\r\n)+|\r+|\n+|\t+/', ' ', text) # Removed \n
    text = re.sub(r'#', ' ', text) # Removing the '#' symbol
    text = re.sub(r'RT[\s]+', ' ', text) # Removing RT
    text = re.sub(r'https?:\/\/\S+', ' ', text) # Remove the hyper link
    text = re.sub(r'\([ا-ی]{1,3}\)', ' ', text) # Remove abbreviations
    text = re.sub(r"[\(\)]", " ", text) # remove parantesis
    text = re.sub(r"\d|[۰-۹]", " ", text)  # ASCII and Persian digits
    text = re.sub(r"&|:", " ", text)
    text = re.sub(r"[A-Za-z]", " ", text)  # drop all Latin letters
    text = re.sub(r"[0-9]", " ", text)
    text = re.sub(r"\"", " ", text)
    text = re.sub(r"\'", " ", text)
    text = re.sub(r"_", " ", text)
    text = re.sub(r"@|=", " ", text)
    text = re.sub(r"^\d+\s|\s\d+\s|\s\d+$", " ", text)
    text = re.sub(r"{|}|;|\[|\]|\||؟|!|\+|\-|\*|\$", " ", text)
    text = re.sub(r"¹²|\/", " ", text)
    text = re.sub(r"»|>|<|«|,|؛|،|%|؟", " ", text)
    text = re.sub("\.|\^|,", " ", text)
    return text
def remove_stop_words(text:str) -> str:
    """Drop Persian stop words (from params.sw_persian) out of *text*."""
    assert isinstance(text, str)
    kept = (word for word in text.split(' ') if word not in sw_persian)
    return ' '.join(kept)
def preprocess(sentence:str) -> str:
return remove_stop_words(
remove_redundent_characters(
remove_emoji(sentence))) | twitter_sentiment/preprocessing/Preprocessing.py | from twitter_sentiment.params import sw_persian
import pandas as pd
import re
def correction(series:pd.Series):
    """Yield each entry of *series* as a list of ints (newlines/dots stripped first)."""
    assert isinstance(series, pd.Series)
    for line in series:
        line = line.replace('\n', '').replace('.', '')
        line = line.split(' ')
        yield list(map(int, line))
def remove_emoji(text:str) -> str:
    """Return *text* with emoji and pictographic symbols removed."""
    assert isinstance(text, str)
    emoji_pattern = re.compile(pattern="["
                               u"\U0001F600-\U0001F64F"  # emoticons
                               u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                               u"\U0001F680-\U0001F6FF"  # transport & map symbols
                               u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                               u"\U00002702-\U000027B0"
                               u"\U000024C2-\U0001F251"
                               u"\U0001F300-\U0001F5FF"
                               u"\U0001F1E6-\U0001F1FF"
                               u"\U00002700-\U000027BF"
                               u"\U0001F900-\U0001F9FF"
                               u"\U0001F600-\U0001F64F"
                               u"\U0001F680-\U0001F6FF"
                               u"\U00002600-\U000026FF"
                               "]+", flags=re.UNICODE)
    return str(emoji_pattern.sub(r'', text))
def remove_redundent_characters(text:str) -> str:
    """Strip mentions, links, digits, Latin letters and punctuation from a
    (Persian) tweet, replacing each match with a single space. Order-sensitive."""
    assert isinstance(text, str)
    text = re.sub(r'@[A-Za-z0-9]+', ' ', text) # Removed @mentions
    text = re.sub(r'_[A-Za-z0-9]+', ' ', text) # Removed underlines
    text = re.sub(r'/(\r\n)+|\r+|\n+|\t+/', ' ', text) # Removed \n
    text = re.sub(r'#', ' ', text) # Removing the '#' symbol
    text = re.sub(r'RT[\s]+', ' ', text) # Removing RT
    text = re.sub(r'https?:\/\/\S+', ' ', text) # Remove the hyper link
    text = re.sub(r'\([ا-ی]{1,3}\)', ' ', text) # Remove abbreviations
    text = re.sub(r"[\(\)]", " ", text) # remove parantesis
    text = re.sub(r"\d|[۰-۹]", " ", text)
    text = re.sub(r"&|:", " ", text)
    text = re.sub(r"[A-Za-z]", " ", text)
    text = re.sub(r"[0-9]", " ", text)
    text = re.sub(r"\"", " ", text)
    text = re.sub(r"\'", " ", text)
    text = re.sub(r"_", " ", text)
    text = re.sub(r"@|=", " ", text)
    text = re.sub(r"^\d+\s|\s\d+\s|\s\d+$", " ", text)
    text = re.sub(r"{|}|;|\[|\]|\||؟|!|\+|\-|\*|\$", " ", text)
    text = re.sub(r"¹²|\/", " ", text)
    text = re.sub(r"»|>|<|«|,|؛|،|%|؟", " ", text)
    text = re.sub("\.|\^|,", " ", text)
    return text
def remove_stop_words(text:str) -> str:
    """Drop Persian stop words (from params.sw_persian) out of *text*."""
    assert isinstance(text, str)
    return ' '.join([word for word in text.split(' ') if word not in sw_persian])
def preprocess(sentence:str) -> str:
return remove_stop_words(
remove_redundent_characters(
remove_emoji(sentence))) | 0.514644 | 0.280983 |
from pm4py.util import exec_utils, xes_constants, constants
from typing import Optional, Dict, Any, Union, Tuple, List, Set
from pm4py.objects.log.obj import EventLog
import pandas as pd
from enum import Enum
class Outputs(Enum):
    # Keys of the footprints dictionaries produced by footprints discovery.
    DFG = "dfg"
    SEQUENCE = "sequence"
    PARALLEL = "parallel"
    START_ACTIVITIES = "start_activities"
    END_ACTIVITIES = "end_activities"
    ACTIVITIES = "activities"
    SKIPPABLE = "skippable"
    ACTIVITIES_ALWAYS_HAPPENING = "activities_always_happening"
    MIN_TRACE_LENGTH = "min_trace_length"
    TRACE = "trace"
class Parameters(Enum):
    # Algorithm parameters accepted by apply/apply_single below.
    CASE_ID_KEY = constants.PARAMETER_CONSTANT_CASEID_KEY
    STRICT = "strict"
def apply_single(log_footprints: Dict[str, Any], model_footprints: Dict[str, Any], parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> Set[Any]:
    """
    Apply footprints conformance between a log footprints object
    and a model footprints object
    Parameters
    -----------------
    log_footprints
        Footprints of the log (NOT a list, but a single footprints object)
    model_footprints
        Footprints of the model
    parameters
        Parameters of the algorithm, including:
            - Parameters.STRICT => strict check of the footprints
    Returns
    ------------------
    violations
        Set of all the violations between the log footprints
        and the model footprints
    """
    if parameters is None:
        parameters = {}
    strict = exec_utils.get_param_value(Parameters.STRICT, parameters, False)
    if strict:
        # Strict: a log relation is a violation unless the model has it in
        # the *same* category (sequence vs parallel).
        s1 = log_footprints[Outputs.SEQUENCE.value].difference(model_footprints[Outputs.SEQUENCE.value])
        s2 = log_footprints[Outputs.PARALLEL.value].difference(model_footprints[Outputs.PARALLEL.value])
        violations = s1.union(s2)
    else:
        # Lenient: only relations absent from the model in *any* category count.
        s1 = log_footprints[Outputs.SEQUENCE.value].union(log_footprints[Outputs.PARALLEL.value])
        s2 = model_footprints[Outputs.SEQUENCE.value].union(model_footprints[Outputs.PARALLEL.value])
        violations = s1.difference(s2)
    return violations
def apply(log_footprints: Union[Dict[str, Any], List[Dict[str, Any]]], model_footprints: Dict[str, Any], parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> Union[List[Dict[str, Any]], Dict[str, Any]]:
    """
    Apply footprints conformance between a log footprints object
    and a model footprints object
    Parameters
    -----------------
    log_footprints
        Footprints of the log (single object, or one footprints object per case)
    model_footprints
        Footprints of the model
    parameters
        Parameters of the algorithm, including:
            - Parameters.STRICT => strict check of the footprints
    Returns
    ------------------
    violations
        Set of all the violations between the log footprints
        and the model footprints, OR list of case-per-case violations
    """
    # isinstance instead of `type(...) is list` (idiomatic; also accepts
    # list subclasses) to dispatch to the per-case path.
    if isinstance(log_footprints, list):
        return [apply_single(case_footprints, model_footprints, parameters=parameters)
                for case_footprints in log_footprints]
    return apply_single(log_footprints, model_footprints, parameters=parameters)
def get_diagnostics_dataframe(log: EventLog, conf_result: Union[List[Dict[str, Any]], Dict[str, Any]], parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> pd.DataFrame:
"""
Gets the diagnostics dataframe from the log
and the results of footprints conformance checking
(trace-by-trace)
Parameters
--------------
log
Event log
conf_result
Conformance checking results (trace-by-trace)
Returns
--------------
diagn_dataframe
Diagnostics dataframe
"""
if parameters is None:
parameters = {}
case_id_key = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, xes_constants.DEFAULT_TRACEID_KEY)
import pandas as pd
diagn_stream = []
for index in range(len(log)):
case_id = log[index].attributes[case_id_key]
num_violations = len(conf_result[index])
is_fit = num_violations == 0
diagn_stream.append({"case_id": case_id, "num_violations": num_violations, "is_fit": is_fit})
return pd.DataFrame(diagn_stream) | ws2122-lspm/Lib/site-packages/pm4py/algo/conformance/footprints/variants/log_model.py | from pm4py.util import exec_utils, xes_constants, constants
from typing import Optional, Dict, Any, Union, Tuple, List, Set
from pm4py.objects.log.obj import EventLog
import pandas as pd
from enum import Enum
class Outputs(Enum):
    # Keys of the footprints dictionaries produced by footprints discovery.
    DFG = "dfg"
    SEQUENCE = "sequence"
    PARALLEL = "parallel"
    START_ACTIVITIES = "start_activities"
    END_ACTIVITIES = "end_activities"
    ACTIVITIES = "activities"
    SKIPPABLE = "skippable"
    ACTIVITIES_ALWAYS_HAPPENING = "activities_always_happening"
    MIN_TRACE_LENGTH = "min_trace_length"
    TRACE = "trace"
class Parameters(Enum):
    # Algorithm parameters accepted by apply/apply_single below.
    CASE_ID_KEY = constants.PARAMETER_CONSTANT_CASEID_KEY
    STRICT = "strict"
def apply_single(log_footprints: Dict[str, Any], model_footprints: Dict[str, Any], parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> Set[Any]:
    """
    Apply footprints conformance between a log footprints object
    and a model footprints object
    Parameters
    -----------------
    log_footprints
        Footprints of the log (NOT a list, but a single footprints object)
    model_footprints
        Footprints of the model
    parameters
        Parameters of the algorithm, including:
            - Parameters.STRICT => strict check of the footprints
    Returns
    ------------------
    violations
        Set of all the violations between the log footprints
        and the model footprints
    """
    if parameters is None:
        parameters = {}
    strict = exec_utils.get_param_value(Parameters.STRICT, parameters, False)
    if strict:
        # Strict: log relations must appear in the model in the same category.
        s1 = log_footprints[Outputs.SEQUENCE.value].difference(model_footprints[Outputs.SEQUENCE.value])
        s2 = log_footprints[Outputs.PARALLEL.value].difference(model_footprints[Outputs.PARALLEL.value])
        violations = s1.union(s2)
    else:
        # Lenient: only relations absent from the model in any category count.
        s1 = log_footprints[Outputs.SEQUENCE.value].union(log_footprints[Outputs.PARALLEL.value])
        s2 = model_footprints[Outputs.SEQUENCE.value].union(model_footprints[Outputs.PARALLEL.value])
        violations = s1.difference(s2)
    return violations
def apply(log_footprints: Union[Dict[str, Any], List[Dict[str, Any]]], model_footprints: Dict[str, Any], parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> Union[List[Dict[str, Any]], Dict[str, Any]]:
    """
    Apply footprints conformance between a log footprints object
    and a model footprints object
    Parameters
    -----------------
    log_footprints
        Footprints of the log
    model_footprints
        Footprints of the model
    parameters
        Parameters of the algorithm, including:
            - Parameters.STRICT => strict check of the footprints
    Returns
    ------------------
    violations
        Set of all the violations between the log footprints
        and the model footprints, OR list of case-per-case violations
    """
    # A list of footprints means one object per case: check each one.
    if type(log_footprints) is list:
        ret = []
        for case_footprints in log_footprints:
            ret.append(apply_single(case_footprints, model_footprints, parameters=parameters))
        return ret
    return apply_single(log_footprints, model_footprints, parameters=parameters)
def get_diagnostics_dataframe(log: EventLog, conf_result: Union[List[Dict[str, Any]], Dict[str, Any]], parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> pd.DataFrame:
"""
Gets the diagnostics dataframe from the log
and the results of footprints conformance checking
(trace-by-trace)
Parameters
--------------
log
Event log
conf_result
Conformance checking results (trace-by-trace)
Returns
--------------
diagn_dataframe
Diagnostics dataframe
"""
if parameters is None:
parameters = {}
case_id_key = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, xes_constants.DEFAULT_TRACEID_KEY)
import pandas as pd
diagn_stream = []
for index in range(len(log)):
case_id = log[index].attributes[case_id_key]
num_violations = len(conf_result[index])
is_fit = num_violations == 0
diagn_stream.append({"case_id": case_id, "num_violations": num_violations, "is_fit": is_fit})
return pd.DataFrame(diagn_stream) | 0.910446 | 0.331039 |
import h5py
import numpy as np
import cv2
class dbReader:
    def __init__(self):
        # FILE_PATH: raw NYU Depth v2 .mat (not read in this class);
        # SAVE_PATH: directory holding the preprocessed .npy arrays.
        self.FILE_PATH = 'dataset/nyu_depth_v2_labeled.mat'
        self.SAVE_PATH = 'dataset/'
def loadData(self, c, d):
"""
'rgb' or 'grayscale'(default)
'origin' or 'normalized'(default)
"""
if c == 'rgb':
fc = open(self.SAVE_PATH+'img_color.npy', 'rb')
self.img = np.load(fc)
fc.close()
else:
fimg = open(self.SAVE_PATH+'img.npy', 'rb')
self.img = np.load(fimg)
fimg.close()
if d == 'origin':
fdps = open(self.SAVE_PATH+'dps_origin.npy', 'rb')
self.depth = np.load(fdps)
fdps.close()
else:
fdps = open(self.SAVE_PATH+'dps.npy', 'rb')
self.depth = np.load(fdps)
fdps.close()
rng_state = np.random.get_state()
np.random.shuffle(self.img)
np.random.set_state(rng_state)
np.random.shuffle(self.depth)
sz = self.img.shape[0]
"""
for i in range(0, sz):
average_color = [self.img[i, :, :, k].mean() for k in range(self.img.shape[-1])]
self.img[i, :, :] -= average_color
"""
def getNextBatchResized(self, batch_size, ratio):
"""
make sure batch_size <= data_count
"""
data_count = self.img.shape[0]
channel_count = len(self.img.shape)
ids = np.random.choice(data_count, batch_size)
im = np.zeros((batch_size, self.img.shape[1], self.img.shape[2], int(channel_count*2-5)), dtype=np.float32)
dp = np.zeros((batch_size, int(self.depth.shape[1]/ratio), int(self.depth.shape[2]/ratio), 1), dtype=np.float32)
for i in range(batch_size):
if channel_count == 3:
im[i, :, :, 0] = self.img[ids[i], :, :]
else:
im[i, :, :, :] = self.img[ids[i], :, :, :]
dp[i, :, :, 0] = cv2.resize(self.depth[ids[i], :, :],None,fx=0.25, fy=0.25, interpolation = cv2.INTER_CUBIC)
return [im, dp]
    def getNextBatchResizedTraining(self, batch_size, ratio):
        """
        make sure batch_size <= data_count
        Sample a training batch (first 1399 examples) with one shared
        random horizontal flip applied to the whole batch.
        """
        data_count = 1399  # hard-coded training split size; the remainder feeds getTest
        channel_count = len(self.img.shape)  # 3 -> grayscale (N,H,W), 4 -> color
        ids = np.random.choice(data_count, batch_size)
        im = np.zeros((batch_size, self.img.shape[1], self.img.shape[2], int(channel_count*2-5)), dtype=np.float32)
        dp = np.zeros((batch_size, int(self.depth.shape[1]/ratio), int(self.depth.shape[2]/ratio), 1), dtype=np.float32)
        flip = np.random.rand()>0.5  # one coin toss for the entire batch
        for i in range(batch_size):
            if channel_count == 3:
                if flip:
                    im[i, :, :, 0] = np.flip(self.img[ids[i], :, :], 1)
                else:
                    im[i, :, :, 0] = self.img[ids[i], :, :]
            else:
                if flip:
                    im[i, :, :, :] = np.flip(self.img[ids[i], :, :, :], 1)
                else:
                    im[i, :, :, :] = self.img[ids[i], :, :, :]
            # Depth is resized by 1/ratio, then flipped to match the image.
            dp[i, :, :, 0] = cv2.resize(self.depth[ids[i], :, :],None,fx=1/ratio, fy=1/ratio, interpolation = cv2.INTER_CUBIC)
            if flip:
                dp[i, :, :, 0] = np.flip(dp[i, :, :, 0], 1)
        return [im, dp]
    def dataAugmentation(self, idx, ratio):
        """
        Return one augmented [image, depth] pair for sample *idx*.

        da_type < 0.5 -> take one of the four image quadrants and upscale it
        back to full size; da_type < 0.25 or > 0.75 -> additionally flip
        horizontally. Depth is rescaled to 1/ratio of the image size and
        normalized by its maximum.
        """
        da_type = np.random.rand()
        channel_count = len(self.img.shape)  # 3 -> grayscale (N,H,W), 4 -> color
        nim = np.zeros((self.img.shape[1], self.img.shape[2], int(channel_count*2-5)), dtype=np.float32)
        ndp = np.zeros((int(self.depth.shape[1]/ratio), int(self.depth.shape[2]/ratio), 1), dtype=np.float32)
        imgw = self.img.shape[1]
        imgh = self.img.shape[2]
        if da_type < 0.5:
            # crop
            # sub_id picks one of four quadrants: x half from sub_id<0.5,
            # y half from sub_id being in the outer quarters.
            sub_id = np.random.rand()
            x_id = sub_id<0.5
            x_l = int (int(x_id) * float(imgw) / 2)
            x_h = x_l + int( float(imgw) / 2 )
            y_id = sub_id<0.25 or sub_id > 0.75
            y_l = int (int(y_id) * float(imgh) / 2)
            y_h = y_l + int( float(imgh) / 2 )
            if channel_count == 3:
                tmpim = self.img[idx, x_l:x_h, y_l:y_h, :]
            else:
                tmpim = self.img[idx, x_l:x_h, y_l:y_h]
            # NOTE(review): fx=ratio/2 only restores full size when ratio == 4
            # (half-size crop * 2) — confirm intended ratio values.
            nim = cv2.resize(tmpim, None, fx=ratio/2, fy=ratio/2, interpolation = cv2.INTER_CUBIC)
            ndp[:, :, 0] = cv2.resize(self.depth[idx, x_l:x_h, y_l:y_h], None, fx=2/ratio, fy=2/ratio, interpolation = cv2.INTER_CUBIC)
        else:
            if channel_count == 3:
                nim = self.img[idx, :, :, :]
            else:
                nim = self.img[idx, :, :]
            ndp[:, :, 0] = cv2.resize(self.depth[idx, :, :], None, fx=1/ratio, fy=1/ratio, interpolation = cv2.INTER_CUBIC)
        if da_type < 0.25 or da_type > 0.75:
            # flip
            if channel_count == 3:
                nim[:, :, 0] = np.flip(nim[:, :, 0], 1)
            else:
                nim[:, :, :] = np.flip(nim[:, :, :], 1)
            ndp[:, :, 0] = np.flip(ndp[:, :, 0], 1)
        # Normalize depth to [0, 1] by its max.
        # NOTE(review): divides by zero if the depth crop is all zeros.
        maxi = np.amax(ndp)
        ndp[:, :, 0] = ndp[:, :, 0]/maxi
        return [nim, ndp]
    def dataAugmentationCrop(self, idx, ratio):
        """
        Return one augmented [image, depth] pair for sample *idx*, using a
        random-size crop instead of fixed quadrants. Depth is min-max
        normalized to [0, 1].
        """
        da_type = np.random.rand()
        channel_count = len(self.img.shape)  # 3 -> grayscale (N,H,W), 4 -> color
        nim = np.zeros((self.img.shape[1], self.img.shape[2], int(channel_count*2-5)), dtype=np.float32)
        ndp = np.zeros((int(self.depth.shape[1]/ratio), int(self.depth.shape[2]/ratio), 1), dtype=np.float32)
        imgw = self.img.shape[1]
        imgh = self.img.shape[2]
        if da_type < 0.5:
            # crop
            x_l = np.random.randint(imgw/2)
            x_h = np.random.randint(x_l + imgw/2, imgw)
            y_l = np.random.randint(imgh/2)
            # NOTE(review): this y_h formula (imgh * imgw * (x_h - x_l)) is far
            # larger than imgh — it looks like an aspect-preserving expression
            # went wrong (missing a division and the y_l offset). Verify.
            y_h = int( float(imgh) * float(imgw) * float(x_h - x_l) )
            if channel_count == 3:
                tmpim = self.img[idx, x_l:x_h, y_l:y_h, :]
            else:
                tmpim = self.img[idx, x_l:x_h, y_l:y_h]
            nim = cv2.resize(tmpim, (int(imgh), int(imgw)), interpolation = cv2.INTER_CUBIC)
            ndp[:, :, 0] = cv2.resize(self.depth[idx, x_l:x_h, y_l:y_h], (int(imgh/ratio), int(imgw/ratio)), interpolation = cv2.INTER_CUBIC)
        else:
            if channel_count == 3:
                nim = self.img[idx, :, :, :]
            else:
                nim = self.img[idx, :, :]
            ndp[:, :, 0] = cv2.resize(self.depth[idx, :, :], None, fx=1/ratio, fy=1/ratio, interpolation = cv2.INTER_CUBIC)
        if da_type < 0.25 or da_type > 0.75:
            # flip
            if channel_count == 3:
                nim[:, :, 0] = np.flip(nim[:, :, 0], 1)
            else:
                nim[:, :, :] = np.flip(nim[:, :, :], 1)
            ndp[:, :, 0] = np.flip(ndp[:, :, 0], 1)
        # Min-max normalize depth; NOTE(review): divides by zero on a constant map.
        maxi = np.amax(ndp)
        mini = np.amin(ndp)
        ndp[:, :, 0] = (ndp[:, :, 0]-mini)/(maxi-mini)
        return [nim, ndp]
def getNextBatchResizedTrainingNew(self, batch_size, ratio):
"""
make sure batch_size <= data_count
"""
data_count = 1399
channel_count = len(self.img.shape)
ids = np.random.choice(data_count, batch_size)
im = np.zeros((batch_size, self.img.shape[1], self.img.shape[2], int(channel_count*2-5)), dtype=np.float32)
dp = np.zeros((batch_size, int(self.depth.shape[1]/ratio), int(self.depth.shape[2]/ratio), 1), dtype=np.float32)
flip = np.random.rand()>0.5
for i in range(batch_size):
if channel_count == 3:
[im[i, :, :, 0], dp[i, :, :, :]] = self.dataAugmentation(ids[i], ratio)
else:
[im[i, :, :, :], dp[i, :, :, :]] = self.dataAugmentation(ids[i], ratio)
return [im, dp]
def getNextBatchResizedTrainingWithRandCrop(self, batch_size, ratio):
"""
make sure batch_size <= data_count
"""
data_count = 1399
channel_count = len(self.img.shape)
ids = np.random.choice(data_count, batch_size)
im = np.zeros((batch_size, self.img.shape[1], self.img.shape[2], int(channel_count*2-5)), dtype=np.float32)
dp = np.zeros((batch_size, int(self.depth.shape[1]/ratio), int(self.depth.shape[2]/ratio), 1), dtype=np.float32)
flip = np.random.rand()>0.5
for i in range(batch_size):
if channel_count == 3:
[im[i, :, :, 0], dp[i, :, :, :]] = self.dataAugmentationCrop(ids[i], ratio)
else:
[im[i, :, :, :], dp[i, :, :, :]] = self.dataAugmentationCrop(ids[i], ratio)
return [im, dp]
def getTest(self, ratio, sz=50):
"""
make sure batch_size <= data_count
"""
channel_count = len(self.img.shape)
im = np.zeros((sz, self.img.shape[1], self.img.shape[2], int(channel_count*2-5)), dtype=np.float32)
dp = np.zeros((sz, int(self.depth.shape[1]/ratio), int(self.depth.shape[2]/ratio), 1), dtype=np.float32)
for i in range(1399,1399+sz):
if channel_count == 3:
im[i-1399, :, :, 0] = self.img[i, :, :]
else:
im[i-1399, :, :, :] = self.img[i, :, :, :]
dp[i-1399, :, :, 0] = cv2.resize(self.depth[i, :, :],None,fx=1/ratio, fy=1/ratio, interpolation = cv2.INTER_CUBIC)
return [im, dp] | dbReader_simplified.py | import h5py
import numpy as np
import cv2
class dbReader:
    def __init__(self):
        # FILE_PATH: raw NYU Depth v2 .mat (not read in this class);
        # SAVE_PATH: directory holding the preprocessed .npy arrays.
        self.FILE_PATH = 'dataset/nyu_depth_v2_labeled.mat'
        self.SAVE_PATH = 'dataset/'
def loadData(self, c, d):
"""
'rgb' or 'grayscale'(default)
'origin' or 'normalized'(default)
"""
if c == 'rgb':
fc = open(self.SAVE_PATH+'img_color.npy', 'rb')
self.img = np.load(fc)
fc.close()
else:
fimg = open(self.SAVE_PATH+'img.npy', 'rb')
self.img = np.load(fimg)
fimg.close()
if d == 'origin':
fdps = open(self.SAVE_PATH+'dps_origin.npy', 'rb')
self.depth = np.load(fdps)
fdps.close()
else:
fdps = open(self.SAVE_PATH+'dps.npy', 'rb')
self.depth = np.load(fdps)
fdps.close()
rng_state = np.random.get_state()
np.random.shuffle(self.img)
np.random.set_state(rng_state)
np.random.shuffle(self.depth)
sz = self.img.shape[0]
"""
for i in range(0, sz):
average_color = [self.img[i, :, :, k].mean() for k in range(self.img.shape[-1])]
self.img[i, :, :] -= average_color
"""
    def getNextBatchResized(self, batch_size, ratio):
        """
        make sure batch_size <= data_count
        Sample a random batch of images plus downscaled depth maps.
        """
        data_count = self.img.shape[0]
        channel_count = len(self.img.shape)  # 3 -> grayscale (N,H,W), 4 -> color
        ids = np.random.choice(data_count, batch_size)
        im = np.zeros((batch_size, self.img.shape[1], self.img.shape[2], int(channel_count*2-5)), dtype=np.float32)
        dp = np.zeros((batch_size, int(self.depth.shape[1]/ratio), int(self.depth.shape[2]/ratio), 1), dtype=np.float32)
        for i in range(batch_size):
            if channel_count == 3:
                im[i, :, :, 0] = self.img[ids[i], :, :]
            else:
                im[i, :, :, :] = self.img[ids[i], :, :, :]
            # NOTE(review): fx/fy are hard-coded to 0.25 while dp's buffer is
            # shaped by `ratio` — this only matches when ratio == 4.
            dp[i, :, :, 0] = cv2.resize(self.depth[ids[i], :, :],None,fx=0.25, fy=0.25, interpolation = cv2.INTER_CUBIC)
        return [im, dp]
def getNextBatchResizedTraining(self, batch_size, ratio):
"""
make sure batch_size <= data_count
"""
data_count = 1399
channel_count = len(self.img.shape)
ids = np.random.choice(data_count, batch_size)
im = np.zeros((batch_size, self.img.shape[1], self.img.shape[2], int(channel_count*2-5)), dtype=np.float32)
dp = np.zeros((batch_size, int(self.depth.shape[1]/ratio), int(self.depth.shape[2]/ratio), 1), dtype=np.float32)
flip = np.random.rand()>0.5
for i in range(batch_size):
if channel_count == 3:
if flip:
im[i, :, :, 0] = np.flip(self.img[ids[i], :, :], 1)
else:
im[i, :, :, 0] = self.img[ids[i], :, :]
else:
if flip:
im[i, :, :, :] = np.flip(self.img[ids[i], :, :, :], 1)
else:
im[i, :, :, :] = self.img[ids[i], :, :, :]
dp[i, :, :, 0] = cv2.resize(self.depth[ids[i], :, :],None,fx=1/ratio, fy=1/ratio, interpolation = cv2.INTER_CUBIC)
if flip:
dp[i, :, :, 0] = np.flip(dp[i, :, :, 0], 1)
return [im, dp]
def dataAugmentation(self, idx, ratio):
da_type = np.random.rand()
channel_count = len(self.img.shape)
nim = np.zeros((self.img.shape[1], self.img.shape[2], int(channel_count*2-5)), dtype=np.float32)
ndp = np.zeros((int(self.depth.shape[1]/ratio), int(self.depth.shape[2]/ratio), 1), dtype=np.float32)
imgw = self.img.shape[1]
imgh = self.img.shape[2]
if da_type < 0.5:
# crop
sub_id = np.random.rand()
x_id = sub_id<0.5
x_l = int (int(x_id) * float(imgw) / 2)
x_h = x_l + int( float(imgw) / 2 )
y_id = sub_id<0.25 or sub_id > 0.75
y_l = int (int(y_id) * float(imgh) / 2)
y_h = y_l + int( float(imgh) / 2 )
if channel_count == 3:
tmpim = self.img[idx, x_l:x_h, y_l:y_h, :]
else:
tmpim = self.img[idx, x_l:x_h, y_l:y_h]
nim = cv2.resize(tmpim, None, fx=ratio/2, fy=ratio/2, interpolation = cv2.INTER_CUBIC)
ndp[:, :, 0] = cv2.resize(self.depth[idx, x_l:x_h, y_l:y_h], None, fx=2/ratio, fy=2/ratio, interpolation = cv2.INTER_CUBIC)
else:
if channel_count == 3:
nim = self.img[idx, :, :, :]
else:
nim = self.img[idx, :, :]
ndp[:, :, 0] = cv2.resize(self.depth[idx, :, :], None, fx=1/ratio, fy=1/ratio, interpolation = cv2.INTER_CUBIC)
if da_type < 0.25 or da_type > 0.75:
# flip
if channel_count == 3:
nim[:, :, 0] = np.flip(nim[:, :, 0], 1)
else:
nim[:, :, :] = np.flip(nim[:, :, :], 1)
ndp[:, :, 0] = np.flip(ndp[:, :, 0], 1)
maxi = np.amax(ndp)
ndp[:, :, 0] = ndp[:, :, 0]/maxi
return [nim, ndp]
def dataAugmentationCrop(self, idx, ratio):
da_type = np.random.rand()
channel_count = len(self.img.shape)
nim = np.zeros((self.img.shape[1], self.img.shape[2], int(channel_count*2-5)), dtype=np.float32)
ndp = np.zeros((int(self.depth.shape[1]/ratio), int(self.depth.shape[2]/ratio), 1), dtype=np.float32)
imgw = self.img.shape[1]
imgh = self.img.shape[2]
if da_type < 0.5:
# crop
x_l = np.random.randint(imgw/2)
x_h = np.random.randint(x_l + imgw/2, imgw)
y_l = np.random.randint(imgh/2)
y_h = int( float(imgh) * float(imgw) * float(x_h - x_l) )
if channel_count == 3:
tmpim = self.img[idx, x_l:x_h, y_l:y_h, :]
else:
tmpim = self.img[idx, x_l:x_h, y_l:y_h]
nim = cv2.resize(tmpim, (int(imgh), int(imgw)), interpolation = cv2.INTER_CUBIC)
ndp[:, :, 0] = cv2.resize(self.depth[idx, x_l:x_h, y_l:y_h], (int(imgh/ratio), int(imgw/ratio)), interpolation = cv2.INTER_CUBIC)
else:
if channel_count == 3:
nim = self.img[idx, :, :, :]
else:
nim = self.img[idx, :, :]
ndp[:, :, 0] = cv2.resize(self.depth[idx, :, :], None, fx=1/ratio, fy=1/ratio, interpolation = cv2.INTER_CUBIC)
if da_type < 0.25 or da_type > 0.75:
# flip
if channel_count == 3:
nim[:, :, 0] = np.flip(nim[:, :, 0], 1)
else:
nim[:, :, :] = np.flip(nim[:, :, :], 1)
ndp[:, :, 0] = np.flip(ndp[:, :, 0], 1)
maxi = np.amax(ndp)
mini = np.amin(ndp)
ndp[:, :, 0] = (ndp[:, :, 0]-mini)/(maxi-mini)
return [nim, ndp]
    def getNextBatchResizedTrainingNew(self, batch_size, ratio):
        """
        make sure batch_size <= data_count
        Sample an augmented training batch via dataAugmentation.
        """
        data_count = 1399  # hard-coded training split size
        channel_count = len(self.img.shape)  # 3 -> grayscale (N,H,W), 4 -> color
        ids = np.random.choice(data_count, batch_size)
        im = np.zeros((batch_size, self.img.shape[1], self.img.shape[2], int(channel_count*2-5)), dtype=np.float32)
        dp = np.zeros((batch_size, int(self.depth.shape[1]/ratio), int(self.depth.shape[2]/ratio), 1), dtype=np.float32)
        flip = np.random.rand()>0.5  # NOTE(review): unused — flipping happens inside dataAugmentation
        for i in range(batch_size):
            if channel_count == 3:
                [im[i, :, :, 0], dp[i, :, :, :]] = self.dataAugmentation(ids[i], ratio)
            else:
                [im[i, :, :, :], dp[i, :, :, :]] = self.dataAugmentation(ids[i], ratio)
        return [im, dp]
    def getNextBatchResizedTrainingWithRandCrop(self, batch_size, ratio):
        """
        make sure batch_size <= data_count
        Sample an augmented training batch via dataAugmentationCrop (random crops).
        """
        data_count = 1399  # hard-coded training split size
        channel_count = len(self.img.shape)  # 3 -> grayscale (N,H,W), 4 -> color
        ids = np.random.choice(data_count, batch_size)
        im = np.zeros((batch_size, self.img.shape[1], self.img.shape[2], int(channel_count*2-5)), dtype=np.float32)
        dp = np.zeros((batch_size, int(self.depth.shape[1]/ratio), int(self.depth.shape[2]/ratio), 1), dtype=np.float32)
        flip = np.random.rand()>0.5  # NOTE(review): unused — flipping happens inside dataAugmentationCrop
        for i in range(batch_size):
            if channel_count == 3:
                [im[i, :, :, 0], dp[i, :, :, :]] = self.dataAugmentationCrop(ids[i], ratio)
            else:
                [im[i, :, :, :], dp[i, :, :, :]] = self.dataAugmentationCrop(ids[i], ratio)
        return [im, dp]
def getTest(self, ratio, sz=50):
"""
make sure batch_size <= data_count
"""
channel_count = len(self.img.shape)
im = np.zeros((sz, self.img.shape[1], self.img.shape[2], int(channel_count*2-5)), dtype=np.float32)
dp = np.zeros((sz, int(self.depth.shape[1]/ratio), int(self.depth.shape[2]/ratio), 1), dtype=np.float32)
for i in range(1399,1399+sz):
if channel_count == 3:
im[i-1399, :, :, 0] = self.img[i, :, :]
else:
im[i-1399, :, :, :] = self.img[i, :, :, :]
dp[i-1399, :, :, 0] = cv2.resize(self.depth[i, :, :],None,fx=1/ratio, fy=1/ratio, interpolation = cv2.INTER_CUBIC)
return [im, dp] | 0.330687 | 0.359701 |
from . import map_kor_to_braille
import re
UNRECOGNIZED = '?'
open_quotes = True
# Hangul syllable decomposition constants: BASE_CODE is the code point of
# '가'; a syllable decomposes as BASE_CODE + cho*588 + jung*28 + jong.
BASE_CODE, CHOSUNG, JUNGSUNG = 44032, 588, 28
# Leading consonants (choseong). 00 ~ 18
CHOSUNG_LIST = ['ㄱ', 'ㄲ', 'ㄴ', 'ㄷ', 'ㄸ', 'ㄹ', 'ㅁ',
                'ㅂ', 'ㅃ', 'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅉ',
                'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']
# Vowels (jungseong). 00 ~ 20
JUNGSUNG_LIST = ['ㅏ', 'ㅐ', 'ㅑ', 'ㅒ', 'ㅓ', 'ㅔ', 'ㅕ',
                 'ㅖ', 'ㅗ', 'ㅘ', 'ㅙ', 'ㅚ', 'ㅛ', 'ㅜ',
                 'ㅝ', 'ㅞ', 'ㅟ', 'ㅠ', 'ㅡ', 'ㅢ', 'ㅣ']
# Trailing consonants (jongseong). 00 ~ 27, index 0 = none
JONGSUNG_LIST = [' ', 'ㄱ', 'ㄲ', 'ㄳ', 'ㄴ', 'ㄵ', 'ㄶ', 'ㄷ',
                 'ㄹ', 'ㄺ', 'ㄻ', 'ㄼ', 'ㄽ', 'ㄾ', 'ㄿ', 'ㅀ',
                 'ㅁ', 'ㅂ', 'ㅄ', 'ㅅ','ㅆ', 'ㅇ', 'ㅈ', 'ㅊ',
                 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']
def extract_words(string):
    """Split *string* on spaces and newlines, preserving empty tokens."""
    return [piece
            for word in string.split(" ")
            for piece in word.split("\n")]
def check_contraction(word, index, braille):
    """If a known contraction starts at word[index], append its braille cells
    and return the number of characters consumed; otherwise return 0."""
    tail = word[index:]
    for text, cells in map_kor_to_braille.contractions.items():
        if tail.startswith(text):
            braille.append({'braille' : cells, 'category' : '약어', 'original' : text})
            return len(text)
    return 0
def check_number(word, index, braille):
    """Translate a digit at word[index] into braille cells.

    The number-start indicator is prefixed only at the first digit of a
    run of digits. Returns True if a digit was handled, else False.
    """
    ch = word[index]
    if not ch.isdigit():
        return False
    # Fix: the original used `index is not 0` — an identity test on an int,
    # which is fragile and a SyntaxWarning on modern CPython. Use != instead.
    if index != 0 and word[index - 1].isdigit():
        # Continuation of a digit run: no number-start prefix.
        cells = map_kor_to_braille.numbers[ch]
    else:
        cells = map_kor_to_braille.number_start + map_kor_to_braille.numbers[ch]
    braille.append({'braille' : cells, 'category' : '숫자', 'original' : ch})
    return True
def check_punctuation(word, index, braille):
    """Translate a punctuation mark at word[index] into braille cells.

    Returns True if a known mark was handled, else False.
    """
    for key, value in map_kor_to_braille.punctuation.items():
        # Fix: the original compared with `key is word[index]` — string
        # identity, which only works when both strings happen to be interned.
        # Equality is the correct comparison.
        if key == word[index]:
            braille.append({'braille' : value, 'category' : '문장기호', 'original' : key})
            return True
    return False
def check_character(word, index, braille):
    """Decompose a Hangul syllable at word[index] and append braille cells
    for its initial, medial and (if present) final jamo.

    Returns True if the character was Hangul, else False.
    """
    key = word[index]
    if re.match('.*[ㄱ-ㅎㅏ-ㅣ가-힣]+.*', key) is not None:
        # Standard Hangul decomposition: offset from '가', then split into
        # choseong / jungseong / jongseong indices.
        char = ord(key) - BASE_CODE
        char1 = int(char / CHOSUNG)
        char2 = int((char - (CHOSUNG * char1)) / JUNGSUNG)
        char3 = int((char - (CHOSUNG * char1) - (JUNGSUNG * char2)))
        braille.append({'braille' : map_kor_to_braille.CHOSUNG_letters.get(CHOSUNG_LIST[char1]), 'category' : '초성', 'original' : CHOSUNG_LIST[char1]})
        braille.append({'braille' : map_kor_to_braille.JUNGSUNG_letters.get(JUNGSUNG_LIST[char2]), 'category' : '중성', 'original' : JUNGSUNG_LIST[char2]})
        # Fix: `char3 is not 0` was an identity test on an int; use != 0.
        if char3 != 0:
            braille.append({'braille' : map_kor_to_braille.JONGSUNG_letters.get(JONGSUNG_LIST[char3]), 'category' : '종성', 'original' : JONGSUNG_LIST[char3]})
        return True
    return False
def translate(string):
    """Translate a Korean string into a list of braille-cell dicts.

    For each word, tries translators in priority order — contractions,
    digits, punctuation, then Hangul decomposition — and appends a space
    cell after every word.
    """
    words = extract_words(string)
    braille = []
    for word in words:
        i = 0
        while (i < len(word)):
            # Contractions may consume several characters at once.
            check_cont = check_contraction(word, i, braille)
            if check_cont:
                i += check_cont
                continue
            if check_number(word, i, braille):
                i += 1
                continue
            if check_punctuation(word, i, braille):
                i += 1
                continue
            # Fallback: Hangul syllable decomposition (unrecognized chars
            # are silently skipped by check_character returning False).
            check_character(word, i, braille)
            i += 1
        braille.append({'braille' : ' ', 'category' : 'space', 'original' : ' '})
    return braille
if __name__ == "__main__":
print(translate("오늘 밤에도 별은 바람에 스치운다.")) | braille_experience/braille_translator_kor/kor_to_braille2.py | from . import map_kor_to_braille
import re
# Placeholder for characters with no braille mapping.
# NOTE(review): appears unused in this module — confirm before removing.
UNRECOGNIZED = '?'
# NOTE(review): presumably toggles between opening/closing quote cells;
# not referenced in this view — confirm usage.
open_quotes = True
# Hangul decomposition constants: code point of '가', and the strides of
# the initial (choseong) and medial (jungseong) indices in a syllable block.
BASE_CODE, CHOSUNG, JUNGSUNG = 44032, 588, 28
# Initial consonants (choseong), indices 00-18.
CHOSUNG_LIST = ['ㄱ', 'ㄲ', 'ㄴ', 'ㄷ', 'ㄸ', 'ㄹ', 'ㅁ',
                'ㅂ', 'ㅃ', 'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅉ',
                'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']
# Medial vowels (jungseong), indices 00-20.
JUNGSUNG_LIST = ['ㅏ', 'ㅐ', 'ㅑ', 'ㅒ', 'ㅓ', 'ㅔ', 'ㅕ',
                 'ㅖ', 'ㅗ', 'ㅘ', 'ㅙ', 'ㅚ', 'ㅛ', 'ㅜ',
                 'ㅝ', 'ㅞ', 'ㅟ', 'ㅠ', 'ㅡ', 'ㅢ', 'ㅣ']
# Final consonants (jongseong), indices 00-27; index 0 means "no final".
JONGSUNG_LIST = [' ', 'ㄱ', 'ㄲ', 'ㄳ', 'ㄴ', 'ㄵ', 'ㄶ', 'ㄷ',
                 'ㄹ', 'ㄺ', 'ㄻ', 'ㄼ', 'ㄽ', 'ㄾ', 'ㄿ', 'ㅀ',
                 'ㅁ', 'ㅂ', 'ㅄ', 'ㅅ','ㅆ', 'ㅇ', 'ㅈ', 'ㅊ',
                 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']
def extract_words(string):
    """Split *string* on spaces, then on newlines, keeping empty fragments.

    Equivalent to splitting on both delimiters; consecutive delimiters
    produce empty-string entries, matching str.split with an explicit sep.
    """
    return [piece for chunk in string.split(" ") for piece in chunk.split("\n")]
def check_contraction(word, index, braille):
    """If a known contraction starts at word[index], append its braille
    cell and return the number of characters consumed; otherwise return 0.
    """
    tail = word[index:]
    for key, value in map_kor_to_braille.contractions.items():
        if tail.startswith(key):
            braille.append({'braille' : value, 'category' : '약어', 'original' : key})
            return len(key)
    return 0
def check_number(word, index, braille):
    """Append the braille cell for a digit at word[index].

    The number-start prefix is emitted only for the first digit of a run
    (i.e. when the previous character is not also a digit).
    Returns True when a digit was handled, False otherwise.
    """
    if not word[index].isdigit():
        return False
    digit_cell = map_kor_to_braille.numbers[word[index]]
    # The original used `index is not 0`, an identity test that only works
    # because CPython caches small ints; compare by value instead.  The
    # three near-identical append branches are also collapsed into one.
    if index != 0 and word[index - 1].isdigit():
        cell = digit_cell
    else:
        cell = map_kor_to_braille.number_start + digit_cell
    braille.append({'braille' : cell, 'category' : '숫자', 'original' : word[index]})
    return True
def check_punctuation(word, index, braille):
    """Append the braille cell for a punctuation mark at word[index].

    Returns True when the character is a known punctuation mark,
    False otherwise (nothing is appended in that case).
    """
    # The original compared characters with `is`, which tests object
    # identity and only happens to work for interned strings; a dict
    # membership test compares by value and is O(1) instead of a scan.
    key = word[index]
    if key in map_kor_to_braille.punctuation:
        braille.append({'braille' : map_kor_to_braille.punctuation[key], 'category' : '문장기호', 'original' : key})
        return True
    return False
def check_character(word, index, braille):
    """Decompose a precomposed Hangul syllable at word[index] into its
    initial/medial/final jamo and append one braille cell per component.

    Returns True when the character was handled, False otherwise.
    """
    key = word[index]
    # Decomposition arithmetic is only valid for precomposed syllables
    # (U+AC00 '가' .. U+D7A3 '힣').  The original pattern also accepted
    # bare jamo (ㄱ-ㅎ, ㅏ-ㅣ), for which `ord(key) - BASE_CODE` is
    # negative and indexes the wrong list entries (or raises IndexError).
    if re.match('[가-힣]', key) is None:
        return False
    char = ord(key) - BASE_CODE
    char1, remainder = divmod(char, CHOSUNG)
    char2, char3 = divmod(remainder, JUNGSUNG)
    braille.append({'braille' : map_kor_to_braille.CHOSUNG_letters.get(CHOSUNG_LIST[char1]), 'category' : '초성', 'original' : CHOSUNG_LIST[char1]})
    braille.append({'braille' : map_kor_to_braille.JUNGSUNG_letters.get(JUNGSUNG_LIST[char2]), 'category' : '중성', 'original' : JUNGSUNG_LIST[char2]})
    # `char3 is not 0` relied on CPython small-int identity; compare by value.
    if char3 != 0:
        braille.append({'braille' : map_kor_to_braille.JONGSUNG_letters.get(JONGSUNG_LIST[char3]), 'category' : '종성', 'original' : JONGSUNG_LIST[char3]})
    return True
def translate(string):
    """Translate a Korean string into a list of braille-cell records.

    Each record is a dict with 'braille', 'category' and 'original' keys;
    a space cell is appended after every word.
    """
    braille = []
    for word in extract_words(string):
        index = 0
        length = len(word)
        while index < length:
            consumed = check_contraction(word, index, braille)
            if consumed:
                index += consumed
            elif check_number(word, index, braille):
                index += 1
            elif check_punctuation(word, index, braille):
                index += 1
            else:
                check_character(word, index, braille)
                index += 1
        # Separate words with an explicit space cell.
        braille.append({'braille' : ' ', 'category' : 'space', 'original' : ' '})
    return braille
if __name__ == "__main__":
    print(translate("오늘 밤에도 별은 바람에 스치운다."))
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs, make_moons, make_circles
# Two interleaved half-moons; class 0 is used as the "target" set and
# class 1 as the "source" set.  The +1 shift moves both moons away from
# the origin.
X, y = make_moons(n_samples=1000, noise=0.1, random_state=2)
data = []
data2 = []
for i in range(len(y)):
    if y[i] == 0: # the target distribution
        data.append(X[i]+1)
    else: # the source distribution
        data2.append(X[i]+1)
data = np.asarray(data) # the target distribution
data2 = np.asarray(data2) # the source distribution
# print(data2.shape)
# (500, 2) — make_moons splits the 1000 samples evenly between classes
plt.scatter(data2[:,0], data2[:,1], c='green') # the source distribution
plt.scatter(data[:,0], data[:,1], c='yellow') # the target distribution
plt.show()
#%%
import torch
import torch.nn as nn
import numpy as np
class Encoder(nn.Module):
    """MLP mapping 2-D points to a 256-D latent representation."""

    def __init__(self):
        super(Encoder, self).__init__()
        # Widening stack 2 -> 56 -> 128 -> 256, batch-normalised output.
        layers = [
            nn.Linear(2, 56),
            nn.ReLU(),
            nn.Linear(56, 128),
            nn.ReLU(),
            nn.Linear(128, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, img):
        """Encode a (batch, 2) tensor into a (batch, 256) latent tensor."""
        return self.model(img)
class Decoder(nn.Module):
    """MLP mapping 256-D latent vectors back to 2-D points.

    NOTE: the trailing ReLU clamps outputs to the non-negative quadrant.
    """

    def __init__(self):
        super(Decoder, self).__init__()
        # Narrowing stack 256 -> 128 -> 56 -> 2, batch-normalised output.
        layers = [
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 56),
            nn.ReLU(),
            nn.Linear(56, 2),
            nn.BatchNorm1d(2),
            nn.ReLU(),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """Decode a (batch, 256) latent tensor into a (batch, 2) tensor."""
        return self.model(x)
#%%
# Index-paired training data: the model is trained to map each point of the
# target set (data) to the point of the source set (data2) at the same index.
# NOTE(review): the pairing comes from generation order, not any learned
# correspondence — confirm this is intended.
x_train = data # the target distribution
y_train = data2 # the source distribution
x_train = torch.FloatTensor(x_train)
y_train = torch.FloatTensor(y_train)
x_test = data
x_test = torch.FloatTensor(x_test)
y_test = data2
y_test = torch.FloatTensor(y_test)
encoder = Encoder()
decoder = Decoder()
# Mean absolute error between predicted and paired source points.
criterion = torch.nn.L1Loss()
op_e = torch.optim.SGD(encoder.parameters(), lr = 0.01)
op_d = torch.optim.SGD(decoder.parameters(), lr = 0.01)
# Eval mode so BatchNorm uses running statistics for the baseline loss.
encoder.eval()
decoder.eval()
z = encoder(x_train)
y_pred = decoder(z)
before_train = criterion(y_pred.squeeze(), y_test)
print('Test loss before training' , before_train.item())
#%%
encoder.train()
decoder.train()
# NOTE(review): `epoch` is first the epoch count and is then immediately
# shadowed by the loop variable — rename one of them for clarity.
epoch = 10000
for epoch in range(epoch):
    op_e.zero_grad()
    op_d.zero_grad()
    # Forward pass
    z = encoder(x_train)
    y_pred = decoder(z)
    # Compute Loss
    loss = criterion(y_pred.squeeze(), y_train)
    print('Epoch {}: train loss: {}'.format(epoch, loss.item()))
    # Backward pass
    loss.backward()
    op_e.step()
    op_d.step()
#%%
# Final evaluation: map the target set through the autoencoder and plot the
# predictions against the source distribution.
encoder.eval()
decoder.eval()
z = encoder(x_train)
y_pred = decoder(z)
y_pred=y_pred.cpu().detach().numpy()
plt.scatter(data2[:,0], data2[:,1], c='green') # the source distribution
plt.scatter(y_pred[:,0], y_pred[:,1], c='blue') # the predicted target distribution
plt.show()
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs, make_moons, make_circles
X, y = make_moons(n_samples=1000, noise=0.1, random_state=2)
data = []
data2 = []
for i in range(len(y)):
if y[i] == 0: # the target distribution
data.append(X[i]+1)
else: # the source distribution
data2.append(X[i]+1)
data = np.asarray(data) # the target distribution
data2 = np.asarray(data2) # the source distribution
# print(data2.shape)
# (2, 1000)
plt.scatter(data2[:,0], data2[:,1], c='green') # the source distribution
plt.scatter(data[:,0], data[:,1], c='yellow') # the target distribution
plt.show()
#%%
import torch
import torch.nn as nn
import numpy as np
class Encoder(nn.Module):
    """MLP mapping 2-D points to a 256-D latent representation."""

    def __init__(self):
        super(Encoder, self).__init__()
        # Widening stack 2 -> 56 -> 128 -> 256, batch-normalised output.
        layers = [
            nn.Linear(2, 56),
            nn.ReLU(),
            nn.Linear(56, 128),
            nn.ReLU(),
            nn.Linear(128, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, img):
        """Encode a (batch, 2) tensor into a (batch, 256) latent tensor."""
        return self.model(img)
class Decoder(nn.Module):
    """MLP mapping 256-D latent vectors back to 2-D points.

    NOTE: the trailing ReLU clamps outputs to the non-negative quadrant.
    """

    def __init__(self):
        super(Decoder, self).__init__()
        # Narrowing stack 256 -> 128 -> 56 -> 2, batch-normalised output.
        layers = [
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 56),
            nn.ReLU(),
            nn.Linear(56, 2),
            nn.BatchNorm1d(2),
            nn.ReLU(),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """Decode a (batch, 256) latent tensor into a (batch, 2) tensor."""
        return self.model(x)
#%%
x_train = data # the target distribution
y_train = data2 # the source distribution
x_train = torch.FloatTensor(x_train)
y_train = torch.FloatTensor(y_train)
x_test = data
x_test = torch.FloatTensor(x_test)
y_test = data2
y_test = torch.FloatTensor(y_test)
encoder = Encoder()
decoder = Decoder()
criterion = torch.nn.L1Loss()
op_e = torch.optim.SGD(encoder.parameters(), lr = 0.01)
op_d = torch.optim.SGD(decoder.parameters(), lr = 0.01)
encoder.eval()
decoder.eval()
z = encoder(x_train)
y_pred = decoder(z)
before_train = criterion(y_pred.squeeze(), y_test)
print('Test loss before training' , before_train.item())
#%%
encoder.train()
decoder.train()
epoch = 10000
for epoch in range(epoch):
op_e.zero_grad()
op_d.zero_grad()
# Forward pass
z = encoder(x_train)
y_pred = decoder(z)
# Compute Loss
loss = criterion(y_pred.squeeze(), y_train)
print('Epoch {}: train loss: {}'.format(epoch, loss.item()))
# Backward pass
loss.backward()
op_e.step()
op_d.step()
#%%
encoder.eval()
decoder.eval()
z = encoder(x_train)
y_pred = decoder(z)
y_pred=y_pred.cpu().detach().numpy()
plt.scatter(data2[:,0], data2[:,1], c='green') # the source distribution
plt.scatter(y_pred[:,0], y_pred[:,1], c='blue') # the predicted target distribution
plt.show()
# Standard Library
from typing import List
from datetime import datetime
# Third Party
from pydantic import StrictInt, StrictStr, StrictBool, constr, root_validator
# Project
from hyperglass.log import log
# Local
from ..main import HyperglassModel
from .serialized import ParsedRoutes
VyOSPeerType = constr(regex=r"(internal|external)")
def _alias_generator(field):
components = field.split("_")
return components[0] + "".join(x.title() for x in components[1:])
class _VyOSBase(HyperglassModel):
    """Base for all VyOS response models.

    Field names are aliased to camelCase (matching the raw VyOS/FRR JSON
    keys) and unknown keys in the response are ignored.
    """
    class Config:
        alias_generator = _alias_generator
        extra = "ignore"
class VyOSNextHop(_VyOSBase):
    """VyOS Next Hop Model."""
    ip: StrictStr
    afi: StrictStr
    metric: StrictInt
    accessible: StrictBool
    used: StrictBool
class VyOSPeer(_VyOSBase):
    """VyOS Peer Model."""
    peerId: StrictStr
    routerId: StrictStr
    # Constrained to "internal" or "external" by VyOSPeerType.
    type: VyOSPeerType
class VyOSLastUpdate(_VyOSBase):
    """VyOS Last Update Model (epoch timestamp plus its string form)."""
    epoch: StrictInt
    string: StrictStr
class VyOSPath(_VyOSBase):
    """VyOS Path Model.

    NOTE(review): validate_path below assigns values whose types disagree
    with several field annotations (see inline notes) — confirm what
    pydantic actually receives/coerces for real VyOS responses.
    """
    prefix: StrictStr
    # NOTE(review): validate_path stores the AS-path *list* here despite
    # the StrictStr annotation.
    aspath: StrictStr
    med: StrictInt = 0
    localpref: StrictInt = 100
    weight: StrictInt = 0
    valid: StrictBool
    # NOTE(review): validate_path replaces this with the bare epoch int,
    # and VyOSRoute.serialize consumes it as such.
    lastUpdate: List[VyOSLastUpdate]
    bestpath: StrictBool
    # NOTE(review): validate_path stores the community *list* here.
    community: StrictStr
    nexthops: List[VyOSNextHop]
    peer: List[VyOSPeer]
    @root_validator(pre=True)
    def validate_path(cls, values):
        """Extract meaningful data from VyOS response.

        Flattens the nested structures of the raw JSON: AS-path segments
        to a list of ASNs, community to a list, lastUpdate to the epoch
        int, and bestpath to the boolean "overall" flag.
        """
        new = values.copy()
        new["aspath"] = values["aspath"]["segments"][0]["list"]
        new["community"] = values["community"]["list"]
        new["lastUpdate"] = values["lastUpdate"]["epoch"]
        # "bestpath" is absent for non-best paths; default to False.
        bestpath = values.get("bestpath", {})
        new["bestpath"] = bestpath.get("overall", False)
        return new
class VyOSRoute(_VyOSBase):
    """VyOS Route Model: one prefix with its candidate paths."""
    prefix: StrictStr
    paths: List[VyOSPath] = []
    def serialize(self):
        """Convert the VyOS-specific fields to standard parsed data model.

        Builds one route dict per path and wraps them in a ParsedRoutes
        object.
        """
        # TODO: somehow, get the actual VRF
        vrf = "default"
        vrfs = ['default']
        routes = []
        for route in self.paths:
            # Age in whole seconds; route.lastUpdate is the raw epoch int
            # stored by VyOSPath.validate_path.
            now = datetime.utcnow().timestamp()
            then = datetime.utcfromtimestamp(route.lastUpdate).timestamp()
            age = int(now - then)
            routes.append(
                {
                    "prefix": self.prefix,
                    "active": route.bestpath,
                    "age": age,
                    "weight": route.weight,
                    "med": route.med,
                    "local_preference": route.localpref,
                    "as_path": route.aspath,
                    "communities": route.community,
                    "next_hop": route.nexthops[0].ip,
                    # TODO: Revisit the source as situation
                    # NOTE(review): assumes route.aspath is the ASN list
                    # produced by validate_path (last element = origin AS).
                    "source_as": route.aspath[-1],
                    "source_rid": '1.1.1.1',
                    # NOTE(review): `peer` is annotated List[VyOSPeer];
                    # attribute access on a list would raise — should this
                    # be route.peer[0].peerId, or is the annotation wrong?
                    "peer_rid": route.peer.peerId,
                    # TODO: somehow, get the actual RPKI state
                    "rpki_state": 3,
                }
            )
        serialized = ParsedRoutes(
            vrf=vrf, count=len(routes), routes=routes, winning_weight="high", vrfs=vrfs,
        )
        log.info("Serialized VyOS response: {}", serialized)
        return serialized
# Standard Library
from typing import List
from datetime import datetime
# Third Party
from pydantic import StrictInt, StrictStr, StrictBool, constr, root_validator
# Project
from hyperglass.log import log
# Local
from ..main import HyperglassModel
from .serialized import ParsedRoutes
VyOSPeerType = constr(regex=r"(internal|external)")
def _alias_generator(field):
components = field.split("_")
return components[0] + "".join(x.title() for x in components[1:])
class _VyOSBase(HyperglassModel):
class Config:
alias_generator = _alias_generator
extra = "ignore"
class VyOSNextHop(_VyOSBase):
"""VyOS Next Hop Model."""
ip: StrictStr
afi: StrictStr
metric: StrictInt
accessible: StrictBool
used: StrictBool
class VyOSPeer(_VyOSBase):
"""VyOS Peer Model."""
peerId: StrictStr
routerId: StrictStr
type: VyOSPeerType
class VyOSLastUpdate(_VyOSBase):
"""VyOS Last Update Model"""
epoch: StrictInt
string: StrictStr
class VyOSPath(_VyOSBase):
"""VyOS Path Model."""
prefix: StrictStr
aspath: StrictStr
med: StrictInt = 0
localpref: StrictInt = 100
weight: StrictInt = 0
valid: StrictBool
lastUpdate: List[VyOSLastUpdate]
bestpath: StrictBool
community: StrictStr
nexthops: List[VyOSNextHop]
peer: List[VyOSPeer]
@root_validator(pre=True)
def validate_path(cls, values):
"""Extract meaningful data from VyOS response."""
new = values.copy()
new["aspath"] = values["aspath"]["segments"][0]["list"]
new["community"] = values["community"]["list"]
new["lastUpdate"] = values["lastUpdate"]["epoch"]
bestpath = values.get("bestpath", {})
new["bestpath"] = bestpath.get("overall", False)
return new
class VyOSRoute(_VyOSBase):
"""VyOS Route Model."""
prefix: StrictStr
paths: List[VyOSPath] = []
def serialize(self):
"""Convert the VyOS-specific fields to standard parsed data model."""
# TODO: somehow, get the actual VRF
vrf = "default"
vrfs = ['default']
routes = []
for route in self.paths:
now = datetime.utcnow().timestamp()
then = datetime.utcfromtimestamp(route.lastUpdate).timestamp()
age = int(now - then)
routes.append(
{
"prefix": self.prefix,
"active": route.bestpath,
"age": age,
"weight": route.weight,
"med": route.med,
"local_preference": route.localpref,
"as_path": route.aspath,
"communities": route.community,
"next_hop": route.nexthops[0].ip,
# TODO: Revisit the source as situation
"source_as": route.aspath[-1],
"source_rid": '1.1.1.1',
"peer_rid": route.peer.peerId,
# TODO: somehow, get the actual RPKI state
"rpki_state": 3,
}
)
serialized = ParsedRoutes(
vrf=vrf, count=len(routes), routes=routes, winning_weight="high", vrfs=vrfs,
)
log.info("Serialized VyOS response: {}", serialized)
        return serialized
# Config file description:
# test ECAL sequence: tcc hw input -> tp digi -> tcc hw input
# check consistency of original and created tcc hardware input files
import FWCore.ParameterSet.Config as cms
process = cms.Process("TCCFlat2Flat")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
#-#-# Flat -> Digi [create digis from input (set of) flat file(s)]
process.load("SimCalorimetry.EcalElectronicsEmulation.EcalFEtoDigi_cfi")
#-#-# Digi -> Flat
process.load("SimCalorimetry.EcalElectronicsEmulation.EcalSimRawData_cfi")
#dump digi collections
process.load("L1Trigger.HardwareValidation.L1Comparator_cfi")
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(2048)
)
process.source = cms.Source("EmptySource")
# Empty IOV sources so the TPG LUT records resolve from the text files
# given to EcalTrigPrimESProducer below instead of a database.
process.tpparams6 = cms.ESSource("EmptyESSource",
    recordName = cms.string('EcalTPGLutGroupRcd'),
    iovIsRunNotTime = cms.bool(True),
    firstValid = cms.vuint32(1)
)
process.tpparams7 = cms.ESSource("EmptyESSource",
    recordName = cms.string('EcalTPGLutIdMapRcd'),
    iovIsRunNotTime = cms.bool(True),
    firstValid = cms.vuint32(1)
)
process.EcalTrigPrimESProducer = cms.ESProducer("EcalTrigPrimESProducer",
    DatabaseFileEE = cms.untracked.string('TPG_EE.txt'),
    #untracked string DatabaseFileEB = "TPG_poweron.txt"//identity tcc lut
    DatabaseFileEB = cms.untracked.string('TPG_EB.txt')
)
process.outputEvents = cms.OutputModule("PoolOutputModule",
    fileName = cms.untracked.string('ecalFlat2Flat.root')
)
# Sequence: flat files -> TP digis -> flat files, then compare.
process.p = cms.Path(process.tccFlatToDigi*process.ecalSimRawData*process.l1Compare)
process.outpath = cms.EndPath(process.outputEvents)
# -1 presumably means "no supermodule filter" — TODO confirm in EcalFEtoDigi
process.tccFlatToDigi.SuperModuleId = -1
process.tccFlatToDigi.FlatBaseName = 'data_in/ecal_tcc'
process.tccFlatToDigi.FileEventOffset = 0
process.tccFlatToDigi.UseIdentityLUT = False
process.tccFlatToDigi.debugPrintFlag = False
process.ecalSimRawData.tcc2dccData = False
process.ecalSimRawData.srp2dccData = False
process.ecalSimRawData.fe2dccData = False
process.ecalSimRawData.trigPrimProducer = 'tccFlatToDigi'
process.ecalSimRawData.tcpDigiCollection = 'formatTCP'
process.ecalSimRawData.tpVerbose = False
process.ecalSimRawData.tccInDefaultVal = 0
process.ecalSimRawData.tccNum = -1
process.ecalSimRawData.outputBaseName = 'data_out/ecal'
# Compare the TP digis against themselves (data label == emul label).
process.l1Compare.ETP_dataLabel = 'tccFlatToDigi'
process.l1Compare.ETP_emulLabel = 'tccFlatToDigi'
process.l1Compare.DumpFile = 'dump_flat.txt'
process.l1Compare.DumpMode = 1
process.l1Compare.COMPARE_COLLS = [1, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0] | SimCalorimetry/EcalElectronicsEmulation/test/Flat2Flat_cfg.py |
# Config file description:
# test ECAL sequence: tcc hw input -> tp digi -> tcc hw input
# check consistency of original and created tcc hardware input files
import FWCore.ParameterSet.Config as cms
process = cms.Process("TCCFlat2Flat")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
#-#-# Flat -> Digi [create digis from input (set of) flat file(s)]
process.load("SimCalorimetry.EcalElectronicsEmulation.EcalFEtoDigi_cfi")
#-#-# Digi -> Flat
process.load("SimCalorimetry.EcalElectronicsEmulation.EcalSimRawData_cfi")
#dump digi collections
process.load("L1Trigger.HardwareValidation.L1Comparator_cfi")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(2048)
)
process.source = cms.Source("EmptySource")
process.tpparams6 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGLutGroupRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.tpparams7 = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGLutIdMapRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
process.EcalTrigPrimESProducer = cms.ESProducer("EcalTrigPrimESProducer",
DatabaseFileEE = cms.untracked.string('TPG_EE.txt'),
#untracked string DatabaseFileEB = "TPG_poweron.txt"//identity tcc lut
DatabaseFileEB = cms.untracked.string('TPG_EB.txt')
)
process.outputEvents = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('ecalFlat2Flat.root')
)
process.p = cms.Path(process.tccFlatToDigi*process.ecalSimRawData*process.l1Compare)
process.outpath = cms.EndPath(process.outputEvents)
process.tccFlatToDigi.SuperModuleId = -1
process.tccFlatToDigi.FlatBaseName = 'data_in/ecal_tcc'
process.tccFlatToDigi.FileEventOffset = 0
process.tccFlatToDigi.UseIdentityLUT = False
process.tccFlatToDigi.debugPrintFlag = False
process.ecalSimRawData.tcc2dccData = False
process.ecalSimRawData.srp2dccData = False
process.ecalSimRawData.fe2dccData = False
process.ecalSimRawData.trigPrimProducer = 'tccFlatToDigi'
process.ecalSimRawData.tcpDigiCollection = 'formatTCP'
process.ecalSimRawData.tpVerbose = False
process.ecalSimRawData.tccInDefaultVal = 0
process.ecalSimRawData.tccNum = -1
process.ecalSimRawData.outputBaseName = 'data_out/ecal'
process.l1Compare.ETP_dataLabel = 'tccFlatToDigi'
process.l1Compare.ETP_emulLabel = 'tccFlatToDigi'
process.l1Compare.DumpFile = 'dump_flat.txt'
process.l1Compare.DumpMode = 1
process.l1Compare.COMPARE_COLLS = [1, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0] | 0.252292 | 0.223589 |
import _nx
import warnings
from .utils import bit, cached_property
AUTO_PLAYER_1_ID = 10
def refresh_inputs():
    """Refreshes inputs.

    Should normally be called at least once
    within every iteration of your main loop.
    """
    _nx.hid_scan_input()
def _determine_controller_type(player):
    # TODO determine the type of the controller for this player via _nx
    # Placeholder: every player is currently assumed to be using a pair
    # of attached Joy-Cons.
    return DualJoyconController
class Controller:
    """
    Represents an abstract controller.

    :attribute: player
    :type: Player

        The player to whom the Controller belongs.

    :attribute: a_button
    :type: Button

        The A button of the controller.

    :attribute: b_button
    :type: Button

        The B button of the controller.

    :attribute: x_button
    :type: Button

        The X button of the controller.

    :attribute: y_button
    :type: Button

        The Y button of the controller.
    """
    def __init__(self, player):
        self.player = player
        # A/B/X/Y occupy key bits 0-3 for every controller type.
        self.a_button = Button(self.player, bit(0))
        self.b_button = Button(self.player, bit(1))
        self.x_button = Button(self.player, bit(2))
        self.y_button = Button(self.player, bit(3))
    @staticmethod
    def from_player(player):
        """
        Build the Controller instance appropriate for *player*.

        Detects the controller type currently associated with the player
        (via _determine_controller_type) and instantiates that subclass.

        :param player: the Player whose controller should be constructed
        :returns: an instance of the detected Controller subclass
        :rtype: Controller
        """
        controller_class = _determine_controller_type(player)
        return controller_class(player)
class JoyconController(Controller):
"""
Represents a single Joycon controller.
:attribute: is_left
:type: bool
Whether the JoyconController is the left or right Joy-Con.
:attribute: parent
:type: <todo>
The parent Controller of the Joy-Con.
:attribute: stick_button
:type: Button
The button located in the analogue stick, when it is pressed.
:attribute: l_or_r_button
:type: Button
Either the L or R button on the controller, dependent on which Joy-Con.
:attribute: zl_or_zr_button
:type: Button
Either the ZL or ZR button on the controller, dependent on which Joy-Con.
:attribute: plus_or_minus_button
:type: Button
Either the + or - button on the controller, dependent on which Joy-Con.
:attribute: stick
:type: Stick
The analogue stick of the controller.
:attribute: left
:type: Button
The analogue stick in the left position.
:attribute: right
:type: Button
The analogue stick in the right position.
:attribute: up
:type: Button
The analogue stick in the up position.
:attribute: down
:type: Button
The analogue stick in the down position.
"""
def __init__(self, player, is_left, parent=None):
super().__init__(player)
self.is_left = is_left
self.parent = parent
if is_left:
self.stick_button = Button(self.player, bit(4))
self.l_or_r_button = Button(self.player, bit(6))
self.zl_or_zr_button = Button(self.player, bit(8))
self.plus_or_minus_button = Button(self.player, bit(11))
self.stick = Stick(self.player, is_left=True)
else:
self.stick_button = Button(self.player, bit(5))
self.l_or_r_button = Button(self.player, bit(7))
self.zl_or_zr_button = Button(self.player, bit(9))
self.plus_or_minus_button = Button(self.player, bit(10))
self.stick = Stick(self.player, is_left=False)
self.left = Button(player, self.stick.left_key_bit)
self.right = Button(player, self.stick.right_key_bit)
self.up = Button(player, self.stick.up_key_bit)
self.down = Button(player, self.stick.down_key_bit)
@cached_property
def sl_button(self):
if self.parent is not None and self.parent.is_attached:
return None
return Button(self.player, bit(24))
@cached_property
def sr_button(self):
if self.parent is not None and self.parent.is_attached:
return None
return Button(self.player, bit(25))
class StandardController(Controller):
    """A controller with the full standard layout: both analogue sticks,
    L/R, ZL/ZR, +/-, and the four directional buttons.
    """
    def __init__(self, player):
        super().__init__(player)
        self.left_stick_button = Button(self.player, bit(4))
        self.right_stick_button = Button(self.player, bit(5))
        self.l_button = Button(self.player, bit(6))
        self.r_button = Button(self.player, bit(7))
        self.zl_button = Button(self.player, bit(8))
        self.zr_button = Button(self.player, bit(9))
        self.plus_button = Button(self.player, bit(10))
        self.minus_button = Button(self.player, bit(11))
        # Directional (d-pad) buttons occupy bits 12-15.
        self.left_button = Button(self.player, bit(12))
        self.up_button = Button(self.player, bit(13))
        self.right_button = Button(self.player, bit(14))
        self.down_button = Button(self.player, bit(15))
        self.left_stick = Stick(self.player, is_left=True)
        self.right_stick = Stick(self.player, is_left=False)
        # The "main" stick defaults to the left one.
        self.stick = self.left_stick
        # left/right/up/down fire on either the main-stick direction or
        # the matching d-pad button (two key bits per Button).
        self.left = Button(player, self.stick.left_key_bit, self.left_button.key_bits[0])
        self.right = Button(player, self.stick.right_key_bit, self.right_button.key_bits[0])
        self.up = Button(player, self.stick.up_key_bit, self.up_button.key_bits[0])
        self.down = Button(player, self.stick.down_key_bit, self.down_button.key_bits[0])
class SwitchProController(StandardController):
    """Represents a Switch Pro Controller.

    Can also be a similar controller with the same buttons.
    """
    pass
class DualJoyconController(StandardController):
    """Represents two Joy-Cons in combination, attached to rails."""
    # Attached Joy-Cons behave as one standard controller; the SL/SR
    # buttons of the individual Joy-Cons are unavailable (see
    # JoyconController.sl_button / sr_button).
    is_attached = True
    def __init__(self, player):
        super().__init__(player)
        self.left_joycon = JoyconController(player, is_left=True, parent=self)
        self.right_joycon = JoyconController(player, is_left=False, parent=self)
class FreeDualJoyconController(DualJoyconController):
    """Represents two Joy-Cons in combination, detached from rails."""
    # Detached Joy-Cons additionally expose their SL/SR buttons.
    is_attached = False
class Button:
    """Represents a button or button-like object.

    A Button is identified by one or more key bitmasks; it is considered
    pressed when any of its bits are set in the player's key state.
    """
    def __init__(self, player, *key_bits):
        self.player = player
        self.key_bits = key_bits
    @property
    def is_pressed(self):
        """Indicates whether the Button is pressed."""
        return any_pressed(self.player, self)
    def __eq__(self, other):
        # Returning NotImplemented (instead of raising TypeError as the
        # original did) lets Python fall back to its default handling, so
        # comparing a Button with an unrelated object yields False instead
        # of blowing up, and `!=` stays well-behaved.
        if not isinstance(other, Button):
            return NotImplemented
        return self.key_bits == other.key_bits
    def __hash__(self):
        # Defining __eq__ alone would make Button unhashable; hash on the
        # same key used for equality so equal buttons hash equally.
        return hash(self.key_bits)
class ButtonGroup(Button):
    """Represents a group of :class:`Button` objects.

    The group behaves like a single Button whose key bits are the union
    of every member's bits; the player is taken from the first button.
    """
    def __init__(self, *buttons):
        if not buttons:
            raise TypeError("At least one Button must be passed")
        # Flatten all member buttons' bitmasks into one sequence.
        key_bits = [key_bit for button in buttons for key_bit in button.key_bits]
        super().__init__(buttons[0].player, *key_bits)
        self.buttons = buttons
    @property
    def pressed(self):
        """Return the list of member buttons that are currently pressed."""
        return which_pressed(self.player, *self.buttons)
class Stick:
    """Represents the analogue stick on the controller.

    Direction state is read from key bits, so `x` and `y` only ever take
    the values -1.0, 0.0 or 1.0 (digital, not a true analogue reading).
    """
    def __init__(self, player, is_left):
        self.player = player
        self.is_left = is_left
        # Left-stick directions live in bits 16-19, right-stick in 20-23
        # (bit order: left, up, right, down).
        if is_left:
            self.left_key_bit = bit(16)
            self.right_key_bit = bit(18)
            self.up_key_bit = bit(17)
            self.down_key_bit = bit(19)
        else:
            self.left_key_bit = bit(20)
            self.right_key_bit = bit(22)
            self.up_key_bit = bit(21)
            self.down_key_bit = bit(23)
    @property
    def left(self):
        """
        :return: A value indicating whether or not the stick is in the left position
        :rtype: bool
        """
        return self.x < 0.0
    @property
    def right(self):
        """
        :return: A value indicating whether or not the stick is in the right position
        :rtype: bool
        """
        return self.x > 0.0
    @property
    def up(self):
        """
        :return: A value indicating whether or not the stick is in the up position
        :rtype: bool
        """
        return self.y > 0.0
    @property
    def down(self):
        """
        :return: A value indicating whether or not the stick is in the down position
        :rtype: bool
        """
        return self.y < 0.0
    @property
    def x(self):
        """
        The current x value of the analogue stick

        :return: -1.0 (left), 1.0 (right) or 0.0 (centred).
        :rtype: float
        """
        # Player 1 is addressed through the special AUTO_PLAYER_1_ID slot;
        # other players map to their zero-based index.
        keys_pressed = _nx.hid_keys_down(self.player.number - 1 if self.player.number != 1 else AUTO_PLAYER_1_ID)
        if keys_pressed & self.left_key_bit:
            return -1.0
        if keys_pressed & self.right_key_bit:
            return 1.0
        return 0.0
    @property
    def y(self):
        """
        The current y value of the analogue stick

        :return: 1.0 (up), -1.0 (down) or 0.0 (centred).
        :rtype: float
        """
        keys_pressed = _nx.hid_keys_down(self.player.number - 1 if self.player.number != 1 else AUTO_PLAYER_1_ID)
        if keys_pressed & self.up_key_bit:
            return 1.0
        if keys_pressed & self.down_key_bit:
            return -1.0
        return 0.0
def any_pressed(player, *buttons: Button, refresh_input=False):
    """Checks if any of the given buttons are pressed, or if
    any buttons are pressed at all in case no buttons are given.

    Parameters
    ----------
    player: :class:`Player`
        The player whose input state is inspected.
    buttons: Optional[one or more :class:`Button` objects]
        Buttons to check for.  With no buttons, checks whether any key
        at all is down.
    refresh_input: Optional[bool]
        Whether or not to check for new inputs.
        Checks with inputs from last refresh if False.
        Defaults to False.
    """
    if refresh_input:
        refresh_inputs()
    hid_id = player.number - 1 if player.number != 1 else AUTO_PLAYER_1_ID
    keys_pressed = _nx.hid_keys_down(hid_id)
    if not buttons:
        return keys_pressed != 0
    return any(
        keys_pressed & key_bit
        for button in buttons
        for key_bit in button.key_bits
    )
def is_pressed(player, button: Button, refresh_input=False):
    """Checks whether a single button is pressed.

    Thin wrapper around :func:`any_pressed` for the one-button case.

    Parameters
    ----------
    player: :class:`Player`
        The player whose input state is inspected.
    button: :class:`Button`
        Button to check for.
    refresh_input: Optional[bool]
        Whether or not to check for new inputs.
        Checks with inputs from last refresh if False.
        Defaults to False.
    """
    return any_pressed(player, button, refresh_input=refresh_input)
def which_pressed(player, *buttons: Button, refresh_input=False):
    """Checks which of the given buttons are pressed.

    Parameters
    ----------
    player: :class:`Player`
        The player whose input state is inspected.
    buttons: one or more :class:`Button` objects
        Buttons to check for.
    refresh_input: Optional[bool]
        Whether or not to check for new inputs.
        Checks with inputs from last refresh if False.
        Defaults to False.

    Returns
    -------
    A list of the :class:`Button` objects currently pressed, each listed
    at most once.
    """
    if not buttons:
        raise TypeError("At least one Button must be passed")
    # The original scanned and fetched the key state twice (the first
    # result was discarded before the argument check); validate first,
    # then read once, going through refresh_inputs() for consistency
    # with any_pressed().
    if refresh_input:
        refresh_inputs()
    keys_pressed = _nx.hid_keys_down(player.number - 1 if player.number != 1 else AUTO_PLAYER_1_ID)
    buttons_pressed = []
    for button in buttons:
        # A button counts once, no matter how many of its bits are down
        # (the original appended one duplicate per matching bit).
        if any(keys_pressed & key_bit for key_bit in button.key_bits):
            buttons_pressed.append(button)
    return buttons_pressed
import _nx
import warnings
from .utils import bit, cached_property
AUTO_PLAYER_1_ID = 10
def refresh_inputs():
"""Refreshes inputs.
Should normally be called at least once
within every iteration of your main loop.
"""
_nx.hid_scan_input()
def _determine_controller_type(player):
# TODO determine the type of the controller for this player via _nx
return DualJoyconController
class Controller:
"""
Represents an abstract controller.
:attribute: player
:type: Player
The player to whom the Controller belongs to.
:attribute: a_button
:type: Button
The A button of the controller.
:attribute: b_button
:type: Button
The B button of the controller.
:attribute: x_button
:type: Button
The X button of the controller.
:attribute: y_button
:type: Button
The Y button of the controller.
"""
def __init__(self, player):
self.player = player
self.a_button = Button(self.player, bit(0))
self.b_button = Button(self.player, bit(1))
self.x_button = Button(self.player, bit(2))
self.y_button = Button(self.player, bit(3))
@staticmethod
def from_player(player):
"""
<todo>
:param player: <todo>
:returns: <todo> Controller class
:rtype: Controller
"""
controller_class = _determine_controller_type(player)
return controller_class(player)
class JoyconController(Controller):
"""
Represents a single Joycon controller.
:attribute: is_left
:type: bool
Whether the JoyconController is the left or right Joy-Con.
:attribute: parent
:type: <todo>
The parent Controller of the Joy-Con.
:attribute: stick_button
:type: Button
The button located in the analogue stick, when it is pressed.
:attribute: l_or_r_button
:type: Button
Either the L or R button on the controller, dependent on which Joy-Con.
:attribute: zl_or_zr_button
:type: Button
Either the ZL or ZR button on the controller, dependent on which Joy-Con.
:attribute: plus_or_minus_button
:type: Button
Either the + or - button on the controller, dependent on which Joy-Con.
:attribute: stick
:type: Stick
The analogue stick of the controller.
:attribute: left
:type: Button
The analogue stick in the left position.
:attribute: right
:type: Button
The analogue stick in the right position.
:attribute: up
:type: Button
The analogue stick in the up position.
:attribute: down
:type: Button
The analogue stick in the down position.
"""
def __init__(self, player, is_left, parent=None):
super().__init__(player)
self.is_left = is_left
self.parent = parent
if is_left:
self.stick_button = Button(self.player, bit(4))
self.l_or_r_button = Button(self.player, bit(6))
self.zl_or_zr_button = Button(self.player, bit(8))
self.plus_or_minus_button = Button(self.player, bit(11))
self.stick = Stick(self.player, is_left=True)
else:
self.stick_button = Button(self.player, bit(5))
self.l_or_r_button = Button(self.player, bit(7))
self.zl_or_zr_button = Button(self.player, bit(9))
self.plus_or_minus_button = Button(self.player, bit(10))
self.stick = Stick(self.player, is_left=False)
self.left = Button(player, self.stick.left_key_bit)
self.right = Button(player, self.stick.right_key_bit)
self.up = Button(player, self.stick.up_key_bit)
self.down = Button(player, self.stick.down_key_bit)
@cached_property
def sl_button(self):
if self.parent is not None and self.parent.is_attached:
return None
return Button(self.player, bit(24))
@cached_property
def sr_button(self):
if self.parent is not None and self.parent.is_attached:
return None
return Button(self.player, bit(25))
class StandardController(Controller):
    """A controller exposing the full standard Switch button layout."""
    def __init__(self, player):
        super().__init__(player)
        # Attribute name -> key-bit index for every plain button.
        button_layout = (
            ('left_stick_button', 4), ('right_stick_button', 5),
            ('l_button', 6), ('r_button', 7),
            ('zl_button', 8), ('zr_button', 9),
            ('plus_button', 10), ('minus_button', 11),
            ('left_button', 12), ('up_button', 13),
            ('right_button', 14), ('down_button', 15),
        )
        for attr_name, bit_index in button_layout:
            setattr(self, attr_name, Button(self.player, bit(bit_index)))
        self.left_stick = Stick(self.player, is_left=True)
        self.right_stick = Stick(self.player, is_left=False)
        # The left stick doubles as the default stick.
        self.stick = self.left_stick
        # Directional aliases respond to both the stick and the d-pad.
        self.left = Button(player, self.stick.left_key_bit, self.left_button.key_bits[0])
        self.right = Button(player, self.stick.right_key_bit, self.right_button.key_bits[0])
        self.up = Button(player, self.stick.up_key_bit, self.up_button.key_bits[0])
        self.down = Button(player, self.stick.down_key_bit, self.down_button.key_bits[0])
class SwitchProController(StandardController):
    """A Switch Pro Controller (or any pad with the same button set).

    All behaviour is inherited unchanged from :class:`StandardController`.
    """
class DualJoyconController(StandardController):
    """Represents two Joy-Cons in combination, attached to rails"""
    # Attached to rails: JoyconController.sl_button / sr_button inspect this
    # flag through their ``parent`` reference and report None while True.
    is_attached = True
    def __init__(self, player):
        super().__init__(player)
        # Each half is also exposed as its own JoyconController, with this
        # combined controller as the parent.
        self.left_joycon = JoyconController(player, is_left=True, parent=self)
        self.right_joycon = JoyconController(player, is_left=False, parent=self)
class FreeDualJoyconController(DualJoyconController):
    """Represents two Joy-Cons in combination, detached from rails"""
    # Detached from the rails: the SL/SR buttons on each half are usable.
    is_attached = False
class Button:
    """Represents a button or button-like object.

    :param player: The player this button belongs to.
    :param key_bits: One or more key bit masks that trigger this button.
    """
    def __init__(self, player, *key_bits):
        self.player = player
        self.key_bits = key_bits

    @property
    def is_pressed(self):
        """Indicates whether the Button is pressed."""
        return any_pressed(self.player, self)

    def __eq__(self, other):
        # Returning NotImplemented (instead of raising, as the original did)
        # lets Python fall back to identity comparison, so expressions such
        # as ``button == None`` or ``button in some_list`` behave sanely.
        if not isinstance(other, Button):
            return NotImplemented
        return self.key_bits == other.key_bits

    def __hash__(self):
        # Defining __eq__ alone would make Button unhashable; hash the same
        # data that equality compares (key_bits is always a tuple).
        return hash(self.key_bits)
class ButtonGroup(Button):
    """Represents a group of :class:`Button` objects."""
    def __init__(self, *buttons):
        if not buttons:
            raise TypeError("At least one Button must be passed")
        # Flatten every member button's key bits into one combined button.
        combined_bits = []
        for grouped_button in buttons:
            combined_bits.extend(grouped_button.key_bits)
        super().__init__(buttons[0].player, *combined_bits)
        self.buttons = buttons

    @property
    def pressed(self):
        """The subset of member buttons currently pressed."""
        return which_pressed(self.player, *self.buttons)
class Stick:
    """Represents the analogue stick on the controller."""
    def __init__(self, player, is_left):
        self.player = player
        self.is_left = is_left
        # The direction key bits come in two contiguous groups of four:
        # left stick at 16-19, right stick at 20-23,
        # ordered left, up, right, down within each group.
        base = 16 if is_left else 20
        self.left_key_bit = bit(base)
        self.up_key_bit = bit(base + 1)
        self.right_key_bit = bit(base + 2)
        self.down_key_bit = bit(base + 3)

    def _keys_down(self):
        """Read the currently held keys for this stick's player."""
        player_id = (self.player.number - 1
                     if self.player.number != 1 else AUTO_PLAYER_1_ID)
        return _nx.hid_keys_down(player_id)

    @property
    def left(self):
        """
        :return: A value indicating whether or not the stick is in the left position
        :rtype: bool
        """
        return self.x < 0.0

    @property
    def right(self):
        """
        :return: A value indicating whether or not the stick is in the right position
        :rtype: bool
        """
        return self.x > 0.0

    @property
    def up(self):
        """
        :return: A value indicating whether or not the stick is in the up position
        :rtype: bool
        """
        return self.y > 0.0

    @property
    def down(self):
        """
        :return: A value indicating whether or not the stick is in the down position
        :rtype: bool
        """
        return self.y < 0.0

    @property
    def x(self):
        """The stick's x axis: -1.0 (left), 1.0 (right) or 0.0 (centred).

        :rtype: float
        """
        held = self._keys_down()
        if held & self.left_key_bit:
            return -1.0
        if held & self.right_key_bit:
            return 1.0
        return 0.0

    @property
    def y(self):
        """The stick's y axis: 1.0 (up), -1.0 (down) or 0.0 (centred).

        :rtype: float
        """
        held = self._keys_down()
        if held & self.up_key_bit:
            return 1.0
        if held & self.down_key_bit:
            return -1.0
        return 0.0
def any_pressed(player, *buttons: Button, refresh_input=False):
    """Checks if any of the given buttons are pressed, or if
    any buttons are pressed at all in case no buttons are given.

    Parameters
    ----------
    player: :class:`Player`
        The player to check with.
    buttons: Optional[one or more :class:`Button` objects OR Tuple[Button]]
        Buttons to check for. Checks if no Button is pressed if none given.
    refresh_input: Optional[bool]
        Whether or not to check for new inputs.
        Checks with inputs from last refresh if False.
        Defaults to False.
    """
    if refresh_input:
        refresh_inputs()
    keys_pressed = _nx.hid_keys_down(player.number - 1 if player.number != 1 else AUTO_PLAYER_1_ID)
    if not buttons:
        # No buttons given: report whether anything at all is held.
        return keys_pressed != 0
    # A button counts as pressed when any one of its key bits is held.
    return any(keys_pressed & key_bit
               for button in buttons
               for key_bit in button.key_bits)
def is_pressed(player, button: Button, refresh_input=False):
    """Checks whether a single button is pressed.

    Thin convenience wrapper around :func:`any_pressed` for the
    one-button case. (The original docstring was copy-pasted from
    :func:`any_pressed` and described the wrong contract.)

    Parameters
    ----------
    player: :class:`Player`
        The player to check with.
    button: :class:`Button`
        Button to check for.
    refresh_input: Optional[bool]
        Whether or not to check for new inputs.
        Checks with inputs from last refresh if False.
        Defaults to False.
    """
    return any_pressed(player, button, refresh_input=refresh_input)
def which_pressed(player, *buttons: Button, refresh_input=False):
    """Checks which of the given buttons are pressed.

    Parameters
    ----------
    player: :class:`Player`
        The player to check with.
    buttons: one or more :class:`Button` objects OR Tuple[Button]
        Buttons to check for.
    refresh_input: Optional[bool]
        Whether or not to check for new inputs.
        Checks with inputs from last refresh if False.
        Defaults to False.

    Returns
    -------
    A list of :class:`Button` objects.
    """
    # Validate arguments before touching the HID state; the original read
    # the input state twice (once via _nx.hid_scan_input, once via
    # refresh_inputs) around this check.
    if not buttons:
        raise TypeError("At least one Button must be passed")
    if refresh_input:
        # Use refresh_inputs() for consistency with any_pressed().
        refresh_inputs()
    keys_pressed = _nx.hid_keys_down(player.number - 1 if player.number != 1 else AUTO_PLAYER_1_ID)
    buttons_pressed = []
    for button in buttons:
        # Note: a button with several held key bits is appended once per
        # matching bit, preserving the original behaviour.
        for key_bit in button.key_bits:
            if keys_pressed & key_bit:
                buttons_pressed.append(button)
    return buttons_pressed
import re, os
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
def find_entities_in_tables(dirctory_path: str, file_list, entities: dict):
    """Count regex matches for each entity inside every table of every XML file.

    Parameters
    ----------
    dirctory_path: str
        Directory containing the XML files. (Name kept for backward
        compatibility with keyword callers, despite the typo.)
    file_list:
        Iterable of XML file names; ``%`` in a name encodes ``/`` of the DOI.
    entities: dict
        Mapping of entity name -> regex pattern to count per table.

    Writes one row per table to ``Entities_in_tables.xlsx``.
    """
    data_frame = []
    columns = ['DOI', 'Tab_nr', 'Tab_text'] + list(entities.keys())
    count = 0
    for file_name in file_list:
        with open(os.path.join(dirctory_path, file_name)) as file:
            soup = BeautifulSoup(file)
        tables = []
        tables.extend(soup.find_all("table-wrap"))
        tables.extend(soup.find_all("ce:table"))
        doi = file_name.replace('%', '/').replace('.xml', '')
        print(f'File now being searched: {doi}')
        for table in tables:
            instance = [doi]
            # Table label (number) and caption, from either XML schema.
            instance.append(_first_tag_text(table, "ce:label", "label"))
            instance.append(_first_tag_text(table, "ce:simple-para", "p"))
            table_string = str(table.text)
            # BUG FIX: iterate the ``entities`` parameter; the original
            # accidentally read the module-level ``expressions`` global,
            # ignoring whatever the caller passed in.
            for expression in entities.values():
                instance.append(len(re.findall(expression, table_string)))
            data_frame.append(instance)
        count += 1
        print(f'file {count} of {len(file_list)} processed')
    df = pd.DataFrame(data_frame, columns=columns)
    df.to_excel("Entities_in_tables.xlsx")

def _first_tag_text(table, *tag_names):
    """Return the text of the first matching tag, or NaN when none is present.

    Replaces the original nested bare ``except`` blocks with an explicit
    None check. ``np.nan`` is used instead of ``np.NaN`` (the latter was
    removed in NumPy 2.0).
    """
    for tag_name in tag_names:
        tag = table.find(tag_name)
        if tag is not None:
            return tag.getText()
    return np.nan
# --- Script entry: configuration and kick-off ---------------------------------
file_list = pd.read_excel("Files_classifications.xlsx")
src = "/XML directory/path"
# Entity name -> regex pattern.
# BUG FIX: patterns are now raw strings. In a normal string literal '\b' is a
# BACKSPACE character (\x08), not a regex word boundary, which silently broke
# every \b-based pattern below (Dose, Temp, YS, UTS, HV, UE, TE, Embrit).
expressions = {
    '316SS': r'SS\s*[-]?\s*316|316\w*\s*[-]?\s*SS|316\w*\s*[-]?\s*(stainless)?\s*[-]?\s*steel|(stainless)?\s*[-]?\s*steel\s*[-]?\s*316|AISI\s*[-]?\s*\w*\s*[-]?\s*316',
    'HT9': r'(?:HT|Ht|ht)\s*[-]?\s*9',
    'Zr4': r'([Zz]ircal{1,2}oy|[Zz]r)\s*[-]?\s*4',
    'Dose': r'\b[Dd]ose\b|\bdpa\b|\bDPA\b|n\/cm\^*(<sup loc="post">)*2|[Nn]*(eutron)*\s*[-]?\s*([Ff]luence|[Ff]lux)|[Ff]luence|[Ff]lux',
    'Temp': r'[Tt]emperature|[Tt]emp\.*|°C|°F|\bT\b|\bK\b',
    'YS': r'[Yy]ield[–]?\s*[-]?\s*[Ss](?:trength|tress)|\bYS\b',
    'UTS': r'(?:[Uu]ltimate\s*[-]?\s*(?:[Tt]ensile)*|[Tt]ensile)\s*[-]?\s*[Ss](?:trength|tress)|\bUTS\b|\bTS\b',
    'HV': r'[Hh]ardness|[Hh]ardening|\bHV\b|\bHRC?\b',
    'UE': r'[Uu]niform\s*[-]?\s*(?:[Pp]lastic)?\s*[-]?\s*(?:[Ee]longation|[Ss]train)|\bUE\b',
    'TE': r'(?:[Tt]otal|[Uu]ltimate)\s*[-]?\s*(?:[Pp]lastic)?\s*[-]?\s*(?:[Ee]longation|[Ss]train)|(?:[Ee]longation|[Ss]train)\s*[-]?\s*at\s*[-]?\s*(?:[Bb]reak|[Ff]racture)|\bTE\b',
    'F_tough': r'[Ff]racture\s*[-]?\s*[Tt]oughness|[Jj]\s*[-]?\s*[Ii]ntegral|k?J\/m\^*(<sup loc="post">)*2|MPa\s?.?\s?m\^*(<sup loc="post">)*1\/2|[Ff]racture',
    'Embrit': r'(?:[Dd]uctile\s*[-]?\s*[Bb]rittle)?\s*[-]?\s*[Tt]ransition\s*[-]?\s*(?:[Tt]emperature|[Tt]emp\.*|\bΔT\b|\bTT\b)|(?:[Uu]pper\s*[-]?\s*[Ss]helf\s*[-]?\s*[Ee]nergy|USE)\s*[-]?\s*[Ss]hift|\bΔ?USE\b|[Ee]mbrittlement|[Ii]mpact\s*[-]?\s*[Tt]est|Charpy',
    'Creep': r'[Cc]reep\s*[-]?\s*(?:[Rr]ate|[Ss]train|[Cc]ompliance)|[Tt]ensile\s*[-]?\s*[Ss]train|[Ss]train\s*[-]?\s*[Rr]ate|[Cc]reep',
    'Swelling': r'[Ss]welling\s*[-]?\s*[Rr]ate|[Ss]welling'
}
find_entities_in_tables(src, file_list.values, expressions)
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
def find_entities_in_tables(dirctory_path: str, file_list, entities: dict):
data_frame = []
columns= ['DOI', 'Tab_nr', 'Tab_text'] + [key for key in entities.keys()]
count = 0
for file_name in file_list:
with open(os.path.join(dirctory_path, file_name)) as file:
soup = BeautifulSoup(file)
tables = []
tables.extend(soup.find_all("table-wrap"))
tables.extend(soup.find_all("ce:table"))
doi = file_name.replace('%', '/').replace('.xml', '')
print(f'File now being searched: {doi}')
for table in tables:
instance = [doi]
try:
instance.append(table.find("ce:label").getText())
except:
try:
instance.append(table.find("label").getText())
except:
instance.append(np.NaN)
try:
instance.append(table.find("ce:simple-para").getText())
except:
try:
instance.append(table.find("p").getText())
except:
instance.append(np.NaN)
table_string = str(table.text)
for expression in expressions.values():
instance.append(len(re.findall(expression, table_string)))
data_frame.append(instance)
count += 1
print(f'file {count} of {len(file_list)} processed')
df = pd.DataFrame(data_frame, columns=columns)
df.to_excel("Entities_in_tables.xlsx")
file_list = pd.read_excel("Files_classifications.xlsx")
src = "/XML directory/path"
expressions = {
'316SS': 'SS\s*[-]?\s*316|316\w*\s*[-]?\s*SS|316\w*\s*[-]?\s*(stainless)?\s*[-]?\s*steel|(stainless)?\s*[-]?\s*steel\s*[-]?\s*316|AISI\s*[-]?\s*\w*\s*[-]?\s*316',
'HT9': '(?:HT|Ht|ht)\s*[-]?\s*9',
'Zr4': '([Zz]ircal{1,2}oy|[Zz]r)\s*[-]?\s*4',
'Dose': '\b[Dd]ose\b|\bdpa\b|\bDPA\b|n\/cm\^*(<sup loc="post">)*2|[Nn]*(eutron)*\s*[-]?\s*([Ff]luence|[Ff]lux)|[Ff]luence|[Ff]lux',
'Temp': '[Tt]emperature|[Tt]emp\.*|°C|°F|\bT\b|\bK\b',
'YS': '[Yy]ield[–]?\s*[-]?\s*[Ss](?:trength|tress)|\bYS\b',
'UTS': '(?:[Uu]ltimate\s*[-]?\s*(?:[Tt]ensile)*|[Tt]ensile)\s*[-]?\s*[Ss](?:trength|tress)|\bUTS\b|\bTS\b',
'HV': '[Hh]ardness|[Hh]ardening|\bHV\b|\bHRC?\b',
'UE': '[Uu]niform\s*[-]?\s*(?:[Pp]lastic)?\s*[-]?\s*(?:[Ee]longation|[Ss]train)|\bUE\b',
'TE': '(?:[Tt]otal|[Uu]ltimate)\s*[-]?\s*(?:[Pp]lastic)?\s*[-]?\s*(?:[Ee]longation|[Ss]train)|(?:[Ee]longation|[Ss]train)\s*[-]?\s*at\s*[-]?\s*(?:[Bb]reak|[Ff]racture)|\bTE\b',
'F_tough': '[Ff]racture\s*[-]?\s*[Tt]oughness|[Jj]\s*[-]?\s*[Ii]ntegral|k?J\/m\^*(<sup loc="post">)*2|MPa\s?.?\s?m\^*(<sup loc="post">)*1\/2|[Ff]racture',
'Embrit': '(?:[Dd]uctile\s*[-]?\s*[Bb]rittle)?\s*[-]?\s*[Tt]ransition\s*[-]?\s*(?:[Tt]emperature|[Tt]emp\.*|\bΔT\b|\bTT\b)|(?:[Uu]pper\s*[-]?\s*[Ss]helf\s*[-]?\s*[Ee]nergy|USE)\s*[-]?\s*[Ss]hift|\bΔ?USE\b|[Ee]mbrittlement|[Ii]mpact\s*[-]?\s*[Tt]est|Charpy',
'Creep': '[Cc]reep\s*[-]?\s*(?:[Rr]ate|[Ss]train|[Cc]ompliance)|[Tt]ensile\s*[-]?\s*[Ss]train|[Ss]train\s*[-]?\s*[Rr]ate|[Cc]reep',
'Swelling': '[Ss]welling\s*[-]?\s*[Rr]ate|[Ss]welling'
}
find_entities_in_tables(src, file_list.values, expressions) | 0.118181 | 0.291006 |
from yalexs.activity import (
ACTION_BRIDGE_OFFLINE,
ACTION_BRIDGE_ONLINE,
ACTION_DOOR_CLOSED,
ACTION_DOOR_OPEN,
ACTION_DOORBELL_BUTTON_PUSHED,
ACTION_DOORBELL_IMAGE_CAPTURE,
ACTION_DOORBELL_MOTION_DETECTED,
ACTION_LOCK_JAMMED,
ACTION_LOCK_LOCK,
ACTION_LOCK_LOCKING,
ACTION_LOCK_UNLOCK,
ACTION_LOCK_UNLOCKING,
SOURCE_PUBNUB,
)
from yalexs.api_common import _activity_from_dict
from yalexs.doorbell import DOORBELL_STATUS_KEY, DoorbellDetail
from yalexs.lock import (
DOOR_STATE_KEY,
LOCK_STATUS_KEY,
LockDetail,
LockDoorStatus,
LockStatus,
determine_door_state,
determine_lock_status,
)
def activities_from_pubnub_message(device, date_time, message):
    """Create activities from pubnub."""
    activities = []
    activity_dict = {
        "deviceID": device.device_id,
        "house": device.house_id,
        "dateTime": date_time.timestamp() * 1000,
        "deviceName": device.device_name,
    }
    if isinstance(device, LockDetail):
        activity_dict["deviceType"] = "lock"
        activity_dict["info"] = message.get("info", {})
        if "remoteEvent" in message:
            activity_dict["info"]["remote"] = True
        if LOCK_STATUS_KEY in message:
            status = message[LOCK_STATUS_KEY]
            # Bridge connectivity changes ride on the same status field.
            if status in (ACTION_BRIDGE_ONLINE, ACTION_BRIDGE_OFFLINE):
                _add_activity(activities, activity_dict, status)
            lock_actions = {
                LockStatus.LOCKED: ACTION_LOCK_LOCK,
                LockStatus.UNLOCKED: ACTION_LOCK_UNLOCK,
                LockStatus.LOCKING: ACTION_LOCK_LOCKING,
                LockStatus.UNLOCKING: ACTION_LOCK_UNLOCKING,
                LockStatus.JAMMED: ACTION_LOCK_JAMMED,
            }
            lock_action = lock_actions.get(determine_lock_status(status))
            if lock_action is not None:
                _add_activity(activities, activity_dict, lock_action)
        if DOOR_STATE_KEY in message:
            door_actions = {
                LockDoorStatus.OPEN: ACTION_DOOR_OPEN,
                LockDoorStatus.CLOSED: ACTION_DOOR_CLOSED,
            }
            door_action = door_actions.get(
                determine_door_state(message[DOOR_STATE_KEY])
            )
            if door_action is not None:
                _add_activity(activities, activity_dict, door_action)
    elif isinstance(device, DoorbellDetail):
        activity_dict["deviceType"] = "doorbell"
        info = activity_dict["info"] = message.get("data", {})
        info.setdefault("image", info.get("result", {}))
        info.setdefault("started", activity_dict["dateTime"])
        info.setdefault("ended", activity_dict["dateTime"])
        if DOORBELL_STATUS_KEY in message:
            status = message[DOORBELL_STATUS_KEY]
            # Doorbell statuses map directly onto activity actions.
            if status in (
                ACTION_DOORBELL_MOTION_DETECTED,
                ACTION_DOORBELL_IMAGE_CAPTURE,
                ACTION_DOORBELL_BUTTON_PUSHED,
            ):
                _add_activity(activities, activity_dict, status)
    return activities
def _add_activity(activities, activity_dict, action):
    """Append an activity built from *activity_dict* plus the given action."""
    enriched = dict(activity_dict, action=action)
    activities.append(_activity_from_dict(SOURCE_PUBNUB, enriched))
ACTION_BRIDGE_OFFLINE,
ACTION_BRIDGE_ONLINE,
ACTION_DOOR_CLOSED,
ACTION_DOOR_OPEN,
ACTION_DOORBELL_BUTTON_PUSHED,
ACTION_DOORBELL_IMAGE_CAPTURE,
ACTION_DOORBELL_MOTION_DETECTED,
ACTION_LOCK_JAMMED,
ACTION_LOCK_LOCK,
ACTION_LOCK_LOCKING,
ACTION_LOCK_UNLOCK,
ACTION_LOCK_UNLOCKING,
SOURCE_PUBNUB,
)
from yalexs.api_common import _activity_from_dict
from yalexs.doorbell import DOORBELL_STATUS_KEY, DoorbellDetail
from yalexs.lock import (
DOOR_STATE_KEY,
LOCK_STATUS_KEY,
LockDetail,
LockDoorStatus,
LockStatus,
determine_door_state,
determine_lock_status,
)
def activities_from_pubnub_message(device, date_time, message):
"""Create activities from pubnub."""
activities = []
activity_dict = {
"deviceID": device.device_id,
"house": device.house_id,
"dateTime": date_time.timestamp() * 1000,
"deviceName": device.device_name,
}
if isinstance(device, LockDetail):
activity_dict["deviceType"] = "lock"
activity_dict["info"] = message.get("info", {})
if "remoteEvent" in message:
activity_dict["info"]["remote"] = True
if LOCK_STATUS_KEY in message:
status = message[LOCK_STATUS_KEY]
if status == ACTION_BRIDGE_ONLINE:
_add_activity(activities, activity_dict, ACTION_BRIDGE_ONLINE)
elif status == ACTION_BRIDGE_OFFLINE:
_add_activity(activities, activity_dict, ACTION_BRIDGE_OFFLINE)
lock_status = determine_lock_status(status)
if lock_status == LockStatus.LOCKED:
_add_activity(activities, activity_dict, ACTION_LOCK_LOCK)
elif lock_status == LockStatus.UNLOCKED:
_add_activity(activities, activity_dict, ACTION_LOCK_UNLOCK)
elif lock_status == LockStatus.LOCKING:
_add_activity(activities, activity_dict, ACTION_LOCK_LOCKING)
elif lock_status == LockStatus.UNLOCKING:
_add_activity(activities, activity_dict, ACTION_LOCK_UNLOCKING)
elif lock_status == LockStatus.JAMMED:
_add_activity(activities, activity_dict, ACTION_LOCK_JAMMED)
if DOOR_STATE_KEY in message:
door_state = determine_door_state(message[DOOR_STATE_KEY])
if door_state == LockDoorStatus.OPEN:
_add_activity(activities, activity_dict, ACTION_DOOR_OPEN)
elif door_state == LockDoorStatus.CLOSED:
_add_activity(activities, activity_dict, ACTION_DOOR_CLOSED)
elif isinstance(device, DoorbellDetail):
activity_dict["deviceType"] = "doorbell"
info = activity_dict["info"] = message.get("data", {})
info.setdefault("image", info.get("result", {}))
info.setdefault("started", activity_dict["dateTime"])
info.setdefault("ended", activity_dict["dateTime"])
if DOORBELL_STATUS_KEY in message:
status = message[DOORBELL_STATUS_KEY]
if status in (
ACTION_DOORBELL_MOTION_DETECTED,
ACTION_DOORBELL_IMAGE_CAPTURE,
ACTION_DOORBELL_BUTTON_PUSHED,
):
_add_activity(activities, activity_dict, status)
return activities
def _add_activity(activities, activity_dict, action):
activity_dict = activity_dict.copy()
activity_dict["action"] = action
activities.append(_activity_from_dict(SOURCE_PUBNUB, activity_dict)) | 0.439988 | 0.082365 |
import collections.abc
import copy
import os
import re
from functools import wraps
from werkzeug.datastructures import MultiDict
from flask import current_app, g, flash, abort
from flask_login import current_user
from .paths import ShellDirectory
def read_rules_file(rule_file):
    """Generate (key, value) tuples from the rules file.

    Lines are sorted by their key first, so equal keys come out grouped.

    :param rule_file: An open, readable ``rules`` file object whose useful
        lines look like ``Key=/some/path``. (The original docstring claimed
        ``str``, but the function calls ``readlines`` on it.)
    :type rule_file: file-like object yielding str lines
    """
    # Drop blank or too-short lines that cannot hold a ``k=v`` pair.
    good_lines = [line for line in rule_file.readlines() if len(line) > 2]
    sorted_lines = sorted(good_lines,
                          key=lambda line: re.match(r"^(\w+)\=", line).group(1))
    for line in sorted_lines:
        pair = re.search(r"(\w*)\=([/\.\w]*)", line)
        yield (pair.group(1), pair.group(2))
def enforce_mapped(mapped_dirs, requested_path, for_upload=False):
    """Enforce the rules from the rules file on requested_path.

    :param mapped_dirs: A collection of mapped directories.
    :type mapped_dirs: An instance of :class:`MappedDirectories`.
    :param requested_path: The path of the directory to check permissions of.
    :type requested_path: str
    :param for_upload: Whether or not to enforce for an upload. **Default: False**
    :type for_upload: bool
    """
    requested_md = mapped_dirs.get_mapped_dir(requested_path)
    for mapped_dir in mapped_dirs:
        if requested_md != mapped_dir:
            continue
        # Pick the permission flag relevant to the requested operation.
        permitted = (mapped_dir.dir_allowuploads if for_upload
                     else mapped_dir.dir_allowed)
        if permitted:
            return
        break
    # Can't find a damn thing? Abort!
    abort(403)
def needs_rules(needing_method):
    """A decorator to wrap around ``routes`` requiring rules.

    Loads the application's rules file once per request into ``g.fm_rules``,
    layering per-user virtual rules (the user's home folder plus folders
    shared with them) on top for authenticated users.
    """
    @wraps(needing_method)
    def load_rules(*args, **kwargs):
        rules_file = current_app.config['RULES_FILE']
        # Build the rule set only once per request context.
        if not hasattr(g, 'fm_rules'):
            the_rules = Rules(rules_file)
            if current_user.is_authenticated:
                # Authenticated users always get access (and uploads)
                # inside their own home folder.
                users_rules = VirtualRules.make_virtual(the_rules)
                users_rules.allowed(current_user.home_folder)
                users_rules.allow_uploads(current_user.home_folder)
                # Add shares
                if current_user.shares_received.count() > 0:
                    for share in current_user.shares_received.all():
                        users_rules.allowed(share.owner.home_folder)
                the_rules = users_rules
            g.fm_rules = the_rules
        # RULES ARE PRIMED AND READY!
        return needing_method(*args, **kwargs)
    return load_rules
class Rules:
    """Class representing the ``rules`` file.

    :param rule_file: Path to the ``rules`` file
    :type rule_file: str
    """
    def __init__(self, rule_file):
        try:
            if rule_file is None:
                self._rules = MultiDict()
            else:
                with open(rule_file, 'r') as f:
                    self._rules = MultiDict(read_rules_file(f))
        except FileNotFoundError:
            # A missing rules file simply means "no rules".
            self._rules = MultiDict()

    @property
    def rules(self):
        """A *werkzeug* **MultiDict** of rules."""
        return self._rules

    @property
    def num_rules(self):
        """The number of rules in the ``rules`` file."""
        rule_keys = ('Allowed', 'AllowUpload', 'AllowUploads', 'Disallowed',
                     'DisAllowed')
        return sum(len(values) for key, values in self._rules.lists()
                   if key in rule_keys)

    def __len__(self):
        return self.num_rules
# C'mon pylint VirtualRules derives from Rules
# Derived classes get them juicy protecteds
# pylint: disable=protected-access
class VirtualRules(Rules):
    """Mutable version of :class:`Rules`.

    Construction from a file in this derivation is handled by ``template`` param.
    To copy from a :class:`Rules` use :meth:`make_virtual`.

    :param template: Identical to the ``rule_file`` param in :class:`Rules`.
    :type template: str
    """
    def __init__(self, template=None):
        Rules.__init__(self, template)

    def _remove_item(self, key, value):
        """Remove one (key, value) pair, keeping every other value of key."""
        value_list = self._rules.poplist(key)
        if not value_list:
            return
        for val in value_list:
            if val == value:
                continue
            self._rules.add(key, val)

    @classmethod
    def make_virtual(cls, rules_class):
        """Converts an immutable :class:`Rules` into a mutable :class:`VirtualRules`.

        :param rules_class: What to convert.
        :type rules_class: Instance of :class:`Rules`
        """
        now_virtual = cls(None)
        now_virtual._rules = copy.copy(rules_class._rules)
        return now_virtual

    def _set_rule(self, key, directory, remove):
        """Add (or remove) one rule entry; shared by the public setters below."""
        if remove:
            self._remove_item(key, directory)
        else:
            self._rules.add(key, directory)

    def allowed(self, directory, remove=False):
        """Add or remove an *Allowed* rule for ``directory``.

        :param directory: The directory to create this rule for.
        :type directory: str
        :param remove: Remove this rule for ``directory``. **Default:** *False*.
        :type remove: bool
        """
        self._set_rule('Allowed', directory, remove)

    def allow_uploads(self, directory, remove=False):
        """Add or remove an *AllowUploads* rule for ``directory``.

        :param directory: The directory to create this rule for.
        :type directory: str
        :param remove: Remove this rule for ``directory``. **Default:** *False*.
        :type remove: bool
        """
        self._set_rule('AllowUploads', directory, remove)

    def disallowed(self, directory, remove=False):
        """Add or remove a *Disallowed* rule for ``directory``.

        :param directory: The directory to create this rule for.
        :type directory: str
        :param remove: Remove this rule for ``directory``. **Default:** *False*.
        :type remove: bool
        """
        self._set_rule('Disallowed', directory, remove)
class MappedDirectory:
    """Represents a directory that is in the rules file, having rules.

    :param dir_path: Path of the directory.
    :type dir_path: str
    :param dir_allowed: Whether or not to allow access.
    :type dir_allowed: bool
    :param dir_allowuploads: Whether or not to allow uploads.
    :type dir_allowuploads: bool
    """
    def __init__(self, dir_path, dir_allowed, dir_allowuploads):
        self._dir_path = dir_path
        self._dir_allowed = dir_allowed
        self._dir_allowuploads = dir_allowuploads

    @classmethod
    def create_from_mapping(cls, mapping, path_key):
        """Instantiate a :class:`MappedDirectory` from a path corresponding to an
        entry within :class:`MappedDirectories`.

        :param mapping: The container to operate on.
        :type mapping: An instance of :class:`MappedDirectories`
        :param path_key: The path of the directory; It will be within **mapping**.
        :type path_key: str
        """
        try:
            allowed, allowuploads = mapping.get(path_key)
        except TypeError:
            # mapping.get returned None: unmapped paths get no permissions.
            allowed, allowuploads = False, False
        return cls(path_key, allowed, allowuploads)

    @property
    def dir_path(self):
        """The path of this :class:`MappedDirectory`."""
        return self._dir_path

    @property
    def dir_allowed(self):
        """Whether or not FLFM is allowed in this :class:`MappedDirectory`."""
        return self._dir_allowed

    @property
    def dir_allowuploads(self):
        """Whether or not uploads are allowed in this :class:`MappedDirectory`."""
        return self._dir_allowuploads

    def __repr__(self):
        return '<MappedDirectory \'{}\': {}>'.format(self.dir_path, self.__dict__)

    def __eq__(self, other):
        # BUG FIX: the original counted matching fields and compared the two
        # counters with ``is``, which only works by accident of CPython's
        # small-int caching. Compare the three fields directly instead.
        return (self.dir_path == other.dir_path
                and self.dir_allowed == other.dir_allowed
                and self.dir_allowuploads == other.dir_allowuploads)

    def is_in_tree(self, check_path):
        """Is a path denoted in ``check_path`` a subdirectory or in tree??

        :param check_path: The path to check against.
        :type check_path: str
        :returns: bool
        """
        common_path = os.path.commonpath([self.dir_path, check_path])
        # Normalise Windows separators so the comparison below works.
        if common_path.count('\\') > 0:
            common_path = common_path.replace('\\', '/')
        return common_path == self.dir_path

    def as_shell(self):
        """Convert this :class:`MappedDirectory` into a shell directory object.

        :returns: The result of ``ShellDirectory.from_str_loc`` for this path
            (documented upstream as a shell path object).
        """
        return ShellDirectory.from_str_loc(self.dir_path)
# `D` is an inherited property
# pylint: disable=invalid-name
class MappedDirectories(collections.abc.Mapping):
    """A mapping, `collections.abc.Mapping <https://docs.python.org/3/library/collections.abc.html#collections.abc.Mapping>`_,
    of :class:`MappedDirectory`'s.

    Internally, the mapped directories are a dictionary of
    *Path* -> ``(allowed, allow_uploads)`` tuples.

    :param some_dict: A dictionary to populate this :class:`MappedDirectories`.
    :type some_dict: dict
    """
    def __init__(self, some_dict):
        self.D = some_dict

    @classmethod
    def from_rules(cls, rules):
        """Create from a :class:`Rules`.

        :param rules: The rules to create this mapping from.
        :type rules: A :class:`Rules` instance.
        """
        rule_dict = dict()
        if rules.num_rules > 0:
            # Tuple entries are as such:
            # (ALLOWED??, UPLOAD_ALLOWED??)
            # BUG FIX: match keys exactly. The original used substring
            # checks ('Allowed' in k), which made the 'DisAllowed' spelling
            # (accepted by Rules.num_rules) register as an Allowed rule.
            for k, v in rules.rules.items(True):
                if k == 'Allowed':
                    current = rule_dict.get(v, None)
                    if current is None:
                        rule_dict[v] = (True, False)
                    else:
                        rule_dict[v] = (True, current[1])
                elif k in ('Disallowed', 'DisAllowed'):
                    # what is the point of other properties in a disallow??
                    # just overwrite
                    rule_dict[v] = (False, False)
                elif k in ('AllowUploads', 'AllowUpload'):
                    current = rule_dict.get(v, None)
                    if current is None:
                        # Mark as allowed also since not in dict
                        rule_dict[v] = (True, True)
                    else:
                        rule_dict[v] = (current[0], True)
                else:
                    continue
        return cls(rule_dict)

    @classmethod
    def from_shell_path(cls, shell_path):
        """Create from a :class:`~flfm.shell.paths.ShellPath`

        :param shell_path: Create this mapping, without permission, from this.
        :type shell_path: A :class:`~flfm.shell.paths.ShellPath` instance.
        """
        the_dict = dict()
        default_tuple = (False, False)
        the_dict[shell_path.str_path] = default_tuple
        for subdir in shell_path.directories:
            the_dict[subdir.path] = default_tuple
        return cls(the_dict)

    def __getitem__(self, key):
        # NOTE(review): returns None (rather than raising KeyError) for
        # unknown paths; MappedDirectory.create_from_mapping relies on that
        # via its TypeError catch. Kept for compatibility even though it
        # bends the Mapping contract.
        return self.D.get(key)

    def __setitem__(self, key, item):
        # Accept either a raw (allowed, allowuploads) tuple or a
        # MappedDirectory, which is unpacked into tuple form.
        if isinstance(item, MappedDirectory):
            self.D[key] = (item.dir_allowed, item.dir_allowuploads)
            return
        self.D[key] = item

    def __len__(self):
        return len(self.D)

    def __iter__(self):
        # Simplified from the original hand-guarded next() loop; iterating
        # dict.items() directly yields the same MappedDirectory sequence.
        for dir_path, (allowed, allowuploads) in self.D.items():
            yield MappedDirectory(dir_path, allowed, allowuploads)

    def __contains__(self, value):
        if isinstance(value, MappedDirectory):
            return value.dir_path in self.D
        return super().__contains__(value)

    def __eq__(self, other):
        if not isinstance(other, MappedDirectories):
            return False
        return self.D == other.D

    def get_mapped_dir(self, dir_path):
        """Select a specific mapped directory from within this container.

        :param dir_path: The path to select.
        :type dir_path: str
        :returns: A :class:`MappedDirectory`.

        .. note::
            If ``dir_path`` does not exist, the returned :class:`MappedDirectory`
            will have no permissions assigned. They will all be ``False``.
        """
        return MappedDirectory.create_from_mapping(self, dir_path)

    def apply_rule_map(self, rule_map):
        """Merge the rules of this :class:`MappedDirectories` and another.

        :param rule_map: Another rule/directories map.
        :type rule_map: Another :class:`MappedDirectories`
        :returns: ``self``, this instance but updated.
        """
        def length_paths(other_map):
            for md in other_map:
                yield len(md.dir_path)

        def difference_length(my_length, all_lengths):
            for length in all_lengths:
                yield abs(length-my_length)

        # the length of each path in the rule mapping
        rule_map_lens = list(length_paths(rule_map))
        for my_dir in self:
            # apply rule directly on top
            # iterate, because it's been explicitly set
            if my_dir in rule_map:
                self[my_dir.dir_path] = rule_map.get_mapped_dir(my_dir.dir_path)
                continue
            for rule_dir in rule_map:
                # are we in the tree of a ruled directory?
                if rule_dir.is_in_tree(my_dir.dir_path):
                    # we are in a tree of a disallowed directory
                    # prevent overwriting permissions
                    if not rule_dir.dir_allowed:
                        self[my_dir.dir_path] = rule_dir
                        break
                    my_length = len(my_dir.dir_path)
                    rd_length = len(rule_dir.dir_path)
                    # only lengths of what's in tree
                    rule_map_lens = list(filter(lambda x, l=rd_length: x <= l,
                                                rule_map_lens))
                    # apply rules to subdirectories of a ruled directory
                    # the most-common parent path from the rule mapping is the
                    # one whose permissions shall be applied to the subdirectory
                    if my_length == min(difference_length(my_length, rule_map_lens))+rd_length:
                        self[my_dir.dir_path] = rule_dir
            # reset for next iteration
            rule_map_lens = list(length_paths(rule_map))
        return self

    @property
    def num_allowed(self):
        """Number of mapped directories that are allowed."""
        return sum(1 for md in self if md.dir_allowed)

    @property
    def num_disallowed(self):
        """Number of mapped directories that are disallowed."""
        return sum(1 for md in self if not md.dir_allowed)
import copy
import os
import re
from functools import wraps
from werkzeug.datastructures import MultiDict
from flask import current_app, g, flash, abort
from flask_login import current_user
from .paths import ShellDirectory
def read_rules_file(rule_file):
"""Generate (key, value) tuples from the rules file.
:param rule_file: The ``rules`` file to read from.
:type rule_file: str
"""
lines = rule_file.readlines()
good_lines = list(filter(lambda line: len(line) > 2, lines))
sorted_lines = sorted(good_lines,
key=lambda line: re.match(r"^(\w+)\=", line).group(1))
for line in sorted_lines:
pair = re.search(r"(\w*)\=([/\.\w]*)", line)
yield (pair.group(1), pair.group(2))
def enforce_mapped(mapped_dirs, requested_path, for_upload=False):
"""Enforce the rules from the rules file on requested_path.
:param mapped_dirs: A collection of mapped directories.
:type mapped_dirs: An instance of :class:`MappedDirectories`.
:param requested_path: The path of the directory to check permissions of.
:type requested_path: str
:param for_upload: Whether or not to enforce for an upload. **Default: False**
:type for_upload: bool
"""
requested_md = mapped_dirs.get_mapped_dir(requested_path)
for mapped_dir in mapped_dirs:
if for_upload:
if requested_md == mapped_dir:
if not mapped_dir.dir_allowuploads:
break
return
else:
if requested_md == mapped_dir:
if not mapped_dir.dir_allowed:
break
return
# Can't find a damn thing? Abort!
abort(403)
def needs_rules(needing_method):
    """A decorator to wrap around ``routes`` requiring rules.

    Ensures ``g.fm_rules`` is populated (once per request context) before the
    wrapped route runs; authenticated users get a mutable copy extended with
    their home folder and any folders shared with them.
    """
    @wraps(needing_method)
    def primed(*args, **kwargs):
        rules_path = current_app.config['RULES_FILE']
        if not hasattr(g, 'fm_rules'):
            effective = Rules(rules_path)
            if current_user.is_authenticated:
                personal = VirtualRules.make_virtual(effective)
                personal.allowed(current_user.home_folder)
                personal.allow_uploads(current_user.home_folder)
                # Grant access to folders shared with this user.
                if current_user.shares_received.count() > 0:
                    for share in current_user.shares_received.all():
                        personal.allowed(share.owner.home_folder)
                effective = personal
            g.fm_rules = effective
        # Rules are primed and ready.
        return needing_method(*args, **kwargs)
    return primed
class Rules:
    """Class representing the ``rules`` file.
    :param rule_file: Path to the ``rules`` file
    :type rule_file: str
    """
    def __init__(self, rule_file):
        parsed = MultiDict()
        if rule_file is not None:
            try:
                with open(rule_file, 'r') as handle:
                    parsed = MultiDict(read_rules_file(handle))
            except FileNotFoundError:
                # A missing file simply means "no rules".
                parsed = MultiDict()
        self._rules = parsed
    @property
    def rules(self):
        """A *werkzeug* **MultiDict** of rules.
        """
        return self._rules
    @property
    def num_rules(self):
        """The number of rules in the ``rules`` file.
        """
        recognised = ('Allowed', 'AllowUpload', 'AllowUploads', 'Disallowed',
                      'DisAllowed')
        return sum(len(values) for key, values in self._rules.lists()
                   if key in recognised)
    def __len__(self):
        return self.num_rules
# C'mon pylint VirtualRules derives from Rules
# Derived classes get them juicy protecteds
# pylint: disable=protected-access
class VirtualRules(Rules):
    """Mutable version of :class:`Rules`.
    Construction from a file in this derivation is handled by ``template`` param.
    To copy from a :class:`Rules` use :meth:`make_virtual`.
    :param template: Identical to the ``rule_file`` param in :class:`Rules`.
    :type template: str
    """
    def __init__(self, template=None):
        Rules.__init__(self, template)
    def _remove_item(self, key, value):
        # Pop the whole value list for ``key`` and re-add everything that is
        # not ``value``; an absent key pops an empty list and nothing happens.
        for survivor in self._rules.poplist(key):
            if survivor != value:
                self._rules.add(key, survivor)
    def _toggle(self, key, directory, remove):
        """Add (or, when ``remove`` is set, delete) a ``key`` rule for ``directory``."""
        if remove:
            self._remove_item(key, directory)
        else:
            self._rules.add(key, directory)
    @classmethod
    def make_virtual(cls, rules_class):
        """Converts an immutable :class:`Rules` into a mutable :class:`VirtualRules`.
        :param rules_class: What to convert.
        :type rules_class: Instance of :class:`Rules`
        """
        clone = cls(None)
        clone._rules = copy.copy(rules_class._rules)
        return clone
    def allowed(self, directory, remove=False):
        """Add or remove an *Allowed* rule for ``directory``.
        :param directory: The directory to create this rule for.
        :type directory: str
        :param remove: Remove this rule for ``directory``. **Default:** *False*.
        :type remove: bool
        """
        self._toggle('Allowed', directory, remove)
    def allow_uploads(self, directory, remove=False):
        """Add or remove an *AllowUploads* rule for ``directory``.
        :param directory: The directory to create this rule for.
        :type directory: str
        :param remove: Remove this rule for ``directory``. **Default:** *False*.
        :type remove: bool
        """
        self._toggle('AllowUploads', directory, remove)
    def disallowed(self, directory, remove=False):
        """Add or remove a *Disallowed* rule for ``directory``.
        :param directory: The directory to create this rule for.
        :type directory: str
        :param remove: Remove this rule for ``directory``. **Default:** *False*.
        :type remove: bool
        """
        self._toggle('Disallowed', directory, remove)
class MappedDirectory:
    """Represents a directory that is in the rules file, having rules.
    :param dir_path: Path of the directory.
    :type dir_path: str
    :param dir_allowed: Whether or not to allow access.
    :type dir_allowed: bool
    :param dir_allowuploads: Whether or not to allow uploads.
    :type dir_allowuploads: bool
    """
    def __init__(self, dir_path, dir_allowed, dir_allowuploads):
        self._dir_path = dir_path
        self._dir_allowed = dir_allowed
        self._dir_allowuploads = dir_allowuploads
    @classmethod
    def create_from_mapping(cls, mapping, path_key):
        """Instantiate a :class:`MappedDirectory` from a path corresponding to an
        entry within :class:`MappedDirectories`.
        :param mapping: The container to operate on.
        :type mapping: An instance of :class:`MappedDirectories`
        :param path_key: The path of the directory; It will be within **mapping**.
        :type path_key: str
        """
        try:
            allowed, allowuploads = mapping.get(path_key)
        except TypeError:
            # ``mapping.get`` returned None: unknown paths get no permissions.
            allowed, allowuploads = False, False
        return cls(path_key, allowed, allowuploads)
    @property
    def dir_path(self):
        """The path of this :class:`MappedDirectory`.
        """
        return self._dir_path
    @property
    def dir_allowed(self):
        """Whether or not FLFM is allowed in this :class:`MappedDirectory`.
        """
        return self._dir_allowed
    @property
    def dir_allowuploads(self):
        """Whether or not uploads are allowed in this :class:`MappedDirectory`.
        """
        return self._dir_allowuploads
    def __repr__(self):
        return '<MappedDirectory \'{}\': {}>'.format(self.dir_path, self.__dict__)
    def __eq__(self, other):
        # BUG FIX: the original counted matching attributes and compared the
        # count with ``is`` (object identity).  That only "works" because
        # CPython caches small integers; compare values with ``==`` instead.
        return (self.dir_path == other.dir_path
                and self.dir_allowed == other.dir_allowed
                and self.dir_allowuploads == other.dir_allowuploads)
    def is_in_tree(self, check_path):
        """Is a path denoted in ``check_path`` a subdirectory or in tree??
        :param check_path: The path to check against.
        :type check_path: str
        :returns: bool
        """
        common_path = os.path.commonpath([self.dir_path, check_path])
        # Normalize Windows separators so the comparison below is uniform.
        if common_path.count('\\') > 0:
            common_path = common_path.replace('\\', '/')
        return common_path == self.dir_path
    def as_shell(self):
        """Convert this :class:`MappedDirectory` into a
        :class:`~flfm.shell.paths.ShellPath`.
        :returns: A :class:`~flfm.shell.paths.ShellPath` representing this directory.
        """
        return ShellDirectory.from_str_loc(self.dir_path)
# `D` is an inherited property
# pylint: disable=invalid-name
# MappedDirectories' __iter__ must use yield
# pylint: disable=stop-iteration-return
class MappedDirectories(collections.abc.Mapping):
    """A mapping, `collections.abc.Mapping <https://docs.python.org/3/library/collections.abc.html#collections.abc.Mapping>`_,
    of :class:`MappedDirectory`'s.
    Internally, the mapped directories are a dictionary of *Path*, *tuple*.
    :param some_dict: A dictionary to populate this :class:`MappedDirectories`.
    :type some_dict: dict
    """
    def __init__(self, some_dict):
        # Values are (allowed, allow_uploads) tuples keyed by path string.
        self.D = some_dict
    @classmethod
    def from_rules(cls, rules):
        """Create from a :class:`Rules`.
        :param rules: The rules to create this mapping from.
        :type rules: A :class:`Rules` instance.
        """
        rule_dict = dict()
        if rules.num_rules > 0:
            # Tuple entries are as such:
            # (ALLOWED??, UPLOAD_ALLOWED??)
            for k, v in rules.rules.items(True):
                # NOTE(review): the substring test below fires for key
                # 'DisAllowed' too ('Dis' + 'Allowed'), so such a key never
                # reaches the disallow branch — confirm intended key spellings.
                if 'Allowed' in k:
                    current = rule_dict.get(v, None)
                    if current is None:
                        rule_dict[v] = (True, False)
                    else:
                        # Preserve a previously-set upload flag.
                        rule_dict[v] = (True, current[1])
                elif 'Disallowed' in k or 'DisAllowed' in k:
                    # what is the point of other properties in a disallow??
                    # just overwrite
                    rule_dict[v] = (False, False)
                elif 'AllowUploads' in k or 'AllowUpload' in k:
                    current = rule_dict.get(v, None)
                    if current is None:
                        # Mark as allowed also since not in dict
                        rule_dict[v] = (True, True)
                    else:
                        rule_dict[v] = (current[0], True)
                else:
                    continue
        return cls(rule_dict)
    @classmethod
    def from_shell_path(cls, shell_path):
        """Create from a :class:`~flfm.shell.paths.ShellPath`
        :param shell_path: Create this mapping, without permission, from this.
        :type shell_path: A :class:`~flfm.shell.paths.ShellPath` instance.
        """
        # Every directory starts with no permissions at all.
        the_dict = dict()
        default_tuple = (False, False)
        current_dir_path = shell_path.str_path
        the_dict[current_dir_path] = default_tuple
        for subdir in shell_path.directories:
            the_dict[subdir.path] = default_tuple
        return cls(the_dict)
    def __getitem__(self, key):
        # NOTE(review): returns None for a missing key instead of raising
        # KeyError (deviates from the Mapping contract);
        # MappedDirectory.create_from_mapping relies on this via TypeError.
        return self.D.get(key)
    def __setitem__(self, key, item):
        # Accept either a MappedDirectory or a raw (allowed, uploads) tuple.
        if isinstance(item, MappedDirectory):
            new_item = (item.dir_allowed, item.dir_allowuploads)
            self.D[key] = new_item
            return
        self.D[key] = item
    def __len__(self):
        return len(self.D)
    def __iter__(self):
        # Yields MappedDirectory views built from the underlying dict entries.
        num_yielded = 0
        iterator = iter(self.D)
        while True:
            if num_yielded >= len(self):
                break
            # guard the next()
            try:
                mapped_dir = next(iterator)
            except StopIteration:
                break
            num_yielded += 1
            yield MappedDirectory(mapped_dir, self.D[mapped_dir][0],
                                  self.D[mapped_dir][1])
    def __contains__(self, value):
        # Membership accepts either a MappedDirectory (tested by path only,
        # ignoring its permission flags) or a plain key.
        if isinstance(value, MappedDirectory):
            return value.dir_path in self.D
        return super().__contains__(value)
    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3 — confirm nothing hashes these.
        if not isinstance(other, MappedDirectories):
            return False
        return self.D == other.D
    def get_mapped_dir(self, dir_path):
        """Select a specific mapped directory from within this container.
        :param dir_path: The path to select.
        :type dir_path: str
        :returns: A :class:`MappedDirectory`.
        .. note::
            If ``_dir_path`` does not exist, the returned :class:`MappedDirectory`
            will have no permissions assigned. They will all be ``False``.
        """
        return MappedDirectory.create_from_mapping(self, dir_path)
    def apply_rule_map(self, rule_map):
        """Merge the rules of this :class:`MappedDirectories` and another.
        :param rule_map: Another rule/directories map.
        :type rule_map: Another :class:`MappedDirectories`
        :returns: ``self``, this instance but updated.
        """
        def length_paths(other_map):
            # Path length of every entry in the other map.
            for md in other_map:
                yield len(md.dir_path)
        def difference_length(my_length, all_lengths):
            # Absolute distance between my path length and each candidate's.
            for length in all_lengths:
                yield abs(length-my_length)
        # the length of each path in the rule mapping
        rule_map_lens = list(length_paths(rule_map))
        for my_dir in self:
            # apply rule directly on top
            # iterate, because it's been explicitly set
            if my_dir in rule_map:
                self[my_dir.dir_path] = rule_map.get_mapped_dir(my_dir.dir_path)
                continue
            for rule_dir in rule_map:
                # are we in the tree of a ruled directory?
                if rule_dir.is_in_tree(my_dir.dir_path):
                    # we are in a tree of a disallowed directory
                    # prevent overwriting permissions
                    if not rule_dir.dir_allowed:
                        self[my_dir.dir_path] = rule_dir
                        break
                    my_length = len(my_dir.dir_path)
                    rd_length = len(rule_dir.dir_path)
                    # only lengths of what's in tree
                    rule_map_lens = list(filter(lambda x, l=rd_length: x <= l,
                                                rule_map_lens))
                    # apply rules to subdirectories of a ruled directory
                    # the most-common parent path from the rule mapping is the
                    # # one whose permissions shall be applied to the subdirectory
                    if my_length == min(difference_length(my_length, rule_map_lens))+rd_length:
                        self[my_dir.dir_path] = rule_dir
            # reset for next iteration
            rule_map_lens = list(length_paths(rule_map))
        return self
    @property
    def num_allowed(self):
        """Number of mapped directories that are allowed.
        """
        count_allowed = 0
        for md in self:
            count_allowed += 1 if md.dir_allowed else 0
        return count_allowed
    @property
    def num_disallowed(self):
        """Number of mapped directories that are disallowed.
        """
        count_disallowed = 0
        for md in self:
            count_disallowed += 1 if not md.dir_allowed else 0
        # NOTE(review): the trailing "| 0.673621 | 0.17749" below looks like
        # dataset row-boundary residue fused onto this line, not real code.
        return count_disallowed | 0.673621 | 0.17749
# URL configuration for the mailing-list ("address") application.
from django.conf.urls import url
from . import views
urlpatterns = [
    # Mailing-list overview and its AJAX companions.
    url(r'^$', views.ml_maillist, name='ml_maillist'),
    url(r'^ajax_ml_maillist/$', views.ajax_ml_maillist, name='ajax_ml_maillist'),
    url(r'^ajax_count/(?P<list_id>\d+)/$', views.ajax_maillist_count, name='ajax_maillist_count'),
    # Creating / modifying lists and maintaining their addresses.
    url(r'^add/$', views.ml_maillist_add, name='ml_maillist_add'),
    url(r'^modify/(?P<list_id>\d+)/$', views.ml_maillist_modify, name='ml_maillist_modify'),
    url(r'^maintain/(?P<list_id>\d+)/$', views.ml_maillist_maintain_address, name='ml_maillist_maintain_address'),
    url(r'^ajax_add_address/(?P<list_id>\d+)/$', views.ajax_add_address, name='ajax_add_address'),
    # Address import (upload) and its logs.
    url(r'^upload/$', views.ml_maillist_upload, name='ml_maillist_upload'),
    url(r'^import/log/$', views.ml_import_log, name='ml_import_log'),
    url(r'^import/log/ajax/$', views.ajax_ml_import_log, name='ajax_ml_import_log'),
    url(r'^invalid/(?P<log_id>\d+)/$', views.invalid_view, name='import_invalid_view'),
    url(r'^mul_upld/(?P<list_id>\d+)/$', views.ml_addr_multi_upload, name='ml_addr_multi_upload'),
    # Subscription management.
    url(r'^subscribe/(?P<list_id>\d+)/$', views.ml_subscribe_list, name='ml_subscribe_list'),
    url(r'^ajax_subscribe_list/(?P<list_id>\d+)/$', views.ajax_subscribe_list, name='ajax_subscribe_list'),
    url(r'^subscribe/modify/(?P<list_id>\d+)/(?P<address_id>\d+)/$', views.ml_subscribe_modify, name='ml_subscribe_modify'),
    url(r'^ajax_domain_content/(?P<list_id>\d+)/$', views.ajax_domain_content, name='ajax_domain_content'),
    url(r'^unsubscribe/(?P<list_id>\d+)/$', views.ml_unsubscribe_list, name='ml_unsubscribe_list'),
    url(r'^ajax_unsubscribe_list/(?P<list_id>\d+)/$', views.ajax_unsubscribe_list, name='ajax_unsubscribe_list'),
    url(r'^subscribe/add/$', views.add_subscribe_rec, name='add_subscribe_rec'),
    url(r'^ajax_add_subscriber/$', views.ajax_add_subscriber, name='ajax_add_subscriber'),
    # Export endpoints.
    url(r'^export_template_format/$', views.export_template_format, name='export_template_format'),
    url(r'^export_address/(?P<list_id>\d+)/$', views.export_address, name='export_address'),
    url(r'^export_limit/(?P<user_id>\d+)/(?P<list_id>\d+)/$', views.ajax_export_limit, name='ajax_export_limit'),
# NOTE(review): the trailing "| edm_web1/... | from django..." below looks
# like dataset row-boundary residue fused onto this line, not real code.
] | edm_web1/app/address/urls.py | from django.conf.urls import url
# Duplicate (parsed) copy of the mailing-list URL configuration; its leading
# "from django.conf.urls import url" line was fused into the previous row.
from . import views
urlpatterns = [
    # List overview and AJAX endpoints.
    url(r'^$', views.ml_maillist, name='ml_maillist'),
    url(r'^ajax_ml_maillist/$', views.ajax_ml_maillist, name='ajax_ml_maillist'),
    url(r'^ajax_count/(?P<list_id>\d+)/$', views.ajax_maillist_count, name='ajax_maillist_count'),
    # List creation / modification / address maintenance.
    url(r'^add/$', views.ml_maillist_add, name='ml_maillist_add'),
    url(r'^modify/(?P<list_id>\d+)/$', views.ml_maillist_modify, name='ml_maillist_modify'),
    url(r'^maintain/(?P<list_id>\d+)/$', views.ml_maillist_maintain_address, name='ml_maillist_maintain_address'),
    url(r'^ajax_add_address/(?P<list_id>\d+)/$', views.ajax_add_address, name='ajax_add_address'),
    # Import / upload and logs.
    url(r'^upload/$', views.ml_maillist_upload, name='ml_maillist_upload'),
    url(r'^import/log/$', views.ml_import_log, name='ml_import_log'),
    url(r'^import/log/ajax/$', views.ajax_ml_import_log, name='ajax_ml_import_log'),
    url(r'^invalid/(?P<log_id>\d+)/$', views.invalid_view, name='import_invalid_view'),
    url(r'^mul_upld/(?P<list_id>\d+)/$', views.ml_addr_multi_upload, name='ml_addr_multi_upload'),
    # Subscriptions.
    url(r'^subscribe/(?P<list_id>\d+)/$', views.ml_subscribe_list, name='ml_subscribe_list'),
    url(r'^ajax_subscribe_list/(?P<list_id>\d+)/$', views.ajax_subscribe_list, name='ajax_subscribe_list'),
    url(r'^subscribe/modify/(?P<list_id>\d+)/(?P<address_id>\d+)/$', views.ml_subscribe_modify, name='ml_subscribe_modify'),
    url(r'^ajax_domain_content/(?P<list_id>\d+)/$', views.ajax_domain_content, name='ajax_domain_content'),
    url(r'^unsubscribe/(?P<list_id>\d+)/$', views.ml_unsubscribe_list, name='ml_unsubscribe_list'),
    url(r'^ajax_unsubscribe_list/(?P<list_id>\d+)/$', views.ajax_unsubscribe_list, name='ajax_unsubscribe_list'),
    url(r'^subscribe/add/$', views.add_subscribe_rec, name='add_subscribe_rec'),
    url(r'^ajax_add_subscriber/$', views.ajax_add_subscriber, name='ajax_add_subscriber'),
    # Exports.
    url(r'^export_template_format/$', views.export_template_format, name='export_template_format'),
    url(r'^export_address/(?P<list_id>\d+)/$', views.export_address, name='export_address'),
    url(r'^export_limit/(?P<user_id>\d+)/(?P<list_id>\d+)/$', views.ajax_export_limit, name='ajax_export_limit'),
# NOTE(review): the trailing "| 0.185689 | 0.070784" below looks like dataset
# row-boundary residue fused onto this line, not real code.
] | 0.185689 | 0.070784
import time
# ---- ADS1x15 constants: register pointers and config-word bit fields ----
# Default I2C slave address.
ADS1x15_DEFAULT_ADDRESS = 0x48
# Register-pointer bytes: written first to select the target register.
ADS1x15_POINTER_CONVERSION = 0x00
ADS1x15_POINTER_CONFIG = 0x01
ADS1x15_POINTER_LOW_THRESHOLD = 0x02
ADS1x15_POINTER_HIGH_THRESHOLD = 0x03
# "Operational status" bit: set to start a single conversion.
ADS1x15_CONFIG_OS_SINGLE = 0x8000
# Bit position of the input-multiplexer field inside the 16-bit config word.
ADS1x15_CONFIG_MUX_OFFSET = 12
# Gain factor -> PGA bits of the config word.
ADS1x15_CONFIG_GAIN = {
    2 / 3: 0x0000,
    1: 0x0200,
    2: 0x0400,
    4: 0x0600,
    8: 0x0800,
    16: 0x0A00
}
# Conversion-mode bits.
ADS1x15_CONFIG_MODE_CONTINUOUS = 0x0000
ADS1x15_CONFIG_MODE_SINGLE = 0x0100
# Samples-per-second -> data-rate bits (ADS1115 variant).
ADS1115_CONFIG_DR = {
    8: 0x0000,
    16: 0x0020,
    32: 0x0040,
    64: 0x0060,
    128: 0x0080,
    250: 0x00A0,
    475: 0x00C0,
    860: 0x00E0
}
# Comparator configuration bits (unused by the single-shot reads below).
ADS1x15_CONFIG_COMP_WINDOW = 0x0010
ADS1x15_CONFIG_COMP_ACTIVE_HIGH = 0x0008
ADS1x15_CONFIG_COMP_LATCHING = 0x0004
ADS1x15_CONFIG_COMP_QUE = {
    1: 0x0000,
    2: 0x0001,
    4: 0x0002
}
ADS1x15_CONFIG_COMP_QUE_DISABLE = 0x0003
class ADS1115(object):
    """Minimal single-shot driver for the TI ADS1115 16-bit I2C ADC."""
    def __init__(self, address=ADS1x15_DEFAULT_ADDRESS, i2c=None, **kwargs):
        # Fall back to the package-level I2C helper when none is injected.
        if i2c is None:
            from . import I2C
            i2c = I2C
        self._device = i2c.get_i2c_device(address, **kwargs)
    def _read(self, mux, gain, data_rate, mode):
        """Write a config word, wait one conversion period, read the sample."""
        config = ADS1x15_CONFIG_OS_SINGLE
        # 3-bit input-multiplexer selection.
        config |= (mux & 0x07) << ADS1x15_CONFIG_MUX_OFFSET
        if gain not in ADS1x15_CONFIG_GAIN:
            raise ValueError('Gain must be one of: 2/3, 1, 2, 4, 8, 16')
        config |= ADS1x15_CONFIG_GAIN[gain]
        config |= mode
        if data_rate is None:
            data_rate = self.data_rate_default()
        config |= self.data_rate_config(data_rate)
        config |= ADS1x15_CONFIG_COMP_QUE_DISABLE
        # Config register is written high byte first.
        self._device.writeList(ADS1x15_POINTER_CONFIG, [(config >> 8) & 0xFF, config & 0xFF])
        # Sleep slightly longer than one sample period so conversion finishes.
        time.sleep(1.0 / data_rate + 0.0001)
        result = self._device.readList(ADS1x15_POINTER_CONVERSION, 2)
        # readList returns [high, low]; conversion_value takes (low, high).
        return self.conversion_value(result[1], result[0])
    def read_adc(self, channel, gain=1, data_rate=None):
        """Single-shot read of ``channel`` (0-3); +0x04 selects the
        single-ended mux settings — confirm against the ADS1115 datasheet."""
        assert 0 <= channel <= 3, 'Channel must be a value within 0-3!'
        return self._read(channel + 0x04, gain, data_rate, ADS1x15_CONFIG_MODE_SINGLE)
    @staticmethod
    def data_rate_default():
        # 128 samples/second.
        return 128
    @staticmethod
    def data_rate_config(data_rate):
        # Map a samples-per-second value to its config bits.
        if data_rate not in ADS1115_CONFIG_DR:
            raise ValueError('Data rate must be one of: 8, 16, 32, 64, 128, 250, 475, 860')
        return ADS1115_CONFIG_DR[data_rate]
    @staticmethod
    def conversion_value(low, high):
        # Reassemble the 16-bit sample and sign-extend to a Python int.
        value = ((high & 0xFF) << 8) | (low & 0xFF)
        if value & 0x8000 != 0:
            value -= 1 << 16
        # NOTE(review): the trailing "| lib/adc/ads1115.py | import time"
        # below looks like dataset row-boundary residue fused onto this line.
        return value | lib/adc/ads1115.py | import time
# Duplicate (parsed) copy of the ADS1115 driver; its leading "import time"
# line was fused into the previous row.
ADS1x15_DEFAULT_ADDRESS = 0x48
# Register-pointer bytes used to select the target register.
ADS1x15_POINTER_CONVERSION = 0x00
ADS1x15_POINTER_CONFIG = 0x01
ADS1x15_POINTER_LOW_THRESHOLD = 0x02
ADS1x15_POINTER_HIGH_THRESHOLD = 0x03
# Start-single-conversion bit of the config word.
ADS1x15_CONFIG_OS_SINGLE = 0x8000
# Bit position of the mux field in the config word.
ADS1x15_CONFIG_MUX_OFFSET = 12
# Gain factor -> PGA bits.
ADS1x15_CONFIG_GAIN = {
    2 / 3: 0x0000,
    1: 0x0200,
    2: 0x0400,
    4: 0x0600,
    8: 0x0800,
    16: 0x0A00
}
# Conversion-mode bits.
ADS1x15_CONFIG_MODE_CONTINUOUS = 0x0000
ADS1x15_CONFIG_MODE_SINGLE = 0x0100
# Samples-per-second -> data-rate bits.
ADS1115_CONFIG_DR = {
    8: 0x0000,
    16: 0x0020,
    32: 0x0040,
    64: 0x0060,
    128: 0x0080,
    250: 0x00A0,
    475: 0x00C0,
    860: 0x00E0
}
# Comparator configuration bits (comparator is disabled in _read below).
ADS1x15_CONFIG_COMP_WINDOW = 0x0010
ADS1x15_CONFIG_COMP_ACTIVE_HIGH = 0x0008
ADS1x15_CONFIG_COMP_LATCHING = 0x0004
ADS1x15_CONFIG_COMP_QUE = {
    1: 0x0000,
    2: 0x0001,
    4: 0x0002
}
ADS1x15_CONFIG_COMP_QUE_DISABLE = 0x0003
class ADS1115(object):
    """Single-shot driver for the TI ADS1115 16-bit I2C ADC."""
    def __init__(self, address=ADS1x15_DEFAULT_ADDRESS, i2c=None, **kwargs):
        # Use the package-level I2C helper unless one is injected.
        if i2c is None:
            from . import I2C
            i2c = I2C
        self._device = i2c.get_i2c_device(address, **kwargs)
    def _read(self, mux, gain, data_rate, mode):
        """Assemble the config word, trigger a conversion and return it."""
        config = ADS1x15_CONFIG_OS_SINGLE
        config |= (mux & 0x07) << ADS1x15_CONFIG_MUX_OFFSET
        if gain not in ADS1x15_CONFIG_GAIN:
            raise ValueError('Gain must be one of: 2/3, 1, 2, 4, 8, 16')
        config |= ADS1x15_CONFIG_GAIN[gain]
        config |= mode
        if data_rate is None:
            data_rate = self.data_rate_default()
        config |= self.data_rate_config(data_rate)
        config |= ADS1x15_CONFIG_COMP_QUE_DISABLE
        # High byte first, then low byte.
        self._device.writeList(ADS1x15_POINTER_CONFIG, [(config >> 8) & 0xFF, config & 0xFF])
        # Wait just over one sample period for the conversion to complete.
        time.sleep(1.0 / data_rate + 0.0001)
        result = self._device.readList(ADS1x15_POINTER_CONVERSION, 2)
        # result is [high, low]; conversion_value takes (low, high).
        return self.conversion_value(result[1], result[0])
    def read_adc(self, channel, gain=1, data_rate=None):
        """Single-shot, single-ended read of ``channel`` (0-3)."""
        assert 0 <= channel <= 3, 'Channel must be a value within 0-3!'
        return self._read(channel + 0x04, gain, data_rate, ADS1x15_CONFIG_MODE_SINGLE)
    @staticmethod
    def data_rate_default():
        # 128 samples/second.
        return 128
    @staticmethod
    def data_rate_config(data_rate):
        # Validate and translate a samples-per-second value.
        if data_rate not in ADS1115_CONFIG_DR:
            raise ValueError('Data rate must be one of: 8, 16, 32, 64, 128, 250, 475, 860')
        return ADS1115_CONFIG_DR[data_rate]
    @staticmethod
    def conversion_value(low, high):
        # Rebuild the signed 16-bit sample from its two bytes.
        value = ((high & 0xFF) << 8) | (low & 0xFF)
        if value & 0x8000 != 0:
            value -= 1 << 16
        # NOTE(review): the trailing "| 0.424889 | 0.244126" below looks like
        # dataset row-boundary residue fused onto this line, not real code.
        return value | 0.424889 | 0.244126
import torch
import numpy as np
import torch.nn as nn
from pruner.filter_pruner import FilterPruner
from model.MobileNetV2 import InvertedResidual
class FilterPrunerMBNetV2(FilterPruner):
    def parse_dependency(self):
        """No-op placeholder: for this architecture the layer dependencies
        (``self.chains`` / ``self.next_conv``) are built in :meth:`forward`
        instead."""
        pass
    def forward(self, x):
        """Run a forward pass through ``self.model`` while recording the
        bookkeeping the pruner needs: per-conv channel counts and FLOP costs,
        conv->BN association, residual "chains", and (depending on
        ``self.rank_type``) filter ranks.

        Assumes ``x`` is a 4-D NCHW batch (``x.shape[2]``/``x.shape[3]`` are
        used as spatial size).  The accumulator dicts (``conv_in_channels``,
        ``cost_map``, ``rates``, ...) are presumably initialized by the
        ``FilterPruner`` base class — confirm there.

        :param x: input batch tensor.
        :returns: the classifier output for ``x``.
        """
        # Unwrap DataParallel so we can walk the raw module tree.
        if isinstance(self.model, nn.DataParallel):
            model = self.model.module
        else:
            model = self.model
        self.activations = []
        self.gradients = []
        self.weight_grad = []
        self.grad_index = 0
        self.linear = None
        # activation index to the instance of conv layer
        self.activation_to_conv = {}
        # retrieve next conv using activation index of conv
        self.next_conv = {}
        # retrieve next immediate bn layer using activation index of conv
        self.bn_for_conv = {}
        # Chainning convolutions
        # (use activation index to represent a conv)
        self.chains = {}
        activation_index = 0
        prev_blk_last_conv = -1
        for l1, m1 in enumerate(model.features.children()):
            skipped = False
            if isinstance(m1, InvertedResidual):
                if m1.use_res_connect:
                    skipped = True
                # m1 is nn.Sequential now
                m1 = m1.conv
            # use for residual
            tmp_x = x
            # In the beginning of InvertedResidual block, get prev_conv for chaining purpose
            if activation_index-1 >= 0:
                prev_blk_last_conv = activation_index-1
            cnt = 0
            for l2, m2 in enumerate(m1.children()):
                cnt += 1
                x = m2(x)
                h = x.shape[2]
                w = x.shape[3]
                if isinstance(m2, nn.Conv2d):
                    # Record geometry and cost for this conv.
                    self.conv_in_channels[activation_index] = m2.weight.size(1)
                    self.conv_out_channels[activation_index] = m2.weight.size(0)
                    self.omap_size[activation_index] = (h, w)
                    self.cost_map[activation_index] = h * w * m2.weight.size(2) * m2.weight.size(3)
                    self.in_params[activation_index] = m2.weight.size(1) * m2.weight.size(2) * m2.weight.size(3)
                    self.cur_flops += h * w * m2.weight.size(0) * m2.weight.size(1) * m2.weight.size(2) * m2.weight.size(3)
                    # If this is full group_conv it should be bounded with last conv
                    if m2.groups == m2.out_channels and m2.groups == m2.in_channels:
                        assert activation_index-1 not in self.chains, 'Previous conv has already chained to some other convs!'
                        self.chains[activation_index-1] = activation_index
                    if self.rank_type == 'l1_weight':
                        if activation_index not in self.filter_ranks:
                            self.filter_ranks[activation_index] = torch.zeros(m2.weight.size(0), device=self.device)
                        values = (torch.abs(m2.weight.data)).sum(1).sum(1).sum(1)
                        # Normalize the rank by the filter dimensions
                        #values = values / (m2.weight.size(1) * m2.weight.size(2) * m2.weight.size(3))
                        self.filter_ranks[activation_index] = values
                    elif self.rank_type == 'l2_weight':
                        if activation_index not in self.filter_ranks:
                            self.filter_ranks[activation_index] = torch.zeros(m2.weight.size(0), device=self.device)
                        values = (torch.pow(m2.weight.data, 2)).sum(1).sum(1).sum(1)
                        # Normalize the rank by the filter dimensions
                        # values = values / (m2.weight.size(1) * m2.weight.size(2) * m2.weight.size(3))
                        self.filter_ranks[activation_index] = values
                    elif self.rank_type == 'l2_bn' or self.rank_type == 'l1_bn':
                        pass
                    else:
                        # Gradient-based ranking: hook this activation.
                        x.register_hook(self.compute_rank)
                        self.activations.append(x)
                    self.rates[activation_index] = self.conv_in_channels[activation_index] * self.cost_map[activation_index]
                    self.activation_to_conv[activation_index] = m2
                    if activation_index > 0:
                        self.next_conv[activation_index-1] = [activation_index]
                    activation_index += 1
                elif isinstance(m2, nn.BatchNorm2d):
                    # activation-1 since we increased the index right after conv
                    self.bn_for_conv[activation_index-1] = m2
                    if self.rank_type == 'l2_bn':
                        if activation_index-1 not in self.filter_ranks:
                            self.filter_ranks[activation_index-1] = torch.zeros(m2.weight.size(0), device=self.device)
                        values = torch.pow(m2.weight.data, 2)
                        self.filter_ranks[activation_index-1] = values
                    elif self.rank_type == 'l2_bn_param':
                        if activation_index-1 not in self.filter_ranks:
                            self.filter_ranks[activation_index-1] = torch.zeros(m2.weight.size(0), device=self.device)
                        values = torch.pow(m2.weight.data, 2)
                        self.filter_ranks[activation_index-1] = values* self.in_params[activation_index-1]
            # Leaf module without children: just run it.
            if cnt == 0:
                x = m1(x)
            # After we parse through the block, if this block is with residual
            if skipped:
                x = tmp_x + x
                if prev_blk_last_conv >= 0:
                    assert prev_blk_last_conv not in self.chains, 'Previous conv has already chained to some other convs!'
                    # activation-1 is the current convolution since we just increased the pointer
                    self.chains[prev_blk_last_conv] = activation_index-1
        for m in model.classifier.modules():
            if isinstance(m, nn.Linear):
                self.linear = m
                self.base_flops = np.prod(m.weight.shape)
                self.cur_flops += self.base_flops
        # Snapshot original channel counts before any pruning bookkeeping.
        self.og_conv_in_channels = self.conv_in_channels.copy()
        self.og_conv_out_channels = self.conv_out_channels.copy()
        self.resource_usage = self.cur_flops
        return model.classifier(x.view(x.size(0), -1))
    def amc_filter_compress(self, layer_id, action, max_sparsity):
        """AMC-style step counted in *filters*: prune ``action`` fraction of
        ``layer_id``'s filters (and of every layer chained to it through
        residual/group-conv links), then report what remains.

        :param layer_id: activation index of the conv being pruned.
        :param action: fraction of this layer's output filters to remove.
        :param max_sparsity: cap on how sparse the remaining layers may get.
        :returns: ``(next_layer, cost, rest_max_filters)`` — next unchecked
            layer, total remaining filter count, and the maximum number of
            filters still removable from the rest of the network.
        """
        # Chain residual connections
        t = layer_id
        current_chains = []
        while t in self.chains:
            current_chains.append(t)
            t = self.chains[t]
        current_chains.append(t)
        prune_away = int(action*self.conv_out_channels[layer_id])
        # Apply the same removal to every chained layer and mark them checked.
        for layer in current_chains:
            self.amc_checked.append(layer)
            self.conv_out_channels[layer] -= prune_away
        rest = 0
        rest_min_filters = 0
        rest_total_filters = 0
        tmp_out_channels = self.og_conv_out_channels.copy()
        tmp_in_channels = self.conv_in_channels.copy()
        # First layer not yet visited by the AMC walk.
        next_layer = layer_id
        while next_layer in self.amc_checked:
            next_layer += 1
        t = next_layer
        next_chains = []
        if t < len(self.activation_to_conv):
            while t in self.chains:
                next_chains.append(t)
                t = self.chains[t]
            next_chains.append(t)
        for i in range(next_layer, len(self.activation_to_conv)):
            if not i in self.amc_checked:
                rest += self.conv_out_channels[i]
                if not i in next_chains:
                    # Shrink non-chained layers to their maximally-sparse size.
                    if max_sparsity == 1:
                        tmp_out_channels[i] = 1
                    else:
                        tmp_out_channels[i] = int(np.ceil(tmp_out_channels[i] * (1-max_sparsity)))
                rest_total_filters += self.conv_out_channels[i]
                rest_min_filters += tmp_out_channels[i]
        rest_max_filters = rest_total_filters - rest_min_filters
        # Current "cost" = total filter count across all convs.
        cost = 0
        for key in self.cost_map:
            cost += self.conv_out_channels[key]
        return next_layer, cost, rest_max_filters
    def amc_compress(self, layer_id, action, max_sparsity):
        """AMC-style step counted in *FLOPs*: prune ``action`` fraction of
        ``layer_id``'s filters (propagating through its residual chain and
        into the input channels of downstream non-depthwise convs), then
        report the FLOP budget that remains.

        :param layer_id: activation index of the conv being pruned.
        :param action: fraction of this layer's output filters to remove.
        :param max_sparsity: cap on how sparse the remaining layers may get.
        :returns: ``(next_layer, cost, rest, rest_max_flops)``.
        """
        # Chain residual connections
        t = layer_id
        current_chains = []
        while t in self.chains:
            current_chains.append(t)
            t = self.chains[t]
        current_chains.append(t)
        prune_away = int(action*self.conv_out_channels[layer_id])
        for layer in current_chains:
            self.amc_checked.append(layer)
            self.conv_out_channels[layer] -= prune_away
            next_conv_idx = self.next_conv[layer] if layer in self.next_conv else None
            if next_conv_idx:
                for next_conv_i in next_conv_idx:
                    next_conv = self.activation_to_conv[next_conv_i]
                    # Depthwise convs keep in==out==groups; only shrink the
                    # input side of regular convs.
                    if (next_conv.groups != next_conv.out_channels or next_conv.groups != next_conv.in_channels):
                        self.conv_in_channels[next_conv_i] -= prune_away
        rest = 0
        rest_min_flops = 0
        rest_total_flops = 0
        tmp_out_channels = self.og_conv_out_channels.copy()
        tmp_in_channels = self.conv_in_channels.copy()
        next_layer = layer_id
        while next_layer in self.amc_checked:
            next_layer += 1
        t = next_layer
        next_chains = []
        if t < len(self.activation_to_conv):
            while t in self.chains:
                next_chains.append(t)
                t = self.chains[t]
            next_chains.append(t)
        init_in = {}
        # If filter in next_chains are prune to maximum, modify the following channels
        for t in next_chains:
            next_conv_idx = self.next_conv[t] if t in self.next_conv else None
            if next_conv_idx:
                for next_conv_i in next_conv_idx:
                    next_conv = self.activation_to_conv[next_conv_i]
                    if (next_conv.groups != next_conv.out_channels or next_conv.groups != next_conv.in_channels):
                        tmp_in_channels[next_conv_i] = self.og_conv_in_channels[next_conv_i] * (1-max_sparsity)
                        init_in[next_conv_i] = self.og_conv_in_channels[next_conv_i] * (1-max_sparsity)
        for i in range(next_layer, len(self.activation_to_conv)):
            if not i in self.amc_checked:
                rest += self.cost_map[i]*self.conv_in_channels[i]*self.conv_out_channels[i]
                if not i in next_chains:
                    tmp_out_channels[i] *= (1-max_sparsity)
                    next_conv_idx = self.next_conv[i] if i in self.next_conv else None
                    if next_conv_idx:
                        for next_conv_i in next_conv_idx:
                            next_conv = self.activation_to_conv[next_conv_i]
                            if (next_conv.groups != next_conv.out_channels or next_conv.groups != next_conv.in_channels):
                                tmp_in_channels[next_conv_i] = tmp_out_channels[i]
                if i in init_in:
                    rest_total_flops += self.cost_map[i]*init_in[i]*self.conv_out_channels[i]
                else:
                    rest_total_flops += self.cost_map[i]*self.conv_in_channels[i]*self.conv_out_channels[i]
                rest_min_flops += self.cost_map[i]*tmp_in_channels[i]*tmp_out_channels[i]
        rest_max_flops = rest_total_flops - rest_min_flops
        # Current cost: conv FLOPs plus a classifier term per conv
        # (NOTE(review): adding num_cls per layer looks odd — confirm intent).
        cost = 0
        for key in self.cost_map:
            cost += self.cost_map[key]*self.conv_in_channels[key]*self.conv_out_channels[key]
            cost += self.conv_out_channels[key]*self.num_cls
        return next_layer, cost, rest, rest_max_flops
def mask_conv_layer_segment(self, layer_index, filter_range):
filters_begin = filter_range[0]
filters_end = filter_range[1]
pruned_filters = filters_end - filters_begin + 1
# Retrive conv based on layer_index
conv = self.activation_to_conv[layer_index]
next_bn = self.bn_for_conv[layer_index]
next_conv_idx = self.next_conv[layer_index] if layer_index in self.next_conv else None
# Surgery on the conv layer to be pruned
# dw-conv, reduce groups as well
conv.weight.data[filters_begin:filters_end+1,:,:,:] = 0
conv.weight.grad = None
if not conv.bias is None:
conv.bias.data[filters_begin:filters_end+1] = 0
conv.bias.grad = None
next_bn.weight.data[filters_begin:filters_end+1] = 0
next_bn.weight.grad = None
next_bn.bias.data[filters_begin:filters_end+1] = 0
next_bn.bias.grad = None
next_bn.running_mean.data[filters_begin:filters_end+1] = 0
next_bn.running_mean.grad = None
next_bn.running_var.data[filters_begin:filters_end+1] = 0
next_bn.running_var.grad = None
    def get_valid_filters(self):
        """Collect, per conv, the filter indices whose BatchNorm weight is
        zero across an entire residual chain (i.e. filters safe to prune).

        :returns: dict mapping activation index -> list of prunable filter
            indices; chains share one list.
        """
        filters_to_prune_per_layer = {}
        visited = []
        for conv_idx in self.activation_to_conv:
            if not conv_idx in visited:
                # Walk the residual chain rooted at this conv.
                cur_chain = []
                t = conv_idx
                chain_max_dim = self.activation_to_conv[t].weight.size(0)
                while t in self.chains:
                    num_filters = self.activation_to_conv[t].weight.size(0)
                    chain_max_dim = np.maximum(chain_max_dim, num_filters)
                    cur_chain.append(t)
                    t = self.chains[t]
                cur_chain.append(t)
                visited = visited + cur_chain
                # A filter is "active" if ANY chain member's BN weight is nonzero.
                mask = np.zeros(chain_max_dim)
                for t in cur_chain:
                    bn = self.bn_for_conv[t]
                    cur_mask = (torch.abs(bn.weight) > 0).cpu().numpy()
                    mask = np.logical_or(mask, cur_mask)
                inactive_filter = np.where(mask == 0)[0]
                if len(inactive_filter) > 0:
                    for t in cur_chain:
                        filters_to_prune_per_layer[t] = list(inactive_filter.astype(int))
                        # NOTE(review): ``bn`` here is the last BN from the loop
                        # above, not layer t's BN — confirm this is intended.
                        # Keep at least 2 filters so the layer never empties.
                        if len(inactive_filter) == bn.weight.size(0):
                            filters_to_prune_per_layer[t] = filters_to_prune_per_layer[t][:-2]
        return filters_to_prune_per_layer
    def get_valid_flops(self):
        """Estimate the FLOPs that would remain if every filter with an
        all-zero BatchNorm weight (across its residual chain) were removed.

        :returns: estimated remaining FLOP count (int/float).
        """
        in_channels = self.conv_in_channels.copy()
        out_channels = self.conv_out_channels.copy()
        visited = []
        for conv_idx in self.activation_to_conv:
            if not conv_idx in visited:
                # Walk the residual chain rooted at this conv.
                cur_chain = []
                t = conv_idx
                chain_max_dim = self.activation_to_conv[t].weight.size(0)
                while t in self.chains:
                    num_filters = self.activation_to_conv[t].weight.size(0)
                    chain_max_dim = np.maximum(chain_max_dim, num_filters)
                    cur_chain.append(t)
                    t = self.chains[t]
                cur_chain.append(t)
                visited = visited + cur_chain
                # A filter is inactive only if zero in every chain member's BN.
                mask = np.zeros(chain_max_dim)
                for t in cur_chain:
                    bn = self.bn_for_conv[t]
                    cur_mask = (torch.abs(bn.weight) > 0).cpu().numpy()
                    mask = np.logical_or(mask, cur_mask)
                inactive_filter = np.where(mask == 0)[0]
                if len(inactive_filter) > 0:
                    for t in cur_chain:
                        out_channels[t] -= len(inactive_filter)
                        # Never let a layer drop below 2 output channels
                        # (NOTE(review): ``bn`` is the last BN from the loop
                        # above — confirm intended).
                        if len(inactive_filter) == bn.weight.size(0):
                            out_channels[t] = 2
                        # Propagate the shrink into downstream non-depthwise convs.
                        next_conv_idx = self.next_conv[t] if t in self.next_conv else None
                        if next_conv_idx:
                            for next_conv_i in next_conv_idx:
                                next_conv = self.activation_to_conv[next_conv_i]
                                if (next_conv.groups != next_conv.out_channels or next_conv.groups != next_conv.in_channels):
                                    in_channels[next_conv_i] = out_channels[t]
        flops = 0
        for k in self.activation_to_conv:
            flops += self.cost_map[k] * in_channels[k] * out_channels[k]
            # NOTE(review): classifier term added once per conv — confirm.
            flops += out_channels[k] * self.num_cls
        return flops
def prune_conv_layer_segment(self, layer_index, filter_range):
    """Physically remove a contiguous range of filters from a conv layer.

    Shrinks the conv's weight (and bias), the batchnorm that follows it,
    and the input side of whatever consumes its output: either the next
    conv layer(s), or — for the last conv — the first linear layer of the
    classifier.

    :param layer_index: activation index of the conv to prune
    :param filter_range: (first, last) filter indices to remove, inclusive
    :raises ValueError: if the last conv is pruned but no linear layer was
        recorded by forward()
    """
    filters_begin = filter_range[0]
    filters_end = filter_range[1]
    pruned_filters = int(filters_end - filters_begin + 1)
    # Retrieve conv based on layer_index
    conv = self.activation_to_conv[layer_index]
    next_bn = self.bn_for_conv[layer_index]
    next_conv_idx = self.next_conv[layer_index] if layer_index in self.next_conv else None
    # Surgery on the conv layer to be pruned. The new Conv2d is only built
    # to obtain a correctly-sized weight buffer.
    # FIX: Conv2d's `bias` argument is a bool; passing the old bias
    # Parameter raised "Boolean value of Tensor ... is ambiguous" whenever
    # the conv actually had a bias.
    if conv.groups == conv.out_channels:
        # Depthwise conv: in/out channels and groups all shrink together.
        new_conv = \
            torch.nn.Conv2d(in_channels=conv.out_channels - pruned_filters,
                            out_channels=conv.out_channels - pruned_filters,
                            kernel_size=conv.kernel_size,
                            stride=conv.stride,
                            padding=conv.padding,
                            dilation=conv.dilation,
                            groups=conv.groups - pruned_filters,
                            bias=conv.bias is not None)
        conv.in_channels -= pruned_filters
        conv.out_channels -= pruned_filters
        conv.groups -= pruned_filters
    else:
        new_conv = \
            torch.nn.Conv2d(in_channels=conv.in_channels,
                            out_channels=conv.out_channels - pruned_filters,
                            kernel_size=conv.kernel_size,
                            stride=conv.stride,
                            padding=conv.padding,
                            dilation=conv.dilation,
                            groups=conv.groups,
                            bias=conv.bias is not None)
        conv.out_channels -= pruned_filters
    # Copy every kept filter (outside [begin, end]) into the new buffer,
    # then swap it into the live module.
    old_weights = conv.weight.data.cpu().numpy()
    new_weights = new_conv.weight.data.cpu().numpy()
    new_weights[:filters_begin, :, :, :] = old_weights[:filters_begin, :, :, :]
    new_weights[filters_begin:, :, :, :] = old_weights[filters_end + 1:, :, :, :]
    conv.weight.data = torch.from_numpy(new_weights).to(self.device)
    conv.weight.grad = None
    if conv.bias is not None:
        bias_numpy = conv.bias.data.cpu().numpy()
        bias = np.zeros(shape=(bias_numpy.shape[0] - pruned_filters), dtype=np.float32)
        bias[:filters_begin] = bias_numpy[:filters_begin]
        bias[filters_begin:] = bias_numpy[filters_end + 1:]
        conv.bias.data = torch.from_numpy(bias).to(self.device)
        conv.bias.grad = None
    # Surgery on the following batchnorm: weight, bias and both running
    # statistics lose the pruned slots.
    next_new_bn = \
        torch.nn.BatchNorm2d(num_features=next_bn.num_features - pruned_filters,
                             eps=next_bn.eps,
                             momentum=next_bn.momentum,
                             affine=next_bn.affine,
                             track_running_stats=next_bn.track_running_stats)
    next_bn.num_features -= pruned_filters
    old_weights = next_bn.weight.data.cpu().numpy()
    new_weights = next_new_bn.weight.data.cpu().numpy()
    old_bias = next_bn.bias.data.cpu().numpy()
    new_bias = next_new_bn.bias.data.cpu().numpy()
    old_running_mean = next_bn.running_mean.data.cpu().numpy()
    new_running_mean = next_new_bn.running_mean.data.cpu().numpy()
    old_running_var = next_bn.running_var.data.cpu().numpy()
    new_running_var = next_new_bn.running_var.data.cpu().numpy()
    new_weights[:filters_begin] = old_weights[:filters_begin]
    new_weights[filters_begin:] = old_weights[filters_end + 1:]
    next_bn.weight.data = torch.from_numpy(new_weights).to(self.device)
    next_bn.weight.grad = None
    new_bias[:filters_begin] = old_bias[:filters_begin]
    new_bias[filters_begin:] = old_bias[filters_end + 1:]
    next_bn.bias.data = torch.from_numpy(new_bias).to(self.device)
    next_bn.bias.grad = None
    new_running_mean[:filters_begin] = old_running_mean[:filters_begin]
    new_running_mean[filters_begin:] = old_running_mean[filters_end + 1:]
    next_bn.running_mean.data = torch.from_numpy(new_running_mean).to(self.device)
    next_bn.running_mean.grad = None
    new_running_var[:filters_begin] = old_running_var[:filters_begin]
    new_running_var[filters_begin:] = old_running_var[filters_end + 1:]
    next_bn.running_var.data = torch.from_numpy(new_running_var).to(self.device)
    next_bn.running_var.grad = None
    # Shrink the input side of the next conv layer(s).
    # If next is dw-conv, don't bother: it is chained, so it gets pruned
    # properly through the chain bookkeeping.
    if next_conv_idx:
        for next_conv_i in next_conv_idx:
            next_conv = self.activation_to_conv[next_conv_i]
            if (next_conv.groups != next_conv.out_channels or next_conv.groups != next_conv.in_channels):
                next_new_conv = \
                    torch.nn.Conv2d(in_channels=next_conv.in_channels - pruned_filters,
                                    out_channels=next_conv.out_channels,
                                    kernel_size=next_conv.kernel_size,
                                    stride=next_conv.stride,
                                    padding=next_conv.padding,
                                    dilation=next_conv.dilation,
                                    groups=next_conv.groups,
                                    bias=next_conv.bias is not None)
                next_conv.in_channels -= pruned_filters
                old_weights = next_conv.weight.data.cpu().numpy()
                new_weights = next_new_conv.weight.data.cpu().numpy()
                new_weights[:, :filters_begin, :, :] = old_weights[:, :filters_begin, :, :]
                new_weights[:, filters_begin:, :, :] = old_weights[:, filters_end + 1:, :, :]
                next_conv.weight.data = torch.from_numpy(new_weights).to(self.device)
                next_conv.weight.grad = None
    else:
        # Pruning the last conv layer: shrink the matching input slice of
        # the classifier's first linear layer.
        if self.linear is None:
            raise ValueError("No linear layer found in classifier")
        params_per_input_channel = int(self.linear.in_features / (conv.out_channels + pruned_filters))
        new_linear_layer = \
            torch.nn.Linear(self.linear.in_features - pruned_filters * params_per_input_channel,
                            self.linear.out_features)
        self.linear.in_features -= pruned_filters * params_per_input_channel
        old_weights = self.linear.weight.data.cpu().numpy()
        new_weights = new_linear_layer.weight.data.cpu().numpy()
        new_weights[:, :int(filters_begin * params_per_input_channel)] = \
            old_weights[:, :int(filters_begin * params_per_input_channel)]
        new_weights[:, int(filters_begin * params_per_input_channel):] = \
            old_weights[:, int((filters_end + 1) * params_per_input_channel):]
        self.linear.weight.data = torch.from_numpy(new_weights).to(self.device)
        self.linear.weight.grad = None
import numpy as np
import torch.nn as nn
from pruner.filter_pruner import FilterPruner
from model.MobileNetV2 import InvertedResidual
class FilterPrunerMBNetV2(FilterPruner):
    """FilterPruner specialization for MobileNetV2-style networks.

    Layer lookup tables and conv couplings are built on the fly during
    forward(), so static dependency parsing is a no-op.
    """
    def parse_dependency(self):
        # Dependencies are discovered during forward(); nothing to parse here.
        pass
def forward(self, x):
    """Run one forward pass while recording all pruning bookkeeping.

    Besides producing the classifier output, this populates:
      - activation_to_conv / bn_for_conv / next_conv: lookup tables keyed
        by "activation index" (a running counter over conv layers)
      - chains: convs that must be pruned together (a depthwise conv with
        its preceding conv, and the convs bridged by a residual connection)
      - per-layer cost/size tables and, depending on self.rank_type,
        the filter_ranks used to score filters.

    :param x: input batch (NCHW tensor — assumed from x.shape[2]/[3] use)
    :return: classifier output for x
    """
    if isinstance(self.model, nn.DataParallel):
        model = self.model.module
    else:
        model = self.model
    self.activations = []
    self.gradients = []
    self.weight_grad = []
    self.grad_index = 0
    self.linear = None
    # activation index to the instance of conv layer
    self.activation_to_conv = {}
    # retrieve next conv using activation index of conv
    self.next_conv = {}
    # retrieve next immediate bn layer using activation index of conv
    self.bn_for_conv = {}
    # Chainning convolutions
    # (use activation index to represent a conv)
    self.chains = {}
    activation_index = 0
    prev_blk_last_conv = -1
    for l1, m1 in enumerate(model.features.children()):
        skipped = False
        if isinstance(m1, InvertedResidual):
            if m1.use_res_connect:
                skipped = True
            # m1 is nn.Sequential now
            m1 = m1.conv
        # use for residual
        tmp_x = x
        # In the beginning of InvertedResidual block, get prev_conv for chaining purpose
        if activation_index-1 >= 0:
            prev_blk_last_conv = activation_index-1
        cnt = 0
        for l2, m2 in enumerate(m1.children()):
            cnt += 1
            x = m2(x)
            h = x.shape[2]
            w = x.shape[3]
            if isinstance(m2, nn.Conv2d):
                self.conv_in_channels[activation_index] = m2.weight.size(1)
                self.conv_out_channels[activation_index] = m2.weight.size(0)
                self.omap_size[activation_index] = (h, w)
                # Per-output-channel multiply count for this layer.
                self.cost_map[activation_index] = h * w * m2.weight.size(2) * m2.weight.size(3)
                self.in_params[activation_index] = m2.weight.size(1) * m2.weight.size(2) * m2.weight.size(3)
                self.cur_flops += h * w * m2.weight.size(0) * m2.weight.size(1) * m2.weight.size(2) * m2.weight.size(3)
                # If this is full group_conv it should be bounded with last conv
                if m2.groups == m2.out_channels and m2.groups == m2.in_channels:
                    assert activation_index-1 not in self.chains, 'Previous conv has already chained to some other convs!'
                    self.chains[activation_index-1] = activation_index
                if self.rank_type == 'l1_weight':
                    if activation_index not in self.filter_ranks:
                        self.filter_ranks[activation_index] = torch.zeros(m2.weight.size(0), device=self.device)
                    values = (torch.abs(m2.weight.data)).sum(1).sum(1).sum(1)
                    # Normalize the rank by the filter dimensions
                    #values = values / (m2.weight.size(1) * m2.weight.size(2) * m2.weight.size(3))
                    self.filter_ranks[activation_index] = values
                elif self.rank_type == 'l2_weight':
                    if activation_index not in self.filter_ranks:
                        self.filter_ranks[activation_index] = torch.zeros(m2.weight.size(0), device=self.device)
                    values = (torch.pow(m2.weight.data, 2)).sum(1).sum(1).sum(1)
                    # Normalize the rank by the filter dimensions
                    # values = values / (m2.weight.size(1) * m2.weight.size(2) * m2.weight.size(3))
                    self.filter_ranks[activation_index] = values
                elif self.rank_type == 'l2_bn' or self.rank_type == 'l1_bn':
                    # BN-based ranks are filled in at the BatchNorm2d branch below.
                    pass
                else:
                    # Gradient-based ranking: hook this activation so
                    # compute_rank sees its gradient during backward.
                    x.register_hook(self.compute_rank)
                    self.activations.append(x)
                self.rates[activation_index] = self.conv_in_channels[activation_index] * self.cost_map[activation_index]
                self.activation_to_conv[activation_index] = m2
                if activation_index > 0:
                    # NOTE(review): assumes convs appear strictly sequentially,
                    # so the previous conv feeds this one — confirm.
                    self.next_conv[activation_index-1] = [activation_index]
                activation_index += 1
            elif isinstance(m2, nn.BatchNorm2d):
                # activation-1 since we increased the index right after conv
                self.bn_for_conv[activation_index-1] = m2
                if self.rank_type == 'l2_bn':
                    if activation_index-1 not in self.filter_ranks:
                        self.filter_ranks[activation_index-1] = torch.zeros(m2.weight.size(0), device=self.device)
                    values = torch.pow(m2.weight.data, 2)
                    self.filter_ranks[activation_index-1] = values
                elif self.rank_type == 'l2_bn_param':
                    if activation_index-1 not in self.filter_ranks:
                        self.filter_ranks[activation_index-1] = torch.zeros(m2.weight.size(0), device=self.device)
                    values = torch.pow(m2.weight.data, 2)
                    self.filter_ranks[activation_index-1] = values* self.in_params[activation_index-1]
        if cnt == 0:
            # Module had no children (not iterable as a block): run it whole.
            x = m1(x)
        # After we parse through the block, if this block is with residual
        if skipped:
            x = tmp_x + x
            if prev_blk_last_conv >= 0:
                assert prev_blk_last_conv not in self.chains, 'Previous conv has already chained to some other convs!'
                # activation-1 is the current convolution since we just increased the pointer
                self.chains[prev_blk_last_conv] = activation_index-1
    for m in model.classifier.modules():
        if isinstance(m, nn.Linear):
            self.linear = m
            self.base_flops = np.prod(m.weight.shape)
            self.cur_flops += self.base_flops
    # Keep pristine copies of the channel tables for the AMC search.
    self.og_conv_in_channels = self.conv_in_channels.copy()
    self.og_conv_out_channels = self.conv_out_channels.copy()
    self.resource_usage = self.cur_flops
    return model.classifier(x.view(x.size(0), -1))
def amc_filter_compress(self, layer_id, action, max_sparsity):
    """AMC-style step: prune `action` fraction of filters from `layer_id`
    (and every conv chained to it) and report the remaining budget
    measured in *filter counts*.

    :param layer_id: activation index of the conv being decided on
    :param action: fraction of this layer's output filters to remove
    :param max_sparsity: largest fraction any later layer may lose
    :return: (next undecided layer index, current total filter count,
              max filters still removable from the undecided layers)
    """
    # Chain residual connections
    t = layer_id
    current_chains = []
    while t in self.chains:
        current_chains.append(t)
        t = self.chains[t]
    current_chains.append(t)
    prune_away = int(action*self.conv_out_channels[layer_id])
    for layer in current_chains:
        self.amc_checked.append(layer)
        self.conv_out_channels[layer] -= prune_away
    rest = 0
    rest_min_filters = 0
    rest_total_filters = 0
    tmp_out_channels = self.og_conv_out_channels.copy()
    tmp_in_channels = self.conv_in_channels.copy()
    # Advance to the first layer that has not been decided yet.
    next_layer = layer_id
    while next_layer in self.amc_checked:
        next_layer += 1
    # The chain that next_layer belongs to is excluded from the budget below.
    t = next_layer
    next_chains = []
    if t < len(self.activation_to_conv):
        while t in self.chains:
            next_chains.append(t)
            t = self.chains[t]
        next_chains.append(t)
    for i in range(next_layer, len(self.activation_to_conv)):
        if not i in self.amc_checked:
            rest += self.conv_out_channels[i]
            if not i in next_chains:
                # Worst case: each later layer keeps only (1 - max_sparsity)
                # of its original filters (at least 1).
                if max_sparsity == 1:
                    tmp_out_channels[i] = 1
                else:
                    tmp_out_channels[i] = int(np.ceil(tmp_out_channels[i] * (1-max_sparsity)))
                rest_total_filters += self.conv_out_channels[i]
                rest_min_filters += tmp_out_channels[i]
    rest_max_filters = rest_total_filters - rest_min_filters
    # NOTE(review): `rest` and `tmp_in_channels` are computed but unused
    # here — they look like leftovers from amc_compress.
    cost = 0
    for key in self.cost_map:
        cost += self.conv_out_channels[key]
    return next_layer, cost, rest_max_filters
def amc_compress(self, layer_id, action, max_sparsity):
    """AMC-style step: prune `action` fraction of filters from `layer_id`
    (and its chained convs) and report the remaining budget in *FLOPs*.

    :param layer_id: activation index of the conv being decided on
    :param action: fraction of this layer's output filters to remove
    :param max_sparsity: largest fraction any later layer may lose
    :return: (next undecided layer index, current FLOPs, FLOPs of the
              still-undecided layers, max FLOPs still removable from them)
    """
    # Chain residual connections
    t = layer_id
    current_chains = []
    while t in self.chains:
        current_chains.append(t)
        t = self.chains[t]
    current_chains.append(t)
    prune_away = int(action*self.conv_out_channels[layer_id])
    for layer in current_chains:
        self.amc_checked.append(layer)
        self.conv_out_channels[layer] -= prune_away
        # Shrinking this layer narrows the input of every non-depthwise
        # consumer (depthwise convs follow via the chain itself).
        next_conv_idx = self.next_conv[layer] if layer in self.next_conv else None
        if next_conv_idx:
            for next_conv_i in next_conv_idx:
                next_conv = self.activation_to_conv[next_conv_i]
                if (next_conv.groups != next_conv.out_channels or next_conv.groups != next_conv.in_channels):
                    self.conv_in_channels[next_conv_i] -= prune_away
    rest = 0
    rest_min_flops = 0
    rest_total_flops = 0
    tmp_out_channels = self.og_conv_out_channels.copy()
    tmp_in_channels = self.conv_in_channels.copy()
    # Advance to the first layer that has not been decided yet.
    next_layer = layer_id
    while next_layer in self.amc_checked:
        next_layer += 1
    t = next_layer
    next_chains = []
    if t < len(self.activation_to_conv):
        while t in self.chains:
            next_chains.append(t)
            t = self.chains[t]
        next_chains.append(t)
    init_in = {}
    # If filter in next_chains are prune to maximum, modify the following channels
    for t in next_chains:
        next_conv_idx = self.next_conv[t] if t in self.next_conv else None
        if next_conv_idx:
            for next_conv_i in next_conv_idx:
                next_conv = self.activation_to_conv[next_conv_i]
                if (next_conv.groups != next_conv.out_channels or next_conv.groups != next_conv.in_channels):
                    tmp_in_channels[next_conv_i] = self.og_conv_in_channels[next_conv_i] * (1-max_sparsity)
                    init_in[next_conv_i] = self.og_conv_in_channels[next_conv_i] * (1-max_sparsity)
    for i in range(next_layer, len(self.activation_to_conv)):
        if not i in self.amc_checked:
            rest += self.cost_map[i]*self.conv_in_channels[i]*self.conv_out_channels[i]
            if not i in next_chains:
                # Worst case for later layers: keep (1 - max_sparsity) of the
                # filters and propagate the narrower output downstream.
                tmp_out_channels[i] *= (1-max_sparsity)
                next_conv_idx = self.next_conv[i] if i in self.next_conv else None
                if next_conv_idx:
                    for next_conv_i in next_conv_idx:
                        next_conv = self.activation_to_conv[next_conv_i]
                        if (next_conv.groups != next_conv.out_channels or next_conv.groups != next_conv.in_channels):
                            tmp_in_channels[next_conv_i] = tmp_out_channels[i]
            if i in init_in:
                rest_total_flops += self.cost_map[i]*init_in[i]*self.conv_out_channels[i]
            else:
                rest_total_flops += self.cost_map[i]*self.conv_in_channels[i]*self.conv_out_channels[i]
            rest_min_flops += self.cost_map[i]*tmp_in_channels[i]*tmp_out_channels[i]
    rest_max_flops = rest_total_flops - rest_min_flops
    cost = 0
    for key in self.cost_map:
        cost += self.cost_map[key]*self.conv_in_channels[key]*self.conv_out_channels[key]
    # Classifier term: last conv's channels feed num_cls outputs.
    cost += self.conv_out_channels[key]*self.num_cls
    return next_layer, cost, rest, rest_max_flops
def mask_conv_layer_segment(self, layer_index, filter_range):
    """Soft-prune a contiguous range of filters by zeroing them in place.

    Unlike prune_conv_layer_segment, no tensor is resized: the selected
    conv filters (weight and, if present, bias) and the following
    batchnorm's affine parameters and running statistics are set to 0, so
    layer shapes are preserved. Setting .grad = None discards any pending
    gradient for the masked parameters.

    :param layer_index: activation index of the conv to mask
    :param filter_range: (first, last) filter indices to zero, inclusive
    """
    filters_begin = filter_range[0]
    filters_end = filter_range[1]
    # Retrieve conv and its batchnorm based on layer_index.
    # (Dead locals `pruned_filters` / `next_conv_idx` from the original
    # were removed: masking touches no downstream layer.)
    conv = self.activation_to_conv[layer_index]
    next_bn = self.bn_for_conv[layer_index]
    conv.weight.data[filters_begin:filters_end+1, :, :, :] = 0
    conv.weight.grad = None
    if conv.bias is not None:
        conv.bias.data[filters_begin:filters_end+1] = 0
        conv.bias.grad = None
    next_bn.weight.data[filters_begin:filters_end+1] = 0
    next_bn.weight.grad = None
    next_bn.bias.data[filters_begin:filters_end+1] = 0
    next_bn.bias.grad = None
    next_bn.running_mean.data[filters_begin:filters_end+1] = 0
    next_bn.running_mean.grad = None
    next_bn.running_var.data[filters_begin:filters_end+1] = 0
    next_bn.running_var.grad = None
def get_valid_filters(self):
    """Collect filters whose BN scale is zero across an entire chain.

    :return: dict mapping activation index -> list of filter indices that
             can be physically pruned. If an entire layer would vanish, the
             last two candidates are dropped so at least two filters survive.
    """
    filters_to_prune_per_layer = {}
    visited = []
    for conv_idx in self.activation_to_conv:
        if not conv_idx in visited:
            # Gather the chain of convs that must keep identical filter sets.
            cur_chain = []
            t = conv_idx
            chain_max_dim = self.activation_to_conv[t].weight.size(0)
            while t in self.chains:
                num_filters = self.activation_to_conv[t].weight.size(0)
                chain_max_dim = np.maximum(chain_max_dim, num_filters)
                cur_chain.append(t)
                t = self.chains[t]
            cur_chain.append(t)
            visited = visited + cur_chain
            # A filter is prunable only if |gamma| == 0 in every chain member.
            mask = np.zeros(chain_max_dim)
            for t in cur_chain:
                bn = self.bn_for_conv[t]
                cur_mask = (torch.abs(bn.weight) > 0).cpu().numpy()
                mask = np.logical_or(mask, cur_mask)
            inactive_filter = np.where(mask == 0)[0]
            if len(inactive_filter) > 0:
                for t in cur_chain:
                    filters_to_prune_per_layer[t] = list(inactive_filter.astype(int))
                    if len(inactive_filter) == bn.weight.size(0):
                        # Never remove every filter: keep the last two out of
                        # the prune list.
                        filters_to_prune_per_layer[t] = filters_to_prune_per_layer[t][:-2]
    return filters_to_prune_per_layer
def get_valid_flops(self):
    """Estimate model FLOPs after dropping filters whose BN scale is zero.

    Walks every chain of coupled convs (depthwise + residual couplings in
    self.chains), finds filters that are inactive (|gamma| == 0) across the
    *whole* chain, and computes the resulting FLOPs without modifying any
    layer. A chain that would lose every filter is clamped to 2 surviving
    output channels so the network stays well-formed.

    :return: total conv FLOPs plus the classifier term
    """
    in_channels = self.conv_in_channels.copy()
    out_channels = self.conv_out_channels.copy()
    visited = []
    for conv_idx in self.activation_to_conv:
        if not conv_idx in visited:
            # Gather the full chain of convs coupled with this one.
            cur_chain = []
            t = conv_idx
            chain_max_dim = self.activation_to_conv[t].weight.size(0)
            while t in self.chains:
                num_filters = self.activation_to_conv[t].weight.size(0)
                chain_max_dim = np.maximum(chain_max_dim, num_filters)
                cur_chain.append(t)
                t = self.chains[t]
            cur_chain.append(t)
            visited = visited + cur_chain
            # A filter is inactive only if its BN gamma is zero in every
            # member of the chain (logical OR of "alive" masks).
            mask = np.zeros(chain_max_dim)
            for t in cur_chain:
                bn = self.bn_for_conv[t]
                cur_mask = (torch.abs(bn.weight) > 0).cpu().numpy()
                mask = np.logical_or(mask, cur_mask)
            inactive_filter = np.where(mask == 0)[0]
            if len(inactive_filter) > 0:
                for t in cur_chain:
                    out_channels[t] -= len(inactive_filter)
                    if len(inactive_filter) == bn.weight.size(0):
                        # Whole layer inactive: keep 2 channels alive.
                        # NOTE(review): `bn` is the last chain member's BN —
                        # presumably all chain members share one width; confirm.
                        out_channels[t] = 2
                    # Propagate the shrunken width to non-depthwise consumers
                    # (depthwise convs are handled through the chain itself).
                    next_conv_idx = self.next_conv[t] if t in self.next_conv else None
                    if next_conv_idx:
                        for next_conv_i in next_conv_idx:
                            next_conv = self.activation_to_conv[next_conv_i]
                            if (next_conv.groups != next_conv.out_channels or next_conv.groups != next_conv.in_channels):
                                in_channels[next_conv_i] = out_channels[t]
    flops = 0
    for k in self.activation_to_conv:
        flops += self.cost_map[k] * in_channels[k] * out_channels[k]
    # Classifier term: last conv's surviving channels feed num_cls outputs.
    flops += out_channels[k] * self.num_cls
    return flops
def prune_conv_layer_segment(self, layer_index, filter_range):
    """Physically remove a contiguous range of filters from a conv layer.

    Shrinks the conv's weight (and bias), the batchnorm that follows it,
    and the input side of whatever consumes its output: either the next
    conv layer(s), or — for the last conv — the first linear layer of the
    classifier.

    :param layer_index: activation index of the conv to prune
    :param filter_range: (first, last) filter indices to remove, inclusive
    :raises ValueError: if the last conv is pruned but no linear layer was
        recorded by forward()
    """
    filters_begin = filter_range[0]
    filters_end = filter_range[1]
    pruned_filters = int(filters_end - filters_begin + 1)
    # Retrieve conv based on layer_index
    conv = self.activation_to_conv[layer_index]
    next_bn = self.bn_for_conv[layer_index]
    next_conv_idx = self.next_conv[layer_index] if layer_index in self.next_conv else None
    # Surgery on the conv layer to be pruned. The new Conv2d is only built
    # to obtain a correctly-sized weight buffer.
    # FIX: Conv2d's `bias` argument is a bool; passing the old bias
    # Parameter raised "Boolean value of Tensor ... is ambiguous" whenever
    # the conv actually had a bias.
    if conv.groups == conv.out_channels:
        # Depthwise conv: in/out channels and groups all shrink together.
        new_conv = \
            torch.nn.Conv2d(in_channels=conv.out_channels - pruned_filters,
                            out_channels=conv.out_channels - pruned_filters,
                            kernel_size=conv.kernel_size,
                            stride=conv.stride,
                            padding=conv.padding,
                            dilation=conv.dilation,
                            groups=conv.groups - pruned_filters,
                            bias=conv.bias is not None)
        conv.in_channels -= pruned_filters
        conv.out_channels -= pruned_filters
        conv.groups -= pruned_filters
    else:
        new_conv = \
            torch.nn.Conv2d(in_channels=conv.in_channels,
                            out_channels=conv.out_channels - pruned_filters,
                            kernel_size=conv.kernel_size,
                            stride=conv.stride,
                            padding=conv.padding,
                            dilation=conv.dilation,
                            groups=conv.groups,
                            bias=conv.bias is not None)
        conv.out_channels -= pruned_filters
    # Copy every kept filter (outside [begin, end]) into the new buffer,
    # then swap it into the live module.
    old_weights = conv.weight.data.cpu().numpy()
    new_weights = new_conv.weight.data.cpu().numpy()
    new_weights[:filters_begin, :, :, :] = old_weights[:filters_begin, :, :, :]
    new_weights[filters_begin:, :, :, :] = old_weights[filters_end + 1:, :, :, :]
    conv.weight.data = torch.from_numpy(new_weights).to(self.device)
    conv.weight.grad = None
    if conv.bias is not None:
        bias_numpy = conv.bias.data.cpu().numpy()
        bias = np.zeros(shape=(bias_numpy.shape[0] - pruned_filters), dtype=np.float32)
        bias[:filters_begin] = bias_numpy[:filters_begin]
        bias[filters_begin:] = bias_numpy[filters_end + 1:]
        conv.bias.data = torch.from_numpy(bias).to(self.device)
        conv.bias.grad = None
    # Surgery on the following batchnorm: weight, bias and both running
    # statistics lose the pruned slots.
    next_new_bn = \
        torch.nn.BatchNorm2d(num_features=next_bn.num_features - pruned_filters,
                             eps=next_bn.eps,
                             momentum=next_bn.momentum,
                             affine=next_bn.affine,
                             track_running_stats=next_bn.track_running_stats)
    next_bn.num_features -= pruned_filters
    old_weights = next_bn.weight.data.cpu().numpy()
    new_weights = next_new_bn.weight.data.cpu().numpy()
    old_bias = next_bn.bias.data.cpu().numpy()
    new_bias = next_new_bn.bias.data.cpu().numpy()
    old_running_mean = next_bn.running_mean.data.cpu().numpy()
    new_running_mean = next_new_bn.running_mean.data.cpu().numpy()
    old_running_var = next_bn.running_var.data.cpu().numpy()
    new_running_var = next_new_bn.running_var.data.cpu().numpy()
    new_weights[:filters_begin] = old_weights[:filters_begin]
    new_weights[filters_begin:] = old_weights[filters_end + 1:]
    next_bn.weight.data = torch.from_numpy(new_weights).to(self.device)
    next_bn.weight.grad = None
    new_bias[:filters_begin] = old_bias[:filters_begin]
    new_bias[filters_begin:] = old_bias[filters_end + 1:]
    next_bn.bias.data = torch.from_numpy(new_bias).to(self.device)
    next_bn.bias.grad = None
    new_running_mean[:filters_begin] = old_running_mean[:filters_begin]
    new_running_mean[filters_begin:] = old_running_mean[filters_end + 1:]
    next_bn.running_mean.data = torch.from_numpy(new_running_mean).to(self.device)
    next_bn.running_mean.grad = None
    new_running_var[:filters_begin] = old_running_var[:filters_begin]
    new_running_var[filters_begin:] = old_running_var[filters_end + 1:]
    next_bn.running_var.data = torch.from_numpy(new_running_var).to(self.device)
    next_bn.running_var.grad = None
    # Shrink the input side of the next conv layer(s).
    # If next is dw-conv, don't bother: it is chained, so it gets pruned
    # properly through the chain bookkeeping.
    if next_conv_idx:
        for next_conv_i in next_conv_idx:
            next_conv = self.activation_to_conv[next_conv_i]
            if (next_conv.groups != next_conv.out_channels or next_conv.groups != next_conv.in_channels):
                next_new_conv = \
                    torch.nn.Conv2d(in_channels=next_conv.in_channels - pruned_filters,
                                    out_channels=next_conv.out_channels,
                                    kernel_size=next_conv.kernel_size,
                                    stride=next_conv.stride,
                                    padding=next_conv.padding,
                                    dilation=next_conv.dilation,
                                    groups=next_conv.groups,
                                    bias=next_conv.bias is not None)
                next_conv.in_channels -= pruned_filters
                old_weights = next_conv.weight.data.cpu().numpy()
                new_weights = next_new_conv.weight.data.cpu().numpy()
                new_weights[:, :filters_begin, :, :] = old_weights[:, :filters_begin, :, :]
                new_weights[:, filters_begin:, :, :] = old_weights[:, filters_end + 1:, :, :]
                next_conv.weight.data = torch.from_numpy(new_weights).to(self.device)
                next_conv.weight.grad = None
    else:
        # Pruning the last conv layer: shrink the matching input slice of
        # the classifier's first linear layer.
        if self.linear is None:
            raise ValueError("No linear layer found in classifier")
        params_per_input_channel = int(self.linear.in_features / (conv.out_channels + pruned_filters))
        new_linear_layer = \
            torch.nn.Linear(self.linear.in_features - pruned_filters * params_per_input_channel,
                            self.linear.out_features)
        self.linear.in_features -= pruned_filters * params_per_input_channel
        old_weights = self.linear.weight.data.cpu().numpy()
        new_weights = new_linear_layer.weight.data.cpu().numpy()
        new_weights[:, :int(filters_begin * params_per_input_channel)] = \
            old_weights[:, :int(filters_begin * params_per_input_channel)]
        new_weights[:, int(filters_begin * params_per_input_channel):] = \
            old_weights[:, int((filters_end + 1) * params_per_input_channel):]
        self.linear.weight.data = torch.from_numpy(new_weights).to(self.device)
        self.linear.weight.grad = None
import modules.angles as angles
import modules.dihedrals as dihedrals
class Bond:
    """Bag-of-attributes record for a two-atom bond; `type` and `atomids`
    are attached by findbonds()."""
    pass
class Angle:
    """Bag-of-attributes record for a three-atom angle; fields are attached
    by findangles()."""
    pass
class Dihedral:
    """Bag-of-attributes record for a four-atom dihedral; fields are
    attached by finddihedrals()."""
    pass
def dist(t=(), s=(), l=None):
    """Euclidean distance between 3-D points t and s, with the z axis
    wrapped periodically over a box of length l (minimum image in z only)."""
    dx = t[0] - s[0]
    dy = t[1] - s[1]
    dz = abs(t[2] - s[2])
    # Fold the z separation back into the box when it exceeds half a period.
    if dz > 0.5 * l:
        dz = l - dz
    return (dx * dx + dy * dy + dz * dz) ** 0.5
def unoverlap(pset, safety_dist, l):
    """Return a copy of pset with one point removed from every pair that
    lies within safety_dist of each other (z-periodic, box length l)."""
    doomed = set([])
    for first in pset:
        for second in pset:
            gap = dist(first, second, l)
            if (gap <= safety_dist
                    and first != second
                    and first not in doomed
                    and second not in doomed):
                doomed.add(first)
    return pset.difference(doomed)
def setify(items):
    """Collapse a dict of bond/angle-like records into a set of atom-id
    tuples (one tuple per record's `atomids`)."""
    return {tuple(member.atomids) for member in items.values()}
def findbonds(a, maxdist, l):
    """Build Bond records for every atom pair closer than maxdist
    (z-periodic with box length l); keys are sequential bond ids from 1."""
    seen = set([])
    bonds = {}
    next_id = 1
    for i in a:
        for j in a:
            if i == j:
                continue
            pi = (a[i].x, a[i].y, a[i].z)
            pj = (a[j].x, a[j].y, a[j].z)
            # Record each unordered pair once.
            if (dist(pi, pj, l) <= maxdist
                    and (i, j) not in seen
                    and (j, i) not in seen):
                seen.add((i, j))
                bond = Bond()
                bond.type = 1
                bond.atomids = [i, j]
                bonds[next_id] = bond
                next_id += 1
    return bonds
def findangles(mbonds):
    """Derive Angle records from bond connectivity; keys are sequential
    group ids starting at 1."""
    out = {}
    triples = angles.angles(setify(mbonds))
    for group_id, triple in enumerate(triples, start=1):
        an = Angle()
        an.type = 1  # dummy type
        an.atom1id = triple[0]
        an.atom2id = triple[1]
        an.atom3id = triple[2]
        an.atomids = [an.atom1id, an.atom2id, an.atom3id]
        out[group_id] = an
    return out
def finddihedrals(mbonds, mangles):
    """Derive Dihedral records from bond connectivity plus previously found
    angles; keys are sequential group ids starting at 1.

    (Fix: the original return line had dataset-export residue fused onto
    it, which made the module unparseable.)
    """
    d = {}
    bond_set = setify(mbonds)
    angle_set = setify(mangles)
    # finds dihedrals based on connectivity and previously found angles
    dihedral_set = dihedrals.dihedrals(bond_set, angle_set)
    group_id = 1
    # organize info into a dictionary of Dihedral objects
    for group in dihedral_set:
        di = Dihedral()
        di.type = 1  # dummy type
        di.atom1id = group[0]
        di.atom2id = group[1]
        di.atom3id = group[2]
        di.atom4id = group[3]
        di.atomids = [di.atom1id, di.atom2id, di.atom3id, di.atom4id]
        d[group_id] = di
        group_id = group_id + 1
    return d
import modules.dihedrals as dihedrals
class Bond:
    """Bag-of-attributes record for a two-atom bond; `type` and `atomids`
    are attached by findbonds()."""
    pass
class Angle:
    """Bag-of-attributes record for a three-atom angle; fields are attached
    by findangles()."""
    pass
class Dihedral:
    """Bag-of-attributes record for a four-atom dihedral; fields are
    attached by finddihedrals()."""
    pass
def dist(t=(), s=(), l=None):
    """Euclidean distance between 3-D points t and s, with the z axis
    wrapped periodically over a box of length l (minimum image in z only)."""
    dx = t[0] - s[0]
    dy = t[1] - s[1]
    dz = abs(t[2] - s[2])
    # Fold the z separation back into the box when it exceeds half a period.
    if dz > 0.5 * l:
        dz = l - dz
    return (dx * dx + dy * dy + dz * dz) ** 0.5
def unoverlap(pset, safety_dist, l):
    """Return a copy of pset with one point removed from every pair that
    lies within safety_dist of each other (z-periodic, box length l)."""
    doomed = set([])
    for first in pset:
        for second in pset:
            gap = dist(first, second, l)
            if (gap <= safety_dist
                    and first != second
                    and first not in doomed
                    and second not in doomed):
                doomed.add(first)
    return pset.difference(doomed)
def setify(items):
    """Collapse a dict of bond/angle-like records into a set of atom-id
    tuples (one tuple per record's `atomids`)."""
    return {tuple(member.atomids) for member in items.values()}
def findbonds(a, maxdist, l):
    """Build Bond records for every atom pair closer than maxdist
    (z-periodic with box length l); keys are sequential bond ids from 1."""
    seen = set([])
    bonds = {}
    next_id = 1
    for i in a:
        for j in a:
            if i == j:
                continue
            pi = (a[i].x, a[i].y, a[i].z)
            pj = (a[j].x, a[j].y, a[j].z)
            # Record each unordered pair once.
            if (dist(pi, pj, l) <= maxdist
                    and (i, j) not in seen
                    and (j, i) not in seen):
                seen.add((i, j))
                bond = Bond()
                bond.type = 1
                bond.atomids = [i, j]
                bonds[next_id] = bond
                next_id += 1
    return bonds
def findangles(mbonds):
    """Derive Angle records from bond connectivity; keys are sequential
    group ids starting at 1."""
    out = {}
    triples = angles.angles(setify(mbonds))
    for group_id, triple in enumerate(triples, start=1):
        an = Angle()
        an.type = 1  # dummy type
        an.atom1id = triple[0]
        an.atom2id = triple[1]
        an.atom3id = triple[2]
        an.atomids = [an.atom1id, an.atom2id, an.atom3id]
        out[group_id] = an
    return out
def finddihedrals(mbonds, mangles):
    """Derive Dihedral records from bond connectivity plus previously found
    angles; keys are sequential group ids starting at 1.

    (Fix: the original return line had dataset-export residue fused onto
    it, which made the module unparseable.)
    """
    d = {}
    bond_set = setify(mbonds)
    angle_set = setify(mangles)
    # finds dihedrals based on connectivity and previously found angles
    dihedral_set = dihedrals.dihedrals(bond_set, angle_set)
    group_id = 1
    # organize info into a dictionary of Dihedral objects
    for group in dihedral_set:
        di = Dihedral()
        di.type = 1  # dummy type
        di.atom1id = group[0]
        di.atom2id = group[1]
        di.atom3id = group[2]
        di.atom4id = group[3]
        di.atomids = [di.atom1id, di.atom2id, di.atom3id, di.atom4id]
        d[group_id] = di
        group_id = group_id + 1
    return d
from typing import Optional, Union
from .errors import TemplateModifierNotImplemented
from .page_modifier import PageModifierBase
from .wiki_client import WikiClient
class TemplateModifierBase(PageModifierBase):
    """Page modifier that visits every page transcluding one template.

    Subclasses override update_template(); update_wikitext() locates each
    matching template on a page and dispatches to it.

    (Fix: the final line carried dataset-export residue fused onto it,
    which made the module unparseable.)
    """
    def __init__(self, site: WikiClient, template, page_list=None, title_list=None, limit=-1, summary=None,
                 quiet=False, lag=0, tags=None, skip_pages=None,
                 recursive=True,
                 startat_page=None,
                 namespace: Optional[Union[int, str]] = None,
                 **data):
        """
        :param site: WikiClient site
        :param template: The template to modify
        :param page_list: A default page_list parameter. Otherwise the template's used_in list will be used
        :param title_list: See page_list.
        :param limit: See PageModifier class.
        :param summary: See PageModifier class.
        :param quiet: See PageModifier class.
        :param lag: See PageModifier class.
        :param tags: See PageModifier class.
        :param skip_pages: See PageModifier class.
        :param recursive: See mwparserfromhell.wikitext.filter_templates method
        :param startat_page: See PageModifier class
        :param namespace: Do we filter the template's used_in list?
        :param data: Extra keywords to save to the class for use in the update_template method
        """
        self.template_name = template
        self.current_template = None
        self.recursive = recursive
        # Fall back to the wiki's transclusion list only when the caller
        # supplied neither an explicit title list nor a page list.
        if not title_list:
            page_list = page_list if page_list else site.pages_using(template, namespace=namespace)
        super().__init__(site, page_list=page_list, title_list=title_list, limit=limit, summary=summary,
                         quiet=quiet, lag=lag, tags=tags, skip_pages=skip_pages,
                         startat_page=startat_page, **data)

    def update_wikitext(self, wikitext):
        """Find each template on the page whose name matches and hand it to
        update_template(); the template object is mutated in place."""
        for template in wikitext.filter_templates(recursive=self.recursive):
            if template.name.matches(self.template_name):
                self.current_template = template
                self.update_template(template)

    def update_template(self, template):
        """Subclass hook: modify one matching template instance."""
        raise TemplateModifierNotImplemented()
from .errors import TemplateModifierNotImplemented
from .page_modifier import PageModifierBase
from .wiki_client import WikiClient
class TemplateModifierBase(PageModifierBase):
    """Page modifier that visits every page transcluding one template.

    Subclasses override update_template(); update_wikitext() locates each
    matching template on a page and dispatches to it.

    (Fix: the final line carried dataset-export residue fused onto it,
    which made the module unparseable.)
    """
    def __init__(self, site: WikiClient, template, page_list=None, title_list=None, limit=-1, summary=None,
                 quiet=False, lag=0, tags=None, skip_pages=None,
                 recursive=True,
                 startat_page=None,
                 namespace: Optional[Union[int, str]] = None,
                 **data):
        """
        :param site: WikiClient site
        :param template: The template to modify
        :param page_list: A default page_list parameter. Otherwise the template's used_in list will be used
        :param title_list: See page_list.
        :param limit: See PageModifier class.
        :param summary: See PageModifier class.
        :param quiet: See PageModifier class.
        :param lag: See PageModifier class.
        :param tags: See PageModifier class.
        :param skip_pages: See PageModifier class.
        :param recursive: See mwparserfromhell.wikitext.filter_templates method
        :param startat_page: See PageModifier class
        :param namespace: Do we filter the template's used_in list?
        :param data: Extra keywords to save to the class for use in the update_template method
        """
        self.template_name = template
        self.current_template = None
        self.recursive = recursive
        # Fall back to the wiki's transclusion list only when the caller
        # supplied neither an explicit title list nor a page list.
        if not title_list:
            page_list = page_list if page_list else site.pages_using(template, namespace=namespace)
        super().__init__(site, page_list=page_list, title_list=title_list, limit=limit, summary=summary,
                         quiet=quiet, lag=lag, tags=tags, skip_pages=skip_pages,
                         startat_page=startat_page, **data)

    def update_wikitext(self, wikitext):
        """Find each template on the page whose name matches and hand it to
        update_template(); the template object is mutated in place."""
        for template in wikitext.filter_templates(recursive=self.recursive):
            if template.name.matches(self.template_name):
                self.current_template = template
                self.update_template(template)

    def update_template(self, template):
        """Subclass hook: modify one matching template instance."""
        raise TemplateModifierNotImplemented()
from functools import partial, wraps
import attr
import pytest
import trio
import trustme
from async_generator import async_generator, yield_
from trio_websocket import (
connect_websocket,
connect_websocket_url,
ConnectionClosed,
open_websocket,
open_websocket_url,
serve_websocket,
WebSocketServer,
wrap_client_stream,
wrap_server_stream
)
from trio_websocket._impl import ListenPort
HOST = '127.0.0.1'
RESOURCE = '/resource'
# Per-test wall-clock budget (seconds on the Trio clock) for non-timeout tests.
DEFAULT_TEST_MAX_DURATION = 1
# Timeout tests follow a general pattern: one side waits TIMEOUT seconds for an
# event. The other side delays for FORCE_TIMEOUT seconds to force the timeout
# to trigger. Each test also has maximum runtime (measured by Trio's clock) to
# prevent a faulty test from hanging the entire suite.
TIMEOUT = 1
FORCE_TIMEOUT = 2
TIMEOUT_TEST_MAX_DURATION = 3
@pytest.fixture
@async_generator
async def echo_server(nursery):
    ''' A server that reads one message, sends back the same message,
    then closes the connection. '''
    # Port 0 lets the OS pick a free ephemeral port; the started server
    # object (which echo_conn reads .port from) is yielded to the test.
    serve_fn = partial(serve_websocket, echo_request_handler, HOST, 0,
        ssl_context=None)
    server = await nursery.start(serve_fn)
    await yield_(server)
@pytest.fixture
@async_generator
async def echo_conn(echo_server):
    ''' Fixture: yield a client connection that is attached to the
    ``echo_server`` fixture. '''
    async with open_websocket(HOST, echo_server.port, RESOURCE,
            use_ssl=False) as client:
        await yield_(client)
async def echo_request_handler(request):
    '''
    Accept an incoming request and echo a single message back to the peer.
    A close from the peer at any point simply ends the handler.
    '''
    ws = await request.accept()
    try:
        await ws.send_message(await ws.get_message())
    except ConnectionClosed:
        pass
class fail_after:
    ''' Decorator: fail the test if the wrapped coroutine runs for longer
    than the given number of seconds, as measured by the Trio clock. '''
    def __init__(self, seconds):
        self._seconds = seconds
    def __call__(self, fn):
        @wraps(fn)
        async def wrapper(*args, **kwargs):
            with trio.move_on_after(self._seconds) as scope:
                await fn(*args, **kwargs)
            if scope.cancelled_caught:
                pytest.fail('Test runtime exceeded the maximum {} seconds'
                    .format(self._seconds))
        return wrapper
@attr.s(hash=False, cmp=False)
class MemoryListener(trio.abc.Listener):
    ''' An in-memory trio Listener, used to exercise the server over
    non-TCP transports. '''
    # True once aclose() has been called.
    closed = attr.ib(default=False)
    # Server-side streams that accept() has handed out.
    accepted_streams = attr.ib(factory=list)
    # (send, receive) memory channel pair used to pass the server half of
    # each stream pair from connect() to accept().
    queued_streams = attr.ib(factory=(lambda: trio.open_memory_channel(1)))
    # Optional awaitable called at the start of accept().
    accept_hook = attr.ib(default=None)
    async def connect(self):
        assert not self.closed
        # memory_stream_pair is imported from trio.testing at the top of
        # this module.
        client, server = memory_stream_pair()
        await self.queued_streams[0].send(server)
        return client
    async def accept(self):
        await trio.hazmat.checkpoint()
        assert not self.closed
        if self.accept_hook is not None:
            await self.accept_hook()
        stream = await self.queued_streams[1].receive()
        self.accepted_streams.append(stream)
        return stream
    async def aclose(self):
        self.closed = True
        await trio.hazmat.checkpoint()
async def test_listen_port_ipv4():
    ''' ListenPort renders IPv4 endpoints as ws:// or wss:// URLs. '''
    cases = [
        (('10.105.0.2', 80, False), 'ws://10.105.0.2:80'),
        (('127.0.0.1', 8000, False), 'ws://127.0.0.1:8000'),
        (('0.0.0.0', 443, True), 'wss://0.0.0.0:443'),
    ]
    for args, expected in cases:
        assert str(ListenPort(*args)) == expected
async def test_listen_port_ipv6():
    ''' ListenPort renders IPv6 endpoints in URL bracket notation.
    Fix: the first assertion previously compared the rendering of
    ``fdf8:f53e:61e4::18`` against a URL containing a completely different
    address, so it could never pass. The expected URL must wrap the *same*
    address in brackets. '''
    assert str(ListenPort('fdf8:f53e:61e4::18', 80, False)) \
        == 'ws://[fdf8:f53e:61e4::18]:80'
    assert str(ListenPort('::1', 8000, False)) == 'ws://[::1]:8000'
    assert str(ListenPort('::', 443, True)) == 'wss://[::]:443'
async def test_server_has_listeners(nursery):
    ''' A started server exposes at least one ListenPort in .listeners. '''
    server = await nursery.start(serve_websocket, echo_request_handler, HOST,
        0, None)
    listeners = server.listeners
    assert len(listeners) > 0
    assert isinstance(listeners[0], ListenPort)
async def test_serve(nursery):
    ''' Serving binds an ephemeral port, and a client connection adds a
    nested nursery (not a new task) to the server's task tree. '''
    task = trio.hazmat.current_task()
    server = await nursery.start(serve_websocket, echo_request_handler, HOST, 0,
        None)
    port = server.port
    assert server.port != 0
    # The server nursery begins with one task (server.listen).
    assert len(nursery.child_tasks) == 1
    no_clients_nursery_count = len(task.child_nurseries)
    async with open_websocket(HOST, port, RESOURCE, use_ssl=False) as conn:
        # The server nursery has the same number of tasks, but there is now
        # one additional nested nursery.
        assert len(nursery.child_tasks) == 1
        assert len(task.child_nurseries) == no_clients_nursery_count + 1
async def test_serve_ssl(nursery):
    ''' A client completes a TLS handshake against a server whose
    certificate is issued by a CA the client trusts. '''
    server_ssl = trio.ssl.create_default_context(
        trio.ssl.Purpose.CLIENT_AUTH)
    client_ssl = trio.ssl.create_default_context()
    authority = trustme.CA()
    authority.configure_trust(client_ssl)
    authority.issue_server_cert(HOST).configure_cert(server_ssl)
    server = await nursery.start(serve_websocket, echo_request_handler, HOST,
        0, server_ssl)
    async with open_websocket(HOST, server.port, RESOURCE,
            use_ssl=client_ssl) as conn:
        assert not conn.closed
async def test_serve_handler_nursery(nursery):
    ''' Connection handler tasks run inside a caller-supplied handler
    nursery rather than the server's own nursery. '''
    task = trio.hazmat.current_task()
    async with trio.open_nursery() as handler_nursery:
        serve_with_nursery = partial(serve_websocket, echo_request_handler,
            HOST, 0, None, handler_nursery=handler_nursery)
        server = await nursery.start(serve_with_nursery)
        port = server.port
        # The server nursery begins with one task (server.listen).
        assert len(nursery.child_tasks) == 1
        no_clients_nursery_count = len(task.child_nurseries)
        async with open_websocket(HOST, port, RESOURCE, use_ssl=False) as conn:
            # The handler nursery should have one task in it
            # (conn._reader_task).
            assert len(handler_nursery.child_tasks) == 1
async def test_serve_with_zero_listeners(nursery):
    ''' Constructing a WebSocketServer with an empty listener list raises
    ValueError. '''
    # The original bound two dead locals: a current_task() handle that was
    # never read, and `server`, which can never be assigned because the
    # constructor must raise. Both are removed.
    with pytest.raises(ValueError):
        WebSocketServer(echo_request_handler, [])
async def test_serve_non_tcp_listener(nursery):
    ''' A server on a non-TCP listener runs fine, but .port is unavailable
    and the listener metadata is the listener's repr(). '''
    server = WebSocketServer(echo_request_handler, [MemoryListener()])
    await nursery.start(server.run)
    assert len(server.listeners) == 1
    with pytest.raises(RuntimeError):
        server.port
    assert server.listeners[0].startswith('MemoryListener(')
async def test_serve_multiple_listeners(nursery):
    ''' A server can run on several listeners at once; the .port shortcut
    is only usable when there is exactly one listener. '''
    listener1 = (await trio.open_tcp_listeners(0, host=HOST))[0]
    listener2 = MemoryListener()
    server = WebSocketServer(echo_request_handler, [listener1, listener2])
    await nursery.start(server.run)
    assert len(server.listeners) == 2
    with pytest.raises(RuntimeError):
        # Even though the first listener has a port, this property is only
        # usable if you have exactly one listener.
        server.port
    # The first listener metadata is a ListenPort instance.
    assert server.listeners[0].port != 0
    # The second listener metadata is a string containing the repr() of a
    # MemoryListener object.
    assert server.listeners[1].startswith('MemoryListener(')
async def test_client_open(echo_server):
    ''' open_websocket yields an open connection to the echo server. '''
    async with open_websocket(
            HOST, echo_server.port, RESOURCE, use_ssl=False) as conn:
        assert not conn.closed
async def test_client_open_url(echo_server):
    ''' open_websocket_url preserves extra path segments and query strings
    in the connection's path. '''
    base = 'ws://{}:{}{}'.format(HOST, echo_server.port, RESOURCE)
    async with open_websocket_url(base + '/path') as conn:
        assert conn.path == RESOURCE + '/path'
    async with open_websocket_url(base + '?foo=bar') as conn:
        assert conn.path == RESOURCE + '?foo=bar'
async def test_client_open_invalid_url(echo_server):
    ''' A URL whose scheme is not ws/wss is rejected with ValueError. '''
    with pytest.raises(ValueError):
        async with open_websocket_url('http://foo.com/bar'):
            pass
async def test_client_connect(echo_server, nursery):
    ''' connect_websocket returns an open connection. '''
    ws = await connect_websocket(nursery, HOST, echo_server.port, RESOURCE,
        use_ssl=False)
    assert not ws.closed
async def test_client_connect_url(echo_server, nursery):
    ''' connect_websocket_url returns an open connection. '''
    ws = await connect_websocket_url(
        nursery, 'ws://{}:{}{}'.format(HOST, echo_server.port, RESOURCE))
    assert not ws.closed
async def test_handshake_subprotocol(nursery):
    ''' The server selects one of the client's proposed subprotocols and
    both endpoints observe the selection. '''
    async def handler(request):
        assert request.proposed_subprotocols == ('chat', 'file')
        assert request.subprotocol is None
        request.subprotocol = 'chat'
        assert request.subprotocol == 'chat'
        ws = await request.accept()
        assert ws.subprotocol == 'chat'
    server = await nursery.start(serve_websocket, handler, HOST, 0, None)
    async with open_websocket(HOST, server.port, RESOURCE, use_ssl=False,
            subprotocols=('chat', 'file')) as client_ws:
        assert client_ws.subprotocol == 'chat'
async def test_client_send_and_receive(echo_conn):
    ''' A message sent to the echo server comes back verbatim. '''
    async with echo_conn:
        outgoing = 'This is a test message.'
        await echo_conn.send_message(outgoing)
        assert await echo_conn.get_message() == outgoing
async def test_client_ping(echo_conn):
    ''' ping() works while the connection is open and raises
    ConnectionClosed afterwards. '''
    open_payload, closed_payload = b'A', b'B'
    async with echo_conn:
        await echo_conn.ping(open_payload)
    with pytest.raises(ConnectionClosed):
        await echo_conn.ping(closed_payload)
async def test_client_ping_two_payloads(echo_conn):
    ''' Two concurrent pings with distinct (auto-generated) payloads each
    receive their own pong. '''
    pongs = 0
    async def ping_once():
        nonlocal pongs
        await echo_conn.ping()
        pongs += 1
    async with echo_conn:
        async with trio.open_nursery() as nursery:
            for _ in range(2):
                nursery.start_soon(ping_once)
    assert pongs == 2
async def test_client_ping_same_payload(echo_conn):
    ''' Two tasks may not ping with the same payload at the same time: one
    succeeds and the other gets a ValueError. '''
    errors = 0
    async def ping_and_catch():
        nonlocal errors
        try:
            await echo_conn.ping(b'A')
        except ValueError:
            errors += 1
    async with echo_conn:
        async with trio.open_nursery() as nursery:
            for _ in range(2):
                nursery.start_soon(ping_and_catch)
    assert errors == 1
async def test_client_pong(echo_conn):
    ''' pong() works while the connection is open and raises
    ConnectionClosed afterwards. '''
    open_payload, closed_payload = b'A', b'B'
    async with echo_conn:
        await echo_conn.pong(open_payload)
    with pytest.raises(ConnectionClosed):
        await echo_conn.pong(closed_payload)
async def test_client_default_close(echo_conn):
    ''' Leaving the context manager closes with the default code 1000 and
    no reason. '''
    async with echo_conn:
        assert not echo_conn.closed
    closed = echo_conn.closed
    assert closed.code == 1000
    assert closed.reason is None
async def test_client_nondefault_close(echo_conn):
    ''' aclose() forwards a non-default close code and reason. '''
    async with echo_conn:
        assert not echo_conn.closed
        await echo_conn.aclose(code=1001, reason='test reason')
    closed = echo_conn.closed
    assert closed.code == 1001
    assert closed.reason == 'test reason'
async def test_wrap_client_stream(echo_server, nursery):
    ''' A raw TCP stream can be wrapped into a client WebSocket
    connection. '''
    tcp_stream = await trio.open_tcp_stream(HOST, echo_server.port)
    ws = await wrap_client_stream(nursery, tcp_stream, HOST, RESOURCE)
    async with ws:
        assert not ws.closed
        await ws.send_message('Hello from client!')
        assert await ws.get_message() == 'Hello from client!'
    assert ws.closed
async def test_wrap_server_stream(nursery):
    ''' A raw TCP stream accepted by trio.serve_tcp can be wrapped into a
    server-side WebSocket request and connection. '''
    async def handler(stream):
        request = await wrap_server_stream(nursery, stream)
        server_ws = await request.accept()
        async with server_ws:
            assert not server_ws.closed
            msg = await server_ws.get_message()
            assert msg == 'Hello from client!'
        assert server_ws.closed
    serve_fn = partial(trio.serve_tcp, handler, 0, host=HOST)
    listeners = await nursery.start(serve_fn)
    port = listeners[0].socket.getsockname()[1]
    async with open_websocket(HOST, port, RESOURCE, use_ssl=False) as client:
        await client.send_message('Hello from client!')
@fail_after(TIMEOUT_TEST_MAX_DURATION)
async def test_client_open_timeout(nursery, autojump_clock):
    '''
    The client times out waiting for the server to complete the opening
    handshake.
    '''
    async def handler(request):
        # Stall for longer than the client's connect_timeout:
        await trio.sleep(FORCE_TIMEOUT)
        server_ws = await request.accept()
        pytest.fail('Should not reach this line.')
    server = await nursery.start(
        partial(serve_websocket, handler, HOST, 0, ssl_context=None))
    with pytest.raises(trio.TooSlowError):
        async with open_websocket(HOST, server.port, '/', use_ssl=False,
                connect_timeout=TIMEOUT) as client_ws:
            pass
@fail_after(TIMEOUT_TEST_MAX_DURATION)
async def test_client_close_timeout(nursery, autojump_clock):
    '''
    This client times out waiting for the server to complete the closing
    handshake.
    To slow down the server's closing handshake, we make sure that its message
    queue size is 0, and the client sends it exactly 1 message. This blocks the
    server's reader so it won't do the closing handshake for at least
    ``FORCE_TIMEOUT`` seconds.
    '''
    async def handler(request):
        server_ws = await request.accept()
        # Sleep while the unread client message keeps the reader blocked:
        await trio.sleep(FORCE_TIMEOUT)
        # The next line should raise ConnectionClosed.
        await server_ws.get_message()
        pytest.fail('Should not reach this line.')
    server = await nursery.start(
        partial(serve_websocket, handler, HOST, 0, ssl_context=None,
            message_queue_size=0))
    with pytest.raises(trio.TooSlowError):
        async with open_websocket(HOST, server.port, RESOURCE, use_ssl=False,
                disconnect_timeout=TIMEOUT) as client_ws:
            await client_ws.send_message('test')
@fail_after(TIMEOUT_TEST_MAX_DURATION)
async def test_server_open_timeout(autojump_clock):
    '''
    The server times out waiting for the client to complete the opening
    handshake.
    Server timeouts don't raise exceptions, because handler tasks are launched
    in an internal nursery and sending exceptions wouldn't be helpful. Instead,
    timed out tasks silently end.
    '''
    async def handler(request):
        # The connection never completes its handshake, so the handler must
        # never run:
        pytest.fail('This handler should not be called.')
    async with trio.open_nursery() as nursery:
        server = await nursery.start(partial(serve_websocket, handler, HOST, 0,
            ssl_context=None, handler_nursery=nursery, connect_timeout=TIMEOUT))
        old_task_count = len(nursery.child_tasks)
        # This stream is not a WebSocket, so it won't send a handshake:
        stream = await trio.open_tcp_stream(HOST, server.port)
        # Checkpoint so the server's handler task can spawn:
        await trio.sleep(0)
        assert len(nursery.child_tasks) == old_task_count + 1, \
            "Server's reader task did not spawn"
        # Sleep long enough to trigger server's connect_timeout:
        await trio.sleep(FORCE_TIMEOUT)
        assert len(nursery.child_tasks) == old_task_count, \
            "Server's reader task is still running"
        # Cancel the server task:
        nursery.cancel_scope.cancel()
@fail_after(TIMEOUT_TEST_MAX_DURATION)
async def test_server_close_timeout(autojump_clock):
    '''
    The server times out waiting for the client to complete the closing
    handshake.
    Server timeouts don't raise exceptions, because handler tasks are launched
    in an internal nursery and sending exceptions wouldn't be helpful. Instead,
    timed out tasks silently end.
    To prevent the client from doing the closing handshake, we make sure that
    its message queue size is 0 and the server sends it exactly 1 message. This
    blocks the client's reader and prevents it from doing the client handshake.
    '''
    async def handler(request):
        ws = await request.accept()
        # Send one message to block the client's reader task:
        await ws.send_message('test')
    async with trio.open_nursery() as outer:
        server = await outer.start(partial(serve_websocket, handler, HOST, 0,
            ssl_context=None, handler_nursery=outer,
            disconnect_timeout=TIMEOUT))
        old_task_count = len(outer.child_tasks)
        # Spawn client inside an inner nursery so that we can cancel its reader
        # so that it won't do a closing handshake.
        async with trio.open_nursery() as inner:
            ws = await connect_websocket(inner, HOST, server.port, RESOURCE,
                use_ssl=False)
            # Checkpoint so the server can spawn a handler task:
            await trio.sleep(0)
            assert len(outer.child_tasks) == old_task_count + 1, \
                "Server's reader task did not spawn"
            # The client waits long enough to trigger the server's disconnect
            # timeout:
            await trio.sleep(FORCE_TIMEOUT)
            # The server should have cancelled the handler:
            assert len(outer.child_tasks) == old_task_count, \
                "Server's reader task is still running"
            # Cancel the client's reader task:
            inner.cancel_scope.cancel()
        # Cancel the server task:
        outer.cancel_scope.cancel()
async def test_client_does_not_close_handshake(nursery):
    ''' If the client's transport drops without a closing handshake, the
    server sees ConnectionClosed, and later client sends raise it too. '''
    async def handler(request):
        server_ws = await request.accept()
        with pytest.raises(ConnectionClosed):
            await server_ws.get_message()
    server = await nursery.start(serve_websocket, handler, HOST, 0, None)
    stream = await trio.open_tcp_stream(HOST, server.port)
    client_ws = await wrap_client_stream(nursery, stream, HOST, RESOURCE)
    async with client_ws:
        # Close the raw TCP stream without sending a WebSocket close frame:
        await stream.aclose()
        with pytest.raises(ConnectionClosed):
            await client_ws.send_message('Hello from client!')
async def test_server_does_not_close_handshake(nursery):
    ''' If the server's transport drops without a closing handshake, the
    client sees ConnectionClosed, and later server sends raise it too. '''
    async def handler(stream):
        request = await wrap_server_stream(nursery, stream)
        server_ws = await request.accept()
        async with server_ws:
            # Close the raw TCP stream without sending a close frame:
            await stream.aclose()
            with pytest.raises(ConnectionClosed):
                await server_ws.send_message('Hello from client!')
    serve_fn = partial(trio.serve_tcp, handler, 0, host=HOST)
    listeners = await nursery.start(serve_fn)
    port = listeners[0].socket.getsockname()[1]
    async with open_websocket(HOST, port, RESOURCE, use_ssl=False) as client:
        with pytest.raises(ConnectionClosed):
            await client.get_message()
async def test_server_handler_exit(nursery, autojump_clock):
    ''' When the server handler returns, the connection closes with a
    NORMAL_CLOSURE reason and the client's pending get_message() raises. '''
    async def handler(request):
        server_ws = await request.accept()
        await trio.sleep(1)
    server = await nursery.start(
        partial(serve_websocket, handler, HOST, 0, ssl_context=None))
    # connection should close when server handler exits
    with trio.fail_after(2):
        async with open_websocket(
                HOST, server.port, '/', use_ssl=False) as connection:
            with pytest.raises(ConnectionClosed) as exc_info:
                await connection.get_message()
    exc = exc_info.value
    assert exc.reason.name == 'NORMAL_CLOSURE'
@fail_after(DEFAULT_TEST_MAX_DURATION)
async def test_read_messages_after_remote_close(nursery):
    '''
    When the remote endpoint closes, the local endpoint can still read all
    of the messages sent prior to closing. Any attempt to read beyond that will
    raise ConnectionClosed.
    This test also exercises the configuration of the queue size.
    '''
    server_closed = trio.Event()
    async def handler(request):
        # Send both messages, then close by leaving the context manager:
        server = await request.accept()
        async with server:
            await server.send_message('1')
            await server.send_message('2')
        server_closed.set()
    server = await nursery.start(
        partial(serve_websocket, handler, HOST, 0, ssl_context=None))
    # The client needs a message queue of size 2 so that it can buffer both
    # incoming messages without blocking the reader task.
    async with open_websocket(HOST, server.port, '/', use_ssl=False,
            message_queue_size=2) as client:
        await server_closed.wait()
        assert await client.get_message() == '1'
        assert await client.get_message() == '2'
        with pytest.raises(ConnectionClosed):
            await client.get_message()
async def test_no_messages_after_local_close(nursery):
    '''
    If the local endpoint initiates closing, then pending messages are discarded
    and any attempt to read a message will raise ConnectionClosed.
    '''
    client_closed = trio.Event()
    async def handler(request):
        # The server sends some messages, then waits for the client to
        # close before exiting.
        server = await request.accept()
        async with server:
            await server.send_message('1')
            await server.send_message('2')
            await client_closed.wait()
    server = await nursery.start(
        partial(serve_websocket, handler, HOST, 0, ssl_context=None))
    async with open_websocket(HOST, server.port, '/', use_ssl=False) as client:
        pass
    with pytest.raises(ConnectionClosed):
        await client.get_message()
    client_closed.set()
async def test_cm_exit_with_pending_messages(echo_server, autojump_clock):
    '''
    Regression test for #74, where a context manager was not able to exit when
    there were pending messages in the receive queue.
    '''
    with trio.fail_after(1):
        async with open_websocket(HOST, echo_server.port, RESOURCE,
                use_ssl=False) as ws:
            await ws.send_message('hello')
            # allow time for the server to respond
            await trio.sleep(.1)
            # The echo reply is now queued and unread when the context
            # manager exits.
@fail_after(DEFAULT_TEST_MAX_DURATION)
async def test_max_message_size(nursery):
    '''
    Set the client's max message size to 100 bytes. The client can send a
    message larger than 100 bytes, but when it receives a message larger than
    100 bytes, it closes the connection with code 1009 ("message too big").
    '''
    async def handler(request):
        ''' Similar to the echo_request_handler fixture except it runs in a
        loop. '''
        conn = await request.accept()
        while True:
            try:
                msg = await conn.get_message()
                await conn.send_message(msg)
            except ConnectionClosed:
                break
    server = await nursery.start(
        partial(serve_websocket, handler, HOST, 0, ssl_context=None))
    async with open_websocket(HOST, server.port, RESOURCE, use_ssl=False,
            max_message_size=100) as client:
        # We can send and receive 100 bytes:
        await client.send_message(b'A' * 100)
        msg = await client.get_message()
        assert len(msg) == 100
        # We can send 101 bytes but cannot receive 101 bytes:
        await client.send_message(b'B' * 101)
        with pytest.raises(ConnectionClosed):
            await client.get_message()
        assert client.closed
        # Restored: the source's final line was fused with extraction junk.
        # 1009 is the RFC 6455 "message too big" close code.
        assert client.closed.code == 1009
import attr
import pytest
import trio
import trustme
from async_generator import async_generator, yield_
from trio_websocket import (
connect_websocket,
connect_websocket_url,
ConnectionClosed,
open_websocket,
open_websocket_url,
serve_websocket,
WebSocketServer,
wrap_client_stream,
wrap_server_stream
)
from trio_websocket._impl import ListenPort
HOST = '127.0.0.1'
RESOURCE = '/resource'
DEFAULT_TEST_MAX_DURATION = 1
# Timeout tests follow a general pattern: one side waits TIMEOUT seconds for an
# event. The other side delays for FORCE_TIMEOUT seconds to force the timeout
# to trigger. Each test also has maximum runtime (measure by Trio's clock) to
# prevent a faulty test from hanging the entire suite.
TIMEOUT = 1
FORCE_TIMEOUT = 2
TIMEOUT_TEST_MAX_DURATION = 3
@pytest.fixture
@async_generator
async def echo_server(nursery):
''' A server that reads one message, sends back the same message,
then closes the connection. '''
serve_fn = partial(serve_websocket, echo_request_handler, HOST, 0,
ssl_context=None)
server = await nursery.start(serve_fn)
await yield_(server)
@pytest.fixture
@async_generator
async def echo_conn(echo_server):
''' Return a client connection instance that is connected to an echo
server. '''
async with open_websocket(HOST, echo_server.port, RESOURCE,
use_ssl=False) as conn:
await yield_(conn)
async def echo_request_handler(request):
'''
Accept incoming request and then pass off to echo connection handler.
'''
conn = await request.accept()
try:
msg = await conn.get_message()
await conn.send_message(msg)
except ConnectionClosed:
pass
class fail_after:
''' This decorator fails if the runtime of the decorated function (as
measured by the Trio clock) exceeds the specified value. '''
def __init__(self, seconds):
self._seconds = seconds
def __call__(self, fn):
@wraps(fn)
async def wrapper(*args, **kwargs):
with trio.move_on_after(self._seconds) as cancel_scope:
await fn(*args, **kwargs)
if cancel_scope.cancelled_caught:
pytest.fail('Test runtime exceeded the maximum {} seconds'
.format(self._seconds))
return wrapper
@attr.s(hash=False, cmp=False)
class MemoryListener(trio.abc.Listener):
closed = attr.ib(default=False)
accepted_streams = attr.ib(factory=list)
queued_streams = attr.ib(factory=(lambda: trio.open_memory_channel(1)))
accept_hook = attr.ib(default=None)
async def connect(self):
assert not self.closed
client, server = memory_stream_pair()
await self.queued_streams[0].send(server)
return client
async def accept(self):
await trio.hazmat.checkpoint()
assert not self.closed
if self.accept_hook is not None:
await self.accept_hook()
stream = await self.queued_streams[1].receive()
self.accepted_streams.append(stream)
return stream
async def aclose(self):
self.closed = True
await trio.hazmat.checkpoint()
async def test_listen_port_ipv4():
assert str(ListenPort('10.105.0.2', 80, False)) == 'ws://10.105.0.2:80'
assert str(ListenPort('127.0.0.1', 8000, False)) == 'ws://127.0.0.1:8000'
assert str(ListenPort('0.0.0.0', 443, True)) == 'wss://0.0.0.0:443'
async def test_listen_port_ipv6():
assert str(ListenPort('fdf8:f53e:61e4::18', 80, False)) \
== 'ws://[2599:8807:6201:b7:16cf:bb9c:a6d3:51ab]:80'
assert str(ListenPort('::1', 8000, False)) == 'ws://[::1]:8000'
assert str(ListenPort('::', 443, True)) == 'wss://[::]:443'
async def test_server_has_listeners(nursery):
server = await nursery.start(serve_websocket, echo_request_handler, HOST, 0,
None)
assert len(server.listeners) > 0
assert isinstance(server.listeners[0], ListenPort)
async def test_serve(nursery):
task = trio.hazmat.current_task()
server = await nursery.start(serve_websocket, echo_request_handler, HOST, 0,
None)
port = server.port
assert server.port != 0
# The server nursery begins with one task (server.listen).
assert len(nursery.child_tasks) == 1
no_clients_nursery_count = len(task.child_nurseries)
async with open_websocket(HOST, port, RESOURCE, use_ssl=False) as conn:
# The server nursery has the same number of tasks, but there is now
# one additional nested nursery.
assert len(nursery.child_tasks) == 1
assert len(task.child_nurseries) == no_clients_nursery_count + 1
async def test_serve_ssl(nursery):
server_context = trio.ssl.create_default_context(
trio.ssl.Purpose.CLIENT_AUTH)
client_context = trio.ssl.create_default_context()
ca = trustme.CA()
ca.configure_trust(client_context)
cert = ca.issue_server_cert(HOST)
cert.configure_cert(server_context)
server = await nursery.start(serve_websocket, echo_request_handler, HOST, 0,
server_context)
port = server.port
async with open_websocket(HOST, port, RESOURCE, use_ssl=client_context
) as conn:
assert not conn.closed
async def test_serve_handler_nursery(nursery):
task = trio.hazmat.current_task()
async with trio.open_nursery() as handler_nursery:
serve_with_nursery = partial(serve_websocket, echo_request_handler,
HOST, 0, None, handler_nursery=handler_nursery)
server = await nursery.start(serve_with_nursery)
port = server.port
# The server nursery begins with one task (server.listen).
assert len(nursery.child_tasks) == 1
no_clients_nursery_count = len(task.child_nurseries)
async with open_websocket(HOST, port, RESOURCE, use_ssl=False) as conn:
# The handler nursery should have one task in it
# (conn._reader_task).
assert len(handler_nursery.child_tasks) == 1
async def test_serve_with_zero_listeners(nursery):
task = trio.hazmat.current_task()
with pytest.raises(ValueError):
server = WebSocketServer(echo_request_handler, [])
async def test_serve_non_tcp_listener(nursery):
listeners = [MemoryListener()]
server = WebSocketServer(echo_request_handler, listeners)
await nursery.start(server.run)
assert len(server.listeners) == 1
with pytest.raises(RuntimeError):
server.port
assert server.listeners[0].startswith('MemoryListener(')
async def test_serve_multiple_listeners(nursery):
listener1 = (await trio.open_tcp_listeners(0, host=HOST))[0]
listener2 = MemoryListener()
server = WebSocketServer(echo_request_handler, [listener1, listener2])
await nursery.start(server.run)
assert len(server.listeners) == 2
with pytest.raises(RuntimeError):
# Even though the first listener has a port, this property is only
# usable if you have exactly one listener.
server.port
# The first listener metadata is a ListenPort instance.
assert server.listeners[0].port != 0
# The second listener metadata is a string containing the repr() of a
# MemoryListener object.
assert server.listeners[1].startswith('MemoryListener(')
async def test_client_open(echo_server):
async with open_websocket(HOST, echo_server.port, RESOURCE, use_ssl=False) \
as conn:
assert not conn.closed
async def test_client_open_url(echo_server):
url = 'ws://{}:{}{}/path'.format(HOST, echo_server.port, RESOURCE)
async with open_websocket_url(url) as conn:
assert conn.path == RESOURCE + '/path'
url = 'ws://{}:{}{}?foo=bar'.format(HOST, echo_server.port, RESOURCE)
async with open_websocket_url(url) as conn:
assert conn.path == RESOURCE + '?foo=bar'
async def test_client_open_invalid_url(echo_server):
with pytest.raises(ValueError):
async with open_websocket_url('http://foo.com/bar') as conn:
pass
async def test_client_connect(echo_server, nursery):
conn = await connect_websocket(nursery, HOST, echo_server.port, RESOURCE,
use_ssl=False)
assert not conn.closed
async def test_client_connect_url(echo_server, nursery):
url = 'ws://{}:{}{}'.format(HOST, echo_server.port, RESOURCE)
conn = await connect_websocket_url(nursery, url)
assert not conn.closed
async def test_handshake_subprotocol(nursery):
async def handler(request):
assert request.proposed_subprotocols == ('chat', 'file')
assert request.subprotocol is None
request.subprotocol = 'chat'
assert request.subprotocol == 'chat'
server_ws = await request.accept()
assert server_ws.subprotocol == 'chat'
server = await nursery.start(serve_websocket, handler, HOST, 0, None)
async with open_websocket(HOST, server.port, RESOURCE, use_ssl=False,
subprotocols=('chat', 'file')) as client_ws:
assert client_ws.subprotocol == 'chat'
async def test_client_send_and_receive(echo_conn):
async with echo_conn:
await echo_conn.send_message('This is a test message.')
received_msg = await echo_conn.get_message()
assert received_msg == 'This is a test message.'
async def test_client_ping(echo_conn):
async with echo_conn:
await echo_conn.ping(b'A')
with pytest.raises(ConnectionClosed):
await echo_conn.ping(b'B')
async def test_client_ping_two_payloads(echo_conn):
pong_count = 0
async def ping_and_count():
nonlocal pong_count
await echo_conn.ping()
pong_count += 1
async with echo_conn:
async with trio.open_nursery() as nursery:
nursery.start_soon(ping_and_count)
nursery.start_soon(ping_and_count)
assert pong_count == 2
async def test_client_ping_same_payload(echo_conn):
# This test verifies that two tasks can't ping with the same payload at the
# same time. One of them should succeed and the other should get an
# exception.
exc_count = 0
async def ping_and_catch():
nonlocal exc_count
try:
await echo_conn.ping(b'A')
except ValueError:
exc_count += 1
async with echo_conn:
async with trio.open_nursery() as nursery:
nursery.start_soon(ping_and_catch)
nursery.start_soon(ping_and_catch)
assert exc_count == 1
async def test_client_pong(echo_conn):
async with echo_conn:
await echo_conn.pong(b'A')
with pytest.raises(ConnectionClosed):
await echo_conn.pong(b'B')
async def test_client_default_close(echo_conn):
async with echo_conn:
assert not echo_conn.closed
assert echo_conn.closed.code == 1000
assert echo_conn.closed.reason is None
async def test_client_nondefault_close(echo_conn):
async with echo_conn:
assert not echo_conn.closed
await echo_conn.aclose(code=1001, reason='test reason')
assert echo_conn.closed.code == 1001
assert echo_conn.closed.reason == 'test reason'
async def test_wrap_client_stream(echo_server, nursery):
stream = await trio.open_tcp_stream(HOST, echo_server.port)
conn = await wrap_client_stream(nursery, stream, HOST, RESOURCE)
async with conn:
assert not conn.closed
await conn.send_message('Hello from client!')
msg = await conn.get_message()
assert msg == 'Hello from client!'
assert conn.closed
async def test_wrap_server_stream(nursery):
async def handler(stream):
request = await wrap_server_stream(nursery, stream)
server_ws = await request.accept()
async with server_ws:
assert not server_ws.closed
msg = await server_ws.get_message()
assert msg == 'Hello from client!'
assert server_ws.closed
serve_fn = partial(trio.serve_tcp, handler, 0, host=HOST)
listeners = await nursery.start(serve_fn)
port = listeners[0].socket.getsockname()[1]
async with open_websocket(HOST, port, RESOURCE, use_ssl=False) as client:
await client.send_message('Hello from client!')
@fail_after(TIMEOUT_TEST_MAX_DURATION)
async def test_client_open_timeout(nursery, autojump_clock):
'''
The client times out waiting for the server to complete the opening
handshake.
'''
async def handler(request):
await trio.sleep(FORCE_TIMEOUT)
server_ws = await request.accept()
pytest.fail('Should not reach this line.')
server = await nursery.start(
partial(serve_websocket, handler, HOST, 0, ssl_context=None))
with pytest.raises(trio.TooSlowError):
async with open_websocket(HOST, server.port, '/', use_ssl=False,
connect_timeout=TIMEOUT) as client_ws:
pass
@fail_after(TIMEOUT_TEST_MAX_DURATION)
async def test_client_close_timeout(nursery, autojump_clock):
'''
This client times out waiting for the server to complete the closing
handshake.
To slow down the server's closing handshake, we make sure that its message
queue size is 0, and the client sends it exactly 1 message. This blocks the
server's reader so it won't do the closing handshake for at least
``FORCE_TIMEOUT`` seconds.
'''
async def handler(request):
server_ws = await request.accept()
await trio.sleep(FORCE_TIMEOUT)
# The next line should raise ConnectionClosed.
await server_ws.get_message()
pytest.fail('Should not reach this line.')
server = await nursery.start(
partial(serve_websocket, handler, HOST, 0, ssl_context=None,
message_queue_size=0))
with pytest.raises(trio.TooSlowError):
async with open_websocket(HOST, server.port, RESOURCE, use_ssl=False,
disconnect_timeout=TIMEOUT) as client_ws:
await client_ws.send_message('test')
@fail_after(TIMEOUT_TEST_MAX_DURATION)
async def test_server_open_timeout(autojump_clock):
'''
The server times out waiting for the client to complete the opening
handshake.
Server timeouts don't raise exceptions, because handler tasks are launched
in an internal nursery and sending exceptions wouldn't be helpful. Instead,
timed out tasks silently end.
'''
async def handler(request):
pytest.fail('This handler should not be called.')
async with trio.open_nursery() as nursery:
server = await nursery.start(partial(serve_websocket, handler, HOST, 0,
ssl_context=None, handler_nursery=nursery, connect_timeout=TIMEOUT))
old_task_count = len(nursery.child_tasks)
# This stream is not a WebSocket, so it won't send a handshake:
stream = await trio.open_tcp_stream(HOST, server.port)
# Checkpoint so the server's handler task can spawn:
await trio.sleep(0)
assert len(nursery.child_tasks) == old_task_count + 1, \
"Server's reader task did not spawn"
# Sleep long enough to trigger server's connect_timeout:
await trio.sleep(FORCE_TIMEOUT)
assert len(nursery.child_tasks) == old_task_count, \
"Server's reader task is still running"
# Cancel the server task:
nursery.cancel_scope.cancel()
@fail_after(TIMEOUT_TEST_MAX_DURATION)
async def test_server_close_timeout(autojump_clock):
    '''
    The server times out waiting for the client to complete the closing
    handshake.
    Server timeouts don't raise exceptions, because handler tasks are launched
    in an internal nursery and sending exceptions wouldn't be helpful. Instead,
    timed out tasks silently end.
    To prevent the client from doing the closing handshake, we make sure that
    its message queue size is 0 and the server sends it exactly 1 message. This
    blocks the client's reader and prevents it from doing the client handshake.
    '''
    async def handler(request):
        ws = await request.accept()
        # Send one message to block the client's reader task:
        await ws.send_message('test')
    async with trio.open_nursery() as outer:
        server = await outer.start(partial(serve_websocket, handler, HOST, 0,
            ssl_context=None, handler_nursery=outer,
            disconnect_timeout=TIMEOUT))
        old_task_count = len(outer.child_tasks)
        # Spawn client inside an inner nursery so that we can cancel its reader
        # so that it won't do a closing handshake.
        async with trio.open_nursery() as inner:
            ws = await connect_websocket(inner, HOST, server.port, RESOURCE,
                use_ssl=False)
            # Checkpoint so the server can spawn a handler task:
            await trio.sleep(0)
            assert len(outer.child_tasks) == old_task_count + 1, \
                "Server's reader task did not spawn"
            # The client waits long enough to trigger the server's disconnect
            # timeout:
            await trio.sleep(FORCE_TIMEOUT)
            # The server should have cancelled the handler:
            assert len(outer.child_tasks) == old_task_count, \
                "Server's reader task is still running"
            # Cancel the client's reader task:
            inner.cancel_scope.cancel()
        # Cancel the server task:
        outer.cancel_scope.cancel()
async def test_client_does_not_close_handshake(nursery):
    '''
    The client closes the raw TCP stream abruptly, without sending a
    WebSocket closing handshake; both endpoints then raise ConnectionClosed
    when they try to use the connection.
    '''
    async def handler(request):
        server_ws = await request.accept()
        # The abrupt disconnect surfaces as ConnectionClosed on the server.
        with pytest.raises(ConnectionClosed):
            await server_ws.get_message()
    server = await nursery.start(serve_websocket, handler, HOST, 0, None)
    stream = await trio.open_tcp_stream(HOST, server.port)
    client_ws = await wrap_client_stream(nursery, stream, HOST, RESOURCE)
    async with client_ws:
        # Close the underlying stream directly, bypassing the WS close:
        await stream.aclose()
        with pytest.raises(ConnectionClosed):
            await client_ws.send_message('Hello from client!')
async def test_server_does_not_close_handshake(nursery):
    '''
    The server closes the raw TCP stream abruptly, without sending a
    WebSocket closing handshake; both endpoints then raise ConnectionClosed
    when they try to use the connection.
    '''
    async def handler(stream):
        # Wrap the raw TCP stream into a server-side WebSocket request:
        request = await wrap_server_stream(nursery, stream)
        server_ws = await request.accept()
        async with server_ws:
            # Close the underlying stream directly, bypassing the WS close:
            await stream.aclose()
            with pytest.raises(ConnectionClosed):
                await server_ws.send_message('Hello from client!')
    serve_fn = partial(trio.serve_tcp, handler, 0, host=HOST)
    listeners = await nursery.start(serve_fn)
    port = listeners[0].socket.getsockname()[1]
    async with open_websocket(HOST, port, RESOURCE, use_ssl=False) as client:
        with pytest.raises(ConnectionClosed):
            await client.get_message()
async def test_server_handler_exit(nursery, autojump_clock):
    '''
    When the server's handler coroutine returns, the connection is closed
    and the client observes ConnectionClosed with a NORMAL_CLOSURE reason.
    '''
    async def handler(request):
        server_ws = await request.accept()
        # Return (without sending anything) after a short delay:
        await trio.sleep(1)
    server = await nursery.start(
        partial(serve_websocket, handler, HOST, 0, ssl_context=None))
    # connection should close when the server handler exits
    with trio.fail_after(2):
        async with open_websocket(
                HOST, server.port, '/', use_ssl=False) as connection:
            with pytest.raises(ConnectionClosed) as exc_info:
                await connection.get_message()
    exc = exc_info.value
    assert exc.reason.name == 'NORMAL_CLOSURE'
@fail_after(DEFAULT_TEST_MAX_DURATION)
async def test_read_messages_after_remote_close(nursery):
    '''
    When the remote endpoint closes, the local endpoint can still read all
    of the messages sent prior to closing. Any attempt to read beyond that will
    raise ConnectionClosed.
    This test also exercises the configuration of the queue size.
    '''
    server_closed = trio.Event()
    async def handler(request):
        server = await request.accept()
        async with server:
            await server.send_message('1')
            await server.send_message('2')
        # Leaving the `async with` block closed the server side.
        server_closed.set()
    server = await nursery.start(
        partial(serve_websocket, handler, HOST, 0, ssl_context=None))
    # The client needs a message queue of size 2 so that it can buffer both
    # incoming messages without blocking the reader task.
    async with open_websocket(HOST, server.port, '/', use_ssl=False,
            message_queue_size=2) as client:
        await server_closed.wait()
        # Both buffered messages are still readable after the remote close:
        assert await client.get_message() == '1'
        assert await client.get_message() == '2'
        # Reading past the buffered messages raises:
        with pytest.raises(ConnectionClosed):
            await client.get_message()
async def test_no_messages_after_local_close(nursery):
    '''
    If the local endpoint initiates closing, then pending messages are discarded
    and any attempt to read a message will raise ConnectionClosed.
    '''
    client_closed = trio.Event()
    async def handler(request):
        # The server sends some messages and then closes.
        server = await request.accept()
        async with server:
            await server.send_message('1')
            await server.send_message('2')
            await client_closed.wait()
    server = await nursery.start(
        partial(serve_websocket, handler, HOST, 0, ssl_context=None))
    # Enter and immediately exit the context manager: the client closes
    # before reading anything.
    async with open_websocket(HOST, server.port, '/', use_ssl=False) as client:
        pass
    # The pending '1' and '2' messages were discarded by the local close:
    with pytest.raises(ConnectionClosed):
        await client.get_message()
    client_closed.set()
async def test_cm_exit_with_pending_messages(echo_server, autojump_clock):
    '''
    Regression test for #74, where a context manager was not able to exit when
    there were pending messages in the receive queue.
    '''
    # fail_after(1) trips if the context-manager exit deadlocks (the #74 bug).
    with trio.fail_after(1):
        async with open_websocket(HOST, echo_server.port, RESOURCE,
                use_ssl=False) as ws:
            await ws.send_message('hello')
            # allow time for the server to respond
            await trio.sleep(.1)
@fail_after(DEFAULT_TEST_MAX_DURATION)
async def test_max_message_size(nursery):
    '''
    Set the client's max message size to 100 bytes. The client can send a
    message larger than 100 bytes, but when it receives a message larger than
    100 bytes, it closes the connection with code 1009.
    '''
    async def handler(request):
        ''' Similar to the echo_request_handler fixture except it runs in a
        loop. '''
        conn = await request.accept()
        while True:
            try:
                msg = await conn.get_message()
                await conn.send_message(msg)
            except ConnectionClosed:
                break
    server = await nursery.start(
        partial(serve_websocket, handler, HOST, 0, ssl_context=None))
    async with open_websocket(HOST, server.port, RESOURCE, use_ssl=False,
            max_message_size=100) as client:
        # We can send and receive 100 bytes:
        await client.send_message(b'A' * 100)
        msg = await client.get_message()
        assert len(msg) == 100
        # We can send 101 bytes but cannot receive 101 bytes:
        await client.send_message(b'B' * 101)
        with pytest.raises(ConnectionClosed):
            await client.get_message()
        assert client.closed
        # 1009 is the close code for an over-sized message (per docstring).
        assert client.closed.code == 1009
from cyclone.web import asynchronous
from xml.sax.saxutils import escape
from plugins.web.request_handler import RequestHandler
__author__ = '<NAME>'
class Route(RequestHandler):
    """Web route that lists all stored factoids as a sortable HTML table.

    Requires the ``factoids.get.web`` permission; unauthenticated users are
    redirected to the login page, authenticated users without the permission
    get an error page.
    """
    # Set in initialize(); the factoids plugin instance.
    #: :type: FactoidsPlugin
    factoids = None
    name = "factoids"
    def initialize(self):
        #: :type: FactoidsPlugin
        self.factoids = self.plugin.plugins.get_plugin("factoids")
    @asynchronous
    def get(self, *args, **kwargs):
        """Check the web permission, then fetch all factoids asynchronously.

        The @asynchronous decorator keeps the request open until one of the
        callbacks renders a response.
        """
        s = self.get_session_object()
        if not self.plugin.check_permission(self.factoids.PERM_GET % "web", s):
            if s is None:
                # They need to login
                self.redirect(
                    "/login",
                    message="You need to login to access this.",
                    message_colour="red",
                    redirect="/factoids"
                )
            else:
                # They don't have permission
                content = """
                <div class="ui red fluid message">
                    <p>You do not have permission to list the factoids.</p>
                    <p> If you feel this was in error, tell a bot admin to give you the
                    <code>factoids.get.web</code> permission.
                    </p>
                </div>
                """
                self.render(
                    "generic.html",
                    _title="Factoids | No permission",
                    content=content
                )
        else:
            # get_all_factoids() returns a Deferred; render in the callback.
            d = self.factoids.get_all_factoids()
            d.addCallbacks(self.success_callback, self.fail_callback)
    def success_callback(self, result):
        """Render the factoid rows (or a 'none found' notice) as HTML."""
        if len(result) < 1:
            content = """
            <div class="ui yellow fluid segment">
                <p>No factoids found.</p>
            </div>
            """
        else:
            content = "<table class=\"ui celled table segment " \
                      "table-sortable\">"
            content += "<thead>" \
                       "<tr>" + \
                       "<th>Location</th>" + \
                       "<th>Protocol</th>" + \
                       "<th>Channel</th>" + \
                       "<th>Name</th>" + \
                       "<th>Content</th>" + \
                       "</tr></thead>" \
                       "<tbody>"
            for row in result:
                content += "<tr>"
                for column in row:
                    # escape() guards against HTML injection from factoid
                    # contents; newlines become paragraph breaks.
                    content += "<td>%s</td>" % escape(column).replace(
                        "\n", "<br /><br />"
                    )
                content += "</tr>"
            content += "</tbody>" \
                       "</table>"
        self.render(
            "generic.html",
            _title="Factoids",
            content=content
        )
    def fail_callback(self, failure):
        """Report a failed factoid lookup as an HTTP 500."""
        self.set_status(500)
        self.write_error(500, exception=failure)
from cyclone.web import asynchronous
from xml.sax.saxutils import escape
from plugins.web.request_handler import RequestHandler
__author__ = '<NAME>'
class Route(RequestHandler):
factoids = None
name = "factoids"
def initialize(self):
#: :type: FactoidsPlugin
self.factoids = self.plugin.plugins.get_plugin("factoids")
@asynchronous
def get(self, *args, **kwargs):
s = self.get_session_object()
if not self.plugin.check_permission(self.factoids.PERM_GET % "web", s):
if s is None:
# They need to login
self.redirect(
"/login",
message="You need to login to access this.",
message_colour="red",
redirect="/factoids"
)
else:
# They don't have permission
content = """
<div class="ui red fluid message">
<p>You do not have permission to list the factoids.</p>
<p> If you feel this was in error, tell a bot admin to give you the
<code>factoids.get.web</code> permission.
</p>
</div>
"""
self.render(
"generic.html",
_title="Factoids | No permission",
content=content
)
else:
d = self.factoids.get_all_factoids()
d.addCallbacks(self.success_callback, self.fail_callback)
def success_callback(self, result):
if len(result) < 1:
content = """
<div class="ui yellow fluid segment">
<p>No factoids found.</p>
</div>
"""
else:
content = "<table class=\"ui celled table segment " \
"table-sortable\">"
content += "<thead>" \
"<tr>" + \
"<th>Location</th>" + \
"<th>Protocol</th>" + \
"<th>Channel</th>" + \
"<th>Name</th>" + \
"<th>Content</th>" + \
"</tr></thead>" \
"<tbody>"
for row in result:
content += "<tr>"
for column in row:
content += "<td>%s</td>" % escape(column).replace(
"\n", "<br /><br />"
)
content += "</tr>"
content += "</tbody>" \
"</table>"
self.render(
"generic.html",
_title="Factoids",
content=content
)
def fail_callback(self, failure):
self.set_status(500)
self.write_error(500, exception=failure) | 0.42322 | 0.076546 |
contents of files."""
import os
import re
from validator.contextgenerator import ContextGenerator
from validator.decorator import define_post_init
from .base import RegexTestBase
class FileRegexTest(RegexTestBase):
    """Matches regular expressions in complete file texts, with filters
    for individual tests."""
    TEST_ID = ('testcases_regex', 'raw_file')
    # Extensions of files which are likely to contain JavaScript.
    JAVASCRIPT_EXTENSIONS = ('.js', '.jsm', '.htm', '.html', '.xhtml',
                             '.xul', '.xbl')
    DEFAULT_FILTER = {
        'is_javascript': True,
    }
    def test(self, string, err, filename, context=None):
        """Run the applicable regex tests against `string`, the full text of
        `filename`, reporting results through `err`.

        A ContextGenerator is only built when at least one test's filter
        actually matches this file.
        """
        extension = os.path.splitext(filename)[1]
        filters = {'filename': filename,
                   'extension': extension,
                   'is_javascript': extension in self.JAVASCRIPT_EXTENSIONS,
                   'document': string}
        # Don't bother running tests unless some of our tests match the file.
        # (`itervalues`: this module targets Python 2.)
        if any(self.check_filter(test, filters)
               for test in self.tests.itervalues()):
            if context is None:
                context = ContextGenerator(string)
            super(FileRegexTest, self).test(string, err=err, filters=filters,
                                            filename=filename, context=context)
def strip_whitespace(val):
    """Return *val* with every whitespace character removed.

    Accepts either a byte string or a unicode string and returns the
    same type it was given.
    """
    return re.sub(r'\s', '', val)
# (pattern, spec) pairs consumed by FileRegexTest below; each spec carries the
# error id/warning/description and an optional 'filter' callable that decides
# whether the test applies to a given file.
FILE_REGEXPS = [
    # Access to Sync service modules which don't work well with extensions.
    (r'resource://services-sync',
     {'err_id': ('testcases_regex', 'file', 'sync-service'),
      'warning': 'Sync services objects are not intended to be re-used',
      'description': (
          'The Sync services objects are not intended to be re-used, and '
          'they often change in ways that break add-ons. It is strongly '
          'recommended that you do not rely on them.')}),
    # Modification of native object prototypes.
    # This really needs to be a code test, but that's surprisingly difficult
    # to achieve the way things are currently set up.
    (r'\b(?:String|Object|Number|Date|RegExp|Function|Boolean|Array|Iterator)'
     r'\.prototype(?:\.[a-zA-Z_$][a-zA-Z0-9_$]*|\[[^\]]+\])\s*=',
     {'filter': lambda kw: (
         kw['is_javascript'] and not (kw['extension'] == '.jsm' or
                                      'EXPORTED_SYMBOLS' in kw['document'])),
      'err_id': ('testcases_regex', 'file', 'prototype-extension'),
      'warning': 'JavaScript native prototype extension',
      'description': 'Due to interoperability concerns, extensions may '
                     'not extend the prototypes of native objects when '
                     'running in a shared scope.'}),
]
@define_post_init
def file_tester():
    # Builds the module-level FileRegexTest; presumably deferred until the
    # validator's post-init phase by define_post_init — see
    # validator.decorator.define_post_init.
    return FileRegexTest(FILE_REGEXPS)
def validate_file(*args, **kw):
    """Run the module-level FileRegexTest against a file's contents.

    Thin wrapper; see FileRegexTest.test for the accepted arguments.
    """
    return file_tester.test(*args, **kw)
import os
import re
from validator.contextgenerator import ContextGenerator
from validator.decorator import define_post_init
from .base import RegexTestBase
class FileRegexTest(RegexTestBase):
"""Matches regular expressions in complete file texts, with filters
for individual tests."""
TEST_ID = ('testcases_regex', 'raw_file')
# Extensions of files which are likely to contain JavaScript.
JAVASCRIPT_EXTENSIONS = ('.js', '.jsm', '.htm', '.html', '.xhtml',
'.xul', '.xbl')
DEFAULT_FILTER = {
'is_javascript': True,
}
def test(self, string, err, filename, context=None):
extension = os.path.splitext(filename)[1]
filters = {'filename': filename,
'extension': extension,
'is_javascript': extension in self.JAVASCRIPT_EXTENSIONS,
'document': string}
# Don't bother running tests unless some of our tests match the file.
if any(self.check_filter(test, filters)
for test in self.tests.itervalues()):
if context is None:
context = ContextGenerator(string)
super(FileRegexTest, self).test(string, err=err, filters=filters,
filename=filename, context=context)
def strip_whitespace(val):
"""Removes all white space from the given string or unicode value."""
return re.sub(r'\s+', '', val)
FILE_REGEXPS = [
# Access to Sync service modules which don't work well with extensions.
(r'resource://services-sync',
{'err_id': ('testcases_regex', 'file', 'sync-service'),
'warning': 'Sync services objects are not intended to be re-used',
'description': (
'The Sync services objects are not intended to be re-used, and '
'they often change in ways that break add-ons. It is strongly '
'recommended that you do not rely on them.')}),
# Modification of native object prototypes.
# This really needs to be a code test, but that's surprisingly difficult
# to achieve the way things are currently set up.
(r'\b(?:String|Object|Number|Date|RegExp|Function|Boolean|Array|Iterator)'
r'\.prototype(?:\.[a-zA-Z_$][a-zA-Z0-9_$]*|\[[^\]]+\])\s*=',
{'filter': lambda kw: (
kw['is_javascript'] and not (kw['extension'] == '.jsm' or
'EXPORTED_SYMBOLS' in kw['document'])),
'err_id': ('testcases_regex', 'file', 'prototype-extension'),
'warning': 'JavaScript native prototype extension',
'description': 'Due to interoperability concerns, extensions may '
'not extend the prototypes of native objects when '
'running in a shared scope.'}),
]
@define_post_init
def file_tester():
return FileRegexTest(FILE_REGEXPS)
def validate_file(*args, **kw):
return file_tester.test(*args, **kw) | 0.658198 | 0.256651 |
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# more django apps
'django.contrib.sites',
'django.contrib.flatpages',
'django.contrib.humanize',
# custom
'template3.apps.users',
'template3.apps.core.apps.CoreConfig',
# third party
'allauth',
'allauth.account',
'allauth.socialaccount',
# 'allauth.socialaccount.providers.facebook',
# 'allauth.socialaccount.providers.twitter',
'compressor',
'crispy_forms',
'debug_toolbar',
'django_extensions',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware'
]
ROOT_URLCONF = 'template3.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# custom
"template3.apps.core.context_processors.site_processor",
"template3.apps.core.context_processors.debug_processor",
]
},
},
]
WSGI_APPLICATION = 'template3.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', # noqa
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', # noqa
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', # noqa
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', # noqa
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
# STATICFILES_FINDERS
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# other finders..
'compressor.finders.CompressorFinder',
)
# WHITENOISE
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# AUTHENTICATION_BACKENDS
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend"
)
# auth and allauth settings
SOCIALACCOUNT_QUERY_EMAIL = True
EMAIL_CONFIRMATION_DAYS = 14
ACCOUNT_AUTHENTICATION_METHOD = "username_email"
ACCOUNT_EMAIL_REQUIRED = True
SOCIALACCOUNT_PROVIDERS = {
'facebook': {
'SCOPE': ['email', 'public_profile', 'user_friends'],
'METHOD': 'js_sdk' # instead of 'oauth2'
}
}
# Pagination
PAGINATION_DEFAULT_PAGINATION = 20
# COMPRESSOR
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.cssmin.CSSMinFilter'
]
# crispy forms
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Sites
SITE_ID = 1
try:
from .local_settings import * # noqa
except ImportError:
pass | template3/settings.py | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# more django apps
'django.contrib.sites',
'django.contrib.flatpages',
'django.contrib.humanize',
# custom
'template3.apps.users',
'template3.apps.core.apps.CoreConfig',
# third party
'allauth',
'allauth.account',
'allauth.socialaccount',
# 'allauth.socialaccount.providers.facebook',
# 'allauth.socialaccount.providers.twitter',
'compressor',
'crispy_forms',
'debug_toolbar',
'django_extensions',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware'
]
ROOT_URLCONF = 'template3.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# custom
"template3.apps.core.context_processors.site_processor",
"template3.apps.core.context_processors.debug_processor",
]
},
},
]
WSGI_APPLICATION = 'template3.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', # noqa
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', # noqa
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', # noqa
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', # noqa
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
# STATICFILES_FINDERS
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# other finders..
'compressor.finders.CompressorFinder',
)
# WHITENOISE
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# AUTHENTICATION_BACKENDS
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend"
)
# auth and allauth settings
SOCIALACCOUNT_QUERY_EMAIL = True
EMAIL_CONFIRMATION_DAYS = 14
ACCOUNT_AUTHENTICATION_METHOD = "username_email"
ACCOUNT_EMAIL_REQUIRED = True
SOCIALACCOUNT_PROVIDERS = {
'facebook': {
'SCOPE': ['email', 'public_profile', 'user_friends'],
'METHOD': 'js_sdk' # instead of 'oauth2'
}
}
# Pagination
PAGINATION_DEFAULT_PAGINATION = 20
# COMPRESSOR
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.cssmin.CSSMinFilter'
]
# crispy forms
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Sites
SITE_ID = 1
try:
from .local_settings import * # noqa
except ImportError:
pass | 0.335351 | 0.07056 |
import numpy as np
from amadeus import Flights
import pandas as pd
import random
import datetime
import time
from ortools.constraint_solver import pywrapcp
from ortools.constraint_solver import routing_enums_pb2
def choose_countries(origin, number_countries, number_continents):
    """Randomly pick destination cities spanning the requested spread.

    Repeatedly samples ``number_countries`` rows from the airport table
    until the sample covers exactly ``number_countries`` distinct countries
    and ``number_continents`` distinct continents.

    :param origin: IATA code of the starting airport, prepended to the result.
    :param number_countries: how many cities/distinct countries to pick.
    :param number_continents: how many distinct continents the picks must span.
    :returns: ``[origin] + [airport codes of the sampled cities]``.

    NOTE(review): loops until satisfied — unsatisfiable constraints (e.g.
    more continents than countries) would spin forever; confirm callers
    only pass feasible combinations.
    """
    DATA_PATH = 'static/custom.csv'
    airport_code = pd.read_csv(DATA_PATH, keep_default_na=False)
    list_cities = list(airport_code.city)
    list_continents = list(airport_code.continent)
    list_countries = list(airport_code.country)
    list_codes = list(airport_code.code)
    index = random.sample(range(len(list_cities)), number_countries)
    num_countries = len(set(list_countries[x] for x in index))
    num_continents = len(set(list_continents[x] for x in index))
    # Bug fixes vs. the original: the loop condition used `and` (stopping as
    # soon as EITHER target was met), and the resample drew `num_countries`
    # rows (the previous sample's distinct-country count) instead of
    # `number_countries`, silently shrinking the sample.
    while (num_continents != number_continents
            or num_countries != number_countries):
        index = random.sample(range(len(list_cities)), number_countries)
        num_countries = len(set(list_countries[x] for x in index))
        num_continents = len(set(list_continents[x] for x in index))
    return [origin] + [list_codes[x] for x in index]
def find_best_travel(list_airports=None, days=None, num_countries=None,
                     departure_date=None):
    """Build a matrix of average flight prices between airports.

    For every ordered pair of distinct airports, queries the Amadeus
    low-fare search at several candidate departure dates (every
    ``days // num_countries`` days over a 40-day window) and averages the
    cheapest fares found.

    :param list_airports: IATA codes; the matrix is indexed in this order.
    :param days: total trip length in days.
    :param num_countries: number of stops; determines the date step.
    :param departure_date: epoch seconds of the earliest departure;
        defaults to "now".
    :returns: numpy array of shape (n, n); 0 on the diagonal, 10e6 as a
        sentinel when no fare was found for a pair (kept from the original
        so downstream cost comparisons are unchanged).
    """
    # SECURITY/REVIEW: the API key is hard-coded; it should be moved to
    # configuration or an environment variable.
    flights = Flights('rTgACcDGGTOrYi9vGotfQOM2wfHAGly8')
    if departure_date is None:  # was `== None`
        departure_date = time.time()
    days_per_city = int(days / num_countries)
    distance_matrix = np.zeros((len(list_airports), len(list_airports)))
    for i, airport_departure in enumerate(list_airports):
        for j, airport_arrival in enumerate(list_airports):
            if airport_arrival == airport_departure:
                continue  # diagonal entries are already 0
            prices_list = []
            for duration in range(0, 40, days_per_city):
                date = datetime.datetime.fromtimestamp(
                    departure_date + duration * 24 * 3600).strftime('%Y-%m-%d')
                resp = flights.low_fare_search(
                    origin=airport_departure,
                    destination=airport_arrival,
                    departure_date=date,
                    duration='1')
                # Narrowed from a bare `except:`: only the expected
                # "no results in the response" shapes are swallowed.
                try:
                    price = resp['results'][0]['fare']['total_price']
                except (KeyError, IndexError, TypeError):
                    continue
                prices_list.append(float(price))
            if not prices_list:
                # No fare found at any date: large sentinel cost.
                distance_matrix[i, j] = 10e6
            else:
                distance_matrix[i, j] = np.mean(prices_list)
    return distance_matrix
def compute_optimal_tour(matrix, list_airports):
    """Solve a TSP over the price matrix and return the visiting order.

    :param matrix: square cost matrix, indexed like ``list_airports``.
    :param list_airports: airport codes; index 0 is the depot/start.
    :returns: ``(list_cities, cost)`` — the tour order (excluding the
        return to the depot) and its total cost.  Returns ``([], None)``
        when no solution is found or the instance is empty.

    Bug fixed: ``list_cities`` and ``cost`` were only assigned on the
    success path, so the trailing ``return`` raised NameError whenever the
    solver failed or ``list_airports`` was empty.
    """
    class CreateDistanceCallback(object):
        """Create callback to calculate distances between points."""
        def __init__(self, matrix):
            """Array of distances between points."""
            self.matrix = matrix
        def Distance(self, from_node, to_node):
            # Solver requires integer arc costs.
            return int(self.matrix[from_node][to_node])

    city_names = list_airports
    tsp_size = len(list_airports)
    num_routes = 1  # The number of routes, which is 1 in the TSP.
    depot = 0       # Starting node of the route.
    # Safe defaults so the return statement is always well-defined.
    list_cities = []
    cost = None
    if tsp_size > 0:
        routing = pywrapcp.RoutingModel(tsp_size, num_routes, depot)
        search_parameters = routing.DefaultSearchParameters()
        # The distance callback takes two node indices and returns their cost.
        dist_between_nodes = CreateDistanceCallback(matrix)
        routing.SetArcCostEvaluatorOfAllVehicles(dist_between_nodes.Distance)
        # Solve, returns a solution if any.
        assignment = routing.SolveWithParameters(search_parameters)
        if assignment:
            cost = np.round(assignment.ObjectiveValue(), 3)
            print("Total duration: " + str(cost) + " $\n")
            # Only one route here; otherwise iterate over routing.vehicles().
            route_number = 0
            index = routing.Start(route_number)
            route = ''
            while not routing.IsEnd(index):
                # Convert variable indices to node indices for display.
                route += str(city_names[routing.IndexToNode(index)]) + ' -> '
                list_cities.append(city_names[routing.IndexToNode(index)])
                index = assignment.Value(routing.NextVar(index))
            route += str(city_names[routing.IndexToNode(index)])
            print("Route:\n\n" + route)
        else:
            print('No solution found.')
    else:
        print('Specify an instance greater than 0.')
    return list_cities, cost
from amadeus import Flights
import pandas as pd
import random
import datetime
import time
from ortools.constraint_solver import pywrapcp
from ortools.constraint_solver import routing_enums_pb2
def choose_countries(origin, number_countries, number_continents):
DATA_PATH = 'static/custom.csv'
airport_code = pd.read_csv(DATA_PATH, keep_default_na=False)
list_cities = list(airport_code.city)
list_continents = list(airport_code.continent)
list_countries = list(airport_code.country)
list_codes = list(airport_code.code)
tmp = list(range(len(list_cities)))
index = random.sample(tmp, number_countries)
num_countries = len(set([list_countries[x] for x in index]))
num_continents = len(set([list_continents[x] for x in index]))
while num_continents != number_continents and num_countries != number_countries:
index = random.sample(range(len(list_cities)), num_countries)
num_countries = len(set([list_countries[x] for x in index]))
num_continents = len(set([list_continents[x] for x in index]))
return [origin] + [list_codes[x] for x in index]
def find_best_travel(list_airports=None, days=None, num_countries=None,
departure_date=None):
flights = Flights('rTgACcDGGTOrYi9vGotfQOM2wfHAGly8')
if departure_date == None:
departure_date = time.time()
days_per_city = int(days / num_countries)
distance_matrix = np.zeros((len(list_airports), len(list_airports)))
for i, airport_departure in enumerate(list_airports):
print(airport_departure)
for j, airport_arrival in enumerate(list_airports):
print(airport_arrival)
if airport_arrival == airport_departure:
distance_matrix[i, j] = 0
else:
prices_list = []
for duration in range(0, 40, days_per_city):
print(duration)
date = datetime.datetime.fromtimestamp(departure_date + duration * 24 * 3600).strftime('%Y-%m-%d')
print(date)
resp = flights.low_fare_search(
origin=airport_departure,
destination=airport_arrival,
departure_date=date,
duration='1')
try:
price = resp['results'][0]['fare']['total_price']
except:
continue
print(price)
prices_list.append(float(price))
if prices_list == []:
distance_matrix[i, j] = 10e6
else:
distance_matrix[i, j] = np.mean(prices_list)
return distance_matrix
def compute_optimal_tour(matrix, list_airports):
# Distance callback
class CreateDistanceCallback(object):
"""Create callback to calculate distances between points."""
def __init__(self, matrix):
"""Array of distances between points."""
self.matrix = matrix
def Distance(self, from_node, to_node):
return int(self.matrix[from_node][to_node])
# Cities
city_names = list_airports
tsp_size = len(list_airports)
num_routes = 1 # The number of routes, which is 1 in the TSP.
# Nodes are indexed from 0 to tsp_size - 1. The depot is the starting node of the route.
depot = 0
# Create routing model
if tsp_size > 0:
routing = pywrapcp.RoutingModel(tsp_size, num_routes, depot)
search_parameters = routing.DefaultSearchParameters()
# Create the distance callback, which takes two arguments (the from and to node indices)
# and returns the distance between these nodes.
dist_between_nodes = CreateDistanceCallback(matrix)
dist_callback = dist_between_nodes.Distance
routing.SetArcCostEvaluatorOfAllVehicles(dist_callback)
# Solve, returns a solution if any.
assignment = routing.SolveWithParameters(search_parameters)
if assignment:
# Solution cost.
print ("Total duration: " + str(np.round(assignment.ObjectiveValue(), 3)) + " $\n")
cost = np.round(assignment.ObjectiveValue(), 3)
# Inspect solution.
# Only one route here; otherwise iterate from 0 to routing.vehicles() - 1
route_number = 0
index = routing.Start(route_number) # Index of the variable for the starting node.
route = ''
list_cities = []
while not routing.IsEnd(index):
# Convert variable indices to node indices in the displayed route.
route += str(city_names[routing.IndexToNode(index)]) + ' -> '
list_cities.append(city_names[routing.IndexToNode(index)])
index = assignment.Value(routing.NextVar(index))
route += str(city_names[routing.IndexToNode(index)])
print ("Route:\n\n" + route)
else:
print ('No solution found.')
else:
print ('Specify an instance greater than 0.')
return list_cities, cost | 0.4436 | 0.307168 |
from tensorboardX import SummaryWriter
from pathlib import Path
import sys
base_dir = Path(__file__).resolve().parent.parent
sys.path.append(str(base_dir))
from common import *
from log_path import *
from algo.qmix import QMIX
from algo.qmix_s import QMIX_s
from env.chooseenv import make
def step_trainer(args):
    """Train a QMIX_s agent on the 3v3 snakes environment with one update per env step.

    The learner controls snakes [0, 1, 2]. Opponent snakes [3, 4, 5] are driven
    by a greedy policy (default), a frozen pre-trained model (``--well_enemy``),
    a concurrently trained model (``--compete``), or the learner itself
    (``--self_compete``). Metrics go to TensorBoard under ``log_dir``; model
    checkpoints are written every ``args.save_interval`` episodes.
    """
    print("==algo: ", args.algo)
    print(f'device: {device}')
    print(f'model episode: {args.model_episode}')
    print(f'save interval: {args.save_interval}')
    env = make(args.game_name, conf=None)
    num_agents = env.n_player
    print(f'Total agent number: {num_agents}')
    ctrl_agent_index = [0, 1, 2]
    enemy_agent_index = [3, 4, 5]
    print(f'Agent control by the actor: {ctrl_agent_index}')
    ctrl_agent_num = len(ctrl_agent_index)
    width = env.board_width
    print(f'Game board width: {width}')
    height = env.board_height
    print(f'Game board height: {height}')
    act_dim = env.get_action_dim()
    print(f'action dimension: {act_dim}')
    obs_dim = 26
    print(f'observation dimension: {obs_dim}')
    print(f'replay buffer size: {args.buffer_size}')
    setup_seed(args.seed)
    # The three opponent modes are mutually exclusive.
    assert (args.compete + args.well_enemy + args.self_compete) < 2, "can't be both true"
    # Save paths for logs / config / checkpoints.
    run_dir, log_dir = make_logpath(args.game_name, args.algo)
    writer = SummaryWriter(str(log_dir))
    save_config(args, log_dir)
    model = QMIX_s(obs_dim, act_dim, width, height, 32, ctrl_agent_num, args)
    if args.compete:
        model_enemy = QMIX_s(obs_dim, act_dim, width, height, 32, ctrl_agent_num, args)
    if args.well_enemy:
        # Frozen, pre-trained opponent (evaluation mode, never updated).
        model_enemy = QMIX_s(obs_dim, act_dim, width, height, 32, ctrl_agent_num, args)
        model_enemy.load_model(Path(__file__).resolve().parent / Path("well"), 100000, False)
        model_enemy.eval()
    if args.load_model:
        load_dir = os.path.join(os.path.dirname(run_dir), "run" + str(args.load_model_run))
        model.load_model(load_dir, episode=args.load_model_run_episode)
    episode = 0
    while episode < args.max_episodes:
        # Receive initial observation state s1
        state = env.reset()
        # During training, since all agents are given the same obs, we take the state of 1st agent.
        # However, when evaluation in Jidi, each agent get its own state, like state[agent_index]: dict()
        # more details refer to https://github.com/jidiai/Competition_3v3snakes/blob/master/run_log.py#L68
        # state: list() ; state[0]: dict()
        state_to_training = state[0]
        # ======================= feature engineering =======================
        # since all snakes play independently, we choose first three snakes for training.
        # Then, the trained model can apply to other agents. ctrl_agent_index -> [0, 1, 2]
        # Noted, the index is different in obs. please refer to env description.
        obs, state_map = get_observations(state_to_training, ctrl_agent_index, obs_dim, height, width, args.mode)
        if args.compete or args.well_enemy or args.self_compete:
            obs_e, state_map_e = get_observations(state_to_training, enemy_agent_index, obs_dim, height, width,
                                                  args.mode)
        episode += 1
        step = 0
        episode_reward = np.zeros(6, dtype=int)
        episode_tot_reward = 0
        episode_tot_split_reward = np.zeros(3)
        if args.compete or args.self_compete:
            episode_tot_reward_e = 0
        if args.compete or args.well_enemy or args.self_compete:
            action_available_e = None
        # FIX: always initialise both loss lists; previously `loss_e` only
        # existed under --compete, so the logging below raised NameError when
        # --compete was off and `loss` was empty.
        loss_e = []
        loss = []
        action_available = None
        # Just run and collect the experience during one episode
        # The environment will be done during every 200 step
        while True:
            # ================================== inference ========================================
            actions_ctrl = model.choose_action(obs, action_available)
            actions_ctrl = actions_ctrl.reshape(-1)
            # ============================== add opponent actions =================================
            if args.compete or args.well_enemy or args.self_compete:
                # Under --self_compete the learner also plays the enemy side.
                model_e = model_enemy if args.compete or args.well_enemy else model
                actions_e = model_e.choose_action(obs_e, action_available_e)
                actions_e = actions_e.reshape(-1)
                actions = np.concatenate([actions_ctrl, actions_e], axis=0)
            else:
                # use greedy policy for enemy
                actions = action_greedy(state_to_training, actions_ctrl, height, width)
                # actions = action_random(act_dim,actions_ctrl)
            # get the limited action in the next state
            action_available = get_action_available(actions, ctrl_agent_index, act_dim)
            if args.compete or args.well_enemy or args.self_compete:
                # FIX: was assigned to `actions_available_e`, leaving
                # `action_available_e` (read by choose_action above) stuck at
                # None, so the opponent never received an action mask.
                action_available_e = get_action_available(actions, enemy_agent_index, act_dim)
            # the reward of Env is just the gain of length to each agents
            next_state, reward, done, _, info = env.step(env.encode(actions))
            next_state_to_training = next_state[0]
            next_obs, next_state_map = get_observations(next_state_to_training, ctrl_agent_index, obs_dim, height,
                                                        width, args.mode)
            if args.compete or args.well_enemy or args.self_compete:
                next_obs_e, next_state_map_e = get_observations(next_state_to_training, enemy_agent_index, obs_dim,
                                                                height,
                                                                width, args.mode)
            # ================================== reward shaping ========================================
            reward = np.array(reward)
            episode_reward += reward
            step_reward, split_step_reward = get_reward(info, episode_reward, ctrl_agent_index, enemy_agent_index,
                                                        reward, done, args)
            if args.compete or args.self_compete:
                # Enemy reward: same shaping with the team indices swapped.
                step_reward_e, _ = get_reward(info, episode_reward, enemy_agent_index, ctrl_agent_index,
                                              reward, done, args)
                episode_tot_reward_e += step_reward_e
            episode_tot_reward += step_reward
            episode_tot_split_reward += split_step_reward
            # ================================== collect data ========================================
            # Store transition(s) and learn on every environment step.
            if args.self_compete:
                model.replay_buffer.push([state_map_e, obs_e, actions_e, step_reward_e, next_state_map_e, next_obs_e,
                                          action_available_e, done])
            model.replay_buffer.push([state_map, obs, actions_ctrl, step_reward, next_state_map, next_obs,
                                      action_available, done])
            model.epsilon_delay()
            model.update()
            obs, state_map = next_obs, next_state_map
            if not args.random:
                state_to_training = next_state_to_training
            if args.compete:
                # FIX: use push() like every other transition store; append()
                # is not the replay-buffer interface used elsewhere here.
                model_enemy.replay_buffer.push([state_map_e, obs_e, actions_e, step_reward_e, next_state_map_e,
                                                next_obs_e, action_available_e, done])
                model_enemy.epsilon_delay()
                model_enemy.update()
                model_enemy.update_target()
            if args.well_enemy or args.compete or args.self_compete:
                obs_e, state_map_e = next_obs_e, next_state_map_e
            step += 1
            if model.loss:
                loss.append(model.loss)
            if args.compete and model_enemy.loss:
                loss_e.append(model_enemy.loss)
            if args.episode_length <= step or done:
                # --------- episode summary: console + TensorBoard ---------
                print(f'[Episode {episode:05d}] total_reward: {np.sum(episode_reward[0:3]):d}')
                print(f'\t\t\t\tsnake_1: {episode_reward[0]} '
                      f'snake_2: {episode_reward[1]} snake_3: {episode_reward[2]}')
                print(f'\t\t\t\tsnake_4: {episode_reward[3]} '
                      f'snake_5: {episode_reward[4]} snake_6: {episode_reward[5]}')
                print(
                    f'\t\t\t\tepisode_win:{np.sum(episode_reward[ctrl_agent_index]) > np.sum(episode_reward[enemy_agent_index])}')
                print(
                    f'\t\t\t\tself_length:{np.sum(episode_reward[ctrl_agent_index]):d} enemy_length:{np.sum(episode_reward[enemy_agent_index])}')
                reward_tag = 'reward'
                if args.compete or args.self_compete:
                    writer.add_scalars(reward_tag, global_step=episode,
                                       tag_scalar_dict={'snake_1': episode_reward[0], 'snake_2': episode_reward[1],
                                                        'snake_3': episode_reward[2],
                                                        'total': np.sum(episode_reward[0:3]),
                                                        'snake_4': episode_reward[3], 'snake_5': episode_reward[4],
                                                        'snake_6': episode_reward[5],
                                                        'enemy_total': np.sum(episode_reward[3:])})
                else:
                    writer.add_scalars(reward_tag, global_step=episode,
                                       tag_scalar_dict={'snake_1': episode_reward[0], 'snake_2': episode_reward[1],
                                                        'snake_3': episode_reward[2],
                                                        'total': np.sum(episode_reward[0:3])})
                score_tag = 'score'
                if args.compete or args.self_compete:
                    writer.add_scalars(score_tag, global_step=episode,
                                       tag_scalar_dict={'mean_step_reward': episode_tot_reward / step,
                                                        "mean_sparse_reward": episode_tot_split_reward[0] / step,
                                                        'mean_gain_reward': episode_tot_split_reward[1] / step,
                                                        'mean_dist_reward': episode_tot_split_reward[2] / step,
                                                        'enemy_mean_step_reward': episode_tot_reward_e / step})
                else:
                    writer.add_scalars(score_tag, global_step=episode,
                                       tag_scalar_dict={'mean_step_reward': episode_tot_reward / step,
                                                        "mean_sparse_reward": episode_tot_split_reward[0] / step,
                                                        'mean_gain_reward': episode_tot_split_reward[1] / step,
                                                        'mean_dist_reward': episode_tot_split_reward[2] / step})
                win_tag = 'win_rate'
                writer.add_scalars(win_tag, global_step=episode,
                                   tag_scalar_dict={'win_rate': int(np.sum(episode_reward[ctrl_agent_index]) > np.sum(
                                       episode_reward[enemy_agent_index]))})
                loss_tag = 'loss'
                if len(loss) and not args.compete:
                    writer.add_scalars(loss_tag, global_step=episode,
                                       tag_scalar_dict={'loss': np.mean(np.array(loss))})
                else:
                    if len(loss_e):
                        writer.add_scalars(loss_tag, global_step=episode,
                                           tag_scalar_dict={'loss': np.mean(np.array(loss)),
                                                            'enemy': np.mean(np.array(loss_e))})
                if len(loss):
                    if not args.compete:
                        print(f'\t\t\t\tloss {np.mean(np.array(loss)):.3f}')
                    elif len(loss_e):
                        print(f'\t\t\t\tloss {np.mean(np.array(loss)):.3f}')
                        print(f'\t\t\t\tloss {np.mean(np.array(loss_e)):.3f}')
                env.reset()
                break
        if episode % args.save_interval == 0:
            model.save_model(os.path.join(run_dir, "qmix_agent_%d.pth" % episode))
            if args.compete:
                model_enemy.save_model(os.path.join(run_dir, "qmix_enemy_agent_%d.pth" % episode))
            # model.save_checkpoint(os.path.join(run_dir,"qmix_ckpt_%d.pth"%episode))
def rnn_trainer(args):
    """Train a recurrent QMIX agent on the 3v3 snakes environment with one update per episode.

    Unlike ``step_trainer``, transitions are accumulated in local lists for the
    whole episode and pushed to the replay buffer in one batch, and the
    recurrent hidden state is reset at every episode boundary. Opponent snakes
    [3, 4, 5] are driven by a greedy policy (default), a frozen pre-trained
    model (``--well_enemy``), a concurrently trained model (``--compete``), or
    the learner itself (``--self_compete``).
    """
    print("==algo: ", args.algo)
    print(f'device: {device}')
    print(f'model episode: {args.model_episode}')
    print(f'save interval: {args.save_interval}')
    env = make(args.game_name, conf=None)
    num_agents = env.n_player
    print(f'Total agent number: {num_agents}')
    ctrl_agent_index = [0, 1, 2]
    enemy_agent_index = [3, 4, 5]
    print(f'Agent control by the actor: {ctrl_agent_index}')
    ctrl_agent_num = len(ctrl_agent_index)
    width = env.board_width
    print(f'Game board width: {width}')
    height = env.board_height
    print(f'Game board height: {height}')
    act_dim = env.get_action_dim()
    print(f'action dimension: {act_dim}')
    obs_dim = 26
    print(f'observation dimension: {obs_dim}')
    print(f'replay buffer size: {args.buffer_size}')
    setup_seed(args.seed)
    # The three opponent modes are mutually exclusive.
    assert (args.compete + args.well_enemy + args.self_compete) < 2, "can't be both true"
    # Save paths for logs / config / checkpoints.
    run_dir, log_dir = make_logpath(args.game_name, args.algo)
    writer = SummaryWriter(str(log_dir))
    save_config(args, log_dir)
    model = QMIX(obs_dim, act_dim, width, height, 32, ctrl_agent_num, args)
    replay_buffer = []
    if args.compete:
        model_enemy = QMIX(obs_dim, act_dim, width, height, 32, ctrl_agent_num, args)
    if args.compete or args.self_compete:
        enemy_replay_buffer = []
    if args.well_enemy:
        # Frozen, pre-trained opponent (evaluation mode, never updated).
        model_enemy = QMIX(obs_dim, act_dim, width, height, 32, ctrl_agent_num, args)
        model_enemy.load_model(Path(__file__).resolve().parent / Path("well"), 100000, False)
        model_enemy.eval()
    if args.load_model:
        load_dir = os.path.join(os.path.dirname(run_dir), "run" + str(args.load_model_run))
        model.load_model(load_dir, episode=args.load_model_run_episode)
    episode = 0
    while episode < args.max_episodes:
        # Receive initial observation state s1
        state = env.reset()
        model.reset(1)  # reset recurrent hidden state for acting (batch of 1)
        if args.compete or args.well_enemy:
            model_enemy.reset(1)
        # During training, since all agents are given the same obs, we take the state of 1st agent.
        # However, when evaluation in Jidi, each agent get its own state, like state[agent_index]: dict()
        # more details refer to https://github.com/jidiai/Competition_3v3snakes/blob/master/run_log.py#L68
        # state: list() ; state[0]: dict()
        state_to_training = state[0]
        # ======================= feature engineering =======================
        # since all snakes play independently, we choose first three snakes for training.
        # Then, the trained model can apply to other agents. ctrl_agent_index -> [0, 1, 2]
        # Noted, the index is different in obs. please refer to env description.
        obs, state_map = get_observations(state_to_training, ctrl_agent_index, obs_dim, height, width, args.mode)
        if args.compete or args.well_enemy or args.self_compete:
            obs_e, state_map_e = get_observations(state_to_training, enemy_agent_index, obs_dim, height, width,
                                                  args.mode)
        episode += 1
        step = 0
        episode_reward = np.zeros(6, dtype=int)
        episode_tot_reward = 0
        episode_tot_split_reward = np.zeros(3)
        if args.compete or args.self_compete:
            episode_tot_reward_e = 0
        if args.compete or args.well_enemy or args.self_compete:
            action_available_e = None
        action_available = None
        # Just run and collect the experience during one episode
        # The environment will be done during every 200 step
        while True:
            # ================================== inference ========================================
            actions_ctrl = model.choose_action(obs, action_available)
            actions_ctrl = actions_ctrl.reshape(-1)
            # ============================== add opponent actions =================================
            if args.compete or args.well_enemy or args.self_compete:
                # Under --self_compete the learner also plays the enemy side.
                model_e = model_enemy if args.compete or args.well_enemy else model
                actions_e = model_e.choose_action(obs_e, action_available_e)
                actions_e = actions_e.reshape(-1)
                actions = np.concatenate([actions_ctrl, actions_e], axis=0)
            else:
                # use greedy policy for enemy
                actions = action_greedy(state_to_training, actions_ctrl, height, width)
                # actions = action_random(act_dim,actions_ctrl)
            # get the limited action in the next state
            action_available = get_action_available(actions, ctrl_agent_index, act_dim)
            if args.compete or args.well_enemy or args.self_compete:
                # FIX: was assigned to `actions_available_e`, leaving
                # `action_available_e` (read by choose_action above) stuck at
                # None, so the opponent never received an action mask.
                action_available_e = get_action_available(actions, enemy_agent_index, act_dim)
            # the reward of Env is just the gain of length to each agents
            next_state, reward, done, _, info = env.step(env.encode(actions))
            next_state_to_training = next_state[0]
            next_obs, next_state_map = get_observations(next_state_to_training, ctrl_agent_index, obs_dim, height,
                                                        width, args.mode)
            if args.compete or args.well_enemy or args.self_compete:
                next_obs_e, next_state_map_e = get_observations(next_state_to_training, enemy_agent_index, obs_dim,
                                                                height,
                                                                width, args.mode)
            # ================================== reward shaping ========================================
            reward = np.array(reward)
            episode_reward += reward
            step_reward, split_step_reward = get_reward(info, episode_reward, ctrl_agent_index, enemy_agent_index,
                                                        reward, done, args)
            if args.compete or args.self_compete:
                # Enemy reward: same shaping with the team indices swapped.
                step_reward_e, _ = get_reward(info, episode_reward, enemy_agent_index, ctrl_agent_index,
                                              reward, done, args)
                episode_tot_reward_e += step_reward_e
            episode_tot_reward += step_reward
            episode_tot_split_reward += split_step_reward
            # ================================== collect data ========================================
            # Accumulate the episode locally; pushed as one batch after the episode.
            replay_buffer.append([state_map, obs, actions_ctrl, step_reward, action_available, done])
            if args.compete or args.self_compete:
                enemy_replay_buffer.append([state_map_e, obs_e, actions_e, step_reward_e, action_available_e, done])
            if args.compete or args.well_enemy or args.self_compete:
                obs_e, state_map_e = next_obs_e, next_state_map_e
            obs, state_map = next_obs, next_state_map
            if not args.random:
                state_to_training = next_state_to_training
            step += 1
            if args.episode_length <= step or done:
                # --------- episode summary: console + TensorBoard ---------
                print(f'[Episode {episode:05d}] total_reward: {np.sum(episode_reward[0:3]):d}')
                print(f'\t\t\t\tsnake_1: {episode_reward[0]} '
                      f'snake_2: {episode_reward[1]} snake_3: {episode_reward[2]}')
                print(f'\t\t\t\tsnake_4: {episode_reward[3]} '
                      f'snake_5: {episode_reward[4]} snake_6: {episode_reward[5]}')
                print(
                    f'\t\t\t\tepisode_win:{np.sum(episode_reward[ctrl_agent_index]) > np.sum(episode_reward[enemy_agent_index])}')
                print(
                    f'\t\t\t\tself_length:{np.sum(episode_reward[ctrl_agent_index]):d} enemy_length:{np.sum(episode_reward[enemy_agent_index])}')
                reward_tag = 'reward'
                if args.compete or args.self_compete:
                    writer.add_scalars(reward_tag, global_step=episode,
                                       tag_scalar_dict={'snake_1': episode_reward[0], 'snake_2': episode_reward[1],
                                                        'snake_3': episode_reward[2],
                                                        'total': np.sum(episode_reward[0:3]),
                                                        'snake_4': episode_reward[3], 'snake_5': episode_reward[4],
                                                        'snake_6': episode_reward[5],
                                                        'enemy_total': np.sum(episode_reward[3:])})
                else:
                    writer.add_scalars(reward_tag, global_step=episode,
                                       tag_scalar_dict={'snake_1': episode_reward[0], 'snake_2': episode_reward[1],
                                                        'snake_3': episode_reward[2],
                                                        'total': np.sum(episode_reward[0:3])})
                score_tag = 'score'
                if args.compete or args.self_compete:
                    writer.add_scalars(score_tag, global_step=episode,
                                       tag_scalar_dict={'mean_step_reward': episode_tot_reward / step,
                                                        "mean_sparse_reward": episode_tot_split_reward[0] / step,
                                                        'mean_gain_reward': episode_tot_split_reward[1] / step,
                                                        'mean_dist_reward': episode_tot_split_reward[2] / step,
                                                        'enemy_mean_step_reward': episode_tot_reward_e / step})
                else:
                    writer.add_scalars(score_tag, global_step=episode,
                                       tag_scalar_dict={'mean_step_reward': episode_tot_reward / step,
                                                        "mean_sparse_reward": episode_tot_split_reward[0] / step,
                                                        'mean_gain_reward': episode_tot_split_reward[1] / step,
                                                        'mean_dist_reward': episode_tot_split_reward[2] / step})
                win_tag = 'win_rate'
                writer.add_scalars(win_tag, global_step=episode,
                                   tag_scalar_dict={'win_rate': int(np.sum(episode_reward[ctrl_agent_index]) > np.sum(
                                       episode_reward[enemy_agent_index]))})
                env.reset()
                break
        # --------- per-episode learning: push whole episode, then update ---------
        model.reset(args.batch_size)  # hidden state sized for the training batch
        if args.self_compete:
            model.replay_buffer.push(enemy_replay_buffer)
            enemy_replay_buffer.clear()
        model.replay_buffer.push(replay_buffer)
        replay_buffer.clear()
        model.epsilon_delay(step)
        model.update()
        model.update_target(episode)
        if args.compete:
            model_enemy.reset(args.batch_size)
            model_enemy.replay_buffer.push(enemy_replay_buffer)
            enemy_replay_buffer.clear()
            model_enemy.epsilon_delay(step)
            model_enemy.update()
            model_enemy.update_target(episode)
        loss_tag = 'loss'
        if model.loss:
            if not args.compete:
                writer.add_scalars(loss_tag, global_step=episode,
                                   tag_scalar_dict={'loss': float(model.loss)})
            else:
                if model_enemy.loss:
                    writer.add_scalars(loss_tag, global_step=episode,
                                       tag_scalar_dict={'loss': float(model.loss),
                                                        'enemy': float(model_enemy.loss)})
        if model.loss:
            if not args.compete:
                print(f'\t\t\t\tloss {model.loss:.3f}')
            elif model_enemy.loss:
                print(f'\t\t\t\tloss {model.loss:.3f}')
                print(f'\t\t\t\tloss {model_enemy.loss:.3f}')
        if episode % args.save_interval == 0:
            model.save_model(os.path.join(run_dir, "qmix_agent_%d.pth" % episode))
            if args.compete:
                model_enemy.save_model(os.path.join(run_dir, "qmix_enemy_agent_%d.pth" % episode))
            # model.save_checkpoint(os.path.join(run_dir,"qmix_ckpt_%d.pth"%episode))
from tensorboardX import SummaryWriter
from pathlib import Path
import sys
base_dir = Path(__file__).resolve().parent.parent
sys.path.append(str(base_dir))
from common import *
from log_path import *
from algo.qmix import QMIX
from algo.qmix_s import QMIX_s
from env.chooseenv import make
def step_trainer(args):
    """Train a QMIX_s agent on the 3v3 snakes environment with one update per env step.

    The learner controls snakes [0, 1, 2]. Opponent snakes [3, 4, 5] are driven
    by a greedy policy (default), a frozen pre-trained model (``--well_enemy``),
    a concurrently trained model (``--compete``), or the learner itself
    (``--self_compete``). Metrics go to TensorBoard under ``log_dir``; model
    checkpoints are written every ``args.save_interval`` episodes.
    """
    print("==algo: ", args.algo)
    print(f'device: {device}')
    print(f'model episode: {args.model_episode}')
    print(f'save interval: {args.save_interval}')
    env = make(args.game_name, conf=None)
    num_agents = env.n_player
    print(f'Total agent number: {num_agents}')
    ctrl_agent_index = [0, 1, 2]
    enemy_agent_index = [3, 4, 5]
    print(f'Agent control by the actor: {ctrl_agent_index}')
    ctrl_agent_num = len(ctrl_agent_index)
    width = env.board_width
    print(f'Game board width: {width}')
    height = env.board_height
    print(f'Game board height: {height}')
    act_dim = env.get_action_dim()
    print(f'action dimension: {act_dim}')
    obs_dim = 26
    print(f'observation dimension: {obs_dim}')
    print(f'replay buffer size: {args.buffer_size}')
    setup_seed(args.seed)
    # The three opponent modes are mutually exclusive.
    assert (args.compete + args.well_enemy + args.self_compete) < 2, "can't be both true"
    # Save paths for logs / config / checkpoints.
    run_dir, log_dir = make_logpath(args.game_name, args.algo)
    writer = SummaryWriter(str(log_dir))
    save_config(args, log_dir)
    model = QMIX_s(obs_dim, act_dim, width, height, 32, ctrl_agent_num, args)
    if args.compete:
        model_enemy = QMIX_s(obs_dim, act_dim, width, height, 32, ctrl_agent_num, args)
    if args.well_enemy:
        # Frozen, pre-trained opponent (evaluation mode, never updated).
        model_enemy = QMIX_s(obs_dim, act_dim, width, height, 32, ctrl_agent_num, args)
        model_enemy.load_model(Path(__file__).resolve().parent / Path("well"), 100000, False)
        model_enemy.eval()
    if args.load_model:
        load_dir = os.path.join(os.path.dirname(run_dir), "run" + str(args.load_model_run))
        model.load_model(load_dir, episode=args.load_model_run_episode)
    episode = 0
    while episode < args.max_episodes:
        # Receive initial observation state s1
        state = env.reset()
        # During training, since all agents are given the same obs, we take the state of 1st agent.
        # However, when evaluation in Jidi, each agent get its own state, like state[agent_index]: dict()
        # more details refer to https://github.com/jidiai/Competition_3v3snakes/blob/master/run_log.py#L68
        # state: list() ; state[0]: dict()
        state_to_training = state[0]
        # ======================= feature engineering =======================
        # since all snakes play independently, we choose first three snakes for training.
        # Then, the trained model can apply to other agents. ctrl_agent_index -> [0, 1, 2]
        # Noted, the index is different in obs. please refer to env description.
        obs, state_map = get_observations(state_to_training, ctrl_agent_index, obs_dim, height, width, args.mode)
        if args.compete or args.well_enemy or args.self_compete:
            obs_e, state_map_e = get_observations(state_to_training, enemy_agent_index, obs_dim, height, width,
                                                  args.mode)
        episode += 1
        step = 0
        episode_reward = np.zeros(6, dtype=int)
        episode_tot_reward = 0
        episode_tot_split_reward = np.zeros(3)
        if args.compete or args.self_compete:
            episode_tot_reward_e = 0
        if args.compete or args.well_enemy or args.self_compete:
            action_available_e = None
        # FIX: always initialise both loss lists; previously `loss_e` only
        # existed under --compete, so the logging below raised NameError when
        # --compete was off and `loss` was empty.
        loss_e = []
        loss = []
        action_available = None
        # Just run and collect the experience during one episode
        # The environment will be done during every 200 step
        while True:
            # ================================== inference ========================================
            actions_ctrl = model.choose_action(obs, action_available)
            actions_ctrl = actions_ctrl.reshape(-1)
            # ============================== add opponent actions =================================
            if args.compete or args.well_enemy or args.self_compete:
                # Under --self_compete the learner also plays the enemy side.
                model_e = model_enemy if args.compete or args.well_enemy else model
                actions_e = model_e.choose_action(obs_e, action_available_e)
                actions_e = actions_e.reshape(-1)
                actions = np.concatenate([actions_ctrl, actions_e], axis=0)
            else:
                # use greedy policy for enemy
                actions = action_greedy(state_to_training, actions_ctrl, height, width)
                # actions = action_random(act_dim,actions_ctrl)
            # get the limited action in the next state
            action_available = get_action_available(actions, ctrl_agent_index, act_dim)
            if args.compete or args.well_enemy or args.self_compete:
                # FIX: was assigned to `actions_available_e`, leaving
                # `action_available_e` (read by choose_action above) stuck at
                # None, so the opponent never received an action mask.
                action_available_e = get_action_available(actions, enemy_agent_index, act_dim)
            # the reward of Env is just the gain of length to each agents
            next_state, reward, done, _, info = env.step(env.encode(actions))
            next_state_to_training = next_state[0]
            next_obs, next_state_map = get_observations(next_state_to_training, ctrl_agent_index, obs_dim, height,
                                                        width, args.mode)
            if args.compete or args.well_enemy or args.self_compete:
                next_obs_e, next_state_map_e = get_observations(next_state_to_training, enemy_agent_index, obs_dim,
                                                                height,
                                                                width, args.mode)
            # ================================== reward shaping ========================================
            reward = np.array(reward)
            episode_reward += reward
            step_reward, split_step_reward = get_reward(info, episode_reward, ctrl_agent_index, enemy_agent_index,
                                                        reward, done, args)
            if args.compete or args.self_compete:
                # Enemy reward: same shaping with the team indices swapped.
                step_reward_e, _ = get_reward(info, episode_reward, enemy_agent_index, ctrl_agent_index,
                                              reward, done, args)
                episode_tot_reward_e += step_reward_e
            episode_tot_reward += step_reward
            episode_tot_split_reward += split_step_reward
            # ================================== collect data ========================================
            # Store transition(s) and learn on every environment step.
            if args.self_compete:
                model.replay_buffer.push([state_map_e, obs_e, actions_e, step_reward_e, next_state_map_e, next_obs_e,
                                          action_available_e, done])
            model.replay_buffer.push([state_map, obs, actions_ctrl, step_reward, next_state_map, next_obs,
                                      action_available, done])
            model.epsilon_delay()
            model.update()
            obs, state_map = next_obs, next_state_map
            if not args.random:
                state_to_training = next_state_to_training
            if args.compete:
                # FIX: use push() like every other transition store; append()
                # is not the replay-buffer interface used elsewhere here.
                model_enemy.replay_buffer.push([state_map_e, obs_e, actions_e, step_reward_e, next_state_map_e,
                                                next_obs_e, action_available_e, done])
                model_enemy.epsilon_delay()
                model_enemy.update()
                model_enemy.update_target()
            if args.well_enemy or args.compete or args.self_compete:
                obs_e, state_map_e = next_obs_e, next_state_map_e
            step += 1
            if model.loss:
                loss.append(model.loss)
            if args.compete and model_enemy.loss:
                loss_e.append(model_enemy.loss)
            if args.episode_length <= step or done:
                # --------- episode summary: console + TensorBoard ---------
                print(f'[Episode {episode:05d}] total_reward: {np.sum(episode_reward[0:3]):d}')
                print(f'\t\t\t\tsnake_1: {episode_reward[0]} '
                      f'snake_2: {episode_reward[1]} snake_3: {episode_reward[2]}')
                print(f'\t\t\t\tsnake_4: {episode_reward[3]} '
                      f'snake_5: {episode_reward[4]} snake_6: {episode_reward[5]}')
                print(
                    f'\t\t\t\tepisode_win:{np.sum(episode_reward[ctrl_agent_index]) > np.sum(episode_reward[enemy_agent_index])}')
                print(
                    f'\t\t\t\tself_length:{np.sum(episode_reward[ctrl_agent_index]):d} enemy_length:{np.sum(episode_reward[enemy_agent_index])}')
                reward_tag = 'reward'
                if args.compete or args.self_compete:
                    writer.add_scalars(reward_tag, global_step=episode,
                                       tag_scalar_dict={'snake_1': episode_reward[0], 'snake_2': episode_reward[1],
                                                        'snake_3': episode_reward[2],
                                                        'total': np.sum(episode_reward[0:3]),
                                                        'snake_4': episode_reward[3], 'snake_5': episode_reward[4],
                                                        'snake_6': episode_reward[5],
                                                        'enemy_total': np.sum(episode_reward[3:])})
                else:
                    writer.add_scalars(reward_tag, global_step=episode,
                                       tag_scalar_dict={'snake_1': episode_reward[0], 'snake_2': episode_reward[1],
                                                        'snake_3': episode_reward[2],
                                                        'total': np.sum(episode_reward[0:3])})
                score_tag = 'score'
                if args.compete or args.self_compete:
                    writer.add_scalars(score_tag, global_step=episode,
                                       tag_scalar_dict={'mean_step_reward': episode_tot_reward / step,
                                                        "mean_sparse_reward": episode_tot_split_reward[0] / step,
                                                        'mean_gain_reward': episode_tot_split_reward[1] / step,
                                                        'mean_dist_reward': episode_tot_split_reward[2] / step,
                                                        'enemy_mean_step_reward': episode_tot_reward_e / step})
                else:
                    writer.add_scalars(score_tag, global_step=episode,
                                       tag_scalar_dict={'mean_step_reward': episode_tot_reward / step,
                                                        "mean_sparse_reward": episode_tot_split_reward[0] / step,
                                                        'mean_gain_reward': episode_tot_split_reward[1] / step,
                                                        'mean_dist_reward': episode_tot_split_reward[2] / step})
                win_tag = 'win_rate'
                writer.add_scalars(win_tag, global_step=episode,
                                   tag_scalar_dict={'win_rate': int(np.sum(episode_reward[ctrl_agent_index]) > np.sum(
                                       episode_reward[enemy_agent_index]))})
                loss_tag = 'loss'
                if len(loss) and not args.compete:
                    writer.add_scalars(loss_tag, global_step=episode,
                                       tag_scalar_dict={'loss': np.mean(np.array(loss))})
                else:
                    if len(loss_e):
                        writer.add_scalars(loss_tag, global_step=episode,
                                           tag_scalar_dict={'loss': np.mean(np.array(loss)),
                                                            'enemy': np.mean(np.array(loss_e))})
                if len(loss):
                    if not args.compete:
                        print(f'\t\t\t\tloss {np.mean(np.array(loss)):.3f}')
                    elif len(loss_e):
                        print(f'\t\t\t\tloss {np.mean(np.array(loss)):.3f}')
                        print(f'\t\t\t\tloss {np.mean(np.array(loss_e)):.3f}')
                env.reset()
                break
        if episode % args.save_interval == 0:
            model.save_model(os.path.join(run_dir, "qmix_agent_%d.pth" % episode))
            if args.compete:
                model_enemy.save_model(os.path.join(run_dir, "qmix_enemy_agent_%d.pth" % episode))
            # model.save_checkpoint(os.path.join(run_dir,"qmix_ckpt_%d.pth"%episode))
def rnn_trainer(args):
print("==algo: ", args.algo)
print(f'device: {device}')
print(f'model episode: {args.model_episode}')
print(f'save interval: {args.save_interval}')
env = make(args.game_name, conf=None)
num_agents = env.n_player
print(f'Total agent number: {num_agents}')
ctrl_agent_index = [0, 1, 2]
enemy_agent_index = [3, 4, 5]
print(f'Agent control by the actor: {ctrl_agent_index}')
ctrl_agent_num = len(ctrl_agent_index)
width = env.board_width
print(f'Game board width: {width}')
height = env.board_height
print(f'Game board height: {height}')
act_dim = env.get_action_dim()
print(f'action dimension: {act_dim}')
obs_dim = 26
print(f'observation dimension: {obs_dim}')
print(f'replay buffer size: {args.buffer_size}')
setup_seed(args.seed)
assert (args.compete+args.well_enemy+args.self_compete) < 2, "can't be both true"
# 定义保存路径
run_dir, log_dir = make_logpath(args.game_name, args.algo)
writer = SummaryWriter(str(log_dir))
save_config(args, log_dir)
model = QMIX(obs_dim, act_dim, width, height, 32, ctrl_agent_num, args)
replay_buffer = []
if args.compete:
model_enemy = QMIX(obs_dim, act_dim, width, height, 32, ctrl_agent_num, args)
if args.compete or args.self_compete:
enemy_replay_buffer = []
if args.well_enemy:
model_enemy = QMIX(obs_dim, act_dim, width, height, 32, ctrl_agent_num, args)
model_enemy.load_model(Path(__file__).resolve().parent / Path("well"), 100000, False)
model_enemy.eval()
if args.load_model:
load_dir = os.path.join(os.path.dirname(run_dir), "run" + str(args.load_model_run))
model.load_model(load_dir, episode=args.load_model_run_episode)
episode = 0
while episode < args.max_episodes:
# Receive initial observation state s1
state = env.reset()
model.reset(1) # hidden layer for agents
if args.compete or args.well_enemy:
model_enemy.reset(1)
# During training, since all agents are given the same obs, we take the state of 1st agent.
# However, when evaluation in Jidi, each agent get its own state, like state[agent_index]: dict()
# more details refer to https://github.com/jidiai/Competition_3v3snakes/blob/master/run_log.py#L68
# state: list() ; state[0]: dict()
state_to_training = state[0]
# ======================= feature engineering =======================
# since all snakes play independently, we choose first three snakes for training.
# Then, the trained model can apply to other agents. ctrl_agent_index -> [0, 1, 2]
# Noted, the index is different in obs. please refer to env description.
obs, state_map = get_observations(state_to_training, ctrl_agent_index, obs_dim, height, width, args.mode)
if args.compete or args.well_enemy or args.self_compete:
obs_e, state_map_e = get_observations(state_to_training, enemy_agent_index, obs_dim, height, width,
args.mode)
episode += 1
step = 0
episode_reward = np.zeros(6, dtype=int)
episode_tot_reward = 0
episode_tot_split_reward = np.zeros(3)
if args.compete or args.self_compete:
episode_tot_reward_e = 0
if args.compete or args.well_enemy or args.self_compete:
action_available_e = None
action_available = None
# Just run and collect the experience during one episode
# The environment will be done during every 200 step
while True:
# ================================== inference ========================================
actions_ctrl = model.choose_action(obs, action_available)
actions_ctrl = actions_ctrl.reshape(-1)
# ============================== add opponent actions =================================
# use greedy policy for enemy TODO: both side are QMIX to train
if args.compete or args.well_enemy or args.self_compete:
model_e = model_enemy if args.compete or args.well_enemy else model
actions_e = model_e.choose_action(obs_e, action_available_e)
actions_e = actions_e.reshape(-1)
actions = np.concatenate([actions_ctrl, actions_e], axis=0)
else:
actions = action_greedy(state_to_training, actions_ctrl, height, width)
# actions = action_random(act_dim,actions_ctrl)
# get the limited action in the next state
action_available = get_action_available(actions, ctrl_agent_index, act_dim)
if args.compete or args.well_enemy or args.self_compete:
actions_available_e = get_action_available(actions, enemy_agent_index, act_dim)
# the reward of Env is just the gain of length to each agents
next_state, reward, done, _, info = env.step(env.encode(actions))
next_state_to_training = next_state[0]
next_obs, next_state_map = get_observations(next_state_to_training, ctrl_agent_index, obs_dim, height,
width, args.mode)
if args.compete or args.well_enemy or args.self_compete:
next_obs_e, next_state_map_e = get_observations(next_state_to_training, enemy_agent_index, obs_dim,
height,
width, args.mode)
# =========== ======================= reward shaping ========================================
reward = np.array(reward)
episode_reward += reward
step_reward, split_step_reward = get_reward(info, episode_reward, ctrl_agent_index, enemy_agent_index,
reward, done, args)
if args.compete or args.self_compete:
step_reward_e, _ = get_reward(info, episode_reward, enemy_agent_index, ctrl_agent_index,
reward, done,args)
episode_tot_reward_e += step_reward_e
episode_tot_reward += step_reward
# done = np.array([done] * ctrl_agent_num)
episode_tot_split_reward += split_step_reward
# ================================== collect data ========================================
# Store transition in R
replay_buffer.append([state_map, obs, actions_ctrl, step_reward, action_available, done])
if args.compete or args.self_compete:
enemy_replay_buffer.append([state_map_e, obs_e, actions_e, step_reward_e, actions_available_e, done])
obs_e, state_map_e = next_obs_e, next_state_map_e
if args.well_enemy:
obs_e, state_map_e = next_obs_e, next_state_map_e
obs, state_map = next_obs, next_state_map
if not args.random:
state_to_training = next_state_to_training # TODO: a great BUG!!!!
step += 1
if args.episode_length <= step or done:
print(f'[Episode {episode:05d}] total_reward: {np.sum(episode_reward[0:3]):d}')
print(f'\t\t\t\tsnake_1: {episode_reward[0]} '
f'snake_2: {episode_reward[1]} snake_3: {episode_reward[2]}')
print(f'\t\t\t\tsnake_4: {episode_reward[3]} '
f'snake_5: {episode_reward[4]} snake_6: {episode_reward[5]}')
print(
f'\t\t\t\tepisode_win:{np.sum(episode_reward[ctrl_agent_index]) > np.sum(episode_reward[enemy_agent_index])}')
print(
f'\t\t\t\tself_length:{np.sum(episode_reward[ctrl_agent_index]):d} enemy_length:{np.sum(episode_reward[enemy_agent_index])}')
reward_tag = 'reward'
if args.compete or args.self_compete:
writer.add_scalars(reward_tag, global_step=episode,
tag_scalar_dict={'snake_1': episode_reward[0], 'snake_2': episode_reward[1],
'snake_3': episode_reward[2],
'total': np.sum(episode_reward[0:3]),
'snake_4': episode_reward[3], 'snake_5': episode_reward[4],
'snake_6': episode_reward[5],
'enemy_total': np.sum(episode_reward[3:])})
else:
writer.add_scalars(reward_tag, global_step=episode,
tag_scalar_dict={'snake_1': episode_reward[0], 'snake_2': episode_reward[1],
'snake_3': episode_reward[2],
'total': np.sum(episode_reward[0:3])})
score_tag = 'score'
if args.compete or args.self_compete:
writer.add_scalars(score_tag, global_step=episode,
tag_scalar_dict={'mean_step_reward': episode_tot_reward / step,
"mean_sparse_reward": episode_tot_split_reward[0] / step,
'mean_gain_reward': episode_tot_split_reward[1] / step,
'mean_dist_reward': episode_tot_split_reward[2] / step,
'enemy_mean_step_reward': episode_tot_reward_e / step})
else:
writer.add_scalars(score_tag, global_step=episode,
tag_scalar_dict={'mean_step_reward': episode_tot_reward / step,
"mean_sparse_reward": episode_tot_split_reward[0] / step,
'mean_gain_reward': episode_tot_split_reward[1] / step,
'mean_dist_reward': episode_tot_split_reward[2] / step})
win_tag = 'win_rate'
writer.add_scalars(win_tag, global_step=episode,
tag_scalar_dict={'win_rate': int(np.sum(episode_reward[ctrl_agent_index]) > np.sum(
episode_reward[enemy_agent_index]))})
env.reset()
break
model.reset(args.batch_size)
if args.self_compete:
model.replay_buffer.push(enemy_replay_buffer)
enemy_replay_buffer.clear()
model.replay_buffer.push(replay_buffer)
replay_buffer.clear()
model.epsilon_delay(step)
model.update()
model.update_target(episode)
if args.compete:
model_enemy.reset(args.batch_size)
model_enemy.replay_buffer.push(enemy_replay_buffer)
enemy_replay_buffer.clear()
model_enemy.epsilon_delay(step)
model_enemy.update()
model_enemy.update_target(episode)
loss_tag = 'loss'
if model.loss:
if not args.compete:
writer.add_scalars(loss_tag, global_step=episode,
tag_scalar_dict={'loss': float(model.loss)})
else:
if model_enemy.loss:
writer.add_scalars(loss_tag, global_step=episode,
tag_scalar_dict={'loss': float(model.loss),
'enemy': float(model_enemy.loss)})
if model.loss:
if not args.compete:
print(f'\t\t\t\tloss {model.loss:.3f}')
elif model_enemy.loss:
print(f'\t\t\t\tloss {model.loss:.3f}')
print(f'\t\t\t\tloss {model_enemy.loss:.3f}')
if episode % args.save_interval == 0:
model.save_model(os.path.join(run_dir, "qmix_agent_%d.pth" % episode))
if args.compete:
model_enemy.save_model(os.path.join(run_dir, "qmix_enemy_agent_%d.pth" % episode))
# model.save_checkpoint(os.path.join(run_dir,"qmix_ckpt_%d.pth"%episode)) | 0.457137 | 0.393502 |
import copy
from django.test import tag
from rest_framework.test import APIClient, APITestCase
from users.models import User
from saef.models import JobSession
from saefportal.settings import MSG_ERROR_INVALID_INPUT, MSG_ERROR_REQUIRED_INPUT, MSG_ERROR_MISSING_OBJECT_INPUT, \
MSG_ERROR_EXISTING
from utils.test_utils import load_test_json, load_test_database
test_data = load_test_json('restapi')
@tag("celery")
class JobSessionStartTests(APITestCase):
@classmethod
def setUpTestData(cls):
load_test_database('saef.applicationtoken')
load_test_database('saef.application')
load_test_database('saef.applicationsession')
load_test_database('saef.job')
def setUp(self):
self.data = copy.deepcopy(test_data)
self.user = User.objects.create_user(**self.data['Credentials'])
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_job_status_post_success(self):
jobs = JobSession.objects.filter().count()
self.assertEqual(jobs, 0)
response = self.client.post('http://localhost:8000/restapi/job_sessions/start/', self.data['JobSessionStart'],
format='json')
self.assertEqual(response.status_code, 200)
jobs = JobSession.objects.filter().count()
self.assertEqual(jobs, 1)
def test_job_status_post_required(self):
self.data['JobSessionStart'].pop('application_execution_id')
response = self.client.post('http://localhost:8000/restapi/job_sessions/start/', self.data['JobSessionStart'],
format='json')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['error'],
MSG_ERROR_REQUIRED_INPUT('application_execution_id and job_execution_id'))
def test_job_status_post_invalid(self):
self.data['JobSessionStart']['application_execution_id'] = 'notvalid'
response = self.client.post('http://localhost:8000/restapi/job_sessions/start/', self.data['JobSessionStart'],
format='json')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['error'], MSG_ERROR_INVALID_INPUT('UUID'))
def test_job_status_post_missing_object(self):
self.data['JobSessionStart']['application_execution_id'] = '11a1a11a-a11a-1111-1a11-a1a1aaa11111'
response = self.client.post('http://localhost:8000/restapi/job_sessions/start/', self.data['JobSessionStart'],
format='json')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['error'], MSG_ERROR_MISSING_OBJECT_INPUT("application execution id or job name"))
class JobSessionEndTests(APITestCase):
@classmethod
def setUpTestData(cls):
load_test_database('saef.applicationtoken')
load_test_database('saef.application')
load_test_database('saef.applicationsession')
load_test_database('saef.job')
load_test_database('saef.jobsession')
def setUp(self):
self.data = copy.deepcopy(test_data)
self.user = User.objects.create_user(**self.data['Credentials'])
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_job_status_post_success(self):
jobs = JobSession.objects.filter(execution_id=self.data['JobSessionEnd']['job_execution_id']).count()
self.assertEqual(jobs, 1)
response = self.client.post('http://localhost:8000/restapi/job_sessions/end/', self.data['JobSessionEnd'],
format='json')
self.assertEqual(response.status_code, 200)
jobs = JobSession.objects.filter(execution_id=self.data['JobSessionEnd']['job_execution_id']).count()
self.assertEqual(jobs, 2)
def test_job_status_post_exist(self):
self.client.post('http://localhost:8000/restapi/job_sessions/end/', self.data['JobSessionEnd'], format='json')
response = self.client.post('http://localhost:8000/restapi/job_sessions/end/', self.data['JobSessionEnd'],
format='json')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['error'], MSG_ERROR_EXISTING('job', 'END'))
def test_job_status_post_required(self):
self.data['JobSessionEnd'].pop('job_execution_id')
response = self.client.post('http://localhost:8000/restapi/job_sessions/end/', self.data['JobSessionEnd'],
format='json')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['error'], MSG_ERROR_REQUIRED_INPUT('application_execution_id and '
'job_execution_id'))
def test_job_status_post_invalid(self):
self.data['JobSessionEnd']['job_execution_id'] = 'notvalid'
response = self.client.post('http://localhost:8000/restapi/job_sessions/end/', self.data['JobSessionEnd'],
format='json')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['error'], MSG_ERROR_INVALID_INPUT('UUID'))
def test_job_status_post_missing_object(self):
self.data['JobSessionEnd']['job_execution_id'] = '11a1a11a-a11a-1111-1a11-a1a1aaa11111'
response = self.client.post('http://localhost:8000/restapi/job_sessions/end/', self.data['JobSessionEnd'],
format='json')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['error'], MSG_ERROR_MISSING_OBJECT_INPUT("application or job execution id")) | saefportal/restapi/tests/test_job_session.py | import copy
from django.test import tag
from rest_framework.test import APIClient, APITestCase
from users.models import User
from saef.models import JobSession
from saefportal.settings import MSG_ERROR_INVALID_INPUT, MSG_ERROR_REQUIRED_INPUT, MSG_ERROR_MISSING_OBJECT_INPUT, \
MSG_ERROR_EXISTING
from utils.test_utils import load_test_json, load_test_database
test_data = load_test_json('restapi')
@tag("celery")
class JobSessionStartTests(APITestCase):
@classmethod
def setUpTestData(cls):
load_test_database('saef.applicationtoken')
load_test_database('saef.application')
load_test_database('saef.applicationsession')
load_test_database('saef.job')
def setUp(self):
self.data = copy.deepcopy(test_data)
self.user = User.objects.create_user(**self.data['Credentials'])
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_job_status_post_success(self):
jobs = JobSession.objects.filter().count()
self.assertEqual(jobs, 0)
response = self.client.post('http://localhost:8000/restapi/job_sessions/start/', self.data['JobSessionStart'],
format='json')
self.assertEqual(response.status_code, 200)
jobs = JobSession.objects.filter().count()
self.assertEqual(jobs, 1)
def test_job_status_post_required(self):
self.data['JobSessionStart'].pop('application_execution_id')
response = self.client.post('http://localhost:8000/restapi/job_sessions/start/', self.data['JobSessionStart'],
format='json')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['error'],
MSG_ERROR_REQUIRED_INPUT('application_execution_id and job_execution_id'))
def test_job_status_post_invalid(self):
self.data['JobSessionStart']['application_execution_id'] = 'notvalid'
response = self.client.post('http://localhost:8000/restapi/job_sessions/start/', self.data['JobSessionStart'],
format='json')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['error'], MSG_ERROR_INVALID_INPUT('UUID'))
def test_job_status_post_missing_object(self):
self.data['JobSessionStart']['application_execution_id'] = '11a1a11a-a11a-1111-1a11-a1a1aaa11111'
response = self.client.post('http://localhost:8000/restapi/job_sessions/start/', self.data['JobSessionStart'],
format='json')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['error'], MSG_ERROR_MISSING_OBJECT_INPUT("application execution id or job name"))
class JobSessionEndTests(APITestCase):
@classmethod
def setUpTestData(cls):
load_test_database('saef.applicationtoken')
load_test_database('saef.application')
load_test_database('saef.applicationsession')
load_test_database('saef.job')
load_test_database('saef.jobsession')
def setUp(self):
self.data = copy.deepcopy(test_data)
self.user = User.objects.create_user(**self.data['Credentials'])
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_job_status_post_success(self):
jobs = JobSession.objects.filter(execution_id=self.data['JobSessionEnd']['job_execution_id']).count()
self.assertEqual(jobs, 1)
response = self.client.post('http://localhost:8000/restapi/job_sessions/end/', self.data['JobSessionEnd'],
format='json')
self.assertEqual(response.status_code, 200)
jobs = JobSession.objects.filter(execution_id=self.data['JobSessionEnd']['job_execution_id']).count()
self.assertEqual(jobs, 2)
def test_job_status_post_exist(self):
self.client.post('http://localhost:8000/restapi/job_sessions/end/', self.data['JobSessionEnd'], format='json')
response = self.client.post('http://localhost:8000/restapi/job_sessions/end/', self.data['JobSessionEnd'],
format='json')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['error'], MSG_ERROR_EXISTING('job', 'END'))
def test_job_status_post_required(self):
self.data['JobSessionEnd'].pop('job_execution_id')
response = self.client.post('http://localhost:8000/restapi/job_sessions/end/', self.data['JobSessionEnd'],
format='json')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['error'], MSG_ERROR_REQUIRED_INPUT('application_execution_id and '
'job_execution_id'))
def test_job_status_post_invalid(self):
self.data['JobSessionEnd']['job_execution_id'] = 'notvalid'
response = self.client.post('http://localhost:8000/restapi/job_sessions/end/', self.data['JobSessionEnd'],
format='json')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['error'], MSG_ERROR_INVALID_INPUT('UUID'))
def test_job_status_post_missing_object(self):
self.data['JobSessionEnd']['job_execution_id'] = '11a1a11a-a11a-1111-1a11-a1a1aaa11111'
response = self.client.post('http://localhost:8000/restapi/job_sessions/end/', self.data['JobSessionEnd'],
format='json')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['error'], MSG_ERROR_MISSING_OBJECT_INPUT("application or job execution id")) | 0.376165 | 0.152253 |
from subprocess import check_output
nano_results = check_output(
"./generate_table.awk < nano.txt", shell=True, text=True)
micro_results = check_output(
"./generate_table.awk < micro.txt", shell=True, text=True)
samd_results = check_output(
"./generate_table.awk < samd.txt", shell=True, text=True)
stm32_results = check_output(
"./generate_table.awk < stm32.txt", shell=True, text=True)
esp8266_results = check_output(
"./generate_table.awk < esp8266.txt", shell=True, text=True)
esp32_results = check_output(
"./generate_table.awk < esp32.txt", shell=True, text=True)
teensy32_results = check_output(
"./generate_table.awk < teensy32.txt", shell=True, text=True)
print(f"""\
# Memory Benchmark
The `MemoryBenchmark.ino` was compiled with each `FEATURE_*` and the flash
memory and static RAM sizes were recorded. The `FEATURE_BASELINE` selection is
the baseline, and its memory usage numbers are subtracted from the subsequent
`FEATURE_*` memory usage.
**Version**: AceSorting v1.0.0
**DO NOT EDIT**: This file was auto-generated using `make README.md`.
## How to Regenerate
To regenerate this README.md:
```
$ make clean_benchmarks
$ make benchmarks
$ make README.md
```
The `make benchmarks` target uses `collect.sh` script which calls `auniter.sh`
(https://github.com/bxparks/AUniter) to invoke the Arduino IDE programmatically.
It produces a `*.txt` file with the flash and ram usage information (e.g.
`nano.txt`).
The `make README.md` command calls the `generated_readme.py` Python script which
generates this `README.md` file. The ASCII tables below are generated by the
`generate_table.awk` script, which takes each `*.txt` file and converts it to an
ASCII table.
## Library Size Changes
**v0.1**
* Initial version.
* The memory usage for C-library `qsort()` is suspiciously low on the ESP32,
only 88 bytes, compared to 800-1400 bytes for other platforms. I think this
indicates that the `qsort()` function is already compiled into the ESP32
runtime library.
**v0.3**
* Add 3-argument version of sorting functions to pass in a comparison predicate,
and route the 2-argument version into the 3-argument version.
* Usually no difference in flash size, as the compiler seems to be able to
inline the lambda expression. In fact, some actually got a few bytes smaller.
**v1.0.0**
* Some minor changes to total flash consumption due to upgrading the
tool chain for SAMD21, ESP8266 and TeensyDuino. The delta flash consumption
of each algorithm did not change much.
## Arduino Nano
* 16MHz ATmega328P
* Arduino IDE 1.8.16, Arduino CLI 0.19.2
* Arduino AVR Boards 1.8.3
```
{nano_results}
```
## Sparkfun Pro Micro
* 16 MHz ATmega32U4
* Arduino 1.8.16, Arduino CLI 0.19.2
* SparkFun AVR Boards 1.1.13
```
{micro_results}
```
## SAMD21 M0 Mini
* 48 MHz ARM Cortex-M0+
* Arduino 1.8.16, Arduino CLI 0.19.2
* Sparkfun SAMD Boards 1.8.4
```
{samd_results}
```
(SAMD compiler does not produce RAM usage numbers.)
## STM32 Blue Pill
* STM32F103C8, 72 MHz ARM Cortex-M3
* Arduino 1.8.16, Arduino CLI 0.19.2
* STM32duino 2.0.0
```
{stm32_results}
```
## ESP8266
* NodeMCU 1.0, 80MHz ESP8266
* Arduino 1.8.16, Arduino CLI 0.19.2
* ESP8266 Boards 3.0.2
```
{esp8266_results}
```
## ESP32
* ESP32-01 Dev Board, 240 MHz Tensilica LX6
* Arduino 1.8.16, Arduino CLI 0.19.2
* ESP32 Boards 1.0.6
```
{esp32_results}
```
RAM usage remains constant as more objects are created, which indicates that an
initial pool of a certain minimum size is created regardless of the actual RAM
usage by objects.
## Teensy 3.2
* 96 MHz ARM Cortex-M4
* Arduino 1.8.16, Arduino CLI 0.19.2
* Teensyduino 1.55
```
{teensy32_results}
```
""") | examples/MemoryBenchmark/generate_readme.py | $ make clean_benchmarks
$ make benchmarks
$ make README.md
{nano_results}
{micro_results}
{samd_results}
{stm32_results}
{esp8266_results}
{esp32_results}
{teensy32_results} | 0.504883 | 0.81772 |
import time
from collections import OrderedDict
from typing import List, Dict
from trafficgenerator.tgn_utils import is_false, TgnError
from trafficgenerator.tgn_tcl import tcl_str, py_list_to_tcl_list
from ixnetwork.ixn_app import IxnRoot
from ixnetwork.ixn_object import IxnObject
from ixnetwork.api.ixn_rest import IxnRestWrapper
def remove_all_tcl_views() -> None:
IxnRoot.root.execute('removeAllTclViews')
IxnRoot.root.api.commit()
class IxnStatisticsView:
""" Base class for all statistics view.
Note that Flow Statistics are poorly supported in this version as the object name spans over multiple column.
"""
def __init__(self, name: str) -> None:
"""
:param name: Statistics view name.
"""
self.name_caption = view_2_caption.get(name, 'Port Name')
statistics = IxnRoot.root.get_child_static('statistics')
if type(IxnRoot.root.api) is IxnRestWrapper:
views = statistics.get_children('view')
for view in views:
if view.get_attribute('caption') == name:
self.ixn_view = view
break
else:
self.ixn_view = statistics.get_child_static(f'view:"{name}"')
self.captions = []
self.statistics = OrderedDict()
def __repr__(self) -> str:
return self.ixn_view
def read_stats(self) -> None:
""" Reads the statistics view from IXN and saves it in statistics dictionary. """
captions, rows = self._get_pages()
name_caption_index = captions.index(self.name_caption)
captions.pop(name_caption_index)
self.captions = captions
self.statistics = OrderedDict()
for row in rows:
name = row.pop(name_caption_index)
self.statistics[name] = row
def get_all_stats(self) -> Dict[str, Dict[str, str]]:
""" Table of all statistics values for all objects. """
all_stats = OrderedDict()
for obj_name in self.statistics:
all_stats[obj_name] = self.get_object_stats(obj_name)
return all_stats
def get_object_stats(self, obj_name: str) -> Dict[str, str]:
""" Returns table of all statistics values for the requested object.
:param obj_name: requested object name
"""
return dict(zip(self.captions, self.statistics[obj_name]))
def get_stats(self, stat_name: str) -> List[str]:
""" Returns list of all values of the requested statistic for all objects.
:param stat_name: requested statistics name.
"""
return [self.get_stat(r, stat_name) for r in self.statistics.keys()]
def get_stat(self, obj_name: str, stat_name: str) -> str:
""" Returns the value of the requested statics for the requested object.
:param obj_name: requested object name.
:param stat_name: requested statistics name.
"""
return self.statistics[obj_name][self.captions.index(stat_name)]
def get_counters(self, counter_name: str) -> List[int]:
""" Returns list of all int values of the requested counter for all objects.
:param counter_name: requested counter name.
"""
return [int(c) for c in self.get_stats(counter_name)]
def get_counter(self, obj_name: str, counter_name: str) -> int:
""" Returns the int value of the requested counter for the requested object.
:param obj_name: requested object name.
:param counter_name: requested counter name.
"""
return int(self.get_stat(obj_name, counter_name))
def _get_pages(self):
page = self.ixn_view.get_child_static('page')
if is_false(page.get_attribute('isReady')):
raise TgnError(f'"{page.obj}" not ready')
captions = page.get_list_attribute('columnCaptions')
rows = []
page.set_attributes(pageSize=50)
for page_num in range(1, int(page.get_attribute('totalPages')) + 1):
page.set_attributes(commit=True, currentPage=page_num)
rows += page.get_list_attribute('pageValues')
return captions, rows
class IxnPortStatistics(IxnStatisticsView):
""" Port statistics view. """
def __init__(self) -> None:
super().__init__('Port Statistics')
class IxnTrafficItemStatistics(IxnStatisticsView):
""" Traffic items view. """
def __init__(self) -> None:
super().__init__('Traffic Item Statistics')
class IxnUserDefinedStatistics(IxnStatisticsView):
""" User defined statistics view. """
def __init__(self) -> None:
super().__init__('User Defined Statistics')
class IxnFlowStatistics(IxnStatisticsView):
""" Floe statistics view. """
def __init__(self) -> None:
super().__init__('Flow Statistics')
def read_stats(self) -> None:
""" Reads the statistics view from IXN and saves it in statistics dictionary.
Flow statistics require special implementation as the statistics name is dynamic and changes based on the
configuration.
"""
captions, rows = self._get_pages()
name_caption_index = captions.index('Tx Frames')
for _ in range(name_caption_index):
captions.pop(0)
self.captions = captions
self.statistics = OrderedDict()
for row in rows:
name = ''
for _ in range(name_caption_index):
name += row.pop(0) + '/'
name = name[:-1]
self.statistics[name] = row
class IxnDrillDownStatistics(IxnStatisticsView):
def __init__(self, type):
statistics = IxnRoot.root.get_child_static('statistics')
self.ixn_view = IxnObject(parent=statistics, objType='view')
self.ixn_view.set_attributes(caption='Yoram')
self.ixn_view.set_attributes(commit=True, type=type)
self.ixn_view._data['objRef'] = self.ixn_view.api.remapIds(self.ixn_view.ref)
availableTrafficItemFilter = self.ixn_view.get_children('availableTrafficItemFilter')
filter = self.ixn_view.get_child_static('layer23TrafficItemFilter')
filter.set_attributes(trafficItemFilterIds=py_list_to_tcl_list([r.ref for r in availableTrafficItemFilter]))
for statistic in self.ixn_view.get_children('statistic'):
statistic.set_attributes(enabled=True)
self.ixn_view.set_attributes(commit=True, visible=True, enabled=True)
def set_udf(self, option):
ti_stats = IxnTrafficItemStatistics()
dd = ti_stats.ixn_view.get_child_static('drillDown')
dd.set_attributes(commit=True, targetRowIndex=0)
dd.get_attribute('availableDrillDownOptions')
dd.set_attributes(commit=True, targetDrillDownOption=option)
dd.get_attribute('targetRowIndex')
dd.get_attribute('targetRow')
dd.get_attribute('targetDrillDownOption')
IxnRoot.root.api.commit()
dd.execute('doDrillDown', tcl_str(dd.ref))
time.sleep(10)
view_2_caption = {'Flow Statistics': None,
'Port Statistics': 'Port Name',
'Traffic Item Statistics': 'Traffic Item',
'User Defined Statistics': 'IPv4 :Source Address',
} | ixnetwork/ixn_statistics_view.py | import time
from collections import OrderedDict
from typing import List, Dict
from trafficgenerator.tgn_utils import is_false, TgnError
from trafficgenerator.tgn_tcl import tcl_str, py_list_to_tcl_list
from ixnetwork.ixn_app import IxnRoot
from ixnetwork.ixn_object import IxnObject
from ixnetwork.api.ixn_rest import IxnRestWrapper
def remove_all_tcl_views() -> None:
IxnRoot.root.execute('removeAllTclViews')
IxnRoot.root.api.commit()
class IxnStatisticsView:
""" Base class for all statistics view.
Note that Flow Statistics are poorly supported in this version as the object name spans over multiple column.
"""
def __init__(self, name: str) -> None:
"""
:param name: Statistics view name.
"""
self.name_caption = view_2_caption.get(name, 'Port Name')
statistics = IxnRoot.root.get_child_static('statistics')
if type(IxnRoot.root.api) is IxnRestWrapper:
views = statistics.get_children('view')
for view in views:
if view.get_attribute('caption') == name:
self.ixn_view = view
break
else:
self.ixn_view = statistics.get_child_static(f'view:"{name}"')
self.captions = []
self.statistics = OrderedDict()
def __repr__(self) -> str:
return self.ixn_view
def read_stats(self) -> None:
""" Reads the statistics view from IXN and saves it in statistics dictionary. """
captions, rows = self._get_pages()
name_caption_index = captions.index(self.name_caption)
captions.pop(name_caption_index)
self.captions = captions
self.statistics = OrderedDict()
for row in rows:
name = row.pop(name_caption_index)
self.statistics[name] = row
def get_all_stats(self) -> Dict[str, Dict[str, str]]:
""" Table of all statistics values for all objects. """
all_stats = OrderedDict()
for obj_name in self.statistics:
all_stats[obj_name] = self.get_object_stats(obj_name)
return all_stats
def get_object_stats(self, obj_name: str) -> Dict[str, str]:
""" Returns table of all statistics values for the requested object.
:param obj_name: requested object name
"""
return dict(zip(self.captions, self.statistics[obj_name]))
def get_stats(self, stat_name: str) -> List[str]:
""" Returns list of all values of the requested statistic for all objects.
:param stat_name: requested statistics name.
"""
return [self.get_stat(r, stat_name) for r in self.statistics.keys()]
def get_stat(self, obj_name: str, stat_name: str) -> str:
""" Returns the value of the requested statics for the requested object.
:param obj_name: requested object name.
:param stat_name: requested statistics name.
"""
return self.statistics[obj_name][self.captions.index(stat_name)]
def get_counters(self, counter_name: str) -> List[int]:
""" Returns list of all int values of the requested counter for all objects.
:param counter_name: requested counter name.
"""
return [int(c) for c in self.get_stats(counter_name)]
def get_counter(self, obj_name: str, counter_name: str) -> int:
""" Returns the int value of the requested counter for the requested object.
:param obj_name: requested object name.
:param counter_name: requested counter name.
"""
return int(self.get_stat(obj_name, counter_name))
def _get_pages(self):
page = self.ixn_view.get_child_static('page')
if is_false(page.get_attribute('isReady')):
raise TgnError(f'"{page.obj}" not ready')
captions = page.get_list_attribute('columnCaptions')
rows = []
page.set_attributes(pageSize=50)
for page_num in range(1, int(page.get_attribute('totalPages')) + 1):
page.set_attributes(commit=True, currentPage=page_num)
rows += page.get_list_attribute('pageValues')
return captions, rows
class IxnPortStatistics(IxnStatisticsView):
""" Port statistics view. """
def __init__(self) -> None:
super().__init__('Port Statistics')
class IxnTrafficItemStatistics(IxnStatisticsView):
""" Traffic items view. """
def __init__(self) -> None:
super().__init__('Traffic Item Statistics')
class IxnUserDefinedStatistics(IxnStatisticsView):
""" User defined statistics view. """
def __init__(self) -> None:
super().__init__('User Defined Statistics')
class IxnFlowStatistics(IxnStatisticsView):
""" Floe statistics view. """
def __init__(self) -> None:
super().__init__('Flow Statistics')
def read_stats(self) -> None:
""" Reads the statistics view from IXN and saves it in statistics dictionary.
Flow statistics require special implementation as the statistics name is dynamic and changes based on the
configuration.
"""
captions, rows = self._get_pages()
name_caption_index = captions.index('Tx Frames')
for _ in range(name_caption_index):
captions.pop(0)
self.captions = captions
self.statistics = OrderedDict()
for row in rows:
name = ''
for _ in range(name_caption_index):
name += row.pop(0) + '/'
name = name[:-1]
self.statistics[name] = row
class IxnDrillDownStatistics(IxnStatisticsView):
def __init__(self, type):
statistics = IxnRoot.root.get_child_static('statistics')
self.ixn_view = IxnObject(parent=statistics, objType='view')
self.ixn_view.set_attributes(caption='Yoram')
self.ixn_view.set_attributes(commit=True, type=type)
self.ixn_view._data['objRef'] = self.ixn_view.api.remapIds(self.ixn_view.ref)
availableTrafficItemFilter = self.ixn_view.get_children('availableTrafficItemFilter')
filter = self.ixn_view.get_child_static('layer23TrafficItemFilter')
filter.set_attributes(trafficItemFilterIds=py_list_to_tcl_list([r.ref for r in availableTrafficItemFilter]))
for statistic in self.ixn_view.get_children('statistic'):
statistic.set_attributes(enabled=True)
self.ixn_view.set_attributes(commit=True, visible=True, enabled=True)
def set_udf(self, option):
ti_stats = IxnTrafficItemStatistics()
dd = ti_stats.ixn_view.get_child_static('drillDown')
dd.set_attributes(commit=True, targetRowIndex=0)
dd.get_attribute('availableDrillDownOptions')
dd.set_attributes(commit=True, targetDrillDownOption=option)
dd.get_attribute('targetRowIndex')
dd.get_attribute('targetRow')
dd.get_attribute('targetDrillDownOption')
IxnRoot.root.api.commit()
dd.execute('doDrillDown', tcl_str(dd.ref))
time.sleep(10)
view_2_caption = {'Flow Statistics': None,
'Port Statistics': 'Port Name',
'Traffic Item Statistics': 'Traffic Item',
'User Defined Statistics': 'IPv4 :Source Address',
} | 0.807005 | 0.220563 |
from django.test import TestCase
from .product_test_helper import create_product
from .login_test_helper import registrate_new_user, login_user
import json
default_name = "test1234"
default_fk_vendor = 1
default_price = 10.0
default_photo = 'www.google.com'
default_description = 'description'
default_product_id = 1
# Create your tests here.
class ProductTest(TestCase):
def test_product_creation_with_valid_params(self):
email = '<EMAIL>'
responseJson = registrate_new_user(email)
loginResponseJson = login_user(email)
login_token = loginResponseJson["token"]
fk_vendor = loginResponseJson["user"]["pk"]
data = create_product(default_name, fk_vendor, default_price, default_photo, default_description, login_token)
response = self.client.post('/api/create_product/', data=data)
self.assertEqual(response.status_code, 200)
def test_product_creation_with_invalid_params(self):
email = '<EMAIL>'
responseJson = registrate_new_user(email)
loginResponseJson = login_user(email)
fk_vendor = loginResponseJson["user"]["pk"]
data = create_product(default_name, fk_vendor, default_price, default_photo, default_description)
response = self.client.post('/api/create_product/', data=data)
self.assertEqual(response.status_code, 403)
def test_all_products_with_valid_params(self):
# product from user 1
email = '<EMAIL>'
responseJson = registrate_new_user(email)
loginResponseJson = login_user(email)
fk_vendor = loginResponseJson["user"]["pk"]
login_token = loginResponseJson["token"]
data = create_product(default_name, fk_vendor, default_price, default_photo, default_description, login_token)
response = self.client.post('/api/create_product/', data=data)
# product from user 2
email2 = '<EMAIL>'
responseJson2 = registrate_new_user(email2)
loginResponseJson2 = login_user(email2)
fk_vendor2 = loginResponseJson2["user"]["pk"]
login_token2 = loginResponseJson2["token"]
data2 = create_product(default_name, fk_vendor2, default_price, default_photo, default_description, login_token2)
response2 = self.client.post('/api/create_product/', data=data2)
# retriving all products
data3 = {'token': login_token}
response3 = self.client.post('/api/all_products/', data=data3)
# checking pertinent data
self.assertEqual(response3.status_code, 200)
def test_all_products_with_invalid_params(self):
response = self.client.post('/api/all_products/', data = {'token': None})
self.assertEqual(response.status_code, 403)
def test_product_deletion_with_valid_params(self):
email = '<EMAIL>'
responseJson = registrate_new_user(email)
loginResponseJson = login_user(email)
login_token = loginResponseJson["token"]
fk_vendor = loginResponseJson["user"]["pk"]
data = create_product(default_name, fk_vendor, default_price, default_photo, default_description, login_token)
response = self.client.post('/api/create_product/', data=data)
data2 = {'user_id': fk_vendor,'token': login_token}
response2 = self.client.post('/api/my_products_screen/', data=data2)
data3 = {
'fk_vendor': fk_vendor,
'product_id': response2.data[0]["id"],
'token': login_token
}
response3 = self.client.post('/api/delete_product/', data=data3)
self.assertEqual(response3.status_code, 200)
def test_product_deletion_with_invalid_params(self):
data = {
'fk_vendor': default_fk_vendor,
'product_id': default_product_id,
'token': None
}
response = self.client.post('/api/delete_product/', data=data)
self.assertEqual(response.status_code, 403)
def test_my_products_screen_with_valid_params(self):
# product from user 1
email = '<EMAIL>'
responseJson = registrate_new_user(email)
loginResponseJson = login_user(email)
fk_vendor = loginResponseJson["user"]["pk"]
login_token = loginResponseJson["token"]
data = create_product(default_name, fk_vendor, default_price, default_photo, default_description, login_token)
response = self.client.post('/api/create_product/', data=data)
data2 = create_product(default_name, fk_vendor, default_price, default_photo, default_description, login_token)
response2 = self.client.post('/api/create_product/', data=data2)
# retriving user products
data3 = {'user_id': fk_vendor,'token': login_token}
response3 = self.client.post('/api/my_products_screen/', data=data3)
self.assertEqual(response3.data[0]["fk_vendor"], fk_vendor)
self.assertEqual(response3.data[1]["fk_vendor"], fk_vendor)
self.assertEqual(response3.status_code, 200)
def test_my_products_screen_with_invalid_params(self):
data = {'user_id': default_fk_vendor, 'token': None}
response = self.client.post('/api/my_products_screen/', data=data)
self.assertEqual(response.status_code, 403)
def test_get_product_with_valid_params(self):
email = '<EMAIL>'
responseJson = registrate_new_user(email)
loginResponseJson = login_user(email)
fk_vendor = loginResponseJson["user"]["pk"]
login_token = loginResponseJson["token"]
data = create_product(default_name, fk_vendor, default_price, default_photo, default_description, login_token)
response = self.client.post('/api/create_product/', data=data)
data2 = {'user_id': fk_vendor,'token': login_token}
response2 = self.client.post('/api/my_products_screen/', data=data2)
data3 = {
'product_id': response2.data[0]["id"],
'token': login_token
}
response3 = self.client.post('/api/get_product/', data=data3)
self.assertEqual(response3.data["fk_vendor"], data['fk_vendor'])
self.assertEqual(response3.data["name"], data['name'])
self.assertEqual(response3.data["price"], data['price'])
self.assertEqual(response3.data["photo"], data['photo'])
self.assertEqual(response3.data["description"], data['description'])
self.assertEqual(response3.status_code, 200)
def test_get_product_with_invalid_params(self):
data = {
'product_id': default_product_id,
'token': None
}
response = self.client.post('/api/get_product/', data=data)
self.assertEqual(response.status_code, 403)
def test_edit_product_with_valid_params(self):
email = '<EMAIL>'
responseJson = registrate_new_user(email)
loginResponseJson = login_user(email)
fk_vendor = loginResponseJson["user"]["pk"]
login_token = loginResponseJson["token"]
data = create_product(default_name, fk_vendor, default_price, default_photo, default_description, login_token)
response = self.client.post('/api/create_product/', data=data)
# retriving user products
data2 = {'user_id': fk_vendor,'token': login_token}
response2 = self.client.post('/api/my_products_screen/', data=data2)
data3 = {
'product_id': response2.data[0]["id"],
'name': 'Newname',
'price': '1.0',
'photo': 'www.teste.com',
'description': 'desc',
'token': login_token
}
response3 = self.client.post('/api/edit_product/', data=data3)
data4 = {'user_id': fk_vendor,'token': login_token}
response4 = self.client.post('/api/my_products_screen/', data=data4)
self.assertEqual(response4.data[0]["name"], data3["name"])
self.assertEqual(response3.status_code, 200)
def test_edit_product_with_invalid_params(self):
data = {
'product_id': default_product_id,
'name': default_name,
'price': default_price,
'photo': default_photo,
'description': default_description,
'token': None
}
response1 = self.client.post('/api/edit_product/', data=data)
self.assertEqual(response1.status_code, 403) | api/api_gateway/api/tests/test_product.py | from django.test import TestCase
from .product_test_helper import create_product
from .login_test_helper import registrate_new_user, login_user
import json
default_name = "test1234"
default_fk_vendor = 1
default_price = 10.0
default_photo = 'www.google.com'
default_description = 'description'
default_product_id = 1
# Create your tests here.
class ProductTest(TestCase):
def test_product_creation_with_valid_params(self):
email = '<EMAIL>'
responseJson = registrate_new_user(email)
loginResponseJson = login_user(email)
login_token = loginResponseJson["token"]
fk_vendor = loginResponseJson["user"]["pk"]
data = create_product(default_name, fk_vendor, default_price, default_photo, default_description, login_token)
response = self.client.post('/api/create_product/', data=data)
self.assertEqual(response.status_code, 200)
def test_product_creation_with_invalid_params(self):
email = '<EMAIL>'
responseJson = registrate_new_user(email)
loginResponseJson = login_user(email)
fk_vendor = loginResponseJson["user"]["pk"]
data = create_product(default_name, fk_vendor, default_price, default_photo, default_description)
response = self.client.post('/api/create_product/', data=data)
self.assertEqual(response.status_code, 403)
def test_all_products_with_valid_params(self):
# product from user 1
email = '<EMAIL>'
responseJson = registrate_new_user(email)
loginResponseJson = login_user(email)
fk_vendor = loginResponseJson["user"]["pk"]
login_token = loginResponseJson["token"]
data = create_product(default_name, fk_vendor, default_price, default_photo, default_description, login_token)
response = self.client.post('/api/create_product/', data=data)
# product from user 2
email2 = '<EMAIL>'
responseJson2 = registrate_new_user(email2)
loginResponseJson2 = login_user(email2)
fk_vendor2 = loginResponseJson2["user"]["pk"]
login_token2 = loginResponseJson2["token"]
data2 = create_product(default_name, fk_vendor2, default_price, default_photo, default_description, login_token2)
response2 = self.client.post('/api/create_product/', data=data2)
# retriving all products
data3 = {'token': login_token}
response3 = self.client.post('/api/all_products/', data=data3)
# checking pertinent data
self.assertEqual(response3.status_code, 200)
def test_all_products_with_invalid_params(self):
response = self.client.post('/api/all_products/', data = {'token': None})
self.assertEqual(response.status_code, 403)
def test_product_deletion_with_valid_params(self):
email = '<EMAIL>'
responseJson = registrate_new_user(email)
loginResponseJson = login_user(email)
login_token = loginResponseJson["token"]
fk_vendor = loginResponseJson["user"]["pk"]
data = create_product(default_name, fk_vendor, default_price, default_photo, default_description, login_token)
response = self.client.post('/api/create_product/', data=data)
data2 = {'user_id': fk_vendor,'token': login_token}
response2 = self.client.post('/api/my_products_screen/', data=data2)
data3 = {
'fk_vendor': fk_vendor,
'product_id': response2.data[0]["id"],
'token': login_token
}
response3 = self.client.post('/api/delete_product/', data=data3)
self.assertEqual(response3.status_code, 200)
def test_product_deletion_with_invalid_params(self):
data = {
'fk_vendor': default_fk_vendor,
'product_id': default_product_id,
'token': None
}
response = self.client.post('/api/delete_product/', data=data)
self.assertEqual(response.status_code, 403)
def test_my_products_screen_with_valid_params(self):
# product from user 1
email = '<EMAIL>'
responseJson = registrate_new_user(email)
loginResponseJson = login_user(email)
fk_vendor = loginResponseJson["user"]["pk"]
login_token = loginResponseJson["token"]
data = create_product(default_name, fk_vendor, default_price, default_photo, default_description, login_token)
response = self.client.post('/api/create_product/', data=data)
data2 = create_product(default_name, fk_vendor, default_price, default_photo, default_description, login_token)
response2 = self.client.post('/api/create_product/', data=data2)
# retriving user products
data3 = {'user_id': fk_vendor,'token': login_token}
response3 = self.client.post('/api/my_products_screen/', data=data3)
self.assertEqual(response3.data[0]["fk_vendor"], fk_vendor)
self.assertEqual(response3.data[1]["fk_vendor"], fk_vendor)
self.assertEqual(response3.status_code, 200)
def test_my_products_screen_with_invalid_params(self):
data = {'user_id': default_fk_vendor, 'token': None}
response = self.client.post('/api/my_products_screen/', data=data)
self.assertEqual(response.status_code, 403)
def test_get_product_with_valid_params(self):
email = '<EMAIL>'
responseJson = registrate_new_user(email)
loginResponseJson = login_user(email)
fk_vendor = loginResponseJson["user"]["pk"]
login_token = loginResponseJson["token"]
data = create_product(default_name, fk_vendor, default_price, default_photo, default_description, login_token)
response = self.client.post('/api/create_product/', data=data)
data2 = {'user_id': fk_vendor,'token': login_token}
response2 = self.client.post('/api/my_products_screen/', data=data2)
data3 = {
'product_id': response2.data[0]["id"],
'token': login_token
}
response3 = self.client.post('/api/get_product/', data=data3)
self.assertEqual(response3.data["fk_vendor"], data['fk_vendor'])
self.assertEqual(response3.data["name"], data['name'])
self.assertEqual(response3.data["price"], data['price'])
self.assertEqual(response3.data["photo"], data['photo'])
self.assertEqual(response3.data["description"], data['description'])
self.assertEqual(response3.status_code, 200)
def test_get_product_with_invalid_params(self):
data = {
'product_id': default_product_id,
'token': None
}
response = self.client.post('/api/get_product/', data=data)
self.assertEqual(response.status_code, 403)
def test_edit_product_with_valid_params(self):
email = '<EMAIL>'
responseJson = registrate_new_user(email)
loginResponseJson = login_user(email)
fk_vendor = loginResponseJson["user"]["pk"]
login_token = loginResponseJson["token"]
data = create_product(default_name, fk_vendor, default_price, default_photo, default_description, login_token)
response = self.client.post('/api/create_product/', data=data)
# retriving user products
data2 = {'user_id': fk_vendor,'token': login_token}
response2 = self.client.post('/api/my_products_screen/', data=data2)
data3 = {
'product_id': response2.data[0]["id"],
'name': 'Newname',
'price': '1.0',
'photo': 'www.teste.com',
'description': 'desc',
'token': login_token
}
response3 = self.client.post('/api/edit_product/', data=data3)
data4 = {'user_id': fk_vendor,'token': login_token}
response4 = self.client.post('/api/my_products_screen/', data=data4)
self.assertEqual(response4.data[0]["name"], data3["name"])
self.assertEqual(response3.status_code, 200)
def test_edit_product_with_invalid_params(self):
data = {
'product_id': default_product_id,
'name': default_name,
'price': default_price,
'photo': default_photo,
'description': default_description,
'token': None
}
response1 = self.client.post('/api/edit_product/', data=data)
self.assertEqual(response1.status_code, 403) | 0.295738 | 0.085289 |
import torch
from nnrl.nn.critic import ClippedVValue
from nnrl.nn.utils import update_polyak
from nnrl.optim import build_optimizer
from nnrl.types import TensorDict
from ray.rllib.utils import override
from torch import nn
from raylab.options import configure, option
from raylab.policy import TorchPolicy
from raylab.policy.action_dist import WrapDeterministicPolicy
from raylab.policy.losses import FittedQLearning
from raylab.policy.off_policy import OffPolicyMixin, off_policy_options
@configure
@off_policy_options
@option("optimizer/type", "Adam")
@option("optimizer/lr", 3e-4)
@option(
"polyak",
0.995,
help="Interpolation factor in polyak averaging for target networks.",
)
@option("module/type", "NAF")
@option("module/separate_behavior", True)
@option("exploration_config/type", "raylab.utils.exploration.ParameterNoise")
@option(
"exploration_config/param_noise_spec",
{"initial_stddev": 0.1, "desired_action_stddev": 0.2, "adaptation_coeff": 1.01},
)
@option("exploration_config/pure_exploration_steps", 1000)
class NAFTorchPolicy(OffPolicyMixin, TorchPolicy):
"""Normalized Advantage Function policy in Pytorch to use with RLlib."""
# pylint:disable=abstract-method
dist_class = WrapDeterministicPolicy
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.loss_fn = FittedQLearning(
self.module.critics, ClippedVValue(self.module.target_vcritics)
)
self.loss_fn.gamma = self.config["gamma"]
self.build_replay_buffer()
@override(TorchPolicy)
def _make_module(self, obs_space, action_space, config):
module_config = config["module"]
module_config["type"] = "NAF"
# pylint:disable=no-member
return super()._make_module(obs_space, action_space, config)
@override(TorchPolicy)
def _make_optimizers(self):
optimizers = super()._make_optimizers()
optimizers.update(
naf=build_optimizer(self.module.critics, self.config["optimizer"])
)
return optimizers
@override(OffPolicyMixin)
def improve_policy(self, batch: TensorDict) -> dict:
with self.optimizers.optimize("naf"):
loss, info = self.loss_fn(batch)
loss.backward()
info.update(self.extra_grad_info())
vcritics, target_vcritics = self.module.vcritics, self.module.target_vcritics
update_polyak(vcritics, target_vcritics, self.config["polyak"])
return info
@torch.no_grad()
def extra_grad_info(self):
"""Compute gradient norm for components."""
return {
"grad_norm": nn.utils.clip_grad_norm_(
self.module.critics.parameters(), float("inf")
).item()
} | raylab/agents/naf/policy.py | import torch
from nnrl.nn.critic import ClippedVValue
from nnrl.nn.utils import update_polyak
from nnrl.optim import build_optimizer
from nnrl.types import TensorDict
from ray.rllib.utils import override
from torch import nn
from raylab.options import configure, option
from raylab.policy import TorchPolicy
from raylab.policy.action_dist import WrapDeterministicPolicy
from raylab.policy.losses import FittedQLearning
from raylab.policy.off_policy import OffPolicyMixin, off_policy_options
@configure
@off_policy_options
@option("optimizer/type", "Adam")
@option("optimizer/lr", 3e-4)
@option(
"polyak",
0.995,
help="Interpolation factor in polyak averaging for target networks.",
)
@option("module/type", "NAF")
@option("module/separate_behavior", True)
@option("exploration_config/type", "raylab.utils.exploration.ParameterNoise")
@option(
"exploration_config/param_noise_spec",
{"initial_stddev": 0.1, "desired_action_stddev": 0.2, "adaptation_coeff": 1.01},
)
@option("exploration_config/pure_exploration_steps", 1000)
class NAFTorchPolicy(OffPolicyMixin, TorchPolicy):
"""Normalized Advantage Function policy in Pytorch to use with RLlib."""
# pylint:disable=abstract-method
dist_class = WrapDeterministicPolicy
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.loss_fn = FittedQLearning(
self.module.critics, ClippedVValue(self.module.target_vcritics)
)
self.loss_fn.gamma = self.config["gamma"]
self.build_replay_buffer()
@override(TorchPolicy)
def _make_module(self, obs_space, action_space, config):
module_config = config["module"]
module_config["type"] = "NAF"
# pylint:disable=no-member
return super()._make_module(obs_space, action_space, config)
@override(TorchPolicy)
def _make_optimizers(self):
optimizers = super()._make_optimizers()
optimizers.update(
naf=build_optimizer(self.module.critics, self.config["optimizer"])
)
return optimizers
@override(OffPolicyMixin)
def improve_policy(self, batch: TensorDict) -> dict:
with self.optimizers.optimize("naf"):
loss, info = self.loss_fn(batch)
loss.backward()
info.update(self.extra_grad_info())
vcritics, target_vcritics = self.module.vcritics, self.module.target_vcritics
update_polyak(vcritics, target_vcritics, self.config["polyak"])
return info
@torch.no_grad()
def extra_grad_info(self):
"""Compute gradient norm for components."""
return {
"grad_norm": nn.utils.clip_grad_norm_(
self.module.critics.parameters(), float("inf")
).item()
} | 0.915682 | 0.209611 |
from db import ticker
from .status import *
import pandas as pd
import copy
import numpy as np
def get_ticker_data():
"""
Get a list of all tickers' data
:return: List of tickers along with price data list
"""
raw_data = list(ticker.get_all_tickers())
# convert price data into float
def convert_prices_to_float(price):
for k, v in price.items():
if k != 'id' and v is not None:
price[k] = float(v)
return price
for d in raw_data:
if 'price_list' in d:
d['price_list'] = list(map(convert_prices_to_float, d['price_list']))
else:
d['price_list'] = []
return raw_data
def get_symbol_id_map(data):
"""
Get a dict which maps a symbol to a list of full id names
:param data: The return value from get_ticker_data
:return:
"""
result = {}
for t in data:
symbol = t['symbol']
if symbol in result:
result[symbol].append(t['id'])
else:
result[symbol] = [t['id']]
return result
def all_ticker_id(data):
return list(map(lambda t: t['id'], data))
def fill_feature_list(feature_data, timestamp_list, feature):
timestamp_list = sorted(timestamp_list)
feature_data = filter(lambda data: data['last_updated'] is not None and data[feature] is not None, feature_data)
feature_data = sorted(feature_data, key=lambda d: d['last_updated'])
result = [0] * len(timestamp_list)
if len(feature_data) == 0:
return result
i = 0
j = 0
while j < len(feature_data) - 1 and feature_data[j]['last_updated'] == feature_data[j + 1]['last_updated']:
j += 1
if j < len(feature_data) - 1:
j += 1
for d in range(len(timestamp_list)):
if feature_data[i]['last_updated'] <= timestamp_list[d] < feature_data[j]['last_updated']:
result[d] = feature_data[i][feature]
elif timestamp_list[d] >= feature_data[j]['last_updated']:
while j < len(feature_data) - 1 and feature_data[j]['last_updated'] == feature_data[j + 1]['last_updated']:
j += 1
i = j
result[d] = feature_data[i][feature]
if j < len(feature_data) - 1:
j += 1
return result
def build_df(data, feature):
"""
Build a time-based data frame based on give feature
:param feature: The name of feature to build on
:return:
"""
timestamps = list(map(lambda ts: pd.Timestamp(ts_input=ts * 1000000000), sorted(all_timestamp())))
ticker_ids = all_ticker_id(data)
df = pd.DataFrame(columns=timestamps, index=ticker_ids)
for t in data:
sorted_price_list = sorted(t['price_list'],
key=lambda p_list: 0 if p_list['last_updated'] is None else p_list['last_updated'])
feature_data_list = [{'last_updated': p['last_updated'], feature: p[feature]} for p in sorted_price_list]
feature_data_list = fill_feature_list(feature_data_list, all_timestamp(), feature)
new_row_series = pd.Series(data=feature_data_list, index=timestamps)
df.loc[t['id']] = new_row_series
return df
def merge_latest_price(data_point):
"""
For any record in get_ticker_data, flat map the latest price data into the object itself
:param data_point:
:return:
"""
sorted_price_list = sorted(data_point['price_list'],
key=lambda l: 0 if l['last_updated'] is None else l['last_updated'],
reverse=True)
latest_price = sorted_price_list[0]
result = dict(data_point)
del result['price_list']
result.update(latest_price)
return result
reason_functions = {
'diff': lambda new, old: new - old,
'diff-percent': lambda new, old: 1.0 * (new - old) / old
}
def cross_compare(data, interval=24, reason_func=reason_functions['diff']):
"""
Horizontally compares data points (along time axis), and produces a new data frame
with results.
For example, given a data frame like:
| SYM | 00:00 | 01:00 | 02:00 | 03:00 | 04:00 | 05:00 |
-------------------------------------------------------
| BTC | 100 | 200 | 300 | 400 | 500 | 600 |
| ETH | 1 | 2 | 3 | 4 | 5 | 6 |
| BTH | 2 | 2 | 1 | 1 | 2 | 4 |
the result of cross_compare(data, 1) will be:
| SYM | 01:00 | 02:00 | 03:00 | 04:00 | 05:00 |
-----------------------------------------------
| BTC | 100 | 100 | 100 | 100 | 100 |
| ETH | 1 | 1 | 1 | 1 | 1 |
| BTH | 0 | -1 | 0 | 1 | 2 |
the result of cross_compare(data, 2, reason_functions['diff-percent']) will be:
| SYM | 03:00 | 05:00 |
-----------------------
| BTC | 2.0 | 0.5 |
| ETH | 2.0 | 0.5 |
| BTH | -.5 | 3.0 |
:param data: Result from build_df
:param interval: Time interval in hour
:param reason_func: How to compare data points
:return: A new data frame which holds the compared results of data
"""
data = data.dropna(axis=0, how='all') # remove rows that are all None
timestamp_labels = list(data.columns.values)
# find out which labels to keep based on interval, we ASSUME that the interval between
# every two labels is constantly 1 hour
# TODO remove the above assumption by looking at real timestamp data
label_index_range = range(len(timestamp_labels) - 1, -1, -interval)
data_required = data.iloc[:, list(label_index_range)]
# calculate data with provided reasoning function
data_matrix = data_required.as_matrix()
for row in data_matrix:
for i in range(len(row) - 1):
row[i] = reason_func(row[i], row[i + 1])
result = pd.DataFrame(data_matrix, index=list(data_required.index.values),
columns=list(data_required.columns.values))
# drop the last column since there is no previous data to compare with
# and reverse all columns
result = result.iloc[:, :-1]
result = result.iloc[:, ::-1]
return result | insight/ticker.py | from db import ticker
from .status import *
import pandas as pd
import copy
import numpy as np
def get_ticker_data():
"""
Get a list of all tickers' data
:return: List of tickers along with price data list
"""
raw_data = list(ticker.get_all_tickers())
# convert price data into float
def convert_prices_to_float(price):
for k, v in price.items():
if k != 'id' and v is not None:
price[k] = float(v)
return price
for d in raw_data:
if 'price_list' in d:
d['price_list'] = list(map(convert_prices_to_float, d['price_list']))
else:
d['price_list'] = []
return raw_data
def get_symbol_id_map(data):
"""
Get a dict which maps a symbol to a list of full id names
:param data: The return value from get_ticker_data
:return:
"""
result = {}
for t in data:
symbol = t['symbol']
if symbol in result:
result[symbol].append(t['id'])
else:
result[symbol] = [t['id']]
return result
def all_ticker_id(data):
return list(map(lambda t: t['id'], data))
def fill_feature_list(feature_data, timestamp_list, feature):
timestamp_list = sorted(timestamp_list)
feature_data = filter(lambda data: data['last_updated'] is not None and data[feature] is not None, feature_data)
feature_data = sorted(feature_data, key=lambda d: d['last_updated'])
result = [0] * len(timestamp_list)
if len(feature_data) == 0:
return result
i = 0
j = 0
while j < len(feature_data) - 1 and feature_data[j]['last_updated'] == feature_data[j + 1]['last_updated']:
j += 1
if j < len(feature_data) - 1:
j += 1
for d in range(len(timestamp_list)):
if feature_data[i]['last_updated'] <= timestamp_list[d] < feature_data[j]['last_updated']:
result[d] = feature_data[i][feature]
elif timestamp_list[d] >= feature_data[j]['last_updated']:
while j < len(feature_data) - 1 and feature_data[j]['last_updated'] == feature_data[j + 1]['last_updated']:
j += 1
i = j
result[d] = feature_data[i][feature]
if j < len(feature_data) - 1:
j += 1
return result
def build_df(data, feature):
"""
Build a time-based data frame based on give feature
:param feature: The name of feature to build on
:return:
"""
timestamps = list(map(lambda ts: pd.Timestamp(ts_input=ts * 1000000000), sorted(all_timestamp())))
ticker_ids = all_ticker_id(data)
df = pd.DataFrame(columns=timestamps, index=ticker_ids)
for t in data:
sorted_price_list = sorted(t['price_list'],
key=lambda p_list: 0 if p_list['last_updated'] is None else p_list['last_updated'])
feature_data_list = [{'last_updated': p['last_updated'], feature: p[feature]} for p in sorted_price_list]
feature_data_list = fill_feature_list(feature_data_list, all_timestamp(), feature)
new_row_series = pd.Series(data=feature_data_list, index=timestamps)
df.loc[t['id']] = new_row_series
return df
def merge_latest_price(data_point):
"""
For any record in get_ticker_data, flat map the latest price data into the object itself
:param data_point:
:return:
"""
sorted_price_list = sorted(data_point['price_list'],
key=lambda l: 0 if l['last_updated'] is None else l['last_updated'],
reverse=True)
latest_price = sorted_price_list[0]
result = dict(data_point)
del result['price_list']
result.update(latest_price)
return result
reason_functions = {
'diff': lambda new, old: new - old,
'diff-percent': lambda new, old: 1.0 * (new - old) / old
}
def cross_compare(data, interval=24, reason_func=reason_functions['diff']):
"""
Horizontally compares data points (along time axis), and produces a new data frame
with results.
For example, given a data frame like:
| SYM | 00:00 | 01:00 | 02:00 | 03:00 | 04:00 | 05:00 |
-------------------------------------------------------
| BTC | 100 | 200 | 300 | 400 | 500 | 600 |
| ETH | 1 | 2 | 3 | 4 | 5 | 6 |
| BTH | 2 | 2 | 1 | 1 | 2 | 4 |
the result of cross_compare(data, 1) will be:
| SYM | 01:00 | 02:00 | 03:00 | 04:00 | 05:00 |
-----------------------------------------------
| BTC | 100 | 100 | 100 | 100 | 100 |
| ETH | 1 | 1 | 1 | 1 | 1 |
| BTH | 0 | -1 | 0 | 1 | 2 |
the result of cross_compare(data, 2, reason_functions['diff-percent']) will be:
| SYM | 03:00 | 05:00 |
-----------------------
| BTC | 2.0 | 0.5 |
| ETH | 2.0 | 0.5 |
| BTH | -.5 | 3.0 |
:param data: Result from build_df
:param interval: Time interval in hour
:param reason_func: How to compare data points
:return: A new data frame which holds the compared results of data
"""
data = data.dropna(axis=0, how='all') # remove rows that are all None
timestamp_labels = list(data.columns.values)
# find out which labels to keep based on interval, we ASSUME that the interval between
# every two labels is constantly 1 hour
# TODO remove the above assumption by looking at real timestamp data
label_index_range = range(len(timestamp_labels) - 1, -1, -interval)
data_required = data.iloc[:, list(label_index_range)]
# calculate data with provided reasoning function
data_matrix = data_required.as_matrix()
for row in data_matrix:
for i in range(len(row) - 1):
row[i] = reason_func(row[i], row[i + 1])
result = pd.DataFrame(data_matrix, index=list(data_required.index.values),
columns=list(data_required.columns.values))
# drop the last column since there is no previous data to compare with
# and reverse all columns
result = result.iloc[:, :-1]
result = result.iloc[:, ::-1]
return result | 0.566378 | 0.51946 |
class SwitchCode(object):
'''
Container object for storing code filenames for switch configuration.
'''
def __init__(self, modelName, codeFile):
'''
Purpose : Initialize a SwitchCode object
Parameters :
modelName: The model name of a switch. Used to parse codeFile
codeFile: Name of the file that stores code filenames for given
switches. Format should be:
<lowercase model name> <boot code> <primary code> <poe firmware (optional)>
Returns: None
'''
self.__poe = None
self.__pri = None
self.__boot = None
self.__model = modelName
self.__initCode(codeFile)
def getPri(self):
'''
Purpose : Get the primary / secondary code file for this switch
Parameters :
None
Returns: A string containing the filename of the primary flash. None if
it could not be found.
'''
return self.__pri
def getBoot(self):
'''
Purpose : Get the boot code file for this switch
Parameters :
None
Returns: A string containing the filename of the boot flash. None if
it could not be found.
'''
return self.__boot
def getPOE(self):
'''
Purpose : Get the poe firmware code file for this switch
Parameters :
None
Returns: A string containing the filename of the firmware flash. None if
it could not be found.
'''
return self.__poe
def __initCode(self, codeFN):
try:
fin = open(codeFN)
for line in fin:
#Try to find this model in the code file
tokens = line.split()
tokens = [x.strip() for x in tokens]
if len(tokens) > 2:
self.__boot = tokens[1]
self.__pri = tokens[2]
if len(tokens) > 3:
self.__poe = tokens[3]
except:
print "Cannot find", self.__model, "in codefile", codeFN | switchcode.py |
class SwitchCode(object):
'''
Container object for storing code filenames for switch configuration.
'''
def __init__(self, modelName, codeFile):
'''
Purpose : Initialize a SwitchCode object
Parameters :
modelName: The model name of a switch. Used to parse codeFile
codeFile: Name of the file that stores code filenames for given
switches. Format should be:
<lowercase model name> <boot code> <primary code> <poe firmware (optional)>
Returns: None
'''
self.__poe = None
self.__pri = None
self.__boot = None
self.__model = modelName
self.__initCode(codeFile)
def getPri(self):
'''
Purpose : Get the primary / secondary code file for this switch
Parameters :
None
Returns: A string containing the filename of the primary flash. None if
it could not be found.
'''
return self.__pri
def getBoot(self):
'''
Purpose : Get the boot code file for this switch
Parameters :
None
Returns: A string containing the filename of the boot flash. None if
it could not be found.
'''
return self.__boot
def getPOE(self):
'''
Purpose : Get the poe firmware code file for this switch
Parameters :
None
Returns: A string containing the filename of the firmware flash. None if
it could not be found.
'''
return self.__poe
def __initCode(self, codeFN):
try:
fin = open(codeFN)
for line in fin:
#Try to find this model in the code file
tokens = line.split()
tokens = [x.strip() for x in tokens]
if len(tokens) > 2:
self.__boot = tokens[1]
self.__pri = tokens[2]
if len(tokens) > 3:
self.__poe = tokens[3]
except:
print "Cannot find", self.__model, "in codefile", codeFN | 0.684264 | 0.245144 |
try:
import sys
sys.path[1] = '/flash/lib'
mkdir('/flash/lib')
except:
pass
#helper functions
import m5stack
from windows import *
borders(clear=False)
header('boot.py')
mainwindow(clear=False)
home()
def beepHappy():
#Happy
m5stack.tone(4200, 80)
m5stack.tone(2800, 100)
m5stack.tone(4200, 60)
def beepSad():
#Sad
m5stack.tone(1200, duration=100)
m5stack.tone(700, duration=120)
m5stack.tone(300, duration=100)
def log(text):
print(text)
writeln(text)
# Connect to WiFi
def connectWifi():
"Connect to WiFi"
import network, time
#read network info from : myconfig.py
try:
from myconfig import wifi_ssid , wifi_psk
except:
print('No Network configuration file found')
#todo: show help how to create one
return False
wlan = network.WLAN(network.STA_IF)
tmo = 80
if not wlan.isconnected():
print('connecting to network : {}'.format(wifi_ssid))
wlan.active(True)
wlan.connect(wifi_ssid, wifi_psk)
#Wait for WiFi connection
while not wlan.isconnected():
if tmo == 0:
break
print(".", end="")
tmo -= 1
time.sleep_ms(200)
print()
try:
log( 'IP: {}'.format( wlan.ifconfig()[0] ))
return True
except:
pass
if not wlan.isconnected():
beepSad()
return False
def getNetworkTime(timezone = "CET-1CEST"):
"get time via NTP for Central Europe"
# Lobo Specific
import machine
rtc = machine.RTC()
rtc.init((2018, 01, 01, 12, 12, 12))
rtc.ntp_sync(server= "", tz=timezone, update_period=3600)
#need to wait a bit
tmo = 100
while not rtc.synced():
tmo=tmo-1
if tmo==0:
break
time.sleep_ms(10)
#simplify filesystem access from the prompt
from upysh import *
# Connect to WiFi
log('Connect to WiFi...')
connected = connectWifi()
if connected:
#log('Get network Time...')
getNetworkTime()
fmt="%d-%m-%Y, %T %Z" #Europe
#fmt="%b %d %Y, %r %Z" #US
log(time.strftime(fmt,time.localtime()))
#----------
# Start FTP Server
#-----------
StartFTP = False
if StartFTP:
log('Start FTP Server...')
from network import ftp,telnet
ftp.start(user="micro", password="<PASSWORD>")
telnet.start(user="micro", password="<PASSWORD>")
time.sleep(1)
log("FTP server: {}".format(ftp.status()[2]))
log("Telnet server: {}".format(telnet.status()[2]))
import gc;gc.collect() | Labs/Lab-7 Weather/boot.py | try:
import sys
sys.path[1] = '/flash/lib'
mkdir('/flash/lib')
except:
pass
#helper functions
import m5stack
from windows import *
borders(clear=False)
header('boot.py')
mainwindow(clear=False)
home()
def beepHappy():
#Happy
m5stack.tone(4200, 80)
m5stack.tone(2800, 100)
m5stack.tone(4200, 60)
def beepSad():
#Sad
m5stack.tone(1200, duration=100)
m5stack.tone(700, duration=120)
m5stack.tone(300, duration=100)
def log(text):
print(text)
writeln(text)
# Connect to WiFi
def connectWifi():
"Connect to WiFi"
import network, time
#read network info from : myconfig.py
try:
from myconfig import wifi_ssid , wifi_psk
except:
print('No Network configuration file found')
#todo: show help how to create one
return False
wlan = network.WLAN(network.STA_IF)
tmo = 80
if not wlan.isconnected():
print('connecting to network : {}'.format(wifi_ssid))
wlan.active(True)
wlan.connect(wifi_ssid, wifi_psk)
#Wait for WiFi connection
while not wlan.isconnected():
if tmo == 0:
break
print(".", end="")
tmo -= 1
time.sleep_ms(200)
print()
try:
log( 'IP: {}'.format( wlan.ifconfig()[0] ))
return True
except:
pass
if not wlan.isconnected():
beepSad()
return False
def getNetworkTime(timezone = "CET-1CEST"):
"get time via NTP for Central Europe"
# Lobo Specific
import machine
rtc = machine.RTC()
rtc.init((2018, 01, 01, 12, 12, 12))
rtc.ntp_sync(server= "", tz=timezone, update_period=3600)
#need to wait a bit
tmo = 100
while not rtc.synced():
tmo=tmo-1
if tmo==0:
break
time.sleep_ms(10)
#simplify filesystem access from the prompt
from upysh import *
# Connect to WiFi
log('Connect to WiFi...')
connected = connectWifi()
if connected:
#log('Get network Time...')
getNetworkTime()
fmt="%d-%m-%Y, %T %Z" #Europe
#fmt="%b %d %Y, %r %Z" #US
log(time.strftime(fmt,time.localtime()))
#----------
# Start FTP Server
#-----------
StartFTP = False
if StartFTP:
log('Start FTP Server...')
from network import ftp,telnet
ftp.start(user="micro", password="<PASSWORD>")
telnet.start(user="micro", password="<PASSWORD>")
time.sleep(1)
log("FTP server: {}".format(ftp.status()[2]))
log("Telnet server: {}".format(telnet.status()[2]))
import gc;gc.collect() | 0.11645 | 0.113555 |
import simulation
import math
# This is the file to run in order to give this demo a spin. It should work out of the box.
#
# Some parameters can be changed, such as initial conditions, boundary conditions, and which physical problem you want
# to solve. As of now there is only the wave equation and the heat equation, but if you're really ambitious I guess
# you could add your own. Adding a dissipative wave equation wouldn't be that hard, just add a dy/dt term.
# SIMULATION PARAMETERS
string_length = 1
"""The length of the string. This is not really a relevant parameter for any problem; both the heat equation and the
wave equation are scale-invariant, so we can always scale the problem to have length 1. But I've included this parameter
for the sake of completion."""
number_of_points = 100
"""The number of x-coordinates in the simulation. Making this too large leads to instabilities."""
time_step = 0.01
"""The time in between each time-step. It is also used for controlling the frame-rate of the animation."""
# END SIMULATION PARAMETERS
# USEFUL FUNCTIONS GENERATORS
def constant(value=0):
"""A flat initial condition. Rather boring, you probably want a source or some interesting boundary conditions
for this to be fun!
Args:
value (float): The value the function takes everywhere. Defaults to 0."""
return lambda x: value
def gaussian_wave_packet(amplitude=1, position=0.5, width=0.1):
"""Creates a gaussian wave-packet with the given parameters. This is a good standard displacement for an initial
condition, since it is very smooth. Do not use a box pulse; they are not realistic and do not play well with
numerical solvers.
Args:
amplitude (float): The height of the wave-packet. Defaults to 1.
position (float): The center of the wave-packet. Is measured in units of the string_length (i.e., if you want
to have the packet in the middle of the string, the parameter should be 0.5). Defaults to 0.5.
width (float): The width of the wave-packet. This is the standard deviation of the wave-packet, measured in
units of the string length. Defaults to 0.1"""
return lambda x: amplitude*math.exp(-0.5*((x - position*string_length)/(width*string_length))**2)
def standing_wave(amplitude=1, node_count=3, base_period=string_length):
"""Creates a function representing a standing sine-wave. Mostly useful for the wave equation but it could be fun
also in the heat equation, I suppose.
Args:
amplitude (float): The amplitude of the standing wave. The largest value the function attains. Defaults to 1.
node_count (int): The number of nodes in the standing wave. Defaults to 3.
base_period (float): The length of a single period at node_count 1. Defaults to the string_length."""
return lambda x: amplitude*math.sin(node_count*math.pi*x/base_period)
# END USEFUL FUNCTION GENERATORS
# PROBLEM CONDITIONS
initial_conditions = constant() # Change this to change the initial conditions.
"""The choice of initial condition. This should be a function which takes a single real argument and produces a
number."""
general_boundary_condition = simulation.BVPBoundaryCondition(
left_dirichlet_parameter=1,
right_dirichlet_parameter=0,
left_neumann_parameter=0,
right_neumann_parameter=1,
left_constant=gaussian_wave_packet(),
right_constant=constant())
homogeneous_dirichlet = simulation.BOUNDARY_CONDITION_HOMOGENEOUS_DIRICHLET
homogeneous_neumann = simulation.BOUNDARY_CONDITION_HOMOGENEOUS_NEUMANN
boundary_conditions = general_boundary_condition # Change this to change the boundary conditions
def source(t, x):
"""The function used as source function for the BVP. This is a function of both time and position. Change this to
add a heat source for the heat-equation, or a charge for the wave-equation (if we think in terms of physics) or a
force (if we think of it as a string.)"""
return 0
wave_equation_descriptor = simulation.WaveEquationDescriptor(wave_speed=1, boundary_conditions=boundary_conditions,
source_function=source)
heat_equation_descriptor = simulation.HeatEquationDescriptor(diffusivity=0.001, boundary_conditions=boundary_conditions,
source_function=source)
# The diffusivity of the heat equation is low to allow convergence.
problem_descriptor = wave_equation_descriptor # Change this to change equation.
""" Selects the problem we actually want to solve."""
# END PROBLEM CONDITIONS
# THE ACTUAL SIMULATION
# No changes needed here, all parameters should be changed at the appropriate place above.
simulation.sim_and_plot(simulation.BVPSimulation(
length=string_length, time_step=time_step, number_of_points=number_of_points, bvp_descriptor=problem_descriptor,
initial_conditions=initial_conditions)) | main.py | import simulation
import math
# This is the file to run in order to give this demo a spin. It should work out of the box.
#
# Some parameters can be changed, such as initial conditions, boundary conditions, and which physical problem you want
# to solve. As of now there is only the wave equation and the heat equation, but if you're really ambitious I guess
# you could add your own. Adding a dissipative wave equation wouldn't be that hard, just add a dy/dt term.
# SIMULATION PARAMETERS
string_length = 1
"""The length of the string. This is not really a relevant parameter for any problem; both the heat equation and the
wave equation are scale-invariant, so we can always scale the problem to have length 1. But I've included this parameter
for the sake of completion."""
number_of_points = 100
"""The number of x-coordinates in the simulation. Making this too large leads to instabilities."""
time_step = 0.01
"""The time in between each time-step. It is also used for controlling the frame-rate of the animation."""
# END SIMULATION PARAMETERS
# USEFUL FUNCTIONS GENERATORS
def constant(value=0):
"""A flat initial condition. Rather boring, you probably want a source or some interesting boundary conditions
for this to be fun!
Args:
value (float): The value the function takes everywhere. Defaults to 0."""
return lambda x: value
def gaussian_wave_packet(amplitude=1, position=0.5, width=0.1):
"""Creates a gaussian wave-packet with the given parameters. This is a good standard displacement for an initial
condition, since it is very smooth. Do not use a box pulse; they are not realistic and do not play well with
numerical solvers.
Args:
amplitude (float): The height of the wave-packet. Defaults to 1.
position (float): The center of the wave-packet. Is measured in units of the string_length (i.e., if you want
to have the packet in the middle of the string, the parameter should be 0.5). Defaults to 0.5.
width (float): The width of the wave-packet. This is the standard deviation of the wave-packet, measured in
units of the string length. Defaults to 0.1"""
return lambda x: amplitude*math.exp(-0.5*((x - position*string_length)/(width*string_length))**2)
def standing_wave(amplitude=1, node_count=3, base_period=string_length):
"""Creates a function representing a standing sine-wave. Mostly useful for the wave equation but it could be fun
also in the heat equation, I suppose.
Args:
amplitude (float): The amplitude of the standing wave. The largest value the function attains. Defaults to 1.
node_count (int): The number of nodes in the standing wave. Defaults to 3.
base_period (float): The length of a single period at node_count 1. Defaults to the string_length."""
return lambda x: amplitude*math.sin(node_count*math.pi*x/base_period)
# END USEFUL FUNCTION GENERATORS
# PROBLEM CONDITIONS
initial_conditions = constant() # Change this to change the initial conditions.
"""The choice of initial condition. This should be a function which takes a single real argument and produces a
number."""
general_boundary_condition = simulation.BVPBoundaryCondition(
left_dirichlet_parameter=1,
right_dirichlet_parameter=0,
left_neumann_parameter=0,
right_neumann_parameter=1,
left_constant=gaussian_wave_packet(),
right_constant=constant())
homogeneous_dirichlet = simulation.BOUNDARY_CONDITION_HOMOGENEOUS_DIRICHLET
homogeneous_neumann = simulation.BOUNDARY_CONDITION_HOMOGENEOUS_NEUMANN
boundary_conditions = general_boundary_condition # Change this to change the boundary conditions
def source(t, x):
"""The function used as source function for the BVP. This is a function of both time and position. Change this to
add a heat source for the heat-equation, or a charge for the wave-equation (if we think in terms of physics) or a
force (if we think of it as a string.)"""
return 0
wave_equation_descriptor = simulation.WaveEquationDescriptor(wave_speed=1, boundary_conditions=boundary_conditions,
source_function=source)
heat_equation_descriptor = simulation.HeatEquationDescriptor(diffusivity=0.001, boundary_conditions=boundary_conditions,
source_function=source)
# The diffusivity of the heat equation is low to allow convergence.
problem_descriptor = wave_equation_descriptor # Change this to change equation.
""" Selects the problem we actually want to solve."""
# END PROBLEM CONDITIONS
# THE ACTUAL SIMULATION
# No changes needed here, all parameters should be changed at the appropriate place above.
simulation.sim_and_plot(simulation.BVPSimulation(
length=string_length, time_step=time_step, number_of_points=number_of_points, bvp_descriptor=problem_descriptor,
initial_conditions=initial_conditions)) | 0.75392 | 0.634487 |
import tensorflow as tf
import numpy as np
import pytest
import os
from tndm import TNDM
from tndm.utils import AdaptiveWeights, upsert_empty_folder, remove_folder
@pytest.fixture(scope='module', autouse=True)
def cleanup(request):
def remove_test_dir():
folder = os.path.join(
'test', 'models', 'tndm_tmp')
upsert_empty_folder(folder)
remove_folder(folder)
request.addfinalizer(remove_test_dir)
@pytest.fixture(scope='function')
def save_location():
folder = os.path.join('test', 'models', 'tndm_tmp')
upsert_empty_folder(folder)
return folder
@pytest.mark.unit
def test_dimensionality():
input_data = np.exp(np.random.randn(10, 100, 50)
) # trials X time X neurons
model = TNDM(neural_dim=50, behaviour_dim=2, layers={'irrelevant_decoder': {'original_cell': True}, 'relevant_decoder': {'original_cell': True}})
model.build(input_shape=[None] + list(input_data.shape[1:]))
f, b, (g0_r, r_mean, r_logvar), (g0_i, i_mean, i_logvar), _ = model.call(input_data, training=True)
tf.debugging.assert_equal(b.shape, tf.TensorShape([10, 100, 2]))
tf.debugging.assert_equal(f.shape, tf.TensorShape([10, 100, 50]))
tf.debugging.assert_equal(r_mean.shape, tf.TensorShape([10, 64]))
tf.debugging.assert_equal(r_logvar.shape, tf.TensorShape([10, 64]))
tf.debugging.assert_equal(g0_r.shape, tf.TensorShape([10, 64]))
tf.debugging.assert_equal(i_mean.shape, tf.TensorShape([10, 64]))
tf.debugging.assert_equal(i_logvar.shape, tf.TensorShape([10, 64]))
tf.debugging.assert_equal(g0_i.shape, tf.TensorShape([10, 64]))
f, b, (g0_r, r_mean, r_logvar), (g0_i, i_mean, i_logvar), _ = model.call(input_data, training=False)
tf.debugging.assert_equal(b.shape, tf.TensorShape([10, 100, 2]))
tf.debugging.assert_equal(f.shape, tf.TensorShape([10, 100, 50]))
tf.debugging.assert_equal(r_mean.shape, tf.TensorShape([10, 64]))
tf.debugging.assert_equal(r_logvar.shape, tf.TensorShape([10, 64]))
tf.debugging.assert_equal(g0_r.shape, tf.TensorShape([10, 64]))
tf.debugging.assert_equal(i_mean.shape, tf.TensorShape([10, 64]))
tf.debugging.assert_equal(i_logvar.shape, tf.TensorShape([10, 64]))
tf.debugging.assert_equal(g0_i.shape, tf.TensorShape([10, 64]))
@pytest.mark.unit
def test_train_model_quick(save_location):
neural_data_train = np.random.binomial(1, 0.5, (10, 100, 50)).astype(
float) # test_trials X time X neurons
behaviour_data_train = np.exp(np.random.randn(
10, 100, 2)) # test_trials X time X behaviour
neural_data_val = np.random.binomial(1, 0.5, (2, 100, 50)).astype(
float) # val_trials X time X neurons
# val_trials X time X behaviour
behaviour_data_val = np.exp(np.random.randn(2, 100, 2))
adaptive_weights = AdaptiveWeights(
initial=[1.0, .0, .0, .0, 1.0, .0],
update_start=[0, 0, 1000, 1000, 0, 0],
update_rate=[0., 0., 0.0005, 0.0005, 0.0, 0.0005],
min_weight=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0]
)
model = TNDM(neural_dim=50, behaviour_dim=2)
model.build(input_shape=[None] + list(neural_data_train.shape[1:]))
model.compile(
optimizer=tf.keras.optimizers.Adam(1e-3),
loss_weights=adaptive_weights.w
)
model.fit(
x=neural_data_train,
y=behaviour_data_train,
callbacks=[adaptive_weights],
shuffle=True,
epochs=4,
validation_data=(
neural_data_val,
behaviour_data_val))
before = model(neural_data_train, training=False)[0]
model.save(save_location)
model_new = TNDM.load(save_location)
after = model_new(neural_data_train, training=False)[0]
tf.debugging.assert_equal(
before, after
) | test/models/tndm_test.py | import tensorflow as tf
import numpy as np
import pytest
import os
from tndm import TNDM
from tndm.utils import AdaptiveWeights, upsert_empty_folder, remove_folder
@pytest.fixture(scope='module', autouse=True)
def cleanup(request):
def remove_test_dir():
folder = os.path.join(
'test', 'models', 'tndm_tmp')
upsert_empty_folder(folder)
remove_folder(folder)
request.addfinalizer(remove_test_dir)
@pytest.fixture(scope='function')
def save_location():
folder = os.path.join('test', 'models', 'tndm_tmp')
upsert_empty_folder(folder)
return folder
@pytest.mark.unit
def test_dimensionality():
input_data = np.exp(np.random.randn(10, 100, 50)
) # trials X time X neurons
model = TNDM(neural_dim=50, behaviour_dim=2, layers={'irrelevant_decoder': {'original_cell': True}, 'relevant_decoder': {'original_cell': True}})
model.build(input_shape=[None] + list(input_data.shape[1:]))
f, b, (g0_r, r_mean, r_logvar), (g0_i, i_mean, i_logvar), _ = model.call(input_data, training=True)
tf.debugging.assert_equal(b.shape, tf.TensorShape([10, 100, 2]))
tf.debugging.assert_equal(f.shape, tf.TensorShape([10, 100, 50]))
tf.debugging.assert_equal(r_mean.shape, tf.TensorShape([10, 64]))
tf.debugging.assert_equal(r_logvar.shape, tf.TensorShape([10, 64]))
tf.debugging.assert_equal(g0_r.shape, tf.TensorShape([10, 64]))
tf.debugging.assert_equal(i_mean.shape, tf.TensorShape([10, 64]))
tf.debugging.assert_equal(i_logvar.shape, tf.TensorShape([10, 64]))
tf.debugging.assert_equal(g0_i.shape, tf.TensorShape([10, 64]))
f, b, (g0_r, r_mean, r_logvar), (g0_i, i_mean, i_logvar), _ = model.call(input_data, training=False)
tf.debugging.assert_equal(b.shape, tf.TensorShape([10, 100, 2]))
tf.debugging.assert_equal(f.shape, tf.TensorShape([10, 100, 50]))
tf.debugging.assert_equal(r_mean.shape, tf.TensorShape([10, 64]))
tf.debugging.assert_equal(r_logvar.shape, tf.TensorShape([10, 64]))
tf.debugging.assert_equal(g0_r.shape, tf.TensorShape([10, 64]))
tf.debugging.assert_equal(i_mean.shape, tf.TensorShape([10, 64]))
tf.debugging.assert_equal(i_logvar.shape, tf.TensorShape([10, 64]))
tf.debugging.assert_equal(g0_i.shape, tf.TensorShape([10, 64]))
@pytest.mark.unit
def test_train_model_quick(save_location):
neural_data_train = np.random.binomial(1, 0.5, (10, 100, 50)).astype(
float) # test_trials X time X neurons
behaviour_data_train = np.exp(np.random.randn(
10, 100, 2)) # test_trials X time X behaviour
neural_data_val = np.random.binomial(1, 0.5, (2, 100, 50)).astype(
float) # val_trials X time X neurons
# val_trials X time X behaviour
behaviour_data_val = np.exp(np.random.randn(2, 100, 2))
adaptive_weights = AdaptiveWeights(
initial=[1.0, .0, .0, .0, 1.0, .0],
update_start=[0, 0, 1000, 1000, 0, 0],
update_rate=[0., 0., 0.0005, 0.0005, 0.0, 0.0005],
min_weight=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0]
)
model = TNDM(neural_dim=50, behaviour_dim=2)
model.build(input_shape=[None] + list(neural_data_train.shape[1:]))
model.compile(
optimizer=tf.keras.optimizers.Adam(1e-3),
loss_weights=adaptive_weights.w
)
model.fit(
x=neural_data_train,
y=behaviour_data_train,
callbacks=[adaptive_weights],
shuffle=True,
epochs=4,
validation_data=(
neural_data_val,
behaviour_data_val))
before = model(neural_data_train, training=False)[0]
model.save(save_location)
model_new = TNDM.load(save_location)
after = model_new(neural_data_train, training=False)[0]
tf.debugging.assert_equal(
before, after
) | 0.481941 | 0.518668 |
import yaml
import collections as col
import PolyLibScan as pls
import pathlib2 as pl
import LammpsSubmit as LS
import epitopsy as epi
import MDlatticeAnalysisTool as mdl
import sklearn as sk
import operator
import random
import os
import time
class Genetic_algorithm(object):
gene_list = ['BA', 'BP', 'Bor', 'CBS', 'Dan', 'Glu', 'NTA', 'Dod', 'C5-BP',
'C6-BP', 'C6-CyH', 'C6-Sp', 'CBS-Boc', 'ED', 'Iso', 'NTA-Glu']
generation = 0
@classmethod
def update_gen(cls):
cls.generation += 1
def __init__(self, local_project_folder, cluster_project_folder,
best_sample = 5, lucky_few = 2, nr_children = 4,
nr_generations = 6, mutation_rate = 0.02, meshsize = [4,4,4]):
self.best_sample = best_sample
self.lucky_few = lucky_few
self.nr_children = nr_children
self.nr_generations = nr_generations
self.mutation_rate = mutation_rate
self.meshsize = meshsize
self.project_folder = local_project_folder
self.cluster_folder = cluster_project_folder
self._run = self.run(self.nr_generations, self.project_folder,
self.cluster_folder, self.best_sample, self.lucky_few,
self.nr_children)
# This is the analysis part of the fitness function,
# the fitness is calculated by calculating the mean absolute
# error between the probability of a multi-contact event in a
# given gridpoint and the predicted occupancy of the polymer on
# this gridpoint and dividing the result by the total amount of
# multi-contact events. This quantifies the difference between
# the epitopsy prediction and the observed MD data for a
# gridpoint while also accounting for how well in general the
# polymer binds to the protein in MD simulations.
# In this case, the lower the fitness the better
def fitness(self, project, output_folder, job, protein_name, pose_file):
try:
box = epi.DXFile.read_dxfile(output_folder
+ protein_name + '_occ.dx', 'esp')
with open(output_folder + protein_name
+ '_box_coords_job' + str(job) + '.yml') as f_:
coords = yaml.load(f_)
coords_tuple = []
for i in range(len(coords)):
coords_tuple.append(tuple(coords.tolist()[i]))
counter = col.Counter(coords_tuple)
freq_occ_dict = col.OrderedDict()
for key in counter:
freq_occ_dict[key] = counter[key], box.box[key]
frequency = []
occupancy = []
for key in freq_occ_dict:
frequency.append(freq_occ_dict[key][0])
occupancy.append(freq_occ_dict[key][1])
e = mdl.enviroment.Enviroment(output_folder
+ protein_name + '.pdb', output_folder
+ pose_file)
a = mdl.analysis.Analytics(e)
probability = [float(freq)/(len(a.df_dist)
* int(project.jobs[0].meta['sampling_rate']))
for freq in frequency]
return sk.metrics.mean_absolute_error(probability,
occupancy) / sum(probability)
except ValueError:
return None
# Compute performance of the individuals.
def compPerformance(self, population, analysis_folder, generation_pls):
pop_perf = {}
for j in enumerate(population):
ouput_folder = analysis_folder + j[1].name + '/'
job = j[0]
# This could be a problem with the 1A2C / 1lya mixup
protein_name = generation_pls.jobs[job].meta['protein']
pose_file = generation_pls.jobs[job][0].pymol.pose.file_name(
molecule = 'polymer', state = 'full')
pop_perf[j[1].name] = self.fitness(generation_pls, ouput_folder,
job, protein_name, pose_file)
# Remove the jobs where no multi-contact event took place.
for k,v in pop_perf.items():
if v == None:
del pop_perf[k]
return sorted(pop_perf.items(), key = operator.itemgetter(1))
# Select the indiviuals used to generate the next generation.
def selePop(self, pop_sorted, best_sample, lucky_few):
breeder = []
for i in range(best_sample):
breeder.append(pop_sorted[i][0])
for i in range(lucky_few):
breeder.append(random.choice(pop_sorted)[0])
random.shuffle(breeder)
return breeder
# Load the polymer sequences (the 'genome') of each individual.
def load_configs(self, configs):
sequences = {}
for i in range(len(configs)):
with open(str(configs[i])) as f_:
foo = yaml.load(f_)
sequences[configs[i].parent.name] = foo[
'sim_parameter']['poly_sequence']
return sequences
# Recombination function.
def create_child(self, parent1, parent2):
child = []
for i in range(len(parent1)):
if (int(100 * random.random()) < 50):
child.append(parent1[i])
else:
child.append(parent2[i])
return child
# Create the next generation.
def next_pop(self, sequences, breeder, nr_children):
next_pop = []
for i in range(len(breeder)/2):
for j in range(nr_children):
l = self.create_child(sequences[breeder[i]],
sequences[breeder[len(breeder) -1 -i]])
next_pop.append(l)
return next_pop
# Mutate a monomer ('gene') according to the mutation probability,
# the possible genes are given by the gene list.
def mutate(self, next_population):
for i in range(len(next_population)):
for j in range(len(next_population[i])):
if random.random() <= self.mutation_rate:
next_population[i][j] = Genetic_algorithm.gene_list[int(
random.random() * len(Genetic_algorithm.gene_list))]
return next_population
# Save the current best individual, it's generation,
# fitness and sequence.
def currBest(self, pop_perf, current_best, history):
for i in range(len(pop_perf)):
if pop_perf[i][1] < current_best[2]:
current_best = ('generation' + str(Genetic_algorithm.generation),
pop_perf[i][0], pop_perf[i][1],
history['generation' + str(
Genetic_algorithm.generation)][pop_perf[i][0]]['sequence'])
return current_best
# Check the status of jobs running on the cluster.
def jobchecker(self, project):
job_state = []
for job in project:
job.state.update()
job_state.append(job.state.is_finished)
return job_state
def run(self, nr_generations, project_folder, cluster_folder, best_sample,
lucky_few, nr_children):
history = col.OrderedDict()
current_best = ('generationX', 'test', float('inf'), 'No sequence')
while Genetic_algorithm.generation < nr_generations:
print Genetic_algorithm.generation
if Genetic_algorithm.generation != 0:
while True:
if all(self.jobchecker(project)):
break
else:
print "not all MD simulations are finished, going to sleep for 10 min"
time.sleep(600)
print "MD simulation finished, downloading and analysing Data"
os.mkdir(project_folder + 'generation'
+ str(Genetic_algorithm.generation))
gen_folder = project_folder + 'generation' + str(
Genetic_algorithm.generation)
md_folder = project_folder + 'generation' + str(
Genetic_algorithm.generation) + '/MD/'
ana_folder = project_folder + 'generation' + str(
Genetic_algorithm.generation) + '/analysis/'
os.mkdir(ana_folder)
if Genetic_algorithm.generation != 0:
project.retrieve(gen_folder)
# Population = list(pl.Path(md_folder + 'jobs/').glob('*'))
configs = list(pl.Path(md_folder + 'jobs/').glob(
'*/config_with_setup.yml'))
# Getting our pls data and generating our population.
generation_pls = pls.Analysis.Project(md_folder)
population = [pl.Path(md_folder + 'jobs/'
+ job.db_path.parent.name) for job in generation_pls.jobs]
# Getting our MDL & epitopsy data
print "Retrieved MD data, starting Epitopsy analysis"
for individual in enumerate(population):
os.mkdir(ana_folder + '%s' % individual[1].name)
mdl.pipeline.Pipeline_run(md_folder, ana_folder + '%s/'
% individual[1].name, job=individual[0],
meshsize=self.meshsize)
# Calculating the Fitness and creating the next Generation.
print "Calculating Fitness and creating the next Generation"
pop_perf = self.compPerformance(population, ana_folder,
generation_pls)
breeder = self.selePop(pop_perf, best_sample, lucky_few)
sequences = self.load_configs(configs)
next_population = self.mutate(self.next_pop(sequences, breeder,
nr_children))
history['generation' + str(Genetic_algorithm.generation)] = {}
for i in range(len(pop_perf)):
history['generation' + str(
Genetic_algorithm.generation)][pop_perf[i][0]] = {}
history['generation' + str(Genetic_algorithm.generation)][
pop_perf[i][0]]['fitness'] = pop_perf[i][1]
history['generation' + str(Genetic_algorithm.generation)][
pop_perf[i][0]]['sequence'] = sequences[pop_perf[i][0]]
current_best = self.currBest(pop_perf, current_best, history)
print "Preparing MD simulation runs"
# Path to slurm config.
slurm_config = pl.Path('slurm.cfg')
if not slurm_config.exists():
print slurm_config, 'does not exist'
config_folder = pl.Path(md_folder + 'static/')
# Create dict with all static files needed to run the simulation
statics = {'config': config_folder.joinpath('parameters.yml'),
'active_site': config_folder.joinpath('active_sites.h5'),
'protein': config_folder.joinpath(
generation_pls.jobs[0].meta['protein']),
'script': config_folder.joinpath('sampling2.py'),
#'ic50': config_folder.joinpath('ic50.h5'),
'surface_db': config_folder.joinpath(
'hydrophobic_parameters.h5'),
'protonation_db': config_folder.joinpath('protonation.h5')}
# Check if everything is in order.
for file_ in statics.values():
if not file_.exists():
print file_, 'does not exist'
statics = dict(zip(statics.keys(), map(str, statics.values())))
config_defaults = pl.Path(md_folder + '/jobs/abcd/config.yml')
if not config_defaults.exists():
raise FileNotFoundError
# Create project.
project = LS.Project(cluster_folder + 'generation'
+ str(Genetic_algorithm.generation) + '/MD/',
defaults=config_defaults.as_posix(),statics=statics,
slurm_parameters=slurm_config.as_posix())
# Creates folders and copies static data to cluster.
project.create_environment()
# Initialize parameters that are used to vary the job settings
# like polymer type, timesteps, etc.
parameters = {}
# Simulation parameters.
sim_parameter = {}
# Lammps parameters.
lmp_parameter = {}
# Bundling them up.
parameters['lammps_parameter'] = lmp_parameter
parameters['sim_parameter'] = sim_parameter
for genome in next_population:
parameters['sim_parameter']['poly_sequence'] = genome
project.create_job(parameters)
print "Starting MD simulations"
Genetic_algorithm.update_gen()
print Genetic_algorithm.generation
project.run_jobs()
f_ = open(project_folder + 'history.yml', 'w')
yaml.dump(history, stream = f_)
f_.close()
g_ = open(project_folder + 'best_hit.yml', 'w')
yaml.dump(current_best, stream = g_)
g_.close() | genetic_algorithm.py | import yaml
import collections as col
import PolyLibScan as pls
import pathlib2 as pl
import LammpsSubmit as LS
import epitopsy as epi
import MDlatticeAnalysisTool as mdl
import sklearn as sk
import operator
import random
import os
import time
class Genetic_algorithm(object):
gene_list = ['BA', 'BP', 'Bor', 'CBS', 'Dan', 'Glu', 'NTA', 'Dod', 'C5-BP',
'C6-BP', 'C6-CyH', 'C6-Sp', 'CBS-Boc', 'ED', 'Iso', 'NTA-Glu']
generation = 0
@classmethod
def update_gen(cls):
cls.generation += 1
def __init__(self, local_project_folder, cluster_project_folder,
best_sample = 5, lucky_few = 2, nr_children = 4,
nr_generations = 6, mutation_rate = 0.02, meshsize = [4,4,4]):
self.best_sample = best_sample
self.lucky_few = lucky_few
self.nr_children = nr_children
self.nr_generations = nr_generations
self.mutation_rate = mutation_rate
self.meshsize = meshsize
self.project_folder = local_project_folder
self.cluster_folder = cluster_project_folder
self._run = self.run(self.nr_generations, self.project_folder,
self.cluster_folder, self.best_sample, self.lucky_few,
self.nr_children)
# This is the analysis part of the fitness function,
# the fitness is calculated by calculating the mean absolute
# error between the probability of a multi-contact event in a
# given gridpoint and the predicted occupancy of the polymer on
# this gridpoint and dividing the result by the total amount of
# multi-contact events. This quantifies the difference between
# the epitopsy prediction and the observed MD data for a
# gridpoint while also accounting for how well in general the
# polymer binds to the protein in MD simulations.
# In this case, the lower the fitness the better
def fitness(self, project, output_folder, job, protein_name, pose_file):
try:
box = epi.DXFile.read_dxfile(output_folder
+ protein_name + '_occ.dx', 'esp')
with open(output_folder + protein_name
+ '_box_coords_job' + str(job) + '.yml') as f_:
coords = yaml.load(f_)
coords_tuple = []
for i in range(len(coords)):
coords_tuple.append(tuple(coords.tolist()[i]))
counter = col.Counter(coords_tuple)
freq_occ_dict = col.OrderedDict()
for key in counter:
freq_occ_dict[key] = counter[key], box.box[key]
frequency = []
occupancy = []
for key in freq_occ_dict:
frequency.append(freq_occ_dict[key][0])
occupancy.append(freq_occ_dict[key][1])
e = mdl.enviroment.Enviroment(output_folder
+ protein_name + '.pdb', output_folder
+ pose_file)
a = mdl.analysis.Analytics(e)
probability = [float(freq)/(len(a.df_dist)
* int(project.jobs[0].meta['sampling_rate']))
for freq in frequency]
return sk.metrics.mean_absolute_error(probability,
occupancy) / sum(probability)
except ValueError:
return None
# Compute performance of the individuals.
def compPerformance(self, population, analysis_folder, generation_pls):
pop_perf = {}
for j in enumerate(population):
ouput_folder = analysis_folder + j[1].name + '/'
job = j[0]
# This could be a problem with the 1A2C / 1lya mixup
protein_name = generation_pls.jobs[job].meta['protein']
pose_file = generation_pls.jobs[job][0].pymol.pose.file_name(
molecule = 'polymer', state = 'full')
pop_perf[j[1].name] = self.fitness(generation_pls, ouput_folder,
job, protein_name, pose_file)
# Remove the jobs where no multi-contact event took place.
for k,v in pop_perf.items():
if v == None:
del pop_perf[k]
return sorted(pop_perf.items(), key = operator.itemgetter(1))
# Select the indiviuals used to generate the next generation.
def selePop(self, pop_sorted, best_sample, lucky_few):
breeder = []
for i in range(best_sample):
breeder.append(pop_sorted[i][0])
for i in range(lucky_few):
breeder.append(random.choice(pop_sorted)[0])
random.shuffle(breeder)
return breeder
# Load the polymer sequences (the 'genome') of each individual.
def load_configs(self, configs):
sequences = {}
for i in range(len(configs)):
with open(str(configs[i])) as f_:
foo = yaml.load(f_)
sequences[configs[i].parent.name] = foo[
'sim_parameter']['poly_sequence']
return sequences
# Recombination function.
def create_child(self, parent1, parent2):
child = []
for i in range(len(parent1)):
if (int(100 * random.random()) < 50):
child.append(parent1[i])
else:
child.append(parent2[i])
return child
# Create the next generation.
def next_pop(self, sequences, breeder, nr_children):
next_pop = []
for i in range(len(breeder)/2):
for j in range(nr_children):
l = self.create_child(sequences[breeder[i]],
sequences[breeder[len(breeder) -1 -i]])
next_pop.append(l)
return next_pop
# Mutate a monomer ('gene') according to the mutation probability,
# the possible genes are given by the gene list.
def mutate(self, next_population):
for i in range(len(next_population)):
for j in range(len(next_population[i])):
if random.random() <= self.mutation_rate:
next_population[i][j] = Genetic_algorithm.gene_list[int(
random.random() * len(Genetic_algorithm.gene_list))]
return next_population
# Save the current best individual, it's generation,
# fitness and sequence.
def currBest(self, pop_perf, current_best, history):
for i in range(len(pop_perf)):
if pop_perf[i][1] < current_best[2]:
current_best = ('generation' + str(Genetic_algorithm.generation),
pop_perf[i][0], pop_perf[i][1],
history['generation' + str(
Genetic_algorithm.generation)][pop_perf[i][0]]['sequence'])
return current_best
# Check the status of jobs running on the cluster.
def jobchecker(self, project):
job_state = []
for job in project:
job.state.update()
job_state.append(job.state.is_finished)
return job_state
def run(self, nr_generations, project_folder, cluster_folder, best_sample,
lucky_few, nr_children):
history = col.OrderedDict()
current_best = ('generationX', 'test', float('inf'), 'No sequence')
while Genetic_algorithm.generation < nr_generations:
print Genetic_algorithm.generation
if Genetic_algorithm.generation != 0:
while True:
if all(self.jobchecker(project)):
break
else:
print "not all MD simulations are finished, going to sleep for 10 min"
time.sleep(600)
print "MD simulation finished, downloading and analysing Data"
os.mkdir(project_folder + 'generation'
+ str(Genetic_algorithm.generation))
gen_folder = project_folder + 'generation' + str(
Genetic_algorithm.generation)
md_folder = project_folder + 'generation' + str(
Genetic_algorithm.generation) + '/MD/'
ana_folder = project_folder + 'generation' + str(
Genetic_algorithm.generation) + '/analysis/'
os.mkdir(ana_folder)
if Genetic_algorithm.generation != 0:
project.retrieve(gen_folder)
# Population = list(pl.Path(md_folder + 'jobs/').glob('*'))
configs = list(pl.Path(md_folder + 'jobs/').glob(
'*/config_with_setup.yml'))
# Getting our pls data and generating our population.
generation_pls = pls.Analysis.Project(md_folder)
population = [pl.Path(md_folder + 'jobs/'
+ job.db_path.parent.name) for job in generation_pls.jobs]
# Getting our MDL & epitopsy data
print "Retrieved MD data, starting Epitopsy analysis"
for individual in enumerate(population):
os.mkdir(ana_folder + '%s' % individual[1].name)
mdl.pipeline.Pipeline_run(md_folder, ana_folder + '%s/'
% individual[1].name, job=individual[0],
meshsize=self.meshsize)
# Calculating the Fitness and creating the next Generation.
print "Calculating Fitness and creating the next Generation"
pop_perf = self.compPerformance(population, ana_folder,
generation_pls)
breeder = self.selePop(pop_perf, best_sample, lucky_few)
sequences = self.load_configs(configs)
next_population = self.mutate(self.next_pop(sequences, breeder,
nr_children))
history['generation' + str(Genetic_algorithm.generation)] = {}
for i in range(len(pop_perf)):
history['generation' + str(
Genetic_algorithm.generation)][pop_perf[i][0]] = {}
history['generation' + str(Genetic_algorithm.generation)][
pop_perf[i][0]]['fitness'] = pop_perf[i][1]
history['generation' + str(Genetic_algorithm.generation)][
pop_perf[i][0]]['sequence'] = sequences[pop_perf[i][0]]
current_best = self.currBest(pop_perf, current_best, history)
print "Preparing MD simulation runs"
# Path to slurm config.
slurm_config = pl.Path('slurm.cfg')
if not slurm_config.exists():
print slurm_config, 'does not exist'
config_folder = pl.Path(md_folder + 'static/')
# Create dict with all static files needed to run the simulation
statics = {'config': config_folder.joinpath('parameters.yml'),
'active_site': config_folder.joinpath('active_sites.h5'),
'protein': config_folder.joinpath(
generation_pls.jobs[0].meta['protein']),
'script': config_folder.joinpath('sampling2.py'),
#'ic50': config_folder.joinpath('ic50.h5'),
'surface_db': config_folder.joinpath(
'hydrophobic_parameters.h5'),
'protonation_db': config_folder.joinpath('protonation.h5')}
# Check if everything is in order.
for file_ in statics.values():
if not file_.exists():
print file_, 'does not exist'
statics = dict(zip(statics.keys(), map(str, statics.values())))
config_defaults = pl.Path(md_folder + '/jobs/abcd/config.yml')
if not config_defaults.exists():
raise FileNotFoundError
# Create project.
project = LS.Project(cluster_folder + 'generation'
+ str(Genetic_algorithm.generation) + '/MD/',
defaults=config_defaults.as_posix(),statics=statics,
slurm_parameters=slurm_config.as_posix())
# Creates folders and copies static data to cluster.
project.create_environment()
# Initialize parameters that are used to vary the job settings
# like polymer type, timesteps, etc.
parameters = {}
# Simulation parameters.
sim_parameter = {}
# Lammps parameters.
lmp_parameter = {}
# Bundling them up.
parameters['lammps_parameter'] = lmp_parameter
parameters['sim_parameter'] = sim_parameter
for genome in next_population:
parameters['sim_parameter']['poly_sequence'] = genome
project.create_job(parameters)
print "Starting MD simulations"
Genetic_algorithm.update_gen()
print Genetic_algorithm.generation
project.run_jobs()
f_ = open(project_folder + 'history.yml', 'w')
yaml.dump(history, stream = f_)
f_.close()
g_ = open(project_folder + 'best_hit.yml', 'w')
yaml.dump(current_best, stream = g_)
g_.close() | 0.503418 | 0.372049 |
import sublime, sublimeplugin
import os.path
import functools
def findCommonPathPrefix(files):
if len(files) > 1:
# Remove any common prefix from the set of files, to drop redundant
# information
common = os.path.commonprefix(files)
# os.path.commonprefix is calculated character by character, back up
# to the nearest dir, if required.
if not os.path.isdir(common):
common = common[:-len(os.path.basename(common))]
return common
else:
return ""
class PromptSelectFileCommand(sublimeplugin.WindowCommand):
"""Switch to another open file"""
def run(self, window, args):
views = window.views()
fileSet = set()
for v in views:
if v.fileName():
fileSet.add(v.fileName())
commonPrefix = findCommonPathPrefix(fileSet)
display = []
selectedIndex = -1
for v in views:
basename = "untitled"
if v.fileName():
basename = v.fileName()[len(commonPrefix):]
elif v.name():
basename = v.name()
if v.isDirty():
display.append(basename + "*")
else:
display.append(basename)
if window.activeView().id() == v.id():
selectedIndex = len(display) - 1
window.showSelectPanel(display,
functools.partial(self.selectView, views, window), None,
sublime.SELECT_PANEL_FILES, "", selectedIndex)
def selectView(self, views, window, i):
view = views[i];
window.focusView(view)
class PromptOpenFileInCurrentDirectoryCommand(sublimeplugin.WindowCommand):
"""Open a file in the current directory"""
badExtensions = ['.jpg', '.gif', '.png', '.pyc', '.pyo', '.bin', '.o',
'.so', '.obj', '.lib', '.pdb', '.suo', '.ncb', '.dll', '.exe', '.zip',
'.tar', '.gz', '.bz2', '.tgz', '.rar']
def wantFile(self, f):
root, ext = os.path.splitext(f)
return os.path.isfile(f) and not ext in self.badExtensions
def run(self, window, args):
files = [f for f in os.listdir(os.getcwdu()) if self.wantFile(f)]
files.sort()
window.showQuickPanel("", "open", files, files,
sublime.QUICK_PANEL_FILES | sublime.QUICK_PANEL_MULTI_SELECT) | sb/Data/Packages/Default/SelectFile.py | import sublime, sublimeplugin
import os.path
import functools
def findCommonPathPrefix(files):
if len(files) > 1:
# Remove any common prefix from the set of files, to drop redundant
# information
common = os.path.commonprefix(files)
# os.path.commonprefix is calculated character by character, back up
# to the nearest dir, if required.
if not os.path.isdir(common):
common = common[:-len(os.path.basename(common))]
return common
else:
return ""
class PromptSelectFileCommand(sublimeplugin.WindowCommand):
"""Switch to another open file"""
def run(self, window, args):
views = window.views()
fileSet = set()
for v in views:
if v.fileName():
fileSet.add(v.fileName())
commonPrefix = findCommonPathPrefix(fileSet)
display = []
selectedIndex = -1
for v in views:
basename = "untitled"
if v.fileName():
basename = v.fileName()[len(commonPrefix):]
elif v.name():
basename = v.name()
if v.isDirty():
display.append(basename + "*")
else:
display.append(basename)
if window.activeView().id() == v.id():
selectedIndex = len(display) - 1
window.showSelectPanel(display,
functools.partial(self.selectView, views, window), None,
sublime.SELECT_PANEL_FILES, "", selectedIndex)
def selectView(self, views, window, i):
view = views[i];
window.focusView(view)
class PromptOpenFileInCurrentDirectoryCommand(sublimeplugin.WindowCommand):
"""Open a file in the current directory"""
badExtensions = ['.jpg', '.gif', '.png', '.pyc', '.pyo', '.bin', '.o',
'.so', '.obj', '.lib', '.pdb', '.suo', '.ncb', '.dll', '.exe', '.zip',
'.tar', '.gz', '.bz2', '.tgz', '.rar']
def wantFile(self, f):
root, ext = os.path.splitext(f)
return os.path.isfile(f) and not ext in self.badExtensions
def run(self, window, args):
files = [f for f in os.listdir(os.getcwdu()) if self.wantFile(f)]
files.sort()
window.showQuickPanel("", "open", files, files,
sublime.QUICK_PANEL_FILES | sublime.QUICK_PANEL_MULTI_SELECT) | 0.304559 | 0.112016 |
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateTablespaceDetails(object):
    """
    The details required to create a tablespace.

    Properties with a closed set of legal values (``type``,
    ``default_compress``, ``status``, ``extent_management``,
    ``segment_management``) are validated in their setters against the
    class constants declared below.
    """

    #: A constant which can be used with the type property of a CreateTablespaceDetails.
    #: This constant has a value of "PERMANENT"
    TYPE_PERMANENT = "PERMANENT"

    #: A constant which can be used with the type property of a CreateTablespaceDetails.
    #: This constant has a value of "TEMPORARY"
    TYPE_TEMPORARY = "TEMPORARY"

    #: A constant which can be used with the default_compress property of a CreateTablespaceDetails.
    #: This constant has a value of "NO_COMPRESS"
    DEFAULT_COMPRESS_NO_COMPRESS = "NO_COMPRESS"

    #: A constant which can be used with the default_compress property of a CreateTablespaceDetails.
    #: This constant has a value of "BASIC_COMPRESS"
    DEFAULT_COMPRESS_BASIC_COMPRESS = "BASIC_COMPRESS"

    #: A constant which can be used with the status property of a CreateTablespaceDetails.
    #: This constant has a value of "READ_ONLY"
    STATUS_READ_ONLY = "READ_ONLY"

    #: A constant which can be used with the status property of a CreateTablespaceDetails.
    #: This constant has a value of "READ_WRITE"
    STATUS_READ_WRITE = "READ_WRITE"

    #: A constant which can be used with the extent_management property of a CreateTablespaceDetails.
    #: This constant has a value of "AUTOALLOCATE"
    EXTENT_MANAGEMENT_AUTOALLOCATE = "AUTOALLOCATE"

    #: A constant which can be used with the extent_management property of a CreateTablespaceDetails.
    #: This constant has a value of "UNIFORM"
    EXTENT_MANAGEMENT_UNIFORM = "UNIFORM"

    #: A constant which can be used with the segment_management property of a CreateTablespaceDetails.
    #: This constant has a value of "AUTO"
    SEGMENT_MANAGEMENT_AUTO = "AUTO"

    #: A constant which can be used with the segment_management property of a CreateTablespaceDetails.
    #: This constant has a value of "MANUAL"
    SEGMENT_MANAGEMENT_MANUAL = "MANUAL"
def __init__(self, **kwargs):
"""
Initializes a new CreateTablespaceDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param credential_details:
The value to assign to the credential_details property of this CreateTablespaceDetails.
:type credential_details: oci.database_management.models.TablespaceAdminCredentialDetails
:param name:
The value to assign to the name property of this CreateTablespaceDetails.
:type name: str
:param type:
The value to assign to the type property of this CreateTablespaceDetails.
Allowed values for this property are: "PERMANENT", "TEMPORARY"
:type type: str
:param is_bigfile:
The value to assign to the is_bigfile property of this CreateTablespaceDetails.
:type is_bigfile: bool
:param data_files:
The value to assign to the data_files property of this CreateTablespaceDetails.
:type data_files: list[str]
:param file_count:
The value to assign to the file_count property of this CreateTablespaceDetails.
:type file_count: int
:param file_size:
The value to assign to the file_size property of this CreateTablespaceDetails.
:type file_size: oci.database_management.models.TablespaceStorageSize
:param is_reusable:
The value to assign to the is_reusable property of this CreateTablespaceDetails.
:type is_reusable: bool
:param is_auto_extensible:
The value to assign to the is_auto_extensible property of this CreateTablespaceDetails.
:type is_auto_extensible: bool
:param auto_extend_next_size:
The value to assign to the auto_extend_next_size property of this CreateTablespaceDetails.
:type auto_extend_next_size: oci.database_management.models.TablespaceStorageSize
:param auto_extend_max_size:
The value to assign to the auto_extend_max_size property of this CreateTablespaceDetails.
:type auto_extend_max_size: oci.database_management.models.TablespaceStorageSize
:param is_max_size_unlimited:
The value to assign to the is_max_size_unlimited property of this CreateTablespaceDetails.
:type is_max_size_unlimited: bool
:param block_size_in_kilobytes:
The value to assign to the block_size_in_kilobytes property of this CreateTablespaceDetails.
:type block_size_in_kilobytes: int
:param is_encrypted:
The value to assign to the is_encrypted property of this CreateTablespaceDetails.
:type is_encrypted: bool
:param encryption_algorithm:
The value to assign to the encryption_algorithm property of this CreateTablespaceDetails.
:type encryption_algorithm: str
:param default_compress:
The value to assign to the default_compress property of this CreateTablespaceDetails.
Allowed values for this property are: "NO_COMPRESS", "BASIC_COMPRESS"
:type default_compress: str
:param status:
The value to assign to the status property of this CreateTablespaceDetails.
Allowed values for this property are: "READ_ONLY", "READ_WRITE"
:type status: str
:param extent_management:
The value to assign to the extent_management property of this CreateTablespaceDetails.
Allowed values for this property are: "AUTOALLOCATE", "UNIFORM"
:type extent_management: str
:param extent_uniform_size:
The value to assign to the extent_uniform_size property of this CreateTablespaceDetails.
:type extent_uniform_size: oci.database_management.models.TablespaceStorageSize
:param segment_management:
The value to assign to the segment_management property of this CreateTablespaceDetails.
Allowed values for this property are: "AUTO", "MANUAL"
:type segment_management: str
:param is_default:
The value to assign to the is_default property of this CreateTablespaceDetails.
:type is_default: bool
"""
self.swagger_types = {
'credential_details': 'TablespaceAdminCredentialDetails',
'name': 'str',
'type': 'str',
'is_bigfile': 'bool',
'data_files': 'list[str]',
'file_count': 'int',
'file_size': 'TablespaceStorageSize',
'is_reusable': 'bool',
'is_auto_extensible': 'bool',
'auto_extend_next_size': 'TablespaceStorageSize',
'auto_extend_max_size': 'TablespaceStorageSize',
'is_max_size_unlimited': 'bool',
'block_size_in_kilobytes': 'int',
'is_encrypted': 'bool',
'encryption_algorithm': 'str',
'default_compress': 'str',
'status': 'str',
'extent_management': 'str',
'extent_uniform_size': 'TablespaceStorageSize',
'segment_management': 'str',
'is_default': 'bool'
}
self.attribute_map = {
'credential_details': 'credentialDetails',
'name': 'name',
'type': 'type',
'is_bigfile': 'isBigfile',
'data_files': 'dataFiles',
'file_count': 'fileCount',
'file_size': 'fileSize',
'is_reusable': 'isReusable',
'is_auto_extensible': 'isAutoExtensible',
'auto_extend_next_size': 'autoExtendNextSize',
'auto_extend_max_size': 'autoExtendMaxSize',
'is_max_size_unlimited': 'isMaxSizeUnlimited',
'block_size_in_kilobytes': 'blockSizeInKilobytes',
'is_encrypted': 'isEncrypted',
'encryption_algorithm': 'encryptionAlgorithm',
'default_compress': 'defaultCompress',
'status': 'status',
'extent_management': 'extentManagement',
'extent_uniform_size': 'extentUniformSize',
'segment_management': 'segmentManagement',
'is_default': 'isDefault'
}
self._credential_details = None
self._name = None
self._type = None
self._is_bigfile = None
self._data_files = None
self._file_count = None
self._file_size = None
self._is_reusable = None
self._is_auto_extensible = None
self._auto_extend_next_size = None
self._auto_extend_max_size = None
self._is_max_size_unlimited = None
self._block_size_in_kilobytes = None
self._is_encrypted = None
self._encryption_algorithm = None
self._default_compress = None
self._status = None
self._extent_management = None
self._extent_uniform_size = None
self._segment_management = None
self._is_default = None
@property
def credential_details(self):
"""
**[Required]** Gets the credential_details of this CreateTablespaceDetails.
:return: The credential_details of this CreateTablespaceDetails.
:rtype: oci.database_management.models.TablespaceAdminCredentialDetails
"""
return self._credential_details
@credential_details.setter
def credential_details(self, credential_details):
"""
Sets the credential_details of this CreateTablespaceDetails.
:param credential_details: The credential_details of this CreateTablespaceDetails.
:type: oci.database_management.models.TablespaceAdminCredentialDetails
"""
self._credential_details = credential_details
@property
def name(self):
"""
**[Required]** Gets the name of this CreateTablespaceDetails.
The name of the tablespace. It must be unique within a database.
:return: The name of this CreateTablespaceDetails.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this CreateTablespaceDetails.
The name of the tablespace. It must be unique within a database.
:param name: The name of this CreateTablespaceDetails.
:type: str
"""
self._name = name
@property
def type(self):
"""
Gets the type of this CreateTablespaceDetails.
The type of tablespace.
Allowed values for this property are: "PERMANENT", "TEMPORARY"
:return: The type of this CreateTablespaceDetails.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this CreateTablespaceDetails.
The type of tablespace.
:param type: The type of this CreateTablespaceDetails.
:type: str
"""
allowed_values = ["PERMANENT", "TEMPORARY"]
if not value_allowed_none_or_none_sentinel(type, allowed_values):
raise ValueError(
"Invalid value for `type`, must be None or one of {0}"
.format(allowed_values)
)
self._type = type
@property
def is_bigfile(self):
"""
Gets the is_bigfile of this CreateTablespaceDetails.
Specifies whether the tablespace is a bigfile or smallfile tablespace.
A bigfile tablespace contains only one data file or temp file, which can contain up to approximately 4 billion (232) blocks.
A smallfile tablespace is a traditional Oracle tablespace, which can contain 1022 data files or temp files, each of which can contain up to approximately 4 million (222) blocks.
:return: The is_bigfile of this CreateTablespaceDetails.
:rtype: bool
"""
return self._is_bigfile
@is_bigfile.setter
def is_bigfile(self, is_bigfile):
"""
Sets the is_bigfile of this CreateTablespaceDetails.
Specifies whether the tablespace is a bigfile or smallfile tablespace.
A bigfile tablespace contains only one data file or temp file, which can contain up to approximately 4 billion (232) blocks.
A smallfile tablespace is a traditional Oracle tablespace, which can contain 1022 data files or temp files, each of which can contain up to approximately 4 million (222) blocks.
:param is_bigfile: The is_bigfile of this CreateTablespaceDetails.
:type: bool
"""
self._is_bigfile = is_bigfile
@property
def data_files(self):
"""
Gets the data_files of this CreateTablespaceDetails.
The list of data files or temp files created for the tablespace.
:return: The data_files of this CreateTablespaceDetails.
:rtype: list[str]
"""
return self._data_files
@data_files.setter
def data_files(self, data_files):
"""
Sets the data_files of this CreateTablespaceDetails.
The list of data files or temp files created for the tablespace.
:param data_files: The data_files of this CreateTablespaceDetails.
:type: list[str]
"""
self._data_files = data_files
@property
def file_count(self):
"""
Gets the file_count of this CreateTablespaceDetails.
The number of data files or temp files created for the tablespace. This is for Oracle Managed Files only.
:return: The file_count of this CreateTablespaceDetails.
:rtype: int
"""
return self._file_count
@file_count.setter
def file_count(self, file_count):
"""
Sets the file_count of this CreateTablespaceDetails.
The number of data files or temp files created for the tablespace. This is for Oracle Managed Files only.
:param file_count: The file_count of this CreateTablespaceDetails.
:type: int
"""
self._file_count = file_count
@property
def file_size(self):
"""
Gets the file_size of this CreateTablespaceDetails.
The size of each data file or temp file.
:return: The file_size of this CreateTablespaceDetails.
:rtype: oci.database_management.models.TablespaceStorageSize
"""
return self._file_size
@file_size.setter
def file_size(self, file_size):
"""
Sets the file_size of this CreateTablespaceDetails.
The size of each data file or temp file.
:param file_size: The file_size of this CreateTablespaceDetails.
:type: oci.database_management.models.TablespaceStorageSize
"""
self._file_size = file_size
@property
def is_reusable(self):
"""
Gets the is_reusable of this CreateTablespaceDetails.
Specifies whether Oracle can reuse the data file or temp file. Reuse is only allowed when the file name is provided.
:return: The is_reusable of this CreateTablespaceDetails.
:rtype: bool
"""
return self._is_reusable
@is_reusable.setter
def is_reusable(self, is_reusable):
"""
Sets the is_reusable of this CreateTablespaceDetails.
Specifies whether Oracle can reuse the data file or temp file. Reuse is only allowed when the file name is provided.
:param is_reusable: The is_reusable of this CreateTablespaceDetails.
:type: bool
"""
self._is_reusable = is_reusable
@property
def is_auto_extensible(self):
"""
Gets the is_auto_extensible of this CreateTablespaceDetails.
Specifies whether the data file or temp file can be extended automatically.
:return: The is_auto_extensible of this CreateTablespaceDetails.
:rtype: bool
"""
return self._is_auto_extensible
@is_auto_extensible.setter
def is_auto_extensible(self, is_auto_extensible):
"""
Sets the is_auto_extensible of this CreateTablespaceDetails.
Specifies whether the data file or temp file can be extended automatically.
:param is_auto_extensible: The is_auto_extensible of this CreateTablespaceDetails.
:type: bool
"""
self._is_auto_extensible = is_auto_extensible
@property
def auto_extend_next_size(self):
"""
Gets the auto_extend_next_size of this CreateTablespaceDetails.
The size of the next increment of disk space to be allocated automatically when more extents are required.
:return: The auto_extend_next_size of this CreateTablespaceDetails.
:rtype: oci.database_management.models.TablespaceStorageSize
"""
return self._auto_extend_next_size
@auto_extend_next_size.setter
def auto_extend_next_size(self, auto_extend_next_size):
"""
Sets the auto_extend_next_size of this CreateTablespaceDetails.
The size of the next increment of disk space to be allocated automatically when more extents are required.
:param auto_extend_next_size: The auto_extend_next_size of this CreateTablespaceDetails.
:type: oci.database_management.models.TablespaceStorageSize
"""
self._auto_extend_next_size = auto_extend_next_size
@property
def auto_extend_max_size(self):
"""
Gets the auto_extend_max_size of this CreateTablespaceDetails.
The maximum disk space allowed for automatic extension of the data files or temp files.
:return: The auto_extend_max_size of this CreateTablespaceDetails.
:rtype: oci.database_management.models.TablespaceStorageSize
"""
return self._auto_extend_max_size
@auto_extend_max_size.setter
def auto_extend_max_size(self, auto_extend_max_size):
"""
Sets the auto_extend_max_size of this CreateTablespaceDetails.
The maximum disk space allowed for automatic extension of the data files or temp files.
:param auto_extend_max_size: The auto_extend_max_size of this CreateTablespaceDetails.
:type: oci.database_management.models.TablespaceStorageSize
"""
self._auto_extend_max_size = auto_extend_max_size
@property
def is_max_size_unlimited(self):
"""
Gets the is_max_size_unlimited of this CreateTablespaceDetails.
Specifies whether the disk space of the data file or temp file can be limited.
:return: The is_max_size_unlimited of this CreateTablespaceDetails.
:rtype: bool
"""
return self._is_max_size_unlimited
@is_max_size_unlimited.setter
def is_max_size_unlimited(self, is_max_size_unlimited):
"""
Sets the is_max_size_unlimited of this CreateTablespaceDetails.
Specifies whether the disk space of the data file or temp file can be limited.
:param is_max_size_unlimited: The is_max_size_unlimited of this CreateTablespaceDetails.
:type: bool
"""
self._is_max_size_unlimited = is_max_size_unlimited
@property
def block_size_in_kilobytes(self):
"""
Gets the block_size_in_kilobytes of this CreateTablespaceDetails.
Block size for the tablespace.
:return: The block_size_in_kilobytes of this CreateTablespaceDetails.
:rtype: int
"""
return self._block_size_in_kilobytes
@block_size_in_kilobytes.setter
def block_size_in_kilobytes(self, block_size_in_kilobytes):
"""
Sets the block_size_in_kilobytes of this CreateTablespaceDetails.
Block size for the tablespace.
:param block_size_in_kilobytes: The block_size_in_kilobytes of this CreateTablespaceDetails.
:type: int
"""
self._block_size_in_kilobytes = block_size_in_kilobytes
@property
def is_encrypted(self):
"""
Gets the is_encrypted of this CreateTablespaceDetails.
Indicates whether the tablespace is encrypted.
:return: The is_encrypted of this CreateTablespaceDetails.
:rtype: bool
"""
return self._is_encrypted
@is_encrypted.setter
def is_encrypted(self, is_encrypted):
"""
Sets the is_encrypted of this CreateTablespaceDetails.
Indicates whether the tablespace is encrypted.
:param is_encrypted: The is_encrypted of this CreateTablespaceDetails.
:type: bool
"""
self._is_encrypted = is_encrypted
@property
def encryption_algorithm(self):
"""
Gets the encryption_algorithm of this CreateTablespaceDetails.
The name of the encryption algorithm to be used for tablespace encryption.
:return: The encryption_algorithm of this CreateTablespaceDetails.
:rtype: str
"""
return self._encryption_algorithm
@encryption_algorithm.setter
def encryption_algorithm(self, encryption_algorithm):
"""
Sets the encryption_algorithm of this CreateTablespaceDetails.
The name of the encryption algorithm to be used for tablespace encryption.
:param encryption_algorithm: The encryption_algorithm of this CreateTablespaceDetails.
:type: str
"""
self._encryption_algorithm = encryption_algorithm
@property
def default_compress(self):
"""
Gets the default_compress of this CreateTablespaceDetails.
The default compression of data for all tables created in the tablespace.
Allowed values for this property are: "NO_COMPRESS", "BASIC_COMPRESS"
:return: The default_compress of this CreateTablespaceDetails.
:rtype: str
"""
return self._default_compress
@default_compress.setter
def default_compress(self, default_compress):
    """
    Sets the default_compress of this CreateTablespaceDetails.
    The default compression of data for all tables created in the tablespace.

    Allowed values for this property are: "NO_COMPRESS", "BASIC_COMPRESS"

    :param default_compress: The default_compress of this CreateTablespaceDetails.
    :type: str
    :raises ValueError: if the value is neither None nor one of the allowed values
    """
    allowed_values = ["NO_COMPRESS", "BASIC_COMPRESS"]
    if not value_allowed_none_or_none_sentinel(default_compress, allowed_values):
        raise ValueError(
            "Invalid value for `default_compress`, must be None or one of {0}"
            .format(allowed_values)
        )
    self._default_compress = default_compress
@property
def status(self):
    """
    Gets the status of this CreateTablespaceDetails.
    The status of the tablespace.

    Allowed values for this property are: "READ_ONLY", "READ_WRITE"

    :return: The status of this CreateTablespaceDetails.
    :rtype: str
    """
    return self._status
@status.setter
def status(self, status):
    """
    Sets the status of this CreateTablespaceDetails.
    The status of the tablespace.

    Allowed values for this property are: "READ_ONLY", "READ_WRITE"

    :param status: The status of this CreateTablespaceDetails.
    :type: str
    :raises ValueError: if the value is neither None nor one of the allowed values
    """
    allowed_values = ["READ_ONLY", "READ_WRITE"]
    if not value_allowed_none_or_none_sentinel(status, allowed_values):
        raise ValueError(
            "Invalid value for `status`, must be None or one of {0}"
            .format(allowed_values)
        )
    self._status = status
@property
def extent_management(self):
    """
    Gets the extent_management of this CreateTablespaceDetails.
    Specifies how the extents of the tablespace should be managed.

    Allowed values for this property are: "AUTOALLOCATE", "UNIFORM"

    :return: The extent_management of this CreateTablespaceDetails.
    :rtype: str
    """
    return self._extent_management
@extent_management.setter
def extent_management(self, extent_management):
    """
    Sets the extent_management of this CreateTablespaceDetails.
    Specifies how the extents of the tablespace should be managed.

    Allowed values for this property are: "AUTOALLOCATE", "UNIFORM"

    :param extent_management: The extent_management of this CreateTablespaceDetails.
    :type: str
    :raises ValueError: if the value is neither None nor one of the allowed values
    """
    allowed_values = ["AUTOALLOCATE", "UNIFORM"]
    if not value_allowed_none_or_none_sentinel(extent_management, allowed_values):
        raise ValueError(
            "Invalid value for `extent_management`, must be None or one of {0}"
            .format(allowed_values)
        )
    self._extent_management = extent_management
@property
def extent_uniform_size(self):
    """
    Gets the extent_uniform_size of this CreateTablespaceDetails.
    The size of the extent when the tablespace is managed with uniform extents of a specific size.

    :return: The extent_uniform_size of this CreateTablespaceDetails.
    :rtype: oci.database_management.models.TablespaceStorageSize
    """
    return self._extent_uniform_size
@extent_uniform_size.setter
def extent_uniform_size(self, extent_uniform_size):
    """
    Sets the extent_uniform_size of this CreateTablespaceDetails.
    The size of the extent when the tablespace is managed with uniform
    extents of a specific size. No validation is performed here; the value
    is stored as given.

    :param extent_uniform_size: The extent_uniform_size of this CreateTablespaceDetails.
    :type: oci.database_management.models.TablespaceStorageSize
    """
    self._extent_uniform_size = extent_uniform_size
@property
def segment_management(self):
    """
    Gets the segment_management of this CreateTablespaceDetails.
    Specifies whether tablespace segment management should be automatic or manual.

    Allowed values for this property are: "AUTO", "MANUAL"

    :return: The segment_management of this CreateTablespaceDetails.
    :rtype: str
    """
    return self._segment_management
@segment_management.setter
def segment_management(self, segment_management):
    """
    Sets the segment_management of this CreateTablespaceDetails.
    Specifies whether tablespace segment management should be automatic or manual.

    Allowed values for this property are: "AUTO", "MANUAL"

    :param segment_management: The segment_management of this CreateTablespaceDetails.
    :type: str
    :raises ValueError: if the value is neither None nor one of the allowed values
    """
    allowed_values = ["AUTO", "MANUAL"]
    if not value_allowed_none_or_none_sentinel(segment_management, allowed_values):
        raise ValueError(
            "Invalid value for `segment_management`, must be None or one of {0}"
            .format(allowed_values)
        )
    self._segment_management = segment_management
@property
def is_default(self):
    """
    Gets the is_default of this CreateTablespaceDetails.
    Specifies whether the tablespace is the default tablespace.

    :return: The is_default of this CreateTablespaceDetails.
    :rtype: bool
    """
    return self._is_default
@is_default.setter
def is_default(self, is_default):
    """
    Sets the is_default of this CreateTablespaceDetails.
    Specifies whether the tablespace is the default tablespace. No
    validation is performed here; the value is stored as given.

    :param is_default: The is_default of this CreateTablespaceDetails.
    :type: bool
    """
    self._is_default = is_default
def __repr__(self):
    # Delegate to the SDK helper that renders all model attributes as a
    # human-readable flat dict.
    return formatted_flat_dict(self)
def __eq__(self, other):
    # Two models are equal when every instance attribute (including the
    # swagger_types/attribute_map metadata dicts) matches; None never
    # compares equal to a model instance.
    if other is None:
        return False
    return self.__dict__ == other.__dict__
def __ne__(self, other):
    """Return the negation of __eq__ (explicit for Python 2 semantics)."""
    # Fixed: the original line was corrupted by a trailing data-join
    # artifact ("| src/oci/... |"), which was a syntax error.
    return not self == other
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateTablespaceDetails(object):
    """
    The details required to create a tablespace.

    All attributes are exposed as read/write properties backed by
    ``_<name>`` slots.  Properties with a closed set of legal values
    (``type``, ``default_compress``, ``status``, ``extent_management``,
    ``segment_management``) validate assignments and raise ``ValueError``
    for anything that is neither None nor one of the allowed values.
    """

    #: Allowed values for the ``type`` property.
    TYPE_PERMANENT = "PERMANENT"
    TYPE_TEMPORARY = "TEMPORARY"

    #: Allowed values for the ``default_compress`` property.
    DEFAULT_COMPRESS_NO_COMPRESS = "NO_COMPRESS"
    DEFAULT_COMPRESS_BASIC_COMPRESS = "BASIC_COMPRESS"

    #: Allowed values for the ``status`` property.
    STATUS_READ_ONLY = "READ_ONLY"
    STATUS_READ_WRITE = "READ_WRITE"

    #: Allowed values for the ``extent_management`` property.
    EXTENT_MANAGEMENT_AUTOALLOCATE = "AUTOALLOCATE"
    EXTENT_MANAGEMENT_UNIFORM = "UNIFORM"

    #: Allowed values for the ``segment_management`` property.
    SEGMENT_MANAGEMENT_AUTO = "AUTO"
    SEGMENT_MANAGEMENT_MANUAL = "MANUAL"

    def __init__(self, **kwargs):
        """
        Initializes a new CreateTablespaceDetails object with values from keyword arguments.

        Supported keyword arguments match the property names of this class
        (``credential_details``, ``name``, ``type``, ``is_bigfile``,
        ``data_files``, ``file_count``, ``file_size``, ``is_reusable``,
        ``is_auto_extensible``, ``auto_extend_next_size``,
        ``auto_extend_max_size``, ``is_max_size_unlimited``,
        ``block_size_in_kilobytes``, ``is_encrypted``,
        ``encryption_algorithm``, ``default_compress``, ``status``,
        ``extent_management``, ``extent_uniform_size``,
        ``segment_management``, ``is_default``).  See the property
        docstrings for types and allowed values.
        """
        # Swagger type of each attribute, used by the SDK (de)serializer.
        self.swagger_types = {
            'credential_details': 'TablespaceAdminCredentialDetails',
            'name': 'str',
            'type': 'str',
            'is_bigfile': 'bool',
            'data_files': 'list[str]',
            'file_count': 'int',
            'file_size': 'TablespaceStorageSize',
            'is_reusable': 'bool',
            'is_auto_extensible': 'bool',
            'auto_extend_next_size': 'TablespaceStorageSize',
            'auto_extend_max_size': 'TablespaceStorageSize',
            'is_max_size_unlimited': 'bool',
            'block_size_in_kilobytes': 'int',
            'is_encrypted': 'bool',
            'encryption_algorithm': 'str',
            'default_compress': 'str',
            'status': 'str',
            'extent_management': 'str',
            'extent_uniform_size': 'TablespaceStorageSize',
            'segment_management': 'str',
            'is_default': 'bool'
        }
        # Mapping from Python attribute names to the wire (JSON) field names.
        self.attribute_map = {
            'credential_details': 'credentialDetails',
            'name': 'name',
            'type': 'type',
            'is_bigfile': 'isBigfile',
            'data_files': 'dataFiles',
            'file_count': 'fileCount',
            'file_size': 'fileSize',
            'is_reusable': 'isReusable',
            'is_auto_extensible': 'isAutoExtensible',
            'auto_extend_next_size': 'autoExtendNextSize',
            'auto_extend_max_size': 'autoExtendMaxSize',
            'is_max_size_unlimited': 'isMaxSizeUnlimited',
            'block_size_in_kilobytes': 'blockSizeInKilobytes',
            'is_encrypted': 'isEncrypted',
            'encryption_algorithm': 'encryptionAlgorithm',
            'default_compress': 'defaultCompress',
            'status': 'status',
            'extent_management': 'extentManagement',
            'extent_uniform_size': 'extentUniformSize',
            'segment_management': 'segmentManagement',
            'is_default': 'isDefault'
        }
        # Every backing slot starts as None; @init_model_state_from_kwargs
        # then routes any supplied keyword arguments through the public
        # property setters (so validated properties stay validated).
        for _attr in self.swagger_types:
            setattr(self, '_' + _attr, None)

    def _plain_property(attr_name, doc):
        # Class-body factory, executed once at class-creation time (and
        # deleted below): builds an unvalidated read/write property over
        # the ``_<attr_name>`` backing slot.
        private_name = '_' + attr_name

        def _get(self):
            return getattr(self, private_name)

        def _set(self, value):
            setattr(self, private_name, value)

        return property(_get, _set, doc=doc)

    def _choice_property(attr_name, allowed_values, doc):
        # Like _plain_property, but the setter only accepts None, the
        # NONE_SENTINEL, or one of ``allowed_values``; the ValueError
        # message text matches the previous hand-written setters exactly.
        private_name = '_' + attr_name

        def _get(self):
            return getattr(self, private_name)

        def _set(self, value):
            if not value_allowed_none_or_none_sentinel(value, allowed_values):
                raise ValueError(
                    "Invalid value for `{0}`, must be None or one of {1}"
                    .format(attr_name, allowed_values)
                )
            setattr(self, private_name, value)

        return property(_get, _set, doc=doc)

    credential_details = _plain_property(
        'credential_details',
        "**[Required]** The credential_details of this CreateTablespaceDetails "
        "(oci.database_management.models.TablespaceAdminCredentialDetails).")
    name = _plain_property(
        'name',
        "**[Required]** The name of the tablespace. It must be unique within a database (str).")
    type = _choice_property(
        'type', ["PERMANENT", "TEMPORARY"],
        "The type of tablespace. Allowed values: \"PERMANENT\", \"TEMPORARY\" (str).")
    is_bigfile = _plain_property(
        'is_bigfile',
        "Specifies whether the tablespace is a bigfile or smallfile tablespace. "
        "A bigfile tablespace contains only one data file or temp file; a "
        "smallfile tablespace can contain up to 1022 data files or temp files (bool).")
    data_files = _plain_property(
        'data_files',
        "The list of data files or temp files created for the tablespace (list[str]).")
    file_count = _plain_property(
        'file_count',
        "The number of data files or temp files created for the tablespace. "
        "This is for Oracle Managed Files only (int).")
    file_size = _plain_property(
        'file_size',
        "The size of each data file or temp file "
        "(oci.database_management.models.TablespaceStorageSize).")
    is_reusable = _plain_property(
        'is_reusable',
        "Specifies whether Oracle can reuse the data file or temp file. "
        "Reuse is only allowed when the file name is provided (bool).")
    is_auto_extensible = _plain_property(
        'is_auto_extensible',
        "Specifies whether the data file or temp file can be extended automatically (bool).")
    auto_extend_next_size = _plain_property(
        'auto_extend_next_size',
        "The size of the next increment of disk space to be allocated automatically "
        "when more extents are required "
        "(oci.database_management.models.TablespaceStorageSize).")
    auto_extend_max_size = _plain_property(
        'auto_extend_max_size',
        "The maximum disk space allowed for automatic extension of the data files "
        "or temp files (oci.database_management.models.TablespaceStorageSize).")
    is_max_size_unlimited = _plain_property(
        'is_max_size_unlimited',
        "Specifies whether the disk space of the data file or temp file can be limited (bool).")
    block_size_in_kilobytes = _plain_property(
        'block_size_in_kilobytes',
        "Block size for the tablespace, in kilobytes (int).")
    is_encrypted = _plain_property(
        'is_encrypted',
        "Indicates whether the tablespace is encrypted (bool).")
    encryption_algorithm = _plain_property(
        'encryption_algorithm',
        "The name of the encryption algorithm to be used for tablespace encryption (str).")
    default_compress = _choice_property(
        'default_compress', ["NO_COMPRESS", "BASIC_COMPRESS"],
        "The default compression of data for all tables created in the tablespace. "
        "Allowed values: \"NO_COMPRESS\", \"BASIC_COMPRESS\" (str).")
    status = _choice_property(
        'status', ["READ_ONLY", "READ_WRITE"],
        "The status of the tablespace. Allowed values: \"READ_ONLY\", \"READ_WRITE\" (str).")
    extent_management = _choice_property(
        'extent_management', ["AUTOALLOCATE", "UNIFORM"],
        "Specifies how the extents of the tablespace should be managed. "
        "Allowed values: \"AUTOALLOCATE\", \"UNIFORM\" (str).")
    extent_uniform_size = _plain_property(
        'extent_uniform_size',
        "The size of the extent when the tablespace is managed with uniform extents "
        "of a specific size (oci.database_management.models.TablespaceStorageSize).")
    segment_management = _choice_property(
        'segment_management', ["AUTO", "MANUAL"],
        "Specifies whether tablespace segment management should be automatic or manual. "
        "Allowed values: \"AUTO\", \"MANUAL\" (str).")
    is_default = _plain_property(
        'is_default',
        "Specifies whether the tablespace is the default tablespace (bool).")

    # The factories are class-creation-time helpers only; remove them from
    # the class namespace so they do not leak as methods.
    del _plain_property, _choice_property

    def __repr__(self):
        # Human-readable flat-dict rendering of all model attributes.
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # None never equals a model; otherwise compare full attribute dicts.
        if other is None:
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        # Fixed: the original line was corrupted by a trailing data-join
        # artifact ("| 0.811452 | 0.284854 |"), which was a syntax error.
        return not self == other
import booksdatasource
import unittest
class BooksDataSourceTester(unittest.TestCase):
    """Unit tests for booksdatasource.BooksDataSource, backed by books1.csv.

    Improvements over the previous version: assertTrue(x == y) replaced
    with assertEqual (proper failure diagnostics), compound boolean
    assertions split into separate assertions, and a dead local removed.
    """

    def setUp(self):
        # A fresh data source per test keeps the tests independent.
        self.data_source = booksdatasource.BooksDataSource('books1.csv')

    def tearDown(self):
        pass

    def test_unique_author_len(self):
        authors = self.data_source.authors('Pratchett')
        self.assertEqual(len(authors), 1)

    def test_unique_author_name(self):
        authors = self.data_source.authors('Pratchett')
        self.assertEqual(authors[0], booksdatasource.Author('Pratchett', 'Terry'))

    def test_unique_title_single_author_len(self):
        books = self.data_source.books('Blackout')
        self.assertEqual(len(books), 1)

    def test_unique_title_single_author_name(self):
        # (Removed an unused placeholder local, author_name = '<NAME>'.)
        books = self.data_source.books('Blackout')
        author_list = [booksdatasource.Author('Pratchett', 'Terry')]
        self.assertEqual(books[0], booksdatasource.Book('Blackout', 2010, author_list))

    def test_unique_title_multiple_author_len(self):
        books = self.data_source.books('Good Omens')
        self.assertEqual(len(books), 1)

    def test_unique_title_multiple_author_names(self):
        books = self.data_source.books('Good Omens')
        author_list = [booksdatasource.Author('Gaiman', 'Neil'),
                       booksdatasource.Author('Pratchett', 'Terry')]
        self.assertEqual(books[0], booksdatasource.Book('Good Omens', 1990, author_list))

    def test_null_title_entry(self):
        # An empty search string should match every book.
        # NOTE(review): Book.title is used directly as a sort key, which
        # assumes it is callable on an instance -- confirm against
        # booksdatasource.
        books = sorted(self.data_source.books(''), key=booksdatasource.Book.title)
        all_books = sorted(self.data_source.all_books(), key=booksdatasource.Book.title)
        self.assertEqual(books, all_books)

    def test_null_author_entry(self):
        # Sorting by given name and then by surname yields surname-primary
        # ordering because Python's sort is stable.
        authors = sorted(self.data_source.authors(''), key=booksdatasource.Author.given_name)
        authors = sorted(authors, key=booksdatasource.Author.surname)
        all_authors = sorted(self.data_source.all_authors(), key=booksdatasource.Author.given_name)
        all_authors = sorted(all_authors, key=booksdatasource.Author.surname)
        self.assertEqual(authors, all_authors)

    def test_null_date_entry(self):
        # Omitting both year bounds should match every book.
        books = sorted(self.data_source.books_between_years(), key=booksdatasource.Book.title)
        all_books = sorted(self.data_source.all_books(), key=booksdatasource.Book.title)
        self.assertEqual(books, all_books)

    def test_non_unique_title_len(self):
        books = self.data_source.books('Street')
        self.assertEqual(len(books), 2)

    def test_non_unique_title_names_alphabetical(self):
        books = self.data_source.books('Street', 'title')
        beale = booksdatasource.Book('If Beale Street Could Talk', 1974,
                                     [booksdatasource.Author('Baldwin', 'James')])
        main_street = booksdatasource.Book('Main Street', 1920,
                                           [booksdatasource.Author('Lewis', 'Sinclair')])
        self.assertEqual(books[0], beale)
        self.assertEqual(books[1], main_street)

    def test_non_unique_title_names_chronological(self):
        books = self.data_source.books('street', 'year')
        beale = booksdatasource.Book('If Beale Street Could Talk', 1974,
                                     [booksdatasource.Author('Baldwin', 'James')])
        main_street = booksdatasource.Book('Main Street', 1920,
                                           [booksdatasource.Author('Lewis', 'Sinclair')])
        self.assertEqual(books[0], main_street)
        self.assertEqual(books[1], beale)

    def test_non_unique_author_len(self):
        authors = self.data_source.authors('Brontë')
        self.assertEqual(len(authors), 3)

    def test_non_unique_author_name(self):
        authors = self.data_source.authors('Brontë')
        self.assertEqual(authors[0], booksdatasource.Author('Brontë', 'Ann'))
        self.assertEqual(authors[1], booksdatasource.Author('Brontë', 'Charlotte'))
        self.assertEqual(authors[2], booksdatasource.Author('Brontë', 'Emily'))

    def test_date_return_unique_len(self):
        books = self.data_source.books_between_years(1848, 1849)
        self.assertEqual(len(books), 1)

    def test_date_return_unique_name(self):
        books = self.data_source.books_between_years(1848, 1849)
        # NOTE(review): 'Bronte' here vs 'Brontë' elsewhere -- confirm which
        # spelling books1.csv actually uses.
        author_list = [booksdatasource.Author('Bronte', 'Ann')]
        self.assertEqual(books[0],
                         booksdatasource.Book('The Tenant of Wildfell Hall', 1848, author_list))

    def test_date_return_non_unique_name(self):
        books = self.data_source.books_between_years(2020, None)
        boys_and_sex = booksdatasource.Book('Boys and Sex', 2020,
                                            [booksdatasource.Author('Orenstein', 'Peggy')])
        addie_larue = booksdatasource.Book('The Invisible Life of Addie LaRue', 2020,
                                           [booksdatasource.Author('Schwab', 'V.E.')])
        self.assertEqual(books[0], boys_and_sex)
        self.assertEqual(books[1], addie_larue)

    def test_bad_date_input(self):
        # A non-numeric year must raise.
        self.assertRaises(ValueError, self.data_source.books_between_years, "asdfadsadfs")

    def test_reversed_dates(self):
        # start_year after end_year must raise.
        self.assertRaises(ValueError, self.data_source.books_between_years,
                          start_year=2021, end_year=1900)
if __name__ == '__main__':
unittest.main() | books/booksdatasourcetests.py | import booksdatasource
import unittest
class BooksDataSourceTester(unittest.TestCase):
def setUp(self):
self.data_source = booksdatasource.BooksDataSource('books1.csv')
def tearDown(self):
pass
def test_unique_author_len(self):
authors = self.data_source.authors('Pratchett')
self.assertTrue(len(authors) == 1)
def test_unique_author_name(self):
authors = self.data_source.authors('Pratchett')
self.assertTrue(authors[0] == booksdatasource.Author('Pratchett', 'Terry'))
def test_unique_title_single_author_len(self):
books = self.data_source.books('Blackout')
self.assertTrue(len(books)== 1)
def test_unique_title_single_author_name(self):
books = self.data_source.books('Blackout')
author_name = '<NAME>'
author = booksdatasource.Author('Pratchett', 'Terry')
author_list = [author]
self.assertTrue(books[0] == booksdatasource.Book('Blackout',2010,author_list))
def test_unique_title_multiple_author_len(self):
books = self.data_source.books('Good Omens')
self.assertTrue(len(books)==1)
def test_unique_title_multiple_author_names(self):
books = self.data_source.books('Good Omens')
author_neil = booksdatasource.Author('Gaiman', 'Neil')
author_pratchett = booksdatasource.Author('Pratchett','Terry')
author_list = [author_neil, author_pratchett]
self.assertTrue(books[0] == booksdatasource.Book('Good Omens', 1990, author_list))
def test_null_title_entry(self):
books = self.data_source.books('')
books = sorted(books, key = booksdatasource.Book.title)
all_books = sorted(self.data_source.all_books(), key = booksdatasource.Book.title)
self.assertTrue( books == all_books)
def test_null_author_entry(self):
authors = self.data_source.authors('')
authors = sorted(authors, key = booksdatasource.Author.given_name)
authors = sorted(authors, key = booksdatasource.Author.surname)
all_authors = sorted(self.data_source.all_authors(), key = booksdatasource.Author.given_name)
all_authors = sorted(all_authors, key = booksdatasource.Author.surname)
self.assertTrue( authors == all_authors)
def test_null_date_entry(self):
books = self.data_source.books_between_years()
books = sorted(books, key = booksdatasource.Book.title)
all_books = sorted(self.data_source.all_books(), key = booksdatasource.Book.title)
self.assertTrue( books == all_books)
def test_non_unique_title_len(self):
books = self.data_source.books('Street')
self.assertTrue(len(books)==2)
def test_non_unique_title_names_alphabetical(self):
books = self.data_source.books('Street', 'title')
author_lewis = booksdatasource.Author('Lewis', 'Sinclair')
author_baldwin = booksdatasource.Author('Baldwin','James')
author_list_main_street = [author_lewis]
author_list_beale = [author_baldwin]
self.assertTrue((books[0] == booksdatasource.Book('If Beale Street Could Talk', 1974, author_list_beale)) and (books[1] == booksdatasource.Book('Main Street', 1920, author_list_main_street)))
def test_non_unique_title_names_chronological(self):
books = self.data_source.books('street', 'year')
author_lewis = booksdatasource.Author('Lewis', 'Sinclair')
author_baldwin = booksdatasource.Author('Baldwin','James')
author_list_main_street = [author_lewis]
author_list_beale = [author_baldwin]
self.assertTrue((books[1] == booksdatasource.Book('If Beale Street Could Talk', 1974, author_list_beale)) and (books[0] == booksdatasource.Book('Main Street', 1920, author_list_main_street)))
def test_non_unique_author_len(self):
authors = self.data_source.authors('Brontë')
self.assertTrue(len(authors) == 3)
def test_non_unique_author_name(self):
authors = self.data_source.authors('Brontë')
self.assertTrue(authors[0] == booksdatasource.Author('Brontë', 'Ann')and authors[1] == booksdatasource.Author('Brontë', 'Charlotte') and authors[2] ==booksdatasource.Author('Brontë', 'Emily'))
def test_date_return_unique_len(self):
books = self.data_source.books_between_years(1848, 1849)
self.assertTrue(len(books) == 1)
def test_date_return_unique_name(self):
books = self.data_source.books_between_years(1848, 1849)
author = booksdatasource.Author('Bronte', 'Ann')
author_list = [author]
self.assertTrue(books[0] == booksdatasource.Book('The Tenant of Wildfell Hall',1848,author_list))
def test_date_return_non_unique_name(self):
books = self.data_source.books_between_years(2020, None)
author_schwab = booksdatasource.Author('Schwab', 'V.E.')
author_list_schwab = [author_schwab]
author_orenstein = booksdatasource.Author('Orenstein', 'Peggy')
author_list_orenstein = [author_orenstein]
self.assertTrue(books[0] == booksdatasource.Book('Boys and Sex',2020,author_list_orenstein) and books[1] ==booksdatasource.Book('The Invisible Life of Addie LaRue',2020,author_list_schwab))
def test_bad_date_input(self):
self.assertRaises(ValueError, self.data_source.books_between_years, "asdfadsadfs")
def test_reversed_dates(self):
self.assertRaises(ValueError, self.data_source.books_between_years,start_year=2021, end_year = 1900)
if __name__ == '__main__':
unittest.main() | 0.479747 | 0.606265 |
import unittest
from unittest import mock
from airflow.providers.trino.transfers.gcs_to_trino import GCSToTrinoOperator
BUCKET = "source_bucket"
PATH = "path/to/file.csv"
GCP_CONN_ID = "test_gcp"
IMPERSONATION_CHAIN = ["ACCOUNT_1", "ACCOUNT_2", "ACCOUNT_3"]
TRINO_CONN_ID = "test_trino"
TRINO_TABLE = "test_table"
TASK_ID = "test_gcs_to_trino"
SCHEMA_FIELDS = ["colA", "colB", "colC"]
SCHEMA_JSON = "path/to/file.json"
class TestGCSToTrinoOperator(unittest.TestCase):
@mock.patch('airflow.providers.trino.transfers.gcs_to_trino.TrinoHook')
@mock.patch("airflow.providers.trino.transfers.gcs_to_trino.GCSHook")
@mock.patch("airflow.providers.trino.transfers.gcs_to_trino.NamedTemporaryFile")
def test_execute_without_schema(self, mock_tempfile, mock_gcs_hook, mock_trino_hook):
filename = "file://97g23r"
file_handle = mock.MagicMock()
mock_tempfile.return_value.__enter__.return_value = file_handle
mock_tempfile.return_value.__enter__.return_value.name = filename
op = GCSToTrinoOperator(
task_id=TASK_ID,
source_bucket=BUCKET,
source_object=PATH,
trino_table=TRINO_TABLE,
trino_conn_id=TRINO_CONN_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(None)
mock_gcs_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
delegate_to=None,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_trino_hook.assert_called_once_with(trino_conn_id=TRINO_CONN_ID)
mock_download = mock_gcs_hook.return_value.download
mock_download.assert_called_once_with(bucket_name=BUCKET, object_name=PATH, filename=filename)
mock_insert = mock_trino_hook.return_value.insert_rows
mock_insert.assert_called_once()
@mock.patch('airflow.providers.trino.transfers.gcs_to_trino.TrinoHook')
@mock.patch("airflow.providers.trino.transfers.gcs_to_trino.GCSHook")
@mock.patch("airflow.providers.trino.transfers.gcs_to_trino.NamedTemporaryFile")
def test_execute_schema_fields(self, mock_tempfile, mock_gcs_hook, mock_trino_hook):
filename = "file://97g23r"
file_handle = mock.MagicMock()
mock_tempfile.return_value.__enter__.return_value = file_handle
mock_tempfile.return_value.__enter__.return_value.name = filename
op = GCSToTrinoOperator(
task_id=TASK_ID,
source_bucket=BUCKET,
source_object=PATH,
trino_table=TRINO_TABLE,
trino_conn_id=TRINO_CONN_ID,
gcp_conn_id=GCP_CONN_ID,
schema_fields=SCHEMA_FIELDS,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(None)
mock_gcs_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
delegate_to=None,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_trino_hook.assert_called_once_with(trino_conn_id=TRINO_CONN_ID)
mock_download = mock_gcs_hook.return_value.download
mock_download.assert_called_once_with(bucket_name=BUCKET, object_name=PATH, filename=filename)
mock_insert = mock_trino_hook.return_value.insert_rows
mock_insert.assert_called_once()
@mock.patch('airflow.providers.trino.transfers.gcs_to_trino.json.loads')
@mock.patch('airflow.providers.trino.transfers.gcs_to_trino.TrinoHook')
@mock.patch("airflow.providers.trino.transfers.gcs_to_trino.GCSHook")
@mock.patch("airflow.providers.trino.transfers.gcs_to_trino.NamedTemporaryFile")
def test_execute_schema_json(self, mock_tempfile, mock_gcs_hook, mock_trino_hook, mock_json_loader):
filename = "file://97g23r"
file_handle = mock.MagicMock()
mock_tempfile.return_value.__enter__.return_value = file_handle
mock_tempfile.return_value.__enter__.return_value.name = filename
mock_json_loader.return_value = SCHEMA_FIELDS
op = GCSToTrinoOperator(
task_id=TASK_ID,
source_bucket=BUCKET,
source_object=PATH,
trino_table=TRINO_TABLE,
trino_conn_id=TRINO_CONN_ID,
gcp_conn_id=GCP_CONN_ID,
schema_object=SCHEMA_JSON,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(None)
mock_gcs_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
delegate_to=None,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_trino_hook.assert_called_once_with(trino_conn_id=TRINO_CONN_ID)
mock_download = mock_gcs_hook.return_value.download
assert mock_download.call_count == 2
mock_insert = mock_trino_hook.return_value.insert_rows
mock_insert.assert_called_once() | tests/providers/trino/transfers/test_gcs_trino.py | import unittest
from unittest import mock
from airflow.providers.trino.transfers.gcs_to_trino import GCSToTrinoOperator
BUCKET = "source_bucket"
PATH = "path/to/file.csv"
GCP_CONN_ID = "test_gcp"
IMPERSONATION_CHAIN = ["ACCOUNT_1", "ACCOUNT_2", "ACCOUNT_3"]
TRINO_CONN_ID = "test_trino"
TRINO_TABLE = "test_table"
TASK_ID = "test_gcs_to_trino"
SCHEMA_FIELDS = ["colA", "colB", "colC"]
SCHEMA_JSON = "path/to/file.json"
class TestGCSToTrinoOperator(unittest.TestCase):
@mock.patch('airflow.providers.trino.transfers.gcs_to_trino.TrinoHook')
@mock.patch("airflow.providers.trino.transfers.gcs_to_trino.GCSHook")
@mock.patch("airflow.providers.trino.transfers.gcs_to_trino.NamedTemporaryFile")
def test_execute_without_schema(self, mock_tempfile, mock_gcs_hook, mock_trino_hook):
filename = "file://97g23r"
file_handle = mock.MagicMock()
mock_tempfile.return_value.__enter__.return_value = file_handle
mock_tempfile.return_value.__enter__.return_value.name = filename
op = GCSToTrinoOperator(
task_id=TASK_ID,
source_bucket=BUCKET,
source_object=PATH,
trino_table=TRINO_TABLE,
trino_conn_id=TRINO_CONN_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(None)
mock_gcs_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
delegate_to=None,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_trino_hook.assert_called_once_with(trino_conn_id=TRINO_CONN_ID)
mock_download = mock_gcs_hook.return_value.download
mock_download.assert_called_once_with(bucket_name=BUCKET, object_name=PATH, filename=filename)
mock_insert = mock_trino_hook.return_value.insert_rows
mock_insert.assert_called_once()
@mock.patch('airflow.providers.trino.transfers.gcs_to_trino.TrinoHook')
@mock.patch("airflow.providers.trino.transfers.gcs_to_trino.GCSHook")
@mock.patch("airflow.providers.trino.transfers.gcs_to_trino.NamedTemporaryFile")
def test_execute_schema_fields(self, mock_tempfile, mock_gcs_hook, mock_trino_hook):
filename = "file://97g23r"
file_handle = mock.MagicMock()
mock_tempfile.return_value.__enter__.return_value = file_handle
mock_tempfile.return_value.__enter__.return_value.name = filename
op = GCSToTrinoOperator(
task_id=TASK_ID,
source_bucket=BUCKET,
source_object=PATH,
trino_table=TRINO_TABLE,
trino_conn_id=TRINO_CONN_ID,
gcp_conn_id=GCP_CONN_ID,
schema_fields=SCHEMA_FIELDS,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(None)
mock_gcs_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
delegate_to=None,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_trino_hook.assert_called_once_with(trino_conn_id=TRINO_CONN_ID)
mock_download = mock_gcs_hook.return_value.download
mock_download.assert_called_once_with(bucket_name=BUCKET, object_name=PATH, filename=filename)
mock_insert = mock_trino_hook.return_value.insert_rows
mock_insert.assert_called_once()
@mock.patch('airflow.providers.trino.transfers.gcs_to_trino.json.loads')
@mock.patch('airflow.providers.trino.transfers.gcs_to_trino.TrinoHook')
@mock.patch("airflow.providers.trino.transfers.gcs_to_trino.GCSHook")
@mock.patch("airflow.providers.trino.transfers.gcs_to_trino.NamedTemporaryFile")
def test_execute_schema_json(self, mock_tempfile, mock_gcs_hook, mock_trino_hook, mock_json_loader):
filename = "file://97g23r"
file_handle = mock.MagicMock()
mock_tempfile.return_value.__enter__.return_value = file_handle
mock_tempfile.return_value.__enter__.return_value.name = filename
mock_json_loader.return_value = SCHEMA_FIELDS
op = GCSToTrinoOperator(
task_id=TASK_ID,
source_bucket=BUCKET,
source_object=PATH,
trino_table=TRINO_TABLE,
trino_conn_id=TRINO_CONN_ID,
gcp_conn_id=GCP_CONN_ID,
schema_object=SCHEMA_JSON,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(None)
mock_gcs_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
delegate_to=None,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_trino_hook.assert_called_once_with(trino_conn_id=TRINO_CONN_ID)
mock_download = mock_gcs_hook.return_value.download
assert mock_download.call_count == 2
mock_insert = mock_trino_hook.return_value.insert_rows
mock_insert.assert_called_once() | 0.488527 | 0.49762 |
from __future__ import absolute_import
import argparse
import logging
from complete.merge_pipeline.joiner.file_transform import ParseFileTransform
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
def join_info(name_info):
(name, info) = name_info
return '%s; %s; %s' % \
(name, sorted(info['emails']), sorted(info['phones']))
def run(argv=None):
"""Pipeline for reading data from a Cloud Storage bucket and writing the results to BigQuery."""
parser = argparse.ArgumentParser()
parser.add_argument('--credit_card_file',
dest='credit_card_file',
help="File containing a user's credit card details to read in.")
parser.add_argument('--phone_file',
dest='phone_file',
help="File containing a user's phone number to read in.")
parser.add_argument('--output',
dest='output',
help='Output path')
known_args, pipeline_args = parser.parse_known_args(argv)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True
with beam.Pipeline(options=pipeline_options) as p:
phone_header = ['name', 'phone']
phone_rows = (p
| 'ReadRequestFile' >> ReadFromText(known_args.email_file, skip_header_lines=1)
| 'ParseRequestFile' >> ParseFileTransform(phone_header)
| 'CreateRequestKVPairs' >> beam.Map(lambda x: (x['name'], x['phone']))
)
credit_card_header = ['name', 'credit_card']
credit_card_rows = (p
| 'ReadInformationFile' >> ReadFromText(known_args.phone_file, skip_header_lines=1)
| 'ParseInformationFile' >> ParseFileTransform(credit_card_header)
| 'CreateInformationKVPairs' >> beam.Map(lambda x: (x['name'], x['credit_card']))
)
joined_set = ({'phones': phone_rows, 'credit_card': credit_card_rows}
| 'CoGroupById' >> beam.CoGroupByKey()
| 'Format' >> beam.Map(join_info)
)
joined_set | 'WriteToFile' >> beam.io.WriteToText(known_args.output)
p.run().wait_until_finish()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run() | complete/merge_pipeline/join_pipeline.py |
from __future__ import absolute_import
import argparse
import logging
from complete.merge_pipeline.joiner.file_transform import ParseFileTransform
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
def join_info(name_info):
(name, info) = name_info
return '%s; %s; %s' % \
(name, sorted(info['emails']), sorted(info['phones']))
def run(argv=None):
"""Pipeline for reading data from a Cloud Storage bucket and writing the results to BigQuery."""
parser = argparse.ArgumentParser()
parser.add_argument('--credit_card_file',
dest='credit_card_file',
help="File containing a user's credit card details to read in.")
parser.add_argument('--phone_file',
dest='phone_file',
help="File containing a user's phone number to read in.")
parser.add_argument('--output',
dest='output',
help='Output path')
known_args, pipeline_args = parser.parse_known_args(argv)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True
with beam.Pipeline(options=pipeline_options) as p:
phone_header = ['name', 'phone']
phone_rows = (p
| 'ReadRequestFile' >> ReadFromText(known_args.email_file, skip_header_lines=1)
| 'ParseRequestFile' >> ParseFileTransform(phone_header)
| 'CreateRequestKVPairs' >> beam.Map(lambda x: (x['name'], x['phone']))
)
credit_card_header = ['name', 'credit_card']
credit_card_rows = (p
| 'ReadInformationFile' >> ReadFromText(known_args.phone_file, skip_header_lines=1)
| 'ParseInformationFile' >> ParseFileTransform(credit_card_header)
| 'CreateInformationKVPairs' >> beam.Map(lambda x: (x['name'], x['credit_card']))
)
joined_set = ({'phones': phone_rows, 'credit_card': credit_card_rows}
| 'CoGroupById' >> beam.CoGroupByKey()
| 'Format' >> beam.Map(join_info)
)
joined_set | 'WriteToFile' >> beam.io.WriteToText(known_args.output)
p.run().wait_until_finish()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run() | 0.71113 | 0.145267 |
import json
import scrapy
from locations.hours import OpeningHours
from locations.items import GeojsonPointItem
class IkeaSpider(scrapy.Spider):
name = "ikea"
item_attributes = {"brand": "IKEA", "brand_wikidata": "Q54078"}
allowed_domains = ["ikea.com"]
start_urls = [
"https://www.ikea.com/ae/ar/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/bh/ar/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/eg/ar/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/jo/ar/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/kw/ar/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/ma/ar/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/qa/ar/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/sa/ar/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/cz/cs/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/dk/da/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/at/de/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/de/de/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/au/en/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/ca/en/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/gb/en/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/ie/en/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/in/en/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/ph/en/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/sg/en/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/us/en/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/cl/es/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/es/es/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/mx/es/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/fi/fi/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/be/fr/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/ch/fr/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/fr/fr/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/il/he/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/hr/hr/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/hu/hu/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/it/it/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/jp/ja/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/kr/ko/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/my/ms/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/nl/nl/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/no/no/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/pl/pl/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/pt/pt/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/ro/ro/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/ru/ru/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/sk/sk/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/si/sl/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/rs/sr/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/se/sv/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/th/th/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/ua/uk/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/cn/zh/meta-data/navigation/stores-detailed.json",
]
def parse(self, response):
data = response.json()
for store in data:
if "storePageUrl" not in store:
continue
opening_hours = OpeningHours()
for day in store["hours"]["normal"]:
if day["open"] != "":
opening_hours.add_range(
day["day"][0:1].upper() + day["day"][1:2].lower(),
day["open"],
day["close"],
)
properties = {
"lat": store["lat"],
"lon": store["lng"],
"name": store["displayName"],
"street": store["address"].get("street"),
"city": store["address"].get("city"),
"postcode": store["address"].get("zipCode"),
"country": response.request.url[21:23].upper(),
"website": store["storePageUrl"],
"ref": store["id"],
"opening_hours": opening_hours.as_opening_hours(),
"extras": {
"store_type": store["buClassification"]["code"],
},
}
if properties["country"] == "US":
properties["state"] = store["address"].get("stateProvinceCode")[2:]
yield GeojsonPointItem(**properties) | locations/spiders/ikea.py | import json
import scrapy
from locations.hours import OpeningHours
from locations.items import GeojsonPointItem
class IkeaSpider(scrapy.Spider):
name = "ikea"
item_attributes = {"brand": "IKEA", "brand_wikidata": "Q54078"}
allowed_domains = ["ikea.com"]
start_urls = [
"https://www.ikea.com/ae/ar/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/bh/ar/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/eg/ar/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/jo/ar/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/kw/ar/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/ma/ar/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/qa/ar/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/sa/ar/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/cz/cs/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/dk/da/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/at/de/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/de/de/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/au/en/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/ca/en/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/gb/en/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/ie/en/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/in/en/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/ph/en/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/sg/en/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/us/en/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/cl/es/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/es/es/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/mx/es/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/fi/fi/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/be/fr/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/ch/fr/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/fr/fr/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/il/he/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/hr/hr/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/hu/hu/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/it/it/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/jp/ja/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/kr/ko/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/my/ms/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/nl/nl/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/no/no/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/pl/pl/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/pt/pt/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/ro/ro/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/ru/ru/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/sk/sk/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/si/sl/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/rs/sr/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/se/sv/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/th/th/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/ua/uk/meta-data/navigation/stores-detailed.json",
"https://www.ikea.com/cn/zh/meta-data/navigation/stores-detailed.json",
]
def parse(self, response):
data = response.json()
for store in data:
if "storePageUrl" not in store:
continue
opening_hours = OpeningHours()
for day in store["hours"]["normal"]:
if day["open"] != "":
opening_hours.add_range(
day["day"][0:1].upper() + day["day"][1:2].lower(),
day["open"],
day["close"],
)
properties = {
"lat": store["lat"],
"lon": store["lng"],
"name": store["displayName"],
"street": store["address"].get("street"),
"city": store["address"].get("city"),
"postcode": store["address"].get("zipCode"),
"country": response.request.url[21:23].upper(),
"website": store["storePageUrl"],
"ref": store["id"],
"opening_hours": opening_hours.as_opening_hours(),
"extras": {
"store_type": store["buClassification"]["code"],
},
}
if properties["country"] == "US":
properties["state"] = store["address"].get("stateProvinceCode")[2:]
yield GeojsonPointItem(**properties) | 0.412175 | 0.298364 |
table = {
'table_name' : 'adm_tran_types',
'module_id' : 'adm',
'short_descr' : 'Transaction types',
'long_descr' : 'Transaction types',
'sub_types' : None,
'sub_trans' : None,
'sequence' : ['seq', ['module_row_id'], None],
'tree_params' : [
'module_row_id', # group parent
['tran_type', 'descr', None, 'seq'], # code, descr, parent_id, seq
None, # levels
],
'roll_params' : None,
'indexes' : None,
'ledger_col' : None,
'defn_company' : None,
'data_company' : None,
'read_only' : False,
}
# column definitions
cols = []
cols.append ({
'col_name' : 'row_id',
'data_type' : 'AUTO',
'short_descr': 'Row id',
'long_descr' : 'Row id',
'col_head' : 'Row',
'key_field' : 'Y',
'data_source': 'gen',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'created_id',
'data_type' : 'INT',
'short_descr': 'Created id',
'long_descr' : 'Created row id',
'col_head' : 'Created',
'key_field' : 'N',
'data_source': 'gen',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : '0',
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'deleted_id',
'data_type' : 'INT',
'short_descr': 'Deleted id',
'long_descr' : 'Deleted row id',
'col_head' : 'Deleted',
'key_field' : 'N',
'data_source': 'gen',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : '0',
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'tran_type',
'data_type' : 'TEXT',
'short_descr': 'Transaction type',
'long_descr' : 'Transaction type',
'col_head' : 'Tran type',
'key_field' : 'A',
'data_source': 'input',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 15,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'descr',
'data_type' : 'TEXT',
'short_descr': 'Description',
'long_descr' : 'Description',
'col_head' : 'Description',
'key_field' : 'N',
'data_source': 'input',
'condition' : None,
'allow_null' : False,
'allow_amend': True,
'max_len' : 30,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'seq',
'data_type' : 'INT',
'short_descr': 'Sequence',
'long_descr' : 'Sequence',
'col_head' : 'Seq',
'key_field' : 'N',
'data_source': 'seq',
'condition' : None,
'allow_null' : False,
'allow_amend': True,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'module_row_id',
'data_type' : 'INT',
'short_descr': 'Module row id',
'long_descr' : 'Module row id',
'col_head' : 'Module',
'key_field' : 'N',
'data_source': 'input',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : ['db_modules', 'row_id', 'module_id', 'module_id', False, None],
'choices' : None,
})
cols.append ({
'col_name' : 'table_id',
'data_type' : 'INT',
'short_descr': 'Table id',
'long_descr' : 'Table id',
'col_head' : 'Table',
'key_field' : 'N',
'data_source': 'input',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : ['db_tables', 'row_id', 'table_name', 'table_name', False, None],
'choices' : None,
})
# virtual column definitions
virt = []
# cursor definitions
cursors = []
cursors.append({
'cursor_name': 'tran_types',
'title': 'Maintain tran types',
'columns': [
['module_id', 80, False, False],
['tran_type', 80, False, False],
['descr', 200, True, False],
],
'filter': [],
'sequence': [['module_row_id', False], ['seq', False]],
'formview_name': None,
})
# actions
actions = [] | aib/init/tables/adm_tran_types.py | table = {
'table_name' : 'adm_tran_types',
'module_id' : 'adm',
'short_descr' : 'Transaction types',
'long_descr' : 'Transaction types',
'sub_types' : None,
'sub_trans' : None,
'sequence' : ['seq', ['module_row_id'], None],
'tree_params' : [
'module_row_id', # group parent
['tran_type', 'descr', None, 'seq'], # code, descr, parent_id, seq
None, # levels
],
'roll_params' : None,
'indexes' : None,
'ledger_col' : None,
'defn_company' : None,
'data_company' : None,
'read_only' : False,
}
# column definitions
cols = []
cols.append ({
'col_name' : 'row_id',
'data_type' : 'AUTO',
'short_descr': 'Row id',
'long_descr' : 'Row id',
'col_head' : 'Row',
'key_field' : 'Y',
'data_source': 'gen',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'created_id',
'data_type' : 'INT',
'short_descr': 'Created id',
'long_descr' : 'Created row id',
'col_head' : 'Created',
'key_field' : 'N',
'data_source': 'gen',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : '0',
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'deleted_id',
'data_type' : 'INT',
'short_descr': 'Deleted id',
'long_descr' : 'Deleted row id',
'col_head' : 'Deleted',
'key_field' : 'N',
'data_source': 'gen',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : '0',
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'tran_type',
'data_type' : 'TEXT',
'short_descr': 'Transaction type',
'long_descr' : 'Transaction type',
'col_head' : 'Tran type',
'key_field' : 'A',
'data_source': 'input',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 15,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'descr',
'data_type' : 'TEXT',
'short_descr': 'Description',
'long_descr' : 'Description',
'col_head' : 'Description',
'key_field' : 'N',
'data_source': 'input',
'condition' : None,
'allow_null' : False,
'allow_amend': True,
'max_len' : 30,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'seq',
'data_type' : 'INT',
'short_descr': 'Sequence',
'long_descr' : 'Sequence',
'col_head' : 'Seq',
'key_field' : 'N',
'data_source': 'seq',
'condition' : None,
'allow_null' : False,
'allow_amend': True,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : None,
'choices' : None,
})
cols.append ({
'col_name' : 'module_row_id',
'data_type' : 'INT',
'short_descr': 'Module row id',
'long_descr' : 'Module row id',
'col_head' : 'Module',
'key_field' : 'N',
'data_source': 'input',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : ['db_modules', 'row_id', 'module_id', 'module_id', False, None],
'choices' : None,
})
cols.append ({
'col_name' : 'table_id',
'data_type' : 'INT',
'short_descr': 'Table id',
'long_descr' : 'Table id',
'col_head' : 'Table',
'key_field' : 'N',
'data_source': 'input',
'condition' : None,
'allow_null' : False,
'allow_amend': False,
'max_len' : 0,
'db_scale' : 0,
'scale_ptr' : None,
'dflt_val' : None,
'dflt_rule' : None,
'col_checks' : None,
'fkey' : ['db_tables', 'row_id', 'table_name', 'table_name', False, None],
'choices' : None,
})
# virtual column definitions
virt = []
# cursor definitions
cursors = []
cursors.append({
'cursor_name': 'tran_types',
'title': 'Maintain tran types',
'columns': [
['module_id', 80, False, False],
['tran_type', 80, False, False],
['descr', 200, True, False],
],
'filter': [],
'sequence': [['module_row_id', False], ['seq', False]],
'formview_name': None,
})
# actions
actions = [] | 0.280814 | 0.199444 |
# W celu przeprowadzenia operacji odczytu z pliku, musimy otworzyć ten plik w trybie
# odczytu (r) lub edycji (w+, a+, x+).
# Istnieje kilka sposobów odczytu zawartości pliku. Przedstawimy je na przykładzie
# pliku plik.txt o następującej zawartości:
# Pierwsza linia.
# Druga linia.
# Trzecia linia.
# Sposób I: read() *****************************************************************
# Metoda read() odczytuje kolejne znaki (w trybie tekstowym, t) lub kolejne bity
# (w trybie binarnym, b), począwszy od aktualnego położenia kursora. Przyjmuje ona
# argument size, określający, ile znaków lub bitów ma odczytać; jeśli nie określimy
# wartości tego argumentu, metoda ta odczytuje plik aż do napotkania jego końca. Po
# zakończeniu odczytu metoda ustawia kursor na końcu odczytanego fragmentu pliku
# i zwraca jako swoją wartość odczytany łańcuch tekstowy lub dane binarne.
with open("plik.txt", "r", encoding = "utf-8") as f:
# Kursor jest początkowo na początku pliku (tak jest zawsze w trybie r).
txt = f.read(4) # Odczyt czterech znaków i przeniesienie kursosa
# za te znaki (czyli przed piąty znak).
print(txt) # Wynik: "Pier".
txt = f.read(3) # Odczyt kolejnych trzech znaków i przeniesienie kursosa
# za te znaki.
print(txt) # Wynik: "wsz".
txt = f.read() # Odczyt reszty pliku (od aktualnego położenia kursora do końca pliku)
# i przeniesienie kursora na koniec pliku.
print(txt) # Wynik: "a linia.
# Druga linia.
# Trzecia linia."
txt = f.read() # Kursor jest obecnie na końcu pliku, więc nic nie zostanie odczytane.
print(txt) # Wynik: "" (pusty łańcuch tekstowy).
# Położeniem kursora możemy sterować za pomocą dwóch metod:
# tell() - zwraca liczbę określającą aktualną pozycję kursora,
# seek(n) - ustawia kursor na pozycji określonej liczbą n.
# Liczba określająca pozycję kursora informuje, za którym znakiem (w trybie tekstowym, t),
# lub bitem (w trybie binarnym, b) znajduje się kursor. I tak, 0 oznacza, że kursor
# znajduje się na początku pliku, 1 - za pierwszym znakiem etc.
i = f.tell() # Zwraca aktualne położenie kursora. Kursor jest na końcu pliku,
# w wyniku otrzymamy zatem ilość znaków w pliku.
print(i) # Wynik: 45.
f.seek(0) # Ustawia kursor na pozycji 0, czyli na początku pliku.
i = f.tell() # Tym razem kursor jest na początku pliku, otrzymamy zatem 0.
print(i) # Wynik: 0.
txt = f.read() # Kursor jest obecnie na początku pliku, więc to polecenie spowoduje
# odczyt całej zawartości pliku i przeniesienie kursora na jego koniec.
print(txt) # Wynik: "Pierwsza linia.
# Druga linia.
# Trzecia linia."
# Sposób II: Pętla for (tryb t) ****************************************************
# Możemy odczytywać zawartość pliku tekstowego linia po linii, stosując pętlę for.
with open("plik.txt", "r", encoding = "utf-8") as f:
for line in f:
print(line, end = "")
# W kolejnych iteracjach pętli zmienna line zawiera kolejne linie pliku plik.txt.
# Każda linia pliku tekstowego zawiera oczywiście na końcu znak nowej linii,
# dlatego znak ten jest również umieszczany na końcu łańcucha tekstowego będącego
# wartością zmiennej line (z tego powodu zmieniliśmy domyślny koniec linii w funkcji
# print() na pusty łańcuch, inaczej znak nowej linii byłby wypisywany dwukrotnie).
# Sposób III: readline() (tryb t) **************************************************
# Metoda readline() odczytuje aktualną linię pliku tekstowego, począwszy od miejsca,
# w którym znajduje się kursor, aż do znaku nowej linii włącznie, a następnie
# ustawia kursor na początku kolejnej linii. Tak więc, wielokrotne wywoływanie
# tej metody pozwala odczytywać kolejne linie pliku.
with open("plik.txt", "r", encoding = "utf-8") as f:
line = f.readline()
print(line, end = "") # Wynik: "Pierwsza linia."
line = f.readline()
print(line, end = "") # Wynik: "Druga linia."
line = f.readline()
print(line, end = "") # Wynik: "Trzecia linia."
# Kursor jest teraz na końcu pliku.
line = f.readline()
print(line, end = "") # Wynik: "" (pusty łańcuch tekstowy).
# Sposób IV: readlines() (tryb t) **************************************************
# Metoda readlines() zwraca w postaci listy kolejne linie pliku, począwszy od miejsca,
# w którym aktualnie znajduje się kursor, aż do końca pliku. Gdy kursor ustawiony
# jest na końcu pliku, metoda ta zwraca pustą listę.
with open("plik.txt", "r", encoding = "utf-8") as f:
lines = f.readlines()
print(lines)
# Wynik: ["Pierwsza linia.", "Druga linia.", "Trzecia linia."]
# W przypadku dużych plików sposób ten powinien być stosowany bardzo ostrożnie,
# wczytanie do pamięci jednocześnie bardzo wielu linii pliku może doprowadzić
# do wystąpienia wyjątku.
# Sposób V: Iterowanie po pliku binarnym (tryb b) **********************************
# Spośród przedstawionych powyżej sposobów pierwszy jest uniwersalny, można go
# z równym powodzeniem stosować zarówno w przypadku plików tekstowych, jak
# i binarnych. Pozostałe sposoby wykorzystują pojęcie "linii", ich działanie jest
# uzależnione od występowania w pliku znaków końca linii. Może się zdarzyć, że
# w pliku binarnym wystąpi sekwencja bitów odpowiadająca temu znakowi, jednak
# będzie to raczej przypadek, stosowanie metod II - IV w przypadku plików binarnych
# nie ma więc sensu.
# W celu odczytu zawartości plików binarnych należy zatem posługiwać się metodą
# read() (sposób I). To, czy odczytujemy cały plik od razu, czy też operujemy na
# jego fragmentach, powinno zależeć od jego struktury (określonej na przykład
# przez specyfikację dango formatu pliku) i naszych celów.
# Możemy iterować po zawartości pliku binarnego (na wzór sposobu II), nie mając
# jednak do dyspozycji pojęcia linii, musimy określić, jakiego rozmiaru fragmenty
# pliku chcemy jednocześnie przetwarzać. Taki iteracyjny odczyt pliku binarnego
# można zrealizować na przykład stosując pętlę while.
with open("plik.txt", "rb") as f:
size = 200
while True:
data = f.read(size)
if not data:
break
print(data)
# Można też napisać funkcję generującą, która pozwoli nam odczytywać zawartość
# plików binarnych za pomocą pętli for (analogicznie jak dla plików tekstowych
# w sposobie II):
def bin_read(f, size = 1024*64):
data = f.read(size)
while data:
yield data
data = f.read(size)
with open("plik.txt", "rb") as f:
for data in bin_read(f):
print(data) | Notatki/2_Pliki/1_Operacje-na-plikach/2_Odczyt-z-pliku.py |
# W celu przeprowadzenia operacji odczytu z pliku, musimy otworzyć ten plik w trybie
# odczytu (r) lub edycji (w+, a+, x+).
# Istnieje kilka sposobów odczytu zawartości pliku. Przedstawimy je na przykładzie
# pliku plik.txt o następującej zawartości:
# Pierwsza linia.
# Druga linia.
# Trzecia linia.
# Sposób I: read() *****************************************************************
# Metoda read() odczytuje kolejne znaki (w trybie tekstowym, t) lub kolejne bity
# (w trybie binarnym, b), począwszy od aktualnego położenia kursora. Przyjmuje ona
# argument size, określający, ile znaków lub bitów ma odczytać; jeśli nie określimy
# wartości tego argumentu, metoda ta odczytuje plik aż do napotkania jego końca. Po
# zakończeniu odczytu metoda ustawia kursor na końcu odczytanego fragmentu pliku
# i zwraca jako swoją wartość odczytany łańcuch tekstowy lub dane binarne.
with open("plik.txt", "r", encoding = "utf-8") as f:
# Kursor jest początkowo na początku pliku (tak jest zawsze w trybie r).
txt = f.read(4) # Odczyt czterech znaków i przeniesienie kursosa
# za te znaki (czyli przed piąty znak).
print(txt) # Wynik: "Pier".
txt = f.read(3) # Odczyt kolejnych trzech znaków i przeniesienie kursosa
# za te znaki.
print(txt) # Wynik: "wsz".
txt = f.read() # Odczyt reszty pliku (od aktualnego położenia kursora do końca pliku)
# i przeniesienie kursora na koniec pliku.
print(txt) # Wynik: "a linia.
# Druga linia.
# Trzecia linia."
txt = f.read() # Kursor jest obecnie na końcu pliku, więc nic nie zostanie odczytane.
print(txt) # Wynik: "" (pusty łańcuch tekstowy).
# Położeniem kursora możemy sterować za pomocą dwóch metod:
# tell() - zwraca liczbę określającą aktualną pozycję kursora,
# seek(n) - ustawia kursor na pozycji określonej liczbą n.
# Liczba określająca pozycję kursora informuje, za którym znakiem (w trybie tekstowym, t),
# lub bitem (w trybie binarnym, b) znajduje się kursor. I tak, 0 oznacza, że kursor
# znajduje się na początku pliku, 1 - za pierwszym znakiem etc.
i = f.tell() # Zwraca aktualne położenie kursora. Kursor jest na końcu pliku,
# w wyniku otrzymamy zatem ilość znaków w pliku.
print(i) # Wynik: 45.
f.seek(0) # Ustawia kursor na pozycji 0, czyli na początku pliku.
i = f.tell() # Tym razem kursor jest na początku pliku, otrzymamy zatem 0.
print(i) # Wynik: 0.
txt = f.read() # Kursor jest obecnie na początku pliku, więc to polecenie spowoduje
# odczyt całej zawartości pliku i przeniesienie kursora na jego koniec.
print(txt) # Wynik: "Pierwsza linia.
# Druga linia.
# Trzecia linia."
# Sposób II: Pętla for (tryb t) ****************************************************
# Możemy odczytywać zawartość pliku tekstowego linia po linii, stosując pętlę for.
with open("plik.txt", "r", encoding = "utf-8") as f:
for line in f:
print(line, end = "")
# W kolejnych iteracjach pętli zmienna line zawiera kolejne linie pliku plik.txt.
# Każda linia pliku tekstowego zawiera oczywiście na końcu znak nowej linii,
# dlatego znak ten jest również umieszczany na końcu łańcucha tekstowego będącego
# wartością zmiennej line (z tego powodu zmieniliśmy domyślny koniec linii w funkcji
# print() na pusty łańcuch, inaczej znak nowej linii byłby wypisywany dwukrotnie).
# Sposób III: readline() (tryb t) **************************************************
# Metoda readline() odczytuje aktualną linię pliku tekstowego, począwszy od miejsca,
# w którym znajduje się kursor, aż do znaku nowej linii włącznie, a następnie
# ustawia kursor na początku kolejnej linii. Tak więc, wielokrotne wywoływanie
# tej metody pozwala odczytywać kolejne linie pliku.
with open("plik.txt", "r", encoding = "utf-8") as f:
line = f.readline()
print(line, end = "") # Wynik: "Pierwsza linia."
line = f.readline()
print(line, end = "") # Wynik: "Druga linia."
line = f.readline()
print(line, end = "") # Wynik: "Trzecia linia."
# Kursor jest teraz na końcu pliku.
line = f.readline()
print(line, end = "") # Wynik: "" (pusty łańcuch tekstowy).
# Sposób IV: readlines() (tryb t) **************************************************
# Metoda readlines() zwraca w postaci listy kolejne linie pliku, począwszy od miejsca,
# w którym aktualnie znajduje się kursor, aż do końca pliku. Gdy kursor ustawiony
# jest na końcu pliku, metoda ta zwraca pustą listę.
with open("plik.txt", "r", encoding = "utf-8") as f:
lines = f.readlines()
print(lines)
# Wynik: ["Pierwsza linia.", "Druga linia.", "Trzecia linia."]
# W przypadku dużych plików sposób ten powinien być stosowany bardzo ostrożnie,
# wczytanie do pamięci jednocześnie bardzo wielu linii pliku może doprowadzić
# do wystąpienia wyjątku.
# Sposób V: Iterowanie po pliku binarnym (tryb b) **********************************
# Spośród przedstawionych powyżej sposobów pierwszy jest uniwersalny, można go
# z równym powodzeniem stosować zarówno w przypadku plików tekstowych, jak
# i binarnych. Pozostałe sposoby wykorzystują pojęcie "linii", ich działanie jest
# uzależnione od występowania w pliku znaków końca linii. Może się zdarzyć, że
# w pliku binarnym wystąpi sekwencja bitów odpowiadająca temu znakowi, jednak
# będzie to raczej przypadek, stosowanie metod II - IV w przypadku plików binarnych
# nie ma więc sensu.
# W celu odczytu zawartości plików binarnych należy zatem posługiwać się metodą
# read() (sposób I). To, czy odczytujemy cały plik od razu, czy też operujemy na
# jego fragmentach, powinno zależeć od jego struktury (określonej na przykład
# przez specyfikację dango formatu pliku) i naszych celów.
# Możemy iterować po zawartości pliku binarnego (na wzór sposobu II), nie mając
# jednak do dyspozycji pojęcia linii, musimy określić, jakiego rozmiaru fragmenty
# pliku chcemy jednocześnie przetwarzać. Taki iteracyjny odczyt pliku binarnego
# można zrealizować na przykład stosując pętlę while.
with open("plik.txt", "rb") as f:
size = 200
while True:
data = f.read(size)
if not data:
break
print(data)
# Można też napisać funkcję generującą, która pozwoli nam odczytywać zawartość
# plików binarnych za pomocą pętli for (analogicznie jak dla plików tekstowych
# w sposobie II):
def bin_read(f, size = 1024*64):
data = f.read(size)
while data:
yield data
data = f.read(size)
with open("plik.txt", "rb") as f:
for data in bin_read(f):
print(data) | 0.154759 | 0.640509 |
r"""Fit subunits with localized sparsity prior."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
import pickle
from absl import app
from absl import flags
import numpy as np
import scipy.io as sio
from tensorflow.python.platform import gfile
from retina.response_model.python.ASM.su_fit_nov import su_model
flags.DEFINE_string('src_dir',
'/home/bhaishahster/NSEM_process/',
'temporary folder on machine for better I/O')
flags.DEFINE_string('tmp_dir',
'/home/bhaishahster/Downloads/'
'NSEM_process/NSEM_preprocess',
'temporary folder on machine for better I/O')
flags.DEFINE_string('save_path', '/home/bhaishahster/'
'su_fits_nsem_3_datasets/',
'where to store results')
flags.DEFINE_string('save_path_partial', '/home/bhaishahster/'
'su_fits_nsem_3_datasets_partial/',
'where to store results')
flags.DEFINE_string('task_params_file',
'/home/bhaishahster/tasks_nsem_3_datasets.txt',
'parameters of individual tasks')
flags.DEFINE_integer('taskid', 0, 'Task ID')
FLAGS = flags.FLAGS
rng = np.random
def main(argv):
# read line corresponding to task
with gfile.Open(FLAGS.task_params_file, 'r') as f:
for _ in range(FLAGS.taskid + 1):
line = f.readline()
line = line[:-1] # Remove \n from end.
print(line)
# get task parameters by parsing the lines
line_split = line.split(';')
cell_idx = line_split[0]
cell_idx = cell_idx[1:-1].split(',')
cell_idx = int(cell_idx[0])
file_list = gfile.ListDirectory(FLAGS.src_dir)
cell_file = file_list[cell_idx]
print('Cell file %s' % cell_file)
nsub = int(line_split[1])
projection_type = line_split[2]
lam_proj = float(line_split[3])
# copy data
dst = os.path.join(FLAGS.tmp_dir, cell_file)
if not gfile.Exists(dst):
print('Started Copy')
src = os.path.join(FLAGS.src_dir, cell_file)
if not gfile.IsDirectory(FLAGS.tmp_dir):
gfile.MkDir(FLAGS.tmp_dir)
gfile.Copy(src, dst)
print('File copied to destination')
else:
print('File exists')
# load stimulus, response data
try:
data = sio.loadmat(dst)
trainMov_filterNSEM = data['trainMov_filterNSEM']
testMov_filterNSEM = data['testMov_filterNSEM']
trainSpksNSEM = data['trainSpksNSEM']
testSpksNSEM = data['testSpksNSEM']
mask = data['mask']
neighbor_mat = su_model.get_neighbormat(mask, nbd=1)
trainMov_filterWN = data['trainMov_filterWN']
testMov_filterWN = data['testMov_filterWN']
trainSpksWN = data['trainSpksWN']
testSpksWN = data['testSpksWN']
# get NSEM stimulus and resposne
stimulus_WN = np.array(trainMov_filterWN.transpose(), dtype='float32')
response_WN = np.array(np.squeeze(trainSpksWN), dtype='float32')
stimulus_NSEM = np.array(trainMov_filterNSEM.transpose(), dtype='float32')
response_NSEM = np.array(np.squeeze(trainSpksNSEM), dtype='float32')
print('Prepared data')
# Do fitting
# set random seed.
np.random.seed(23)
print('Made partitions')
# Do fitting
# WN data
ifrac = 0.8
tms_train_WN = np.arange(0, np.floor(stimulus_WN.shape[0] *
ifrac)).astype(np.int)
tms_test_WN = np.arange(np.floor(stimulus_WN.shape[0] * ifrac),
1 * np.floor(stimulus_WN.shape[0] *
1)).astype(np.int)
# NSEM data
ifrac = 0.8
tms_train_NSEM = np.arange(0, np.floor(stimulus_NSEM.shape[0] *
ifrac)).astype(np.int)
tms_test_NSEM = np.arange(np.floor(stimulus_NSEM.shape[0] * ifrac),
1 * np.floor(stimulus_NSEM.shape[0] *
1)).astype(np.int)
# Give filename
ss = str(cell_idx)
save_filename = os.path.join(FLAGS.save_path,
'Cell_%s_nsub_%d_%s_%.3f_jnt.pkl' %
(ss, nsub, projection_type,
lam_proj))
save_filename_partial = os.path.join(FLAGS.save_path_partial,
'Cell_%s_nsub_%d_%s_%.3f_jnt.pkl' %
(ss, nsub, projection_type,
lam_proj))
## Do fitting
if not gfile.Exists(save_filename):
# Fit SU on WN
print('Fitting started on WN')
op = su_model.Flat_clustering_jnt(stimulus_WN,
np.expand_dims(response_WN, 1), nsub,
tms_train_WN,
tms_test_WN,
steps_max=10000, eps=1e-9,
projection_type=projection_type,
neighbor_mat=neighbor_mat,
lam_proj=lam_proj, eps_proj=0.01,
save_filename_partial=
save_filename_partial,
fitting_phases=[1])
_, _, alpha, lam_log_wn, lam_log_test_wn, fitting_phase, fit_params_wn = op
print('WN fit done')
# Fit on NSEM
op = su_model.fit_scales(stimulus_NSEM[tms_train_NSEM, :],
np.expand_dims(response_NSEM[tms_train_NSEM], 1),
stimulus_NSEM[tms_test_NSEM, :],
np.expand_dims(response_NSEM[tms_test_NSEM], 1),
Ns=nsub,
K=fit_params_wn[0][0], b=fit_params_wn[0][1],
params=fit_params_wn[0][2], lr=0.01, eps=1e-9)
k_nsem, b_nsem, nl_params_nsem, lam_log_nsem, lam_log_test_nsem = op
# Collect results and save
fit_params = fit_params_wn + [[k_nsem, b_nsem, nl_params_nsem]]
lam_log = [lam_log_wn, np.array(lam_log_nsem)]
lam_log_test = [lam_log_test_wn, np.array(lam_log_test_nsem)]
save_dict = {'lam_log': lam_log, 'lam_log_test': lam_log_test,
'fit_params': fit_params, 'mask': mask}
pickle.dump(save_dict, gfile.Open(save_filename, 'w'))
print('Saved results')
except:
print('Error')
if __name__ == '__main__':
app.run(main) | response_model/python/ASM/su_fit_nov/fit_nsem_3_datasets.py | r"""Fit subunits with localized sparsity prior."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
import pickle
from absl import app
from absl import flags
import numpy as np
import scipy.io as sio
from tensorflow.python.platform import gfile
from retina.response_model.python.ASM.su_fit_nov import su_model
flags.DEFINE_string('src_dir',
'/home/bhaishahster/NSEM_process/',
'temporary folder on machine for better I/O')
flags.DEFINE_string('tmp_dir',
'/home/bhaishahster/Downloads/'
'NSEM_process/NSEM_preprocess',
'temporary folder on machine for better I/O')
flags.DEFINE_string('save_path', '/home/bhaishahster/'
'su_fits_nsem_3_datasets/',
'where to store results')
flags.DEFINE_string('save_path_partial', '/home/bhaishahster/'
'su_fits_nsem_3_datasets_partial/',
'where to store results')
flags.DEFINE_string('task_params_file',
'/home/bhaishahster/tasks_nsem_3_datasets.txt',
'parameters of individual tasks')
flags.DEFINE_integer('taskid', 0, 'Task ID')
FLAGS = flags.FLAGS
rng = np.random
def main(argv):
# read line corresponding to task
with gfile.Open(FLAGS.task_params_file, 'r') as f:
for _ in range(FLAGS.taskid + 1):
line = f.readline()
line = line[:-1] # Remove \n from end.
print(line)
# get task parameters by parsing the lines
line_split = line.split(';')
cell_idx = line_split[0]
cell_idx = cell_idx[1:-1].split(',')
cell_idx = int(cell_idx[0])
file_list = gfile.ListDirectory(FLAGS.src_dir)
cell_file = file_list[cell_idx]
print('Cell file %s' % cell_file)
nsub = int(line_split[1])
projection_type = line_split[2]
lam_proj = float(line_split[3])
# copy data
dst = os.path.join(FLAGS.tmp_dir, cell_file)
if not gfile.Exists(dst):
print('Started Copy')
src = os.path.join(FLAGS.src_dir, cell_file)
if not gfile.IsDirectory(FLAGS.tmp_dir):
gfile.MkDir(FLAGS.tmp_dir)
gfile.Copy(src, dst)
print('File copied to destination')
else:
print('File exists')
# load stimulus, response data
try:
data = sio.loadmat(dst)
trainMov_filterNSEM = data['trainMov_filterNSEM']
testMov_filterNSEM = data['testMov_filterNSEM']
trainSpksNSEM = data['trainSpksNSEM']
testSpksNSEM = data['testSpksNSEM']
mask = data['mask']
neighbor_mat = su_model.get_neighbormat(mask, nbd=1)
trainMov_filterWN = data['trainMov_filterWN']
testMov_filterWN = data['testMov_filterWN']
trainSpksWN = data['trainSpksWN']
testSpksWN = data['testSpksWN']
# get NSEM stimulus and resposne
stimulus_WN = np.array(trainMov_filterWN.transpose(), dtype='float32')
response_WN = np.array(np.squeeze(trainSpksWN), dtype='float32')
stimulus_NSEM = np.array(trainMov_filterNSEM.transpose(), dtype='float32')
response_NSEM = np.array(np.squeeze(trainSpksNSEM), dtype='float32')
print('Prepared data')
# Do fitting
# set random seed.
np.random.seed(23)
print('Made partitions')
# Do fitting
# WN data
ifrac = 0.8
tms_train_WN = np.arange(0, np.floor(stimulus_WN.shape[0] *
ifrac)).astype(np.int)
tms_test_WN = np.arange(np.floor(stimulus_WN.shape[0] * ifrac),
1 * np.floor(stimulus_WN.shape[0] *
1)).astype(np.int)
# NSEM data
ifrac = 0.8
tms_train_NSEM = np.arange(0, np.floor(stimulus_NSEM.shape[0] *
ifrac)).astype(np.int)
tms_test_NSEM = np.arange(np.floor(stimulus_NSEM.shape[0] * ifrac),
1 * np.floor(stimulus_NSEM.shape[0] *
1)).astype(np.int)
# Give filename
ss = str(cell_idx)
save_filename = os.path.join(FLAGS.save_path,
'Cell_%s_nsub_%d_%s_%.3f_jnt.pkl' %
(ss, nsub, projection_type,
lam_proj))
save_filename_partial = os.path.join(FLAGS.save_path_partial,
'Cell_%s_nsub_%d_%s_%.3f_jnt.pkl' %
(ss, nsub, projection_type,
lam_proj))
## Do fitting
if not gfile.Exists(save_filename):
# Fit SU on WN
print('Fitting started on WN')
op = su_model.Flat_clustering_jnt(stimulus_WN,
np.expand_dims(response_WN, 1), nsub,
tms_train_WN,
tms_test_WN,
steps_max=10000, eps=1e-9,
projection_type=projection_type,
neighbor_mat=neighbor_mat,
lam_proj=lam_proj, eps_proj=0.01,
save_filename_partial=
save_filename_partial,
fitting_phases=[1])
_, _, alpha, lam_log_wn, lam_log_test_wn, fitting_phase, fit_params_wn = op
print('WN fit done')
# Fit on NSEM
op = su_model.fit_scales(stimulus_NSEM[tms_train_NSEM, :],
np.expand_dims(response_NSEM[tms_train_NSEM], 1),
stimulus_NSEM[tms_test_NSEM, :],
np.expand_dims(response_NSEM[tms_test_NSEM], 1),
Ns=nsub,
K=fit_params_wn[0][0], b=fit_params_wn[0][1],
params=fit_params_wn[0][2], lr=0.01, eps=1e-9)
k_nsem, b_nsem, nl_params_nsem, lam_log_nsem, lam_log_test_nsem = op
# Collect results and save
fit_params = fit_params_wn + [[k_nsem, b_nsem, nl_params_nsem]]
lam_log = [lam_log_wn, np.array(lam_log_nsem)]
lam_log_test = [lam_log_test_wn, np.array(lam_log_test_nsem)]
save_dict = {'lam_log': lam_log, 'lam_log_test': lam_log_test,
'fit_params': fit_params, 'mask': mask}
pickle.dump(save_dict, gfile.Open(save_filename, 'w'))
print('Saved results')
except:
print('Error')
if __name__ == '__main__':
app.run(main) | 0.567218 | 0.222478 |
import logging
from dq0.sdk.estimators.linear_model import sklearn_lm
import numpy as np
logger = logging.getLogger(__name__)
def get_data_int():
X = np.ones((4, 3))
y_int = np.array([1, 2, 3, 4])
return X, y_int
def test_LogisticRegression_001():
X, y = get_data_int()
estimator = sklearn_lm.LogisticRegression()
estimator.fit(X, y)
logger.debug("LogisticRegression.predict_proba(): {}".format(estimator.predict_proba(X)))
logger.debug("LogisticRegression.predict(): {}".format(estimator.predict(X)))
logger.debug("LogisticRegression.score(): {}".format(estimator.score(X, y)))
def test_RidgeClassifier_001():
X, y = get_data_int()
estimator = sklearn_lm.RidgeClassifier()
estimator.fit(X, y)
logger.debug("RidgeClassifier.predict_proba(): {}".format(estimator.predict_proba(X)))
logger.debug("RidgeClassifier.predict(): {}".format(estimator.predict(X)))
logger.debug("RidgeClassifier.score(): {}".format(estimator.score(X, y)))
def test_LinearRegression_001():
X, y = get_data_int()
estimator = sklearn_lm.LinearRegression()
estimator.fit(X, y)
logger.debug("LinearRegression.predict_proba(): {}".format(estimator.predict_proba(X)))
logger.debug("LinearRegression.predict(): {}".format(estimator.predict(X)))
logger.debug("LinearRegression.score(): {}".format(estimator.score(X, y)))
def test_Ridge_001():
X, y = get_data_int()
estimator = sklearn_lm.Ridge()
estimator.fit(X, y)
logger.debug("Ridge.predict_proba(): {}".format(estimator.predict_proba(X)))
logger.debug("Ridge.predict(): {}".format(estimator.predict(X)))
logger.debug("Ridge.score(): {}".format(estimator.score(X, y)))
def test_Lasso_001():
X, y = get_data_int()
estimator = sklearn_lm.Lasso()
estimator.fit(X, y)
logger.debug("Lasso.predict_proba(): {}".format(estimator.predict_proba(X)))
logger.debug("Lasso.predict(): {}".format(estimator.predict(X)))
logger.debug("Lasso.score(): {}".format(estimator.score(X, y)))
def test_ElasticNet_001():
X, y = get_data_int()
estimator = sklearn_lm.ElasticNet()
estimator.fit(X, y)
logger.debug("ElasticNet.predict_proba(): {}".format(estimator.predict_proba(X)))
logger.debug("ElasticNet.predict(): {}".format(estimator.predict(X)))
logger.debug("ElasticNet.score(): {}".format(estimator.score(X, y)))
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
test_LogisticRegression_001()
test_RidgeClassifier_001()
test_LinearRegression_001()
test_Ridge_001()
test_Lasso_001()
test_ElasticNet_001() | tests/test_estimators/linear_model/test_sklearn_linear_models.py | import logging
from dq0.sdk.estimators.linear_model import sklearn_lm
import numpy as np
logger = logging.getLogger(__name__)
def get_data_int():
X = np.ones((4, 3))
y_int = np.array([1, 2, 3, 4])
return X, y_int
def test_LogisticRegression_001():
X, y = get_data_int()
estimator = sklearn_lm.LogisticRegression()
estimator.fit(X, y)
logger.debug("LogisticRegression.predict_proba(): {}".format(estimator.predict_proba(X)))
logger.debug("LogisticRegression.predict(): {}".format(estimator.predict(X)))
logger.debug("LogisticRegression.score(): {}".format(estimator.score(X, y)))
def test_RidgeClassifier_001():
X, y = get_data_int()
estimator = sklearn_lm.RidgeClassifier()
estimator.fit(X, y)
logger.debug("RidgeClassifier.predict_proba(): {}".format(estimator.predict_proba(X)))
logger.debug("RidgeClassifier.predict(): {}".format(estimator.predict(X)))
logger.debug("RidgeClassifier.score(): {}".format(estimator.score(X, y)))
def test_LinearRegression_001():
X, y = get_data_int()
estimator = sklearn_lm.LinearRegression()
estimator.fit(X, y)
logger.debug("LinearRegression.predict_proba(): {}".format(estimator.predict_proba(X)))
logger.debug("LinearRegression.predict(): {}".format(estimator.predict(X)))
logger.debug("LinearRegression.score(): {}".format(estimator.score(X, y)))
def test_Ridge_001():
X, y = get_data_int()
estimator = sklearn_lm.Ridge()
estimator.fit(X, y)
logger.debug("Ridge.predict_proba(): {}".format(estimator.predict_proba(X)))
logger.debug("Ridge.predict(): {}".format(estimator.predict(X)))
logger.debug("Ridge.score(): {}".format(estimator.score(X, y)))
def test_Lasso_001():
X, y = get_data_int()
estimator = sklearn_lm.Lasso()
estimator.fit(X, y)
logger.debug("Lasso.predict_proba(): {}".format(estimator.predict_proba(X)))
logger.debug("Lasso.predict(): {}".format(estimator.predict(X)))
logger.debug("Lasso.score(): {}".format(estimator.score(X, y)))
def test_ElasticNet_001():
X, y = get_data_int()
estimator = sklearn_lm.ElasticNet()
estimator.fit(X, y)
logger.debug("ElasticNet.predict_proba(): {}".format(estimator.predict_proba(X)))
logger.debug("ElasticNet.predict(): {}".format(estimator.predict(X)))
logger.debug("ElasticNet.score(): {}".format(estimator.score(X, y)))
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
test_LogisticRegression_001()
test_RidgeClassifier_001()
test_LinearRegression_001()
test_Ridge_001()
test_Lasso_001()
test_ElasticNet_001() | 0.485112 | 0.362546 |
import json
import jsonpatch
from tomviz import operators, modules
from ._pipeline import PipelineStateManager
from ._utils import to_namespaces
# Operator attribute names that OperatorMeta copies through unchanged;
# every other attribute in an operator class body gets wrapped in a property.
op_class_attrs = ['description', 'label', 'script', 'type']
class InvalidStateError(RuntimeError):
    """Raised when a dead (invalidated) pipeline element is accessed."""
    pass
class Base(object):
    """Simple attribute bag: each keyword argument becomes an attribute."""

    def __init__(self, **kwargs):
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class Mortal(object):
    """Mixin for objects that can be invalidated.

    After ``_kill()`` is called, any attribute access (other than ``_dead``
    itself) raises :class:`InvalidStateError`.
    """

    # Liveness flag; flipped to True by _kill().
    _dead = False

    def _kill(self):
        """Mark this object as no longer backed by a valid pipeline element."""
        self._dead = True

    def __getattribute__(self, name):
        # '_dead' must bypass the liveness check below: reading self._dead
        # re-enters __getattribute__, so without this early return the check
        # would recurse forever.
        if name == '_dead':
            return super(Mortal, self).__getattribute__(name)
        if self._dead:
            raise InvalidStateError(
                "'%s' no longer represents a valid pipeline element." % self)
        return super(Mortal, self).__getattribute__(name)
class OperatorMeta(type):
    """Metaclass for operators.

    Attributes listed in ``op_class_attrs`` are passed through unchanged;
    every other attribute from the class body is wrapped in a read/write
    property.
    """

    def __init__(cls, name, bases, namespace):
        attrs = {}
        for k, v in namespace.items():
            if k not in op_class_attrs:
                # Bind k as a default argument: a bare closure over the loop
                # variable would make every property use the LAST key
                # (late-binding closure bug).
                attrs[k] = property(
                    lambda self, k=k: getattr(self, k),
                    lambda self, value, k=k: setattr(self, k, value))
            else:
                attrs[k] = v
        # NOTE(review): type.__init__ does not install the entries of this
        # dict as class attributes (only type.__new__ does) — confirm the
        # generated properties actually take effect somewhere downstream.
        super(OperatorMeta, cls).__init__(name, bases, attrs)
class ModuleMeta(type):
    """Metaclass for modules.

    Stashes the entire original class body under a single ``_props`` class
    attribute instead of exposing the body's names directly.
    """

    def __new__(meta, name, parents, dct):
        # (Removed a dead `attrs = {}` assignment that was immediately
        # overwritten in the original.)
        attrs = {
            '_props': dct
        }
        return super(ModuleMeta, meta).__new__(meta, name, parents, attrs)
class Pipeline(Mortal):
def __init__(self, datasource):
self._datasource = datasource
@property
def dataSource(self):
return self._datasource
def _ds_path(self):
from . import _pipeline_index
pipeline_index = _pipeline_index(self._datasource)
if pipeline_index < 0:
raise Exception('Pipeline is not valid.')
return '/dataSources/%d' % pipeline_index
def pause(self):
PipelineStateManager().pause_pipeline(self._ds_path())
def resume(self):
PipelineStateManager().resume_pipeline(self._ds_path())
def execute(self):
paused = self.paused()
if paused:
self.resume()
PipelineStateManager().execute_pipeline(self._ds_path())
if paused:
self.pause()
def paused(self):
return PipelineStateManager().pipeline_paused(self._ds_path())
class Reader(Base, Mortal):
pass
class DataSource(Base, Mortal):
def __init__(self, **kwargs):
self.reader = Reader(
name=kwargs.pop('name', None),
fileNames=kwargs.pop('fileNames', []),
subsampleSettings=kwargs.pop('subsampleSettings', {}))
self.operators = []
self.modules = [modules.Outline(), modules.Slice()]
super(DataSource, self).__init__(**kwargs)
class Operator(Base, Mortal):
pass
class Module(Mortal):
def __init__(self, **kwargs):
args = self._props
args.update(kwargs)
for k, v in args.items():
if isinstance(v, dict):
v = to_namespaces(v)
setattr(self, k, v)
def _updates(self):
from ._schemata import dump_module
current_state = dump_module(self)
patch = jsonpatch.JsonPatch.from_diff(self._props, current_state)
patch = json.loads(patch.to_string())
return patch
class Tomviz(object):
def __init__(self, pipelines=None):
self.pipelines = pipelines if pipelines else []
def module_json_to_classes(module_json):
for name, info in module_json.items():
info['type'] = name
del info['id']
# Default visibility to true
info['properties']['visibility'] = True
if not hasattr(modules, name):
cls = ModuleMeta(name, (Module,), info)
setattr(modules, name, cls)
def operator_json_to_classes(operator_json):
for name, info in operator_json.items():
del info['id']
if not hasattr(operators, name):
cls = OperatorMeta(name, (Operator,), info)
setattr(operators, name, cls)
def init_modules():
module_json_str = PipelineStateManager().module_json()
module_json = json.loads(module_json_str)
module_json_to_classes(module_json)
def init_operators():
operator_json_str = PipelineStateManager().operator_json()
operator_json = json.loads(operator_json_str)
operator_json_to_classes(operator_json) | tomviz/python/tomviz/state/_models.py | import json
import jsonpatch
from tomviz import operators, modules
from ._pipeline import PipelineStateManager
from ._utils import to_namespaces
op_class_attrs = ['description', 'label', 'script', 'type']
class InvalidStateError(RuntimeError):
pass
class Base(object):
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
class Mortal(object):
_dead = False
def _kill(self):
self._dead = True
def __getattribute__(self, item):
# Make an exception for the attribute we need to check or we will go
# recursive!
if item == '_dead':
return super(Mortal, self).__getattribute__(item)
if self._dead:
raise InvalidStateError(
"'%s' no longer represents a valid pipeline element." % self)
else:
return super(Mortal, self).__getattribute__(item)
class OperatorMeta(type):
def __init__(cls, name, bases, dict):
attrs = {}
for k, v in dict.items():
if k not in op_class_attrs:
attrs[k] = property(lambda self: getattr(self, k),
lambda self, value: setattr(self, k, value))
else:
attrs[k] = v
super(OperatorMeta, cls).__init__(name, bases, attrs)
class ModuleMeta(type):
def __new__(meta, name, parents, dct):
attrs = {}
attrs = {
'_props': dct
}
return super(ModuleMeta, meta).__new__(meta, name, parents, attrs)
class Pipeline(Mortal):
def __init__(self, datasource):
self._datasource = datasource
@property
def dataSource(self):
return self._datasource
def _ds_path(self):
from . import _pipeline_index
pipeline_index = _pipeline_index(self._datasource)
if pipeline_index < 0:
raise Exception('Pipeline is not valid.')
return '/dataSources/%d' % pipeline_index
def pause(self):
PipelineStateManager().pause_pipeline(self._ds_path())
def resume(self):
PipelineStateManager().resume_pipeline(self._ds_path())
def execute(self):
paused = self.paused()
if paused:
self.resume()
PipelineStateManager().execute_pipeline(self._ds_path())
if paused:
self.pause()
def paused(self):
return PipelineStateManager().pipeline_paused(self._ds_path())
class Reader(Base, Mortal):
pass
class DataSource(Base, Mortal):
def __init__(self, **kwargs):
self.reader = Reader(
name=kwargs.pop('name', None),
fileNames=kwargs.pop('fileNames', []),
subsampleSettings=kwargs.pop('subsampleSettings', {}))
self.operators = []
self.modules = [modules.Outline(), modules.Slice()]
super(DataSource, self).__init__(**kwargs)
class Operator(Base, Mortal):
pass
class Module(Mortal):
def __init__(self, **kwargs):
args = self._props
args.update(kwargs)
for k, v in args.items():
if isinstance(v, dict):
v = to_namespaces(v)
setattr(self, k, v)
def _updates(self):
from ._schemata import dump_module
current_state = dump_module(self)
patch = jsonpatch.JsonPatch.from_diff(self._props, current_state)
patch = json.loads(patch.to_string())
return patch
class Tomviz(object):
def __init__(self, pipelines=None):
self.pipelines = pipelines if pipelines else []
def module_json_to_classes(module_json):
for name, info in module_json.items():
info['type'] = name
del info['id']
# Default visibility to true
info['properties']['visibility'] = True
if not hasattr(modules, name):
cls = ModuleMeta(name, (Module,), info)
setattr(modules, name, cls)
def operator_json_to_classes(operator_json):
for name, info in operator_json.items():
del info['id']
if not hasattr(operators, name):
cls = OperatorMeta(name, (Operator,), info)
setattr(operators, name, cls)
def init_modules():
module_json_str = PipelineStateManager().module_json()
module_json = json.loads(module_json_str)
module_json_to_classes(module_json)
def init_operators():
operator_json_str = PipelineStateManager().operator_json()
operator_json = json.loads(operator_json_str)
operator_json_to_classes(operator_json) | 0.582135 | 0.127843 |
from . import logging
from . import exceptions
__all__ = ['ScopedContextOverride', 'ScopedActionGroup', 'ScopedProgressManager']
class ScopedContextOverride(object):
"""
A convenience context manager to allow locale, access and retention to be
overridden within a scope.
"""
def __init__(self, context):
super(ScopedContextOverride, self).__init__()
self.context = context
def __enter__(self, *args, **kwargs):
if self.context:
self.oldLocale = self.context.locale
self.oldAccess = self.context.access
self.oldRetention = self.context.retention
return self
def __exit__(self, *args, **kwargs):
if self.context:
self.context.locale = self.oldLocale
self.context.access = self.oldAccess
self.context.retention = self.oldRetention
class ScopedActionGroup(object):
"""
A convenience class to push/pop an action group based on the lifetime of the
object, useful when combined with a 'with' statement.
"""
def __init__(self, session, context, cancelOnException=True):
super(ScopedActionGroup, self).__init__()
self.__session = session
self.__context = context
self.__cancelOnException = cancelOnException
def __enter__(self):
self.__session.pushActionGroup(self.__context)
def __exit__(self, exceptionType, exceptionValue, traceback):
if exceptionType is not None and self.__cancelOnException:
self.__session.cancelActions(self.__context)
else:
self.__session.popActionGroup(self.__context)
class ScopedProgressManager(object):
"""
Helps manage progress steps in iterated code. Allows simple with statements
to be used to signal, update and cancel progress. It will automatically end
progress if an exception is raised.
@todo Check the exception -> cancel behaviour as it isn't always happening
@todo The actual submitted progress values are off at present I think.
@code
items = ...
with ScopedProgressManager(len(items)) as progress:
for i in items:
with progress.step("Doing something to %s" % i):
i.doSomething()
@endcode
"""
class ProgressItem(object):
def __init__(self, manager, msg):
super(ScopedProgressManager.ProgressItem, self).__init__()
self.manager = manager
self.msg = msg
def __enter__(self, *args, **kwargs):
self.manager.startStep(self.msg)
def __exit__(self, *args, **kwargs):
self.manager.finishStep()
def __init__(self, itemCount):
super(ScopedProgressManager, self).__init__()
self.numItems = float(itemCount)
self.currentItem = 0
self.lastMsg = ''
def __enter__(self, *args, **kwargs):
return self
def __exit__(self, *args, **kwargs):
logging.progress(-1)
def step(self, msg=None):
return ScopedProgressManager.ProgressItem(self, msg)
def startStep(self, msg):
self.lastMsg = msg
cancelled = logging.progress(self.currentItem/self.numItems, msg)
if cancelled:
raise exceptions.UserCanceled
return cancelled
def finishStep(self):
self.currentItem += 1
cancelled = logging.progress(self.currentItem/self.numItems,
self.lastMsg)
if cancelled:
raise exceptions.UserCanceled
return cancelled | source/FnAssetAPI/contextManagers.py | from . import logging
from . import exceptions
__all__ = ['ScopedContextOverride', 'ScopedActionGroup', 'ScopedProgressManager']
class ScopedContextOverride(object):
"""
A convenience context manager to allow locale, access and retention to be
overridden within a scope.
"""
def __init__(self, context):
super(ScopedContextOverride, self).__init__()
self.context = context
def __enter__(self, *args, **kwargs):
if self.context:
self.oldLocale = self.context.locale
self.oldAccess = self.context.access
self.oldRetention = self.context.retention
return self
def __exit__(self, *args, **kwargs):
if self.context:
self.context.locale = self.oldLocale
self.context.access = self.oldAccess
self.context.retention = self.oldRetention
class ScopedActionGroup(object):
"""
A convenience class to push/pop an action group based on the lifetime of the
object, useful when combined with a 'with' statement.
"""
def __init__(self, session, context, cancelOnException=True):
super(ScopedActionGroup, self).__init__()
self.__session = session
self.__context = context
self.__cancelOnException = cancelOnException
def __enter__(self):
self.__session.pushActionGroup(self.__context)
def __exit__(self, exceptionType, exceptionValue, traceback):
if exceptionType is not None and self.__cancelOnException:
self.__session.cancelActions(self.__context)
else:
self.__session.popActionGroup(self.__context)
class ScopedProgressManager(object):
"""
Helps manage progress steps in iterated code. Allows simple with statements
to be used to signal, update and cancel progress. It will automatically end
progress if an exception is raised.
@todo Check the exception -> cancel behaviour as it isn't always happening
@todo The actual submitted progress values are off at present I think.
@code
items = ...
with ScopedProgressManager(len(items)) as progress:
for i in items:
with progress.step("Doing something to %s" % i):
i.doSomething()
@endcode
"""
class ProgressItem(object):
def __init__(self, manager, msg):
super(ScopedProgressManager.ProgressItem, self).__init__()
self.manager = manager
self.msg = msg
def __enter__(self, *args, **kwargs):
self.manager.startStep(self.msg)
def __exit__(self, *args, **kwargs):
self.manager.finishStep()
def __init__(self, itemCount):
super(ScopedProgressManager, self).__init__()
self.numItems = float(itemCount)
self.currentItem = 0
self.lastMsg = ''
def __enter__(self, *args, **kwargs):
return self
def __exit__(self, *args, **kwargs):
logging.progress(-1)
def step(self, msg=None):
return ScopedProgressManager.ProgressItem(self, msg)
def startStep(self, msg):
self.lastMsg = msg
cancelled = logging.progress(self.currentItem/self.numItems, msg)
if cancelled:
raise exceptions.UserCanceled
return cancelled
def finishStep(self):
self.currentItem += 1
cancelled = logging.progress(self.currentItem/self.numItems,
self.lastMsg)
if cancelled:
raise exceptions.UserCanceled
return cancelled | 0.482673 | 0.13589 |
import multiprocessing
from datetime import datetime
from methods.fuser import Fuser
from methods.fuser import init_indices
from methods.fuser import init_matched_group
from methods.dna import PatternDNA
from utils import accessor
read_path = "../data_set/Ecoli_K-1_MG1655.protein.fa"
write_path = "../output/first.csv"
def calculate_overlap(pool, row, col, lock, current, total, start):
fuser = Fuser(init_indices(pool[row], pool[col]))
overlap_length = fuser.calculate_overlap(init_matched_group(pool[row], pool[col]))
lock.acquire()
current.value += 1
lock.release()
print("\rdetect: " + str(current.value) + ", (" + str(total.value) +
") || (" + str(row) + ", " + str(col) + ") = " + str(overlap_length) +
" || left time = " + str(datetime.now() - start), end=" ")
del fuser
return [row, col, overlap_length]
if __name__ == '__main__':
start_time = datetime.now()
source_pool = []
indices, data_domains = accessor.read_information_by_fa(read_path)
index = 0
for index in range(len(data_domains)):
source_pool.append(PatternDNA([index], protein=map(ord, data_domains[index])))
index += 1
results = []
overlap_matrix = [[0 for col in range(len(source_pool))] for row in range(len(source_pool) - 1)]
share_lock = multiprocessing.Manager().Lock()
manager = multiprocessing.Manager()
current_count = multiprocessing.Manager().Value("i", 0)
total_count = multiprocessing.Manager().Value("i", int((len(source_pool) * (len(source_pool) - 1)) / 2))
process_pool = multiprocessing.Pool(processes=multiprocessing.cpu_count() - 1)
for row in range(len(source_pool) - 1):
for col in range(row + 1, len(source_pool)):
result = process_pool.apply_async(calculate_overlap, args=(source_pool, row, col, share_lock,
current_count, total_count, start_time))
results.append(result)
process_pool.close()
process_pool.join()
for result in results:
overlap_matrix[result.get()[0]][result.get()[1]] = result.get()[2]
print()
end_time = datetime.now()
print("The function run time is : %.03f seconds" % (end_time - start_time).seconds)
# accessor.format_output("overlap matrix", overlap_matrix)
with open(write_path, "w", encoding="utf-8") as save_file:
for row in range(len(overlap_matrix)):
row_data = str(overlap_matrix[row])[1: -1]
save_file.write(row_data + "\n") | tentative_test/first_matrix.py | import multiprocessing
from datetime import datetime
from methods.fuser import Fuser
from methods.fuser import init_indices
from methods.fuser import init_matched_group
from methods.dna import PatternDNA
from utils import accessor
read_path = "../data_set/Ecoli_K-1_MG1655.protein.fa"
write_path = "../output/first.csv"
def calculate_overlap(pool, row, col, lock, current, total, start):
fuser = Fuser(init_indices(pool[row], pool[col]))
overlap_length = fuser.calculate_overlap(init_matched_group(pool[row], pool[col]))
lock.acquire()
current.value += 1
lock.release()
print("\rdetect: " + str(current.value) + ", (" + str(total.value) +
") || (" + str(row) + ", " + str(col) + ") = " + str(overlap_length) +
" || left time = " + str(datetime.now() - start), end=" ")
del fuser
return [row, col, overlap_length]
if __name__ == '__main__':
start_time = datetime.now()
source_pool = []
indices, data_domains = accessor.read_information_by_fa(read_path)
index = 0
for index in range(len(data_domains)):
source_pool.append(PatternDNA([index], protein=map(ord, data_domains[index])))
index += 1
results = []
overlap_matrix = [[0 for col in range(len(source_pool))] for row in range(len(source_pool) - 1)]
share_lock = multiprocessing.Manager().Lock()
manager = multiprocessing.Manager()
current_count = multiprocessing.Manager().Value("i", 0)
total_count = multiprocessing.Manager().Value("i", int((len(source_pool) * (len(source_pool) - 1)) / 2))
process_pool = multiprocessing.Pool(processes=multiprocessing.cpu_count() - 1)
for row in range(len(source_pool) - 1):
for col in range(row + 1, len(source_pool)):
result = process_pool.apply_async(calculate_overlap, args=(source_pool, row, col, share_lock,
current_count, total_count, start_time))
results.append(result)
process_pool.close()
process_pool.join()
for result in results:
overlap_matrix[result.get()[0]][result.get()[1]] = result.get()[2]
print()
end_time = datetime.now()
print("The function run time is : %.03f seconds" % (end_time - start_time).seconds)
# accessor.format_output("overlap matrix", overlap_matrix)
with open(write_path, "w", encoding="utf-8") as save_file:
for row in range(len(overlap_matrix)):
row_data = str(overlap_matrix[row])[1: -1]
save_file.write(row_data + "\n") | 0.214691 | 0.151278 |
import sys
import random
import csv
import numpy as np
from sklearn.cross_validation import KFold
# custom modules
import network
from utils import data
FOLDS = 10
def main():
filename = sys.argv[1]
X = data.load_dataset('{}_X.npy'.format(filename))
Y = data.load_dataset('{}_Y.npy'.format(filename))
model = network.build_model()
# vizualize the model
network.vizualize_model(model, filename)
# 80:20
# print network.train_model(model, (X, Y))
# score = model.evaluate(X, Y, verbose=0)
# print 'Test score:', score[0]
# K-Fold
val_error = []
losses = []
kf = KFold(Y.shape[0], n_folds=FOLDS, shuffle=True, random_state=None)
for train_index, val_index in kf:
# Generate the dataset for this fold
X_train, X_val = X[train_index], X[val_index]
Y_train, Y_val = Y[train_index], Y[val_index]
print X_train.shape, X_val.shape
print Y_train.shape, Y_val.shape
# Train the model on this dataset
train_history, loss_history = network.train_model(model, (X_train, Y_train), (X_val, Y_val))
# TODO: save the losses to a file.
losses.append(loss_history.losses)
# Evaluate the model
val_error = model.evaluate(X_val, Y_val, verbose=0)
print 'Validation error:', val_error
# NOTE: hack to run only one split
break
# Print final K-Fold error
print "K-Fold Error: %0.2f (+/- %0.2f)" % (val_error.mean(), val_error.std() * 2)
# Predict some labels
# TODO: modify this to suit our image needs.
counter = 0
while counter < 1:
idx = random.choice(xrange(Y.shape[0]))
prediction = network.predict_model(model, np.expand_dims(X[idx,:], axis=0))
print 'Testing: sample={}, prediction={}, actual={}'.format(
idx, prediction, Y[idx,:])
# save this file
data.generate_image(prediction)
counter += 1
# dump the model to the file
network.save_model(model, filename)
if __name__ == '__main__':
main() | run.py | import sys
import random
import csv
import numpy as np
from sklearn.cross_validation import KFold
# custom modules
import network
from utils import data
FOLDS = 10
def main():
filename = sys.argv[1]
X = data.load_dataset('{}_X.npy'.format(filename))
Y = data.load_dataset('{}_Y.npy'.format(filename))
model = network.build_model()
# vizualize the model
network.vizualize_model(model, filename)
# 80:20
# print network.train_model(model, (X, Y))
# score = model.evaluate(X, Y, verbose=0)
# print 'Test score:', score[0]
# K-Fold
val_error = []
losses = []
kf = KFold(Y.shape[0], n_folds=FOLDS, shuffle=True, random_state=None)
for train_index, val_index in kf:
# Generate the dataset for this fold
X_train, X_val = X[train_index], X[val_index]
Y_train, Y_val = Y[train_index], Y[val_index]
print X_train.shape, X_val.shape
print Y_train.shape, Y_val.shape
# Train the model on this dataset
train_history, loss_history = network.train_model(model, (X_train, Y_train), (X_val, Y_val))
# TODO: save the losses to a file.
losses.append(loss_history.losses)
# Evaluate the model
val_error = model.evaluate(X_val, Y_val, verbose=0)
print 'Validation error:', val_error
# NOTE: hack to run only one split
break
# Print final K-Fold error
print "K-Fold Error: %0.2f (+/- %0.2f)" % (val_error.mean(), val_error.std() * 2)
# Predict some labels
# TODO: modify this to suit our image needs.
counter = 0
while counter < 1:
idx = random.choice(xrange(Y.shape[0]))
prediction = network.predict_model(model, np.expand_dims(X[idx,:], axis=0))
print 'Testing: sample={}, prediction={}, actual={}'.format(
idx, prediction, Y[idx,:])
# save this file
data.generate_image(prediction)
counter += 1
# dump the model to the file
network.save_model(model, filename)
if __name__ == '__main__':
main() | 0.192615 | 0.296425 |
import glob
import sys
import time
from typing import List, Optional
import serial
def search_for_serial_devices(device: str) -> List[str]:
"""Returns a list of device paths with corresponding device name.
If the device identification string contains the string given in the input
paramter 'device', the device path is appended to the return list.
Used as a backup method for device lookup in the event user does not
know exact device path - list of device paths allows it to be used as part
of a dropdown selection.
Args:
device: Name of target device.
Returns:
List of device paths for which 'device' partially matches the returned
identifier from a device identification request.
Raises:
EnvironmentError: Unsupported OS.
"""
if sys.platform.startswith("win"):
ports = [f"COM{i}" for i in range(1, 257)]
elif sys.platform.startswith("linux") or sys.platform.startswith("cygwin"):
# this excludes your current terminal "/dev/tty"
ports = glob.glob("/dev/tty[A-Za-z]*")
elif sys.platform.startswith("darwin"):
ports = glob.glob("/dev/tty.*")
else:
raise EnvironmentError("Unsupported platform")
result = []
for port in ports:
try:
s = SerialConnection(port)
try:
id_str = s.getresponse("*IDN?")
finally:
s.close() # guarantee port is closed
if device in id_str:
result.append(port)
except serial.SerialException:
pass
return result
class SerialConnection(serial.Serial):
"""
The USB device is seen as an object through this class,
inherited from the generic serial one.
Note:
Inheritance used because SerialConnection extends Serial with custom
functionality, while avoiding manually exposing methods.
"""
BUFFER_WAITTIME: float = 0.01 # duration to allow buffer to populate, in seconds
def __init__(self, device_path: str, timeout: float = 0.1):
"""Initializes the connection to the USB device.
Args:
device_path: The full path to the serial_device.
timeout: Device timeout.
Raises:
serial.SerialException:
Port does not exist, no access permissions or attempted
read/write on unopened port.
"""
super().__init__(device_path, timeout=timeout)
self.cleanup()
def cleanup(self):
"""Cleans up the device to prepare for IO.
Resets the input and output buffers if data is present, and repeatedly
checks until buffers are empty or read timeout of device is reached
(0.1 seconds if not specified).
Raises:
serial.SerialException: Attempted to access a closed port.
"""
timeout = 0.1 if self.timeout is None else self.timeout
end_time = time.time() + timeout
while True:
time.sleep(SerialConnection.BUFFER_WAITTIME)
if not (self.in_waiting or self.out_waiting):
break
self.reset_input_buffer()
self.reset_output_buffer()
if time.time() > end_time:
break
@classmethod
def connect_by_name(cls, device: str):
"""Searches for and returns a connection to the specified device.
Args:
device: Name of target device.
Returns:
SerialConnection with port opened.
Raises:
serial.SerialException: Number of matching ports not exactly one.
"""
ports: List = search_for_serial_devices(device)
if not ports:
raise serial.SerialException(f"No available '{device}' devices connected.")
if len(ports) > 1:
raise serial.SerialException(
f"More than one '{device}' device available. "
+ "Please specify the full device path."
)
return SerialConnection(ports[0])
def getresponses(self, cmd: str, timeout: Optional[float] = None) -> List[str]:
"""Sends command and reads the device response.
Commands do not need to be terminated by a newline, unless commands
are chained together.
Timeout can be defined independently from the general timeout device.
This is useful for measurements with integration time longer than
communication timeout. The timeout for the response uses the following
values in order of precedence:
1. timeout, if specified
2. SerialConnection.timeout, if not None
3. 0.1 seconds
Args:
cmd: Command to send. No newline is necessary.
timeout: Optional timeout override in seconds. Defaults to None.
Returns:
Multi-line reply of the device, stripped of leading/trailing whitespace.
Raises:
serial.SerialException: Attempted to access a closed port.
Note:
This behaviour seems to identical to a combination of `cleanup()`,
`writeline(cmd)` and `readlines()`, with the exception of the
additional read timeout override. To consider refactoring to
`readlines()` + read timeout adjustment instead.
"""
self.cleanup()
self.writeline(cmd)
# Wait until characters are available, or until timeout reached
if timeout is None:
timeout = 0.1 if self.timeout is None else self.timeout
end_time = time.time() + timeout
while not self.in_waiting:
if time.time() > end_time:
break
# Used instead of Serial.readlines() to allow consecutive blank lines as well
# Flush all the incoming buffer repeatedly
replies = bytearray()
while True:
if not self.in_waiting:
break
replies.extend(self.read(self.in_waiting))
if time.time() > end_time:
break
time.sleep(SerialConnection.BUFFER_WAITTIME)
return [line.strip("\r\n") for line in replies.decode().split("\n")]
def getresponse(self, cmd: str, timeout: Optional[float] = None) -> str:
"""Sends command and reads a single-line device response.
Commands do not need to be terminated by a newline, unless commands
are chained together.
Timeout can be defined independently from the general timeout device.
This is useful for measurements with integration time longer than
communication timeout. The timeout for the response uses the following
values in order of precedence:
1. timeout, if specified
2. SerialConnection.timeout, if not None
3. 0.1 seconds
Args:
cmd: Command to send. No newline is necessary.
timeout: Optional timeout override in seconds. Defaults to None.
Returns:
Single line reply of the device, stripped of leading/trailing whitespace.
Raises:
serial.SerialException: Attempted to access a closed port.
"""
self.cleanup()
self.writeline(cmd)
# Wait until characters are available, or until timeout reached
if timeout is None:
timeout = 0.1 if self.timeout is None else self.timeout
end_time = time.time() + timeout
while not self.in_waiting:
if time.time() > end_time:
break
# Flush all the incoming buffer repeatedly
reply = bytearray()
while True:
reply.extend(self.read_until(b"\n", self.in_waiting))
if reply and reply[-1] == 10: # b'\n' === int(10)
break
if time.time() > end_time:
break
time.sleep(SerialConnection.BUFFER_WAITTIME)
return reply.decode().strip("\r\n")
def writeline(self, cmd: str) -> None:
"""Sends command to device.
Commands do not need to be terminated by a newline, unless commands
are chained together.
Args:
cmd: Command to send. No newline is necessary.
Raises:
serial.SerialException: Attempted to access a closed port.
"""
self.write("{};".format(cmd).encode())
def get_help(self) -> str:
"""Returns the help information stored on device.
Raises:
serial.SerialException: Attempted to access a closed port.
"""
return "\n".join(self.getresponses("HELP"))
def get_identity(self) -> str:
"""Returns identity of device.
Raises:
serial.SerialException: Attempted to access a closed port.
"""
return self.getresponse("*IDN?") | S15lib/instruments/serial_connection.py | import glob
import sys
import time
from typing import List, Optional
import serial
def search_for_serial_devices(device: str) -> List[str]:
"""Returns a list of device paths with corresponding device name.
If the device identification string contains the string given in the input
paramter 'device', the device path is appended to the return list.
Used as a backup method for device lookup in the event user does not
know exact device path - list of device paths allows it to be used as part
of a dropdown selection.
Args:
device: Name of target device.
Returns:
List of device paths for which 'device' partially matches the returned
identifier from a device identification request.
Raises:
EnvironmentError: Unsupported OS.
"""
if sys.platform.startswith("win"):
ports = [f"COM{i}" for i in range(1, 257)]
elif sys.platform.startswith("linux") or sys.platform.startswith("cygwin"):
# this excludes your current terminal "/dev/tty"
ports = glob.glob("/dev/tty[A-Za-z]*")
elif sys.platform.startswith("darwin"):
ports = glob.glob("/dev/tty.*")
else:
raise EnvironmentError("Unsupported platform")
result = []
for port in ports:
try:
s = SerialConnection(port)
try:
id_str = s.getresponse("*IDN?")
finally:
s.close() # guarantee port is closed
if device in id_str:
result.append(port)
except serial.SerialException:
pass
return result
class SerialConnection(serial.Serial):
"""
The USB device is seen as an object through this class,
inherited from the generic serial one.
Note:
Inheritance used because SerialConnection extends Serial with custom
functionality, while avoiding manually exposing methods.
"""
BUFFER_WAITTIME: float = 0.01 # duration to allow buffer to populate, in seconds
def __init__(self, device_path: str, timeout: float = 0.1):
"""Initializes the connection to the USB device.
Args:
device_path: The full path to the serial_device.
timeout: Device timeout.
Raises:
serial.SerialException:
Port does not exist, no access permissions or attempted
read/write on unopened port.
"""
super().__init__(device_path, timeout=timeout)
self.cleanup()
def cleanup(self):
"""Cleans up the device to prepare for IO.
Resets the input and output buffers if data is present, and repeatedly
checks until buffers are empty or read timeout of device is reached
(0.1 seconds if not specified).
Raises:
serial.SerialException: Attempted to access a closed port.
"""
timeout = 0.1 if self.timeout is None else self.timeout
end_time = time.time() + timeout
while True:
time.sleep(SerialConnection.BUFFER_WAITTIME)
if not (self.in_waiting or self.out_waiting):
break
self.reset_input_buffer()
self.reset_output_buffer()
if time.time() > end_time:
break
@classmethod
def connect_by_name(cls, device: str):
"""Searches for and returns a connection to the specified device.
Args:
device: Name of target device.
Returns:
SerialConnection with port opened.
Raises:
serial.SerialException: Number of matching ports not exactly one.
"""
ports: List = search_for_serial_devices(device)
if not ports:
raise serial.SerialException(f"No available '{device}' devices connected.")
if len(ports) > 1:
raise serial.SerialException(
f"More than one '{device}' device available. "
+ "Please specify the full device path."
)
return SerialConnection(ports[0])
def getresponses(self, cmd: str, timeout: Optional[float] = None) -> List[str]:
"""Sends command and reads the device response.
Commands do not need to be terminated by a newline, unless commands
are chained together.
Timeout can be defined independently from the general timeout device.
This is useful for measurements with integration time longer than
communication timeout. The timeout for the response uses the following
values in order of precedence:
1. timeout, if specified
2. SerialConnection.timeout, if not None
3. 0.1 seconds
Args:
cmd: Command to send. No newline is necessary.
timeout: Optional timeout override in seconds. Defaults to None.
Returns:
Multi-line reply of the device, stripped of leading/trailing whitespace.
Raises:
serial.SerialException: Attempted to access a closed port.
Note:
This behaviour seems to identical to a combination of `cleanup()`,
`writeline(cmd)` and `readlines()`, with the exception of the
additional read timeout override. To consider refactoring to
`readlines()` + read timeout adjustment instead.
"""
self.cleanup()
self.writeline(cmd)
# Wait until characters are available, or until timeout reached
if timeout is None:
timeout = 0.1 if self.timeout is None else self.timeout
end_time = time.time() + timeout
while not self.in_waiting:
if time.time() > end_time:
break
# Used instead of Serial.readlines() to allow consecutive blank lines as well
# Flush all the incoming buffer repeatedly
replies = bytearray()
while True:
if not self.in_waiting:
break
replies.extend(self.read(self.in_waiting))
if time.time() > end_time:
break
time.sleep(SerialConnection.BUFFER_WAITTIME)
return [line.strip("\r\n") for line in replies.decode().split("\n")]
    def getresponse(self, cmd: str, timeout: Optional[float] = None) -> str:
        """Sends command and reads a single-line device response.

        Commands do not need to be terminated by a newline, unless commands
        are chained together.

        The read timeout can be defined independently from the general device
        timeout, which is useful for measurements with integration time longer
        than the communication timeout. It is resolved in order of precedence:

          1. ``timeout``, if specified
          2. ``SerialConnection.timeout``, if not None
          3. 0.1 seconds

        Args:
            cmd: Command to send. No newline is necessary.
            timeout: Optional timeout override in seconds. Defaults to None.

        Returns:
            Single line reply of the device, stripped of leading/trailing
            whitespace.

        Raises:
            serial.SerialException: Attempted to access a closed port.
        """
        self.cleanup()
        self.writeline(cmd)
        # Resolve the effective read timeout: explicit argument wins, then
        # the connection-level timeout, then the 0.1 s fallback.
        if timeout is None:
            timeout = 0.1 if self.timeout is None else self.timeout
        end_time = time.time() + timeout
        # Busy-wait (no sleep) until the first bytes arrive, or until the
        # deadline passes.
        while not self.in_waiting:
            if time.time() > end_time:
                break
        # Accumulate bytes until a complete newline-terminated line has been
        # read, or until the deadline passes.
        reply = bytearray()
        while True:
            reply.extend(self.read_until(b"\n", self.in_waiting))
            if reply and reply[-1] == 10:  # b'\n' == int(10)
                break
            if time.time() > end_time:
                break
            time.sleep(SerialConnection.BUFFER_WAITTIME)
        return reply.decode().strip("\r\n")
def writeline(self, cmd: str) -> None:
"""Sends command to device.
Commands do not need to be terminated by a newline, unless commands
are chained together.
Args:
cmd: Command to send. No newline is necessary.
Raises:
serial.SerialException: Attempted to access a closed port.
"""
self.write("{};".format(cmd).encode())
def get_help(self) -> str:
"""Returns the help information stored on device.
Raises:
serial.SerialException: Attempted to access a closed port.
"""
return "\n".join(self.getresponses("HELP"))
def get_identity(self) -> str:
"""Returns identity of device.
Raises:
serial.SerialException: Attempted to access a closed port.
"""
return self.getresponse("*IDN?") | 0.755141 | 0.266738 |
from string import Template
URXVT_TEMPLATE = Template(r"""
{
"app-id": "de.uchuujin.fp.termzoo.urxvt${urxvt_version}",
"runtime": "org.freedesktop.Platform",
"runtime-version": "18.08",
"sdk": "org.freedesktop.Sdk",
"command": "urxvt",
"finish-args": ["--socket=x11", "--device=dri", "--talk-name=org.freedesktop.Flatpak"],
"modules": [
{
"name": "xmu",
"buildsystem": "autotools",
"sources": [
{
"type": "archive",
"url": "https://www.x.org/releases/individual/lib/libXmu-1.1.2.tar.bz2",
"sha512": "eba4e3d10f7d75ba8464881fb69f295a89774a4b37793197d75f3312e3a342b2df8b7e13e3f5c887962704329b5347ff2f3395e229af9dadf46a93b1e8613cfc"
}
]
},
{
"name": "rxvt-unicode",
"buildsystem": "simple",
"build-commands": [
"./configure LDFLAGS='-Wl,--copy-dt-needed-entries -L/app/lib' CPPFLAGS='-fpermissive -I/app/include' --with-res-name=urxvtTZ --with-res-class=URxvtTZ --prefix=/app --enable-256-color --enable-combining --enable-fading --enable-font-styles --enable-iso14755 --enable-keepscrolling --enable-lastlog --enable-mousewheel --enable-next-scroll --enable-pixbuf --enable-pointer-blank --enable-rxvt-scroll --enable-selectionscrolling --enable-slipwheeling --enable-smart-resize --enable-transparency --enable-unicode3 --enable-warnings --enable-xft --enable-xim --enable-xterm-scroll --with-term=rxvt-unicode-256color --disable-perl",
"make",
"make install"
],
"sources": [
{
"type": "archive",
"url": "http://dist.schmorp.de/rxvt-unicode/Attic/${urxvt_file}",
"sha512": "${urxvt_sha}"
}${urxvt_patch}
]
},
{
"name": "scripts",
"buildsystem": "simple",
"build-commands": [
"install -D run-in-host /app/bin/run-in-host"
],
"sources": [
{
"type": "file",
"path": "../run-in-host"
}
]
}
]
}
""")
CPP11_TYPE_PATCH = """,
{
"type": "patch",
"path": "cpp11-typo-fix.patch"
}
"""
urxvt_versions = (
('9_0', 'rxvt-unicode-9.0.tar.bz2', '', 'bf556b1572056a2e531ba0d5789f7ecc1de8ea7657b13ec5b699e894bf7dd8e27b5341635a1bbc8ee287b170b8af943c1cce257bfd4412e6ed44449d29a853ab'),
('9_01', 'rxvt-unicode-9.01.tar.bz2', '', 'f93da5d4ef15319c3b3f19acbc5d9e078978496ca56659d983eedfaa88bb93d4030ca9553096c270fcedae04b00fe6c1641af7b4c9859deca8eaf9764092de25'),
('9_02', 'rxvt-unicode-9.02.tar.bz2', '', '4dc3806fa4fb8ef928321ba44fb1a318b37ce3507fa63527e9de74a7494c4d4f9381070a88511efb114c58412881af7c41163f0c242a829691a4cedfebeab6ab'),
# 9.03 does not exist
# 9.04 does not exist
('9_05', 'rxvt-unicode-9.05.tar.bz2', '', '37169a0f28fec20c268fc92c5c588cc8457b048e1fa5e663475f3d4ef9be631f61967fe1a269f233af69e147cbf9b613105804638fec902c604811fcff07ab5e'),
('9_06', 'rxvt-unicode-9.06.tar.bz2', '', 'fa3754c92f2c06a467b1c1ff78f2ba5ea723239efb54dec0a004490d82e8d55e8734e3abedcd800f769d3c13f99046de87a5abd916d24818915092de902a943c'),
('9_07', 'rxvt-unicode-9.07.tar.bz2', '', '1acdaa6863aeb7f9a861860a898a036900897cd351ed66db006b3612f8750ece042c913b08a125100bd4daad6525cf56246741fc37a49df5bf38b4382f8bd152'),
# 9.08 does not exist
('9_09', 'rxvt-unicode-9.09.tar.bz2', '', '80827a77ff8710f85f32adf9749af8e30315406929f95a1892d645371f2520ad5b60c44dd5a10bd18baeed0850b5dcf532007124b8aae60d916f83a0581edc02'),
('9_10', 'rxvt-unicode-9.10.tar.bz2', '', 'e19cb074d5c279858d3bb68930a56d8f4d9c2eb77fa163a4c5609c491965135dee473230823a9823f59a74cbcc2ad2efc9836c26c96bea76778ae6cad281ec50'),
('9_11', 'rxvt-unicode-9.11.tar.bz2', '', '10738bb7f10c4bea077b8844268155a54febf88bbbd9a87fc29153faeaf4d991020f716fa1e2b6fcc5759a440c523a8f9afb36dc726196e10dded8421ffe65cd'),
('9_12', 'rxvt-unicode-9.12.tar.bz2', '', 'd2307b87f2cf2b63f42f575a72becf637e0455e1ff9aed5ecfba1d8f9991ea8f21654ee34afe16610deda83f39f02d339e59cba1a578cf92c5551735d0a683b0'),
# 9.13 does not exist
('9_14', 'rxvt-unicode-9.14.tar.bz2', '', '913a1ad8a518da798882caaf6edcd34681a8e36fe8a1b9c768a9ee05cd8ddefaf44b3c58a92f89e812f473458666f5221c7952067a67daaf9cf7812fdf42c74e'),
('9_15', 'rxvt-unicode-9.15.tar.bz2', '', '1095fb88502377fa669746bbe9a5597f0c748e2c01a468ce382e91236ed0e6f720f3ca7631f7105aa390afac09588b92cebd70589096b6a20f174c4297463b71'),
('9_16', 'rxvt-unicode-9.16.tar.bz2', CPP11_TYPE_PATCH, 'c22feec33176120944c58abb21c1e0508b5682bec4bde645e9c68735f9cf93e14e1a3b804374c6fd58f9032faa4cbf4fb2122a2307d0f2204e4b9b5a11427092'),
('9_17', 'rxvt-unicode-9.17.tar.bz2', CPP11_TYPE_PATCH, 'e7ba2614cf7f8027170a6adfddedd3fc7a63f219a76f92901b49b5a26295d5191200ac7c1dad1f1e7c90225c8fa2dced3022317e8e876425a57d067c3c0d84ee'),
('9_18', 'rxvt-unicode-9.18.tar.bz2', CPP11_TYPE_PATCH, '8d1abf38c6ad47129fafc22c3996a7e2cd0f0cf4982d441ee30076d64d191637942307efd12cc05dfef6b65136530973be9da89e6769c5967d4e523f33309237'),
('9_19', 'rxvt-unicode-9.19.tar.bz2', CPP11_TYPE_PATCH, '357f2b9a299b816264e8cece3200338369399e4f760622daec1520d05c75e93d44e2dee3351c8e31765ab8f2218dbb9d239960ae8112e2f75d988785373d7f26'),
('9_20', 'rxvt-unicode-9.20.tar.bz2', CPP11_TYPE_PATCH, '39e1574f7b7034c07ab2e836bb77e0fb0536830df873cc54e6c7583be5e20d36dea3fe0fc889283f163530c77534a3a55de227ee0a8f03564d0f030e60461ff9'),
('9_21', 'rxvt-unicode-9.21.tar.bz2', '', 'd50adf6b1e6ae3b13492b4f40455d3a56bb174a7c6db4d4525a1277736994adfb74a2cd1e7d3e8a8cfdc4509a9ae32c05a627829e295dc1bd4a5ba7cc2f80776'),
('9_22', 'rxvt-unicode-9.22.tar.bz2', '', 'b39f1b2cbe6dd3fbd2a0ad6a9d391a2b6f49d7c5e67bc65fe44a9c86937f8db379572c67564c6e21ff6e09b447cdfd4e540544e486179e94da0e0db679c04dd9'),
)
for urxvt_version, urxvt_file, urxvt_patch, urxvt_sha in urxvt_versions:
with open('de.uchuujin.fp.termzoo.urxvt{}.json'.format(urxvt_version), "w") as f:
f.write(URXVT_TEMPLATE.substitute(urxvt_version=urxvt_version,
urxvt_file=urxvt_file, urxvt_patch=urxvt_patch,
urxvt_sha=urxvt_sha)) | rxvt-unicode/generate.py |
from string import Template
URXVT_TEMPLATE = Template(r"""
{
"app-id": "de.uchuujin.fp.termzoo.urxvt${urxvt_version}",
"runtime": "org.freedesktop.Platform",
"runtime-version": "18.08",
"sdk": "org.freedesktop.Sdk",
"command": "urxvt",
"finish-args": ["--socket=x11", "--device=dri", "--talk-name=org.freedesktop.Flatpak"],
"modules": [
{
"name": "xmu",
"buildsystem": "autotools",
"sources": [
{
"type": "archive",
"url": "https://www.x.org/releases/individual/lib/libXmu-1.1.2.tar.bz2",
"sha512": "eba4e3d10f7d75ba8464881fb69f295a89774a4b37793197d75f3312e3a342b2df8b7e13e3f5c887962704329b5347ff2f3395e229af9dadf46a93b1e8613cfc"
}
]
},
{
"name": "rxvt-unicode",
"buildsystem": "simple",
"build-commands": [
"./configure LDFLAGS='-Wl,--copy-dt-needed-entries -L/app/lib' CPPFLAGS='-fpermissive -I/app/include' --with-res-name=urxvtTZ --with-res-class=URxvtTZ --prefix=/app --enable-256-color --enable-combining --enable-fading --enable-font-styles --enable-iso14755 --enable-keepscrolling --enable-lastlog --enable-mousewheel --enable-next-scroll --enable-pixbuf --enable-pointer-blank --enable-rxvt-scroll --enable-selectionscrolling --enable-slipwheeling --enable-smart-resize --enable-transparency --enable-unicode3 --enable-warnings --enable-xft --enable-xim --enable-xterm-scroll --with-term=rxvt-unicode-256color --disable-perl",
"make",
"make install"
],
"sources": [
{
"type": "archive",
"url": "http://dist.schmorp.de/rxvt-unicode/Attic/${urxvt_file}",
"sha512": "${urxvt_sha}"
}${urxvt_patch}
]
},
{
"name": "scripts",
"buildsystem": "simple",
"build-commands": [
"install -D run-in-host /app/bin/run-in-host"
],
"sources": [
{
"type": "file",
"path": "../run-in-host"
}
]
}
]
}
""")
CPP11_TYPE_PATCH = """,
{
"type": "patch",
"path": "cpp11-typo-fix.patch"
}
"""
urxvt_versions = (
('9_0', 'rxvt-unicode-9.0.tar.bz2', '', 'bf556b1572056a2e531ba0d5789f7ecc1de8ea7657b13ec5b699e894bf7dd8e27b5341635a1bbc8ee287b170b8af943c1cce257bfd4412e6ed44449d29a853ab'),
('9_01', 'rxvt-unicode-9.01.tar.bz2', '', 'f93da5d4ef15319c3b3f19acbc5d9e078978496ca56659d983eedfaa88bb93d4030ca9553096c270fcedae04b00fe6c1641af7b4c9859deca8eaf9764092de25'),
('9_02', 'rxvt-unicode-9.02.tar.bz2', '', '4dc3806fa4fb8ef928321ba44fb1a318b37ce3507fa63527e9de74a7494c4d4f9381070a88511efb114c58412881af7c41163f0c242a829691a4cedfebeab6ab'),
# 9.03 does not exist
# 9.04 does not exist
('9_05', 'rxvt-unicode-9.05.tar.bz2', '', '37169a0f28fec20c268fc92c5c588cc8457b048e1fa5e663475f3d4ef9be631f61967fe1a269f233af69e147cbf9b613105804638fec902c604811fcff07ab5e'),
('9_06', 'rxvt-unicode-9.06.tar.bz2', '', 'fa3754c92f2c06a467b1c1ff78f2ba5ea723239efb54dec0a004490d82e8d55e8734e3abedcd800f769d3c13f99046de87a5abd916d24818915092de902a943c'),
('9_07', 'rxvt-unicode-9.07.tar.bz2', '', '1acdaa6863aeb7f9a861860a898a036900897cd351ed66db006b3612f8750ece042c913b08a125100bd4daad6525cf56246741fc37a49df5bf38b4382f8bd152'),
# 9.08 does not exist
('9_09', 'rxvt-unicode-9.09.tar.bz2', '', '80827a77ff8710f85f32adf9749af8e30315406929f95a1892d645371f2520ad5b60c44dd5a10bd18baeed0850b5dcf532007124b8aae60d916f83a0581edc02'),
('9_10', 'rxvt-unicode-9.10.tar.bz2', '', 'e19cb074d5c279858d3bb68930a56d8f4d9c2eb77fa163a4c5609c491965135dee473230823a9823f59a74cbcc2ad2efc9836c26c96bea76778ae6cad281ec50'),
('9_11', 'rxvt-unicode-9.11.tar.bz2', '', '10738bb7f10c4bea077b8844268155a54febf88bbbd9a87fc29153faeaf4d991020f716fa1e2b6fcc5759a440c523a8f9afb36dc726196e10dded8421ffe65cd'),
('9_12', 'rxvt-unicode-9.12.tar.bz2', '', 'd2307b87f2cf2b63f42f575a72becf637e0455e1ff9aed5ecfba1d8f9991ea8f21654ee34afe16610deda83f39f02d339e59cba1a578cf92c5551735d0a683b0'),
# 9.13 does not exist
('9_14', 'rxvt-unicode-9.14.tar.bz2', '', '913a1ad8a518da798882caaf6edcd34681a8e36fe8a1b9c768a9ee05cd8ddefaf44b3c58a92f89e812f473458666f5221c7952067a67daaf9cf7812fdf42c74e'),
('9_15', 'rxvt-unicode-9.15.tar.bz2', '', '1095fb88502377fa669746bbe9a5597f0c748e2c01a468ce382e91236ed0e6f720f3ca7631f7105aa390afac09588b92cebd70589096b6a20f174c4297463b71'),
('9_16', 'rxvt-unicode-9.16.tar.bz2', CPP11_TYPE_PATCH, 'c22feec33176120944c58abb21c1e0508b5682bec4bde645e9c68735f9cf93e14e1a3b804374c6fd58f9032faa4cbf4fb2122a2307d0f2204e4b9b5a11427092'),
('9_17', 'rxvt-unicode-9.17.tar.bz2', CPP11_TYPE_PATCH, 'e7ba2614cf7f8027170a6adfddedd3fc7a63f219a76f92901b49b5a26295d5191200ac7c1dad1f1e7c90225c8fa2dced3022317e8e876425a57d067c3c0d84ee'),
('9_18', 'rxvt-unicode-9.18.tar.bz2', CPP11_TYPE_PATCH, '8d1abf38c6ad47129fafc22c3996a7e2cd0f0cf4982d441ee30076d64d191637942307efd12cc05dfef6b65136530973be9da89e6769c5967d4e523f33309237'),
('9_19', 'rxvt-unicode-9.19.tar.bz2', CPP11_TYPE_PATCH, '357f2b9a299b816264e8cece3200338369399e4f760622daec1520d05c75e93d44e2dee3351c8e31765ab8f2218dbb9d239960ae8112e2f75d988785373d7f26'),
('9_20', 'rxvt-unicode-9.20.tar.bz2', CPP11_TYPE_PATCH, '39e1574f7b7034c07ab2e836bb77e0fb0536830df873cc54e6c7583be5e20d36dea3fe0fc889283f163530c77534a3a55de227ee0a8f03564d0f030e60461ff9'),
('9_21', 'rxvt-unicode-9.21.tar.bz2', '', 'd50adf6b1e6ae3b13492b4f40455d3a56bb174a7c6db4d4525a1277736994adfb74a2cd1e7d3e8a8cfdc4509a9ae32c05a627829e295dc1bd4a5ba7cc2f80776'),
('9_22', 'rxvt-unicode-9.22.tar.bz2', '', 'b39f1b2cbe6dd3fbd2a0ad6a9d391a2b6f49d7c5e67bc65fe44a9c86937f8db379572c67564c6e21ff6e09b447cdfd4e540544e486179e94da0e0db679c04dd9'),
)
for urxvt_version, urxvt_file, urxvt_patch, urxvt_sha in urxvt_versions:
with open('de.uchuujin.fp.termzoo.urxvt{}.json'.format(urxvt_version), "w") as f:
f.write(URXVT_TEMPLATE.substitute(urxvt_version=urxvt_version,
urxvt_file=urxvt_file, urxvt_patch=urxvt_patch,
urxvt_sha=urxvt_sha)) | 0.3512 | 0.152442 |
from __future__ import annotations
from enum import Enum
from flask import g
import pytest
from byceps.util.authorization import create_permission_enum
from byceps.util.authorization import (
has_current_user_any_permission,
has_current_user_permission,
)
# Throwaway permission enum used solely as a fixture for the tests below.
ChillPermission = create_permission_enum(
    'chill', ['browse_the_web', 'play_videogames', 'watch_movies']
)
class CurrentUserMock:
    """Minimal stand-in for the current-user object.

    Only a ``permissions`` attribute is provided; the tests below populate
    nothing else on it.
    """

    def __init__(self, permissions: set[Enum]) -> None:
        self.permissions = permissions
@pytest.mark.parametrize(
    'permissions_assigned, permission_requested, expected',
    [
        # No permissions at all -> denied.
        # NOTE(review): {} is an empty dict, not a set; it behaves the same
        # for emptiness/membership here, but frozenset() would match the
        # annotated type of CurrentUserMock.permissions.
        (
            {},
            ChillPermission.browse_the_web,
            False,
        ),
        # Requested permission not among the assigned ones -> denied.
        (
            {ChillPermission.watch_movies},
            ChillPermission.play_videogames,
            False,
        ),
        # Exactly the assigned permission -> granted.
        (
            {ChillPermission.watch_movies},
            ChillPermission.watch_movies,
            True,
        ),
        # Multiple assigned, requested one missing -> denied.
        (
            {
                ChillPermission.browse_the_web,
                ChillPermission.play_videogames,
            },
            ChillPermission.watch_movies,
            False,
        ),
        # Multiple assigned, requested one included -> granted.
        (
            {
                ChillPermission.browse_the_web,
                ChillPermission.play_videogames,
            },
            ChillPermission.play_videogames,
            True,
        ),
    ],
)
def test_has_current_user_permission(
    site_app, permissions_assigned, permission_requested, expected
):
    """A permission is granted iff it is among the current user's permissions."""
    g.user = CurrentUserMock(permissions_assigned)
    assert has_current_user_permission(permission_requested) == expected
@pytest.mark.parametrize(
'permissions_assigned, permissions_requested, expected',
[
(
{},
{
ChillPermission.browse_the_web,
},
False,
),
(
{ChillPermission.watch_movies},
{
ChillPermission.browse_the_web,
ChillPermission.play_videogames,
},
False,
),
(
{ChillPermission.watch_movies},
{
ChillPermission.play_videogames,
ChillPermission.watch_movies,
},
True,
),
],
)
def test_has_current_user_any_permission(
site_app, permissions_assigned, permissions_requested, expected
):
g.user = CurrentUserMock(permissions_assigned)
assert has_current_user_any_permission(*permissions_requested) == expected | tests/integration/util/test_authorization.py | from __future__ import annotations
from enum import Enum
from flask import g
import pytest
from byceps.util.authorization import create_permission_enum
from byceps.util.authorization import (
has_current_user_any_permission,
has_current_user_permission,
)
ChillPermission = create_permission_enum(
'chill', ['browse_the_web', 'play_videogames', 'watch_movies']
)
class CurrentUserMock:
def __init__(self, permissions: set[Enum]) -> None:
self.permissions = permissions
@pytest.mark.parametrize(
'permissions_assigned, permission_requested, expected',
[
(
{},
ChillPermission.browse_the_web,
False,
),
(
{ChillPermission.watch_movies},
ChillPermission.play_videogames,
False,
),
(
{ChillPermission.watch_movies},
ChillPermission.watch_movies,
True,
),
(
{
ChillPermission.browse_the_web,
ChillPermission.play_videogames,
},
ChillPermission.watch_movies,
False,
),
(
{
ChillPermission.browse_the_web,
ChillPermission.play_videogames,
},
ChillPermission.play_videogames,
True,
),
],
)
def test_has_current_user_permission(
site_app, permissions_assigned, permission_requested, expected
):
g.user = CurrentUserMock(permissions_assigned)
assert has_current_user_permission(permission_requested) == expected
@pytest.mark.parametrize(
'permissions_assigned, permissions_requested, expected',
[
(
{},
{
ChillPermission.browse_the_web,
},
False,
),
(
{ChillPermission.watch_movies},
{
ChillPermission.browse_the_web,
ChillPermission.play_videogames,
},
False,
),
(
{ChillPermission.watch_movies},
{
ChillPermission.play_videogames,
ChillPermission.watch_movies,
},
True,
),
],
)
def test_has_current_user_any_permission(
site_app, permissions_assigned, permissions_requested, expected
):
g.user = CurrentUserMock(permissions_assigned)
assert has_current_user_any_permission(*permissions_requested) == expected | 0.725454 | 0.153137 |
from operator import add
import types
from .vendor.lexicon import Lexicon
from .parser import Context, Argument
from .tasks import Task
class Collection(object):
"""
A collection of executable tasks.
"""
def __init__(self, *args, **kwargs):
"""
Create a new task collection/namespace.
May call with no arguments and use e.g. `.add_task`/`.add_collection` to insert objects, e.g.::
c = Collection()
c.add_task(some_task)
If an initial string argument is given, it is used as the default name
for this collection, should it be inserted into another collection as a
sub-namespace::
docs = Collection('docs')
docs.add_task(doc_task)
ns = Collection()
ns.add_task(top_level_task)
ns.add_collection(docs)
# Valid identifiers are now 'top_level_task' and 'docs.doc_task'
# (assuming the task objects were actually named the same as the
# variables we're using :))
Otherwise, all ``*args`` are expected to be `.Task` or `.Collection`
instances which will be passed to `.add_task`/`.add_collection` as
appropriate. Module objects are also valid (as they are for
`.add_collection`). For example, the below snippet results in the same
two task identifiers as the one above::
ns = Collection(top_level_task, Collection('docs', doc_task))
If any ``**kwargs`` are given, the keywords are used as the initial
name arguments for the respective values::
ns = Collection(
top_level_task=some_other_task,
docs=Collection(doc_task)
)
That's exactly equivalent to::
docs = Collection(doc_task)
ns = Collection()
ns.add_task(some_other_task, 'top_level_task')
ns.add_collection(docs, 'docs')
"""
# Initialize
self.tasks = Lexicon()
self.collections = Lexicon()
self.default = None
self.name = None
# Name if applicable
args = list(args)
if args and isinstance(args[0], basestring):
self.name = args.pop(0)
# Dispatch args/kwargs
for arg in args:
self._add_object(arg)
# Dispatch kwargs
for name, obj in kwargs.iteritems():
self._add_object(obj, name)
def _add_object(self, obj, name=None):
if isinstance(obj, Task):
method = self.add_task
elif isinstance(obj, (Collection, types.ModuleType)):
method = self.add_collection
else:
raise TypeError("No idea how to insert %r!" % type(obj))
return method(obj, name=name)
@classmethod
def from_module(self, module):
"""
Return a new `.Collection` created from ``module``.
Inspects ``module`` for any `.Task` instances and adds them to a new
`.Collection`, returning it. If any explicit namespace collections
exist (named ``ns`` or ``namespace``) they are preferentially loaded
instead.
When the implicit/default collection is generated, it will be named
after the module's ``__name__`` attribute, or its last dotted section
if it's a submodule. (I.e. it should usually map to the actual ``.py``
filename.)
Explicitly given collections will only be given that module-derived
name if they don't already have a valid ``.name`` attribute.
"""
module_name = module.__name__.split('.')[-1]
# See if the module provides a default NS to use in lieu of creating
# our own collection.
for candidate in ('ns', 'namespace'):
obj = getattr(module, candidate, None)
if obj and isinstance(obj, Collection):
if not obj.name:
obj.name = module_name
return obj
# Failing that, make our own collection from the module's tasks.
tasks = filter(
lambda x: isinstance(x[1], Task),
vars(module).items()
)
collection = Collection(module_name)
for name, task in tasks:
collection.add_task(name=name, task=task)
return collection
    def add_task(self, task, name=None):
        """
        Adds ``Task`` ``task`` to this collection.

        If ``name`` is not explicitly given (recommended) the ``.func_name`` of
        the ``Task``'s wrapped callable will be used instead. (If the wrapped
        callable is not a function, you *must* give ``name``.)

        Raises ValueError on a name conflict with an existing sub-collection,
        or when a second task claims to be the collection default.
        """
        if name is None:
            if hasattr(task.body, 'func_name'):
                name = task.body.func_name
            else:
                raise ValueError("'name' may only be empty if 'task' wraps an object exposing .func_name")
        # A task may not shadow a sub-collection of the same name.
        if name in self.collections:
            raise ValueError("Name conflict: this collection has a sub-collection named %r already" % name)
        self.tasks[name] = task
        # Register aliases as alternate keys pointing at the primary name.
        for alias in task.aliases:
            self.tasks.alias(alias, to=name)
        if task.is_default:
            # Only one task per collection may be the default.
            if self.default:
                msg = "'%s' cannot be the default because '%s' already is!"
                raise ValueError(msg % (name, self.default))
            self.default = name
    def add_collection(self, coll, name=None):
        """
        Adds `.Collection` ``coll`` (or a module, which is converted via
        `.from_module`) to this collection as a sub-namespace under ``name``.

        If ``name`` is omitted, ``coll.name`` is used. Raises ValueError when
        neither yields a name, or when the name collides with an existing
        task.
        """
        # Handle module-as-collection
        if isinstance(coll, types.ModuleType):
            coll = Collection.from_module(coll)
        # Ensure we have a name, or die trying
        name = name or coll.name
        if not name:
            raise ValueError("Non-root collections must have a name!")
        # Test for conflict
        if name in self.tasks:
            raise ValueError("Name conflict: this collection has a task named %r already" % name)
        # Insert
        self.collections[name] = coll
    def __getitem__(self, name=None):
        """
        Returns task named ``name``. Honors aliases and subcollections.

        If this collection has a default task, it is returned when ``name`` is
        empty or ``None``. If empty input is given and no task has been
        selected as the default, ValueError will be raised.

        Tasks within subcollections should be given in dotted form, e.g.
        'foo.bar'. Subcollection default tasks will be returned on the
        subcollection's name.

        Missing names surface as KeyError from the underlying lookups
        (``__contains__`` relies on this).
        """
        # Default task for this collection itself
        if not name:
            if self.default:
                return self[self.default]
            else:
                raise ValueError("This collection has no default task.")
        # Non-default tasks within subcollections: recurse on the remainder
        # of the dotted path.
        if '.' in name:
            parts = name.split('.')
            coll = parts.pop(0)
            rest = '.'.join(parts)
            return self.collections[coll][rest]
        # Default task for subcollections (via empty-name lookup)
        if name in self.collections:
            return self.collections[name]['']
        # Regular task lookup
        return self.tasks[name]
def __contains__(self, name):
try:
task = self[name]
return True
except KeyError:
return False
def to_contexts(self):
"""
Returns all contained tasks and subtasks as a list of parser contexts.
"""
result = []
for primary, aliases in self.task_names.iteritems():
task = self[primary]
result.append(Context(
name=primary, aliases=aliases, args=task.get_arguments()
))
return result
def subtask_name(self, collection_name, task_name):
return "%s.%s" % (collection_name, task_name)
@property
def task_names(self):
"""
Return all task identifiers for this collection as a dict.
Specifically, a dict with the primary/"real" task names as the key, and
any aliases as a list value.
"""
ret = {}
# Our own tasks get no prefix, just go in as-is: {name: [aliases]}
for name, task in self.tasks.iteritems():
ret[name] = task.aliases
# Subcollection tasks get both name + aliases prefixed
for coll_name, coll in self.collections.iteritems():
for task_name, aliases in coll.task_names.iteritems():
aliases = map(
lambda x: self.subtask_name(coll_name, x),
aliases
)
# Tack on collection name to alias list if this task is the
# collection's default.
if coll.default and coll.default == task_name:
aliases += (coll_name,)
ret[self.subtask_name(coll_name, task_name)] = aliases
return ret | invoke/collection.py | from operator import add
import types
from .vendor.lexicon import Lexicon
from .parser import Context, Argument
from .tasks import Task
class Collection(object):
"""
A collection of executable tasks.
"""
def __init__(self, *args, **kwargs):
"""
Create a new task collection/namespace.
May call with no arguments and use e.g. `.add_task`/`.add_collection` to insert objects, e.g.::
c = Collection()
c.add_task(some_task)
If an initial string argument is given, it is used as the default name
for this collection, should it be inserted into another collection as a
sub-namespace::
docs = Collection('docs')
docs.add_task(doc_task)
ns = Collection()
ns.add_task(top_level_task)
ns.add_collection(docs)
# Valid identifiers are now 'top_level_task' and 'docs.doc_task'
# (assuming the task objects were actually named the same as the
# variables we're using :))
Otherwise, all ``*args`` are expected to be `.Task` or `.Collection`
instances which will be passed to `.add_task`/`.add_collection` as
appropriate. Module objects are also valid (as they are for
`.add_collection`). For example, the below snippet results in the same
two task identifiers as the one above::
ns = Collection(top_level_task, Collection('docs', doc_task))
If any ``**kwargs`` are given, the keywords are used as the initial
name arguments for the respective values::
ns = Collection(
top_level_task=some_other_task,
docs=Collection(doc_task)
)
That's exactly equivalent to::
docs = Collection(doc_task)
ns = Collection()
ns.add_task(some_other_task, 'top_level_task')
ns.add_collection(docs, 'docs')
"""
# Initialize
self.tasks = Lexicon()
self.collections = Lexicon()
self.default = None
self.name = None
# Name if applicable
args = list(args)
if args and isinstance(args[0], basestring):
self.name = args.pop(0)
# Dispatch args/kwargs
for arg in args:
self._add_object(arg)
# Dispatch kwargs
for name, obj in kwargs.iteritems():
self._add_object(obj, name)
def _add_object(self, obj, name=None):
if isinstance(obj, Task):
method = self.add_task
elif isinstance(obj, (Collection, types.ModuleType)):
method = self.add_collection
else:
raise TypeError("No idea how to insert %r!" % type(obj))
return method(obj, name=name)
@classmethod
def from_module(self, module):
"""
Return a new `.Collection` created from ``module``.
Inspects ``module`` for any `.Task` instances and adds them to a new
`.Collection`, returning it. If any explicit namespace collections
exist (named ``ns`` or ``namespace``) they are preferentially loaded
instead.
When the implicit/default collection is generated, it will be named
after the module's ``__name__`` attribute, or its last dotted section
if it's a submodule. (I.e. it should usually map to the actual ``.py``
filename.)
Explicitly given collections will only be given that module-derived
name if they don't already have a valid ``.name`` attribute.
"""
module_name = module.__name__.split('.')[-1]
# See if the module provides a default NS to use in lieu of creating
# our own collection.
for candidate in ('ns', 'namespace'):
obj = getattr(module, candidate, None)
if obj and isinstance(obj, Collection):
if not obj.name:
obj.name = module_name
return obj
# Failing that, make our own collection from the module's tasks.
tasks = filter(
lambda x: isinstance(x[1], Task),
vars(module).items()
)
collection = Collection(module_name)
for name, task in tasks:
collection.add_task(name=name, task=task)
return collection
def add_task(self, task, name=None):
"""
Adds ``Task`` ``task`` to this collection.
If ``name`` is not explicitly given (recommended) the ``.func_name`` of
the ``Task``'s wrapped callable will be used instead. (If the wrapped
callable is not a function, you *must* give ``name``.)
"""
if name is None:
if hasattr(task.body, 'func_name'):
name = task.body.func_name
else:
raise ValueError("'name' may only be empty if 'task' wraps an object exposing .func_name")
if name in self.collections:
raise ValueError("Name conflict: this collection has a sub-collection named %r already" % name)
self.tasks[name] = task
for alias in task.aliases:
self.tasks.alias(alias, to=name)
if task.is_default:
if self.default:
msg = "'%s' cannot be the default because '%s' already is!"
raise ValueError(msg % (name, self.default))
self.default = name
def add_collection(self, coll, name=None):
# Handle module-as-collection
if isinstance(coll, types.ModuleType):
coll = Collection.from_module(coll)
# Ensure we have a name, or die trying
name = name or coll.name
if not name:
raise ValueError("Non-root collections must have a name!")
# Test for conflict
if name in self.tasks:
raise ValueError("Name conflict: this collection has a task named %r already" % name)
# Insert
self.collections[name] = coll
def __getitem__(self, name=None):
"""
Returns task named ``name``. Honors aliases and subcollections.
If this collection has a default task, it is returned when ``name`` is
empty or ``None``. If empty input is given and no task has been
selected as the default, ValueError will be raised.
Tasks within subcollections should be given in dotted form, e.g.
'foo.bar'. Subcollection default tasks will be returned on the
subcollection's name.
"""
# Default task for this collection itself
if not name:
if self.default:
return self[self.default]
else:
raise ValueError("This collection has no default task.")
# Non-default tasks within subcollections
if '.' in name:
parts = name.split('.')
coll = parts.pop(0)
rest = '.'.join(parts)
return self.collections[coll][rest]
# Default task for subcollections (via empty-name lookup)
if name in self.collections:
return self.collections[name]['']
# Regular task lookup
return self.tasks[name]
def __contains__(self, name):
try:
task = self[name]
return True
except KeyError:
return False
def to_contexts(self):
"""
Returns all contained tasks and subtasks as a list of parser contexts.
"""
result = []
for primary, aliases in self.task_names.iteritems():
task = self[primary]
result.append(Context(
name=primary, aliases=aliases, args=task.get_arguments()
))
return result
def subtask_name(self, collection_name, task_name):
return "%s.%s" % (collection_name, task_name)
@property
def task_names(self):
"""
Return all task identifiers for this collection as a dict.
Specifically, a dict with the primary/"real" task names as the key, and
any aliases as a list value.
"""
ret = {}
# Our own tasks get no prefix, just go in as-is: {name: [aliases]}
for name, task in self.tasks.iteritems():
ret[name] = task.aliases
# Subcollection tasks get both name + aliases prefixed
for coll_name, coll in self.collections.iteritems():
for task_name, aliases in coll.task_names.iteritems():
aliases = map(
lambda x: self.subtask_name(coll_name, x),
aliases
)
# Tack on collection name to alias list if this task is the
# collection's default.
if coll.default and coll.default == task_name:
aliases += (coll_name,)
ret[self.subtask_name(coll_name, task_name)] = aliases
return ret | 0.83545 | 0.324784 |
import datetime
import numpy as np
import pytest
import dsch
# Ensure dsch.schema is automatically imported alongside the dsch package.
# Normally, we would get the "schema" shorthand via "from dsch import schema".
schema = dsch.schema
@pytest.fixture(params=('hdf5', 'mat', 'npz'))
def storage_path(request, tmpdir):
    """Yield a per-backend temp-file path for each supported storage format.

    Parametrized over all three dsch backends; the returned path carries the
    extension matching ``request.param``.
    """
    # Fix: the original if/elif chain left `storage_path` unbound (raising
    # UnboundLocalError) for any param outside the three known backends; a
    # lookup table fails fast with KeyError and removes the duplication.
    extensions = {'hdf5': 'h5', 'mat': 'mat', 'npz': 'npz'}
    return str(tmpdir.join('test_file.' + extensions[request.param]))
# One representative value per schema node type, used to populate storage
# before the save/load round-trip.
example_values1 = {
    schema.Array: np.array([23, 42], dtype='int32'),
    schema.Bool: True,
    schema.Date: datetime.date(1970, 1, 1),
    schema.DateTime: datetime.datetime(1970, 1, 1, 13, 37, 42, 23),
    schema.Scalar: np.int32(42),
    schema.String: 'spam',
    schema.Time: datetime.time(13, 37, 42, 23),
}
# A second, distinct set of values per node type, used to overwrite the
# first set (re-save scenarios).
example_values2 = {
    schema.Array: np.array([1, 2, 3], dtype='int32'),
    schema.Bool: False,
    schema.Date: datetime.date(1984, 5, 23),
    schema.DateTime: datetime.datetime(1984, 5, 23, 1, 2, 3, 4),
    schema.Scalar: np.int32(23),
    schema.String: 'eggs',
    schema.Time: datetime.time(1, 2, 3, 4),
}
@pytest.mark.parametrize('schema_node', (
    schema.Array(dtype='int32'),
    schema.Bool(),
    schema.Date(),
    schema.DateTime(),
    schema.Scalar(dtype='int32'),
    schema.String(),
    schema.Time(),
))
def test_item_node(storage_path, schema_node):
    """Round-trip a single item node through each storage backend."""
    storage = dsch.create(storage_path=storage_path,
                          schema_node=schema_node)
    storage.data.value = example_values1[type(schema_node)]
    storage.data.validate()
    storage.save()
    new_storage = dsch.load(storage_path)
    assert np.all(new_storage.data.value ==
                  example_values1[type(schema_node)])
    # Bug fix: the second value set was previously assigned to the original
    # ``storage`` while ``new_storage`` was saved, so the re-save path was
    # exercised with unchanged data.  Assign to the storage that is actually
    # saved, mirroring test_compilation/test_list.
    new_storage.data.value = example_values2[type(schema_node)]
    new_storage.save()
def apply_example_values(data_node, example_values, num_list_items=2):
    """Recursively fill ``data_node`` with values from ``example_values``.

    Compilations are filled subnode by subnode; Lists get ``num_list_items``
    freshly appended items (nested Lists always receive the default of 2,
    since the recursive calls do not forward the count); item nodes get the
    value matching their schema node type.
    """
    node_schema = data_node.schema_node
    if isinstance(node_schema, schema.Compilation):
        for subnode_name in node_schema.subnodes:
            apply_example_values(getattr(data_node, subnode_name),
                                 example_values)
    elif isinstance(node_schema, schema.List):
        for _ in range(num_list_items):
            data_node.append()
        for child in data_node:
            apply_example_values(child, example_values)
    else:
        data_node.value = example_values[type(node_schema)]
def assert_example_values(data_node, example_values, num_list_items=2):
    """Recursively assert that ``data_node`` holds ``example_values``.

    Mirror image of :func:`apply_example_values`: checks Compilation
    subnodes, List lengths/items (nested Lists checked against the default
    of 2), and item node values.
    """
    node_schema = data_node.schema_node
    if isinstance(node_schema, schema.Compilation):
        for subnode_name in node_schema.subnodes:
            assert_example_values(getattr(data_node, subnode_name),
                                  example_values)
    elif isinstance(node_schema, schema.List):
        assert len(data_node) == num_list_items
        for child in data_node:
            assert_example_values(child, example_values)
    else:
        expected = example_values[type(node_schema)]
        assert np.all(data_node.value == expected)
def test_compilation(storage_path):
    """Round-trip a nested Compilation through each storage backend.

    The schema covers every item node type, a nested Compilation, a List of
    each item node type, a List of Compilations and a List of Lists.
    """
    schema_node = schema.Compilation({
        'test_array': schema.Array(dtype='int32'),
        'test_bool': schema.Bool(),
        'test_date': schema.Date(),
        'test_datetime': schema.DateTime(),
        'test_scalar': schema.Scalar(dtype='int32'),
        'test_string': schema.String(),
        'test_time': schema.Time(),
        'test_comp': schema.Compilation({
            'comp_array': schema.Array(dtype='int32'),
            'comp_bool': schema.Bool(),
            'comp_date': schema.Date(),
            'comp_datetime': schema.DateTime(),
            'comp_scalar': schema.Scalar(dtype='int32'),
            'comp_string': schema.String(),
            'comp_time': schema.Time(),
        }),
        'test_list_array': schema.List(schema.Array(dtype='int32')),
        'test_list_bool': schema.List(schema.Bool()),
        'test_list_date': schema.List(schema.Date()),
        'test_list_datetime': schema.List(schema.DateTime()),
        'test_list_scalar': schema.List(schema.Scalar(dtype='int32')),
        'test_list_string': schema.List(schema.String()),
        'test_list_time': schema.List(schema.Time()),
        'test_complist': schema.List(schema.Compilation({
            'complist_array': schema.Array(dtype='int32'),
            'complist_bool': schema.Bool(),
            'complist_date': schema.Date(),
            'complist_datetime': schema.DateTime(),
            'complist_scalar': schema.Scalar(dtype='int32'),
            'complist_string': schema.String(),
            'complist_time': schema.Time(),
        })),
        'test_listlist': schema.List(schema.List(schema.Bool())),
    })
    storage = dsch.create(storage_path=storage_path,
                          schema_node=schema_node)
    apply_example_values(storage.data, example_values1)
    storage.data.validate()
    storage.save()
    # Re-load, verify the first value set survived, then overwrite the
    # loaded storage with the second set and save again to exercise re-save.
    new_storage = dsch.load(storage_path)
    assert_example_values(new_storage.data, example_values1)
    apply_example_values(new_storage.data, example_values2)
    new_storage.save()
@pytest.mark.parametrize('num_list_items', (0, 1, 2))
def test_list(storage_path, num_list_items):
    """Round-trip a List of Compilations, including the empty-list case.

    ``num_list_items`` controls how many items the top-level List receives;
    nested Lists always get the helper default of 2 (the recursive helper
    calls do not forward the count).
    """
    schema_node = schema.List(
        schema.Compilation({
            'test_array': schema.Array(dtype='int32'),
            'test_bool': schema.Bool(),
            'test_date': schema.Date(),
            'test_datetime': schema.DateTime(),
            'test_scalar': schema.Scalar(dtype='int32'),
            'test_string': schema.String(),
            'test_time': schema.Time(),
            'test_comp': schema.Compilation({
                'comp_array': schema.Array(dtype='int32'),
                'comp_bool': schema.Bool(),
                'comp_date': schema.Date(),
                'comp_datetime': schema.DateTime(),
                'comp_scalar': schema.Scalar(dtype='int32'),
                'comp_string': schema.String(),
                'comp_time': schema.Time(),
            }),
            'test_list_array': schema.List(schema.Array(dtype='int32')),
            'test_list_bool': schema.List(schema.Bool()),
            'test_list_date': schema.List(schema.Date()),
            'test_list_datetime': schema.List(schema.DateTime()),
            'test_list_scalar': schema.List(schema.Scalar(dtype='int32')),
            'test_list_string': schema.List(schema.String()),
            'test_list_time': schema.List(schema.Time()),
            'test_listlist': schema.List(schema.List(schema.Bool())),
        }))
    storage = dsch.create(storage_path=storage_path,
                          schema_node=schema_node)
    apply_example_values(storage.data, example_values1, num_list_items)
    storage.data.validate()
    storage.save()
    new_storage = dsch.load(storage_path)
    assert_example_values(new_storage.data, example_values1, num_list_items)
    apply_example_values(new_storage.data, example_values2, num_list_items)
    new_storage.save()
def test_list_item_order(storage_path):
    """Ensure correct item order in lists.

    Background: When lists consist of more than 10 items, the ordering should
    still be 0, 1, 2, ... instead of 0, 1, 10, 11, 2, 3, ..., with the latter
    being a possible result of simple sorting algorithms.
    """
    # Fix: the original final line carried fused dataset-export residue
    # ("| tests/test_high_level.py | import datetime"); removed.
    schema_node = schema.List(schema.Scalar(dtype='int32'))
    storage = dsch.create(storage_path=storage_path,
                          schema_node=schema_node)
    # More than 10 items, so lexicographic key sorting would misplace "10".
    num_test_items = 25
    for value in range(num_test_items):
        storage.data.append(value)
    storage.save()
    new_storage = dsch.load(storage_path)
    for index in range(num_test_items):
        assert new_storage.data[index].value == index
import numpy as np
import pytest
import dsch
# Ensure dsch.schema is automatically imported alongside the dsch package.
# Normally, we would get the "schema" shorthand via "from dsch import schema".
schema = dsch.schema
@pytest.fixture(params=('hdf5', 'mat', 'npz'))
def storage_path(request, tmpdir):
if request.param == 'hdf5':
storage_path = str(tmpdir.join('test_file.h5'))
elif request.param == 'mat':
storage_path = str(tmpdir.join('test_file.mat'))
elif request.param == 'npz':
storage_path = str(tmpdir.join('test_file.npz'))
return storage_path
example_values1 = {
schema.Array: np.array([23, 42], dtype='int32'),
schema.Bool: True,
schema.Date: datetime.date(1970, 1, 1),
schema.DateTime: datetime.datetime(1970, 1, 1, 13, 37, 42, 23),
schema.Scalar: np.int32(42),
schema.String: 'spam',
schema.Time: datetime.time(13, 37, 42, 23),
}
example_values2 = {
schema.Array: np.array([1, 2, 3], dtype='int32'),
schema.Bool: False,
schema.Date: datetime.date(1984, 5, 23),
schema.DateTime: datetime.datetime(1984, 5, 23, 1, 2, 3, 4),
schema.Scalar: np.int32(23),
schema.String: 'eggs',
schema.Time: datetime.time(1, 2, 3, 4),
}
@pytest.mark.parametrize('schema_node', (
    schema.Array(dtype='int32'),
    schema.Bool(),
    schema.Date(),
    schema.DateTime(),
    schema.Scalar(dtype='int32'),
    schema.String(),
    schema.Time(),
))
def test_item_node(storage_path, schema_node):
    """Round-trip a single item node through each storage backend."""
    storage = dsch.create(storage_path=storage_path,
                          schema_node=schema_node)
    storage.data.value = example_values1[type(schema_node)]
    storage.data.validate()
    storage.save()
    new_storage = dsch.load(storage_path)
    assert np.all(new_storage.data.value ==
                  example_values1[type(schema_node)])
    # Bug fix: the second value set was previously assigned to the original
    # ``storage`` while ``new_storage`` was saved, so the re-save path was
    # exercised with unchanged data.  Assign to the storage that is actually
    # saved, mirroring test_compilation/test_list.
    new_storage.data.value = example_values2[type(schema_node)]
    new_storage.save()
def apply_example_values(data_node, example_values, num_list_items=2):
if isinstance(data_node.schema_node, schema.Compilation):
for subnode_name in data_node.schema_node.subnodes:
apply_example_values(getattr(data_node, subnode_name),
example_values)
elif isinstance(data_node.schema_node, schema.List):
for _ in range(num_list_items):
data_node.append()
for item in data_node:
apply_example_values(item, example_values)
else:
data_node.value = example_values[type(data_node.schema_node)]
def assert_example_values(data_node, example_values, num_list_items=2):
if isinstance(data_node.schema_node, schema.Compilation):
for subnode_name in data_node.schema_node.subnodes:
assert_example_values(getattr(data_node, subnode_name),
example_values)
elif isinstance(data_node.schema_node, schema.List):
assert len(data_node) == num_list_items
for item in data_node:
assert_example_values(item, example_values)
else:
assert np.all(data_node.value ==
example_values[type(data_node.schema_node)])
def test_compilation(storage_path):
schema_node = schema.Compilation({
'test_array': schema.Array(dtype='int32'),
'test_bool': schema.Bool(),
'test_date': schema.Date(),
'test_datetime': schema.DateTime(),
'test_scalar': schema.Scalar(dtype='int32'),
'test_string': schema.String(),
'test_time': schema.Time(),
'test_comp': schema.Compilation({
'comp_array': schema.Array(dtype='int32'),
'comp_bool': schema.Bool(),
'comp_date': schema.Date(),
'comp_datetime': schema.DateTime(),
'comp_scalar': schema.Scalar(dtype='int32'),
'comp_string': schema.String(),
'comp_time': schema.Time(),
}),
'test_list_array': schema.List(schema.Array(dtype='int32')),
'test_list_bool': schema.List(schema.Bool()),
'test_list_date': schema.List(schema.Date()),
'test_list_datetime': schema.List(schema.DateTime()),
'test_list_scalar': schema.List(schema.Scalar(dtype='int32')),
'test_list_string': schema.List(schema.String()),
'test_list_time': schema.List(schema.Time()),
'test_complist': schema.List(schema.Compilation({
'complist_array': schema.Array(dtype='int32'),
'complist_bool': schema.Bool(),
'complist_date': schema.Date(),
'complist_datetime': schema.DateTime(),
'complist_scalar': schema.Scalar(dtype='int32'),
'complist_string': schema.String(),
'complist_time': schema.Time(),
})),
'test_listlist': schema.List(schema.List(schema.Bool())),
})
storage = dsch.create(storage_path=storage_path,
schema_node=schema_node)
apply_example_values(storage.data, example_values1)
storage.data.validate()
storage.save()
new_storage = dsch.load(storage_path)
assert_example_values(new_storage.data, example_values1)
apply_example_values(new_storage.data, example_values2)
new_storage.save()
@pytest.mark.parametrize('num_list_items', (0, 1, 2))
def test_list(storage_path, num_list_items):
schema_node = schema.List(
schema.Compilation({
'test_array': schema.Array(dtype='int32'),
'test_bool': schema.Bool(),
'test_date': schema.Date(),
'test_datetime': schema.DateTime(),
'test_scalar': schema.Scalar(dtype='int32'),
'test_string': schema.String(),
'test_time': schema.Time(),
'test_comp': schema.Compilation({
'comp_array': schema.Array(dtype='int32'),
'comp_bool': schema.Bool(),
'comp_date': schema.Date(),
'comp_datetime': schema.DateTime(),
'comp_scalar': schema.Scalar(dtype='int32'),
'comp_string': schema.String(),
'comp_time': schema.Time(),
}),
'test_list_array': schema.List(schema.Array(dtype='int32')),
'test_list_bool': schema.List(schema.Bool()),
'test_list_date': schema.List(schema.Date()),
'test_list_datetime': schema.List(schema.DateTime()),
'test_list_scalar': schema.List(schema.Scalar(dtype='int32')),
'test_list_string': schema.List(schema.String()),
'test_list_time': schema.List(schema.Time()),
'test_listlist': schema.List(schema.List(schema.Bool())),
}))
storage = dsch.create(storage_path=storage_path,
schema_node=schema_node)
apply_example_values(storage.data, example_values1, num_list_items)
storage.data.validate()
storage.save()
new_storage = dsch.load(storage_path)
assert_example_values(new_storage.data, example_values1, num_list_items)
apply_example_values(new_storage.data, example_values2, num_list_items)
new_storage.save()
def test_list_item_order(storage_path):
    """Ensure correct item order in lists.

    Background: When lists consist of more than 10 items, the ordering should
    still be 0, 1, 2, ... instead of 0, 1, 10, 11, 2, 3, ..., with the latter
    being a possible result of simple sorting algorithms.
    """
    # Fix: the original final line carried fused dataset-export residue
    # ("| 0.507324 | 0.529203"); removed.
    schema_node = schema.List(schema.Scalar(dtype='int32'))
    storage = dsch.create(storage_path=storage_path,
                          schema_node=schema_node)
    # More than 10 items, so lexicographic key sorting would misplace "10".
    num_test_items = 25
    for value in range(num_test_items):
        storage.data.append(value)
    storage.save()
    new_storage = dsch.load(storage_path)
    for index in range(num_test_items):
        assert new_storage.data[index].value == index
import os
import xml.etree.ElementTree as ET

from detectron2.data import DatasetCatalog, MetadataCatalog
def get_vid_dicts(data_dir):
    """Build Detectron2-style dataset dicts for ImageNet VID (+DET when training).

    Args:
        data_dir (str): root directory containing a ``VID`` subdirectory (one
            directory per video with ``Data``/``Annotations``) and, for
            training splits, a ``DET`` subdirectory of still images.

    Returns:
        list[list[dict]]: one list per video of per-frame records with keys
        ``filename``/``height``/``width``/``boxes``/``classes``/``trackids``.
        DET images are appended as single-frame "videos".
    """
    meta = MetadataCatalog.get("vid")
    dataset_dicts = []
    vid_dir = os.path.join(data_dir, "VID")
    for video in sorted(os.listdir(vid_dir)):
        records = []
        # Bug fix: os.listdir order is arbitrary, so the original unsorted
        # zip() could pair a frame with another frame's annotation file.
        # Sort both listings so names line up positionally.
        jpegs = sorted(os.listdir(os.path.join(vid_dir, video, "Data")))
        xmls = sorted(os.listdir(os.path.join(vid_dir, video, "Annotations")))
        for jpeg, xml in zip(jpegs, xmls):
            record = _parse_frame(
                os.path.join(vid_dir, video, "Data", jpeg),
                os.path.join(vid_dir, video, "Annotations", xml),
                meta,
            )
            records.append(record)
        dataset_dicts.append(records)
    # use IMAGENET DET data (still images) to augment the training set
    if "train" in data_dir:
        det_dir = os.path.join(data_dir, "DET")
        jpegs = sorted(os.listdir(os.path.join(det_dir, "Data")))
        xmls = sorted(os.listdir(os.path.join(det_dir, "Annotations")))
        for jpeg, xml in zip(jpegs, xmls):
            record = _parse_frame(
                os.path.join(det_dir, "Data", jpeg),
                os.path.join(det_dir, "Annotations", xml),
                meta,
            )
            # Each DET image becomes its own single-frame "video".
            dataset_dicts.append([record])
    return dataset_dicts


def _parse_frame(jpeg_path, xml_path, meta):
    """Parse one VOC-style annotation XML into a frame record dict."""
    record = {"filename": jpeg_path}
    tree = ET.parse(xml_path)
    record["height"] = int(tree.find("size").find("height").text)
    record["width"] = int(tree.find("size").find("width").text)
    boxes, classes, trackids = [], [], []
    for i, obj in enumerate(tree.findall("object")):
        # Bug fix: normalise the class name before the membership test as
        # well as the lookup.  The original tested the raw text but looked
        # up the lowered/stripped form, which could raise KeyError (or skip
        # valid objects) whenever the raw text differed from the dict keys.
        # Assumes classes_to_ind keys are lower-case/stripped — matches the
        # original lookup expression.
        obj_name = obj.find("name").text.lower().strip()
        if obj_name not in meta.classes_to_ind:
            continue
        bbox = obj.find("bndbox")
        boxes.append([
            float(bbox.find("xmin").text), float(bbox.find("ymin").text),
            float(bbox.find("xmax").text), float(bbox.find("ymax").text)
        ])
        classes.append(meta.classes_to_ind[obj_name])
        # VID annotations carry a <trackid>; DET ones use the object's index
        # within the file (matching the original enumerate-based ids).
        trackid = obj.find("trackid")
        trackids.append(int(trackid.text) if trackid is not None else i)
    record["boxes"] = boxes
    record["classes"] = classes
    record["trackids"] = trackids
    return record
def register_vid_instances(name, metadata, image_root):
    """
    Register an ImageNet VID dataset with Detectron2's catalogs.

    Args:
        name (str): the name that identifies a dataset, e.g. "coco_2014_train".
        metadata (dict): extra metadata associated with this dataset. You can
            leave it as an empty dict.
        image_root (str or path-like): directory which contains all the images.
    """
    # Fix: the original final line carried fused dataset-export residue.
    assert isinstance(name, str), name
    assert isinstance(image_root, (str, os.PathLike)), image_root
    # 1. register a function which returns dicts.  Reference get_vid_dicts
    # directly instead of the original globals()[f"get_vid_dicts"] lookup
    # (a placeholder-less f-string resolving a name already in scope).
    DatasetCatalog.register(name, lambda: get_vid_dicts(image_root))
    # 2. Optionally, add metadata about this dataset,
    # since they might be useful in evaluation, visualization or logging
    MetadataCatalog.get(name).set(
        image_root=image_root, evaluator_type="vid", **metadata
    )
from detectron2.data import DatasetCatalog, MetadataCatalog
def get_vid_dicts(data_dir):
meta = MetadataCatalog.get("vid")
dataset_dicts = []
vid_dir = os.path.join(data_dir, "VID")
for video in os.listdir(vid_dir):
records = []
jpegs = os.listdir(os.path.join(vid_dir, video, "Data"))
xmls = os.listdir(os.path.join(vid_dir, video, "Annotations"))
for jpeg, xml in zip(jpegs, xmls):
record = {}
record["filename"] = os.path.join(vid_dir, video, "Data", jpeg)
tree = ET.parse(os.path.join(vid_dir, video, "Annotations", xml))
record["height"] = int(tree.find("size").find("height").text)
record["width"] = int(tree.find("size").find("width").text)
boxes, classes, trackids = [], [], []
objects = tree.findall("object")
for obj in objects:
if not obj.find("name").text in meta.classes_to_ind:
continue
bbox = obj.find("bndbox")
box = [
float(bbox.find("xmin").text), float(bbox.find("ymin").text),
float(bbox.find("xmax").text), float(bbox.find("ymax").text)
]
boxes.append(box)
classes.append(
meta.classes_to_ind[obj.find("name").text.lower().strip()]
)
trackids.append(int(obj.find("trackid").text))
record["boxes"] = boxes
record["classes"] = classes
record["trackids"] = trackids
records.append(record)
dataset_dicts.append(records)
# use IMAGENET DET data
if "train" in data_dir:
det_dir = os.path.join(data_dir, "DET")
jpegs = os.listdir(os.path.join(det_dir, "Data"))
xmls = os.listdir(os.path.join(det_dir, "Annotations"))
for jpeg, xml in zip(jpegs, xmls):
record = {}
record["filename"] = os.path.join(det_dir, "Data", jpeg)
tree = ET.parse(os.path.join(det_dir, "Annotations", xml))
record["height"] = int(tree.find("size").find("height").text)
record["width"] = int(tree.find("size").find("width").text)
boxes, classes, trackids = [], [], []
objects = tree.findall("object")
for i, obj in enumerate(objects):
if not obj.find("name").text in meta.classes_to_ind:
continue
bbox = obj.find("bndbox")
box = [
float(bbox.find("xmin").text), float(bbox.find("ymin").text),
float(bbox.find("xmax").text), float(bbox.find("ymax").text)
]
boxes.append(box)
classes.append(
meta.classes_to_ind[obj.find("name").text.lower().strip()]
)
trackids.append(i)
record["boxes"] = boxes
record["classes"] = classes
record["trackids"] = trackids
dataset_dicts.append([record])
return dataset_dicts
def register_vid_instances(name, metadata, image_root):
    """
    Register an ImageNet VID dataset with Detectron2's catalogs.

    Args:
        name (str): the name that identifies a dataset, e.g. "coco_2014_train".
        metadata (dict): extra metadata associated with this dataset. You can
            leave it as an empty dict.
        image_root (str or path-like): directory which contains all the images.
    """
    # Fix: the original final line carried fused dataset-export residue
    # ("| 0.468547 | 0.174235"); removed.
    assert isinstance(name, str), name
    assert isinstance(image_root, (str, os.PathLike)), image_root
    # 1. register a function which returns dicts.  Reference get_vid_dicts
    # directly instead of the original globals()[f"get_vid_dicts"] lookup
    # (a placeholder-less f-string resolving a name already in scope).
    DatasetCatalog.register(name, lambda: get_vid_dicts(image_root))
    # 2. Optionally, add metadata about this dataset,
    # since they might be useful in evaluation, visualization or logging
    MetadataCatalog.get(name).set(
        image_root=image_root, evaluator_type="vid", **metadata
    )
import glob
import multiprocessing as mp
from multiprocessing import shared_memory
import argparse
import numpy as np
import xarray as xr
import os
import time
pool = None
data_vars = None
def load_file(file_name, var_mems):
    '''Load a netcdf dataset into memory.

    Opens ``file_name`` and copies its tile into the shared-memory arrays
    referenced by ``var_mems`` (see subset()).  Returns a status string for
    the pool's result list.
    '''
    subset(xr.open_dataset(file_name), var_mems)
    return 'loaded ' + file_name
def get_dims(dataset, section="d"):
    '''Get the global attributes defining the domain, memory, or tile space.

    Reads the six attributes {i,j,k}{section}{s,e} (e.g. ids/ide/jds/... for
    section="d") and returns them as a list of ints in that order.
    '''
    return [int(dataset.attrs[axis + section + position])
            for axis in "ijk" for position in "se"]
def get_dim_offset(dims):
    '''Return x_offset, y_offset for a variable's dimension names.

    Staggered dimensions get one extra grid point: offset 1 when 'lon_u'
    (x) or 'lat_v' (y) is present, 0 otherwise.
    '''
    return int('lon_u' in dims), int('lat_v' in dims)
def set_up_data_vars(d, var_names):
    '''Allocate shared-memory-backed full-domain arrays for each variable.

    d         : template xarray dataset (one tile) carrying the global domain
                attributes (ids/ide/jds/jde/kds/kde) and per-variable
                dims/attrs.
    var_names : iterable of variable names to allocate, or None for every
                variable in d.

    Returns (data_vars, shms): data_vars[v] is an xr.DataArray backed by the
    SharedMemory segment shms[v].  The caller owns the segments and must
    close() and unlink() them when done (see agg_file()).
    '''
    ids, ide, jds, jde, kds, kde = get_dims(d, section='d')
    nx = ide - ids + 1
    ny = jde - jds + 1
    data_vars = dict()
    shms = dict()
    if var_names is None:
        var_names = d.variables
    for v in var_names:
        dims = d[v].dims
        name = d[v].name
        attrs = d[v].attrs
        # Staggered dims (lon_u/lat_v) get one extra grid point.
        x_off, y_off = get_dim_offset(dims)
        if len(dims) == 1:
            data = np.zeros((d.dims[dims[0]],))
        elif len(dims) == 2:
            data = np.zeros((ny + y_off, nx + x_off))
        elif len(dims) == 3:
            data = np.zeros((d.dims[dims[0]], ny + y_off, nx + x_off))
        elif len(dims) == 4:
            data = np.zeros((d.dims[dims[0]], d.dims[dims[1]],
                             ny + y_off, nx + x_off))
        else:
            # Bug fix: the original chain of independent ifs left ``data``
            # holding the previous iteration's buffer for 0-d or >4-d
            # variables, silently aliasing two variables; fail loudly.
            raise ValueError(
                'unsupported number of dimensions for variable '
                '{}: {}'.format(v, len(dims)))
        # Back the array with shared memory so worker processes can write
        # their tile into it directly (attached by name in subset()).
        shm = mp.shared_memory.SharedMemory(create=True, size=data.nbytes)
        shms[v] = shm
        shm_data = np.ndarray(data.shape, dtype=data.dtype, buffer=shm.buf)
        data_vars[v] = xr.DataArray(shm_data, dims=dims, name=name,
                                    attrs=attrs)
    return data_vars, shms
def set_up_dataset(d, data_vars):
    '''Create a dataset to cover the entire domain with the variables present in d

    d : an input dataset covering part of the domain
        d must have global attributes ids, ide, jds, jde, kds, kde that define
        the full domain
    data_vars : dict of full-domain (shared-memory-backed) DataArrays built
        by set_up_data_vars()

    A new dataset is created with all the variables+attributes in d covering
    the full domain.
    '''
    # Carry the template's global attributes, encoding and time axis over to
    # the aggregated dataset.
    ds = xr.Dataset(data_vars, attrs=d.attrs)
    ds.encoding = d.encoding
    ds["time"] = d["time"]
    # Collect (without duplicates) every coordinate referenced by a kept
    # variable that is itself present in data_vars, then flag those names as
    # coordinates on the new dataset.
    cords = []
    for v in d.variables:
        if v in data_vars:
            for c in d[v].coords:
                if not(c in cords) and (c in data_vars):
                    cords.append(c)
    return ds.set_coords(cords)
def agg_file(first_file,var_names,verbose=True):
    '''Aggregate all files that come from the same time step as first_file

    first_file should have _000001_ in the filename somewhere. This will be
    replaced with * to search for all matching files from this date. Once
    files are found, a dataset containing the entire domain is created and
    the data from each file are added to the master dataset.

    Uses the module-level worker ``pool``; workers write their tiles
    directly into the shared-memory arrays.

    Result: aggregated dataset is written to a netcdf file'''
    if verbose:print(first_file)
    # Glob pattern matching every ensemble member's file for this time step.
    date_search = first_file.replace("_000001_","*")
    # Output name: drop the member id, collapsing the doubled underscore.
    outputfile = first_file.replace("000001_","_").replace("__","_")
    if os.path.isfile(outputfile):
        # Already aggregated by an earlier run/poll; nothing to do.
        return
    this_date_files = glob.glob(date_search)
    this_date_files.sort()
    # Any member's file can serve as the template for dims/attrs/encoding.
    template = xr.open_dataset(this_date_files[0])
    data_vars, shms = set_up_data_vars(template,var_names)
    args = [(d,shms) for d in this_date_files]
    # Workers attach to the shared-memory segments by name and fill their
    # tile in place (load_file -> subset).
    results = pool.starmap_async(load_file, args)
    #Just use get to wait for result
    message = results.get()
    data_set = set_up_dataset(template,data_vars)
    data_set.load().to_netcdf(outputfile)
    # Release the shared-memory segments now that the data is on disk.
    for key in shms:
        shms[key].close()
        shms[key].unlink()
def subset(d,var_mems):
    '''Copy this tile's data from dataset ``d`` into the shared-memory arrays.

    d        : xarray dataset for one tile, carrying domain ("d"), memory
               ("m") and tile ("t") extents in its global attributes.
    var_mems : dict mapping variable name -> SharedMemory segment holding
               the full-domain array for that variable (from
               set_up_data_vars()).
    '''
    ids, ide, jds, jde, kds, kde = get_dims(d, section='d')
    ims, ime, jms, jme, kms, kme = get_dims(d, section='m')
    its, ite, jts, jte, kts, kte = get_dims(d, section='t')
    # Tile extent as slice indices into this file's (memory-space) arrays...
    xts, xte = its - ims, ite - ims + 1
    yts, yte = jts - jms, jte - jms + 1
    zts, zte = kts - kms, kte - kms + 1
    # ...and as slice indices into the full-domain target arrays.
    xs, xe = its - ids, ite - ids + 1
    ys, ye = jts - jds, jte - jds + 1
    zs, ze = kts - kds, kte - kds + 1
    nx = ide - ids + 1
    ny = jde - jds + 1
    nz = kde - kds + 1
    # NOTE(review): these edge-of-domain adjustments run AFTER all slice
    # indices above were computed, and its/ite/jts/jte are never read again,
    # so they currently have no effect.  They look like they were intended
    # to widen edge tiles to include the memory halo BEFORE the offset
    # computation — confirm intent before relying on domain-edge behaviour.
    if ims==ids:
        its = ids
    if ime==ide:
        ite = ide
    if jms==jds:
        jts = jds
    if jme==jde:
        jte = jde
    for v in var_mems:
        dims = d[v].dims
        # Attach to the existing segment by name; each worker opens (and
        # below closes) its own handle.
        existing_mem = mp.shared_memory.SharedMemory(name=var_mems[v].name)
        x_off, y_off = get_dim_offset(dims)
        if len(dims) == 2:
            # NOTE(review): unlike the 3-D/4-D branches, the 2-D copy does
            # not extend its slices by x_off/y_off — confirm whether any
            # 2-D variables are staggered.
            data = np.ndarray((ny + y_off, nx + x_off),buffer=existing_mem.buf)
            data[ys:ye, xs:xe] = d[v].values[yts:yte, xts:xte]
        if len(dims) == 3:
            data = np.ndarray((d.dims[dims[0]], ny + y_off, nx + x_off),buffer=existing_mem.buf)
            # Leading dim is either time (copied whole) or vertical levels
            # (sliced like the horizontal dims).
            if dims[0] == "time":
                data[:, ys:ye+y_off, xs:xe+x_off] = d[v].values[:, yts:yte+y_off, xts:xte+x_off]
            else:
                data[zs:ze, ys:ye+y_off, xs:xe+x_off] = d[v].values[zts:zte, yts:yte+y_off, xts:xte+x_off]
        if len(dims) == 4:
            nt = d.dims[dims[0]]
            nz = d.dims[dims[1]]
            data = np.ndarray((nt, nz, ny + y_off, nx + x_off),buffer=existing_mem.buf)
            data[:,zs:ze, ys:ye+y_off, xs:xe+x_off] = d[v].values[:,zts:zte, yts:yte+y_off, xts:xte+x_off]
        existing_mem.close()
    return
def main(file_search, cpus, var_names):
    '''Aggregate every time step found for ensemble member 000001, in order.'''
    for first_file in sorted(glob.glob(file_search.format(ens="000001"))):
        agg_file(first_file, var_names, verbose=True)
def continuous(file_search,cpus,var_names):
    '''Poll for new output files forever, aggregating completed time steps.

    Checks every 10 seconds for member-000001 files; already-aggregated time
    steps are skipped inside agg_file().  Runs until interrupted — the
    caller handles KeyboardInterrupt.
    '''
    print("Running continuous aggregation, Ctrl-C to stop")
    while True:
        first_files = glob.glob(file_search.format(ens="000001"))
        first_files.sort()
        # skip the last file in the list as ICAR might still be running
        for f in first_files[:-1]:
            agg_file(f, var_names,verbose=False)
        time.sleep(10)  # poll interval in seconds
if __name__ == '__main__':
    # Fix: the original final line carried fused dataset-export residue
    # ("| helpers/aggregate_parallel_files_par.py |"); removed.
    # Default glob pattern for the files to aggregate; {ens} is replaced
    # with the six-digit ensemble-member id.  Overridable via -s.
    file_search = "icar_out_{ens}_*"

    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--n_cpus", type=int,
                        help="Number of cpus to use")
    parser.add_argument("--continuous", action="store_true",
                        help="Use continuous aggregation of files")
    parser.add_argument("-v", "--vars", type=str,
                        help="File containing var names to save (comma-delimited; .csv)")
    parser.add_argument("-s", "--search_string", type=str,
                        help="Format of search string")
    args = parser.parse_args()

    # Use all cores unless a smaller positive count was requested.
    cpus = mp.cpu_count()
    if args.n_cpus and 0 < args.n_cpus < cpus:
        cpus = args.n_cpus
    # Optional subset of variables to aggregate (None = all variables).
    if args.vars:
        var_names = np.loadtxt(args.vars, dtype=str, delimiter=',')
    else:
        var_names = None
    if args.search_string:
        file_search = args.search_string

    # 'spawn' keeps workers free of inherited state; they attach to the
    # shared-memory segments by name (see subset()).
    mp.set_start_method('spawn')
    pool = mp.Pool(cpus)
    if args.continuous:
        try:
            continuous(file_search, cpus, var_names)
        except KeyboardInterrupt:
            pass  # Ctrl-C is the documented way to stop continuous mode.
    else:
        main(file_search, cpus, var_names)
    pool.close()
import glob
import multiprocessing as mp
from multiprocessing import shared_memory
import argparse
import numpy as np
import xarray as xr
import os
import time
pool = None
data_vars = None
def load_file(file_name,var_mems):
'''Load a netcdf dataset into memory'''
d = xr.open_dataset(file_name)
subset(d,var_mems)
return 'loaded '+file_name
def get_dims(dataset, section="d"):
'''Get the global attributes defining the domain, memory, or tile space'''
results = []
for axis in ["i","j","k"]:
for position in ["s","e"]:
results.append(int(dataset.attrs[axis + section + position]))
return results
def get_dim_offset(dims):
'''Return x_offset, y_offset
For the staggered dims, offset=1, otherwise offset=0'''
x_off = 0
if 'lon_u' in dims: x_off = 1
y_off = 0
if 'lat_v' in dims: y_off = 1
return x_off, y_off
def set_up_data_vars(d,var_names):
ids, ide, jds, jde, kds, kde = get_dims(d, section='d')
nx = ide - ids + 1
ny = jde - jds + 1
nz = kde - kds + 1
data_vars = dict()
shms = dict()
if var_names is None: var_names = d.variables
for v in var_names:
coords = [c for c in d[v].coords]
dims = d[v].dims
name = d[v].name
attrs = d[v].attrs
x_off, y_off = get_dim_offset(dims)
if len(dims) == 1:
nt = d.dims[dims[0]]
data = np.zeros((nt))
if len(dims) == 2:
data = np.zeros((ny + y_off, nx + x_off))
if len(dims) == 3:
data = np.zeros((d.dims[dims[0]], ny + y_off, nx + x_off))
if len(dims) == 4:
nt = d.dims[dims[0]]
nz = d.dims[dims[1]]
data = np.zeros((nt, nz, ny + y_off, nx + x_off))
#make data into shared memory
shm = mp.shared_memory.SharedMemory(create=True, size=data.nbytes)
shms[v] = shm
shm_data = np.ndarray(data.shape, dtype=data.dtype, buffer=shm.buf)
# print(name, data.shape, dims, attrs)
data_vars[v] = xr.DataArray(shm_data, dims=dims, name=name, attrs=attrs)#, coords=coords)
return data_vars, shms
def set_up_dataset(d,data_vars):
'''Create a dataset to cover the entire domain with the variables present in d
d : an input dataset covering part of the domain
d must have global attributes ids, ide, jds, jde, kds, kde that define the full domain
A new dataset is created with all the variables+attributes in d covering the full domain
'''
ds = xr.Dataset(data_vars, attrs=d.attrs)
ds.encoding = d.encoding
ds["time"] = d["time"]
cords = []
for v in d.variables:
if v in data_vars:
for c in d[v].coords:
if not(c in cords) and (c in data_vars):
cords.append(c)
return ds.set_coords(cords)
def agg_file(first_file,var_names,verbose=True):
'''Aggregated all files that come from the same time step as first_file
first_file should have _001_ in the filename somewhere. This will be replaced
with * to search for all matching files from this date. Once files are found, a
dataset containing the entire domain is created and the data from each file are
added to the master dataset.
Result: aggregated dataset is written to a netcdf file'''
if verbose:print(first_file)
date_search = first_file.replace("_000001_","*")
outputfile = first_file.replace("000001_","_").replace("__","_")
if os.path.isfile(outputfile):
return
this_date_files = glob.glob(date_search)
this_date_files.sort()
template = xr.open_dataset(this_date_files[0])
data_vars, shms = set_up_data_vars(template,var_names)
args = [(d,shms) for d in this_date_files]
results = pool.starmap_async(load_file, args)
#Just use get to wait for result
message = results.get()
data_set = set_up_dataset(template,data_vars)
data_set.load().to_netcdf(outputfile)
for key in shms:
shms[key].close()
shms[key].unlink()
def subset(d,var_mems):
ids, ide, jds, jde, kds, kde = get_dims(d, section='d')
ims, ime, jms, jme, kms, kme = get_dims(d, section='m')
its, ite, jts, jte, kts, kte = get_dims(d, section='t')
xts, xte = its - ims, ite - ims + 1
yts, yte = jts - jms, jte - jms + 1
zts, zte = kts - kms, kte - kms + 1
xs, xe = its - ids, ite - ids + 1
ys, ye = jts - jds, jte - jds + 1
zs, ze = kts - kds, kte - kds + 1
nx = ide - ids + 1
ny = jde - jds + 1
nz = kde - kds + 1
if ims==ids:
its = ids
if ime==ide:
ite = ide
if jms==jds:
jts = jds
if jme==jde:
jte = jde
for v in var_mems:
dims = d[v].dims
existing_mem = mp.shared_memory.SharedMemory(name=var_mems[v].name)
x_off, y_off = get_dim_offset(dims)
if len(dims) == 2:
data = np.ndarray((ny + y_off, nx + x_off),buffer=existing_mem.buf)
data[ys:ye, xs:xe] = d[v].values[yts:yte, xts:xte]
if len(dims) == 3:
data = np.ndarray((d.dims[dims[0]], ny + y_off, nx + x_off),buffer=existing_mem.buf)
if dims[0] == "time":
data[:, ys:ye+y_off, xs:xe+x_off] = d[v].values[:, yts:yte+y_off, xts:xte+x_off]
else:
data[zs:ze, ys:ye+y_off, xs:xe+x_off] = d[v].values[zts:zte, yts:yte+y_off, xts:xte+x_off]
if len(dims) == 4:
nt = d.dims[dims[0]]
nz = d.dims[dims[1]]
data = np.ndarray((nt, nz, ny + y_off, nx + x_off),buffer=existing_mem.buf)
data[:,zs:ze, ys:ye+y_off, xs:xe+x_off] = d[v].values[:,zts:zte, yts:yte+y_off, xts:xte+x_off]
existing_mem.close()
return
def main(file_search,cpus,var_names):
first_files = glob.glob(file_search.format(ens="000001"))
first_files.sort()
for f in first_files:
agg_file(f,var_names,verbose=True)
def continuous(file_search,cpus,var_names):
print("Running continuous aggregation, Ctrl-C to stop")
while True:
first_files = glob.glob(file_search.format(ens="000001"))
first_files.sort()
# skip the last file in the list as ICAR might still be running
for f in first_files[:-1]:
agg_file(f, var_names,verbose=False)
time.sleep(10)
if __name__ == '__main__':
# This should be an input, this is the search string that is assumed to match
# the output files to be aggregated.
file_search = "icar_out_{ens}_*"
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--n_cpus", type=int,
help="Number of cpus to use")
parser.add_argument("--continuous", action="store_true",
help="Use continuous aggregation of files")
parser.add_argument("-v", "--vars", type=str,
help="File containing var names to save (comma-delimited; .csv)")
parser.add_argument("-s", "--search_string", type=str,
help="Format of search string")
args = parser.parse_args()
cpus = mp.cpu_count()
if ( args.n_cpus and args.n_cpus > 0 and args.n_cpus < cpus): cpus = args.n_cpus
if ( args.vars): var_names = np.loadtxt(args.vars,dtype=str,delimiter=',')
else: var_names = None
if (args.search_string): file_search = args.search_string
mp.set_start_method('spawn')
pool = mp.Pool(cpus)
if args.continuous:
try:
continuous(file_search,cpus,var_names)
except KeyboardInterrupt:
pass
else:
main(file_search,cpus,var_names)
pool.close() | 0.252016 | 0.33704 |
def date_visualization(queries, query_results, graph_filename, csv_filename,
                       month_flag=False, year_flag=True, weight_flag=False,
                       cite_flag=False, database='CrossRef'):
    """Plot and save (weighted) publication counts over time for a set of queries.

    Parameters
    ----------
    queries : list of str
        Search terms; used as column labels and in the legend/title.
    query_results : list of pandas.DataFrame
        One frame per query, date-indexed, with a 'weights' column
        (weight 1 per record gives plain counts).
    graph_filename, csv_filename : str
        Output base names; '.png' / '.csv' extensions are appended.
    month_flag, year_flag : bool
        Aggregate counts monthly or yearly (yearly is the default; yearly is
        also used if neither flag is set).
    weight_flag, cite_flag : bool
        Whether counts are weighted by relevance score and/or citation
        count; affects only the axis/title wording.
    database : str
        Source database name, used in labels.

    Side effects: writes <graph_filename>.png and <csv_filename>.csv.
    """
    import numpy as np

    def counts(dates):
        # Sum the per-record weights into one total per period.
        import pandas as pd
        if month_flag:
            freq = 'M'
            date_format = '%Y-%m'
        else:
            # Yearly buckets; also covers month_flag == year_flag == False,
            # which previously crashed with an unbound 'freq'.
            freq = 'AS'
            date_format = '%Y'
        daterange = pd.date_range(start=dates.index.min(), end=dates.index.max(), freq=freq)
        daterange = daterange.strftime(date_format=date_format)
        totals = []
        for date in daterange:
            totals.append(count_and_weight(dates, date))
        final = pd.DataFrame(index=daterange)
        final['counts'] = totals
        return final

    def count_and_weight(df, date):
        # Partial-string indexing: df[date] selects every row in that period.
        period = df[date]
        return sum(period['weights'])

    def title_format(queries, weight_print):
        # '"a"', '"a" and "b"', or '"a", "b", and "c"', plus the weight text.
        quoted = ['"' + query + '"' for query in queries]
        if len(quoted) == 1:
            text = quoted[0]
        elif len(quoted) == 2:
            text = quoted[0] + ' and ' + quoted[1]
        else:
            text = '{}, and {}'.format(', '.join(quoted[:-1]), quoted[-1])
        if weight_print:
            # Fixed: the original concatenated without a separating space.
            text += ' ' + weight_print
        return text

    def weight_database_print(database, relev_flag=False, cite_flag=False):
        # Human-readable description of how the counts are weighted.
        if relev_flag and cite_flag:
            return 'weighted by relevance score and citation count from ' + database
        if relev_flag:
            return 'weighted by relevance score from ' + database
        if cite_flag:
            # Fixed: the original condition was inverted ('cite_flag != True'),
            # so the citation-count label appeared when cite_flag was False.
            return 'weighted by citation count from ' + database
        return 'from ' + database

    import matplotlib
    # matplotlib.use('Agg')  # uncomment to prevent anything from printing out
    import matplotlib.pyplot as plt
    import matplotlib.dates as mdates
    import pandas as pd
    from matplotlib.ticker import MaxNLocator
    font = {'family': 'georgia', 'weight': 'normal', 'size': 16}
    matplotlib.rc('font', **font)
    fig, ax = plt.subplots(figsize=(25, 15))
    years = mdates.YearLocator()
    # Aggregate each query's results, then align them on a common date index.
    all_counts = [counts(dates) for dates in query_results]
    all_dates = pd.concat(all_counts, join='outer', axis=1)
    all_dates = all_dates.fillna(0)
    all_dates.columns = queries
    all_dates.index.name = 'Dates'
    assert all_dates.shape[1] == len(queries), 'number of labels does not match number of cols'
    for col in range(all_dates.shape[1]):
        plt.plot_date(x=all_dates.index, y=all_dates.iloc[:, col],
                      linestyle='solid', label='"' + queries[col] + '"')
    # Fixed: the original shadowed the helper's name and passed month/year
    # flags where the relevance/citation flags were expected.
    weight_label = weight_database_print(database, weight_flag, cite_flag)
    if weight_label is not None:
        ax.set_ylabel('Number of Publications ' + weight_label, size=20)
    # Publication counts are whole numbers -- force integer y ticks.
    fig.gca().yaxis.set_major_locator(MaxNLocator(integer=True))
    ax.set_xlabel('Year', size=20)
    ax.set_title('Number of Publications on ' + title_format(queries, weight_label), size=24)
    # tick_optimizer is defined elsewhere in this module -- presumably it picks
    # a readable x-tick interval for the given year span; confirm.
    tick_intvl = tick_optimizer(int(all_dates.index[0]), int(all_dates.index[-1]))
    plt.xticks(np.arange(0, len(all_dates.index), tick_intvl), all_dates.index[::tick_intvl])
    ax.xaxis.set_minor_locator(years)
    plt.legend(loc='best', prop={'size': 18}, labelspacing=1.0, borderpad=0.5)
    fig.savefig(graph_filename + '.png')
    all_dates.to_csv(csv_filename + '.csv')
import numpy as np
def counts(dates):
import pandas as pd
if month_flag==True:
freq='M'
date_format='%Y-%m'
elif year_flag==True:
freq='AS'
date_format='%Y'
daterange = pd.date_range(start=dates.index.min(),end=dates.index.max(),freq=freq)
daterange = daterange.strftime(date_format=date_format)
counts = []
for date in daterange:
counter = count_and_weight(dates,date)
counts.append(counter)
final=pd.DataFrame(index=daterange)
final['counts']=counts
return final
def count_and_weight(df,date):
count = df[date]
return sum(count['weights'])
def title_format(queries,weight_print):
query_format = ['"'+query+'"' for query in queries]
title_format = ''
if len(query_format)==1:
title_format+=query_format[0]
elif len(query_format)==2:
title_format+=query_format[0]+' and '+query_format[1]
else:
title_format+='{}, and {}'.format(', '.join(query_format[:-1]), query_format[-1])
if weight_print:
title_format+= weight_print
return title_format
else:
return title_format
def weight_database_print(database,relev_flag=False,cite_flag=False):
if (relev_flag==True):
if cite_flag==True:
weight_print = 'weighted by relevance score and citation count from '+database
else:
weight_print = 'weighted by relevance score from '+database
elif cite_flag!=True:
weight_print = 'weighted by citation count from '+database
else:
weight_print = 'from '+database
return weight_print
import matplotlib
#matplotlib.use('Agg') #prevents anything from printing out
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
from matplotlib.ticker import MaxNLocator
font = {'family' : 'georgia',
'weight' : 'normal',
'size' : 16}
matplotlib.rc('font', **font)
fig,ax=plt.subplots(figsize=(25,15))
years = mdates.YearLocator()
# yearsFmt = mdates.DateFormatter('%Y')
# ax.xaxis.set_major_formatter(yearsFmt)
# ax.xaxis.set_major_locator(years)
all_counts=[]
for dates in query_results:
all_counts.append(counts(dates))
all_dates = pd.concat(all_counts,join='outer',axis=1)
all_dates = all_dates.fillna(0)
all_dates.columns = queries
all_dates.index.name = 'Dates'
assert all_dates.shape[1]==len(queries),'number of labels does not match number of cols'
for col in range(all_dates.shape[1]):
plt.plot_date(x=all_dates.index,y=all_dates.iloc[:,col],linestyle='solid',label='"'+queries[col]+'"')
weight_database_print = weight_database_print(database,month_flag,year_flag)
if weight_database_print is not None:
ax.set_ylabel('Number of Publications '+weight_database_print,size=20)
start, end = ax.get_xlim()
#only int type on y axis
fig.gca().yaxis.set_major_locator(MaxNLocator(integer=True))
ax.set_xlabel('Year',size=20)
ax.set_title('Number of Publications on ' + title_format(queries,weight_database_print),size=24)
start, end = ax.get_xlim()
tick_intvl = tick_optimizer(int(all_dates.index[0]),int(all_dates.index[-1]))
plt.xticks(np.arange(0,len(all_dates.index),tick_intvl),all_dates.index[::tick_intvl])
#include minor tick marks somehows
ax.xaxis.set_minor_locator(years)
plt.legend(loc='best',prop={'size': 18},labelspacing=1.0,borderpad=0.5)
fig.savefig(graph_filename+'.png')
all_dates.to_csv(csv_filename+'.csv') | 0.206494 | 0.302803 |
from collections import OrderedDict
from sqlalchemy import create_engine
from sqlalchemy import ForeignKey, ForeignKeyConstraint, Column, Integer, String, DateTime
from sqlalchemy.types import Boolean
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import config
# Database connection URL is read from the application's config
# ("DatabaseConnection" section, "url" key; empty-string default).
__config = config.Config()
engine = create_engine(__config.get_value("DatabaseConnection", "url", ""))
Session = sessionmaker(bind=engine)  # session factory bound to the engine
Base = declarative_base()  # declarative base shared by all ORM models below
class ActiveAlarm(Base):
    """An alarm currently active on a monitored source.

    Composite primary key (alarm_id, alarm_source): alarm ids are only
    unique per source system.
    """
    __tablename__ = "active_alarm"
    alarm_id = Column(Integer, primary_key=True)
    alarm_source = Column(String, ForeignKey("source.source_name"), primary_key=True)
    alarm_uei = Column(String)  # event UEI (event-type identifier) -- presumably OpenNMS-style; confirm
    alarm_timestamp = Column(DateTime)
    alarm_severity = Column(String)
    alarm_node_label = Column(String)
    alarm_node_interface = Column(String)
    alarm_node_service = Column(String)
    alarm_logmsg = Column(String)
    alarm_description = Column(String)
    alarm_operinstruct = Column(String)  # operator instructions, if provided
    # Child rows are removed together with the alarm (delete-orphan cascade).
    parameters = relationship("ActiveAlarmParm", cascade="all, delete-orphan")
    forwarding_entries = relationship("ForwardedAlarm", cascade="all, delete-orphan")
    source = relationship("Source")

    def __str__(self):
        """One-line human-readable summary (for logs/CLI output)."""
        output = "ID: " + str(self.alarm_id)
        output += " UEI: " + self.alarm_uei
        output += " Time: " + str(self.alarm_timestamp)
        output += " Logmessage: " + self.alarm_logmsg
        return output
class ActiveAlarmParm(Base):
    """A single name/value parameter attached to an ActiveAlarm."""
    __tablename__ = "active_alarm_parm"
    alarm_id = Column(Integer, primary_key=True)
    alarm_source = Column(String, primary_key=True)
    parm_name = Column(String, primary_key=True)
    parm_value = Column(String)
    # Composite FK back to the owning alarm's composite primary key.
    __table_args__ = (ForeignKeyConstraint(["alarm_id", "alarm_source"], ["active_alarm.alarm_id", "active_alarm.alarm_source"]), {})
    alarm = relationship("ActiveAlarm")
class ForwardingRule(Base):
    """Rule describing which alarms are forwarded to which target."""
    __tablename__ = "forwarding_rule"
    rule_id = Column(Integer, primary_key=True)
    rule_match = Column(String)  # match expression -- format defined by the consumer; confirm
    rule_delay = Column(Integer)  # NOTE(review): presumably a delay before forwarding; units not visible here
    rule_maxforwardings = Column(Integer)  # upper bound on forwardings per alarm, presumably; confirm
    rule_target = Column(String, ForeignKey("target.target_name"))
    forwarding_entries = relationship("ForwardedAlarm", cascade="all, delete-orphan")
    target = relationship("Target")

    def dict_repr(self):
        """Ordered dict of the rule's scalar columns (serialization-friendly)."""
        data = OrderedDict([
            ("rule_id", self.rule_id),
            ("rule_match", self.rule_match),
            ("rule_delay", self.rule_delay),
            ("rule_maxforwardings", self.rule_maxforwardings),
            ("rule_target", self.rule_target)
        ])
        return data
class ForwardedAlarm(Base):
    """Join record: a specific alarm forwarded under a specific rule."""
    __tablename__ = "forwarded_alarm"
    alarm_id = Column(Integer, primary_key=True)
    alarm_source = Column(String, primary_key=True)
    rule_id = Column(Integer, ForeignKey("forwarding_rule.rule_id"), primary_key=True)
    forwarded = Column(String)  # forwarding state -- valid values not visible in this module; confirm
    forwarder_reference = Column(String)  # opaque reference handed back by the forwarder, presumably; confirm
    # Composite FK back to the alarm's composite primary key.
    __table_args__ = (ForeignKeyConstraint(["alarm_id", "alarm_source"], ["active_alarm.alarm_id", "active_alarm.alarm_source"]), {})
    alarm = relationship("ActiveAlarm")
    rule = relationship("ForwardingRule")
class Target(Base):
    """A forwarding destination, configured via target_class + target_parms."""
    __tablename__ = "target"
    target_name = Column(String, primary_key=True)
    target_class = Column(String)  # implementation class name -- resolved elsewhere; confirm
    target_parms = relationship("TargetParameter", cascade="all, delete-orphan")
    forwarding_rules = relationship("ForwardingRule", cascade="all, delete-orphan")

    def dict_repr(self):
        """Ordered dict of the target with its parameters as a flat mapping."""
        data = OrderedDict([
            ("target_name", self.target_name),
            ("target_class", self.target_class),
            ("target_parms", {parm.parameter_name: parm.parameter_value for parm in self.target_parms})
        ])
        return data
class TargetParameter(Base):
    """A single name/value configuration parameter of a Target."""
    __tablename__ = "target_parm"
    target_name = Column(String, ForeignKey("target.target_name"), primary_key=True)
    parameter_name = Column(String, primary_key=True)
    parameter_value = Column(String)
    target = relationship("Target")
class Source(Base):
    """An alarm source polled via URL + credentials."""
    __tablename__ = "source"
    source_name = Column(String, primary_key=True)
    source_url = Column(String)
    source_user = Column(String)
    source_password = Column(String)  # NOTE(review): stored in plain text
    source_filter = Column(String)  # filter expression -- format defined by the consumer; confirm
    source_status = Column(Integer)  # one of the source_status_* constants below
    alarm_entries = relationship("ActiveAlarm", cascade="all, delete-orphan")
    # Status constants for source_status.
    source_status_unknown = 0
    source_status_up = 1
    source_status_down = 2

    def dict_repr(self):
        """Ordered dict of the source; deliberately omits the password."""
        data = OrderedDict([
            ("source_name", self.source_name),
            ("source_url", self.source_url),
            ("source_user", self.source_user),
            ("source_filter", self.source_filter),
            ("source_status", self.source_status),
        ])
        return data
class LocalUser(Base):
    """A locally-authenticated user (password stored as a hash)."""
    __tablename__ = "user"
    user_name = Column(String, primary_key=True)
    password_hash = Column(String)

    def dict_repr(self):
        """Ordered dict of the user; deliberately omits the password hash."""
        data = OrderedDict([
            ("user_name", self.user_name)
        ])
        return data
from sqlalchemy import create_engine
from sqlalchemy import ForeignKey, ForeignKeyConstraint, Column, Integer, String, DateTime
from sqlalchemy.types import Boolean
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import config
__config = config.Config()
engine = create_engine(__config.get_value("DatabaseConnection", "url", ""))
Session = sessionmaker(bind=engine)
Base = declarative_base()
class ActiveAlarm(Base):
__tablename__ = "active_alarm"
alarm_id = Column(Integer, primary_key=True)
alarm_source = Column(String, ForeignKey("source.source_name"), primary_key=True)
alarm_uei = Column(String)
alarm_timestamp = Column(DateTime)
alarm_severity = Column(String)
alarm_node_label = Column(String)
alarm_node_interface = Column(String)
alarm_node_service = Column(String)
alarm_logmsg = Column(String)
alarm_description = Column(String)
alarm_operinstruct = Column(String)
parameters = relationship("ActiveAlarmParm", cascade="all, delete-orphan")
forwarding_entries = relationship("ForwardedAlarm", cascade="all, delete-orphan")
source = relationship("Source")
def __str__(self):
output = "ID: " + str(self.alarm_id)
output += " UEI: " + self.alarm_uei
output += " Time: " + str(self.alarm_timestamp)
output += " Logmessage: " + self.alarm_logmsg
return output
class ActiveAlarmParm(Base):
__tablename__ = "active_alarm_parm"
alarm_id = Column(Integer, primary_key=True)
alarm_source = Column(String, primary_key=True)
parm_name = Column(String, primary_key=True)
parm_value = Column(String)
__table_args__ = (ForeignKeyConstraint(["alarm_id", "alarm_source"], ["active_alarm.alarm_id", "active_alarm.alarm_source"]), {})
alarm = relationship("ActiveAlarm")
class ForwardingRule(Base):
__tablename__ = "forwarding_rule"
rule_id = Column(Integer, primary_key=True)
rule_match = Column(String)
rule_delay = Column(Integer)
rule_maxforwardings = Column(Integer)
rule_target = Column(String, ForeignKey("target.target_name"))
forwarding_entries = relationship("ForwardedAlarm", cascade="all, delete-orphan")
target = relationship("Target")
def dict_repr(self):
data = OrderedDict([
("rule_id", self.rule_id),
("rule_match", self.rule_match),
("rule_delay", self.rule_delay),
("rule_maxforwardings", self.rule_maxforwardings),
("rule_target", self.rule_target)
])
return data
class ForwardedAlarm(Base):
__tablename__ = "forwarded_alarm"
alarm_id = Column(Integer, primary_key=True)
alarm_source = Column(String, primary_key=True)
rule_id = Column(Integer, ForeignKey("forwarding_rule.rule_id"), primary_key=True)
forwarded = Column(String)
forwarder_reference = Column(String)
__table_args__ = (ForeignKeyConstraint(["alarm_id", "alarm_source"], ["active_alarm.alarm_id", "active_alarm.alarm_source"]), {})
alarm = relationship("ActiveAlarm")
rule = relationship("ForwardingRule")
class Target(Base):
__tablename__ = "target"
target_name = Column(String, primary_key=True)
target_class = Column(String)
target_parms = relationship("TargetParameter", cascade="all, delete-orphan")
forwarding_rules = relationship("ForwardingRule", cascade="all, delete-orphan")
def dict_repr(self):
data = OrderedDict([
("target_name", self.target_name),
("target_class", self.target_class),
("target_parms", {parm.parameter_name: parm.parameter_value for parm in self.target_parms})
])
return data
class TargetParameter(Base):
__tablename__ = "target_parm"
target_name = Column(String, ForeignKey("target.target_name"), primary_key=True)
parameter_name = Column(String, primary_key=True)
parameter_value = Column(String)
target = relationship("Target")
class Source(Base):
__tablename__ = "source"
source_name = Column(String, primary_key=True)
source_url = Column(String)
source_user = Column(String)
source_password = Column(String)
source_filter = Column(String)
source_status = Column(Integer)
alarm_entries = relationship("ActiveAlarm", cascade="all, delete-orphan")
source_status_unknown = 0
source_status_up = 1
source_status_down = 2
def dict_repr(self):
data = OrderedDict([
("source_name", self.source_name),
("source_url", self.source_url),
("source_user", self.source_user),
("source_filter", self.source_filter),
("source_status", self.source_status),
])
return data
class LocalUser(Base):
__tablename__ = "user"
user_name = Column(String, primary_key=True)
password_hash = Column(String)
def dict_repr(self):
data = OrderedDict([
("user_name", self.user_name)
])
return data | 0.759404 | 0.136551 |
import pytest
from tests.functional.services.api.conftest import USER_API_CONFS
from tests.functional.services.api.images import wait_for_image_to_analyze, get_image_id, \
    get_image_digest
from tests.functional.utils.http_utils import http_post, RequestFailedError, http_del
from tests.functional.conftest import get_logger
# Module-level logger for fixture setup/teardown progress messages.
_logger = get_logger(__name__)
@pytest.fixture(scope="class", params=[USER_API_CONFS[0]])
def create_and_teardown_archive_rule(request):
    """
    In order to interact with the archives API, a rule must be added first,
    which depends on there being an image added as well:
    1. Add alpine:edge image (this isn't currently depended upon in other tests)
    2. Add Archive Rule
    3. Archive the analyzed image
    Note: This appears to only work for the root user ATM, so don't run w/ ft_user
    """
    _logger.info("Adding alpine:edge Image for analysis")
    add_image_resp = http_post(['images'], {'tag': 'alpine:edge'}, config=request.param)
    if add_image_resp.code != 200:
        raise RequestFailedError(add_image_resp.url, add_image_resp.code, add_image_resp.body)
    # The image must finish analysis before it can be archived.
    wait_for_image_to_analyze(get_image_id(add_image_resp), request.param)
    archive_rule_json = {
        "analysis_age_days": 0,
        "created_at": "2020-08-25T17:15:16.865Z",
        "last_updated": "2020-08-25T17:15:16.865Z",
        "selector": {
            "registry": "docker.io",
            "repository": "alpine",
            "tag": "edge"
        },
        "system_global": True,
        "tag_versions_newer": 0,
        "transition": "archive"
    }
    _logger.info('Adding Archive Rule')
    archive_rule_resp = http_post(['archives', 'rules'], archive_rule_json, config=request.param)
    if archive_rule_resp.code != 200:
        raise RequestFailedError(archive_rule_resp.url, archive_rule_resp.code, archive_rule_resp.body)
    archive_resp = http_post(['archives', 'images'], [get_image_digest(add_image_resp)], config=request.param)
    if archive_resp.code != 200:
        raise RequestFailedError(archive_resp.url, archive_resp.code, archive_resp.body)

    def teardown():
        _logger.info('Removing alpine:edge image from anchore')
        # Consistency fix: pass the fixture's API config to all teardown
        # deletes as well (the original omitted config= on the first two),
        # so cleanup runs against the same account the setup used.
        remove_image_resp = http_del(['images', 'by_id', get_image_id(add_image_resp)],
                                     query={'force': True}, config=request.param)
        if remove_image_resp.code != 200:
            raise RequestFailedError(remove_image_resp.url, remove_image_resp.code, remove_image_resp.body)
        _logger.info('Removing Archive Rule: rule_id={}'.format(archive_rule_resp.body['rule_id']))
        remove_rule_resp = http_del(['archives', 'rules', archive_rule_resp.body['rule_id']],
                                    config=request.param)
        if remove_rule_resp.code != 200:
            raise RequestFailedError(remove_rule_resp.url, remove_rule_resp.code, remove_rule_resp.body)
        delete_archive_image_resp = http_del(['archives', 'images', get_image_digest(add_image_resp)],
                                             config=request.param)
        if delete_archive_image_resp.code != 200:
            raise RequestFailedError(delete_archive_image_resp.url,
                                     delete_archive_image_resp.code,
                                     delete_archive_image_resp.body)

    request.addfinalizer(teardown)
    return add_image_resp, archive_rule_resp, archive_resp, request.param
from tests.functional.services.api.conftest import USER_API_CONFS
from tests.functional.services.api.images import wait_for_image_to_analyze, get_image_id, \
get_image_digest
from tests.functional.utils.http_utils import http_post, RequestFailedError, http_del
from tests.functional.conftest import get_logger
_logger = get_logger(__name__)
@pytest.fixture(scope="class", params=[USER_API_CONFS[0]])
def create_and_teardown_archive_rule(request):
"""
In order to interact with the archives API, a rule must be added first,
which depends on there being an image added as well:
1. Add node:latest image (this isn't currently depended upon in other tests)
2. Add Archive Rule
Note: This appears to only work for the root user ATM, so don't run w/ ft_user
"""
_logger.info("Adding alpine:edge Image for analysis")
add_image_resp = http_post(['images'], {'tag': 'alpine:edge'}, config=request.param)
if add_image_resp.code != 200:
raise RequestFailedError(add_image_resp.url, add_image_resp.code, add_image_resp.body)
wait_for_image_to_analyze(get_image_id(add_image_resp), request.param)
archive_rule_json = {
"analysis_age_days": 0,
"created_at": "2020-08-25T17:15:16.865Z",
"last_updated": "2020-08-25T17:15:16.865Z",
"selector": {
"registry": "docker.io",
"repository": "alpine",
"tag": "edge"
},
"system_global": True,
"tag_versions_newer": 0,
"transition": "archive"
}
_logger.info('Adding Archive Rule')
archive_rule_resp = http_post(['archives', 'rules'], archive_rule_json, config=request.param)
if archive_rule_resp.code != 200:
raise RequestFailedError(archive_rule_resp.url, archive_rule_resp.code, archive_rule_resp.body)
archive_resp = http_post(['archives', 'images'], [get_image_digest(add_image_resp)], config=request.param)
if archive_resp.code != 200:
raise RequestFailedError(archive_resp.url, archive_resp.code, archive_resp.body)
def teardown():
_logger.info('Removing alpine:edge image from anchore')
remove_image_resp = http_del(['images', 'by_id', get_image_id(add_image_resp)], query={'force': True})
if remove_image_resp.code != 200:
raise RequestFailedError(remove_image_resp.url, remove_image_resp.code, remove_image_resp.body)
_logger.info('Removing Archive Rule: rule_id={}'.format(archive_rule_resp.body['rule_id']))
remove_rule_resp = http_del(['archives', 'rules', archive_rule_resp.body['rule_id']])
if remove_rule_resp.code != 200:
raise RequestFailedError(remove_rule_resp.url, remove_rule_resp.code, remove_rule_resp.body)
delete_archive_image_resp = http_del(['archives', 'images', get_image_digest(add_image_resp)],
config=request.param)
if delete_archive_image_resp.code != 200:
raise RequestFailedError(delete_archive_image_resp.url,
delete_archive_image_resp.code,
delete_archive_image_resp.body)
request.addfinalizer(teardown)
return add_image_resp, archive_rule_resp, archive_resp, request.param | 0.413004 | 0.234516 |
import datetime
import math
import argparse
# datetime.date.weekday() values: Monday=0, Tuesday=1, Wednesday=2.
MONDAY, TUESDAY, WEDNESDAY = 0, 1, 2
def _vernal_equinox(y):
"""整数で年を与えると、その年の春分の日が3月の何日であるかを返す
"""
if y <= 1947:
d = 0
elif y <= 1979:
d = math.floor(20.8357 + 0.242194 * (y - 1980) - math.floor((y - 1980) / 4))
elif y <= 2099:
d = math.floor(20.8431 + 0.242194 * (y - 1980) - math.floor((y - 1980) / 4))
elif y <= 2150:
d = math.floor(21.8510 + 0.242194 * (y - 1980) - math.floor((y - 1980) / 4))
else:
d = 0
return d
def _autumn_equinox(y):
"""整数で年を与えると、その年の秋分の日が9月の何日であるかを返す
"""
if y <= 1947:
d = 0
elif y <= 1979:
d = math.floor(23.2588 + 0.242194 * (y - 1980) - math.floor((y - 1980) / 4))
elif y <= 2099:
d = math.floor(23.2488 + 0.242194 * (y - 1980) - math.floor((y - 1980) / 4))
elif y <= 2150:
d = math.floor(24.2488 + 0.242194 * (y - 1980) - math.floor((y - 1980) / 4))
else:
d = 0
return d
def holiday_name(year=None, month=None, day=None, date=None):
    """Return the Japanese national-holiday name for a date, or None.

    Accepts either (year, month, day) integers or a datetime.date via
    *date*. Dates before 1948-07-20 (enactment of the Public Holiday Law)
    are never holidays.
    """
    if date is None:
        date = datetime.date(year, month, day)
    if date < datetime.date(1948, 7, 20):
        return None
    # Dispatch on the month via direct function references instead of the
    # original globals()-by-name string lookup -- same behavior, but robust
    # to renames and visible to static analysis.
    month_funcs = (_january, _february, _march, _april, _may, _june,
                   _july, _august, _september, _october, _november, _december)
    name = month_funcs[date.month - 1](date)
    # 振替休日 (substitute holiday): a non-holiday Monday directly after a
    # Sunday holiday.
    if not name and date.weekday() == MONDAY:
        prev = date + datetime.timedelta(days=-1)
        if holiday_name(prev.year, prev.month, prev.day):
            name = '振替休日'
    return name
def _january(date):
    """January: New Year's Day (1st) and Coming-of-Age Day."""
    if date.day == 1:
        return '元日'
    if date.year >= 2000:
        # Happy Monday rule (from 2000): second Monday of January.
        if date.weekday() == MONDAY and (date.day - 1) // 7 == 1:
            return '成人の日'
    elif date.day == 15:
        # Fixed date before 2000.
        return '成人の日'
    return None
def _february(date):
name = None
if date.day == 11 and date.year >= 1967:
name = '建国記念の日'
if (date.year, date.month, date.day) == (1989, 2, 24):
name = '昭和天皇の大喪の礼'
return name
def _march(date):
    """March: Vernal Equinox Day (exact date varies by year)."""
    return '春分の日' if date.day == _vernal_equinox(date.year) else None
def _april(date):
name = None
if date.day == 29:
if date.year >= 2007:
name = '昭和の日'
elif date.year >= 1989:
name = 'みどりの日'
else:
name = '天皇誕生日'
if (date.year, date.month, date.day) == (1959, 4, 10):
name = '皇太子明仁親王の結婚の儀'
return name
def _may(date):
    """Golden Week holidays (May 3-6)."""
    if date.day == 3:
        return '憲法記念日'
    if date.day == 4:
        if date.year >= 2007:
            return 'みどりの日'
        if date.year >= 1986 and date.weekday() != MONDAY:
            # 1986-2006: May 4 between two holidays was a citizens' holiday
            # (Mondays excluded -- presumably covered by the substitute-
            # holiday rule instead; confirm).
            return '国民の休日'
        return None
    if date.day == 5:
        return 'こどもの日'
    if date.day == 6 and date.year >= 2007 and date.weekday() in (TUESDAY, WEDNESDAY):
        # From 2007: May 6 substitutes when May 3 or 4 fell on a weekend.
        return '振替休日'
    return None
def _june(date):
name = None
if (date.year, date.month, date.day) == (1993, 6, 9):
name = '皇太子徳仁親王の結婚の儀'
return name
def _july(date):
    """July: Marine Day (July 20 from 1996; third Monday from 2003)."""
    name = None
    # Happy Monday rule: third Monday of July, from 2003.
    if date.year >= 2003 and date.weekday() == MONDAY and (date.day - 1) // 7 == 2:
        name = '海の日'
    # Fixed-date rule; note this intentionally mirrors the original code and
    # still marks July 20 even after 2003.
    if date.year >= 1996 and date.day == 20:
        name = '海の日'
    return name
def _august(date):
name = None
return name
def _september(date):
    """September: Autumnal Equinox Day, Respect-for-the-Aged Day, and the
    occasional citizens' holiday squeezed between the two."""
    name = None
    equinox_day = _autumn_equinox(date.year)
    if date.day == equinox_day:
        name = '秋分の日'
    if date.year >= 2003:
        if date.weekday() == MONDAY and (date.day - 1) // 7 == 2:
            # Happy Monday rule: third Monday of September.
            name = '敬老の日'
        elif date.weekday() == TUESDAY and date.day == equinox_day - 1:
            # A Tuesday directly before the equinox sits between two
            # holidays and becomes a citizens' holiday.
            name = '国民の休日'
    if date.year >= 1966 and date.day == 15:
        # Fixed-date rule; as in the original, Sept 15 still matches after 2003.
        name = '敬老の日'
    return name
def _october(date):
    """October: Health-Sports Day (Oct 10 from 1966; second Monday from 2000)."""
    name = None
    # Happy Monday rule: second Monday of October, from 2000.
    if date.year >= 2000 and date.weekday() == MONDAY and (date.day - 1) // 7 == 1:
        name = '体育の日'
    # Fixed-date rule; as in the original, Oct 10 still matches after 2000.
    if date.year >= 1966 and date.day == 10:
        name = '体育の日'
    return name
def _november(date):
name = None
if date.day == 3:
name = '文化の日'
if date.day == 23:
name = '勤労感謝の日'
if (date.year, date.month, date.day) == (1990, 11, 12):
name = '即位礼正殿の儀'
return name
def _december(date):
name = None
if date.day == 23 and date.year >= 1989:
name = '天皇誕生日'
return name
if __name__ == '__main__':
    # CLI: `python jholiday.py YEAR MONTH DAY` prints the holiday name,
    # or None if the date is not a holiday.
    parser = argparse.ArgumentParser()
    parser.add_argument('year', type=int)
    parser.add_argument('month', type=int)
    parser.add_argument('day', type=int)
    args = parser.parse_args()
    print(holiday_name(args.year, args.month, args.day))
"""
//_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
//_/ CopyRight(C) K.Tsunoda(AddinBox) 2001 All Rights Reserved.
//_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
""" | stock/src/lib/jholiday.py | import datetime
import math
import argparse
MONDAY, TUESDAY, WEDNESDAY = 0, 1, 2
def _vernal_equinox(y):
"""整数で年を与えると、その年の春分の日が3月の何日であるかを返す
"""
if y <= 1947:
d = 0
elif y <= 1979:
d = math.floor(20.8357 + 0.242194 * (y - 1980) - math.floor((y - 1980) / 4))
elif y <= 2099:
d = math.floor(20.8431 + 0.242194 * (y - 1980) - math.floor((y - 1980) / 4))
elif y <= 2150:
d = math.floor(21.8510 + 0.242194 * (y - 1980) - math.floor((y - 1980) / 4))
else:
d = 0
return d
def _autumn_equinox(y):
"""整数で年を与えると、その年の秋分の日が9月の何日であるかを返す
"""
if y <= 1947:
d = 0
elif y <= 1979:
d = math.floor(23.2588 + 0.242194 * (y - 1980) - math.floor((y - 1980) / 4))
elif y <= 2099:
d = math.floor(23.2488 + 0.242194 * (y - 1980) - math.floor((y - 1980) / 4))
elif y <= 2150:
d = math.floor(24.2488 + 0.242194 * (y - 1980) - math.floor((y - 1980) / 4))
else:
d = 0
return d
def holiday_name(year=None, month=None, day=None, date=None):
if date is None:
date = datetime.date(year, month, day)
if date < datetime.date(1948, 7, 20):
return None
funcs = ['_january', '_february', '_march', '_april', '_may', '_june',
'_july', '_august', '_september', '_october', '_november', '_december']
name = globals()[funcs[date.month - 1]](date)
# 振替休日
if not name and date.weekday() == MONDAY:
prev = date + datetime.timedelta(days=-1)
if holiday_name(prev.year, prev.month, prev.day):
name = '振替休日'
return name
def _january(date):
name = None
if date.day == 1:
name = '元日'
if date.year >= 2000:
if int((date.day - 1) / 7) == 1 and date.weekday() == MONDAY:
name = '成人の日'
else:
if date.day == 15:
name = '成人の日'
return name
def _february(date):
name = None
if date.day == 11 and date.year >= 1967:
name = '建国記念の日'
if (date.year, date.month, date.day) == (1989, 2, 24):
name = '昭和天皇の大喪の礼'
return name
def _march(date):
name = None
if date.day == _vernal_equinox(date.year):
name = '春分の日'
return name
def _april(date):
name = None
if date.day == 29:
if date.year >= 2007:
name = '昭和の日'
elif date.year >= 1989:
name = 'みどりの日'
else:
name = '天皇誕生日'
if (date.year, date.month, date.day) == (1959, 4, 10):
name = '皇太子明仁親王の結婚の儀'
return name
def _may(date):
name = None
if date.day == 3:
name = '憲法記念日'
if date.day == 4:
if date.year >= 2007:
name = 'みどりの日'
elif date.year >= 1986 and date.weekday() != MONDAY:
name = '国民の休日'
if date.day == 5:
name = 'こどもの日'
if date.day == 6:
if date.year >= 2007 and date.weekday() in (TUESDAY, WEDNESDAY):
name = '振替休日'
return name
def _june(date):
name = None
if (date.year, date.month, date.day) == (1993, 6, 9):
name = '皇太子徳仁親王の結婚の儀'
return name
def _july(date):
name = None
if date.year >= 2003:
if int((date.day - 1) / 7) == 2 and date.weekday() == MONDAY:
name = '海の日'
if date.year >= 1996 and date.day == 20:
name = '海の日'
return name
def _august(date):
name = None
return name
def _september(date):
name = None
autumn_equinox = _autumn_equinox(date.year)
if date.day == autumn_equinox:
name = '秋分の日'
if date.year >= 2003:
if int((date.day - 1) / 7) == 2 and date.weekday() == MONDAY:
name = '敬老の日'
elif date.weekday() == TUESDAY and date.day == autumn_equinox - 1:
name = '国民の休日'
if date.year >= 1966 and date.day == 15:
name = '敬老の日'
return name
def _october(date):
name = None
if date.year >= 2000:
if int((date.day - 1) / 7) == 1 and date.weekday() == MONDAY:
name = '体育の日'
if date.year >= 1966 and date.day == 10:
name = '体育の日'
return name
def _november(date):
name = None
if date.day == 3:
name = '文化の日'
if date.day == 23:
name = '勤労感謝の日'
if (date.year, date.month, date.day) == (1990, 11, 12):
name = '即位礼正殿の儀'
return name
def _december(date):
name = None
if date.day == 23 and date.year >= 1989:
name = '天皇誕生日'
return name
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('year', type=int)
parser.add_argument('month', type=int)
parser.add_argument('day', type=int)
args = parser.parse_args()
print(holiday_name(args.year, args.month, args.day))
"""
//_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
//_/ CopyRight(C) K.Tsunoda(AddinBox) 2001 All Rights Reserved.
//_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
""" | 0.24817 | 0.28439 |
r"""Binary merger rate module.
This module provides functions to calculate compact binary merger rates
for individual galaxies.
"""
from astropy import constants, units
__all__ = [
'b_band_merger_rate',
]
# Compact binary coalescence rate coefficients from Abadie et al. (2010),
# Table III, keyed by population and optimism level. The final rate scales
# linearly with a galaxy's B-band luminosity in L_10 units -- see
# b_band_merger_rate below. NOTE(review): confirm the time unit these
# coefficients carry (the paper quotes Table III rates per Myr).
abadie_table_III = {
    'NS-NS': {
        'low': 0.6,
        'realistic': 60,
        'high': 600,
        'max': 2000},
    'NS-BH': {
        'low': 0.03,
        'realistic': 2,
        'high': 60},
    'BH-BH': {
        'low': 0.006,
        'realistic': 0.2,
        'high': 20}
}
def b_band_merger_rate(luminosity,
                       population='NS-NS',
                       optimism='low'):
    r"""Model of Abadie et al (2010), Table III

    Compact binary merger rates as a linear function of a galaxies
    B-band luminosity.

    Parameters
    ----------
    luminosity : (ngal,) array-like
        The B-band luminosity of the galaxies to generate merger
        rates for, in units of solar luminosity.
    population : {'NS-NS', 'NS-BH', 'BH-BH'}
        Compact binary population to get rate for.
        'NS-NS' is neutron star - neutron star
        'NS-BH' is neutron star - black hole
        'BH-BH' is black hole - black hole
    optimism : {'low', 'realistic', 'high'}
        Optimism of predicted merger rates.
        For 'NS-NS' there is an extra option 'max'.

    Returns
    -------
    merger_rate : array_like
        Merger rates for the galaxies in units of year^-1

    Notes
    -----
    The rate is assumed proportional to the B-band luminosity expressed
    in units of :math:`L_{10} \equiv 10^{10} L_{B,\odot}`, where
    :math:`L_{B,\odot} = 2.16 \times 10^{33}\,\mathrm{erg\,s^{-1}}` is the
    B-band luminosity of the Sun, following Abadie et al. (2010).

    References
    ----------
    .. Abadie et al. 2010, Classical and Quantum Gravity,
       Volume 27, Issue 17, article id. 173001 (2010)
       https://arxiv.org/abs/1003.2480

    Examples
    --------
    >>> import numpy as np
    >>> from skypy.gravitational_wave import b_band_merger_rate

    Sample 100 luminosity values near absolute magnitude -20.5.

    >>> luminosities = 10.**(-0.4*(-20.5 + np.random.randn(100)))

    Generate merger rates for these luminosities.

    >>> rates = b_band_merger_rate(luminosities,
    ...                            population='NS-NS',
    ...                            optimism='low')
    """
    # B-band luminosity of the Sun in erg/s (Abadie et al. 2010); named
    # instead of the original inline magic constant.
    L_B_sun_erg_s = 2.16e33
    # Convert input (bolometric solar luminosities) to the L_10 units the
    # table coefficients are defined in.
    L_10 = luminosity * constants.L_sun.to_value('erg/s') / (1e10 * L_B_sun_erg_s)
    # NOTE(review): Abadie et al. Table III quotes rates per L_10 per *Myr*;
    # confirm the per-year scaling used here is intended.
    return abadie_table_III[population][optimism] * L_10 / units.year
This module provides functions to calculate compact binary merger rates
for individual galaxies.
"""
from astropy import constants, units
__all__ = [
'b_band_merger_rate',
]
abadie_table_III = {
'NS-NS': {
'low': 0.6,
'realistic': 60,
'high': 600,
'max': 2000},
'NS-BH': {
'low': 0.03,
'realistic': 2,
'high': 60},
'BH-BH': {
'low': 0.006,
'realistic': 0.2,
'high': 20}
}
def b_band_merger_rate(luminosity,
population='NS-NS',
optimism='low'):
r"""Model of Abadie et al (2010), Table III
Compact binary merger rates as a linear function of a galaxies
B-band luminosity.
Parameters
----------
luminosity : (ngal,) array-like
The B-band luminosity of the galaxies to generate merger
rates for, in units of solar luminosity.
population : {'NS-NS', 'NS-BH', 'BH-BH'}
Compact binary population to get rate for.
'NS-NS' is neutron star - neutron star
'NS-BH' is neutron star - black hole
'BH-BH' is black hole - black hole
optimism : {'low', 'realistic', 'high'}
Optimism of predicted merger rates.
For 'NS-NS' there is an extra option 'max'.
Returns
-------
merger_rate : array_like
Merger rates for the galaxies in units of year^-1
Notes
-----
References
----------
.. Abadie et al. 2010, Classical and Quantum Gravity,
Volume 27, Issue 17, article id. 173001 (2010)
https://arxiv.org/abs/1003.2480
Examples
--------
>>> import numpy as np
>>> from skypy.gravitational_wave import b_band_merger_rate
Sample 100 luminosity values near absolute magnitude -20.5.
>>> luminosities = 10.**(-0.4*(-20.5 + np.random.randn(100)))
Generate merger rates for these luminosities.
>>> rates = b_band_merger_rate(luminosities,
... population='NS-NS',
... optimism='low')
"""
# Convert luminosity to units of L_10 defined in Abadie et. al. 2010
L_10 = luminosity * constants.L_sun.to_value('erg/s') / (1e10 * 2.16e33)
return abadie_table_III[population][optimism] * L_10 / units.year | 0.959116 | 0.710415 |
from telnetlib import Telnet
def yield_vision_telnet_connection(host: str, port: int, user: str, password: str) -> Telnet:
try:
with Telnet(host, port) as telnet:
telnet.read_until(b"login: ", 5)
telnet.write(user.encode("ascii") + b"\n")
telnet.read_until(b"Password: ", 5)
telnet.write(password.encode("ascii") + b"\n")
if b"Invalid domain/user/password" in telnet.read_until(b"UniData Release", 5):
raise PermissionError("Invalid Vision Credientials Used.")
telnet.write(b"\n")
telnet.write(b"\n")
telnet.write(b"\n")
telnet.write(b"\n")
return telnet
except (TimeoutError, ConnectionRefusedError) as err:
raise PermissionError("Invalid Vision Credientials Used. (IP/Port Mismatch or Whitelisting Error)") from err
class VisionConnection:
def __init__(self, ip: str, port: int, username: str, password: str) -> None:
connection = yield_vision_telnet_connection(ip, port, username, password)
connection = self.gather_menu_type(connection)
connection = self.vision_dump_to_ecl(connection)
self.connection: Telnet = connection
self.debug: bool = False
self.timeout: int = 3
return
def close(self):
"""Close the connection."""
connection = self.connection
connection = self.vision_dump_to_ecl(connection)
try:
connection.write(b"BYE\n\n")
connection.write(b"\n")
connection.read_until(b"cgfdg~fdgdf~gdfg~fdg", 1)
connection.close()
except ConnectionResetError as _err:
# print(_err)
connection.close()
self.connection = None
return
print("Vision Software disconnect Failed, attempting socket disconnect...")
if connection:
connection.close()
self.connection = None
def __enter__(self) -> None:
return self
def __exit__(self, type, value, traceback):
self.close()
def vision_dump_to_ecl(self, telnet: Telnet) -> Telnet:
while b"1 record listed" not in telnet.read_until(b"\n:", 0.1):
telnet.write("\x03Q\n\n\nABORT\n\n\nLIST RELEASE SAMPLE 1\n".encode("ascii"))
telnet.read_until(b"\n:", 0.1)
return telnet
def gather_menu_type(self, connection: Telnet) -> Telnet:
connection = self.vision_dump_to_ecl(connection)
connection.write(b"M\n")
if b"*** MAIN MENU ***" in connection.read_until(b"Enter"):
self.menu_type = "main"
return connection
self.menu_type = "scanner"
return connection
def wait_write(self, wait_until, write_this, wait_sec=None):
wait_sec = self.timeout if wait_sec is None else wait_sec
wait_until = wait_until.encode()
tn_input = (write_this + "\r\n").encode()
if self.debug:
print(self.connection.read_until(wait_until, wait_sec).decode("ascii", "ignore"))
else:
self.connection.read_until(wait_until, wait_sec)
self.connection.write(tn_input)
def return_wait_write(self, wait_until, write_this, wait_sec=None):
"""RETURNS LAST DATA ASKED FOR THEN:
write_this = "what you want to write now"
wait_until = "string your waiting for next" """
wait_sec = self.timeout * 10 if wait_sec is None else wait_sec
wait_until = wait_until.encode()
write_this = (write_this + "\r\n").encode()
results = self.connection.read_until(wait_until, wait_sec)
self.connection.write(write_this)
if not self.debug:
return results.decode("ascii", "ignore")
result = results.decode("ascii", "ignore")
print(result)
return result | VisionBackofficeTools/VisionConnection.py | from telnetlib import Telnet
def yield_vision_telnet_connection(host: str, port: int, user: str, password: str) -> Telnet:
try:
with Telnet(host, port) as telnet:
telnet.read_until(b"login: ", 5)
telnet.write(user.encode("ascii") + b"\n")
telnet.read_until(b"Password: ", 5)
telnet.write(password.encode("ascii") + b"\n")
if b"Invalid domain/user/password" in telnet.read_until(b"UniData Release", 5):
raise PermissionError("Invalid Vision Credientials Used.")
telnet.write(b"\n")
telnet.write(b"\n")
telnet.write(b"\n")
telnet.write(b"\n")
return telnet
except (TimeoutError, ConnectionRefusedError) as err:
raise PermissionError("Invalid Vision Credientials Used. (IP/Port Mismatch or Whitelisting Error)") from err
class VisionConnection:
def __init__(self, ip: str, port: int, username: str, password: str) -> None:
connection = yield_vision_telnet_connection(ip, port, username, password)
connection = self.gather_menu_type(connection)
connection = self.vision_dump_to_ecl(connection)
self.connection: Telnet = connection
self.debug: bool = False
self.timeout: int = 3
return
def close(self):
"""Close the connection."""
connection = self.connection
connection = self.vision_dump_to_ecl(connection)
try:
connection.write(b"BYE\n\n")
connection.write(b"\n")
connection.read_until(b"cgfdg~fdgdf~gdfg~fdg", 1)
connection.close()
except ConnectionResetError as _err:
# print(_err)
connection.close()
self.connection = None
return
print("Vision Software disconnect Failed, attempting socket disconnect...")
if connection:
connection.close()
self.connection = None
def __enter__(self) -> None:
return self
def __exit__(self, type, value, traceback):
self.close()
def vision_dump_to_ecl(self, telnet: Telnet) -> Telnet:
while b"1 record listed" not in telnet.read_until(b"\n:", 0.1):
telnet.write("\x03Q\n\n\nABORT\n\n\nLIST RELEASE SAMPLE 1\n".encode("ascii"))
telnet.read_until(b"\n:", 0.1)
return telnet
def gather_menu_type(self, connection: Telnet) -> Telnet:
connection = self.vision_dump_to_ecl(connection)
connection.write(b"M\n")
if b"*** MAIN MENU ***" in connection.read_until(b"Enter"):
self.menu_type = "main"
return connection
self.menu_type = "scanner"
return connection
def wait_write(self, wait_until, write_this, wait_sec=None):
wait_sec = self.timeout if wait_sec is None else wait_sec
wait_until = wait_until.encode()
tn_input = (write_this + "\r\n").encode()
if self.debug:
print(self.connection.read_until(wait_until, wait_sec).decode("ascii", "ignore"))
else:
self.connection.read_until(wait_until, wait_sec)
self.connection.write(tn_input)
def return_wait_write(self, wait_until, write_this, wait_sec=None):
"""RETURNS LAST DATA ASKED FOR THEN:
write_this = "what you want to write now"
wait_until = "string your waiting for next" """
wait_sec = self.timeout * 10 if wait_sec is None else wait_sec
wait_until = wait_until.encode()
write_this = (write_this + "\r\n").encode()
results = self.connection.read_until(wait_until, wait_sec)
self.connection.write(write_this)
if not self.debug:
return results.decode("ascii", "ignore")
result = results.decode("ascii", "ignore")
print(result)
return result | 0.375821 | 0.138549 |
import subprocess
import sys
INIT_RUNTIME_ENV_COMMAND = """\
# Set noninteractive to avoid irrelevant warning messages
export DEBIAN_FRONTEND=noninteractive
echo 'Step 1/{steps}: Install docker'
sudo -E apt-get update
sudo -E apt-get install -y apt-transport-https ca-certificates curl software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo -E apt-key add -
sudo -E apt-key fingerprint 0EBFCD88
sudo -E add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
sudo -E apt-get update
sudo -E apt-get install -y docker-ce docker-ce-cli containerd.io
echo 'Step 2/{steps}: Install python3 and related packages'
sudo -E apt update
sudo -E apt install -y python3-pip
pip3 install redis psutil flask gunicorn pyyaml requests deepdiff
echo 'Step 3/{steps}: Install nvidia driver'
sudo -E apt-get install linux-headers-$(uname -r)
distribution=$(. /etc/os-release;echo $ID$VERSION_ID | tr -d '.')
wget --quiet https://developer.download.nvidia.com/compute/cuda/repos/$distribution/x86_64/cuda-$distribution.pin
sudo -E mv cuda-$distribution.pin /etc/apt/preferences.d/cuda-repository-pin-600
sudo -E apt-key adv \
--fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/$distribution/x86_64/7fa2af80.pub
echo "deb http://developer.download.nvidia.com/compute/cuda/repos/$distribution/x86_64 /" \
| sudo -E tee /etc/apt/sources.list.d/cuda.list
sudo -E apt-get update
sudo -E apt-get -y install cuda-drivers
echo 'Step 4/{steps}: Install nvidia container toolkit'
distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \
&& curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo -E apt-key add - \
&& curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list \
| sudo -E tee /etc/apt/sources.list.d/nvidia-docker.list
sudo -E apt-get update
sudo -E apt-get install -y nvidia-docker2
sudo -E systemctl restart docker
echo 'Step 5/{steps}: Delete outdated files'
rm ~/init_build_node_image_vm.py
"""
if __name__ == "__main__":
# Parse and exec command
command = INIT_RUNTIME_ENV_COMMAND.format(steps=5)
process = subprocess.Popen(
command,
executable="/bin/bash",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True
)
while True:
next_line = process.stdout.readline()
if next_line == "" and process.poll() is not None:
break
sys.stdout.write(next_line)
sys.stdout.flush()
stdout, stderr = process.communicate()
if stderr:
sys.stderr.write(stderr) | maro/cli/grass/lib/scripts/build_node_image_vm/init_build_node_image_vm.py | import subprocess
import sys
INIT_RUNTIME_ENV_COMMAND = """\
# Set noninteractive to avoid irrelevant warning messages
export DEBIAN_FRONTEND=noninteractive
echo 'Step 1/{steps}: Install docker'
sudo -E apt-get update
sudo -E apt-get install -y apt-transport-https ca-certificates curl software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo -E apt-key add -
sudo -E apt-key fingerprint 0EBFCD88
sudo -E add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
sudo -E apt-get update
sudo -E apt-get install -y docker-ce docker-ce-cli containerd.io
echo 'Step 2/{steps}: Install python3 and related packages'
sudo -E apt update
sudo -E apt install -y python3-pip
pip3 install redis psutil flask gunicorn pyyaml requests deepdiff
echo 'Step 3/{steps}: Install nvidia driver'
sudo -E apt-get install linux-headers-$(uname -r)
distribution=$(. /etc/os-release;echo $ID$VERSION_ID | tr -d '.')
wget --quiet https://developer.download.nvidia.com/compute/cuda/repos/$distribution/x86_64/cuda-$distribution.pin
sudo -E mv cuda-$distribution.pin /etc/apt/preferences.d/cuda-repository-pin-600
sudo -E apt-key adv \
--fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/$distribution/x86_64/7fa2af80.pub
echo "deb http://developer.download.nvidia.com/compute/cuda/repos/$distribution/x86_64 /" \
| sudo -E tee /etc/apt/sources.list.d/cuda.list
sudo -E apt-get update
sudo -E apt-get -y install cuda-drivers
echo 'Step 4/{steps}: Install nvidia container toolkit'
distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \
&& curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo -E apt-key add - \
&& curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list \
| sudo -E tee /etc/apt/sources.list.d/nvidia-docker.list
sudo -E apt-get update
sudo -E apt-get install -y nvidia-docker2
sudo -E systemctl restart docker
echo 'Step 5/{steps}: Delete outdated files'
rm ~/init_build_node_image_vm.py
"""
if __name__ == "__main__":
# Parse and exec command
command = INIT_RUNTIME_ENV_COMMAND.format(steps=5)
process = subprocess.Popen(
command,
executable="/bin/bash",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True
)
while True:
next_line = process.stdout.readline()
if next_line == "" and process.poll() is not None:
break
sys.stdout.write(next_line)
sys.stdout.flush()
stdout, stderr = process.communicate()
if stderr:
sys.stderr.write(stderr) | 0.212069 | 0.038829 |
class MetaData:
"""
Data structure to hold all meta information.
A instance of this class is typically created by processing all
meta data relevant tags of all doc comments in the given node structure.
Hint: Must be a clean data class without links to other
systems for optiomal cachability using Pickle
"""
__slots__ = ["name", "requires", "optionals", "breaks", "assets"]
def __init__(self, tree):
self.name = None
self.requires = set()
self.optionals = set()
self.breaks = set()
self.assets = set()
self.__inspect(tree)
def __inspect(self, node):
""" The internal inspection routine """
# Parse comments
comments = getattr(node, "comments", None)
if comments:
for comment in comments:
commentTags = comment.getTags()
if commentTags:
if "name" in commentTags:
self.name = list(commentTags["name"])[0]
if "require" in commentTags:
self.requires.update(commentTags["require"])
if "load" in commentTags:
# load is a special combination shorthand for requires + breaks
# This means load it but don't require it being loaded first
self.requires.update(commentTags["load"])
self.breaks.update(commentTags["load"])
if "optional" in commentTags:
self.optionals.update(commentTags["optional"])
if "break" in commentTags:
self.breaks.update(commentTags["break"])
if "asset" in commentTags:
self.assets.update(commentTags["asset"])
# Process children
for child in node:
if child is not None:
self.__inspect(child) | jasy/js/MetaData.py |
class MetaData:
"""
Data structure to hold all meta information.
A instance of this class is typically created by processing all
meta data relevant tags of all doc comments in the given node structure.
Hint: Must be a clean data class without links to other
systems for optiomal cachability using Pickle
"""
__slots__ = ["name", "requires", "optionals", "breaks", "assets"]
def __init__(self, tree):
self.name = None
self.requires = set()
self.optionals = set()
self.breaks = set()
self.assets = set()
self.__inspect(tree)
def __inspect(self, node):
""" The internal inspection routine """
# Parse comments
comments = getattr(node, "comments", None)
if comments:
for comment in comments:
commentTags = comment.getTags()
if commentTags:
if "name" in commentTags:
self.name = list(commentTags["name"])[0]
if "require" in commentTags:
self.requires.update(commentTags["require"])
if "load" in commentTags:
# load is a special combination shorthand for requires + breaks
# This means load it but don't require it being loaded first
self.requires.update(commentTags["load"])
self.breaks.update(commentTags["load"])
if "optional" in commentTags:
self.optionals.update(commentTags["optional"])
if "break" in commentTags:
self.breaks.update(commentTags["break"])
if "asset" in commentTags:
self.assets.update(commentTags["asset"])
# Process children
for child in node:
if child is not None:
self.__inspect(child) | 0.650911 | 0.284756 |
import logging
from django.contrib import messages
from django.template import RequestContext
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _, ungettext
from mayan.apps.views.generics import (
MultipleObjectConfirmActionView, SingleObjectCreateView,
SingleObjectEditView, SingleObjectListView
)
from .icons import icon_message_list
from .links import link_message_create
from .models import Message
from .permissions import (
permission_message_create, permission_message_delete,
permission_message_edit, permission_message_view
)
logger = logging.getLogger(name=__name__)
class MessageCreateView(SingleObjectCreateView):
fields = ('label', 'message', 'enabled', 'start_datetime', 'end_datetime')
model = Message
view_permission = permission_message_create
def get_extra_context(self):
return {
'title': _('Create message'),
}
def get_instance_extra_data(self):
return {
'_event_actor': self.request.user
}
class MessageDeleteView(MultipleObjectConfirmActionView):
model = Message
object_permission = permission_message_delete
pk_url_kwarg = 'message_id'
post_action_redirect = reverse_lazy(viewname='motd:message_list')
success_message = _('Delete request performed on %(count)d message')
success_message_plural = _(
'Delete request performed on %(count)d messages'
)
def get_extra_context(self):
result = {
'delete_view': True,
'title': ungettext(
singular='Delete the selected message?',
plural='Delete the selected messages?',
number=self.object_list.count()
)
}
if self.object_list.count() == 1:
result.update(
{
'object': self.object_list.first(),
'title': _('Delete message: %s?') % self.object_list.first()
}
)
return result
def object_action(self, instance, form=None):
try:
instance.delete()
messages.success(
message=_(
'Message "%s" deleted successfully.'
) % instance, request=self.request
)
except Exception as exception:
messages.error(
message=_('Error deleting message "%(message)s": %(error)s') % {
'message': instance, 'error': exception
}, request=self.request
)
class MessageEditView(SingleObjectEditView):
fields = ('label', 'message', 'enabled', 'start_datetime', 'end_datetime')
model = Message
object_permission = permission_message_edit
pk_url_kwarg = 'message_id'
post_action_redirect = reverse_lazy(viewname='motd:message_list')
def get_extra_context(self):
return {
'object': self.object,
'title': _('Edit message: %s') % self.object,
}
def get_instance_extra_data(self):
return {
'_event_actor': self.request.user
}
class MessageListView(SingleObjectListView):
model = Message
object_permission = permission_message_view
def get_extra_context(self):
return {
'hide_link': True,
'hide_object': True,
'no_results_icon': icon_message_list,
'no_results_main_link': link_message_create.resolve(
context=RequestContext(request=self.request)
),
'no_results_text': _(
'Messages are displayed in the login view. You can use '
'messages to convery information about your organzation, '
'announcements or usage guidelines for your users.'
),
'no_results_title': _('No messages available'),
'title': _('Messages'),
} | mayan/apps/motd/views.py | import logging
from django.contrib import messages
from django.template import RequestContext
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _, ungettext
from mayan.apps.views.generics import (
MultipleObjectConfirmActionView, SingleObjectCreateView,
SingleObjectEditView, SingleObjectListView
)
from .icons import icon_message_list
from .links import link_message_create
from .models import Message
from .permissions import (
permission_message_create, permission_message_delete,
permission_message_edit, permission_message_view
)
logger = logging.getLogger(name=__name__)
class MessageCreateView(SingleObjectCreateView):
fields = ('label', 'message', 'enabled', 'start_datetime', 'end_datetime')
model = Message
view_permission = permission_message_create
def get_extra_context(self):
return {
'title': _('Create message'),
}
def get_instance_extra_data(self):
return {
'_event_actor': self.request.user
}
class MessageDeleteView(MultipleObjectConfirmActionView):
model = Message
object_permission = permission_message_delete
pk_url_kwarg = 'message_id'
post_action_redirect = reverse_lazy(viewname='motd:message_list')
success_message = _('Delete request performed on %(count)d message')
success_message_plural = _(
'Delete request performed on %(count)d messages'
)
def get_extra_context(self):
result = {
'delete_view': True,
'title': ungettext(
singular='Delete the selected message?',
plural='Delete the selected messages?',
number=self.object_list.count()
)
}
if self.object_list.count() == 1:
result.update(
{
'object': self.object_list.first(),
'title': _('Delete message: %s?') % self.object_list.first()
}
)
return result
def object_action(self, instance, form=None):
try:
instance.delete()
messages.success(
message=_(
'Message "%s" deleted successfully.'
) % instance, request=self.request
)
except Exception as exception:
messages.error(
message=_('Error deleting message "%(message)s": %(error)s') % {
'message': instance, 'error': exception
}, request=self.request
)
class MessageEditView(SingleObjectEditView):
fields = ('label', 'message', 'enabled', 'start_datetime', 'end_datetime')
model = Message
object_permission = permission_message_edit
pk_url_kwarg = 'message_id'
post_action_redirect = reverse_lazy(viewname='motd:message_list')
def get_extra_context(self):
return {
'object': self.object,
'title': _('Edit message: %s') % self.object,
}
def get_instance_extra_data(self):
return {
'_event_actor': self.request.user
}
class MessageListView(SingleObjectListView):
model = Message
object_permission = permission_message_view
def get_extra_context(self):
return {
'hide_link': True,
'hide_object': True,
'no_results_icon': icon_message_list,
'no_results_main_link': link_message_create.resolve(
context=RequestContext(request=self.request)
),
'no_results_text': _(
'Messages are displayed in the login view. You can use '
'messages to convery information about your organzation, '
'announcements or usage guidelines for your users.'
),
'no_results_title': _('No messages available'),
'title': _('Messages'),
} | 0.369315 | 0.06148 |
from torch.distributions import Categorical
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
'''
Chapter 2. REINFORCE
Most code here is copied from SLM-Lab first and then modified to show a plain torch implementation.
'''
gamma = 0.99
# Policy Pi
class Pi(nn.Module):
def __init__(self, in_dim, out_dim):
super(Pi, self).__init__()
layers = [
nn.Linear(in_dim, 64),
nn.ReLU(),
nn.Linear(64, out_dim),
]
self.model = nn.Sequential(*layers)
self.onpolicy_reset()
self.train()
def onpolicy_reset(self):
self.log_probs = []
self.rewards = []
def forward(self, x):
pdparam = self.model(x)
return pdparam
def act(self, state):
"""
- Action probability distribution = Policy(state) : NN(state) generates probabilities for all actions.
- They are actually just logits which are not normalized, unlike probabilities that sum up to 1.
- Categorical() will sample action based on these logits by using Softmax.
- Softmax - https://miro.medium.com/max/875/1*ReYpdIZ3ZSAPb2W8cJpkBg.jpeg
- Categorical() also provides log(action_probability) that we need for calculating loss.
"""
x = torch.from_numpy(state.astype(np.float32)) # to tensor
pdparam = self.forward(x) # forward pass
pd = Categorical(logits=pdparam) # probability distribution
action = pd.sample() # pi(a|s) in action via pd
log_prob = pd.log_prob(action) # log_prob prob of pi(a|s)
self.log_probs.append(log_prob)
return action.item()
def train(pi, optimizer):
# Inner gradient-ascent loop of REINFORCE algorithm
T = len(pi.rewards)
rets = np.empty(T, dtype=np.float32) # the returns
future_ret = 0.0
# Compute the discounted returns efficiently in a reversed order.
for t in reversed(range(T)):
future_ret = pi.rewards[t] + gamma * future_ret
rets[t] = future_ret
# Compute loss (which is really opposite of reward)
rets = torch.tensor(rets)
log_probs = torch.stack(pi.log_probs)
loss = - log_probs * rets # gradient term: Negative for maximizing reward
loss = torch.sum(loss)
# Backpropagation
optimizer.zero_grad()
loss.backward() # backpropagate, compute gradients that will be stored by the tensors (parameters)
optimizer.step() # gradient-ascent, update the weights
return loss
def main():
env = gym.make("CartPole-v0")
in_dim = env.observation_space.shape[0] # 4
out_dim = env.action_space.n # 2
pi = Pi(in_dim, out_dim)
optimizer = optim.Adam(pi.parameters(), lr=0.01)
for epi in range(300):
state = env.reset()
for t in range(200): # cartpole max timestep is 200
action = pi.act(state)
state, reward, done, _ = env.step(action)
pi.rewards.append(reward)
# env.render()
if done:
break
loss = train(pi, optimizer)
total_reward = sum(pi.rewards)
solved = total_reward > 195.0
pi.onpolicy_reset()
print(f"Episode {epi}, loss: {loss}, total_reward: {total_reward}, solved: {solved}")
if __name__ == '__main__':
main() | Code/REINFORCE.py | from torch.distributions import Categorical
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
'''
Chapter 2. REINFORCE
Most code here is copied from SLM-Lab first and then modified to show a plain torch implementation.
'''
gamma = 0.99
# Policy Pi
class Pi(nn.Module):
def __init__(self, in_dim, out_dim):
super(Pi, self).__init__()
layers = [
nn.Linear(in_dim, 64),
nn.ReLU(),
nn.Linear(64, out_dim),
]
self.model = nn.Sequential(*layers)
self.onpolicy_reset()
self.train()
def onpolicy_reset(self):
self.log_probs = []
self.rewards = []
def forward(self, x):
pdparam = self.model(x)
return pdparam
def act(self, state):
"""
- Action probability distribution = Policy(state) : NN(state) generates probabilities for all actions.
- They are actually just logits which are not normalized, unlike probabilities that sum up to 1.
- Categorical() will sample action based on these logits by using Softmax.
- Softmax - https://miro.medium.com/max/875/1*ReYpdIZ3ZSAPb2W8cJpkBg.jpeg
- Categorical() also provides log(action_probability) that we need for calculating loss.
"""
x = torch.from_numpy(state.astype(np.float32)) # to tensor
pdparam = self.forward(x) # forward pass
pd = Categorical(logits=pdparam) # probability distribution
action = pd.sample() # pi(a|s) in action via pd
log_prob = pd.log_prob(action) # log_prob prob of pi(a|s)
self.log_probs.append(log_prob)
return action.item()
def train(pi, optimizer):
# Inner gradient-ascent loop of REINFORCE algorithm
T = len(pi.rewards)
rets = np.empty(T, dtype=np.float32) # the returns
future_ret = 0.0
# Compute the discounted returns efficiently in a reversed order.
for t in reversed(range(T)):
future_ret = pi.rewards[t] + gamma * future_ret
rets[t] = future_ret
# Compute loss (which is really opposite of reward)
rets = torch.tensor(rets)
log_probs = torch.stack(pi.log_probs)
loss = - log_probs * rets # gradient term: Negative for maximizing reward
loss = torch.sum(loss)
# Backpropagation
optimizer.zero_grad()
loss.backward() # backpropagate, compute gradients that will be stored by the tensors (parameters)
optimizer.step() # gradient-ascent, update the weights
return loss
def main():
env = gym.make("CartPole-v0")
in_dim = env.observation_space.shape[0] # 4
out_dim = env.action_space.n # 2
pi = Pi(in_dim, out_dim)
optimizer = optim.Adam(pi.parameters(), lr=0.01)
for epi in range(300):
state = env.reset()
for t in range(200): # cartpole max timestep is 200
action = pi.act(state)
state, reward, done, _ = env.step(action)
pi.rewards.append(reward)
# env.render()
if done:
break
loss = train(pi, optimizer)
total_reward = sum(pi.rewards)
solved = total_reward > 195.0
pi.onpolicy_reset()
print(f"Episode {epi}, loss: {loss}, total_reward: {total_reward}, solved: {solved}")
if __name__ == '__main__':
main() | 0.933756 | 0.502014 |
import datetime
import aiohttp
import pytest
from aio_geojson_client.consts import UPDATE_OK
from aio_geojson_flightairmap.feed import FlightAirMapFeed
from tests.utils import load_fixture
@pytest.mark.asyncio
async def test_update_ok(aresponses, event_loop):
"""Test updating feed is ok."""
home_coordinates = (-31.0, 151.0)
aresponses.add(
'192.168.3.11',
'/FlightAirMap/live/geojson',
'get',
aresponses.Response(text=load_fixture('flights-1.json'),
status=200),
match_querystring=True,
)
async with aiohttp.ClientSession(loop=event_loop) as websession:
feed = FlightAirMapFeed(websession, "http://192.168.3.11/FlightAirMap/live/geojson", home_coordinates,20000)
assert repr(feed) == "<FlightAirMapFeed" \
"(home=(-31.0, 151.0), url=http://" \
"192.168.3.11" \
"/FlightAirMap/live/geojson, " \
"radius=20000)>"
status, entries = await feed.update()
assert status == UPDATE_OK
assert entries is not None
assert len(entries) == 4
@pytest.mark.asyncio
async def test_empty_feed(aresponses, event_loop):
"""Test updating feed is ok when feed does not contain any entries."""
home_coordinates = (-41.2, 174.7)
aresponses.add(
'192.168.0.200',
'/FlightAirMap/live/geojson',
'get',
aresponses.Response(text=load_fixture('flights-2.json'),
status=200),
match_querystring=True,
)
async with aiohttp.ClientSession(loop=event_loop) as websession:
feed = FlightAirMapFeed(websession, url="http://192.168.0.200/FlightAirMap/live/geojson", home_coordinates=home_coordinates)
assert repr(feed) == "<FlightAirMapFeed(" \
"home=(-41.2, 174.7), " \
"url=http://192.168.0.200" \
"/FlightAirMap/live/geojson, " \
"radius=None)>"
status, entries = await feed.update()
assert status == UPDATE_OK
assert entries is not None
assert len(entries) == 0
assert feed.last_timestamp is None | tests/test_feed.py | import datetime
import aiohttp
import pytest
from aio_geojson_client.consts import UPDATE_OK
from aio_geojson_flightairmap.feed import FlightAirMapFeed
from tests.utils import load_fixture
@pytest.mark.asyncio
async def test_update_ok(aresponses, event_loop):
"""Test updating feed is ok."""
home_coordinates = (-31.0, 151.0)
aresponses.add(
'192.168.3.11',
'/FlightAirMap/live/geojson',
'get',
aresponses.Response(text=load_fixture('flights-1.json'),
status=200),
match_querystring=True,
)
async with aiohttp.ClientSession(loop=event_loop) as websession:
feed = FlightAirMapFeed(websession, "http://192.168.3.11/FlightAirMap/live/geojson", home_coordinates,20000)
assert repr(feed) == "<FlightAirMapFeed" \
"(home=(-31.0, 151.0), url=http://" \
"192.168.3.11" \
"/FlightAirMap/live/geojson, " \
"radius=20000)>"
status, entries = await feed.update()
assert status == UPDATE_OK
assert entries is not None
assert len(entries) == 4
@pytest.mark.asyncio
async def test_empty_feed(aresponses, event_loop):
"""Test updating feed is ok when feed does not contain any entries."""
home_coordinates = (-41.2, 174.7)
aresponses.add(
'192.168.0.200',
'/FlightAirMap/live/geojson',
'get',
aresponses.Response(text=load_fixture('flights-2.json'),
status=200),
match_querystring=True,
)
async with aiohttp.ClientSession(loop=event_loop) as websession:
feed = FlightAirMapFeed(websession, url="http://192.168.0.200/FlightAirMap/live/geojson", home_coordinates=home_coordinates)
assert repr(feed) == "<FlightAirMapFeed(" \
"home=(-41.2, 174.7), " \
"url=http://192.168.0.200" \
"/FlightAirMap/live/geojson, " \
"radius=None)>"
status, entries = await feed.update()
assert status == UPDATE_OK
assert entries is not None
assert len(entries) == 0
assert feed.last_timestamp is None | 0.59843 | 0.404566 |
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from pymongo import MongoClient
from sklearn import metrics
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import train_test_split
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import GaussianNB
# main features table of all features
from main_features import text_input, freq_input, numerical_input, class_label
# connect to db
client = MongoClient()
db = client['thesis']
fvs = db['features2']
initial_sets = []
labels = []
for item in fvs.find({'empty': 0}):
item_add = {}
for feature in text_input + freq_input:
item_add[feature] = item[feature]
for feature in numerical_input:
item_add[feature] = np.array([item[feature]])
initial_sets.append(item_add)
labels.append(item[class_label])
# custom sklearn selector class
class ItemSelector(BaseEstimator, TransformerMixin):
def __init__(self, key):
self.key = key
def fit(self):
return self
def transform(self, data_dict):
return data_dict[self.key]
# train test split is 0.66 / 0.33
X_train, X_test, y_train, y_test = train_test_split(initial_sets, labels, test_size=0.33, random_state=42)
text_union = []
freq_union = []
numerical_union = []
for feature in text_input:
text_union.append(
(feature,
Pipeline(
[
('selector', ItemSelector(key=feature)),
('tfidf', TfidfVectorizer(sublinear_tf=True, min_df=7, max_df=.2, ngram_range=(1, 3))),
]
)
)
)
for feature in freq_input:
freq_union.append(
(feature,
Pipeline(
[
('selector', ItemSelector(key=feature)),
('vect', DictVectorizer(sparse=True)),
]
)
)
)
for feature in numerical_input:
numerical_union.append(
(feature,
Pipeline(
[
('selector', ItemSelector(key=feature))
]
)
)
)
unionized = text_union + freq_union + numerical_union
union = FeatureUnion(transformer_list=unionized)
X_train_flip = {}
X_test_flip = {}
for feature in text_input+freq_input:
X_train_flip[feature] = [item[feature] for item in X_train]
X_test_flip[feature] = [item[feature] for item in X_test]
for feature in numerical_input:
X_train_flip[feature] = np.array([item[feature] for item in X_train])
X_test_flip[feature] = np.array([item[feature] for item in X_test])
target_names = [
'company',
'error',
'for sale',
'holding page',
'non-commercial',
'password protected',
'pay-per-click',
'personal-family-blog',
'porn',
'portal/media',
'web-shop'
]
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf[1])
t0 = time()
pipeline = Pipeline([
('union', union),
# ("MinMaxScaler", MaxAbsScaler()),
# ("StandardScaler", StandardScaler(with_mean=False)),
# ("normalise", Normalizer()),
clf
])
pipeline.fit(X_train_flip, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = pipeline.predict(X_test_flip)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=target_names))
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(pipeline).split('(')[0]
return clf_descr, score, train_time, test_time
extra_tree = ('Extratree', ExtraTreesClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0))
kNN = ("kNN", KNeighborsClassifier(n_neighbors=10))
random_forest = ("Random forest", RandomForestClassifier(n_estimators=100))
svc_l1 = ("Linear SCV L1", LinearSVC(loss='l2', penalty='l1', dual=False, tol=1e-3))
scv_l2 = ("Linear SCV L2", LinearSVC(loss='l2', penalty='l2', dual=False, tol=1e-3))
feature_reduction_svc_l1 = ("feature_reduction_svc_l1", Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
]))
featur_reduction_svc_l2 = ("featur_reduction_svc_l2", Pipeline([
('feature_selection', LinearSVC(penalty="l2", dual=False, tol=1e-3)),
('classification', LinearSVC())
]))
soft_voter = ("soft_voter",
VotingClassifier(estimators=[extra_tree, kNN, random_forest], voting='soft', weights=[2, 1, 2]))
hard_voter = ("hard_voter",
VotingClassifier(estimators=[extra_tree, kNN, random_forest, svc_l1, scv_l2], voting='hard'))
results = []
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
results.append(benchmark(random_forest))
gauss = ("Guass", GaussianNB(priors=None))
multi = ("multi", MultinomialNB(alpha=1.0, fit_prior=True, class_prior=None))
results.append(benchmark(gauss))
kNN = ("kNN", KNeighborsClassifier(n_neighbors=10, metric=cosine_similarity))
results.append(benchmark(kNN))
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='navy')
plt.barh(indices + .3, training_time, .2, label="training time",
color='c')
plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show() | classification/initial_baseline_sklearn_classification.py | from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from pymongo import MongoClient
from sklearn import metrics
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import train_test_split
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import GaussianNB
# main features table of all features
from main_features import text_input, freq_input, numerical_input, class_label
# connect to db
client = MongoClient()
db = client['thesis']
fvs = db['features2']
initial_sets = []
labels = []
for item in fvs.find({'empty': 0}):
item_add = {}
for feature in text_input + freq_input:
item_add[feature] = item[feature]
for feature in numerical_input:
item_add[feature] = np.array([item[feature]])
initial_sets.append(item_add)
labels.append(item[class_label])
# custom sklearn selector class
class ItemSelector(BaseEstimator, TransformerMixin):
def __init__(self, key):
self.key = key
def fit(self):
return self
def transform(self, data_dict):
return data_dict[self.key]
# train test split is 0.66 / 0.33
X_train, X_test, y_train, y_test = train_test_split(initial_sets, labels, test_size=0.33, random_state=42)
text_union = []
freq_union = []
numerical_union = []
for feature in text_input:
text_union.append(
(feature,
Pipeline(
[
('selector', ItemSelector(key=feature)),
('tfidf', TfidfVectorizer(sublinear_tf=True, min_df=7, max_df=.2, ngram_range=(1, 3))),
]
)
)
)
for feature in freq_input:
freq_union.append(
(feature,
Pipeline(
[
('selector', ItemSelector(key=feature)),
('vect', DictVectorizer(sparse=True)),
]
)
)
)
for feature in numerical_input:
numerical_union.append(
(feature,
Pipeline(
[
('selector', ItemSelector(key=feature))
]
)
)
)
unionized = text_union + freq_union + numerical_union
union = FeatureUnion(transformer_list=unionized)
X_train_flip = {}
X_test_flip = {}
for feature in text_input+freq_input:
X_train_flip[feature] = [item[feature] for item in X_train]
X_test_flip[feature] = [item[feature] for item in X_test]
for feature in numerical_input:
X_train_flip[feature] = np.array([item[feature] for item in X_train])
X_test_flip[feature] = np.array([item[feature] for item in X_test])
target_names = [
'company',
'error',
'for sale',
'holding page',
'non-commercial',
'password protected',
'pay-per-click',
'personal-family-blog',
'porn',
'portal/media',
'web-shop'
]
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf[1])
t0 = time()
pipeline = Pipeline([
('union', union),
# ("MinMaxScaler", MaxAbsScaler()),
# ("StandardScaler", StandardScaler(with_mean=False)),
# ("normalise", Normalizer()),
clf
])
pipeline.fit(X_train_flip, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = pipeline.predict(X_test_flip)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=target_names))
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(pipeline).split('(')[0]
return clf_descr, score, train_time, test_time
extra_tree = ('Extratree', ExtraTreesClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0))
kNN = ("kNN", KNeighborsClassifier(n_neighbors=10))
random_forest = ("Random forest", RandomForestClassifier(n_estimators=100))
svc_l1 = ("Linear SCV L1", LinearSVC(loss='l2', penalty='l1', dual=False, tol=1e-3))
scv_l2 = ("Linear SCV L2", LinearSVC(loss='l2', penalty='l2', dual=False, tol=1e-3))
feature_reduction_svc_l1 = ("feature_reduction_svc_l1", Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
]))
featur_reduction_svc_l2 = ("featur_reduction_svc_l2", Pipeline([
('feature_selection', LinearSVC(penalty="l2", dual=False, tol=1e-3)),
('classification', LinearSVC())
]))
soft_voter = ("soft_voter",
VotingClassifier(estimators=[extra_tree, kNN, random_forest], voting='soft', weights=[2, 1, 2]))
hard_voter = ("hard_voter",
VotingClassifier(estimators=[extra_tree, kNN, random_forest, svc_l1, scv_l2], voting='hard'))
results = []
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
results.append(benchmark(random_forest))
gauss = ("Guass", GaussianNB(priors=None))
multi = ("multi", MultinomialNB(alpha=1.0, fit_prior=True, class_prior=None))
results.append(benchmark(gauss))
kNN = ("kNN", KNeighborsClassifier(n_neighbors=10, metric=cosine_similarity))
results.append(benchmark(kNN))
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='navy')
plt.barh(indices + .3, training_time, .2, label="training time",
color='c')
plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show() | 0.690037 | 0.314866 |
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
from snr import *
from snr.utils.dummy_endpoint.dummy_endpoint_factory import \
DummyEndpointFactory
class MyEnum(Enum):
a = "a"
b = "b"
c = "c"
Id = Tuple[MyEnum, str]
Key = Union[MyEnum, Id]
class TestNode(SNRTestCase):
def test_lookup_proof_of_concept(self):
d: Dict[Key, int] = {
MyEnum.a: 1,
(MyEnum.a, "2"): 2,
(MyEnum.b, "3"): 3,
MyEnum.b: 4
}
def get(id: Id) -> Optional[int]:
val = d.get(id)
if not val:
val = d.get(id[0])
return val
self.assertEqual(1, get((MyEnum.a, "1")))
self.assertEqual(2, get((MyEnum.a, "2")))
self.assertEqual(3, get((MyEnum.b, "3")))
self.assertEqual(4, get((MyEnum.b, "4")))
self.assertIsNone(get((MyEnum.c, "1")))
def test_get_task_handlers(self):
def no_op(*args: Any) -> None:
return None
node = None
try:
node = Node("test",
self.get_config([
DummyEndpointFactory("dummy_endpoint_1", {
(TaskType.event, "by_type_and_name"): no_op,
TaskType.process_data: no_op
}),
DummyEndpointFactory("dummy_endpoint_2", {
TaskType.process_data: no_op
}),
])
)
handlers = node.get_task_handlers(tasks.terminate("test"))
self.assertEqual(1, len(handlers))
handlers = node.get_task_handlers(tasks.event("none"))
self.assertEqual(0, len(handlers))
handlers = node.get_task_handlers(
Task(TaskType.process_data, "by_type"))
self.assertEqual(2, len(handlers))
node.set_terminate_flag("test done")
node.terminate()
finally:
if node and not node.is_terminated():
node.set_terminate_flag("test done")
node.terminate()
node = None | tests/test_node.py | from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
from snr import *
from snr.utils.dummy_endpoint.dummy_endpoint_factory import \
DummyEndpointFactory
class MyEnum(Enum):
a = "a"
b = "b"
c = "c"
Id = Tuple[MyEnum, str]
Key = Union[MyEnum, Id]
class TestNode(SNRTestCase):
def test_lookup_proof_of_concept(self):
d: Dict[Key, int] = {
MyEnum.a: 1,
(MyEnum.a, "2"): 2,
(MyEnum.b, "3"): 3,
MyEnum.b: 4
}
def get(id: Id) -> Optional[int]:
val = d.get(id)
if not val:
val = d.get(id[0])
return val
self.assertEqual(1, get((MyEnum.a, "1")))
self.assertEqual(2, get((MyEnum.a, "2")))
self.assertEqual(3, get((MyEnum.b, "3")))
self.assertEqual(4, get((MyEnum.b, "4")))
self.assertIsNone(get((MyEnum.c, "1")))
def test_get_task_handlers(self):
def no_op(*args: Any) -> None:
return None
node = None
try:
node = Node("test",
self.get_config([
DummyEndpointFactory("dummy_endpoint_1", {
(TaskType.event, "by_type_and_name"): no_op,
TaskType.process_data: no_op
}),
DummyEndpointFactory("dummy_endpoint_2", {
TaskType.process_data: no_op
}),
])
)
handlers = node.get_task_handlers(tasks.terminate("test"))
self.assertEqual(1, len(handlers))
handlers = node.get_task_handlers(tasks.event("none"))
self.assertEqual(0, len(handlers))
handlers = node.get_task_handlers(
Task(TaskType.process_data, "by_type"))
self.assertEqual(2, len(handlers))
node.set_terminate_flag("test done")
node.terminate()
finally:
if node and not node.is_terminated():
node.set_terminate_flag("test done")
node.terminate()
node = None | 0.744935 | 0.223758 |