code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from easy_thumbnails.fields import ThumbnailerImageField
from .constants import WAITING, REJECTED, SELECTED
from clothings.models import Clothing
from handsome.utils import path_and_rename
from orders.models import Order
class DesignPhoto(models.Model):
    """
    Photo uploaded by a designer; attached to designs via Design.photos.
    """
    designer = models.ForeignKey(User)
    # Source image is resized to fit 1024x1024 and sharpened on upload.
    file = ThumbnailerImageField(
        upload_to=path_and_rename('design-photo'),
        resize_source=dict(size=(1024, 1024), sharpen=True))
    description = models.CharField(max_length=128, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)

    def get_absolute_url(self):
        # Public URL of the uploaded file under MEDIA_URL.
        return '{}design-photo/{}'.format(settings.MEDIA_URL, self.file)

    def __unicode__(self):
        return u'Design photo {} by {}'.format(self.file,
                                               self.designer.username)
class DesignClothing(models.Model):
    """
    A clothing item, with a chosen size and color, included in a design
    proposal (linked from Design.clothings).
    """
    clothing = models.ForeignKey(Clothing)
    size = models.CharField(max_length=32)
    color = models.CharField(max_length=32)
    # Presumably marks whether the item is kept in the proposal — TODO confirm.
    wanted = models.BooleanField(default=True)
class Design(models.Model):
    """
    Design proposal for the order, made by a designer for a client.

    The client either selects or rejects the proposal; status starts at
    WAITING.
    """
    STATUS_CHOICES = (
        (SELECTED, u'已选定方案'),  # design selected
        (REJECTED, u'已否定方案'),  # design rejected
        (WAITING, u'等待选择'),  # waiting for the client to choose
    )
    # Unique code; assigned after creation by the post_save handler below.
    code = models.CharField(max_length=32, unique=True, blank=True, null=True)
    order = models.ForeignKey(Order)
    # Stored price; see also the live-computed `price` property below.
    total_price = models.FloatField(default=0)
    designer = models.ForeignKey(User, related_name='my_designs')
    client = models.ForeignKey(User, related_name='designs_for_me')
    status = models.CharField(max_length=16, choices=STATUS_CHOICES,
                              default=WAITING)
    reject_reason = models.TextField(blank=True)
    comment = models.TextField(blank=True)
    photos = models.ManyToManyField(DesignPhoto)
    clothings = models.ManyToManyField(DesignClothing)
    created_at = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return u'Design for {} by {}'.format(self.client.username,
                                             self.designer.username)

    @property
    def price(self):
        # Sum of the underlying clothing prices, computed on access.
        # NOTE(review): may diverge from the stored total_price field —
        # confirm which one callers should trust.
        total_price = 0
        for design_clothing in self.clothings.all():
            total_price += design_clothing.clothing.price
        return total_price
def generate_design_code(sender, instance, created, *args, **kwargs):
    """
    post_save handler: assign a unique code to a newly created Design.

    Code format: '600' + timestamp (yymmddHHMMSS) + designer id.
    Runs only when the instance is first created, so the save() below
    (which fires post_save again with created=False) does not recurse.
    """
    if created:
        now = datetime.now().strftime('%y%m%d%H%M%S')
        instance.code = u'600{}{}'.format(now, instance.designer.id)
        # BUG FIX: the original called instance.save(using=False); Django's
        # `using` expects a database alias (or None), not a boolean — it only
        # worked because False is falsy and fell through to the default db.
        instance.save()
post_save.connect(generate_design_code, Design) | designs/models.py | from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from easy_thumbnails.fields import ThumbnailerImageField
from .constants import WAITING, REJECTED, SELECTED
from clothings.models import Clothing
from handsome.utils import path_and_rename
from orders.models import Order
class DesignPhoto(models.Model):
"""
Photo for design
"""
designer = models.ForeignKey(User)
file = ThumbnailerImageField(
upload_to=path_and_rename('design-photo'),
resize_source=dict(size=(1024, 1024), sharpen=True))
description = models.CharField(max_length=128, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
def get_absolute_url(self):
return '{}design-photo/{}'.format(settings.MEDIA_URL, self.file)
def __unicode__(self):
return u'Design photo {} by {}'.format(self.file,
self.designer.username)
class DesignClothing(models.Model):
"""
Clothing for design
"""
clothing = models.ForeignKey(Clothing)
size = models.CharField(max_length=32)
color = models.CharField(max_length=32)
wanted = models.BooleanField(default=True)
class Design(models.Model):
"""
Design proposal for the order
"""
STATUS_CHOICES = (
(SELECTED, u'已选定方案'),
(REJECTED, u'已否定方案'),
(WAITING, u'等待选择'),
)
code = models.CharField(max_length=32, unique=True, blank=True, null=True)
order = models.ForeignKey(Order)
total_price = models.FloatField(default=0)
designer = models.ForeignKey(User, related_name='my_designs')
client = models.ForeignKey(User, related_name='designs_for_me')
status = models.CharField(max_length=16, choices=STATUS_CHOICES,
default=WAITING)
reject_reason = models.TextField(blank=True)
comment = models.TextField(blank=True)
photos = models.ManyToManyField(DesignPhoto)
clothings = models.ManyToManyField(DesignClothing)
created_at = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return u'Design for {} by {}'.format(self.client.username,
self.designer.username)
@property
def price(self):
total_price = 0
for design_clothing in self.clothings.all():
total_price += design_clothing.clothing.price
return total_price
def generate_design_code(sender, instance, created, *args, **kwargs):
"""
Generate design code.
100 + Date + Designer ID
"""
if created:
now = datetime.now().strftime('%y%m%d%H%M%S')
instance.code = u'600{}{}'.format(now, instance.designer.id)
instance.save(using=False)
post_save.connect(generate_design_code, Design) | 0.589716 | 0.125226 |
import argparse
import logging
# Have to do this here because imports below pull in boto which likes to set up logging configuration its own way.
from pandas import DataFrame
logging.basicConfig(
format='%(asctime)s %(name)-20s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S'
)
import subprocess
import pandas as pd
LOGGER = logging.getLogger('robojudge')
def generate_predictions(requested_predictions_df: DataFrame, prediction_module: str) -> None:
    """
    Generates predictions for each of the requested scenarios by invoking `prediction_module`
    :param requested_predictions_df: A Pandas DataFrame containing the predictions to be made, one per row.
        Must carry columns StartDate, EndDate, IpFile and OutputFile. See sample in `tests/fixtures` for format
    :param prediction_module: Path to the module to be invoked to generate predictions. Generally should be
        <path>/predict.py
    :return: Nothing. Predictions are written to the designated output file supplied in
        requested_predictions_df
    """
    import sys
    logger = logging.getLogger('robojudge')
    for row in requested_predictions_df.itertuples():
        start_date = row.StartDate
        end_date = row.EndDate
        ip_file = row.IpFile
        output_file = row.OutputFile
        logger.info(f'Running predict module {prediction_module} for {start_date} to {end_date} '
                    f'ip file {ip_file} output {output_file}')
        # Spawn an external process to run each predictor. In future this may be
        # parallel and even distributed.
        # BUG FIX: use the current interpreter (sys.executable) instead of
        # whatever 'python' resolves to on PATH (may be absent or python 2).
        return_code = subprocess.call(
            [
                sys.executable, prediction_module,
                '--start_date', start_date,
                '--end_date', end_date,
                '--interventions_plan', ip_file,
                '--output_file', output_file
            ]
        )
        if return_code != 0:
            # Keep going so one failing predictor doesn't block the others,
            # but make the failure visible (previously silently ignored).
            logger.warning(f'{prediction_module} exited with code {return_code}')
def get_predictions_tasks(requested_predictions_file):
    """
    Load the list of prediction tasks to generate.

    In the production scenario this file likely resides on a shared volume
    maintained elsewhere (e.g. by the judge box).

    :param requested_predictions_file: path to the CSV of requested predictions
    :return: a Pandas DataFrame with one requested prediction per row
    """
    # Dates are deliberately NOT parsed: they are forwarded verbatim as
    # strings on the spawned process command line.
    tasks_df = pd.read_csv(requested_predictions_file, encoding="ISO-8859-1")
    return tasks_df
def do_main():
    """
    Command-line entry point: parse arguments, load the requested
    prediction tasks and generate the predictions.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--requested-predictions-file",
                        dest="requested_predictions_file",
                        type=str,
                        required=True,
                        help="Path to the filename containing dates for predictions to be generated and "
                             "requested output files. A separate output file, with the requested name, will be "
                             "generated for each requested prediction date pair.")
    parser.add_argument("-p", "--prediction-module",
                        dest="prediction_module",
                        type=str,
                        required=True,
                        help="Path to the python script that should be run to generate predictions. According to the "
                             "API conversion this script should be named predict.py")
    args = parser.parse_args()
    LOGGER.info(f'Generating predictions from file {args.requested_predictions_file}')
    tasks_df = get_predictions_tasks(args.requested_predictions_file)
    generate_predictions(tasks_df, args.prediction_module)
if __name__ == '__main__':
do_main() | sandbox_phase_2/covid-xprize-robotasks-main/judging/generate_predictions_local.py | import argparse
import logging
# Have to do this here because imports below pull in boto which likes to set up logging configuration its own way.
from pandas import DataFrame
logging.basicConfig(
format='%(asctime)s %(name)-20s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S'
)
import subprocess
import pandas as pd
LOGGER = logging.getLogger('robojudge')
def generate_predictions(requested_predictions_df: DataFrame, prediction_module: str) -> None:
"""
Generates predictions for each of the requested scenarios by invoking `prediction_module`
:param requested_predictions_df: A Pandas DataFrame containing the predictions to be made, one per row. See sample
in `tests/fixtures` for format
:param prediction_module: Path to the module to be invoked to generate predictions. Generally should be
<path>/predict.py
:return Nothing. Predictions are written to the designated output file supplied in
requested_requested_predictions_df
"""
for row in requested_predictions_df.itertuples():
start_date = row.StartDate
end_date = row.EndDate
ip_file = row.IpFile
output_file = row.OutputFile
LOGGER.info(f'Running predict module {prediction_module} for {start_date} to {end_date} ip file {ip_file} output {output_file}')
# Spawn an external process to run each predictor. In future this may be parallel and even distributed
subprocess.call(
[
'python', prediction_module,
'--start_date', start_date,
'--end_date', end_date,
'--interventions_plan', ip_file,
'--output_file', output_file
]
)
def get_predictions_tasks(requested_predictions_file):
"""
Reads the file containing the list of predictions to be generated. It is likely that in the production scenario,
this file will reside on a shared volume that will be maintained elsewhere (e.g. by the judge box)
:param requested_predictions_file:
:return: A Pandas DataFrame containing the predictions to be generated
"""
# Don't want to parse dates here as we'll be sending them as strings to the spawned process command line
return pd.read_csv(
requested_predictions_file,
encoding="ISO-8859-1"
)
def do_main():
"""
Main line for this module
"""
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--requested-predictions-file",
dest="requested_predictions_file",
type=str,
required=True,
help="Path to the filename containing dates for predictions to be generated and "
"requested output files. A separate output file, with the requested name, will be "
"generated for each requested prediction date pair.")
parser.add_argument("-p", "--prediction-module",
dest="prediction_module",
type=str,
required=True,
help="Path to the python script that should be run to generate predictions. According to the "
"API conversion this script should be named predict.py")
args = parser.parse_args()
LOGGER.info(f'Generating predictions from file {args.requested_predictions_file}')
requested_predictions_df = get_predictions_tasks(args.requested_predictions_file, )
generate_predictions(requested_predictions_df, args.prediction_module)
if __name__ == '__main__':
do_main() | 0.695855 | 0.361644 |
import asyncio
import copy
import glob
import os
import pathlib
import uuid
from datetime import time
import yaml
from aiohttp import web
from app.objects.c_adversary import Adversary
from app.objects.c_operation import Operation
from app.objects.c_schedule import Schedule
from app.objects.secondclass.c_fact import Fact
from app.service.interfaces.i_rest_svc import RestServiceInterface
from app.utility.base_service import BaseService
class RestService(RestServiceInterface, BaseService):
    """
    REST-layer service: persistence, deletion, display and control
    operations invoked by the HTTP API handlers.
    """
    def __init__(self):
        # Register this instance with the service registry under 'rest_svc'.
        self.log = self.add_service('rest_svc', self)
        # Event loop used to schedule operations to run in the background.
        self.loop = asyncio.get_event_loop()
    async def persist_adversary(self, data):
        """
        Create or update an adversary profile YAML file and reload it.

        :param data: dict with keys 'i' (adversary id; empty for new),
            'name', 'description', 'atomic_ordering' (list of ability
            dicts) and optionally 'objective'
        :return: display dicts of the (re)loaded adversary
        """
        i = data.pop('i')
        # Default objective used when the request does not supply one.
        # NOTE(review): obj_default is an objective object, not its id —
        # confirm dumping it into the YAML below is intended.
        obj_default = (await self._services.get('data_svc').locate('objectives', match=dict(name='default')))[0]
        if not i:
            # New adversary: mint a fresh id.
            i = str(uuid.uuid4())
        _, file_path = await self.get_service('file_svc').find_file_path('%s.yml' % i, location='data')
        if not file_path:
            file_path = 'data/adversaries/%s.yml' % i
        with open(file_path, 'w+') as f:
            f.seek(0)
            # Collapse the ability dicts to a plain list of ability ids.
            p = list()
            for ability in data.pop('atomic_ordering'):
                p.append(ability['id'])
            f.write(yaml.dump(dict(id=i, name=data.pop('name'), description=data.pop('description'),
                                   atomic_ordering=p, objective=data.pop('objective', obj_default))))
            # Drop leftover bytes from a longer previous version of the file.
            f.truncate()
        await self._services.get('data_svc').reload_data()
        return [a.display for a in await self._services.get('data_svc').locate('adversaries', dict(adversary_id=i))]
    async def update_planner(self, data):
        """
        Update a planner's stopping conditions, both in its YAML file on
        disk and on the in-memory planner object.

        :param data: dict with the planner 'name' and a
            'stopping_conditions' list of {trait, value} dicts
        """
        planner = (await self.get_service('data_svc').locate('planners', dict(name=data['name'])))[0]
        planner_id = planner.planner_id
        file_path = await self._get_file_path(planner_id)
        planner_dict = await self._read_from_yaml(file_path)
        planner_dict['stopping_conditions'] = self._get_stopping_conditions(data)
        await self._write_to_yaml(file_path, planner_dict)
        # Mirror the on-disk change onto the live object as Fact instances.
        planner.stopping_conditions = [Fact.load(dict(trait=f.get('trait'), value=f.get('value')))
                                       for f in data['stopping_conditions']]
        await self.get_service('data_svc').store(planner)
async def persist_ability(self, data):
_, file_path = await self.get_service('file_svc').find_file_path('%s.yml' % data.get('id'), location='data')
if not file_path:
d = 'data/abilities/%s' % data.get('tactic')
if not os.path.exists(d):
os.makedirs(d)
file_path = '%s/%s.yml' % (d, data.get('id'))
with open(file_path, 'w+') as f:
f.seek(0)
f.write(yaml.dump([data]))
access = (await self.get_service('data_svc').locate('abilities', dict(ability_id=data.get('id'))))[0].access
await self.get_service('data_svc').remove('abilities', dict(ability_id=data.get('id')))
await self.get_service('data_svc').load_ability_file(file_path, access)
return [a.display for a in await self.get_service('data_svc').locate('abilities', dict(ability_id=data.get('id')))]
async def persist_source(self, data):
_, file_path = await self.get_service('file_svc').find_file_path('%s.yml' % data.get('id'), location='data')
if not file_path:
file_path = 'data/sources/%s.yml' % data.get('id')
with open(file_path, 'w+') as f:
f.seek(0)
f.write(yaml.dump(data))
await self._services.get('data_svc').reload_data()
return [s.display for s in await self._services.get('data_svc').locate('sources', dict(id=data.get('id')))]
    async def delete_agent(self, data):
        """Remove all agents matching `data` from the data service."""
        await self.get_service('data_svc').remove('agents', data)
        return 'Delete action completed'
async def delete_ability(self, data):
return await self._delete_data_from_memory_and_disk(ram_key='abilities', identifier='ability_id', data=data)
async def delete_adversary(self, data):
return await self._delete_data_from_memory_and_disk(ram_key='adversaries', identifier='adversary_id', data=data)
    async def delete_operation(self, data):
        """
        Remove an operation, its source, and its result/fact files on disk.

        :param data: match dict containing the operation 'id'
        """
        await self.get_service('data_svc').remove('operations', data)
        await self.get_service('data_svc').remove('sources', dict(id=str(data.get('id'))))
        for f in glob.glob('data/results/*'):
            if '%s-' % data.get('id') in f:
                os.remove(f)
        for f in glob.glob('data/facts/*.yml'):
            # NOTE(review): plain substring match — operation id 1 would also
            # match files for operation 10; confirm ids cannot collide here.
            if '%s' % data.get('id') in f:
                os.remove(f)
        return 'Delete action completed'
async def display_objects(self, object_name, data):
results = [o.display for o in await self.get_service('data_svc').locate(object_name, match=data)]
return await self._explode_display_results(object_name, results)
async def display_result(self, data):
link_id = str(data.pop('link_id'))
link = await self.get_service('app_svc').find_link(link_id)
if link:
try:
content = self.get_service('file_svc').read_result_file('%s' % link_id)
return dict(link=link.display, output=content)
except FileNotFoundError:
return ''
return ''
async def display_operation_report(self, data):
op_id = data.pop('op_id')
op = (await self.get_service('data_svc').locate('operations', match=dict(id=int(op_id))))[0]
return await op.report(file_svc=self.get_service('file_svc'), data_svc=self.get_service('data_svc'),
output=data.get('agent_output'))
async def download_contact_report(self, contact):
return dict(contacts=self.get_service('contact_svc').report.get(contact.get('contact'), dict()))
    async def update_agent_data(self, data):
        """
        Apply GUI-driven property changes to the agents matching 'paw',
        or to the global agent defaults when no paw is supplied.

        :return: display dict of the (last) updated agent
        """
        paw = data.pop('paw', None)
        if paw is None:
            # No specific agent targeted: update the global agent config.
            await self._update_global_props(**data)
        for agent in await self.get_service('data_svc').locate('agents', match=dict(paw=paw)):
            await agent.gui_modification(**data)
        # NOTE(review): if no agent matches, `agent` is unbound and this
        # raises NameError; only the last match's display is returned.
        return agent.display
async def update_chain_data(self, data):
link = await self.get_service('app_svc').find_link(data.pop('link_id'))
link.status = data.get('status')
if data.get('command'):
link.command = data.get('command')
return ''
async def create_operation(self, access, data):
operation = await self._build_operation_object(access, data)
operation.set_start_details()
await self.get_service('data_svc').store(operation)
self.loop.create_task(operation.run(self.get_services()))
return [operation.display]
async def create_schedule(self, access, data):
operation = await self._build_operation_object(access, data['operation'])
scheduled = await self.get_service('data_svc').store(
Schedule(name=operation.name,
schedule=time(data['schedule']['hour'], data['schedule']['minute'], 0),
task=operation)
)
self.log.debug('Scheduled new operation (%s) for %s' % (operation.name, scheduled.schedule))
async def list_payloads(self):
payload_dirs = [pathlib.Path.cwd() / 'data' / 'payloads']
payload_dirs.extend(pathlib.Path.cwd() / 'plugins' / plugin.name / 'payloads'
for plugin in await self.get_service('data_svc').locate('plugins') if plugin.enabled)
return set(p.name for p_dir in payload_dirs for p in p_dir.glob('*')
if p.is_file() and not p.name.startswith('.'))
async def find_abilities(self, paw):
data_svc = self.get_service('data_svc')
agent = (await data_svc.locate('agents', match=dict(paw=paw)))[0]
return await agent.capabilities(await self.get_service('data_svc').locate('abilities'))
async def get_potential_links(self, op_id, paw=None):
operation = (await self.get_service('data_svc').locate('operations', match=dict(id=op_id)))[0]
if operation.finish:
return []
agents = await self.get_service('data_svc').locate('agents', match=dict(paw=paw)) if paw else operation.agents
potential_abilities = await self._build_potential_abilities(operation)
operation.potential_links = await self._build_potential_links(operation, agents, potential_abilities)
return dict(links=[l.display for l in operation.potential_links])
async def apply_potential_link(self, link):
operation = await self.get_service('app_svc').find_op_with_link(link.id)
return await operation.apply(link)
async def task_agent_with_ability(self, paw, ability_id, obfuscator, facts=()):
new_links = []
for agent in await self.get_service('data_svc').locate('agents', dict(paw=paw)):
self.log.debug('Tasking %s with %s' % (paw, ability_id))
links = await agent.task(
abilities=await self.get_service('data_svc').locate('abilities', match=dict(ability_id=ability_id)),
obfuscator=obfuscator,
facts=facts
)
new_links.extend(links)
return new_links
async def get_link_pin(self, json_data):
link = await self.get_service('app_svc').find_link(json_data['link'])
if link and link.collect and not link.finish:
return link.pin
return 0
async def construct_agents_for_group(self, group):
if group:
return await self.get_service('data_svc').locate('agents', match=dict(group=group))
return await self.get_service('data_svc').locate('agents')
async def update_config(self, data):
if data.get('prop') == 'plugin':
enabled_plugins = self.get_config('plugins')
enabled_plugins.append(data.get('value'))
else:
self.set_config('main', data.get('prop'), data.get('value'))
return self.get_config()
async def update_operation(self, op_id, state=None, autonomous=None, obfuscator=None):
async def validate(op):
try:
if not len(op):
raise web.HTTPNotFound
elif await op[0].is_finished():
raise web.HTTPBadRequest(body='This operation has already finished.')
elif state not in op[0].states.values():
raise web.HTTPBadRequest(body='state must be one of {}'.format(op[0].states.values()))
except Exception as e:
self.log.error(repr(e))
operation = await self.get_service('data_svc').locate('operations', match=dict(id=op_id))
if state:
await validate(operation)
operation[0].state = state
operation[0].finish = self.get_current_timestamp()
self.log.debug('Changing operation=%s state to %s' % (op_id, state))
if autonomous:
operation[0].autonomous = 0 if operation[0].autonomous else 1
self.log.debug('Toggled operation=%s autonomous to %s' % (op_id, bool(operation[0].autonomous)))
if obfuscator:
operation[0].obfuscator = obfuscator
self.log.debug('Updated operation=%s obfuscator to %s' % (op_id, operation[0].obfuscator))
""" PRIVATE """
async def _build_operation_object(self, access, data):
name = data.pop('name')
group = data.pop('group', '')
planner = await self.get_service('data_svc').locate('planners', match=dict(name=data.get('planner', 'atomic')))
adversary = await self._construct_adversary_for_op(data.pop('adversary_id', ''))
agents = await self.construct_agents_for_group(group)
sources = await self.get_service('data_svc').locate('sources', match=dict(name=data.pop('source', 'basic')))
allowed = self._get_allowed_from_access(access)
return Operation(name=name, planner=planner[0], agents=agents, adversary=adversary,
group=group, jitter=data.pop('jitter', '2/8'), source=next(iter(sources), None),
state=data.pop('state', 'running'), autonomous=int(data.pop('autonomous', 1)), access=allowed,
obfuscator=data.pop('obfuscator', 'plain-text'),
auto_close=bool(int(data.pop('auto_close', 0))), visibility=int(data.pop('visibility', '50')))
def _get_allowed_from_access(self, access):
if self.Access.HIDDEN in access['access']:
return self.Access.HIDDEN
elif self.Access.BLUE in access['access']:
return self.Access.BLUE
else:
return self.Access.RED
@staticmethod
async def _read_from_yaml(file_path):
with open(file_path, 'r') as f:
return yaml.safe_load(f.read())
@staticmethod
async def _write_to_yaml(file_path, content):
with open(file_path, 'w') as f:
f.write(yaml.dump(content))
async def _get_file_path(self, planner_id):
_, file_path = await self.get_service('file_svc').find_file_path('%s.yml' % planner_id, location='data')
if not file_path:
file_path = 'data/planners/%s.yml' % planner_id
return file_path
@staticmethod
def _get_stopping_conditions(data):
new_stopping_conditions = data.get('stopping_conditions')
if new_stopping_conditions:
return [{s.get('trait'): s.get('value')} for s in new_stopping_conditions]
async def _build_potential_abilities(self, operation):
potential_abilities = []
for a in await self.get_service('data_svc').locate('abilities', match=dict(access=operation.access)):
if not operation.adversary.has_ability(a.ability_id):
potential_abilities.append(a)
return potential_abilities
async def _build_potential_links(self, operation, agents, abilities):
potential_links = []
for a in agents:
for pl in await self.get_service('planning_svc').generate_and_trim_links(a, operation, abilities):
potential_links.append(pl)
return await self.get_service('planning_svc').sort_links(potential_links)
async def _construct_adversary_for_op(self, adversary_id):
adv = await self.get_service('data_svc').locate('adversaries', match=dict(adversary_id=adversary_id))
if adv:
return copy.deepcopy(adv[0])
return Adversary.load(dict(adversary_id='ad-hoc', name='ad-hoc', description='an empty adversary profile', atomic_ordering=[]))
async def _update_global_props(self, sleep_min, sleep_max, watchdog, untrusted, implant_name, bootstrap_abilities):
if implant_name:
self.set_config(name='agents', prop='implant_name', value=implant_name)
if bootstrap_abilities:
abilities = self.get_config(name='agents', prop='bootstrap_abilities')
abilities.append(bootstrap_abilities)
self.set_config(name='agents', prop='sleep_min', value=sleep_min)
self.set_config(name='agents', prop='sleep_max', value=sleep_max)
self.set_config(name='agents', prop='untrusted_timer', value=untrusted)
self.set_config(name='agents', prop='watchdog', value=watchdog)
async def _explode_display_results(self, object_name, results):
if object_name == 'adversaries':
for adv in results:
adv['atomic_ordering'] = [ab.display for ab_id in adv['atomic_ordering'] for ab in
await self.get_service('data_svc').locate('abilities',
match=dict(ability_id=ab_id))]
adv['objective'] = [ab.display for ab in
await self.get_service('data_svc').locate('objectives',
match=dict(id=adv['objective']))][0]
return results
async def _delete_data_from_memory_and_disk(self, ram_key, identifier, data):
await self.get_service('data_svc').remove(ram_key, data)
_, file_path = await self.get_service('file_svc').find_file_path('%s.yml' % data.get(identifier),
location='data')
if not file_path:
file_path = 'data/%s/%s.yml' % (ram_key, data.get(identifier))
if os.path.exists(file_path):
os.remove(file_path)
return 'Delete action completed' | app/service/rest_svc.py | import asyncio
import copy
import glob
import os
import pathlib
import uuid
from datetime import time
import yaml
from aiohttp import web
from app.objects.c_adversary import Adversary
from app.objects.c_operation import Operation
from app.objects.c_schedule import Schedule
from app.objects.secondclass.c_fact import Fact
from app.service.interfaces.i_rest_svc import RestServiceInterface
from app.utility.base_service import BaseService
class RestService(RestServiceInterface, BaseService):
def __init__(self):
self.log = self.add_service('rest_svc', self)
self.loop = asyncio.get_event_loop()
async def persist_adversary(self, data):
i = data.pop('i')
obj_default = (await self._services.get('data_svc').locate('objectives', match=dict(name='default')))[0]
if not i:
i = str(uuid.uuid4())
_, file_path = await self.get_service('file_svc').find_file_path('%s.yml' % i, location='data')
if not file_path:
file_path = 'data/adversaries/%s.yml' % i
with open(file_path, 'w+') as f:
f.seek(0)
p = list()
for ability in data.pop('atomic_ordering'):
p.append(ability['id'])
f.write(yaml.dump(dict(id=i, name=data.pop('name'), description=data.pop('description'),
atomic_ordering=p, objective=data.pop('objective', obj_default))))
f.truncate()
await self._services.get('data_svc').reload_data()
return [a.display for a in await self._services.get('data_svc').locate('adversaries', dict(adversary_id=i))]
async def update_planner(self, data):
planner = (await self.get_service('data_svc').locate('planners', dict(name=data['name'])))[0]
planner_id = planner.planner_id
file_path = await self._get_file_path(planner_id)
planner_dict = await self._read_from_yaml(file_path)
planner_dict['stopping_conditions'] = self._get_stopping_conditions(data)
await self._write_to_yaml(file_path, planner_dict)
planner.stopping_conditions = [Fact.load(dict(trait=f.get('trait'), value=f.get('value')))
for f in data['stopping_conditions']]
await self.get_service('data_svc').store(planner)
async def persist_ability(self, data):
_, file_path = await self.get_service('file_svc').find_file_path('%s.yml' % data.get('id'), location='data')
if not file_path:
d = 'data/abilities/%s' % data.get('tactic')
if not os.path.exists(d):
os.makedirs(d)
file_path = '%s/%s.yml' % (d, data.get('id'))
with open(file_path, 'w+') as f:
f.seek(0)
f.write(yaml.dump([data]))
access = (await self.get_service('data_svc').locate('abilities', dict(ability_id=data.get('id'))))[0].access
await self.get_service('data_svc').remove('abilities', dict(ability_id=data.get('id')))
await self.get_service('data_svc').load_ability_file(file_path, access)
return [a.display for a in await self.get_service('data_svc').locate('abilities', dict(ability_id=data.get('id')))]
async def persist_source(self, data):
_, file_path = await self.get_service('file_svc').find_file_path('%s.yml' % data.get('id'), location='data')
if not file_path:
file_path = 'data/sources/%s.yml' % data.get('id')
with open(file_path, 'w+') as f:
f.seek(0)
f.write(yaml.dump(data))
await self._services.get('data_svc').reload_data()
return [s.display for s in await self._services.get('data_svc').locate('sources', dict(id=data.get('id')))]
async def delete_agent(self, data):
await self.get_service('data_svc').remove('agents', data)
return 'Delete action completed'
async def delete_ability(self, data):
return await self._delete_data_from_memory_and_disk(ram_key='abilities', identifier='ability_id', data=data)
async def delete_adversary(self, data):
return await self._delete_data_from_memory_and_disk(ram_key='adversaries', identifier='adversary_id', data=data)
async def delete_operation(self, data):
await self.get_service('data_svc').remove('operations', data)
await self.get_service('data_svc').remove('sources', dict(id=str(data.get('id'))))
for f in glob.glob('data/results/*'):
if '%s-' % data.get('id') in f:
os.remove(f)
for f in glob.glob('data/facts/*.yml'):
if '%s' % data.get('id') in f:
os.remove(f)
return 'Delete action completed'
async def display_objects(self, object_name, data):
results = [o.display for o in await self.get_service('data_svc').locate(object_name, match=data)]
return await self._explode_display_results(object_name, results)
async def display_result(self, data):
link_id = str(data.pop('link_id'))
link = await self.get_service('app_svc').find_link(link_id)
if link:
try:
content = self.get_service('file_svc').read_result_file('%s' % link_id)
return dict(link=link.display, output=content)
except FileNotFoundError:
return ''
return ''
async def display_operation_report(self, data):
op_id = data.pop('op_id')
op = (await self.get_service('data_svc').locate('operations', match=dict(id=int(op_id))))[0]
return await op.report(file_svc=self.get_service('file_svc'), data_svc=self.get_service('data_svc'),
output=data.get('agent_output'))
async def download_contact_report(self, contact):
return dict(contacts=self.get_service('contact_svc').report.get(contact.get('contact'), dict()))
async def update_agent_data(self, data):
paw = data.pop('paw', None)
if paw is None:
await self._update_global_props(**data)
for agent in await self.get_service('data_svc').locate('agents', match=dict(paw=paw)):
await agent.gui_modification(**data)
return agent.display
async def update_chain_data(self, data):
link = await self.get_service('app_svc').find_link(data.pop('link_id'))
link.status = data.get('status')
if data.get('command'):
link.command = data.get('command')
return ''
async def create_operation(self, access, data):
operation = await self._build_operation_object(access, data)
operation.set_start_details()
await self.get_service('data_svc').store(operation)
self.loop.create_task(operation.run(self.get_services()))
return [operation.display]
async def create_schedule(self, access, data):
operation = await self._build_operation_object(access, data['operation'])
scheduled = await self.get_service('data_svc').store(
Schedule(name=operation.name,
schedule=time(data['schedule']['hour'], data['schedule']['minute'], 0),
task=operation)
)
self.log.debug('Scheduled new operation (%s) for %s' % (operation.name, scheduled.schedule))
async def list_payloads(self):
payload_dirs = [pathlib.Path.cwd() / 'data' / 'payloads']
payload_dirs.extend(pathlib.Path.cwd() / 'plugins' / plugin.name / 'payloads'
for plugin in await self.get_service('data_svc').locate('plugins') if plugin.enabled)
return set(p.name for p_dir in payload_dirs for p in p_dir.glob('*')
if p.is_file() and not p.name.startswith('.'))
async def find_abilities(self, paw):
data_svc = self.get_service('data_svc')
agent = (await data_svc.locate('agents', match=dict(paw=paw)))[0]
return await agent.capabilities(await self.get_service('data_svc').locate('abilities'))
async def get_potential_links(self, op_id, paw=None):
operation = (await self.get_service('data_svc').locate('operations', match=dict(id=op_id)))[0]
if operation.finish:
return []
agents = await self.get_service('data_svc').locate('agents', match=dict(paw=paw)) if paw else operation.agents
potential_abilities = await self._build_potential_abilities(operation)
operation.potential_links = await self._build_potential_links(operation, agents, potential_abilities)
return dict(links=[l.display for l in operation.potential_links])
async def apply_potential_link(self, link):
    """Attach a manually chosen potential link to the operation that owns it."""
    owning_op = await self.get_service('app_svc').find_op_with_link(link.id)
    return await owning_op.apply(link)
async def task_agent_with_ability(self, paw, ability_id, obfuscator, facts=()):
    """Task every agent matching `paw` with the given ability; return the created links."""
    new_links = []
    data_svc = self.get_service('data_svc')
    # fix: pass the agent filter via the match= keyword for consistency with every
    # other locate() call in this class (behavior unchanged, dict was positional before)
    for agent in await data_svc.locate('agents', match=dict(paw=paw)):
        self.log.debug('Tasking %s with %s' % (paw, ability_id))
        links = await agent.task(
            abilities=await data_svc.locate('abilities', match=dict(ability_id=ability_id)),
            obfuscator=obfuscator,
            facts=facts
        )
        new_links.extend(links)
    return new_links
async def get_link_pin(self, json_data):
    """Return the pin of a collectable, unfinished link; 0 when unavailable."""
    link = await self.get_service('app_svc').find_link(json_data['link'])
    if not link or not link.collect or link.finish:
        return 0
    return link.pin
async def construct_agents_for_group(self, group):
    """Locate all agents, optionally restricted to the given group."""
    data_svc = self.get_service('data_svc')
    if not group:
        return await data_svc.locate('agents')
    return await data_svc.locate('agents', match=dict(group=group))
async def update_config(self, data):
    """Apply one configuration change described by data ('prop' key + 'value').

    Returns the (main) configuration after the change.
    """
    if data.get('prop') == 'plugin':
        # get_config('plugins') returns the live list, so appending here mutates
        # the running configuration in place.
        # NOTE(review): no deduplication — repeated calls append duplicates; confirm intended.
        enabled_plugins = self.get_config('plugins')
        enabled_plugins.append(data.get('value'))
    else:
        self.set_config('main', data.get('prop'), data.get('value'))
    return self.get_config()
async def update_operation(self, op_id, state=None, autonomous=None, obfuscator=None):
    """Update the state, autonomy flag and/or obfuscator of an existing operation.

    Only the truthy arguments are applied; falsy values leave the field untouched.
    """
    async def validate(op):
        # Sanity-check a requested state change.
        # NOTE(review): the raised HTTP errors are caught and only logged below,
        # so an unknown/finished operation or invalid state still falls through
        # and the assignment after validate() is applied — confirm this is intended.
        try:
            if not len(op):
                raise web.HTTPNotFound
            elif await op[0].is_finished():
                raise web.HTTPBadRequest(body='This operation has already finished.')
            elif state not in op[0].states.values():
                raise web.HTTPBadRequest(body='state must be one of {}'.format(op[0].states.values()))
        except Exception as e:
            self.log.error(repr(e))
    operation = await self.get_service('data_svc').locate('operations', match=dict(id=op_id))
    if state:
        await validate(operation)
        operation[0].state = state
        # NOTE(review): finish is stamped on *every* state change, not only on
        # terminal states — verify against the operation lifecycle.
        operation[0].finish = self.get_current_timestamp()
        self.log.debug('Changing operation=%s state to %s' % (op_id, state))
    if autonomous:
        # toggles the current value regardless of what was passed in
        operation[0].autonomous = 0 if operation[0].autonomous else 1
        self.log.debug('Toggled operation=%s autonomous to %s' % (op_id, bool(operation[0].autonomous)))
    if obfuscator:
        operation[0].obfuscator = obfuscator
        self.log.debug('Updated operation=%s obfuscator to %s' % (op_id, operation[0].obfuscator))
""" PRIVATE """
async def _build_operation_object(self, access, data):
    """Construct (but do not store) an Operation from a request payload.

    Consumed keys are pop()ed out of `data`; unspecified fields fall back to
    the defaults shown inline. Note the pops mutate the caller's dict.
    """
    name = data.pop('name')
    group = data.pop('group', '')
    planner = await self.get_service('data_svc').locate('planners', match=dict(name=data.get('planner', 'atomic')))
    adversary = await self._construct_adversary_for_op(data.pop('adversary_id', ''))
    agents = await self.construct_agents_for_group(group)
    sources = await self.get_service('data_svc').locate('sources', match=dict(name=data.pop('source', 'basic')))
    allowed = self._get_allowed_from_access(access)
    # planner[0] assumes the named planner exists; sources may be empty, in which
    # case next(iter(sources), None) yields None
    return Operation(name=name, planner=planner[0], agents=agents, adversary=adversary,
                     group=group, jitter=data.pop('jitter', '2/8'), source=next(iter(sources), None),
                     state=data.pop('state', 'running'), autonomous=int(data.pop('autonomous', 1)), access=allowed,
                     obfuscator=data.pop('obfuscator', 'plain-text'),
                     auto_close=bool(int(data.pop('auto_close', 0))), visibility=int(data.pop('visibility', '50')))
def _get_allowed_from_access(self, access):
    """Map a request's access set to the single access level granted (RED by default)."""
    granted = access['access']
    # precedence: HIDDEN wins over BLUE, everything else falls back to RED
    for level in (self.Access.HIDDEN, self.Access.BLUE):
        if level in granted:
            return level
    return self.Access.RED
@staticmethod
async def _read_from_yaml(file_path):
    """Parse and return the YAML document stored at file_path."""
    with open(file_path, 'r') as fh:
        # safe_load accepts a stream directly; no need to read() first
        return yaml.safe_load(fh)
@staticmethod
async def _write_to_yaml(file_path, content):
    """Serialize content as YAML into file_path, replacing any existing file."""
    with open(file_path, 'w') as fh:
        # dump straight into the stream instead of building an intermediate string
        yaml.dump(content, fh)
async def _get_file_path(self, planner_id):
    """Resolve the YAML file backing a planner, defaulting to data/planners/<id>.yml."""
    _, existing = await self.get_service('file_svc').find_file_path('%s.yml' % planner_id, location='data')
    return existing if existing else 'data/planners/%s.yml' % planner_id
@staticmethod
def _get_stopping_conditions(data):
    """Convert request stopping conditions into a list of {trait: value} dicts.

    Returns None when no stopping conditions were supplied.
    """
    conditions = data.get('stopping_conditions')
    if not conditions:
        return None
    return [{condition.get('trait'): condition.get('value')} for condition in conditions]
async def _build_potential_abilities(self, operation):
    """Return accessible abilities not already part of the operation's adversary."""
    data_svc = self.get_service('data_svc')
    accessible = await data_svc.locate('abilities', match=dict(access=operation.access))
    return [ability for ability in accessible
            if not operation.adversary.has_ability(ability.ability_id)]
async def _build_potential_links(self, operation, agents, abilities):
    """Generate, trim and sort candidate links for every agent of the operation."""
    planning_svc = self.get_service('planning_svc')
    links = []
    for agent in agents:
        links.extend(await planning_svc.generate_and_trim_links(agent, operation, abilities))
    return await planning_svc.sort_links(links)
async def _construct_adversary_for_op(self, adversary_id):
    """Return a copy of the requested adversary, or an empty ad-hoc profile if absent."""
    matches = await self.get_service('data_svc').locate('adversaries', match=dict(adversary_id=adversary_id))
    if not matches:
        return Adversary.load(dict(adversary_id='ad-hoc', name='ad-hoc', description='an empty adversary profile', atomic_ordering=[]))
    # deep copy so per-operation mutations never touch the stored profile
    return copy.deepcopy(matches[0])
async def _update_global_props(self, sleep_min, sleep_max, watchdog, untrusted, implant_name, bootstrap_abilities):
    """Write global agent configuration values.

    implant_name and bootstrap_abilities are only applied when truthy; the four
    timer values are written unconditionally (falsy values overwrite too).
    """
    if implant_name:
        self.set_config(name='agents', prop='implant_name', value=implant_name)
    if bootstrap_abilities:
        # get_config returns the live list; appending mutates the config in place
        abilities = self.get_config(name='agents', prop='bootstrap_abilities')
        # NOTE(review): append() adds bootstrap_abilities as a single element — if
        # callers pass a list of ability ids, extend() may be intended; confirm.
        abilities.append(bootstrap_abilities)
    self.set_config(name='agents', prop='sleep_min', value=sleep_min)
    self.set_config(name='agents', prop='sleep_max', value=sleep_max)
    self.set_config(name='agents', prop='untrusted_timer', value=untrusted)
    self.set_config(name='agents', prop='watchdog', value=watchdog)
async def _explode_display_results(self, object_name, results):
    """Expand ability/objective ids inside adversary display dicts into display objects.

    Non-adversary results pass through untouched.
    """
    if object_name == 'adversaries':
        data_svc = self.get_service('data_svc')
        for adv in results:
            exploded = []
            for ab_id in adv['atomic_ordering']:
                for ability in await data_svc.locate('abilities', match=dict(ability_id=ab_id)):
                    exploded.append(ability.display)
            adv['atomic_ordering'] = exploded
            objectives = await data_svc.locate('objectives', match=dict(id=adv['objective']))
            adv['objective'] = [objective.display for objective in objectives][0]
    return results
async def _delete_data_from_memory_and_disk(self, ram_key, identifier, data):
    """Remove an object from the data service and delete its backing YAML file, if any.

    Fix: stripped the dataset-dump residue ("| 0.236604 | 0.076822 |") that had been
    fused onto the return line, which made it a syntax error.
    """
    await self.get_service('data_svc').remove(ram_key, data)
    _, file_path = await self.get_service('file_svc').find_file_path('%s.yml' % data.get(identifier),
                                                                     location='data')
    if not file_path:
        # fall back to the conventional on-disk location for this object type
        file_path = 'data/%s/%s.yml' % (ram_key, data.get(identifier))
    if os.path.exists(file_path):
        os.remove(file_path)
    return 'Delete action completed'
from .base import ApiBase
import requests
class Checklists(ApiBase):
    """Thin client for Trello's /1/checklists REST endpoints.

    Every call sends the mandatory key/token credentials as query parameters and
    funnels the HTTP response through ApiBase.raise_or_json.
    Fixes: stripped the dataset-dump residue fused onto the final line, and
    factored the repeated credential dict into a private helper.
    """
    __module__ = 'trello'

    def __init__(self, apikey, token=None):
        self._apikey = apikey
        self._token = token

    def _auth_params(self, **extra):
        # Merge per-call query parameters on top of the mandatory credentials.
        params = {"key": self._apikey, "token": self._token}
        params.update(extra)
        return params

    # --- reads -----------------------------------------------------------
    def get(self, idChecklist, cards=None, card_fields=None, checkItems=None, checkItem_fields=None, fields=None):
        resp = requests.get(f"https://trello.com/1/checklists/{idChecklist}",
                            params=self._auth_params(cards=cards, card_fields=card_fields, checkItems=checkItems,
                                                     checkItem_fields=checkItem_fields, fields=fields),
                            data=None)
        return self.raise_or_json(resp)

    def get_field(self, field, idChecklist):
        resp = requests.get(f"https://trello.com/1/checklists/{idChecklist}/{field}",
                            params=self._auth_params(), data=None)
        return self.raise_or_json(resp)

    def get_board(self, idChecklist, fields=None):
        resp = requests.get(f"https://trello.com/1/checklists/{idChecklist}/board",
                            params=self._auth_params(fields=fields), data=None)
        return self.raise_or_json(resp)

    def get_board_field(self, field, idChecklist):
        resp = requests.get(f"https://trello.com/1/checklists/{idChecklist}/board/{field}",
                            params=self._auth_params(), data=None)
        return self.raise_or_json(resp)

    def get_card(self, idChecklist, actions=None, attachments=None, attachment_fields=None, stickers=None, members=None, member_fields=None, checkItemStates=None, checklists=None, limit=None, since=None, before=None, filter=None, fields=None):
        resp = requests.get(f"https://trello.com/1/checklists/{idChecklist}/cards",
                            params=self._auth_params(actions=actions, attachments=attachments,
                                                     attachment_fields=attachment_fields, stickers=stickers,
                                                     members=members, member_fields=member_fields,
                                                     checkItemStates=checkItemStates, checklists=checklists,
                                                     limit=limit, since=since, before=before, filter=filter,
                                                     fields=fields),
                            data=None)
        return self.raise_or_json(resp)

    def get_card_filter(self, filter, idChecklist):
        resp = requests.get(f"https://trello.com/1/checklists/{idChecklist}/cards/{filter}",
                            params=self._auth_params(), data=None)
        return self.raise_or_json(resp)

    def get_checkItem(self, idChecklist, filter=None, fields=None):
        resp = requests.get(f"https://trello.com/1/checklists/{idChecklist}/checkItems",
                            params=self._auth_params(filter=filter, fields=fields), data=None)
        return self.raise_or_json(resp)

    def get_checkItem_idCheckItem(self, idCheckItem, idChecklist, fields=None):
        resp = requests.get(f"https://trello.com/1/checklists/{idChecklist}/checkItems/{idCheckItem}",
                            params=self._auth_params(fields=fields), data=None)
        return self.raise_or_json(resp)

    # --- updates ---------------------------------------------------------
    def update(self, idChecklist, name=None, pos=None):
        resp = requests.put(f"https://trello.com/1/checklists/{idChecklist}",
                            params=self._auth_params(), data={"name": name, "pos": pos})
        return self.raise_or_json(resp)

    def update_name(self, idChecklist, value):
        resp = requests.put(f"https://trello.com/1/checklists/{idChecklist}/name",
                            params=self._auth_params(), data={"value": value})
        return self.raise_or_json(resp)

    def update_po(self, idChecklist, value):
        # method name kept as-is ("po" vs "pos") for backward compatibility
        resp = requests.put(f"https://trello.com/1/checklists/{idChecklist}/pos",
                            params=self._auth_params(), data={"value": value})
        return self.raise_or_json(resp)

    # --- creates ---------------------------------------------------------
    def new(self, idCard, name=None, pos=None, idChecklistSource=None):
        resp = requests.post("https://trello.com/1/checklists",
                             params=self._auth_params(),
                             data={"idCard": idCard, "name": name, "pos": pos, "idChecklistSource": idChecklistSource})
        return self.raise_or_json(resp)

    def new_checkItem(self, idChecklist, name, pos=None, checked=None):
        resp = requests.post(f"https://trello.com/1/checklists/{idChecklist}/checkItems",
                             params=self._auth_params(),
                             data={"name": name, "pos": pos, "checked": checked})
        return self.raise_or_json(resp)

    # --- deletes ---------------------------------------------------------
    def delete(self, idChecklist):
        resp = requests.delete(f"https://trello.com/1/checklists/{idChecklist}",
                               params=self._auth_params(), data=None)
        return self.raise_or_json(resp)

    def delete_checkItem_idCheckItem(self, idCheckItem, idChecklist):
        resp = requests.delete(f"https://trello.com/1/checklists/{idChecklist}/checkItems/{idCheckItem}",
                               params=self._auth_params(), data=None)
        return self.raise_or_json(resp)
import requests
class Checklists(ApiBase):
    """REST client for Trello's /1/checklists endpoints (key/token auth on every call).

    Fix: stripped the dataset-dump residue ("| 0.423339 | 0.146697 |") fused onto the
    final line, which made it a syntax error; code otherwise unchanged.
    """
    __module__ = 'trello'

    def __init__(self, apikey, token=None):
        self._apikey = apikey
        self._token = token

    def get(self, idChecklist, cards=None, card_fields=None, checkItems=None, checkItem_fields=None, fields=None):
        resp = requests.get(f"https://trello.com/1/checklists/{idChecklist}", params={"key": self._apikey, "token": self._token, "cards": cards, "card_fields": card_fields, "checkItems": checkItems, "checkItem_fields": checkItem_fields, "fields": fields}, data=None)
        return self.raise_or_json(resp)

    def get_field(self, field, idChecklist):
        resp = requests.get(f"https://trello.com/1/checklists/{idChecklist}/{field}", params={"key": self._apikey, "token": self._token}, data=None)
        return self.raise_or_json(resp)

    def get_board(self, idChecklist, fields=None):
        resp = requests.get(f"https://trello.com/1/checklists/{idChecklist}/board", params={"key": self._apikey, "token": self._token, "fields": fields}, data=None)
        return self.raise_or_json(resp)

    def get_board_field(self, field, idChecklist):
        resp = requests.get(f"https://trello.com/1/checklists/{idChecklist}/board/{field}", params={"key": self._apikey, "token": self._token}, data=None)
        return self.raise_or_json(resp)

    def get_card(self, idChecklist, actions=None, attachments=None, attachment_fields=None, stickers=None, members=None, member_fields=None, checkItemStates=None, checklists=None, limit=None, since=None, before=None, filter=None, fields=None):
        resp = requests.get(f"https://trello.com/1/checklists/{idChecklist}/cards", params={"key": self._apikey, "token": self._token, "actions": actions, "attachments": attachments, "attachment_fields": attachment_fields, "stickers": stickers, "members": members, "member_fields": member_fields, "checkItemStates": checkItemStates, "checklists": checklists, "limit": limit, "since": since, "before": before, "filter": filter, "fields": fields}, data=None)
        return self.raise_or_json(resp)

    def get_card_filter(self, filter, idChecklist):
        resp = requests.get(f"https://trello.com/1/checklists/{idChecklist}/cards/{filter}", params={"key": self._apikey, "token": self._token}, data=None)
        return self.raise_or_json(resp)

    def get_checkItem(self, idChecklist, filter=None, fields=None):
        resp = requests.get(f"https://trello.com/1/checklists/{idChecklist}/checkItems", params={"key": self._apikey, "token": self._token, "filter": filter, "fields": fields}, data=None)
        return self.raise_or_json(resp)

    def get_checkItem_idCheckItem(self, idCheckItem, idChecklist, fields=None):
        resp = requests.get(f"https://trello.com/1/checklists/{idChecklist}/checkItems/{idCheckItem}", params={"key": self._apikey, "token": self._token, "fields": fields}, data=None)
        return self.raise_or_json(resp)

    def update(self, idChecklist, name=None, pos=None):
        resp = requests.put(f"https://trello.com/1/checklists/{idChecklist}", params={"key": self._apikey, "token": self._token}, data={"name": name, "pos": pos})
        return self.raise_or_json(resp)

    def update_name(self, idChecklist, value):
        resp = requests.put(f"https://trello.com/1/checklists/{idChecklist}/name", params={"key": self._apikey, "token": self._token}, data={"value": value})
        return self.raise_or_json(resp)

    def update_po(self, idChecklist, value):
        resp = requests.put(f"https://trello.com/1/checklists/{idChecklist}/pos", params={"key": self._apikey, "token": self._token}, data={"value": value})
        return self.raise_or_json(resp)

    def new(self, idCard, name=None, pos=None, idChecklistSource=None):
        resp = requests.post("https://trello.com/1/checklists", params={"key": self._apikey, "token": self._token}, data={"idCard": idCard, "name": name, "pos": pos, "idChecklistSource": idChecklistSource})
        return self.raise_or_json(resp)

    def new_checkItem(self, idChecklist, name, pos=None, checked=None):
        resp = requests.post(f"https://trello.com/1/checklists/{idChecklist}/checkItems", params={"key": self._apikey, "token": self._token}, data={"name": name, "pos": pos, "checked": checked})
        return self.raise_or_json(resp)

    def delete(self, idChecklist):
        resp = requests.delete(f"https://trello.com/1/checklists/{idChecklist}", params={"key": self._apikey, "token": self._token}, data=None)
        return self.raise_or_json(resp)

    def delete_checkItem_idCheckItem(self, idCheckItem, idChecklist):
        resp = requests.delete(f"https://trello.com/1/checklists/{idChecklist}/checkItems/{idCheckItem}", params={"key": self._apikey, "token": self._token}, data=None)
        return self.raise_or_json(resp)
"""Tests sonnet.python.modules.nets.mlp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import sonnet as snt
from sonnet.testing import parameterized
import tensorflow as tf
class MLPTest(parameterized.ParameterizedTestCase,
              tf.test.TestCase):
    """Tests for snt.nets.MLP: construction, flags, shapes, regularizers, transpose."""

    def setUp(self):
        # Shared fixture: layer sizes, batch/input dims and w-only initializer/
        # regularizer/partitioner dicts reused across the tests below.
        super(MLPTest, self).setUp()
        self.output_sizes = [11, 13, 17]
        self.batch_size = 5
        self.input_size = 7
        self.module_name = "mlp"
        self.initializers = {
            "w": tf.truncated_normal_initializer(stddev=1.0),
        }
        self.regularizers = {
            "w": tf.contrib.layers.l1_regularizer(scale=0.1),
        }
        self.partitioners = {
            "w": tf.fixed_size_partitioner(num_shards=2),
        }

    def testName(self):
        # scope_name carries the enclosing variable scope; module_name does not.
        unique_name = "unique_name"
        with tf.variable_scope("scope"):
            mlp = snt.nets.MLP(name=unique_name, output_sizes=self.output_sizes)
        self.assertEqual(mlp.scope_name, "scope/" + unique_name)
        self.assertEqual(mlp.module_name, unique_name)

    @parameterized.NamedParameters(
        ("MLPNoFinalActBias", False, True),
        ("MLPNoFinalActNoBias", False, False),
        ("MLPFinalActBias", True, True),
        ("MLPFinalActNoBias", True, False),
    )
    def testConstructor(self, activate_final, use_bias):
        # Invalid constructor arguments must raise with informative messages;
        # a fully-specified valid construction exposes its configuration.
        with self.assertRaisesRegexp(ValueError, "output_sizes must not be empty"):
            mlp = snt.nets.MLP(name=self.module_name,
                               output_sizes=[],
                               activate_final=activate_final,
                               use_bias=use_bias)
        with self.assertRaisesRegexp(KeyError, "Invalid initializer keys.*"):
            mlp = snt.nets.MLP(
                name=self.module_name,
                output_sizes=self.output_sizes,
                initializers={"not_w": tf.truncated_normal_initializer(stddev=1.0)},
                activate_final=activate_final,
                use_bias=use_bias)
        with self.assertRaisesRegexp(TypeError,
                                     "Initializer for 'w' is not a callable "
                                     "function or dictionary"):
            mlp = snt.nets.MLP(name=self.module_name,
                               output_sizes=self.output_sizes,
                               initializers={"w": tf.zeros([1, 2, 3])},
                               activate_final=activate_final,
                               use_bias=use_bias)
        with self.assertRaisesRegexp(TypeError,
                                     "Input 'activation' must be callable"):
            mlp = snt.nets.MLP(name=self.module_name,
                               output_sizes=self.output_sizes,
                               activation="not_a_function",
                               activate_final=activate_final,
                               use_bias=use_bias)
        with self.assertRaisesRegexp(TypeError,
                                     "output_sizes must be iterable"):
            mlp = snt.nets.MLP(name=self.module_name,
                               output_sizes=None,
                               activate_final=activate_final,
                               use_bias=use_bias)
        mlp = snt.nets.MLP(name=self.module_name,
                           output_sizes=self.output_sizes,
                           initializers=self.initializers,
                           partitioners=self.partitioners,
                           regularizers=self.regularizers,
                           activate_final=activate_final,
                           use_bias=use_bias)
        self.assertEqual(self.initializers, mlp.initializers)
        self.assertEqual(self.regularizers, mlp.regularizers)
        self.assertEqual(self.partitioners, mlp.partitioners)
        self.assertEqual(len(mlp.layers), len(self.output_sizes))
        for i in range(0, len(mlp.layers)):
            self.assertEqual(mlp.layers[i].output_size, self.output_sizes[i])

    @parameterized.NamedParameters(
        ("MLPNoFinalActBias", False, True),
        ("MLPNoFinalActNoBias", False, False),
        ("MLPFinalActBias", True, True),
        ("MLPFinalActNoBias", True, False),
    )
    def testActivateBiasFlags(self, activate_final, use_bias):
        mlp = snt.nets.MLP(name=self.module_name,
                           output_sizes=self.output_sizes,
                           activate_final=activate_final,
                           use_bias=use_bias)
        inputs = tf.placeholder(tf.float32,
                                shape=[self.batch_size, self.input_size])
        net = mlp(inputs)
        # The type of the final graph op reveals whether the last layer was
        # activated (Relu), biased (Add) or a bare matmul (MatMul).
        if activate_final:
            self.assertEqual(net.op.type, "Relu")
        elif use_bias:
            self.assertEqual(net.op.type, "Add")
        else:
            self.assertEqual(net.op.type, "MatMul")
        variables = mlp.get_variables()
        # one w per layer, plus one b per layer when biased
        if use_bias:
            self.assertEqual(len(variables), len(self.output_sizes) * 2)
        else:
            self.assertEqual(len(variables), len(self.output_sizes))

    def testShape(self):
        inputs = tf.placeholder(tf.float32,
                                shape=[self.batch_size, self.input_size])
        mlp = snt.nets.MLP(name=self.module_name, output_sizes=self.output_sizes)
        output = mlp(inputs)
        self.assertTrue(output.get_shape().is_compatible_with(
            [self.batch_size, self.output_sizes[-1]]))
        self.assertEqual((self.batch_size, self.input_size), mlp.input_shape)
        self.assertEqual(self.output_sizes, list(mlp.output_sizes))

    @parameterized.NamedParameters(
        ("MLPNoFinalActBias", False, True),
        ("MLPNoFinalActNoBias", False, False),
        ("MLPFinalActBias", True, True),
        ("MLPFinalActNoBias", True, False),
    )
    def testRegularizersInRegularizationLosses(self, active_final, use_bias):
        if use_bias:
            regularizers = {"w": tf.contrib.layers.l1_regularizer(scale=0.5),
                            "b": tf.contrib.layers.l2_regularizer(scale=0.5)}
        else:
            regularizers = {"w": tf.contrib.layers.l1_regularizer(scale=0.5)}
        inputs = tf.placeholder(tf.float32,
                                shape=[self.batch_size, self.input_size])
        mlp = snt.nets.MLP(name=self.module_name, output_sizes=self.output_sizes,
                           regularizers=regularizers)
        mlp(inputs)
        # Connecting the module must register its losses in the graph collection.
        graph_regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*")
        if use_bias:
            self.assertRegexpMatches(graph_regularizers[1].name, ".*l2_regularizer.*")

    @parameterized.NamedParameters(
        ("MLPNoFinalActBias", False, True),
        ("MLPNoFinalActNoBias", False, False),
        ("MLPFinalActBias", True, True),
        ("MLPFinalActNoBias", True, False),
    )
    def testTranspose(self, activate_final, use_bias):
        with tf.variable_scope("scope1"):
            mlp = snt.nets.MLP(name=self.module_name,
                               output_sizes=self.output_sizes,
                               activate_final=activate_final,
                               use_bias=use_bias)
        with tf.variable_scope("scope2"):
            mlp_transpose = mlp.transpose()
        self.assertEqual("scope1/" + self.module_name, mlp.scope_name)
        self.assertEqual(self.module_name, mlp.module_name)
        self.assertEqual("scope2/" + self.module_name + "_transpose",
                         mlp_transpose.scope_name)
        self.assertEqual(self.module_name + "_transpose",
                         mlp_transpose.module_name)
        input_to_mlp = tf.placeholder(tf.float32,
                                      shape=[self.batch_size, self.input_size])
        # The transpose cannot be connected before the forward module has
        # instantiated its variables.
        with self.assertRaisesRegexp(snt.Error,
                                     "Variables in {} not instantiated yet, "
                                     "__call__ the module first."
                                     .format(mlp.layers[-1].scope_name)):
            mlp_transpose(input_to_mlp)
        mlp_transpose = mlp.transpose(name="another_mlp_transpose")
        mlp_out = mlp(input_to_mlp)
        mlp_transposed_output = mlp_transpose(mlp_out)
        self.assertEqual(mlp_transposed_output.get_shape(),
                         input_to_mlp.get_shape())
        self.assertEqual(mlp_transpose.use_bias, mlp.use_bias)
        self.assertEqual(mlp_transpose.activate_final, mlp.activate_final)
        if activate_final:
            self.assertEqual(mlp_transposed_output.op.type, "Relu")
        elif use_bias:
            self.assertEqual(mlp_transposed_output.op.type, "Add")
        else:
            self.assertEqual(mlp_transposed_output.op.type, "MatMul")
        # Layer i of the transpose mirrors layer -1-i of the forward MLP.
        for i in range(0, len(mlp.layers)):
            self.assertEqual(mlp_transpose.layers[i].output_size,
                             mlp.layers[-1 - i].input_shape[1])
        data = np.random.rand(self.batch_size, self.input_size)
        init = tf.global_variables_initializer()
        with self.test_session() as sess:
            sess.run(init)
            sess.run(mlp_transposed_output, feed_dict={input_to_mlp: data})
        variables = mlp_transpose.get_variables()
        if use_bias:
            self.assertEqual(len(variables), len(self.output_sizes) * 2)
        else:
            self.assertEqual(len(variables), len(self.output_sizes))
        # Test transpose method's activate_final arg.
        mlp_activate_final = mlp.transpose(activate_final=True)
        mlp_no_activate_final = mlp.transpose(activate_final=False)
        mlp_inherit_activate_final = mlp.transpose()
        self.assertEqual(True, mlp_activate_final.activate_final)
        self.assertEqual(False, mlp_no_activate_final.activate_final)
        self.assertEqual(mlp.activate_final,
                         mlp_inherit_activate_final.activate_final)

    def testVariableMap(self):
        """Tests for regressions in variable names."""
        use_bias = True
        var_names_w = [
            u"mlp/linear_0/w:0",
            u"mlp/linear_1/w:0",
            u"mlp/linear_2/w:0",
        ]
        var_names_b = [
            u"mlp/linear_0/b:0",
            u"mlp/linear_1/b:0",
            u"mlp/linear_2/b:0",
        ]
        correct_variable_names = set(var_names_w + var_names_b)
        mlp = snt.nets.MLP(name=self.module_name,
                           output_sizes=self.output_sizes,
                           activate_final=False,
                           use_bias=use_bias)
        input_shape = [10, 100]
        input_to_net = tf.placeholder(tf.float32, shape=input_shape)
        _ = mlp(input_to_net)
        variable_names = [var.name for var in mlp.get_variables()]
        self.assertEqual(set(variable_names), set(correct_variable_names))
if __name__ == "__main__":
tf.test.main() | sonnet/python/modules/nets/mlp_test.py |
"""Tests sonnet.python.modules.nets.mlp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import sonnet as snt
from sonnet.testing import parameterized
import tensorflow as tf
class MLPTest(parameterized.ParameterizedTestCase,
              tf.test.TestCase):
    """Tests for snt.nets.MLP: construction, flags, shapes, regularizers, transpose."""

    def setUp(self):
        # Shared fixture: layer sizes, batch/input dims and w-only initializer/
        # regularizer/partitioner dicts reused across the tests below.
        super(MLPTest, self).setUp()
        self.output_sizes = [11, 13, 17]
        self.batch_size = 5
        self.input_size = 7
        self.module_name = "mlp"
        self.initializers = {
            "w": tf.truncated_normal_initializer(stddev=1.0),
        }
        self.regularizers = {
            "w": tf.contrib.layers.l1_regularizer(scale=0.1),
        }
        self.partitioners = {
            "w": tf.fixed_size_partitioner(num_shards=2),
        }

    def testName(self):
        # scope_name carries the enclosing variable scope; module_name does not.
        unique_name = "unique_name"
        with tf.variable_scope("scope"):
            mlp = snt.nets.MLP(name=unique_name, output_sizes=self.output_sizes)
        self.assertEqual(mlp.scope_name, "scope/" + unique_name)
        self.assertEqual(mlp.module_name, unique_name)

    @parameterized.NamedParameters(
        ("MLPNoFinalActBias", False, True),
        ("MLPNoFinalActNoBias", False, False),
        ("MLPFinalActBias", True, True),
        ("MLPFinalActNoBias", True, False),
    )
    def testConstructor(self, activate_final, use_bias):
        # Invalid constructor arguments must raise with informative messages;
        # a fully-specified valid construction exposes its configuration.
        with self.assertRaisesRegexp(ValueError, "output_sizes must not be empty"):
            mlp = snt.nets.MLP(name=self.module_name,
                               output_sizes=[],
                               activate_final=activate_final,
                               use_bias=use_bias)
        with self.assertRaisesRegexp(KeyError, "Invalid initializer keys.*"):
            mlp = snt.nets.MLP(
                name=self.module_name,
                output_sizes=self.output_sizes,
                initializers={"not_w": tf.truncated_normal_initializer(stddev=1.0)},
                activate_final=activate_final,
                use_bias=use_bias)
        with self.assertRaisesRegexp(TypeError,
                                     "Initializer for 'w' is not a callable "
                                     "function or dictionary"):
            mlp = snt.nets.MLP(name=self.module_name,
                               output_sizes=self.output_sizes,
                               initializers={"w": tf.zeros([1, 2, 3])},
                               activate_final=activate_final,
                               use_bias=use_bias)
        with self.assertRaisesRegexp(TypeError,
                                     "Input 'activation' must be callable"):
            mlp = snt.nets.MLP(name=self.module_name,
                               output_sizes=self.output_sizes,
                               activation="not_a_function",
                               activate_final=activate_final,
                               use_bias=use_bias)
        with self.assertRaisesRegexp(TypeError,
                                     "output_sizes must be iterable"):
            mlp = snt.nets.MLP(name=self.module_name,
                               output_sizes=None,
                               activate_final=activate_final,
                               use_bias=use_bias)
        mlp = snt.nets.MLP(name=self.module_name,
                           output_sizes=self.output_sizes,
                           initializers=self.initializers,
                           partitioners=self.partitioners,
                           regularizers=self.regularizers,
                           activate_final=activate_final,
                           use_bias=use_bias)
        self.assertEqual(self.initializers, mlp.initializers)
        self.assertEqual(self.regularizers, mlp.regularizers)
        self.assertEqual(self.partitioners, mlp.partitioners)
        self.assertEqual(len(mlp.layers), len(self.output_sizes))
        for i in range(0, len(mlp.layers)):
            self.assertEqual(mlp.layers[i].output_size, self.output_sizes[i])

    @parameterized.NamedParameters(
        ("MLPNoFinalActBias", False, True),
        ("MLPNoFinalActNoBias", False, False),
        ("MLPFinalActBias", True, True),
        ("MLPFinalActNoBias", True, False),
    )
    def testActivateBiasFlags(self, activate_final, use_bias):
        mlp = snt.nets.MLP(name=self.module_name,
                           output_sizes=self.output_sizes,
                           activate_final=activate_final,
                           use_bias=use_bias)
        inputs = tf.placeholder(tf.float32,
                                shape=[self.batch_size, self.input_size])
        net = mlp(inputs)
        # The type of the final graph op reveals whether the last layer was
        # activated (Relu), biased (Add) or a bare matmul (MatMul).
        if activate_final:
            self.assertEqual(net.op.type, "Relu")
        elif use_bias:
            self.assertEqual(net.op.type, "Add")
        else:
            self.assertEqual(net.op.type, "MatMul")
        variables = mlp.get_variables()
        # one w per layer, plus one b per layer when biased
        if use_bias:
            self.assertEqual(len(variables), len(self.output_sizes) * 2)
        else:
            self.assertEqual(len(variables), len(self.output_sizes))

    def testShape(self):
        inputs = tf.placeholder(tf.float32,
                                shape=[self.batch_size, self.input_size])
        mlp = snt.nets.MLP(name=self.module_name, output_sizes=self.output_sizes)
        output = mlp(inputs)
        self.assertTrue(output.get_shape().is_compatible_with(
            [self.batch_size, self.output_sizes[-1]]))
        self.assertEqual((self.batch_size, self.input_size), mlp.input_shape)
        self.assertEqual(self.output_sizes, list(mlp.output_sizes))

    @parameterized.NamedParameters(
        ("MLPNoFinalActBias", False, True),
        ("MLPNoFinalActNoBias", False, False),
        ("MLPFinalActBias", True, True),
        ("MLPFinalActNoBias", True, False),
    )
    def testRegularizersInRegularizationLosses(self, active_final, use_bias):
        if use_bias:
            regularizers = {"w": tf.contrib.layers.l1_regularizer(scale=0.5),
                            "b": tf.contrib.layers.l2_regularizer(scale=0.5)}
        else:
            regularizers = {"w": tf.contrib.layers.l1_regularizer(scale=0.5)}
        inputs = tf.placeholder(tf.float32,
                                shape=[self.batch_size, self.input_size])
        mlp = snt.nets.MLP(name=self.module_name, output_sizes=self.output_sizes,
                           regularizers=regularizers)
        mlp(inputs)
        # Connecting the module must register its losses in the graph collection.
        graph_regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*")
        if use_bias:
            self.assertRegexpMatches(graph_regularizers[1].name, ".*l2_regularizer.*")

    @parameterized.NamedParameters(
        ("MLPNoFinalActBias", False, True),
        ("MLPNoFinalActNoBias", False, False),
        ("MLPFinalActBias", True, True),
        ("MLPFinalActNoBias", True, False),
    )
    def testTranspose(self, activate_final, use_bias):
        with tf.variable_scope("scope1"):
            mlp = snt.nets.MLP(name=self.module_name,
                               output_sizes=self.output_sizes,
                               activate_final=activate_final,
                               use_bias=use_bias)
        with tf.variable_scope("scope2"):
            mlp_transpose = mlp.transpose()
        self.assertEqual("scope1/" + self.module_name, mlp.scope_name)
        self.assertEqual(self.module_name, mlp.module_name)
        self.assertEqual("scope2/" + self.module_name + "_transpose",
                         mlp_transpose.scope_name)
        self.assertEqual(self.module_name + "_transpose",
                         mlp_transpose.module_name)
        input_to_mlp = tf.placeholder(tf.float32,
                                      shape=[self.batch_size, self.input_size])
        # The transpose cannot be connected before the forward module has
        # instantiated its variables.
        with self.assertRaisesRegexp(snt.Error,
                                     "Variables in {} not instantiated yet, "
                                     "__call__ the module first."
                                     .format(mlp.layers[-1].scope_name)):
            mlp_transpose(input_to_mlp)
        mlp_transpose = mlp.transpose(name="another_mlp_transpose")
        mlp_out = mlp(input_to_mlp)
        mlp_transposed_output = mlp_transpose(mlp_out)
        self.assertEqual(mlp_transposed_output.get_shape(),
                         input_to_mlp.get_shape())
        self.assertEqual(mlp_transpose.use_bias, mlp.use_bias)
        self.assertEqual(mlp_transpose.activate_final, mlp.activate_final)
        if activate_final:
            self.assertEqual(mlp_transposed_output.op.type, "Relu")
        elif use_bias:
            self.assertEqual(mlp_transposed_output.op.type, "Add")
        else:
            self.assertEqual(mlp_transposed_output.op.type, "MatMul")
        # Layer i of the transpose mirrors layer -1-i of the forward MLP.
        for i in range(0, len(mlp.layers)):
            self.assertEqual(mlp_transpose.layers[i].output_size,
                             mlp.layers[-1 - i].input_shape[1])
        data = np.random.rand(self.batch_size, self.input_size)
        init = tf.global_variables_initializer()
        with self.test_session() as sess:
            sess.run(init)
            sess.run(mlp_transposed_output, feed_dict={input_to_mlp: data})
        variables = mlp_transpose.get_variables()
        if use_bias:
            self.assertEqual(len(variables), len(self.output_sizes) * 2)
        else:
            self.assertEqual(len(variables), len(self.output_sizes))
        # Test transpose method's activate_final arg.
        mlp_activate_final = mlp.transpose(activate_final=True)
        mlp_no_activate_final = mlp.transpose(activate_final=False)
        mlp_inherit_activate_final = mlp.transpose()
        self.assertEqual(True, mlp_activate_final.activate_final)
        self.assertEqual(False, mlp_no_activate_final.activate_final)
        self.assertEqual(mlp.activate_final,
                         mlp_inherit_activate_final.activate_final)

    def testVariableMap(self):
        """Tests for regressions in variable names."""
        use_bias = True
        var_names_w = [
            u"mlp/linear_0/w:0",
            u"mlp/linear_1/w:0",
            u"mlp/linear_2/w:0",
        ]
        var_names_b = [
            u"mlp/linear_0/b:0",
            u"mlp/linear_1/b:0",
            u"mlp/linear_2/b:0",
        ]
        correct_variable_names = set(var_names_w + var_names_b)
        mlp = snt.nets.MLP(name=self.module_name,
                           output_sizes=self.output_sizes,
                           activate_final=False,
                           use_bias=use_bias)
        input_shape = [10, 100]
        input_to_net = tf.placeholder(tf.float32, shape=input_shape)
        _ = mlp(input_to_net)
        variable_names = [var.name for var in mlp.get_variables()]
        self.assertEqual(set(variable_names), set(correct_variable_names))
if __name__ == "__main__":
tf.test.main() | 0.874064 | 0.412885 |
import pkgutil
import warnings
from pathlib import Path
from pmfp.utils.fs_utils import get_abs_path, path_to_str
from ..utils import (
sphinx_new,
no_jekyll,
sphinx_config,
sphinx_build,
move_to_source,
makeindex
)
from pmfp.utils.template_utils import template_2_content
def _load_template(filename: str) -> str:
    """Load a jinja template bundled with this package, decoded as UTF-8.

    Args:
        filename: Name of the template file under
            ``pmfp.entrypoint.doc_.new.source_temp``.

    Raises:
        AttributeError: When the template resource cannot be loaded.
            (AttributeError is kept for backward compatibility with the
            previous inline loading code.)
    """
    data = pkgutil.get_data('pmfp.entrypoint.doc_.new.source_temp', filename)
    if not data:
        raise AttributeError(f"加载{filename}模板失败")
    return data.decode('utf-8')


# Snippet appended to the generated Sphinx conf.py (filled in later with
# the project's code path).
AppendConfig = _load_template('pyappend_config.py.jinja')
# Template for the generated documentation index page.
pyindexmd = _load_template('pyindex.md.jinja')
def doc_new_py(code: str, output: str, source_dir: str, *, project_name: str, author: str, version: str, cwd: str = ".") -> None:
    """Build API documentation for a Python project with Sphinx.

    Args:
        code (str): Location of the project's source code.
        output (str): Location for the rendered HTML documentation.
        source_dir (str): Location of the documentation sources.
        project_name (str): Name of the project.
        author (str): Author of the project.
        version (str): Version of the project.
        cwd (str): Root directory in which the commands are executed.
    """
    # Resolve the working directory; fall back to the current directory.
    if cwd:
        cwdp = get_abs_path(cwd)
    else:
        cwdp = Path(".")
    codep = get_abs_path(code, cwd=cwdp)
    codep_str = path_to_str(codep)
    outputp = get_abs_path(output, cwd=cwdp)
    source_dirp = get_abs_path(source_dir, cwd=cwdp)
    # Scaffold a fresh Sphinx project first; the steps below customize it.
    sphinx_new(source_dir=source_dirp, project_name=project_name, author=author, version=version, cwd=cwdp)
    try:
        # Render the conf.py snippet with the code path and append it.
        appconfig = template_2_content(AppendConfig, code_path=codep_str)
        sphinx_config(source_dirp, appconfig)
        move_to_source(source_dir=source_dirp, root=cwdp)
        makeindex(source_dir=source_dirp, template=pyindexmd, project_name=project_name)
        sphinx_build(source_dir=source_dirp, doc_dir=outputp, cwd=cwdp)
        # Marker so GitHub Pages serves underscore-prefixed directories.
        no_jekyll(outputp)
    except Exception as err:
        # Best-effort: any failure (most likely a missing optional
        # dependency) is reported as a warning rather than raised.
        warnings.warn(f"""初始化python项目文档失败:
{str(err)}
构造python项目的api文档需要安装依赖:
+ pip install sphinx
+ pip install recommonmark
+ pip install sphinx-autoapi
+ pip install sphinx_rtd_theme
""") | pmfp/entrypoint/doc_/new/new_py.py | import pkgutil
import warnings
from pathlib import Path
from pmfp.utils.fs_utils import get_abs_path, path_to_str
from ..utils import (
sphinx_new,
no_jekyll,
sphinx_config,
sphinx_build,
move_to_source,
makeindex
)
from pmfp.utils.template_utils import template_2_content
# Snippet appended to the generated Sphinx conf.py, loaded from package data.
AppendConfig = ""
source_io = pkgutil.get_data('pmfp.entrypoint.doc_.new.source_temp', 'pyappend_config.py.jinja')
if source_io:
    AppendConfig = source_io.decode('utf-8')
else:
    # Failed to load the pyappend_config.py.jinja template.
    raise AttributeError("加载pyappend_config.py.jinja模板失败")
# Template for the generated documentation index page.
pyindexmd = ""
source_io = pkgutil.get_data('pmfp.entrypoint.doc_.new.source_temp', 'pyindex.md.jinja')
if source_io:
    pyindexmd = source_io.decode('utf-8')
else:
    # Failed to load the pyindex.md.jinja template.
    raise AttributeError("加载pyindex.md.jinja模板失败")
def doc_new_py(code: str, output: str, source_dir: str, *, project_name: str, author: str, version: str, cwd: str = ".") -> None:
    """Build API documentation for a Python project with Sphinx.

    Args:
        code (str): Location of the project's source code.
        output (str): Location for the rendered HTML documentation.
        source_dir (str): Location of the documentation sources.
        project_name (str): Name of the project.
        author (str): Author of the project.
        version (str): Version of the project.
        cwd (str): Root directory in which the commands are executed.
    """
    # Resolve the working directory; fall back to the current directory.
    if cwd:
        cwdp = get_abs_path(cwd)
    else:
        cwdp = Path(".")
    codep = get_abs_path(code, cwd=cwdp)
    codep_str = path_to_str(codep)
    outputp = get_abs_path(output, cwd=cwdp)
    source_dirp = get_abs_path(source_dir, cwd=cwdp)
    # Scaffold a fresh Sphinx project first; the steps below customize it.
    sphinx_new(source_dir=source_dirp, project_name=project_name, author=author, version=version, cwd=cwdp)
    try:
        # Render the conf.py snippet with the code path and append it.
        appconfig = template_2_content(AppendConfig, code_path=codep_str)
        sphinx_config(source_dirp, appconfig)
        move_to_source(source_dir=source_dirp, root=cwdp)
        makeindex(source_dir=source_dirp, template=pyindexmd, project_name=project_name)
        sphinx_build(source_dir=source_dirp, doc_dir=outputp, cwd=cwdp)
        # Marker so GitHub Pages serves underscore-prefixed directories.
        no_jekyll(outputp)
    except Exception as err:
        # Best-effort: any failure (most likely a missing optional
        # dependency) is reported as a warning rather than raised.
        warnings.warn(f"""初始化python项目文档失败:
{str(err)}
构造python项目的api文档需要安装依赖:
+ pip install sphinx
+ pip install recommonmark
+ pip install sphinx-autoapi
+ pip install sphinx_rtd_theme
""")
import numpy as np
from mindspore import context
from mindspore import Tensor, nn
from mindspore.common.parameter import Parameter
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype
grad_all = C.GradOperation(get_all=True)
context.set_context(device_target="Ascend")
def test_for_after_for_in_for():
    """Check that graph mode and pynative mode agree for a `for` loop that
    follows a nested `for`-in-`for`, with parameter side effects.

    Both the forward result and the gradients are computed in each mode
    and compared.
    """

    class ForAfterForInForNet(nn.Cell):
        def __init__(self):
            super().__init__()
            self.relu = nn.ReLU()
            self.softmax = nn.Softmax()
            self.mul = P.Mul()
            self.add = P.Add()
            self.sub = P.Sub()
            self.div = P.Div()
            self.assign = P.Assign()
            param_a = np.full((1,), 5, dtype=np.float32)
            self.param_a = Parameter(Tensor(param_a), name='a')
            param_b = np.full((1,), 2, dtype=np.float32)
            self.param_b = Parameter(Tensor(param_b), name='b')
            param_c = np.full((1,), 20, dtype=np.float32)
            self.param_c = Parameter(Tensor(param_c), name='c')

        def construct(self, x, y):
            for _ in range(0, 4):
                self.param_b = self.add(self.param_c, self.param_b)
                # BUGFIX: the loop variable was previously named `_` while
                # the body referenced an undefined `j`, which failed at
                # graph compilation / raised NameError. Name it `j`.
                for j in range(0, 8):
                    self.param_b = self.param_a + j
            self.param_c = self.param_a * self.param_b
            for _ in range(0, 3):
                y = y + self.param_b
                x = self.relu(self.param_c * 3)
                self.param_a = x - y
            z = y + self.param_b
            return z

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net

        def construct(self, *inputs):
            return grad_all(self.net)(*inputs)

    x = Tensor([11], mstype.int32)
    y = Tensor([7], mstype.int32)

    # graph mode
    context.set_context(mode=context.GRAPH_MODE)
    for_after_for_in_for_net = ForAfterForInForNet()
    net = GradNet(for_after_for_in_for_net)
    graph_forward_res = for_after_for_in_for_net(x, y)
    graph_backward_res = net(x, y)

    # pynative mode
    context.set_context(mode=context.PYNATIVE_MODE)
    for_after_for_in_for_net = ForAfterForInForNet()
    net = GradNet(for_after_for_in_for_net)
    pynative_forward_res = for_after_for_in_for_net(x, y)
    pynative_backward_res = net(x, y)

    assert graph_forward_res == pynative_forward_res
assert graph_backward_res == pynative_backward_res | tests/st/control/inner/test_332_for_after_for_in_for.py | import numpy as np
from mindspore import context
from mindspore import Tensor, nn
from mindspore.common.parameter import Parameter
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype
grad_all = C.GradOperation(get_all=True)
context.set_context(device_target="Ascend")
def test_for_after_for_in_for():
    """Check that graph mode and pynative mode agree for a `for` loop that
    follows a nested `for`-in-`for`, with parameter side effects.

    Both the forward result and the gradients are computed in each mode
    and compared.
    """

    class ForAfterForInForNet(nn.Cell):
        def __init__(self):
            super().__init__()
            self.relu = nn.ReLU()
            self.softmax = nn.Softmax()
            self.mul = P.Mul()
            self.add = P.Add()
            self.sub = P.Sub()
            self.div = P.Div()
            self.assign = P.Assign()
            param_a = np.full((1,), 5, dtype=np.float32)
            self.param_a = Parameter(Tensor(param_a), name='a')
            param_b = np.full((1,), 2, dtype=np.float32)
            self.param_b = Parameter(Tensor(param_b), name='b')
            param_c = np.full((1,), 20, dtype=np.float32)
            self.param_c = Parameter(Tensor(param_c), name='c')

        def construct(self, x, y):
            for _ in range(0, 4):
                self.param_b = self.add(self.param_c, self.param_b)
                # BUGFIX: the loop variable was previously named `_` while
                # the body referenced an undefined `j`, which failed at
                # graph compilation / raised NameError. Name it `j`.
                for j in range(0, 8):
                    self.param_b = self.param_a + j
            self.param_c = self.param_a * self.param_b
            for _ in range(0, 3):
                y = y + self.param_b
                x = self.relu(self.param_c * 3)
                self.param_a = x - y
            z = y + self.param_b
            return z

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net

        def construct(self, *inputs):
            return grad_all(self.net)(*inputs)

    x = Tensor([11], mstype.int32)
    y = Tensor([7], mstype.int32)

    # graph mode
    context.set_context(mode=context.GRAPH_MODE)
    for_after_for_in_for_net = ForAfterForInForNet()
    net = GradNet(for_after_for_in_for_net)
    graph_forward_res = for_after_for_in_for_net(x, y)
    graph_backward_res = net(x, y)

    # pynative mode
    context.set_context(mode=context.PYNATIVE_MODE)
    for_after_for_in_for_net = ForAfterForInForNet()
    net = GradNet(for_after_for_in_for_net)
    pynative_forward_res = for_after_for_in_for_net(x, y)
    pynative_backward_res = net(x, y)

    assert graph_forward_res == pynative_forward_res
assert graph_backward_res == pynative_backward_res | 0.772359 | 0.35095 |
"Test squeezer, coverage 95%"
from textwrap import dedent
from tkinter import Text, Tk
import unittest
from unittest.mock import Mock, NonCallableMagicMock, patch, sentinel, ANY
from test.support import requires
from idlelib.config import idleConf
from idlelib.percolator import Percolator
from idlelib.squeezer import count_lines_with_wrapping, ExpandingButton, \
Squeezer
from idlelib import macosx
from idlelib.textview import view_text
from idlelib.tooltip import Hovertip
SENTINEL_VALUE = sentinel.SENTINEL_VALUE
def get_test_tk_root(test_instance):
    """Create a withdrawn Tk root and register its teardown on *test_instance*."""
    requires('gui')
    tk_root = Tk()
    tk_root.withdraw()

    def _teardown():
        tk_root.update_idletasks()
        tk_root.destroy()

    test_instance.addCleanup(_teardown)
    return tk_root
class CountLinesTest(unittest.TestCase):
"""Tests for the count_lines_with_wrapping function."""
def check(self, expected, text, linewidth):
return self.assertEqual(
expected,
count_lines_with_wrapping(text, linewidth),
)
def test_count_empty(self):
"""Test with an empty string."""
self.assertEqual(count_lines_with_wrapping(""), 0)
def test_count_begins_with_empty_line(self):
"""Test with a string which begins with a newline."""
self.assertEqual(count_lines_with_wrapping("\ntext"), 2)
def test_count_ends_with_empty_line(self):
"""Test with a string which ends with a newline."""
self.assertEqual(count_lines_with_wrapping("text\n"), 1)
def test_count_several_lines(self):
"""Test with several lines of text."""
self.assertEqual(count_lines_with_wrapping("1\n2\n3\n"), 3)
def test_empty_lines(self):
self.check(expected=1, text='\n', linewidth=80)
self.check(expected=2, text='\n\n', linewidth=80)
self.check(expected=10, text='\n' * 10, linewidth=80)
def test_long_line(self):
self.check(expected=3, text='a' * 200, linewidth=80)
self.check(expected=3, text='a' * 200 + '\n', linewidth=80)
def test_several_lines_different_lengths(self):
text = dedent("""\
13 characters
43 is the number of characters on this line
7 chars
13 characters""")
self.check(expected=5, text=text, linewidth=80)
self.check(expected=5, text=text + '\n', linewidth=80)
self.check(expected=6, text=text, linewidth=40)
self.check(expected=7, text=text, linewidth=20)
self.check(expected=11, text=text, linewidth=10)
class SqueezerTest(unittest.TestCase):
    """Tests for the Squeezer class.

    The EditorWindow is always a mock; where a real Text widget is
    needed, a GUI is required and a real Tk root is created.
    """
    def make_mock_editor_window(self, with_text_widget=False):
        """Create a mock EditorWindow instance."""
        editwin = NonCallableMagicMock()
        editwin.width = 80
        if with_text_widget:
            editwin.root = get_test_tk_root(self)
            text_widget = self.make_text_widget(root=editwin.root)
            # Squeezer reaches the Text widget both directly and through
            # the percolator chain, so wire both attributes to it.
            editwin.text = editwin.per.bottom = text_widget
        return editwin
    def make_squeezer_instance(self, editor_window=None):
        """Create an actual Squeezer instance with a mock EditorWindow."""
        if editor_window is None:
            editor_window = self.make_mock_editor_window()
        squeezer = Squeezer(editor_window)
        return squeezer
    def make_text_widget(self, root=None):
        """Create a Text widget with a fixed font and an "iomark" mark."""
        if root is None:
            root = get_test_tk_root(self)
        text_widget = Text(root)
        text_widget["font"] = ('Courier', 10)
        text_widget.mark_set("iomark", "1.0")
        return text_widget
    def set_idleconf_option_with_cleanup(self, configType, section, option, value):
        """Set an idleConf option and restore the previous value on cleanup."""
        prev_val = idleConf.GetOption(configType, section, option)
        idleConf.SetOption(configType, section, option, value)
        self.addCleanup(idleConf.SetOption,
                        configType, section, option, prev_val)
    def test_count_lines(self):
        """Test Squeezer.count_lines() with various inputs."""
        editwin = self.make_mock_editor_window()
        squeezer = self.make_squeezer_instance(editwin)
        # Each case is (source of the text, editor width, expected lines).
        for text_code, line_width, expected in [
            (r"'\n'", 80, 1),
            (r"'\n' * 3", 80, 3),
            (r"'a' * 40 + '\n'", 80, 1),
            (r"'a' * 80 + '\n'", 80, 1),
            (r"'a' * 200 + '\n'", 80, 3),
            (r"'aa\t' * 20", 80, 2),
            (r"'aa\t' * 21", 80, 3),
            (r"'aa\t' * 20", 40, 4),
        ]:
            with self.subTest(text_code=text_code,
                              line_width=line_width,
                              expected=expected):
                text = eval(text_code)
                with patch.object(editwin, 'width', line_width):
                    self.assertEqual(squeezer.count_lines(text), expected)
    def test_init(self):
        """Test the creation of Squeezer instances."""
        editwin = self.make_mock_editor_window()
        squeezer = self.make_squeezer_instance(editwin)
        self.assertIs(squeezer.editwin, editwin)
        self.assertEqual(squeezer.expandingbuttons, [])
    def test_write_no_tags(self):
        """Test Squeezer's overriding of the EditorWindow's write() method."""
        # Writes without tags must be passed through unchanged, never squeezed.
        editwin = self.make_mock_editor_window()
        for text in ['', 'TEXT', 'LONG TEXT' * 1000, 'MANY_LINES\n' * 100]:
            editwin.write = orig_write = Mock(return_value=SENTINEL_VALUE)
            squeezer = self.make_squeezer_instance(editwin)
            self.assertEqual(squeezer.editwin.write(text, ()), SENTINEL_VALUE)
            self.assertEqual(orig_write.call_count, 1)
            orig_write.assert_called_with(text, ())
            self.assertEqual(len(squeezer.expandingbuttons), 0)
    def test_write_not_stdout(self):
        """Test Squeezer's overriding of the EditorWindow's write() method."""
        # Only "stdout" is auto-squeezed; "stderr" is passed through.
        for text in ['', 'TEXT', 'LONG TEXT' * 1000, 'MANY_LINES\n' * 100]:
            editwin = self.make_mock_editor_window()
            editwin.write.return_value = SENTINEL_VALUE
            orig_write = editwin.write
            squeezer = self.make_squeezer_instance(editwin)
            self.assertEqual(squeezer.editwin.write(text, "stderr"),
                             SENTINEL_VALUE)
            self.assertEqual(orig_write.call_count, 1)
            orig_write.assert_called_with(text, "stderr")
            self.assertEqual(len(squeezer.expandingbuttons), 0)
    def test_write_stdout(self):
        """Test Squeezer's overriding of the EditorWindow's write() method."""
        editwin = self.make_mock_editor_window()
        # Short texts are written through to the original write().
        for text in ['', 'TEXT']:
            editwin.write = orig_write = Mock(return_value=SENTINEL_VALUE)
            squeezer = self.make_squeezer_instance(editwin)
            squeezer.auto_squeeze_min_lines = 50
            self.assertEqual(squeezer.editwin.write(text, "stdout"),
                             SENTINEL_VALUE)
            self.assertEqual(orig_write.call_count, 1)
            orig_write.assert_called_with(text, "stdout")
            self.assertEqual(len(squeezer.expandingbuttons), 0)
        # Long texts are squeezed into a button instead of being written.
        for text in ['LONG TEXT' * 1000, 'MANY_LINES\n' * 100]:
            editwin.write = orig_write = Mock(return_value=SENTINEL_VALUE)
            squeezer = self.make_squeezer_instance(editwin)
            squeezer.auto_squeeze_min_lines = 50
            self.assertEqual(squeezer.editwin.write(text, "stdout"), None)
            self.assertEqual(orig_write.call_count, 0)
            self.assertEqual(len(squeezer.expandingbuttons), 1)
    def test_auto_squeeze(self):
        """Test that the auto-squeezing creates an ExpandingButton properly."""
        editwin = self.make_mock_editor_window(with_text_widget=True)
        text_widget = editwin.text
        squeezer = self.make_squeezer_instance(editwin)
        squeezer.auto_squeeze_min_lines = 5
        squeezer.count_lines = Mock(return_value=6)
        editwin.write('TEXT\n'*6, "stdout")
        # The text itself was replaced by a (button) window annotation.
        self.assertEqual(text_widget.get('1.0', 'end'), '\n')
        self.assertEqual(len(squeezer.expandingbuttons), 1)
    def test_squeeze_current_text(self):
        """Test the squeeze_current_text method."""
        # Squeezing text should work for both stdout and stderr.
        for tag_name in ["stdout", "stderr"]:
            editwin = self.make_mock_editor_window(with_text_widget=True)
            text_widget = editwin.text
            squeezer = self.make_squeezer_instance(editwin)
            squeezer.count_lines = Mock(return_value=6)
            # Prepare some text in the Text widget.
            text_widget.insert("1.0", "SOME\nTEXT\n", tag_name)
            text_widget.mark_set("insert", "1.0")
            self.assertEqual(text_widget.get('1.0', 'end'), 'SOME\nTEXT\n\n')
            self.assertEqual(len(squeezer.expandingbuttons), 0)
            # Test squeezing the current text.
            retval = squeezer.squeeze_current_text()
            self.assertEqual(retval, "break")
            self.assertEqual(text_widget.get('1.0', 'end'), '\n\n')
            self.assertEqual(len(squeezer.expandingbuttons), 1)
            self.assertEqual(squeezer.expandingbuttons[0].s, 'SOME\nTEXT')
            # Test that expanding the squeezed text works and afterwards
            # the Text widget contains the original text.
            squeezer.expandingbuttons[0].expand()
            self.assertEqual(text_widget.get('1.0', 'end'), 'SOME\nTEXT\n\n')
            self.assertEqual(len(squeezer.expandingbuttons), 0)
    def test_squeeze_current_text_no_allowed_tags(self):
        """Test that the event doesn't squeeze text without a relevant tag."""
        editwin = self.make_mock_editor_window(with_text_widget=True)
        text_widget = editwin.text
        squeezer = self.make_squeezer_instance(editwin)
        squeezer.count_lines = Mock(return_value=6)
        # Prepare some text in the Text widget.
        text_widget.insert("1.0", "SOME\nTEXT\n", "TAG")
        text_widget.mark_set("insert", "1.0")
        self.assertEqual(text_widget.get('1.0', 'end'), 'SOME\nTEXT\n\n')
        self.assertEqual(len(squeezer.expandingbuttons), 0)
        # Test squeezing the current text: nothing should change.
        retval = squeezer.squeeze_current_text()
        self.assertEqual(retval, "break")
        self.assertEqual(text_widget.get('1.0', 'end'), 'SOME\nTEXT\n\n')
        self.assertEqual(len(squeezer.expandingbuttons), 0)
    def test_squeeze_text_before_existing_squeezed_text(self):
        """Test squeezing text before existing squeezed text."""
        editwin = self.make_mock_editor_window(with_text_widget=True)
        text_widget = editwin.text
        squeezer = self.make_squeezer_instance(editwin)
        squeezer.count_lines = Mock(return_value=6)
        # Prepare some text in the Text widget and squeeze it.
        text_widget.insert("1.0", "SOME\nTEXT\n", "stdout")
        text_widget.mark_set("insert", "1.0")
        squeezer.squeeze_current_text()
        self.assertEqual(len(squeezer.expandingbuttons), 1)
        # Test squeezing the current text.
        text_widget.insert("1.0", "MORE\nSTUFF\n", "stdout")
        text_widget.mark_set("insert", "1.0")
        retval = squeezer.squeeze_current_text()
        self.assertEqual(retval, "break")
        self.assertEqual(text_widget.get('1.0', 'end'), '\n\n\n')
        self.assertEqual(len(squeezer.expandingbuttons), 2)
        # The new button must be inserted *before* the existing one.
        self.assertTrue(text_widget.compare(
            squeezer.expandingbuttons[0],
            '<',
            squeezer.expandingbuttons[1],
        ))
    def test_reload(self):
        """Test the reload() class-method."""
        editwin = self.make_mock_editor_window(with_text_widget=True)
        squeezer = self.make_squeezer_instance(editwin)
        orig_auto_squeeze_min_lines = squeezer.auto_squeeze_min_lines
        # Increase auto-squeeze-min-lines.
        new_auto_squeeze_min_lines = orig_auto_squeeze_min_lines + 10
        self.set_idleconf_option_with_cleanup(
            'main', 'PyShell', 'auto-squeeze-min-lines',
            str(new_auto_squeeze_min_lines))
        Squeezer.reload()
        self.assertEqual(squeezer.auto_squeeze_min_lines,
                         new_auto_squeeze_min_lines)
    def test_reload_no_squeezer_instances(self):
        """Test that Squeezer.reload() runs without any instances existing."""
        Squeezer.reload()
class ExpandingButtonTest(unittest.TestCase):
    """Tests for the ExpandingButton class."""
    # In these tests the squeezer instance is a mock, but actual tkinter
    # Text and Button instances are created.
    def make_mock_squeezer(self):
        """Helper for tests: Create a mock Squeezer object."""
        root = get_test_tk_root(self)
        squeezer = Mock()
        squeezer.editwin.text = Text(root)
        # A real percolator is needed for undo-blocking during expansion.
        squeezer.editwin.per = Percolator(squeezer.editwin.text)
        self.addCleanup(squeezer.editwin.per.close)
        # Set default values for the configuration settings.
        squeezer.auto_squeeze_min_lines = 50
        return squeezer
    @patch('idlelib.squeezer.Hovertip', autospec=Hovertip)
    def test_init(self, MockHovertip):
        """Test the simplest creation of an ExpandingButton."""
        squeezer = self.make_mock_squeezer()
        text_widget = squeezer.editwin.text
        expandingbutton = ExpandingButton('TEXT', 'TAGS', 50, squeezer)
        self.assertEqual(expandingbutton.s, 'TEXT')
        # Check that the underlying tkinter.Button is properly configured.
        self.assertEqual(expandingbutton.master, text_widget)
        self.assertTrue('50 lines' in expandingbutton.cget('text'))
        # Check that the text widget still contains no text.
        self.assertEqual(text_widget.get('1.0', 'end'), '\n')
        # Check that the mouse events are bound.
        self.assertIn('<Double-Button-1>', expandingbutton.bind())
        right_button_code = '<Button-%s>' % ('2' if macosx.isAquaTk() else '3')
        self.assertIn(right_button_code, expandingbutton.bind())
        # Check that ToolTip was called once, with appropriate values.
        self.assertEqual(MockHovertip.call_count, 1)
        MockHovertip.assert_called_with(expandingbutton, ANY, hover_delay=ANY)
        # Check that 'right-click' appears in the tooltip text.
        tooltip_text = MockHovertip.call_args[0][1]
        self.assertIn('right-click', tooltip_text.lower())
    def test_expand(self):
        """Test the expand event."""
        squeezer = self.make_mock_squeezer()
        expandingbutton = ExpandingButton('TEXT', 'TAGS', 50, squeezer)
        # Insert the button into the text widget
        # (this is normally done by the Squeezer class).
        text_widget = squeezer.editwin.text
        text_widget.window_create("1.0", window=expandingbutton)
        # Trigger the expand event.
        retval = expandingbutton.expand(event=Mock())
        self.assertEqual(retval, None)
        # Check that the text was inserted into the text widget.
        self.assertEqual(text_widget.get('1.0', 'end'), 'TEXT\n')
        # Check that the 'TAGS' tag was set on the inserted text.
        text_end_index = text_widget.index('end-1c')
        self.assertEqual(text_widget.get('1.0', text_end_index), 'TEXT')
        self.assertEqual(text_widget.tag_nextrange('TAGS', '1.0'),
                         ('1.0', text_end_index))
        # Check that the button removed itself from squeezer.expandingbuttons.
        self.assertEqual(squeezer.expandingbuttons.remove.call_count, 1)
        squeezer.expandingbuttons.remove.assert_called_with(expandingbutton)
    # TODO(review): method name has a typo ("oupput" -> "output"); renaming
    # is safe for unittest discovery but left unchanged here.
    def test_expand_dangerous_oupput(self):
        """Test that expanding very long output asks user for confirmation."""
        squeezer = self.make_mock_squeezer()
        text = 'a' * 10**5
        expandingbutton = ExpandingButton(text, 'TAGS', 50, squeezer)
        expandingbutton.set_is_dangerous()
        self.assertTrue(expandingbutton.is_dangerous)
        # Insert the button into the text widget
        # (this is normally done by the Squeezer class).
        text_widget = expandingbutton.text
        text_widget.window_create("1.0", window=expandingbutton)
        # Patch the message box module to always return False.
        with patch('idlelib.squeezer.messagebox') as mock_msgbox:
            mock_msgbox.askokcancel.return_value = False
            mock_msgbox.askyesno.return_value = False
            # Trigger the expand event.
            retval = expandingbutton.expand(event=Mock())
        # Check that the event chain was broken and no text was inserted.
        self.assertEqual(retval, 'break')
        self.assertEqual(expandingbutton.text.get('1.0', 'end-1c'), '')
        # Patch the message box module to always return True.
        with patch('idlelib.squeezer.messagebox') as mock_msgbox:
            mock_msgbox.askokcancel.return_value = True
            mock_msgbox.askyesno.return_value = True
            # Trigger the expand event.
            retval = expandingbutton.expand(event=Mock())
        # Check that the event chain wasn't broken and the text was inserted.
        self.assertEqual(retval, None)
        self.assertEqual(expandingbutton.text.get('1.0', 'end-1c'), text)
    def test_copy(self):
        """Test the copy event."""
        # Testing with the actual clipboard proved problematic, so this
        # test replaces the clipboard manipulation functions with mocks
        # and checks that they are called appropriately.
        squeezer = self.make_mock_squeezer()
        expandingbutton = ExpandingButton('TEXT', 'TAGS', 50, squeezer)
        expandingbutton.clipboard_clear = Mock()
        expandingbutton.clipboard_append = Mock()
        # Trigger the copy event.
        retval = expandingbutton.copy(event=Mock())
        self.assertEqual(retval, None)
        # Check that the expanding button called clipboard_clear() and
        # clipboard_append('TEXT') once each.
        self.assertEqual(expandingbutton.clipboard_clear.call_count, 1)
        self.assertEqual(expandingbutton.clipboard_append.call_count, 1)
        expandingbutton.clipboard_append.assert_called_with('TEXT')
    def test_view(self):
        """Test the view event."""
        squeezer = self.make_mock_squeezer()
        expandingbutton = ExpandingButton('TEXT', 'TAGS', 50, squeezer)
        expandingbutton.selection_own = Mock()
        with patch('idlelib.squeezer.view_text', autospec=view_text)\
                as mock_view_text:
            # Trigger the view event.
            expandingbutton.view(event=Mock())
            # Check that the expanding button called view_text.
            self.assertEqual(mock_view_text.call_count, 1)
            # Check that the proper text was passed.
            self.assertEqual(mock_view_text.call_args[0][2], 'TEXT')
    def test_rmenu(self):
        """Test the context menu."""
        squeezer = self.make_mock_squeezer()
        expandingbutton = ExpandingButton('TEXT', 'TAGS', 50, squeezer)
        with patch('tkinter.Menu') as mock_Menu:
            mock_menu = Mock()
            mock_Menu.return_value = mock_menu
            mock_event = Mock()
            mock_event.x = 10
            mock_event.y = 10
            expandingbutton.context_menu_event(event=mock_event)
            # One menu entry per rmenu spec.
            self.assertEqual(mock_menu.add_command.call_count,
                             len(expandingbutton.rmenu_specs))
            for label, *data in expandingbutton.rmenu_specs:
                mock_menu.add_command.assert_any_call(label=label, command=ANY)
if __name__ == '__main__':
unittest.main(verbosity=2) | Lib/idlelib/idle_test/test_squeezer.py | "Test squeezer, coverage 95%"
from textwrap import dedent
from tkinter import Text, Tk
import unittest
from unittest.mock import Mock, NonCallableMagicMock, patch, sentinel, ANY
from test.support import requires
from idlelib.config import idleConf
from idlelib.percolator import Percolator
from idlelib.squeezer import count_lines_with_wrapping, ExpandingButton, \
Squeezer
from idlelib import macosx
from idlelib.textview import view_text
from idlelib.tooltip import Hovertip
SENTINEL_VALUE = sentinel.SENTINEL_VALUE
def get_test_tk_root(test_instance):
    """Create a withdrawn Tk root and register its teardown on *test_instance*."""
    requires('gui')
    tk_root = Tk()
    tk_root.withdraw()

    def _teardown():
        tk_root.update_idletasks()
        tk_root.destroy()

    test_instance.addCleanup(_teardown)
    return tk_root
class CountLinesTest(unittest.TestCase):
"""Tests for the count_lines_with_wrapping function."""
def check(self, expected, text, linewidth):
return self.assertEqual(
expected,
count_lines_with_wrapping(text, linewidth),
)
def test_count_empty(self):
"""Test with an empty string."""
self.assertEqual(count_lines_with_wrapping(""), 0)
def test_count_begins_with_empty_line(self):
"""Test with a string which begins with a newline."""
self.assertEqual(count_lines_with_wrapping("\ntext"), 2)
def test_count_ends_with_empty_line(self):
"""Test with a string which ends with a newline."""
self.assertEqual(count_lines_with_wrapping("text\n"), 1)
def test_count_several_lines(self):
"""Test with several lines of text."""
self.assertEqual(count_lines_with_wrapping("1\n2\n3\n"), 3)
def test_empty_lines(self):
self.check(expected=1, text='\n', linewidth=80)
self.check(expected=2, text='\n\n', linewidth=80)
self.check(expected=10, text='\n' * 10, linewidth=80)
def test_long_line(self):
self.check(expected=3, text='a' * 200, linewidth=80)
self.check(expected=3, text='a' * 200 + '\n', linewidth=80)
def test_several_lines_different_lengths(self):
text = dedent("""\
13 characters
43 is the number of characters on this line
7 chars
13 characters""")
self.check(expected=5, text=text, linewidth=80)
self.check(expected=5, text=text + '\n', linewidth=80)
self.check(expected=6, text=text, linewidth=40)
self.check(expected=7, text=text, linewidth=20)
self.check(expected=11, text=text, linewidth=10)
class SqueezerTest(unittest.TestCase):
"""Tests for the Squeezer class."""
def make_mock_editor_window(self, with_text_widget=False):
"""Create a mock EditorWindow instance."""
editwin = NonCallableMagicMock()
editwin.width = 80
if with_text_widget:
editwin.root = get_test_tk_root(self)
text_widget = self.make_text_widget(root=editwin.root)
editwin.text = editwin.per.bottom = text_widget
return editwin
def make_squeezer_instance(self, editor_window=None):
"""Create an actual Squeezer instance with a mock EditorWindow."""
if editor_window is None:
editor_window = self.make_mock_editor_window()
squeezer = Squeezer(editor_window)
return squeezer
def make_text_widget(self, root=None):
if root is None:
root = get_test_tk_root(self)
text_widget = Text(root)
text_widget["font"] = ('Courier', 10)
text_widget.mark_set("iomark", "1.0")
return text_widget
def set_idleconf_option_with_cleanup(self, configType, section, option, value):
prev_val = idleConf.GetOption(configType, section, option)
idleConf.SetOption(configType, section, option, value)
self.addCleanup(idleConf.SetOption,
configType, section, option, prev_val)
def test_count_lines(self):
"""Test Squeezer.count_lines() with various inputs."""
editwin = self.make_mock_editor_window()
squeezer = self.make_squeezer_instance(editwin)
for text_code, line_width, expected in [
(r"'\n'", 80, 1),
(r"'\n' * 3", 80, 3),
(r"'a' * 40 + '\n'", 80, 1),
(r"'a' * 80 + '\n'", 80, 1),
(r"'a' * 200 + '\n'", 80, 3),
(r"'aa\t' * 20", 80, 2),
(r"'aa\t' * 21", 80, 3),
(r"'aa\t' * 20", 40, 4),
]:
with self.subTest(text_code=text_code,
line_width=line_width,
expected=expected):
text = eval(text_code)
with patch.object(editwin, 'width', line_width):
self.assertEqual(squeezer.count_lines(text), expected)
def test_init(self):
"""Test the creation of Squeezer instances."""
editwin = self.make_mock_editor_window()
squeezer = self.make_squeezer_instance(editwin)
self.assertIs(squeezer.editwin, editwin)
self.assertEqual(squeezer.expandingbuttons, [])
def test_write_no_tags(self):
"""Test Squeezer's overriding of the EditorWindow's write() method."""
editwin = self.make_mock_editor_window()
for text in ['', 'TEXT', 'LONG TEXT' * 1000, 'MANY_LINES\n' * 100]:
editwin.write = orig_write = Mock(return_value=SENTINEL_VALUE)
squeezer = self.make_squeezer_instance(editwin)
self.assertEqual(squeezer.editwin.write(text, ()), SENTINEL_VALUE)
self.assertEqual(orig_write.call_count, 1)
orig_write.assert_called_with(text, ())
self.assertEqual(len(squeezer.expandingbuttons), 0)
def test_write_not_stdout(self):
"""Test Squeezer's overriding of the EditorWindow's write() method."""
for text in ['', 'TEXT', 'LONG TEXT' * 1000, 'MANY_LINES\n' * 100]:
editwin = self.make_mock_editor_window()
editwin.write.return_value = SENTINEL_VALUE
orig_write = editwin.write
squeezer = self.make_squeezer_instance(editwin)
self.assertEqual(squeezer.editwin.write(text, "stderr"),
SENTINEL_VALUE)
self.assertEqual(orig_write.call_count, 1)
orig_write.assert_called_with(text, "stderr")
self.assertEqual(len(squeezer.expandingbuttons), 0)
def test_write_stdout(self):
"""Test Squeezer's overriding of the EditorWindow's write() method."""
editwin = self.make_mock_editor_window()
for text in ['', 'TEXT']:
editwin.write = orig_write = Mock(return_value=SENTINEL_VALUE)
squeezer = self.make_squeezer_instance(editwin)
squeezer.auto_squeeze_min_lines = 50
self.assertEqual(squeezer.editwin.write(text, "stdout"),
SENTINEL_VALUE)
self.assertEqual(orig_write.call_count, 1)
orig_write.assert_called_with(text, "stdout")
self.assertEqual(len(squeezer.expandingbuttons), 0)
for text in ['LONG TEXT' * 1000, 'MANY_LINES\n' * 100]:
editwin.write = orig_write = Mock(return_value=SENTINEL_VALUE)
squeezer = self.make_squeezer_instance(editwin)
squeezer.auto_squeeze_min_lines = 50
self.assertEqual(squeezer.editwin.write(text, "stdout"), None)
self.assertEqual(orig_write.call_count, 0)
self.assertEqual(len(squeezer.expandingbuttons), 1)
def test_auto_squeeze(self):
"""Test that the auto-squeezing creates an ExpandingButton properly."""
editwin = self.make_mock_editor_window(with_text_widget=True)
text_widget = editwin.text
squeezer = self.make_squeezer_instance(editwin)
squeezer.auto_squeeze_min_lines = 5
squeezer.count_lines = Mock(return_value=6)
editwin.write('TEXT\n'*6, "stdout")
self.assertEqual(text_widget.get('1.0', 'end'), '\n')
self.assertEqual(len(squeezer.expandingbuttons), 1)
def test_squeeze_current_text(self):
"""Test the squeeze_current_text method."""
# Squeezing text should work for both stdout and stderr.
for tag_name in ["stdout", "stderr"]:
editwin = self.make_mock_editor_window(with_text_widget=True)
text_widget = editwin.text
squeezer = self.make_squeezer_instance(editwin)
squeezer.count_lines = Mock(return_value=6)
# Prepare some text in the Text widget.
text_widget.insert("1.0", "SOME\nTEXT\n", tag_name)
text_widget.mark_set("insert", "1.0")
self.assertEqual(text_widget.get('1.0', 'end'), 'SOME\nTEXT\n\n')
self.assertEqual(len(squeezer.expandingbuttons), 0)
# Test squeezing the current text.
retval = squeezer.squeeze_current_text()
self.assertEqual(retval, "break")
self.assertEqual(text_widget.get('1.0', 'end'), '\n\n')
self.assertEqual(len(squeezer.expandingbuttons), 1)
self.assertEqual(squeezer.expandingbuttons[0].s, 'SOME\nTEXT')
# Test that expanding the squeezed text works and afterwards
# the Text widget contains the original text.
squeezer.expandingbuttons[0].expand()
self.assertEqual(text_widget.get('1.0', 'end'), 'SOME\nTEXT\n\n')
self.assertEqual(len(squeezer.expandingbuttons), 0)
def test_squeeze_current_text_no_allowed_tags(self):
    """Test that the event doesn't squeeze text without a relevant tag."""
    editwin = self.make_mock_editor_window(with_text_widget=True)
    text_widget = editwin.text
    squeezer = self.make_squeezer_instance(editwin)
    squeezer.count_lines = Mock(return_value=6)
    # Prepare some text in the Text widget, tagged with a tag that is
    # NOT one squeezer acts on ("stdout"/"stderr").
    text_widget.insert("1.0", "SOME\nTEXT\n", "TAG")
    text_widget.mark_set("insert", "1.0")
    self.assertEqual(text_widget.get('1.0', 'end'), 'SOME\nTEXT\n\n')
    self.assertEqual(len(squeezer.expandingbuttons), 0)
    # Test squeezing the current text.
    retval = squeezer.squeeze_current_text()
    self.assertEqual(retval, "break")
    # Nothing must have changed: text intact, no button created.
    self.assertEqual(text_widget.get('1.0', 'end'), 'SOME\nTEXT\n\n')
    self.assertEqual(len(squeezer.expandingbuttons), 0)
def test_squeeze_text_before_existing_squeezed_text(self):
    """Test squeezing text before existing squeezed text."""
    editwin = self.make_mock_editor_window(with_text_widget=True)
    text_widget = editwin.text
    squeezer = self.make_squeezer_instance(editwin)
    squeezer.count_lines = Mock(return_value=6)
    # Prepare some text in the Text widget and squeeze it.
    text_widget.insert("1.0", "SOME\nTEXT\n", "stdout")
    text_widget.mark_set("insert", "1.0")
    squeezer.squeeze_current_text()
    self.assertEqual(len(squeezer.expandingbuttons), 1)
    # Insert new text *before* the existing squeezed text and squeeze it.
    text_widget.insert("1.0", "MORE\nSTUFF\n", "stdout")
    text_widget.mark_set("insert", "1.0")
    retval = squeezer.squeeze_current_text()
    self.assertEqual(retval, "break")
    self.assertEqual(text_widget.get('1.0', 'end'), '\n\n\n')
    self.assertEqual(len(squeezer.expandingbuttons), 2)
    # The buttons list must stay ordered by position in the Text
    # widget: the newly created button comes first.
    self.assertTrue(text_widget.compare(
        squeezer.expandingbuttons[0],
        '<',
        squeezer.expandingbuttons[1],
    ))
def test_reload(self):
    """Test the reload() class-method."""
    editwin = self.make_mock_editor_window(with_text_widget=True)
    squeezer = self.make_squeezer_instance(editwin)
    orig_auto_squeeze_min_lines = squeezer.auto_squeeze_min_lines
    # Increase auto-squeeze-min-lines in the IDLE configuration.
    new_auto_squeeze_min_lines = orig_auto_squeeze_min_lines + 10
    self.set_idleconf_option_with_cleanup(
        'main', 'PyShell', 'auto-squeeze-min-lines',
        str(new_auto_squeeze_min_lines))
    # reload() must propagate the new config value to live instances.
    Squeezer.reload()
    self.assertEqual(squeezer.auto_squeeze_min_lines,
                     new_auto_squeeze_min_lines)
def test_reload_no_squeezer_instances(self):
    """Test that Squeezer.reload() runs without any instances existing."""
    # Must not raise even when there is nothing to update.
    Squeezer.reload()
class ExpandingButtonTest(unittest.TestCase):
    """Tests for the ExpandingButton class."""
    # In these tests the squeezer instance is a mock, but actual tkinter
    # Text and Button instances are created.

    def make_mock_squeezer(self):
        """Helper for tests: Create a mock Squeezer object."""
        root = get_test_tk_root(self)
        squeezer = Mock()
        squeezer.editwin.text = Text(root)
        squeezer.editwin.per = Percolator(squeezer.editwin.text)
        self.addCleanup(squeezer.editwin.per.close)
        # Set default values for the configuration settings.
        squeezer.auto_squeeze_min_lines = 50
        return squeezer

    @patch('idlelib.squeezer.Hovertip', autospec=Hovertip)
    def test_init(self, MockHovertip):
        """Test the simplest creation of an ExpandingButton."""
        squeezer = self.make_mock_squeezer()
        text_widget = squeezer.editwin.text
        expandingbutton = ExpandingButton('TEXT', 'TAGS', 50, squeezer)
        self.assertEqual(expandingbutton.s, 'TEXT')
        # Check that the underlying tkinter.Button is properly configured.
        self.assertEqual(expandingbutton.master, text_widget)
        self.assertTrue('50 lines' in expandingbutton.cget('text'))
        # Check that the text widget still contains no text.
        self.assertEqual(text_widget.get('1.0', 'end'), '\n')
        # Check that the mouse events are bound.
        self.assertIn('<Double-Button-1>', expandingbutton.bind())
        right_button_code = '<Button-%s>' % ('2' if macosx.isAquaTk() else '3')
        self.assertIn(right_button_code, expandingbutton.bind())
        # Check that ToolTip was called once, with appropriate values.
        self.assertEqual(MockHovertip.call_count, 1)
        MockHovertip.assert_called_with(expandingbutton, ANY, hover_delay=ANY)
        # Check that 'right-click' appears in the tooltip text.
        tooltip_text = MockHovertip.call_args[0][1]
        self.assertIn('right-click', tooltip_text.lower())

    def test_expand(self):
        """Test the expand event."""
        squeezer = self.make_mock_squeezer()
        expandingbutton = ExpandingButton('TEXT', 'TAGS', 50, squeezer)
        # Insert the button into the text widget
        # (this is normally done by the Squeezer class).
        text_widget = squeezer.editwin.text
        text_widget.window_create("1.0", window=expandingbutton)
        # Trigger the expand event.
        retval = expandingbutton.expand(event=Mock())
        self.assertEqual(retval, None)
        # Check that the text was inserted into the text widget.
        self.assertEqual(text_widget.get('1.0', 'end'), 'TEXT\n')
        # Check that the 'TAGS' tag was set on the inserted text.
        text_end_index = text_widget.index('end-1c')
        self.assertEqual(text_widget.get('1.0', text_end_index), 'TEXT')
        self.assertEqual(text_widget.tag_nextrange('TAGS', '1.0'),
                         ('1.0', text_end_index))
        # Check that the button removed itself from squeezer.expandingbuttons.
        self.assertEqual(squeezer.expandingbuttons.remove.call_count, 1)
        squeezer.expandingbuttons.remove.assert_called_with(expandingbutton)

    # Fixed typo in method name: "oupput" -> "output".
    def test_expand_dangerous_output(self):
        """Test that expanding very long output asks user for confirmation."""
        squeezer = self.make_mock_squeezer()
        text = 'a' * 10**5
        expandingbutton = ExpandingButton(text, 'TAGS', 50, squeezer)
        expandingbutton.set_is_dangerous()
        self.assertTrue(expandingbutton.is_dangerous)
        # Insert the button into the text widget
        # (this is normally done by the Squeezer class).
        text_widget = expandingbutton.text
        text_widget.window_create("1.0", window=expandingbutton)
        # Patch the message box module to always return False.
        with patch('idlelib.squeezer.messagebox') as mock_msgbox:
            mock_msgbox.askokcancel.return_value = False
            mock_msgbox.askyesno.return_value = False
            # Trigger the expand event.
            retval = expandingbutton.expand(event=Mock())
        # Check that the event chain was broken and no text was inserted.
        self.assertEqual(retval, 'break')
        self.assertEqual(expandingbutton.text.get('1.0', 'end-1c'), '')
        # Patch the message box module to always return True.
        with patch('idlelib.squeezer.messagebox') as mock_msgbox:
            mock_msgbox.askokcancel.return_value = True
            mock_msgbox.askyesno.return_value = True
            # Trigger the expand event.
            retval = expandingbutton.expand(event=Mock())
        # Check that the event chain wasn't broken and the text was inserted.
        self.assertEqual(retval, None)
        self.assertEqual(expandingbutton.text.get('1.0', 'end-1c'), text)

    def test_copy(self):
        """Test the copy event."""
        # Testing with the actual clipboard proved problematic, so this
        # test replaces the clipboard manipulation functions with mocks
        # and checks that they are called appropriately.
        squeezer = self.make_mock_squeezer()
        expandingbutton = ExpandingButton('TEXT', 'TAGS', 50, squeezer)
        expandingbutton.clipboard_clear = Mock()
        expandingbutton.clipboard_append = Mock()
        # Trigger the copy event.
        retval = expandingbutton.copy(event=Mock())
        self.assertEqual(retval, None)
        # Check that the expanding button called clipboard_clear() and
        # clipboard_append('TEXT') once each.  (Fixed comment typo "Vheck".)
        self.assertEqual(expandingbutton.clipboard_clear.call_count, 1)
        self.assertEqual(expandingbutton.clipboard_append.call_count, 1)
        expandingbutton.clipboard_append.assert_called_with('TEXT')

    def test_view(self):
        """Test the view event."""
        squeezer = self.make_mock_squeezer()
        expandingbutton = ExpandingButton('TEXT', 'TAGS', 50, squeezer)
        expandingbutton.selection_own = Mock()
        with patch('idlelib.squeezer.view_text', autospec=view_text)\
                as mock_view_text:
            # Trigger the view event.
            expandingbutton.view(event=Mock())
            # Check that the expanding button called view_text.
            self.assertEqual(mock_view_text.call_count, 1)
            # Check that the proper text was passed.
            self.assertEqual(mock_view_text.call_args[0][2], 'TEXT')

    def test_rmenu(self):
        """Test the context menu."""
        squeezer = self.make_mock_squeezer()
        expandingbutton = ExpandingButton('TEXT', 'TAGS', 50, squeezer)
        with patch('tkinter.Menu') as mock_Menu:
            mock_menu = Mock()
            mock_Menu.return_value = mock_menu
            mock_event = Mock()
            mock_event.x = 10
            mock_event.y = 10
            expandingbutton.context_menu_event(event=mock_event)
            # One menu entry per rmenu spec, each with a label and command.
            self.assertEqual(mock_menu.add_command.call_count,
                             len(expandingbutton.rmenu_specs))
            for label, *data in expandingbutton.rmenu_specs:
                mock_menu.add_command.assert_any_call(label=label, command=ANY)
if __name__ == '__main__':
    # verbosity=2 prints each test name as it runs.
    unittest.main(verbosity=2)
import numpy as np
import glob
import shutil
import os
import cv2
from PIL import Image, ImageOps
from matplotlib import pyplot as plt
clothes_dir = '/home/ssai1/dhgwag/VITON/VITON-HD/datasets/train/cloth'
clothes_mask_dir = '/home/ssai1/dhgwag/VITON/VITON-HD/datasets/train/cloth-mask'
image_dir = '/home/ssai1/dhgwag/VITON/VITON-HD/datasets/train/image'
image_parse_dir = '/home/ssai1/dhgwag/VITON/VITON-HD/datasets/train/image-parse'
result_dir = '/home/ssai1/yjcho/blackened_datasets'
def load_one_image(image_path):
    """Load an image file and return it as an RGB numpy array."""
    rgb_image = Image.open(image_path).convert('RGB')
    return np.array(rgb_image)
def load_one_image_parse(image_parse_path):
    """Load a segmentation (parse) map as a numpy label array.

    Deliberately NOT converted to RGB, so per-pixel class labels from the
    palette PNG are preserved.
    """
    parse_image = Image.open(image_parse_path)
    return np.array(parse_image)
def get_parse_clothes(img_parse):
    """Return a float32 mask of the upper-clothes labels (5, 6, 7).

    img_parse: numpy array of per-pixel class labels.  The labels are
    mutually exclusive, so OR-ing the three indicator masks is equivalent
    to summing them as the original implementation did.
    """
    is_upper = (img_parse == 5) | (img_parse == 6) | (img_parse == 7)
    return is_upper.astype(np.float32)
def parse2mask(parse):
    """Return a binary float32 mask: 1.0 where *parse* > 0, else 0.0.

    parse: numpy array of (upper-clothes) parse values.

    Bug fix: the original chained assignment
    ``upper_mask = parse[np.where(parse > 0.0)] = 1.0`` mutated *parse*
    in place, bound ``upper_mask`` to the scalar 1.0, and returned None.
    This version leaves the input untouched and returns the mask, as the
    docstring always implied.
    """
    return (np.asarray(parse) > 0.0).astype(np.float32)
def clothes_darkenizer(img, mask, debug=False):
    """Gray out the masked (clothes) region of *img*.

    Args:
        img: HxWx3 uint8-compatible image array.
        mask: array whose zero entries mark non-clothes pixels; must be
            boolean-index compatible with ``img``.
        debug: when True, dump intermediate images to the working
            directory.  Fix: the original wrote np_clothes.jpg,
            PIL_clothes.jpg and gray_PIL.jpg unconditionally on EVERY
            call, clobbering the same three files once per loop
            iteration — clearly leftover debugging, now opt-in.

    Returns:
        HxWx3 uint8 array: grayscale clothes pixels, zero elsewhere.
    """
    clothes_only = np.copy(img)
    clothes_only[np.where(mask == 0.0)] = 0.0  # only clothes will survive
    pil_clothes = Image.fromarray(np.uint8(clothes_only)).convert('RGB')
    if debug:
        Image.fromarray(np.uint8(clothes_only)).save('np_clothes.jpg')
        pil_clothes.save('PIL_clothes.jpg')
    pil_gray = ImageOps.grayscale(pil_clothes)
    if debug:
        pil_gray.save('gray_PIL.jpg')
    gray = np.array(pil_gray)
    # Replicate the single gray channel into three channels.
    return np.stack([gray, gray, gray], axis=-1)
def merge_images(img1, img2, img2_mask):
    """Overlay *img2* onto *img1* wherever *img2_mask* is non-zero.

    img1: main (background) image.
    img2: sub (overlay) image.
    img2_mask: selects which entries of img2 are copied over.
    Returns a new array; img1 is not modified.
    """
    merged = np.copy(img1)
    selected = img2_mask != 0
    merged[selected] = img2[selected]
    return merged
def main():
    """Rebuild *result_dir* with gray-clothed versions of the VITON data.

    Person images get their upper-clothes region (from the parse map)
    replaced by its grayscale version; product images are grayed inside
    their cloth mask.  Parse maps and masks are copied through unchanged.
    """
    # Idiom fix: the original used `expr if cond else None` purely for
    # side effects; plain if-statements express this honestly.  Also,
    # after rmtree() the directory never exists, so the second
    # existence check was dead code.
    if os.path.exists(result_dir):
        shutil.rmtree(result_dir)
    os.mkdir(result_dir)
    result_cloth_dir = os.path.join(result_dir, 'cloth')
    result_cloth_mask_dir = os.path.join(result_dir, 'cloth-mask')
    result_image_dir = os.path.join(result_dir, 'image')
    result_image_parse_dir = os.path.join(result_dir, 'image-parse')
    os.mkdir(result_cloth_dir)
    os.mkdir(result_cloth_mask_dir)
    os.mkdir(result_image_dir)
    os.mkdir(result_image_parse_dir)
    # Human image processing.
    for img_path in glob.glob(os.path.join(image_dir, '*.jpg')):
        # Parse maps live next to the images but as .png files.
        img_parse_path = os.path.join(
            image_parse_dir, os.path.basename(img_path)).replace('.jpg', '.png')
        img = load_one_image(img_path)
        img_parse = load_one_image_parse(img_parse_path)
        parse_upper = get_parse_clothes(img_parse)
        np_gray_clothes = clothes_darkenizer(img, parse_upper)
        result_img = merge_images(img, np_gray_clothes, parse_upper)
        Image.fromarray(result_img).save(
            os.path.join(result_image_dir, os.path.basename(img_path)))
        # Copy the (unchanged) parse map alongside the result image.
        Image.fromarray(img_parse).save(
            os.path.join(result_image_parse_dir,
                         os.path.basename(img_parse_path)))
    # Clothes (product) image processing.
    for clothes_path in glob.glob(os.path.join(clothes_dir, '*.jpg')):
        clothes_mask_path = os.path.join(
            clothes_mask_dir, os.path.basename(clothes_path))
        clothes = load_one_image(clothes_path)
        clothes_mask = load_one_image(clothes_mask_path)
        np_gray_clothes = clothes_darkenizer(clothes, clothes_mask)
        result_img = merge_images(clothes, np_gray_clothes, clothes_mask)
        Image.fromarray(result_img).save(
            os.path.join(result_cloth_dir, os.path.basename(clothes_path)))
        # Copy the (unchanged) cloth mask alongside the result image.
        Image.fromarray(clothes_mask).save(
            os.path.join(result_cloth_mask_dir,
                         os.path.basename(clothes_mask_path)))
if __name__ == '__main__':
main() | Scripts/clothes_blackenizer.py | import numpy as np
import glob
import shutil
import os
import cv2
from PIL import Image, ImageOps
from matplotlib import pyplot as plt
clothes_dir = '/home/ssai1/dhgwag/VITON/VITON-HD/datasets/train/cloth'
clothes_mask_dir = '/home/ssai1/dhgwag/VITON/VITON-HD/datasets/train/cloth-mask'
image_dir = '/home/ssai1/dhgwag/VITON/VITON-HD/datasets/train/image'
image_parse_dir = '/home/ssai1/dhgwag/VITON/VITON-HD/datasets/train/image-parse'
result_dir = '/home/ssai1/yjcho/blackened_datasets'
def load_one_image(image_path):
img = Image.open(image_path).convert('RGB')
# img.save('img_test.jpg', format='jpeg')
np_img = np.array(img)
return np_img
def load_one_image_parse(image_parse_path):
# img_parse = Image.open(image_parse_path).convert('RGB')
img_parse = Image.open(image_parse_path)
# img_parse.save('img_parse_test.png', format='png')
np_img_parse = np.array(img_parse)
return np_img_parse
def get_parse_clothes(img_parse):
    """Return a float32 indicator mask of the upper-clothes labels.

    Labels 5, 6 and 7 (mutually exclusive per pixel) mark upper-body
    clothing, so membership testing matches the original sum of the
    three per-label indicator arrays.
    """
    return np.isin(img_parse, (5, 6, 7)).astype(np.float32)
def parse2mask(parse):
    """Binarize *parse*: 1.0 where positive, 0.0 elsewhere (float32).

    Bug fix: the original line mutated the input array in place, bound
    the local to the scalar 1.0, and fell off the end returning None.
    Now the input is left intact and the mask is returned.
    """
    return (np.asarray(parse) > 0.0).astype(np.float32)
def clothes_darkenizer(img, mask):
# print("mask", mask.shape)
np_clothes = np.copy(img)
# print(type(np_clothes), np_clothes.shape)
np_clothes[np.where(mask == 0.0)] = 0.0 # only clothes will survive
Image.fromarray(np.uint8(np_clothes)).save('np_clothes.jpg')
PIL_clothes = Image.fromarray(np.uint8(np_clothes)).convert('RGB')
PIL_clothes.save('PIL_clothes.jpg')
PIL_gray_clothes = ImageOps.grayscale(PIL_clothes)
PIL_gray_clothes.save('gray_PIL.jpg')
np_gray_clothes = np.array(PIL_gray_clothes)
# stack three times
np_gray_clothes = np.stack([np_gray_clothes,np_gray_clothes,np_gray_clothes], axis=-1)
return np_gray_clothes
def merge_images(img1, img2, img2_mask):
"""
img1: main image
img2: sub image
img2_mask
"""
result = np.copy(img1)
result[np.where(img2_mask != 0)] = img2[np.where(img2_mask != 0)]
return result
def main():
shutil.rmtree(result_dir) if os.path.exists(result_dir) else None
os.mkdir(result_dir) if not os.path.exists(result_dir) else None
result_cloth_dir = os.path.join(result_dir, 'cloth')
result_cloth_mask_dir = os.path.join(result_dir, 'cloth-mask')
result_image_dir = os.path.join(result_dir, 'image')
result_image_parse_dir = os.path.join(result_dir, 'image-parse')
os.mkdir(result_cloth_dir)
os.mkdir(result_cloth_mask_dir)
os.mkdir(result_image_dir)
os.mkdir(result_image_parse_dir)
# human image processing
for img_path in glob.glob(os.path.join(image_dir, '*.jpg')):
img_parse_path = os.path.join(image_parse_dir, os.path.basename(img_path)).replace('.jpg', '.png')
img = load_one_image(img_path)
img_parse = load_one_image_parse(img_parse_path)
parse_upper = get_parse_clothes(img_parse)
np_gray_clothes = clothes_darkenizer(img, parse_upper)
result_img = merge_images(img, np_gray_clothes, parse_upper)
PIL_result_img = Image.fromarray(result_img)
PIL_result_img.save(os.path.join(result_image_dir, os.path.basename(img_path)))
Image.fromarray(img_parse).save(os.path.join(result_image_parse_dir, os.path.basename(img_parse_path)))
# plt.imshow(np.array(result_img))
# plt.show()
# clothes image processing
for clothes_path in glob.glob(os.path.join(clothes_dir, '*.jpg')):
clothes_mask_path = os.path.join(clothes_mask_dir, os.path.basename(clothes_path))
clothes = load_one_image(clothes_path)
clothes_mask = load_one_image(clothes_mask_path)
np_gray_clothes = clothes_darkenizer(clothes, clothes_mask)
result_img = merge_images(clothes, np_gray_clothes, clothes_mask)
PIL_result_img = Image.fromarray(result_img)
PIL_result_img.save(os.path.join(result_cloth_dir, os.path.basename(clothes_path)))
Image.fromarray(clothes_mask).save(os.path.join(result_cloth_mask_dir, os.path.basename(clothes_mask_path)))
# plt.imshow(np.array(result_img))
# plt.show()
if __name__ == '__main__':
main() | 0.099334 | 0.32826 |
import os
import re
import logging
import urllib
from storage import Storage
from http import HTTP
regex_at = re.compile('(?<!\\\\)\$[\w_]+')
regex_anything = re.compile('(?<!\\\\)\$anything')
regex_iter = re.compile(r'.*code=(?P<code>\d+)&ticket=(?P<ticket>.+).*')
params=Storage()
params.routes_in=[]
params.routes_out=[]
params.routes_onerror=[]
params.error_handler=None
params.error_message = '<html><body><h1>Invalid request</h1></body></html>'
params.error_message_custom = '<html><body><h1>%s</h1></body></html>'
params.error_message_ticket = \
'<html><body><h1>Internal error</h1>Ticket issued: <a href="/admin/default/ticket/%(ticket)s" target="_blank">%(ticket)s</a></body><!-- this is junk text else IE does not display the page: '+('x'*512)+' //--></html>'
def load():
    """Load and compile the routes.py URL-rewrite configuration.

    Reads routes.py from the current working directory (if present),
    executes it, and populates the module-level ``params`` Storage with
    compiled routes_in/routes_out patterns plus the error-handling
    settings.  NOTE: this is Python 2 code (`exec ... in`,
    `except E, e` syntax).
    """
    symbols = {}
    if not os.path.exists('routes.py'):
        return
    try:
        routesfp = open('routes.py', 'r')
        exec routesfp.read() in symbols
        routesfp.close()
        logging.info('URL rewrite is on. configuration in routes.py')
    except SyntaxError, e:
        routesfp.close()
        logging.error('Your routes.py has a syntax error. ' + \
                      'Please fix it before you restart web2py')
        raise e
    params.routes_in=[]
    if 'routes_in' in symbols:
        for (k, v) in symbols['routes_in']:
            # Anchor the pattern at both ends.
            if not k[0] == '^':
                k = '^%s' % k
            if not k[-1] == '$':
                k = '%s$' % k
            # Expand shorthand patterns to the full routing-key form:
            # "remote_addr:scheme://host:method path".
            if k.find(':') < 0:
                k = '^.*?:%s' % k[1:]
            if k.find('://') < 0:
                i = k.find(':/')
                k = r'%s:https?://[^:/]+:[a-z]+ %s' % (k[:i], k[i+1:])
            # $anything -> greedy catch-all group; $name -> named group;
            # $name in the replacement -> backreference to that group.
            for item in regex_anything.findall(k):
                k = k.replace(item, '(?P<anything>.*)')
            for item in regex_at.findall(k):
                k = k.replace(item, '(?P<%s>[\\w_]+)' % item[1:])
            for item in regex_at.findall(v):
                v = v.replace(item, '\\g<%s>' % item[1:])
            params.routes_in.append((re.compile(k, re.DOTALL), v))
    params.routes_out=[]
    if 'routes_out' in symbols:
        for (k, v) in symbols['routes_out']:
            # Same anchoring and $name expansion as routes_in, but
            # outgoing patterns match plain URLs (no routing key).
            if not k[0] == '^':
                k = '^%s' % k
            if not k[-1] == '$':
                k = '%s$' % k
            for item in regex_at.findall(k):
                k = k.replace(item, '(?P<%s>\\w+)' % item[1:])
            for item in regex_at.findall(v):
                v = v.replace(item, '\\g<%s>' % item[1:])
            params.routes_out.append((re.compile(k, re.DOTALL), v))
    if 'routes_onerror' in symbols:
        params.routes_onerror = symbols['routes_onerror']
    if 'error_handler' in symbols:
        params.error_handler = symbols['error_handler']
    if 'error_message' in symbols:
        params.error_message = symbols['error_message']
    if 'error_message_ticket' in symbols:
        params.error_message_ticket = symbols['error_message_ticket']
def filter_in(e):
    """Rewrite the incoming WSGI environment *e* through routes_in.

    Builds a routing key of the form
    ``remote_addr:scheme://host:method path``, applies the first
    matching routes_in pattern, and updates PATH_INFO (or REQUEST_URI
    when the rewritten path carries its own query string).  The
    pre-rewrite URI is preserved in WEB2PY_ORIGINAL_URI.  Returns the
    (mutated) environment.
    """
    if params.routes_in:
        query = e.get('QUERY_STRING', None)
        path = e['PATH_INFO']
        host = e.get('HTTP_HOST', 'localhost').lower()
        original_uri = path + (query and '?'+query or '')
        i = host.find(':')
        if i > 0:
            # Strip the port, if the Host header carried one.
            host = host[:i]
        key = '%s:%s://%s:%s %s' % \
            (e['REMOTE_ADDR'],
             e.get('WSGI_URL_SCHEME', 'http').lower(), host,
             e.get('REQUEST_METHOD', 'get').lower(), path)
        for (regex, value) in params.routes_in:
            if regex.match(key):
                # First match wins.
                path = regex.sub(value, key)
                break
        if path.find('?') < 0:
            e['PATH_INFO'] = path
        else:
            # The rewritten path includes a query string: merge the
            # original one and expose the result via REQUEST_URI.
            if query:
                path = path+'&'+query
            e['PATH_INFO'] = ''
            e['REQUEST_URI'] = path
        e['WEB2PY_ORIGINAL_URI'] = original_uri
    return e
def filter_out(url):
    """Apply the first matching routes_out rewrite to *url*.

    Only the part before '?' is matched/rewritten; any query string is
    re-attached unchanged.  Returns *url* untouched when no pattern
    matches or no routes_out rules are configured.
    """
    if not params.routes_out:
        return url
    base, sep, query = url.partition('?')
    for (regex, value) in params.routes_out:
        if regex.match(base):
            rewritten = regex.sub(value, base)
            return rewritten + sep + query if sep else rewritten
    return url
def try_redirect_on_error(http_object, application, ticket=None):
    """Map an HTTP error response onto a routes_onerror redirect.

    For status codes above 399, routes_onerror entries are matched
    against keys from most to least specific: ``app/status``, ``app/*``,
    ``*/status``, ``*/*``.  A redirect target of '!' explicitly disables
    redirection; otherwise a 303 response is returned with the status
    code and ticket appended to the target's query string.  Returns the
    original *http_object* when no rule applies.
    """
    status = int(str(http_object.status).split()[0])
    if status>399 and params.routes_onerror:
        keys=set(('%s/%s' % (application, status),
                  '%s/*' % (application),
                  '*/%s' % (status),
                  '*/*'))
        for (key,redir) in params.routes_onerror:
            if key in keys:
                if redir == '!':
                    # Explicit opt-out: keep the original error response.
                    break
                elif '?' in redir:
                    url = redir + '&' + 'code=%s&ticket=%s' % (status,ticket)
                else:
                    url = redir + '?' + 'code=%s&ticket=%s' % (status,ticket)
                return HTTP(303,
                            'You are being redirected <a href="%s">here</a>' % url,
                            Location=url)
    return http_object
return http_object | gluon/rewrite.py | import os
import re
import logging
import urllib
from storage import Storage
from http import HTTP
regex_at = re.compile('(?<!\\\\)\$[\w_]+')
regex_anything = re.compile('(?<!\\\\)\$anything')
regex_iter = re.compile(r'.*code=(?P<code>\d+)&ticket=(?P<ticket>.+).*')
params=Storage()
params.routes_in=[]
params.routes_out=[]
params.routes_onerror=[]
params.error_handler=None
params.error_message = '<html><body><h1>Invalid request</h1></body></html>'
params.error_message_custom = '<html><body><h1>%s</h1></body></html>'
params.error_message_ticket = \
'<html><body><h1>Internal error</h1>Ticket issued: <a href="/admin/default/ticket/%(ticket)s" target="_blank">%(ticket)s</a></body><!-- this is junk text else IE does not display the page: '+('x'*512)+' //--></html>'
def load():
symbols = {}
if not os.path.exists('routes.py'):
return
try:
routesfp = open('routes.py', 'r')
exec routesfp.read() in symbols
routesfp.close()
logging.info('URL rewrite is on. configuration in routes.py')
except SyntaxError, e:
routesfp.close()
logging.error('Your routes.py has a syntax error. ' + \
'Please fix it before you restart web2py')
raise e
params.routes_in=[]
if 'routes_in' in symbols:
for (k, v) in symbols['routes_in']:
if not k[0] == '^':
k = '^%s' % k
if not k[-1] == '$':
k = '%s$' % k
if k.find(':') < 0:
k = '^.*?:%s' % k[1:]
if k.find('://') < 0:
i = k.find(':/')
k = r'%s:https?://[^:/]+:[a-z]+ %s' % (k[:i], k[i+1:])
for item in regex_anything.findall(k):
k = k.replace(item, '(?P<anything>.*)')
for item in regex_at.findall(k):
k = k.replace(item, '(?P<%s>[\\w_]+)' % item[1:])
for item in regex_at.findall(v):
v = v.replace(item, '\\g<%s>' % item[1:])
params.routes_in.append((re.compile(k, re.DOTALL), v))
params.routes_out=[]
if 'routes_out' in symbols:
for (k, v) in symbols['routes_out']:
if not k[0] == '^':
k = '^%s' % k
if not k[-1] == '$':
k = '%s$' % k
for item in regex_at.findall(k):
k = k.replace(item, '(?P<%s>\\w+)' % item[1:])
for item in regex_at.findall(v):
v = v.replace(item, '\\g<%s>' % item[1:])
params.routes_out.append((re.compile(k, re.DOTALL), v))
if 'routes_onerror' in symbols:
params.routes_onerror = symbols['routes_onerror']
if 'error_handler' in symbols:
params.error_handler = symbols['error_handler']
if 'error_message' in symbols:
params.error_message = symbols['error_message']
if 'error_message_ticket' in symbols:
params.error_message_ticket = symbols['error_message_ticket']
def filter_in(e):
if params.routes_in:
query = e.get('QUERY_STRING', None)
path = e['PATH_INFO']
host = e.get('HTTP_HOST', 'localhost').lower()
original_uri = path + (query and '?'+query or '')
i = host.find(':')
if i > 0:
host = host[:i]
key = '%s:%s://%s:%s %s' % \
(e['REMOTE_ADDR'],
e.get('WSGI_URL_SCHEME', 'http').lower(), host,
e.get('REQUEST_METHOD', 'get').lower(), path)
for (regex, value) in params.routes_in:
if regex.match(key):
path = regex.sub(value, key)
break
if path.find('?') < 0:
e['PATH_INFO'] = path
else:
if query:
path = path+'&'+query
e['PATH_INFO'] = ''
e['REQUEST_URI'] = path
e['WEB2PY_ORIGINAL_URI'] = original_uri
return e
def filter_out(url):
if params.routes_out:
items = url.split('?', 1)
for (regex, value) in params.routes_out:
if regex.match(items[0]):
return '?'.join([regex.sub(value, items[0])] + items[1:])
return url
def try_redirect_on_error(http_object, application, ticket=None):
status = int(str(http_object.status).split()[0])
if status>399 and params.routes_onerror:
keys=set(('%s/%s' % (application, status),
'%s/*' % (application),
'*/%s' % (status),
'*/*'))
for (key,redir) in params.routes_onerror:
if key in keys:
if redir == '!':
break
elif '?' in redir:
url = redir + '&' + 'code=%s&ticket=%s' % (status,ticket)
else:
url = redir + '?' + 'code=%s&ticket=%s' % (status,ticket)
return HTTP(303,
'You are being redirected <a href="%s">here</a>' % url,
Location=url)
return http_object | 0.187839 | 0.055592 |
import sys
from gflags import _helpers
# TODO(vrusinov): use DISCLAIM_key_flags when it's moved out of __init__.
_helpers.disclaim_module_ids.add(id(sys.modules[__name__]))
class Error(Exception):
"""The base class for all flags errors."""
# TODO(b/31596146): Remove FlagsError.
FlagsError = Error
class CantOpenFlagFileError(Error):
"""Raised if flagfile fails to open: doesn't exist, wrong permissions, etc."""
class DuplicateFlagCannotPropagateNoneToSwig(Error):
"""Raised when redefining a SWIG flag and the default value is None.
It's raised when redefining a SWIG flag with allow_override=True and the
default value is None. Because it's currently impossible to pass None default
value back to SWIG. See FlagValues.SetDefault for details.
"""
class DuplicateFlagError(Error):
    """Raised if there is a flag naming conflict."""

    @classmethod
    def from_flag(cls, flagname, flag_values, other_flag_values=None):
        """Create a DuplicateFlagError by providing flag name and values.

        Args:
          flagname: Name of the flag being redefined.
          flag_values: FlagValues object containing the first definition of
              flagname.
          other_flag_values: If this argument is not None, it should be the
              FlagValues object where the second definition of flagname occurs.
              If it is None, we assume that we're being called when attempting
              to create the flag a second time, and we use the module calling
              this one as the source of the second definition.

        Returns:
          An instance of DuplicateFlagError.
        """
        first_module = flag_values.FindModuleDefiningFlag(
            flagname, default='<unknown>')
        if other_flag_values is None:
            # No second FlagValues given: the duplicate definition is being
            # made right now, so the calling module is the second source.
            second_module = _helpers.GetCallingModule()
        else:
            second_module = other_flag_values.FindModuleDefiningFlag(
                flagname, default='<unknown>')
        message = ("The flag '%s' is defined twice. First from %s, Second from %s. "
                   "Description from first occurrence: %s") % (
                       flagname, first_module, second_module,
                       flag_values[flagname].help)
        return cls(message)
class IllegalFlagValueError(Error):
"""Raised if the flag command line argument is illegal."""
# TODO(yileiyang): Remove IllegalFlagValue.
IllegalFlagValue = IllegalFlagValueError
class UnrecognizedFlagError(Error):
    """Raised if a flag is unrecognized.

    Attributes:
      flagname: Name of the unrecognized flag.
      flagvalue: Value of the flag, empty if the flag is not defined.
    """

    def __init__(self, flagname, flagvalue='', suggestions=None):
        self.flagname = flagname
        self.flagvalue = flagvalue
        # Offer "did you mean" candidates when any were supplied.
        tip = '. Did you mean: %s?' % ', '.join(suggestions) if suggestions else ''
        Error.__init__(
            self, 'Unknown command line flag \'%s\'%s' % (flagname, tip))
class UnparsedFlagAccessError(Error):
"""Attempt to use flag from unparsed FlagValues."""
class ValidationError(Error):
"""Raised if flag validator constraint is not satisfied.""" | third_party/py/gflags/gflags/exceptions.py | import sys
from gflags import _helpers
# TODO(vrusinov): use DISCLAIM_key_flags when it's moved out of __init__.
_helpers.disclaim_module_ids.add(id(sys.modules[__name__]))
class Error(Exception):
"""The base class for all flags errors."""
# TODO(b/31596146): Remove FlagsError.
FlagsError = Error
class CantOpenFlagFileError(Error):
"""Raised if flagfile fails to open: doesn't exist, wrong permissions, etc."""
class DuplicateFlagCannotPropagateNoneToSwig(Error):
"""Raised when redefining a SWIG flag and the default value is None.
It's raised when redefining a SWIG flag with allow_override=True and the
default value is None. Because it's currently impossible to pass None default
value back to SWIG. See FlagValues.SetDefault for details.
"""
class DuplicateFlagError(Error):
"""Raised if there is a flag naming conflict."""
@classmethod
def from_flag(cls, flagname, flag_values, other_flag_values=None):
"""Create a DuplicateFlagError by providing flag name and values.
Args:
flagname: Name of the flag being redefined.
flag_values: FlagValues object containing the first definition of
flagname.
other_flag_values: If this argument is not None, it should be the
FlagValues object where the second definition of flagname occurs.
If it is None, we assume that we're being called when attempting
to create the flag a second time, and we use the module calling
this one as the source of the second definition.
Returns:
An instance of DuplicateFlagError.
"""
first_module = flag_values.FindModuleDefiningFlag(
flagname, default='<unknown>')
if other_flag_values is None:
second_module = _helpers.GetCallingModule()
else:
second_module = other_flag_values.FindModuleDefiningFlag(
flagname, default='<unknown>')
flag_summary = flag_values[flagname].help
msg = ("The flag '%s' is defined twice. First from %s, Second from %s. "
"Description from first occurrence: %s") % (
flagname, first_module, second_module, flag_summary)
return cls(msg)
class IllegalFlagValueError(Error):
"""Raised if the flag command line argument is illegal."""
# TODO(yileiyang): Remove IllegalFlagValue.
IllegalFlagValue = IllegalFlagValueError
class UnrecognizedFlagError(Error):
"""Raised if a flag is unrecognized.
Attributes:
flagname: Name of the unrecognized flag.
flagvalue: Value of the flag, empty if the flag is not defined.
"""
def __init__(self, flagname, flagvalue='', suggestions=None):
self.flagname = flagname
self.flagvalue = flagvalue
if suggestions:
tip = '. Did you mean: %s?' % ', '.join(suggestions)
else:
tip = ''
Error.__init__(
self, 'Unknown command line flag \'%s\'%s' % (flagname, tip))
class UnparsedFlagAccessError(Error):
"""Attempt to use flag from unparsed FlagValues."""
class ValidationError(Error):
"""Raised if flag validator constraint is not satisfied.""" | 0.310904 | 0.272339 |
from hypothesis import given
from hypothesis.strategies import lists, text
from matching import Player
@given(name=text())
def test_init(name):
    """ Make an instance of Player and check their attributes are correct. """
    player = Player(name)
    assert player.name == name
    # A freshly created player has no preferences and no match yet.
    assert player.prefs is None
    assert player.pref_names is None
    assert player.matching is None
@given(name=text())
def test_repr(name):
""" Verify that a Player instance is represented by their name. """
player = Player(name)
assert repr(player) == name
@given(name=text(), pref_names=lists(text(), min_size=1))
def test_set_prefs(name, pref_names):
""" Verify a Player can set its preferences correctly. """
player = Player(name)
others = [Player(other) for other in pref_names]
player.set_prefs(others)
assert player.prefs == others
assert player.pref_names == [other.name for other in others]
@given(name=text(), pref_names=lists(text(), min_size=1))
def test_get_favourite(name, pref_names):
""" Check the correct player is returned as the favourite of a player. """
player = Player(name)
others = [Player(other) for other in pref_names]
player.set_prefs(others)
favourite = others[0]
assert player.get_favourite() == favourite
@given(name=text(), pref_names=lists(text(), min_size=1))
def test_match(name, pref_names):
""" Check that a player can match to another player correctly. """
player = Player(name)
other = Player(pref_names[0])
player.match(other)
assert player.matching == other
@given(name=text(), pref_names=lists(text(), min_size=1))
def test_unmatch(name, pref_names):
""" Check that a player can unmatch from another player correctly. """
player = Player(name)
other = Player(pref_names[0])
player.matching = other
player.unmatch()
assert player.matching is None
@given(name=text(), pref_names=lists(text(), min_size=1))
def test_forget(name, pref_names):
    """ Test that a player can forget somebody. """
    player = Player(name)
    others = [Player(other) for other in pref_names]
    player.set_prefs(others)
    # Forget the others one at a time from the front of the list; the
    # live preference list shrinks accordingly.
    for i, other in enumerate(others[:-1]):
        player.forget(other)
        assert player.prefs == others[i + 1 :]
    player.forget(others[-1])
    assert player.prefs == []
    # forget() evidently leaves pref_names untouched — only prefs shrinks.
    assert player.pref_names == pref_names
@given(name=text(), pref_names=lists(text(), min_size=1))
def test_get_successors(name, pref_names):
    """Check that every player after the matched one is returned."""
    player = Player(name)
    others = [Player(p) for p in pref_names]
    player.set_prefs(others)
    player.matching = others[0]

    expected = others[1:] if len(player.pref_names) > 1 else []
    assert player.get_successors() == expected
@given(name=text(), pref_names=lists(text(), min_size=1, unique=True))
def test_prefers(name, pref_names):
    """Check that earlier entries in the preference list are preferred
    over later ones."""
    player = Player(name)
    others = [Player(p) for p in pref_names]
    player.set_prefs(others)

    for i, other in enumerate(others[:-1]):
        assert player.prefers(other, others[i + 1])
from hypothesis import given
from hypothesis.strategies import lists, text
from matching import Player
@given(name=text())
def test_init(name):
    """Check a newly created Player has the expected initial state."""
    player = Player(name)

    assert player.name == name
    assert player.prefs is None
    assert player.pref_names is None
    assert player.matching is None
@given(name=text())
def test_repr(name):
""" Verify that a Player instance is represented by their name. """
player = Player(name)
assert repr(player) == name
@given(name=text(), pref_names=lists(text(), min_size=1))
def test_set_prefs(name, pref_names):
""" Verify a Player can set its preferences correctly. """
player = Player(name)
others = [Player(other) for other in pref_names]
player.set_prefs(others)
assert player.prefs == others
assert player.pref_names == [other.name for other in others]
@given(name=text(), pref_names=lists(text(), min_size=1))
def test_get_favourite(name, pref_names):
""" Check the correct player is returned as the favourite of a player. """
player = Player(name)
others = [Player(other) for other in pref_names]
player.set_prefs(others)
favourite = others[0]
assert player.get_favourite() == favourite
@given(name=text(), pref_names=lists(text(), min_size=1))
def test_match(name, pref_names):
""" Check that a player can match to another player correctly. """
player = Player(name)
other = Player(pref_names[0])
player.match(other)
assert player.matching == other
@given(name=text(), pref_names=lists(text(), min_size=1))
def test_unmatch(name, pref_names):
""" Check that a player can unmatch from another player correctly. """
player = Player(name)
other = Player(pref_names[0])
player.matching = other
player.unmatch()
assert player.matching is None
@given(name=text(), pref_names=lists(text(), min_size=1))
def test_forget(name, pref_names):
""" Test that a player can forget somebody. """
player = Player(name)
others = [Player(other) for other in pref_names]
player.set_prefs(others)
for i, other in enumerate(others[:-1]):
player.forget(other)
assert player.prefs == others[i + 1 :]
player.forget(others[-1])
assert player.prefs == []
assert player.pref_names == pref_names
@given(name=text(), pref_names=lists(text(), min_size=1))
def test_get_successors(name, pref_names):
""" Test that the correct successors to another player in a player's
preference list are found. """
player = Player(name)
others = [Player(other) for other in pref_names]
player.set_prefs(others)
player.matching = others[0]
if len(player.pref_names) > 1:
successors = others[1:]
assert player.get_successors() == successors
else:
assert player.get_successors() == []
@given(name=text(), pref_names=lists(text(), min_size=1, unique=True))
def test_prefers(name, pref_names):
""" Test that a comparison of preference between two other players can be
found for a player. """
player = Player(name)
others = [Player(other) for other in pref_names]
player.set_prefs(others)
for i, other in enumerate(others[:-1]):
assert player.prefers(other, others[i + 1])
import datetime
import errno
import os
import shutil
import unittest
from xml.etree import ElementTree as et
from procsim.core import exceptions, job_order
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
JOB_ORDER_0083 = 'JobOrder_0083.xml'
TEST_JOB_ORDER = 'job_order.xml'
EXPECTED_INPUTS = [
(['$PATH/BIO_RAW_022_10_20210201T000000_20210201T013810_D20210201T013810_01_B07CK0.zip'], 'RAW_022_10', '', ''),
(['$PATH/BIO_RAW_023_10_20210201T000000_20210201T013810_D20210201T013810_01_B07CK0.zip'], 'RAW_023_10', '', ''),
(['$PATH/BIO_RAW_024_10_20210201T000000_20210201T013810_D20210201T013810_01_B07CK0.zip'], 'RAW_024_10', '', ''),
(['$PATH/BIO_RAW_025_10_20210201T002432_20210201T002932_D20210201T013810_01_B07CK0.zip'], 'RAW_025_10', '', ''),
(['$PATH/BIO_RAW_026_10_20210201T002432_20210201T002932_D20210201T013810_01_B07CK0.zip'], 'RAW_026_10', '', '')
]
def equal_ignore_order(a, b):
    """ Use only when elements are neither hashable nor sortable!

    Return True when ``a`` and ``b`` hold the same elements (compared with
    ``==``) with the same multiplicities, in any order.
    """
    leftovers = list(b)  # work on a copy so b is not mutated
    for item in a:
        if item in leftovers:
            leftovers.remove(item)
        else:
            return False
    # Every element of b must have been matched by some element of a.
    return not leftovers
def patch_job_order(src, dest, path):
    """Copy the job order file ``src`` to ``dest``, replacing every
    ``$PATH`` placeholder with ``path``.

    Fix: use context managers so both files are closed even if reading or
    writing raises (the original leaked the handles on error).
    """
    with open(src, 'r') as file_in, open(dest, 'w') as file_out:
        file_out.write(file_in.read().replace('$PATH', path))
class _Logger:
def __init__(self):
self.count = 0
def debug(self, *args, **kwargs):
pass
def progress(self, *args, **kwargs):
self.count += 1
def error(self, *args, **kwargs):
print(*args, **kwargs)
class JobOrderParserTest(unittest.TestCase):
    """Unit tests for the ICD-0083 job order parser.

    Fixes: removed a stray no-op expression statement (``expected_inputs``
    on its own line) and renamed the loop variable ``input`` which shadowed
    the builtin.
    """

    def testFactory(self):
        """The factory must reject unknown ICD names and build a parser for known ones."""
        logger = _Logger()
        self.assertRaises(exceptions.ProcsimException,
                          job_order.job_order_parser_factory, 'ESA-EOPG-EEGS-ID-0083a', logger)
        sim = job_order.job_order_parser_factory('ESA-EOPG-EEGS-ID-0083', logger)
        self.assertIsNotNone(sim)
        self.assertIsInstance(sim, job_order.JobOrderParser)

    def testParse(self):
        """Patch the template job order into a temp dir, parse it, and verify every field."""
        path = os.path.join(THIS_DIR, 'tmp')
        os.makedirs(path, exist_ok=True)
        self.addCleanup(shutil.rmtree, path)
        patch_job_order(os.path.join(THIS_DIR, JOB_ORDER_0083), os.path.join(path, TEST_JOB_ORDER), path)

        # Build the expected JobOrderInput entries and create the (empty)
        # input files referenced by the job order.
        expected_inputs = []
        for item in EXPECTED_INPUTS:
            entry = job_order.JobOrderInput()
            for file_name in item[0]:
                file_name = file_name.replace('$PATH', path)
                entry.file_names.append(file_name)
                try:
                    os.mknod(os.path.join(THIS_DIR, file_name))
                except OSError as exc:
                    if exc.errno != errno.EEXIST:  # an existing file is fine
                        raise
            entry.file_type = item[1]
            entry.alternative_input_id = item[2]
            entry.id = item[3]
            expected_inputs.append(entry)

        logger = _Logger()
        sim = job_order.job_order_parser_factory('ESA-EOPG-EEGS-ID-0083', logger)
        sim.read(os.path.join(path, TEST_JOB_ORDER))
        self.assertEqual(sim.processor_name, 'l0preproc_sm')
        self.assertEqual(sim.processor_version, '01.01')
        self.assertEqual(sim.stderr_levels, [])
        self.assertEqual(sim.stdout_levels, ['ERROR', 'WARNING', 'PROGRESS', 'INFO'])
        self.assertEqual(sim.node, 'MyNode')
        self.assertEqual(len(sim.tasks), 1)
        self.assertEqual(sim.tasks[0].name, 'Step1')
        self.assertEqual(sim.tasks[0].version, '05.03L01')
        self.assertEqual(sim.tasks[0].amount_of_ram_mb, 1073741824)
        self.assertEqual(sim.tasks[0].disk_space_mb, 1073741824)
        self.assertEqual(sim.tasks[0].nr_cpu_cores, 1)
        self.assertEqual(sim.toi_start, datetime.datetime(2021, 2, 1, 1, 2, 3, 123456))
        self.assertEqual(sim.toi_stop, datetime.datetime(2021, 2, 1, 1, 2, 3, 456000))
        params = set(sim.tasks[0].processing_parameters)
        self.assertIn('Product_Counter', params)
        self.assertIn('Processing_Stage_Flag', params)
        self.assertIn('originator_ID', params)
        self.assertIn('Orbit_Number', params)
        self.assertIn('Acquisition_Station', params)
        inputs = sim.tasks[0].inputs
        self.assertEqual(len(inputs), 5)
        self.assertTrue(equal_ignore_order(inputs, expected_inputs))
        # TODO: test outputs
if __name__ == '__main__':
    unittest.main()

import datetime
import errno
import os
import shutil
import unittest
from xml.etree import ElementTree as et
from procsim.core import exceptions, job_order
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
JOB_ORDER_0083 = 'JobOrder_0083.xml'
TEST_JOB_ORDER = 'job_order.xml'
EXPECTED_INPUTS = [
(['$PATH/BIO_RAW_022_10_20210201T000000_20210201T013810_D20210201T013810_01_B07CK0.zip'], 'RAW_022_10', '', ''),
(['$PATH/BIO_RAW_023_10_20210201T000000_20210201T013810_D20210201T013810_01_B07CK0.zip'], 'RAW_023_10', '', ''),
(['$PATH/BIO_RAW_024_10_20210201T000000_20210201T013810_D20210201T013810_01_B07CK0.zip'], 'RAW_024_10', '', ''),
(['$PATH/BIO_RAW_025_10_20210201T002432_20210201T002932_D20210201T013810_01_B07CK0.zip'], 'RAW_025_10', '', ''),
(['$PATH/BIO_RAW_026_10_20210201T002432_20210201T002932_D20210201T013810_01_B07CK0.zip'], 'RAW_026_10', '', '')
]
def equal_ignore_order(a, b):
    """ Use only when elements are neither hashable nor sortable!

    Returns True when a and b contain the same elements (compared with ==)
    with the same multiplicities, in any order. Quadratic time.
    """
    unmatched = list(b)  # copy so b is not mutated
    for element in a:
        try:
            unmatched.remove(element)  # removes the first == element
        except ValueError:
            return False  # element of a has no counterpart in b
    # b must be fully consumed, otherwise it had extra elements.
    return not unmatched
def patch_job_order(src, dest, path):
    """Copy the job order file ``src`` to ``dest``, replacing every
    ``$PATH`` placeholder with ``path``.

    Fix: use context managers so both files are closed even if reading or
    writing raises (the original leaked the handles on error).
    """
    with open(src, 'r') as file_in, open(dest, 'w') as file_out:
        file_out.write(file_in.read().replace('$PATH', path))
class _Logger:
def __init__(self):
self.count = 0
def debug(self, *args, **kwargs):
pass
def progress(self, *args, **kwargs):
self.count += 1
def error(self, *args, **kwargs):
print(*args, **kwargs)
class JobOrderParserTest(unittest.TestCase):
def testFactory(self):
logger = _Logger()
self.assertRaises(exceptions.ProcsimException,
job_order.job_order_parser_factory, 'ESA-EOPG-EEGS-ID-0083a', logger)
sim = job_order.job_order_parser_factory('ESA-EOPG-EEGS-ID-0083', logger)
self.assertIsNotNone(sim)
self.assertIsInstance(sim, job_order.JobOrderParser)
def testParse(self):
path = os.path.join(THIS_DIR, 'tmp')
os.makedirs(path, exist_ok=True)
self.addCleanup(shutil.rmtree, path)
patch_job_order(os.path.join(THIS_DIR, JOB_ORDER_0083), os.path.join(path, TEST_JOB_ORDER), path)
expected_inputs = []
for input in EXPECTED_INPUTS:
entry = job_order.JobOrderInput()
for file_name in input[0]:
file_name = file_name.replace('$PATH', path)
entry.file_names.append(file_name)
try:
os.mknod(os.path.join(THIS_DIR, file_name))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
entry.file_type = input[1]
entry.alternative_input_id = input[2]
entry.id = input[3]
expected_inputs.append(entry)
expected_inputs
logger = _Logger()
sim = job_order.job_order_parser_factory('ESA-EOPG-EEGS-ID-0083', logger)
sim.read(os.path.join(path, TEST_JOB_ORDER))
self.assertEqual(sim.processor_name, 'l0preproc_sm')
self.assertEqual(sim.processor_version, '01.01')
self.assertEqual(sim.stderr_levels, [])
self.assertEqual(sim.stdout_levels, ['ERROR', 'WARNING', 'PROGRESS', 'INFO'])
self.assertEqual(sim.node, 'MyNode')
self.assertEqual(len(sim.tasks), 1)
self.assertEqual(sim.tasks[0].name, 'Step1')
self.assertEqual(sim.tasks[0].version, '05.03L01')
self.assertEqual(sim.tasks[0].amount_of_ram_mb, 1073741824)
self.assertEqual(sim.tasks[0].disk_space_mb, 1073741824)
self.assertEqual(sim.tasks[0].nr_cpu_cores, 1)
self.assertEqual(sim.toi_start, datetime.datetime(2021, 2, 1, 1, 2, 3, 123456))
self.assertEqual(sim.toi_stop, datetime.datetime(2021, 2, 1, 1, 2, 3, 456000))
params = set(sim.tasks[0].processing_parameters)
self.assertIn('Product_Counter', params)
self.assertIn('Processing_Stage_Flag', params)
self.assertIn('originator_ID', params)
self.assertIn('Orbit_Number', params)
self.assertIn('Acquisition_Station', params)
inputs = sim.tasks[0].inputs
self.assertEqual(len(inputs), 5)
self.assertTrue(equal_ignore_order(inputs, expected_inputs))
# TODO: test outputs
if __name__ == '__main__':
    unittest.main()
import traceback
from functools import wraps
from . import exception
from . import flavor, peel, is_event, chat_flavors, inline_flavors
def _wrap_none(fn):
def w(*args, **kwargs):
try:
return fn(*args, **kwargs)
except (KeyError, exception.BadFlavor):
return None
return w
def per_chat_id(types='all'):
    """
    :param types:
        ``all`` or a list of chat types (``private``, ``group``, ``channel``)
    :return:
        a seeder function that returns the chat id only if the chat type is in ``types``.
    """
    def seed(msg):
        chat = msg['chat']
        if types == 'all' or chat['type'] in types:
            return chat['id']
        return None
    return _wrap_none(seed)
def per_chat_id_in(s, types='all'):
    """
    :param s:
        a list or set of chat id
    :param types:
        ``all`` or a list of chat types (``private``, ``group``, ``channel``)
    :return:
        a seeder function that returns the chat id only if the chat id is in ``s``
        and chat type is in ``types``.
    """
    def seed(msg):
        chat = msg['chat']
        if (types == 'all' or chat['type'] in types) and chat['id'] in s:
            return chat['id']
        return None
    return _wrap_none(seed)
def per_chat_id_except(s, types='all'):
    """
    :param s:
        a list or set of chat id
    :param types:
        ``all`` or a list of chat types (``private``, ``group``, ``channel``)
    :return:
        a seeder function that returns the chat id only if the chat id is *not* in ``s``
        and chat type is in ``types``.
    """
    def seed(msg):
        chat = msg['chat']
        if (types == 'all' or chat['type'] in types) and chat['id'] not in s:
            return chat['id']
        return None
    return _wrap_none(seed)
def per_from_id(flavors=None):
    """
    :param flavors:
        ``all`` or a list of flavors; defaults to all chat and inline flavors
    :return:
        a seeder function that returns the from id only if the message flavor is
        in ``flavors``.
    """
    if flavors is None:
        flavors = chat_flavors + inline_flavors

    def seed(msg):
        if flavors == 'all' or flavor(msg) in flavors:
            return msg['from']['id']
        return None
    return _wrap_none(seed)
def per_from_id_in(s, flavors=None):
    """
    :param s:
        a list or set of from id
    :param flavors:
        ``all`` or a list of flavors; defaults to all chat and inline flavors
    :return:
        a seeder function that returns the from id only if the from id is in ``s``
        and message flavor is in ``flavors``.
    """
    if flavors is None:
        flavors = chat_flavors + inline_flavors

    def seed(msg):
        sender = msg['from']['id']
        if (flavors == 'all' or flavor(msg) in flavors) and sender in s:
            return sender
        return None
    return _wrap_none(seed)
def per_from_id_except(s, flavors=None):
    """
    :param s:
        a list or set of from id
    :param flavors:
        ``all`` or a list of flavors; defaults to all chat and inline flavors
    :return:
        a seeder function that returns the from id only if the from id is *not* in ``s``
        and message flavor is in ``flavors``.
    """
    if flavors is None:
        flavors = chat_flavors + inline_flavors

    def seed(msg):
        sender = msg['from']['id']
        if (flavors == 'all' or flavor(msg) in flavors) and sender not in s:
            return sender
        return None
    return _wrap_none(seed)
def per_inline_from_id():
    """
    :return:
        a seeder function that returns the from id only if the message flavor
        is ``inline_query`` or ``chosen_inline_result``
    """
    # Thin wrapper: restrict per_from_id to the inline flavors only.
    return per_from_id(flavors=inline_flavors)
def per_inline_from_id_in(s):
    """
    :param s: a list or set of from id
    :return:
        a seeder function that returns the from id only if the message flavor
        is ``inline_query`` or ``chosen_inline_result`` and the from id is in ``s``.
    """
    # Thin wrapper: restrict per_from_id_in to the inline flavors only.
    return per_from_id_in(s, flavors=inline_flavors)
def per_inline_from_id_except(s):
    """
    :param s: a list or set of from id
    :return:
        a seeder function that returns the from id only if the message flavor
        is ``inline_query`` or ``chosen_inline_result`` and the from id is *not* in ``s``.
    """
    # Thin wrapper: restrict per_from_id_except to the inline flavors only.
    return per_from_id_except(s, flavors=inline_flavors)
def per_application():
    """
    :return:
        a seeder function that always returns 1, ensuring at most one delegate is ever spawned
        for the entire application.
    """
    def seed(_msg):
        # A constant key means every message maps to the same delegate.
        return 1
    return seed
def per_message(flavors='all'):
    """
    :param flavors: ``all`` or a list of flavors
    :return:
        a seeder function that returns a non-hashable only if the message flavor
        is in ``flavors``.
    """
    def seed(msg):
        if flavors == 'all' or flavor(msg) in flavors:
            # An empty list is the non-hashable marker value.
            return []
        return None
    return _wrap_none(seed)
def per_event_source_id(event_space):
    """
    :return:
        a seeder function that returns an event's source id only if that event's
        source space equals to ``event_space``.
    """
    def f(event):
        if is_event(event):
            body = peel(event)
            if body['source']['space'] == event_space:
                return body['source']['id']
        # Not an event, or from a different space.
        return None
    return _wrap_none(f)
def per_callback_query_chat_id(types='all'):
    """
    :param types:
        ``all`` or a list of chat types (``private``, ``group``, ``channel``)
    :return:
        a seeder function that returns a callback query's originating chat id
        if the chat type is in ``types``.
    """
    def f(msg):
        if flavor(msg) != 'callback_query' or 'message' not in msg:
            return None
        chat = msg['message']['chat']
        if types == 'all' or chat['type'] in types:
            return chat['id']
        return None
    return f
def per_callback_query_origin(origins='all'):
    """
    :param origins:
        ``all`` or a list of origin types (``chat``, ``inline``)
    :return:
        a seeder function that returns a callback query's origin identifier if
        that origin type is in ``origins``. The origin identifier is guaranteed
        to be a tuple.
    """
    def f(msg):
        def origin_type_ok():
            # A chat-originated callback query carries a 'message' field;
            # an inline-originated one carries 'inline_message_id'.
            return (origins == 'all'
                    or ('chat' in origins and 'message' in msg)
                    or ('inline' in origins and 'inline_message_id' in msg))

        if flavor(msg) == 'callback_query' and origin_type_ok():
            if 'inline_message_id' in msg:
                # Trailing comma builds a 1-tuple, honouring the contract
                # that the identifier is always a tuple.
                return msg['inline_message_id'],
            return msg['message']['chat']['id'], msg['message']['message_id']
        return None
    return f
def per_invoice_payload():
    """
    :return:
        a seeder function that returns the invoice payload.
    """
    def f(msg):
        # successful_payment messages nest the payload one level deeper;
        # otherwise it is read from the top level of the message.
        holder = msg['successful_payment'] if 'successful_payment' in msg else msg
        return holder['invoice_payload']
    return _wrap_none(f)
def call(func, *args, **kwargs):
    """
    :return:
        a delegator function that returns a tuple (``func``, (seed tuple,)+ ``args``, ``kwargs``).
        That is, seed tuple is inserted before supplied positional arguments.
        By default, a thread wrapping ``func`` and all those arguments is spawned.
    """
    def delegator(seed_tuple):
        positional = (seed_tuple,) + args
        return func, positional, kwargs
    return delegator
def create_run(cls, *args, **kwargs):
    """
    :return:
        a delegator function that calls the ``cls`` constructor whose arguments being
        a seed tuple followed by supplied ``*args`` and ``**kwargs``, then returns
        the object's ``run`` method. By default, a thread wrapping that ``run`` method
        is spawned.
    """
    def delegator(seed_tuple):
        instance = cls(seed_tuple, *args, **kwargs)
        return instance.run
    return delegator
def create_open(cls, *args, **kwargs):
    """
    :return:
        a delegator function that calls the ``cls`` constructor whose arguments being
        a seed tuple followed by supplied ``*args`` and ``**kwargs``, then returns
        a looping function that uses the object's ``listener`` to wait for messages
        and invokes instance method ``open``, ``on_message``, and ``on_close`` accordingly.
        By default, a thread wrapping that looping function is spawned.
    """
    def f(seed_tuple):
        j = cls(seed_tuple, *args, **kwargs)

        def wait_loop():
            bot, msg, seed = seed_tuple
            try:
                # open() may fully handle the triggering message; only pass
                # it to on_message() when it did not.
                handled = j.open(msg, seed)
                if not handled:
                    j.on_message(msg)

                # Block on the listener for each subsequent message.
                while 1:
                    msg = j.listener.wait()
                    j.on_message(msg)

            # These exceptions are "normal" exits.
            except (exception.IdleTerminate, exception.StopListening) as e:
                j.on_close(e)

            # Any other exceptions are accidents. **Print it out.**
            # This is to prevent swallowing exceptions in the case that on_close()
            # gets overridden but fails to account for unexpected exceptions.
            except Exception as e:
                traceback.print_exc()
                j.on_close(e)

        return wait_loop
    return f
def until(condition, fns):
    """
    Try a list of seeder functions until a condition is met.
    :param condition:
        a function that takes one argument - a seed - and returns ``True``
        or ``False``
    :param fns:
        a list of seeder functions
    :return:
        a "composite" seeder function that calls each supplied function in turn,
        and returns the first seed where the condition is met. If the condition
        is never met, it returns ``None``.
    """
    def composite(msg):
        for candidate in fns:
            seed = candidate(msg)
            if condition(seed):
                return seed
        # No seeder satisfied the condition.
        return None
    return composite
def chain(*fns):
    """
    :return:
        a "composite" seeder function that calls each supplied function in turn,
        and returns the first seed that is not ``None``.
    """
    # Equivalent to until(lambda seed: seed is not None, fns), inlined.
    def composite(msg):
        for fn in fns:
            seed = fn(msg)
            if seed is not None:
                return seed
        return None
    return composite
def _ensure_seeders_list(fn):
@wraps(fn)
def e(seeders, *aa, **kw):
return fn(seeders if isinstance(seeders, list) else [seeders], *aa, **kw)
return e
@_ensure_seeders_list
def pair(seeders, delegator_factory, *args, **kwargs):
    """
    The basic pair producer.
    :return:
        a (seeder, delegator_factory(\*args, \*\*kwargs)) tuple.
    :param seeders:
        If it is a seeder function or a list of one seeder function, it is returned
        as the final seeder. If it is a list of more than one seeder function, they
        are chained together before returned as the final seeder.
    """
    final_seeder = seeders[0] if len(seeders) == 1 else chain(*seeders)
    return final_seeder, delegator_factory(*args, **kwargs)
def _natural_numbers():
x = 0
while 1:
x += 1
yield x
_event_space = _natural_numbers()
def pave_event_space(fn=pair):
    """
    :return:
        a pair producer that ensures the seeder and delegator share the same event space.

    Each call reserves a fresh event space id from the module-level counter,
    appends a ``per_event_source_id`` seeder for it, and forwards
    ``event_space=`` to the delegator factory so both sides agree.
    """
    # Fix: the original declared ``global _event_space`` although the name is
    # only read (advanced via next()), never rebound — the declaration was a
    # no-op and has been removed.
    event_space = next(_event_space)

    @_ensure_seeders_list
    def p(seeders, delegator_factory, *args, **kwargs):
        return fn(seeders + [per_event_source_id(event_space)],
                  delegator_factory, *args, event_space=event_space, **kwargs)
    return p
def include_callback_query_chat_id(fn=pair, types='all'):
    """
    :return:
        a pair producer that enables static callback query capturing
        across seeder and delegator.
    :param types:
        ``all`` or a list of chat types (``private``, ``group``, ``channel``)
    """
    @_ensure_seeders_list
    def p(seeders, delegator_factory, *args, **kwargs):
        extended = seeders + [per_callback_query_chat_id(types=types)]
        return fn(extended, delegator_factory, *args,
                  include_callback_query=True, **kwargs)
    return p
from . import helper
def intercept_callback_query_origin(fn=pair, origins='all'):
    """
    :return:
        a pair producer that enables dynamic callback query origin mapping
        across seeder and delegator.
    :param origins:
        ``all`` or a list of origin types (``chat``, ``inline``).
        Origin mapping is only enabled for specified origin types.
    """
    origin_map = helper.SafeDict()

    # Key functions such as per_callback_query_origin() return a tuple as the
    # key. Wrap that key in another tuple so the router does not mistake it
    # for a key followed by positional arguments.
    def tuplize(key_fn):
        def tupled(msg):
            return (key_fn(msg),)
        return tupled

    router = helper.Router(tuplize(per_callback_query_origin(origins=origins)),
                           origin_map)

    def modify_origin_map(origin, dest, set):
        if set:
            origin_map[origin] = dest
        else:
            try:
                del origin_map[origin]
            except KeyError:
                pass

    if origins == 'all':
        intercept = modify_origin_map
    else:
        intercept = (modify_origin_map if 'chat' in origins else False,
                     modify_origin_map if 'inline' in origins else False)

    @_ensure_seeders_list
    def p(seeders, delegator_factory, *args, **kwargs):
        return fn(seeders + [_wrap_none(router.map)],
                  delegator_factory, *args, intercept_callback_query=intercept, **kwargs)
    return p

import traceback
from functools import wraps
from . import exception
from . import flavor, peel, is_event, chat_flavors, inline_flavors
def _wrap_none(fn):
def w(*args, **kwargs):
try:
return fn(*args, **kwargs)
except (KeyError, exception.BadFlavor):
return None
return w
def per_chat_id(types='all'):
"""
:param types:
``all`` or a list of chat types (``private``, ``group``, ``channel``)
:return:
a seeder function that returns the chat id only if the chat type is in ``types``.
"""
return _wrap_none(lambda msg:
msg['chat']['id']
if types == 'all' or msg['chat']['type'] in types
else None)
def per_chat_id_in(s, types='all'):
"""
:param s:
a list or set of chat id
:param types:
``all`` or a list of chat types (``private``, ``group``, ``channel``)
:return:
a seeder function that returns the chat id only if the chat id is in ``s``
and chat type is in ``types``.
"""
return _wrap_none(lambda msg:
msg['chat']['id']
if (types == 'all' or msg['chat']['type'] in types) and msg['chat']['id'] in s
else None)
def per_chat_id_except(s, types='all'):
"""
:param s:
a list or set of chat id
:param types:
``all`` or a list of chat types (``private``, ``group``, ``channel``)
:return:
a seeder function that returns the chat id only if the chat id is *not* in ``s``
and chat type is in ``types``.
"""
return _wrap_none(lambda msg:
msg['chat']['id']
if (types == 'all' or msg['chat']['type'] in types) and msg['chat']['id'] not in s
else None)
def per_from_id(flavors=None):
"""
:param flavors:
``all`` or a list of flavors
:return:
a seeder function that returns the from id only if the message flavor is
in ``flavors``.
"""
if flavors is None:
flavors = chat_flavors+inline_flavors
return _wrap_none(lambda msg:
msg['from']['id']
if flavors == 'all' or flavor(msg) in flavors
else None)
def per_from_id_in(s, flavors=None):
"""
:param s:
a list or set of from id
:param flavors:
``all`` or a list of flavors
:return:
a seeder function that returns the from id only if the from id is in ``s``
and message flavor is in ``flavors``.
"""
if flavors is None:
flavors = chat_flavors+inline_flavors
return _wrap_none(lambda msg:
msg['from']['id']
if (flavors == 'all' or flavor(msg) in flavors) and msg['from']['id'] in s
else None)
def per_from_id_except(s, flavors=None):
"""
:param s:
a list or set of from id
:param flavors:
``all`` or a list of flavors
:return:
a seeder function that returns the from id only if the from id is *not* in ``s``
and message flavor is in ``flavors``.
"""
if flavors is None:
flavors = chat_flavors+inline_flavors
return _wrap_none(lambda msg:
msg['from']['id']
if (flavors == 'all' or flavor(msg) in flavors) and msg['from']['id'] not in s
else None)
def per_inline_from_id():
"""
:return:
a seeder function that returns the from id only if the message flavor
is ``inline_query`` or ``chosen_inline_result``
"""
return per_from_id(flavors=inline_flavors)
def per_inline_from_id_in(s):
"""
:param s: a list or set of from id
:return:
a seeder function that returns the from id only if the message flavor
is ``inline_query`` or ``chosen_inline_result`` and the from id is in ``s``.
"""
return per_from_id_in(s, flavors=inline_flavors)
def per_inline_from_id_except(s):
"""
:param s: a list or set of from id
:return:
a seeder function that returns the from id only if the message flavor
is ``inline_query`` or ``chosen_inline_result`` and the from id is *not* in ``s``.
"""
return per_from_id_except(s, flavors=inline_flavors)
def per_application():
"""
:return:
a seeder function that always returns 1, ensuring at most one delegate is ever spawned
for the entire application.
"""
return lambda msg: 1
def per_message(flavors='all'):
"""
:param flavors: ``all`` or a list of flavors
:return:
a seeder function that returns a non-hashable only if the message flavor
is in ``flavors``.
"""
return _wrap_none(lambda msg: [] if flavors == 'all' or flavor(msg) in flavors else None)
def per_event_source_id(event_space):
"""
:return:
a seeder function that returns an event's source id only if that event's
source space equals to ``event_space``.
"""
def f(event):
if is_event(event):
v = peel(event)
if v['source']['space'] == event_space:
return v['source']['id']
return None
return None
return _wrap_none(f)
def per_callback_query_chat_id(types='all'):
"""
:param types:
``all`` or a list of chat types (``private``, ``group``, ``channel``)
:return:
a seeder function that returns a callback query's originating chat id
if the chat type is in ``types``.
"""
def f(msg):
if (flavor(msg) == 'callback_query' and 'message' in msg
and (types == 'all' or msg['message']['chat']['type'] in types)):
return msg['message']['chat']['id']
return None
return f
def per_callback_query_origin(origins='all'):
"""
:param origins:
``all`` or a list of origin types (``chat``, ``inline``)
:return:
a seeder function that returns a callback query's origin identifier if
that origin type is in ``origins``. The origin identifier is guaranteed
to be a tuple.
"""
def f(msg):
def origin_type_ok():
return (origins == 'all'
or ('chat' in origins and 'message' in msg)
or ('inline' in origins and 'inline_message_id' in msg))
if flavor(msg) == 'callback_query' and origin_type_ok():
if 'inline_message_id' in msg:
return msg['inline_message_id'],
return msg['message']['chat']['id'], msg['message']['message_id']
return None
return f
def per_invoice_payload():
"""
:return:
a seeder function that returns the invoice payload.
"""
def f(msg):
if 'successful_payment' in msg:
return msg['successful_payment']['invoice_payload']
return msg['invoice_payload']
return _wrap_none(f)
def call(func, *args, **kwargs):
"""
:return:
a delegator function that returns a tuple (``func``, (seed tuple,)+ ``args``, ``kwargs``).
That is, seed tuple is inserted before supplied positional arguments.
By default, a thread wrapping ``func`` and all those arguments is spawned.
"""
def f(seed_tuple):
return func, (seed_tuple,)+args, kwargs
return f
def create_run(cls, *args, **kwargs):
"""
:return:
a delegator function that calls the ``cls`` constructor whose arguments being
a seed tuple followed by supplied ``*args`` and ``**kwargs``, then returns
the object's ``run`` method. By default, a thread wrapping that ``run`` method
is spawned.
"""
def f(seed_tuple):
j = cls(seed_tuple, *args, **kwargs)
return j.run
return f
def create_open(cls, *args, **kwargs):
"""
:return:
a delegator function that calls the ``cls`` constructor whose arguments being
a seed tuple followed by supplied ``*args`` and ``**kwargs``, then returns
a looping function that uses the object's ``listener`` to wait for messages
and invokes instance method ``open``, ``on_message``, and ``on_close`` accordingly.
By default, a thread wrapping that looping function is spawned.
"""
def f(seed_tuple):
j = cls(seed_tuple, *args, **kwargs)
def wait_loop():
bot, msg, seed = seed_tuple
try:
handled = j.open(msg, seed)
if not handled:
j.on_message(msg)
while 1:
msg = j.listener.wait()
j.on_message(msg)
# These exceptions are "normal" exits.
except (exception.IdleTerminate, exception.StopListening) as e:
j.on_close(e)
# Any other exceptions are accidents. **Print it out.**
# This is to prevent swallowing exceptions in the case that on_close()
# gets overridden but fails to account for unexpected exceptions.
except Exception as e:
traceback.print_exc()
j.on_close(e)
return wait_loop
return f
def until(condition, fns):
"""
Try a list of seeder functions until a condition is met.
:param condition:
a function that takes one argument - a seed - and returns ``True``
or ``False``
:param fns:
a list of seeder functions
:return:
a "composite" seeder function that calls each supplied function in turn,
and returns the first seed where the condition is met. If the condition
is never met, it returns ``None``.
"""
def f(msg):
for fn in fns:
seed = fn(msg)
if condition(seed):
return seed
return None
return f
def chain(*fns):
    """
    :return:
        a "composite" seeder function that calls each supplied function in turn,
        and returns the first seed that is not ``None``.
    """
    # Equivalent to until(lambda seed: seed is not None, fns), inlined.
    def composite(msg):
        for fn in fns:
            seed = fn(msg)
            if seed is not None:
                return seed
        return None
    return composite
def _ensure_seeders_list(fn):
@wraps(fn)
def e(seeders, *aa, **kw):
return fn(seeders if isinstance(seeders, list) else [seeders], *aa, **kw)
return e
@_ensure_seeders_list
def pair(seeders, delegator_factory, *args, **kwargs):
"""
The basic pair producer.
:return:
a (seeder, delegator_factory(\*args, \*\*kwargs)) tuple.
:param seeders:
If it is a seeder function or a list of one seeder function, it is returned
as the final seeder. If it is a list of more than one seeder function, they
are chained together before returned as the final seeder.
"""
return (chain(*seeders) if len(seeders) > 1 else seeders[0],
delegator_factory(*args, **kwargs))
def _natural_numbers():
x = 0
while 1:
x += 1
yield x
_event_space = _natural_numbers()
def pave_event_space(fn=pair):
"""
:return:
a pair producer that ensures the seeder and delegator share the same event space.
"""
global _event_space
event_space = next(_event_space)
@_ensure_seeders_list
def p(seeders, delegator_factory, *args, **kwargs):
return fn(seeders + [per_event_source_id(event_space)],
delegator_factory, *args, event_space=event_space, **kwargs)
return p
def include_callback_query_chat_id(fn=pair, types='all'):
    """
    :return:
        a pair producer that enables static callback query capturing
        across seeder and delegator.
    :param types:
        ``all`` or a list of chat types (``private``, ``group``, ``channel``)
    """
    @_ensure_seeders_list
    def p(seeders, delegator_factory, *args, **kwargs):
        # Append the chat-id seeder, then flag the delegator side.
        augmented = seeders + [per_callback_query_chat_id(types=types)]
        return fn(augmented, delegator_factory, *args,
                  include_callback_query=True, **kwargs)
    return p
from . import helper
def intercept_callback_query_origin(fn=pair, origins='all'):
    """
    :return:
        a pair producer that enables dynamic callback query origin mapping
        across seeder and delegator.
    :param origins:
        ``all`` or a list of origin types (``chat``, ``inline``).
        Origin mapping is only enabled for specified origin types.
    """
    origin_map = helper.SafeDict()
    # For key functions that return a tuple as key (e.g. per_callback_query_origin()),
    # wrap the key in another tuple to prevent router from mistaking it as
    # a key followed by some arguments.
    def tuplize(keyfn):
        # Parameter renamed from ``fn``, which shadowed this function's
        # first parameter.
        def tp(msg):
            return (keyfn(msg),)
        return tp
    router = helper.Router(tuplize(per_callback_query_origin(origins=origins)),
                           origin_map)
    def modify_origin_map(origin, dest, enable):
        # ``enable`` renamed from ``set``, which shadowed the builtin.
        # True installs the origin -> dest mapping; False removes it,
        # silently ignoring a missing entry.
        if enable:
            origin_map[origin] = dest
        else:
            try:
                del origin_map[origin]
            except KeyError:
                pass
    if origins == 'all':
        intercept = modify_origin_map
    else:
        # (chat interceptor, inline interceptor); False disables that type.
        intercept = (modify_origin_map if 'chat' in origins else False,
                     modify_origin_map if 'inline' in origins else False)
    @_ensure_seeders_list
    def p(seeders, delegator_factory, *args, **kwargs):
        return fn(seeders + [_wrap_none(router.map)],
                  delegator_factory, *args, intercept_callback_query=intercept, **kwargs)
return p | 0.649245 | 0.12749 |
import codecs
import os
import re
import sys
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
    """``python setup.py test`` command that delegates to pytest."""

    user_options = [("pytest-args=", "a", "Arguments to pass into py.test")]

    def initialize_options(self):
        # Let setuptools set its own defaults first, then add ours.
        TestCommand.initialize_options(self)
        self.pytest_args = []

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Imported lazily so pytest is only required when tests actually run.
        import pytest
        sys.exit(pytest.main(self.pytest_args))
# Packages needed only to run the test suite (installed via the ``test``
# extra below or the deprecated ``setup.py test`` command).
test_requirements = [
    "pytest>=3.1.0",
    "pytest-django",
    "pytest-pythonpath",
    "pytest-cov",
    "mixer",
]
# Optional feature sets: ``pip install django-money[test]`` / ``[exchange]``.
extras_requirements = {
    "test": test_requirements,
    "exchange": ["certifi"],
}
def read(fname):
    """Return the UTF-8 text of *fname*, resolved relative to this file."""
    file_path = os.path.join(os.path.dirname(__file__), fname)
    # The original leaked the handle (``codecs.open(...).read()`` with no
    # close); the context manager guarantees it is closed.
    with codecs.open(file_path, encoding="utf-8") as f:
        return f.read()
def find_version():
    """Extract ``__version__`` from djmoney/__init__.py or raise RuntimeError."""
    source = read("djmoney/__init__.py")
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", source, re.M)
    if match is None:
        raise RuntimeError("Unable to find __version__ string.")
    return match.group(1)
# Packaging metadata. ``version`` is parsed out of djmoney/__init__.py at
# build time; long_description is the README. (The call's closing paren is
# on the following line.)
setup(
    name="django-money",
    version=find_version(),
    description=(
        "Adds support for using money and currency fields in django models and forms. "
        "Uses py-moneyed as the money implementation."
    ),
    long_description=read("README.rst"),
    long_description_content_type="text/x-rst",
    url="https://github.com/django-money/django-money",
    maintainer="<NAME>",
    maintainer_email="<EMAIL>",
    license="BSD",
    packages=find_packages(include=["djmoney", "djmoney.*"]),
    install_requires=["setuptools", "Django>=2.2", "py-moneyed>=1.2,<2.0"],
    python_requires=">=3.7",
    platforms=["Any"],
    keywords=["django", "py-money", "money"],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Framework :: Django",
        "Framework :: Django :: 2.2",
        "Framework :: Django :: 3.2",
        "Framework :: Django :: 4.0",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
    ],
    tests_require=test_requirements,
    extras_require=extras_requirements,
    cmdclass={"test": PyTest},
) | setup.py | import codecs
import os
import re
import sys
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
user_options = [("pytest-args=", "a", "Arguments to pass into py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
test_requirements = [
"pytest>=3.1.0",
"pytest-django",
"pytest-pythonpath",
"pytest-cov",
"mixer",
]
extras_requirements = {
"test": test_requirements,
"exchange": ["certifi"],
}
def read(fname):
file_path = os.path.join(os.path.dirname(__file__), fname)
return codecs.open(file_path, encoding="utf-8").read()
def find_version():
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", read("djmoney/__init__.py"), re.M)
if match:
return match.group(1)
raise RuntimeError("Unable to find __version__ string.")
setup(
name="django-money",
version=find_version(),
description=(
"Adds support for using money and currency fields in django models and forms. "
"Uses py-moneyed as the money implementation."
),
long_description=read("README.rst"),
long_description_content_type="text/x-rst",
url="https://github.com/django-money/django-money",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
license="BSD",
packages=find_packages(include=["djmoney", "djmoney.*"]),
install_requires=["setuptools", "Django>=2.2", "py-moneyed>=1.2,<2.0"],
python_requires=">=3.7",
platforms=["Any"],
keywords=["django", "py-money", "money"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Framework :: Django",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.2",
"Framework :: Django :: 4.0",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
tests_require=test_requirements,
extras_require=extras_requirements,
cmdclass={"test": PyTest},
) | 0.307982 | 0.322859 |
import os
import numpy as np
from data.imca import generate_synthetic_data
from metrics.mcc import mean_corr_coef
from models.icebeem_wrapper import ICEBEEM_wrapper
from models.ivae.ivae_wrapper import IVAE_wrapper
from models.tcl.tcl_wrapper_gpu import TCL_wrapper
def run_ivae_exp(args, config):
    """Run iVAE simulations over a grid of (n_layers, n_obs_per_seg).

    Data for each grid cell is generated once (fixed ``config.data_seed``)
    and the model is retrained ``args.nSims`` times with different seeds.

    :param args: CLI namespace; uses nSims, dataset, test, checkpoints.
    :param config: experiment config; uses data_* fields and ivae.* fields.
    :return: dict with data dims and per-cell lists of MCC scores.
    """
    data_dim = config.data_dim
    n_segments = config.n_segments
    n_layers = config.n_layers
    n_obs_per_seg = config.n_obs_per_seg
    data_seed = config.data_seed
    max_iter = config.ivae.max_iter
    lr = config.ivae.lr
    cuda = config.ivae.cuda
    # results[l][n] collects one MCC score per simulation seed.
    results = {l: {n: [] for n in n_obs_per_seg} for l in n_layers}
    nSims = args.nSims
    dataset = args.dataset
    test = args.test
    for l in n_layers:
        for n in n_obs_per_seg:
            # Data is generated once per cell and shared across seeds.
            x, y, s = generate_synthetic_data(data_dim, n_segments, n, l, seed=data_seed,
                                              simulationMethod=dataset, one_hot_labels=True, varyMean=True)
            for seed in range(nSims):
                print('Running exp with L={} and n={}; seed={}'.format(l, n, seed))
                # run iVAE (one checkpoint file per (dataset, l, n, seed))
                ckpt_file = os.path.join(args.checkpoints, 'ivae_{}_l{}_n{}_s{}.pt'.format(dataset, l, n, seed))
                res_iVAE = IVAE_wrapper(X=x, U=y, n_layers=l + 1, hidden_dim=data_dim * 2,
                                        cuda=cuda, max_iter=max_iter, lr=lr,
                                        ckpt_file=ckpt_file, seed=seed, test=test)
                # store results: MCC between recovered latents and true sources
                results[l][n].append(mean_corr_coef(res_iVAE[0].detach().numpy(), s))
                print(mean_corr_coef(res_iVAE[0].detach().numpy(), s))
    # prepare output
    Results = {
        'data_dim': data_dim,
        'data_segments': n_segments,
        'CorrelationCoef': results
    }
    return Results
def run_icebeem_exp(args, config):
    """Run ICE-BeeM simulations over a grid of (n_layers, n_obs_per_seg).

    Same grid/seed structure as ``run_ivae_exp``; per seed, the best MCC
    over all recovered source estimates is recorded.

    :param args: CLI namespace; uses nSims, dataset, test, checkpoints.
    :param config: experiment config; uses data_* fields and icebeem.* fields.
    :return: dict with data dims and per-cell lists of MCC scores.
    """
    data_dim = config.data_dim
    n_segments = config.n_segments
    n_layers = config.n_layers
    n_obs_per_seg = config.n_obs_per_seg
    data_seed = config.data_seed
    lr_flow = config.icebeem.lr_flow
    lr_ebm = config.icebeem.lr_ebm
    n_layers_flow = config.icebeem.n_layers_flow
    ebm_hidden_size = config.icebeem.ebm_hidden_size
    # results[l][n] collects one MCC score per simulation seed.
    results = {l: {n: [] for n in n_obs_per_seg} for l in n_layers}
    nSims = args.nSims
    dataset = args.dataset
    test = args.test
    for l in n_layers:
        for n in n_obs_per_seg:
            # Data is generated once per cell and shared across seeds.
            x, y, s = generate_synthetic_data(data_dim, n_segments, n, l, seed=data_seed,
                                              simulationMethod=dataset, one_hot_labels=True)
            for seed in range(nSims):
                print('Running exp with L={} and n={}; seed={}'.format(l, n, seed))
                # EBM depth tracks the mixing depth of the data.
                n_layers_ebm = l + 1
                ckpt_file = os.path.join(args.checkpoints, 'icebeem_{}_l{}_n{}_s{}.pt'.format(dataset, l, n, seed))
                recov_sources = ICEBEEM_wrapper(X=x, Y=y, ebm_hidden_size=ebm_hidden_size,
                                                n_layers_ebm=n_layers_ebm, n_layers_flow=n_layers_flow,
                                                lr_flow=lr_flow, lr_ebm=lr_ebm, seed=seed, ckpt_file=ckpt_file,
                                                test=test)
                # store results: best MCC across all recovered source estimates
                results[l][n].append(np.max([mean_corr_coef(z, s) for z in recov_sources]))
                print(np.max([mean_corr_coef(z, s) for z in recov_sources]))
    # prepare output
    Results = {
        'data_dim': data_dim,
        'data_segments': n_segments,
        'CorrelationCoef': results
    }
    return Results
def run_tcl_exp(args, config):
    """Run TCL simulations over a grid of (n_layers, n_obs_per_seg).

    Records MCC scores both with and without the final ICA step; MCC is
    computed against the squared sources (``s ** 2``).
    NOTE(review): the closing ``return Results`` of this function lies on a
    corrupted line just beyond this span in the source dump.
    """
    # Per-depth [max_steps_base, max_steps_init] training schedule.
    stepDict = {1: [int(5e3), int(5e3)], 2: [int(1e4), int(1e4)], 3: [int(1e4), int(1e4)], 4: [int(1e4), int(1e4)],
                5: [int(1e4), int(1e4)]}
    data_dim = config.data_dim
    n_segments = config.n_segments
    n_layers = config.n_layers
    n_obs_per_seg = config.n_obs_per_seg
    data_seed = config.data_seed
    # results[l][n] holds MCC after ICA; results_no_ica[l][n] without ICA.
    results = {l: {n: [] for n in n_obs_per_seg} for l in n_layers}
    results_no_ica = {l: {n: [] for n in n_obs_per_seg} for l in n_layers}
    num_comp = data_dim
    nSims = args.nSims
    dataset = args.dataset
    test = args.test
    for l in n_layers:
        for n in n_obs_per_seg:
            # generate data (once per cell, shared across seeds)
            x, y, s = generate_synthetic_data(data_dim, n_segments, n, l, seed=data_seed,
                                              simulationMethod=dataset, one_hot_labels=False)
            for seed in range(nSims):
                print('Running exp with L={} and n={}; seed={}'.format(l, n, seed))
                # checkpointing done in TF is more complicated than pytorch, create a separate folder per arg tuple
                ckpt_folder = os.path.join(args.checkpoints, args.dataset, str(l), str(n), str(seed))
                # run TCL
                res_TCL = TCL_wrapper(sensor=x.T, label=y, random_seed=seed,
                                      list_hidden_nodes=[num_comp * 2] * (l - 1) + [num_comp],
                                      max_steps=stepDict[l][0] * 2, max_steps_init=stepDict[l][1],
                                      ckpt_dir=ckpt_folder, test=test)
                # store results (MCC against squared sources)
                mcc_no_ica = mean_corr_coef(res_TCL[0].T, s ** 2)
                mcc_ica = mean_corr_coef(res_TCL[1].T, s ** 2)
                print('TCL mcc (no ICA): {}\t mcc: {}'.format(mcc_no_ica, mcc_ica))
                results[l][n].append(mcc_ica)
                results_no_ica[l][n].append(mcc_no_ica)
    # prepare output
    Results = {
        'data_dim': data_dim,
        'data_segments': n_segments,
        'CorrelationCoef': results,
        'CorrelationCoef_no_ica': results_no_ica,
    }
return Results | runners/simulation_runner.py | import os
import numpy as np
from data.imca import generate_synthetic_data
from metrics.mcc import mean_corr_coef
from models.icebeem_wrapper import ICEBEEM_wrapper
from models.ivae.ivae_wrapper import IVAE_wrapper
from models.tcl.tcl_wrapper_gpu import TCL_wrapper
def run_ivae_exp(args, config):
"""run iVAE simulations"""
data_dim = config.data_dim
n_segments = config.n_segments
n_layers = config.n_layers
n_obs_per_seg = config.n_obs_per_seg
data_seed = config.data_seed
max_iter = config.ivae.max_iter
lr = config.ivae.lr
cuda = config.ivae.cuda
results = {l: {n: [] for n in n_obs_per_seg} for l in n_layers}
nSims = args.nSims
dataset = args.dataset
test = args.test
for l in n_layers:
for n in n_obs_per_seg:
x, y, s = generate_synthetic_data(data_dim, n_segments, n, l, seed=data_seed,
simulationMethod=dataset, one_hot_labels=True, varyMean=True)
for seed in range(nSims):
print('Running exp with L={} and n={}; seed={}'.format(l, n, seed))
# generate data
# run iVAE
ckpt_file = os.path.join(args.checkpoints, 'ivae_{}_l{}_n{}_s{}.pt'.format(dataset, l, n, seed))
res_iVAE = IVAE_wrapper(X=x, U=y, n_layers=l + 1, hidden_dim=data_dim * 2,
cuda=cuda, max_iter=max_iter, lr=lr,
ckpt_file=ckpt_file, seed=seed, test=test)
# store results
results[l][n].append(mean_corr_coef(res_iVAE[0].detach().numpy(), s))
print(mean_corr_coef(res_iVAE[0].detach().numpy(), s))
# prepare output
Results = {
'data_dim': data_dim,
'data_segments': n_segments,
'CorrelationCoef': results
}
return Results
def run_icebeem_exp(args, config):
"""run ICE-BeeM simulations"""
data_dim = config.data_dim
n_segments = config.n_segments
n_layers = config.n_layers
n_obs_per_seg = config.n_obs_per_seg
data_seed = config.data_seed
lr_flow = config.icebeem.lr_flow
lr_ebm = config.icebeem.lr_ebm
n_layers_flow = config.icebeem.n_layers_flow
ebm_hidden_size = config.icebeem.ebm_hidden_size
results = {l: {n: [] for n in n_obs_per_seg} for l in n_layers}
nSims = args.nSims
dataset = args.dataset
test = args.test
for l in n_layers:
for n in n_obs_per_seg:
x, y, s = generate_synthetic_data(data_dim, n_segments, n, l, seed=data_seed,
simulationMethod=dataset, one_hot_labels=True)
for seed in range(nSims):
print('Running exp with L={} and n={}; seed={}'.format(l, n, seed))
# generate data
n_layers_ebm = l + 1
ckpt_file = os.path.join(args.checkpoints, 'icebeem_{}_l{}_n{}_s{}.pt'.format(dataset, l, n, seed))
recov_sources = ICEBEEM_wrapper(X=x, Y=y, ebm_hidden_size=ebm_hidden_size,
n_layers_ebm=n_layers_ebm, n_layers_flow=n_layers_flow,
lr_flow=lr_flow, lr_ebm=lr_ebm, seed=seed, ckpt_file=ckpt_file,
test=test)
# store results
results[l][n].append(np.max([mean_corr_coef(z, s) for z in recov_sources]))
print(np.max([mean_corr_coef(z, s) for z in recov_sources]))
# prepare output
Results = {
'data_dim': data_dim,
'data_segments': n_segments,
'CorrelationCoef': results
}
return Results
def run_tcl_exp(args, config):
"""run TCL simulations"""
stepDict = {1: [int(5e3), int(5e3)], 2: [int(1e4), int(1e4)], 3: [int(1e4), int(1e4)], 4: [int(1e4), int(1e4)],
5: [int(1e4), int(1e4)]}
data_dim = config.data_dim
n_segments = config.n_segments
n_layers = config.n_layers
n_obs_per_seg = config.n_obs_per_seg
data_seed = config.data_seed
results = {l: {n: [] for n in n_obs_per_seg} for l in n_layers}
results_no_ica = {l: {n: [] for n in n_obs_per_seg} for l in n_layers}
num_comp = data_dim
nSims = args.nSims
dataset = args.dataset
test = args.test
for l in n_layers:
for n in n_obs_per_seg:
# generate data
x, y, s = generate_synthetic_data(data_dim, n_segments, n, l, seed=data_seed,
simulationMethod=dataset, one_hot_labels=False)
for seed in range(nSims):
print('Running exp with L={} and n={}; seed={}'.format(l, n, seed))
# checkpointing done in TF is more complicated than pytorch, create a separate folder per arg tuple
ckpt_folder = os.path.join(args.checkpoints, args.dataset, str(l), str(n), str(seed))
# run TCL
res_TCL = TCL_wrapper(sensor=x.T, label=y, random_seed=seed,
list_hidden_nodes=[num_comp * 2] * (l - 1) + [num_comp],
max_steps=stepDict[l][0] * 2, max_steps_init=stepDict[l][1],
ckpt_dir=ckpt_folder, test=test)
# store results
mcc_no_ica = mean_corr_coef(res_TCL[0].T, s ** 2)
mcc_ica = mean_corr_coef(res_TCL[1].T, s ** 2)
print('TCL mcc (no ICA): {}\t mcc: {}'.format(mcc_no_ica, mcc_ica))
results[l][n].append(mcc_ica)
results_no_ica[l][n].append(mcc_no_ica)
# prepare output
Results = {
'data_dim': data_dim,
'data_segments': n_segments,
'CorrelationCoef': results,
'CorrelationCoef_no_ica': results_no_ica,
}
return Results | 0.522446 | 0.307423 |
from tf_keras_1.optimizers.imports import *
from system.imports import *
@accepts(dict, post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def load_optimizer(system_dict):
    '''
    Load Optimizers in training states
    Maps the optimizer name in system_dict["local"]["optimizer"] to an
    instantiated optimizer object, reading hyper-parameters from
    system_dict["hyper-parameters"]["optimizer"]["params"].
    Args:
        system_dict (dict): System dictionary storing experiment state and set variables
    Returns:
        dict: updated system dict
    '''
    # NOTE(review): ``kro`` is presumably the Keras optimizers module aliased
    # in tf_keras_1.optimizers.imports — confirm against that module.
    optimizer = system_dict["local"]["optimizer"];
    learning_rate = system_dict["hyper-parameters"]["learning_rate"];
    # "sgd" and "nesterov_sgd" differ only in the ``nesterov`` flag.
    if(optimizer == "sgd"):
        system_dict["local"]["optimizer"] = kro.SGD(
            lr=learning_rate,
            momentum=system_dict["hyper-parameters"]["optimizer"]["params"]["momentum"],
            decay=system_dict["hyper-parameters"]["optimizer"]["params"]["weight_decay"],
            nesterov=False,
            clipnorm=system_dict["hyper-parameters"]["optimizer"]["params"]["clipnorm"],
            clipvalue=system_dict["hyper-parameters"]["optimizer"]["params"]["clipvalue"]);
    elif(optimizer == "nesterov_sgd"):
        system_dict["local"]["optimizer"] = kro.SGD(
            lr=learning_rate,
            momentum=system_dict["hyper-parameters"]["optimizer"]["params"]["momentum"],
            decay=system_dict["hyper-parameters"]["optimizer"]["params"]["weight_decay"],
            nesterov=True,
            clipnorm=system_dict["hyper-parameters"]["optimizer"]["params"]["clipnorm"],
            clipvalue=system_dict["hyper-parameters"]["optimizer"]["params"]["clipvalue"]);
    elif(optimizer == "rmsprop"):
        system_dict["local"]["optimizer"] = kro.RMSprop(
            lr=learning_rate,
            rho=system_dict["hyper-parameters"]["optimizer"]["params"]["decay_rate"],
            epsilon=system_dict["hyper-parameters"]["optimizer"]["params"]["epsilon"],
            decay=system_dict["hyper-parameters"]["optimizer"]["params"]["weight_decay"],
            clipnorm=system_dict["hyper-parameters"]["optimizer"]["params"]["clipnorm"],
            clipvalue=system_dict["hyper-parameters"]["optimizer"]["params"]["clipvalue"]);
    elif(optimizer == "adam"):
        system_dict["local"]["optimizer"] = kro.Adam(
            lr=learning_rate,
            beta_1=system_dict["hyper-parameters"]["optimizer"]["params"]["beta1"],
            beta_2=system_dict["hyper-parameters"]["optimizer"]["params"]["beta2"],
            epsilon=system_dict["hyper-parameters"]["optimizer"]["params"]["epsilon"],
            decay=system_dict["hyper-parameters"]["optimizer"]["params"]["weight_decay"],
            amsgrad=system_dict["hyper-parameters"]["optimizer"]["params"]["amsgrad"],
            clipnorm=system_dict["hyper-parameters"]["optimizer"]["params"]["clipnorm"],
            clipvalue=system_dict["hyper-parameters"]["optimizer"]["params"]["clipvalue"]);
    elif(optimizer == "nadam"):
        # NOTE(review): unlike the other adaptive branches, nadam passes no
        # ``decay`` argument — confirm whether weight_decay is intentionally
        # unsupported here.
        system_dict["local"]["optimizer"] = kro.Nadam(
            lr=learning_rate,
            beta_1=system_dict["hyper-parameters"]["optimizer"]["params"]["beta1"],
            beta_2=system_dict["hyper-parameters"]["optimizer"]["params"]["beta2"],
            epsilon=system_dict["hyper-parameters"]["optimizer"]["params"]["epsilon"],
            clipnorm=system_dict["hyper-parameters"]["optimizer"]["params"]["clipnorm"],
            clipvalue=system_dict["hyper-parameters"]["optimizer"]["params"]["clipvalue"]
            );
    elif(optimizer == "adamax"):
        system_dict["local"]["optimizer"] = kro.Adamax(
            lr=learning_rate,
            beta_1=system_dict["hyper-parameters"]["optimizer"]["params"]["beta1"],
            beta_2=system_dict["hyper-parameters"]["optimizer"]["params"]["beta2"],
            epsilon=system_dict["hyper-parameters"]["optimizer"]["params"]["epsilon"],
            decay=system_dict["hyper-parameters"]["optimizer"]["params"]["weight_decay"],
            clipnorm=system_dict["hyper-parameters"]["optimizer"]["params"]["clipnorm"],
            clipvalue=system_dict["hyper-parameters"]["optimizer"]["params"]["clipvalue"]);
    elif(optimizer == "adadelta"):
        system_dict["local"]["optimizer"] = kro.Adadelta(
            lr=learning_rate,
            rho=system_dict["hyper-parameters"]["optimizer"]["params"]["rho"],
            epsilon=system_dict["hyper-parameters"]["optimizer"]["params"]["epsilon"],
            decay=system_dict["hyper-parameters"]["optimizer"]["params"]["weight_decay"],
            clipnorm=system_dict["hyper-parameters"]["optimizer"]["params"]["clipnorm"],
            clipvalue=system_dict["hyper-parameters"]["optimizer"]["params"]["clipvalue"]);
    elif(optimizer == "adagrad"):
        system_dict["local"]["optimizer"] = kro.Adagrad(
            lr=learning_rate,
            decay=system_dict["hyper-parameters"]["optimizer"]["params"]["weight_decay"],
            clipnorm=system_dict["hyper-parameters"]["optimizer"]["params"]["clipnorm"],
            clipvalue=system_dict["hyper-parameters"]["optimizer"]["params"]["clipvalue"]);
return system_dict; | monk/tf_keras_1/optimizers/return_optimizer.py | from tf_keras_1.optimizers.imports import *
from system.imports import *
@accepts(dict, post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def load_optimizer(system_dict):
'''
Load Optimizers in training states
Args:
system_dict (dict): System dictionary storing experiment state and set variables
Returns:
dict: updated system dict
'''
optimizer = system_dict["local"]["optimizer"];
learning_rate = system_dict["hyper-parameters"]["learning_rate"];
if(optimizer == "sgd"):
system_dict["local"]["optimizer"] = kro.SGD(
lr=learning_rate,
momentum=system_dict["hyper-parameters"]["optimizer"]["params"]["momentum"],
decay=system_dict["hyper-parameters"]["optimizer"]["params"]["weight_decay"],
nesterov=False,
clipnorm=system_dict["hyper-parameters"]["optimizer"]["params"]["clipnorm"],
clipvalue=system_dict["hyper-parameters"]["optimizer"]["params"]["clipvalue"]);
elif(optimizer == "nesterov_sgd"):
system_dict["local"]["optimizer"] = kro.SGD(
lr=learning_rate,
momentum=system_dict["hyper-parameters"]["optimizer"]["params"]["momentum"],
decay=system_dict["hyper-parameters"]["optimizer"]["params"]["weight_decay"],
nesterov=True,
clipnorm=system_dict["hyper-parameters"]["optimizer"]["params"]["clipnorm"],
clipvalue=system_dict["hyper-parameters"]["optimizer"]["params"]["clipvalue"]);
elif(optimizer == "rmsprop"):
system_dict["local"]["optimizer"] = kro.RMSprop(
lr=learning_rate,
rho=system_dict["hyper-parameters"]["optimizer"]["params"]["decay_rate"],
epsilon=system_dict["hyper-parameters"]["optimizer"]["params"]["epsilon"],
decay=system_dict["hyper-parameters"]["optimizer"]["params"]["weight_decay"],
clipnorm=system_dict["hyper-parameters"]["optimizer"]["params"]["clipnorm"],
clipvalue=system_dict["hyper-parameters"]["optimizer"]["params"]["clipvalue"]);
elif(optimizer == "adam"):
system_dict["local"]["optimizer"] = kro.Adam(
lr=learning_rate,
beta_1=system_dict["hyper-parameters"]["optimizer"]["params"]["beta1"],
beta_2=system_dict["hyper-parameters"]["optimizer"]["params"]["beta2"],
epsilon=system_dict["hyper-parameters"]["optimizer"]["params"]["epsilon"],
decay=system_dict["hyper-parameters"]["optimizer"]["params"]["weight_decay"],
amsgrad=system_dict["hyper-parameters"]["optimizer"]["params"]["amsgrad"],
clipnorm=system_dict["hyper-parameters"]["optimizer"]["params"]["clipnorm"],
clipvalue=system_dict["hyper-parameters"]["optimizer"]["params"]["clipvalue"]);
elif(optimizer == "nadam"):
system_dict["local"]["optimizer"] = kro.Nadam(
lr=learning_rate,
beta_1=system_dict["hyper-parameters"]["optimizer"]["params"]["beta1"],
beta_2=system_dict["hyper-parameters"]["optimizer"]["params"]["beta2"],
epsilon=system_dict["hyper-parameters"]["optimizer"]["params"]["epsilon"],
clipnorm=system_dict["hyper-parameters"]["optimizer"]["params"]["clipnorm"],
clipvalue=system_dict["hyper-parameters"]["optimizer"]["params"]["clipvalue"]
);
elif(optimizer == "adamax"):
system_dict["local"]["optimizer"] = kro.Adamax(
lr=learning_rate,
beta_1=system_dict["hyper-parameters"]["optimizer"]["params"]["beta1"],
beta_2=system_dict["hyper-parameters"]["optimizer"]["params"]["beta2"],
epsilon=system_dict["hyper-parameters"]["optimizer"]["params"]["epsilon"],
decay=system_dict["hyper-parameters"]["optimizer"]["params"]["weight_decay"],
clipnorm=system_dict["hyper-parameters"]["optimizer"]["params"]["clipnorm"],
clipvalue=system_dict["hyper-parameters"]["optimizer"]["params"]["clipvalue"]);
elif(optimizer == "adadelta"):
system_dict["local"]["optimizer"] = kro.Adadelta(
lr=learning_rate,
rho=system_dict["hyper-parameters"]["optimizer"]["params"]["rho"],
epsilon=system_dict["hyper-parameters"]["optimizer"]["params"]["epsilon"],
decay=system_dict["hyper-parameters"]["optimizer"]["params"]["weight_decay"],
clipnorm=system_dict["hyper-parameters"]["optimizer"]["params"]["clipnorm"],
clipvalue=system_dict["hyper-parameters"]["optimizer"]["params"]["clipvalue"]);
elif(optimizer == "adagrad"):
system_dict["local"]["optimizer"] = kro.Adagrad(
lr=learning_rate,
decay=system_dict["hyper-parameters"]["optimizer"]["params"]["weight_decay"],
clipnorm=system_dict["hyper-parameters"]["optimizer"]["params"]["clipnorm"],
clipvalue=system_dict["hyper-parameters"]["optimizer"]["params"]["clipvalue"]);
return system_dict; | 0.659076 | 0.092074 |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class StructuredAttention_bi(nn.Module):
    """Bidirectional structured attention between context C and query Q.

    Cosine-style similarity: both inputs are L2-normalized (with dropout)
    before the dot product, and the softmax is sharpened by ``scale``.
    """
    def __init__(self, dropout=0.1, scale=100):
        super(StructuredAttention_bi, self).__init__()
        self.dropout = dropout
        self.scale = scale

    def forward(self, C, Q, c_mask, q_mask):
        # Unpacking doubles as a rank-5 sanity check on Q.
        bsz, _, num_img, num_region, hsz = Q.shape
        S, S_mask = self.similarity(C, Q, c_mask, q_mask)
        # Row-wise attention over Q for each context position ...
        attn_c = F.softmax(S * self.scale, dim=-1) * S_mask
        # ... and column-wise attention over C for each query position.
        attn_q = F.softmax(S * self.scale, dim=-2) * S_mask
        A_c = torch.matmul(attn_c, Q)
        A_q = torch.matmul(attn_q.transpose(-2, -1), C)
        return A_c, A_q, S_mask, S_mask.transpose(-2, -1)

    def similarity(self, C, Q, c_mask, q_mask):
        Cn = F.dropout(F.normalize(C, p=2, dim=-1), p=self.dropout, training=self.training)
        Qn = F.dropout(F.normalize(Q, p=2, dim=-1), p=self.dropout, training=self.training)
        # Outer product of the two masks: valid (context, query) pairs.
        S_mask = torch.matmul(c_mask.unsqueeze(-1), q_mask.unsqueeze(-2))
        S = torch.matmul(Cn, Qn.transpose(-2, -1))
        # Large negative fill on masked positions so softmax ignores them.
        masked_S = S - 1e10*(1 - S_mask)
        return masked_S, S_mask
class StructuredAttention_frame(nn.Module):
    """Structured attention where each context position attends over the
    per-frame query vectors (one vector per image/frame).

    NOTE(review): the final ``return masked_S, S_mask`` of ``similarity``
    falls on a corrupted line just beyond this span in the source dump.
    """
    def __init__(self, dropout=0.1, scale=100):
        super(StructuredAttention_frame, self).__init__()
        self.dropout = dropout
        self.scale = scale
    def forward(self, C, Q, c_mask, q_mask):
        # Unpacking doubles as a rank-4 sanity check on Q.
        bsz, _, num_img, hsz = Q.shape
        S, S_mask = self.similarity(C, Q, c_mask, q_mask)
        # Sharpened softmax over frames, then re-masked.
        S_ = F.softmax(S * self.scale, dim=-1)
        S_ = S_ * S_mask
        # Attended per-frame summary of Q for each context position.
        A = torch.matmul(S_, Q)
        return A, S, S_mask, S_
    def similarity(self, C, Q, c_mask, q_mask):
        # Cosine-style similarity: L2-normalize (with dropout), then dot.
        C = F.dropout(F.normalize(C, p=2, dim=-1), p=self.dropout, training=self.training)
        Q = F.dropout(F.normalize(Q, p=2, dim=-1), p=self.dropout, training=self.training)
        # Mask depends only on the context side here (broadcast over frames).
        S_mask = c_mask.unsqueeze(-1)
        S = torch.matmul(C, Q.transpose(-2, -1))
        masked_S = S - 1e10*(1 - S_mask)
return masked_S, S_mask | iPerceiveVideoQA/qanet/context_query_attention.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class StructuredAttention_bi(nn.Module):
def __init__(self, dropout=0.1, scale=100):
super(StructuredAttention_bi, self).__init__()
self.dropout = dropout
self.scale = scale
def forward(self, C, Q, c_mask, q_mask):
bsz, _, num_img, num_region, hsz = Q.shape
S, S_mask = self.similarity(C, Q, c_mask, q_mask)
S_c = F.softmax(S * self.scale, dim=-1)
S_q = F.softmax(S * self.scale, dim=-2)
S_c = S_c * S_mask
S_q = S_q * S_mask
A_c = torch.matmul(S_c, Q)
A_q = torch.matmul(S_q.transpose(-2, -1), C)
return A_c, A_q, S_mask, S_mask.transpose(-2, -1)
def similarity(self, C, Q, c_mask, q_mask):
C = F.dropout(F.normalize(C, p=2, dim=-1), p=self.dropout, training=self.training)
Q = F.dropout(F.normalize(Q, p=2, dim=-1), p=self.dropout, training=self.training)
S_mask = torch.matmul(c_mask.unsqueeze(-1), q_mask.unsqueeze(-2))
S = torch.matmul(C, Q.transpose(-2, -1))
masked_S = S - 1e10*(1 - S_mask)
return masked_S, S_mask
class StructuredAttention_frame(nn.Module):
def __init__(self, dropout=0.1, scale=100):
super(StructuredAttention_frame, self).__init__()
self.dropout = dropout
self.scale = scale
def forward(self, C, Q, c_mask, q_mask):
bsz, _, num_img, hsz = Q.shape
S, S_mask = self.similarity(C, Q, c_mask, q_mask)
S_ = F.softmax(S * self.scale, dim=-1)
S_ = S_ * S_mask
A = torch.matmul(S_, Q)
return A, S, S_mask, S_
def similarity(self, C, Q, c_mask, q_mask):
C = F.dropout(F.normalize(C, p=2, dim=-1), p=self.dropout, training=self.training)
Q = F.dropout(F.normalize(Q, p=2, dim=-1), p=self.dropout, training=self.training)
S_mask = c_mask.unsqueeze(-1)
S = torch.matmul(C, Q.transpose(-2, -1))
masked_S = S - 1e10*(1 - S_mask)
return masked_S, S_mask | 0.885136 | 0.421314 |
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
# Module-level logger named after this module.
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import importlib
import json
import warnings
# External imports
from docutils.parsers.rst.directives import unchanged
from sphinx.errors import SphinxError
# Bokeh imports
from bokeh.model import Model
from bokeh.util.warnings import BokehDeprecationWarning
# Bokeh imports
from .bokeh_directive import BokehDirective, py_sig_re
from .templates import MODEL_DETAIL
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Names exported by ``from ... import *`` — the module's public API.
__all__ = (
    'BokehModelDirective',
    'setup',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class BokehModelDirective(BokehDirective):
    ''' Implements the ``bokeh-model`` directive: imports the named model,
    instantiates it, and renders its default property values as JSON in the
    reference docs.

    '''

    has_content = True
    required_arguments = 1
    optional_arguments = 1
    option_spec = {
        'module': unchanged
    }

    def run(self):
        sig = " ".join(self.arguments)
        m = py_sig_re.match(sig)
        if m is None:
            raise SphinxError("Unable to parse signature for bokeh-model: %r" % sig)
        name_prefix, model_name, arglist, retann = m.groups()
        module_name = self.options['module']
        try:
            module = importlib.import_module(module_name)
        except ImportError:
            raise SphinxError("Unable to generate reference docs for %s, couldn't import module '%s'" % (model_name, module_name))
        model = getattr(module, model_name, None)
        if model is None:
            raise SphinxError("Unable to generate reference docs for %s, no model '%s' in %s" % (model_name, model_name, module_name))
        if not issubclass(model, Model):
            # BUG FIX: this message previously read "is a subclass of Model",
            # the opposite of the failing condition.
            raise SphinxError("Unable to generate reference docs for %s, model '%s' is not a subclass of Model" % (model_name, model_name))
        # We may need to instantiate deprecated objects as part of documenting
        # them in the reference guide. Suppress any warnings here to keep the
        # docs build clean just for this case
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=BokehDeprecationWarning)
            model_obj = model()
        model_json = json.dumps(
            model_obj.to_json(include_defaults=True),
            sort_keys=True,
            indent=2,
            separators=(',', ': ')
        )
        rst_text = MODEL_DETAIL.render(
            name=model_name,
            module_name=module_name,
            model_json=model_json,
        )
        return self._parse(rst_text, "<bokeh-model>")
def setup(app):
    ''' Required Sphinx extension setup function.

    Registers the ``bokeh-model`` directive into Sphinx's ``py`` domain.

    '''
    app.add_directive_to_domain('py', 'bokeh-model', BokehModelDirective)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#----------------------------------------------------------------------------- | bokeh/sphinxext/bokeh_model.py | #-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import importlib
import json
import warnings
# External imports
from docutils.parsers.rst.directives import unchanged
from sphinx.errors import SphinxError
# Bokeh imports
from bokeh.model import Model
from bokeh.util.warnings import BokehDeprecationWarning
# Bokeh imports
from .bokeh_directive import BokehDirective, py_sig_re
from .templates import MODEL_DETAIL
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'BokehModelDirective',
'setup',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class BokehModelDirective(BokehDirective):
    ''' Render a Bokeh model's JSON property defaults in the reference docs.

    Usage requires one signature argument (the model class name) and the
    ``:module:`` option naming the module the model is imported from. The
    model is instantiated so its defaults can be serialized, then rendered
    through the ``MODEL_DETAIL`` template.
    '''

    has_content = True
    required_arguments = 1
    optional_arguments = 1
    option_spec = {
        'module': unchanged
    }

    def run(self):
        ''' Parse the directive signature, import and validate the model,
        and return the parsed reStructuredText nodes.

        Raises:
            SphinxError: if the signature cannot be parsed, the module
                cannot be imported, the name is missing from the module,
                or the object is not a Bokeh ``Model`` subclass.
        '''
        sig = " ".join(self.arguments)
        m = py_sig_re.match(sig)
        if m is None:
            raise SphinxError("Unable to parse signature for bokeh-model: %r" % sig)
        name_prefix, model_name, arglist, retann = m.groups()
        module_name = self.options['module']
        try:
            module = importlib.import_module(module_name)
        except ImportError:
            raise SphinxError("Unable to generate reference docs for %s, couldn't import module '%s'" % (model_name, module_name))
        model = getattr(module, model_name, None)
        if model is None:
            raise SphinxError("Unable to generate reference docs for %s, no model '%s' in %s" % (model_name, model_name, module_name))
        if not issubclass(model, Model):
            # BUG FIX: the message previously said "is a subclass of Model",
            # the opposite of the condition that triggers it.
            raise SphinxError("Unable to generate reference docs for %s, model '%s' is not a subclass of Model" % (model_name, model_name))
        # We may need to instantiate deprecated objects as part of documenting
        # them in the reference guide. Suppress any warnings here to keep the
        # docs build clean just for this case
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=BokehDeprecationWarning)
            model_obj = model()
        model_json = json.dumps(
            model_obj.to_json(include_defaults=True),
            sort_keys=True,
            indent=2,
            separators=(',', ': ')
        )
        rst_text = MODEL_DETAIL.render(
            name=model_name,
            module_name=module_name,
            model_json=model_json,
        )
        return self._parse(rst_text, "<bokeh-model>")
def setup(app):
    ''' Hook called by Sphinx to register this extension. '''
    directive = BokehModelDirective
    app.add_directive_to_domain('py', 'bokeh-model', directive)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
import sys, unittest
from django.test import TestCase
from django.core.serializers.json import DjangoJSONEncoder
from django.utils import timezone
from explorer.exporters import CSVExporter, JSONExporter, ExcelExporter, PdfExporter
from explorer.tests.factories import SimpleQueryFactory
from explorer.models import QueryResult
import json
from datetime import date, datetime
from six import b
class TestCsv(TestCase):
    ''' Exercises CSV export of query results. '''

    def test_writing_unicode(self):
        # Build a processed result set, then substitute rows containing a
        # non-ASCII string before exporting.
        query_result = QueryResult(SimpleQueryFactory(sql='select 1 as "a", 2 as ""').sql)
        query_result.execute_query()
        query_result.process()
        query_result._data = [[1, None], [u"Jenét", '1']]
        output = CSVExporter(query=None)._get_output(query_result).getvalue()
        self.assertEqual(output, 'a,\r\n1,\r\nJenét,1\r\n')

    def test_custom_delimiter(self):
        exporter = CSVExporter(query=SimpleQueryFactory(sql='select 1, 2'))
        output = exporter.get_output(delim='|')
        self.assertEqual(output, '1|2\r\n1|2\r\n')
class TestJson(TestCase):
    ''' Exercises JSON export of query results. '''

    def _exported(self, sql, rows):
        # Build a processed QueryResult for *sql*, replace its data with
        # *rows*, and return the string the JSON exporter produces.
        query_result = QueryResult(SimpleQueryFactory(sql=sql).sql)
        query_result.execute_query()
        query_result.process()
        query_result._data = rows
        return JSONExporter(query=None)._get_output(query_result).getvalue()

    def test_writing_json(self):
        output = self._exported('select 1 as "a", 2 as ""',
                                [[1, None], [u"Jenét", '1']])
        expected = [{'a': 1, '': None}, {'a': 'Jenét', '': '1'}]
        self.assertEqual(output, json.dumps(expected))

    def test_writing_datetimes(self):
        # Dates are not JSON-serializable by default, so the expected
        # output goes through DjangoJSONEncoder.
        output = self._exported('select 1 as "a", 2 as "b"',
                                [[1, date.today()]])
        expected = [{'a': 1, 'b': date.today()}]
        self.assertEqual(output, json.dumps(expected, cls=DjangoJSONEncoder))
class TestExcel(TestCase):
    ''' Exercises Excel (xlsx) export of query results.

    Building an "expected" xlsx workbook for a byte-level comparison is
    impractical (see the helper functions in the XlsxWriter test suite),
    so these tests only check the output begins with the zip magic
    number ('PK'), which at least proves the exporter ran end to end.
    '''

    def _export(self, rows):
        # Build a processed result (with a deliberately over-long title)
        # carrying *rows*, and export it to xlsx bytes.
        query_result = QueryResult(SimpleQueryFactory(
            sql='select 1 as "a", 2 as ""',
            title='this title is longer than 32 characters').sql)
        query_result.execute_query()
        query_result.process()
        query_result._data = rows
        exporter = ExcelExporter(query=SimpleQueryFactory())
        return exporter._get_output(query_result).getvalue()

    def test_writing_excel(self):
        when = timezone.make_aware(datetime.now(), timezone.get_current_timezone())
        output = self._export([[1, None], [u"Jenét", when]])
        self.assertEqual(output[:2], b('PK'))

    def test_writing_dict_fields(self):
        output = self._export([[1, ['foo', 'bar']], [2, {'foo': 'bar'}]])
        self.assertEqual(output[:2], b('PK'))
@unittest.skipIf(sys.version_info[0] > 2, "only supported in python 2.7")
class TestPdf(TestCase):
    ''' Exercises PDF export; only the '%PDF' magic header is checked,
    mirroring the approach used for the Excel exporter tests. '''

    def test_writing_pdf(self):
        query_result = QueryResult(SimpleQueryFactory(
            sql='select 1 as "a", 2 as ""',
            title='this title is longer than 32 characters').sql)
        query_result.execute_query()
        query_result.process()
        when = timezone.make_aware(datetime.now(), timezone.get_current_timezone())
        query_result._data = [[1, None], [u"Jenét", when]]
        output = PdfExporter(query=SimpleQueryFactory())._get_output(query_result).getvalue()
        self.assertEqual(output[:4], b('%PDF'))
from django.test import TestCase
from django.core.serializers.json import DjangoJSONEncoder
from django.utils import timezone
from explorer.exporters import CSVExporter, JSONExporter, ExcelExporter, PdfExporter
from explorer.tests.factories import SimpleQueryFactory
from explorer.models import QueryResult
import json
from datetime import date, datetime
from six import b
class TestCsv(TestCase):
    ''' Exercises CSV export of query results. '''

    def test_writing_unicode(self):
        # Build a processed result set, then substitute rows containing a
        # non-ASCII string before exporting.
        query_result = QueryResult(SimpleQueryFactory(sql='select 1 as "a", 2 as ""').sql)
        query_result.execute_query()
        query_result.process()
        query_result._data = [[1, None], [u"Jenét", '1']]
        output = CSVExporter(query=None)._get_output(query_result).getvalue()
        self.assertEqual(output, 'a,\r\n1,\r\nJenét,1\r\n')

    def test_custom_delimiter(self):
        exporter = CSVExporter(query=SimpleQueryFactory(sql='select 1, 2'))
        output = exporter.get_output(delim='|')
        self.assertEqual(output, '1|2\r\n1|2\r\n')
class TestJson(TestCase):
    ''' Exercises JSON export of query results. '''

    def _exported(self, sql, rows):
        # Build a processed QueryResult for *sql*, replace its data with
        # *rows*, and return the string the JSON exporter produces.
        query_result = QueryResult(SimpleQueryFactory(sql=sql).sql)
        query_result.execute_query()
        query_result.process()
        query_result._data = rows
        return JSONExporter(query=None)._get_output(query_result).getvalue()

    def test_writing_json(self):
        output = self._exported('select 1 as "a", 2 as ""',
                                [[1, None], [u"Jenét", '1']])
        expected = [{'a': 1, '': None}, {'a': 'Jenét', '': '1'}]
        self.assertEqual(output, json.dumps(expected))

    def test_writing_datetimes(self):
        # Dates are not JSON-serializable by default, so the expected
        # output goes through DjangoJSONEncoder.
        output = self._exported('select 1 as "a", 2 as "b"',
                                [[1, date.today()]])
        expected = [{'a': 1, 'b': date.today()}]
        self.assertEqual(output, json.dumps(expected, cls=DjangoJSONEncoder))
class TestExcel(TestCase):
    ''' Exercises Excel (xlsx) export of query results.

    Building an "expected" xlsx workbook for a byte-level comparison is
    impractical (see the helper functions in the XlsxWriter test suite),
    so these tests only check the output begins with the zip magic
    number ('PK'), which at least proves the exporter ran end to end.
    '''

    def _export(self, rows):
        # Build a processed result (with a deliberately over-long title)
        # carrying *rows*, and export it to xlsx bytes.
        query_result = QueryResult(SimpleQueryFactory(
            sql='select 1 as "a", 2 as ""',
            title='this title is longer than 32 characters').sql)
        query_result.execute_query()
        query_result.process()
        query_result._data = rows
        exporter = ExcelExporter(query=SimpleQueryFactory())
        return exporter._get_output(query_result).getvalue()

    def test_writing_excel(self):
        when = timezone.make_aware(datetime.now(), timezone.get_current_timezone())
        output = self._export([[1, None], [u"Jenét", when]])
        self.assertEqual(output[:2], b('PK'))

    def test_writing_dict_fields(self):
        output = self._export([[1, ['foo', 'bar']], [2, {'foo': 'bar'}]])
        self.assertEqual(output[:2], b('PK'))
@unittest.skipIf(sys.version_info[0] > 2, "only supported in python 2.7")
class TestPdf(TestCase):
    ''' Exercises PDF export; only the '%PDF' magic header is checked,
    mirroring the approach used for the Excel exporter tests. '''

    def test_writing_pdf(self):
        query_result = QueryResult(SimpleQueryFactory(
            sql='select 1 as "a", 2 as ""',
            title='this title is longer than 32 characters').sql)
        query_result.execute_query()
        query_result.process()
        when = timezone.make_aware(datetime.now(), timezone.get_current_timezone())
        query_result._data = [[1, None], [u"Jenét", when]]
        output = PdfExporter(query=SimpleQueryFactory())._get_output(query_result).getvalue()
        self.assertEqual(output[:4], b('%PDF'))
# Tutorial script: list operations, membership tests, loops, dictionaries.
lst = [1, 2, 3, 4, 5]

# Append 10 at the end of the list.
lst.append(10)
print(lst)
# [1, 2, 3, 4, 5, 10]

# Insert 22 at index 3.
lst.insert(3, 22)
print(lst)
# [1, 2, 3, 22, 4, 5, 10]

# Extend lst with another list.  `lst += [4, 5, 6]` looks equivalent,
# but += builds a new list and is therefore slower.
lst.extend([4, 5, 6])
print(lst)
# [1, 2, 3, 22, 4, 5, 10, 4, 5, 6]

# Find the index of the value 10.
print(lst.index(10))
# 6

# Delete the element at index 3.
del lst[3]
print(lst)
# [1, 2, 3, 4, 5, 10, 4, 5, 6]

# Pop the element at index 4 into a.
a = lst.pop(4)
print(a, lst)
# 5 [1, 2, 3, 4, 10, 4, 5, 6]

# Pop the last element into a.
a = lst.pop()
print(a, lst)
# 6 [1, 2, 3, 4, 10, 4, 5]

# Remove the first occurrence of the value 3.
lst.remove(3)
print(lst)
# [1, 2, 4, 10, 4, 5]

# Remove every element.
lst.clear()
print(lst)
# []

# Membership tests and conditional (ternary) expressions.
lst = ["ab", "cd", "ef", "gh", "ij"]
print("cd" in lst)
# True

if "ij" in lst:
    print("성공")
else:
    print("실패")
# The same check as a conditional expression.
print("성공" if ("ij" in lst) else "실패")

# Print "성공" only if BOTH "ij" and "zz" are in lst, otherwise "실패".
if "ij" in lst and "zz" in lst:
    print("성공")
else:
    print("실패")
# Conditional-expression form.
print("성공" if ("ij" in lst and "zz" in lst) else "실패")

# Print "성공" if EITHER "ij" or "zz" is in lst, otherwise "실패".
if "ij" in lst or "zz" in lst:
    print("성공")
else:
    print("실패")
# Conditional-expression form.
print("성공" if ("ij" in lst or "zz" in lst) else "실패")

# range(1, 5) yields 1 through 4 (the stop value is excluded).
for i in range(1, 5):
    print(i)
# 1 2 3 4
print("----------------------------------")

# Print every element of lst by index.
for i in range(len(lst)):
    print(lst[i])
# ab cd ef gh ij
print("----------------------------------")

# Print every element of lst by iterating directly: each element of
# lst is bound to tmp in turn.
for tmp in lst:
    print(tmp)
# ab cd ef gh ij

# Dictionary basics.  The literal repeats the key "키1"; the last value
# ("aa") wins, so the dict ends up with three keys.
dic = {"키1":"값1", "키2":"값2", "키1":"aa", "키3":"값2"}
print(dic)
print(dic["키1"])
# print(dic["키"])
# Looking up a key that does not exist raises KeyError.

for tmp in dic:
    # tmp is the key ...
    print(tmp)
    # ... and dic[tmp] is the corresponding value.
    print(dic[tmp])
# 키1, aa, 키2, 값2, 키3, 값2

dic2 = {"name":"홍길동", "job":"도둑", "address":["울릉도", "제주도", "함경도"]}
# Print the whole dictionary.
print(dic2)
# Print only the address list.
print(dic2["address"])
# Print only the second address.
print(dic2["address"][1])

# Assigning to a new key adds it; assigning to an existing key replaces it.
dic2["age"] = 33
print(dic2)
dic2["name"] = "전우치"
print(dic2)
# Tutorial script: list operations, membership tests, loops, dictionaries.
lst = [1, 2, 3, 4, 5]

# Append 10 at the end of the list.
lst.append(10)
print(lst)
# [1, 2, 3, 4, 5, 10]

# Insert 22 at index 3.
lst.insert(3, 22)
print(lst)
# [1, 2, 3, 22, 4, 5, 10]

# Extend lst with another list.  `lst += [4, 5, 6]` looks equivalent,
# but += builds a new list and is therefore slower.
lst.extend([4, 5, 6])
print(lst)
# [1, 2, 3, 22, 4, 5, 10, 4, 5, 6]

# Find the index of the value 10.
print(lst.index(10))
# 6

# Delete the element at index 3.
del lst[3]
print(lst)
# [1, 2, 3, 4, 5, 10, 4, 5, 6]

# Pop the element at index 4 into a.
a = lst.pop(4)
print(a, lst)
# 5 [1, 2, 3, 4, 10, 4, 5, 6]

# Pop the last element into a.
a = lst.pop()
print(a, lst)
# 6 [1, 2, 3, 4, 10, 4, 5]

# Remove the first occurrence of the value 3.
lst.remove(3)
print(lst)
# [1, 2, 4, 10, 4, 5]

# Remove every element.
lst.clear()
print(lst)
# []

# Membership tests and conditional (ternary) expressions.
lst = ["ab", "cd", "ef", "gh", "ij"]
print("cd" in lst)
# True

if "ij" in lst:
    print("성공")
else:
    print("실패")
# The same check as a conditional expression.
print("성공" if ("ij" in lst) else "실패")

# Print "성공" only if BOTH "ij" and "zz" are in lst, otherwise "실패".
if "ij" in lst and "zz" in lst:
    print("성공")
else:
    print("실패")
# Conditional-expression form.
print("성공" if ("ij" in lst and "zz" in lst) else "실패")

# Print "성공" if EITHER "ij" or "zz" is in lst, otherwise "실패".
if "ij" in lst or "zz" in lst:
    print("성공")
else:
    print("실패")
# Conditional-expression form.
print("성공" if ("ij" in lst or "zz" in lst) else "실패")

# range(1, 5) yields 1 through 4 (the stop value is excluded).
for i in range(1, 5):
    print(i)
# 1 2 3 4
print("----------------------------------")

# Print every element of lst by index.
for i in range(len(lst)):
    print(lst[i])
# ab cd ef gh ij
print("----------------------------------")

# Print every element of lst by iterating directly: each element of
# lst is bound to tmp in turn.
for tmp in lst:
    print(tmp)
# ab cd ef gh ij

# Dictionary basics.  The literal repeats the key "키1"; the last value
# ("aa") wins, so the dict ends up with three keys.
dic = {"키1":"값1", "키2":"값2", "키1":"aa", "키3":"값2"}
print(dic)
print(dic["키1"])
# print(dic["키"])
# Looking up a key that does not exist raises KeyError.

for tmp in dic:
    # tmp is the key ...
    print(tmp)
    # ... and dic[tmp] is the corresponding value.
    print(dic[tmp])
# 키1, aa, 키2, 값2, 키3, 값2

dic2 = {"name":"홍길동", "job":"도둑", "address":["울릉도", "제주도", "함경도"]}
# Print the whole dictionary.
print(dic2)
# Print only the address list.
print(dic2["address"])
# Print only the second address.
print(dic2["address"][1])

# Assigning to a new key adds it; assigning to an existing key replaces it.
dic2["age"] = 33
print(dic2)
dic2["name"] = "전우치"
print(dic2)
import logging
import json
from pcmdi_metrics.driver.outputmetrics import OutputMetrics
from pcmdi_metrics.driver.observation import Observation
from pcmdi_metrics.driver.model import Model
import pcmdi_metrics.driver.dataset
import pcmdi_metrics.driver.pmp_parser
from pcmdi_metrics import LOG_LEVEL
import ast
class PMPDriver(object):
    ''' Top-level driver for a PMP metrics run.

    Configures package logging, loads the observation dictionary and the
    region definitions, then for every requested variable/region pair
    compares each test dataset against each reference dataset and writes
    the resulting metrics via OutputMetrics.
    '''
    def __init__(self, parameter):
        # parameter: parsed parameter/namespace object; the attributes
        # read below (case_id, vars, regions, ...) must be present.
        plog = logging.getLogger("pcmdi_metrics")
        plog.setLevel(LOG_LEVEL)
        # create file handler which logs messages
        # The doubled %% survives the outer %-interpolation, so existing
        # handlers receive a logging format with only case_id baked in.
        formatter = logging.Formatter('%%(levelname)s::%%(asctime)s::%%(name)s::%s:: %%(message)s' %
                                      (parameter.case_id), datefmt="%Y-%m-%d %H:%M")
        for h in plog.handlers:
            h.setFormatter(formatter)
        fh = logging.FileHandler(
            'pcmdi_metrics_driver.%s.log' % (parameter.case_id))
        fh.setLevel(LOG_LEVEL)
        formatter = logging.Formatter(
            '%(levelname)s::%(asctime)s:: %(message)s', datefmt="%Y-%m-%d %H:%M")
        fh.setFormatter(formatter)
        plog.addHandler(fh)
        self.parameter = parameter
        self.obs_dict = {}  # populated by load_obs_dict()
        self.regions_dict = {}  # populated by create_regions_dict()
        self.var = ''  # short variable name of the current iteration
        self.output_metric = None
        self.region = ''
        self.sftlf = pcmdi_metrics.driver.dataset.DataSet.create_sftlf(
            self.parameter)
        self.default_regions = []  # set by load_default_regions_and_regions_specs()
        self.regions_specs = {}  # set by load_default_regions_and_regions_specs()
    def __call__(self):
        # Allow the driver instance itself to be used as the entry point.
        self.run_diags()
    def run_diags(self):
        ''' Runs the diagnostics. What did you think it did? '''
        self.obs_dict = self.load_obs_dict()
        self.regions_dict = self.create_regions_dict()
        # NOTE: the loop target is deliberately the *attribute*
        # self.var_name_long so helper methods can read the current value.
        for self.var_name_long in self.parameter.vars:
            # e.g. 'tas_850' -> 'tas' (anything after the first '_' is
            # dropped here).
            self.var = self.var_name_long.split('_')[0]
            if self.var not in self.obs_dict:
                logging.getLogger("pcmdi_metrics").error(
                    'Variable %s not in obs_dict' % self.var)
                continue
            for region in self.regions_dict[self.var]:
                logging.getLogger("pcmdi_metrics").info("REGION: {}".format(region))
                self.region = self.create_region(region)
                self.run_reference_and_test_comparison()
    def load_obs_dict(self):
        ''' Loads obs_info_dictionary.json and appends
        custom_observations from the parameter file if needed. '''
        obs_file_name = 'obs_info_dictionary.json'
        obs_json_file = pcmdi_metrics.driver.dataset.DataSet.load_path_as_file_obj(
            obs_file_name)
        obs_dict = json.loads(obs_json_file.read())
        obs_json_file.close()
        if hasattr(self.parameter, 'custom_observations'):
            # Can't use load_path_as_file_obj() b/c might not be in /share/
            cust_obs_json_file = open(self.parameter.custom_observations)
            obs_dict.update(json.load(cust_obs_json_file))
            cust_obs_json_file.close()
        return obs_dict
    def create_regions_dict(self):
        ''' Creates a dict from self.default_regions.

        Maps each short variable name to its list of regions, taken from
        the parameter file or falling back to the defaults.
        '''
        self.load_default_regions_and_regions_specs()
        regions_dict = {}
        for var_name_long in self.parameter.vars:
            var = var_name_long.split('_')[0]
            regions = self.parameter.regions
            region = regions.get(var, self.default_regions)
            if not isinstance(region, (list, tuple)):
                region = [region]
            if None in region:
                # None acts as a placeholder meaning "prepend the defaults".
                region.remove(None)
                for r in self.default_regions:
                    region.insert(0, r)
            regions_dict[var] = region
        return regions_dict
    def load_default_regions_and_regions_specs(self):
        ''' Gets the default_regions dict and regions_specs dict
        from default_regions.py and stores them as attributes. '''
        default_regions_file = \
            pcmdi_metrics.driver.dataset.DataSet.load_path_as_file_obj(
                'default_regions.py')
        # Execute default_regions.py in this frame and pick the two dicts
        # it defines out of locals().
        # NOTE(review): relying on exec() populating locals() is fragile in
        # Python 3 — confirm this still works on the target runtime.
        exec(compile(open(default_regions_file.name).read(),
                     default_regions_file.name, 'exec'))
        default_regions_file.close()
        try:
            self.default_regions = locals()['default_regions']
            self.regions_specs = locals()['regions_specs']
        except KeyError:
            logging.getLogger("pcmdi_metrics").error(
                'Failed to open default_regions.py')
        region_values = self.parameter.regions_values
        region_values.update(getattr(self.parameter, "regions_values", {}))
        # Now need to edit regions_specs
        for region in region_values:
            insert_dict = {'value': region_values[region]}
            if region in self.regions_specs:
                self.regions_specs[region].update(insert_dict)
            else:
                self.regions_specs[region] = insert_dict
        self.regions_specs.update(getattr(self.parameter,
                                          "regions_specs", {}))
    def create_region(self, region):
        ''' From the argument region, it gets that region from self.regions_specs
        (which itself is loaded from default_regions.py) '''
        if isinstance(region, str):
            region_name = region
            # Fall back to a lower-cased lookup when the exact name is absent.
            region = self.regions_specs.get(
                region_name,
                self.regions_specs.get(region_name.lower()))
            region['id'] = region_name
        elif region is None:
            # It's okay if region == None
            pass
        else:
            raise Exception('Unknown region: %s' % region)
        return region
    def run_reference_and_test_comparison(self):
        ''' Does the (obs or model) vs (obs or model) comparison. '''
        reference_data_set = self.parameter.reference_data_set
        test_data_set = self.parameter.test_data_set
        reference_data_set_is_obs = self.is_data_set_obs(reference_data_set)
        test_data_set_is_obs = self.is_data_set_obs(test_data_set)
        # If either the reference or test are obs, the data sets
        # themselves need to be modified.
        if reference_data_set_is_obs:
            reference_data_set = Observation.setup_obs_list_from_parameter(
                reference_data_set, self.obs_dict, self.var)
        if test_data_set_is_obs:
            test_data_set = Observation.setup_obs_list_from_parameter(
                test_data_set, self.obs_dict, self.var)
        if len(reference_data_set) == 0:  # We did not find any ref!!!
            raise RuntimeError("No reference dataset found!")
        # self.reference/self.test are either an obs or model
        for reference in reference_data_set:
            try:
                ref = self.determine_obs_or_model(reference_data_set_is_obs,
                                                  reference, self.parameter.reference_data_path)
            # TODO Make this a custom exception. This exception is for
            # when a model doesn't have sftlf for a given region
            except RuntimeError:
                continue
            for test in test_data_set:
                logging.getLogger("pcmdi_metrics").info("TEST DATA IS: {}".format(test))
                self.output_metric = OutputMetrics(self.parameter, self.var_name_long,
                                                   self.obs_dict, sftlf=self.sftlf)
                self.output_metric.add_region(self.region)
                try:
                    tst = self.determine_obs_or_model(test_data_set_is_obs,
                                                      test, self.parameter.test_data_path)
                    self.output_metric.obs_or_model = tst.obs_or_model
                # TODO Make this a custom exception. This exception is for
                # when a model doesn't have sftlf for a given region
                except RuntimeError:
                    continue
                except Exception as err:
                    logging.getLogger("pcmdi_metrics").info("Unexpected error: {e}".format(e=err))
                    break
                try:
                    self.output_metric.calculate_and_output_metrics(ref, tst)
                except RuntimeError:
                    continue
                except Exception as err:
                    err_msg = "Unexpected error in calculate output metrics: {e}".format(e=err)
                    logging.getLogger("pcmdi_metrics").info(err_msg)
                    break
    def is_data_set_obs(self, data_set):
        ''' Is data_set (which is either a test or reference) an obs? '''
        if 'all' in data_set:
            return True
        data_set_is_obs = True
        # If an element of data_set is not in the obs_dict, then
        # data_set is a model.
        for obs in data_set:
            if obs not in self.obs_dict[self.var]:
                data_set_is_obs = False
                break
        return data_set_is_obs
    def determine_obs_or_model(self, is_obs, ref_or_test, data_path):
        ''' Actually create Observation or Module object
        based on if ref_or_test is an obs or model. '''
        if is_obs:
            logging.getLogger("pcmdi_metrics").info(
                '%s is an obs' % ref_or_test)
            return Observation(self.parameter, self.var_name_long, self.region,
                               ref_or_test, self.obs_dict, data_path, self.sftlf)
        else:
            logging.getLogger("pcmdi_metrics").info(
                '%s is a model' % ref_or_test)
            return Model(self.parameter, self.var_name_long, self.region,
                         ref_or_test, self.obs_dict, data_path, self.sftlf)
def create_mean_climate_parser():
    ''' Build and return the argument parser for the mean-climate driver.

    Every option is optional on the command line; values not supplied are
    expected to come from the parameter file handled by PMPMetricsParser.
    '''
    parser = pcmdi_metrics.driver.pmp_parser.PMPMetricsParser()
    parser.add_argument(
        '--case_id',
        dest='case_id',
        # BUG FIX: the two concatenated fragments previously rendered as
        # "multiplecases" (missing space).
        help='Defines a subdirectory to the metrics output, so multiple ' +
        'cases can be compared',
        required=False)
    parser.add_argument(
        '-v', '--vars',
        type=str,
        nargs='+',
        dest='vars',
        help='Variables to use',
        required=False)
    parser.add_argument(
        '--regions',
        type=ast.literal_eval,
        dest='regions',
        help='Regions on which to run the metrics',
        required=False)
    parser.add_argument(
        '--regions_values',
        type=ast.literal_eval,
        dest='regions_values',
        help='Users can customize regions values names',
        required=False)
    parser.add_argument(
        '-r', '--reference_data_set',
        type=str,
        nargs='+',
        dest='reference_data_set',
        help='List of observations or models that are used as a ' +
        'reference against the test_data_set',
        required=False)
    parser.add_argument(
        '--reference_data_path',
        dest='reference_data_path',
        # typo fix: "climitologies" -> "climatologies"
        help='Path for the reference climatologies',
        required=False)
    parser.add_argument(
        '-t', '--test_data_set',
        type=str,
        nargs='+',
        dest='test_data_set',
        help='List of observations or models to test ' +
        'against the reference_data_set',
        required=False)
    parser.add_argument(
        '--test_data_path',
        dest='test_data_path',
        # typo fix: "climitologies" -> "climatologies"
        help='Path for the test climatologies',
        required=False)
    parser.add_argument(
        '--target_grid',
        dest='target_grid',
        help='Options are "2.5x2.5" or an actual cdms2 grid object',
        required=False)
    parser.add_argument(
        '--regrid_tool',
        dest='regrid_tool',
        help='Options are "regrid2" or "esmf"',
        required=False)
    parser.add_argument(
        '--regrid_method',
        dest='regrid_method',
        help='Options are "linear" or "conservative", ' +
        'only if regrid_tool is "esmf"',
        required=False)
    parser.add_argument(
        '--regrid_tool_ocn',
        dest='regrid_tool_ocn',
        help='Options are "regrid2" or "esmf"',
        required=False)
    parser.add_argument(
        '--regrid_method_ocn',
        dest='regrid_method_ocn',
        help='Options are "linear" or "conservative", ' +
        'only if regrid_tool is "esmf"',
        required=False)
    parser.add_argument(
        '--period',
        dest='period',
        help='A simulation parameter',
        required=False)
    parser.add_argument(
        '--realization',
        dest='realization',
        help='A simulation parameter',
        required=False)
    parser.add_argument(
        '--simulation_description_mapping',
        type=ast.literal_eval,
        dest='simulation_description_mapping',
        # NOTE(review): this help text appears copied from --test_data_set;
        # confirm the intended wording before changing user-facing output.
        help='List of observations or models to test ' +
        'against the reference_data_set',
        default={},
        required=False)
    parser.add_argument(
        '--ext',
        dest='ext',
        help='Extension for the output files?',
        required=False)
    parser.add_argument(
        '--dry_run',
        # If input is 'True' or 'true', return True. Otherwise False.
        type=lambda x: x.lower() == 'true',
        dest='dry_run',
        help='True if output is to be created, False otherwise',
        required=False)
    parser.add_argument(
        '--filename_template',
        dest='filename_template',
        help='Template for climatology files',
        required=False)
    parser.add_argument(
        '--sftlf_filename_template',
        dest='sftlf_filename_template',
        help='Filename template for landsea masks ("sftlf")',
        required=False)
    parser.add_argument(
        '--custom_observations',
        dest='custom_observations',
        help='Path to an alternative, custom observation file',
        required=False)
    parser.add_argument(
        '--metrics_output_path',
        dest='metrics_output_path',
        help='Directory of where to put the results',
        required=False)
    parser.add_argument(
        '--filename_output_template',
        dest='filename_output_template',
        help='Filename for the interpolated test climatologies',
        required=False)
    parser.add_argument(
        '--save_test_clims',
        # If input is 'True' or 'true', return True. Otherwise False.
        type=lambda x: x.lower() == 'true',
        dest='save_test_clims',
        help='True if to save interpolated test climatologies,' +
        ' otherwise False',
        required=False)
    parser.add_argument(
        '--test_clims_interpolated_output',
        dest='test_clims_interpolated_output',
        help='Directory of where to put the interpolated ' +
        'test climatologies',
        required=False)
    parser.add_argument(
        '--output_json_template',
        # No explicit dest: argparse derives 'output_json_template'.
        help='Filename template for results json files',
        required=False)
    parser.add_argument(
        '--user_notes',
        dest='user_notes',
        help='Provide a short description to help identify this run of the PMP mean climate.',
        required=False)
    parser.add_argument(
        '--cmec',
        dest='cmec',
        action='store_true',
        help='Save metrics in CMEC format',
        default=False,
        required=False)
    parser.add_argument(
        '--no_cmec',
        dest='cmec',
        action='store_false',
        help='Option to not save metrics in CMEC format',
        default=False,
        required=False)
    return parser
import json
from pcmdi_metrics.driver.outputmetrics import OutputMetrics
from pcmdi_metrics.driver.observation import Observation
from pcmdi_metrics.driver.model import Model
import pcmdi_metrics.driver.dataset
import pcmdi_metrics.driver.pmp_parser
from pcmdi_metrics import LOG_LEVEL
import ast
class PMPDriver(object):
    ''' Top-level driver for a PMP metrics run.

    Configures package logging, loads the observation dictionary and the
    region definitions, then for every requested variable/region pair
    compares each test dataset against each reference dataset and writes
    the resulting metrics via OutputMetrics.
    '''
    def __init__(self, parameter):
        # parameter: parsed parameter/namespace object; the attributes
        # read below (case_id, vars, regions, ...) must be present.
        plog = logging.getLogger("pcmdi_metrics")
        plog.setLevel(LOG_LEVEL)
        # create file handler which logs messages
        # The doubled %% survives the outer %-interpolation, so existing
        # handlers receive a logging format with only case_id baked in.
        formatter = logging.Formatter('%%(levelname)s::%%(asctime)s::%%(name)s::%s:: %%(message)s' %
                                      (parameter.case_id), datefmt="%Y-%m-%d %H:%M")
        for h in plog.handlers:
            h.setFormatter(formatter)
        fh = logging.FileHandler(
            'pcmdi_metrics_driver.%s.log' % (parameter.case_id))
        fh.setLevel(LOG_LEVEL)
        formatter = logging.Formatter(
            '%(levelname)s::%(asctime)s:: %(message)s', datefmt="%Y-%m-%d %H:%M")
        fh.setFormatter(formatter)
        plog.addHandler(fh)
        self.parameter = parameter
        self.obs_dict = {}  # populated by load_obs_dict()
        self.regions_dict = {}  # populated by create_regions_dict()
        self.var = ''  # short variable name of the current iteration
        self.output_metric = None
        self.region = ''
        self.sftlf = pcmdi_metrics.driver.dataset.DataSet.create_sftlf(
            self.parameter)
        self.default_regions = []  # set by load_default_regions_and_regions_specs()
        self.regions_specs = {}  # set by load_default_regions_and_regions_specs()
    def __call__(self):
        # Allow the driver instance itself to be used as the entry point.
        self.run_diags()
    def run_diags(self):
        ''' Runs the diagnostics. What did you think it did? '''
        self.obs_dict = self.load_obs_dict()
        self.regions_dict = self.create_regions_dict()
        # NOTE: the loop target is deliberately the *attribute*
        # self.var_name_long so helper methods can read the current value.
        for self.var_name_long in self.parameter.vars:
            # e.g. 'tas_850' -> 'tas' (anything after the first '_' is
            # dropped here).
            self.var = self.var_name_long.split('_')[0]
            if self.var not in self.obs_dict:
                logging.getLogger("pcmdi_metrics").error(
                    'Variable %s not in obs_dict' % self.var)
                continue
            for region in self.regions_dict[self.var]:
                logging.getLogger("pcmdi_metrics").info("REGION: {}".format(region))
                self.region = self.create_region(region)
                self.run_reference_and_test_comparison()
    def load_obs_dict(self):
        ''' Loads obs_info_dictionary.json and appends
        custom_observations from the parameter file if needed. '''
        obs_file_name = 'obs_info_dictionary.json'
        obs_json_file = pcmdi_metrics.driver.dataset.DataSet.load_path_as_file_obj(
            obs_file_name)
        obs_dict = json.loads(obs_json_file.read())
        obs_json_file.close()
        if hasattr(self.parameter, 'custom_observations'):
            # Can't use load_path_as_file_obj() b/c might not be in /share/
            cust_obs_json_file = open(self.parameter.custom_observations)
            obs_dict.update(json.load(cust_obs_json_file))
            cust_obs_json_file.close()
        return obs_dict
    def create_regions_dict(self):
        ''' Creates a dict from self.default_regions.

        Maps each short variable name to its list of regions, taken from
        the parameter file or falling back to the defaults.
        '''
        self.load_default_regions_and_regions_specs()
        regions_dict = {}
        for var_name_long in self.parameter.vars:
            var = var_name_long.split('_')[0]
            regions = self.parameter.regions
            region = regions.get(var, self.default_regions)
            if not isinstance(region, (list, tuple)):
                region = [region]
            if None in region:
                # None acts as a placeholder meaning "prepend the defaults".
                region.remove(None)
                for r in self.default_regions:
                    region.insert(0, r)
            regions_dict[var] = region
        return regions_dict
    def load_default_regions_and_regions_specs(self):
        ''' Gets the default_regions dict and regions_specs dict
        from default_regions.py and stores them as attributes. '''
        default_regions_file = \
            pcmdi_metrics.driver.dataset.DataSet.load_path_as_file_obj(
                'default_regions.py')
        # Execute default_regions.py in this frame and pick the two dicts
        # it defines out of locals().
        # NOTE(review): relying on exec() populating locals() is fragile in
        # Python 3 — confirm this still works on the target runtime.
        exec(compile(open(default_regions_file.name).read(),
                     default_regions_file.name, 'exec'))
        default_regions_file.close()
        try:
            self.default_regions = locals()['default_regions']
            self.regions_specs = locals()['regions_specs']
        except KeyError:
            logging.getLogger("pcmdi_metrics").error(
                'Failed to open default_regions.py')
        region_values = self.parameter.regions_values
        region_values.update(getattr(self.parameter, "regions_values", {}))
        # Now need to edit regions_specs
        for region in region_values:
            insert_dict = {'value': region_values[region]}
            if region in self.regions_specs:
                self.regions_specs[region].update(insert_dict)
            else:
                self.regions_specs[region] = insert_dict
        self.regions_specs.update(getattr(self.parameter,
                                          "regions_specs", {}))
    def create_region(self, region):
        ''' From the argument region, it gets that region from self.regions_specs
        (which itself is loaded from default_regions.py) '''
        if isinstance(region, str):
            region_name = region
            # Fall back to a lower-cased lookup when the exact name is absent.
            region = self.regions_specs.get(
                region_name,
                self.regions_specs.get(region_name.lower()))
            region['id'] = region_name
        elif region is None:
            # It's okay if region == None
            pass
        else:
            raise Exception('Unknown region: %s' % region)
        return region
    def run_reference_and_test_comparison(self):
        ''' Does the (obs or model) vs (obs or model) comparison. '''
        reference_data_set = self.parameter.reference_data_set
        test_data_set = self.parameter.test_data_set
        reference_data_set_is_obs = self.is_data_set_obs(reference_data_set)
        test_data_set_is_obs = self.is_data_set_obs(test_data_set)
        # If either the reference or test are obs, the data sets
        # themselves need to be modified.
        if reference_data_set_is_obs:
            reference_data_set = Observation.setup_obs_list_from_parameter(
                reference_data_set, self.obs_dict, self.var)
        if test_data_set_is_obs:
            test_data_set = Observation.setup_obs_list_from_parameter(
                test_data_set, self.obs_dict, self.var)
        if len(reference_data_set) == 0:  # We did not find any ref!!!
            raise RuntimeError("No reference dataset found!")
        # self.reference/self.test are either an obs or model
        for reference in reference_data_set:
            try:
                ref = self.determine_obs_or_model(reference_data_set_is_obs,
                                                  reference, self.parameter.reference_data_path)
            # TODO Make this a custom exception. This exception is for
            # when a model doesn't have sftlf for a given region
            except RuntimeError:
                continue
            for test in test_data_set:
                logging.getLogger("pcmdi_metrics").info("TEST DATA IS: {}".format(test))
                self.output_metric = OutputMetrics(self.parameter, self.var_name_long,
                                                   self.obs_dict, sftlf=self.sftlf)
                self.output_metric.add_region(self.region)
                try:
                    tst = self.determine_obs_or_model(test_data_set_is_obs,
                                                      test, self.parameter.test_data_path)
                    self.output_metric.obs_or_model = tst.obs_or_model
                # TODO Make this a custom exception. This exception is for
                # when a model doesn't have sftlf for a given region
                except RuntimeError:
                    continue
                except Exception as err:
                    logging.getLogger("pcmdi_metrics").info("Unexpected error: {e}".format(e=err))
                    break
                try:
                    self.output_metric.calculate_and_output_metrics(ref, tst)
                except RuntimeError:
                    continue
                except Exception as err:
                    err_msg = "Unexpected error in calculate output metrics: {e}".format(e=err)
                    logging.getLogger("pcmdi_metrics").info(err_msg)
                    break
    def is_data_set_obs(self, data_set):
        ''' Is data_set (which is either a test or reference) an obs? '''
        if 'all' in data_set:
            return True
        data_set_is_obs = True
        # If an element of data_set is not in the obs_dict, then
        # data_set is a model.
        for obs in data_set:
            if obs not in self.obs_dict[self.var]:
                data_set_is_obs = False
                break
        return data_set_is_obs
    def determine_obs_or_model(self, is_obs, ref_or_test, data_path):
        ''' Actually create Observation or Module object
        based on if ref_or_test is an obs or model. '''
        if is_obs:
            logging.getLogger("pcmdi_metrics").info(
                '%s is an obs' % ref_or_test)
            return Observation(self.parameter, self.var_name_long, self.region,
                               ref_or_test, self.obs_dict, data_path, self.sftlf)
        else:
            logging.getLogger("pcmdi_metrics").info(
                '%s is a model' % ref_or_test)
            return Model(self.parameter, self.var_name_long, self.region,
                         ref_or_test, self.obs_dict, data_path, self.sftlf)
def create_mean_climate_parser():
    """Create the argument parser for the PMP mean climate driver.

    All options are optional on the command line because values may also be
    supplied through a parameter file handled by PMPMetricsParser.

    :return: a PMPMetricsParser populated with all mean-climate options
    """
    parser = pcmdi_metrics.driver.pmp_parser.PMPMetricsParser()
    parser.add_argument(
        '--case_id',
        dest='case_id',
        # NOTE: trailing space is required so the concatenated help text
        # reads "multiple cases", not "multiplecases".
        help='Defines a subdirectory to the metrics output, so multiple ' +
        'cases can be compared',
        required=False)
    parser.add_argument(
        '-v', '--vars',
        type=str,
        nargs='+',
        dest='vars',
        help='Variables to use',
        required=False)
    parser.add_argument(
        '--regions',
        type=ast.literal_eval,
        dest='regions',
        help='Regions on which to run the metrics',
        required=False)
    parser.add_argument(
        '--regions_values',
        type=ast.literal_eval,
        dest='regions_values',
        help='Users can customize regions values names',
        required=False)
    parser.add_argument(
        '-r', '--reference_data_set',
        type=str,
        nargs='+',
        dest='reference_data_set',
        help='List of observations or models that are used as a ' +
        'reference against the test_data_set',
        required=False)
    parser.add_argument(
        '--reference_data_path',
        dest='reference_data_path',
        help='Path for the reference climatologies',
        required=False)
    parser.add_argument(
        '-t', '--test_data_set',
        type=str,
        nargs='+',
        dest='test_data_set',
        help='List of observations or models to test ' +
        'against the reference_data_set',
        required=False)
    parser.add_argument(
        '--test_data_path',
        dest='test_data_path',
        help='Path for the test climatologies',
        required=False)
    parser.add_argument(
        '--target_grid',
        dest='target_grid',
        help='Options are "2.5x2.5" or an actual cdms2 grid object',
        required=False)
    parser.add_argument(
        '--regrid_tool',
        dest='regrid_tool',
        help='Options are "regrid2" or "esmf"',
        required=False)
    parser.add_argument(
        '--regrid_method',
        dest='regrid_method',
        help='Options are "linear" or "conservative", ' +
        'only if regrid_tool is "esmf"',
        required=False)
    parser.add_argument(
        '--regrid_tool_ocn',
        dest='regrid_tool_ocn',
        help='Options are "regrid2" or "esmf"',
        required=False)
    parser.add_argument(
        '--regrid_method_ocn',
        dest='regrid_method_ocn',
        help='Options are "linear" or "conservative", ' +
        'only if regrid_tool is "esmf"',
        required=False)
    parser.add_argument(
        '--period',
        dest='period',
        help='A simulation parameter',
        required=False)
    parser.add_argument(
        '--realization',
        dest='realization',
        help='A simulation parameter',
        required=False)
    parser.add_argument(
        '--simulation_description_mapping',
        type=ast.literal_eval,
        dest='simulation_description_mapping',
        # Help text fixed: it was copy-pasted from --test_data_set.
        help='Mapping used to build the simulation description metadata',
        default={},
        required=False)
    parser.add_argument(
        '--ext',
        dest='ext',
        help='Extension for the output files?',
        required=False)
    parser.add_argument(
        '--dry_run',
        # If input is 'True' or 'true', return True. Otherwise False.
        type=lambda x: x.lower() == 'true',
        dest='dry_run',
        help='True if output is to be created, False otherwise',
        required=False)
    parser.add_argument(
        '--filename_template',
        dest='filename_template',
        help='Template for climatology files',
        required=False)
    parser.add_argument(
        '--sftlf_filename_template',
        dest='sftlf_filename_template',
        help='Filename template for landsea masks ("sftlf")',
        required=False)
    parser.add_argument(
        '--custom_observations',
        dest='custom_observations',
        help='Path to an alternative, custom observation file',
        required=False)
    parser.add_argument(
        '--metrics_output_path',
        dest='metrics_output_path',
        help='Directory of where to put the results',
        required=False)
    parser.add_argument(
        '--filename_output_template',
        dest='filename_output_template',
        help='Filename for the interpolated test climatologies',
        required=False)
    parser.add_argument(
        '--save_test_clims',
        # If input is 'True' or 'true', return True. Otherwise False.
        type=lambda x: x.lower() == 'true',
        dest='save_test_clims',
        help='True if to save interpolated test climatologies,' +
        ' otherwise False',
        required=False)
    parser.add_argument(
        '--test_clims_interpolated_output',
        dest='test_clims_interpolated_output',
        help='Directory of where to put the interpolated ' +
        'test climatologies',
        required=False)
    parser.add_argument(
        '--output_json_template',
        help='Filename template for results json files',
        required=False)
    parser.add_argument(
        '--user_notes',
        dest='user_notes',
        help='Provide a short description to help identify this run of the PMP mean climate.',
        required=False)
    # --cmec / --no_cmec are a complementary flag pair writing to the same
    # dest; the last one given on the command line wins.
    parser.add_argument(
        '--cmec',
        dest='cmec',
        action='store_true',
        help='Save metrics in CMEC format',
        default=False,
        required=False)
    parser.add_argument(
        '--no_cmec',
        dest='cmec',
        action='store_false',
        help='Option to not save metrics in CMEC format',
        default=False,
        required=False)
    return parser
from distutils.version import StrictVersion
import os
import re
import subprocess
from typing import List
from typing import Optional
from typing import Set
from typing import Union
import lttng
from .names import CONTEXT_TYPE_CONSTANTS_MAP
from .names import DEFAULT_CONTEXT
from .names import DEFAULT_EVENTS_KERNEL
from .names import DEFAULT_EVENTS_ROS
def get_version() -> Union[StrictVersion, None]:
    """
    Get the version of the lttng module.
    The module has no __version__ attribute, but its __doc__ states the
    version in a consistent format across versions, so parse it from there.
    :return: the version as a StrictVersion object, or `None` if it cannot be extracted
    """
    non_empty_lines = [line for line in lttng.__doc__.split('\n') if line]
    if not non_empty_lines:
        return None
    # Expected shape: "<name> <X.Y.Z> ..." on the first non-empty line
    candidate = non_empty_lines[0].split(' ')[1]
    if re.match(r'^[0-9]+\.[0-9]+\.[0-9]+$', candidate) is None:
        return None
    return StrictVersion(candidate)
def setup(
    session_name: str,
    base_path: str,
    ros_events: Union[List[str], Set[str]] = DEFAULT_EVENTS_ROS,
    kernel_events: Union[List[str], Set[str]] = DEFAULT_EVENTS_KERNEL,
    context_names: Union[List[str], Set[str]] = DEFAULT_CONTEXT,
    channel_name_ust: str = 'ros2',
    channel_name_kernel: str = 'kchan',
) -> Optional[str]:
    """
    Set up LTTng session, with events and context.
    See: https://lttng.org/docs/#doc-core-concepts
    :param session_name: the name of the session
    :param base_path: the path to the directory in which to create the tracing session directory
    :param ros_events: list of ROS events to enable
    :param kernel_events: list of kernel events to enable
    :param context_names: list of context elements to enable
    :param channel_name_ust: the UST channel name
    :param channel_name_kernel: the kernel channel name
    :return: the full path to the trace directory
    """
    # Check if there is a session daemon running
    if lttng.session_daemon_alive() == 0:
        # Otherwise spawn one without doing any error checks
        subprocess.run(
            ['lttng-sessiond', '--daemonize'],
        )
    # Convert lists to sets
    if not isinstance(ros_events, set):
        ros_events = set(ros_events)
    if not isinstance(kernel_events, set):
        kernel_events = set(kernel_events)
    if not isinstance(context_names, set):
        context_names = set(context_names)
    # Resolve full tracing directory path
    full_path = os.path.join(base_path, session_name)
    # An empty (or None) event set disables the corresponding domain entirely
    ust_enabled = ros_events is not None and len(ros_events) > 0
    kernel_enabled = kernel_events is not None and len(kernel_events) > 0
    # Domains
    if ust_enabled:
        domain_ust = lttng.Domain()
        domain_ust.type = lttng.DOMAIN_UST
        # Per-user buffer
        domain_ust.buf_type = lttng.BUFFER_PER_UID
        channel_ust = lttng.Channel()
        channel_ust.name = channel_name_ust
        # Discard, do not overwrite
        channel_ust.attr.overwrite = 0
        # 8 sub-buffers of 2 times the usual page size
        channel_ust.attr.subbuf_size = 2 * 4096
        channel_ust.attr.num_subbuf = 8
        # Ignore switch timer interval and use read timer instead
        channel_ust.attr.switch_timer_interval = 0
        channel_ust.attr.read_timer_interval = 200
        # mmap channel output instead of splice
        channel_ust.attr.output = lttng.EVENT_MMAP
        events_list_ust = _create_events(ros_events)
    if kernel_enabled:
        domain_kernel = lttng.Domain()
        domain_kernel.type = lttng.DOMAIN_KERNEL
        # Global buffer (only option for kernel domain)
        domain_kernel.buf_type = lttng.BUFFER_GLOBAL
        channel_kernel = lttng.Channel()
        channel_kernel.name = channel_name_kernel
        # Discard, do not overwrite
        channel_kernel.attr.overwrite = 0
        # 8 sub-buffers of 8 times the usual page size, since
        # there can be way more kernel events than UST events
        channel_kernel.attr.subbuf_size = 8 * 4096
        channel_kernel.attr.num_subbuf = 8
        # Ignore switch timer interval and use read timer instead
        channel_kernel.attr.switch_timer_interval = 0
        channel_kernel.attr.read_timer_interval = 200
        # mmap channel output instead of splice
        channel_kernel.attr.output = lttng.EVENT_MMAP
        events_list_kernel = _create_events(kernel_events)
    # Session must exist before any handle can be created for it
    _create_session(session_name, full_path)
    # Handles, channels, events
    handle_ust = None
    if ust_enabled:
        handle_ust = _create_handle(session_name, domain_ust)
        _enable_channel(handle_ust, channel_ust)
        _enable_events(handle_ust, events_list_ust, channel_ust.name)
    handle_kernel = None
    if kernel_enabled:
        handle_kernel = _create_handle(session_name, domain_kernel)
        _enable_channel(handle_kernel, channel_kernel)
        _enable_events(handle_kernel, events_list_kernel, channel_kernel.name)
    # Context
    context_list = _create_context_list(context_names)
    # TODO make it possible to add context in userspace and kernel separately, since some context
    # types might only apply to userspace OR kernel; only consider userspace contexts for now
    handles_context = [handle_ust]
    # Filter out handles for disabled domains (None entries)
    enabled_handles: List[lttng.Handle] = list(filter(None, handles_context))
    _add_context(enabled_handles, context_list)
    return full_path
def start(
    session_name: str,
) -> None:
    """
    Start the given LTTng tracing session, raising on failure.
    :param session_name: the name of the session
    """
    status = lttng.start(session_name)
    if status >= 0:
        return
    raise RuntimeError(f'failed to start tracing: {lttng.strerror(status)}')
def stop(
    session_name: str,
) -> None:
    """
    Stop the given LTTng tracing session, raising on failure.
    :param session_name: the name of the session
    """
    status = lttng.stop(session_name)
    if status >= 0:
        return
    raise RuntimeError(f'failed to stop tracing: {lttng.strerror(status)}')
def destroy(
    session_name: str,
) -> None:
    """
    Destroy the given LTTng tracing session, raising on failure.
    :param session_name: the name of the session
    """
    status = lttng.destroy(session_name)
    if status >= 0:
        return
    raise RuntimeError(f'failed to destroy tracing session: {lttng.strerror(status)}')
def _create_events(
    event_names: Set[str],
) -> List[lttng.Event]:
    """
    Create a list of event objects from a set of event names.
    :param event_names: a set of names to create events for
    :return: the list of events
    """
    def _make_event(name: str) -> lttng.Event:
        # Every event is a tracepoint enabled at all log levels
        event = lttng.Event()
        event.name = name
        event.type = lttng.EVENT_TRACEPOINT
        event.loglevel_type = lttng.EVENT_LOGLEVEL_ALL
        return event

    return [_make_event(name) for name in event_names]
def _create_session(
    session_name: str,
    full_path: str,
) -> None:
    """
    Create a session from a name and full directory path, raising on failure.
    :param session_name: the name of the session
    :param full_path: the full path to the main directory to write trace data to
    """
    # lttng error code for "session already exists"
    err_exist_sess = 28
    status = lttng.create(session_name, full_path)
    if status == -err_exist_sess:
        # Sessions seem to persist, so if one already exists,
        # destroy the stale one and try again
        destroy(session_name)
        status = lttng.create(session_name, full_path)
    if status < 0:
        raise RuntimeError(f'session creation failed: {lttng.strerror(status)}')
def _create_handle(
    session_name: str,
    domain: lttng.Domain,
) -> lttng.Handle:
    """
    Create a handle for a given session name and a domain, and check for errors.
    :param session_name: the name of the session
    :param domain: the domain to be used
    :return: the handle
    :raises RuntimeError: if handle creation fails
    """
    # (removed a dead `handle = None` assignment that was immediately
    # overwritten by the constructor call)
    handle = lttng.Handle(session_name, domain)
    if handle is None:
        raise RuntimeError('handle creation failed')
    return handle
def _enable_channel(
    handle: lttng.Handle,
    channel: lttng.Channel,
) -> None:
    """
    Enable a channel on a handle, raising on failure.
    :param handle: the handle to be used
    :param channel: the channel to enable
    """
    status = lttng.enable_channel(handle, channel)
    if status >= 0:
        return
    raise RuntimeError(f'channel enabling failed: {lttng.strerror(status)}')
def _enable_events(
    handle: lttng.Handle,
    events_list: List[lttng.Event],
    channel_name: str,
) -> None:
    """
    Enable each event in a list on a handle/channel, raising on failure.
    :param handle: the handle to be used
    :param events_list: the list of events to enable
    :param channel_name: the name of the channel to associate
    """
    for ev in events_list:
        status = lttng.enable_event(handle, ev, channel_name)
        if status < 0:
            raise RuntimeError(f'event enabling failed: {lttng.strerror(status)}')
# Map each generic context name to the corresponding lttng constant, if this
# lttng version defines it; names with no constant (or whose constant is
# missing from the module) map to None.
context_map = {
    name: getattr(lttng, name_constant, None) if name_constant is not None else None
    for name, name_constant in CONTEXT_TYPE_CONSTANTS_MAP.items()
}
def _context_name_to_type(
    context_name: str,
) -> Union[int, None]:
    """
    Convert from context name to LTTng enum/constant type.
    :param context_name: the generic name for the context
    :return: the associated type, or `None` if it cannot be found
    """
    # dict.get already yields None for unknown names
    return context_map.get(context_name)
def _create_context_list(
    context_names: Set[str],
) -> List[lttng.EventContext]:
    """
    Create an event-context list from names, raising for unknown names.
    :param context_names: the set of context names
    :return: the event context list
    """
    contexts = []
    for name in context_names:
        event_context = lttng.EventContext()
        ctx_type = _context_name_to_type(name)
        if ctx_type is None:
            raise RuntimeError(f'failed to find context type: {name}')
        event_context.ctx = ctx_type
        contexts.append(event_context)
    return contexts
def _add_context(
    handles: List[lttng.Handle],
    context_list: List[lttng.EventContext],
) -> None:
    """
    Add every context in the list to every handle, raising on failure.
    :param handles: the list of handles for which to add context
    :param context_list: the list of event contexts to add to the handles
    """
    for handle in handles:
        for context in context_list:
            status = lttng.add_context(handle, context, None, None)
            if status < 0:
                raise RuntimeError(f'failed to add context: {lttng.strerror(status)}')
import os
import re
import subprocess
from typing import List
from typing import Optional
from typing import Set
from typing import Union
import lttng
from .names import CONTEXT_TYPE_CONSTANTS_MAP
from .names import DEFAULT_CONTEXT
from .names import DEFAULT_EVENTS_KERNEL
from .names import DEFAULT_EVENTS_ROS
def get_version() -> Union[StrictVersion, None]:
    """
    Get the version of the lttng module.
    The module has no __version__ attribute, but its __doc__ states the
    version in a consistent format across versions, so parse it from there.
    :return: the version as a StrictVersion object, or `None` if it cannot be extracted
    """
    non_empty_lines = [line for line in lttng.__doc__.split('\n') if line]
    if not non_empty_lines:
        return None
    # Expected shape: "<name> <X.Y.Z> ..." on the first non-empty line
    candidate = non_empty_lines[0].split(' ')[1]
    if re.match(r'^[0-9]+\.[0-9]+\.[0-9]+$', candidate) is None:
        return None
    return StrictVersion(candidate)
def setup(
    session_name: str,
    base_path: str,
    ros_events: Union[List[str], Set[str]] = DEFAULT_EVENTS_ROS,
    kernel_events: Union[List[str], Set[str]] = DEFAULT_EVENTS_KERNEL,
    context_names: Union[List[str], Set[str]] = DEFAULT_CONTEXT,
    channel_name_ust: str = 'ros2',
    channel_name_kernel: str = 'kchan',
) -> Optional[str]:
    """
    Set up LTTng session, with events and context.
    See: https://lttng.org/docs/#doc-core-concepts
    :param session_name: the name of the session
    :param base_path: the path to the directory in which to create the tracing session directory
    :param ros_events: list of ROS events to enable
    :param kernel_events: list of kernel events to enable
    :param context_names: list of context elements to enable
    :param channel_name_ust: the UST channel name
    :param channel_name_kernel: the kernel channel name
    :return: the full path to the trace directory
    """
    # Check if there is a session daemon running
    if lttng.session_daemon_alive() == 0:
        # Otherwise spawn one without doing any error checks
        subprocess.run(
            ['lttng-sessiond', '--daemonize'],
        )
    # Convert lists to sets
    if not isinstance(ros_events, set):
        ros_events = set(ros_events)
    if not isinstance(kernel_events, set):
        kernel_events = set(kernel_events)
    if not isinstance(context_names, set):
        context_names = set(context_names)
    # Resolve full tracing directory path
    full_path = os.path.join(base_path, session_name)
    # An empty (or None) event set disables the corresponding domain entirely
    ust_enabled = ros_events is not None and len(ros_events) > 0
    kernel_enabled = kernel_events is not None and len(kernel_events) > 0
    # Domains
    if ust_enabled:
        domain_ust = lttng.Domain()
        domain_ust.type = lttng.DOMAIN_UST
        # Per-user buffer
        domain_ust.buf_type = lttng.BUFFER_PER_UID
        channel_ust = lttng.Channel()
        channel_ust.name = channel_name_ust
        # Discard, do not overwrite
        channel_ust.attr.overwrite = 0
        # 8 sub-buffers of 2 times the usual page size
        channel_ust.attr.subbuf_size = 2 * 4096
        channel_ust.attr.num_subbuf = 8
        # Ignore switch timer interval and use read timer instead
        channel_ust.attr.switch_timer_interval = 0
        channel_ust.attr.read_timer_interval = 200
        # mmap channel output instead of splice
        channel_ust.attr.output = lttng.EVENT_MMAP
        events_list_ust = _create_events(ros_events)
    if kernel_enabled:
        domain_kernel = lttng.Domain()
        domain_kernel.type = lttng.DOMAIN_KERNEL
        # Global buffer (only option for kernel domain)
        domain_kernel.buf_type = lttng.BUFFER_GLOBAL
        channel_kernel = lttng.Channel()
        channel_kernel.name = channel_name_kernel
        # Discard, do not overwrite
        channel_kernel.attr.overwrite = 0
        # 8 sub-buffers of 8 times the usual page size, since
        # there can be way more kernel events than UST events
        channel_kernel.attr.subbuf_size = 8 * 4096
        channel_kernel.attr.num_subbuf = 8
        # Ignore switch timer interval and use read timer instead
        channel_kernel.attr.switch_timer_interval = 0
        channel_kernel.attr.read_timer_interval = 200
        # mmap channel output instead of splice
        channel_kernel.attr.output = lttng.EVENT_MMAP
        events_list_kernel = _create_events(kernel_events)
    # Session must exist before any handle can be created for it
    _create_session(session_name, full_path)
    # Handles, channels, events
    handle_ust = None
    if ust_enabled:
        handle_ust = _create_handle(session_name, domain_ust)
        _enable_channel(handle_ust, channel_ust)
        _enable_events(handle_ust, events_list_ust, channel_ust.name)
    handle_kernel = None
    if kernel_enabled:
        handle_kernel = _create_handle(session_name, domain_kernel)
        _enable_channel(handle_kernel, channel_kernel)
        _enable_events(handle_kernel, events_list_kernel, channel_kernel.name)
    # Context
    context_list = _create_context_list(context_names)
    # TODO make it possible to add context in userspace and kernel separately, since some context
    # types might only apply to userspace OR kernel; only consider userspace contexts for now
    handles_context = [handle_ust]
    # Filter out handles for disabled domains (None entries)
    enabled_handles: List[lttng.Handle] = list(filter(None, handles_context))
    _add_context(enabled_handles, context_list)
    return full_path
def start(
    session_name: str,
) -> None:
    """
    Start the given LTTng tracing session, raising on failure.
    :param session_name: the name of the session
    """
    status = lttng.start(session_name)
    if status >= 0:
        return
    raise RuntimeError(f'failed to start tracing: {lttng.strerror(status)}')
def stop(
    session_name: str,
) -> None:
    """
    Stop the given LTTng tracing session, raising on failure.
    :param session_name: the name of the session
    """
    status = lttng.stop(session_name)
    if status >= 0:
        return
    raise RuntimeError(f'failed to stop tracing: {lttng.strerror(status)}')
def destroy(
    session_name: str,
) -> None:
    """
    Destroy the given LTTng tracing session, raising on failure.
    :param session_name: the name of the session
    """
    status = lttng.destroy(session_name)
    if status >= 0:
        return
    raise RuntimeError(f'failed to destroy tracing session: {lttng.strerror(status)}')
def _create_events(
    event_names: Set[str],
) -> List[lttng.Event]:
    """
    Create a list of event objects from a set of event names.
    :param event_names: a set of names to create events for
    :return: the list of events
    """
    def _make_event(name: str) -> lttng.Event:
        # Every event is a tracepoint enabled at all log levels
        event = lttng.Event()
        event.name = name
        event.type = lttng.EVENT_TRACEPOINT
        event.loglevel_type = lttng.EVENT_LOGLEVEL_ALL
        return event

    return [_make_event(name) for name in event_names]
def _create_session(
    session_name: str,
    full_path: str,
) -> None:
    """
    Create a session from a name and full directory path, raising on failure.
    :param session_name: the name of the session
    :param full_path: the full path to the main directory to write trace data to
    """
    # lttng error code for "session already exists"
    err_exist_sess = 28
    status = lttng.create(session_name, full_path)
    if status == -err_exist_sess:
        # Sessions seem to persist, so if one already exists,
        # destroy the stale one and try again
        destroy(session_name)
        status = lttng.create(session_name, full_path)
    if status < 0:
        raise RuntimeError(f'session creation failed: {lttng.strerror(status)}')
def _create_handle(
    session_name: str,
    domain: lttng.Domain,
) -> lttng.Handle:
    """
    Create a handle for a given session name and a domain, and check for errors.
    :param session_name: the name of the session
    :param domain: the domain to be used
    :return: the handle
    :raises RuntimeError: if handle creation fails
    """
    # (removed a dead `handle = None` assignment that was immediately
    # overwritten by the constructor call)
    handle = lttng.Handle(session_name, domain)
    if handle is None:
        raise RuntimeError('handle creation failed')
    return handle
def _enable_channel(
    handle: lttng.Handle,
    channel: lttng.Channel,
) -> None:
    """
    Enable a channel on a handle, raising on failure.
    :param handle: the handle to be used
    :param channel: the channel to enable
    """
    status = lttng.enable_channel(handle, channel)
    if status >= 0:
        return
    raise RuntimeError(f'channel enabling failed: {lttng.strerror(status)}')
def _enable_events(
    handle: lttng.Handle,
    events_list: List[lttng.Event],
    channel_name: str,
) -> None:
    """
    Enable each event in a list on a handle/channel, raising on failure.
    :param handle: the handle to be used
    :param events_list: the list of events to enable
    :param channel_name: the name of the channel to associate
    """
    for ev in events_list:
        status = lttng.enable_event(handle, ev, channel_name)
        if status < 0:
            raise RuntimeError(f'event enabling failed: {lttng.strerror(status)}')
# Map each generic context name to the corresponding lttng constant, if this
# lttng version defines it; names with no constant (or whose constant is
# missing from the module) map to None.
context_map = {
    name: getattr(lttng, name_constant, None) if name_constant is not None else None
    for name, name_constant in CONTEXT_TYPE_CONSTANTS_MAP.items()
}
def _context_name_to_type(
    context_name: str,
) -> Union[int, None]:
    """
    Convert from context name to LTTng enum/constant type.
    :param context_name: the generic name for the context
    :return: the associated type, or `None` if it cannot be found
    """
    # dict.get already yields None for unknown names
    return context_map.get(context_name)
def _create_context_list(
    context_names: Set[str],
) -> List[lttng.EventContext]:
    """
    Create an event-context list from names, raising for unknown names.
    :param context_names: the set of context names
    :return: the event context list
    """
    contexts = []
    for name in context_names:
        event_context = lttng.EventContext()
        ctx_type = _context_name_to_type(name)
        if ctx_type is None:
            raise RuntimeError(f'failed to find context type: {name}')
        event_context.ctx = ctx_type
        contexts.append(event_context)
    return contexts
def _add_context(
    handles: List[lttng.Handle],
    context_list: List[lttng.EventContext],
) -> None:
    """
    Add every context in the list to every handle, raising on failure.
    :param handles: the list of handles for which to add context
    :param context_list: the list of event contexts to add to the handles
    """
    for handle in handles:
        for context in context_list:
            status = lttng.add_context(handle, context, None, None)
            if status < 0:
                raise RuntimeError(f'failed to add context: {lttng.strerror(status)}')
import os
import pytest
from llnl.util.link_tree import MergeConflictError
import spack.package
import spack.spec
from spack.directory_layout import DirectoryLayout
from spack.filesystem_view import YamlFilesystemView
from spack.repo import RepoPath
def create_ext_pkg(name, prefix, extendee_spec, monkeypatch):
    """Build a concrete extension package with the given prefix and extendee."""
    spec = spack.spec.Spec(name)
    spec._concrete = True
    spec.package.spec.prefix = prefix
    pkg = spec.package
    # temporarily override the extendee_spec property on the package class
    monkeypatch.setattr(pkg.__class__, "extendee_spec", extendee_spec)
    return pkg
def create_python_ext_pkg(name, prefix, python_spec, monkeypatch,
                          namespace=None):
    """Build a python extension package, recording its python namespace."""
    pkg = create_ext_pkg(name, prefix, python_spec, monkeypatch)
    pkg.py_namespace = namespace
    return pkg
def create_dir_structure(tmpdir, dir_structure):
    """Recursively create entries under tmpdir; keys ending in '/' are dirs."""
    for entry, subtree in dir_structure.items():
        tmpdir.ensure(entry, dir=entry.endswith('/'))
        if subtree:
            create_dir_structure(tmpdir.join(entry), subtree)
@pytest.fixture()
def builtin_and_mock_packages():
    """Yield with a repo path layering the builtin repo over the mock repo."""
    # These tests use mock_repo packages to test functionality of builtin
    # packages for python and perl. To test this we put the mock repo at lower
    # precedence than the builtin repo, so we test builtin.perl against
    # builtin.mock.perl-extension.
    repo_dirs = [spack.paths.packages_path, spack.paths.mock_packages_path]
    path = RepoPath(*repo_dirs)
    # Swap in the combined repositories only for the duration of the test
    with spack.repo.use_repositories(path):
        yield
@pytest.fixture()
def python_and_extension_dirs(tmpdir, builtin_and_mock_packages):
    """Create fake on-disk prefixes for a python install and one extension.

    Returns (python_prefix, extension_prefix) as strings. The extension
    prefix includes an easy-install.pth listing one extension egg and a
    setuptools egg, so tests can check how activation merges .pth files.
    """
    python_dirs = {
        'bin/': {
            'python': None
        },
        'lib/': {
            'python2.7/': {
                'site-packages/': None
            }
        }
    }
    python_name = 'python'
    python_prefix = tmpdir.join(python_name)
    create_dir_structure(python_prefix, python_dirs)
    python_spec = spack.spec.Spec('python@2.7.12')
    python_spec._concrete = True
    python_spec.package.spec.prefix = str(python_prefix)
    ext_dirs = {
        'bin/': {
            'py-ext-tool': None
        },
        'lib/': {
            'python2.7/': {
                'site-packages/': {
                    'py-extension1/': {
                        'sample.py': None
                    }
                }
            }
        }
    }
    ext_name = 'py-extension1'
    ext_prefix = tmpdir.join(ext_name)
    create_dir_structure(ext_prefix, ext_dirs)
    easy_install_location = 'lib/python2.7/site-packages/easy-install.pth'
    with open(str(ext_prefix.join(easy_install_location)), 'w') as f:
        f.write("""path/to/ext1.egg
path/to/setuptools.egg""")
    return str(python_prefix), str(ext_prefix)
@pytest.fixture()
def namespace_extensions(tmpdir, builtin_and_mock_packages):
    """Create prefixes for two python extensions sharing one namespace.

    Both extensions provide an `examplenamespace/__init__.py`, so tests can
    exercise how activation handles conflicting namespace __init__ files.
    Returns (ext1_prefix, ext2_prefix, namespace_name).
    """
    ext1_dirs = {
        'bin/': {
            'py-ext-tool1': None
        },
        'lib/': {
            'python2.7/': {
                'site-packages/': {
                    'examplenamespace/': {
                        '__init__.py': None,
                        'ext1_sample.py': None
                    }
                }
            }
        }
    }
    ext2_dirs = {
        'bin/': {
            'py-ext-tool2': None
        },
        'lib/': {
            'python2.7/': {
                'site-packages/': {
                    'examplenamespace/': {
                        '__init__.py': None,
                        'ext2_sample.py': None
                    }
                }
            }
        }
    }
    ext1_name = 'py-extension1'
    ext1_prefix = tmpdir.join(ext1_name)
    create_dir_structure(ext1_prefix, ext1_dirs)
    ext2_name = 'py-extension2'
    ext2_prefix = tmpdir.join(ext2_name)
    create_dir_structure(ext2_prefix, ext2_dirs)
    return str(ext1_prefix), str(ext2_prefix), 'examplenamespace'
def test_python_activation_with_files(tmpdir, python_and_extension_dirs,
                                      monkeypatch, builtin_and_mock_packages):
    """Activating an extension merges its files into the python prefix."""
    python_prefix, ext_prefix = python_and_extension_dirs
    python_spec = spack.spec.Spec('python@2.7.12')
    python_spec._concrete = True
    python_spec.package.spec.prefix = python_prefix
    extension = create_python_ext_pkg(
        'py-extension1', ext_prefix, python_spec, monkeypatch)
    python_pkg = python_spec.package
    python_pkg.activate(extension, python_pkg.view())
    assert os.path.exists(os.path.join(python_prefix, 'bin/py-ext-tool'))
    pth_path = os.path.join(
        python_prefix, 'lib/python2.7/site-packages/easy-install.pth')
    with open(pth_path, 'r') as f:
        pth_contents = f.read()
    # The merged .pth keeps the extension egg but drops the setuptools egg
    assert 'ext1.egg' in pth_contents
    assert 'setuptools.egg' not in pth_contents
def test_python_activation_view(tmpdir, python_and_extension_dirs,
                                builtin_and_mock_packages, monkeypatch):
    """Activating into a view places files in the view, not the prefix."""
    python_prefix, ext_prefix = python_and_extension_dirs
    python_spec = spack.spec.Spec('python@2.7.12')
    python_spec._concrete = True
    python_spec.package.spec.prefix = python_prefix
    extension = create_python_ext_pkg('py-extension1', ext_prefix, python_spec,
                                      monkeypatch)
    view_dir = str(tmpdir.join('view'))
    view = YamlFilesystemView(view_dir, DirectoryLayout(view_dir))
    python_pkg = python_spec.package
    python_pkg.activate(extension, view)
    # The python prefix itself must stay untouched
    assert not os.path.exists(os.path.join(python_prefix, 'bin/py-ext-tool'))
    assert os.path.exists(os.path.join(view_dir, 'bin/py-ext-tool'))
def test_python_ignore_namespace_init_conflict(
        tmpdir, namespace_extensions, builtin_and_mock_packages, monkeypatch):
    """Test the view update logic in PythonPackage ignores conflicting
    instances of __init__ for packages which are in the same namespace.
    """
    ext1_prefix, ext2_prefix, py_namespace = namespace_extensions
    python_spec = spack.spec.Spec('python@2.7.12')
    python_spec._concrete = True
    first_ext = create_python_ext_pkg('py-extension1', ext1_prefix, python_spec,
                                      monkeypatch, py_namespace)
    second_ext = create_python_ext_pkg('py-extension2', ext2_prefix, python_spec,
                                       monkeypatch, py_namespace)
    view_dir = str(tmpdir.join('view'))
    view = YamlFilesystemView(view_dir, DirectoryLayout(view_dir))
    python_pkg = python_spec.package
    python_pkg.activate(first_ext, view)
    # Normally handled by Package.do_activate, but here we activate directly
    view.extensions_layout.add_extension(python_spec, first_ext.spec)
    python_pkg.activate(second_ext, view)
    # Both extensions' modules and the shared __init__ must all be present
    for rel_path in (
        'lib/python2.7/site-packages/examplenamespace/ext1_sample.py',
        'lib/python2.7/site-packages/examplenamespace/ext2_sample.py',
        'lib/python2.7/site-packages/examplenamespace/__init__.py',
    ):
        assert os.path.exists(os.path.join(view_dir, rel_path))
def test_python_keep_namespace_init(
        tmpdir, namespace_extensions, builtin_and_mock_packages, monkeypatch):
    """Test the view update logic in PythonPackage keeps the namespace
    __init__ file as long as one package in the namespace still
    exists.
    """
    ext1_prefix, ext2_prefix, py_namespace = namespace_extensions
    python_spec = spack.spec.Spec('python@2.7.12')
    python_spec._concrete = True
    first_ext = create_python_ext_pkg('py-extension1', ext1_prefix, python_spec,
                                      monkeypatch, py_namespace)
    second_ext = create_python_ext_pkg('py-extension2', ext2_prefix, python_spec,
                                       monkeypatch, py_namespace)
    view_dir = str(tmpdir.join('view'))
    view = YamlFilesystemView(view_dir, DirectoryLayout(view_dir))
    python_pkg = python_spec.package
    python_pkg.activate(first_ext, view)
    # Normally handled by Package.do_activate, but here we activate directly
    view.extensions_layout.add_extension(python_spec, first_ext.spec)
    python_pkg.activate(second_ext, view)
    view.extensions_layout.add_extension(python_spec, second_ext.spec)
    sample_path = 'lib/python2.7/site-packages/examplenamespace/ext1_sample.py'
    init_path = 'lib/python2.7/site-packages/examplenamespace/__init__.py'
    # Removing the first extension keeps the shared namespace __init__
    python_pkg.deactivate(first_ext, view)
    view.extensions_layout.remove_extension(python_spec, first_ext.spec)
    assert not os.path.exists(os.path.join(view_dir, sample_path))
    assert os.path.exists(os.path.join(view_dir, init_path))
    # Removing the last extension finally removes the __init__ as well
    python_pkg.deactivate(second_ext, view)
    view.extensions_layout.remove_extension(python_spec, second_ext.spec)
    assert not os.path.exists(os.path.join(view_dir, init_path))
def test_python_namespace_conflict(tmpdir, namespace_extensions,
                                   monkeypatch, builtin_and_mock_packages):
    """Test the view update logic in PythonPackage reports an error when two
    python extensions with different namespaces have a conflicting __init__
    file.
    """
    ext1_prefix, ext2_prefix, py_namespace = namespace_extensions
    other_namespace = py_namespace + 'other'
    python_spec = spack.spec.Spec('python@2.7.12')
    python_spec._concrete = True
    first_ext = create_python_ext_pkg('py-extension1', ext1_prefix, python_spec,
                                      monkeypatch, py_namespace)
    second_ext = create_python_ext_pkg('py-extension2', ext2_prefix, python_spec,
                                       monkeypatch, other_namespace)
    view_dir = str(tmpdir.join('view'))
    view = YamlFilesystemView(view_dir, DirectoryLayout(view_dir))
    python_pkg = python_spec.package
    python_pkg.activate(first_ext, view)
    view.extensions_layout.add_extension(python_spec, first_ext.spec)
    # The second extension claims a different namespace for the same
    # __init__.py, which must be reported as a merge conflict
    with pytest.raises(MergeConflictError):
        python_pkg.activate(second_ext, view)
@pytest.fixture()
def perl_and_extension_dirs(tmpdir, builtin_and_mock_packages):
    """Create fake on-disk prefixes for a perl install and one extension.

    Returns (perl_prefix, extension_prefix) as strings. The lib layout
    mirrors perl's version-specific site_perl directory structure.
    """
    perl_dirs = {
        'bin/': {
            'perl': None
        },
        'lib/': {
            'site_perl/': {
                '5.24.1/': {
                    'x86_64-linux/': None
                }
            }
        }
    }
    perl_name = 'perl'
    perl_prefix = tmpdir.join(perl_name)
    create_dir_structure(perl_prefix, perl_dirs)
    perl_spec = spack.spec.Spec('perl@5.24.1')
    perl_spec._concrete = True
    perl_spec.package.spec.prefix = str(perl_prefix)
    ext_dirs = {
        'bin/': {
            'perl-ext-tool': None
        },
        'lib/': {
            'site_perl/': {
                '5.24.1/': {
                    'x86_64-linux/': {
                        'TestExt/': {
                        }
                    }
                }
            }
        }
    }
    ext_name = 'perl-extension'
    ext_prefix = tmpdir.join(ext_name)
    create_dir_structure(ext_prefix, ext_dirs)
    return str(perl_prefix), str(ext_prefix)
def test_perl_activation(tmpdir, builtin_and_mock_packages, monkeypatch):
    """Activating a perl extension into perl's own prefix succeeds."""
    # Note the lib directory is based partly on the perl version
    perl_spec = spack.spec.Spec('perl@5.24.1')
    perl_spec._concrete = True
    perl_name = 'perl'
    tmpdir.ensure(perl_name, dir=True)
    perl_prefix = str(tmpdir.join(perl_name))
    # Set the prefix on the package's spec reference because that is a copy of
    # the original spec
    perl_spec.package.spec.prefix = perl_prefix
    ext_name = 'perl-extension'
    tmpdir.ensure(ext_name, dir=True)
    ext_pkg = create_ext_pkg(
        ext_name, str(tmpdir.join(ext_name)), perl_spec, monkeypatch)
    perl_pkg = perl_spec.package
    # No assertion: the test passes if activation raises nothing.
    perl_pkg.activate(ext_pkg, perl_pkg.view())
def test_perl_activation_with_files(tmpdir, perl_and_extension_dirs,
                                    monkeypatch, builtin_and_mock_packages):
    """Global (in-prefix) activation links the extension's files into perl."""
    perl_prefix, ext_prefix = perl_and_extension_dirs
    perl_spec = spack.spec.Spec('perl@5.24.1')
    perl_spec._concrete = True
    perl_spec.package.spec.prefix = perl_prefix
    ext_pkg = create_ext_pkg(
        'perl-extension', ext_prefix, perl_spec, monkeypatch)
    perl_pkg = perl_spec.package
    perl_pkg.activate(ext_pkg, perl_pkg.view())
    # The extension's executable must appear inside perl's own prefix.
    assert os.path.exists(os.path.join(perl_prefix, 'bin/perl-ext-tool'))
def test_perl_activation_view(tmpdir, perl_and_extension_dirs,
                              monkeypatch, builtin_and_mock_packages):
    """Activation into a separate view must not touch perl's prefix."""
    perl_prefix, ext_prefix = perl_and_extension_dirs
    perl_spec = spack.spec.Spec('perl@5.24.1')
    perl_spec._concrete = True
    perl_spec.package.spec.prefix = perl_prefix
    ext_pkg = create_ext_pkg(
        'perl-extension', ext_prefix, perl_spec, monkeypatch)
    view_dir = str(tmpdir.join('view'))
    layout = DirectoryLayout(view_dir)
    view = YamlFilesystemView(view_dir, layout)
    perl_pkg = perl_spec.package
    perl_pkg.activate(ext_pkg, view)
    # Files land in the view, not in the extendee's install prefix.
    assert not os.path.exists(os.path.join(perl_prefix, 'bin/perl-ext-tool'))
    assert os.path.exists(os.path.join(view_dir, 'bin/perl-ext-tool'))
def test_is_activated_upstream_extendee(tmpdir, builtin_and_mock_packages,
                                        monkeypatch):
    """When an extendee is installed upstream, make sure that the extension
    spec is never considered to be globally activated for it.
    """
    extendee_spec = spack.spec.Spec('python')
    extendee_spec._concrete = True
    python_name = 'python'
    tmpdir.ensure(python_name, dir=True)
    python_prefix = str(tmpdir.join(python_name))
    # Set the prefix on the package's spec reference because that is a copy of
    # the original spec
    extendee_spec.package.spec.prefix = python_prefix
    # Pretend the extendee lives in an upstream (read-only) installation.
    monkeypatch.setattr(extendee_spec.package.__class__,
                        'installed_upstream', True)
    ext_name = 'py-extension1'
    tmpdir.ensure(ext_name, dir=True)
    ext_pkg = create_ext_pkg(
        ext_name, str(tmpdir.join(ext_name)), extendee_spec, monkeypatch)
    # The view should not be checked at all if the extendee is installed
    # upstream, so use 'None' here
    mock_view = None
    assert not ext_pkg.is_activated(mock_view)
import pytest
from llnl.util.link_tree import MergeConflictError
import spack.package
import spack.spec
from spack.directory_layout import DirectoryLayout
from spack.filesystem_view import YamlFilesystemView
from spack.repo import RepoPath
def create_ext_pkg(name, prefix, extendee_spec, monkeypatch):
    """Build a concrete package for *name* whose extendee is forced to
    *extendee_spec* and whose install prefix is *prefix*."""
    spec = spack.spec.Spec(name)
    spec._concrete = True
    spec.package.spec.prefix = prefix
    pkg = spec.package
    # temporarily override the extendee_spec property on the package class
    monkeypatch.setattr(pkg.__class__, "extendee_spec", extendee_spec)
    return pkg
def create_python_ext_pkg(name, prefix, python_spec, monkeypatch,
                          namespace=None):
    """Like create_ext_pkg, but also tags the package with a py_namespace."""
    pkg = create_ext_pkg(name, prefix, python_spec, monkeypatch)
    pkg.py_namespace = namespace
    return pkg
def create_dir_structure(tmpdir, dir_structure):
    """Materialize a nested ``{name: children}`` spec under *tmpdir*.

    Names ending in '/' are created as directories; a truthy children
    mapping recurses into the freshly created entry.
    """
    for entry, subtree in dir_structure.items():
        tmpdir.ensure(entry, dir=entry.endswith('/'))
        if subtree:
            create_dir_structure(tmpdir.join(entry), subtree)
@pytest.fixture()
def builtin_and_mock_packages():
    """Run the test with builtin AND mock package repositories active."""
    # These tests use mock_repo packages to test functionality of builtin
    # packages for python and perl. To test this we put the mock repo at lower
    # precedence than the builtin repo, so we test builtin.perl against
    # builtin.mock.perl-extension.
    repo_dirs = [spack.paths.packages_path, spack.paths.mock_packages_path]
    path = RepoPath(*repo_dirs)
    with spack.repo.use_repositories(path):
        yield
@pytest.fixture()
def python_and_extension_dirs(tmpdir, builtin_and_mock_packages):
    """Create skeleton prefixes for python and one python extension.

    Returns ``(python_prefix, ext_prefix)`` as strings.  The extension
    ships an easy-install.pth referencing its own egg plus a setuptools
    egg; tests expect activation to merge the former and drop the latter.
    """
    python_dirs = {
        'bin/': {
            'python': None
        },
        'lib/': {
            'python2.7/': {
                'site-packages/': None
            }
        }
    }
    python_name = 'python'
    python_prefix = tmpdir.join(python_name)
    create_dir_structure(python_prefix, python_dirs)
    python_spec = spack.spec.Spec('python@2.7.12')
    python_spec._concrete = True
    python_spec.package.spec.prefix = str(python_prefix)
    ext_dirs = {
        'bin/': {
            'py-ext-tool': None
        },
        'lib/': {
            'python2.7/': {
                'site-packages/': {
                    'py-extension1/': {
                        'sample.py': None
                    }
                }
            }
        }
    }
    ext_name = 'py-extension1'
    ext_prefix = tmpdir.join(ext_name)
    create_dir_structure(ext_prefix, ext_dirs)
    easy_install_location = 'lib/python2.7/site-packages/easy-install.pth'
    with open(str(ext_prefix.join(easy_install_location)), 'w') as f:
        f.write("""path/to/ext1.egg
path/to/setuptools.egg""")
    return str(python_prefix), str(ext_prefix)
@pytest.fixture()
def namespace_extensions(tmpdir, builtin_and_mock_packages):
    """Create two python extensions sharing one namespace package.

    Returns ``(ext1_prefix, ext2_prefix, namespace_name)``; each
    extension provides its own copy of the namespace's __init__.py.
    """
    ext1_dirs = {
        'bin/': {
            'py-ext-tool1': None
        },
        'lib/': {
            'python2.7/': {
                'site-packages/': {
                    'examplenamespace/': {
                        '__init__.py': None,
                        'ext1_sample.py': None
                    }
                }
            }
        }
    }
    ext2_dirs = {
        'bin/': {
            'py-ext-tool2': None
        },
        'lib/': {
            'python2.7/': {
                'site-packages/': {
                    'examplenamespace/': {
                        '__init__.py': None,
                        'ext2_sample.py': None
                    }
                }
            }
        }
    }
    ext1_name = 'py-extension1'
    ext1_prefix = tmpdir.join(ext1_name)
    create_dir_structure(ext1_prefix, ext1_dirs)
    ext2_name = 'py-extension2'
    ext2_prefix = tmpdir.join(ext2_name)
    create_dir_structure(ext2_prefix, ext2_dirs)
    return str(ext1_prefix), str(ext2_prefix), 'examplenamespace'
def test_python_activation_with_files(tmpdir, python_and_extension_dirs,
                                      monkeypatch, builtin_and_mock_packages):
    """Global activation merges files and filters easy-install.pth."""
    python_prefix, ext_prefix = python_and_extension_dirs
    python_spec = spack.spec.Spec('python@2.7.12')
    python_spec._concrete = True
    python_spec.package.spec.prefix = python_prefix
    ext_pkg = create_python_ext_pkg(
        'py-extension1', ext_prefix, python_spec, monkeypatch)
    python_pkg = python_spec.package
    python_pkg.activate(ext_pkg, python_pkg.view())
    assert os.path.exists(os.path.join(python_prefix, 'bin/py-ext-tool'))
    easy_install_location = 'lib/python2.7/site-packages/easy-install.pth'
    with open(os.path.join(python_prefix, easy_install_location), 'r') as f:
        easy_install_contents = f.read()
    # The extension's own egg survives; the setuptools entry is dropped.
    assert 'ext1.egg' in easy_install_contents
    assert 'setuptools.egg' not in easy_install_contents
def test_python_activation_view(tmpdir, python_and_extension_dirs,
                                builtin_and_mock_packages, monkeypatch):
    """Activation into a separate view must not touch python's prefix."""
    python_prefix, ext_prefix = python_and_extension_dirs
    python_spec = spack.spec.Spec('python@2.7.12')
    python_spec._concrete = True
    python_spec.package.spec.prefix = python_prefix
    ext_pkg = create_python_ext_pkg('py-extension1', ext_prefix, python_spec,
                                    monkeypatch)
    view_dir = str(tmpdir.join('view'))
    layout = DirectoryLayout(view_dir)
    view = YamlFilesystemView(view_dir, layout)
    python_pkg = python_spec.package
    python_pkg.activate(ext_pkg, view)
    # Files land in the view, not in the extendee's install prefix.
    assert not os.path.exists(os.path.join(python_prefix, 'bin/py-ext-tool'))
    assert os.path.exists(os.path.join(view_dir, 'bin/py-ext-tool'))
def test_python_ignore_namespace_init_conflict(
        tmpdir, namespace_extensions, builtin_and_mock_packages, monkeypatch):
    """Test the view update logic in PythonPackage ignores conflicting
    instances of __init__ for packages which are in the same namespace.
    """
    ext1_prefix, ext2_prefix, py_namespace = namespace_extensions
    python_spec = spack.spec.Spec('python@2.7.12')
    python_spec._concrete = True
    # Both extensions declare the SAME namespace, so their __init__.py
    # copies are allowed to overlap.
    ext1_pkg = create_python_ext_pkg('py-extension1', ext1_prefix, python_spec,
                                     monkeypatch, py_namespace)
    ext2_pkg = create_python_ext_pkg('py-extension2', ext2_prefix, python_spec,
                                     monkeypatch, py_namespace)
    view_dir = str(tmpdir.join('view'))
    layout = DirectoryLayout(view_dir)
    view = YamlFilesystemView(view_dir, layout)
    python_pkg = python_spec.package
    python_pkg.activate(ext1_pkg, view)
    # Normally handled by Package.do_activate, but here we activate directly
    view.extensions_layout.add_extension(python_spec, ext1_pkg.spec)
    python_pkg.activate(ext2_pkg, view)
    f1 = 'lib/python2.7/site-packages/examplenamespace/ext1_sample.py'
    f2 = 'lib/python2.7/site-packages/examplenamespace/ext2_sample.py'
    init_file = 'lib/python2.7/site-packages/examplenamespace/__init__.py'
    # Both payload modules and the shared namespace __init__ are present.
    assert os.path.exists(os.path.join(view_dir, f1))
    assert os.path.exists(os.path.join(view_dir, f2))
    assert os.path.exists(os.path.join(view_dir, init_file))
def test_python_keep_namespace_init(
        tmpdir, namespace_extensions, builtin_and_mock_packages, monkeypatch):
    """Test the view update logic in PythonPackage keeps the namespace
    __init__ file as long as one package in the namespace still
    exists.
    """
    ext1_prefix, ext2_prefix, py_namespace = namespace_extensions
    python_spec = spack.spec.Spec('python@2.7.12')
    python_spec._concrete = True
    ext1_pkg = create_python_ext_pkg('py-extension1', ext1_prefix, python_spec,
                                     monkeypatch, py_namespace)
    ext2_pkg = create_python_ext_pkg('py-extension2', ext2_prefix, python_spec,
                                     monkeypatch, py_namespace)
    view_dir = str(tmpdir.join('view'))
    layout = DirectoryLayout(view_dir)
    view = YamlFilesystemView(view_dir, layout)
    python_pkg = python_spec.package
    python_pkg.activate(ext1_pkg, view)
    # Normally handled by Package.do_activate, but here we activate directly
    view.extensions_layout.add_extension(python_spec, ext1_pkg.spec)
    python_pkg.activate(ext2_pkg, view)
    view.extensions_layout.add_extension(python_spec, ext2_pkg.spec)
    f1 = 'lib/python2.7/site-packages/examplenamespace/ext1_sample.py'
    init_file = 'lib/python2.7/site-packages/examplenamespace/__init__.py'
    # Removing the first extension removes its module but keeps the shared
    # namespace __init__ alive for the remaining extension.
    python_pkg.deactivate(ext1_pkg, view)
    view.extensions_layout.remove_extension(python_spec, ext1_pkg.spec)
    assert not os.path.exists(os.path.join(view_dir, f1))
    assert os.path.exists(os.path.join(view_dir, init_file))
    # Removing the last namespace member finally removes __init__.
    python_pkg.deactivate(ext2_pkg, view)
    view.extensions_layout.remove_extension(python_spec, ext2_pkg.spec)
    assert not os.path.exists(os.path.join(view_dir, init_file))
def test_python_namespace_conflict(tmpdir, namespace_extensions,
                                   monkeypatch, builtin_and_mock_packages):
    """Test the view update logic in PythonPackage reports an error when two
    python extensions with different namespaces have a conflicting __init__
    file.
    """
    ext1_prefix, ext2_prefix, py_namespace = namespace_extensions
    # A second, distinct namespace so the two __init__.py copies conflict.
    other_namespace = py_namespace + 'other'
    python_spec = spack.spec.Spec('python@2.7.12')
    python_spec._concrete = True
    ext1_pkg = create_python_ext_pkg('py-extension1', ext1_prefix, python_spec,
                                     monkeypatch, py_namespace)
    ext2_pkg = create_python_ext_pkg('py-extension2', ext2_prefix, python_spec,
                                     monkeypatch, other_namespace)
    view_dir = str(tmpdir.join('view'))
    layout = DirectoryLayout(view_dir)
    view = YamlFilesystemView(view_dir, layout)
    python_pkg = python_spec.package
    python_pkg.activate(ext1_pkg, view)
    # Normally handled by Package.do_activate, but here we activate directly.
    view.extensions_layout.add_extension(python_spec, ext1_pkg.spec)
    # Second extension's __init__.py collides with the first's.
    with pytest.raises(MergeConflictError):
        python_pkg.activate(ext2_pkg, view)
@pytest.fixture()
def perl_and_extension_dirs(tmpdir, builtin_and_mock_packages):
    """Create skeleton install prefixes for perl and one perl extension.

    Returns ``(perl_prefix, ext_prefix)`` as strings; also points the
    concrete perl spec's prefix at the created directory.
    """
    perl_dirs = {
        'bin/': {
            'perl': None
        },
        'lib/': {
            'site_perl/': {
                '5.24.1/': {
                    'x86_64-linux/': None
                }
            }
        }
    }
    perl_name = 'perl'
    perl_prefix = tmpdir.join(perl_name)
    create_dir_structure(perl_prefix, perl_dirs)
    perl_spec = spack.spec.Spec('perl@5.24.1')
    perl_spec._concrete = True
    perl_spec.package.spec.prefix = str(perl_prefix)
    # The extension mirrors perl's version-specific site_perl layout.
    ext_dirs = {
        'bin/': {
            'perl-ext-tool': None
        },
        'lib/': {
            'site_perl/': {
                '5.24.1/': {
                    'x86_64-linux/': {
                        'TestExt/': {
                        }
                    }
                }
            }
        }
    }
    ext_name = 'perl-extension'
    ext_prefix = tmpdir.join(ext_name)
    create_dir_structure(ext_prefix, ext_dirs)
    return str(perl_prefix), str(ext_prefix)
def test_perl_activation(tmpdir, builtin_and_mock_packages, monkeypatch):
    """Activating a perl extension into perl's own prefix succeeds."""
    # Note the lib directory is based partly on the perl version
    perl_spec = spack.spec.Spec('perl@5.24.1')
    perl_spec._concrete = True
    perl_name = 'perl'
    tmpdir.ensure(perl_name, dir=True)
    perl_prefix = str(tmpdir.join(perl_name))
    # Set the prefix on the package's spec reference because that is a copy of
    # the original spec
    perl_spec.package.spec.prefix = perl_prefix
    ext_name = 'perl-extension'
    tmpdir.ensure(ext_name, dir=True)
    ext_pkg = create_ext_pkg(
        ext_name, str(tmpdir.join(ext_name)), perl_spec, monkeypatch)
    perl_pkg = perl_spec.package
    # No assertion: the test passes if activation raises nothing.
    perl_pkg.activate(ext_pkg, perl_pkg.view())
def test_perl_activation_with_files(tmpdir, perl_and_extension_dirs,
                                    monkeypatch, builtin_and_mock_packages):
    """Global (in-prefix) activation links the extension's files into perl."""
    perl_prefix, ext_prefix = perl_and_extension_dirs
    perl_spec = spack.spec.Spec('perl@5.24.1')
    perl_spec._concrete = True
    perl_spec.package.spec.prefix = perl_prefix
    ext_pkg = create_ext_pkg(
        'perl-extension', ext_prefix, perl_spec, monkeypatch)
    perl_pkg = perl_spec.package
    perl_pkg.activate(ext_pkg, perl_pkg.view())
    # The extension's executable must appear inside perl's own prefix.
    assert os.path.exists(os.path.join(perl_prefix, 'bin/perl-ext-tool'))
def test_perl_activation_view(tmpdir, perl_and_extension_dirs,
                              monkeypatch, builtin_and_mock_packages):
    """Activation into a separate view must not touch perl's prefix."""
    perl_prefix, ext_prefix = perl_and_extension_dirs
    perl_spec = spack.spec.Spec('perl@5.24.1')
    perl_spec._concrete = True
    perl_spec.package.spec.prefix = perl_prefix
    ext_pkg = create_ext_pkg(
        'perl-extension', ext_prefix, perl_spec, monkeypatch)
    view_dir = str(tmpdir.join('view'))
    layout = DirectoryLayout(view_dir)
    view = YamlFilesystemView(view_dir, layout)
    perl_pkg = perl_spec.package
    perl_pkg.activate(ext_pkg, view)
    # Files land in the view, not in the extendee's install prefix.
    assert not os.path.exists(os.path.join(perl_prefix, 'bin/perl-ext-tool'))
    assert os.path.exists(os.path.join(view_dir, 'bin/perl-ext-tool'))
def test_is_activated_upstream_extendee(tmpdir, builtin_and_mock_packages,
                                        monkeypatch):
    """When an extendee is installed upstream, make sure that the extension
    spec is never considered to be globally activated for it.
    """
    extendee_spec = spack.spec.Spec('python')
    extendee_spec._concrete = True
    python_name = 'python'
    tmpdir.ensure(python_name, dir=True)
    python_prefix = str(tmpdir.join(python_name))
    # Set the prefix on the package's spec reference because that is a copy of
    # the original spec
    extendee_spec.package.spec.prefix = python_prefix
    # Pretend the extendee lives in an upstream (read-only) installation.
    monkeypatch.setattr(extendee_spec.package.__class__,
                        'installed_upstream', True)
    ext_name = 'py-extension1'
    tmpdir.ensure(ext_name, dir=True)
    ext_pkg = create_ext_pkg(
        ext_name, str(tmpdir.join(ext_name)), extendee_spec, monkeypatch)
    # The view should not be checked at all if the extendee is installed
    # upstream, so use 'None' here
    mock_view = None
    assert not ext_pkg.is_activated(mock_view)
import argparse
try:
from . import treedata_pb2 as proto
from . import utils
except (ValueError, ImportError):
import treedata_pb2 as proto
import utils
import represent_ast as ra
class Node:
    """Lightweight record for a single AST node: id, label and the node's
    recorded source position."""

    def __init__(self, id, label, position):
        self.id, self.label, self.position = id, label, position
class ReadGraph:
    """Read-only view over a serialized AnnotatedTree protobuf.

    Edges are stored as parallel arrays (``from_node[i]`` is a child of
    ``assignment[i]``); the constructor indexes them so children can be
    looked up by parent id.
    """

    def __init__(self, proto_t, reverser):
        # reverser decodes label indices back to strings (and, for
        # rewrite_label, also encodes strings to indices) -- presumably the
        # shared utils indexer; confirm against callers.
        self.name = proto_t.name
        self._proto = proto_t
        self._reverser = reverser
        self.root = -1
        self._ingoing = {}
        self._index_ingoing()

    def _index_ingoing(self):
        # Build parent-id -> [child ids] and detect the root (depth 0).
        data = self._proto
        for i in range(len(data.assignment)):
            cid = data.from_node[i]
            pid = data.assignment[i]
            if pid not in self._ingoing:
                self._ingoing[pid] = []
            self._ingoing[pid].append(cid)
            if self.root == -1 or data.depth[pid] == 0:
                self.root = pid

    def in_edges(self, id):
        # Children assigned to this node; empty list for leaves.
        # NOTE: returns the INTERNAL list, not a copy -- callers must not
        # mutate it.
        if id not in self._ingoing:
            return []
        return self._ingoing[id]

    def node(self, id):
        # Materialize a Node with its label decoded from the 'ast' index.
        pos = self._proto.position[id]
        lix = self._proto.nodes[id]
        return Node(
            id, self._reverser.reverse('ast', lix), position=pos
        )

    def rewrite_label(self, id, label):
        # Mutates the underlying protobuf in place.
        self._proto.nodes[id] = self._reverser.index('ast', label)
class WriteGraph:
    """Incrementally builds a new AnnotatedTree protobuf.

    Mirrors ReadGraph's storage: nodes/position/depth are parallel arrays
    indexed by node id; edges are appended to from_node/assignment.
    """

    def __init__(self, name, indexer):
        self.proto = proto.AnnotatedTree()
        self.proto.name = name
        self._indexer = indexer

    def write_node(self, label, position=0, assign_to=-1):
        """Append a node and return its id; assign_to=-1 creates a root."""
        data = self.proto
        id = len(data.nodes)
        lix = self._indexer.index('ast', label)
        data.nodes.append(lix)
        if assign_to != -1:
            depth = data.depth[assign_to] + 1
            data.from_node.append(id)
            data.assignment.append(assign_to)
        else:
            depth = 0
        data.depth.append(depth)
        data.position.append(position)
        return id

    def rewrite_label(self, id, label):
        # BUG FIX: this class stores its protobuf as 'self.proto' (set in
        # __init__), not 'self._proto' as ReadGraph does; the old attribute
        # reference raised AttributeError whenever this method was called.
        self.proto.nodes[id] = self._indexer.index('ast', label)

    def read_graph(self):
        """Return a ReadGraph view over the tree built so far."""
        return ReadGraph(
            self.proto, self._indexer
        )
def find_statement_roots(graph, root):
    """Collect ids of statement-level subtree roots below *root*.

    Descends through CompoundStmt wrappers; every other node directly
    below one is a statement root.  IfStmt/CompoundStmt children of a
    statement are traversed as well, so else-branches and nested blocks
    contribute their own roots.
    """
    roots = []
    check = set(['CompoundStmt', 'IfStmt'])
    # BUG FIX: copy the edge list.  ReadGraph.in_edges returns its internal
    # adjacency list, so the pop()/extend() below used to consume and
    # corrupt the graph's own edges for *root*.
    Q = list(graph.in_edges(root))
    while len(Q) > 0:
        id = Q.pop()
        node = graph.node(id)
        label = node.label
        if label == 'CompoundStmt':
            # Transparent block: its children are candidate roots.
            Q.extend(graph.in_edges(id))
        else:
            roots.append(id)
            # Also walk into nested blocks/if-branches of the statement.
            for u in graph.in_edges(id):
                n2 = graph.node(u)
                if n2.label in check:
                    Q.append(u)
    return roots
def ast_to_seq(graph, root):
    """Linearize the subtree at *root* into a label sequence.

    - "[SEP]" sentinel ids are copied through verbatim.
    - A nested IfStmt is renamed to ElseIfStmt in the graph and NOT
      descended into (it is handled as its own statement root).
    - CompoundStmt wrappers are dropped (the node is skipped before its
      children are queued, so nothing below them is visited here).
    """
    sequence = []
    Q = [root]
    seen = set([root])
    while len(Q) > 0:
        # Take from the front; children are pushed back onto the front,
        # giving a depth-first pre-order traversal.
        id = Q[0]
        Q = Q[1:]
        if id == "[SEP]":
            sequence.append(id)
            continue
        label = graph.node(id).label
        if id != root and label == 'IfStmt':
            # Mark nested ifs for later passes; mutates the source graph.
            graph.rewrite_label(id, 'ElseIfStmt')
            continue
        if label == 'CompoundStmt':
            continue
        sequence.append(label)
        # Children ordered by their recorded source position.
        neigh = sorted([u for u in graph.in_edges(id) if u not in seen], key=lambda x: graph.node(x).position)
        seen = seen.union(set(neigh))
        Q = neigh + Q
    return sequence
def ast_to_set(graph, root):
    """Collect the set of labels in the subtree rooted at *root*.

    Set-semantics counterpart of ast_to_seq: order is irrelevant and
    duplicates collapse.  Like ast_to_seq it rewrites nested IfStmt
    nodes to ElseIfStmt and skips CompoundStmt wrappers (without
    descending into them); "[SEP]" sentinel ids are kept as members.
    """
    out = set([])
    Q = [root]
    while len(Q) > 0:
        id = Q.pop()
        if id == "[SEP]":
            # BUG FIX: this branch appended to 'sequence', an undefined
            # name copied from ast_to_seq, and raised NameError; the
            # sentinel belongs in the output set.
            out.add(id)
            continue
        label = graph.node(id).label
        if id != root and label == 'IfStmt':
            graph.rewrite_label(id, 'ElseIfStmt')
            continue
        if label == 'CompoundStmt':
            continue
        out.add(label)
        neigh = graph.in_edges(id)
        Q.extend(neigh)
    return out
def transform_state(graph, cgraph, attach_root, root, set_sem=False):
    """Flatten one statement subtree and attach its labels under attach_root.

    With set_sem the labels form an unordered bag (every position is 0);
    otherwise the sequence order is preserved via the position field.
    """
    labels = list(ast_to_set(graph, root)) if set_sem else ast_to_seq(graph, root)
    for idx, label in enumerate(labels):
        cgraph.write_node(label, position=0 if set_sem else idx,
                          assign_to=attach_root)
def stmt_label(label):
    """Prefix *label* with 'Stmt_' to mark it as a statement label."""
    return f"Stmt_{label}"
def transform_func(graph, cgraph, attach_func, root, set_sem=False):
    """Copy one function's statements from *graph* into *cgraph*.

    Writes one node per statement root under *attach_func*, then flattens
    each statement's subtree beneath its freshly written node.
    """
    if attach_func == 1:
        # Id 1 is the synthetic InitFunctionDecl (see transform_graph):
        # top-level code outside any function is one single statement.
        state_roots = [root]
    else:
        state_roots = find_statement_roots(graph, root)
    state_roots = sorted(state_roots, key=lambda id: graph.node(id).position)
    attach_roots = []
    # Write ALL statement-root nodes first, then fill in their subtrees:
    # ids are allocated in write order, so this keeps the statement roots
    # contiguous in the output tree.
    for pos, id in enumerate(state_roots):
        node = graph.node(id)
        nid = cgraph.write_node(node.label, position=pos, assign_to=attach_func)
        attach_roots.append(nid)
    for i in range(len(state_roots)):
        transform_state(graph, cgraph, attach_roots[i], state_roots[i],
                        set_sem=set_sem)
def attach_noop(graph):
    """Append a "[NOOP]" child to every internal node of *graph*.

    The adjacency snapshot is taken once up front, so the freshly added
    [NOOP] leaves are not themselves traversed.
    """
    snapshot = graph.read_graph()
    stack = [0]
    while stack:
        nid = stack.pop()
        children = snapshot.in_edges(nid)
        if children:
            graph.write_node("[NOOP]", position=0, assign_to=nid)
            stack.extend(children)
def rm_empty_func(graph):
    """Drop childless function nodes hanging off node "N0".

    NOTE(review): appears stale/unused -- node ids elsewhere in this
    module are ints (the program root is 0, not "N0") and neither
    ReadGraph nor WriteGraph defines remove_node, so calling this with
    either would fail.  Kept as-is; confirm before relying on it.
    """
    rm = []
    for u in graph.in_edges("N0"):
        if len(graph.in_edges(u)) == 0:
            rm.append(u)
    for r in rm:
        graph.remove_node(r)
def transform_graph(graph, indexer, set_sem=False):
    """Rebuild *graph* as a flattened PROGRAM tree in a new WriteGraph.

    Layout: PROGRAM (id 0) -> one node per FunctionDecl plus a synthetic
    InitFunctionDecl (id 1) that absorbs all non-function top-level
    nodes; statements are flattened beneath them and every internal node
    gets a trailing [NOOP] child.
    """
    cgraph = WriteGraph(graph.name, indexer)
    program_id = cgraph.write_node("PROGRAM", position=0)
    init_id = cgraph.write_node("InitFunctionDecl", position=0, assign_to=program_id)
    for u in graph.in_edges(graph.root):
        if u == graph.root:
            # Guard against a possible self-edge on the root.
            continue
        node = graph.node(u)
        root = init_id
        if 'FunctionDecl' in node.label:
            root = cgraph.write_node(node.label, position=0, assign_to=program_id)
        transform_func(graph, cgraph, root, u, set_sem=set_sem)
    attach_noop(cgraph)
    return cgraph
def preprocess(id, data, old_index, new_indexer, set_sem=False):
    """Re-encode one AnnotatedTree: decode with *old_index*, rebuild the
    flattened PROGRAM tree with *new_indexer*, and return (id, proto)."""
    source = ReadGraph(data, old_index)
    transformed = transform_graph(source, new_indexer, set_sem=set_sem)
    return id, transformed.proto
def bounded_stream(stream, bound):
    """Yield at most *bound* items from *stream*.

    BUG FIX: the check used to run after the yield (``yield D; if i >=
    bound: break``), which emitted bound + 1 items.
    """
    for i, D in enumerate(stream):
        if i >= bound:
            break
        yield D
break | ase20_supplementary/webui/model/preprocess_clang.py | import argparse
try:
from . import treedata_pb2 as proto
from . import utils
except (ValueError, ImportError):
import treedata_pb2 as proto
import utils
import represent_ast as ra
class Node:
    """Lightweight record for a single AST node: id, label and the node's
    recorded source position."""

    def __init__(self, id, label, position):
        self.id, self.label, self.position = id, label, position
class ReadGraph:
    """Read-only view over a serialized AnnotatedTree protobuf.

    Edges are stored as parallel arrays (``from_node[i]`` is a child of
    ``assignment[i]``); the constructor indexes them so children can be
    looked up by parent id.
    """

    def __init__(self, proto_t, reverser):
        # reverser decodes label indices back to strings (and, for
        # rewrite_label, also encodes strings to indices) -- presumably the
        # shared utils indexer; confirm against callers.
        self.name = proto_t.name
        self._proto = proto_t
        self._reverser = reverser
        self.root = -1
        self._ingoing = {}
        self._index_ingoing()

    def _index_ingoing(self):
        # Build parent-id -> [child ids] and detect the root (depth 0).
        data = self._proto
        for i in range(len(data.assignment)):
            cid = data.from_node[i]
            pid = data.assignment[i]
            if pid not in self._ingoing:
                self._ingoing[pid] = []
            self._ingoing[pid].append(cid)
            if self.root == -1 or data.depth[pid] == 0:
                self.root = pid

    def in_edges(self, id):
        # Children assigned to this node; empty list for leaves.
        # NOTE: returns the INTERNAL list, not a copy -- callers must not
        # mutate it.
        if id not in self._ingoing:
            return []
        return self._ingoing[id]

    def node(self, id):
        # Materialize a Node with its label decoded from the 'ast' index.
        pos = self._proto.position[id]
        lix = self._proto.nodes[id]
        return Node(
            id, self._reverser.reverse('ast', lix), position=pos
        )

    def rewrite_label(self, id, label):
        # Mutates the underlying protobuf in place.
        self._proto.nodes[id] = self._reverser.index('ast', label)
class WriteGraph:
    """Incrementally builds a new AnnotatedTree protobuf.

    Mirrors ReadGraph's storage: nodes/position/depth are parallel arrays
    indexed by node id; edges are appended to from_node/assignment.
    """

    def __init__(self, name, indexer):
        self.proto = proto.AnnotatedTree()
        self.proto.name = name
        self._indexer = indexer

    def write_node(self, label, position=0, assign_to=-1):
        """Append a node and return its id; assign_to=-1 creates a root."""
        data = self.proto
        id = len(data.nodes)
        lix = self._indexer.index('ast', label)
        data.nodes.append(lix)
        if assign_to != -1:
            depth = data.depth[assign_to] + 1
            data.from_node.append(id)
            data.assignment.append(assign_to)
        else:
            depth = 0
        data.depth.append(depth)
        data.position.append(position)
        return id

    def rewrite_label(self, id, label):
        # BUG FIX: this class stores its protobuf as 'self.proto' (set in
        # __init__), not 'self._proto' as ReadGraph does; the old attribute
        # reference raised AttributeError whenever this method was called.
        self.proto.nodes[id] = self._indexer.index('ast', label)

    def read_graph(self):
        """Return a ReadGraph view over the tree built so far."""
        return ReadGraph(
            self.proto, self._indexer
        )
def find_statement_roots(graph, root):
    """Collect ids of statement-level subtree roots below *root*.

    Descends through CompoundStmt wrappers; every other node directly
    below one is a statement root.  IfStmt/CompoundStmt children of a
    statement are traversed as well, so else-branches and nested blocks
    contribute their own roots.
    """
    roots = []
    check = set(['CompoundStmt', 'IfStmt'])
    # BUG FIX: copy the edge list.  ReadGraph.in_edges returns its internal
    # adjacency list, so the pop()/extend() below used to consume and
    # corrupt the graph's own edges for *root*.
    Q = list(graph.in_edges(root))
    while len(Q) > 0:
        id = Q.pop()
        node = graph.node(id)
        label = node.label
        if label == 'CompoundStmt':
            # Transparent block: its children are candidate roots.
            Q.extend(graph.in_edges(id))
        else:
            roots.append(id)
            # Also walk into nested blocks/if-branches of the statement.
            for u in graph.in_edges(id):
                n2 = graph.node(u)
                if n2.label in check:
                    Q.append(u)
    return roots
def ast_to_seq(graph, root):
    """Linearize the subtree at *root* into a label sequence.

    - "[SEP]" sentinel ids are copied through verbatim.
    - A nested IfStmt is renamed to ElseIfStmt in the graph and NOT
      descended into (it is handled as its own statement root).
    - CompoundStmt wrappers are dropped (the node is skipped before its
      children are queued, so nothing below them is visited here).
    """
    sequence = []
    Q = [root]
    seen = set([root])
    while len(Q) > 0:
        # Take from the front; children are pushed back onto the front,
        # giving a depth-first pre-order traversal.
        id = Q[0]
        Q = Q[1:]
        if id == "[SEP]":
            sequence.append(id)
            continue
        label = graph.node(id).label
        if id != root and label == 'IfStmt':
            # Mark nested ifs for later passes; mutates the source graph.
            graph.rewrite_label(id, 'ElseIfStmt')
            continue
        if label == 'CompoundStmt':
            continue
        sequence.append(label)
        # Children ordered by their recorded source position.
        neigh = sorted([u for u in graph.in_edges(id) if u not in seen], key=lambda x: graph.node(x).position)
        seen = seen.union(set(neigh))
        Q = neigh + Q
    return sequence
def ast_to_set(graph, root):
    """Collect the set of labels in the subtree rooted at *root*.

    Set-semantics counterpart of ast_to_seq: order is irrelevant and
    duplicates collapse.  Like ast_to_seq it rewrites nested IfStmt
    nodes to ElseIfStmt and skips CompoundStmt wrappers (without
    descending into them); "[SEP]" sentinel ids are kept as members.
    """
    out = set([])
    Q = [root]
    while len(Q) > 0:
        id = Q.pop()
        if id == "[SEP]":
            # BUG FIX: this branch appended to 'sequence', an undefined
            # name copied from ast_to_seq, and raised NameError; the
            # sentinel belongs in the output set.
            out.add(id)
            continue
        label = graph.node(id).label
        if id != root and label == 'IfStmt':
            graph.rewrite_label(id, 'ElseIfStmt')
            continue
        if label == 'CompoundStmt':
            continue
        out.add(label)
        neigh = graph.in_edges(id)
        Q.extend(neigh)
    return out
def transform_state(graph, cgraph, attach_root, root, set_sem=False):
    """Flatten one statement subtree and attach its labels under attach_root.

    With set_sem the labels form an unordered bag (every position is 0);
    otherwise the sequence order is preserved via the position field.
    """
    labels = list(ast_to_set(graph, root)) if set_sem else ast_to_seq(graph, root)
    for idx, label in enumerate(labels):
        cgraph.write_node(label, position=0 if set_sem else idx,
                          assign_to=attach_root)
def stmt_label(label):
    """Prefix *label* with 'Stmt_' to mark it as a statement label."""
    return f"Stmt_{label}"
def transform_func(graph, cgraph, attach_func, root, set_sem=False):
    """Copy one function's statements from *graph* into *cgraph*.

    Writes one node per statement root under *attach_func*, then flattens
    each statement's subtree beneath its freshly written node.
    """
    if attach_func == 1:
        # Id 1 is the synthetic InitFunctionDecl (see transform_graph):
        # top-level code outside any function is one single statement.
        state_roots = [root]
    else:
        state_roots = find_statement_roots(graph, root)
    state_roots = sorted(state_roots, key=lambda id: graph.node(id).position)
    attach_roots = []
    # Write ALL statement-root nodes first, then fill in their subtrees:
    # ids are allocated in write order, so this keeps the statement roots
    # contiguous in the output tree.
    for pos, id in enumerate(state_roots):
        node = graph.node(id)
        nid = cgraph.write_node(node.label, position=pos, assign_to=attach_func)
        attach_roots.append(nid)
    for i in range(len(state_roots)):
        transform_state(graph, cgraph, attach_roots[i], state_roots[i],
                        set_sem=set_sem)
def attach_noop(graph):
    """Append a "[NOOP]" child to every internal node of *graph*.

    The adjacency snapshot is taken once up front, so the freshly added
    [NOOP] leaves are not themselves traversed.
    """
    snapshot = graph.read_graph()
    stack = [0]
    while stack:
        nid = stack.pop()
        children = snapshot.in_edges(nid)
        if children:
            graph.write_node("[NOOP]", position=0, assign_to=nid)
            stack.extend(children)
def rm_empty_func(graph):
    """Drop childless function nodes hanging off node "N0".

    NOTE(review): appears stale/unused -- node ids elsewhere in this
    module are ints (the program root is 0, not "N0") and neither
    ReadGraph nor WriteGraph defines remove_node, so calling this with
    either would fail.  Kept as-is; confirm before relying on it.
    """
    rm = []
    for u in graph.in_edges("N0"):
        if len(graph.in_edges(u)) == 0:
            rm.append(u)
    for r in rm:
        graph.remove_node(r)
def transform_graph(graph, indexer, set_sem=False):
    """Rebuild *graph* as a flattened PROGRAM tree in a new WriteGraph.

    Layout: PROGRAM (id 0) -> one node per FunctionDecl plus a synthetic
    InitFunctionDecl (id 1) that absorbs all non-function top-level
    nodes; statements are flattened beneath them and every internal node
    gets a trailing [NOOP] child.
    """
    cgraph = WriteGraph(graph.name, indexer)
    program_id = cgraph.write_node("PROGRAM", position=0)
    init_id = cgraph.write_node("InitFunctionDecl", position=0, assign_to=program_id)
    for u in graph.in_edges(graph.root):
        if u == graph.root:
            # Guard against a possible self-edge on the root.
            continue
        node = graph.node(u)
        root = init_id
        if 'FunctionDecl' in node.label:
            root = cgraph.write_node(node.label, position=0, assign_to=program_id)
        transform_func(graph, cgraph, root, u, set_sem=set_sem)
    attach_noop(cgraph)
    return cgraph
def preprocess(id, data, old_index, new_indexer, set_sem=False):
    """Re-encode one AnnotatedTree: decode with *old_index*, rebuild the
    flattened PROGRAM tree with *new_indexer*, and return (id, proto)."""
    source = ReadGraph(data, old_index)
    transformed = transform_graph(source, new_indexer, set_sem=set_sem)
    return id, transformed.proto
def bounded_stream(stream, bound):
    """Yield at most *bound* items from *stream*.

    BUG FIX: the check used to run after the yield (``yield D; if i >=
    bound: break``), which emitted bound + 1 items.
    """
    for i, D in enumerate(stream):
        if i >= bound:
            break
        yield D
from django.conf.urls import url
from django.contrib import admin
from django.contrib.admin.actions import delete_selected
from django.contrib.auth.models import User
from django.test import SimpleTestCase, TestCase, override_settings
from django.test.client import RequestFactory
from django.urls import reverse
from .models import Article
# Stand-alone admin site with a deliberately small registry: one model from
# this app and one from django.contrib.auth.
site = admin.AdminSite(name="test_adminsite")
site.register(User)
site.register(Article)

urlpatterns = [
    url(r'^test_admin/admin/', site.urls),
]
@override_settings(ROOT_URLCONF='admin_views.test_adminsite')
class SiteEachContextTest(TestCase):
    """
    Check each_context contains the documented variables and that available_apps context
    variable structure is the expected one.
    """
    request_factory = RequestFactory()

    @classmethod
    def setUpTestData(cls):
        # Superuser so has_permission / per-model perms are all True below.
        cls.u1 = User.objects.create_superuser(username='super', password='<PASSWORD>', email='<EMAIL>')

    def setUp(self):
        request = self.request_factory.get(reverse('test_adminsite:index'))
        request.user = self.u1
        # Context computed once per test from the module-level 'site'.
        self.ctx = site.each_context(request)

    def test_each_context(self):
        ctx = self.ctx
        self.assertEqual(ctx['site_header'], 'Django administration')
        self.assertEqual(ctx['site_title'], 'Django site admin')
        self.assertEqual(ctx['site_url'], '/')
        self.assertIs(ctx['has_permission'], True)

    def test_each_context_site_url_with_script_name(self):
        # site_url should honor the request's SCRIPT_NAME prefix.
        request = self.request_factory.get(reverse('test_adminsite:index'), SCRIPT_NAME='/my-script-name/')
        request.user = self.u1
        self.assertEqual(site.each_context(request)['site_url'], '/my-script-name/')

    def test_available_apps(self):
        ctx = self.ctx
        apps = ctx['available_apps']
        # we have registered two models from two different apps
        self.assertEqual(len(apps), 2)
        # admin_views.Article
        admin_views = apps[0]
        self.assertEqual(admin_views['app_label'], 'admin_views')
        self.assertEqual(len(admin_views['models']), 1)
        self.assertEqual(admin_views['models'][0]['object_name'], 'Article')
        # auth.User
        auth = apps[1]
        self.assertEqual(auth['app_label'], 'auth')
        self.assertEqual(len(auth['models']), 1)
        user = auth['models'][0]
        self.assertEqual(user['object_name'], 'User')
        self.assertEqual(auth['app_url'], '/test_admin/admin/auth/')
        self.assertIs(auth['has_module_perms'], True)
        self.assertIn('perms', user)
        self.assertIs(user['perms']['add'], True)
        self.assertIs(user['perms']['change'], True)
        self.assertIs(user['perms']['delete'], True)
        self.assertEqual(user['admin_url'], '/test_admin/admin/auth/user/')
        self.assertEqual(user['add_url'], '/test_admin/admin/auth/user/add/')
        self.assertEqual(user['name'], 'Users')
class SiteActionsTests(SimpleTestCase):
    """Exercise AdminSite action registration and disabling."""

    def setUp(self):
        # Fresh site per test so registered/disabled actions don't leak.
        self.site = admin.AdminSite()

    def test_add_action(self):
        def test_action():
            pass
        self.site.add_action(test_action)
        self.assertEqual(self.site.get_action('test_action'), test_action)

    def test_disable_action(self):
        action_name = 'delete_selected'
        self.assertEqual(self.site._actions[action_name], delete_selected)
        self.site.disable_action(action_name)
        # Disabled actions are removed from the active mapping entirely.
        with self.assertRaises(KeyError):
            self.site._actions[action_name]

    def test_get_action(self):
        """AdminSite.get_action() returns an action even if it's disabled."""
        action_name = 'delete_selected'
        self.assertEqual(self.site.get_action(action_name), delete_selected)
        self.site.disable_action(action_name)
        self.assertEqual(self.site.get_action(action_name), delete_selected)
from django.contrib import admin
from django.contrib.admin.actions import delete_selected
from django.contrib.auth.models import User
from django.test import SimpleTestCase, TestCase, override_settings
from django.test.client import RequestFactory
from django.urls import reverse
from .models import Article
# Stand-alone admin site with a deliberately small registry: one model from
# this app and one from django.contrib.auth.
site = admin.AdminSite(name="test_adminsite")
site.register(User)
site.register(Article)

urlpatterns = [
    url(r'^test_admin/admin/', site.urls),
]
@override_settings(ROOT_URLCONF='admin_views.test_adminsite')
class SiteEachContextTest(TestCase):
    """
    Check each_context contains the documented variables and that available_apps context
    variable structure is the expected one.
    """
    request_factory = RequestFactory()

    @classmethod
    def setUpTestData(cls):
        # Superuser so has_permission / per-model perms are all True below.
        cls.u1 = User.objects.create_superuser(username='super', password='<PASSWORD>', email='<EMAIL>')

    def setUp(self):
        request = self.request_factory.get(reverse('test_adminsite:index'))
        request.user = self.u1
        # Context computed once per test from the module-level 'site'.
        self.ctx = site.each_context(request)

    def test_each_context(self):
        ctx = self.ctx
        self.assertEqual(ctx['site_header'], 'Django administration')
        self.assertEqual(ctx['site_title'], 'Django site admin')
        self.assertEqual(ctx['site_url'], '/')
        self.assertIs(ctx['has_permission'], True)

    def test_each_context_site_url_with_script_name(self):
        # site_url should honor the request's SCRIPT_NAME prefix.
        request = self.request_factory.get(reverse('test_adminsite:index'), SCRIPT_NAME='/my-script-name/')
        request.user = self.u1
        self.assertEqual(site.each_context(request)['site_url'], '/my-script-name/')

    def test_available_apps(self):
        ctx = self.ctx
        apps = ctx['available_apps']
        # we have registered two models from two different apps
        self.assertEqual(len(apps), 2)
        # admin_views.Article
        admin_views = apps[0]
        self.assertEqual(admin_views['app_label'], 'admin_views')
        self.assertEqual(len(admin_views['models']), 1)
        self.assertEqual(admin_views['models'][0]['object_name'], 'Article')
        # auth.User
        auth = apps[1]
        self.assertEqual(auth['app_label'], 'auth')
        self.assertEqual(len(auth['models']), 1)
        user = auth['models'][0]
        self.assertEqual(user['object_name'], 'User')
        self.assertEqual(auth['app_url'], '/test_admin/admin/auth/')
        self.assertIs(auth['has_module_perms'], True)
        self.assertIn('perms', user)
        self.assertIs(user['perms']['add'], True)
        self.assertIs(user['perms']['change'], True)
        self.assertIs(user['perms']['delete'], True)
        self.assertEqual(user['admin_url'], '/test_admin/admin/auth/user/')
        self.assertEqual(user['add_url'], '/test_admin/admin/auth/user/add/')
        self.assertEqual(user['name'], 'Users')
class SiteActionsTests(SimpleTestCase):
    """Exercise AdminSite action registration and disabling."""

    def setUp(self):
        # Fresh site per test so registered/disabled actions don't leak.
        self.site = admin.AdminSite()

    def test_add_action(self):
        def test_action():
            pass
        self.site.add_action(test_action)
        self.assertEqual(self.site.get_action('test_action'), test_action)

    def test_disable_action(self):
        action_name = 'delete_selected'
        self.assertEqual(self.site._actions[action_name], delete_selected)
        self.site.disable_action(action_name)
        # Disabled actions are removed from the active mapping entirely.
        with self.assertRaises(KeyError):
            self.site._actions[action_name]

    def test_get_action(self):
        """AdminSite.get_action() returns an action even if it's disabled."""
        action_name = 'delete_selected'
        self.assertEqual(self.site.get_action(action_name), delete_selected)
        self.site.disable_action(action_name)
        self.assertEqual(self.site.get_action(action_name), delete_selected)
import asyncio
import concurrent.futures
import functools
import logging
import signal
import threading
import warnings
from typing import (Optional, Collection, Union, Tuple, Set, Text, Any, Coroutine,
cast, TYPE_CHECKING)
from kopf.engines import peering
from kopf.engines import posting
from kopf.reactor import handling
from kopf.reactor import lifecycles
from kopf.reactor import queueing
from kopf.reactor import registries
if TYPE_CHECKING:
asyncio_Task = asyncio.Task[None]
asyncio_Future = asyncio.Future[Any]
else:
asyncio_Task = asyncio.Task
asyncio_Future = asyncio.Future
Flag = Union[asyncio_Future, asyncio.Event, concurrent.futures.Future, threading.Event]
Tasks = Collection[asyncio_Task]
logger = logging.getLogger(__name__)
def run(
        loop: Optional[asyncio.AbstractEventLoop] = None,
        lifecycle: Optional[lifecycles.LifeCycleFn] = None,
        registry: Optional[registries.OperatorRegistry] = None,
        standalone: bool = False,
        priority: int = 0,
        peering_name: Optional[str] = None,
        namespace: Optional[str] = None,
) -> None:
    """
    Run the whole operator synchronously, blocking until it stops.

    This function should be used to run an operator in normal sync mode.
    """
    if loop is None:
        loop = asyncio.get_event_loop()
    operator_coro = operator(
        lifecycle=lifecycle,
        registry=registry,
        standalone=standalone,
        namespace=namespace,
        priority=priority,
        peering_name=peering_name,
    )
    try:
        loop.run_until_complete(operator_coro)
    except asyncio.CancelledError:
        pass  # cancellation is the normal, graceful way of stopping the operator
async def operator(
        lifecycle: Optional[lifecycles.LifeCycleFn] = None,
        registry: Optional[registries.OperatorRegistry] = None,
        standalone: bool = False,
        priority: int = 0,
        peering_name: Optional[str] = None,
        namespace: Optional[str] = None,
        stop_flag: Optional[Flag] = None,
        ready_flag: Optional[Flag] = None,
) -> None:
    """
    Run the whole operator asynchronously.

    This function should be used to run an operator in an asyncio event-loop
    if the operator is orchestrated explicitly and manually.

    It is efficiently `spawn_tasks` + `run_tasks` with some safety.
    """
    # Remember the tasks that existed before ours, so they are never touched later.
    preexisting_tasks = await _all_tasks()
    spawned_tasks = await spawn_tasks(
        lifecycle=lifecycle,
        registry=registry,
        standalone=standalone,
        namespace=namespace,
        priority=priority,
        peering_name=peering_name,
        stop_flag=stop_flag,
        ready_flag=ready_flag,
    )
    await run_tasks(spawned_tasks, ignored=preexisting_tasks)
async def spawn_tasks(
        lifecycle: Optional[lifecycles.LifeCycleFn] = None,
        registry: Optional[registries.OperatorRegistry] = None,
        standalone: bool = False,
        priority: int = 0,
        peering_name: Optional[str] = None,
        namespace: Optional[str] = None,
        stop_flag: Optional[Flag] = None,
        ready_flag: Optional[Flag] = None,
) -> Tasks:
    """
    Spawn all the tasks needed to run the operator.

    The tasks are properly inter-connected with the synchronisation primitives.

    :param lifecycle: handler-selection strategy; the framework default if ``None``.
    :param registry: registry of resources/handlers; the global default if ``None``.
    :param standalone: if true, peering is disabled (see `peering.Peer.detect`).
    :param priority: this operator's priority for peering.
    :param peering_name: name of the peering object to use.
    :param namespace: namespace to watch; cluster-wide if ``None``.
    :param stop_flag: external flag that, once set, stops the operator.
    :param ready_flag: flag raised once the operator is up and running.
    :return: the spawned top-level tasks, to be passed to `run_tasks`.
    """
    loop = asyncio.get_running_loop()
    # The freezer and the registry are scoped to this whole task-set, to sync them all.
    lifecycle = lifecycle if lifecycle is not None else lifecycles.get_default_lifecycle()
    registry = registry if registry is not None else registries.get_default_registry()
    # NOTE(review): the explicit loop= arguments are deprecated since Python 3.8
    # and removed in 3.10 — presumably kept for older interpreters; confirm.
    event_queue: posting.K8sEventQueue = asyncio.Queue(loop=loop)
    freeze_flag: asyncio.Event = asyncio.Event(loop=loop)
    signal_flag: asyncio_Future = asyncio.Future(loop=loop)
    tasks = []
    # A top-level task for external stopping by setting a stop-flag. Once set,
    # this task will exit, and thus all other top-level tasks will be cancelled.
    tasks.extend([
        loop.create_task(_stop_flag_checker(
            signal_flag=signal_flag,
            ready_flag=ready_flag,
            stop_flag=stop_flag,
        )),
    ])
    # K8s-event posting. Events are queued in-memory and posted in the background.
    # NB: currently, it is a global task, but can be made per-resource or per-object.
    tasks.extend([
        loop.create_task(_root_task_checker("poster of events", posting.poster(
            event_queue=event_queue))),
    ])
    # Monitor the peers, unless explicitly disabled.
    ourselves: Optional[peering.Peer] = peering.Peer.detect(
        id=peering.detect_own_id(), priority=priority,
        standalone=standalone, namespace=namespace, name=peering_name,
    )
    if ourselves:
        tasks.extend([
            loop.create_task(peering.peers_keepalive(
                ourselves=ourselves)),
            loop.create_task(_root_task_checker("watcher of peering", queueing.watcher(
                namespace=namespace,
                resource=ourselves.resource,
                handler=functools.partial(peering.peers_handler,
                                          ourselves=ourselves,
                                          freeze=freeze_flag)))),  # freeze is set/cleared
        ])
    # Resource event handling, only once for every known resource (de-duplicated).
    for resource in registry.resources:
        tasks.extend([
            loop.create_task(_root_task_checker(f"watcher of {resource.name}", queueing.watcher(
                namespace=namespace,
                resource=resource,
                handler=functools.partial(handling.resource_handler,
                                          lifecycle=lifecycle,
                                          registry=registry,
                                          resource=resource,
                                          event_queue=event_queue,
                                          freeze=freeze_flag)))),  # freeze is only checked
        ])
    # On Ctrl+C or pod termination, cancel all tasks gracefully.
    if threading.current_thread() is threading.main_thread():
        # A received signal fulfils signal_flag, which the stop-flag checker awaits.
        loop.add_signal_handler(signal.SIGINT, signal_flag.set_result, signal.SIGINT)
        loop.add_signal_handler(signal.SIGTERM, signal_flag.set_result, signal.SIGTERM)
    else:
        logger.warning("OS signals are ignored: running not in the main thread.")
    return tasks
async def run_tasks(
        root_tasks: Tasks,
        *,
        ignored: Tasks = frozenset(),
) -> None:
    """
    Orchestrate the tasks and terminate them gracefully when needed.

    The root tasks are expected to run forever. Their number is limited. Once
    any of them exits, the whole operator and all other root tasks should exit.

    The root tasks, in turn, can spawn multiple sub-tasks of various purposes.
    They can be awaited, monitored, or fired-and-forgot.

    The hung tasks are those that were spawned during the operator runtime,
    and were not cancelled/exited on the root tasks termination. They are given
    some extra time to finish, after which they are forcely terminated too.

    :param root_tasks: the top-level tasks produced by `spawn_tasks`.
    :param ignored: tasks that pre-existed the operator and must not be touched.

    .. note::
        Due to implementation details, every task created after the operator's
        startup is assumed to be a task or a sub-task of the operator.
        In the end, all tasks are forcely cancelled. Even if those tasks were
        created by other means. There is no way to trace who spawned what.
        Only the tasks that existed before the operator startup are ignored
        (for example, those that spawned the operator itself).
    """
    try:
        # Run the infinite tasks until one of them fails/exits (they never exit normally).
        root_done, root_pending = await _wait(root_tasks, return_when=asyncio.FIRST_COMPLETED)
    except asyncio.CancelledError:
        # If the operator is cancelled, propagate the cancellation to all the sub-tasks.
        # There is no graceful period: cancel as soon as possible, but allow them to finish.
        root_cancelled, root_left = await _stop(root_tasks, title="Root", cancelled=True)
        hung_tasks = await _all_tasks(ignored=ignored)
        hung_cancelled, hung_left = await _stop(hung_tasks, title="Hung", cancelled=True)
        raise
    else:
        # If the operator is intact, but one of the root tasks has exited (successfully or not),
        # cancel all the remaining root tasks, and gracefully exit other spawned sub-tasks.
        root_cancelled, root_left = await _stop(root_pending, title="Root", cancelled=False)
        hung_tasks = await _all_tasks(ignored=ignored)
        try:
            # After the root tasks are all gone, cancel any spawned sub-tasks (e.g. handlers).
            # TODO: assumption! the loop is not fully ours! find a way to cancel our spawned tasks.
            hung_done, hung_pending = await _wait(hung_tasks, timeout=5.0)
        except asyncio.CancelledError:
            # If the operator is cancelled, propagate the cancellation to all the sub-tasks.
            hung_cancelled, hung_left = await _stop(hung_tasks, title="Hung", cancelled=True)
            raise
        else:
            # If the operator is intact, but the timeout is reached, forcely cancel the sub-tasks.
            hung_cancelled, hung_left = await _stop(hung_pending, title="Hung", cancelled=False)
        # If succeeded or if cancellation is silenced, re-raise from failed tasks (if any).
        await _reraise(root_done | root_cancelled | hung_done | hung_cancelled)
async def _all_tasks(
ignored: Tasks = frozenset(),
) -> Tasks:
current_task = asyncio.current_task()
return {task for task in asyncio.all_tasks()
if task is not current_task and task not in ignored}
async def _wait(
tasks: Tasks,
*,
timeout: Optional[float] = None,
return_when: Any = asyncio.ALL_COMPLETED,
) -> Tuple[Set[asyncio_Task], Set[asyncio_Task]]:
if not tasks:
return set(), set()
done, pending = await asyncio.wait(tasks, timeout=timeout, return_when=return_when)
return cast(Set[asyncio_Task], done), cast(Set[asyncio_Task], pending)
async def _stop(
        tasks: Tasks,
        title: str,
        cancelled: bool,
) -> Tuple[Set[asyncio_Task], Set[asyncio_Task]]:
    """Cancel the given tasks, wait for them to finish, and report the leftovers."""
    if not tasks:
        logger.debug(f"{title} tasks stopping is skipped: no tasks given.")
        return set(), set()

    # Ask every task to stop; awaiting below lets them run their cleanup.
    for task in tasks:
        task.cancel()

    try:
        done, pending = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
    except asyncio.CancelledError:
        # The waiting (current) task was cancelled while propagating the cancellation
        # (i.e. double-cancelled): fail without graceful cleanup — it is urgent, it seems.
        pending = {task for task in tasks if not task.done()}
        verb = 'are' if not pending else 'are not'
        reason = 'double-cancelled at stopping' if cancelled else 'cancelled at stopping'
        logger.debug(f"{title} tasks {verb} stopped: {reason}; tasks left: {pending!r}")
        raise  # the repeated cancellation, handled specially.

    # The cancellation propagated normally and the awaited (sub-) tasks exited:
    # consider it as a successful cleanup.
    verb = 'are' if not pending else 'are not'
    reason = 'cancelled normally' if cancelled else 'finished normally'
    logger.debug(f"{title} tasks {verb} stopped: {reason}; tasks left: {pending!r}")
    return cast(Set[asyncio_Task], done), cast(Set[asyncio_Task], pending)
async def _reraise(
tasks: Tasks,
) -> None:
for task in tasks:
try:
task.result() # can raise the regular (non-cancellation) exceptions.
except asyncio.CancelledError:
pass
async def _root_task_checker(
        name: Text,
        coro: Coroutine[Any, Any, Any],
) -> None:
    """Await a root task's coroutine and log how it ended (it should never end)."""
    try:
        await coro
    except asyncio.CancelledError:
        logger.debug(f"Root task {name!r} is cancelled.")
        raise
    except Exception as e:
        logger.error(f"Root task {name!r} is failed: %r", e)
        raise  # fail the process and its exit status
    # Root tasks are expected to run forever; a clean exit is suspicious.
    logger.warning(f"Root task {name!r} is finished unexpectedly.")
async def _stop_flag_checker(
        signal_flag: asyncio_Future,
        ready_flag: Optional[Flag],
        stop_flag: Optional[Flag],
) -> None:
    """Sleep until either an OS signal or the external stop-flag asks us to stop."""
    # TODO: collect the readiness of all root tasks instead, and set this one only when fully ready.
    # Notify the caller that we are ready to be executed.
    await _raise_flag(ready_flag)

    # Collect the stoppers that are actually configured.
    stoppers = []
    if signal_flag is not None:
        stoppers.append(signal_flag)
    if stop_flag is not None:
        stoppers.append(asyncio.create_task(_wait_flag(stop_flag)))

    # Sleep until any stopper fires, then report which one it was.
    try:
        done, _ = await asyncio.wait(stoppers, return_when=asyncio.FIRST_COMPLETED)
        result = await done.pop()
    except asyncio.CancelledError:
        pass  # the operator is stopping for any other reason
    else:
        if result is None:
            logger.info("Stop-flag is raised. Operator is stopping.")
        elif isinstance(result, signal.Signals):
            logger.info("Signal %s is received. Operator is stopping.", result.name)
        else:
            logger.info("Stop-flag is set to %r. Operator is stopping.", result)
def create_tasks(
        loop: asyncio.AbstractEventLoop,
        *arg: Any,
        **kwargs: Any,
) -> Tasks:
    """
    .. deprecated:: 1.0

    This is a synchronous interface to `spawn_tasks`.
    It is only kept for backward compatibility, as it was exposed
    via the public interface of the framework.
    """
    message = ("kopf.create_tasks() is deprecated: "
               "use kopf.spawn_tasks() or kopf.operator().")
    warnings.warn(message, DeprecationWarning)
    spawning = spawn_tasks(*arg, **kwargs)
    return loop.run_until_complete(spawning)
async def _wait_flag(
flag: Optional[Flag],
) -> Any:
"""
Wait for a flag to be raised.
Non-asyncio primitives are generally not our worry,
but we support them for convenience.
"""
if flag is None:
pass
elif isinstance(flag, asyncio.Future):
return await flag
elif isinstance(flag, asyncio.Event):
return await flag.wait()
elif isinstance(flag, concurrent.futures.Future):
loop = asyncio.get_running_loop()
return await loop.run_in_executor(None, flag.result)
elif isinstance(flag, threading.Event):
loop = asyncio.get_running_loop()
return await loop.run_in_executor(None, flag.wait)
else:
raise TypeError(f"Unsupported type of a flag: {flag!r}")
async def _raise_flag(
        flag: Optional[Flag],
) -> None:
    """
    Raise a flag.

    Sets the flag to its "signalled" state regardless of its concrete kind:
    futures get a ``None`` result, events get ``set()``.

    Non-asyncio primitives are generally not our worry,
    but we support them for convenience.
    """
    if flag is None:
        pass  # the caller did not ask to be notified: nothing to signal
    elif isinstance(flag, asyncio.Future):
        flag.set_result(None)
    elif isinstance(flag, asyncio.Event):
        flag.set()
    elif isinstance(flag, concurrent.futures.Future):
        flag.set_result(None)
    elif isinstance(flag, threading.Event):
        flag.set()
    else:
        raise TypeError(f"Unsupported type of a flag: {flag!r}")
import concurrent.futures
import functools
import logging
import signal
import threading
import warnings
from typing import (Optional, Collection, Union, Tuple, Set, Text, Any, Coroutine,
cast, TYPE_CHECKING)
from kopf.engines import peering
from kopf.engines import posting
from kopf.reactor import handling
from kopf.reactor import lifecycles
from kopf.reactor import queueing
from kopf.reactor import registries
if TYPE_CHECKING:
asyncio_Task = asyncio.Task[None]
asyncio_Future = asyncio.Future[Any]
else:
asyncio_Task = asyncio.Task
asyncio_Future = asyncio.Future
Flag = Union[asyncio_Future, asyncio.Event, concurrent.futures.Future, threading.Event]
Tasks = Collection[asyncio_Task]
logger = logging.getLogger(__name__)
def run(
loop: Optional[asyncio.AbstractEventLoop] = None,
lifecycle: Optional[lifecycles.LifeCycleFn] = None,
registry: Optional[registries.OperatorRegistry] = None,
standalone: bool = False,
priority: int = 0,
peering_name: Optional[str] = None,
namespace: Optional[str] = None,
) -> None:
"""
Run the whole operator synchronously.
This function should be used to run an operator in normal sync mode.
"""
loop = loop if loop is not None else asyncio.get_event_loop()
try:
loop.run_until_complete(operator(
lifecycle=lifecycle,
registry=registry,
standalone=standalone,
namespace=namespace,
priority=priority,
peering_name=peering_name,
))
except asyncio.CancelledError:
pass
async def operator(
lifecycle: Optional[lifecycles.LifeCycleFn] = None,
registry: Optional[registries.OperatorRegistry] = None,
standalone: bool = False,
priority: int = 0,
peering_name: Optional[str] = None,
namespace: Optional[str] = None,
stop_flag: Optional[Flag] = None,
ready_flag: Optional[Flag] = None,
) -> None:
"""
Run the whole operator asynchronously.
This function should be used to run an operator in an asyncio event-loop
if the operator is orchestrated explicitly and manually.
It is efficiently `spawn_tasks` + `run_tasks` with some safety.
"""
existing_tasks = await _all_tasks()
operator_tasks = await spawn_tasks(
lifecycle=lifecycle,
registry=registry,
standalone=standalone,
namespace=namespace,
priority=priority,
peering_name=peering_name,
stop_flag=stop_flag,
ready_flag=ready_flag,
)
await run_tasks(operator_tasks, ignored=existing_tasks)
async def spawn_tasks(
lifecycle: Optional[lifecycles.LifeCycleFn] = None,
registry: Optional[registries.OperatorRegistry] = None,
standalone: bool = False,
priority: int = 0,
peering_name: Optional[str] = None,
namespace: Optional[str] = None,
stop_flag: Optional[Flag] = None,
ready_flag: Optional[Flag] = None,
) -> Tasks:
"""
Spawn all the tasks needed to run the operator.
The tasks are properly inter-connected with the synchronisation primitives.
"""
loop = asyncio.get_running_loop()
# The freezer and the registry are scoped to this whole task-set, to sync them all.
lifecycle = lifecycle if lifecycle is not None else lifecycles.get_default_lifecycle()
registry = registry if registry is not None else registries.get_default_registry()
event_queue: posting.K8sEventQueue = asyncio.Queue(loop=loop)
freeze_flag: asyncio.Event = asyncio.Event(loop=loop)
signal_flag: asyncio_Future = asyncio.Future(loop=loop)
tasks = []
# A top-level task for external stopping by setting a stop-flag. Once set,
# this task will exit, and thus all other top-level tasks will be cancelled.
tasks.extend([
loop.create_task(_stop_flag_checker(
signal_flag=signal_flag,
ready_flag=ready_flag,
stop_flag=stop_flag,
)),
])
# K8s-event posting. Events are queued in-memory and posted in the background.
# NB: currently, it is a global task, but can be made per-resource or per-object.
tasks.extend([
loop.create_task(_root_task_checker("poster of events", posting.poster(
event_queue=event_queue))),
])
# Monitor the peers, unless explicitly disabled.
ourselves: Optional[peering.Peer] = peering.Peer.detect(
id=peering.detect_own_id(), priority=priority,
standalone=standalone, namespace=namespace, name=peering_name,
)
if ourselves:
tasks.extend([
loop.create_task(peering.peers_keepalive(
ourselves=ourselves)),
loop.create_task(_root_task_checker("watcher of peering", queueing.watcher(
namespace=namespace,
resource=ourselves.resource,
handler=functools.partial(peering.peers_handler,
ourselves=ourselves,
freeze=freeze_flag)))), # freeze is set/cleared
])
# Resource event handling, only once for every known resource (de-duplicated).
for resource in registry.resources:
tasks.extend([
loop.create_task(_root_task_checker(f"watcher of {resource.name}", queueing.watcher(
namespace=namespace,
resource=resource,
handler=functools.partial(handling.resource_handler,
lifecycle=lifecycle,
registry=registry,
resource=resource,
event_queue=event_queue,
freeze=freeze_flag)))), # freeze is only checked
])
# On Ctrl+C or pod termination, cancel all tasks gracefully.
if threading.current_thread() is threading.main_thread():
loop.add_signal_handler(signal.SIGINT, signal_flag.set_result, signal.SIGINT)
loop.add_signal_handler(signal.SIGTERM, signal_flag.set_result, signal.SIGTERM)
else:
logger.warning("OS signals are ignored: running not in the main thread.")
return tasks
async def run_tasks(
root_tasks: Tasks,
*,
ignored: Tasks = frozenset(),
) -> None:
"""
Orchestrate the tasks and terminate them gracefully when needed.
The root tasks are expected to run forever. Their number is limited. Once
any of them exits, the whole operator and all other root tasks should exit.
The root tasks, in turn, can spawn multiple sub-tasks of various purposes.
They can be awaited, monitored, or fired-and-forgot.
The hung tasks are those that were spawned during the operator runtime,
and were not cancelled/exited on the root tasks termination. They are given
some extra time to finish, after which they are forcely terminated too.
.. note::
Due to implementation details, every task created after the operator's
startup is assumed to be a task or a sub-task of the operator.
In the end, all tasks are forcely cancelled. Even if those tasks were
created by other means. There is no way to trace who spawned what.
Only the tasks that existed before the operator startup are ignored
(for example, those that spawned the operator itself).
"""
try:
# Run the infinite tasks until one of them fails/exits (they never exit normally).
root_done, root_pending = await _wait(root_tasks, return_when=asyncio.FIRST_COMPLETED)
except asyncio.CancelledError:
# If the operator is cancelled, propagate the cancellation to all the sub-tasks.
# There is no graceful period: cancel as soon as possible, but allow them to finish.
root_cancelled, root_left = await _stop(root_tasks, title="Root", cancelled=True)
hung_tasks = await _all_tasks(ignored=ignored)
hung_cancelled, hung_left = await _stop(hung_tasks, title="Hung", cancelled=True)
raise
else:
# If the operator is intact, but one of the root tasks has exited (successfully or not),
# cancel all the remaining root tasks, and gracefully exit other spawned sub-tasks.
root_cancelled, root_left = await _stop(root_pending, title="Root", cancelled=False)
hung_tasks = await _all_tasks(ignored=ignored)
try:
# After the root tasks are all gone, cancel any spawned sub-tasks (e.g. handlers).
# TODO: assumption! the loop is not fully ours! find a way to cancel our spawned tasks.
hung_done, hung_pending = await _wait(hung_tasks, timeout=5.0)
except asyncio.CancelledError:
# If the operator is cancelled, propagate the cancellation to all the sub-tasks.
hung_cancelled, hung_left = await _stop(hung_tasks, title="Hung", cancelled=True)
raise
else:
# If the operator is intact, but the timeout is reached, forcely cancel the sub-tasks.
hung_cancelled, hung_left = await _stop(hung_pending, title="Hung", cancelled=False)
# If succeeded or if cancellation is silenced, re-raise from failed tasks (if any).
await _reraise(root_done | root_cancelled | hung_done | hung_cancelled)
async def _all_tasks(
ignored: Tasks = frozenset(),
) -> Tasks:
current_task = asyncio.current_task()
return {task for task in asyncio.all_tasks()
if task is not current_task and task not in ignored}
async def _wait(
tasks: Tasks,
*,
timeout: Optional[float] = None,
return_when: Any = asyncio.ALL_COMPLETED,
) -> Tuple[Set[asyncio_Task], Set[asyncio_Task]]:
if not tasks:
return set(), set()
done, pending = await asyncio.wait(tasks, timeout=timeout, return_when=return_when)
return cast(Set[asyncio_Task], done), cast(Set[asyncio_Task], pending)
async def _stop(
tasks: Tasks,
title: str,
cancelled: bool,
) -> Tuple[Set[asyncio_Task], Set[asyncio_Task]]:
if not tasks:
logger.debug(f"{title} tasks stopping is skipped: no tasks given.")
return set(), set()
for task in tasks:
task.cancel()
# If the waiting (current) task is cancelled before the wait is over,
# propagate the cancellation to all the awaited (sub-) tasks, and let them finish.
try:
done, pending = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
except asyncio.CancelledError:
# If the waiting (current) task is cancelled while propagating the cancellation
# (i.e. double-cancelled), let it fail without graceful cleanup. It is urgent, it seems.
pending = {task for task in tasks if not task.done()}
are = 'are' if not pending else 'are not'
why = 'double-cancelled at stopping' if cancelled else 'cancelled at stopping'
logger.debug(f"{title} tasks {are} stopped: {why}; tasks left: {pending!r}")
raise # the repeated cancellation, handled specially.
else:
# If the cancellation is propagated normally and the awaited (sub-) tasks exited,
# consider it as a successful cleanup.
are = 'are' if not pending else 'are not'
why = 'cancelled normally' if cancelled else 'finished normally'
logger.debug(f"{title} tasks {are} stopped: {why}; tasks left: {pending!r}")
return cast(Set[asyncio_Task], done), cast(Set[asyncio_Task], pending)
async def _reraise(
tasks: Tasks,
) -> None:
for task in tasks:
try:
task.result() # can raise the regular (non-cancellation) exceptions.
except asyncio.CancelledError:
pass
async def _root_task_checker(
name: Text,
coro: Coroutine[Any, Any, Any],
) -> None:
try:
await coro
except asyncio.CancelledError:
logger.debug(f"Root task {name!r} is cancelled.")
raise
except Exception as e:
logger.error(f"Root task {name!r} is failed: %r", e)
raise # fail the process and its exit status
else:
logger.warning(f"Root task {name!r} is finished unexpectedly.")
async def _stop_flag_checker(
signal_flag: asyncio_Future,
ready_flag: Optional[Flag],
stop_flag: Optional[Flag],
) -> None:
# TODO: collect the readiness of all root tasks instead, and set this one only when fully ready.
# Notify the caller that we are ready to be executed.
await _raise_flag(ready_flag)
# Selects the flags to be awaited (if set).
flags = []
if signal_flag is not None:
flags.append(signal_flag)
if stop_flag is not None:
flags.append(asyncio.create_task(_wait_flag(stop_flag)))
# Wait until one of the stoppers is set/raised.
try:
done, pending = await asyncio.wait(flags, return_when=asyncio.FIRST_COMPLETED)
future = done.pop()
result = await future
except asyncio.CancelledError:
pass # operator is stopping for any other reason
else:
if result is None:
logger.info("Stop-flag is raised. Operator is stopping.")
elif isinstance(result, signal.Signals):
logger.info("Signal %s is received. Operator is stopping.", result.name)
else:
logger.info("Stop-flag is set to %r. Operator is stopping.", result)
def create_tasks(
loop: asyncio.AbstractEventLoop,
*arg: Any,
**kwargs: Any,
) -> Tasks:
"""
.. deprecated:: 1.0
This is a synchronous interface to `spawn_tasks`.
It is only kept for backward compatibility, as it was exposed
via the public interface of the framework.
"""
warnings.warn("kopf.create_tasks() is deprecated: "
"use kopf.spawn_tasks() or kopf.operator().",
DeprecationWarning)
return loop.run_until_complete(spawn_tasks(*arg, **kwargs))
async def _wait_flag(
flag: Optional[Flag],
) -> Any:
"""
Wait for a flag to be raised.
Non-asyncio primitives are generally not our worry,
but we support them for convenience.
"""
if flag is None:
pass
elif isinstance(flag, asyncio.Future):
return await flag
elif isinstance(flag, asyncio.Event):
return await flag.wait()
elif isinstance(flag, concurrent.futures.Future):
loop = asyncio.get_running_loop()
return await loop.run_in_executor(None, flag.result)
elif isinstance(flag, threading.Event):
loop = asyncio.get_running_loop()
return await loop.run_in_executor(None, flag.wait)
else:
raise TypeError(f"Unsupported type of a flag: {flag!r}")
async def _raise_flag(
flag: Optional[Flag],
) -> None:
"""
Raise a flag.
Non-asyncio primitives are generally not our worry,
but we support them for convenience.
"""
if flag is None:
pass
elif isinstance(flag, asyncio.Future):
flag.set_result(None)
elif isinstance(flag, asyncio.Event):
flag.set()
elif isinstance(flag, concurrent.futures.Future):
flag.set_result(None)
elif isinstance(flag, threading.Event):
flag.set()
else:
raise TypeError(f"Unsupported type of a flag: {flag!r}") | 0.764276 | 0.154535 |
import json
import requests
from src.clustering.models import ClusteringMethods
from src.encoding.models import ValueEncodings, TaskGenerationTypes
from src.hyperparameter_optimization.models import HyperOptAlgorithms, HyperOptLosses, HyperparameterOptimizationMethods
from src.labelling.models import LabelTypes, ThresholdTypes
from src.predictive_model.classification.models import ClassificationMethods
from src.predictive_model.regression.models import RegressionMethods
def create_classification_payload(
        split=1,
        encodings=None,
        encoding=None,
        labeling=None,
        clustering=None,
        classification=None,
        hyperparameter_optimization=None,
        incremental_train=None,
        model_hyperparameters=None):
    """Build the JSON payload for a classification training job.

    Every argument defaults to a sensible experiment configuration; pass
    explicit values to override. ``None`` placeholders replace the original
    mutable default arguments (lists/dicts evaluated once at import time),
    so one call can no longer leak state into the next.

    :param split: id of the uploaded train/test split to run against.
    :param encodings: list of encoding method names.
    :param encoding: encoding details (padding, generation type, prefix length, features).
    :param labeling: labelling configuration.
    :param clustering: list of clustering method names.
    :param classification: list of classification method names.
    :param hyperparameter_optimization: hyperparameter-optimizer configuration.
    :param incremental_train: ids of models to train incrementally.
    :param model_hyperparameters: extra per-model settings merged into the config.
    :return: dict ready to be POSTed to the ``/jobs/multiple`` endpoint.
    """
    if encodings is None:
        encodings = [ValueEncodings.SIMPLE_INDEX.value]
    if encoding is None:
        encoding = {
            "padding": "zero_padding",
            "generation_type": TaskGenerationTypes.ALL_IN_ONE.value,
            "prefix_length": 5,
            "features": []
        }
    if labeling is None:
        labeling = {
            "type": LabelTypes.ATTRIBUTE_STRING.value,
            "attribute_name": "creator",
            "threshold_type": ThresholdTypes.THRESHOLD_MEAN.value,
            "threshold": 0,
            "add_remaining_time": False,
            "add_elapsed_time": False,
            "add_executed_events": False,
            "add_resources_used": False,
            "add_new_traces": False
        }
    if clustering is None:
        clustering = [ClusteringMethods.NO_CLUSTER.value]
    if classification is None:
        classification = [ClassificationMethods.MULTINOMIAL_NAIVE_BAYES.value]
    if hyperparameter_optimization is None:
        hyperparameter_optimization = {
            "type": HyperparameterOptimizationMethods.HYPEROPT.value,
            "max_evaluations": 3,
            "performance_metric": HyperOptLosses.AUC.value,
            "algorithm_type": HyperOptAlgorithms.TPE.value
        }
    if incremental_train is None:
        incremental_train = []
    config = {
        "clusterings": clustering,
        "labelling": labeling,
        "encodings": encodings,
        "encoding": encoding,
        "hyperparameter_optimizer": hyperparameter_optimization,
        "methods": classification,
        "incremental_train": incremental_train,
        "create_models": True,
    }
    if model_hyperparameters:
        config.update(model_hyperparameters)
    return {"type": "classification", "split_id": split, "config": config}
def create_regression_payload(
        split=1,
        encodings=None,
        encoding=None,
        labeling=None,
        clustering=None,
        regression=None,
        hyperparameter_optimization=None,
        incremental_train=None,
        model_hyperparameters=None):
    """Build the JSON payload for a regression training job.

    Every argument defaults to a sensible experiment configuration; pass
    explicit values to override. ``None`` placeholders replace the original
    mutable default arguments (lists/dicts evaluated once at import time),
    so one call can no longer leak state into the next.

    :param split: id of the uploaded train/test split to run against.
    :param encodings: list of encoding method names.
    :param encoding: encoding details (padding, generation type, prefix length, features).
    :param labeling: labelling configuration.
    :param clustering: list of clustering method names.
    :param regression: list of regression method names.
    :param hyperparameter_optimization: hyperparameter-optimizer configuration.
    :param incremental_train: ids of models to train incrementally.
    :param model_hyperparameters: extra per-model settings merged into the config.
    :return: dict ready to be POSTed to the ``/jobs/multiple`` endpoint.
    """
    if encodings is None:
        encodings = [ValueEncodings.SIMPLE_INDEX.value]
    if encoding is None:
        encoding = {
            "padding": "zero_padding",
            "generation_type": TaskGenerationTypes.ALL_IN_ONE.value,
            "prefix_length": 5,
            "features": []
        }
    if labeling is None:
        labeling = {
            "type": LabelTypes.ATTRIBUTE_STRING.value,
            "attribute_name": "creator",
            "threshold_type": ThresholdTypes.THRESHOLD_MEAN.value,
            "threshold": 0,
            "add_remaining_time": False,
            "add_elapsed_time": False,
            "add_executed_events": False,
            "add_resources_used": False,
            "add_new_traces": False
        }
    if clustering is None:
        clustering = [ClusteringMethods.NO_CLUSTER.value]
    if regression is None:
        regression = [RegressionMethods.RANDOM_FOREST.value]
    if hyperparameter_optimization is None:
        hyperparameter_optimization = {
            "type": HyperparameterOptimizationMethods.HYPEROPT.value,
            "max_evaluations": 3,
            "performance_metric": HyperOptLosses.RMSE.value,
            "algorithm_type": HyperOptAlgorithms.TPE.value
        }
    if incremental_train is None:
        incremental_train = []
    config = {
        "clusterings": clustering,
        "labelling": labeling,
        "encodings": encodings,
        "encoding": encoding,
        "hyperparameter_optimizer": hyperparameter_optimization,
        "methods": regression,
        "incremental_train": incremental_train,
        "create_models": True,
    }
    if model_hyperparameters:
        config.update(model_hyperparameters)
    return {"type": "regression", "split_id": split, "config": config}
def upload_split(
        train='cache/log_cache/test_logs/general_example_train.xes',
        test='cache/log_cache/test_logs/general_example_test.xes',
        server_name="0.0.0.0",
        server_port='8000'
):
    """Upload a train/test log pair and return the id of the created split.

    :param train: path of the training log (.xes).
    :param test: path of the test log (.xes).
    :param server_name: host of the running backend.
    :param server_port: port of the running backend.
    :return: the ``id`` of the newly created split, as reported by the backend.
    """
    url = 'http://' + server_name + ':' + server_port + '/splits/multiple'
    # Open read-only in binary mode (the original 'r+' write access is not
    # needed for an upload) and close the handles deterministically instead
    # of leaking them after the request.
    with open(train, 'rb') as train_file, open(test, 'rb') as test_file:
        r = requests.post(url, files={'trainingSet': train_file, 'testSet': test_file})
    return json.loads(r.text)['id']
def send_job_request(
        payload,
        server_name="0.0.0.0",
        server_port='8000'
):
    """POST a job-creation payload to the backend and return the decoded reply.

    :param payload: the job description dict (see the payload builders above).
    :param server_name: host of the running backend.
    :param server_port: port of the running backend.
    :return: the decoded JSON response.
    """
    url = 'http://' + server_name + ':' + server_port + '/jobs/multiple'
    response = requests.post(url, json=payload, headers={'Content-type': 'application/json'})
    return json.loads(response.text)
def retrieve_job(
        config,
        server_name="0.0.0.0",
        server_port='8000'
):
    """Fetch the jobs matching *config* from the backend.

    :param config: filter payload, sent as the JSON body of the request.
    :param server_name: host of the running backend.
    :param server_port: port of the running backend.
    """
    # NOTE(review): a GET request with a JSON body is unusual; presumably the
    # backend reads filters from the body — confirm before switching to query params.
    r = requests.get(
        'http://' + server_name + ':' + server_port + '/jobs/',
        headers={'Content-type': 'application/json'},
        json=config
    )
    return json.loads(r.text)
import requests
from src.clustering.models import ClusteringMethods
from src.encoding.models import ValueEncodings, TaskGenerationTypes
from src.hyperparameter_optimization.models import HyperOptAlgorithms, HyperOptLosses, HyperparameterOptimizationMethods
from src.labelling.models import LabelTypes, ThresholdTypes
from src.predictive_model.classification.models import ClassificationMethods
from src.predictive_model.regression.models import RegressionMethods
def create_classification_payload(
split=1,
encodings=[ValueEncodings.SIMPLE_INDEX.value],
encoding={
"padding": "zero_padding",
"generation_type": TaskGenerationTypes.ALL_IN_ONE.value,
"prefix_length": 5,
"features": []
},
labeling={
"type": LabelTypes.ATTRIBUTE_STRING.value,
"attribute_name": "creator",
"threshold_type": ThresholdTypes.THRESHOLD_MEAN.value,
"threshold": 0,
"add_remaining_time": False,
"add_elapsed_time": False,
"add_executed_events": False,
"add_resources_used": False,
"add_new_traces": False
},
clustering=[ClusteringMethods.NO_CLUSTER.value],
classification=[ClassificationMethods.MULTINOMIAL_NAIVE_BAYES.value],
hyperparameter_optimization={
"type": HyperparameterOptimizationMethods.HYPEROPT.value,
"max_evaluations": 3,
"performance_metric": HyperOptLosses.AUC.value,
"algorithm_type": HyperOptAlgorithms.TPE.value
},
incremental_train=[],
model_hyperparameters={}):
config = {
"clusterings": clustering,
"labelling": labeling,
"encodings": encodings,
"encoding": encoding,
"hyperparameter_optimizer": hyperparameter_optimization,
"methods": classification,
"incremental_train": incremental_train,
"create_models": True,
}
config.update(model_hyperparameters)
return {"type": "classification", "split_id": split, "config": config}
def create_regression_payload(
        split=1,
        encodings=None,
        encoding=None,
        labeling=None,
        clustering=None,
        regression=None,
        hyperparameter_optimization=None,
        incremental_train=None,
        model_hyperparameters=None):
    """Build the request payload for a regression job.

    Mirrors ``create_classification_payload``: the original mutable default
    arguments (shared across calls) are replaced with `None` sentinels that
    are expanded to fresh copies on every call, preserving the old values.

    Args:
        split: id of the split the job runs on.
        encodings: list of encoding method names.
        encoding: encoding configuration dict.
        labeling: labelling configuration dict.
        clustering: list of clustering method names.
        regression: list of regression method names.
        hyperparameter_optimization: hyperopt configuration dict.
        incremental_train: ids of models to train incrementally.
        model_hyperparameters: extra per-model settings merged into the config.

    Returns:
        A dict of shape {"type", "split_id", "config"} ready to POST.
    """
    if encodings is None:
        encodings = [ValueEncodings.SIMPLE_INDEX.value]
    if encoding is None:
        encoding = {
            "padding": "zero_padding",
            "generation_type": TaskGenerationTypes.ALL_IN_ONE.value,
            "prefix_length": 5,
            "features": []
        }
    if labeling is None:
        labeling = {
            "type": LabelTypes.ATTRIBUTE_STRING.value,
            "attribute_name": "creator",
            "threshold_type": ThresholdTypes.THRESHOLD_MEAN.value,
            "threshold": 0,
            "add_remaining_time": False,
            "add_elapsed_time": False,
            "add_executed_events": False,
            "add_resources_used": False,
            "add_new_traces": False
        }
    if clustering is None:
        clustering = [ClusteringMethods.NO_CLUSTER.value]
    if regression is None:
        regression = [RegressionMethods.RANDOM_FOREST.value]
    if hyperparameter_optimization is None:
        hyperparameter_optimization = {
            "type": HyperparameterOptimizationMethods.HYPEROPT.value,
            "max_evaluations": 3,
            "performance_metric": HyperOptLosses.RMSE.value,
            "algorithm_type": HyperOptAlgorithms.TPE.value
        }
    if incremental_train is None:
        incremental_train = []
    config = {
        "clusterings": clustering,
        "labelling": labeling,
        "encodings": encodings,
        "encoding": encoding,
        "hyperparameter_optimizer": hyperparameter_optimization,
        "methods": regression,
        "incremental_train": incremental_train,
        "create_models": True,
    }
    if model_hyperparameters:
        config.update(model_hyperparameters)
    return {"type": "regression", "split_id": split, "config": config}
def upload_split(
        train='cache/log_cache/test_logs/general_example_train.xes',
        test='cache/log_cache/test_logs/general_example_test.xes',
        server_name="0.0.0.0",
        server_port='8000'
):
    """Upload a train/test log pair to the splits endpoint.

    Args:
        train: path to the training-set XES log.
        test: path to the test-set XES log.
        server_name: REST API host.
        server_port: REST API port (string, joined into the URL).

    Returns:
        The id of the created split, parsed from the JSON response.
    """
    url = 'http://' + server_name + ':' + server_port + '/splits/multiple'
    # Context managers guarantee the handles are closed (the original leaked
    # them). Read-only binary mode replaces 'r+': uploads never write to the
    # local files, and binary is the correct mode for multipart file bodies.
    with open(train, 'rb') as train_file, open(test, 'rb') as test_file:
        r = requests.post(
            url,
            files={'trainingSet': train_file, 'testSet': test_file}
        )
    return json.loads(r.text)['id']
def send_job_request(
        payload,
        server_name="0.0.0.0",
        server_port='8000'
):
    """POST `payload` to the multiple-jobs endpoint and return the decoded JSON."""
    endpoint = 'http://{}:{}/jobs/multiple'.format(server_name, server_port)
    response = requests.post(
        endpoint,
        json=payload,
        headers={'Content-type': 'application/json'}
    )
    return json.loads(response.text)
def retrieve_job(
        config,
        server_name="0.0.0.0",
        server_port='8000'
):
    """Query the jobs endpoint with `config` and return the decoded JSON reply."""
    endpoint = 'http://{}:{}/jobs/'.format(server_name, server_port)
    response = requests.get(
        endpoint,
        headers={'Content-type': 'application/json'},
        json=config
    )
    return json.loads(response.text)
import sys
import logging
import numpy as np
# Configure the root logger so INFO-level messages are emitted.
# NOTE(review): calling basicConfig() at import time mutates process-wide
# logging configuration — confirm this is intended for a library module.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Evaluator:
    """Accumulates loss and cross-modal retrieval metrics over an eval run.

    The dataset layout is assumed to be 5 captions per image: rows
    [5*i, 5*i + 5) of ``embedded_captions`` belong to image ``i``, and
    ``embedded_images`` repeats that image's embedding on the same rows
    (so ``embedded_images[0::5]`` yields one row per unique image).
    """

    def __init__(self, num_samples: int = 0, num_features: int = 0):
        self.loss = 0.0
        # Sentinel: any real loss beats this on the first comparison.
        self.best_loss = sys.maxsize
        self.best_image2text_recall_at_k = -1.0
        self.cur_image2text_recall_at_k = -1.0
        self.best_text2image_recall_at_k = -1.0
        self.cur_text2image_recall_at_k = -1.0
        # Row offset where the next batch of embeddings will be written.
        self.index_update = 0
        self.num_samples = num_samples
        self.num_features = num_features
        self.embedded_images = np.zeros((self.num_samples, self.num_features))
        self.embedded_captions = np.zeros((self.num_samples, self.num_features))

    def reset_all_vars(self) -> None:
        """Reset per-epoch state (loss, buffers, current recalls).

        The ``best_*`` values intentionally survive across epochs.
        """
        self.loss = 0.0  # was `0` (int); use a float for type consistency
        self.index_update = 0
        self.embedded_images = np.zeros((self.num_samples, self.num_features))
        self.embedded_captions = np.zeros((self.num_samples, self.num_features))
        self.cur_text2image_recall_at_k = -1.0
        self.cur_image2text_recall_at_k = -1.0

    def update_metrics(self, loss: float) -> None:
        """Accumulate a batch loss into the running epoch loss."""
        self.loss += loss

    def update_embeddings(
        self, embedded_images: np.ndarray, embedded_captions: np.ndarray
    ) -> None:
        """Write a batch of image/caption embeddings into the buffers.

        Batches are stored sequentially starting at ``index_update``; both
        arrays must have the same number of rows.
        """
        num_samples = embedded_images.shape[0]
        stop = self.index_update + num_samples
        self.embedded_images[self.index_update : stop, :] = embedded_images
        self.embedded_captions[self.index_update : stop, :] = embedded_captions
        self.index_update = stop

    def is_best_loss(self) -> bool:
        """True if the accumulated loss beats the best loss seen so far."""
        return self.loss < self.best_loss

    def update_best_loss(self):
        """Record the current accumulated loss as the best."""
        self.best_loss = self.loss

    def is_best_image2text_recall_at_k(self, k: int) -> bool:
        """Compute image->text recall@k; True if it beats the best so far."""
        self.cur_image2text_recall_at_k = self.image2text_recall_at_k(k)
        return self.cur_image2text_recall_at_k > self.best_image2text_recall_at_k

    def update_best_image2text_recall_at_k(self):
        """Record the current image->text recall@k as the best."""
        self.best_image2text_recall_at_k = self.cur_image2text_recall_at_k

    def is_best_text2image_recall_at_k(self, k: int) -> bool:
        """Compute text->image recall@k; True if it beats the best so far."""
        self.cur_text2image_recall_at_k = self.text2image_recall_at_k(k)
        return self.cur_text2image_recall_at_k > self.best_text2image_recall_at_k

    def update_best_text2image_recall_at_k(self):
        """Record the current text->image recall@k as the best."""
        self.best_text2image_recall_at_k = self.cur_text2image_recall_at_k

    def image2text_recall_at_k(self, k: int) -> float:
        """Computes the recall at K when doing image to text retrieval.

        For each unique image, all captions are ranked by dot-product
        similarity and the best rank among its 5 ground-truth captions is
        taken.

        Args:
            k: Recall at K (this is K).

        Returns:
            The recall at K.
        """
        num_images = self.embedded_images.shape[0] // 5
        ranks = np.zeros(num_images)
        for index in range(num_images):
            # Query image (its embedding is repeated on rows 5i..5i+4).
            query_image = self.embedded_images[5 * index]
            similarities = np.dot(query_image, self.embedded_captions.T).flatten()
            order = np.argsort(similarities)[::-1]
            # Best (lowest) rank among the image's 5 ground-truth captions.
            rank = sys.maxsize
            for i in range(5 * index, 5 * index + 5):
                position = np.where(order == i)[0][0]
                if position < rank:
                    rank = position
            ranks[index] = rank
        return len(np.where(ranks < k)[0]) / len(ranks)

    def text2image_recall_at_k(self, k) -> float:
        """Computes the recall at K when doing text to image retrieval.

        Each caption is ranked against one embedding per unique image; a
        caption scores its ground-truth image's position in that ranking.

        Args:
            k: Recall at K (this is K).

        Returns:
            The recall at K.
        """
        num_captions = self.embedded_captions.shape[0]
        ranks = np.zeros(num_captions)
        unique_images = self.embedded_images[0::5]
        # Iterate once per 5-caption group. The original looped over every
        # caption index; the iterations past ceil(N/5) only produced empty
        # slices and did no work, so skipping them is behavior-preserving.
        num_groups = -(-num_captions // 5)  # ceil division
        for index in range(num_groups):
            query_captions = self.embedded_captions[5 * index : 5 * index + 5]
            similarities = np.dot(query_captions, unique_images.T)
            for i in range(similarities.shape[0]):
                order = np.argsort(similarities[i])[::-1]
                ranks[5 * index + i] = np.where(order == index)[0][0]
        return len(np.where(ranks < k)[0]) / len(ranks)
import logging
import numpy as np
# Configure the root logger so INFO-level messages are emitted.
# NOTE(review): calling basicConfig() at import time mutates process-wide
# logging configuration — confirm this is intended for a library module.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Evaluator:
    """Accumulates loss and cross-modal retrieval metrics over an eval run.

    The dataset layout is assumed to be 5 captions per image: rows
    [5*i, 5*i + 5) of ``embedded_captions`` belong to image ``i``, and
    ``embedded_images`` repeats that image's embedding on the same rows
    (so ``embedded_images[0::5]`` yields one row per unique image).
    """

    def __init__(self, num_samples: int = 0, num_features: int = 0):
        self.loss = 0.0
        # Sentinel: any real loss beats this on the first comparison.
        self.best_loss = sys.maxsize
        self.best_image2text_recall_at_k = -1.0
        self.cur_image2text_recall_at_k = -1.0
        self.best_text2image_recall_at_k = -1.0
        self.cur_text2image_recall_at_k = -1.0
        # Row offset where the next batch of embeddings will be written.
        self.index_update = 0
        self.num_samples = num_samples
        self.num_features = num_features
        self.embedded_images = np.zeros((self.num_samples, self.num_features))
        self.embedded_captions = np.zeros((self.num_samples, self.num_features))

    def reset_all_vars(self) -> None:
        """Reset per-epoch state (loss, buffers, current recalls).

        The ``best_*`` values intentionally survive across epochs.
        """
        self.loss = 0.0  # was `0` (int); use a float for type consistency
        self.index_update = 0
        self.embedded_images = np.zeros((self.num_samples, self.num_features))
        self.embedded_captions = np.zeros((self.num_samples, self.num_features))
        self.cur_text2image_recall_at_k = -1.0
        self.cur_image2text_recall_at_k = -1.0

    def update_metrics(self, loss: float) -> None:
        """Accumulate a batch loss into the running epoch loss."""
        self.loss += loss

    def update_embeddings(
        self, embedded_images: np.ndarray, embedded_captions: np.ndarray
    ) -> None:
        """Write a batch of image/caption embeddings into the buffers.

        Batches are stored sequentially starting at ``index_update``; both
        arrays must have the same number of rows.
        """
        num_samples = embedded_images.shape[0]
        stop = self.index_update + num_samples
        self.embedded_images[self.index_update : stop, :] = embedded_images
        self.embedded_captions[self.index_update : stop, :] = embedded_captions
        self.index_update = stop

    def is_best_loss(self) -> bool:
        """True if the accumulated loss beats the best loss seen so far."""
        return self.loss < self.best_loss

    def update_best_loss(self):
        """Record the current accumulated loss as the best."""
        self.best_loss = self.loss

    def is_best_image2text_recall_at_k(self, k: int) -> bool:
        """Compute image->text recall@k; True if it beats the best so far."""
        self.cur_image2text_recall_at_k = self.image2text_recall_at_k(k)
        return self.cur_image2text_recall_at_k > self.best_image2text_recall_at_k

    def update_best_image2text_recall_at_k(self):
        """Record the current image->text recall@k as the best."""
        self.best_image2text_recall_at_k = self.cur_image2text_recall_at_k

    def is_best_text2image_recall_at_k(self, k: int) -> bool:
        """Compute text->image recall@k; True if it beats the best so far."""
        self.cur_text2image_recall_at_k = self.text2image_recall_at_k(k)
        return self.cur_text2image_recall_at_k > self.best_text2image_recall_at_k

    def update_best_text2image_recall_at_k(self):
        """Record the current text->image recall@k as the best."""
        self.best_text2image_recall_at_k = self.cur_text2image_recall_at_k

    def image2text_recall_at_k(self, k: int) -> float:
        """Computes the recall at K when doing image to text retrieval.

        For each unique image, all captions are ranked by dot-product
        similarity and the best rank among its 5 ground-truth captions is
        taken.

        Args:
            k: Recall at K (this is K).

        Returns:
            The recall at K.
        """
        num_images = self.embedded_images.shape[0] // 5
        ranks = np.zeros(num_images)
        for index in range(num_images):
            # Query image (its embedding is repeated on rows 5i..5i+4).
            query_image = self.embedded_images[5 * index]
            similarities = np.dot(query_image, self.embedded_captions.T).flatten()
            order = np.argsort(similarities)[::-1]
            # Best (lowest) rank among the image's 5 ground-truth captions.
            rank = sys.maxsize
            for i in range(5 * index, 5 * index + 5):
                position = np.where(order == i)[0][0]
                if position < rank:
                    rank = position
            ranks[index] = rank
        return len(np.where(ranks < k)[0]) / len(ranks)

    def text2image_recall_at_k(self, k) -> float:
        """Computes the recall at K when doing text to image retrieval.

        Each caption is ranked against one embedding per unique image; a
        caption scores its ground-truth image's position in that ranking.

        Args:
            k: Recall at K (this is K).

        Returns:
            The recall at K.
        """
        num_captions = self.embedded_captions.shape[0]
        ranks = np.zeros(num_captions)
        unique_images = self.embedded_images[0::5]
        # Iterate once per 5-caption group. The original looped over every
        # caption index; the iterations past ceil(N/5) only produced empty
        # slices and did no work, so skipping them is behavior-preserving.
        num_groups = -(-num_captions // 5)  # ceil division
        for index in range(num_groups):
            query_captions = self.embedded_captions[5 * index : 5 * index + 5]
            similarities = np.dot(query_captions, unique_images.T)
            for i in range(similarities.shape[0]):
                order = np.argsort(similarities[i])[::-1]
                ranks[5 * index + i] = np.where(order == index)[0][0]
        return len(np.where(ranks < k)[0]) / len(ranks)
import numpy as np
from gradient_algorithm import gradient_algorithm_var_alpha, gradient_algorithm_fixed_alpha, gradient_algorithm_linesearch
from conjugate_gradient_algorithm import conjugate_gradient_algorithm, conjugate_gradient_algorithm_linesearch
from newton_algorithm import newton_algorithm, newton_algorithm_linesearch
from quasi_newton_algorithm import quasi_newton_algorithm, quasi_newton_algorithm_linesearch
from naive_random_search_algorithm import naive_random_search_algorithm
from simulated_annealing_algorithm import simulated_annealing_algorithm
from particle_swam_optimization_algorithm import particle_swam_optimization_algorithm
from print_report import print_report
import time
reg_coef = 0.01  # Tikhonov regularization coefficient

# A @ X = B (matrix form). We minimize
#   f(X) = norm(A @ X - B)^2 + reg_coef * X.T @ X
#        = X.T @ (A.T @ A) @ X - 2 * (A.T @ B).T @ X + B.T @ B + reg_coef * X.T @ X
N_eqs = 5000   # number of equations
N_pars = 10    # number of parameters
A = np.random.rand(N_eqs, N_pars)
B = np.random.rand(N_eqs, 1)
Q = A.T @ A

func = lambda X: X.T @ Q @ X - 2 * (A.T @ B).T @ X + B.T @ B + reg_coef * X.T @ X
func_grad = lambda X: 2 * Q @ X - 2 * (A.T @ B) + 2 * reg_coef * X
func_hessian = lambda X: 2 * Q + 2 * reg_coef

# Objective for the naive random walk / simulated annealing algorithms.
# NOTE(review): the reg_coef * X.T @ X term is added inside the residual
# before taking the norm rather than as a separate penalty — confirm intended.
func_error = lambda X: np.linalg.norm((A @ X - B + reg_coef * X.T @ X), axis=0) ** 2


def func_error_ps(X):
    """Objective for particle swarm optimization.

    Particles lie along the row dimension (axis 0); returns a column vector
    with one squared-residual norm per particle.
    """
    a = np.linalg.norm((X @ A.T - B.T + reg_coef * np.diag(X @ X.T).reshape(-1, 1)), axis=1) ** 2
    return a.reshape(-1, 1)


_SEP = '***********************************************************************'

# Shared stopping criteria for the deterministic (gradient-type) solvers.
# NOTE(review): 10e-6 equals 1e-5 — possibly 1e-6 was intended; kept as-is.
_BASE_OPTIONS = {'tolerance_x': 10e-6, 'tolerance_y': 10e-8, 'N_iter_max': 1000}


def _run_demo(title, runner):
    """Print the demo banner, time `runner()`, report the result; return X."""
    print(_SEP)
    print(title)
    start = time.time()
    X, report = runner()
    end = time.time()
    print_report(func, report)
    print('Elapsed time [s]: %0.5f' % (end - start))
    print(_SEP + '\n')
    return X


def _zero_x0():
    """Fresh all-zeros starting point (a new array per solver call)."""
    return np.zeros((N_pars, 1))


# Closed-form baseline: normal equation with Tikhonov regularization.
print(_SEP)
print('Normal equation with Tikhonov regularization')
start = time.time()
X = np.linalg.solve(Q + reg_coef * np.eye(N_pars), A.T @ B)
end = time.time()
print("Norm(X): %0.3f" % (np.linalg.norm(X)))
print('f(X): %0.3f' % (func(X)))
print('Elapsed time [s]: %0.5f' % (end - start))
print(_SEP + '\n')

X = _run_demo('Gradient algorithm with variable step size (steepest descent)',
              lambda: gradient_algorithm_var_alpha(_zero_x0(), func, func_grad, func_hessian, dict(_BASE_OPTIONS)))
X = _run_demo('Gradient algorithm with fixed step size',
              lambda: gradient_algorithm_fixed_alpha(_zero_x0(), func, func_grad, func_hessian, dict(_BASE_OPTIONS)))
X = _run_demo('Gradient algorithm with variable step size based on line search',
              lambda: gradient_algorithm_linesearch(_zero_x0(), func, func_grad, dict(_BASE_OPTIONS)))
X = _run_demo('Conjugate gradient algorithm with variable step size (steepest descent)',
              lambda: conjugate_gradient_algorithm(_zero_x0(), func, func_grad, func_hessian, dict(_BASE_OPTIONS)))
X = _run_demo('Conjugate gradient algorithm with variable step size based on line search',
              lambda: conjugate_gradient_algorithm_linesearch(_zero_x0(), func, func_grad, dict(_BASE_OPTIONS)))
X = _run_demo("Newton's algorithm",
              lambda: newton_algorithm(_zero_x0(), func, func_grad, func_hessian, dict(_BASE_OPTIONS)))
X = _run_demo("Newton's algorithm with variable step size based on line search",
              lambda: newton_algorithm_linesearch(_zero_x0(), func, func_grad, func_hessian, dict(_BASE_OPTIONS)))
X = _run_demo('Quasi-Newton algorithm with variable step size (steepest descent)',
              lambda: quasi_newton_algorithm(_zero_x0(), func, func_grad, func_hessian, dict(_BASE_OPTIONS)))
X = _run_demo('Quasi-Newton algorithm with variable step size based on line search',
              lambda: quasi_newton_algorithm_linesearch(_zero_x0(), func, func_grad, dict(_BASE_OPTIONS)))

# Stochastic solvers search inside a box from a random starting point.
X_lower = -1 * np.ones((N_pars, 1))  # X lower bound
X_upper = 1 * np.ones((N_pars, 1))   # X upper bound


def _random_x0():
    """Uniform random starting point inside [X_lower, X_upper]."""
    return X_lower + (X_upper - X_lower) * np.random.rand(X_lower.size, 1)


_rw_options = dict(_BASE_OPTIONS, N_iter_max=10000, x_lower=X_lower,
                   x_upper=X_upper, alpha=1.0)
X = _run_demo('Naive random walk algorithm',
              lambda: naive_random_search_algorithm(_random_x0(), func_error, _rw_options))

_sa_options = dict(_BASE_OPTIONS, N_iter_max=10000, x_lower=X_lower,
                   x_upper=X_upper, alpha=1,
                   gamma=1.0)  # gamma controls temperature decay, gamma > 0
X = _run_demo('Simulated annealing algorithm',
              lambda: simulated_annealing_algorithm(_random_x0(), func_error, _sa_options))

_pso_options = dict(_BASE_OPTIONS, tolerance_x=10e-8,
                    x_lower=X_lower, x_upper=X_upper, alpha=1.0,
                    d_lower=-0.25,  # direction (aka velocity) lower bound
                    d_upper=0.25,   # direction (aka velocity) upper bound
                    N_ps=1000,      # number of particles
                    w=1.0,          # inertial constant, w < 1
                    c1=1.0,         # cognitive/independent component, c1 ~ 2
                    c2=0)           # social component, c2 ~ 2
X = _run_demo('Particle swarm optimization algorithm',
              lambda: particle_swam_optimization_algorithm(func_error_ps, _pso_options))
from gradient_algorithm import gradient_algorithm_var_alpha, gradient_algorithm_fixed_alpha, gradient_algorithm_linesearch
from conjugate_gradient_algorithm import conjugate_gradient_algorithm, conjugate_gradient_algorithm_linesearch
from newton_algorithm import newton_algorithm, newton_algorithm_linesearch
from quasi_newton_algorithm import quasi_newton_algorithm, quasi_newton_algorithm_linesearch
from naive_random_search_algorithm import naive_random_search_algorithm
from simulated_annealing_algorithm import simulated_annealing_algorithm
from particle_swam_optimization_algorithm import particle_swam_optimization_algorithm
from print_report import print_report
import time
reg_coef = 0.01  # Tikhonov regularization coefficient

# A @ X = B (matrix form). We minimize
#   f(X) = norm(A @ X - B)^2 + reg_coef * X.T @ X
#        = X.T @ (A.T @ A) @ X - 2 * (A.T @ B).T @ X + B.T @ B + reg_coef * X.T @ X
N_eqs = 5000   # number of equations
N_pars = 10    # number of parameters
A = np.random.rand(N_eqs, N_pars)
B = np.random.rand(N_eqs, 1)
Q = A.T @ A

func = lambda X: X.T @ Q @ X - 2 * (A.T @ B).T @ X + B.T @ B + reg_coef * X.T @ X
func_grad = lambda X: 2 * Q @ X - 2 * (A.T @ B) + 2 * reg_coef * X
func_hessian = lambda X: 2 * Q + 2 * reg_coef

# Objective for the naive random walk / simulated annealing algorithms.
# NOTE(review): the reg_coef * X.T @ X term is added inside the residual
# before taking the norm rather than as a separate penalty — confirm intended.
func_error = lambda X: np.linalg.norm((A @ X - B + reg_coef * X.T @ X), axis=0) ** 2


def func_error_ps(X):
    """Objective for particle swarm optimization.

    Particles lie along the row dimension (axis 0); returns a column vector
    with one squared-residual norm per particle.
    """
    a = np.linalg.norm((X @ A.T - B.T + reg_coef * np.diag(X @ X.T).reshape(-1, 1)), axis=1) ** 2
    return a.reshape(-1, 1)


_SEP = '***********************************************************************'

# Shared stopping criteria for the deterministic (gradient-type) solvers.
# NOTE(review): 10e-6 equals 1e-5 — possibly 1e-6 was intended; kept as-is.
_BASE_OPTIONS = {'tolerance_x': 10e-6, 'tolerance_y': 10e-8, 'N_iter_max': 1000}


def _run_demo(title, runner):
    """Print the demo banner, time `runner()`, report the result; return X."""
    print(_SEP)
    print(title)
    start = time.time()
    X, report = runner()
    end = time.time()
    print_report(func, report)
    print('Elapsed time [s]: %0.5f' % (end - start))
    print(_SEP + '\n')
    return X


def _zero_x0():
    """Fresh all-zeros starting point (a new array per solver call)."""
    return np.zeros((N_pars, 1))


# Closed-form baseline: normal equation with Tikhonov regularization.
print(_SEP)
print('Normal equation with Tikhonov regularization')
start = time.time()
X = np.linalg.solve(Q + reg_coef * np.eye(N_pars), A.T @ B)
end = time.time()
print("Norm(X): %0.3f" % (np.linalg.norm(X)))
print('f(X): %0.3f' % (func(X)))
print('Elapsed time [s]: %0.5f' % (end - start))
print(_SEP + '\n')

X = _run_demo('Gradient algorithm with variable step size (steepest descent)',
              lambda: gradient_algorithm_var_alpha(_zero_x0(), func, func_grad, func_hessian, dict(_BASE_OPTIONS)))
X = _run_demo('Gradient algorithm with fixed step size',
              lambda: gradient_algorithm_fixed_alpha(_zero_x0(), func, func_grad, func_hessian, dict(_BASE_OPTIONS)))
X = _run_demo('Gradient algorithm with variable step size based on line search',
              lambda: gradient_algorithm_linesearch(_zero_x0(), func, func_grad, dict(_BASE_OPTIONS)))
X = _run_demo('Conjugate gradient algorithm with variable step size (steepest descent)',
              lambda: conjugate_gradient_algorithm(_zero_x0(), func, func_grad, func_hessian, dict(_BASE_OPTIONS)))
X = _run_demo('Conjugate gradient algorithm with variable step size based on line search',
              lambda: conjugate_gradient_algorithm_linesearch(_zero_x0(), func, func_grad, dict(_BASE_OPTIONS)))
X = _run_demo("Newton's algorithm",
              lambda: newton_algorithm(_zero_x0(), func, func_grad, func_hessian, dict(_BASE_OPTIONS)))
X = _run_demo("Newton's algorithm with variable step size based on line search",
              lambda: newton_algorithm_linesearch(_zero_x0(), func, func_grad, func_hessian, dict(_BASE_OPTIONS)))
X = _run_demo('Quasi-Newton algorithm with variable step size (steepest descent)',
              lambda: quasi_newton_algorithm(_zero_x0(), func, func_grad, func_hessian, dict(_BASE_OPTIONS)))
X = _run_demo('Quasi-Newton algorithm with variable step size based on line search',
              lambda: quasi_newton_algorithm_linesearch(_zero_x0(), func, func_grad, dict(_BASE_OPTIONS)))

# Stochastic solvers search inside a box from a random starting point.
X_lower = -1 * np.ones((N_pars, 1))  # X lower bound
X_upper = 1 * np.ones((N_pars, 1))   # X upper bound


def _random_x0():
    """Uniform random starting point inside [X_lower, X_upper]."""
    return X_lower + (X_upper - X_lower) * np.random.rand(X_lower.size, 1)


_rw_options = dict(_BASE_OPTIONS, N_iter_max=10000, x_lower=X_lower,
                   x_upper=X_upper, alpha=1.0)
X = _run_demo('Naive random walk algorithm',
              lambda: naive_random_search_algorithm(_random_x0(), func_error, _rw_options))

_sa_options = dict(_BASE_OPTIONS, N_iter_max=10000, x_lower=X_lower,
                   x_upper=X_upper, alpha=1,
                   gamma=1.0)  # gamma controls temperature decay, gamma > 0
X = _run_demo('Simulated annealing algorithm',
              lambda: simulated_annealing_algorithm(_random_x0(), func_error, _sa_options))

_pso_options = dict(_BASE_OPTIONS, tolerance_x=10e-8,
                    x_lower=X_lower, x_upper=X_upper, alpha=1.0,
                    d_lower=-0.25,  # direction (aka velocity) lower bound
                    d_upper=0.25,   # direction (aka velocity) upper bound
                    N_ps=1000,      # number of particles
                    w=1.0,          # inertial constant, w < 1
                    c1=1.0,         # cognitive/independent component, c1 ~ 2
                    c2=0)           # social component, c2 ~ 2
X = _run_demo('Particle swarm optimization algorithm',
              lambda: particle_swam_optimization_algorithm(func_error_ps, _pso_options))
import warnings
from functools import partial
import onnx
from onnx import numpy_helper
import tensorflow as tf
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
import numpy as np
from tensorflow.python.ops.image_ops_impl import ResizeMethodV1
class Operations:
    """Dispatches ONNX node types to ``op_<lowercased type>`` handler methods."""

    def make_op(self, op_type, inputs, attrs):
        """Look up the handler for *op_type* and apply it to inputs/attrs."""
        handler = getattr(self, 'op_' + op_type.lower())
        return handler(*inputs, **attrs)
class DataFormat:
    """Base marker class for tensor memory-layout tags."""

class OnnxTensor(DataFormat):
    """ONNX-native (NCHW) layout produced by graph operators."""

class OnnxConstant(OnnxTensor):
    """Compile-time constant tensor, kept in ONNX (NCHW) layout."""

class InterleavedImageBatch(DataFormat):
    """NHWC layout used by the generated tf.keras layers."""

class OptimizationMissingWarning(Warning):
    """Emitted when a layout transpose could not be optimized away."""
def ensure_data_format(tensor, format):
    """Return *tensor* carrying the requested data-format tag, converting if needed.

    A cheap ``tf.reshape`` is used when height == width == 1 or there is a
    single channel; otherwise a real transpose is inserted and a warning is
    emitted, since that usually indicates a missed layout optimization.
    Only 4-D tensors can be converted.
    """
    current = tensor.data_format
    if issubclass(current, format):
        return tensor
    if current is OnnxConstant and format is InterleavedImageBatch:
        assert len(tensor.shape) == 4
        out = tensor.transpose([0, 2, 3, 1])
        out.data_format = InterleavedImageBatch
        return out
    if current is OnnxTensor and format is InterleavedImageBatch:
        assert len(tensor.shape) == 4
        n, c, h, w = tensor.shape
        if h == w == 1 or c == 1:
            out = tf.reshape(tensor, [n, h, w, c])
        else:
            out = tf.transpose(tensor, [0, 2, 3, 1])
            warnings.warn("Transpose inserted. Please report at https://github.com/AxisCommunications/onnx-to-keras/issues", OptimizationMissingWarning)
        out.data_format = InterleavedImageBatch
        return out
    if current is InterleavedImageBatch and format is OnnxTensor:
        assert len(tensor.shape) == 4
        n, h, w, c = tensor.shape
        if h == w == 1 or c == 1:
            out = tf.reshape(tensor, [n, c, h, w])
        else:
            out = tf.transpose(tensor, [0, 3, 1, 2])
            warnings.warn("Transpose inserted. Please report at https://github.com/AxisCommunications/onnx-to-keras/issues", OptimizationMissingWarning)
        out.data_format = OnnxTensor
        return out
    raise NotImplementedError
def compatible_data_format(format1, format2):
    """True when either format tag is a subclass of the other."""
    return any(issubclass(a, b)
               for a, b in ((format1, format2), (format2, format1)))
def ensure_compatible_data_format(a, b):
    """Return ``(a, b)`` with mutually compatible data formats.

    If a conversion is required, a constant operand is converted in
    preference to a computed tensor.
    """
    if compatible_data_format(a.data_format, b.data_format):
        return a, b
    if b.data_format is OnnxConstant:
        b = ensure_data_format(b, a.data_format)
        return a, b
    a = ensure_data_format(a, b.data_format)
    return a, b
class Constant(np.ndarray):
    """ndarray view tagged as an ONNX compile-time constant (NCHW layout)."""

    data_format = OnnxConstant
class TfKerasOperations(Operations):
    """Implements ONNX operators as tf.keras layers / backend calls.

    Image-like tensors are kept in NHWC (InterleavedImageBatch) wherever
    possible; conversions back to ONNX's NCHW layout are inserted only
    when unavoidable.  Each ``op_*`` method returns a list of outputs,
    one per ONNX node output, with a ``data_format`` tag attached.
    """
    keras = tf.keras

    def parse_attr(self, a):
        """Convert an onnx.AttributeProto into a plain Python value."""
        if a.type == onnx.AttributeProto.INT:
            return a.i
        elif a.type == onnx.AttributeProto.INTS:
            return tuple(a.ints)
        elif a.type == onnx.AttributeProto.FLOAT:
            return a.f
        elif a.type == onnx.AttributeProto.STRING:
            return a.s
        elif a.type == onnx.AttributeProto.TENSOR:
            return self.make_constant(numpy_helper.to_array(a.t))
        else:
            raise NotImplementedError

    def make_constant(self, x):
        """Wrap *x* as a Constant (ndarray tagged OnnxConstant)."""
        return np.asarray(x).view(Constant)

    def make_input(self, shape, dtype, name=None):
        """Create a keras Input in NHWC layout from an NCHW ONNX shape."""
        dtype = tf.as_dtype(dtype)
        # XXX: Assumes all inputs are image batches that we want to transpose
        assert len(shape) == 4
        tensor = tf.keras.layers.Input((shape[2], shape[3], shape[1]), shape[0], name, dtype)
        tensor.data_format = InterleavedImageBatch
        return tensor

    def op_conv(self, x, weights, bias=None, kernel_shape=None, strides=None, pads=None, dilations=None, group=None):
        """2-D convolution; depthwise when group == input channels."""
        # Torch: (out_channels, in_channels, kH, kW)
        weights = ensure_data_format(weights, OnnxConstant)  # XXX Assumes no ops on weights
        if len(kernel_shape) == 2:
            x = ensure_data_format(x, InterleavedImageBatch)
            assert kernel_shape == weights.shape[2:4]
            if group == 1:
                # Tf; filter_height, filter_width, in_channels, out_channels
                weights = weights.transpose(2, 3, 1, 0)
                filters = weights.shape[3]
                ConvClass = self.keras.layers.Conv2D
            elif group == x.shape[3]:
                # Tf; filter_height, filter_width, out_channels, in_channels
                weights = weights.transpose(2, 3, 0, 1)
                filters = weights.shape[2]

                # Adapter so DepthwiseConv2D can be built with the same
                # keyword signature as Conv2D below.
                def ConvClass(filters, kernel_size, strides, dilation_rate, padding,
                              kernel_initializer, use_bias=True, bias_initializer='zeros'):
                    return self.keras.layers.DepthwiseConv2D(kernel_size, strides, dilation_rate=dilation_rate,
                                                             padding=padding, use_bias=use_bias,
                                                             bias_initializer=bias_initializer,
                                                             depthwise_initializer=kernel_initializer)
            else:
                raise NotImplementedError
            # Map ONNX explicit padding onto keras 'valid'/'same' when the
            # geometry allows it; otherwise insert an explicit ZeroPadding2D.
            if pads == (0, 0, 0, 0):
                padding = 'valid'
            elif (kernel_shape[0] == kernel_shape[1] and pads[0] == pads[1] == pads[2] == pads[3] and
                  pads[0] * 2 + 1 == kernel_shape[0] and strides == (1, 1) and dilations == (1, 1)):
                padding = 'same'
            elif (kernel_shape == (3, 3) and pads == (1, 1, 1, 1) and strides == (2, 2) and dilations == (1, 1) and
                  x.shape[1] % 2 == 1 and x.shape[2] % 2 == 1):
                padding = 'same'
            else:
                # ((top_pad, bottom_pad), (left_pad, right_pad))
                pad = self.keras.layers.ZeroPadding2D(((pads[0], pads[2]), (pads[1], pads[3])))
                x = pad(x)
                padding = 'valid'
            if bias is None:
                conv = ConvClass(filters, kernel_shape, strides,
                                 dilation_rate=dilations, padding=padding,
                                 kernel_initializer='zeros', use_bias=False)
                out = conv(x)
                conv.set_weights([weights.view(np.ndarray)])
            else:
                bias = ensure_data_format(bias, OnnxConstant)  # XXX Assumes no ops on weights
                conv = ConvClass(filters, kernel_shape, strides,
                                 dilation_rate=dilations, padding=padding,
                                 kernel_initializer='zeros', bias_initializer='zeros')
                out = conv(x)
                conv.set_weights([weights.view(np.ndarray), bias.view(np.ndarray)])
            out.data_format = InterleavedImageBatch
            return [out]
        else:
            raise NotImplementedError

    def op_relu(self, x):
        """Element-wise ReLU; preserves layout."""
        out = self.keras.layers.ReLU()(x)
        out.data_format = x.data_format
        return [out]

    def op_leakyrelu(self, x, alpha):
        """Element-wise LeakyReLU with negative slope *alpha*."""
        out = self.keras.layers.LeakyReLU(alpha=alpha)(x)
        out.data_format = x.data_format
        return [out]

    def op_sigmoid(self, x):
        """Element-wise sigmoid."""
        out = self.keras.activations.sigmoid(x)
        out.data_format = x.data_format
        return [out]

    def op_softmax(self, x, axis):
        """Softmax along *axis* (axis is in the tensor's current layout)."""
        out = self.keras.activations.softmax(x, axis=axis)
        out.data_format = x.data_format
        return [out]

    def op_prelu(self, x, alpha):
        """PReLU with constant slopes: one shared slope or one per channel."""
        alpha = ensure_data_format(alpha, OnnxConstant)  # XXX Assumes no ops on alpha
        if len(alpha) == 1:
            shared = list(range(1, len(x.shape)))
            alpha = alpha.reshape((1,) * (len(x.shape) - 1))
        elif len(alpha) == x.shape[-1]:
            shared = list(range(1, len(x.shape) - 1))
        else:
            raise NotImplementedError
        alpha_initializer = self.keras.initializers.Constant(alpha.view(np.ndarray))
        out = self.keras.layers.PReLU(shared_axes=shared, alpha_initializer=alpha_initializer)(x)
        out.data_format = x.data_format
        return [out]

    def op_maxpool(self, x, kernel_shape, pads, strides, ceil_mode=0):
        """2-D max pooling; explicit pads become a ZeroPadding2D layer."""
        assert ceil_mode == 0
        if len(kernel_shape) == 2:
            x = ensure_data_format(x, InterleavedImageBatch)
            if pads == (0, 0, 0, 0):
                padding = 'valid'
            else:
                # ((top_pad, bottom_pad), (left_pad, right_pad))
                pad = self.keras.layers.ZeroPadding2D(((pads[0], pads[2]), (pads[1], pads[3])))
                x = pad(x)
                padding = 'valid'
            out = self.keras.layers.MaxPool2D(kernel_shape, strides, padding)(x)
            out.data_format = InterleavedImageBatch
            return [out]
        else:
            raise NotImplementedError

    def op_concat(self, *tensors, axis):
        """Concatenate along *axis* (remapped from NCHW when inputs are NHWC)."""
        if all(t.data_format is InterleavedImageBatch for t in tensors):
            axis = (0, 3, 1, 2)[axis]
            out = self.keras.layers.Concatenate(axis)(list(tensors))
            out.data_format = InterleavedImageBatch
        elif all(t.data_format is OnnxConstant for t in tensors):
            out = self.make_constant(np.concatenate(tensors, axis))
        else:
            raise NotImplementedError
        return [out]

    def op_convtranspose(self, x, weights, bias=None, kernel_shape=None, strides=None, pads=None, dilations=None,
                         group=None, output_padding=(0, 0)):
        """2-D transposed convolution; grouped variant is done via tf.split."""
        assert kernel_shape is not None
        assert strides is not None
        assert pads is not None
        assert dilations is not None
        assert group is not None
        weights = ensure_data_format(weights, OnnxConstant)  # XXX Assumes no ops on weights
        if bias is None:
            use_bias = False
        else:
            bias = ensure_data_format(bias, OnnxConstant)  # XXX Assumes no ops on weights
            use_bias = True
        if len(kernel_shape) == 2:
            x = ensure_data_format(x, InterleavedImageBatch)
            assert kernel_shape == weights.shape[2:4]
            _, h_in, w_in, _ = x.shape
            # Output size as defined by the ONNX ConvTranspose operator.
            h_out = (h_in - 1) * strides[0] - 2 * pads[0] + dilations[0] * (kernel_shape[0] - 1) + 1 + output_padding[0]
            w_out = (w_in - 1) * strides[1] - 2 * pads[1] + dilations[1] * (kernel_shape[1] - 1) + 1 + output_padding[1]
            if pads == (0, 0, 0, 0):
                padding = 'valid'
            elif h_out == strides[0] * h_in and w_out == strides[1] * w_in and output_padding == (0, 0):
                padding = 'same'
                output_padding = None  # output_padding overrides the padding argument in keras
            else:
                raise NotImplementedError
            # Tf; filter_height, filter_width, out_channels, in_channels
            # Torch: (in_channels, out_channels, kH, kW)
            weights = weights.transpose(2, 3, 1, 0)
            filters = weights.shape[2]
            if group == 1:
                conv = self.keras.layers.Conv2DTranspose(filters, kernel_shape, strides,
                                                         dilation_rate=dilations, padding=padding,
                                                         kernel_initializer='zeros',
                                                         use_bias=use_bias, bias_initializer='zeros',
                                                         output_padding=output_padding)
                out = conv(x)
                if use_bias:
                    conv.set_weights([weights.view(np.ndarray), bias.view(np.ndarray)])
                else:
                    conv.set_weights([weights.view(np.ndarray)])
            else:
                # Emulate a grouped transposed convolution: split channels,
                # run one Conv2DTranspose per group, concatenate the results.
                splits = tf.split(x, group, axis=-1)
                convolved_splits = []
                n = weights.shape[3] // group
                assert group * n == weights.shape[3]
                for i, split in enumerate(splits):
                    conv = self.keras.layers.Conv2DTranspose(filters, kernel_shape, strides,
                                                             dilation_rate=dilations, padding=padding,
                                                             kernel_initializer='zeros',
                                                             use_bias=use_bias, bias_initializer='zeros',
                                                             output_padding=output_padding)
                    convolved_splits.append(conv(split))
                    grouped_weights = weights[:, :, :, i * n:(i + 1) * n]
                    if use_bias:
                        grouped_bias = bias[i * n:(i + 1) * n]
                        conv.set_weights([grouped_weights.view(np.ndarray), grouped_bias.view(np.ndarray)])
                    else:
                        conv.set_weights([grouped_weights.view(np.ndarray)])
                out = tf.concat(convolved_splits, -1)
            assert out.shape[1] == h_out
            assert out.shape[2] == w_out
            out.data_format = InterleavedImageBatch
            return [out]
        else:
            raise NotImplementedError

    def op_batchnormalization(self, x, weight, bias, running_mean, running_var, momentum, epsilon):
        """Batch normalization with frozen (inference) statistics."""
        if len(x.shape) != 4:
            raise NotImplementedError
        norm = self.keras.layers.BatchNormalization(momentum=momentum, epsilon=epsilon)
        out = norm(x)
        norm.set_weights([weight.view(np.ndarray), bias.view(np.ndarray),
                          running_mean.view(np.ndarray), running_var.view(np.ndarray)])
        out.data_format = x.data_format
        return [out]

    def op_unsqueeze(self, x, axes):
        """Insert size-1 dimensions at *axes* (constants stay constant)."""
        x = ensure_data_format(x, OnnxTensor)
        out = x
        if isinstance(x, Constant):
            for ax in sorted(axes):
                out = np.expand_dims(out, ax).view(Constant)
            out.data_format = x.data_format
        else:
            for ax in sorted(axes):
                out = self.keras.backend.expand_dims(out, ax)
            out.data_format = OnnxTensor
        return [out]

    def op_clip(self, x, min, max):
        """Clamp values to [min, max]; min == 0 maps to a capped ReLU layer."""
        if min == 0:
            out = self.keras.layers.ReLU(max)(x)
        else:
            out = self.keras.backend.clip(x, min, max)
        out.data_format = x.data_format
        return [out]

    def op_add(self, x1, x2):
        """Element-wise addition."""
        x1, x2 = ensure_compatible_data_format(x1, x2)
        out = self.keras.layers.Add()([x1, x2])
        out.data_format = x1.data_format
        return [out]

    def op_sub(self, x1, x2):
        """Element-wise subtraction."""
        x1, x2 = ensure_compatible_data_format(x1, x2)
        out = self.keras.layers.Subtract()([x1, x2])
        out.data_format = x1.data_format
        return [out]

    def op_reducemean(self, x, axes, keepdims):
        """Mean over spatial axes; only the global-average-pool form is supported."""
        x = ensure_data_format(x, InterleavedImageBatch)
        if axes == (2, 3) and keepdims == 0:
            out = self.keras.layers.GlobalAveragePooling2D()(x)
            out.data_format = OnnxTensor
        else:
            raise NotImplementedError
        return [out]

    def op_gemm(self, x, weights, bias, beta, transB, alpha):
        """Fully-connected layer (only the plain y = x @ W.T + b form)."""
        x = ensure_data_format(x, OnnxTensor)
        if beta == 1.0 and transB == 1 and alpha == 1.0:
            out = self.keras.layers.Dense(weights.shape[0], kernel_initializer='zeros',
                                          bias_initializer='zeros',
                                          weights=[weights.view(np.ndarray).T, bias.view(np.ndarray)])(x)
            out.data_format = OnnxTensor
        else:
            raise NotImplementedError
        return [out]

    def op_pad(self, x, pads, mode, value=0.0):
        """Constant spatial padding (batch/channel padding must be zero)."""
        x = ensure_data_format(x, InterleavedImageBatch)
        if mode == b'constant' and len(pads) == 8:
            assert len(x.shape) * 2 == len(pads)
            if pads[0] == pads[1] == pads[4] == pads[5] == 0:
                # ((top_pad, bottom_pad), (left_pad, right_pad))
                if value == 0.0:
                    paddings = ((pads[2], pads[6]), (pads[3], pads[7]))
                    out = self.keras.layers.ZeroPadding2D(paddings)(x)
                else:
                    paddings = ((0, 0), (pads[2], pads[6]), (pads[3], pads[7]), (0, 0))
                    out = tf.pad(x, paddings, constant_values=value)
            else:
                raise NotImplementedError
        else:
            raise NotImplementedError
        out.data_format = InterleavedImageBatch
        return [out]

    def op_averagepool(self, x, kernel_shape, pads, strides, ceil_mode=0):
        """2-D average pooling without padding."""
        x = ensure_data_format(x, InterleavedImageBatch)
        assert ceil_mode == 0
        if len(x.shape) == 4:
            if pads == (0, 0, 0, 0):
                padding = 'valid'
            else:
                raise NotImplementedError
            out = self.keras.layers.AveragePooling2D(kernel_shape, strides, padding)(x)
        else:
            raise NotImplementedError
        out.data_format = InterleavedImageBatch
        return [out]

    def op_globalaveragepool(self, x):
        """Global average pooling, keeping 1x1 spatial dimensions."""
        x = ensure_data_format(x, InterleavedImageBatch)
        if len(x.shape) == 4:
            out = self.keras.backend.mean(x, axis=[1, 2], keepdims=True)
        else:
            raise NotImplementedError
        out.data_format = InterleavedImageBatch
        return [out]

    def op_flatten(self, x, axis):
        """Flatten a (n, 1, 1, c) tensor to (n, c)."""
        if axis == 1 and len(x.shape) == 4 and x.shape[1] == 1 and x.shape[2] == 1:
            out = self.keras.layers.Flatten()(x)
        else:
            raise NotImplementedError
        out.data_format = OnnxTensor
        return [out]

    def op_slice(self, x, starts, ends, axes=None, steps=None):
        """Strided slice; axes are given in ONNX (NCHW) numbering."""
        if axes is None:
            axes = range(len(starts))
        if steps is None:
            steps = [1] * len(starts)
        if x.data_format is OnnxConstant:
            if axes != (0,):
                raise NotImplementedError
            out = self.make_constant(x[starts[0]:ends[0]:steps[0]])
        else:
            x = ensure_data_format(x, InterleavedImageBatch)
            if len(x.shape) != 4:
                raise NotImplementedError
            if len(axes) == 1 and starts[0] != ends[0]:
                # ONNX axis -> NHWC position: 0->batch, 1->channel(last),
                # 2->height, 3->width.
                if axes[0] == 0:
                    out = x[starts[0]:ends[0]:steps[0], :, :, :]
                elif axes[0] == 1:
                    out = x[:, :, :, starts[0]:ends[0]:steps[0]]
                elif axes[0] == 2:
                    out = x[:, starts[0]:ends[0]:steps[0], :, :]
                elif axes[0] == 3:
                    out = x[:, :, starts[0]:ends[0]:steps[0], :]
                else:
                    raise NotImplementedError
            elif tuple(axes) == (2, 3) and starts[0] != ends[0] and starts[1] != ends[1]:
                out = x[:, starts[0]:ends[0]:steps[0], starts[1]:ends[1]:steps[1], :]
            else:
                raise NotImplementedError
        out.data_format = InterleavedImageBatch
        return [out]

    def op_constant(self, value):
        """Return the node's constant attribute tagged as OnnxConstant."""
        out = value
        out.data_format = OnnxConstant
        return [out]

    def op_shape(self, x):
        """Return the shape of *x* in ONNX (NCHW) order as a constant."""
        shape = list(map(int, x.shape))
        if x.data_format is InterleavedImageBatch:
            n, h, w, f = shape
            shape = [n, f, h, w]
        return [self.make_constant(shape)]

    def op_gather(self, x, indices, axis=0):
        """Index a constant along axis 0."""
        x = ensure_data_format(x, OnnxConstant)
        if axis == 0:
            return [self.make_constant(x[indices])]
        else:
            raise NotImplementedError

    def op_cast(self, x, to):
        """Cast to the numpy dtype matching the ONNX TensorProto.DataType *to*."""
        dtype = {
            0: None,  # UNDEFINED
            # ONNX FLOAT is 32-bit.  (The old `np.float` alias was 64-bit
            # and has been removed in NumPy >= 1.24.)
            1: np.float32,
            2: np.uint8,
            3: np.int8,
            4: np.uint16,
            5: np.int16,
            6: np.int32,
            7: np.int64,
            8: str,
            9: np.bool_,  # `np.bool` alias removed in NumPy >= 1.24
            10: np.float16,
            11: np.double,
            12: np.uint32,
            13: np.uint64,
            14: np.complex64,
            15: np.complex128,
            # // Non-IEEE floating-point format based on IEEE754 single-precision
            # // floating-point number truncated to 16 bits.
            # // This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits.
            # BFLOAT16 = 16;
        }[to]
        if x.data_format is OnnxConstant:
            return [self.make_constant(x.astype(dtype))]
        else:
            out = self.keras.backend.cast(x, dtype)
            out.data_format = x.data_format
            return [out]

    def op_mul(self, a, b):
        """Element-wise multiplication; scalar * tensor handled directly."""
        if b.shape == ():
            a, b = b, a
        if a.shape == ():
            out = a * b
            out.data_format = b.data_format
            return [out]
        a, b = ensure_compatible_data_format(a, b)
        if a.data_format is OnnxConstant:
            return [self.make_constant(a * b)]
        else:
            out = tf.keras.layers.Multiply()([a, b])
            out.data_format = a.data_format
            return [out]

    def op_floor(self, x):
        """Element-wise floor of a constant."""
        x = ensure_data_format(x, OnnxConstant)
        return [self.make_constant(np.floor(x))]

    def op_div(self, a, b):
        """Element-wise division of constants."""
        a = ensure_data_format(a, OnnxConstant)
        b = ensure_data_format(b, OnnxConstant)
        return [self.make_constant(a / b)]

    def op_upsample(self, x, scales, mode=b'nearest'):
        """Legacy Upsample: delegate to op_resize with matching semantics."""
        if mode == b'nearest':
            return self.op_resize(x, None, scales, coordinate_transformation_mode=b'asymmetric', nearest_mode=b'floor')
        if mode == b'linear':
            return self.op_resize(x, None, scales, coordinate_transformation_mode=b'align_corners', mode=b'linear', nearest_mode=b'floor')
        raise NotImplementedError

    def op_resize(self, x, roi, scales=None, sizes=None, *,
                  coordinate_transformation_mode=b"half_pixel", cubic_coeff_a=-0.75, exclude_outside=0,
                  extrapolation_value=0.0, mode=b"nearest", nearest_mode=b"round_prefer_floor"):
        """Spatial resize via tf.image; only two mode combinations supported."""
        assert cubic_coeff_a == -0.75
        assert exclude_outside == 0
        assert extrapolation_value == 0.0
        x = ensure_data_format(x, InterleavedImageBatch)
        if sizes is None:
            assert scales[0] == scales[1] == 1
            size = [int(x.shape[1] * scales[2]), int(x.shape[2] * scales[3])]
        else:
            assert sizes[0] == x.shape[0]
            assert sizes[1] == x.shape[3]
            size = sizes[2:4]
        if mode == b'nearest' and coordinate_transformation_mode == b'asymmetric' and nearest_mode == b'floor':
            out = tf.compat.v1.image.resize(x, size, ResizeMethodV1.NEAREST_NEIGHBOR)
        elif mode == b'linear' and coordinate_transformation_mode == b'align_corners':
            out = tf.compat.v1.image.resize(x, size, ResizeMethodV1.BILINEAR, align_corners=True)
        else:
            raise NotImplementedError
        out.data_format = InterleavedImageBatch
        return [out]

    def op_equal(self, x, y):
        """Element-wise equality."""
        x, y = ensure_compatible_data_format(x, y)
        out = self.keras.backend.equal(x, y)
        out.data_format = x.data_format
        return [out]

    def op_reshape(self, x, shape):
        """Reshape, preserving the batch dimension."""
        x = ensure_data_format(x, OnnxTensor)
        assert x.shape[0] == shape[0]
        out = self.keras.layers.Reshape(shape[1:])(x)
        out.data_format = OnnxTensor
        return [out]

    def op_transpose(self, x, perm):
        """Transpose a constant by *perm*."""
        x = ensure_data_format(x, OnnxConstant)
        x = x.transpose(perm)
        x.data_format = OnnxConstant
        return [x]

    def op_matmul(self, x1, x2):
        """Matrix multiplication for 2-D, and batch-size-1 3-D/4-D inputs."""
        x1 = ensure_data_format(x1, OnnxTensor)
        x2 = ensure_data_format(x2, OnnxTensor)
        if x1.data_format is OnnxConstant:
            x1 = tf.convert_to_tensor(x1)
        if x2.data_format is OnnxConstant:
            x2 = tf.convert_to_tensor(x2)
        if len(x1.shape) == 2:
            assert len(x2.shape) == 2
            out = self.keras.backend.dot(x1, x2)
        elif len(x1.shape) == 3:
            assert len(x2.shape) == 3
            assert x1.shape[0] == x2.shape[0] == 1
            out = self.keras.backend.dot(x1, x2)
            out = tf.reshape(out, (1, out.shape[1], out.shape[3]))
        elif len(x1.shape) == 4:
            assert len(x2.shape) == 4
            assert x1.shape[0] == x2.shape[0] == 1
            assert x1.shape[1] == x2.shape[1] == 1
            out = self.keras.backend.dot(x1, x2)
            out = tf.reshape(out, (1, 1, out.shape[2], out.shape[5]))
        else:
            raise NotImplementedError
        out.data_format = OnnxTensor
        return [out]

    def op_sqrt(self, x):
        """Element-wise square root."""
        out = self.keras.backend.sqrt(x)
        out.data_format = x.data_format
        return [out]

    def op_abs(self, x):
        """Element-wise absolute value."""
        out = self.keras.backend.abs(x)
        out.data_format = x.data_format
        return [out]

    def op_neg(self, x):
        """Element-wise negation."""
        out = -x
        out.data_format = x.data_format
        return [out]
def onnx2keras(onnx_model):
    """Convert a loaded ONNX ModelProto into a tf.keras Model."""
    ops = TfKerasOperations()
    # Initializers become compile-time constants keyed by tensor name.
    tensors = {init.name: ops.make_constant(numpy_helper.to_array(init))
               for init in onnx_model.graph.initializer}
    model_inputs = []
    for graph_input in onnx_model.graph.input:
        if graph_input.name in tensors:
            continue  # already supplied as an initializer
        dims = graph_input.type.tensor_type.shape.dim
        shape = [d.dim_value if (d.dim_value > 0 and d.dim_param == "") else None
                 for d in dims]
        dtype = TENSOR_TYPE_TO_NP_TYPE[graph_input.type.tensor_type.elem_type]
        tensor = ops.make_input(shape, dtype, graph_input.name)
        tensors[graph_input.name] = tensor
        model_inputs.append(tensor)
    # Execute nodes in graph order, materializing each output tensor.
    for node in onnx_model.graph.node:
        attrs = {attr.name: ops.parse_attr(attr) for attr in node.attribute}
        node_inputs = [tensors[name] for name in node.input]
        results = ops.make_op(node.op_type, node_inputs, attrs)
        assert len(results) == len(node.output)
        tensors.update(zip(node.output, results))
    outputs = [tensors[o.name] for o in onnx_model.graph.output]
    return tf.keras.models.Model(model_inputs, outputs)
def main(infile, outfile=None, export_saved_model=False):
    """Convert *infile* (ONNX) and save as .h5, or as a SavedModel directory
    when *export_saved_model* is given a path."""
    if outfile is None:
        stem = infile[:-5] if infile.endswith('.onnx') else infile
        outfile = stem + '.h5'
    model = onnx2keras(onnx.load(infile))
    if not export_saved_model:
        model.save(outfile)
    else:
        import tensorflow.compat.v1 as tf_v1
        tf_v1.keras.experimental.export_saved_model(model, export_saved_model)
if __name__ == '__main__':
from fire import Fire
    Fire(main)
import warnings
from functools import partial
import onnx
from onnx import numpy_helper
import tensorflow as tf
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
import numpy as np
from tensorflow.python.ops.image_ops_impl import ResizeMethodV1
class Operations:
    """Dispatches ONNX node types to ``op_<lowercased type>`` handler methods."""

    def make_op(self, op_type, inputs, attrs):
        """Look up the handler for *op_type* and apply it to inputs/attrs."""
        handler = getattr(self, 'op_' + op_type.lower())
        return handler(*inputs, **attrs)
class DataFormat:
    """Base marker class for tensor memory-layout tags."""

class OnnxTensor(DataFormat):
    """ONNX-native (NCHW) layout produced by graph operators."""

class OnnxConstant(OnnxTensor):
    """Compile-time constant tensor, kept in ONNX (NCHW) layout."""

class InterleavedImageBatch(DataFormat):
    """NHWC layout used by the generated tf.keras layers."""

class OptimizationMissingWarning(Warning):
    """Emitted when a layout transpose could not be optimized away."""
def ensure_data_format(tensor, format):
    """Return *tensor* carrying the requested data-format tag, converting if needed.

    A cheap ``tf.reshape`` is used when height == width == 1 or there is a
    single channel; otherwise a real transpose is inserted and a warning is
    emitted, since that usually indicates a missed layout optimization.
    Only 4-D tensors can be converted.
    """
    current = tensor.data_format
    if issubclass(current, format):
        return tensor
    if current is OnnxConstant and format is InterleavedImageBatch:
        assert len(tensor.shape) == 4
        out = tensor.transpose([0, 2, 3, 1])
        out.data_format = InterleavedImageBatch
        return out
    if current is OnnxTensor and format is InterleavedImageBatch:
        assert len(tensor.shape) == 4
        n, c, h, w = tensor.shape
        if h == w == 1 or c == 1:
            out = tf.reshape(tensor, [n, h, w, c])
        else:
            out = tf.transpose(tensor, [0, 2, 3, 1])
            warnings.warn("Transpose inserted. Please report at https://github.com/AxisCommunications/onnx-to-keras/issues", OptimizationMissingWarning)
        out.data_format = InterleavedImageBatch
        return out
    if current is InterleavedImageBatch and format is OnnxTensor:
        assert len(tensor.shape) == 4
        n, h, w, c = tensor.shape
        if h == w == 1 or c == 1:
            out = tf.reshape(tensor, [n, c, h, w])
        else:
            out = tf.transpose(tensor, [0, 3, 1, 2])
            warnings.warn("Transpose inserted. Please report at https://github.com/AxisCommunications/onnx-to-keras/issues", OptimizationMissingWarning)
        out.data_format = OnnxTensor
        return out
    raise NotImplementedError
def compatible_data_format(format1, format2):
    """True when either format tag is a subclass of the other."""
    return any(issubclass(a, b)
               for a, b in ((format1, format2), (format2, format1)))
def ensure_compatible_data_format(a, b):
    """Return ``(a, b)`` with mutually compatible data formats.

    If a conversion is required, a constant operand is converted in
    preference to a computed tensor.
    """
    if compatible_data_format(a.data_format, b.data_format):
        return a, b
    if b.data_format is OnnxConstant:
        b = ensure_data_format(b, a.data_format)
        return a, b
    a = ensure_data_format(a, b.data_format)
    return a, b
class Constant(np.ndarray):
    """ndarray view tagged as an ONNX compile-time constant (NCHW layout)."""

    data_format = OnnxConstant
class TfKerasOperations(Operations):
keras = tf.keras
def parse_attr(self, a):
if a.type == onnx.AttributeProto.INT:
return a.i
elif a.type == onnx.AttributeProto.INTS:
return tuple(a.ints)
elif a.type == onnx.AttributeProto.FLOAT:
return a.f
elif a.type == onnx.AttributeProto.STRING:
return a.s
elif a.type == onnx.AttributeProto.TENSOR:
return self.make_constant(numpy_helper.to_array(a.t))
else:
raise NotImplementedError
def make_constant(self, x):
return np.asarray(x).view(Constant)
def make_input(self, shape, dtype, name=None):
dtype = tf.as_dtype(dtype)
# XXX: Assumes all inputs are image batches that we want to transpose
assert len(shape) == 4
tensor = tf.keras.layers.Input((shape[2], shape[3], shape[1]), shape[0], name, dtype)
tensor.data_format = InterleavedImageBatch
return tensor
def op_conv(self, x, weights, bias=None, kernel_shape=None, strides=None, pads=None, dilations=None, group=None):
# Torch: (out_channels, in_channels, kH, kW)
weights = ensure_data_format(weights, OnnxConstant) # XXX Assumes no ops on weights
if len(kernel_shape) == 2:
x = ensure_data_format(x, InterleavedImageBatch)
assert kernel_shape == weights.shape[2:4]
if group == 1:
# Tf; filter_height, filter_width, in_channels, out_channels
weights = weights.transpose(2, 3, 1, 0)
filters = weights.shape[3]
ConvClass = self.keras.layers.Conv2D
elif group == x.shape[3]:
# Tf; filter_height, filter_width, out_channels, in_channels
weights = weights.transpose(2, 3, 0, 1)
filters = weights.shape[2]
def ConvClass(filters, kernel_size, strides, dilation_rate, padding,
kernel_initializer, use_bias=True, bias_initializer='zeros'):
return self.keras.layers.DepthwiseConv2D(kernel_size, strides, dilation_rate=dilation_rate,
padding=padding, use_bias=use_bias,
bias_initializer=bias_initializer,
depthwise_initializer=kernel_initializer)
else:
raise NotImplementedError
if pads == (0,0,0,0):
padding = 'valid'
elif (kernel_shape[0] == kernel_shape[1] and pads[0] == pads[1] == pads[2] == pads[3] and
pads[0] * 2 + 1 == kernel_shape[0] and strides == (1, 1) and dilations == (1, 1)):
padding = 'same'
elif (kernel_shape == (3, 3) and pads == (1,1,1,1) and strides == (2,2) and dilations == (1, 1) and
x.shape[1] % 2 == 1 and x.shape[2] % 2 == 1):
padding = 'same'
else:
# ((top_pad, bottom_pad), (left_pad, right_pad))
pad = self.keras.layers.ZeroPadding2D(((pads[0], pads[2]), (pads[1], pads[3])))
x = pad(x)
padding = 'valid'
if bias is None:
conv = ConvClass(filters, kernel_shape, strides,
dilation_rate=dilations, padding=padding,
kernel_initializer='zeros', use_bias=False)
out = conv(x)
conv.set_weights([weights.view(np.ndarray)])
else:
bias = ensure_data_format(bias, OnnxConstant) # XXX Assumes no ops on weights
conv = ConvClass(filters, kernel_shape, strides,
dilation_rate=dilations, padding=padding,
kernel_initializer='zeros', bias_initializer='zeros')
out = conv(x)
conv.set_weights([weights.view(np.ndarray), bias.view(np.ndarray)])
out.data_format = InterleavedImageBatch
return [out]
else:
raise NotImplementedError
def op_relu(self, x):
out = self.keras.layers.ReLU()(x)
out.data_format = x.data_format
return [out]
def op_leakyrelu(self, x, alpha):
out = self.keras.layers.LeakyReLU(alpha=alpha)(x)
out.data_format = x.data_format
return [out]
def op_sigmoid(self, x):
out = self.keras.activations.sigmoid(x)
out.data_format = x.data_format
return [out]
def op_softmax(self, x, axis):
out = self.keras.activations.softmax(x, axis=axis)
out.data_format = x.data_format
return [out]
def op_prelu(self, x, alpha):
alpha = ensure_data_format(alpha, OnnxConstant) # XXX Assumes no ops on alpha
if len(alpha) == 1:
shared = list(range(1, len(x.shape)))
alpha = alpha.reshape((1,) * (len(x.shape) - 1))
elif len(alpha) == x.shape[-1]:
shared = list(range(1, len(x.shape) - 1))
else:
raise NotImplementedError
alpha_initializer = self.keras.initializers.Constant(alpha.view(np.ndarray))
out = self.keras.layers.PReLU(shared_axes=shared, alpha_initializer=alpha_initializer)(x)
out.data_format = x.data_format
return [out]
def op_maxpool(self, x, kernel_shape, pads, strides, ceil_mode=0):
assert ceil_mode == 0
if len(kernel_shape) == 2:
x = ensure_data_format(x, InterleavedImageBatch)
if pads == (0, 0, 0, 0):
padding = 'valid'
else:
# ((top_pad, bottom_pad), (left_pad, right_pad))
pad = self.keras.layers.ZeroPadding2D(((pads[0], pads[2]), (pads[1], pads[3])))
x = pad(x)
padding = 'valid'
out = self.keras.layers.MaxPool2D(kernel_shape, strides, padding)(x)
out.data_format = InterleavedImageBatch
return [out]
else:
raise NotImplementedError
def op_concat(self, *tensors, axis):
if all(t.data_format is InterleavedImageBatch for t in tensors):
axis = (0, 3, 1, 2)[axis]
out = self.keras.layers.Concatenate(axis)(list(tensors))
out.data_format = InterleavedImageBatch
elif all(t.data_format is OnnxConstant for t in tensors):
out = self.make_constant(np.concatenate(tensors, axis))
else:
raise NotImplementedError
return [out]
def op_convtranspose(self, x, weights, bias=None, kernel_shape=None, strides=None, pads=None, dilations=None,
group=None, output_padding=(0, 0)):
assert kernel_shape is not None
assert strides is not None
assert pads is not None
assert dilations is not None
assert group is not None
weights = ensure_data_format(weights, OnnxConstant) # XXX Assumes no ops on weights
if bias is None:
use_bias = False
bias_initializer = None
else:
bias = ensure_data_format(bias, OnnxConstant) # XXX Assumes no ops on weights
use_bias = True
if len(kernel_shape) == 2:
x = ensure_data_format(x, InterleavedImageBatch)
assert kernel_shape == weights.shape[2:4]
_, h_in, w_in, _ = x.shape
h_out = (h_in - 1) * strides[0] - 2 * pads[0] + dilations[0] * (kernel_shape[0] - 1) + 1 + output_padding[0]
w_out=(w_in - 1) * strides[1] - 2 * pads[1] + dilations[1] * (kernel_shape[1] - 1) + 1 + output_padding[1]
if pads == (0,0,0,0):
padding = 'valid'
elif h_out == strides[0] * h_in and w_out == strides[1] * w_in and output_padding==(0,0):
padding = 'same'
output_padding = None # output_padding overrides the padding argument in keras
else:
raise NotImplementedError
# Tf; filter_height, filter_width, out_channels, in_channels
# Torch: (in_channels, out_channels, kH, kW)
weights = weights.transpose(2, 3, 1, 0)
filters = weights.shape[2]
if group == 1:
conv = self.keras.layers.Conv2DTranspose(filters, kernel_shape, strides,
dilation_rate=dilations, padding=padding,
kernel_initializer='zeros',
use_bias=use_bias, bias_initializer='zeros',
output_padding=output_padding)
out = conv(x)
if use_bias:
conv.set_weights([weights.view(np.ndarray), bias.view(np.ndarray)])
else:
conv.set_weights([weights.view(np.ndarray)])
else:
splits = tf.split(x, group, axis=-1)
convolved_splits = []
n = weights.shape[3] // group
assert group * n == weights.shape[3]
for i, split in enumerate(splits):
conv = self.keras.layers.Conv2DTranspose(filters, kernel_shape, strides,
dilation_rate=dilations, padding=padding,
kernel_initializer='zeros',
use_bias=use_bias, bias_initializer='zeros',
output_padding=output_padding)
convolved_splits.append(conv(split))
grouped_weights = weights[:, :, :, i*n:(i+1)*n]
if use_bias:
grouped_bias = bias[i*n:(i+1)*n]
conv.set_weights([grouped_weights.view(np.ndarray), grouped_bias.view(np.ndarray)])
else:
conv.set_weights([grouped_weights.view(np.ndarray)])
out = tf.concat(convolved_splits, -1)
assert out.shape[1] == h_out
assert out.shape[2] == w_out
out.data_format = InterleavedImageBatch
return [out]
else:
raise NotImplementedError
def op_batchnormalization(self, x, weight, bias, running_mean, running_var, momentum, epsilon):
if len(x.shape) != 4:
raise NotImplementedError
norm = self.keras.layers.BatchNormalization(momentum=momentum, epsilon=epsilon)
out = norm(x)
norm.set_weights([weight.view(np.ndarray), bias.view(np.ndarray),
running_mean.view(np.ndarray), running_var.view(np.ndarray)])
out.data_format = x.data_format
return [out]
def op_unsqueeze(self, x, axes):
x = ensure_data_format(x, OnnxTensor)
out = x
if isinstance(x, Constant):
for ax in sorted(axes):
out = np.expand_dims(out, ax).view(Constant)
out.data_format = x.data_format
else:
for ax in sorted(axes):
out = self.keras.backend.expand_dims(out, ax)
out.data_format = OnnxTensor
return [out]
def op_clip(self, x, min, max):
if min == 0:
out = self.keras.layers.ReLU(max)(x)
else:
out = self.keras.backend.clip(x, min, max)
out.data_format = x.data_format
return [out]
def op_add(self, x1, x2):
x1, x2 = ensure_compatible_data_format(x1, x2)
out = self.keras.layers.Add()([x1, x2])
out.data_format = x1.data_format
return [out]
def op_sub(self, x1, x2):
x1, x2 = ensure_compatible_data_format(x1, x2)
out = self.keras.layers.Subtract()([x1, x2])
out.data_format = x1.data_format
return [out]
def op_reducemean(self, x, axes, keepdims):
x = ensure_data_format(x, InterleavedImageBatch)
if axes == (2, 3) and keepdims == 0:
out = self.keras.layers.GlobalAveragePooling2D()(x)
out.data_format = OnnxTensor
else:
raise NotImplementedError
return [out]
def op_gemm(self, x, weights, bias, beta, transB, alpha):
x = ensure_data_format(x, OnnxTensor)
if beta == 1.0 and transB == 1 and alpha == 1.0:
out = self.keras.layers.Dense(weights.shape[0], kernel_initializer='zeros',
bias_initializer='zeros',
weights=[weights.view(np.ndarray).T, bias.view(np.ndarray)])(x)
out.data_format = OnnxTensor
else:
raise NotImplementedError
return [out]
def op_pad(self, x, pads, mode, value=0.0):
x = ensure_data_format(x, InterleavedImageBatch)
if mode == b'constant' and len(pads) == 8:
assert len(x.shape) * 2 == len(pads)
if pads[0] == pads[1] == pads[4] == pads[5] == 0:
# ((top_pad, bottom_pad), (left_pad, right_pad))
if value == 0.0:
paddings = ((pads[2], pads[6]), (pads[3], pads[7]))
out = self.keras.layers.ZeroPadding2D(paddings)(x)
else:
paddings = ((0,0), (pads[2], pads[6]), (pads[3], pads[7]), (0,0))
out = tf.pad(x, paddings, constant_values=value)
else:
raise NotImplementedError
else:
raise NotImplementedError
out.data_format = InterleavedImageBatch
return [out]
def op_averagepool(self, x, kernel_shape, pads, strides, ceil_mode=0):
x = ensure_data_format(x, InterleavedImageBatch)
assert ceil_mode == 0
if len(x.shape) == 4:
if pads == (0,0,0,0):
padding = 'valid'
else:
raise NotImplementedError
out = self.keras.layers.AveragePooling2D(kernel_shape, strides, padding)(x)
else:
raise NotImplementedError
out.data_format = InterleavedImageBatch
return [out]
def op_globalaveragepool(self, x):
x = ensure_data_format(x, InterleavedImageBatch)
if len(x.shape) == 4:
out = self.keras.backend.mean(x, axis=[1, 2], keepdims=True)
else:
raise NotImplementedError
out.data_format = InterleavedImageBatch
return [out]
def op_flatten(self, x, axis):
if axis == 1 and len(x.shape) == 4 and x.shape[1] == 1 and x.shape[2] == 1:
out = self.keras.layers.Flatten()(x)
else:
raise NotImplementedError
out.data_format = OnnxTensor
return [out]
    def op_slice(self, x, starts, ends, axes=None, steps=None):
        """ONNX Slice.

        Constants are sliced eagerly (axis 0 only). Tensors must be 4D; the
        ONNX axis numbers refer to the NCHW layout, so they are remapped onto
        the stored NHWC tensor in the branches below.
        """
        if axes is None:
            axes = range(len(starts))
        if steps is None:
            steps = [1] * len(starts)
        if x.data_format is OnnxConstant:
            if axes != (0,):
                raise NotImplementedError
            out = self.make_constant(x[starts[0]:ends[0]:steps[0]])
        else:
            x = ensure_data_format(x, InterleavedImageBatch)
            if len(x.shape) != 4:
                raise NotImplementedError
            if len(axes) == 1 and starts[0] != ends[0]:
                if axes[0] == 0:
                    # ONNX batch axis -> NHWC axis 0
                    out = x[starts[0]:ends[0]:steps[0],:,:,:]
                elif axes[0] == 1:
                    # ONNX channel axis -> NHWC last axis
                    out = x[:,:,:,starts[0]:ends[0]:steps[0]]
                elif axes[0] == 2:
                    # ONNX height axis -> NHWC axis 1
                    out = x[:,starts[0]:ends[0]:steps[0],:,:]
                elif axes[0] == 3:
                    # ONNX width axis -> NHWC axis 2
                    out = x[:,:,starts[0]:ends[0]:steps[0],:]
                else:
                    raise NotImplementedError
            elif tuple(axes) == (2,3) and starts[0] != ends[0] and starts[1] != ends[1]:
                # Simultaneous H and W crop.
                out = x[:,starts[0]:ends[0]:steps[0],starts[1]:ends[1]:steps[1],:]
            else:
                raise NotImplementedError
            out.data_format = InterleavedImageBatch
        return [out]
def op_constant(self, value):
out = value
out.data_format = OnnxConstant
return [out]
def op_shape(self, x):
shape = list(map(int, x.shape))
if x.data_format is InterleavedImageBatch:
n, h, w, f = shape
shape = [n, f, h, w]
return [self.make_constant(shape)]
def op_gather(self, x, indices, axis=0):
x = ensure_data_format(x, OnnxConstant)
if axis == 0:
return [self.make_constant(x[indices])]
else:
raise NotImplementedError
def op_cast(self, x, to):
dtype = {
0: None, # UNDEFINED
1: np.float,
2: np.uint8,
3: np.int8,
4: np.uint16,
5: np.int16,
6: np.int32,
7: np.int64,
8: str,
9: np.bool,
10: np.float16,
11: np.double,
12: np.uint32,
13: np.uint64,
14: np.complex64,
15: np.complex128,
# // Non-IEEE floating-point format based on IEEE754 single-precision
# // floating-point number truncated to 16 bits.
# // This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits.
#BFLOAT16 = 16;
}[to]
if x.data_format is OnnxConstant:
return [self.make_constant(x.astype(dtype))]
else:
out = self.keras.backend.cast(x, dtype)
out.data_format = x.data_format
return [out]
def op_mul(self, a, b):
if b.shape == ():
a, b = b, a
if a.shape == ():
out = a * b
out.data_format = b.data_format
return [out]
a, b = ensure_compatible_data_format(a, b)
if a.data_format is OnnxConstant:
return [self.make_constant(a * b)]
else:
out = tf.keras.layers.Multiply()([a, b])
out.data_format = a.data_format
return [out]
def op_floor(self, x):
x = ensure_data_format(x, OnnxConstant)
return [self.make_constant(np.floor(x))]
def op_div(self, a, b):
a = ensure_data_format(a, OnnxConstant)
b = ensure_data_format(b, OnnxConstant)
return [self.make_constant(a / b)]
def op_upsample(self, x, scales, mode=b'nearest'):
if mode == b'nearest':
return self.op_resize(x, None, scales, coordinate_transformation_mode=b'asymmetric', nearest_mode=b'floor')
if mode == b'linear':
return self.op_resize(x, None, scales, coordinate_transformation_mode=b'align_corners', mode=b'linear', nearest_mode=b'floor')
raise NotImplementedError
    def op_resize(self, x, roi, scales=None, sizes=None, *,
                  coordinate_transformation_mode=b"half_pixel", cubic_coeff_a=-0.75, exclude_outside=0,
                  extrapolation_value=0.0, mode=b"nearest", nearest_mode=b"round_prefer_floor"):
        """ONNX Resize on a 4D NHWC tensor.

        Either `scales` (per-axis factors) or `sizes` (absolute output dims),
        both in NCHW order, selects the target H/W; batch and channel must
        stay unscaled. Only two configurations are supported:
        nearest + asymmetric + floor, and linear + align_corners.
        `roi` is accepted but unused.
        """
        assert cubic_coeff_a == -0.75
        assert exclude_outside == 0
        assert extrapolation_value == 0.0
        x = ensure_data_format(x, InterleavedImageBatch)
        if sizes is None:
            # scales is (n, c, h, w): batch/channel factors must be 1.
            assert scales[0] == scales[1] == 1
            size = [int(x.shape[1] * scales[2]), int(x.shape[2] * scales[3])]
        else:
            # sizes is (n, c, h, w): batch/channel must match the input.
            assert sizes[0] == x.shape[0]
            assert sizes[1] == x.shape[3]
            size = sizes[2:4]
        if mode == b'nearest' and coordinate_transformation_mode == b'asymmetric' and nearest_mode==b'floor':
            out = tf.compat.v1.image.resize(x, size, ResizeMethodV1.NEAREST_NEIGHBOR)
        elif mode == b'linear' and coordinate_transformation_mode == b'align_corners':
            out = tf.compat.v1.image.resize(x, size, ResizeMethodV1.BILINEAR, align_corners=True)
        else:
            raise NotImplementedError
        out.data_format = InterleavedImageBatch
        return [out]
def op_equal(self, x, y):
x, y = ensure_compatible_data_format(x, y)
out = self.keras.backend.equal(x, y)
out.data_format = x.data_format
return [out]
def op_reshape(self, x, shape):
x = ensure_data_format(x, OnnxTensor)
assert x.shape[0] == shape[0]
out = self.keras.layers.Reshape(shape[1:])(x)
out.data_format = OnnxTensor
return [out]
def op_transpose(self, x, perm):
x = ensure_data_format(x, OnnxConstant)
x = x.transpose(perm)
x.data_format = OnnxConstant
return [x]
    def op_matmul(self, x1, x2):
        """ONNX MatMul for 2D operands and batch-size-1 3D / 4D operands.

        For rank > 2, K.dot keeps all leading axes of both operands (it
        contracts the last axis of x1 with the second-to-last of x2), so the
        singleton batch axes reappear in the product and are squeezed back
        out by the reshape in each branch.
        """
        x1 = ensure_data_format(x1, OnnxTensor)
        x2 = ensure_data_format(x2, OnnxTensor)
        # NOTE(review): ensure_data_format appears to leave constants as
        # OnnxConstant here, hence the explicit tensor conversion — confirm.
        if x1.data_format is OnnxConstant:
            x1 = tf.convert_to_tensor(x1)
        if x2.data_format is OnnxConstant:
            x2 = tf.convert_to_tensor(x2)
        if len(x1.shape) == 2:
            assert len(x2.shape) == 2
            out = self.keras.backend.dot(x1, x2)
        elif len(x1.shape) == 3:
            assert len(x2.shape) == 3
            assert x1.shape[0] == x2.shape[0] == 1
            # (1, m, k) . (1, k, n) -> (1, m, 1, n); keep m and n only.
            out = self.keras.backend.dot(x1, x2)
            out = tf.reshape(out, (1, out.shape[1], out.shape[3]))
        elif len(x1.shape) == 4:
            assert len(x2.shape) == 4
            assert x1.shape[0] == x2.shape[0] == 1
            assert x1.shape[1] == x2.shape[1] == 1
            # (1, 1, m, k) . (1, 1, k, n) -> (1, 1, m, 1, 1, n); keep m and n.
            out = self.keras.backend.dot(x1, x2)
            out = tf.reshape(out, (1, 1, out.shape[2], out.shape[5]))
        else:
            raise NotImplementedError
        out.data_format = OnnxTensor
        return [out]
def op_sqrt(self, x):
out = self.keras.backend.sqrt(x)
out.data_format = x.data_format
return [out]
def op_abs(self, x):
out = self.keras.backend.abs(x)
out.data_format = x.data_format
return [out]
def op_neg(self, x):
out = -x
out.data_format = x.data_format
return [out]
def onnx2keras(onnx_model):
    """Translate an ONNX graph into an equivalent tf.keras Model."""
    tensors = {}
    ops = TfKerasOperations()
    # Graph initializers become constants.
    for init in onnx_model.graph.initializer:
        tensors[init.name] = ops.make_constant(numpy_helper.to_array(init))
    # Remaining graph inputs become Keras input placeholders.
    model_inputs = []
    for graph_input in onnx_model.graph.input:
        if graph_input.name in tensors:
            continue  # already provided by an initializer
        tensor_type = graph_input.type.tensor_type
        shape = [d.dim_value if (d.dim_value > 0 and d.dim_param == "") else None
                 for d in tensor_type.shape.dim]
        dtype = TENSOR_TYPE_TO_NP_TYPE[tensor_type.elem_type]
        placeholder = ops.make_input(shape, dtype, graph_input.name)
        tensors[graph_input.name] = placeholder
        model_inputs.append(placeholder)
    # Single pass over the nodes, resolving each node's inputs by name.
    for node in onnx_model.graph.node:
        node_inputs = [tensors[name] for name in node.input]
        attrs = {attr.name: ops.parse_attr(attr) for attr in node.attribute}
        results = ops.make_op(node.op_type, node_inputs, attrs)
        assert len(results) == len(node.output)
        tensors.update(zip(node.output, results))
    outputs = [tensors[o.name] for o in onnx_model.graph.output]
    return tf.keras.models.Model(model_inputs, outputs)
def main(infile, outfile=None, export_saved_model=False):
    """Convert an ONNX model file to a Keras .h5 file (or a SavedModel).

    infile: path of the .onnx model to convert.
    outfile: target .h5 path; defaults to infile with a '.onnx' suffix
        replaced by '.h5' (or '.h5' appended when the suffix differs).
    export_saved_model: when truthy, used as the SavedModel export path
        instead of writing an .h5 file.
    """
    if outfile is None:
        # str.endswith is clearer (and safer for short names) than slicing.
        base = infile[:-len('.onnx')] if infile.endswith('.onnx') else infile
        outfile = base + '.h5'
    model = onnx2keras(onnx.load(infile))
    if export_saved_model:
        import tensorflow.compat.v1 as tf_v1
        tf_v1.keras.experimental.export_saved_model(model, export_saved_model)
    else:
        model.save(outfile)
if __name__ == '__main__':
from fire import Fire
Fire(main) | 0.453988 | 0.562237 |
from collections import defaultdict
from enum import Enum
from typing import Dict, Optional
from dagster import Field, Selector
from dagster import _check as check
from dagster.serdes.serdes import whitelist_for_serdes
def get_retries_config():
    """Config schema for the retries setting: an enabled/disabled selector,
    defaulting to enabled."""
    retry_selector = Selector({"enabled": {}, "disabled": {}})
    return Field(retry_selector, is_required=False, default_value={"enabled": {}})
@whitelist_for_serdes
class RetryMode(Enum):
    """How step retries are handled during plan execution."""

    ENABLED = "enabled"
    DISABLED = "disabled"
    # For inner plan execution driven by an "orchestrator" engine (such as
    # multiprocess): up_for_retry steps are not re-enqueued directly; the
    # outer engine is responsible for that.
    DEFERRED = "deferred"

    @staticmethod
    def from_config(config_value: Dict[str, Dict]) -> Optional["RetryMode"]:
        """Build a RetryMode from a selector-shaped config dict (None if empty)."""
        selector = next(iter(config_value), None)
        return RetryMode(selector) if selector is not None else None

    @property
    def enabled(self) -> bool:
        return self == RetryMode.ENABLED

    @property
    def disabled(self) -> bool:
        return self == RetryMode.DISABLED

    @property
    def deferred(self) -> bool:
        return self == RetryMode.DEFERRED

    def for_inner_plan(self) -> "RetryMode":
        """Mode to hand to inner plan execution: enabled becomes deferred so
        the orchestrating engine owns re-enqueueing."""
        if self.enabled:
            return RetryMode.DEFERRED
        if self.disabled or self.deferred:
            return self
        check.failed("Unexpected RetryMode! Expected enabled, disabled, or deferred")
class RetryState:
    """Tracks per-step retry attempt counts during plan execution."""

    def __init__(self, previous_attempts: Optional[Dict[str, int]] = None):
        # defaultdict(int): unseen step keys read as zero attempts.
        self._attempts = defaultdict(int)
        for key, val in check.opt_dict_param(
            previous_attempts, "previous_attempts", key_type=str, value_type=int
        ).items():
            self._attempts[key] = val

    def get_attempt_count(self, key: str) -> int:
        """Number of attempts recorded for the given step key (0 if unseen)."""
        return self._attempts[key]

    def mark_attempt(self, key: str) -> None:
        """Record one more attempt for the given step key."""
        self._attempts[key] += 1

    def snapshot_attempts(self) -> Dict[str, int]:
        """Plain-dict copy of the current attempt counts."""
        # Fix: this line was fused with dataset-extraction residue; restore
        # the clean return statement.
        return dict(self._attempts)
from enum import Enum
from typing import Dict, Optional
from dagster import Field, Selector
from dagster import _check as check
from dagster.serdes.serdes import whitelist_for_serdes
def get_retries_config():
return Field(
Selector({"enabled": {}, "disabled": {}}),
is_required=False,
default_value={"enabled": {}},
)
@whitelist_for_serdes
class RetryMode(Enum):
ENABLED = "enabled"
DISABLED = "disabled"
# Designed for use of inner plan execution within "orchestrator" engine such as multiprocess,
# up_for_retry steps are not directly re-enqueued, deferring that to the engine.
DEFERRED = "deferred"
@staticmethod
def from_config(config_value: Dict[str, Dict]) -> Optional["RetryMode"]:
for selector, _ in config_value.items():
return RetryMode(selector)
return None
@property
def enabled(self) -> bool:
return self == RetryMode.ENABLED
@property
def disabled(self) -> bool:
return self == RetryMode.DISABLED
@property
def deferred(self) -> bool:
return self == RetryMode.DEFERRED
def for_inner_plan(self) -> "RetryMode":
if self.disabled or self.deferred:
return self
elif self.enabled:
return RetryMode.DEFERRED
else:
check.failed("Unexpected RetryMode! Expected enabled, disabled, or deferred")
class RetryState:
def __init__(self, previous_attempts: Optional[Dict[str, int]] = None):
self._attempts = defaultdict(int)
for key, val in check.opt_dict_param(
previous_attempts, "previous_attempts", key_type=str, value_type=int
).items():
self._attempts[key] = val
def get_attempt_count(self, key: str) -> int:
return self._attempts[key]
def mark_attempt(self, key: str) -> None:
self._attempts[key] += 1
def snapshot_attempts(self) -> Dict[str, int]:
return dict(self._attempts) | 0.846863 | 0.157331 |
import glob
import os
import datetime
import json
import numpy as np
import pandas as pd
from pandas import DataFrame
import time
from telegram_definition_L1 import *
from golabal_def import Dir_Path
# Telegram directory (default), taken from the project-wide configuration.
tel_directory = Dir_Path
# Module-level state; setup_data() shadows most of these with locals.
selTelegram_N02 = np.array([], dtype=teltype_N02)  # accumulated N02 telegrams
appended_allTelegram_N02 = []
timeIndex = []      # file-modification timestamps of the loaded telegram files
alltimeIndex = []
# Telegram message id -> filename marker used to glob the telegram files.
messageId = {
    'N02': 'EF21',
}
def setup_data():
    """Load every N02 ('EF21') telegram file, flatten the selected signals
    into one wide table, and return it as a JSON string.

    The JSON payload contains two datasets in pandas 'split' orient:
      df_00 - just the CoilIdOut column
      df_01 - the full export table (scalar per-coil fields, per-stand
              signals for the 5 stands, and the alloy chemistry)
    """
    start_time = time.time()
    selTelegram_N02 = np.array([], dtype=teltype_N02)
    timeIndex = []
    # Collect the N02 telegram files, oldest first (by file mtime).
    file_pattern = tel_directory + '\\*' + messageId["N02"] + '*.tel'
    filelist = glob.glob(file_pattern)
    filelist.sort(key=lambda x: os.path.getmtime(x))
    if len(filelist) > 0:
        for file in filelist:
            # 'with' guarantees the handle is closed even if parsing fails.
            with open(file, 'rb') as f:
                one_telegram = np.fromfile(f, dtype=teltype_N02)
            selTelegram_N02 = np.concatenate((selTelegram_N02, one_telegram))
            timeIndex.append(datetime.datetime.fromtimestamp(os.path.getmtime(file)))
        elaps_time = "- %s seconds ---" % (time.time() - start_time)
        print("N02: data found time" + elaps_time)
    else:
        print("N02: no data found")

    # Alloy composition: first 7 chemistry elements.
    df_chem = DataFrame(selTelegram_N02['AlloyComposition'][:, :7])
    df_chem.columns = ['chem_1', 'chem_2', 'chem_3', 'chem_4', 'chem_5',
                       'chem_6', 'chem_7']

    def _stand_frame(field):
        # One column per stand: every raw per-stand signal is 25 wide and the
        # representative sample for stands G1..G5 sits at indices
        # 2, 7, 12, 17, 22 (one value out of each 5-value group).
        frame = pd.concat(
            [DataFrame(selTelegram_N02[field][:, idx]) for idx in (2, 7, 12, 17, 22)],
            axis=1, sort=False)
        frame.columns = ['%s_G%d' % (field, stand) for stand in range(1, 6)]
        return frame

    # All per-stand measurement signals share the same layout, so they are
    # built uniformly instead of repeating the concat block per signal.
    per_stand_fields = [
        'ExitThick', 'ExitTemp', 'RollSpeed', 'TensionEntry', 'TensionExit',
        'RollForceOS', 'RollForceDS', 'BendWROS', 'BendWRDS', 'BendIROS',
        'BendIRDS', 'ShiftCVC', 'SlipForward', 'HydPosOS', 'HydPosDS',
        'DriveTorque',
    ]
    per_stand_frames = [_stand_frame(field) for field in per_stand_fields]

    # Scalar (per-coil) columns; insertion order defines the column order.
    scalars = {
        'Time': timeIndex,
        'CoilId': selTelegram_N02['CoilId'][:],
        'CoilIdOut': selTelegram_N02['CoilIdOut'][:],
        'SeqCoilOut': selTelegram_N02['SeqCoilOut'][:],
        'SetupNo': selTelegram_N02['SetupNo'][:],
        'ReturnCode': selTelegram_N02['ReturnCode'][:],
        'SetupValidCode': selTelegram_N02['SetupValidCode'][:],
        'NoPasses': selTelegram_N02['NoPasses'][:],
        'AlloyCode': selTelegram_N02['AlloyCode'][:],
        'AnalysisFlag': selTelegram_N02['AnalysisFlag'][:],
        'Width': selTelegram_N02['Width'][:],
        'LengthStart': selTelegram_N02['LengthStart'][:],
        'Length0': selTelegram_N02['Length0'][:],
        'Length1_G1': selTelegram_N02['Length1'][:, 0],
        'Length1_G2': selTelegram_N02['Length1'][:, 1],
        'Length1_G3': selTelegram_N02['Length1'][:, 2],
        'Length1_G4': selTelegram_N02['Length1'][:, 3],
        # BUG FIX: G5 previously read column 3 (a duplicate of G4); use 4.
        'Length1_G5': selTelegram_N02['Length1'][:, 4],
        # NOTE(review): EntryThick reads column 0 but EntryTemp column 1 —
        # kept as-is; confirm the intended raw layout.
        'EntryThick': selTelegram_N02['EntryThick'][:, 0],
        'EntryTemp': selTelegram_N02['EntryTemp'][:, 1],
        'const_force_mode': selTelegram_N02['ConstForceMode'][:],
        'flag_setup_trans_mode': selTelegram_N02['FlagSetupTransMode'][:],
        'return_code': selTelegram_N02['ReturnCode'][:],
        'setup_valid_code': selTelegram_N02['SetupValidCode'][:],
        'thread_speed_mode': selTelegram_N02['ThreadSpeedMode'][:],
        'threading_mode': selTelegram_N02['ThreadingMode'][:],
        'tail_out_mode': selTelegram_N02['TailOutMode'][:],
        'ThreadAssist': selTelegram_N02['ThreadAssist'][:],
        'SpoolInd': selTelegram_N02['SpoolInd'][:],
        'SpoolOuterDiam': selTelegram_N02['SpoolOuterDiam'][:],
        'SpoolWidth': selTelegram_N02['SpoolWidth'][:],
        'TargetTransLength': selTelegram_N02['TargetTransLength'][:],
        'TargetPosWeldSeam': selTelegram_N02['TargetPosWeldSeam'][:],
        'TargetThickHeadLength': selTelegram_N02['TargetThickHeadLength'][:],
        'ArtifSleeveUsage': selTelegram_N02['ArtifSleeveUsage'][:],
        'TensionCurveID': selTelegram_N02['TensionCurveID'][:],
        'TensionCurveNoPos': selTelegram_N02['TensionCurveNoPos'][:],
        'yield_strength_calc': selTelegram_N02['YieldStrengthCalc'][:],
    }
    # Per-stand flag groups (these use raw columns 0..4 directly). The
    # trailing space in these column names is kept deliberately so the JSON
    # schema stays identical for existing consumers.
    for stand in range(5):
        scalars['StandSwitchOff_G%d ' % (stand + 1)] = \
            selTelegram_N02['StandSwitchOff'][:, stand]
    scalars['TargetCoilTempLimit'] = selTelegram_N02['TargetCoilTempLimit'][:]
    for field in ('ThermalCrown', 'FfcCtrlUsage', 'FbcCtrlUsage', 'VfcCtrlUsage'):
        for stand in range(5):
            scalars['%s_G%d ' % (field, stand + 1)] = \
                selTelegram_N02[field][:, stand]
    df1 = DataFrame(scalars)

    export_database = pd.concat([df1] + per_stand_frames + [df_chem],
                                axis=1, sort=False)
    arr_coilids = pd.DataFrame(selTelegram_N02['CoilIdOut'][:], columns=['CoilIdOut'])
    datasets = {
        'df_00': arr_coilids.to_json(orient='split', date_format='iso'),
        'df_01': export_database.to_json(orient='split', date_format='iso'),
    }
    elaps1_time = "- %s seconds ---" % (time.time() - start_time)
    print(elaps1_time + 'setup_data compile')
    return json.dumps(datasets)
import os
import datetime
import json
import numpy as np
import pandas as pd
from pandas import DataFrame
import time
from telegram_definition_L1 import *
from golabal_def import Dir_Path
# telegram directory (default)
tel_directory = Dir_Path
# initialisation
selTelegram_N02 = np.array([], dtype=teltype_N02)
appended_allTelegram_N02 = []
timeIndex = []
alltimeIndex = []
messageId = {
'N02': 'EF21',
}
def setup_data():
# initialisation
start_time = time.time()
allTelegram_N02 = np.array([], dtype=teltype_N02)
selTelegram_N02 = np.array([], dtype=teltype_N02)
timeIndex = []
# specificy telegram type
tel_directory_N02 = tel_directory + '\\*' + messageId["N02"] + '*.tel'
# get list of available files
filelist = glob.glob(tel_directory_N02)
# sort file list
filelist.sort(key=lambda x: os.path.getmtime(x))
if len(filelist) > 0:
for file in filelist:
f = open(file, 'rb')
one_telegram = np.fromfile(f, dtype=teltype_N02)
selTelegram_N02 = np.concatenate((selTelegram_N02, one_telegram))
timeIndex.append(datetime.datetime.fromtimestamp(os.path.getmtime(file)))
f.close()
elaps_time = "- %s seconds ---" % (time.time() - start_time)
print("N02: data found time" + elaps_time)
else:
print("N02: no data found")
# Alloy composition
df_chem = DataFrame(selTelegram_N02['AlloyComposition'][:, :7])
df_chem.columns = ['chem_1', 'chem_2', 'chem_3', 'chem_4', 'chem_5', 'chem_6', 'chem_7']
# ExitThick
df_ext_thick_G1 = DataFrame(selTelegram_N02['ExitThick'][:, 2])
df_ext_thick_G2 = DataFrame(selTelegram_N02['ExitThick'][:, 7])
df_ext_thick_G3 = DataFrame(selTelegram_N02['ExitThick'][:, 12])
df_ext_thick_G4 = DataFrame(selTelegram_N02['ExitThick'][:, 17])
df_ext_thick_G5 = DataFrame(selTelegram_N02['ExitThick'][:, 22])
df_ext_thick = pd.concat(
[df_ext_thick_G1, df_ext_thick_G2, df_ext_thick_G3, df_ext_thick_G4, df_ext_thick_G5],
axis=1, sort=False)
df_ext_thick.columns = ['ExitThick_G1', 'ExitThick_G2', 'ExitThick_G3', 'ExitThick_G4', 'ExitThick_G5']
# ExitTemp
df_ext_temp_G1 = DataFrame(selTelegram_N02['ExitTemp'][:, 2])
df_ext_temp_G2 = DataFrame(selTelegram_N02['ExitTemp'][:, 7])
df_ext_temp_G3 = DataFrame(selTelegram_N02['ExitTemp'][:, 12])
df_ext_temp_G4 = DataFrame(selTelegram_N02['ExitTemp'][:, 17])
df_ext_temp_G5 = DataFrame(selTelegram_N02['ExitTemp'][:, 22])
df_Exit_Temp = pd.concat(
[df_ext_temp_G1, df_ext_temp_G2, df_ext_temp_G3, df_ext_temp_G4, df_ext_temp_G5],
axis=1, sort=False)
df_Exit_Temp.columns = ['ExitTemp_G1', 'ExitTemp_G2', 'ExitTemp_G3', 'ExitTemp_G4', 'ExitTemp_G5']
# RollSpeed
df_RollSpeed_G1 = DataFrame(selTelegram_N02['RollSpeed'][:, 2]) # [:, :5])
df_RollSpeed_G2 = DataFrame(selTelegram_N02['RollSpeed'][:, 7]) # [:, 5:10])
df_RollSpeed_G3 = DataFrame(selTelegram_N02['RollSpeed'][:, 12]) # [:, 10:15])
df_RollSpeed_G4 = DataFrame(selTelegram_N02['RollSpeed'][:, 17]) # [:, 15:20])
df_RollSpeed_G5 = DataFrame(selTelegram_N02['RollSpeed'][:, 22]) # [:, 20:25])
df_RollSpeed = pd.concat(
[df_RollSpeed_G1, df_RollSpeed_G2, df_RollSpeed_G3, df_RollSpeed_G4, df_RollSpeed_G5],
axis=1, sort=False)
df_RollSpeed.columns = ['RollSpeed_G1', 'RollSpeed_G2', 'RollSpeed_G3', 'RollSpeed_G4', 'RollSpeed_G5']
# TensionEntry
df_TensionEntry_G1 = DataFrame(selTelegram_N02['TensionEntry'][:, 2]) # [:, :5])
df_TensionEntry_G2 = DataFrame(selTelegram_N02['TensionEntry'][:, 7]) # [:, 5:10])
df_TensionEntry_G3 = DataFrame(selTelegram_N02['TensionEntry'][:, 12]) # [:, 10:15])
df_TensionEntry_G4 = DataFrame(selTelegram_N02['TensionEntry'][:, 17]) # [:, 15:20])
df_TensionEntry_G5 = DataFrame(selTelegram_N02['TensionEntry'][:, 22]) # [:, 20:25])
df_TensionEntry = pd.concat(
[df_TensionEntry_G1, df_TensionEntry_G2, df_TensionEntry_G3, df_TensionEntry_G4, df_TensionEntry_G5],
axis=1, sort=False)
df_TensionEntry.columns = ['TensionEntry_G1', 'TensionEntry_G2', 'TensionEntry_G3', 'TensionEntry_G4',
'TensionEntry_G5']
# TensionExit
df_TensionExit_G1 = DataFrame(selTelegram_N02['TensionExit'][:, 2]) # [:, :5])
df_TensionExit_G2 = DataFrame(selTelegram_N02['TensionExit'][:, 7]) # [:, 5:10])
df_TensionExit_G3 = DataFrame(selTelegram_N02['TensionExit'][:, 12]) # [:, 10:15])
df_TensionExit_G4 = DataFrame(selTelegram_N02['TensionExit'][:, 17]) # [:, 15:20])
df_TensionExit_G5 = DataFrame(selTelegram_N02['TensionExit'][:, 22]) # [:, 20:25])
df_TensionExit = pd.concat(
[df_TensionExit_G1, df_TensionExit_G2, df_TensionExit_G3, df_TensionExit_G4, df_TensionExit_G5], axis=1,
sort=False)
df_TensionExit.columns = ['TensionExit_G1', 'TensionExit_G2', 'TensionExit_G3', 'TensionExit_G4', 'TensionExit_G5']
# RollForceOS
df_RollForceOS_G1 = DataFrame(selTelegram_N02['RollForceOS'][:, 2]) # [:, :5])
df_RollForceOS_G2 = DataFrame(selTelegram_N02['RollForceOS'][:, 7]) # [:, 5:10])
df_RollForceOS_G3 = DataFrame(selTelegram_N02['RollForceOS'][:, 12]) # [:, 10:15])
df_RollForceOS_G4 = DataFrame(selTelegram_N02['RollForceOS'][:, 17]) # [:, 15:20])
df_RollForceOS_G5 = DataFrame(selTelegram_N02['RollForceOS'][:, 22]) # [:, 20:25])
df_RollForceOS = pd.concat(
[df_RollForceOS_G1, df_RollForceOS_G2, df_RollForceOS_G3, df_RollForceOS_G4, df_RollForceOS_G5], axis=1,
sort=False)
df_RollForceOS.columns = ['RollForceOS_G1', 'RollForceOS_G2', 'RollForceOS_G3', 'RollForceOS_G4', 'RollForceOS_G5']
# RollForceDS
df_RollForceDS_G1 = DataFrame(selTelegram_N02['RollForceDS'][:, 2]) # [:, :5])
df_RollForceDS_G2 = DataFrame(selTelegram_N02['RollForceDS'][:, 7]) # [:, 5:10])
df_RollForceDS_G3 = DataFrame(selTelegram_N02['RollForceDS'][:, 12]) # [:, 10:15])
df_RollForceDS_G4 = DataFrame(selTelegram_N02['RollForceDS'][:, 17]) # [:, 15:20])
df_RollForceDS_G5 = DataFrame(selTelegram_N02['RollForceDS'][:, 22]) # [:, 20:25])
df_RollForceDS = pd.concat(
[df_RollForceDS_G1, df_RollForceDS_G2, df_RollForceDS_G3, df_RollForceDS_G4, df_RollForceDS_G5], axis=1,
sort=False)
df_RollForceDS.columns = ['RollForceDS_G1', 'RollForceDS_G2', 'RollForceDS_G3', 'RollForceDS_G4', 'RollForceDS_G5']
# BendWROS
df_BendWROS_G1 = DataFrame(selTelegram_N02['BendWROS'][:, 2]) # [:, :5])
df_BendWROS_G2 = DataFrame(selTelegram_N02['BendWROS'][:, 7]) # [:, 5:10])
df_BendWROS_G3 = DataFrame(selTelegram_N02['BendWROS'][:, 12]) # [:, 10:15])
df_BendWROS_G4 = DataFrame(selTelegram_N02['BendWROS'][:, 17]) # [:, 15:20])
df_BendWROS_G5 = DataFrame(selTelegram_N02['BendWROS'][:, 22]) # [:, 20:25])
df_BendWROS = pd.concat(
[df_BendWROS_G1, df_BendWROS_G2, df_BendWROS_G3, df_BendWROS_G4, df_BendWROS_G5], axis=1,
sort=False)
df_BendWROS.columns = ['BendWROS_G1', 'BendWROS_G2', 'BendWROS_G3', 'BendWROS_G4', 'BendWROS_G5']
# BendWRDS
df_BendWRDS_G1 = DataFrame(selTelegram_N02['BendWRDS'][:, 2]) # [:, :5])
df_BendWRDS_G2 = DataFrame(selTelegram_N02['BendWRDS'][:, 7]) # [:, 5:10])
df_BendWRDS_G3 = DataFrame(selTelegram_N02['BendWRDS'][:, 12]) # [:, 10:15])
df_BendWRDS_G4 = DataFrame(selTelegram_N02['BendWRDS'][:, 17]) # [:, 15:20])
df_BendWRDS_G5 = DataFrame(selTelegram_N02['BendWRDS'][:, 22]) # [:, 20:25])
df_BendWRDS = pd.concat(
[df_BendWRDS_G1, df_BendWRDS_G2, df_BendWRDS_G3, df_BendWRDS_G4, df_BendWRDS_G5], axis=1,
sort=False)
df_BendWRDS.columns = ['BendWRDS_G1', 'BendWRDS_G2', 'BendWRDS_G3', 'BendWRDS_G4', 'BendWRDS_G5']
# BendIROS
df_BendIROS_G1 = DataFrame(selTelegram_N02['BendIROS'][:, 2]) # [:, :5])
df_BendIROS_G2 = DataFrame(selTelegram_N02['BendIROS'][:, 7]) # [:, 5:10])
df_BendIROS_G3 = DataFrame(selTelegram_N02['BendIROS'][:, 12]) # [:, 10:15])
df_BendIROS_G4 = DataFrame(selTelegram_N02['BendIROS'][:, 17]) # [:, 15:20])
df_BendIROS_G5 = DataFrame(selTelegram_N02['BendIROS'][:, 22]) # [:, 20:25])
df_BendIROS = pd.concat(
[df_BendIROS_G1, df_BendIROS_G2, df_BendIROS_G3, df_BendIROS_G4, df_BendIROS_G5], axis=1,
sort=False)
df_BendIROS.columns = ['BendIROS_G1', 'BendIROS_G2', 'BendIROS_G3', 'BendIROS_G4', 'BendIROS_G5']
# BendIRDS
df_BendIRDS_G1 = DataFrame(selTelegram_N02['BendIRDS'][:, 2]) # [:, :5])
df_BendIRDS_G2 = DataFrame(selTelegram_N02['BendIRDS'][:, 7]) # [:, 5:10])
df_BendIRDS_G3 = DataFrame(selTelegram_N02['BendIRDS'][:, 12]) # [:, 10:15])
df_BendIRDS_G4 = DataFrame(selTelegram_N02['BendIRDS'][:, 17]) # [:, 15:20])
df_BendIRDS_G5 = DataFrame(selTelegram_N02['BendIRDS'][:, 22]) # [:, 20:25])
df_BendIRDS = pd.concat(
[df_BendIRDS_G1, df_BendIRDS_G2, df_BendIRDS_G3, df_BendIRDS_G4, df_BendIRDS_G5], axis=1,
sort=False)
df_BendIRDS.columns = ['BendIRDS_G1', 'BendIRDS_G2', 'BendIRDS_G3', 'BendIRDS_G4', 'BendIRDS_G5']
# ShiftCVC
df_ShiftCVC_G1 = DataFrame(selTelegram_N02['ShiftCVC'][:, 2]) # [:, :5])
df_ShiftCVC_G2 = DataFrame(selTelegram_N02['ShiftCVC'][:, 7]) # [:, 5:10])
df_ShiftCVC_G3 = DataFrame(selTelegram_N02['ShiftCVC'][:, 12]) # [:, 10:15])
df_ShiftCVC_G4 = DataFrame(selTelegram_N02['ShiftCVC'][:, 17]) # [:, 15:20])
df_ShiftCVC_G5 = DataFrame(selTelegram_N02['ShiftCVC'][:, 22]) # [:, 20:25])
df_ShiftCVC = pd.concat(
[df_ShiftCVC_G1, df_ShiftCVC_G2, df_ShiftCVC_G3, df_ShiftCVC_G4, df_ShiftCVC_G5], axis=1,
sort=False)
df_ShiftCVC.columns = ['ShiftCVC_G1', 'ShiftCVC_G2', 'ShiftCVC_G3', 'ShiftCVC_G4', 'ShiftCVC_G5']
# SlipForward
df_SlipForward_G1 = DataFrame(selTelegram_N02['SlipForward'][:, 2]) # [:, :5])
df_SlipForward_G2 = DataFrame(selTelegram_N02['SlipForward'][:, 7]) # [:, 5:10])
df_SlipForward_G3 = DataFrame(selTelegram_N02['SlipForward'][:, 12]) # [:, 10:15])
df_SlipForward_G4 = DataFrame(selTelegram_N02['SlipForward'][:, 17]) # [:, 15:20])
df_SlipForward_G5 = DataFrame(selTelegram_N02['SlipForward'][:, 22]) # [:, 20:25])
df_SlipForward = pd.concat(
[df_SlipForward_G1, df_SlipForward_G2, df_SlipForward_G3, df_SlipForward_G4, df_SlipForward_G5], axis=1,
sort=False)
df_SlipForward.columns = ['SlipForward_G1', 'SlipForward_G2', 'SlipForward_G3', 'SlipForward_G4', 'SlipForward_G5']
# HydPosOS
df_HydPosOS_G1 = DataFrame(selTelegram_N02['HydPosOS'][:, 2]) # [:, :5])
df_HydPosOS_G2 = DataFrame(selTelegram_N02['HydPosOS'][:, 7]) # [:, 5:10])
df_HydPosOS_G3 = DataFrame(selTelegram_N02['HydPosOS'][:, 12]) # [:, 10:15])
df_HydPosOS_G4 = DataFrame(selTelegram_N02['HydPosOS'][:, 17]) # [:, 15:20])
df_HydPosOS_G5 = DataFrame(selTelegram_N02['HydPosOS'][:, 22]) # [:, 20:25])
df_HydPosOS = pd.concat(
[df_HydPosOS_G1, df_HydPosOS_G2, df_HydPosOS_G3, df_HydPosOS_G4, df_HydPosOS_G5], axis=1,
sort=False)
df_HydPosOS.columns = ['HydPosOS_G1', 'HydPosOS_G2', 'HydPosOS_G3', 'HydPosOS_G4', 'HydPosOS_G5']
# HydPosDS
df_HydPosDS_G1 = DataFrame(selTelegram_N02['HydPosDS'][:, 2]) # [:, :5])
df_HydPosDS_G2 = DataFrame(selTelegram_N02['HydPosDS'][:, 7]) # [:, 5:10])
df_HydPosDS_G3 = DataFrame(selTelegram_N02['HydPosDS'][:, 12]) # [:, 10:15])
df_HydPosDS_G4 = DataFrame(selTelegram_N02['HydPosDS'][:, 17]) # [:, 15:20])
df_HydPosDS_G5 = DataFrame(selTelegram_N02['HydPosDS'][:, 22]) # [:, 20:25])
df_HydPosDS = pd.concat(
[df_HydPosDS_G1, df_HydPosDS_G2, df_HydPosDS_G3, df_HydPosDS_G4, df_HydPosDS_G5], axis=1,
sort=False)
df_HydPosDS.columns = ['HydPosDS_G1', 'HydPosDS_G2', 'HydPosDS_G3', 'HydPosDS_G4', 'HydPosDS_G5']
# DriveTorque
df_DriveTorque_G1 = DataFrame(selTelegram_N02['DriveTorque'][:, 2]) # [:, :5])
df_DriveTorque_G2 = DataFrame(selTelegram_N02['DriveTorque'][:, 7]) # [:, 5:10])
df_DriveTorque_G3 = DataFrame(selTelegram_N02['DriveTorque'][:, 12]) # [:, 10:15])
df_DriveTorque_G4 = DataFrame(selTelegram_N02['DriveTorque'][:, 17]) # [:, 15:20])
df_DriveTorque_G5 = DataFrame(selTelegram_N02['DriveTorque'][:, 22]) # [:, 20:25])
df_DriveTorque = pd.concat(
[df_DriveTorque_G1, df_DriveTorque_G2, df_DriveTorque_G3, df_DriveTorque_G4, df_DriveTorque_G5], axis=1,
sort=False)
df_DriveTorque.columns = ['DriveTorque_G1', 'DriveTorque_G2', 'DriveTorque_G3', 'DriveTorque_G4', 'DriveTorque_G5']
df1 = DataFrame({'Time': timeIndex,
'CoilId': selTelegram_N02['CoilId'][:],
'CoilIdOut': selTelegram_N02['CoilIdOut'][:],
'SeqCoilOut': selTelegram_N02['SeqCoilOut'][:],
'SetupNo': selTelegram_N02['SetupNo'][:],
'ReturnCode': selTelegram_N02['ReturnCode'][:],
'SetupValidCode': selTelegram_N02['SetupValidCode'][:],
'NoPasses': selTelegram_N02['NoPasses'][:],
'AlloyCode': selTelegram_N02['AlloyCode'][:],
'AnalysisFlag': selTelegram_N02['AnalysisFlag'][:],
'Width': selTelegram_N02['Width'][:],
'LengthStart': selTelegram_N02['LengthStart'][:, ],
'Length0': selTelegram_N02['Length0'][:],
'Length1_G1': selTelegram_N02['Length1'][:, 0],
'Length1_G2': selTelegram_N02['Length1'][:, 1],
'Length1_G3': selTelegram_N02['Length1'][:, 2],
'Length1_G4': selTelegram_N02['Length1'][:, 3],
'Length1_G5': selTelegram_N02['Length1'][:, 3],
'EntryThick': selTelegram_N02['EntryThick'][:, 0],
'EntryTemp': selTelegram_N02['EntryTemp'][:, 1],
'const_force_mode': selTelegram_N02['ConstForceMode'][:],
'flag_setup_trans_mode': selTelegram_N02['FlagSetupTransMode'][:],
'return_code': selTelegram_N02['ReturnCode'][:],
'setup_valid_code': selTelegram_N02['SetupValidCode'][:],
'thread_speed_mode': selTelegram_N02['ThreadSpeedMode'][:],
'threading_mode': selTelegram_N02['ThreadingMode'][:],
'tail_out_mode': selTelegram_N02['TailOutMode'][:],
'ThreadAssist': selTelegram_N02['ThreadAssist'][:],
'SpoolInd': selTelegram_N02['SpoolInd'][:],
'SpoolOuterDiam': selTelegram_N02['SpoolOuterDiam'][:],
'SpoolWidth': selTelegram_N02['SpoolWidth'][:],
'TargetTransLength': selTelegram_N02['TargetTransLength'][:],
'TargetPosWeldSeam': selTelegram_N02['TargetPosWeldSeam'][:],
'TargetThickHeadLength': selTelegram_N02['TargetThickHeadLength'][:],
'ArtifSleeveUsage': selTelegram_N02['ArtifSleeveUsage'][:],
'TensionCurveID': selTelegram_N02['TensionCurveID'][:],
'TensionCurveNoPos': selTelegram_N02['TensionCurveNoPos'][:],
'yield_strength_calc': selTelegram_N02['YieldStrengthCalc'][:],
'StandSwitchOff_G1 ': selTelegram_N02['StandSwitchOff'][:, 0],
'StandSwitchOff_G2 ': selTelegram_N02['StandSwitchOff'][:, 1],
'StandSwitchOff_G3 ': selTelegram_N02['StandSwitchOff'][:, 2],
'StandSwitchOff_G4 ': selTelegram_N02['StandSwitchOff'][:, 3],
'StandSwitchOff_G5 ': selTelegram_N02['StandSwitchOff'][:, 4],
'TargetCoilTempLimit': selTelegram_N02['TargetCoilTempLimit'][:],
'ThermalCrown_G1 ': selTelegram_N02['ThermalCrown'][:, 0],
'ThermalCrown_G2 ': selTelegram_N02['ThermalCrown'][:, 1],
'ThermalCrown_G3 ': selTelegram_N02['ThermalCrown'][:, 2],
'ThermalCrown_G4 ': selTelegram_N02['ThermalCrown'][:, 3],
'ThermalCrown_G5 ': selTelegram_N02['ThermalCrown'][:, 4],
'FfcCtrlUsage_G1 ': selTelegram_N02['FfcCtrlUsage'][:, 0],
'FfcCtrlUsage_G2 ': selTelegram_N02['FfcCtrlUsage'][:, 1],
'FfcCtrlUsage_G3 ': selTelegram_N02['FfcCtrlUsage'][:, 2],
'FfcCtrlUsage_G4 ': selTelegram_N02['FfcCtrlUsage'][:, 3],
'FfcCtrlUsage_G5 ': selTelegram_N02['FfcCtrlUsage'][:, 4],
'FbcCtrlUsage_G1 ': selTelegram_N02['FbcCtrlUsage'][:, 0],
'FbcCtrlUsage_G2 ': selTelegram_N02['FbcCtrlUsage'][:, 1],
'FbcCtrlUsage_G3 ': selTelegram_N02['FbcCtrlUsage'][:, 2],
'FbcCtrlUsage_G4 ': selTelegram_N02['FbcCtrlUsage'][:, 3],
'FbcCtrlUsage_G5 ': selTelegram_N02['FbcCtrlUsage'][:, 4],
'VfcCtrlUsage_G1 ': selTelegram_N02['VfcCtrlUsage'][:, 0],
'VfcCtrlUsage_G2 ': selTelegram_N02['VfcCtrlUsage'][:, 1],
'VfcCtrlUsage_G3 ': selTelegram_N02['VfcCtrlUsage'][:, 2],
'VfcCtrlUsage_G4 ': selTelegram_N02['VfcCtrlUsage'][:, 3],
'VfcCtrlUsage_G5 ': selTelegram_N02['VfcCtrlUsage'][:, 4]
})
export_database = pd.concat([df1, df_ext_thick, df_Exit_Temp, df_RollSpeed,
df_TensionEntry, df_TensionExit, df_RollForceOS,
df_RollForceDS, df_BendWROS, df_BendWRDS, df_BendIROS,
df_BendIRDS, df_ShiftCVC, df_SlipForward, df_HydPosOS, df_HydPosDS, df_DriveTorque,
df_chem],
axis=1, sort=False)
arr_coilids = pd.DataFrame(selTelegram_N02['CoilIdOut'][:], columns=['CoilIdOut'])
datasets = {
'df_00': arr_coilids.to_json(orient='split', date_format='iso'),
'df_01': export_database.to_json(orient='split', date_format='iso'),
}
elaps1_time = "- %s seconds ---" % (time.time() - start_time)
print(elaps1_time + 'setup_data compile')
return json.dumps(datasets) | 0.182899 | 0.228737 |
import time, sys
from time import gmtime
import httplib, urllib
ip_address='10.12.19.67'
#ip_address='127.0.0.1'
#ip_address='10.20.218.197'
cost='0'
weather='overcast'
#local_hour=time.localtime().tm_hour
sun_percentage=[0,0,0,0,0,0,0.2305,0.6537,0.8328,0.9215,0.9689,0.9927,1,0.9927,0.9689,0.9215,0.8328,0.6537,0.2305,0,0,0,0,0]
local_hour=6
alt_hour=sun_percentage[local_hour]
if weather=='daylight':
max_light=10750*0.45*alt_hour
elif weather=='overcast':
max_light=1075*0.45*alt_hour
elif weather=='dark':
max_light=107.5*0.45*alt_hour
l_amount=''+str(int(max_light))
print l_amount
sys.path.append('../..')
import spade
in_use=False
name="blinds_agent"
class MyAgent(spade.Agent.Agent):
def _setup(self):
template = spade.Behaviour.ACLTemplate()
template.setSender(spade.AID.aid("control_agent@"+ip_address,["xmpp://control_agent@"+ip_address]))
template.setOntology("auction")
t = spade.Behaviour.MessageTemplate(template)
self.addBehaviour(self.RecBehav(),t)
print "Receiver Light template behaviour just started!"
class RecBehav(spade.Behaviour.EventBehaviour):
def _process(self):
global in_use
msg = self._receive(block=False,timeout=10)
print name+" has received a CFP:"
try:
m_content=int(msg.getContent())
except ValueError:
print "Not a number"
light_sensed=m_content
if not in_use:
msg = spade.ACLMessage.ACLMessage()
msg.setPerformative("propose")
msg.setOntology("auction")
msg.addReceiver(spade.AID.aid("control_agent@"+ip_address,["xmpp://control_agent@"+ip_address]))
msg.setContent(cost+" "+str(int(int(l_amount)*0.25))+" "+name+"0")
self.myAgent.send(msg)
msg.setContent(cost+" "+str(int(int(l_amount)*0.50))+" "+name+"1")
self.myAgent.send(msg)
msg.setContent(cost+" "+str(int(int(l_amount)*0.75))+" "+name+"2")
self.myAgent.send(msg)
msg.setContent(cost+" "+str(int(int(l_amount)*1))+" "+name+"3")
self.myAgent.send(msg)
print name+" has sent a proposal to the control_agent:"
a = MyAgent(name+"@"+ip_address, "secret")
a.start()
alive = True
while alive:
try:
time.sleep(1)
except KeyboardInterrupt:
alive=False
a.stop()
sys.exit(0) | NinjaBandSPADE/SPADE-agents/blinds_agent.py | import time, sys
from time import gmtime
import httplib, urllib
ip_address='10.12.19.67'
#ip_address='127.0.0.1'
#ip_address='10.20.218.197'
cost='0'
weather='overcast'
#local_hour=time.localtime().tm_hour
sun_percentage=[0,0,0,0,0,0,0.2305,0.6537,0.8328,0.9215,0.9689,0.9927,1,0.9927,0.9689,0.9215,0.8328,0.6537,0.2305,0,0,0,0,0]
local_hour=6
alt_hour=sun_percentage[local_hour]
if weather=='daylight':
max_light=10750*0.45*alt_hour
elif weather=='overcast':
max_light=1075*0.45*alt_hour
elif weather=='dark':
max_light=107.5*0.45*alt_hour
l_amount=''+str(int(max_light))
print l_amount
sys.path.append('../..')
import spade
in_use=False
name="blinds_agent"
class MyAgent(spade.Agent.Agent):
def _setup(self):
template = spade.Behaviour.ACLTemplate()
template.setSender(spade.AID.aid("control_agent@"+ip_address,["xmpp://control_agent@"+ip_address]))
template.setOntology("auction")
t = spade.Behaviour.MessageTemplate(template)
self.addBehaviour(self.RecBehav(),t)
print "Receiver Light template behaviour just started!"
class RecBehav(spade.Behaviour.EventBehaviour):
def _process(self):
global in_use
msg = self._receive(block=False,timeout=10)
print name+" has received a CFP:"
try:
m_content=int(msg.getContent())
except ValueError:
print "Not a number"
light_sensed=m_content
if not in_use:
msg = spade.ACLMessage.ACLMessage()
msg.setPerformative("propose")
msg.setOntology("auction")
msg.addReceiver(spade.AID.aid("control_agent@"+ip_address,["xmpp://control_agent@"+ip_address]))
msg.setContent(cost+" "+str(int(int(l_amount)*0.25))+" "+name+"0")
self.myAgent.send(msg)
msg.setContent(cost+" "+str(int(int(l_amount)*0.50))+" "+name+"1")
self.myAgent.send(msg)
msg.setContent(cost+" "+str(int(int(l_amount)*0.75))+" "+name+"2")
self.myAgent.send(msg)
msg.setContent(cost+" "+str(int(int(l_amount)*1))+" "+name+"3")
self.myAgent.send(msg)
print name+" has sent a proposal to the control_agent:"
a = MyAgent(name+"@"+ip_address, "secret")
a.start()
alive = True
while alive:
try:
time.sleep(1)
except KeyboardInterrupt:
alive=False
a.stop()
sys.exit(0) | 0.029633 | 0.084191 |
import logging
import ray
import ray.streaming._streaming as _streaming
import ray.streaming.generated.remote_call_pb2 as remote_call_pb
import ray.streaming.runtime.processor as processor
from ray.streaming.config import Config
from ray.streaming.runtime.graph import ExecutionGraph
from ray.streaming.runtime.task import SourceStreamTask, OneInputStreamTask
logger = logging.getLogger(__name__)
# special flag to indicate this actor not ready
_NOT_READY_FLAG_ = b" " * 4
@ray.remote
class JobWorker(object):
"""A streaming job worker is used to execute user-defined function and
interact with `JobMaster`"""
def __init__(self):
self.worker_context = None
self.task_id = None
self.config = None
self.execution_graph = None
self.execution_task = None
self.execution_node = None
self.stream_processor = None
self.task = None
self.reader_client = None
self.writer_client = None
def init(self, worker_context_bytes):
worker_context = remote_call_pb.WorkerContext()
worker_context.ParseFromString(worker_context_bytes)
self.worker_context = worker_context
self.task_id = worker_context.task_id
self.config = worker_context.conf
execution_graph = ExecutionGraph(worker_context.graph)
self.execution_graph = execution_graph
self.execution_task = self.execution_graph. \
get_execution_task_by_task_id(self.task_id)
self.execution_node = self.execution_graph. \
get_execution_node_by_task_id(self.task_id)
operator = self.execution_node.stream_operator
self.stream_processor = processor.build_processor(operator)
logger.info(
"Initializing JobWorker, task_id: {}, operator: {}.".format(
self.task_id, self.stream_processor))
if self.config.get(Config.CHANNEL_TYPE, Config.NATIVE_CHANNEL):
self.reader_client = _streaming.ReaderClient()
self.writer_client = _streaming.WriterClient()
self.task = self.create_stream_task()
self.task.start()
logger.info("JobWorker init succeed")
return True
def create_stream_task(self):
if isinstance(self.stream_processor, processor.SourceProcessor):
return SourceStreamTask(self.task_id, self.stream_processor, self)
elif isinstance(self.stream_processor, processor.OneInputProcessor):
return OneInputStreamTask(self.task_id, self.stream_processor,
self)
else:
raise Exception("Unsupported processor type: " +
type(self.stream_processor))
def on_reader_message(self, buffer: bytes):
"""Called by upstream queue writer to send data message to downstream
queue reader.
"""
self.reader_client.on_reader_message(buffer)
def on_reader_message_sync(self, buffer: bytes):
"""Called by upstream queue writer to send control message to downstream
downstream queue reader.
"""
if self.reader_client is None:
return _NOT_READY_FLAG_
result = self.reader_client.on_reader_message_sync(buffer)
return result.to_pybytes()
def on_writer_message(self, buffer: bytes):
"""Called by downstream queue reader to send notify message to
upstream queue writer.
"""
self.writer_client.on_writer_message(buffer)
def on_writer_message_sync(self, buffer: bytes):
"""Called by downstream queue reader to send control message to
upstream queue writer.
"""
if self.writer_client is None:
return _NOT_READY_FLAG_
result = self.writer_client.on_writer_message_sync(buffer)
return result.to_pybytes() | streaming/python/runtime/worker.py | import logging
import ray
import ray.streaming._streaming as _streaming
import ray.streaming.generated.remote_call_pb2 as remote_call_pb
import ray.streaming.runtime.processor as processor
from ray.streaming.config import Config
from ray.streaming.runtime.graph import ExecutionGraph
from ray.streaming.runtime.task import SourceStreamTask, OneInputStreamTask
logger = logging.getLogger(__name__)
# special flag to indicate this actor not ready
_NOT_READY_FLAG_ = b" " * 4
@ray.remote
class JobWorker(object):
"""A streaming job worker is used to execute user-defined function and
interact with `JobMaster`"""
def __init__(self):
self.worker_context = None
self.task_id = None
self.config = None
self.execution_graph = None
self.execution_task = None
self.execution_node = None
self.stream_processor = None
self.task = None
self.reader_client = None
self.writer_client = None
def init(self, worker_context_bytes):
worker_context = remote_call_pb.WorkerContext()
worker_context.ParseFromString(worker_context_bytes)
self.worker_context = worker_context
self.task_id = worker_context.task_id
self.config = worker_context.conf
execution_graph = ExecutionGraph(worker_context.graph)
self.execution_graph = execution_graph
self.execution_task = self.execution_graph. \
get_execution_task_by_task_id(self.task_id)
self.execution_node = self.execution_graph. \
get_execution_node_by_task_id(self.task_id)
operator = self.execution_node.stream_operator
self.stream_processor = processor.build_processor(operator)
logger.info(
"Initializing JobWorker, task_id: {}, operator: {}.".format(
self.task_id, self.stream_processor))
if self.config.get(Config.CHANNEL_TYPE, Config.NATIVE_CHANNEL):
self.reader_client = _streaming.ReaderClient()
self.writer_client = _streaming.WriterClient()
self.task = self.create_stream_task()
self.task.start()
logger.info("JobWorker init succeed")
return True
def create_stream_task(self):
if isinstance(self.stream_processor, processor.SourceProcessor):
return SourceStreamTask(self.task_id, self.stream_processor, self)
elif isinstance(self.stream_processor, processor.OneInputProcessor):
return OneInputStreamTask(self.task_id, self.stream_processor,
self)
else:
raise Exception("Unsupported processor type: " +
type(self.stream_processor))
def on_reader_message(self, buffer: bytes):
"""Called by upstream queue writer to send data message to downstream
queue reader.
"""
self.reader_client.on_reader_message(buffer)
def on_reader_message_sync(self, buffer: bytes):
"""Called by upstream queue writer to send control message to downstream
downstream queue reader.
"""
if self.reader_client is None:
return _NOT_READY_FLAG_
result = self.reader_client.on_reader_message_sync(buffer)
return result.to_pybytes()
def on_writer_message(self, buffer: bytes):
"""Called by downstream queue reader to send notify message to
upstream queue writer.
"""
self.writer_client.on_writer_message(buffer)
def on_writer_message_sync(self, buffer: bytes):
"""Called by downstream queue reader to send control message to
upstream queue writer.
"""
if self.writer_client is None:
return _NOT_READY_FLAG_
result = self.writer_client.on_writer_message_sync(buffer)
return result.to_pybytes() | 0.687735 | 0.054224 |
# Standard library imports
from typing import Dict, List, Tuple
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.core.component import validated
from gluonts.distribution.bijection import Bijection, InverseBijection
from gluonts.distribution.bijection_output import BijectionOutput
from gluonts.model.common import Tensor
# Relative imports
from .distribution import getF, softplus
class BoxCoxTranform(Bijection):
r"""
Implements Box-Cox transformation of a uni-variate random variable.
The Box-Cox transformation of an observation :math:`z` is given by
.. math::
BoxCox(z; \lambda_1, \lambda_2) = \begin{cases}
((z + \lambda_2)^{\lambda_1} - 1) / \lambda_1, \quad & \text{if }
\lambda_1 \neq 0, \\
\log (z + \lambda_2), \quad & \text{otherwise.}
\end{cases}
Here, :math:`\lambda_1` and :math:`\lambda_2` are learnable parameters. Note that the domain
of the transformation is not restricted.
For numerical stability, instead of checking :math:`\lambda_1` is exactly zero, we use the condition
.. math::
|\lambda_1| < tol\_lambda\_1
for a pre-specified tolerance `tol_lambda_1`.
Inverse of the Box-Cox Transform is given by
.. math::
BoxCox^{-1}(y; \lambda_1, \lambda_2) = \begin{cases}
(y \lambda_1 + 1)^{(1/\lambda_1)} - \lambda_2, \quad & \text{if }
\lambda_1 \neq 0, \\
\exp (y) - \lambda_2, \quad & \text{otherwise.}
\end{cases}
**Notes on numerical stability:**
1. For the forward transformation, :math:`\lambda_2` must always be chosen such that
.. math::
z + \lambda_2 > 0.
To achieve this one needs to know a priori the lower bound on the observations.
This is set in `BoxCoxTransformOutput`, since :math:`\lambda_2` is learnable.
2. Similarly for the inverse transformation to work reliably, a sufficient condition is
.. math::
y \lambda_1 + 1 \geq 0,
where :math:`y` is the input to the inverse transformation.
This cannot always be guaranteed especially when :math:`y` is a sample from a transformed distribution.
Hence we always truncate :math:`y \lambda_1 + 1` at zero.
An example showing why this could happen in our case:
consider transforming observations from the unit interval (0, 1) with parameters
.. math::
\begin{align}
\lambda_1 = &\ 1.1, \\
\lambda_2 = &\ 0.
\end{align}
Then the range of the transformation is (-0.9090, 0.0).
If Gaussian is fit to the transformed observations and a sample is drawn from it,
then it is likely that the sample is outside this range, e.g., when the mean is close to -0.9.
The subsequent inverse transformation of the sample is not a real number anymore.
>>> y = -0.91
>>> lambda_1 = 1.1
>>> lambda_2 = 0.0
>>> (y * lambda_1 + 1) ** (1 / lambda_1) + lambda_2
(-0.0017979146510711471+0.0005279153735965289j)
Parameters
----------
lambda_1
lambda_2
tol_lambda_1
For numerical stability, treat `lambda_1` as zero if it is less than
`tol_lambda_1`
F
"""
arg_names = ["box_cox.lambda_1", "box_cox.lambda_2"]
def __init__(
self,
lambda_1: Tensor,
lambda_2: Tensor,
tol_lambda_1: float = 1e-2,
F=None,
) -> None:
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.tol_lambda_1 = tol_lambda_1
self.F = F if F else getF(lambda_1)
# Addressing mxnet madness
self._power = self.F.power if self.F == mx.nd else self.F.pow
@property
def args(self) -> List:
r"""
List: current values of the parameters
"""
return [self.lambda_1, self.lambda_2]
@property
def event_dim(self) -> int:
return 0
def f(self, z: Tensor) -> Tensor:
r"""
Forward transformation of observations `z`
Parameters
----------
z
observations
Returns
-------
Tensor
Transformed observations
"""
F = self.F
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
tol_lambda_1 = self.tol_lambda_1
_power = self._power
return F.where(
condition=(F.abs(lambda_1).__ge__(tol_lambda_1).broadcast_like(z)),
x=(_power(z + lambda_2, lambda_1) - 1.0) / lambda_1,
y=F.log(z + lambda_2),
name="Box_Cox_trans",
)
def f_inv(self, y: Tensor) -> Tensor:
r"""Inverse of the Box-Cox Transform
Parameters
----------
y
Transformed observations
Returns
-------
Tensor
Observations
"""
F = self.F
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
tol_lambda_1 = self.tol_lambda_1
_power = self._power
# For numerical stability we truncate :math:`y * \lambda_1 + 1.0` at zero.
base = F.relu(y * lambda_1 + 1.0)
return F.where(
condition=F.abs(lambda_1).__ge__(tol_lambda_1),
x=_power(base, 1.0 / lambda_1) - lambda_2,
y=F.exp(y) - lambda_2,
name="Box_Cox_inverse_trans",
)
def log_abs_det_jac(self, z: Tensor, y: Tensor = None) -> Tensor:
r"""
Logarithm of the absolute value of the Jacobian determinant corresponding to the Box-Cox Transform
is given by
.. math::
\log \frac{d}{dz} BoxCox(z; \lambda_1, \lambda_2) = \begin{cases}
\log (z + \lambda_2) (\lambda_1 - 1), \quad & \text{if } \lambda_1 \neq 0, \\
-\log (z + \lambda_2), \quad & \text{otherwise.}
\end{cases}
Note that the derivative of the transformation is always non-negative.
Parameters
----------
z
observations
y
not used
Returns
-------
Tensor
"""
F = self.F
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
tol_lambda_1 = self.tol_lambda_1
return F.where(
condition=F.abs(lambda_1).__ge__(tol_lambda_1),
x=F.log(z + lambda_2) * (lambda_1 - 1.0),
y=-F.log(z + lambda_2),
name="Box_Cox_trans_log_det_jac",
)
class BoxCoxTransformOutput(BijectionOutput):
bij_cls: type = BoxCoxTranform
args_dim: Dict[str, int] = dict(zip(BoxCoxTranform.arg_names, [1, 1]))
@validated()
def __init__(self, lb_obs: float = 0.0, fix_lambda_2: bool = True) -> None:
super().__init__()
self.lb_obs = lb_obs
self.fix_lambda_2 = fix_lambda_2
def domain_map(self, F, *args: Tensor) -> Tuple[Tensor, ...]:
lambda_1, lambda_2 = args
if self.fix_lambda_2:
lambda_2 = self.lb_obs * F.ones_like(lambda_2)
else:
# This makes sure that :math:`z + \lambda_2 > 0`, where :math:`z > lb_obs`
lambda_2 = softplus(F, lambda_2) - self.lb_obs * F.ones_like(
lambda_2
)
# we squeeze the output since event_shape is ()
return lambda_1.squeeze(axis=-1), lambda_2.squeeze(axis=-1)
@property
def event_shape(self) -> Tuple:
return ()
class InverseBoxCoxTransform(InverseBijection):
    """
    Implements the inverse of Box-Cox transformation as a bijection.
    """

    arg_names = ["box_cox.lambda_1", "box_cox.lambda_2"]

    def __init__(
        self,
        lambda_1: Tensor,
        lambda_2: Tensor,
        tol_lambda_1: float = 1e-2,
        F=None,
    ) -> None:
        # Wraps the forward Box-Cox bijection; the InverseBijection base
        # presumably swaps the transform directions (defined elsewhere).
        super().__init__(BoxCoxTranform(lambda_1, lambda_2, tol_lambda_1, F))

    @property
    def event_dim(self) -> int:
        # Univariate transform: scalar events.
        return 0
class InverseBoxCoxTransformOutput(BoxCoxTransformOutput):
    """Output layer producing `InverseBoxCoxTransform` parameters.

    Reuses the lambda-parameter mapping of `BoxCoxTransformOutput`; only the
    bijection class constructed from those parameters differs.
    """

    bij_cls: type = InverseBoxCoxTransform
    args_dim: Dict[str, int] = dict(
        zip(InverseBoxCoxTransform.arg_names, [1, 1])
    )

    @property
    def event_shape(self) -> Tuple:
        # Univariate transform: scalar events.
        # (Fix: removed stray dataset-delimiter text that was fused onto
        # this return statement and made the line a syntax error.)
        return ()
# Standard library imports
from typing import Dict, List, Tuple
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.core.component import validated
from gluonts.distribution.bijection import Bijection, InverseBijection
from gluonts.distribution.bijection_output import BijectionOutput
from gluonts.model.common import Tensor
# Relative imports
from .distribution import getF, softplus
class BoxCoxTranform(Bijection):
    r"""
    Implements Box-Cox transformation of a uni-variate random variable.

    The Box-Cox transformation of an observation :math:`z` is given by

    .. math::

        BoxCox(z; \lambda_1, \lambda_2) = \begin{cases}
            ((z + \lambda_2)^{\lambda_1} - 1) / \lambda_1, \quad & \text{if }
                \lambda_1 \neq 0, \\
            \log (z + \lambda_2), \quad & \text{otherwise.}
        \end{cases}

    Here, :math:`\lambda_1` and :math:`\lambda_2` are learnable parameters.
    Note that the domain of the transformation is not restricted.

    For numerical stability, instead of checking :math:`\lambda_1` is exactly
    zero, we use the condition

    .. math::

        |\lambda_1| < tol\_lambda\_1

    for a pre-specified tolerance `tol_lambda_1`.

    Inverse of the Box-Cox Transform is given by

    .. math::

        BoxCox^{-1}(y; \lambda_1, \lambda_2) = \begin{cases}
            (y \lambda_1 + 1)^{(1/\lambda_1)} - \lambda_2, \quad & \text{if }
                \lambda_1 \neq 0, \\
            \exp (y) - \lambda_2, \quad & \text{otherwise.}
        \end{cases}

    **Notes on numerical stability:**

    1. For the forward transformation, :math:`\lambda_2` must always be chosen
    such that

    .. math::

        z + \lambda_2 > 0.

    To achieve this one needs to know a priori the lower bound on the
    observations. This is set in `BoxCoxTransformOutput`, since
    :math:`\lambda_2` is learnable.

    2. Similarly for the inverse transformation to work reliably, a sufficient
    condition is

    .. math::

        y \lambda_1 + 1 \geq 0,

    where :math:`y` is the input to the inverse transformation.
    This cannot always be guaranteed especially when :math:`y` is a sample
    from a transformed distribution. Hence we always truncate
    :math:`y \lambda_1 + 1` at zero.

    An example showing why this could happen in our case: consider
    transforming observations from the unit interval (0, 1) with parameters

    .. math::

        \begin{align}
            \lambda_1 = &\ 1.1, \\
            \lambda_2 = &\ 0.
        \end{align}

    Then the range of the transformation is (-0.9090, 0.0).
    If Gaussian is fit to the transformed observations and a sample is drawn
    from it, then it is likely that the sample is outside this range, e.g.,
    when the mean is close to -0.9. The subsequent inverse transformation of
    the sample is not a real number anymore.

    >>> y = -0.91
    >>> lambda_1 = 1.1
    >>> lambda_2 = 0.0
    >>> (y * lambda_1 + 1) ** (1 / lambda_1) + lambda_2
    (-0.0017979146510711471+0.0005279153735965289j)

    Parameters
    ----------
    lambda_1
    lambda_2
    tol_lambda_1
        For numerical stability, treat `lambda_1` as zero if it is less than
        `tol_lambda_1`
    F
    """

    # NOTE: the class name keeps the original (misspelled) "Tranform"
    # spelling; renaming would break external callers and serialized models.
    arg_names = ["box_cox.lambda_1", "box_cox.lambda_2"]

    def __init__(
        self,
        lambda_1: Tensor,
        lambda_2: Tensor,
        tol_lambda_1: float = 1e-2,
        F=None,
    ) -> None:
        self.lambda_1 = lambda_1
        self.lambda_2 = lambda_2
        self.tol_lambda_1 = tol_lambda_1
        # Infer the tensor module (mx.nd vs mx.sym) from lambda_1 when not given.
        self.F = F if F else getF(lambda_1)
        # Addressing mxnet madness: mx.nd exposes `power`, mx.sym exposes `pow`.
        self._power = self.F.power if self.F == mx.nd else self.F.pow

    @property
    def args(self) -> List:
        r"""
        List: current values of the parameters
        """
        return [self.lambda_1, self.lambda_2]

    @property
    def event_dim(self) -> int:
        # Univariate transform: scalar events.
        return 0

    def f(self, z: Tensor) -> Tensor:
        r"""
        Forward transformation of observations `z`

        Parameters
        ----------
        z
            observations

        Returns
        -------
        Tensor
            Transformed observations
        """
        F = self.F
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        tol_lambda_1 = self.tol_lambda_1
        _power = self._power
        # |lambda_1| < tol selects the log branch; the boolean condition is
        # broadcast to z's shape so F.where can select element-wise.
        return F.where(
            condition=(F.abs(lambda_1).__ge__(tol_lambda_1).broadcast_like(z)),
            x=(_power(z + lambda_2, lambda_1) - 1.0) / lambda_1,
            y=F.log(z + lambda_2),
            name="Box_Cox_trans",
        )

    def f_inv(self, y: Tensor) -> Tensor:
        r"""Inverse of the Box-Cox Transform

        Parameters
        ----------
        y
            Transformed observations

        Returns
        -------
        Tensor
            Observations
        """
        F = self.F
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        tol_lambda_1 = self.tol_lambda_1
        _power = self._power
        # For numerical stability we truncate :math:`y * \lambda_1 + 1.0` at zero.
        base = F.relu(y * lambda_1 + 1.0)
        # NOTE(review): unlike f(), the condition is not broadcast_like'd here
        # -- presumably y and lambda_1 shapes already agree; confirm.
        return F.where(
            condition=F.abs(lambda_1).__ge__(tol_lambda_1),
            x=_power(base, 1.0 / lambda_1) - lambda_2,
            y=F.exp(y) - lambda_2,
            name="Box_Cox_inverse_trans",
        )

    def log_abs_det_jac(self, z: Tensor, y: Tensor = None) -> Tensor:
        r"""
        Logarithm of the absolute value of the Jacobian determinant of the
        Box-Cox transform, given by

        .. math::

            \log \frac{d}{dz} BoxCox(z; \lambda_1, \lambda_2) = \begin{cases}
                (\lambda_1 - 1) \log (z + \lambda_2), \quad & \text{if } \lambda_1 \neq 0, \\
                -\log (z + \lambda_2), \quad & \text{otherwise.}
            \end{cases}

        Note that the derivative of the transformation is always non-negative.

        Parameters
        ----------
        z
            observations
        y
            not used

        Returns
        -------
        Tensor
        """
        F = self.F
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        tol_lambda_1 = self.tol_lambda_1
        return F.where(
            condition=F.abs(lambda_1).__ge__(tol_lambda_1),
            x=F.log(z + lambda_2) * (lambda_1 - 1.0),
            y=-F.log(z + lambda_2),
            name="Box_Cox_trans_log_det_jac",
        )
class BoxCoxTransformOutput(BijectionOutput):
    """Maps raw network outputs to valid Box-Cox parameters (lambda_1, lambda_2)."""

    bij_cls: type = BoxCoxTranform
    # One scalar network output per lambda parameter.
    args_dim: Dict[str, int] = dict(zip(BoxCoxTranform.arg_names, [1, 1]))

    @validated()
    def __init__(self, lb_obs: float = 0.0, fix_lambda_2: bool = True) -> None:
        """`lb_obs` is the a-priori lower bound on the observations;
        `fix_lambda_2` pins lambda_2 to that bound instead of learning it."""
        super().__init__()
        self.lb_obs = lb_obs
        self.fix_lambda_2 = fix_lambda_2

    def domain_map(self, F, *args: Tensor) -> Tuple[Tensor, ...]:
        """Constrain raw outputs so that `z + lambda_2 > 0` for `z > lb_obs`."""
        lambda_1, raw_lambda_2 = args
        bound = self.lb_obs * F.ones_like(raw_lambda_2)
        if self.fix_lambda_2:
            lambda_2 = bound
        else:
            lambda_2 = softplus(F, raw_lambda_2) - bound
        # event_shape is (), so drop the trailing singleton dimension.
        return lambda_1.squeeze(axis=-1), lambda_2.squeeze(axis=-1)

    @property
    def event_shape(self) -> Tuple:
        return ()
class InverseBoxCoxTransform(InverseBijection):
    """
    Implements the inverse of Box-Cox transformation as a bijection.
    """

    arg_names = ["box_cox.lambda_1", "box_cox.lambda_2"]

    def __init__(
        self,
        lambda_1: Tensor,
        lambda_2: Tensor,
        tol_lambda_1: float = 1e-2,
        F=None,
    ) -> None:
        # Wraps the forward Box-Cox bijection; the InverseBijection base
        # presumably swaps the transform directions (defined elsewhere).
        super().__init__(BoxCoxTranform(lambda_1, lambda_2, tol_lambda_1, F))

    @property
    def event_dim(self) -> int:
        # Univariate transform: scalar events.
        return 0
class InverseBoxCoxTransformOutput(BoxCoxTransformOutput):
    """Output layer producing `InverseBoxCoxTransform` parameters.

    Reuses the lambda-parameter mapping of `BoxCoxTransformOutput`; only the
    bijection class constructed from those parameters differs.
    """

    bij_cls: type = InverseBoxCoxTransform
    args_dim: Dict[str, int] = dict(
        zip(InverseBoxCoxTransform.arg_names, [1, 1])
    )

    @property
    def event_shape(self) -> Tuple:
        # Univariate transform: scalar events.
        # (Fix: removed stray dataset-column residue that was fused onto this
        # return statement and made the line a syntax error.)
        return ()
import argparse
import json
from datetime import datetime, timedelta
import requests
from bs4 import BeautifulSoup
from models import Ad, Filter
# Path of the JSON file holding the ad filter configuration.
DEFAULT_FILTER_PATH = 'filter.json'
# Portuguese month abbreviations as they appear in OLX publication dates
# (e.g. "28 abr"), mapped to month numbers for datetime construction.
MONTHS = {
    'jan': 1,
    'fev': 2,
    'mar': 3,
    'abr': 4,
    'mai': 5,
    'jun': 6,
    'jul': 7,
    'ago': 8,
    'set': 9,
    'out': 10,
    'nov': 11,
    'dez': 12,
}
def scrape_ad(ad_element):
    """Scrape a single ad from an ad html element.

    Returns an ``Ad`` populated with title, link, image, price, publication
    datetime, location and (when present) vendor type, or ``None`` when the
    element has no link (e.g. filler list entries).
    """
    ad_link = ad_element.a
    if ad_link is None:
        return None
    ad_link_url = ad_link.attrs['href']
    photos_div, data_div = ad_link.div.contents[:2]
    image_url = photos_div.find('img').attrs['src']
    title = data_div.find('h2').get_text()
    # Remaining <span> texts appear in a fixed order -- [old price?], date,
    # time, location, [vendor type] -- and are consumed left to right below.
    other_data = [span.get_text() for span in data_div.find_all('span')]
    ad_obj = Ad(
        title=title,
        link=ad_link_url,
        img=image_url,
        info=other_data.pop(0),
        value=other_data.pop(0)
    )
    # discard previous ad value (shown only for discounted ads); guard the
    # lookup so a malformed ad with no further spans does not raise IndexError
    if other_data and other_data[0].startswith('R$'):
        other_data.pop(0)
    publication_day = other_data.pop(0).lower()
    if publication_day == 'hoje':
        publication_day = datetime.today()
    elif publication_day == 'ontem':
        publication_day = datetime.today() - timedelta(days=1)
    else:
        day, month_str = publication_day.split(' ')
        publication_day = datetime(
            year=datetime.today().year,
            month=MONTHS[month_str],
            day=int(day)
        )
        # OLX omits the year: a parsed date in the future must belong to the
        # previous year (e.g. a December ad scraped in January).
        if publication_day > datetime.today():
            publication_day = publication_day.replace(
                year=publication_day.year - 1
            )
    hour, minutes = other_data.pop(0).split(':')
    publication_datetime = publication_day.replace(
        hour=int(hour),
        minute=int(minutes)
    )
    ad_obj.date = publication_datetime
    ad_obj.location = other_data.pop(0)
    if other_data:
        ad_obj.vendor_type = other_data.pop(0)
    return ad_obj
def scrape(url: str, from_date: datetime = None):
    """Scrape a OLX ad list page extracting data from each ad.

    Downloads ``url``, parses every entry of the ``#ad-list`` element, keeps
    the ads accepted by the filter loaded from ``filter.json``, and writes the
    accepted ads to ``result.json``. ``from_date``, when given, overrides the
    filter's ``from_date`` entry. Nothing is returned.
    """
    response = requests.get(
        url,
        headers={
            # Browser-like agent -- presumably OLX rejects the default
            # python-requests User-Agent (TODO confirm).
            'User-Agent': (
                'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:96.0) '
                'Gecko/20100101 Firefox/96.0'
            )
        }
    )
    if response.status_code != 200:
        print(f'Erro ao tentar baixar a página ({response.status_code})')
    else:
        ad_filter = Filter.load_from_file(DEFAULT_FILTER_PATH)
        if from_date:
            ad_filter['from_date'] = from_date
        soup = BeautifulSoup(response.text, 'html.parser')
        ads = soup.find('ul', {'id': 'ad-list'})
        scraped_ads = []
        for ad_element in ads.contents:
            ad_obj = scrape_ad(ad_element)
            # NOTE(review): despite the name, should_filter() returning True
            # *keeps* the ad here -- verify against Filter's implementation.
            if ad_obj is not None and ad_filter.should_filter(ad_obj):
                scraped_ads.append(ad_obj)
        with open('result.json', 'w', encoding='utf-8') as output_fp:
            json.dump(
                [ad_obj.serialized() for ad_obj in scraped_ads],
                output_fp,
                indent=2
            )
        print(f'{len(scraped_ads)} ads saved!')
def date(date_string: str) -> datetime:
    """Parse a MM/DD/YYYY string into a ``datetime``.

    Raises ``ValueError`` for malformed input, which argparse reports as an
    invalid argument value.
    """
    parsed = datetime.strptime(date_string, r'%m/%d/%Y')
    return parsed
if __name__ == '__main__':
    # CLI entry point: scrape a single OLX listing page.
    parser = argparse.ArgumentParser(description='Scrape OLX ads.')
    parser.add_argument(
        'url',
        help=('Url of the page to scrape. Should be the search page with a '
              'list of ads.')
    )
    parser.add_argument(
        '--from-date',
        dest='from_date',
        type=date,  # validates/converts MM/DD/YYYY via date()
        default=None,
        help=('The date to start the search from. Will scrape only ads newer '
              'than this date. The format must be MM/DD/YYYY')
    )
    args = parser.parse_args()
scrape(url=args.url, from_date=args.from_date) | scrape.py | import argparse
import json
from datetime import datetime, timedelta
import requests
from bs4 import BeautifulSoup
from models import Ad, Filter
# Path of the JSON file holding the ad filter configuration.
DEFAULT_FILTER_PATH = 'filter.json'
# Portuguese month abbreviations as they appear in OLX publication dates
# (e.g. "28 abr"), mapped to month numbers for datetime construction.
MONTHS = {
    'jan': 1,
    'fev': 2,
    'mar': 3,
    'abr': 4,
    'mai': 5,
    'jun': 6,
    'jul': 7,
    'ago': 8,
    'set': 9,
    'out': 10,
    'nov': 11,
    'dez': 12,
}
def scrape_ad(ad_element):
    """Scrape a single ad from an ad html element.

    Returns an ``Ad`` populated with title, link, image, price, publication
    datetime, location and (when present) vendor type, or ``None`` when the
    element has no link (e.g. filler list entries).
    """
    ad_link = ad_element.a
    if ad_link is None:
        return None
    ad_link_url = ad_link.attrs['href']
    photos_div, data_div = ad_link.div.contents[:2]
    image_url = photos_div.find('img').attrs['src']
    title = data_div.find('h2').get_text()
    # Remaining <span> texts appear in a fixed order -- [old price?], date,
    # time, location, [vendor type] -- and are consumed left to right below.
    other_data = [span.get_text() for span in data_div.find_all('span')]
    ad_obj = Ad(
        title=title,
        link=ad_link_url,
        img=image_url,
        info=other_data.pop(0),
        value=other_data.pop(0)
    )
    # discard previous ad value (shown only for discounted ads); guard the
    # lookup so a malformed ad with no further spans does not raise IndexError
    if other_data and other_data[0].startswith('R$'):
        other_data.pop(0)
    publication_day = other_data.pop(0).lower()
    if publication_day == 'hoje':
        publication_day = datetime.today()
    elif publication_day == 'ontem':
        publication_day = datetime.today() - timedelta(days=1)
    else:
        day, month_str = publication_day.split(' ')
        publication_day = datetime(
            year=datetime.today().year,
            month=MONTHS[month_str],
            day=int(day)
        )
        # OLX omits the year: a parsed date in the future must belong to the
        # previous year (e.g. a December ad scraped in January).
        if publication_day > datetime.today():
            publication_day = publication_day.replace(
                year=publication_day.year - 1
            )
    hour, minutes = other_data.pop(0).split(':')
    publication_datetime = publication_day.replace(
        hour=int(hour),
        minute=int(minutes)
    )
    ad_obj.date = publication_datetime
    ad_obj.location = other_data.pop(0)
    if other_data:
        ad_obj.vendor_type = other_data.pop(0)
    return ad_obj
def scrape(url: str, from_date: datetime = None):
    """Scrape a OLX ad list page extracting data from each ad.

    Downloads ``url``, parses every entry of the ``#ad-list`` element, keeps
    the ads accepted by the filter loaded from ``filter.json``, and writes the
    accepted ads to ``result.json``. ``from_date``, when given, overrides the
    filter's ``from_date`` entry. Nothing is returned.
    """
    response = requests.get(
        url,
        headers={
            # Browser-like agent -- presumably OLX rejects the default
            # python-requests User-Agent (TODO confirm).
            'User-Agent': (
                'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:96.0) '
                'Gecko/20100101 Firefox/96.0'
            )
        }
    )
    if response.status_code != 200:
        print(f'Erro ao tentar baixar a página ({response.status_code})')
    else:
        ad_filter = Filter.load_from_file(DEFAULT_FILTER_PATH)
        if from_date:
            ad_filter['from_date'] = from_date
        soup = BeautifulSoup(response.text, 'html.parser')
        ads = soup.find('ul', {'id': 'ad-list'})
        scraped_ads = []
        for ad_element in ads.contents:
            ad_obj = scrape_ad(ad_element)
            # NOTE(review): despite the name, should_filter() returning True
            # *keeps* the ad here -- verify against Filter's implementation.
            if ad_obj is not None and ad_filter.should_filter(ad_obj):
                scraped_ads.append(ad_obj)
        with open('result.json', 'w', encoding='utf-8') as output_fp:
            json.dump(
                [ad_obj.serialized() for ad_obj in scraped_ads],
                output_fp,
                indent=2
            )
        print(f'{len(scraped_ads)} ads saved!')
def date(date_string: str) -> datetime:
    """Convert a MM/DD/YYYY string to a ``datetime``; raises ``ValueError``
    (reported by argparse as an invalid argument) when malformed."""
    expected_format = r'%m/%d/%Y'
    return datetime.strptime(date_string, expected_format)
if __name__ == '__main__':
    # CLI entry point: scrape a single OLX listing page.
    parser = argparse.ArgumentParser(description='Scrape OLX ads.')
    parser.add_argument(
        'url',
        help=('Url of the page to scrape. Should be the search page with a '
              'list of ads.')
    )
    parser.add_argument(
        '--from-date',
        dest='from_date',
        type=date,  # validates/converts MM/DD/YYYY via date()
        default=None,
        help=('The date to start the search from. Will scrape only ads newer '
              'than this date. The format must be MM/DD/YYYY')
    )
    args = parser.parse_args()
    # Fix: removed stray dataset-column residue that was fused onto the final
    # call and made the line a syntax error.
    scrape(url=args.url, from_date=args.from_date)
"""Helper tools for use in tests."""
from __future__ import division
import base64
import copy
import itertools
import os
from collections import defaultdict
from decimal import Decimal
import boto3
import pytest
from boto3.dynamodb.types import Binary
from botocore.exceptions import NoRegionError
from mock import patch
from moto import mock_dynamodb2
from dynamodb_encryption_sdk.delegated_keys.jce import JceNameLocalDelegatedKey
from dynamodb_encryption_sdk.encrypted.client import EncryptedClient
from dynamodb_encryption_sdk.encrypted.item import decrypt_python_item, encrypt_python_item
from dynamodb_encryption_sdk.encrypted.resource import EncryptedResource
from dynamodb_encryption_sdk.encrypted.table import EncryptedTable
from dynamodb_encryption_sdk.identifiers import CryptoAction
from dynamodb_encryption_sdk.internal.identifiers import ReservedAttributes
from dynamodb_encryption_sdk.material_providers.most_recent import MostRecentProvider
from dynamodb_encryption_sdk.material_providers.static import StaticCryptographicMaterialsProvider
from dynamodb_encryption_sdk.material_providers.store.meta import MetaStore
from dynamodb_encryption_sdk.material_providers.wrapped import WrappedCryptographicMaterialsProvider
from dynamodb_encryption_sdk.materials.raw import RawDecryptionMaterials, RawEncryptionMaterials
from dynamodb_encryption_sdk.structures import AttributeActions
from dynamodb_encryption_sdk.transform import ddb_to_dict, dict_to_ddb
# True when running under Travis CI (the TRAVIS env var is set).
RUNNING_IN_TRAVIS = "TRAVIS" in os.environ
# Cache of generated delegated keys: class -> algorithm -> key length -> key.
# Keys only need to be valid, not unique, so they are shared across tests.
_DELEGATED_KEY_CACHE = defaultdict(lambda: defaultdict(dict))
TEST_TABLE_NAME = "my_table"
TEST_REGION_NAME = "us-west-2"
# Primary key schema (hash + range) and sample values for the mock table.
TEST_INDEX = {
    "partition_attribute": {"type": "S", "value": "test_value"},
    "sort_attribute": {"type": "N", "value": Decimal("99.233")},
}
# Extra attributes used as LSI/GSI hash keys in the index fixtures below.
SECONDARY_INDEX = {
    "secondary_index_1": {"type": "B", "value": Binary(b"\x00\x01\x02")},
    "secondary_index_2": {"type": "S", "value": "another_value"},
}
# Key dict in the shape expected by get_item/delete_item calls.
TEST_KEY = {name: value["value"] for name, value in TEST_INDEX.items()}
# Four distinct primary keys for batch read/write checks.
TEST_BATCH_INDEXES = [
    {
        "partition_attribute": {"type": "S", "value": "test_value"},
        "sort_attribute": {"type": "N", "value": Decimal("99.233")},
    },
    {
        "partition_attribute": {"type": "S", "value": "test_value"},
        "sort_attribute": {"type": "N", "value": Decimal("92986745")},
    },
    {
        "partition_attribute": {"type": "S", "value": "test_value"},
        "sort_attribute": {"type": "N", "value": Decimal("2231.0001")},
    },
    {
        "partition_attribute": {"type": "S", "value": "another_test_value"},
        "sort_attribute": {"type": "N", "value": Decimal("732342")},
    },
]
TEST_BATCH_KEYS = [{name: value["value"] for name, value in key.items()} for key in TEST_BATCH_INDEXES]
@pytest.fixture
def example_table():
    """Mock DynamoDB table with the test hash+range key; deleted on teardown."""
    # reset=False presumably keeps moto's mock state shared across
    # start/stop cycles -- TODO confirm against the moto version in use.
    mock_dynamodb2().start(reset=False)
    ddb = boto3.client("dynamodb", region_name=TEST_REGION_NAME)
    ddb.create_table(
        TableName=TEST_TABLE_NAME,
        KeySchema=[
            {"AttributeName": "partition_attribute", "KeyType": "HASH"},
            {"AttributeName": "sort_attribute", "KeyType": "RANGE"},
        ],
        AttributeDefinitions=[
            {"AttributeName": name, "AttributeType": value["type"]} for name, value in TEST_INDEX.items()
        ],
        ProvisionedThroughput={"ReadCapacityUnits": 100, "WriteCapacityUnits": 100},
    )
    yield
    ddb.delete_table(TableName=TEST_TABLE_NAME)
    mock_dynamodb2().stop()
@pytest.fixture
def table_with_local_secondary_indexes():
    """Mock DynamoDB table with two local secondary indexes; deleted on teardown."""
    mock_dynamodb2().start(reset=False)
    ddb = boto3.client("dynamodb", region_name=TEST_REGION_NAME)
    ddb.create_table(
        TableName=TEST_TABLE_NAME,
        KeySchema=[
            {"AttributeName": "partition_attribute", "KeyType": "HASH"},
            {"AttributeName": "sort_attribute", "KeyType": "RANGE"},
        ],
        LocalSecondaryIndexes=[
            {
                "IndexName": "lsi-1",
                "KeySchema": [{"AttributeName": "secondary_index_1", "KeyType": "HASH"}],
                "Projection": {"ProjectionType": "ALL"},
            },
            {
                "IndexName": "lsi-2",
                "KeySchema": [{"AttributeName": "secondary_index_2", "KeyType": "HASH"}],
                "Projection": {"ProjectionType": "ALL"},
            },
        ],
        AttributeDefinitions=[
            {"AttributeName": name, "AttributeType": value["type"]}
            for name, value in list(TEST_INDEX.items()) + list(SECONDARY_INDEX.items())
        ],
        ProvisionedThroughput={"ReadCapacityUnits": 100, "WriteCapacityUnits": 100},
    )
    yield
    ddb.delete_table(TableName=TEST_TABLE_NAME)
    mock_dynamodb2().stop()
@pytest.fixture
def table_with_global_secondary_indexes():
    """Mock DynamoDB table with two global secondary indexes; deleted on teardown."""
    mock_dynamodb2().start(reset=False)
    ddb = boto3.client("dynamodb", region_name=TEST_REGION_NAME)
    ddb.create_table(
        TableName=TEST_TABLE_NAME,
        KeySchema=[
            {"AttributeName": "partition_attribute", "KeyType": "HASH"},
            {"AttributeName": "sort_attribute", "KeyType": "RANGE"},
        ],
        GlobalSecondaryIndexes=[
            {
                "IndexName": "gsi-1",
                "KeySchema": [{"AttributeName": "secondary_index_1", "KeyType": "HASH"}],
                "Projection": {"ProjectionType": "ALL"},
                "ProvisionedThroughput": {"ReadCapacityUnits": 100, "WriteCapacityUnits": 100},
            },
            {
                "IndexName": "gsi-2",
                "KeySchema": [{"AttributeName": "secondary_index_2", "KeyType": "HASH"}],
                "Projection": {"ProjectionType": "ALL"},
                "ProvisionedThroughput": {"ReadCapacityUnits": 100, "WriteCapacityUnits": 100},
            },
        ],
        AttributeDefinitions=[
            {"AttributeName": name, "AttributeType": value["type"]}
            for name, value in list(TEST_INDEX.items()) + list(SECONDARY_INDEX.items())
        ],
        ProvisionedThroughput={"ReadCapacityUnits": 100, "WriteCapacityUnits": 100},
    )
    yield
    ddb.delete_table(TableName=TEST_TABLE_NAME)
    mock_dynamodb2().stop()
def _get_from_cache(dk_class, algorithm, key_length):
    """Return a cached delegated key, generating and caching one on first use.

    Keys only need to be valid, not unique, so re-using them avoids the cost
    of regenerating key material for every test case.
    """
    bucket = _DELEGATED_KEY_CACHE[dk_class][algorithm]
    if key_length not in bucket:
        bucket[key_length] = dk_class.generate(algorithm, key_length)
    return bucket[key_length]
def build_static_jce_cmp(encryption_algorithm, encryption_key_length, signing_algorithm, signing_key_length):
    """Build a StaticCryptographicMaterialsProvider using ephemeral JceNameLocalDelegatedKeys as specified."""
    encryption_key = _get_from_cache(JceNameLocalDelegatedKey, encryption_algorithm, encryption_key_length)
    authentication_key = _get_from_cache(JceNameLocalDelegatedKey, signing_algorithm, signing_key_length)
    # The same key pair serves both directions: encrypt/decrypt and sign/verify.
    encryption_materials = RawEncryptionMaterials(signing_key=authentication_key, encryption_key=encryption_key)
    decryption_materials = RawDecryptionMaterials(verification_key=authentication_key, decryption_key=encryption_key)
    return StaticCryptographicMaterialsProvider(
        encryption_materials=encryption_materials, decryption_materials=decryption_materials
    )
def _build_wrapped_jce_cmp(wrapping_algorithm, wrapping_key_length, signing_algorithm, signing_key_length):
    """Build a WrappedCryptographicMaterialsProvider using ephemeral JceNameLocalDelegatedKeys as specified."""
    wrapping_key = _get_from_cache(JceNameLocalDelegatedKey, wrapping_algorithm, wrapping_key_length)
    signing_key = _get_from_cache(JceNameLocalDelegatedKey, signing_algorithm, signing_key_length)
    # The same key both wraps and unwraps the per-item material.
    return WrappedCryptographicMaterialsProvider(
        wrapping_key=wrapping_key, unwrapping_key=wrapping_key, signing_key=signing_key
    )
def _all_encryption():
"""All encryption configurations to test in slow tests."""
return itertools.chain(itertools.product(("AES",), (128, 256)), itertools.product(("RSA",), (1024, 2048, 4096)))
def _all_authentication():
"""All authentication configurations to test in slow tests."""
return itertools.chain(
itertools.product(("HmacSHA224", "HmacSHA256", "HmacSHA384", "HmacSHA512"), (128, 256)),
itertools.product(("SHA224withRSA", "SHA256withRSA", "SHA384withRSA", "SHA512withRSA"), (1024, 2048, 4096)),
)
def _all_algorithm_pairs():
    """All algorithm pairs (encryption + authentication) to test in slow tests."""
    for encryption_pair in _all_encryption():
        # Re-invoke the generator for every outer element; a single generator
        # would be exhausted after the first pass.
        for signing_pair in _all_authentication():
            yield encryption_pair + signing_pair
def _some_algorithm_pairs():
"""Cherry-picked set of algorithm pairs (encryption + authentication) to test in fast tests."""
return (("AES", 256, "HmacSHA256", 256), ("AES", 256, "SHA256withRSA", 4096), ("RSA", 4096, "SHA256withRSA", 4096))
# Builder callables keyed by CMP flavor; _all_possible_cmps iterates this to
# cover both static and wrapped materials providers.
_cmp_builders = {"static": build_static_jce_cmp, "wrapped": _build_wrapped_jce_cmp}
def _all_possible_cmps(algorithm_generator):
    """Generate all possible cryptographic materials providers based on the supplied generator.

    Yields ``pytest.param`` values with human-readable ids so parametrized
    test names identify the builder flavor and algorithm/key-length combo.
    """
    # The AES combinations do the same thing, but this makes sure that the AESWrap name works as expected.
    yield _build_wrapped_jce_cmp("AESWrap", 256, "HmacSHA256", 256)
    for builder_info, args in itertools.product(_cmp_builders.items(), algorithm_generator()):
        builder_type, builder_func = builder_info
        encryption_algorithm, encryption_key_length, signing_algorithm, signing_key_length = args
        if builder_type == "static" and encryption_algorithm != "AES":
            # Only AES keys are allowed to be used with static materials
            continue
        id_string = "{enc_algorithm}/{enc_key_length} {builder_type} {sig_algorithm}/{sig_key_length}".format(
            enc_algorithm=encryption_algorithm,
            enc_key_length=encryption_key_length,
            builder_type=builder_type,
            sig_algorithm=signing_algorithm,
            sig_key_length=signing_key_length,
        )
        yield pytest.param(
            builder_func(encryption_algorithm, encryption_key_length, signing_algorithm, signing_key_length),
            id=id_string,
        )
def set_parametrized_cmp(metafunc):
    """Set parametrized values for cryptographic materials provider fixtures."""
    generators = {"all_the_cmps": _all_algorithm_pairs, "some_cmps": _some_algorithm_pairs}
    for fixture_name, algorithm_generator in generators.items():
        if fixture_name in metafunc.fixturenames:
            metafunc.parametrize(fixture_name, _all_possible_cmps(algorithm_generator))
# Parametrization sets for AttributeActions fixtures. "hypothesis_actions"
# holds only uniform default actions (cheap enough for hypothesis-driven
# tests); "parametrized_actions" extends it with mixed per-attribute configs.
_ACTIONS = {
    "hypothesis_actions": (
        pytest.param(AttributeActions(default_action=CryptoAction.ENCRYPT_AND_SIGN), id="encrypt all"),
        pytest.param(AttributeActions(default_action=CryptoAction.SIGN_ONLY), id="sign only all"),
        pytest.param(AttributeActions(default_action=CryptoAction.DO_NOTHING), id="do nothing"),
    )
}
_ACTIONS["parametrized_actions"] = _ACTIONS["hypothesis_actions"] + (
    pytest.param(
        AttributeActions(
            default_action=CryptoAction.ENCRYPT_AND_SIGN,
            attribute_actions={
                "number_set": CryptoAction.SIGN_ONLY,
                "string_set": CryptoAction.SIGN_ONLY,
                "binary_set": CryptoAction.SIGN_ONLY,
            },
        ),
        id="sign sets, encrypt everything else",
    ),
    pytest.param(
        AttributeActions(
            default_action=CryptoAction.ENCRYPT_AND_SIGN,
            attribute_actions={
                "number_set": CryptoAction.DO_NOTHING,
                "string_set": CryptoAction.DO_NOTHING,
                "binary_set": CryptoAction.DO_NOTHING,
            },
        ),
        id="ignore sets, encrypt everything else",
    ),
    pytest.param(
        AttributeActions(
            default_action=CryptoAction.DO_NOTHING, attribute_actions={"map": CryptoAction.ENCRYPT_AND_SIGN}
        ),
        id="encrypt map, ignore everything else",
    ),
    pytest.param(
        AttributeActions(
            default_action=CryptoAction.SIGN_ONLY,
            attribute_actions={
                "number_set": CryptoAction.DO_NOTHING,
                "string_set": CryptoAction.DO_NOTHING,
                "binary_set": CryptoAction.DO_NOTHING,
                "map": CryptoAction.ENCRYPT_AND_SIGN,
            },
        ),
        id="ignore sets, encrypt map, sign everything else",
    ),
)
def set_parametrized_actions(metafunc):
    """Set parametrized values for attribute-actions fixtures."""
    requested = (name for name in _ACTIONS if name in metafunc.fixturenames)
    for fixture_name in requested:
        metafunc.parametrize(fixture_name, _ACTIONS[fixture_name])
def set_parametrized_item(metafunc):
    """Set parametrized values for items to cycle."""
    if "parametrized_item" not in metafunc.fixturenames:
        return
    params = (pytest.param(diverse_item(), id="diverse item"),)
    metafunc.parametrize("parametrized_item", params)
def diverse_item():
    """Build a deep test item covering DynamoDB-native Python types.

    The item contains scalars, all three set types, a list holding a copy of
    every scalar/set value, and a map nesting everything (including the list).
    """
    item = {
        "int": 5,
        "decimal": Decimal("123.456"),
        "string": "this is a string",
        "binary": b"this is a bytestring! \x01",
        "number_set": {5, 4, 3},
        "string_set": {"abc", "def", "geh"},
        "binary_set": {b"\x00\x00\x00", b"\x00\x01\x00", b"\x00\x00\x02"},
    }
    item["list"] = [copy.copy(value) for value in item.values()]
    item["map"] = copy.deepcopy(item)
    return copy.deepcopy(item)
# Attribute names the encryption client adds to every encrypted item;
# excluded from plaintext/ciphertext comparisons below.
_reserved_attributes = set([attr.value for attr in ReservedAttributes])
def return_requestitems_as_unprocessed(*args, **kwargs):
    """Mock side effect: echo the RequestItems back as UnprocessedItems."""
    request_items = kwargs["RequestItems"]
    return {"UnprocessedItems": request_items}
def check_encrypted_item(plaintext_item, ciphertext_item, attribute_actions):
    """Verify ``ciphertext_item`` is ``plaintext_item`` encrypted per ``attribute_actions``."""
    # Verify that all expected attributes are present
    ciphertext_attributes = set(ciphertext_item.keys())
    plaintext_attributes = set(plaintext_item.keys())
    if attribute_actions.take_no_actions:
        assert ciphertext_attributes == plaintext_attributes
    else:
        # Encryption adds the reserved attributes on top of the originals.
        assert ciphertext_attributes == plaintext_attributes.union(_reserved_attributes)
    for name, value in ciphertext_item.items():
        # Skip the attributes we add
        if name in _reserved_attributes:
            continue
        # If the attribute should have been encrypted, verify that it is Binary and different from the original
        if attribute_actions.action(name) is CryptoAction.ENCRYPT_AND_SIGN:
            assert isinstance(value, Binary)
            assert value != plaintext_item[name]
        # Otherwise, verify that it is the same as the original
        else:
            assert value == plaintext_item[name]
def _matching_key(actual_item, expected):
expected_item = [
i
for i in expected
if i["partition_attribute"] == actual_item["partition_attribute"]
and i["sort_attribute"] == actual_item["sort_attribute"]
]
assert len(expected_item) == 1
return expected_item[0]
def _nop_transformer(item):
return item
def assert_items_exist_in_list(source, expected, transformer):
    """Assert each item in ``source`` equals (post-transform) its key match in ``expected``."""
    for candidate in source:
        match = _matching_key(candidate, expected)
        assert transformer(candidate) == transformer(match)
def assert_equal_lists_of_items(actual, expected, transformer=_nop_transformer):
    """Assert ``actual`` and ``expected`` contain exactly the same items, ignoring order."""
    assert len(actual) == len(expected)
    assert_items_exist_in_list(actual, expected, transformer)
def assert_list_of_items_contains(full, subset, transformer=_nop_transformer):
    """Assert every item of ``subset`` appears (post-transform) in ``full``."""
    assert len(full) >= len(subset)
    assert_items_exist_in_list(subset, full, transformer)
def check_many_encrypted_items(actual, expected, attribute_actions, transformer=_nop_transformer):
    """Check every encrypted item in ``actual`` against its plaintext match in ``expected``."""
    assert len(actual) == len(expected)
    for ciphertext_item in actual:
        plaintext_item = _matching_key(ciphertext_item, expected)
        check_encrypted_item(
            plaintext_item=transformer(plaintext_item),
            ciphertext_item=transformer(ciphertext_item),
            attribute_actions=attribute_actions,
        )
def _generate_items(initial_item, write_transformer):
    """Build one transformed copy of ``initial_item`` per test batch key."""
    return [
        write_transformer(dict(initial_item, **batch_key))
        for batch_key in TEST_BATCH_KEYS
    ]
def _cleanup_items(encrypted, write_transformer, table_name=TEST_TABLE_NAME):
    """Delete every test batch key from ``table_name`` through the encrypted client."""
    delete_requests = [
        {"DeleteRequest": {"Key": write_transformer(batch_key)}}
        for batch_key in TEST_BATCH_KEYS
    ]
    encrypted.batch_write_item(RequestItems={table_name: delete_requests})
def cycle_batch_item_check(
    raw,
    encrypted,
    initial_actions,
    initial_item,
    write_transformer=_nop_transformer,
    read_transformer=_nop_transformer,
    table_name=TEST_TABLE_NAME,
    delete_items=True,
):
    """Check that cycling (plaintext->encrypted->decrypted) item batch has the expected results.

    ``raw``/``encrypted`` are matching plain and encrypted clients or
    resources; the transformers convert between Python-native and
    DynamoDB-JSON item formats where needed.
    """
    check_attribute_actions = initial_actions.copy()
    # Primary key attributes must never be encrypted.
    check_attribute_actions.set_index_keys(*list(TEST_KEY.keys()))
    items = _generate_items(initial_item, write_transformer)
    _put_result = encrypted.batch_write_item(  # noqa
        RequestItems={table_name: [{"PutRequest": {"Item": _item}} for _item in items]}
    )
    ddb_keys = [write_transformer(key) for key in TEST_BATCH_KEYS]
    # The raw read must show ciphertext...
    encrypted_result = raw.batch_get_item(RequestItems={table_name: {"Keys": ddb_keys}})
    check_many_encrypted_items(
        actual=encrypted_result["Responses"][table_name],
        expected=items,
        attribute_actions=check_attribute_actions,
        transformer=read_transformer,
    )
    # ...while the encrypted read round-trips back to the plaintext items.
    decrypted_result = encrypted.batch_get_item(RequestItems={table_name: {"Keys": ddb_keys}})
    assert_equal_lists_of_items(
        actual=decrypted_result["Responses"][table_name], expected=items, transformer=read_transformer
    )
    if delete_items:
        _cleanup_items(encrypted, write_transformer, table_name)
    del check_attribute_actions
    del items
def cycle_batch_writer_check(raw_table, encrypted_table, initial_actions, initial_item):
    """Check that cycling (plaintext->encrypted->decrypted) items with the Table batch writer
    has the expected results.
    """
    check_attribute_actions = initial_actions.copy()
    # Primary key attributes must never be encrypted.
    check_attribute_actions.set_index_keys(*list(TEST_KEY.keys()))
    items = _generate_items(initial_item, _nop_transformer)
    with encrypted_table.batch_writer() as writer:
        for item in items:
            writer.put_item(item)
    ddb_keys = [key for key in TEST_BATCH_KEYS]
    # Raw reads must show ciphertext; encrypted reads must round-trip.
    encrypted_items = [raw_table.get_item(Key=key, ConsistentRead=True)["Item"] for key in ddb_keys]
    check_many_encrypted_items(
        actual=encrypted_items, expected=items, attribute_actions=check_attribute_actions, transformer=_nop_transformer
    )
    decrypted_result = [encrypted_table.get_item(Key=key, ConsistentRead=True)["Item"] for key in ddb_keys]
    assert_equal_lists_of_items(actual=decrypted_result, expected=items, transformer=_nop_transformer)
    # Clean up through the batch writer as well.
    with encrypted_table.batch_writer() as writer:
        for key in ddb_keys:
            writer.delete_item(key)
    del check_attribute_actions
    del items
def batch_write_item_unprocessed_check(
    encrypted, initial_item, write_transformer=_nop_transformer, table_name=TEST_TABLE_NAME
):
    """Check that unprocessed items in a batch result are unencrypted.

    Writes a batch through ``encrypted`` (whose client is expected to report
    everything as unprocessed, e.g. via ``return_requestitems_as_unprocessed``)
    and asserts the items handed back are the original plaintext items.
    """
    items = _generate_items(initial_item, write_transformer)
    request_items = {table_name: [{"PutRequest": {"Item": _item}} for _item in items]}
    _put_result = encrypted.batch_write_item(RequestItems=request_items)
    # we expect results to include Unprocessed items, or the test case is invalid!
    unprocessed_items = _put_result["UnprocessedItems"]
    assert unprocessed_items != {}
    # Fix: read back under the caller-supplied table name; the old code
    # indexed with TEST_TABLE_NAME, raising KeyError for any other table.
    unprocessed = [operation["PutRequest"]["Item"] for operation in unprocessed_items[table_name]]
    assert_list_of_items_contains(items, unprocessed, transformer=_nop_transformer)
    del items
def cycle_item_check(plaintext_item, crypto_config):
    """Check that cycling (plaintext->encrypted->decrypted) an item has the expected results."""
    ciphertext_item = encrypt_python_item(plaintext_item, crypto_config)
    # The ciphertext must honor the configured per-attribute actions...
    check_encrypted_item(plaintext_item, ciphertext_item, crypto_config.attribute_actions)
    # ...and decryption must round-trip exactly.
    cycled_item = decrypt_python_item(ciphertext_item, crypto_config)
    assert cycled_item == plaintext_item
    del ciphertext_item
    del cycled_item
def table_cycle_check(materials_provider, initial_actions, initial_item, table_name, region_name=None):
    """Cycle one item through an EncryptedTable: put, verify ciphertext via the
    raw table, verify decryption via the encrypted table, then delete."""
    check_attribute_actions = initial_actions.copy()
    # Primary key attributes must never be encrypted.
    check_attribute_actions.set_index_keys(*list(TEST_KEY.keys()))
    item = initial_item.copy()
    item.update(TEST_KEY)
    kwargs = {}
    if region_name is not None:
        kwargs["region_name"] = region_name
    table = boto3.resource("dynamodb", **kwargs).Table(table_name)
    e_table = EncryptedTable(table=table, materials_provider=materials_provider, attribute_actions=initial_actions)
    _put_result = e_table.put_item(Item=item)  # noqa
    # Raw read shows ciphertext; encrypted read round-trips to the original.
    encrypted_result = table.get_item(Key=TEST_KEY, ConsistentRead=True)
    check_encrypted_item(item, encrypted_result["Item"], check_attribute_actions)
    decrypted_result = e_table.get_item(Key=TEST_KEY, ConsistentRead=True)
    assert decrypted_result["Item"] == item
    e_table.delete_item(Key=TEST_KEY)
    del item
    del check_attribute_actions
def table_cycle_batch_writer_check(materials_provider, initial_actions, initial_item, table_name, region_name=None):
    """Run the Table batch-writer cycle check against a freshly built EncryptedTable."""
    resource_kwargs = {}
    if region_name is not None:
        resource_kwargs["region_name"] = region_name
    raw_table = boto3.resource("dynamodb", **resource_kwargs).Table(table_name)
    encrypted_table = EncryptedTable(
        table=raw_table, materials_provider=materials_provider, attribute_actions=initial_actions
    )
    cycle_batch_writer_check(raw_table, encrypted_table, initial_actions, initial_item)
def table_batch_writer_unprocessed_items_check(
    materials_provider, initial_actions, initial_item, table_name, region_name=None
):
    """Check that unprocessed items returned to a Table BatchWriter are retried successfully."""
    kwargs = {}
    if region_name is not None:
        kwargs["region_name"] = region_name
    resource = boto3.resource("dynamodb", **kwargs)
    table = resource.Table(table_name)
    items = _generate_items(initial_item, _nop_transformer)
    request_items = {table_name: [{"PutRequest": {"Item": _item}} for _item in items]}
    with patch.object(table.meta.client, "batch_write_item") as batch_write_mock:
        # Check that unprocessed items returned to a BatchWriter are successfully retried:
        # the first call reports everything unprocessed, the retried call succeeds.
        batch_write_mock.side_effect = [{"UnprocessedItems": request_items}, {"UnprocessedItems": {}}]
        e_table = EncryptedTable(table=table, materials_provider=materials_provider, attribute_actions=initial_actions)
        with e_table.batch_writer() as writer:
            for item in items:
                writer.put_item(item)
    del items
def resource_cycle_batch_items_check(materials_provider, initial_actions, initial_item, table_name, region_name=None):
    """Check a batch write/read/delete cycle through an EncryptedResource."""
    kwargs = {}
    if region_name is not None:
        kwargs["region_name"] = region_name
    resource = boto3.resource("dynamodb", **kwargs)
    e_resource = EncryptedResource(
        resource=resource, materials_provider=materials_provider, attribute_actions=initial_actions
    )
    cycle_batch_item_check(
        raw=resource,
        encrypted=e_resource,
        initial_actions=initial_actions,
        initial_item=initial_item,
        table_name=table_name,
    )
    # The cycle check deletes its items, so both views of the table must now be empty.
    raw_scan_result = resource.Table(table_name).scan(ConsistentRead=True)
    e_scan_result = e_resource.Table(table_name).scan(ConsistentRead=True)
    assert not raw_scan_result["Items"]
    assert not e_scan_result["Items"]
def resource_batch_items_unprocessed_check(
    materials_provider, initial_actions, initial_item, table_name, region_name=None
):
    """Check that an EncryptedResource leaves unprocessed batch items unencrypted."""
    resource_kwargs = {}
    if region_name is not None:
        resource_kwargs["region_name"] = region_name
    resource = boto3.resource("dynamodb", **resource_kwargs)
    # Force the service to report every request item as unprocessed.
    with patch.object(resource, "batch_write_item", return_requestitems_as_unprocessed):
        encrypted_resource = EncryptedResource(
            resource=resource, materials_provider=materials_provider, attribute_actions=initial_actions
        )
        batch_write_item_unprocessed_check(
            encrypted=encrypted_resource,
            initial_item=initial_item,
            write_transformer=dict_to_ddb,
            table_name=table_name,
        )
def client_cycle_single_item_check(materials_provider, initial_actions, initial_item, table_name, region_name=None):
    """Check a single-item put/get/delete cycle through an EncryptedClient.

    Same flow as table_cycle_check, but items cross the client boundary in the
    DynamoDB wire format (dict_to_ddb / ddb_to_dict).
    """
    check_attribute_actions = initial_actions.copy()
    # Register the primary key attributes so the ciphertext check treats them as index keys.
    check_attribute_actions.set_index_keys(*list(TEST_KEY.keys()))
    item = initial_item.copy()
    item.update(TEST_KEY)
    ddb_item = dict_to_ddb(item)
    ddb_key = dict_to_ddb(TEST_KEY)
    kwargs = {}
    if region_name is not None:
        kwargs["region_name"] = region_name
    client = boto3.client("dynamodb", **kwargs)
    e_client = EncryptedClient(client=client, materials_provider=materials_provider, attribute_actions=initial_actions)
    _put_result = e_client.put_item(TableName=table_name, Item=ddb_item)  # noqa
    # Raw read shows the stored (encrypted) form.
    encrypted_result = client.get_item(TableName=table_name, Key=ddb_key, ConsistentRead=True)
    check_encrypted_item(item, ddb_to_dict(encrypted_result["Item"]), check_attribute_actions)
    # Encrypted read must transparently decrypt back to the original item.
    decrypted_result = e_client.get_item(TableName=table_name, Key=ddb_key, ConsistentRead=True)
    assert ddb_to_dict(decrypted_result["Item"]) == item
    e_client.delete_item(TableName=table_name, Key=ddb_key)
    del item
    del check_attribute_actions
def client_cycle_batch_items_check(materials_provider, initial_actions, initial_item, table_name, region_name=None):
    """Check a batch write/read/delete cycle through an EncryptedClient."""
    kwargs = {}
    if region_name is not None:
        kwargs["region_name"] = region_name
    client = boto3.client("dynamodb", **kwargs)
    e_client = EncryptedClient(client=client, materials_provider=materials_provider, attribute_actions=initial_actions)
    cycle_batch_item_check(
        raw=client,
        encrypted=e_client,
        initial_actions=initial_actions,
        initial_item=initial_item,
        write_transformer=dict_to_ddb,
        read_transformer=ddb_to_dict,
        table_name=table_name,
    )
    # The cycle check deletes its items, so both views of the table must now be empty.
    raw_scan_result = client.scan(TableName=table_name, ConsistentRead=True)
    e_scan_result = e_client.scan(TableName=table_name, ConsistentRead=True)
    assert not raw_scan_result["Items"]
    assert not e_scan_result["Items"]
def client_batch_items_unprocessed_check(
    materials_provider, initial_actions, initial_item, table_name, region_name=None
):
    """Check that an EncryptedClient leaves unprocessed batch items unencrypted."""
    client_kwargs = {}
    if region_name is not None:
        client_kwargs["region_name"] = region_name
    client = boto3.client("dynamodb", **client_kwargs)
    # Force the service to report every request item as unprocessed.
    with patch.object(client, "batch_write_item", return_requestitems_as_unprocessed):
        encrypted_client = EncryptedClient(
            client=client, materials_provider=materials_provider, attribute_actions=initial_actions
        )
        batch_write_item_unprocessed_check(
            encrypted=encrypted_client,
            initial_item=initial_item,
            write_transformer=dict_to_ddb,
            table_name=table_name,
        )
def client_cycle_batch_items_check_paginators(
    materials_provider, initial_actions, initial_item, table_name, region_name=None
):
    """Check a batch write + scan-paginator round trip through an EncryptedClient.

    Writes a batch through the encrypted client, scans the table back with both
    the raw and the encrypted paginators, verifies the raw pages are encrypted
    relative to the decrypted pages, then cleans up and confirms the table is empty.
    """
    kwargs = {}
    if region_name is not None:
        kwargs["region_name"] = region_name
    client = boto3.client("dynamodb", **kwargs)
    e_client = EncryptedClient(client=client, materials_provider=materials_provider, attribute_actions=initial_actions)
    cycle_batch_item_check(
        raw=client,
        encrypted=e_client,
        initial_actions=initial_actions,
        initial_item=initial_item,
        write_transformer=dict_to_ddb,
        read_transformer=ddb_to_dict,
        table_name=table_name,
        delete_items=False,  # items are cleaned up below, after the pagination checks
    )
    encrypted_items = []
    raw_paginator = client.get_paginator("scan")
    for page in raw_paginator.paginate(TableName=table_name, ConsistentRead=True):
        encrypted_items.extend(page["Items"])
    decrypted_items = []
    encrypted_paginator = e_client.get_paginator("scan")
    for page in encrypted_paginator.paginate(TableName=table_name, ConsistentRead=True):
        decrypted_items.extend(page["Items"])
    # Fix: removed leftover debug print() calls that dumped full item batches to stdout.
    check_attribute_actions = initial_actions.copy()
    check_attribute_actions.set_index_keys(*list(TEST_KEY.keys()))
    check_many_encrypted_items(
        actual=encrypted_items,
        expected=decrypted_items,
        attribute_actions=check_attribute_actions,
        transformer=ddb_to_dict,
    )
    _cleanup_items(encrypted=e_client, write_transformer=dict_to_ddb, table_name=table_name)
    raw_scan_result = client.scan(TableName=table_name, ConsistentRead=True)
    e_scan_result = e_client.scan(TableName=table_name, ConsistentRead=True)
    assert not raw_scan_result["Items"]
    assert not e_scan_result["Items"]
def build_metastore():
    """Create a MetaStore backed by a newly created, randomly named DynamoDB table.

    Returns a (MetaStore, table_name) pair so the caller can delete the table later.
    """
    client = boto3.client("dynamodb", region_name=TEST_REGION_NAME)
    # Random URL-safe name; '=' padding is replaced because it is not a valid table-name character.
    table_name = base64.urlsafe_b64encode(os.urandom(32)).decode("utf-8").replace("=", ".")
    MetaStore.create_table(client, table_name, 1, 1)
    waiter = client.get_waiter("table_exists")
    waiter.wait(TableName=table_name)
    table = boto3.resource("dynamodb", region_name=TEST_REGION_NAME).Table(table_name)
    return MetaStore(table, build_static_jce_cmp("AES", 256, "HmacSHA256", 256)), table_name
def delete_metastore(table_name):
    """Delete the DynamoDB table backing a metastore without waiting for completion."""
    client = boto3.client("dynamodb", region_name=TEST_REGION_NAME)
    client.delete_table(TableName=table_name)
    # It sometimes takes a long time to delete a table.
    # If hanging, asynchronously deleting tables becomes an issue,
    # come back to this.
    # Otherwise, let's just let them take care of themselves.
    # waiter = client.get_waiter("table_not_exists")
    # waiter.wait(TableName=table_name)
@pytest.fixture
def mock_metastore():
    """Pytest fixture: a MetaStore backed by moto-mocked DynamoDB, deleted on teardown."""
    with mock_dynamodb2():
        metastore, table_name = build_metastore()
        yield metastore
        delete_metastore(table_name)
def _count_entries(records, *messages):
    """Count log records whose message contains every string in *messages*."""
    return sum(
        1 for record in records if all(message in record.getMessage() for message in messages)
    )
def _count_puts(records, table_name):
    """Count PutItem operations against *table_name* in the captured botocore log records."""
    table_marker = '"TableName": "{}"'.format(table_name)
    return _count_entries(records, table_marker, "OperationModel(name=PutItem)")
def _count_gets(records, table_name):
    """Count GetItem operations against *table_name* in the captured botocore log records."""
    table_marker = '"TableName": "{}"'.format(table_name)
    return _count_entries(records, table_marker, "OperationModel(name=GetItem)")
def check_metastore_cache_use_encrypt(metastore, table_name, log_capture):
    """Verify that MostRecentProvider caches materials obtained from the MetaStore.

    Performs several puts and gets through an EncryptedTable and asserts, by
    counting PutItem/GetItem operations in the captured botocore logs, that the
    metastore table is only hit when the provider's cache is cold or refreshed.
    """
    try:
        table = boto3.resource("dynamodb").Table(table_name)
    except NoRegionError:
        # Fall back to an explicit region when none is configured in the environment.
        table = boto3.resource("dynamodb", region_name=TEST_REGION_NAME).Table(table_name)
    most_recent_provider = MostRecentProvider(provider_store=metastore, material_name="test", version_ttl=600.0)
    e_table = EncryptedTable(table=table, materials_provider=most_recent_provider)
    item = diverse_item()
    item.update(TEST_KEY)
    e_table.put_item(Item=item)
    e_table.put_item(Item=item)
    e_table.put_item(Item=item)
    e_table.put_item(Item=item)
    try:
        primary_puts = _count_puts(log_capture.records, e_table.name)
        metastore_puts = _count_puts(log_capture.records, metastore._table.name)
        # Four puts against the primary table should write to the metastore only once.
        assert primary_puts == 4
        assert metastore_puts == 1
        e_table.get_item(Key=TEST_KEY)
        e_table.get_item(Key=TEST_KEY)
        e_table.get_item(Key=TEST_KEY)
        primary_gets = _count_gets(log_capture.records, e_table.name)
        metastore_gets = _count_gets(log_capture.records, metastore._table.name)
        metastore_puts = _count_puts(log_capture.records, metastore._table.name)
        # Cached materials: the first three gets must not read from the metastore.
        assert primary_gets == 3
        assert metastore_gets == 0
        assert metastore_puts == 1
        most_recent_provider.refresh()
        e_table.get_item(Key=TEST_KEY)
        e_table.get_item(Key=TEST_KEY)
        e_table.get_item(Key=TEST_KEY)
        primary_gets = _count_gets(log_capture.records, e_table.name)
        metastore_gets = _count_gets(log_capture.records, metastore._table.name)
        # After a refresh the provider must read from the metastore exactly once.
        assert primary_gets == 6
        assert metastore_gets == 1
    finally:
        # Fix: this line was fused with extraction residue in the source; restored
        # to the cleanup delete of the test item.
        e_table.delete_item(Key=TEST_KEY)
from __future__ import division
import base64
import copy
import itertools
import os
from collections import defaultdict
from decimal import Decimal
import boto3
import pytest
from boto3.dynamodb.types import Binary
from botocore.exceptions import NoRegionError
from mock import patch
from moto import mock_dynamodb2
from dynamodb_encryption_sdk.delegated_keys.jce import JceNameLocalDelegatedKey
from dynamodb_encryption_sdk.encrypted.client import EncryptedClient
from dynamodb_encryption_sdk.encrypted.item import decrypt_python_item, encrypt_python_item
from dynamodb_encryption_sdk.encrypted.resource import EncryptedResource
from dynamodb_encryption_sdk.encrypted.table import EncryptedTable
from dynamodb_encryption_sdk.identifiers import CryptoAction
from dynamodb_encryption_sdk.internal.identifiers import ReservedAttributes
from dynamodb_encryption_sdk.material_providers.most_recent import MostRecentProvider
from dynamodb_encryption_sdk.material_providers.static import StaticCryptographicMaterialsProvider
from dynamodb_encryption_sdk.material_providers.store.meta import MetaStore
from dynamodb_encryption_sdk.material_providers.wrapped import WrappedCryptographicMaterialsProvider
from dynamodb_encryption_sdk.materials.raw import RawDecryptionMaterials, RawEncryptionMaterials
from dynamodb_encryption_sdk.structures import AttributeActions
from dynamodb_encryption_sdk.transform import ddb_to_dict, dict_to_ddb
# True when running under Travis CI (consumers of this flag are not visible in this chunk).
RUNNING_IN_TRAVIS = "TRAVIS" in os.environ
# Cache of generated delegated keys: dk_class -> algorithm -> key_length -> key.
_DELEGATED_KEY_CACHE = defaultdict(lambda: defaultdict(dict))
TEST_TABLE_NAME = "my_table"
TEST_REGION_NAME = "us-west-2"
# Primary index definition (partition + sort) used by the mock test tables.
TEST_INDEX = {
    "partition_attribute": {"type": "S", "value": "test_value"},
    "sort_attribute": {"type": "N", "value": Decimal("99.233")},
}
# Extra attributes used as local/global secondary index keys.
SECONDARY_INDEX = {
    "secondary_index_1": {"type": "B", "value": Binary(b"\x00\x01\x02")},
    "secondary_index_2": {"type": "S", "value": "another_value"},
}
# Concrete primary key for the single test item.
TEST_KEY = {name: value["value"] for name, value in TEST_INDEX.items()}
# Index values for batch tests; the last entry uses a different partition value.
TEST_BATCH_INDEXES = [
    {
        "partition_attribute": {"type": "S", "value": "test_value"},
        "sort_attribute": {"type": "N", "value": Decimal("99.233")},
    },
    {
        "partition_attribute": {"type": "S", "value": "test_value"},
        "sort_attribute": {"type": "N", "value": Decimal("92986745")},
    },
    {
        "partition_attribute": {"type": "S", "value": "test_value"},
        "sort_attribute": {"type": "N", "value": Decimal("2231.0001")},
    },
    {
        "partition_attribute": {"type": "S", "value": "another_test_value"},
        "sort_attribute": {"type": "N", "value": Decimal("732342")},
    },
]
# Concrete primary keys for the batch items above.
TEST_BATCH_KEYS = [{name: value["value"] for name, value in key.items()} for key in TEST_BATCH_INDEXES]
@pytest.fixture
def example_table():
    """Pytest fixture: a moto-mocked DynamoDB table with the standard test key schema."""
    mock_dynamodb2().start(reset=False)
    ddb = boto3.client("dynamodb", region_name=TEST_REGION_NAME)
    ddb.create_table(
        TableName=TEST_TABLE_NAME,
        KeySchema=[
            {"AttributeName": "partition_attribute", "KeyType": "HASH"},
            {"AttributeName": "sort_attribute", "KeyType": "RANGE"},
        ],
        AttributeDefinitions=[
            {"AttributeName": name, "AttributeType": value["type"]} for name, value in TEST_INDEX.items()
        ],
        ProvisionedThroughput={"ReadCapacityUnits": 100, "WriteCapacityUnits": 100},
    )
    yield
    # Teardown: drop the table and stop the DynamoDB mock.
    ddb.delete_table(TableName=TEST_TABLE_NAME)
    mock_dynamodb2().stop()
@pytest.fixture
def table_with_local_secondary_indexes():
    """Pytest fixture: a moto-mocked DynamoDB table with two local secondary indexes."""
    mock_dynamodb2().start(reset=False)
    ddb = boto3.client("dynamodb", region_name=TEST_REGION_NAME)
    ddb.create_table(
        TableName=TEST_TABLE_NAME,
        KeySchema=[
            {"AttributeName": "partition_attribute", "KeyType": "HASH"},
            {"AttributeName": "sort_attribute", "KeyType": "RANGE"},
        ],
        LocalSecondaryIndexes=[
            {
                "IndexName": "lsi-1",
                "KeySchema": [{"AttributeName": "secondary_index_1", "KeyType": "HASH"}],
                "Projection": {"ProjectionType": "ALL"},
            },
            {
                "IndexName": "lsi-2",
                "KeySchema": [{"AttributeName": "secondary_index_2", "KeyType": "HASH"}],
                "Projection": {"ProjectionType": "ALL"},
            },
        ],
        AttributeDefinitions=[
            {"AttributeName": name, "AttributeType": value["type"]}
            for name, value in list(TEST_INDEX.items()) + list(SECONDARY_INDEX.items())
        ],
        ProvisionedThroughput={"ReadCapacityUnits": 100, "WriteCapacityUnits": 100},
    )
    yield
    # Teardown: drop the table and stop the DynamoDB mock.
    ddb.delete_table(TableName=TEST_TABLE_NAME)
    mock_dynamodb2().stop()
@pytest.fixture
def table_with_global_secondary_indexes():
    """Pytest fixture: a moto-mocked DynamoDB table with two global secondary indexes."""
    mock_dynamodb2().start(reset=False)
    ddb = boto3.client("dynamodb", region_name=TEST_REGION_NAME)
    ddb.create_table(
        TableName=TEST_TABLE_NAME,
        KeySchema=[
            {"AttributeName": "partition_attribute", "KeyType": "HASH"},
            {"AttributeName": "sort_attribute", "KeyType": "RANGE"},
        ],
        GlobalSecondaryIndexes=[
            {
                "IndexName": "gsi-1",
                "KeySchema": [{"AttributeName": "secondary_index_1", "KeyType": "HASH"}],
                "Projection": {"ProjectionType": "ALL"},
                "ProvisionedThroughput": {"ReadCapacityUnits": 100, "WriteCapacityUnits": 100},
            },
            {
                "IndexName": "gsi-2",
                "KeySchema": [{"AttributeName": "secondary_index_2", "KeyType": "HASH"}],
                "Projection": {"ProjectionType": "ALL"},
                "ProvisionedThroughput": {"ReadCapacityUnits": 100, "WriteCapacityUnits": 100},
            },
        ],
        AttributeDefinitions=[
            {"AttributeName": name, "AttributeType": value["type"]}
            for name, value in list(TEST_INDEX.items()) + list(SECONDARY_INDEX.items())
        ],
        ProvisionedThroughput={"ReadCapacityUnits": 100, "WriteCapacityUnits": 100},
    )
    yield
    # Teardown: drop the table and stop the DynamoDB mock.
    ddb.delete_table(TableName=TEST_TABLE_NAME)
    mock_dynamodb2().stop()
def _get_from_cache(dk_class, algorithm, key_length):
    """Don't generate new keys every time. All we care about is that they are valid keys, not that they are unique."""
    per_algorithm = _DELEGATED_KEY_CACHE[dk_class][algorithm]
    if key_length not in per_algorithm:
        per_algorithm[key_length] = dk_class.generate(algorithm, key_length)
    return per_algorithm[key_length]
def build_static_jce_cmp(encryption_algorithm, encryption_key_length, signing_algorithm, signing_key_length):
    """Build a StaticCryptographicMaterialsProvider using ephemeral JceNameLocalDelegatedKeys as specified."""
    enc_key = _get_from_cache(JceNameLocalDelegatedKey, encryption_algorithm, encryption_key_length)
    sig_key = _get_from_cache(JceNameLocalDelegatedKey, signing_algorithm, signing_key_length)
    return StaticCryptographicMaterialsProvider(
        encryption_materials=RawEncryptionMaterials(signing_key=sig_key, encryption_key=enc_key),
        decryption_materials=RawDecryptionMaterials(verification_key=sig_key, decryption_key=enc_key),
    )
def _build_wrapped_jce_cmp(wrapping_algorithm, wrapping_key_length, signing_algorithm, signing_key_length):
    """Build a WrappedCryptographicMaterialsProvider using ephemeral JceNameLocalDelegatedKeys as specified."""
    wrap_key = _get_from_cache(JceNameLocalDelegatedKey, wrapping_algorithm, wrapping_key_length)
    sign_key = _get_from_cache(JceNameLocalDelegatedKey, signing_algorithm, signing_key_length)
    # The same key is used for both wrapping and unwrapping.
    return WrappedCryptographicMaterialsProvider(
        wrapping_key=wrap_key, unwrapping_key=wrap_key, signing_key=sign_key
    )
def _all_encryption():
    """All encryption configurations to test in slow tests."""
    for algorithm, key_lengths in (("AES", (128, 256)), ("RSA", (1024, 2048, 4096))):
        for key_length in key_lengths:
            yield (algorithm, key_length)
def _all_authentication():
    """All authentication configurations to test in slow tests."""
    for algorithms, key_lengths in (
        (("HmacSHA224", "HmacSHA256", "HmacSHA384", "HmacSHA512"), (128, 256)),
        (("SHA224withRSA", "SHA256withRSA", "SHA384withRSA", "SHA512withRSA"), (1024, 2048, 4096)),
    ):
        for algorithm in algorithms:
            for key_length in key_lengths:
                yield (algorithm, key_length)
def _all_algorithm_pairs():
    """All algorithm pairs (encryption + authentication) to test in slow tests."""
    for encryption_pair in _all_encryption():
        # Signing pairs iterate fastest, matching itertools.product ordering.
        for signing_pair in _all_authentication():
            yield encryption_pair + signing_pair
def _some_algorithm_pairs():
    """Cherry-picked set of algorithm pairs (encryption + authentication) to test in fast tests."""
    return (
        ("AES", 256, "HmacSHA256", 256),
        ("AES", 256, "SHA256withRSA", 4096),
        ("RSA", 4096, "SHA256withRSA", 4096),
    )
# Map of builder-type name to the CMP factory used for that type.
_cmp_builders = {"static": build_static_jce_cmp, "wrapped": _build_wrapped_jce_cmp}
def _all_possible_cmps(algorithm_generator):
    """Generate all possible cryptographic materials providers based on the supplied generator."""
    # The AES combinations do the same thing, but this makes sure that the AESWrap name works as expected.
    yield _build_wrapped_jce_cmp("AESWrap", 256, "HmacSHA256", 256)
    for builder_info, args in itertools.product(_cmp_builders.items(), algorithm_generator()):
        builder_type, builder_func = builder_info
        encryption_algorithm, encryption_key_length, signing_algorithm, signing_key_length = args
        if builder_type == "static" and encryption_algorithm != "AES":
            # Only AES keys are allowed to be used with static materials
            continue
        # Human-readable pytest id, e.g. "AES/256 static HmacSHA256/256".
        id_string = "{enc_algorithm}/{enc_key_length} {builder_type} {sig_algorithm}/{sig_key_length}".format(
            enc_algorithm=encryption_algorithm,
            enc_key_length=encryption_key_length,
            builder_type=builder_type,
            sig_algorithm=signing_algorithm,
            sig_key_length=signing_key_length,
        )
        yield pytest.param(
            builder_func(encryption_algorithm, encryption_key_length, signing_algorithm, signing_key_length),
            id=id_string,
        )
def set_parametrized_cmp(metafunc):
    """Parametrize any requested CMP fixtures with all matching materials providers."""
    fixture_generators = (("all_the_cmps", _all_algorithm_pairs), ("some_cmps", _some_algorithm_pairs))
    for fixture_name, algorithm_generator in fixture_generators:
        if fixture_name in metafunc.fixturenames:
            metafunc.parametrize(fixture_name, _all_possible_cmps(algorithm_generator))
# Attribute-action fixtures keyed by fixture name. "hypothesis_actions" covers the
# three uniform default actions; "parametrized_actions" (below) extends it with
# mixed per-attribute configurations.
_ACTIONS = {
    "hypothesis_actions": (
        pytest.param(AttributeActions(default_action=CryptoAction.ENCRYPT_AND_SIGN), id="encrypt all"),
        pytest.param(AttributeActions(default_action=CryptoAction.SIGN_ONLY), id="sign only all"),
        pytest.param(AttributeActions(default_action=CryptoAction.DO_NOTHING), id="do nothing"),
    )
}
_ACTIONS["parametrized_actions"] = _ACTIONS["hypothesis_actions"] + (
    pytest.param(
        AttributeActions(
            default_action=CryptoAction.ENCRYPT_AND_SIGN,
            attribute_actions={
                "number_set": CryptoAction.SIGN_ONLY,
                "string_set": CryptoAction.SIGN_ONLY,
                "binary_set": CryptoAction.SIGN_ONLY,
            },
        ),
        id="sign sets, encrypt everything else",
    ),
    pytest.param(
        AttributeActions(
            default_action=CryptoAction.ENCRYPT_AND_SIGN,
            attribute_actions={
                "number_set": CryptoAction.DO_NOTHING,
                "string_set": CryptoAction.DO_NOTHING,
                "binary_set": CryptoAction.DO_NOTHING,
            },
        ),
        id="ignore sets, encrypt everything else",
    ),
    pytest.param(
        AttributeActions(
            default_action=CryptoAction.DO_NOTHING, attribute_actions={"map": CryptoAction.ENCRYPT_AND_SIGN}
        ),
        id="encrypt map, ignore everything else",
    ),
    pytest.param(
        AttributeActions(
            default_action=CryptoAction.SIGN_ONLY,
            attribute_actions={
                "number_set": CryptoAction.DO_NOTHING,
                "string_set": CryptoAction.DO_NOTHING,
                "binary_set": CryptoAction.DO_NOTHING,
                "map": CryptoAction.ENCRYPT_AND_SIGN,
            },
        ),
        id="ignore sets, encrypt map, sign everything else",
    ),
)
def set_parametrized_actions(metafunc):
    """Parametrize any requested attribute-action fixtures from _ACTIONS."""
    requested = [fixture_name for fixture_name in _ACTIONS if fixture_name in metafunc.fixturenames]
    for fixture_name in requested:
        metafunc.parametrize(fixture_name, _ACTIONS[fixture_name])
def set_parametrized_item(metafunc):
    """Parametrize the 'parametrized_item' fixture with the diverse test item."""
    if "parametrized_item" not in metafunc.fixturenames:
        return
    metafunc.parametrize("parametrized_item", (pytest.param(diverse_item(), id="diverse item"),))
def diverse_item():
    """Build a deeply-copied item containing a diverse spread of Python value types."""
    item = {
        "int": 5,
        "decimal": Decimal("123.456"),
        "string": "this is a string",
        "binary": b"this is a bytestring! \x01",
        "number_set": {5, 4, 3},
        "string_set": {"abc", "def", "geh"},
        "binary_set": {b"\x00\x00\x00", b"\x00\x01\x00", b"\x00\x00\x02"},
    }
    # A list holding a shallow copy of each scalar/set value defined above.
    item["list"] = [copy.copy(value) for value in item.values()]
    # A map mirroring everything defined so far (including the list).
    item["map"] = copy.deepcopy(item)
    return copy.deepcopy(item)
# Attribute names reserved by the encryption SDK (expected on encrypted items
# whenever any action is taken; see check_encrypted_item).
_reserved_attributes = set([attr.value for attr in ReservedAttributes])
def return_requestitems_as_unprocessed(*args, **kwargs):
    """Stub for batch_write_item that reports every request item as unprocessed."""
    request_items = kwargs["RequestItems"]
    return {"UnprocessedItems": request_items}
def check_encrypted_item(plaintext_item, ciphertext_item, attribute_actions):
    """Verify that *ciphertext_item* is *plaintext_item* encrypted per *attribute_actions*."""
    # Verify that all expected attributes are present.
    expected_attributes = set(plaintext_item.keys())
    if not attribute_actions.take_no_actions:
        expected_attributes = expected_attributes.union(_reserved_attributes)
    assert set(ciphertext_item.keys()) == expected_attributes
    for name, value in ciphertext_item.items():
        if name in _reserved_attributes:
            # Attributes added by the encryption SDK itself are not compared.
            continue
        if attribute_actions.action(name) is CryptoAction.ENCRYPT_AND_SIGN:
            # Encrypted attributes must be opaque Binary blobs, not the original value.
            assert isinstance(value, Binary)
            assert value != plaintext_item[name]
        else:
            # Signed-only and untouched attributes pass through unchanged.
            assert value == plaintext_item[name]
def _matching_key(actual_item, expected):
    """Return the single item in *expected* whose primary key matches *actual_item*."""
    matches = []
    for candidate in expected:
        same_partition = candidate["partition_attribute"] == actual_item["partition_attribute"]
        same_sort = candidate["sort_attribute"] == actual_item["sort_attribute"]
        if same_partition and same_sort:
            matches.append(candidate)
    assert len(matches) == 1
    return matches[0]
def _nop_transformer(item):
    """Identity transformer: return *item* unchanged."""
    return item
def assert_items_exist_in_list(source, expected, transformer):
    """Assert every item in *source* equals its key-matched counterpart in *expected*."""
    for candidate in source:
        counterpart = _matching_key(candidate, expected)
        assert transformer(candidate) == transformer(counterpart)
def assert_equal_lists_of_items(actual, expected, transformer=_nop_transformer):
    """Assert *actual* and *expected* contain exactly the same items, matched by key."""
    assert len(expected) == len(actual)
    assert_items_exist_in_list(actual, expected, transformer)
def assert_list_of_items_contains(full, subset, transformer=_nop_transformer):
    """Assert every item in *subset* appears (key-matched and equal) in *full*."""
    assert len(subset) <= len(full)
    assert_items_exist_in_list(subset, full, transformer)
def check_many_encrypted_items(actual, expected, attribute_actions, transformer=_nop_transformer):
    """Pairwise-verify each encrypted item in *actual* against its key-matched plaintext in *expected*."""
    assert len(expected) == len(actual)
    for ciphertext_candidate in actual:
        plaintext_candidate = _matching_key(ciphertext_candidate, expected)
        check_encrypted_item(
            plaintext_item=transformer(plaintext_candidate),
            ciphertext_item=transformer(ciphertext_candidate),
            attribute_actions=attribute_actions,
        )
def _generate_items(initial_item, write_transformer):
    """Build one transformed copy of *initial_item* for each TEST_BATCH_KEYS entry."""
    return [write_transformer(dict(initial_item, **key)) for key in TEST_BATCH_KEYS]
def _cleanup_items(encrypted, write_transformer, table_name=TEST_TABLE_NAME):
    """Delete every TEST_BATCH_KEYS item from *table_name* through the encrypted interface."""
    delete_requests = [
        {"DeleteRequest": {"Key": write_transformer(key)}} for key in TEST_BATCH_KEYS
    ]
    encrypted.batch_write_item(RequestItems={table_name: delete_requests})
def cycle_batch_item_check(
    raw,
    encrypted,
    initial_actions,
    initial_item,
    write_transformer=_nop_transformer,
    read_transformer=_nop_transformer,
    table_name=TEST_TABLE_NAME,
    delete_items=True,
):
    """Check that cycling (plaintext->encrypted->decrypted) item batch has the expected results.

    :param raw: plain boto3 client/resource used to observe the stored (encrypted) form
    :param encrypted: matching Encrypted* wrapper used for the write/read cycle
    :param write_transformer: applied to items/keys before writing (dict_to_ddb for clients)
    :param read_transformer: applied to read results before comparison (ddb_to_dict for clients)
    :param delete_items: when False, leave the written items for the caller to clean up
    """
    check_attribute_actions = initial_actions.copy()
    # Register the primary key attributes so the ciphertext check treats them as index keys.
    check_attribute_actions.set_index_keys(*list(TEST_KEY.keys()))
    items = _generate_items(initial_item, write_transformer)
    _put_result = encrypted.batch_write_item(  # noqa
        RequestItems={table_name: [{"PutRequest": {"Item": _item}} for _item in items]}
    )
    ddb_keys = [write_transformer(key) for key in TEST_BATCH_KEYS]
    # Raw read shows the stored (encrypted) form of every item.
    encrypted_result = raw.batch_get_item(RequestItems={table_name: {"Keys": ddb_keys}})
    check_many_encrypted_items(
        actual=encrypted_result["Responses"][table_name],
        expected=items,
        attribute_actions=check_attribute_actions,
        transformer=read_transformer,
    )
    # Encrypted read must transparently decrypt every item.
    decrypted_result = encrypted.batch_get_item(RequestItems={table_name: {"Keys": ddb_keys}})
    assert_equal_lists_of_items(
        actual=decrypted_result["Responses"][table_name], expected=items, transformer=read_transformer
    )
    if delete_items:
        _cleanup_items(encrypted, write_transformer, table_name)
    del check_attribute_actions
    del items
def cycle_batch_writer_check(raw_table, encrypted_table, initial_actions, initial_item):
    """Check that cycling (plaintext->encrypted->decrypted) items with the Table batch writer
    has the expected results.

    Writes all batch items through the encrypted batch writer, verifies the raw
    stored forms are encrypted, reads them back decrypted, then deletes them
    through another batch writer.
    """
    check_attribute_actions = initial_actions.copy()
    # Register the primary key attributes so the ciphertext check treats them as index keys.
    check_attribute_actions.set_index_keys(*list(TEST_KEY.keys()))
    items = _generate_items(initial_item, _nop_transformer)
    with encrypted_table.batch_writer() as writer:
        for item in items:
            writer.put_item(item)
    # Fix: plain copy instead of a no-op identity comprehension.
    ddb_keys = list(TEST_BATCH_KEYS)
    # Raw reads show the stored (encrypted) form of every item.
    encrypted_items = [raw_table.get_item(Key=key, ConsistentRead=True)["Item"] for key in ddb_keys]
    check_many_encrypted_items(
        actual=encrypted_items, expected=items, attribute_actions=check_attribute_actions, transformer=_nop_transformer
    )
    # Encrypted reads must transparently decrypt every item.
    decrypted_result = [encrypted_table.get_item(Key=key, ConsistentRead=True)["Item"] for key in ddb_keys]
    assert_equal_lists_of_items(actual=decrypted_result, expected=items, transformer=_nop_transformer)
    with encrypted_table.batch_writer() as writer:
        for key in ddb_keys:
            writer.delete_item(key)
    del check_attribute_actions
    del items
def batch_write_item_unprocessed_check(
    encrypted, initial_item, write_transformer=_nop_transformer, table_name=TEST_TABLE_NAME
):
    """Check that unprocessed items in a batch result are unencrypted.

    :param encrypted: Encrypted* wrapper whose batch_write_item reports items as unprocessed
    :param initial_item: template item expanded with TEST_BATCH_KEYS
    :param write_transformer: transformer applied before writing (dict_to_ddb for clients)
    :param table_name: table to write against
    """
    items = _generate_items(initial_item, write_transformer)
    request_items = {table_name: [{"PutRequest": {"Item": _item}} for _item in items]}
    _put_result = encrypted.batch_write_item(RequestItems=request_items)
    # we expect results to include Unprocessed items, or the test case is invalid!
    unprocessed_items = _put_result["UnprocessedItems"]
    assert unprocessed_items != {}
    # Fix: index by the table_name parameter, not the TEST_TABLE_NAME constant,
    # so the check also works for callers that pass a different table name.
    unprocessed = [operation["PutRequest"]["Item"] for operation in unprocessed_items[table_name]]
    assert_list_of_items_contains(items, unprocessed, transformer=_nop_transformer)
    del items
def cycle_item_check(plaintext_item, crypto_config):
    """Round-trip one item: encrypt it, verify the ciphertext shape, decrypt, compare."""
    encrypted_item = encrypt_python_item(plaintext_item, crypto_config)
    check_encrypted_item(plaintext_item, encrypted_item, crypto_config.attribute_actions)
    round_tripped = decrypt_python_item(encrypted_item, crypto_config)
    assert round_tripped == plaintext_item
def table_cycle_check(materials_provider, initial_actions, initial_item, table_name, region_name=None):
    """Check a single-item put/get/delete cycle through an EncryptedTable.

    Writes the item through the encrypted table, verifies the raw stored form
    against the attribute actions, reads it back decrypted, then deletes it.
    """
    check_attribute_actions = initial_actions.copy()
    # Register the primary key attributes so the ciphertext check treats them as index keys.
    check_attribute_actions.set_index_keys(*list(TEST_KEY.keys()))
    item = initial_item.copy()
    item.update(TEST_KEY)
    kwargs = {}
    if region_name is not None:
        kwargs["region_name"] = region_name
    table = boto3.resource("dynamodb", **kwargs).Table(table_name)
    e_table = EncryptedTable(table=table, materials_provider=materials_provider, attribute_actions=initial_actions)
    _put_result = e_table.put_item(Item=item)  # noqa
    # Raw read shows the stored (encrypted) form.
    encrypted_result = table.get_item(Key=TEST_KEY, ConsistentRead=True)
    check_encrypted_item(item, encrypted_result["Item"], check_attribute_actions)
    # Encrypted read must transparently decrypt back to the original item.
    decrypted_result = e_table.get_item(Key=TEST_KEY, ConsistentRead=True)
    assert decrypted_result["Item"] == item
    e_table.delete_item(Key=TEST_KEY)
    del item
    del check_attribute_actions
def table_cycle_batch_writer_check(materials_provider, initial_actions, initial_item, table_name, region_name=None):
    """Run the Table batch-writer cycle check against a freshly built EncryptedTable."""
    resource_kwargs = {}
    if region_name is not None:
        resource_kwargs["region_name"] = region_name
    raw_table = boto3.resource("dynamodb", **resource_kwargs).Table(table_name)
    encrypted_table = EncryptedTable(
        table=raw_table, materials_provider=materials_provider, attribute_actions=initial_actions
    )
    cycle_batch_writer_check(raw_table, encrypted_table, initial_actions, initial_item)
def table_batch_writer_unprocessed_items_check(
    materials_provider, initial_actions, initial_item, table_name, region_name=None
):
    """Check that unprocessed items returned to a Table BatchWriter are retried successfully."""
    kwargs = {}
    if region_name is not None:
        kwargs["region_name"] = region_name
    resource = boto3.resource("dynamodb", **kwargs)
    table = resource.Table(table_name)
    items = _generate_items(initial_item, _nop_transformer)
    request_items = {table_name: [{"PutRequest": {"Item": _item}} for _item in items]}
    with patch.object(table.meta.client, "batch_write_item") as batch_write_mock:
        # Check that unprocessed items returned to a BatchWriter are successfully retried:
        # the first call reports everything unprocessed, the retried call succeeds.
        batch_write_mock.side_effect = [{"UnprocessedItems": request_items}, {"UnprocessedItems": {}}]
        e_table = EncryptedTable(table=table, materials_provider=materials_provider, attribute_actions=initial_actions)
        with e_table.batch_writer() as writer:
            for item in items:
                writer.put_item(item)
    del items
def resource_cycle_batch_items_check(materials_provider, initial_actions, initial_item, table_name, region_name=None):
    """Check a batch write/read/delete cycle through an EncryptedResource."""
    kwargs = {}
    if region_name is not None:
        kwargs["region_name"] = region_name
    resource = boto3.resource("dynamodb", **kwargs)
    e_resource = EncryptedResource(
        resource=resource, materials_provider=materials_provider, attribute_actions=initial_actions
    )
    cycle_batch_item_check(
        raw=resource,
        encrypted=e_resource,
        initial_actions=initial_actions,
        initial_item=initial_item,
        table_name=table_name,
    )
    # The cycle check deletes its items, so both views of the table must now be empty.
    raw_scan_result = resource.Table(table_name).scan(ConsistentRead=True)
    e_scan_result = e_resource.Table(table_name).scan(ConsistentRead=True)
    assert not raw_scan_result["Items"]
    assert not e_scan_result["Items"]
def resource_batch_items_unprocessed_check(
    materials_provider, initial_actions, initial_item, table_name, region_name=None
):
    """Check that an EncryptedResource leaves unprocessed batch items unencrypted."""
    resource_kwargs = {}
    if region_name is not None:
        resource_kwargs["region_name"] = region_name
    resource = boto3.resource("dynamodb", **resource_kwargs)
    # Force the service to report every request item as unprocessed.
    with patch.object(resource, "batch_write_item", return_requestitems_as_unprocessed):
        encrypted_resource = EncryptedResource(
            resource=resource, materials_provider=materials_provider, attribute_actions=initial_actions
        )
        batch_write_item_unprocessed_check(
            encrypted=encrypted_resource,
            initial_item=initial_item,
            write_transformer=dict_to_ddb,
            table_name=table_name,
        )
def client_cycle_single_item_check(materials_provider, initial_actions, initial_item, table_name, region_name=None):
    """Check a single-item put/get/delete cycle through an EncryptedClient.

    Same flow as table_cycle_check, but items cross the client boundary in the
    DynamoDB wire format (dict_to_ddb / ddb_to_dict).
    """
    check_attribute_actions = initial_actions.copy()
    # Register the primary key attributes so the ciphertext check treats them as index keys.
    check_attribute_actions.set_index_keys(*list(TEST_KEY.keys()))
    item = initial_item.copy()
    item.update(TEST_KEY)
    ddb_item = dict_to_ddb(item)
    ddb_key = dict_to_ddb(TEST_KEY)
    kwargs = {}
    if region_name is not None:
        kwargs["region_name"] = region_name
    client = boto3.client("dynamodb", **kwargs)
    e_client = EncryptedClient(client=client, materials_provider=materials_provider, attribute_actions=initial_actions)
    _put_result = e_client.put_item(TableName=table_name, Item=ddb_item)  # noqa
    # Raw read shows the stored (encrypted) form.
    encrypted_result = client.get_item(TableName=table_name, Key=ddb_key, ConsistentRead=True)
    check_encrypted_item(item, ddb_to_dict(encrypted_result["Item"]), check_attribute_actions)
    # Encrypted read must transparently decrypt back to the original item.
    decrypted_result = e_client.get_item(TableName=table_name, Key=ddb_key, ConsistentRead=True)
    assert ddb_to_dict(decrypted_result["Item"]) == item
    e_client.delete_item(TableName=table_name, Key=ddb_key)
    del item
    del check_attribute_actions
def client_cycle_batch_items_check(materials_provider, initial_actions, initial_item, table_name, region_name=None):
    """Round-trip a batch of items through EncryptedClient and verify cleanup.

    Delegates the write/read/verify cycle to ``cycle_batch_item_check`` and
    then asserts that both the raw and the encrypted client see an empty table.
    """
    client_kwargs = {"region_name": region_name} if region_name is not None else {}
    raw_client = boto3.client("dynamodb", **client_kwargs)
    encrypted_client = EncryptedClient(
        client=raw_client,
        materials_provider=materials_provider,
        attribute_actions=initial_actions,
    )
    cycle_batch_item_check(
        raw=raw_client,
        encrypted=encrypted_client,
        initial_actions=initial_actions,
        initial_item=initial_item,
        write_transformer=dict_to_ddb,
        read_transformer=ddb_to_dict,
        table_name=table_name,
    )
    # The cycle deletes everything it wrote; confirm from both viewpoints.
    for scanning_client in (raw_client, encrypted_client):
        assert not scanning_client.scan(TableName=table_name, ConsistentRead=True)["Items"]
def client_batch_items_unprocessed_check(
    materials_provider, initial_actions, initial_item, table_name, region_name=None
):
    """Verify EncryptedClient surfaces unprocessed batch_write_item entries.

    The low-level client's ``batch_write_item`` is patched to echo every
    request item back as unprocessed before the encrypted wrapper is exercised.
    """
    client_kwargs = {"region_name": region_name} if region_name is not None else {}
    raw_client = boto3.client("dynamodb", **client_kwargs)
    with patch.object(raw_client, "batch_write_item", return_requestitems_as_unprocessed):
        encrypted = EncryptedClient(
            client=raw_client,
            materials_provider=materials_provider,
            attribute_actions=initial_actions,
        )
        batch_write_item_unprocessed_check(
            encrypted=encrypted,
            initial_item=initial_item,
            write_transformer=dict_to_ddb,
            table_name=table_name,
        )
def client_cycle_batch_items_check_paginators(
    materials_provider, initial_actions, initial_item, table_name, region_name=None
):
    """Round-trip a batch of items and verify the scan paginators agree.

    Writes items through EncryptedClient (leaving them in the table), scans the
    table with both the raw and the encrypted paginator, checks the raw pages
    are the encrypted form of the decrypted pages, then cleans up.
    """
    kwargs = {}
    if region_name is not None:
        kwargs["region_name"] = region_name
    client = boto3.client("dynamodb", **kwargs)
    e_client = EncryptedClient(client=client, materials_provider=materials_provider, attribute_actions=initial_actions)
    # delete_items=False keeps the items in place for the paginator scans below.
    cycle_batch_item_check(
        raw=client,
        encrypted=e_client,
        initial_actions=initial_actions,
        initial_item=initial_item,
        write_transformer=dict_to_ddb,
        read_transformer=ddb_to_dict,
        table_name=table_name,
        delete_items=False,
    )

    encrypted_items = []
    raw_paginator = client.get_paginator("scan")
    for page in raw_paginator.paginate(TableName=table_name, ConsistentRead=True):
        encrypted_items.extend(page["Items"])

    decrypted_items = []
    encrypted_paginator = e_client.get_paginator("scan")
    for page in encrypted_paginator.paginate(TableName=table_name, ConsistentRead=True):
        decrypted_items.extend(page["Items"])

    # (Leftover debug print() calls of both item lists were removed here.)
    # Key attributes stay in plaintext, so mark them before comparing.
    check_attribute_actions = initial_actions.copy()
    check_attribute_actions.set_index_keys(*list(TEST_KEY.keys()))
    check_many_encrypted_items(
        actual=encrypted_items,
        expected=decrypted_items,
        attribute_actions=check_attribute_actions,
        transformer=ddb_to_dict,
    )

    _cleanup_items(encrypted=e_client, write_transformer=dict_to_ddb, table_name=table_name)
    raw_scan_result = client.scan(TableName=table_name, ConsistentRead=True)
    e_scan_result = e_client.scan(TableName=table_name, ConsistentRead=True)
    assert not raw_scan_result["Items"]
    assert not e_scan_result["Items"]
def build_metastore():
    """Create a fresh MetaStore table and return (MetaStore, table_name).

    The table name is random so concurrent runs cannot collide; '=' padding is
    replaced with '.' because '=' is not a legal DynamoDB table-name character.
    """
    ddb_client = boto3.client("dynamodb", region_name=TEST_REGION_NAME)
    table_name = base64.urlsafe_b64encode(os.urandom(32)).decode("utf-8").replace("=", ".")

    MetaStore.create_table(ddb_client, table_name, 1, 1)
    # Block until the table is actually usable before handing it out.
    ddb_client.get_waiter("table_exists").wait(TableName=table_name)

    table = boto3.resource("dynamodb", region_name=TEST_REGION_NAME).Table(table_name)
    materials = build_static_jce_cmp("AES", 256, "HmacSHA256", 256)
    return MetaStore(table, materials), table_name
def delete_metastore(table_name):
    """Kick off deletion of a MetaStore table without waiting for completion.

    Table deletion can take a long time, so this is deliberately
    fire-and-forget; the tables take care of themselves. If orphaned
    half-deleted tables ever become an issue, wait explicitly:
        waiter = client.get_waiter("table_not_exists")
        waiter.wait(TableName=table_name)
    """
    boto3.client("dynamodb", region_name=TEST_REGION_NAME).delete_table(TableName=table_name)
@pytest.fixture
def mock_metastore():
    """Pytest fixture: a MetaStore backed by a mocked (moto) DynamoDB.

    The backing table is created on entry and deleted during teardown, both
    inside the moto mock context.
    """
    with mock_dynamodb2():
        metastore, table_name = build_metastore()
        yield metastore
        delete_metastore(table_name)
def _count_entries(records, *messages):
count = 0
for record in records:
if all((message in record.getMessage() for message in messages)):
count += 1
return count
def _count_puts(records, table_name):
return _count_entries(records, '"TableName": "{}"'.format(table_name), "OperationModel(name=PutItem)")
def _count_gets(records, table_name):
return _count_entries(records, '"TableName": "{}"'.format(table_name), "OperationModel(name=GetItem)")
def check_metastore_cache_use_encrypt(metastore, table_name, log_capture):
    """Verify MostRecentProvider's cache behavior via captured botocore logs.

    Counts PutItem/GetItem calls against the primary table vs. the metastore
    table: the metastore must be written once on first use, not touched again
    while the provider cache is warm, and read exactly once after refresh().
    """
    try:
        table = boto3.resource("dynamodb").Table(table_name)
    except NoRegionError:
        # No default region configured in the environment; use the test region.
        table = boto3.resource("dynamodb", region_name=TEST_REGION_NAME).Table(table_name)
    most_recent_provider = MostRecentProvider(provider_store=metastore, material_name="test", version_ttl=600.0)
    e_table = EncryptedTable(table=table, materials_provider=most_recent_provider)
    item = diverse_item()
    item.update(TEST_KEY)
    e_table.put_item(Item=item)
    e_table.put_item(Item=item)
    e_table.put_item(Item=item)
    e_table.put_item(Item=item)
    try:
        # Four puts to the primary table but only one metastore write: the
        # provider reuses its cached materials after the first encrypt.
        primary_puts = _count_puts(log_capture.records, e_table.name)
        metastore_puts = _count_puts(log_capture.records, metastore._table.name)
        assert primary_puts == 4
        assert metastore_puts == 1
        e_table.get_item(Key=TEST_KEY)
        e_table.get_item(Key=TEST_KEY)
        e_table.get_item(Key=TEST_KEY)
        primary_gets = _count_gets(log_capture.records, e_table.name)
        metastore_gets = _count_gets(log_capture.records, metastore._table.name)
        metastore_puts = _count_puts(log_capture.records, metastore._table.name)
        assert primary_gets == 3
        assert metastore_gets == 0  # decrypts served entirely from the warm cache
        assert metastore_puts == 1
        # After an explicit refresh the provider must re-read the metastore once.
        most_recent_provider.refresh()
        e_table.get_item(Key=TEST_KEY)
        e_table.get_item(Key=TEST_KEY)
        e_table.get_item(Key=TEST_KEY)
        primary_gets = _count_gets(log_capture.records, e_table.name)
        metastore_gets = _count_gets(log_capture.records, metastore._table.name)
        assert primary_gets == 6
        assert metastore_gets == 1
    finally:
e_table.delete_item(Key=TEST_KEY) | 0.653569 | 0.339745 |
import argparse
from http import client as httplib
import socket
from oslo_serialization import jsonutils
from kuryr_kubernetes import constants
class UnixDomainHttpConnection(httplib.HTTPConnection):
    """HTTPConnection that speaks HTTP over a Unix domain socket.

    The "localhost" host passed to HTTPConnection is only used for the Host
    header; the actual transport is the Unix socket at ``path``.
    """

    def __init__(self, path, timeout):
        # Python-3 idiom: super() instead of naming the base class explicitly.
        super().__init__("localhost", timeout=timeout)
        self.__unix_socket_path = path
        self.timeout = timeout

    def connect(self):
        """Open the Unix socket and install it as this connection's transport."""
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.settimeout(self.timeout)
        sock.connect(self.__unix_socket_path)
        self.sock = sock
def create_subports(num_ports, trunk_ips, timeout=180):
    """Ask the kuryr daemon to add ``num_ports`` subports to each listed trunk."""
    payload = jsonutils.dumps({"trunks": trunk_ips, "num_ports": num_ports})
    headers = {
        'Content-Type': 'application/json',
        'Connection': 'close',
        'Content-Length': len(payload),
    }
    url = 'http://localhost{0}'.format(constants.VIF_POOL_POPULATE)
    connection = UnixDomainHttpConnection(constants.MANAGER_SOCKET_FILE, timeout)
    connection.request('POST', url, body=payload, headers=headers)
    print(connection.getresponse().read())
def delete_subports(trunk_ips, timeout=180):
    """Ask the kuryr daemon to free unused subports on the listed trunks."""
    payload = jsonutils.dumps({"trunks": trunk_ips})
    headers = {
        'Content-Type': 'application/json',
        'Connection': 'close',
        'Content-Length': len(payload),
    }
    url = 'http://localhost{0}'.format(constants.VIF_POOL_FREE)
    connection = UnixDomainHttpConnection(constants.MANAGER_SOCKET_FILE, timeout)
    connection.request('POST', url, body=payload, headers=headers)
    print(connection.getresponse().read())
def list_pools(timeout=180):
    """Print the available pools and the number of ports each one holds.

    Bug fix: the original sent 'Context-Type'/'Context-Length' headers; the
    correct HTTP header names are 'Content-Type'/'Content-Length', as already
    used by create_subports/delete_subports.
    """
    method = 'GET'
    body = jsonutils.dumps({})
    headers = {'Content-Type': 'application/json', 'Connection': 'close'}
    headers['Content-Length'] = len(body)
    path = 'http://localhost{0}'.format(constants.VIF_POOL_LIST)
    socket_path = constants.MANAGER_SOCKET_FILE
    conn = UnixDomainHttpConnection(socket_path, timeout)
    conn.request(method, path, body=body, headers=headers)
    resp = conn.getresponse()
    print(resp.read())
def show_pool(trunk_ip, project_id, sg, timeout=180):
    """Print the ports associated with the pool (trunk_ip, project_id, sg).

    Bug fix: the original sent 'Context-Type'/'Context-Length' headers; the
    correct HTTP header names are 'Content-Type'/'Content-Length', as already
    used by create_subports/delete_subports.
    """
    method = 'GET'
    body = jsonutils.dumps({"pool_key": [trunk_ip, project_id, sg]})
    headers = {'Content-Type': 'application/json', 'Connection': 'close'}
    headers['Content-Length'] = len(body)
    path = 'http://localhost{0}'.format(constants.VIF_POOL_SHOW)
    socket_path = constants.MANAGER_SOCKET_FILE
    conn = UnixDomainHttpConnection(socket_path, timeout)
    conn.request(method, path, body=body, headers=headers)
    resp = conn.getresponse()
    print(resp.read())
def _get_parser():
parser = argparse.ArgumentParser(
description='Tool to create/free subports from the subports pool')
subparser = parser.add_subparsers(help='commands', dest='command')
create_ports_parser = subparser.add_parser(
'create',
help='Populate the pool(s) with subports')
create_ports_parser.add_argument(
'--trunks',
help='list of trunk IPs where subports will be added',
nargs='+',
dest='subports',
required=True)
create_ports_parser.add_argument(
'-n', '--num-ports',
help='number of subports to be created per pool.',
dest='num',
default=1,
type=int)
create_ports_parser.add_argument(
'-t', '--timeout',
help='set timeout for operation. Default is 180 sec',
dest='timeout',
default=180,
type=int)
delete_ports_parser = subparser.add_parser(
'free',
help='Remove unused subports from the pools')
delete_ports_parser.add_argument(
'--trunks',
help='list of trunk IPs where subports will be freed',
nargs='+',
dest='subports')
delete_ports_parser.add_argument(
'-t', '--timeout',
help='set timeout for operation. Default is 180 sec',
dest='timeout',
default=180,
type=int)
list_pools_parser = subparser.add_parser(
'list',
help='List available pools and the number of ports they have')
list_pools_parser.add_argument(
'-t', '--timeout',
help='set timeout for operation. Default is 180 sec',
dest='timeout',
default=180,
type=int)
show_pool_parser = subparser.add_parser(
'show',
help='Show the ports associated to a given pool')
show_pool_parser.add_argument(
'--trunk',
help='Trunk IP of the desired pool',
dest='trunk_ip',
required=True)
show_pool_parser.add_argument(
'-p', '--project-id',
help='project id of the pool',
dest='project_id',
required=True)
show_pool_parser.add_argument(
'--sg',
help='Security group ids of the pool',
dest='sg',
nargs='+',
required=True)
show_pool_parser.add_argument(
'-t', '--timeout',
help='set timeout for operation. Default is 180 sec',
dest='timeout',
default=180,
type=int)
return parser
def main():
    """Parse options and call the appropriate class/method."""
    parser = _get_parser()
    args = parser.parse_args()
    if args.command == 'create':
        create_subports(args.num, args.subports, args.timeout)
    elif args.command == 'free':
        delete_subports(args.subports, args.timeout)
    elif args.command == 'list':
        list_pools(args.timeout)
    elif args.command == 'show':
        show_pool(args.trunk_ip, args.project_id, args.sg, args.timeout)
    else:
        # Robustness fix: subparsers are optional on Python 3, so running with
        # no subcommand used to exit silently; show usage instead.
        parser.print_help()
if __name__ == '__main__':
main() | contrib/pools-management/subports.py |
import argparse
from http import client as httplib
import socket
from oslo_serialization import jsonutils
from kuryr_kubernetes import constants
class UnixDomainHttpConnection(httplib.HTTPConnection):
    """HTTPConnection that speaks HTTP over a Unix domain socket.

    The "localhost" host passed to HTTPConnection is only used for the Host
    header; the actual transport is the Unix socket at ``path``.
    """

    def __init__(self, path, timeout):
        # Python-3 idiom: super() instead of naming the base class explicitly.
        super().__init__("localhost", timeout=timeout)
        self.__unix_socket_path = path
        self.timeout = timeout

    def connect(self):
        """Open the Unix socket and install it as this connection's transport."""
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.settimeout(self.timeout)
        sock.connect(self.__unix_socket_path)
        self.sock = sock
def create_subports(num_ports, trunk_ips, timeout=180):
    """POST to the kuryr daemon socket to add subports to the given trunks."""
    method = 'POST'
    body = jsonutils.dumps({"trunks": trunk_ips, "num_ports": num_ports})
    headers = {'Content-Type': 'application/json', 'Connection': 'close'}
    headers['Content-Length'] = len(body)
    path = 'http://localhost{0}'.format(constants.VIF_POOL_POPULATE)
    socket_path = constants.MANAGER_SOCKET_FILE
    conn = UnixDomainHttpConnection(socket_path, timeout)
    conn.request(method, path, body=body, headers=headers)
    resp = conn.getresponse()
    print(resp.read())


def delete_subports(trunk_ips, timeout=180):
    """POST to the kuryr daemon socket to free unused subports on the trunks."""
    method = 'POST'
    body = jsonutils.dumps({"trunks": trunk_ips})
    headers = {'Content-Type': 'application/json', 'Connection': 'close'}
    headers['Content-Length'] = len(body)
    path = 'http://localhost{0}'.format(constants.VIF_POOL_FREE)
    socket_path = constants.MANAGER_SOCKET_FILE
    conn = UnixDomainHttpConnection(socket_path, timeout)
    conn.request(method, path, body=body, headers=headers)
    resp = conn.getresponse()
    print(resp.read())
def list_pools(timeout=180):
    """Print the available pools and the number of ports each one holds.

    Bug fix: the original sent 'Context-Type'/'Context-Length' headers; the
    correct HTTP header names are 'Content-Type'/'Content-Length', as already
    used by create_subports/delete_subports.
    """
    method = 'GET'
    body = jsonutils.dumps({})
    headers = {'Content-Type': 'application/json', 'Connection': 'close'}
    headers['Content-Length'] = len(body)
    path = 'http://localhost{0}'.format(constants.VIF_POOL_LIST)
    socket_path = constants.MANAGER_SOCKET_FILE
    conn = UnixDomainHttpConnection(socket_path, timeout)
    conn.request(method, path, body=body, headers=headers)
    resp = conn.getresponse()
    print(resp.read())
def show_pool(trunk_ip, project_id, sg, timeout=180):
    """Print the ports associated with the pool (trunk_ip, project_id, sg).

    Bug fix: the original sent 'Context-Type'/'Context-Length' headers; the
    correct HTTP header names are 'Content-Type'/'Content-Length', as already
    used by create_subports/delete_subports.
    """
    method = 'GET'
    body = jsonutils.dumps({"pool_key": [trunk_ip, project_id, sg]})
    headers = {'Content-Type': 'application/json', 'Connection': 'close'}
    headers['Content-Length'] = len(body)
    path = 'http://localhost{0}'.format(constants.VIF_POOL_SHOW)
    socket_path = constants.MANAGER_SOCKET_FILE
    conn = UnixDomainHttpConnection(socket_path, timeout)
    conn.request(method, path, body=body, headers=headers)
    resp = conn.getresponse()
    print(resp.read())
def _get_parser():
    """Build the CLI parser with create / free / list / show subcommands."""
    parser = argparse.ArgumentParser(
        description='Tool to create/free subports from the subports pool')
    subparser = parser.add_subparsers(help='commands', dest='command')
    # 'create': populate pools; --trunks is required, -n and -t optional.
    create_ports_parser = subparser.add_parser(
        'create',
        help='Populate the pool(s) with subports')
    create_ports_parser.add_argument(
        '--trunks',
        help='list of trunk IPs where subports will be added',
        nargs='+',
        dest='subports',
        required=True)
    create_ports_parser.add_argument(
        '-n', '--num-ports',
        help='number of subports to be created per pool.',
        dest='num',
        default=1,
        type=int)
    create_ports_parser.add_argument(
        '-t', '--timeout',
        help='set timeout for operation. Default is 180 sec',
        dest='timeout',
        default=180,
        type=int)
    # 'free': remove unused subports; --trunks is optional here.
    delete_ports_parser = subparser.add_parser(
        'free',
        help='Remove unused subports from the pools')
    delete_ports_parser.add_argument(
        '--trunks',
        help='list of trunk IPs where subports will be freed',
        nargs='+',
        dest='subports')
    delete_ports_parser.add_argument(
        '-t', '--timeout',
        help='set timeout for operation. Default is 180 sec',
        dest='timeout',
        default=180,
        type=int)
    # 'list': no arguments beyond the shared timeout.
    list_pools_parser = subparser.add_parser(
        'list',
        help='List available pools and the number of ports they have')
    list_pools_parser.add_argument(
        '-t', '--timeout',
        help='set timeout for operation. Default is 180 sec',
        dest='timeout',
        default=180,
        type=int)
    # 'show': identify a pool by trunk IP + project id + security groups.
    show_pool_parser = subparser.add_parser(
        'show',
        help='Show the ports associated to a given pool')
    show_pool_parser.add_argument(
        '--trunk',
        help='Trunk IP of the desired pool',
        dest='trunk_ip',
        required=True)
    show_pool_parser.add_argument(
        '-p', '--project-id',
        help='project id of the pool',
        dest='project_id',
        required=True)
    show_pool_parser.add_argument(
        '--sg',
        help='Security group ids of the pool',
        dest='sg',
        nargs='+',
        required=True)
    show_pool_parser.add_argument(
        '-t', '--timeout',
        help='set timeout for operation. Default is 180 sec',
        dest='timeout',
        default=180,
        type=int)
    return parser
def main():
    """Parse options and call the appropriate class/method."""
    parser = _get_parser()
    args = parser.parse_args()
    # Dispatch on the chosen subcommand. NOTE(review): on Python 3 subparsers
    # are optional, so with no subcommand this falls through and does nothing.
    if args.command == 'create':
        create_subports(args.num, args.subports, args.timeout)
    elif args.command == 'free':
        delete_subports(args.subports, args.timeout)
    elif args.command == 'list':
        list_pools(args.timeout)
    elif args.command == 'show':
        show_pool(args.trunk_ip, args.project_id, args.sg, args.timeout)
if __name__ == '__main__':
main() | 0.4436 | 0.061199 |
# Parse per-epoch KITTI-style evaluation logs and plot AP-vs-epoch curves.
# Usage: parse_log.py <root_dir>  (expects files at <root_dir>/<epoch>/log)
import matplotlib
matplotlib.use('Agg')  # headless backend; must be selected before importing pyplot
import matplotlib.pyplot as plt
import os
from collections import OrderedDict
import sys

# Named (offset, on/off-pattern) dash styles in matplotlib's linestyle format.
linestyles = OrderedDict(
    [('solid', (0, ())),
     ('loosely dotted', (0, (1, 10))),
     ('dotted', (0, (1, 5))),
     ('densely dotted', (0, (1, 1))),
     ('loosely dashed', (0, (5, 10))),
     ('dashed', (0, (5, 5))),
     ('densely dashed', (0, (5, 1))),
     ('loosely dashdotted', (0, (3, 10, 1, 10))),
     ('dashdotted', (0, (3, 5, 1, 5))),
     ('densely dashdotted', (0, (3, 1, 1, 1))),
     ('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
     ('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
     ('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))])

# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
             (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
             (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
             (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
             (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]

# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
    r, g, b = tableau20[i]
    tableau20[i] = (r / 255., g / 255., b / 255.)

ROOT_DIR = sys.argv[1]
det_3d = [ [] for _ in range(3) ]  # [easy, moderate, hard] AP series, 3D metric
det_bv = [ [] for _ in range(3) ]  # [easy, moderate, hard] AP series, bird's-eye view
# Logs are assumed to exist for epochs 9, 19, 29, ... — TODO confirm schedule.
for epoch in range(9, 200, 10):
    log_file = os.path.join(ROOT_DIR, str(epoch), 'log')
    if not os.path.exists( log_file ):
        break  # stop at the first missing epoch log
    else:
        lines = open(log_file).readlines()
        for line in lines:
            line = line.split()
            # NOTE(review): a blank line in the log would raise IndexError here.
            if line[0] == 'car_detection_ground':
                det_bv[0].append( float( line[-3] ) )
                det_bv[1].append( float( line[-2] ) )
                det_bv[2].append( float( line[-1] ) )
            elif line[0] == 'car_detection_3d':
                det_3d[0].append( float(line[-3]) )
                det_3d[1].append( float(line[-2]) )
                det_3d[2].append( float(line[-1]) )

RANGE = range(len(det_bv[0]))
plt.figure(figsize=(10, 7))
# Solid lines: 3D AP; densely dotted: bird's-eye view AP (same color per difficulty).
plt.plot( RANGE, det_3d[0] , linestyle=linestyles['solid'], linewidth=1.5, color=tableau20[0] )
plt.plot( RANGE, det_3d[1] , linestyle=linestyles['solid'], linewidth=1.5, color=tableau20[2] )
plt.plot( RANGE, det_3d[2] , linestyle=linestyles['solid'], linewidth=1.5, color=tableau20[4] )
plt.plot( RANGE, det_bv[0] , linestyle=linestyles['densely dotted'], linewidth=1.5, color=tableau20[0] )
plt.plot( RANGE, det_bv[1] , linestyle=linestyles['densely dotted'], linewidth=1.5, color=tableau20[2] )
plt.plot( RANGE, det_bv[2] , linestyle=linestyles['densely dotted'], linewidth=1.5, color=tableau20[4] )
plt.legend(['3d easy', '3d moderate', '3d hard', 'bird view easy', 'bird view moderate', 'bird view hard'], loc=4)
plt.xlabel('Epoch', fontsize=16)
plt.xticks( RANGE, range(9, len(RANGE)*10, 10) )  # relabel tick positions with epoch numbers
plt.xticks(fontsize=14)
plt.ylabel('AP', fontsize=16)
plt.ylim(35, 95)
plt.yticks( range(35, 95, 5) )
plt.yticks(fontsize=14)
plt.grid(linestyle=linestyles['dotted'])
# Output image is named after the log directory and saved to the CWD.
DIR_NAME = ROOT_DIR.split('/')[-1]
OUTPUT_NAME = DIR_NAME + '.jpg'
plt.savefig(OUTPUT_NAME)
print('results parsed and saved in: ' + OUTPUT_NAME) | DEEPLEARNING/DL_VOXELNET/parse_log.py | import matplotlib
# Duplicate copy of the log-parsing/plotting script (dataset parsed_code column).
# NOTE(review): the leading "import matplotlib" of this copy sits on the
# preceding dataset-row boundary line; it is required before this call.
matplotlib.use('Agg')  # headless backend; must be selected before importing pyplot
import matplotlib.pyplot as plt
import os
from collections import OrderedDict
import sys

# Named (offset, on/off-pattern) dash styles in matplotlib's linestyle format.
linestyles = OrderedDict(
    [('solid', (0, ())),
     ('loosely dotted', (0, (1, 10))),
     ('dotted', (0, (1, 5))),
     ('densely dotted', (0, (1, 1))),
     ('loosely dashed', (0, (5, 10))),
     ('dashed', (0, (5, 5))),
     ('densely dashed', (0, (5, 1))),
     ('loosely dashdotted', (0, (3, 10, 1, 10))),
     ('dashdotted', (0, (3, 5, 1, 5))),
     ('densely dashdotted', (0, (3, 1, 1, 1))),
     ('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
     ('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
     ('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))])

# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
             (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
             (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
             (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
             (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]

# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
    r, g, b = tableau20[i]
    tableau20[i] = (r / 255., g / 255., b / 255.)

ROOT_DIR = sys.argv[1]
det_3d = [ [] for _ in range(3) ]  # [easy, moderate, hard] AP series, 3D metric
det_bv = [ [] for _ in range(3) ]  # [easy, moderate, hard] AP series, bird's-eye view
for epoch in range(9, 200, 10):
    log_file = os.path.join(ROOT_DIR, str(epoch), 'log')
    if not os.path.exists( log_file ):
        break  # stop at the first missing epoch log
    else:
        lines = open(log_file).readlines()
        for line in lines:
            line = line.split()
            if line[0] == 'car_detection_ground':
                det_bv[0].append( float( line[-3] ) )
                det_bv[1].append( float( line[-2] ) )
                det_bv[2].append( float( line[-1] ) )
            elif line[0] == 'car_detection_3d':
                det_3d[0].append( float(line[-3]) )
                det_3d[1].append( float(line[-2]) )
                det_3d[2].append( float(line[-1]) )

RANGE = range(len(det_bv[0]))
plt.figure(figsize=(10, 7))
# Solid lines: 3D AP; densely dotted: bird's-eye view AP (same color per difficulty).
plt.plot( RANGE, det_3d[0] , linestyle=linestyles['solid'], linewidth=1.5, color=tableau20[0] )
plt.plot( RANGE, det_3d[1] , linestyle=linestyles['solid'], linewidth=1.5, color=tableau20[2] )
plt.plot( RANGE, det_3d[2] , linestyle=linestyles['solid'], linewidth=1.5, color=tableau20[4] )
plt.plot( RANGE, det_bv[0] , linestyle=linestyles['densely dotted'], linewidth=1.5, color=tableau20[0] )
plt.plot( RANGE, det_bv[1] , linestyle=linestyles['densely dotted'], linewidth=1.5, color=tableau20[2] )
plt.plot( RANGE, det_bv[2] , linestyle=linestyles['densely dotted'], linewidth=1.5, color=tableau20[4] )
plt.legend(['3d easy', '3d moderate', '3d hard', 'bird view easy', 'bird view moderate', 'bird view hard'], loc=4)
plt.xlabel('Epoch', fontsize=16)
plt.xticks( RANGE, range(9, len(RANGE)*10, 10) )  # relabel tick positions with epoch numbers
plt.xticks(fontsize=14)
plt.ylabel('AP', fontsize=16)
plt.ylim(35, 95)
plt.yticks( range(35, 95, 5) )
plt.yticks(fontsize=14)
plt.grid(linestyle=linestyles['dotted'])
# Output image is named after the log directory and saved to the CWD.
DIR_NAME = ROOT_DIR.split('/')[-1]
OUTPUT_NAME = DIR_NAME + '.jpg'
plt.savefig(OUTPUT_NAME)
print('results parsed and saved in: ' + OUTPUT_NAME) | 0.245718 | 0.405508 |
import enum
import jarvisenv
class Status(enum.Enum):
    """Available cart statuses (the cart's finite-state machine states)."""
    Idle = 0       # ready to accept a new operation
    Moving = 1     # travelling toward Cart.data (the destination)
    Loading = 2    # taking Cart.data[0] into slot Cart.data[1]
    Unloading = 3  # emptying slot Cart.data
class Load:
    """A single transportable load: origin, destination, weight, and payload.

    ``onload``/``onunload`` are callback hooks fired when a cart picks the
    load up or drops it off; ``prio`` can be raised (never lowered) via
    set_priority().
    """

    def __init__(self, src, dst, weight, content):
        assert weight > 0
        self.src = src
        self.dst = dst
        self.weight = weight
        self.content = content
        self.onload = Load.just_pass_it
        self.onunload = Load.just_pass_it
        self.prio = False
        self.born = 0  # presumably a creation-time marker set by the owner — confirm

    def __str__(self):
        prefix = "Priority" if self.prio else ""
        return '%sLoad(%s)' % (prefix, self.content)

    def set_priority(self):
        """Mark this load as prioritized; there is no way back."""
        self.prio = True

    def load(self, cart_dev):
        """Invoke the on-load callback (if any) with the carrying cart."""
        callback = self.onload
        if callable(callback):
            callback(cart_dev, self)

    def unload(self, cart_dev):
        """Invoke the on-unload callback (if any) with the carrying cart."""
        callback = self.onunload
        if callable(callback):
            callback(cart_dev, self)

    def just_pass_it(self, argument=None):
        """Default no-op callback for load and unload."""
class CartError(Exception):
    """Raised by Cart self-checks, e.g. starting an operation while busy."""
class Cart:
    """Cart device: a state machine (Idle/Moving/Loading/Unloading) with a
    fixed number of load slots and a declared maximum load capacity."""

    def __init__(self, nslots, load_capacity, debug_lvl=0):
        self.slots = [None] * nslots        # one Load (or None) per slot
        self.load_capacity = load_capacity  # max total weight the cart may carry
        self.status = Status.Idle
        self.data = None                    # operation payload: destination, (load, slot), or slot
        self.pos = None                     # current position; None until the first move completes
        self.debug_lvl = debug_lvl          # 0 = silent, 1 = events, >1 = events + full state
        self.onmove = Cart.just_pass_it     # hook invoked when a move starts

    def __str__(self):
        return 'Cart(pos=%s, %s, data=%s, maxload=%d, slots=%s)' % \
            (self.pos, self.status, self.data, self.load_capacity,
             self.slots)

    def just_pass_it(self, argument=None):
        """Dummy function for a move"""

    def log(self, msg):
        """a simple logger: prints events (and full state at debug_lvl > 1),
        timestamped with the simulation clock from jarvisenv."""
        if self.debug_lvl > 1:
            print(self)
        if self.debug_lvl > 0:
            print('%d %s' % (jarvisenv.time(), msg))

    def check_idle(self):
        # Guard used by every start_* operation: raise unless the cart is idle.
        if self.status != Status.Idle:
            raise CartError("Cart is busy: %s" % self.status)

    def empty(self):
        """returns True if cart has no load at all"""
        return self.slots == [None] * len(self.slots)

    def load_sum(self):
        """return sum of all loads"""
        sum_weight = 0
        for slot in self.slots:
            if slot:
                sum_weight += slot.weight
        return sum_weight
def get_prio_idx(self):
"""returns index of slot index with prioritized load or -1 if there is none"""
for i in range(len(self.slots)):
if self.slots[i].prio:
return i
return -1
    def check_free_slot(self, slot):
        """pass or raise an exception about invalid slot number"""
        # IndexError for an out-of-range index, ValueError if the slot is occupied.
        if slot < 0 or slot >= len(self.slots):
            raise IndexError("slot '%s' outside range [0;%d]" %
                             (slot, len(self.slots)))
        if self.slots[slot] is not None:
            raise ValueError("slot %d not empty: %s" %
                             (slot, self.slots[slot]))
def check_loaded_slot(self, slot):
"""pass or raise an exception about invalid slot when unloading"""
if slot < 0 or slot >= len(self.slots):
raise IndexError("slot '%s' outside range [0;%d]" %
(slot, len(self.slots)))
if self.slots[slot] is None:
raise ValueError("slot %d not empty: %s" %
(slot, self.slots[slot]))
    def get_free_slot(self):
        """returns index of free slot, or -1 if all slots are occupied"""
        for i in range(len(self.slots)):
            if self.slots[i] is None:
                return i
        return -1

    def set_idle(self):
        """helper function to idle the cart"""
        self.log("idle %s" % self.pos)
        self.status = Status.Idle
        self.data = None

    def start_moving(self, destination):
        # Begin a move; self.data holds the destination until finish_moving().
        self.log("moving %s %s" % (self.pos, destination))
        self.check_idle()
        self.status = Status.Moving
        self.data = destination
        if callable(self.onmove):
            self.onmove(self)

    def finish_moving(self):
        # self.log("finishing moving to %s" % self.data)
        assert self.status == Status.Moving
        self.pos = self.data
        self.set_idle()
        # self.log("finished")

    def start_loading(self, load: Load, slot):
        # Begin loading `load` into `slot`; requires an idle cart and a free slot.
        self.check_idle()
        self.check_free_slot(slot)
        self.status = Status.Loading
        self.data = (load, slot)
        self.log("loading %s %s %d %d" % (self.pos, load.content, load.weight, slot))
        # here, a factory can start loading to the slot

    def finish_loading(self):
        # Commit the pending load into its slot and fire the load's callback.
        assert self.status == Status.Loading
        load, slot = self.data
        self.slots[slot] = load
        load.load(self)
        self.log("loaded %s %s" % (self.pos, load.content))
        self.set_idle()
        return load

    def start_unloading(self, slot):
        # Begin unloading `slot`; requires an idle cart and an occupied slot.
        self.check_idle()
        self.check_loaded_slot(slot)
        self.status = Status.Unloading
        self.data = slot
        load = self.slots[slot]
        self.log("unloading %s %s %d %d" % (self.pos, load.content, load.weight, slot))
        # here, a factory can start unloading the slot

    def finish_unloading(self):
        # Remove the load from the pending slot, fire its callback, return it.
        assert self.status == Status.Unloading
        load = self.slots[self.data]
        self.slots[self.data] = None
        load.unload(self)
        self.log("unloaded %s %s" % (self.pos, load.content))
        self.set_idle()
return load | cart.py | import enum
import jarvisenv
class Status(enum.Enum):
    """Available cart statuses (the cart's finite-state machine states)."""
    Idle = 0       # ready to accept a new operation
    Moving = 1     # travelling toward Cart.data (the destination)
    Loading = 2    # taking Cart.data[0] into slot Cart.data[1]
    Unloading = 3  # emptying slot Cart.data
class Load:
    """A single transportable load: origin, destination, weight, and payload.

    ``onload``/``onunload`` are callback hooks fired when a cart picks the
    load up or drops it off.
    """
    def __init__(self, src, dst, weight, content):
        assert weight > 0
        self.src = src
        self.dst = dst
        self.weight = weight
        self.content = content
        self.onload = Load.just_pass_it
        self.onunload = Load.just_pass_it
        self.prio = False
        self.born = 0  # presumably a creation-time marker set by the owner — confirm
    def __str__(self):
        return '%sLoad(%s)' % ("Priority" if self.prio else "", self.content)
    def set_priority(self):
        """one way setting of the priority"""
        self.prio = True
    def load(self, cart_dev):
        """load itself, invoke callback"""
        if callable(self.onload):
            self.onload(cart_dev, self)
    def unload(self, cart_dev):
        """unload itself, invoke callback"""
        if callable(self.onunload):
            self.onunload(cart_dev, self)
    def just_pass_it(self, argument=None):
        """Dummy function for load and unload"""
class CartError(Exception):
    """Raised by Cart self-checks, e.g. starting an operation while busy."""
class Cart:
    """Cart device: a state machine (Idle/Moving/Loading/Unloading) with a
    fixed number of load slots and a declared maximum load capacity."""

    def __init__(self, nslots, load_capacity, debug_lvl=0):
        self.slots = [None] * nslots        # one Load (or None) per slot
        self.load_capacity = load_capacity  # max total weight the cart may carry
        self.status = Status.Idle
        self.data = None                    # operation payload: destination, (load, slot), or slot
        self.pos = None                     # current position; None until the first move completes
        self.debug_lvl = debug_lvl          # 0 = silent, 1 = events, >1 = events + full state
        self.onmove = Cart.just_pass_it     # hook invoked when a move starts

    def __str__(self):
        return 'Cart(pos=%s, %s, data=%s, maxload=%d, slots=%s)' % \
            (self.pos, self.status, self.data, self.load_capacity,
             self.slots)

    def just_pass_it(self, argument=None):
        """Dummy function for a move"""

    def log(self, msg):
        """a simple logger: prints events (and full state at debug_lvl > 1),
        timestamped with the simulation clock from jarvisenv."""
        if self.debug_lvl > 1:
            print(self)
        if self.debug_lvl > 0:
            print('%d %s' % (jarvisenv.time(), msg))

    def check_idle(self):
        # Guard used by every start_* operation: raise unless the cart is idle.
        if self.status != Status.Idle:
            raise CartError("Cart is busy: %s" % self.status)

    def empty(self):
        """returns True if cart has no load at all"""
        return self.slots == [None] * len(self.slots)

    def load_sum(self):
        """return sum of all loads"""
        sum_weight = 0
        for slot in self.slots:
            if slot:
                sum_weight += slot.weight
        return sum_weight
def get_prio_idx(self):
"""returns index of slot index with prioritized load or -1 if there is none"""
for i in range(len(self.slots)):
if self.slots[i].prio:
return i
return -1
    def check_free_slot(self, slot):
        """pass or raise an exception about invalid slot number"""
        # IndexError for an out-of-range index, ValueError if the slot is occupied.
        if slot < 0 or slot >= len(self.slots):
            raise IndexError("slot '%s' outside range [0;%d]" %
                             (slot, len(self.slots)))
        if self.slots[slot] is not None:
            raise ValueError("slot %d not empty: %s" %
                             (slot, self.slots[slot]))
def check_loaded_slot(self, slot):
"""pass or raise an exception about invalid slot when unloading"""
if slot < 0 or slot >= len(self.slots):
raise IndexError("slot '%s' outside range [0;%d]" %
(slot, len(self.slots)))
if self.slots[slot] is None:
raise ValueError("slot %d not empty: %s" %
(slot, self.slots[slot]))
    def get_free_slot(self):
        """returns index of free slot, or -1 if all slots are occupied"""
        for i in range(len(self.slots)):
            if self.slots[i] is None:
                return i
        return -1

    def set_idle(self):
        """helper function to idle the cart"""
        self.log("idle %s" % self.pos)
        self.status = Status.Idle
        self.data = None

    def start_moving(self, destination):
        # Begin a move; self.data holds the destination until finish_moving().
        self.log("moving %s %s" % (self.pos, destination))
        self.check_idle()
        self.status = Status.Moving
        self.data = destination
        if callable(self.onmove):
            self.onmove(self)

    def finish_moving(self):
        # self.log("finishing moving to %s" % self.data)
        assert self.status == Status.Moving
        self.pos = self.data
        self.set_idle()
        # self.log("finished")

    def start_loading(self, load: Load, slot):
        # Begin loading `load` into `slot`; requires an idle cart and a free slot.
        self.check_idle()
        self.check_free_slot(slot)
        self.status = Status.Loading
        self.data = (load, slot)
        self.log("loading %s %s %d %d" % (self.pos, load.content, load.weight, slot))
        # here, a factory can start loading to the slot

    def finish_loading(self):
        # Commit the pending load into its slot and fire the load's callback.
        assert self.status == Status.Loading
        load, slot = self.data
        self.slots[slot] = load
        load.load(self)
        self.log("loaded %s %s" % (self.pos, load.content))
        self.set_idle()
        return load

    def start_unloading(self, slot):
        # Begin unloading `slot`; requires an idle cart and an occupied slot.
        self.check_idle()
        self.check_loaded_slot(slot)
        self.status = Status.Unloading
        self.data = slot
        load = self.slots[slot]
        self.log("unloading %s %s %d %d" % (self.pos, load.content, load.weight, slot))
        # here, a factory can start unloading the slot

    def finish_unloading(self):
        # Remove the load from the pending slot, fire its callback, return it.
        assert self.status == Status.Unloading
        load = self.slots[self.data]
        self.slots[self.data] = None
        load.unload(self)
        self.log("unloaded %s %s" % (self.pos, load.content))
        self.set_idle()
return load | 0.707101 | 0.378057 |
from .deployment_utils import UniversumRunner
from .utils import python, simple_test_config
def test_minimal_install(clean_docker_main: UniversumRunner):
    """Smoke-test a full install: the module is importable and runs from a
    local directory, a git repo, and a p4 depot."""
    # Run without parameters
    log = clean_docker_main.environment.assert_unsuccessful_execution(f"{python()} -m universum")
    assert "No module named universum" not in log
    # Run locally
    log = clean_docker_main.run(simple_test_config, force_installed=True)
    assert clean_docker_main.local.repo_file.basename in log
    # Run from Git
    clean_docker_main.clean_artifacts()
    log = clean_docker_main.run(simple_test_config, vcs_type="git", force_installed=True)
    assert clean_docker_main.git.repo_file.basename in log
    # Run from P4
    clean_docker_main.clean_artifacts()
    log = clean_docker_main.run(simple_test_config, vcs_type="p4", force_installed=True)
    assert clean_docker_main.perforce.repo_file.basename in log
def test_minimal_install_with_git_only(clean_docker_main_no_p4: UniversumRunner, capsys):
    """Without p4 installed, a p4 run must fail with a docs pointer while a
    git run still works."""
    # Run from P4
    clean_docker_main_no_p4.run(simple_test_config, vcs_type="p4", force_installed=True, expected_to_fail=True)
    assert "Please refer to `Prerequisites` chapter of project documentation" in capsys.readouterr().out
    # Run from git
    clean_docker_main_no_p4.clean_artifacts()
    log = clean_docker_main_no_p4.run(simple_test_config, vcs_type="git", force_installed=True)
    assert clean_docker_main_no_p4.git.repo_file.basename in log
def test_minimal_install_plain_ubuntu(clean_docker_main_no_vcs: UniversumRunner, capsys):
    """On a VCS-less image both p4 and git runs fail with the prerequisites
    hint; only a local-sources run can succeed."""
    # Run from P4
    clean_docker_main_no_vcs.run(simple_test_config, vcs_type="p4", force_installed=True, expected_to_fail=True)
    assert "Please refer to `Prerequisites` chapter of project documentation" in capsys.readouterr().out
    # Run from Git
    clean_docker_main_no_vcs.run(simple_test_config, vcs_type="git", force_installed=True, expected_to_fail=True)
    assert "Please refer to `Prerequisites` chapter of project documentation" in capsys.readouterr().out
    # Run locally
    log = clean_docker_main_no_vcs.run(simple_test_config, force_installed=True)
assert clean_docker_main_no_vcs.local.repo_file.basename in log | tests/test_deployment.py | from .deployment_utils import UniversumRunner
from .utils import python, simple_test_config
def test_minimal_install(clean_docker_main: UniversumRunner):
    """Minimal install: module import fails gracefully without install, then
    runs succeed from local sources, Git and P4."""
    # Run without parameters
    log = clean_docker_main.environment.assert_unsuccessful_execution(f"{python()} -m universum")
    assert "No module named universum" not in log
    # Run locally
    log = clean_docker_main.run(simple_test_config, force_installed=True)
    assert clean_docker_main.local.repo_file.basename in log
    # Run from Git
    clean_docker_main.clean_artifacts()
    log = clean_docker_main.run(simple_test_config, vcs_type="git", force_installed=True)
    assert clean_docker_main.git.repo_file.basename in log
    # Run from P4
    clean_docker_main.clean_artifacts()
    log = clean_docker_main.run(simple_test_config, vcs_type="p4", force_installed=True)
    assert clean_docker_main.perforce.repo_file.basename in log
def test_minimal_install_with_git_only(clean_docker_main_no_p4: UniversumRunner, capsys):
    """Without P4 support a p4 run must fail with the prerequisites hint;
    a Git run still succeeds."""
    # Run from P4
    clean_docker_main_no_p4.run(simple_test_config, vcs_type="p4", force_installed=True, expected_to_fail=True)
    assert "Please refer to `Prerequisites` chapter of project documentation" in capsys.readouterr().out
    # Run from git
    clean_docker_main_no_p4.clean_artifacts()
    log = clean_docker_main_no_p4.run(simple_test_config, vcs_type="git", force_installed=True)
    assert clean_docker_main_no_p4.git.repo_file.basename in log
def test_minimal_install_plain_ubuntu(clean_docker_main_no_vcs: UniversumRunner, capsys):
    """On a VCS-less image both p4 and git runs fail with the prerequisites
    hint; only a local-sources run can succeed."""
    # Run from P4
    clean_docker_main_no_vcs.run(simple_test_config, vcs_type="p4", force_installed=True, expected_to_fail=True)
    assert "Please refer to `Prerequisites` chapter of project documentation" in capsys.readouterr().out
    # Run from Git
    clean_docker_main_no_vcs.run(simple_test_config, vcs_type="git", force_installed=True, expected_to_fail=True)
    assert "Please refer to `Prerequisites` chapter of project documentation" in capsys.readouterr().out
    # Run locally
    log = clean_docker_main_no_vcs.run(simple_test_config, force_installed=True)
assert clean_docker_main_no_vcs.local.repo_file.basename in log | 0.51562 | 0.330282 |
import gevent.monkey; gevent.monkey.patch_all()
import socket
import time
import gevent
import gevent.server
import gevent.socket
import gevent.queue
from cluster import ClusterManager
from dispatcher import DispatchClient
from task import Task
import util
import constants
class ScheduleError(Exception): pass  # raised when not every replica host acks a schedule request
class DistributedScheduler(object):
def __init__(self, queue, leader, replica_factor=2, replica_offset=5, interface=None,
port=6001, cluster_port=6000):
if interface is None:
interface = socket.gethostbyname(socket.gethostname())
self.interface = interface
self.port = port
self.dispatcher = DispatchClient(interface, self._dispatcher_event)
self.cluster = ClusterManager(leader, callback=self._cluster_update,
interface=interface, port=cluster_port)
self.backend = gevent.server.StreamServer((interface, port), self._backend_server)
self.peers = set()
self.connections = {}
self.queue = queue
self.scheduled = {}
self.scheduled_acks = {}
self.schedules = 0
self.replica_factor = replica_factor
self.replica_offset = replica_offset
def start(self):
self.dispatcher.start()
self.backend.start()
self.cluster.start()
def schedule(self, task):
host_list = list(self.peers)
# This implements the round-robin N replication method for picking
# which hosts to send the task. In short, every schedule moves along the
# cluster ring by one, then picks N hosts, where N is level of replication
replication_factor = min(self.replica_factor, len(host_list))
host_ids = [(self.schedules + n) % len(host_list) for n in xrange(replication_factor)]
hosts = [host_list[id] for id in host_ids]
task.replica_hosts = hosts
self.scheduled_acks[task.id] = gevent.queue.Queue()
for host in hosts:
self.connections[host].send('schedule:%s\n' % task.serialize())
task.replica_offset += self.replica_offset
try:
# TODO: document, wrap this whole operation in timeout
return all([self.scheduled_acks[task.id].get(timeout=2) for h in hosts])
except gevent.queue.Empty:
raise ScheduleError("not all hosts acked")
finally:
self.schedules += 1
self.scheduled_acks.pop(task.id)
def _cluster_update(self, hosts):
add_hosts = hosts - self.peers
remove_hosts = self.peers - hosts
for host in remove_hosts:
print "disconnecting from peer %s" % host
gevent.spawn(self._remove_peer, host)
for host in add_hosts:
print "connecting to peer %s" % (host)
gevent.spawn(self._add_peer, host)
self.peers = hosts
def _add_peer(self, host):
client = gevent.socket.create_connection((host, self.port), source_address=(self.interface, 0))
self.connections[host] = client
for line in util.line_protocol(client):
ack, task_id = line.split(':', 1)
if ack == 'scheduled' and task_id in self.scheduled_acks:
self.scheduled_acks[task_id].put(True)
print "disconnected from peer %s" % host
self._remove_peer(host)
def _remove_peer(self, host):
if host in self.connections:
peer = self.connections.pop(host)
try:
peer.shutdown(0)
except:
pass
def _dispatcher_event(self, event, payload):
if event == 'start':
task = self.scheduled[payload]
eta = int(time.time() + constants.WORKER_TIMEOUT)
self._sendto_replicas(task, 'reschedule:%s:%s\n' % (task.id, eta))
elif event == 'success':
task = self.scheduled[payload]
self._sendto_replicas(task, 'cancel:%s\n' % task.id)
self.scheduled.pop(task.id)
elif event == 'failure':
task_id, reason = payload.split(':', 1)
self.scheduled.pop(task.id)
print "FAILURE %s: %s" % (task_id, reason)
def _sendto_replicas(self, task, message):
other_replica_hosts = set(task.replica_hosts) - set([self.interface])
for host in other_replica_hosts:
if host in self.connections:
self.connections[host].send(message)
def _backend_server(self, socket, address):
for line in util.line_protocol(socket):
action, payload = line.split(':', 1)
if action == 'schedule':
task = Task.unserialize(payload)
task.schedule(self.dispatcher)
self.scheduled[task.id] = task
socket.send('scheduled:%s\n' % task.id)
print "scheduled: %s" % task.id
elif action == 'cancel':
task_id = payload
print "canceled: %s" % task_id
self.scheduled.pop(task_id).cancel()
elif action == 'reschedule':
task_id, eta = payload.split(':', 1)
eta = int(eta)
print "rescheduled: %s for %s" % (task_id, eta)
self.scheduled[task_id].reschedule(self.dispatcher, eta) | miyamoto/scheduler.py | import gevent.monkey; gevent.monkey.patch_all()
import socket
import time
import gevent
import gevent.server
import gevent.socket
import gevent.queue
from cluster import ClusterManager
from dispatcher import DispatchClient
from task import Task
import util
import constants
class ScheduleError(Exception): pass  # raised when not every replica host acks a schedule request
class DistributedScheduler(object):
def __init__(self, queue, leader, replica_factor=2, replica_offset=5, interface=None,
port=6001, cluster_port=6000):
if interface is None:
interface = socket.gethostbyname(socket.gethostname())
self.interface = interface
self.port = port
self.dispatcher = DispatchClient(interface, self._dispatcher_event)
self.cluster = ClusterManager(leader, callback=self._cluster_update,
interface=interface, port=cluster_port)
self.backend = gevent.server.StreamServer((interface, port), self._backend_server)
self.peers = set()
self.connections = {}
self.queue = queue
self.scheduled = {}
self.scheduled_acks = {}
self.schedules = 0
self.replica_factor = replica_factor
self.replica_offset = replica_offset
def start(self):
self.dispatcher.start()
self.backend.start()
self.cluster.start()
def schedule(self, task):
host_list = list(self.peers)
# This implements the round-robin N replication method for picking
# which hosts to send the task. In short, every schedule moves along the
# cluster ring by one, then picks N hosts, where N is level of replication
replication_factor = min(self.replica_factor, len(host_list))
host_ids = [(self.schedules + n) % len(host_list) for n in xrange(replication_factor)]
hosts = [host_list[id] for id in host_ids]
task.replica_hosts = hosts
self.scheduled_acks[task.id] = gevent.queue.Queue()
for host in hosts:
self.connections[host].send('schedule:%s\n' % task.serialize())
task.replica_offset += self.replica_offset
try:
# TODO: document, wrap this whole operation in timeout
return all([self.scheduled_acks[task.id].get(timeout=2) for h in hosts])
except gevent.queue.Empty:
raise ScheduleError("not all hosts acked")
finally:
self.schedules += 1
self.scheduled_acks.pop(task.id)
def _cluster_update(self, hosts):
add_hosts = hosts - self.peers
remove_hosts = self.peers - hosts
for host in remove_hosts:
print "disconnecting from peer %s" % host
gevent.spawn(self._remove_peer, host)
for host in add_hosts:
print "connecting to peer %s" % (host)
gevent.spawn(self._add_peer, host)
self.peers = hosts
def _add_peer(self, host):
client = gevent.socket.create_connection((host, self.port), source_address=(self.interface, 0))
self.connections[host] = client
for line in util.line_protocol(client):
ack, task_id = line.split(':', 1)
if ack == 'scheduled' and task_id in self.scheduled_acks:
self.scheduled_acks[task_id].put(True)
print "disconnected from peer %s" % host
self._remove_peer(host)
def _remove_peer(self, host):
if host in self.connections:
peer = self.connections.pop(host)
try:
peer.shutdown(0)
except:
pass
def _dispatcher_event(self, event, payload):
if event == 'start':
task = self.scheduled[payload]
eta = int(time.time() + constants.WORKER_TIMEOUT)
self._sendto_replicas(task, 'reschedule:%s:%s\n' % (task.id, eta))
elif event == 'success':
task = self.scheduled[payload]
self._sendto_replicas(task, 'cancel:%s\n' % task.id)
self.scheduled.pop(task.id)
elif event == 'failure':
task_id, reason = payload.split(':', 1)
self.scheduled.pop(task.id)
print "FAILURE %s: %s" % (task_id, reason)
def _sendto_replicas(self, task, message):
other_replica_hosts = set(task.replica_hosts) - set([self.interface])
for host in other_replica_hosts:
if host in self.connections:
self.connections[host].send(message)
def _backend_server(self, socket, address):
for line in util.line_protocol(socket):
action, payload = line.split(':', 1)
if action == 'schedule':
task = Task.unserialize(payload)
task.schedule(self.dispatcher)
self.scheduled[task.id] = task
socket.send('scheduled:%s\n' % task.id)
print "scheduled: %s" % task.id
elif action == 'cancel':
task_id = payload
print "canceled: %s" % task_id
self.scheduled.pop(task_id).cancel()
elif action == 'reschedule':
task_id, eta = payload.split(':', 1)
eta = int(eta)
print "rescheduled: %s for %s" % (task_id, eta)
self.scheduled[task_id].reschedule(self.dispatcher, eta) | 0.222447 | 0.071689 |
from tenable.errors import *
from ..checker import check, single
from datetime import date
import pytest
@pytest.mark.vcr()
def test_families(api):
    """Family listing returns count/id/name for every plugin family."""
    families = api.plugins.families()
    assert isinstance(families, list)
    for f in families:
        check(f, 'count', int)
        check(f, 'id', int)
        check(f, 'name', str)
@pytest.mark.vcr()
def test_family_details_family_id_typeerror(api):
    """family_details must reject a non-integer family id."""
    with pytest.raises(TypeError):
        api.plugins.family_details('nope')
@pytest.mark.vcr()
def test_family_details(api):
    """family_details returns the family dict with its plugin list."""
    f = api.plugins.family_details(27)
    assert isinstance(f, dict)
    check(f, 'name', str)
    check(f, 'id', int)
    check(f, 'plugins', list)
    for p in f['plugins']:
        check(p, 'id', int)
        check(p, 'name', str)
    assert f['id'] == 27
@pytest.mark.vcr()
def test_plugin_details_plugin_id_typerror(api):
    """plugin_details must reject a non-integer plugin id."""
    with pytest.raises(TypeError):
        api.plugins.plugin_details('nope')
@pytest.mark.vcr()
def test_plugin_details(api):
    """plugin_details returns the attribute list for plugin 19506."""
    p = api.plugins.plugin_details(19506)
    assert isinstance(p, dict)
    check(p, 'attributes', list)
    for a in p['attributes']:
        check(a, 'attribute_name', str)
        check(a, 'attribute_value', str)
    check(p, 'family_name', str)
    check(p, 'id', int)
    check(p, 'name', str)
    assert p['id'] == 19506
@pytest.mark.vcr()
def test_plugins_list_page_typeerror(api):
    """plugins.list must reject a non-integer page."""
    with pytest.raises(TypeError):
        api.plugins.list(page='one')
@pytest.mark.vcr()
def test_plugins_list_size_typeerror(api):
    """plugins.list must reject a non-integer size."""
    with pytest.raises(TypeError):
        api.plugins.list(size='one')
@pytest.mark.vcr()
def test_plugins_list_last_updated_date_typeerror(api):
    """plugins.list must reject a non-date last_updated."""
    with pytest.raises(TypeError):
        api.plugins.list(last_updated=1)
@pytest.mark.vcr()
def test_plugins_list_num_pages_typeerror(api):
    """plugins.list must reject a non-integer num_pages."""
    with pytest.raises(TypeError):
        api.plugins.list(num_pages='one')
@pytest.mark.vcr()
def test_plugins_list_success(api):
    """Paged plugin listing yields fully-populated plugin records."""
    plugins = api.plugins.list(
        last_updated=date(2019, 1, 1),
        num_pages=2,
        size=10)
    for p in plugins:
        check(p, 'attributes', dict)
        check(p['attributes'], 'description', str)
        check(p['attributes'], 'plugin_publication_date', str)
        check(p['attributes'], 'plugin_modification_date', str)
        check(p['attributes'], 'plugin_version', str)
        check(p['attributes'], 'synopsis', str)
        check(p['attributes'], 'risk_factor', str)
        check(p, 'id', int)
check(p, 'name', str) | tests/io/test_plugins.py | from tenable.errors import *
from ..checker import check, single
from datetime import date
import pytest
@pytest.mark.vcr()
def test_families(api):
    """Family listing returns count/id/name for every plugin family."""
    families = api.plugins.families()
    assert isinstance(families, list)
    for f in families:
        check(f, 'count', int)
        check(f, 'id', int)
        check(f, 'name', str)
@pytest.mark.vcr()
def test_family_details_family_id_typeerror(api):
    """family_details must reject a non-integer family id."""
    with pytest.raises(TypeError):
        api.plugins.family_details('nope')
@pytest.mark.vcr()
def test_family_details(api):
    """family_details returns the family dict with its plugin list."""
    f = api.plugins.family_details(27)
    assert isinstance(f, dict)
    check(f, 'name', str)
    check(f, 'id', int)
    check(f, 'plugins', list)
    for p in f['plugins']:
        check(p, 'id', int)
        check(p, 'name', str)
    assert f['id'] == 27
@pytest.mark.vcr()
def test_plugin_details_plugin_id_typerror(api):
    """plugin_details must reject a non-integer plugin id."""
    with pytest.raises(TypeError):
        api.plugins.plugin_details('nope')
@pytest.mark.vcr()
def test_plugin_details(api):
    """plugin_details returns the attribute list for plugin 19506."""
    p = api.plugins.plugin_details(19506)
    assert isinstance(p, dict)
    check(p, 'attributes', list)
    for a in p['attributes']:
        check(a, 'attribute_name', str)
        check(a, 'attribute_value', str)
    check(p, 'family_name', str)
    check(p, 'id', int)
    check(p, 'name', str)
    assert p['id'] == 19506
@pytest.mark.vcr()
def test_plugins_list_page_typeerror(api):
    """plugins.list must reject a non-integer page."""
    with pytest.raises(TypeError):
        api.plugins.list(page='one')
@pytest.mark.vcr()
def test_plugins_list_size_typeerror(api):
    """plugins.list must reject a non-integer size."""
    with pytest.raises(TypeError):
        api.plugins.list(size='one')
@pytest.mark.vcr()
def test_plugins_list_last_updated_date_typeerror(api):
    """plugins.list must reject a non-date last_updated."""
    with pytest.raises(TypeError):
        api.plugins.list(last_updated=1)
@pytest.mark.vcr()
def test_plugins_list_num_pages_typeerror(api):
    """plugins.list must reject a non-integer num_pages."""
    with pytest.raises(TypeError):
        api.plugins.list(num_pages='one')
@pytest.mark.vcr()
def test_plugins_list_success(api):
    """Paged plugin listing yields fully-populated plugin records."""
    plugins = api.plugins.list(
        last_updated=date(2019, 1, 1),
        num_pages=2,
        size=10)
    for p in plugins:
        check(p, 'attributes', dict)
        check(p['attributes'], 'description', str)
        check(p['attributes'], 'plugin_publication_date', str)
        check(p['attributes'], 'plugin_modification_date', str)
        check(p['attributes'], 'plugin_version', str)
        check(p['attributes'], 'synopsis', str)
        check(p['attributes'], 'risk_factor', str)
        check(p, 'id', int)
check(p, 'name', str) | 0.501465 | 0.378603 |
import numpy as np
import matplotlib.pyplot as plt
from numpy.polynomial.legendre import leggauss
from quadr import lglnodes,equispaced
def lagrange_basis(nodes,x,k):
    """Evaluate the k-th Lagrange basis polynomial over *nodes* at each point of *x*."""
    values = np.zeros(x.size)
    for idx in range(x.size):
        point = x[idx]
        acc = 1.0
        for j, node in enumerate(nodes):
            if j != k:
                acc *= (point - node) / (nodes[k] - node)
        values[idx] = acc
    return values
def get_nodes(order,nodes_type):
    """Return (nodes, weights) for an *order*-point quadrature rule.

    "equispaced" and "gaussLobatto" are mapped onto [0, 1];
    "gaussLegendre" returns the raw leggauss rule on [-1, 1].

    Raises:
        ValueError: for an unknown *nodes_type* (previously this fell
        through to an UnboundLocalError on the return statement).
    """
    if nodes_type=="equispaced":
        nodes,w = equispaced(order)
    elif nodes_type == "gaussLegendre":
        nodes,w = leggauss(order)
    elif nodes_type == "gaussLobatto":
        nodes, w = lglnodes(order-1,10**-15)
        # lglnodes works on [-1, 1]; rescale nodes and weights to [0, 1].
        nodes=nodes*0.5+0.5
        w = w*0.5
    else:
        raise ValueError("unknown nodes_type: %r" % (nodes_type,))
    return nodes, w
def compute_theta_DeC(order, nodes_type):
    """Compute the DeC coefficients for *order* nodes of family *nodes_type*.

    beta[m] is the m-th node; theta[r, m] approximates the integral of the
    r-th Lagrange basis over [0, nodes[m]], evaluated with a Gauss-Lobatto
    rule rescaled to that subinterval.
    """
    nodes, w = get_nodes(order,nodes_type)
    int_nodes, int_w = get_nodes(order,"gaussLobatto")
    # generate theta coefficients
    theta = np.zeros((order,order))
    beta = np.zeros(order)
    for m in range(order):
        beta[m] = nodes[m]
        # quadrature nodes/weights rescaled from [0, 1] to [0, nodes[m]]
        nodes_m = int_nodes*(nodes[m])
        w_m = int_w*(nodes[m])
        for r in range(order):
            theta[r,m] = sum(lagrange_basis(nodes,nodes_m,r)*w_m)
    return theta, beta
def compute_RK_from_DeC(M_sub,K_corr,nodes_type):
    """Unfold the DeC iteration (M_sub subtimesteps, K_corr corrections)
    into an equivalent explicit Runge-Kutta Butcher tableau (A, b, c)
    with M_sub*(K_corr-1)+1 stages."""
    order=M_sub+1;
    [theta,beta]=compute_theta_DeC(order,nodes_type)
    bar_beta=beta[1:] # M_sub
    bar_theta=theta[:,1:].transpose() # M_sub x (M_sub +1)
    theta0= bar_theta[:,0] # M_sub x 1
    bar_theta= bar_theta[:,1:] #M_sub x M_sub
    A=np.zeros((M_sub*(K_corr-1)+1,M_sub*(K_corr-1)+1)) # (M_sub x K_corr +1)^2
    b=np.zeros(M_sub*(K_corr-1)+1)
    c=np.zeros(M_sub*(K_corr-1)+1)
    c[1:M_sub+1]=bar_beta
    A[1:M_sub+1,0]=bar_beta
    for k in range(1,K_corr-1):
        # stage block produced by correction sweep k
        r0=1+M_sub*k
        r1=1+M_sub*(k+1)
        c0=1+M_sub*(k-1)
        c1=1+M_sub*(k)
        c[r0:r1]=bar_beta
        A[r0:r1,0]=theta0
        A[r0:r1,c0:c1]=bar_theta
    b[0]=theta0[-1]
    b[-M_sub:]=bar_theta[M_sub-1,:]
    return A,b,c
def dec(func, tspan, y_0, M_sub, K_corr, distribution):
    """Explicit Deferred Correction ODE integrator.

    Args:
        func: right-hand side, called as func(u) on a state vector.
        tspan: array of time points; U[:, i] approximates y(tspan[i]).
        y_0: initial state of length dim.
        M_sub: number of subtimesteps (M_sub + 1 nodes per step).
        K_corr: number of correction sweeps per time step.
        distribution: node family name understood by get_nodes().
    Returns:
        (tspan, U) with U of shape (dim, len(tspan)).
    """
    N_time=len(tspan)
    dim=len(y_0)
    U=np.zeros((dim, N_time))
    u_p=np.zeros((dim, M_sub+1))
    u_a=np.zeros((dim, M_sub+1))
    rhs= np.zeros((dim,M_sub+1))
    Theta, beta = compute_theta_DeC(M_sub+1,distribution)
    U[:,0]=y_0
    for it in range(1, N_time):
        delta_t=(tspan[it]-tspan[it-1])
        # initialize all subtimestep stages with the previous solution
        for m in range(M_sub+1):
            u_a[:,m]=U[:,it-1]
            u_p[:,m]=U[:,it-1]
        for k in range(1,K_corr+1):
            u_p=np.copy(u_a)
            for r in range(M_sub+1):
                rhs[:,r]=func(u_p[:,r])
            # correction sweep: quadrature of the rhs with theta weights
            for m in range(1,M_sub+1):
                u_a[:,m]= U[:,it-1]+delta_t*sum([Theta[r,m]*rhs[:,r] for r in range(M_sub+1)])
        U[:,it]=u_a[:,M_sub]
    return tspan, U
def decImplicit(func,jac_stiff, tspan, y_0, M_sub, K_corr, distribution):
    """Semi-implicit Deferred Correction integrator for stiff problems.

    Like dec(), but each correction is preconditioned with
    (I - dt*beta_m*J)^-1 where J = jac_stiff(u) is the stiff Jacobian
    evaluated once per time step at the first node.
    """
    N_time=len(tspan)
    dim=len(y_0)
    U=np.zeros((dim, N_time))
    u_p=np.zeros((dim, M_sub+1))
    u_a=np.zeros((dim, M_sub+1))
    u_help= np.zeros(dim)
    rhs= np.zeros((dim,M_sub+1))
    Theta, beta = compute_theta_DeC(M_sub+1,distribution)
    invJac=np.zeros((M_sub+1,dim,dim))
    U[:,0]=y_0
    for it in range(1, N_time):
        delta_t=(tspan[it]-tspan[it-1])
        for m in range(M_sub+1):
            u_a[:,m]=U[:,it-1]
            u_p[:,m]=U[:,it-1]
        # Jacobian frozen at the step's initial state for all sweeps
        SS=jac_stiff(u_p[:,0])
        for m in range(1,M_sub+1):
            invJac[m,:,:]=np.linalg.inv(np.eye(dim) - delta_t*beta[m]*SS)
        for k in range(1,K_corr+1):
            u_p=np.copy(u_a)
            for r in range(M_sub+1):
                rhs[:,r]=func(u_p[:,r])
            for m in range(1,M_sub+1):
                u_a[:,m]= u_p[:,m]+delta_t*np.matmul(invJac[m,:,:],\
                         (-(u_p[:,m]-u_p[:,0])/delta_t\
                         +sum([Theta[r,m]*rhs[:,r] for r in range(M_sub+1)])))
        U[:,it]=u_a[:,M_sub]
    return tspan, U
def decMPatankar(prod_dest, rhs, tspan, y_0, M_sub, K_corr, distribution):
    """Modified-Patankar Deferred Correction integrator.

    prod_dest(u) must return the (dim x dim) production and destruction
    matrices; rhs(u) the remaining right-hand side. Each correction solves
    the Patankar-weighted linear system assembled by patankar_type_dec().
    """
    N_time=len(tspan)
    dim=len(y_0)
    U=np.zeros((dim, N_time))
    u_p=np.zeros((dim, M_sub+1))
    u_a=np.zeros((dim, M_sub+1))
    prod_p = np.zeros((dim,dim,M_sub+1))
    dest_p = np.zeros((dim,dim,M_sub+1))
    rhs_p= np.zeros((dim,M_sub+1))
    Theta, beta = compute_theta_DeC(M_sub+1,distribution)
    U[:,0]=y_0
    for it in range(1, N_time):
        delta_t=(tspan[it]-tspan[it-1])
        for m in range(M_sub+1):
            u_a[:,m]=U[:,it-1]
            u_p[:,m]=U[:,it-1]
        for k in range(1,K_corr+1):
            u_p=np.copy(u_a)
            for r in range(M_sub+1):
                prod_p[:,:,r], dest_p[:,:,r]=prod_dest(u_p[:,r])
                rhs_p[:,r]=rhs(u_p[:,r])
            for m in range(1,M_sub+1):
                u_a[:,m]= patankar_type_dec(prod_p,dest_p,rhs_p,delta_t,m,M_sub,Theta,u_p,dim)
        U[:,it]=u_a[:,M_sub]
    return tspan, U
def patankar_type_dec(prod_p,dest_p,rhs_p,delta_t,m,M_sub,Theta,u_p,dim):
    """Assemble and solve the modified-Patankar system for substep m.

    Production/destruction terms are folded into the mass matrix with
    Patankar weights (divided by the current stage values) so the solve
    preserves positivity.
    """
    mass= np.eye(dim)
    # Bug fix: u_p[:,0] is a NumPy *view* into u_p; the in-place
    # accumulation below used to mutate the caller's stage values.
    # Work on an explicit copy instead.
    RHS= np.copy(u_p[:,0])
    for i in range(dim):
        for r in range(M_sub+1):
            RHS[i]=RHS[i]+delta_t*Theta[r,m]*rhs_p[i,r]
            if Theta[r,m]>0:
                for j in range(dim):
                    mass[i,j]=mass[i,j]-delta_t*Theta[r,m]*(prod_p[i,j,r]/u_p[j,m])
                    mass[i,i]=mass[i,i]+ delta_t*Theta[r,m]*(dest_p[i,j,r]/u_p[i,m])
            elif Theta[r,m]<0:
                for j in range(dim):
                    mass[i,i]=mass[i,i]- delta_t*Theta[r,m]*(prod_p[i,j,r]/u_p[i,m])
                    mass[i,j]=mass[i,j]+ delta_t*Theta[r,m]*(dest_p[i,j,r]/u_p[j,m])
return np.linalg.solve(mass,RHS) | pythonCodes/DeC.py | import numpy as np
import matplotlib.pyplot as plt
from numpy.polynomial.legendre import leggauss
from quadr import lglnodes,equispaced
def lagrange_basis(nodes,x,k):
    """Evaluate the k-th Lagrange basis polynomial over *nodes* at each point of *x*."""
    values = np.zeros(x.size)
    for idx in range(x.size):
        point = x[idx]
        acc = 1.0
        for j, node in enumerate(nodes):
            if j != k:
                acc *= (point - node) / (nodes[k] - node)
        values[idx] = acc
    return values
def get_nodes(order,nodes_type):
    """Return (nodes, weights) for an *order*-point quadrature rule.

    "equispaced" and "gaussLobatto" are mapped onto [0, 1];
    "gaussLegendre" returns the raw leggauss rule on [-1, 1].

    Raises:
        ValueError: for an unknown *nodes_type* (previously this fell
        through to an UnboundLocalError on the return statement).
    """
    if nodes_type=="equispaced":
        nodes,w = equispaced(order)
    elif nodes_type == "gaussLegendre":
        nodes,w = leggauss(order)
    elif nodes_type == "gaussLobatto":
        nodes, w = lglnodes(order-1,10**-15)
        # lglnodes works on [-1, 1]; rescale nodes and weights to [0, 1].
        nodes=nodes*0.5+0.5
        w = w*0.5
    else:
        raise ValueError("unknown nodes_type: %r" % (nodes_type,))
    return nodes, w
def compute_theta_DeC(order, nodes_type):
    """Compute the DeC coefficients for *order* nodes of family *nodes_type*.

    beta[m] is the m-th node; theta[r, m] approximates the integral of the
    r-th Lagrange basis over [0, nodes[m]], evaluated with a Gauss-Lobatto
    rule rescaled to that subinterval.
    """
    nodes, w = get_nodes(order,nodes_type)
    int_nodes, int_w = get_nodes(order,"gaussLobatto")
    # generate theta coefficients
    theta = np.zeros((order,order))
    beta = np.zeros(order)
    for m in range(order):
        beta[m] = nodes[m]
        # quadrature nodes/weights rescaled from [0, 1] to [0, nodes[m]]
        nodes_m = int_nodes*(nodes[m])
        w_m = int_w*(nodes[m])
        for r in range(order):
            theta[r,m] = sum(lagrange_basis(nodes,nodes_m,r)*w_m)
    return theta, beta
def compute_RK_from_DeC(M_sub,K_corr,nodes_type):
    """Unfold the DeC iteration (M_sub subtimesteps, K_corr corrections)
    into an equivalent explicit Runge-Kutta Butcher tableau (A, b, c)
    with M_sub*(K_corr-1)+1 stages."""
    order=M_sub+1;
    [theta,beta]=compute_theta_DeC(order,nodes_type)
    bar_beta=beta[1:] # M_sub
    bar_theta=theta[:,1:].transpose() # M_sub x (M_sub +1)
    theta0= bar_theta[:,0] # M_sub x 1
    bar_theta= bar_theta[:,1:] #M_sub x M_sub
    A=np.zeros((M_sub*(K_corr-1)+1,M_sub*(K_corr-1)+1)) # (M_sub x K_corr +1)^2
    b=np.zeros(M_sub*(K_corr-1)+1)
    c=np.zeros(M_sub*(K_corr-1)+1)
    c[1:M_sub+1]=bar_beta
    A[1:M_sub+1,0]=bar_beta
    for k in range(1,K_corr-1):
        # stage block produced by correction sweep k
        r0=1+M_sub*k
        r1=1+M_sub*(k+1)
        c0=1+M_sub*(k-1)
        c1=1+M_sub*(k)
        c[r0:r1]=bar_beta
        A[r0:r1,0]=theta0
        A[r0:r1,c0:c1]=bar_theta
    b[0]=theta0[-1]
    b[-M_sub:]=bar_theta[M_sub-1,:]
    return A,b,c
def dec(func, tspan, y_0, M_sub, K_corr, distribution):
    """Explicit Deferred Correction ODE integrator.

    Args:
        func: right-hand side, called as func(u) on a state vector.
        tspan: array of time points; U[:, i] approximates y(tspan[i]).
        y_0: initial state of length dim.
        M_sub: number of subtimesteps (M_sub + 1 nodes per step).
        K_corr: number of correction sweeps per time step.
        distribution: node family name understood by get_nodes().
    Returns:
        (tspan, U) with U of shape (dim, len(tspan)).
    """
    N_time=len(tspan)
    dim=len(y_0)
    U=np.zeros((dim, N_time))
    u_p=np.zeros((dim, M_sub+1))
    u_a=np.zeros((dim, M_sub+1))
    rhs= np.zeros((dim,M_sub+1))
    Theta, beta = compute_theta_DeC(M_sub+1,distribution)
    U[:,0]=y_0
    for it in range(1, N_time):
        delta_t=(tspan[it]-tspan[it-1])
        # initialize all subtimestep stages with the previous solution
        for m in range(M_sub+1):
            u_a[:,m]=U[:,it-1]
            u_p[:,m]=U[:,it-1]
        for k in range(1,K_corr+1):
            u_p=np.copy(u_a)
            for r in range(M_sub+1):
                rhs[:,r]=func(u_p[:,r])
            # correction sweep: quadrature of the rhs with theta weights
            for m in range(1,M_sub+1):
                u_a[:,m]= U[:,it-1]+delta_t*sum([Theta[r,m]*rhs[:,r] for r in range(M_sub+1)])
        U[:,it]=u_a[:,M_sub]
    return tspan, U
def decImplicit(func,jac_stiff, tspan, y_0, M_sub, K_corr, distribution):
    """Semi-implicit Deferred Correction integrator for stiff problems.

    Like dec(), but each correction is preconditioned with
    (I - dt*beta_m*J)^-1 where J = jac_stiff(u) is the stiff Jacobian
    evaluated once per time step at the first node.
    """
    N_time=len(tspan)
    dim=len(y_0)
    U=np.zeros((dim, N_time))
    u_p=np.zeros((dim, M_sub+1))
    u_a=np.zeros((dim, M_sub+1))
    u_help= np.zeros(dim)
    rhs= np.zeros((dim,M_sub+1))
    Theta, beta = compute_theta_DeC(M_sub+1,distribution)
    invJac=np.zeros((M_sub+1,dim,dim))
    U[:,0]=y_0
    for it in range(1, N_time):
        delta_t=(tspan[it]-tspan[it-1])
        for m in range(M_sub+1):
            u_a[:,m]=U[:,it-1]
            u_p[:,m]=U[:,it-1]
        # Jacobian frozen at the step's initial state for all sweeps
        SS=jac_stiff(u_p[:,0])
        for m in range(1,M_sub+1):
            invJac[m,:,:]=np.linalg.inv(np.eye(dim) - delta_t*beta[m]*SS)
        for k in range(1,K_corr+1):
            u_p=np.copy(u_a)
            for r in range(M_sub+1):
                rhs[:,r]=func(u_p[:,r])
            for m in range(1,M_sub+1):
                u_a[:,m]= u_p[:,m]+delta_t*np.matmul(invJac[m,:,:],\
                         (-(u_p[:,m]-u_p[:,0])/delta_t\
                         +sum([Theta[r,m]*rhs[:,r] for r in range(M_sub+1)])))
        U[:,it]=u_a[:,M_sub]
    return tspan, U
def decMPatankar(prod_dest, rhs, tspan, y_0, M_sub, K_corr, distribution):
    """Modified-Patankar Deferred Correction integrator.

    prod_dest(u) must return the (dim x dim) production and destruction
    matrices; rhs(u) the remaining right-hand side. Each correction solves
    the Patankar-weighted linear system assembled by patankar_type_dec().
    """
    N_time=len(tspan)
    dim=len(y_0)
    U=np.zeros((dim, N_time))
    u_p=np.zeros((dim, M_sub+1))
    u_a=np.zeros((dim, M_sub+1))
    prod_p = np.zeros((dim,dim,M_sub+1))
    dest_p = np.zeros((dim,dim,M_sub+1))
    rhs_p= np.zeros((dim,M_sub+1))
    Theta, beta = compute_theta_DeC(M_sub+1,distribution)
    U[:,0]=y_0
    for it in range(1, N_time):
        delta_t=(tspan[it]-tspan[it-1])
        for m in range(M_sub+1):
            u_a[:,m]=U[:,it-1]
            u_p[:,m]=U[:,it-1]
        for k in range(1,K_corr+1):
            u_p=np.copy(u_a)
            for r in range(M_sub+1):
                prod_p[:,:,r], dest_p[:,:,r]=prod_dest(u_p[:,r])
                rhs_p[:,r]=rhs(u_p[:,r])
            for m in range(1,M_sub+1):
                u_a[:,m]= patankar_type_dec(prod_p,dest_p,rhs_p,delta_t,m,M_sub,Theta,u_p,dim)
        U[:,it]=u_a[:,M_sub]
    return tspan, U
def patankar_type_dec(prod_p,dest_p,rhs_p,delta_t,m,M_sub,Theta,u_p,dim):
    """Assemble and solve the modified-Patankar system for substep m.

    Production/destruction terms are folded into the mass matrix with
    Patankar weights (divided by the current stage values) so the solve
    preserves positivity.
    """
    mass= np.eye(dim)
    # Bug fix: u_p[:,0] is a NumPy *view* into u_p; the in-place
    # accumulation below used to mutate the caller's stage values.
    # Work on an explicit copy instead.
    RHS= np.copy(u_p[:,0])
    for i in range(dim):
        for r in range(M_sub+1):
            RHS[i]=RHS[i]+delta_t*Theta[r,m]*rhs_p[i,r]
            if Theta[r,m]>0:
                for j in range(dim):
                    mass[i,j]=mass[i,j]-delta_t*Theta[r,m]*(prod_p[i,j,r]/u_p[j,m])
                    mass[i,i]=mass[i,i]+ delta_t*Theta[r,m]*(dest_p[i,j,r]/u_p[i,m])
            elif Theta[r,m]<0:
                for j in range(dim):
                    mass[i,i]=mass[i,i]- delta_t*Theta[r,m]*(prod_p[i,j,r]/u_p[i,m])
                    mass[i,j]=mass[i,j]+ delta_t*Theta[r,m]*(dest_p[i,j,r]/u_p[j,m])
    return np.linalg.solve(mass,RHS)
# Copyright 2019 <NAME>
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import random
import torch as th
import torch.nn.functional as tf
from typing import Tuple, Union
def tf_mask(batch: int,
            shape: Tuple[int],
            p: float = 1.0,
            max_bands: int = 30,
            max_frame: int = 40,
            num_freq_masks: int = 2,
            num_time_masks: int = 2,
            device: Union[str, th.device] = "cpu") -> th.Tensor:
    """
    Return a batch of SpecAugment-style time-frequency masks.
    Args:
        batch: batch size, N
        shape: (T x F) of each spectrogram
        p: fraction of T allowed as the maximum time-mask width
        max_bands: maximum width of one frequency mask (clamped to F)
        max_frame: maximum width of one time mask (clamped to T * p)
        num_freq_masks: number of frequency masks per sample
        num_time_masks: number of time masks per sample
        device: device on which the masks are created
    Return:
        masks (Tensor): 0,1 masks, N x T x F
    """
    T, F = shape
    # clamp the mask widths so they can never exceed the spectrogram
    max_frame = min(max_frame, int(T * p))
    max_bands = min(max_bands, F)
    mask = []
    for _ in range(batch):
        fmask = random_mask(shape,
                            max_steps=max_bands,
                            num_masks=num_freq_masks,
                            order="freq",
                            device=device)
        tmask = random_mask(shape,
                            max_steps=max_frame,
                            num_masks=num_time_masks,
                            order="time",
                            device=device)
        # combine: a cell is kept only if neither mask zeroed it
        mask.append(fmask * tmask)
    # N x T x F
    return th.stack(mask)
def random_mask(shape: Tuple[int],
                max_steps: int = 30,
                num_masks: int = 2,
                order: str = "freq",
                device: Union[str, th.device] = "cpu") -> th.Tensor:
    """
    Generate random 0/1 masks along the time or frequency axis.
    Args:
        shape: (T, F)
        max_steps: upper bound (exclusive) on one mask's width
        num_masks: number of masked stripes to draw
        order: "freq" masks columns, "time" masks rows
    Return:
        masks (Tensor): 0,1 masks, T x F
    """
    if order not in ["time", "freq"]:
        raise RuntimeError(f"Unknown order: {order}")
    # shape: T x F
    masks = th.ones(shape, device=device)
    L = shape[1] if order == "freq" else shape[0]
    for _ in range(num_masks):
        # Bug fix: max_steps <= 1 (reachable because tf_mask clamps the
        # widths) made random.randint(1, 0) raise ValueError; guard with
        # max(1, ...) so a width-1 mask is drawn instead.
        dur = random.randint(1, max(1, max_steps - 1))
        if L - dur <= 0:
            continue
        beg = random.randint(0, L - dur - 1)
        if order == "freq":
            masks[:, beg:beg + dur] = 0
        else:
            masks[beg:beg + dur, :] = 0
    return masks
def perturb_speed(wav: th.Tensor, weight: th.Tensor):
    """
    Speed perturbation via a 1-D convolution resampling filter bank.
    Args:
        wav (Tensor): N x S batch of waveforms
        weight (Tensor): dst_sr x src_sr x K resampling kernels
    Return
        wav (Tensor): N x (S // src_sr) * dst_sr resampled waveforms
        (trailing samples that do not fill a src_sr block are dropped)
    """
    _, src_sr, K = weight.shape
    N, S = wav.shape
    num_blocks = S // src_sr
    if num_blocks == 0:
        raise RuntimeError(
            f"Input wav is too short to be perturbed, length = {S}")
    # N x B x sr
    wav = wav[:, :num_blocks * src_sr].view(N, num_blocks, -1)
    # N x src_sr x B
    wav = wav.transpose(1, 2)
    # N x dst_sr x B  (conv1d treats src_sr samples as input channels)
    wav = tf.conv1d(wav, weight, padding=(K - 1) // 2)
    # N x B x dst_sr
    wav = wav.transpose(1, 2).contiguous()
    # N x B*dst_sr
return wav.view(N, -1) | aps/transform/augment.py |
# Copyright 2019 <NAME>
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import random
import torch as th
import torch.nn.functional as tf
from typing import Tuple, Union
def tf_mask(batch: int,
            shape: Tuple[int],
            p: float = 1.0,
            max_bands: int = 30,
            max_frame: int = 40,
            num_freq_masks: int = 2,
            num_time_masks: int = 2,
            device: Union[str, th.device] = "cpu") -> th.Tensor:
    """
    Return a batch of SpecAugment-style time-frequency masks.
    Args:
        batch: batch size, N
        shape: (T x F) of each spectrogram
        p: fraction of T allowed as the maximum time-mask width
        max_bands: maximum width of one frequency mask (clamped to F)
        max_frame: maximum width of one time mask (clamped to T * p)
        num_freq_masks: number of frequency masks per sample
        num_time_masks: number of time masks per sample
        device: device on which the masks are created
    Return:
        masks (Tensor): 0,1 masks, N x T x F
    """
    T, F = shape
    # clamp the mask widths so they can never exceed the spectrogram
    max_frame = min(max_frame, int(T * p))
    max_bands = min(max_bands, F)
    mask = []
    for _ in range(batch):
        fmask = random_mask(shape,
                            max_steps=max_bands,
                            num_masks=num_freq_masks,
                            order="freq",
                            device=device)
        tmask = random_mask(shape,
                            max_steps=max_frame,
                            num_masks=num_time_masks,
                            order="time",
                            device=device)
        # combine: a cell is kept only if neither mask zeroed it
        mask.append(fmask * tmask)
    # N x T x F
    return th.stack(mask)
def random_mask(shape: Tuple[int],
                max_steps: int = 30,
                num_masks: int = 2,
                order: str = "freq",
                device: Union[str, th.device] = "cpu") -> th.Tensor:
    """
    Generate random 0/1 masks along the time or frequency axis.
    Args:
        shape: (T, F)
        max_steps: upper bound (exclusive) on one mask's width
        num_masks: number of masked stripes to draw
        order: "freq" masks columns, "time" masks rows
    Return:
        masks (Tensor): 0,1 masks, T x F
    """
    if order not in ["time", "freq"]:
        raise RuntimeError(f"Unknown order: {order}")
    # shape: T x F
    masks = th.ones(shape, device=device)
    L = shape[1] if order == "freq" else shape[0]
    for _ in range(num_masks):
        # Bug fix: max_steps <= 1 (reachable because tf_mask clamps the
        # widths) made random.randint(1, 0) raise ValueError; guard with
        # max(1, ...) so a width-1 mask is drawn instead.
        dur = random.randint(1, max(1, max_steps - 1))
        if L - dur <= 0:
            continue
        beg = random.randint(0, L - dur - 1)
        if order == "freq":
            masks[:, beg:beg + dur] = 0
        else:
            masks[beg:beg + dur, :] = 0
    return masks
def perturb_speed(wav: th.Tensor, weight: th.Tensor):
    """
    Speed perturbation via a 1-D convolution resampling filter bank.
    Args:
        wav (Tensor): N x S batch of waveforms
        weight (Tensor): dst_sr x src_sr x K resampling kernels
    Return
        wav (Tensor): N x (S // src_sr) * dst_sr resampled waveforms
    """
    _, src_sr, K = weight.shape
    N, S = wav.shape
    num_blocks = S // src_sr
    if num_blocks == 0:
        raise RuntimeError(
            f"Input wav is too short to be perturbed, length = {S}")
    # Drop the tail that does not fill a whole src_sr block, then view
    # the signal as (N, src_sr, B) so conv1d treats each block position
    # as a time step and the src_sr samples as input channels.
    trimmed = wav[:, :num_blocks * src_sr]
    blocks = trimmed.view(N, num_blocks, src_sr).transpose(1, 2)
    # (N, dst_sr, B): one output channel per destination sample
    resampled = tf.conv1d(blocks, weight, padding=(K - 1) // 2)
    # back to (N, B, dst_sr), then flatten the blocks into one waveform
    return resampled.transpose(1, 2).contiguous().view(N, -1)
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
def PoissonGen(inp, rescale_fac=2.0):
    """Bernoulli spike sampler: emit sign(inp) wherever uniform noise
    scaled by rescale_fac does not exceed |inp|, else 0."""
    noise = torch.rand_like(inp)
    fired = torch.le(noise * rescale_fac, torch.abs(inp)).float()
    return fired * torch.sign(inp)
class SpikingActivation(torch.autograd.Function):
    """
    Heaviside spiking nonlinearity with a surrogate gradient.

    forward returns (spikes, reset_membrane); backward approximates the
    non-differentiable threshold with the triangular surrogate
    0.3 * max(0, 1 - |v|), following the BNTT reference implementation:
    https://github.com/Intelligent-Computing-Lab-Yale/BNTT-Batch-Normalization-Through-Time.git
    """
    @staticmethod
    def forward(ctx, input, threshold):
        """
        Args:
            input: membrane potential tensor.
            threshold: firing threshold (scalar; receives no gradient).
        Returns:
            (spikes, input): 0/1 spike tensor, and the membrane potential
            with `threshold` subtracted from every neuron that fired
            (soft reset).
        """
        mem_thr = (input / threshold) - 1.0
        spikes = (mem_thr > 0) * 1.0
        # zeros_like keeps device/dtype consistent with `input`
        rst = torch.zeros_like(input)
        rst[mem_thr > 0] = threshold
        input = input - rst
        ctx.save_for_backward(input, spikes)
        return spikes, input

    @staticmethod
    def backward(ctx, grad_spikes, grad_mem):
        """
        Surrogate backward pass. Fixes vs. the original:
        * forward returns two tensors, so backward must accept two
          incoming gradients (the single-argument version raised
          TypeError as soon as autograd called it);
        * saved tensors are unpacked in the order they were saved
          (the old code swapped `input` and `spikes`);
        * one gradient per forward input is returned (None for the
          non-tensor threshold).
        """
        input, spikes = ctx.saved_tensors
        # triangular surrogate derivative of the spike w.r.t. the membrane
        surrogate = 0.3 * F.threshold(1.0 - torch.abs(input), 0, 0)
        # NOTE(review): the membrane output is treated as a straight-through
        # path (d(reset)/d(input) ~= 1) — confirm against training behavior.
        grad_input = grad_spikes * surrogate + grad_mem
        return grad_input, None
class SpikingNeuron(nn.Module):
    """Leaky integrate-and-fire neuron layer driven by SpikingActivation.

    Membrane state (self.v) persists across forward() calls and is reset
    whenever num_steps == 0; with record=True the per-step membrane,
    spikes and inputs are kept in v_t / s_t / in_t.
    """
    def __init__(self, dt=1e-6, Tsim=1e-3, Cv=50e-12, Cu=30e-12, record=True):
        super(SpikingNeuron, self).__init__()
        self.spike_activation = SpikingActivation.apply
        self.dt = dt
        self.Cv = Cv
        self.Cu = Cu
        self.Tsim = Tsim
        self.beta = dt/Cv # assuming R=1 => tau = RC
        self.record = record
        # leak_mem/threshold are hard-coded here and override the
        # capacitance-derived beta above in forward()
        self.leak_mem = 0.95
        self.threshold = 0.3
    def forward(self, input, num_steps, conv, bntt):
        # See the autograd section for explanation of what happens here.
        # NOTE(review): `conv` is currently unused and `bntt` is applied as a
        # plain multiplicative factor (see commented variants below) — confirm
        # the intended conv/BNTT wiring before relying on these parameters.
        if num_steps == 0:
            # step 0 (re)initializes membrane, spikes and recordings
            self.batch_size = input.size(0)
            self.v = torch.zeros( input.size() )
            self.spikes = torch.zeros( input.size() )
            if self.record:
                self.v_t = []
                self.s_t = []
                self.in_t = []
        # self.v = self.leak_mem*self.v + (1-self.leak_mem)*(input)
        # self.spikes, self.v = self.spike_activation(self.v)
        # self.v = self.leak_mem*self.v + bntt[num_steps]*conv(inpu)
        self.v = self.leak_mem*self.v + bntt*(2*input)
        # fire with a fixed unit threshold; self.threshold is not used here
        self.spikes, self.v = self.spike_activation(self.v, 1.)
        if self.record:
            self.v_t.append(self.v)
            self.s_t.append(self.spikes)
            self.in_t.append(input)
        return self.spikes, self.v
    def extra_repr(self):
        # (Optional)Set the extra information about this module. You can test
        # it by printing an object of this class.
        return 'Spiking Neuron Layer'
class Net(nn.Module):
def __init__(self, dt=1e-6, Tsim=1e-3, Cv=50e-12, Cu=30e-12, record=True):
super(Net, self).__init__()
self.dt = dt
self.Tsim = Tsim
self.num_steps = int(Tsim/dt) + 1
# define neural network layers
self.spike_layer = SpikingNeuron(dt, Tsim, Cv, Cu, record)
def forward(self, input):
for t in range(self.num_steps):
spike_input = PoissonGen(input)
s, v = self.spike_layer(spike_input, t, 1., 1.)
return self.spike_layer.v_t, self.spike_layer.s_t, self.spike_layer.in_t
input = torch.tensor([[0.5]])
net = Net(dt=1e-3, Tsim=300e-3)
v_t, s_t, in_t = net(input)
v_t = [v[0,0] for v in v_t]
s_t = [s[0,0] for s in s_t]
in_t = [i[0,0] for i in in_t]
plt.plot(v_t)
plt.plot(s_t)
plt.plot(in_t)
# --- initial/network/initial_0.py ---
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
def PoissonGen(inp, rescale_fac=2.0):
    """Draw one Poisson-style spike sample from *inp*.

    Each element fires (emitting +/-1, matching the sign of the input)
    when a uniform random draw, scaled by ``rescale_fac``, falls at or
    below the input magnitude — so larger |inp| means a higher firing
    probability.
    """
    # One uniform sample per element, drawn on the same device/dtype.
    scaled_draw = torch.rand_like(inp) * rescale_fac
    fire_mask = (scaled_draw <= inp.abs()).float()
    return fire_mask * torch.sign(inp)
class SpikingActivation(torch.autograd.Function):
    """Spike generation with a soft reset and a surrogate gradient.

    ``forward`` emits a binary spike wherever the membrane potential
    crosses ``threshold`` and subtracts the threshold from the neurons
    that fired; ``backward`` replaces the non-differentiable step
    function with the piecewise-linear surrogate used by BNTT.
    """
    @staticmethod
    def forward(ctx, input, threshold):
        """Return ``(spikes, reset_membrane)`` for membrane potential ``input``.

        ``ctx`` is a context object used to stash tensors for the backward
        pass via ``ctx.save_for_backward``.
        """
        # Normalised distance above threshold; > 0 means the neuron fires.
        mem_thr = (input / threshold) - 1.0
        spikes = (mem_thr > 0) * 1.0
        # Soft reset: subtract the threshold only where a spike occurred.
        # zeros_like keeps dtype/device consistent with the input (the
        # original torch.zeros(input.shape) would break on CUDA inputs).
        rst = torch.zeros_like(input)
        rst[mem_thr > 0] = threshold
        input = input - rst
        ctx.save_for_backward(input, spikes)
        return spikes, input
    @staticmethod
    def backward(ctx, grad_spikes, grad_membrane):
        """Gradient of the loss w.r.t. the membrane input.

        Backpropagation implemented from
        https://github.com/Intelligent-Computing-Lab-Yale/BNTT-Batch-Normalization-Through-Time.git

        forward() produced two outputs, so autograd supplies one incoming
        gradient per output (the membrane-path gradient is ignored here,
        matching the original single-path surrogate); forward() took two
        inputs, so two gradients are returned (None for the
        non-differentiable threshold).
        """
        # Saved as (input, spikes) in forward — unpack in the same order.
        # (The original code unpacked them swapped.)
        input, spikes = ctx.saved_tensors
        if grad_spikes is None:
            # Spike output unused downstream; nothing to propagate.
            grad_spikes = torch.zeros_like(input)
        grad_input = grad_spikes.clone()
        # Triangular surrogate: 0.3 * max(0, 1 - |membrane|).
        grad = grad_input * 0.3 * F.threshold(1.0 - torch.abs(input), 0, 0)
        return grad, None
class SpikingNeuron(nn.Module):
    """Leaky integrate-and-fire layer driven by SpikingActivation.

    State tensors (``self.v``, ``self.spikes``) are (re)initialised on the
    call where ``num_steps == 0`` and carried across subsequent calls, so
    the module must be stepped with ``num_steps = 0, 1, 2, ...`` in order.
    """
    def __init__(self, dt=1e-6, Tsim=1e-3, Cv=50e-12, Cu=30e-12, record=True):
        super(SpikingNeuron, self).__init__()
        # Autograd-aware spike function (surrogate gradient in backward).
        self.spike_activation = SpikingActivation.apply
        self.dt = dt
        self.Cv = Cv
        self.Cu = Cu
        self.Tsim = Tsim
        self.beta = dt/Cv # assuming R=1 => tau = RC
        self.record = record
        # Membrane leak factor per step; note `self.threshold` is not
        # referenced in forward() below — a hard-coded 1. is passed to the
        # spike activation instead.
        self.leak_mem = 0.95
        self.threshold = 0.3
    def forward(self, input, num_steps, conv, bntt):
        """Advance the neuron one simulation step.

        :param input: spike drive for this step; state tensors match its shape.
        :param num_steps: index of the current step; 0 (re)initialises state.
        :param conv: unused here — kept for the commented-out conv variant.
        :param bntt: per-step scale factor applied to the input drive.
        :returns: (spikes, membrane potential) after this step.
        """
        if num_steps == 0:
            # First step: allocate fresh state and (optionally) recordings.
            self.batch_size = input.size(0)
            self.v = torch.zeros( input.size() )
            self.spikes = torch.zeros( input.size() )
            if self.record:
                self.v_t = []
                self.s_t = []
                self.in_t = []
        # Leaky integration of the scaled input drive.  Earlier variants
        # (pure-leak and conv/per-step-BNTT) were left commented out here.
        self.v = self.leak_mem*self.v + bntt*(2*input)
        self.spikes, self.v = self.spike_activation(self.v, 1.)
        if self.record:
            # Keep per-step traces for plotting/inspection.
            self.v_t.append(self.v)
            self.s_t.append(self.spikes)
            self.in_t.append(input)
        return self.spikes, self.v
    def extra_repr(self):
        # Extra information shown when an instance of this module is printed.
        return 'Spiking Neuron Layer'
class Net(nn.Module):
    """Minimal network: Poisson-encoded input fed to one spiking layer."""
    def __init__(self, dt=1e-6, Tsim=1e-3, Cv=50e-12, Cu=30e-12, record=True):
        super(Net, self).__init__()
        self.dt = dt
        self.Tsim = Tsim
        # Number of simulation steps (inclusive of t = 0 and t = Tsim).
        self.num_steps = int(Tsim/dt) + 1
        # define neural network layers
        self.spike_layer = SpikingNeuron(dt, Tsim, Cv, Cu, record)
    def forward(self, input):
        """Run the full simulation and return the recorded traces.

        A fresh Poisson spike train is drawn from `input` at every step.
        Returns (v_t, s_t, in_t) — these attributes only exist when the
        layer was built with record=True.
        """
        for t in range(self.num_steps):
            spike_input = PoissonGen(input)
            s, v = self.spike_layer(spike_input, t, 1., 1.)
        return self.spike_layer.v_t, self.spike_layer.s_t, self.spike_layer.in_t
# --- Demo: simulate a single neuron for 300 ms at 1 ms resolution ---
# NOTE(review): `input` shadows the builtin of the same name; consider renaming.
input = torch.tensor([[0.5]])
net = Net(dt=1e-3, Tsim=300e-3)
v_t, s_t, in_t = net(input)
# Extract the scalar trace of the single (batch 0, unit 0) neuron.
v_t = [v[0,0] for v in v_t]
s_t = [s[0,0] for s in s_t]
in_t = [i[0,0] for i in in_t]
# Overlay membrane potential, output spikes and input spikes.
plt.plot(v_t)
plt.plot(s_t)
plt.plot(in_t)
# NOTE(review): no plt.show()/savefig — presumably run in an interactive session.
import httplib
import stubout
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common import jsonutils
from cinder.openstack.common.scheduler import filters
from cinder import test
from cinder.tests.scheduler import fakes
from cinder.tests import utils as test_utils
from cinder import utils
DATA = ''
def stub_out_https_backend(stubs):
"""
Stubs out the httplib.HTTPRequest.getresponse to return
faked-out data instead of grabbing actual contents of a resource
The stubbed getresponse() returns an iterator over
the data "I am a teapot, short and stout\n"
:param stubs: Set of stubout stubs
"""
class FakeHTTPResponse(object):
def read(self):
return DATA
def fake_do_request(self, *args, **kwargs):
return httplib.OK, FakeHTTPResponse()
class HostFiltersTestCase(test.TestCase):
"""Test case for host filters."""
def setUp(self):
super(HostFiltersTestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
stub_out_https_backend(self.stubs)
self.context = context.RequestContext('fake', 'fake')
self.json_query = jsonutils.dumps(
['and', ['>=', '$free_capacity_gb', 1024],
['>=', '$total_capacity_gb', 10 * 1024]])
# This has a side effect of testing 'get_filter_classes'
# when specifying a method (in this case, our standard filters)
filter_handler = filters.HostFilterHandler('cinder.scheduler.filters')
classes = filter_handler.get_all_classes()
self.class_map = {}
for cls in classes:
self.class_map[cls.__name__] = cls
def _stub_service_is_up(self, ret_value):
def fake_service_is_up(service):
return ret_value
self.stubs.Set(utils, 'service_is_up', fake_service_is_up)
@test.skip_if(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_capacity_filter_passes(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'free_capacity_gb': 200,
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@test.skip_if(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_capacity_filter_fails(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'free_capacity_gb': 120,
'reserved_percentage': 20,
'updated_at': None,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@test.skip_if(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_capacity_filter_passes_infinite(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'free_capacity_gb': 'infinite',
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@test.skip_if(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_capacity_filter_passes_unknown(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
service = {'disabled': False}
host = fakes.FakeHostState('host1',
{'free_capacity_gb': 'unknown',
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@test.skip_if(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_retry_filter_disabled(self):
# Test case where retry/re-scheduling is disabled.
filt_cls = self.class_map['RetryFilter']()
host = fakes.FakeHostState('host1', {})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@test.skip_if(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_retry_filter_pass(self):
# Node not previously tried.
filt_cls = self.class_map['RetryFilter']()
host = fakes.FakeHostState('host1', {})
retry = dict(num_attempts=2, hosts=['host2'])
filter_properties = dict(retry=retry)
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@test.skip_if(not test_utils.is_cinder_installed(),
'Test requires Cinder installed')
def test_retry_filter_fail(self):
# Node was already tried.
filt_cls = self.class_map['RetryFilter']()
host = fakes.FakeHostState('host1', {})
retry = dict(num_attempts=1, hosts=['host1'])
filter_properties = dict(retry=retry)
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
# --- cinder/tests/scheduler/test_host_filters.py ---
import httplib
import stubout
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common import jsonutils
from cinder.openstack.common.scheduler import filters
from cinder import test
from cinder.tests.scheduler import fakes
from cinder.tests import utils as test_utils
from cinder import utils
# Canned body returned by the fake HTTP response below.
DATA = ''
def stub_out_https_backend(stubs):
    """
    Intended to stub out httplib so tests never touch the network.

    The fakes below return (httplib.OK, response) where response.read()
    yields the module-level DATA string (currently empty).

    NOTE(review): `stubs` is accepted but never used — the fakes are
    defined here yet never installed via stubs.Set(); confirm whether
    the stubbing was meant to be wired up.

    :param stubs: Set of stubout stubs
    """
    class FakeHTTPResponse(object):
        def read(self):
            return DATA
    def fake_do_request(self, *args, **kwargs):
        return httplib.OK, FakeHTTPResponse()
class HostFiltersTestCase(test.TestCase):
    """Test case for host filters."""
    def setUp(self):
        super(HostFiltersTestCase, self).setUp()
        # Install the fake HTTP backend so no real requests are made.
        self.stubs = stubout.StubOutForTesting()
        stub_out_https_backend(self.stubs)
        self.context = context.RequestContext('fake', 'fake')
        # Example capability query for JSON-filter style tests.
        self.json_query = jsonutils.dumps(
            ['and', ['>=', '$free_capacity_gb', 1024],
                ['>=', '$total_capacity_gb', 10 * 1024]])
        # This has a side effect of testing 'get_filter_classes'
        # when specifying a method (in this case, our standard filters)
        filter_handler = filters.HostFilterHandler('cinder.scheduler.filters')
        classes = filter_handler.get_all_classes()
        # Map filter class name -> class for easy instantiation in tests.
        self.class_map = {}
        for cls in classes:
            self.class_map[cls.__name__] = cls
    def _stub_service_is_up(self, ret_value):
        # Force utils.service_is_up() to a fixed answer for this test.
        def fake_service_is_up(service):
            return ret_value
        self.stubs.Set(utils, 'service_is_up', fake_service_is_up)
    @test.skip_if(not test_utils.is_cinder_installed(),
                  'Test requires Cinder installed')
    def test_capacity_filter_passes(self):
        # 200 GB free >= the requested 100 GB -> host accepted.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['CapacityFilter']()
        filter_properties = {'size': 100}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1',
                                   {'free_capacity_gb': 200,
                                    'updated_at': None,
                                    'service': service})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    @test.skip_if(not test_utils.is_cinder_installed(),
                  'Test requires Cinder installed')
    def test_capacity_filter_fails(self):
        # 120 GB free minus the 20% reservation leaves < 100 GB -> rejected.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['CapacityFilter']()
        filter_properties = {'size': 100}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1',
                                   {'free_capacity_gb': 120,
                                    'reserved_percentage': 20,
                                    'updated_at': None,
                                    'service': service})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
    @test.skip_if(not test_utils.is_cinder_installed(),
                  'Test requires Cinder installed')
    def test_capacity_filter_passes_infinite(self):
        # 'infinite' capacity always satisfies the request.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['CapacityFilter']()
        filter_properties = {'size': 100}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1',
                                   {'free_capacity_gb': 'infinite',
                                    'updated_at': None,
                                    'service': service})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    @test.skip_if(not test_utils.is_cinder_installed(),
                  'Test requires Cinder installed')
    def test_capacity_filter_passes_unknown(self):
        # 'unknown' capacity is given the benefit of the doubt.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['CapacityFilter']()
        filter_properties = {'size': 100}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1',
                                   {'free_capacity_gb': 'unknown',
                                    'updated_at': None,
                                    'service': service})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    @test.skip_if(not test_utils.is_cinder_installed(),
                  'Test requires Cinder installed')
    def test_retry_filter_disabled(self):
        # Test case where retry/re-scheduling is disabled.
        filt_cls = self.class_map['RetryFilter']()
        host = fakes.FakeHostState('host1', {})
        filter_properties = {}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    @test.skip_if(not test_utils.is_cinder_installed(),
                  'Test requires Cinder installed')
    def test_retry_filter_pass(self):
        # Node not previously tried.
        filt_cls = self.class_map['RetryFilter']()
        host = fakes.FakeHostState('host1', {})
        retry = dict(num_attempts=2, hosts=['host2'])
        filter_properties = dict(retry=retry)
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    @test.skip_if(not test_utils.is_cinder_installed(),
                  'Test requires Cinder installed')
    def test_retry_filter_fail(self):
        # Node was already tried.
        filt_cls = self.class_map['RetryFilter']()
        host = fakes.FakeHostState('host1', {})
        retry = dict(num_attempts=1, hosts=['host1'])
        filter_properties = dict(retry=retry)
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
import logging
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from six.moves import input
from django_extensions.management.mysql import parse_mysql_cnf
from django_extensions.management.utils import signalcommand
class Command(BaseCommand):
help = "Resets the database for this project."
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--noinput', action='store_false',
dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'
)
parser.add_argument(
'--no-utf8', action='store_true', dest='no_utf8_support',
default=False,
help='Tells Django to not create a UTF-8 charset database'
)
parser.add_argument(
'-U', '--user', action='store', dest='user', default=None,
help='Use another user for the database then defined in settings.py'
)
parser.add_argument(
'-O', '--owner', action='store', dest='owner', default=None,
help='Use another owner for creating the database then the user defined in settings or via --user'
)
parser.add_argument(
'-P', '--password', action='store', dest='password', default=None,
help='Use another password for the database then defined in settings.py'
)
parser.add_argument(
'-D', '--dbname', action='store', dest='dbname', default=None,
help='Use another database name then defined in settings.py'
)
parser.add_argument(
'-R', '--router', action='store', dest='router', default='default',
help='Use this router-database other then defined in settings.py'
)
parser.add_argument(
'-c', '--close-sessions', action='store_true', dest='close_sessions', default=False,
help='Close database connections before dropping database (PostgreSQL only)'
)
@signalcommand
def handle(self, *args, **options):
"""
Resets the database for this project.
Note: Transaction wrappers are in reverse as a work around for
autocommit, anybody know how to do this the right way?
"""
if args:
raise CommandError("reset_db takes no arguments")
router = options['router']
dbinfo = settings.DATABASES.get(router)
if dbinfo is None:
raise CommandError("Unknown database router %s" % router)
engine = dbinfo.get('ENGINE').split('.')[-1]
user = password = database_name = database_host = database_port = ''
if engine == 'mysql':
(user, password, database_name, database_host, database_port) = parse_mysql_cnf(dbinfo)
user = options['user'] or dbinfo.get('USER') or user
password = options['password'] or dbinfo.get('PASSWORD') or password
owner = options['owner'] or user
database_name = options['dbname'] or dbinfo.get('NAME') or database_name
if database_name == '':
raise CommandError("You need to specify DATABASE_NAME in your Django settings file.")
database_host = dbinfo.get('HOST') or database_host
database_port = dbinfo.get('PORT') or database_port
verbosity = options["verbosity"]
if options['interactive']:
confirm = input("""
You have requested a database reset.
This will IRREVERSIBLY DESTROY
ALL data in the database "%s".
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % (database_name,))
else:
confirm = 'yes'
if confirm != 'yes':
print("Reset cancelled.")
return
if engine in ('sqlite3', 'spatialite'):
import os
try:
logging.info("Unlinking %s database" % engine)
os.unlink(database_name)
except OSError:
pass
elif engine in ('mysql',):
import MySQLdb as Database
kwargs = {
'user': user,
'passwd': password,
}
if database_host.startswith('/'):
kwargs['unix_socket'] = database_host
else:
kwargs['host'] = database_host
if database_port:
kwargs['port'] = int(database_port)
connection = Database.connect(**kwargs)
drop_query = 'DROP DATABASE IF EXISTS `%s`' % database_name
utf8_support = '' if options['no_utf8_support'] else 'CHARACTER SET utf8'
create_query = 'CREATE DATABASE `%s` %s' % (database_name, utf8_support)
logging.info('Executing... "' + drop_query + '"')
connection.query(drop_query)
logging.info('Executing... "' + create_query + '"')
connection.query(create_query.strip())
elif engine in ('postgresql', 'postgresql_psycopg2', 'postgis'):
import psycopg2 as Database # NOQA
conn_params = {'database': 'template1'}
if user:
conn_params['user'] = user
if password:
conn_params['password'] = password
if database_host:
conn_params['host'] = database_host
if database_port:
conn_params['port'] = database_port
connection = Database.connect(**conn_params)
connection.set_isolation_level(0) # autocommit false
cursor = connection.cursor()
if options['close_sessions']:
close_sessions_query = """
SELECT pg_terminate_backend(pg_stat_activity.pid)
FROM pg_stat_activity
WHERE pg_stat_activity.datname = '%s';
""" % database_name
logging.info('Executing... "' + close_sessions_query.strip() + '"')
try:
cursor.execute(close_sessions_query)
except Database.ProgrammingError as e:
logging.exception("Error: %s" % str(e))
drop_query = "DROP DATABASE \"%s\";" % database_name
logging.info('Executing... "' + drop_query + '"')
try:
cursor.execute(drop_query)
except Database.ProgrammingError as e:
logging.exception("Error: %s" % str(e))
create_query = "CREATE DATABASE \"%s\"" % database_name
if owner:
create_query += " WITH OWNER = \"%s\" " % owner
create_query += " ENCODING = 'UTF8'"
if settings.DEFAULT_TABLESPACE:
create_query += ' TABLESPACE = %s;' % settings.DEFAULT_TABLESPACE
else:
create_query += ';'
logging.info('Executing... "' + create_query + '"')
cursor.execute(create_query)
else:
raise CommandError("Unknown database engine %s" % engine)
if verbosity >= 2 or options['interactive']:
            print("Reset successful.")
# --- django_extensions/management/commands/reset_db.py ---
import logging
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from six.moves import input
from django_extensions.management.mysql import parse_mysql_cnf
from django_extensions.management.utils import signalcommand
class Command(BaseCommand):
    """Management command that drops and recreates the project database."""
    help = "Resets the database for this project."
    def add_arguments(self, parser):
        # Register all command-line options on top of BaseCommand's.
        super(Command, self).add_arguments(parser)
        parser.add_argument(
            '--noinput', action='store_false',
            dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind.'
        )
        parser.add_argument(
            '--no-utf8', action='store_true', dest='no_utf8_support',
            default=False,
            help='Tells Django to not create a UTF-8 charset database'
        )
        parser.add_argument(
            '-U', '--user', action='store', dest='user', default=None,
            help='Use another user for the database then defined in settings.py'
        )
        parser.add_argument(
            '-O', '--owner', action='store', dest='owner', default=None,
            help='Use another owner for creating the database then the user defined in settings or via --user'
        )
        parser.add_argument(
            '-P', '--password', action='store', dest='password', default=None,
            help='Use another password for the database then defined in settings.py'
        )
        parser.add_argument(
            '-D', '--dbname', action='store', dest='dbname', default=None,
            help='Use another database name then defined in settings.py'
        )
        parser.add_argument(
            '-R', '--router', action='store', dest='router', default='default',
            help='Use this router-database other then defined in settings.py'
        )
        parser.add_argument(
            '-c', '--close-sessions', action='store_true', dest='close_sessions', default=False,
            help='Close database connections before dropping database (PostgreSQL only)'
        )
    @signalcommand
    def handle(self, *args, **options):
        """
        Resets the database for this project.

        Resolves connection settings (with MySQL .cnf fallback and
        command-line overrides), asks for confirmation, then drops and
        recreates the database using engine-specific logic for sqlite,
        MySQL and PostgreSQL.
        Note: Transaction wrappers are in reverse as a work around for
        autocommit, anybody know how to do this the right way?
        """
        if args:
            raise CommandError("reset_db takes no arguments")
        router = options['router']
        dbinfo = settings.DATABASES.get(router)
        if dbinfo is None:
            raise CommandError("Unknown database router %s" % router)
        # Engine name is the last dotted component, e.g. 'postgresql_psycopg2'.
        engine = dbinfo.get('ENGINE').split('.')[-1]
        user = password = database_name = database_host = database_port = ''
        if engine == 'mysql':
            # MySQL may keep credentials in a .cnf file rather than settings.
            (user, password, database_name, database_host, database_port) = parse_mysql_cnf(dbinfo)
        # Command-line flags override settings, which override .cnf values.
        user = options['user'] or dbinfo.get('USER') or user
        password = options['password'] or dbinfo.get('PASSWORD') or password
        owner = options['owner'] or user
        database_name = options['dbname'] or dbinfo.get('NAME') or database_name
        if database_name == '':
            raise CommandError("You need to specify DATABASE_NAME in your Django settings file.")
        database_host = dbinfo.get('HOST') or database_host
        database_port = dbinfo.get('PORT') or database_port
        verbosity = options["verbosity"]
        if options['interactive']:
            confirm = input("""
You have requested a database reset.
This will IRREVERSIBLY DESTROY
ALL data in the database "%s".
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % (database_name,))
        else:
            confirm = 'yes'
        if confirm != 'yes':
            print("Reset cancelled.")
            return
        if engine in ('sqlite3', 'spatialite'):
            # SQLite: the database is just a file; deleting it is the reset.
            import os
            try:
                logging.info("Unlinking %s database" % engine)
                os.unlink(database_name)
            except OSError:
                pass
        elif engine in ('mysql',):
            import MySQLdb as Database
            kwargs = {
                'user': user,
                'passwd': password,
            }
            # A path-like host means a unix socket rather than TCP.
            if database_host.startswith('/'):
                kwargs['unix_socket'] = database_host
            else:
                kwargs['host'] = database_host
            if database_port:
                kwargs['port'] = int(database_port)
            connection = Database.connect(**kwargs)
            drop_query = 'DROP DATABASE IF EXISTS `%s`' % database_name
            utf8_support = '' if options['no_utf8_support'] else 'CHARACTER SET utf8'
            create_query = 'CREATE DATABASE `%s` %s' % (database_name, utf8_support)
            logging.info('Executing... "' + drop_query + '"')
            connection.query(drop_query)
            logging.info('Executing... "' + create_query + '"')
            connection.query(create_query.strip())
        elif engine in ('postgresql', 'postgresql_psycopg2', 'postgis'):
            import psycopg2 as Database  # NOQA
            # Connect to template1: we cannot be connected to the database
            # we are about to drop.
            conn_params = {'database': 'template1'}
            if user:
                conn_params['user'] = user
            if password:
                conn_params['password'] = password
            if database_host:
                conn_params['host'] = database_host
            if database_port:
                conn_params['port'] = database_port
            connection = Database.connect(**conn_params)
            # 0 = ISOLATION_LEVEL_AUTOCOMMIT: DROP/CREATE DATABASE cannot
            # run inside a transaction block.
            connection.set_isolation_level(0)
            cursor = connection.cursor()
            if options['close_sessions']:
                # Kick every backend still attached to the target database,
                # otherwise DROP DATABASE fails with "is being accessed".
                close_sessions_query = """
                SELECT pg_terminate_backend(pg_stat_activity.pid)
                FROM pg_stat_activity
                WHERE pg_stat_activity.datname = '%s';
                """ % database_name
                logging.info('Executing... "' + close_sessions_query.strip() + '"')
                try:
                    cursor.execute(close_sessions_query)
                except Database.ProgrammingError as e:
                    logging.exception("Error: %s" % str(e))
            drop_query = "DROP DATABASE \"%s\";" % database_name
            logging.info('Executing... "' + drop_query + '"')
            try:
                cursor.execute(drop_query)
            except Database.ProgrammingError as e:
                logging.exception("Error: %s" % str(e))
            create_query = "CREATE DATABASE \"%s\"" % database_name
            if owner:
                create_query += " WITH OWNER = \"%s\" " % owner
            create_query += " ENCODING = 'UTF8'"
            if settings.DEFAULT_TABLESPACE:
                create_query += ' TABLESPACE = %s;' % settings.DEFAULT_TABLESPACE
            else:
                create_query += ';'
            logging.info('Executing... "' + create_query + '"')
            cursor.execute(create_query)
        else:
            raise CommandError("Unknown database engine %s" % engine)
        if verbosity >= 2 or options['interactive']:
            print("Reset successful.")
# Exception hierarchy for the fluiddb service: every error derives from a
# single `Error` root so callers can catch the whole family at once.  Most
# subclasses carry no payload — the type itself is the information.
class Error(Exception):
    pass
class AlreadyExists(Error):
    pass
class BadArgument(Error):
    pass
class BadExitStatus(Error):
    pass
class ConfigError(Error):
    pass
# Content/transfer integrity errors.
class ContentLengthMismatch(Error):
    pass
class ContentChecksumMismatch(Error):
    pass
class ContentSeekError(Error):
    pass
# Process/instance lifecycle errors.
class CoordinatorNotRunning(Error):
    pass
class CoordinatorStillRunning(Error):
    pass
class DatabaseProblem(Error):
    pass
class EmptyRegistry(Error):
    pass
class EmptyToplevel(Error):
    pass
class GenericError(Error):
    pass
class InUse(Error):
    pass
class InfiniteSetError(Error):
    pass
class InstanceIsRunning(Error):
    pass
class InstanceNotRunning(Error):
    pass
class InternalError(Error):
    pass
class InternalInconsistency(Error):
    pass
class InvalidRangeType(Error):
    pass
class MalformedPayload(Error):
    # Carries a human-readable description of what was malformed.
    # NOTE(review): does not call Error.__init__, so str(exc) is empty;
    # confirm callers read .message rather than str().
    def __init__(self, message):
        self.message = message
class KVStoreClearError(Error):
    pass
class MissingPayload(Error):
    pass
# Missing/unexpected HTTP header errors.
class NoContentLengthHeader(Error):
    pass
class NoContentTypeHeader(Error):
    pass
class NoOriginHeader(Error):
    pass
class NoInstance(Error):
    pass
class NoMoreServers(Error):
    pass
class NoObjects(Error):
    pass
# "NoSuch*" lookup failures: the named entity does not exist.
class NoSuchFile(Error):
    pass
class NoSuchInstance(Error):
    pass
class NoSuchKey(Error):
    pass
class NoSuchObject(Error):
    pass
class NoSuchPlugin(Error):
    pass
class NoSuchProcess(Error):
    pass
class NoSuchProfile(Error):
    pass
class NoSuchResource(Error):
    pass
class NoSuchRole(Error):
    pass
class NoSuchServer(Error):
    pass
class NoSuchServerType(Error):
    pass
class NoSuchToplevel(Error):
    pass
class NoSuchUsage(Error):
    pass
class NoSuchUser(Error):
    pass
class NoSuchVerb(Error):
    pass
class NoSuchVersion(Error):
    pass
class NotAcceptable(Error):
    pass
# Namespace/tag errors.
class NonEmptyNamespace(Error):
    pass
class NonexistentTag(Error):
    pass
class NonexistentNamespace(Error):
    pass
class NotIndexed(Error):
    pass
class PasswordMismatch(Error):
    pass
class PermissionDenied(Error):
    pass
class PluginError(Error):
    pass
class ProcessStillRunning(Error):
    pass
class QueryParseError(Error):
    pass
# NOTE(review): shadows the builtin TimeoutError on Python 3 when
# imported unqualified — keep references module-qualified.
class TimeoutError(Error):
    pass
class TooManyObjects(Error):
    pass
class UnexpectedContentLengthHeader(Error):
    pass
class UnknownAcceptType(Error):
    pass
class UnknownContentType(Error):
    pass
class UnknownError(Error):
    pass
class UnsupportedJSONType(Error):
    pass
class WatchLimitReached(Error):
    pass
class UnwrappableBlob(Error):
    pass
class IndexingError(Error):
    pass
class FieldError(Error):
    """Base for errors that concern a single named payload field."""

    def __init__(self, fieldName):
        self.fieldName = fieldName

    def __repr__(self):
        # repr and str render identically, naming the concrete subclass.
        cls_name = self.__class__.__name__
        return '<{0} instance: fieldName={1!r}>'.format(cls_name, self.fieldName)

    __str__ = __repr__
# FieldError subclasses: identify which request/response payload field
# was invalid, missing, or unrecognised.
class InvalidPayloadField(FieldError):
    pass
class InvalidResponsePayloadField(FieldError):
    pass
class PayloadFieldMissing(FieldError):
    pass
class ResponsePayloadFieldMissing(FieldError):
    pass
class UnknownPayloadField(FieldError):
    pass
class UnknownResponsePayloadField(FieldError):
    pass
class ArgumentError(Error):
    """Base for errors that concern a single request argument."""

    def __init__(self, argument):
        self.argument = argument

    def __repr__(self):
        # repr and str render identically, naming the concrete subclass.
        return '<{0} instance: argument={1!r}>'.format(
            type(self).__name__, self.argument)

    __str__ = __repr__
# ArgumentError subclasses: identify what was wrong with a request argument.
class UnknownArgument(ArgumentError):
    pass
class MissingArgument(ArgumentError):
    pass
class MultipleArgumentValues(ArgumentError):
    pass
class InvalidUTF8Argument(ArgumentError):
    pass
# --- fluiddb/common/error.py ---
class Error(Exception):
pass
class AlreadyExists(Error):
pass
class BadArgument(Error):
pass
class BadExitStatus(Error):
pass
class ConfigError(Error):
pass
class ContentLengthMismatch(Error):
pass
class ContentChecksumMismatch(Error):
pass
class ContentSeekError(Error):
pass
class CoordinatorNotRunning(Error):
pass
class CoordinatorStillRunning(Error):
pass
class DatabaseProblem(Error):
pass
class EmptyRegistry(Error):
pass
class EmptyToplevel(Error):
pass
class GenericError(Error):
pass
class InUse(Error):
pass
class InfiniteSetError(Error):
pass
class InstanceIsRunning(Error):
pass
class InstanceNotRunning(Error):
pass
class InternalError(Error):
pass
class InternalInconsistency(Error):
pass
class InvalidRangeType(Error):
pass
class MalformedPayload(Error):
def __init__(self, message):
self.message = message
class KVStoreClearError(Error):
pass
class MissingPayload(Error):
pass
class NoContentLengthHeader(Error):
pass
class NoContentTypeHeader(Error):
pass
class NoOriginHeader(Error):
pass
class NoInstance(Error):
pass
class NoMoreServers(Error):
pass
class NoObjects(Error):
pass
class NoSuchFile(Error):
pass
class NoSuchInstance(Error):
pass
class NoSuchKey(Error):
pass
class NoSuchObject(Error):
pass
class NoSuchPlugin(Error):
pass
class NoSuchProcess(Error):
pass
class NoSuchProfile(Error):
pass
class NoSuchResource(Error):
pass
class NoSuchRole(Error):
pass
class NoSuchServer(Error):
pass
class NoSuchServerType(Error):
pass
class NoSuchToplevel(Error):
pass
class NoSuchUsage(Error):
pass
class NoSuchUser(Error):
pass
class NoSuchVerb(Error):
pass
class NoSuchVersion(Error):
pass
class NotAcceptable(Error):
pass
class NonEmptyNamespace(Error):
pass
class NonexistentTag(Error):
pass
class NonexistentNamespace(Error):
pass
class NotIndexed(Error):
pass
class PasswordMismatch(Error):
pass
class PermissionDenied(Error):
pass
class PluginError(Error):
pass
class ProcessStillRunning(Error):
pass
class QueryParseError(Error):
pass
class TimeoutError(Error):
pass
class TooManyObjects(Error):
pass
class UnexpectedContentLengthHeader(Error):
pass
class UnknownAcceptType(Error):
pass
class UnknownContentType(Error):
pass
class UnknownError(Error):
pass
class UnsupportedJSONType(Error):
pass
class WatchLimitReached(Error):
pass
class UnwrappableBlob(Error):
pass
class IndexingError(Error):
pass
class FieldError(Error):
def __init__(self, fieldName):
self.fieldName = fieldName
def __repr__(self):
return '<%s instance: fieldName=%r>' % (
self.__class__.__name__, self.fieldName)
__str__ = __repr__
class InvalidPayloadField(FieldError):
pass
class InvalidResponsePayloadField(FieldError):
pass
class PayloadFieldMissing(FieldError):
pass
class ResponsePayloadFieldMissing(FieldError):
pass
class UnknownPayloadField(FieldError):
pass
class UnknownResponsePayloadField(FieldError):
pass
class ArgumentError(Error):
def __init__(self, argument):
self.argument = argument
def __repr__(self):
return '<%s instance: argument=%r>' % (
self.__class__.__name__, self.argument)
__str__ = __repr__
class UnknownArgument(ArgumentError):
pass
class MissingArgument(ArgumentError):
pass
class MultipleArgumentValues(ArgumentError):
pass
class InvalidUTF8Argument(ArgumentError):
pass | 0.738763 | 0.173743 |
import json
import os
from concurrent.futures import ThreadPoolExecutor
import pytest
from ruamel import yaml
from precept import (
Precept, Command, Argument, Config, ConfigProperty, Nestable, ConfigFormat,
config_factory)
# Values the config-file tests expect after an override file is applied.
override_configs = {
    'config_int': 25,
    'config_str': 'bar',
    'config_list': [5, 4, 5],
    'config_nested': {
        'nested_str': 'foo',
    }
}
# Candidate config locations handed to the CLI under test.
# NOTE(review): resolution order among these is precept's — confirm which wins.
config_files = [
    'config.yml', './tests/configs.yml', './tests/configs2.yml'
]
class ConfigTest(Config):
"""root_comment"""
config_str = ConfigProperty(
comment='comment_string',
config_type=str,
auto_environ=True
)
config_str_with_default = ConfigProperty(
default='Default foo bar',
auto_environ=True,
)
config_int = ConfigProperty(default=10, auto_environ=True)
config_float = ConfigProperty(
default=89.99,
comment='comment_float',
auto_environ=True,
)
config_list = ConfigProperty(
default=[1, 2, 3],
auto_environ=True,
)
config_auto_global = ConfigProperty(
default=333, auto_global=True, comment='comment_auto_global'
)
class ConfigNested(Nestable):
"""docstring_comment"""
nested_str = ConfigProperty(
default='nested',
comment='nested_comment'
)
class DoubleNested(Nestable):
"""doubly"""
double = ConfigProperty(
default=2.2, comment='double_comment_nested'
)
double_nested: DoubleNested = None
config_nested: ConfigNested = None
override = {
'config_int': 22,
'config_str': 'foo',
'config_float': 55.77,
'config_str_with_default': 'not default',
'config_list': [5, 4, 5],
'config_nested': {
'nested_str': 'hello',
'double_nested': {'double': 77.77}
}
}
class ConfigCli(Precept):
default_configs = {
'config_int': 1,
'config_str': 'foo',
'config_list': [1, 2, 3],
'config_nested': {
'nested_str': 'bar',
}
}
result = None
def __init__(self):
super().__init__(
config_file=config_files,
executor=ThreadPoolExecutor(),
add_dump_config_command=True,
)
@Command(
Argument(
'config_name',
type=str,
)
)
async def use_config(self, config_name):
self.result = getattr(self.config, config_name)
@pytest.mark.parametrize(
'config_name, config_value', list(ConfigCli.default_configs.items())
)
def test_config_defaults(config_name, config_value):
cli = ConfigCli()
cli.start(f'--quiet use-config {config_name}'.split(' '))
assert cli.result == config_value
@pytest.mark.parametrize(
'config_name, config_value', list(override_configs.items())
)
def test_config_file(config_name, config_value):
config_file = './config.yml'
try:
cli = ConfigCli()
cli.config.read_dict(override_configs)
cli.config.save(config_file)
cli.start(f'--quiet use-config {config_name}'.split(' '))
assert cli.result == config_value
finally:
if os.path.exists(config_file):
os.remove(config_file)
@pytest.mark.parametrize(
'config_name, config_value', list(override_configs.items())
)
def test_config_override(config_name, config_value):
config_file = './custom.yml'
try:
cli = ConfigCli()
cli.config.read_dict(override_configs)
cli.config.save(config_file)
cli.start(
f'--quiet --config-file {config_file}'
f' use-config {config_name}'.split(' ')
)
assert cli.result == config_value
finally:
if os.path.exists(config_file):
os.remove(config_file)
def test_dump_config_defaults():
config_file = './test.yml'
try:
cli = ConfigCli()
cli.config.config_format = ConfigFormat.YML
cli.start(f'--quiet dump-configs {config_file}'.split(' '))
assert os.path.exists(config_file)
with open(config_file, 'r') as f:
configs = yaml.load(f, Loader=yaml.RoundTripLoader)
for k, v in cli.default_configs.items():
assert configs[k] == v
finally:
if os.path.exists(config_file):
os.remove(config_file)
def test_dump_config_current_configs():
config_file = './config.yml'
output = './output.yml'
try:
cli = ConfigCli()
cli.config.config_format = ConfigFormat.YML
cli.config.read_dict(override_configs)
cli.config.save(config_file)
cli.start(f'--quiet dump-configs {output}'.split(' '))
with open(config_file, 'r') as f:
configs = yaml.load(f, Loader=yaml.RoundTripLoader)
for k, v in override_configs.items():
assert configs[k] == v
finally:
if os.path.exists(config_file):
os.remove(config_file)
if os.path.exists(output):
os.remove(output)
@pytest.mark.parametrize(
'level', list(range(len(config_files)))
)
def test_multi_configs(level):
config_file = config_files[level]
try:
cli = ConfigCli()
cli.config.read_dict(override_configs)
cli.config.save(config_file)
for k, v in override_configs.items():
cli.start(f'--quiet use-config {k}'.split(' '))
assert cli.result == v
finally:
if os.path.exists(config_file):
os.remove(config_file)
def test_config_class():
cfg = ConfigTest()
# Default values assertions
assert cfg.config_str_with_default == 'Default foo bar'
assert cfg.config_nested.nested_str == 'nested'
assert cfg.config_nested.double_nested.double == 2.2
assert cfg.config_str is None
assert cfg.config_list == [1, 2, 3]
cfg.read_dict(override)
# Changed values assertions
assert cfg.config_str == 'foo'
assert cfg.config_nested.nested_str == 'hello'
assert cfg['config_nested']['nested_str'] == 'hello'
# pylint: disable=unsubscriptable-object
assert cfg.config_nested['nested_str'] == 'hello'
assert cfg.config_str_with_default == 'not default'
assert cfg.config_nested.double_nested.double == 77.77
@pytest.mark.parametrize('config_format', [
ConfigFormat.YML, ConfigFormat.INI, ConfigFormat.TOML
])
def test_config_comments(tmp_path, config_format):
cfg = ConfigTest(config_format=config_format)
config_file = os.path.join(tmp_path, 'configs')
cfg.read_dict(override)
cfg.save(config_file)
cfg2 = ConfigTest(config_format=config_format)
cfg2.read_file(config_file)
# Test that the comment are not included in the values
assert cfg2.config_str == 'foo'
assert cfg2.config_float == 55.77
assert cfg2.config_nested.nested_str == 'hello'
assert cfg2.config_nested.double_nested.double == 77.77
assert cfg2.config_list == [5, 4, 5]
with open(config_file) as f:
test = f.read()
for comment in (
'comment_string', 'comment_float', 'docstring_comment',
'nested_comment', 'double_comment_nested', 'doubly', 'root_comment'
):
assert comment in test
def test_config_json(tmp_path):
cfg = ConfigTest(config_format=ConfigFormat.JSON)
config_file = os.path.join(tmp_path, 'config.json')
cfg.save(config_file)
with open(config_file) as f:
data = json.load(f)
assert data['config_nested']['nested_str'] == 'nested'
@pytest.mark.parametrize(
'name, value', list(
x for x in override.items() if not isinstance(x[1], dict)
)
)
def test_config_environ(monkeypatch, name, value):
monkeypatch.setenv(
name.upper(),
str(value)
if not isinstance(value, list)
else yaml.round_trip_dump(value)
)
cfg = ConfigTest()
assert getattr(cfg, name) == value
# pylint: disable=no-member
def test_config_factory():
d = {'flat': 'face', 'nested': {'double': {'keyed': 'alright'}}}
cls = config_factory(d)
cfg = cls()
assert cfg.flat == 'face'
assert cfg.nested.double.keyed == 'alright'
@pytest.mark.parametrize(
'config_name, config_value', list(override.items())
)
def test_new_config_cli(config_name, config_value):
class Cfg(ConfigCli):
config = ConfigTest()
cli = Cfg()
cli.config.read_dict(override)
cli.start(f'--quiet use-config {config_name}'.split(' '))
assert cli.result == config_value
def test_config_get_root():
# Bug used to raise an error, should always return the root.
c = ConfigTest()
root = c.get_root()
assert root is c
def test_config_auto_global():
class Cfg(ConfigCli):
config = ConfigTest()
cli = Cfg()
cli.start('--config-auto-global=77 use-config config_auto_global'.split())
assert cli.result == 77
def test_config_set():
cfg = ConfigTest()
cfg.config_str = 'Changed'
assert cfg.config_str == 'Changed'
cfg.config_nested.nested_str = 'Also changed'
assert cfg.config_nested.nested_str == 'Also changed'
def test_config_order(tmp_path):
# Config order should be
# - cli arguments
# - updated dict values
# - set values
# - config file.
cfg = ConfigTest()
cfg._app = ConfigCli()
cfg.config_str = 'changed 2'
cfg.config_nested.nested_str = 'changed one'
config_path = os.path.join(tmp_path, 'config.toml')
# Test changed values stays after reading dict.
cfg.read_dict({'config_str': 'updated'})
assert cfg.config_nested.nested_str == 'changed one'
assert cfg.config_str == 'updated'
# Test that reading the config file doesn't change set values
cfg2 = ConfigTest()
cfg2.config_str_with_default = 'Changed again'
cfg2.config_nested.double_nested.double = 88
cfg2.save(config_path)
cfg.read_file(config_path)
assert cfg.config_str_with_default == 'Changed again'
assert cfg.config_str == 'updated'
assert cfg.config_nested.double_nested.double == 88
assert cfg.config_nested.nested_str == 'changed one'
# Test argument take precedence over all
cfg._app.cli.globals = {
'config_auto_global': 555
}
cfg.config_auto_global = 111
assert cfg.config_auto_global == 555
def test_multi_config_instances():
cfg1 = ConfigTest()
cfg2 = ConfigTest()
cfg1.config_str = 'Foo'
assert cfg1.config_str != cfg2.config_str
cfg2.config_nested.nested_str = 'multi-instance'
assert cfg1.config_nested.nested_str != cfg2.config_nested.nested_str
cfg1.config_nested.double_nested.double = 3.0
assert (
cfg1.config_nested.double_nested.double
!= cfg2.config_nested.double_nested.double
)
def test_dump_config_str_no_default_no_comment():
config_file = './config.toml'
class Conf(Config):
config_str_no_default_or_comment = ConfigProperty(config_type=str)
class Cli(Precept):
config_class = Conf
cli = Cli(config_file=config_file, add_dump_config_command=True)
cli.config.config_format = ConfigFormat.TOML
try:
cli.start(f'dump-configs {config_file}')
finally:
if os.path.exists(config_file):
os.remove(config_file)
def test_dump_config_str_bool_default_less_40_comment():
config_file = './config.toml'
class Conf(Config):
boolean_cfg = ConfigProperty(
config_type=bool, default=True, comment='less than 40'
)
class Cli(Precept):
config_class = Conf
cli = Cli(config_file=config_file, add_dump_config_command=True)
cli.config.config_format = ConfigFormat.TOML
try:
cli.start(f'dump-configs {config_file}')
finally:
if os.path.exists(config_file):
os.remove(config_file)
toml_config = '''
[[nest_list]]
foo = "foo"
hello = "hello"
[[nest_list]]
foo = "bar"
hello = "world"
'''
def test_toml_list():
class Conf(Config):
nest_list = ConfigProperty(config_type=list)
conf = Conf()
config_file = './config.toml'
with open(config_file, 'w') as f:
f.write(toml_config)
conf.config_format = ConfigFormat.TOML
conf.read_file(config_file)
assert conf.nest_list[0]['foo'] == 'foo' | tests/test_configs.py | import json
import os
from concurrent.futures import ThreadPoolExecutor
import pytest
from ruamel import yaml
from precept import (
Precept, Command, Argument, Config, ConfigProperty, Nestable, ConfigFormat,
config_factory)
override_configs = {
'config_int': 25,
'config_str': 'bar',
'config_list': [5, 4, 5],
'config_nested': {
'nested_str': 'foo',
}
}
config_files = [
'config.yml', './tests/configs.yml', './tests/configs2.yml'
]
class ConfigTest(Config):
"""root_comment"""
config_str = ConfigProperty(
comment='comment_string',
config_type=str,
auto_environ=True
)
config_str_with_default = ConfigProperty(
default='Default foo bar',
auto_environ=True,
)
config_int = ConfigProperty(default=10, auto_environ=True)
config_float = ConfigProperty(
default=89.99,
comment='comment_float',
auto_environ=True,
)
config_list = ConfigProperty(
default=[1, 2, 3],
auto_environ=True,
)
config_auto_global = ConfigProperty(
default=333, auto_global=True, comment='comment_auto_global'
)
class ConfigNested(Nestable):
"""docstring_comment"""
nested_str = ConfigProperty(
default='nested',
comment='nested_comment'
)
class DoubleNested(Nestable):
"""doubly"""
double = ConfigProperty(
default=2.2, comment='double_comment_nested'
)
double_nested: DoubleNested = None
config_nested: ConfigNested = None
override = {
'config_int': 22,
'config_str': 'foo',
'config_float': 55.77,
'config_str_with_default': 'not default',
'config_list': [5, 4, 5],
'config_nested': {
'nested_str': 'hello',
'double_nested': {'double': 77.77}
}
}
class ConfigCli(Precept):
default_configs = {
'config_int': 1,
'config_str': 'foo',
'config_list': [1, 2, 3],
'config_nested': {
'nested_str': 'bar',
}
}
result = None
def __init__(self):
super().__init__(
config_file=config_files,
executor=ThreadPoolExecutor(),
add_dump_config_command=True,
)
@Command(
Argument(
'config_name',
type=str,
)
)
async def use_config(self, config_name):
self.result = getattr(self.config, config_name)
@pytest.mark.parametrize(
'config_name, config_value', list(ConfigCli.default_configs.items())
)
def test_config_defaults(config_name, config_value):
cli = ConfigCli()
cli.start(f'--quiet use-config {config_name}'.split(' '))
assert cli.result == config_value
@pytest.mark.parametrize(
'config_name, config_value', list(override_configs.items())
)
def test_config_file(config_name, config_value):
config_file = './config.yml'
try:
cli = ConfigCli()
cli.config.read_dict(override_configs)
cli.config.save(config_file)
cli.start(f'--quiet use-config {config_name}'.split(' '))
assert cli.result == config_value
finally:
if os.path.exists(config_file):
os.remove(config_file)
@pytest.mark.parametrize(
'config_name, config_value', list(override_configs.items())
)
def test_config_override(config_name, config_value):
config_file = './custom.yml'
try:
cli = ConfigCli()
cli.config.read_dict(override_configs)
cli.config.save(config_file)
cli.start(
f'--quiet --config-file {config_file}'
f' use-config {config_name}'.split(' ')
)
assert cli.result == config_value
finally:
if os.path.exists(config_file):
os.remove(config_file)
def test_dump_config_defaults():
config_file = './test.yml'
try:
cli = ConfigCli()
cli.config.config_format = ConfigFormat.YML
cli.start(f'--quiet dump-configs {config_file}'.split(' '))
assert os.path.exists(config_file)
with open(config_file, 'r') as f:
configs = yaml.load(f, Loader=yaml.RoundTripLoader)
for k, v in cli.default_configs.items():
assert configs[k] == v
finally:
if os.path.exists(config_file):
os.remove(config_file)
def test_dump_config_current_configs():
config_file = './config.yml'
output = './output.yml'
try:
cli = ConfigCli()
cli.config.config_format = ConfigFormat.YML
cli.config.read_dict(override_configs)
cli.config.save(config_file)
cli.start(f'--quiet dump-configs {output}'.split(' '))
with open(config_file, 'r') as f:
configs = yaml.load(f, Loader=yaml.RoundTripLoader)
for k, v in override_configs.items():
assert configs[k] == v
finally:
if os.path.exists(config_file):
os.remove(config_file)
if os.path.exists(output):
os.remove(output)
@pytest.mark.parametrize(
'level', list(range(len(config_files)))
)
def test_multi_configs(level):
config_file = config_files[level]
try:
cli = ConfigCli()
cli.config.read_dict(override_configs)
cli.config.save(config_file)
for k, v in override_configs.items():
cli.start(f'--quiet use-config {k}'.split(' '))
assert cli.result == v
finally:
if os.path.exists(config_file):
os.remove(config_file)
def test_config_class():
cfg = ConfigTest()
# Default values assertions
assert cfg.config_str_with_default == 'Default foo bar'
assert cfg.config_nested.nested_str == 'nested'
assert cfg.config_nested.double_nested.double == 2.2
assert cfg.config_str is None
assert cfg.config_list == [1, 2, 3]
cfg.read_dict(override)
# Changed values assertions
assert cfg.config_str == 'foo'
assert cfg.config_nested.nested_str == 'hello'
assert cfg['config_nested']['nested_str'] == 'hello'
# pylint: disable=unsubscriptable-object
assert cfg.config_nested['nested_str'] == 'hello'
assert cfg.config_str_with_default == 'not default'
assert cfg.config_nested.double_nested.double == 77.77
@pytest.mark.parametrize('config_format', [
ConfigFormat.YML, ConfigFormat.INI, ConfigFormat.TOML
])
def test_config_comments(tmp_path, config_format):
cfg = ConfigTest(config_format=config_format)
config_file = os.path.join(tmp_path, 'configs')
cfg.read_dict(override)
cfg.save(config_file)
cfg2 = ConfigTest(config_format=config_format)
cfg2.read_file(config_file)
# Test that the comment are not included in the values
assert cfg2.config_str == 'foo'
assert cfg2.config_float == 55.77
assert cfg2.config_nested.nested_str == 'hello'
assert cfg2.config_nested.double_nested.double == 77.77
assert cfg2.config_list == [5, 4, 5]
with open(config_file) as f:
test = f.read()
for comment in (
'comment_string', 'comment_float', 'docstring_comment',
'nested_comment', 'double_comment_nested', 'doubly', 'root_comment'
):
assert comment in test
def test_config_json(tmp_path):
cfg = ConfigTest(config_format=ConfigFormat.JSON)
config_file = os.path.join(tmp_path, 'config.json')
cfg.save(config_file)
with open(config_file) as f:
data = json.load(f)
assert data['config_nested']['nested_str'] == 'nested'
@pytest.mark.parametrize(
'name, value', list(
x for x in override.items() if not isinstance(x[1], dict)
)
)
def test_config_environ(monkeypatch, name, value):
monkeypatch.setenv(
name.upper(),
str(value)
if not isinstance(value, list)
else yaml.round_trip_dump(value)
)
cfg = ConfigTest()
assert getattr(cfg, name) == value
# pylint: disable=no-member
def test_config_factory():
d = {'flat': 'face', 'nested': {'double': {'keyed': 'alright'}}}
cls = config_factory(d)
cfg = cls()
assert cfg.flat == 'face'
assert cfg.nested.double.keyed == 'alright'
@pytest.mark.parametrize(
'config_name, config_value', list(override.items())
)
def test_new_config_cli(config_name, config_value):
class Cfg(ConfigCli):
config = ConfigTest()
cli = Cfg()
cli.config.read_dict(override)
cli.start(f'--quiet use-config {config_name}'.split(' '))
assert cli.result == config_value
def test_config_get_root():
# Bug used to raise an error, should always return the root.
c = ConfigTest()
root = c.get_root()
assert root is c
def test_config_auto_global():
class Cfg(ConfigCli):
config = ConfigTest()
cli = Cfg()
cli.start('--config-auto-global=77 use-config config_auto_global'.split())
assert cli.result == 77
def test_config_set():
cfg = ConfigTest()
cfg.config_str = 'Changed'
assert cfg.config_str == 'Changed'
cfg.config_nested.nested_str = 'Also changed'
assert cfg.config_nested.nested_str == 'Also changed'
def test_config_order(tmp_path):
# Config order should be
# - cli arguments
# - updated dict values
# - set values
# - config file.
cfg = ConfigTest()
cfg._app = ConfigCli()
cfg.config_str = 'changed 2'
cfg.config_nested.nested_str = 'changed one'
config_path = os.path.join(tmp_path, 'config.toml')
# Test changed values stays after reading dict.
cfg.read_dict({'config_str': 'updated'})
assert cfg.config_nested.nested_str == 'changed one'
assert cfg.config_str == 'updated'
# Test that reading the config file doesn't change set values
cfg2 = ConfigTest()
cfg2.config_str_with_default = 'Changed again'
cfg2.config_nested.double_nested.double = 88
cfg2.save(config_path)
cfg.read_file(config_path)
assert cfg.config_str_with_default == 'Changed again'
assert cfg.config_str == 'updated'
assert cfg.config_nested.double_nested.double == 88
assert cfg.config_nested.nested_str == 'changed one'
# Test argument take precedence over all
cfg._app.cli.globals = {
'config_auto_global': 555
}
cfg.config_auto_global = 111
assert cfg.config_auto_global == 555
def test_multi_config_instances():
cfg1 = ConfigTest()
cfg2 = ConfigTest()
cfg1.config_str = 'Foo'
assert cfg1.config_str != cfg2.config_str
cfg2.config_nested.nested_str = 'multi-instance'
assert cfg1.config_nested.nested_str != cfg2.config_nested.nested_str
cfg1.config_nested.double_nested.double = 3.0
assert (
cfg1.config_nested.double_nested.double
!= cfg2.config_nested.double_nested.double
)
def test_dump_config_str_no_default_no_comment():
config_file = './config.toml'
class Conf(Config):
config_str_no_default_or_comment = ConfigProperty(config_type=str)
class Cli(Precept):
config_class = Conf
cli = Cli(config_file=config_file, add_dump_config_command=True)
cli.config.config_format = ConfigFormat.TOML
try:
cli.start(f'dump-configs {config_file}')
finally:
if os.path.exists(config_file):
os.remove(config_file)
def test_dump_config_str_bool_default_less_40_comment():
config_file = './config.toml'
class Conf(Config):
boolean_cfg = ConfigProperty(
config_type=bool, default=True, comment='less than 40'
)
class Cli(Precept):
config_class = Conf
cli = Cli(config_file=config_file, add_dump_config_command=True)
cli.config.config_format = ConfigFormat.TOML
try:
cli.start(f'dump-configs {config_file}')
finally:
if os.path.exists(config_file):
os.remove(config_file)
toml_config = '''
[[nest_list]]
foo = "foo"
hello = "hello"
[[nest_list]]
foo = "bar"
hello = "world"
'''
def test_toml_list():
class Conf(Config):
nest_list = ConfigProperty(config_type=list)
conf = Conf()
config_file = './config.toml'
with open(config_file, 'w') as f:
f.write(toml_config)
conf.config_format = ConfigFormat.TOML
conf.read_file(config_file)
assert conf.nest_list[0]['foo'] == 'foo' | 0.388038 | 0.166354 |
from galaxy_analysis.plot.plot_styles import *
from galaxy_analysis.analysis.compute_time_average import compute_time_average
from galaxy_analysis.utilities import utilities
import sys
import numpy as np
import matplotlib.pyplot as plt
#filepath = '/mnt/ceph/users/emerick/enzo_runs/pleiades/starIC/run11_30km/final_sndriving'
def plot(workdir = './', t_min = 250.0, t_max = 350.0,
dv = 10, outdir = './'):
phase_colors = {'cold' : 'C0', 'warm' : 'C1', 'hot' : 'C3',
'WNM' : 'C0', 'WIM' : 'C1', 'HIM' : 'C3'}
# override with global
for k in phase_colors:
if k in color_dict.keys():
phase_colors[k] = color_dict[k]
labels = {'cold' : 'Cold' , 'warm' : 'Warm', 'hot' : 'Hot',
'WNM' : "WNM", "WIM" : "WIM", "HIM" : "HIM"}
fig, ax = plt.subplots()
fig.set_size_inches(8,8)
sum = None
for phase in ['WNM','WIM','HIM']: #['cold','warm','hot']:
x,avg,min,max,std = compute_time_average(['gas_profiles','velocity','halo',phase], tmin = t_min, tmax = t_max,
dir = workdir, x_field = 'vbins')
print(np.min(x), np.max(x))
x, avg = utilities.simple_rebin(x, avg, dv) # re-bin in 10 km/s
print(np.min(x), np.max(x))
plot_histogram(ax, x, avg, color = phase_colors[phase], lw = line_width,
ls = '-', label = labels[phase])
if sum is None:
sum = 1.0 * avg
else:
sum += avg
plot_histogram(ax, x, sum, color = 'black', lw = line_width, ls = '-', label = 'Total')
ax.set_xlabel(r'Radial Velocity (km s$^{-1})$')
ax.set_ylabel(r'Mass (M$_{\odot}$)')
ax.semilogy()
ymin = 0.001
ax.set_xlim(np.min(x[:-1][sum>=ymin]) ,np.max( x[1:][sum>=ymin] ))
ax.set_ylim(ymin, 4.0E5)
ax.plot([0.0,0.0], ax.get_ylim(), lw = 2.0, color = 'black', ls = '--')
plt.minorticks_on()
plt.tight_layout()
ax.legend(loc='best')
fig.savefig(outdir + 'velocity_distribution_time_average.png')
plt.close()
f = open(outdir + 'velocity_percentiles.dat','w')
cum_sum = np.cumsum(sum)
percent = cum_sum / (cum_sum[-1]) * 100
f.write("#percentile bin val\n")
for q in np.arange(0,100,5):
bin = np.max( [len(percent[percent <= q]) - 1, 0])
f.write("%3i percentile: %3.3E %3.3E\n"%(q, bin, x[bin]))
f.write("#outflowing gas ONLY\n")
xcent = 0.5 * (x[1:] + x[:-1])
x = x[x>0]
sum = sum[ xcent > 0.0]
cum_sum = np.cumsum(sum)
percent = cum_sum / (cum_sum[-1]) * 100.0
for q in np.arange(0,100,5):
bin = np.max( [ len(percent[percent <= q]) - 1, 0])
f.write("%3i percentile: %3.3E %3.3E\n"%(q, bin, x[bin]))
f.close()
return
if __name__ == "__main__":
work_dir = './'
if len(sys.argv) > 1:
work_dir = sys.argv[1]
out_dir = './'
if len(sys.argv) > 2:
out_dir = sys.argv[2]
tmin, tmax = 250.0, 350.0
if len(sys.argv) > 3:
tmin = float(sys.argv[3])
if len(sys.argv) > 4:
tmax = float(sys.argv[4])
plot(workdir = work_dir, outdir = out_dir, t_min = tmin, t_max = tmax) | method_paper_plots/time_average_velocity.py | from galaxy_analysis.plot.plot_styles import *
from galaxy_analysis.analysis.compute_time_average import compute_time_average
from galaxy_analysis.utilities import utilities
import sys
import numpy as np
import matplotlib.pyplot as plt
#filepath = '/mnt/ceph/users/emerick/enzo_runs/pleiades/starIC/run11_30km/final_sndriving'
def plot(workdir = './', t_min = 250.0, t_max = 350.0,
dv = 10, outdir = './'):
phase_colors = {'cold' : 'C0', 'warm' : 'C1', 'hot' : 'C3',
'WNM' : 'C0', 'WIM' : 'C1', 'HIM' : 'C3'}
# override with global
for k in phase_colors:
if k in color_dict.keys():
phase_colors[k] = color_dict[k]
labels = {'cold' : 'Cold' , 'warm' : 'Warm', 'hot' : 'Hot',
'WNM' : "WNM", "WIM" : "WIM", "HIM" : "HIM"}
fig, ax = plt.subplots()
fig.set_size_inches(8,8)
sum = None
for phase in ['WNM','WIM','HIM']: #['cold','warm','hot']:
x,avg,min,max,std = compute_time_average(['gas_profiles','velocity','halo',phase], tmin = t_min, tmax = t_max,
dir = workdir, x_field = 'vbins')
print(np.min(x), np.max(x))
x, avg = utilities.simple_rebin(x, avg, dv) # re-bin in 10 km/s
print(np.min(x), np.max(x))
plot_histogram(ax, x, avg, color = phase_colors[phase], lw = line_width,
ls = '-', label = labels[phase])
if sum is None:
sum = 1.0 * avg
else:
sum += avg
plot_histogram(ax, x, sum, color = 'black', lw = line_width, ls = '-', label = 'Total')
ax.set_xlabel(r'Radial Velocity (km s$^{-1})$')
ax.set_ylabel(r'Mass (M$_{\odot}$)')
ax.semilogy()
ymin = 0.001
ax.set_xlim(np.min(x[:-1][sum>=ymin]) ,np.max( x[1:][sum>=ymin] ))
ax.set_ylim(ymin, 4.0E5)
ax.plot([0.0,0.0], ax.get_ylim(), lw = 2.0, color = 'black', ls = '--')
plt.minorticks_on()
plt.tight_layout()
ax.legend(loc='best')
fig.savefig(outdir + 'velocity_distribution_time_average.png')
plt.close()
f = open(outdir + 'velocity_percentiles.dat','w')
cum_sum = np.cumsum(sum)
percent = cum_sum / (cum_sum[-1]) * 100
f.write("#percentile bin val\n")
for q in np.arange(0,100,5):
bin = np.max( [len(percent[percent <= q]) - 1, 0])
f.write("%3i percentile: %3.3E %3.3E\n"%(q, bin, x[bin]))
f.write("#outflowing gas ONLY\n")
xcent = 0.5 * (x[1:] + x[:-1])
x = x[x>0]
sum = sum[ xcent > 0.0]
cum_sum = np.cumsum(sum)
percent = cum_sum / (cum_sum[-1]) * 100.0
for q in np.arange(0,100,5):
bin = np.max( [ len(percent[percent <= q]) - 1, 0])
f.write("%3i percentile: %3.3E %3.3E\n"%(q, bin, x[bin]))
f.close()
return
if __name__ == "__main__":
work_dir = './'
if len(sys.argv) > 1:
work_dir = sys.argv[1]
out_dir = './'
if len(sys.argv) > 2:
out_dir = sys.argv[2]
tmin, tmax = 250.0, 350.0
if len(sys.argv) > 3:
tmin = float(sys.argv[3])
if len(sys.argv) > 4:
tmax = float(sys.argv[4])
plot(workdir = work_dir, outdir = out_dir, t_min = tmin, t_max = tmax) | 0.325628 | 0.297757 |
from conans import ConanFile, tools, AutoToolsBuildEnvironment
import os
class LibX264Conan(ConanFile):
name = "libx264"
version = "20190605"
url = "https://github.com/bincrafters/conan-libx264"
homepage = "https://www.videolan.org/developers/x264.html"
author = "Bincrafters <<EMAIL>>"
description = "x264 is a free software library and application for encoding video streams into the " \
"H.264/MPEG-4 AVC compression format"
topics = ("conan", "libx264", "video", "encoding")
license = "GPL-2.0"
exports_sources = ["CMakeLists.txt", "LICENSE"]
settings = "os", "arch", "compiler", "build_type"
options = {"shared": [True, False], "fPIC": [True, False], "bit_depth": [8, 10, "all"]}
default_options = {'shared': False, 'fPIC': True, 'bit_depth': 'all'}
build_requires = "nasm_installer/2.13.02@bincrafters/stable"
_source_subfolder = "sources"
@property
def _is_mingw_windows(self):
return self.settings.os == 'Windows' and self.settings.compiler == 'gcc' and os.name == 'nt'
@property
def _is_msvc(self):
return self.settings.compiler == 'Visual Studio'
def build_requirements(self):
if self._is_mingw_windows or self._is_msvc:
self.build_requires("cygwin_installer/2.9.0@bincrafters/stable")
def config_options(self):
if self.settings.os == 'Windows':
del self.options.fPIC
def configure(self):
del self.settings.compiler.libcxx
def source(self):
source_url =\
"http://download.videolan.org/pub/videolan/x264/snapshots/x264-snapshot-%s-2245.tar.bz2" % self.version
tools.get(source_url, sha256="c75203ef4759e4d7bc38e686b156c54c43b78edc73123c0b25db5224758bd1fc")
extracted_dir = 'x264-snapshot-%s-2245' % self.version
os.rename(extracted_dir, self._source_subfolder)
def _build_configure(self):
with tools.chdir(self._source_subfolder):
args = ['--disable-cli']
if self.options.shared:
args.append('--enable-shared')
else:
args.append('--enable-static')
if self.settings.os != 'Windows' and self.options.fPIC:
args.append('--enable-pic')
if self.settings.build_type == 'Debug':
args.append('--enable-debug')
args.append('--bit-depth=%s' % str(self.options.bit_depth))
env_vars = dict()
if self._is_msvc:
env_vars['CC'] = 'cl'
with tools.environment_append(env_vars):
env_build = AutoToolsBuildEnvironment(self, win_bash=self._is_mingw_windows or self._is_msvc)
if self._is_msvc:
env_build.flags.append('-%s' % str(self.settings.compiler.runtime))
# cannot open program database ... if multiple CL.EXE write to the same .PDB file, please use /FS
env_build.flags.append('-FS')
env_build.configure(args=args, build=False, host=False)
env_build.make()
env_build.install()
def build(self):
if self._is_msvc:
with tools.vcvars(self.settings):
self._build_configure()
else:
self._build_configure()
def package(self):
self.copy(pattern="COPYING", src='sources', dst='licenses')
def package_info(self):
if self._is_msvc:
self.cpp_info.libs = ['libx264.dll.lib' if self.options.shared else 'libx264']
if self.options.shared:
self.cpp_info.defines.append("X264_API_IMPORTS")
elif self._is_mingw_windows:
self.cpp_info.libs = ['x264.dll' if self.options.shared else 'x264']
else:
self.cpp_info.libs = ['x264']
if self.settings.os == "Linux":
self.cpp_info.libs.extend(['dl', 'pthread']) | conanfile.py |
from conans import ConanFile, tools, AutoToolsBuildEnvironment
import os
class LibX264Conan(ConanFile):
name = "libx264"
version = "20190605"
url = "https://github.com/bincrafters/conan-libx264"
homepage = "https://www.videolan.org/developers/x264.html"
author = "Bincrafters <<EMAIL>>"
description = "x264 is a free software library and application for encoding video streams into the " \
"H.264/MPEG-4 AVC compression format"
topics = ("conan", "libx264", "video", "encoding")
license = "GPL-2.0"
exports_sources = ["CMakeLists.txt", "LICENSE"]
settings = "os", "arch", "compiler", "build_type"
options = {"shared": [True, False], "fPIC": [True, False], "bit_depth": [8, 10, "all"]}
default_options = {'shared': False, 'fPIC': True, 'bit_depth': 'all'}
build_requires = "nasm_installer/2.13.02@bincrafters/stable"
_source_subfolder = "sources"
@property
def _is_mingw_windows(self):
return self.settings.os == 'Windows' and self.settings.compiler == 'gcc' and os.name == 'nt'
@property
def _is_msvc(self):
return self.settings.compiler == 'Visual Studio'
def build_requirements(self):
if self._is_mingw_windows or self._is_msvc:
self.build_requires("cygwin_installer/2.9.0@bincrafters/stable")
def config_options(self):
if self.settings.os == 'Windows':
del self.options.fPIC
def configure(self):
del self.settings.compiler.libcxx
def source(self):
source_url =\
"http://download.videolan.org/pub/videolan/x264/snapshots/x264-snapshot-%s-2245.tar.bz2" % self.version
tools.get(source_url, sha256="c75203ef4759e4d7bc38e686b156c54c43b78edc73123c0b25db5224758bd1fc")
extracted_dir = 'x264-snapshot-%s-2245' % self.version
os.rename(extracted_dir, self._source_subfolder)
def _build_configure(self):
with tools.chdir(self._source_subfolder):
args = ['--disable-cli']
if self.options.shared:
args.append('--enable-shared')
else:
args.append('--enable-static')
if self.settings.os != 'Windows' and self.options.fPIC:
args.append('--enable-pic')
if self.settings.build_type == 'Debug':
args.append('--enable-debug')
args.append('--bit-depth=%s' % str(self.options.bit_depth))
env_vars = dict()
if self._is_msvc:
env_vars['CC'] = 'cl'
with tools.environment_append(env_vars):
env_build = AutoToolsBuildEnvironment(self, win_bash=self._is_mingw_windows or self._is_msvc)
if self._is_msvc:
env_build.flags.append('-%s' % str(self.settings.compiler.runtime))
# cannot open program database ... if multiple CL.EXE write to the same .PDB file, please use /FS
env_build.flags.append('-FS')
env_build.configure(args=args, build=False, host=False)
env_build.make()
env_build.install()
def build(self):
if self._is_msvc:
with tools.vcvars(self.settings):
self._build_configure()
else:
self._build_configure()
def package(self):
self.copy(pattern="COPYING", src='sources', dst='licenses')
def package_info(self):
if self._is_msvc:
self.cpp_info.libs = ['libx264.dll.lib' if self.options.shared else 'libx264']
if self.options.shared:
self.cpp_info.defines.append("X264_API_IMPORTS")
elif self._is_mingw_windows:
self.cpp_info.libs = ['x264.dll' if self.options.shared else 'x264']
else:
self.cpp_info.libs = ['x264']
if self.settings.os == "Linux":
self.cpp_info.libs.extend(['dl', 'pthread']) | 0.382141 | 0.136868 |
from collections import deque
from operator import itemgetter
import networkx as nx
from ..utils import arbitrary_element
# Author credit (newline-joined so several authors can be listed).
__author__ = """\n""".join(['<NAME> <<EMAIL>>'])
# Public API of this module.
__all__ = ['cuthill_mckee_ordering',
           'reverse_cuthill_mckee_ordering']
def cuthill_mckee_ordering(G, heuristic=None):
    """Generate an ordering (permutation) of the graph nodes to make
    a sparse matrix.

    Uses the Cuthill-McKee heuristic (based on breadth-first search) [1]_.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    heuristic : function, optional
        Function to choose starting node for RCM algorithm. If None
        a node from a pseudo-peripheral pair is used. A user-defined function
        can be supplied that takes a graph object and returns a single node.

    Returns
    -------
    nodes : generator
        Generator of nodes in Cuthill-McKee ordering.

    Examples
    --------
    >>> from networkx.utils import cuthill_mckee_ordering
    >>> G = nx.path_graph(4)
    >>> rcm = list(cuthill_mckee_ordering(G))
    >>> A = nx.adjacency_matrix(G, nodelist=rcm)

    Smallest degree node as heuristic function:

    >>> def smallest_degree(G):
    ...     return min(G, key=G.degree)
    >>> rcm = list(cuthill_mckee_ordering(G, heuristic=smallest_degree))

    See Also
    --------
    reverse_cuthill_mckee_ordering

    Notes
    -----
    The optimal solution to the bandwidth reduction is NP-complete [2]_.

    References
    ----------
    .. [1] E. Cuthill and J. McKee.
       Reducing the bandwidth of sparse symmetric matrices,
       In Proc. 24th Nat. Conf. ACM, pages 157-172, 1969.
       http://doi.acm.org/10.1145/800195.805928
    .. [2] Steven S. Skiena. 1997. The Algorithm Design Manual.
       Springer-Verlag New York, Inc., New York, NY, USA.
    """
    # Order each connected component independently and concatenate.
    for c in nx.connected_components(G):
        for n in connected_cuthill_mckee_ordering(G.subgraph(c), heuristic):
            yield n
def reverse_cuthill_mckee_ordering(G, heuristic=None):
    """Generate an ordering (permutation) of the graph nodes to make
    a sparse matrix.

    Uses the reverse Cuthill-McKee heuristic (based on breadth-first search)
    [1]_.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    heuristic : function, optional
        Function to choose starting node for RCM algorithm. If None
        a node from a pseudo-peripheral pair is used. A user-defined function
        can be supplied that takes a graph object and returns a single node.

    Returns
    -------
    nodes : generator
        Generator of nodes in reverse Cuthill-McKee ordering.

    Examples
    --------
    >>> from networkx.utils import reverse_cuthill_mckee_ordering
    >>> G = nx.path_graph(4)
    >>> rcm = list(reverse_cuthill_mckee_ordering(G))
    >>> A = nx.adjacency_matrix(G, nodelist=rcm)

    Smallest degree node as heuristic function:

    >>> def smallest_degree(G):
    ...     return min(G, key=G.degree)
    >>> rcm = list(reverse_cuthill_mckee_ordering(G, heuristic=smallest_degree))

    See Also
    --------
    cuthill_mckee_ordering

    Notes
    -----
    The optimal solution to the bandwidth reduction is NP-complete [2]_.

    References
    ----------
    .. [1] E. Cuthill and J. McKee.
       Reducing the bandwidth of sparse symmetric matrices,
       In Proc. 24th Nat. Conf. ACM, pages 157-172, 1969.
       http://doi.acm.org/10.1145/800195.805928
    .. [2] Steven S. Skiena. 1997. The Algorithm Design Manual.
       Springer-Verlag New York, Inc., New York, NY, USA.
    """
    # Materialize the forward ordering, then reverse it.
    return reversed(list(cuthill_mckee_ordering(G, heuristic=heuristic)))
def connected_cuthill_mckee_ordering(G, heuristic=None):
    """Cuthill-McKee ordering for a single connected graph.

    Starts from ``heuristic(G)`` (or a pseudo-peripheral node when no
    heuristic is given) and yields nodes in breadth-first order,
    visiting each node's unvisited neighbors in ascending-degree order.
    """
    if heuristic is None:
        start = pseudo_peripheral_node(G)
    else:
        start = heuristic(G)
    visited = {start}
    queue = deque([start])
    while queue:
        parent = queue.popleft()
        yield parent
        # G.degree(...) already yields (node, degree) pairs; sort them
        # directly by degree (no intermediate list needed).
        nd = sorted(G.degree(set(G[parent]) - visited),
                    key=itemgetter(1))
        children = [n for n, d in nd]
        visited.update(children)
        queue.extend(children)
def pseudo_peripheral_node(G):
    """Return one node of a "pseudo peripheral pair" of G.

    Repeatedly jumps to a minimum-degree node among the nodes farthest
    from the current one until the eccentricity stops increasing; the
    result is a good starting node for Cuthill-McKee.
    """
    # Renamed the ambiguous `l` (E741) and dropped the unused `u`.
    v = arbitrary_element(G)
    best_ecc = 0  # eccentricity of the best candidate so far
    while True:
        spl = dict(nx.shortest_path_length(G, v))
        ecc = max(spl.values())
        if ecc <= best_ecc:
            break
        best_ecc = ecc
        # Among the farthest nodes, move to one of minimum degree.
        farthest = (n for n, dist in spl.items() if dist == ecc)
        v, deg = min(G.degree(farthest), key=itemgetter(1))
    return v
from operator import itemgetter
import networkx as nx
from ..utils import arbitrary_element
# Author credit (newline-joined so several authors can be listed).
__author__ = """\n""".join(['<NAME> <<EMAIL>>'])
# Public API of this module.
__all__ = ['cuthill_mckee_ordering',
           'reverse_cuthill_mckee_ordering']
def cuthill_mckee_ordering(G, heuristic=None):
    """Generate an ordering (permutation) of the graph nodes to make
    a sparse matrix.

    Uses the Cuthill-McKee heuristic (based on breadth-first search) [1]_.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    heuristic : function, optional
        Function to choose starting node for RCM algorithm. If None
        a node from a pseudo-peripheral pair is used. A user-defined function
        can be supplied that takes a graph object and returns a single node.

    Returns
    -------
    nodes : generator
        Generator of nodes in Cuthill-McKee ordering.

    Examples
    --------
    >>> from networkx.utils import cuthill_mckee_ordering
    >>> G = nx.path_graph(4)
    >>> rcm = list(cuthill_mckee_ordering(G))
    >>> A = nx.adjacency_matrix(G, nodelist=rcm)

    Smallest degree node as heuristic function:

    >>> def smallest_degree(G):
    ...     return min(G, key=G.degree)
    >>> rcm = list(cuthill_mckee_ordering(G, heuristic=smallest_degree))

    See Also
    --------
    reverse_cuthill_mckee_ordering

    Notes
    -----
    The optimal solution to the bandwidth reduction is NP-complete [2]_.

    References
    ----------
    .. [1] E. Cuthill and J. McKee.
       Reducing the bandwidth of sparse symmetric matrices,
       In Proc. 24th Nat. Conf. ACM, pages 157-172, 1969.
       http://doi.acm.org/10.1145/800195.805928
    .. [2] Steven S. Skiena. 1997. The Algorithm Design Manual.
       Springer-Verlag New York, Inc., New York, NY, USA.
    """
    # Order each connected component independently and concatenate.
    for c in nx.connected_components(G):
        for n in connected_cuthill_mckee_ordering(G.subgraph(c), heuristic):
            yield n
def reverse_cuthill_mckee_ordering(G, heuristic=None):
    """Generate an ordering (permutation) of the graph nodes to make
    a sparse matrix.

    Uses the reverse Cuthill-McKee heuristic (based on breadth-first search)
    [1]_.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    heuristic : function, optional
        Function to choose starting node for RCM algorithm. If None
        a node from a pseudo-peripheral pair is used. A user-defined function
        can be supplied that takes a graph object and returns a single node.

    Returns
    -------
    nodes : generator
        Generator of nodes in reverse Cuthill-McKee ordering.

    Examples
    --------
    >>> from networkx.utils import reverse_cuthill_mckee_ordering
    >>> G = nx.path_graph(4)
    >>> rcm = list(reverse_cuthill_mckee_ordering(G))
    >>> A = nx.adjacency_matrix(G, nodelist=rcm)

    Smallest degree node as heuristic function:

    >>> def smallest_degree(G):
    ...     return min(G, key=G.degree)
    >>> rcm = list(reverse_cuthill_mckee_ordering(G, heuristic=smallest_degree))

    See Also
    --------
    cuthill_mckee_ordering

    Notes
    -----
    The optimal solution to the bandwidth reduction is NP-complete [2]_.

    References
    ----------
    .. [1] E. Cuthill and J. McKee.
       Reducing the bandwidth of sparse symmetric matrices,
       In Proc. 24th Nat. Conf. ACM, pages 157-172, 1969.
       http://doi.acm.org/10.1145/800195.805928
    .. [2] Steven S. Skiena. 1997. The Algorithm Design Manual.
       Springer-Verlag New York, Inc., New York, NY, USA.
    """
    # Materialize the forward ordering, then reverse it.
    return reversed(list(cuthill_mckee_ordering(G, heuristic=heuristic)))
def connected_cuthill_mckee_ordering(G, heuristic=None):
    """Cuthill-McKee ordering for a single connected graph.

    Starts from ``heuristic(G)`` (or a pseudo-peripheral node when no
    heuristic is given) and yields nodes in breadth-first order,
    visiting each node's unvisited neighbors in ascending-degree order.
    """
    if heuristic is None:
        start = pseudo_peripheral_node(G)
    else:
        start = heuristic(G)
    visited = {start}
    queue = deque([start])
    while queue:
        parent = queue.popleft()
        yield parent
        # G.degree(...) already yields (node, degree) pairs; sort them
        # directly by degree (no intermediate list needed).
        nd = sorted(G.degree(set(G[parent]) - visited),
                    key=itemgetter(1))
        children = [n for n, d in nd]
        visited.update(children)
        queue.extend(children)
def pseudo_peripheral_node(G):
    """Return one node of a "pseudo peripheral pair" of G.

    Repeatedly jumps to a minimum-degree node among the nodes farthest
    from the current one until the eccentricity stops increasing; the
    result is a good starting node for Cuthill-McKee.
    """
    # Renamed the ambiguous `l` (E741) and dropped the unused `u`.
    v = arbitrary_element(G)
    best_ecc = 0  # eccentricity of the best candidate so far
    while True:
        spl = dict(nx.shortest_path_length(G, v))
        ecc = max(spl.values())
        if ecc <= best_ecc:
            break
        best_ecc = ecc
        # Among the farthest nodes, move to one of minimum degree.
        farthest = (n for n, dist in spl.items() if dist == ecc)
        v, deg = min(G.degree(farthest), key=itemgetter(1))
    return v
"""Caches used by Spack to store data"""
import os
import llnl.util.lang
from llnl.util.filesystem import mkdirp
from llnl.util.symlink import symlink
import spack.config
import spack.error
import spack.fetch_strategy
import spack.paths
import spack.util.file_cache
import spack.util.path
def misc_cache_location():
    """The ``misc_cache`` is Spack's cache for small data.

    Currently the ``misc_cache`` stores indexes for virtual dependency
    providers and for which packages provide which tags.
    """
    configured = spack.config.get(
        'config:misc_cache', spack.paths.default_misc_cache_path)
    return spack.util.path.canonicalize_path(configured)
def _misc_cache():
    # Factory for the lazily-constructed misc_cache singleton.
    return spack.util.file_cache.FileCache(misc_cache_location())
#: Spack's cache for small data (virtual-provider and tag indexes);
#: constructed lazily on first use via the Singleton wrapper.
misc_cache = llnl.util.lang.Singleton(_misc_cache)
def fetch_cache_location():
    """Filesystem cache of downloaded archives.

    This prevents Spack from repeatedly fetching the same files when
    building the same package different ways or multiple times.
    """
    configured = spack.config.get('config:source_cache')
    location = configured if configured else spack.paths.default_fetch_cache_path
    return spack.util.path.canonicalize_path(location)
def _fetch_cache():
    # Factory for the lazily-constructed fetch_cache singleton.
    return spack.fetch_strategy.FsCache(fetch_cache_location())
class MirrorCache(object):
    """Mirror layout for locally cached source archives.

    Stores fetched archives under ``root`` and maintains human-readable
    symlinks that point at the actual storage paths.
    """

    def __init__(self, root, skip_unstable_versions):
        """
        Arguments:
            root (str): root directory of the mirror; made absolute here.
            skip_unstable_versions (bool): when True, callers should not
                cache sources whose content can change over time (e.g.
                the tip of a branch).
        """
        self.root = os.path.abspath(root)
        self.skip_unstable_versions = skip_unstable_versions

    def store(self, fetcher, relative_dest):
        """Fetch and relocate the fetcher's target into our mirror cache."""
        # Note this will archive package sources even if they would not
        # normally be cached (e.g. the current tip of an hg/git branch)
        dst = os.path.join(self.root, relative_dest)
        mkdirp(os.path.dirname(dst))
        fetcher.archive(dst)

    def symlink(self, mirror_ref):
        """Symlink a human-readable path in our mirror to the actual
        storage location."""
        cosmetic_path = os.path.join(self.root, mirror_ref.cosmetic_path)
        storage_path = os.path.join(self.root, mirror_ref.storage_path)
        relative_dst = os.path.relpath(
            storage_path,
            start=os.path.dirname(cosmetic_path))
        if not os.path.exists(cosmetic_path):
            if os.path.lexists(cosmetic_path):
                # In this case the link itself exists but it is broken: remove
                # it and recreate it (in order to fix any symlinks broken prior
                # to https://github.com/spack/spack/pull/13908)
                os.unlink(cosmetic_path)
            mkdirp(os.path.dirname(cosmetic_path))
            symlink(relative_dst, cosmetic_path)
#: Spack's local cache for downloaded source archives; constructed
#: lazily on first use via the Singleton wrapper.
fetch_cache = llnl.util.lang.Singleton(_fetch_cache)
"""Caches used by Spack to store data"""
import os
import llnl.util.lang
from llnl.util.filesystem import mkdirp
from llnl.util.symlink import symlink
import spack.config
import spack.error
import spack.fetch_strategy
import spack.paths
import spack.util.file_cache
import spack.util.path
def misc_cache_location():
    """The ``misc_cache`` is Spack's cache for small data.

    Currently the ``misc_cache`` stores indexes for virtual dependency
    providers and for which packages provide which tags.
    """
    configured = spack.config.get(
        'config:misc_cache', spack.paths.default_misc_cache_path)
    return spack.util.path.canonicalize_path(configured)
def _misc_cache():
    # Factory for the lazily-constructed misc_cache singleton.
    return spack.util.file_cache.FileCache(misc_cache_location())
#: Spack's cache for small data (virtual-provider and tag indexes);
#: constructed lazily on first use via the Singleton wrapper.
misc_cache = llnl.util.lang.Singleton(_misc_cache)
def fetch_cache_location():
    """Filesystem cache of downloaded archives.

    This prevents Spack from repeatedly fetching the same files when
    building the same package different ways or multiple times.
    """
    configured = spack.config.get('config:source_cache')
    location = configured if configured else spack.paths.default_fetch_cache_path
    return spack.util.path.canonicalize_path(location)
def _fetch_cache():
    # Factory for the lazily-constructed fetch_cache singleton.
    return spack.fetch_strategy.FsCache(fetch_cache_location())
class MirrorCache(object):
    """Mirror layout for locally cached source archives.

    Stores fetched archives under ``root`` and maintains human-readable
    symlinks that point at the actual storage paths.
    """

    def __init__(self, root, skip_unstable_versions):
        """
        Arguments:
            root (str): root directory of the mirror; made absolute here.
            skip_unstable_versions (bool): when True, callers should not
                cache sources whose content can change over time (e.g.
                the tip of a branch).
        """
        self.root = os.path.abspath(root)
        self.skip_unstable_versions = skip_unstable_versions

    def store(self, fetcher, relative_dest):
        """Fetch and relocate the fetcher's target into our mirror cache."""
        # Note this will archive package sources even if they would not
        # normally be cached (e.g. the current tip of an hg/git branch)
        dst = os.path.join(self.root, relative_dest)
        mkdirp(os.path.dirname(dst))
        fetcher.archive(dst)

    def symlink(self, mirror_ref):
        """Symlink a human-readable path in our mirror to the actual
        storage location."""
        cosmetic_path = os.path.join(self.root, mirror_ref.cosmetic_path)
        storage_path = os.path.join(self.root, mirror_ref.storage_path)
        relative_dst = os.path.relpath(
            storage_path,
            start=os.path.dirname(cosmetic_path))
        if not os.path.exists(cosmetic_path):
            if os.path.lexists(cosmetic_path):
                # In this case the link itself exists but it is broken: remove
                # it and recreate it (in order to fix any symlinks broken prior
                # to https://github.com/spack/spack/pull/13908)
                os.unlink(cosmetic_path)
            mkdirp(os.path.dirname(cosmetic_path))
            symlink(relative_dst, cosmetic_path)
#: Spack's local cache for downloaded source archives; constructed
#: lazily on first use via the Singleton wrapper.
fetch_cache = llnl.util.lang.Singleton(_fetch_cache)
from __future__ import division
from __future__ import with_statement
# Plot colors for each amino-acid category.
aa_colors = {'start': 'c',
             'basic': 'b',
             'acidic': 'r',
             'polar': 'g',
             'nonpolar': 'y',
             'stop': 'k'}

# RNA alphabet plus the special translation signals.
nucleotides = ['a', 'c', 'g', 'u']
start_codon = 'aug'
stop_codons = ['uaa', 'uag', 'uga']


class Codon:
    """An amino acid (or stop signal): name, one-letter code, chemical
    category, and the plot color derived from that category."""

    def __init__(self, name, letter, category):
        self.name = name
        self.letter = letter
        self.category = category
        self.color = aa_colors[self.category]


# The twenty amino acids, kept as module-level names.
alanine = Codon('alanine', 'A', 'nonpolar')
arginine = Codon('arginine', 'R', 'basic')
asparagine = Codon('asparagine', 'N', 'polar')
aspartic = Codon('aspartic', 'D', 'acidic')
cysteine = Codon('cysteine', 'C', 'polar')
glutamic = Codon('glutamic', 'E', 'acidic')
glutamine = Codon('glutamine', 'Q', 'polar')
glycine = Codon('glycine', 'G', 'polar')
histidine = Codon('histidine', 'H', 'basic')
isoleucine = Codon('isoleucine', 'I', 'nonpolar')
leucine = Codon('leucine', 'L', 'nonpolar')
lysine = Codon('lysine', 'K', 'basic')
methionine = Codon('methionine', 'M', 'start')
phenylalanine = Codon('phenylalanine', 'F', 'nonpolar')
proline = Codon('proline', 'P', 'nonpolar')
serine = Codon('serine', 'S', 'polar')
threonine = Codon('threonine', 'T', 'polar')
tryptophan = Codon('tryptophan', 'W', 'nonpolar')
tyrosine = Codon('tyrosine', 'Y', 'polar')
valine = Codon('valine', 'V', 'nonpolar')

# Standard genetic code, in the original insertion order.  Entries ending
# in '*' are four-fold degenerate families: the third base is any of the
# four nucleotides.
_codon_table = [
    (start_codon, methionine),
    ('ac*', threonine),
    ('aac', asparagine), ('aau', asparagine),
    ('aag', lysine), ('aaa', lysine),
    ('agc', serine), ('agu', serine),
    ('uc*', serine),
    ('aga', arginine), ('agg', arginine),
    ('cg*', arginine),
    ('gu*', valine),
    ('gc*', alanine),
    ('gau', aspartic), ('gac', aspartic),
    ('gaa', glutamic), ('gag', glutamic),
    ('gg*', glycine),
    ('uuu', phenylalanine), ('uuc', phenylalanine),
    ('uua', leucine), ('uug', leucine),
    ('cu*', leucine),
    ('uau', tyrosine), ('uac', tyrosine),
    ('ugu', cysteine), ('ugc', cysteine),
    ('ugg', tryptophan),
    ('cc*', proline),
    ('cau', histidine), ('cac', histidine),
    ('caa', glutamine), ('cag', glutamine),
    ('auu', isoleucine), ('auc', isoleucine), ('aua', isoleucine),
]

codons = {}
for pattern, aa in _codon_table:
    if pattern.endswith('*'):
        # Expand a four-fold degenerate family.
        for nt in nucleotides:
            codons[pattern[:2] + nt] = aa
    else:
        codons[pattern] = aa

# The three stop codons all share one sentinel Codon.
stop = Codon('stop', '!', 'stop')
for c in stop_codons:
    codons[c] = stop
from __future__ import with_statement
# Plot colors for each amino-acid category.
aa_colors = {'start': 'c',
             'basic': 'b',
             'acidic': 'r',
             'polar': 'g',
             'nonpolar': 'y',
             'stop': 'k'}

# RNA alphabet plus the special translation signals.
nucleotides = ['a', 'c', 'g', 'u']
start_codon = 'aug'
stop_codons = ['uaa', 'uag', 'uga']


class Codon:
    """An amino acid (or stop signal): name, one-letter code, chemical
    category, and the plot color derived from that category."""

    def __init__(self, name, letter, category):
        self.name = name
        self.letter = letter
        self.category = category
        self.color = aa_colors[self.category]


# The twenty amino acids, kept as module-level names.
alanine = Codon('alanine', 'A', 'nonpolar')
arginine = Codon('arginine', 'R', 'basic')
asparagine = Codon('asparagine', 'N', 'polar')
aspartic = Codon('aspartic', 'D', 'acidic')
cysteine = Codon('cysteine', 'C', 'polar')
glutamic = Codon('glutamic', 'E', 'acidic')
glutamine = Codon('glutamine', 'Q', 'polar')
glycine = Codon('glycine', 'G', 'polar')
histidine = Codon('histidine', 'H', 'basic')
isoleucine = Codon('isoleucine', 'I', 'nonpolar')
leucine = Codon('leucine', 'L', 'nonpolar')
lysine = Codon('lysine', 'K', 'basic')
methionine = Codon('methionine', 'M', 'start')
phenylalanine = Codon('phenylalanine', 'F', 'nonpolar')
proline = Codon('proline', 'P', 'nonpolar')
serine = Codon('serine', 'S', 'polar')
threonine = Codon('threonine', 'T', 'polar')
tryptophan = Codon('tryptophan', 'W', 'nonpolar')
tyrosine = Codon('tyrosine', 'Y', 'polar')
valine = Codon('valine', 'V', 'nonpolar')

# Standard genetic code, in the original insertion order.  Entries ending
# in '*' are four-fold degenerate families: the third base is any of the
# four nucleotides.
_codon_table = [
    (start_codon, methionine),
    ('ac*', threonine),
    ('aac', asparagine), ('aau', asparagine),
    ('aag', lysine), ('aaa', lysine),
    ('agc', serine), ('agu', serine),
    ('uc*', serine),
    ('aga', arginine), ('agg', arginine),
    ('cg*', arginine),
    ('gu*', valine),
    ('gc*', alanine),
    ('gau', aspartic), ('gac', aspartic),
    ('gaa', glutamic), ('gag', glutamic),
    ('gg*', glycine),
    ('uuu', phenylalanine), ('uuc', phenylalanine),
    ('uua', leucine), ('uug', leucine),
    ('cu*', leucine),
    ('uau', tyrosine), ('uac', tyrosine),
    ('ugu', cysteine), ('ugc', cysteine),
    ('ugg', tryptophan),
    ('cc*', proline),
    ('cau', histidine), ('cac', histidine),
    ('caa', glutamine), ('cag', glutamine),
    ('auu', isoleucine), ('auc', isoleucine), ('aua', isoleucine),
]

codons = {}
for pattern, aa in _codon_table:
    if pattern.endswith('*'):
        # Expand a four-fold degenerate family.
        for nt in nucleotides:
            codons[pattern[:2] + nt] = aa
    else:
        codons[pattern] = aa

# The three stop codons all share one sentinel Codon.
stop = Codon('stop', '!', 'stop')
for c in stop_codons:
    codons[c] = stop
from multiprocessing import Process, Queue, TimeoutError, Value
from multiprocessing.queues import Empty, Full
from abc import abstractmethod, ABCMeta
import logging
import copy
__default_exit_flag__ = Value('b', True)
__fmt__ = "%(levelname)s: %(asctime)s - %(name)s - %(process)s - %(message)s"
class StreamElement(Process):
    """ Subclass this abstract class for concrete implementation
    of pewpew processing

    A StreamElement is one worker process in a pipeline: it pulls dict
    items from ``inqueue``, passes them to :meth:`process`, and pushes
    the result onto ``outqueue``.  Elements are chained together with
    :meth:`set_input` / :meth:`set_output`, which create the connecting
    queues and register exit flags so a consumer knows when all of its
    producers have finished.
    """
    __metaclass__ = ABCMeta

    def __init__(self, exit_flag=None, inqueue=None, outqueue=None, **kwargs):
        """ The base constructor must always be called by the subclass.

        Parameters:
        ==========
        exit_flag: multiprocessing.Value
            A global exit flag. When set to `False`, will cause all
            threads to exit gracefully.
        inqueue: multiprocessing.Queue
            Data queue for incoming data.
        outqueue: multiprocessing.Queue
            Data queue for outgoing data.

        Recognized **kwargs: ``timeout`` (seconds, default 120),
        ``default_queuelen`` (default 10), ``n_tries`` (default 10),
        ``debug`` (default False).
        """
        super(StreamElement, self).__init__()
        self.inqueue = inqueue
        self.outqueue = outqueue
        self.config = kwargs
        self.fail_flag = exit_flag  # Signals False if failure has occurred
        if self.fail_flag is None:
            # No flag supplied: share the module-wide default flag.
            self.fail_flag = __default_exit_flag__
        self.input_flags = []  # Holds values from inputs to signal chain exit
        self.exit_flag = Value('b', True)  # For forwarding
        self.timeout = int(kwargs.get("timeout", 120))
        self.queuelen = int(kwargs.get("default_queuelen", 10))
        self.n_tries = int(kwargs.get("n_tries", 10))
        self.debug = bool(kwargs.get('debug', False))

    def signal_exit_on_failure(fn):
        """Helper decorator which sets appropriate flags when exceptions
        occur in daughter processes.
        """
        # Applied inside the class body, so `fn` is the raw method.
        # `wrapped` forwards keyword arguments only, so decorated methods
        # must be called with explicit keywords (e.g. put_data(data=...)).
        def wrapped(self=None, **kwargs):
            try:
                return fn(self, **kwargs)
            except Exception as e:
                self.log.info("signaling exit to all processes")
                self.log.warning(e)
                self.fail_flag.value = False
                raise e
        return wrapped

    def run(self):
        """Called by multiprocessing.Process.
        Executes main event loop for process.
        """
        self.event_loop()
        msg = "exiting with flags {} {}"
        self.log.debug(msg.format(self.fail_flag.value,
                                  self.exit_flag.value))

    def _log_(self):
        # Logger named after the concrete subclass (the text between the
        # quotes of repr(self.__class__)).
        log = logging.getLogger(str(self.__class__).split('\'')[1])
        # formatter = logging.Formatter(__fmt__)
        return log

    @signal_exit_on_failure
    def get_data(self):
        """ Gets data from the input Queue.

        Returns
        =======
        A dict of pickle-able objects, or None if nothing could be
        fetched before the timeout.
        """
        if not self.check_input_flags():
            # All producers have exited; only drain what is left.
            self.log.debug("Inputs are finished. Setting timeout to 0.")
            self.timeout = 0
        if self.inqueue is not None:
            try:
                return self.inqueue.get(timeout=self.timeout)
            except (TimeoutError, Empty):
                if not self.check_input_flags():
                    # Queue drained and producers done: normal shutdown.
                    self.exit_flag.value = False
                else:
                    # Producers still alive but nothing arrived in time:
                    # treated as a pipeline failure.
                    self.fail_flag.value = False
                return None
        # Source elements (no inqueue) receive an empty payload.
        return {'data': {}, 'meta': {}}

    @signal_exit_on_failure
    def put_data(self, data):
        """ Attempts to put data on the queue for the next node.
        If the data is a list, then it puts the data on the queue
        one item at a time.

        Parameters:
        ===========
        data : list or dict
            The data to put on the queue.

        Note:
        =====
        This function must be called as `self.put_data(data={})`. Where
        the argument keyword must be used explicitely.
        """
        if not self.valid_data(data):
            msg = "cannot understand output data type: {}"
            self.log.warning(msg.format(type(data)))
            return
        if self.outqueue is not None:
            if isinstance(data, list):
                # Enqueue list elements individually (recursive call).
                for i in data:
                    self.put_data(data=i)
            else:
                for try_ in range(self.n_tries):
                    success = False
                    try:
                        # copy.copy: presumably to decouple the queued item
                        # from later mutation by the caller -- confirm.
                        self.outqueue.put(copy.copy(data),
                                          timeout=self.timeout)
                        success = True
                    except (TimeoutError, Full) as e:
                        msg = "Failed putting data in queue: {}".format(e)
                        self.log.warning(msg)
                        if try_ == self.n_tries-1:
                            # Out of retries: signal our own exit and
                            # propagate the error.
                            self.exit_flag.value = False
                            raise e
                        else:
                            self.log.warning("Trying again")
                    if success:
                        break

    def valid_data(self, data):
        """ Validates whether data is valid for the data stream.

        Parameters:
        ===========
        data : list or dict
            Input data which must be validated

        Returns:
        ========
        bool : True if valid data
        """
        if isinstance(data, dict):
            return True
        if isinstance(data, list):
            return True
        return False

    @signal_exit_on_failure
    def on_input_completed(self):
        """ Utility function which wraps the user on_completion function
        and empties data into stream.
        """
        output = self.on_completion()
        if self.valid_data(output):
            self.put_data(data=output)

    @signal_exit_on_failure
    def event_loop(self):
        """ Main event loop. This executes the interior logic of the process.

        Warning:
        =====
        DO NOT OVERWRITE.
        """
        self.log = self._log_()
        self.on_start()
        # Run until either a global failure or our own exit is signaled.
        while self.fail_flag.value and self.exit_flag.value:
            data = self.get_data()
            if data is None:
                continue
            output = self.__process__(data=data)
            if output is None:
                continue
            self.put_data(data=output)
        msg = 'Exiting Loop with flags\tFail:{}\tExit:{}\tInputs:{}'
        self.log.info(msg.format(bool(self.fail_flag.value),
                                 bool(self.exit_flag.value),
                                 bool(self.check_input_flags())))
        self.on_input_completed()
        self.exit_flag.value = False
        if self.outqueue is not None:
            self.outqueue.close()
        if self.inqueue is not None:
            self.inqueue.close()

    def set_input(self, other):
        """ Add an input :class:`StreamElement` to this one. Creates a :class:`Queue`
        between StreamElements in the event there is not an existing one.

        Parameters:
        ===========
        other: :class:`StreamElement` or list of them
            An other Stream Element which will stream
            queued data into this one.
        """
        if type(other) is list:
            if self.inqueue is None:
                self.inqueue = Queue(self.queuelen)
            # Fan-in: every producer shares this element's inqueue.
            for other_element in other:
                other_element.outqueue = self.inqueue
                self.input_flags.append(other_element.exit_flag)
        elif other.outqueue is None:
            if self.inqueue is None:
                self.inqueue = Queue(self.queuelen)
            other.outqueue = self.inqueue
            self.input_flags.append(other.exit_flag)

    def set_output(self, other):
        """ Sets `self` as an input :class:`StreamElement` to `other`.
        Creates a :class:`Queue` between StreamElements in the event there
        is not an existing one.

        Parameters:
        ===========
        other: :class:`StreamElement` or list of them
            An other Stream Element which will stream
            queued data from this one.
        """
        if type(other) is list:
            if self.outqueue is None:
                self.outqueue = Queue(self.queuelen)
            # Fan-out: every consumer shares this element's outqueue.
            for other_element in other:
                other_element.inqueue = self.outqueue
                other_element.input_flags.append(self.exit_flag)
        elif other.inqueue is None:
            if self.outqueue is None:
                self.outqueue = Queue(self.queuelen)
            other.inqueue = self.outqueue
            other.input_flags.append(self.exit_flag)

    def check_input_flags(self):
        """ Checks to see if the inputs have exited or not.
        Useful as the exiting condition is the Queue is empty and
        the inputs have all finished.

        Returns:
        ========
        bool: True if at least one input is OK.
        """
        if len(self.input_flags) == 0:
            # Source element: no producers to wait on.
            return True
        ret = False
        for flag in self.input_flags:
            ret |= flag.value
        return ret

    @signal_exit_on_failure
    def __process__(self, data):
        """ Wrapper function for :meth:`StreamElement.process`.

        Warning:
        ========
        DO NOT OVERRIDE
        """
        return self.process(data=data)

    @abstractmethod
    def process(self, data):
        """ Abstract method. Implement this for the
        primary action this process will take on `data`

        Parameters:
        ===========
        data: list or dict or None
            Input data to be acted on. Primary data generators can accept
            None as an input, and produce data.

        Returns:
        ========
        dict or list:
            Data to be processed downstream.
        """
        raise NotImplementedError()

    def on_start(self):
        """ Override this method to perform an
        action at the process' beginning of execution.
        """
        self.log.debug("starting")

    def on_completion(self):
        """ Override this method to perform an
        action at the process' end of execution.
        """
        self.log.debug("completing")
def exit_flag():
    """ Convenience function for
    creating the exit flag data type instance.
    """
    # 'b' = signed char; True means "keep running".
    return Value('b', True)
from multiprocessing.queues import Empty, Full
from abc import abstractmethod, ABCMeta
import logging
import copy
__default_exit_flag__ = Value('b', True)
__fmt__ = "%(levelname)s: %(asctime)s - %(name)s - %(process)s - %(message)s"
class StreamElement(Process):
""" Subclass this abstract class for concrete implementation
of pewpew processing
"""
__metaclass__ = ABCMeta
def __init__(self, exit_flag=None, inqueue=None, outqueue=None, **kwargs):
""" The base constructor must always be called by the subclass.
Parameters:
==========
exit_flag: multiprocessing.Value
A global exit flag. When set to `False`, will cause all
threads to exit gracefully.
inqueue: multiprocessing.Queue
Data queue for incoming data.
outqueue: multiprocessing.Queue
Data queue for outgoing data.
"""
super(StreamElement, self).__init__()
self.inqueue = inqueue
self.outqueue = outqueue
self.config = kwargs
self.fail_flag = exit_flag # Signals False if failure has occurred
if self.fail_flag is None:
self.fail_flag = __default_exit_flag__
self.input_flags = [] # Holds values from inputs to signal chain exit
self.exit_flag = Value('b', True) # For forwarding
self.timeout = int(kwargs.get("timeout", 120))
self.queuelen = int(kwargs.get("default_queuelen", 10))
self.n_tries = int(kwargs.get("n_tries", 10))
self.debug = bool(kwargs.get('debug', False))
def signal_exit_on_failure(fn):
"""Helper decorator which sets appropriate flags when exceptions
occur in daughter processes.
"""
def wrapped(self=None, **kwargs):
try:
return fn(self, **kwargs)
except Exception as e:
self.log.info("signaling exit to all processes")
self.log.warning(e)
self.fail_flag.value = False
raise e
return wrapped
def run(self):
"""Called by multiprocessing.Process.
Executes main event loop for process.
"""
self.event_loop()
msg = "exiting with flags {} {}"
self.log.debug(msg.format(self.fail_flag.value,
self.exit_flag.value))
def _log_(self):
log = logging.getLogger(str(self.__class__).split('\'')[1])
# formatter = logging.Formatter(__fmt__)
return log
@signal_exit_on_failure
def get_data(self):
""" Gets data from the input Queue.
Returns
=======
A dict of pickle-able objects.
"""
if not self.check_input_flags():
self.log.debug("Inputs are finished. Setting timeout to 0.")
self.timeout = 0
if self.inqueue is not None:
try:
return self.inqueue.get(timeout=self.timeout)
except (TimeoutError, Empty):
if not self.check_input_flags():
self.exit_flag.value = False
else:
self.fail_flag.value = False
return None
return {'data': {}, 'meta': {}}
@signal_exit_on_failure
def put_data(self, data):
""" Attempts to put data on the queue for the next node.
If the data is a list, then it puts the data on the queue
one item at a time.
Parameters:
===========
data : list or dict
The data to put on the queue.
Note:
=====
This function must be called as `self.put_data(data={})`. Where
the argument keyword must be used explicitely.
"""
if not self.valid_data(data):
msg = "cannot understand output data type: {}"
self.log.warning(msg.format(type(data)))
return
if self.outqueue is not None:
if isinstance(data, list):
for i in data:
self.put_data(data=i)
else:
for try_ in range(self.n_tries):
success = False
try:
self.outqueue.put(copy.copy(data),
timeout=self.timeout)
success = True
except (TimeoutError, Full) as e:
msg = "Failed putting data in queue: {}".format(e)
self.log.warning(msg)
if try_ == self.n_tries-1:
self.exit_flag.value = False
raise e
else:
self.log.warning("Trying again")
if success:
break
def valid_data(self, data):
""" Validates whether data is valid for the data stream.
Parameters:
===========
data : list or dict
Input data which must be validated
Returns:
========
bool : True if valid data
"""
if isinstance(data, dict):
return True
if isinstance(data, list):
return True
return False
@signal_exit_on_failure
def on_input_completed(self):
""" Utility function which wraps the user on_completion function
and empties data into stream.
"""
output = self.on_completion()
if self.valid_data(output):
self.put_data(data=output)
@signal_exit_on_failure
def event_loop(self):
""" Main event loop. This executes the interior logic of the process.
Warning:
=====
DO NOT OVERWRITE.
"""
self.log = self._log_()
self.on_start()
while self.fail_flag.value and self.exit_flag.value:
data = self.get_data()
if data is None:
continue
output = self.__process__(data=data)
if output is None:
continue
self.put_data(data=output)
msg = 'Exiting Loop with flags\tFail:{}\tExit:{}\tInputs:{}'
self.log.info(msg.format(bool(self.fail_flag.value),
bool(self.exit_flag.value),
bool(self.check_input_flags())))
self.on_input_completed()
self.exit_flag.value = False
if self.outqueue is not None:
self.outqueue.close()
if self.inqueue is not None:
self.inqueue.close()
def set_input(self, other):
""" Add an input :class:`StreamElement` to this one. Creates a :class:`Queue`
between StreamElements in the event there is not an existing one.
Parameters:
===========
other: :class:`StreamElement`
An other Stream Element which will stream
queued data into this one.
"""
if type(other) is list:
if self.inqueue is None:
self.inqueue = Queue(self.queuelen)
for other_element in other:
other_element.outqueue = self.inqueue
self.input_flags.append(other_element.exit_flag)
elif other.outqueue is None:
if self.inqueue is None:
self.inqueue = Queue(self.queuelen)
other.outqueue = self.inqueue
self.input_flags.append(other.exit_flag)
def set_output(self, other):
""" Sets `self` as an input :class:`StreamElement` to `other`.
Creates a :class:`Queue` between StreamElements in the event there
is not an existing one.
Parameters:
===========
other: :class:`StreamElement`
An other Stream Element which will stream
queued data from this one.
"""
if type(other) is list:
if self.outqueue is None:
self.outqueue = Queue(self.queuelen)
for other_element in other:
other_element.inqueue = self.outqueue
other_element.input_flags.append(self.exit_flag)
elif other.inqueue is None:
if self.outqueue is None:
self.outqueue = Queue(self.queuelen)
other.inqueue = self.outqueue
other.input_flags.append(self.exit_flag)
def check_input_flags(self):
""" Checks to see if the inputs have exited or not.
Useful as the exiting condition is the Queue is empty and
the inputs have all finished.
Returns:
========
bool: True if at least one input is OK.
"""
if len(self.input_flags) == 0:
return True
ret = False
for flag in self.input_flags:
ret |= flag.value
return ret
    @signal_exit_on_failure
    def __process__(self, data):
        """ Wrapper function for :meth:`StreamElement.process`.

        The decorator presumably marks this element's fail/exit state when
        :meth:`process` raises (``signal_exit_on_failure`` is defined
        elsewhere) -- confirm against its definition.

        Warning:
        ========
        DO NOT OVERRIDE
        """
        return self.process(data=data)
    @abstractmethod
    def process(self, data):
        """ Abstract method. Implement this for the
        primary action this process will take on `data`.

        Parameters:
        ===========
        data: list or dict or None
            Input data to be acted on. Primary data generators can accept
            None as an input, and produce data.

        Returns:
        ========
        dict or list:
            Data to be processed downstream. Returning None causes the main
            loop to skip emitting anything for this input.
        """
        raise NotImplementedError()
    def on_start(self):
        """ Override this method to perform an
        action at the process' beginning of execution.

        The default implementation only emits a debug log line.
        """
        self.log.debug("starting")
    def on_completion(self):
        """ Override this method to perform an
        action at the process' end of execution.

        The default implementation only emits a debug log line.
        """
        self.log.debug("completing")
    def exit_flag():
        """ Convenience function for
        creating the exit flag data type instance.

        The flag is a shared byte ``Value`` initialized to True (running);
        the main loop sets it to False on exit.

        NOTE(review): declared at class scope without ``self`` -- calling it
        on an instance would raise TypeError; confirm it is only referenced
        via the class or rebound before use.
        """
        # NOTE(review): the trailing "| 0.637144 | 0.127571" below looks like
        # dataset-export residue fused onto this line, not intentional code.
        return Value('b', True) | 0.637144 | 0.127571
import future
import builtins
import past
import six
import copy
from timeit import default_timer as timer
from datetime import datetime
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets
from torch.utils.data import Dataset
import decimal
import torch.onnx
import inspect
from inspect import getargspec
import os
import helpers as h
from helpers import Timer
import copy
import random
from components import *
import models
import goals
import scheduling
from goals import *
from scheduling import *
import math
import warnings
from torch.serialization import SourceChangeWarning
# All members of `goals` that subclass goals.Point.
POINT_DOMAINS = [m for m in h.getMethods(goals) if issubclass(m, goals.Point)]
# Box plus every point domain; for these, Top.curveSpec falls back to
# Top.boxSpec.  Historical misspelling of "SYMMETRIC" kept -- this name is
# referenced elsewhere in the file.
SYMETRIC_DOMAINS = [goals.Box] + POINT_DOMAINS
# Presumably stubs out Imagenet12 so the -D/--dataset choices scan below
# (inspect.getmembers over `datasets`) does not offer it -- TODO confirm.
datasets.Imagenet12 = None
class Top(nn.Module):
    """ Wraps a concrete network together with an abstract training domain.

    Owns the spec builder selected by ``--spec`` (box or curve), the
    abstract domain ``ty`` used for training, a running seconds-per-example
    speed estimate, and the optional regularization weight folded into the
    abstract loss.
    """
    def __init__(self, args, net, ty = Point):
        super(Top, self).__init__()
        self.net = net
        self.ty = ty                    # abstract domain used to build specs
        self.w = args.width             # width of the box/line around each example
        self.global_num = 0             # running count of training examples seen
        self.getSpec = getattr(self, args.spec)  # boxSpec or curveSpec
        self.sub_batch_size = args.sub_batch_size
        self.curve_width = args.curve_width
        self.regularize = args.regularize  # regularization weight, or None
        self.speedCount = 0
        self.speed = 0.0

    def addSpeed(self, s):
        """ Fold one seconds-per-example sample `s` into the running mean speed. """
        self.speed = (s + self.speed * self.speedCount) / (self.speedCount + 1)
        self.speedCount += 1

    def forward(self, x):
        return self.net(x)

    def clip_norm(self):
        self.net.clip_norm()

    def boxSpec(self, x, target, **kargs):
        """ Return a single (box-domain, target) spec of width self.w around batch `x`. """
        return [(self.ty.box(x, w = self.w, model=self, target=target, untargeted=True, **kargs).to_dtype(), target)]

    def curveSpec(self, x, target, **kargs):
        """ Return line specs joining each example to its nearest same-class
        neighbour in the batch, chunked into sub-batches of size
        ``self.sub_batch_size``.

        Symmetric domains gain nothing from a line, so they fall back to
        :meth:`boxSpec`.
        """
        if self.ty.__class__ in SYMETRIC_DOMAINS:
            return self.boxSpec(x,target, **kargs)
        batch_size = x.size()[0]
        newTargs = [ None for i in range(batch_size) ]
        newSpecs = [ None for i in range(batch_size) ]
        bestSpecs = [ None for i in range(batch_size) ]
        for i in range(batch_size):
            newTarg = target[i]
            newTargs[i] = newTarg
            newSpec = x[i]
            best_x = newSpec
            best_dist = float("inf")
            for j in range(batch_size):
                potTarg = target[j]
                potSpec = x[j]
                if (not newTarg.data.equal(potTarg.data)) or i == j:
                    continue
                curr_dist = (newSpec - potSpec).norm(1).item() # must experiment with the type of norm here
                if curr_dist <= best_dist:
                    best_x = potSpec
                    # BUGFIX: best_dist was never updated, so every later
                    # same-class example overwrote best_x regardless of
                    # distance and the *last* one always won.
                    best_dist = curr_dist
            newSpecs[i] = newSpec
            bestSpecs[i] = best_x
        new_batch_size = self.sub_batch_size
        batchedTargs = h.chunks(newTargs, new_batch_size)
        batchedSpecs = h.chunks(newSpecs, new_batch_size)
        batchedBest = h.chunks(bestSpecs, new_batch_size)
        def batch(t,s,b):
            # Stack one sub-batch and build the line domain between s and b.
            t = h.lten(t)
            s = torch.stack(s)
            b = torch.stack(b)
            if h.use_cuda:
                # BUGFIX: Tensor.cuda() is not in-place -- it returns a copy
                # on the device, so the results must be rebound.
                t = t.cuda()
                s = s.cuda()
                b = b.cuda()
            m = self.ty.line(s, b, w = self.curve_width, **kargs)
            return (m , t)
        return [batch(t,s,b) for t,s,b in zip(batchedTargs, batchedSpecs, batchedBest)]

    def regLoss(self):
        """ Weighted L2 regularization of the wrapped net, or 0 when disabled. """
        if self.regularize is None or self.regularize <= 0.0:
            return 0
        return self.regularize * self.net.regularize(2)

    def aiLoss(self, dom, target, **args):
        """ Abstract loss of domain `dom` against `target`, plus regLoss. """
        r = self(dom)
        return self.regLoss() + r.loss(target = target, **args)

    def printNet(self, f):
        self.net.printNet(f)
# Training settings: the whole script is configured from the command line.
parser = argparse.ArgumentParser(description='PyTorch DiffAI Example', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# -- batch sizes and test cadence --
parser.add_argument('--batch-size', type=int, default=10, metavar='N', help='input batch size for training')
parser.add_argument('--test-first', type=h.str2bool, nargs='?', const=True, default=True, help='test first')
parser.add_argument('--test-freq', type=int, default=1, metavar='N', help='number of epochs to skip before testing')
parser.add_argument('--test-batch-size', type=int, default=10, metavar='N', help='input batch size for testing')
parser.add_argument('--sub-batch-size', type=int, default=3, metavar='N', help='input batch size for curve specs')
parser.add_argument('--custom-schedule', type=str, default="", metavar='net', help='Learning rate scheduling for lr-multistep. Defaults to [200,250,300] for CIFAR10 and [15,25] for everything else.')
# -- loading / saving of nets --
parser.add_argument('--test', type=str, default=None, metavar='net', help='Saved net to use, in addition to any other nets you specify with -n')
parser.add_argument('--update-test-net', type=h.str2bool, nargs='?', const=True, default=False, help="should update test net")
parser.add_argument('--sgd',type=h.str2bool, nargs='?', const=True, default=False, help="use sgd instead of adam")
parser.add_argument('--onyx', type=h.str2bool, nargs='?', const=True, default=False, help="should output onyx")
parser.add_argument('--save-dot-net', type=h.str2bool, nargs='?', const=True, default=False, help="should output in .net")
parser.add_argument('--update-test-net-name', type=str, choices = h.getMethodNames(models), default=None, help="update test net name")
parser.add_argument('--normalize-layer', type=h.str2bool, nargs='?', const=True, default=True, help="should include a training set specific normalization layer")
parser.add_argument('--clip-norm', type=h.str2bool, nargs='?', const=True, default=False, help="should clip the normal and use normal decomposition for weights")
# -- optimization hyperparameters --
parser.add_argument('--epochs', type=int, default=1000, metavar='N', help='number of epochs to train')
parser.add_argument('--log-freq', type=int, default=10, metavar='N', help='The frequency with which log statistics are printed')
parser.add_argument('--save-freq', type=int, default=1, metavar='N', help='The frequency with which nets and images are saved, in terms of number of test passes')
parser.add_argument('--number-save-images', type=int, default=0, metavar='N', help='The number of images to save. Should be smaller than test-size.')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR', help='learning rate')
parser.add_argument('--lr-multistep', type=h.str2bool, nargs='?', const=True, default=False, help='learning rate multistep scheduling')
parser.add_argument('--threshold', type=float, default=-0.01, metavar='TH', help='threshold for lr schedule')
parser.add_argument('--patience', type=int, default=0, metavar='PT', help='patience for lr schedule')
parser.add_argument('--factor', type=float, default=0.5, metavar='R', help='reduction multiplier for lr schedule')
parser.add_argument('--max-norm', type=float, default=10000, metavar='MN', help='the maximum norm allowed in weight distribution')
# -- abstract spec shape --
parser.add_argument('--curve-width', type=float, default=None, metavar='CW', help='the width of the curve spec')
parser.add_argument('--width', type=float, default=0.01, metavar='CW', help='the width of either the line or box')
# Offers every Top method named *Spec taking exactly (self, x, target).
parser.add_argument('--spec', choices = [ x for x in dir(Top) if x[-4:] == "Spec" and len(getargspec(getattr(Top, x)).args) == 3]
                    , default="boxSpec", help='picks which spec builder function to use for training')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed')
parser.add_argument("--use-schedule", type=h.str2bool, nargs='?',
                    const=True, default=False,
                    help="activate learning rate schedule")
# -- domains, nets and dataset selection --
parser.add_argument('-d', '--domain', sub_choices = None, action = h.SubAct
                    , default=[], help='picks which abstract goals to use for training', required=True)
parser.add_argument('-t', '--test-domain', sub_choices = None, action = h.SubAct
                    , default=[], help='picks which abstract goals to use for testing. Examples include ' + str(goals), required=True)
parser.add_argument('-n', '--net', choices = h.getMethodNames(models), action = 'append'
                    , default=[], help='picks which net to use for training') # one net for now
parser.add_argument('-D', '--dataset', choices = [n for (n,k) in inspect.getmembers(datasets, inspect.isclass) if issubclass(k, Dataset)]
                    , default="MNIST", help='picks which dataset to use.')
parser.add_argument('-o', '--out', default="out", help='picks the folder to save the outputs')
parser.add_argument('--dont-write', type=h.str2bool, nargs='?', const=True, default=False, help='dont write anywhere if this flag is on')
parser.add_argument('--write-first', type=h.str2bool, nargs='?', const=True, default=False, help='write the initial net. Useful for comparing algorithms, a pain for testing.')
parser.add_argument('--test-size', type=int, default=2000, help='number of examples to test with')
parser.add_argument('-r', '--regularize', type=float, default=None, help='use regularization')
args = parser.parse_args()
# Column widths for aligned log output.
largest_domain = max([len(h.catStrs(d)) for d in (args.domain)] )
largest_test_domain = max([len(h.catStrs(d)) for d in (args.test_domain)] )
# Print roughly log_freq times per 50000 training examples.
args.log_interval = int(50000 / (args.batch_size * args.log_freq))
h.max_c_for_norm = args.max_norm
if h.use_cuda:
    torch.cuda.manual_seed(1 + args.seed)
else:
    torch.manual_seed(args.seed)
train_loader = h.loadDataset(args.dataset, args.batch_size, True, False)
test_loader = h.loadDataset(args.dataset, args.test_batch_size, False, False)
input_dims = train_loader.dataset[0][0].size()
# SVHN stores its labels under `labels` rather than `train_labels`.
num_classes = int(max(getattr(train_loader.dataset, 'train_labels' if args.dataset != "SVHN" else 'labels'))) + 1
print("input_dims: ", input_dims)
print("Num classes: ", num_classes)
# Full CLI config as a dict, forwarded as **kwargs into the loss machinery.
vargs = vars(args)
total_batches_seen = 0
def train(epoch, models):
    """ Run one training epoch over `train_loader` for every model.

    For each batch and model: build the model's specs, accumulate the
    abstract loss, backprop with gradient clipping, patch any NaNs in
    gradients/weights with random values, step the optimizer, and log
    progress every `args.log_interval` batches.
    """
    global total_batches_seen
    for model in models:
        model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        total_batches_seen += 1
        # Fractional epoch count, forwarded to spec builders / loss as `time`.
        time = float(total_batches_seen) / len(train_loader)
        if h.use_cuda:
            data, target = data.cuda(), target.cuda()
        for model in models:
            model.global_num += data.size()[0]
            timer = Timer("train a sample from " + model.name + " with " + model.ty.name, data.size()[0], False)
            lossy = 0
            with timer:
                for s in model.getSpec(data.to_dtype(),target, time = time):
                    model.optimizer.zero_grad()
                    loss = model.aiLoss(*s, time = time, **vargs).mean(dim=0)
                    lossy += loss.detach().item()
                    loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
                # Replace NaN gradients with random values before stepping.
                for p in model.parameters():
                    if p is not None and torch.isnan(p).any():
                        print("Such nan in vals")
                    if p is not None and p.grad is not None and torch.isnan(p.grad).any():
                        print("Such nan in postmagic")
                        stdv = 1 / math.sqrt(h.product(p.data.shape))
                        p.grad = torch.where(torch.isnan(p.grad), torch.normal(mean=h.zeros(p.grad.shape), std=stdv), p.grad)
                model.optimizer.step()
                # Replace NaN weights with random values after stepping.
                for p in model.parameters():
                    if p is not None and torch.isnan(p).any():
                        print("Such nan in vals after grad")
                        stdv = 1 / math.sqrt(h.product(p.data.shape))
                        p.data = torch.where(torch.isnan(p.data), torch.normal(mean=h.zeros(p.data.shape), std=stdv), p.data)
                if args.clip_norm:
                    model.clip_norm()
                # NaNs surviving clip_norm are unrecoverable -- abort.
                for p in model.parameters():
                    if p is not None and torch.isnan(p).any():
                        raise Exception("Such nan in vals after clip")
            model.addSpeed(timer.getUnitTime())
            if batch_idx % args.log_interval == 0:
                print(('Train Epoch {:12} {:'+ str(largest_domain) +'}: {:3} [{:7}/{} ({:.0f}%)] \tAvg sec/ex {:1.8f}\tLoss: {:.6f}').format(
                    model.name, model.ty.name,
                    epoch,
                    batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader),
                    model.speed,
                    lossy))
# Number of completed test passes; drives save cadence and one-time image dumps.
num_tests = 0
def test(models, epoch, f = None):
    """ Evaluate every model on up to `args.test_size` test examples.

    For each model: measure plain accuracy, then for each test domain
    measure width, proved and safe counts.  Afterwards step LR schedules,
    print/log per-domain statistics, periodically save checkpoints
    (.pynet / .net / onnx), and on the first pass optionally dump sample
    images to disk.
    """
    global num_tests
    num_tests += 1
    class MStat:
        # Per-model accumulator: accuracy plus one Stat per test domain.
        def __init__(self, model):
            model.eval()
            self.model = model
            self.correct = 0
            class Stat:
                # Per-domain accumulator for width/safety/timing counters.
                def __init__(self, d, dnm):
                    self.domain = d
                    self.name = dnm
                    self.width = 0
                    self.max_eps = None
                    self.safe = 0
                    self.proved = 0
                    self.time = 0
            self.domains = [ Stat(h.parseValues(d, goals), h.catStrs(d)) for d in args.test_domain ]
    model_stats = [ MStat(m) for m in models ]
    num_its = 0
    saved_data_target = []
    for data, target in test_loader:
        if num_its >= args.test_size:
            break
        if num_tests == 1:
            # Keep first-pass examples around for the image dump below.
            saved_data_target += list(zip(list(data), list(target)))
        num_its += data.size()[0]
        if h.use_cuda:
            data, target = data.cuda().to_dtype(), target.cuda()
        for m in model_stats:
            with torch.no_grad():
                pred = m.model(data).vanillaTensorPart().max(1, keepdim=True)[1] # get the index of the max log-probability
                m.correct += pred.eq(target.data.view_as(pred)).sum()
            for stat in m.domains:
                timer = Timer(shouldPrint = False)
                with timer:
                    def calcData(data, target):
                        # Evaluate one (sub)batch through the abstract domain.
                        box = stat.domain.box(data, w = m.model.w, model=m.model, untargeted = True, target=target).to_dtype()
                        with torch.no_grad():
                            bs = m.model(box)
                            org = m.model(data).vanillaTensorPart().max(1,keepdim=True)[1]
                            stat.width += bs.diameter().sum().item() # sum up batch loss
                            stat.proved += bs.isSafe(org).sum().item()
                            stat.safe += bs.isSafe(target).sum().item()
                            # stat.max_eps += 0 # TODO: calculate max_eps
                    # Big nets with non-symmetric domains are evaluated one
                    # example at a time to bound memory.
                    if m.model.net.neuronCount() < 5000 or stat.domain.__class__ in SYMETRIC_DOMAINS:
                        calcData(data, target)
                    else:
                        for d,t in zip(data, target):
                            calcData(d.unsqueeze(0),t.unsqueeze(0))
                stat.time += timer.getUnitTime()
    l = num_its # len(test_loader.dataset)
    for m in model_stats:
        if args.lr_multistep:
            m.model.lrschedule.step()
        pr_corr = float(m.correct) / float(l)
        if args.use_schedule:
            # ReduceLROnPlateau steps on the error rate.
            m.model.lrschedule.step(1 - pr_corr)
        h.printBoth(('Test: {:12} trained with {:'+ str(largest_domain) +'} - Avg sec/ex {:1.12f}, Accuracy: {}/{} ({:3.1f}%)').format(
            m.model.name, m.model.ty.name,
            m.model.speed,
            m.correct, l, 100. * pr_corr), f = f)
        model_stat_rec = ""
        for stat in m.domains:
            pr_safe = stat.safe / l
            pr_proved = stat.proved / l
            pr_corr_given_proved = pr_safe / pr_proved if pr_proved > 0 else 0.0
            h.printBoth(("\t{:" + str(largest_test_domain)+"} - Width: {:<36.16f} Pr[Proved]={:<1.3f} Pr[Corr and Proved]={:<1.3f} Pr[Corr|Proved]={:<1.3f} {}Time = {:<7.5f}" ).format(
                stat.name,
                stat.width / l,
                pr_proved,
                pr_safe, pr_corr_given_proved,
                "AvgMaxEps: {:1.10f} ".format(stat.max_eps / l) if stat.max_eps is not None else "",
                stat.time), f = f)
            model_stat_rec += "{}_{:1.3f}_{:1.3f}_{:1.3f}__".format(stat.name, pr_proved, pr_safe, pr_corr_given_proved)
        # Checkpoint filename encodes domain name, epoch and accuracy.
        prepedname = m.model.ty.name.replace(" ", "_").replace(",", "").replace("(", "_").replace(")", "_").replace("=", "_")
        net_file = os.path.join(out_dir, m.model.name +"__" +prepedname + "_checkpoint_"+str(epoch)+"_with_{:1.3f}".format(pr_corr))
        h.printBoth("\tSaving netfile: {}\n".format(net_file + ".pynet"), f = f)
        if (num_tests % args.save_freq == 1 or args.save_freq == 1) and not args.dont_write and (num_tests > 1 or args.write_first):
            print("Actually Saving")
            torch.save(m.model.net, net_file + ".pynet")
            if args.save_dot_net:
                with h.mopen(args.dont_write, net_file + ".net", "w") as f2:
                    m.model.net.printNet(f2)
                    f2.close()
            if args.onyx:
                # Export a normalization-free copy to ONNX format.
                nn = copy.deepcopy(m.model.net)
                nn.remove_norm()
                torch.onnx.export(nn, h.zeros([1] + list(input_dims)), net_file + ".onyx",
                                  verbose=False, input_names=["actual_input"] + ["param"+str(i) for i in range(len(list(nn.parameters())))], output_names=["output"])
    if num_tests == 1 and not args.dont_write:
        # One-time dump of the first test examples as raw .img/.class files.
        img_dir = os.path.join(out_dir, "images")
        if not os.path.exists(img_dir):
            os.makedirs(img_dir)
        for img_num,(img,target) in zip(range(args.number_save_images), saved_data_target[:args.number_save_images]):
            sz = ""
            for s in img.size():
                sz += str(s) + "x"
            sz = sz[:-1]
            img_file = os.path.join(img_dir, args.dataset + "_" + sz + "_"+ str(img_num))
            if img_num == 0:
                print("Saving image to: ", img_file + ".img")
            with open(img_file + ".img", "w") as imgfile:
                flatimg = img.view(h.product(img.size()))
                for t in flatimg.cpu():
                    print(decimal.Decimal(float(t)).__format__("f"), file=imgfile)
            with open(img_file + ".class" , "w") as imgfile:
                print(int(target.item()), file=imgfile)
def createModel(net, domain, domain_name):
    """ Build a ready-to-train Top model for one (net, domain) pair.

    `net` is a (weights_net, constructor) pair: a fresh net is constructed
    and the weights copied in (converted via to_dtype).  Attaches an
    optimizer (SGD or Adam per --sgd) and an LR schedule (MultiStepLR or
    ReduceLROnPlateau per --lr-multistep), and names the model after the
    constructor.
    """
    net_weights, net_create = net
    domain.name = domain_name
    net = net_create()
    m = {}
    for (k,v) in net_weights.state_dict().items():
        m[k] = v.to_dtype()
    net.load_state_dict(m)
    model = Top(args, net, domain)
    if args.clip_norm:
        model.clip_norm()
    if h.use_cuda:
        model.cuda()
    if args.sgd:
        model.optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
    else:
        model.optimizer = optim.Adam(model.parameters(), lr=args.lr)
    if args.lr_multistep:
        # NOTE(review): eval() on the --custom-schedule CLI string executes
        # arbitrary code; acceptable for a local CLI, but do not feed it
        # untrusted input.
        model.lrschedule = optim.lr_scheduler.MultiStepLR(
            model.optimizer,
            gamma = 0.1,
            milestones = eval(args.custom_schedule) if args.custom_schedule != "" else ([200, 250, 300] if args.dataset == "CIFAR10" else [15, 25]))
    else:
        model.lrschedule = optim.lr_scheduler.ReduceLROnPlateau(
            model.optimizer,
            'min',
            patience=args.patience,
            threshold= args.threshold,
            min_lr=0.000001,
            factor=args.factor,
            verbose=True)
    net.name = net_create.__name__
    model.name = net_create.__name__
    return model
# Output folder encodes dataset, net names, spec, width and a timestamp.
out_dir = os.path.join(args.out, args.dataset, str(args.net)[1:-1].replace(", ","_").replace("'",""),
                       args.spec, "width_"+str(args.width), h.file_timestamp() )
print("Saving to:", out_dir)
if not os.path.exists(out_dir) and not args.dont_write:
    os.makedirs(out_dir)
# Record the full CLI configuration alongside the results.
print("Starting Training with:")
with h.mopen(args.dont_write, os.path.join(out_dir, "config.txt"), "w") as f:
    for k in sorted(vars(args)):
        h.printBoth("\t"+k+": "+str(getattr(args,k)), f = f)
print("")
def buildNet(n):
    """ Instantiate net constructor `n` for `num_classes` classes.

    Optionally prepends a dataset-specific Normalize layer (per
    --normalize-layer), infers layer shapes from `input_dims`, and applies
    clip_norm when requested.
    """
    net = n(num_classes)
    if args.normalize_layer:
        # Per-dataset (mean, std) channel statistics.
        norm_stats = {
            "MNIST": ([0.1307], [0.3081]),
            "CIFAR10": ([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
            "CIFAR100": ([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
            "SVHN": ([0.5, 0.5, 0.5], [0.2, 0.2, 0.2]),
            "Imagenet12": ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        }
        if args.dataset in norm_stats:
            mean, std = norm_stats[args.dataset]
            net = Seq(Normalize(mean, std), net)
    net = net.infer(input_dims)
    if args.clip_norm:
        net.clip_norm()
    return net
# --- Assemble the list of (weights_net, constructor) pairs to train. ---
if not args.test is None:
    # Load a previously saved net from --test, optionally rebuilding it
    # under a fresh architecture named by --update-test-net-name.
    test_name = None
    def loadedNet():
        # Closure over mutable test_name: returns a fresh net once a name
        # is known, otherwise unpickles the saved file.
        if test_name is not None:
            n = getattr(models,test_name)
            n = buildNet(n)
            if args.clip_norm:
                n.clip_norm()
            return n
        else:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", SourceChangeWarning)
                return torch.load(args.test)
    net = loadedNet().double() if h.dtype == torch.float64 else loadedNet().float()
    if args.update_test_net_name is not None:
        test_name = args.update_test_net_name
    elif args.update_test_net and '__name__' in dir(net):
        test_name = net.__name__
    if test_name is not None:
        loadedNet.__name__ = test_name
    nets = [ (net, loadedNet) ]
elif args.net == []:
    raise Exception("Need to specify at least one net with either -n or --test")
else:
    nets = []
    for n in args.net:
        m = getattr(models,n)
        net_create = (lambda m: lambda: buildNet(m))(m) # why doesn't python do scoping right? This is a thunk. It is bad.
        net_create.__name__ = n
        net = buildNet(m)
        net.__name__ = n
        nets += [ (net, net_create) ]
        print("Name: ", net_create.__name__)
        print("Number of Neurons (relus): ", net.neuronCount())
        print("Number of Parameters: ", sum([h.product(s.size()) for s in net.parameters()]))
        print("Depth (relu layers): ", net.depth())
        print()
        net.showNet()
        print()
# One model per (net, training domain) combination.
if args.domain == []:
    models = [ createModel(net, goals.Box(args.width), "Box") for net in nets]
else:
    models = h.flat([[createModel(net, h.parseValues(d, goals, scheduling), h.catStrs(d)) for net in nets] for d in args.domain])
# --- Main train/test loop; all console output is mirrored to log.txt. ---
with h.mopen(args.dont_write, os.path.join(out_dir, "log.txt"), "w") as f:
    startTime = timer()
    for epoch in range(1, args.epochs + 1):
        if f is not None:
            f.flush()
        if (epoch - 1) % args.test_freq == 0 and (epoch > 1 or args.test_first):
            with Timer("test all models before epoch "+str(epoch), 1):
                test(models, epoch, f)
            if f is not None:
                f.flush()
            h.printBoth("Elapsed-Time: {:.2f}s\n".format(timer() - startTime), f = f)
            if args.epochs <= args.test_freq:
                break
        with Timer("train all models in epoch", 1, f = f):
train(epoch, models) | adv/diffai/__main__.py | import future
import builtins
import past
import six
import copy
from timeit import default_timer as timer
from datetime import datetime
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets
from torch.utils.data import Dataset
import decimal
import torch.onnx
import inspect
from inspect import getargspec
import os
import helpers as h
from helpers import Timer
import copy
import random
from components import *
import models
import goals
import scheduling
from goals import *
from scheduling import *
import math
import warnings
from torch.serialization import SourceChangeWarning
POINT_DOMAINS = [m for m in h.getMethods(goals) if issubclass(m, goals.Point)]
SYMETRIC_DOMAINS = [goals.Box] + POINT_DOMAINS
datasets.Imagenet12 = None
class Top(nn.Module):
def __init__(self, args, net, ty = Point):
super(Top, self).__init__()
self.net = net
self.ty = ty
self.w = args.width
self.global_num = 0
self.getSpec = getattr(self, args.spec)
self.sub_batch_size = args.sub_batch_size
self.curve_width = args.curve_width
self.regularize = args.regularize
self.speedCount = 0
self.speed = 0.0
def addSpeed(self, s):
self.speed = (s + self.speed * self.speedCount) / (self.speedCount + 1)
self.speedCount += 1
def forward(self, x):
return self.net(x)
def clip_norm(self):
self.net.clip_norm()
def boxSpec(self, x, target, **kargs):
return [(self.ty.box(x, w = self.w, model=self, target=target, untargeted=True, **kargs).to_dtype(), target)]
def curveSpec(self, x, target, **kargs):
if self.ty.__class__ in SYMETRIC_DOMAINS:
return self.boxSpec(x,target, **kargs)
batch_size = x.size()[0]
newTargs = [ None for i in range(batch_size) ]
newSpecs = [ None for i in range(batch_size) ]
bestSpecs = [ None for i in range(batch_size) ]
for i in range(batch_size):
newTarg = target[i]
newTargs[i] = newTarg
newSpec = x[i]
best_x = newSpec
best_dist = float("inf")
for j in range(batch_size):
potTarg = target[j]
potSpec = x[j]
if (not newTarg.data.equal(potTarg.data)) or i == j:
continue
curr_dist = (newSpec - potSpec).norm(1).item() # must experiment with the type of norm here
if curr_dist <= best_dist:
best_x = potSpec
newSpecs[i] = newSpec
bestSpecs[i] = best_x
new_batch_size = self.sub_batch_size
batchedTargs = h.chunks(newTargs, new_batch_size)
batchedSpecs = h.chunks(newSpecs, new_batch_size)
batchedBest = h.chunks(bestSpecs, new_batch_size)
def batch(t,s,b):
t = h.lten(t)
s = torch.stack(s)
b = torch.stack(b)
if h.use_cuda:
t.cuda()
s.cuda()
b.cuda()
m = self.ty.line(s, b, w = self.curve_width, **kargs)
return (m , t)
return [batch(t,s,b) for t,s,b in zip(batchedTargs, batchedSpecs, batchedBest)]
def regLoss(self):
if self.regularize is None or self.regularize <= 0.0:
return 0
reg_loss = 0
r = self.net.regularize(2)
return self.regularize * r
def aiLoss(self, dom, target, **args):
r = self(dom)
return self.regLoss() + r.loss(target = target, **args)
def printNet(self, f):
self.net.printNet(f)
# Training settings
parser = argparse.ArgumentParser(description='PyTorch DiffAI Example', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--batch-size', type=int, default=10, metavar='N', help='input batch size for training')
parser.add_argument('--test-first', type=h.str2bool, nargs='?', const=True, default=True, help='test first')
parser.add_argument('--test-freq', type=int, default=1, metavar='N', help='number of epochs to skip before testing')
parser.add_argument('--test-batch-size', type=int, default=10, metavar='N', help='input batch size for testing')
parser.add_argument('--sub-batch-size', type=int, default=3, metavar='N', help='input batch size for curve specs')
parser.add_argument('--custom-schedule', type=str, default="", metavar='net', help='Learning rate scheduling for lr-multistep. Defaults to [200,250,300] for CIFAR10 and [15,25] for everything else.')
parser.add_argument('--test', type=str, default=None, metavar='net', help='Saved net to use, in addition to any other nets you specify with -n')
parser.add_argument('--update-test-net', type=h.str2bool, nargs='?', const=True, default=False, help="should update test net")
parser.add_argument('--sgd',type=h.str2bool, nargs='?', const=True, default=False, help="use sgd instead of adam")
parser.add_argument('--onyx', type=h.str2bool, nargs='?', const=True, default=False, help="should output onyx")
parser.add_argument('--save-dot-net', type=h.str2bool, nargs='?', const=True, default=False, help="should output in .net")
parser.add_argument('--update-test-net-name', type=str, choices = h.getMethodNames(models), default=None, help="update test net name")
parser.add_argument('--normalize-layer', type=h.str2bool, nargs='?', const=True, default=True, help="should include a training set specific normalization layer")
parser.add_argument('--clip-norm', type=h.str2bool, nargs='?', const=True, default=False, help="should clip the normal and use normal decomposition for weights")
parser.add_argument('--epochs', type=int, default=1000, metavar='N', help='number of epochs to train')
parser.add_argument('--log-freq', type=int, default=10, metavar='N', help='The frequency with which log statistics are printed')
parser.add_argument('--save-freq', type=int, default=1, metavar='N', help='The frequency with which nets and images are saved, in terms of number of test passes')
parser.add_argument('--number-save-images', type=int, default=0, metavar='N', help='The number of images to save. Should be smaller than test-size.')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR', help='learning rate')
parser.add_argument('--lr-multistep', type=h.str2bool, nargs='?', const=True, default=False, help='learning rate multistep scheduling')
parser.add_argument('--threshold', type=float, default=-0.01, metavar='TH', help='threshold for lr schedule')
parser.add_argument('--patience', type=int, default=0, metavar='PT', help='patience for lr schedule')
parser.add_argument('--factor', type=float, default=0.5, metavar='R', help='reduction multiplier for lr schedule')
parser.add_argument('--max-norm', type=float, default=10000, metavar='MN', help='the maximum norm allowed in weight distribution')
parser.add_argument('--curve-width', type=float, default=None, metavar='CW', help='the width of the curve spec')
parser.add_argument('--width', type=float, default=0.01, metavar='CW', help='the width of either the line or box')
parser.add_argument('--spec', choices = [ x for x in dir(Top) if x[-4:] == "Spec" and len(getargspec(getattr(Top, x)).args) == 3]
, default="boxSpec", help='picks which spec builder function to use for training')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed')
parser.add_argument("--use-schedule", type=h.str2bool, nargs='?',
const=True, default=False,
help="activate learning rate schedule")
parser.add_argument('-d', '--domain', sub_choices = None, action = h.SubAct
, default=[], help='picks which abstract goals to use for training', required=True)
parser.add_argument('-t', '--test-domain', sub_choices = None, action = h.SubAct
, default=[], help='picks which abstract goals to use for testing. Examples include ' + str(goals), required=True)
parser.add_argument('-n', '--net', choices = h.getMethodNames(models), action = 'append'
, default=[], help='picks which net to use for training') # one net for now
parser.add_argument('-D', '--dataset', choices = [n for (n,k) in inspect.getmembers(datasets, inspect.isclass) if issubclass(k, Dataset)]
, default="MNIST", help='picks which dataset to use.')
parser.add_argument('-o', '--out', default="out", help='picks the folder to save the outputs')
parser.add_argument('--dont-write', type=h.str2bool, nargs='?', const=True, default=False, help='dont write anywhere if this flag is on')
parser.add_argument('--write-first', type=h.str2bool, nargs='?', const=True, default=False, help='write the initial net. Useful for comparing algorithms, a pain for testing.')
parser.add_argument('--test-size', type=int, default=2000, help='number of examples to test with')
parser.add_argument('-r', '--regularize', type=float, default=None, help='use regularization')
args = parser.parse_args()
largest_domain = max([len(h.catStrs(d)) for d in (args.domain)] )
largest_test_domain = max([len(h.catStrs(d)) for d in (args.test_domain)] )
args.log_interval = int(50000 / (args.batch_size * args.log_freq))
h.max_c_for_norm = args.max_norm
if h.use_cuda:
torch.cuda.manual_seed(1 + args.seed)
else:
torch.manual_seed(args.seed)
train_loader = h.loadDataset(args.dataset, args.batch_size, True, False)
test_loader = h.loadDataset(args.dataset, args.test_batch_size, False, False)
input_dims = train_loader.dataset[0][0].size()
num_classes = int(max(getattr(train_loader.dataset, 'train_labels' if args.dataset != "SVHN" else 'labels'))) + 1
print("input_dims: ", input_dims)
print("Num classes: ", num_classes)
vargs = vars(args)
total_batches_seen = 0
def train(epoch, models):
global total_batches_seen
for model in models:
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
total_batches_seen += 1
time = float(total_batches_seen) / len(train_loader)
if h.use_cuda:
data, target = data.cuda(), target.cuda()
for model in models:
model.global_num += data.size()[0]
timer = Timer("train a sample from " + model.name + " with " + model.ty.name, data.size()[0], False)
lossy = 0
with timer:
for s in model.getSpec(data.to_dtype(),target, time = time):
model.optimizer.zero_grad()
loss = model.aiLoss(*s, time = time, **vargs).mean(dim=0)
lossy += loss.detach().item()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
for p in model.parameters():
if p is not None and torch.isnan(p).any():
print("Such nan in vals")
if p is not None and p.grad is not None and torch.isnan(p.grad).any():
print("Such nan in postmagic")
stdv = 1 / math.sqrt(h.product(p.data.shape))
p.grad = torch.where(torch.isnan(p.grad), torch.normal(mean=h.zeros(p.grad.shape), std=stdv), p.grad)
model.optimizer.step()
for p in model.parameters():
if p is not None and torch.isnan(p).any():
print("Such nan in vals after grad")
stdv = 1 / math.sqrt(h.product(p.data.shape))
p.data = torch.where(torch.isnan(p.data), torch.normal(mean=h.zeros(p.data.shape), std=stdv), p.data)
if args.clip_norm:
model.clip_norm()
for p in model.parameters():
if p is not None and torch.isnan(p).any():
raise Exception("Such nan in vals after clip")
model.addSpeed(timer.getUnitTime())
if batch_idx % args.log_interval == 0:
print(('Train Epoch {:12} {:'+ str(largest_domain) +'}: {:3} [{:7}/{} ({:.0f}%)] \tAvg sec/ex {:1.8f}\tLoss: {:.6f}').format(
model.name, model.ty.name,
epoch,
batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader),
model.speed,
lossy))
num_tests = 0
def test(models, epoch, f = None):
global num_tests
num_tests += 1
class MStat:
def __init__(self, model):
model.eval()
self.model = model
self.correct = 0
class Stat:
def __init__(self, d, dnm):
self.domain = d
self.name = dnm
self.width = 0
self.max_eps = None
self.safe = 0
self.proved = 0
self.time = 0
self.domains = [ Stat(h.parseValues(d, goals), h.catStrs(d)) for d in args.test_domain ]
model_stats = [ MStat(m) for m in models ]
num_its = 0
saved_data_target = []
for data, target in test_loader:
if num_its >= args.test_size:
break
if num_tests == 1:
saved_data_target += list(zip(list(data), list(target)))
num_its += data.size()[0]
if h.use_cuda:
data, target = data.cuda().to_dtype(), target.cuda()
for m in model_stats:
with torch.no_grad():
pred = m.model(data).vanillaTensorPart().max(1, keepdim=True)[1] # get the index of the max log-probability
m.correct += pred.eq(target.data.view_as(pred)).sum()
for stat in m.domains:
timer = Timer(shouldPrint = False)
with timer:
def calcData(data, target):
box = stat.domain.box(data, w = m.model.w, model=m.model, untargeted = True, target=target).to_dtype()
with torch.no_grad():
bs = m.model(box)
org = m.model(data).vanillaTensorPart().max(1,keepdim=True)[1]
stat.width += bs.diameter().sum().item() # sum up batch loss
stat.proved += bs.isSafe(org).sum().item()
stat.safe += bs.isSafe(target).sum().item()
# stat.max_eps += 0 # TODO: calculate max_eps
if m.model.net.neuronCount() < 5000 or stat.domain.__class__ in SYMETRIC_DOMAINS:
calcData(data, target)
else:
for d,t in zip(data, target):
calcData(d.unsqueeze(0),t.unsqueeze(0))
stat.time += timer.getUnitTime()
l = num_its # len(test_loader.dataset)
for m in model_stats:
if args.lr_multistep:
m.model.lrschedule.step()
pr_corr = float(m.correct) / float(l)
if args.use_schedule:
m.model.lrschedule.step(1 - pr_corr)
h.printBoth(('Test: {:12} trained with {:'+ str(largest_domain) +'} - Avg sec/ex {:1.12f}, Accuracy: {}/{} ({:3.1f}%)').format(
m.model.name, m.model.ty.name,
m.model.speed,
m.correct, l, 100. * pr_corr), f = f)
model_stat_rec = ""
for stat in m.domains:
pr_safe = stat.safe / l
pr_proved = stat.proved / l
pr_corr_given_proved = pr_safe / pr_proved if pr_proved > 0 else 0.0
h.printBoth(("\t{:" + str(largest_test_domain)+"} - Width: {:<36.16f} Pr[Proved]={:<1.3f} Pr[Corr and Proved]={:<1.3f} Pr[Corr|Proved]={:<1.3f} {}Time = {:<7.5f}" ).format(
stat.name,
stat.width / l,
pr_proved,
pr_safe, pr_corr_given_proved,
"AvgMaxEps: {:1.10f} ".format(stat.max_eps / l) if stat.max_eps is not None else "",
stat.time), f = f)
model_stat_rec += "{}_{:1.3f}_{:1.3f}_{:1.3f}__".format(stat.name, pr_proved, pr_safe, pr_corr_given_proved)
prepedname = m.model.ty.name.replace(" ", "_").replace(",", "").replace("(", "_").replace(")", "_").replace("=", "_")
net_file = os.path.join(out_dir, m.model.name +"__" +prepedname + "_checkpoint_"+str(epoch)+"_with_{:1.3f}".format(pr_corr))
h.printBoth("\tSaving netfile: {}\n".format(net_file + ".pynet"), f = f)
if (num_tests % args.save_freq == 1 or args.save_freq == 1) and not args.dont_write and (num_tests > 1 or args.write_first):
print("Actually Saving")
torch.save(m.model.net, net_file + ".pynet")
if args.save_dot_net:
with h.mopen(args.dont_write, net_file + ".net", "w") as f2:
m.model.net.printNet(f2)
f2.close()
if args.onyx:
nn = copy.deepcopy(m.model.net)
nn.remove_norm()
torch.onnx.export(nn, h.zeros([1] + list(input_dims)), net_file + ".onyx",
verbose=False, input_names=["actual_input"] + ["param"+str(i) for i in range(len(list(nn.parameters())))], output_names=["output"])
if num_tests == 1 and not args.dont_write:
img_dir = os.path.join(out_dir, "images")
if not os.path.exists(img_dir):
os.makedirs(img_dir)
for img_num,(img,target) in zip(range(args.number_save_images), saved_data_target[:args.number_save_images]):
sz = ""
for s in img.size():
sz += str(s) + "x"
sz = sz[:-1]
img_file = os.path.join(img_dir, args.dataset + "_" + sz + "_"+ str(img_num))
if img_num == 0:
print("Saving image to: ", img_file + ".img")
with open(img_file + ".img", "w") as imgfile:
flatimg = img.view(h.product(img.size()))
for t in flatimg.cpu():
print(decimal.Decimal(float(t)).__format__("f"), file=imgfile)
with open(img_file + ".class" , "w") as imgfile:
print(int(target.item()), file=imgfile)
def createModel(net, domain, domain_name):
net_weights, net_create = net
domain.name = domain_name
net = net_create()
m = {}
for (k,v) in net_weights.state_dict().items():
m[k] = v.to_dtype()
net.load_state_dict(m)
model = Top(args, net, domain)
if args.clip_norm:
model.clip_norm()
if h.use_cuda:
model.cuda()
if args.sgd:
model.optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
else:
model.optimizer = optim.Adam(model.parameters(), lr=args.lr)
if args.lr_multistep:
model.lrschedule = optim.lr_scheduler.MultiStepLR(
model.optimizer,
gamma = 0.1,
milestones = eval(args.custom_schedule) if args.custom_schedule != "" else ([200, 250, 300] if args.dataset == "CIFAR10" else [15, 25]))
else:
model.lrschedule = optim.lr_scheduler.ReduceLROnPlateau(
model.optimizer,
'min',
patience=args.patience,
threshold= args.threshold,
min_lr=0.000001,
factor=args.factor,
verbose=True)
net.name = net_create.__name__
model.name = net_create.__name__
return model
out_dir = os.path.join(args.out, args.dataset, str(args.net)[1:-1].replace(", ","_").replace("'",""),
args.spec, "width_"+str(args.width), h.file_timestamp() )
print("Saving to:", out_dir)
if not os.path.exists(out_dir) and not args.dont_write:
os.makedirs(out_dir)
print("Starting Training with:")
with h.mopen(args.dont_write, os.path.join(out_dir, "config.txt"), "w") as f:
for k in sorted(vars(args)):
h.printBoth("\t"+k+": "+str(getattr(args,k)), f = f)
print("")
def buildNet(n):
n = n(num_classes)
if args.normalize_layer:
if args.dataset in ["MNIST"]:
n = Seq(Normalize([0.1307], [0.3081] ), n)
elif args.dataset in ["CIFAR10", "CIFAR100"]:
n = Seq(Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]), n)
elif args.dataset in ["SVHN"]:
n = Seq(Normalize([0.5,0.5,0.5], [0.2, 0.2, 0.2]), n)
elif args.dataset in ["Imagenet12"]:
n = Seq(Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225]), n)
n = n.infer(input_dims)
if args.clip_norm:
n.clip_norm()
return n
if not args.test is None:
test_name = None
def loadedNet():
if test_name is not None:
n = getattr(models,test_name)
n = buildNet(n)
if args.clip_norm:
n.clip_norm()
return n
else:
with warnings.catch_warnings():
warnings.simplefilter("ignore", SourceChangeWarning)
return torch.load(args.test)
net = loadedNet().double() if h.dtype == torch.float64 else loadedNet().float()
if args.update_test_net_name is not None:
test_name = args.update_test_net_name
elif args.update_test_net and '__name__' in dir(net):
test_name = net.__name__
if test_name is not None:
loadedNet.__name__ = test_name
nets = [ (net, loadedNet) ]
elif args.net == []:
raise Exception("Need to specify at least one net with either -n or --test")
else:
nets = []
for n in args.net:
m = getattr(models,n)
net_create = (lambda m: lambda: buildNet(m))(m) # why doesn't python do scoping right? This is a thunk. It is bad.
net_create.__name__ = n
net = buildNet(m)
net.__name__ = n
nets += [ (net, net_create) ]
print("Name: ", net_create.__name__)
print("Number of Neurons (relus): ", net.neuronCount())
print("Number of Parameters: ", sum([h.product(s.size()) for s in net.parameters()]))
print("Depth (relu layers): ", net.depth())
print()
net.showNet()
print()
if args.domain == []:
models = [ createModel(net, goals.Box(args.width), "Box") for net in nets]
else:
models = h.flat([[createModel(net, h.parseValues(d, goals, scheduling), h.catStrs(d)) for net in nets] for d in args.domain])
with h.mopen(args.dont_write, os.path.join(out_dir, "log.txt"), "w") as f:
startTime = timer()
for epoch in range(1, args.epochs + 1):
if f is not None:
f.flush()
if (epoch - 1) % args.test_freq == 0 and (epoch > 1 or args.test_first):
with Timer("test all models before epoch "+str(epoch), 1):
test(models, epoch, f)
if f is not None:
f.flush()
h.printBoth("Elapsed-Time: {:.2f}s\n".format(timer() - startTime), f = f)
if args.epochs <= args.test_freq:
break
with Timer("train all models in epoch", 1, f = f):
train(epoch, models) | 0.759047 | 0.195498 |
import asyncio
import logging
from contextlib import suppress
from typing import Optional
import discord
from discord import Message
from discord.abc import Messageable
from discord.embeds import EmptyEmbed
from discord.ext import commands
from milton.core.config import CONFIG
log = logging.getLogger(__name__)
DELETE_EMOJI = CONFIG.emojis.trash
NEXT_EMOJI = CONFIG.emojis.next
BACK_EMOJI = CONFIG.emojis.back
LAST_EMOJI = CONFIG.emojis.last
FIRST_EMOJI = CONFIG.emojis.first
STOP_EMOJI = CONFIG.emojis.stop
DEFAULT_EMOJIS = (
DELETE_EMOJI,
FIRST_EMOJI,
BACK_EMOJI,
NEXT_EMOJI,
LAST_EMOJI,
STOP_EMOJI,
)
class Paginator(commands.Paginator):
"""Helper that builds and sends messages to channels.
Allows interactive pagination with emojis. This class is heavily copied
from the Python Discord bot.
Args:
prefix: A prefix to give to each page of the resulting embed.
suffix: A suffix to give to each page of the resulting embed.
max_size: The maximum size of a page. Defaults to discord's maximum
message size, 2000 characters.
force_embed: By default, one-page embeds are sent as a normal message.
Should it be sent as an embed instead?
title: An optional title for the embed.
"""
def __init__(
self,
prefix: str = "",
suffix: str = "",
max_size: int = 2000,
force_embed: bool = False,
title: Optional[str] = None,
) -> None:
# As this is used a lot, I expose the parent class arguments explicitly
super().__init__(prefix, suffix, max_size)
self.force_embed = force_embed
self.title = title
async def paginate(self, ctx: Messageable):
"""Send and start to paginate this message
If message is just one page, does not provide interactive pagination,
as it's useless.
Args:
ctx: The messageable channel to send the message to.
"""
# Yanked and modified from the python discord bot paginator
def event_check(reaction_: discord.Reaction, user_: discord.Member) -> bool:
"""Make sure that this reaction is what we want to operate on."""
return (
# Conditions for a successful pagination:
all(
(
# Reaction is on this message
reaction_.message.id == message.id,
# Reaction is one of the pagination emotes
str(reaction_.emoji) in DEFAULT_EMOJIS,
# Reaction was not made by the Bot
user_.id != ctx.bot.user.id,
)
)
)
pages = self.pages
max_pages = len(pages)
embed = discord.Embed(description=pages[0], title=self.title or EmptyEmbed)
current_page = 0
if max_pages <= 1 and self.force_embed is False:
# Only a single page to send. Just send it and stop
return await ctx.send(embed.description)
elif self.force_embed:
# Forced to send an embed anyway.
return await ctx.send(embed=embed)
# Add a handy descriptive footer
embed.set_footer(text=f"Page {current_page + 1} / {max_pages}")
message: Message = await ctx.send(embed=embed)
for emoji in DEFAULT_EMOJIS:
await message.add_reaction(emoji=emoji)
while True:
try:
reaction, user = await ctx.bot.wait_for(
"reaction_add",
timeout=ctx.bot.config.bot.pagination_timeout,
check=event_check,
)
except asyncio.TimeoutError:
log.debug("Timed out waiting for a reaction")
break
if str(reaction.emoji) == DELETE_EMOJI:
log.debug("Got delete reaction")
return await message.delete()
if reaction.emoji == FIRST_EMOJI:
await message.remove_reaction(reaction.emoji, user)
current_page = 0
log.debug(f"Got first page reaction - changing to page 1/{max_pages}")
embed.description = pages[current_page]
embed.set_footer(text=f"Page {current_page + 1}/{max_pages}")
await message.edit(embed=embed)
if reaction.emoji == LAST_EMOJI:
await message.remove_reaction(reaction.emoji, user)
current_page = max_pages - 1
log.debug(
f"Got last page reaction - changing to page {current_page + 1}/{max_pages}"
)
embed.description = pages[current_page]
embed.set_footer(text=f"Page {current_page + 1}/{max_pages}")
await message.edit(embed=embed)
if reaction.emoji == BACK_EMOJI:
await message.remove_reaction(reaction.emoji, user)
if current_page <= 0:
log.debug(
"Got previous page reaction, but we're on the first page - ignoring"
)
continue
current_page -= 1
log.debug(
f"Got previous page reaction - changing to page {current_page + 1}/{max_pages}"
)
embed.description = pages[current_page]
embed.set_footer(text=f"Page {current_page + 1}/{max_pages}")
await message.edit(embed=embed)
if reaction.emoji == NEXT_EMOJI:
await message.remove_reaction(reaction.emoji, user)
if current_page >= max_pages - 1:
log.debug(
"Got next page reaction, but we're on the last page - ignoring"
)
continue
current_page += 1
log.debug(
f"Got next page reaction - changing to page {current_page + 1}/{max_pages}"
)
embed.description = pages[current_page]
embed.set_footer(text=f"Page {current_page + 1}/{max_pages}")
await message.edit(embed=embed)
if reaction.emoji == STOP_EMOJI:
break
log.debug("Ending pagination and clearing reactions.")
with suppress(discord.NotFound):
await message.clear_reactions() | milton/utils/paginator.py | import asyncio
import logging
from contextlib import suppress
from typing import Optional
import discord
from discord import Message
from discord.abc import Messageable
from discord.embeds import EmptyEmbed
from discord.ext import commands
from milton.core.config import CONFIG
log = logging.getLogger(__name__)
DELETE_EMOJI = CONFIG.emojis.trash
NEXT_EMOJI = CONFIG.emojis.next
BACK_EMOJI = CONFIG.emojis.back
LAST_EMOJI = CONFIG.emojis.last
FIRST_EMOJI = CONFIG.emojis.first
STOP_EMOJI = CONFIG.emojis.stop
DEFAULT_EMOJIS = (
DELETE_EMOJI,
FIRST_EMOJI,
BACK_EMOJI,
NEXT_EMOJI,
LAST_EMOJI,
STOP_EMOJI,
)
class Paginator(commands.Paginator):
"""Helper that builds and sends messages to channels.
Allows interactive pagination with emojis. This class is heavily copied
from the Python Discord bot.
Args:
prefix: A prefix to give to each page of the resulting embed.
suffix: A suffix to give to each page of the resulting embed.
max_size: The maximum size of a page. Defaults to discord's maximum
message size, 2000 characters.
force_embed: By default, one-page embeds are sent as a normal message.
Should it be sent as an embed instead?
title: An optional title for the embed.
"""
def __init__(
self,
prefix: str = "",
suffix: str = "",
max_size: int = 2000,
force_embed: bool = False,
title: Optional[str] = None,
) -> None:
# As this is used a lot, I expose the parent class arguments explicitly
super().__init__(prefix, suffix, max_size)
self.force_embed = force_embed
self.title = title
async def paginate(self, ctx: Messageable):
"""Send and start to paginate this message
If message is just one page, does not provide interactive pagination,
as it's useless.
Args:
ctx: The messageable channel to send the message to.
"""
# Yanked and modified from the python discord bot paginator
def event_check(reaction_: discord.Reaction, user_: discord.Member) -> bool:
"""Make sure that this reaction is what we want to operate on."""
return (
# Conditions for a successful pagination:
all(
(
# Reaction is on this message
reaction_.message.id == message.id,
# Reaction is one of the pagination emotes
str(reaction_.emoji) in DEFAULT_EMOJIS,
# Reaction was not made by the Bot
user_.id != ctx.bot.user.id,
)
)
)
pages = self.pages
max_pages = len(pages)
embed = discord.Embed(description=pages[0], title=self.title or EmptyEmbed)
current_page = 0
if max_pages <= 1 and self.force_embed is False:
# Only a single page to send. Just send it and stop
return await ctx.send(embed.description)
elif self.force_embed:
# Forced to send an embed anyway.
return await ctx.send(embed=embed)
# Add a handy descriptive footer
embed.set_footer(text=f"Page {current_page + 1} / {max_pages}")
message: Message = await ctx.send(embed=embed)
for emoji in DEFAULT_EMOJIS:
await message.add_reaction(emoji=emoji)
while True:
try:
reaction, user = await ctx.bot.wait_for(
"reaction_add",
timeout=ctx.bot.config.bot.pagination_timeout,
check=event_check,
)
except asyncio.TimeoutError:
log.debug("Timed out waiting for a reaction")
break
if str(reaction.emoji) == DELETE_EMOJI:
log.debug("Got delete reaction")
return await message.delete()
if reaction.emoji == FIRST_EMOJI:
await message.remove_reaction(reaction.emoji, user)
current_page = 0
log.debug(f"Got first page reaction - changing to page 1/{max_pages}")
embed.description = pages[current_page]
embed.set_footer(text=f"Page {current_page + 1}/{max_pages}")
await message.edit(embed=embed)
if reaction.emoji == LAST_EMOJI:
await message.remove_reaction(reaction.emoji, user)
current_page = max_pages - 1
log.debug(
f"Got last page reaction - changing to page {current_page + 1}/{max_pages}"
)
embed.description = pages[current_page]
embed.set_footer(text=f"Page {current_page + 1}/{max_pages}")
await message.edit(embed=embed)
if reaction.emoji == BACK_EMOJI:
await message.remove_reaction(reaction.emoji, user)
if current_page <= 0:
log.debug(
"Got previous page reaction, but we're on the first page - ignoring"
)
continue
current_page -= 1
log.debug(
f"Got previous page reaction - changing to page {current_page + 1}/{max_pages}"
)
embed.description = pages[current_page]
embed.set_footer(text=f"Page {current_page + 1}/{max_pages}")
await message.edit(embed=embed)
if reaction.emoji == NEXT_EMOJI:
await message.remove_reaction(reaction.emoji, user)
if current_page >= max_pages - 1:
log.debug(
"Got next page reaction, but we're on the last page - ignoring"
)
continue
current_page += 1
log.debug(
f"Got next page reaction - changing to page {current_page + 1}/{max_pages}"
)
embed.description = pages[current_page]
embed.set_footer(text=f"Page {current_page + 1}/{max_pages}")
await message.edit(embed=embed)
if reaction.emoji == STOP_EMOJI:
break
log.debug("Ending pagination and clearing reactions.")
with suppress(discord.NotFound):
await message.clear_reactions() | 0.822332 | 0.148788 |
import os
from wisdem import run_wisdem
import wisdem.postprocessing.compare_designs as compare_designs
# File management
thisdir = os.path.dirname(os.path.realpath(__file__))
ontology_dir = os.path.join(os.path.dirname(thisdir), "WT_Ontology")
fname_wt_input = os.path.join(ontology_dir, "IEA-15-240-RWT.yaml")
fname_modeling = os.path.join(thisdir, "modeling_options_monopile.yaml")
fname_analysis_noopt = os.path.join(thisdir, "analysis_options.yaml")
fname_analysis_opt = os.path.join(thisdir, "analysis_options_monopile.yaml")
folder_output = os.path.join(thisdir, "outputs")
# Run WISDEM tower-monopile optimization
prob, modeling_options, analysis_noopt = run_wisdem(fname_wt_input, fname_modeling, fname_analysis_noopt)
wt_opt, modeling_options, analysis_opt = run_wisdem(fname_wt_input, fname_modeling, fname_analysis_opt)
# Produce standard comparison plots
compare_designs.run([prob, wt_opt], ['Before','After'], modeling_options, analysis_opt)
# print results from the analysis or optimization
print("\n\nTower-monopile z-pts =", wt_opt["towerse.z_param"])
print("Tower diameter =", wt_opt["towerse.tower_outer_diameter"])
print("Tower thickness =", wt_opt["towerse.tower_wall_thickness"])
print("Tower mass (kg) =", wt_opt["towerse.tower_mass"])
print("Monopile diameter =", wt_opt["fixedse.monopile_outer_diameter"])
print("Monopile thickness =", wt_opt["fixedse.monopile_wall_thickness"])
print("Monopile mass (kg) =", wt_opt["fixedse.monopile_mass"])
print("Total mass (kg) =", wt_opt["fixedse.structural_mass"])
print("\nTower Fore-aft freq (Hz) =", wt_opt["towerse.tower.fore_aft_freqs"])
print("Tower Fore-aft mode shapes =", wt_opt["towerse.tower.fore_aft_modes"])
print("Tower Side-side freq (Hz) =", wt_opt["towerse.tower.side_side_freqs"])
print("Tower Side-side mode shapes =", wt_opt["towerse.tower.side_side_modes"])
print("Monopile Fore-aft freq (Hz) =", wt_opt["fixedse.monopile.fore_aft_freqs"])
print("Monopile Fore-aft mode shapes =", wt_opt["fixedse.monopile.fore_aft_modes"])
print("Monopile Side-side freq (Hz) =", wt_opt["fixedse.monopile.side_side_freqs"])
print("Monopile Side-side mode shapes =", wt_opt["fixedse.monopile.side_side_modes"])
print("\nwind: ", wt_opt["towerse.env.Uref"])
print("Tower top_deflection (m) =", wt_opt["towerse.tower.top_deflection"])
print("Tower base forces (N) =", wt_opt["towerse.tower.turbine_F"])
print("Tower base moments (Nm) =", wt_opt["towerse.tower.turbine_M"])
print("Tower Constraint z-pts =", wt_opt["towerse.z_full"])
print("Tower stress =", wt_opt["towerse.post.constr_stress"].flatten())
print("Tower GL buckling =", wt_opt["towerse.post.constr_global_buckling"].flatten())
print("Tower Shell buckling =", wt_opt["towerse.post.constr_shell_buckling"].flatten())
print("Tower taper ratio constraint =", wt_opt["towerse.constr_taper"])
print("Monopile top_deflection (m) =", wt_opt["fixedse.monopile.top_deflection"])
print("Mudline forces (N) =", wt_opt["fixedse.monopile.mudline_F"])
print("Mudline moments (Nm) =", wt_opt["fixedse.monopile.mudline_M"])
print("Monopile Constraint z-pts =", wt_opt["fixedse.z_full"])
print("Monopile stress =", wt_opt["fixedse.post.constr_stress"].flatten())
print("Monopile GL buckling =", wt_opt["fixedse.post.constr_global_buckling"].flatten())
print("Monopile Shell buckling =", wt_opt["fixedse.post.constr_shell_buckling"].flatten())
print("Monopile taper ratio constraint =", wt_opt["fixedse.constr_taper"]) | WISDEM/optimize_monopile_tower.py | import os
from wisdem import run_wisdem
import wisdem.postprocessing.compare_designs as compare_designs
# File management
thisdir = os.path.dirname(os.path.realpath(__file__))
ontology_dir = os.path.join(os.path.dirname(thisdir), "WT_Ontology")
fname_wt_input = os.path.join(ontology_dir, "IEA-15-240-RWT.yaml")
fname_modeling = os.path.join(thisdir, "modeling_options_monopile.yaml")
fname_analysis_noopt = os.path.join(thisdir, "analysis_options.yaml")
fname_analysis_opt = os.path.join(thisdir, "analysis_options_monopile.yaml")
folder_output = os.path.join(thisdir, "outputs")
# Run WISDEM tower-monopile optimization
prob, modeling_options, analysis_noopt = run_wisdem(fname_wt_input, fname_modeling, fname_analysis_noopt)
wt_opt, modeling_options, analysis_opt = run_wisdem(fname_wt_input, fname_modeling, fname_analysis_opt)
# Produce standard comparison plots
compare_designs.run([prob, wt_opt], ['Before','After'], modeling_options, analysis_opt)
# print results from the analysis or optimization
print("\n\nTower-monopile z-pts =", wt_opt["towerse.z_param"])
print("Tower diameter =", wt_opt["towerse.tower_outer_diameter"])
print("Tower thickness =", wt_opt["towerse.tower_wall_thickness"])
print("Tower mass (kg) =", wt_opt["towerse.tower_mass"])
print("Monopile diameter =", wt_opt["fixedse.monopile_outer_diameter"])
print("Monopile thickness =", wt_opt["fixedse.monopile_wall_thickness"])
print("Monopile mass (kg) =", wt_opt["fixedse.monopile_mass"])
print("Total mass (kg) =", wt_opt["fixedse.structural_mass"])
print("\nTower Fore-aft freq (Hz) =", wt_opt["towerse.tower.fore_aft_freqs"])
print("Tower Fore-aft mode shapes =", wt_opt["towerse.tower.fore_aft_modes"])
print("Tower Side-side freq (Hz) =", wt_opt["towerse.tower.side_side_freqs"])
print("Tower Side-side mode shapes =", wt_opt["towerse.tower.side_side_modes"])
print("Monopile Fore-aft freq (Hz) =", wt_opt["fixedse.monopile.fore_aft_freqs"])
print("Monopile Fore-aft mode shapes =", wt_opt["fixedse.monopile.fore_aft_modes"])
print("Monopile Side-side freq (Hz) =", wt_opt["fixedse.monopile.side_side_freqs"])
print("Monopile Side-side mode shapes =", wt_opt["fixedse.monopile.side_side_modes"])
print("\nwind: ", wt_opt["towerse.env.Uref"])
print("Tower top_deflection (m) =", wt_opt["towerse.tower.top_deflection"])
print("Tower base forces (N) =", wt_opt["towerse.tower.turbine_F"])
print("Tower base moments (Nm) =", wt_opt["towerse.tower.turbine_M"])
print("Tower Constraint z-pts =", wt_opt["towerse.z_full"])
print("Tower stress =", wt_opt["towerse.post.constr_stress"].flatten())
print("Tower GL buckling =", wt_opt["towerse.post.constr_global_buckling"].flatten())
print("Tower Shell buckling =", wt_opt["towerse.post.constr_shell_buckling"].flatten())
print("Tower taper ratio constraint =", wt_opt["towerse.constr_taper"])
print("Monopile top_deflection (m) =", wt_opt["fixedse.monopile.top_deflection"])
print("Mudline forces (N) =", wt_opt["fixedse.monopile.mudline_F"])
print("Mudline moments (Nm) =", wt_opt["fixedse.monopile.mudline_M"])
print("Monopile Constraint z-pts =", wt_opt["fixedse.z_full"])
print("Monopile stress =", wt_opt["fixedse.post.constr_stress"].flatten())
print("Monopile GL buckling =", wt_opt["fixedse.post.constr_global_buckling"].flatten())
print("Monopile Shell buckling =", wt_opt["fixedse.post.constr_shell_buckling"].flatten())
print("Monopile taper ratio constraint =", wt_opt["fixedse.constr_taper"]) | 0.347648 | 0.212436 |
import os
import string
import uuid
import cv2
import imutils.text
def update_flag(key_press, current_flag, flags):
"""Handle key press from cv2.waitKey() for capturing frames
:param key_press: output from `cv2.waitKey()`
:param current_flag: value of 'flag' holding previous key press
:param flags: dictionary mapping key presses to class labels
:return: new flag value
"""
if key_press < 0 or chr(key_press) not in flags.keys():
return current_flag
key_press = chr(key_press)
for k in flags.keys():
if k == key_press and k == current_flag:
print(f'Stop capturing for {flags[k]}')
return None
elif k == key_press:
print(f'Capturing for {flags[k]}')
return k
def prompt_labels():
"""Prompt user for class labels and map them to keys for gathering training data
:return: tuple of labels and key press their mapped to
"""
n_class = int(input(f'Number of classes to input: '))
if n_class > 26:
raise ValueError('Only supports up to 26 classes.')
keys = list(string.ascii_lowercase[:n_class])
labels = {}
for key in keys:
label = input(f'Label for key press "{key}": ')
labels[key] = label
return labels
def draw_labels(image, labels):
header = 'Press the below keys to capture data for each class'
lines = [f' {k} - {v}' for k, v in labels.items()]
lines = [header] + lines
text = '\n'.join(lines)
imutils.text.put_text(image,
text,
org=(10, 25),
font_face=cv2.FONT_HERSHEY_SIMPLEX,
font_scale=0.7,
color=(0, 0, 255),
thickness=2)
def mkdirs(dir_names):
"""Create dirs if they don't exist
:param dir_names: names of dirs to create; if nested, provide parent in list before child
:return: None
"""
for dir_name in dir_names:
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
def gather_images(output_dir, labels=None, video_capture=0, snapshot=True):
"""Capture training data for building a 2 class model
:param output_dir: main dir for images to be saved to (they will saved to a subdir named by `labels`)
:param labels: len 2 list of labels for the classes (a will be key for position 0 and b for 1)
:param video_capture: value to pass to `cv2.VideoCapture()`
:param snapshot: Should only a snapshot be taken when key pressed?
If False, a keypress toggles continuous capture mode.
:return: None; images are saved to output_dir
"""
if labels is None:
label_key_dict = prompt_labels()
else:
keys = list(string.ascii_lowercase[:len(labels)])
label_key_dict = {k: v for k, v in zip(keys, labels)}
# Ensure dirs exist (create them if not)
output_sub_dirs = [os.path.join(output_dir, l) for l in labels]
mkdirs([output_dir] + output_sub_dirs)
vidcap = cv2.VideoCapture(video_capture)
capture_flag = None
while True:
grabbed_frame, frame = vidcap.read()
if not grabbed_frame:
break
display_frame = frame.copy()
draw_labels(display_frame, label_key_dict)
display_frame = imutils.resize(display_frame, width=750)
cv2.imshow('Gather Training Data (ESC to quit)', display_frame)
key = cv2.waitKey(10)
if key == 27:
break
else:
capture_flag = update_flag(key, capture_flag, label_key_dict)
if capture_flag is not None:
frame_name = 'frame_' + str(uuid.uuid4())
file_name = os.path.join(output_dir, label_key_dict[capture_flag], frame_name + '.jpg')
cv2.imwrite(file_name, frame)
if snapshot:
capture_flag = None
cv2.destroyAllWindows() | imclassify/gather_images.py | import os
import string
import uuid
import cv2
import imutils.text
def update_flag(key_press, current_flag, flags):
"""Handle key press from cv2.waitKey() for capturing frames
:param key_press: output from `cv2.waitKey()`
:param current_flag: value of 'flag' holding previous key press
:param flags: dictionary mapping key presses to class labels
:return: new flag value
"""
if key_press < 0 or chr(key_press) not in flags.keys():
return current_flag
key_press = chr(key_press)
for k in flags.keys():
if k == key_press and k == current_flag:
print(f'Stop capturing for {flags[k]}')
return None
elif k == key_press:
print(f'Capturing for {flags[k]}')
return k
def prompt_labels():
"""Prompt user for class labels and map them to keys for gathering training data
:return: tuple of labels and key press their mapped to
"""
n_class = int(input(f'Number of classes to input: '))
if n_class > 26:
raise ValueError('Only supports up to 26 classes.')
keys = list(string.ascii_lowercase[:n_class])
labels = {}
for key in keys:
label = input(f'Label for key press "{key}": ')
labels[key] = label
return labels
def draw_labels(image, labels):
header = 'Press the below keys to capture data for each class'
lines = [f' {k} - {v}' for k, v in labels.items()]
lines = [header] + lines
text = '\n'.join(lines)
imutils.text.put_text(image,
text,
org=(10, 25),
font_face=cv2.FONT_HERSHEY_SIMPLEX,
font_scale=0.7,
color=(0, 0, 255),
thickness=2)
def mkdirs(dir_names):
"""Create dirs if they don't exist
:param dir_names: names of dirs to create; if nested, provide parent in list before child
:return: None
"""
for dir_name in dir_names:
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
def gather_images(output_dir, labels=None, video_capture=0, snapshot=True):
"""Capture training data for building a 2 class model
:param output_dir: main dir for images to be saved to (they will saved to a subdir named by `labels`)
:param labels: len 2 list of labels for the classes (a will be key for position 0 and b for 1)
:param video_capture: value to pass to `cv2.VideoCapture()`
:param snapshot: Should only a snapshot be taken when key pressed?
If False, a keypress toggles continuous capture mode.
:return: None; images are saved to output_dir
"""
if labels is None:
label_key_dict = prompt_labels()
else:
keys = list(string.ascii_lowercase[:len(labels)])
label_key_dict = {k: v for k, v in zip(keys, labels)}
# Ensure dirs exist (create them if not)
output_sub_dirs = [os.path.join(output_dir, l) for l in labels]
mkdirs([output_dir] + output_sub_dirs)
vidcap = cv2.VideoCapture(video_capture)
capture_flag = None
while True:
grabbed_frame, frame = vidcap.read()
if not grabbed_frame:
break
display_frame = frame.copy()
draw_labels(display_frame, label_key_dict)
display_frame = imutils.resize(display_frame, width=750)
cv2.imshow('Gather Training Data (ESC to quit)', display_frame)
key = cv2.waitKey(10)
if key == 27:
break
else:
capture_flag = update_flag(key, capture_flag, label_key_dict)
if capture_flag is not None:
frame_name = 'frame_' + str(uuid.uuid4())
file_name = os.path.join(output_dir, label_key_dict[capture_flag], frame_name + '.jpg')
cv2.imwrite(file_name, frame)
if snapshot:
capture_flag = None
cv2.destroyAllWindows() | 0.573678 | 0.457985 |
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login, authenticate
from .models import Image, Profile, Comments, Likes
from friendship.models import Follow
from .forms import ImageForm, ProfileForm, CommentForm
from django.contrib.auth.models import User
from friendship.exceptions import AlreadyExistsError
from django.contrib.sites.shortcuts import get_current_site
from django.template.loader import render_to_string
# Create your views here.
def home(request):
    """Render the main feed page with all images, comments, likes and profiles.

    NOTE(review): the template context is passed via ``locals()``, so every
    local name below is part of the template contract -- renaming any of
    them (including ``id``, which shadows the builtin) would silently change
    what the template sees.
    """
    title = 'Instagram'
    current_user = request.user
    images = Image.get_all_images()
    comments = Comments.objects.all()
    likes = Likes.objects.all()
    profile = Profile.objects.all()
    form = CommentForm()
    # Shadows the ``id`` builtin; kept as-is because locals() exposes it.
    id = request.user.id
    prof = User.objects.all()
    liked_images = Likes.objects.filter(liker_id=id)
    # IDs of the images the current user has liked; presumably used by the
    # template to toggle the like button state -- TODO confirm in template.
    mylist = [i.imageid for i in liked_images]
    return render(request, 'instagram/index.html', locals())
@login_required(login_url='/accounts/login/')
def upload_image(request):
    """Let the logged-in user upload a new image attached to their profile.

    GET renders an empty :class:`ImageForm`.  POST validates the submitted
    form and, on success, saves the image with the current user and their
    profile attached, then redirects to that user's profile page.  If the
    user has no :class:`Profile` yet, the page is rendered with an empty
    form (matching the previous behaviour of silently doing nothing).
    """
    # Fetch only this user's profile instead of scanning every Profile row
    # (the old loop also shadowed the queryset with its loop variable).
    profile = Profile.objects.filter(user=request.user).first()
    form = ImageForm()
    if profile is not None:
        if request.method == 'POST':
            form = ImageForm(request.POST, request.FILES)
            if form.is_valid():
                upload = form.save(commit=False)
                upload.user = request.user
                upload.profile_pics = profile
                upload.save()
                return redirect('edit_profile', username=request.user)
        else:
            form = ImageForm()
    return render(request, 'registration/upload_image.html', {'form': form})
@login_required(login_url='/accounts/login')
def edit_profile(request, username):
    """Render ``username``'s profile page with their images and follow counts.

    Propagates ``User.DoesNotExist`` when ``username`` is unknown.
    NOTE(review): the template context is passed via ``locals()``, so the
    local names below are part of the template contract -- do not rename.
    """
    user = User.objects.get(username=username)
    # Same User record fetched under a second name; kept because the
    # template (via locals()) may reference ``profile`` directly.
    profile = User.objects.get(username=username)
    try:
        profile_details = Profile.get_by_id(user.id)
    except Exception:
        # Narrowed from a bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit); fall back to the filter helper.
        profile_details = Profile.filter_by_id(user.id)
    images = Image.get_profile_images(user.id)
    follower = len(Follow.objects.followers(user))
    following = len(Follow.objects.following(user))
    users = User.objects.all()
    users_following = Follow.objects.following(request.user)
    title = f'@{user.username} Instagram photos'
    return render(request, 'registration/edit_profile.html', locals())
@login_required(login_url='/accounts/login')
def editprofile(request):
    """Create or update the logged-in user's profile from a ProfileForm POST.

    On a valid POST the profile is saved and the view redirects back to
    itself; otherwise the form (bound with errors, or fresh on GET) is
    re-rendered. Context is passed via ``locals()``.
    """
    if request.method == 'POST':
        form = ProfileForm(request.POST, request.FILES)
        if form.is_valid():
            edit = form.save(commit=False)
            # Tie the profile to the current user before persisting.
            edit.user = request.user
            edit.save()
            return redirect('editprofile')
    else:
        form = ProfileForm()
    return render(request, 'registration/profile.html', locals())
@login_required(login_url='/accounts/login')
def single_image(request, image_id):
    """Show one image with its comments and accept new comment submissions."""
    image = Image.get_image_id(image_id)
    comments = Comments.get_comments_by_images(image_id)
    if request.method != 'POST':
        form = CommentForm()
    else:
        form = CommentForm(request.POST)
        if form.is_valid():
            new_comment = form.save(commit=False)
            new_comment.image = image
            new_comment.user = request.user
            new_comment.save()
            return redirect('single_image', image_id=image_id)
    context = {'image': image, 'form': form, 'comments': comments}
    return render(request, 'image.html', context)
@login_required(login_url='/accounts/login')
def search(request):
    """Search profiles by the ``search`` GET parameter."""
    term = request.GET.get('search')
    # Guard clause: missing or empty search term -> prompt the user.
    if not term:
        message = 'Enter term to search'
        return render(request, 'instagram/search.html', {'message': message})
    profiles = Profile.search_profile(term)
    message = f'{term}'
    return render(request, 'instagram/search.html', {'message': message, 'profiles': profiles})
def comment(request,image_id):
    """Handle the standalone comment page for one image.

    Context is passed via ``locals()``, so local names are template variables.
    NOTE(review): unlike the other write views this one is not wrapped in
    @login_required — an anonymous POST would assign AnonymousUser to
    ``comment.user``; confirm whether the decorator was omitted intentionally.
    """
    current_user=request.user
    profile = User.objects.get(username=current_user)
    image = Image.objects.get(id=image_id)
    comments = Comments.objects.all()
    if request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            comment = form.save(commit=False)
            # Attach the target image and author before saving.
            comment.image = image
            comment.user = current_user
            comment.save()
            return redirect('home')
    else:
        form = CommentForm()
    return render(request, 'comment.html', locals())
def follow(request, user_id):
    """Make the logged-in user follow the user identified by ``user_id``.

    Raises Http404 when the follow relationship already exists; otherwise
    redirects back to the home feed.
    """
    users = User.objects.get(id=user_id)
    try:
        Follow.objects.add_follower(request.user, users)
    except AlreadyExistsError:
        # BUG FIX: the original did ``return Http404`` — returning the
        # exception *class* instead of an HTTP response. Views must raise it.
        raise Http404("Already following this user")
    # BUG FIX: the original called redirect('home', locals()), passing the
    # whole locals() dict as a positional URL argument and breaking reversing.
    return redirect('home')
def like(request, image_id):
    """Record a like of ``image_id`` by the logged-in user, then go home.

    ``get_or_create`` makes the operation idempotent: liking an already
    liked image is a no-op.
    """
    current_user = request.user
    image = Image.objects.get(id=image_id)
    # get_or_create already persists a newly created row, so the original
    # follow-up ``new_like.save()`` was a redundant extra UPDATE — removed.
    Likes.objects.get_or_create(liker=current_user, imageid=image)
    return redirect('home')
from django.http import HttpResponse, Http404
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login, authenticate
from .models import Image, Profile, Comments, Likes
from friendship.models import Follow
from .forms import ImageForm, ProfileForm, CommentForm
from django.contrib.auth.models import User
from friendship.exceptions import AlreadyExistsError
from django.contrib.sites.shortcuts import get_current_site
from django.template.loader import render_to_string
# Create your views here.
def home(request):
title = 'Instagram'
current_user = request.user
images = Image.get_all_images()
comments = Comments.objects.all()
likes = Likes.objects.all()
profile = Profile.objects.all()
form = CommentForm()
id = request.user.id
prof = User.objects.all()
liked_images = Likes.objects.filter(liker_id=id)
mylist = [i.imageid for i in liked_images]
return render(request, 'instagram/index.html', locals())
@login_required(login_url='/accounts/login/')
def upload_image(request):
profile = Profile.objects.all()
form = ImageForm()
for profile in profile:
if profile.user.id == request.user.id:
if request.method == 'POST':
form = ImageForm(request.POST, request.FILES)
if form.is_valid():
upload =form.save(commit=False)
upload.user = request.user
upload.profile_pics = profile
upload.save()
return redirect('edit_profile', username=request.user)
else:
form = ImageForm()
return render(request, 'registration/upload_image.html',{'form':form})
@login_required(login_url='/accounts/login')
def edit_profile(request, username):
user = User.objects.get(username=username)
profile = User.objects.get(username=username)
try:
profile_details = Profile.get_by_id(user.id)
except:
profile_details = Profile.filter_by_id(user.id)
images = Image.get_profile_images(user.id)
follower = len(Follow.objects.followers(user))
following = len(Follow.objects.following(user))
users=User.objects.all()
users_following=Follow.objects.following(request.user)
title = f'@{user.username} Instagram photos'
return render(request, 'registration/edit_profile.html', locals())
@login_required(login_url='/accounts/login')
def editprofile(request):
if request.method == 'POST':
form = ProfileForm(request.POST, request.FILES)
if form.is_valid():
edit = form.save(commit=False)
edit.user = request.user
edit.save()
return redirect('editprofile')
else:
form = ProfileForm()
return render(request, 'registration/profile.html', locals())
@login_required(login_url='/accounts/login')
def single_image(request, image_id):
image = Image.get_image_id(image_id)
comments = Comments.get_comments_by_images(image_id)
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.image = image
comment.user = request.user
comment.save()
return redirect('single_image', image_id=image_id)
else:
form = CommentForm()
return render(request, 'image.html', {'image':image, 'form':form, 'comments':comments})
@login_required(login_url='/accounts/login')
def search(request):
if 'search' in request.GET and request.GET['search']:
search_term = request.GET.get('search')
profiles = Profile.search_profile(search_term)
message = f'{search_term}'
return render(request, 'instagram/search.html',{'message':message, 'profiles':profiles})
else:
message = 'Enter term to search'
return render(request, 'instagram/search.html', {'message':message})
def comment(request,image_id):
current_user=request.user
profile = User.objects.get(username=current_user)
image = Image.objects.get(id=image_id)
comments = Comments.objects.all()
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.image = image
comment.user = current_user
comment.save()
return redirect('home')
else:
form = CommentForm()
return render(request, 'comment.html', locals())
def follow(request, user_id):
    """Make the logged-in user follow the user identified by ``user_id``.

    Raises Http404 when the follow relationship already exists; otherwise
    redirects back to the home feed.
    """
    users = User.objects.get(id=user_id)
    try:
        Follow.objects.add_follower(request.user, users)
    except AlreadyExistsError:
        # BUG FIX: the original did ``return Http404`` — returning the
        # exception *class* instead of an HTTP response. Views must raise it.
        raise Http404("Already following this user")
    # BUG FIX: the original called redirect('home', locals()), passing the
    # whole locals() dict as a positional URL argument and breaking reversing.
    return redirect('home')
def like(request, image_id):
    """Record a like of ``image_id`` by the logged-in user, then go home.

    ``get_or_create`` makes the operation idempotent: liking an already
    liked image is a no-op.
    """
    current_user = request.user
    image = Image.objects.get(id=image_id)
    # get_or_create already persists a newly created row, so the original
    # follow-up ``new_like.save()`` was a redundant extra UPDATE — removed.
    Likes.objects.get_or_create(liker=current_user, imageid=image)
    return redirect('home')
import os
import json
import time
import urllib
import logging
import bs4
import click
import requests
import schooldiggerscraper.utils # noqa
logging.basicConfig(level=logging.INFO)
class SchoolDiggerScraper(object):
    """Class of scraper, can download and save.

    Workflow: ``download()`` pulls every paginated result page into
    ``self.data`` and ``save()`` writes them out as JSON-lines.
    """
    def __init__(self, state, level, out, sleep, max_page):
        """Init needed.

        :param state: state code (key into ``state_to_fips``)
        :param level: school level index (key into ``school_level``/``forms``)
        :param out: output directory for the JSON-lines dump
        :param sleep: seconds to pause between page requests
        :param max_page: optional cap on the number of pages pulled
        """
        # Copy every constructor argument onto the instance in one sweep.
        for k, v in locals().items():
            if not k.startswith("self"):
                setattr(self, k, v)
        self.out_path = os.path.join(
            out,
            "{}_{}_data.json".format(state, schooldiggerscraper.utils.school_level[level])
        )
        self.beautifulsoup = bs4.BeautifulSoup
        # Inject every public name from the utils module as an attribute
        # (main_url, headers_page, headers_init, forms, state_to_fips, ...).
        for k, v in schooldiggerscraper.utils.__dict__.items():
            if not k.startswith("__"):
                setattr(self, k, v)
        # Fill URL/referer templates with this run's state/level values.
        self.main_url = self.main_url.format(**locals())
        self.headers_page["referer"] = \
            self.headers_page["referer"].format(**locals())
        self.headers_init["referer"] = \
            self.headers_init["referer"].format(**locals())
        self.form = self.forms[self.level]
        self.form["values[FIPS]"] = self.state_to_fips[self.state]
        self.logger = logging.getLogger("SchoolDiggerScraper")
        self.data = []  # per-page JSON dicts, filled by download()
    def download(self):
        # Pull the first page to learn the total record count, then the rest.
        session = requests.Session()
        # init run to get cookies
        resp_init = session.get(self.main_url, headers=self.headers_init)
        if resp_init.status_code != 200:
            self.logger.error(
                "Status code {}, job aborted".format(resp_init.status_code)
            )
            # NOTE(review): exit() kills the interpreter; raising an exception
            # would let callers handle the failure — confirm intent.
            exit()
        cookies = requests.utils.cookiejar_from_dict(
            requests.utils.dict_from_cookiejar(session.cookies)
        )
        # get total data size and num pages in first run
        content_dict = self._pull_one_page(
            self.entry_point,
            1,
            0,
            self.form,
            session,
            cookies,
            self.headers_page,
            self.logger,
        )
        self.data.append(content_dict)
        self.pause()
        # get rest
        num_total_records = content_dict["recordsTotal"]
        # NOTE(review): when recordsTotal is an exact multiple of 10 this
        # requests one extra (empty) page, and with max_page set the loop
        # stops at max_page - 1 — verify both bounds are intended.
        num_pages = num_total_records // 10 + 1
        for draw in range(2, self.max_page or num_pages + 1):
            start = (draw - 1) * 10
            content_dict = self._pull_one_page(
                self.entry_point,
                draw,
                start,
                self.form,
                session,
                cookies,
                self.headers_page,
                self.logger,
            )
            self.data.append(content_dict)
            self.pause()
    def _pull_one_page(
        self,
        post_url,
        draw,
        start,
        form,
        session,
        cookies,
        headers,
        logger,
    ):
        """Download one page.

        Posts the urlencoded DataTables form (draw = page number, start =
        record offset) and returns the decoded JSON dict, or {} on failure.
        """
        form["draw"] = draw
        form["start"] = start
        form_urlencoded = urllib.parse.urlencode(form)
        content_len = len(form_urlencoded)
        headers["content-length"] = str(content_len)
        response = session.post(
            post_url,
            headers=headers,
            data=form_urlencoded,
            cookies=cookies,
        )
        scode = response.status_code
        content_dict = {}  # empty dict marks a failed page
        if scode != 200:
            logger.error(
                "Draw {}, Status code {}, skipping".format(draw, scode)
            )
        else:
            # NOTE(review): the format string has one placeholder but two
            # arguments; ``scode`` is silently ignored here.
            logger.info(
                "Draw {} finished".format(draw, scode)
            )
            content_dict = json.loads(response.content)
        return content_dict
    def pause(self, duration=None):
        """Pause.

        Throttles requests; an explicit ``duration`` overrides the
        configured ``self.sleep``.
        """
        sleep = duration or self.sleep
        time.sleep(sleep)
    def save(self):
        """Save to disc.

        Writes ``self.data`` as one JSON object per line (JSON-lines).
        """
        self.logger.info(
            "Saving to {}".format(os.path.abspath(self.out_path))
        )
        with open(self.out_path, "w") as f:
            for content_dict in self.data:
                f.write("{}\n".format(json.dumps(content_dict)))
# CLI entry point: scrape one state/level from schooldigger.com and write the
# JSON-lines dump. (No docstring added on purpose — click would surface it as
# the command's help text, changing runtime output.)
@click.command()
@click.option(
    "--state",
    help="State code",
    type=click.Choice(schooldiggerscraper.utils.state_codes),
)
@click.option(
    "--level",
    help="School level, 1 for elementary, 2 for middle, 3 for high",
    type=click.Choice(["1", "2", "3"]),
)
@click.option(
    "--out",
    help="Output directory",
    type=str,
    default="/tmp",
)
@click.option(
    "--sleep",
    help="Sleep seconds between each page",
    type=int,
    default=5,
)
@click.option(
    "--max-page",
    help="Maximum page number",
    type=int,
    default=None,
)
def main(state, level, out, sleep, max_page):
    # click.Choice delivers level as a string; the scraper wants an int key.
    scraper = SchoolDiggerScraper(state, int(level), out, sleep, max_page)
    scraper.download()
    scraper.save()
# Script entry point: run the click CLI when executed directly.
# FIX: the original final line was corrupted with dataset-concatenation
# residue fused onto ``main()``; restored to a clean guard.
if __name__ == "__main__":
    main()
import json
import time
import urllib
import logging
import bs4
import click
import requests
import schooldiggerscraper.utils # noqa
logging.basicConfig(level=logging.INFO)
class SchoolDiggerScraper(object):
"""Class of scraper, can download and save."""
def __init__(self, state, level, out, sleep, max_page):
"""Init needed."""
for k, v in locals().items():
if not k.startswith("self"):
setattr(self, k, v)
self.out_path = os.path.join(
out,
"{}_{}_data.json".format(state, schooldiggerscraper.utils.school_level[level])
)
self.beautifulsoup = bs4.BeautifulSoup
for k, v in schooldiggerscraper.utils.__dict__.items():
if not k.startswith("__"):
setattr(self, k, v)
self.main_url = self.main_url.format(**locals())
self.headers_page["referer"] = \
self.headers_page["referer"].format(**locals())
self.headers_init["referer"] = \
self.headers_init["referer"].format(**locals())
self.form = self.forms[self.level]
self.form["values[FIPS]"] = self.state_to_fips[self.state]
self.logger = logging.getLogger("SchoolDiggerScraper")
self.data = []
def download(self):
session = requests.Session()
# init run to get cookies
resp_init = session.get(self.main_url, headers=self.headers_init)
if resp_init.status_code != 200:
self.logger.error(
"Status code {}, job aborted".format(resp_init.status_code)
)
exit()
cookies = requests.utils.cookiejar_from_dict(
requests.utils.dict_from_cookiejar(session.cookies)
)
# get total data size and num pages in first run
content_dict = self._pull_one_page(
self.entry_point,
1,
0,
self.form,
session,
cookies,
self.headers_page,
self.logger,
)
self.data.append(content_dict)
self.pause()
# get rest
num_total_records = content_dict["recordsTotal"]
num_pages = num_total_records // 10 + 1
for draw in range(2, self.max_page or num_pages + 1):
start = (draw - 1) * 10
content_dict = self._pull_one_page(
self.entry_point,
draw,
start,
self.form,
session,
cookies,
self.headers_page,
self.logger,
)
self.data.append(content_dict)
self.pause()
def _pull_one_page(
self,
post_url,
draw,
start,
form,
session,
cookies,
headers,
logger,
):
"""Download one page."""
form["draw"] = draw
form["start"] = start
form_urlencoded = urllib.parse.urlencode(form)
content_len = len(form_urlencoded)
headers["content-length"] = str(content_len)
response = session.post(
post_url,
headers=headers,
data=form_urlencoded,
cookies=cookies,
)
scode = response.status_code
content_dict = {}
if scode != 200:
logger.error(
"Draw {}, Status code {}, skipping".format(draw, scode)
)
else:
logger.info(
"Draw {} finished".format(draw, scode)
)
content_dict = json.loads(response.content)
return content_dict
def pause(self, duration=None):
"""Pause."""
sleep = duration or self.sleep
time.sleep(sleep)
def save(self):
"""Save to disc."""
self.logger.info(
"Saving to {}".format(os.path.abspath(self.out_path))
)
with open(self.out_path, "w") as f:
for content_dict in self.data:
f.write("{}\n".format(json.dumps(content_dict)))
@click.command()
@click.option(
"--state",
help="State code",
type=click.Choice(schooldiggerscraper.utils.state_codes),
)
@click.option(
"--level",
help="School level, 1 for elementary, 2 for middle, 3 for high",
type=click.Choice(["1", "2", "3"]),
)
@click.option(
"--out",
help="Output directory",
type=str,
default="/tmp",
)
@click.option(
"--sleep",
help="Sleep seconds between each page",
type=int,
default=5,
)
@click.option(
"--max-page",
help="Maximum page number",
type=int,
default=None,
)
def main(state, level, out, sleep, max_page):
scraper = SchoolDiggerScraper(state, int(level), out, sleep, max_page)
scraper.download()
scraper.save()
# Script entry point: run the click CLI when executed directly.
# FIX: the original final line was corrupted with dataset-concatenation
# residue fused onto ``main()``; restored to a clean guard.
if __name__ == "__main__":
    main()
from django.core.urlresolvers import reverse
from cms.test_utils.testcases import CMSTestCase
from richie.apps.core.factories import UserFactory
from richie.apps.courses.factories import (
CourseFactory,
OrganizationFactory,
SubjectFactory,
)
class CourseAdminTestCase(CMSTestCase):
    """
    Integration test suite to validate the behavior of admin pages for the Course model
    """
    # FIX: the class's closing parenthesis (last line) was corrupted with
    # dataset-concatenation residue; restored. Test bodies are unchanged.
    def test_admin_course_list_view(self):
        """
        The admin list view of courses should display their active session, their
        organization_main and the title of the related page
        """
        user = UserFactory(is_staff=True, is_superuser=True)
        self.client.login(username=user.username, password="password")
        # Create a course linked to a page
        course = CourseFactory()
        # Get the admin list view
        url = reverse("admin:courses_course_changelist")
        response = self.client.get(url, follow=True)
        # Check that the page includes all our fields
        self.assertContains(
            response, course.extended_object.get_title(), status_code=200
        )
        self.assertContains(
            response, course.organization_main.extended_object.get_title()
        )
    def test_admin_course_add_view(self):
        """
        The admin add view should work for courses
        """
        user = UserFactory(is_staff=True, is_superuser=True)
        self.client.login(username=user.username, password="password")
        # Get the admin change view
        url = reverse("admin:courses_course_add")
        response = self.client.get(url, follow=True)
        # Check that the page includes the field to edit the main organization
        self.assertContains(response, "id_organization_main")
    def test_admin_course_change_view_get(self):
        """
        The admin change view should include the editable and readonly fields as expected.
        In particular, the relation fields should only include options for related objects in
        their draft version.
        """
        user = UserFactory(is_staff=True, is_superuser=True)
        self.client.login(username=user.username, password="password")
        # Create a course
        course = CourseFactory()
        # Create an organization and publish it
        organization = OrganizationFactory()
        organization.extended_object.publish("en")
        organization.refresh_from_db()
        # Create a subject and publish it
        subject = SubjectFactory()
        subject.extended_object.publish("en")
        subject.refresh_from_db()
        # Get the admin change view
        url = reverse("admin:courses_course_change", args=[course.id])
        response = self.client.get(url)
        # Check that the page includes all our fields
        self.assertContains(
            response, course.extended_object.get_title(), status_code=200
        )
        self.assertContains(
            response, course.organization_main.extended_object.get_title()
        )
        # Only the draft organization and subject should be proposed as options in select boxes
        self.assertContains(
            response, '<option value="{:d}">{!s}'.format(subject.id, subject)
        )
        self.assertNotContains(
            response,
            '<option value="{:d}">{!s}'.format(
                subject.public_extension.id, subject.public_extension
            ),
        )
        self.assertContains(
            response, '<option value="{:d}">{!s}'.format(organization.id, organization)
        )
        self.assertNotContains(
            response,
            '<option value="{:d}">{!s}'.format(
                organization.public_extension.id, organization.public_extension
            ),
        )
    def test_admin_course_change_view_post(self):
        """
        Validate that the course can be updated via the admin.
        In particular, make sure that when a course is updated from the admin, the main
        organization is automatically added to the many-to-many field "organizations".
        See http://stackoverflow.com/a/1925784/469575 for details on the issue.
        """
        user = UserFactory(is_staff=True, is_superuser=True)
        self.client.login(username=user.username, password="password")
        # Create a course, some organizations and some subjects
        organization1, organization2, organization3 = OrganizationFactory.create_batch(
            3
        )
        subject1, subject2 = SubjectFactory.create_batch(2)
        course = CourseFactory(
            with_organizations=[organization1], with_subjects=[subject1]
        )
        self.assertEqual(
            set(course.organizations.all()), {organization1, course.organization_main}
        )
        self.assertEqual(set(course.subjects.all()), {subject1})
        # Get the admin change view
        url = reverse("admin:courses_course_change", args=[course.id])
        data = {
            "organization_main": organization2.id,
            "organizations": [organization3.id],
            "subjects": [subject2.id],
            "courserun_set-TOTAL_FORMS": 0,
            "courserun_set-INITIAL_FORMS": 0,
        }
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 302)
        # Check that the course was updated as expected
        course.refresh_from_db()
        self.assertEqual(course.organization_main, organization2)
        self.assertEqual(set(course.subjects.all()), {subject2})
        # Check that the main organization was added and the old organization cleared
        self.assertEqual(
            set(course.organizations.all()), {organization2, organization3}
        )
from cms.test_utils.testcases import CMSTestCase
from richie.apps.core.factories import UserFactory
from richie.apps.courses.factories import (
CourseFactory,
OrganizationFactory,
SubjectFactory,
)
class CourseAdminTestCase(CMSTestCase):
"""
Integration test suite to validate the behavior of admin pages for the Course model
"""
def test_admin_course_list_view(self):
"""
The admin list view of courses should display their active session, their
organization_main and the title of the related page
"""
user = UserFactory(is_staff=True, is_superuser=True)
self.client.login(username=user.username, password="password")
# Create a course linked to a page
course = CourseFactory()
# Get the admin list view
url = reverse("admin:courses_course_changelist")
response = self.client.get(url, follow=True)
# Check that the page includes all our fields
self.assertContains(
response, course.extended_object.get_title(), status_code=200
)
self.assertContains(
response, course.organization_main.extended_object.get_title()
)
def test_admin_course_add_view(self):
"""
The admin add view should work for courses
"""
user = UserFactory(is_staff=True, is_superuser=True)
self.client.login(username=user.username, password="password")
# Get the admin change view
url = reverse("admin:courses_course_add")
response = self.client.get(url, follow=True)
# Check that the page includes the field to edit the main organization
self.assertContains(response, "id_organization_main")
def test_admin_course_change_view_get(self):
"""
The admin change view should include the editable and readonly fields as expected.
In particular, the relation fields should only include options for related objects in
their draft version.
"""
user = UserFactory(is_staff=True, is_superuser=True)
self.client.login(username=user.username, password="password")
# Create a course
course = CourseFactory()
# Create an organization and publish it
organization = OrganizationFactory()
organization.extended_object.publish("en")
organization.refresh_from_db()
# Create a subject and publish it
subject = SubjectFactory()
subject.extended_object.publish("en")
subject.refresh_from_db()
# Get the admin change view
url = reverse("admin:courses_course_change", args=[course.id])
response = self.client.get(url)
# Check that the page includes all our fields
self.assertContains(
response, course.extended_object.get_title(), status_code=200
)
self.assertContains(
response, course.organization_main.extended_object.get_title()
)
# Only the draft organization and subject should be proposed as options in select boxes
self.assertContains(
response, '<option value="{:d}">{!s}'.format(subject.id, subject)
)
self.assertNotContains(
response,
'<option value="{:d}">{!s}'.format(
subject.public_extension.id, subject.public_extension
),
)
self.assertContains(
response, '<option value="{:d}">{!s}'.format(organization.id, organization)
)
self.assertNotContains(
response,
'<option value="{:d}">{!s}'.format(
organization.public_extension.id, organization.public_extension
),
)
def test_admin_course_change_view_post(self):
"""
Validate that the course can be updated via the admin.
In particular, make sure that when a course is updated from the admin, the main
organization is automatically added to the many-to-many field "organizations".
See http://stackoverflow.com/a/1925784/469575 for details on the issue.
"""
user = UserFactory(is_staff=True, is_superuser=True)
self.client.login(username=user.username, password="password")
# Create a course, some organizations and some subjects
organization1, organization2, organization3 = OrganizationFactory.create_batch(
3
)
subject1, subject2 = SubjectFactory.create_batch(2)
course = CourseFactory(
with_organizations=[organization1], with_subjects=[subject1]
)
self.assertEqual(
set(course.organizations.all()), {organization1, course.organization_main}
)
self.assertEqual(set(course.subjects.all()), {subject1})
# Get the admin change view
url = reverse("admin:courses_course_change", args=[course.id])
data = {
"organization_main": organization2.id,
"organizations": [organization3.id],
"subjects": [subject2.id],
"courserun_set-TOTAL_FORMS": 0,
"courserun_set-INITIAL_FORMS": 0,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
# Check that the course was updated as expected
course.refresh_from_db()
self.assertEqual(course.organization_main, organization2)
self.assertEqual(set(course.subjects.all()), {subject2})
# Check that the main organization was added and the old organization cleared
self.assertEqual(
set(course.organizations.all()), {organization2, organization3}
) | 0.692434 | 0.390912 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
from six.moves import xrange
import torch
import torch.nn.functional as F
from torch.autograd import Variable as Var
from torch.utils.data import DataLoader, Dataset
ckpt_path = "checkpoint/"
def partition_dataset(data, labels, nb_teachers, teacher_id):
    """Return the slice of (data, labels) assigned to one teacher.

    The dataset is split into ``nb_teachers`` contiguous, equally sized
    partitions (any remainder at the end is dropped) and the
    ``teacher_id``-th partition is returned.

    :param data: input data to be partitioned
    :param labels: output data to be partitioned (same length as ``data``)
    :param nb_teachers: number of teachers in the ensemble (affects size of
        each partition)
    :param teacher_id: 0-based id of the partition to retrieve
    :return: tuple ``(partition_data, partition_labels)``
    :raises ValueError: on mismatched lengths or an out-of-range teacher id
    """
    # Raise real errors instead of assert, which is stripped under `python -O`.
    if len(data) != len(labels):
        raise ValueError("data and labels must have the same length")
    # Also rejects negative ids, which previously produced surprising slices.
    if not 0 <= int(teacher_id) < int(nb_teachers):
        raise ValueError("teacher_id must be in [0, nb_teachers)")
    # This will floor the possible number of batches
    batch_len = int(len(data) / nb_teachers)
    # Compute start, end indices of partition
    start = teacher_id * batch_len
    end = (teacher_id + 1) * batch_len
    # Slice partition off
    partition_data = data[start:end]
    partition_labels = labels[start:end]
    return partition_data, partition_labels
class PrepareData(Dataset):
    """Minimal ``Dataset`` adapter over parallel arrays of inputs and targets."""

    def __init__(self, X, y):
        # Store the parallel sequences as-is; no copying or conversion.
        self.X = X
        self.y = y

    def __len__(self):
        # Dataset length is the number of input samples.
        return len(self.X)

    def __getitem__(self, idx):
        # Yield one (input, target) pair for the given index.
        sample = self.X[idx]
        target = self.y[idx]
        return sample, target
def train(model, train_loader, test_loader, ckpt_path, filename):
    """Train ``model`` for 10 epochs with Adam (lr=0.01), print per-epoch
    train accuracy and a final test accuracy, then save the ``state_dict``
    to ``ckpt_path + filename`` (directory created if missing).

    :param model: torch module to train (modified in place)
    :param train_loader: DataLoader yielding (img, label) training batches
    :param test_loader: DataLoader yielding (img, label) evaluation batches
    :param ckpt_path: checkpoint directory path (string prefix)
    :param filename: checkpoint file name appended to ``ckpt_path``
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    for epoch in range(10):
        model.train()  # set model to training mode
        # set up training metrics we want to track
        correct = 0
        train_num = len(train_loader.sampler)
        for ix, (img, label) in enumerate(
            train_loader
        ):  # iterate over training batches
            # img, label = img.to(device), label.to(device)
            # get data, send to gpu if needed
            img = Var(img.float())
            # label = label.type(torch.float32)
            label = Var(label.type(torch.LongTensor))
            optimizer.zero_grad()  # clear parameter gradients from previous update
            output = model(img)  # forward pass
            # output = output.type(torch.float32)
            # NOTE(review): size_average=False is deprecated; reduction="sum"
            # is the modern equivalent — confirm torch version before changing.
            loss = F.cross_entropy(
                output, label, size_average=False
            )  # calculate network loss
            loss.backward()  # backward pass
            optimizer.step()  # take an optimization step to update model's parameters
            pred = output.max(1, keepdim=True)[1]  # get the index of the max logit
            correct += int(
                pred.eq(label.view_as(pred)).sum()
            )  # add to running total of hits
        # print whole epoch's training accuracy; useful for monitoring overfitting
        print(
            "Train Accuracy: {}/{} ({:.0f}%)".format(
                correct, int(train_num), 100.0 * float(correct / train_num)
            )
        )
    # set up training metrics we want to track
    test_correct = 0
    test_num = len(test_loader.sampler)
    # NOTE(review): the evaluation below runs without model.eval() /
    # torch.no_grad() and still calls optimizer.zero_grad() — confirm intent.
    for ix, (img, label) in enumerate(test_loader):  # iterate over training batches
        # img, label = img.to(device), label.to(device)
        # get data, send to gpu if needed
        img = Var(img.float())
        # label = label.type(torch.float32)
        label = Var(label.type(torch.LongTensor))
        optimizer.zero_grad()  # clear parameter gradients from previous training update
        output = model(img)  # forward pass
        # output = output.type(torch.float32)
        loss = F.cross_entropy(
            output, label, size_average=False
        )  # calculate network loss
        pred = output.max(1, keepdim=True)[1]  # get the index of the max logit
        test_correct += int(
            pred.eq(label.view_as(pred)).sum()
        )  # add to running total of hits
    # print whole epoch's training accuracy; useful for monitoring overfitting
    print(
        "Test Accuracy: {}/{} ({:.0f}%)".format(
            test_correct, test_num, 100.0 * test_correct / test_num
        )
    )
    if not os.path.isdir(ckpt_path):
        os.makedirs(ckpt_path)
    torch.save(model.state_dict(), ckpt_path + filename)
def train_teachers(
    model,
    train_data,
    train_labels,
    test_data,
    test_labels,
    nb_teachers,
    teacher_id,
    filename,
):
    """Train one teacher model on its own partition of the training set.

    Carves out teacher ``teacher_id``'s share of the data, wraps both splits
    in loaders (only the training split shuffled), and delegates to train().
    """
    # This teacher's contiguous share of the training data.
    part_x, part_y = partition_dataset(train_data, train_labels, nb_teachers, teacher_id)
    train_loader = DataLoader(PrepareData(part_x, part_y), batch_size=64, shuffle=True)
    test_loader = DataLoader(PrepareData(test_data, test_labels), batch_size=64, shuffle=False)
    print("\nTrain teacher ID: " + str(teacher_id))
    train(model, train_loader, test_loader, ckpt_path, filename)
def softmax_preds(model, nb_labels, images_loader, ckpt_path, return_logits=False):
    """Compute softmax activations (probabilities) with the model saved in the
    path specified as an argument.

    :param model: torch module whose architecture matches the checkpoint
    :param nb_labels: number of output classes
    :param images_loader: DataLoader yielding (img, label) batches
    :param ckpt_path: path to a saved ``state_dict`` checkpoint
    :param return_logits: if set to True, return logits instead of
        probabilities (BUG FIX: the original accepted this flag but ignored it)
    :return: float32 array of shape (num_samples, nb_labels)
    """
    # Compute nb samples and allocate the result buffer up front
    data_length = len(images_loader.dataset)
    preds = np.zeros((data_length, nb_labels), dtype=np.float32)
    # Restore the trained weights before inference
    check = torch.load(ckpt_path)
    model.load_state_dict(check)
    model.eval()  # set model to evaluate mode
    start = 0
    for img, _label in images_loader:  # labels are not needed for inference
        output = model(Var(img))
        if return_logits:
            batch_out = output.data.numpy()
        else:
            # BUG FIX: dim=1 made explicit — the implicit-dim form is
            # deprecated and ambiguous for non-2D outputs.
            batch_out = F.softmax(output, dim=1).data.numpy()
        end = start + len(img)
        preds[start:end, :] = batch_out
        start = end
    return preds
def ensemble_preds(model, dataset, nb_labels, nb_teachers, stdnt_data_loader):
    """Given a dataset, a number of teachers, and some input data, this helper
    function queries each teacher for predictions on the data and returns all
    predictions in a single array. (That can then be aggregated into one single
    prediction per input using aggregation.py (cf. function
    prepare_student_data() below)
    :param dataset: string corresponding to mnist, cifar10, or svhn
    :param nb_teachers: number of teachers (in the ensemble) to learn from
    :param stdnt_data: unlabeled student training data
    :return: 3d array (teacher id, sample id, probability per class)
    """
    # Compute shape of array that will hold probabilities produced by each
    # teacher, for each training point, and each output class
    result_shape = (nb_teachers, len(stdnt_data_loader.dataset), nb_labels)
    # Create array that will hold result
    result = np.zeros(result_shape, dtype=np.float32)
    # Get predictions from each teacher
    for teacher_id in xrange(nb_teachers):
        # Compute path of checkpoint file for teacher model with ID teacher_id
        filename = (
            str(dataset)
            + "_"
            + str(nb_teachers)
            + "_teachers_"
            + str(teacher_id)
            + ".pth"
        )
        # Get predictions on our training data and store in result array.
        # softmax_preds loads each teacher's checkpoint into ``model``, so the
        # same module object is reused (and overwritten) for every teacher.
        result[teacher_id] = softmax_preds(
            model, nb_labels, stdnt_data_loader, ckpt_path + filename
        )
        # This can take a while when there are a lot of teachers so output status
        print("Computed Teacher " + str(teacher_id) + " softmax predictions")
    return result
def prepare_student_data(
    model,
    dataset,
    test_data,
    test_labels,
    nb_labels,
    nb_teachers,
    stdnt_share,
    lap_scale,
):
    """Takes a dataset name and the size of the teacher ensemble and prepares
    training data for the student model, according to parameters indicated in
    flags above.
    :param dataset: string corresponding to mnist, cifar10, or svhn
    :param nb_teachers: number of teachers (in the ensemble) to learn from
    :param stdnt_share: number of test samples handed to the student as
        (teacher-labeled) training data; the rest stays a held-out test set
    :param lap_scale: Laplace noise scale forwarded to the aggregation step
    :return: pairs of (data, labels) to be used for student training and testing
    """
    # Transfor tensor to numpy
    test_labels = test_labels.numpy()
    # Make sure there is data leftover to be used as a test set
    assert stdnt_share < len(test_data)
    # Prepare [unlabeled] student training data (subset of test set)
    stdnt_data = test_data[:stdnt_share]
    stdnt_label = test_labels[:stdnt_share]
    stdnt_prep = PrepareData(stdnt_data, stdnt_label)
    stdnt_loader = DataLoader(stdnt_prep, batch_size=64, shuffle=False)
    # Compute teacher predictions for student training data
    teachers_preds = ensemble_preds(
        model, dataset, nb_labels, nb_teachers, stdnt_loader
    )
    # Aggregate teacher predictions to get student training labels.
    # NOTE(review): noisy_max and accuracy are not defined in this module's
    # visible scope — presumably imported from an aggregation helper; verify.
    stdnt_labels = noisy_max(teachers_preds, nb_labels, lap_scale)
    # Print accuracy of aggregated labels
    ac_ag_labels = accuracy(stdnt_labels, test_labels[:stdnt_share])
    print("\nAccuracy of the aggregated labels: " + str(ac_ag_labels) + "\n")
    # Store unused part of test set for use as a test set after student training
    stdnt_test_data = test_data[stdnt_share:]
    stdnt_test_labels = test_labels[stdnt_share:]
    return stdnt_data, stdnt_labels, stdnt_test_data, stdnt_test_labels
def train_student(
    model,
    dataset,
    test_data,
    test_labels,
    nb_labels,
    nb_teachers,
    stdnt_share,
    lap_scale,
):
    """This function trains a student using predictions made by an ensemble of
    teachers. The student and teacher models are trained using the same neural
    network architecture.

    :param model: torch module used for the student (same as teachers)
    :param dataset: string corresponding to mnist, cifar10, or svhn
    :param test_data: tensor of held-out input samples
    :param test_labels: tensor of ground-truth labels for test_data
    :param nb_labels: number of output classes
    :param nb_teachers: number of teachers (in the ensemble) to learn from
    :param stdnt_share: number of test samples used as student training data
    :param lap_scale: scale of the Laplacian noise used during aggregation
    :return: True if student training went well
    """
    # Call helper function to prepare student data using teacher predictions
    stdnt_dataset = prepare_student_data(
        model,
        dataset,
        test_data,
        test_labels,
        nb_labels,
        nb_teachers,
        stdnt_share,
        lap_scale,
    )
    # Unpack the student dataset
    stdnt_data, stdnt_labels, stdnt_test_data, stdnt_test_labels = stdnt_dataset
    # Prepare checkpoint filename and path
    filename = str(dataset) + "_" + str(nb_teachers) + "_student.ckpt"
    stdnt_prep = PrepareData(stdnt_data, stdnt_labels)
    stdnt_loader = DataLoader(stdnt_prep, batch_size=64, shuffle=False)
    stdnt_test_prep = PrepareData(stdnt_test_data, stdnt_test_labels)
    stdnt_test_loader = DataLoader(stdnt_test_prep, batch_size=64, shuffle=False)
    # Start student training; train() also saves the checkpoint
    train(model, stdnt_loader, stdnt_test_loader, ckpt_path, filename)
    # Reload the student checkpoint and compute predictions on the held-out split
    student_preds = softmax_preds(
        model, nb_labels, stdnt_test_loader, ckpt_path + filename
    )
    # Compute student accuracy
    precision = accuracy(student_preds, stdnt_test_labels)
    print("\nPrecision of student after training: " + str(precision))
    return True
def labels_from_probs(probs):
    """Helper function: computes argmax along last dimension of array to obtain
    labels (max prob or max logit value)

    :param probs: numpy array where probabilities or logits are on last dimension
    :return: array with same shape as input besides last dimension with shape 1
             now containing the labels
    """
    # Compute last axis index
    last_axis = len(np.shape(probs)) - 1
    # Label is argmax over last dimension (ties resolve to the lowest index)
    labels = np.argmax(probs, axis=last_axis)
    # Return as np.int32
    return np.asarray(labels, dtype=np.int32)
def noisy_max(logits, nb_labels, lap_scale, return_clean_votes=False):
    """Aggregate teacher votes with the noisy-max mechanism.

    Takes the softmax/logit output of several models resulting from
    inference on identical inputs, counts the per-class votes for each
    sample, perturbs the counts with Laplacian noise, and returns the
    most frequent (noisy) label per sample.

    :param logits: array of shape (nb_teachers, nb_samples, nb_classes)
        holding logits or probabilities for each sample
    :param nb_labels: number of output classes
    :param lap_scale: scale of the Laplacian noise added to the counts
    :param return_clean_votes: if True, also return the clean vote counts
        (without Laplacian noise), usable for privacy analysis of this
        aggregation mechanism
    :return: np.int32 array of noisy labels, or -- when return_clean_votes
        is True -- the triple (noisy labels, clean vote counts per
        sample/class, per-teacher labels before aggregation)
    """
    # Reduce each teacher's output to a label: argmax over the class axis
    labels = np.argmax(logits, axis=-1).astype(np.int32)
    labels = labels.reshape((labels.shape[0], labels.shape[1]))
    nb_samples = int(labels.shape[1])
    # Array holding the final aggregated label per sample
    result = np.zeros(nb_samples)
    if return_clean_votes:
        # Noise-free vote counts, exported for privacy accounting
        clean_votes = np.zeros((nb_samples, nb_labels))
    for i in range(nb_samples):
        # Count votes per class. BUG FIX: size the histogram with nb_labels
        # instead of a hard-coded 10 -- the old constant broke clean-vote
        # export (shape mismatch against clean_votes) and skipped noise for
        # classes >= 10 whenever nb_labels != 10.
        label_counts = np.bincount(labels[:, i], minlength=nb_labels)
        if return_clean_votes:
            # Store vote counts for export
            clean_votes[i] = label_counts
        # Cast to float before adding real-valued Laplacian noise
        label_counts = np.asarray(label_counts, dtype=np.float32)
        # Sample independent Laplacian noise for each class count
        for cls in range(nb_labels):
            label_counts[cls] += np.random.laplace(loc=0.0, scale=float(lap_scale))
        # The noisy majority vote wins
        result[i] = np.argmax(label_counts)
    # int32 labels for compatibility with downstream feed dictionaries
    result = np.asarray(result, dtype=np.int32)
    if return_clean_votes:
        # result: labels obtained from the noisy aggregation
        # clean_votes: teacher votes per sample and class (no noise)
        # labels: labels assigned by each teacher before aggregation
        return result, clean_votes, labels
    # Only return labels resulting from noisy aggregation
    return result
def aggregation_most_frequent(logits):
    """This aggregation mechanism takes the softmax/logit output of several
    models resulting from inference on identical inputs and computes the most
    frequent label. It is deterministic (no noise injection like noisy_max()
    above).

    :param logits: array of shape (nb_teachers, nb_samples, nb_classes) with
                   logits or probabilities for each sample
    :return: np.int32 array with one majority-vote label per sample
    """
    # Compute labels from logits/probs and reshape array properly
    labels = labels_from_probs(logits)
    labels_shape = np.shape(labels)
    labels = labels.reshape((labels_shape[0], labels_shape[1]))
    # Initialize array to hold final labels
    result = np.zeros(int(labels_shape[1]))
    # Parse each sample
    for i in xrange(int(labels_shape[1])):
        # Count number of votes assigned to each class (padding to 10 bins
        # never changes the argmax, since padded counts are zero)
        label_counts = np.bincount(labels[:, i], minlength=10)
        label_counts = np.asarray(label_counts, dtype=np.int32)
        # Result is the most frequent label (ties go to the lowest class index)
        result[i] = np.argmax(label_counts)
    return np.asarray(result, dtype=np.int32)
def accuracy(logits, labels):
    """Return the fraction of predictions in `logits` that match `labels`.

    :param logits: either a 2-D array of logits/probabilities (one row per
        sample) or a 1-D array of already-computed label predictions
    :param labels: the correct labels to match against (numpy array)
    :return: the accuracy as a float in [0, 1]
    """
    assert len(logits) == len(labels)
    if len(np.shape(logits)) > 1:
        # 2-D input: the predicted label is the argmax of each row
        predictions = np.argmax(logits, axis=1)
    else:
        # 1-D input: the values are already labels
        assert len(np.shape(logits)) == 1
        predictions = logits
    # Flatten labels so element-wise comparison is well defined, then
    # count matching positions
    nb_correct = np.sum(predictions == labels.reshape(len(labels)))
    # Fraction of correct guesses
    return float(nb_correct) / len(labels)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
from six.moves import xrange
import torch
import torch.nn.functional as F
from torch.autograd import Variable as Var
from torch.utils.data import DataLoader, Dataset
ckpt_path = "checkpoint/"
def partition_dataset(data, labels, nb_teachers, teacher_id):
    """Return the slice of (data, labels) assigned to one teacher.

    The dataset is split into `nb_teachers` equally sized contiguous
    partitions (any remainder at the end is dropped) and the partition
    with index `teacher_id` is returned.

    :param data: input data to be partitioned
    :param labels: output data to be partitioned
    :param nb_teachers: number of teachers in the ensemble (determines the
        size of each partition)
    :param teacher_id: index of the partition to retrieve
    :return: pair (partition data, partition labels)
    """
    # Sanity checks: inputs must be aligned and the id must be valid
    assert len(data) == len(labels)
    assert int(teacher_id) < int(nb_teachers)
    # Size of one partition; flooring may leave trailing samples unused
    partition_size = int(len(data) / nb_teachers)
    # Boundaries of the requested partition
    begin = teacher_id * partition_size
    stop = begin + partition_size
    # Slice both containers identically
    return data[begin:stop], labels[begin:stop]
class PrepareData(Dataset):
    """Minimal torch Dataset wrapping two parallel containers.

    X holds the features and y the labels; index i yields the pair
    (X[i], y[i]). No copying or conversion is performed.
    """

    def __init__(self, X, y):
        # Keep references to the parallel containers as-is.
        self.X, self.y = X, y

    def __len__(self):
        # The number of samples equals the length of the feature container.
        return len(self.X)

    def __getitem__(self, idx):
        # Return the (feature, label) pair at position idx.
        return (self.X[idx], self.y[idx])
def train(model, train_loader, test_loader, ckpt_path, filename):
    """Train `model`, report train/test accuracy, and checkpoint it.

    Runs 10 epochs of Adam (lr=0.01) with a summed cross-entropy loss,
    printing the training accuracy after each epoch; then measures
    accuracy on `test_loader` and saves the model's state_dict to
    ckpt_path + filename.
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    for epoch in range(10):
        model.train()  # set model to training mode
        # set up training metrics we want to track
        correct = 0
        train_num = len(train_loader.sampler)
        for ix, (img, label) in enumerate(
            train_loader
        ):  # iterate over training batches
            # img, label = img.to(device), label.to(device)
            # get data, send to gpu if needed
            img = Var(img.float())
            # label = label.type(torch.float32)
            # cross_entropy expects integer class indices
            label = Var(label.type(torch.LongTensor))
            optimizer.zero_grad()  # clear parameter gradients from previous update
            output = model(img)  # forward pass
            # output = output.type(torch.float32)
            # NOTE(review): size_average=False is the deprecated spelling of
            # reduction='sum' in newer torch versions
            loss = F.cross_entropy(
                output, label, size_average=False
            )  # calculate network loss
            loss.backward()  # backward pass
            optimizer.step()  # take an optimization step to update model's parameters
            pred = output.max(1, keepdim=True)[1]  # get the index of the max logit
            correct += int(
                pred.eq(label.view_as(pred)).sum()
            )  # add to running total of hits
        # print whole epoch's training accuracy; useful for monitoring overfitting
        print(
            "Train Accuracy: {}/{} ({:.0f}%)".format(
                correct, int(train_num), 100.0 * float(correct / train_num)
            )
        )
    # set up evaluation metrics we want to track
    test_correct = 0
    test_num = len(test_loader.sampler)
    for ix, (img, label) in enumerate(test_loader):  # iterate over test batches
        # img, label = img.to(device), label.to(device)
        # get data, send to gpu if needed
        img = Var(img.float())
        # label = label.type(torch.float32)
        label = Var(label.type(torch.LongTensor))
        # NOTE(review): no backward pass happens below, so this zero_grad is
        # effectively a no-op; kept as in the original
        optimizer.zero_grad()  # clear parameter gradients from previous training update
        output = model(img)  # forward pass
        # output = output.type(torch.float32)
        # loss is computed here but never used during evaluation
        loss = F.cross_entropy(
            output, label, size_average=False
        )  # calculate network loss
        pred = output.max(1, keepdim=True)[1]  # get the index of the max logit
        test_correct += int(
            pred.eq(label.view_as(pred)).sum()
        )  # add to running total of hits
    # print accuracy on the held-out set
    print(
        "Test Accuracy: {}/{} ({:.0f}%)".format(
            test_correct, test_num, 100.0 * test_correct / test_num
        )
    )
    # Create the checkpoint directory on first use, then save the weights
    if not os.path.isdir(ckpt_path):
        os.makedirs(ckpt_path)
    torch.save(model.state_dict(), ckpt_path + filename)
def train_teachers(
    model,
    train_data,
    train_labels,
    test_data,
    test_labels,
    nb_teachers,
    teacher_id,
    filename,
):
    """Train one teacher model on its partition of the training data.

    The training set is split into `nb_teachers` disjoint partitions; the
    partition with index `teacher_id` is used to train `model`, and the
    resulting weights are saved under ckpt_path + filename by train().
    """
    # Select this teacher's slice of the training data
    teacher_data, teacher_labels = partition_dataset(
        train_data, train_labels, nb_teachers, teacher_id
    )
    # Wrap the partition and the shared test set in torch loaders
    teacher_loader = DataLoader(
        PrepareData(teacher_data, teacher_labels), batch_size=64, shuffle=True
    )
    eval_loader = DataLoader(
        PrepareData(test_data, test_labels), batch_size=64, shuffle=False
    )
    print("\nTrain teacher ID: " + str(teacher_id))
    # Delegate the actual optimization loop (and checkpointing) to train()
    train(model, teacher_loader, eval_loader, ckpt_path, filename)
def softmax_preds(model, nb_labels, images_loader, ckpt_path, return_logits=False):
    """Compute predictions of the model checkpoint stored at ckpt_path.

    Loads the weights found at `ckpt_path` into `model`, runs inference
    over every batch of `images_loader`, and returns the class
    probabilities (softmax) -- or the raw logits when `return_logits` is
    True.

    :param model: torch module whose architecture matches the checkpoint
    :param nb_labels: number of output classes
    :param images_loader: DataLoader yielding (image, label) batches
    :param ckpt_path: path of the saved state_dict to load
    :param return_logits: if set to True, return logits instead of probabilities
    :return: np.float32 array of shape (nb_samples, nb_labels)
    """
    # Allocate the output array up front
    data_length = len(images_loader.dataset)
    preds = np.zeros((data_length, nb_labels), dtype=np.float32)
    # Restore the checkpointed weights and switch to inference mode
    check = torch.load(ckpt_path)
    model.load_state_dict(check)
    model.eval()  # set model to evaluate mode
    start = 0
    for img, label in images_loader:
        output = model(Var(img))
        if return_logits:
            # BUG FIX: return_logits used to be silently ignored; honor it
            # by exporting the raw (pre-softmax) scores
            batch_preds = output.data.numpy()
        else:
            # Normalize logits to probabilities over the class dimension
            batch_preds = F.softmax(output, dim=1).data.numpy()
        end = start + len(img)
        preds[start:end, :] = batch_preds
        start = end
    return preds
def ensemble_preds(model, dataset, nb_labels, nb_teachers, stdnt_data_loader):
    """Query every teacher checkpoint for predictions on the same data.

    Each teacher's checkpoint is loaded in turn and its softmax
    predictions on `stdnt_data_loader` are collected into one array that
    can later be aggregated (cf. prepare_student_data / noisy_max).

    :param model: torch module matching the teacher architecture
    :param dataset: dataset name used in the checkpoint filenames
    :param nb_labels: number of output classes
    :param nb_teachers: number of teachers (in the ensemble) to learn from
    :param stdnt_data_loader: loader over the unlabeled student data
    :return: 3d array (teacher id, sample id, probability per class)
    """
    # One probability vector per (teacher, sample) pair
    nb_samples = len(stdnt_data_loader.dataset)
    result = np.zeros((nb_teachers, nb_samples, nb_labels), dtype=np.float32)
    for teacher_id in xrange(nb_teachers):
        # Checkpoint file holding this teacher's trained weights
        ckpt_name = "{}_{}_teachers_{}.pth".format(dataset, nb_teachers, teacher_id)
        # Run inference with this teacher and store its predictions
        result[teacher_id] = softmax_preds(
            model, nb_labels, stdnt_data_loader, ckpt_path + ckpt_name
        )
        # Querying large ensembles is slow, so report progress per teacher
        print("Computed Teacher " + str(teacher_id) + " softmax predictions")
    return result
def prepare_student_data(
    model,
    dataset,
    test_data,
    test_labels,
    nb_labels,
    nb_teachers,
    stdnt_share,
    lap_scale,
):
    """Label a slice of the test set with the noisy teacher ensemble.

    The first `stdnt_share` samples of the test set become the student's
    training data; their labels are produced by aggregating the teachers'
    predictions with noisy_max(). The remaining samples are kept aside as
    the student's own test set.

    :param model: torch module matching the teacher architecture
    :param dataset: dataset name used in checkpoint filenames
    :param test_data: tensor of held-out input samples
    :param test_labels: tensor of ground-truth labels for test_data
    :param nb_labels: number of output classes
    :param nb_teachers: number of teachers (in the ensemble) to learn from
    :param stdnt_share: number of samples given to the student for training
    :param lap_scale: scale of the Laplacian noise used by noisy_max()
    :return: (train data, noisy train labels, test data, test labels)
    """
    # Work with labels as a numpy array from here on
    test_labels = test_labels.numpy()
    # There must be samples left over to evaluate the student on
    assert stdnt_share < len(test_data)
    # The student trains on the first stdnt_share (unlabeled) samples
    student_data = test_data[:stdnt_share]
    student_loader = DataLoader(
        PrepareData(student_data, test_labels[:stdnt_share]),
        batch_size=64,
        shuffle=False,
    )
    # Collect every teacher's predictions on the student's training data
    teachers_preds = ensemble_preds(
        model, dataset, nb_labels, nb_teachers, student_loader
    )
    # Aggregate the votes under Laplacian noise to get private labels
    student_labels = noisy_max(teachers_preds, nb_labels, lap_scale)
    # Report how well the noisy labels match the ground truth
    agg_accuracy = accuracy(student_labels, test_labels[:stdnt_share])
    print("\nAccuracy of the aggregated labels: " + str(agg_accuracy) + "\n")
    # Everything after stdnt_share is reserved for evaluating the student
    return (
        student_data,
        student_labels,
        test_data[stdnt_share:],
        test_labels[stdnt_share:],
    )
def train_student(
    model,
    dataset,
    test_data,
    test_labels,
    nb_labels,
    nb_teachers,
    stdnt_share,
    lap_scale,
):
    """Train a student model on privately aggregated teacher labels.

    Student training data is labeled by the teacher ensemble via
    prepare_student_data(); the student is then trained with the same
    architecture and its accuracy on the held-out split is printed.

    :param model: torch module used for the student (same as teachers)
    :param dataset: dataset name used in checkpoint filenames
    :param test_data: tensor of held-out input samples
    :param test_labels: tensor of ground-truth labels for test_data
    :param nb_labels: number of output classes
    :param nb_teachers: number of teachers (in the ensemble) to learn from
    :param stdnt_share: number of samples used as student training data
    :param lap_scale: scale of the Laplacian noise used during aggregation
    :return: True if student training went well
    """
    # Label student data with the (noisy) teacher ensemble
    train_x, train_y, held_x, held_y = prepare_student_data(
        model,
        dataset,
        test_data,
        test_labels,
        nb_labels,
        nb_teachers,
        stdnt_share,
        lap_scale,
    )
    # Checkpoint name under which the student weights are stored
    student_ckpt = str(dataset) + "_" + str(nb_teachers) + "_student.ckpt"
    train_loader = DataLoader(
        PrepareData(train_x, train_y), batch_size=64, shuffle=False
    )
    eval_loader = DataLoader(
        PrepareData(held_x, held_y), batch_size=64, shuffle=False
    )
    # Optimize the student; train() also saves the checkpoint
    train(model, train_loader, eval_loader, ckpt_path, student_ckpt)
    # Reload the checkpoint and score the student on the held-out split
    student_preds = softmax_preds(
        model, nb_labels, eval_loader, ckpt_path + student_ckpt
    )
    precision = accuracy(student_preds, held_y)
    print("\nPrecision of student after training: " + str(precision))
    return True
def labels_from_probs(probs):
    """Collapse probabilities/logits to label indices.

    Takes the argmax along the last dimension of `probs`, i.e. the class
    with the highest probability (or logit) for every sample.

    :param probs: numpy array with probabilities or logits on the last axis
    :return: np.int32 array with the last axis reduced to a label index
    """
    # axis=-1 addresses the trailing (class) dimension regardless of rank
    return np.argmax(probs, axis=-1).astype(np.int32)
def noisy_max(logits, nb_labels, lap_scale, return_clean_votes=False):
    """Aggregate teacher votes with the noisy-max mechanism.

    Takes the softmax/logit output of several models resulting from
    inference on identical inputs, counts the per-class votes for each
    sample, perturbs the counts with Laplacian noise, and returns the
    most frequent (noisy) label per sample.

    :param logits: array of shape (nb_teachers, nb_samples, nb_classes)
        holding logits or probabilities for each sample
    :param nb_labels: number of output classes
    :param lap_scale: scale of the Laplacian noise added to the counts
    :param return_clean_votes: if True, also return the clean vote counts
        (without Laplacian noise), usable for privacy analysis of this
        aggregation mechanism
    :return: np.int32 array of noisy labels, or -- when return_clean_votes
        is True -- the triple (noisy labels, clean vote counts per
        sample/class, per-teacher labels before aggregation)
    """
    # Reduce each teacher's output to a label: argmax over the class axis
    labels = np.argmax(logits, axis=-1).astype(np.int32)
    labels = labels.reshape((labels.shape[0], labels.shape[1]))
    nb_samples = int(labels.shape[1])
    # Array holding the final aggregated label per sample
    result = np.zeros(nb_samples)
    if return_clean_votes:
        # Noise-free vote counts, exported for privacy accounting
        clean_votes = np.zeros((nb_samples, nb_labels))
    for i in range(nb_samples):
        # Count votes per class. BUG FIX: size the histogram with nb_labels
        # instead of a hard-coded 10 -- the old constant broke clean-vote
        # export (shape mismatch against clean_votes) and skipped noise for
        # classes >= 10 whenever nb_labels != 10.
        label_counts = np.bincount(labels[:, i], minlength=nb_labels)
        if return_clean_votes:
            # Store vote counts for export
            clean_votes[i] = label_counts
        # Cast to float before adding real-valued Laplacian noise
        label_counts = np.asarray(label_counts, dtype=np.float32)
        # Sample independent Laplacian noise for each class count
        for cls in range(nb_labels):
            label_counts[cls] += np.random.laplace(loc=0.0, scale=float(lap_scale))
        # The noisy majority vote wins
        result[i] = np.argmax(label_counts)
    # int32 labels for compatibility with downstream feed dictionaries
    result = np.asarray(result, dtype=np.int32)
    if return_clean_votes:
        # result: labels obtained from the noisy aggregation
        # clean_votes: teacher votes per sample and class (no noise)
        # labels: labels assigned by each teacher before aggregation
        return result, clean_votes, labels
    # Only return labels resulting from noisy aggregation
    return result
def aggregation_most_frequent(logits):
    """Deterministic majority-vote aggregation of teacher outputs.

    Takes the softmax/logit output of several models resulting from
    inference on identical inputs and returns the most frequent label per
    sample; unlike noisy_max(), no noise is injected.

    :param logits: array of shape (nb_teachers, nb_samples, nb_classes)
    :return: np.int32 array with one aggregated label per sample
    """
    # One label per (teacher, sample): argmax over the trailing class axis
    votes = np.argmax(logits, axis=-1).astype(np.int32)
    votes = votes.reshape((votes.shape[0], votes.shape[1]))
    nb_samples = int(votes.shape[1])
    result = np.zeros(nb_samples)
    for sample_idx in range(nb_samples):
        # Histogram of votes; padding to at least 10 bins mirrors the
        # original behavior and never changes the argmax (pad counts are 0)
        counts = np.asarray(
            np.bincount(votes[:, sample_idx], minlength=10), dtype=np.int32
        )
        # Majority vote; ties resolve to the smallest class index
        result[sample_idx] = np.argmax(counts)
    return np.asarray(result, dtype=np.int32)
def accuracy(logits, labels):
    """Return the fraction of predictions in `logits` that match `labels`.

    :param logits: either a 2-D array of logits/probabilities (one row per
        sample) or a 1-D array of already-computed label predictions
    :param labels: the correct labels to match against (numpy array)
    :return: the accuracy as a float in [0, 1]
    """
    assert len(logits) == len(labels)
    if len(np.shape(logits)) > 1:
        # 2-D input: the predicted label is the argmax of each row
        predictions = np.argmax(logits, axis=1)
    else:
        # 1-D input: the values are already labels
        assert len(np.shape(logits)) == 1
        predictions = logits
    # Flatten labels so element-wise comparison is well defined, then
    # count matching positions
    nb_correct = np.sum(predictions == labels.reshape(len(labels)))
    # Fraction of correct guesses
    return float(nb_correct) / len(labels)
import logging
import os.path
import sys
# Automagically add util/py_lib to PYTHONPATH environment variable.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
'..', '..', 'util', 'py_lib'))
sys.path.insert(0, path)
import seqan.app_tests as app_tests
def main(source_base, binary_base):
"""Main entry point of the script."""
print 'Executing test for insegt'
print '========================='
print
ph = app_tests.TestPathHelper(
source_base, binary_base,
'apps/insegt/tests') # tests dir
# ============================================================
# Auto-detect the binary path.
# ============================================================
path_to_program = app_tests.autolocateBinary(
binary_base, 'bin', 'insegt')
# ============================================================
# Built TestConf list.
# ============================================================
# Build list with TestConf objects, analoguely to how the output
# was generated in generate_outputs.sh.
conf_list = []
# ============================================================
# First Section.
# ============================================================
# App TestConf objects to conf_list, just like this for each
# test you want to run.
conf = app_tests.TestConf(
program=path_to_program,
args=['-ro', ph.outFile('default_readOutput.gff'),
'-ao', ph.outFile('default_annoOutput.gff'),
'-to', ph.outFile('default_tupleOutput.gff'),
ph.inFile('alignments.sam'),
ph.inFile('annotations.gff')],
to_diff=[(ph.inFile('default_annoOutput.gff'),
ph.outFile('default_annoOutput.gff')),
(ph.inFile('default_readOutput.gff'),
ph.outFile('default_readOutput.gff')),
(ph.inFile('default_tupleOutput.gff'),
ph.outFile('default_tupleOutput.gff'))])
conf_list.append(conf)
conf = app_tests.TestConf(
program=path_to_program,
args=['-c', str(2),
'-ro', ph.outFile('threshold-count2_readOutput.gff'),
'-ao', ph.outFile('threshold-count2_annoOutput.gff'),
'-to', ph.outFile('threshold-count2_tupleOutput.gff'),
ph.inFile('alignments.sam'),
ph.inFile('annotations.gff')],
to_diff=[(ph.inFile('threshold-count2_annoOutput.gff'),
ph.outFile('threshold-count2_annoOutput.gff')),
(ph.inFile('threshold-count2_readOutput.gff'),
ph.outFile('threshold-count2_readOutput.gff')),
(ph.inFile('threshold-count2_tupleOutput.gff'),
ph.outFile('threshold-count2_tupleOutput.gff'))])
conf_list.append(conf)
conf = app_tests.TestConf(
program=path_to_program,
args=['-n', str(3),
'-ro', ph.outFile('ntuple3_readOutput.gff'),
'-ao', ph.outFile('ntuple3_annoOutput.gff'),
'-to', ph.outFile('ntuple3_tupleOutput.gff'),
ph.inFile('alignments.sam'),
ph.inFile('annotations.gff')],
to_diff=[(ph.inFile('ntuple3_annoOutput.gff'),
ph.outFile('ntuple3_annoOutput.gff')),
(ph.inFile('ntuple3_readOutput.gff'),
ph.outFile('ntuple3_readOutput.gff')),
(ph.inFile('ntuple3_tupleOutput.gff'),
ph.outFile('ntuple3_tupleOutput.gff'))])
conf_list.append(conf)
conf = app_tests.TestConf(
program=path_to_program,
args=['-m',
'-ro', ph.outFile('max-tuple_readOutput.gff'),
'-ao', ph.outFile('max-tuple_annoOutput.gff'),
'-to', ph.outFile('max-tuple_tupleOutput.gff'),
ph.inFile('alignments.sam'),
ph.inFile('annotations.gff')],
to_diff=[(ph.inFile('max-tuple_annoOutput.gff'),
ph.outFile('max-tuple_annoOutput.gff')),
(ph.inFile('max-tuple_readOutput.gff'),
ph.outFile('max-tuple_readOutput.gff')),
(ph.inFile('max-tuple_tupleOutput.gff'),
ph.outFile('max-tuple_tupleOutput.gff'))])
conf_list.append(conf)
conf = app_tests.TestConf(
program=path_to_program,
args=['-e',
'-ro', ph.outFile('exact-ntuple_readOutput.gff'),
'-ao', ph.outFile('exact-ntuple_annoOutput.gff'),
'-to', ph.outFile('exact-ntuple_tupleOutput.gff'),
ph.inFile('alignments.sam'),
ph.inFile('annotations.gff')],
to_diff=[(ph.inFile('exact-ntuple_annoOutput.gff'),
ph.outFile('exact-ntuple_annoOutput.gff')),
(ph.inFile('exact-ntuple_readOutput.gff'),
ph.outFile('exact-ntuple_readOutput.gff')),
(ph.inFile('exact-ntuple_tupleOutput.gff'),
ph.outFile('exact-ntuple_tupleOutput.gff'))])
conf_list.append(conf)
# ============================================================
# Execute the tests.
# ============================================================
failures = 0
for conf in conf_list:
res = app_tests.runTest(conf)
# Output to the user.
print ' '.join(['insegt'] + conf.args),
if res:
print 'OK'
else:
failures += 1
print 'FAILED'
# Cleanup.
ph.deleteTempDir()
print '=============================='
print ' total tests: %d' % len(conf_list)
print ' failed tests: %d' % failures
print 'successful tests: %d' % (len(conf_list) - failures)
print '=============================='
# Compute and return return code.
return failures != 0
if __name__ == '__main__':
    sys.exit(app_tests.main(main))
import logging
import os.path
import sys
# Automagically add util/py_lib to PYTHONPATH environment variable.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
'..', '..', 'util', 'py_lib'))
sys.path.insert(0, path)
import seqan.app_tests as app_tests
def main(source_base, binary_base):
"""Main entry point of the script."""
print 'Executing test for insegt'
print '========================='
print
ph = app_tests.TestPathHelper(
source_base, binary_base,
'apps/insegt/tests') # tests dir
# ============================================================
# Auto-detect the binary path.
# ============================================================
path_to_program = app_tests.autolocateBinary(
binary_base, 'bin', 'insegt')
# ============================================================
# Built TestConf list.
# ============================================================
# Build list with TestConf objects, analoguely to how the output
# was generated in generate_outputs.sh.
conf_list = []
# ============================================================
# First Section.
# ============================================================
# App TestConf objects to conf_list, just like this for each
# test you want to run.
conf = app_tests.TestConf(
program=path_to_program,
args=['-ro', ph.outFile('default_readOutput.gff'),
'-ao', ph.outFile('default_annoOutput.gff'),
'-to', ph.outFile('default_tupleOutput.gff'),
ph.inFile('alignments.sam'),
ph.inFile('annotations.gff')],
to_diff=[(ph.inFile('default_annoOutput.gff'),
ph.outFile('default_annoOutput.gff')),
(ph.inFile('default_readOutput.gff'),
ph.outFile('default_readOutput.gff')),
(ph.inFile('default_tupleOutput.gff'),
ph.outFile('default_tupleOutput.gff'))])
conf_list.append(conf)
conf = app_tests.TestConf(
program=path_to_program,
args=['-c', str(2),
'-ro', ph.outFile('threshold-count2_readOutput.gff'),
'-ao', ph.outFile('threshold-count2_annoOutput.gff'),
'-to', ph.outFile('threshold-count2_tupleOutput.gff'),
ph.inFile('alignments.sam'),
ph.inFile('annotations.gff')],
to_diff=[(ph.inFile('threshold-count2_annoOutput.gff'),
ph.outFile('threshold-count2_annoOutput.gff')),
(ph.inFile('threshold-count2_readOutput.gff'),
ph.outFile('threshold-count2_readOutput.gff')),
(ph.inFile('threshold-count2_tupleOutput.gff'),
ph.outFile('threshold-count2_tupleOutput.gff'))])
conf_list.append(conf)
conf = app_tests.TestConf(
program=path_to_program,
args=['-n', str(3),
'-ro', ph.outFile('ntuple3_readOutput.gff'),
'-ao', ph.outFile('ntuple3_annoOutput.gff'),
'-to', ph.outFile('ntuple3_tupleOutput.gff'),
ph.inFile('alignments.sam'),
ph.inFile('annotations.gff')],
to_diff=[(ph.inFile('ntuple3_annoOutput.gff'),
ph.outFile('ntuple3_annoOutput.gff')),
(ph.inFile('ntuple3_readOutput.gff'),
ph.outFile('ntuple3_readOutput.gff')),
(ph.inFile('ntuple3_tupleOutput.gff'),
ph.outFile('ntuple3_tupleOutput.gff'))])
conf_list.append(conf)
conf = app_tests.TestConf(
program=path_to_program,
args=['-m',
'-ro', ph.outFile('max-tuple_readOutput.gff'),
'-ao', ph.outFile('max-tuple_annoOutput.gff'),
'-to', ph.outFile('max-tuple_tupleOutput.gff'),
ph.inFile('alignments.sam'),
ph.inFile('annotations.gff')],
to_diff=[(ph.inFile('max-tuple_annoOutput.gff'),
ph.outFile('max-tuple_annoOutput.gff')),
(ph.inFile('max-tuple_readOutput.gff'),
ph.outFile('max-tuple_readOutput.gff')),
(ph.inFile('max-tuple_tupleOutput.gff'),
ph.outFile('max-tuple_tupleOutput.gff'))])
conf_list.append(conf)
conf = app_tests.TestConf(
program=path_to_program,
args=['-e',
'-ro', ph.outFile('exact-ntuple_readOutput.gff'),
'-ao', ph.outFile('exact-ntuple_annoOutput.gff'),
'-to', ph.outFile('exact-ntuple_tupleOutput.gff'),
ph.inFile('alignments.sam'),
ph.inFile('annotations.gff')],
to_diff=[(ph.inFile('exact-ntuple_annoOutput.gff'),
ph.outFile('exact-ntuple_annoOutput.gff')),
(ph.inFile('exact-ntuple_readOutput.gff'),
ph.outFile('exact-ntuple_readOutput.gff')),
(ph.inFile('exact-ntuple_tupleOutput.gff'),
ph.outFile('exact-ntuple_tupleOutput.gff'))])
conf_list.append(conf)
# ============================================================
# Execute the tests.
# ============================================================
failures = 0
for conf in conf_list:
res = app_tests.runTest(conf)
# Output to the user.
print ' '.join(['insegt'] + conf.args),
if res:
print 'OK'
else:
failures += 1
print 'FAILED'
# Cleanup.
ph.deleteTempDir()
print '=============================='
print ' total tests: %d' % len(conf_list)
print ' failed tests: %d' % failures
print 'successful tests: %d' % (len(conf_list) - failures)
print '=============================='
# Compute and return return code.
return failures != 0
if __name__ == '__main__':
    sys.exit(app_tests.main(main))
import csv
import sys
from datetime import datetime, timedelta
import itertools
import operator
import os
# Only emit ANSI colors when stdout is a real terminal.
use_colors = sys.stdout.isatty()
if use_colors:
    try:
        import colorama
        if os.name == 'nt':
            # On Windows, strip/convert ANSI sequences so they render.
            colorama.init(strip=True, convert=True)
        else:
            colorama.init()
    except ImportError:
        print 'For colors install colorama ($ pip install colorama)'
        use_colors = False
# Times in the input CSV are written and parsed as hour:minute.
tformat = "%H:%M"
def parsetime(text):
    """Parse an HH:MM string into a datetime, or None if it is malformed.

    The parameter was renamed from `str` to stop shadowing the builtin;
    every call site passes it positionally, so callers are unaffected.
    """
    try:
        return datetime.strptime(text, tformat)
    except ValueError:
        return None
def abshourminute(td):
    """Break a timedelta into whole (hours, minutes).

    Days are folded into the hour count; leftover seconds are discarded.
    """
    total_minutes = td.seconds // 60
    hours, minutes = divmod(total_minutes, 60)
    return hours + td.days * 24, minutes
def hourminute(td):
    """Return ((hours, minutes), is_negative) for a possibly negative delta."""
    negative = td < timedelta()
    return abshourminute(-td if negative else td), negative
def formatahm(hm):
    """Format an (hours, minutes) pair as zero-padded HH:MM."""
    return "%02d:%02d" % (hm[0], hm[1])
def formathm(hm, pos=' ', neg='-'):
    """Format ((hours, minutes), negative_flag) with a sign prefix."""
    (hours, minutes), negative = hm
    sign = neg if negative else pos
    return "%s%02d:%02d" % (sign, hours, minutes)
def formatd(td, *args):
    # Convenience wrapper: timedelta -> signed "HH:MM" string; extra
    # positional args are forwarded to formathm as (pos, neg) prefixes.
    return formathm(hourminute(td), *args)
def grouped(iterable, n, fillvalue=None):
    # Chunk `iterable` into n-sized groups, padding the final group with
    # `fillvalue`. (Python 2 API: izip_longest is zip_longest on Python 3.)
    args = [iter(iterable)] * n
    return itertools.izip_longest(fillvalue=fillvalue, *args)
# A full working day, and the credit added when no lunch break was logged.
workday = timedelta(hours=7, minutes=45)
lunchbonus = timedelta(minutes=30)
# Column name -> index mapping; filled from the CSV header row below.
mapping = { }
total_time = timedelta()
expected_time = timedelta()
days = []
# "Now" truncated to hour:minute on the dummy date strptime uses
# (1900-01-01), so in-progress days compare correctly with parsed times.
cur_time_raw = datetime.now()
cur_time = datetime(1900,1,1, hour=cur_time_raw.hour, minute=cur_time_raw.minute)
# (duration, delta-vs-workday) placeholder for days without usable data.
nulltime = (timedelta(), timedelta())
# Element-wise addition of (duration, delta) pairs.
addtime = lambda x,y: tuple(map(operator.add, x, y))
zerotime = parsetime('0:00')
# Read the timesheet CSV named on the command line and turn every row
# into a (worked duration, delta vs. expected workday) pair.
with open(sys.argv[1], 'rb') as csvfile:
    reader = csv.reader(csvfile)
    # First row is the header: remember each column's index by name.
    mapping = { n: i for (i, n) in enumerate(next(reader)) }
    for row in reader:
        getcol = lambda n: row[mapping[n]]
        gettimecol = lambda n: parsetime(getcol(n))
        start = gettimecol('Start')
        end = gettimecol('End')
        lunch = getcol('Lunch?') == 'Yes'
        adjust = gettimecol('Adjust')
        # 'Adjust' holds a duration, not a time of day; convert it.
        adjust_delta = adjust - zerotime if adjust else None
        if start and not end:
            # Day still in progress: measure up to the current time.
            end = cur_time
        if None in (start, end, lunch):
            # Row is incomplete; record a placeholder day.
            days.append(nulltime)
            continue
        duration = end - start
        if not lunch:
            # No lunch break logged: credit the standard bonus.
            duration += lunchbonus
        if adjust_delta:
            duration -= adjust_delta
        total_time += duration
        expected_time += workday
        delta = duration - workday
        days.append((duration, delta))
# Group days into 5-day weeks and weeks into 4-week "months".
weeks = list(grouped(days, 5, nulltime))
months = list(grouped(weeks, 4, []))
def isnull(td):
    """True for a zero-length timedelta (sub-second residue is ignored)."""
    return not (td.days or td.seconds)
def formattad(t, td):
    # Render a (total, delta) pair as two HH:MM columns.  When colors are
    # enabled the delta is red/green by sign and bright when its magnitude
    # is at least 30 minutes.
    if use_colors:
        ts = ''
        ds = ((colorama.Fore.RED, colorama.Fore.GREEN)[td >= timedelta()] +
              (colorama.Style.BRIGHT if abs(td) >= timedelta(minutes=30) else ''))
        ns = ''
        rs = colorama.Fore.RESET + colorama.Style.RESET_ALL
    else:
        ts = ds = ns = rs = ''
    # Empty days render as a dotted placeholder instead of "00:00  00:00".
    if isnull(t) and isnull(td):
        return ns + ' ' + '.' * 12 + rs
    return "%s %s" % (ts + formatd(t), ds + formatd(td, '+')) + rs
total_sum = nulltime
print ''
for month in months:
weeklist = []
sumlist = []
for week in month:
weeklist.append([x if x else nulltime for x in week])
sumlist.append(reduce(addtime, week, nulltime))
weeklist_transposed = itertools.izip_longest(*weeklist, fillvalue=nulltime)
msum = reduce(addtime, sumlist, nulltime)
total_sum = addtime(total_sum, msum)
ind = ' ' * 2
sep = ' ' * 3
print '\n'.join(ind + sep.join(formattad(*day) for day in week) for week in weeklist_transposed)
print ''
print ind + sep.join(formattad(*x) for x in sumlist)
print ''
print 'Month: %s' % formattad(*msum)
print ''
print 'Total: %s' % formattad(*total_sum)
if use_colors:
colorama.deinit() | workcalc.py |
import csv
import sys
from datetime import datetime, timedelta
import itertools
import operator
import os
use_colors = sys.stdout.isatty()
if use_colors:
try:
import colorama
if os.name == 'nt':
colorama.init(strip=True, convert=True)
else:
colorama.init()
except ImportError:
print 'For colors install colorama ($ pip install colorama)'
use_colors = False
tformat = "%H:%M"
def parsetime(text, fmt=None):
    """Parse a clock string into a datetime, or return None on bad input.

    Args:
        text: the string to parse (e.g. "12:34"); blank/malformed values
            yield None so empty CSV cells are treated as "no value".
        fmt: strptime format; defaults to the module-level ``tformat``
            ("%H:%M").  Making it a parameter generalizes the helper and
            avoids shadowing the builtin ``str`` (the old parameter name).
    """
    if fmt is None:
        fmt = tformat
    try:
        return datetime.strptime(text, fmt)
    except ValueError:
        return None
def abshourminute(td):
    """Break a timedelta into a (total_hours, minutes) pair; seconds are dropped."""
    hrs, remainder = divmod(td.seconds, 3600)
    mins = remainder // 60
    # Fold whole days into the hour count so multi-day totals stay readable.
    return hrs + td.days * 24, mins
def hourminute(td):
    """Return ((hours, minutes), is_negative) for a possibly-negative timedelta."""
    negative = td < timedelta()
    magnitude = -td if negative else td
    return abshourminute(magnitude), negative
def formatahm(hm):
return "%02d:%02d" % hm
def formathm(hm, pos=' ', neg='-'):
return "%s%02d:%02d" % ((pos, neg)[hm[1]], hm[0][0], hm[0][1])
def formatd(td, *args):
return formathm(hourminute(td), *args)
def grouped(iterable, n, fillvalue=None):
args = [iter(iterable)] * n
return itertools.izip_longest(fillvalue=fillvalue, *args)
workday = timedelta(hours=7, minutes=45)
lunchbonus = timedelta(minutes=30)
mapping = { }
total_time = timedelta()
expected_time = timedelta()
days = []
cur_time_raw = datetime.now()
cur_time = datetime(1900,1,1, hour=cur_time_raw.hour, minute=cur_time_raw.minute)
nulltime = (timedelta(), timedelta())
addtime = lambda x,y: tuple(map(operator.add, x, y))
zerotime = parsetime('0:00')
with open(sys.argv[1], 'rb') as csvfile:
reader = csv.reader(csvfile)
mapping = { n: i for (i, n) in enumerate(next(reader)) }
for row in reader:
getcol = lambda n: row[mapping[n]]
gettimecol = lambda n: parsetime(getcol(n))
start = gettimecol('Start')
end = gettimecol('End')
lunch = getcol('Lunch?') == 'Yes'
adjust = gettimecol('Adjust')
adjust_delta = adjust - zerotime if adjust else None
if start and not end:
end = cur_time
if None in (start, end, lunch):
days.append(nulltime)
continue
duration = end - start
if not lunch:
duration += lunchbonus
if adjust_delta:
duration -= adjust_delta
total_time += duration
expected_time += workday
delta = duration - workday
days.append((duration, delta))
weeks = list(grouped(days, 5, nulltime))
months = list(grouped(weeks, 4, []))
def isnull(td):
    """True for a zero-length timedelta (sub-second residue is ignored)."""
    return not (td.days or td.seconds)
def formattad(t, td):
if use_colors:
ts = ''
ds = ((colorama.Fore.RED, colorama.Fore.GREEN)[td >= timedelta()] +
(colorama.Style.BRIGHT if abs(td) >= timedelta(minutes=30) else ''))
ns = ''
rs = colorama.Fore.RESET + colorama.Style.RESET_ALL
else:
ts = ds = ns = rs = ''
if isnull(t) and isnull(td):
return ns + ' ' + '.' * 12 + rs
return "%s %s" % (ts + formatd(t), ds + formatd(td, '+')) + rs
total_sum = nulltime
print ''
for month in months:
weeklist = []
sumlist = []
for week in month:
weeklist.append([x if x else nulltime for x in week])
sumlist.append(reduce(addtime, week, nulltime))
weeklist_transposed = itertools.izip_longest(*weeklist, fillvalue=nulltime)
msum = reduce(addtime, sumlist, nulltime)
total_sum = addtime(total_sum, msum)
ind = ' ' * 2
sep = ' ' * 3
print '\n'.join(ind + sep.join(formattad(*day) for day in week) for week in weeklist_transposed)
print ''
print ind + sep.join(formattad(*x) for x in sumlist)
print ''
print 'Month: %s' % formattad(*msum)
print ''
print 'Total: %s' % formattad(*total_sum)
if use_colors:
colorama.deinit() | 0.228587 | 0.173131 |
import os
import sys
import logging
import threading
currPath = os.path.dirname(os.path.realpath(__file__))
rootPath = os.path.dirname(currPath)
sys.path.append(rootPath)
from rtCommon.exampleInterface import ExampleInterface
from rtCommon.wsRemoteService import WsRemoteService, parseConnectionArgs
from rtCommon.utils import installLoggers
class ExampleService:
    """Remote service that handles ExampleInterface requests from a projectServer."""

    def __init__(self, args, webSocketChannelName='wsData'):
        """
        Uses the WsRemoteService framework to parse connection-related args and establish
        a connection to a remote projectServer. Instantiates a local version of
        ExampleInterface to handle client requests coming from the projectServer connection.

        Args:
            args: Argparse args related to connecting to the remote server. These include
                "-s <server>", "-u <username>", "-p <password>", "--test",
                "-i <retry-connection-interval>"
            webSocketChannelName: The websocket url extension used to connect and communicate
                to the remote projectServer, 'wsData' will connect to 'ws://server:port/wsData'
        """
        self.exampleInterface = ExampleInterface(dataRemote=False)
        self.wsRemoteService = WsRemoteService(args, webSocketChannelName)
        self.wsRemoteService.addHandlerClass(ExampleInterface, self.exampleInterface)

    def runDetached(self):
        """Starts the receiver in its own thread."""
        self.recvThread = threading.Thread(name='recvThread',
                                           target=self.wsRemoteService.runForever)
        # Thread.setDaemon() is deprecated since Python 3.10; set the
        # attribute directly so the thread won't block interpreter exit.
        self.recvThread.daemon = True
        self.recvThread.start()
if __name__ == "__main__":
installLoggers(logging.INFO, logging.INFO, filename='logs/ExampleService.log')
# parse connection args
# These include: "-s <server>", "-u <username>", "-p <password>", "--test",
# "-i <retry-connection-interval>"
connectionArgs = parseConnectionArgs()
exampleService = ExampleService(connectionArgs)
exampleService.wsRemoteService.runForever() | rtCommon/exampleService.py | import os
import sys
import logging
import threading
currPath = os.path.dirname(os.path.realpath(__file__))
rootPath = os.path.dirname(currPath)
sys.path.append(rootPath)
from rtCommon.exampleInterface import ExampleInterface
from rtCommon.wsRemoteService import WsRemoteService, parseConnectionArgs
from rtCommon.utils import installLoggers
class ExampleService:
    """Remote service that handles ExampleInterface requests from a projectServer."""

    def __init__(self, args, webSocketChannelName='wsData'):
        """
        Uses the WsRemoteService framework to parse connection-related args and establish
        a connection to a remote projectServer. Instantiates a local version of
        ExampleInterface to handle client requests coming from the projectServer connection.

        Args:
            args: Argparse args related to connecting to the remote server. These include
                "-s <server>", "-u <username>", "-p <password>", "--test",
                "-i <retry-connection-interval>"
            webSocketChannelName: The websocket url extension used to connect and communicate
                to the remote projectServer, 'wsData' will connect to 'ws://server:port/wsData'
        """
        self.exampleInterface = ExampleInterface(dataRemote=False)
        self.wsRemoteService = WsRemoteService(args, webSocketChannelName)
        self.wsRemoteService.addHandlerClass(ExampleInterface, self.exampleInterface)

    def runDetached(self):
        """Starts the receiver in its own thread."""
        self.recvThread = threading.Thread(name='recvThread',
                                           target=self.wsRemoteService.runForever)
        # Thread.setDaemon() is deprecated since Python 3.10; set the
        # attribute directly so the thread won't block interpreter exit.
        self.recvThread.daemon = True
        self.recvThread.start()
if __name__ == "__main__":
installLoggers(logging.INFO, logging.INFO, filename='logs/ExampleService.log')
# parse connection args
# These include: "-s <server>", "-u <username>", "-p <password>", "--test",
# "-i <retry-connection-interval>"
connectionArgs = parseConnectionArgs()
exampleService = ExampleService(connectionArgs)
exampleService.wsRemoteService.runForever() | 0.442155 | 0.078148 |
"""Base task runner"""
import getpass
import os
import subprocess
import threading
from tempfile import NamedTemporaryFile
from typing import Optional, Union
from airflow.configuration import conf
from airflow.exceptions import AirflowConfigException
from airflow.models.taskinstance import load_error_file
from airflow.utils.configuration import tmp_configuration_copy
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.net import get_hostname
PYTHONPATH_VAR = 'PYTHONPATH'
class BaseTaskRunner(LoggingMixin):
    """
    Runs Airflow task instances by invoking the `airflow tasks run` command with raw
    mode enabled in a subprocess.

    :param local_task_job: The local task job associated with running the
        associated task instance.
    :type local_task_job: airflow.jobs.local_task_job.LocalTaskJob
    """

    def __init__(self, local_task_job):
        # Pass task instance context into log handlers to setup the logger.
        super().__init__(local_task_job.task_instance)
        self._task_instance = local_task_job.task_instance

        popen_prepend = []
        # Impersonation: the task's own run_as_user wins; otherwise fall
        # back to the [core] default_impersonation config (may be unset).
        if self._task_instance.run_as_user:
            self.run_as_user = self._task_instance.run_as_user
        else:
            try:
                self.run_as_user = conf.get('core', 'default_impersonation')
            except AirflowConfigException:
                self.run_as_user = None

        # Add sudo commands to change user if we need to. Needed to handle SubDagOperator
        # case using a SequentialExecutor.
        self.log.debug("Planning to run as the %s user", self.run_as_user)
        if self.run_as_user and (self.run_as_user != getpass.getuser()):
            # We want to include any environment variables now, as we won't
            # want to have to specify them in the sudo call - they would show
            # up in `ps` that way! And run commands now, as the other user
            # might not be able to run the cmds to get credentials
            cfg_path = tmp_configuration_copy(chmod=0o600)

            # Give ownership of file to user; only they can read and write
            subprocess.call(['sudo', 'chown', self.run_as_user, cfg_path], close_fds=True)

            # propagate PYTHONPATH environment variable
            pythonpath_value = os.environ.get(PYTHONPATH_VAR, '')
            popen_prepend = ['sudo', '-E', '-H', '-u', self.run_as_user]

            if pythonpath_value:
                popen_prepend.append(f'{PYTHONPATH_VAR}={pythonpath_value}')
        else:
            # Always provide a copy of the configuration file settings. Since
            # we are running as the same user, and can pass through environment
            # variables then we don't need to include those in the config copy
            # - the runner can read/execute those values as it needs
            cfg_path = tmp_configuration_copy(chmod=0o600)

        # delete=True: the temp error file disappears when closed in on_finish().
        self._error_file = NamedTemporaryFile(delete=True)
        self._cfg_path = cfg_path
        self._command = (
            popen_prepend
            + self._task_instance.command_as_list(
                raw=True,
                pickle_id=local_task_job.pickle_id,
                mark_success=local_task_job.mark_success,
                job_id=local_task_job.id,
                pool=local_task_job.pool,
                cfg_path=cfg_path,
            )
            + ["--error-file", self._error_file.name]
        )
        self.process = None

    def deserialize_run_error(self) -> Optional[Union[str, Exception]]:
        """Return task runtime error if its written to provided error file."""
        return load_error_file(self._error_file)

    def _read_task_logs(self, stream):
        # Forward each line of the subprocess's combined stdout/stderr into
        # this runner's logger; runs on the daemon thread started in
        # run_command(), and exits when the stream reaches EOF.
        while True:
            line = stream.readline()
            if isinstance(line, bytes):
                line = line.decode('utf-8')
            if not line:
                break
            self.log.info(
                'Job %s: Subtask %s %s',
                self._task_instance.job_id,
                self._task_instance.task_id,
                line.rstrip('\n'),
            )

    def run_command(self, run_with=None):
        """
        Run the task command.

        :param run_with: list of tokens to run the task command with e.g. ``['bash', '-c']``
        :type run_with: list
        :return: the process that was run
        :rtype: subprocess.Popen
        """
        run_with = run_with or []
        full_cmd = run_with + self._command

        self.log.info("Running on host: %s", get_hostname())
        self.log.info('Running: %s', full_cmd)
        # pylint: disable=subprocess-popen-preexec-fn
        proc = subprocess.Popen(
            full_cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            universal_newlines=True,
            close_fds=True,
            env=os.environ.copy(),
            # setsid puts the child in its own session/process group.
            preexec_fn=os.setsid,
        )

        # Start daemon thread to read subprocess logging output
        log_reader = threading.Thread(
            target=self._read_task_logs,
            args=(proc.stdout,),
        )
        log_reader.daemon = True
        log_reader.start()
        return proc

    def start(self):
        """Start running the task instance in a subprocess."""
        raise NotImplementedError()

    def return_code(self) -> Optional[int]:
        """
        :return: The return code associated with running the task instance or
            None if the task is not yet done.
        :rtype: int
        """
        raise NotImplementedError()

    def terminate(self) -> None:
        """Force kill the running task instance."""
        raise NotImplementedError()

    def on_finish(self) -> None:
        """A callback that should be called when this is done running."""
        if self._cfg_path and os.path.isfile(self._cfg_path):
            if self.run_as_user:
                # The config copy may be owned by the impersonated user,
                # so removal goes through sudo as well.
                subprocess.call(['sudo', 'rm', self._cfg_path], close_fds=True)
            else:
                os.remove(self._cfg_path)
        self._error_file.close()
import getpass
import os
import subprocess
import threading
from tempfile import NamedTemporaryFile
from typing import Optional, Union
from airflow.configuration import conf
from airflow.exceptions import AirflowConfigException
from airflow.models.taskinstance import load_error_file
from airflow.utils.configuration import tmp_configuration_copy
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.net import get_hostname
PYTHONPATH_VAR = 'PYTHONPATH'
class BaseTaskRunner(LoggingMixin):
"""
Runs Airflow task instances by invoking the `airflow tasks run` command with raw
mode enabled in a subprocess.
:param local_task_job: The local task job associated with running the
associated task instance.
:type local_task_job: airflow.jobs.local_task_job.LocalTaskJob
"""
def __init__(self, local_task_job):
# Pass task instance context into log handlers to setup the logger.
super().__init__(local_task_job.task_instance)
self._task_instance = local_task_job.task_instance
popen_prepend = []
if self._task_instance.run_as_user:
self.run_as_user = self._task_instance.run_as_user
else:
try:
self.run_as_user = conf.get('core', 'default_impersonation')
except AirflowConfigException:
self.run_as_user = None
# Add sudo commands to change user if we need to. Needed to handle SubDagOperator
# case using a SequentialExecutor.
self.log.debug("Planning to run as the %s user", self.run_as_user)
if self.run_as_user and (self.run_as_user != getpass.getuser()):
# We want to include any environment variables now, as we won't
# want to have to specify them in the sudo call - they would show
# up in `ps` that way! And run commands now, as the other user
# might not be able to run the cmds to get credentials
cfg_path = tmp_configuration_copy(chmod=0o600)
# Give ownership of file to user; only they can read and write
subprocess.call(['sudo', 'chown', self.run_as_user, cfg_path], close_fds=True)
# propagate PYTHONPATH environment variable
pythonpath_value = os.environ.get(PYTHONPATH_VAR, '')
popen_prepend = ['sudo', '-E', '-H', '-u', self.run_as_user]
if pythonpath_value:
popen_prepend.append(f'{PYTHONPATH_VAR}={pythonpath_value}')
else:
# Always provide a copy of the configuration file settings. Since
# we are running as the same user, and can pass through environment
# variables then we don't need to include those in the config copy
# - the runner can read/execute those values as it needs
cfg_path = tmp_configuration_copy(chmod=0o600)
self._error_file = NamedTemporaryFile(delete=True)
self._cfg_path = cfg_path
self._command = (
popen_prepend
+ self._task_instance.command_as_list(
raw=True,
pickle_id=local_task_job.pickle_id,
mark_success=local_task_job.mark_success,
job_id=local_task_job.id,
pool=local_task_job.pool,
cfg_path=cfg_path,
)
+ ["--error-file", self._error_file.name]
)
self.process = None
def deserialize_run_error(self) -> Optional[Union[str, Exception]]:
"""Return task runtime error if its written to provided error file."""
return load_error_file(self._error_file)
def _read_task_logs(self, stream):
while True:
line = stream.readline()
if isinstance(line, bytes):
line = line.decode('utf-8')
if not line:
break
self.log.info(
'Job %s: Subtask %s %s',
self._task_instance.job_id,
self._task_instance.task_id,
line.rstrip('\n'),
)
def run_command(self, run_with=None):
"""
Run the task command.
:param run_with: list of tokens to run the task command with e.g. ``['bash', '-c']``
:type run_with: list
:return: the process that was run
:rtype: subprocess.Popen
"""
run_with = run_with or []
full_cmd = run_with + self._command
self.log.info("Running on host: %s", get_hostname())
self.log.info('Running: %s', full_cmd)
# pylint: disable=subprocess-popen-preexec-fn
proc = subprocess.Popen(
full_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
close_fds=True,
env=os.environ.copy(),
preexec_fn=os.setsid,
)
# Start daemon thread to read subprocess logging output
log_reader = threading.Thread(
target=self._read_task_logs,
args=(proc.stdout,),
)
log_reader.daemon = True
log_reader.start()
return proc
def start(self):
"""Start running the task instance in a subprocess."""
raise NotImplementedError()
def return_code(self) -> Optional[int]:
"""
:return: The return code associated with running the task instance or
None if the task is not yet done.
:rtype: int
"""
raise NotImplementedError()
def terminate(self) -> None:
"""Force kill the running task instance."""
raise NotImplementedError()
def on_finish(self) -> None:
"""A callback that should be called when this is done running."""
if self._cfg_path and os.path.isfile(self._cfg_path):
if self.run_as_user:
subprocess.call(['sudo', 'rm', self._cfg_path], close_fds=True)
else:
os.remove(self._cfg_path)
self._error_file.close() | 0.715026 | 0.1291 |
def readFile(filename):
    """Return the entire contents of *filename* as a string."""
    # Context manager guarantees the handle is closed even if read() raises
    # (the old open/read/close left the file open on error).
    with open(filename, "r") as f:
        return f.read()
def writeFile(filename, content):
    """Write *content* to *filename*, replacing any existing file."""
    # Context manager guarantees the handle is flushed and closed even if
    # write() raises (the old open/write/close leaked on error).
    with open(filename, "w") as f:
        f.write(content)
    return
def removeComments(text): # Removes Comments (/**/ and //) in and before the portlist and whitespace at the beginning
    """Strip ``/* */`` and ``//`` comments from *text* up to the first ';'.

    Only the region up to (and including) the first semicolon is returned,
    since that is all that is needed to read the module header/portlist.
    """
    result = ""
    insideMultilineComment = False
    insideOneLineComment = False
    for i in range (0, len(text)):
        # Two-character windows: charPair looks ahead to detect comment
        # openers, lastCharPair looks back to detect the "*/" closer.
        charPair = text[i:i+2]
        lastCharPair = text[i-1:i+1]
        if (insideMultilineComment):
            result += '' #"\n INSIDE MULTILINE COMMENT \n"
        if (insideOneLineComment):
            result += '' #"\n INSIDE ONELINE COMMENT \n"
        if (charPair == "/*"):
            insideMultilineComment = True
        if (charPair == "//"):
            insideOneLineComment = True
        if ((not insideMultilineComment) and (not insideOneLineComment)):
            result += text[i]
        if (lastCharPair == "*/"):
            insideMultilineComment = False
        if (text[i] == '\n'):
            # A line comment ends at the newline.
            insideOneLineComment = False
        if (text[i] == ";"): # we only need the part up to the portlist
            break
    # now remove whitespace at beginning
    start = result.find('m') # find 'module'
    result = result[start:]
    return result
def removeLengthSpecifiers(input):
    """Strip bracketed ranges (e.g. ``[63:0]``) and spaces from each entry."""
    output = []
    skipping = False  # currently inside a [...] specifier
    buf = ""
    for element in input:
        for ch in element:
            if ch == '[':
                skipping = True
            if not skipping and ch != ' ':
                buf += ch
            if ch == ']':
                skipping = False
        output.append(buf)
        buf = ""
    return output
def getInputsAndOutputs (portlist):
    """Split a port list into ``[input names, output names]``.

    Whitespace is stripped while scanning, so "input wire" collapses to
    "inputwire" and "output reg" to "outputreg"; encountering one of those
    markers switches which list the subsequent comma-separated names join.
    """
    # Trailing comma makes the final port flush like all the others.
    portlist = portlist + ','
    inputs = [] # array of strings
    outputs = [] # array of strings
    temp = ""
    inputTag = True # boolean True = input False = output
    for char in portlist:
        if (temp == "inputwire"):
            inputTag = True
            temp = ""
        if (temp == "outputreg"):
            inputTag = False
            temp = ""
        if (char == ','):
            if (inputTag == True):
                inputs.append(temp)
                temp = ""
            if (inputTag == False):
                outputs.append(temp)
                temp = ""
        if ((char != ' ') and (char != '\n') and (char != '\t') and (char != ',')):
            temp = temp + char
        if (char == ']'):
            # Re-insert one space after a length specifier like [63:0]
            # so the later declaration reads "reg [63:0] name".
            temp = temp + " "
    return [inputs, outputs]
def createInstantiation(modulename, portlist):
    """Return a named instantiation of *modulename* wiring every port to a
    signal of the same name (``.port(port)`` style)."""
    inputs = getInputsAndOutputs(portlist)[0]
    outputs = getInputsAndOutputs(portlist)[1]
    result = ""
    # Remove length specifiers (e.g.: [63:0])
    inputs = removeLengthSpecifiers(inputs)
    outputs = removeLengthSpecifiers(outputs)
    # Instantiate module
    result += "\n" + modulename + " " + modulename + "_I (\n"
    for input in inputs:
        result += "." + input + "(" + input + "),\n"
    for output in outputs:
        result += "." + output + "(" + output + "),\n"
    # Drop the trailing ",\n" left by the last port before closing the list.
    result = result[:-2]
    result += ");"
    return result
def getModuleName(file): # call removeComments first
    """Return the identifier that follows the ``module`` keyword."""
    buf = ""
    for ch in file:
        # Once the literal "module " prefix has been consumed, reset and
        # start collecting the name itself.
        if buf == "module ":
            buf = ""
        if ch == ' ' and buf != "module":
            return buf
        buf += ch
def getPortlist (file):
    """Return the text between the first '(' and ')' (scanning stops at ';')."""
    collected = []
    capturing = False
    for ch in file:
        if ch == ")":
            capturing = False
        if capturing:
            collected.append(ch)
        if ch == "(":
            # The '(' itself is excluded: capture starts on the next char.
            capturing = True
        if ch == ";":
            break
    return ''.join(collected)
def createInitialiseTask(inputs):
    """Emit a SystemVerilog ``initialise`` task that drives every input to
    its idle level (active-low ``*_n`` signals to 1, others to 0), waits one
    period and then calls ``reset``."""
    inputs = removeLengthSpecifiers(inputs)
    result = ""
    result += "task initialise; \n"
    result += "  begin\n"
    for input in inputs:
        if (input == "clk"):
            # The clock is driven by the clock instance, not by this task.
            continue
        if (input[-2:] == "_n"):
            result += "    " + input + " <= 1;\n"
        else:
            result += "    " + input + " <= 0;\n"
    result += "    #PERIOD\n"
    result += "    reset;\n"
    result += "  end\n"
    result += "endtask\n \n"
    return result
def createResetTask():
    """Emit a SystemVerilog ``reset`` task that pulses ``res_n`` low for one
    clock period (assumes an active-low reset named res_n exists)."""
    result = "task reset;\n"
    result += "  begin\n"
    result += "    @(negedge clk) res_n <= 0;\n"
    result += "    #PERIOD\n"
    result += "    @(negedge clk) res_n <= 1;\n"
    result += "  end\n"
    result += "endtask\n \n"
    return result
def createClockInstance():
    """Emit the PERIOD parameter and the clock-generator instantiation
    (assumes a ``clock`` module is available in the project)."""
    result = "parameter PERIOD = 20;\n"
    result += "clock #(.PERIOD(PERIOD)) clk_I (clk);\n"
    return result
def createTb (filename):
    """Build the complete testbench skeleton for the module in *filename*.

    Pipeline: read the source, strip comments, extract the module name and
    port list, then assemble declarations, clock, DUT instantiation and
    the initialise/reset tasks into one testbench module string.
    """
    file = readFile(filename)
    file = removeComments(file)
    modulename = getModuleName(file)
    portlist = getPortlist(file)
    inputs = getInputsAndOutputs(portlist)[0]
    outputs = getInputsAndOutputs(portlist)[1]
    result = ""
    result += "module " + modulename + "_tb;\n \n"
    # Declare inputs and outputs
    # (DUT inputs become regs the testbench drives; DUT outputs become wires.)
    for input in inputs:
        result += "reg " + input + "; \n"
    for output in outputs:
        result += "wire " + output + "; \n"
    # Instantiate clock and module
    result += createClockInstance()
    result += createInstantiation(modulename, portlist)
    # Create tasks
    result += "\n"
    result += createInitialiseTask(inputs)
    result += createResetTask()
    result += "\n" + "endmodule"
    return result
print("Hi!\nThis program can take your SystemVerilog module and create a skeleton for your testbench.\n")
print("Please note that this program currently does not support modules with parameters or any types other than wire and reg. Also note that all inputs must be wires and all outputs must be regs\n")
print("Please enter the name of the .sv sourcefile (if your file is called counter.sv, please enter counter)")
inp = input()
filename = inp + ".sv"
tbfilename = inp + "_tb.sv"
print("Reading " + filename + " and creating testbench...")
writeFile(tbfilename, (createTb(filename)))
print("Testbench skeleton saved as " + tbfilename) | tbgenerator.py | def readFile(filename):
f = open(filename, "r")
content = f.read()
f.close()
return content
def writeFile(filename, content):
f = open(filename, "w")
f.write(content)
f.close()
return
def removeComments(text): # Removes Comments (/**/ and //) in and before the portlist and whitespace at the beginning
result = ""
insideMultilineComment = False
insideOneLineComment = False
for i in range (0, len(text)):
charPair = text[i:i+2]
lastCharPair = text[i-1:i+1]
if (insideMultilineComment):
result += '' #"\n INSIDE MULTILINE COMMENT \n"
if (insideOneLineComment):
result += '' #"\n INSIDE ONELINE COMMENT \n"
if (charPair == "/*"):
insideMultilineComment = True
if (charPair == "//"):
insideOneLineComment = True
if ((not insideMultilineComment) and (not insideOneLineComment)):
result += text[i]
if (lastCharPair == "*/"):
insideMultilineComment = False
if (text[i] == '\n'):
insideOneLineComment = False
if (text[i] == ";"): # we only need the part up to the portlist
break
# now remove whitespace at beginning
start = result.find('m') # find 'module'
result = result[start:]
return result
def removeLengthSpecifiers(input):
output = []
stop = False
temp = ""
for element in input:
for char in element:
if (char == '['):
stop = True
if (stop == False and char != ' '):
temp += char
if (char == ']'):
stop = False
output.append(temp)
temp = ""
return output
def getInputsAndOutputs (portlist):
portlist = portlist + ','
inputs = [] # array of strings
outputs = [] # array of strings
temp = ""
inputTag = True # boolean True = input False = output
for char in portlist:
if (temp == "inputwire"):
inputTag = True
temp = ""
if (temp == "outputreg"):
inputTag = False
temp = ""
if (char == ','):
if (inputTag == True):
inputs.append(temp)
temp = ""
if (inputTag == False):
outputs.append(temp)
temp = ""
if ((char != ' ') and (char != '\n') and (char != '\t') and (char != ',')):
temp = temp + char
if (char == ']'):
temp = temp + " "
return [inputs, outputs]
def createInstantiation(modulename, portlist):
inputs = getInputsAndOutputs(portlist)[0]
outputs = getInputsAndOutputs(portlist)[1]
result = ""
# Remove lenght specifiers (z.B.: [63:0])
inputs = removeLengthSpecifiers(inputs)
outputs = removeLengthSpecifiers(outputs)
# Instantiate module
result += "\n" + modulename + " " + modulename + "_I (\n"
for input in inputs:
result += "." + input + "(" + input + "),\n"
for output in outputs:
result += "." + output + "(" + output + "),\n"
result = result[:-2]
result += ");"
return result
def getModuleName(file): # call removeComments first
temp = ""
for char in file:
if (temp == "module "):
temp = ""
if (char == ' ' and temp != "module"):
return temp
temp += char
def getPortlist (file):
portlist = ""
stop = True
for char in file:
if (char == ")"):
stop = True
if (stop == False):
portlist += char
if (char == "("):
stop = False
if (char == ";"):
break
return portlist
def createInitialiseTask(inputs):
inputs = removeLengthSpecifiers(inputs)
result = ""
result += "task initialise; \n"
result += " begin\n"
for input in inputs:
if (input == "clk"):
continue
if (input[-2:] == "_n"):
result += " " + input + " <= 1;\n"
else:
result += " " + input + " <= 0;\n"
result += " #PERIOD\n"
result += " reset;\n"
result += " end\n"
result += "endtask\n \n"
return result
def createResetTask():
result = "task reset;\n"
result += " begin\n"
result += " @(negedge clk) res_n <= 0;\n"
result += " #PERIOD\n"
result += " @(negedge clk) res_n <= 1;\n"
result += " end\n"
result += "endtask\n \n"
return result
def createClockInstance():
result = "parameter PERIOD = 20;\n"
result += "clock #(.PERIOD(PERIOD)) clk_I (clk);\n"
return result
def createTb (filename):
file = readFile(filename)
file = removeComments(file)
modulename = getModuleName(file)
portlist = getPortlist(file)
inputs = getInputsAndOutputs(portlist)[0]
outputs = getInputsAndOutputs(portlist)[1]
result = ""
result += "module " + modulename + "_tb;\n \n"
# Declare inputs and outputs
for input in inputs:
result += "reg " + input + "; \n"
for output in outputs:
result += "wire " + output + "; \n"
# Instantiate clock and module
result += createClockInstance()
result += createInstantiation(modulename, portlist)
# Create tasks
result += "\n"
result += createInitialiseTask(inputs)
result += createResetTask()
result += "\n" + "endmodule"
return result
print("Hi!\nThis program can take your SystemVerilog module and create a skeleton for your testbench.\n")
print("Please note that this program currently does not support modules with parameters or any types other than wire and reg. Also note that all inputs must be wires and all outputs must be regs\n")
print("Please enter the name of the .sv sourcefile (if your file is called counter.sv, please enter counter)")
inp = input()
filename = inp + ".sv"
tbfilename = inp + "_tb.sv"
print("Reading " + filename + " and creating testbench...")
writeFile(tbfilename, (createTb(filename)))
print("Testbench skeleton saved as " + tbfilename) | 0.193452 | 0.143758 |
from __future__ import print_function, division, absolute_import
import itertools
import numpy as np
import regreg.atoms.group_lasso as GL
import regreg.api as rr
import nose.tools as nt
from .test_seminorms import Solver, all_close, SolverFactory
from .test_cones import ConeSolverFactory
class GroupSolverFactory(SolverFactory):
    """SolverFactory specialized for group-lasso atoms: yields one configured
    Solver per combination of solver settings and candidate group structure."""

    # Two group layouts: 10 singleton groups, and irregular groups of size 2-4.
    group_choices = [np.arange(10),
                     np.array([1,1,2,2,2,3,3,4,4,4,4,5,5,6,6,6,6])]
    FISTA_choices = [True]
    L_choices = [0.3]
    coef_stop_choices = [False]

    def __init__(self, klass, mode):
        # mode is 'lagrange' or 'bound'; it selects which constructor
        # keyword the atom is built with in __iter__.
        self.klass = klass
        self.mode = mode

    def __iter__(self):
        # Cartesian product over all test settings inherited from
        # SolverFactory plus the group layouts above.
        for offset, FISTA, coef_stop, L, q, groups in itertools.product(self.offset_choices,
                                                                        self.FISTA_choices,
                                                                        self.coef_stop_choices,
                                                                        self.L_choices,
                                                                        self.quadratic_choices,
                                                                        self.group_choices):
            self.FISTA = FISTA
            self.coef_stop = coef_stop
            self.L = L
            if self.mode == 'lagrange':
                atom = self.klass(groups, lagrange=self.lagrange)
            else:
                atom = self.klass(groups, bound=self.bound)
            if q:
                # Small random linear term exercises the quadratic code path.
                atom.quadratic = rr.identity_quadratic(0,0,np.random.standard_normal(atom.shape)*0.02)
            if offset:
                atom.offset = 0.02 * np.random.standard_normal(atom.shape)
            solver = Solver(atom, interactive=self.interactive,
                            coef_stop=coef_stop,
                            FISTA=FISTA,
                            L=L)
            yield solver
class GroupConeSolverFactory(ConeSolverFactory):
    """ConeSolverFactory variant for group cones: like GroupSolverFactory
    but the atom takes only the group structure (no lagrange/bound)."""

    # Same two group layouts as GroupSolverFactory.
    group_choices = [np.arange(10),
                     np.array([1,1,2,2,2,3,3,4,4,4,4,5,5,6,6,6,6])]

    def __iter__(self):
        # Yield one configured Solver per combination of test settings.
        for offset, FISTA, coef_stop, L, q, groups in itertools.product(self.offset_choices,
                                                                        self.FISTA_choices,
                                                                        self.coef_stop_choices,
                                                                        self.L_choices,
                                                                        self.quadratic_choices,
                                                                        self.group_choices):
            self.FISTA = FISTA
            self.coef_stop = coef_stop
            self.L = L
            atom = self.klass(groups)
            if q:
                # Small random linear term exercises the quadratic code path.
                atom.quadratic = rr.identity_quadratic(0,0,np.random.standard_normal(atom.shape)*0.02)
            if offset:
                atom.offset = 0.02 * np.random.standard_normal(atom.shape)
            solver = Solver(atom, interactive=self.interactive,
                            coef_stop=coef_stop,
                            FISTA=FISTA,
                            L=L)
            yield solver
@np.testing.dec.slow
def test_proximal_maps(interactive=False):
    # Nose-style generator test: yields (callable, *args) tuples, one per
    # atom class / solver configuration.
    for klass in GL.conjugate_seminorm_pairs.keys():
        factory = GroupSolverFactory(klass, 'lagrange')
        for solver in factory:
            penalty = solver.atom
            dual = penalty.conjugate
            Z = solver.prox_center
            L = solver.L
            # Moreau decomposition: prox of the penalty and prox of its
            # conjugate (bound form) must be consistent.
            yield all_close, penalty.lagrange_prox(Z, lipschitz=L), Z-dual.bound_prox(Z*L)/L, 'testing lagrange_prox and bound_prox starting from atom\n %s ' % klass, None

            # A lagrange atom must reject a 'bound' attribute and vice versa.
            # some arguments of the constructor
            nt.assert_raises(AttributeError, setattr, penalty, 'bound', 4.)
            nt.assert_raises(AttributeError, setattr, dual, 'lagrange', 4.)
            nt.assert_raises(AttributeError, setattr, penalty, 'bound', 4.)
            nt.assert_raises(AttributeError, setattr, dual, 'lagrange', 4.)

            for t in solver.all_tests():
                yield t

        factory = GroupSolverFactory(klass, 'bound')
        for solver in factory:
            for t in solver.all_tests():
                yield t

    for klass in GL.conjugate_cone_pairs.keys():
        factory = GroupConeSolverFactory(klass)
        for solver in factory:
            for t in solver.all_tests():
                yield t
import itertools
import numpy as np
import regreg.atoms.group_lasso as GL
import regreg.api as rr
import nose.tools as nt
from .test_seminorms import Solver, all_close, SolverFactory
from .test_cones import ConeSolverFactory
class GroupSolverFactory(SolverFactory):
group_choices = [np.arange(10),
np.array([1,1,2,2,2,3,3,4,4,4,4,5,5,6,6,6,6])]
FISTA_choices = [True]
L_choices = [0.3]
coef_stop_choices = [False]
def __init__(self, klass, mode):
self.klass = klass
self.mode = mode
def __iter__(self):
for offset, FISTA, coef_stop, L, q, groups in itertools.product(self.offset_choices,
self.FISTA_choices,
self.coef_stop_choices,
self.L_choices,
self.quadratic_choices,
self.group_choices):
self.FISTA = FISTA
self.coef_stop = coef_stop
self.L = L
if self.mode == 'lagrange':
atom = self.klass(groups, lagrange=self.lagrange)
else:
atom = self.klass(groups, bound=self.bound)
if q:
atom.quadratic = rr.identity_quadratic(0,0,np.random.standard_normal(atom.shape)*0.02)
if offset:
atom.offset = 0.02 * np.random.standard_normal(atom.shape)
solver = Solver(atom, interactive=self.interactive,
coef_stop=coef_stop,
FISTA=FISTA,
L=L)
yield solver
class GroupConeSolverFactory(ConeSolverFactory):
group_choices = [np.arange(10),
np.array([1,1,2,2,2,3,3,4,4,4,4,5,5,6,6,6,6])]
def __iter__(self):
for offset, FISTA, coef_stop, L, q, groups in itertools.product(self.offset_choices,
self.FISTA_choices,
self.coef_stop_choices,
self.L_choices,
self.quadratic_choices,
self.group_choices):
self.FISTA = FISTA
self.coef_stop = coef_stop
self.L = L
atom = self.klass(groups)
if q:
atom.quadratic = rr.identity_quadratic(0,0,np.random.standard_normal(atom.shape)*0.02)
if offset:
atom.offset = 0.02 * np.random.standard_normal(atom.shape)
solver = Solver(atom, interactive=self.interactive,
coef_stop=coef_stop,
FISTA=FISTA,
L=L)
yield solver
@np.testing.dec.slow
def test_proximal_maps(interactive=False):
for klass in GL.conjugate_seminorm_pairs.keys():
factory = GroupSolverFactory(klass, 'lagrange')
for solver in factory:
penalty = solver.atom
dual = penalty.conjugate
Z = solver.prox_center
L = solver.L
yield all_close, penalty.lagrange_prox(Z, lipschitz=L), Z-dual.bound_prox(Z*L)/L, 'testing lagrange_prox and bound_prox starting from atom\n %s ' % klass, None
# some arguments of the constructor
nt.assert_raises(AttributeError, setattr, penalty, 'bound', 4.)
nt.assert_raises(AttributeError, setattr, dual, 'lagrange', 4.)
nt.assert_raises(AttributeError, setattr, penalty, 'bound', 4.)
nt.assert_raises(AttributeError, setattr, dual, 'lagrange', 4.)
for t in solver.all_tests():
yield t
factory = GroupSolverFactory(klass, 'bound')
for solver in factory:
for t in solver.all_tests():
yield t
for klass in GL.conjugate_cone_pairs.keys():
factory = GroupConeSolverFactory(klass)
for solver in factory:
for t in solver.all_tests():
yield t | 0.704567 | 0.231842 |
import serial
import time
import random
import sys
s = None
num_leds = 93
play_time = 0
def flush_input():
s.flushInput()
def wait_for_ack():
while s.inWaiting() <= 0:
pass
s.read(s.inWaiting())
def command(cmd_text):
s.write((cmd_text + ':').encode())
wait_for_ack()
def setup():
global s, ticks, play_time
s = serial.Serial("/dev/ttyS0", 115200)
flush_input()
choose_colors()
command(":::pause:reset:erase")
if len(sys.argv) > 1:
command(sys.argv[1])
if len(sys.argv) > 2:
play_time = float(sys.argv[2])
command("6:zone:red:7:repeat:white:7:repeat:red:7:repeat:white:7:repeat")
command("5:zone:red:5:repeat:white:5:repeat:red:5:repeat:white:5:repeat")
command("4:zone:red:3:repeat:white:3:repeat:red:3:repeat:white:3:repeat")
command("3:zone:red:2:repeat:white:2:repeat:red:2:repeat:white:2:repeat")
command("2:zone:red:1:repeat:white:1:repeat:red:1:repeat:white:1:repeat")
num_colors = 12
colors = [ "red", "orange", "yellow", "ltgreen", "green", "seafoam", "cyan", "ltblue", "blue", "purple", "magenta", "pink", "black", "random" ]
effects = ['blink1','blink2','blink3','blink4','blink5','blink6']
effect_index = 0
chosen_colors = [0,1,2,3,4,5]
def random_color():
r = random.randrange(0, num_colors)
return colors[r]
def choose_colors():
global chosen_colors
for i in range(0, 6):
chosen_colors[i] = random_color()
def shift_colors():
global chosen_colors
for i in xrange(5, 0, -1):
chosen_colors[i] = chosen_colors[i-1]
def clear_colors():
for j in range(0,6):
chosen_colors[j] = "black"
def place_color(zone, color):
command(str(zone) + ":zone:" + color + ":blink" + str(zone) + ":flood")
def place_colors():
place_color(6, chosen_colors[0])
place_color(5, chosen_colors[1])
place_color(4, chosen_colors[2])
place_color(3, chosen_colors[3])
place_color(2, chosen_colors[4])
place_color(1, chosen_colors[5])
def display():
place_colors()
command("flush")
global idx
idx = -1
def do_zone(zone):
command(str(zone) + ":zone:rotate")
def loop():
for i in range(2, 7):
do_zone(i)
command("flush")
if __name__ == '__main__':
setup()
while True:
loop() | python/flower20.py |
import serial
import time
import random
import sys
s = None
num_leds = 93
play_time = 0
def flush_input():
s.flushInput()
def wait_for_ack():
while s.inWaiting() <= 0:
pass
s.read(s.inWaiting())
def command(cmd_text):
s.write((cmd_text + ':').encode())
wait_for_ack()
def setup():
global s, ticks, play_time
s = serial.Serial("/dev/ttyS0", 115200)
flush_input()
choose_colors()
command(":::pause:reset:erase")
if len(sys.argv) > 1:
command(sys.argv[1])
if len(sys.argv) > 2:
play_time = float(sys.argv[2])
command("6:zone:red:7:repeat:white:7:repeat:red:7:repeat:white:7:repeat")
command("5:zone:red:5:repeat:white:5:repeat:red:5:repeat:white:5:repeat")
command("4:zone:red:3:repeat:white:3:repeat:red:3:repeat:white:3:repeat")
command("3:zone:red:2:repeat:white:2:repeat:red:2:repeat:white:2:repeat")
command("2:zone:red:1:repeat:white:1:repeat:red:1:repeat:white:1:repeat")
num_colors = 12
colors = [ "red", "orange", "yellow", "ltgreen", "green", "seafoam", "cyan", "ltblue", "blue", "purple", "magenta", "pink", "black", "random" ]
effects = ['blink1','blink2','blink3','blink4','blink5','blink6']
effect_index = 0
chosen_colors = [0,1,2,3,4,5]
def random_color():
r = random.randrange(0, num_colors)
return colors[r]
def choose_colors():
global chosen_colors
for i in range(0, 6):
chosen_colors[i] = random_color()
def shift_colors():
global chosen_colors
for i in xrange(5, 0, -1):
chosen_colors[i] = chosen_colors[i-1]
def clear_colors():
for j in range(0,6):
chosen_colors[j] = "black"
def place_color(zone, color):
command(str(zone) + ":zone:" + color + ":blink" + str(zone) + ":flood")
def place_colors():
place_color(6, chosen_colors[0])
place_color(5, chosen_colors[1])
place_color(4, chosen_colors[2])
place_color(3, chosen_colors[3])
place_color(2, chosen_colors[4])
place_color(1, chosen_colors[5])
def display():
place_colors()
command("flush")
global idx
idx = -1
def do_zone(zone):
command(str(zone) + ":zone:rotate")
def loop():
for i in range(2, 7):
do_zone(i)
command("flush")
if __name__ == '__main__':
setup()
while True:
loop() | 0.211335 | 0.144722 |
import os, time;
from mWindowsAPI import *;
from mWindowsSDK import SECURITY_MANDATORY_MEDIUM_RID;
from mConsole import oConsole;
def fTestProcess(sComSpec, sThisProcessISA, sExpectedChildProcessISA):
oConsole.fOutput("=== Testing process related functions ", sPadding = "=");
oConsole.fOutput("* This process ISA: %s, child process ISA: %s" % (sThisProcessISA, sExpectedChildProcessISA));
uExitCode = 1234;
# Start cmd.exe and have it exit with a specific error code.
oConsole.fStatus(" * Calling cProcess.foCreateForBinaryPath(%s, [\"/K\", \"EXIT %s\"], bHidden = True)..." % (repr(sComSpec), uExitCode));
oTestProcess = cProcess.foCreateForBinaryPathAndArguments(sComSpec, ["/K", "EXIT %s" % uExitCode], bHidden = True);
try:
oConsole.fOutput(" + cProcess.foCreateForBinaryPath(%s, [\"/K\", \"EXIT %s\"], bHidden = True) = <cProcess #%X>" % (repr(sComSpec), uExitCode, oTestProcess.uId));
oTestProcess.fbWait();
assert not oTestProcess.bIsRunning, \
"Expected process not to be running.";
assert oTestProcess.uExitCode == uExitCode, \
"Expected exit code %d, got %d" % (uExitCode, oTestProcess.uExitCode);
# Restart cmd.exe and let it wait for input.
oTestProcess = cProcess.foCreateForBinaryPath(sComSpec, bMinimizedWindow = True);
time.sleep(1); # Allow process to start
oConsole.fOutput(" + Started test process %d..." % oTestProcess.uId);
# cProcess
assert oTestProcess.sISA == sExpectedChildProcessISA, \
"cProcess.sISA == %s instead of %s" % (oTestProcess.sISA, sExpectedChildProcessISA);
oConsole.fOutput(" * Testing cProcess...");
time.sleep(1); # Allow process to start
assert oTestProcess.bIsRunning, \
"Expected process to be running.";
sISAFromId = fsGetISAForProcessId(oTestProcess.uId);
assert sISAFromId == oTestProcess.sISA, \
"Process ISA %s != %s" % (sISAFromId, oTestProcess.sISA);
oConsole.fOutput(" + ISA = %s" % repr(oTestProcess.sISA));
oConsole.fOutput(" + Binary start address = 0x%08X" % oTestProcess.uBinaryStartAddress);
assert oTestProcess.sBinaryPath.lower() == sComSpec.lower(), \
"Expected binary path %s, got %s" % (repr(sComSpec), repr(oTestProcess.sBinaryPath));
assert oTestProcess.sBinaryName.lower() == os.path.basename(sComSpec).lower(), \
"Expected binary name %s, got %s" % (os.path.basename(sComSpec), oTestProcess.sBinaryName);
oConsole.fOutput(" + Binary Path = %s" % repr(oTestProcess.sBinaryPath));
oConsole.fOutput(" + Command line = %s" % repr(oTestProcess.sCommandLine));
assert oTestProcess.uIntegrityLevel == SECURITY_MANDATORY_MEDIUM_RID, \
"Expected process integrity level 0, got %d" % oTestProcess.uIntegrityLevel;
oConsole.fOutput(" + Integrity level = 0x%X" % oTestProcess.uIntegrityLevel);
oConsole.fOutput(" * Testing cProcess.fbSuspendThreads()...");
assert oTestProcess.fbSuspendThreads(), \
"Cannot suspend threads";
oConsole.fOutput(" * Testing cProcess.fbResumeThreads()...");
assert oTestProcess.fbResumeThreads(), \
"Cannot resume threads";
oConsole.fOutput(" * Testing cProcess.foGetPEB()...");
for sLine in oTestProcess.foGetPEB().fasDump("Process %d/0x%X PEB" % (oTestProcess.uId, oTestProcess.uId)):
oConsole.fOutput(" | " + sLine);
oConsole.fOutput(" * Testing cProcess.foGetProcessParameters()...");
for sLine in oTestProcess.foGetProcessParameters().fasDump("Process %d/0x%X ProcessParameters" % (oTestProcess.uId, oTestProcess.uId)):
oConsole.fOutput(" | " + sLine);
# cVirtualAllocation
oBinaryVirtualAllocation = cVirtualAllocation(oTestProcess.uId, oTestProcess.uBinaryStartAddress);
assert oBinaryVirtualAllocation.bAllocated, \
"Expected memory to be allocated at address 0x%08X" % oTestProcess.uBinaryStartAddress;
assert oBinaryVirtualAllocation.uStartAddress == oTestProcess.uBinaryStartAddress, \
"Expected binary virtual allocation to start at address 0x%08X, not 0x%08X" % \
(oTestProcess.uBinaryStartAddress, oBinaryVirtualAllocation.uStartAddress);
oConsole.fOutput(" + There are 0x%X bytes of memory allocated at address 0x%08X." % \
(oBinaryVirtualAllocation.uSize, oBinaryVirtualAllocation.uStartAddress));
# fdsGetProcessesExecutableName_by_uId (make sure test process binary is included)
oConsole.fOutput(" * Testing fdsGetProcessesExecutableName_by_uId...");
dsProcessesExecutableName_by_uId = fdsGetProcessesExecutableName_by_uId();
sProcessesExecutableName = dsProcessesExecutableName_by_uId.get(oTestProcess.uId);
assert sProcessesExecutableName, \
"Test process id %d/0x%X not found in process list (%s)!" % \
(oTestProcess.uId, oTestProcess.uId, ", ".join(["0x%X" % uId for uId in dsProcessesExecutableName_by_uId]));
assert sProcessesExecutableName.lower() == os.path.basename(sComSpec).lower(), \
"Text process %d/0x%X is reported to run %s" % (oTestProcess.uId, oTestProcess.uId, repr(sProcessesExecutableName));
# fuGetIntegrityLevelForProcessId
oConsole.fOutput(" * Testing oTestProcess.uIntegrityLevel...");
uProcessIntegrityLevel = oTestProcess.uIntegrityLevel;
assert uProcessIntegrityLevel is not None, \
"Test process %d/0x%X integrity level could not be determined!" % (oTestProcess.uId, oTestProcess.uId);
oConsole.fOutput(" + IntegrityLevel = 0x%X." % uProcessIntegrityLevel);
# fuGetMemoryUsageForProcessId
# cVirtualAllocation.fo0CreateForProcessId()
# cVirtualAllocation.fCommit()
# cVirtualAllocation.fFree()
oConsole.fOutput(" * Testing Memory management functions...");
uProcessMemoryUsage = fuGetMemoryUsageForProcessId(oTestProcess.uId);
oConsole.fOutput(" + Memory usage = 0x%X." % uProcessMemoryUsage);
uMemoryAllocationSize = 0x1230000;
oVirtualAllocation = cVirtualAllocation.fo0CreateForProcessId(oTestProcess.uId, uMemoryAllocationSize, bReserved = True);
assert oVirtualAllocation is not None, \
"Attempt to reserve 0x%X bytes failed" % uMemoryAllocationSize;
assert oVirtualAllocation.uSize == uMemoryAllocationSize, \
"Attempted to reserve 0x%X bytes, but got 0x%X" % (uMemoryAllocationSize, oVirtualAllocation.uSize);
uProcessMemoryUsageAfterReservation = oTestProcess.uMemoryUsage;
oConsole.fOutput(" + Memory usage after reserving 0x%X bytes = 0x%X." % \
(oVirtualAllocation.uSize, uProcessMemoryUsageAfterReservation));
# For unknown reasons, the memory usage can drop after reserving memory !?
# assert uProcessMemoryUsageAfterReservation >= uProcessMemoryUsage, \
# "Process memory usage was expected to be at least 0x%X after reservation, but is 0x%X" % \
# (uProcessMemoryUsage, uProcessMemoryUsageAfterReservation);
oVirtualAllocation.fCommit();
uProcessMemoryUsageAfterAllocation = oTestProcess.uMemoryUsage;
oConsole.fOutput(" + Memory usage after allocating 0x%X bytes = 0x%X." % \
(oVirtualAllocation.uSize, uProcessMemoryUsageAfterAllocation));
assert uProcessMemoryUsageAfterAllocation >= uProcessMemoryUsageAfterReservation + uMemoryAllocationSize, \
"Process memory usage was expected to be 0x%X after allocation, but is 0x%X" % \
(uProcessMemoryUsage + uMemoryAllocationSize, uProcessMemoryUsageAfterAllocation);
oVirtualAllocation.fFree();
uProcessMemoryUsageAfterFree = oTestProcess.uMemoryUsage;
oConsole.fOutput(" + Memory usage after freeing memory = 0x%X." % uProcessMemoryUsageAfterFree);
assert uProcessMemoryUsageAfterFree >= uProcessMemoryUsage, \
"Process memory usage was expected to be at least 0x%X after free, but is 0x%X" % \
(uProcessMemoryUsage, uProcessMemoryUsageAfterFree);
# cJobObject
# Also test if OOM error codes cause a Python MemoryError exception to be thrown.
oConsole.fOutput(" * Testing cJobObject...");
oJobObject = cJobObject(oTestProcess.uId);
oJobObject.fSetMaxTotalMemoryUse(uProcessMemoryUsageAfterFree + uMemoryAllocationSize / 2);
try:
cVirtualAllocation.fo0CreateForProcessId(oTestProcess.uId, uMemoryAllocationSize);
except MemoryError as oMemoryError:
pass;
else:
oConsole.fOutput(",".ljust(80, "-"));
for sLine in oVirtualAllocation.fasDump():
oConsole.fOutput("| %s" % sLine);
oConsole.fOutput("`".ljust(80, "-"));
raise AssertionError("Attempt to allocate 0x%X bytes succeeded despite JobObject memory allocation limits" % \
uMemoryAllocationSize);
oConsole.fOutput(" + JobObject memory limits applied correctly.");
# fbTerminateForProcessId
oConsole.fOutput(" * Testing fbTerminateForProcessId...");
fbTerminateForProcessId(oTestProcess.uId);
assert oTestProcess.bIsTerminated, \
"Test process was not terminated!";
# fdsGetProcessesExecutableName_by_uId (make sure test process is removed)
assert oTestProcess.uId not in fdsGetProcessesExecutableName_by_uId(), \
"Test process is still reported to exist after being terminated!?";
oConsole.fOutput(" + Test process was terminated.");
# TODO: add test for fDebugBreakForProcessId, fuCreateThreadForProcessIdAndAddress and fSendCtrlCForProcessId.
# This will require attaching a debugger to the process to determine a thread id, resume the application, or catch
# the exceptions these functions throw.
finally:
if oTestProcess.bIsRunning:
oTestProcess.fbTerminate(); | Tests/fTestProcess.py | import os, time;
from mWindowsAPI import *;
from mWindowsSDK import SECURITY_MANDATORY_MEDIUM_RID;
from mConsole import oConsole;
def fTestProcess(sComSpec, sThisProcessISA, sExpectedChildProcessISA):
oConsole.fOutput("=== Testing process related functions ", sPadding = "=");
oConsole.fOutput("* This process ISA: %s, child process ISA: %s" % (sThisProcessISA, sExpectedChildProcessISA));
uExitCode = 1234;
# Start cmd.exe and have it exit with a specific error code.
oConsole.fStatus(" * Calling cProcess.foCreateForBinaryPath(%s, [\"/K\", \"EXIT %s\"], bHidden = True)..." % (repr(sComSpec), uExitCode));
oTestProcess = cProcess.foCreateForBinaryPathAndArguments(sComSpec, ["/K", "EXIT %s" % uExitCode], bHidden = True);
try:
oConsole.fOutput(" + cProcess.foCreateForBinaryPath(%s, [\"/K\", \"EXIT %s\"], bHidden = True) = <cProcess #%X>" % (repr(sComSpec), uExitCode, oTestProcess.uId));
oTestProcess.fbWait();
assert not oTestProcess.bIsRunning, \
"Expected process not to be running.";
assert oTestProcess.uExitCode == uExitCode, \
"Expected exit code %d, got %d" % (uExitCode, oTestProcess.uExitCode);
# Restart cmd.exe and let it wait for input.
oTestProcess = cProcess.foCreateForBinaryPath(sComSpec, bMinimizedWindow = True);
time.sleep(1); # Allow process to start
oConsole.fOutput(" + Started test process %d..." % oTestProcess.uId);
# cProcess
assert oTestProcess.sISA == sExpectedChildProcessISA, \
"cProcess.sISA == %s instead of %s" % (oTestProcess.sISA, sExpectedChildProcessISA);
oConsole.fOutput(" * Testing cProcess...");
time.sleep(1); # Allow process to start
assert oTestProcess.bIsRunning, \
"Expected process to be running.";
sISAFromId = fsGetISAForProcessId(oTestProcess.uId);
assert sISAFromId == oTestProcess.sISA, \
"Process ISA %s != %s" % (sISAFromId, oTestProcess.sISA);
oConsole.fOutput(" + ISA = %s" % repr(oTestProcess.sISA));
oConsole.fOutput(" + Binary start address = 0x%08X" % oTestProcess.uBinaryStartAddress);
assert oTestProcess.sBinaryPath.lower() == sComSpec.lower(), \
"Expected binary path %s, got %s" % (repr(sComSpec), repr(oTestProcess.sBinaryPath));
assert oTestProcess.sBinaryName.lower() == os.path.basename(sComSpec).lower(), \
"Expected binary name %s, got %s" % (os.path.basename(sComSpec), oTestProcess.sBinaryName);
oConsole.fOutput(" + Binary Path = %s" % repr(oTestProcess.sBinaryPath));
oConsole.fOutput(" + Command line = %s" % repr(oTestProcess.sCommandLine));
assert oTestProcess.uIntegrityLevel == SECURITY_MANDATORY_MEDIUM_RID, \
"Expected process integrity level 0, got %d" % oTestProcess.uIntegrityLevel;
oConsole.fOutput(" + Integrity level = 0x%X" % oTestProcess.uIntegrityLevel);
oConsole.fOutput(" * Testing cProcess.fbSuspendThreads()...");
assert oTestProcess.fbSuspendThreads(), \
"Cannot suspend threads";
oConsole.fOutput(" * Testing cProcess.fbResumeThreads()...");
assert oTestProcess.fbResumeThreads(), \
"Cannot resume threads";
oConsole.fOutput(" * Testing cProcess.foGetPEB()...");
for sLine in oTestProcess.foGetPEB().fasDump("Process %d/0x%X PEB" % (oTestProcess.uId, oTestProcess.uId)):
oConsole.fOutput(" | " + sLine);
oConsole.fOutput(" * Testing cProcess.foGetProcessParameters()...");
for sLine in oTestProcess.foGetProcessParameters().fasDump("Process %d/0x%X ProcessParameters" % (oTestProcess.uId, oTestProcess.uId)):
oConsole.fOutput(" | " + sLine);
# cVirtualAllocation
oBinaryVirtualAllocation = cVirtualAllocation(oTestProcess.uId, oTestProcess.uBinaryStartAddress);
assert oBinaryVirtualAllocation.bAllocated, \
"Expected memory to be allocated at address 0x%08X" % oTestProcess.uBinaryStartAddress;
assert oBinaryVirtualAllocation.uStartAddress == oTestProcess.uBinaryStartAddress, \
"Expected binary virtual allocation to start at address 0x%08X, not 0x%08X" % \
(oTestProcess.uBinaryStartAddress, oBinaryVirtualAllocation.uStartAddress);
oConsole.fOutput(" + There are 0x%X bytes of memory allocated at address 0x%08X." % \
(oBinaryVirtualAllocation.uSize, oBinaryVirtualAllocation.uStartAddress));
# fdsGetProcessesExecutableName_by_uId (make sure test process binary is included)
oConsole.fOutput(" * Testing fdsGetProcessesExecutableName_by_uId...");
dsProcessesExecutableName_by_uId = fdsGetProcessesExecutableName_by_uId();
sProcessesExecutableName = dsProcessesExecutableName_by_uId.get(oTestProcess.uId);
assert sProcessesExecutableName, \
"Test process id %d/0x%X not found in process list (%s)!" % \
(oTestProcess.uId, oTestProcess.uId, ", ".join(["0x%X" % uId for uId in dsProcessesExecutableName_by_uId]));
assert sProcessesExecutableName.lower() == os.path.basename(sComSpec).lower(), \
"Text process %d/0x%X is reported to run %s" % (oTestProcess.uId, oTestProcess.uId, repr(sProcessesExecutableName));
# fuGetIntegrityLevelForProcessId
oConsole.fOutput(" * Testing oTestProcess.uIntegrityLevel...");
uProcessIntegrityLevel = oTestProcess.uIntegrityLevel;
assert uProcessIntegrityLevel is not None, \
"Test process %d/0x%X integrity level could not be determined!" % (oTestProcess.uId, oTestProcess.uId);
oConsole.fOutput(" + IntegrityLevel = 0x%X." % uProcessIntegrityLevel);
# fuGetMemoryUsageForProcessId
# cVirtualAllocation.fo0CreateForProcessId()
# cVirtualAllocation.fCommit()
# cVirtualAllocation.fFree()
oConsole.fOutput(" * Testing Memory management functions...");
uProcessMemoryUsage = fuGetMemoryUsageForProcessId(oTestProcess.uId);
oConsole.fOutput(" + Memory usage = 0x%X." % uProcessMemoryUsage);
uMemoryAllocationSize = 0x1230000;
oVirtualAllocation = cVirtualAllocation.fo0CreateForProcessId(oTestProcess.uId, uMemoryAllocationSize, bReserved = True);
assert oVirtualAllocation is not None, \
"Attempt to reserve 0x%X bytes failed" % uMemoryAllocationSize;
assert oVirtualAllocation.uSize == uMemoryAllocationSize, \
"Attempted to reserve 0x%X bytes, but got 0x%X" % (uMemoryAllocationSize, oVirtualAllocation.uSize);
uProcessMemoryUsageAfterReservation = oTestProcess.uMemoryUsage;
oConsole.fOutput(" + Memory usage after reserving 0x%X bytes = 0x%X." % \
(oVirtualAllocation.uSize, uProcessMemoryUsageAfterReservation));
# For unknown reasons, the memory usage can drop after reserving memory !?
# assert uProcessMemoryUsageAfterReservation >= uProcessMemoryUsage, \
# "Process memory usage was expected to be at least 0x%X after reservation, but is 0x%X" % \
# (uProcessMemoryUsage, uProcessMemoryUsageAfterReservation);
oVirtualAllocation.fCommit();
uProcessMemoryUsageAfterAllocation = oTestProcess.uMemoryUsage;
oConsole.fOutput(" + Memory usage after allocating 0x%X bytes = 0x%X." % \
(oVirtualAllocation.uSize, uProcessMemoryUsageAfterAllocation));
assert uProcessMemoryUsageAfterAllocation >= uProcessMemoryUsageAfterReservation + uMemoryAllocationSize, \
"Process memory usage was expected to be 0x%X after allocation, but is 0x%X" % \
(uProcessMemoryUsage + uMemoryAllocationSize, uProcessMemoryUsageAfterAllocation);
oVirtualAllocation.fFree();
uProcessMemoryUsageAfterFree = oTestProcess.uMemoryUsage;
oConsole.fOutput(" + Memory usage after freeing memory = 0x%X." % uProcessMemoryUsageAfterFree);
assert uProcessMemoryUsageAfterFree >= uProcessMemoryUsage, \
"Process memory usage was expected to be at least 0x%X after free, but is 0x%X" % \
(uProcessMemoryUsage, uProcessMemoryUsageAfterFree);
# cJobObject
# Also test if OOM error codes cause a Python MemoryError exception to be thrown.
oConsole.fOutput(" * Testing cJobObject...");
oJobObject = cJobObject(oTestProcess.uId);
oJobObject.fSetMaxTotalMemoryUse(uProcessMemoryUsageAfterFree + uMemoryAllocationSize / 2);
try:
cVirtualAllocation.fo0CreateForProcessId(oTestProcess.uId, uMemoryAllocationSize);
except MemoryError as oMemoryError:
pass;
else:
oConsole.fOutput(",".ljust(80, "-"));
for sLine in oVirtualAllocation.fasDump():
oConsole.fOutput("| %s" % sLine);
oConsole.fOutput("`".ljust(80, "-"));
raise AssertionError("Attempt to allocate 0x%X bytes succeeded despite JobObject memory allocation limits" % \
uMemoryAllocationSize);
oConsole.fOutput(" + JobObject memory limits applied correctly.");
# fbTerminateForProcessId
oConsole.fOutput(" * Testing fbTerminateForProcessId...");
fbTerminateForProcessId(oTestProcess.uId);
assert oTestProcess.bIsTerminated, \
"Test process was not terminated!";
# fdsGetProcessesExecutableName_by_uId (make sure test process is removed)
assert oTestProcess.uId not in fdsGetProcessesExecutableName_by_uId(), \
"Test process is still reported to exist after being terminated!?";
oConsole.fOutput(" + Test process was terminated.");
# TODO: add test for fDebugBreakForProcessId, fuCreateThreadForProcessIdAndAddress and fSendCtrlCForProcessId.
# This will require attaching a debugger to the process to determine a thread id, resume the application, or catch
# the exceptions these functions throw.
finally:
if oTestProcess.bIsRunning:
oTestProcess.fbTerminate(); | 0.359027 | 0.270155 |
import click
from aiida.cmdline.commands.cmd_data import verdi_data
from aiida.cmdline.params import arguments
from aiida.cmdline.utils import echo
from aiida.common.utils import get_mode_string
@verdi_data.group('remote')
def remote():
"""
Managing Remote_Data data types
"""
pass
@remote.command('ls')
@click.option('-l', '--long', 'ls_long', is_flag=True, default=False, help="Display also file metadata")
@click.option('-p', '--path', type=click.STRING, default='.', help="The folder to list")
@arguments.NODE()
def lsfunction(ls_long, path, node):
"""
List directory content on remote RemoteData objects.
"""
import datetime
try:
content = node.listdir_withattributes(path=path)
except (IOError, OSError) as err:
echo.echo_critical("Unable to access the remote folder"
" or file, check if it exists.\n"
"Original error: {}".format(str(err)))
for metadata in content:
if ls_long:
mtime = datetime.datetime.fromtimestamp(metadata['attributes'].st_mtime)
pre_line = '{} {:10} {} '.format(
get_mode_string(metadata['attributes'].st_mode), metadata['attributes'].st_size,
mtime.strftime("%d %b %Y %H:%M"))
click.echo(pre_line, nl=False)
if metadata['isdir']:
click.echo(click.style(metadata['name'], fg='blue'))
else:
click.echo(metadata['name'])
@remote.command('cat')
@arguments.NODE()
@click.argument('path', type=click.STRING)
def cat(node, path):
"""
Show the content of remote files in RemoteData objects.
"""
import os
import sys
import tempfile
try:
with tempfile.NamedTemporaryFile(delete=False) as tmpf:
tmpf.close()
node.getfile(path, tmpf.name)
with open(tmpf.name) as fobj:
sys.stdout.write(fobj.read())
except IOError as err:
click.echo("ERROR {}: {}".format(err.errno, str(err)), err=True)
sys.exit(1)
try:
os.remove(tmpf.name)
except OSError:
# If you cannot delete, ignore (maybe I didn't manage to create it in the first place
pass
@remote.command('show')
@arguments.NODE()
def show(node):
"""
Show information on a RemoteData object.
"""
click.echo("- Remote computer name:")
click.echo(" {}".format(node.get_computer_name()))
click.echo("- Remote folder full path:")
click.echo(" {}".format(node.get_remote_path())) | aiida/cmdline/commands/cmd_data/cmd_remote.py | import click
from aiida.cmdline.commands.cmd_data import verdi_data
from aiida.cmdline.params import arguments
from aiida.cmdline.utils import echo
from aiida.common.utils import get_mode_string
@verdi_data.group('remote')
def remote():
"""
Managing Remote_Data data types
"""
pass
@remote.command('ls')
@click.option('-l', '--long', 'ls_long', is_flag=True, default=False, help="Display also file metadata")
@click.option('-p', '--path', type=click.STRING, default='.', help="The folder to list")
@arguments.NODE()
def lsfunction(ls_long, path, node):
"""
List directory content on remote RemoteData objects.
"""
import datetime
try:
content = node.listdir_withattributes(path=path)
except (IOError, OSError) as err:
echo.echo_critical("Unable to access the remote folder"
" or file, check if it exists.\n"
"Original error: {}".format(str(err)))
for metadata in content:
if ls_long:
mtime = datetime.datetime.fromtimestamp(metadata['attributes'].st_mtime)
pre_line = '{} {:10} {} '.format(
get_mode_string(metadata['attributes'].st_mode), metadata['attributes'].st_size,
mtime.strftime("%d %b %Y %H:%M"))
click.echo(pre_line, nl=False)
if metadata['isdir']:
click.echo(click.style(metadata['name'], fg='blue'))
else:
click.echo(metadata['name'])
@remote.command('cat')
@arguments.NODE()
@click.argument('path', type=click.STRING)
def cat(node, path):
"""
Show the content of remote files in RemoteData objects.
"""
import os
import sys
import tempfile
try:
with tempfile.NamedTemporaryFile(delete=False) as tmpf:
tmpf.close()
node.getfile(path, tmpf.name)
with open(tmpf.name) as fobj:
sys.stdout.write(fobj.read())
except IOError as err:
click.echo("ERROR {}: {}".format(err.errno, str(err)), err=True)
sys.exit(1)
try:
os.remove(tmpf.name)
except OSError:
# If you cannot delete, ignore (maybe I didn't manage to create it in the first place
pass
@remote.command('show')
@arguments.NODE()
def show(node):
"""
Show information on a RemoteData object.
"""
click.echo("- Remote computer name:")
click.echo(" {}".format(node.get_computer_name()))
click.echo("- Remote folder full path:")
click.echo(" {}".format(node.get_remote_path())) | 0.176849 | 0.07383 |
import argparse
import codecs
import logging
import math
def ComputeMutualInfo(char_freq_file, bi_freq_file, output_file, filter_file):
"""Computes mutual information of multi-character terms
Use the corpus character and character bigram frequency information to compute
the mutual information for each bigram, compared to the characters being
placed randomly next to each other. The frequency files are those produced by
the charcount.py and char_bigram_count.py programs in this repo.
The list are filtered by dictionary terms from the term_frequency.py
program in this repo.
"""
logging.info('ComputeMutualInfo: {}, {}'.format(char_freq_file,
bi_freq_file))
(char_freq, char_count) = load_freq(char_freq_file)
(bigram_freq, bigram_count) = load_freq(bi_freq_file)
(filter_freq, filter_count) = load_freq(filter_file)
if char_count == 0 or bigram_count == 0:
logging.error('ComputeMutualInfo: count zero: {}, {}'.format(char_count,
bigram_count))
return
mi = {}
for term in filter_freq:
pc = 1.0
# Only compute mutual information for two-character terms
if len(term) == 2:
c1 = term[0]
c2 = term[1]
if c1 in char_freq and c2 in char_freq:
pc = (char_freq[c1] * char_freq[c2]) / (char_count * char_count)
b1 = '{}{}'.format(c1, c2)
fb1 = 0
if b1 in bigram_freq:
fb1 = bigram_freq[b1]
b2 = '{}{}'.format(c2, c1)
fb2 = 0
if b2 in bigram_freq and b1 != b2:
fb2 = bigram_freq[b2]
pb = (fb1 + fb2) / bigram_count
if pb > 0 and pc > 0:
mi[term] = math.log(pb / pc, 2)
write_mi(output_file, mi)
def load_freq(fname):
"""Reads the frequency distribution from a TSV file
"""
dist = {}
count = 0
with codecs.open(fname, 'r', 'utf-8') as f:
for line in f:
fields = line.split('\t')
if len(fields) > 1:
key = fields[0]
val = int(fields[1])
dist[key] = val
count += val
logging.info('load_freq: {} count loaded from {}'.format(count, fname))
return (dist, count)
def write_mi(fname, mi):
"""Writes the mutual informaiton distribution to the TSV output file
"""
with codecs.open(fname, 'w', 'utf-8') as f:
for t in mi:
f.write('{}\t{}\n'.format(t, mi[t]))
logging.info('wrote {} terms to {}'.format(len(mi), fname))
# For use from command line
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--char_freq_file',
dest='char_freq_file',
required=True,
help='Character frequency file')
parser.add_argument('--bigram_freq_file',
dest='bigram_freq_file',
required=True,
help='Character bigram frequency file')
parser.add_argument('--filter_file',
dest='filter_file',
required=True,
help='Filter file to restrict results to')
parser.add_argument('--output_file',
dest='output_file',
required=True,
help='Output file to write results to')
args = parser.parse_args()
ComputeMutualInfo(args.char_freq_file,
args.bigram_freq_file,
args.output_file,
args.filter_file)
if __name__ == "__main__":
main() | chinesenotes/mutualinfo.py | import argparse
import codecs
import logging
import math
def ComputeMutualInfo(char_freq_file, bi_freq_file, output_file, filter_file):
"""Computes mutual information of multi-character terms
Use the corpus character and character bigram frequency information to compute
the mutual information for each bigram, compared to the characters being
placed randomly next to each other. The frequency files are those produced by
the charcount.py and char_bigram_count.py programs in this repo.
The list are filtered by dictionary terms from the term_frequency.py
program in this repo.
"""
logging.info('ComputeMutualInfo: {}, {}'.format(char_freq_file,
bi_freq_file))
(char_freq, char_count) = load_freq(char_freq_file)
(bigram_freq, bigram_count) = load_freq(bi_freq_file)
(filter_freq, filter_count) = load_freq(filter_file)
if char_count == 0 or bigram_count == 0:
logging.error('ComputeMutualInfo: count zero: {}, {}'.format(char_count,
bigram_count))
return
mi = {}
for term in filter_freq:
pc = 1.0
# Only compute mutual information for two-character terms
if len(term) == 2:
c1 = term[0]
c2 = term[1]
if c1 in char_freq and c2 in char_freq:
pc = (char_freq[c1] * char_freq[c2]) / (char_count * char_count)
b1 = '{}{}'.format(c1, c2)
fb1 = 0
if b1 in bigram_freq:
fb1 = bigram_freq[b1]
b2 = '{}{}'.format(c2, c1)
fb2 = 0
if b2 in bigram_freq and b1 != b2:
fb2 = bigram_freq[b2]
pb = (fb1 + fb2) / bigram_count
if pb > 0 and pc > 0:
mi[term] = math.log(pb / pc, 2)
write_mi(output_file, mi)
def load_freq(fname):
"""Reads the frequency distribution from a TSV file
"""
dist = {}
count = 0
with codecs.open(fname, 'r', 'utf-8') as f:
for line in f:
fields = line.split('\t')
if len(fields) > 1:
key = fields[0]
val = int(fields[1])
dist[key] = val
count += val
logging.info('load_freq: {} count loaded from {}'.format(count, fname))
return (dist, count)
def write_mi(fname, mi):
"""Writes the mutual informaiton distribution to the TSV output file
"""
with codecs.open(fname, 'w', 'utf-8') as f:
for t in mi:
f.write('{}\t{}\n'.format(t, mi[t]))
logging.info('wrote {} terms to {}'.format(len(mi), fname))
# For use from command line
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--char_freq_file',
dest='char_freq_file',
required=True,
help='Character frequency file')
parser.add_argument('--bigram_freq_file',
dest='bigram_freq_file',
required=True,
help='Character bigram frequency file')
parser.add_argument('--filter_file',
dest='filter_file',
required=True,
help='Filter file to restrict results to')
parser.add_argument('--output_file',
dest='output_file',
required=True,
help='Output file to write results to')
args = parser.parse_args()
ComputeMutualInfo(args.char_freq_file,
args.bigram_freq_file,
args.output_file,
args.filter_file)
if __name__ == "__main__":
main() | 0.555676 | 0.452475 |
import random
import cv2
import numpy as np
from augraphy.augmentations.lib import add_noise
from augraphy.base.augmentation import Augmentation
class DustyInk(Augmentation):
"""Applies random noise to the ink itself, emulating a dusty or
inconsistent ink tone when followed by a blur.
:param intensity_range: Pair of bounds for intensity sample.
:type intensity_range: tuple, optional
:param color_range: Pair of bounds for 8-bit colors.
:type color_range: tuple, optional
:param value_range: Min value of pixel to enable dusty ink effect.
:type value_range: tuple, optional
:param p: Probability of this Augmentation being applied.
:type p: float, optional
"""
def __init__(
self,
intensity_range=(0.1, 0.2),
color_range=(0, 224),
value_range=(0, 5),
p=1,
):
"""Constructor method"""
super().__init__(p=p)
self.intensity_range = list(intensity_range)
self.color_range = list(color_range)
self.value_range = list(value_range)
# prevent second range value > first range value
self.intensity_range[0] = min(self.intensity_range[0], self.intensity_range[1])
self.color_range[0] = min(self.color_range[0], self.color_range[1])
self.value_range[0] = min(self.value_range[0], self.value_range[1])
# Constructs a string representation of this Augmentation.
def __repr__(self):
return f"DustyInk(intensity_range={self.intensity_range}, color_range={self.color_range}, p={self.p})"
# Applies the Augmentation to input data.
def __call__(self, image, layer=None, force=False):
if force or self.should_run():
image = image.copy()
min_value = random.randint(self.value_range[0], self.value_range[1])
apply_mask_fn = lambda x, y: y if (x < min_value) else x
apply_mask = np.vectorize(apply_mask_fn)
noise_mask = add_noise(image, self.intensity_range, self.color_range)
noise_mask = cv2.GaussianBlur(noise_mask, (3, 3), 0)
image = apply_mask(image, noise_mask)
return image | augraphy/augmentations/dustyink.py | import random
import cv2
import numpy as np
from augraphy.augmentations.lib import add_noise
from augraphy.base.augmentation import Augmentation
class DustyInk(Augmentation):
"""Applies random noise to the ink itself, emulating a dusty or
inconsistent ink tone when followed by a blur.
:param intensity_range: Pair of bounds for intensity sample.
:type intensity_range: tuple, optional
:param color_range: Pair of bounds for 8-bit colors.
:type color_range: tuple, optional
:param value_range: Min value of pixel to enable dusty ink effect.
:type value_range: tuple, optional
:param p: Probability of this Augmentation being applied.
:type p: float, optional
"""
def __init__(
self,
intensity_range=(0.1, 0.2),
color_range=(0, 224),
value_range=(0, 5),
p=1,
):
"""Constructor method"""
super().__init__(p=p)
self.intensity_range = list(intensity_range)
self.color_range = list(color_range)
self.value_range = list(value_range)
# prevent second range value > first range value
self.intensity_range[0] = min(self.intensity_range[0], self.intensity_range[1])
self.color_range[0] = min(self.color_range[0], self.color_range[1])
self.value_range[0] = min(self.value_range[0], self.value_range[1])
# Constructs a string representation of this Augmentation.
def __repr__(self):
return f"DustyInk(intensity_range={self.intensity_range}, color_range={self.color_range}, p={self.p})"
# Applies the Augmentation to input data.
def __call__(self, image, layer=None, force=False):
if force or self.should_run():
image = image.copy()
min_value = random.randint(self.value_range[0], self.value_range[1])
apply_mask_fn = lambda x, y: y if (x < min_value) else x
apply_mask = np.vectorize(apply_mask_fn)
noise_mask = add_noise(image, self.intensity_range, self.color_range)
noise_mask = cv2.GaussianBlur(noise_mask, (3, 3), 0)
image = apply_mask(image, noise_mask)
return image | 0.86988 | 0.401629 |
import subprocess
from uuid import getnode as get_mac
import datetime
import logging
from logging.handlers import RotatingFileHandler
import requests
import json
import netifaces
import os
logger = logging.getLogger()
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
file_handler = RotatingFileHandler('policies.log', 'a', 1000000, 1)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.info("Running apply policies script!")
try:
with open('/root/deviceInfo.json') as json_data:
d = json.load(json_data)
OPENAP_HOST=d["apiEndPoint"]
except:
OPENAP_HOST="https://staging-api.openap.io/"
def getMac():
logger.info("Getting mac")
mac = str(netifaces.ifaddresses('eth0')[netifaces.AF_LINK][0]["addr"]).upper()
logger.info("Mac is {}".format(mac))
return mac
try:
headers = {
'Content-Type': "application/json",
'Mac-Adress': getMac(),
}
url = "{}devices/getDevicePolicies".format(OPENAP_HOST)
response = requests.request("GET", url, headers=headers)
policy = json.loads(response.text)
logger.info("Policies downloaded, applying")
logger.info(policy)
logger.info("ebtables --flush")
os.system("ebtables --flush")
if policy["parameters"]["policy_type"]=="blacklist":
key_word = "DROP"
logger.info("ebtables -P FORWARD ACCEPT")
os.system("ebtables -P FORWARD ACCEPT")
if policy["parameters"]["policy_type"]=="whitelist":
key_word = "ACCEPT"
logger.info("ebtables -P FORWARD DROP")
os.system("ebtables -P FORWARD DROP")
for client in policy["parameters"]["clients"]:
if client["always"]:
logger.info("ebtables -A FORWARD -s {} -j {}".format(client["mac_address"],key_word))
os.system("ebtables -A FORWARD -s {} -j {}".format(client["mac_address"],key_word))
else:
date_from = datetime.datetime.strptime(client["from"],'%H:%M')
date_to = datetime.datetime.strptime(client["to"], '%H:%M')
date_now = datetime.datetime.now()
if date_from.time() > date_to.time():
if (date_now.time()<=date_from.time() and date_now.time()<=date_to.time()) or (date_now.time()>=date_from.time() and date_now.time()>=date_to.time()):
logger.info("ebtables -A FORWARD -s {} -j {}".format(client["mac_address"], key_word))
os.system("ebtables -A FORWARD -s {} -j {}".format(client["mac_address"],key_word))
else:
if date_now.time()>=date_from.time() and date_now.time()<=date_to.time():
logger.info("ebtables -A FORWARD -s {} -j {}".format(client["mac_address"], key_word))
os.system("ebtables -A FORWARD -s {} -j {}".format(client["mac_address"],key_word))
except:
logger.exception("Error") | code/applyPolicies.py | import subprocess
from uuid import getnode as get_mac
import datetime
import logging
from logging.handlers import RotatingFileHandler
import requests
import json
import netifaces
import os
logger = logging.getLogger()
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
file_handler = RotatingFileHandler('policies.log', 'a', 1000000, 1)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.info("Running apply policies script!")
try:
with open('/root/deviceInfo.json') as json_data:
d = json.load(json_data)
OPENAP_HOST=d["apiEndPoint"]
except:
OPENAP_HOST="https://staging-api.openap.io/"
def getMac():
logger.info("Getting mac")
mac = str(netifaces.ifaddresses('eth0')[netifaces.AF_LINK][0]["addr"]).upper()
logger.info("Mac is {}".format(mac))
return mac
try:
headers = {
'Content-Type': "application/json",
'Mac-Adress': getMac(),
}
url = "{}devices/getDevicePolicies".format(OPENAP_HOST)
response = requests.request("GET", url, headers=headers)
policy = json.loads(response.text)
logger.info("Policies downloaded, applying")
logger.info(policy)
logger.info("ebtables --flush")
os.system("ebtables --flush")
if policy["parameters"]["policy_type"]=="blacklist":
key_word = "DROP"
logger.info("ebtables -P FORWARD ACCEPT")
os.system("ebtables -P FORWARD ACCEPT")
if policy["parameters"]["policy_type"]=="whitelist":
key_word = "ACCEPT"
logger.info("ebtables -P FORWARD DROP")
os.system("ebtables -P FORWARD DROP")
for client in policy["parameters"]["clients"]:
if client["always"]:
logger.info("ebtables -A FORWARD -s {} -j {}".format(client["mac_address"],key_word))
os.system("ebtables -A FORWARD -s {} -j {}".format(client["mac_address"],key_word))
else:
date_from = datetime.datetime.strptime(client["from"],'%H:%M')
date_to = datetime.datetime.strptime(client["to"], '%H:%M')
date_now = datetime.datetime.now()
if date_from.time() > date_to.time():
if (date_now.time()<=date_from.time() and date_now.time()<=date_to.time()) or (date_now.time()>=date_from.time() and date_now.time()>=date_to.time()):
logger.info("ebtables -A FORWARD -s {} -j {}".format(client["mac_address"], key_word))
os.system("ebtables -A FORWARD -s {} -j {}".format(client["mac_address"],key_word))
else:
if date_now.time()>=date_from.time() and date_now.time()<=date_to.time():
logger.info("ebtables -A FORWARD -s {} -j {}".format(client["mac_address"], key_word))
os.system("ebtables -A FORWARD -s {} -j {}".format(client["mac_address"],key_word))
except:
logger.exception("Error") | 0.177847 | 0.041365 |
from urlparse import urljoin
from sqlalchemy import orm, inspect
from sqlalchemy.ext import hybrid
from ggrc import db
from ggrc.fulltext import attributes as ft_attributes
from ggrc.fulltext import mixin as ft_mixin
from ggrc.models import mixins
from ggrc.models import reflection
from ggrc.models import relationship
from ggrc.models.mixins import base
from ggrc.utils import get_url_root
from ggrc import builder
from ggrc.access_control import roleable
from ggrc_workflows.models import mixins as wf_mixins
def _query_filtered_by_contact(person):
"""Returns cycle required to reindex for sent persons."""
attrs = inspect(person).attrs
if any([attrs["email"].history.has_changes(),
attrs["name"].history.has_changes()]):
return Cycle.query.filter(Cycle.contact_id == person.id)
return []
class Cycle(roleable.Roleable,
relationship.Relatable,
mixins.WithContact,
wf_mixins.CycleStatusValidatedMixin,
mixins.Timeboxed,
mixins.Described,
mixins.Titled,
base.ContextRBAC,
mixins.Slugged,
mixins.Notifiable,
ft_mixin.Indexed,
db.Model):
"""Workflow Cycle model
"""
# pylint: disable=too-many-instance-attributes
__tablename__ = 'cycles'
_title_uniqueness = False
workflow_id = db.Column(
db.Integer,
db.ForeignKey('workflows.id', ondelete="CASCADE"),
nullable=False,
)
cycle_task_groups = db.relationship(
'CycleTaskGroup', backref='_cycle', cascade='all, delete-orphan')
cycle_task_group_object_tasks = db.relationship(
'CycleTaskGroupObjectTask', backref='cycle',
cascade='all, delete-orphan')
cycle_task_entries = db.relationship(
'CycleTaskEntry', backref='cycle', cascade='all, delete-orphan')
is_current = db.Column(db.Boolean,
default=True,
nullable=False)
next_due_date = db.Column(db.Date)
# This parameter is overridden by workflow backref, but is here to ensure
# pylint does not complain
_workflow = None
@hybrid.hybrid_property
def workflow(self):
"""Getter for workflow foreign key."""
return self._workflow
@workflow.setter
def workflow(self, workflow):
"""Set workflow foreign key and relationship."""
if not self._workflow and workflow:
relationship.Relationship(source=workflow, destination=self)
self._workflow = workflow
@property
def is_done(self):
"""Check if cycle's done
Overrides StatusValidatedMixin method because cycle's is_done state
depends on is_verification_needed flag
"""
if super(Cycle, self).is_done:
return True
if self.cycle_task_group_object_tasks:
return False
return True
@builder.simple_property
def folder(self):
"""Get the workflow folder."""
if self.workflow:
return self.workflow.folder
return ""
_api_attrs = reflection.ApiAttributes(
'workflow',
'cycle_task_groups',
'is_current',
'next_due_date',
reflection.Attribute('folder', create=False, update=False),
)
_aliases = {
"cycle_workflow": {
"display_name": "Workflow",
"filter_by": "_filter_by_cycle_workflow",
},
"contact": "Assignee",
"secondary_contact": None,
}
PROPERTY_TEMPLATE = u"cycle {}"
_fulltext_attrs = [
ft_attributes.DateFullTextAttr("due date", "next_due_date"),
"folder",
]
@property
def _task_assignees(self):
"""Property. Return the list of persons as assignee of related tasks."""
people = set()
for ctask in self.cycle_task_group_object_tasks:
people.update(ctask.get_persons_for_rolename("Task Assignees"))
return list(people)
@property
def _task_secondary_assignees(self):
"""Property. Returns people list as Secondary Assignee of related tasks."""
people = set()
for ctask in self.cycle_task_group_object_tasks:
people.update(ctask.get_persons_for_rolename("Task Secondary Assignees"))
return list(people)
AUTO_REINDEX_RULES = [
ft_mixin.ReindexRule("Person", _query_filtered_by_contact),
]
@classmethod
def _filter_by_cycle_workflow(cls, predicate):
"""Filter by cycle workflow."""
from ggrc_workflows.models.workflow import Workflow
return Workflow.query.filter(
(Workflow.id == cls.workflow_id) &
(predicate(Workflow.slug) | predicate(Workflow.title))
).exists()
@classmethod
def eager_query(cls):
"""Add cycle task groups to cycle eager query
This function adds cycle_task_groups as a join option when fetching cycles,
and makes sure we fetch all cycle related data needed for generating cycle
json, in one query.
Returns:
a query object with cycle_task_groups added to joined load options.
"""
query = super(Cycle, cls).eager_query()
return query.options(
orm.joinedload('cycle_task_groups'),
orm.Load(cls).joinedload("workflow").undefer_group(
"Workflow_complete"
),
)
@classmethod
def indexed_query(cls):
return super(Cycle, cls).indexed_query().options(
orm.Load(cls).load_only("next_due_date"),
orm.Load(cls).joinedload("workflow").undefer_group(
"Workflow_complete"
),
)
def _get_cycle_url(self, widget_name):
return urljoin(
get_url_root(),
"workflows/{workflow_id}#{widget_name}/cycle/{cycle_id}".format(
workflow_id=self.workflow.id,
cycle_id=self.id,
widget_name=widget_name
)
)
@property
def cycle_url(self):
return self._get_cycle_url("current")
@property
def cycle_inactive_url(self):
return self._get_cycle_url("history")
def log_json(self):
out_json = super(Cycle, self).log_json()
out_json["folder"] = self.folder
return out_json | src/ggrc_workflows/models/cycle.py | from urlparse import urljoin
from sqlalchemy import orm, inspect
from sqlalchemy.ext import hybrid
from ggrc import db
from ggrc.fulltext import attributes as ft_attributes
from ggrc.fulltext import mixin as ft_mixin
from ggrc.models import mixins
from ggrc.models import reflection
from ggrc.models import relationship
from ggrc.models.mixins import base
from ggrc.utils import get_url_root
from ggrc import builder
from ggrc.access_control import roleable
from ggrc_workflows.models import mixins as wf_mixins
def _query_filtered_by_contact(person):
"""Returns cycle required to reindex for sent persons."""
attrs = inspect(person).attrs
if any([attrs["email"].history.has_changes(),
attrs["name"].history.has_changes()]):
return Cycle.query.filter(Cycle.contact_id == person.id)
return []
class Cycle(roleable.Roleable,
relationship.Relatable,
mixins.WithContact,
wf_mixins.CycleStatusValidatedMixin,
mixins.Timeboxed,
mixins.Described,
mixins.Titled,
base.ContextRBAC,
mixins.Slugged,
mixins.Notifiable,
ft_mixin.Indexed,
db.Model):
"""Workflow Cycle model
"""
# pylint: disable=too-many-instance-attributes
__tablename__ = 'cycles'
_title_uniqueness = False
workflow_id = db.Column(
db.Integer,
db.ForeignKey('workflows.id', ondelete="CASCADE"),
nullable=False,
)
cycle_task_groups = db.relationship(
'CycleTaskGroup', backref='_cycle', cascade='all, delete-orphan')
cycle_task_group_object_tasks = db.relationship(
'CycleTaskGroupObjectTask', backref='cycle',
cascade='all, delete-orphan')
cycle_task_entries = db.relationship(
'CycleTaskEntry', backref='cycle', cascade='all, delete-orphan')
is_current = db.Column(db.Boolean,
default=True,
nullable=False)
next_due_date = db.Column(db.Date)
# This parameter is overridden by workflow backref, but is here to ensure
# pylint does not complain
_workflow = None
@hybrid.hybrid_property
def workflow(self):
"""Getter for workflow foreign key."""
return self._workflow
@workflow.setter
def workflow(self, workflow):
"""Set workflow foreign key and relationship."""
if not self._workflow and workflow:
relationship.Relationship(source=workflow, destination=self)
self._workflow = workflow
@property
def is_done(self):
"""Check if cycle's done
Overrides StatusValidatedMixin method because cycle's is_done state
depends on is_verification_needed flag
"""
if super(Cycle, self).is_done:
return True
if self.cycle_task_group_object_tasks:
return False
return True
@builder.simple_property
def folder(self):
"""Get the workflow folder."""
if self.workflow:
return self.workflow.folder
return ""
_api_attrs = reflection.ApiAttributes(
'workflow',
'cycle_task_groups',
'is_current',
'next_due_date',
reflection.Attribute('folder', create=False, update=False),
)
_aliases = {
"cycle_workflow": {
"display_name": "Workflow",
"filter_by": "_filter_by_cycle_workflow",
},
"contact": "Assignee",
"secondary_contact": None,
}
PROPERTY_TEMPLATE = u"cycle {}"
_fulltext_attrs = [
ft_attributes.DateFullTextAttr("due date", "next_due_date"),
"folder",
]
@property
def _task_assignees(self):
"""Property. Return the list of persons as assignee of related tasks."""
people = set()
for ctask in self.cycle_task_group_object_tasks:
people.update(ctask.get_persons_for_rolename("Task Assignees"))
return list(people)
@property
def _task_secondary_assignees(self):
"""Property. Returns people list as Secondary Assignee of related tasks."""
people = set()
for ctask in self.cycle_task_group_object_tasks:
people.update(ctask.get_persons_for_rolename("Task Secondary Assignees"))
return list(people)
AUTO_REINDEX_RULES = [
ft_mixin.ReindexRule("Person", _query_filtered_by_contact),
]
@classmethod
def _filter_by_cycle_workflow(cls, predicate):
"""Filter by cycle workflow."""
from ggrc_workflows.models.workflow import Workflow
return Workflow.query.filter(
(Workflow.id == cls.workflow_id) &
(predicate(Workflow.slug) | predicate(Workflow.title))
).exists()
@classmethod
def eager_query(cls):
"""Add cycle task groups to cycle eager query
This function adds cycle_task_groups as a join option when fetching cycles,
and makes sure we fetch all cycle related data needed for generating cycle
json, in one query.
Returns:
a query object with cycle_task_groups added to joined load options.
"""
query = super(Cycle, cls).eager_query()
return query.options(
orm.joinedload('cycle_task_groups'),
orm.Load(cls).joinedload("workflow").undefer_group(
"Workflow_complete"
),
)
@classmethod
def indexed_query(cls):
return super(Cycle, cls).indexed_query().options(
orm.Load(cls).load_only("next_due_date"),
orm.Load(cls).joinedload("workflow").undefer_group(
"Workflow_complete"
),
)
def _get_cycle_url(self, widget_name):
return urljoin(
get_url_root(),
"workflows/{workflow_id}#{widget_name}/cycle/{cycle_id}".format(
workflow_id=self.workflow.id,
cycle_id=self.id,
widget_name=widget_name
)
)
@property
def cycle_url(self):
return self._get_cycle_url("current")
@property
def cycle_inactive_url(self):
return self._get_cycle_url("history")
def log_json(self):
out_json = super(Cycle, self).log_json()
out_json["folder"] = self.folder
return out_json | 0.780495 | 0.08438 |
import os
import argparse
from model_init import init_dataloader
from mindspore import dataset as ds
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_path', default=None, help='Location of data.')
parser.add_argument('--data_output_path', default=None, help='Location of converted data.')
parser.add_argument('--label_classses_output_path', default=None,
help='Location of converted label and classes.')
parser.add_argument('-its', '--iterations', type=int, help='number of episodes per epoch, default=100',
default=100)
parser.add_argument('-cTr', '--classes_per_it_tr', type=int,
help='number of random classes per episode for training, default=60', default=20)
parser.add_argument('-nsTr', '--num_support_tr', type=int,
help='number of samples per class to use as support for training, default=5', default=5)
parser.add_argument('-nqTr', '--num_query_tr', type=int,
help='number of samples per class to use as query for training, default=5', default=5)
parser.add_argument('-cVa', '--classes_per_it_val', type=int,
help='number of random classes per episode for validation, default=5', default=5)
parser.add_argument('-nsVa', '--num_support_val', type=int,
help='number of samples per class to use as support for validation, default=5', default=5)
parser.add_argument('-nqVa', '--num_query_val', type=int,
help='number of samples per class to use as query for validation, default=15', default=15)
def convert_img_to_bin(options_, root, output_path, label_classses_path):
'''
convert the image to binary file
'''
val_dataloader = init_dataloader(options_, 'val', root)
inp = ds.GeneratorDataset(val_dataloader, column_names=['data', 'label', 'classes'])
i = 1
for batch in inp.create_dict_iterator():
x = batch['data']
y = batch['label']
classes = batch['classes']
x_array = x.asnumpy()
y_array = y.asnumpy()
classes_array = classes.asnumpy()
x_array.tofile(output_path + os.sep +"data_" + str(i) + ".bin")
y_array.tofile(label_classses_path + os.sep +"label_" + str(i) + ".bin")
classes_array.tofile(label_classses_path + os.sep +"classes_" + str(i) + ".bin")
i = i + 1
if __name__ == '__main__':
options = parser.parse_args()
convert_img_to_bin(options, options.dataset_path, options.data_output_path, options.label_classses_output_path) | research/cv/ProtoNet/preprocess.py | import os
import argparse
from model_init import init_dataloader
from mindspore import dataset as ds
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_path', default=None, help='Location of data.')
parser.add_argument('--data_output_path', default=None, help='Location of converted data.')
parser.add_argument('--label_classses_output_path', default=None,
help='Location of converted label and classes.')
parser.add_argument('-its', '--iterations', type=int, help='number of episodes per epoch, default=100',
default=100)
parser.add_argument('-cTr', '--classes_per_it_tr', type=int,
help='number of random classes per episode for training, default=60', default=20)
parser.add_argument('-nsTr', '--num_support_tr', type=int,
help='number of samples per class to use as support for training, default=5', default=5)
parser.add_argument('-nqTr', '--num_query_tr', type=int,
help='number of samples per class to use as query for training, default=5', default=5)
parser.add_argument('-cVa', '--classes_per_it_val', type=int,
help='number of random classes per episode for validation, default=5', default=5)
parser.add_argument('-nsVa', '--num_support_val', type=int,
help='number of samples per class to use as support for validation, default=5', default=5)
parser.add_argument('-nqVa', '--num_query_val', type=int,
help='number of samples per class to use as query for validation, default=15', default=15)
def convert_img_to_bin(options_, root, output_path, label_classses_path):
'''
convert the image to binary file
'''
val_dataloader = init_dataloader(options_, 'val', root)
inp = ds.GeneratorDataset(val_dataloader, column_names=['data', 'label', 'classes'])
i = 1
for batch in inp.create_dict_iterator():
x = batch['data']
y = batch['label']
classes = batch['classes']
x_array = x.asnumpy()
y_array = y.asnumpy()
classes_array = classes.asnumpy()
x_array.tofile(output_path + os.sep +"data_" + str(i) + ".bin")
y_array.tofile(label_classses_path + os.sep +"label_" + str(i) + ".bin")
classes_array.tofile(label_classses_path + os.sep +"classes_" + str(i) + ".bin")
i = i + 1
if __name__ == '__main__':
options = parser.parse_args()
convert_img_to_bin(options, options.dataset_path, options.data_output_path, options.label_classses_output_path) | 0.413004 | 0.118232 |
"""Tests for tensorflow.python.framework.importer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def _unknown_shape(op):
  """Shape function that reports an unknown shape for every output of `op`."""
  shapes = []
  for _ in op.outputs:
    shapes.append(tensor_shape.unknown_shape())
  return shapes
# NOTE(cwhipkey): Dummy shape registration for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
_TEST_OP_NAMES = (
    "If",
    "Iff",
    "Ii",
    "Iif",
    "Iii",
    "In",
    "Iri",
    "None",
    "Of",
    "Oi",
    "Oif",
    "Oii",
    "OpWithDefaultAttr",
    "OpWithFutureDefaultAttr",
    "Or",
    "Otl",
    "Unary",
)
for _name in _TEST_OP_NAMES:
  ops.RegisterShape(_name)(_unknown_shape)
# OpDefs for the dummy test ops above, registered in the Python op-def
# registry so import_graph_def can look them up and validate each node's
# attrs and inputs against them.
#
# BUG FIX: the 'Otl' op's list-type attr was named 'T', but its output_arg
# references type_list_attr 't' (and testBasic sets attr key 't' on nodes of
# this op). The attr name must be 't' for the OpDef to be self-consistent.
_op_list = op_def_pb2.OpList()
text_format.Merge("""
  op {
    name: 'None'
  }
  op {
    name: 'Oi'
    output_arg { name: 'a' type: DT_INT32 }
  }
  op {
    name: 'Or'
    output_arg { name: 'a' type: DT_INT32 is_ref: true }
  }
  op {
    name: 'Of'
    output_arg { name: 'a' type: DT_FLOAT }
  }
  op {
    name: 'Ii'
    input_arg { name: 'a' type: DT_INT32 }
  }
  op {
    name: 'If'
    input_arg { name: 'a' type: DT_FLOAT }
  }
  op {
    name: 'Oii'
    output_arg { name: 'a' type: DT_INT32 }
    output_arg { name: 'b' type: DT_INT32 }
  }
  op {
    name: 'Oif'
    output_arg { name: 'a' type: DT_INT32 }
    output_arg { name: 'b' type: DT_FLOAT }
  }
  op {
    name: 'Iii'
    input_arg { name: 'a' type: DT_INT32 }
    input_arg { name: 'b' type: DT_INT32 }
  }
  op {
    name: 'Iff'
    input_arg { name: 'a' type: DT_FLOAT }
    input_arg { name: 'b' type: DT_FLOAT }
  }
  op {
    name: 'Iif'
    input_arg { name: 'a' type: DT_INT32 }
    input_arg { name: 'b' type: DT_FLOAT }
  }
  op {
    name: 'Iri'
    input_arg { name: 'a' type: DT_INT32 is_ref: true }
    input_arg { name: 'b' type: DT_INT32 }
  }
  op {
    name: 'In'
    input_arg { name: 'a' number_attr: 'N' type_attr: 'T' }
    attr { name: 'N' type: 'int' minimum: 1 }
    attr { name: 'T' type: 'type' }
  }
  op {
    name: 'Otl'
    output_arg { name: 'a' type_list_attr: 't' }
    attr { name: 't' type: 'list(type)' minimum: 1 }
  }
  op {
    name: 'Unary'
    input_arg { name: 'a' type_attr: 'T' }
    output_arg { name: 'b' type_attr: 'T' }
    attr { name: 'T' type: 'type' }
  }
  op {
    name: 'OpWithDefaultAttr'
    output_arg { name: 'a' type: DT_INT32 }
    attr { name: 'default_float' type: 'float' default_value { f: 123.0 } }
  }
  op {
    name: 'OpWithFutureDefaultAttr'
  }
""", _op_list)
op_def_registry.register_op_list(_op_list)
# NOTE(mrry): Dummy shape registrations for ops used in the tests.
# NOTE(review): these names were already registered above with
# _unknown_shape — confirm that re-registering with None is permitted here.
for op_def in _op_list.op:
  ops.RegisterShape(op_def.name)(None)
class ImportGraphDefTest(test.TestCase):
def _MakeGraphDef(self,
text,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER):
text = "versions: { producer: %d min_consumer: %d };\n%s" % (producer,
min_consumer,
text)
ret = graph_pb2.GraphDef()
text_format.Merge(text, ret)
return ret
  def testBasic(self):
    """Imports a 4-node graph and checks wiring, dtypes, names, and op_defs."""
    with ops.Graph().as_default():
      # A and B each produce (int32, float32); C consumes the int32 pair and
      # D the float32 pair, exercising multi-output endpoint resolution.
      a, b, c, d = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'Oif' }
          node { name: 'B' op: 'Otl'
                 attr { key: 't'
                        value { list { type: DT_INT32 type: DT_FLOAT } } } }
          node { name: 'C' op: 'In'
                 attr { key: 'N' value { i: 2 } }
                 attr { key: 'T' value { type: DT_INT32 } }
                 input: 'A:0' input: 'B:0' }
          node { name: 'D' op: 'In'
                 attr { key: 'N' value { i: 2 } }
                 attr { key: 'T' value { type: DT_FLOAT } }
                 input: 'A:1' input: 'B:1' }
          """),
          return_elements=["A", "B", "C", "D"],
          name="import")
      # Assert that the import process creates distinct tensors.
      self.assertNotEqual(a.outputs[0].name, a.outputs[1].name)
      self.assertNotEqual(b.outputs[0].name, b.outputs[1].name)
      self.assertNotEqual(a.outputs[0].name, b.outputs[0].name)
      self.assertNotEqual(a.outputs[0].name, b.outputs[1].name)
      self.assertNotEqual(a.outputs[1].name, b.outputs[0].name)
      self.assertNotEqual(a.outputs[1].name, b.outputs[1].name)
      # Assert that the ops are connected according to the GraphDef topology.
      self.assertEqual(c.inputs[0], a.outputs[0])
      self.assertEqual(c.inputs[1], b.outputs[0])
      self.assertEqual(d.inputs[0], a.outputs[1])
      self.assertEqual(d.inputs[1], b.outputs[1])
      # Check the types of the returned ops and tensors.
      self.assertEqual(a.type, "Oif")
      self.assertEqual(b.type, "Otl")
      self.assertEqual(c.type, "In")
      self.assertEqual(d.type, "In")
      self.assertEqual(a.outputs[0].dtype, dtypes.int32)
      self.assertEqual(a.outputs[1].dtype, dtypes.float32)
      self.assertEqual(b.outputs[0].dtype, dtypes.int32)
      self.assertEqual(b.outputs[1].dtype, dtypes.float32)
      # Check the names of the returned ops (prefixed with the import scope).
      self.assertEqual(a.name, "import/A")
      self.assertEqual(b.name, "import/B")
      self.assertEqual(c.name, "import/C")
      self.assertEqual(d.name, "import/D")
      # Check that the op_def is still available.
      self.assertNotEqual(None, a.op_def)
def testInputMap(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oii' }
node { name: 'B' op: 'Oii' }
node { name: 'C' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:1' input: 'B:1' }
"""),
input_map={"A:0": feed_a_0,
"B:1": feed_b_1},
return_elements=["A", "B", "C", "D"])
self.assertEqual(c.inputs[0], feed_a_0)
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], feed_b_1)
def testInputMapBytes(self):
  """`input_map` / `return_elements` also accept bytes keys and names."""
  with ops.Graph().as_default():
    feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
    feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
    a, b, c, d = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'Oii' }
        node { name: 'B' op: 'Oii' }
        node { name: 'C' op: 'In'
               attr { key: 'N' value { i: 2 } }
               attr { key: 'T' value { type: DT_INT32 } }
               input: 'A:0' input: 'B:0' }
        node { name: 'D' op: 'In'
               attr { key: 'N' value { i: 2 } }
               attr { key: 'T' value { type: DT_INT32 } }
               input: 'A:1' input: 'B:1' }
        """),
        input_map={b"A:0": feed_a_0,
                   b"B:1": feed_b_1},
        return_elements=[b"A", b"B", b"C", b"D"])
    self.assertEqual(c.inputs[0], feed_a_0)
    self.assertEqual(c.inputs[1], b.outputs[0])
    self.assertEqual(d.inputs[0], a.outputs[1])
    self.assertEqual(d.inputs[1], feed_b_1)
def testInputMapUnicode(self):
  """`input_map` / `return_elements` also accept unicode keys and names."""
  with ops.Graph().as_default():
    feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
    feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
    a, b, c, d = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'Oii' }
        node { name: 'B' op: 'Oii' }
        node { name: 'C' op: 'In'
               attr { key: 'N' value { i: 2 } }
               attr { key: 'T' value { type: DT_INT32 } }
               input: 'A:0' input: 'B:0' }
        node { name: 'D' op: 'In'
               attr { key: 'N' value { i: 2 } }
               attr { key: 'T' value { type: DT_INT32 } }
               input: 'A:1' input: 'B:1' }
        """),
        input_map={u"A:0": feed_a_0,
                   u"B:1": feed_b_1},
        return_elements=[u"A", u"B", u"C", u"D"])
    self.assertEqual(c.inputs[0], feed_a_0)
    self.assertEqual(c.inputs[1], b.outputs[0])
    self.assertEqual(d.inputs[0], a.outputs[1])
    self.assertEqual(d.inputs[1], feed_b_1)
def testImplicitZerothOutput(self):
  """A GraphDef input 'A' (no ':index') means output 0 of op A."""
  with ops.Graph().as_default():
    a, b = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'Oii' }
        node { name: 'B' op: 'Ii' input: 'A' }
        """),
        return_elements=["A", "B"])
    self.assertEqual(b.inputs[0], a.outputs[0])
def testInputMapImplicitZerothOutput(self):
  """An `input_map` key 'A' (no ':index') maps tensor 'A:0'."""
  with ops.Graph().as_default():
    feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
    b, = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'Oii' }
        node { name: 'B' op: 'Ii' input: 'A:0' }
        """),
        input_map={"A": feed_a_0},
        return_elements=["B"])
    self.assertEqual(b.inputs[0], feed_a_0)
def testWithControlDependency(self):
  """A '^A' input in the GraphDef becomes a control input on import."""
  with ops.Graph().as_default():
    a, b = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'None' }
        node { name: 'B' op: 'None' input: '^A' }
        """),
        return_elements=["A", "B"])
    self.assertEqual(b.control_inputs, [a])
def testWithRefs(self):
  """Ref-typed outputs connect to both ref and non-ref inputs."""
  with ops.Graph().as_default():
    a, b, c, d = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'Or' }
        node { name: 'B' op: 'Oi' }
        node { name: 'C' op: 'Iii' input: 'A:0' input: 'B:0' }
        node { name: 'D' op: 'Iri' input: 'A:0' input: 'B:0' }
        """),
        return_elements=["A", "B", "C", "D"])
    self.assertEqual(c.inputs[0], a.outputs[0])
    self.assertEqual(c.inputs[1], b.outputs[0])
    self.assertEqual(d.inputs[0], a.outputs[0])
    self.assertEqual(d.inputs[1], b.outputs[0])
    # 'Or' produces an int32 ref; 'Iii' accepts it as plain int32 while
    # 'Iri' keeps the ref dtype on its first input.
    self.assertEqual(a.outputs[0].dtype, dtypes.int32_ref)
    self.assertEqual(c._input_dtypes, [dtypes.int32, dtypes.int32])
    self.assertEqual(c.outputs, [])
    self.assertEqual(d._input_dtypes, [dtypes.int32_ref, dtypes.int32])
    self.assertEqual(d.outputs, [])
def testCyclic(self):
  """A GraphDef with a cycle (A -> B -> A) imports with both edges wired."""
  with ops.Graph().as_default():
    a, b = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'Unary'
               attr { key: 'T' value { type: DT_INT32 } } input: 'B:0' }
        node { name: 'B' op: 'Unary'
               attr { key: 'T' value { type: DT_INT32 } } input: 'A:0' }
        """),
        return_elements=["A", "B"])
    self.assertEqual(a.inputs[0], b.outputs[0])
    self.assertEqual(b.inputs[0], a.outputs[0])
def testTypeMismatchInGraphDef(self):
  """Importing a graph whose edge dtypes disagree raises ValueError."""
  with ops.Graph().as_default():
    with self.assertRaises(ValueError) as e:
      importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'Oi' }
          node { name: 'B' op: 'If' input: 'A:0' }
          """))
    # assertIn reports both operands on failure, unlike assertTrue(x in y).
    self.assertIn(
        "Cannot convert a tensor of type int32 to an input of type float",
        str(e.exception))
def testShapeWhitelist(self):
  """A whitelisted op may declare a wrong `_output_shapes` attr."""
  # Barrier's shape is an output vector of 2, but the
  # graph says it's a scalar. This is currently whitelisted.
  with ops.Graph().as_default():
    _ = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'Barrier'
               attr { key: '_output_shapes'
                      value { list { shape { } } } } }
        """),
        return_elements=["A"],
        name="import")
def testShapeWhitelistViolation(self):
  """A non-whitelisted op with a wrong `_output_shapes` attr must fail."""
  # L2 loss produces a scalar shape, but the graph
  # has the wrong shape, so raise an error.
  with ops.Graph().as_default():
    with self.assertRaises(ValueError) as e:
      _ = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'Of' }
          node { name: 'B' op: 'L2Loss'
                 input: 'A:0'
                 attr { key: 'T' value { type: DT_FLOAT } }
                 attr { key: '_output_shapes'
                        value { list { shape { dim { size: 43 } } } } } }
          """),
          return_elements=["B"],
          name="import")
    # assertIn reports both operands on failure, unlike assertTrue(x in y).
    self.assertIn("Shapes () and (43,) are not compatible", str(e.exception))
def testInvalidSignatureTooManyInputsInGraphDef(self):
  """A node listing more inputs than its op signature raises ValueError."""
  with ops.Graph().as_default():
    with self.assertRaises(ValueError) as e:
      importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'Oi' }
          node { name: 'B' op: 'None' input: 'A:0' }
          """))
    # assertIn reports both operands on failure, unlike assertTrue(x in y).
    self.assertIn("More inputs specified ('A:0') than the op expects",
                  str(e.exception))
def testInvalidSignatureNotEnoughInputsInGraphDef(self):
  """A node listing fewer inputs than its op signature raises ValueError."""
  with ops.Graph().as_default():
    with self.assertRaises(ValueError) as e:
      importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'Oi' }
          node { name: 'B' op: 'Iif' input: 'A:0' }
          """))
    # assertIn reports both operands on failure, unlike assertTrue(x in y).
    self.assertIn("Input types mismatch (expected 'int32, float32' but "
                  "got 'int32')", str(e.exception))
def testMissingInputOpInGraphDef(self):
  """Referencing an op absent from the GraphDef raises ValueError."""
  with ops.Graph().as_default():
    with self.assertRaises(ValueError) as e:
      importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'B' op: 'If' input: 'A:0' }
          """))
    # assertIn reports both operands on failure, unlike assertTrue(x in y).
    self.assertIn("Input tensor 'A:0' not found", str(e.exception))
def testMissingInputOpInGraphDefButAppearsInInputMap(self):
  """A missing producer op is fine when `input_map` supplies the tensor."""
  with ops.Graph().as_default():
    feed_a_0 = constant_op.constant(5.0)
    b, = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'B' op: 'If' input: 'A:0' }
        """),
        input_map={"A:0": feed_a_0},
        return_elements=["B"])
    self.assertEqual(b.inputs[0], feed_a_0)
def testMissingInputTensorInGraphDef(self):
  """Referencing an output index the producer lacks raises ValueError."""
  with ops.Graph().as_default():
    with self.assertRaises(ValueError) as e:
      importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'Of' }
          node { name: 'B' op: 'If' input: 'A:1' }
          """))
    # assertIn reports both operands on failure, unlike assertTrue(x in y).
    self.assertIn("Input tensor 'A:1' not found", str(e.exception))
def testMissingControlInputInGraphDef(self):
  """A control input naming a missing op raises ValueError."""
  with ops.Graph().as_default():
    with self.assertRaises(ValueError) as e:
      importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'B' op: 'None' input: '^A' }
          """))
    # assertIn reports both operands on failure, unlike assertTrue(x in y).
    self.assertIn("Control input '^A' not found", str(e.exception))
def testInvalidTensorNameOutputIndexInGraphDef(self):
  """A non-numeric output index ('A:B') is rejected with ValueError."""
  with ops.Graph().as_default():
    with self.assertRaises(ValueError) as e:
      importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'B' op: 'None' input: 'A:B' }
          """))
    self.assertEqual("Cannot convert 'A:B' to a tensor name.",
                     str(e.exception))
def testInvalidTensorNameInGraphDef(self):
  """A doubly-qualified tensor name ('A:B:0') is rejected with ValueError."""
  with ops.Graph().as_default():
    with self.assertRaises(ValueError) as e:
      importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'B' op: 'None' input: 'A:B:0' }
          """))
    self.assertEqual("Cannot convert 'A:B:0' to a tensor name.",
                     str(e.exception))
def testMissingReturnOperation(self):
  """Requesting a return op absent from the GraphDef raises ValueError."""
  with ops.Graph().as_default():
    with self.assertRaises(ValueError) as e:
      importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'None' }
          """),
          return_elements=["B"])
    # assertIn reports both operands on failure, unlike assertTrue(x in y).
    self.assertIn("return_element 'B' not found in graph_def.",
                  str(e.exception))
def testMissingReturnTensor(self):
  """Requesting a missing/invalid return tensor raises ValueError."""
  with ops.Graph().as_default():
    # Output index out of range.
    with self.assertRaises(ValueError) as e:
      importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'Oi' }
          """),
          return_elements=["A:1"])
    # assertIn reports both operands on failure, unlike assertTrue(x in y).
    self.assertIn("return_element 'A:1' not found in graph_def.",
                  str(e.exception))
    # Op name not present in the GraphDef.
    with self.assertRaises(ValueError) as e:
      importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'Oi' }
          """),
          return_elements=["B:0"])
    self.assertIn("return_element 'B:0' not found in graph_def.",
                  str(e.exception))
    # Malformed tensor name.
    with self.assertRaises(ValueError) as e:
      importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'Oi' }
          """),
          return_elements=["A:B:0"])
    self.assertIn("return_element 'A:B:0' not found in graph_def.",
                  str(e.exception))
def testMissingInputMap(self):
  """An `input_map` key naming no GraphDef tensor raises ValueError."""
  with ops.Graph().as_default():
    with self.assertRaises(ValueError) as e:
      importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'None' }
          """),
          input_map={"B:0": constant_op.constant(5.0)})
    # assertIn reports both operands on failure, unlike assertTrue(x in y).
    self.assertIn("not found in graph_def: [B:0]", str(e.exception))
def testInputMapTypeMismatch(self):
  """An `input_map` tensor of the wrong dtype raises ValueError."""
  with ops.Graph().as_default():
    with self.assertRaises(ValueError) as e:
      importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'Oi' }
          node { name: 'B' op: 'Ii' input: 'A:0' }
          """),
          input_map={"A:0": constant_op.constant(5.0)})
    # assertIn reports both operands on failure, unlike assertTrue(x in y).
    self.assertIn(
        "Cannot convert a tensor of type float32 to an input of type int32.",
        str(e.exception))
def testNoReturns(self):
  """Without `return_elements`, import returns None but still adds ops."""
  with ops.Graph().as_default() as g:
    ret = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'None' }
        """))
    # assertIsNone is the idiomatic (and clearer-failing) form of
    # assertEqual(ret, None).
    self.assertIsNone(ret)
    a = g.get_operation_by_name("import/A")
    self.assertEqual(a.type, "None")
def testOverrideNamePrefix(self):
  """The `name` argument replaces the default 'import' prefix."""
  with ops.Graph().as_default():
    a, = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'None' }
        """),
        return_elements=["A"],
        name="imported_graph")
    self.assertEqual(a.name, "imported_graph/A")
def testNamePrefixColocationAttrs(self):
  """'loc:@X' colocation attrs are rewritten with the import prefix."""
  original_graph_def = self._MakeGraphDef("""
        node { name: 'A' op: 'None' }
        node { name: 'B' op: 'None'  attr {
          key: '_class'
          value { list { s: 'loc:@A' } }
        } }""")
  with ops.Graph().as_default():
    b, = importer.import_graph_def(
        original_graph_def, return_elements=["B"], name="imported_graph")
    self.assertProtoEqualsVersion("""
        node { name: 'imported_graph/A' op: 'None' }
        node { name: 'imported_graph/B' op: 'None'  attr {
          key: '_class'
          value { list { s: 'loc:@imported_graph/A' } }
        } }""", b.graph.as_graph_def())
def testNamePrefixColocationAttrsMultipleImport(self):
  """Repeated imports uniquify names and keep colocation attrs in sync."""
  original_graph_def = self._MakeGraphDef("""
        node { name: 'A' op: 'None' }
        node { name: 'B' op: 'None'  attr {
          key: '_class'
          value { list { s: 'loc:@A' } }
        } }""")
  with ops.Graph().as_default():
    b, = importer.import_graph_def(
        original_graph_def, return_elements=["B"], name="")
    _, = importer.import_graph_def(
        original_graph_def, return_elements=["B"], name="")
    # The second import gets '_1'-suffixed names, and 'loc:@A' is
    # rewritten to the uniquified 'loc:@A_1'.
    self.assertProtoEqualsVersion("""
        node { name: 'A' op: 'None' }
        node { name: 'B' op: 'None'  attr {
          key: '_class'
          value { list { s: 'loc:@A' } }
        } }
        node { name: 'A_1' op: 'None' }
        node { name: 'B_1' op: 'None'  attr {
          key: '_class'
          value { list { s: 'loc:@A_1' } }
        } }""", b.graph.as_graph_def())
def testNamePrefixColocationAttrsNotFound(self):
  """A colocation attr naming a missing op makes import fail."""
  original_graph_def = self._MakeGraphDef("""
        node { name: 'B' op: 'None'  attr {
          key: '_class'
          value { list { s: 'loc:@A' } }
        } }""")
  with ops.Graph().as_default():
    with self.assertRaisesRegexp(ValueError, "does not exist during import"):
      importer.import_graph_def(
          original_graph_def, return_elements=["B"], name="imported_graph")
def testEmptyGraph(self):
  """Importing an empty GraphDef leaves the graph version unchanged."""
  with ops.Graph().as_default() as g:
    init_version = g.version
    importer.import_graph_def(self._MakeGraphDef(""))
    self.assertEqual(init_version, g.version)
def testInvalidInputForGraphDef(self):
  """A non-GraphDef `graph_def` argument raises TypeError."""
  with ops.Graph().as_default():
    with self.assertRaises(TypeError) as e:
      importer.import_graph_def("")
    self.assertEqual("graph_def must be a GraphDef proto.", str(e.exception))
def testInvalidInputForInputMap(self):
  """A non-dict `input_map`, or one with an empty `name`, is rejected."""
  with ops.Graph().as_default():
    # input_map must be a dict, not a list.
    with self.assertRaises(TypeError) as e:
      importer.import_graph_def(
          self._MakeGraphDef(""), input_map=[constant_op.constant(5.0)])
    self.assertEqual("input_map must be a dictionary mapping strings to "
                     "Tensor objects.", str(e.exception))
    # input_map with an empty name scope is disallowed.
    with self.assertRaises(ValueError) as e:
      importer.import_graph_def(
          self._MakeGraphDef(""),
          input_map={"a:0": constant_op.constant(5.0)},
          name="")
    self.assertEqual("tf.import_graph_def() requires a non-empty `name` "
                     "if `input_map` is used.", str(e.exception))
def testInvalidInputForReturnOperations(self):
  """Non-string entries in `return_elements` raise TypeError."""
  with ops.Graph().as_default():
    with self.assertRaises(TypeError) as e:
      importer.import_graph_def(self._MakeGraphDef(""), return_elements=[7])
    self.assertEqual("return_elements must be a list of strings.",
                     str(e.exception))
def testWithExtensionAndAttr(self):
  """A round-tripped graph (const + stack) imports and evaluates."""
  with ops.Graph().as_default() as g:
    c = constant_op.constant(5.0, dtype=dtypes.float32, name="c")
    array_ops.stack([c, c], name="pack")
    gdef = g.as_graph_def()
  with self.test_session():
    pack, = importer.import_graph_def(gdef, return_elements=["pack"])
    self.assertAllEqual(pack.outputs[0].eval(), [5.0, 5.0])
def testWithDevice(self):
  """Device strings in the GraphDef merge with the importer's device scope."""
  with ops.Graph().as_default() as g:
    # No device.
    a = constant_op.constant(3.0, name="a")
    with ops.device("/cpu:0"):
      b = constant_op.constant(4.0, name="b")
    with ops.device("/job:worker"):
      c = constant_op.constant(5.0, name="c")
  gdef = g.as_graph_def()
  with ops.Graph().as_default():
    # No surrounding device scope: original devices are preserved.
    a2, b2, c2 = importer.import_graph_def(
        gdef, return_elements=["a", "b", "c"])
    self.assertEqual(a.device, a2.device)
    self.assertEqual(b.device, b2.device)
    self.assertEqual(c.device, c2.device)
  with ops.Graph().as_default():
    with ops.device(device.merge_device("/task:0")):
      a3, b3, c3 = importer.import_graph_def(
          gdef, return_elements=["a", "b", "c"])
      self.assertEqual("/task:0", a3.device)
      self.assertEqual("/task:0/device:CPU:0", b3.device)  # canonicalized.
      self.assertEqual(c.device + "/task:0", c3.device)
  with ops.Graph().as_default():
    with ops.device(device.merge_device("/job:ps")):
      a4, b4, c4 = importer.import_graph_def(
          gdef, return_elements=["a", "b", "c"])
      self.assertEqual("/job:ps", a4.device)
      self.assertEqual("/job:ps/device:CPU:0", b4.device)  # canonicalized.
      self.assertEqual(c.device, c4.device)  # worker overrides ps.
  with ops.Graph().as_default():
    with ops.device(device.merge_device("/gpu:0")):
      a5, b5, c5 = importer.import_graph_def(
          gdef, return_elements=["a", "b", "c"])
      self.assertEqual("/device:GPU:0", a5.device)
      self.assertEqual("/device:CPU:0", b5.device)  # cpu overrides gpu.
      self.assertEqual(c.device + "/device:GPU:0", c5.device)
def testWithDeviceFunctionDependingOnInputs(self):
  """A device function can inspect each imported op's inputs."""
  with ops.Graph().as_default() as g:
    with ops.device("/job:ps"):
      v = variables.Variable(1.0)
    unused_assign_op = v.assign(2.0)
    unused_assign_2_op = v.assign(3.0)
    unused_add_t = v + v
  gdef = g.as_graph_def()
  # We'll use the following device function to observe ops with two inputs.
  # NOTE(review): the predicate actually selects ops with at least one
  # ref-typed input, not ops with exactly two inputs — confirm intent.
  ops_with_two_inputs = []
  def input_counter(op):
    if any(in_t.dtype._is_ref_dtype for in_t in op.inputs):  # pylint: disable=protected-access
      ops_with_two_inputs.append(op)
    return ""
  with ops.Graph().as_default() as g:
    with ops.device(input_counter):
      importer.import_graph_def(gdef)
  # We expect to see the initializer, two assign operations, and the add op.
  self.assertEqual(4, len(ops_with_two_inputs))
def testGradient(self):
  """Gradients can flow through ops created by import_graph_def."""
  with ops.Graph().as_default() as g:
    inputs = array_ops.placeholder(
        dtypes.float32, shape=[None, 100], name="input")
    weights = array_ops.placeholder(
        dtypes.float32, shape=[100, 10], name="weights")
    biases = array_ops.placeholder(dtypes.float32, shape=[10], name="biases")
    activations = nn_ops.relu(
        math_ops.matmul(inputs, weights) + biases, name="activations")
    loss = math_ops.reduce_mean(activations, name="loss")
  gdef = g.as_graph_def()
  with ops.Graph().as_default() as g:
    input_placeholder = array_ops.placeholder(dtypes.float32, shape=[32, 100])
    weights_var = variables.Variable(
        random_ops.truncated_normal([100, 10]), name="weights")
    biases_var = variables.Variable(array_ops.zeros([10]), name="biases")
    # Re-wire the imported subgraph onto real variables via input_map.
    activations, loss = importer.import_graph_def(
        gdef,
        input_map={
            "input:0": input_placeholder,
            "weights:0": weights_var,
            "biases:0": biases_var
        },
        return_elements=["activations:0", "loss:0"])
    self.assertEqual([32, 10], activations.get_shape())
    self.assertEqual([], loss.get_shape())
    weights_grad, biases_grad = gradients_impl.gradients(
        loss, [weights_var, biases_var])
    self.assertEqual([100, 10], weights_grad.get_shape())
    self.assertEqual([10], biases_grad.get_shape())
def testLargeGraph(self):
  """A graph above the proto size warning threshold still evaluates."""
  with self.test_session():
    # The default message byte limit is 64M. Ours is 2G with a warning at
    # 512M. Adding a 130M entries float32 tensor should exceed the warning,
    # but not the hard limit.
    input_shape = [130, 1000, 1000]
    tensor_input = np.ones(input_shape, dtype=np.float32)
    t = constant_op.constant(tensor_input, shape=input_shape)
    g = array_ops.identity(t)
    g.eval()
def testVersion(self):
  """Producer/min_consumer versions in the GraphDef survive import."""
  v0 = versions.GRAPH_DEF_VERSION_MIN_CONSUMER
  v2 = versions.GRAPH_DEF_VERSION
  v1 = (v0 + v2) // 2  # a version strictly between the two bounds
  for producer in v0, v1, v2:
    for min_consumer in v0, v1, v2:
      with ops.Graph().as_default():
        a, = importer.import_graph_def(
            self._MakeGraphDef(
                "node { name: 'A' op: 'Oii' }",
                producer=producer,
                min_consumer=min_consumer),
            return_elements=["A"])
        self.assertEqual(a.graph.graph_def_versions.producer, producer)
        self.assertEqual(a.graph.graph_def_versions.min_consumer,
                         min_consumer)
def testVersionLow(self):
  """A producer version below the supported minimum fails at run time."""
  with ops.Graph().as_default() as g:
    pat = (r"GraphDef producer version -1 below min producer %d supported "
           r"by TensorFlow \S+\. Please regenerate your graph.$" %
           versions.GRAPH_DEF_VERSION_MIN_PRODUCER)
    # Import succeeds; the version check fires when the session runs.
    importer.import_graph_def(self._MakeGraphDef("", producer=-1))
    x = constant_op.constant(
        7)  # Need at least one op to get a C++ graph generated
    with self.test_session(graph=g) as sess:
      with self.assertRaisesRegexp(Exception, pat):
        sess.run(x)
def testVersionHigh(self):
  """A min_consumer version above the current version fails at run time."""
  with ops.Graph().as_default() as g:
    pat = (r"GraphDef min consumer version %d above current version %d "
           r"for TensorFlow \S+\. Please upgrade TensorFlow\.$" %
           (1 << 30, versions.GRAPH_DEF_VERSION))
    # Import succeeds; the version check fires when the session runs.
    importer.import_graph_def(self._MakeGraphDef("", min_consumer=1 << 30))
    x = constant_op.constant(
        7)  # Need at least one op to get a C++ graph generated
    with self.test_session(graph=g) as sess:
      with self.assertRaisesRegexp(Exception, pat):
        sess.run(x)
def testDefaultAttrsAdded(self):
  """Attrs absent from the GraphDef get the op registry's default value."""
  with ops.Graph().as_default():
    a = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'OpWithDefaultAttr' }
        """),
        return_elements=["A"])
    self.assertEqual(123.0, a[0].get_attr("default_float"))
def testDefaultAttrsRemoved(self):
  """Attrs known only to `producer_op_list` are stripped iff default-valued."""
  producer_op_list = op_def_pb2.OpList()
  text_format.Merge("""
    op {
      name: 'OpWithFutureDefaultAttr'
      attr { name: 'default_int' type: 'int' default_value { i: 456 } }
    }
  """, producer_op_list)
  # Attr only in producer_op_list with default value gets removed.
  with ops.Graph().as_default():
    a = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'OpWithFutureDefaultAttr'
               attr { key: 'default_int' value { i: 456 } } }
        """),
        return_elements=["A"],
        producer_op_list=producer_op_list)
    with self.assertRaisesRegexp(ValueError, "No attr named 'default_int'"):
      a[0].get_attr("default_int")
  # Attr only in producer_op_list with non-default value is preserved.
  with ops.Graph().as_default():
    a = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'OpWithFutureDefaultAttr'
               attr { key: 'default_int' value { i: 987 } } }
        """),
        return_elements=["A"],
        producer_op_list=producer_op_list)
    self.assertEqual(987, a[0].get_attr("default_int"))
# Fix: line 24885 contained dataset-export residue ("test.main() | <repo
# path> | <next file's docstring>") fused onto the call — a syntax error.
# Restore the conventional test entry point.
if __name__ == "__main__":
  test.main()
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def _unknown_shape(op):
  """Shape function returning an unknown shape for each output of `op`."""
  return [tensor_shape.unknown_shape() for _ in op.outputs]
# NOTE(cwhipkey): Dummy shape registration for ops used in the tests, since
# they don't have C++ op registrations on which to attach C++ shape fns.
# Registering in a loop avoids 17 near-identical statements; the set of
# registered names and the registered function are unchanged.
for _op_name in ("If", "Iff", "Ii", "Iif", "Iii", "In", "Iri", "None", "Of",
                 "Oi", "Oif", "Oii", "OpWithDefaultAttr",
                 "OpWithFutureDefaultAttr", "Or", "Otl", "Unary"):
  ops.RegisterShape(_op_name)(_unknown_shape)
_op_list = op_def_pb2.OpList()
text_format.Merge("""
op {
name: 'None'
}
op {
name: 'Oi'
output_arg { name: 'a' type: DT_INT32 }
}
op {
name: 'Or'
output_arg { name: 'a' type: DT_INT32 is_ref: true }
}
op {
name: 'Of'
output_arg { name: 'a' type: DT_FLOAT }
}
op {
name: 'Ii'
input_arg { name: 'a' type: DT_INT32 }
}
op {
name: 'If'
input_arg { name: 'a' type: DT_FLOAT }
}
op {
name: 'Oii'
output_arg { name: 'a' type: DT_INT32 }
output_arg { name: 'b' type: DT_INT32 }
}
op {
name: 'Oif'
output_arg { name: 'a' type: DT_INT32 }
output_arg { name: 'b' type: DT_FLOAT }
}
op {
name: 'Iii'
input_arg { name: 'a' type: DT_INT32 }
input_arg { name: 'b' type: DT_INT32 }
}
op {
name: 'Iff'
input_arg { name: 'a' type: DT_FLOAT }
input_arg { name: 'b' type: DT_FLOAT }
}
op {
name: 'Iif'
input_arg { name: 'a' type: DT_INT32 }
input_arg { name: 'b' type: DT_FLOAT }
}
op {
name: 'Iri'
input_arg { name: 'a' type: DT_INT32 is_ref: true }
input_arg { name: 'b' type: DT_INT32 }
}
op {
name: 'In'
input_arg { name: 'a' number_attr: 'N' type_attr: 'T' }
attr { name: 'N' type: 'int' minimum: 1 }
attr { name: 'T' type: 'type' }
}
op {
name: 'Otl'
output_arg { name: 'a' type_list_attr: 't' }
attr { name: 'T' type: 'list(type)' minimum: 1 }
}
op {
name: 'Unary'
input_arg { name: 'a' type_attr: 'T' }
output_arg { name: 'b' type_attr: 'T' }
attr { name: 'T' type: 'type' }
}
op {
name: 'OpWithDefaultAttr'
output_arg { name: 'a' type: DT_INT32 }
attr { name: 'default_float' type: 'float' default_value { f: 123.0 } }
}
op {
name: 'OpWithFutureDefaultAttr'
}
""", _op_list)
# Make the dummy ops visible to the op-def registry used by the importer.
op_def_registry.register_op_list(_op_list)
# NOTE(mrry): Dummy shape registrations for ops used in the tests.
for op_def in _op_list.op:
  ops.RegisterShape(op_def.name)(None)
class ImportGraphDefTest(test.TestCase):
def _MakeGraphDef(self,
                  text,
                  producer=versions.GRAPH_DEF_VERSION,
                  min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER):
  """Parse text-format nodes into a GraphDef with the given versions."""
  text = "versions: { producer: %d min_consumer: %d };\n%s" % (producer,
                                                               min_consumer,
                                                               text)
  ret = graph_pb2.GraphDef()
  text_format.Merge(text, ret)
  return ret
def testBasic(self):
  """Import wires topology, dtypes, names and op_defs as specified."""
  with ops.Graph().as_default():
    a, b, c, d = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'Oif' }
        node { name: 'B' op: 'Otl'
               attr { key: 't'
                      value { list { type: DT_INT32 type: DT_FLOAT } } } }
        node { name: 'C' op: 'In'
               attr { key: 'N' value { i: 2 } }
               attr { key: 'T' value { type: DT_INT32 } }
               input: 'A:0' input: 'B:0' }
        node { name: 'D' op: 'In'
               attr { key: 'N' value { i: 2 } }
               attr { key: 'T' value { type: DT_FLOAT } }
               input: 'A:1' input: 'B:1' }
        """),
        return_elements=["A", "B", "C", "D"],
        name="import")
    # Assert that the import process creates distinct tensors.
    self.assertNotEqual(a.outputs[0].name, a.outputs[1].name)
    self.assertNotEqual(b.outputs[0].name, b.outputs[1].name)
    self.assertNotEqual(a.outputs[0].name, b.outputs[0].name)
    self.assertNotEqual(a.outputs[0].name, b.outputs[1].name)
    self.assertNotEqual(a.outputs[1].name, b.outputs[0].name)
    self.assertNotEqual(a.outputs[1].name, b.outputs[1].name)
    # Assert that the ops are connected according to the GraphDef topology.
    self.assertEqual(c.inputs[0], a.outputs[0])
    self.assertEqual(c.inputs[1], b.outputs[0])
    self.assertEqual(d.inputs[0], a.outputs[1])
    self.assertEqual(d.inputs[1], b.outputs[1])
    # Check the types of the returned ops and tensors.
    self.assertEqual(a.type, "Oif")
    self.assertEqual(b.type, "Otl")
    self.assertEqual(c.type, "In")
    self.assertEqual(d.type, "In")
    self.assertEqual(a.outputs[0].dtype, dtypes.int32)
    self.assertEqual(a.outputs[1].dtype, dtypes.float32)
    self.assertEqual(b.outputs[0].dtype, dtypes.int32)
    self.assertEqual(b.outputs[1].dtype, dtypes.float32)
    # Check the names of the returned ops.
    self.assertEqual(a.name, "import/A")
    self.assertEqual(b.name, "import/B")
    self.assertEqual(c.name, "import/C")
    self.assertEqual(d.name, "import/D")
    # Check that the op_def is still available.
    self.assertNotEqual(None, a.op_def)
def testInputMap(self):
  """Tensors fed via `input_map` (str keys) replace the mapped edges."""
  with ops.Graph().as_default():
    feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
    feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
    a, b, c, d = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'Oii' }
        node { name: 'B' op: 'Oii' }
        node { name: 'C' op: 'In'
               attr { key: 'N' value { i: 2 } }
               attr { key: 'T' value { type: DT_INT32 } }
               input: 'A:0' input: 'B:0' }
        node { name: 'D' op: 'In'
               attr { key: 'N' value { i: 2 } }
               attr { key: 'T' value { type: DT_INT32 } }
               input: 'A:1' input: 'B:1' }
        """),
        input_map={"A:0": feed_a_0,
                   "B:1": feed_b_1},
        return_elements=["A", "B", "C", "D"])
    # Mapped tensors ('A:0', 'B:1') are replaced by the feeds; unmapped
    # tensors ('B:0', 'A:1') still come from the imported ops.
    self.assertEqual(c.inputs[0], feed_a_0)
    self.assertEqual(c.inputs[1], b.outputs[0])
    self.assertEqual(d.inputs[0], a.outputs[1])
    self.assertEqual(d.inputs[1], feed_b_1)
def testInputMapBytes(self):
  """`input_map` / `return_elements` also accept bytes keys and names."""
  with ops.Graph().as_default():
    feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
    feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
    a, b, c, d = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'Oii' }
        node { name: 'B' op: 'Oii' }
        node { name: 'C' op: 'In'
               attr { key: 'N' value { i: 2 } }
               attr { key: 'T' value { type: DT_INT32 } }
               input: 'A:0' input: 'B:0' }
        node { name: 'D' op: 'In'
               attr { key: 'N' value { i: 2 } }
               attr { key: 'T' value { type: DT_INT32 } }
               input: 'A:1' input: 'B:1' }
        """),
        input_map={b"A:0": feed_a_0,
                   b"B:1": feed_b_1},
        return_elements=[b"A", b"B", b"C", b"D"])
    self.assertEqual(c.inputs[0], feed_a_0)
    self.assertEqual(c.inputs[1], b.outputs[0])
    self.assertEqual(d.inputs[0], a.outputs[1])
    self.assertEqual(d.inputs[1], feed_b_1)
def testInputMapUnicode(self):
  """`input_map` / `return_elements` also accept unicode keys and names."""
  with ops.Graph().as_default():
    feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
    feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
    a, b, c, d = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'Oii' }
        node { name: 'B' op: 'Oii' }
        node { name: 'C' op: 'In'
               attr { key: 'N' value { i: 2 } }
               attr { key: 'T' value { type: DT_INT32 } }
               input: 'A:0' input: 'B:0' }
        node { name: 'D' op: 'In'
               attr { key: 'N' value { i: 2 } }
               attr { key: 'T' value { type: DT_INT32 } }
               input: 'A:1' input: 'B:1' }
        """),
        input_map={u"A:0": feed_a_0,
                   u"B:1": feed_b_1},
        return_elements=[u"A", u"B", u"C", u"D"])
    self.assertEqual(c.inputs[0], feed_a_0)
    self.assertEqual(c.inputs[1], b.outputs[0])
    self.assertEqual(d.inputs[0], a.outputs[1])
    self.assertEqual(d.inputs[1], feed_b_1)
def testImplicitZerothOutput(self):
  """A GraphDef input 'A' (no ':index') means output 0 of op A."""
  with ops.Graph().as_default():
    a, b = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'Oii' }
        node { name: 'B' op: 'Ii' input: 'A' }
        """),
        return_elements=["A", "B"])
    self.assertEqual(b.inputs[0], a.outputs[0])
def testInputMapImplicitZerothOutput(self):
  """An `input_map` key 'A' (no ':index') maps tensor 'A:0'."""
  with ops.Graph().as_default():
    feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
    b, = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'Oii' }
        node { name: 'B' op: 'Ii' input: 'A:0' }
        """),
        input_map={"A": feed_a_0},
        return_elements=["B"])
    self.assertEqual(b.inputs[0], feed_a_0)
def testWithControlDependency(self):
  """A '^A' input in the GraphDef becomes a control input on import."""
  with ops.Graph().as_default():
    a, b = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'None' }
        node { name: 'B' op: 'None' input: '^A' }
        """),
        return_elements=["A", "B"])
    self.assertEqual(b.control_inputs, [a])
def testWithRefs(self):
  """Ref-typed outputs connect to both ref and non-ref inputs."""
  with ops.Graph().as_default():
    a, b, c, d = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'Or' }
        node { name: 'B' op: 'Oi' }
        node { name: 'C' op: 'Iii' input: 'A:0' input: 'B:0' }
        node { name: 'D' op: 'Iri' input: 'A:0' input: 'B:0' }
        """),
        return_elements=["A", "B", "C", "D"])
    self.assertEqual(c.inputs[0], a.outputs[0])
    self.assertEqual(c.inputs[1], b.outputs[0])
    self.assertEqual(d.inputs[0], a.outputs[0])
    self.assertEqual(d.inputs[1], b.outputs[0])
    # 'Or' produces an int32 ref; 'Iii' accepts it as plain int32 while
    # 'Iri' keeps the ref dtype on its first input.
    self.assertEqual(a.outputs[0].dtype, dtypes.int32_ref)
    self.assertEqual(c._input_dtypes, [dtypes.int32, dtypes.int32])
    self.assertEqual(c.outputs, [])
    self.assertEqual(d._input_dtypes, [dtypes.int32_ref, dtypes.int32])
    self.assertEqual(d.outputs, [])
def testCyclic(self):
  """A GraphDef with a cycle (A -> B -> A) imports with both edges wired."""
  with ops.Graph().as_default():
    a, b = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'Unary'
               attr { key: 'T' value { type: DT_INT32 } } input: 'B:0' }
        node { name: 'B' op: 'Unary'
               attr { key: 'T' value { type: DT_INT32 } } input: 'A:0' }
        """),
        return_elements=["A", "B"])
    self.assertEqual(a.inputs[0], b.outputs[0])
    self.assertEqual(b.inputs[0], a.outputs[0])
def testTypeMismatchInGraphDef(self):
  """Importing a graph whose edge dtypes disagree raises ValueError."""
  with ops.Graph().as_default():
    with self.assertRaises(ValueError) as e:
      importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'Oi' }
          node { name: 'B' op: 'If' input: 'A:0' }
          """))
    # assertIn reports both operands on failure, unlike assertTrue(x in y).
    self.assertIn(
        "Cannot convert a tensor of type int32 to an input of type float",
        str(e.exception))
def testShapeWhitelist(self):
  """A whitelisted op may declare a wrong `_output_shapes` attr."""
  # Barrier's shape is an output vector of 2, but the
  # graph says it's a scalar. This is currently whitelisted.
  with ops.Graph().as_default():
    _ = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'Barrier'
               attr { key: '_output_shapes'
                      value { list { shape { } } } } }
        """),
        return_elements=["A"],
        name="import")
def testShapeWhitelistViolation(self):
  """A non-whitelisted op with a wrong `_output_shapes` attr must fail."""
  # L2 loss produces a scalar shape, but the graph
  # has the wrong shape, so raise an error.
  with ops.Graph().as_default():
    with self.assertRaises(ValueError) as e:
      _ = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'Of' }
          node { name: 'B' op: 'L2Loss'
                 input: 'A:0'
                 attr { key: 'T' value { type: DT_FLOAT } }
                 attr { key: '_output_shapes'
                        value { list { shape { dim { size: 43 } } } } } }
          """),
          return_elements=["B"],
          name="import")
    # assertIn reports both operands on failure, unlike assertTrue(x in y).
    self.assertIn("Shapes () and (43,) are not compatible", str(e.exception))
def testInvalidSignatureTooManyInputsInGraphDef(self):
  """A node listing more inputs than its op signature raises ValueError."""
  with ops.Graph().as_default():
    with self.assertRaises(ValueError) as e:
      importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'Oi' }
          node { name: 'B' op: 'None' input: 'A:0' }
          """))
    # assertIn reports both operands on failure, unlike assertTrue(x in y).
    self.assertIn("More inputs specified ('A:0') than the op expects",
                  str(e.exception))
def testInvalidSignatureNotEnoughInputsInGraphDef(self):
  """A node listing fewer inputs than its op signature raises ValueError."""
  with ops.Graph().as_default():
    with self.assertRaises(ValueError) as e:
      importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'Oi' }
          node { name: 'B' op: 'Iif' input: 'A:0' }
          """))
    # assertIn reports both operands on failure, unlike assertTrue(x in y).
    self.assertIn("Input types mismatch (expected 'int32, float32' but "
                  "got 'int32')", str(e.exception))
def testMissingInputOpInGraphDef(self):
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'If' input: 'A:0' }
"""))
self.assertTrue("Input tensor 'A:0' not found" in str(e.exception))
def testMissingInputOpInGraphDefButAppearsInInputMap(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(5.0)
b, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'If' input: 'A:0' }
"""),
input_map={"A:0": feed_a_0},
return_elements=["B"])
self.assertEqual(b.inputs[0], feed_a_0)
def testMissingInputTensorInGraphDef(self):
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Of' }
node { name: 'B' op: 'If' input: 'A:1' }
"""))
self.assertTrue("Input tensor 'A:1' not found" in str(e.exception))
def testMissingControlInputInGraphDef(self):
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'None' input: '^A' }
"""))
self.assertTrue("Control input '^A' not found" in str(e.exception))
def testInvalidTensorNameOutputIndexInGraphDef(self):
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'None' input: 'A:B' }
"""))
self.assertEqual("Cannot convert 'A:B' to a tensor name.",
str(e.exception))
def testInvalidTensorNameInGraphDef(self):
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'None' input: 'A:B:0' }
"""))
self.assertEqual("Cannot convert 'A:B:0' to a tensor name.",
str(e.exception))
def testMissingReturnOperation(self):
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
return_elements=["B"])
self.assertTrue(
"return_element 'B' not found in graph_def." in str(e.exception))
def testMissingReturnTensor(self):
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oi' }
"""),
return_elements=["A:1"])
self.assertTrue(
"return_element 'A:1' not found in graph_def." in str(e.exception))
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oi' }
"""),
return_elements=["B:0"])
self.assertTrue(
"return_element 'B:0' not found in graph_def." in str(e.exception))
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oi' }
"""),
return_elements=["A:B:0"])
self.assertTrue(
"return_element 'A:B:0' not found in graph_def." in str(e.exception))
def testMissingInputMap(self):
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
input_map={"B:0": constant_op.constant(5.0)})
self.assertTrue("not found in graph_def: [B:0]" in str(e.exception))
def testInputMapTypeMismatch(self):
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oi' }
node { name: 'B' op: 'Ii' input: 'A:0' }
"""),
input_map={"A:0": constant_op.constant(5.0)})
self.assertTrue(
"Cannot convert a tensor of type float32 to an input of type int32."
in str(e.exception))
def testNoReturns(self):
with ops.Graph().as_default() as g:
ret = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""))
self.assertEqual(ret, None)
a = g.get_operation_by_name("import/A")
self.assertEqual(a.type, "None")
def testOverrideNamePrefix(self):
with ops.Graph().as_default():
a, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
return_elements=["A"],
name="imported_graph")
self.assertEqual(a.name, "imported_graph/A")
def testNamePrefixColocationAttrs(self):
original_graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
with ops.Graph().as_default():
b, = importer.import_graph_def(
original_graph_def, return_elements=["B"], name="imported_graph")
self.assertProtoEqualsVersion("""
node { name: 'imported_graph/A' op: 'None' }
node { name: 'imported_graph/B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@imported_graph/A' } }
} }""", b.graph.as_graph_def())
def testNamePrefixColocationAttrsMultipleImport(self):
original_graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
with ops.Graph().as_default():
b, = importer.import_graph_def(
original_graph_def, return_elements=["B"], name="")
_, = importer.import_graph_def(
original_graph_def, return_elements=["B"], name="")
self.assertProtoEqualsVersion("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }
node { name: 'A_1' op: 'None' }
node { name: 'B_1' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A_1' } }
} }""", b.graph.as_graph_def())
def testNamePrefixColocationAttrsNotFound(self):
original_graph_def = self._MakeGraphDef("""
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "does not exist during import"):
importer.import_graph_def(
original_graph_def, return_elements=["B"], name="imported_graph")
def testEmptyGraph(self):
with ops.Graph().as_default() as g:
init_version = g.version
importer.import_graph_def(self._MakeGraphDef(""))
self.assertEqual(init_version, g.version)
def testInvalidInputForGraphDef(self):
with ops.Graph().as_default():
with self.assertRaises(TypeError) as e:
importer.import_graph_def("")
self.assertEqual("graph_def must be a GraphDef proto.", str(e.exception))
def testInvalidInputForInputMap(self):
with ops.Graph().as_default():
with self.assertRaises(TypeError) as e:
importer.import_graph_def(
self._MakeGraphDef(""), input_map=[constant_op.constant(5.0)])
self.assertEqual("input_map must be a dictionary mapping strings to "
"Tensor objects.", str(e.exception))
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef(""),
input_map={"a:0": constant_op.constant(5.0)},
name="")
self.assertEqual("tf.import_graph_def() requires a non-empty `name` "
"if `input_map` is used.", str(e.exception))
def testInvalidInputForReturnOperations(self):
with ops.Graph().as_default():
with self.assertRaises(TypeError) as e:
importer.import_graph_def(self._MakeGraphDef(""), return_elements=[7])
self.assertEqual("return_elements must be a list of strings.",
str(e.exception))
def testWithExtensionAndAttr(self):
with ops.Graph().as_default() as g:
c = constant_op.constant(5.0, dtype=dtypes.float32, name="c")
array_ops.stack([c, c], name="pack")
gdef = g.as_graph_def()
with self.test_session():
pack, = importer.import_graph_def(gdef, return_elements=["pack"])
self.assertAllEqual(pack.outputs[0].eval(), [5.0, 5.0])
def testWithDevice(self):
with ops.Graph().as_default() as g:
# No device.
a = constant_op.constant(3.0, name="a")
with ops.device("/cpu:0"):
b = constant_op.constant(4.0, name="b")
with ops.device("/job:worker"):
c = constant_op.constant(5.0, name="c")
gdef = g.as_graph_def()
with ops.Graph().as_default():
a2, b2, c2 = importer.import_graph_def(
gdef, return_elements=["a", "b", "c"])
self.assertEqual(a.device, a2.device)
self.assertEqual(b.device, b2.device)
self.assertEqual(c.device, c2.device)
with ops.Graph().as_default():
with ops.device(device.merge_device("/task:0")):
a3, b3, c3 = importer.import_graph_def(
gdef, return_elements=["a", "b", "c"])
self.assertEqual("/task:0", a3.device)
self.assertEqual("/task:0/device:CPU:0", b3.device) # canonicalized.
self.assertEqual(c.device + "/task:0", c3.device)
with ops.Graph().as_default():
with ops.device(device.merge_device("/job:ps")):
a4, b4, c4 = importer.import_graph_def(
gdef, return_elements=["a", "b", "c"])
self.assertEqual("/job:ps", a4.device)
self.assertEqual("/job:ps/device:CPU:0", b4.device) # canonicalized.
self.assertEqual(c.device, c4.device) # worker overrides ps.
with ops.Graph().as_default():
with ops.device(device.merge_device("/gpu:0")):
a5, b5, c5 = importer.import_graph_def(
gdef, return_elements=["a", "b", "c"])
self.assertEqual("/device:GPU:0", a5.device)
self.assertEqual("/device:CPU:0", b5.device) # cpu overrides gpu.
self.assertEqual(c.device + "/device:GPU:0", c5.device)
def testWithDeviceFunctionDependingOnInputs(self):
with ops.Graph().as_default() as g:
with ops.device("/job:ps"):
v = variables.Variable(1.0)
unused_assign_op = v.assign(2.0)
unused_assign_2_op = v.assign(3.0)
unused_add_t = v + v
gdef = g.as_graph_def()
# We'll use the following device function to observe ops with two inputs.
ops_with_two_inputs = []
def input_counter(op):
if any(in_t.dtype._is_ref_dtype for in_t in op.inputs): # pylint: disable=protected-access
ops_with_two_inputs.append(op)
return ""
with ops.Graph().as_default() as g:
with ops.device(input_counter):
importer.import_graph_def(gdef)
# We expect to see the initializer, two assign operations, and the add op.
self.assertEqual(4, len(ops_with_two_inputs))
def testGradient(self):
with ops.Graph().as_default() as g:
inputs = array_ops.placeholder(
dtypes.float32, shape=[None, 100], name="input")
weights = array_ops.placeholder(
dtypes.float32, shape=[100, 10], name="weights")
biases = array_ops.placeholder(dtypes.float32, shape=[10], name="biases")
activations = nn_ops.relu(
math_ops.matmul(inputs, weights) + biases, name="activations")
loss = math_ops.reduce_mean(activations, name="loss")
gdef = g.as_graph_def()
with ops.Graph().as_default() as g:
input_placeholder = array_ops.placeholder(dtypes.float32, shape=[32, 100])
weights_var = variables.Variable(
random_ops.truncated_normal([100, 10]), name="weights")
biases_var = variables.Variable(array_ops.zeros([10]), name="biases")
activations, loss = importer.import_graph_def(
gdef,
input_map={
"input:0": input_placeholder,
"weights:0": weights_var,
"biases:0": biases_var
},
return_elements=["activations:0", "loss:0"])
self.assertEqual([32, 10], activations.get_shape())
self.assertEqual([], loss.get_shape())
weights_grad, biases_grad = gradients_impl.gradients(
loss, [weights_var, biases_var])
self.assertEqual([100, 10], weights_grad.get_shape())
self.assertEqual([10], biases_grad.get_shape())
def testLargeGraph(self):
with self.test_session():
# The default message byte limit is 64M. Ours is 2G with a warning at 512.
# Adding a 130M entries float32 tensor should exceed the warning, but not
# the hard limit.
input_shape = [130, 1000, 1000]
tensor_input = np.ones(input_shape, dtype=np.float32)
t = constant_op.constant(tensor_input, shape=input_shape)
g = array_ops.identity(t)
g.eval()
def testVersion(self):
v0 = versions.GRAPH_DEF_VERSION_MIN_CONSUMER
v2 = versions.GRAPH_DEF_VERSION
v1 = (v0 + v2) // 2
for producer in v0, v1, v2:
for min_consumer in v0, v1, v2:
with ops.Graph().as_default():
a, = importer.import_graph_def(
self._MakeGraphDef(
"node { name: 'A' op: 'Oii' }",
producer=producer,
min_consumer=min_consumer),
return_elements=["A"])
self.assertEqual(a.graph.graph_def_versions.producer, producer)
self.assertEqual(a.graph.graph_def_versions.min_consumer,
min_consumer)
def testVersionLow(self):
with ops.Graph().as_default() as g:
pat = (r"GraphDef producer version -1 below min producer %d supported "
r"by TensorFlow \S+\. Please regenerate your graph.$" %
versions.GRAPH_DEF_VERSION_MIN_PRODUCER)
importer.import_graph_def(self._MakeGraphDef("", producer=-1))
x = constant_op.constant(
7) # Need at least one op to get a C++ graph generated
with self.test_session(graph=g) as sess:
with self.assertRaisesRegexp(Exception, pat):
sess.run(x)
def testVersionHigh(self):
with ops.Graph().as_default() as g:
pat = (r"GraphDef min consumer version %d above current version %d "
r"for TensorFlow \S+\. Please upgrade TensorFlow\.$" %
(1 << 30, versions.GRAPH_DEF_VERSION))
importer.import_graph_def(self._MakeGraphDef("", min_consumer=1 << 30))
x = constant_op.constant(
7) # Need at least one op to get a C++ graph generated
with self.test_session(graph=g) as sess:
with self.assertRaisesRegexp(Exception, pat):
sess.run(x)
def testDefaultAttrsAdded(self):
with ops.Graph().as_default():
a = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'OpWithDefaultAttr' }
"""),
return_elements=["A"])
self.assertEqual(123.0, a[0].get_attr("default_float"))
def testDefaultAttrsRemoved(self):
producer_op_list = op_def_pb2.OpList()
text_format.Merge("""
op {
name: 'OpWithFutureDefaultAttr'
attr { name: 'default_int' type: 'int' default_value { i: 456 } }
}
""", producer_op_list)
# Attr only in producer_op_list with default value gets removed.
with ops.Graph().as_default():
a = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'OpWithFutureDefaultAttr'
attr { key: 'default_int' value { i: 456 } } }
"""),
return_elements=["A"],
producer_op_list=producer_op_list)
with self.assertRaisesRegexp(ValueError, "No attr named 'default_int'"):
a[0].get_attr("default_int")
# Attr only in producer_op_list with non-default value is preserved.
with ops.Graph().as_default():
a = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'OpWithFutureDefaultAttr'
attr { key: 'default_int' value { i: 987 } } }
"""),
return_elements=["A"],
producer_op_list=producer_op_list)
self.assertEqual(987, a[0].get_attr("default_int"))
if __name__ == "__main__":
test.main() | 0.795062 | 0.188026 |
"""Integration tests for the Osquery flow, its API client and API endpoints."""
import json
from absl import app
from grr_api_client import utils
from grr_response_proto.api import osquery_pb2 as api_osquery_pb2
from grr_response_server.flows.general import osquery as osquery_flow
from grr_response_server.gui import api_integration_test_lib
from grr.test_lib import action_mocks
from grr.test_lib import flow_test_lib
from grr.test_lib import osquery_test_lib
from grr.test_lib import test_lib
class OsqueryResultsExportTest(api_integration_test_lib.ApiIntegrationTest):
"""Tests exporting Osquery results using functionality in the API client."""
def _RunOsqueryExportResults(self, stdout: str) -> utils.BinaryChunkIterator:
client_id = self.SetupClient(0)
with osquery_test_lib.FakeOsqueryiOutput(stdout=stdout, stderr=""):
flow_id = flow_test_lib.TestFlowHelper(
osquery_flow.OsqueryFlow.__name__,
action_mocks.OsqueryClientMock(),
client_id=client_id,
creator=self.test_username,
query="doesn't matter")
result_flow = self.api.Client(client_id=client_id).Flow(flow_id)
result_flow.WaitUntilDone()
format_csv = api_osquery_pb2.ApiGetOsqueryResultsArgs.Format.CSV
return result_flow.GetOsqueryResults(format_csv)
def testExportSomeResults(self):
stdout = """
[
{ "foo": "quux", "bar": "norf" },
{ "foo": "blargh", "bar": "plugh" }
]
"""
results_iterator = self._RunOsqueryExportResults(stdout)
output_bytes = next(results_iterator)
output_text = output_bytes.decode("utf-8")
self.assertEqual("foo,bar\r\nquux,norf\r\nblargh,plugh\r\n", output_text)
def testExportNoRows(self):
stdout = """
[
]
"""
output_bytes = b"".join(self._RunOsqueryExportResults(stdout))
output_text = output_bytes.decode("utf-8")
self.assertEmpty(output_text)
def testExportUnicodeCharacters(self):
stdout = """
[
{ "🇬 🇷 🇷": "🔝🔝🔝"}
]
"""
results_iterator = self._RunOsqueryExportResults(stdout)
output_bytes = next(results_iterator)
output_text = output_bytes.decode("utf-8")
self.assertEqual("🇬 🇷 🇷\r\n🔝🔝🔝\r\n", output_text)
def testExportMultipleChunks(self):
row_count = 100
split_pieces = 10
cell_value = "fixed"
table = [{"column1": cell_value}] * row_count
table_json = json.dumps(table)
table_bytes = row_count * len(cell_value.encode("utf-8"))
chunk_bytes = table_bytes // split_pieces
with test_lib.ConfigOverrider({"Osquery.max_chunk_size": chunk_bytes}):
results_iterator = self._RunOsqueryExportResults(table_json)
output_bytes = next(results_iterator)
output_text = output_bytes.decode("utf-8")
expected_rows = "\r\n".join([cell_value] * row_count)
self.assertEqual("column1\r\n" + expected_rows + "\r\n", output_text)
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main) | grr/server/grr_response_server/gui/api_integration_tests/osquery_test.py | """Integration tests for the Osquery flow, its API client and API endpoints."""
import json
from absl import app
from grr_api_client import utils
from grr_response_proto.api import osquery_pb2 as api_osquery_pb2
from grr_response_server.flows.general import osquery as osquery_flow
from grr_response_server.gui import api_integration_test_lib
from grr.test_lib import action_mocks
from grr.test_lib import flow_test_lib
from grr.test_lib import osquery_test_lib
from grr.test_lib import test_lib
class OsqueryResultsExportTest(api_integration_test_lib.ApiIntegrationTest):
"""Tests exporting Osquery results using functionality in the API client."""
def _RunOsqueryExportResults(self, stdout: str) -> utils.BinaryChunkIterator:
client_id = self.SetupClient(0)
with osquery_test_lib.FakeOsqueryiOutput(stdout=stdout, stderr=""):
flow_id = flow_test_lib.TestFlowHelper(
osquery_flow.OsqueryFlow.__name__,
action_mocks.OsqueryClientMock(),
client_id=client_id,
creator=self.test_username,
query="doesn't matter")
result_flow = self.api.Client(client_id=client_id).Flow(flow_id)
result_flow.WaitUntilDone()
format_csv = api_osquery_pb2.ApiGetOsqueryResultsArgs.Format.CSV
return result_flow.GetOsqueryResults(format_csv)
def testExportSomeResults(self):
stdout = """
[
{ "foo": "quux", "bar": "norf" },
{ "foo": "blargh", "bar": "plugh" }
]
"""
results_iterator = self._RunOsqueryExportResults(stdout)
output_bytes = next(results_iterator)
output_text = output_bytes.decode("utf-8")
self.assertEqual("foo,bar\r\nquux,norf\r\nblargh,plugh\r\n", output_text)
def testExportNoRows(self):
stdout = """
[
]
"""
output_bytes = b"".join(self._RunOsqueryExportResults(stdout))
output_text = output_bytes.decode("utf-8")
self.assertEmpty(output_text)
def testExportUnicodeCharacters(self):
stdout = """
[
{ "🇬 🇷 🇷": "🔝🔝🔝"}
]
"""
results_iterator = self._RunOsqueryExportResults(stdout)
output_bytes = next(results_iterator)
output_text = output_bytes.decode("utf-8")
self.assertEqual("🇬 🇷 🇷\r\n🔝🔝🔝\r\n", output_text)
def testExportMultipleChunks(self):
row_count = 100
split_pieces = 10
cell_value = "fixed"
table = [{"column1": cell_value}] * row_count
table_json = json.dumps(table)
table_bytes = row_count * len(cell_value.encode("utf-8"))
chunk_bytes = table_bytes // split_pieces
with test_lib.ConfigOverrider({"Osquery.max_chunk_size": chunk_bytes}):
results_iterator = self._RunOsqueryExportResults(table_json)
output_bytes = next(results_iterator)
output_text = output_bytes.decode("utf-8")
expected_rows = "\r\n".join([cell_value] * row_count)
self.assertEqual("column1\r\n" + expected_rows + "\r\n", output_text)
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main) | 0.746693 | 0.351784 |
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
import os
import json
import gdown
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
from PULSE.loss.sphericaloptimizer import SphericalOptimizer
__PREFIX__ = os.path.dirname(os.path.realpath(__file__))
class PULSE(object):
def __init__(self, data='processed images/', batch_size=64, image_size=32, lr=0.0001, ngpu=1):
#path to the dataset used for training which has preprcossed downsampled images.
self.dataroot = data
#the batch size used in training.
self.batch_size = batch_size
#the spatial size of the image used for training.
self.image_size = image_size
#learning rate for training.
self.lr = lr
#number of GPUs available for training. If no GPU is available, the model will train on CPU. Here, we have only 1 GPU available.
self.ngpu = ngpu
if ngpu > 0 and not torch.cuda.is_available():
raise ValueError('ngpu > 0 but cuda not available')
#device used for training.
self.device = torch.device("cuda:0" if (torch.cuda.is_available() and ngpu > 0) else "cpu")
#linear mapping layer used to map the latent distribution to that of the input mapping network
self.lrelu = torch.nn.LeakyReLU(negative_slope=0.2)
#the generator network of the stylegan
self.synthesis = G_synthesis().cuda()
#the input mapping network of the stylegan
self.inp_mapping = G_mapping().cuda()
if ngpu > 0 and not torch.cuda.is_available():
raise ValueError('ngpu > 0 but cuda not available')
print('Loading the synthesis network')
#download weights for the pre-trained generator network of the stylegan
with open( __PREFIX__+"/config/file_downloader.json", 'rb') as fp:
json_file = json.load(fp)
url = 'https://drive.google.com/uc?id={}'.format(json_file['synthesis'])
gdown.download(url, 'synthesis.pt', quiet=False)
f1 = 'synthesis.pt'
self.synthesis.load_state_dict(torch.load(f1))
for params in self.synthesis.parameters():
params.requires_grad = False
print('Loading the input mapping network')
#download weights for the pre-trained input mapping network of the stylegan
with open( __PREFIX__+"/config/file_downloader.json", 'rb') as fp:
json_file = json.load(fp)
url = 'https://drive.google.com/uc?id={}'.format(json_file['mapping'])
gdown.download(url, 'mapping.pt', quiet=False)
f1 = 'mapping.pt'
self.inp_mapping.load_state_dict(torch.load(f1))
#create a gaussian distribution of latent vectors
with torch.no_grad():
latent_input = torch.randn((1000000,512), dtype=torch.float32, device="cuda")
latent_output = torch.nn.LeakyReLU(5)(inp_mapping(latent_input))
self.gaussian = {"mean": latent_output.mean(0), "std": latent_output.std(0)}
#save the distribution as a pytorch file.
torch.save(self.gaussian, "gaussian.pth")
def data_loader(self):
#create the dataset
dataset = dset.ImageFolder(root = self.dataroot,
transform = transforms.Compose([
transforms.Resize(self.image_size),
transforms.CenterCrop(self.image_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
#create the dataloader
dataloader = torch.utils.data.DataLoader(dataset, batch_size = self.batch_size,
shuffle = True)
return dataloader
def train(self):
latent = torch.randn((self.batch_size, 18, 512), dtype=torch.float, requires_grad=True, device='cuda')
dataloader = self.data_loader()
#generate a list of noise tensors
noise = [] # stores all of the noise tensors
#noise_optimizer = [] # stores the noise tensors that we want to optimize on
for i in range(18):
# dimension of the ith noise tensor
res = (self.batch_size, 1, 2**(i//2+2), 2**(i//2+2))
#generate a random tensor that is to be used as noise
new_noise = torch.randn(res, dtype=torch.float, device='cuda')
new_noise.requires_grad = True
#append the noise tensors in a list
noise.append(new_noise)
#add the noise to the latent distribution
vars = [latent] + noise
#set up Adam as the base optimizer function
optimizer_function = optim.Adam
#modify the adam optimizer to work for hyperspheres
optimizer = SphericalOptimizer(optimizer_function, vars, self.lr) | PULSE.py | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
import os
import json
import gdown
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
from PULSE.loss.sphericaloptimizer import SphericalOptimizer
__PREFIX__ = os.path.dirname(os.path.realpath(__file__))
class PULSE(object):
def __init__(self, data='processed images/', batch_size=64, image_size=32, lr=0.0001, ngpu=1):
#path to the dataset used for training which has preprcossed downsampled images.
self.dataroot = data
#the batch size used in training.
self.batch_size = batch_size
#the spatial size of the image used for training.
self.image_size = image_size
#learning rate for training.
self.lr = lr
#number of GPUs available for training. If no GPU is available, the model will train on CPU. Here, we have only 1 GPU available.
self.ngpu = ngpu
if ngpu > 0 and not torch.cuda.is_available():
raise ValueError('ngpu > 0 but cuda not available')
#device used for training.
self.device = torch.device("cuda:0" if (torch.cuda.is_available() and ngpu > 0) else "cpu")
#linear mapping layer used to map the latent distribution to that of the input mapping network
self.lrelu = torch.nn.LeakyReLU(negative_slope=0.2)
#the generator network of the stylegan
self.synthesis = G_synthesis().cuda()
#the input mapping network of the stylegan
self.inp_mapping = G_mapping().cuda()
if ngpu > 0 and not torch.cuda.is_available():
raise ValueError('ngpu > 0 but cuda not available')
print('Loading the synthesis network')
#download weights for the pre-trained generator network of the stylegan
with open( __PREFIX__+"/config/file_downloader.json", 'rb') as fp:
json_file = json.load(fp)
url = 'https://drive.google.com/uc?id={}'.format(json_file['synthesis'])
gdown.download(url, 'synthesis.pt', quiet=False)
f1 = 'synthesis.pt'
self.synthesis.load_state_dict(torch.load(f1))
for params in self.synthesis.parameters():
params.requires_grad = False
print('Loading the input mapping network')
#download weights for the pre-trained input mapping network of the stylegan
with open( __PREFIX__+"/config/file_downloader.json", 'rb') as fp:
json_file = json.load(fp)
url = 'https://drive.google.com/uc?id={}'.format(json_file['mapping'])
gdown.download(url, 'mapping.pt', quiet=False)
f1 = 'mapping.pt'
self.inp_mapping.load_state_dict(torch.load(f1))
#create a gaussian distribution of latent vectors
with torch.no_grad():
latent_input = torch.randn((1000000,512), dtype=torch.float32, device="cuda")
latent_output = torch.nn.LeakyReLU(5)(inp_mapping(latent_input))
self.gaussian = {"mean": latent_output.mean(0), "std": latent_output.std(0)}
#save the distribution as a pytorch file.
torch.save(self.gaussian, "gaussian.pth")
def data_loader(self):
#create the dataset
dataset = dset.ImageFolder(root = self.dataroot,
transform = transforms.Compose([
transforms.Resize(self.image_size),
transforms.CenterCrop(self.image_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
#create the dataloader
dataloader = torch.utils.data.DataLoader(dataset, batch_size = self.batch_size,
shuffle = True)
return dataloader
def train(self):
latent = torch.randn((self.batch_size, 18, 512), dtype=torch.float, requires_grad=True, device='cuda')
dataloader = self.data_loader()
#generate a list of noise tensors
noise = [] # stores all of the noise tensors
#noise_optimizer = [] # stores the noise tensors that we want to optimize on
for i in range(18):
# dimension of the ith noise tensor
res = (self.batch_size, 1, 2**(i//2+2), 2**(i//2+2))
#generate a random tensor that is to be used as noise
new_noise = torch.randn(res, dtype=torch.float, device='cuda')
new_noise.requires_grad = True
#append the noise tensors in a list
noise.append(new_noise)
#add the noise to the latent distribution
vars = [latent] + noise
#set up Adam as the base optimizer function
optimizer_function = optim.Adam
#modify the adam optimizer to work for hyperspheres
optimizer = SphericalOptimizer(optimizer_function, vars, self.lr) | 0.718989 | 0.429549 |
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
try:
import pwd
HAS_PWD = True
except ImportError:
HAS_PWD = False
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
patch,
)
# Import Salt Libs
import salt.modules.useradd as useradd
from salt.exceptions import CommandExecutionError
class UserAddTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.useradd
'''
def setup_loader_modules(self):
return {useradd: {}}
@classmethod
def setUpClass(cls):
cls.mock_pwall = {'gid': 0,
'groups': ['root'],
'home': '/root',
'name': 'root',
'passwd': 'x',
'shell': '/bin/bash',
'uid': 0,
'fullname': 'root',
'roomnumber': '',
'workphone': '',
'homephone': '',
'other': ''}
@classmethod
def tearDownClass(cls):
del cls.mock_pwall
# 'add' function tests: 1
def test_add(self):
'''
Test for adding a user
'''
with patch.dict(useradd.__grains__, {'kernel': 'OpenBSD'}):
mock_primary = MagicMock(return_value='Salt')
with patch.dict(useradd.__salt__,
{'file.gid_to_group': mock_primary}):
mock = MagicMock(return_value={'retcode': 0})
with patch.dict(useradd.__salt__, {'cmd.run_all': mock}):
self.assertTrue(useradd.add('Salt'))
mock = MagicMock(return_value={'retcode': 1})
with patch.dict(useradd.__salt__, {'cmd.run_all': mock}):
self.assertFalse(useradd.add('Salt'))
# 'getent' function tests: 2
@skipIf(HAS_PWD is False, 'The pwd module is not available')
def test_getent(self):
'''
Test if user.getent already have a value
'''
with patch('salt.modules.useradd.__context__', MagicMock(return_value='Salt')):
self.assertTrue(useradd.getent())
@skipIf(HAS_PWD is False, 'The pwd module is not available')
def test_getent_user(self):
'''
Tests the return information on all users
'''
with patch('pwd.getpwall', MagicMock(return_value=[''])):
ret = [{'gid': 0,
'groups': ['root'],
'home': '/root',
'name': 'root',
'passwd': 'x',
'shell': '/bin/bash',
'uid': 0,
'fullname': 'root',
'roomnumber': '',
'workphone': '',
'homephone': '',
'other': ''}]
with patch('salt.modules.useradd._format_info', MagicMock(return_value=self.mock_pwall)):
self.assertEqual(useradd.getent(), ret)
# 'chuid' function tests: 1
def test_chuid(self):
'''
Test if the uid of a user change
'''
mock = MagicMock(return_value={'uid': 11})
with patch.object(useradd, 'info', mock):
self.assertTrue(useradd.chuid('name', 11))
mock_run = MagicMock(return_value=None)
with patch.dict(useradd.__salt__, {'cmd.run': mock_run}):
mock = MagicMock(side_effect=[{'uid': 11}, {'uid': 11}])
with patch.object(useradd, 'info', mock):
self.assertFalse(useradd.chuid('name', 22))
with patch.dict(useradd.__salt__, {'cmd.run': mock_run}):
mock = MagicMock(side_effect=[{'uid': 11}, {'uid': 22}])
with patch.object(useradd, 'info', mock):
self.assertTrue(useradd.chuid('name', 11))
# 'chgid' function tests: 1
def test_chgid(self):
'''
Test the default group of the user
'''
mock = MagicMock(return_value={'gid': 11})
with patch.object(useradd, 'info', mock):
self.assertTrue(useradd.chgid('name', 11))
mock_run = MagicMock(return_value=None)
with patch.dict(useradd.__salt__, {'cmd.run': mock_run}):
mock = MagicMock(side_effect=[{'gid': 22}, {'gid': 22}])
with patch.object(useradd, 'info', mock):
self.assertFalse(useradd.chgid('name', 11))
with patch.dict(useradd.__salt__, {'cmd.run': mock_run}):
mock = MagicMock(side_effect=[{'gid': 11}, {'gid': 22}])
with patch.object(useradd, 'info', mock):
self.assertTrue(useradd.chgid('name', 11))
# 'chshell' function tests: 1
def test_chshell(self):
'''
Test the default shell of user
'''
mock = MagicMock(return_value={'shell': '/bin/bash'})
with patch.object(useradd, 'info', mock):
self.assertTrue(useradd.chshell('name', '/bin/bash'))
mock_run = MagicMock(return_value=None)
with patch.dict(useradd.__salt__, {'cmd.run': mock_run}):
mock = MagicMock(side_effect=[{'shell': '/bin/bash'},
{'shell': '/bin/bash'}])
with patch.object(useradd, 'info', mock):
self.assertFalse(useradd.chshell('name', '/usr/bash'))
with patch.dict(useradd.__salt__, {'cmd.run': mock_run}):
mock = MagicMock(side_effect=[{'shell': '/bin/bash'},
{'shell': '/usr/bash'}])
with patch.object(useradd, 'info', mock):
self.assertTrue(useradd.chshell('name', '/bin/bash'))
# 'chhome' function tests: 1

def test_chhome(self):
    '''
    Test if home directory given is same as previous home directory.
    '''
    # Home already matches -> True without running any command.
    mock = MagicMock(return_value={'home': '/root'})
    with patch.object(useradd, 'info', mock):
        self.assertTrue(useradd.chhome('name', '/root'))
    mock = MagicMock(return_value=None)
    with patch.dict(useradd.__salt__, {'cmd.run': mock}):
        # Home unchanged after the command ran -> False.
        mock = MagicMock(side_effect=[{'home': '/root'}, {'home': '/root'}])
        with patch.object(useradd, 'info', mock):
            self.assertFalse(useradd.chhome('name', '/user'))
    mock = MagicMock(return_value=None)
    with patch.dict(useradd.__salt__, {'cmd.run': mock}):
        # Pre-check already reports the requested home -> True.
        mock = MagicMock(side_effect=[{'home': '/root'}, {'home': '/root'}])
        with patch.object(useradd, 'info', mock):
            self.assertTrue(useradd.chhome('name', '/root'))
# 'chgroups' function tests: 1

def test_chgroups(self):
    '''
    Test if user groups changed.
    '''
    # Desired groups equal current groups -> True, no command needed.
    mock = MagicMock(return_value=['wheel', 'root'])
    with patch.object(useradd, 'list_groups', mock):
        self.assertTrue(useradd.chgroups('foo', 'wheel,root'))
    mock = MagicMock(return_value=['wheel', 'root'])
    with patch.object(useradd, 'list_groups', mock):
        # OpenBSD branch uses cmd.run_all; retcode decides the result.
        with patch.dict(useradd.__grains__, {'kernel': 'OpenBSD'}):
            mock_runall = MagicMock(return_value={'retcode': False,
                                                  'stderr': ''})
            with patch.dict(useradd.__salt__, {'cmd.run_all': mock_runall}):
                self.assertTrue(useradd.chgroups('foo', 'wheel,test,root'))
            mock_runall = MagicMock(return_value={'retcode': True,
                                                  'stderr': ''})
            with patch.dict(useradd.__salt__, {'cmd.run_all': mock_runall}):
                self.assertFalse(useradd.chgroups('foo', 'wheel,test,root'))
# 'chfullname' function tests: 1

def test_chfullname(self):
    '''
    Test if the user's Full Name is changed.
    '''
    # No GECOS data available -> False.
    mock = MagicMock(return_value=False)
    with patch.object(useradd, '_get_gecos', mock):
        self.assertFalse(useradd.chfullname('Salt', 'SaltStack'))
    # Fullname already matches -> True.
    mock = MagicMock(return_value={'fullname': 'SaltStack'})
    with patch.object(useradd, '_get_gecos', mock):
        self.assertTrue(useradd.chfullname('Salt', 'SaltStack'))
    # Post-change info still reports a different fullname -> False.
    mock = MagicMock(return_value={'fullname': 'SaltStack'})
    with patch.object(useradd, '_get_gecos', mock):
        mock = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {'cmd.run': mock}):
            mock = MagicMock(return_value={'fullname': 'SaltStack2'})
            with patch.object(useradd, 'info', mock):
                self.assertFalse(useradd.chfullname('Salt', 'SaltStack1'))
    mock = MagicMock(return_value={'fullname': 'SaltStack2'})
    with patch.object(useradd, '_get_gecos', mock):
        mock = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {'cmd.run': mock}):
            mock = MagicMock(return_value={'fullname': 'SaltStack2'})
            with patch.object(useradd, 'info', mock):
                self.assertFalse(useradd.chfullname('Salt', 'SaltStack1'))
# 'chroomnumber' function tests: 1

def test_chroomnumber(self):
    '''
    Test if the user's Room Number is changed.
    '''
    # No GECOS data available -> False.
    mock = MagicMock(return_value=False)
    with patch.object(useradd, '_get_gecos', mock):
        self.assertFalse(useradd.chroomnumber('salt', 1))
    # Room number already matches (compared as string) -> True.
    mock = MagicMock(return_value={'roomnumber': '1'})
    with patch.object(useradd, '_get_gecos', mock):
        self.assertTrue(useradd.chroomnumber('salt', 1))
    # Post-change info still reports a different room number -> False.
    mock = MagicMock(return_value={'roomnumber': '2'})
    with patch.object(useradd, '_get_gecos', mock):
        mock = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {'cmd.run': mock}):
            mock = MagicMock(return_value={'roomnumber': '3'})
            with patch.object(useradd, 'info', mock):
                self.assertFalse(useradd.chroomnumber('salt', 1))
    mock = MagicMock(return_value={'roomnumber': '3'})
    with patch.object(useradd, '_get_gecos', mock):
        mock = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {'cmd.run': mock}):
            mock = MagicMock(return_value={'roomnumber': '3'})
            with patch.object(useradd, 'info', mock):
                self.assertFalse(useradd.chroomnumber('salt', 1))
# 'chworkphone' function tests: 1

def test_chworkphone(self):
    '''
    Test if the user's Work Phone is changed.
    '''
    # No GECOS data available -> False.
    mock = MagicMock(return_value=False)
    with patch.object(useradd, '_get_gecos', mock):
        self.assertFalse(useradd.chworkphone('salt', 1))
    # Work phone already matches (compared as string) -> True.
    mock = MagicMock(return_value={'workphone': '1'})
    with patch.object(useradd, '_get_gecos', mock):
        self.assertTrue(useradd.chworkphone('salt', 1))
    # Post-change info still reports a different work phone -> False.
    mock = MagicMock(return_value={'workphone': '2'})
    with patch.object(useradd, '_get_gecos', mock):
        mock = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {'cmd.run': mock}):
            mock = MagicMock(return_value={'workphone': '3'})
            with patch.object(useradd, 'info', mock):
                self.assertFalse(useradd.chworkphone('salt', 1))
    mock = MagicMock(return_value={'workphone': '3'})
    with patch.object(useradd, '_get_gecos', mock):
        mock = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {'cmd.run': mock}):
            mock = MagicMock(return_value={'workphone': '3'})
            with patch.object(useradd, 'info', mock):
                self.assertFalse(useradd.chworkphone('salt', 1))
# 'chhomephone' function tests: 1

def test_chhomephone(self):
    '''
    Test if the user's Home Phone is changed.
    '''
    # No GECOS data available -> False.
    mock = MagicMock(return_value=False)
    with patch.object(useradd, '_get_gecos', mock):
        self.assertFalse(useradd.chhomephone('salt', 1))
    # Home phone already matches (compared as string) -> True.
    mock = MagicMock(return_value={'homephone': '1'})
    with patch.object(useradd, '_get_gecos', mock):
        self.assertTrue(useradd.chhomephone('salt', 1))
    # Post-change info still reports a different home phone -> False.
    mock = MagicMock(return_value={'homephone': '2'})
    with patch.object(useradd, '_get_gecos', mock):
        mock = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {'cmd.run': mock}):
            mock = MagicMock(return_value={'homephone': '3'})
            with patch.object(useradd, 'info', mock):
                self.assertFalse(useradd.chhomephone('salt', 1))
    mock = MagicMock(return_value={'homephone': '3'})
    with patch.object(useradd, '_get_gecos', mock):
        mock = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {'cmd.run': mock}):
            mock = MagicMock(return_value={'homephone': '3'})
            with patch.object(useradd, 'info', mock):
                self.assertFalse(useradd.chhomephone('salt', 1))
# 'chother' function tests: 1

def test_chother(self):
    '''
    Test if the user's other GECOS attribute is changed.
    '''
    # No GECOS data available -> False.
    mock = MagicMock(return_value=False)
    with patch.object(useradd, '_get_gecos', mock):
        self.assertFalse(useradd.chother('salt', 1))
    # Attribute already matches -> True.
    mock = MagicMock(return_value={'other': 'foobar'})
    with patch.object(useradd, '_get_gecos', mock):
        self.assertTrue(useradd.chother('salt', 'foobar'))
    # Post-change info still reports a different value -> False.
    mock = MagicMock(return_value={'other': 'foobar2'})
    with patch.object(useradd, '_get_gecos', mock):
        mock = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {'cmd.run': mock}):
            mock = MagicMock(return_value={'other': 'foobar3'})
            with patch.object(useradd, 'info', mock):
                self.assertFalse(useradd.chother('salt', 'foobar'))
    mock = MagicMock(return_value={'other': 'foobar3'})
    with patch.object(useradd, '_get_gecos', mock):
        mock = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {'cmd.run': mock}):
            mock = MagicMock(return_value={'other': 'foobar3'})
            with patch.object(useradd, 'info', mock):
                self.assertFalse(useradd.chother('salt', 'foobar'))
# 'info' function tests: 1

@skipIf(HAS_PWD is False, 'The pwd module is not available')
def test_info(self):
    '''
    Test the user information.
    '''
    # Unknown user -> empty dict.
    self.assertEqual(useradd.info('username-that-doesnt-exist'), {})
    # With pwd.getpwnam patched, info() should format the passwd struct.
    mock = MagicMock(return_value=pwd.struct_passwd(('_TEST_GROUP',
                                                     '*',
                                                     83,
                                                     83,
                                                     'AMaViS Daemon',
                                                     '/var/virusmails',
                                                     '/usr/bin/false')))
    with patch.object(pwd, 'getpwnam', mock):
        self.assertEqual(useradd.info('username-that-doesnt-exist')['name'], '_TEST_GROUP')
# 'list_groups' function tests: 1

def test_list_groups(self):
    '''
    Test if it returns a list of groups the named user belongs to.
    '''
    # list_groups() simply delegates to salt.utils.user.get_group_list.
    with patch('salt.utils.user.get_group_list', MagicMock(return_value='Salt')):
        self.assertEqual(useradd.list_groups('name'), 'Salt')
# 'list_users' function tests: 1

@skipIf(HAS_PWD is False, 'The pwd module is not available')
def test_list_users(self):
    '''
    Test if it returns a list of all users.
    '''
    # Runs against the real pwd database; any host has at least one user.
    self.assertTrue(useradd.list_users())
# 'rename' function tests: 1

def test_rename(self):
    '''
    Test if the username for a named user changed.
    '''
    # Source user does not exist -> CommandExecutionError.
    mock = MagicMock(return_value=False)
    with patch.object(useradd, 'info', mock):
        self.assertRaises(CommandExecutionError, useradd.rename, 'salt', 1)
    # Target user already exists -> CommandExecutionError.
    mock = MagicMock(return_value=True)
    with patch.object(useradd, 'info', mock):
        self.assertRaises(CommandExecutionError, useradd.rename, 'salt', 1)
    mock = MagicMock(return_value=None)
    with patch.dict(useradd.__salt__, {'cmd.run': mock}):
        # info() call order: new-name check, pre-info, post-info.
        mock = MagicMock(side_effect=[False, {'name': ''},
                                      {'name': 'salt'}])
        with patch.object(useradd, 'info', mock):
            self.assertTrue(useradd.rename('name', 'salt'))
    mock = MagicMock(return_value=None)
    with patch.dict(useradd.__salt__, {'cmd.run': mock}):
        # Post-info name did not change -> False.
        mock = MagicMock(side_effect=[False, {'name': ''},
                                      {'name': ''}])
        with patch.object(useradd, 'info', mock):
            self.assertFalse(useradd.rename('salt', 'salt'))
def test_build_gecos_field(self):
    '''
    Test if gecos fields are built correctly (removing trailing commas).
    '''
    test_gecos = {'fullname': 'Testing',
                  'roomnumber': 1234,
                  'workphone': 22222,
                  'homephone': 99999}
    expected_gecos_fields = 'Testing,1234,22222,99999'
    self.assertEqual(useradd._build_gecos(test_gecos), expected_gecos_fields)
    # Missing middle fields are kept as empty slots so positions stay valid.
    test_gecos.pop('roomnumber')
    test_gecos.pop('workphone')
    expected_gecos_fields = 'Testing,,,99999'
    self.assertEqual(useradd._build_gecos(test_gecos), expected_gecos_fields)
    # Trailing empty fields are stripped entirely.
    test_gecos.pop('homephone')
    expected_gecos_fields = 'Testing'
    self.assertEqual(useradd._build_gecos(test_gecos), expected_gecos_fields)
from __future__ import absolute_import, print_function, unicode_literals
try:
import pwd
HAS_PWD = True
except ImportError:
HAS_PWD = False
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
patch,
)
# Import Salt Libs
import salt.modules.useradd as useradd
from salt.exceptions import CommandExecutionError
class UserAddTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Test cases for salt.modules.useradd
    '''

    def setup_loader_modules(self):
        # No extra loader globals needed; patch.dict fills them per test.
        return {useradd: {}}

    @classmethod
    def setUpClass(cls):
        # Canonical _format_info() result shared by getent tests.
        cls.mock_pwall = {'gid': 0,
                          'groups': ['root'],
                          'home': '/root',
                          'name': 'root',
                          'passwd': 'x',
                          'shell': '/bin/bash',
                          'uid': 0,
                          'fullname': 'root',
                          'roomnumber': '',
                          'workphone': '',
                          'homephone': '',
                          'other': ''}

    @classmethod
    def tearDownClass(cls):
        del cls.mock_pwall

    # 'add' function tests: 1

    def test_add(self):
        '''
        Test for adding a user
        '''
        with patch.dict(useradd.__grains__, {'kernel': 'OpenBSD'}):
            mock_primary = MagicMock(return_value='Salt')
            with patch.dict(useradd.__salt__,
                            {'file.gid_to_group': mock_primary}):
                # Success and failure are driven solely by the retcode.
                mock = MagicMock(return_value={'retcode': 0})
                with patch.dict(useradd.__salt__, {'cmd.run_all': mock}):
                    self.assertTrue(useradd.add('Salt'))
                mock = MagicMock(return_value={'retcode': 1})
                with patch.dict(useradd.__salt__, {'cmd.run_all': mock}):
                    self.assertFalse(useradd.add('Salt'))

    # 'getent' function tests: 2

    @skipIf(HAS_PWD is False, 'The pwd module is not available')
    def test_getent(self):
        '''
        Test if user.getent already have a value
        '''
        with patch('salt.modules.useradd.__context__', MagicMock(return_value='Salt')):
            self.assertTrue(useradd.getent())

    @skipIf(HAS_PWD is False, 'The pwd module is not available')
    def test_getent_user(self):
        '''
        Tests the return information on all users
        '''
        with patch('pwd.getpwall', MagicMock(return_value=[''])):
            ret = [{'gid': 0,
                    'groups': ['root'],
                    'home': '/root',
                    'name': 'root',
                    'passwd': 'x',
                    'shell': '/bin/bash',
                    'uid': 0,
                    'fullname': 'root',
                    'roomnumber': '',
                    'workphone': '',
                    'homephone': '',
                    'other': ''}]
            with patch('salt.modules.useradd._format_info', MagicMock(return_value=self.mock_pwall)):
                self.assertEqual(useradd.getent(), ret)

    # 'chuid' function tests: 1

    def test_chuid(self):
        '''
        Test if the uid of a user change
        '''
        # uid already matches -> True without running any command.
        mock = MagicMock(return_value={'uid': 11})
        with patch.object(useradd, 'info', mock):
            self.assertTrue(useradd.chuid('name', 11))
        mock_run = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {'cmd.run': mock_run}):
            # uid unchanged after the command ran -> False.
            mock = MagicMock(side_effect=[{'uid': 11}, {'uid': 11}])
            with patch.object(useradd, 'info', mock):
                self.assertFalse(useradd.chuid('name', 22))
        with patch.dict(useradd.__salt__, {'cmd.run': mock_run}):
            mock = MagicMock(side_effect=[{'uid': 11}, {'uid': 22}])
            with patch.object(useradd, 'info', mock):
                self.assertTrue(useradd.chuid('name', 11))

    # 'chgid' function tests: 1

    def test_chgid(self):
        '''
        Test the default group of the user
        '''
        mock = MagicMock(return_value={'gid': 11})
        with patch.object(useradd, 'info', mock):
            self.assertTrue(useradd.chgid('name', 11))
        mock_run = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {'cmd.run': mock_run}):
            mock = MagicMock(side_effect=[{'gid': 22}, {'gid': 22}])
            with patch.object(useradd, 'info', mock):
                self.assertFalse(useradd.chgid('name', 11))
        with patch.dict(useradd.__salt__, {'cmd.run': mock_run}):
            mock = MagicMock(side_effect=[{'gid': 11}, {'gid': 22}])
            with patch.object(useradd, 'info', mock):
                self.assertTrue(useradd.chgid('name', 11))

    # 'chshell' function tests: 1

    def test_chshell(self):
        '''
        Test the default shell of user
        '''
        mock = MagicMock(return_value={'shell': '/bin/bash'})
        with patch.object(useradd, 'info', mock):
            self.assertTrue(useradd.chshell('name', '/bin/bash'))
        mock_run = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {'cmd.run': mock_run}):
            mock = MagicMock(side_effect=[{'shell': '/bin/bash'},
                                          {'shell': '/bin/bash'}])
            with patch.object(useradd, 'info', mock):
                self.assertFalse(useradd.chshell('name', '/usr/bash'))
        with patch.dict(useradd.__salt__, {'cmd.run': mock_run}):
            mock = MagicMock(side_effect=[{'shell': '/bin/bash'},
                                          {'shell': '/usr/bash'}])
            with patch.object(useradd, 'info', mock):
                self.assertTrue(useradd.chshell('name', '/bin/bash'))

    # 'chhome' function tests: 1

    def test_chhome(self):
        '''
        Test if home directory given is same as previous home directory
        '''
        mock = MagicMock(return_value={'home': '/root'})
        with patch.object(useradd, 'info', mock):
            self.assertTrue(useradd.chhome('name', '/root'))
        mock = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {'cmd.run': mock}):
            mock = MagicMock(side_effect=[{'home': '/root'}, {'home': '/root'}])
            with patch.object(useradd, 'info', mock):
                self.assertFalse(useradd.chhome('name', '/user'))
        mock = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {'cmd.run': mock}):
            mock = MagicMock(side_effect=[{'home': '/root'}, {'home': '/root'}])
            with patch.object(useradd, 'info', mock):
                self.assertTrue(useradd.chhome('name', '/root'))

    # 'chgroups' function tests: 1

    def test_chgroups(self):
        '''
        Test if user groups changed
        '''
        mock = MagicMock(return_value=['wheel', 'root'])
        with patch.object(useradd, 'list_groups', mock):
            self.assertTrue(useradd.chgroups('foo', 'wheel,root'))
        mock = MagicMock(return_value=['wheel', 'root'])
        with patch.object(useradd, 'list_groups', mock):
            with patch.dict(useradd.__grains__, {'kernel': 'OpenBSD'}):
                mock_runall = MagicMock(return_value={'retcode': False,
                                                      'stderr': ''})
                with patch.dict(useradd.__salt__, {'cmd.run_all': mock_runall}):
                    self.assertTrue(useradd.chgroups('foo', 'wheel,test,root'))
                mock_runall = MagicMock(return_value={'retcode': True,
                                                      'stderr': ''})
                with patch.dict(useradd.__salt__, {'cmd.run_all': mock_runall}):
                    self.assertFalse(useradd.chgroups('foo', 'wheel,test,root'))

    # 'chfullname' function tests: 1

    def test_chfullname(self):
        '''
        Test if the user's Full Name is changed
        '''
        mock = MagicMock(return_value=False)
        with patch.object(useradd, '_get_gecos', mock):
            self.assertFalse(useradd.chfullname('Salt', 'SaltStack'))
        mock = MagicMock(return_value={'fullname': 'SaltStack'})
        with patch.object(useradd, '_get_gecos', mock):
            self.assertTrue(useradd.chfullname('Salt', 'SaltStack'))
        mock = MagicMock(return_value={'fullname': 'SaltStack'})
        with patch.object(useradd, '_get_gecos', mock):
            mock = MagicMock(return_value=None)
            with patch.dict(useradd.__salt__, {'cmd.run': mock}):
                mock = MagicMock(return_value={'fullname': 'SaltStack2'})
                with patch.object(useradd, 'info', mock):
                    self.assertFalse(useradd.chfullname('Salt', 'SaltStack1'))
        mock = MagicMock(return_value={'fullname': 'SaltStack2'})
        with patch.object(useradd, '_get_gecos', mock):
            mock = MagicMock(return_value=None)
            with patch.dict(useradd.__salt__, {'cmd.run': mock}):
                mock = MagicMock(return_value={'fullname': 'SaltStack2'})
                with patch.object(useradd, 'info', mock):
                    self.assertFalse(useradd.chfullname('Salt', 'SaltStack1'))

    # 'chroomnumber' function tests: 1

    def test_chroomnumber(self):
        '''
        Test if the user's Room Number is changed
        '''
        mock = MagicMock(return_value=False)
        with patch.object(useradd, '_get_gecos', mock):
            self.assertFalse(useradd.chroomnumber('salt', 1))
        mock = MagicMock(return_value={'roomnumber': '1'})
        with patch.object(useradd, '_get_gecos', mock):
            self.assertTrue(useradd.chroomnumber('salt', 1))
        mock = MagicMock(return_value={'roomnumber': '2'})
        with patch.object(useradd, '_get_gecos', mock):
            mock = MagicMock(return_value=None)
            with patch.dict(useradd.__salt__, {'cmd.run': mock}):
                mock = MagicMock(return_value={'roomnumber': '3'})
                with patch.object(useradd, 'info', mock):
                    self.assertFalse(useradd.chroomnumber('salt', 1))
        mock = MagicMock(return_value={'roomnumber': '3'})
        with patch.object(useradd, '_get_gecos', mock):
            mock = MagicMock(return_value=None)
            with patch.dict(useradd.__salt__, {'cmd.run': mock}):
                mock = MagicMock(return_value={'roomnumber': '3'})
                with patch.object(useradd, 'info', mock):
                    self.assertFalse(useradd.chroomnumber('salt', 1))

    # 'chworkphone' function tests: 1

    def test_chworkphone(self):
        '''
        Test if the user's Work Phone is changed
        '''
        mock = MagicMock(return_value=False)
        with patch.object(useradd, '_get_gecos', mock):
            self.assertFalse(useradd.chworkphone('salt', 1))
        mock = MagicMock(return_value={'workphone': '1'})
        with patch.object(useradd, '_get_gecos', mock):
            self.assertTrue(useradd.chworkphone('salt', 1))
        mock = MagicMock(return_value={'workphone': '2'})
        with patch.object(useradd, '_get_gecos', mock):
            mock = MagicMock(return_value=None)
            with patch.dict(useradd.__salt__, {'cmd.run': mock}):
                mock = MagicMock(return_value={'workphone': '3'})
                with patch.object(useradd, 'info', mock):
                    self.assertFalse(useradd.chworkphone('salt', 1))
        mock = MagicMock(return_value={'workphone': '3'})
        with patch.object(useradd, '_get_gecos', mock):
            mock = MagicMock(return_value=None)
            with patch.dict(useradd.__salt__, {'cmd.run': mock}):
                mock = MagicMock(return_value={'workphone': '3'})
                with patch.object(useradd, 'info', mock):
                    self.assertFalse(useradd.chworkphone('salt', 1))

    # 'chhomephone' function tests: 1

    def test_chhomephone(self):
        '''
        Test if the user's Home Phone is changed
        '''
        mock = MagicMock(return_value=False)
        with patch.object(useradd, '_get_gecos', mock):
            self.assertFalse(useradd.chhomephone('salt', 1))
        mock = MagicMock(return_value={'homephone': '1'})
        with patch.object(useradd, '_get_gecos', mock):
            self.assertTrue(useradd.chhomephone('salt', 1))
        mock = MagicMock(return_value={'homephone': '2'})
        with patch.object(useradd, '_get_gecos', mock):
            mock = MagicMock(return_value=None)
            with patch.dict(useradd.__salt__, {'cmd.run': mock}):
                mock = MagicMock(return_value={'homephone': '3'})
                with patch.object(useradd, 'info', mock):
                    self.assertFalse(useradd.chhomephone('salt', 1))
        mock = MagicMock(return_value={'homephone': '3'})
        with patch.object(useradd, '_get_gecos', mock):
            mock = MagicMock(return_value=None)
            with patch.dict(useradd.__salt__, {'cmd.run': mock}):
                mock = MagicMock(return_value={'homephone': '3'})
                with patch.object(useradd, 'info', mock):
                    self.assertFalse(useradd.chhomephone('salt', 1))

    # 'chother' function tests: 1

    def test_chother(self):
        '''
        Test if the user's other GECOS attribute is changed
        '''
        mock = MagicMock(return_value=False)
        with patch.object(useradd, '_get_gecos', mock):
            self.assertFalse(useradd.chother('salt', 1))
        mock = MagicMock(return_value={'other': 'foobar'})
        with patch.object(useradd, '_get_gecos', mock):
            self.assertTrue(useradd.chother('salt', 'foobar'))
        mock = MagicMock(return_value={'other': 'foobar2'})
        with patch.object(useradd, '_get_gecos', mock):
            mock = MagicMock(return_value=None)
            with patch.dict(useradd.__salt__, {'cmd.run': mock}):
                mock = MagicMock(return_value={'other': 'foobar3'})
                with patch.object(useradd, 'info', mock):
                    self.assertFalse(useradd.chother('salt', 'foobar'))
        mock = MagicMock(return_value={'other': 'foobar3'})
        with patch.object(useradd, '_get_gecos', mock):
            mock = MagicMock(return_value=None)
            with patch.dict(useradd.__salt__, {'cmd.run': mock}):
                mock = MagicMock(return_value={'other': 'foobar3'})
                with patch.object(useradd, 'info', mock):
                    self.assertFalse(useradd.chother('salt', 'foobar'))

    # 'info' function tests: 1

    @skipIf(HAS_PWD is False, 'The pwd module is not available')
    def test_info(self):
        '''
        Test the user information
        '''
        self.assertEqual(useradd.info('username-that-doesnt-exist'), {})
        mock = MagicMock(return_value=pwd.struct_passwd(('_TEST_GROUP',
                                                         '*',
                                                         83,
                                                         83,
                                                         'AMaViS Daemon',
                                                         '/var/virusmails',
                                                         '/usr/bin/false')))
        with patch.object(pwd, 'getpwnam', mock):
            self.assertEqual(useradd.info('username-that-doesnt-exist')['name'], '_TEST_GROUP')

    # 'list_groups' function tests: 1

    def test_list_groups(self):
        '''
        Test if it return a list of groups the named user belongs to
        '''
        with patch('salt.utils.user.get_group_list', MagicMock(return_value='Salt')):
            self.assertEqual(useradd.list_groups('name'), 'Salt')

    # 'list_users' function tests: 1

    @skipIf(HAS_PWD is False, 'The pwd module is not available')
    def test_list_users(self):
        '''
        Test if it returns a list of all users
        '''
        self.assertTrue(useradd.list_users())

    # 'rename' function tests: 1

    def test_rename(self):
        '''
        Test if the username for a named user changed
        '''
        mock = MagicMock(return_value=False)
        with patch.object(useradd, 'info', mock):
            self.assertRaises(CommandExecutionError, useradd.rename, 'salt', 1)
        mock = MagicMock(return_value=True)
        with patch.object(useradd, 'info', mock):
            self.assertRaises(CommandExecutionError, useradd.rename, 'salt', 1)
        mock = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {'cmd.run': mock}):
            mock = MagicMock(side_effect=[False, {'name': ''},
                                          {'name': 'salt'}])
            with patch.object(useradd, 'info', mock):
                self.assertTrue(useradd.rename('name', 'salt'))
        mock = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {'cmd.run': mock}):
            mock = MagicMock(side_effect=[False, {'name': ''},
                                          {'name': ''}])
            with patch.object(useradd, 'info', mock):
                self.assertFalse(useradd.rename('salt', 'salt'))

    def test_build_gecos_field(self):
        '''
        Test if gecos fields are built correctly (removing trailing commas)
        '''
        test_gecos = {'fullname': 'Testing',
                      'roomnumber': 1234,
                      'workphone': 22222,
                      'homephone': 99999}
        expected_gecos_fields = 'Testing,1234,22222,99999'
        self.assertEqual(useradd._build_gecos(test_gecos), expected_gecos_fields)
        test_gecos.pop('roomnumber')
        test_gecos.pop('workphone')
        expected_gecos_fields = 'Testing,,,99999'
        self.assertEqual(useradd._build_gecos(test_gecos), expected_gecos_fields)
        test_gecos.pop('homephone')
        expected_gecos_fields = 'Testing'
        self.assertEqual(useradd._build_gecos(test_gecos), expected_gecos_fields)
import os
import argparse
import cv2
import shutil
import itertools
import tqdm
import numpy as np
import json
import six
import tensorflow as tf
try:
import horovod.tensorflow as hvd
except ImportError:
pass
assert six.PY3, "FasterRCNN requires Python 3!"
from tensorpack import *
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.tfutils import optimizer
from tensorpack.tfutils.common import get_tf_version_tuple
import tensorpack.utils.viz as tpviz
from coco import COCODetection
from basemodel import (
image_preprocess, resnet_c4_backbone, resnet_conv5,
resnet_fpn_backbone)
import model_frcnn
import model_mrcnn
from model_frcnn import (
sample_fast_rcnn_targets, fastrcnn_outputs,
fastrcnn_predictions, BoxProposals, FastRCNNHead)
from model_mrcnn import maskrcnn_upXconv_head, maskrcnn_loss
from model_rpn import rpn_head, rpn_losses, generate_rpn_proposals
from model_fpn import (
fpn_model, multilevel_roi_align,
multilevel_rpn_losses, generate_fpn_proposals)
from model_cascade import CascadeRCNNHead
from model_box import (
clip_boxes, crop_and_resize, roi_align, RPNAnchors)
from data import (
get_train_dataflow, get_eval_dataflow,
get_all_anchors, get_all_anchors_fpn)
from viz import (
draw_annotation, draw_proposal_recall,
draw_predictions, draw_final_outputs)
from eval import (
eval_coco, multithread_eval_coco,
detect_one_image, print_coco_metrics, DetectionResult)
from config import finalize_configs, config as cfg
class DetectionModel(ModelDesc):
    """Common base for the C4 and FPN detection models (TF1 graph mode)."""

    def preprocess(self, image):
        # HWC BGR float image -> normalized NCHW tensor with batch of 1.
        image = tf.expand_dims(image, 0)
        image = image_preprocess(image, bgr=True)
        return tf.transpose(image, [0, 3, 1, 2])

    @property
    def training(self):
        return get_current_tower_context().is_training

    def optimizer(self):
        lr = tf.get_variable('learning_rate', initializer=0.003, trainable=False)
        tf.summary.scalar('learning_rate-summary', lr)
        # The learning rate is set for 8 GPUs, and we use trainers with average=False.
        lr = lr / 8.
        opt = tf.train.MomentumOptimizer(lr, 0.9)
        if cfg.TRAIN.NUM_GPUS < 8:
            # Accumulate gradients to emulate an 8-GPU effective batch.
            opt = optimizer.AccumGradOptimizer(opt, 8 // cfg.TRAIN.NUM_GPUS)
        return opt

    def get_inference_tensor_names(self):
        """
        Returns two lists of tensor names to be used to create an inference callable.

        Returns:
            [str]: input names
            [str]: output names
        """
        out = ['output/boxes', 'output/scores', 'output/labels']
        if cfg.MODE_MASK:
            out.append('output/masks')
        return ['image'], out

    def build_graph(self, *inputs):
        # Positional inputs arrive in the order declared by self.inputs().
        inputs = dict(zip(self.input_names, inputs))
        image = self.preprocess(inputs['image'])     # 1CHW
        features = self.backbone(image)
        anchor_inputs = {k: v for k, v in inputs.items() if k.startswith('anchor_')}
        proposals, rpn_losses = self.rpn(image, features, anchor_inputs)  # inputs?
        targets = [inputs[k] for k in ['gt_boxes', 'gt_labels', 'gt_masks'] if k in inputs]
        head_losses = self.roi_heads(image, features, proposals, targets)
        if self.training:
            wd_cost = regularize_cost(
                '.*/W', l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')
            total_cost = tf.add_n(
                rpn_losses + head_losses + [wd_cost], 'total_cost')
            add_moving_summary(total_cost, wd_cost)
            return total_cost
class ResNetC4Model(DetectionModel):
    """Faster/Mask R-CNN with a single-scale ResNet-C4 backbone."""

    def inputs(self):
        ret = [
            tf.placeholder(tf.float32, (None, None, 3), 'image'),
            tf.placeholder(tf.int32, (None, None, cfg.RPN.NUM_ANCHOR), 'anchor_labels'),
            tf.placeholder(tf.float32, (None, None, cfg.RPN.NUM_ANCHOR, 4), 'anchor_boxes'),
            tf.placeholder(tf.float32, (None, 4), 'gt_boxes'),
            tf.placeholder(tf.int64, (None,), 'gt_labels')]   # all > 0
        if cfg.MODE_MASK:
            ret.append(
                tf.placeholder(tf.uint8, (None, None, None), 'gt_masks')
            )   # NR_GT x height x width
        return ret

    def backbone(self, image):
        # Single C4 featuremap; conv5 is applied later per-RoI.
        return [resnet_c4_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS[:3])]

    def rpn(self, image, features, inputs):
        featuremap = features[0]
        rpn_label_logits, rpn_box_logits = rpn_head('rpn', featuremap, cfg.RPN.HEAD_DIM, cfg.RPN.NUM_ANCHOR)
        anchors = RPNAnchors(get_all_anchors(), inputs['anchor_labels'], inputs['anchor_boxes'])
        anchors = anchors.narrow_to(featuremap)

        image_shape2d = tf.shape(image)[2:]     # h,w
        pred_boxes_decoded = anchors.decode_logits(rpn_box_logits)  # fHxfWxNAx4, floatbox
        proposal_boxes, proposal_scores = generate_rpn_proposals(
            tf.reshape(pred_boxes_decoded, [-1, 4]),
            tf.reshape(rpn_label_logits, [-1]),
            image_shape2d,
            cfg.RPN.TRAIN_PRE_NMS_TOPK if self.training else cfg.RPN.TEST_PRE_NMS_TOPK,
            cfg.RPN.TRAIN_POST_NMS_TOPK if self.training else cfg.RPN.TEST_POST_NMS_TOPK)

        if self.training:
            losses = rpn_losses(
                anchors.gt_labels, anchors.encoded_gt_boxes(), rpn_label_logits, rpn_box_logits)
        else:
            losses = []

        return BoxProposals(proposal_boxes), losses

    def roi_heads(self, image, features, proposals, targets):
        image_shape2d = tf.shape(image)[2:]     # h,w
        featuremap = features[0]

        gt_boxes, gt_labels, *_ = targets

        if self.training:
            # sample proposal boxes in training
            proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)
        # The boxes to be used to crop RoIs.
        # Use all proposal boxes in inference
        boxes_on_featuremap = proposals.boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE)
        roi_resized = roi_align(featuremap, boxes_on_featuremap, 14)

        feature_fastrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1])    # nxcx7x7
        # Keep C5 feature to be shared with mask branch
        feature_gap = GlobalAvgPooling('gap', feature_fastrcnn, data_format='channels_first')
        fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs('fastrcnn', feature_gap, cfg.DATA.NUM_CLASS)

        fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits, gt_boxes,
                                     tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))

        if self.training:
            all_losses = fastrcnn_head.losses()

            if cfg.MODE_MASK:
                gt_masks = targets[2]
                # maskrcnn loss
                # In training, mask branch shares the same C5 feature.
                fg_feature = tf.gather(feature_fastrcnn, proposals.fg_inds())
                mask_logits = maskrcnn_upXconv_head(
                    'maskrcnn', fg_feature, cfg.DATA.NUM_CATEGORY, num_convs=0)   # #fg x #cat x 14x14

                target_masks_for_fg = crop_and_resize(
                    tf.expand_dims(gt_masks, 1),
                    proposals.fg_boxes(),
                    proposals.fg_inds_wrt_gt, 14,
                    pad_border=False)   # nfg x 1x14x14
                target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
                all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
            return all_losses
        else:
            decoded_boxes = fastrcnn_head.decoded_output_boxes()
            decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
            label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
            final_boxes, final_scores, final_labels = fastrcnn_predictions(
                decoded_boxes, label_scores, name_scope='output')

            if cfg.MODE_MASK:
                roi_resized = roi_align(featuremap, final_boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE), 14)
                feature_maskrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1])
                mask_logits = maskrcnn_upXconv_head(
                    'maskrcnn', feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 0)   # #result x #cat x 14x14
                # Pick each detection's mask for its predicted class (labels are 1-based).
                indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
                final_mask_logits = tf.gather_nd(mask_logits, indices)   # #resultx14x14
                tf.sigmoid(final_mask_logits, name='output/masks')
            return []
class ResNetFPNModel(DetectionModel):
def inputs(self):
    """Declare placeholders: image, per-FPN-level anchors, and GT targets."""
    ret = [
        tf.placeholder(tf.float32, (None, None, 3), 'image')]
    num_anchors = len(cfg.RPN.ANCHOR_RATIOS)
    # One (labels, boxes) pair per FPN level; levels are named lvl2..lvl6.
    for k in range(len(cfg.FPN.ANCHOR_STRIDES)):
        ret.extend([
            tf.placeholder(tf.int32, (None, None, num_anchors),
                           'anchor_labels_lvl{}'.format(k + 2)),
            tf.placeholder(tf.float32, (None, None, num_anchors, 4),
                           'anchor_boxes_lvl{}'.format(k + 2))])
    ret.extend([
        tf.placeholder(tf.float32, (None, 4), 'gt_boxes'),
        tf.placeholder(tf.int64, (None,), 'gt_labels')])   # all > 0
    if cfg.MODE_MASK:
        ret.append(
            tf.placeholder(tf.uint8, (None, None, None), 'gt_masks')
        )   # NR_GT x height x width
    return ret
def slice_feature_and_anchors(self, p23456, anchors):
    """Crop each level's anchors in-place to match its featuremap size."""
    for i, stride in enumerate(cfg.FPN.ANCHOR_STRIDES):
        with tf.name_scope('FPN_slice_lvl{}'.format(i)):
            anchors[i] = anchors[i].narrow_to(p23456[i])
def backbone(self, image):
    """Return FPN pyramid features P2..P6 built on a ResNet backbone."""
    c2345 = resnet_fpn_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS)
    p23456 = fpn_model('fpn', c2345)
    return p23456
def rpn(self, image, features, inputs):
    """Run the shared RPN head on every FPN level and merge proposals."""
    assert len(cfg.RPN.ANCHOR_SIZES) == len(cfg.FPN.ANCHOR_STRIDES)

    image_shape2d = tf.shape(image)[2:]     # h,w
    all_anchors_fpn = get_all_anchors_fpn()
    multilevel_anchors = [RPNAnchors(
        all_anchors_fpn[i],
        inputs['anchor_labels_lvl{}'.format(i + 2)],
        inputs['anchor_boxes_lvl{}'.format(i + 2)]) for i in range(len(all_anchors_fpn))]
    self.slice_feature_and_anchors(features, multilevel_anchors)

    # Multi-Level RPN Proposals
    rpn_outputs = [rpn_head('rpn', pi, cfg.FPN.NUM_CHANNEL, len(cfg.RPN.ANCHOR_RATIOS))
                   for pi in features]
    multilevel_label_logits = [k[0] for k in rpn_outputs]
    multilevel_box_logits = [k[1] for k in rpn_outputs]
    multilevel_pred_boxes = [anchor.decode_logits(logits)
                             for anchor, logits in zip(multilevel_anchors, multilevel_box_logits)]
    proposal_boxes, proposal_scores = generate_fpn_proposals(
        multilevel_pred_boxes, multilevel_label_logits, image_shape2d)

    if self.training:
        losses = multilevel_rpn_losses(
            multilevel_anchors, multilevel_label_logits, multilevel_box_logits)
    else:
        losses = []

    return BoxProposals(proposal_boxes), losses
def roi_heads(self, image, features, proposals, targets):
    """Box head (plain or cascade) and optional mask head on top of FPN features.

    At training time returns the list of head losses; at inference it builds
    the 'output/*' tensors (boxes/scores/labels, optionally masks) and
    returns an empty list.
    """
    image_shape2d = tf.shape(image)[2:]     # h,w
    assert len(features) == 5, "Features have to be P23456!"
    gt_boxes, gt_labels, *_ = targets

    if self.training:
        # subsample proposals into fg/bg training targets
        proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)

    fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)
    if not cfg.FPN.CASCADE:
        # single-stage Fast R-CNN head: 7x7 RoIAlign on P2-P5 only (P6 is RPN-only)
        roi_feature_fastrcnn = multilevel_roi_align(features[:4], proposals.boxes, 7)
        head_feature = fastrcnn_head_func('fastrcnn', roi_feature_fastrcnn)
        fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs(
            'fastrcnn/outputs', head_feature, cfg.DATA.NUM_CLASS)
        fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits,
                                     gt_boxes, tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))
    else:
        # cascade head re-pools RoI features for each refinement stage
        def roi_func(boxes):
            return multilevel_roi_align(features[:4], boxes, 7)

        fastrcnn_head = CascadeRCNNHead(
            proposals, roi_func, fastrcnn_head_func,
            (gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CLASS)

    if self.training:
        all_losses = fastrcnn_head.losses()

        if cfg.MODE_MASK:
            gt_masks = targets[2]
            # maskrcnn loss
            roi_feature_maskrcnn = multilevel_roi_align(
                features[:4], proposals.fg_boxes(), 14,
                name_scope='multilevel_roi_align_mask')
            maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
            mask_logits = maskrcnn_head_func(
                'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY)   # #fg x #cat x 28 x 28

            # crop/resize ground-truth masks to the fg boxes to form mask targets
            target_masks_for_fg = crop_and_resize(
                tf.expand_dims(gt_masks, 1),
                proposals.fg_boxes(),
                proposals.fg_inds_wrt_gt, 28,
                pad_border=False)  # fg x 1x28x28
            target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
            all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
        return all_losses
    else:
        decoded_boxes = fastrcnn_head.decoded_output_boxes()
        decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
        label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
        final_boxes, final_scores, final_labels = fastrcnn_predictions(
            decoded_boxes, label_scores, name_scope='output')
        if cfg.MODE_MASK:
            # Cascade inference needs roi transform with refined boxes.
            roi_feature_maskrcnn = multilevel_roi_align(features[:4], final_boxes, 14)
            maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
            mask_logits = maskrcnn_head_func(
                'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY)   # #fg x #cat x 28 x 28
            # pick, for each detection, the mask channel of its predicted class (labels are 1-based)
            indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
            final_mask_logits = tf.gather_nd(mask_logits, indices)   # #resultx28x28
            tf.sigmoid(final_mask_logits, name='output/masks')
        return []
def visualize(model, model_path, nr_visualize=100, output_dir='output'):
    """
    Visualize some intermediate results (proposals, raw predictions) inside the pipeline.

    Writes one 2x2 grid image per datapoint (groundtruth / proposal recall /
    proposal scores / final outputs) into ``output_dir`` and also shows it
    interactively when a DISPLAY is available.
    """
    df = get_train_dataflow()   # we don't visualize mask stuff
    df.reset_state()
    pred = OfflinePredictor(PredictConfig(
        model=model,
        session_init=get_model_loader(model_path),
        input_names=['image', 'gt_boxes', 'gt_labels'],
        output_names=[
            'generate_{}_proposals/boxes'.format('fpn' if cfg.MODE_FPN else 'rpn'),
            'generate_{}_proposals/scores'.format('fpn' if cfg.MODE_FPN else 'rpn'),
            'fastrcnn_all_scores',
            'output/boxes',
            'output/scores',
            'output/labels',
        ]))

    # start from a clean output directory
    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    utils.fs.mkdir_p(output_dir)
    with tqdm.tqdm(total=nr_visualize) as pbar:
        for idx, dp in itertools.islice(enumerate(df), nr_visualize):
            img, gt_boxes, gt_labels = dp['image'], dp['gt_boxes'], dp['gt_labels']

            rpn_boxes, rpn_scores, all_scores, \
                final_boxes, final_scores, final_labels = pred(img, gt_boxes, gt_labels)

            # draw groundtruth boxes
            gt_viz = draw_annotation(img, gt_boxes, gt_labels)
            # draw best proposals for each groundtruth, to show recall
            proposal_viz, good_proposals_ind = draw_proposal_recall(img, rpn_boxes, rpn_scores, gt_boxes)
            # draw the scores for the above proposals
            score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind], all_scores[good_proposals_ind])

            # no masks in this visualization, hence the trailing None column
            results = [DetectionResult(*args) for args in
                       zip(final_boxes, final_scores, final_labels,
                           [None] * len(final_labels))]
            final_viz = draw_final_outputs(img, results)

            viz = tpviz.stack_patches([
                gt_viz, proposal_viz,
                score_viz, final_viz], 2, 2)

            if os.environ.get('DISPLAY', None):
                tpviz.interactive_imshow(viz)
            cv2.imwrite("{}/{:03d}.png".format(output_dir, idx), viz)
            pbar.update()
def offline_evaluate(pred_config, output_file):
    """Run COCO evaluation with one predictor and one dataflow shard per GPU,
    dump the detection results to ``output_file`` (json) and print the metrics."""
    num_gpu = cfg.TRAIN.NUM_GPUS
    graph_funcs = MultiTowerOfflinePredictor(
        pred_config, list(range(num_gpu))).get_predictors()
    predictors = []
    dataflows = []
    for k in range(num_gpu):
        # bind pred as a default argument so each lambda keeps its own predictor
        # (avoids the late-binding closure pitfall)
        predictors.append(lambda img,
                          pred=graph_funcs[k]: detect_one_image(img, pred))
        dataflows.append(get_eval_dataflow(shard=k, num_shards=num_gpu))
    if num_gpu > 1:
        all_results = multithread_eval_coco(dataflows, predictors)
    else:
        all_results = eval_coco(dataflows[0], predictors[0])
    with open(output_file, 'w') as f:
        json.dump(all_results, f)
    print_coco_metrics(output_file)
def predict(pred_func, input_file):
    """Run detection on one image file; save and show the original next to the rendered detections."""
    image = cv2.imread(input_file, cv2.IMREAD_COLOR)
    detections = detect_one_image(image, pred_func)
    rendered = draw_final_outputs(image, detections)
    side_by_side = np.concatenate((image, rendered), axis=1)
    cv2.imwrite("output.png", side_by_side)
    logger.info("Inference output written to output.png")
    tpviz.interactive_imshow(side_by_side)
class EvalCallback(Callback):
    """
    A callback that runs COCO evaluation once a while.
    It supports multi-gpu evaluation.
    """

    # run on every worker, so each horovod local-rank-0 can evaluate its shard
    _chief_only = False

    def __init__(self, in_names, out_names):
        # tensor names used to build the inference predictor(s)
        self._in_names, self._out_names = in_names, out_names

    def _setup_graph(self):
        num_gpu = cfg.TRAIN.NUM_GPUS
        if cfg.TRAINER == 'replicated':
            # TF bug in version 1.11, 1.12: https://github.com/tensorflow/tensorflow/issues/22750
            buggy_tf = get_tf_version_tuple() in [(1, 11), (1, 12)]

            # Use two predictor threads per GPU to get better throughput
            self.num_predictor = num_gpu if buggy_tf else num_gpu * 2
            self.predictors = [self._build_coco_predictor(k % num_gpu) for k in range(self.num_predictor)]
            self.dataflows = [get_eval_dataflow(shard=k, num_shards=self.num_predictor)
                              for k in range(self.num_predictor)]
        else:
            # Only eval on the first machine.
            # Alternatively, can eval on all ranks and use allgather, but allgather sometimes hangs
            self._horovod_run_eval = hvd.rank() == hvd.local_rank()
            if self._horovod_run_eval:
                self.predictor = self._build_coco_predictor(0)
                self.dataflow = get_eval_dataflow(shard=hvd.local_rank(), num_shards=hvd.local_size())
            # dummy allreduce used purely as a barrier to sync ranks before aggregation
            self.barrier = hvd.allreduce(tf.random_normal(shape=[1]))

    def _build_coco_predictor(self, idx):
        # returns a callable(img) -> detections, running on device ``idx``
        graph_func = self.trainer.get_predictor(self._in_names, self._out_names, device=idx)
        return lambda img: detect_one_image(img, graph_func)

    def _before_train(self):
        # precompute the epochs at which to evaluate: every EVAL_PERIOD plus the final epoch
        eval_period = cfg.TRAIN.EVAL_PERIOD
        self.epochs_to_eval = set()
        for k in itertools.count(1):
            if k * eval_period > self.trainer.max_epoch:
                break
            self.epochs_to_eval.add(k * eval_period)
        self.epochs_to_eval.add(self.trainer.max_epoch)
        logger.info("[EvalCallback] Will evaluate every {} epochs".format(eval_period))

    def _eval(self):
        # NOTE(review): relies on the module-level ``args`` defined under __main__
        logdir = args.logdir
        if cfg.TRAINER == 'replicated':
            all_results = multithread_eval_coco(self.dataflows, self.predictors)
        else:
            # each evaluating rank writes a partial json; global rank 0 merges them
            filenames = [os.path.join(
                logdir, 'outputs{}-part{}.json'.format(self.global_step, rank)
            ) for rank in range(hvd.local_size())]

            if self._horovod_run_eval:
                local_results = eval_coco(self.dataflow, self.predictor)
                fname = filenames[hvd.local_rank()]
                with open(fname, 'w') as f:
                    json.dump(local_results, f)
            self.barrier.eval()   # wait until every rank has written its part
            if hvd.rank() > 0:
                return
            all_results = []
            for fname in filenames:
                with open(fname, 'r') as f:
                    obj = json.load(f)
                all_results.extend(obj)
                os.unlink(fname)

        output_file = os.path.join(
            logdir, 'outputs{}.json'.format(self.global_step))
        with open(output_file, 'w') as f:
            json.dump(all_results, f)
        try:
            scores = print_coco_metrics(output_file)
            for k, v in scores.items():
                self.trainer.monitors.put_scalar(k, v)
        except Exception:
            # evaluation failure should not kill training
            logger.exception("Exception in COCO evaluation.")

    def _trigger_epoch(self):
        if self.epoch_num in self.epochs_to_eval:
            logger.info("Running evaluation ...")
            self._eval()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--load', help='load a model for evaluation or training. Can overwrite BACKBONE.WEIGHTS')
    parser.add_argument('--logdir', help='log directory', default='train_log/maskrcnn')
    parser.add_argument('--visualize', action='store_true', help='visualize intermediate results')
    parser.add_argument('--evaluate', help="Run evaluation on COCO. "
                                           "This argument is the path to the output json evaluation file")
    parser.add_argument('--predict', help="Run prediction on a given image. "
                                          "This argument is the path to the input image file")
    parser.add_argument('--config', help="A list of KEY=VALUE to overwrite those defined in config.py",
                        nargs='+')

    if get_tf_version_tuple() < (1, 6):
        # https://github.com/tensorflow/tensorflow/issues/14657
        logger.warn("TF<1.6 has a bug which may lead to crash in FasterRCNN if you're unlucky.")

    args = parser.parse_args()
    if args.config:
        cfg.update_args(args.config)

    MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()

    # ---- inference-only entry points ----
    if args.visualize or args.evaluate or args.predict:
        assert tf.test.is_gpu_available()
        assert args.load
        finalize_configs(is_training=False)

        if args.predict or args.visualize:
            # use a lower score threshold when results are meant for human viewing
            cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS

        if args.visualize:
            visualize(MODEL, args.load)
        else:
            predcfg = PredictConfig(
                model=MODEL,
                session_init=get_model_loader(args.load),
                input_names=MODEL.get_inference_tensor_names()[0],
                output_names=MODEL.get_inference_tensor_names()[1])
            if args.predict:
                COCODetection(cfg.DATA.BASEDIR, 'val2014')   # Only to load the class names into caches
                predict(OfflinePredictor(predcfg), args.predict)
            elif args.evaluate:
                assert args.evaluate.endswith('.json'), args.evaluate
                offline_evaluate(predcfg, args.evaluate)
    # ---- training ----
    else:
        is_horovod = cfg.TRAINER == 'horovod'
        if is_horovod:
            hvd.init()
            logger.info("Horovod Rank={}, Size={}".format(hvd.rank(), hvd.size()))

        if not is_horovod or hvd.rank() == 0:
            logger.set_logger_dir(args.logdir, 'd')

        finalize_configs(is_training=True)
        stepnum = cfg.TRAIN.STEPS_PER_EPOCH

        # warmup is step based, lr is epoch based
        init_lr = cfg.TRAIN.BASE_LR * 0.33 * min(8. / cfg.TRAIN.NUM_GPUS, 1.)
        warmup_schedule = [(0, init_lr), (cfg.TRAIN.WARMUP, cfg.TRAIN.BASE_LR)]
        warmup_end_epoch = cfg.TRAIN.WARMUP * 1. / stepnum
        lr_schedule = [(int(warmup_end_epoch + 0.5), cfg.TRAIN.BASE_LR)]

        # LR_SCHEDULE is given in iterations for 8 GPUs; rescale for the actual GPU count
        factor = 8. / cfg.TRAIN.NUM_GPUS
        for idx, steps in enumerate(cfg.TRAIN.LR_SCHEDULE[:-1]):
            mult = 0.1 ** (idx + 1)
            lr_schedule.append(
                (steps * factor // stepnum, cfg.TRAIN.BASE_LR * mult))
        logger.info("Warm Up Schedule (steps, value): " + str(warmup_schedule))
        logger.info("LR Schedule (epochs, value): " + str(lr_schedule))
        train_dataflow = get_train_dataflow()
        # This is what's commonly referred to as "epochs"
        total_passes = cfg.TRAIN.LR_SCHEDULE[-1] * 8 / train_dataflow.size()
        logger.info("Total passes of the training set is: {:.5g}".format(total_passes))

        callbacks = [
            PeriodicCallback(
                ModelSaver(max_to_keep=10, keep_checkpoint_every_n_hours=1),
                every_k_epochs=20),
            # linear warmup
            ScheduledHyperParamSetter(
                'learning_rate', warmup_schedule, interp='linear', step_based=True),
            ScheduledHyperParamSetter('learning_rate', lr_schedule),
            EvalCallback(*MODEL.get_inference_tensor_names()),
            PeakMemoryTracker(),
            EstimatedTimeLeft(median=True),
            SessionRunTimeout(60000).set_chief_only(True),   # 1 minute timeout
        ]
        if not is_horovod:
            callbacks.append(GPUUtilizationTracker())

        # only the chief loads weights; other horovod ranks start empty
        if is_horovod and hvd.rank() > 0:
            session_init = None
        else:
            if args.load:
                session_init = get_model_loader(args.load)
            else:
                session_init = get_model_loader(cfg.BACKBONE.WEIGHTS) if cfg.BACKBONE.WEIGHTS else None

        traincfg = TrainConfig(
            model=MODEL,
            data=QueueInput(train_dataflow),
            callbacks=callbacks,
            steps_per_epoch=stepnum,
            max_epoch=cfg.TRAIN.LR_SCHEDULE[-1] * factor // stepnum,
            session_init=session_init,
            starting_epoch=cfg.TRAIN.STARTING_EPOCH
        )
        if is_horovod:
            trainer = HorovodTrainer(average=False)
        else:
            # nccl mode appears faster than cpu mode
            trainer = SyncMultiGPUTrainerReplicated(cfg.TRAIN.NUM_GPUS, average=False, mode='nccl')
launch_train_with_config(traincfg, trainer)
import os
import argparse
import cv2
import shutil
import itertools
import tqdm
import numpy as np
import json
import six
import tensorflow as tf
try:
import horovod.tensorflow as hvd
except ImportError:
pass
assert six.PY3, "FasterRCNN requires Python 3!"
from tensorpack import *
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.tfutils import optimizer
from tensorpack.tfutils.common import get_tf_version_tuple
import tensorpack.utils.viz as tpviz
from coco import COCODetection
from basemodel import (
image_preprocess, resnet_c4_backbone, resnet_conv5,
resnet_fpn_backbone)
import model_frcnn
import model_mrcnn
from model_frcnn import (
sample_fast_rcnn_targets, fastrcnn_outputs,
fastrcnn_predictions, BoxProposals, FastRCNNHead)
from model_mrcnn import maskrcnn_upXconv_head, maskrcnn_loss
from model_rpn import rpn_head, rpn_losses, generate_rpn_proposals
from model_fpn import (
fpn_model, multilevel_roi_align,
multilevel_rpn_losses, generate_fpn_proposals)
from model_cascade import CascadeRCNNHead
from model_box import (
clip_boxes, crop_and_resize, roi_align, RPNAnchors)
from data import (
get_train_dataflow, get_eval_dataflow,
get_all_anchors, get_all_anchors_fpn)
from viz import (
draw_annotation, draw_proposal_recall,
draw_predictions, draw_final_outputs)
from eval import (
eval_coco, multithread_eval_coco,
detect_one_image, print_coco_metrics, DetectionResult)
from config import finalize_configs, config as cfg
class DetectionModel(ModelDesc):
    """Base tensorpack model shared by the C4 and FPN detection variants.

    Subclasses provide ``inputs``, ``backbone``, ``rpn`` and ``roi_heads``.
    """

    def preprocess(self, image):
        # HWC BGR float image -> normalized 1CHW tensor
        image = tf.expand_dims(image, 0)
        image = image_preprocess(image, bgr=True)
        return tf.transpose(image, [0, 3, 1, 2])

    @property
    def training(self):
        # True while the graph is being built for a training tower
        return get_current_tower_context().is_training

    def optimizer(self):
        lr = tf.get_variable('learning_rate', initializer=0.003, trainable=False)
        tf.summary.scalar('learning_rate-summary', lr)

        # The learning rate is set for 8 GPUs, and we use trainers with average=False.
        lr = lr / 8.
        opt = tf.train.MomentumOptimizer(lr, 0.9)
        if cfg.TRAIN.NUM_GPUS < 8:
            # accumulate gradients to keep the effective batch equivalent to 8 GPUs
            opt = optimizer.AccumGradOptimizer(opt, 8 // cfg.TRAIN.NUM_GPUS)
        return opt

    def get_inference_tensor_names(self):
        """
        Returns two lists of tensor names to be used to create an inference callable.

        Returns:
            [str]: input names
            [str]: output names
        """
        out = ['output/boxes', 'output/scores', 'output/labels']
        if cfg.MODE_MASK:
            out.append('output/masks')
        return ['image'], out

    def build_graph(self, *inputs):
        # map positional inputs back to their placeholder names
        inputs = dict(zip(self.input_names, inputs))

        image = self.preprocess(inputs['image'])     # 1CHW

        features = self.backbone(image)
        anchor_inputs = {k: v for k, v in inputs.items() if k.startswith('anchor_')}
        proposals, rpn_losses = self.rpn(image, features, anchor_inputs)  # inputs?

        targets = [inputs[k] for k in ['gt_boxes', 'gt_labels', 'gt_masks'] if k in inputs]
        head_losses = self.roi_heads(image, features, proposals, targets)

        if self.training:
            wd_cost = regularize_cost(
                '.*/W', l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')
            total_cost = tf.add_n(
                rpn_losses + head_losses + [wd_cost], 'total_cost')
            add_moving_summary(total_cost, wd_cost)
            return total_cost
class ResNetC4Model(DetectionModel):
    """Faster/Mask R-CNN on a single ResNet-C4 feature map."""

    def inputs(self):
        # single-level anchors; gt_masks is only fed in mask mode
        ret = [
            tf.placeholder(tf.float32, (None, None, 3), 'image'),
            tf.placeholder(tf.int32, (None, None, cfg.RPN.NUM_ANCHOR), 'anchor_labels'),
            tf.placeholder(tf.float32, (None, None, cfg.RPN.NUM_ANCHOR, 4), 'anchor_boxes'),
            tf.placeholder(tf.float32, (None, 4), 'gt_boxes'),
            tf.placeholder(tf.int64, (None,), 'gt_labels')]   # all > 0
        if cfg.MODE_MASK:
            ret.append(
                tf.placeholder(tf.uint8, (None, None, None), 'gt_masks')
            )   # NR_GT x height x width
        return ret

    def backbone(self, image):
        # only conv1-conv4 ("C4") here; conv5 is applied per-RoI inside the heads
        return [resnet_c4_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS[:3])]

    def rpn(self, image, features, inputs):
        """Single-level RPN. Returns (BoxProposals, losses); losses is [] at inference."""
        featuremap = features[0]
        rpn_label_logits, rpn_box_logits = rpn_head('rpn', featuremap, cfg.RPN.HEAD_DIM, cfg.RPN.NUM_ANCHOR)
        anchors = RPNAnchors(get_all_anchors(), inputs['anchor_labels'], inputs['anchor_boxes'])
        anchors = anchors.narrow_to(featuremap)

        image_shape2d = tf.shape(image)[2:]     # h,w
        pred_boxes_decoded = anchors.decode_logits(rpn_box_logits)  # fHxfWxNAx4, floatbox
        proposal_boxes, proposal_scores = generate_rpn_proposals(
            tf.reshape(pred_boxes_decoded, [-1, 4]),
            tf.reshape(rpn_label_logits, [-1]),
            image_shape2d,
            cfg.RPN.TRAIN_PRE_NMS_TOPK if self.training else cfg.RPN.TEST_PRE_NMS_TOPK,
            cfg.RPN.TRAIN_POST_NMS_TOPK if self.training else cfg.RPN.TEST_POST_NMS_TOPK)

        if self.training:
            losses = rpn_losses(
                anchors.gt_labels, anchors.encoded_gt_boxes(), rpn_label_logits, rpn_box_logits)
        else:
            losses = []
        return BoxProposals(proposal_boxes), losses

    def roi_heads(self, image, features, proposals, targets):
        """Box (and optional mask) heads on the shared C5 feature.
        Returns head losses at training time; builds 'output/*' tensors and returns [] otherwise."""
        image_shape2d = tf.shape(image)[2:]     # h,w
        featuremap = features[0]

        gt_boxes, gt_labels, *_ = targets

        if self.training:
            # sample proposal boxes in training
            proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)
        # The boxes to be used to crop RoIs.
        # Use all proposal boxes in inference
        boxes_on_featuremap = proposals.boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE)
        roi_resized = roi_align(featuremap, boxes_on_featuremap, 14)

        feature_fastrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1])    # nxcx7x7
        # Keep C5 feature to be shared with mask branch
        feature_gap = GlobalAvgPooling('gap', feature_fastrcnn, data_format='channels_first')
        fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs('fastrcnn', feature_gap, cfg.DATA.NUM_CLASS)

        fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits, gt_boxes,
                                     tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))

        if self.training:
            all_losses = fastrcnn_head.losses()

            if cfg.MODE_MASK:
                gt_masks = targets[2]
                # maskrcnn loss
                # In training, mask branch shares the same C5 feature.
                fg_feature = tf.gather(feature_fastrcnn, proposals.fg_inds())
                mask_logits = maskrcnn_upXconv_head(
                    'maskrcnn', fg_feature, cfg.DATA.NUM_CATEGORY, num_convs=0)   # #fg x #cat x 14x14

                target_masks_for_fg = crop_and_resize(
                    tf.expand_dims(gt_masks, 1),
                    proposals.fg_boxes(),
                    proposals.fg_inds_wrt_gt, 14,
                    pad_border=False)  # nfg x 1x14x14
                target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
                all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
            return all_losses
        else:
            decoded_boxes = fastrcnn_head.decoded_output_boxes()
            decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
            label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
            final_boxes, final_scores, final_labels = fastrcnn_predictions(
                decoded_boxes, label_scores, name_scope='output')

            if cfg.MODE_MASK:
                roi_resized = roi_align(featuremap, final_boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE), 14)
                feature_maskrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1])
                mask_logits = maskrcnn_upXconv_head(
                    'maskrcnn', feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 0)   # #result x #cat x 14x14
                # select each detection's own class channel (labels are 1-based)
                indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
                final_mask_logits = tf.gather_nd(mask_logits, indices)   # #resultx14x14
                tf.sigmoid(final_mask_logits, name='output/masks')
            return []
class ResNetFPNModel(DetectionModel):
    """Faster/Mask R-CNN on a ResNet-FPN backbone (feature pyramid P2..P6)."""

    def inputs(self):
        ret = [
            tf.placeholder(tf.float32, (None, None, 3), 'image')]
        num_anchors = len(cfg.RPN.ANCHOR_RATIOS)
        # one (labels, boxes) pair per pyramid level, named lvl2..lvlN
        for k in range(len(cfg.FPN.ANCHOR_STRIDES)):
            ret.extend([
                tf.placeholder(tf.int32, (None, None, num_anchors),
                               'anchor_labels_lvl{}'.format(k + 2)),
                tf.placeholder(tf.float32, (None, None, num_anchors, 4),
                               'anchor_boxes_lvl{}'.format(k + 2))])
        ret.extend([
            tf.placeholder(tf.float32, (None, 4), 'gt_boxes'),
            tf.placeholder(tf.int64, (None,), 'gt_labels')])   # all > 0
        if cfg.MODE_MASK:
            ret.append(
                tf.placeholder(tf.uint8, (None, None, None), 'gt_masks')
            )   # NR_GT x height x width
        return ret

    def slice_feature_and_anchors(self, p23456, anchors):
        # crop each level's anchors in-place to match its feature map's extent
        for i, stride in enumerate(cfg.FPN.ANCHOR_STRIDES):
            with tf.name_scope('FPN_slice_lvl{}'.format(i)):
                anchors[i] = anchors[i].narrow_to(p23456[i])

    def backbone(self, image):
        # ResNet bottom-up (C2..C5) followed by the FPN top-down pathway
        c2345 = resnet_fpn_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS)
        p23456 = fpn_model('fpn', c2345)
        return p23456

    def rpn(self, image, features, inputs):
        """Multi-level RPN over P2..P6. Returns (BoxProposals, losses); losses is [] at inference."""
        assert len(cfg.RPN.ANCHOR_SIZES) == len(cfg.FPN.ANCHOR_STRIDES)
        image_shape2d = tf.shape(image)[2:]     # h,w
        all_anchors_fpn = get_all_anchors_fpn()
        multilevel_anchors = [RPNAnchors(
            all_anchors_fpn[i],
            inputs['anchor_labels_lvl{}'.format(i + 2)],
            inputs['anchor_boxes_lvl{}'.format(i + 2)]) for i in range(len(all_anchors_fpn))]
        self.slice_feature_and_anchors(features, multilevel_anchors)

        # Multi-Level RPN Proposals
        rpn_outputs = [rpn_head('rpn', pi, cfg.FPN.NUM_CHANNEL, len(cfg.RPN.ANCHOR_RATIOS))
                       for pi in features]
        multilevel_label_logits = [k[0] for k in rpn_outputs]
        multilevel_box_logits = [k[1] for k in rpn_outputs]
        multilevel_pred_boxes = [anchor.decode_logits(logits)
                                 for anchor, logits in zip(multilevel_anchors, multilevel_box_logits)]

        proposal_boxes, proposal_scores = generate_fpn_proposals(
            multilevel_pred_boxes, multilevel_label_logits, image_shape2d)

        if self.training:
            losses = multilevel_rpn_losses(
                multilevel_anchors, multilevel_label_logits, multilevel_box_logits)
        else:
            losses = []
        return BoxProposals(proposal_boxes), losses

    def roi_heads(self, image, features, proposals, targets):
        """Box head (plain or cascade) plus optional mask head.
        Returns head losses at training time; builds 'output/*' tensors and returns [] otherwise."""
        image_shape2d = tf.shape(image)[2:]     # h,w
        assert len(features) == 5, "Features have to be P23456!"
        gt_boxes, gt_labels, *_ = targets

        if self.training:
            proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)

        fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)
        if not cfg.FPN.CASCADE:
            # 7x7 RoIAlign on P2-P5 only (P6 is used by the RPN alone)
            roi_feature_fastrcnn = multilevel_roi_align(features[:4], proposals.boxes, 7)
            head_feature = fastrcnn_head_func('fastrcnn', roi_feature_fastrcnn)
            fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs(
                'fastrcnn/outputs', head_feature, cfg.DATA.NUM_CLASS)
            fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits,
                                         gt_boxes, tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))
        else:
            # cascade head re-pools RoI features at each refinement stage
            def roi_func(boxes):
                return multilevel_roi_align(features[:4], boxes, 7)

            fastrcnn_head = CascadeRCNNHead(
                proposals, roi_func, fastrcnn_head_func,
                (gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CLASS)

        if self.training:
            all_losses = fastrcnn_head.losses()

            if cfg.MODE_MASK:
                gt_masks = targets[2]
                # maskrcnn loss
                roi_feature_maskrcnn = multilevel_roi_align(
                    features[:4], proposals.fg_boxes(), 14,
                    name_scope='multilevel_roi_align_mask')
                maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
                mask_logits = maskrcnn_head_func(
                    'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY)   # #fg x #cat x 28 x 28

                target_masks_for_fg = crop_and_resize(
                    tf.expand_dims(gt_masks, 1),
                    proposals.fg_boxes(),
                    proposals.fg_inds_wrt_gt, 28,
                    pad_border=False)  # fg x 1x28x28
                target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
                all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
            return all_losses
        else:
            decoded_boxes = fastrcnn_head.decoded_output_boxes()
            decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
            label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
            final_boxes, final_scores, final_labels = fastrcnn_predictions(
                decoded_boxes, label_scores, name_scope='output')
            if cfg.MODE_MASK:
                # Cascade inference needs roi transform with refined boxes.
                roi_feature_maskrcnn = multilevel_roi_align(features[:4], final_boxes, 14)
                maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
                mask_logits = maskrcnn_head_func(
                    'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY)   # #fg x #cat x 28 x 28
                # pick each detection's own class channel (labels are 1-based)
                indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
                final_mask_logits = tf.gather_nd(mask_logits, indices)   # #resultx28x28
                tf.sigmoid(final_mask_logits, name='output/masks')
            return []
def visualize(model, model_path, nr_visualize=100, output_dir='output'):
    """
    Visualize some intermediate results (proposals, raw predictions) inside the pipeline.

    Writes one 2x2 grid image per datapoint (groundtruth / proposal recall /
    proposal scores / final outputs) into ``output_dir`` and also shows it
    interactively when a DISPLAY is available.
    """
    df = get_train_dataflow()   # we don't visualize mask stuff
    df.reset_state()
    pred = OfflinePredictor(PredictConfig(
        model=model,
        session_init=get_model_loader(model_path),
        input_names=['image', 'gt_boxes', 'gt_labels'],
        output_names=[
            'generate_{}_proposals/boxes'.format('fpn' if cfg.MODE_FPN else 'rpn'),
            'generate_{}_proposals/scores'.format('fpn' if cfg.MODE_FPN else 'rpn'),
            'fastrcnn_all_scores',
            'output/boxes',
            'output/scores',
            'output/labels',
        ]))

    # start from a clean output directory
    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    utils.fs.mkdir_p(output_dir)
    with tqdm.tqdm(total=nr_visualize) as pbar:
        for idx, dp in itertools.islice(enumerate(df), nr_visualize):
            img, gt_boxes, gt_labels = dp['image'], dp['gt_boxes'], dp['gt_labels']

            rpn_boxes, rpn_scores, all_scores, \
                final_boxes, final_scores, final_labels = pred(img, gt_boxes, gt_labels)

            # draw groundtruth boxes
            gt_viz = draw_annotation(img, gt_boxes, gt_labels)
            # draw best proposals for each groundtruth, to show recall
            proposal_viz, good_proposals_ind = draw_proposal_recall(img, rpn_boxes, rpn_scores, gt_boxes)
            # draw the scores for the above proposals
            score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind], all_scores[good_proposals_ind])

            # no masks in this visualization, hence the trailing None column
            results = [DetectionResult(*args) for args in
                       zip(final_boxes, final_scores, final_labels,
                           [None] * len(final_labels))]
            final_viz = draw_final_outputs(img, results)

            viz = tpviz.stack_patches([
                gt_viz, proposal_viz,
                score_viz, final_viz], 2, 2)

            if os.environ.get('DISPLAY', None):
                tpviz.interactive_imshow(viz)
            cv2.imwrite("{}/{:03d}.png".format(output_dir, idx), viz)
            pbar.update()
def offline_evaluate(pred_config, output_file):
    """Evaluate on COCO with one predictor and one dataflow shard per GPU;
    dump the detections to ``output_file`` (json) and print the metrics."""
    num_gpu = cfg.TRAIN.NUM_GPUS
    tower_funcs = MultiTowerOfflinePredictor(
        pred_config, list(range(num_gpu))).get_predictors()
    # bind each graph function as a default argument to sidestep late-binding closures
    predictors = [lambda img, pred=graph_func: detect_one_image(img, pred)
                  for graph_func in tower_funcs]
    dataflows = [get_eval_dataflow(shard=shard, num_shards=num_gpu)
                 for shard in range(num_gpu)]
    if num_gpu > 1:
        all_results = multithread_eval_coco(dataflows, predictors)
    else:
        all_results = eval_coco(dataflows[0], predictors[0])
    with open(output_file, 'w') as f:
        json.dump(all_results, f)
    print_coco_metrics(output_file)
def predict(pred_func, input_file):
    """Detect objects in a single image file, write the visualization to
    output.png and show it interactively."""
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = detect_one_image(img, pred_func)
    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)   # original | detections, side by side
    cv2.imwrite("output.png", viz)
    logger.info("Inference output written to output.png")
    tpviz.interactive_imshow(viz)
class EvalCallback(Callback):
    """
    A callback that runs COCO evaluation once a while.
    It supports multi-gpu evaluation.
    """

    # run on every worker, so each horovod local-rank-0 can evaluate its shard
    _chief_only = False

    def __init__(self, in_names, out_names):
        # tensor names used to build the inference predictor(s)
        self._in_names, self._out_names = in_names, out_names

    def _setup_graph(self):
        num_gpu = cfg.TRAIN.NUM_GPUS
        if cfg.TRAINER == 'replicated':
            # TF bug in version 1.11, 1.12: https://github.com/tensorflow/tensorflow/issues/22750
            buggy_tf = get_tf_version_tuple() in [(1, 11), (1, 12)]

            # Use two predictor threads per GPU to get better throughput
            self.num_predictor = num_gpu if buggy_tf else num_gpu * 2
            self.predictors = [self._build_coco_predictor(k % num_gpu) for k in range(self.num_predictor)]
            self.dataflows = [get_eval_dataflow(shard=k, num_shards=self.num_predictor)
                              for k in range(self.num_predictor)]
        else:
            # Only eval on the first machine.
            # Alternatively, can eval on all ranks and use allgather, but allgather sometimes hangs
            self._horovod_run_eval = hvd.rank() == hvd.local_rank()
            if self._horovod_run_eval:
                self.predictor = self._build_coco_predictor(0)
                self.dataflow = get_eval_dataflow(shard=hvd.local_rank(), num_shards=hvd.local_size())
            # dummy allreduce used purely as a barrier to sync ranks before aggregation
            self.barrier = hvd.allreduce(tf.random_normal(shape=[1]))

    def _build_coco_predictor(self, idx):
        # returns a callable(img) -> detections, running on device ``idx``
        graph_func = self.trainer.get_predictor(self._in_names, self._out_names, device=idx)
        return lambda img: detect_one_image(img, graph_func)

    def _before_train(self):
        # precompute the epochs at which to evaluate: every EVAL_PERIOD plus the final epoch
        eval_period = cfg.TRAIN.EVAL_PERIOD
        self.epochs_to_eval = set()
        for k in itertools.count(1):
            if k * eval_period > self.trainer.max_epoch:
                break
            self.epochs_to_eval.add(k * eval_period)
        self.epochs_to_eval.add(self.trainer.max_epoch)
        logger.info("[EvalCallback] Will evaluate every {} epochs".format(eval_period))

    def _eval(self):
        # NOTE(review): relies on the module-level ``args`` defined under __main__
        logdir = args.logdir
        if cfg.TRAINER == 'replicated':
            all_results = multithread_eval_coco(self.dataflows, self.predictors)
        else:
            # each evaluating rank writes a partial json; global rank 0 merges them
            filenames = [os.path.join(
                logdir, 'outputs{}-part{}.json'.format(self.global_step, rank)
            ) for rank in range(hvd.local_size())]

            if self._horovod_run_eval:
                local_results = eval_coco(self.dataflow, self.predictor)
                fname = filenames[hvd.local_rank()]
                with open(fname, 'w') as f:
                    json.dump(local_results, f)
            self.barrier.eval()   # wait until every rank has written its part
            if hvd.rank() > 0:
                return
            all_results = []
            for fname in filenames:
                with open(fname, 'r') as f:
                    obj = json.load(f)
                all_results.extend(obj)
                os.unlink(fname)

        output_file = os.path.join(
            logdir, 'outputs{}.json'.format(self.global_step))
        with open(output_file, 'w') as f:
            json.dump(all_results, f)
        try:
            scores = print_coco_metrics(output_file)
            for k, v in scores.items():
                self.trainer.monitors.put_scalar(k, v)
        except Exception:
            # evaluation failure should not kill training
            logger.exception("Exception in COCO evaluation.")

    def _trigger_epoch(self):
        if self.epoch_num in self.epochs_to_eval:
            logger.info("Running evaluation ...")
            self._eval()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--load', help='load a model for evaluation or training. Can overwrite BACKBONE.WEIGHTS')
    parser.add_argument('--logdir', help='log directory', default='train_log/maskrcnn')
    parser.add_argument('--visualize', action='store_true', help='visualize intermediate results')
    parser.add_argument('--evaluate', help="Run evaluation on COCO. "
                                           "This argument is the path to the output json evaluation file")
    parser.add_argument('--predict', help="Run prediction on a given image. "
                                          "This argument is the path to the input image file")
    parser.add_argument('--config', help="A list of KEY=VALUE to overwrite those defined in config.py",
                        nargs='+')

    if get_tf_version_tuple() < (1, 6):
        # https://github.com/tensorflow/tensorflow/issues/14657
        logger.warn("TF<1.6 has a bug which may lead to crash in FasterRCNN if you're unlucky.")

    args = parser.parse_args()
    if args.config:
        cfg.update_args(args.config)

    MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()

    # ---- inference-only entry points ----
    if args.visualize or args.evaluate or args.predict:
        assert tf.test.is_gpu_available()
        assert args.load
        finalize_configs(is_training=False)

        if args.predict or args.visualize:
            # use a lower score threshold when results are meant for human viewing
            cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS

        if args.visualize:
            visualize(MODEL, args.load)
        else:
            predcfg = PredictConfig(
                model=MODEL,
                session_init=get_model_loader(args.load),
                input_names=MODEL.get_inference_tensor_names()[0],
                output_names=MODEL.get_inference_tensor_names()[1])
            if args.predict:
                COCODetection(cfg.DATA.BASEDIR, 'val2014')   # Only to load the class names into caches
                predict(OfflinePredictor(predcfg), args.predict)
            elif args.evaluate:
                assert args.evaluate.endswith('.json'), args.evaluate
                offline_evaluate(predcfg, args.evaluate)
    # ---- training ----
    else:
        is_horovod = cfg.TRAINER == 'horovod'
        if is_horovod:
            hvd.init()
            logger.info("Horovod Rank={}, Size={}".format(hvd.rank(), hvd.size()))

        if not is_horovod or hvd.rank() == 0:
            logger.set_logger_dir(args.logdir, 'd')

        finalize_configs(is_training=True)
        stepnum = cfg.TRAIN.STEPS_PER_EPOCH

        # warmup is step based, lr is epoch based
        init_lr = cfg.TRAIN.BASE_LR * 0.33 * min(8. / cfg.TRAIN.NUM_GPUS, 1.)
        warmup_schedule = [(0, init_lr), (cfg.TRAIN.WARMUP, cfg.TRAIN.BASE_LR)]
        warmup_end_epoch = cfg.TRAIN.WARMUP * 1. / stepnum
        lr_schedule = [(int(warmup_end_epoch + 0.5), cfg.TRAIN.BASE_LR)]

        # LR_SCHEDULE is given in iterations for 8 GPUs; rescale for the actual GPU count
        factor = 8. / cfg.TRAIN.NUM_GPUS
        for idx, steps in enumerate(cfg.TRAIN.LR_SCHEDULE[:-1]):
            mult = 0.1 ** (idx + 1)
            lr_schedule.append(
                (steps * factor // stepnum, cfg.TRAIN.BASE_LR * mult))
        logger.info("Warm Up Schedule (steps, value): " + str(warmup_schedule))
        logger.info("LR Schedule (epochs, value): " + str(lr_schedule))
        train_dataflow = get_train_dataflow()
        # This is what's commonly referred to as "epochs"
        total_passes = cfg.TRAIN.LR_SCHEDULE[-1] * 8 / train_dataflow.size()
        logger.info("Total passes of the training set is: {:.5g}".format(total_passes))

        callbacks = [
            PeriodicCallback(
                ModelSaver(max_to_keep=10, keep_checkpoint_every_n_hours=1),
                every_k_epochs=20),
            # linear warmup
            ScheduledHyperParamSetter(
                'learning_rate', warmup_schedule, interp='linear', step_based=True),
            ScheduledHyperParamSetter('learning_rate', lr_schedule),
            EvalCallback(*MODEL.get_inference_tensor_names()),
            PeakMemoryTracker(),
            EstimatedTimeLeft(median=True),
            SessionRunTimeout(60000).set_chief_only(True),   # 1 minute timeout
        ]
        if not is_horovod:
            callbacks.append(GPUUtilizationTracker())

        # only the chief loads weights; other horovod ranks start empty
        if is_horovod and hvd.rank() > 0:
            session_init = None
        else:
            if args.load:
                session_init = get_model_loader(args.load)
            else:
                session_init = get_model_loader(cfg.BACKBONE.WEIGHTS) if cfg.BACKBONE.WEIGHTS else None

        traincfg = TrainConfig(
            model=MODEL,
            data=QueueInput(train_dataflow),
            callbacks=callbacks,
            steps_per_epoch=stepnum,
            max_epoch=cfg.TRAIN.LR_SCHEDULE[-1] * factor // stepnum,
            session_init=session_init,
            starting_epoch=cfg.TRAIN.STARTING_EPOCH
        )
        if is_horovod:
            trainer = HorovodTrainer(average=False)
        else:
            # nccl mode appears faster than cpu mode
            trainer = SyncMultiGPUTrainerReplicated(cfg.TRAIN.NUM_GPUS, average=False, mode='nccl')
launch_train_with_config(traincfg, trainer)
import inspect
from abc import ABC
from typing import Dict
from fedrec.data_models.trainer_state_model import TrainerState
from fedrec.python_executors.base_actor import BaseActor
from fedrec.user_modules.envis_base_module import EnvisBase
from fedrec.utilities import registry
from fedrec.utilities.logger import BaseLogger
class Trainer(BaseActor, ABC):
"""
The Trainer class is responsible for training the model.
"""
def __init__(self,
worker_index: int,
config: Dict,
logger: BaseLogger,
client_id: int,
is_mobile: bool = True,
round_idx: int = 0):
"""
Initialize the Trainer class.
Attributes
----------
round_idx : int
Number of local iterations finished
worker_index : int
The unique id alloted to the worker by the orchestrator
is_mobile : bool
Whether the worker represents a mobile device or not
persistent_storage : str
The location to serialize and store the `WorkerState`
local_sample_number : int or None
The number of datapoints in the local dataset
"""
super().__init__(worker_index, config, logger,
is_mobile, round_idx)
self.local_sample_number = None
self.local_training_steps = 10
self._data_loaders = {}
self.client_id = client_id
# TODO update trainer logic to avoid double model initialization
self.worker: EnvisBase = registry.construct(
'trainer',
config["trainer"],
unused_keys=(),
config_dict=config,
client_id=self.client_id,
logger=logger)
self.worker_funcs = {
func_name_list[0]: getattr(self.worker, func_name_list[0])
for func_name_list in
inspect.getmembers(self.worker, predicate=inspect.ismethod)
}
# self.worker_funcs = {"test_run": getattr(self.worker, "test_run")}
def reset_loaders(self):
self._data_loaders = {}
def serialize(self):
"""Serialize the state of the worker to a TrainerState.
Returns
-------
`TrainerState`
The serialised class object to be written
to Json or persisted into the file.
"""
state = {
'model': self._get_model_params(),
'worker_state': self.worker.envis_state,
'step': self.local_training_steps
}
if self.optimizer is not None:
state['optimizer'] = self._get_optimizer_params()
return TrainerState(
worker_index=self.worker_index,
round_idx=self.round_idx,
state_dict=state,
model_preproc=self.model_preproc,
storage=self.persistent_storage,
local_sample_number=self.local_sample_number,
local_training_steps=self.local_training_steps
)
def load_worker(
self,
state: TrainerState):
"""Constructs a trainer object from the state.
Parameters
----------
state : TrainerState
TrainerState containing the weights
"""
self.worker_index = state.worker_index
self.persistent_storage = state.storage
self.round_idx = state.round_idx
self.load_model(state.state_dict['model'])
self.local_training_steps = state.state_dict['step']
if self.optimizer is not None:
self.load_optimizer(state.state_dict['optimizer'])
self.worker.update(state.state_dict["worker_state"])
def update_dataset(self, model_preproc):
"""Update the dataset, trainer_index and model_index .
Parameters
----------
worker_index : int
unique worker id
model_preproc : `Preprocessor`
The preprocessor contains the dataset of the worker
"""
self.model_preproc = model_preproc
self.local_sample_number = len(
self.model_preproc.datasets('train'))
self.reset_loaders()
def run(self, func_name, *args, **kwargs):
"""
Run the model.
func_name : Name of the function to run in the trainer
"""
if func_name in self.worker_funcs:
print(f"Running function name: {func_name}")
return self.process_args(
self.worker_funcs[func_name](*args, **kwargs))
else:
raise ValueError(
f"Job type <{func_name}> not part of worker"
+ f"<{self.worker.__class__.__name__}> functions") | fedrec/python_executors/trainer.py | import inspect
from abc import ABC
from typing import Dict
from fedrec.data_models.trainer_state_model import TrainerState
from fedrec.python_executors.base_actor import BaseActor
from fedrec.user_modules.envis_base_module import EnvisBase
from fedrec.utilities import registry
from fedrec.utilities.logger import BaseLogger
# NOTE(review): byte-identical duplicate of the Trainer class above (this is
# the dataset's `parsed_code` column for the same file); keep both copies in
# sync if either is edited.
class Trainer(BaseActor, ABC):
    """
    The Trainer class is responsible for training the model.
    """
    def __init__(self,
                 worker_index: int,
                 config: Dict,
                 logger: BaseLogger,
                 client_id: int,
                 is_mobile: bool = True,
                 round_idx: int = 0):
        """
        Initialize the Trainer class.
        Attributes
        ----------
        round_idx : int
            Number of local iterations finished
        worker_index : int
            The unique id alloted to the worker by the orchestrator
        is_mobile : bool
            Whether the worker represents a mobile device or not
        persistent_storage : str
            The location to serialize and store the `WorkerState`
        local_sample_number : int or None
            The number of datapoints in the local dataset
        """
        super().__init__(worker_index, config, logger,
                         is_mobile, round_idx)
        self.local_sample_number = None
        self.local_training_steps = 10
        self._data_loaders = {}
        self.client_id = client_id
        # TODO update trainer logic to avoid double model initialization
        self.worker: EnvisBase = registry.construct(
            'trainer',
            config["trainer"],
            unused_keys=(),
            config_dict=config,
            client_id=self.client_id,
            logger=logger)
        # Cache every bound method of the worker, keyed by name, so run()
        # can dispatch on a plain string job type.
        self.worker_funcs = {
            func_name_list[0]: getattr(self.worker, func_name_list[0])
            for func_name_list in
            inspect.getmembers(self.worker, predicate=inspect.ismethod)
        }
        # self.worker_funcs = {"test_run": getattr(self.worker, "test_run")}
    def reset_loaders(self):
        # Drop cached data loaders; rebuilt lazily after the dataset changes.
        self._data_loaders = {}
    def serialize(self):
        """Serialize the state of the worker to a TrainerState.
        Returns
        -------
        `TrainerState`
            The serialised class object to be written
            to Json or persisted into the file.
        """
        state = {
            'model': self._get_model_params(),
            'worker_state': self.worker.envis_state,
            'step': self.local_training_steps
        }
        # Optimizer parameters are only included when an optimizer exists.
        if self.optimizer is not None:
            state['optimizer'] = self._get_optimizer_params()
        return TrainerState(
            worker_index=self.worker_index,
            round_idx=self.round_idx,
            state_dict=state,
            model_preproc=self.model_preproc,
            storage=self.persistent_storage,
            local_sample_number=self.local_sample_number,
            local_training_steps=self.local_training_steps
        )
    def load_worker(
            self,
            state: TrainerState):
        """Constructs a trainer object from the state.
        Parameters
        ----------
        state : TrainerState
            TrainerState containing the weights
        """
        self.worker_index = state.worker_index
        self.persistent_storage = state.storage
        self.round_idx = state.round_idx
        self.load_model(state.state_dict['model'])
        self.local_training_steps = state.state_dict['step']
        if self.optimizer is not None:
            self.load_optimizer(state.state_dict['optimizer'])
        self.worker.update(state.state_dict["worker_state"])
    def update_dataset(self, model_preproc):
        """Update the dataset, trainer_index and model_index .
        Parameters
        ----------
        worker_index : int
            unique worker id
        model_preproc : `Preprocessor`
            The preprocessor contains the dataset of the worker
        """
        self.model_preproc = model_preproc
        self.local_sample_number = len(
            self.model_preproc.datasets('train'))
        self.reset_loaders()
    def run(self, func_name, *args, **kwargs):
        """
        Run the model.
        func_name : Name of the function to run in the trainer
        """
        if func_name in self.worker_funcs:
            print(f"Running function name: {func_name}")
            return self.process_args(
                self.worker_funcs[func_name](*args, **kwargs))
        else:
            raise ValueError(
                f"Job type <{func_name}> not part of worker"
+ f"<{self.worker.__class__.__name__}> functions") | 0.738763 | 0.200989 |
import os
import os.path
from typing import Any, Callable, List, Optional, Union, Tuple
from PIL import Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class Caltech101(VisionDataset):
    """`Caltech 101 <http://www.vision.caltech.edu/Image_Datasets/Caltech101/>`_ Dataset.

    .. warning::
        This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.

    Args:
        root (string): Root directory of dataset where directory
            ``caltech101`` exists or will be saved to if download is set to True.
        target_type (string or list, optional): Type of target to use, ``category`` or
            ``annotation``. Can also be a list to output a tuple with all specified
            target types. ``category`` represents the target class, and
            ``annotation`` is a list of points from a hand-generated outline.
            Defaults to ``category``.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    def __init__(
        self,
        root: str,
        target_type: Union[List[str], str] = "category",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(os.path.join(root, "caltech101"), transform=transform, target_transform=target_transform)
        os.makedirs(self.root, exist_ok=True)
        # Normalize target_type to a list and validate every entry up front.
        if isinstance(target_type, str):
            target_type = [target_type]
        self.target_type = [verify_str_arg(t, "target_type", ("category", "annotation")) for t in target_type]
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
        self.categories = sorted(os.listdir(os.path.join(self.root, "101_ObjectCategories")))
        self.categories.remove("BACKGROUND_Google")  # this is not a real class
        # For some reason, the category names in "101_ObjectCategories" and
        # "Annotations" do not always match. This is a manual map between the
        # two. Defaults to using same name, since most names are fine.
        name_map = {
            "Faces": "Faces_2",
            "Faces_easy": "Faces_3",
            "Motorbikes": "Motorbikes_16",
            "airplanes": "Airplanes_Side_2",
        }
        self.annotation_categories = list(map(lambda x: name_map[x] if x in name_map else x, self.categories))
        # Flat sample index: self.index[i] is the 1-based image number within
        # its category and self.y[i] is the category label for sample i.
        self.index: List[int] = []
        self.y = []
        for (i, c) in enumerate(self.categories):
            n = len(os.listdir(os.path.join(self.root, "101_ObjectCategories", c)))
            self.index.extend(range(1, n + 1))
            self.y.extend(n * [i])
    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where the type of target specified by target_type.
        """
        # scipy is imported lazily so the class works without it unless
        # "annotation" targets are actually requested (see class warning).
        import scipy.io
        # NOTE(review): the image is returned as-is; Caltech archives are
        # known to contain some non-RGB images, so transforms expecting three
        # channels may need a .convert("RGB") — confirm before relying on it.
        img = Image.open(
            os.path.join(
                self.root,
                "101_ObjectCategories",
                self.categories[self.y[index]],
                f"image_{self.index[index]:04d}.jpg",
            )
        )
        target: Any = []
        for t in self.target_type:
            if t == "category":
                target.append(self.y[index])
            elif t == "annotation":
                data = scipy.io.loadmat(
                    os.path.join(
                        self.root,
                        "Annotations",
                        self.annotation_categories[self.y[index]],
                        f"annotation_{self.index[index]:04d}.mat",
                    )
                )
                target.append(data["obj_contour"])
        # A single target type is unwrapped; multiple types come back as a tuple.
        target = tuple(target) if len(target) > 1 else target[0]
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
    def _check_integrity(self) -> bool:
        # can be more robust and check hash of files
        return os.path.exists(os.path.join(self.root, "101_ObjectCategories"))
    def __len__(self) -> int:
        return len(self.index)
    def download(self) -> None:
        """Download and extract the images and annotations archives into root."""
        if self._check_integrity():
            print("Files already downloaded and verified")
            return
        # NOTE(review): these caltech.edu URLs may no longer serve the
        # archives directly — confirm availability before relying on
        # download=True.
        download_and_extract_archive(
            "http://www.vision.caltech.edu/Image_Datasets/Caltech101/101_ObjectCategories.tar.gz",
            self.root,
            md5="b224c7392d521a49829488ab0f1120d9",
        )
        download_and_extract_archive(
            "http://www.vision.caltech.edu/Image_Datasets/Caltech101/Annotations.tar",
            self.root,
            md5="6f83eeb1f24d99cab4eb377263132c91",
        )
    def extra_repr(self) -> str:
        return "Target type: {target_type}".format(**self.__dict__)
class Caltech256(VisionDataset):
    """`Caltech 256 <http://www.vision.caltech.edu/Image_Datasets/Caltech256/>`_ Dataset.

    Args:
        root (string): Root directory of dataset where directory
            ``caltech256`` exists or will be saved to if download is set to True.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    def __init__(
        self,
        root: str,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(os.path.join(root, "caltech256"), transform=transform, target_transform=target_transform)
        os.makedirs(self.root, exist_ok=True)
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
        self.categories = sorted(os.listdir(os.path.join(self.root, "256_ObjectCategories")))
        # Flat sample index: self.index[i] is the 1-based image number within
        # its category and self.y[i] is the category label for sample i.
        self.index: List[int] = []
        self.y = []
        for (i, c) in enumerate(self.categories):
            # Unlike Caltech101, only .jpg files are counted here (category
            # folders may contain other files).
            n = len(
                [
                    item
                    for item in os.listdir(os.path.join(self.root, "256_ObjectCategories", c))
                    if item.endswith(".jpg")
                ]
            )
            self.index.extend(range(1, n + 1))
            self.y.extend(n * [i])
    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        # NOTE(review): image returned as-is; some Caltech images may not be
        # RGB — confirm whether callers need a .convert("RGB").
        img = Image.open(
            os.path.join(
                self.root,
                "256_ObjectCategories",
                self.categories[self.y[index]],
                f"{self.y[index] + 1:03d}_{self.index[index]:04d}.jpg",
            )
        )
        target = self.y[index]
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
    def _check_integrity(self) -> bool:
        # can be more robust and check hash of files
        return os.path.exists(os.path.join(self.root, "256_ObjectCategories"))
    def __len__(self) -> int:
        return len(self.index)
    def download(self) -> None:
        """Download and extract the images archive into root."""
        if self._check_integrity():
            print("Files already downloaded and verified")
            return
        # NOTE(review): this caltech.edu URL may no longer serve the archive —
        # confirm availability before relying on download=True.
        download_and_extract_archive(
            "http://www.vision.caltech.edu/Image_Datasets/Caltech256/256_ObjectCategories.tar",
            self.root,
            filename="256_ObjectCategories.tar",
            md5="67b4f42ca05d46448c6bb8ecd2220f6d",
) | torchvision/datasets/caltech.py | import os
import os.path
from typing import Any, Callable, List, Optional, Union, Tuple
from PIL import Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
# NOTE(review): byte-identical duplicate of the Caltech101 class above (this
# is the dataset's `parsed_code` column for the same file); keep both copies
# in sync if either is edited.
class Caltech101(VisionDataset):
    """`Caltech 101 <http://www.vision.caltech.edu/Image_Datasets/Caltech101/>`_ Dataset.
    .. warning::
        This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.
    Args:
        root (string): Root directory of dataset where directory
            ``caltech101`` exists or will be saved to if download is set to True.
        target_type (string or list, optional): Type of target to use, ``category`` or
            ``annotation``. Can also be a list to output a tuple with all specified
            target types. ``category`` represents the target class, and
            ``annotation`` is a list of points from a hand-generated outline.
            Defaults to ``category``.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    def __init__(
        self,
        root: str,
        target_type: Union[List[str], str] = "category",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(os.path.join(root, "caltech101"), transform=transform, target_transform=target_transform)
        os.makedirs(self.root, exist_ok=True)
        if isinstance(target_type, str):
            target_type = [target_type]
        self.target_type = [verify_str_arg(t, "target_type", ("category", "annotation")) for t in target_type]
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
        self.categories = sorted(os.listdir(os.path.join(self.root, "101_ObjectCategories")))
        self.categories.remove("BACKGROUND_Google")  # this is not a real class
        # For some reason, the category names in "101_ObjectCategories" and
        # "Annotations" do not always match. This is a manual map between the
        # two. Defaults to using same name, since most names are fine.
        name_map = {
            "Faces": "Faces_2",
            "Faces_easy": "Faces_3",
            "Motorbikes": "Motorbikes_16",
            "airplanes": "Airplanes_Side_2",
        }
        self.annotation_categories = list(map(lambda x: name_map[x] if x in name_map else x, self.categories))
        self.index: List[int] = []
        self.y = []
        for (i, c) in enumerate(self.categories):
            n = len(os.listdir(os.path.join(self.root, "101_ObjectCategories", c)))
            self.index.extend(range(1, n + 1))
            self.y.extend(n * [i])
    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where the type of target specified by target_type.
        """
        import scipy.io
        img = Image.open(
            os.path.join(
                self.root,
                "101_ObjectCategories",
                self.categories[self.y[index]],
                f"image_{self.index[index]:04d}.jpg",
            )
        )
        target: Any = []
        for t in self.target_type:
            if t == "category":
                target.append(self.y[index])
            elif t == "annotation":
                data = scipy.io.loadmat(
                    os.path.join(
                        self.root,
                        "Annotations",
                        self.annotation_categories[self.y[index]],
                        f"annotation_{self.index[index]:04d}.mat",
                    )
                )
                target.append(data["obj_contour"])
        target = tuple(target) if len(target) > 1 else target[0]
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
    def _check_integrity(self) -> bool:
        # can be more robust and check hash of files
        return os.path.exists(os.path.join(self.root, "101_ObjectCategories"))
    def __len__(self) -> int:
        return len(self.index)
    def download(self) -> None:
        if self._check_integrity():
            print("Files already downloaded and verified")
            return
        download_and_extract_archive(
            "http://www.vision.caltech.edu/Image_Datasets/Caltech101/101_ObjectCategories.tar.gz",
            self.root,
            md5="b224c7392d521a49829488ab0f1120d9",
        )
        download_and_extract_archive(
            "http://www.vision.caltech.edu/Image_Datasets/Caltech101/Annotations.tar",
            self.root,
            md5="6f83eeb1f24d99cab4eb377263132c91",
        )
    def extra_repr(self) -> str:
        return "Target type: {target_type}".format(**self.__dict__)
# NOTE(review): byte-identical duplicate of the Caltech256 class above (this
# is the dataset's `parsed_code` column for the same file); keep both copies
# in sync if either is edited.
class Caltech256(VisionDataset):
    """`Caltech 256 <http://www.vision.caltech.edu/Image_Datasets/Caltech256/>`_ Dataset.
    Args:
        root (string): Root directory of dataset where directory
            ``caltech256`` exists or will be saved to if download is set to True.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    def __init__(
        self,
        root: str,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(os.path.join(root, "caltech256"), transform=transform, target_transform=target_transform)
        os.makedirs(self.root, exist_ok=True)
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
        self.categories = sorted(os.listdir(os.path.join(self.root, "256_ObjectCategories")))
        self.index: List[int] = []
        self.y = []
        for (i, c) in enumerate(self.categories):
            n = len(
                [
                    item
                    for item in os.listdir(os.path.join(self.root, "256_ObjectCategories", c))
                    if item.endswith(".jpg")
                ]
            )
            self.index.extend(range(1, n + 1))
            self.y.extend(n * [i])
    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img = Image.open(
            os.path.join(
                self.root,
                "256_ObjectCategories",
                self.categories[self.y[index]],
                f"{self.y[index] + 1:03d}_{self.index[index]:04d}.jpg",
            )
        )
        target = self.y[index]
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
    def _check_integrity(self) -> bool:
        # can be more robust and check hash of files
        return os.path.exists(os.path.join(self.root, "256_ObjectCategories"))
    def __len__(self) -> int:
        return len(self.index)
    def download(self) -> None:
        if self._check_integrity():
            print("Files already downloaded and verified")
            return
        download_and_extract_archive(
            "http://www.vision.caltech.edu/Image_Datasets/Caltech256/256_ObjectCategories.tar",
            self.root,
            filename="256_ObjectCategories.tar",
            md5="67b4f42ca05d46448c6bb8ecd2220f6d",
) | 0.865181 | 0.396915 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import filer.fields.file
import filer.fields.image
import publications.fields
# NOTE(review): auto-generated Django migration. Do not hand-edit an applied
# migration — schema changes belong in a new follow-up migration.
class Migration(migrations.Migration):
    # First migration of the publications app: creates the full schema.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.FILER_IMAGE_MODEL),
        ('filer', '0011_auto_20190418_0137'),
    ]
    operations = [
        migrations.CreateModel(
            name='Archive',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): max_length=2024 (also on Tag.name, Collection.name,
                # Publication.artworkMedium below) looks like a typo for 1024 —
                # harmless, but confirm before changing in a later migration.
                ('name', models.CharField(max_length=2024)),
            ],
        ),
        migrations.CreateModel(
            name='Creator',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='List',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('list', models.CharField(max_length=128)),
                ('description', models.CharField(max_length=128)),
            ],
            options={
                'verbose_name_plural': 'Lists',
                'ordering': ('list',),
            },
        ),
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('firstName', models.CharField(blank=True, max_length=1024, null=True)),
                ('lastName', models.CharField(blank=True, max_length=1024, null=True)),
                ('name', models.CharField(blank=True, max_length=1024, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Role',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('typ', models.CharField(blank=True, max_length=512, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=2024)),
            ],
        ),
        migrations.CreateModel(
            name='Type',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.PositiveIntegerField(db_index=True, editable=False)),
                ('type', models.CharField(max_length=128)),
                ('description', models.CharField(max_length=128)),
                ('zotero_types', models.CharField(default='', help_text='Possible Zotero types, separated by comma.', max_length=256, verbose_name='zotero type')),
                ('bibtex_types', models.CharField(default='article', help_text='Possible BibTex types, separated by comma.', max_length=256, verbose_name='BibTex types')),
                ('hidden', models.BooleanField(default=False, help_text='Hide publications from main view.')),
            ],
            options={
                # NOTE(review): leading space in ' Types' (and ' Publications'
                # below) — presumably a sorting hack for the admin index; confirm.
                'verbose_name_plural': ' Types',
                'ordering': ('order',),
            },
        ),
        migrations.CreateModel(
            name='Publication',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('zoterokey', models.CharField(blank=True, help_text='Zotero key. Leave blank if unsure.', max_length=512, null=True)),
                ('citekey', models.CharField(blank=True, help_text='BibTex citation key. Leave blank if unsure.', max_length=512, null=True)),
                ('title', models.CharField(max_length=512)),
                ('authors', models.CharField(help_text='List of authors separated by commas or <i>and</i>.', max_length=2048)),
                ('year', models.PositiveIntegerField(default=0)),
                ('month', models.IntegerField(blank=True, choices=[(1, 'January'), (2, 'February'), (3, 'March'), (4, 'April'), (5, 'May'), (6, 'June'), (7, 'July'), (8, 'August'), (9, 'September'), (10, 'October'), (11, 'November'), (12, 'December')], null=True)),
                ('date', models.CharField(blank=True, max_length=256)),
                ('journal', models.CharField(blank=True, max_length=256)),
                ('book_title', models.CharField(blank=True, max_length=256)),
                ('publisher', models.CharField(blank=True, max_length=256)),
                ('institution', models.CharField(blank=True, max_length=256)),
                ('volume', models.CharField(blank=True, max_length=256, null=True)),
                ('number', models.IntegerField(blank=True, null=True, verbose_name='Issue number')),
                ('pages', publications.fields.PagesField(blank=True, max_length=32)),
                ('note', models.CharField(blank=True, max_length=256)),
                ('keywords', models.CharField(blank=True, help_text='List of keywords separated by commas.', max_length=256)),
                ('url', models.URLField(blank=True, help_text='Link to PDF or journal page.', verbose_name='URL')),
                ('code', models.URLField(blank=True, help_text='Link to page with code.')),
                ('pdf', models.FileField(blank=True, null=True, upload_to='publications/', verbose_name='PDF')),
                ('image', models.ImageField(blank=True, null=True, upload_to='publications/images/')),
                ('thumbnail', models.ImageField(blank=True, null=True, upload_to='publications/thumbnails/')),
                ('doi', models.CharField(blank=True, max_length=128, verbose_name='DOI')),
                ('external', models.BooleanField(default=False, help_text='If publication was written in another lab, mark as external.')),
                ('abstract', models.TextField(blank=True)),
                ('isbn', models.CharField(blank=True, help_text='Only for a book.', max_length=32, verbose_name='ISBN')),
                ('artworkMedium', models.CharField(default='', max_length=2024)),
                ('artworkSize', models.CharField(default='', max_length=1024)),
                ('creators', models.ManyToManyField(to='publications.Creator')),
                ('lists', models.ManyToManyField(blank=True, to='publications.List')),
                ('tags', models.ManyToManyField(blank=True, to='publications.Tag')),
                ('type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='publications.Type')),
            ],
            options={
                'verbose_name_plural': ' Publications',
                'ordering': ['-year', '-month', '-id'],
            },
        ),
        migrations.CreateModel(
            name='PDFAttachment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('zoterokey', models.CharField(max_length=30)),
                ('file', filer.fields.file.FilerFileField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='filer.File')),
                ('parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='publications.Publication')),
                ('tags', models.ManyToManyField(blank=True, to='publications.Tag')),
            ],
        ),
        migrations.CreateModel(
            name='ImageAttachment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('zoterokey', models.CharField(max_length=30)),
                ('file', filer.fields.image.FilerImageField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.FILER_IMAGE_MODEL)),
                ('parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='publications.Publication')),
                ('tags', models.ManyToManyField(blank=True, to='publications.Tag')),
            ],
        ),
        migrations.CreateModel(
            name='CustomLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(max_length=256)),
                ('url', models.URLField(verbose_name='URL')),
                ('publication', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='publications.Publication')),
            ],
        ),
        migrations.CreateModel(
            name='CustomFile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(max_length=256)),
                ('file', models.FileField(upload_to='publications/')),
                ('publication', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='publications.Publication')),
            ],
        ),
        # Creator FKs are added after Person/Role exist.
        migrations.AddField(
            model_name='creator',
            name='person',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='publications.Person'),
        ),
        migrations.AddField(
            model_name='creator',
            name='role',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='publications.Role'),
        ),
        migrations.CreateModel(
            name='Collection',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('zoterokey', models.CharField(max_length=100)),
                ('name', models.CharField(default='', max_length=2024)),
                ('items', models.ManyToManyField(to='publications.Publication')),
                # Self-referential parent; SET_NULL keeps children when a
                # parent collection is deleted.
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='publications.Collection')),
            ],
        ),
] | publications/migrations/0001_initial.py |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import filer.fields.file
import filer.fields.image
import publications.fields
class Migration(migrations.Migration):
    """Initial schema for the publications app.

    Creates the bibliography models (Archive, Creator, List, Person, Role,
    Tag, Type, Publication, Collection) and the attachment/link models that
    hang off a Publication (PDFAttachment, ImageAttachment, CustomLink,
    CustomFile).  Depends on django-filer for file/image attachments.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.FILER_IMAGE_MODEL),
        ('filer', '0011_auto_20190418_0137'),
    ]

    operations = [
        migrations.CreateModel(
            name='Archive',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=2024)),
            ],
        ),
        migrations.CreateModel(
            name='Creator',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='List',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('list', models.CharField(max_length=128)),
                ('description', models.CharField(max_length=128)),
            ],
            options={
                'verbose_name_plural': 'Lists',
                'ordering': ('list',),
            },
        ),
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('firstName', models.CharField(blank=True, max_length=1024, null=True)),
                ('lastName', models.CharField(blank=True, max_length=1024, null=True)),
                ('name', models.CharField(blank=True, max_length=1024, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Role',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('typ', models.CharField(blank=True, max_length=512, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=2024)),
            ],
        ),
        migrations.CreateModel(
            name='Type',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.PositiveIntegerField(db_index=True, editable=False)),
                ('type', models.CharField(max_length=128)),
                ('description', models.CharField(max_length=128)),
                ('zotero_types', models.CharField(default='', help_text='Possible Zotero types, separated by comma.', max_length=256, verbose_name='zotero type')),
                ('bibtex_types', models.CharField(default='article', help_text='Possible BibTex types, separated by comma.', max_length=256, verbose_name='BibTex types')),
                ('hidden', models.BooleanField(default=False, help_text='Hide publications from main view.')),
            ],
            options={
                'verbose_name_plural': ' Types',
                'ordering': ('order',),
            },
        ),
        migrations.CreateModel(
            name='Publication',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('zoterokey', models.CharField(blank=True, help_text='Zotero key. Leave blank if unsure.', max_length=512, null=True)),
                ('citekey', models.CharField(blank=True, help_text='BibTex citation key. Leave blank if unsure.', max_length=512, null=True)),
                ('title', models.CharField(max_length=512)),
                ('authors', models.CharField(help_text='List of authors separated by commas or <i>and</i>.', max_length=2048)),
                ('year', models.PositiveIntegerField(default=0)),
                ('month', models.IntegerField(blank=True, choices=[(1, 'January'), (2, 'February'), (3, 'March'), (4, 'April'), (5, 'May'), (6, 'June'), (7, 'July'), (8, 'August'), (9, 'September'), (10, 'October'), (11, 'November'), (12, 'December')], null=True)),
                ('date', models.CharField(blank=True, max_length=256)),
                ('journal', models.CharField(blank=True, max_length=256)),
                ('book_title', models.CharField(blank=True, max_length=256)),
                ('publisher', models.CharField(blank=True, max_length=256)),
                ('institution', models.CharField(blank=True, max_length=256)),
                ('volume', models.CharField(blank=True, max_length=256, null=True)),
                ('number', models.IntegerField(blank=True, null=True, verbose_name='Issue number')),
                ('pages', publications.fields.PagesField(blank=True, max_length=32)),
                ('note', models.CharField(blank=True, max_length=256)),
                ('keywords', models.CharField(blank=True, help_text='List of keywords separated by commas.', max_length=256)),
                ('url', models.URLField(blank=True, help_text='Link to PDF or journal page.', verbose_name='URL')),
                ('code', models.URLField(blank=True, help_text='Link to page with code.')),
                ('pdf', models.FileField(blank=True, null=True, upload_to='publications/', verbose_name='PDF')),
                ('image', models.ImageField(blank=True, null=True, upload_to='publications/images/')),
                ('thumbnail', models.ImageField(blank=True, null=True, upload_to='publications/thumbnails/')),
                ('doi', models.CharField(blank=True, max_length=128, verbose_name='DOI')),
                ('external', models.BooleanField(default=False, help_text='If publication was written in another lab, mark as external.')),
                ('abstract', models.TextField(blank=True)),
                ('isbn', models.CharField(blank=True, help_text='Only for a book.', max_length=32, verbose_name='ISBN')),
                ('artworkMedium', models.CharField(default='', max_length=2024)),
                ('artworkSize', models.CharField(default='', max_length=1024)),
                ('creators', models.ManyToManyField(to='publications.Creator')),
                ('lists', models.ManyToManyField(blank=True, to='publications.List')),
                ('tags', models.ManyToManyField(blank=True, to='publications.Tag')),
                ('type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='publications.Type')),
            ],
            options={
                'verbose_name_plural': ' Publications',
                'ordering': ['-year', '-month', '-id'],
            },
        ),
        migrations.CreateModel(
            name='PDFAttachment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('zoterokey', models.CharField(max_length=30)),
                ('file', filer.fields.file.FilerFileField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='filer.File')),
                ('parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='publications.Publication')),
                ('tags', models.ManyToManyField(blank=True, to='publications.Tag')),
            ],
        ),
        migrations.CreateModel(
            name='ImageAttachment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('zoterokey', models.CharField(max_length=30)),
                ('file', filer.fields.image.FilerImageField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.FILER_IMAGE_MODEL)),
                ('parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='publications.Publication')),
                ('tags', models.ManyToManyField(blank=True, to='publications.Tag')),
            ],
        ),
        migrations.CreateModel(
            name='CustomLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(max_length=256)),
                ('url', models.URLField(verbose_name='URL')),
                ('publication', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='publications.Publication')),
            ],
        ),
        migrations.CreateModel(
            name='CustomFile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(max_length=256)),
                ('file', models.FileField(upload_to='publications/')),
                ('publication', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='publications.Publication')),
            ],
        ),
        migrations.AddField(
            model_name='creator',
            name='person',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='publications.Person'),
        ),
        migrations.AddField(
            model_name='creator',
            name='role',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='publications.Role'),
        ),
        migrations.CreateModel(
            name='Collection',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('zoterokey', models.CharField(max_length=100)),
                ('name', models.CharField(default='', max_length=2024)),
                ('items', models.ManyToManyField(to='publications.Publication')),
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='publications.Collection')),
            ],
        ),
    ]
import pyspark
import json
import pandas as pd
import numpy as np
from pyspark.sql.functions import udf
from pyspark.sql.types import ArrayType, StringType
from pyspark.ml.feature import Tokenizer, CountVectorizer, StopWordsRemover, NGram, IDF
from nltk.corpus import stopwords
from string import maketrans
"""
========================================
TOKENIZATION FUNCTIONS
========================================
Functions to tokenize reviewText
"""
def clean_reviewText(df):
    """Add a "cleanText" column with punctuation replaced by spaces.

    INPUT: PySpark DataFrame with a "reviewText" string column
    RETURN: PySpark DataFrame with an added "cleanText" column
    """
    # Characters treated as punctuation; each is mapped to a single space.
    # (The \\ below is an explicit escaped backslash, same value as the old
    # literal's unrecognized '\|' escape.)
    intab = '~!@#$%^&*()-_+=[]}{\\|;:"<>,.?/'
    # BUG FIX: maketrans requires both arguments to have the same length;
    # the previous call passed a single-space outtab and raised ValueError.
    punc_tab = maketrans(intab, ' ' * len(intab))
    # remove punctuation via a single translate pass per review
    punc_trans_udf = udf(lambda x: x.encode("utf-8").translate(punc_tab))
    return df.withColumn("cleanText", punc_trans_udf(df["reviewText"]))
def remove_empty_tokens(df):
    """Drop empty-string tokens from the "rawTokens" array column.

    INPUT: PySpark DataFrame with a "rawTokens" array<string> column
    RETURN: PySpark DataFrame with empty tokens removed from "rawTokens"
    """
    # A list comprehension (rather than filter(None, ...)) returns a list on
    # both Python 2 and 3; on Python 3 filter() returns a lazy iterator,
    # which does not serialize correctly as ArrayType(StringType()).
    remove_empty_udf = udf(lambda x: [t for t in x if t], ArrayType(StringType()))
    return df.withColumn("rawTokens", remove_empty_udf(df["rawTokens"]))
def tokenize(df):
    """Split "cleanText" into a "rawTokens" column of words, dropping empties.

    INPUT: PySpark DataFrame with a "cleanText" column
    RETURN: PySpark DataFrame with a cleaned "rawTokens" column
    """
    word_splitter = Tokenizer(inputCol="cleanText", outputCol="rawTokens")
    tokenized = word_splitter.transform(df)
    # Tokenizer can emit empty strings (e.g. for runs of spaces); strip them.
    return remove_empty_tokens(tokenized)
def remove_stop_words(df):
    """Filter NLTK English stopwords out of "rawTokens" into a "tokens" column.

    INPUT: PySpark DataFrame with a "rawTokens" column
    RETURN: PySpark DataFrame with a stopword-free "tokens" column
    """
    english_stops = stopwords.words("english")
    remover = StopWordsRemover(inputCol="rawTokens", outputCol="tokens",
                               stopWords=english_stops)
    return remover.transform(df)
def add_tokens(df):
    """Run the full tokenization pipeline on the review text.

    INPUT: PySpark DataFrame with a "reviewText" column
    RETURN: PySpark DataFrame with a "tokens" column added

    Pipeline: strip punctuation -> tokenize -> remove stopwords.
    """
    return remove_stop_words(tokenize(clean_reviewText(df)))
"""
========================================
TFIDF VECTORIZATION FUNCTIONS
========================================
Functions to create TFIDF vectors and
extract vocabulary for vectors
"""
def add_tf_and_vocab(df):
    """Add term-frequency vectors and return them with the fitted vocabulary.

    INPUT: PySpark DataFrame with a "tokens" column
    RETURN: (PySpark DataFrame with a "tf_vector" column, vocabulary list)
    """
    vectorizer = CountVectorizer(inputCol="tokens", outputCol="tf_vector")
    fitted = vectorizer.fit(df)
    # The vocabulary maps vector indices back to token strings.
    return fitted.transform(df), fitted.vocabulary
def add_tfidf(df):
    """Add a "tfidf_vector" column computed from the "tf_vector" column.

    INPUT: PySpark DataFrame with a "tf_vector" column
    RETURN: PySpark DataFrame with a "tfidf_vector" column added
    """
    scaler = IDF(inputCol="tf_vector", outputCol="tfidf_vector")
    return scaler.fit(df).transform(df)
"""
========================================
TFIDF MAPPING FUNCTIONS
========================================
Functions to map elements in TFIDF
vectors to terms in vocabularies
"""
def extract_top_features(tfidf_vector, vocab, n):
    """
    INPUT: SparseVector, List, Int
    RETURN: List
    Map the last n stored indices of the TFIDF vector onto vocabulary terms.
    """
    # The original author notes the tfidf elements are pre-sorted by
    # importance, so the trailing n indices are taken as the top features.
    return [vocab[idx] for idx in tfidf_vector.indices[-n:]]
def add_top_features(df, vocab, n=10):
    """
    INPUT: PySpark DataFrame, List, Int
    RETURN: PySpark DataFrame
    Map the top n features of each summed TFIDF vector (column
    "tfidf_vectors_sum") onto vocabulary terms and store the result in a
    new "top_features" column.
    """
    top_terms_udf = udf(lambda vec: extract_top_features(vec, vocab, n))
    return df.withColumn("top_features",
                         top_terms_udf(df["tfidf_vectors_sum"]))
def add_pos_neg_features(df, vocab_pos, vocab_neg, n=10):
    """
    INPUT: Spark DataFrame, List, List, Int
    RETURN: Spark DataFrame
    Split the frame on the boolean "positive" column, attach top features
    to each half using the matching vocabulary, then recombine the halves.
    """
    positives = df.where(df.positive == True)
    negatives = df.where(df.positive == False)
    with_pos_terms = add_top_features(positives, vocab_pos, n)
    with_neg_terms = add_top_features(negatives, vocab_neg, n)
    return with_pos_terms.unionAll(with_neg_terms)
"""
========================================
METADATA FUNCTIONS
========================================
Functions to join product review data
with metadata
"""
def join_metadata(df_products, df_meta):
    """Join product "categories" metadata onto the review frame by asin.

    INPUT: PySpark DataFrames of product reviews and product metadata
    RETURN: PySpark DataFrame with a "categories" column added
    """
    meta_cols = df_meta.select("asin", "categories")
    joined = df_products.join(meta_cols, df_products.asin == meta_cols.asin)
    # Drop the duplicate join key coming from the metadata side.
    return joined.drop(meta_cols.asin)
"""
========================================
MAIN
========================================
"""
if __name__ == "__main__":
    # Import-only module: the pipeline functions are driven by external
    # scripts, so nothing runs when invoked directly.
    pass
import json
import pandas as pd
import numpy as np
from pyspark.sql.functions import udf
from pyspark.sql.types import ArrayType, StringType
from pyspark.ml.feature import Tokenizer, CountVectorizer, StopWordsRemover, NGram, IDF
from nltk.corpus import stopwords
from string import maketrans
"""
========================================
TOKENIZATION FUNCTIONS
========================================
Functions to tokenize reviewText
"""
def clean_reviewText(df):
    """Add a "cleanText" column with punctuation replaced by spaces.

    INPUT: PySpark DataFrame with a "reviewText" string column
    RETURN: PySpark DataFrame with an added "cleanText" column
    """
    # Characters treated as punctuation; each is mapped to a single space.
    # (The \\ below is an explicit escaped backslash, same value as the old
    # literal's unrecognized '\|' escape.)
    intab = '~!@#$%^&*()-_+=[]}{\\|;:"<>,.?/'
    # BUG FIX: maketrans requires both arguments to have the same length;
    # the previous call passed a single-space outtab and raised ValueError.
    punc_tab = maketrans(intab, ' ' * len(intab))
    # remove punctuation via a single translate pass per review
    punc_trans_udf = udf(lambda x: x.encode("utf-8").translate(punc_tab))
    return df.withColumn("cleanText", punc_trans_udf(df["reviewText"]))
def remove_empty_tokens(df):
    """Drop empty-string tokens from the "rawTokens" array column.

    INPUT: PySpark DataFrame with a "rawTokens" array<string> column
    RETURN: PySpark DataFrame with empty tokens removed from "rawTokens"
    """
    # A list comprehension (rather than filter(None, ...)) returns a list on
    # both Python 2 and 3; on Python 3 filter() returns a lazy iterator,
    # which does not serialize correctly as ArrayType(StringType()).
    remove_empty_udf = udf(lambda x: [t for t in x if t], ArrayType(StringType()))
    return df.withColumn("rawTokens", remove_empty_udf(df["rawTokens"]))
def tokenize(df):
    """Split "cleanText" into a "rawTokens" column of words, dropping empties.

    INPUT: PySpark DataFrame with a "cleanText" column
    RETURN: PySpark DataFrame with a cleaned "rawTokens" column
    """
    word_splitter = Tokenizer(inputCol="cleanText", outputCol="rawTokens")
    tokenized = word_splitter.transform(df)
    # Tokenizer can emit empty strings (e.g. for runs of spaces); strip them.
    return remove_empty_tokens(tokenized)
def remove_stop_words(df):
    """Filter NLTK English stopwords out of "rawTokens" into a "tokens" column.

    INPUT: PySpark DataFrame with a "rawTokens" column
    RETURN: PySpark DataFrame with a stopword-free "tokens" column
    """
    english_stops = stopwords.words("english")
    remover = StopWordsRemover(inputCol="rawTokens", outputCol="tokens",
                               stopWords=english_stops)
    return remover.transform(df)
def add_tokens(df):
    """Run the full tokenization pipeline on the review text.

    INPUT: PySpark DataFrame with a "reviewText" column
    RETURN: PySpark DataFrame with a "tokens" column added

    Pipeline: strip punctuation -> tokenize -> remove stopwords.
    """
    return remove_stop_words(tokenize(clean_reviewText(df)))
"""
========================================
TFIDF VECTORIZATION FUNCTIONS
========================================
Functions to create TFIDF vectors and
extract vocabulary for vectors
"""
def add_tf_and_vocab(df):
    """Add term-frequency vectors and return them with the fitted vocabulary.

    INPUT: PySpark DataFrame with a "tokens" column
    RETURN: (PySpark DataFrame with a "tf_vector" column, vocabulary list)
    """
    vectorizer = CountVectorizer(inputCol="tokens", outputCol="tf_vector")
    fitted = vectorizer.fit(df)
    # The vocabulary maps vector indices back to token strings.
    return fitted.transform(df), fitted.vocabulary
def add_tfidf(df):
    """Add a "tfidf_vector" column computed from the "tf_vector" column.

    INPUT: PySpark DataFrame with a "tf_vector" column
    RETURN: PySpark DataFrame with a "tfidf_vector" column added
    """
    scaler = IDF(inputCol="tf_vector", outputCol="tfidf_vector")
    return scaler.fit(df).transform(df)
"""
========================================
TFIDF MAPPING FUNCTIONS
========================================
Functions to map elements in TFIDF
vectors to terms in vocabularies
"""
def extract_top_features(tfidf_vector, vocab, n):
    """
    INPUT: SparseVector, List, Int
    RETURN: List
    Map the last n stored indices of the TFIDF vector onto vocabulary terms.
    """
    # The original author notes the tfidf elements are pre-sorted by
    # importance, so the trailing n indices are taken as the top features.
    return [vocab[idx] for idx in tfidf_vector.indices[-n:]]
def add_top_features(df, vocab, n=10):
    """
    INPUT: PySpark DataFrame, List, Int
    RETURN: PySpark DataFrame
    Map the top n features of each summed TFIDF vector (column
    "tfidf_vectors_sum") onto vocabulary terms and store the result in a
    new "top_features" column.
    """
    top_terms_udf = udf(lambda vec: extract_top_features(vec, vocab, n))
    return df.withColumn("top_features",
                         top_terms_udf(df["tfidf_vectors_sum"]))
def add_pos_neg_features(df, vocab_pos, vocab_neg, n=10):
    """
    INPUT: Spark DataFrame, List, List, Int
    RETURN: Spark DataFrame
    Split the frame on the boolean "positive" column, attach top features
    to each half using the matching vocabulary, then recombine the halves.
    """
    positives = df.where(df.positive == True)
    negatives = df.where(df.positive == False)
    with_pos_terms = add_top_features(positives, vocab_pos, n)
    with_neg_terms = add_top_features(negatives, vocab_neg, n)
    return with_pos_terms.unionAll(with_neg_terms)
"""
========================================
METADATA FUNCTIONS
========================================
Functions to join product review data
with metadata
"""
def join_metadata(df_products, df_meta):
    """Join product "categories" metadata onto the review frame by asin.

    INPUT: PySpark DataFrames of product reviews and product metadata
    RETURN: PySpark DataFrame with a "categories" column added
    """
    meta_cols = df_meta.select("asin", "categories")
    joined = df_products.join(meta_cols, df_products.asin == meta_cols.asin)
    # Drop the duplicate join key coming from the metadata side.
    return joined.drop(meta_cols.asin)
"""
========================================
MAIN
========================================
"""
if __name__ == "__main__":
    # Import-only module: the pipeline functions are driven by external
    # scripts, so nothing runs when invoked directly.
    pass
from ..broker import Broker
class SensorDatumBroker(Broker):
controller = "sensor_data"
def index(self, **kwargs):
"""Lists the available sensor data. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param data_source_id: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type data_source_id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param data_source_id: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type data_source_id: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for the table entry.
:type id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for the table entry.
:type id: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, data_source_id, name, name_index, label, category, value, status, units, details, updated_at, first_seen.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each SensorDatum. Valid values are id, data_source_id, name, name_index, label, category, value, status, units, details, updated_at, first_seen. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return sensor_data: An array of the SensorDatum objects that match the specified input criteria.
:rtype sensor_data: Array of SensorDatum
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
    """List sensor data matching the input criteria.

    More flexible than :meth:`index`, but more demanding on the system
    and slower. The field inputs below filter the result together with
    the optional ``query`` string and ``xml_filter``.

    Filter inputs (optional; scalar on api 2.4, Array of the same type
    from api 2.5 onward):
        category (String): the type of sensor data.
        data_source_id (Integer): internal NetMRI identifier for the
            collector NetMRI that collected this data record.
        details (String): description of the status of the sensor data.
        first_seen (String): time when the failure was first detected.
        id (Integer): internal NetMRI identifier for the table entry.
        label (String): the label for the sensor data.
        name (String): the name of the sensor data.
        name_index (String): the index for all data with a given name.
        status (String): the status of the sensor data.
        units (String): units the value of the sensor data is in.
        updated_at (DateTime): date and time the record was last modified
            in NetMRI.
        value (String): the value of the sensor data.

    Free-form search inputs (optional):
        query (String): matched against the attributes listed above,
            returning records where one or more contain the value;
            surround the value with '/' and '/' to perform a regular
            expression search instead of a containment operation.
        xml_filter (String, api 2.3+): a SetFilter XML structure to
            further refine the search; applied after any query or field
            values but before any limit options.

    Paging and output-shaping inputs (optional):
        start (Integer, default 0): record number to return in the
            selected page of data.
        limit (Integer, default 1000, max 10000): page size; the page
            containing ``start`` is returned.
        sort (Array of String, default id): sort field(s); valid values
            are id, data_source_id, name, name_index, label, category,
            value, status, units, details, updated_at, first_seen.
        dir (Array of String, default asc): sort direction(s), 'asc' or
            'desc'.
        select (Array): attributes to return for each SensorDatum; all
            attributes if empty or omitted.
        goto_field (String, api 2.8+): field name for NIOS GOTO used to
            locate a row position of records.
        goto_value (String, api 2.8+): value of goto_field for NIOS GOTO
            used to locate a row position of records.

    :return sensor_data: Array of the SensorDatum objects that match the
        specified input criteria.
    """
    remote_method = self._get_method_fullname("search")
    return self.api_list_request(remote_method, kwargs)
def find(self, **kwargs):
"""Lists the available sensor data matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: category, data_source_id, details, first_seen, id, label, name, name_index, status, units, updated_at, value.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_category: The operator to apply to the field category. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. category: The type of sensor data. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_category: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_category: If op_category is specified, the field named in this input will be compared to the value in category using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_category must be specified if op_category is specified.
:type val_f_category: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_category: If op_category is specified, this value will be compared to the value in category using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_category must be specified if op_category is specified.
:type val_c_category: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_data_source_id: The operator to apply to the field data_source_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. data_source_id: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_data_source_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_data_source_id: If op_data_source_id is specified, the field named in this input will be compared to the value in data_source_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_data_source_id must be specified if op_data_source_id is specified.
:type val_f_data_source_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_data_source_id: If op_data_source_id is specified, this value will be compared to the value in data_source_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_data_source_id must be specified if op_data_source_id is specified.
:type val_c_data_source_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_details: The operator to apply to the field details. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. details: The description of the status of the sensor data. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_details: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_details: If op_details is specified, the field named in this input will be compared to the value in details using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_details must be specified if op_details is specified.
:type val_f_details: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_details: If op_details is specified, this value will be compared to the value in details using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_details must be specified if op_details is specified.
:type val_c_details: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_first_seen: The operator to apply to the field first_seen. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. first_seen: The time when the failure was first detected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_first_seen: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_first_seen: If op_first_seen is specified, the field named in this input will be compared to the value in first_seen using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_first_seen must be specified if op_first_seen is specified.
:type val_f_first_seen: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_first_seen: If op_first_seen is specified, this value will be compared to the value in first_seen using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_first_seen must be specified if op_first_seen is specified.
:type val_c_first_seen: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_id: The operator to apply to the field id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. id: The internal NetMRI identifier for the table entry. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_id: If op_id is specified, the field named in this input will be compared to the value in id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_id must be specified if op_id is specified.
:type val_f_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_id: If op_id is specified, this value will be compared to the value in id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_id must be specified if op_id is specified.
:type val_c_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_label: The operator to apply to the field label. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. label: The label for the sensor data. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_label: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_label: If op_label is specified, the field named in this input will be compared to the value in label using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_label must be specified if op_label is specified.
:type val_f_label: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_label: If op_label is specified, this value will be compared to the value in label using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_label must be specified if op_label is specified.
:type val_c_label: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_name: The operator to apply to the field name. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. name: The name of the sensor data. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_name: If op_name is specified, the field named in this input will be compared to the value in name using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_name must be specified if op_name is specified.
:type val_f_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_name: If op_name is specified, this value will be compared to the value in name using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_name must be specified if op_name is specified.
:type val_c_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_name_index: The operator to apply to the field name_index. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. name_index: The index for all data with a given name. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_name_index: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_name_index: If op_name_index is specified, the field named in this input will be compared to the value in name_index using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_name_index must be specified if op_name_index is specified.
:type val_f_name_index: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_name_index: If op_name_index is specified, this value will be compared to the value in name_index using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_name_index must be specified if op_name_index is specified.
:type val_c_name_index: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_status: The operator to apply to the field status. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. status: The status of the sensor data. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_status: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_status: If op_status is specified, the field named in this input will be compared to the value in status using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_status must be specified if op_status is specified.
:type val_f_status: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_status: If op_status is specified, this value will be compared to the value in status using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_status must be specified if op_status is specified.
:type val_c_status: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_units: The operator to apply to the field units. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. units: The units the value of the sensor data is in. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_units: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_units: If op_units is specified, the field named in this input will be compared to the value in units using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_units must be specified if op_units is specified.
:type val_f_units: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_units: If op_units is specified, this value will be compared to the value in units using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_units must be specified if op_units is specified.
:type val_c_units: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_updated_at: The operator to apply to the field updated_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. updated_at: The date and time the record was last modified in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_updated_at: If op_updated_at is specified, the field named in this input will be compared to the value in updated_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_updated_at must be specified if op_updated_at is specified.
:type val_f_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_updated_at: If op_updated_at is specified, this value will be compared to the value in updated_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_updated_at must be specified if op_updated_at is specified.
:type val_c_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_value: The operator to apply to the field value. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. value: The value of the sensor data. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_value: If op_value is specified, the field named in this input will be compared to the value in value using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_value must be specified if op_value is specified.
:type val_f_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_value: If op_value is specified, this value will be compared to the value in value using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_value must be specified if op_value is specified.
:type val_c_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, data_source_id, name, name_index, label, category, value, status, units, details, updated_at, first_seen.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each SensorDatum. Valid values are id, data_source_id, name, name_index, label, category, value, status, units, details, updated_at, first_seen. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return sensor_data: An array of the SensorDatum objects that match the specified input criteria.
:rtype sensor_data: Array of SensorDatum
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def show(self, **kwargs):
"""Shows the details for the specified sensor datum.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for the table entry.
:type id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return sensor_datum: The sensor datum identified by the specified id.
:rtype sensor_datum: SensorDatum
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def failures(self, **kwargs):
"""List of sensor data which indicates an error condition.
**Inputs**
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return sensor_data: An array of the SensorDataum objects with a failure condition.
:rtype sensor_data: Array of SensorDatum
"""
return self.api_request(self._get_method_fullname("failures"), kwargs)
def raid(self, **kwargs):
"""Summary of the status of the RAID as a whole.
**Inputs**
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return status: The most serious RAID status condition.
:rtype status: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return detail: Detail on the status condition.
:rtype detail: String
"""
return self.api_request(self._get_method_fullname("raid"), kwargs)
def fan(self, **kwargs):
"""Status for individual fans.
**Inputs**
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return statuses: List of status for each fan, either OK or Failed.
:rtype statuses: Array of String
"""
return self.api_request(self._get_method_fullname("fan"), kwargs)
def power_supply(self, **kwargs):
"""Status for the power supplies
**Inputs**
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return statuses: List of status for each power supply, eitehr OK or Failed.
:rtype statuses: Array of String
"""
return self.api_request(self._get_method_fullname("power_supply"), kwargs) | infoblox_netmri/api/broker/v3_8_0/sensor_datum_broker.py | from ..broker import Broker
class SensorDatumBroker(Broker):
controller = "sensor_data"
def index(self, **kwargs):
"""Lists the available sensor data. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param data_source_id: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type data_source_id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param data_source_id: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type data_source_id: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for the table entry.
:type id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for the table entry.
:type id: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, data_source_id, name, name_index, label, category, value, status, units, details, updated_at, first_seen.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each SensorDatum. Valid values are id, data_source_id, name, name_index, label, category, value, status, units, details, updated_at, first_seen. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return sensor_data: An array of the SensorDatum objects that match the specified input criteria.
:rtype sensor_data: Array of SensorDatum
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""Lists the available sensor data matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param category: The type of sensor data.
:type category: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param category: The type of sensor data.
:type category: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param data_source_id: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type data_source_id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param data_source_id: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type data_source_id: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param details: The description of the status of the sensor data.
:type details: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param details: The description of the status of the sensor data.
:type details: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param first_seen: The time when the failure was first detected.
:type first_seen: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param first_seen: The time when the failure was first detected.
:type first_seen: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for the table entry.
:type id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for the table entry.
:type id: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param label: The label for the sensor data.
:type label: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param label: The label for the sensor data.
:type label: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param name: The name of the sensor data.
:type name: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param name: The name of the sensor data.
:type name: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param name_index: The index for all data with a given name.
:type name_index: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param name_index: The index for all data with a given name.
:type name_index: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param status: The status of the sensor data.
:type status: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param status: The status of the sensor data.
:type status: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param units: The units the value of the sensor data is in.
:type units: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param units: The units the value of the sensor data is in.
:type units: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param updated_at: The date and time the record was last modified in NetMRI.
:type updated_at: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param updated_at: The date and time the record was last modified in NetMRI.
:type updated_at: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param value: The value of the sensor data.
:type value: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param value: The value of the sensor data.
:type value: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, data_source_id, name, name_index, label, category, value, status, units, details, updated_at, first_seen.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each SensorDatum. Valid values are id, data_source_id, name, name_index, label, category, value, status, units, details, updated_at, first_seen. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against sensor data, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: category, data_source_id, details, first_seen, id, label, name, name_index, status, units, updated_at, value.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return sensor_data: An array of the SensorDatum objects that match the specified input criteria.
:rtype sensor_data: Array of SensorDatum
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
    """List sensor data using comparison operations beyond equality.

    This is the most flexible of the query mechanisms, but more complex
    to use and less efficient than the index or search methods.

    For each searchable field F (category, data_source_id, details,
    first_seen, id, label, name, name_index, status, units, updated_at,
    value) the caller may supply:

    :param op_F: The operator to apply to field F. Valid values are:
        =, <>, rlike, not rlike, >, >=, <, <=, like, not like,
        is null, is not null, between. For the between operator the
        value will be treated as an Array if a comma delimited string
        is passed, and it must contain an even number of values.
    :param val_f_F: If op_F is specified, the field named in this input
        will be compared to the value in F using the specified operator.
        Either this or val_c_F must be specified if op_F is specified.
    :param val_c_F: If op_F is specified, this explicit constant value
        will be compared to the value in F using the specified operator.
        Either this or val_f_F must be specified if op_F is specified.

    Paging and output shaping:

    :param start: The record number to return in the selected page of
        data (default 0).
    :param limit: The size of the page of data; maximum is 10000
        (default 1000).
    :param sort: The data field(s) to use for sorting the output
        (default id).
    :param dir: The direction(s) in which to sort the data, 'asc' or
        'desc' (default 'asc').
    :param select: The list of attributes to return for each
        SensorDatum; if empty or omitted, all attributes are returned.
    :param goto_field: The field name for NIOS GOTO that is used for
        locating a row position of records (api version min 2.8).
    :param goto_value: The value of goto_field for NIOS GOTO that is
        used for locating a row position of records.
    :param xml_filter: A SetFilter XML structure to further refine the
        search; applied after any field values but before limit options.

    **Outputs**

    :return sensor_data: An array of the SensorDatum objects that match
        the specified input criteria.
    :rtype sensor_data: Array of SensorDatum
    """
    # All keyword arguments are forwarded untouched; the remote API
    # performs the actual validation and filtering.
    fullname = self._get_method_fullname("find")
    return self.api_list_request(fullname, kwargs)
def show(self, **kwargs):
    """Shows the details for the specified sensor datum.

    :param id: The internal NetMRI identifier for the table entry
        (required).
    :type id: Integer

    :return sensor_datum: The sensor datum identified by the specified
        id.
    :rtype sensor_datum: SensorDatum
    """
    fullname = self._get_method_fullname("show")
    return self.api_request(fullname, kwargs)
def failures(self, **kwargs):
    """List of sensor data which indicates an error condition.

    :return sensor_data: An array of the SensorDatum objects with a
        failure condition.
    :rtype sensor_data: Array of SensorDatum
    """
    fullname = self._get_method_fullname("failures")
    return self.api_request(fullname, kwargs)
def raid(self, **kwargs):
    """Summary of the status of the RAID as a whole.

    :return status: The most serious RAID status condition.
    :rtype status: String
    :return detail: Detail on the status condition.
    :rtype detail: String
    """
    fullname = self._get_method_fullname("raid")
    return self.api_request(fullname, kwargs)
def fan(self, **kwargs):
    """Status for individual fans.

    :return statuses: List of status for each fan, either OK or Failed.
    :rtype statuses: Array of String
    """
    fullname = self._get_method_fullname("fan")
    return self.api_request(fullname, kwargs)
def power_supply(self, **kwargs):
    """Status for the power supplies.

    **Inputs**

    **Outputs**

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :return statuses: List of status for each power supply, either OK or Failed.
    :rtype statuses: Array of String
    """
    return self.api_request(self._get_method_fullname("power_supply"), kwargs) | 0.868688 | 0.633495 |
import os
import sys
import glob
import logging
import argparse
import synapse.exc as s_exc
import synapse.glob as s_glob
import synapse.common as s_common
import synapse.telepath as s_telepath
import synapse.lib.output as s_output
import synapse.lib.hashset as s_hashset
logger = logging.getLogger(__name__)
def main(argv, outp=None):
    """Upload files to an Axon and optionally model them in a Cortex.

    Args:
        argv (list): Command line arguments (see makeargparser()).
        outp (s_output.OutPut): Output object used for status messages.

    Returns:
        int: 0 on success.

    Raises:
        s_exc.SynErr: If the Axon reports a different sha256 than the
            one computed locally for an uploaded file.
    """
    if outp is None:  # pragma: no cover
        outp = s_output.OutPut()

    pars = makeargparser()
    opts = pars.parse_args(argv)

    axon = s_telepath.openurl(opts.axon)

    core = None
    if opts.cortex:
        core = s_telepath.openurl(opts.cortex)

    # Tags are applied with no time interval ((None, None)).
    tags = {}
    if opts.tags:
        for tag in opts.tags.split(','):
            tags[tag] = (None, None)

    if tags:
        outp.printf('adding tags: %r' % (list(tags.keys())))

    # Expand globs and de-duplicate; non-file paths are skipped.
    filepaths = set()
    for item in opts.filenames:
        paths = glob.glob(item, recursive=opts.recursive)

        if not paths:
            outp.printf(f'filepath does not contain any files: {item}')
            continue

        filepaths.update([path for path in paths if os.path.isfile(path)])

    for path in filepaths:
        bname = os.path.basename(path)

        hset = s_hashset.HashSet()
        with s_common.reqfile(path) as fd:
            hset.eatfd(fd)

        fhashes = {htyp: hasher.hexdigest() for htyp, hasher in hset.hashes}
        sha256 = fhashes.get('sha256')
        bsha256 = s_common.uhex(sha256)

        if not axon.has(bsha256):
            with axon.upload() as upfd:
                with s_common.genfile(path) as fd:
                    for byts in s_common.iterfd(fd):
                        upfd.write(byts)

                size, hashval = upfd.save()

            if hashval != bsha256:  # pragma: no cover
                # BUGFIX: ahash previously repeated hashval (as raw
                # bytes); the expected hash never appeared in the error.
                # Report both digests in hex: ehash is what the Axon
                # stored, ahash is the locally computed expectation.
                raise s_exc.SynErr(mesg='hashes do not match',
                                   ehash=s_common.ehex(hashval),
                                   ahash=s_common.ehex(bsha256))

            outp.printf(f'Uploaded [{bname}] to axon')
        else:
            outp.printf(f'Axon already had [{bname}]')

        if core:
            pnode = (
                ('file:bytes', f'sha256:{sha256}'),
                {
                    'props': {
                        'md5': fhashes.get('md5'),
                        'sha1': fhashes.get('sha1'),
                        'sha256': fhashes.get('sha256'),
                        'size': hset.size,
                        'name': bname,
                    },
                    'tags': tags,
                }
            )

            node = list(core.addNodes([pnode]))[0]

            iden = node[0][1]
            size = node[1]['props']['size']
            name = node[1]['props']['name']
            mesg = f'file: {bname} ({size}) added to core ({iden}) as {name}'
            outp.printf(mesg)

    s_glob.sync(axon.fini())
    if core:
        s_glob.sync(core.fini())

    return 0
def makeargparser():
    """Build the CLI argument parser for the pushfile tool.

    Returns:
        argparse.ArgumentParser: Configured parser for pushfile options.
    """
    desc = ('Command line tool for uploading files to an Axon and making '
            'file:bytes in a Cortex.')
    parser = argparse.ArgumentParser('synapse.tools.pushfile', description=desc)
    parser.add_argument('-a', '--axon', required=True, type=str, dest='axon',
                        help='URL for a target Axon to store files at.')
    parser.add_argument('-c', '--cortex', default=None, type=str, dest='cortex',
                        help='URL for a target Cortex to make file:bytes nodes.')
    parser.add_argument('filenames', nargs='+',
                        help='File names (or glob patterns) to upload')
    parser.add_argument('-r', '--recursive', action='store_true',
                        help='Recursively search paths to upload files.')
    parser.add_argument('-t', '--tags',
                        help='comma separated list of tags to add to the nodes')
    return parser
def _main(): # pragma: no cover
    """Script entry point: enable DEBUG logging, then run main() with CLI args."""
    s_common.setlogging(logger, 'DEBUG')
    return main(sys.argv[1:])

if __name__ == '__main__': # pragma: no cover
    sys.exit(_main()) | synapse/tools/pushfile.py | import os
import sys
import glob
import logging
import argparse
import synapse.exc as s_exc
import synapse.glob as s_glob
import synapse.common as s_common
import synapse.telepath as s_telepath
import synapse.lib.output as s_output
import synapse.lib.hashset as s_hashset
logger = logging.getLogger(__name__)
def main(argv, outp=None):
    """Upload files to an Axon and optionally model them in a Cortex.

    Args:
        argv (list): Command line arguments (see makeargparser()).
        outp (s_output.OutPut): Output object used for status messages.

    Returns:
        int: 0 on success.

    Raises:
        s_exc.SynErr: If the Axon reports a different sha256 than the
            one computed locally for an uploaded file.
    """
    if outp is None:  # pragma: no cover
        outp = s_output.OutPut()

    pars = makeargparser()
    opts = pars.parse_args(argv)

    axon = s_telepath.openurl(opts.axon)

    core = None
    if opts.cortex:
        core = s_telepath.openurl(opts.cortex)

    # Tags are applied with no time interval ((None, None)).
    tags = {}
    if opts.tags:
        for tag in opts.tags.split(','):
            tags[tag] = (None, None)

    if tags:
        outp.printf('adding tags: %r' % (list(tags.keys())))

    # Expand globs and de-duplicate; non-file paths are skipped.
    filepaths = set()
    for item in opts.filenames:
        paths = glob.glob(item, recursive=opts.recursive)

        if not paths:
            outp.printf(f'filepath does not contain any files: {item}')
            continue

        filepaths.update([path for path in paths if os.path.isfile(path)])

    for path in filepaths:
        bname = os.path.basename(path)

        hset = s_hashset.HashSet()
        with s_common.reqfile(path) as fd:
            hset.eatfd(fd)

        fhashes = {htyp: hasher.hexdigest() for htyp, hasher in hset.hashes}
        sha256 = fhashes.get('sha256')
        bsha256 = s_common.uhex(sha256)

        if not axon.has(bsha256):
            with axon.upload() as upfd:
                with s_common.genfile(path) as fd:
                    for byts in s_common.iterfd(fd):
                        upfd.write(byts)

                size, hashval = upfd.save()

            if hashval != bsha256:  # pragma: no cover
                # BUGFIX: ahash previously repeated hashval (as raw
                # bytes); the expected hash never appeared in the error.
                # Report both digests in hex: ehash is what the Axon
                # stored, ahash is the locally computed expectation.
                raise s_exc.SynErr(mesg='hashes do not match',
                                   ehash=s_common.ehex(hashval),
                                   ahash=s_common.ehex(bsha256))

            outp.printf(f'Uploaded [{bname}] to axon')
        else:
            outp.printf(f'Axon already had [{bname}]')

        if core:
            pnode = (
                ('file:bytes', f'sha256:{sha256}'),
                {
                    'props': {
                        'md5': fhashes.get('md5'),
                        'sha1': fhashes.get('sha1'),
                        'sha256': fhashes.get('sha256'),
                        'size': hset.size,
                        'name': bname,
                    },
                    'tags': tags,
                }
            )

            node = list(core.addNodes([pnode]))[0]

            iden = node[0][1]
            size = node[1]['props']['size']
            name = node[1]['props']['name']
            mesg = f'file: {bname} ({size}) added to core ({iden}) as {name}'
            outp.printf(mesg)

    s_glob.sync(axon.fini())
    if core:
        s_glob.sync(core.fini())

    return 0
def makeargparser():
    """Build the CLI argument parser for the pushfile tool.

    Returns:
        argparse.ArgumentParser: Configured parser for pushfile options.
    """
    desc = ('Command line tool for uploading files to an Axon and making '
            'file:bytes in a Cortex.')
    parser = argparse.ArgumentParser('synapse.tools.pushfile', description=desc)
    parser.add_argument('-a', '--axon', required=True, type=str, dest='axon',
                        help='URL for a target Axon to store files at.')
    parser.add_argument('-c', '--cortex', default=None, type=str, dest='cortex',
                        help='URL for a target Cortex to make file:bytes nodes.')
    parser.add_argument('filenames', nargs='+',
                        help='File names (or glob patterns) to upload')
    parser.add_argument('-r', '--recursive', action='store_true',
                        help='Recursively search paths to upload files.')
    parser.add_argument('-t', '--tags',
                        help='comma separated list of tags to add to the nodes')
    return parser
def _main(): # pragma: no cover
    """Script entry point: enable DEBUG logging, then run main() with CLI args."""
    s_common.setlogging(logger, 'DEBUG')
    return main(sys.argv[1:])

if __name__ == '__main__': # pragma: no cover
    sys.exit(_main()) | 0.209551 | 0.095181 |
import copy
import logging
from typing import Any, Dict, Optional, Union, List, Tuple
import pickle
from .variant_generator import parse_spec_vars
from ..tune.sample import Categorical, Domain, Float, Integer, LogUniform, \
Quantized, Uniform
from ..tune.trial import flatten_dict, unflatten_dict
logger = logging.getLogger(__name__)

# Template error messages shared by searcher implementations; each is
# filled in with str.format() at the call site.  NOTE: the previous
# str(...) wrappers around these literals were no-ops and were removed;
# the string values are unchanged.
UNRESOLVED_SEARCH_SPACE = (
    "You passed a `{par}` parameter to {cls} that contained unresolved search "
    "space definitions. {cls} should however be instantiated with fully "
    "configured search spaces only. To use Ray Tune's automatic search space "
    "conversion, pass the space definition as part of the `config` argument "
    "to `tune.run()` instead.")

UNDEFINED_SEARCH_SPACE = (
    "Trying to sample a configuration from {cls}, but no search "
    "space has been defined. Either pass the `{space}` argument when "
    "instantiating the search algorithm, or pass a `config` to "
    "`tune.run()`.")

UNDEFINED_METRIC_MODE = (
    "Trying to sample a configuration from {cls}, but the `metric` "
    "({metric}) or `mode` ({mode}) parameters have not been set. "
    "Either pass these arguments when instantiating the search algorithm, "
    "or pass them to `tune.run()`.")
class Searcher:
    """Abstract class for wrapping suggesting algorithms.

    Custom algorithms can extend this class easily by overriding the
    `suggest` method to provide generated parameters for the trials.

    Any subclass that implements ``__init__`` must also call the
    constructor of this class: ``super(Subclass, self).__init__(...)``.

    To track suggestions and their corresponding evaluations, the method
    `suggest` will be passed a trial_id, which will be used in
    subsequent notifications.

    Not all implementations support multi objectives.

    Args:
        metric (str or list): The training result objective value
            attribute. If list then list of training result objective
            value attributes.
        mode (str or list): If string, one of {min, max}. If list then
            list of max and min; determines whether objective is
            minimizing or maximizing the metric attribute. Must match
            type of metric.

    .. code-block:: python

        class ExampleSearch(Searcher):
            def __init__(self, metric="mean_loss", mode="min", **kwargs):
                super(ExampleSearch, self).__init__(
                    metric=metric, mode=mode, **kwargs)
                self.optimizer = Optimizer()
                self.configurations = {}

            def suggest(self, trial_id):
                configuration = self.optimizer.query()
                self.configurations[trial_id] = configuration

            def on_trial_complete(self, trial_id, result, **kwargs):
                configuration = self.configurations[trial_id]
                if result and self.metric in result:
                    self.optimizer.update(configuration, result[self.metric])

        tune.run(trainable_function, search_alg=ExampleSearch())
    """

    # Sentinel status string used to mark a trial as finished.
    FINISHED = "FINISHED"

    # Filename template used when checkpointing searcher state; the {}
    # placeholder is filled in by the caller (presumably a session id —
    # not visible from this block).
    CKPT_FILE_TMPL = "searcher-state-{}.pkl"
def __init__(self,
metric: Optional[str] = None,
mode: Optional[str] = None,
max_concurrent: Optional[int] = None,
use_early_stopped_trials: Optional[bool] = None):
if use_early_stopped_trials is False:
raise DeprecationWarning(
"Early stopped trials are now always used. If this is a "
"problem, file an issue: https://github.com/ray-project/ray.")
if max_concurrent is not None:
logger.warning(
"DeprecationWarning: `max_concurrent` is deprecated for this "
"search algorithm. Use tune.suggest.ConcurrencyLimiter() "
"instead. This will raise an error in future versions of Ray.")
self._metric = metric
self._mode = mode
if not mode or not metric:
# Early return to avoid assertions
return
assert isinstance(
metric, type(mode)), "metric and mode must be of the same type"
if isinstance(mode, str):
assert mode in ["min", "max"
], "if `mode` is a str must be 'min' or 'max'!"
elif isinstance(mode, list):
assert len(mode) == len(
metric), "Metric and mode must be the same length"
assert all(mod in ["min", "max", "obs"] for mod in
mode), "All of mode must be 'min' or 'max' or 'obs'!"
else:
raise ValueError("Mode most either be a list or string")
def set_search_properties(self, metric: Optional[str], mode: Optional[str],
config: Dict) -> bool:
"""Pass search properties to searcher.
This method acts as an alternative to instantiating search algorithms
with their own specific search spaces. Instead they can accept a
Tune config through this method. A searcher should return ``True``
if setting the config was successful, or ``False`` if it was
unsuccessful, e.g. when the search space has already been set.
Args:
metric (str): Metric to optimize
mode (str): One of ["min", "max"]. Direction to optimize.
config (dict): Tune config dict.
"""
return False
def on_trial_result(self, trial_id: str, result: Dict):
"""Optional notification for result during training.
Note that by default, the result dict may include NaNs or
may not include the optimization metric. It is up to the
subclass implementation to preprocess the result to
avoid breaking the optimization process.
Args:
trial_id (str): A unique string ID for the trial.
result (dict): Dictionary of metrics for current training progress.
Note that the result dict may include NaNs or
may not include the optimization metric. It is up to the
subclass implementation to preprocess the result to
avoid breaking the optimization process.
"""
pass
def on_trial_complete(self,
trial_id: str,
result: Optional[Dict] = None,
error: bool = False):
"""Notification for the completion of trial.
Typically, this method is used for notifying the underlying
optimizer of the result.
Args:
trial_id (str): A unique string ID for the trial.
result (dict): Dictionary of metrics for current training progress.
Note that the result dict may include NaNs or
may not include the optimization metric. It is up to the
subclass implementation to preprocess the result to
avoid breaking the optimization process. Upon errors, this
may also be None.
error (bool): True if the training process raised an error.
"""
raise NotImplementedError
def suggest(self, trial_id: str) -> Optional[Dict]:
"""Queries the algorithm to retrieve the next set of parameters.
Arguments:
trial_id (str): Trial ID used for subsequent notifications.
Returns:
dict | FINISHED | None: Configuration for a trial, if possible.
If FINISHED is returned, Tune will be notified that
no more suggestions/configurations will be provided.
If None is returned, Tune will skip the querying of the
searcher for this step.
"""
raise NotImplementedError
def save(self, checkpoint_path: str):
"""Save state to path for this search algorithm.
Args:
checkpoint_path (str): File where the search algorithm
state is saved. This path should be used later when
restoring from file.
Example:
.. code-block:: python
search_alg = Searcher(...)
analysis = tune.run(
cost,
num_samples=5,
search_alg=search_alg,
name=self.experiment_name,
local_dir=self.tmpdir)
search_alg.save("./my_favorite_path.pkl")
.. versionchanged:: 0.8.7
Save is automatically called by `tune.run`. You can use
`restore_from_dir` to restore from an experiment directory
such as `~/ray_results/trainable`.
"""
raise NotImplementedError
def restore(self, checkpoint_path: str):
"""Restore state for this search algorithm
Args:
checkpoint_path (str): File where the search algorithm
state is saved. This path should be the same
as the one provided to "save".
Example:
.. code-block:: python
search_alg.save("./my_favorite_path.pkl")
search_alg2 = Searcher(...)
search_alg2 = ConcurrencyLimiter(search_alg2, 1)
search_alg2.restore(checkpoint_path)
tune.run(cost, num_samples=5, search_alg=search_alg2)
"""
raise NotImplementedError
def get_state(self) -> Dict:
raise NotImplementedError
def set_state(self, state: Dict):
raise NotImplementedError
@property
def metric(self) -> str:
"""The training result objective value attribute."""
return self._metric
@property
def mode(self) -> str:
"""Specifies if minimizing or maximizing the metric."""
return self._mode
class ConcurrencyLimiter(Searcher):
    """A wrapper algorithm for limiting the number of concurrent trials.

    Args:
        searcher (Searcher): Searcher object that the
            ConcurrencyLimiter will manage.
        max_concurrent (int): Maximum concurrent samples from the underlying
            searcher.
        batch (bool): Whether to wait for all concurrent samples
            to finish before updating the underlying searcher.

    Example:

    .. code-block:: python

        from ray.tune.suggest import ConcurrencyLimiter
        search_alg = HyperOptSearch(metric="accuracy")
        search_alg = ConcurrencyLimiter(search_alg, max_concurrent=2)
        tune.run(trainable, search_alg=search_alg)
    """

    def __init__(self,
                 searcher: Searcher,
                 max_concurrent: int,
                 batch: bool = False):
        assert type(max_concurrent) is int and max_concurrent > 0
        self.searcher = searcher
        self.max_concurrent = max_concurrent
        self.batch = batch
        # Trial IDs currently holding one of the concurrency slots.
        self.live_trials = set()
        # In batch mode, completed results are held back here until a
        # full batch has finished.
        self.cached_results = {}
        super(ConcurrencyLimiter, self).__init__(
            metric=self.searcher.metric, mode=self.searcher.mode)

    def suggest(self, trial_id: str) -> Optional[Dict]:
        """Return a suggestion from the wrapped searcher, or None when the
        concurrency limit has been reached."""
        assert trial_id not in self.live_trials, (
            f"Trial ID {trial_id} must be unique: already found in set.")
        if len(self.live_trials) >= self.max_concurrent:
            # Use lazy %-style formatting consistently (the original mixed
            # an f-string with %-style placeholders in one call).
            logger.debug(
                "Not providing a suggestion for %s due to "
                "concurrency limit: %s/%s.", trial_id, len(self.live_trials),
                self.max_concurrent)
            return
        suggestion = self.searcher.suggest(trial_id)
        if suggestion not in (None, Searcher.FINISHED):
            self.live_trials.add(trial_id)
        return suggestion

    def on_trial_complete(self,
                          trial_id: str,
                          result: Optional[Dict] = None,
                          error: bool = False):
        """Forward trial completion to the wrapped searcher, releasing the
        trial's concurrency slot (deferred until the batch fills in batch
        mode)."""
        if trial_id not in self.live_trials:
            return
        elif self.batch:
            self.cached_results[trial_id] = (result, error)
            if len(self.cached_results) == self.max_concurrent:
                # Update the underlying searcher once the
                # full batch is completed.
                # Use distinct loop names so the `trial_id`/`result`/`error`
                # parameters are not shadowed (they were in the original).
                for cached_id, (cached_result,
                                cached_error) in self.cached_results.items():
                    self.searcher.on_trial_complete(
                        cached_id, result=cached_result, error=cached_error)
                    self.live_trials.remove(cached_id)
                self.cached_results = {}
            else:
                return
        else:
            self.searcher.on_trial_complete(
                trial_id, result=result, error=error)
            self.live_trials.remove(trial_id)

    def get_state(self) -> Dict:
        """Return a deep-copied state dict, excluding the wrapped searcher
        (which checkpoints itself via save/restore)."""
        state = self.__dict__.copy()
        del state["searcher"]
        return copy.deepcopy(state)

    def set_state(self, state: Dict):
        self.__dict__.update(state)

    def save(self, checkpoint_path: str):
        self.searcher.save(checkpoint_path)

    def restore(self, checkpoint_path: str):
        self.searcher.restore(checkpoint_path)

    def on_pause(self, trial_id: str):
        self.searcher.on_pause(trial_id)

    def on_unpause(self, trial_id: str):
        self.searcher.on_unpause(trial_id)

    def set_search_properties(self, metric: Optional[str], mode: Optional[str],
                              config: Dict) -> bool:
        return self.searcher.set_search_properties(metric, mode, config)
# Optuna is an optional dependency: fall back to None placeholders so this
# module stays importable (OptunaSearch asserts with a helpful message)
# when optuna is not installed.
try:
    import optuna as ot
    from optuna.trial import TrialState as OptunaTrialState
    from optuna.samplers import BaseSampler
except ImportError:
    ot = None
    OptunaTrialState = None
    BaseSampler = None
# (Optional) Default (anonymous) metric when using tune.report(x)
DEFAULT_METRIC = "_metric"
# (Auto-filled) The index of this training iteration.
TRAINING_ITERATION = "training_iteration"
class OptunaSearch(Searcher):
"""A wrapper around Optuna to provide trial suggestions.
`Optuna <https://optuna.org/>`_ is a hyperparameter optimization library.
In contrast to other libraries, it employs define-by-run style
hyperparameter definitions.
This Searcher is a thin wrapper around Optuna's search algorithms.
You can pass any Optuna sampler, which will be used to generate
hyperparameter suggestions.
Please note that this wrapper does not support define-by-run, so the
search space will be configured before running the optimization. You will
also need to use a Tune trainable (e.g. using the function API) with
this wrapper.
For defining the search space, use ``ray.tune.suggest.optuna.param``
(see example).
Args:
space (list): Hyperparameter search space definition for Optuna's
sampler. This is a list, and samples for the parameters will
be obtained in order.
metric (str): The training result objective value attribute. If None
but a mode was passed, the anonymous metric `_metric` will be used
per default.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
points_to_evaluate (list): Initial parameter suggestions to be run
first. This is for when you already have some good parameters
you want to run first to help the algorithm make better suggestions
for future parameters. Needs to be a list of dicts containing the
configurations.
sampler (optuna.samplers.BaseSampler): Optuna sampler used to
draw hyperparameter configurations. Defaults to ``TPESampler``.
seed (int): Seed to initialize sampler with. This parameter is only
used when ``sampler=None``. In all other cases, the sampler
you pass should be initialized with the seed already.
evaluated_rewards (list): If you have previously evaluated the
parameters passed in as points_to_evaluate you can avoid
re-running those trials by passing in the reward attributes
as a list so the optimiser can be told the results without
needing to re-compute the trial. Must be the same length as
points_to_evaluate.
Tune automatically converts search spaces to Optuna's format:
.. code-block:: python
from ray.tune.suggest.optuna import OptunaSearch
config = {
"a": tune.uniform(6, 8)
"b": tune.loguniform(1e-4, 1e-2)
}
optuna_search = OptunaSearch(
metric="loss",
mode="min")
tune.run(trainable, config=config, search_alg=optuna_search)
If you would like to pass the search space manually, the code would
look like this:
.. code-block:: python
from ray.tune.suggest.optuna import OptunaSearch
import optuna
config = {
"a": optuna.distributions.UniformDistribution(6, 8),
"b": optuna.distributions.LogUniformDistribution(1e-4, 1e-2),
}
optuna_search = OptunaSearch(
space,
metric="loss",
mode="min")
tune.run(trainable, search_alg=optuna_search)
.. versionadded:: 0.8.8
"""
    def __init__(self,
                 space: Optional[Union[Dict, List[Tuple]]] = None,
                 metric: Optional[str] = None,
                 mode: Optional[str] = None,
                 points_to_evaluate: Optional[List[Dict]] = None,
                 sampler: Optional[BaseSampler] = None,
                 seed: Optional[int] = None,
                 evaluated_rewards: Optional[List] = None):
        """Initialize the Optuna wrapper; see the class docstring for
        parameter semantics."""
        assert ot is not None, (
            "Optuna must be installed! Run `pip install optuna`.")
        super(OptunaSearch, self).__init__(
            metric=metric,
            mode=mode,
            max_concurrent=None,
            use_early_stopped_trials=None)
        if isinstance(space, dict) and space:
            resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
            if domain_vars or grid_vars:
                # Tune-style (unresolved) search space: warn, then convert
                # it to optuna distributions.
                logger.warning(
                    UNRESOLVED_SEARCH_SPACE.format(
                        par="space", cls=type(self).__name__))
                space = self.convert_search_space(space)
            else:
                # Flatten to support nested dicts
                space = flatten_dict(space, "/")
        # Deprecate: 1.5
        if isinstance(space, list):
            logger.warning(
                "Passing lists of `param.suggest_*()` calls to OptunaSearch "
                "as a search space is deprecated and will be removed in "
                "a future release of Ray. Please pass a dict mapping "
                "to `optuna.distributions` objects instead.")
        self._space = space
        self._points_to_evaluate = points_to_evaluate or []
        self._evaluated_rewards = evaluated_rewards
        self._study_name = "optuna"  # Fixed study name for in-memory storage
        if sampler and seed:
            logger.warning(
                "You passed an initialized sampler to `OptunaSearch`. The "
                "`seed` parameter has to be passed to the sampler directly "
                "and will be ignored.")
        # Default to a (possibly seeded) TPE sampler.
        self._sampler = sampler or ot.samplers.TPESampler(seed=seed)
        assert isinstance(self._sampler, BaseSampler), \
            "You can only pass an instance of `optuna.samplers.BaseSampler` " \
            "as a sampler to `OptunaSearcher`."
        # Maps Tune trial IDs to in-flight optuna trial objects.
        self._ot_trials = {}
        self._ot_study = None
        if self._space:
            self._setup_study(mode)
    def _setup_study(self, mode: str):
        """Create the in-memory optuna study and seed it with any initial
        points.

        Args:
            mode (str): "min" or "max"; mapped onto optuna's study
                direction ("minimize"/"maximize").
        """
        if self._metric is None and self._mode:
            # If only a mode was passed, use anonymous metric
            self._metric = DEFAULT_METRIC
        # Pruning is driven by Tune's schedulers, so disable it in optuna.
        pruner = ot.pruners.NopPruner()
        storage = ot.storages.InMemoryStorage()
        self._ot_study = ot.study.create_study(
            storage=storage,
            sampler=self._sampler,
            pruner=pruner,
            study_name=self._study_name,
            direction="minimize" if mode == "min" else "maximize",
            load_if_exists=True)
        if self._points_to_evaluate:
            if self._evaluated_rewards:
                # Known results: register them directly instead of
                # re-running the trials.
                for point, reward in zip(self._points_to_evaluate,
                                         self._evaluated_rewards):
                    self.add_evaluated_point(point, reward)
            else:
                for point in self._points_to_evaluate:
                    self._ot_study.enqueue_trial(point)
def set_search_properties(self, metric: Optional[str], mode: Optional[str],
config: Dict) -> bool:
if self._space:
return False
space = self.convert_search_space(config)
self._space = space
if metric:
self._metric = metric
if mode:
self._mode = mode
self._setup_study(mode)
return True
    def suggest(self, trial_id: str) -> Optional[Dict]:
        """Ask optuna for the next configuration for ``trial_id``.

        Args:
            trial_id (str): Trial ID used for subsequent notifications.

        Returns:
            The suggested configuration as a (re-nested) dict.

        Raises:
            RuntimeError: If no search space or no metric/mode has been
                configured yet.
        """
        if not self._space:
            raise RuntimeError(
                UNDEFINED_SEARCH_SPACE.format(
                    cls=self.__class__.__name__, space="space"))
        if not self._metric or not self._mode:
            raise RuntimeError(
                UNDEFINED_METRIC_MODE.format(
                    cls=self.__class__.__name__,
                    metric=self._metric,
                    mode=self._mode))
        if isinstance(self._space, list):
            # Keep for backwards compatibility
            # Deprecate: 1.5
            if trial_id not in self._ot_trials:
                self._ot_trials[trial_id] = self._ot_study.ask()
            ot_trial = self._ot_trials[trial_id]
            # getattr will fetch the trial.suggest_ function on Optuna trials
            params = {
                args[0] if len(args) > 0 else kwargs["name"]: getattr(
                    ot_trial, fn)(*args, **kwargs)
                for (fn, args, kwargs) in self._space
            }
        else:
            # Use Optuna ask interface (since version 2.6.0)
            if trial_id not in self._ot_trials:
                self._ot_trials[trial_id] = self._ot_study.ask(
                    fixed_distributions=self._space)
            ot_trial = self._ot_trials[trial_id]
            params = ot_trial.params
        # Restore the nested dict structure flattened with "/" separators.
        return unflatten_dict(params)
def on_trial_result(self, trial_id: str, result: Dict):
metric = result[self.metric]
step = result[TRAINING_ITERATION]
ot_trial = self._ot_trials[trial_id]
ot_trial.report(metric, step)
def on_trial_complete(self,
trial_id: str,
result: Optional[Dict] = None,
error: bool = False):
ot_trial = self._ot_trials[trial_id]
val = result.get(self.metric, None) if result else None
ot_trial_state = OptunaTrialState.COMPLETE
if val is None:
if error:
ot_trial_state = OptunaTrialState.FAIL
else:
ot_trial_state = OptunaTrialState.PRUNED
try:
self._ot_study.tell(ot_trial, val, state=ot_trial_state)
except ValueError as exc:
logger.warning(exc) # E.g. if NaN was reported
    def add_evaluated_point(self,
                            parameters: Dict,
                            value: float,
                            error: bool = False,
                            pruned: bool = False,
                            intermediate_values: Optional[List[float]] = None):
        """Register an already-evaluated configuration with the study.

        Used to seed the optimizer with known results (e.g.
        ``points_to_evaluate`` plus ``evaluated_rewards``) without
        re-running the trials.

        Args:
            parameters (dict): The evaluated configuration (flat dict
                matching the search space).
            value (float): Observed objective value.
            error (bool): Mark the trial as failed.
            pruned (bool): Mark the trial as pruned (ignored if ``error``).
            intermediate_values (list): Optional per-step metric values.

        Raises:
            RuntimeError: If no search space or no metric/mode has been
                configured yet.
        """
        if not self._space:
            raise RuntimeError(
                UNDEFINED_SEARCH_SPACE.format(
                    cls=self.__class__.__name__, space="space"))
        if not self._metric or not self._mode:
            raise RuntimeError(
                UNDEFINED_METRIC_MODE.format(
                    cls=self.__class__.__name__,
                    metric=self._metric,
                    mode=self._mode))
        ot_trial_state = OptunaTrialState.COMPLETE
        if error:
            ot_trial_state = OptunaTrialState.FAIL
        elif pruned:
            ot_trial_state = OptunaTrialState.PRUNED
        if intermediate_values:
            # Optuna expects intermediate values keyed by step index.
            intermediate_values_dict = {
                i: value
                for i, value in enumerate(intermediate_values)
            }
        else:
            intermediate_values_dict = None
        trial = ot.trial.create_trial(
            state=ot_trial_state,
            value=value,
            params=parameters,
            distributions=self._space,
            intermediate_values=intermediate_values_dict)
        self._ot_study.add_trial(trial)
def save(self, checkpoint_path: str):
save_object = (self._sampler, self._ot_trials, self._ot_study,
self._points_to_evaluate, self._evaluated_rewards)
with open(checkpoint_path, "wb") as outputFile:
pickle.dump(save_object, outputFile)
def restore(self, checkpoint_path: str):
with open(checkpoint_path, "rb") as inputFile:
save_object = pickle.load(inputFile)
if len(save_object) == 5:
self._sampler, self._ot_trials, self._ot_study, \
self._points_to_evaluate, self._evaluated_rewards = save_object
else:
# Backwards compatibility
self._sampler, self._ot_trials, self._ot_study, \
self._points_to_evaluate = save_object
    @staticmethod
    def convert_search_space(spec: Dict) -> Dict[str, Any]:
        """Convert a Tune search-space dict into optuna distributions.

        Args:
            spec (dict): Tune config dict (possibly nested).

        Raises:
            ValueError: If the spec contains grid-search parameters or an
                unsupported domain/sampler combination.
        """
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
        if not domain_vars and not grid_vars:
            return {}
        if grid_vars:
            raise ValueError(
                "Grid search parameters cannot be automatically converted "
                "to an Optuna search space.")
        # Flatten and resolve again after checking for grid search.
        spec = flatten_dict(spec, prevent_delimiter=True)
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
        def resolve_value(domain: Domain) -> ot.distributions.BaseDistribution:
            # Map one Tune Domain (+ optional quantization) onto the
            # closest optuna distribution.
            quantize = None
            sampler = domain.get_sampler()
            if isinstance(sampler, Quantized):
                quantize = sampler.q
                sampler = sampler.sampler
                if isinstance(sampler, LogUniform):
                    logger.warning(
                        "Optuna does not handle quantization in loguniform "
                        "sampling. The parameter will be passed but it will "
                        "probably be ignored.")
            if isinstance(domain, Float):
                if isinstance(sampler, LogUniform):
                    if quantize:
                        logger.warning(
                            "Optuna does not support both quantization and "
                            "sampling from LogUniform. Dropped quantization.")
                    return ot.distributions.LogUniformDistribution(
                        domain.lower, domain.upper)
                elif isinstance(sampler, Uniform):
                    if quantize:
                        return ot.distributions.DiscreteUniformDistribution(
                            domain.lower, domain.upper, quantize)
                    return ot.distributions.UniformDistribution(
                        domain.lower, domain.upper)
            elif isinstance(domain, Integer):
                if isinstance(sampler, LogUniform):
                    return ot.distributions.IntLogUniformDistribution(
                        domain.lower, domain.upper - 1, step=quantize or 1)
                elif isinstance(sampler, Uniform):
                    # Upper bound should be inclusive for quantization and
                    # exclusive otherwise
                    return ot.distributions.IntUniformDistribution(
                        domain.lower,
                        domain.upper - int(bool(not quantize)),
                        step=quantize or 1)
            elif isinstance(domain, Categorical):
                if isinstance(sampler, Uniform):
                    return ot.distributions.CategoricalDistribution(
                        domain.categories)
            raise ValueError(
                "Optuna search does not support parameters of type "
                "`{}` with samplers of type `{}`".format(
                    type(domain).__name__,
                    type(domain.sampler).__name__))
        # Parameter name is e.g. "a/b/c" for nested dicts
        values = {
            "/".join(path): resolve_value(domain)
            for path, domain in domain_vars
        }
        return values
import copy
import logging
from typing import Any, Dict, Optional, Union, List, Tuple
import pickle
from .variant_generator import parse_spec_vars
from ..tune.sample import Categorical, Domain, Float, Integer, LogUniform, \
Quantized, Uniform
from ..tune.trial import flatten_dict, unflatten_dict
# Module-level logger for the deprecation/conversion warnings below.
logger = logging.getLogger(__name__)
# Warning template: a Tune-style (unresolved) search space was passed to a
# searcher that expects a fully configured one.
UNRESOLVED_SEARCH_SPACE = str(
    "You passed a `{par}` parameter to {cls} that contained unresolved search "
    "space definitions. {cls} should however be instantiated with fully "
    "configured search spaces only. To use Ray Tune's automatic search space "
    "conversion, pass the space definition as part of the `config` argument "
    "to `tune.run()` instead.")
# Error template: `suggest` was called before any search space was set.
UNDEFINED_SEARCH_SPACE = str(
    "Trying to sample a configuration from {cls}, but no search "
    "space has been defined. Either pass the `{space}` argument when "
    "instantiating the search algorithm, or pass a `config` to "
    "`tune.run()`.")
# Error template: `suggest` was called before metric/mode were set.
UNDEFINED_METRIC_MODE = str(
    "Trying to sample a configuration from {cls}, but the `metric` "
    "({metric}) or `mode` ({mode}) parameters have not been set. "
    "Either pass these arguments when instantiating the search algorithm, "
    "or pass them to `tune.run()`.")
class Searcher:
    """Abstract class for wrapping suggesting algorithms.

    Custom algorithms can extend this class easily by overriding the
    `suggest` method provide generated parameters for the trials.

    Any subclass that implements ``__init__`` must also call the
    constructor of this class: ``super(Subclass, self).__init__(...)``.

    To track suggestions and their corresponding evaluations, the method
    `suggest` will be passed a trial_id, which will be used in
    subsequent notifications.

    Not all implementations support multi objectives.

    Args:
        metric (str or list): The training result objective value attribute.
            If list then list of training result objective value attributes.
        mode (str or list): If string One of {min, max}. If list then
            list of max and min, determines whether objective is minimizing
            or maximizing the metric attribute. Must match type of metric.

    .. code-block:: python

        class ExampleSearch(Searcher):
            def __init__(self, metric="mean_loss", mode="min", **kwargs):
                super(ExampleSearch, self).__init__(
                    metric=metric, mode=mode, **kwargs)
                self.optimizer = Optimizer()
                self.configurations = {}

            def suggest(self, trial_id):
                configuration = self.optimizer.query()
                self.configurations[trial_id] = configuration

            def on_trial_complete(self, trial_id, result, **kwargs):
                configuration = self.configurations[trial_id]
                if result and self.metric in result:
                    self.optimizer.update(configuration, result[self.metric])

        tune.run(trainable_function, search_alg=ExampleSearch())
    """

    # Sentinel returned by `suggest()` to signal no further configurations.
    FINISHED = "FINISHED"
    # Filename template used when checkpointing searcher state.
    CKPT_FILE_TMPL = "searcher-state-{}.pkl"

    def __init__(self,
                 metric: Optional[str] = None,
                 mode: Optional[str] = None,
                 max_concurrent: Optional[int] = None,
                 use_early_stopped_trials: Optional[bool] = None):
        """Initialize the searcher; see the class docstring for semantics.

        Raises:
            DeprecationWarning: If ``use_early_stopped_trials`` is False.
            ValueError: If ``mode`` is neither a string nor a list.
        """
        if use_early_stopped_trials is False:
            raise DeprecationWarning(
                "Early stopped trials are now always used. If this is a "
                "problem, file an issue: https://github.com/ray-project/ray.")
        if max_concurrent is not None:
            logger.warning(
                "DeprecationWarning: `max_concurrent` is deprecated for this "
                "search algorithm. Use tune.suggest.ConcurrencyLimiter() "
                "instead. This will raise an error in future versions of Ray.")
        self._metric = metric
        self._mode = mode
        if not mode or not metric:
            # Either piece may be supplied later; skip validation for now.
            return
        assert isinstance(
            metric, type(mode)), "metric and mode must be of the same type"
        if isinstance(mode, str):
            assert mode in ["min", "max"
                            ], "if `mode` is a str must be 'min' or 'max'!"
        elif isinstance(mode, list):
            assert len(mode) == len(
                metric), "Metric and mode must be the same length"
            assert all(mod in ["min", "max", "obs"] for mod in
                       mode), "All of mode must be 'min' or 'max' or 'obs'!"
        else:
            # Fixed typo in the original message ("most" -> "must").
            raise ValueError("Mode must either be a list or string")

    def set_search_properties(self, metric: Optional[str], mode: Optional[str],
                              config: Dict) -> bool:
        """Pass search properties to searcher.

        This method acts as an alternative to instantiating search algorithms
        with their own specific search spaces. Instead they can accept a
        Tune config through this method. A searcher should return ``True``
        if setting the config was successful, or ``False`` if it was
        unsuccessful, e.g. when the search space has already been set.

        Args:
            metric (str): Metric to optimize
            mode (str): One of ["min", "max"]. Direction to optimize.
            config (dict): Tune config dict.
        """
        return False

    def on_trial_result(self, trial_id: str, result: Dict):
        """Optional notification for result during training.

        Note that by default, the result dict may include NaNs or
        may not include the optimization metric. It is up to the
        subclass implementation to preprocess the result to
        avoid breaking the optimization process.

        Args:
            trial_id (str): A unique string ID for the trial.
            result (dict): Dictionary of metrics for current training
                progress.
        """
        pass

    def on_trial_complete(self,
                          trial_id: str,
                          result: Optional[Dict] = None,
                          error: bool = False):
        """Notification for the completion of trial.

        Typically, this method is used for notifying the underlying
        optimizer of the result.

        Args:
            trial_id (str): A unique string ID for the trial.
            result (dict): Dictionary of metrics for current training
                progress. May include NaNs, lack the optimization metric,
                or be None upon errors.
            error (bool): True if the training process raised an error.
        """
        raise NotImplementedError

    def suggest(self, trial_id: str) -> Optional[Dict]:
        """Queries the algorithm to retrieve the next set of parameters.

        Arguments:
            trial_id (str): Trial ID used for subsequent notifications.

        Returns:
            dict | FINISHED | None: Configuration for a trial, if possible.
                If FINISHED is returned, Tune will be notified that
                no more suggestions/configurations will be provided.
                If None is returned, Tune will skip the querying of the
                searcher for this step.
        """
        raise NotImplementedError

    def save(self, checkpoint_path: str):
        """Save state to path for this search algorithm.

        Args:
            checkpoint_path (str): File where the search algorithm
                state is saved. This path should be used later when
                restoring from file.

        .. versionchanged:: 0.8.7
            Save is automatically called by `tune.run`. You can use
            `restore_from_dir` to restore from an experiment directory
            such as `~/ray_results/trainable`.
        """
        raise NotImplementedError

    def restore(self, checkpoint_path: str):
        """Restore state for this search algorithm.

        Args:
            checkpoint_path (str): File where the search algorithm
                state is saved. This path should be the same
                as the one provided to "save".
        """
        raise NotImplementedError

    def get_state(self) -> Dict:
        """Return a picklable snapshot of searcher state (unimplemented)."""
        raise NotImplementedError

    def set_state(self, state: Dict):
        """Restore searcher state from a snapshot (unimplemented)."""
        raise NotImplementedError

    @property
    def metric(self) -> str:
        """The training result objective value attribute."""
        return self._metric

    @property
    def mode(self) -> str:
        """Specifies if minimizing or maximizing the metric."""
        return self._mode
class ConcurrencyLimiter(Searcher):
    """A wrapper algorithm for limiting the number of concurrent trials.

    Args:
        searcher (Searcher): Searcher object that the
            ConcurrencyLimiter will manage.
        max_concurrent (int): Maximum concurrent samples from the underlying
            searcher.
        batch (bool): Whether to wait for all concurrent samples
            to finish before updating the underlying searcher.

    Example:

    .. code-block:: python

        from ray.tune.suggest import ConcurrencyLimiter
        search_alg = HyperOptSearch(metric="accuracy")
        search_alg = ConcurrencyLimiter(search_alg, max_concurrent=2)
        tune.run(trainable, search_alg=search_alg)
    """

    def __init__(self,
                 searcher: Searcher,
                 max_concurrent: int,
                 batch: bool = False):
        assert type(max_concurrent) is int and max_concurrent > 0
        self.searcher = searcher
        self.max_concurrent = max_concurrent
        self.batch = batch
        # Trial IDs currently holding one of the concurrency slots.
        self.live_trials = set()
        # In batch mode, completed results are held back here until a
        # full batch has finished.
        self.cached_results = {}
        super(ConcurrencyLimiter, self).__init__(
            metric=self.searcher.metric, mode=self.searcher.mode)

    def suggest(self, trial_id: str) -> Optional[Dict]:
        """Return a suggestion from the wrapped searcher, or None when the
        concurrency limit has been reached."""
        assert trial_id not in self.live_trials, (
            f"Trial ID {trial_id} must be unique: already found in set.")
        if len(self.live_trials) >= self.max_concurrent:
            # Use lazy %-style formatting consistently (the original mixed
            # an f-string with %-style placeholders in one call).
            logger.debug(
                "Not providing a suggestion for %s due to "
                "concurrency limit: %s/%s.", trial_id, len(self.live_trials),
                self.max_concurrent)
            return
        suggestion = self.searcher.suggest(trial_id)
        if suggestion not in (None, Searcher.FINISHED):
            self.live_trials.add(trial_id)
        return suggestion

    def on_trial_complete(self,
                          trial_id: str,
                          result: Optional[Dict] = None,
                          error: bool = False):
        """Forward trial completion to the wrapped searcher, releasing the
        trial's concurrency slot (deferred until the batch fills in batch
        mode)."""
        if trial_id not in self.live_trials:
            return
        elif self.batch:
            self.cached_results[trial_id] = (result, error)
            if len(self.cached_results) == self.max_concurrent:
                # Update the underlying searcher once the
                # full batch is completed.
                # Use distinct loop names so the `trial_id`/`result`/`error`
                # parameters are not shadowed (they were in the original).
                for cached_id, (cached_result,
                                cached_error) in self.cached_results.items():
                    self.searcher.on_trial_complete(
                        cached_id, result=cached_result, error=cached_error)
                    self.live_trials.remove(cached_id)
                self.cached_results = {}
            else:
                return
        else:
            self.searcher.on_trial_complete(
                trial_id, result=result, error=error)
            self.live_trials.remove(trial_id)

    def get_state(self) -> Dict:
        """Return a deep-copied state dict, excluding the wrapped searcher
        (which checkpoints itself via save/restore)."""
        state = self.__dict__.copy()
        del state["searcher"]
        return copy.deepcopy(state)

    def set_state(self, state: Dict):
        self.__dict__.update(state)

    def save(self, checkpoint_path: str):
        self.searcher.save(checkpoint_path)

    def restore(self, checkpoint_path: str):
        self.searcher.restore(checkpoint_path)

    def on_pause(self, trial_id: str):
        self.searcher.on_pause(trial_id)

    def on_unpause(self, trial_id: str):
        self.searcher.on_unpause(trial_id)

    def set_search_properties(self, metric: Optional[str], mode: Optional[str],
                              config: Dict) -> bool:
        return self.searcher.set_search_properties(metric, mode, config)
# Optuna is an optional dependency: fall back to None placeholders so this
# module stays importable (OptunaSearch asserts with a helpful message)
# when optuna is not installed.
try:
    import optuna as ot
    from optuna.trial import TrialState as OptunaTrialState
    from optuna.samplers import BaseSampler
except ImportError:
    ot = None
    OptunaTrialState = None
    BaseSampler = None
# (Optional) Default (anonymous) metric when using tune.report(x)
DEFAULT_METRIC = "_metric"
# (Auto-filled) The index of this training iteration.
TRAINING_ITERATION = "training_iteration"
class OptunaSearch(Searcher):
"""A wrapper around Optuna to provide trial suggestions.
`Optuna <https://optuna.org/>`_ is a hyperparameter optimization library.
In contrast to other libraries, it employs define-by-run style
hyperparameter definitions.
This Searcher is a thin wrapper around Optuna's search algorithms.
You can pass any Optuna sampler, which will be used to generate
hyperparameter suggestions.
Please note that this wrapper does not support define-by-run, so the
search space will be configured before running the optimization. You will
also need to use a Tune trainable (e.g. using the function API) with
this wrapper.
For defining the search space, use ``ray.tune.suggest.optuna.param``
(see example).
Args:
space (list): Hyperparameter search space definition for Optuna's
sampler. This is a list, and samples for the parameters will
be obtained in order.
metric (str): The training result objective value attribute. If None
but a mode was passed, the anonymous metric `_metric` will be used
per default.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
points_to_evaluate (list): Initial parameter suggestions to be run
first. This is for when you already have some good parameters
you want to run first to help the algorithm make better suggestions
for future parameters. Needs to be a list of dicts containing the
configurations.
sampler (optuna.samplers.BaseSampler): Optuna sampler used to
draw hyperparameter configurations. Defaults to ``TPESampler``.
seed (int): Seed to initialize sampler with. This parameter is only
used when ``sampler=None``. In all other cases, the sampler
you pass should be initialized with the seed already.
evaluated_rewards (list): If you have previously evaluated the
parameters passed in as points_to_evaluate you can avoid
re-running those trials by passing in the reward attributes
as a list so the optimiser can be told the results without
needing to re-compute the trial. Must be the same length as
points_to_evaluate.
Tune automatically converts search spaces to Optuna's format:
.. code-block:: python
from ray.tune.suggest.optuna import OptunaSearch
config = {
"a": tune.uniform(6, 8)
"b": tune.loguniform(1e-4, 1e-2)
}
optuna_search = OptunaSearch(
metric="loss",
mode="min")
tune.run(trainable, config=config, search_alg=optuna_search)
If you would like to pass the search space manually, the code would
look like this:
.. code-block:: python
from ray.tune.suggest.optuna import OptunaSearch
import optuna
config = {
"a": optuna.distributions.UniformDistribution(6, 8),
"b": optuna.distributions.LogUniformDistribution(1e-4, 1e-2),
}
optuna_search = OptunaSearch(
space,
metric="loss",
mode="min")
tune.run(trainable, search_alg=optuna_search)
.. versionadded:: 0.8.8
"""
def __init__(self,
             space: Optional[Union[Dict, List[Tuple]]] = None,
             metric: Optional[str] = None,
             mode: Optional[str] = None,
             points_to_evaluate: Optional[List[Dict]] = None,
             sampler: Optional[BaseSampler] = None,
             seed: Optional[int] = None,
             evaluated_rewards: Optional[List] = None):
    """Set up the Optuna-backed searcher.

    Fails fast when optuna is not importable. A dict `space` containing
    unresolved Tune Domain/grid values is converted to Optuna
    distributions; a plain dict is only flattened. List-form spaces are
    accepted but deprecated. The Optuna study is created immediately
    when a space is already known, otherwise lazily in
    `set_search_properties`.
    """
    assert ot is not None, (
        "Optuna must be installed! Run `pip install optuna`.")
    super(OptunaSearch, self).__init__(
        metric=metric,
        mode=mode,
        max_concurrent=None,
        use_early_stopped_trials=None)

    if isinstance(space, dict) and space:
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
        if domain_vars or grid_vars:
            # Tune-style search space: warn and convert it to
            # optuna.distributions objects.
            logger.warning(
                UNRESOLVED_SEARCH_SPACE.format(
                    par="space", cls=type(self).__name__))
            space = self.convert_search_space(space)
        else:
            # Flatten to support nested dicts
            space = flatten_dict(space, "/")

    # Deprecate: 1.5
    if isinstance(space, list):
        logger.warning(
            "Passing lists of `param.suggest_*()` calls to OptunaSearch "
            "as a search space is deprecated and will be removed in "
            "a future release of Ray. Please pass a dict mapping "
            "to `optuna.distributions` objects instead.")

    self._space = space

    self._points_to_evaluate = points_to_evaluate or []
    self._evaluated_rewards = evaluated_rewards

    self._study_name = "optuna"  # Fixed study name for in-memory storage

    # `seed` only applies when we build the sampler ourselves below.
    if sampler and seed:
        logger.warning(
            "You passed an initialized sampler to `OptunaSearch`. The "
            "`seed` parameter has to be passed to the sampler directly "
            "and will be ignored.")

    self._sampler = sampler or ot.samplers.TPESampler(seed=seed)

    assert isinstance(self._sampler, BaseSampler), \
        "You can only pass an instance of `optuna.samplers.BaseSampler` " \
        "as a sampler to `OptunaSearcher`."

    # Maps Tune trial_id -> in-flight Optuna trial.
    self._ot_trials = {}
    self._ot_study = None
    if self._space:
        self._setup_study(mode)
def _setup_study(self, mode: str):
    """Create the in-memory Optuna study and seed it with initial points.

    `mode` selects the optimization direction: "min" maps to
    "minimize", anything else to "maximize".
    """
    if self._metric is None and self._mode:
        # If only a mode was passed, use anonymous metric
        self._metric = DEFAULT_METRIC

    pruner = ot.pruners.NopPruner()
    storage = ot.storages.InMemoryStorage()

    self._ot_study = ot.study.create_study(
        storage=storage,
        sampler=self._sampler,
        pruner=pruner,
        study_name=self._study_name,
        direction="minimize" if mode == "min" else "maximize",
        load_if_exists=True)

    if self._points_to_evaluate:
        if self._evaluated_rewards:
            # Rewards already known: register completed trials instead
            # of re-running the configurations.
            for point, reward in zip(self._points_to_evaluate,
                                     self._evaluated_rewards):
                self.add_evaluated_point(point, reward)
        else:
            for point in self._points_to_evaluate:
                self._ot_study.enqueue_trial(point)
def set_search_properties(self, metric: Optional[str], mode: Optional[str],
                          config: Dict) -> bool:
    """Late-bind metric, mode and search space from `tune.run`.

    Args:
        metric: Objective name; only applied when truthy.
        mode: "min"/"max"; only applied when truthy.
        config: Tune config dict, converted to the Optuna search space.

    Returns:
        False when a search space was already configured (caller must
        not pass a conflicting config), True once the study is set up.
    """
    if self._space:
        return False
    space = self.convert_search_space(config)
    self._space = space
    if metric:
        self._metric = metric
    if mode:
        self._mode = mode

    # Bug fix: use the resolved self._mode rather than the raw `mode`
    # argument. If the mode was given at construction time and `mode` is
    # None here, the study direction would otherwise silently fall back
    # to "maximize" even for a min-mode searcher.
    self._setup_study(self._mode)
    return True
def suggest(self, trial_id: str) -> Optional[Dict]:
    """Draw a new configuration for `trial_id` from the Optuna study.

    Raises RuntimeError when no search space or no metric/mode is
    configured yet. Returns the (unflattened) parameter dict.
    """
    if not self._space:
        raise RuntimeError(
            UNDEFINED_SEARCH_SPACE.format(
                cls=self.__class__.__name__, space="space"))
    if not self._metric or not self._mode:
        raise RuntimeError(
            UNDEFINED_METRIC_MODE.format(
                cls=self.__class__.__name__,
                metric=self._metric,
                mode=self._mode))

    if isinstance(self._space, list):
        # Keep for backwards compatibility
        # Deprecate: 1.5
        if trial_id not in self._ot_trials:
            self._ot_trials[trial_id] = self._ot_study.ask()

        ot_trial = self._ot_trials[trial_id]

        # getattr will fetch the trial.suggest_ function on Optuna trials
        params = {
            args[0] if len(args) > 0 else kwargs["name"]: getattr(
                ot_trial, fn)(*args, **kwargs)
            for (fn, args, kwargs) in self._space
        }
    else:
        # Use Optuna ask interface (since version 2.6.0)
        if trial_id not in self._ot_trials:
            self._ot_trials[trial_id] = self._ot_study.ask(
                fixed_distributions=self._space)
        ot_trial = self._ot_trials[trial_id]
        params = ot_trial.params

    # Keys are "a/b/c" paths; restore the nested dict structure.
    return unflatten_dict(params)
def on_trial_result(self, trial_id: str, result: Dict):
    """Forward an intermediate result to the matching Optuna trial.

    Reports the objective value at the current training iteration so
    Optuna pruners can observe trial progress.
    """
    ot_trial = self._ot_trials[trial_id]
    ot_trial.report(result[self.metric], result[TRAINING_ITERATION])
def on_trial_complete(self,
                      trial_id: str,
                      result: Optional[Dict] = None,
                      error: bool = False):
    """Close out the Optuna trial associated with `trial_id`.

    A trial with a final metric value is told COMPLETE; without one it
    is told FAIL (when `error`) or PRUNED otherwise.
    """
    ot_trial = self._ot_trials[trial_id]
    val = None if not result else result.get(self.metric, None)

    if val is not None:
        state = OptunaTrialState.COMPLETE
    elif error:
        state = OptunaTrialState.FAIL
    else:
        state = OptunaTrialState.PRUNED

    try:
        self._ot_study.tell(ot_trial, val, state=state)
    except ValueError as exc:
        # Optuna rejects some values (e.g. NaN); log instead of crashing.
        logger.warning(exc)
def add_evaluated_point(self,
                        parameters: Dict,
                        value: float,
                        error: bool = False,
                        pruned: bool = False,
                        intermediate_values: Optional[List[float]] = None):
    """Register an externally evaluated configuration with the study.

    Creates a finished Optuna trial from `parameters`/`value` so the
    sampler can learn from it without re-running the configuration.
    Raises RuntimeError when no search space or metric/mode is set.
    """
    if not self._space:
        raise RuntimeError(
            UNDEFINED_SEARCH_SPACE.format(
                cls=self.__class__.__name__, space="space"))
    if not self._metric or not self._mode:
        raise RuntimeError(
            UNDEFINED_METRIC_MODE.format(
                cls=self.__class__.__name__,
                metric=self._metric,
                mode=self._mode))

    if error:
        state = OptunaTrialState.FAIL
    elif pruned:
        state = OptunaTrialState.PRUNED
    else:
        state = OptunaTrialState.COMPLETE

    # Optuna wants intermediate values keyed by step index.
    intermediate = None
    if intermediate_values:
        intermediate = dict(enumerate(intermediate_values))

    trial = ot.trial.create_trial(
        state=state,
        value=value,
        params=parameters,
        distributions=self._space,
        intermediate_values=intermediate)

    self._ot_study.add_trial(trial)
def save(self, checkpoint_path: str):
    """Pickle the searcher state to `checkpoint_path`.

    The state tuple holds the sampler, the in-flight Optuna trials,
    the study, the initial points and the pre-evaluated rewards.
    """
    state = (self._sampler, self._ot_trials, self._ot_study,
             self._points_to_evaluate, self._evaluated_rewards)
    with open(checkpoint_path, "wb") as f:
        pickle.dump(state, f)
def restore(self, checkpoint_path: str):
    """Load searcher state previously written by `save`.

    Accepts both the current 5-tuple layout and legacy 4-tuple
    checkpoints that predate `evaluated_rewards`.
    """
    with open(checkpoint_path, "rb") as f:
        state = pickle.load(f)

    if len(state) == 5:
        (self._sampler, self._ot_trials, self._ot_study,
         self._points_to_evaluate, self._evaluated_rewards) = state
    else:
        # Backwards compatibility: older checkpoints carry no rewards.
        (self._sampler, self._ot_trials, self._ot_study,
         self._points_to_evaluate) = state
@staticmethod
def convert_search_space(spec: Dict) -> Dict[str, Any]:
    """Convert a Tune search-space dict into Optuna distributions.

    Returns a flat dict mapping "a/b/c"-style parameter paths to
    `optuna.distributions` objects. Raises ValueError for grid-search
    parameters and for Domain/sampler combinations Optuna cannot
    express.
    """
    resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)

    if not domain_vars and not grid_vars:
        return {}

    if grid_vars:
        raise ValueError(
            "Grid search parameters cannot be automatically converted "
            "to an Optuna search space.")

    # Flatten and resolve again after checking for grid search.
    spec = flatten_dict(spec, prevent_delimiter=True)
    resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)

    def resolve_value(domain: Domain) -> ot.distributions.BaseDistribution:
        # Map one Tune Domain (+ optional quantization wrapper) to the
        # closest Optuna distribution.
        quantize = None

        sampler = domain.get_sampler()
        if isinstance(sampler, Quantized):
            quantize = sampler.q
            sampler = sampler.sampler
            if isinstance(sampler, LogUniform):
                logger.warning(
                    "Optuna does not handle quantization in loguniform "
                    "sampling. The parameter will be passed but it will "
                    "probably be ignored.")

        if isinstance(domain, Float):
            if isinstance(sampler, LogUniform):
                if quantize:
                    logger.warning(
                        "Optuna does not support both quantization and "
                        "sampling from LogUniform. Dropped quantization.")
                return ot.distributions.LogUniformDistribution(
                    domain.lower, domain.upper)
            elif isinstance(sampler, Uniform):
                if quantize:
                    return ot.distributions.DiscreteUniformDistribution(
                        domain.lower, domain.upper, quantize)
                return ot.distributions.UniformDistribution(
                    domain.lower, domain.upper)

        elif isinstance(domain, Integer):
            if isinstance(sampler, LogUniform):
                return ot.distributions.IntLogUniformDistribution(
                    domain.lower, domain.upper - 1, step=quantize or 1)
            elif isinstance(sampler, Uniform):
                # Upper bound should be inclusive for quantization and
                # exclusive otherwise
                return ot.distributions.IntUniformDistribution(
                    domain.lower,
                    domain.upper - int(bool(not quantize)),
                    step=quantize or 1)

        elif isinstance(domain, Categorical):
            if isinstance(sampler, Uniform):
                return ot.distributions.CategoricalDistribution(
                    domain.categories)

        raise ValueError(
            "Optuna search does not support parameters of type "
            "`{}` with samplers of type `{}`".format(
                type(domain).__name__,
                type(domain.sampler).__name__))

    # Parameter name is e.g. "a/b/c" for nested dicts
    values = {
        "/".join(path): resolve_value(domain)
        for path, domain in domain_vars
    }

    return values
def ComShrDecom(U, V, E):
    # Bipartite core decomposition driver (pseudocode): bound the peel
    # range by the unicore degeneracy, then peel once per alpha value
    # and once per beta value.
    delta = max_unicore(U+V, E)
    for a in range(1, delta+1):
        peelByB(U, V, E, a)
    for b in range(1, delta+1):
        peelByA(U, V, E, b)
def histogram(tracker):
    # Parallel duplicate counting: drop empty slots, sort so equal
    # entries become adjacent, then run-length count each run.
    parallel filter(tracker, 0) # filter out empty elements
    parallel sort(tracker)
    hist = parallel freqCount(tracker)
    return hist
def prefixSums(x):
    # Doubling-stride parallel scan over lg(n) rounds.
    # NOTE(review): `range((2**d), n-1)` stops one element short of the
    # usual formulation (i up to n-1 inclusive) — confirm intended bound.
    for d in range(0, (lg n)):
        parallel for i in range((2**d), n-1):
            newx[i] = x[i-2**d] + x[i]
        x = newx
def max_unicore(V, E):
    # Unipartite k-core style peeling; returns the largest bucket degree
    # ever peeled, used as the degeneracy bound by ComShrDecom.
    degbuckets = ParallelBucketArray(V)
    # bucketqueue datastructure: a dynamic array of buckets
    # where each bucket (implemented as dynamic arrays) stores vertices of a certain deg
    max_deg = 0
    # store exp search
    while exponentialSearch(degbuckets) is not None:
        cur_bucket = exponentialSearch(degbuckets)
        # each bucket stores the deg it corresponds to
        max_deg = max(max_deg, cur_bucket.deg)
        while cur_bucket is not None: # need a wrapper because new vertices could be moved to cur_bucket
            nextLayerTracker = []
            # Prefix-sum of degrees assigns each vertex a disjoint slice
            # of the neighbor tracker array.
            parallel for i, v in enumerate(cur_bucket):
                indices[i] = deg(v)
            indices = parallel prefix_sum(indices)
            parallel for i,v in enumerate(cur_bucket):
                parallel for j,u in enumerate(E[v]) if u is not removed:
                    nextLayerTracker[indices[i]+j] = u
                set v as removed
            nextLayerTracker = parallel filter(nextLayerTracker,removed)
            # Count how many removed neighbors each surviving vertex lost.
            freqs,nextLayerTracker = histogram(nextLayerTracker)
            indices,nextLayerTracker = aggregate(nextLayerTracker,deg)
            parallel for i in indices:
                deg_u = deg(nextLayerTracker[indices[i]])
                degbuckets[deg_u].removeAll(nextLayerTracker[indices[i] : indices[i+1]-1])
            # Apply degree decrements; vertices falling to or below the
            # current peel degree join this round, others get rebucketed.
            parallel for i,freq,u in enumerate(freqs,nextLayerTracker):
                deg(u)-=freq
                if deg(u)<=cur_bucket.deg:
                    filterAddCur[i] = True
                else:
                    filterAddNew[i] = True
            trackerAddCur = filter(nextLayerTracker,filterAddCur)
            trackerAddNew = filter(nextLayerTracker,filterAddNew)
            indices,trackerAddCur = aggregate(trackerAddCur,deg)
            parallel for i in indices:
                cur_bucket_new.addAll(trackerAddCur[indices[i] : indices[i+1]-1])
            indices,trackerAddNew = aggregate(trackerAddNew,deg)
            parallel for i in indices:
                deg_u = deg(trackerAddNew[indices[i]])
                degbuckets[deg_u].addAll(trackerAddNew[indices[i] : indices[i+1]-1])
            cur_bucket = cur_bucket_new
    return max_deg
def peelByA(U, V, E, a): # peelFixB
    # Peel with alpha fixed at `a`: drop every u with deg < a, then peel
    # v-side buckets in increasing degree, recording the Amax/Bmax
    # thresholds as vertices are removed.
    # u correspond to a; v correspond to b
    # we need Bmax(a, u) and Amax(b, v)
    # U = set of vertices u
    parallel for u in U:
        if deg(u)<a:
            set u as removed
            update E[u]
    bbuckets = ParallelBucketArray(V)
    while exponentialSearch(bbuckets) is not None:
        vbucket = exponentialSearch(bbuckets)
        cur_b = vbucket.deg
        while vbucket is not None:
            uTracker = []
            vTracker = []
            parallel for i,v in enumerate(vbucket):
                indices[i] = deg(v)
            indices = parallel prefix_sum(indices)
            parallel for i,v in enumerate(vbucket):
                set v as removed
                # Removing v at beta=cur_b certifies Amax >= a for all
                # beta values up to cur_b.
                parallel for bi in range(1, cur_b+1):
                    if Amax(bi,v)<a:
                        Amax(bi,v)=a
                parallel for j,u in enumerate(E[v]) if not removed:
                    uTracker[indices[i]+j] = u
            uTracker = parallel filter(uTracker,None)
            # some empty positions exist because u is already removed
            parallel for i,u,freq in enumerate(histogram(uTracker)):
                deg(u)-=freq
                if deg(u)<a:
                    filterMap[i]=True
                    Bmax(a,u)=cur_b
                    set u as removed
            uTracker = parallel filter(uTracker,filterMap)
            # Cascade: neighbors of newly removed u vertices lose degree.
            parallel for i,u in enumerate(uTracker):
                indices[i] = deg(u)
            indices = parallel prefix_sum(indices)
            parallel for i,u in enumerate(uTracker):
                parallel for j,v in enumerate(E[u]) if v is not removed:
                    vTracker[indices[i]+j] = v
            vTracker = parallel filter(vTracker,None)
            freqs,vTracker = histogram(vTracker)
            indices,vTrackerRemove = aggregate(vTracker,deg)
            # do aggregation over degree; return sorted tracker and an array indicating the starts of vertices of a certain deg
            parallel for i in range(indices):
                deg_v = deg(vTrackerRemove[indices[i]])
                bbuckets[deg_v].removeAll(vTrackerRemove[indices[i] : indices[i+1]-1])
            # NOTE(review): `enumerate(freq,vTracker)` below likely means
            # `freqs` (the histogram result) — confirm against the paper.
            parallel for i,freq,v in enumerate(freq,vTracker):
                deg(v)-=freq
                if deg(v)<=cur_b:
                    filterAddCur[i]=True
                else:
                    filterAddNew[i]=True
            vTrackerAddNew = parallel filter(vTracker,filterAddNew)
            vTrackerAddCur = parallel filter(vTracker,filterAddCur)
            indices,vTrackerAddNew = aggregate(vTrackerAddNew,deg)
            parallel for i in range(indices):
                deg_v = deg(vTrackerAddNew[indices[i]])
                # NOTE(review): `deg(v)` below is probably meant to be the
                # freshly computed `deg_v` — confirm.
                bbuckets[deg(v)].addAll(vTrackerAddNew[indices[i] : indices[i+1]-1])
            indices,vTrackerAddCur = aggregate(vTrackerAddCur,deg)
            parallel for i in range(indices):
                vbucket_new.addAll(vTrackerAddCur[indices[i] : indices[i+1]-1])
            vbucket = vbucket_new
def peelByB(U, V, E, b):
    # Symmetric counterpart of peelByA with the roles of U/V
    # (alpha/beta) swapped.
    reverse peelByA
def checkInterval(arr):
    # this checks in O(1) span whether the interval contains a nonempty bucket
    hasNext = False
    parallel for bucket in arr:
        if bucket is not None:
            # Benign write race: all writers store True.
            compare_and_swap(hasNext,True)
    return hasNext
def exponentialSearch (curPos, degbuckets, max_deg):
    # Doubling search for the first nonempty bucket past curPos; returns
    # the minimum nonempty bucket in the first interval that hits.
    n = 1
    while n <= max_deg:
        # we check the interval [curPos+2^(i-1)+1,curPos+2^i]
        start = curPos+n//2+1
        end = curPos+n
        if checkInterval(degbuckets[start : end+1]):
            return parallel reduce_min(degbuckets[start : end+1])
        n *= 2 | benchmarks/BiCore/pseudocode.py | def ComShrDecom(U, V, E):
delta = max_unicore(U+V, E)
for a in range(1, delta+1):
peelByB(U, V, E, a)
for b in range(1, delta+1):
peelByA(U, V, E, b)
def histogram(tracker):
parallel filter(tracker, 0) # filter out empty elements
parallel sort(tracker)
hist = parallel freqCount(tracker)
return hist
def prefixSums(x):
for d in range(0, (lg n)):
parallel for i in range((2**d), n-1):
newx[i] = x[i-2**d] + x[i]
x = newx
def max_unicore(V, E):
degbuckets = ParallelBucketArray(V)
# bucketqueue datastructure: a dynamic array of buckets
# where each bucket (implemented as dynamic arrays) stores vertices of a certain deg
max_deg = 0
# store exp search
while exponentialSearch(degbuckets) is not None:
cur_bucket = exponentialSearch(degbuckets)
# each bucket stores the deg it corresponds to
max_deg = max(max_deg, cur_bucket.deg)
while cur_bucket is not None: # need a wrapper because new vertices could be moved to cur_bucket
nextLayerTracker = []
parallel for i, v in enumerate(cur_bucket):
indices[i] = deg(v)
indices = parallel prefix_sum(indices)
parallel for i,v in enumerate(cur_bucket):
parallel for j,u in enumerate(E[v]) if u is not removed:
nextLayerTracker[indices[i]+j] = u
set v as removed
nextLayerTracker = parallel filter(nextLayerTracker,removed)
freqs,nextLayerTracker = histogram(nextLayerTracker)
indices,nextLayerTracker = aggregate(nextLayerTracker,deg)
parallel for i in indices:
deg_u = deg(nextLayerTracker[indices[i]])
degbuckets[deg_u].removeAll(nextLayerTracker[indices[i] : indices[i+1]-1])
parallel for i,freq,u in enumerate(freqs,nextLayerTracker):
deg(u)-=freq
if deg(u)<=cur_bucket.deg:
filterAddCur[i] = True
else:
filterAddNew[i] = True
trackerAddCur = filter(nextLayerTracker,filterAddCur)
trackerAddNew = filter(nextLayerTracker,filterAddNew)
indices,trackerAddCur = aggregate(trackerAddCur,deg)
parallel for i in indices:
cur_bucket_new.addAll(trackerAddCur[indices[i] : indices[i+1]-1])
indices,trackerAddNew = aggregate(trackerAddNew,deg)
parallel for i in indices:
deg_u = deg(trackerAddNew[indices[i]])
degbuckets[deg_u].addAll(trackerAddNew[indices[i] : indices[i+1]-1])
cur_bucket = cur_bucket_new
return max_deg
def peelByA(U, V, E, a): # peelFixB
# u correspond to a; v correspond to b
# we need Bmax(a, u) and Amax(b, v)
# U = set of vertices u
parallel for u in U:
if deg(u)<a:
set u as removed
update E[u]
bbuckets = ParallelBucketArray(V)
while exponentialSearch(bbuckets) is not None:
vbucket = exponentialSearch(bbuckets)
cur_b = vbucket.deg
while vbucket is not None:
uTracker = []
vTracker = []
parallel for i,v in enumerate(vbucket):
indices[i] = deg(v)
indices = parallel prefix_sum(indices)
parallel for i,v in enumerate(vbucket):
set v as removed
parallel for bi in range(1, cur_b+1):
if Amax(bi,v)<a:
Amax(bi,v)=a
parallel for j,u in enumerate(E[v]) if not removed:
uTracker[indices[i]+j] = u
uTracker = parallel filter(uTracker,None)
# some empty positions exist because u is already removed
parallel for i,u,freq in enumerate(histogram(uTracker)):
deg(u)-=freq
if deg(u)<a:
filterMap[i]=True
Bmax(a,u)=cur_b
set u as removed
uTracker = parallel filter(uTracker,filterMap)
parallel for i,u in enumerate(uTracker):
indices[i] = deg(u)
indices = parallel prefix_sum(indices)
parallel for i,u in enumerate(uTracker):
parallel for j,v in enumerate(E[u]) if v is not removed:
vTracker[indices[i]+j] = v
vTracker = parallel filter(vTracker,None)
freqs,vTracker = histogram(vTracker)
indices,vTrackerRemove = aggregate(vTracker,deg)
# do aggregation over degree; return sorted tracker and an array indicating the starts of vertices of a certain deg
parallel for i in range(indices):
deg_v = deg(vTrackerRemove[indices[i]])
bbuckets[deg_v].removeAll(vTrackerRemove[indices[i] : indices[i+1]-1])
parallel for i,freq,v in enumerate(freq,vTracker):
deg(v)-=freq
if deg(v)<=cur_b:
filterAddCur[i]=True
else:
filterAddNew[i]=True
vTrackerAddNew = parallel filter(vTracker,filterAddNew)
vTrackerAddCur = parallel filter(vTracker,filterAddCur)
indices,vTrackerAddNew = aggregate(vTrackerAddNew,deg)
parallel for i in range(indices):
deg_v = deg(vTrackerAddNew[indices[i]])
bbuckets[deg(v)].addAll(vTrackerAddNew[indices[i] : indices[i+1]-1])
indices,vTrackerAddCur = aggregate(vTrackerAddCur,deg)
parallel for i in range(indices):
vbucket_new.addAll(vTrackerAddCur[indices[i] : indices[i+1]-1])
vbucket = vbucket_new
def peelByB(U, V, E, b):
reverse peelByA
def checkInterval(arr):
# this checks in O(1) span whether the interval contains a nonempty bucket
hasNext = False
parallel for bucket in arr:
if bucket is not None:
compare_and_swap(hasNext,True)
return hasNext
def exponentialSearch (curPos, degbuckets, max_deg):
n = 1
while n <= max_deg:
# we check the interval [curPos+2^(i-1)+1,curPos+2^i]
start = curPos+n//2+1
end = curPos+n
if checkInterval(degbuckets[start : end+1]):
return parallel reduce_min(degbuckets[start : end+1])
n *= 2 | 0.433262 | 0.407392 |
import grpc
from yandex.cloud.operation import operation_pb2 as yandex_dot_cloud_dot_operation_dot_operation__pb2
from yandex.cloud.vpc.v1 import route_table_pb2 as yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__pb2
from yandex.cloud.vpc.v1 import route_table_service_pb2 as yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2
class RouteTableServiceStub(object):
    """A set of methods for managing RouteTable resources.
    """

    # NOTE(review): this module matches gRPC protoc-plugin output —
    # regenerate from the .proto rather than editing by hand.

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # One client callable per RPC; (de)serializers come from the
        # generated *_pb2 modules so wire types match the service.
        self.Get = channel.unary_unary(
            '/yandex.cloud.vpc.v1.RouteTableService/Get',
            request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.GetRouteTableRequest.SerializeToString,
            response_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__pb2.RouteTable.FromString,
        )
        self.List = channel.unary_unary(
            '/yandex.cloud.vpc.v1.RouteTableService/List',
            request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.ListRouteTablesRequest.SerializeToString,
            response_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.ListRouteTablesResponse.FromString,
        )
        # Mutating RPCs return a long-running Operation.
        self.Create = channel.unary_unary(
            '/yandex.cloud.vpc.v1.RouteTableService/Create',
            request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.CreateRouteTableRequest.SerializeToString,
            response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
        )
        self.Update = channel.unary_unary(
            '/yandex.cloud.vpc.v1.RouteTableService/Update',
            request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.UpdateRouteTableRequest.SerializeToString,
            response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
        )
        self.Delete = channel.unary_unary(
            '/yandex.cloud.vpc.v1.RouteTableService/Delete',
            request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.DeleteRouteTableRequest.SerializeToString,
            response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
        )
        self.ListOperations = channel.unary_unary(
            '/yandex.cloud.vpc.v1.RouteTableService/ListOperations',
            request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.ListRouteTableOperationsRequest.SerializeToString,
            response_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.ListRouteTableOperationsResponse.FromString,
        )
        self.Move = channel.unary_unary(
            '/yandex.cloud.vpc.v1.RouteTableService/Move',
            request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.MoveRouteTableRequest.SerializeToString,
            response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
        )
class RouteTableServiceServicer(object):
    """A set of methods for managing RouteTable resources.
    """

    # NOTE(review): protoc-generated base class. Every handler returns
    # UNIMPLEMENTED by default; server implementations override them.

    def Get(self, request, context):
        """Returns the specified RouteTable resource.

        To get the list of available RouteTable resources, make a [List] request.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def List(self, request, context):
        """Retrieves the list of RouteTable resources in the specified folder.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Create(self, request, context):
        """Creates a route table in the specified folder and network.

        Method starts an asynchronous operation that can be cancelled while it is in progress.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Update(self, request, context):
        """Updates the specified route table.

        Method starts an asynchronous operation that can be cancelled while it is in progress.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Delete(self, request, context):
        """Deletes the specified route table.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListOperations(self, request, context):
        """List operations for the specified route table.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Move(self, request, context):
        """Move route table to another folder.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_RouteTableServiceServicer_to_server(servicer, server):
    # Register the servicer's handlers under the fully qualified service
    # name so the server can dispatch incoming RPCs to it.
    rpc_method_handlers = {
        'Get': grpc.unary_unary_rpc_method_handler(
            servicer.Get,
            request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.GetRouteTableRequest.FromString,
            response_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__pb2.RouteTable.SerializeToString,
        ),
        'List': grpc.unary_unary_rpc_method_handler(
            servicer.List,
            request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.ListRouteTablesRequest.FromString,
            response_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.ListRouteTablesResponse.SerializeToString,
        ),
        'Create': grpc.unary_unary_rpc_method_handler(
            servicer.Create,
            request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.CreateRouteTableRequest.FromString,
            response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
        ),
        'Update': grpc.unary_unary_rpc_method_handler(
            servicer.Update,
            request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.UpdateRouteTableRequest.FromString,
            response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
        ),
        'Delete': grpc.unary_unary_rpc_method_handler(
            servicer.Delete,
            request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.DeleteRouteTableRequest.FromString,
            response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
        ),
        'ListOperations': grpc.unary_unary_rpc_method_handler(
            servicer.ListOperations,
            request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.ListRouteTableOperationsRequest.FromString,
            response_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.ListRouteTableOperationsResponse.SerializeToString,
        ),
        'Move': grpc.unary_unary_rpc_method_handler(
            servicer.Move,
            request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.MoveRouteTableRequest.FromString,
            response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'yandex.cloud.vpc.v1.RouteTableService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,)) | yandex/cloud/vpc/v1/route_table_service_pb2_grpc.py | import grpc
from yandex.cloud.operation import operation_pb2 as yandex_dot_cloud_dot_operation_dot_operation__pb2
from yandex.cloud.vpc.v1 import route_table_pb2 as yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__pb2
from yandex.cloud.vpc.v1 import route_table_service_pb2 as yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2
class RouteTableServiceStub(object):
"""A set of methods for managing RouteTable resources.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Get = channel.unary_unary(
'/yandex.cloud.vpc.v1.RouteTableService/Get',
request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.GetRouteTableRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__pb2.RouteTable.FromString,
)
self.List = channel.unary_unary(
'/yandex.cloud.vpc.v1.RouteTableService/List',
request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.ListRouteTablesRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.ListRouteTablesResponse.FromString,
)
self.Create = channel.unary_unary(
'/yandex.cloud.vpc.v1.RouteTableService/Create',
request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.CreateRouteTableRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Update = channel.unary_unary(
'/yandex.cloud.vpc.v1.RouteTableService/Update',
request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.UpdateRouteTableRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Delete = channel.unary_unary(
'/yandex.cloud.vpc.v1.RouteTableService/Delete',
request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.DeleteRouteTableRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.ListOperations = channel.unary_unary(
'/yandex.cloud.vpc.v1.RouteTableService/ListOperations',
request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.ListRouteTableOperationsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.ListRouteTableOperationsResponse.FromString,
)
self.Move = channel.unary_unary(
'/yandex.cloud.vpc.v1.RouteTableService/Move',
request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.MoveRouteTableRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
class RouteTableServiceServicer(object):
"""A set of methods for managing RouteTable resources.
"""
def Get(self, request, context):
"""Returns the specified RouteTable resource.
To get the list of available RouteTable resources, make a [List] request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def List(self, request, context):
"""Retrieves the list of RouteTable resources in the specified folder.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Create(self, request, context):
"""Creates a route table in the specified folder and network.
Method starts an asynchronous operation that can be cancelled while it is in progress.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Update(self, request, context):
"""Updates the specified route table.
Method starts an asynchronous operation that can be cancelled while it is in progress.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Delete(self, request, context):
"""Deletes the specified route table.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListOperations(self, request, context):
"""List operations for the specified route table.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Move(self, request, context):
"""Move route table to another folder.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RouteTableServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.GetRouteTableRequest.FromString,
response_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__pb2.RouteTable.SerializeToString,
),
'List': grpc.unary_unary_rpc_method_handler(
servicer.List,
request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.ListRouteTablesRequest.FromString,
response_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.ListRouteTablesResponse.SerializeToString,
),
'Create': grpc.unary_unary_rpc_method_handler(
servicer.Create,
request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.CreateRouteTableRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'Update': grpc.unary_unary_rpc_method_handler(
servicer.Update,
request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.UpdateRouteTableRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'Delete': grpc.unary_unary_rpc_method_handler(
servicer.Delete,
request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.DeleteRouteTableRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'ListOperations': grpc.unary_unary_rpc_method_handler(
servicer.ListOperations,
request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.ListRouteTableOperationsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.ListRouteTableOperationsResponse.SerializeToString,
),
'Move': grpc.unary_unary_rpc_method_handler(
servicer.Move,
request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_route__table__service__pb2.MoveRouteTableRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'yandex.cloud.vpc.v1.RouteTableService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,)) | 0.619126 | 0.106435 |
import numpy as np
from pyannote.audio.keras_utils import load_model
from pyannote.audio.signal import Binarize, Peak
from pyannote.audio.features import Precomputed
import my_cluster
from pyannote.core import Annotation
from pyannote.audio.embedding.utils import l2_normalize
from pyannote.database import get_annotated
class SpeakerDiarizationPre(object):
'''Speaker diarization with affinity propagation'''
def __init__(self, feature_extraction, sad__pre, scd__pre, emb__pre,
sad__onset=0.7, sad__offset=0.7, sad__dimension=1,
scd__alpha=0.5, scd__min_duration=1., scd__dimension=1,
emb__internal=False,
cls__damping=0.8, cls__preference=-20,
cls__metric='cosine'):
super(SpeakerDiarizationPre, self).__init__()
self.feature_extraction = feature_extraction
# speech activity detection hyper-parameters
self.sad__onset = sad__onset
self.sad__offset = sad__offset
self.sad__dimension = sad__dimension
# speaker change detection hyper-parameters
self.scd__alpha = scd__alpha
self.scd__min_duration = scd__min_duration
self.scd__dimension = scd__dimension
# embedding hyper-parameters
self.emb__internal = emb__internal
# clustering hyper-parameters
self.cls__damping = cls__damping
self.cls__preference = cls__preference
self.cls__metric = cls__metric
step = self.feature_extraction.sliding_window().step
# initialize speech activity detection module
self.sad_ = Precomputed(sad__pre)
self.sad_binarize_ = Binarize(onset=self.sad__onset,
offset=self.sad__offset)
# initialize speaker change detection module
self.scd_ = Precomputed(scd__pre)
self.scd_peak_ = Peak(alpha=self.scd__alpha,
min_duration=self.scd__min_duration,
percentile=False)
# initialize speech turn embedding module
self.emb_ = Precomputed(emb__pre)
# initialize clustering module
self.cls_ = my_cluster.ClusteringAP(metric=self.cls__metric,
damping=self.cls__damping,
preference=self.cls__preference)
def __call__(self, current_file, annotated=False):
# speech activity detection
soft_sad = self.sad_(current_file)
hard_sad = self.sad_binarize_.apply(
soft_sad, dimension=self.sad__dimension)
# speaker change detection
soft_scd = self.scd_(current_file)
hard_scd = self.scd_peak_.apply(
soft_scd, dimension=self.scd__dimension)
# speech turns
speech_turns = hard_scd.crop(hard_sad)
if annotated:
speech_turns = speech_turns.crop(
get_annotated(current_file))
# remove small speech turns
emb = self.emb_(current_file)
speech_turns = [speech_turn for speech_turn in speech_turns if len(emb.crop(speech_turn, mode='loose')) > 0]
# speech turns embedding
to_stack = [
np.sum(emb.crop(speech_turn, mode='loose'), axis=0)
for speech_turn in speech_turns]
if len(to_stack) < 1:
return None
fX = l2_normalize(np.vstack(to_stack))
# speech turn clustering
cluster_labels = self.cls_.apply(fX)
# build hypothesis from clustering results
hypothesis = Annotation(uri=current_file['uri'])
for speech_turn, label in zip(speech_turns, cluster_labels):
hypothesis[speech_turn] = label
return hypothesis
class SpeakerDiarizationOracleSegAP(object):
'''Speaker diarization with oracle segmentation and affinity propagation'''
def __init__(self, feature_extraction, emb__pre,
emb__internal=False,
cls__damping=0.8, cls__preference=-20,
cls__metric='cosine'):
super(SpeakerDiarizationOracleSegAP, self).__init__()
self.feature_extraction = feature_extraction
# embedding hyper-parameters
self.emb__internal = emb__internal
# clustering hyper-parameters
self.cls__damping = cls__damping
self.cls__preference = cls__preference
self.cls__metric = cls__metric
step = self.feature_extraction.sliding_window().step
# initialize speech turn embedding module
self.emb_ = Precomputed(emb__pre)
# initialize clustering module
self.cls_ = my_cluster.ClusteringAP(metric=self.cls__metric,
damping=self.cls__damping,
preference=self.cls__preference)
def __call__(self, current_file, annotated=False):
# speech turns
speech_turns = current_file['annotation'].get_timeline()
if annotated:
speech_turns = speech_turns.crop(
get_annotated(current_file))
# remove small speech turns
emb = self.emb_(current_file)
speech_turns = [speech_turn for speech_turn in speech_turns if len(emb.crop(speech_turn, mode='loose')) > 0]
# speech turns embedding
to_stack = [
np.sum(emb.crop(speech_turn, mode='loose'), axis=0)
for speech_turn in speech_turns]
if len(to_stack) < 1:
return None
fX = l2_normalize(np.vstack(to_stack))
# speech turn clustering
cluster_labels = self.cls_.apply(fX)
# build hypothesis from clustering results
hypothesis = Annotation(uri=current_file['uri'])
for speech_turn, label in zip(speech_turns, cluster_labels):
hypothesis[speech_turn] = label
return hypothesis
class SpeakerDiarizationHACPre(object):
'''Speaker diarization with hierarchical agglomerative clustering'''
def __init__(self, feature_extraction, sad__pre, scd__pre, emb__pre,
sad__onset=0.7, sad__offset=0.7, sad__dimension=1,
scd__alpha=0.5, scd__min_duration=1., scd__dimension=1,
emb__internal=False,
cls__method='average', cls__threshold=5,
cls__metric='cosine'):
super(SpeakerDiarizationHACPre, self).__init__()
self.feature_extraction = feature_extraction
# speech activity detection hyper-parameters
self.sad__onset = sad__onset
self.sad__offset = sad__offset
self.sad__dimension = sad__dimension
# speaker change detection hyper-parameters
self.scd__alpha = scd__alpha
self.scd__min_duration = scd__min_duration
self.scd__dimension = scd__dimension
# embedding hyper-parameters
self.emb__internal = emb__internal
# clustering hyper-parameters
self.cls__method = cls__method
self.cls__threshold = cls__threshold
self.cls__metric = cls__metric
step = self.feature_extraction.sliding_window().step
# initialize speech activity detection module
self.sad_ = Precomputed(sad__pre)
self.sad_binarize_ = Binarize(onset=self.sad__onset,
offset=self.sad__offset)
# initialize speaker change detection module
self.scd_ = Precomputed(scd__pre)
self.scd_peak_ = Peak(alpha=self.scd__alpha,
min_duration=self.scd__min_duration,
percentile=False)
# initialize speech turn embedding module
self.emb_ = Precomputed(emb__pre)
# initialize clustering module
self.cls_ = my_cluster.ClusteringHAC(metric=self.cls__metric,
method=self.cls__method,
threshold=self.cls__threshold)
def __call__(self, current_file, annotated=False):
# speech activity detection
soft_sad = self.sad_(current_file)
hard_sad = self.sad_binarize_.apply(
soft_sad, dimension=self.sad__dimension)
# speaker change detection
soft_scd = self.scd_(current_file)
hard_scd = self.scd_peak_.apply(
soft_scd, dimension=self.scd__dimension)
# speech turns
speech_turns = hard_scd.crop(hard_sad)
if annotated:
speech_turns = speech_turns.crop(
get_annotated(current_file))
# remove small speech turns
emb = self.emb_(current_file)
speech_turns = [speech_turn for speech_turn in speech_turns if len(emb.crop(speech_turn, mode='loose')) > 0]
# speech turns embedding
to_stack = [
np.sum(emb.crop(speech_turn, mode='loose'), axis=0)
for speech_turn in speech_turns]
if len(to_stack) < 1:
return None
fX = l2_normalize(np.vstack(to_stack))
# speech turn clustering
cluster_labels = self.cls_.apply(fX)
# build hypothesis from clustering results
hypothesis = Annotation(uri=current_file['uri'])
for speech_turn, label in zip(speech_turns, cluster_labels):
hypothesis[speech_turn] = label
return hypothesis
class SpeakerDiarizationPreStages(object):
def __init__(self, feature_extraction, sad__pre, scd__pre, emb__pre,
sad__onset=0.7, sad__offset=0.7, sad__dimension=1,
scd__alpha=0.5, scd__min_duration=1., scd__dimension=1,
emb__internal=False,
cls__damping=0.8, cls__preference=-20,
cls__metric='cosine'):
super(SpeakerDiarizationPreStages, self).__init__()
self.feature_extraction = feature_extraction
# speech activity detection hyper-parameters
self.sad__onset = sad__onset
self.sad__offset = sad__offset
self.sad__dimension = sad__dimension
# speaker change detection hyper-parameters
self.scd__alpha = scd__alpha
self.scd__min_duration = scd__min_duration
self.scd__dimension = scd__dimension
# embedding hyper-parameters
self.emb__internal = emb__internal
# clustering hyper-parameters
self.cls__damping = cls__damping
self.cls__preference = cls__preference
self.cls__metric = cls__metric
step = self.feature_extraction.sliding_window().step
# initialize speech activity detection module
self.sad_ = Precomputed(sad__pre)
self.sad_binarize_ = Binarize(onset=self.sad__onset,
offset=self.sad__offset)
# initialize speaker change detection module
self.scd_ = Precomputed(scd__pre)
self.scd_peak_ = Peak(alpha=self.scd__alpha,
min_duration=self.scd__min_duration,
percentile=False)
# initialize speech turn embedding module
self.emb_ = Precomputed(emb__pre)
# initialize clustering module
self.cls_ = my_cluster.ClusteringAP(metric=self.cls__metric,
damping=self.cls__damping,
preference=self.cls__preference)
def __call__(self, current_file, annotated=False):
# speech activity detection
soft_sad = self.sad_(current_file)
hard_sad = self.sad_binarize_.apply(
soft_sad, dimension=self.sad__dimension)
sad_output = hard_sad.to_annotation()
# speaker change detection
soft_scd = self.scd_(current_file)
hard_scd = self.scd_peak_.apply(
soft_scd, dimension=self.scd__dimension)
# speech turns
speech_turns = hard_scd.crop(hard_sad)
scd_output = speech_turns.to_annotation()
if annotated:
speech_turns = speech_turns.crop(
get_annotated(current_file))
# remove small speech turns
emb = self.emb_(current_file)
speech_turns = [speech_turn for speech_turn in speech_turns if len(emb.crop(speech_turn, mode='loose')) > 0]
# speech turns embedding
to_stack = [
np.sum(emb.crop(speech_turn, mode='loose'), axis=0)
for speech_turn in speech_turns]
if len(to_stack) < 1:
return None
fX = l2_normalize(np.vstack(to_stack))
# speech turn clustering
cluster_labels = self.cls_.apply(fX)
# build hypothesis from clustering results
hypothesis = Annotation(uri=current_file['uri'])
for speech_turn, label in zip(speech_turns, cluster_labels):
hypothesis[speech_turn] = label
return hypothesis, sad_output, scd_output
class SpeakerDiarizationWeighted(object):
def __init__(self, feature_extraction, sad__pre, scd__pre, weight__pre, emb__pre,
sad__onset=0.7, sad__offset=0.7, sad__dimension=1,
scd__alpha=0.5, scd__min_duration=1., scd__dimension=1,
emb__internal=False,
cls__damping=0.8, cls__preference=-20,
cls__metric='cosine'):
super(SpeakerDiarizationWeighted, self).__init__()
self.feature_extraction = feature_extraction
# speech activity detection hyper-parameters
self.sad__onset = sad__onset
self.sad__offset = sad__offset
self.sad__dimension = sad__dimension
# speaker change detection hyper-parameters
self.scd__alpha = scd__alpha
self.scd__min_duration = scd__min_duration
self.scd__dimension = scd__dimension
# embedding hyper-parameters
self.emb__internal = emb__internal
# clustering hyper-parameters
self.cls__damping = cls__damping
self.cls__preference = cls__preference
self.cls__metric = cls__metric
step = self.feature_extraction.sliding_window().step
# initialize speech activity detection module
self.sad_ = Precomputed(sad__pre)
self.sad_binarize_ = Binarize(onset=self.sad__onset,
offset=self.sad__offset)
# initialize speaker change detection module
self.scd_ = Precomputed(scd__pre)
self.scd_peak_ = Peak(alpha=self.scd__alpha,
min_duration=self.scd__min_duration,
percentile=False)
# initialize weights
self.weight_ = Precomputed(weight__pre)
# initialize speech turn embedding module
self.emb_ = Precomputed(emb__pre)
# initialize clustering module
self.cls_ = my_cluster.ClusteringAP(metric=self.cls__metric,
damping=self.cls__damping,
preference=self.cls__preference)
def __call__(self, current_file, annotated=False):
# speech activity detection
soft_sad = self.sad_(current_file)
hard_sad = self.sad_binarize_.apply(
soft_sad, dimension=self.sad__dimension)
# speaker change detection
soft_scd = self.scd_(current_file)
hard_scd = self.scd_peak_.apply(
soft_scd, dimension=self.scd__dimension)
# speech turns
speech_turns = hard_scd.crop(hard_sad)
if annotated:
speech_turns = speech_turns.crop(
get_annotated(current_file))
# remove small speech turns
emb = self.emb_(current_file)
speech_turns = [speech_turn for speech_turn in speech_turns if len(emb.crop(speech_turn, mode='loose')) > 0]
# weights
weight = self.weight_(current_file)
# speech turns embedding
to_stack = [
np.mean(emb.crop(speech_turn, mode='loose')*(1-weight.crop(speech_turn, mode='loose')), axis=0)
for speech_turn in speech_turns]
if len(to_stack) < 1:
return None
fX = l2_normalize(np.vstack(to_stack))
# speech turn clustering
cluster_labels = self.cls_.apply(fX)
# build hypothesis from clustering results
hypothesis = Annotation(uri=current_file['uri'])
for speech_turn, label in zip(speech_turns, cluster_labels):
hypothesis[speech_turn] = label
return hypothesis
class SpeakerDiarizationOnSceneHAC(object):
def __init__(self, emb__pre,
cls__method='average', cls__threshold=5,
cls__metric='cosine'):
super(SpeakerDiarizationOnSceneHAC, self).__init__()
# clustering hyper-parameters
self.cls__method = cls__method
self.cls__threshold = cls__threshold
self.cls__metric = cls__metric
# initialize speech turn embedding module
self.emb_ = Precomputed(emb__pre)
# initialize clustering module
self.cls_ = my_cluster.ClusteringHAC(metric=self.cls__metric,
method=self.cls__method,
threshold=self.cls__threshold)
def __call__(self, current_file):
# speech turns
hypothesis = Annotation(uri=current_file['uri'])
sencences = current_file['speech_timeline']
scenes = current_file['scenes']
# remove small speech turns
emb = self.emb_(current_file)
#speech_turns = [speech_turn for speech_turn in speech_turns if len(emb.crop(speech_turn, mode='loose')) > 0]
for scene in scenes:
speech_turns = sencences.crop(scene)
if len(speech_turns) == 0:
continue
if len(speech_turns) == 1:
hypothesis[speech_turns[0]] = 1
continue
# speech turns embedding
to_stack = [
np.sum(emb.crop(speech_turn, mode='loose'), axis=0)
for speech_turn in speech_turns]
fX = l2_normalize(np.vstack(to_stack))
# speech turn clustering
cluster_labels = self.cls_.apply(fX)
# build hypothesis from clustering results
for speech_turn, label in zip(speech_turns, cluster_labels):
hypothesis[speech_turn] = label
return hypothesis
class SpeakerDiarizationOnEnrollHAC(object):
def __init__(self,
cls__method='average', cls__threshold=5,
cls__metric='cosine'):
super(SpeakerDiarizationOnEnrollHAC, self).__init__()
# clustering hyper-parameters
self.cls__method = cls__method
self.cls__threshold = cls__threshold
self.cls__metric = cls__metric
# initialize clustering module
self.cls_ = my_cluster.ClusteringHAC(metric=self.cls__metric,
method=self.cls__method,
threshold=self.cls__threshold)
def __call__(self, embedding, speech_turns):
hypothesis = Annotation()
#speech_turns = [speech_turn for speech_turn in speech_turns if len(emb.crop(speech_turn, mode='loose')) > 0]
if len(speech_turns) == 0:
return hypothesis
if len(speech_turns) == 1:
hypothesis[speech_turns[0]] = 1
return hypothesis
# speech turns embedding
to_stack = [
np.sum(embedding.crop(speech_turn, mode='loose'), axis=0)
for speech_turn in speech_turns]
fX = l2_normalize(np.vstack(to_stack))
# speech turn clustering
cluster_labels = self.cls_.apply(fX)
# build hypothesis from clustering results
for speech_turn, label in zip(speech_turns, cluster_labels):
hypothesis[speech_turn] = label
return hypothesis | diarization_with_neural_approach/optimization/speaker_diarization.py | import numpy as np
from pyannote.audio.keras_utils import load_model
from pyannote.audio.signal import Binarize, Peak
from pyannote.audio.features import Precomputed
import my_cluster
from pyannote.core import Annotation
from pyannote.audio.embedding.utils import l2_normalize
from pyannote.database import get_annotated
class SpeakerDiarizationPre(object):
'''Speaker diarization with affinity propagation'''
def __init__(self, feature_extraction, sad__pre, scd__pre, emb__pre,
sad__onset=0.7, sad__offset=0.7, sad__dimension=1,
scd__alpha=0.5, scd__min_duration=1., scd__dimension=1,
emb__internal=False,
cls__damping=0.8, cls__preference=-20,
cls__metric='cosine'):
super(SpeakerDiarizationPre, self).__init__()
self.feature_extraction = feature_extraction
# speech activity detection hyper-parameters
self.sad__onset = sad__onset
self.sad__offset = sad__offset
self.sad__dimension = sad__dimension
# speaker change detection hyper-parameters
self.scd__alpha = scd__alpha
self.scd__min_duration = scd__min_duration
self.scd__dimension = scd__dimension
# embedding hyper-parameters
self.emb__internal = emb__internal
# clustering hyper-parameters
self.cls__damping = cls__damping
self.cls__preference = cls__preference
self.cls__metric = cls__metric
step = self.feature_extraction.sliding_window().step
# initialize speech activity detection module
self.sad_ = Precomputed(sad__pre)
self.sad_binarize_ = Binarize(onset=self.sad__onset,
offset=self.sad__offset)
# initialize speaker change detection module
self.scd_ = Precomputed(scd__pre)
self.scd_peak_ = Peak(alpha=self.scd__alpha,
min_duration=self.scd__min_duration,
percentile=False)
# initialize speech turn embedding module
self.emb_ = Precomputed(emb__pre)
# initialize clustering module
self.cls_ = my_cluster.ClusteringAP(metric=self.cls__metric,
damping=self.cls__damping,
preference=self.cls__preference)
def __call__(self, current_file, annotated=False):
# speech activity detection
soft_sad = self.sad_(current_file)
hard_sad = self.sad_binarize_.apply(
soft_sad, dimension=self.sad__dimension)
# speaker change detection
soft_scd = self.scd_(current_file)
hard_scd = self.scd_peak_.apply(
soft_scd, dimension=self.scd__dimension)
# speech turns
speech_turns = hard_scd.crop(hard_sad)
if annotated:
speech_turns = speech_turns.crop(
get_annotated(current_file))
# remove small speech turns
emb = self.emb_(current_file)
speech_turns = [speech_turn for speech_turn in speech_turns if len(emb.crop(speech_turn, mode='loose')) > 0]
# speech turns embedding
to_stack = [
np.sum(emb.crop(speech_turn, mode='loose'), axis=0)
for speech_turn in speech_turns]
if len(to_stack) < 1:
return None
fX = l2_normalize(np.vstack(to_stack))
# speech turn clustering
cluster_labels = self.cls_.apply(fX)
# build hypothesis from clustering results
hypothesis = Annotation(uri=current_file['uri'])
for speech_turn, label in zip(speech_turns, cluster_labels):
hypothesis[speech_turn] = label
return hypothesis
class SpeakerDiarizationOracleSegAP(object):
'''Speaker diarization with oracle segmentation and affinity propagation'''
def __init__(self, feature_extraction, emb__pre,
emb__internal=False,
cls__damping=0.8, cls__preference=-20,
cls__metric='cosine'):
super(SpeakerDiarizationOracleSegAP, self).__init__()
self.feature_extraction = feature_extraction
# embedding hyper-parameters
self.emb__internal = emb__internal
# clustering hyper-parameters
self.cls__damping = cls__damping
self.cls__preference = cls__preference
self.cls__metric = cls__metric
step = self.feature_extraction.sliding_window().step
# initialize speech turn embedding module
self.emb_ = Precomputed(emb__pre)
# initialize clustering module
self.cls_ = my_cluster.ClusteringAP(metric=self.cls__metric,
damping=self.cls__damping,
preference=self.cls__preference)
def __call__(self, current_file, annotated=False):
# speech turns
speech_turns = current_file['annotation'].get_timeline()
if annotated:
speech_turns = speech_turns.crop(
get_annotated(current_file))
# remove small speech turns
emb = self.emb_(current_file)
speech_turns = [speech_turn for speech_turn in speech_turns if len(emb.crop(speech_turn, mode='loose')) > 0]
# speech turns embedding
to_stack = [
np.sum(emb.crop(speech_turn, mode='loose'), axis=0)
for speech_turn in speech_turns]
if len(to_stack) < 1:
return None
fX = l2_normalize(np.vstack(to_stack))
# speech turn clustering
cluster_labels = self.cls_.apply(fX)
# build hypothesis from clustering results
hypothesis = Annotation(uri=current_file['uri'])
for speech_turn, label in zip(speech_turns, cluster_labels):
hypothesis[speech_turn] = label
return hypothesis
class SpeakerDiarizationHACPre(object):
'''Speaker diarization with hierarchical agglomerative clustering'''
def __init__(self, feature_extraction, sad__pre, scd__pre, emb__pre,
sad__onset=0.7, sad__offset=0.7, sad__dimension=1,
scd__alpha=0.5, scd__min_duration=1., scd__dimension=1,
emb__internal=False,
cls__method='average', cls__threshold=5,
cls__metric='cosine'):
super(SpeakerDiarizationHACPre, self).__init__()
self.feature_extraction = feature_extraction
# speech activity detection hyper-parameters
self.sad__onset = sad__onset
self.sad__offset = sad__offset
self.sad__dimension = sad__dimension
# speaker change detection hyper-parameters
self.scd__alpha = scd__alpha
self.scd__min_duration = scd__min_duration
self.scd__dimension = scd__dimension
# embedding hyper-parameters
self.emb__internal = emb__internal
# clustering hyper-parameters
self.cls__method = cls__method
self.cls__threshold = cls__threshold
self.cls__metric = cls__metric
step = self.feature_extraction.sliding_window().step
# initialize speech activity detection module
self.sad_ = Precomputed(sad__pre)
self.sad_binarize_ = Binarize(onset=self.sad__onset,
offset=self.sad__offset)
# initialize speaker change detection module
self.scd_ = Precomputed(scd__pre)
self.scd_peak_ = Peak(alpha=self.scd__alpha,
min_duration=self.scd__min_duration,
percentile=False)
# initialize speech turn embedding module
self.emb_ = Precomputed(emb__pre)
# initialize clustering module
self.cls_ = my_cluster.ClusteringHAC(metric=self.cls__metric,
method=self.cls__method,
threshold=self.cls__threshold)
def __call__(self, current_file, annotated=False):
# speech activity detection
soft_sad = self.sad_(current_file)
hard_sad = self.sad_binarize_.apply(
soft_sad, dimension=self.sad__dimension)
# speaker change detection
soft_scd = self.scd_(current_file)
hard_scd = self.scd_peak_.apply(
soft_scd, dimension=self.scd__dimension)
# speech turns
speech_turns = hard_scd.crop(hard_sad)
if annotated:
speech_turns = speech_turns.crop(
get_annotated(current_file))
# remove small speech turns
emb = self.emb_(current_file)
speech_turns = [speech_turn for speech_turn in speech_turns if len(emb.crop(speech_turn, mode='loose')) > 0]
# speech turns embedding
to_stack = [
np.sum(emb.crop(speech_turn, mode='loose'), axis=0)
for speech_turn in speech_turns]
if len(to_stack) < 1:
return None
fX = l2_normalize(np.vstack(to_stack))
# speech turn clustering
cluster_labels = self.cls_.apply(fX)
# build hypothesis from clustering results
hypothesis = Annotation(uri=current_file['uri'])
for speech_turn, label in zip(speech_turns, cluster_labels):
hypothesis[speech_turn] = label
return hypothesis
class SpeakerDiarizationPreStages(object):
def __init__(self, feature_extraction, sad__pre, scd__pre, emb__pre,
sad__onset=0.7, sad__offset=0.7, sad__dimension=1,
scd__alpha=0.5, scd__min_duration=1., scd__dimension=1,
emb__internal=False,
cls__damping=0.8, cls__preference=-20,
cls__metric='cosine'):
super(SpeakerDiarizationPreStages, self).__init__()
self.feature_extraction = feature_extraction
# speech activity detection hyper-parameters
self.sad__onset = sad__onset
self.sad__offset = sad__offset
self.sad__dimension = sad__dimension
# speaker change detection hyper-parameters
self.scd__alpha = scd__alpha
self.scd__min_duration = scd__min_duration
self.scd__dimension = scd__dimension
# embedding hyper-parameters
self.emb__internal = emb__internal
# clustering hyper-parameters
self.cls__damping = cls__damping
self.cls__preference = cls__preference
self.cls__metric = cls__metric
step = self.feature_extraction.sliding_window().step
# initialize speech activity detection module
self.sad_ = Precomputed(sad__pre)
self.sad_binarize_ = Binarize(onset=self.sad__onset,
offset=self.sad__offset)
# initialize speaker change detection module
self.scd_ = Precomputed(scd__pre)
self.scd_peak_ = Peak(alpha=self.scd__alpha,
min_duration=self.scd__min_duration,
percentile=False)
# initialize speech turn embedding module
self.emb_ = Precomputed(emb__pre)
# initialize clustering module
self.cls_ = my_cluster.ClusteringAP(metric=self.cls__metric,
damping=self.cls__damping,
preference=self.cls__preference)
def __call__(self, current_file, annotated=False):
# speech activity detection
soft_sad = self.sad_(current_file)
hard_sad = self.sad_binarize_.apply(
soft_sad, dimension=self.sad__dimension)
sad_output = hard_sad.to_annotation()
# speaker change detection
soft_scd = self.scd_(current_file)
hard_scd = self.scd_peak_.apply(
soft_scd, dimension=self.scd__dimension)
# speech turns
speech_turns = hard_scd.crop(hard_sad)
scd_output = speech_turns.to_annotation()
if annotated:
speech_turns = speech_turns.crop(
get_annotated(current_file))
# remove small speech turns
emb = self.emb_(current_file)
speech_turns = [speech_turn for speech_turn in speech_turns if len(emb.crop(speech_turn, mode='loose')) > 0]
# speech turns embedding
to_stack = [
np.sum(emb.crop(speech_turn, mode='loose'), axis=0)
for speech_turn in speech_turns]
if len(to_stack) < 1:
return None
fX = l2_normalize(np.vstack(to_stack))
# speech turn clustering
cluster_labels = self.cls_.apply(fX)
# build hypothesis from clustering results
hypothesis = Annotation(uri=current_file['uri'])
for speech_turn, label in zip(speech_turns, cluster_labels):
hypothesis[speech_turn] = label
return hypothesis, sad_output, scd_output
class SpeakerDiarizationWeighted(object):
def __init__(self, feature_extraction, sad__pre, scd__pre, weight__pre, emb__pre,
sad__onset=0.7, sad__offset=0.7, sad__dimension=1,
scd__alpha=0.5, scd__min_duration=1., scd__dimension=1,
emb__internal=False,
cls__damping=0.8, cls__preference=-20,
cls__metric='cosine'):
super(SpeakerDiarizationWeighted, self).__init__()
self.feature_extraction = feature_extraction
# speech activity detection hyper-parameters
self.sad__onset = sad__onset
self.sad__offset = sad__offset
self.sad__dimension = sad__dimension
# speaker change detection hyper-parameters
self.scd__alpha = scd__alpha
self.scd__min_duration = scd__min_duration
self.scd__dimension = scd__dimension
# embedding hyper-parameters
self.emb__internal = emb__internal
# clustering hyper-parameters
self.cls__damping = cls__damping
self.cls__preference = cls__preference
self.cls__metric = cls__metric
step = self.feature_extraction.sliding_window().step
# initialize speech activity detection module
self.sad_ = Precomputed(sad__pre)
self.sad_binarize_ = Binarize(onset=self.sad__onset,
offset=self.sad__offset)
# initialize speaker change detection module
self.scd_ = Precomputed(scd__pre)
self.scd_peak_ = Peak(alpha=self.scd__alpha,
min_duration=self.scd__min_duration,
percentile=False)
# initialize weights
self.weight_ = Precomputed(weight__pre)
# initialize speech turn embedding module
self.emb_ = Precomputed(emb__pre)
# initialize clustering module
self.cls_ = my_cluster.ClusteringAP(metric=self.cls__metric,
damping=self.cls__damping,
preference=self.cls__preference)
def __call__(self, current_file, annotated=False):
# speech activity detection
soft_sad = self.sad_(current_file)
hard_sad = self.sad_binarize_.apply(
soft_sad, dimension=self.sad__dimension)
# speaker change detection
soft_scd = self.scd_(current_file)
hard_scd = self.scd_peak_.apply(
soft_scd, dimension=self.scd__dimension)
# speech turns
speech_turns = hard_scd.crop(hard_sad)
if annotated:
speech_turns = speech_turns.crop(
get_annotated(current_file))
# remove small speech turns
emb = self.emb_(current_file)
speech_turns = [speech_turn for speech_turn in speech_turns if len(emb.crop(speech_turn, mode='loose')) > 0]
# weights
weight = self.weight_(current_file)
# speech turns embedding
to_stack = [
np.mean(emb.crop(speech_turn, mode='loose')*(1-weight.crop(speech_turn, mode='loose')), axis=0)
for speech_turn in speech_turns]
if len(to_stack) < 1:
return None
fX = l2_normalize(np.vstack(to_stack))
# speech turn clustering
cluster_labels = self.cls_.apply(fX)
# build hypothesis from clustering results
hypothesis = Annotation(uri=current_file['uri'])
for speech_turn, label in zip(speech_turns, cluster_labels):
hypothesis[speech_turn] = label
return hypothesis
class SpeakerDiarizationOnSceneHAC(object):
def __init__(self, emb__pre,
cls__method='average', cls__threshold=5,
cls__metric='cosine'):
super(SpeakerDiarizationOnSceneHAC, self).__init__()
# clustering hyper-parameters
self.cls__method = cls__method
self.cls__threshold = cls__threshold
self.cls__metric = cls__metric
# initialize speech turn embedding module
self.emb_ = Precomputed(emb__pre)
# initialize clustering module
self.cls_ = my_cluster.ClusteringHAC(metric=self.cls__metric,
method=self.cls__method,
threshold=self.cls__threshold)
def __call__(self, current_file):
# speech turns
hypothesis = Annotation(uri=current_file['uri'])
sencences = current_file['speech_timeline']
scenes = current_file['scenes']
# remove small speech turns
emb = self.emb_(current_file)
#speech_turns = [speech_turn for speech_turn in speech_turns if len(emb.crop(speech_turn, mode='loose')) > 0]
for scene in scenes:
speech_turns = sencences.crop(scene)
if len(speech_turns) == 0:
continue
if len(speech_turns) == 1:
hypothesis[speech_turns[0]] = 1
continue
# speech turns embedding
to_stack = [
np.sum(emb.crop(speech_turn, mode='loose'), axis=0)
for speech_turn in speech_turns]
fX = l2_normalize(np.vstack(to_stack))
# speech turn clustering
cluster_labels = self.cls_.apply(fX)
# build hypothesis from clustering results
for speech_turn, label in zip(speech_turns, cluster_labels):
hypothesis[speech_turn] = label
return hypothesis
class SpeakerDiarizationOnEnrollHAC(object):
def __init__(self,
cls__method='average', cls__threshold=5,
cls__metric='cosine'):
super(SpeakerDiarizationOnEnrollHAC, self).__init__()
# clustering hyper-parameters
self.cls__method = cls__method
self.cls__threshold = cls__threshold
self.cls__metric = cls__metric
# initialize clustering module
self.cls_ = my_cluster.ClusteringHAC(metric=self.cls__metric,
method=self.cls__method,
threshold=self.cls__threshold)
def __call__(self, embedding, speech_turns):
hypothesis = Annotation()
#speech_turns = [speech_turn for speech_turn in speech_turns if len(emb.crop(speech_turn, mode='loose')) > 0]
if len(speech_turns) == 0:
return hypothesis
if len(speech_turns) == 1:
hypothesis[speech_turns[0]] = 1
return hypothesis
# speech turns embedding
to_stack = [
np.sum(embedding.crop(speech_turn, mode='loose'), axis=0)
for speech_turn in speech_turns]
fX = l2_normalize(np.vstack(to_stack))
# speech turn clustering
cluster_labels = self.cls_.apply(fX)
# build hypothesis from clustering results
for speech_turn, label in zip(speech_turns, cluster_labels):
hypothesis[speech_turn] = label
return hypothesis | 0.566139 | 0.1933 |
from functools import partial
import re
class Bot(object):
def __init__(self, id):
self.id = id
self.chips = []
def get(self, value):
if not value in self.chips:
self.chips.append(value)
self.chips.sort()
def remove_low(self):
return self.chips.pop(0)
def remove_high(self):
return self.chips.pop()
def __repr__(self):
return "<{}> {}".format(self.id, self.chips)
class Factory(object):
def __init__(self):
self.bots = {}
def get_or_create(self, id):
if not id in self.bots:
self.bots[id] = Bot(id)
return self.bots[id]
def __str__(self):
return "\n".join(str(b) for b in sorted(self.bots.values(), key=lambda bot: bot.id))
class Move(object):
def __init__(self, bot, target1, target2):
self.bot = bot
self.target1 = target1
self.target2 = target2
def applies(self):
return len(self.bot.chips) == 2
def apply(self):
low = self.bot.remove_low()
high = self.bot.remove_high()
self.target1.get(low)
self.target2.get(high)
def execute(sequence, detect_callback):
queue = []
factory = Factory()
for line in sequence.split("\n"):
get_match = re.match("^value (?P<value>[0-9]+) goes to (?P<target>bot [0-9]+)$", line)
if get_match:
target = factory.get_or_create(get_match.group("target"))
value = int(get_match.group("value"))
target.get(value)
gives_match = re.match(
"^(?P<bot1>bot [0-9]+) gives low to (?P<target1>(bot|output) [0-9]+) and high to (?P<target2>(bot|output) [0-9]+)$",
line)
if gives_match:
bot = factory.get_or_create(gives_match.group("bot1"))
target1 = factory.get_or_create(gives_match.group("target1"))
target2 = factory.get_or_create(gives_match.group("target2"))
queue.append(Move(bot, target1, target2))
assert get_match or gives_match
while True:
next_queue = []
while queue:
queued = queue.pop(0)
if queued.applies():
detect_callback(queued.bot)
queued.apply()
else:
next_queue.append(queued)
if not next_queue:
break
queue = next_queue
print
print str(factory)
# Sample instruction list from the puzzle statement (part-1 dry run).
# NOTE(review): rebinding `input` shadows the builtin; harmless in this script.
input = """value 5 goes to bot 2
bot 2 gives low to bot 1 and high to bot 0
value 3 goes to bot 1
bot 1 gives low to output 1 and high to bot 0
bot 0 gives low to output 2 and high to output 0
value 2 goes to bot 2"""
# Part 1 (sample): report the bot that compares the value-5 and value-2 chips.
def detect_5_2(bot):
    # Called by execute() just before `bot` hands off its pair of chips.
    if 5 in bot.chips and 2 in bot.chips:
        print "!", bot
execute(input, detect_5_2)
input = """bot 59 gives low to bot 176 and high to bot 120
bot 92 gives low to bot 42 and high to bot 187
value 31 goes to bot 114
bot 182 gives low to bot 49 and high to bot 176
bot 17 gives low to bot 181 and high to bot 162
bot 36 gives low to bot 118 and high to bot 121
bot 118 gives low to bot 164 and high to bot 55
bot 172 gives low to bot 79 and high to bot 123
bot 51 gives low to bot 60 and high to bot 31
bot 48 gives low to bot 107 and high to bot 58
bot 142 gives low to output 6 and high to bot 35
bot 133 gives low to output 4 and high to bot 47
bot 134 gives low to bot 122 and high to bot 66
bot 106 gives low to bot 155 and high to bot 99
bot 77 gives low to bot 93 and high to bot 84
bot 9 gives low to bot 173 and high to bot 197
bot 64 gives low to bot 123 and high to bot 48
bot 177 gives low to bot 21 and high to bot 132
bot 94 gives low to bot 6 and high to bot 25
bot 126 gives low to bot 193 and high to bot 56
bot 74 gives low to bot 187 and high to bot 125
bot 80 gives low to bot 41 and high to bot 191
bot 62 gives low to bot 157 and high to bot 138
bot 66 gives low to bot 1 and high to bot 209
bot 90 gives low to bot 104 and high to bot 34
bot 68 gives low to bot 23 and high to bot 87
bot 121 gives low to bot 55 and high to bot 126
bot 122 gives low to bot 137 and high to bot 1
bot 209 gives low to bot 168 and high to bot 26
bot 141 gives low to bot 170 and high to bot 6
bot 149 gives low to bot 62 and high to bot 13
bot 120 gives low to bot 179 and high to bot 71
bot 160 gives low to bot 194 and high to bot 151
bot 86 gives low to bot 96 and high to bot 106
value 13 goes to bot 9
bot 180 gives low to bot 189 and high to bot 27
value 67 goes to bot 88
bot 169 gives low to bot 99 and high to bot 159
bot 56 gives low to bot 98 and high to bot 147
bot 197 gives low to bot 174 and high to bot 81
bot 57 gives low to bot 113 and high to bot 179
bot 39 gives low to bot 115 and high to bot 3
bot 79 gives low to bot 22 and high to bot 40
bot 161 gives low to output 14 and high to bot 185
bot 21 gives low to bot 114 and high to bot 119
bot 136 gives low to bot 28 and high to bot 158
bot 105 gives low to bot 89 and high to bot 19
bot 168 gives low to bot 126 and high to bot 26
bot 193 gives low to bot 64 and high to bot 98
bot 186 gives low to bot 86 and high to bot 178
value 11 goes to bot 165
bot 33 gives low to bot 116 and high to bot 150
bot 32 gives low to bot 154 and high to bot 206
bot 166 gives low to bot 33 and high to bot 139
value 7 goes to bot 63
bot 203 gives low to bot 172 and high to bot 64
bot 200 gives low to bot 94 and high to bot 25
value 43 goes to bot 76
bot 145 gives low to bot 103 and high to bot 128
bot 119 gives low to bot 186 and high to bot 97
bot 12 gives low to bot 31 and high to bot 4
bot 23 gives low to bot 198 and high to bot 171
bot 34 gives low to bot 10 and high to bot 20
bot 198 gives low to bot 43 and high to bot 17
bot 50 gives low to output 1 and high to bot 127
bot 155 gives low to bot 191 and high to bot 32
bot 206 gives low to bot 12 and high to bot 43
bot 96 gives low to bot 80 and high to bot 155
bot 93 gives low to bot 44 and high to bot 70
bot 24 gives low to bot 85 and high to bot 83
bot 30 gives low to bot 159 and high to bot 68
bot 55 gives low to bot 203 and high to bot 193
bot 199 gives low to bot 68 and high to bot 135
bot 170 gives low to bot 97 and high to bot 5
bot 65 gives low to bot 152 and high to bot 194
bot 43 gives low to bot 4 and high to bot 181
bot 113 gives low to output 9 and high to bot 161
bot 81 gives low to bot 141 and high to bot 94
value 29 goes to bot 7
bot 46 gives low to bot 175 and high to bot 195
value 47 goes to bot 21
value 23 goes to bot 42
bot 13 gives low to bot 138 and high to bot 61
bot 135 gives low to bot 87 and high to bot 111
bot 194 gives low to bot 190 and high to bot 82
value 73 goes to bot 109
bot 154 gives low to bot 51 and high to bot 12
bot 1 gives low to bot 18 and high to bot 209
bot 98 gives low to bot 48 and high to bot 45
bot 147 gives low to bot 45 and high to bot 95
bot 47 gives low to output 19 and high to bot 152
bot 26 gives low to bot 56 and high to bot 147
bot 179 gives low to bot 161 and high to bot 71
bot 148 gives low to bot 204 and high to bot 137
bot 5 gives low to bot 67 and high to bot 85
bot 174 gives low to bot 132 and high to bot 141
bot 8 gives low to bot 13 and high to bot 75
bot 82 gives low to bot 146 and high to bot 22
bot 123 gives low to bot 40 and high to bot 107
bot 99 gives low to bot 32 and high to bot 201
bot 41 gives low to bot 196 and high to bot 192
bot 139 gives low to bot 150 and high to bot 153
bot 11 gives low to output 16 and high to bot 113
bot 72 gives low to bot 65 and high to bot 160
bot 195 gives low to bot 133 and high to bot 183
bot 54 gives low to output 12 and high to output 10
bot 158 gives low to bot 102 and high to bot 110
bot 112 gives low to bot 19 and high to bot 118
bot 31 gives low to bot 208 and high to bot 143
bot 167 gives low to bot 7 and high to bot 96
bot 63 gives low to bot 92 and high to bot 74
bot 116 gives low to bot 20 and high to bot 131
bot 184 gives low to bot 39 and high to bot 3
bot 162 gives low to bot 205 and high to bot 39
bot 108 gives low to output 11 and high to bot 175
value 53 goes to bot 207
bot 111 gives low to bot 202 and high to bot 184
bot 25 gives low to bot 24 and high to bot 83
value 71 goes to bot 77
bot 69 gives low to bot 142 and high to bot 0
bot 146 gives low to output 13 and high to bot 53
bot 7 gives low to bot 76 and high to bot 80
bot 131 gives low to bot 73 and high to bot 204
bot 102 gives low to bot 195 and high to bot 117
bot 76 gives low to bot 165 and high to bot 41
bot 153 gives low to bot 148 and high to bot 122
bot 208 gives low to bot 90 and high to bot 163
bot 70 gives low to bot 144 and high to bot 78
bot 125 gives low to bot 8 and high to bot 156
bot 83 gives low to bot 199 and high to bot 135
bot 75 gives low to bot 61 and high to bot 104
bot 67 gives low to bot 169 and high to bot 30
bot 14 gives low to bot 81 and high to bot 200
bot 159 gives low to bot 201 and high to bot 23
value 3 goes to bot 93
bot 110 gives low to bot 117 and high to bot 89
bot 128 gives low to bot 129 and high to bot 182
bot 87 gives low to bot 171 and high to bot 111
bot 45 gives low to bot 58 and high to bot 95
bot 4 gives low to bot 143 and high to bot 166
bot 60 gives low to bot 156 and high to bot 208
bot 27 gives low to bot 108 and high to bot 46
bot 42 gives low to bot 207 and high to bot 149
bot 117 gives low to bot 183 and high to bot 72
bot 115 gives low to bot 153 and high to bot 134
bot 140 gives low to bot 125 and high to bot 60
bot 173 gives low to bot 177 and high to bot 174
bot 138 gives low to bot 180 and high to bot 52
bot 100 gives low to bot 38 and high to bot 59
value 41 goes to bot 173
value 59 goes to bot 177
bot 165 gives low to bot 63 and high to bot 196
bot 84 gives low to bot 70 and high to bot 78
bot 2 gives low to bot 160 and high to bot 91
value 61 goes to bot 29
bot 114 gives low to bot 109 and high to bot 186
bot 205 gives low to bot 139 and high to bot 115
bot 175 gives low to output 17 and high to bot 133
bot 176 gives low to bot 57 and high to bot 120
bot 107 gives low to bot 124 and high to bot 15
bot 52 gives low to bot 27 and high to bot 28
bot 103 gives low to bot 50 and high to bot 129
bot 150 gives low to bot 131 and high to bot 148
bot 16 gives low to output 20 and high to bot 189
bot 190 gives low to output 18 and high to bot 146
bot 157 gives low to bot 16 and high to bot 180
bot 10 gives low to bot 158 and high to bot 130
bot 202 gives low to bot 162 and high to bot 184
bot 88 gives low to bot 77 and high to bot 84
bot 188 gives low to bot 128 and high to bot 38
bot 58 gives low to bot 15 and high to bot 101
bot 171 gives low to bot 17 and high to bot 202
bot 97 gives low to bot 178 and high to bot 67
bot 163 gives low to bot 34 and high to bot 116
bot 124 gives low to bot 0 and high to bot 145
bot 71 gives low to bot 185 and high to bot 54
bot 78 gives low to bot 14 and high to bot 200
bot 101 gives low to bot 188 and high to bot 100
bot 189 gives low to output 7 and high to bot 108
bot 95 gives low to bot 101 and high to bot 100
bot 0 gives low to bot 35 and high to bot 103
bot 207 gives low to bot 37 and high to bot 62
bot 49 gives low to bot 11 and high to bot 57
bot 85 gives low to bot 30 and high to bot 199
bot 89 gives low to bot 72 and high to bot 2
bot 3 gives low to bot 134 and high to bot 66
bot 181 gives low to bot 166 and high to bot 205
bot 91 gives low to bot 151 and high to bot 172
value 17 goes to bot 167
bot 20 gives low to bot 130 and high to bot 73
bot 196 gives low to bot 74 and high to bot 140
bot 18 gives low to bot 121 and high to bot 168
bot 185 gives low to output 15 and high to bot 54
bot 178 gives low to bot 106 and high to bot 169
bot 129 gives low to bot 127 and high to bot 49
bot 19 gives low to bot 2 and high to bot 164
bot 15 gives low to bot 145 and high to bot 188
bot 144 gives low to bot 197 and high to bot 14
bot 201 gives low to bot 206 and high to bot 198
bot 164 gives low to bot 91 and high to bot 203
bot 73 gives low to bot 105 and high to bot 112
bot 191 gives low to bot 192 and high to bot 154
bot 109 gives low to bot 167 and high to bot 86
bot 151 gives low to bot 82 and high to bot 79
bot 53 gives low to output 2 and high to bot 142
bot 37 gives low to bot 29 and high to bot 157
value 2 goes to bot 44
bot 204 gives low to bot 112 and high to bot 36
bot 40 gives low to bot 69 and high to bot 124
bot 22 gives low to bot 53 and high to bot 69
bot 104 gives low to bot 136 and high to bot 10
value 19 goes to bot 88
bot 127 gives low to output 5 and high to bot 11
bot 183 gives low to bot 47 and high to bot 65
bot 192 gives low to bot 140 and high to bot 51
bot 38 gives low to bot 182 and high to bot 59
bot 61 gives low to bot 52 and high to bot 136
bot 156 gives low to bot 75 and high to bot 90
value 37 goes to bot 37
bot 28 gives low to bot 46 and high to bot 102
bot 187 gives low to bot 149 and high to bot 8
bot 132 gives low to bot 119 and high to bot 170
bot 44 gives low to bot 9 and high to bot 144
bot 29 gives low to output 0 and high to bot 16
bot 6 gives low to bot 5 and high to bot 24
bot 137 gives low to bot 36 and high to bot 18
bot 130 gives low to bot 110 and high to bot 105
value 5 goes to bot 92
bot 35 gives low to output 3 and high to bot 50
bot 152 gives low to output 8 and high to bot 190
bot 143 gives low to bot 163 and high to bot 33"""
def detect_61_17(bot):
if 61 in bot.chips and 17 in bot.chips:
print "!", bot
execute(input, detect_61_17) | 10.py | from functools import partial
import re
class Bot(object):
    """A factory bot holding microchips, kept sorted ascending.

    `id` is the textual node name (e.g. "bot 59" or "output 1"); output
    bins are modeled as bots that simply accumulate chips.
    """

    def __init__(self, id):
        # `id` shadows the builtin, but the parameter name is part of the
        # script's interface, so it is kept.
        self.id = id
        self.chips = []

    def get(self, value):
        """Receive chip `value`; duplicates are ignored, order maintained."""
        if value not in self.chips:
            self.chips.append(value)
            self.chips.sort()

    def remove_low(self):
        """Remove and return the lowest-valued chip."""
        return self.chips.pop(0)

    def remove_high(self):
        """Remove and return the highest-valued chip."""
        return self.chips.pop()

    def __repr__(self):
        return "<{}> {}".format(self.id, self.chips)
class Factory(object):
    """Registry of bots/outputs keyed by their textual id (e.g. "bot 59")."""

    def __init__(self):
        self.bots = {}

    def get_or_create(self, id):
        """Return the node named `id`, creating it on first reference."""
        if id not in self.bots:
            self.bots[id] = Bot(id)
        return self.bots[id]

    def __str__(self):
        # Lexicographic order of the string ids — adequate for debug output.
        return "\n".join(
            str(b) for b in sorted(self.bots.values(), key=lambda bot: bot.id))
class Move(object):
    """A pending hand-off: give the low chip to target1, the high to target2."""

    def __init__(self, bot, target1, target2):
        self.bot = bot
        self.target1 = target1
        self.target2 = target2

    def applies(self):
        """A move may fire only once the source bot holds exactly two chips."""
        return len(self.bot.chips) == 2

    def apply(self):
        """Fire the move: low chip goes to target1, high chip to target2."""
        low, high = self.bot.remove_low(), self.bot.remove_high()
        self.target1.get(low)
        self.target2.get(high)
def execute(sequence, detect_callback):
queue = []
factory = Factory()
for line in sequence.split("\n"):
get_match = re.match("^value (?P<value>[0-9]+) goes to (?P<target>bot [0-9]+)$", line)
if get_match:
target = factory.get_or_create(get_match.group("target"))
value = int(get_match.group("value"))
target.get(value)
gives_match = re.match(
"^(?P<bot1>bot [0-9]+) gives low to (?P<target1>(bot|output) [0-9]+) and high to (?P<target2>(bot|output) [0-9]+)$",
line)
if gives_match:
bot = factory.get_or_create(gives_match.group("bot1"))
target1 = factory.get_or_create(gives_match.group("target1"))
target2 = factory.get_or_create(gives_match.group("target2"))
queue.append(Move(bot, target1, target2))
assert get_match or gives_match
while True:
next_queue = []
while queue:
queued = queue.pop(0)
if queued.applies():
detect_callback(queued.bot)
queued.apply()
else:
next_queue.append(queued)
if not next_queue:
break
queue = next_queue
print
print str(factory)
# Sample instruction list from the puzzle statement (part-1 dry run).
# NOTE(review): rebinding `input` shadows the builtin; harmless in this script.
input = """value 5 goes to bot 2
bot 2 gives low to bot 1 and high to bot 0
value 3 goes to bot 1
bot 1 gives low to output 1 and high to bot 0
bot 0 gives low to output 2 and high to output 0
value 2 goes to bot 2"""
# Part 1 (sample): report the bot that compares the value-5 and value-2 chips.
def detect_5_2(bot):
    # Called by execute() just before `bot` hands off its pair of chips.
    if 5 in bot.chips and 2 in bot.chips:
        print "!", bot
execute(input, detect_5_2)
input = """bot 59 gives low to bot 176 and high to bot 120
bot 92 gives low to bot 42 and high to bot 187
value 31 goes to bot 114
bot 182 gives low to bot 49 and high to bot 176
bot 17 gives low to bot 181 and high to bot 162
bot 36 gives low to bot 118 and high to bot 121
bot 118 gives low to bot 164 and high to bot 55
bot 172 gives low to bot 79 and high to bot 123
bot 51 gives low to bot 60 and high to bot 31
bot 48 gives low to bot 107 and high to bot 58
bot 142 gives low to output 6 and high to bot 35
bot 133 gives low to output 4 and high to bot 47
bot 134 gives low to bot 122 and high to bot 66
bot 106 gives low to bot 155 and high to bot 99
bot 77 gives low to bot 93 and high to bot 84
bot 9 gives low to bot 173 and high to bot 197
bot 64 gives low to bot 123 and high to bot 48
bot 177 gives low to bot 21 and high to bot 132
bot 94 gives low to bot 6 and high to bot 25
bot 126 gives low to bot 193 and high to bot 56
bot 74 gives low to bot 187 and high to bot 125
bot 80 gives low to bot 41 and high to bot 191
bot 62 gives low to bot 157 and high to bot 138
bot 66 gives low to bot 1 and high to bot 209
bot 90 gives low to bot 104 and high to bot 34
bot 68 gives low to bot 23 and high to bot 87
bot 121 gives low to bot 55 and high to bot 126
bot 122 gives low to bot 137 and high to bot 1
bot 209 gives low to bot 168 and high to bot 26
bot 141 gives low to bot 170 and high to bot 6
bot 149 gives low to bot 62 and high to bot 13
bot 120 gives low to bot 179 and high to bot 71
bot 160 gives low to bot 194 and high to bot 151
bot 86 gives low to bot 96 and high to bot 106
value 13 goes to bot 9
bot 180 gives low to bot 189 and high to bot 27
value 67 goes to bot 88
bot 169 gives low to bot 99 and high to bot 159
bot 56 gives low to bot 98 and high to bot 147
bot 197 gives low to bot 174 and high to bot 81
bot 57 gives low to bot 113 and high to bot 179
bot 39 gives low to bot 115 and high to bot 3
bot 79 gives low to bot 22 and high to bot 40
bot 161 gives low to output 14 and high to bot 185
bot 21 gives low to bot 114 and high to bot 119
bot 136 gives low to bot 28 and high to bot 158
bot 105 gives low to bot 89 and high to bot 19
bot 168 gives low to bot 126 and high to bot 26
bot 193 gives low to bot 64 and high to bot 98
bot 186 gives low to bot 86 and high to bot 178
value 11 goes to bot 165
bot 33 gives low to bot 116 and high to bot 150
bot 32 gives low to bot 154 and high to bot 206
bot 166 gives low to bot 33 and high to bot 139
value 7 goes to bot 63
bot 203 gives low to bot 172 and high to bot 64
bot 200 gives low to bot 94 and high to bot 25
value 43 goes to bot 76
bot 145 gives low to bot 103 and high to bot 128
bot 119 gives low to bot 186 and high to bot 97
bot 12 gives low to bot 31 and high to bot 4
bot 23 gives low to bot 198 and high to bot 171
bot 34 gives low to bot 10 and high to bot 20
bot 198 gives low to bot 43 and high to bot 17
bot 50 gives low to output 1 and high to bot 127
bot 155 gives low to bot 191 and high to bot 32
bot 206 gives low to bot 12 and high to bot 43
bot 96 gives low to bot 80 and high to bot 155
bot 93 gives low to bot 44 and high to bot 70
bot 24 gives low to bot 85 and high to bot 83
bot 30 gives low to bot 159 and high to bot 68
bot 55 gives low to bot 203 and high to bot 193
bot 199 gives low to bot 68 and high to bot 135
bot 170 gives low to bot 97 and high to bot 5
bot 65 gives low to bot 152 and high to bot 194
bot 43 gives low to bot 4 and high to bot 181
bot 113 gives low to output 9 and high to bot 161
bot 81 gives low to bot 141 and high to bot 94
value 29 goes to bot 7
bot 46 gives low to bot 175 and high to bot 195
value 47 goes to bot 21
value 23 goes to bot 42
bot 13 gives low to bot 138 and high to bot 61
bot 135 gives low to bot 87 and high to bot 111
bot 194 gives low to bot 190 and high to bot 82
value 73 goes to bot 109
bot 154 gives low to bot 51 and high to bot 12
bot 1 gives low to bot 18 and high to bot 209
bot 98 gives low to bot 48 and high to bot 45
bot 147 gives low to bot 45 and high to bot 95
bot 47 gives low to output 19 and high to bot 152
bot 26 gives low to bot 56 and high to bot 147
bot 179 gives low to bot 161 and high to bot 71
bot 148 gives low to bot 204 and high to bot 137
bot 5 gives low to bot 67 and high to bot 85
bot 174 gives low to bot 132 and high to bot 141
bot 8 gives low to bot 13 and high to bot 75
bot 82 gives low to bot 146 and high to bot 22
bot 123 gives low to bot 40 and high to bot 107
bot 99 gives low to bot 32 and high to bot 201
bot 41 gives low to bot 196 and high to bot 192
bot 139 gives low to bot 150 and high to bot 153
bot 11 gives low to output 16 and high to bot 113
bot 72 gives low to bot 65 and high to bot 160
bot 195 gives low to bot 133 and high to bot 183
bot 54 gives low to output 12 and high to output 10
bot 158 gives low to bot 102 and high to bot 110
bot 112 gives low to bot 19 and high to bot 118
bot 31 gives low to bot 208 and high to bot 143
bot 167 gives low to bot 7 and high to bot 96
bot 63 gives low to bot 92 and high to bot 74
bot 116 gives low to bot 20 and high to bot 131
bot 184 gives low to bot 39 and high to bot 3
bot 162 gives low to bot 205 and high to bot 39
bot 108 gives low to output 11 and high to bot 175
value 53 goes to bot 207
bot 111 gives low to bot 202 and high to bot 184
bot 25 gives low to bot 24 and high to bot 83
value 71 goes to bot 77
bot 69 gives low to bot 142 and high to bot 0
bot 146 gives low to output 13 and high to bot 53
bot 7 gives low to bot 76 and high to bot 80
bot 131 gives low to bot 73 and high to bot 204
bot 102 gives low to bot 195 and high to bot 117
bot 76 gives low to bot 165 and high to bot 41
bot 153 gives low to bot 148 and high to bot 122
bot 208 gives low to bot 90 and high to bot 163
bot 70 gives low to bot 144 and high to bot 78
bot 125 gives low to bot 8 and high to bot 156
bot 83 gives low to bot 199 and high to bot 135
bot 75 gives low to bot 61 and high to bot 104
bot 67 gives low to bot 169 and high to bot 30
bot 14 gives low to bot 81 and high to bot 200
bot 159 gives low to bot 201 and high to bot 23
value 3 goes to bot 93
bot 110 gives low to bot 117 and high to bot 89
bot 128 gives low to bot 129 and high to bot 182
bot 87 gives low to bot 171 and high to bot 111
bot 45 gives low to bot 58 and high to bot 95
bot 4 gives low to bot 143 and high to bot 166
bot 60 gives low to bot 156 and high to bot 208
bot 27 gives low to bot 108 and high to bot 46
bot 42 gives low to bot 207 and high to bot 149
bot 117 gives low to bot 183 and high to bot 72
bot 115 gives low to bot 153 and high to bot 134
bot 140 gives low to bot 125 and high to bot 60
bot 173 gives low to bot 177 and high to bot 174
bot 138 gives low to bot 180 and high to bot 52
bot 100 gives low to bot 38 and high to bot 59
value 41 goes to bot 173
value 59 goes to bot 177
bot 165 gives low to bot 63 and high to bot 196
bot 84 gives low to bot 70 and high to bot 78
bot 2 gives low to bot 160 and high to bot 91
value 61 goes to bot 29
bot 114 gives low to bot 109 and high to bot 186
bot 205 gives low to bot 139 and high to bot 115
bot 175 gives low to output 17 and high to bot 133
bot 176 gives low to bot 57 and high to bot 120
bot 107 gives low to bot 124 and high to bot 15
bot 52 gives low to bot 27 and high to bot 28
bot 103 gives low to bot 50 and high to bot 129
bot 150 gives low to bot 131 and high to bot 148
bot 16 gives low to output 20 and high to bot 189
bot 190 gives low to output 18 and high to bot 146
bot 157 gives low to bot 16 and high to bot 180
bot 10 gives low to bot 158 and high to bot 130
bot 202 gives low to bot 162 and high to bot 184
bot 88 gives low to bot 77 and high to bot 84
bot 188 gives low to bot 128 and high to bot 38
bot 58 gives low to bot 15 and high to bot 101
bot 171 gives low to bot 17 and high to bot 202
bot 97 gives low to bot 178 and high to bot 67
bot 163 gives low to bot 34 and high to bot 116
bot 124 gives low to bot 0 and high to bot 145
bot 71 gives low to bot 185 and high to bot 54
bot 78 gives low to bot 14 and high to bot 200
bot 101 gives low to bot 188 and high to bot 100
bot 189 gives low to output 7 and high to bot 108
bot 95 gives low to bot 101 and high to bot 100
bot 0 gives low to bot 35 and high to bot 103
bot 207 gives low to bot 37 and high to bot 62
bot 49 gives low to bot 11 and high to bot 57
bot 85 gives low to bot 30 and high to bot 199
bot 89 gives low to bot 72 and high to bot 2
bot 3 gives low to bot 134 and high to bot 66
bot 181 gives low to bot 166 and high to bot 205
bot 91 gives low to bot 151 and high to bot 172
value 17 goes to bot 167
bot 20 gives low to bot 130 and high to bot 73
bot 196 gives low to bot 74 and high to bot 140
bot 18 gives low to bot 121 and high to bot 168
bot 185 gives low to output 15 and high to bot 54
bot 178 gives low to bot 106 and high to bot 169
bot 129 gives low to bot 127 and high to bot 49
bot 19 gives low to bot 2 and high to bot 164
bot 15 gives low to bot 145 and high to bot 188
bot 144 gives low to bot 197 and high to bot 14
bot 201 gives low to bot 206 and high to bot 198
bot 164 gives low to bot 91 and high to bot 203
bot 73 gives low to bot 105 and high to bot 112
bot 191 gives low to bot 192 and high to bot 154
bot 109 gives low to bot 167 and high to bot 86
bot 151 gives low to bot 82 and high to bot 79
bot 53 gives low to output 2 and high to bot 142
bot 37 gives low to bot 29 and high to bot 157
value 2 goes to bot 44
bot 204 gives low to bot 112 and high to bot 36
bot 40 gives low to bot 69 and high to bot 124
bot 22 gives low to bot 53 and high to bot 69
bot 104 gives low to bot 136 and high to bot 10
value 19 goes to bot 88
bot 127 gives low to output 5 and high to bot 11
bot 183 gives low to bot 47 and high to bot 65
bot 192 gives low to bot 140 and high to bot 51
bot 38 gives low to bot 182 and high to bot 59
bot 61 gives low to bot 52 and high to bot 136
bot 156 gives low to bot 75 and high to bot 90
value 37 goes to bot 37
bot 28 gives low to bot 46 and high to bot 102
bot 187 gives low to bot 149 and high to bot 8
bot 132 gives low to bot 119 and high to bot 170
bot 44 gives low to bot 9 and high to bot 144
bot 29 gives low to output 0 and high to bot 16
bot 6 gives low to bot 5 and high to bot 24
bot 137 gives low to bot 36 and high to bot 18
bot 130 gives low to bot 110 and high to bot 105
value 5 goes to bot 92
bot 35 gives low to output 3 and high to bot 50
bot 152 gives low to output 8 and high to bot 190
bot 143 gives low to bot 163 and high to bot 33"""
def detect_61_17(bot):
if 61 in bot.chips and 17 in bot.chips:
print "!", bot
execute(input, detect_61_17) | 0.531209 | 0.541773 |
from pprint import pformat
from six import iteritems
import re
class InstanceMetaData(object):
    """Metadata describing a single cluster instance (Cloudbreak API model).

    NOTE: originally generated by the swagger code generator.  Edited only
    to fix docstring typos, remove the third-party ``six`` dependency
    (``iteritems`` replaced by the equivalent ``dict.items()``), and strip
    dataset-extraction residue; structure and interface are unchanged.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    swagger_types = {
        'private_ip': 'str',
        'public_ip': 'str',
        'ssh_port': 'int',
        'instance_id': 'str',
        'ambari_server': 'bool',
        'discovery_fqdn': 'str',
        'instance_group': 'str',
        'instance_status': 'str',
        'instance_type': 'str'
    }

    attribute_map = {
        'private_ip': 'privateIp',
        'public_ip': 'publicIp',
        'ssh_port': 'sshPort',
        'instance_id': 'instanceId',
        'ambari_server': 'ambariServer',
        'discovery_fqdn': 'discoveryFQDN',
        'instance_group': 'instanceGroup',
        'instance_status': 'instanceStatus',
        'instance_type': 'instanceType'
    }

    def __init__(self, private_ip=None, public_ip=None, ssh_port=None, instance_id=None, ambari_server=False, discovery_fqdn=None, instance_group=None, instance_status=None, instance_type=None):
        """
        InstanceMetaData - a model defined in Swagger
        """
        self._private_ip = None
        self._public_ip = None
        self._ssh_port = None
        self._instance_id = None
        self._ambari_server = None
        self._discovery_fqdn = None
        self._instance_group = None
        self._instance_status = None
        self._instance_type = None

        if private_ip is not None:
            self.private_ip = private_ip
        if public_ip is not None:
            self.public_ip = public_ip
        if ssh_port is not None:
            self.ssh_port = ssh_port
        if instance_id is not None:
            self.instance_id = instance_id
        # NOTE(review): ambari_server defaults to False (not None), so this
        # guard always fires for a default-constructed instance — kept to
        # preserve the generated behavior.
        if ambari_server is not None:
            self.ambari_server = ambari_server
        if discovery_fqdn is not None:
            self.discovery_fqdn = discovery_fqdn
        if instance_group is not None:
            self.instance_group = instance_group
        if instance_status is not None:
            self.instance_status = instance_status
        if instance_type is not None:
            self.instance_type = instance_type

    @property
    def private_ip(self):
        """
        Gets the private_ip of this InstanceMetaData.
        private IP of the instance

        :return: The private_ip of this InstanceMetaData.
        :rtype: str
        """
        return self._private_ip

    @private_ip.setter
    def private_ip(self, private_ip):
        """
        Sets the private_ip of this InstanceMetaData.
        private IP of the instance

        :param private_ip: The private_ip of this InstanceMetaData.
        :type: str
        """
        self._private_ip = private_ip

    @property
    def public_ip(self):
        """
        Gets the public_ip of this InstanceMetaData.
        public IP of the instance

        :return: The public_ip of this InstanceMetaData.
        :rtype: str
        """
        return self._public_ip

    @public_ip.setter
    def public_ip(self, public_ip):
        """
        Sets the public_ip of this InstanceMetaData.
        public IP of the instance

        :param public_ip: The public_ip of this InstanceMetaData.
        :type: str
        """
        self._public_ip = public_ip

    @property
    def ssh_port(self):
        """
        Gets the ssh_port of this InstanceMetaData.

        :return: The ssh_port of this InstanceMetaData.
        :rtype: int
        """
        return self._ssh_port

    @ssh_port.setter
    def ssh_port(self, ssh_port):
        """
        Sets the ssh_port of this InstanceMetaData.

        :param ssh_port: The ssh_port of this InstanceMetaData.
        :type: int
        """
        self._ssh_port = ssh_port

    @property
    def instance_id(self):
        """
        Gets the instance_id of this InstanceMetaData.
        id of the instance

        :return: The instance_id of this InstanceMetaData.
        :rtype: str
        """
        return self._instance_id

    @instance_id.setter
    def instance_id(self, instance_id):
        """
        Sets the instance_id of this InstanceMetaData.
        id of the instance

        :param instance_id: The instance_id of this InstanceMetaData.
        :type: str
        """
        self._instance_id = instance_id

    @property
    def ambari_server(self):
        """
        Gets the ambari_server of this InstanceMetaData.
        whether this instance hosts the Ambari server

        :return: The ambari_server of this InstanceMetaData.
        :rtype: bool
        """
        return self._ambari_server

    @ambari_server.setter
    def ambari_server(self, ambari_server):
        """
        Sets the ambari_server of this InstanceMetaData.
        whether this instance hosts the Ambari server

        :param ambari_server: The ambari_server of this InstanceMetaData.
        :type: bool
        """
        self._ambari_server = ambari_server

    @property
    def discovery_fqdn(self):
        """
        Gets the discovery_fqdn of this InstanceMetaData.
        the fully qualified domain name of the node in the service discovery cluster

        :return: The discovery_fqdn of this InstanceMetaData.
        :rtype: str
        """
        return self._discovery_fqdn

    @discovery_fqdn.setter
    def discovery_fqdn(self, discovery_fqdn):
        """
        Sets the discovery_fqdn of this InstanceMetaData.
        the fully qualified domain name of the node in the service discovery cluster

        :param discovery_fqdn: The discovery_fqdn of this InstanceMetaData.
        :type: str
        """
        self._discovery_fqdn = discovery_fqdn

    @property
    def instance_group(self):
        """
        Gets the instance_group of this InstanceMetaData.
        name of the instance group

        :return: The instance_group of this InstanceMetaData.
        :rtype: str
        """
        return self._instance_group

    @instance_group.setter
    def instance_group(self, instance_group):
        """
        Sets the instance_group of this InstanceMetaData.
        name of the instance group

        :param instance_group: The instance_group of this InstanceMetaData.
        :type: str
        """
        self._instance_group = instance_group

    @property
    def instance_status(self):
        """
        Gets the instance_status of this InstanceMetaData.
        status of the instance

        :return: The instance_status of this InstanceMetaData.
        :rtype: str
        """
        return self._instance_status

    @instance_status.setter
    def instance_status(self, instance_status):
        """
        Sets the instance_status of this InstanceMetaData.
        status of the instance

        :param instance_status: The instance_status of this InstanceMetaData.
        :type: str
        :raises ValueError: if the value is not one of the allowed statuses.
        """
        allowed_values = ["REQUESTED", "CREATED", "UNREGISTERED", "REGISTERED", "DECOMMISSIONED", "TERMINATED", "DELETED_ON_PROVIDER_SIDE", "FAILED", "STOPPED"]
        if instance_status not in allowed_values:
            raise ValueError(
                "Invalid value for `instance_status` ({0}), must be one of {1}"
                .format(instance_status, allowed_values)
            )
        self._instance_status = instance_status

    @property
    def instance_type(self):
        """
        Gets the instance_type of this InstanceMetaData.
        type of the instance

        :return: The instance_type of this InstanceMetaData.
        :rtype: str
        """
        return self._instance_type

    @instance_type.setter
    def instance_type(self, instance_type):
        """
        Sets the instance_type of this InstanceMetaData.
        type of the instance

        :param instance_type: The instance_type of this InstanceMetaData.
        :type: str
        :raises ValueError: if the value is not one of the allowed types.
        """
        allowed_values = ["GATEWAY", "GATEWAY_PRIMARY", "CORE"]
        if instance_type not in allowed_values:
            raise ValueError(
                "Invalid value for `instance_type` ({0}), must be one of {1}"
                .format(instance_type, allowed_values)
            )
        self._instance_type = instance_type

    def to_dict(self):
        """
        Returns the model properties as a dict, recursing into nested models.
        """
        result = {}

        # dict.items() replaces six.iteritems: functionally identical on
        # both Python 2 and 3, and drops the third-party dependency.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()}
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, InstanceMetaData):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
from six import iteritems
import re
class InstanceMetaData(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'private_ip': 'str',
'public_ip': 'str',
'ssh_port': 'int',
'instance_id': 'str',
'ambari_server': 'bool',
'discovery_fqdn': 'str',
'instance_group': 'str',
'instance_status': 'str',
'instance_type': 'str'
}
attribute_map = {
'private_ip': 'privateIp',
'public_ip': 'publicIp',
'ssh_port': 'sshPort',
'instance_id': 'instanceId',
'ambari_server': 'ambariServer',
'discovery_fqdn': 'discoveryFQDN',
'instance_group': 'instanceGroup',
'instance_status': 'instanceStatus',
'instance_type': 'instanceType'
}
def __init__(self, private_ip=None, public_ip=None, ssh_port=None, instance_id=None, ambari_server=False, discovery_fqdn=None, instance_group=None, instance_status=None, instance_type=None):
"""
InstanceMetaData - a model defined in Swagger
"""
self._private_ip = None
self._public_ip = None
self._ssh_port = None
self._instance_id = None
self._ambari_server = None
self._discovery_fqdn = None
self._instance_group = None
self._instance_status = None
self._instance_type = None
if private_ip is not None:
self.private_ip = private_ip
if public_ip is not None:
self.public_ip = public_ip
if ssh_port is not None:
self.ssh_port = ssh_port
if instance_id is not None:
self.instance_id = instance_id
if ambari_server is not None:
self.ambari_server = ambari_server
if discovery_fqdn is not None:
self.discovery_fqdn = discovery_fqdn
if instance_group is not None:
self.instance_group = instance_group
if instance_status is not None:
self.instance_status = instance_status
if instance_type is not None:
self.instance_type = instance_type
@property
def private_ip(self):
"""
Gets the private_ip of this InstanceMetaData.
private ip of the insctance
:return: The private_ip of this InstanceMetaData.
:rtype: str
"""
return self._private_ip
@private_ip.setter
def private_ip(self, private_ip):
"""
Sets the private_ip of this InstanceMetaData.
private ip of the insctance
:param private_ip: The private_ip of this InstanceMetaData.
:type: str
"""
self._private_ip = private_ip
@property
def public_ip(self):
"""
Gets the public_ip of this InstanceMetaData.
public ip of the instance
:return: The public_ip of this InstanceMetaData.
:rtype: str
"""
return self._public_ip
@public_ip.setter
def public_ip(self, public_ip):
"""
Sets the public_ip of this InstanceMetaData.
public ip of the instance
:param public_ip: The public_ip of this InstanceMetaData.
:type: str
"""
self._public_ip = public_ip
@property
def ssh_port(self):
"""
Gets the ssh_port of this InstanceMetaData.
:return: The ssh_port of this InstanceMetaData.
:rtype: int
"""
return self._ssh_port
@ssh_port.setter
def ssh_port(self, ssh_port):
"""
Sets the ssh_port of this InstanceMetaData.
:param ssh_port: The ssh_port of this InstanceMetaData.
:type: int
"""
self._ssh_port = ssh_port
@property
def instance_id(self):
"""
Gets the instance_id of this InstanceMetaData.
id of the instance
:return: The instance_id of this InstanceMetaData.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""
Sets the instance_id of this InstanceMetaData.
id of the instance
:param instance_id: The instance_id of this InstanceMetaData.
:type: str
"""
self._instance_id = instance_id
@property
def ambari_server(self):
"""
Gets the ambari_server of this InstanceMetaData.
ambari server address
:return: The ambari_server of this InstanceMetaData.
:rtype: bool
"""
return self._ambari_server
@ambari_server.setter
def ambari_server(self, ambari_server):
"""
Sets the ambari_server of this InstanceMetaData.
ambari server address
:param ambari_server: The ambari_server of this InstanceMetaData.
:type: bool
"""
self._ambari_server = ambari_server
@property
def discovery_fqdn(self):
"""
Gets the discovery_fqdn of this InstanceMetaData.
the fully qualified domain name of the node in the service discovery cluster
:return: The discovery_fqdn of this InstanceMetaData.
:rtype: str
"""
return self._discovery_fqdn
@discovery_fqdn.setter
def discovery_fqdn(self, discovery_fqdn):
"""
Sets the discovery_fqdn of this InstanceMetaData.
the fully qualified domain name of the node in the service discovery cluster
:param discovery_fqdn: The discovery_fqdn of this InstanceMetaData.
:type: str
"""
self._discovery_fqdn = discovery_fqdn
@property
def instance_group(self):
"""
Gets the instance_group of this InstanceMetaData.
name of the instance group
:return: The instance_group of this InstanceMetaData.
:rtype: str
"""
return self._instance_group
@instance_group.setter
def instance_group(self, instance_group):
"""
Sets the instance_group of this InstanceMetaData.
name of the instance group
:param instance_group: The instance_group of this InstanceMetaData.
:type: str
"""
self._instance_group = instance_group
@property
def instance_status(self):
"""
Gets the instance_status of this InstanceMetaData.
status of the instance
:return: The instance_status of this InstanceMetaData.
:rtype: str
"""
return self._instance_status
@instance_status.setter
def instance_status(self, instance_status):
"""
Sets the instance_status of this InstanceMetaData.
status of the instance
:param instance_status: The instance_status of this InstanceMetaData.
:type: str
"""
allowed_values = ["REQUESTED", "CREATED", "UNREGISTERED", "REGISTERED", "DECOMMISSIONED", "TERMINATED", "DELETED_ON_PROVIDER_SIDE", "FAILED", "STOPPED"]
if instance_status not in allowed_values:
raise ValueError(
"Invalid value for `instance_status` ({0}), must be one of {1}"
.format(instance_status, allowed_values)
)
self._instance_status = instance_status
@property
def instance_type(self):
"""
Gets the instance_type of this InstanceMetaData.
type of the instance
:return: The instance_type of this InstanceMetaData.
:rtype: str
"""
return self._instance_type
@instance_type.setter
def instance_type(self, instance_type):
"""
Sets the instance_type of this InstanceMetaData.
type of the instance
:param instance_type: The instance_type of this InstanceMetaData.
:type: str
"""
allowed_values = ["GATEWAY", "GATEWAY_PRIMARY", "CORE"]
if instance_type not in allowed_values:
raise ValueError(
"Invalid value for `instance_type` ({0}), must be one of {1}"
.format(instance_type, allowed_values)
)
self._instance_type = instance_type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, InstanceMetaData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other | 0.562177 | 0.131257 |
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_keystone_service
short_description: Manage OpenStack Identity services
extends_documentation_fragment: openstack
author: "<NAME> (@SamYaple)"
version_added: "2.2"
description:
- Create, update, or delete OpenStack Identity service. If a service
with the supplied name already exists, it will be updated with the
new description and enabled attributes.
options:
name:
description:
- Name of the service
required: true
description:
description:
- Description of the service
required: false
default: None
enabled:
description:
- Is the service enabled
required: false
default: True
service_type:
description:
- The type of service
required: true
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a service for glance
- os_keystone_service:
cloud: mycloud
state: present
name: glance
service_type: image
description: OpenStack Image Service
# Delete a service
- os_keystone_service:
cloud: mycloud
state: absent
name: glance
service_type: image
'''
RETURN = '''
service:
description: Dictionary describing the service.
returned: On success when I(state) is 'present'
type: complex
contains:
id:
description: Service ID.
type: string
sample: "3292f020780b4d5baf27ff7e1d224c44"
name:
description: Service name.
type: string
sample: "glance"
service_type:
description: Service type.
type: string
sample: "image"
description:
description: Service description.
type: string
sample: "OpenStack Image Service"
enabled:
description: Service status.
type: boolean
sample: True
id:
description: The service ID.
returned: On success when I(state) is 'present'
type: string
sample: "3292f020780b4d5baf27ff7e1d224c44"
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
def _needs_update(module, service):
if service.enabled != module.params['enabled']:
return True
if service.description is not None and \
service.description != module.params['description']:
return True
return False
def _system_state_change(module, service):
state = module.params['state']
if state == 'absent' and service:
return True
if state == 'present':
if service is None:
return True
return _needs_update(module, service)
return False
def main():
argument_spec = openstack_full_argument_spec(
description=dict(default=None),
enabled=dict(default=True, type='bool'),
name=dict(required=True),
service_type=dict(required=True),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if StrictVersion(shade.__version__) < StrictVersion('1.6.0'):
module.fail_json(msg="To utilize this module, the installed version of"
"the shade library MUST be >=1.6.0")
description = module.params['description']
enabled = module.params['enabled']
name = module.params['name']
state = module.params['state']
service_type = module.params['service_type']
try:
cloud = shade.operator_cloud(**module.params)
services = cloud.search_services(name_or_id=name,
filters=dict(type=service_type))
if len(services) > 1:
module.fail_json(msg='Service name %s and type %s are not unique' %
(name, service_type))
elif len(services) == 1:
service = services[0]
else:
service = None
if module.check_mode:
module.exit_json(changed=_system_state_change(module, service))
if state == 'present':
if service is None:
service = cloud.create_service(name=name,
description=description, type=service_type, enabled=True)
changed = True
else:
if _needs_update(module, service):
service = cloud.update_service(
service.id, name=name, type=service_type, enabled=enabled,
description=description)
changed = True
else:
changed = False
module.exit_json(changed=changed, service=service, id=service.id)
elif state == 'absent':
if service is None:
changed=False
else:
cloud.delete_service(service.id)
changed=True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main() | ansible/modules/cloud/openstack/os_keystone_service.py |
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_keystone_service
short_description: Manage OpenStack Identity services
extends_documentation_fragment: openstack
author: "<NAME> (@SamYaple)"
version_added: "2.2"
description:
- Create, update, or delete OpenStack Identity service. If a service
with the supplied name already exists, it will be updated with the
new description and enabled attributes.
options:
name:
description:
- Name of the service
required: true
description:
description:
- Description of the service
required: false
default: None
enabled:
description:
- Is the service enabled
required: false
default: True
service_type:
description:
- The type of service
required: true
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a service for glance
- os_keystone_service:
cloud: mycloud
state: present
name: glance
service_type: image
description: OpenStack Image Service
# Delete a service
- os_keystone_service:
cloud: mycloud
state: absent
name: glance
service_type: image
'''
RETURN = '''
service:
description: Dictionary describing the service.
returned: On success when I(state) is 'present'
type: complex
contains:
id:
description: Service ID.
type: string
sample: "3292f020780b4d5baf27ff7e1d224c44"
name:
description: Service name.
type: string
sample: "glance"
service_type:
description: Service type.
type: string
sample: "image"
description:
description: Service description.
type: string
sample: "OpenStack Image Service"
enabled:
description: Service status.
type: boolean
sample: True
id:
description: The service ID.
returned: On success when I(state) is 'present'
type: string
sample: "3292f020780b4d5baf27ff7e1d224c44"
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
def _needs_update(module, service):
if service.enabled != module.params['enabled']:
return True
if service.description is not None and \
service.description != module.params['description']:
return True
return False
def _system_state_change(module, service):
state = module.params['state']
if state == 'absent' and service:
return True
if state == 'present':
if service is None:
return True
return _needs_update(module, service)
return False
def main():
argument_spec = openstack_full_argument_spec(
description=dict(default=None),
enabled=dict(default=True, type='bool'),
name=dict(required=True),
service_type=dict(required=True),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if StrictVersion(shade.__version__) < StrictVersion('1.6.0'):
module.fail_json(msg="To utilize this module, the installed version of"
"the shade library MUST be >=1.6.0")
description = module.params['description']
enabled = module.params['enabled']
name = module.params['name']
state = module.params['state']
service_type = module.params['service_type']
try:
cloud = shade.operator_cloud(**module.params)
services = cloud.search_services(name_or_id=name,
filters=dict(type=service_type))
if len(services) > 1:
module.fail_json(msg='Service name %s and type %s are not unique' %
(name, service_type))
elif len(services) == 1:
service = services[0]
else:
service = None
if module.check_mode:
module.exit_json(changed=_system_state_change(module, service))
if state == 'present':
if service is None:
service = cloud.create_service(name=name,
description=description, type=service_type, enabled=True)
changed = True
else:
if _needs_update(module, service):
service = cloud.update_service(
service.id, name=name, type=service_type, enabled=enabled,
description=description)
changed = True
else:
changed = False
module.exit_json(changed=changed, service=service, id=service.id)
elif state == 'absent':
if service is None:
changed=False
else:
cloud.delete_service(service.id)
changed=True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main() | 0.586404 | 0.209449 |
from __future__ import print_function
import numpy as np
import pandas as pd
import argparse
import json
import os
import sys
import shortuuid
import platform
import ast
from time import strftime, time
import visdom
import torch
import torch.optim.lr_scheduler as lr_sched
from torch.autograd import Variable
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from model_parser import get_model, PrintNetList
from datasets.minc2500 import MINC2500
from datasets.minc import MINC
from cmstats import updateCM, MulticlassStat
def main():
    """Train, resume training of, or test a network on the MINC datasets.

    Relies on the module-level globals created in the ``__main__`` block:
    ``args`` (parsed command line options) and ``vis`` (visdom client).
    Modes:
      * scratch -- neither --resume nor --test given: build a fresh model;
      * resume  -- --resume <json>: reload model/optimizer/scheduler state;
      * test    -- --test <json>: only evaluate a trained network.
    """
    # Model and data parameters
    model = args.model
    dataset = args.dataset
    batch_size = args.batch_size
    classes = ast.literal_eval(args.classes)
    gpu = args.gpu
    seed = args.seed
    # Training parameters
    method = args.method
    epochs = args.epochs
    momentum = args.momentum
    w_decay = args.w_decay
    # Learning rate scheduler parameters.
    # BUGFIX: `scheduler` previously held the schedule *name* (a string),
    # so a "constant" schedule crashed on scheduler.step() in the training
    # loop.  It now stays None unless a real scheduler object is created,
    # and the loop only steps it when it is not None.
    l_rate = args.l_rate
    scheduler = None
    step_size = args.step_size
    milestones = ast.literal_eval(args.milestones)
    gamma = args.gamma
    # Start training from scratch
    if not args.resume and not args.test:
        # Load the network model
        net = get_model(model, len(classes))
        if net is None:
            print("Unknown model name:", model + ".",
                  "Use '--net-list' option",
                  "to check the available network models")
            sys.exit(2)
        if gpu > 0:
            net.cuda()
        # Initialize the random generator
        torch.manual_seed(seed)
        if gpu > 0:
            torch.cuda.manual_seed_all(seed)
        # Dictionary used to store the training results and metadata
        json_data = {"platform": platform.platform(),
                     "date": strftime("%Y-%m-%d_%H:%M:%S"), "impl": "pytorch",
                     "dataset": dataset, "gpu": gpu,
                     "model": model, "epochs": epochs,
                     "classes": classes}
        json_data["train_params"] = {"method": method,
                                     "batch_size": batch_size,
                                     "seed": seed,
                                     "last_epoch": 0,
                                     "train_time": 0.0}
        epochs = range(epochs)
        # Optimization method (only SGD is currently supported)
        if method == "SGD":
            optimizer = torch.optim.SGD(net.parameters(),
                                        lr=l_rate,
                                        momentum=momentum,
                                        weight_decay=w_decay)
        # Learning rate scheduler.
        # BUGFIX: use '!=' instead of 'is not' -- identity comparison of
        # string literals is implementation dependent.
        lrate_dict = dict()
        lrate_dict["sched"] = args.lrate_sched
        if args.lrate_sched != "constant":
            if args.lrate_sched == "step":
                lrate_dict["step_size"] = step_size
                lrate_dict["gamma"] = gamma
                scheduler = lr_sched.StepLR(optimizer, step_size, gamma)
            elif args.lrate_sched == "multistep":
                lrate_dict["milestones"] = milestones
                lrate_dict["gamma"] = gamma
                scheduler = lr_sched.MultiStepLR(optimizer, milestones, gamma)
            elif args.lrate_sched == "exponential":
                lrate_dict["gamma"] = gamma
                scheduler = lr_sched.ExponentialLR(optimizer, gamma)
        json_data["train_params"]["l_rate"] = lrate_dict
        # Extract training parameters from the optimizer state
        for t_param in optimizer.state_dict()["param_groups"][0]:
            if t_param != "params":
                json_data["train_params"][t_param] = \
                    optimizer.state_dict()["param_groups"][0][t_param]
        # Total number of trainable parameters (stored for bookkeeping)
        num_par = 0
        for parameter in net.parameters():
            num_par += parameter.numel()
        json_data["num_params"] = num_par
    # Resume from a training checkpoint or test the network
    else:
        with open(args.resume or args.test, 'rb') as f:
            json_data = json.load(f)
        train_info = json_data["train_params"]
        dataset = json_data["dataset"]
        batch_size = train_info["batch_size"]
        torch.manual_seed(train_info["seed"])
        if json_data["gpu"] > 0:
            torch.cuda.manual_seed_all(train_info["seed"])
        # Load the network model
        classes = json_data["classes"]
        net = get_model(json_data["model"], len(classes))
        if (json_data["gpu"] > 0):
            net.cuda()
        if args.resume:
            # Resume training: load the saved state
            # (in the same directory as the json file)
            last_epoch = train_info["last_epoch"]
            epochs = range(last_epoch, json_data["epochs"])
            chk_dir = os.path.split(args.resume)[0]
            state = torch.load(os.path.join(chk_dir, json_data["state"]))
            # Load the network parameters
            net.load_state_dict(state["params"])
            # Load the optimizer state
            method = train_info["method"]
            if method == "SGD":
                optimizer = torch.optim.SGD(net.parameters(),
                                            lr=train_info["initial_lr"])
                optimizer.load_state_dict(state["optim"])
            # Rebuild the learning rate scheduler from the checkpoint info.
            # BUGFIX: the "exponential" branch previously tested
            # args.lrate_sched (the *new* command line option) instead of
            # the schedule recorded in the checkpoint.
            if train_info["l_rate"]["sched"] == "step":
                step_size = train_info["l_rate"]["step_size"]
                gamma = train_info["l_rate"]["gamma"]
                scheduler = lr_sched.StepLR(optimizer, step_size, gamma,
                                            last_epoch)
            elif train_info["l_rate"]["sched"] == "multistep":
                milestones = train_info["l_rate"]["milestones"]
                gamma = train_info["l_rate"]["gamma"]
                scheduler = lr_sched.MultiStepLR(optimizer, milestones, gamma,
                                                 last_epoch)
            elif train_info["l_rate"]["sched"] == "exponential":
                gamma = train_info["l_rate"]["gamma"]
                scheduler = lr_sched.ExponentialLR(optimizer, gamma,
                                                   last_epoch)
        else:
            # Test the network: load the saved parameters
            # (in the same directory as the json file)
            res_dir = os.path.split(args.test)[0]
            if "params" in json_data:
                net.load_state_dict(torch.load(os.path.join(res_dir,
                                                            json_data["params"]
                                                            )))
            elif "state" in json_data:
                # Test a checkpointed network
                state = torch.load(os.path.join(res_dir, json_data["state"]))
                net.load_state_dict(state["params"])
            else:
                sys.exit("No network parameters found in JSON file")
    if args.data_root:
        data_root = args.data_root
    else:
        # Default directory
        data_root = os.path.join(os.curdir, dataset + "_root")
    # Prepare data structures
    if not args.test:
        # Training phase: random augmentation on the training set only
        train_trans = transforms.Compose([
            transforms.RandomSizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ])
        val_trans = transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor()
        ])
        if dataset == "minc2500":
            train_set = MINC2500(root_dir=data_root, set_type='train',
                                 split=1, transform=train_trans)
            val_set = MINC2500(root_dir=data_root, set_type='validate',
                               split=1, transform=val_trans)
        else:
            train_set = MINC(root_dir=data_root, set_type='train',
                             classes=classes, transform=train_trans)
            val_set = MINC(root_dir=data_root, set_type='validate',
                           classes=classes, transform=val_trans)
        train_loader = DataLoader(dataset=train_set,
                                  batch_size=batch_size,
                                  shuffle=True, num_workers=args.workers,
                                  pin_memory=(args.gpu > 0))
        val_loader = DataLoader(dataset=val_set,
                                batch_size=batch_size,
                                shuffle=False, num_workers=args.workers,
                                pin_memory=(args.gpu > 0))
        # Loss function
        if gpu > 0:
            criterion = nn.CrossEntropyLoss().cuda()
        else:
            criterion = nn.CrossEntropyLoss()
        # Visdom windows to draw the training graphs
        loss_window = vis.line(X=torch.zeros((1,)).cpu(),
                               Y=torch.zeros((1)).cpu(),
                               opts=dict(xlabel='Iteration (batch size = ' +
                                                str(batch_size) + ')',
                                         ylabel='Loss',
                                         title='Training Loss',
                                         legend=['Loss']))
        acc_window = vis.line(X=torch.zeros((1,)).cpu(),
                              Y=torch.zeros((1)).cpu(),
                              opts=dict(xlabel='Epoch',
                                        ylabel='Accuracy',
                                        title='Validation Accuracy',
                                        legend=['Accuracy']))
        prec_window = vis.line(X=torch.zeros((1,)).cpu(),
                               Y=torch.zeros((1)).cpu(),
                               opts=dict(xlabel='Epoch',
                                         ylabel='Precision',
                                         title='Validation Precision (Macro)',
                                         legend=['Precision']))
        recall_window = vis.line(X=torch.zeros((1,)).cpu(),
                                 Y=torch.zeros((1)).cpu(),
                                 opts=dict(xlabel='Epoch',
                                           ylabel='Recall',
                                           title='Validation Recall (Macro)',
                                           legend=['Recall']))
        val_windows = [acc_window, prec_window, recall_window]
    # Testing phase (the test set is evaluated after training as well)
    test_trans = transforms.Compose([
        transforms.Scale(256),
        transforms.CenterCrop(224),
        transforms.ToTensor()
    ])
    if dataset == "minc2500":
        test_set = MINC2500(root_dir=data_root, set_type='test', split=1,
                            transform=test_trans)
    else:
        test_set = MINC(root_dir=data_root, set_type='test',
                        classes=classes, transform=test_trans)
    test_loader = DataLoader(dataset=test_set, batch_size=batch_size,
                             shuffle=False, num_workers=args.workers,
                             pin_memory=(args.gpu > 0))
    if not args.test:
        # Training loop
        print("Training network on the", len(train_set), "training examples")
        for epoch in epochs:
            start_epoch = time()
            # Step the learning rate scheduler (if any), then train
            if scheduler is not None:
                scheduler.step()
            train(net, train_loader, criterion, optimizer, epoch, epochs,
                  loss_window)
            # Check accuracy on validation set
            print("Validating network on the", len(val_set),
                  "validation images...")
            validate(net, val_loader, epoch, len(classes), val_windows)
            json_data["train_params"]["train_time"] += round(time() -
                                                             start_epoch, 3)
            # Save the checkpoint state
            save_state(net, optimizer, json_data, epoch + 1, args.chk_dir)
    # Test the model on the testing set.
    # BUGFIX: test() takes (net, test_loader, json_data); the extra `args`
    # positional argument raised a TypeError (args is a module-level
    # global and must not be passed explicitly).
    print("Testing network on the", len(test_set), "testing images...")
    test(net, test_loader, json_data)
    # Save the trained network parameters and the testing results
    save_params(net, json_data, args.save_dir)
def train(net, train_loader, criterion, optimizer, epoch, epochs,
          loss_window):
    """ Train the network for one epoch over the whole training set.

    Uses the module-level globals ``args`` (for args.gpu) and ``vis``
    (visdom client).  Every `print_interval` batches the current loss is
    appended to the visdom loss plot and printed to stdout.

    Parameters:
    net -- Module object containing the network model;
    train_loader -- DataLoader object for the dataset in use;
    criterion -- Method used to compute the loss;
    optimizer -- Method used to update the network paramets;
    epoch -- actual training epoch (0-based);
    epochs -- range of all training epochs (epochs[-1] is the last one);
    loss_window -- visdom window used to plot the loss;
    """
    print_interval = 50
    # Cumulative wall-clock time spent on batches, used for the
    # running s/batch average printed below
    batch_time = 0.0
    # Switch to train mode
    net.train()
    for i, (images, labels) in enumerate(train_loader):
        start_batch = time()
        if args.gpu > 0:
            # NOTE(review): `async=` became a reserved word in Python 3.7+
            # (renamed to `non_blocking=` in torch >= 0.4); this code
            # targets the Python 2 / torch 0.3 era -- confirm before porting
            images = Variable(images.cuda(async=True))
            labels = Variable(labels.cuda(async=True))
        else:
            images = Variable(images)
            labels = Variable(labels)
        # Forward + Backward + Optimize
        optimizer.zero_grad()
        outputs = net(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        batch_time += time() - start_batch
        if i % print_interval == 0:
            # X is the global iteration index so successive epochs extend
            # the same curve instead of overlapping
            vis.line(
                X=torch.ones((1, 1)).cpu() * ((epoch) * len(train_loader) + i),
                Y=torch.Tensor([loss.data[0]]).unsqueeze(0).cpu(),
                win=loss_window,
                update='append')
            print('Epoch [%d/%d], Iter [%d/%d] Loss: %.4f Time: %.3f s/batch'
                  % (epoch + 1, epochs[-1] + 1, i, len(train_loader),
                     loss.data[0], batch_time / (i + 1)))
def validate(net, val_loader, epoch, n_class, val_windows):
    """ Compute the network accuracy on the validation set.

    Builds an n_class x n_class confusion matrix over the whole
    validation set, derives macro statistics from it and appends one
    point per metric to the visdom validation plots.  Uses the
    module-level globals ``args`` (for args.gpu) and ``vis``.

    Parameters:
    net -- Module object containing the network model;
    val_loader -- DataLoader object for the validation set
    epoch -- Actual training epoch (0-based; plotted as epoch + 1)
    n_class -- Number of object classes
    val_windows -- List containing the visdom windows used for validation
                   plots (accuracy, precision, recall -- in this order)
    """
    # Switch to evaluation mode
    net.eval()
    # Create the confusion matrix
    cm = np.zeros([n_class, n_class])
    for images, labels in val_loader:
        if args.gpu > 0:
            # volatile=True: inference only, no gradient bookkeeping
            # (pre-0.4 PyTorch idiom)
            images = Variable(images.cuda(async=True), volatile=True)
        else:
            images = Variable(images)
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        # Update the confusion matrix
        cm = updateCM(cm, predicted.cpu(), labels)
    stats = MulticlassStat(cm).get_stats_dict()
    acc = stats["accuracy"]
    prec = stats["precision_M"]
    Fscore = stats["Fscore_M"]
    vis.line(
        X=torch.ones((1, 1)).cpu() * (epoch + 1),
        Y=torch.ones((1, 1)).cpu() * acc,
        win=val_windows[0],
        update='append')
    vis.line(
        X=torch.ones((1, 1)).cpu() * (epoch + 1),
        Y=torch.ones((1, 1)).cpu() * prec,
        win=val_windows[1],
        update='append')
    # NOTE(review): val_windows[2] is created with the title
    # 'Validation Recall (Macro)' but receives the macro F-score --
    # confirm whether stats["recall_M"] (if cmstats provides it) was
    # the intended value here.
    vis.line(
        X=torch.ones((1, 1)).cpu() * (epoch + 1),
        Y=torch.ones((1, 1)).cpu() * Fscore,
        win=val_windows[2],
        update='append')
    print('Validation: accuracy of the model: %.2f %%'
          % (acc * 100))
def test(net, test_loader, json_data):
    """ Compute the network outputs on the test set and extract the
    performance measures.

    Saves the raw output scores to a ``.scores`` file in args.save_dir,
    prints the multiclass statistics, stores them (plus the confusion
    matrix and the timing) into json_data["test_stats"], and plots the
    ROC curves.  Uses the module-level global ``args`` (gpu, save_dir).

    NOTE(review): the call site in main() invokes this as
    test(net, test_loader, args, json_data) -- four arguments against
    this three-parameter signature, which raises a TypeError; one of
    the two sides needs fixing.

    Parameters:
    net -- Module object containing the network model;
    test_loader -- DataLoader object for the testing set;
    json_data -- Dictionary used to store the training metadata;
    """
    # Switch to evaluation mode
    net.eval()
    test_time = 0.0
    # Accumulators for the raw scores and the ground-truth labels of
    # the whole test set (used for the score-based ROC plot)
    scores = torch.Tensor()
    all_labels = torch.LongTensor()
    # Create the confusion matrix
    n_class = len(json_data["classes"])
    cm = np.zeros([n_class, n_class])
    for images, labels in test_loader:
        start_batch = time()
        if args.gpu > 0:
            # volatile=True: inference only (pre-0.4 PyTorch idiom);
            # `async=` is a reserved word from Python 3.7 onwards
            images = Variable(images.cuda(async=True), volatile=True)
        else:
            images = Variable(images)
        outputs = net(images)
        scores = torch.cat((scores, outputs.cpu().data))
        all_labels = torch.cat((all_labels, labels))
        _, predicted = torch.max(outputs.data, 1)
        test_time += time() - start_batch
        # Update the confusion matrix
        cm = updateCM(cm, predicted.cpu(), labels)
    # Save the scores on the testing set
    f_name = os.path.join(args.save_dir, json_data["impl"] + "_" +
                          json_data["model"] + "_" +
                          json_data["dataset"] + "_" +
                          json_data["UUID"] + ".scores")
    torch.save(scores, f_name)
    # Compute the testing statistics and print them
    mc_stats = MulticlassStat(cm)
    print('******Test Results******')
    print('Time: ', round(test_time, 3), "seconds")
    mc_stats.print_stats()
    # Update the json data
    json_data["test_stats"] = mc_stats.get_stats_dict()
    json_data["test_stats"]["confusion_matrix"] = \
        pd.DataFrame(cm).to_dict(orient='split')
    json_data["test_stats"]["test_time"] = round(test_time, 6)
    # Plot the ROCs
    mc_stats.plot_multi_roc()
    mc_stats.plot_scores_roc(all_labels.numpy(), scores.numpy())
def save_state(net, optimizer, json_data, epoch, dir):
    """ Save a training checkpoint: network + optimizer state + metadata.

    Writes two files to `dir`, both named
    ``<impl>_<model>_<dataset>_<UUID>_epoch_<epoch>``:
    a ``.state`` file (torch-serialized network and optimizer state
    dicts) and a ``.json`` file with the updated metadata.

    Parameters:
    net -- Module object containing the network model;
    optimizer -- Optimizer object obtained from torch.optim;
    json_data -- Dictionary used to store the training metadata;
    epoch -- Last completed training epoch (1-based);
    dir -- Directory used to save the data
    """
    json_data["train_params"]["last_epoch"] = epoch
    epoch_str = '_epoch_' + str(epoch)
    if epoch == 1:
        # Generate the UUID (8 characters long) on the first checkpoint;
        # subsequent checkpoints reuse it so all files of one run share it
        id = shortuuid.uuid()[:8]
        json_data["UUID"] = id
    else:
        id = json_data["UUID"]
    f_name = os.path.join(dir, json_data["impl"] + "_" +
                          json_data["model"] + "_" +
                          json_data["dataset"] + "_" +
                          id + epoch_str)
    # Save training state
    state = dict()
    state["params"] = net.state_dict()
    state["optim"] = optimizer.state_dict()
    torch.save(state, f_name + '.state')
    # Mirror the current optimizer hyper-parameters into the metadata
    # (the scheduler may have changed e.g. the learning rate).
    # BUGFIX: use '!=' instead of 'is not' (string identity comparison
    # is implementation dependent) and drop a stray debug print of the
    # whole optimizer state that ran once per hyper-parameter.
    for t_param in state["optim"]["param_groups"][0]:
        if t_param != "params":
            json_data["train_params"][t_param] = \
                state["optim"]["param_groups"][0][t_param]
    # Save experiment metadata; json.dump emits text, so open in text
    # mode ('wb' breaks under Python 3)
    json_data['state'] = os.path.split(f_name + '.state')[1]
    with open(f_name + ".json", 'w') as f:
        json.dump(json_data, f)
def save_params(net, json_data, dir):
    """ Saves the parameters of the trained network plus final metadata.

    Writes <prefix>.params (the network state_dict) and <prefix>.json,
    after dropping the checkpoint-only keys ('last_epoch', 'state').

    Parameters:
    net -- Module object containing the network model;
    json_data -- Dictionary used to store the training metadata;
    dir -- Directory used to save the data
    """
    if "last_epoch" in json_data["train_params"]:
        del json_data["train_params"]["last_epoch"]
    if "state" in json_data:
        del json_data["state"]
    f_name = os.path.join(dir, json_data["impl"] + "_" +
                          json_data["model"] + "_" +
                          json_data["dataset"] + "_" +
                          json_data["UUID"])
    # Save the network parameters.
    # (fix: was saved as '.state' while the metadata below recorded
    # '.params', so the --test loader could never find the file)
    torch.save(net.state_dict(), f_name + '.params')
    # Save experiment metadata (text mode: json.dump writes str, not bytes)
    json_data['params'] = os.path.split(f_name + '.params')[1]
    with open(f_name + ".json", 'w') as f:
        json.dump(json_data, f)
if __name__ == '__main__':
    # Visdom client used by main()/train()/validate() for live plots.
    vis = visdom.Visdom()
    parser = argparse.ArgumentParser(description='Train and test a network ' +
                                     'on the MINC datasets')
    # Data Options
    data_args = parser.add_argument_group('Data arguments')
    data_args.add_argument('--dataset', metavar='NAME', default='minc2500',
                           choices=['minc2500', 'minc'],
                           help='name of the dataset to be used' +
                           ' (default: minc2500)')
    data_args.add_argument('--data-root', metavar='DIR', help='path to ' +
                           'dataset (default: ./$(DATASET)_root)')
    data_args.add_argument('--save-dir', metavar='DIR', default='./results',
                           help='path to trained models (default: results/)')
    data_args.add_argument('--chk-dir', metavar='DIR', default='./checkpoints',
                           help='path to checkpoints (default: checkpoints/)')
    data_args.add_argument('--workers', metavar='NUM', type=int,
                           default=8, help='number of worker threads for' +
                           ' the data loader')
    # Model Options
    model_args = parser.add_argument_group('Model arguments')
    model_args.add_argument('-m', '--model', metavar='NAME',
                            default='tv-densenet121', type=str,
                            help='name of the network model to be used')
    model_args.add_argument('--classes', metavar='LIST',
                            default='[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,' +
                            '16,17,18,19,20,21,22]',
                            help='indices of the classes to be used for the' +
                            ' classification')
    # Training Options
    train_args = parser.add_argument_group('Training arguments')
    train_args.add_argument('--method', default='SGD', metavar='NAME',
                            help='training method to be used')
    train_args.add_argument('--gpu', type=int, default=1, metavar='NUM',
                            help='number of GPUs to use')
    train_args.add_argument('--epochs', default=20, type=int, metavar='NUM',
                            help='number of total epochs to run (default: 20)')
    train_args.add_argument('-b', '--batch-size', default=64, type=int,
                            metavar='NUM',
                            help='mini-batch size (default: 64)')
    train_args.add_argument('--momentum', type=float, default=0.9,
                            metavar='NUM', help='Momentum (default: 0.9)')
    train_args.add_argument('--w-decay', type=float, default=1e-4,
                            metavar='NUM', help='weight decay (default: 1e-4)')
    train_args.add_argument('--seed', type=int, metavar='NUM',
                            default=179424691,
                            help='random seed (default: 179424691)')
    # Learning Rate Scheduler Options
    lrate_args = parser.add_argument_group('Learning rate arguments')
    lrate_args.add_argument('--l-rate', type=float, default=0.1,
                            metavar='NUM', help='initial learning rate' +
                            ' (default: 0.1)')
    # (fix: help text claimed "default: constant" while the actual
    # default is "multistep")
    lrate_args.add_argument('--lrate-sched', default="multistep",
                            metavar="NAME", help="name of the learning " +
                            "rate scheduler (default: multistep)",
                            choices=['step', 'multistep', 'exponential',
                                     'constant'])
    lrate_args.add_argument('--milestones', default='[5,10]', metavar='LIST',
                            help='epoch indices for learning rate reduction' +
                            ' (multistep, default: [5,10])')
    lrate_args.add_argument('--gamma', type=float, default=0.1,
                            metavar='NUM', help='multiplicative factor of ' +
                            'learning rate decay (default: 0.1)')
    lrate_args.add_argument('--step-size', type=int, default=5,
                            metavar='NUM', help='period of learning rate ' +
                            'decay (step, default: 5)')
    # Other Options
    parser.add_argument('--resume', default='', type=str, metavar='JSON_FILE',
                        help='resume the training from the specified JSON ' +
                        'file (default: none)')
    parser.add_argument('--test', default='', type=str, metavar='JSON_FILE',
                        help='test the network from the specified JSON file')
    parser.add_argument('--net-list', action=PrintNetList,
                        help='Print the list of the available network ' +
                        'architectures')
    args = parser.parse_args()
    if not args.net_list:
        main()
import numpy as np
import pandas as pd
import argparse
import json
import os
import sys
import shortuuid
import platform
import ast
from time import strftime, time
import visdom
import torch
import torch.optim.lr_scheduler as lr_sched
from torch.autograd import Variable
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from model_parser import get_model, PrintNetList
from datasets.minc2500 import MINC2500
from datasets.minc import MINC
from cmstats import updateCM, MulticlassStat
def main():
    """Train, resume, or test a model according to the global ``args``.

    Reads all configuration from the module-level ``args`` namespace
    (set in the ``__main__`` block) and plots progress through the
    module-level visdom client ``vis``.  Depending on --resume/--test it
    either starts training from scratch, resumes from a checkpoint JSON,
    or evaluates a previously saved network on the test set.
    """
    # Model and data parameters
    model = args.model
    dataset = args.dataset
    batch_size = args.batch_size
    classes = ast.literal_eval(args.classes)
    gpu = args.gpu
    seed = args.seed
    # Training parameters
    method = args.method
    epochs = args.epochs
    momentum = args.momentum
    w_decay = args.w_decay
    # Learning rate scheduler parameters.
    # (fix: the scheduler object is built below and stays None for the
    # 'constant' policy; previously this held the policy *name* string,
    # so scheduler.step() raised AttributeError for 'constant')
    l_rate = args.l_rate
    scheduler = None
    step_size = args.step_size
    milestones = ast.literal_eval(args.milestones)
    gamma = args.gamma
    # Start training from scratch
    if not args.resume and not args.test:
        # Load the network model
        net = get_model(model, len(classes))
        if net is None:
            print("Unknown model name:", model + ".",
                  "Use '--net-list' option",
                  "to check the available network models")
            sys.exit(2)
        if gpu > 0:
            net.cuda()
        # Initialize the random generator
        torch.manual_seed(seed)
        if gpu > 0:
            torch.cuda.manual_seed_all(seed)
        # Dictionary used to store the training results and metadata
        json_data = {"platform": platform.platform(),
                     "date": strftime("%Y-%m-%d_%H:%M:%S"), "impl": "pytorch",
                     "dataset": dataset, "gpu": gpu,
                     "model": model, "epochs": epochs,
                     "classes": classes}
        json_data["train_params"] = {"method": method,
                                     "batch_size": batch_size,
                                     "seed": seed,
                                     "last_epoch": 0,
                                     "train_time": 0.0}
        epochs = range(epochs)
        # Optimization method
        # NOTE(review): only SGD is implemented; any other --method value
        # leaves 'optimizer' undefined and fails below.
        if method == "SGD":
            optimizer = torch.optim.SGD(net.parameters(),
                                        lr=l_rate,
                                        momentum=momentum,
                                        weight_decay=w_decay)
        # Learning rate scheduler.
        # (fix: was `is not "constant"` -- an identity comparison with a
        # string literal, unreliable for runtime strings from argparse)
        lrate_dict = dict()
        lrate_dict["sched"] = args.lrate_sched
        if args.lrate_sched != "constant":
            if args.lrate_sched == "step":
                lrate_dict["step_size"] = step_size
                lrate_dict["gamma"] = gamma
                scheduler = lr_sched.StepLR(optimizer, step_size, gamma)
            elif args.lrate_sched == "multistep":
                lrate_dict["milestones"] = milestones
                lrate_dict["gamma"] = gamma
                scheduler = lr_sched.MultiStepLR(optimizer, milestones, gamma)
            elif args.lrate_sched == "exponential":
                lrate_dict["gamma"] = gamma
                scheduler = lr_sched.ExponentialLR(optimizer, gamma)
        json_data["train_params"]["l_rate"] = lrate_dict
        # Extract training parameters from the optimizer state
        # (fix: was `is not "params"`, same identity-comparison issue)
        for t_param in optimizer.state_dict()["param_groups"][0]:
            if t_param != "params":
                json_data["train_params"][t_param] = \
                    optimizer.state_dict()["param_groups"][0][t_param]
        num_par = 0
        for parameter in net.parameters():
            num_par += parameter.numel()
        json_data["num_params"] = num_par
    # Resume from a training checkpoint or test the network
    else:
        with open(args.resume or args.test, 'rb') as f:
            json_data = json.load(f)
        train_info = json_data["train_params"]
        dataset = json_data["dataset"]
        batch_size = train_info["batch_size"]
        torch.manual_seed(train_info["seed"])
        if json_data["gpu"] > 0:
            torch.cuda.manual_seed_all(train_info["seed"])
        # Load the network model
        classes = json_data["classes"]
        net = get_model(json_data["model"], len(classes))
        if (json_data["gpu"] > 0):
            net.cuda()
        if args.resume:
            # Resume training
            # Load the saved state
            # (in the same directory as the json file)
            last_epoch = train_info["last_epoch"]
            epochs = range(last_epoch, json_data["epochs"])
            chk_dir = os.path.split(args.resume)[0]
            state = torch.load(os.path.join(chk_dir, json_data["state"]))
            # Load the network parameters
            net.load_state_dict(state["params"])
            # Load the optimizer state
            method = train_info["method"]
            if method == "SGD":
                # NOTE(review): "initial_lr" is presumably written by the
                # scheduler into the saved optimizer state -- confirm.
                optimizer = torch.optim.SGD(net.parameters(),
                                            lr=train_info["initial_lr"])
                optimizer.load_state_dict(state["optim"])
            # Load the learning rate scheduler info
            if train_info["l_rate"]["sched"] == "step":
                step_size = train_info["l_rate"]["step_size"]
                gamma = train_info["l_rate"]["gamma"]
                scheduler = lr_sched.StepLR(optimizer, step_size, gamma,
                                            last_epoch)
            elif train_info["l_rate"]["sched"] == "multistep":
                milestones = train_info["l_rate"]["milestones"]
                gamma = train_info["l_rate"]["gamma"]
                scheduler = lr_sched.MultiStepLR(optimizer, milestones, gamma,
                                                 last_epoch)
            # (fix: this branch checked args.lrate_sched instead of the
            # saved train_info, inconsistent with the two branches above)
            elif train_info["l_rate"]["sched"] == "exponential":
                gamma = train_info["l_rate"]["gamma"]
                scheduler = lr_sched.ExponentialLR(optimizer, gamma,
                                                   last_epoch)
        else:
            # Test the network
            # Load the saved parameters
            # (in the same directory as the json file)
            res_dir = os.path.split(args.test)[0]
            if "params" in json_data:
                net.load_state_dict(torch.load(os.path.join(res_dir,
                                                            json_data["params"]
                                                            )))
            elif "state" in json_data:
                # Test a checkpointed network
                state = torch.load(os.path.join(res_dir, json_data["state"]))
                net.load_state_dict(state["params"])
            else:
                sys.exit("No network parameters found in JSON file")
    if args.data_root:
        data_root = args.data_root
    else:
        # Default directory
        data_root = os.path.join(os.curdir, dataset + "_root")
    # Prepare data structures
    if not args.test:
        # Training phase
        train_trans = transforms.Compose([
            transforms.RandomSizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ])
        val_trans = transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor()
        ])
        if dataset == "minc2500":
            train_set = MINC2500(root_dir=data_root, set_type='train',
                                 split=1, transform=train_trans)
            val_set = MINC2500(root_dir=data_root, set_type='validate',
                               split=1, transform=val_trans)
        else:
            train_set = MINC(root_dir=data_root, set_type='train',
                             classes=classes, transform=train_trans)
            val_set = MINC(root_dir=data_root, set_type='validate',
                           classes=classes, transform=val_trans)
        train_loader = DataLoader(dataset=train_set,
                                  batch_size=batch_size,
                                  shuffle=True, num_workers=args.workers,
                                  pin_memory=(args.gpu > 0))
        val_loader = DataLoader(dataset=val_set,
                                batch_size=batch_size,
                                shuffle=False, num_workers=args.workers,
                                pin_memory=(args.gpu > 0))
        # Loss function
        if gpu > 0:
            criterion = nn.CrossEntropyLoss().cuda()
        else:
            criterion = nn.CrossEntropyLoss()
        # Visdom windows to draw the training graphs
        loss_window = vis.line(X=torch.zeros((1,)).cpu(),
                               Y=torch.zeros((1)).cpu(),
                               opts=dict(xlabel='Iteration (batch size = ' +
                                                str(batch_size) + ')',
                                         ylabel='Loss',
                                         title='Training Loss',
                                         legend=['Loss']))
        acc_window = vis.line(X=torch.zeros((1,)).cpu(),
                              Y=torch.zeros((1)).cpu(),
                              opts=dict(xlabel='Epoch',
                                        ylabel='Accuracy',
                                        title='Validation Accuracy',
                                        legend=['Accuracy']))
        prec_window = vis.line(X=torch.zeros((1,)).cpu(),
                               Y=torch.zeros((1)).cpu(),
                               opts=dict(xlabel='Epoch',
                                         ylabel='Precision',
                                         title='Validation Precision (Macro)',
                                         legend=['Precision']))
        recall_window = vis.line(X=torch.zeros((1,)).cpu(),
                                 Y=torch.zeros((1)).cpu(),
                                 opts=dict(xlabel='Epoch',
                                           ylabel='Recall',
                                           title='Validation Recall (Macro)',
                                           legend=['Recall']))
        val_windows = [acc_window, prec_window, recall_window]
    # Testing phase
    test_trans = transforms.Compose([
        transforms.Scale(256),
        transforms.CenterCrop(224),
        transforms.ToTensor()
    ])
    if dataset == "minc2500":
        test_set = MINC2500(root_dir=data_root, set_type='test', split=1,
                            transform=test_trans)
    else:
        test_set = MINC(root_dir=data_root, set_type='test',
                        classes=classes, transform=test_trans)
    test_loader = DataLoader(dataset=test_set, batch_size=batch_size,
                             shuffle=False, num_workers=args.workers,
                             pin_memory=(args.gpu > 0))
    if not args.test:
        # Training loop
        print("Training network on the", len(train_set), "training examples")
        for epoch in epochs:
            start_epoch = time()
            # Advance the learning rate schedule (no-op for 'constant')
            if scheduler is not None:
                scheduler.step()
            train(net, train_loader, criterion, optimizer, epoch, epochs,
                  loss_window)
            # Check accuracy on validation set
            print("Validating network on the", len(val_set),
                  "validation images...")
            validate(net, val_loader, epoch, len(classes), val_windows)
            json_data["train_params"]["train_time"] += round(time() -
                                                             start_epoch, 3)
            # Save the checkpoint state
            save_state(net, optimizer, json_data, epoch + 1, args.chk_dir)
    # Test the model on the testing set
    print("Testing network on the", len(test_set), "testing images...")
    test(net, test_loader, args, json_data)
    # Save the trained network parameters and the testing results
    save_params(net, json_data, args.save_dir)
def train(net, train_loader, criterion, optimizer, epoch, epochs,
          loss_window):
    """ Train the network for one epoch on the whole training set.

    Uses the module-level ``args`` (for args.gpu) and visdom client
    ``vis``; the running loss is appended to *loss_window* every
    ``print_interval`` batches.

    Parameters:
    net -- Module object containing the network model;
    train_loader -- DataLoader object for the dataset in use;
    criterion -- Method used to compute the loss;
    optimizer -- Method used to update the network parameters;
    epoch -- actual training epoch (0-based);
    epochs -- range of all epoch indices (epochs[-1] is the last one);
    loss_window -- visdom window used to plot the loss;
    """
    print_interval = 50
    batch_time = 0.0
    # Switch to train mode
    net.train()
    for i, (images, labels) in enumerate(train_loader):
        start_batch = time()
        if args.gpu > 0:
            # NOTE: `async=True` / Variable are pre-0.4 PyTorch idioms;
            # `async` is a reserved word in Python >= 3.7.
            images = Variable(images.cuda(async=True))
            labels = Variable(labels.cuda(async=True))
        else:
            images = Variable(images)
            labels = Variable(labels)
        # Forward + Backward + Optimize
        optimizer.zero_grad()
        outputs = net(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        batch_time += time() - start_batch
        if i % print_interval == 0:
            # X is the global iteration index so the curve is continuous
            # across epochs; loss.data[0] is the old-torch scalar access.
            vis.line(
                X=torch.ones((1, 1)).cpu() * ((epoch) * len(train_loader) + i),
                Y=torch.Tensor([loss.data[0]]).unsqueeze(0).cpu(),
                win=loss_window,
                update='append')
            print('Epoch [%d/%d], Iter [%d/%d] Loss: %.4f Time: %.3f s/batch'
                  % (epoch + 1, epochs[-1] + 1, i, len(train_loader),
                     loss.data[0], batch_time / (i + 1)))
def validate(net, val_loader, epoch, n_class, val_windows):
    """ Compute the network accuracy on the validation set.

    Builds a confusion matrix over the whole validation set, derives
    macro statistics from it and appends one point per metric to the
    visdom windows. Uses the module-level ``args`` and ``vis``.

    Parameters:
    net -- Module object containing the network model;
    val_loader -- DataLoader object for the validation set
    epoch -- Actual training epoch
    n_class -- Number of object classes
    val_windows -- List containing the visdom windows used for validation
                   plots (accuracy, precision, recall -- in this order)
    """
    # Switch to evaluation mode
    net.eval()
    # Create the confusion matrix
    cm = np.zeros([n_class, n_class])
    for images, labels in val_loader:
        if args.gpu > 0:
            # pre-0.4 PyTorch idiom; `async` is reserved in Python >= 3.7
            images = Variable(images.cuda(async=True), volatile=True)
        else:
            images = Variable(images)
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        # Update the confusion matrix
        cm = updateCM(cm, predicted.cpu(), labels)
    stats = MulticlassStat(cm).get_stats_dict()
    acc = stats["accuracy"]
    prec = stats["precision_M"]
    Fscore = stats["Fscore_M"]
    vis.line(
        X=torch.ones((1, 1)).cpu() * (epoch + 1),
        Y=torch.ones((1, 1)).cpu() * acc,
        win=val_windows[0],
        update='append')
    vis.line(
        X=torch.ones((1, 1)).cpu() * (epoch + 1),
        Y=torch.ones((1, 1)).cpu() * prec,
        win=val_windows[1],
        update='append')
    # NOTE(review): this plots the macro F-score into the window titled
    # "Validation Recall" (val_windows[2]) -- confirm whether recall_M
    # was intended here.
    vis.line(
        X=torch.ones((1, 1)).cpu() * (epoch + 1),
        Y=torch.ones((1, 1)).cpu() * Fscore,
        win=val_windows[2],
        update='append')
    print('Validation: accuracy of the model: %.2f %%'
          % (acc * 100))
def test(net, test_loader, json_data):
""" Compute the network outputs and extract the performance measues
Parameters:
net -- Module object containing the network model;
test_loader -- DataLoader object for the testing set;
json_data -- Dictionary used to store the training metadata;
"""
# Switch to evaluation mode
net.eval()
test_time = 0.0
scores = torch.Tensor()
all_labels = torch.LongTensor()
# Create the confusion matrix
n_class = len(json_data["classes"])
cm = np.zeros([n_class, n_class])
for images, labels in test_loader:
start_batch = time()
if args.gpu > 0:
images = Variable(images.cuda(async=True), volatile=True)
else:
images = Variable(images)
outputs = net(images)
scores = torch.cat((scores, outputs.cpu().data))
all_labels = torch.cat((all_labels, labels))
_, predicted = torch.max(outputs.data, 1)
test_time += time() - start_batch
# Update the confusion matrix
cm = updateCM(cm, predicted.cpu(), labels)
# Save the scores on the testing set
f_name = os.path.join(args.save_dir, json_data["impl"] + "_" +
json_data["model"] + "_" +
json_data["dataset"] + "_" +
json_data["UUID"] + ".scores")
torch.save(scores, f_name)
# Compute the testing statistics and print them
mc_stats = MulticlassStat(cm)
print('******Test Results******')
print('Time: ', round(test_time, 3), "seconds")
mc_stats.print_stats()
# Update the json data
json_data["test_stats"] = mc_stats.get_stats_dict()
json_data["test_stats"]["confusion_matrix"] = \
pd.DataFrame(cm).to_dict(orient='split')
json_data["test_stats"]["test_time"] = round(test_time, 6)
# Plot the ROCs
mc_stats.plot_multi_roc()
mc_stats.plot_scores_roc(all_labels.numpy(), scores.numpy())
def save_state(net, optimizer, json_data, epoch, dir):
    """ Checkpoints the training status.

    Writes <dir>/<impl>_<model>_<dataset>_<UUID>_epoch_<N>.state (network
    and optimizer state) plus the metadata dictionary as a sibling .json.

    Parameters:
    net -- Module object containing the network model;
    optimizer -- Optimizer object obtained from torch.optim
    json_data -- Dictionary used to store the training metadata
                 (mutated in place: last_epoch, UUID, optimizer
                 hyper-parameters, 'state' file name);
    epoch -- Actual training epoch (1-based; a fresh UUID is generated
             on the first epoch)
    dir -- Directory used to save the data
    """
    json_data["train_params"]["last_epoch"] = epoch
    epoch_str = '_epoch_' + str(epoch)
    if epoch == 1:
        # Generate the UUID (8 characters long)
        uid = shortuuid.uuid()[:8]
        json_data["UUID"] = uid
    else:
        uid = json_data["UUID"]
    f_name = os.path.join(dir, json_data["impl"] + "_" +
                          json_data["model"] + "_" +
                          json_data["dataset"] + "_" +
                          uid + epoch_str)
    # Save training state
    state = dict()
    state["params"] = net.state_dict()
    state["optim"] = optimizer.state_dict()
    torch.save(state, f_name + '.state')
    # Update train parameters from optimizer state.
    # (fix: was `t_param is not "params"` -- an identity comparison with a
    # string literal; also removed a leftover debug print of the state)
    for t_param in state["optim"]["param_groups"][0]:
        if t_param != "params":
            json_data["train_params"][t_param] = \
                state["optim"]["param_groups"][0][t_param]
    # Save experiment metadata (text mode: json.dump writes str, not bytes)
    json_data['state'] = os.path.split(f_name + '.state')[1]
    with open(f_name + ".json", 'w') as f:
        json.dump(json_data, f)
def save_params(net, json_data, dir):
    """ Saves the parameters of the trained network plus final metadata.

    Writes <prefix>.params (the network state_dict) and <prefix>.json,
    after dropping the checkpoint-only keys ('last_epoch', 'state').

    Parameters:
    net -- Module object containing the network model;
    json_data -- Dictionary used to store the training metadata;
    dir -- Directory used to save the data
    """
    if "last_epoch" in json_data["train_params"]:
        del json_data["train_params"]["last_epoch"]
    if "state" in json_data:
        del json_data["state"]
    f_name = os.path.join(dir, json_data["impl"] + "_" +
                          json_data["model"] + "_" +
                          json_data["dataset"] + "_" +
                          json_data["UUID"])
    # Save the network parameters.
    # (fix: was saved as '.state' while the metadata below recorded
    # '.params', so the --test loader could never find the file)
    torch.save(net.state_dict(), f_name + '.params')
    # Save experiment metadata (text mode: json.dump writes str, not bytes)
    json_data['params'] = os.path.split(f_name + '.params')[1]
    with open(f_name + ".json", 'w') as f:
        json.dump(json_data, f)
if __name__ == '__main__':
    # Visdom client used by main()/train()/validate() for live plots.
    vis = visdom.Visdom()
    parser = argparse.ArgumentParser(description='Train and test a network ' +
                                     'on the MINC datasets')
    # Data Options
    data_args = parser.add_argument_group('Data arguments')
    data_args.add_argument('--dataset', metavar='NAME', default='minc2500',
                           choices=['minc2500', 'minc'],
                           help='name of the dataset to be used' +
                           ' (default: minc2500)')
    data_args.add_argument('--data-root', metavar='DIR', help='path to ' +
                           'dataset (default: ./$(DATASET)_root)')
    data_args.add_argument('--save-dir', metavar='DIR', default='./results',
                           help='path to trained models (default: results/)')
    data_args.add_argument('--chk-dir', metavar='DIR', default='./checkpoints',
                           help='path to checkpoints (default: checkpoints/)')
    data_args.add_argument('--workers', metavar='NUM', type=int,
                           default=8, help='number of worker threads for' +
                           ' the data loader')
    # Model Options
    model_args = parser.add_argument_group('Model arguments')
    model_args.add_argument('-m', '--model', metavar='NAME',
                            default='tv-densenet121', type=str,
                            help='name of the network model to be used')
    model_args.add_argument('--classes', metavar='LIST',
                            default='[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,' +
                            '16,17,18,19,20,21,22]',
                            help='indices of the classes to be used for the' +
                            ' classification')
    # Training Options
    train_args = parser.add_argument_group('Training arguments')
    train_args.add_argument('--method', default='SGD', metavar='NAME',
                            help='training method to be used')
    train_args.add_argument('--gpu', type=int, default=1, metavar='NUM',
                            help='number of GPUs to use')
    train_args.add_argument('--epochs', default=20, type=int, metavar='NUM',
                            help='number of total epochs to run (default: 20)')
    train_args.add_argument('-b', '--batch-size', default=64, type=int,
                            metavar='NUM',
                            help='mini-batch size (default: 64)')
    train_args.add_argument('--momentum', type=float, default=0.9,
                            metavar='NUM', help='Momentum (default: 0.9)')
    train_args.add_argument('--w-decay', type=float, default=1e-4,
                            metavar='NUM', help='weight decay (default: 1e-4)')
    train_args.add_argument('--seed', type=int, metavar='NUM',
                            default=179424691,
                            help='random seed (default: 179424691)')
    # Learning Rate Scheduler Options
    lrate_args = parser.add_argument_group('Learning rate arguments')
    lrate_args.add_argument('--l-rate', type=float, default=0.1,
                            metavar='NUM', help='initial learning rate' +
                            ' (default: 0.1)')
    # (fix: help text claimed "default: constant" while the actual
    # default is "multistep")
    lrate_args.add_argument('--lrate-sched', default="multistep",
                            metavar="NAME", help="name of the learning " +
                            "rate scheduler (default: multistep)",
                            choices=['step', 'multistep', 'exponential',
                                     'constant'])
    lrate_args.add_argument('--milestones', default='[5,10]', metavar='LIST',
                            help='epoch indices for learning rate reduction' +
                            ' (multistep, default: [5,10])')
    lrate_args.add_argument('--gamma', type=float, default=0.1,
                            metavar='NUM', help='multiplicative factor of ' +
                            'learning rate decay (default: 0.1)')
    lrate_args.add_argument('--step-size', type=int, default=5,
                            metavar='NUM', help='period of learning rate ' +
                            'decay (step, default: 5)')
    # Other Options
    parser.add_argument('--resume', default='', type=str, metavar='JSON_FILE',
                        help='resume the training from the specified JSON ' +
                        'file (default: none)')
    parser.add_argument('--test', default='', type=str, metavar='JSON_FILE',
                        help='test the network from the specified JSON file')
    parser.add_argument('--net-list', action=PrintNetList,
                        help='Print the list of the available network ' +
                        'architectures')
    args = parser.parse_args()
    if not args.net_list:
        main()
import string, sys, glob, os
import collections
# Directory containing this module; used to locate config files.
HERE = os.path.dirname(__file__)
if HERE == '':
    HERE = '.'
# Python 2 print statement (this module predates Python 3).
print '############################ %s' % HERE
# Record type for one entry of config/testStandardNames.txt.
NT_esn = collections.namedtuple( 'errorShortName', ['name', 'long_name', 'description' ] )
class errorShortNames(object):
    """Parses the error short-name catalogue into NT_esn records (self.ll).

    Expected file format: sections introduced by a line starting with '=',
    followed by a long-name line and free-text description lines.
    """
    def __init__(self, file='config/testStandardNames.txt'):
        # NOTE(review): parameter `file` shadows the builtin and, unlike
        # testnames() below, the default path is not anchored at HERE.
        assert os.path.isfile(file), 'File %s not found' % file
        ii = map( string.strip, open(file).readlines() )
        # Group lines into sections: a '=' line starts a new group.
        ll = [[ii[0],]]
        for l in ii[1:]:
            if len(l) > 0 and l[0] == '=':
                ll.append( [l,] )
            else:
                ll[-1].append( l )
        self.ll = []
        for l in ll:
            if len(l) < 2:
                # Malformed section: echo it instead of recording it.
                print l
            else:
                self.ll.append( NT_esn( string.strip(l[0],'='), l[1][1:], string.join(l[2:]) ) )
def cmin(x, y):
    """Capped minimum: a negative cap *x* means "no limit", so *y* wins."""
    return y if x < 0 else min(x, y)
class LogSummariser(object):
    """Summarises CEDA-CC QC log files found in a directory.

    Command line (read from sys.argv by summarise): [-n NUM] [-html] DIR.
    Prints a per-error-code summary and, with -html, writes a small static
    report tree under ./html/.
    """
    def __init__(self):
        pass
    def summarise(self):
        """Parse the newest qcBatchLog* and every *__qclog_*.txt in the
        target directory; print counts per error code and example files,
        and optionally emit the HTML report (-html flag)."""
        # Options come before the directory, which is the last argument.
        args = sys.argv[1:-1]
        idir = sys.argv[-1]
        ndisp = 2
        dohtml = False
        while len(args) > 0:
            x = args.pop(0)
            if x == '-n':
                ndisp = int( args.pop(0) )
            elif x == '-html':
                dohtml = True
        assert os.path.isdir( idir ), 'Directory %s not found' % idir
        # Use the most recent batch log (sorted name order).
        fb = glob.glob( '%s/qcBatchLog*' % idir )
        fb.sort()
        fb = fb[-1]
        ii = open( fb )
        jj = []
        # Read the first 10 lines; only the first two are used below.
        for k in range(10):
            jj.append( string.strip(ii.readline()) )
        ii.close()
        # Split "timestamp INFO message" into timestamp and message parts.
        i0 = jj[0].index( ' INFO ' )
        tstart = jj[0][:i0]
        m1 = jj[0][i0+6:]
        m2 = jj[1][i0+6:]
        self.info = (tstart, m1, m2)
##2014-09-06 18:42:24,109 INFO Starting batch -- number of file: 338
##2014-09-06 18:42:24,109 INFO Source: /data/work/cordex/early/AFR-44i/SMHI/ECMWF-ERAINT/evaluation//.....
        # ee: code -> [count, {message -> [count, [file names]]}, short name]
        ee = {}
        fl = glob.glob( '%s/*__qclog_*.txt' % idir )
        self.write( 'Summarising error reports from %s log file' % len(fl) )
        nne = 0
        nerr = 0
        # ff: log file name -> list of (code, message, short name) tuples
        ff = {}
        for f in fl:
            nef = 0
            elist = []
            for l in open(f).readlines():
                fn = string.split(f,'/')[-1]
                if (l[:3] in ('C4.', 'C5.') and l.find('FAILED') > -1) or l.find('CDMSError:') > -1:
                    nef += 1
                    nerr += 1
                    bits = map( string.strip, string.split(l, ':' ) )
                    # Message text starts after the FAILED marker (if any).
                    if 'FAILED' in bits:
                        kb1 = bits.index('FAILED') + 1
                    else:
                        kb1 = 1
                    if len(bits) > kb1:
                        code = bits[0]
                        if kb1 == 3:
                            # bits[1] holds the bracketed error short name.
                            msg0 = string.join(bits[kb1:], ':' )
                            msg = string.strip( bits[1] + ' ' + msg0 )
                            se = bits[1][1:-1]
                        else:
                            msg = string.strip( string.join(bits[kb1:], ':' ) )
                            msg0 = msg
                            se = None
                        if code not in ee.keys():
                            ee[code] = [0,{msg:[0,[]]},se]
                        elif msg not in ee[code][1].keys():
                            ee[code][1][msg] = [0,[]]
                        ee[code][0] += 1
                        ee[code][1][msg][0] += 1
                        if ee[code][1][msg][0]:
                            ee[code][1][msg][1].append(fn)
                        elist.append( (code,msg,se) )
                    else:
                        # Unparseable line: just echo the split pieces.
                        self.write( str(bits) )
            if nef == 0:
                nne += 1
            else:
                ff[fn] = elist
        keys = ee.keys()
        keys.sort()
        for k in keys:
            ks = ee[k][1].keys()
            if len(ks) == 1:
                self.write( '%s: %s %s' % (k,ee[k][0],ks[0]) )
                # Show first set of files that failed [To show them all change to: range(len(ee[k][1][ks[0]][1])) ]
                for i in range(cmin(ndisp,ee[k][0])):
                    self.write( '    %s' % ee[k][1][ks[0]][1][i] )
            else:
                self.write( '%s: %s' % (k,ee[k][0]) )
                ks.sort()
                for k2 in ks:
                    self.write( '    --- %s: %s' % (k2,ee[k][1][k2][0]) )
                    # Show first set of files that failed [To show them all change to: range(len(ee[k][1][k2][1]))
                    for i in range(cmin(ndisp,ee[k][1][k2][0])):
                        self.write( '        %s' % ee[k][1][k2][1][i] )
        self.write( 'Number of files with no errors: %s' % nne )
        esum = (len(fl), nerr, nne )
        self.testnames()
        if dohtml:
            self.htmlout( ee, ff, esum )
            self.htmlEsn( )
    def testnames(self):
        """Load config/testStandardNames.txt (relative to HERE) into
        self.tests ([name, short, ..., description] lists) and
        self.testdict (name -> (short, description))."""
        tnfile = '%s/config/testStandardNames.txt' % HERE
        ii = open( tnfile ).readlines()
        self.tests = []
        thistest = None
        for l in ii:
            if l[0] == '=':
                # '=' starts a new test section; flush the previous one.
                name = string.strip(l)[1:-1]
                if thistest != None:
                    thistest.append(defn)
                    self.tests.append( thistest )
                thistest = [name,]
                defn = ''
            elif l[0] == '*':
                thistest.append( string.strip(l)[1:] )
            elif string.strip(l) != '':
                defn += l
        thistest.append(defn)
        self.tests.append( thistest )
        self.testdict = {}
        for t in self.tests:
            self.testdict[t[0]] = (t[1],t[2])
    def write( self, s ):
        """Output hook: print one line (Python 2 print statement)."""
        print s
    def htmlEsn( self ):
        """Write the error-short-name reference page to html/ref/."""
        esn = errorShortNames()
        cnt = '<h1>Error Short Names</h1>\n'
        for l in esn.ll:
            cnt += '''<a name="%s"><h2>%s</h2></a>
<p><i>%s</i><br/>
%s
</p>
''' % (l.name,l.name, l.long_name, l.description )
        self.__htmlPageWrite( 'html/ref/errorShortNames.html', cnt )
    def htmlout( self, ee, ff, esum ):
        """Write the HTML report tree: index page, per-error pages under
        html/errors/ and per-file pages under html/files/.

        Parameters:
        ee -- error dictionary built by summarise();
        ff -- file -> error list dictionary built by summarise();
        esum -- (files tested, total errors, error-free files) tuple.
        """
        if not os.path.isdir( 'html' ):
            os.mkdir( 'html' )
            os.mkdir( 'html/ref' )
            os.mkdir( 'html/files' )
            os.mkdir( 'html/errors' )
        about = """<p>Output from CEDA CC</p>
<p>This report contains a list of errors for each file, and a list of files associated with each error.</p>
"""
        data = """<p>%s<br/>
%s<br/>
Start of checks: %s</p>
""" % (self.info[1], self.info[2], self.info[0] )
        results = """<ul><li>Number of files tested: %s: <a href="files/findex.html">index by file</a></li>
<li>Number of errors: %s: <a href="errors/eindex.html">index by error</a></li>
<li>Number of error free files: %s</li></ul>
""" % esum
        keys = ee.keys()
        keys.sort()
        # NOTE(review): `list` shadows the builtin throughout this method.
        list = []
        for k in keys:
            if ee[k][2] == None:
                list.append( '<li>%s: %s</li>' % (k,ee[k][0]) )
            else:
                assert ee[k][2] in self.testdict.keys(), 'unrecognised test name: %s' % ee[k][2]
                list.append( '<li>%s [%s:%s]: %s</li>' % (self.testdict[ee[k][2]][0],k,ee[k][2],ee[k][0]) )
        res2 = '<ul>%s</ul>' % string.join(list, '\n' )
        results += res2
        maincontent = """<h1>The test</h1>
%s
<h1>The data</h1>
%s
<h1>Results</h1>
%s
""" % (about,data,results)
        self.__htmlPageWrite( 'html/index.html', maincontent )
        keys = ee.keys()
        keys.sort()
        eItemTmpl = '<li><a href="rep.%3.3i.html">%s [%s]</a>: %s</li>'
        list = []
        nn = 0
        for k in keys:
            ks = ee[k][1].keys()
            ks.sort()
            sect_esn = None
            for k2 in ks:
                nn += 1
                # The short name is the bracketed prefix of the message.
                this_esn = string.split(k2,']')[0][1:]
                if this_esn != sect_esn:
                    sect_esn = this_esn
                    list.append( '<h2>%s: %s<a href="../ref/errorShortNames.html#%s">(definition)</a></h2>' % (k,this_esn, this_esn) )
                list.append( eItemTmpl % (nn,k, ee[k][1][k2][0], k2 ) )
                l2 = []
                for ss in ee[k][1][k2][1]:
                    i0 = string.index( ss, '__qclog' )
                    fs = ss[:i0]
                    l2.append( '<li><a href="../files/rep.%s.html">%s</a></li>' % (fs,fs) )
                ePage = """<h1>Error %s </h1> %s <ul>%s</ul> """ % (nn,k2,string.join( l2, '\n' ) )
                efp = 'html/errors/rep.%3.3i.html' % nn
                self.__htmlPageWrite( efp, ePage )
        eIndexContent = """<h1>List of detected errors</h1>
<p>Code[number of files with error]: result <br/>
Click on the code to see a list of the files in which each error is detected.
</p>
<ul>%s</ul>
""" % (string.join(list, '\n' ) )
        self.__htmlPageWrite( 'html/errors/eindex.html', eIndexContent )
        keys = ff.keys()
        keys.sort()
        fItemTmpl = '<li><a href="%s">%s [%s]</a></li>'
        list = []
        # NOTE(review): iterates ff directly although `keys` was just
        # sorted above -- order comes from the final list.sort() instead.
        for k in ff:
            i0 = string.index( k, '__qclog' )
            fs = k[:i0]
            knc = fs + '.nc'
            hfn = 'rep.%s.html' % fs
            hfp = 'html/files/%s' % hfn
            list.append( fItemTmpl % (hfn, knc, len(ff[k]) ) )
            l2 = []
            for f in ff[k]:
                l2.append( '<li>%s: %s</li>' % f[:2] )
            fPage = """<h1>Errors in %s.nc</h1>
<ul>%s</ul>
""" % (fs,string.join( l2, '\n' ) )
            self.__htmlPageWrite( hfp, fPage )
        list.sort()
        fIndexContent = """<h1>List of files with errors</h1>
File name [number of errors]
<ul> %s </ul>
""" % string.join( list, '\n' )
        self.__htmlPageWrite( 'html/files/findex.html', fIndexContent )
    def __htmlPageWrite(self, pp, content):
        """Wrap *content* in a minimal HTML page and write it to *pp*."""
        ptmpl = """<html><body>%s</body></html>"""
        oo = open( pp, 'w' )
        oo.write( ptmpl % content )
        oo.close()
def summariseLogs():
    """Entry point: run a LogSummariser over the directory given on argv."""
    LogSummariser().summarise()
# (fix: removed dataset-extraction residue that was fused onto this line)
if __name__ == '__main__':
    summariseLogs()
import collections
# Directory containing this module; used to locate config files.
HERE = os.path.dirname(__file__)
if HERE == '':
    HERE = '.'
# Python 2 print statement (this module predates Python 3).
print '############################ %s' % HERE
# Record type for one entry of config/testStandardNames.txt.
NT_esn = collections.namedtuple( 'errorShortName', ['name', 'long_name', 'description' ] )
class errorShortNames(object):
    """Parses the error short-name catalogue into NT_esn records (self.ll).

    Expected file format: sections introduced by a line starting with '=',
    followed by a long-name line and free-text description lines.
    """
    def __init__(self, file='config/testStandardNames.txt'):
        # NOTE(review): parameter `file` shadows the builtin and, unlike
        # testnames() below, the default path is not anchored at HERE.
        assert os.path.isfile(file), 'File %s not found' % file
        ii = map( string.strip, open(file).readlines() )
        # Group lines into sections: a '=' line starts a new group.
        ll = [[ii[0],]]
        for l in ii[1:]:
            if len(l) > 0 and l[0] == '=':
                ll.append( [l,] )
            else:
                ll[-1].append( l )
        self.ll = []
        for l in ll:
            if len(l) < 2:
                # Malformed section: echo it instead of recording it.
                print l
            else:
                self.ll.append( NT_esn( string.strip(l[0],'='), l[1][1:], string.join(l[2:]) ) )
def cmin(x, y):
    """Capped minimum: a negative cap *x* means "no limit", so *y* wins."""
    return y if x < 0 else min(x, y)
class LogSummariser(object):
def __init__(self):
pass
    def summarise(self):
        """Parse the newest qcBatchLog* and every *__qclog_*.txt in the
        target directory; print counts per error code and example files,
        and optionally emit the HTML report (-html flag)."""
        # Options come before the directory, which is the last argument.
        args = sys.argv[1:-1]
        idir = sys.argv[-1]
        ndisp = 2
        dohtml = False
        while len(args) > 0:
            x = args.pop(0)
            if x == '-n':
                ndisp = int( args.pop(0) )
            elif x == '-html':
                dohtml = True
        assert os.path.isdir( idir ), 'Directory %s not found' % idir
        # Use the most recent batch log (sorted name order).
        fb = glob.glob( '%s/qcBatchLog*' % idir )
        fb.sort()
        fb = fb[-1]
        ii = open( fb )
        jj = []
        # Read the first 10 lines; only the first two are used below.
        for k in range(10):
            jj.append( string.strip(ii.readline()) )
        ii.close()
        # Split "timestamp INFO message" into timestamp and message parts.
        i0 = jj[0].index( ' INFO ' )
        tstart = jj[0][:i0]
        m1 = jj[0][i0+6:]
        m2 = jj[1][i0+6:]
        self.info = (tstart, m1, m2)
##2014-09-06 18:42:24,109 INFO Starting batch -- number of file: 338
##2014-09-06 18:42:24,109 INFO Source: /data/work/cordex/early/AFR-44i/SMHI/ECMWF-ERAINT/evaluation//.....
        # ee: code -> [count, {message -> [count, [file names]]}, short name]
        ee = {}
        fl = glob.glob( '%s/*__qclog_*.txt' % idir )
        self.write( 'Summarising error reports from %s log file' % len(fl) )
        nne = 0
        nerr = 0
        # ff: log file name -> list of (code, message, short name) tuples
        ff = {}
        for f in fl:
            nef = 0
            elist = []
            for l in open(f).readlines():
                fn = string.split(f,'/')[-1]
                if (l[:3] in ('C4.', 'C5.') and l.find('FAILED') > -1) or l.find('CDMSError:') > -1:
                    nef += 1
                    nerr += 1
                    bits = map( string.strip, string.split(l, ':' ) )
                    # Message text starts after the FAILED marker (if any).
                    if 'FAILED' in bits:
                        kb1 = bits.index('FAILED') + 1
                    else:
                        kb1 = 1
                    if len(bits) > kb1:
                        code = bits[0]
                        if kb1 == 3:
                            # bits[1] holds the bracketed error short name.
                            msg0 = string.join(bits[kb1:], ':' )
                            msg = string.strip( bits[1] + ' ' + msg0 )
                            se = bits[1][1:-1]
                        else:
                            msg = string.strip( string.join(bits[kb1:], ':' ) )
                            msg0 = msg
                            se = None
                        if code not in ee.keys():
                            ee[code] = [0,{msg:[0,[]]},se]
                        elif msg not in ee[code][1].keys():
                            ee[code][1][msg] = [0,[]]
                        ee[code][0] += 1
                        ee[code][1][msg][0] += 1
                        if ee[code][1][msg][0]:
                            ee[code][1][msg][1].append(fn)
                        elist.append( (code,msg,se) )
                    else:
                        # Unparseable line: just echo the split pieces.
                        self.write( str(bits) )
            if nef == 0:
                nne += 1
            else:
                ff[fn] = elist
        keys = ee.keys()
        keys.sort()
        for k in keys:
            ks = ee[k][1].keys()
            if len(ks) == 1:
                self.write( '%s: %s %s' % (k,ee[k][0],ks[0]) )
                # Show first set of files that failed [To show them all change to: range(len(ee[k][1][ks[0]][1])) ]
                for i in range(cmin(ndisp,ee[k][0])):
                    self.write( '    %s' % ee[k][1][ks[0]][1][i] )
            else:
                self.write( '%s: %s' % (k,ee[k][0]) )
                ks.sort()
                for k2 in ks:
                    self.write( '    --- %s: %s' % (k2,ee[k][1][k2][0]) )
                    # Show first set of files that failed [To show them all change to: range(len(ee[k][1][k2][1]))
                    for i in range(cmin(ndisp,ee[k][1][k2][0])):
                        self.write( '        %s' % ee[k][1][k2][1][i] )
        self.write( 'Number of files with no errors: %s' % nne )
        esum = (len(fl), nerr, nne )
        self.testnames()
        if dohtml:
            self.htmlout( ee, ff, esum )
            self.htmlEsn( )
def testnames(self):
tnfile = '%s/config/testStandardNames.txt' % HERE
ii = open( tnfile ).readlines()
self.tests = []
thistest = None
for l in ii:
if l[0] == '=':
name = string.strip(l)[1:-1]
if thistest != None:
thistest.append(defn)
self.tests.append( thistest )
thistest = [name,]
defn = ''
elif l[0] == '*':
thistest.append( string.strip(l)[1:] )
elif string.strip(l) != '':
defn += l
thistest.append(defn)
self.tests.append( thistest )
self.testdict = {}
for t in self.tests:
self.testdict[t[0]] = (t[1],t[2])
def write( self, s ):
print s
def htmlEsn( self ):
esn = errorShortNames()
cnt = '<h1>Error Short Names</h1>\n'
for l in esn.ll:
cnt += '''<a name="%s"><h2>%s</h2></a>
<p><i>%s</i><br/>
%s
</p>
''' % (l.name,l.name, l.long_name, l.description )
self.__htmlPageWrite( 'html/ref/errorShortNames.html', cnt )
def htmlout( self, ee, ff, esum ):
if not os.path.isdir( 'html' ):
os.mkdir( 'html' )
os.mkdir( 'html/ref' )
os.mkdir( 'html/files' )
os.mkdir( 'html/errors' )
about = """<p>Output from CEDA CC</p>
<p>This report contains a list of errors for each file, and a list of files associated with each error.</p>
"""
data = """<p>%s<br/>
%s<br/>
Start of checks: %s</p>
""" % (self.info[1], self.info[2], self.info[0] )
results = """<ul><li>Number of files tested: %s: <a href="files/findex.html">index by file</a></li>
<li>Number of errors: %s: <a href="errors/eindex.html">index by error</a></li>
<li>Number of error free files: %s</li></ul>
""" % esum
keys = ee.keys()
keys.sort()
list = []
for k in keys:
if ee[k][2] == None:
list.append( '<li>%s: %s</li>' % (k,ee[k][0]) )
else:
assert ee[k][2] in self.testdict.keys(), 'unrecognised test name: %s' % ee[k][2]
list.append( '<li>%s [%s:%s]: %s</li>' % (self.testdict[ee[k][2]][0],k,ee[k][2],ee[k][0]) )
res2 = '<ul>%s</ul>' % string.join(list, '\n' )
results += res2
maincontent = """<h1>The test</h1>
%s
<h1>The data</h1>
%s
<h1>Results</h1>
%s
""" % (about,data,results)
self.__htmlPageWrite( 'html/index.html', maincontent )
keys = ee.keys()
keys.sort()
eItemTmpl = '<li><a href="rep.%3.3i.html">%s [%s]</a>: %s</li>'
list = []
nn = 0
for k in keys:
ks = ee[k][1].keys()
ks.sort()
sect_esn = None
for k2 in ks:
nn += 1
this_esn = string.split(k2,']')[0][1:]
if this_esn != sect_esn:
sect_esn = this_esn
list.append( '<h2>%s: %s<a href="../ref/errorShortNames.html#%s">(definition)</a></h2>' % (k,this_esn, this_esn) )
list.append( eItemTmpl % (nn,k, ee[k][1][k2][0], k2 ) )
l2 = []
for ss in ee[k][1][k2][1]:
i0 = string.index( ss, '__qclog' )
fs = ss[:i0]
l2.append( '<li><a href="../files/rep.%s.html">%s</a></li>' % (fs,fs) )
ePage = """<h1>Error %s </h1> %s <ul>%s</ul> """ % (nn,k2,string.join( l2, '\n' ) )
efp = 'html/errors/rep.%3.3i.html' % nn
self.__htmlPageWrite( efp, ePage )
eIndexContent = """<h1>List of detected errors</h1>
<p>Code[number of files with error]: result <br/>
Click on the code to see a list of the files in which each error is detected.
</p>
<ul>%s</ul>
""" % (string.join(list, '\n' ) )
self.__htmlPageWrite( 'html/errors/eindex.html', eIndexContent )
keys = ff.keys()
keys.sort()
fItemTmpl = '<li><a href="%s">%s [%s]</a></li>'
list = []
for k in ff:
i0 = string.index( k, '__qclog' )
fs = k[:i0]
knc = fs + '.nc'
hfn = 'rep.%s.html' % fs
hfp = 'html/files/%s' % hfn
list.append( fItemTmpl % (hfn, knc, len(ff[k]) ) )
l2 = []
for f in ff[k]:
l2.append( '<li>%s: %s</li>' % f[:2] )
fPage = """<h1>Errors in %s.nc</h1>
<ul>%s</ul>
""" % (fs,string.join( l2, '\n' ) )
self.__htmlPageWrite( hfp, fPage )
list.sort()
fIndexContent = """<h1>List of files with errors</h1>
File name [number of errors]
<ul> %s </ul>
""" % string.join( list, '\n' )
self.__htmlPageWrite( 'html/files/findex.html', fIndexContent )
def __htmlPageWrite(self, pp, content):
ptmpl = """<html><body>%s</body></html>"""
oo = open( pp, 'w' )
oo.write( ptmpl % content )
oo.close()
def summariseLogs():
    """Module entry point: build a LogSummariser and run it over sys.argv."""
    summariser = LogSummariser()
    summariser.summarise()
if __name__ == '__main__':
summariseLogs() | 0.052936 | 0.190385 |
"""utility script to parse given filenames or string
"""
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import cssutils
import logging
import optparse
import sys
def main(args=None):
    """
    Parses given filename(s) or string or URL (using optional encoding) and
    prints the parsed style sheet to stdout.

    Redirect stdout to save CSS. Redirect stderr to save parser log infos.
    """
    usage = """usage: %prog [options] filename1.css [filename2.css ...]
[>filename_combined.css] [2>parserinfo.log] """
    optparser = optparse.OptionParser(usage=usage)
    optparser.add_option('-s', '--string', action='store_true', dest='string',
                         help='parse given string')
    optparser.add_option('-u', '--url', action='store', dest='url',
                         help='parse given url')
    optparser.add_option('-e', '--encoding', action='store', dest='encoding',
                         help='encoding of the file or override encoding found')
    optparser.add_option('-m', '--minify', action='store_true', dest='minify',
                         help='minify parsed CSS', default=False)
    optparser.add_option('-d', '--debug', action='store_true', dest='debug',
                         help='activate debugging output')
    options, params = optparser.parse_args(args)

    # At least one positional filename (or a -u URL) is required.
    if not params and not options.url:
        optparser.error("no filename given")

    # Debug mode wires the parser's logger up at DEBUG level.
    if options.debug:
        cssparser = cssutils.CSSParser(loglevel=logging.DEBUG)
    else:
        cssparser = cssutils.CSSParser()
    if options.minify:
        cssutils.ser.prefs.useMinified()

    if options.string:
        # Treat the positional arguments as one CSS source string.
        print(cssparser.parseString(''.join(params), encoding=options.encoding).cssText)
    elif options.url:
        print(cssparser.parseUrl(options.url, encoding=options.encoding).cssText)
    else:
        # Parse each file; per-file headers go to stderr so that stdout
        # remains valid, combinable CSS.
        for filename in params:
            sys.stderr.write('=== CSS FILE: "%s" ===\n' % filename)
            print(cssparser.parseFile(filename, encoding=options.encoding).cssText)
            print()
            sys.stderr.write('\n')
if __name__ == "__main__":
sys.exit(main()) | venv/lib/python3.6/site-packages/cssutils/scripts/cssparse.py | """utility script to parse given filenames or string
"""
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import cssutils
import logging
import optparse
import sys
def main(args=None):
    """
    Parses given filename(s) or string or URL (using optional encoding) and
    prints the parsed style sheet to stdout.

    Redirect stdout to save CSS. Redirect stderr to save parser log infos.
    """
    usage = """usage: %prog [options] filename1.css [filename2.css ...]
[>filename_combined.css] [2>parserinfo.log] """
    p = optparse.OptionParser(usage=usage)
    p.add_option('-s', '--string', action='store_true', dest='string',
                 help='parse given string')
    p.add_option('-u', '--url', action='store', dest='url',
                 help='parse given url')
    p.add_option('-e', '--encoding', action='store', dest='encoding',
                 help='encoding of the file or override encoding found')
    p.add_option('-m', '--minify', action='store_true', dest='minify',
                 help='minify parsed CSS', default=False)
    p.add_option('-d', '--debug', action='store_true', dest='debug',
                 help='activate debugging output')
    (options, params) = p.parse_args(args)
    # At least one positional filename (or a -u URL) is required.
    if not params and not options.url:
        p.error("no filename given")
    # NOTE: 'p' is rebound from the OptionParser to the CSSParser here.
    if options.debug:
        p = cssutils.CSSParser(loglevel=logging.DEBUG)
    else:
        p = cssutils.CSSParser()
    if options.minify:
        # Switch the shared serializer to minified output preferences.
        cssutils.ser.prefs.useMinified()
    if options.string:
        # Treat the positional arguments as one CSS source string.
        sheet = p.parseString(''.join(params), encoding=options.encoding)
        print(sheet.cssText)
    elif options.url:
        sheet = p.parseUrl(options.url, encoding=options.encoding)
        print(sheet.cssText)
    else:
        # Parse each file; per-file headers go to stderr so that stdout
        # remains valid, combinable CSS.
        for filename in params:
            sys.stderr.write('=== CSS FILE: "%s" ===\n' % filename)
            sheet = p.parseFile(filename, encoding=options.encoding)
            print(sheet.cssText)
            print()
            sys.stderr.write('\n')
if __name__ == "__main__":
    # Script entry point: propagate main()'s return value as the exit status.
    sys.exit(main())