CombinedText stringlengths 4 3.42M |
|---|
import argparse
import logging
from random import randrange
import string
class CaesarCipher(object):
    def __init__(self, message=None, encode=False, decode=False, offset=False,
                 alphabet=None):
        """Encode and decode strings using the Caesar shift cipher.

        Shifts the value of each letter by an arbitrary integer offset and
        transforms to uppercase.
        http://en.wikipedia.org/wiki/Caesar_cipher
        Do not ever use this for real communication, but definitely use it
        for fun events like the Hacker Olympics.

        Attributes:
            message: The string you wish to encode.
            encode: Boolean flag requesting encoding (command line switch).
            decode: Boolean flag requesting decoding (command line switch).
            offset: Integer by which to shift letter values; False means
                "choose a random offset" the first time the cipher runs.
            alphabet: A tuple containing the alphabet used by the cipher.
        """
        self.message = message
        self.encode = encode
        self.decode = decode
        self.offset = offset
        self.alphabet = alphabet
        if alphabet is None:
            # Bug fix: string.uppercase is Python 2 only and was removed in
            # Python 3; string.ascii_uppercase exists in both versions.
            self.alphabet = tuple(string.ascii_uppercase)

    def cipher(self):
        """Applies the Caesar shift cipher.

        Based on the attributes of the object, applies the Caesar shift
        cipher to the message attribute.

        Required attributes:
            message
            offset

        Returns:
            String with cipher applied.
        """
        if self.offset is False:
            self.offset = randrange(5, 25)
            logging.info("Random offset selected: {0}".format(self.offset))
        logging.info("Encoding message: {0}".format(self.message))
        encoded_message_list = list(self.message.upper())
        # Bug fix: wrap by the actual alphabet length instead of the
        # hard-coded 26 so custom alphabets of any size work correctly.
        size = len(self.alphabet)
        for i, letter in enumerate(encoded_message_list):
            if letter.isupper():
                logging.debug("Encoding letter: {0}".format(letter))
                value = self.alphabet.index(letter)
                cipher_value = (value + self.offset) % size
                logging.debug("Cipher value: {0}".format(cipher_value))
                encoded_message_list[i] = self.alphabet[cipher_value]
                logging.debug("Encoded letter: {0}".format(letter))
        self.message = ''.join(encoded_message_list)
        return self.message

    @property
    def encoded(self):
        """Message encoded with the Caesar shift cipher.

        Returns:
            String encoded with cipher.
        """
        return self.cipher()

    @property
    def decoded(self):
        """Message decoded with the Caesar shift cipher.

        Inverse operation of encoding: negates the stored offset (the
        negation persists on the instance, matching original behavior)
        and re-applies the cipher.

        Returns:
            String decoded with cipher.
        """
        self.offset = self.offset * -1
        return self.cipher()
class CaesarCipherError(Exception):
    """Error raised for invalid CaesarCipher usage.

    Logs the message at ERROR level and carries it on the exception.
    """

    def __init__(self, message):
        # Bug fix: chain to Exception.__init__ so str(err) reports the
        # message instead of silently discarding it.
        super(CaesarCipherError, self).__init__(message)
        logging.error(message)
# Logging configuration
logging.basicConfig(level=logging.INFO, format='%(message)s')

# Parser configuration.  Note the space between "an" and "English": the
# original adjacent string literals concatenated to "Englishalphabet".
parser = argparse.ArgumentParser(
    description="Caesar Cipher - encode and decode messages with an "
                "English alphabet offset.",
    epilog="Written by Rob Spectre for Hacker Olympics London.")
parser.add_argument('message',
                    help="Message to be encoded or decoded.")
parser.add_argument('-e', '--encode', action="store_true",
                    help="Encode this message.")
parser.add_argument('-d', '--decode', action="store_true",
                    help="Decode this message.")
parser.add_argument('-o', '--offset',
                    help="Integer offset to encode/decode message against.")
parser.add_argument('-a', '--alphabet',
                    help="String of alphabet you want to use to apply the "
                         "cipher against.")

if __name__ == "__main__":
    # Parse directly into a CaesarCipher instance; attributes that already
    # exist on the namespace (e.g. offset=False) are left untouched when
    # the corresponding switch is absent.
    caesar_cipher = CaesarCipher()
    parser.parse_args(namespace=caesar_cipher)
    if caesar_cipher.offset:
        caesar_cipher.offset = int(caesar_cipher.offset)
    if caesar_cipher.offset is False and caesar_cipher.decode is True:
        raise CaesarCipherError("Message cannot be decoded without "
                                "selecting an offset. Please try "
                                "again with -o switch.")
    if caesar_cipher.encode is True and caesar_cipher.decode is True:
        raise CaesarCipherError("Please select to encode *or* decode message, "
                                "not both.")
    # Bug fix: print as a function -- the Python 2 print statement is a
    # SyntaxError on Python 3.
    if caesar_cipher.encode is True:
        print("Encoded message: {0}".format(caesar_cipher.encoded))
    if caesar_cipher.decode is True:
        print("Decoded message: {0}".format(caesar_cipher.decoded))
Use `print` as a function instead of a statement.
import argparse
import logging
from random import randrange
import string
class CaesarCipher(object):
    def __init__(self, message=None, encode=False, decode=False, offset=False,
                 alphabet=None):
        """Encode and decode strings using the Caesar shift cipher.

        Shifts the value of each letter by an arbitrary integer offset and
        transforms to uppercase.
        http://en.wikipedia.org/wiki/Caesar_cipher
        Do not ever use this for real communication, but definitely use it
        for fun events like the Hacker Olympics.

        Attributes:
            message: The string you wish to encode.
            encode: Boolean flag requesting encoding (command line switch).
            decode: Boolean flag requesting decoding (command line switch).
            offset: Integer by which to shift letter values; False means
                "choose a random offset" the first time the cipher runs.
            alphabet: A tuple containing the alphabet used by the cipher.
        """
        self.message = message
        self.encode = encode
        self.decode = decode
        self.offset = offset
        self.alphabet = alphabet
        if alphabet is None:
            # Bug fix: string.uppercase is Python 2 only and was removed in
            # Python 3; string.ascii_uppercase exists in both versions.
            self.alphabet = tuple(string.ascii_uppercase)

    def cipher(self):
        """Applies the Caesar shift cipher.

        Based on the attributes of the object, applies the Caesar shift
        cipher to the message attribute.

        Required attributes:
            message
            offset

        Returns:
            String with cipher applied.
        """
        if self.offset is False:
            self.offset = randrange(5, 25)
            logging.info("Random offset selected: {0}".format(self.offset))
        logging.info("Encoding message: {0}".format(self.message))
        encoded_message_list = list(self.message.upper())
        # Bug fix: wrap by the actual alphabet length instead of the
        # hard-coded 26 so custom alphabets of any size work correctly.
        size = len(self.alphabet)
        for i, letter in enumerate(encoded_message_list):
            if letter.isupper():
                logging.debug("Encoding letter: {0}".format(letter))
                value = self.alphabet.index(letter)
                cipher_value = (value + self.offset) % size
                logging.debug("Cipher value: {0}".format(cipher_value))
                encoded_message_list[i] = self.alphabet[cipher_value]
                logging.debug("Encoded letter: {0}".format(letter))
        self.message = ''.join(encoded_message_list)
        return self.message

    @property
    def encoded(self):
        """Message encoded with the Caesar shift cipher.

        Returns:
            String encoded with cipher.
        """
        return self.cipher()

    @property
    def decoded(self):
        """Message decoded with the Caesar shift cipher.

        Inverse operation of encoding: negates the stored offset (the
        negation persists on the instance, matching original behavior)
        and re-applies the cipher.

        Returns:
            String decoded with cipher.
        """
        self.offset = self.offset * -1
        return self.cipher()
class CaesarCipherError(Exception):
    """Error raised for invalid CaesarCipher usage.

    Logs the message at ERROR level and carries it on the exception.
    """

    def __init__(self, message):
        # Bug fix: chain to Exception.__init__ so str(err) reports the
        # message instead of silently discarding it.
        super(CaesarCipherError, self).__init__(message)
        logging.error(message)
# Logging configuration
logging.basicConfig(level=logging.INFO, format='%(message)s')

# Parser configuration.  Note the space between "an" and "English": the
# original adjacent string literals concatenated to "Englishalphabet".
parser = argparse.ArgumentParser(
    description="Caesar Cipher - encode and decode messages with an "
                "English alphabet offset.",
    epilog="Written by Rob Spectre for Hacker Olympics London.")
parser.add_argument('message',
                    help="Message to be encoded or decoded.")
parser.add_argument('-e', '--encode', action="store_true",
                    help="Encode this message.")
parser.add_argument('-d', '--decode', action="store_true",
                    help="Decode this message.")
# Bug fix: type=int makes argparse reject a non-integer offset with a clean
# usage error instead of the uncaught ValueError the manual int() raised.
parser.add_argument('-o', '--offset', type=int,
                    help="Integer offset to encode/decode message against.")
parser.add_argument('-a', '--alphabet',
                    help="String of alphabet you want to use to apply the "
                         "cipher against.")

if __name__ == "__main__":
    # Parse directly into a CaesarCipher instance; attributes that already
    # exist on the namespace (e.g. offset=False) are left untouched when
    # the corresponding switch is absent, so the sentinel check holds.
    caesar_cipher = CaesarCipher()
    parser.parse_args(namespace=caesar_cipher)
    if caesar_cipher.offset is False and caesar_cipher.decode is True:
        raise CaesarCipherError("Message cannot be decoded without "
                                "selecting an offset. Please try "
                                "again with -o switch.")
    if caesar_cipher.encode is True and caesar_cipher.decode is True:
        raise CaesarCipherError("Please select to encode *or* decode message, "
                                "not both.")
    if caesar_cipher.encode is True:
        print("Encoded message: {0}".format(caesar_cipher.encoded))
    if caesar_cipher.decode is True:
        print("Decoded message: {0}".format(caesar_cipher.decoded))
|
# -*- coding: utf-8 -*-
# Copyright 2011 Lucas Clemente Vella
# Software under Affero GPL license, see LICENSE.txt
import itertools
import datetime
import json
import ssl
import gevent
import http_cli
import hashlib
import locale
import gevent
from diggems import settings
from wsgiref.handlers import format_date_time
from time import mktime
from datetime import datetime, time
from django.shortcuts import get_object_or_404, render_to_response
from django.http import *
from django.db import IntegrityError, transaction
from django.db.models import Q
from django.template import Context, RequestContext, loader, TemplateDoesNotExist
from django.template.defaultfilters import floatformat
from django.utils.html import escape, mark_safe
from django.utils.translation import pgettext
from django.core.exceptions import ObjectDoesNotExist
from game_helpers import *
from models import *
from diggems.utils import gen_token, true_random
from django.utils.translation import to_locale, get_language
from async_events import channel
def get_user_info(user, with_private=False):
    """Build a JSON-serializable dict describing *user*.

    Uses the Facebook name/picture/profile links when the profile is linked
    to Facebook, otherwise a guest name with a Gravatar identicon.  When
    *with_private* is true, auth details and score statistics are included.
    """
    if user.facebook:
        info = {
            'name': user.facebook.name,
            'pic_url': '//graph.facebook.com/{}/picture'.format(user.facebook.uid),
            'profile_url': '//facebook.com/{}/'.format(user.facebook.uid)
        }
        if with_private:
            info['auth'] = {'fb': {'uid': user.facebook.uid}}
    else:
        info = {
            'name': user.guest_name(),
            # NOTE(review): hashlib.md5 requires bytes on Python 3 --
            # this assumes user.id is a Python 2 str; confirm before porting.
            'pic_url': '//www.gravatar.com/avatar/{}.jpg?d=identicon'.format(hashlib.md5(user.id).hexdigest()),
        }
    # For now, score info is private
    if with_private:
        stats = {
            'score': user.total_score,
            'victories': user.games_won,
        }
        try:
            stats['win_ratio'] = floatformat((float(user.games_won) / user.games_finished) * 100.0)
        except ZeroDivisionError:
            # No finished games yet: simply omit the ratio.
            pass
        info['stats'] = stats
    return info
def render_with_extra(template_name, user, data=None, status=200):
    """Render *template_name* with *data* plus site-wide extras.

    Always injects FB_APP_ID and the (private) info of *user* into the
    template context before rendering.

    Bug fix: the original used a mutable default argument (``data={}``);
    any mutation of that dict would leak state across calls, so the
    default is now created per call.
    """
    if data is None:
        data = {}
    t = loader.get_template(template_name)
    c = Context(data)
    extra = {'FB_APP_ID': settings.FB_APP_ID,
             'user': get_user_info(user, True)
             }
    c.update(extra)
    return HttpResponse(t.render(c), status=status)
def fb_channel(request):
    """Serve the Facebook JS-SDK channel file with year-long caching."""
    resp = HttpResponse(
        '<script src="//connect.facebook.net/{}/all.js"></script>'.format(pgettext("Facebook", "en_US")),
        content_type='text/html')
    secs = 60*60*24*365
    resp['Pragma'] = 'public'
    resp['Cache-Control'] = 'max-age=' + str(secs)
    # Bug fix: the module-level "from datetime import datetime" shadows
    # "import datetime", so datetime.datetime.now() raised AttributeError.
    # Use the imported datetime class directly and a local timedelta.
    from datetime import timedelta
    far_future = datetime.now() + timedelta(seconds=secs)
    resp['Expires'] = format_date_time(mktime(far_future.timetuple()))
    return resp
@transaction.commit_on_success
def fb_login(request):
    """Log the user in via a Facebook OAuth token.

    Validates the token against the Graph API, links or creates the local
    UserProfile (merging a pre-existing anonymous profile when possible),
    broadcasts the refreshed public user info to every game channel the
    user plays in, and returns the full private user info as JSON.
    """
    token = request.POST.get('token')
    if not token:
        return HttpResponseBadRequest()
    try:
        with http_cli.get_conn('https://graph.facebook.com/').get('me?access_token=' + token) as res:
            fb_user = json.load(res)
    except ssl.SSLError:
        # TODO: Log this error? What to do when Facebook
        # connection has been compromised?
        return HttpResponseServerError()
    fb, created = FacebookCache.objects.get_or_create(uid=fb_user['id'])
    fb.name = fb_user['name']
    fb.save()
    old_user_id = request.session.get('user_id')
    try:
        profile = UserProfile.objects.get(facebook=fb)
        if old_user_id and old_user_id != profile.id:
            try:
                old_profile = UserProfile.objects.get(pk=old_user_id)
                if not old_profile.user and not old_profile.facebook:
                    # Fold the anonymous session profile into the FB one.
                    profile.merge(old_profile)
            except UserProfile.DoesNotExist:
                pass
    except UserProfile.DoesNotExist:
        # First time login with Facebook
        try:
            profile = UserProfile.objects.get(pk=old_user_id)
            if not profile.user and not profile.facebook:
                profile.facebook = fb
            else:
                raise Exception()
        except:
            profile = UserProfile(id=gen_token(), facebook=fb)
        profile.save()
    request.session['user_id'] = profile.id
    # Just public user info
    user_info = json.dumps(get_user_info(profile, False))
    # Send this new user info to every channel where user is a player:
    for p in (1, 2):
        # Games where player p is this user
        query = Game.objects.filter(**{'p{}__user__exact'.format(p): profile}).values('id')
        # Bug fix: str.join takes a single iterable of strings; the original
        # passed two positional arguments (the first an int), which raises
        # TypeError at runtime.
        msg = '\n'.join((str(p), user_info))
        # TODO: find a way to make this a single query, because I could not.
        # TODO: make this asyncronous.
        for game in query:
            channel.post_update(game.channel(), 'p', msg)
    # Full user info
    user_info = json.dumps(get_user_info(profile, True))
    return HttpResponse(user_info, content_type='application/json')
@transaction.commit_on_success
def fb_logout(request):
    """Detach the session from its logged-in profile.

    Replaces the session's profile with a brand-new anonymous one and
    returns its info as JSON; 403 when the current profile is already
    anonymous (nothing to log out from).
    """
    profile = UserProfile.get(request.session)
    if profile.user or profile.facebook:
        # Start a fresh anonymous profile instead of mutating the old one.
        profile = UserProfile(id=gen_token())
        profile.save()
        request.session['user_id'] = profile.id
        user_info = json.dumps(get_user_info(profile, True))
        return HttpResponse(user_info, content_type='application/json')
    return HttpResponseForbidden()
def adhack(request, ad_id):
    """Render the ad iframe page for the configured slot *ad_id*."""
    ad_id = int(ad_id)
    return render_to_response('adhack.html',
                              Context({'GOOGLE_AD_ID': settings.GOOGLE_AD_ID,
                                       'GOOGLE_AD_SLOT': settings.GOOGLE_AD_SLOTS[ad_id]}),
                              content_type='text/html; charset=utf-8')
def index(request):
    """Front page: the user's running games plus up to 5 random open games."""
    profile = UserProfile.get(request.session)
    # Games this user participates in that are not yet finished (state < 3).
    playing_now = Game.objects.filter(Q(p1__user=profile) | Q(p2__user=profile)).exclude(state__gte=3)
    # Random sample of open public (token-less) games created by others.
    chosen = Game.objects.filter(state__exact=0, token__isnull=True).exclude(p1__user__exact=profile).order_by('?')[:5]
    new_games = []
    for game in chosen:
        info = {'id': game.id,
                'user': get_user_info(game.p1.user)
                }
        new_games.append(info)
    context = {'your_games': playing_now, 'new_games': new_games, 'like_url': settings.FB_LIKE_URL}
    return render_with_extra('index.html', profile, context)
@transaction.commit_on_success
def new_game(request):
    """Create a fresh 16x16 mine with 51 gems and redirect to its page.

    GET requests receive a self-submitting redirect page so robots do not
    create games; only a POST actually creates one.
    """
    if request.method != 'POST':
        c = Context({'url': '/new_game/'})
        return render_to_response('post_redirect.html', c)
    profile = UserProfile.get(request.session)
    # Board encoding: 0 = empty, 9 = gem, 1..8 = neighboring-gem count.
    mine = [[0] * 16 for i in xrange(16)]
    indexes = list(itertools.product(xrange(16), repeat=2))
    # true_random: strong randomness so the board is not predictable.
    gems = true_random.sample(indexes, 51)
    for (m, n) in gems:
        mine[m][n] = 9
    for m, n in indexes:
        if mine[m][n] == 0:
            def inc_count(x, y):
                # Invoked for each neighbor of (m, n): count adjacent gems.
                if mine[x][y] == 9:
                    mine[m][n] += 1
            for_each_surrounding(m, n, inc_count)
    p1 = Player(user=profile)
    p1.save()
    game = Game()
    game.mine = mine_encode(mine)
    if request.REQUEST.get('private', default=False):
        # Private games are joinable only with this token.
        game.token = gen_token()
    game.p1 = p1
    game.save()
    return HttpResponseRedirect('/game/' + str(game.id))
@transaction.commit_on_success
def join_game(request, game_id):
    """Join an open game as player 2 and notify the game channel."""
    profile = UserProfile.get(request.session)
    # If game is too old, render 404 game error screen
    try:
        game = Game.objects.get(pk=int(game_id))
    except ObjectDoesNotExist:
        return render_with_extra('game404.html', profile, status=404)
    # If already playing this game, redirect to game screen
    if game.what_player(profile):
        return HttpResponseRedirect('/game/' + str(game.id))
    # If user cannot start this game, then 403
    token = request.REQUEST.get('token')
    if game.state != 0 or (game.token and
                           token != game.token):
        return render_with_extra('game403.html', profile, status=403)
    # If we got here via GET, return a page that will make the client/user
    # retry via POST. Done so that Facebook and other robots do not join
    # the game in place of a real user.
    if request.method != 'POST':
        url = '/game/{}/join/'.format(game_id)
        if token:
            url = '{}?token={}'.format(url, token)
        c = Context({'url': url})
        return render_to_response('post_redirect.html', c)
    p2 = Player(user=profile)
    p2.save()
    game.p2 = p2
    game.state = 1
    game.save()
    # Commit before broadcasting so channel readers see the fresh state.
    transaction.commit()
    ch_id = game.channel()
    # Game state change
    channel.post_update(ch_id, 'g', str(game.state), game.seq_num)
    # Player info display
    outdata = '2\n' + json.dumps(get_user_info(profile))
    channel.post_update(ch_id, 'p', outdata)
    return HttpResponseRedirect('/game/' + game_id)
@transaction.commit_on_success
def abort_game(request, game_id):
    """Delete a not-yet-started game (state 0) owned by the requester."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['POST'])
    game = get_object_or_404(Game, pk=game_id)
    if game.state == 0:
        profile = UserProfile.get(request.session)
        pdata = game.what_player(profile)
        if pdata:
            # pdata is (player_number, Player row): drop both row and game.
            pdata[1].delete()
            game.delete()
            return HttpResponseRedirect('/')
    return HttpResponseForbidden()
@transaction.commit_on_success
def claim_game(request, game_id):
    """Claim a turn or a win after a timeout, or give up a running game.

    POST parameter 'terminate':
      'y' -- claim the win because the opponent timed out;
      'z' -- give up, awarding the points and the win to the opponent;
      otherwise just take over the turn on timeout.
    """
    if request.method != 'POST':
        return HttpResponseNotAllowed(['POST'])
    game = get_object_or_404(Game, pk=game_id)
    if game.state not in (1,2):
        # Only running games (states 1/2 = whose turn it is) can be claimed.
        return HttpResponseForbidden()
    profile = UserProfile.get(request.session)
    pdata = game.what_player(profile)
    if pdata:
        my_number, me = pdata
    else:
        return HttpResponseForbidden()
    term = request.POST.get('terminate')
    # Unless giving up, a claim is only valid when it is the opponent's
    # turn and the move timer has expired.
    if term != 'z' and (my_number == game.state or game.timeout_diff() > 0):
        return HttpResponseForbidden()
    if term == 'y':
        # Opponent abandoned: claimer takes all gems revealed so far
        # (tiles 19 and 20 are gems dug by player 1 and 2 respectively).
        points = game.mine.count(tile_encode(19)) + game.mine.count(tile_encode(20))
        profile.total_score += points
        profile.save()
        game.state = my_number + 4
    # If one of the players give up...
    elif term == 'z':
        for pnum,player in ((1,game.p1),(2,game.p2)):
            points = game.mine.count(tile_encode(pnum + 18))
            prof = player.user
            prof.total_score += points
            prof.games_finished += 1
            if pnum != my_number:
                # The quitter's opponent takes the win.
                prof.games_won += 1
                game.state = pnum + 4
            prof.save()
    else:
        # Plain timeout claim: just take over the turn.
        game.state = my_number;
    game.save()
    # Commit before broadcasting so channel readers see the fresh state.
    transaction.commit()
    channel.post_update(game.channel(), 'g', str(game.state), game.seq_num)
    if term == 'y':
        gevent.spawn(publish_score, me.user)
    elif term == 'z':
        gevent.spawn(publish_score, game.p1.user)
        gevent.spawn(publish_score, game.p2.user)
    return HttpResponse()
@transaction.commit_on_success
def game(request, game_id):
    """Render the main game screen with everything the client needs."""
    # TODO: maybe control who can watch a game
    profile = UserProfile.get(request.session)
    #game = get_object_or_404(Game, pk=game_id)
    try:
        game = Game.objects.get(pk=int(game_id))
    except ObjectDoesNotExist:
        return render_with_extra('game404.html', profile, status=404)
    # NOTE(review): user_id is computed but never used below.
    user_id = profile.display_name()
    data = {'state': game.state,
            'game_id': game_id,
            'seq_num': game.seq_num,
            # NOTE(review): this passes the bound method, not its result --
            # confirm the template expects a callable here.
            'channel': game.channel,
            'p1_last_move': game.p1.last_move,
            'player_info': {1: get_user_info(game.p1.user),
                            2: None},
            }
    if(game.p2):
        data['p2_last_move'] = game.p2.last_move
        data['player_info'][2] = get_user_info(game.p2.user)
    if (game.state <= 2):
        # Game still running: expose the remaining move time.
        data['time_left'] = max(0, game.timeout_diff())
    pdata = game.what_player(profile)
    if pdata:
        my_number, me = pdata
        data['tnt_used'] = not me.has_tnt
        data['player'] = my_number
        me.save()
    if game.state == 0:
        protocol = 'https' if request.is_secure() else 'http'
        data['base_url'] = '{}://{}'.format(protocol, request.get_host())
    if game.state == 0 and game.token: # Uninitialized private game
        data['token'] = game.token
    else:
        # Send the masked mine (fully revealed once the game is decided).
        masked = mine_mask(game.mine, game.state in (3, 4))
        if masked.count('?') != 256:
            data['mine'] = masked
    return render_with_extra('game.html', profile, data)
@transaction.commit_on_success
def move(request, game_id):
    """Apply one move (dig or TNT blast) for the player whose turn it is.

    Reveals the chosen tile(s), flood-fills empty regions, marks dug gems
    per player, switches the turn (unless a gem was dug without TNT),
    finishes the game when one player reaches 26+ of the 51 gems, and
    broadcasts the resulting diff to the game channel.
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    game = get_object_or_404(Game, pk=game_id)
    pdata = game.what_player(UserProfile.get(request.session))
    if not pdata or pdata[0] != game.state:
        # Not a player of this game, or not this player's turn.
        return HttpResponseForbidden()
    player, me = pdata
    try:
        m = int(request.REQUEST['m'])
        n = int(request.REQUEST['n'])
        if not (0 <= m <= 15 and 0 <= n <= 15):
            raise ValueError
    except:
        return HttpResponseBadRequest()
    mine = mine_decode(game.mine)
    to_reveal = [(m, n)]
    tnt_used = False
    if request.REQUEST.get('tnt') == 'y':
        if not me.has_tnt:
            return HttpResponseBadRequest()
        me.has_tnt = False
        # TNT reveals the 5x5 square centered at (m, n), clipped to board.
        to_reveal = itertools.product(xrange(max(m-2, 0),
                                             min(m+3, 16)),
                                      xrange(max(n-2, 0),
                                             min(n+3, 16)))
        tnt_used = True
    revealed = []
    def reveal(m, n):
        # Tiles >= 10 are already revealed; +10 marks a tile as revealed.
        if mine[m][n] >= 10:
            return
        old = mine[m][n]
        mine[m][n] += 10
        if mine[m][n] == 19 and player == 2:
            # 19 vs 20 records which player dug the gem.
            mine[m][n] = 20
        revealed.append((m, n, tile_mask(mine[m][n])))
        if old == 0:
            # Empty tile: flood-fill its neighborhood.
            for_each_surrounding(m, n, reveal)
    for x, y in to_reveal:
        reveal(x, y)
    if not revealed:
        return HttpResponseBadRequest()
    if mine[m][n] <= 18 or tnt_used:
        # No gem dug by hand: turn passes to the other player.
        game.state = (game.state % 2) + 1
    new_mine = mine_encode(mine)
    points = [new_mine.count(tile_encode(19)), new_mine.count(tile_encode(20))]
    if points[0] >= 26 or points[1] >= 26:
        # Unbeatable majority of the 51 gems: current player wins (3/4).
        game.state = player + 2
    coded_move = '%s%x%x' % ('b' if tnt_used else 'd', m, n)
    me.last_move = coded_move
    game.mine = new_mine
    game.save()
    me.save()
    if game.state >= 3:  # Game is over
        # Bug fix: only Q is imported from django.db.models at module level,
        # so the F() expressions below raised NameError when a game ended.
        from django.db.models import F
        remaining = 51 - points[0] - points[1]
        points[0 if points[0] > points[1] else 1] += remaining
        for user, idx in ((game.p1.user, 0), (game.p2.user, 1)):
            user.games_finished = F('games_finished') + 1
            user.total_score = F('total_score') + points[idx]
            if game.state == (idx + 3):
                user.games_won = F('games_won') + 1
            user.save()
        # Reveal the remaining hidden gems to both players.
        for m, n in itertools.product(xrange(0, 16), xrange(0, 16)):
            if mine[m][n] == 9:
                revealed.append((m, n, 'x'))
    result = itertools.chain((str(game.state), str(player), coded_move), ['%d,%d:%c' % x for x in revealed])
    result = '\n'.join(result)
    # Everything is OK until now, so commit DB transaction
    transaction.commit()
    # Post the update to the users...
    channel.post_update(game.channel(), 'g', result, game.seq_num)
    # ... and then publish the scores on FB, if game is over.
    if game.state >= 3:
        gevent.spawn(publish_score, game.p1.user)
        gevent.spawn(publish_score, game.p2.user)
    return HttpResponse()
def donate(request):
    """Render the donation page for the current session's profile."""
    extra = {'like_url': settings.FB_LIKE_URL}
    current_profile = UserProfile.get(request.session)
    return render_with_extra('donate.html', current_profile, extra)
def info(request, page):
    """Render a static info page, preferring the user's language.

    Tries the request's language first, then falls back to English.

    Bug fixes: the original fell off the end of the loop (returning None,
    which Django rejects) when no template existed for either locale --
    now it raises Http404; the loop variable is also renamed so it no
    longer shadows the module-level ``locale`` import.
    """
    if page not in info.existing_pages:
        raise Http404
    for lang in (request.LANGUAGE_CODE, 'en'):
        try:
            return render_with_extra('{}/{}.html'.format(lang, page), UserProfile.get(request.session))
        except TemplateDoesNotExist:
            continue
    raise Http404
# Whitelist of pages this view may render.
info.existing_pages = frozenset(('about', 'howtoplay', 'sourcecode', 'contact', 'privacy', 'terms'))
One more fix: pass a single tuple to `str.join` instead of two positional arguments.
# -*- coding: utf-8 -*-
# Copyright 2011 Lucas Clemente Vella
# Software under Affero GPL license, see LICENSE.txt
import itertools
import datetime
import json
import ssl
import gevent
import http_cli
import hashlib
import locale
import gevent
from diggems import settings
from wsgiref.handlers import format_date_time
from time import mktime
from datetime import datetime, time
from django.shortcuts import get_object_or_404, render_to_response
from django.http import *
from django.db import IntegrityError, transaction
from django.db.models import Q
from django.template import Context, RequestContext, loader, TemplateDoesNotExist
from django.template.defaultfilters import floatformat
from django.utils.html import escape, mark_safe
from django.utils.translation import pgettext
from django.core.exceptions import ObjectDoesNotExist
from game_helpers import *
from models import *
from diggems.utils import gen_token, true_random
from django.utils.translation import to_locale, get_language
from async_events import channel
def get_user_info(user, with_private=False):
    """Build a JSON-serializable dict describing *user*.

    Uses the Facebook name/picture/profile links when the profile is linked
    to Facebook, otherwise a guest name with a Gravatar identicon.  When
    *with_private* is true, auth details and score statistics are included.
    """
    if user.facebook:
        info = {
            'name': user.facebook.name,
            'pic_url': '//graph.facebook.com/{}/picture'.format(user.facebook.uid),
            'profile_url': '//facebook.com/{}/'.format(user.facebook.uid)
        }
        if with_private:
            info['auth'] = {'fb': {'uid': user.facebook.uid}}
    else:
        info = {
            'name': user.guest_name(),
            # NOTE(review): hashlib.md5 requires bytes on Python 3 --
            # this assumes user.id is a Python 2 str; confirm before porting.
            'pic_url': '//www.gravatar.com/avatar/{}.jpg?d=identicon'.format(hashlib.md5(user.id).hexdigest()),
        }
    # For now, score info is private
    if with_private:
        stats = {
            'score': user.total_score,
            'victories': user.games_won,
        }
        try:
            stats['win_ratio'] = floatformat((float(user.games_won) / user.games_finished) * 100.0)
        except ZeroDivisionError:
            # No finished games yet: simply omit the ratio.
            pass
        info['stats'] = stats
    return info
def render_with_extra(template_name, user, data=None, status=200):
    """Render *template_name* with *data* plus site-wide extras.

    Always injects FB_APP_ID and the (private) info of *user* into the
    template context before rendering.

    Bug fix: the original used a mutable default argument (``data={}``);
    any mutation of that dict would leak state across calls, so the
    default is now created per call.
    """
    if data is None:
        data = {}
    t = loader.get_template(template_name)
    c = Context(data)
    extra = {'FB_APP_ID': settings.FB_APP_ID,
             'user': get_user_info(user, True)
             }
    c.update(extra)
    return HttpResponse(t.render(c), status=status)
def fb_channel(request):
    """Serve the Facebook JS-SDK channel file with year-long caching."""
    resp = HttpResponse(
        '<script src="//connect.facebook.net/{}/all.js"></script>'.format(pgettext("Facebook", "en_US")),
        content_type='text/html')
    secs = 60*60*24*365
    resp['Pragma'] = 'public'
    resp['Cache-Control'] = 'max-age=' + str(secs)
    # Bug fix: the module-level "from datetime import datetime" shadows
    # "import datetime", so datetime.datetime.now() raised AttributeError.
    # Use the imported datetime class directly and a local timedelta.
    from datetime import timedelta
    far_future = datetime.now() + timedelta(seconds=secs)
    resp['Expires'] = format_date_time(mktime(far_future.timetuple()))
    return resp
@transaction.commit_on_success
def fb_login(request):
    """Log the user in via a Facebook OAuth token.

    Validates the token against the Graph API, links or creates the local
    UserProfile (merging a pre-existing anonymous profile when possible),
    broadcasts the refreshed public user info to every game channel the
    user plays in, and returns the full private user info as JSON.
    """
    token = request.POST.get('token')
    if not token:
        return HttpResponseBadRequest()
    try:
        with http_cli.get_conn('https://graph.facebook.com/').get('me?access_token=' + token) as res:
            fb_user = json.load(res)
    except ssl.SSLError:
        # TODO: Log this error? What to do when Facebook
        # connection has been compromised?
        return HttpResponseServerError()
    fb, created = FacebookCache.objects.get_or_create(uid=fb_user['id'])
    fb.name = fb_user['name']
    fb.save()
    old_user_id = request.session.get('user_id')
    try:
        profile = UserProfile.objects.get(facebook=fb)
        if old_user_id and old_user_id != profile.id:
            try:
                old_profile = UserProfile.objects.get(pk=old_user_id)
                if not old_profile.user and not old_profile.facebook:
                    # Fold the anonymous session profile into the FB one.
                    profile.merge(old_profile)
            except UserProfile.DoesNotExist:
                pass
    except UserProfile.DoesNotExist:
        # First time login with Facebook
        try:
            profile = UserProfile.objects.get(pk=old_user_id)
            if not profile.user and not profile.facebook:
                profile.facebook = fb
            else:
                raise Exception()
        except:
            profile = UserProfile(id=gen_token(), facebook=fb)
        profile.save()
    request.session['user_id'] = profile.id
    # Just public user info
    user_info = json.dumps(get_user_info(profile, False))
    # Send this new user info to every channel where user is a player:
    for p in (1, 2):
        # Games where player p is this user
        query = Game.objects.filter(**{'p{}__user__exact'.format(p): profile}).values('id')
        # Bug fix: p is an int; str.join requires every element to be a
        # string, so the original tuple (p, user_info) raised TypeError.
        msg = '\n'.join((str(p), user_info))
        # TODO: find a way to make this a single query, because I could not.
        # TODO: make this asyncronous.
        for game in query:
            channel.post_update(game.channel(), 'p', msg)
    # Full user info
    user_info = json.dumps(get_user_info(profile, True))
    return HttpResponse(user_info, content_type='application/json')
@transaction.commit_on_success
def fb_logout(request):
    """Detach the session from its logged-in profile.

    Replaces the session's profile with a brand-new anonymous one and
    returns its info as JSON; 403 when the current profile is already
    anonymous (nothing to log out from).
    """
    profile = UserProfile.get(request.session)
    if profile.user or profile.facebook:
        # Start a fresh anonymous profile instead of mutating the old one.
        profile = UserProfile(id=gen_token())
        profile.save()
        request.session['user_id'] = profile.id
        user_info = json.dumps(get_user_info(profile, True))
        return HttpResponse(user_info, content_type='application/json')
    return HttpResponseForbidden()
def adhack(request, ad_id):
    """Render the ad iframe page for the configured slot *ad_id*."""
    ad_id = int(ad_id)
    return render_to_response('adhack.html',
                              Context({'GOOGLE_AD_ID': settings.GOOGLE_AD_ID,
                                       'GOOGLE_AD_SLOT': settings.GOOGLE_AD_SLOTS[ad_id]}),
                              content_type='text/html; charset=utf-8')
def index(request):
    """Front page: the user's running games plus up to 5 random open games."""
    profile = UserProfile.get(request.session)
    # Games this user participates in that are not yet finished (state < 3).
    playing_now = Game.objects.filter(Q(p1__user=profile) | Q(p2__user=profile)).exclude(state__gte=3)
    # Random sample of open public (token-less) games created by others.
    chosen = Game.objects.filter(state__exact=0, token__isnull=True).exclude(p1__user__exact=profile).order_by('?')[:5]
    new_games = []
    for game in chosen:
        info = {'id': game.id,
                'user': get_user_info(game.p1.user)
                }
        new_games.append(info)
    context = {'your_games': playing_now, 'new_games': new_games, 'like_url': settings.FB_LIKE_URL}
    return render_with_extra('index.html', profile, context)
@transaction.commit_on_success
def new_game(request):
    """Create a fresh 16x16 mine with 51 gems and redirect to its page.

    GET requests receive a self-submitting redirect page so robots do not
    create games; only a POST actually creates one.
    """
    if request.method != 'POST':
        c = Context({'url': '/new_game/'})
        return render_to_response('post_redirect.html', c)
    profile = UserProfile.get(request.session)
    # Board encoding: 0 = empty, 9 = gem, 1..8 = neighboring-gem count.
    mine = [[0] * 16 for i in xrange(16)]
    indexes = list(itertools.product(xrange(16), repeat=2))
    # true_random: strong randomness so the board is not predictable.
    gems = true_random.sample(indexes, 51)
    for (m, n) in gems:
        mine[m][n] = 9
    for m, n in indexes:
        if mine[m][n] == 0:
            def inc_count(x, y):
                # Invoked for each neighbor of (m, n): count adjacent gems.
                if mine[x][y] == 9:
                    mine[m][n] += 1
            for_each_surrounding(m, n, inc_count)
    p1 = Player(user=profile)
    p1.save()
    game = Game()
    game.mine = mine_encode(mine)
    if request.REQUEST.get('private', default=False):
        # Private games are joinable only with this token.
        game.token = gen_token()
    game.p1 = p1
    game.save()
    return HttpResponseRedirect('/game/' + str(game.id))
@transaction.commit_on_success
def join_game(request, game_id):
    """Join an open game as player 2 and notify the game channel."""
    profile = UserProfile.get(request.session)
    # If game is too old, render 404 game error screen
    try:
        game = Game.objects.get(pk=int(game_id))
    except ObjectDoesNotExist:
        return render_with_extra('game404.html', profile, status=404)
    # If already playing this game, redirect to game screen
    if game.what_player(profile):
        return HttpResponseRedirect('/game/' + str(game.id))
    # If user cannot start this game, then 403
    token = request.REQUEST.get('token')
    if game.state != 0 or (game.token and
                           token != game.token):
        return render_with_extra('game403.html', profile, status=403)
    # If we got here via GET, return a page that will make the client/user
    # retry via POST. Done so that Facebook and other robots do not join
    # the game in place of a real user.
    if request.method != 'POST':
        url = '/game/{}/join/'.format(game_id)
        if token:
            url = '{}?token={}'.format(url, token)
        c = Context({'url': url})
        return render_to_response('post_redirect.html', c)
    p2 = Player(user=profile)
    p2.save()
    game.p2 = p2
    game.state = 1
    game.save()
    # Commit before broadcasting so channel readers see the fresh state.
    transaction.commit()
    ch_id = game.channel()
    # Game state change
    channel.post_update(ch_id, 'g', str(game.state), game.seq_num)
    # Player info display
    outdata = '2\n' + json.dumps(get_user_info(profile))
    channel.post_update(ch_id, 'p', outdata)
    return HttpResponseRedirect('/game/' + game_id)
@transaction.commit_on_success
def abort_game(request, game_id):
    """Delete a not-yet-started game (state 0) owned by the requester."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['POST'])
    game = get_object_or_404(Game, pk=game_id)
    if game.state == 0:
        profile = UserProfile.get(request.session)
        pdata = game.what_player(profile)
        if pdata:
            # pdata is (player_number, Player row): drop both row and game.
            pdata[1].delete()
            game.delete()
            return HttpResponseRedirect('/')
    return HttpResponseForbidden()
@transaction.commit_on_success
def claim_game(request, game_id):
    """Claim a turn or a win after a timeout, or give up a running game.

    POST parameter 'terminate':
      'y' -- claim the win because the opponent timed out;
      'z' -- give up, awarding the points and the win to the opponent;
      otherwise just take over the turn on timeout.
    """
    if request.method != 'POST':
        return HttpResponseNotAllowed(['POST'])
    game = get_object_or_404(Game, pk=game_id)
    if game.state not in (1,2):
        # Only running games (states 1/2 = whose turn it is) can be claimed.
        return HttpResponseForbidden()
    profile = UserProfile.get(request.session)
    pdata = game.what_player(profile)
    if pdata:
        my_number, me = pdata
    else:
        return HttpResponseForbidden()
    term = request.POST.get('terminate')
    # Unless giving up, a claim is only valid when it is the opponent's
    # turn and the move timer has expired.
    if term != 'z' and (my_number == game.state or game.timeout_diff() > 0):
        return HttpResponseForbidden()
    if term == 'y':
        # Opponent abandoned: claimer takes all gems revealed so far
        # (tiles 19 and 20 are gems dug by player 1 and 2 respectively).
        points = game.mine.count(tile_encode(19)) + game.mine.count(tile_encode(20))
        profile.total_score += points
        profile.save()
        game.state = my_number + 4
    # If one of the players give up...
    elif term == 'z':
        for pnum,player in ((1,game.p1),(2,game.p2)):
            points = game.mine.count(tile_encode(pnum + 18))
            prof = player.user
            prof.total_score += points
            prof.games_finished += 1
            if pnum != my_number:
                # The quitter's opponent takes the win.
                prof.games_won += 1
                game.state = pnum + 4
            prof.save()
    else:
        # Plain timeout claim: just take over the turn.
        game.state = my_number;
    game.save()
    # Commit before broadcasting so channel readers see the fresh state.
    transaction.commit()
    channel.post_update(game.channel(), 'g', str(game.state), game.seq_num)
    if term == 'y':
        gevent.spawn(publish_score, me.user)
    elif term == 'z':
        gevent.spawn(publish_score, game.p1.user)
        gevent.spawn(publish_score, game.p2.user)
    return HttpResponse()
@transaction.commit_on_success
def game(request, game_id):
    """Render the main game screen with everything the client needs."""
    # TODO: maybe control who can watch a game
    profile = UserProfile.get(request.session)
    #game = get_object_or_404(Game, pk=game_id)
    try:
        game = Game.objects.get(pk=int(game_id))
    except ObjectDoesNotExist:
        return render_with_extra('game404.html', profile, status=404)
    # NOTE(review): user_id is computed but never used below.
    user_id = profile.display_name()
    data = {'state': game.state,
            'game_id': game_id,
            'seq_num': game.seq_num,
            # NOTE(review): this passes the bound method, not its result --
            # confirm the template expects a callable here.
            'channel': game.channel,
            'p1_last_move': game.p1.last_move,
            'player_info': {1: get_user_info(game.p1.user),
                            2: None},
            }
    if(game.p2):
        data['p2_last_move'] = game.p2.last_move
        data['player_info'][2] = get_user_info(game.p2.user)
    if (game.state <= 2):
        # Game still running: expose the remaining move time.
        data['time_left'] = max(0, game.timeout_diff())
    pdata = game.what_player(profile)
    if pdata:
        my_number, me = pdata
        data['tnt_used'] = not me.has_tnt
        data['player'] = my_number
        me.save()
    if game.state == 0:
        protocol = 'https' if request.is_secure() else 'http'
        data['base_url'] = '{}://{}'.format(protocol, request.get_host())
    if game.state == 0 and game.token: # Uninitialized private game
        data['token'] = game.token
    else:
        # Send the masked mine (fully revealed once the game is decided).
        masked = mine_mask(game.mine, game.state in (3, 4))
        if masked.count('?') != 256:
            data['mine'] = masked
    return render_with_extra('game.html', profile, data)
@transaction.commit_on_success
def move(request, game_id):
    """Apply a player's move (reveal a tile, optionally with TNT).

    Expects POST parameters 'm' and 'n' (tile coordinates, 0-15) and an
    optional 'tnt' flag.  Updates the board and game state, broadcasts the
    move on the game's channel and, when the game ends, updates the players'
    statistics.
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    game = get_object_or_404(Game, pk=game_id)
    pdata = game.what_player(UserProfile.get(request.session))
    # Only the player whose turn it is (state == player number) may move.
    if not pdata or pdata[0] != game.state:
        return HttpResponseForbidden()
    player, me = pdata
    try:
        m = int(request.REQUEST['m'])
        n = int(request.REQUEST['n'])
        if not (0 <= m <= 15 and 0 <= n <= 15):
            raise ValueError
    except:  # NOTE(review): bare except — catches any malformed input.
        return HttpResponseBadRequest()
    mine = mine_decode(game.mine)
    to_reveal = [(m, n)]
    tnt_used = False
    if request.REQUEST.get('tnt') == 'y':
        # TNT can only be used once per player.
        if not me.has_tnt:
            return HttpResponseBadRequest()
        me.has_tnt = False
        # TNT reveals a 5x5 square centered on (m, n), clipped to the board.
        to_reveal = itertools.product(xrange(max(m-2, 0),
                                             min(m+3, 16)),
                                      xrange(max(n-2, 0),
                                             min(n+3, 16)))
        tnt_used = True
    revealed = []
    def reveal(m, n):
        # NOTE: shadows the outer m, n on purpose (recursive flood-fill).
        # Tile values >= 10 are already revealed; +10 marks as revealed.
        if mine[m][n] >= 10:
            return
        old = mine[m][n]
        mine[m][n] += 10
        # 19/20 appear to encode a mine revealed by player 1/2 — the scoring
        # code below counts them per player.
        if mine[m][n] == 19 and player == 2:
            mine[m][n] = 20
        revealed.append((m, n, tile_mask(mine[m][n])))
        # Empty tile (0): flood-fill to the neighbours.
        if old == 0:
            for_each_surrounding(m, n, reveal)
    for x, y in to_reveal:
        reveal(x, y)
    # Nothing newly revealed means the move was invalid (already open tile).
    if not revealed:
        return HttpResponseBadRequest()
    # The turn passes to the other player unless a mine was hit without TNT.
    if mine[m][n] <= 18 or tnt_used:
        game.state = (game.state % 2) + 1
    new_mine = mine_encode(mine)
    # points[0] / points[1]: mines revealed by player 1 / player 2.
    points = [new_mine.count(tile_encode(19)), new_mine.count(tile_encode(20))]
    # 26 of the 51 mines is a majority: the game is decided.
    if points[0] >= 26 or points[1] >= 26:
        game.state = player + 2
    # Encode the move: 'b' = TNT blast, 'd' = normal dig, then hex coords.
    coded_move = '%s%x%x' % ('b' if tnt_used else 'd', m, n)
    me.last_move = coded_move
    game.mine = new_mine
    game.save()
    me.save()
    if game.state >= 3:  # Game is over
        # Award the remaining (unrevealed) mines to the current leader.
        remaining = 51 - points[0] - points[1]
        points[0 if points[0] > points[1] else 1] += remaining
        for user, idx in ((game.p1.user, 0), (game.p2.user, 1)):
            # F() expressions: update the stats atomically in the database.
            user.games_finished = F('games_finished') + 1
            user.total_score = F('total_score') + points[idx]
            if game.state == (idx + 3):
                user.games_won = F('games_won') + 1
            user.save()
        # Reveal the remaining hidden mines (value 9) to both players.
        for m, n in itertools.product(xrange(0, 16), xrange(0, 16)):
            if mine[m][n] == 9:
                revealed.append((m, n, 'x'))
    result = itertools.chain((str(game.state), str(player), coded_move), ['%d,%d:%c' % x for x in revealed])
    result = '\n'.join(result)
    # Everything is OK until now, so commit DB transaction
    transaction.commit()
    # Post the update to the users...
    channel.post_update(game.channel(), 'g', result, game.seq_num)
    # ... and then publish the scores on FB, if game is over.
    if game.state >= 3:
        gevent.spawn(publish_score, game.p1.user)
        gevent.spawn(publish_score, game.p2.user)
    return HttpResponse()
def donate(request):
    """Render the donation page with the configured Facebook "like" URL."""
    context = {'like_url': settings.FB_LIKE_URL}
    return render_with_extra('donate.html', UserProfile.get(request.session), context)
def info(request, page):
    """Render a static informational page.

    Tries the template in the request's locale first, then falls back to
    English.

    Raises:
        Http404: if ``page`` is not whitelisted or no template exists for it.
    """
    if page not in info.existing_pages:
        raise Http404
    # Hoisted out of the loop: no need to re-fetch the profile per locale.
    profile = UserProfile.get(request.session)
    for locale in (request.LANGUAGE_CODE, 'en'):
        try:
            return render_with_extra('{}/{}.html'.format(locale, page), profile)
        except TemplateDoesNotExist:
            continue
    # Bug fix: previously the function fell through and implicitly returned
    # None (an invalid Django response) when no template was found at all.
    raise Http404

# Whitelist of pages that may be served through this view.
info.existing_pages = frozenset(('about', 'howtoplay', 'sourcecode', 'contact', 'privacy', 'terms'))
# ---------------------------------------------------------------------------
"""
Abinit Workflows
"""
from __future__ import division, print_function
import sys
import os
import shutil
import time
import abc
import collections
import numpy as np
try:
from pydispatch import dispatcher
except ImportError:
pass
from pymatgen.core.units import ArrayWithUnit, Ha_to_eV
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.design_patterns import Enum, AttrDict
from pymatgen.serializers.json_coders import MSONable, json_pretty_dump
from pymatgen.io.smartio import read_structure
from pymatgen.util.num_utils import iterator_from_slice, chunks, monotonic
from pymatgen.util.string_utils import list_strings, pprint_table, WildCard
from pymatgen.io.abinitio import wrappers
from pymatgen.io.abinitio.tasks import (Task, AbinitTask, Dependency, Node, ScfTask, NscfTask, HaydockBseTask, RelaxTask)
from pymatgen.io.abinitio.strategies import Strategy
from pymatgen.io.abinitio.utils import File, Directory
from pymatgen.io.abinitio.netcdf import ETSF_Reader
from pymatgen.io.abinitio.abiobjects import Smearing, AbiStructure, KSampling, Electrons
from pymatgen.io.abinitio.pseudos import Pseudo
from pymatgen.io.abinitio.strategies import ScfStrategy
from pymatgen.io.abinitio.eos import EOS
from pymatgen.io.abinitio.abitimer import AbinitTimerParser
import logging
logger = logging.getLogger(__name__)
# Module metadata.
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"

# Public API of this module (some entries are defined further down the file).
__all__ = [
    "Workflow",
    "IterativeWorkflow",
    "BandStructureWorkflow",
    "RelaxWorkflow",
    "DeltaFactorWorkflow",
    "G0W0_Workflow",
    "SigmaConvWorkflow",
    "BSEMDF_Workflow",
    "PhononWorkflow",
]
class WorkflowError(Exception):
    """Base class for the exceptions raised by `Workflow` objects."""
class BaseWorkflow(Node):
    """
    Abstract base class for a collection of tasks.  Provides polling and
    waiting (an interface modeled after `subprocess.Popen`), CPU accounting,
    task scheduling helpers and signal-based completion callbacks.
    """
    __metaclass__ = abc.ABCMeta

    # All workflow errors derive from this class.
    Error = WorkflowError

    # interface modeled after subprocess.Popen
    @abc.abstractproperty
    def processes(self):
        """Return a list of objects that support the subprocess.Popen protocol."""

    def poll(self):
        """
        Check if all child processes have terminated. Set and return
        returncode attribute.
        """
        return [task.poll() for task in self]

    def wait(self):
        """
        Wait for child processed to terminate. Set and return returncode attribute.
        """
        return [task.wait() for task in self]

    def communicate(self, input=None):
        """
        Interact with processes: Send data to stdin. Read data from stdout and
        stderr, until end-of-file is reached.
        Wait for process to terminate. The optional input argument should be a
        string to be sent to the child processed, or None, if no data should be
        sent to the children.

        communicate() returns a list of tuples (stdoutdata, stderrdata).
        """
        return [task.communicate(input) for task in self]

    def show_intrawork_deps(self):
        """Show the dependencies within the `Workflow` as an ASCII table."""
        # Header row: one column per task index.
        table = [["Task #"] + [str(i) for i in range(len(self))]]
        for ii, task1 in enumerate(self):
            line = (1 + len(self)) * [""]
            line[0] = str(ii)
            # Mark with "^" the tasks that task1 depends on.
            for jj, task2 in enumerate(self):
                if task1.depends_on(task2):
                    line[jj+1] = "^"
            table.append(line)
        pprint_table(table)

    @property
    def returncodes(self):
        """
        The children return codes, set by poll() and wait() (and indirectly by communicate()).
        A None value indicates that the process hasn't terminated yet.
        A negative value -N indicates that the child was terminated by signal N (Unix only).
        """
        return [task.returncode for task in self]

    @property
    def ncpus_reserved(self):
        """
        Returns the number of CPUs reserved in this moment.
        A CPU is reserved if it's still not running but
        we have submitted the task to the queue manager.
        """
        return sum(task.tot_ncpus for task in self if task.status == task.S_SUB)

    @property
    def ncpus_allocated(self):
        """
        Returns the number of CPUs allocated in this moment.
        A CPU is allocated if it's running a task or if we have
        submitted a task to the queue manager but the job is still pending.
        """
        return sum(task.tot_ncpus for task in self if task.status in [task.S_SUB, task.S_RUN])

    @property
    def ncpus_inuse(self):
        """
        Returns the number of CPUs used in this moment.
        A CPU is used if there's a job that is running on it.
        """
        return sum(task.tot_ncpus for task in self if task.status == task.S_RUN)

    def fetch_task_to_run(self):
        """
        Returns the first task that is ready to run or
        None if no task can be submitted at present.

        Raises:
            `StopIteration` if all tasks are done.
        """
        # All the tasks are done so raise an exception
        # that will be handled by the client code.
        if all(task.is_completed for task in self):
            raise StopIteration("All tasks completed.")

        for task in self:
            if task.can_run:
                #print(task, str(task.status), [task.deps_status])
                return task

        # No task found, this usually happens when we have dependencies.
        # Beware of possible deadlocks here!
        logger.warning("Possible deadlock in fetch_task_to_run!")
        return None

    def fetch_alltasks_to_run(self):
        """
        Returns a list with all the tasks that can be submitted.
        Empty list if not task has been found.
        """
        #if all(task.is_completed for task in self):
        #    return []
        return [task for task in self if task.can_run]

    @abc.abstractmethod
    def setup(self, *args, **kwargs):
        """Method called before submitting the calculations."""

    def _setup(self, *args, **kwargs):
        # Internal hook so that subclasses of the framework can wrap setup().
        self.setup(*args, **kwargs)

    def connect_signals(self):
        """
        Connect the signals within the workflow.
        self is responsible for catching the important signals raised from
        its task and raise new signals when some particular condition occurs.
        """
        for task in self:
            dispatcher.connect(self.on_ok, signal=task.S_OK, sender=task)

    @property
    def all_ok(self):
        # True when every task in the workflow has reached S_OK.
        return all(task.status == task.S_OK for task in self)

    def on_ok(self, sender):
        """
        This callback is called when one task reaches status S_OK.

        Returns:
            `AttrDict` with at least `returncode` and `message` entries.
        """
        logger.debug("in on_ok with sender %s" % sender)

        if self.all_ok:
            # NOTE(review): reads self.finalized but sets self._finalized —
            # presumably `Node` exposes `finalized` as a property; confirm.
            if self.finalized:
                return AttrDict(returncode=0, message="Workflow has been already finalized")
            else:
                results = AttrDict(**self.on_all_ok())
                self._finalized = True
                # Signal to possible observers that the `Workflow` reached S_OK
                print("Workflow %s is finalized and broadcasts signal S_OK" % str(self))
                print("Workflow %s status = %s" % (str(self), self.status))
                dispatcher.send(signal=self.S_OK, sender=self)
                return results

        return AttrDict(returncode=1, message="Not all tasks are OK!")

    def on_all_ok(self):
        """
        This method is called once the `workflow` is completed i.e. when all the tasks
        have reached status S_OK. Subclasses should provide their own implementation

        Returns:
            Dictionary that must contain at least the following entries:
                returncode:
                    0 on success.
                message:
                    a string that should provide a human-readable description of what has been performed.
        """
        return dict(returncode=0,
                    message="Calling on_all_ok of the base class!",
                    )

    def get_results(self):
        """
        Method called once the calculations are completed.
        The base version returns a dictionary task_name : TaskResults for each task in self.
        """
        return WorkflowResults(task_results={task.name: task.results for task in self})
class Workflow(BaseWorkflow):
    """
    A Workflow is a list of (possibly connected) tasks.
    """
    Error = WorkflowError

    def __init__(self, workdir=None, manager=None):
        """
        Args:
            workdir:
                Path to the working directory.
            manager:
                `TaskManager` object.
        """
        super(Workflow, self).__init__()

        self._tasks = []

        if workdir is not None:
            self.set_workdir(workdir)

        if manager is not None:
            self.set_manager(manager)

    def set_manager(self, manager):
        """Set the `TaskManager` to use to launch the Task."""
        self.manager = manager.deepcopy()
        for task in self:
            task.set_manager(manager)

    @property
    def flow(self):
        """The flow containing this `Workflow`."""
        return self._flow

    def set_flow(self, flow):
        """Set the flow associated to this `Workflow`. Can be set only once."""
        if not hasattr(self, "_flow"):
            self._flow = flow
        else:
            if self._flow != flow:
                raise ValueError("self._flow != flow")

    def set_workdir(self, workdir, chroot=False):
        """Set the working directory. Cannot be set more than once unless chroot is True."""
        if not chroot and hasattr(self, "workdir") and self.workdir != workdir:
            raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir))

        self.workdir = os.path.abspath(workdir)

        # Directories with (input|output|temporary) data.
        # The workflow will use these directories to connect
        # itself to other workflows and/or to produce new data
        # that will be used by its children.
        self.indir = Directory(os.path.join(self.workdir, "indata"))
        self.outdir = Directory(os.path.join(self.workdir, "outdata"))
        self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata"))

    def chroot(self, new_workdir):
        """Relocate the workflow (and all its tasks) to new_workdir."""
        self.set_workdir(new_workdir, chroot=True)

        for i, task in enumerate(self):
            new_tdir = os.path.join(self.workdir, "t" + str(i))
            task.set_workdir(new_tdir, chroot=True)

    def __len__(self):
        return len(self._tasks)

    def __iter__(self):
        return self._tasks.__iter__()

    def __getitem__(self, item):
        # Parameter renamed from `slice` to avoid shadowing the builtin.
        return self._tasks[item]

    def chunks(self, chunk_size):
        """Yield successive chunks of tasks of length chunk_size."""
        for tasks in chunks(self, chunk_size):
            yield tasks

    def ipath_from_ext(self, ext):
        """
        Returns the path of the input file with extension ext.
        Use it when the file does not exist yet.

        Bug fix: this method was originally (mis)named `opath_from_ext` and was
        therefore silently shadowed by the output-file version defined below.
        """
        return self.indir.path_in("in_" + ext)

    def opath_from_ext(self, ext):
        """
        Returns the path of the output file with extension ext.
        Use it when the file does not exist yet.
        """
        return self.outdir.path_in("out_" + ext)

    @property
    def processes(self):
        """The processes of the tasks (subprocess.Popen protocol)."""
        return [task.process for task in self]

    @property
    def all_done(self):
        """True if all the `Task` in the `Workflow` are done."""
        return all(task.status >= task.S_DONE for task in self)

    @property
    def isnc(self):
        """True if norm-conserving calculation."""
        return all(task.isnc for task in self)

    @property
    def ispaw(self):
        """True if PAW calculation."""
        return all(task.ispaw for task in self)

    @property
    def status_counter(self):
        """
        Returns a `Counter` object that counts the number of task with
        given status (use the string representation of the status as key).
        """
        counter = collections.Counter()

        for task in self:
            counter[str(task.status)] += 1

        return counter

    def allocate(self, manager=None):
        """
        This function is called once we have completed the initialization
        of the `Workflow`. It sets the manager of each task (if not already done)
        and defines the working directories of the tasks.

        Args:
            manager:
                `TaskManager` object or None
        """
        for i, task in enumerate(self):
            if not hasattr(task, "manager"):
                # Set the manager: either the one provided in input
                # or the one of the workflow.
                if manager is not None:
                    task.set_manager(manager)
                else:
                    task.set_manager(self.manager)

            task_workdir = os.path.join(self.workdir, "t" + str(i))

            if not hasattr(task, "workdir"):
                task.set_workdir(task_workdir)
            else:
                if task.workdir != task_workdir:
                    raise ValueError("task.workdir != task_workdir: %s, %s" % (task.workdir, task_workdir))

    def register(self, obj, deps=None, required_files=None, manager=None, task_class=None):
        """
        Registers a new `Task` and add it to the internal list, taking into account possible dependencies.

        Args:
            obj:
                `Strategy` object or `AbinitInput` instance.
                if Strategy object, we create a new `AbinitTask` from the input strategy and add it to the list.
            deps:
                Dictionary specifying the dependency of this node.
                None means that this obj has no dependency.
            required_files:
                List of strings with the path of the files used by the task.
            manager:
                The `TaskManager` responsible for the submission of the task. If manager is None, we use
                the `TaskManager` specified during the creation of the `Workflow`.
            task_class:
                Task subclass to instantiate. Default: `AbinitTask`

        Returns:
            `Task` object
        """
        task_workdir = None
        if hasattr(self, "workdir"):
            task_workdir = os.path.join(self.workdir, "t" + str(len(self)))

        if isinstance(obj, Task):
            task = obj

        else:
            # Set the class
            if task_class is None:
                task_class = AbinitTask

            if isinstance(obj, Strategy):
                # Create the new task (note the factory so that we create subclasses easily).
                task = task_class(obj, task_workdir, manager)
            else:
                task = task_class.from_input(obj, task_workdir, manager)

        self._tasks.append(task)

        # Handle possible dependencies.
        if deps is not None:
            deps = [Dependency(node, exts) for (node, exts) in deps.items()]
            task.add_deps(deps)

        # Handle files required by the task.
        if required_files is not None:
            task.add_required_files(required_files)

        return task

    def path_in_workdir(self, filename):
        """Create the absolute path of filename in the working directory."""
        return os.path.join(self.workdir, filename)

    def setup(self, *args, **kwargs):
        """
        Method called before running the calculations.
        The default implementation is empty.
        """

    def build(self, *args, **kwargs):
        """Creates the top level directory."""
        # Create the directories of the workflow.
        self.indir.makedirs()
        self.outdir.makedirs()
        self.tmpdir.makedirs()

        # Build dirs and files of each task.
        for task in self:
            task.build(*args, **kwargs)

        # Connect signals within the workflow.
        self.connect_signals()

    @property
    def status(self):
        """
        Returns the status of the workflow i.e. the minimum of the status of the tasks.
        """
        return self.get_all_status(only_min=True)

    def get_all_status(self, only_min=False):
        """
        Returns a list with the status of the tasks in self.

        Args:
            only_min:
                If True, the minimum of the status is returned.
        """
        if len(self) == 0:
            # The workflow will be created in the future.
            if only_min:
                return self.S_INIT
            else:
                return [self.S_INIT]

        self.check_status()

        status_list = [task.status for task in self]

        if only_min:
            return min(status_list)
        else:
            return status_list

    def check_status(self):
        """Check the status of the tasks."""
        # Recompute the status of the tasks
        for task in self:
            task.check_status()

        # Take into account possible dependencies. Use a list instead of generators
        for task in self:
            if task.status <= task.S_SUB and all([status == task.S_OK for status in task.deps_status]):
                task.set_status(task.S_READY)

    def rmtree(self, exclude_wildcard=""):
        """
        Remove all files and directories in the working directory

        Args:
            exclude_wildcard:
                Optional string with regular expressions separated by `|`.
                Files matching one of the regular expressions will be preserved.
                example: exclude_wildcard="*.nc|*.txt" preserves all the files
                whose extension is in ["nc", "txt"].
        """
        if not exclude_wildcard:
            shutil.rmtree(self.workdir)

        else:
            w = WildCard(exclude_wildcard)

            for dirpath, dirnames, filenames in os.walk(self.workdir):
                for fname in filenames:
                    path = os.path.join(dirpath, fname)
                    if not w.match(fname):
                        os.remove(path)

    def rm_indatadir(self):
        """Remove all the indata directories."""
        for task in self:
            task.rm_indatadir()

    def rm_outdatadir(self):
        """Remove all the outdata directories."""
        for task in self:
            # Bug fix: the original called task.rm_outatadir(), which does not
            # match the naming of the sibling helpers (rm_indatadir,
            # rm_tmpdatadir) and would raise AttributeError at runtime.
            task.rm_outdatadir()

    def rm_tmpdatadir(self):
        """Remove all the tmpdata directories."""
        for task in self:
            task.rm_tmpdatadir()

    def move(self, dest, isabspath=False):
        """
        Recursively move self.workdir to another location. This is similar to the Unix "mv" command.
        The destination path must not already exist. If the destination already exists
        but is not a directory, it may be overwritten depending on os.rename() semantics.

        Be default, dest is located in the parent directory of self.workdir, use isabspath=True
        to specify an absolute path.
        """
        if not isabspath:
            dest = os.path.join(os.path.dirname(self.workdir), dest)

        shutil.move(self.workdir, dest)

    def submit_tasks(self, wait=False):
        """
        Submits the task in self and wait.
        TODO: change name.
        """
        for task in self:
            task.start()

        if wait:
            for task in self:
                task.wait()

    def start(self, *args, **kwargs):
        """
        Start the work. Calls build and _setup first, then submit the tasks.
        Non-blocking call unless wait is set to True
        """
        wait = kwargs.pop("wait", False)

        # Build dirs and files.
        self.build(*args, **kwargs)

        # Initial setup
        self._setup(*args, **kwargs)

        # Submit tasks (does not block)
        self.submit_tasks(wait=wait)

    def read_etotal(self):
        """
        Reads the total energy from the GSR file produced by the task.

        Return a numpy array with the total energies in Hartree
        The array element is set to np.inf if an exception is raised while reading the GSR file.
        """
        if not self.all_done:
            raise self.Error("Some task is still in running/submitted state")

        etotal = []
        for task in self:
            # Open the GSR file and read etotal (Hartree)
            gsr_path = task.outdir.has_abiext("GSR")
            etot = np.inf
            if gsr_path:
                with ETSF_Reader(gsr_path) as r:
                    etot = r.read_value("etotal")

            etotal.append(etot)

        return etotal

    def parse_timers(self):
        """
        Parse the TIMER section reported in the ABINIT output files.

        Returns:
            `AbinitTimerParser` object
        """
        filenames = filter(os.path.exists, [task.output_file.path for task in self])

        parser = AbinitTimerParser()
        parser.parse(filenames)

        return parser
class IterativeWorkflow(Workflow):
    """
    A `Workflow` that keeps generating and running new `Tasks` until a
    stopping condition is met (typically used for convergence studies or
    iterative algorithms).
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, strategy_generator, max_niter=25, workdir=None, manager=None):
        """
        Args:
            strategy_generator:
                Generator object that produces `Strategy` objects.
            max_niter:
                Maximum number of iterations. A negative value or zero value
                is equivalent to having an infinite number of iterations.
            workdir:
                Working directory.
            manager:
                `TaskManager` class.
        """
        super(IterativeWorkflow, self).__init__(workdir, manager)

        self.strategy_generator = strategy_generator
        self._max_niter = max_niter
        self.niter = 0

    @property
    def max_niter(self):
        """Maximum number of iterations (<= 0 means unlimited)."""
        return self._max_niter

    def next_task(self):
        """
        Generate and register a new `Task`.

        Returns:
            New `Task` object

        Raises:
            StopIteration: when the strategy generator is exhausted.
        """
        strategy = next(self.strategy_generator)
        self.register(strategy)
        assert len(self) == self.niter
        return self[-1]

    def submit_tasks(self, *args, **kwargs):
        """
        Run tasks one at a time until `exit_iteration` signals completion
        or the number of iterations exceeds `max_niter`.
        """
        self.niter = 1
        while True:
            # Stop if the iteration budget (when positive) is exhausted.
            if self.max_niter > 0 and self.niter > self.max_niter:
                logger.debug("niter %d > max_niter %d" % (self.niter, self.max_niter))
                break

            try:
                task = self.next_task()
            except StopIteration:
                break

            # Run the task and block until it has completed.
            task.start(*args, **kwargs)
            task.wait()

            if self.exit_iteration(*args, **kwargs)["exit"]:
                break

            self.niter += 1

    @abc.abstractmethod
    def exit_iteration(self, *args, **kwargs):
        """
        Return a dictionary with the results produced at the given iteration.
        The dictionary must contain an entry "exit" that evaluates to
        True if the iteration should be stopped.
        """
def check_conv(values, tol, min_numpts=1, mode="abs", vinf=None):
    """
    Given a list of values and a tolerance tol, returns the leftmost index for which

        abs(value[i] - vinf) < tol if mode == "abs"

    or

        abs(value[i] - vinf) / vinf < tol if mode == "rel"

    returns -1 if convergence is not achieved. By default, vinf = values[-1]

    Args:
        tol:
            Tolerance
        min_numpts:
            Minimum number of points that must be converged.
        mode:
            "abs" for absolute convergence, "rel" for relative convergence.
        vinf:
            Used to specify an alternative value instead of values[-1].
    """
    if vinf is None:
        vinf = values[-1]

    # Deviation of each value from the reference.
    if mode == "abs":
        deltas = [abs(v - vinf) for v in values]
    elif mode == "rel":
        deltas = [abs(v - vinf) / vinf for v in values]
    else:
        raise ValueError("Wrong mode %s" % mode)

    npts = len(deltas)
    conv_idx = -2

    if npts > min_numpts and deltas[-2] < tol:
        # Scan from the right: first index whose deviation exceeds tol
        # (0 when every point is within the tolerance).
        conv_idx = next((j for j in range(npts - 1, -1, -1) if deltas[j] > tol), 0)
        # Require at least min_numpts converged points after that index.
        if (npts - conv_idx - 1) < min_numpts:
            conv_idx = -2

    return conv_idx + 1
def compute_hints(ecut_list, etotal, atols_mev, pseudo, min_numpts=1, stream=sys.stdout):
    """
    Analyze the convergence of the total energy as a function of ecut and
    build the low/normal/high cutoff-energy hints for the pseudopotential.

    Args:
        ecut_list: Cutoff energies (one per energy in etotal).
        etotal: Total energies in Hartree.
        atols_mev: Absolute tolerances in meV for accuracy (low, normal, high).
        pseudo: Object exposing `name` and `path` (e.g. a `Pseudo`).
        min_numpts: Minimum number of converged points required.
        stream: If not None, a human-readable convergence table is written to it.

    Returns:
        dict with the convergence data; "exit" is True when the "high"
        accuracy level is converged.
    """
    # Convert the tolerances from meV to Hartree.
    de_low, de_normal, de_high = [a / (1000 * Ha_to_eV) for a in atols_mev]

    num_ene = len(etotal)  # NOTE(review): unused local — confirm before removing.
    etotal_inf = etotal[-1]

    # Leftmost converged index for each accuracy level (-1 if not converged).
    ihigh = check_conv(etotal, de_high, min_numpts=min_numpts)
    inormal = check_conv(etotal, de_normal)
    ilow = check_conv(etotal, de_low)

    accidx = {"H": ihigh, "N": inormal, "L": ilow}

    table = []; app = table.append

    app(["iter", "ecut", "etotal", "et-e_inf [meV]", "accuracy",])
    for idx, (ec, et) in enumerate(zip(ecut_list, etotal)):
        line = "%d %.1f %.7f %.3f" % (idx, ec, et, (et-etotal_inf) * Ha_to_eV * 1.e+3)
        # Last column: letters of the accuracy levels that converge at this row.
        row = line.split() + ["".join(c for c,v in accidx.items() if v == idx)]
        app(row)

    if stream is not None:
        stream.write("pseudo: %s\n" % pseudo.name)
        pprint_table(table, out=stream)

    ecut_high, ecut_normal, ecut_low = 3 * (None,)
    # NOTE: `exit` shadows the builtin of the same name (local only).
    exit = (ihigh != -1)

    if exit:
        ecut_low = ecut_list[ilow]
        ecut_normal = ecut_list[inormal]
        ecut_high = ecut_list[ihigh]

    # Augmentation ratios: [1,] means norm-conserving (no PAW).
    aug_ratios = [1,]
    aug_ratio_low, aug_ratio_normal, aug_ratio_high = 3 * (1,)

    data = {
        "exit"       : ihigh != -1,
        "etotal"     : list(etotal),
        "ecut_list"  : ecut_list,
        "aug_ratios" : aug_ratios,
        "low"        : {"ecut": ecut_low, "aug_ratio": aug_ratio_low},
        "normal"     : {"ecut": ecut_normal, "aug_ratio": aug_ratio_normal},
        "high"       : {"ecut": ecut_high, "aug_ratio": aug_ratio_high},
        "pseudo_name": pseudo.name,
        "pseudo_path": pseudo.path,
        "atols_mev"  : atols_mev,
        "dojo_level" : 0,
    }

    return data
def plot_etotal(ecut_list, etotals, aug_ratios, **kwargs):
    """
    Uses Matplotlib to plot the energy curve as function of ecut

    Args:
        ecut_list:
            List of cutoff energies
        etotals:
            Total energies in Hartree, see aug_ratios
        aug_ratios:
            List augmentation rations. [1,] for norm-conserving, [4, ...] for PAW
            The number of elements in aug_ration must equal the number of (sub)lists
            in etotals. Example:

                - NC: etotals = [3.4, 4,5 ...], aug_ratios = [1,]
                - PAW: etotals = [[3.4, ...], [3.6, ...]], aug_ratios = [4,6]

    =========  ==============================================================
    kwargs     description
    =========  ==============================================================
    show       True to show the figure
    savefig    'abc.png' or 'abc.eps'* to save the figure to a file.
    =========  ==============================================================

    Returns:
        `matplotlib` figure.
    """
    show = kwargs.pop("show", True)
    savefig = kwargs.pop("savefig", None)

    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)

    npts = len(ecut_list)

    if len(aug_ratios) != 1 and len(aug_ratios) != len(etotals):
        raise ValueError("The number of sublists in etotal must equal the number of aug_ratios")

    # Normalize the NC case to the same nested-list layout as PAW.
    if len(aug_ratios) == 1:
        etotals = [etotals, ]

    lines, legends = [], []

    emax = -np.inf
    for (aratio, etot) in zip(aug_ratios, etotals):
        emev = np.array(etot) * Ha_to_eV * 1000
        emev_inf = npts * [emev[-1]]
        yy = emev - emev_inf

        # Bug fix: the original called np.max(emax, np.max(yy)), which passes
        # np.max(yy) as the `axis` argument of np.max and raises TypeError.
        emax = max(emax, np.max(yy))

        line, = ax.plot(ecut_list, yy, "-->", linewidth=3.0, markersize=10)

        lines.append(line)
        legends.append("aug_ratio = %s" % aratio)

    ax.legend(lines, legends, 'upper right', shadow=True)

    # Set xticks and labels.
    ax.grid(True)
    ax.set_xlabel("Ecut [Ha]")
    # Raw strings so the LaTeX backslash is unambiguous.
    ax.set_ylabel(r"$\Delta$ Etotal [meV]")
    ax.set_xticks(ecut_list)

    #ax.yaxis.set_view_interval(-10, emax + 0.01 * abs(emax))
    ax.yaxis.set_view_interval(-10, 20)

    ax.set_title(r"$\Delta$ Etotal Vs Ecut")

    if show:
        plt.show()

    if savefig is not None:
        fig.savefig(savefig)

    return fig
class PseudoConvergence(Workflow):
    """Workflow performing an ecut convergence study for a pseudopotential."""

    def __init__(self, workdir, manager, pseudo, ecut_list, atols_mev,
                 toldfe=1.e-8, spin_mode="polarized",
                 acell=(8, 9, 10), smearing="fermi_dirac:0.1 eV"):
        super(PseudoConvergence, self).__init__(workdir, manager)

        # Temporary helper, used only to build one SCF strategy per ecut.
        strat_builder = PseudoIterativeConvergence(
            workdir, manager, pseudo, ecut_list, atols_mev,
            toldfe=toldfe,
            spin_mode=spin_mode,
            acell=acell,
            smearing=smearing,
            max_niter=len(ecut_list),
        )

        self.atols_mev = atols_mev
        self.pseudo = Pseudo.aspseudo(pseudo)

        self.ecut_list = []
        for ecut in ecut_list:
            self.ecut_list.append(ecut)
            self.register(strat_builder.strategy_with_ecut(ecut))

    def get_results(self):
        """Collect the task results, compute the ecut hints and plot E(ecut)."""
        wf_results = super(PseudoConvergence, self).get_results()

        etotal = self.read_etotal()
        data = compute_hints(self.ecut_list, etotal, self.atols_mev, self.pseudo)

        plot_etotal(data["ecut_list"], data["etotal"], data["aug_ratios"],
                    show=False, savefig=self.path_in_workdir("etotal.pdf"))

        wf_results.update(data)

        # E(ecut) should decrease monotonically towards the converged value.
        if not monotonic(etotal, mode="<", atol=1.0e-5):
            logger.warning("E(ecut) is not decreasing")
            wf_results.push_exceptions("E(ecut) is not decreasing:\n" + str(etotal))

        return wf_results
class PseudoIterativeConvergence(IterativeWorkflow):
    """Iterative ecut convergence study for a pseudopotential."""

    def __init__(self, workdir, manager, pseudo, ecut_list_or_slice, atols_mev,
                 toldfe=1.e-8, spin_mode="polarized",
                 acell=(8, 9, 10), smearing="fermi_dirac:0.1 eV", max_niter=50,):
        """
        Args:
            workdir:
                Working directory.
            pseudo:
                string or Pseudo instance
            ecut_list_or_slice:
                List of cutoff energies or slice object (mainly used for infinite iterations).
            atols_mev:
                List of absolute tolerances in meV (3 entries corresponding to
                accuracy ["low", "normal", "high"]).
            manager:
                `TaskManager` object.
            spin_mode:
                Defines how the electronic spin will be treated.
            acell:
                Lengths of the periodic box in Bohr.
            smearing:
                Smearing instance or string in the form "mode:tsmear".
                Default: Fermi-Dirac with T=0.1 eV
        """
        self.pseudo = Pseudo.aspseudo(pseudo)

        self.atols_mev = atols_mev
        self.toldfe = toldfe
        self.spin_mode = spin_mode
        self.smearing = Smearing.assmearing(smearing)
        self.acell = acell

        if isinstance(ecut_list_or_slice, slice):
            self.ecut_iterator = iterator_from_slice(ecut_list_or_slice)
        else:
            self.ecut_iterator = iter(ecut_list_or_slice)

        # Construct a generator that returns strategy objects.
        def strategy_generator():
            for ecut in self.ecut_iterator:
                yield self.strategy_with_ecut(ecut)

        super(PseudoIterativeConvergence, self).__init__(
            strategy_generator(), max_niter=max_niter, workdir=workdir, manager=manager)

        if not self.isnc:
            raise NotImplementedError("PAW convergence tests are not supported yet")

    def strategy_with_ecut(self, ecut):
        """Return a Strategy instance with given cutoff energy ecut."""
        # Define the system: one atom in a box of lengths acell.
        boxed_atom = AbiStructure.boxed_atom(self.pseudo, acell=self.acell)

        # Gamma-only sampling.
        gamma_only = KSampling.gamma_only()

        # Don't write WFK files.
        extra_abivars = {
            "ecut"  : ecut,
            "prtwf" : 0,
            "toldfe": self.toldfe,
        }
        # NOTE: an unused `Electrons` object was built here in the original
        # code; removed since ScfStrategy receives spin_mode/smearing directly.
        strategy = ScfStrategy(boxed_atom, self.pseudo, gamma_only,
                               spin_mode=self.spin_mode, smearing=self.smearing,
                               charge=0.0, scf_algorithm=None,
                               use_symmetries=True, **extra_abivars)

        return strategy

    @property
    def ecut_list(self):
        """The list of cutoff energies computed so far"""
        return [float(task.strategy.ecut) for task in self]

    def check_etotal_convergence(self, *args, **kwargs):
        """Compute the convergence hints from the energies read so far."""
        return compute_hints(self.ecut_list, self.read_etotal(), self.atols_mev,
                             self.pseudo)

    def exit_iteration(self, *args, **kwargs):
        """Stop the iteration once the "high" accuracy level is converged."""
        # Bug fix: the original passed `self` explicitly
        # (check_etotal_convergence(self, ...)), injecting a spurious
        # positional argument that was silently absorbed by *args.
        return self.check_etotal_convergence(*args, **kwargs)

    def get_results(self):
        """Return the results of the tasks."""
        wf_results = super(PseudoIterativeConvergence, self).get_results()

        data = self.check_etotal_convergence()

        ecut_list, etotal, aug_ratios = data["ecut_list"], data["etotal"], data["aug_ratios"]

        plot_etotal(ecut_list, etotal, aug_ratios,
                    show=False, savefig=self.path_in_workdir("etotal.pdf"))

        wf_results.update(data)

        if not monotonic(data["etotal"], mode="<", atol=1.0e-5):
            logger.warning("E(ecut) is not decreasing")
            wf_results.push_exceptions("E(ecut) is not decreasing\n" + str(etotal))

        return wf_results
class BandStructureWorkflow(Workflow):
    """Workflow for band structure calculations."""

    def __init__(self, scf_input, nscf_input, dos_inputs=None, workdir=None, manager=None):
        """
        Args:
            scf_input:
                Input for the SCF run or `SCFStrategy` object.
            nscf_input:
                Input for the NSCF run or `NSCFStrategy` object defining the band structure calculation.
            dos_inputs:
                Input(s) for the DOS. DOS is computed only if dos_inputs is not None.
            workdir:
                Working directory.
            manager:
                `TaskManager` object.
        """
        super(BandStructureWorkflow, self).__init__(workdir=workdir, manager=manager)

        # GS-SCF run producing the density.
        self.scf_task = self.register(scf_input, task_class=ScfTask)

        # NSCF band-structure run, fed by the SCF density.
        self.nscf_task = self.register(nscf_input, deps={self.scf_task: "DEN"}, task_class=NscfTask)

        # Optional DOS run(s), also fed by the SCF density.
        if dos_inputs is not None:
            dos_list = dos_inputs if isinstance(dos_inputs, (list, tuple)) else [dos_inputs]
            for dos_inp in dos_list:
                self.register(dos_inp, deps={self.scf_task: "DEN"}, task_class=NscfTask)
class RelaxWorkflow(Workflow):
    """
    Workflow for structural relaxations. The first task relaxes the atomic position
    while keeping the unit cell parameters fixed. The second task uses the final
    structure to perform a structural relaxation in which both the atomic positions
    and the lattice parameters are optimized.
    """
    def __init__(self, ion_input, ioncell_input, workdir=None, manager=None):
        """
        Args:
            ion_input:
                Input for the relaxation of the ions (cell is fixed)
            ioncell_input:
                Input for the relaxation of the ions and the unit cell.
            workdir:
                Working directory.
            manager:
                `TaskManager` object.
        """
        super(RelaxWorkflow, self).__init__(workdir=workdir, manager=manager)

        self.ion_task = self.register(ion_input, task_class=RelaxTask)

        # Use WFK for the time being since I don't know why Abinit produces all these _TIM?_DEN files.
        #self.ioncell_task = self.register(ioncell_input, deps={self.ion_task: "DEN"}, task_class=RelaxTask)
        self.ioncell_task = self.register(ioncell_input, deps={self.ion_task: "WFK"}, task_class=RelaxTask)

        # Lock ioncell_task as ion_task should communicate to ioncell_task that
        # the calculation is OK and pass the final structure.
        self.ioncell_task.set_status(self.S_LOCKED)

        self.transfer_done = False

    def on_ok(self, sender):
        """
        This callback is called when one task reaches status S_OK.
        When the fixed-cell relaxation finishes, its relaxed structure is
        transferred to the ion+cell task, which is then unlocked.
        """
        logger.debug("in on_ok with sender %s" % sender)

        if sender == self.ion_task and not self.transfer_done:
            # Get the relaxed structure.
            ion_structure = self.ion_task.read_final_structure()
            # Bug fix: replaced a stray debugging print() with logger.debug
            # so library code does not write to stdout.
            logger.debug("ion_structure %s" % ion_structure)

            # Transfer it to the ioncell task (do it only once).
            self.ioncell_task.change_structure(ion_structure)
            self.transfer_done = True

            # Finally unlock ioncell_task so that we can submit it.
            self.ioncell_task.set_status(self.S_READY)

        base_results = super(RelaxWorkflow, self).on_ok(sender)
        return base_results
class DeltaFactorWorkflow(Workflow):
    """
    Workflow that computes the deltafactor of a pseudopotential: one SCF task
    per volume around the equilibrium volume, followed by an EOS fit and a
    comparison against the reference Wien2K data.
    """
    def __init__(self, structure_or_cif, pseudo, kppa,
                 spin_mode="polarized", toldfe=1.e-8, smearing="fermi_dirac:0.1 eV",
                 accuracy="normal", ecut=None, pawecutdg=None, ecutsm=0.05, chksymbreak=0,
                 workdir=None, manager=None, **kwargs):
        # FIXME Hack in chksymbreak
        """
        Build a `Workflow` for the computation of the deltafactor.

        Args:
            structure_or_cif:
                Structure object or string with the path of the CIF file.
            pseudo:
                String with the name of the pseudopotential file or `Pseudo` object.
            kppa:
                Number of k-points per atom.
            spin_mode:
                Spin polarization mode.
            toldfe:
                Tolerance on the energy (Ha).
            smearing:
                Smearing technique.
            accuracy:
                Accuracy of the calculation.
            ecut:
                Cutoff energy (Ha). Added to the input variables only if not None.
            pawecutdg:
                Cutoff energy for the fine grid (PAW).
            ecutsm:
                Smearing of the kinetic energy cutoff.
            chksymbreak:
                Value of the Abinit chksymbreak variable.
            workdir:
                String specifying the working directory.
            manager:
                `TaskManager` responsible for the submission of the tasks.
        """
        super(DeltaFactorWorkflow, self).__init__(workdir=workdir, manager=manager)

        if isinstance(structure_or_cif, Structure):
            structure = structure_or_cif
        else:
            # Assume CIF file.
            structure = read_structure(structure_or_cif)

        self.pseudo = Pseudo.aspseudo(pseudo)

        structure = AbiStructure.asabistructure(structure)
        smearing = Smearing.assmearing(smearing)

        self._input_structure = structure

        v0 = structure.volume

        # From 94% to 106% of the equilibrium volume.
        self.volumes = v0 * np.arange(94, 108, 2) / 100.

        for vol in self.volumes:
            new_lattice = structure.lattice.scale(vol)

            new_structure = Structure(new_lattice, structure.species, structure.frac_coords)
            new_structure = AbiStructure.asabistructure(new_structure)

            extra_abivars = dict(
                pawecutdg=pawecutdg,
                ecutsm=ecutsm,
                toldfe=toldfe,
                prtwf=0,
                paral_kgb=0,
            )
            extra_abivars.update(**kwargs)

            if ecut is not None:
                extra_abivars.update({"ecut": ecut})

            ksampling = KSampling.automatic_density(new_structure, kppa,
                                                    chksymbreak=chksymbreak)

            scf_input = ScfStrategy(new_structure, self.pseudo, ksampling,
                                    accuracy=accuracy, spin_mode=spin_mode,
                                    smearing=smearing, **extra_abivars)

            self.register(scf_input, task_class=ScfTask)

    def get_results(self):
        """
        Fit the EOS, compute the deltafactor with respect to the Wien2K
        reference, save the E(V) data to deltadata.txt and return a
        `WorkflowResults` dictionary. Fit failures are pushed into the
        results instead of being raised.
        """
        num_sites = self._input_structure.num_sites

        etotal = ArrayWithUnit(self.read_etotal(), "Ha").to("eV")

        wf_results = super(DeltaFactorWorkflow, self).get_results()

        wf_results.update({
            "etotal"    : list(etotal),
            "volumes"   : list(self.volumes),
            "natom"     : num_sites,
            "dojo_level": 1,
        })

        try:
            # Use same fit as the one employed for the deltafactor.
            eos_fit = EOS.DeltaFactor().fit(self.volumes/num_sites, etotal/num_sites)
            eos_fit.plot(show=False, savefig=self.outdir.path_in("eos.pdf"))

            # FIXME: This object should be moved to pseudo_dojo.
            # Get reference results (Wien2K).
            from pseudo_dojo.refdata.deltafactor import df_database, df_compute
            wien2k = df_database().get_entry(self.pseudo.symbol)

            # Compute deltafactor estimator.
            dfact = df_compute(wien2k.v0, wien2k.b0_GPa, wien2k.b1,
                               eos_fit.v0, eos_fit.b0_GPa, eos_fit.b1, b0_GPa=True)

            print("delta", eos_fit)
            print("Deltafactor = %.3f meV" % dfact)

            wf_results.update({
                "v0": eos_fit.v0,
                "b0": eos_fit.b0,
                "b0_GPa": eos_fit.b0_GPa,
                "b1": eos_fit.b1,
            })

            # Write data for the computation of the delta factor.
            # BUG FIX: this block used to live outside the try block and
            # raised NameError (dfact undefined) whenever the fit failed.
            with open(self.outdir.path_in("deltadata.txt"), "w") as fh:
                fh.write("# Deltafactor = %s meV\n" % dfact)
                fh.write("# Volume/natom [Ang^3] Etotal/natom [eV]\n")
                for v, e in zip(self.volumes, etotal):
                    fh.write("%s %s\n" % (v/num_sites, e/num_sites))

        except EOS.Error as exc:
            wf_results.push_exceptions(exc)

        #if kwargs.get("json_dump", True):
        #    wf_results.json_dump(self.path_in_workdir("results.json"))

        return wf_results

    def on_all_ok(self):
        """Called when all tasks have reached S_OK: collect and return the results."""
        return self.get_results()
class G0W0_Workflow(Workflow):
    """Workflow for G0W0 calculations: SCF + NSCF + screening + self-energy runs."""

    def __init__(self, scf_input, nscf_input, scr_input, sigma_inputs,
                 workdir=None, manager=None):
        """
        Args:
            scf_input:
                Input for the SCF run or `SCFStrategy` object.
            nscf_input:
                Input for the NSCF run or `NSCFStrategy` object.
            scr_input:
                Input for the screening run or `ScrStrategy` object.
            sigma_inputs:
                List of Strategies for the self-energy run.
            workdir:
                Working directory of the calculation.
            manager:
                `TaskManager` object.
        """
        super(G0W0_Workflow, self).__init__(workdir=workdir, manager=manager)

        # Register the ground-state run(s). When a list is given every input is
        # registered, but only the last SCF task feeds the NSCF run.
        scf_list = scf_input if isinstance(scf_input, (list, tuple)) else [scf_input]
        for inp in scf_list:
            self.scf_task = self.register(inp, task_class=ScfTask)

        # NSCF run, fed by the density of the (last) SCF task.
        self.nscf_task = self.register(nscf_input, deps={self.scf_task: "DEN"}, task_class=NscfTask)

        # Screening run, fed by the NSCF wavefunctions.
        self.scr_task = self.register(scr_input, deps={self.nscf_task: "WFK"})

        # One SIGMA run per input; each one needs the WFK and SCR files.
        if not isinstance(sigma_inputs, (list, tuple)):
            sigma_inputs = [sigma_inputs]

        self.sigma_tasks = []
        for inp in sigma_inputs:
            sigma_task = self.register(inp, deps={self.nscf_task: "WFK", self.scr_task: "SCR"})
            self.sigma_tasks.append(sigma_task)
class SigmaConvWorkflow(Workflow):
    """Workflow for self-energy convergence studies: one sigma task per input."""

    def __init__(self, wfk_node, scr_node, sigma_inputs, workdir=None, manager=None):
        """
        Args:
            wfk_node:
                The node that has produced the WFK file.
            scr_node:
                The node that has produced the SCR file.
            sigma_inputs:
                List of Strategies for the self-energy run.
            workdir:
                Working directory of the calculation.
            manager:
                `TaskManager` object.
        """
        super(SigmaConvWorkflow, self).__init__(workdir=workdir, manager=manager)

        # Normalize to a list, then register one SIGMA run per input.
        # Every task reads the WFK and SCR files produced by the two nodes.
        if not isinstance(sigma_inputs, (list, tuple)):
            sigma_inputs = [sigma_inputs]

        for inp in sigma_inputs:
            self.register(inp, deps={wfk_node: "WFK", scr_node: "SCR"})
#class SCGW_Workflow(Workflow):
#
# def __init__(self, scr_input, sigma_input, workdir=None, manager=None):
# """
# Workflow for G0W0 calculations.
#
# Args:
# scr_input:
# Input for the screening run or `ScrStrategy` object
# sigma_input:
# Strategy for the self-energy run.
# workdir:
# Working directory of the calculation.
# manager:
# `TaskManager` object.
# """
# super(SCGW_Workflow, self).__init__(workdir=workdir, manager=manager)
#
# # Register the SCREENING run.
# self.scr_task = self.register(scr_input, deps={nscf_task: "WFK"})
#
# # Register the SIGMA run.
# self.sigma_task = self.register(sigma_input, deps={self.nscf_task: "WFK", self.scr_task: "SCR"})
#
# def not_converged(self):
# return self.sigma_task.not_converged()
#
# def restart(self):
# ext = "QPS"
# qps_file = self.sigma_task.outdir.has_abiext(ext)
# irdvars = irdvars_for_ext(ext)
#
# if not qps_file:
# raise TaskRestartError("Cannot find the QPS file to restart from.")
#
# # Move the QPS file produced by the SIGMA task to
# # the indir of the SCR task and the indir of the SIGMA task.
#        scr_infile = self.scr_task.indir.path_in(os.path.basename(qps_file))
#        sigma_infile = self.sigma_task.indir.path_in(os.path.basename(qps_file))
# shutil.copy(qps_file, scr_infile)
# shutil.move(qps_file, sigma_infile)
#
# # Add the appropriate variable for reading the QPS file.
# self.scr_task.strategy.add_extra_abivars(irdvars)
# self.sigma_task.strategy.add_extra_abivars(irdvars)
#
# # Now we can resubmit the job.
# #for task in self.
# # task.reset()
# self._restart()
class BSEMDF_Workflow(Workflow):
    """
    Workflow for simple BSE calculations in which the self-energy corrections
    are approximated by the scissors operator and the screening is modeled
    with the model dielectric function.
    """
    def __init__(self, scf_input, nscf_input, bse_input, workdir=None, manager=None):
        """
        Args:
            scf_input:
                Input for the SCF run or `ScfStrategy` object.
            nscf_input:
                Input for the NSCF run or `NscfStrategy` object.
            bse_input:
                Input for the BSE run or `BSEStrategy` object.
            workdir:
                Working directory of the calculation.
            manager:
                `TaskManager`.
        """
        super(BSEMDF_Workflow, self).__init__(workdir=workdir, manager=manager)

        # Ground-state SCF run.
        self.scf_task = self.register(scf_input, task_class=ScfTask)

        # NSCF run, fed by the SCF density.
        nscf_deps = {self.scf_task: "DEN"}
        self.nscf_task = self.register(nscf_input, deps=nscf_deps, task_class=NscfTask)

        # BSE run (Haydock solver), fed by the NSCF wavefunctions.
        bse_deps = {self.nscf_task: "WFK"}
        self.bse_task = self.register(bse_input, deps=bse_deps, task_class=HaydockBseTask)
class PhononWorkflow(Workflow):
    """
    This workflow usually consists of nirred Phonon tasks where nirred is
    the number of irreducible perturbations for a given q-point.
    It provides the callback method (on_all_ok) that calls mrgddb to merge
    the partial DDB files and mrggkk to merge the GKK files.
    """
    def merge_ddb_files(self):
        """
        Called when all the q-points have been computed.
        It runs `mrgddb` in sequential on the local machine to produce
        the final DDB file in the outdir of the `Workflow`.
        """
        # list() so that len() also works on Python 3 where filter is lazy.
        ddb_files = list(filter(None, [task.outdir.has_abiext("DDB") for task in self]))

        logger.debug("will call mrgddb to merge %s:\n" % str(ddb_files))
        # Every task is expected to have produced its partial DDB file.
        assert len(ddb_files) == len(self)

        #if len(ddb_files) == 1:
        # Avoid the merge. Just move the DDB file to the outdir of the workflow

        # Final DDB file will be produced in the outdir of the workflow.
        out_ddb = self.outdir.path_in("out_DDB")
        desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())

        mrgddb = wrappers.Mrgddb(verbose=1)
        mrgddb.set_mpi_runner("mpirun")
        mrgddb.merge(ddb_files, out_ddb=out_ddb, description=desc, cwd=self.outdir.path)

    def merge_gkk_files(self):
        """
        Called when all the q-points have been computed.
        It runs `mrggkk` in sequential on the local machine to produce
        the final GKK file in the outdir of the `Workflow`.

        NOTE: still not implemented -- the final call to mrggkk.merge must be checked.
        """
        gkk_files = list(filter(None, [task.outdir.has_abiext("GKK") for task in self]))

        logger.debug("Will call mrggkk to merge %s:\n" % str(gkk_files))
        # BUG FIX: the assert used the undefined name `gkk` (NameError).
        assert len(gkk_files) == len(self)

        #if len(gkk_files) == 1:
        # Avoid the merge. Just move the GKK file to the outdir of the workflow

        # Final GKK file will be produced in the outdir of the workflow.
        out_ggk = self.outdir.path_in("out_GKK")

        mrggkk = wrappers.Mrggkk(verbose=1)
        mrggkk.set_mpi_runner("mpirun")
        raise NotImplementedError("Have to check mrggkk")
        #mrggkk.merge(gswfk_file, dfpt_files, gkk_files, out_fname, binascii=0, cwd=self.outdir.path)

    def on_all_ok(self):
        """
        Called when all the q-points have been computed.
        Merges the partial DDB files into the outdir of the `Workflow`.
        """
        # Merge DDB files.
        self.merge_ddb_files()

        # Merge GKK files (disabled for now, see merge_gkk_files).
        #self.merge_gkk_files()

        return dict(returncode=0,
                    message="DDB merge done"
                    )
class WorkflowResults(dict, MSONable):
    """
    Dictionary used to store some of the results produced by a Workflow object.
    The key "task_results" maps task names to the corresponding results and
    must always be present; exception strings accumulate under "_exceptions".
    """
    _MANDATORY_KEYS = [
        "task_results",
    ]

    # Key used to store the list of exception strings.
    _EXC_KEY = "_exceptions"

    def __init__(self, *args, **kwargs):
        super(WorkflowResults, self).__init__(*args, **kwargs)

        # Make sure the exceptions list is always available.
        if self._EXC_KEY not in self:
            self[self._EXC_KEY] = []

    @property
    def exceptions(self):
        """List of strings with the exceptions pushed so far."""
        return self[self._EXC_KEY]

    def push_exceptions(self, *exceptions):
        """Save the string representation of each exception, skipping duplicates."""
        for exc in exceptions:
            newstr = str(exc)
            if newstr not in self.exceptions:
                self[self._EXC_KEY] += [newstr]

    def assert_valid(self):
        """
        Returns empty string if results seem valid.

        The try assert except trick allows one to get a string with info on the exception.
        We use the += operator so that sub-classes can add their own message.
        """
        # Validate tasks.
        # BUG FIX: task_results is stored as a dictionary *key*, not as an
        # attribute (dict subclass), so self.task_results raised AttributeError.
        # The key maps task names to results, hence we iterate over the values.
        for tres in self["task_results"].values():
            self[self._EXC_KEY] += tres.assert_valid()

        return self[self._EXC_KEY]

    @property
    def to_dict(self):
        """JSON-serializable dict representation (MSONable protocol)."""
        d = {k: v for k, v in self.items()}
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        return d

    @classmethod
    def from_dict(cls, d):
        """Reconstruct the object from its dict representation."""
        mydict = {k: v for k, v in d.items() if k not in ["@module", "@class"]}
        return cls(mydict)
# parsing oncvpsp
# Former-commit-id: 6b11e4eb1fb6ef80156a16c69523e22b74dccfee [formerly fba662e3604499490afa44a6f3001654f7e47852]
# Former-commit-id: 9d8c0085b33aa95f12e7e56c8068f4d8983d05a4
"""
Abinit Workflows
"""
from __future__ import division, print_function
import sys
import os
import shutil
import time
import abc
import collections
import numpy as np
try:
from pydispatch import dispatcher
except ImportError:
pass
from pymatgen.core.units import ArrayWithUnit, Ha_to_eV
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.design_patterns import Enum, AttrDict
from pymatgen.serializers.json_coders import MSONable, json_pretty_dump
from pymatgen.io.smartio import read_structure
from pymatgen.util.num_utils import iterator_from_slice, chunks, monotonic
from pymatgen.util.string_utils import list_strings, pprint_table, WildCard
from pymatgen.io.abinitio import wrappers
from pymatgen.io.abinitio.tasks import (Task, AbinitTask, Dependency, Node, ScfTask, NscfTask, HaydockBseTask, RelaxTask)
from pymatgen.io.abinitio.strategies import Strategy
from pymatgen.io.abinitio.utils import File, Directory
from pymatgen.io.abinitio.netcdf import ETSF_Reader
from pymatgen.io.abinitio.abiobjects import Smearing, AbiStructure, KSampling, Electrons
from pymatgen.io.abinitio.pseudos import Pseudo
from pymatgen.io.abinitio.strategies import ScfStrategy
from pymatgen.io.abinitio.eos import EOS
from pymatgen.io.abinitio.abitimer import AbinitTimerParser
import logging
logger = logging.getLogger(__name__)
# Module metadata.
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"

# Public API of this module.
__all__ = [
    "Workflow",
    "IterativeWorkflow",
    "BandStructureWorkflow",
    "RelaxWorkflow",
    "DeltaFactorWorkflow",
    "G0W0_Workflow",
    "SigmaConvWorkflow",
    "BSEMDF_Workflow",
    "PhononWorkflow",
]
class WorkflowError(Exception):
    """Exceptions raised by Workflow objects derive from this base class."""
class BaseWorkflow(Node):
    """
    Abstract base class for a node that aggregates a set of tasks.
    The process-control part of the interface (poll/wait/communicate)
    is modeled after subprocess.Popen, applied to every task in self.
    Subclasses must implement `processes` and `setup`.
    """
    __metaclass__ = abc.ABCMeta

    # Exception class raised by workflow objects.
    Error = WorkflowError

    # interface modeled after subprocess.Popen
    @abc.abstractproperty
    def processes(self):
        """Return a list of objects that support the subprocess.Popen protocol."""

    def poll(self):
        """
        Check if all child processes have terminated. Set and return
        returncode attribute.
        """
        return [task.poll() for task in self]

    def wait(self):
        """
        Wait for child processes to terminate. Set and return returncode attribute.
        """
        return [task.wait() for task in self]

    def communicate(self, input=None):
        """
        Interact with processes: Send data to stdin. Read data from stdout and
        stderr, until end-of-file is reached.
        Wait for process to terminate. The optional input argument should be a
        string to be sent to the child processes, or None, if no data should be
        sent to the children.

        communicate() returns a list of tuples (stdoutdata, stderrdata).
        """
        return [task.communicate(input) for task in self]

    def show_intrawork_deps(self):
        """Show the dependencies within the `Workflow`."""
        # Header row: one column per task index.
        table = [["Task #"] + [str(i) for i in range(len(self))]]

        for ii, task1 in enumerate(self):
            line = (1 + len(self)) * [""]
            line[0] = str(ii)
            # Mark with "^" the tasks on which task1 depends.
            for jj, task2 in enumerate(self):
                if task1.depends_on(task2):
                    line[jj+1] = "^"
            table.append(line)

        pprint_table(table)

    @property
    def returncodes(self):
        """
        The children return codes, set by poll() and wait() (and indirectly by communicate()).
        A None value indicates that the process hasn't terminated yet.
        A negative value -N indicates that the child was terminated by signal N (Unix only).
        """
        return [task.returncode for task in self]

    @property
    def ncpus_reserved(self):
        """
        Returns the number of CPUs reserved in this moment.
        A CPU is reserved if it's still not running but
        we have submitted the task to the queue manager.
        """
        return sum(task.tot_ncpus for task in self if task.status == task.S_SUB)

    @property
    def ncpus_allocated(self):
        """
        Returns the number of CPUs allocated in this moment.
        A CPU is allocated if it's running a task or if we have
        submitted a task to the queue manager but the job is still pending.
        """
        return sum(task.tot_ncpus for task in self if task.status in [task.S_SUB, task.S_RUN])

    @property
    def ncpus_inuse(self):
        """
        Returns the number of CPUs used in this moment.
        A CPU is used if there's a job that is running on it.
        """
        return sum(task.tot_ncpus for task in self if task.status == task.S_RUN)

    def fetch_task_to_run(self):
        """
        Returns the first task that is ready to run or
        None if no task can be submitted at present.

        Raises:
            `StopIteration` if all tasks are done.
        """
        # All the tasks are done so raise an exception
        # that will be handled by the client code.
        if all(task.is_completed for task in self):
            raise StopIteration("All tasks completed.")

        for task in self:
            if task.can_run:
                #print(task, str(task.status), [task.deps_status])
                return task

        # No task found, this usually happens when we have dependencies.
        # Beware of possible deadlocks here!
        logger.warning("Possible deadlock in fetch_task_to_run!")
        return None

    def fetch_alltasks_to_run(self):
        """
        Returns a list with all the tasks that can be submitted.
        Empty list if no task has been found.
        """
        #if all(task.is_completed for task in self):
        #    return []
        return [task for task in self if task.can_run]

    @abc.abstractmethod
    def setup(self, *args, **kwargs):
        """Method called before submitting the calculations."""

    def _setup(self, *args, **kwargs):
        # Internal hook that simply forwards to the subclass implementation.
        self.setup(*args, **kwargs)

    def connect_signals(self):
        """
        Connect the signals within the workflow.
        self is responsible for catching the important signals raised from
        its task and raise new signals when some particular condition occurs.
        """
        for task in self:
            dispatcher.connect(self.on_ok, signal=task.S_OK, sender=task)

    @property
    def all_ok(self):
        # True when every task in the workflow has reached status S_OK.
        return all(task.status == task.S_OK for task in self)

    def on_ok(self, sender):
        """
        This callback is called when one task reaches status S_OK.
        When all the tasks are OK, on_all_ok() is invoked (once) and the
        S_OK signal of the workflow itself is broadcast to the observers.
        """
        logger.debug("in on_ok with sender %s" % sender)

        if self.all_ok:
            # NOTE(review): `finalized`/`_finalized` are presumably provided by
            # the Node base class -- confirm, they are not defined in this file.
            if self.finalized:
                return AttrDict(returncode=0, message="Workflow has been already finalized")
            else:
                results = AttrDict(**self.on_all_ok())
                self._finalized = True
                # Signal to possible observers that the `Workflow` reached S_OK
                print("Workflow %s is finalized and broadcasts signal S_OK" % str(self))
                print("Workflow %s status = %s" % (str(self), self.status))
                dispatcher.send(signal=self.S_OK, sender=self)

                return results

        return AttrDict(returncode=1, message="Not all tasks are OK!")

    def on_all_ok(self):
        """
        This method is called once the `workflow` is completed i.e. when all the tasks
        have reached status S_OK. Subclasses should provide their own implementation.

        Returns:
            Dictionary that must contain at least the following entries:
                returncode:
                    0 on success.
                message:
                    a string that should provide a human-readable description of what has been performed.
        """
        return dict(returncode=0,
                    message="Calling on_all_ok of the base class!",
                    )

    def get_results(self):
        """
        Method called once the calculations are completed.
        The base version returns a dictionary task_name : TaskResults for each task in self.
        """
        return WorkflowResults(task_results={task.name: task.results for task in self})
class Workflow(BaseWorkflow):
    """
    A Workflow is a list of (possibly connected) tasks.
    """
    Error = WorkflowError

    def __init__(self, workdir=None, manager=None):
        """
        Args:
            workdir:
                Path to the working directory.
            manager:
                `TaskManager` object.
        """
        super(Workflow, self).__init__()

        self._tasks = []

        if workdir is not None:
            self.set_workdir(workdir)

        if manager is not None:
            self.set_manager(manager)

    def set_manager(self, manager):
        """Set the `TaskManager` to use to launch the Task."""
        # NOTE(review): self keeps a deepcopy while the tasks receive the
        # original manager -- confirm whether the tasks should get the copy.
        self.manager = manager.deepcopy()
        for task in self:
            task.set_manager(manager)

    @property
    def flow(self):
        """The flow containing this `Workflow`."""
        return self._flow

    def set_flow(self, flow):
        """Set the flow associated to this `Workflow` (can be set only once)."""
        if not hasattr(self, "_flow"):
            self._flow = flow
        else:
            if self._flow != flow:
                raise ValueError("self._flow != flow")

    def set_workdir(self, workdir, chroot=False):
        """Set the working directory. Cannot be set more than once unless chroot is True"""
        if not chroot and hasattr(self, "workdir") and self.workdir != workdir:
            raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir))

        self.workdir = os.path.abspath(workdir)

        # Directories with (input|output|temporary) data.
        # The workflow will use these directories to connect
        # itself to other workflows and/or to produce new data
        # that will be used by its children.
        self.indir = Directory(os.path.join(self.workdir, "indata"))
        self.outdir = Directory(os.path.join(self.workdir, "outdata"))
        self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata"))

    def chroot(self, new_workdir):
        """Change the workdir of the workflow and of all its tasks."""
        self.set_workdir(new_workdir, chroot=True)

        for i, task in enumerate(self):
            new_tdir = os.path.join(self.workdir, "t" + str(i))
            task.set_workdir(new_tdir, chroot=True)

    def __len__(self):
        return len(self._tasks)

    def __iter__(self):
        return self._tasks.__iter__()

    def __getitem__(self, items):
        # Parameter renamed: the original shadowed the builtin `slice`.
        return self._tasks[items]

    def chunks(self, chunk_size):
        """Yield successive chunks of tasks of length chunk_size."""
        for tasks in chunks(self, chunk_size):
            yield tasks

    def ipath_from_ext(self, ext):
        """
        Returns the path of the input file with extension ext.
        Use it when the file does not exist yet.
        """
        # BUG FIX: this method was mistakenly also named opath_from_ext,
        # so the (identically named) definition below silently shadowed it.
        return self.indir.path_in("in_" + ext)

    def opath_from_ext(self, ext):
        """
        Returns the path of the output file with extension ext.
        Use it when the file does not exist yet.
        """
        return self.outdir.path_in("out_" + ext)

    @property
    def processes(self):
        """The processes associated to the tasks."""
        return [task.process for task in self]

    @property
    def all_done(self):
        """True if all the `Task` in the `Workflow` are done."""
        return all(task.status >= task.S_DONE for task in self)

    @property
    def isnc(self):
        """True if norm-conserving calculation."""
        return all(task.isnc for task in self)

    @property
    def ispaw(self):
        """True if PAW calculation."""
        return all(task.ispaw for task in self)

    @property
    def status_counter(self):
        """
        Returns a `Counter` object that counts the number of task with
        given status (use the string representation of the status as key).
        """
        counter = collections.Counter()

        for task in self:
            counter[str(task.status)] += 1

        return counter

    def allocate(self, manager=None):
        """
        This function is called once we have completed the initialization
        of the `Workflow`. It sets the manager of each task (if not already done)
        and defines the working directories of the tasks.

        Args:
            manager:
                `TaskManager` object or None
        """
        for i, task in enumerate(self):
            if not hasattr(task, "manager"):
                # Set the manager.
                if manager is not None:
                    # Use the one provided in input.
                    task.set_manager(manager)
                else:
                    # Use the one of the workflow.
                    task.set_manager(self.manager)

            task_workdir = os.path.join(self.workdir, "t" + str(i))

            if not hasattr(task, "workdir"):
                task.set_workdir(task_workdir)
            else:
                if task.workdir != task_workdir:
                    raise ValueError("task.workdir != task_workdir: %s, %s" % (task.workdir, task_workdir))

    def register(self, obj, deps=None, required_files=None, manager=None, task_class=None):
        """
        Registers a new `Task` and add it to the internal list, taking into account possible dependencies.

        Args:
            obj:
                `Strategy` object or `AbinitInput` instance.
                if Strategy object, we create a new `AbinitTask` from the input strategy and add it to the list.
            deps:
                Dictionary specifying the dependency of this node.
                None means that this obj has no dependency.
            required_files:
                List of strings with the path of the files used by the task.
            manager:
                The `TaskManager` responsible for the submission of the task. If manager is None, we use
                the `TaskManager` specified during the creation of the `Workflow`.
            task_class:
                Task subclass to instantiate. Default: `AbinitTask`

        Returns:
            `Task` object
        """
        task_workdir = None
        if hasattr(self, "workdir"):
            task_workdir = os.path.join(self.workdir, "t" + str(len(self)))

        if isinstance(obj, Task):
            task = obj
        else:
            # Set the class.
            if task_class is None:
                task_class = AbinitTask

            if isinstance(obj, Strategy):
                # Create the new task (note the factory so that we create subclasses easily).
                task = task_class(obj, task_workdir, manager)
            else:
                task = task_class.from_input(obj, task_workdir, manager)

        self._tasks.append(task)

        # Handle possible dependencies.
        if deps is not None:
            deps = [Dependency(node, exts) for (node, exts) in deps.items()]
            task.add_deps(deps)

        # Handle possible required files.
        if required_files is not None:
            task.add_required_files(required_files)

        return task

    def path_in_workdir(self, filename):
        """Create the absolute path of filename in the working directory."""
        return os.path.join(self.workdir, filename)

    def setup(self, *args, **kwargs):
        """
        Method called before running the calculations.
        The default implementation is empty.
        """

    def build(self, *args, **kwargs):
        """Creates the top level directory."""
        # Create the directories of the workflow.
        self.indir.makedirs()
        self.outdir.makedirs()
        self.tmpdir.makedirs()

        # Build dirs and files of each task.
        for task in self:
            task.build(*args, **kwargs)

        # Connect signals within the workflow.
        self.connect_signals()

    @property
    def status(self):
        """
        Returns the status of the workflow i.e. the minimum of the status of the tasks.
        """
        return self.get_all_status(only_min=True)

    def get_all_status(self, only_min=False):
        """
        Returns a list with the status of the tasks in self.

        Args:
            only_min:
                If True, the minimum of the status is returned.
        """
        if len(self) == 0:
            # The workflow will be created in the future.
            if only_min:
                return self.S_INIT
            else:
                return [self.S_INIT]

        self.check_status()

        status_list = [task.status for task in self]

        if only_min:
            return min(status_list)
        else:
            return status_list

    def check_status(self):
        """Check the status of the tasks."""
        # Recompute the status of the tasks.
        for task in self:
            task.check_status()

        # Take into account possible dependencies. Use a list instead of generators.
        for task in self:
            if task.status <= task.S_SUB and all([status == task.S_OK for status in task.deps_status]):
                task.set_status(task.S_READY)

    def rmtree(self, exclude_wildcard=""):
        """
        Remove all files and directories in the working directory

        Args:
            exclude_wildcard:
                Optional string with regular expressions separated by `|`.
                Files matching one of the regular expressions will be preserved.
                example: exclude_wildcard="*.nc|*.txt" preserves all the files
                whose extension is in ["nc", "txt"].
        """
        if not exclude_wildcard:
            shutil.rmtree(self.workdir)
        else:
            w = WildCard(exclude_wildcard)

            for dirpath, dirnames, filenames in os.walk(self.workdir):
                for fname in filenames:
                    path = os.path.join(dirpath, fname)
                    if not w.match(fname):
                        os.remove(path)

    def rm_indatadir(self):
        """Remove the indata directory of each task."""
        for task in self:
            task.rm_indatadir()

    def rm_outdatadir(self):
        """Remove the outdata directory of each task."""
        for task in self:
            # BUG FIX: the original called the misspelled task.rm_outatadir().
            task.rm_outdatadir()

    def rm_tmpdatadir(self):
        """Remove the tmpdata directory of each task."""
        for task in self:
            task.rm_tmpdatadir()

    def move(self, dest, isabspath=False):
        """
        Recursively move self.workdir to another location. This is similar to the Unix "mv" command.
        The destination path must not already exist. If the destination already exists
        but is not a directory, it may be overwritten depending on os.rename() semantics.

        By default, dest is located in the parent directory of self.workdir, use isabspath=True
        to specify an absolute path.
        """
        if not isabspath:
            dest = os.path.join(os.path.dirname(self.workdir), dest)

        shutil.move(self.workdir, dest)

    def submit_tasks(self, wait=False):
        """
        Submits the task in self and wait.
        TODO: change name.
        """
        for task in self:
            task.start()

        if wait:
            for task in self:
                task.wait()

    def start(self, *args, **kwargs):
        """
        Start the work. Calls build and _setup first, then submit the tasks.
        Non-blocking call unless wait is set to True
        """
        wait = kwargs.pop("wait", False)

        # Build dirs and files.
        self.build(*args, **kwargs)

        # Initial setup.
        self._setup(*args, **kwargs)

        # Submit tasks (does not block).
        self.submit_tasks(wait=wait)

    def read_etotal(self):
        """
        Reads the total energy from the GSR file produced by the task.

        Return a numpy array with the total energies in Hartree
        The array element is set to np.inf if an exception is raised while reading the GSR file.
        """
        if not self.all_done:
            raise self.Error("Some task is still in running/submitted state")

        etotal = []
        for task in self:
            # Open the GSR file and read etotal (Hartree).
            gsr_path = task.outdir.has_abiext("GSR")
            etot = np.inf
            if gsr_path:
                with ETSF_Reader(gsr_path) as r:
                    etot = r.read_value("etotal")

            etotal.append(etot)

        return etotal

    def parse_timers(self):
        """
        Parse the TIMER section reported in the ABINIT output files.

        Returns:
            `AbinitTimerParser` object
        """
        # list() so that the parser receives a sequence on Python 3 as well.
        filenames = list(filter(os.path.exists, [task.output_file.path for task in self]))

        parser = AbinitTimerParser()
        parser.parse(filenames)

        return parser
class IterativeWorkflow(Workflow):
    """
    This object defines a `Workflow` that produces `Tasks` until a particular
    condition is satisfied (mainly used for convergence studies or iterative algorithms.)
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, strategy_generator, max_niter=25, workdir=None, manager=None):
        """
        Args:
            strategy_generator:
                Generator object that produces `Strategy` objects.
            max_niter:
                Maximum number of iterations. A negative value or zero value
                is equivalent to having an infinite number of iterations.
            workdir:
                Working directory.
            manager:
                `TaskManager` class.
        """
        super(IterativeWorkflow, self).__init__(workdir, manager)

        self.strategy_generator = strategy_generator

        self._max_niter = max_niter
        # 1-based counter of the iterations performed so far.
        self.niter = 0

    @property
    def max_niter(self):
        """Maximum number of iterations (<= 0 means no limit)."""
        return self._max_niter

    def next_task(self):
        """
        Generate and register a new `Task`.

        Returns:
            New `Task` object (the last one registered).

        Raises:
            `StopIteration` when the strategy generator is exhausted
            (propagated directly to the caller).
        """
        next_strategy = next(self.strategy_generator)

        self.register(next_strategy)
        assert len(self) == self.niter

        return self[-1]

    def submit_tasks(self, *args, **kwargs):
        """
        Run the tasks till self.exit_iteration says to exit
        or the number of iterations exceeds self.max_niter.
        Each task is started and then waited for (blocking).
        """
        # BUG FIX: "wait" was popped *inside* the loop without a default,
        # raising KeyError on the second iteration. Discard it once, up-front
        # (iterative tasks are always waited for explicitly below).
        kwargs.pop("wait", None)

        self.niter = 1

        while True:
            if self.niter > self.max_niter > 0:
                logger.debug("niter %d > max_niter %d" % (self.niter, self.max_niter))
                break

            try:
                task = self.next_task()
            except StopIteration:
                break

            # Start the task and block till completion.
            task.start(*args, **kwargs)
            task.wait()

            data = self.exit_iteration(*args, **kwargs)

            if data["exit"]:
                break

            self.niter += 1

    @abc.abstractmethod
    def exit_iteration(self, *args, **kwargs):
        """
        Return a dictionary with the results produced at the given iteration.
        The dictionary must contain an entry "exit" that evaluates to
        True if the iteration should be stopped.
        """
def check_conv(values, tol, min_numpts=1, mode="abs", vinf=None):
    """
    Given a list of values and a tolerance tol, returns the leftmost index for which

        abs(value[i] - vinf) < tol        if mode == "abs"
    or
        abs(value[i] - vinf) / vinf < tol if mode == "rel"

    returns -1 if convergence is not achieved. By default, vinf = values[-1]

    Args:
        values:
            Sequence of values whose convergence is analyzed.
        tol:
            Tolerance
        min_numpts:
            Minimum number of points that must be converged.
        mode:
            "abs" for absolute convergence, "rel" for relative convergence.
        vinf:
            Used to specify an alternative value instead of values[-1].

    Raises:
        ValueError: if mode is neither "abs" nor "rel".
    """
    if vinf is None:
        vinf = values[-1]

    if mode == "abs":
        deltas = [abs(v - vinf) for v in values]
    elif mode == "rel":
        deltas = [abs(v - vinf) / vinf for v in values]
    else:
        raise ValueError("Wrong mode %s" % mode)

    numpts = len(deltas)

    # Sentinel: -2 + 1 == -1 signals "not converged".
    conv_idx = -2

    # Only scan when we have enough points and the second-to-last value
    # (the last delta is zero by construction for the default vinf) is
    # already within tolerance.
    if numpts > min_numpts and deltas[-2] < tol:
        # Walk backwards: the loop variable stops at the rightmost
        # non-converged index, so conv_idx + 1 is the leftmost converged one.
        for conv_idx in range(numpts - 1, -1, -1):
            if deltas[conv_idx] > tol:
                break
        # Require at least min_numpts converged trailing points.
        if (numpts - conv_idx - 1) < min_numpts:
            conv_idx = -2

    return conv_idx + 1
def compute_hints(ecut_list, etotal, atols_mev, pseudo, min_numpts=1, stream=sys.stdout):
    """
    Analyze the convergence of etotal(ecut) and build the dictionary with the
    hints (low/normal/high cutoff energies) for the given pseudopotential.

    Args:
        ecut_list: List of cutoff energies (Ha).
        etotal: Total energies in Hartree, one per cutoff.
        atols_mev: Absolute tolerances in meV for (low, normal, high) accuracy.
        pseudo: Object providing .name and .path attributes.
        min_numpts: Minimum number of converged points (passed to check_conv).
        stream: If not None, a human-readable table is written to it.
    """
    # Convert the meV tolerances to Hartree.
    de_low, de_normal, de_high = [a / (1000 * Ha_to_eV) for a in atols_mev]

    etotal_inf = etotal[-1]

    ihigh = check_conv(etotal, de_high, min_numpts=min_numpts)
    inormal = check_conv(etotal, de_normal)
    ilow = check_conv(etotal, de_low)

    accidx = {"H": ihigh, "N": inormal, "L": ilow}

    # Build the table: header row followed by one row per cutoff.
    table = [["iter", "ecut", "etotal", "et-e_inf [meV]", "accuracy"]]
    for idx, (ec, et) in enumerate(zip(ecut_list, etotal)):
        line = "%d %.1f %.7f %.3f" % (idx, ec, et, (et - etotal_inf) * Ha_to_eV * 1.e+3)
        accuracy = "".join(c for c, v in accidx.items() if v == idx)
        table.append(line.split() + [accuracy])

    if stream is not None:
        stream.write("pseudo: %s\n" % pseudo.name)
        pprint_table(table, out=stream)

    ecut_high, ecut_normal, ecut_low = None, None, None

    # Converged when the "high" accuracy level found a valid index.
    converged = (ihigh != -1)
    if converged:
        ecut_low = ecut_list[ilow]
        ecut_normal = ecut_list[inormal]
        ecut_high = ecut_list[ihigh]

    # Norm-conserving case: a single augmentation ratio equal to 1.
    aug_ratios = [1]
    aug_ratio_low, aug_ratio_normal, aug_ratio_high = 1, 1, 1

    return {
        "exit"       : converged,
        "etotal"     : list(etotal),
        "ecut_list"  : ecut_list,
        "aug_ratios" : aug_ratios,
        "low"        : {"ecut": ecut_low, "aug_ratio": aug_ratio_low},
        "normal"     : {"ecut": ecut_normal, "aug_ratio": aug_ratio_normal},
        "high"       : {"ecut": ecut_high, "aug_ratio": aug_ratio_high},
        "pseudo_name": pseudo.name,
        "pseudo_path": pseudo.path,
        "atols_mev"  : atols_mev,
        "dojo_level" : 0,
    }
def plot_etotal(ecut_list, etotals, aug_ratios, **kwargs):
    """
    Uses Matplotlib to plot the energy curve as function of ecut.

    Args:
        ecut_list:
            List of cutoff energies
        etotals:
            Total energies in Hartree, see aug_ratios
        aug_ratios:
            List of augmentation ratios. [1,] for norm-conserving, [4, ...] for PAW
            The number of elements in aug_ratios must equal the number of (sub)lists
            in etotals. Example:

                - NC:  etotals = [3.4, 4,5 ...], aug_ratios = [1,]
                - PAW: etotals = [[3.4, ...], [3.6, ...]], aug_ratios = [4,6]

    =========     ==============================================================
    kwargs        description
    =========     ==============================================================
    show          True to show the figure
    savefig       'abc.png' or 'abc.eps'* to save the figure to a file.
    =========     ==============================================================

    Returns:
        `matplotlib` figure.

    Raises:
        ValueError: if len(aug_ratios) is neither 1 nor len(etotals).
    """
    show = kwargs.pop("show", True)
    savefig = kwargs.pop("savefig", None)

    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)

    npts = len(ecut_list)

    if len(aug_ratios) != 1 and len(aug_ratios) != len(etotals):
        raise ValueError("The number of sublists in etotal must equal the number of aug_ratios")

    # Normalize the NC case to the PAW layout (list of sublists).
    if len(aug_ratios) == 1:
        etotals = [etotals, ]

    lines, legends = [], []

    emax = -np.inf
    for (aratio, etot) in zip(aug_ratios, etotals):
        # Energies in meV, referred to the last (most converged) point.
        emev = np.array(etot) * Ha_to_eV * 1000
        emev_inf = npts * [emev[-1]]
        yy = emev - emev_inf

        # BUG FIX: the original called np.max(emax, np.max(yy)), which passes
        # np.max(yy) as the `axis` argument and raises TypeError. Use the
        # builtin max to track the running maximum.
        emax = max(emax, np.max(yy))

        line, = ax.plot(ecut_list, yy, "-->", linewidth=3.0, markersize=10)

        lines.append(line)
        legends.append("aug_ratio = %s" % aratio)

    ax.legend(lines, legends, 'upper right', shadow=True)

    # Set xticks and labels.
    ax.grid(True)
    ax.set_xlabel("Ecut [Ha]")
    # Raw strings: "\D" is an invalid escape sequence in normal strings.
    ax.set_ylabel(r"$\Delta$ Etotal [meV]")
    ax.set_xticks(ecut_list)

    #ax.yaxis.set_view_interval(-10, emax + 0.01 * abs(emax))
    #ax.xaxis.set_view_interval(-10, 20)
    ax.yaxis.set_view_interval(-10, 20)

    ax.set_title(r"$\Delta$ Etotal Vs Ecut")

    if show:
        plt.show()

    if savefig is not None:
        fig.savefig(savefig)

    return fig
class PseudoConvergence(Workflow):
    """Workflow that registers one SCF task per cutoff energy to study the
    convergence of the total energy of a pseudopotential wrt ecut."""

    def __init__(self, workdir, manager, pseudo, ecut_list, atols_mev,
                 toldfe=1.e-8, spin_mode="polarized",
                 acell=(8, 9, 10), smearing="fermi_dirac:0.1 eV"):
        super(PseudoConvergence, self).__init__(workdir, manager)

        # Temporary object used only to build the strategies.
        generator = PseudoIterativeConvergence(
            workdir, manager, pseudo, ecut_list, atols_mev,
            toldfe=toldfe,
            spin_mode=spin_mode,
            acell=acell,
            smearing=smearing,
            max_niter=len(ecut_list),
        )

        self.atols_mev = atols_mev
        self.pseudo = Pseudo.aspseudo(pseudo)

        # Register one task per cutoff, recording the cutoffs as we go.
        self.ecut_list = []
        for ecut in ecut_list:
            scf_strategy = generator.strategy_with_ecut(ecut)
            self.ecut_list.append(ecut)
            self.register(scf_strategy)

    def get_results(self):
        """Collect the total energies, compute the hints, plot E(ecut)."""
        wf_results = super(PseudoConvergence, self).get_results()

        energies = self.read_etotal()
        hints = compute_hints(self.ecut_list, energies, self.atols_mev, self.pseudo)

        plot_etotal(hints["ecut_list"], hints["etotal"], hints["aug_ratios"],
                    show=False, savefig=self.path_in_workdir("etotal.pdf"))

        wf_results.update(hints)

        # E(ecut) should decrease monotonically towards the converged value.
        if not monotonic(energies, mode="<", atol=1.0e-5):
            logger.warning("E(ecut) is not decreasing")
            wf_results.push_exceptions("E(ecut) is not decreasing:\n" + str(energies))

        return wf_results
class PseudoIterativeConvergence(IterativeWorkflow):
    """Iterative version of the ecut convergence study: tasks are generated
    one at a time until `exit_iteration` reports convergence."""

    def __init__(self, workdir, manager, pseudo, ecut_list_or_slice, atols_mev,
                 toldfe=1.e-8, spin_mode="polarized",
                 acell=(8, 9, 10), smearing="fermi_dirac:0.1 eV", max_niter=50,):
        """
        Args:
            workdir:
                Working directory.
            manager:
                `TaskManager` object.
            pseudo:
                string or Pseudo instance
            ecut_list_or_slice:
                List of cutoff energies or slice object (mainly used for infinite iterations).
            atols_mev:
                List of absolute tolerances in meV (3 entries corresponding to
                accuracy ["low", "normal", "high"]).
            toldfe:
                SCF tolerance on the energy difference (Ha).
            spin_mode:
                Defines how the electronic spin will be treated.
            acell:
                Lengths of the periodic box in Bohr.
            smearing:
                Smearing instance or string in the form "mode:tsmear".
                Default: FermiDirac with T=0.1 eV.
            max_niter:
                Maximum number of iterations.
        """
        self.pseudo = Pseudo.aspseudo(pseudo)

        self.atols_mev = atols_mev
        self.toldfe = toldfe
        self.spin_mode = spin_mode
        self.smearing = Smearing.assmearing(smearing)
        self.acell = acell

        if isinstance(ecut_list_or_slice, slice):
            self.ecut_iterator = iterator_from_slice(ecut_list_or_slice)
        else:
            self.ecut_iterator = iter(ecut_list_or_slice)

        # Construct a generator that returns strategy objects.
        def strategy_generator():
            for ecut in self.ecut_iterator:
                yield self.strategy_with_ecut(ecut)

        super(PseudoIterativeConvergence, self).__init__(
            strategy_generator(), max_niter=max_niter, workdir=workdir, manager=manager)

        if not self.isnc:
            raise NotImplementedError("PAW convergence tests are not supported yet")

    def strategy_with_ecut(self, ecut):
        """Return a Strategy instance with given cutoff energy ecut."""
        # Define the system: one atom in a box of lenghts acell.
        boxed_atom = AbiStructure.boxed_atom(self.pseudo, acell=self.acell)

        # Gamma-only sampling.
        gamma_only = KSampling.gamma_only()

        # NOTE: the original instantiated an Electrons object here that was
        # never used (spin_mode/smearing are passed to ScfStrategy directly);
        # the dead local has been removed.

        # Don't write WFK files.
        extra_abivars = {
            "ecut"  : ecut,
            "prtwf" : 0,
            "toldfe": self.toldfe,
        }
        return ScfStrategy(boxed_atom, self.pseudo, gamma_only,
                           spin_mode=self.spin_mode, smearing=self.smearing,
                           charge=0.0, scf_algorithm=None,
                           use_symmetries=True, **extra_abivars)

    @property
    def ecut_list(self):
        """The list of cutoff energies computed so far"""
        return [float(task.strategy.ecut) for task in self]

    def check_etotal_convergence(self, *args, **kwargs):
        """Return the convergence data computed by `compute_hints` from the
        energies read so far."""
        return compute_hints(self.ecut_list, self.read_etotal(), self.atols_mev,
                             self.pseudo)

    def exit_iteration(self, *args, **kwargs):
        """Callback invoked after each iteration; returns the hints dict."""
        # BUG FIX: the original called self.check_etotal_convergence(self, ...),
        # passing self twice (the duplicate was silently swallowed by *args).
        return self.check_etotal_convergence(*args, **kwargs)

    def get_results(self):
        """Return the results of the tasks."""
        wf_results = super(PseudoIterativeConvergence, self).get_results()

        data = self.check_etotal_convergence()

        ecut_list, etotal, aug_ratios = data["ecut_list"], data["etotal"], data["aug_ratios"]
        plot_etotal(ecut_list, etotal, aug_ratios,
                    show=False, savefig=self.path_in_workdir("etotal.pdf"))

        wf_results.update(data)

        # E(ecut) should decrease monotonically towards the converged value.
        if not monotonic(data["etotal"], mode="<", atol=1.0e-5):
            logger.warning("E(ecut) is not decreasing")
            wf_results.push_exceptions("E(ecut) is not decreasing\n" + str(etotal))

        return wf_results
class BandStructureWorkflow(Workflow):
    """Workflow for band structure calculations."""

    def __init__(self, scf_input, nscf_input, dos_inputs=None, workdir=None, manager=None):
        """
        Args:
            scf_input:
                Input for the SCF run or `SCFStrategy` object.
            nscf_input:
                Input for the NSCF run or `NSCFStrategy` object defining the band structure calculation.
            dos_inputs:
                Input(s) for the DOS. DOS is computed only if dos_inputs is not None.
            workdir:
                Working directory.
            manager:
                `TaskManager` object.
        """
        super(BandStructureWorkflow, self).__init__(workdir=workdir, manager=manager)

        # Ground-state SCF run.
        self.scf_task = self.register(scf_input, task_class=ScfTask)

        # NSCF run for the band structure, fed by the SCF density.
        self.nscf_task = self.register(nscf_input, deps={self.scf_task: "DEN"},
                                       task_class=NscfTask)

        # Optional DOS run(s), each reading the SCF density.
        if dos_inputs is None:
            dos_inputs = []
        elif not isinstance(dos_inputs, (list, tuple)):
            dos_inputs = [dos_inputs]

        for dos_input in dos_inputs:
            self.register(dos_input, deps={self.scf_task: "DEN"}, task_class=NscfTask)
class RelaxWorkflow(Workflow):
    """
    Workflow for structural relaxations. The first task relaxes the atomic position
    while keeping the unit cell parameters fixed. The second task uses the final
    structure to perform a structural relaxation in which both the atomic positions
    and the lattice parameters are optimized.
    """
    def __init__(self, ion_input, ioncell_input, workdir=None, manager=None):
        """
        Args:
            ion_input:
                Input for the relaxation of the ions (cell is fixed)
            ioncell_input:
                Input for the relaxation of the ions and the unit cell.
            workdir:
                Working directory.
            manager:
                `TaskManager` object.
        """
        super(RelaxWorkflow, self).__init__(workdir=workdir, manager=manager)

        self.ion_task = self.register(ion_input, task_class=RelaxTask)

        # Use WFK for the time being since I don't know why Abinit produces all these _TIM?_DEN files.
        #self.ioncell_task = self.register(ioncell_input, deps={self.ion_task: "DEN"}, task_class=RelaxTask)
        self.ioncell_task = self.register(ioncell_input, deps={self.ion_task: "WFK"}, task_class=RelaxTask)

        # Lock ioncell_task as ion_task should communicate to ioncell_task that
        # the calculation is OK and pass the final structure.
        self.ioncell_task.set_status(self.S_LOCKED)

        self.transfer_done = False

    def on_ok(self, sender):
        """
        This callback is called when one task reaches status S_OK.

        When ion_task completes, its relaxed structure is transferred (once)
        to ioncell_task, which is then unlocked for submission.
        """
        logger.debug("in on_ok with sender %s" % sender)

        if sender == self.ion_task and not self.transfer_done:
            # Get the relaxed structure.
            ion_structure = self.ion_task.read_final_structure()

            # BUG FIX: leftover debugging print() replaced with a logger call.
            logger.debug("ion_structure %s" % ion_structure)

            # Transfer it to the ioncell task (do it only once).
            self.ioncell_task.change_structure(ion_structure)
            self.transfer_done = True

            # Finally unlock ioncell_task so that we can submit it.
            self.ioncell_task.set_status(self.S_READY)

        base_results = super(RelaxWorkflow, self).on_ok(sender)
        return base_results
class DeltaFactorWorkflow(Workflow):
    """Workflow for the computation of the deltafactor of a pseudopotential."""

    def __init__(self, structure_or_cif, pseudo, kppa,
                 spin_mode="polarized", toldfe=1.e-8, smearing="fermi_dirac:0.1 eV",
                 accuracy="normal", ecut=None, pawecutdg=None, ecutsm=0.05, chksymbreak=0,
                 workdir=None, manager=None, **kwargs):
        # FIXME Hack in chksymbreak
        """
        Build a `Workflow` for the computation of the deltafactor.

        Args:
            structure_or_cif:
                Structure object or string with the path of the CIF file.
            pseudo:
                String with the name of the pseudopotential file or `Pseudo` object.
            kppa:
                Number of k-points per atom.
            spin_mode:
                Spin polarization mode.
            toldfe:
                Tolerance on the energy (Ha)
            smearing:
                Smearing technique.
            accuracy:
                Accuracy of the calculation.
            ecut:
                Cutoff energy in Hartree (optional).
            pawecutdg:
                Cutoff energy of the fine PAW grid (optional).
            ecutsm:
                Smearing of the kinetic-energy cutoff (Ha).
            chksymbreak:
                Value of the Abinit chksymbreak input variable.
            workdir:
                String specifying the working directory.
            manager:
                `TaskManager` responsible for the submission of the tasks.
        """
        super(DeltaFactorWorkflow, self).__init__(workdir=workdir, manager=manager)

        if isinstance(structure_or_cif, Structure):
            structure = structure_or_cif
        else:
            # Assume CIF file
            structure = read_structure(structure_or_cif)

        self.pseudo = Pseudo.aspseudo(pseudo)

        structure = AbiStructure.asabistructure(structure)
        smearing = Smearing.assmearing(smearing)

        self._input_structure = structure

        v0 = structure.volume

        # From 94% to 106% of the equilibrium volume.
        self.volumes = v0 * np.arange(94, 108, 2) / 100.

        for vol in self.volumes:
            new_lattice = structure.lattice.scale(vol)

            new_structure = Structure(new_lattice, structure.species, structure.frac_coords)
            new_structure = AbiStructure.asabistructure(new_structure)

            extra_abivars = dict(
                pawecutdg=pawecutdg,
                ecutsm=ecutsm,
                toldfe=toldfe,
                prtwf=0,
                paral_kgb=0,
            )
            extra_abivars.update(**kwargs)

            if ecut is not None:
                extra_abivars.update({"ecut": ecut})

            ksampling = KSampling.automatic_density(new_structure, kppa,
                                                    chksymbreak=chksymbreak)

            scf_input = ScfStrategy(new_structure, self.pseudo, ksampling,
                                    accuracy=accuracy, spin_mode=spin_mode,
                                    smearing=smearing, **extra_abivars)

            self.register(scf_input, task_class=ScfTask)

    def get_results(self):
        """Fit the EOS, compute the deltafactor and return the results dict."""
        num_sites = self._input_structure.num_sites

        etotal = ArrayWithUnit(self.read_etotal(), "Ha").to("eV")

        wf_results = super(DeltaFactorWorkflow, self).get_results()

        wf_results.update({
            "etotal"    : list(etotal),
            "volumes"   : list(self.volumes),
            "natom"     : num_sites,
            "dojo_level": 1,
        })

        # BUG FIX: dfact was referenced below even when the EOS fit raised,
        # producing a NameError inside the `with open` block. Initialize it
        # before the try so the deltadata file can always be written.
        dfact = None
        try:
            # Use same fit as the one employed for the deltafactor.
            eos_fit = EOS.DeltaFactor().fit(self.volumes/num_sites, etotal/num_sites)
            eos_fit.plot(show=False, savefig=self.outdir.path_in("eos.pdf"))

            # FIXME: This object should be moved to pseudo_dojo.
            # Get reference results (Wien2K).
            from pseudo_dojo.refdata.deltafactor import df_database, df_compute
            wien2k = df_database().get_entry(self.pseudo.symbol)

            # Compute deltafactor estimator.
            dfact = df_compute(wien2k.v0, wien2k.b0_GPa, wien2k.b1,
                               eos_fit.v0, eos_fit.b0_GPa, eos_fit.b1, b0_GPa=True)

            print("delta", eos_fit)
            print("Deltafactor = %.3f meV" % dfact)

            wf_results.update({
                "v0": eos_fit.v0,
                "b0": eos_fit.b0,
                "b0_GPa": eos_fit.b0_GPa,
                "b1": eos_fit.b1,
            })

        except EOS.Error as exc:
            wf_results.push_exceptions(exc)

        # Write data for the computation of the delta factor.
        with open(self.outdir.path_in("deltadata.txt"), "w") as fh:
            fh.write("# Deltafactor = %s meV\n" % dfact)
            fh.write("# Volume/natom [Ang^3] Etotal/natom [eV]\n")
            for (v, e) in zip(self.volumes, etotal):
                fh.write("%s %s\n" % (v/num_sites, e/num_sites))

        return wf_results

    def on_all_ok(self):
        """Called when all tasks reach S_OK: collect and return the results."""
        return self.get_results()
class G0W0_Workflow(Workflow):
    def __init__(self, scf_input, nscf_input, scr_input, sigma_inputs,
                 workdir=None, manager=None):
        """
        Workflow for G0W0 calculations.

        Args:
            scf_input:
                Input for the SCF run or `SCFStrategy` object.
            nscf_input:
                Input for the NSCF run or `NSCFStrategy` object.
            scr_input:
                Input for the screening run or `ScrStrategy` object
            sigma_inputs:
                List of Strategies for the self-energy run.
            workdir:
                Working directory of the calculation.
            manager:
                `TaskManager` object.
        """
        super(G0W0_Workflow, self).__init__(workdir=workdir, manager=manager)

        # Register the GS-SCF run(s). When a list is given, every input is
        # registered but only the LAST SCF task feeds the NSCF run.
        if isinstance(scf_input, (list, tuple)):
            scf_inputs = scf_input
        else:
            scf_inputs = [scf_input]

        for one_scf_input in scf_inputs:
            self.scf_task = self.register(one_scf_input, task_class=ScfTask)
        scf_task = self.scf_task

        # NSCF run producing the WFK file needed by both SCR and SIGMA.
        self.nscf_task = nscf_task = self.register(
            nscf_input, deps={scf_task: "DEN"}, task_class=NscfTask)

        # Screening run.
        self.scr_task = scr_task = self.register(scr_input, deps={nscf_task: "WFK"})

        # Self-energy run(s), each reading the WFK and SCR files.
        if not isinstance(sigma_inputs, (list, tuple)):
            sigma_inputs = [sigma_inputs]

        self.sigma_tasks = []
        for sigma_input in sigma_inputs:
            sigma_task = self.register(sigma_input, deps={nscf_task: "WFK", scr_task: "SCR"})
            self.sigma_tasks.append(sigma_task)
class SigmaConvWorkflow(Workflow):
    def __init__(self, wfk_node, scr_node, sigma_inputs, workdir=None, manager=None):
        """
        Workflow for self-energy convergence studies.

        Args:
            wfk_node:
                The node who has produced the WFK file
            scr_node:
                The node who has produced the SCR file
            sigma_inputs:
                List of Strategies for the self-energy run.
            workdir:
                Working directory of the calculation.
            manager:
                `TaskManager` object.
        """
        super(SigmaConvWorkflow, self).__init__(workdir=workdir, manager=manager)

        # Accept either a single input or a sequence of inputs.
        if isinstance(sigma_inputs, (list, tuple)):
            inputs = sigma_inputs
        else:
            inputs = [sigma_inputs]

        # Each SIGMA run reads the WFK and SCR files produced by the given nodes.
        for sigma_input in inputs:
            self.register(sigma_input, deps={wfk_node: "WFK", scr_node: "SCR"})
#class SCGW_Workflow(Workflow):
#
# def __init__(self, scr_input, sigma_input, workdir=None, manager=None):
# """
# Workflow for G0W0 calculations.
#
# Args:
# scr_input:
# Input for the screening run or `ScrStrategy` object
# sigma_input:
# Strategy for the self-energy run.
# workdir:
# Working directory of the calculation.
# manager:
# `TaskManager` object.
# """
# super(SCGW_Workflow, self).__init__(workdir=workdir, manager=manager)
#
# # Register the SCREENING run.
# self.scr_task = self.register(scr_input, deps={nscf_task: "WFK"})
#
# # Register the SIGMA run.
# self.sigma_task = self.register(sigma_input, deps={self.nscf_task: "WFK", self.scr_task: "SCR"})
#
# def not_converged(self):
# return self.sigma_task.not_converged()
#
# def restart(self):
# ext = "QPS"
# qps_file = self.sigma_task.outdir.has_abiext(ext)
# irdvars = irdvars_for_ext(ext)
#
# if not qps_file:
# raise TaskRestartError("Cannot find the QPS file to restart from.")
#
# # Move the QPS file produced by the SIGMA task to
# # the indir of the SCR task and the indir of the SIGMA task.
# scr_infile = self.scr_task.indir.path_in(os.path.basename(qps_file)
# sigma_infile = self.sigma_task.indir.path_in(os.path.basename(qps_file)
# shutil.copy(qps_file, scr_infile)
# shutil.move(qps_file, sigma_infile)
#
# # Add the appropriate variable for reading the QPS file.
# self.scr_task.strategy.add_extra_abivars(irdvars)
# self.sigma_task.strategy.add_extra_abivars(irdvars)
#
# # Now we can resubmit the job.
# #for task in self.
# # task.reset()
# self._restart()
class BSEMDF_Workflow(Workflow):
    def __init__(self, scf_input, nscf_input, bse_input, workdir=None, manager=None):
        """
        Workflow for simple BSE calculations in which the self-energy corrections
        are approximated by the scissors operator and the screening is modeled
        with the model dielectric function.

        Args:
            scf_input:
                Input for the SCF run or `ScfStrategy` object.
            nscf_input:
                Input for the NSCF run or `NscfStrategy` object.
            bse_input:
                Input for the BSE run or `BSEStrategy` object.
            workdir:
                Working directory of the calculation.
            manager:
                `TaskManager`.
        """
        super(BSEMDF_Workflow, self).__init__(workdir=workdir, manager=manager)

        # Chain GS-SCF -> NSCF -> BSE, each step consuming the previous output.
        self.scf_task = self.register(scf_input, task_class=ScfTask)
        self.nscf_task = self.register(nscf_input, deps={self.scf_task: "DEN"},
                                       task_class=NscfTask)
        self.bse_task = self.register(bse_input, deps={self.nscf_task: "WFK"},
                                      task_class=HaydockBseTask)
class PhononWorkflow(Workflow):
    """
    This workflow usually consists of nirred Phonon tasks where nirred is
    the number of irreducible perturbations for a given q-point.
    It provides the callback method (on_all_ok) that calls mrgddb to merge
    the partial DDB files and mrgggkk to merge the GKK files.
    """
    def merge_ddb_files(self):
        """
        This method is called when all the q-points have been computed.
        It runs `mrgddb` in sequential on the local machine to produce
        the final DDB file in the outdir of the `Workflow`.
        """
        ddb_files = filter(None, [task.outdir.has_abiext("DDB") for task in self])

        logger.debug("will call mrgddb to merge %s:\n" % str(ddb_files))
        assert len(ddb_files) == len(self)

        #if len(ddb_files) == 1:
        # Avoid the merge. Just move the DDB file to the outdir of the workflow

        # Final DDB file will be produced in the outdir of the workflow.
        out_ddb = self.outdir.path_in("out_DDB")
        desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())

        mrgddb = wrappers.Mrgddb(verbose=1)
        mrgddb.set_mpi_runner("mpirun")
        mrgddb.merge(ddb_files, out_ddb=out_ddb, description=desc, cwd=self.outdir.path)

    def merge_gkk_files(self):
        """
        This method is called when all the q-points have been computed.
        It runs `mrggkk` in sequential on the local machine to produce
        the final GKK file in the outdir of the `Workflow`.
        (Not implemented yet: raises NotImplementedError before merging.)
        """
        gkk_files = filter(None, [task.outdir.has_abiext("GKK") for task in self])

        logger.debug("Will call mrggkk to merge %s:\n" % str(gkk_files))
        # BUG FIX: the original asserted len(gkk), referencing an undefined
        # name `gkk` (NameError); the list is called gkk_files.
        assert len(gkk_files) == len(self)

        #if len(gkk_files) == 1:
        # Avoid the merge. Just move the GKK file to the outdir of the workflow

        # Final GKK file will be produced in the outdir of the workflow.
        out_ggk = self.outdir.path_in("out_GKK")

        mrggkk = wrappers.Mrggkk(verbose=1)
        mrggkk.set_mpi_runner("mpirun")

        raise NotImplementedError("Have to check mrggkk")
        #mrggkk.merge(gswfk_file, dfpt_files, gkk_files, out_fname, binascii=0, cwd=self.outdir.path)

    def on_all_ok(self):
        """
        Called when all the q-points have been computed: merge the partial
        DDB files into the final DDB in the outdir of the `Workflow`.
        """
        # Merge DDB files.
        self.merge_ddb_files()

        # Merge GKK files (disabled: merge_gkk_files is not implemented yet).
        #self.merge_gkk_files()

        return dict(returncode=0,
                    message="DDB merge done")
class WorkflowResults(dict, MSONable):
    """
    Dictionary used to store some of the results produced by a Task object.
    """
    # Keys that must be present for the results to be considered complete.
    _MANDATORY_KEYS = [
        "task_results",
    ]

    # Key under which the list of exception strings is stored.
    _EXC_KEY = "_exceptions"

    def __init__(self, *args, **kwargs):
        super(WorkflowResults, self).__init__(*args, **kwargs)

        if self._EXC_KEY not in self:
            self[self._EXC_KEY] = []

    @property
    def exceptions(self):
        """List of exception strings accumulated so far."""
        return self[self._EXC_KEY]

    def push_exceptions(self, *exceptions):
        """Register one or more exceptions; duplicates are silently skipped."""
        for exc in exceptions:
            newstr = str(exc)
            if newstr not in self.exceptions:
                self[self._EXC_KEY] += [newstr]

    def assert_valid(self):
        """
        Validate the task results and return the accumulated exceptions.

        The try assert except trick allows one to get a string with info on the exception.
        We use the += operator so that sub-classes can add their own message.
        """
        # Validate tasks.
        # BUG FIX: the original accessed self.task_results, but this class is
        # a plain dict subclass, so the entry must be read by key.
        for tres in self["task_results"]:
            self[self._EXC_KEY] += tres.assert_valid()

        return self[self._EXC_KEY]

    @property
    def to_dict(self):
        """JSON-serializable dict representation (MSONable protocol)."""
        d = {k: v for k, v in self.items()}
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        return d

    @classmethod
    def from_dict(cls, d):
        """Reconstruct the object from its to_dict representation."""
        mydict = {k: v for k, v in d.items() if k not in ["@module", "@class"]}
        return cls(mydict)
|
#!/usr/bin/env python
# See http://stackoverflow.com/questions/3581031/backup-mirror-github-repositories/13917251#13917251
# You can find the latest version of this script at
# https://gist.github.com/4319265
import os
import sys
import json
import urllib
import subprocess
__version__ = '0.2'
__author__ = 'Marius Gedminas <marius@gedmin.as>'
__url__ = 'https://gist.github.com/4319265'

# configuration
username = 'mgedmin'  # GitHub account whose repos and gists are mirrored
backup_dir = os.path.expanduser('~/github')  # destination for repository mirrors
gist_backup_dir = os.path.expanduser('~/github/gists')  # destination for gist mirrors
# helpers
def ensure_dir(dir):
    """Create directory `dir` (including parents) unless it already exists."""
    if os.path.isdir(dir):
        return
    os.makedirs(dir)
def get_github_list(url):
    """Fetch one page (up to 100 items) of a GitHub API list as parsed JSON."""
    response = urllib.urlopen(url + '?per_page=100')
    # A 'Link' response header means there are more pages, which this
    # script does not handle.
    if response.info().getheader('Link'):
        sys.stderr.write("error: pagination is not supported yet\n")
    return json.load(response)
def info(*args):
    """Print a space-separated status message and flush stdout immediately."""
    message = " ".join(str(arg) for arg in args)
    print(message)
    sys.stdout.flush()
def backup(git_url, dir):
    """Mirror-clone `git_url` into `dir`, or fetch updates if `dir` exists.

    Uses subprocess with an argument list (no shell), so repository names
    cannot inject shell commands.
    """
    if os.path.exists(dir):
        subprocess.call(['git', 'fetch'], cwd=dir)
    else:
        # BUG FIX: clone explicitly into `dir`. The original omitted the
        # target directory and relied on git's default name, which only
        # coincidentally matched `dir` (and the later fetch path).
        subprocess.call(['git', 'clone', '--mirror', git_url, dir])
def update_description(git_dir, description):
    """Write `description` to the repository's `description` file as UTF-8."""
    import io
    # BUG FIX (portability): the original did
    #     f.write(description.encode('UTF-8') + '\n')
    # which breaks on Python 3 (bytes + str -> TypeError). io.open with an
    # explicit encoding writes unicode text correctly on both Python 2 and 3.
    with io.open(os.path.join(git_dir, 'description'), 'w', encoding='UTF-8') as f:
        f.write(description + u'\n')
# action

# Mirror all gists of `username`.
ensure_dir(gist_backup_dir)
os.chdir(gist_backup_dir)
for gist in get_github_list('https://api.github.com/users/%s/gists' % username):
    dir = gist['id'] + '.git'
    description = gist['description'] or "(no description)"
    info("+", "gists/" + gist['id'], "-", description.partition('\n')[0])
    backup(gist['git_pull_url'], dir)
    update_description(dir, description + '\n\n' + gist['html_url'])

# Mirror all repositories of `username`.
ensure_dir(backup_dir)
os.chdir(backup_dir)
for repo in get_github_list('https://api.github.com/users/%s/repos' % username):
    dir = repo['name'] + '.git'
    description = repo['description'] or "(no description)"
    info("+", repo['full_name'])
    backup(repo['git_url'], dir)
    # BUG FIX: the original wrote gist['html_url'] — the stale loop variable
    # from the gist loop above — so every repo got the last gist's URL.
    # (Confirmed by the later revision of this script, which uses repo.)
    update_description(dir, description + '\n\n' + repo['html_url'])
# Write a cloneurl file inside .git
#!/usr/bin/env python
# See http://stackoverflow.com/questions/3581031/backup-mirror-github-repositories/13917251#13917251
# You can find the latest version of this script at
# https://gist.github.com/4319265
import os
import sys
import json
import urllib
import subprocess
__version__ = '0.2'
__author__ = 'Marius Gedminas <marius@gedmin.as>'
__url__ = 'https://gist.github.com/4319265'

# configuration
username = 'mgedmin'  # GitHub account whose repos and gists are mirrored
backup_dir = os.path.expanduser('~/github')  # destination for repository mirrors
gist_backup_dir = os.path.expanduser('~/github/gists')  # destination for gist mirrors
# helpers
def ensure_dir(dir):
    """Create directory `dir` (including parents) unless it already exists."""
    if os.path.isdir(dir):
        return
    os.makedirs(dir)
def get_github_list(url):
    """Fetch one page (up to 100 items) of a GitHub API list as parsed JSON."""
    response = urllib.urlopen(url + '?per_page=100')
    # A 'Link' response header means there are more pages, which this
    # script does not handle.
    if response.info().getheader('Link'):
        sys.stderr.write("error: pagination is not supported yet\n")
    return json.load(response)
def info(*args):
    """Print a space-separated status message and flush stdout immediately."""
    message = " ".join(str(arg) for arg in args)
    print(message)
    sys.stdout.flush()
def backup(git_url, dir):
    """Mirror-clone `git_url` into `dir`, or fetch updates if `dir` exists.

    Uses subprocess with an argument list (no shell), so repository names
    cannot inject shell commands.
    """
    if os.path.exists(dir):
        subprocess.call(['git', 'fetch'], cwd=dir)
    else:
        # BUG FIX: clone explicitly into `dir`. The original omitted the
        # target directory and relied on git's default name, which only
        # coincidentally matched `dir` (and the later fetch path).
        subprocess.call(['git', 'clone', '--mirror', git_url, dir])
def update_description(git_dir, description):
    """Write `description` to the repository's `description` file as UTF-8."""
    import io
    # BUG FIX (portability): the original did
    #     f.write(description.encode('UTF-8') + '\n')
    # which breaks on Python 3 (bytes + str -> TypeError). io.open with an
    # explicit encoding writes unicode text correctly on both Python 2 and 3.
    with io.open(os.path.join(git_dir, 'description'), 'w', encoding='UTF-8') as f:
        f.write(description + u'\n')
def update_cloneurl(git_dir, cloneurl):
    """Record `cloneurl` in the repository's `cloneurl` file."""
    path = os.path.join(git_dir, 'cloneurl')
    with open(path, 'w') as f:
        f.write(cloneurl + '\n')
# action

# Mirror all gists of `username` into gist_backup_dir.
ensure_dir(gist_backup_dir)
os.chdir(gist_backup_dir)
for gist in get_github_list('https://api.github.com/users/%s/gists' % username):
    dir = gist['id'] + '.git'
    description = gist['description'] or "(no description)"
    # Show only the first line of the description in the progress output.
    info("+", "gists/" + gist['id'], "-", description.partition('\n')[0])
    backup(gist['git_pull_url'], dir)
    update_description(dir, description + '\n\n' + gist['html_url'])
    # git_push_url is recorded — presumably so pushes can be restored later;
    # confirm against how the cloneurl file is consumed.
    update_cloneurl(dir, gist['git_push_url'])

# help me catch silly errors
gist = None
del gist

# Mirror all repositories of `username` into backup_dir.
ensure_dir(backup_dir)
os.chdir(backup_dir)
for repo in get_github_list('https://api.github.com/users/%s/repos' % username):
    dir = repo['name'] + '.git'
    description = repo['description'] or "(no description)"
    info("+", repo['full_name'])
    backup(repo['git_url'], dir)
    update_description(dir, description + '\n\n' + repo['html_url'])
    # ssh_url is recorded for repos (key-based auth), unlike gists above.
    update_cloneurl(dir, repo['ssh_url'])
|
#! /usr/bin/env python
# ------------------------------------------------------------------------------
# Data validation script - validates files before import into portal.
# If create-corrected set to true, the script will create a new version of all the files it detects
# and ensure the newlines are correct and that no data is enclosed in quotes. It will also
# add entrez IDs if they are not present and the user either provides the file or sets ftp
# Also checks for duplicate column headers, repeated header rows
# ------------------------------------------------------------------------------
# imports
import sys
import os
import logging
import logging.handlers
from collections import OrderedDict
import argparse
import re
import csv
import itertools
import requests
# ------------------------------------------------------------------------------
# globals
# Current NCBI build and build counterpart - used in one of the maf checks as well as .seq filename check
NCBI_BUILD_NUMBER = 37
GENOMIC_BUILD_COUNTERPART = 'hg19'

# study-specific globals
# NOTE(review): these start as None and are presumably populated at runtime
# once the study is loaded — confirm against the (unseen) main routine.
STUDY_DIR = None  # root directory of the study being validated
DEFINED_SAMPLE_IDS = None  # sample ids defined by the study
DEFINED_CANCER_TYPES = None  # cancer types defined by the study
SERVER_URL = 'http://localhost/cbioportal'  # base URL of the portal instance
PORTAL_CANCER_TYPES = None  # cancer types already known to the portal
HUGO_ENTREZ_MAP = None  # Hugo symbol / Entrez id mapping — direction not visible here
# ----------------------------------------------------------------------------
# how we differentiate between data types based on the meta_file_type field
SEG_META_PATTERN = 'meta_segment'
STUDY_META_PATTERN = 'meta_study'
CANCER_TYPE_META_PATTERN = 'meta_cancer_type'
MUTATION_META_PATTERN = 'meta_mutations_extended'
CNA_META_PATTERN = 'meta_CNA'
CLINICAL_META_PATTERN = 'meta_clinical'
LOG2_META_PATTERN = 'meta_log2CNA'
EXPRESSION_META_PATTERN = 'meta_expression'
FUSION_META_PATTERN = 'meta_fusions'
METHYLATION_META_PATTERN = 'meta_methylation'
RPPA_META_PATTERN = 'meta_rppa'
TIMELINE_META_PATTERN = 'meta_timeline'

# Every recognized meta_file_type value.
META_FILE_PATTERNS = [
    STUDY_META_PATTERN,
    CANCER_TYPE_META_PATTERN,
    SEG_META_PATTERN,
    MUTATION_META_PATTERN,
    CNA_META_PATTERN,
    CLINICAL_META_PATTERN,
    LOG2_META_PATTERN,
    EXPRESSION_META_PATTERN,
    FUSION_META_PATTERN,
    METHYLATION_META_PATTERN,
    RPPA_META_PATTERN,
    TIMELINE_META_PATTERN
]

# Maps each data-bearing meta file type to the name of the validator class
# (resolved elsewhere in this file) that checks its data file. Study and
# cancer-type meta files have no data validator and are absent here.
VALIDATOR_IDS = {CNA_META_PATTERN:'CNAValidator',
                 MUTATION_META_PATTERN:'MutationsExtendedValidator',
                 CLINICAL_META_PATTERN:'ClinicalValidator',
                 SEG_META_PATTERN:'SegValidator',
                 LOG2_META_PATTERN:'Log2Validator',
                 EXPRESSION_META_PATTERN:'ExpressionValidator',
                 FUSION_META_PATTERN:'FusionValidator',
                 METHYLATION_META_PATTERN:'MethylationValidator',
                 RPPA_META_PATTERN:'RPPAValidator',
                 TIMELINE_META_PATTERN:'TimelineValidator'}
# ----------------------------------------------------------------------------
# fields allowed in each meta file type, maps to True if required

# IMPROVEMENT (DRY): the original spelled out eight byte-identical dicts for
# CNA / mutation / log2 / expression / methylation / fusion / RPPA / clinical.
# They are now derived from one shared template; each type still gets its own
# copy so later per-type tweaks remain independent.
_PROFILE_META_FIELDS = {
    'cancer_study_identifier': True,
    'genetic_alteration_type': True,
    'datatype': True,
    'stable_id': True,
    'show_profile_in_analysis_tab': True,
    'profile_name': True,
    'profile_description': True,
    'meta_file_type': True,
    'data_file_path': True
}

CNA_META_FIELDS = dict(_PROFILE_META_FIELDS)
MUTATION_META_FIELDS = dict(_PROFILE_META_FIELDS)
LOG2_META_FIELDS = dict(_PROFILE_META_FIELDS)
EXPRESSION_META_FIELDS = dict(_PROFILE_META_FIELDS)
METHYLATION_META_FIELDS = dict(_PROFILE_META_FIELDS)
FUSION_META_FIELDS = dict(_PROFILE_META_FIELDS)
RPPA_META_FIELDS = dict(_PROFILE_META_FIELDS)
CLINICAL_META_FIELDS = dict(_PROFILE_META_FIELDS)

# SEG meta files additionally carry reference-genome information.
SEG_META_FIELDS = dict(_PROFILE_META_FIELDS)
SEG_META_FIELDS.update({
    'reference_genome_id': True,
    'data_filename': True,
    'description': True,
})

TIMELINE_META_FIELDS = {
    'cancer_study_identifier': True,
    'genetic_alteration_type': True,
    'meta_file_type': True,
    'data_file_path': True
}

CASE_LIST_FIELDS = {
    'cancer_study_identifier': True,
    'stable_id': True,
    'case_list_name': True,
    'case_list_description': True,
    'case_list_ids': True,
    'case_list_category': False  # optional
}

STUDY_META_FIELDS = {
    'cancer_study_identifier': True,
    'type_of_cancer': True,
    'name': True,
    'description': True,
    'groups': True,
    'dedicated_color': True,
    'short_name': True,
    'meta_file_type': True,
    'citation': False,  # optional
    'pmid': False  # optional
}

CANCER_TYPE_META_FIELDS = {
    'type_of_cancer': True,
    'name': True,
    'clinical_trial_keywords': True,
    'dedicated_color': True,
    'short_name': True
}
# Maps each meta file type to the dict of fields allowed in that file.
META_FIELD_MAP = {
    STUDY_META_PATTERN: STUDY_META_FIELDS,
    CANCER_TYPE_META_PATTERN: CANCER_TYPE_META_FIELDS,
    CNA_META_PATTERN: CNA_META_FIELDS,
    CLINICAL_META_PATTERN: CLINICAL_META_FIELDS,
    LOG2_META_PATTERN: LOG2_META_FIELDS,
    MUTATION_META_PATTERN: MUTATION_META_FIELDS,
    SEG_META_PATTERN: SEG_META_FIELDS,
    EXPRESSION_META_PATTERN: EXPRESSION_META_FIELDS,
    # BUG FIX: methylation previously mapped to EXPRESSION_META_FIELDS,
    # leaving METHYLATION_META_FIELDS defined but unused. The two dicts have
    # identical contents, so observable behavior is unchanged.
    METHYLATION_META_PATTERN: METHYLATION_META_FIELDS,
    FUSION_META_PATTERN: FUSION_META_FIELDS,
    RPPA_META_PATTERN: RPPA_META_FIELDS,
    TIMELINE_META_PATTERN: TIMELINE_META_FIELDS,
    'case_list': CASE_LIST_FIELDS
}
# ----------------------------------------------------------------------------
# class definitions
class ValidationMessageFormatter(logging.Formatter):
    """Logging formatter with optional fields for data validation messages.

    These fields are:
    data_filename - the name of the file the message is about (if applicable)
    line_number - a line number within the above file (if applicable)
    column_number - a column number within the above file (if applicable)
    cause - the unexpected value found in the input (if applicable)

    If instead a message pertains to multiple values of one of these
    fields (as the result of aggregation by CollapsingLogMessageHandler),
    these will be expected in the field <fieldname>_list.
    """

    def format(self, record, *args, **kwargs):
        """Check consistency of expected fields and format the record."""
        # a line or column reference only makes sense relative to a file
        has_position = bool(
            self.format_aggregated(record, 'line_number', optional=True) or
            self.format_aggregated(record, 'column_number', optional=True))
        has_filename = bool(
            self.format_aggregated(record, 'data_filename', optional=True))
        if has_position and not has_filename:
            raise ValueError(
                'Tried to log about a line/column with no filename')
        return super(ValidationMessageFormatter, self).format(record,
                                                              *args,
                                                              **kwargs)

    @staticmethod
    def format_aggregated(record,
                          attr_name,
                          single_fmt='%s',
                          multiple_fmt='[%s]',
                          join_string=', ',
                          max_join=3,
                          optional=False):
        """Format a human-readable string for a field or its <field>_list.

        As would be generated when using the CollapsingLogMessageHandler.
        If `optional` is True and both the field and its list are absent,
        return an empty string.
        """
        single_value = getattr(record, attr_name, None)
        value_list = getattr(record, attr_name + '_list', None)
        if single_value is not None:
            return single_fmt % single_value
        if value_list is not None:
            # show at most max_join values, then summarize the rest
            shown = [str(item) for item in value_list[:max_join]]
            hidden_count = len(value_list) - len(shown)
            if hidden_count != 0:
                shown.append('(%d more)' % hidden_count)
            return multiple_fmt % join_string.join(shown)
        if optional:
            return ''
        raise ValueError(
            "Tried to format an absent non-optional log field: '%s'" %
            attr_name)
class LogfileStyleFormatter(ValidationMessageFormatter):
    """Formatter for validation messages in a simple one-per-line format."""

    def __init__(self):
        """Initialize a logging Formatter with an appropriate format string."""
        super(LogfileStyleFormatter, self).__init__(
            fmt='%(levelname)s: %(file_indicator)s:'
                '%(line_indicator)s%(column_indicator)s'
                ' %(message)s%(cause_indicator)s')

    def format(self, record):
        """Generate descriptions for optional fields and format the record."""
        # '-' marks messages that do not pertain to a particular file
        record.file_indicator = (
            self.format_aggregated(record, 'data_filename', optional=True)
            or '-')
        record.line_indicator = self.format_aggregated(
            record,
            'line_number',
            ' line %d:',
            ' lines [%s]:',
            optional=True)
        record.column_indicator = self.format_aggregated(
            record,
            'column_number',
            ' column %d:',
            ' columns [%s]:',
            optional=True)
        record.cause_indicator = self.format_aggregated(
            record,
            'cause',
            "; found in file: '%s'",
            "; found in file: ['%s']",
            join_string="', '",
            optional=True)
        return super(LogfileStyleFormatter, self).format(record)
class MaxLevelTrackingHandler(logging.Handler):
    """Handler that does nothing but track the maximum msg level emitted."""

    def __init__(self):
        """Initialize the handler with an attribute to track the level."""
        super(MaxLevelTrackingHandler, self).__init__()
        self.max_level = logging.NOTSET

    def emit(self, record):
        """Update the maximum level with a new record."""
        if record.levelno > self.max_level:
            self.max_level = record.levelno

    def get_exit_status(self):
        """Return an exit status for the validator script based on max_level."""
        # 0 = clean, 3 = warnings, 1 = errors, 2 = anything worse
        if self.max_level <= logging.INFO:
            return 0
        if self.max_level == logging.WARNING:
            return 3
        if self.max_level == logging.ERROR:
            return 1
        return 2
class Jinja2HtmlHandler(logging.handlers.BufferingHandler):
    """Logging handler that formats aggregated HTML reports using Jinja2."""

    def __init__(self, study_dir, output_filename, *args, **kwargs):
        """Set study directory name, output filename and buffer size."""
        self.study_dir = study_dir
        self.output_filename = output_filename
        self.max_level = logging.NOTSET
        self.closed = False
        # templates are looked up next to the currently running script
        self.template_dir = os.path.dirname(__file__)
        super(Jinja2HtmlHandler, self).__init__(*args, **kwargs)

    def emit(self, record):
        """Buffer a message if the buffer is not full."""
        if record.levelno > self.max_level:
            self.max_level = record.levelno
        if len(self.buffer) >= self.capacity:
            # drop messages beyond the capacity instead of flushing
            return
        return super(Jinja2HtmlHandler, self).emit(record)

    def flush(self):
        """Do nothing; emit() caps the buffer and close() renders output."""
        pass

    def shouldFlush(self, record):
        """Never flush; emit() caps the buffer and close() renders output."""
        return False

    def generateHtml(self):
        """Render the HTML page for the current content in self.buffer """
        # require Jinja2 only if it is actually used
        import jinja2
        environment = jinja2.Environment(
            loader=jinja2.FileSystemLoader(self.template_dir),
            # trim whitespace around Jinja2 operators
            trim_blocks=True,
            lstrip_blocks=True)
        report_template = environment.get_template(
            'validation_report_template.html.jinja')
        rendered = report_template.render(
            study_dir=self.study_dir,
            record_list=self.buffer,
            max_level=logging.getLevelName(self.max_level))
        with open(self.output_filename, 'w') as output_file:
            output_file.write(rendered)
class CollapsingLogMessageHandler(logging.handlers.MemoryHandler):
    """Logging handler that aggregates repeated log messages into one.

    This collapses validation LogRecords based on the source code line that
    emitted them and their formatted message, and flushes the resulting
    records to another handler.
    """

    def flush(self):
        """Aggregate LogRecords by message and send them to the target handler.

        Fields that occur with multiple different values in LogRecords
        emitted from the same line with the same message will be
        collected in a field named <field_name>_list.
        """
        # group buffered LogRecords by their source code line and message
        grouping_dict = OrderedDict()
        for record in self.buffer:
            identifying_tuple = (record.module,
                                 record.lineno,
                                 record.getMessage())
            if identifying_tuple not in grouping_dict:
                grouping_dict[identifying_tuple] = []
            grouping_dict[identifying_tuple].append(record)

        aggregated_buffer = []
        # for each list of same-message records
        for record_list in grouping_dict.values():
            # make a dict to collect the fields for the aggregate record
            aggregated_field_dict = {}
            # for each field found in (the first of) the records
            for field_name in record_list[0].__dict__:
                # collect the values found for this field across the records.
                # Use the keys of an OrderedDict, as OrderedSet is for some
                # reason not to be found in the Python standard library.
                # NOTE(review): this requires every field value to be
                # hashable; an unhashable value (e.g. a list) would raise
                # TypeError here — presumably never happens in practice.
                field_values = OrderedDict((record.__dict__[field_name], None)
                                           for record in record_list)
                # if this field has the same value in all records
                if len(field_values) == 1:
                    # use that value in the new dict
                    aggregated_field_dict[field_name] = field_values.popitem()[0]
                else:
                    # set a <field>_list field instead
                    aggregated_field_dict[field_name + '_list'] = \
                        list(field_values.keys())

            # add a new log record with these fields to the output buffer
            aggregated_buffer.append(
                logging.makeLogRecord(aggregated_field_dict))

        # replace the buffer with the aggregated one and flush
        self.buffer = aggregated_buffer
        super(CollapsingLogMessageHandler, self).flush()

    def shouldFlush(self, record):
        """Flush when emitting an INFO message or a message without a file."""
        return ((record.levelno == logging.INFO) or
                ('data_filename' not in record.__dict__) or
                super(CollapsingLogMessageHandler, self).shouldFlush(record))
class CombiningLoggerAdapter(logging.LoggerAdapter):
    """LoggerAdapter that combines its own context info with that in calls."""

    def process(self, msg, kwargs):
        """Add contextual information from call to that from LoggerAdapter."""
        # start from the adapter's own context and let the call override it
        combined_extra = dict(self.extra)
        combined_extra.update(kwargs.get('extra', {}))
        kwargs['extra'] = combined_extra
        return msg, kwargs
class ValidatorFactory(object):
    """Factory for creating validation objects of various types."""

    @staticmethod
    def createValidator(validator_type, hugo_entrez_map, logger, meta_dict):
        """Instantiate the validator class named by `validator_type`.

        The class is looked up by name among this module's globals.
        """
        validator_class = globals()[validator_type]
        return validator_class(hugo_entrez_map, logger, meta_dict)
class Validator(object):
    """Abstract validator class for tab-delimited data files.

    Subclassed by validators for specific data file types, which should
    define a 'REQUIRED_HEADERS' attribute listing the required column
    headers and a `REQUIRE_COLUMN_ORDER` boolean stating whether their
    position is significant.

    The methods `processTopLines`, `checkHeader`, `checkLine` and `onComplete`
    may be overridden (calling their superclass methods) to perform any
    appropriate validation tasks.
    """

    # subclasses override these to declare the header they expect
    REQUIRED_HEADERS = []
    REQUIRE_COLUMN_ORDER = True

    def __init__(self,hugo_entrez_map,logger,meta_dict):
        """Initialize a validator for a particular data file.

        :param hugo_entrez_map: path Entrez to Hugo mapping file
        :param logger: logger instance for writing the log messages
        :param meta_dict: dictionary of fields found in corresponding meta file
                         (such as stable id and data file name)
        """
        # STUDY_DIR is a module-level global defined elsewhere in this file
        self.filename = os.path.join(STUDY_DIR, meta_dict['data_file_path'])
        self.filenameShort = os.path.basename(self.filename)
        # current line in the file, kept up to date while parsing
        self.line_number = 0
        # column headers and their count, set by checkHeader()
        self.cols = []
        self.numCols = 0
        self.hugo_entrez_map = hugo_entrez_map
        # newline style(s) read from the file, checked in _checkLineBreaks()
        self.newlines = ('',)
        self.studyId = ''
        # NOTE(review): headerWritten is not referenced in this class;
        # presumably used by subclasses or dead — confirm before removing
        self.headerWritten = False
        # This one is set to True if file could be parsed/read until the end (happens in onComplete)
        self.fileCouldBeParsed = False
        # adapter stamps every message from this validator with the filename
        self.logger = CombiningLoggerAdapter(
            logger,
            extra={'data_filename': self.filenameShort})
        self.meta_dict = meta_dict
        # characters not allowed in column headers, see checkBadChar()
        self.badChars = [' ']

    def validate(self):
        """Validate the data file."""
        self.logger.info('Starting validation of file')
        with open(self.filename, 'rU') as data_file:
            # parse any block of start-of-file comment lines and the tsv header
            top_comments = []
            line_number = 0
            for line_number, line in enumerate(data_file,
                                               start=line_number + 1):
                self.line_number = line_number
                if line.startswith('#'):
                    top_comments.append(line)
                else:
                    header_line = line
                    # end of the file's header
                    break
            # if the loop wasn't broken by a non-commented line
            else:
                self.logger.error('No column header or data found in file',
                                  extra={'line_number': self.line_number})
                return
            # parse start-of-file comment lines, if any
            if not self.processTopLines(top_comments):
                self.logger.error(
                    'Invalid header comments, file cannot be parsed')
                return
            # read five data lines to detect quotes in the tsv file
            first_data_lines = []
            for i, line in enumerate(data_file):
                first_data_lines.append(line)
                if i >= 4:
                    break
            sample_content = header_line + ''.join(first_data_lines)
            dialect = csv.Sniffer().sniff(sample_content)
            # sniffer assumes " if no quote character exists
            if dialect.quotechar == '"' and not (
                    dialect.delimiter + '"' in sample_content or
                    '"' + dialect.delimiter in sample_content):
                dialect.quoting = csv.QUOTE_NONE
            if not self._checkTsvDialect(dialect):
                self.logger.error(
                    'Invalid file format, file cannot be parsed')
                return
            # parse the first non-commented line as the tsv header
            # (.next() is the Python 2 iterator protocol)
            header_cols = csv.reader([header_line], dialect).next()
            if self.checkHeader(header_cols) > 0:
                self.logger.error(
                    'Invalid column header, file cannot be parsed')
                return
            # read through the data lines of the file, re-reading the
            # lines consumed for quote sniffing before the rest
            csvreader = csv.reader(itertools.chain(first_data_lines,
                                                   data_file),
                                   dialect)
            for line_number, fields in enumerate(csvreader,
                                                 start=line_number + 1):
                self.line_number = line_number
                if fields[0].startswith('#'):
                    self.logger.error(
                        "Data line starting with '#' skipped",
                        extra={'line_number': self.line_number})
                    continue
                self.checkLine(fields)
            # (tuple of) string(s) of the newlines read (for 'rU' mode files)
            self.newlines = data_file.newlines
        # after the entire file has been read
        self.onComplete()

    def onComplete(self):
        """Perform final validations after all lines have been checked.

        Overriding methods should call this superclass method *after* their own
        validations, as it logs the message that validation was completed.
        """
        self._checkLineBreaks()
        # finalize:
        self.fileCouldBeParsed = True
        self.logger.info('Validation of file complete')

    def processTopLines(self, line_list):
        """Hook to validate any list of comment lines above the TSV header.

        Return False if these lines are invalid and the file cannot be
        parsed, True otherwise.
        """
        return True

    def checkHeader(self, cols):
        """Check that the header has the correct items and set self.cols.

        :param cols: The list of column headers to be validated
        :return the number of errors found.
        """
        num_errors = 0
        # TODO check for end-of-line whitespace
        self.cols = cols
        self.numCols = len(self.cols)
        num_errors += self._checkRepeatedColumns()
        num_errors += self.checkBadChar()
        if self.REQUIRE_COLUMN_ORDER:
            num_errors += self._checkOrderedRequiredColumns()
        else:
            num_errors += self._checkUnorderedRequiredColumns()
        return num_errors

    def checkLine(self, data):
        """Check data values from a line after the file header.

        :param data: The list of values parsed from the line
        """
        if all(x == '' for x in data):
            self.logger.error("Blank line",
                              extra={'line_number': self.line_number})
        # detect a copy of the column header re-appearing as a data line
        if data[:self.numCols] == self.cols:
            if self.logger.isEnabledFor(logging.ERROR):
                self.logger.error(
                    'Repeated header',
                    extra={'line_number': self.line_number,
                           'cause': ', '.join(data[:self.numCols])})
        line_col_count = len(data)
        if line_col_count != self.numCols:
            self.logger.error('Expected %d columns based on header, '
                              'found %d',
                              self.numCols, line_col_count,
                              extra={'line_number': self.line_number})
        # flag empty cells in every column
        for col_index, col_name in enumerate(self.cols):
            if col_index < line_col_count and data[col_index] == '':
                self.logger.error("Blank cell found in column '%s'",
                                  col_name,
                                  extra={'line_number': self.line_number,
                                         'column_number': col_index + 1})

    def _checkUnorderedRequiredColumns(self):
        """Check for missing column headers, independent of their position.

        Return the number of errors encountered.
        """
        num_errors = 0
        for col_name in self.REQUIRED_HEADERS:
            if col_name not in self.cols:
                self.logger.error(
                    'Missing column: %s',
                    col_name,
                    extra={'line_number': self.line_number,
                           'cause': ', '.join(
                                    self.cols[:len(self.REQUIRED_HEADERS)]) +
                                ', (...)'})
                num_errors += 1
        return num_errors

    def _checkOrderedRequiredColumns(self):
        """Check if the column header for each position is correct.

        Return the number of errors encountered.
        """
        num_errors = 0
        for col_index, col_name in enumerate(self.REQUIRED_HEADERS):
            if col_index >= self.numCols:
                num_errors += 1
                self.logger.error(
                    "Invalid header: expected '%s' in column %d,"
                    " found end of line",
                    col_name, col_index + 1,
                    extra={'line_number': self.line_number})
            elif self.cols[col_index] != col_name:
                num_errors += 1
                self.logger.error(
                    "Invalid header: expected '%s' in this column",
                    col_name,
                    extra={'line_number': self.line_number,
                           'column_number': col_index + 1,
                           'cause': self.cols[col_index]})
        return num_errors

    def _checkTsvDialect(self, dialect):
        """Check if a csv.Dialect subclass describes a valid cBio data file."""
        if dialect.delimiter != '\t':
            self.logger.error('Not a tab-delimited file',
                              extra={'cause': 'delimiters of type: %s' %
                                              repr(dialect.delimiter)})
            return False
        # NOTE(review): a quoting problem is logged as an error but the
        # dialect is still reported as usable (True) — presumably intended
        # so that parsing continues; confirm
        if dialect.quoting != csv.QUOTE_NONE:
            self.logger.error('Found quotation marks around field(s) in the first rows of the file. '
                              'Fields and values should not be surrounded by quotation marks.',
                              extra={'cause': 'quotation marks of type: [%s] ' %
                                              repr(dialect.quotechar)[1:-1]})
        return True

    def _checkLineBreaks(self):
        """Checks line breaks, reports to user."""
        # TODO document these requirements
        if "\r\n" in self.newlines:
            self.logger.error('DOS-style line breaks detected (\\r\\n), '
                              'should be Unix-style (\\n)')
        elif "\r" in self.newlines:
            self.logger.error('Classic Mac OS-style line breaks detected '
                              '(\\r), should be Unix-style (\\n)')
        elif self.newlines != '\n':
            self.logger.error('No line breaks recognized in file',
                              extra={'cause': repr(self.newlines)[1:-1]})

    def checkInt(self, value):
        """Checks if a value is an integer."""
        try:
            int(value)
            return True
        except ValueError:
            return False

    def checkSampleId(self, sample_id, column_number):
        """Check whether a sample id is defined, logging an error if not.

        DEFINED_SAMPLE_IDS is a module-level global set elsewhere in
        this file.

        Return True if the sample id was valid, False otherwise.
        """
        if sample_id not in DEFINED_SAMPLE_IDS:
            self.logger.error(
                'Sample ID not defined in clinical file',
                extra={'line_number': self.line_number,
                       'column_number': column_number,
                       'cause': sample_id})
            return False
        return True

    def checkGeneIdentification(self, gene_symbol=None, entrez_id=None):
        """Check if a symbol-Entrez pair is valid, logging an error if not.

        It is considered valid in these three cases:
            1. only the Entrez id is not None, and it is defined in the portal
            2. only the symbol is not None, and it is unambiguously defined in
               the portal
            3. both are given, and the symbol is defined in the portal to match
               the Entrez id

        Return True if the pair was valid, False otherwise.
        """
        if entrez_id is not None:
            if gene_symbol is not None:
                if gene_symbol not in self.hugo_entrez_map:
                    self.logger.error(
                        'Gene symbol not known to the cBioPortal instance',
                        extra={'line_number': self.line_number,
                               'cause': gene_symbol})
                    return False
                elif self.hugo_entrez_map[gene_symbol] != entrez_id:
                    self.logger.error(
                        'Gene symbol does not match given Entrez id',
                        extra={'line_number': self.line_number,
                               'cause': gene_symbol + ', ' + entrez_id})
                    return False
            else:
                # linear scan over the map's values
                if entrez_id not in (self.hugo_entrez_map[sym] for
                                     sym in self.hugo_entrez_map):
                    self.logger.error(
                        'Entrez gene id not known to the cBioPortal instance.',
                        extra={'line_number': self.line_number,
                               'cause': entrez_id})
                    return False
        elif gene_symbol is not None:
            if gene_symbol not in self.hugo_entrez_map:
                self.logger.error(
                    'Gene symbol not known to the cBioPortal instance.',
                    extra={'line_number': self.line_number,
                           'cause': gene_symbol})
                return False
        else:
            self.logger.error(
                'No Entrez id or gene symbol provided for gene',
                extra={'line_number': self.line_number})
            return False
        return True

    def _checkRepeatedColumns(self):
        """Log an error for each column header that occurs more than once.

        Return the number of errors encountered.
        """
        num_errors = 0
        seen = set()
        for col_num, col in enumerate(self.cols):
            if col not in seen:
                seen.add(col)
            else:
                num_errors += 1
                self.logger.error('Repeated column header',
                                  extra={'line_number': self.line_number,
                                         'column_number': col_num,
                                         'cause': col})
        return num_errors

    def checkBadChar(self):
        """Check for bad things in a header, such as spaces, etc."""
        num_errors = 0
        for col_num, col_name in enumerate(self.cols):
            for bc in self.badChars:
                if bc in col_name:
                    num_errors += 1
                    self.logger.error("Bad character '%s' detected in header",
                                      bc,
                                      extra={'line_number': self.line_number,
                                             'column_number': col_num,
                                             'cause': col_name})
        return num_errors
class FeaturewiseFileValidator(Validator):
    """Validates a file with rows for features and columns for ids and samples.

    The first few columns (collectively defined in the class attributes
    REQUIRED_HEADERS and OPTIONAL_HEADERS) identify the features
    (e.g. genes) and the rest correspond to the samples.

    Subclasses should override the checkValue(self, value, col_index)
    function to check value in a sample column, and check the non-sample
    columns by overriding and extending checkLine(self, data). The method
    can find the headers of these columns in self.nonsample_cols.
    """

    OPTIONAL_HEADERS = []
    REQUIRE_COLUMN_ORDER = True

    def __init__(self, *args, **kwargs):
        """Initialize the validator with empty column bookkeeping."""
        super(FeaturewiseFileValidator, self).__init__(*args, **kwargs)
        # headers of the feature-identifying (non-sample) columns
        self.nonsample_cols = []
        self.num_nonsample_cols = 0
        # sample IDs parsed from the remaining column headers
        self.sampleIds = []

    def checkHeader(self, cols):
        """Validate the header and read sample IDs from it.

        Return the number of fatal errors.
        """
        num_errors = super(FeaturewiseFileValidator, self).checkHeader(cols)
        if num_errors > 0:
            return num_errors
        # collect the non-sample columns headers, assuming order is required
        self.nonsample_cols = list(self.REQUIRED_HEADERS)
        # start looking for optional cols at the index after the required ones
        col_index = len(self.nonsample_cols)
        # start with the first optional column
        for col_name in self.OPTIONAL_HEADERS:
            # Bug fix: bound the index before subscripting, so a header
            # that ends right after the required columns no longer raises
            # IndexError while probing for optional columns.
            # If the next column header in the file is the optional one we
            # are looking for:
            if col_index < self.numCols and self.cols[col_index] == col_name:
                # add it to the list of non-sample columns in the file
                self.nonsample_cols.append(col_name)
                # any subsequent optional column will be at the next index
                col_index += 1
            # otherwise, look for the next optional column at the same index
        self.num_nonsample_cols = len(self.nonsample_cols)
        num_errors += self._set_sample_ids_from_columns()
        return num_errors

    def checkLine(self, data):
        """Check the values in a data line."""
        super(FeaturewiseFileValidator, self).checkLine(data)
        for column_index, value in enumerate(data):
            if column_index >= len(self.nonsample_cols):
                # checkValue() should be implemented by subclasses
                self.checkValue(value, column_index)

    def checkValue(self, value, column_index):
        """Override to validate a value in a sample column."""
        raise NotImplementedError('The {} class did not provide a method to '
                                  'validate values in sample columns.'.format(
                                      self.__class__.__name__))

    def _set_sample_ids_from_columns(self):
        """Extracts sample IDs from column headers and set self.sampleIds.

        Return the number of errors encountered.
        """
        num_errors = 0
        # check whether any sample columns are present
        if len(self.cols[self.num_nonsample_cols:]) == 0:
            self.logger.error('No sample columns found',
                              extra={'line_number': self.line_number})
            num_errors += 1
        # set self.sampleIds to the list of sample column names
        self.sampleIds = self.cols[self.num_nonsample_cols:]
        # validate each sample id
        for index, sample_id in enumerate(self.sampleIds):
            if not self.checkSampleId(
                    sample_id,
                    column_number=self.num_nonsample_cols + index + 1):
                num_errors += 1
        return num_errors
class GenewiseFileValidator(FeaturewiseFileValidator):
    """FeatureWiseValidator that has Hugo and/or Entrez as feature columns."""

    OPTIONAL_HEADERS = ['Hugo_Symbol', 'Entrez_Gene_Id']

    def __init__(self, *args, **kwargs):
        super(GenewiseFileValidator, self).__init__(*args, **kwargs)

    def checkHeader(self, cols):
        """Validate the header and read sample IDs from it.

        Return the number of fatal errors.
        """
        num_errors = super(GenewiseFileValidator, self).checkHeader(cols)
        has_gene_id_column = ('Hugo_Symbol' in self.nonsample_cols or
                              'Entrez_Gene_Id' in self.nonsample_cols)
        if not has_gene_id_column:
            self.logger.error('At least one of the columns Hugo_Symbol or '
                              'Entrez_Gene_Id needs to be present.',
                              extra={'line_number': self.line_number})
            num_errors += 1
        return num_errors

    def checkLine(self, data):
        """Check the values in a data line."""
        super(GenewiseFileValidator, self).checkLine(data)

        def feature_value(col_name):
            # return the value in the named column, or None if the column is
            # absent or holds a missing-value marker ('NA' or empty string)
            if col_name not in self.nonsample_cols:
                return None
            value = data[self.nonsample_cols.index(col_name)]
            if value in ('NA', ''):
                return None
            return value

        self.checkGeneIdentification(feature_value('Hugo_Symbol'),
                                     feature_value('Entrez_Gene_Id'))
class CNAValidator(GenewiseFileValidator):
    """Sub-class CNA validator."""

    # legal copy-number calls, plus the missing-value markers '' and 'NA'
    ALLOWED_VALUES = ['-2','-1','0','1','2','','NA']

    def checkValue(self, value, col_index):
        """Check a value in a sample column."""
        if value in self.ALLOWED_VALUES:
            return
        if self.logger.isEnabledFor(logging.ERROR):
            self.logger.error(
                'Invalid CNA value: possible values are [%s]',
                ', '.join(self.ALLOWED_VALUES),
                extra={'line_number': self.line_number,
                       'column_number': col_index + 1,
                       'cause': value})
class MutationsExtendedValidator(Validator):
    """Sub-class mutations_extended validator.

    Each known column is checked by the method named in
    CHECK_FUNCTION_MAP; see checkLine().
    """

    REQUIRED_HEADERS = [
        'Tumor_Sample_Barcode',
        'Amino_Acid_Change'
    ]
    OPTIONAL_HEADERS = [
        'Hugo_Symbol',
        'Entrez_Gene_Id'
    ]
    REQUIRE_COLUMN_ORDER = False

    # Used for mapping column names to the corresponding function that does a check on the value.
    # This can be done for other filetypes besides maf - not currently implemented.
    CHECK_FUNCTION_MAP = {
        'Hugo_Symbol':'checkValidHugo',
        'Entrez_Gene_Id':'checkValidEntrez',
        'Center':'checkCenter',
        'NCBI_Build':'checkNCBIbuild',
        'Chromosome':'checkChromosome',
        'Start_Position':'checkStartPosition',
        'End_Position':'checkEndPosition',
        'Strand':'checkStrand',
        'Variant_Classification':'checkVariantClassification',
        'Variant_Type':'checkVariantType',
        'Reference_Allele':'checkRefAllele',
        'Tumor_Seq_Allele1':'checkTumorSeqAllele',
        'Tumor_Seq_Allele2':'checkTumorSeqAllele',
        'dbSNP_RS':'checkdbSNP_RS',
        'dbSNP_Val_Status':'check_dbSNPValStatus',
        'Tumor_Sample_Barcode':'checkTumorSampleBarcode',
        'Matched_Norm_Sample_Barcode':'checkMatchedNormSampleBarcode',
        'Match_Norm_Seq_Allele1':'checkMatchNormSeqAllele',
        'Match_Norm_Seq_Allele2':'checkMatchNormSeqAllele',
        'Tumor_Validation_Allele1':'checkTumorValidationAllele',
        'Tumor_Validation_Allele2':'checkTumorValidationAllele',
        'Match_Norm_Validation_Allele1':'checkMatchNormValidationAllele',
        'Match_Norm_Validation_Allele2':'checkMatchNormValidationAllele',
        'Verification_Status':'checkVerificationStatus',
        'Validation_Status':'checkValidationStatus',
        'Mutation_Status':'checkMutationStatus',
        'Sequencing_Phase':'checkSequencingPhase',
        'Sequence_Source':'checkSequenceSource',
        'Validation_Method':'checkValidationMethod',
        'Score':'checkScore',
        'BAM_File':'checkBAMFile',
        'Sequencer':'checkSequencer',
        't_alt_count':'check_t_alt_count',
        't_ref_count':'check_t_ref_count',
        'n_alt_count':'check_n_alt_count',
        'n_ref_count':'check_n_ref_count',
        'Amino_Acid_Change': 'checkAminoAcidChange'}

    def __init__(self, hugo_entrez_map, logger, meta_dict):
        """Initialize the validator and the custom-error-message state."""
        super(MutationsExtendedValidator, self).__init__(hugo_entrez_map,
                                                         logger,
                                                         meta_dict)
        # TODO consider making this attribute a local var in checkLine(),
        # it really only makes sense there
        self.extraCols = []
        # a check method may set extra/extra_exists to request a custom
        # error message; consumed by printDataInvalidStatement()
        self.extra_exists = False
        self.extra = ''
        # TODO remove the attributes below, they violate the MAF standard
        self.toplinecount = 0
        self.sampleIdsHeader = set()
        self.headerPresent = False

    def checkHeader(self, cols):
        """Validate header, requiring at least one gene id column."""
        num_errors = super(MutationsExtendedValidator, self).checkHeader(cols)
        if not ('Hugo_Symbol' in self.cols or 'Entrez_Gene_Id' in self.cols):
            self.logger.error('At least one of the columns Hugo_Symbol or '
                              'Entrez_Gene_Id needs to be present.',
                              extra={'line_number': self.line_number})
            num_errors += 1
        return num_errors

    def checkLine(self, data):
        """Each value in each line is checked individually.

        From the column name (stored in self.cols), the
        corresponding function to check the value is selected from
        CHECK_FUNCTION_MAP. Will emit a generic warning
        message if this function returns False. If the function sets
        self.extra_exists to True, self.extra will be used in this
        message.
        """
        super(MutationsExtendedValidator, self).checkLine(data)
        for col_name in self.REQUIRED_HEADERS + self.OPTIONAL_HEADERS:
            col_index = self.cols.index(col_name)
            value = data[col_index]
            if col_name == 'Tumor_Sample_Barcode':
                self.checkSampleId(value, column_number=col_index + 1)
            # get the checking method for this column
            checking_function = getattr(
                self,
                self.CHECK_FUNCTION_MAP[col_name])
            if not checking_function(value):
                self.printDataInvalidStatement(value, col_index)
            elif self.extra_exists or self.extra:
                # guard against check methods that set a message yet
                # report success — that would silently drop the message
                raise RuntimeError(('Checking function %s set an error '
                                    'message but reported no error') %
                                   checking_function.__name__)
        # validate the gene identification columns, treating '' as absent
        hugo_symbol = None
        entrez_id = None
        if 'Hugo_Symbol' in self.cols:
            hugo_symbol = data[self.cols.index('Hugo_Symbol')]
            # treat the empty string as a missing value
            if hugo_symbol == '':
                hugo_symbol = None
        if 'Entrez_Gene_Id' in self.cols:
            entrez_id = data[self.cols.index('Entrez_Gene_Id')]
            # treat the empty string as a missing value
            if entrez_id == '':
                entrez_id = None
        self.checkGeneIdentification(hugo_symbol, entrez_id)

    def processTopLines(self, line_list):
        """Processes the top line, which contains sample ids used in study."""
        # TODO remove this function, it violates the MAF standard
        if not line_list:
            return True
        line = line_list[0]
        self.headerPresent = True
        topline = [x.strip() for x in line.split(' ') if '#' not in x]
        self.toplinecount += 1
        for sampleId in topline:
            self.sampleIdsHeader.add(sampleId)
        return True

    def printDataInvalidStatement(self, value, col_index):
        """Prints out statement for invalid values detected."""
        message = ("Value in column '%s' is invalid" %
                   self.cols[col_index])
        # a check method may have left a more specific message in self.extra
        if self.extra_exists:
            message = self.extra
            self.extra = ''
            self.extra_exists = False
        self.logger.error(
            message,
            extra={'line_number': self.line_number,
                   'column_number': col_index + 1,
                   'cause': value})

    # These functions check values of the MAF according to their name.
    # The mapping of which function checks which value is in the class
    # attribute CHECK_FUNCTION_MAP. If any other checks need to be added
    # for another field name, add an entry to that map for the function
    # that is created to check it.

    def checkValidHugo(self, value):
        """Issue no errors, as this field is checked in `checkLine()`."""
        return True

    def checkValidEntrez(self, value):
        """Issue no errors, as this field is checked in `checkLine()`."""
        return True

    def checkCenter(self, value):
        return True

    def checkChromosome(self, value):
        """Accept autosomes 1-22 and the X, Y and M chromosomes."""
        if self.checkInt(value):
            if 1 <= int(value) <= 22:
                return True
            return False
        elif value in ('X', 'Y', 'M'):
            return True
        return False

    def checkStartPosition(self, value):
        return True

    def checkEndPosition(self, value):
        return True

    def checkTumorSampleBarcode(self, value):
        """Issue no warnings, as this field is checked in `checkLine()`."""
        return True

    def checkNCBIbuild(self, value):
        # NCBI_BUILD_NUMBER is a module-level global defined elsewhere
        if self.checkInt(value) and value != '':
            if int(value) != NCBI_BUILD_NUMBER:
                return False
        return True

    def checkStrand(self, value):
        if value != '+':
            return False
        return True

    def checkVariantClassification(self, value):
        return True

    def checkVariantType(self, value):
        return True

    def checkRefAllele(self, value):
        return True

    def checkTumorSeqAllele(self, value):
        return True

    def checkdbSNP_RS(self, value):
        """Accept any value.

        Bug fix: renamed from check_dbSNPRS so that the
        'dbSNP_RS': 'checkdbSNP_RS' entry in CHECK_FUNCTION_MAP resolves
        via getattr() instead of raising AttributeError.
        """
        return True

    def check_dbSNPValStatus(self, value):
        return True

    def checkMatchedNormSampleBarcode(self, value):
        """Check the normal sample id against the ids from the top line."""
        if value != '':
            if self.headerPresent and value not in self.sampleIdsHeader:
                self.extra = 'Normal sample id not in sample ids from header'
                self.extra_exists = True
                return False
        return True

    def checkMatchNormSeqAllele(self, value):
        """Accept any value.

        Bug fix: renamed from the garbled
        checkMatchedNormSampleBarcodehNormSeqAllele so that the
        'Match_Norm_Seq_Allele1/2': 'checkMatchNormSeqAllele' entries in
        CHECK_FUNCTION_MAP resolve via getattr().
        """
        return True

    def checkTumorValidationAllele(self, value):
        return True

    def checkMatchNormValidationAllele(self, value):
        return True

    def checkVerificationStatus(self, value):
        if value.lower() not in ('', 'verified', 'unknown'):
            return False
        return True

    def checkValidationStatus(self, value):
        if value == '':
            return True
        if value.lower() not in ('valid', 'unknown', 'na', 'untested'):
            return False
        return True

    def checkMutationStatus(self, value):
        return True

    def checkSequencingPhase(self, value):
        return True

    def checkSequenceSource(self, value):
        return True

    def checkValidationMethod(self, value):
        return True

    def checkScore(self, value):
        return True

    def checkBAMFile(self, value):
        return True

    def checkSequencer(self, value):
        return True

    def check_t_alt_count(self, value):
        """Accept an integer or the empty string."""
        if not self.checkInt(value) and value != '':
            return False
        return True

    def check_t_ref_count(self, value):
        """Accept an integer or the empty string."""
        if not self.checkInt(value) and value != '':
            return False
        return True

    def check_n_alt_count(self, value):
        """Accept an integer or the empty string."""
        if not self.checkInt(value) and value != '':
            return False
        return True

    def check_n_ref_count(self, value):
        """Accept an integer or the empty string."""
        if not self.checkInt(value) and value != '':
            return False
        return True

    def checkAminoAcidChange(self, value):
        """Test whether a string is a valid amino acid change specification."""
        # TODO implement this test, may require bundling the hgvs package:
        # https://pypi.python.org/pypi/hgvs/
        return True
class ClinicalValidator(Validator):
"""Validator for clinical data files."""
REQUIRED_HEADERS = [
'PATIENT_ID',
'SAMPLE_ID'
]
REQUIRE_COLUMN_ORDER = False
srv_attrs = None
    def __init__(self, *args, **kwargs):
        """Initialize the validator with empty per-file clinical state."""
        super(ClinicalValidator, self).__init__(*args, **kwargs)
        # sample IDs seen in this file; presumably populated while checking
        # data lines (not visible here) — TODO confirm against checkLine()
        self.sampleIds = set()
        # per-column attribute definitions parsed from the '#' header
        # comment lines by processTopLines()
        self.attr_defs = []
def processTopLines(self, line_list):
"""Parse the the attribute definitions above the column header."""
LINE_NAMES = ('display_name',
'description',
'datatype',
'attribute_type',
'priority')
if not line_list:
self.logger.error(
'No data type header comments found in clinical data file',
extra={'line_number': self.line_number})
return False
if len(line_list) != len(LINE_NAMES):
self.logger.error(
'%d comment lines at start of clinical data file, expected %d',
len(line_list),
len(LINE_NAMES))
return False
# remove the # signs
line_list = [line[1:] for line in line_list]
attr_defs = None
num_attrs = 0
csvreader = csv.reader(line_list,
delimiter='\t',
quoting=csv.QUOTE_NONE,
strict=True)
invalid_values = False
for line_index, row in enumerate(csvreader):
if attr_defs is None:
# make a list of as many lists as long as there are columns
num_attrs = len(row)
attr_defs = [OrderedDict() for i in range(num_attrs)]
elif len(row) != num_attrs:
self.logger.error(
'Varying numbers of columns in clinical header (%d, %d)',
num_attrs,
len(row),
extra={'line_number': line_index + 1})
return False
for col_index, value in enumerate(row):
# test for invalid values in these (otherwise parseable) lines
if value in ('', 'NA'):
self.logger.error(
'Empty %s field in clinical attribute definition',
LINE_NAMES[line_index],
extra={'line_number': line_index + 1,
'column_number': col_index + 1,
'cause': value})
invalid_values = True
if LINE_NAMES[line_index] in ('display_name', 'description'):
pass
elif LINE_NAMES[line_index] == 'datatype':
VALID_DATATYPES = ('STRING', 'NUMBER', 'BOOLEAN')
if value not in VALID_DATATYPES:
self.logger.error(
'Invalid data type definition, must be one of'
' [%s]',
', '.join(VALID_DATATYPES),
extra={'line_number': line_index + 1,
'colum_number': col_index + 1,
'cause': value})
invalid_values = True
elif LINE_NAMES[line_index] == 'attribute_type':
VALID_ATTR_TYPES = ('PATIENT', 'SAMPLE')
if value not in VALID_ATTR_TYPES:
self.logger.error(
'Invalid attribute type definition, must be one of'
' [%s]',
', '.join(VALID_ATTR_TYPES),
extra={'line_number': line_index + 1,
'colum_number': col_index + 1,
'cause': value})
invalid_values = True
elif LINE_NAMES[line_index] == 'priority':
try:
if int(value) < 1:
raise ValueError()
except ValueError:
self.logger.error(
'Priority definition must be a positive integer',
extra={'line_number': line_index + 1,
'column_number': col_index + 1,
'cause': value})
invalid_values = True
else:
raise Exception('Unknown clinical header line name')
attr_defs[col_index][LINE_NAMES[line_index]] = value
self.attr_defs = attr_defs
return not invalid_values
def checkHeader(self, cols):
num_errors = super(ClinicalValidator, self).checkHeader(cols)
if self.numCols != len(self.attr_defs):
self.logger.error(
'Varying numbers of columns in clinical header (%d, %d)',
len(self.attr_defs),
len(self.cols),
extra={'line_number': self.line_number})
num_errors += 1
for col_index, col_name in enumerate(self.cols):
if not col_name.isupper():
self.logger.warning(
"Clinical header not in all caps",
extra={'line_number': self.line_number,
'cause': col_name})
# look up how the attribute is defined in the portal
srv_attr_properties = self.srv_attrs.get(col_name)
if srv_attr_properties is None:
self.logger.warning(
'New %s-level attribute will be added to the portal',
self.attr_defs[col_index]['attribute_type'].lower(),
extra={'line_number': self.line_number,
'column_number': col_index + 1,
'cause': col_name})
else:
# translate one property having a different format in the API
transl_attr_properties = {}
for prop in srv_attr_properties:
# define the 'attribute_type' property as it is found in
# files, based on 'is_patient_attribute' from the API
if prop == 'is_patient_attribute':
if srv_attr_properties[prop] == '1':
transl_attr_properties['attribute_type'] = 'PATIENT'
else:
transl_attr_properties['attribute_type'] = 'SAMPLE'
# all of the other properties just match the file format
elif prop in ('display_name', 'description',
'datatype', 'priority'):
transl_attr_properties[prop] = srv_attr_properties[prop]
# compare values defined in the file with the existing ones
for attr_property in self.attr_defs[col_index]:
value = self.attr_defs[col_index][attr_property]
if value != transl_attr_properties[attr_property]:
self.logger.error(
"%s definition for attribute '%s' does not match "
"the portal, '%s' expected",
attr_property,
col_name,
transl_attr_properties[attr_property],
extra={'line_number': self.attr_defs[col_index].keys().index(attr_property) + 1,
'column_number': col_index + 1,
'cause': value})
num_errors += 1
return num_errors
def checkLine(self, data):
super(ClinicalValidator, self).checkLine(data)
for col_index, value in enumerate(data):
# TODO check the values in the other cols, required and optional
# TODO check if cancer types in clinical attributes are defined
if col_index == self.cols.index('SAMPLE_ID'):
if DEFINED_SAMPLE_IDS and value not in DEFINED_SAMPLE_IDS:
self.logger.error(
'Defining new sample id in secondary clinical file',
extra={'line_number': self.line_number,
'column_number': col_index + 1,
'cause': value})
self.sampleIds.add(value.strip())
@classmethod
def request_attrs(cls, server_url, logger):
"""Initialize cls.srv_attrs using the portal API."""
cls.srv_attrs = request_from_portal_api(
server_url + '/api/clinicalattributes/patients',
logger,
id_field='attr_id')
srv_sample_attrs = request_from_portal_api(
server_url + '/api/clinicalattributes/samples',
logger,
id_field='attr_id')
# if this happens, the database structure has changed and this script
# needs to be updated
id_overlap = (set(cls.srv_attrs.keys()) &
set(srv_sample_attrs.keys()))
if id_overlap:
raise ValueError(
'The portal at {url} returned these clinical attributes '
'both for samples and for patients: {attrs}'.format(
url=server_url,
attrs=', '.join(id_overlap)))
else:
cls.srv_attrs.update(srv_sample_attrs)
class SegValidator(Validator):

    """Validator for .seg files."""

    REQUIRED_HEADERS = [
        'ID',
        'chrom',
        'loc.start',
        'loc.end',
        'num.mark',
        'seg.mean']
    REQUIRE_COLUMN_ORDER = True

    def checkLine(self, data):
        """Check a data line, validating the sample id ('ID') column."""
        super(SegValidator, self).checkLine(data)
        # TODO check values in all other columns too
        id_column = self.cols.index(self.REQUIRED_HEADERS[0])
        for position, field in enumerate(data):
            if position == id_column:
                self.checkSampleId(field, column_number=position + 1)
class Log2Validator(GenewiseFileValidator):

    """Validator for log2CNA data files (see VALIDATOR_IDS)."""

    def checkValue(self, value, col_index):
        """Check a value in a sample column."""
        # TODO check these values
        pass
class ExpressionValidator(GenewiseFileValidator):

    """Validator for expression data files (see VALIDATOR_IDS)."""

    def checkValue(self, value, col_index):
        """Check a value in a sample column."""
        # TODO check these values
        pass
class FusionValidator(Validator):

    """Validator for fusion data files (see VALIDATOR_IDS)."""

    REQUIRED_HEADERS = [
        'Hugo_Symbol',
        'Entrez_Gene_Id',
        'Center',
        'Tumor_Sample_Barcode',
        'Fusion',
        'DNA support',
        'RNA support',
        'Method',
        'Frame']
    REQUIRE_COLUMN_ORDER = True

    def checkLine(self, data):
        """Check a data line; only the generic checks are implemented so far."""
        super(FusionValidator, self).checkLine(data)
        # TODO check the values
class MethylationValidator(GenewiseFileValidator):

    """Validator for methylation data files (see VALIDATOR_IDS)."""

    def checkValue(self, value, col_index):
        """Check a value in a sample column."""
        # TODO check these values
        pass
class RPPAValidator(FeaturewiseFileValidator):

    """Validator for RPPA data files (see VALIDATOR_IDS)."""

    REQUIRED_HEADERS = ['Composite.Element.REF']

    def checkLine(self, data):
        """Check a data line; only the generic checks are implemented so far."""
        super(RPPAValidator, self).checkLine(data)
        # TODO check the values in the first column
        # for rppa, first column should be hugo|antibody, everything after should be sampleIds

    def checkValue(self, value, col_index):
        """Check a value in a sample column."""
        # TODO check these values
        pass
class TimelineValidator(Validator):

    """Validator for timeline data files (see VALIDATOR_IDS)."""

    REQUIRED_HEADERS = [
        'PATIENT_ID',
        'START_DATE',
        'STOP_DATE',
        'EVENT_TYPE']
    REQUIRE_COLUMN_ORDER = True

    def checkLine(self, data):
        """Check a data line; only the generic checks are implemented so far."""
        super(TimelineValidator, self).checkLine(data)
        # TODO check the values
# ------------------------------------------------------------------------------
# Functions
def parse_metadata_file(filename, logger, study_id=None, case_list=False):

    """Validate a metafile and return a dictionary of values read from it.

    Return `None` if the file is invalid. If `case_list` is True,
    validate the file as a case list instead of a meta file.

    :param filename: name of the meta file
    :param logger: the logging.Logger instance to log warnings and errors to
    :param study_id: cancer study id found in previous files (or None). All
        subsequent meta files should comply to this in the field
        'cancer_study_identifier'
    :param case_list: whether this meta file is a case list (special case)
    """

    metaDictionary = {}
    with open(filename, 'rU') as metafile:
        for line_index, line in enumerate(metafile):
            # every line must be a "key: value" pair
            if ': ' not in line:
                logger.error(
                    "Invalid %s file entry, no ': ' found",
                    {True: 'case list', False: 'meta'}[case_list],
                    extra={'data_filename': getFileFromFilepath(filename),
                           'line_number': line_index + 1})
                return None
            key, val = line.rstrip().split(': ', 1)
            metaDictionary[key] = val

    if case_list:
        meta_file_type = 'case_list'
    else:
        if 'meta_file_type' not in metaDictionary:
            # message previously ended in a stray apostrophe
            logger.error("Missing field 'meta_file_type' in meta file",
                         extra={'data_filename': getFileFromFilepath(filename)})
            # skip this file (can't validate unknown file types)
            return None
        meta_file_type = metaDictionary['meta_file_type']
        if meta_file_type not in META_FILE_PATTERNS:
            logger.error('Unknown meta_file_type',
                         extra={'data_filename': getFileFromFilepath(filename),
                                'cause': meta_file_type})
            # skip this file (can't validate unknown file types)
            return None

    missing_fields = []
    for field in META_FIELD_MAP[meta_file_type]:
        mandatory = META_FIELD_MAP[meta_file_type][field]
        if field not in metaDictionary and mandatory:
            logger.error("Missing field '%s' in %s file",
                         field,
                         {True: 'case list', False: 'meta'}[case_list],
                         extra={'data_filename': getFileFromFilepath(filename)})
            missing_fields.append(field)
    if missing_fields:
        # skip this file (the fields may be required for validation)
        return None

    for field in metaDictionary:
        if field not in META_FIELD_MAP[meta_file_type]:
            logger.warning(
                'Unrecognized field in %s file',
                {True: 'case list', False: 'meta'}[case_list],
                extra={'data_filename': getFileFromFilepath(filename),
                       'cause': field})

    # check that cancer study identifiers across files so far are consistent.
    # Use .get(): not every meta file type defines this field, and indexing
    # directly raised a KeyError for such files once study_id was known.
    file_study_id = metaDictionary.get('cancer_study_identifier')
    if (
            study_id is not None and
            file_study_id is not None and
            study_id != file_study_id):
        logger.error(
            "Cancer study identifier is not consistent across "
            "files, expected '%s'",
            study_id,
            extra={'data_filename': getFileFromFilepath(filename),
                   'cause': file_study_id})
        return None

    # compare a meta_cancer_type file with the portal instance
    if meta_file_type == CANCER_TYPE_META_PATTERN:
        file_cancer_type = metaDictionary.get('type_of_cancer')
        if file_cancer_type not in PORTAL_CANCER_TYPES:
            logger.warning(
                'New disease type will be added to the portal',
                extra={'data_filename': getFileFromFilepath(filename),
                       'cause': file_cancer_type})
        else:
            existing_info = PORTAL_CANCER_TYPES[file_cancer_type]
            invalid_fields_found = False
            for field in metaDictionary:
                if (
                        field in CANCER_TYPE_META_FIELDS and
                        field != 'cancer_type_id' and
                        metaDictionary[field] != existing_info[field]):
                    logger.error(
                        "%s field of cancer type does not match the "
                        "portal, '%s' expected",
                        field,
                        existing_info[field],
                        extra={'data_filename': getFileFromFilepath(filename),
                               'cause': metaDictionary[field]})
                    invalid_fields_found = True
            if invalid_fields_found:
                return None

    # check fields specific to seg meta file
    if meta_file_type == SEG_META_PATTERN:
        if metaDictionary['data_filename'] != metaDictionary['data_file_path']:
            logger.error(
                'data_filename and data_file_path differ in seg data file',
                extra={'data_filename': getFileFromFilepath(filename),
                       'cause': (metaDictionary['data_filename'] + ', ' +
                                 metaDictionary['data_file_path'])})
            return None
        if metaDictionary['reference_genome_id'] != GENOMIC_BUILD_COUNTERPART:
            logger.error(
                'Reference_genome_id is not %s',
                GENOMIC_BUILD_COUNTERPART,
                extra={'data_filename': getFileFromFilepath(filename),
                       'cause': metaDictionary['reference_genome_id']})
            return None

    # if this file type doesn't take a data file, make sure one isn't parsed
    if (
            'data_file_path' in metaDictionary and
            'data_file_path' not in META_FIELD_MAP[meta_file_type]):
        logger.warning(
            "File '%s' referenced by meta file will not be processed as the "
            "attribute data_file_path is not expected in this meta file",
            metaDictionary['data_file_path'],
            extra={'data_filename': getFileFromFilepath(filename),
                   'cause': metaDictionary['data_file_path']})

    return metaDictionary
def process_metadata_files(directory, logger):

    """Parse the meta files in a directory and create data file validators.

    Return a tuple of:
        1. a dict listing the data file validator (or None) for each meta file
           by file type,
        2. a list of cancer type ids that have been defined in this study, and
        3. the study id

    Possible file types are listed in META_FILE_PATTERNS.
    """
    # collect paths of all files in the directory whose name contains 'meta'
    meta_filenames = [os.path.join(directory, fname)
                      for fname in os.listdir(directory)
                      if re.search(r'(\b|_)meta(\b|_)', fname)]

    study_id = None
    study_cancer_type = None
    validators_by_type = {}
    defined_cancer_types = []

    for meta_filename in meta_filenames:
        meta = parse_metadata_file(meta_filename, logger, study_id)
        if meta is None:
            continue
        if study_id is None:
            study_id = meta['cancer_study_identifier']
        meta_file_type = meta['meta_file_type']
        if meta_file_type == STUDY_META_PATTERN:
            if study_cancer_type is not None:
                logger.error(
                    'Encountered a second meta_study file',
                    extra={'data_filename': getFileFromFilepath(meta_filename)})
            study_cancer_type = meta['type_of_cancer']
        if meta_file_type == CANCER_TYPE_META_PATTERN:
            file_cancer_type = meta['type_of_cancer']
            if file_cancer_type in defined_cancer_types:
                logger.error(
                    'Cancer type defined a second time in study',
                    extra={'data_filename': getFileFromFilepath(meta_filename),
                           'cause': file_cancer_type})
            defined_cancer_types.append(meta['type_of_cancer'])
        # make sure there is a list to append this file type's validator to
        validators_by_type.setdefault(meta_file_type, [])
        # instantiate a validator only when a data file is referenced AND
        # this meta file type supports one according to META_FIELD_MAP
        if ('data_file_path' in meta and
                'data_file_path' in META_FIELD_MAP[meta_file_type]):
            validators_by_type[meta_file_type].append(
                ValidatorFactory.createValidator(
                    VALIDATOR_IDS[meta_file_type],
                    HUGO_ENTREZ_MAP,
                    logger,
                    meta))
        else:
            validators_by_type[meta_file_type].append(None)

    if not (study_cancer_type in PORTAL_CANCER_TYPES or
            study_cancer_type in defined_cancer_types):
        logger.error(
            'Cancer type of study is neither known to the portal nor defined '
            'in a meta_cancer_type file',
            extra={'cause': study_cancer_type})

    return validators_by_type, defined_cancer_types, study_id
def getFileFromFilepath(f):
    """Return the base filename of a path, ignoring surrounding whitespace."""
    stripped_path = f.strip()
    return os.path.basename(stripped_path)
def processCaseListDirectory(caseListDir, cancerStudyId, logger):
    """Validate all case list files in a directory against the study."""
    logger.info('Validating case lists')
    for case_filename in [os.path.join(caseListDir, entry)
                          for entry in os.listdir(caseListDir)]:
        case_data = parse_metadata_file(case_filename, logger, cancerStudyId,
                                        case_list=True)
        if case_data is None:
            continue
        # case_list_ids holds a tab-separated list of sample ids
        listed_sample_ids = {entry.strip() for entry in
                             case_data['case_list_ids'].split('\t')}
        for sample_id in listed_sample_ids:
            if sample_id not in DEFINED_SAMPLE_IDS:
                logger.error(
                    'Sample id not defined in clinical file',
                    extra={'data_filename': getFileFromFilepath(case_filename),
                           'cause': sample_id})
    logger.info('Validation of case lists complete')
def request_from_portal_api(service_url, logger, id_field=None):

    """Send a request to the portal API and return the decoded JSON object.

    If id_field is specified, expect the object to be a list of dicts,
    and instead return a dict indexed by the specified field of said
    dictionaries. E.g.:
    [{'id': 'spam', 'val1': 1}, {'id':'eggs', 'val1':42}] ->
    {'spam': {'val1': 1}, 'eggs': {'val1': 42}}

    :raises IOError: if the HTTP request failed
    :raises RuntimeError: if id_field is missing from a returned dict
    """
    url_split = service_url.split('/api/', 1)
    logger.info("Requesting %s from portal at '%s'",
                url_split[1], url_split[0])
    response = requests.get(service_url)
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as e:
        raise IOError(
            'Connection error for URL: {url}. Administrator: please check if '
            # str(e) instead of e.message: the latter no longer exists in
            # Python 3 and was deprecated in Python 2
            '[{url}] is accessible. Message: {msg}'.format(url=service_url,
                                                           msg=str(e)))
    json_data = response.json()
    if id_field is None:
        return json_data
    # re-index the list of dicts by the requested field, e.g.:
    # [{'id': 'spam', 'val1': 1}, {'id':'eggs', 'val1':42}] ->
    # {'spam': {'val1': 1}, 'eggs': {'val1': 42}}
    transformed_dict = {}
    for attr in json_data:
        # make a copy of the attr dict so the response object is not mutated
        attr_dict = dict(attr)
        if id_field not in attr_dict:
            raise RuntimeError('Unexpected error while calling web-service. '
                               'Please check if given {url} is correct'.format(url=service_url))
        # the id field becomes the key, so remove it from the value
        del attr_dict[id_field]
        transformed_dict[attr[id_field]] = attr_dict
    return transformed_dict
def get_hugo_entrez_map(server_url, logger):
    """
    Returns a dict with hugo symbols and respective entrezId, e.g.:
    # dict: {'LOC105377913': '105377913', 'LOC105377912': '105377912', hugo: entrez, hugo: entrez...
    """
    # TODO implement an API call for gene aliases and include those in the map
    json_data = request_from_portal_api(server_url + '/api/genes', logger)
    # the service returns a list of dicts like
    # {'hugo_gene_symbol': 'SRXN1', 'entrez_gene_id': '140809'};
    # flatten it into a symbol -> entrez id mapping
    return dict((entry['hugo_gene_symbol'], entry['entrez_gene_id'])
                for entry in json_data)
# ------------------------------------------------------------------------------
def interface(args=None):
    """Parse command line options and return the resulting namespace.

    :param args: argument list to parse instead of sys.argv (useful in tests)
    """
    parser = argparse.ArgumentParser(description='cBioPortal meta Validator')
    parser.add_argument('-s', '--study_directory', type=str, required=True,
                        help='path to directory.')
    parser.add_argument('-u', '--url_server', type=str, required=False,
                        default='http://localhost/cbioportal',
                        help='URL to cBioPortal server. You can set this if '
                             'your URL is not http://localhost/cbioportal')
    parser.add_argument('-html', '--html_table', type=str, required=False,
                        help='path to html report output file')
    parser.add_argument('-v', '--verbose', required=False, action="store_true",
                        help='list warnings in addition to fatal errors')
    # use a separate name for the result; it previously shadowed `parser`
    parsed_args = parser.parse_args(args)
    return parsed_args
def main_validate(args):
    """Validate the study directory given in the parsed args.

    Sets up logging, queries the portal for reference data, validates the
    clinical file first (to learn the study's sample ids) and then all other
    data files and case lists. Returns an exit status as produced by
    MaxLevelTrackingHandler.get_exit_status().
    """
    # global study properties
    global STUDY_DIR
    global DEFINED_SAMPLE_IDS
    global DEFINED_CANCER_TYPES
    # global portal properties
    global SERVER_URL
    global PORTAL_CANCER_TYPES
    global HUGO_ENTREZ_MAP
    # get a logger to emit messages
    logger = logging.getLogger(__name__)
    logger.handlers = []
    logger.setLevel(logging.INFO)
    # tracks the highest severity logged, to derive the exit status at the end
    exit_status_handler = MaxLevelTrackingHandler()
    logger.addHandler(exit_status_handler)
    # process the options
    STUDY_DIR = args.study_directory
    SERVER_URL = args.url_server
    html_output_filename = args.html_table
    verbose = False
    if args.verbose:
        verbose = True
    # check existence of directory
    if not os.path.exists(STUDY_DIR):
        print >> sys.stderr, 'directory cannot be found: ' + STUDY_DIR
        return 2
    # set default message handler
    # (CollapsingLogMessageHandler is defined elsewhere in this file)
    text_handler = logging.StreamHandler(sys.stdout)
    text_handler.setFormatter(LogfileStyleFormatter())
    collapsing_text_handler = CollapsingLogMessageHandler(
        capacity=1e6,
        flushLevel=logging.CRITICAL,
        target=text_handler)
    if not verbose:
        collapsing_text_handler.setLevel(logging.ERROR)
    logger.addHandler(collapsing_text_handler)
    collapsing_html_handler = None
    html_handler = None
    # add html table handler if applicable
    if html_output_filename:
        # just to make sure users get dependency error at start:
        import jinja2  # pylint: disable=import-error
        html_handler = Jinja2HtmlHandler(
            STUDY_DIR,
            html_output_filename,
            capacity=1e5)
        # TODO extend CollapsingLogMessageHandler to flush to multiple targets,
        # and get rid of the duplicated buffering of messages here
        collapsing_html_handler = CollapsingLogMessageHandler(
            capacity=1e6,
            flushLevel=logging.CRITICAL,
            target=html_handler)
        if not verbose:
            collapsing_html_handler.setLevel(logging.ERROR)
        logger.addHandler(collapsing_html_handler)
    # Entrez values for Hugo symbols in the portal
    HUGO_ENTREZ_MAP = get_hugo_entrez_map(SERVER_URL, logger)
    # retrieve cancer types defined in the portal
    PORTAL_CANCER_TYPES = request_from_portal_api(
        SERVER_URL + '/api/cancertypes',
        logger,
        id_field='id')
    # retrieve clinical attributes defined in the portal
    ClinicalValidator.request_attrs(SERVER_URL, logger)
    # walk over the meta files in the dir and get properties of the study
    (validators_by_meta_type,
     DEFINED_CANCER_TYPES,
     study_id) = process_metadata_files(STUDY_DIR, logger)
    if CLINICAL_META_PATTERN not in validators_by_meta_type:
        logger.error('No clinical file detected')
        return exit_status_handler.get_exit_status()
    if len(validators_by_meta_type[CLINICAL_META_PATTERN]) != 1:
        if logger.isEnabledFor(logging.ERROR):
            logger.error(
                'Multiple clinical files detected',
                extra={'cause': ', '.join(
                    validator.filenameShort for validator in
                    validators_by_meta_type[CLINICAL_META_PATTERN])})
    # get the validator for the clinical data file
    clinvalidator = validators_by_meta_type[CLINICAL_META_PATTERN][0]
    # parse the clinical data file to get defined sample ids for this study
    clinvalidator.validate()
    if not clinvalidator.fileCouldBeParsed:
        logger.error("Clinical file could not be parsed. Please fix the problems found there first before continuing.")
        return exit_status_handler.get_exit_status()
    DEFINED_SAMPLE_IDS = clinvalidator.sampleIds
    # validate non-clinical data files
    for meta_file_type in validators_by_meta_type:
        # skip clinical files, they have already been validated
        if meta_file_type == CLINICAL_META_PATTERN:
            continue
        for validator in validators_by_meta_type[meta_file_type]:
            # if there was no validator for this meta file
            if validator is None:
                continue
            validator.validate()
    case_list_dirname = os.path.join(STUDY_DIR, 'case_lists')
    if not os.path.isdir(case_list_dirname):
        logger.warning("No directory named 'case_lists' found")
    else:
        processCaseListDirectory(case_list_dirname, study_id, logger)
    logger.info('Validation complete')
    exit_status = exit_status_handler.get_exit_status()
    if html_handler is not None:
        # flush the remaining buffered messages and write the html report
        collapsing_html_handler.flush()
        html_handler.generateHtml()
    return exit_status
# ------------------------------------------------------------------------------
# vamanos
if __name__ == '__main__':
    try:
        # parse command line options
        args = interface()
        # run the script
        exit_status = main_validate(args)
        # translate the numeric exit status into a human-readable summary
        print >>sys.stderr, ('Validation of study {status}.'.format(
            status={0: 'succeeded',
                    1: 'failed',
                    2: 'not performed as problems occurred',
                    3: 'succeeded with warnings'}.get(exit_status, 'unknown')))
    finally:
        logging.shutdown()
        del logging._handlerList[:]  # workaround for harmless exceptions on exit
# Correct expected meta file fields for study and cancer type
#! /usr/bin/env python
# ------------------------------------------------------------------------------
# Data validation script - validates files before import into portal.
# If create-corrected set to true, the script will create a new version of all the files it detects
# and ensure the newlines are correct and that no data is enclosed in quotes. It will also
# add entrez IDs if they are not present and the user either provides the file or sets ftp
# Also checks for duplicate column headers, repeated header rows
# ------------------------------------------------------------------------------
# imports
import sys
import os
import logging
import logging.handlers
from collections import OrderedDict
import argparse
import re
import csv
import itertools
import requests
# ------------------------------------------------------------------------------
# globals
# Current NCBI build and build counterpart - used in one of the maf checks as well as .seq filename check
NCBI_BUILD_NUMBER = 37
GENOMIC_BUILD_COUNTERPART = 'hg19'
# study-specific globals
# these are filled in at runtime by main_validate() / process_metadata_files()
STUDY_DIR = None
DEFINED_SAMPLE_IDS = None
DEFINED_CANCER_TYPES = None
SERVER_URL = 'http://localhost/cbioportal'
PORTAL_CANCER_TYPES = None
HUGO_ENTREZ_MAP = None
# ----------------------------------------------------------------------------
# how we differentiate between data types based on the meta_file_type field
SEG_META_PATTERN = 'meta_segment'
STUDY_META_PATTERN = 'meta_study'
CANCER_TYPE_META_PATTERN = 'meta_cancer_type'
MUTATION_META_PATTERN = 'meta_mutations_extended'
CNA_META_PATTERN = 'meta_CNA'
CLINICAL_META_PATTERN = 'meta_clinical'
LOG2_META_PATTERN = 'meta_log2CNA'
EXPRESSION_META_PATTERN = 'meta_expression'
FUSION_META_PATTERN = 'meta_fusions'
METHYLATION_META_PATTERN = 'meta_methylation'
RPPA_META_PATTERN = 'meta_rppa'
TIMELINE_META_PATTERN = 'meta_timeline'
# all recognized meta file types; used to reject unknown meta_file_type values
META_FILE_PATTERNS = [
    STUDY_META_PATTERN,
    CANCER_TYPE_META_PATTERN,
    SEG_META_PATTERN,
    MUTATION_META_PATTERN,
    CNA_META_PATTERN,
    CLINICAL_META_PATTERN,
    LOG2_META_PATTERN,
    EXPRESSION_META_PATTERN,
    FUSION_META_PATTERN,
    METHYLATION_META_PATTERN,
    RPPA_META_PATTERN,
    TIMELINE_META_PATTERN
]
# maps each meta file type to the name of the validator class that
# ValidatorFactory.createValidator should instantiate for its data file
VALIDATOR_IDS = {CNA_META_PATTERN:'CNAValidator',
                 MUTATION_META_PATTERN:'MutationsExtendedValidator',
                 CLINICAL_META_PATTERN:'ClinicalValidator',
                 SEG_META_PATTERN:'SegValidator',
                 LOG2_META_PATTERN:'Log2Validator',
                 EXPRESSION_META_PATTERN:'ExpressionValidator',
                 FUSION_META_PATTERN:'FusionValidator',
                 METHYLATION_META_PATTERN:'MethylationValidator',
                 RPPA_META_PATTERN:'RPPAValidator',
                 TIMELINE_META_PATTERN:'TimelineValidator'}
# ----------------------------------------------------------------------------
# fields allowed in each meta file type, maps to True if required
CNA_META_FIELDS = {
    'cancer_study_identifier': True,
    'genetic_alteration_type': True,
    'datatype': True,
    'stable_id': True,
    'show_profile_in_analysis_tab': True,
    'profile_name': True,
    'profile_description': True,
    'meta_file_type': True,
    'data_file_path': True
}
MUTATION_META_FIELDS = {
    'cancer_study_identifier': True,
    'genetic_alteration_type': True,
    'datatype': True,
    'stable_id': True,
    'show_profile_in_analysis_tab': True,
    'profile_name': True,
    'profile_description': True,
    'meta_file_type': True,
    'data_file_path': True
}
# seg meta files additionally reference the genome build and data file name
SEG_META_FIELDS = {
    'cancer_study_identifier': True,
    'genetic_alteration_type': True,
    'datatype': True,
    'stable_id': True,
    'show_profile_in_analysis_tab': True,
    'profile_name': True,
    'profile_description': True,
    'reference_genome_id': True,
    'data_filename': True,
    'description': True,
    'meta_file_type': True,
    'data_file_path': True
}
LOG2_META_FIELDS = {
    'cancer_study_identifier': True,
    'genetic_alteration_type': True,
    'datatype': True,
    'stable_id': True,
    'show_profile_in_analysis_tab': True,
    'profile_name': True,
    'profile_description': True,
    'meta_file_type': True,
    'data_file_path': True
}
EXPRESSION_META_FIELDS = {
    'cancer_study_identifier': True,
    'genetic_alteration_type': True,
    'datatype': True,
    'stable_id': True,
    'show_profile_in_analysis_tab': True,
    'profile_name': True,
    'profile_description': True,
    'meta_file_type': True,
    'data_file_path': True
}
METHYLATION_META_FIELDS = {
    'cancer_study_identifier': True,
    'genetic_alteration_type': True,
    'datatype': True,
    'stable_id': True,
    'show_profile_in_analysis_tab': True,
    'profile_name': True,
    'profile_description': True,
    'meta_file_type': True,
    'data_file_path': True
}
FUSION_META_FIELDS = {
    'cancer_study_identifier': True,
    'genetic_alteration_type': True,
    'datatype': True,
    'stable_id': True,
    'show_profile_in_analysis_tab': True,
    'profile_name': True,
    'profile_description': True,
    'meta_file_type': True,
    'data_file_path': True
}
RPPA_META_FIELDS = {
    'cancer_study_identifier': True,
    'genetic_alteration_type': True,
    'datatype': True,
    'stable_id': True,
    'show_profile_in_analysis_tab': True,
    'profile_name': True,
    'profile_description': True,
    'meta_file_type': True,
    'data_file_path': True
}
TIMELINE_META_FIELDS = {
    'cancer_study_identifier': True,
    'genetic_alteration_type': True,
    'meta_file_type': True,
    'data_file_path': True
}
# case lists are not meta files but share the same parser
# (see parse_metadata_file with case_list=True)
CASE_LIST_FIELDS = {
    'cancer_study_identifier': True,
    'stable_id': True,
    'case_list_name': True,
    'case_list_description': True,
    'case_list_ids': True,
    'case_list_category': False
}
CLINICAL_META_FIELDS = {
    'cancer_study_identifier': True,
    'genetic_alteration_type': True,
    'datatype': True,
    'stable_id': True,
    'show_profile_in_analysis_tab': True,
    'profile_name': True,
    'profile_description': True,
    'meta_file_type': True,
    'data_file_path': True
}
STUDY_META_FIELDS = {
    'cancer_study_identifier': True,
    'type_of_cancer': True,
    'name': True,
    'description': True,
    'short_name': True,
    'dedicated_color': True,
    'meta_file_type': True,
    'citation': False,
    'pmid': False,
    'groups': False
}
CANCER_TYPE_META_FIELDS = {
    'type_of_cancer': True,
    'name': True,
    'clinical_trial_keywords': True,
    'dedicated_color': True,
    'short_name': True,
    'meta_file_type': True
}
# maps each file type to the dict of fields expected in its meta file
META_FIELD_MAP = {
    STUDY_META_PATTERN: STUDY_META_FIELDS,
    CANCER_TYPE_META_PATTERN: CANCER_TYPE_META_FIELDS,
    CNA_META_PATTERN: CNA_META_FIELDS,
    CLINICAL_META_PATTERN: CLINICAL_META_FIELDS,
    LOG2_META_PATTERN: LOG2_META_FIELDS,
    MUTATION_META_PATTERN: MUTATION_META_FIELDS,
    SEG_META_PATTERN: SEG_META_FIELDS,
    EXPRESSION_META_PATTERN: EXPRESSION_META_FIELDS,
    # was mapped to EXPRESSION_META_FIELDS, leaving METHYLATION_META_FIELDS
    # unused; the two dicts are currently identical so behavior is unchanged
    METHYLATION_META_PATTERN: METHYLATION_META_FIELDS,
    FUSION_META_PATTERN: FUSION_META_FIELDS,
    RPPA_META_PATTERN: RPPA_META_FIELDS,
    TIMELINE_META_PATTERN: TIMELINE_META_FIELDS,
    'case_list': CASE_LIST_FIELDS
}
# ----------------------------------------------------------------------------
# class definitions
class ValidationMessageFormatter(logging.Formatter):

    """Logging formatter with optional fields for data validation messages.

    These fields are:
    data_filename - the name of the file the message is about (if applicable)
    line_number - a line number within the above file (if applicable)
    column_number - a column number within the above file (if applicable)
    cause - the unexpected value found in the input (if applicable)

    If instead a message pertains to multiple values of one of these
    fields (as the result of aggregation by CollapsingLogMessageHandler),
    these will be expected in the field <fieldname>_list.
    """

    def format(self, record, *args, **kwargs):
        """Check consistency of expected fields and format the record."""
        has_position = (
            self.format_aggregated(record, 'line_number', optional=True) or
            self.format_aggregated(record, 'column_number', optional=True))
        has_filename = self.format_aggregated(record,
                                              'data_filename',
                                              optional=True)
        # a line or column reference makes no sense without a filename
        if has_position and not has_filename:
            raise ValueError(
                'Tried to log about a line/column with no filename')
        return super(ValidationMessageFormatter, self).format(record,
                                                              *args,
                                                              **kwargs)

    @staticmethod
    def format_aggregated(record,
                          attr_name,
                          single_fmt='%s',
                          multiple_fmt='[%s]',
                          join_string=', ',
                          max_join=3,
                          optional=False):
        """Format a human-readable string for a field or its <field>_list.

        As would be generated when using the CollapsingLogMessageHandler.
        If `optional` is True and both the field and its list are absent,
        return an empty string.
        """
        single_value = getattr(record, attr_name, None)
        if single_value is not None:
            return single_fmt % single_value
        value_list = getattr(record, attr_name + '_list', None)
        if value_list is not None:
            # show at most max_join values, then summarize the rest
            shown = [str(item) for item in value_list[:max_join]]
            hidden_count = len(value_list) - len(shown)
            if hidden_count != 0:
                shown.append('(%d more)' % hidden_count)
            return multiple_fmt % join_string.join(shown)
        if optional:
            return ''
        raise ValueError(
            "Tried to format an absent non-optional log field: '%s'" %
            attr_name)
class LogfileStyleFormatter(ValidationMessageFormatter):

    """Formatter for validation messages in a simple one-per-line format."""

    def __init__(self):
        """Initialize a logging Formatter with an appropriate format string."""
        super(LogfileStyleFormatter, self).__init__(
            fmt='%(levelname)s: %(file_indicator)s:'
                '%(line_indicator)s%(column_indicator)s'
                ' %(message)s%(cause_indicator)s')

    def format(self, record):
        """Generate descriptions for optional fields and format the record."""
        # fall back to '-' when no filename is known
        record.file_indicator = (
            self.format_aggregated(record,
                                   'data_filename',
                                   optional=True) or '-')
        record.line_indicator = self.format_aggregated(record,
                                                       'line_number',
                                                       ' line %d:',
                                                       ' lines [%s]:',
                                                       optional=True)
        record.column_indicator = self.format_aggregated(record,
                                                         'column_number',
                                                         ' column %d:',
                                                         ' columns [%s]:',
                                                         optional=True)
        record.cause_indicator = self.format_aggregated(
            record,
            'cause',
            "; found in file: '%s'",
            "; found in file: ['%s']",
            join_string="', '",
            optional=True)
        return super(LogfileStyleFormatter, self).format(record)
class MaxLevelTrackingHandler(logging.Handler):
    """Handler that only keeps track of the highest message level emitted."""

    def __init__(self):
        """Start tracking with no messages observed yet."""
        super(MaxLevelTrackingHandler, self).__init__()
        self.max_level = logging.NOTSET

    def emit(self, record):
        """Remember the record's level if it exceeds the maximum so far."""
        if record.levelno > self.max_level:
            self.max_level = record.levelno

    def get_exit_status(self):
        """Translate the maximum level seen into a validator exit status.

        0 means nothing above INFO was logged, 3 warnings, 1 errors and
        2 anything more severe.
        """
        if self.max_level <= logging.INFO:
            return 0
        if self.max_level == logging.WARNING:
            return 3
        if self.max_level == logging.ERROR:
            return 1
        return 2
class Jinja2HtmlHandler(logging.handlers.BufferingHandler):
    """Logging handler that renders buffered records as an HTML report."""

    def __init__(self, study_dir, output_filename, *args, **kwargs):
        """Remember the study directory and output file; set buffer size."""
        self.study_dir = study_dir
        self.output_filename = output_filename
        self.max_level = logging.NOTSET
        self.closed = False
        # the report template lives next to the currently running script
        self.template_dir = os.path.dirname(__file__)
        super(Jinja2HtmlHandler, self).__init__(*args, **kwargs)

    def emit(self, record):
        """Track the maximum level and buffer the record while room remains."""
        if record.levelno > self.max_level:
            self.max_level = record.levelno
        if len(self.buffer) < self.capacity:
            return super(Jinja2HtmlHandler, self).emit(record)

    def flush(self):
        """No-op: emit() caps the buffer and close() renders the output."""
        pass

    def shouldFlush(self, record):
        """Never flush: emit() caps the buffer and close() renders output."""
        return False

    def generateHtml(self):
        """Render the HTML report page from the currently buffered records."""
        # only require the Jinja2 dependency when a report is actually made
        import jinja2
        environment = jinja2.Environment(
            loader=jinja2.FileSystemLoader(self.template_dir),
            # trim whitespace around Jinja2 operators
            trim_blocks=True,
            lstrip_blocks=True)
        template = environment.get_template(
            'validation_report_template.html.jinja')
        rendered = template.render(
            study_dir=self.study_dir,
            record_list=self.buffer,
            max_level=logging.getLevelName(self.max_level))
        with open(self.output_filename, 'w') as output_file:
            output_file.write(rendered)
class CollapsingLogMessageHandler(logging.handlers.MemoryHandler):
    """Logging handler that aggregates repeated log messages into one.

    Buffered validation LogRecords are grouped by the source code line
    that emitted them plus their formatted message, and each group is
    collapsed into a single record before flushing to the target handler.
    """

    def flush(self):
        """Collapse buffered LogRecords and send them to the target handler.

        A field that takes several different values within one group of
        records is replaced by a <field_name>_list field holding all the
        values encountered, in order of first occurrence.
        """
        # bucket records emitted by the same statement with the same message
        groups = OrderedDict()
        for record in self.buffer:
            group_key = (record.module, record.lineno, record.getMessage())
            groups.setdefault(group_key, []).append(record)
        collapsed_buffer = []
        for group in groups.values():
            # collect the fields for the single aggregate record
            merged_fields = {}
            for field_name in group[0].__dict__:
                # ordered de-duplication of the values seen for this field;
                # OrderedDict keys stand in for the missing OrderedSet type
                distinct_values = OrderedDict(
                    (member.__dict__[field_name], None)
                    for member in group)
                if len(distinct_values) == 1:
                    # unanimous value: keep the field as it was
                    merged_fields[field_name] = distinct_values.popitem()[0]
                else:
                    # conflicting values: expose them all in a list field
                    merged_fields[field_name + '_list'] = \
                        list(distinct_values.keys())
            collapsed_buffer.append(logging.makeLogRecord(merged_fields))
        # replace the buffer with the aggregated one and flush as usual
        self.buffer = collapsed_buffer
        super(CollapsingLogMessageHandler, self).flush()

    def shouldFlush(self, record):
        """Flush when emitting an INFO message or a message without a file."""
        return (record.levelno == logging.INFO or
                'data_filename' not in record.__dict__ or
                super(CollapsingLogMessageHandler, self).shouldFlush(record))
class CombiningLoggerAdapter(logging.LoggerAdapter):
    """LoggerAdapter that merges its own context info with that of calls."""

    def process(self, msg, kwargs):
        """Merge the adapter's `extra` dict with the one given in the call.

        Keys supplied in the individual logging call take precedence
        over the adapter's own context.
        """
        combined_extra = self.extra.copy()
        combined_extra.update(kwargs.get('extra', {}))
        kwargs['extra'] = combined_extra
        return msg, kwargs
class ValidatorFactory(object):
    """Factory creating validation objects by their class name."""

    @staticmethod
    def createValidator(validator_type, hugo_entrez_map, logger, meta_dict):
        """Look up `validator_type` in this module and instantiate it."""
        validator_class = globals()[validator_type]
        return validator_class(hugo_entrez_map, logger, meta_dict)
class Validator(object):
    """Abstract validator class for tab-delimited data files.
    Subclassed by validators for specific data file types, which should
    define a 'REQUIRED_HEADERS' attribute listing the required column
    headers and a `REQUIRE_COLUMN_ORDER` boolean stating whether their
    position is significant.
    The methods `processTopLines`, `checkHeader`, `checkLine` and `onComplete`
    may be overridden (calling their superclass methods) to perform any
    appropriate validation tasks.
    """
    # column headers that subclasses require to be present in the file
    REQUIRED_HEADERS = []
    # whether the required headers must appear at their listed positions
    REQUIRE_COLUMN_ORDER = True
    def __init__(self,hugo_entrez_map,logger,meta_dict):
        """Initialize a validator for a particular data file.
        :param hugo_entrez_map: path Entrez to Hugo mapping file
        :param logger: logger instance for writing the log messages
        :param meta_dict: dictionary of fields found in corresponding meta file
        (such as stable id and data file name)
        """
        # STUDY_DIR is a module-level global; presumably set before any
        # validator is instantiated -- TODO confirm
        self.filename = os.path.join(STUDY_DIR, meta_dict['data_file_path'])
        self.filenameShort = os.path.basename(self.filename)
        # 1-based number of the line currently being validated
        self.line_number = 0
        # column headers and their count, filled in by checkHeader()
        self.cols = []
        self.numCols = 0
        self.hugo_entrez_map = hugo_entrez_map
        # (tuple of) newline string(s) read from the file, set by validate()
        self.newlines = ('',)
        # NOTE(review): studyId and headerWritten are not used anywhere in
        # this class; presumably consumed by subclasses or callers -- confirm
        self.studyId = ''
        self.headerWritten = False
        # This one is set to True if file could be parsed/read until the end (happens in onComplete)
        self.fileCouldBeParsed = False
        # adapter that stamps every log message with this file's name
        self.logger = CombiningLoggerAdapter(
            logger,
            extra={'data_filename': self.filenameShort})
        self.meta_dict = meta_dict
        # characters not allowed in column headers, see checkBadChar()
        self.badChars = [' ']
    def validate(self):
        """Validate the data file.
        Reads the file once from start to end, dispatching to the
        processTopLines/checkHeader/checkLine hooks and finally calling
        onComplete(). Fatal format problems are logged and abort parsing.
        """
        self.logger.info('Starting validation of file')
        # NOTE(review): 'rU' mode is needed so data_file.newlines reflects
        # the line breaks seen; it is Python-2-era and removed in Python 3.11
        with open(self.filename, 'rU') as data_file:
            # parse any block of start-of-file comment lines and the tsv header
            top_comments = []
            line_number = 0
            for line_number, line in enumerate(data_file,
                                               start=line_number + 1):
                self.line_number = line_number
                if line.startswith('#'):
                    top_comments.append(line)
                else:
                    header_line = line
                    # end of the file's header
                    break
            # if the loop wasn't broken by a non-commented line
            else:
                self.logger.error('No column header or data found in file',
                                  extra={'line_number': self.line_number})
                return
            # parse start-of-file comment lines, if any
            if not self.processTopLines(top_comments):
                self.logger.error(
                    'Invalid header comments, file cannot be parsed')
                return
            # read five data lines to detect quotes in the tsv file
            first_data_lines = []
            for i, line in enumerate(data_file):
                first_data_lines.append(line)
                if i >= 4:
                    break
            sample_content = header_line + ''.join(first_data_lines)
            dialect = csv.Sniffer().sniff(sample_content)
            # sniffer assumes " if no quote character exists
            if dialect.quotechar == '"' and not (
                    dialect.delimiter + '"' in sample_content or
                    '"' + dialect.delimiter in sample_content):
                dialect.quoting = csv.QUOTE_NONE
            if not self._checkTsvDialect(dialect):
                self.logger.error(
                    'Invalid file format, file cannot be parsed')
                return
            # parse the first non-commented line as the tsv header
            # NOTE(review): reader.next() is Python-2-only syntax
            header_cols = csv.reader([header_line], dialect).next()
            if self.checkHeader(header_cols) > 0:
                self.logger.error(
                    'Invalid column header, file cannot be parsed')
                return
            # read through the data lines of the file, prepending the lines
            # already consumed for dialect sniffing
            csvreader = csv.reader(itertools.chain(first_data_lines,
                                                   data_file),
                                   dialect)
            for line_number, fields in enumerate(csvreader,
                                                 start=line_number + 1):
                self.line_number = line_number
                if fields[0].startswith('#'):
                    self.logger.error(
                        "Data line starting with '#' skipped",
                        extra={'line_number': self.line_number})
                    continue
                self.checkLine(fields)
            # (tuple of) string(s) of the newlines read (for 'rU' mode files)
            self.newlines = data_file.newlines
        # after the entire file has been read
        self.onComplete()
    def onComplete(self):
        """Perform final validations after all lines have been checked.
        Overriding methods should call this superclass method *after* their own
        validations, as it logs the message that validation was completed.
        """
        self._checkLineBreaks()
        # finalize: mark the file as parsed all the way to the end
        self.fileCouldBeParsed = True
        self.logger.info('Validation of file complete')
    def processTopLines(self, line_list):
        """Hook to validate any list of comment lines above the TSV header.
        Return False if these lines are invalid and the file cannot be
        parsed, True otherwise.
        """
        # base implementation accepts any comment lines
        return True
    def checkHeader(self, cols):
        """Check that the header has the correct items and set self.cols.
        :param cols: The list of column headers to be validated
        :return the number of errors found.
        """
        num_errors = 0
        # TODO check for end-of-line whitespace
        self.cols = cols
        self.numCols = len(self.cols)
        num_errors += self._checkRepeatedColumns()
        num_errors += self.checkBadChar()
        # dispatch on whether required headers must be at fixed positions
        if self.REQUIRE_COLUMN_ORDER:
            num_errors += self._checkOrderedRequiredColumns()
        else:
            num_errors += self._checkUnorderedRequiredColumns()
        return num_errors
    def checkLine(self, data):
        """Check data values from a line after the file header.
        :param data: The list of values parsed from the line
        """
        if all(x == '' for x in data):
            self.logger.error("Blank line",
                              extra={'line_number': self.line_number})
        # a line identical to the header is reported as a repeated header
        if data[:self.numCols] == self.cols:
            # the isEnabledFor() guard avoids building the joined 'cause'
            # string when ERROR messages are disabled anyway
            if self.logger.isEnabledFor(logging.ERROR):
                self.logger.error(
                    'Repeated header',
                    extra={'line_number': self.line_number,
                           'cause': ', '.join(data[:self.numCols])})
        line_col_count = len(data)
        if line_col_count != self.numCols:
            self.logger.error('Expected %d columns based on header, '
                              'found %d',
                              self.numCols, line_col_count,
                              extra={'line_number': self.line_number})
        for col_index, col_name in enumerate(self.cols):
            if col_index < line_col_count and data[col_index] == '':
                self.logger.error("Blank cell found in column '%s'",
                                  col_name,
                                  extra={'line_number': self.line_number,
                                         'column_number': col_index + 1})
    def _checkUnorderedRequiredColumns(self):
        """Check for missing column headers, independent of their position.
        Return the number of errors encountered.
        """
        num_errors = 0
        for col_name in self.REQUIRED_HEADERS:
            if col_name not in self.cols:
                self.logger.error(
                    'Missing column: %s',
                    col_name,
                    extra={'line_number': self.line_number,
                           'cause': ', '.join(
                                    self.cols[:len(self.REQUIRED_HEADERS)]) +
                                ', (...)'})
                num_errors += 1
        return num_errors
    def _checkOrderedRequiredColumns(self):
        """Check if the column header for each position is correct.
        Return the number of errors encountered.
        """
        num_errors = 0
        for col_index, col_name in enumerate(self.REQUIRED_HEADERS):
            if col_index >= self.numCols:
                # file has fewer columns than required headers
                num_errors += 1
                self.logger.error(
                    "Invalid header: expected '%s' in column %d,"
                    " found end of line",
                    col_name, col_index + 1,
                    extra={'line_number': self.line_number})
            elif self.cols[col_index] != col_name:
                num_errors += 1
                self.logger.error(
                    "Invalid header: expected '%s' in this column",
                    col_name,
                    extra={'line_number': self.line_number,
                           'column_number': col_index + 1,
                           'cause': self.cols[col_index]})
        return num_errors
    def _checkTsvDialect(self, dialect):
        """Check if a csv.Dialect subclass describes a valid cBio data file."""
        if dialect.delimiter != '\t':
            self.logger.error('Not a tab-delimited file',
                              extra={'cause': 'delimiters of type: %s' %
                                              repr(dialect.delimiter)})
            return False
        if dialect.quoting != csv.QUOTE_NONE:
            # quoting is reported as an error but, unlike a wrong delimiter,
            # does not abort parsing: True is still returned below
            self.logger.error('Found quotation marks around field(s) in the first rows of the file. '
                              'Fields and values should not be surrounded by quotation marks.',
                              extra={'cause': 'quotation marks of type: [%s] ' %
                                              repr(dialect.quotechar)[1:-1]})
        return True
    def _checkLineBreaks(self):
        """Checks line breaks, reports to user."""
        # TODO document these requirements
        # self.newlines is whatever open(..., 'rU') collected while reading
        if "\r\n" in self.newlines:
            self.logger.error('DOS-style line breaks detected (\\r\\n), '
                              'should be Unix-style (\\n)')
        elif "\r" in self.newlines:
            self.logger.error('Classic Mac OS-style line breaks detected '
                              '(\\r), should be Unix-style (\\n)')
        elif self.newlines != '\n':
            self.logger.error('No line breaks recognized in file',
                              extra={'cause': repr(self.newlines)[1:-1]})
    def checkInt(self, value):
        """Checks if a value is an integer."""
        try:
            int(value)
            return True
        except ValueError:
            return False
    def checkSampleId(self, sample_id, column_number):
        """Check whether a sample id is defined, logging an error if not.
        Return True if the sample id was valid, False otherwise.
        """
        # DEFINED_SAMPLE_IDS is a module-level global; presumably filled
        # from the study's clinical file before data files are validated
        if sample_id not in DEFINED_SAMPLE_IDS:
            self.logger.error(
                'Sample ID not defined in clinical file',
                extra={'line_number': self.line_number,
                       'column_number': column_number,
                       'cause': sample_id})
            return False
        return True
    def checkGeneIdentification(self, gene_symbol=None, entrez_id=None):
        """Check if a symbol-Entrez pair is valid, logging an error if not.
        It is considered valid in these three cases:
        1. only the Entrez id is not None, and it is defined in the portal
        2. only the symbol is not None, and it is unambiguously defined in
        the portal
        3. both are given, and the symbol is defined in the portal to match
        the Entrez id
        Return True if the pair was valid, False otherwise.
        """
        # NOTE(review): ids are compared with != / in, so both sides are
        # presumably strings -- confirm how hugo_entrez_map is loaded
        if entrez_id is not None:
            if gene_symbol is not None:
                if gene_symbol not in self.hugo_entrez_map:
                    self.logger.error(
                        'Gene symbol not known to the cBioPortal instance',
                        extra={'line_number': self.line_number,
                               'cause': gene_symbol})
                    return False
                elif self.hugo_entrez_map[gene_symbol] != entrez_id:
                    self.logger.error(
                        'Gene symbol does not match given Entrez id',
                        extra={'line_number': self.line_number,
                               'cause': gene_symbol + ', ' + entrez_id})
                    return False
            else:
                # NOTE(review): scans every mapped value on each call,
                # i.e. O(number of genes) per data line -- confirm acceptable
                if entrez_id not in (self.hugo_entrez_map[sym] for
                                     sym in self.hugo_entrez_map):
                    self.logger.error(
                        'Entrez gene id not known to the cBioPortal instance.',
                        extra={'line_number': self.line_number,
                               'cause': entrez_id})
                    return False
        elif gene_symbol is not None:
            if gene_symbol not in self.hugo_entrez_map:
                self.logger.error(
                    'Gene symbol not known to the cBioPortal instance.',
                    extra={'line_number': self.line_number,
                           'cause': gene_symbol})
                return False
        else:
            self.logger.error(
                'No Entrez id or gene symbol provided for gene',
                extra={'line_number': self.line_number})
            return False
        return True
    def _checkRepeatedColumns(self):
        """Log an error for each column header that occurred earlier.
        Return the number of errors encountered.
        """
        num_errors = 0
        seen = set()
        for col_num, col in enumerate(self.cols):
            if col not in seen:
                seen.add(col)
            else:
                num_errors += 1
                # NOTE(review): col_num is 0-based here, while other checks
                # report 'column_number' as col_index + 1 -- confirm intent
                self.logger.error('Repeated column header',
                                  extra={'line_number': self.line_number,
                                         'column_number': col_num,
                                         'cause': col})
        return num_errors
    def checkBadChar(self):
        """Check for bad things in a header, such as spaces, etc."""
        num_errors = 0
        for col_num, col_name in enumerate(self.cols):
            for bc in self.badChars:
                if bc in col_name:
                    num_errors += 1
                    # NOTE(review): col_num is 0-based here as well,
                    # unlike the col_index + 1 used elsewhere -- confirm
                    self.logger.error("Bad character '%s' detected in header",
                                      bc,
                                      extra={'line_number': self.line_number,
                                             'column_number': col_num,
                                             'cause': col_name})
        return num_errors
class FeaturewiseFileValidator(Validator):
    """Validates a file with rows for features and columns for ids and samples.
    The first few columns (collectively defined in the class attributes
    REQUIRED_HEADERS and OPTIONAL_HEADERS) identify the features
    (e.g. genes) and the rest correspond to the samples.
    Subclasses should override the checkValue(self, value, col_index)
    function to check a value in a sample column, and check the non-sample
    columns by overriding and extending checkLine(self, data). The method
    can find the headers of these columns in self.nonsample_cols.
    """
    OPTIONAL_HEADERS = []
    REQUIRE_COLUMN_ORDER = True
    def __init__(self, *args, **kwargs):
        """Initialize the validator with empty sample bookkeeping."""
        super(FeaturewiseFileValidator, self).__init__(*args, **kwargs)
        # headers of the feature-identifying (non-sample) columns
        self.nonsample_cols = []
        self.num_nonsample_cols = 0
        # sample IDs read from the column headers
        self.sampleIds = []
    def checkHeader(self, cols):
        """Validate the header and read sample IDs from it.
        Return the number of fatal errors.
        """
        num_errors = super(FeaturewiseFileValidator, self).checkHeader(cols)
        if num_errors > 0:
            return num_errors
        # collect the non-sample columns headers, assuming order is required
        self.nonsample_cols = list(self.REQUIRED_HEADERS)
        # start looking for optional cols at the index after the required ones
        col_index = len(self.nonsample_cols)
        # start with the first optional column
        for col_name in self.OPTIONAL_HEADERS:
            # if the next column header in the file is the optional one we
            # are looking for; guard against indexing past the last column
            # when the file defines no further (optional or sample) columns
            if col_index < self.numCols and self.cols[col_index] == col_name:
                # add it to the list of non-sample columns in the file
                self.nonsample_cols.append(col_name)
                # any subsequent optional column will be at the next index
                col_index += 1
            else:
                # look for the next optional column at the same index
                pass
        self.num_nonsample_cols = len(self.nonsample_cols)
        num_errors += self._set_sample_ids_from_columns()
        return num_errors
    def checkLine(self, data):
        """Check the values in a data line."""
        super(FeaturewiseFileValidator, self).checkLine(data)
        for column_index, value in enumerate(data):
            if column_index >= len(self.nonsample_cols):
                # checkValue() should be implemented by subclasses
                self.checkValue(value, column_index)
    def checkValue(self, value, column_index):
        """Override to validate a value in a sample column."""
        raise NotImplementedError('The {} class did not provide a method to '
                                  'validate values in sample columns.'.format(
                                      self.__class__.__name__))
    def _set_sample_ids_from_columns(self):
        """Extracts sample IDs from column headers and set self.sampleIds.
        Return the number of errors encountered.
        """
        num_errors = 0
        # check whether any sample columns are present
        if len(self.cols[self.num_nonsample_cols:]) == 0:
            self.logger.error('No sample columns found',
                              extra={'line_number': self.line_number})
            num_errors += 1
        # set self.sampleIds to the list of sample column names
        self.sampleIds = self.cols[self.num_nonsample_cols:]
        # validate each sample id
        for index, sample_id in enumerate(self.sampleIds):
            if not self.checkSampleId(
                    sample_id,
                    column_number=self.num_nonsample_cols + index + 1):
                num_errors += 1
        return num_errors
class GenewiseFileValidator(FeaturewiseFileValidator):
    """FeaturewiseFileValidator whose feature columns are gene identifiers."""

    OPTIONAL_HEADERS = ['Hugo_Symbol', 'Entrez_Gene_Id']

    def __init__(self, *args, **kwargs):
        super(GenewiseFileValidator, self).__init__(*args, **kwargs)

    def checkHeader(self, cols):
        """Validate the header and read sample IDs from it.
        Return the number of fatal errors.
        """
        num_errors = super(GenewiseFileValidator, self).checkHeader(cols)
        has_hugo = 'Hugo_Symbol' in self.nonsample_cols
        has_entrez = 'Entrez_Gene_Id' in self.nonsample_cols
        if not (has_hugo or has_entrez):
            self.logger.error('At least one of the columns Hugo_Symbol or '
                              'Entrez_Gene_Id needs to be present.',
                              extra={'line_number': self.line_number})
            num_errors += 1
        return num_errors

    def checkLine(self, data):
        """Check the values in a data line."""
        super(GenewiseFileValidator, self).checkLine(data)
        self.checkGeneIdentification(
            self._gene_id_field(data, 'Hugo_Symbol'),
            self._gene_id_field(data, 'Entrez_Gene_Id'))

    def _gene_id_field(self, data, col_name):
        """Return the value of a gene id column, or None if absent/missing."""
        if col_name not in self.nonsample_cols:
            return None
        value = data[self.nonsample_cols.index(col_name)]
        # treat NA or the empty string as a missing value
        if value in ('NA', ''):
            return None
        return value
class CNAValidator(GenewiseFileValidator):
    """Validator for discrete copy-number-alteration data files."""

    ALLOWED_VALUES = ['-2','-1','0','1','2','','NA']

    def checkValue(self, value, col_index):
        """Check a value in a sample column."""
        if value in self.ALLOWED_VALUES:
            return
        # the guard avoids building the joined message when ERROR is disabled
        if self.logger.isEnabledFor(logging.ERROR):
            self.logger.error(
                'Invalid CNA value: possible values are [%s]',
                ', '.join(self.ALLOWED_VALUES),
                extra={'line_number': self.line_number,
                       'column_number': col_index + 1,
                       'cause': value})
class MutationsExtendedValidator(Validator):
    """Sub-class mutations_extended validator."""
    REQUIRED_HEADERS = [
        'Tumor_Sample_Barcode',
        'Amino_Acid_Change'
    ]
    # gene id columns; checkHeader() requires at least one of them
    OPTIONAL_HEADERS = [
        'Hugo_Symbol',
        'Entrez_Gene_Id'
    ]
    REQUIRE_COLUMN_ORDER = False
    # Used for mapping column names to the corresponding function that does a check on the value.
    # This can be done for other filetypes besides maf - not currently implemented.
    # NOTE(review): checkLine() only dispatches on REQUIRED_HEADERS +
    # OPTIONAL_HEADERS, so most entries below are currently unused.
    CHECK_FUNCTION_MAP = {
        'Hugo_Symbol':'checkValidHugo',
        'Entrez_Gene_Id':'checkValidEntrez',
        'Center':'checkCenter',
        'NCBI_Build':'checkNCBIbuild',
        'Chromosome':'checkChromosome',
        'Start_Position':'checkStartPosition',
        'End_Position':'checkEndPosition',
        'Strand':'checkStrand',
        'Variant_Classification':'checkVariantClassification',
        'Variant_Type':'checkVariantType',
        'Reference_Allele':'checkRefAllele',
        'Tumor_Seq_Allele1':'checkTumorSeqAllele',
        'Tumor_Seq_Allele2':'checkTumorSeqAllele',
        # NOTE(review): no method 'checkdbSNP_RS' is defined below (the
        # method is named 'check_dbSNPRS'); dispatching on this column
        # would raise AttributeError in checkLine()
        'dbSNP_RS':'checkdbSNP_RS',
        'dbSNP_Val_Status':'check_dbSNPValStatus',
        'Tumor_Sample_Barcode':'checkTumorSampleBarcode',
        'Matched_Norm_Sample_Barcode':'checkMatchedNormSampleBarcode',
        # NOTE(review): no method 'checkMatchNormSeqAllele' is defined below
        # (see the oddly named 'checkMatchedNormSampleBarcodehNormSeqAllele')
        'Match_Norm_Seq_Allele1':'checkMatchNormSeqAllele',
        'Match_Norm_Seq_Allele2':'checkMatchNormSeqAllele',
        'Tumor_Validation_Allele1':'checkTumorValidationAllele',
        'Tumor_Validation_Allele2':'checkTumorValidationAllele',
        'Match_Norm_Validation_Allele1':'checkMatchNormValidationAllele',
        'Match_Norm_Validation_Allele2':'checkMatchNormValidationAllele',
        'Verification_Status':'checkVerificationStatus',
        'Validation_Status':'checkValidationStatus',
        'Mutation_Status':'checkMutationStatus',
        'Sequencing_Phase':'checkSequencingPhase',
        'Sequence_Source':'checkSequenceSource',
        'Validation_Method':'checkValidationMethod',
        'Score':'checkScore',
        'BAM_File':'checkBAMFile',
        'Sequencer':'checkSequencer',
        't_alt_count':'check_t_alt_count',
        't_ref_count':'check_t_ref_count',
        'n_alt_count':'check_n_alt_count',
        'n_ref_count':'check_n_ref_count',
        'Amino_Acid_Change': 'checkAminoAcidChange'}
    def __init__(self,hugo_entrez_map,logger,meta_dict):
        """Initialize a MAF validator; see the Validator base class."""
        super(MutationsExtendedValidator, self).__init__(hugo_entrez_map,logger,meta_dict)
        # TODO consider making this attribute a local var in in checkLine(),
        # it really only makes sense there
        self.extraCols = []
        # set by the check* methods to replace the generic error message,
        # consumed and reset by printDataInvalidStatement()
        self.extra_exists = False
        self.extra = ''
        # TODO remove the attributes below, they violate the MAF standard
        self.toplinecount = 0
        self.sampleIdsHeader = set()
        self.headerPresent = False
    def checkHeader(self, cols):
        """Validate header, requiring at least one gene id column."""
        num_errors = super(MutationsExtendedValidator, self).checkHeader(cols)
        if not ('Hugo_Symbol' in self.cols or 'Entrez_Gene_Id' in self.cols):
            self.logger.error('At least one of the columns Hugo_Symbol or '
                              'Entrez_Gene_Id needs to be present.',
                              extra={'line_number': self.line_number})
            num_errors += 1
        return num_errors
    def checkLine(self, data):
        """Each value in each line is checked individually.
        From the column name (stored in self.cols), the
        corresponding function to check the value is selected from
        CHECK_FUNCTION_MAP. Will emit a generic warning
        message if this function returns False. If the function sets
        self.extra_exists to True, self.extra will be used in this
        message.
        """
        super(MutationsExtendedValidator, self).checkLine(data)
        for col_name in self.REQUIRED_HEADERS + self.OPTIONAL_HEADERS:
            # NOTE(review): raises ValueError if an optional gene id column
            # is absent, which checkHeader() allows as long as the other one
            # is present -- confirm whether this can occur in practice
            col_index = self.cols.index(col_name)
            value = data[col_index]
            if col_name == 'Tumor_Sample_Barcode':
                self.checkSampleId(value, column_number=col_index + 1)
            # get the checking method for this column if available, or None
            checking_function = getattr(
                self,
                self.CHECK_FUNCTION_MAP[col_name])
            if not checking_function(value):
                self.printDataInvalidStatement(value, col_index)
            elif self.extra_exists or self.extra:
                # a check method may only set an error message when failing
                raise RuntimeError(('Checking function %s set an error '
                                    'message but reported no error') %
                                   checking_function.__name__)
        hugo_symbol = None
        entrez_id = None
        if 'Hugo_Symbol' in self.cols:
            hugo_symbol = data[self.cols.index('Hugo_Symbol')]
            # treat the empty string as a missing value
            if hugo_symbol == '':
                hugo_symbol = None
        if 'Entrez_Gene_Id' in self.cols:
            entrez_id = data[self.cols.index('Entrez_Gene_Id')]
            # treat the empty string as a missing value
            if entrez_id == '':
                entrez_id = None
        self.checkGeneIdentification(hugo_symbol, entrez_id)
    def processTopLines(self, line_list):
        """Processes the top line, which contains sample ids used in study."""
        # TODO remove this function, it violates the MAF standard
        if not line_list:
            return True
        line = line_list[0]
        self.headerPresent = True
        # sample ids are assumed space-separated after the '#' marker
        topline = [x.strip() for x in line.split(' ') if '#' not in x]
        self.toplinecount += 1
        for sampleId in topline:
            self.sampleIdsHeader.add(sampleId)
        return True
    def printDataInvalidStatement(self, value, col_index):
        """Prints out statement for invalid values detected."""
        message = ("Value in column '%s' is invalid" %
                   self.cols[col_index])
        if self.extra_exists:
            # use (and reset) the specific message set by the check method
            message = self.extra
            self.extra = ''
            self.extra_exists = False
        self.logger.error(
            message,
            extra={'line_number': self.line_number,
                   'column_number': col_index + 1,
                   'cause': value})
    # These functions check values of the MAF according to their name.
    # The mapping of which function checks which value is a global value
    # at the top of the script. If any other checks need to be added for
    # another field name, add the map in the global corresponding to
    # the function name that is created to check it.
    def checkValidHugo(self,value):
        """Issue no errors, as this field is checked in `checkLine()`."""
        return True
    def checkValidEntrez(self, value):
        """Issue no errors, as this field is checked in `checkLine()`."""
        return True
    def checkCenter(self, value):
        """Always valid; no constraints implemented for this field."""
        return True
    def checkChromosome(self, value):
        """Accept chromosomes 1-22 (numeric) and X, Y or M."""
        if self.checkInt(value):
            if 1 <= int(value) <= 22:
                return True
            return False
        elif value in ('X', 'Y', 'M'):
            return True
        return False
    def checkStartPosition(self, value):
        """Always valid; no constraints implemented for this field."""
        return True
    def checkEndPosition(self, value):
        """Always valid; no constraints implemented for this field."""
        return True
    def checkTumorSampleBarcode(self, value):
        """Issue no warnings, as this field is checked in `checkLine()`."""
        return True
    def checkNCBIbuild(self, value):
        """Accept a non-integer value or the portal's NCBI build number."""
        # NOTE(review): the `value != ''` test is redundant, checkInt('')
        # is already False. NCBI_BUILD_NUMBER is a module-level constant.
        if self.checkInt(value) and value != '':
            if int(value) != NCBI_BUILD_NUMBER:
                return False
        return True
    def checkStrand(self, value):
        """Only the '+' strand is considered valid."""
        if value != '+':
            return False
        return True
    def checkVariantClassification(self, value):
        """Always valid; no constraints implemented for this field."""
        return True
    def checkVariantType(self, value):
        """Always valid; no constraints implemented for this field."""
        return True
    def checkRefAllele(self, value):
        """Always valid; no constraints implemented for this field."""
        return True
    def checkTumorSeqAllele(self, value):
        """Always valid; no constraints implemented for this field."""
        return True
    def check_dbSNPRS(self, value):
        """Always valid; no constraints implemented for this field."""
        # NOTE(review): CHECK_FUNCTION_MAP refers to 'checkdbSNP_RS',
        # not this name -- confirm which spelling was intended
        return True
    def check_dbSNPValStatus(self, value):
        """Always valid; no constraints implemented for this field."""
        return True
    def checkMatchedNormSampleBarcode(self, value):
        """Require normal sample ids to appear in the top comment line."""
        if value != '':
            if self.headerPresent and value not in self.sampleIdsHeader:
                self.extra = 'Normal sample id not in sample ids from header'
                self.extra_exists = True
                return False
        return True
    def checkMatchedNormSampleBarcodehNormSeqAllele(self, value):
        """Always valid; no constraints implemented for this field."""
        # NOTE(review): this name looks like a botched rename of
        # 'checkMatchNormSeqAllele', which CHECK_FUNCTION_MAP refers to
        return True
    def checkTumorValidationAllele(self, value):
        """Always valid; no constraints implemented for this field."""
        return True
    def checkMatchNormValidationAllele(self, value):
        """Always valid; no constraints implemented for this field."""
        return True
    def checkVerificationStatus(self, value):
        """Accept only empty, 'verified' or 'unknown' (case-insensitive)."""
        if value.lower() not in ('', 'verified', 'unknown'):
            return False
        return True
    def checkValidationStatus(self, value):
        """Accept empty, 'valid', 'unknown', 'na' or 'untested' values."""
        if value == '':
            return True
        if value.lower() not in ('valid', 'unknown', 'na', 'untested'):
            return False
        return True
    def checkMutationStatus(self, value):
        """Always valid; no constraints implemented for this field."""
        return True
    def checkSequencingPhase(self, value):
        """Always valid; no constraints implemented for this field."""
        return True
    def checkSequenceSource(self, value):
        """Always valid; no constraints implemented for this field."""
        return True
    def checkValidationMethod(self, value):
        """Always valid; no constraints implemented for this field."""
        return True
    def checkScore(self, value):
        """Always valid; no constraints implemented for this field."""
        return True
    def checkBAMFile(self, value):
        """Always valid; no constraints implemented for this field."""
        return True
    def checkSequencer(self, value):
        """Always valid; no constraints implemented for this field."""
        return True
    def check_t_alt_count(self, value):
        """Accept an integer or the empty string."""
        if not self.checkInt(value) and value != '':
            return False
        return True
    def check_t_ref_count(self, value):
        """Accept an integer or the empty string."""
        if not self.checkInt(value) and value != '':
            return False
        return True
    def check_n_alt_count(self, value):
        """Accept an integer or the empty string."""
        if not self.checkInt(value) and value != '':
            return False
        return True
    def check_n_ref_count(self, value):
        """Accept an integer or the empty string."""
        if not self.checkInt(value) and value != '':
            return False
        return True
    def checkAminoAcidChange(self, value):
        """Test whether a string is a valid amino acid change specification."""
        # TODO implement this test, may require bundling the hgvs package:
        # https://pypi.python.org/pypi/hgvs/
        return True
class ClinicalValidator(Validator):
"""Validator for clinical data files."""
REQUIRED_HEADERS = [
'PATIENT_ID',
'SAMPLE_ID'
]
REQUIRE_COLUMN_ORDER = False
srv_attrs = None
def __init__(self, *args, **kwargs):
super(ClinicalValidator, self).__init__(*args, **kwargs)
self.sampleIds = set()
self.attr_defs = []
def processTopLines(self, line_list):
"""Parse the the attribute definitions above the column header."""
LINE_NAMES = ('display_name',
'description',
'datatype',
'attribute_type',
'priority')
if not line_list:
self.logger.error(
'No data type header comments found in clinical data file',
extra={'line_number': self.line_number})
return False
if len(line_list) != len(LINE_NAMES):
self.logger.error(
'%d comment lines at start of clinical data file, expected %d',
len(line_list),
len(LINE_NAMES))
return False
# remove the # signs
line_list = [line[1:] for line in line_list]
attr_defs = None
num_attrs = 0
csvreader = csv.reader(line_list,
delimiter='\t',
quoting=csv.QUOTE_NONE,
strict=True)
invalid_values = False
for line_index, row in enumerate(csvreader):
if attr_defs is None:
# make a list of as many lists as long as there are columns
num_attrs = len(row)
attr_defs = [OrderedDict() for i in range(num_attrs)]
elif len(row) != num_attrs:
self.logger.error(
'Varying numbers of columns in clinical header (%d, %d)',
num_attrs,
len(row),
extra={'line_number': line_index + 1})
return False
for col_index, value in enumerate(row):
# test for invalid values in these (otherwise parseable) lines
if value in ('', 'NA'):
self.logger.error(
'Empty %s field in clinical attribute definition',
LINE_NAMES[line_index],
extra={'line_number': line_index + 1,
'column_number': col_index + 1,
'cause': value})
invalid_values = True
if LINE_NAMES[line_index] in ('display_name', 'description'):
pass
elif LINE_NAMES[line_index] == 'datatype':
VALID_DATATYPES = ('STRING', 'NUMBER', 'BOOLEAN')
if value not in VALID_DATATYPES:
self.logger.error(
'Invalid data type definition, must be one of'
' [%s]',
', '.join(VALID_DATATYPES),
extra={'line_number': line_index + 1,
'colum_number': col_index + 1,
'cause': value})
invalid_values = True
elif LINE_NAMES[line_index] == 'attribute_type':
VALID_ATTR_TYPES = ('PATIENT', 'SAMPLE')
if value not in VALID_ATTR_TYPES:
self.logger.error(
'Invalid attribute type definition, must be one of'
' [%s]',
', '.join(VALID_ATTR_TYPES),
extra={'line_number': line_index + 1,
'colum_number': col_index + 1,
'cause': value})
invalid_values = True
elif LINE_NAMES[line_index] == 'priority':
try:
if int(value) < 1:
raise ValueError()
except ValueError:
self.logger.error(
'Priority definition must be a positive integer',
extra={'line_number': line_index + 1,
'column_number': col_index + 1,
'cause': value})
invalid_values = True
else:
raise Exception('Unknown clinical header line name')
attr_defs[col_index][LINE_NAMES[line_index]] = value
self.attr_defs = attr_defs
return not invalid_values
    def checkHeader(self, cols):
        """Check the clinical header row against attr_defs and the portal.

        Verifies that the number of column names matches the number of
        attribute definitions parsed earlier, warns about non-uppercase
        column names, and compares each attribute definition with the
        one known to the portal (self.srv_attrs). Returns the number of
        errors found (including those from the superclass check).
        """
        num_errors = super(ClinicalValidator, self).checkHeader(cols)
        # NOTE(review): the condition compares against self.numCols but the
        # message logs len(self.cols) -- presumably these are equal after the
        # super() call; confirm in the base class.
        if self.numCols != len(self.attr_defs):
            self.logger.error(
                'Varying numbers of columns in clinical header (%d, %d)',
                len(self.attr_defs),
                len(self.cols),
                extra={'line_number': self.line_number})
            num_errors += 1
        for col_index, col_name in enumerate(self.cols):
            if not col_name.isupper():
                self.logger.warning(
                    "Clinical header not in all caps",
                    extra={'line_number': self.line_number,
                           'cause': col_name})
            # look up how the attribute is defined in the portal
            srv_attr_properties = self.srv_attrs.get(col_name)
            if srv_attr_properties is None:
                self.logger.warning(
                    'New %s-level attribute will be added to the portal',
                    self.attr_defs[col_index]['attribute_type'].lower(),
                    extra={'line_number': self.line_number,
                           'column_number': col_index + 1,
                           'cause': col_name})
            else:
                # translate one property having a different format in the API
                transl_attr_properties = {}
                for prop in srv_attr_properties:
                    # define the 'attribute_type' property as it is found in
                    # files, based on 'is_patient_attribute' from the API
                    if prop == 'is_patient_attribute':
                        if srv_attr_properties[prop] == '1':
                            transl_attr_properties['attribute_type'] = 'PATIENT'
                        else:
                            transl_attr_properties['attribute_type'] = 'SAMPLE'
                    # all of the other properties just match the file format
                    elif prop in ('display_name', 'description',
                                  'datatype', 'priority'):
                        transl_attr_properties[prop] = srv_attr_properties[prop]
                # compare values defined in the file with the existing ones
                for attr_property in self.attr_defs[col_index]:
                    value = self.attr_defs[col_index][attr_property]
                    if value != transl_attr_properties[attr_property]:
                        self.logger.error(
                            "%s definition for attribute '%s' does not match "
                            "the portal, '%s' expected",
                            attr_property,
                            col_name,
                            transl_attr_properties[attr_property],
                            # NOTE(review): .keys().index(...) relies on
                            # Python 2 where dict.keys() returns a list;
                            # under Python 3 this would raise AttributeError.
                            extra={'line_number': self.attr_defs[col_index].keys().index(attr_property) + 1,
                                   'column_number': col_index + 1,
                                   'cause': value})
                        num_errors += 1
        return num_errors
def checkLine(self, data):
super(ClinicalValidator, self).checkLine(data)
for col_index, value in enumerate(data):
# TODO check the values in the other cols, required and optional
# TODO check if cancer types in clinical attributes are defined
if col_index == self.cols.index('SAMPLE_ID'):
if DEFINED_SAMPLE_IDS and value not in DEFINED_SAMPLE_IDS:
self.logger.error(
'Defining new sample id in secondary clinical file',
extra={'line_number': self.line_number,
'column_number': col_index + 1,
'cause': value})
self.sampleIds.add(value.strip())
@classmethod
def request_attrs(cls, server_url, logger):
"""Initialize cls.srv_attrs using the portal API."""
cls.srv_attrs = request_from_portal_api(
server_url + '/api/clinicalattributes/patients',
logger,
id_field='attr_id')
srv_sample_attrs = request_from_portal_api(
server_url + '/api/clinicalattributes/samples',
logger,
id_field='attr_id')
# if this happens, the database structure has changed and this script
# needs to be updated
id_overlap = (set(cls.srv_attrs.keys()) &
set(srv_sample_attrs.keys()))
if id_overlap:
raise ValueError(
'The portal at {url} returned these clinical attributes '
'both for samples and for patients: {attrs}'.format(
url=server_url,
attrs=', '.join(id_overlap)))
else:
cls.srv_attrs.update(srv_sample_attrs)
class SegValidator(Validator):
    """Validator for .seg files."""
    # column names required in the header, in this exact order
    REQUIRED_HEADERS = [
        'ID',
        'chrom',
        'loc.start',
        'loc.end',
        'num.mark',
        'seg.mean']
    REQUIRE_COLUMN_ORDER = True
    def checkLine(self, data):
        """Check one .seg data line; validate the sample id in the ID column."""
        super(SegValidator, self).checkLine(data)
        # TODO check values in all other columns too
        for col_index, value in enumerate(data):
            if col_index == self.cols.index(self.REQUIRED_HEADERS[0]):
                self.checkSampleId(value, column_number=col_index + 1)
class Log2Validator(GenewiseFileValidator):
    """Gene-wise validator for log2 data files; value checks not implemented."""
    def checkValue(self, value, col_index):
        """Check a value in a sample column."""
        # TODO check these values
        pass
class ExpressionValidator(GenewiseFileValidator):
    """Gene-wise validator for expression data files; value checks not implemented."""
    def checkValue(self, value, col_index):
        """Check a value in a sample column."""
        # TODO check these values
        pass
class FusionValidator(Validator):
    """Validator for fusion data files; only the header is validated so far."""
    # column names required in the header, in this exact order
    REQUIRED_HEADERS = [
        'Hugo_Symbol',
        'Entrez_Gene_Id',
        'Center',
        'Tumor_Sample_Barcode',
        'Fusion',
        'DNA support',
        'RNA support',
        'Method',
        'Frame']
    REQUIRE_COLUMN_ORDER = True
    def checkLine(self, data):
        """Check one fusion data line (value checks not implemented)."""
        super(FusionValidator, self).checkLine(data)
        # TODO check the values
class MethylationValidator(GenewiseFileValidator):
    """Gene-wise validator for methylation data files; value checks not implemented."""
    def checkValue(self, value, col_index):
        """Check a value in a sample column."""
        # TODO check these values
        pass
class RPPAValidator(FeaturewiseFileValidator):
    """Feature-wise validator for RPPA data files; value checks not implemented."""
    REQUIRED_HEADERS = ['Composite.Element.REF']
    def checkLine(self, data):
        """Check one RPPA data line (value checks not implemented)."""
        super(RPPAValidator, self).checkLine(data)
        # TODO check the values in the first column
        # for rppa, first column should be hugo|antibody, everything after should be sampleIds
    def checkValue(self, value, col_index):
        """Check a value in a sample column."""
        # TODO check these values
        pass
class TimelineValidator(Validator):
    """Validator for timeline data files; only the header is validated so far."""
    # column names required in the header, in this exact order
    REQUIRED_HEADERS = [
        'PATIENT_ID',
        'START_DATE',
        'STOP_DATE',
        'EVENT_TYPE']
    REQUIRE_COLUMN_ORDER = True
    def checkLine(self, data):
        """Check one timeline data line (value checks not implemented)."""
        super(TimelineValidator, self).checkLine(data)
        # TODO check the values
# ------------------------------------------------------------------------------
# Functions
def parse_metadata_file(filename, logger, study_id=None, case_list=False):
    """Validate a metafile and return a dictionary of values read from it.

    Return `None` if the file is invalid. If `case_list` is True,
    validate the file as a case list instead of a meta file.

    :param filename: name of the meta file
    :param logger: the logging.Logger instance to log warnings and errors to
    :param study_id: cancer study id found in previous files (or None). All
        subsequent meta files should comply to this in the field
        'cancer_study_identifier'
    :param case_list: whether this meta file is a case list (special case)
    """
    # human-readable file type for log messages, computed once instead of
    # rebuilding the {True: ..., False: ...}[case_list] dict at each use
    file_type_label = 'case list' if case_list else 'meta'
    metaDictionary = {}
    with open(filename, 'rU') as metafile:
        for line_index, line in enumerate(metafile):
            # every line must be a 'key: value' pair
            if ': ' not in line:
                logger.error(
                    "Invalid %s file entry, no ': ' found",
                    file_type_label,
                    extra={'data_filename': getFileFromFilepath(filename),
                           'line_number': line_index + 1})
                return None
            key, val = line.rstrip().split(': ', 1)
            metaDictionary[key] = val
    if case_list:
        meta_file_type = 'case_list'
    else:
        if 'meta_file_type' not in metaDictionary:
            # (fixed a stray trailing quote in this message)
            logger.error("Missing field 'meta_file_type' in meta file",
                         extra={'data_filename': getFileFromFilepath(filename)})
            # skip this file (can't validate unknown file types)
            return None
        meta_file_type = metaDictionary["meta_file_type"]
        if meta_file_type not in META_FILE_PATTERNS:
            logger.error('Unknown meta_file_type',
                         extra={'data_filename': getFileFromFilepath(filename),
                                'cause': meta_file_type})
            # skip this file (can't validate unknown file types)
            return None
    # check that all mandatory fields for this file type are present
    missing_fields = []
    for field in META_FIELD_MAP[meta_file_type]:
        mandatory = META_FIELD_MAP[meta_file_type][field]
        if field not in metaDictionary and mandatory:
            logger.error("Missing field '%s' in %s file",
                         field,
                         file_type_label,
                         extra={'data_filename': getFileFromFilepath(filename)})
            missing_fields.append(field)
    if missing_fields:
        # skip this file (the fields may be required for validation)
        return None
    # warn about fields not known for this file type
    for field in metaDictionary:
        if field not in META_FIELD_MAP[meta_file_type]:
            logger.warning(
                'Unrecognized field in %s file',
                file_type_label,
                extra={'data_filename': getFileFromFilepath(filename),
                       'cause': field})
    # check that cancer study identifiers across files so far are consistent.
    if (
            meta_file_type != CANCER_TYPE_META_PATTERN and
            study_id is not None and
            study_id != metaDictionary['cancer_study_identifier']):
        logger.error(
            "Cancer study identifier is not consistent across "
            "files, expected '%s'",
            study_id,
            extra={'data_filename': getFileFromFilepath(filename),
                   'cause': metaDictionary['cancer_study_identifier']})
        return None
    # compare a meta_cancer_type file with the portal instance
    if meta_file_type == CANCER_TYPE_META_PATTERN:
        file_cancer_type = metaDictionary.get('type_of_cancer')
        if file_cancer_type not in PORTAL_CANCER_TYPES:
            logger.warning(
                'New disease type will be added to the portal',
                extra={'data_filename': getFileFromFilepath(filename),
                       'cause': file_cancer_type})
        else:
            existing_info = PORTAL_CANCER_TYPES[file_cancer_type]
            invalid_fields_found = False
            for field in metaDictionary:
                if (
                        field in CANCER_TYPE_META_FIELDS and
                        field != 'cancer_type_id' and
                        metaDictionary[field] != existing_info[field]):
                    logger.error(
                        "%s field of cancer type does not match the "
                        "portal, '%s' expected",
                        field,
                        existing_info[field],
                        extra={'data_filename': getFileFromFilepath(filename),
                               'cause': metaDictionary[field]})
                    invalid_fields_found = True
            if invalid_fields_found:
                return None
    # check fields specific to seg meta file
    if meta_file_type == SEG_META_PATTERN:
        if metaDictionary['data_filename'] != metaDictionary['data_file_path']:
            logger.error(
                'data_filename and data_file_path differ in seg data file',
                extra={'data_filename': getFileFromFilepath(filename),
                       'cause': (metaDictionary['data_filename'] + ', ' +
                                 metaDictionary['data_file_path'])})
            return None
        if metaDictionary['reference_genome_id'] != GENOMIC_BUILD_COUNTERPART:
            logger.error(
                'Reference_genome_id is not %s',
                GENOMIC_BUILD_COUNTERPART,
                extra={'data_filename': getFileFromFilepath(filename),
                       'cause': metaDictionary['reference_genome_id']})
            return None
    # if this file type doesn't take a data file, make sure one isn't parsed
    if (
            'data_file_path' in metaDictionary and
            'data_file_path' not in META_FIELD_MAP[meta_file_type]):
        logger.warning(
            "File '%s' referenced by meta file will not be processed as the "
            "attribute data_file_path is not expected in this meta file",
            metaDictionary['data_file_path'],
            extra={'data_filename': getFileFromFilepath(filename),
                   'cause': metaDictionary['data_file_path']})
    return metaDictionary
def process_metadata_files(directory, logger):
    """Parse the meta files in a directory and create data file validators.

    Return a tuple of:
    1. a dict listing the data file validator (or None) for each meta file
       by file type,
    2. a list of cancer type ids that have been defined in this study, and
    3. the study id

    Possible file types are listed in META_FILE_PATTERNS.
    """
    # get filenames for all meta files in the directory
    filenames = [os.path.join(directory, f) for
                 f in os.listdir(directory) if
                 re.search(r'(\b|_)meta(\b|_)', f)]
    study_id = None
    study_cancer_type = None
    validators_by_type = {}
    defined_cancer_types = []
    for filename in filenames:
        meta = parse_metadata_file(filename, logger, study_id)
        if meta is None:
            # invalid meta file; errors were already logged
            continue
        # the first parsable meta file determines the study id
        if study_id is None:
            study_id = meta['cancer_study_identifier']
        meta_file_type = meta['meta_file_type']
        if meta_file_type == STUDY_META_PATTERN:
            if study_cancer_type is not None:
                logger.error(
                    'Encountered a second meta_study file',
                    extra={'data_filename': getFileFromFilepath(filename)})
            study_cancer_type = meta['type_of_cancer']
        if meta_file_type == CANCER_TYPE_META_PATTERN:
            file_cancer_type = meta['type_of_cancer']
            if file_cancer_type in defined_cancer_types:
                logger.error(
                    'Cancer type defined a second time in study',
                    extra={'data_filename': getFileFromFilepath(filename),
                           'cause': file_cancer_type})
            defined_cancer_types.append(meta['type_of_cancer'])
        # create a list for the file type in the dict
        if meta_file_type not in validators_by_type:
            validators_by_type[meta_file_type] = []
        # check if data_file_path is set AND if data_file_path is a supported field according to META_FIELD_MAP:
        if 'data_file_path' in meta and 'data_file_path' in META_FIELD_MAP[meta_file_type]:
            validators_by_type[meta_file_type].append(
                ValidatorFactory.createValidator(
                    VALIDATOR_IDS[meta_file_type],
                    HUGO_ENTREZ_MAP,
                    logger,
                    meta))
        else:
            # meta file without a data file: remember it with no validator
            validators_by_type[meta_file_type].append(None)
    # the study's cancer type must be known to the portal or defined here
    if not (study_cancer_type in PORTAL_CANCER_TYPES or
            study_cancer_type in defined_cancer_types):
        logger.error(
            'Cancer type of study is neither known to the portal nor defined '
            'in a meta_cancer_type file',
            extra={'cause': study_cancer_type})
    return validators_by_type, defined_cancer_types, study_id
def getFileFromFilepath(f):
    """Return the base file name of `f`, ignoring surrounding whitespace."""
    cleaned_path = f.strip()
    return os.path.basename(cleaned_path)
def processCaseListDirectory(caseListDir, cancerStudyId, logger):
    """Validate every case list file found in `caseListDir`.

    Each sample id referenced by a case list must already be present in
    the study-wide DEFINED_SAMPLE_IDS collection.
    """
    logger.info('Validating case lists')
    for entry in os.listdir(caseListDir):
        case = os.path.join(caseListDir, entry)
        case_data = parse_metadata_file(case, logger, cancerStudyId,
                                        case_list=True)
        if case_data is None:
            # unparsable case list; errors were already logged
            continue
        ids_in_list = {raw_id.strip()
                       for raw_id in case_data['case_list_ids'].split('\t')}
        for value in ids_in_list:
            if value not in DEFINED_SAMPLE_IDS:
                logger.error(
                    'Sample id not defined in clinical file',
                    extra={'data_filename': getFileFromFilepath(case),
                           'cause': value})
    logger.info('Validation of case lists complete')
def request_from_portal_api(service_url, logger, id_field=None):
    """Send a request to the portal API and return the decoded JSON object.

    If id_field is specified, expect the object to be a list of dicts,
    and instead return a dict indexed by the specified field of said
    dictionaries. E.g.:
    [{'id': 'spam', 'val1': 1}, {'id':'eggs', 'val1':42}] ->
    {'spam': {'val1': 1}, 'eggs': {'val1': 42}}

    :raises IOError: if the HTTP request fails
    :raises RuntimeError: if id_field is missing from a returned dict
    """
    url_split = service_url.split('/api/', 1)
    logger.info("Requesting %s from portal at '%s'",
                url_split[1], url_split[0])
    response = requests.get(service_url)
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as e:
        # str(e) is portable; e.message is deprecated in Python 2 and
        # removed in Python 3
        raise IOError(
            'Connection error for URL: {url}. Administrator: please check if '
            '[{url}] is accessible. Message: {msg}'.format(url=service_url,
                                                           msg=str(e)))
    json_data = response.json()
    if id_field is None:
        return json_data
    # re-index the list of dicts by the requested field, removing that
    # field from each value dict. E.g.:
    # [{'id': 'spam', 'val1': 1}, {'id':'eggs', 'val1':42}] ->
    # {'spam': {'val1': 1}, 'eggs': {'val1': 42}}
    transformed_dict = {}
    for attr in json_data:
        # shallow copy so the original response object stays untouched
        attr_dict = dict(attr)
        if id_field not in attr_dict:
            raise RuntimeError('Unexpected error while calling web-service. '
                               'Please check if given {url} is correct'.format(url=service_url))
        del attr_dict[id_field]
        transformed_dict[attr[id_field]] = attr_dict
    return transformed_dict
def get_hugo_entrez_map(server_url, logger):
    """Return a dict mapping each Hugo gene symbol to its Entrez gene id.

    Example: {'LOC105377913': '105377913', 'LOC105377912': '105377912', ...}
    """
    # TODO implement an API call for gene aliases and include those in the map
    gene_records = request_from_portal_api(server_url + '/api/genes', logger)
    # each record looks like
    # {'hugo_gene_symbol': 'SRXN1', 'entrez_gene_id': '140809'};
    # flatten the list of dicts into a single symbol -> id mapping
    return dict((record['hugo_gene_symbol'], record['entrez_gene_id'])
                for record in gene_records)
# ------------------------------------------------------------------------------
def interface(args=None):
    """Parse command line options and return the resulting namespace.

    :param args: list of arguments to parse instead of sys.argv (for tests)
    """
    parser = argparse.ArgumentParser(description='cBioPortal meta Validator')
    parser.add_argument('-s', '--study_directory', type=str, required=True,
                        help='path to directory.')
    parser.add_argument('-u', '--url_server', type=str, required=False,
                        default='http://localhost/cbioportal',
                        help='URL to cBioPortal server. You can set this if '
                             'your URL is not http://localhost/cbioportal')
    parser.add_argument('-html', '--html_table', type=str, required=False,
                        help='path to html report output file')
    parser.add_argument('-v', '--verbose', required=False, action="store_true",
                        help='list warnings in addition to fatal errors')
    return parser.parse_args(args)
def main_validate(args):
    """Validate the study directory against the portal; return an exit status.

    Sets up logging handlers (console and optional HTML report), fetches
    reference data from the portal, validates the clinical file first to
    collect sample ids, then all other data files and case lists.
    The returned integer comes from MaxLevelTrackingHandler (2 is also
    returned directly when the study directory does not exist).
    """
    # global study properties
    global STUDY_DIR
    global DEFINED_SAMPLE_IDS
    global DEFINED_CANCER_TYPES
    # global portal properties
    global SERVER_URL
    global PORTAL_CANCER_TYPES
    global HUGO_ENTREZ_MAP
    # get a logger to emit messages
    logger = logging.getLogger(__name__)
    logger.handlers = []
    logger.setLevel(logging.INFO)
    # tracks the highest log level emitted, used as the exit status
    exit_status_handler = MaxLevelTrackingHandler()
    logger.addHandler(exit_status_handler)
    # process the options
    STUDY_DIR = args.study_directory
    SERVER_URL = args.url_server
    html_output_filename = args.html_table
    verbose = False
    if args.verbose:
        verbose = True
    # check existence of directory
    if not os.path.exists(STUDY_DIR):
        print >> sys.stderr, 'directory cannot be found: ' + STUDY_DIR
        return 2
    # set default message handler
    text_handler = logging.StreamHandler(sys.stdout)
    text_handler.setFormatter(LogfileStyleFormatter())
    collapsing_text_handler = CollapsingLogMessageHandler(
        capacity=1e6,
        flushLevel=logging.CRITICAL,
        target=text_handler)
    if not verbose:
        collapsing_text_handler.setLevel(logging.ERROR)
    logger.addHandler(collapsing_text_handler)
    collapsing_html_handler = None
    html_handler = None
    # add html table handler if applicable
    if html_output_filename:
        # just to make sure users get dependency error at start:
        import jinja2  # pylint: disable=import-error
        html_handler = Jinja2HtmlHandler(
            STUDY_DIR,
            html_output_filename,
            capacity=1e5)
        # TODO extend CollapsingLogMessageHandler to flush to multiple targets,
        # and get rid of the duplicated buffering of messages here
        collapsing_html_handler = CollapsingLogMessageHandler(
            capacity=1e6,
            flushLevel=logging.CRITICAL,
            target=html_handler)
        if not verbose:
            collapsing_html_handler.setLevel(logging.ERROR)
        logger.addHandler(collapsing_html_handler)
    # Entrez values for Hugo symbols in the portal
    HUGO_ENTREZ_MAP = get_hugo_entrez_map(SERVER_URL, logger)
    # retrieve cancer types defined in the portal
    PORTAL_CANCER_TYPES = request_from_portal_api(
        SERVER_URL + '/api/cancertypes',
        logger,
        id_field='id')
    # retrieve clinical attributes defined in the portal
    ClinicalValidator.request_attrs(SERVER_URL, logger)
    # walk over the meta files in the dir and get properties of the study
    (validators_by_meta_type,
     DEFINED_CANCER_TYPES,
     study_id) = process_metadata_files(STUDY_DIR, logger)
    if CLINICAL_META_PATTERN not in validators_by_meta_type:
        logger.error('No clinical file detected')
        return exit_status_handler.get_exit_status()
    if len(validators_by_meta_type[CLINICAL_META_PATTERN]) != 1:
        if logger.isEnabledFor(logging.ERROR):
            logger.error(
                'Multiple clinical files detected',
                extra={'cause': ', '.join(
                    validator.filenameShort for validator in
                    validators_by_meta_type[CLINICAL_META_PATTERN])})
    # get the validator for the clinical data file
    clinvalidator = validators_by_meta_type[CLINICAL_META_PATTERN][0]
    # parse the clinical data file to get defined sample ids for this study
    clinvalidator.validate()
    if not clinvalidator.fileCouldBeParsed:
        logger.error("Clinical file could not be parsed. Please fix the problems found there first before continuing.")
        return exit_status_handler.get_exit_status()
    DEFINED_SAMPLE_IDS = clinvalidator.sampleIds
    # validate non-clinical data files
    for meta_file_type in validators_by_meta_type:
        # skip clinical files, they have already been validated
        if meta_file_type == CLINICAL_META_PATTERN:
            continue
        for validator in validators_by_meta_type[meta_file_type]:
            # if there was no validator for this meta file
            if validator is None:
                continue
            validator.validate()
    case_list_dirname = os.path.join(STUDY_DIR, 'case_lists')
    if not os.path.isdir(case_list_dirname):
        logger.warning("No directory named 'case_lists' found")
    else:
        processCaseListDirectory(case_list_dirname, study_id, logger)
    logger.info('Validation complete')
    exit_status = exit_status_handler.get_exit_status()
    # flush the buffered HTML messages and render the report, if requested
    if html_handler is not None:
        collapsing_html_handler.flush()
        html_handler.generateHtml()
    return exit_status
# ------------------------------------------------------------------------------
# vamanos
if __name__ == '__main__':
    try:
        # parse command line options
        args = interface()
        # run the script
        exit_status = main_validate(args)
        # map the numeric exit status to a human-readable outcome
        print >>sys.stderr, ('Validation of study {status}.'.format(
            status={0: 'succeeded',
                    1: 'failed',
                    2: 'not performed as problems occurred',
                    3: 'succeeded with warnings'}.get(exit_status, 'unknown')))
    finally:
        # make sure all log handlers are flushed and closed on exit
        logging.shutdown()
        del logging._handlerList[:]  # workaround for harmless exceptions on exit
# ------------------------------------------------------------------------------
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple MNIST classifier example with JIT XLA and timelines.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.client import timeline
FLAGS = None
def main(_):
  """Train a linear softmax MNIST model and print its test accuracy.

  Runs 1000 training steps; on the last step a full execution trace is
  captured and written to 'timeline.ctf.json' for chrome://tracing/.
  XLA JIT compilation is enabled when FLAGS.xla is truthy.
  """
  # Import data
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

  # Create the model
  x = tf.placeholder(tf.float32, [None, 784])
  w = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  y = tf.matmul(x, w) + b

  # Define loss and optimizer
  y_ = tf.placeholder(tf.float32, [None, 10])

  # The raw formulation of cross-entropy,
  #
  #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
  #                                 reduction_indices=[1]))
  #
  # can be numerically unstable.
  #
  # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
  # outputs of 'y', and then average across the batch.
  cross_entropy = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
  train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

  config = tf.ConfigProto()
  jit_level = 0
  if FLAGS.xla:
    # Turns on XLA JIT compilation.
    jit_level = tf.OptimizerOptions.ON_1
  config.graph_options.optimizer_options.global_jit_level = jit_level
  run_metadata = tf.RunMetadata()
  sess = tf.Session(config=config)
  tf.global_variables_initializer().run(session=sess)
  # Train
  train_loops = 1000
  for i in range(train_loops):
    batch_xs, batch_ys = mnist.train.next_batch(100)

    # Create a timeline for the last loop and export to json to view with
    # chrome://tracing/.
    if i == train_loops - 1:
      sess.run(train_step,
               feed_dict={x: batch_xs,
                          y_: batch_ys},
               options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
               run_metadata=run_metadata)
      trace = timeline.Timeline(step_stats=run_metadata.step_stats)
      # the context manager guarantees the trace file is closed (the
      # original left the file descriptor open)
      with open('timeline.ctf.json', 'w') as trace_file:
        trace_file.write(trace.generate_chrome_trace_format())
    else:
      sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

  # Test trained model
  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  print(sess.run(accuracy,
                 feed_dict={x: mnist.test.images,
                            y_: mnist.test.labels}))
  sess.close()
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--data_dir',
      type=str,
      default='/tmp/tensorflow/mnist/input_data',
      help='Directory for storing input data')
  # NOTE(review): argparse's type=bool does not parse 'False' as falsy --
  # any non-empty string (e.g. --xla=False) evaluates to True. A dedicated
  # str-to-bool converter would be needed; left unchanged here.
  parser.add_argument(
      '--xla', type=bool, default=True, help='Turn xla via JIT on')
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
# NOTE: use a context manager to cleanly close the file descriptor
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple MNIST classifier example with JIT XLA and timelines.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.client import timeline
FLAGS = None
def main(_):
  """Train a linear softmax MNIST model and print its test accuracy.

  Runs 1000 training steps; on the last step a full execution trace is
  captured and written to 'timeline.ctf.json' for chrome://tracing/.
  XLA JIT compilation is enabled when FLAGS.xla is truthy.
  """
  # Import data
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

  # Create the model
  x = tf.placeholder(tf.float32, [None, 784])
  w = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  y = tf.matmul(x, w) + b

  # Define loss and optimizer
  y_ = tf.placeholder(tf.float32, [None, 10])

  # The raw formulation of cross-entropy,
  #
  #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
  #                                 reduction_indices=[1]))
  #
  # can be numerically unstable.
  #
  # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
  # outputs of 'y', and then average across the batch.
  cross_entropy = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
  train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

  config = tf.ConfigProto()
  jit_level = 0
  if FLAGS.xla:
    # Turns on XLA JIT compilation.
    jit_level = tf.OptimizerOptions.ON_1
  config.graph_options.optimizer_options.global_jit_level = jit_level
  run_metadata = tf.RunMetadata()
  sess = tf.Session(config=config)
  tf.global_variables_initializer().run(session=sess)
  # Train
  train_loops = 1000
  for i in range(train_loops):
    batch_xs, batch_ys = mnist.train.next_batch(100)

    # Create a timeline for the last loop and export to json to view with
    # chrome://tracing/.
    if i == train_loops - 1:
      sess.run(train_step,
               feed_dict={x: batch_xs,
                          y_: batch_ys},
               options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
               run_metadata=run_metadata)
      trace = timeline.Timeline(step_stats=run_metadata.step_stats)
      with open('timeline.ctf.json', 'w') as trace_file:
        trace_file.write(trace.generate_chrome_trace_format())
    else:
      sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

  # Test trained model
  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  print(sess.run(accuracy,
                 feed_dict={x: mnist.test.images,
                            y_: mnist.test.labels}))
  sess.close()
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--data_dir',
      type=str,
      default='/tmp/tensorflow/mnist/input_data',
      help='Directory for storing input data')
  # NOTE(review): argparse's type=bool does not parse 'False' as falsy --
  # any non-empty string (e.g. --xla=False) evaluates to True. A dedicated
  # str-to-bool converter would be needed; left unchanged here.
  parser.add_argument(
      '--xla', type=bool, default=True, help='Turn xla via JIT on')
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
# ------------------------------------------------------------------------------
"""API to access local resources for synchronization."""
import unicodedata
from datetime import datetime
import hashlib
import os
import shutil
import re
from nxdrive.client.common import BaseClient, UNACCESSIBLE_HASH
from nxdrive.osi import AbstractOSIntegration
from nxdrive.client.base_automation_client import DOWNLOAD_TMP_FILE_PREFIX
from nxdrive.client.base_automation_client import DOWNLOAD_TMP_FILE_SUFFIX
from nxdrive.logging_config import get_logger
from nxdrive.client.common import safe_filename
from nxdrive.client.common import NotFound
from nxdrive.client.common import DEFAULT_IGNORED_PREFIXES
from nxdrive.client.common import DEFAULT_IGNORED_SUFFIXES
from nxdrive.utils import normalized_path
from nxdrive.utils import safe_long_path
from nxdrive.utils import guess_digest_algorithm
from nxdrive.client.common import FILE_BUFFER_SIZE
from send2trash import send2trash
log = get_logger(__name__)
DEDUPED_BASENAME_PATTERN = ur'^(.*)__(\d{1,3})$'
# Data transfer objects
class FileInfo(object):
    """Data Transfer Object for file info on the Local FS"""

    def __init__(self, root, path, folderish, last_modification_time, size=0,
                 digest_func='md5', check_suspended=None, remote_ref=None):
        # Function to check during long-running processing like digest
        # computation if the synchronization thread needs to be suspended
        self.check_suspended = check_suspended
        self.size = size
        filepath = os.path.join(root, path[1:].replace(u'/', os.path.sep))
        root = unicodedata.normalize('NFC', root)
        path = unicodedata.normalize('NFC', path)
        normalized_filepath = os.path.join(root, path[1:].replace(u'/', os.path.sep))
        self.filepath = normalized_filepath
        # Normalize name on the file system if not normalized
        # See https://jira.nuxeo.com/browse/NXDRIVE-188
        if os.path.exists(filepath) and normalized_filepath != filepath:
            log.debug('Forcing normalization of %r to %r', filepath, normalized_filepath)
            os.rename(filepath, normalized_filepath)
        self.root = root  # the sync root folder local path
        self.path = path  # the truncated path (under the root)
        self.folderish = folderish  # True if a Folder
        self.remote_ref = remote_ref
        # Last OS modification date of the file
        self.last_modification_time = last_modification_time
        # Digest algorithm name (lower-cased hashlib attribute name)
        self._digest_func = digest_func.lower()
        # Precompute base name once and for all as it's often useful in
        # practice
        self.name = os.path.basename(path)

    def __repr__(self):
        return self.__unicode__().encode('ascii', 'ignore')

    def __unicode__(self):
        return u"FileInfo[%s, remote_ref=%s]" % (self.filepath, self.remote_ref)

    def get_digest(self, digest_func=None):
        """Lazy computation of the digest.

        Returns None for folders and UNACCESSIBLE_HASH when the file
        cannot be read; raises ValueError for an unknown algorithm name.
        """
        if self.folderish:
            return None
        digest_func = digest_func if digest_func is not None else self._digest_func
        digester = getattr(hashlib, digest_func, None)
        if digester is None:
            # (fixed typo in the original message: 'Unknow')
            raise ValueError('Unknown digest method: ' + digest_func)
        h = digester()
        try:
            with open(safe_long_path(self.filepath), 'rb') as f:
                while True:
                    # Check if synchronization thread was suspended
                    if self.check_suspended is not None:
                        self.check_suspended('Digest computation: %s'
                                             % self.filepath)
                    buffer_ = f.read(FILE_BUFFER_SIZE)
                    # 'not buffer_' detects EOF for both b'' and '';
                    # the original compared against the str literal ''
                    # which never matches bytes under Python 3
                    if not buffer_:
                        break
                    h.update(buffer_)
        except IOError:
            return UNACCESSIBLE_HASH
        return h.hexdigest()
class LocalClient(BaseClient):
"""Client API implementation for the local file system"""
# TODO: initialize the prefixes and suffix with a dedicated Nuxeo
# Automation operations fetched at manager init time.
def __init__(self, base_folder, digest_func='md5', ignored_prefixes=None,
ignored_suffixes=None, check_suspended=None):
self._case_sensitive = None
# Function to check during long-running processing like digest
# computation if the synchronization thread needs to be suspended
self.check_suspended = check_suspended
if ignored_prefixes is not None:
self.ignored_prefixes = ignored_prefixes
else:
self.ignored_prefixes = DEFAULT_IGNORED_PREFIXES
if ignored_suffixes is not None:
self.ignored_suffixes = ignored_suffixes
else:
self.ignored_suffixes = DEFAULT_IGNORED_SUFFIXES
while len(base_folder) > 1 and base_folder.endswith(os.path.sep):
base_folder = base_folder[:-1]
self.base_folder = base_folder
self._digest_func = digest_func
def is_case_sensitive(self):
if self._case_sensitive is None:
path = os.tempnam(self.base_folder, '.caseTest_')
os.mkdir(path)
if os.path.exists(path.upper()):
self._case_sensitive = False
else:
self._case_sensitive = True
os.rmdir(path)
return self._case_sensitive
def is_temp_file(self, filename):
return (filename.startswith(DOWNLOAD_TMP_FILE_PREFIX) and
filename.endswith(DOWNLOAD_TMP_FILE_SUFFIX))
    def set_readonly(self, ref):
        # Resolve the document reference to an absolute path and mark it
        # read-only on the filesystem.
        path = self._abspath(ref)
        self.set_path_readonly(path)
    def unset_readonly(self, ref):
        # Resolve the document reference to an absolute path and clear its
        # read-only flag on the filesystem.
        path = self._abspath(ref)
        self.unset_path_readonly(path)
def clean_xattr_root(self):
self.unlock_ref(u'/', unlock_parent=False)
try:
self.remove_root_id()
except Exception as e:
pass
finally:
pass
self.clean_xattr_folder_recursive(u'/')
    def clean_xattr_folder_recursive(self, path):
        # Depth-first removal of the remote-id xattr on every child of `path`.
        for child in self.get_children_info(path):
            # unlock the entry (may lift read-only protection), clean it,
            # then restore the previous lock state
            locker = self.unlock_ref(child.path, unlock_parent=False)
            if child.remote_ref is not None:
                self.remove_remote_id(child.path)
            self.lock_ref(child.path, locker)
            if child.folderish:
                self.clean_xattr_folder_recursive(child.path)
def remove_root_id(self):
self.remove_remote_id('/', name='ndriveroot')
def set_root_id(self, value):
self.set_remote_id('/', value, name="ndriveroot")
def get_root_id(self):
return self.get_remote_id('/', name="ndriveroot")
def remove_remote_id(self, ref, name='ndrive'):
# Can be move to another class
path = self._abspath(ref)
log.trace('Removing xattr %s from %s', name, path)
locker = self.unlock_path(path, False)
if AbstractOSIntegration.is_windows():
pathAlt = path + ":" + name
try:
os.remove(pathAlt)
except WindowsError as e:
if e.errno == os.errno.EACCES:
self.unset_path_readonly(path)
os.remove(pathAlt)
self.set_path_readonly(path)
else:
raise e
finally:
self.lock_path(path, locker)
else:
try:
import xattr
if AbstractOSIntegration.is_mac():
xattr.removexattr(path, name)
else:
xattr.removexattr(path, 'user.' + name)
finally:
self.lock_path(path, locker)
def unset_folder_icon(self, ref):
'''
Unset the red icon
'''
if AbstractOSIntegration.is_windows():
# TODO Clean version
desktop_ini_file_path = os.path.join(self._abspath(ref), "desktop.ini")
if AbstractOSIntegration.is_mac():
desktop_ini_file_path = os.path.join(self._abspath(ref), "Icon\r")
if os.path.exists(desktop_ini_file_path):
os.remove(desktop_ini_file_path)
def has_folder_icon(self, ref):
target_folder = self._abspath(ref)
if AbstractOSIntegration.is_mac():
meta_file = os.path.join(target_folder, "Icon\r")
return os.path.exists(meta_file)
if AbstractOSIntegration.is_windows():
meta_file = os.path.join(target_folder, "desktop.ini")
return os.path.exists(meta_file)
return False
def set_folder_icon(self, ref, icon):
if AbstractOSIntegration.is_windows():
self.set_folder_icon_win32(ref, icon)
elif AbstractOSIntegration.is_mac():
self.set_folder_icon_darwin(ref, icon)
def set_folder_icon_win32(self, ref, icon):
import win32con
import win32api
'''
Configure red color icon for a folder Windows / Mac
'''
# Desktop.ini file content for Windows 7 and later.
ini_file_content = """
[.ShellClassInfo]
IconResource=icon_file_path,0
[ViewState]
Mode=
Vid=
FolderType=Generic
"""
# Desktop.ini file content for Windows XP.
ini_file_content_xp = """
[.ShellClassInfo]
IconFile=icon_file_path
IconIndex=0
"""
if AbstractOSIntegration.os_version_below("5.2"):
desktop_ini_content = ini_file_content_xp.replace("icon_file_path", icon)
else:
desktop_ini_content = ini_file_content.replace("icon_file_path", icon)
# Create the desktop.ini file inside the ReadOnly shared folder.
created_ini_file_path = os.path.join(self._abspath(ref), 'desktop.ini')
attrib_command_path = self._abspath(ref)
if not os.path.exists(created_ini_file_path):
try:
create_file = open(created_ini_file_path,'w')
create_file.write(desktop_ini_content)
create_file.close()
win32api.SetFileAttributes(created_ini_file_path, win32con.FILE_ATTRIBUTE_SYSTEM)
win32api.SetFileAttributes(created_ini_file_path, win32con.FILE_ATTRIBUTE_HIDDEN)
win32api.SetFileAttributes(attrib_command_path, win32con.FILE_ATTRIBUTE_SYSTEM)
except Exception as e:
log.error("Exception when setting folder icon : %r", e)
else:
win32api.SetFileAttributes(created_ini_file_path, win32con.FILE_ATTRIBUTE_SYSTEM)
win32api.SetFileAttributes(created_ini_file_path, win32con.FILE_ATTRIBUTE_HIDDEN)
win32api.SetFileAttributes(attrib_command_path, win32con.FILE_ATTRIBUTE_SYSTEM)
def _read_data(self, file_path):
'''The data file contains the mac icons'''
dat = open(file_path, 'rb')
info = dat.read()
dat.close()
return info
def _get_icon_xdata(self):
OSX_FINDER_INFO_ENTRY_SIZE = 32
OSX_FINDER_INFO_ICON_FLAG_INDEX = 8
OSX_FINDER_INFO_ICON_FLAG_VALUE = 4
result = (OSX_FINDER_INFO_ENTRY_SIZE)*[0]
result[OSX_FINDER_INFO_ICON_FLAG_INDEX] = OSX_FINDER_INFO_ICON_FLAG_VALUE
return result
def set_folder_icon_darwin(self, ref, icon):
''' Mac: Configure a folder with a given custom icon
1. Read the com.apple.ResourceFork extended attribute from the icon file
2. Set the com.apple.FinderInfo extended attribute with folder icon flag
3. Create a Icon file (name: Icon\r) inside the target folder
4. Set extended attributes com.apple.FinderInfo & com.apple.ResourceFork for icon file (name: Icon\r)
5. Hide the icon file (name: Icon\r)
'''
try:
import xattr
import stat
target_folder = self._abspath(ref)
# Generate the value for 'com.apple.FinderInfo'
has_icon_xdata = bytes(bytearray(self._get_icon_xdata()))
# Configure 'com.apple.FinderInfo' for the folder
xattr.setxattr(target_folder, xattr.XATTR_FINDERINFO_NAME, has_icon_xdata)
# Create the 'Icon\r' file
meta_file = os.path.join(target_folder, "Icon\r")
if os.path.exists(meta_file):
os.remove(meta_file)
open(meta_file, "w").close()
# Configure 'com.apple.FinderInfo' for the Icon file
xattr.setxattr(meta_file, xattr.XATTR_FINDERINFO_NAME, has_icon_xdata)
# Configure 'com.apple.ResourceFork' for the Icon file
info = self._read_data(icon)
xattr.setxattr(meta_file, xattr.XATTR_RESOURCEFORK_NAME, info)
os.chflags(meta_file, stat.UF_HIDDEN)
except Exception as e:
log.error("Exception when setting folder icon : %s", e)
def set_remote_id(self, ref, remote_id, name='ndrive'):
# Can be move to another class
path = self._abspath(ref)
log.trace('Setting xattr %s with value %r on %r', name, remote_id, path)
locker = self.unlock_path(path, False)
if AbstractOSIntegration.is_windows():
pathAlt = path + ":" + name
try:
if not os.path.exists(path):
raise NotFound()
stat = os.stat(path)
with open(pathAlt, "w") as f:
f.write(remote_id)
# Avoid time modified change
os.utime(path, (stat.st_atime, stat.st_mtime))
except IOError as e:
# Should not happen
if e.errno == os.errno.EACCES:
self.unset_path_readonly(path)
with open(pathAlt, "w") as f:
f.write(remote_id)
self.set_path_readonly(path)
else:
raise e
finally:
self.lock_path(path, locker)
else:
try:
import xattr
if type(remote_id).__name__ == "unicode":
remote_id = unicodedata.normalize('NFC', remote_id).encode('ascii','ignore')
if AbstractOSIntegration.is_mac():
xattr.setxattr(path, name, remote_id)
else:
xattr.setxattr(path, 'user.' + name, remote_id)
finally:
self.lock_path(path, locker)
def get_remote_id(self, ref, name="ndrive"):
# Can be move to another class
path = self._abspath(ref)
return LocalClient.get_path_remote_id(path, name)
@staticmethod
def get_path_remote_id(path, name="ndrive"):
if AbstractOSIntegration.is_windows():
path = path + ":" + name
try:
with open(path, "r") as f:
return f.read()
except:
return None
else:
import xattr
try:
if AbstractOSIntegration.is_mac():
value = xattr.getxattr(path, name)
else:
value = xattr.getxattr(path, 'user.' + name)
if type(value).__name__ == "unicode":
value = unicode(value)
return value
except:
return None
# Getters
def get_info(self, ref, raise_if_missing=True):
os_path = self._abspath(ref)
if not os.path.exists(os_path):
if raise_if_missing:
raise NotFound("Could not found file '%s' under '%s'" % (
ref, self.base_folder))
else:
return None
folderish = os.path.isdir(os_path)
stat_info = os.stat(os_path)
if folderish:
size = 0
else:
size = stat_info.st_size
mtime = datetime.utcfromtimestamp(stat_info.st_mtime)
path = u'/' + os_path[len(safe_long_path(self.base_folder)) + 1:]
path = path.replace(os.path.sep, u'/') # unix style path
# TODO Do we need to load it everytime ?
remote_ref = self.get_remote_id(ref)
# On unix we could use the inode for file move detection but that won't
# work on Windows. To reduce complexity of the code and the possibility
# to have Windows specific bugs, let's not use the unix inode at all.
# uid = str(stat_info.st_ino)
return FileInfo(self.base_folder, path, folderish, mtime,
digest_func=self._digest_func,
check_suspended=self.check_suspended,
remote_ref=remote_ref, size=size)
def is_equal_digests(self, local_digest, remote_digest, local_path, remote_digest_algorithm=None):
if local_digest == remote_digest:
return True
if remote_digest_algorithm is None:
remote_digest_algorithm = guess_digest_algorithm(remote_digest)
if remote_digest_algorithm == self._digest_func:
return False
else:
return self.get_info(local_path).get_digest(digest_func=remote_digest_algorithm) == remote_digest
def get_content(self, ref):
return open(self._abspath(ref), "rb").read()
def is_ignored(self, parent_ref, file_name):
# Add parent_ref to be able to filter on size if needed
ignore = False
# Office temp file
# http://support.microsoft.com/kb/211632
if file_name.startswith("~") and file_name.endswith(".tmp"):
return True
for suffix in self.ignored_suffixes:
if file_name.endswith(suffix):
ignore = True
break
for prefix in self.ignored_prefixes:
if file_name.startswith(prefix):
ignore = True
break
return ignore
def get_children_ref(self, parent_ref, name):
if parent_ref == u'/':
return parent_ref + name
else:
return parent_ref + u'/' + name
def get_children_info(self, ref):
os_path = self._abspath(ref)
result = []
children = os.listdir(os_path)
children.sort()
for child_name in children:
if not (self.is_ignored(ref, child_name) or self.is_temp_file(child_name)):
child_ref = self.get_children_ref(ref, child_name)
try:
result.append(self.get_info(child_ref))
except (OSError, NotFound):
# the child file has been deleted in the mean time or while
# reading some of its attributes
pass
return result
def get_parent_ref(self, ref):
if ref == '/':
return None
parent = ref.rsplit(u'/', 1)[0]
if parent is None:
parent = '/'
return parent
def unlock_ref(self, ref, unlock_parent=True):
path = self._abspath(ref)
return self.unlock_path(path, unlock_parent)
def lock_ref(self, ref, locker):
path = self._abspath(ref)
return self.lock_path(path, locker)
def make_folder(self, parent, name):
locker = self.unlock_ref(parent, False)
os_path, name = self._abspath_deduped(parent, name)
try:
os.mkdir(os_path)
if parent == u"/":
return u"/" + name
return parent + u"/" + name
finally:
self.lock_ref(parent, locker)
def duplicate_file(self, ref):
parent = os.path.dirname(ref)
name = os.path.basename(ref)
locker = self.unlock_ref(parent, False)
os_path, name = self._abspath_deduped(parent, name)
try:
shutil.copy(self._abspath(ref), os_path)
if parent == u"/":
return u"/" + name
return parent + u"/" + name
finally:
self.lock_ref(parent, locker)
def make_file(self, parent, name, content=None):
locker = self.unlock_ref(parent, False)
os_path, name = self._abspath_deduped(parent, name)
try:
with open(os_path, "wb") as f:
if content:
f.write(content)
if parent == u"/":
return u"/" + name
return parent + u"/" + name
finally:
self.lock_ref(parent, locker)
def get_new_file(self, parent, name):
os_path, name = self._abspath_deduped(parent, name)
if parent == u"/":
path = u"/" + name
else:
path = parent + u"/" + name
return path, os_path, name
def update_content(self, ref, content, xattr_names=['ndrive']):
xattrs = {}
for name in xattr_names:
xattrs[name] = self.get_remote_id(ref, name=name)
with open(self._abspath(ref), "wb") as f:
f.write(content)
for name in xattr_names:
if xattrs[name] is not None:
self.set_remote_id(ref, xattrs[name], name=name)
def delete(self, ref):
locker = self.unlock_ref(ref)
os_path = self._abspath(ref)
if not self.exists(ref):
return
# Remove the \\?\ for SHFileOperation on win
if os_path[:4] == '\\\\?\\':
# http://msdn.microsoft.com/en-us/library/cc249520.aspx
# SHFileOperation don't handle \\?\ paths
if len(os_path) > 260:
# Rename to the drive root
info = self.move(ref, '/')
new_ref = info.path
try:
send2trash(self._abspath(new_ref)[4:])
except:
log.debug('Cant use trash for ' + os_path
+ ', delete it')
self.delete_final(new_ref)
return
else:
os_path = os_path[4:]
log.trace('Send ' + os_path + ' to trash')
try:
send2trash(os_path)
except:
log.debug('Cant use trash for ' + os_path
+ ', delete it')
self.delete_final(ref)
finally:
# Dont want to unlock the current deleted
self.lock_ref(ref, locker & 2)
def delete_final(self, ref):
locker = 0
parent_ref = None
try:
if ref is not '/':
parent_ref = os.path.dirname(ref)
locker = self.unlock_ref(parent_ref, False)
self.unset_readonly(ref)
os_path = self._abspath(ref)
if os.path.isfile(os_path):
os.unlink(os_path)
elif os.path.isdir(os_path):
shutil.rmtree(os_path)
finally:
if parent_ref is not None:
self.lock_ref(parent_ref, locker)
def exists(self, ref):
os_path = self._abspath(ref)
return os.path.exists(os_path)
def check_writable(self, ref):
os_path = self._abspath(ref)
return os.access(os_path, os.W_OK)
def rename(self, ref, new_name):
"""Rename a local file or folder
Return the actualized info object.
"""
source_os_path = self._abspath(ref)
parent = ref.rsplit(u'/', 1)[0]
old_name = ref.rsplit(u'/', 1)[1]
parent = u'/' if parent == '' else parent
locker = self.unlock_ref(ref)
try:
# Check if only case renaming
if (old_name != new_name and old_name.lower() == new_name.lower()
and not self.is_case_sensitive()):
# Must use a temp rename as FS is not case sensitive
temp_path = os.tempnam(self._abspath(parent),
'.ren_' + old_name + '_')
if AbstractOSIntegration.is_windows():
import ctypes
ctypes.windll.kernel32.SetFileAttributesW(
unicode(temp_path), 2)
os.rename(source_os_path, temp_path)
source_os_path = temp_path
# Try the os rename part
target_os_path = self._abspath(os.path.join(parent, new_name))
else:
target_os_path, new_name = self._abspath_deduped(parent,
new_name, old_name)
if old_name != new_name:
os.rename(source_os_path, target_os_path)
if AbstractOSIntegration.is_windows():
import ctypes
# See http://msdn.microsoft.com/en-us/library/aa365535%28v=vs.85%29.aspx
ctypes.windll.kernel32.SetFileAttributesW(
unicode(target_os_path), 128)
new_ref = self.get_children_ref(parent, new_name)
return self.get_info(new_ref)
finally:
self.lock_ref(ref, locker & 2)
def move(self, ref, new_parent_ref, name=None):
"""Move a local file or folder into another folder
Return the actualized info object.
"""
if ref == u'/':
raise ValueError("Cannot move the toplevel folder.")
locker = self.unlock_ref(ref)
new_locker = self.unlock_ref(new_parent_ref, False)
source_os_path = self._abspath(ref)
name = name if name is not None else ref.rsplit(u'/', 1)[1]
target_os_path, new_name = self._abspath_deduped(new_parent_ref, name)
try:
shutil.move(source_os_path, target_os_path)
new_ref = self.get_children_ref(new_parent_ref, new_name)
return self.get_info(new_ref)
finally:
self.lock_ref(ref, locker & 2)
self.lock_ref(new_parent_ref, locker & 1 | new_locker)
def get_path(self, abspath):
"""Relative path to the local client from an absolute OS path"""
path = abspath.split(self.base_folder, 1)[1]
return path.replace(os.path.sep, '/')
def _abspath(self, ref):
"""Absolute path on the operating system"""
if not ref.startswith(u'/'):
raise ValueError("LocalClient expects ref starting with '/'")
path_suffix = ref[1:].replace('/', os.path.sep)
path = normalized_path(os.path.join(self.base_folder, path_suffix))
return safe_long_path(path)
def _abspath_safe(self, parent, orig_name):
"""Absolute path on the operating system with deduplicated names"""
# make name safe by removing invalid chars
name = safe_filename(orig_name)
# decompose the name into actionable components
name, suffix = os.path.splitext(name)
os_path = self._abspath(os.path.join(parent, name + suffix))
return os_path
    def _abspath_deduped(self, parent, orig_name, old_name=None):
        """Absolute path on the operating system with deduplicated names

        Sanitizes orig_name and, when an entry with that name already
        exists under parent, appends or increments a ``__N`` counter
        (up to 1000 attempts) until a free name is found.

        :param old_name: current name of the entry being renamed; when
            the candidate equals it the existing path is returned as-is.
        :return: tuple (absolute OS path, deduplicated file name)
        :raises ValueError: when no free name is found after 1000 tries.
        """
        # make name safe by removing invalid chars
        name = safe_filename(orig_name)
        # decompose the name into actionable components
        name, suffix = os.path.splitext(name)
        for _ in range(1000):
            os_path = self._abspath(os.path.join(parent, name + suffix))
            if old_name == (name + suffix):
                return os_path, name + suffix
            if not os.path.exists(os_path):
                return os_path, name + suffix
            #raise ValueError("SHOULD NOT DUPLICATE NOW")
            # this is a duplicated name: bump the __N counter if present,
            # otherwise start one at __1, then retry
            m = re.match(DEDUPED_BASENAME_PATTERN, name)
            if m:
                short_name, increment = m.groups()
                name = u"%s__%d" % (short_name, int(increment) + 1)
            else:
                name = name + u'__1'
        raise ValueError("Failed to de-duplicate '%s' under '%s'" % (
            orig_name, parent))
# NXDRIVE-190: Ignore Emacs autosave files -- changelog note for the module revision below
"""API to access local resources for synchronization."""
import unicodedata
from datetime import datetime
import hashlib
import os
import shutil
import re
from nxdrive.client.common import BaseClient, UNACCESSIBLE_HASH
from nxdrive.osi import AbstractOSIntegration
from nxdrive.client.base_automation_client import DOWNLOAD_TMP_FILE_PREFIX
from nxdrive.client.base_automation_client import DOWNLOAD_TMP_FILE_SUFFIX
from nxdrive.logging_config import get_logger
from nxdrive.client.common import safe_filename
from nxdrive.client.common import NotFound
from nxdrive.client.common import DEFAULT_IGNORED_PREFIXES
from nxdrive.client.common import DEFAULT_IGNORED_SUFFIXES
from nxdrive.utils import normalized_path
from nxdrive.utils import safe_long_path
from nxdrive.utils import guess_digest_algorithm
from nxdrive.client.common import FILE_BUFFER_SIZE
from send2trash import send2trash
log = get_logger(__name__)
DEDUPED_BASENAME_PATTERN = ur'^(.*)__(\d{1,3})$'
# Data transfer objects
class FileInfo(object):
    """Data Transfer Object for file info on the Local FS"""
    def __init__(self, root, path, folderish, last_modification_time, size=0,
                 digest_func='md5', check_suspended=None, remote_ref=None):
        """Capture the state of one local entry.

        :param root: local path of the sync root folder.
        :param path: unix-style path of the entry relative to the root.
        :param folderish: True when the entry is a folder.
        :param last_modification_time: OS mtime as a datetime.
        :param size: file size in bytes (0 for folders).
        :param digest_func: hashlib algorithm name used by get_digest().
        :param check_suspended: optional callable invoked during digest
            computation so the synchronization thread can be suspended.
        :param remote_ref: remote id stored in the entry's xattr, if any.

        Side effect: when the on-disk name is not NFC-normalized, the
        entry is renamed in place to its NFC form (NXDRIVE-188).
        """
        # Function to check during long-running processing like digest
        # computation if the synchronization thread needs to be suspended
        self.check_suspended = check_suspended
        self.size = size
        # Path as found on disk, before unicode normalization.
        filepath = os.path.join(root, path[1:].replace(u'/', os.path.sep))
        root = unicodedata.normalize('NFC', root)
        path = unicodedata.normalize('NFC', path)
        normalized_filepath = os.path.join(root, path[1:].replace(u'/', os.path.sep))
        self.filepath = normalized_filepath
        # Normalize name on the file system if not normalized
        # See https://jira.nuxeo.com/browse/NXDRIVE-188
        if os.path.exists(filepath) and normalized_filepath != filepath:
            log.debug('Forcing normalization of %r to %r', filepath, normalized_filepath)
            os.rename(filepath, normalized_filepath)
        self.root = root  # the sync root folder local path
        self.path = path  # the truncated path (under the root)
        self.folderish = folderish  # True if a Folder
        self.remote_ref = remote_ref
        # Last OS modification date of the file
        self.last_modification_time = last_modification_time
        # Function to use
        self._digest_func = digest_func.lower()
        # Precompute base name once and for all are it's often useful in
        # practice
        self.name = os.path.basename(path)
    def __repr__(self):
        # Python 2: delegate to __unicode__ and drop non-ASCII characters.
        return self.__unicode__().encode('ascii', 'ignore')
    def __unicode__(self):
        return u"FileInfo[%s, remote_ref=%s]" % (self.filepath, self.remote_ref)
    def get_digest(self, digest_func=None):
        """Lazy computation of the digest

        :param digest_func: optional hashlib algorithm name overriding the
            one given at construction time.
        :return: hex digest string, None for folders, or UNACCESSIBLE_HASH
            when the file cannot be read.
        :raises ValueError: when the algorithm name is unknown to hashlib.
        """
        if self.folderish:
            return None
        digest_func = digest_func if digest_func is not None else self._digest_func
        digester = getattr(hashlib, digest_func, None)
        if digester is None:
            raise ValueError('Unknow digest method: ' + digest_func)
        h = digester()
        try:
            with open(safe_long_path(self.filepath), 'rb') as f:
                while True:
                    # Check if synchronization thread was suspended
                    if self.check_suspended is not None:
                        self.check_suspended('Digest computation: %s'
                                             % self.filepath)
                    buffer_ = f.read(FILE_BUFFER_SIZE)
                    if buffer_ == '':
                        break
                    h.update(buffer_)
        except IOError:
            return UNACCESSIBLE_HASH
        return h.hexdigest()
class LocalClient(BaseClient):
"""Client API implementation for the local file system"""
# TODO: initialize the prefixes and suffix with a dedicated Nuxeo
# Automation operations fetched at manager init time.
def __init__(self, base_folder, digest_func='md5', ignored_prefixes=None,
ignored_suffixes=None, check_suspended=None):
self._case_sensitive = None
# Function to check during long-running processing like digest
# computation if the synchronization thread needs to be suspended
self.check_suspended = check_suspended
if ignored_prefixes is not None:
self.ignored_prefixes = ignored_prefixes
else:
self.ignored_prefixes = DEFAULT_IGNORED_PREFIXES
if ignored_suffixes is not None:
self.ignored_suffixes = ignored_suffixes
else:
self.ignored_suffixes = DEFAULT_IGNORED_SUFFIXES
while len(base_folder) > 1 and base_folder.endswith(os.path.sep):
base_folder = base_folder[:-1]
self.base_folder = base_folder
self._digest_func = digest_func
def is_case_sensitive(self):
if self._case_sensitive is None:
path = os.tempnam(self.base_folder, '.caseTest_')
os.mkdir(path)
if os.path.exists(path.upper()):
self._case_sensitive = False
else:
self._case_sensitive = True
os.rmdir(path)
return self._case_sensitive
def is_temp_file(self, filename):
return (filename.startswith(DOWNLOAD_TMP_FILE_PREFIX) and
filename.endswith(DOWNLOAD_TMP_FILE_SUFFIX))
def set_readonly(self, ref):
path = self._abspath(ref)
self.set_path_readonly(path)
def unset_readonly(self, ref):
path = self._abspath(ref)
self.unset_path_readonly(path)
def clean_xattr_root(self):
self.unlock_ref(u'/', unlock_parent=False)
try:
self.remove_root_id()
except Exception as e:
pass
finally:
pass
self.clean_xattr_folder_recursive(u'/')
def clean_xattr_folder_recursive(self, path):
for child in self.get_children_info(path):
locker = self.unlock_ref(child.path, unlock_parent=False)
if child.remote_ref is not None:
self.remove_remote_id(child.path)
self.lock_ref(child.path, locker)
if child.folderish:
self.clean_xattr_folder_recursive(child.path)
def remove_root_id(self):
self.remove_remote_id('/', name='ndriveroot')
def set_root_id(self, value):
self.set_remote_id('/', value, name="ndriveroot")
def get_root_id(self):
return self.get_remote_id('/', name="ndriveroot")
def remove_remote_id(self, ref, name='ndrive'):
# Can be move to another class
path = self._abspath(ref)
log.trace('Removing xattr %s from %s', name, path)
locker = self.unlock_path(path, False)
if AbstractOSIntegration.is_windows():
pathAlt = path + ":" + name
try:
os.remove(pathAlt)
except WindowsError as e:
if e.errno == os.errno.EACCES:
self.unset_path_readonly(path)
os.remove(pathAlt)
self.set_path_readonly(path)
else:
raise e
finally:
self.lock_path(path, locker)
else:
try:
import xattr
if AbstractOSIntegration.is_mac():
xattr.removexattr(path, name)
else:
xattr.removexattr(path, 'user.' + name)
finally:
self.lock_path(path, locker)
def unset_folder_icon(self, ref):
'''
Unset the red icon
'''
if AbstractOSIntegration.is_windows():
# TODO Clean version
desktop_ini_file_path = os.path.join(self._abspath(ref), "desktop.ini")
if AbstractOSIntegration.is_mac():
desktop_ini_file_path = os.path.join(self._abspath(ref), "Icon\r")
if os.path.exists(desktop_ini_file_path):
os.remove(desktop_ini_file_path)
def has_folder_icon(self, ref):
target_folder = self._abspath(ref)
if AbstractOSIntegration.is_mac():
meta_file = os.path.join(target_folder, "Icon\r")
return os.path.exists(meta_file)
if AbstractOSIntegration.is_windows():
meta_file = os.path.join(target_folder, "desktop.ini")
return os.path.exists(meta_file)
return False
def set_folder_icon(self, ref, icon):
if AbstractOSIntegration.is_windows():
self.set_folder_icon_win32(ref, icon)
elif AbstractOSIntegration.is_mac():
self.set_folder_icon_darwin(ref, icon)
def set_folder_icon_win32(self, ref, icon):
import win32con
import win32api
'''
Configure red color icon for a folder Windows / Mac
'''
# Desktop.ini file content for Windows 7 and later.
ini_file_content = """
[.ShellClassInfo]
IconResource=icon_file_path,0
[ViewState]
Mode=
Vid=
FolderType=Generic
"""
# Desktop.ini file content for Windows XP.
ini_file_content_xp = """
[.ShellClassInfo]
IconFile=icon_file_path
IconIndex=0
"""
if AbstractOSIntegration.os_version_below("5.2"):
desktop_ini_content = ini_file_content_xp.replace("icon_file_path", icon)
else:
desktop_ini_content = ini_file_content.replace("icon_file_path", icon)
# Create the desktop.ini file inside the ReadOnly shared folder.
created_ini_file_path = os.path.join(self._abspath(ref), 'desktop.ini')
attrib_command_path = self._abspath(ref)
if not os.path.exists(created_ini_file_path):
try:
create_file = open(created_ini_file_path,'w')
create_file.write(desktop_ini_content)
create_file.close()
win32api.SetFileAttributes(created_ini_file_path, win32con.FILE_ATTRIBUTE_SYSTEM)
win32api.SetFileAttributes(created_ini_file_path, win32con.FILE_ATTRIBUTE_HIDDEN)
win32api.SetFileAttributes(attrib_command_path, win32con.FILE_ATTRIBUTE_SYSTEM)
except Exception as e:
log.error("Exception when setting folder icon : %r", e)
else:
win32api.SetFileAttributes(created_ini_file_path, win32con.FILE_ATTRIBUTE_SYSTEM)
win32api.SetFileAttributes(created_ini_file_path, win32con.FILE_ATTRIBUTE_HIDDEN)
win32api.SetFileAttributes(attrib_command_path, win32con.FILE_ATTRIBUTE_SYSTEM)
def _read_data(self, file_path):
'''The data file contains the mac icons'''
dat = open(file_path, 'rb')
info = dat.read()
dat.close()
return info
def _get_icon_xdata(self):
OSX_FINDER_INFO_ENTRY_SIZE = 32
OSX_FINDER_INFO_ICON_FLAG_INDEX = 8
OSX_FINDER_INFO_ICON_FLAG_VALUE = 4
result = (OSX_FINDER_INFO_ENTRY_SIZE)*[0]
result[OSX_FINDER_INFO_ICON_FLAG_INDEX] = OSX_FINDER_INFO_ICON_FLAG_VALUE
return result
def set_folder_icon_darwin(self, ref, icon):
''' Mac: Configure a folder with a given custom icon
1. Read the com.apple.ResourceFork extended attribute from the icon file
2. Set the com.apple.FinderInfo extended attribute with folder icon flag
3. Create a Icon file (name: Icon\r) inside the target folder
4. Set extended attributes com.apple.FinderInfo & com.apple.ResourceFork for icon file (name: Icon\r)
5. Hide the icon file (name: Icon\r)
'''
try:
import xattr
import stat
target_folder = self._abspath(ref)
# Generate the value for 'com.apple.FinderInfo'
has_icon_xdata = bytes(bytearray(self._get_icon_xdata()))
# Configure 'com.apple.FinderInfo' for the folder
xattr.setxattr(target_folder, xattr.XATTR_FINDERINFO_NAME, has_icon_xdata)
# Create the 'Icon\r' file
meta_file = os.path.join(target_folder, "Icon\r")
if os.path.exists(meta_file):
os.remove(meta_file)
open(meta_file, "w").close()
# Configure 'com.apple.FinderInfo' for the Icon file
xattr.setxattr(meta_file, xattr.XATTR_FINDERINFO_NAME, has_icon_xdata)
# Configure 'com.apple.ResourceFork' for the Icon file
info = self._read_data(icon)
xattr.setxattr(meta_file, xattr.XATTR_RESOURCEFORK_NAME, info)
os.chflags(meta_file, stat.UF_HIDDEN)
except Exception as e:
log.error("Exception when setting folder icon : %s", e)
def set_remote_id(self, ref, remote_id, name='ndrive'):
# Can be move to another class
path = self._abspath(ref)
log.trace('Setting xattr %s with value %r on %r', name, remote_id, path)
locker = self.unlock_path(path, False)
if AbstractOSIntegration.is_windows():
pathAlt = path + ":" + name
try:
if not os.path.exists(path):
raise NotFound()
stat = os.stat(path)
with open(pathAlt, "w") as f:
f.write(remote_id)
# Avoid time modified change
os.utime(path, (stat.st_atime, stat.st_mtime))
except IOError as e:
# Should not happen
if e.errno == os.errno.EACCES:
self.unset_path_readonly(path)
with open(pathAlt, "w") as f:
f.write(remote_id)
self.set_path_readonly(path)
else:
raise e
finally:
self.lock_path(path, locker)
else:
try:
import xattr
if type(remote_id).__name__ == "unicode":
remote_id = unicodedata.normalize('NFC', remote_id).encode('ascii','ignore')
if AbstractOSIntegration.is_mac():
xattr.setxattr(path, name, remote_id)
else:
xattr.setxattr(path, 'user.' + name, remote_id)
finally:
self.lock_path(path, locker)
def get_remote_id(self, ref, name="ndrive"):
# Can be move to another class
path = self._abspath(ref)
return LocalClient.get_path_remote_id(path, name)
@staticmethod
def get_path_remote_id(path, name="ndrive"):
if AbstractOSIntegration.is_windows():
path = path + ":" + name
try:
with open(path, "r") as f:
return f.read()
except:
return None
else:
import xattr
try:
if AbstractOSIntegration.is_mac():
value = xattr.getxattr(path, name)
else:
value = xattr.getxattr(path, 'user.' + name)
if type(value).__name__ == "unicode":
value = unicode(value)
return value
except:
return None
# Getters
def get_info(self, ref, raise_if_missing=True):
os_path = self._abspath(ref)
if not os.path.exists(os_path):
if raise_if_missing:
raise NotFound("Could not found file '%s' under '%s'" % (
ref, self.base_folder))
else:
return None
folderish = os.path.isdir(os_path)
stat_info = os.stat(os_path)
if folderish:
size = 0
else:
size = stat_info.st_size
mtime = datetime.utcfromtimestamp(stat_info.st_mtime)
path = u'/' + os_path[len(safe_long_path(self.base_folder)) + 1:]
path = path.replace(os.path.sep, u'/') # unix style path
# TODO Do we need to load it everytime ?
remote_ref = self.get_remote_id(ref)
# On unix we could use the inode for file move detection but that won't
# work on Windows. To reduce complexity of the code and the possibility
# to have Windows specific bugs, let's not use the unix inode at all.
# uid = str(stat_info.st_ino)
return FileInfo(self.base_folder, path, folderish, mtime,
digest_func=self._digest_func,
check_suspended=self.check_suspended,
remote_ref=remote_ref, size=size)
def is_equal_digests(self, local_digest, remote_digest, local_path, remote_digest_algorithm=None):
if local_digest == remote_digest:
return True
if remote_digest_algorithm is None:
remote_digest_algorithm = guess_digest_algorithm(remote_digest)
if remote_digest_algorithm == self._digest_func:
return False
else:
return self.get_info(local_path).get_digest(digest_func=remote_digest_algorithm) == remote_digest
def get_content(self, ref):
return open(self._abspath(ref), "rb").read()
def is_ignored(self, parent_ref, file_name):
# Add parent_ref to be able to filter on size if needed
ignore = False
# Office temp file
# http://support.microsoft.com/kb/211632
if file_name.startswith("~") and file_name.endswith(".tmp"):
return True
# Emacs auto save file
# http://www.emacswiki.org/emacs/AutoSave
if file_name.startswith("#") and file_name.endswith("#") and len(file_name) > 2:
return True
for suffix in self.ignored_suffixes:
if file_name.endswith(suffix):
ignore = True
break
for prefix in self.ignored_prefixes:
if file_name.startswith(prefix):
ignore = True
break
return ignore
def get_children_ref(self, parent_ref, name):
if parent_ref == u'/':
return parent_ref + name
else:
return parent_ref + u'/' + name
def get_children_info(self, ref):
os_path = self._abspath(ref)
result = []
children = os.listdir(os_path)
children.sort()
for child_name in children:
if not (self.is_ignored(ref, child_name) or self.is_temp_file(child_name)):
child_ref = self.get_children_ref(ref, child_name)
try:
result.append(self.get_info(child_ref))
except (OSError, NotFound):
# the child file has been deleted in the mean time or while
# reading some of its attributes
pass
return result
def get_parent_ref(self, ref):
if ref == '/':
return None
parent = ref.rsplit(u'/', 1)[0]
if parent is None:
parent = '/'
return parent
def unlock_ref(self, ref, unlock_parent=True):
path = self._abspath(ref)
return self.unlock_path(path, unlock_parent)
def lock_ref(self, ref, locker):
path = self._abspath(ref)
return self.lock_path(path, locker)
def make_folder(self, parent, name):
locker = self.unlock_ref(parent, False)
os_path, name = self._abspath_deduped(parent, name)
try:
os.mkdir(os_path)
if parent == u"/":
return u"/" + name
return parent + u"/" + name
finally:
self.lock_ref(parent, locker)
def duplicate_file(self, ref):
parent = os.path.dirname(ref)
name = os.path.basename(ref)
locker = self.unlock_ref(parent, False)
os_path, name = self._abspath_deduped(parent, name)
try:
shutil.copy(self._abspath(ref), os_path)
if parent == u"/":
return u"/" + name
return parent + u"/" + name
finally:
self.lock_ref(parent, locker)
def make_file(self, parent, name, content=None):
locker = self.unlock_ref(parent, False)
os_path, name = self._abspath_deduped(parent, name)
try:
with open(os_path, "wb") as f:
if content:
f.write(content)
if parent == u"/":
return u"/" + name
return parent + u"/" + name
finally:
self.lock_ref(parent, locker)
def get_new_file(self, parent, name):
os_path, name = self._abspath_deduped(parent, name)
if parent == u"/":
path = u"/" + name
else:
path = parent + u"/" + name
return path, os_path, name
def update_content(self, ref, content, xattr_names=('ndrive',)):
    """Overwrite the file content of ``ref`` while preserving the listed
    extended attributes.

    Fix: the default for ``xattr_names`` was a shared mutable list; an
    immutable tuple default is safe and backward-compatible (the value
    is only iterated).
    """
    # Snapshot the attributes before the rewrite truncates the file.
    xattrs = {}
    for name in xattr_names:
        xattrs[name] = self.get_remote_id(ref, name=name)
    with open(self._abspath(ref), "wb") as f:
        f.write(content)
    # Restore any attribute that existed before the rewrite.
    for name in xattr_names:
        if xattrs[name] is not None:
            self.set_remote_id(ref, xattrs[name], name=name)
def delete(self, ref):
    """Move ``ref`` to the OS trash, falling back to a permanent delete.

    Fix: the two bare ``except:`` clauses also swallowed
    KeyboardInterrupt/SystemExit; narrowed to ``except Exception``.
    """
    locker = self.unlock_ref(ref)
    os_path = self._abspath(ref)
    if not self.exists(ref):
        return
    # Remove the \\?\ for SHFileOperation on win
    if os_path[:4] == '\\\\?\\':
        # http://msdn.microsoft.com/en-us/library/cc249520.aspx
        # SHFileOperation don't handle \\?\ paths
        if len(os_path) > 260:
            # Path too long for SHFileOperation: rename to the drive root
            # first so the trashed path fits.
            info = self.move(ref, '/')
            new_ref = info.path
            try:
                send2trash(self._abspath(new_ref)[4:])
            except Exception:
                log.debug('Cant use trash for ' + os_path
                          + ', delete it')
                self.delete_final(new_ref)
            return
        else:
            os_path = os_path[4:]
    log.trace('Send ' + os_path + ' to trash')
    try:
        send2trash(os_path)
    except Exception:
        log.debug('Cant use trash for ' + os_path
                  + ', delete it')
        self.delete_final(ref)
    finally:
        # Dont want to unlock the current deleted item: keep only the
        # parent bit of the locker mask.
        self.lock_ref(ref, locker & 2)
def delete_final(self, ref):
    """Permanently delete ``ref`` (bypassing the trash).

    Fix: ``ref is not '/'`` compared identity against a string literal,
    which is implementation-dependent (and warned about by CPython);
    use ``!=`` for a value comparison.
    """
    locker = 0
    parent_ref = None
    try:
        if ref != '/':
            parent_ref = os.path.dirname(ref)
            locker = self.unlock_ref(parent_ref, False)
        self.unset_readonly(ref)
        os_path = self._abspath(ref)
        if os.path.isfile(os_path):
            os.unlink(os_path)
        elif os.path.isdir(os_path):
            shutil.rmtree(os_path)
    finally:
        if parent_ref is not None:
            self.lock_ref(parent_ref, locker)
def exists(self, ref):
    """True if the OS path behind ``ref`` exists."""
    return os.path.exists(self._abspath(ref))
def check_writable(self, ref):
    """True if the OS path behind ``ref`` is writable by this process."""
    return os.access(self._abspath(ref), os.W_OK)
def rename(self, ref, new_name):
    """Rename a local file or folder
    Return the actualized info object.

    On case-insensitive filesystems, a pure case rename (foo -> FOO) is
    done through an intermediate hidden temp name, because a direct
    rename would be a no-op there.
    """
    source_os_path = self._abspath(ref)
    parent = ref.rsplit(u'/', 1)[0]
    old_name = ref.rsplit(u'/', 1)[1]
    parent = u'/' if parent == '' else parent
    locker = self.unlock_ref(ref)
    try:
        # Check if only case renaming
        if (old_name != new_name and old_name.lower() == new_name.lower()
            and not self.is_case_sensitive()):
            # Must use a temp rename as FS is not case sensitive
            # NOTE(review): os.tempnam is deprecated (removed in
            # Python 3) and racy -- consider tempfile.mktemp/mkstemp.
            temp_path = os.tempnam(self._abspath(parent),
                                   '.ren_' + old_name + '_')
            if AbstractOSIntegration.is_windows():
                import ctypes
                # 2 == FILE_ATTRIBUTE_HIDDEN: hide the temp file.
                ctypes.windll.kernel32.SetFileAttributesW(
                    unicode(temp_path), 2)
            os.rename(source_os_path, temp_path)
            source_os_path = temp_path
            # Try the os rename part
            target_os_path = self._abspath(os.path.join(parent, new_name))
        else:
            # Regular rename: deduplicate the target name first.
            target_os_path, new_name = self._abspath_deduped(parent,
                                                             new_name, old_name)
        if old_name != new_name:
            os.rename(source_os_path, target_os_path)
        if AbstractOSIntegration.is_windows():
            import ctypes
            # See http://msdn.microsoft.com/en-us/library/aa365535%28v=vs.85%29.aspx
            # 128 == FILE_ATTRIBUTE_NORMAL: clear the hidden flag again.
            ctypes.windll.kernel32.SetFileAttributesW(
                unicode(target_os_path), 128)
        new_ref = self.get_children_ref(parent, new_name)
        return self.get_info(new_ref)
    finally:
        self.lock_ref(ref, locker & 2)
def move(self, ref, new_parent_ref, name=None):
    """Move a local file or folder into another folder
    Return the actualized info object.

    Raises ValueError when asked to move the toplevel folder.
    """
    if ref == u'/':
        raise ValueError("Cannot move the toplevel folder.")
    # Both the moved item and the target parent are unlocked for the
    # duration of the move.
    locker = self.unlock_ref(ref)
    new_locker = self.unlock_ref(new_parent_ref, False)
    source_os_path = self._abspath(ref)
    # Keep the original name unless an explicit one was provided.
    name = name if name is not None else ref.rsplit(u'/', 1)[1]
    target_os_path, new_name = self._abspath_deduped(new_parent_ref, name)
    try:
        shutil.move(source_os_path, target_os_path)
        new_ref = self.get_children_ref(new_parent_ref, new_name)
        return self.get_info(new_ref)
    finally:
        # NOTE(review): locker looks like a bitmask combining item and
        # parent lock state; confirm against unlock_path/lock_path before
        # changing these expressions.
        self.lock_ref(ref, locker & 2)
        self.lock_ref(new_parent_ref, locker & 1 | new_locker)
def get_path(self, abspath):
    """Relative path to the local client from an absolute OS path"""
    # Drop everything up to (and including) the base folder, then
    # normalize the separators to forward slashes.
    relative = abspath.split(self.base_folder, 1)[1]
    return relative.replace(os.path.sep, '/')
def _abspath(self, ref):
    """Absolute path on the operating system"""
    if not ref.startswith(u'/'):
        raise ValueError("LocalClient expects ref starting with '/'")
    # Turn the forward-slash ref into an OS path under the base folder.
    suffix = ref[1:].replace('/', os.path.sep)
    return safe_long_path(
        normalized_path(os.path.join(self.base_folder, suffix)))
def _abspath_safe(self, parent, orig_name):
    """Absolute path on the operating system with deduplicated names"""
    # Strip invalid characters, then rebuild the name from its stem and
    # extension before resolving it under the parent.
    stem, ext = os.path.splitext(safe_filename(orig_name))
    return self._abspath(os.path.join(parent, stem + ext))
def _abspath_deduped(self, parent, orig_name, old_name=None):
    """Absolute path on the operating system with deduplicated names

    Appends __1, __2, ... to the base name until a free path is found.
    ``old_name`` short-circuits the check so an item can keep its own
    name during a rename. Gives up after 1000 attempts.
    """
    # make name safe by removing invalid chars
    name = safe_filename(orig_name)
    # decompose the name into actionable components
    name, suffix = os.path.splitext(name)
    for _ in range(1000):
        os_path = self._abspath(os.path.join(parent, name + suffix))
        if old_name == (name + suffix):
            return os_path, name + suffix
        if not os.path.exists(os_path):
            return os_path, name + suffix
        # this is a duplicated file, try to come up with a new name:
        # bump the __N counter if one is already present.
        m = re.match(DEDUPED_BASENAME_PATTERN, name)
        if m:
            short_name, increment = m.groups()
            name = u"%s__%d" % (short_name, int(increment) + 1)
        else:
            name = name + u'__1'
    raise ValueError("Failed to de-duplicate '%s' under '%s'" % (
        orig_name, parent))
|
#!/usr/bin/env python3
# Copyright 2022 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Provides helpers for writing shell-like scripts in Python.
It provides tools to execute commands with similar flexibility to shell scripts and simplifies
command line arguments using `argh` and provides common flags (e.g. -v and -vv) for all of
our command line tools.
Refer to the scripts in ./tools for example usage.
"""
from __future__ import annotations
import functools
import json
import sys
import subprocess
# Bail out early with a readable message on unsupported interpreters;
# the code below uses 3.8+ features.
if sys.version_info.major != 3 or sys.version_info.minor < 8:
    print("Python 3.8 or higher is required.")
    sys.exit(1)
def ensure_package_exists(package: str):
    """Installs the specified package via pip if it does not exist."""
    try:
        __import__(package)
        return
    except ImportError:
        pass
    # Ask before touching the user's environment.
    print(
        f"Missing the python package {package}. Do you want to install? [y/N] ",
        end="",
        flush=True,
    )
    answer = sys.stdin.readline()
    if answer[:1].lower() != "y":
        sys.exit(1)
    subprocess.check_call([sys.executable, "-m", "pip", "install", "--user", package])
ensure_package_exists("argh")
from io import StringIO
from math import ceil
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import DEVNULL, PIPE, STDOUT # type: ignore
from tempfile import gettempdir
from typing import Any, Callable, Dict, Iterable, List, NamedTuple, Optional, TypeVar, Union, cast
import argh # type: ignore
import argparse
import contextlib
import csv
import getpass
import os
import re
import shutil
import traceback
# Root directory of crosvm
CROSVM_ROOT = Path(__file__).parent.parent.parent.resolve()

# Cargo.toml file of crosvm
CROSVM_TOML = CROSVM_ROOT / "Cargo.toml"

# Url of crosvm's gerrit review host
GERRIT_URL = "https://chromium-review.googlesource.com"

# Ensure that we really found the crosvm root directory
assert 'name = "crosvm"' in CROSVM_TOML.read_text()

# File where to store http headers for gcloud authentication
AUTH_HEADERS_FILE = Path(gettempdir()) / f"crosvm_gcloud_auth_headers_{getpass.getuser()}"

PathLike = Union[Path, str]
class CommandResult(NamedTuple):
    """Results of a command execution as returned by Command.run()"""

    stdout: str  # Captured stdout, decoded as text.
    stderr: str  # Captured stderr (None when stderr was not captured).
    returncode: int  # Exit code of the process.
class Command(object):
    """
    Simplified subprocess handling for shell-like scripts.
    ## Arguments
    Arguments are provided as a list similar to subprocess.run():
    >>> Command('cargo', 'build', '--workspace')
    Command('cargo', 'build', '--workspace')
    In contrast to subprocess.run, all strings are split by whitespaces similar to bash:
    >>> Command('cargo build --workspace', '--features foo')
    Command('cargo', 'build', '--workspace', '--features', 'foo')
    In contrast to bash, globs are *not* evaluated, but can easily be provided using Path:
    >>> Command('ls -l', *Path('.').glob('*.toml'))
    Command('ls', '-l', ...)
    None or False are ignored to make it easy to include conditional arguments:
    >>> all = False
    >>> Command('cargo build', '--workspace' if all else None)
    Command('cargo', 'build')
    Commands can be nested, similar to $() subshells in bash. The sub-commands will be executed
    right away and their output will undergo the usual splitting:
    >>> Command('printf "(%s)"', Command('echo foo bar')).stdout()
    '(foo)(bar)'
    Arguments can be explicitly quoted to prevent splitting, it applies to both sub-commands
    as well as strings:
    >>> Command('printf "(%s)"', quoted(Command('echo foo bar'))).stdout()
    '(foo bar)'
    Commands can also be piped into one another:
    >>> wc = Command('wc')
    >>> Command('echo "abcd"').pipe(wc('-c')).stdout()
    '5'
    Programs will be looked up in PATH or absolute paths to programs can be supplied as well:
    >>> Command('/usr/bin/env').executable
    PosixPath('/usr/bin/env')
    ## Executing
    Once built, commands can be executed using `Command.fg()`, to run the command in the
    foreground, visible to the user, or `Command.stdout()` to capture the stdout.
    By default, any non-zero exit code will trigger an Exception and stderr is always directed to
    the user.
    More complex use-cases are supported with the `Command.run()` or `Command.stream()` methods.
    A Command instance can also be passed to the subprocess.run() for any use-cases unsupported by
    this API.
    """
    def __init__(
        self,
        *args: Any,
        stdin_cmd: Optional[Command] = None,
        # NOTE(review): shared mutable default dict. It is never mutated in
        # place below (always copied or rebuilt), but a None sentinel would
        # be safer.
        env_vars: Dict[str, str] = {},
    ):
        self.args = Command.__parse_cmd(args)
        self.stdin_cmd = stdin_cmd
        self.env_vars = env_vars
        # Resolve and validate the executable eagerly so a typo fails at
        # construction time, not when the command is finally run.
        if len(self.args) > 0:
            executable = self.args[0]
            if Path(executable).exists():
                self.executable = Path(executable)
            else:
                path = shutil.which(executable)
                if not path:
                    raise ValueError(f'Required program "{executable}" cannot be found in PATH.')
                elif very_verbose():
                    print(f"Using {executable}: {path}")
                self.executable = Path(path)
    ### High level execution API
    def fg(
        self,
        quiet: bool = False,
        check: bool = True,
    ) -> int:
        """
        Runs a program in the foreground with output streamed to the user.
        >>> Command('true').fg()
        0
        Non-zero exit codes will trigger an Exception
        >>> Command('false').fg()
        Traceback (most recent call last):
        ...
        subprocess.CalledProcessError: Command 'false' returned non-zero exit status 1.
        But can be disabled:
        >>> Command('false').fg(check=False)
        1
        Arguments:
            quiet: Do not show stdout unless the program failed.
            check: Raise an exception if the program returned an error code.
        Returns: The return code of the program.
        """
        self.__debug_print()
        if quiet:
            # Capture stdout+stderr together so it can be replayed on failure.
            result = subprocess.run(
                self.args,
                stdout=PIPE,
                stderr=STDOUT,
                stdin=self.__stdin_stream(),
                env={**os.environ, **self.env_vars},
                text=True,
            )
        else:
            result = subprocess.run(
                self.args,
                stdin=self.__stdin_stream(),
                env={**os.environ, **self.env_vars},
                text=True,
            )
        if result.returncode != 0:
            # In quiet mode the captured output is shown before raising.
            if quiet and check and result.stdout:
                print(result.stdout)
            if check:
                raise subprocess.CalledProcessError(result.returncode, str(self), result.stdout)
        return result.returncode
    def success(self):
        # True iff the command exits with 0; its output is suppressed.
        return self.fg(check=False, quiet=True) == 0
    def stdout(self, check: bool = True):
        """
        Runs a program and returns stdout. Stderr is still directed to the user.
        """
        return self.run(stderr=None, check=check).stdout.strip()
    def lines(self):
        """
        Runs a program and returns stdout line by line. Stderr is still directed to the user.
        """
        return self.stdout().splitlines()
    def write_to(self, filename: Path):
        """
        Writes all program output (stdout and stderr) to the provided file.
        """
        with open(filename, "w") as file:
            file.write(self.run(stderr=STDOUT).stdout)
    def append_to(self, filename: Path):
        """
        Appends all program output (stdout and stderr) to the provided file.
        """
        with open(filename, "a") as file:
            file.write(self.run(stderr=STDOUT).stdout)
    def pipe(self, *args: Any):
        """
        Pipes the output of this command into another process.
        The target can either be another Command or the argument list to build a new command.
        """
        if len(args) == 1 and isinstance(args[0], Command):
            # Rebuild the target command with this command attached as stdin.
            cmd = Command(stdin_cmd=self)
            cmd.args = args[0].args
            cmd.env_vars = self.env_vars.copy()
            return cmd
        else:
            return Command(*args, stdin_cmd=self, env_vars=self.env_vars)
    ### Lower level execution API
    def run(self, check: bool = True, stderr: Optional[int] = PIPE) -> CommandResult:
        """
        Runs a program with stdout, stderr and error code returned.
        >>> Command('echo', 'Foo').run()
        CommandResult(stdout='Foo\\n', stderr='', returncode=0)
        Non-zero exit codes will trigger an Exception by default.
        Arguments:
            check: Raise an exception if the program returned an error code.
        Returns: CommandResult(stdout, stderr, returncode)
        """
        self.__debug_print()
        result = subprocess.run(
            self.args,
            stdout=subprocess.PIPE,
            stderr=stderr,
            stdin=self.__stdin_stream(),
            env={**os.environ, **self.env_vars},
            check=check,
            text=True,
        )
        return CommandResult(result.stdout, result.stderr, result.returncode)
    def stream(self, stderr: Optional[int] = PIPE) -> subprocess.Popen[str]:
        """
        Runs a program and returns the Popen object of the running process.
        """
        self.__debug_print()
        return subprocess.Popen(
            self.args,
            stdout=subprocess.PIPE,
            stderr=stderr,
            stdin=self.__stdin_stream(),
            env={**os.environ, **self.env_vars},
            text=True,
        )
    def env(self, key: str, value: str):
        # Returns a copy of this command with one extra environment variable.
        cmd = Command()
        cmd.args = self.args
        cmd.env_vars = {**self.env_vars, key: value}
        return cmd
    def add_path(self, new_path: str):
        # Returns a copy of this command with `new_path` appended to PATH.
        path_var = self.env_vars.get("PATH", os.environ.get("PATH", ""))
        cmd = Command()
        cmd.args = self.args
        cmd.env_vars = {**self.env_vars, "PATH": f"{path_var}:{new_path}"}
        return cmd
    def foreach(self, arguments: Iterable[Any], batch_size: int = 1):
        """
        Yields a new command for each entry in `arguments`.
        The argument is appended to each command and is intended to be used in
        conjunction with `parallel()` to execute a command on a list of arguments in
        parallel.
        >>> parallel(*cmd('echo').foreach((1, 2, 3))).stdout()
        ['1', '2', '3']
        Arguments can also be batched by setting batch_size > 1, which will append multiple
        arguments to each command.
        >>> parallel(*cmd('echo').foreach((1, 2, 3), batch_size=2)).stdout()
        ['1 2', '3']
        """
        for batch in batched(arguments, batch_size):
            yield self(*batch)
    def __call__(self, *args: Any):
        """Returns a new Command with added arguments.
        >>> cargo = Command('cargo')
        >>> cargo('clippy')
        Command('cargo', 'clippy')
        """
        cmd = Command()
        cmd.args = [*self.args, *Command.__parse_cmd(args)]
        cmd.env_vars = self.env_vars
        return cmd
    def __iter__(self):
        """Allows a `Command` to be treated like a list of arguments for subprocess.run()."""
        return iter(self.args)
    def __str__(self):
        def fmt_arg(arg: str):
            # Quote arguments containing spaces.
            if re.search(r"\s", arg):
                return f'"{arg}"'
            return arg
        stdin = ""
        if self.stdin_cmd:
            stdin = str(self.stdin_cmd) + " | "
        return stdin + " ".join(fmt_arg(a) for a in self.args)
    def __repr__(self):
        stdin = ""
        if self.stdin_cmd:
            stdin = ", stdin_cmd=" + repr(self.stdin_cmd)
        return f"Command({', '.join(repr(a) for a in self.args)}{stdin})"
    ### Private utilities
    def __stdin_stream(self):
        # Starts the stdin command (if any) and hands over its stdout pipe.
        if self.stdin_cmd:
            return self.stdin_cmd.stream().stdout
        return None
    def __debug_print(self):
        if verbose():
            print("$", repr(self) if very_verbose() else str(self))
    @staticmethod
    def __shell_like_split(value: str):
        """Splits a string by spaces, accounting for escape characters and quoting."""
        # Re-use csv parses to split by spaces and new lines, while accounting for quoting.
        for line in csv.reader(StringIO(value), delimiter=" ", quotechar='"'):
            for arg in line:
                if arg:
                    yield arg
    @staticmethod
    def __parse_cmd(args: Iterable[Any]) -> List[str]:
        """Parses command line arguments for Command."""
        res = [parsed for arg in args for parsed in Command.__parse_cmd_args(arg)]
        return res
    @staticmethod
    def __parse_cmd_args(arg: Any) -> List[str]:
        """Parses a mixed type command line argument into a list of strings."""
        if isinstance(arg, Path):
            return [str(arg)]
        elif isinstance(arg, QuotedString):
            return [arg.value]
        elif isinstance(arg, Command):
            # Nested commands run immediately; their stdout becomes arguments.
            return [*Command.__shell_like_split(arg.stdout())]
        elif arg is None or arg is False:
            return []
        else:
            return [*Command.__shell_like_split(str(arg))]
class ParallelCommands(object):
    """
    Allows commands to be run in parallel.
    >>> parallel(cmd('true'), cmd('false')).fg(check=False)
    [0, 1]
    >>> parallel(cmd('echo a'), cmd('echo b')).stdout()
    ['a', 'b']
    """
    def __init__(self, *commands: Command):
        self.commands = commands
    def fg(self, quiet: bool = True, check: bool = True):
        """Runs all commands in parallel; returns their exit codes in order."""
        with ThreadPool(os.cpu_count()) as pool:
            return pool.map(lambda command: command.fg(quiet=quiet, check=check), self.commands)
    def stdout(self):
        """Runs all commands in parallel; returns each command's stdout in order."""
        with ThreadPool(os.cpu_count()) as pool:
            return pool.map(lambda command: command.stdout(), self.commands)
    def success(self):
        """True if all commands returned 0. Output is streamed to the user.

        Fix: removed a stray debug `print(results)` that dumped the raw
        exit-code list to stdout on every call.
        """
        results = self.fg(check=False, quiet=False)
        return all(result == 0 for result in results)
@contextlib.contextmanager
def cwd_context(path: PathLike):
    """Context for temporarily changing the cwd.
    >>> with cwd('/tmp'):
    ...     os.getcwd()
    '/tmp'
    """
    previous = os.getcwd()
    try:
        chdir(path)
        yield
    finally:
        # Always return to where we started, even on error.
        chdir(previous)
def chdir(path: PathLike):
    """Change the current working directory (echoed in very-verbose mode)."""
    if very_verbose():
        print("cd", path)
    os.chdir(path)
class QuotedString(object):
    """
    Prevents the provided string from being split.
    Commands will be executed and their stdout is quoted.
    """
    def __init__(self, value: Any):
        # A nested Command is resolved to its output right away; anything
        # else is stringified.
        if isinstance(value, Command):
            self.value = value.stdout()
            return
        self.value = str(value)
    def __str__(self):
        return f'"{self.value}"'
T = TypeVar("T")
def batched(source: Iterable[T], max_batch_size: int) -> Iterable[List[T]]:
    """
    Returns an iterator over batches of elements from source_list.
    >>> list(batched([1, 2, 3, 4, 5], 2))
    [[1, 2], [3, 4], [5]]

    Elements are spread evenly: no batch exceeds max_batch_size and the
    batch sizes differ by at most one.
    Fix: an empty source used to raise ZeroDivisionError (batch_count of
    zero); it now simply yields no batches.
    """
    source_list = list(source)
    if not source_list:
        return
    # Calculate batch size that spreads elements evenly across all batches
    batch_count = ceil(len(source_list) / max_batch_size)
    batch_size = ceil(len(source_list) / batch_count)
    for index in range(0, len(source_list), batch_size):
        yield source_list[index : min(index + batch_size, len(source_list))]
# Shorthands for the primary building blocks of this module.
quoted = QuotedString  # quoted("a b") prevents word splitting
cmd = Command  # cmd('ls -l') builds a Command
cwd = cwd_context  # with cwd(path): temporarily changes directory
parallel = ParallelCommands  # parallel(cmd(...), ...) runs commands concurrently
def run_main(main_fn: Callable[..., Any]):
    """Runs `main_fn` as the single default command with argh-parsed args."""
    run_commands(default_fn=main_fn)
def run_commands(
    *functions: Callable[..., Any],
    default_fn: Optional[Callable[..., Any]] = None,
    usage: Optional[str] = None,
):
    """
    Allow the user to call the provided functions with command line arguments translated to
    function arguments via argh: https://pythonhosted.org/argh
    """
    try:
        # Add global verbose arguments
        parser = argparse.ArgumentParser(usage=usage)
        add_verbose_args(parser)
        # Add provided commands to parser. Do not use sub-commands if we just got one function.
        if functions:
            argh.add_commands(parser, functions)  # type: ignore
        if default_fn:
            argh.set_default_command(parser, default_fn)  # type: ignore
        # Call main method
        argh.dispatch(parser)  # type: ignore
    except Exception as e:
        # Top-level CLI boundary: show the traceback only in verbose mode,
        # otherwise print just the message; always exit non-zero.
        if verbose():
            traceback.print_exc()
        else:
            print(e)
        sys.exit(1)
def verbose():
    """True when -v/--verbose (or any very-verbose flag) is on the command line."""
    if very_verbose():
        return True
    return "-v" in sys.argv or "--verbose" in sys.argv
def very_verbose():
    """True when -vv/--very-verbose is on the command line."""
    return any(flag in sys.argv for flag in ("-vv", "--very-verbose"))
def add_verbose_args(parser: argparse.ArgumentParser):
    """Registers -v/-vv so they appear in --help.

    The flags themselves are read straight from sys.argv (see verbose()
    and very_verbose()) so they work before parsing happens; this only
    documents them to argparse.
    """
    for flags, help_text in (
        (("--verbose", "-v"), "Print debug output"),
        (("--very-verbose", "-vv"), "Print more debug output"),
    ):
        parser.add_argument(
            *flags,
            action="store_true",
            default=False,
            help=help_text,
        )
def all_tracked_files():
    """Yields every git-tracked path (as Path) that still exists as a file."""
    for line in cmd("git ls-files").lines():
        tracked = Path(line)
        if tracked.is_file():
            yield tracked
def find_source_files(extension: str, ignore: Optional[List[str]] = None):
    """Yields tracked files with the given extension (without leading dot).

    Files under third_party/ and files listed in `ignore` are skipped.
    Fix: `ignore` defaulted to a shared mutable list; use a None sentinel
    (backward-compatible: callers passing a list are unaffected).
    """
    ignored = set(ignore or [])
    for file in all_tracked_files():
        if file.suffix != f".{extension}":
            continue
        # Note: Path.is_relative_to requires Python 3.9+.
        if file.is_relative_to("third_party"):
            continue
        if str(file) in ignored:
            continue
        yield file
def find_scripts(path: Path, shebang: str):
    """Yields files directly in `path` whose first bytes are `#!{shebang}`.

    Fix: the file handle returned by `file.open()` was never closed;
    use a `with` block so it is released deterministically.
    """
    for file in path.glob("*"):
        if not file.is_file():
            continue
        with file.open(errors="ignore") as handle:
            is_script = handle.read(512).startswith(f"#!{shebang}")
        if is_script:
            yield file
def confirm(message: str, default=False):
    """Asks the user a yes/no question; returns `default` on any other input."""
    suffix = "[Y/n]" if default != False else "[y/N]"
    print(message, suffix, end=" ", flush=True)
    answer = sys.stdin.readline().strip()
    if answer in ("y", "Y"):
        return True
    if answer in ("n", "N"):
        return False
    return default
def get_cookie_file():
    """Path of the git http cookie file, or None when not configured."""
    path = cmd("git config http.cookiefile").stdout(check=False)
    return Path(path) if path else None
def get_gcloud_access_token():
    """Returns a gcloud access token, or None when gcloud is not installed."""
    if shutil.which("gcloud") is None:
        return None
    return cmd("gcloud auth print-access-token").stdout(check=False)
@functools.lru_cache(maxsize=None)
def curl_with_git_auth():
    """
    Returns a curl `Command` instance set up to use the same HTTP credentials as git.
    This currently supports two methods:
    - git cookies (the default)
    - gcloud
    Most developers will use git cookies, which are passed to curl.
    gcloud for authorization can be enabled in git via `git config credential.helper gcloud.sh`.
    If enabled in git, this command will also return a curl command using a gcloud access token.
    """
    helper = cmd("git config credential.helper").stdout(check=False)
    if not helper:
        cookie_file = get_cookie_file()
        if not cookie_file or not cookie_file.is_file():
            raise Exception("git http cookiefile is not available.")
        return cmd("curl --cookie", cookie_file)
    if helper.endswith("gcloud.sh"):
        token = get_gcloud_access_token()
        if not token:
            raise Exception("Cannot get gcloud access token.")
        # Write token to a header file so it will not appear in logs or error messages.
        AUTH_HEADERS_FILE.write_text(f"Authorization: Bearer {token}")
        return cmd(f"curl -H @{AUTH_HEADERS_FILE}")
    raise Exception(f"Unsupported git credentials.helper: {helper}")
def strip_xssi(response: str):
    """Strips the XSSI-protection prefix from a gerrit JSON response.

    See https://gerrit-review.googlesource.com/Documentation/rest-api.html#output
    Fix: validation used `assert`, which is silently stripped under
    `python -O`; raise ValueError explicitly instead.
    """
    prefix = ")]}'\n"
    if not response.startswith(prefix):
        raise ValueError(f"Not an XSSI-protected gerrit response: {response[:16]!r}")
    return response[len(prefix):]
def gerrit_api_get(path: str):
    """GETs an unauthenticated gerrit REST endpoint and parses the JSON body."""
    raw = cmd(f"curl --silent --fail {GERRIT_URL}/{path}").stdout()
    return json.loads(strip_xssi(raw))
def gerrit_api_post(path: str, body: Any):
    """POSTs `body` as JSON to an authenticated gerrit REST endpoint
    (under /a/) and returns the parsed JSON response."""
    response = curl_with_git_auth()(
        "--silent --fail",
        "-X POST",
        "-H",
        # Quoting prevents the header and payload from being word-split.
        quoted("Content-Type: application/json"),
        "-d",
        quoted(json.dumps(body)),
        f"{GERRIT_URL}/a/{path}",
    ).stdout()
    if very_verbose():
        print("Response:", response)
    return json.loads(strip_xssi(response))
class GerritChange(object):
    """
    Class to interact with the gerrit /changes/ API.
    For information on the data format returned by the API, see:
    https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#change-info
    """
    # Gerrit change id, taken from the ChangeInfo "id" field.
    id: str
    # Raw ChangeInfo JSON as returned by the query endpoint.
    _data: Any
    def __init__(self, data: Any):
        self._data = data
        self.id = data["id"]
    @functools.cached_property
    def _details(self) -> Any:
        # Fetched lazily and cached for the lifetime of this object.
        return gerrit_api_get(f"changes/{self.id}/detail")
    @functools.cached_property
    def _messages(self) -> List[Any]:
        return gerrit_api_get(f"changes/{self.id}/messages")
    @property
    def status(self):
        return cast(str, self._data["status"])
    def get_votes(self, label_name: str) -> List[int]:
        "Returns the list of votes on `label_name`"
        label_info = self._details.get("labels", {}).get(label_name)
        # NOTE(review): label_info is None when the label is absent, so
        # .get("all") would raise AttributeError -- confirm callers only
        # pass labels configured on the project.
        votes = label_info.get("all", [])
        return [cast(int, v.get("value")) for v in votes]
    def get_messages_by(self, email: str) -> List[str]:
        "Returns all messages posted by the user with the specified `email`."
        return [m["message"] for m in self._messages if m["author"].get("email") == email]
    def review(self, message: str, labels: Dict[str, int]):
        "Post review `message` and set the specified review `labels`"
        print("Posting on", self, ":", message, labels)
        gerrit_api_post(
            f"changes/{self.id}/revisions/current/review",
            {"message": message, "labels": labels},
        )
    def abandon(self, message: str):
        # Abandons the change with the given reason message.
        print("Abandoning", self, ":", message)
        gerrit_api_post(f"changes/{self.id}/abandon", {"message": message})
    @classmethod
    def query(cls, *queries: str):
        "Returns a list of gerrit changes matching the provided list of queries."
        return [cls(c) for c in gerrit_api_get(f"changes/?q={'+'.join(queries)}")]
    def short_url(self):
        return f"http://crrev.com/c/{self._data['_number']}"
    def __str__(self):
        return self.short_url()
    def pretty_info(self):
        return f"{self} - {self._data['subject']}"
def is_cros_repo():
    "Returns true if the crosvm repo is a symlink or worktree to a CrOS repo checkout."
    dot_git = CROSVM_ROOT / ".git"
    # A plain .git directory (not a symlink) means a standalone checkout.
    if dot_git.is_dir() and not dot_git.is_symlink():
        return False
    return (cros_repo_root() / ".repo").exists()
def cros_repo_root():
    "Root directory of the CrOS repo checkout."
    # crosvm lives three levels below the repo root (src/platform/crosvm).
    return (CROSVM_ROOT / "../../..").resolve()
if __name__ == "__main__":
    import doctest
    # Run the doctest examples embedded in the docstrings above.
    doctest.testmod(optionflags=doctest.ELLIPSIS)
tools/impl/common.py: Remove __future__ import
The __future__ import has to come before any other statement, so it runs
before our Python version check and fails with a confusing SyntaxError
on interpreters older than Python 3.7 instead of the friendly message.
All it changed was the evaluation time of type annotations, so after
removing it we only have to wrap a few forward-referenced names into
strings.
BUG=None
TEST=./tools/cl
Change-Id: Ib880fb29fc7a426622d03a996354b13639656c71
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/3833640
Reviewed-by: Daniel Verkamp <72bc170b46ec491f7bdd4359a1c0bfed274de40c@chromium.org>
Commit-Queue: Dennis Kempin <cd09796fb571bec2782819dbfd33307f65b1c778@google.com>
Tested-by: Dennis Kempin <cd09796fb571bec2782819dbfd33307f65b1c778@google.com>
Reviewed-by: Paramjit Oberoi <50f681d392239e3f4d8197fe0238fb3cec4c83fe@google.com>
#!/usr/bin/env python3
# Copyright 2022 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Provides helpers for writing shell-like scripts in Python.
It provides tools to execute commands with similar flexibility to shell scripts and simplifies
command line arguments using `argh` and provides common flags (e.g. -v and -vv) for all of
our command line tools.
Refer to the scripts in ./tools for example usage.
"""
import functools
import json
import sys
import subprocess
# Bail out early with a readable message on unsupported interpreters;
# the code below uses 3.8+ features.
if sys.version_info.major != 3 or sys.version_info.minor < 8:
    print("Python 3.8 or higher is required.")
    print("Hint: Do not use crosvm tools inside cros_sdk.")
    sys.exit(1)
def ensure_package_exists(package: str):
    """Installs the specified package via pip if it does not exist."""
    try:
        __import__(package)
    except ImportError:
        # Not installed: ask before touching the user's environment.
        print(
            f"Missing the python package {package}. Do you want to install? [y/N] ",
            end="",
            flush=True,
        )
        answer = sys.stdin.readline()
        if answer[:1].lower() != "y":
            sys.exit(1)
        subprocess.check_call([sys.executable, "-m", "pip", "install", "--user", package])
ensure_package_exists("argh")
from io import StringIO
from math import ceil
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import DEVNULL, PIPE, STDOUT # type: ignore
from tempfile import gettempdir
from typing import Any, Callable, Dict, Iterable, List, NamedTuple, Optional, TypeVar, Union, cast
import argh # type: ignore
import argparse
import contextlib
import csv
import getpass
import os
import re
import shutil
import traceback
# Root directory of crosvm
CROSVM_ROOT = Path(__file__).parent.parent.parent.resolve()

# Cargo.toml file of crosvm
CROSVM_TOML = CROSVM_ROOT / "Cargo.toml"

# Url of crosvm's gerrit review host
GERRIT_URL = "https://chromium-review.googlesource.com"

# Ensure that we really found the crosvm root directory
assert 'name = "crosvm"' in CROSVM_TOML.read_text()

# File where to store http headers for gcloud authentication
AUTH_HEADERS_FILE = Path(gettempdir()) / f"crosvm_gcloud_auth_headers_{getpass.getuser()}"

PathLike = Union[Path, str]
class CommandResult(NamedTuple):
    """Results of a command execution as returned by Command.run()"""

    stdout: str  # Captured stdout, decoded as text.
    stderr: str  # Captured stderr (None when stderr was not captured).
    returncode: int  # Exit code of the process.
class Command(object):
"""
Simplified subprocess handling for shell-like scripts.
## Arguments
Arguments are provided as a list similar to subprocess.run():
>>> Command('cargo', 'build', '--workspace')
Command('cargo', 'build', '--workspace')
In contrast to subprocess.run, all strings are split by whitespaces similar to bash:
>>> Command('cargo build --workspace', '--features foo')
Command('cargo', 'build', '--workspace', '--features', 'foo')
In contrast to bash, globs are *not* evaluated, but can easily be provided using Path:
>>> Command('ls -l', *Path('.').glob('*.toml'))
Command('ls', '-l', ...)
None or False are ignored to make it easy to include conditional arguments:
>>> all = False
>>> Command('cargo build', '--workspace' if all else None)
Command('cargo', 'build')
Commands can be nested, similar to $() subshells in bash. The sub-commands will be executed
right away and their output will undergo the usual splitting:
>>> Command('printf "(%s)"', Command('echo foo bar')).stdout()
'(foo)(bar)'
Arguments can be explicitly quoted to prevent splitting, it applies to both sub-commands
as well as strings:
>>> Command('printf "(%s)"', quoted(Command('echo foo bar'))).stdout()
'(foo bar)'
Commands can also be piped into one another:
>>> wc = Command('wc')
>>> Command('echo "abcd"').pipe(wc('-c')).stdout()
'5'
Programs will be looked up in PATH or absolute paths to programs can be supplied as well:
>>> Command('/usr/bin/env').executable
PosixPath('/usr/bin/env')
## Executing
Once built, commands can be executed using `Command.fg()`, to run the command in the
foreground, visible to the user, or `Command.stdout()` to capture the stdout.
By default, any non-zero exit code will trigger an Exception and stderr is always directed to
the user.
More complex use-cases are supported with the `Command.run()` or `Command.stream()` methods.
A Command instance can also be passed to the subprocess.run() for any use-cases unsupported by
this API.
"""
def __init__(
self,
*args: Any,
stdin_cmd: Optional["Command"] = None,
env_vars: Dict[str, str] = {},
):
self.args = Command.__parse_cmd(args)
self.stdin_cmd = stdin_cmd
self.env_vars = env_vars
if len(self.args) > 0:
executable = self.args[0]
if Path(executable).exists():
self.executable = Path(executable)
else:
path = shutil.which(executable)
if not path:
raise ValueError(f'Required program "{executable}" cannot be found in PATH.')
elif very_verbose():
print(f"Using {executable}: {path}")
self.executable = Path(path)
### High level execution API
def fg(
self,
quiet: bool = False,
check: bool = True,
) -> int:
"""
Runs a program in the foreground with output streamed to the user.
>>> Command('true').fg()
0
Non-zero exit codes will trigger an Exception
>>> Command('false').fg()
Traceback (most recent call last):
...
subprocess.CalledProcessError: Command 'false' returned non-zero exit status 1.
But can be disabled:
>>> Command('false').fg(check=False)
1
Arguments:
quiet: Do not show stdout unless the program failed.
check: Raise an exception if the program returned an error code.
Returns: The return code of the program.
"""
self.__debug_print()
if quiet:
result = subprocess.run(
self.args,
stdout=PIPE,
stderr=STDOUT,
stdin=self.__stdin_stream(),
env={**os.environ, **self.env_vars},
text=True,
)
else:
result = subprocess.run(
self.args,
stdin=self.__stdin_stream(),
env={**os.environ, **self.env_vars},
text=True,
)
if result.returncode != 0:
if quiet and check and result.stdout:
print(result.stdout)
if check:
raise subprocess.CalledProcessError(result.returncode, str(self), result.stdout)
return result.returncode
def success(self):
return self.fg(check=False, quiet=True) == 0
def stdout(self, check: bool = True):
"""
Runs a program and returns stdout. Stderr is still directed to the user.
"""
return self.run(stderr=None, check=check).stdout.strip()
def lines(self):
"""
Runs a program and returns stdout line by line. Stderr is still directed to the user.
"""
return self.stdout().splitlines()
def write_to(self, filename: Path):
"""
Writes all program output (stdout and stderr) to the provided file.
"""
with open(filename, "w") as file:
file.write(self.run(stderr=STDOUT).stdout)
def append_to(self, filename: Path):
    """
    Appends all program output (stdout and stderr) to the provided file.
    """
    output = self.run(stderr=STDOUT).stdout
    with open(filename, "a") as out_file:
        out_file.write(output)
def pipe(self, *args: Any):
    """
    Pipes the output of this command into another process.

    The target can either be another Command or the argument list to build a new command.
    """
    if len(args) == 1 and isinstance(args[0], Command):
        # Clone the target command, but feed it this command's output on stdin.
        piped = Command(stdin_cmd=self)
        piped.args = args[0].args
        piped.env_vars = self.env_vars.copy()
        return piped
    return Command(*args, stdin_cmd=self, env_vars=self.env_vars)
### Lower level execution API
def run(self, check: bool = True, stderr: Optional[int] = PIPE) -> CommandResult:
    """
    Runs a program with stdout, stderr and error code returned.

    >>> Command('echo', 'Foo').run()
    CommandResult(stdout='Foo\\n', stderr='', returncode=0)

    Non-zero exit codes will trigger an Exception by default.

    Arguments:
        check: Raise an exception if the program returned an error code.
        stderr: Destination of the child's stderr stream (captured by default).

    Returns: CommandResult(stdout, stderr, returncode)
    """
    self.__debug_print()
    completed = subprocess.run(
        self.args,
        stdin=self.__stdin_stream(),
        stdout=subprocess.PIPE,
        stderr=stderr,
        env={**os.environ, **self.env_vars},
        text=True,
        check=check,
    )
    return CommandResult(completed.stdout, completed.stderr, completed.returncode)
def stream(self, stderr: Optional[int] = PIPE) -> "subprocess.Popen[str]":
    """
    Runs a program and returns the Popen object of the running process.
    """
    self.__debug_print()
    process = subprocess.Popen(
        self.args,
        stdin=self.__stdin_stream(),
        stdout=subprocess.PIPE,
        stderr=stderr,
        env={**os.environ, **self.env_vars},
        text=True,
    )
    return process
def env(self, key: str, value: str):
    """Returns a copy of this command with environment variable `key` set to `value`."""
    derived = Command()
    derived.args = self.args
    derived.env_vars = {**self.env_vars, key: value}
    return derived
def add_path(self, new_path: str):
    """Returns a copy of this command with `new_path` appended to its PATH variable."""
    # Prefer a PATH already overridden on this command, then the process environment.
    current_path = self.env_vars.get("PATH", os.environ.get("PATH", ""))
    derived = Command()
    derived.args = self.args
    derived.env_vars = {**self.env_vars, "PATH": f"{current_path}:{new_path}"}
    return derived
def foreach(self, arguments: Iterable[Any], batch_size: int = 1):
    """
    Yields a new command for each entry in `arguments`.

    The argument is appended to each command and is intended to be used in
    conjunction with `parallel()` to execute a command on a list of arguments in
    parallel.

    >>> parallel(*cmd('echo').foreach((1, 2, 3))).stdout()
    ['1', '2', '3']

    Arguments can also be batched by setting batch_size > 1, which will append multiple
    arguments to each command.

    >>> parallel(*cmd('echo').foreach((1, 2, 3), batch_size=2)).stdout()
    ['1 2', '3']
    """
    yield from (self(*batch) for batch in batched(arguments, batch_size))
def __call__(self, *args: Any):
    """Returns a new Command with added arguments.

    >>> cargo = Command('cargo')
    >>> cargo('clippy')
    Command('cargo', 'clippy')
    """
    extended = Command()
    extended.args = self.args + Command.__parse_cmd(args)
    extended.env_vars = self.env_vars
    return extended
def __iter__(self):
    """Allows a `Command` to be treated like a list of arguments for subprocess.run()."""
    yield from self.args
def __str__(self):
    """Renders the command as a shell-like string, including any stdin pipeline."""

    def quote_if_needed(arg: str):
        # Arguments containing whitespace are wrapped in double quotes.
        return f'"{arg}"' if re.search(r"\s", arg) else arg

    prefix = str(self.stdin_cmd) + " | " if self.stdin_cmd else ""
    return prefix + " ".join(quote_if_needed(arg) for arg in self.args)
def __repr__(self):
    """Developer representation, e.g. Command('echo', 'hi', stdin_cmd=Command(...))."""
    arg_list = ", ".join(repr(arg) for arg in self.args)
    suffix = f", stdin_cmd={self.stdin_cmd!r}" if self.stdin_cmd else ""
    return f"Command({arg_list}{suffix})"
### Private utilities
def __stdin_stream(self):
    """Returns the stdout pipe of the upstream command, or None when not piping."""
    return self.stdin_cmd.stream().stdout if self.stdin_cmd else None
def __debug_print(self):
    """Prints the command being executed when verbose mode is enabled."""
    if not verbose():
        return
    rendered = repr(self) if very_verbose() else str(self)
    print("$", rendered)
@staticmethod
def __shell_like_split(value: str):
    """Splits a string by spaces, accounting for escape characters and quoting."""
    # The csv parser with a space delimiter gives quote-aware splitting for free.
    reader = csv.reader(StringIO(value), delimiter=" ", quotechar='"')
    for row in reader:
        yield from (field for field in row if field)
@staticmethod
def __parse_cmd(args: Iterable[Any]) -> List[str]:
    """Parses command line arguments for Command."""
    parsed: List[str] = []
    for arg in args:
        parsed.extend(Command.__parse_cmd_args(arg))
    return parsed
@staticmethod
def __parse_cmd_args(arg: Any) -> List[str]:
    """Parses a mixed type command line argument into a list of strings."""
    if isinstance(arg, Path):
        return [str(arg)]
    if isinstance(arg, QuotedString):
        # Quoted strings are passed through verbatim, never split.
        return [arg.value]
    if isinstance(arg, Command):
        # Nested commands are executed; their output becomes arguments.
        return list(Command.__shell_like_split(arg.stdout()))
    if arg is None or arg is False:
        # Allows optional arguments to be dropped with `x if cond else None`.
        return []
    return list(Command.__shell_like_split(str(arg)))
class ParallelCommands(object):
    """
    Allows commands to be run in parallel.

    >>> parallel(cmd('true'), cmd('false')).fg(check=False)
    [0, 1]

    >>> parallel(cmd('echo a'), cmd('echo b')).stdout()
    ['a', 'b']
    """

    def __init__(self, *commands: Command):
        self.commands = commands

    def fg(self, quiet: bool = True, check: bool = True):
        """Runs all commands in parallel and returns their exit codes in order."""
        with ThreadPool(os.cpu_count()) as pool:
            return pool.map(lambda c: c.fg(quiet=quiet, check=check), self.commands)

    def stdout(self):
        """Runs all commands in parallel and returns each command's stdout in order."""
        with ThreadPool(os.cpu_count()) as pool:
            return pool.map(lambda c: c.stdout(), self.commands)

    def success(self):
        """Runs all commands and returns True when every one exited with code 0."""
        codes = self.fg(check=False, quiet=False)
        # NOTE(review): this print looks like leftover debugging — confirm before removing.
        print(codes)
        return all(code == 0 for code in codes)
@contextlib.contextmanager
def cwd_context(path: PathLike):
    """Context for temporarily changing the cwd.

    >>> with cwd('/tmp'):
    ...     os.getcwd()
    '/tmp'
    """
    previous_dir = os.getcwd()
    try:
        chdir(path)
        yield
    finally:
        # Always restore the original working directory, even on error.
        chdir(previous_dir)
def chdir(path: PathLike):
    """Changes the working directory, echoing `cd <path>` in very-verbose mode."""
    if very_verbose():
        print("cd", path)
    os.chdir(path)
class QuotedString(object):
    """
    Prevents the provided string from being split.

    Commands will be executed and their stdout is quoted.
    """

    def __init__(self, value: Any):
        # A nested Command is resolved to its output; anything else is stringified.
        self.value = value.stdout() if isinstance(value, Command) else str(value)

    def __str__(self):
        return f'"{self.value}"'
T = TypeVar("T")


def batched(source: Iterable[T], max_batch_size: int) -> Iterable[List[T]]:
    """
    Returns an iterator over batches of elements from source_list.

    Elements are spread evenly, so no batch is more than one element larger
    than another.

    >>> list(batched([1, 2, 3, 4, 5], 2))
    [[1, 2], [3, 4], [5]]

    Arguments:
        source: Elements to batch; consumed eagerly.
        max_batch_size: Upper bound on the size of each batch; must be > 0.

    Returns: Iterator of lists, each at most `max_batch_size` long.
    """
    source_list = list(source)
    # An empty source yields no batches. The original divided by a zero batch
    # count here and raised ZeroDivisionError.
    if not source_list:
        return
    # Calculate batch size that spreads elements evenly across all batches
    batch_count = ceil(len(source_list) / max_batch_size)
    batch_size = ceil(len(source_list) / batch_count)
    for index in range(0, len(source_list), batch_size):
        # Slicing clamps at the end of the list automatically.
        yield source_list[index : index + batch_size]
# Shorthands
# Short aliases so scripts can write e.g. cmd("ls").fg(), quoted("a b"),
# parallel(...).stdout() or `with cwd(path):`.
quoted = QuotedString
cmd = Command
cwd = cwd_context
parallel = ParallelCommands
def run_main(main_fn: Callable[..., Any]):
    """Runs `main_fn` as the single command line entry point (see run_commands)."""
    run_commands(default_fn=main_fn)
def run_commands(
    *functions: Callable[..., Any],
    default_fn: Optional[Callable[..., Any]] = None,
    usage: Optional[str] = None,
):
    """
    Allow the user to call the provided functions with command line arguments translated to
    function arguments via argh: https://pythonhosted.org/argh
    """
    try:
        # Add global verbose arguments
        parser = argparse.ArgumentParser(usage=usage)
        add_verbose_args(parser)

        # Add provided commands to parser. Do not use sub-commands if we just got one function.
        if functions:
            argh.add_commands(parser, functions)  # type: ignore
        if default_fn:
            argh.set_default_command(parser, default_fn)  # type: ignore

        # Call main method
        argh.dispatch(parser)  # type: ignore
    except Exception as e:
        # Full stack traces only in verbose mode; keep error output short otherwise.
        if verbose():
            traceback.print_exc()
        else:
            print(e)
        sys.exit(1)
def verbose():
    """True when -v/--verbose (or any very-verbose flag) was passed on the command line."""
    return any(flag in sys.argv for flag in ("-v", "--verbose")) or very_verbose()
def very_verbose():
    """True when -vv/--very-verbose was passed on the command line."""
    return any(flag in sys.argv for flag in ("-vv", "--very-verbose"))
def add_verbose_args(parser: argparse.ArgumentParser):
    """Registers the -v/-vv flags so they appear in --help output."""
    # This just serves as documentation to argparse. The verbose variables are directly
    # parsed from argv above to ensure they are accessible early.
    for flags, help_text in (
        (("--verbose", "-v"), "Print debug output"),
        (("--very-verbose", "-vv"), "Print more debug output"),
    ):
        parser.add_argument(
            *flags,
            action="store_true",
            default=False,
            help=help_text,
        )
def all_tracked_files():
    """Yields a Path for every file tracked by git in the current repository."""
    for line in cmd("git ls-files").lines():
        tracked = Path(line)
        if tracked.is_file():
            yield tracked
def find_source_files(extension: str, ignore: Optional[List[str]] = None):
    """
    Yields all git-tracked files with the given extension.

    Files under third_party/ and files listed in `ignore` are skipped.

    Arguments:
        extension: File extension without the leading dot (e.g. "py").
        ignore: Optional list of path strings (relative to the repo root) to skip.
    """
    # Default to an empty ignore set; the original used a mutable default
    # argument (`ignore=[]`), which is a shared-state pitfall.
    ignored = set(ignore) if ignore else set()
    suffix = f".{extension}"
    for file in all_tracked_files():
        if file.suffix != suffix:
            continue
        if file.is_relative_to("third_party"):
            continue
        if str(file) in ignored:
            continue
        yield file
def find_scripts(path: Path, shebang: str):
    """
    Yields files in `path` whose contents start with the shebang `#!<shebang>`.

    Arguments:
        path: Directory to scan (non-recursive).
        shebang: Interpreter path expected after '#!', e.g. '/bin/bash'.
    """
    marker = f"#!{shebang}"
    for file in path.glob("*"):
        if not file.is_file():
            continue
        # Use a context manager so the handle is closed promptly; the original
        # left the file object open until garbage collection.
        with file.open(errors="ignore") as handle:
            if handle.read(512).startswith(marker):
                yield file
def confirm(message: str, default=False):
    """
    Asks the user a yes/no question on stdin and returns the answer as a bool.

    Arguments:
        message: Question to display before the [y/N] / [Y/n] hint.
        default: Value returned when the user just presses enter (or types
            anything other than y/n).
    """
    # The hint reflects which answer an empty response selects.
    prompt = "[Y/n]" if default else "[y/N]"
    print(message, prompt, end=" ", flush=True)
    response = sys.stdin.readline().strip().lower()
    if response == "y":
        return True
    if response == "n":
        return False
    return default
def get_cookie_file():
    """Returns the configured git http.cookiefile path, or None if unset."""
    configured = cmd("git config http.cookiefile").stdout(check=False)
    return Path(configured) if configured else None
def get_gcloud_access_token():
    """Returns a gcloud access token, or None when gcloud is not installed."""
    if shutil.which("gcloud") is None:
        return None
    return cmd("gcloud auth print-access-token").stdout(check=False)
@functools.lru_cache(maxsize=None)
def curl_with_git_auth():
    """
    Returns a curl `Command` instance set up to use the same HTTP credentials as git.

    This currently supports two methods:

    - git cookies (the default)
    - gcloud

    Most developers will use git cookies, which are passed to curl.

    gcloud authorization can be enabled in git via
    `git config credential.helper gcloud.sh`. If enabled in git, this command
    will also return a curl command using a gcloud access token.
    """
    helper = cmd("git config credential.helper").stdout(check=False)
    if not helper:
        # Default path: pass the git cookie jar to curl.
        cookie_file = get_cookie_file()
        if not (cookie_file and cookie_file.is_file()):
            raise Exception("git http cookiefile is not available.")
        return cmd("curl --cookie", cookie_file)
    if helper.endswith("gcloud.sh"):
        token = get_gcloud_access_token()
        if not token:
            raise Exception("Cannot get gcloud access token.")
        # Write token to a header file so it will not appear in logs or error messages.
        AUTH_HEADERS_FILE.write_text(f"Authorization: Bearer {token}")
        return cmd(f"curl -H @{AUTH_HEADERS_FILE}")
    raise Exception(f"Unsupported git credentials.helper: {helper}")
def strip_xssi(response: str):
    """Strips gerrit's anti-XSSI prefix from a JSON response body."""
    # See https://gerrit-review.googlesource.com/Documentation/rest-api.html#output
    xssi_prefix = ")]}'\n"
    assert response.startswith(xssi_prefix)
    return response[len(xssi_prefix):]
def gerrit_api_get(path: str):
    """GETs `path` from the gerrit REST API and returns the parsed JSON body."""
    raw = cmd(f"curl --silent --fail {GERRIT_URL}/{path}").stdout()
    return json.loads(strip_xssi(raw))
def gerrit_api_post(path: str, body: Any):
    """POSTs `body` as JSON to the authenticated gerrit REST API; returns parsed JSON."""
    request_args = [
        "--silent --fail",
        "-X POST",
        "-H",
        quoted("Content-Type: application/json"),
        "-d",
        quoted(json.dumps(body)),
        f"{GERRIT_URL}/a/{path}",
    ]
    response = curl_with_git_auth()(*request_args).stdout()
    if very_verbose():
        print("Response:", response)
    return json.loads(strip_xssi(response))
class GerritChange(object):
    """
    Class to interact with the gerrit /changes/ API.

    For information on the data format returned by the API, see:
    https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#change-info
    """

    # Gerrit change id, taken from the ChangeInfo "id" field.
    id: str
    # Raw ChangeInfo JSON object this instance was created from.
    _data: Any

    def __init__(self, data: Any):
        self._data = data
        self.id = data["id"]

    @functools.cached_property
    def _details(self) -> Any:
        # Lazily fetched /detail view (includes label/vote information).
        return gerrit_api_get(f"changes/{self.id}/detail")

    @functools.cached_property
    def _messages(self) -> List[Any]:
        # Lazily fetched list of messages posted on the change.
        return gerrit_api_get(f"changes/{self.id}/messages")

    @property
    def status(self):
        """Change status string, e.g. NEW, MERGED or ABANDONED."""
        return cast(str, self._data["status"])

    def get_votes(self, label_name: str) -> List[int]:
        "Returns the list of votes on `label_name`"
        # A label with no votes may be missing entirely; treat that as "no
        # votes" instead of crashing (the original called .get on None).
        label_info = self._details.get("labels", {}).get(label_name) or {}
        votes = label_info.get("all", [])
        return [cast(int, v.get("value")) for v in votes]

    def get_messages_by(self, email: str) -> List[str]:
        "Returns all messages posted by the user with the specified `email`."
        return [m["message"] for m in self._messages if m["author"].get("email") == email]

    def review(self, message: str, labels: Dict[str, int]):
        "Post review `message` and set the specified review `labels`"
        print("Posting on", self, ":", message, labels)
        gerrit_api_post(
            f"changes/{self.id}/revisions/current/review",
            {"message": message, "labels": labels},
        )

    def abandon(self, message: str):
        "Abandons the change, posting `message` as the reason."
        print("Abandoning", self, ":", message)
        gerrit_api_post(f"changes/{self.id}/abandon", {"message": message})

    @classmethod
    def query(cls, *queries: str):
        "Returns a list of gerrit changes matching the provided list of queries."
        return [cls(c) for c in gerrit_api_get(f"changes/?q={'+'.join(queries)}")]

    def short_url(self):
        "Short crrev.com URL for this change."
        return f"http://crrev.com/c/{self._data['_number']}"

    def __str__(self):
        return self.short_url()

    def pretty_info(self):
        "One-line human readable summary: URL and subject."
        return f"{self} - {self._data['subject']}"
def is_cros_repo():
    "Returns true if the crosvm repo is a symlink or worktree to a CrOS repo checkout."
    dot_git = CROSVM_ROOT / ".git"
    # A plain .git directory means a standalone checkout, not a CrOS worktree/symlink.
    if dot_git.is_dir() and not dot_git.is_symlink():
        return False
    return (cros_repo_root() / ".repo").exists()
def cros_repo_root():
    "Root directory of the CrOS repo checkout."
    # crosvm lives three directory levels below the repo root in a CrOS checkout.
    return (CROSVM_ROOT / "../../..").resolve()
if __name__ == "__main__":
    # Run the doctests embedded in the docstrings above; ELLIPSIS allows
    # "..." wildcards in expected output.
    import doctest

    doctest.testmod(optionflags=doctest.ELLIPSIS)
|
"""
This module holds commands of internal
glim framework commands to manipulate
a typical glim framework app.
"""
import os, traceback
from termcolor import colored
from werkzeug.serving import run_simple
from glim.command import GlimCommand
from glim.utils import copytree
from glim.exception import FolderExistsError
from glim import GlimLog, Config
from bottle import run
import glim.paths as paths
class NewCommand(GlimCommand):
    """
    Command that scaffolds a brand new glim application.

    Attributes
    ----------
    glim.command.GlimCommand Attributes
    """
    name = 'new'
    description = 'generates a new glim app'

    def configure(self):
        """Registers the optional project name positional argument."""
        self.add_argument("name", nargs='?', help="enter project name", default=None)

    def run(self, app):
        """Copies the prototype app folder into the current working directory."""
        destination = os.getcwd()
        if self.args.name is not None:
            # A project name creates the app in a subdirectory of the cwd.
            destination = os.path.join(destination, self.args.name)
        try:
            copytree(paths.PROTO_PATH, destination)
            print(colored('A new glim app created successfully! Happy coding :)', 'green'))
        except FolderExistsError as e:
            print(e)
            print(colored('App already exists', 'red'))
class StartCommand(GlimCommand):
    """
    Command that boots the web server of a glim framework app.

    Attributes
    ----------
    glim.command.GlimCommand Attributes
    """
    name = 'start'
    description = 'start the glim app web server'

    def configure(self):
        """Registers the optional --host and --port options."""
        self.add_argument("--host", help="enter host", default='127.0.0.1')
        self.add_argument("--port", help="enter port", default='8080')

    def run(self, app):
        """Starts the wsgi server using the app's configuration."""
        GlimLog.info('Glim server started on %s environment' % self.args.env)
        try:
            run(app.wsgi,
                host=self.args.host,
                port=int(self.args.port),
                debug=Config.get('app.debugger'),
                reloader=Config.get('app.reloader'),
                server=Config.get('app.server'))
        except Exception as e:
            print(traceback.format_exc())
            exit()
Revision note: the copy of this module below removes the unused werkzeug import.
"""
This module holds commands of internal
glim framework commands to manipulate
a typical glim framework app.
"""
import os, traceback
from termcolor import colored
from glim.command import GlimCommand
from glim.utils import copytree
from glim.exception import FolderExistsError
from glim import GlimLog, Config
from bottle import run
import glim.paths as paths
class NewCommand(GlimCommand):
    """
    Generates a fresh glim application from the bundled prototype.

    Attributes
    ----------
    glim.command.GlimCommand Attributes
    """
    name = 'new'
    description = 'generates a new glim app'

    def configure(self):
        """Adds the optional positional project name argument."""
        self.add_argument("name", nargs='?', help="enter project name", default=None)

    def run(self, app):
        """Copies the prototype folder into os.getcwd() (or a named subfolder)."""
        target = os.getcwd()
        if self.args.name is not None:
            target = os.path.join(target, self.args.name)
        try:
            copytree(paths.PROTO_PATH, target)
            print(colored('A new glim app created successfully! Happy coding :)', 'green'))
        except FolderExistsError as e:
            print(e)
            print(colored('App already exists', 'red'))
class StartCommand(GlimCommand):
    """
    Starts the wsgi web server of a glim framework app.

    Attributes
    ----------
    glim.command.GlimCommand Attributes
    """
    name = 'start'
    description = 'start the glim app web server'

    def configure(self):
        """Adds the optional --host and --port options."""
        self.add_argument("--host", help="enter host", default='127.0.0.1')
        self.add_argument("--port", help="enter port", default='8080')

    def run(self, app):
        """Boots the configured wsgi server for the app."""
        GlimLog.info('Glim server started on %s environment' % self.args.env)
        try:
            run(app.wsgi,
                host=self.args.host,
                port=int(self.args.port),
                debug=Config.get('app.debugger'),
                reloader=Config.get('app.reloader'),
                server=Config.get('app.server'))
        except Exception as e:
            print(traceback.format_exc())
            exit()
|
import json
import os
import datetime
from pyCGA.RestExecutor import WS
__author__ = 'antonior, mparker'
# HOST EXAMPLE
# host = 'http://XX.XX.XX.XXX:XXXX/'
class Users(WS):
    """
    This class contains methods for the users ws (i.e. login, logout, create new user...)
    """

    def login_method(self, userId, pwd, **options):
        """
        Logs the user in.

        :rtype : list of dict
        :param userId: user id
        :param pwd: password for the user
        """
        payload = {"password": pwd}
        return self.general_method("users", "login", item_id1=userId, data=payload, **options)

    def logout_method(self, userId, **options):
        """
        Logs the user out.

        :param userId: user id
        """
        return self.general_method("users", "logout", item_id1=userId, **options)

    def change_password(self, userId, password, new_password, **options):
        """
        Changes the user's password.

        :param userId: user ID
        :param password: Old password
        :param new_password: New password
        """
        return self.general_method("users", "change-password", item_id1=userId, password=password,
                                   npassword=new_password, **options)

    def reset_password(self, userId, email, **options):
        """
        Resets the user's password - CURRENTLY MAIL SERVER IS NOT WORKING

        :param userId: user ID
        :param email: User email to receive the new password
        """
        return self.general_method("users", "reset-password", item_id1=userId, email=email, **options)

    def create_user(self, userId, name, email, organization, password, **options):
        """
        Creates a new user account.

        :param userId: user id
        :param name: name
        :param email: email
        :param organization: organization name
        :param password: user password
        """
        return self.general_method("users", "create", email=email, userId=userId, name=name, organization=organization,
                                   password=password, **options)

    def change_email(self, userId, nemail, **options):
        """
        Changes the user's email address.

        :param userId: user id
        :param nemail: new email
        """
        return self.general_method("users", "change-email", item_id1=userId, nemail=nemail, **options)

    def delete(self, userId, **options):
        """
        Deletes the user account.

        :param userId: user id
        """
        return self.general_method("users", "delete", item_id1=userId, **options)
class Files(WS):
    """
    This class contains methods for the files ws (i.e. link, create...)
    """

    def share(self, userId, fileId, read=True, write=False, delete=False, unshare=False, **options):
        """
        Method to share files

        :param userId: id of the user this file will be shared
        :param fileId: File id - Notice this is the internal id in Catalog
        :param read: True/False - If True the user could read the file
        :param write: True/False - If True the user could write the file
        :param delete: True/False - If True the user could delete the file
        :param unshare: True/False - If True the file will be unshared for this user
        """
        # Booleans are lower-cased because the REST API expects "true"/"false".
        return self.general_method("files", "share", item_id1=fileId, unshare=str(unshare).lower(), userId=userId,
                                   read=str(read).lower(), write=str(write).lower(), delete=str(delete).lower(),
                                   **options)

    def update(self, fileId, **options):
        """
        Method to update a file.

        :param fileId: id of file
        :param options: Kwargs where the keys are the names of the file properties and the
            values the new values for those properties.
        """
        return self.general_method("files", "update", item_id1=fileId, **options)

    def search(self, studyId, **options):
        """
        Method to search files based in a dictionary "options"

        :param studyId: study id
        :param options: Kwargs where the keys are the names of the file properties used to search.
        """
        return self.general_method("files", "search", studyId=studyId, **options)

    def create_folder(self, studyId, folder, **options):
        """
        Method to create a folder in the DB

        :param studyId: study to associate the folder to
        :param folder: path in the DB
        """
        return self.general_method(ws_category1="files", action="create-folder", studyId=studyId, folder=folder,
                                   **options)

    def link(self, studyId, uri, path, description="", parents=False, calculateChecksum=False, createFolder=True,
             **options):
        """
        Method for linking files and folders

        :param studyId: study to associate file to
        :param uri: full path to file on file system
        :param path: path in the DB
        :param description: Folder description
        :param parents: True/False
        :param calculateChecksum: True/False
        :param createFolder: True/False If true will create the folder before linking the file
        """
        if createFolder:
            # Call the bound method; the original used the explicit unbound form
            # Files.create_folder(self, ...), which breaks for subclasses overriding it.
            self.create_folder(studyId, path)
        return self.general_method(ws_category1="files", action="link", path=path, uri=uri, studyId=studyId,
                                   description=description, parents=str(parents).lower(),
                                   calculateChecksum=str(calculateChecksum).lower(), **options)

    def info(self, fileId, **options):
        """
        Method to get information of a particular file

        :param fileId: file Id
        """
        return self.general_method(ws_category1="files", action="info", item_id1=fileId, **options)

    def update_file_post(self, fileId, json_file=None, data=None, **options):
        """
        Method to update file properties via POST, from a dict or a json file.

        :param fileId: id of file
        :param json_file: path to a json file holding the new properties
        :param data: dict with the new properties (takes precedence over json_file)
        :raises Exception: when neither data nor json_file is provided
        """
        if data is None and json_file is None:
            raise Exception("please provide a json file or a data")
        if data is None:
            # Context manager closes the handle even if json parsing fails
            # (the original leaked the handle on a parse error).
            with open(json_file) as fd:
                data = json.load(fd)
        return self.general_method(ws_category1="files", action="update", item_id1=fileId, data=data, **options)

    def relink(self, fileId, uri, calculateChecksum=False, **options):
        """
        Method to relink (move) a file

        :param fileId: Id of file
        :param uri: new path to file on filesystem
        :param calculateChecksum: True/False
        """
        return self.general_method(ws_category1="files", action="relink", item_id1=fileId, uri=uri,
                                   calculateChecksum=str(calculateChecksum).lower(), **options)

    def index(self, fileId, outdirId, annotate, **options):
        """
        Indexes a file

        :param annotate: True/False
        :param fileId: file Id
        :param outdirId: Output directory of the index
        """
        return self.general_method(ws_category1="files", action="index", item_id1=fileId, outdirId=outdirId,
                                   annotate=str(annotate).lower(), **options)

    def refresh(self, fileId, **options):
        """
        Refreshes metadata from a file or folder - returns updated files

        :param fileId: File Id
        """
        return self.general_method(ws_category1="files", action="refresh", item_id1=fileId, **options)

    def variants(self, fileId, **options):
        """
        Gets variants from a vcf or gvcf file

        :param options: Options to select the variants
        :param fileId: file Id
        """
        return self.general_method(ws_category1="files", action="variants", item_id1=fileId, **options)

    def alignments(self, fileId, **options):
        """
        Gets alignments from a bam file

        :param options: Options to select the alignments
        :param fileId: file Id
        """
        return self.general_method(ws_category1="files", action="alignments", item_id1=fileId, **options)

    def list(self, fileId, **options):
        """
        Lists a file

        :param fileId: file Id
        """
        return self.general_method(ws_category1="files", action="list", item_id1=fileId, **options)

    def set_header(self, fileId, header, **options):
        """
        Sets the header of a file

        :param fileId: file Id
        :param header: new header
        """
        return self.general_method(ws_category1="files", action="set_header", item_id1=fileId, header=header,
                                   **options)

    def grep(self, fileId, pattern, ignorecase=False, multi=True, **options):
        """
        Greps the contents of a file

        :param pattern: Pattern to search
        :param ignorecase: Boolean. Ignore case in grep action
        :param multi: Boolean. If False only the first match is returned
        :param fileId: File id
        """
        return self.general_method(ws_category1="files", action="grep", item_id1=fileId, pattern=pattern,
                                   ignorecase=str(ignorecase).lower(), multi=str(multi).lower(), **options
                                   )
class Variables(WS):
    """Methods for the variableSet ws (create, delete, search)."""

    def create(self, studyId, name, json_file=None, data=None, **options):
        """
        Creates a VariableSet from a dict or a json file whose format is defined
        by opencga (this is a post method).

        :param studyId: study id
        :param name: name of the variable set
        :param json_file: path to a json file with the variable set definition
        :param data: dict with the variable set definition (takes precedence)
        :raises Exception: when neither data nor json_file is provided
        """
        if data is None and json_file is None:
            raise Exception("please provide a json file or a data")
        if data is None:
            # Context manager guarantees the handle is closed even if parsing
            # fails (the original leaked the handle on a parse error).
            with open(json_file) as fd:
                data = json.load(fd)
        return self.general_method(ws_category1="variableSet", action="create", data=data, studyId=studyId, name=name,
                                   **options)

    def delete(self, variable_set_id, **options):
        """
        Deletes one VariableSet given a variableSetId

        :param variable_set_id: Variable Set Id
        """
        return self.general_method(ws_category1="variableSet", action="delete", item_id1=variable_set_id, **options)

    def search(self, studyId, **options):
        """
        Method to search Variable Sets based in a dictionary "options"

        :param studyId: study id
        """
        return self.general_method(ws_category1="variableSet", action="search", studyId=studyId, **options)
class Samples(WS):
    """Methods for the samples ws (create, search, annotate...)."""

    def create(self, studyId, name, source, description, **options):
        """
        Method to create a sample

        :param studyId: studyId
        :param name: name
        :param source: source
        :param description: description
        """
        return self.general_method(ws_category1="samples", action="create", studyId=studyId, name=name,
                                   source=source, description=description, **options)

    def update(self, sampleId, **options):
        """
        Simple update of a sample via get method

        :param sampleId: Sample Id
        :param options: Options to be updated
        """
        return self.general_method(ws_category1="samples", action="update", item_id1=sampleId, **options)

    def update_post(self, sampleId, data, **options):
        """
        Update of a sample via post method

        :param sampleId: Sample Id
        :param data: dict with the properties to be updated
        """
        return self.general_method(ws_category1="samples", action="update", item_id1=sampleId, data=data, **options)

    def search(self, studyId, **options):
        """
        Method to search Samples based in a dictionary "options"

        :param studyId: study id
        :param options: Kwargs where the keys are the names of the sample properties used to search.
        """
        return self.general_method(ws_category1="samples", action="search", studyId=studyId, **options)

    def search_by_annotation(self, studyId, variableSetName, *queries):
        """
        Searches across samples using the annotation

        :param queries: A list of queries for the annotation sets, <VariableId>:<operator><value>,<value>...
            (the queries are built as ANDs and the list of values as ORs). Examples: NAME:=Luke,Leia,Vader AGE:>20
        :param studyId: Study Id
        :param variableSetName: name of the variable set the annotations belong to
        """
        variable = Variables()
        # Resolve the variable set name to its internal numeric id first.
        v_id = variable.search(studyId=studyId, name=variableSetName)[0]["id"]
        return self.search(studyId=studyId, variableSetId=str(v_id), annotation=";".join(queries))

    def annotate(self, sample_id, variableSetName, annotationSetName, studyId, json_file=None, data=None, update=True, **options):
        """
        Annotates a sample from a dict or a json file (this is a post method)

        :param sample_id: sample id
        :param variableSetName: name of the variable set to annotate against
        :param annotationSetName: base name for the annotation set
        :param studyId: study id
        :param json_file: path to a json file with the annotations
        :param data: dict with the annotations (takes precedence over json_file)
        :param update: if True and an annotation set for the variable set already
            exists, it is updated in place instead of creating a new one
        :raises Exception: when neither data nor json_file is provided
        """
        if data is None and json_file is None:
            raise Exception("please provide a json file or a data")
        if data is None:
            # Context manager closes the handle even on a parse error
            # (the original leaked it).
            with open(json_file) as fd:
                data = json.load(fd)
        variable = Variables()
        variableSetId = str(variable.search(studyId=studyId, name=variableSetName)[0]["id"])
        if update:
            # Reuse the existing annotation set for this variable set, if any.
            for annt_set in self.info(str(sample_id))[0]["annotationSets"]:
                if annt_set["variableSetId"] == int(variableSetId):
                    annotationSetName = annt_set["name"]
                    return self.general_method(ws_category1="samples", action="annotate",
                                               item_id1=str(sample_id), annotateSetName=annotationSetName,
                                               variableSetId=variableSetId, update="true", data=data
                                               )
        # Timestamp suffix keeps annotation set names unique.
        annotateSetName = annotationSetName + "_" + str(datetime.datetime.now()).replace(" ", "_").replace(":", "_")
        return self.general_method(ws_category1="samples", action="annotate", item_id1=sample_id,
                                   variableSetId=variableSetId, annotateSetName=annotateSetName, data=data, **options)

    def info(self, sampleId, **options):
        """
        Method to get the sample information

        :param sampleId: Sample Id
        """
        return self.general_method(ws_category1="samples", action="info", item_id1=sampleId, **options)

    def delete(self, sampleId, **options):
        """
        Method to delete a sample

        :param sampleId: Sample Id
        """
        return self.general_method(ws_category1="samples", action="delete", item_id1=sampleId, **options)

    def share(self, userId, fileId, read=True, write=False, delete=False, unshare=False, **options):
        """
        Method to share samples

        :param userId: id of the user this sample will be shared
        :param fileId: Sample id - Notice this is the internal id in Catalog
        :param read: True/False - If True the user could read the sample
        :param write: True/False - If True the user could write the sample
        :param delete: True/False - If True the user could delete the sample
        :param unshare: True/False - If True the sample will be unshared for this user
        """
        return self.general_method("samples", "share", item_id1=fileId, unshare=str(unshare).lower(), userId=userId,
                                   read=str(read).lower(), write=str(write).lower(), delete=str(delete).lower(),
                                   **options)
class Individuals(WS):
    """Methods for the individuals ws (create, search, annotate...)."""

    def create(self, studyId, name, family, fatherId, motherId, gender, **options):
        """
        Method for creating an individual

        :param studyId: study id
        :param name: name of individual
        :param family: specify 0 if no family
        :param fatherId: specify as 0 if no fatherId (None is mapped to "0")
        :param motherId: specify as 0 if no motherId (None is mapped to "0")
        :param gender: MALE, FEMALE or UNKNOWN (anything else becomes UNKNOWN)
        """
        if fatherId is None:
            fatherId = "0"
        if motherId is None:
            motherId = "0"
        gender = gender.upper()
        # Anything that is not MALE/FEMALE is normalized to UNKNOWN.
        if gender not in ("MALE", "FEMALE"):
            gender = "UNKNOWN"
        return self.general_method(ws_category1="individuals", action="create", name=name, family=family, fatherId=fatherId,
                                   motherId=motherId, gender=gender, studyId=studyId, **options)

    def search(self, studyId, **options):
        """
        Method to search individuals based in a dictionary "options"

        :param studyId: study id
        """
        return self.general_method(ws_category1="individuals", action="search", studyId=studyId, **options)

    def info(self, individualId, **options):
        """
        Method to get individual information

        :param individualId: individual id
        """
        return self.general_method(ws_category1="individuals", action="info", item_id1=individualId, **options)

    def delete(self, individualId, **options):
        """
        Method to delete an individual

        :param individualId: individual id
        """
        return self.general_method(ws_category1="individuals", action="delete", item_id1=individualId, **options)

    def annotate(self, individual_id, variableSetName, annotationSetName, studyId, json_file=None, data=None,
                 update=True):
        """
        Annotates an individual from a dict or a json file (this is a post method)

        :param individual_id: individual id
        :param variableSetName: name of the variable set to annotate against
        :param annotationSetName: base name for the annotation set
        :param studyId: study id
        :param json_file: path to a json file with the annotations
        :param data: dict with the annotations (takes precedence over json_file)
        :param update: if True and an annotation set for the variable set already
            exists, it is updated in place instead of creating a new one
        :raises Exception: when neither data nor json_file is provided
        """
        if data is None and json_file is None:
            raise Exception("please provide a json file or a data")
        if data is None:
            # Context manager closes the handle even on a parse error
            # (the original leaked it).
            with open(json_file) as fd:
                data = json.load(fd)
        variable = Variables()
        variableSetId = str(variable.search(studyId=studyId, name=variableSetName)[0]["id"])
        if update:
            # Reuse the existing annotation set for this variable set, if any.
            for annt_set in self.info(str(individual_id))[0]["annotationSets"]:
                if annt_set["variableSetId"] == int(variableSetId):
                    annotationSetName = annt_set["name"]
                    return self.general_method(ws_category1="individuals", action="annotate",
                                               item_id1=str(individual_id), annotateSetName=annotationSetName,
                                               variableSetId=variableSetId, update="true", data=data
                                               )
        # Timestamp suffix keeps annotation set names unique.
        annotationSetName = annotationSetName + "_" + str(datetime.datetime.now()).replace(" ", "_").replace(":", "_")
        return self.general_method(ws_category1="individuals", action="annotate",
                                   item_id1=str(individual_id), annotateSetName=annotationSetName,
                                   variableSetId=variableSetId, update="false", data=data
                                   )

    def search_by_annotation(self, studyId, variableSetName, *queries, **options):
        """
        Searches across individuals using the annotation

        :param queries: A list of queries for the annotation sets, <VariableId>:<operator><value>,<value>...
            (the queries are built as ANDs and the list of values as ORs). Examples: NAME:=Luke,Leia,Vader AGE:>20
        :param studyId: study id
        :param variableSetName: name of the variable set the annotations belong to
        """
        variable = Variables()
        v_id = variable.search(studyId=studyId, name=variableSetName)[0]["id"]
        return self.search(studyId=studyId, variableSetId=str(v_id), annotation=";".join(queries), **options)
class Projects(WS):
    """
    Web-service client for the "projects" endpoints (create, info, update,
    delete, studies).
    """

    def create(self, userId, name, alias, description, organization, **options):
        """
        Create a new project.

        :param userId: owner of the project
        :param name: project name
        :param alias: project alias
        :param description: project description
        :param organization: organization the project belongs to
        """
        params = dict(options, userId=userId, name=name, alias=alias,
                      description=description, organization=organization)
        return self.general_method(ws_category1="projects", action="create", **params)

    def info(self, projectId, **options):
        """
        Retrieve the information of a project.

        :param projectId: id of the project
        """
        return self.general_method(ws_category1="projects", action="info",
                                   item_id1=projectId, **options)

    def update(self, projectId, **options):
        """
        Update a project; the fields to modify (name, description,
        organization, status, attributes, ...) go in ``options``.

        :param projectId: id of the project
        """
        return self.general_method(ws_category1="projects", action="update",
                                   item_id1=projectId, **options)

    def delete(self, projectId, **options):
        """
        Delete a project.

        :param projectId: id of the project
        """
        return self.general_method(ws_category1="projects", action="delete",
                                   item_id1=projectId, **options)

    def studies(self, projectId, **options):
        """
        List the studies contained in a project.

        :param projectId: id of the project
        """
        return self.general_method(ws_category1="projects", action="studies",
                                   item_id1=projectId, **options)
class Studies(WS):
    """
    Web-service client for the "studies" endpoints (search, create, info,
    files, jobs, samples, variants, alignments, status, update, delete).
    """
    def search(self, **options):
        """
        Search studies based on the given criteria.

        :param options: kwargs whose keys are study property names used to search.
        """
        return self.general_method(ws_category1="studies", action="search", **options)
    def create(self, projectId, name, alias, **options):
        """
        Create a new study inside a project.

        :param projectId: id of the parent project
        :param name: study name
        :param alias: study alias
        """
        return self.general_method(ws_category1="studies", action="create", projectId=projectId, name=name,
                                   alias=alias, **options)
    def info(self, studyId, **options):
        """
        Retrieve the information of a study.

        :param studyId: id of the study
        """
        return self.general_method(ws_category1="studies", action="info", item_id1=studyId, **options)
    def files(self, studyId, **options):
        """
        List the files of a study.

        :param studyId: id of the study
        """
        return self.general_method(ws_category1="studies", action="files", item_id1=studyId, **options)
    def jobs(self, studyId, **options):
        """
        List the jobs of a study.

        :param studyId: id of the study
        """
        return self.general_method(ws_category1="studies", action="jobs", item_id1=studyId, **options)
    def samples(self, studyId, **options):
        """
        List the samples of a study.

        :param studyId: id of the study
        """
        return self.general_method(ws_category1="studies", action="samples", item_id1=studyId, **options)
    def variants(self, studyId, **filters):
        """
        Fetch the variants of a study.

        :param filters: filters applied to the variant fetch
        :param studyId: id of the study
        """
        return self.general_method(ws_category1="studies", action="variants", item_id1=studyId, **filters)
    def alignments(self, studyId, **filters):
        """
        Fetch the alignments of a study.

        :param filters: filters applied to the alignment fetch
        :param studyId: id of the study
        """
        return self.general_method(ws_category1="studies", action="alignments", item_id1=studyId, **filters)
    def status(self, studyId, **options):
        """
        Retrieve the status of a study.

        :param studyId: id of the study
        """
        return self.general_method(ws_category1="studies", action="status", item_id1=studyId, **options)
    def update(self, projectId, **options):
        """
        Update a study; the fields to modify go in ``options``.

        NOTE(review): the parameter is named ``projectId`` but it is sent as the
        study id - presumably it should be called ``studyId``; confirm with callers.

        :param projectId: id of the study to update
        :param options: fields to update
        """
        return self.general_method(ws_category1="studies", action="update", item_id1=projectId, **options)
    def delete(self, studyId, **options):
        """
        Delete a study.

        :param studyId: id of the study
        """
        return self.general_method(ws_category1="studies", action="delete", item_id1=studyId, **options)
class Jobs(WS):
    """
    Web-service client for the "jobs" endpoints (create, info, visit, delete).
    """

    def create(self, studyId, name, toolId, jobId, **options):
        """
        Create a job.

        :param studyId: study the job belongs to
        :param name: job name
        :param toolId: tool executed by the job
        :param jobId: id for the job
        """
        return self.general_method(ws_category1="jobs", action="create", name=name, studyId=studyId,
                                   toolId=toolId, jobId=jobId, **options)

    def info(self, jobId, **options):
        """
        Retrieve the information of a job.

        :param jobId: id of the job
        """
        return self.general_method(ws_category1="jobs", action="info", item_id1=jobId, **options)

    def visit(self, jobId, **options):
        """
        Visit a job.

        :param jobId: id of the job
        """
        return self.general_method(ws_category1="jobs", action="visit", item_id1=jobId, **options)

    def delete(self, jobId, **options):
        """
        Delete a job.

        :param jobId: id of the job
        """
        return self.general_method(ws_category1="jobs", action="delete", item_id1=jobId, **options)

    def create_post(self, studyId, json_file=None, data=None, **options):
        """
        Create a job via POST; the payload is taken from ``data`` or, if it is
        None, loaded from ``json_file``.

        :param studyId: study the job belongs to
        :param json_file: path to a json file with the job definition
        :param data: job definition as a python dict (takes precedence)
        :raises Exception: if neither ``json_file`` nor ``data`` is provided
        """
        if data is None and json_file is None:
            raise Exception("please provide a json file or a data")
        if data is None:
            # Context manager guarantees the handle is closed even if json.load raises.
            with open(json_file) as fd:
                data = json.load(fd)
        return self.general_method(ws_category1="jobs", action="create", studyId=studyId, data=data, **options)
# class Cohorts(WS):
# """
# This class contains method for cohorts ws (i.e, link, create)
# """
# # TODO: Check this method 2 variables unused
# def create(self, studyId, name, variableSetId, type, sampleIds):
# """
#
# :param studyId:
# :param sid:
# :param name:
# :param variableSetId:
# :param type: can be PAIRED, CASE_CONTROL, CASE_SET, CONTROL_SET, PAIRED_TUMOR, FAMILY, TRIO, COLLECTION
# :param sampleIds:
# """
#
# url = os.path.join(self.pre_url, "cohorts",
# "create?sid=" + self.session_id + "&name=" + name + "&type=" + type + "&sampleIds=" + sampleIds)
# result = self.run_ws(url)
# return result["id"]
#
# def samples(self, cohortId):
# """
#
# method to get samples that are part of a cohort
#
# :param cohortId:
# :return: full result which can be looped over
# """
#
# url = os.path.join(self.pre_url, "cohorts", cohortId, "samples?sid=" + self.session_id)
# result = self.run_ws(url)
# return result
#
# def update(self, cohortId, sampleIds):
# """
# This will be to add or remove samples from a cohort
#
# :param cohortId:
# :param sampleIds: full list of sampleIds, i.e. old ones and new ones (comma separated)
# """
#
# url = os.path.join(self.pre_url, "cohorts", cohortId, "update?sid=" + self.session_id + "&samples=" + sampleIds)
# result = self.run_ws(url)
# return result
#
# def info(self, cohortId):
# """
# This will be to add or remove samples from a cohort
#
# :param cohortId:
#
# """
#
# url = os.path.join(self.pre_url, "cohorts", cohortId, "info?sid=" + self.session_id)
# result = self.run_ws(url)
# return result
#
# def delete(self, cohortId):
# """
#
# method to get samples that are part of a cohort
#
# :param cohortId:
# :return: full result which can be looped over
# """
#
# url = os.path.join(self.pre_url, "cohorts", cohortId, "delete?sid=" + self.session_id)
# result = self.run_ws(url)
# return result
# Fixed python client to create new annotations
import json
import os
import datetime
from pyCGA.RestExecutor import WS
__author__ = 'antonior, mparker'
# HOST EXAMPLE
# host = 'http://XX.XX.XX.XXX:XXXX/'
class Users(WS):
    """
    Web-service client for the "users" endpoints (login, logout, create user,
    password/email management, delete).
    """

    def login_method(self, userId, pwd, **options):
        """
        Log a user in.

        :rtype: list of dict
        :param userId: user id
        :param pwd: password for the user
        """
        payload = {"password": pwd}
        return self.general_method(ws_category1="users", action="login",
                                   item_id1=userId, data=payload, **options)

    def logout_method(self, userId, **options):
        """
        Log a user out.

        :param userId: user id
        """
        return self.general_method(ws_category1="users", action="logout",
                                   item_id1=userId, **options)

    def change_password(self, userId, password, new_password, **options):
        """
        Change a user's password.

        :param userId: user id
        :param password: old password
        :param new_password: new password
        """
        return self.general_method(ws_category1="users", action="change-password",
                                   item_id1=userId, password=password,
                                   npassword=new_password, **options)

    def reset_password(self, userId, email, **options):
        """
        Reset a user's password - CURRENTLY MAIL SERVER IS NOT WORKING.

        :param userId: user id
        :param email: user email to receive the new password
        """
        return self.general_method(ws_category1="users", action="reset-password",
                                   item_id1=userId, email=email, **options)

    def create_user(self, userId, name, email, organization, password, **options):
        """
        Create a new user.

        :param userId: user id
        :param name: full name
        :param email: email address
        :param organization: organization name
        :param password: user password
        """
        return self.general_method(ws_category1="users", action="create",
                                   email=email, userId=userId, name=name,
                                   organization=organization, password=password, **options)

    def change_email(self, userId, nemail, **options):
        """
        Change a user's email address.

        :param userId: user id
        :param nemail: new email address
        """
        return self.general_method(ws_category1="users", action="change-email",
                                   item_id1=userId, nemail=nemail, **options)

    def delete(self, userId, **options):
        """
        Delete a user.

        :param userId: user id
        """
        return self.general_method(ws_category1="users", action="delete",
                                   item_id1=userId, **options)
class Files(WS):
    """
    Web-service client for the "files" endpoints (share, search, link, index,
    variants, alignments, grep, ...).
    """

    def share(self, userId, fileId, read=True, write=False, delete=False, unshare=False, **options):
        """
        Share a file with another user.

        :param userId: id of the user the file will be shared with
        :param fileId: file id - notice this is the internal id in Catalog
        :param read: if True the user can read the file
        :param write: if True the user can write the file
        :param delete: if True the user can delete the file
        :param unshare: if True the file will be unshared for this user
        """
        return self.general_method("files", "share", item_id1=fileId, unshare=str(unshare).lower(), userId=userId,
                                   read=str(read).lower(), write=str(write).lower(), delete=str(delete).lower(),
                                   **options)

    def update(self, fileId, **options):
        """
        Update a file.

        :param fileId: id of the file
        :param options: kwargs whose keys are the file properties to update and
            whose values are the new values for those properties.
        """
        return self.general_method("files", "update", item_id1=fileId, **options)

    def search(self, studyId, **options):
        """
        Search files within a study.

        :param studyId: study id
        :param options: kwargs whose keys are file property names used to search.
        """
        return self.general_method("files", "search", studyId=studyId, **options)

    def create_folder(self, studyId, folder, **options):
        """
        Create a folder in the DB.

        :param studyId: study to associate the folder to
        :param folder: path in the DB
        """
        return self.general_method(ws_category1="files", action="create-folder", studyId=studyId, folder=folder,
                                   **options)

    def link(self, studyId, uri, path, description="", parents=False, calculateChecksum=False, createFolder=True,
             **options):
        """
        Link files and folders from the file system into the DB.

        :param studyId: study to associate the file to
        :param uri: full path to the file on the file system
        :param path: path in the DB
        :param description: folder description
        :param parents: True/False
        :param calculateChecksum: True/False
        :param createFolder: if True the folder is created before linking the file
        """
        if createFolder:
            # Unbound call kept as in the original (always uses Files' own
            # create_folder, even from subclasses).
            Files.create_folder(self, studyId, path)
        return self.general_method(ws_category1="files", action="link", path=path, uri=uri, studyId=studyId,
                                   description=description, parents=str(parents).lower(),
                                   calculateChecksum=str(calculateChecksum).lower(), **options)

    def info(self, fileId, **options):
        """
        Retrieve the information of a particular file.

        :param fileId: file id
        """
        return self.general_method(ws_category1="files", action="info", item_id1=fileId, **options)

    def update_file_post(self, fileId, json_file=None, data=None, **options):
        """
        Update a file via POST; the payload is taken from ``data`` or, if it is
        None, loaded from ``json_file``.

        :param fileId: id of the file
        :param json_file: path to a json file with the update payload
        :param data: payload as a python dict (takes precedence)
        :raises Exception: if neither ``json_file`` nor ``data`` is provided
        """
        if data is None and json_file is None:
            raise Exception("please provide a json file or a data")
        if data is None:
            # Context manager guarantees the handle is closed even if json.load raises.
            with open(json_file) as fd:
                data = json.load(fd)
        return self.general_method(ws_category1="files", action="update", item_id1=fileId, data=data, **options)

    def relink(self, fileId, uri, calculateChecksum=False, **options):
        """
        Relink (move) a file.

        :param fileId: id of the file
        :param uri: new path to the file on the file system
        :param calculateChecksum: if True the checksum is recalculated
        """
        return self.general_method(ws_category1="files", action="relink", item_id1=fileId, uri=uri,
                                   calculateChecksum=str(calculateChecksum).lower(), **options)

    def index(self, fileId, outdirId, annotate, **options):
        """
        Index a file.

        :param fileId: file id
        :param outdirId: output directory of the index
        :param annotate: True/False
        """
        return self.general_method(ws_category1="files", action="index", item_id1=fileId, outdirId=outdirId,
                                   annotate=str(annotate).lower(), **options)

    def refresh(self, fileId, **options):
        """
        Refresh the metadata of a file or folder; returns the updated files.

        :param fileId: file id
        """
        return self.general_method(ws_category1="files", action="refresh", item_id1=fileId, **options)

    def variants(self, fileId, **options):
        """
        Fetch variants from a vcf or gvcf file.

        :param fileId: file id
        :param options: options to select the variants
        """
        return self.general_method(ws_category1="files", action="variants", item_id1=fileId, **options)

    def alignments(self, fileId, **options):
        """
        Fetch alignments from a bam file.

        :param fileId: file id
        :param options: options to select the alignments
        """
        return self.general_method(ws_category1="files", action="alignments", item_id1=fileId, **options)

    def list(self, fileId, **options):
        """
        List a file.

        :param fileId: file id
        """
        return self.general_method(ws_category1="files", action="list", item_id1=fileId, **options)

    def set_header(self, fileId, header, **options):
        """
        Set the header of a file.

        NOTE(review): the action is "set_header" with an underscore while other
        multi-word actions use hyphens - confirm against the server API.

        :param fileId: file id
        :param header: new header
        """
        return self.general_method(ws_category1="files", action="set_header", item_id1=fileId, header=header,
                                   **options)

    def grep(self, fileId, pattern, ignorecase=False, multi=True, **options):
        """
        Grep the contents of a file.

        :param fileId: file id
        :param pattern: pattern to search for
        :param ignorecase: if True, ignore case when matching
        :param multi: if False, only the first match is returned
        """
        return self.general_method(ws_category1="files", action="grep", item_id1=fileId, pattern=pattern,
                                   ignorecase=str(ignorecase).lower(), multi=str(multi).lower(), **options
                                   )
class Variables(WS):
    """
    Web-service client for the "variableSet" endpoints (create, delete, search).
    """

    def create(self, studyId, name, json_file=None, data=None, **options):
        """
        Create a VariableSet (POST method). The payload format is defined by
        opencga and is taken from ``data`` or, if it is None, from ``json_file``.

        :param studyId: study the variable set belongs to
        :param name: name of the variable set
        :param json_file: path to a json file with the variable set definition
        :param data: definition as a python dict (takes precedence)
        :raises Exception: if neither ``json_file`` nor ``data`` is provided
        """
        if data is None and json_file is None:
            raise Exception("please provide a json file or a data")
        if data is None:
            # Context manager guarantees the handle is closed even if json.load raises.
            with open(json_file) as fd:
                data = json.load(fd)
        return self.general_method(ws_category1="variableSet", action="create", data=data, studyId=studyId, name=name,
                                   **options)

    def delete(self, variable_set_id, **options):
        """
        Delete one VariableSet given its id.

        :param variable_set_id: variable set id
        """
        return self.general_method(ws_category1="variableSet", action="delete", item_id1=variable_set_id, **options)

    def search(self, studyId, **options):
        """
        Search variable sets within a study.

        :param studyId: study id
        :param options: kwargs whose keys are variable-set properties used to search.
        """
        return self.general_method(ws_category1="variableSet", action="search", studyId=studyId, **options)
class Samples(WS):
    """
    Web-service client for the "samples" endpoints (create, update, search,
    annotate, info, delete, share).
    """

    def create(self, studyId, name, source, description, **options):
        """
        Create a sample.

        :param studyId: study the sample belongs to
        :param name: sample name
        :param source: sample source
        :param description: sample description
        """
        return self.general_method(ws_category1="samples", action="create", studyId=studyId, name=name,
                                   source=source, description=description, **options)

    def update(self, sampleId, **options):
        """
        Simple update of a sample via GET.

        :param sampleId: sample id
        :param options: fields to update
        """
        return self.general_method(ws_category1="samples", action="update", item_id1=sampleId, **options)

    def update_post(self, sampleId, data, **options):
        """
        Update a sample via POST.

        :param sampleId: sample id
        :param data: payload with the fields to update
        """
        return self.general_method(ws_category1="samples", action="update", item_id1=sampleId, data=data, **options)

    def search(self, studyId, **options):
        """
        Search samples within a study.

        :param studyId: study id
        :param options: kwargs whose keys are sample property names used to search.
        """
        return self.general_method(ws_category1="samples", action="search", studyId=studyId, **options)

    def search_by_annotation(self, studyId, variableSetName, *queries, **options):
        """
        Search samples by annotation queries.

        :param studyId: study id
        :param variableSetName: name of the variable set holding the annotations
        :param queries: annotation queries, <VariableId>:<operator><value>,<value>...
            (queries are ANDed together; the listed values are ORed).
            Examples: NAME:=Luke,Leia,Vader AGE:>20
        """
        variable = Variables()
        v_id = variable.search(studyId=studyId, name=variableSetName)[0]["id"]
        # **options added (backward-compatible) for consistency with
        # Individuals.search_by_annotation.
        return self.search(studyId=studyId, variableSetId=str(v_id), annotation=";".join(queries), **options)

    def annotate(self, sample_id, variableSetName, annotationSetName, studyId, json_file=None, data=None, update=True, **options):
        """
        Annotate a sample (POST method). The annotation payload is taken from
        ``data`` or, if ``data`` is None, loaded from ``json_file``.

        :param sample_id: id of the sample to annotate
        :param variableSetName: name of the variable set the annotation belongs to
        :param annotationSetName: base name for the annotation set
        :param studyId: study the variable set is looked up in
        :param json_file: path to a json file with the annotation data
        :param data: annotation data as a python dict (takes precedence)
        :param update: if True and an annotation set already exists for the
            variable set, update that set instead of creating a new one
        :raises Exception: if neither ``json_file`` nor ``data`` is provided
        """
        if data is None and json_file is None:
            raise Exception("please provide a json file or a data")
        if data is None:
            # Context manager guarantees the handle is closed even if json.load raises.
            with open(json_file) as fd:
                data = json.load(fd)
        variable = Variables()
        variableSetId = str(variable.search(studyId=studyId, name=variableSetName)[0]["id"])
        if update:
            # Reuse the name of the existing annotation set for this variable set, if any.
            for annt_set in self.info(str(sample_id))[0]["annotationSets"]:
                if annt_set["variableSetId"] == int(variableSetId):
                    annotationSetName = annt_set["name"]
                    return self.general_method(ws_category1="samples", action="update",
                                               item_id1=str(sample_id), ws_category2="annotationSets",
                                               item_id2=annotationSetName, data=data)
        # No existing set (or update disabled): create one with a unique timestamped name.
        # NOTE(review): the server parameter is spelled "annotateSetName" here -
        # confirm against the REST API before renaming.
        annotateSetName = annotationSetName + "_" + str(datetime.datetime.now()).replace(" ", "_").replace(":", "_")
        return self.general_method(ws_category1="samples", action="create", item_id1=str(sample_id),
                                   ws_category2="annotationSets", variableSetId=variableSetId,
                                   annotateSetName=annotateSetName, data=data, **options)

    def info(self, sampleId, **options):
        """
        Retrieve the information of a sample.

        :param sampleId: sample id
        """
        return self.general_method(ws_category1="samples", action="info", item_id1=sampleId, **options)

    def delete(self, sampleId, **options):
        """
        Delete a sample.

        :param sampleId: sample id
        """
        return self.general_method(ws_category1="samples", action="delete", item_id1=sampleId, **options)

    def share(self, userId, fileId, read=True, write=False, delete=False, unshare=False, **options):
        """
        Share a sample with another user.

        :param userId: id of the user the sample will be shared with
        :param fileId: sample id - notice this is the internal id in Catalog
        :param read: if True the user can read the sample
        :param write: if True the user can write the sample
        :param delete: if True the user can delete the sample
        :param unshare: if True the sample will be unshared for this user
        """
        return self.general_method("samples", "share", item_id1=fileId, unshare=str(unshare).lower(), userId=userId,
                                   read=str(read).lower(), write=str(write).lower(), delete=str(delete).lower(),
                                   **options)
class Individuals(WS):
    """
    Web-service client for the "individuals" endpoints (create, search, info,
    delete, annotate).
    """

    def create(self, studyId, name, family, fatherId, motherId, gender, **options):
        """
        Create an individual.

        :param studyId: study the individual belongs to
        :param name: name of the individual
        :param family: specify 0 if no family
        :param fatherId: specify as 0 (or None) if no father id
        :param motherId: specify as 0 (or None) if no mother id
        :param gender: MALE, FEMALE or UNKNOWN (anything else becomes UNKNOWN)
        """
        if fatherId is None:
            fatherId = "0"
        if motherId is None:
            motherId = "0"
        gender = gender.upper()
        # Anything other than MALE/FEMALE is normalised to UNKNOWN.
        if gender not in ("MALE", "FEMALE"):
            gender = "UNKNOWN"
        return self.general_method(ws_category1="individuals", action="create", name=name, family=family, fatherId=fatherId,
                                   motherId=motherId, gender=gender, studyId=studyId, **options)

    def search(self, studyId, **options):
        """
        Search individuals within a study.

        :param studyId: study id
        """
        return self.general_method(ws_category1="individuals", action="search", studyId=studyId, **options)

    def info(self, individualId, **options):
        """
        Retrieve the information of an individual.

        :param individualId: individual id
        """
        return self.general_method(ws_category1="individuals", action="info", item_id1=individualId, **options)

    def delete(self, individualId, **options):
        """
        Delete an individual.

        :param individualId: individual id
        """
        return self.general_method(ws_category1="individuals", action="delete", item_id1=individualId, **options)

    def annotate(self, individual_id, variableSetName, annotationSetName, studyId, json_file=None, data=None,
                 update=True):
        """
        Annotate an individual (POST method). The annotation payload is taken
        from ``data`` or, if ``data`` is None, loaded from ``json_file``.

        :param individual_id: id of the individual to annotate
        :param variableSetName: name of the variable set the annotation belongs to
        :param annotationSetName: base name for the annotation set
        :param studyId: study the variable set is looked up in
        :param json_file: path to a json file with the annotation data
        :param data: annotation data as a python dict (takes precedence)
        :param update: if True and an annotation set already exists for the
            variable set, update that set instead of creating a new one
        :raises Exception: if neither ``json_file`` nor ``data`` is provided
        """
        if data is None and json_file is None:
            raise Exception("please provide a json file or a data")
        if data is None:
            # Context manager guarantees the handle is closed even if json.load raises.
            with open(json_file) as fd:
                data = json.load(fd)
        variable = Variables()
        variableSetId = str(variable.search(studyId=studyId, name=variableSetName)[0]["id"])
        if update:
            # Reuse the name of the existing annotation set for this variable set, if any.
            for annt_set in self.info(str(individual_id))[0]["annotationSets"]:
                if annt_set["variableSetId"] == int(variableSetId):
                    annotationSetName = annt_set["name"]
                    return self.general_method(ws_category1="individuals", action="update",
                                               item_id1=str(individual_id), ws_category2="annotationSets",
                                               item_id2=annotationSetName, data=data)
        # No existing set (or update disabled): create one with a unique timestamped name.
        annotationSetName = annotationSetName + "_" + str(datetime.datetime.now()).replace(" ", "_").replace(":", "_")
        return self.general_method(ws_category1="individuals", action="create", item_id1=str(individual_id),
                                   ws_category2="annotationSets", variableSetId=variableSetId,
                                   annotateSetName=annotationSetName, data=data)

    def search_by_annotation(self, studyId, variableSetName, *queries, **options):
        """
        Search individuals by annotation queries.

        :param studyId: study id
        :param variableSetName: name of the variable set holding the annotations
        :param queries: annotation queries, <VariableId>:<operator><value>,<value>...
            (queries are ANDed together; the listed values are ORed).
            Examples: NAME:=Luke,Leia,Vader AGE:>20
        """
        variable = Variables()
        v_id = variable.search(studyId=studyId, name=variableSetName)[0]["id"]
        return self.search(studyId=studyId, variableSetId=str(v_id), annotation=";".join(queries), **options)
class Projects(WS):
    """
    Web-service client for the "projects" endpoints (create, info, update,
    delete, studies).
    """
    def create(self, userId, name, alias, description, organization, **options):
        """
        Create a new project.

        :param userId: owner of the project
        :param name: project name
        :param alias: project alias
        :param description: project description
        :param organization: organization the project belongs to
        """
        return self.general_method(ws_category1="projects", action="create", userId=userId, name=name, alias=alias,
                                   description=description, organization=organization, **options
                                   )
    def info(self, projectId, **options):
        """
        Retrieve the information of a project.

        :param projectId: id of the project
        """
        return self.general_method(ws_category1="projects", action="info", item_id1=projectId, **options)
    def update(self, projectId, **options):
        """
        Update a project; the fields to modify (name, description,
        organization, status, attributes, ...) go in ``options``.

        :param projectId: id of the project
        """
        return self.general_method(ws_category1="projects", action="update", item_id1=projectId, **options)
    def delete(self, projectId, **options):
        """
        Delete a project.

        :param projectId: id of the project
        """
        return self.general_method(ws_category1="projects", action="delete", item_id1=projectId, **options)
    def studies(self, projectId, **options):
        """
        List the studies contained in a project.

        :param projectId: id of the project
        """
        return self.general_method(ws_category1="projects", action="studies", item_id1=projectId, **options)
class Studies(WS):
    """
    Web-service client for the "studies" endpoints (search, create, info,
    files, jobs, samples, variants, alignments, status, update, delete).
    """
    def search(self, **options):
        """
        Search studies based on the given criteria.

        :param options: kwargs whose keys are study property names used to search.
        """
        return self.general_method(ws_category1="studies", action="search", **options)
    def create(self, projectId, name, alias, **options):
        """
        Create a new study inside a project.

        :param projectId: id of the parent project
        :param name: study name
        :param alias: study alias
        """
        return self.general_method(ws_category1="studies", action="create", projectId=projectId, name=name,
                                   alias=alias, **options)
    def info(self, studyId, **options):
        """
        Retrieve the information of a study.

        :param studyId: id of the study
        """
        return self.general_method(ws_category1="studies", action="info", item_id1=studyId, **options)
    def files(self, studyId, **options):
        """
        List the files of a study.

        :param studyId: id of the study
        """
        return self.general_method(ws_category1="studies", action="files", item_id1=studyId, **options)
    def jobs(self, studyId, **options):
        """
        List the jobs of a study.

        :param studyId: id of the study
        """
        return self.general_method(ws_category1="studies", action="jobs", item_id1=studyId, **options)
    def samples(self, studyId, **options):
        """
        List the samples of a study.

        :param studyId: id of the study
        """
        return self.general_method(ws_category1="studies", action="samples", item_id1=studyId, **options)
    def variants(self, studyId, **filters):
        """
        Fetch the variants of a study.

        :param filters: filters applied to the variant fetch
        :param studyId: id of the study
        """
        return self.general_method(ws_category1="studies", action="variants", item_id1=studyId, **filters)
    def alignments(self, studyId, **filters):
        """
        Fetch the alignments of a study.

        :param filters: filters applied to the alignment fetch
        :param studyId: id of the study
        """
        return self.general_method(ws_category1="studies", action="alignments", item_id1=studyId, **filters)
    def status(self, studyId, **options):
        """
        Retrieve the status of a study.

        :param studyId: id of the study
        """
        return self.general_method(ws_category1="studies", action="status", item_id1=studyId, **options)
    def update(self, projectId, **options):
        """
        Update a study; the fields to modify go in ``options``.

        NOTE(review): the parameter is named ``projectId`` but it is sent as the
        study id - presumably it should be called ``studyId``; confirm with callers.

        :param projectId: id of the study to update
        :param options: fields to update
        """
        return self.general_method(ws_category1="studies", action="update", item_id1=projectId, **options)
    def delete(self, studyId, **options):
        """
        Delete a study.

        :param studyId: id of the study
        """
        return self.general_method(ws_category1="studies", action="delete", item_id1=studyId, **options)
class Jobs(WS):
    """
    Web-service client for the "jobs" endpoints (create, info, visit, delete).
    """

    def create(self, studyId, name, toolId, jobId, **options):
        """
        Create a job.

        :param studyId: study the job belongs to
        :param name: job name
        :param toolId: tool executed by the job
        :param jobId: id for the job
        """
        return self.general_method(ws_category1="jobs", action="create", name=name, studyId=studyId,
                                   toolId=toolId, jobId=jobId, **options)

    def info(self, jobId, **options):
        """
        Retrieve the information of a job.

        :param jobId: id of the job
        """
        return self.general_method(ws_category1="jobs", action="info", item_id1=jobId, **options)

    def visit(self, jobId, **options):
        """
        Visit a job.

        :param jobId: id of the job
        """
        return self.general_method(ws_category1="jobs", action="visit", item_id1=jobId, **options)

    def delete(self, jobId, **options):
        """
        Delete a job.

        :param jobId: id of the job
        """
        return self.general_method(ws_category1="jobs", action="delete", item_id1=jobId, **options)

    def create_post(self, studyId, json_file=None, data=None, **options):
        """
        Create a job via POST; the payload is taken from ``data`` or, if it is
        None, loaded from ``json_file``.

        :param studyId: study the job belongs to
        :param json_file: path to a json file with the job definition
        :param data: job definition as a python dict (takes precedence)
        :raises Exception: if neither ``json_file`` nor ``data`` is provided
        """
        if data is None and json_file is None:
            raise Exception("please provide a json file or a data")
        if data is None:
            # Context manager guarantees the handle is closed even if json.load raises.
            with open(json_file) as fd:
                data = json.load(fd)
        return self.general_method(ws_category1="jobs", action="create", studyId=studyId, data=data, **options)
# class Cohorts(WS):
# """
# This class contains method for cohorts ws (i.e, link, create)
# """
# # TODO: Check this method 2 variables unused
# def create(self, studyId, name, variableSetId, type, sampleIds):
# """
#
# :param studyId:
# :param sid:
# :param name:
# :param variableSetId:
# :param type: can be PAIRED, CASE_CONTROL, CASE_SET, CONTROL_SET, PAIRED_TUMOR, FAMILY, TRIO, COLLECTION
# :param sampleIds:
# """
#
# url = os.path.join(self.pre_url, "cohorts",
# "create?sid=" + self.session_id + "&name=" + name + "&type=" + type + "&sampleIds=" + sampleIds)
# result = self.run_ws(url)
# return result["id"]
#
# def samples(self, cohortId):
# """
#
# method to get samples that are part of a cohort
#
# :param cohortId:
# :return: full result which can be looped over
# """
#
# url = os.path.join(self.pre_url, "cohorts", cohortId, "samples?sid=" + self.session_id)
# result = self.run_ws(url)
# return result
#
# def update(self, cohortId, sampleIds):
# """
# This will be to add or remove samples from a cohort
#
# :param cohortId:
# :param sampleIds: full list of sampleIds, i.e. old ones and new ones (comma separated)
# """
#
# url = os.path.join(self.pre_url, "cohorts", cohortId, "update?sid=" + self.session_id + "&samples=" + sampleIds)
# result = self.run_ws(url)
# return result
#
# def info(self, cohortId):
# """
# This will be to add or remove samples from a cohort
#
# :param cohortId:
#
# """
#
# url = os.path.join(self.pre_url, "cohorts", cohortId, "info?sid=" + self.session_id)
# result = self.run_ws(url)
# return result
#
# def delete(self, cohortId):
# """
#
# method to get samples that are part of a cohort
#
# :param cohortId:
# :return: full result which can be looped over
# """
#
# url = os.path.join(self.pre_url, "cohorts", cohortId, "delete?sid=" + self.session_id)
# result = self.run_ws(url)
# return result
# --- file boundary artifact from concatenation ---
"""
github3.repos
=============
This module contains the classes relating to repositories.
"""
from json import dumps
from base64 import b64decode
from collections import Callable
from github3.events import Event
from github3.issues import Issue, IssueEvent, Label, Milestone, issue_params
from github3.git import Blob, Commit, Reference, Tag, Tree
from github3.models import GitHubObject, GitHubCore, BaseComment, BaseCommit
from github3.pulls import PullRequest
from github3.users import User, Key
from github3.decorators import requires_auth
from github3.notifications import Subscription, Thread
class Repository(GitHubCore):
"""The :class:`Repository <Repository>` object. It represents how GitHub
sends information about repositories.
"""
    def __init__(self, repo, session=None):
        """Build a Repository from the decoded JSON payload ``repo``.

        :param dict repo: decoded JSON representation of a repository as
            returned by the GitHub API
        :param session: (optional), session (or GitHubCore-derived object)
            used for subsequent requests
        """
        super(Repository, self).__init__(repo, session)
        #: URL used to clone via HTTPS.
        self.clone_url = repo.get('clone_url', '')
        #: ``datetime`` object representing when the Repository was created.
        self.created_at = self._strptime(repo.get('created_at'))
        #: Description of the repository.
        self.description = repo.get('description', '')
        # The number of forks
        #: The number of forks made of this repository.
        self.forks = repo.get('forks', 0)
        #: Is this repository a fork?
        self.fork = repo.get('fork')
        #: Full name as login/name
        self.full_name = repo.get('full_name', '')
        # Clone url using git, e.g. git://github.com/sigmavirus24/github3.py
        #: Plain git url for an anonymous clone.
        self.git_url = repo.get('git_url', '')
        #: Whether or not this repository has downloads enabled
        self.has_downloads = repo.get('has_downloads')
        #: Whether or not this repository has an issue tracker
        self.has_issues = repo.get('has_issues')
        #: Whether or not this repository has the wiki enabled
        self.has_wiki = repo.get('has_wiki')
        # e.g. https://sigmavirus24.github.com/github3.py
        #: URL of the home page for the project.
        self.homepage = repo.get('homepage', '')
        # e.g. https://github.com/sigmavirus24/github3.py
        #: URL of the project at GitHub.
        self.html_url = repo.get('html_url', '')
        #: Unique id of the repository.
        self.id = repo.get('id', 0)
        #: Language property.
        self.language = repo.get('language', '')
        #: Mirror property.
        self.mirror_url = repo.get('mirror_url', '')
        # Repository name, e.g. github3.py
        #: Name of the repository.
        self.name = repo.get('name', '')
        # Number of open issues
        #: Number of open issues on the repository.
        self.open_issues = repo.get('open_issues', 0)
        # Repository owner's name
        #: :class:`User <github3.users.User>` object representing the
        #: repository owner.
        self.owner = User(repo.get('owner', {}), self._session)
        #: Is this repository private?
        self.private = repo.get('private')
        #: ``datetime`` object representing the last time commits were pushed
        #: to the repository.
        self.pushed_at = self._strptime(repo.get('pushed_at'))
        #: Size of the repository.
        self.size = repo.get('size', 0)
        # SSH url e.g. git@github.com/sigmavirus24/github3.py
        #: URL to clone the repository via SSH.
        self.ssh_url = repo.get('ssh_url', '')
        #: If it exists, url to clone the repository via SVN.
        self.svn_url = repo.get('svn_url', '')
        #: ``datetime`` object representing the last time the repository was
        #: updated.
        self.updated_at = self._strptime(repo.get('updated_at'))
        self._api = repo.get('url', '')
        # The number of watchers
        #: Number of users watching the repository.
        self.watchers = repo.get('watchers', 0)
        #: Parent of this fork, if it exists :class:`Repository`
        # NOTE(review): ``self`` (not ``self._session``) is passed as the
        # session argument here and below — confirm GitHubCore accepts a
        # GitHubCore-derived object in that position.
        self.source = repo.get('source')
        if self.source:
            self.source = Repository(self.source, self)
        #: Parent of this fork, if it exists :class:`Repository`
        self.parent = repo.get('parent')
        if self.parent:
            self.parent = Repository(self.parent, self)
        #: default branch for the repository
        self.master_branch = repo.get('master_branch', '')
def __repr__(self):
return '<Repository [{0}]>'.format(self)
def __str__(self):
return self.full_name
    def _update_(self, repo):
        # Re-initialize this object in place from a fresh JSON payload,
        # keeping the existing session.
        self.__init__(repo, self._session)
def _create_pull(self, data):
self._remove_none(data)
json = None
if data:
url = self._build_url('pulls', base_url=self._api)
json = self._json(self._post(url, data=dumps(data)), 201)
return PullRequest(json, self._session) if json else None
@requires_auth
def add_collaborator(self, login):
"""Add ``login`` as a collaborator to a repository.
:param str login: (required), login of the user
:returns: bool -- True if successful, False otherwise
"""
resp = False
if login:
url = self._build_url('collaborators', login, base_url=self._api)
resp = self._boolean(self._put(url), 204, 404)
return resp
def archive(self, format, path='', ref='master'):
"""Get the tarball or zipball archive for this repo at ref.
:param str format: (required), accepted values: ('tarball',
'zipball')
:param path: (optional), path where the file should be saved
to, default is the filename provided in the headers and will be
written in the current directory.
it can take a file-like object as well
:type path: str, file
:param str ref: (optional)
:returns: bool -- True if successful, False otherwise
"""
resp = None
written = False
if format in ('tarball', 'zipball'):
url = self._build_url(format, ref, base_url=self._api)
resp = self._get(url, allow_redirects=True, stream=True)
pre_opened = False
if resp and self._boolean(resp, 200, 404):
fd = None
if path:
if isinstance(getattr(path, 'write', None), Callable):
pre_opened = True
fd = path
else:
fd = open(path, 'wb')
else:
header = resp.headers['content-disposition']
i = header.find('filename=') + len('filename=')
fd = open(header[i:], 'wb')
for chunk in resp.iter_content():
fd.write(chunk)
if not pre_opened:
fd.close()
written = True
return written
def blob(self, sha):
"""Get the blob indicated by ``sha``.
:param str sha: (required), sha of the blob
:returns: :class:`Blob <github3.git.Blob>` if successful, otherwise
None
"""
url = self._build_url('git', 'blobs', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return Blob(json) if json else None
def branch(self, name):
"""Get the branch ``name`` of this repository.
:param str name: (required), branch name
:type name: str
:returns: :class:`Branch <Branch>`
"""
json = None
if name:
url = self._build_url('branches', name, base_url=self._api)
json = self._json(self._get(url), 200)
return Branch(json, self) if json else None
def commit(self, sha):
"""Get a single (repo) commit. See :func:`git_commit` for the Git Data
Commit.
:param str sha: (required), sha of the commit
:returns: :class:`RepoCommit <RepoCommit>` if successful, otherwise
None
"""
url = self._build_url('commits', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return RepoCommit(json, self) if json else None
def commit_comment(self, comment_id):
"""Get a single commit comment.
:param int comment_id: (required), id of the comment used by GitHub
:returns: :class:`RepoComment <RepoComment>` if successful, otherwise
None
"""
url = self._build_url('comments', str(comment_id), base_url=self._api)
json = self._json(self._get(url), 200)
return RepoComment(json, self) if json else None
def compare_commits(self, base, head):
"""Compare two commits.
:param str base: (required), base for the comparison
:param str head: (required), compare this against base
:returns: :class:`Comparison <Comparison>` if successful, else None
"""
url = self._build_url('compare', base + '...' + head,
base_url=self._api)
json = self._json(self._get(url), 200)
return Comparison(json) if json else None
def contents(self, path):
"""Get the contents of the file pointed to by ``path``.
:param str path: (required), path to file, e.g.
github3/repo.py
:returns: :class:`Contents <Contents>` if successful, else None
"""
url = self._build_url('contents', path, base_url=self._api)
resp = self._get(url)
if self._boolean(resp, 200, 404):
return Contents(self._json(resp, 200))
else:
return None
@requires_auth
def create_blob(self, content, encoding):
"""Create a blob with ``content``.
:param str content: (required), content of the blob
:param str encoding: (required), ('base64', 'utf-8')
:returns: string of the SHA returned
"""
sha = ''
if encoding in ('base64', 'utf-8') and content:
url = self._build_url('git', 'blobs', base_url=self._api)
data = {'content': content, 'encoding': encoding}
json = self._json(self._post(url, data=dumps(data)), 201)
if json:
sha = json.get('sha')
return sha
@requires_auth
def create_comment(self, body, sha, path='', position=1, line=1):
"""Create a comment on a commit.
:param str body: (required), body of the message
:param str sha: (required), commit id
:param str path: (optional), relative path of the file to comment
on
:param str position: (optional), line index in the diff to comment on
:param int line: (optional), line number of the file to comment on,
default: 1
:returns: :class:`RepoComment <RepoComment>` if successful else None
"""
line = int(line)
position = int(position)
json = None
if body and sha and line > 0:
data = {'body': body, 'commit_id': sha, 'line': line,
'path': path, 'position': position}
url = self._build_url('commits', sha, 'comments',
base_url=self._api)
json = self._json(self._post(url, data=dumps(data)), 201)
return RepoComment(json, self) if json else None
@requires_auth
def create_commit(self, message, tree, parents, author={}, committer={}):
"""Create a commit on this repository.
:param str message: (required), commit message
:param str tree: (required), SHA of the tree object this
commit points to
:param list parents: (required), SHAs of the commits that were parents
of this commit. If empty, the commit will be written as the root
commit. Even if there is only one parent, this should be an
array.
:param dict author: (optional), if omitted, GitHub will
use the authenticated user's credentials and the current
time. Format: {'name': 'Committer Name', 'email':
'name@example.com', 'date': 'YYYY-MM-DDTHH:MM:SS+HH:00'}
:param dict committer: (optional), if ommitted, GitHub will use the
author parameters. Should be the same format as the author
parameter.
:returns: :class:`Commit <github3.git.Commit>` if successful, else
None
"""
json = None
if message and tree and isinstance(parents, list):
url = self._build_url('git', 'commits', base_url=self._api)
data = {'message': message, 'tree': tree, 'parents': parents,
'author': author, 'committer': committer}
json = self._json(self._post(url, data=dumps(data)), 201)
return Commit(json, self) if json else None
@requires_auth
def create_fork(self, organization=None):
"""Create a fork of this repository.
:param str organization: (required), login for organization to create
the fork under
:returns: :class:`Repository <Repository>` if successful, else None
"""
url = self._build_url('forks', base_url=self._api)
if organization:
resp = self._post(url, data=dumps({'organization': organization}))
else:
resp = self._post(url)
json = self._json(resp, 202)
return Repository(json, self) if json else None
@requires_auth
def create_hook(self, name, config, events=['push'], active=True):
"""Create a hook on this repository.
:param str name: (required), name of the hook
:param dict config: (required), key-value pairs which act as settings
for this hook
:param list events: (optional), events the hook is triggered for
:param bool active: (optional), whether the hook is actually
triggered
:returns: :class:`Hook <Hook>` if successful, else None
"""
json = None
if name and config and isinstance(config, dict):
url = self._build_url('hooks', base_url=self._api)
data = {'name': name, 'config': config, 'events': events,
'active': active}
json = self._json(self._post(url, data=dumps(data)), 201)
return Hook(json, self) if json else None
@requires_auth
def create_issue(self,
title,
body=None,
assignee=None,
milestone=None,
labels=None):
"""Creates an issue on this repository.
:param str title: (required), title of the issue
:param str body: (optional), body of the issue
:param str assignee: (optional), login of the user to assign the
issue to
:param int milestone: (optional), number of the milestone to attribute
this issue to (e.g. ``m`` is a Milestone object, ``m.number`` is
what you pass here.)
:param labels: (optional), labels to apply to this
issue
:type labels: list of strings
:returns: :class:`Issue <github3.issues.Issue>` if successful, else
None
"""
issue = {'title': title, 'body': body, 'assignee': assignee,
'milestone': milestone, 'labels': labels}
self._remove_none(issue)
json = None
if issue:
url = self._build_url('issues', base_url=self._api)
json = self._json(self._post(url, data=dumps(issue)), 201)
return Issue(json, self) if json else None
@requires_auth
def create_key(self, title, key):
"""Create a deploy key.
:param str title: (required), title of key
:param str key: (required), key text
:returns: :class:`Key <github3.users.Key>` if successful, else None
"""
json = None
if title and key:
data = {'title': title, 'key': key}
url = self._build_url('keys', base_url=self._api)
json = self._json(self._post(url, data=dumps(data)), 201)
return Key(json, self) if json else None
@requires_auth
def create_label(self, name, color):
"""Create a label for this repository.
:param str name: (required), name to give to the label
:param str color: (required), value of the color to assign to the
label
:returns: :class:`Label <github3.issues.Label>` if successful, else
None
"""
json = None
if name and color:
data = {'name': name, 'color': color.strip('#')}
url = self._build_url('labels', base_url=self._api)
json = self._json(self._post(url, data=dumps(data)), 201)
return Label(json, self) if json else None
@requires_auth
def create_milestone(self, title, state=None, description=None,
due_on=None):
"""Create a milestone for this repository.
:param str title: (required), title of the milestone
:param str state: (optional), state of the milestone, accepted
values: ('open', 'closed'), default: 'open'
:param str description: (optional), description of the milestone
:param str due_on: (optional), ISO 8601 formatted due date
:returns: :class:`Milestone <github3.issues.Milestone>` if successful,
else None
"""
url = self._build_url('milestones', base_url=self._api)
if state not in ('open', 'closed'):
state = None
data = {'title': title, 'state': state,
'description': description, 'due_on': due_on}
self._remove_none(data)
json = None
if data:
json = self._json(self._post(url, data=dumps(data)), 201)
return Milestone(json, self) if json else None
@requires_auth
def create_pull(self, title, base, head, body=None):
"""Create a pull request using commits from ``head`` and comparing
against ``base``.
:param str title: (required)
:param str base: (required), e.g., 'username:branch', or a sha
:param str head: (required), e.g., 'master', or a sha
:param str body: (optional), markdown formatted description
:returns: :class:`PullRequest <github3.pulls.PullRequest>` if
successful, else None
"""
data = {'title': title, 'body': body, 'base': base,
'head': head}
return self._create_pull(data)
@requires_auth
def create_pull_from_issue(self, issue, base, head):
"""Create a pull request from issue #``issue``.
:param int issue: (required), issue number
:param str base: (required), e.g., 'username:branch', or a sha
:param str head: (required), e.g., 'master', or a sha
:returns: :class:`PullRequest <github3.pulls.PullRequest>` if
successful, else None
"""
if issue > 0:
data = {'issue': issue, 'base': base, 'head': head}
return self._create_pull(data)
return None
@requires_auth
def create_ref(self, ref, sha):
"""Create a reference in this repository.
:param str ref: (required), fully qualified name of the reference,
e.g. ``refs/heads/master``. If it doesn't start with ``refs`` and
contain at least two slashes, GitHub's API will reject it.
:param str sha: (required), SHA1 value to set the reference to
:returns: :class:`Reference <github3.git.Reference>` if successful
else None
"""
json = None
if ref and ref.count('/') >= 2 and sha:
data = {'ref': ref, 'sha': sha}
url = self._build_url('git', 'refs', base_url=self._api)
json = self._json(self._post(url, data=dumps(data)), 201)
return Reference(json, self) if json else None
@requires_auth
def create_status(self, sha, state, target_url='', description=''):
"""Create a status object on a commit.
:param str sha: (required), SHA of the commit to create the status on
:param str state: (required), state of the test; only the following
are accepted: 'pending', 'success', 'error', 'failure'
:param str target_url: (optional), URL to associate with this status.
:param str description: (optional), short description of the status
"""
json = {}
if sha and state:
data = {'state': state, 'target_url': target_url,
'description': description}
url = self._build_url('statuses', sha, base_url=self._api)
json = self._json(self._post(url, data=dumps(data)), 201)
return Status(json) if json else None
@requires_auth
def create_tag(self, tag, message, sha, obj_type, tagger,
lightweight=False):
"""Create a tag in this repository.
:param str tag: (required), name of the tag
:param str message: (required), tag message
:param str sha: (required), SHA of the git object this is tagging
:param str obj_type: (required), type of object being tagged, e.g.,
'commit', 'tree', 'blob'
:param dict tagger: (required), containing the name, email of the
tagger and the date it was tagged
:param bool lightweight: (optional), if False, create an annotated
tag, otherwise create a lightweight tag (a Reference).
:returns: If lightweight == False: :class:`Tag <github3.git.Tag>` if
successful, else None. If lightweight == True: :class:`Reference
<Reference>`
"""
if lightweight and tag and sha:
return self.create_ref('refs/tags/' + tag, sha)
json = None
if tag and message and sha and obj_type and len(tagger) == 3:
data = {'tag': tag, 'message': message, 'object': sha,
'type': obj_type, 'tagger': tagger}
url = self._build_url('git', 'tags', base_url=self._api)
json = self._json(self._post(url, data=dumps(data)), 201)
if json:
self.create_ref('refs/tags/' + tag, sha)
return Tag(json) if json else None
@requires_auth
def create_tree(self, tree, base_tree=''):
"""Create a tree on this repository.
:param list tree: (required), specifies the tree structure.
Format: [{'path': 'path/file', 'mode':
'filemode', 'type': 'blob or tree', 'sha': '44bfc6d...'}]
:param str base_tree: (optional), SHA1 of the tree you want
to update with new data
:returns: :class:`Tree <github3.git.Tree>` if successful, else None
"""
json = None
if tree and isinstance(tree, list):
data = {'tree': tree, 'base_tree': base_tree}
url = self._build_url('git', 'trees', base_url=self._api)
json = self._json(self._post(url, data=dumps(data)), 201)
return Tree(json) if json else None
@requires_auth
def delete(self):
"""Delete this repository.
:returns: bool -- True if successful, False otherwise
"""
return self._boolean(self._delete(self._api), 204, 404)
@requires_auth
def delete_key(self, key_id):
"""Delete the key with the specified id from your deploy keys list.
:returns: bool -- True if successful, False otherwise
"""
if int(key_id) <= 0:
return False
url = self._build_url('keys', str(key_id), base_url=self._api)
return self._boolean(self._delete(url), 204, 404)
def download(self, id_num):
"""Get a single download object by its id.
.. warning::
On 2012-03-11, GitHub will be deprecating the Downloads API. This
method will no longer work.
:param int id_num: (required), id of the download
:returns: :class:`Download <Download>` if successful, else None
"""
json = None
if int(id_num) > 0:
url = self._build_url('downloads', str(id_num),
base_url=self._api)
json = self._json(self._get(url), 200)
return Download(json, self) if json else None
@requires_auth
def edit(self,
name,
description=None,
homepage=None,
private=None,
has_issues=None,
has_wiki=None,
has_downloads=None,
default_branch=None):
"""Edit this repository.
:param str name: (required), name of the repository
:param str description: (optional), If not ``None``, change the
description for this repository. API default: ``None`` - leave
value unchanged.
:param str homepage: (optional), If not ``None``, change the homepage
for this repository. API default: ``None`` - leave value unchanged.
:param bool private: (optional), If ``True``, make the repository
private. If ``False``, make the repository public. API default:
``None`` - leave value unchanged.
:param bool has_issues: (optional), If ``True``, enable issues for
this repository. If ``False``, disable issues for this repository.
API default: ``None`` - leave value unchanged.
:param bool has_wiki: (optional), If ``True``, enable the wiki for
this repository. If ``False``, disable the wiki for this
repository. API default: ``None`` - leave value unchanged.
:param bool has_downloads: (optional), If ``True``, enable downloads
for this repository. If ``False``, disable downloads for this
repository. API default: ``None`` - leave value unchanged.
:param str default_branch: (optional), If not ``None``, change the
default branch for this repository. API default: ``None`` - leave
value unchanged.
:returns: bool -- True if successful, False otherwise
"""
edit = {'name': name, 'description': description, 'homepage': homepage,
'private': private, 'has_issues': has_issues,
'has_wiki': has_wiki, 'has_downloads': has_downloads,
'default_branch': default_branch}
self._remove_none(edit)
json = None
if edit:
json = self._json(self._patch(self._api, data=dumps(edit)), 200)
self._update_(json)
return True
return False
def is_collaborator(self, login):
"""Check to see if ``login`` is a collaborator on this repository.
:param str login: (required), login for the user
:returns: bool -- True if successful, False otherwise
"""
if login:
url = self._build_url('collaborators', login, base_url=self._api)
return self._boolean(self._get(url), 204, 404)
return False
def git_commit(self, sha):
"""Get a single (git) commit.
:param str sha: (required), sha of the commit
:returns: :class:`Commit <github3.git.Commit>` if successful,
otherwise None
"""
json = {}
if sha:
url = self._build_url('git', 'commits', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return Commit(json, self) if json else None
@requires_auth
def hook(self, id_num):
"""Get a single hook.
:param int id_num: (required), id of the hook
:returns: :class:`Hook <Hook>` if successful, else None
"""
json = None
if int(id_num) > 0:
url = self._build_url('hooks', str(id_num), base_url=self._api)
json = self._json(self._get(url), 200)
return Hook(json, self) if json else None
def is_assignee(self, login):
"""Check if the user is a possible assignee for an issue on this
repository.
:returns: :class:`bool`
"""
if not login:
return False
url = self._build_url('assignees', login, base_url=self._api)
return self._boolean(self._get(url), 204, 404)
def issue(self, number):
"""Get the issue specified by ``number``.
:param int number: (required), number of the issue on this repository
:returns: :class:`Issue <github3.issues.Issue>` if successful, else
None
"""
json = None
if int(number) > 0:
url = self._build_url('issues', str(number), base_url=self._api)
json = self._json(self._get(url), 200)
return Issue(json, self) if json else None
@requires_auth
def key(self, id_num):
"""Get the specified deploy key.
:param int id_num: (required), id of the key
:returns: :class:`Key <Key>` if successful, else None
"""
json = None
if int(id_num) > 0:
url = self._build_url('keys', str(id_num), base_url=self._api)
json = self._json(self._get(url), 200)
return Key(json, self) if json else None
def label(self, name):
"""Get the label specified by ``name``
:param str name: (required), name of the label
:returns: :class:`Label <github3.issues.Label>` if successful, else
None
"""
json = None
if name:
url = self._build_url('labels', name, base_url=self._api)
json = self._json(self._get(url), 200)
return Label(json, self) if json else None
def iter_assignees(self, number=-1):
"""Iterate over all available assignees to which an issue may be
assigned.
:param int number: (optional), number of assignees to return. Default:
-1 returns all available assignees
:returns: list of :class:`User <github3.users.User>`\ s
"""
url = self._build_url('assignees', base_url=self._api)
return self._iter(int(number), url, User)
def iter_branches(self, number=-1):
"""Iterate over the branches in this repository.
:param int number: (optional), number of branches to return. Default:
-1 returns all branches
:returns: list of :class:`Branch <Branch>`\ es
"""
url = self._build_url('branches', base_url=self._api)
return self._iter(int(number), url, Branch)
def iter_comments(self, number=-1):
"""Iterate over comments on all commits in the repository.
:param int number: (optional), number of comments to return. Default:
-1 returns all comments
:returns: list of :class:`RepoComment <RepoComment>`\ s
"""
url = self._build_url('comments', base_url=self._api)
return self._iter(int(number), url, RepoComment)
def iter_comments_on_commit(self, sha, number=1):
"""Iterate over comments for a single commit.
:param sha: (required), sha of the commit to list comments on
:type sha: str
:param int number: (optional), number of comments to return. Default:
-1 returns all comments
:returns: list of :class:`RepoComment <RepoComment>`\ s
"""
url = self._build_url('commits', sha, 'comments', base_url=self._api)
return self._iter(int(number), url, RepoComment)
def iter_commits(self, sha=None, path=None, author=None, number=-1):
"""Iterate over commits in this repository.
:param str sha: (optional), sha or branch to start listing commits
from
:param str path: (optional), commits containing this path will be
listed
:param str author: (optional), GitHub login, real name, or email to
filter commits by (using commit author)
:param int number: (optional), number of comments to return. Default:
-1 returns all comments
:returns: list of :class:`RepoCommit <RepoCommit>`\ s
"""
params = {'sha': sha, 'path': path, 'author': author}
self._remove_none(params)
url = self._build_url('commits', base_url=self._api)
return self._iter(int(number), url, RepoCommit, params=params)
def iter_contributors(self, anon=False, number=-1):
"""Iterate over the contributors to this repository.
:param bool anon: (optional), True lists anonymous contributors as
well
:param int number: (optional), number of contributors to return.
Default: -1 returns all contributors
:returns: list of :class:`User <github3.users.User>`\ s
"""
url = self._build_url('contributors', base_url=self._api)
params = {}
if anon:
params = {'anon': True}
return self._iter(int(number), url, User, params=params)
def iter_downloads(self, number=-1):
"""Iterate over available downloads for this repository.
.. warning::
On 2012-03-11, GitHub will be deprecating the Downloads API. This
method will no longer work.
:param int number: (optional), number of downloads to return. Default:
-1 returns all available downloads
:returns: list of :class:`Download <Download>`\ s
"""
url = self._build_url('downloads', base_url=self._api)
return self._iter(int(number), url, Download)
def iter_events(self, number=-1):
"""Iterate over events on this repository.
:param int number: (optional), number of events to return. Default: -1
returns all available events
:returns: list of :class:`Event <github3.events.Event>`\ s
"""
url = self._build_url('events', base_url=self._api)
return self._iter(int(number), url, Event)
def iter_forks(self, sort='', number=-1):
"""Iterate over forks of this repository.
:param str sort: (optional), accepted values:
('newest', 'oldest', 'watchers'), API default: 'newest'
:param int number: (optional), number of forks to return. Default: -1
returns all forks
:returns: list of :class:`Repository <Repository>`
"""
url = self._build_url('forks', base_url=self._api)
params = {}
if sort in ('newest', 'oldest', 'watchers'):
params = {'sort': sort}
return self._iter(int(number), url, Repository, params=params)
@requires_auth
def iter_hooks(self, number=-1):
"""Iterate over hooks registered on this repository.
:param int number: (optional), number of hoks to return. Default: -1
returns all hooks
:returns: list of :class:`Hook <Hook>`\ s
"""
url = self._build_url('hooks', base_url=self._api)
return self._iter(int(number), url, Hook)
def iter_issues(self,
milestone=None,
state=None,
assignee=None,
mentioned=None,
labels=None,
sort=None,
direction=None,
since=None,
number=-1):
"""Iterate over issues on this repo based upon parameters passed.
:param int milestone: (optional), 'none', or '*'
:param str state: (optional), accepted values: ('open', 'closed')
:param str assignee: (optional), 'none', '*', or login name
:param str mentioned: (optional), user's login name
:param str labels: (optional), comma-separated list of labels, e.g.
'bug,ui,@high' :param sort: accepted values:
('created', 'updated', 'comments', 'created')
:param str direction: (optional), accepted values: ('asc', 'desc')
:param str since: (optional), ISO 8601 format: YYYY-MM-DDTHH:MM:SSZ
:param int number: (optional), Number of issues to return.
By default all issues are returned
:returns: list of :class:`Issue <github3.issues.Issue>`\ s
"""
url = self._build_url('issues', base_url=self._api)
params = {'assignee': assignee, 'mentioned': mentioned}
if milestone in ('*', 'none') or isinstance(milestone, int):
params['milestone'] = milestone
self._remove_none(params)
params.update(issue_params(None, state, labels, sort, direction,
since)) # nopep8
return self._iter(int(number), url, Issue, params=params)
def iter_issue_events(self, number=-1):
"""Iterates over issue events on this repository.
:param int number: (optional), number of events to return. Default: -1
returns all available events
:returns: generator of
:class:`IssueEvent <github3.issues.IssueEvent>`\ s
"""
url = self._build_url('issues', 'events', base_url=self._api)
return self._iter(int(number), url, IssueEvent)
@requires_auth
def iter_keys(self, number=-1):
"""Iterates over deploy keys on this repository.
:param int number: (optional), number of keys to return. Default: -1
returns all available keys
:returns: generator of :class:`Key <github3.users.Key>`\ s
"""
url = self._build_url('keys', base_url=self._api)
return self._iter(int(number), url, Key)
def iter_labels(self, number=-1):
"""Iterates over labels on this repository.
:param int number: (optional), number of labels to return. Default: -1
returns all available labels
:returns: generator of :class:`Label <github3.issues.Label>`\ s
"""
url = self._build_url('labels', base_url=self._api)
return self._iter(int(number), url, Label)
def iter_languages(self, number=-1):
"""Iterate over the programming languages used in the repository.
:param int number: (optional), number of languages to return. Default:
-1 returns all used languages
:returns: list of tuples
"""
url = self._build_url('languages', base_url=self._api)
return self._iter(int(number), url, tuple)
def iter_milestones(self, state=None, sort=None, direction=None,
number=-1):
"""Iterates over the milestones on this repository.
:param str state: (optional), state of the milestones, accepted
values: ('open', 'closed')
:param str sort: (optional), how to sort the milestones, accepted
values: ('due_date', 'completeness')
:param str direction: (optional), direction to sort the milestones,
accepted values: ('asc', 'desc')
:param int number: (optional), number of milestones to return.
Default: -1 returns all milestones
:returns: generator of
:class:`Milestone <github3.issues.Milestone>`\ s
"""
url = self._build_url('milestones', base_url=self._api)
accepted = {'state': ('open', 'closed'),
'sort': ('due_date', 'completeness'),
'direction': ('asc', 'desc')}
params = {'state': state, 'sort': sort, 'direction': direction}
for (k, v) in list(params.items()):
if not (v and (v in accepted[k])): # e.g., '' or None
del params[k]
if not params:
params = None
return self._iter(int(number), url, Milestone, params)
def iter_network_events(self, number=-1):
"""Iterates over events on a network of repositories.
:param int number: (optional), number of events to return. Default: -1
returns all available events
:returns: generator of :class:`Event <github3.events.Event>`\ s
"""
base = self._api.replace('repos', 'networks', 1)
url = self._build_url('events', base_url=base)
return self._iter(int(number), url, Event)
def iter_notifications(self, all=False, participating=False, since='',
number=-1):
"""Iterates over the notifications for this repository.
:param bool all: (optional), show all notifications, including ones
marked as read
:param bool participating: (optional), show only the notifications the
user is participating in directly
:param str since: (optional), filters out any notifications updated
before the given time. The time should be passed in as UTC in the
ISO 8601 format: ``YYYY-MM-DDTHH:MM:SSZ``. Example:
"2012-10-09T23:39:01Z".
:returns: generator of :class:`Thread <github3.notifications.Thread>`
"""
url = self._build_url('notifications', base_url=self._api)
params = {'all': all, 'participating': participating, 'since': since}
for (k, v) in list(params.items()):
if not v:
del params[k]
return self._iter(int(number), url, Thread, params=params)
def iter_pulls(self, state=None, number=-1):
"""List pull requests on repository.
:param str state: (optional), accepted values: ('open', 'closed')
:param int number: (optional), number of pulls to return. Default: -1
returns all available pull requests
:returns: generator of
:class:`PullRequest <github3.pulls.PullRequest>`\ s
"""
url = self._build_url('pulls', base_url=self._api)
params = {}
if state in ('open', 'closed'):
params['state'] = state
return self._iter(int(number), url, PullRequest, params=params)
def iter_refs(self, subspace='', number=-1):
"""Iterates over references for this repository.
:param str subspace: (optional), e.g. 'tags', 'stashes', 'notes'
:param int number: (optional), number of refs to return. Default: -1
returns all available refs
:returns: generator of :class:`Reference <github3.git.Reference>`\ s
"""
if subspace:
args = ('git', 'refs', subspace)
else:
args = ('git', 'refs')
url = self._build_url(*args, base_url=self._api)
return self._iter(int(number), url, Reference)
def iter_stargazers(self, number=-1):
"""List users who have starred this repository.
:returns: generator of :class:`User <github3.users.User>`\ s
"""
url = self._build_url('stargazers', base_url=self._api)
return self._iter(int(number), url, User)
def iter_subscribers(self, number=-1):
"""Iterates over users subscribed to this repository.
:param int number: (optional), number of subscribers to return.
Default: -1 returns all subscribers available
:returns: generator of :class:`User <github3.users.User>`
"""
url = self._build_url('subscribers', base_url=self._api)
return self._iter(int(number), url, User)
def iter_statuses(self, sha, number=-1):
"""Iterates over the statuses for a specific SHA.
:param str sha: SHA of the commit to list the statuses of
:param int number: (optional), return up to number statuses. Default:
-1 returns all available statuses.
:returns: generator of :class:`Status <Status>`
"""
url = ''
if sha:
url = self._build_url('statuses', sha, base_url=self._api)
return self._iter(int(number), url, Status)
def iter_tags(self, number=-1):
"""Iterates over tags on this repository.
:param int number: (optional), return up to at most number tags.
Default: -1 returns all available tags.
:returns: generator of :class:`RepoTag <RepoTag>`\ s
"""
url = self._build_url('tags', base_url=self._api)
return self._iter(int(number), url, RepoTag)
@requires_auth
def iter_teams(self, number=-1):
"""Iterates over teams with access to this repository.
:param int number: (optional), return up to number Teams. Default: -1
returns all Teams.
:returns: generator of :class:`Team <github3.orgs.Team>`\ s
"""
from github3.orgs import Team
url = self._build_url('teams', base_url=self._api)
return self._iter(int(number), url, Team)
def mark_notifications(self, last_read=''):
"""Mark all notifications in this repository as read.
:param str last_read: (optional), Describes the last point that
notifications were checked. Anything updated since this time will
not be updated. Default: Now. Expected in ISO 8601 format:
``YYYY-MM-DDTHH:MM:SSZ``. Example: "2012-10-09T23:39:01Z".
:returns: bool
"""
url = self._build_url('notifications', base_url=self._api)
mark = {'read': True}
if last_read:
mark['last_read_at'] = last_read
return self._boolean(self._put(url, data=dumps(mark)),
205, 404)
def merge(self, base, head, message=''):
"""Perform a merge from ``head`` into ``base``.
:param str base: (required), where you're merging into
:param str head: (required), where you're merging from
:param str message: (optional), message to be used for the commit
:returns: :class:`RepoCommit <RepoCommit>`
"""
url = self._build_url('merges', base_url=self._api)
data = {'base': base, 'head': head, 'commit_message': message}
json = self._json(self._post(url, data=dumps(data)), 201)
return RepoCommit(json, self) if json else None
def milestone(self, number):
"""Get the milestone indicated by ``number``.
:param int number: (required), unique id number of the milestone
:returns: :class:`Milestone <github3.issues.Milestone>`
"""
url = self._build_url('milestones', str(number), base_url=self._api)
json = self._json(self._get(url), 200)
return Milestone(json, self) if json else None
@requires_auth
def pubsubhubbub(self, mode, topic, callback, secret=''):
"""Create/update a pubsubhubbub hook.
:param str mode: (required), accepted values: ('subscribe',
'unsubscribe')
:param str topic: (required), form:
https://github.com/:user/:repo/events/:event
:param str callback: (required), the URI that receives the updates
:param str secret: (optional), shared secret key that generates a
SHA1 HMAC of the payload content.
:returns: bool
"""
from re import match
m = match('https://github\.com/\w+/[\w\._-]+/events/\w+', topic)
status = False
if mode and topic and callback and m:
data = [('hub.mode', mode), ('hub.topic', topic),
('hub.callback', callback), ('hub.secret', secret)]
url = self._build_url('hub')
status = self._boolean(self._post(url, data=dumps(data)), 204,
404)
return status
def pull_request(self, number):
"""Get the pull request indicated by ``number``.
:param int number: (required), number of the pull request.
:returns: :class:`PullRequest <github3.pulls.PullRequest>`
"""
json = None
if int(number) > 0:
url = self._build_url('pulls', str(number), base_url=self._api)
json = self._json(self._get(url), 200)
return PullRequest(json, self) if json else None
def readme(self):
"""Get the README for this repository.
:returns: :class:`Contents <Contents>`
"""
url = self._build_url('readme', base_url=self._api)
json = self._json(self._get(url), 200)
return Contents(json) if json else None
def ref(self, ref):
"""Get a reference pointed to by ``ref``.
The most common will be branches and tags. For a branch, you must
specify 'heads/branchname' and for a tag, 'tags/tagname'. Essentially,
the system should return any reference you provide it in the namespace,
including notes and stashes (provided they exist on the server).
:param str ref: (required)
:type ref: str
:returns: :class:`Reference <github3.git.Reference>`
"""
url = self._build_url('git', 'refs', ref, base_url=self._api)
json = self._json(self._get(url), 200)
return Reference(json, self) if json else None
@requires_auth
def remove_collaborator(self, login):
"""Remove collaborator ``login`` from the repository.
:param str login: (required), login name of the collaborator
:returns: bool
"""
resp = False
if login:
url = self._build_url('collaborators', login, base_url=self._api)
resp = self._boolean(self._delete(url), 204, 404)
return resp
@requires_auth
def set_subscription(self, subscribed, ignored):
"""Set the user's subscription for this repository
:param bool subscribed: (required), determines if notifications should
be received from this repository.
:param bool ignored: (required), determines if notifications should be
ignored from this repository.
:returns: :class;`Subscription <Subscription>`
"""
sub = {'subscribed': subscribed, 'ignored': ignored}
url = self._build_url('subscription', base_url=self._api)
json = self._json(self._put(url, data=dumps(sub)), 200)
return Subscription(json, self) if json else None
@requires_auth
def subscription(self):
"""Return subscription for this Repository.
:returns: :class:`Subscription <github3.notifications.Subscription>`
"""
url = self._build_url('subscription', base_url=self._api)
json = self._json(self._get(url), 200)
return Subscription(json, self) if json else None
def tag(self, sha):
"""Get an annotated tag.
http://learn.github.com/p/tagging.html
:param str sha: (required), sha of the object for this tag
:returns: :class:`Tag <github3.git.Tag>`
"""
url = self._build_url('git', 'tags', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return Tag(json) if json else None
def tree(self, sha):
"""Get a tree.
:param str sha: (required), sha of the object for this tree
:returns: :class:`Tree <github3.git.Tree>`
"""
url = self._build_url('git', 'trees', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return Tree(json, self) if json else None
def update_label(self, name, color, new_name=''):
"""Update the label ``name``.
:param str name: (required), name of the label
:param str color: (required), color code
:param str new_name: (optional), new name of the label
:returns: bool
"""
label = self.label(name)
resp = False
if label:
upd = label.update
resp = upd(new_name, color) if new_name else upd(name, color)
return resp
class Branch(GitHubCore):
    """The :class:`Branch <Branch>` object. It holds the information GitHub
    returns about a branch on a :class:`Repository <Repository>`.
    """
    def __init__(self, branch, session=None):
        super(Branch, self).__init__(branch, session)
        #: Name of the branch.
        self.name = branch.get('name')
        #: The branch's :class:`RepoCommit <RepoCommit>` or ``None``.
        raw_commit = branch.get('commit')
        if raw_commit:
            raw_commit = RepoCommit(raw_commit, self._session)
        self.commit = raw_commit
        #: The '_links' attribute from the API payload.
        self.links = branch.get('_links', {})

    def __repr__(self):
        return '<Repository Branch [{0}]>'.format(self.name)
class Contents(GitHubObject):
    """The :class:`Contents <Contents>` object. It holds the information
    concerning any content in a repository requested via the API.
    """
    def __init__(self, content):
        super(Contents, self).__init__(content)
        # API link to this content object itself.
        self._api = content['_links'].get('self', '')
        #: Dictionary of links
        self.links = content.get('_links')
        # should always be 'base64'
        #: Returns encoding used on the content.
        self.encoding = content.get('encoding', '')
        # content, base64 encoded and decoded
        #: Base64-encoded content of the file.
        self.content = content.get('content', '')
        #: Decoded content of the file.
        self.decoded = self.content
        # NOTE(review): on Python 3, b64decode returns bytes, so ``decoded``
        # (and therefore __str__ below) may be bytes rather than str for
        # base64 payloads -- confirm against the supported Python versions.
        if self.encoding == 'base64':
            self.decoded = b64decode(self.content.encode())
        # file name, path, and size
        #: Name of the content.
        self.name = content.get('name', '')
        #: Path to the content.
        self.path = content.get('path', '')
        #: Size of the content
        self.size = content.get('size', 0)
        #: SHA string.
        self.sha = content.get('sha', '')
        # should always be 'file'
        #: Type of content.
        self.type = content.get('type', '')
    def __repr__(self):
        return '<Content [{0}]>'.format(self.path)
    def __str__(self):
        # Returns the decoded payload (see the NOTE in __init__ about bytes).
        return self.decoded
    @property
    def git_url(self):
        """API URL for this blob"""
        return self.links['git']
    @property
    def html_url(self):
        """URL pointing to the content on GitHub."""
        return self.links['html']
class Download(GitHubCore):
    """The :class:`Download <Download>` object. It represents how GitHub sends
    information about files uploaded to the downloads section of a repository.

    .. warning::
        On 2013-03-11, this API will be deprecated by GitHub. There will also
        be a new version of github3.py to accompany this at that date.
    """
    def __init__(self, download, session=None):
        super(Download, self).__init__(download, session)
        # API endpoint for this download.
        self._api = download.get('url', '')
        #: URL of the download at GitHub.
        self.html_url = download.get('html_url', '')
        #: Unique id of the download on GitHub.
        self.id = download.get('id', 0)
        #: Name of the download.
        self.name = download.get('name', '')
        #: Description of the download.
        self.description = download.get('description', '')
        #: Size of the download.
        self.size = download.get('size', 0)
        #: How many times this particular file has been downloaded.
        self.download_count = download.get('download_count', 0)
        #: Content type of the download.
        self.content_type = download.get('content_type', '')
    def __repr__(self):
        return '<Download [{0}]>'.format(self.name)
    @requires_auth
    def delete(self):
        """Delete this download if authenticated"""
        return self._boolean(self._delete(self._api), 204, 404)
    def saveas(self, path=''):
        """Save this download to the path specified.

        :param str path: (optional), if no path is specified, it will be
            saved in the current directory with the name specified by GitHub.
            it can take a file-like object as well
        :returns: bool
        """
        if not path:
            path = self.name
        # Stream the (possibly redirected) download instead of buffering it.
        resp = self._get(self.html_url, allow_redirects=True, stream=True)
        if self._boolean(resp, 200, 404):
            # Objects exposing ``write`` are used directly; anything else is
            # treated as a filesystem path we must open (and later close).
            if isinstance(getattr(path, 'write', None), Callable):
                file_like = True
                fd = path
            else:
                file_like = False
                fd = open(path, 'wb')
            for chunk in resp.iter_content():
                fd.write(chunk)
            # Only close file objects we opened ourselves.
            if not file_like:
                fd.close()
            return True
        return False  # (No coverage)
class Hook(GitHubCore):
    """The :class:`Hook <Hook>` object. This handles the information returned
    by GitHub about hooks set on a repository."""
    def __init__(self, hook, session=None):
        super(Hook, self).__init__(hook, session)
        # API endpoint for this hook.
        self._api = hook.get('url', '')
        #: datetime object representing when this hook was last updated.
        self.updated_at = None
        if hook.get('updated_at'):
            self.updated_at = self._strptime(hook.get('updated_at'))
        #: datetime object representing the date the hook was created.
        self.created_at = self._strptime(hook.get('created_at'))
        #: The name of the hook.
        self.name = hook.get('name')
        #: Events which trigger the hook.
        self.events = hook.get('events')
        #: Whether or not this Hook is marked as active on GitHub
        self.active = hook.get('active')
        #: Dictionary containing the configuration for the Hook.
        self.config = hook.get('config')
        #: Unique id of the hook.
        self.id = hook.get('id')

    def __repr__(self):
        return '<Hook [{0}]>'.format(self.name)

    def _update_(self, hook):
        # Re-initialize from fresh data, keeping the current session.
        self.__init__(hook, self._session)

    @requires_auth
    def delete(self):
        """Delete this hook.

        :returns: bool
        """
        return self._boolean(self._delete(self._api), 204, 404)

    @requires_auth
    def delete_subscription(self):
        """Delete the user's subscription to this repository.

        .. note:: NOTE(review): this builds ``<hook url>/subscription``;
            it looks like it belongs on the repository object rather than
            on a hook -- confirm against the GitHub API.

        :returns: bool
        """
        url = self._build_url('subscription', base_url=self._api)
        return self._boolean(self._delete(url), 204, 404)

    @requires_auth
    def edit(self, name, config, events=None, add_events=None, rm_events=None,
             active=True):
        """Edit this hook.

        :param str name: (required), name of the service being called
        :param dict config: (required), key-value pairs of settings for this
            hook
        :param list events: (optional), which events should this be triggered
            for
        :param list add_events: (optional), events to be added to the list of
            events that this hook triggers for
        :param list rm_events: (optional), events to be removed from the list
            of events that this hook triggers for
        :param bool active: (optional), should this event be active
        :returns: bool
        """
        # The defaults were mutable lists ([]), shared across calls; ``None``
        # is behaviorally identical here since both are skipped by the
        # truthiness checks below.
        json = None
        if name and config and isinstance(config, dict):
            data = {'name': name, 'config': config, 'active': active}
            if events:
                data['events'] = events
            if add_events:
                data['add_events'] = add_events
            if rm_events:
                data['remove_events'] = rm_events
            json = self._json(self._patch(self._api, data=dumps(data)), 200)
        if json:
            self._update_(json)
            return True
        return False

    @requires_auth
    def test(self):
        """Test this hook.

        :returns: bool
        """
        url = self._build_url('tests', base_url=self._api)
        return self._boolean(self._post(url), 204, 404)
class RepoTag(GitHubObject):
    """The :class:`RepoTag <RepoTag>` object. This stores the information
    representing a tag that was created on a repository.
    """
    def __init__(self, tag):
        super(RepoTag, self).__init__(tag)
        #: Name of the tag.
        self.name = tag.get('name')
        #: URL for the GitHub generated zipball associated with the tag.
        self.zipball_url = tag.get('zipball_url')
        #: URL for the GitHub generated tarball associated with the tag.
        self.tarball_url = tag.get('tarball_url')
        #: Dictionary containing the SHA and URL of the commit.
        self.commit = tag.get('commit', {})

    def __str__(self):
        return self.name

    def __repr__(self):
        # Formats via __str__, i.e. the tag name.
        return '<Repository Tag [{0}]>'.format(self)
class RepoComment(BaseComment):
    """The :class:`RepoComment <RepoComment>` object. This stores the
    information about a comment on a file in a repository.
    """
    def __init__(self, comment, session=None):
        super(RepoComment, self).__init__(comment, session)
        #: Commit id on which the comment was made.
        self.commit_id = comment.get('commit_id')
        #: URL of the comment on GitHub.
        self.html_url = comment.get('html_url')
        #: The line number where the comment is located.
        self.line = comment.get('line')
        #: The path to the file where the comment was made.
        self.path = comment.get('path')
        #: The position in the diff where the comment was made.
        self.position = comment.get('position')
        #: datetime object representing when the comment was updated.
        self.updated_at = comment.get('updated_at')
        if self.updated_at:
            self.updated_at = self._strptime(self.updated_at)
        #: :class:`User <github3.users.User>` who left the comment, or None.
        self.user = None
        if comment.get('user'):
            self.user = User(comment.get('user'), self)

    def __repr__(self):
        # ``self.user`` is None when the payload lacks a 'user' entry; the
        # previous ``self.user.login`` access raised AttributeError then.
        login = self.user.login if self.user else ''
        return '<Repository Comment [{0}/{1}]>'.format(self.commit_id[:7],
                                                       login)

    def _update_(self, comment):
        super(RepoComment, self)._update_(comment)
        self.__init__(comment, self._session)

    @requires_auth
    def update(self, body, sha, line, path, position):
        """Update this comment.

        :param str body: (required)
        :param str sha: (required), sha id of the commit to comment on
        :param int line: (required), line number to comment on
        :param str path: (required), relative path of the file you're
            commenting on
        :param int position: (required), line index in the diff to comment on
        :returns: bool
        """
        json = None
        if body and sha and path and line > 0 and position > 0:
            data = {'body': body, 'commit_id': sha, 'line': line,
                    'path': path, 'position': position}
            json = self._json(self._post(self._api, data=dumps(data)), 200)
        if json:
            self._update_(json)
            return True
        return False
class RepoCommit(BaseCommit):
    """The :class:`RepoCommit <RepoCommit>` object. This represents a commit as
    viewed by a :class:`Repository`. This is different from a Commit object
    returned from the git data section.
    """
    def __init__(self, commit, session=None):
        super(RepoCommit, self).__init__(commit, session)
        #: :class:`User <github3.users.User>` who authored the commit.
        self.author = commit.get('author')
        if self.author:
            self.author = User(self.author, self._session)
        #: :class:`User <github3.users.User>` who committed the commit.
        self.committer = commit.get('committer')
        if self.committer:
            self.committer = User(self.committer, self._session)
        #: :class:`Commit <github3.git.Commit>`.
        self.commit = commit.get('commit')
        if self.commit:
            self.commit = Commit(self.commit, self._session)
        # SHA of this commit (also used by __repr__ below).
        self.sha = commit.get('sha')
        #: The number of additions made in the commit.
        self.additions = 0
        #: The number of deletions made in the commit.
        self.deletions = 0
        #: Total number of changes in the files.
        self.total = 0
        # NOTE(review): when 'stats' is present but lacks a key, .get()
        # overwrites the zero default with None -- confirm whether callers
        # rely on these always being ints.
        if commit.get('stats'):
            self.additions = commit['stats'].get('additions')
            self.deletions = commit['stats'].get('deletions')
            self.total = commit['stats'].get('total')
        #: The files that were modified by this commit.
        self.files = commit.get('files', [])
    def __repr__(self):
        return '<Repository Commit [{0}]>'.format(self.sha[:7])
    def diff(self):
        """Return the raw response body of the diff.

        Requests the commit with the ``application/vnd.github.diff`` media
        type; returns None on a 404.
        """
        resp = self._get(self._api,
                         headers={'Accept': 'application/vnd.github.diff'})
        return resp.content if self._boolean(resp, 200, 404) else None
    def patch(self):
        """Return the raw response body of the patch.

        Requests the commit with the ``application/vnd.github.patch`` media
        type; returns None on a 404.
        """
        resp = self._get(self._api,
                         headers={'Accept': 'application/vnd.github.patch'})
        return resp.content if self._boolean(resp, 200, 404) else None
class Comparison(GitHubCore):
    """The :class:`Comparison <Comparison>` object. This encapsulates the
    information returned by GitHub comparing two commit objects in a
    repository."""
    def __init__(self, compare):
        # NOTE(review): unlike the other GitHubCore subclasses in this
        # module, no session is forwarded to the base class -- confirm this
        # is intentional.
        super(Comparison, self).__init__(compare)
        # API endpoint for this comparison.
        self._api = compare.get('url', '')
        #: URL to view the comparison at GitHub
        self.html_url = compare.get('html_url')
        #: Permanent link to this comparison.
        self.permalink_url = compare.get('permalink_url')
        #: URL to see the diff between the two commits.
        self.diff_url = compare.get('diff_url')
        #: Patch URL at GitHub for the comparison.
        self.patch_url = compare.get('patch_url')
        #: :class:`RepoCommit <RepoCommit>` object representing the base of
        #  comparison.
        self.base_commit = RepoCommit(compare.get('base_commit'), None)
        #: Behind or ahead.
        self.status = compare.get('status')
        #: Number of commits ahead by.
        self.ahead_by = compare.get('ahead_by')
        #: Number of commits behind by.
        self.behind_by = compare.get('behind_by')
        #: Number of commits difference in the comparison.
        self.total_commits = compare.get('total_commits')
        #: List of :class:`RepoCommit <RepoCommit>` objects.
        self.commits = [RepoCommit(com) for com in compare.get('commits')]
        #: List of dicts describing the files modified.
        self.files = compare.get('files', [])
    def __repr__(self):
        return '<Comparison of {0} commits>'.format(self.total_commits)
    def diff(self):
        """Return the raw response body of the diff.

        Requests the comparison with the ``application/vnd.github.diff``
        media type; returns None on a 404.
        """
        resp = self._get(self._api,
                         headers={'Accept': 'application/vnd.github.diff'})
        return resp.content if self._boolean(resp, 200, 404) else None
    def patch(self):
        """Return the raw response body of the patch.

        Requests the comparison with the ``application/vnd.github.patch``
        media type; returns None on a 404.
        """
        resp = self._get(self._api,
                         headers={'Accept': 'application/vnd.github.patch'})
        return resp.content if self._boolean(resp, 200, 404) else None
class Status(GitHubObject):
    """The :class:`Status <Status>` object. This represents information from
    the Repo Status API."""
    def __init__(self, status):
        super(Status, self).__init__(status)
        #: datetime object representing the creation of the status object
        self.created_at = self._strptime(status.get('created_at'))
        #: :class:`User <github3.users.User>` who created the object
        self.creator = User(status.get('creator'))
        #: Short description of the Status
        self.description = status.get('description')
        #: GitHub ID for the status object
        self.id = status.get('id')
        #: State of the status, e.g., 'success', 'pending', 'failed', 'error'
        self.state = status.get('state')
        #: URL to view more information about the status
        self.target_url = status.get('target_url')
        #: datetime object representing the last time the status was updated
        raw_updated = status.get('updated_at')
        self.updated_at = self._strptime(raw_updated) if raw_updated else None

    def __repr__(self):
        return '<Status [{s.id}:{s.state}]>'.format(s=self)
# Changelog note (from a merged commit message): Fix pubsubhubbub.
# It shouldn't be using JSON but a form instead.
"""
github3.repos
=============

This module contains the classes relating to repositories.

"""
from json import dumps
from base64 import b64decode
from collections import Callable
from github3.events import Event
from github3.issues import Issue, IssueEvent, Label, Milestone, issue_params
from github3.git import Blob, Commit, Reference, Tag, Tree
from github3.models import GitHubObject, GitHubCore, BaseComment, BaseCommit
from github3.pulls import PullRequest
from github3.users import User, Key
from github3.decorators import requires_auth
from github3.notifications import Subscription, Thread
class Repository(GitHubCore):
"""The :class:`Repository <Repository>` object. It represents how GitHub
sends information about repositories.
"""
    def __init__(self, repo, session=None):
        """Build a Repository from a decoded JSON payload ``repo``."""
        super(Repository, self).__init__(repo, session)
        #: URL used to clone via HTTPS.
        self.clone_url = repo.get('clone_url', '')
        #: ``datetime`` object representing when the Repository was created.
        self.created_at = self._strptime(repo.get('created_at'))
        #: Description of the repository.
        self.description = repo.get('description', '')
        # The number of forks
        #: The number of forks made of this repository.
        self.forks = repo.get('forks', 0)
        #: Is this repository a fork?
        self.fork = repo.get('fork')
        #: Full name as login/name
        self.full_name = repo.get('full_name', '')
        # Clone url using git, e.g. git://github.com/sigmavirus24/github3.py
        #: Plain git url for an anonymous clone.
        self.git_url = repo.get('git_url', '')
        #: Whether or not this repository has downloads enabled
        self.has_downloads = repo.get('has_downloads')
        #: Whether or not this repository has an issue tracker
        self.has_issues = repo.get('has_issues')
        #: Whether or not this repository has the wiki enabled
        self.has_wiki = repo.get('has_wiki')
        # e.g. https://sigmavirus24.github.com/github3.py
        #: URL of the home page for the project.
        self.homepage = repo.get('homepage', '')
        # e.g. https://github.com/sigmavirus24/github3.py
        #: URL of the project at GitHub.
        self.html_url = repo.get('html_url', '')
        #: Unique id of the repository.
        self.id = repo.get('id', 0)
        #: Language property.
        self.language = repo.get('language', '')
        #: Mirror property.
        self.mirror_url = repo.get('mirror_url', '')
        # Repository name, e.g. github3.py
        #: Name of the repository.
        self.name = repo.get('name', '')
        # Number of open issues
        #: Number of open issues on the repository.
        self.open_issues = repo.get('open_issues', 0)
        # Repository owner's name
        #: :class:`User <github3.users.User>` object representing the
        #  repository owner.
        self.owner = User(repo.get('owner', {}), self._session)
        #: Is this repository private?
        self.private = repo.get('private')
        #: ``datetime`` object representing the last time commits were pushed
        #  to the repository.
        self.pushed_at = self._strptime(repo.get('pushed_at'))
        #: Size of the repository.
        self.size = repo.get('size', 0)
        # SSH url e.g. git@github.com/sigmavirus24/github3.py
        #: URL to clone the repository via SSH.
        self.ssh_url = repo.get('ssh_url', '')
        #: If it exists, url to clone the repository via SVN.
        self.svn_url = repo.get('svn_url', '')
        #: ``datetime`` object representing the last time the repository was
        #  updated.
        self.updated_at = self._strptime(repo.get('updated_at'))
        self._api = repo.get('url', '')
        # The number of watchers
        #: Number of users watching the repository.
        self.watchers = repo.get('watchers', 0)
        #: Source of this fork, if it exists -- a :class:`Repository`
        #  (presumably the root of the fork network; confirm against the API)
        self.source = repo.get('source')
        if self.source:
            self.source = Repository(self.source, self)
        #: Parent of this fork, if it exists :class:`Repository`
        self.parent = repo.get('parent')
        if self.parent:
            self.parent = Repository(self.parent, self)
        #: default branch for the repository
        self.master_branch = repo.get('master_branch', '')
    def __repr__(self):
        # Delegates to __str__, which yields the 'owner/name' full name.
        return '<Repository [{0}]>'.format(self)
    def __str__(self):
        # The repository's full name, e.g. 'owner/name'.
        return self.full_name
    def _update_(self, repo):
        # Re-run __init__ with fresh data, reusing the existing session.
        self.__init__(repo, self._session)
def _create_pull(self, data):
self._remove_none(data)
json = None
if data:
url = self._build_url('pulls', base_url=self._api)
json = self._json(self._post(url, data=dumps(data)), 201)
return PullRequest(json, self._session) if json else None
@requires_auth
def add_collaborator(self, login):
"""Add ``login`` as a collaborator to a repository.
:param str login: (required), login of the user
:returns: bool -- True if successful, False otherwise
"""
resp = False
if login:
url = self._build_url('collaborators', login, base_url=self._api)
resp = self._boolean(self._put(url), 204, 404)
return resp
    def archive(self, format, path='', ref='master'):
        """Get the tarball or zipball archive for this repo at ref.

        :param str format: (required), accepted values: ('tarball',
            'zipball')
        :param path: (optional), path where the file should be saved
            to, default is the filename provided in the headers and will be
            written in the current directory.
            it can take a file-like object as well
        :type path: str, file
        :param str ref: (optional)
        :returns: bool -- True if successful, False otherwise
        """
        resp = None
        written = False
        if format in ('tarball', 'zipball'):
            url = self._build_url(format, ref, base_url=self._api)
            # Stream the (redirected) archive instead of buffering it.
            resp = self._get(url, allow_redirects=True, stream=True)
        pre_opened = False
        if resp and self._boolean(resp, 200, 404):
            fd = None
            if path:
                # Objects exposing ``write`` are used as-is; anything else is
                # treated as a filesystem path we must open (and later close).
                if isinstance(getattr(path, 'write', None), Callable):
                    pre_opened = True
                    fd = path
                else:
                    fd = open(path, 'wb')
            else:
                # No path given: fall back to the server-suggested filename
                # taken from the Content-Disposition header.
                header = resp.headers['content-disposition']
                i = header.find('filename=') + len('filename=')
                fd = open(header[i:], 'wb')
            for chunk in resp.iter_content():
                fd.write(chunk)
            # Only close file objects we opened ourselves.
            if not pre_opened:
                fd.close()
            written = True
        return written
def blob(self, sha):
"""Get the blob indicated by ``sha``.
:param str sha: (required), sha of the blob
:returns: :class:`Blob <github3.git.Blob>` if successful, otherwise
None
"""
url = self._build_url('git', 'blobs', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return Blob(json) if json else None
def branch(self, name):
"""Get the branch ``name`` of this repository.
:param str name: (required), branch name
:type name: str
:returns: :class:`Branch <Branch>`
"""
json = None
if name:
url = self._build_url('branches', name, base_url=self._api)
json = self._json(self._get(url), 200)
return Branch(json, self) if json else None
def commit(self, sha):
"""Get a single (repo) commit. See :func:`git_commit` for the Git Data
Commit.
:param str sha: (required), sha of the commit
:returns: :class:`RepoCommit <RepoCommit>` if successful, otherwise
None
"""
url = self._build_url('commits', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return RepoCommit(json, self) if json else None
def commit_comment(self, comment_id):
"""Get a single commit comment.
:param int comment_id: (required), id of the comment used by GitHub
:returns: :class:`RepoComment <RepoComment>` if successful, otherwise
None
"""
url = self._build_url('comments', str(comment_id), base_url=self._api)
json = self._json(self._get(url), 200)
return RepoComment(json, self) if json else None
def compare_commits(self, base, head):
"""Compare two commits.
:param str base: (required), base for the comparison
:param str head: (required), compare this against base
:returns: :class:`Comparison <Comparison>` if successful, else None
"""
url = self._build_url('compare', base + '...' + head,
base_url=self._api)
json = self._json(self._get(url), 200)
return Comparison(json) if json else None
def contents(self, path):
"""Get the contents of the file pointed to by ``path``.
:param str path: (required), path to file, e.g.
github3/repo.py
:returns: :class:`Contents <Contents>` if successful, else None
"""
url = self._build_url('contents', path, base_url=self._api)
resp = self._get(url)
if self._boolean(resp, 200, 404):
return Contents(self._json(resp, 200))
else:
return None
@requires_auth
def create_blob(self, content, encoding):
"""Create a blob with ``content``.
:param str content: (required), content of the blob
:param str encoding: (required), ('base64', 'utf-8')
:returns: string of the SHA returned
"""
sha = ''
if encoding in ('base64', 'utf-8') and content:
url = self._build_url('git', 'blobs', base_url=self._api)
data = {'content': content, 'encoding': encoding}
json = self._json(self._post(url, data=dumps(data)), 201)
if json:
sha = json.get('sha')
return sha
@requires_auth
def create_comment(self, body, sha, path='', position=1, line=1):
"""Create a comment on a commit.
:param str body: (required), body of the message
:param str sha: (required), commit id
:param str path: (optional), relative path of the file to comment
on
:param str position: (optional), line index in the diff to comment on
:param int line: (optional), line number of the file to comment on,
default: 1
:returns: :class:`RepoComment <RepoComment>` if successful else None
"""
line = int(line)
position = int(position)
json = None
if body and sha and line > 0:
data = {'body': body, 'commit_id': sha, 'line': line,
'path': path, 'position': position}
url = self._build_url('commits', sha, 'comments',
base_url=self._api)
json = self._json(self._post(url, data=dumps(data)), 201)
return RepoComment(json, self) if json else None
@requires_auth
def create_commit(self, message, tree, parents, author={}, committer={}):
"""Create a commit on this repository.
:param str message: (required), commit message
:param str tree: (required), SHA of the tree object this
commit points to
:param list parents: (required), SHAs of the commits that were parents
of this commit. If empty, the commit will be written as the root
commit. Even if there is only one parent, this should be an
array.
:param dict author: (optional), if omitted, GitHub will
use the authenticated user's credentials and the current
time. Format: {'name': 'Committer Name', 'email':
'name@example.com', 'date': 'YYYY-MM-DDTHH:MM:SS+HH:00'}
:param dict committer: (optional), if ommitted, GitHub will use the
author parameters. Should be the same format as the author
parameter.
:returns: :class:`Commit <github3.git.Commit>` if successful, else
None
"""
json = None
if message and tree and isinstance(parents, list):
url = self._build_url('git', 'commits', base_url=self._api)
data = {'message': message, 'tree': tree, 'parents': parents,
'author': author, 'committer': committer}
json = self._json(self._post(url, data=dumps(data)), 201)
return Commit(json, self) if json else None
@requires_auth
def create_fork(self, organization=None):
    """Fork this repository, optionally into an organization.

    :param str organization: (required), login for organization to create
        the fork under
    :returns: :class:`Repository <Repository>` if successful, else None
    """
    url = self._build_url('forks', base_url=self._api)
    if organization:
        payload = dumps({'organization': organization})
        response = self._post(url, data=payload)
    else:
        response = self._post(url)
    # GitHub answers 202 Accepted for fork creation.
    json = self._json(response, 202)
    return Repository(json, self) if json else None
@requires_auth
def create_hook(self, name, config, events=None, active=True):
    """Create a hook on this repository.

    :param str name: (required), name of the hook
    :param dict config: (required), key-value pairs which act as settings
        for this hook
    :param list events: (optional), events the hook is triggered for,
        default: ``['push']``
    :param bool active: (optional), whether the hook is actually
        triggered
    :returns: :class:`Hook <Hook>` if successful, else None
    """
    # ``None`` default replaces the previous mutable ``['push']`` default
    # (shared-mutable-default pitfall); behaviour is unchanged.
    if events is None:
        events = ['push']
    json = None
    if name and config and isinstance(config, dict):
        url = self._build_url('hooks', base_url=self._api)
        data = {'name': name, 'config': config, 'events': events,
                'active': active}
        json = self._json(self._post(url, data=dumps(data)), 201)
    return Hook(json, self) if json else None
@requires_auth
def create_issue(self, title, body=None, assignee=None, milestone=None,
                 labels=None):
    """Open a new issue on this repository.

    :param str title: (required), title of the issue
    :param str body: (optional), body of the issue
    :param str assignee: (optional), login of the user to assign the
        issue to
    :param int milestone: (optional), number of the milestone to attribute
        this issue to (e.g. ``m`` is a Milestone object, ``m.number`` is
        what you pass here.)
    :param labels: (optional), labels to apply to this issue
    :type labels: list of strings
    :returns: :class:`Issue <github3.issues.Issue>` if successful, else
        None
    """
    payload = {'title': title, 'body': body, 'assignee': assignee,
               'milestone': milestone, 'labels': labels}
    # Drop keys whose value is None so they are not sent to the API.
    self._remove_none(payload)
    if not payload:
        return None
    url = self._build_url('issues', base_url=self._api)
    created = self._json(self._post(url, data=dumps(payload)), 201)
    return Issue(created, self) if created else None
@requires_auth
def create_key(self, title, key):
    """Register a new deploy key on this repository.

    :param str title: (required), title of key
    :param str key: (required), key text
    :returns: :class:`Key <github3.users.Key>` if successful, else None
    """
    if not (title and key):
        return None
    url = self._build_url('keys', base_url=self._api)
    payload = dumps({'title': title, 'key': key})
    created = self._json(self._post(url, data=payload), 201)
    return Key(created, self) if created else None
@requires_auth
def create_label(self, name, color):
    """Create a new label for this repository.

    :param str name: (required), name to give to the label
    :param str color: (required), value of the color to assign to the
        label
    :returns: :class:`Label <github3.issues.Label>` if successful, else
        None
    """
    if not (name and color):
        return None
    url = self._build_url('labels', base_url=self._api)
    # The API wants the color without a leading '#'.
    payload = dumps({'name': name, 'color': color.strip('#')})
    created = self._json(self._post(url, data=payload), 201)
    return Label(created, self) if created else None
@requires_auth
def create_milestone(self, title, state=None, description=None,
                     due_on=None):
    """Create a new milestone on this repository.

    :param str title: (required), title of the milestone
    :param str state: (optional), state of the milestone, accepted
        values: ('open', 'closed'), default: 'open'
    :param str description: (optional), description of the milestone
    :param str due_on: (optional), ISO 8601 formatted due date
    :returns: :class:`Milestone <github3.issues.Milestone>` if successful,
        else None
    """
    url = self._build_url('milestones', base_url=self._api)
    # Discard any state value the API would not accept.
    if state not in ('open', 'closed'):
        state = None
    payload = {'title': title, 'state': state,
               'description': description, 'due_on': due_on}
    self._remove_none(payload)
    created = None
    if payload:
        created = self._json(self._post(url, data=dumps(payload)), 201)
    return Milestone(created, self) if created else None
@requires_auth
def create_pull(self, title, base, head, body=None):
    """Open a pull request merging ``head`` into ``base``.

    :param str title: (required)
    :param str base: (required), e.g., 'username:branch', or a sha
    :param str head: (required), e.g., 'master', or a sha
    :param str body: (optional), markdown formatted description
    :returns: :class:`PullRequest <github3.pulls.PullRequest>` if
        successful, else None
    """
    payload = {'title': title, 'body': body, 'base': base, 'head': head}
    return self._create_pull(payload)
@requires_auth
def create_pull_from_issue(self, issue, base, head):
    """Create a pull request from issue #``issue``.

    :param int issue: (required), issue number
    :param str base: (required), e.g., 'username:branch', or a sha
    :param str head: (required), e.g., 'master', or a sha
    :returns: :class:`PullRequest <github3.pulls.PullRequest>` if
        successful, else None
    """
    # Coerce to int for consistency with the other number-taking methods
    # (``issue``, ``pull_request``, ...); previously a string number such
    # as '5' would silently fail the ``> 0`` comparison.
    issue = int(issue)
    if issue > 0:
        data = {'issue': issue, 'base': base, 'head': head}
        return self._create_pull(data)
    return None
@requires_auth
def create_ref(self, ref, sha):
    """Create a git reference in this repository.

    :param str ref: (required), fully qualified name of the reference,
        e.g. ``refs/heads/master``. If it doesn't start with ``refs`` and
        contain at least two slashes, GitHub's API will reject it.
    :param str sha: (required), SHA1 value to set the reference to
    :returns: :class:`Reference <github3.git.Reference>` if successful
        else None
    """
    # Pre-validate the shape GitHub requires: at least two slashes.
    if not (ref and sha and ref.count('/') >= 2):
        return None
    url = self._build_url('git', 'refs', base_url=self._api)
    payload = dumps({'ref': ref, 'sha': sha})
    created = self._json(self._post(url, data=payload), 201)
    return Reference(created, self) if created else None
@requires_auth
def create_status(self, sha, state, target_url='', description=''):
    """Create a status object on a commit.

    :param str sha: (required), SHA of the commit to create the status on
    :param str state: (required), state of the test; only the following
        are accepted: 'pending', 'success', 'error', 'failure'
    :param str target_url: (optional), URL to associate with this status.
    :param str description: (optional), short description of the status
    :returns: :class:`Status <Status>` if successful, else None
    """
    created = {}
    if sha and state:
        url = self._build_url('statuses', sha, base_url=self._api)
        payload = dumps({'state': state, 'target_url': target_url,
                         'description': description})
        created = self._json(self._post(url, data=payload), 201)
    return Status(created) if created else None
@requires_auth
def create_tag(self, tag, message, sha, obj_type, tagger,
               lightweight=False):
    """Create a tag in this repository.

    :param str tag: (required), name of the tag
    :param str message: (required), tag message
    :param str sha: (required), SHA of the git object this is tagging
    :param str obj_type: (required), type of object being tagged, e.g.,
        'commit', 'tree', 'blob'
    :param dict tagger: (required), containing the name, email of the
        tagger and the date it was tagged
    :param bool lightweight: (optional), if False, create an annotated
        tag, otherwise create a lightweight tag (a Reference).
    :returns: If lightweight == False: :class:`Tag <github3.git.Tag>` if
        successful, else None. If lightweight == True: :class:`Reference
        <Reference>`
    """
    # A lightweight tag is just a ref under refs/tags.
    if lightweight and tag and sha:
        return self.create_ref('refs/tags/' + tag, sha)
    created = None
    # tagger must carry exactly name, email and date.
    if tag and message and sha and obj_type and len(tagger) == 3:
        url = self._build_url('git', 'tags', base_url=self._api)
        payload = dumps({'tag': tag, 'message': message, 'object': sha,
                         'type': obj_type, 'tagger': tagger})
        created = self._json(self._post(url, data=payload), 201)
        if created:
            # An annotated tag object still needs a ref pointing at it.
            self.create_ref('refs/tags/' + tag, sha)
    return Tag(created) if created else None
@requires_auth
def create_tree(self, tree, base_tree=''):
    """Create a tree on this repository.

    :param list tree: (required), specifies the tree structure.
        Format: [{'path': 'path/file', 'mode':
        'filemode', 'type': 'blob or tree', 'sha': '44bfc6d...'}]
    :param str base_tree: (optional), SHA1 of the tree you want
        to update with new data
    :returns: :class:`Tree <github3.git.Tree>` if successful, else None
    """
    if not (tree and isinstance(tree, list)):
        return None
    url = self._build_url('git', 'trees', base_url=self._api)
    payload = dumps({'tree': tree, 'base_tree': base_tree})
    created = self._json(self._post(url, data=payload), 201)
    return Tree(created) if created else None
@requires_auth
def delete(self):
    """Delete this repository.

    :returns: bool -- True if successful, False otherwise
    """
    response = self._delete(self._api)
    return self._boolean(response, 204, 404)
@requires_auth
def delete_key(self, key_id):
    """Delete the key with the specified id from your deploy keys list.

    :param int key_id: (required), id of the deploy key to delete
    :returns: bool -- True if successful, False otherwise
    """
    key_id = int(key_id)
    if key_id <= 0:
        return False
    url = self._build_url('keys', str(key_id), base_url=self._api)
    return self._boolean(self._delete(url), 204, 404)
def download(self, id_num):
    """Get a single download object by its id.

    .. warning::

        On 2012-03-11, GitHub will be deprecating the Downloads API. This
        method will no longer work.

    :param int id_num: (required), id of the download
    :returns: :class:`Download <Download>` if successful, else None
    """
    if int(id_num) <= 0:
        return None
    url = self._build_url('downloads', str(id_num), base_url=self._api)
    fetched = self._json(self._get(url), 200)
    return Download(fetched, self) if fetched else None
@requires_auth
def edit(self, name, description=None, homepage=None, private=None,
         has_issues=None, has_wiki=None, has_downloads=None,
         default_branch=None):
    """Edit this repository.

    Every optional parameter defaults to ``None``, meaning "leave the
    current value unchanged" on the API side.

    :param str name: (required), name of the repository
    :param str description: (optional), new description for this
        repository
    :param str homepage: (optional), new homepage for this repository
    :param bool private: (optional), ``True`` makes the repository
        private, ``False`` makes it public
    :param bool has_issues: (optional), enable (``True``) or disable
        (``False``) issues for this repository
    :param bool has_wiki: (optional), enable (``True``) or disable
        (``False``) the wiki for this repository
    :param bool has_downloads: (optional), enable (``True``) or disable
        (``False``) downloads for this repository
    :param str default_branch: (optional), new default branch for this
        repository
    :returns: bool -- True if successful, False otherwise
    """
    payload = {'name': name, 'description': description,
               'homepage': homepage, 'private': private,
               'has_issues': has_issues, 'has_wiki': has_wiki,
               'has_downloads': has_downloads,
               'default_branch': default_branch}
    # Only send fields the caller actually set.
    self._remove_none(payload)
    if not payload:
        return False
    json = self._json(self._patch(self._api, data=dumps(payload)), 200)
    # Refresh this object's attributes from the API response.
    self._update_(json)
    return True
def is_collaborator(self, login):
    """Check to see if ``login`` is a collaborator on this repository.

    :param str login: (required), login for the user
    :returns: bool -- True if successful, False otherwise
    """
    if not login:
        return False
    url = self._build_url('collaborators', login, base_url=self._api)
    # 204 means the user is a collaborator; 404 means not.
    return self._boolean(self._get(url), 204, 404)
def git_commit(self, sha):
    """Get a single (git) commit.

    :param str sha: (required), sha of the commit
    :returns: :class:`Commit <github3.git.Commit>` if successful,
        otherwise None
    """
    fetched = {}
    if sha:
        url = self._build_url('git', 'commits', sha, base_url=self._api)
        fetched = self._json(self._get(url), 200)
    return Commit(fetched, self) if fetched else None
@requires_auth
def hook(self, id_num):
    """Get a single hook.

    :param int id_num: (required), id of the hook
    :returns: :class:`Hook <Hook>` if successful, else None
    """
    if int(id_num) <= 0:
        return None
    url = self._build_url('hooks', str(id_num), base_url=self._api)
    fetched = self._json(self._get(url), 200)
    return Hook(fetched, self) if fetched else None
def is_assignee(self, login):
    """Check if ``login`` may be assigned to issues on this repository.

    :param str login: (required), login of the user to check
    :returns: :class:`bool`
    """
    if login:
        url = self._build_url('assignees', login, base_url=self._api)
        return self._boolean(self._get(url), 204, 404)
    return False
def issue(self, number):
    """Get the issue specified by ``number``.

    :param int number: (required), number of the issue on this repository
    :returns: :class:`Issue <github3.issues.Issue>` if successful, else
        None
    """
    if int(number) <= 0:
        return None
    url = self._build_url('issues', str(number), base_url=self._api)
    fetched = self._json(self._get(url), 200)
    return Issue(fetched, self) if fetched else None
@requires_auth
def key(self, id_num):
    """Get the specified deploy key.

    :param int id_num: (required), id of the key
    :returns: :class:`Key <Key>` if successful, else None
    """
    if int(id_num) <= 0:
        return None
    url = self._build_url('keys', str(id_num), base_url=self._api)
    fetched = self._json(self._get(url), 200)
    return Key(fetched, self) if fetched else None
def label(self, name):
    """Get the label specified by ``name``.

    :param str name: (required), name of the label
    :returns: :class:`Label <github3.issues.Label>` if successful, else
        None
    """
    if not name:
        return None
    url = self._build_url('labels', name, base_url=self._api)
    fetched = self._json(self._get(url), 200)
    return Label(fetched, self) if fetched else None
def iter_assignees(self, number=-1):
    """Iterate over all users to whom an issue may be assigned.

    :param int number: (optional), number of assignees to return. Default:
        -1 returns all available assignees
    :returns: list of :class:`User <github3.users.User>`\ s
    """
    endpoint = self._build_url('assignees', base_url=self._api)
    return self._iter(int(number), endpoint, User)
def iter_branches(self, number=-1):
    """Iterate over this repository's branches.

    :param int number: (optional), number of branches to return. Default:
        -1 returns all branches
    :returns: list of :class:`Branch <Branch>`\ es
    """
    endpoint = self._build_url('branches', base_url=self._api)
    return self._iter(int(number), endpoint, Branch)
def iter_comments(self, number=-1):
    """Iterate over comments on all commits in the repository.

    :param int number: (optional), number of comments to return. Default:
        -1 returns all comments
    :returns: list of :class:`RepoComment <RepoComment>`\ s
    """
    endpoint = self._build_url('comments', base_url=self._api)
    return self._iter(int(number), endpoint, RepoComment)
def iter_comments_on_commit(self, sha, number=-1):
    """Iterate over comments for a single commit.

    :param sha: (required), sha of the commit to list comments on
    :type sha: str
    :param int number: (optional), number of comments to return. Default:
        -1 returns all comments
    :returns: list of :class:`RepoComment <RepoComment>`\ s
    """
    # Default changed from 1 to -1 to match the documented behaviour
    # ("-1 returns all comments") and the other iter_* methods.
    url = self._build_url('commits', sha, 'comments', base_url=self._api)
    return self._iter(int(number), url, RepoComment)
def iter_commits(self, sha=None, path=None, author=None, number=-1):
    """Iterate over commits in this repository.

    :param str sha: (optional), sha or branch to start listing commits
        from
    :param str path: (optional), commits containing this path will be
        listed
    :param str author: (optional), GitHub login, real name, or email to
        filter commits by (using commit author)
    :param int number: (optional), number of commits to return. Default:
        -1 returns all commits
    :returns: list of :class:`RepoCommit <RepoCommit>`\ s
    """
    filters = {'sha': sha, 'path': path, 'author': author}
    # Only forward filters the caller actually provided.
    self._remove_none(filters)
    endpoint = self._build_url('commits', base_url=self._api)
    return self._iter(int(number), endpoint, RepoCommit, params=filters)
def iter_contributors(self, anon=False, number=-1):
    """Iterate over the contributors to this repository.

    :param bool anon: (optional), True lists anonymous contributors as
        well
    :param int number: (optional), number of contributors to return.
        Default: -1 returns all contributors
    :returns: list of :class:`User <github3.users.User>`\ s
    """
    endpoint = self._build_url('contributors', base_url=self._api)
    filters = {'anon': True} if anon else {}
    return self._iter(int(number), endpoint, User, params=filters)
def iter_downloads(self, number=-1):
    """Iterate over available downloads for this repository.

    .. warning::

        On 2012-03-11, GitHub will be deprecating the Downloads API. This
        method will no longer work.

    :param int number: (optional), number of downloads to return. Default:
        -1 returns all available downloads
    :returns: list of :class:`Download <Download>`\ s
    """
    endpoint = self._build_url('downloads', base_url=self._api)
    return self._iter(int(number), endpoint, Download)
def iter_events(self, number=-1):
    """Iterate over events on this repository.

    :param int number: (optional), number of events to return. Default: -1
        returns all available events
    :returns: list of :class:`Event <github3.events.Event>`\ s
    """
    endpoint = self._build_url('events', base_url=self._api)
    return self._iter(int(number), endpoint, Event)
def iter_forks(self, sort='', number=-1):
    """Iterate over forks of this repository.

    :param str sort: (optional), accepted values:
        ('newest', 'oldest', 'watchers'), API default: 'newest'
    :param int number: (optional), number of forks to return. Default: -1
        returns all forks
    :returns: list of :class:`Repository <Repository>`
    """
    endpoint = self._build_url('forks', base_url=self._api)
    # Silently ignore sort values the API would not accept.
    filters = {'sort': sort} if sort in ('newest', 'oldest',
                                         'watchers') else {}
    return self._iter(int(number), endpoint, Repository, params=filters)
@requires_auth
def iter_hooks(self, number=-1):
    """Iterate over hooks registered on this repository.

    :param int number: (optional), number of hooks to return. Default: -1
        returns all hooks
    :returns: list of :class:`Hook <Hook>`\ s
    """
    endpoint = self._build_url('hooks', base_url=self._api)
    return self._iter(int(number), endpoint, Hook)
def iter_issues(self, milestone=None, state=None, assignee=None,
                mentioned=None, labels=None, sort=None, direction=None,
                since=None, number=-1):
    """Iterate over issues on this repo based upon parameters passed.

    :param int milestone: (optional), 'none', or '*'
    :param str state: (optional), accepted values: ('open', 'closed')
    :param str assignee: (optional), 'none', '*', or login name
    :param str mentioned: (optional), user's login name
    :param str labels: (optional), comma-separated list of labels, e.g.
        'bug,ui,@high'
    :param str sort: (optional), accepted values:
        ('created', 'updated', 'comments')
    :param str direction: (optional), accepted values: ('asc', 'desc')
    :param str since: (optional), ISO 8601 format: YYYY-MM-DDTHH:MM:SSZ
    :param int number: (optional), Number of issues to return.
        By default all issues are returned
    :returns: list of :class:`Issue <github3.issues.Issue>`\ s
    """
    endpoint = self._build_url('issues', base_url=self._api)
    filters = {'assignee': assignee, 'mentioned': mentioned}
    # milestone may be a number, '*' or 'none'; anything else is dropped.
    if milestone in ('*', 'none') or isinstance(milestone, int):
        filters['milestone'] = milestone
    self._remove_none(filters)
    # Merge in the shared issue filtering parameters.
    filters.update(issue_params(None, state, labels, sort, direction,
                                since))  # nopep8
    return self._iter(int(number), endpoint, Issue, params=filters)
def iter_issue_events(self, number=-1):
    """Iterate over issue events on this repository.

    :param int number: (optional), number of events to return. Default: -1
        returns all available events
    :returns: generator of
        :class:`IssueEvent <github3.issues.IssueEvent>`\ s
    """
    endpoint = self._build_url('issues', 'events', base_url=self._api)
    return self._iter(int(number), endpoint, IssueEvent)
@requires_auth
def iter_keys(self, number=-1):
    """Iterate over deploy keys on this repository.

    :param int number: (optional), number of keys to return. Default: -1
        returns all available keys
    :returns: generator of :class:`Key <github3.users.Key>`\ s
    """
    endpoint = self._build_url('keys', base_url=self._api)
    return self._iter(int(number), endpoint, Key)
def iter_labels(self, number=-1):
    """Iterate over labels on this repository.

    :param int number: (optional), number of labels to return. Default: -1
        returns all available labels
    :returns: generator of :class:`Label <github3.issues.Label>`\ s
    """
    endpoint = self._build_url('labels', base_url=self._api)
    return self._iter(int(number), endpoint, Label)
def iter_languages(self, number=-1):
    """Iterate over the programming languages used in the repository.

    :param int number: (optional), number of languages to return. Default:
        -1 returns all used languages
    :returns: list of tuples
    """
    endpoint = self._build_url('languages', base_url=self._api)
    return self._iter(int(number), endpoint, tuple)
def iter_milestones(self, state=None, sort=None, direction=None,
                    number=-1):
    """Iterate over the milestones on this repository.

    :param str state: (optional), state of the milestones, accepted
        values: ('open', 'closed')
    :param str sort: (optional), how to sort the milestones, accepted
        values: ('due_date', 'completeness')
    :param str direction: (optional), direction to sort the milestones,
        accepted values: ('asc', 'desc')
    :param int number: (optional), number of milestones to return.
        Default: -1 returns all milestones
    :returns: generator of
        :class:`Milestone <github3.issues.Milestone>`\ s
    """
    endpoint = self._build_url('milestones', base_url=self._api)
    allowed = {'state': ('open', 'closed'),
               'sort': ('due_date', 'completeness'),
               'direction': ('asc', 'desc')}
    candidates = {'state': state, 'sort': sort, 'direction': direction}
    # Keep only values the API accepts; '' and None are discarded too.
    filters = dict((k, v) for (k, v) in candidates.items()
                   if v and v in allowed[k])
    if not filters:
        filters = None
    return self._iter(int(number), endpoint, Milestone, filters)
def iter_network_events(self, number=-1):
    """Iterate over events on a network of repositories.

    :param int number: (optional), number of events to return. Default: -1
        returns all available events
    :returns: generator of :class:`Event <github3.events.Event>`\ s
    """
    # The network events endpoint lives under /networks, not /repos.
    network_base = self._api.replace('repos', 'networks', 1)
    endpoint = self._build_url('events', base_url=network_base)
    return self._iter(int(number), endpoint, Event)
def iter_notifications(self, all=False, participating=False, since='',
                       number=-1):
    """Iterate over the notifications for this repository.

    :param bool all: (optional), show all notifications, including ones
        marked as read
    :param bool participating: (optional), show only the notifications the
        user is participating in directly
    :param str since: (optional), filters out any notifications updated
        before the given time. The time should be passed in as UTC in the
        ISO 8601 format: ``YYYY-MM-DDTHH:MM:SSZ``. Example:
        "2012-10-09T23:39:01Z".
    :param int number: (optional), number of notifications to return.
        Default: -1 returns all notifications
    :returns: generator of :class:`Thread <github3.notifications.Thread>`
    """
    endpoint = self._build_url('notifications', base_url=self._api)
    candidates = {'all': all, 'participating': participating,
                  'since': since}
    # Only send parameters with truthy values.
    filters = dict((k, v) for (k, v) in candidates.items() if v)
    return self._iter(int(number), endpoint, Thread, params=filters)
def iter_pulls(self, state=None, number=-1):
    """List pull requests on repository.

    :param str state: (optional), accepted values: ('open', 'closed')
    :param int number: (optional), number of pulls to return. Default: -1
        returns all available pull requests
    :returns: generator of
        :class:`PullRequest <github3.pulls.PullRequest>`\ s
    """
    endpoint = self._build_url('pulls', base_url=self._api)
    filters = {'state': state} if state in ('open', 'closed') else {}
    return self._iter(int(number), endpoint, PullRequest, params=filters)
def iter_refs(self, subspace='', number=-1):
    """Iterate over references for this repository.

    :param str subspace: (optional), e.g. 'tags', 'stashes', 'notes'
    :param int number: (optional), number of refs to return. Default: -1
        returns all available refs
    :returns: generator of :class:`Reference <github3.git.Reference>`\ s
    """
    segments = ('git', 'refs', subspace) if subspace else ('git', 'refs')
    endpoint = self._build_url(*segments, base_url=self._api)
    return self._iter(int(number), endpoint, Reference)
def iter_stargazers(self, number=-1):
    """List users who have starred this repository.

    :param int number: (optional), number of stargazers to return.
        Default: -1 returns all stargazers
    :returns: generator of :class:`User <github3.users.User>`\ s
    """
    endpoint = self._build_url('stargazers', base_url=self._api)
    return self._iter(int(number), endpoint, User)
def iter_subscribers(self, number=-1):
    """Iterate over users subscribed to this repository.

    :param int number: (optional), number of subscribers to return.
        Default: -1 returns all subscribers available
    :returns: generator of :class:`User <github3.users.User>`
    """
    endpoint = self._build_url('subscribers', base_url=self._api)
    return self._iter(int(number), endpoint, User)
def iter_statuses(self, sha, number=-1):
    """Iterate over the statuses for a specific SHA.

    :param str sha: SHA of the commit to list the statuses of
    :param int number: (optional), return up to number statuses. Default:
        -1 returns all available statuses.
    :returns: generator of :class:`Status <Status>`
    """
    endpoint = ''
    if sha:
        endpoint = self._build_url('statuses', sha, base_url=self._api)
    return self._iter(int(number), endpoint, Status)
def iter_tags(self, number=-1):
    """Iterate over tags on this repository.

    :param int number: (optional), return up to at most number tags.
        Default: -1 returns all available tags.
    :returns: generator of :class:`RepoTag <RepoTag>`\ s
    """
    endpoint = self._build_url('tags', base_url=self._api)
    return self._iter(int(number), endpoint, RepoTag)
@requires_auth
def iter_teams(self, number=-1):
    """Iterate over teams with access to this repository.

    :param int number: (optional), return up to number Teams. Default: -1
        returns all Teams.
    :returns: generator of :class:`Team <github3.orgs.Team>`\ s
    """
    # Imported locally to avoid a circular import at module load time.
    from github3.orgs import Team
    endpoint = self._build_url('teams', base_url=self._api)
    return self._iter(int(number), endpoint, Team)
def mark_notifications(self, last_read=''):
    """Mark all notifications in this repository as read.

    :param str last_read: (optional), Describes the last point that
        notifications were checked. Anything updated since this time will
        not be updated. Default: Now. Expected in ISO 8601 format:
        ``YYYY-MM-DDTHH:MM:SSZ``. Example: "2012-10-09T23:39:01Z".
    :returns: bool
    """
    endpoint = self._build_url('notifications', base_url=self._api)
    payload = {'read': True}
    if last_read:
        payload['last_read_at'] = last_read
    response = self._put(endpoint, data=dumps(payload))
    return self._boolean(response, 205, 404)
def merge(self, base, head, message=''):
    """Perform a merge from ``head`` into ``base``.

    :param str base: (required), where you're merging into
    :param str head: (required), where you're merging from
    :param str message: (optional), message to be used for the commit
    :returns: :class:`RepoCommit <RepoCommit>`
    """
    endpoint = self._build_url('merges', base_url=self._api)
    payload = dumps({'base': base, 'head': head,
                     'commit_message': message})
    merged = self._json(self._post(endpoint, data=payload), 201)
    return RepoCommit(merged, self) if merged else None
def milestone(self, number):
    """Get the milestone indicated by ``number``.

    :param int number: (required), unique id number of the milestone
    :returns: :class:`Milestone <github3.issues.Milestone>`
    """
    endpoint = self._build_url('milestones', str(number),
                               base_url=self._api)
    fetched = self._json(self._get(endpoint), 200)
    return Milestone(fetched, self) if fetched else None
@requires_auth
def pubsubhubbub(self, mode, topic, callback, secret=''):
    """Create/update a pubsubhubbub hook.

    :param str mode: (required), accepted values: ('subscribe',
        'unsubscribe')
    :param str topic: (required), form:
        https://github.com/:user/:repo/events/:event
    :param str callback: (required), the URI that receives the updates
    :param str secret: (optional), shared secret key that generates a
        SHA1 HMAC of the payload content.
    :returns: bool
    """
    from re import match
    # Raw string: the previous non-raw pattern relied on Python passing
    # unrecognized escapes (\., \w) through, which is deprecated and a
    # SyntaxWarning/Error in modern Python.
    m = match(r'https://github\.com/\w+/[\w\._-]+/events/\w+', topic)
    status = False
    if mode and topic and callback and m:
        data = [('hub.mode', mode), ('hub.topic', topic),
                ('hub.callback', callback)]
        if secret:
            data.append(('hub.secret', secret))
        url = self._build_url('hub')
        # The hub endpoint expects form encoding, not JSON.
        h = {'Content-Type': 'application/x-www-form-urlencoded'}
        status = self._boolean(self._post(url, data=data, headers=h), 204,
                               404)
    return status
def pull_request(self, number):
    """Get the pull request indicated by ``number``.

    :param int number: (required), number of the pull request.
    :returns: :class:`PullRequest <github3.pulls.PullRequest>`
    """
    if int(number) <= 0:
        return None
    endpoint = self._build_url('pulls', str(number), base_url=self._api)
    fetched = self._json(self._get(endpoint), 200)
    return PullRequest(fetched, self) if fetched else None
def readme(self):
    """Get the README for this repository.

    :returns: :class:`Contents <Contents>`
    """
    endpoint = self._build_url('readme', base_url=self._api)
    fetched = self._json(self._get(endpoint), 200)
    return Contents(fetched) if fetched else None
def ref(self, ref):
    """Get a reference pointed to by ``ref``.

    The most common will be branches and tags. For a branch, you must
    specify 'heads/branchname' and for a tag, 'tags/tagname'. Essentially,
    the system should return any reference you provide it in the
    namespace, including notes and stashes (provided they exist on the
    server).

    :param str ref: (required)
    :returns: :class:`Reference <github3.git.Reference>`
    """
    endpoint = self._build_url('git', 'refs', ref, base_url=self._api)
    fetched = self._json(self._get(endpoint), 200)
    return Reference(fetched, self) if fetched else None
@requires_auth
def remove_collaborator(self, login):
    """Remove collaborator ``login`` from the repository.

    :param str login: (required), login name of the collaborator
    :returns: bool
    """
    if not login:
        return False
    url = self._build_url('collaborators', login, base_url=self._api)
    return self._boolean(self._delete(url), 204, 404)
@requires_auth
def set_subscription(self, subscribed, ignored):
    """Set the user's subscription for this repository.

    :param bool subscribed: (required), determines if notifications should
        be received from this repository.
    :param bool ignored: (required), determines if notifications should be
        ignored from this repository.
    :returns: :class:`Subscription <Subscription>`
    """
    payload = dumps({'subscribed': subscribed, 'ignored': ignored})
    endpoint = self._build_url('subscription', base_url=self._api)
    updated = self._json(self._put(endpoint, data=payload), 200)
    return Subscription(updated, self) if updated else None
@requires_auth
def subscription(self):
    """Return the authenticated user's subscription for this repository.

    :returns: :class:`Subscription <github3.notifications.Subscription>`
    """
    endpoint = self._build_url('subscription', base_url=self._api)
    fetched = self._json(self._get(endpoint), 200)
    return Subscription(fetched, self) if fetched else None
def tag(self, sha):
    """Get an annotated tag.

    http://learn.github.com/p/tagging.html

    :param str sha: (required), sha of the object for this tag
    :returns: :class:`Tag <github3.git.Tag>`
    """
    endpoint = self._build_url('git', 'tags', sha, base_url=self._api)
    fetched = self._json(self._get(endpoint), 200)
    return Tag(fetched) if fetched else None
def tree(self, sha):
    """Get a tree.

    :param str sha: (required), sha of the object for this tree
    :returns: :class:`Tree <github3.git.Tree>`
    """
    endpoint = self._build_url('git', 'trees', sha, base_url=self._api)
    fetched = self._json(self._get(endpoint), 200)
    return Tree(fetched, self) if fetched else None
def update_label(self, name, color, new_name=''):
    """Update the label ``name``.

    :param str name: (required), name of the label
    :param str color: (required), color code
    :param str new_name: (optional), new name of the label
    :returns: bool
    """
    target = self.label(name)
    if not target:
        return False
    if new_name:
        return target.update(new_name, color)
    return target.update(name, color)
class Branch(GitHubCore):
    """The :class:`Branch <Branch>` object. It holds the information GitHub
    returns about a branch on a :class:`Repository <Repository>`.
    """

    def __init__(self, branch, session=None):
        super(Branch, self).__init__(branch, session)
        #: Name of the branch.
        self.name = branch.get('name')
        #: The branch's :class:`RepoCommit <RepoCommit>` or ``None``.
        commit = branch.get('commit')
        self.commit = RepoCommit(commit, self._session) if commit else commit
        #: Returns '_links' attribute.
        self.links = branch.get('_links', {})

    def __repr__(self):
        return '<Repository Branch [{0}]>'.format(self.name)
class Contents(GitHubObject):
    """The :class:`Contents <Contents>` object. It holds the information
    concerning any content in a repository requested via the API.
    """

    def __init__(self, content):
        super(Contents, self).__init__(content)
        # API link to this content itself.
        self._api = content['_links'].get('self', '')
        #: Dictionary of links
        self.links = content.get('_links')
        #: Encoding used on the content; should always be 'base64'.
        self.encoding = content.get('encoding', '')
        #: Base64-encoded content of the file.
        self.content = content.get('content', '')
        #: Decoded content of the file.
        if self.encoding == 'base64':
            self.decoded = b64decode(self.content.encode())
        else:
            self.decoded = self.content
        #: Name of the content.
        self.name = content.get('name', '')
        #: Path to the content.
        self.path = content.get('path', '')
        #: Size of the content
        self.size = content.get('size', 0)
        #: SHA string.
        self.sha = content.get('sha', '')
        #: Type of content; should always be 'file'.
        self.type = content.get('type', '')

    def __repr__(self):
        return '<Content [{0}]>'.format(self.path)

    def __str__(self):
        return self.decoded

    @property
    def git_url(self):
        """API URL for this blob"""
        return self.links['git']

    @property
    def html_url(self):
        """URL pointing to the content on GitHub."""
        return self.links['html']
class Download(GitHubCore):
    """The :class:`Download <Download>` object. It represents how GitHub sends
    information about files uploaded to the downloads section of a repository.

    .. warning::

        On 2013-03-11, this API will be deprecated by GitHub. There will also
        be a new version of github3.py to accompany this at that date.
    """

    def __init__(self, download, session=None):
        super(Download, self).__init__(download, session)
        self._api = download.get('url', '')
        #: URL of the download at GitHub.
        self.html_url = download.get('html_url', '')
        #: Unique id of the download on GitHub.
        self.id = download.get('id', 0)
        #: Name of the download.
        self.name = download.get('name', '')
        #: Description of the download.
        self.description = download.get('description', '')
        #: Size of the download.
        self.size = download.get('size', 0)
        #: How many times this particular file has been downloaded.
        self.download_count = download.get('download_count', 0)
        #: Content type of the download.
        self.content_type = download.get('content_type', '')

    def __repr__(self):
        return '<Download [{0}]>'.format(self.name)

    @requires_auth
    def delete(self):
        """Delete this download if authenticated"""
        return self._boolean(self._delete(self._api), 204, 404)

    def saveas(self, path=''):
        """Save this download to the path specified.

        :param str path: (optional), if no path is specified, it will be
            saved in the current directory with the name specified by GitHub.
            it can take a file-like object as well
        :returns: bool
        """
        if not path:
            path = self.name
        resp = self._get(self.html_url, allow_redirects=True, stream=True)
        if not self._boolean(resp, 200, 404):
            return False  # (No coverage)
        # ``path`` may itself be a writable file-like object.
        is_file_like = isinstance(getattr(path, 'write', None), Callable)
        fd = path if is_file_like else open(path, 'wb')
        for chunk in resp.iter_content():
            fd.write(chunk)
        if not is_file_like:
            fd.close()
        return True
class Hook(GitHubCore):
    """The :class:`Hook <Hook>` object. This handles the information returned
    by GitHub about hooks set on a repository."""

    def __init__(self, hook, session=None):
        super(Hook, self).__init__(hook, session)
        self._api = hook.get('url', '')
        #: datetime object representing when this hook was last updated.
        self.updated_at = None
        if hook.get('updated_at'):
            self.updated_at = self._strptime(hook.get('updated_at'))
        #: datetime object representing the date the hook was created.
        self.created_at = self._strptime(hook.get('created_at'))
        #: The name of the hook.
        self.name = hook.get('name')
        #: Events which trigger the hook.
        self.events = hook.get('events')
        #: Whether or not this Hook is marked as active on GitHub
        self.active = hook.get('active')
        #: Dictionary containing the configuration for the Hook.
        self.config = hook.get('config')
        #: Unique id of the hook.
        self.id = hook.get('id')

    def __repr__(self):
        return '<Hook [{0}]>'.format(self.name)

    def _update_(self, hook):
        # Refresh every attribute from a fresh JSON payload.
        self.__init__(hook, self._session)

    @requires_auth
    def delete(self):
        """Delete this hook.

        :returns: bool
        """
        return self._boolean(self._delete(self._api), 204, 404)

    @requires_auth
    def delete_subscription(self):
        """Delete the user's subscription to this repository.

        :returns: bool
        """
        url = self._build_url('subscription', base_url=self._api)
        return self._boolean(self._delete(url), 204, 404)

    @requires_auth
    def edit(self, name, config, events=None, add_events=None, rm_events=None,
             active=True):
        """Edit this hook.

        :param str name: (required), name of the service being called
        :param dict config: (required), key-value pairs of settings for this
            hook
        :param list events: (optional), which events should this be triggered
            for
        :param list add_events: (optional), events to be added to the list of
            events that this hook triggers for
        :param list rm_events: (optional), events to be removed from the list
            of events that this hook triggers for
        :param bool active: (optional), should this event be active
        :returns: bool
        """
        # The list parameters previously used mutable defaults ([]), which
        # are shared across all calls; None defaults are equivalent here
        # because the lists are only read, never mutated.
        json = None
        if name and config and isinstance(config, dict):
            data = {'name': name, 'config': config, 'active': active}
            if events:
                data['events'] = events
            if add_events:
                data['add_events'] = add_events
            if rm_events:
                data['remove_events'] = rm_events
            json = self._json(self._patch(self._api, data=dumps(data)), 200)
        if json:
            self._update_(json)
            return True
        return False

    @requires_auth
    def test(self):
        """Test this hook

        :returns: bool
        """
        url = self._build_url('tests', base_url=self._api)
        return self._boolean(self._post(url), 204, 404)
class RepoTag(GitHubObject):
    """The :class:`RepoTag <RepoTag>` object. This stores the information
    representing a tag that was created on a repository.
    """

    def __init__(self, tag):
        super(RepoTag, self).__init__(tag)
        #: Name of the tag.
        self.name = tag.get('name')
        #: Dictionary containing the SHA and URL of the commit.
        self.commit = tag.get('commit', {})
        #: URL of the GitHub-generated zipball for this tag.
        self.zipball_url = tag.get('zipball_url')
        #: URL of the GitHub-generated tarball for this tag.
        self.tarball_url = tag.get('tarball_url')

    def __repr__(self):
        return '<Repository Tag [{0}]>'.format(self)

    def __str__(self):
        return self.name
class RepoComment(BaseComment):
    """The :class:`RepoComment <RepoComment>` object. This stores the
    information about a comment on a file in a repository.
    """

    def __init__(self, comment, session=None):
        super(RepoComment, self).__init__(comment, session)
        #: Commit id on which the comment was made.
        self.commit_id = comment.get('commit_id')
        #: URL of the comment on GitHub.
        self.html_url = comment.get('html_url')
        #: The line number where the comment is located.
        self.line = comment.get('line')
        #: The path to the file where the comment was made.
        self.path = comment.get('path')
        #: The position in the diff where the comment was made.
        self.position = comment.get('position')
        #: datetime object representing when the comment was updated.
        self.updated_at = comment.get('updated_at')
        if self.updated_at:
            self.updated_at = self._strptime(self.updated_at)
        #: Login of the user who left the comment.
        self.user = None
        if comment.get('user'):
            self.user = User(comment.get('user'), self)

    def __repr__(self):
        # Guard against a missing user: the previous ``self.user.login or ''``
        # raised AttributeError when ``self.user`` was None.
        login = self.user.login if self.user else ''
        return '<Repository Comment [{0}/{1}]>'.format(self.commit_id[:7],
                                                       login)

    def _update_(self, comment):
        super(RepoComment, self)._update_(comment)
        self.__init__(comment, self._session)

    @requires_auth
    def update(self, body, sha, line, path, position):
        """Update this comment.

        :param str body: (required)
        :param str sha: (required), sha id of the commit to comment on
        :param int line: (required), line number to comment on
        :param str path: (required), relative path of the file you're
            commenting on
        :param int position: (required), line index in the diff to comment on
        :returns: bool
        """
        json = None
        if body and sha and path and line > 0 and position > 0:
            data = {'body': body, 'commit_id': sha, 'line': line,
                    'path': path, 'position': position}
            json = self._json(self._post(self._api, data=dumps(data)), 200)
        if json:
            self._update_(json)
            return True
        return False
class RepoCommit(BaseCommit):
    """The :class:`RepoCommit <RepoCommit>` object. This represents a commit as
    viewed by a :class:`Repository`. This is different from a Commit object
    returned from the git data section.
    """

    def __init__(self, commit, session=None):
        super(RepoCommit, self).__init__(commit, session)
        self.sha = commit.get('sha')
        #: :class:`User <github3.users.User>` who authored the commit.
        self.author = commit.get('author')
        if self.author:
            self.author = User(self.author, self._session)
        #: :class:`User <github3.users.User>` who committed the commit.
        self.committer = commit.get('committer')
        if self.committer:
            self.committer = User(self.committer, self._session)
        #: :class:`Commit <github3.git.Commit>`.
        self.commit = commit.get('commit')
        if self.commit:
            self.commit = Commit(self.commit, self._session)
        #: The number of additions made in the commit.
        self.additions = 0
        #: The number of deletions made in the commit.
        self.deletions = 0
        #: Total number of changes in the files.
        self.total = 0
        stats = commit.get('stats')
        if stats:
            self.additions = stats.get('additions')
            self.deletions = stats.get('deletions')
            self.total = stats.get('total')
        #: The files that were modified by this commit.
        self.files = commit.get('files', [])

    def __repr__(self):
        return '<Repository Commit [{0}]>'.format(self.sha[:7])

    def diff(self):
        """Return the diff"""
        resp = self._get(self._api,
                         headers={'Accept': 'application/vnd.github.diff'})
        if self._boolean(resp, 200, 404):
            return resp.content
        return None

    def patch(self):
        """Return the patch"""
        resp = self._get(self._api,
                         headers={'Accept': 'application/vnd.github.patch'})
        if self._boolean(resp, 200, 404):
            return resp.content
        return None
class Comparison(GitHubCore):
    """The :class:`Comparison <Comparison>` object. This encapsulates the
    information returned by GitHub comparing two commit objects in a
    repository."""

    def __init__(self, compare, session=None):
        # ``session`` added for consistency with the other GitHubCore
        # subclasses (Branch, RepoCommit, ...); it defaults to None so the
        # existing one-argument callers keep working.
        super(Comparison, self).__init__(compare, session)
        self._api = compare.get('url', '')
        #: URL to view the comparison at GitHub
        self.html_url = compare.get('html_url')
        #: Permanent link to this comparison.
        self.permalink_url = compare.get('permalink_url')
        #: URL to see the diff between the two commits.
        self.diff_url = compare.get('diff_url')
        #: Patch URL at GitHub for the comparison.
        self.patch_url = compare.get('patch_url')
        #: :class:`RepoCommit <RepoCommit>` representing the base of the
        #: comparison.
        self.base_commit = RepoCommit(compare.get('base_commit'), None)
        #: Behind or ahead.
        self.status = compare.get('status')
        #: Number of commits ahead by.
        self.ahead_by = compare.get('ahead_by')
        #: Number of commits behind by.
        self.behind_by = compare.get('behind_by')
        #: Number of commits difference in the comparison.
        self.total_commits = compare.get('total_commits')
        #: List of :class:`RepoCommit <RepoCommit>` objects.
        # Default to an empty list so a payload without 'commits' does not
        # raise TypeError when iterated.
        self.commits = [RepoCommit(com) for com in compare.get('commits', [])]
        #: List of dicts describing the files modified.
        self.files = compare.get('files', [])

    def __repr__(self):
        return '<Comparison of {0} commits>'.format(self.total_commits)

    def diff(self):
        """Return the diff"""
        resp = self._get(self._api,
                         headers={'Accept': 'application/vnd.github.diff'})
        return resp.content if self._boolean(resp, 200, 404) else None

    def patch(self):
        """Return the patch"""
        resp = self._get(self._api,
                         headers={'Accept': 'application/vnd.github.patch'})
        return resp.content if self._boolean(resp, 200, 404) else None
class Status(GitHubObject):
    """The :class:`Status <Status>` object. This represents information from
    the Repo Status API."""

    def __init__(self, status):
        super(Status, self).__init__(status)
        #: datetime object representing the creation of the status object
        self.created_at = self._strptime(status.get('created_at'))
        #: :class:`User <github3.users.User>` who created the object
        self.creator = User(status.get('creator'))
        #: Short description of the Status
        self.description = status.get('description')
        #: GitHub ID for the status object
        self.id = status.get('id')
        #: State of the status, e.g., 'success', 'pending', 'failed', 'error'
        self.state = status.get('state')
        #: URL to view more information about the status
        self.target_url = status.get('target_url')
        #: datetime object representing the last time the status was updated
        updated = status.get('updated_at')
        self.updated_at = self._strptime(updated) if updated else None

    def __repr__(self):
        return '<Status [{s.id}:{s.state}]>'.format(s=self)
# ---- end of section ----
#!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import argparse
import subprocess
import os
import glob
import time
from git_helper import GitHelper;
from file_editor import FileEditor;
g_args = None
g_apollo_root = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../../")
git_helper= None
def glob_files(path, pattern):
    """Recursively list files under *path* whose names end with *pattern*.

    :param path: directory to walk
    :param pattern: filename suffix to match (e.g. ".h")
    :return: list of full file paths, in os.walk order
    """
    found = []
    for root, _dirs, filenames in os.walk(path):
        found.extend(os.path.join(root, fname)
                     for fname in filenames
                     if str(fname).endswith(pattern))
    return found
def clean_gflags():
    """Delete gflag declarations/definitions that are never used.

    Scans every C/C++ header and source file under ``modules/``, records the
    files where each gflag is declared and defined, and removes entries for
    flags that are never referenced anywhere.
    """
    modules = os.path.join(g_apollo_root, "modules")
    # Headers first, then sources, matching the original scan order.
    code_files = []
    for ext in (".h", ".hpp", ".cpp", ".cc"):
        code_files.extend(glob_files(modules, ext))

    flag_declares = {}
    flag_defines = {}
    flag_used = set()
    for filename in code_files:
        if not os.path.isfile(filename):
            continue
        editor = FileEditor(filename)
        for flag in editor.declared_gflags():
            flag_declares.setdefault(flag, []).append(filename)
        for flag in editor.defined_gflags():
            flag_defines.setdefault(flag, []).append(filename)
        flag_used.update(editor.used_gflags())

    # Declarations and definitions are cleaned identically, so one loop
    # replaces the previously duplicated removal code.
    for locations in (flag_declares, flag_defines):
        for flag, files in locations.items():
            if flag in flag_used:
                continue
            for path in files:
                editor = FileEditor(path)
                editor.delete_gflag(flag)
                editor.save()
def format_recent_files():
    """Re-format files changed by 'codebot' within the last 24 hours.

    Strips the doxygen file-level comment from each changed file and saves it
    with formatting applied.  No-op when codebot made no recent commits.
    """
    one_day_seconds = 24 * 60 * 60
    commits = git_helper.get_commit_since_date(
        "codebot", int(time.time()) - one_day_seconds)
    if not commits:
        return
    for file_name in git_helper.get_changed_files_since_commit(commits[-1]):
        if not os.path.isfile(file_name):
            continue
        editor = FileEditor(file_name)
        editor.delete_doxygen_file()
        editor.save(format=True)
if __name__ == "__main__":
    # Entry point: clean unused gflags, then re-format the files codebot
    # touched in the last day.  Works against the 'upstream' remote.
    git_helper= GitHelper(g_apollo_root, remote="upstream")
    clean_gflags()
    format_recent_files()
# NOTE: upstream commit message, kept for provenance: "skip clean gflags in codebot"
#!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import argparse
import subprocess
import os
import glob
import time
from git_helper import GitHelper;
from file_editor import FileEditor;
g_args = None
g_apollo_root = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../../")
git_helper= None
def glob_files(path, pattern):
    """Recursively list files under *path* whose names end with *pattern*.

    :param path: directory to walk
    :param pattern: filename suffix to match (e.g. ".h")
    :return: list of full file paths, in os.walk order
    """
    found = []
    for root, _dirs, filenames in os.walk(path):
        found.extend(os.path.join(root, fname)
                     for fname in filenames
                     if str(fname).endswith(pattern))
    return found
def clean_gflags():
    """Delete gflag declarations/definitions that are never used.

    Scans every C/C++ header and source file under ``modules/``, records the
    files where each gflag is declared and defined, and removes entries for
    flags that are never referenced anywhere.
    """
    modules = os.path.join(g_apollo_root, "modules")
    # Headers first, then sources, matching the original scan order.
    code_files = []
    for ext in (".h", ".hpp", ".cpp", ".cc"):
        code_files.extend(glob_files(modules, ext))

    flag_declares = {}
    flag_defines = {}
    flag_used = set()
    for filename in code_files:
        if not os.path.isfile(filename):
            continue
        editor = FileEditor(filename)
        for flag in editor.declared_gflags():
            flag_declares.setdefault(flag, []).append(filename)
        for flag in editor.defined_gflags():
            flag_defines.setdefault(flag, []).append(filename)
        flag_used.update(editor.used_gflags())

    # Declarations and definitions are cleaned identically, so one loop
    # replaces the previously duplicated removal code.
    for locations in (flag_declares, flag_defines):
        for flag, files in locations.items():
            if flag in flag_used:
                continue
            for path in files:
                editor = FileEditor(path)
                editor.delete_gflag(flag)
                editor.save()
def format_recent_files():
    """Re-format files changed by 'codebot' within the last 24 hours.

    Strips the doxygen file-level comment from each changed file and saves it
    with formatting applied.  No-op when codebot made no recent commits.
    """
    one_day_seconds = 24 * 60 * 60
    commits = git_helper.get_commit_since_date(
        "codebot", int(time.time()) - one_day_seconds)
    if not commits:
        return
    for file_name in git_helper.get_changed_files_since_commit(commits[-1]):
        if not os.path.isfile(file_name):
            continue
        editor = FileEditor(file_name)
        editor.delete_doxygen_file()
        editor.save(format=True)
if __name__ == "__main__":
    # Entry point: re-format the files codebot touched in the last day,
    # working against the 'upstream' remote.  The gflags cleanup step is
    # intentionally not run in this variant (dead commented-out call removed).
    git_helper = GitHelper(g_apollo_root, remote="upstream")
    format_recent_files()
# ---- end of section ----
# From serenejiang DescreteFDR
# https://github.com/serenejiang/DiscreteFDR
from logging import getLogger
import types
import numpy as np
import scipy as sp
import scipy.stats
from statsmodels.sandbox.stats.multicomp import multipletests
from scipy.special import comb
logger = getLogger(__name__)
# data transformation
def rankdata(data):
    """Rank-transform every row (feature) of *data* independently."""
    logger.debug('ranking the data')
    ranked = np.zeros(np.shape(data))
    n_rows = np.shape(data)[0]
    for row_idx in range(n_rows):
        ranked[row_idx, :] = sp.stats.rankdata(data[row_idx, :])
    return ranked
def log2data(data):
    """Log2-transform *data*, first flooring values below 2 at 2.

    The flooring mutates the input array in place; the returned array is the
    log2-transformed result.
    """
    logger.debug('log2 transforming the data')
    data[data < 2] = 2
    return np.log2(data)
def binarydata(data):
    """Convert *data* to presence/absence: every non-zero entry becomes 1.

    Mutates the input array in place and returns it.
    """
    logger.debug('binary transforming the data')
    data[data != 0] = 1
    return data
def normdata(data):
    """Normalize each sample (column) of *data* to sum to 1.

    Returns a new array; the input is not modified.
    """
    logger.debug('normalizing the data')
    data = data / np.sum(data, axis=0)
    return data
# different methods to calculate test statistic
def meandiff(data, labels):
    """Per-row difference of group means: mean(group 1) - mean(group 0)."""
    in_group0 = labels == 0
    in_group1 = labels == 1
    return (np.mean(data[:, in_group1], axis=1)
            - np.mean(data[:, in_group0], axis=1))
def stdmeandiff(data, labels):
    """Per-row mean difference scaled by the sum of the group std deviations.

    Rows whose two groups both have zero spread get a divisor of 1 so the
    statistic stays defined (0/1 instead of 0/0).
    """
    g0 = data[:, labels == 0]
    g1 = data[:, labels == 1]
    spread = np.std(g0, axis=1, ddof=1) + np.std(g1, axis=1, ddof=1)
    spread[spread == 0] = 1
    return (np.mean(g1, axis=1) - np.mean(g0, axis=1)) / spread
def mannwhitney(data, labels):
    """Per-row Mann-Whitney U statistic between the two label groups."""
    g0 = data[:, labels == 0]
    g1 = data[:, labels == 1]
    n_rows = np.shape(data)[0]
    stats = [scipy.stats.mannwhitneyu(g0[i, :], g1[i, :]).statistic
             for i in range(n_rows)]
    return np.array(stats)
# kruwallis give a column vector while others give row vector
def kruwallis(data, labels):
    """Per-row Kruskal-Wallis H statistic across all label groups.

    Assumes group labels are the integers 0 .. G-1 (as documented by dsfdr).
    Returns one statistic per row.
    """
    num_groups = len(np.unique(labels))
    out = np.zeros(np.shape(data)[0])
    for row in range(np.shape(data)[0]):
        samples = [data[row, labels == j] for j in range(num_groups)]
        out[row] = scipy.stats.kruskal(*samples).statistic
    return out
def pearson(data, labels):
    """Per-row Pearson correlation coefficient of each feature with *labels*."""
    n_rows = np.shape(data)[0]
    return np.array([scipy.stats.pearsonr(data[i, :], labels)[0]
                     for i in range(n_rows)])
def spearman(data, labels):
    """Per-row Spearman rank correlation of each feature with *labels*."""
    n_rows = np.shape(data)[0]
    return np.array([scipy.stats.spearmanr(data[i, :], labels).correlation
                     for i in range(n_rows)])
# new fdr method
def dsfdr(data, labels, transform_type='rankdata', method='meandiff',
          alpha=0.1, numperm=1000, fdr_method='dsfdr'):
    '''
    calculate the Discrete FDR for the data

    input:
    data : N x S numpy array
        each column is a sample (S total), each row an OTU (N total)
    labels : a 1d numpy array (length S)
        the labels of each sample (same order as data) with the group
        (0/1 if binary, 0-G-1 if G groups, or numeric values for correlation)
    transform_type : str or None
        transformation to apply to the data before calculating
        the test statistic
        'rankdata' : rank transform each OTU reads
        'log2data' : calculate log2 for each OTU using minimal cutoff of 2
        'normdata' : normalize the data to constant sum per samples
        'binarydata' : convert to binary absence/presence
        None : no transformation to perform
    method : str or function
        the method to use for calculating test statistics:
        'meandiff' : mean(A)-mean(B) (binary)
        'mannwhitney' : mann-whitney u-test (binary)
        'kruwallis' : kruskal-wallis test (multiple groups)
        'stdmeandiff' : (mean(A)-mean(B))/(std(A)+std(B)) (binary)
        'spearman' : spearman correlation (numeric)
        'pearson' : pearson correlation (numeric)
        'nonzerospearman' : spearman correlation only non-zero entries
        (numeric)
        'nonzeropearson' : pearson correlation only non-zero entries (numeric)
        function : use this function to calculate the test statistic
        (input is data, labels, output is array of float)
    alpha : float
        the desired FDR control level
    numperm : int
        number of permutations to perform
    fdr_method : str
        the FDR procedure to determine significant bacteria
        'dsfdr' : discrete FDR method
        'bhfdr' : Benjamini-Hochberg FDR method
        'byfdr' : Benjamini-Yekutielli FDR method
        'filterBH' : Benjamini-Hochberg FDR method with filtering

    output:
    reject : np array of bool (length N)
        True for OTUs where the null hypothesis is rejected
    tstat : np array of float (length N)
        the test statistic value for each OTU (for effect size)
    pvals : np array of float (length N)
        the p-value for each OTU

    raises ValueError for an unsupported transform_type, method or fdr_method.
    '''
    logger.debug('dsfdr using fdr method: %s' % fdr_method)
    data = data.copy()

    if fdr_method == 'filterBH':
        # keep only features whose minimal achievable p-value (based on the
        # number of non-zero entries) could still pass alpha
        index = []
        n0 = np.sum(labels == 0)
        n1 = np.sum(labels == 1)
        for i in range(np.shape(data)[0]):
            nonzeros = np.count_nonzero(data[i, :])
            if nonzeros < min(n0, n1):
                pval_min = (comb(n0, nonzeros, exact=True)
                            + comb(n1, nonzeros, exact=True)) / comb(n0 + n1, nonzeros)
                if pval_min <= alpha:
                    index.append(i)
            else:
                index.append(i)
        data = data[index, :]

    # transform the data
    if transform_type == 'rankdata':
        data = rankdata(data)
    elif transform_type == 'log2data':
        data = log2data(data)
    elif transform_type == 'binarydata':
        data = binarydata(data)
    elif transform_type == 'normdata':
        data = normdata(data)
    elif transform_type is None:
        pass
    else:
        raise ValueError('transform type %s not supported' % transform_type)

    # (the duplicated numbact/labels statements were removed; once is enough)
    numbact = np.shape(data)[0]
    labels = labels.copy()

    logger.debug('start permutation')
    if method == 'meandiff':
        # fast matrix multiplication based calculation
        method = meandiff
        tstat = method(data, labels)
        t = np.abs(tstat)
        numsamples = np.shape(data)[1]
        p = np.zeros([numsamples, numperm])
        k1 = 1 / np.sum(labels == 0)
        k2 = 1 / np.sum(labels == 1)
        for cperm in range(numperm):
            np.random.shuffle(labels)
            p[labels == 0, cperm] = k1
        p2 = np.ones(p.shape) * k2
        p2[p > 0] = 0
        mean1 = np.dot(data, p)
        mean2 = np.dot(data, p2)
        u = np.abs(mean1 - mean2)
    elif method == 'mannwhitney' or method == \
            'kruwallis' or method == 'stdmeandiff':
        if method == 'mannwhitney':
            method = mannwhitney
        if method == 'kruwallis':
            method = kruwallis
        if method == 'stdmeandiff':
            method = stdmeandiff
        tstat = method(data, labels)
        t = np.abs(tstat)
        u = np.zeros([numbact, numperm])
        for cperm in range(numperm):
            rlabels = np.random.permutation(labels)
            rt = method(data, rlabels)
            u[:, cperm] = rt
    elif method == 'spearman' or method == 'pearson':
        # fast matrix multiplication based correlation
        if method == 'spearman':
            data = rankdata(data)
            labels = sp.stats.rankdata(labels)
        meanval = np.mean(data, axis=1).reshape([data.shape[0], 1])
        data = data - np.repeat(meanval, data.shape[1], axis=1)
        labels = labels - np.mean(labels)
        tstat = np.dot(data, labels)
        t = np.abs(tstat)
        # calculate the normalized test statistic
        stdval = np.std(data, axis=1).reshape([data.shape[0], 1])
        # to fix problem with 0 std divide by zero (since we permute it's ok)
        # note we don't remove from mutiple hypothesis - could be done better
        stdval[stdval == 0] = 1
        tdata = data / np.repeat(stdval, data.shape[1], axis=1)
        meanval = np.mean(tdata, axis=1).reshape([tdata.shape[0], 1])
        tdata = tdata - np.repeat(meanval, tdata.shape[1], axis=1)
        # NOTE(review): the next two lines subtract the *data* mean from the
        # already-centered tdata -- looks redundant/suspicious, but behavior
        # is preserved pending confirmation against upstream DiscreteFDR.
        meanval = np.mean(data, axis=1).reshape([data.shape[0], 1])
        tdata = tdata - np.repeat(meanval, tdata.shape[1], axis=1)
        tlabels = labels / np.std(labels)
        # fix for n since we multiply without normalizing for n
        tlabels = tlabels / len(tlabels)
        tlabels = tlabels - np.mean(tlabels)
        tstat = np.dot(tdata, tlabels)
        permlabels = np.zeros([len(labels), numperm])
        for cperm in range(numperm):
            rlabels = np.random.permutation(labels)
            permlabels[:, cperm] = rlabels
        u = np.abs(np.dot(data, permlabels))
    elif method == 'nonzerospearman' or method == 'nonzeropearson':
        t = np.zeros([numbact])
        tstat = np.zeros([numbact])
        u = np.zeros([numbact, numperm])
        for i in range(numbact):
            index = np.nonzero(data[i, :])
            label_nonzero = labels[index]
            sample_nonzero = data[i, :][index]
            if method == 'nonzerospearman':
                sample_nonzero = sp.stats.rankdata(sample_nonzero)
                label_nonzero = sp.stats.rankdata(label_nonzero)
            sample_nonzero = sample_nonzero - np.mean(sample_nonzero)
            label_nonzero = label_nonzero - np.mean(label_nonzero)
            tstat[i] = np.dot(sample_nonzero, label_nonzero)
            t[i] = np.abs(tstat[i])
            tstat[i] = tstat[i] / (np.std(sample_nonzero) *
                                   np.std(label_nonzero) * len(sample_nonzero))
            permlabels = np.zeros([len(label_nonzero), numperm])
            for cperm in range(numperm):
                rlabels = np.random.permutation(label_nonzero)
                permlabels[:, cperm] = rlabels
            u[i, :] = np.abs(np.dot(sample_nonzero, permlabels))
    elif isinstance(method, types.FunctionType):
        # call the user-defined function of statistical test
        t = method(data, labels)
        tstat = t.copy()
        u = np.zeros([numbact, numperm])
        for cperm in range(numperm):
            rlabels = np.random.permutation(labels)
            rt = method(data, rlabels)
            u[:, cperm] = rt
    else:
        # Previously printed a message and returned (None, None), which broke
        # the documented 3-tuple contract; raise instead, consistent with the
        # other argument checks in this function.
        raise ValueError('method %s not supported' % method)

    # fix floating point errors (important for permutation values!)
    # https://github.com/numpy/numpy/issues/8116
    for crow in range(numbact):
        closepos = np.isclose(t[crow], u[crow, :])
        u[crow, closepos] = t[crow]

    # calculate permutation p-vals
    pvals = np.zeros([numbact])  # p-value for original test statistic t
    # pseudo p-values for permutated test statistic u
    pvals_u = np.zeros([numbact, numperm])
    for crow in range(numbact):
        allstat = np.hstack([t[crow], u[crow, :]])
        stat_rank = sp.stats.rankdata(allstat, method='min')
        # assign ranks to t from biggest as 1
        allstat = 1 - ((stat_rank - 1) / len(allstat))
        pvals[crow] = allstat[0]
        pvals_u[crow, :] = allstat[1:]

    # calculate FDR
    if fdr_method == 'dsfdr':
        # sort unique p-values for original test statistics biggest to smallest
        pvals_unique = np.unique(pvals)
        sortp = pvals_unique[np.argsort(-pvals_unique)]
        # find a data-dependent threshold for the p-value
        foundit = False
        allfdr = []
        allt = []
        for cp in sortp:
            realnum = np.sum(pvals <= cp)
            fdr = (realnum + np.count_nonzero(
                pvals_u <= cp)) / (realnum * (numperm + 1))
            allfdr.append(fdr)
            allt.append(cp)
            if fdr <= alpha:
                realcp = cp
                foundit = True
                break
        if not foundit:
            # no good threshold was found
            reject = np.repeat([False], numbact)
            return reject, tstat, pvals
        # fill the reject null hypothesis (the dead np.zeros pre-assignment
        # that was immediately overwritten has been removed)
        reject = (pvals <= realcp)
    elif fdr_method == 'bhfdr' or fdr_method == 'filterBH':
        t_star = np.array([t, ] * numperm).transpose()
        pvals = (np.sum(u >= t_star, axis=1) + 1) / (numperm + 1)
        reject = multipletests(pvals, alpha=alpha, method='fdr_bh')[0]
    elif fdr_method == 'byfdr':
        t_star = np.array([t, ] * numperm).transpose()
        pvals = (np.sum(u >= t_star, axis=1) + 1) / (numperm + 1)
        reject = multipletests(pvals, alpha=alpha, method='fdr_by')[0]
    else:
        raise ValueError('fdr method %s not supported' % fdr_method)

    return reject, tstat, pvals
# NOTE: upstream commit message, kept for provenance: "fix indentation in dsfdr (who changed it?)"
# From serenejiang DescreteFDR
# https://github.com/serenejiang/DiscreteFDR
from logging import getLogger
import types
import numpy as np
import scipy as sp
import scipy.stats
from statsmodels.sandbox.stats.multicomp import multipletests
from scipy.special import comb
logger = getLogger(__name__)
# data transformation
def rankdata(data):
    """Rank-transform every row (feature) of *data* independently."""
    logger.debug('ranking the data')
    ranked = np.zeros(np.shape(data))
    n_rows = np.shape(data)[0]
    for row_idx in range(n_rows):
        ranked[row_idx, :] = sp.stats.rankdata(data[row_idx, :])
    return ranked
def log2data(data):
    """Log2-transform *data*, first flooring values below 2 at 2.

    The flooring mutates the input array in place; the returned array is the
    log2-transformed result.
    """
    logger.debug('log2 transforming the data')
    data[data < 2] = 2
    return np.log2(data)
def binarydata(data):
    """Convert *data* to presence/absence: every non-zero entry becomes 1.

    Mutates the input array in place and returns it.
    """
    logger.debug('binary transforming the data')
    data[data != 0] = 1
    return data
def normdata(data):
    """Normalize each sample (column) of *data* to sum to 1.

    Returns a new array; the input is not modified.
    """
    logger.debug('normalizing the data')
    data = data / np.sum(data, axis=0)
    return data
# different methods to calculate test statistic
def meandiff(data, labels):
    """Per-row difference of group means: mean(group 1) - mean(group 0)."""
    in_group0 = labels == 0
    in_group1 = labels == 1
    return (np.mean(data[:, in_group1], axis=1)
            - np.mean(data[:, in_group0], axis=1))
def stdmeandiff(data, labels):
    """Per-row mean difference scaled by the sum of the group std deviations.

    Rows whose two groups both have zero spread get a divisor of 1 so the
    statistic stays defined (0/1 instead of 0/0).
    """
    g0 = data[:, labels == 0]
    g1 = data[:, labels == 1]
    spread = np.std(g0, axis=1, ddof=1) + np.std(g1, axis=1, ddof=1)
    spread[spread == 0] = 1
    return (np.mean(g1, axis=1) - np.mean(g0, axis=1)) / spread
def mannwhitney(data, labels):
    """Per-row Mann-Whitney U statistic between the two label groups."""
    g0 = data[:, labels == 0]
    g1 = data[:, labels == 1]
    n_rows = np.shape(data)[0]
    stats = [scipy.stats.mannwhitneyu(g0[i, :], g1[i, :]).statistic
             for i in range(n_rows)]
    return np.array(stats)
# kruwallis give a column vector while others give row vector
def kruwallis(data, labels):
    """Per-row Kruskal-Wallis H statistic across all label groups.

    Assumes group labels are the integers 0 .. G-1 (as documented by dsfdr).
    Returns one statistic per row.
    """
    num_groups = len(np.unique(labels))
    out = np.zeros(np.shape(data)[0])
    for row in range(np.shape(data)[0]):
        samples = [data[row, labels == j] for j in range(num_groups)]
        out[row] = scipy.stats.kruskal(*samples).statistic
    return out
def pearson(data, labels):
    """Per-row Pearson correlation coefficient of each feature with *labels*."""
    n_rows = np.shape(data)[0]
    return np.array([scipy.stats.pearsonr(data[i, :], labels)[0]
                     for i in range(n_rows)])
def spearman(data, labels):
    """Per-row Spearman rank correlation of each feature with *labels*."""
    n_rows = np.shape(data)[0]
    return np.array([scipy.stats.spearmanr(data[i, :], labels).correlation
                     for i in range(n_rows)])
# new fdr method
def dsfdr(data, labels, transform_type='rankdata', method='meandiff',
          alpha=0.1, numperm=1000, fdr_method='dsfdr'):
    '''
    calculate the Discrete FDR for the data

    input:
    data : N x S numpy array
        each column is a sample (S total), each row an OTU (N total)
    labels : a 1d numpy array (length S)
        the labels of each sample (same order as data) with the group
        (0/1 if binary, 0-G-1 if G groups, or numeric values for correlation)
    transform_type : str or None
        transformation to apply to the data before calculating
        the test statistic
        'rankdata' : rank transform each OTU reads
        'log2data' : calculate log2 for each OTU using minimal cutoff of 2
        'normdata' : normalize the data to constant sum per samples
        'binarydata' : convert to binary absence/presence
        None : no transformation to perform
    method : str or function
        the method to use for calculating test statistics:
        'meandiff' : mean(A)-mean(B) (binary)
        'mannwhitney' : mann-whitney u-test (binary)
        'kruwallis' : kruskal-wallis test (multiple groups)
        'stdmeandiff' : (mean(A)-mean(B))/(std(A)+std(B)) (binary)
        'spearman' : spearman correlation (numeric)
        'pearson' : pearson correlation (numeric)
        'nonzerospearman' : spearman correlation only non-zero entries
        (numeric)
        'nonzeropearson' : pearson correlation only non-zero entries (numeric)
        function : use this function to calculate the test statistic
        (input is data, labels, output is array of float)
    alpha : float
        the desired FDR control level
    numperm : int
        number of permutations to perform
    fdr_method : str
        the FDR procedure to determine significant bacteria
        'dsfdr' : discrete FDR method
        'bhfdr' : Benjamini-Hochberg FDR method
        'byfdr' : Benjamini-Yekutielli FDR method
        'filterBH' : Benjamini-Hochberg FDR method with filtering

    output:
    reject : np array of bool (length N)
        True for OTUs where the null hypothesis is rejected
    tstat : np array of float (length N)
        the test statistic value for each OTU (for effect size)
    pvals : np array of float (length N)
        the p-value for each OTU
    '''
    logger.debug('dsfdr using fdr method: %s' % fdr_method)
    data = data.copy()

    if fdr_method == 'filterBH':
        # pre-filter: drop OTUs whose minimal achievable p-value (determined
        # by the number of non-zero entries) cannot pass the alpha threshold
        index = []
        n0 = np.sum(labels == 0)
        n1 = np.sum(labels == 1)
        for i in range(np.shape(data)[0]):
            nonzeros = np.count_nonzero(data[i, :])
            if nonzeros < min(n0, n1):
                pval_min = (comb(n0, nonzeros, exact=True)
                            + comb(n1, nonzeros, exact=True)) / comb(n0 + n1, nonzeros)
                if pval_min <= alpha:
                    index.append(i)
            else:
                index.append(i)
        data = data[index, :]

    # transform the data
    if transform_type == 'rankdata':
        data = rankdata(data)
    elif transform_type == 'log2data':
        data = log2data(data)
    elif transform_type == 'binarydata':
        data = binarydata(data)
    elif transform_type == 'normdata':
        data = normdata(data)
    elif transform_type is None:
        pass
    else:
        raise ValueError('transform type %s not supported' % transform_type)

    numbact = np.shape(data)[0]
    # copy the labels since the permutation loops below shuffle them in place
    # (fix: these two statements were previously executed twice in a row)
    labels = labels.copy()

    logger.debug('start permutation')
    if method == 'meandiff':
        # fast matrix multiplication based calculation
        method = meandiff
        tstat = method(data, labels)
        t = np.abs(tstat)
        numsamples = np.shape(data)[1]
        # p/p2 encode the group-0 / group-1 averaging weights per permutation
        p = np.zeros([numsamples, numperm])
        k1 = 1 / np.sum(labels == 0)
        k2 = 1 / np.sum(labels == 1)
        for cperm in range(numperm):
            np.random.shuffle(labels)
            p[labels == 0, cperm] = k1
        p2 = np.ones(p.shape) * k2
        p2[p > 0] = 0
        mean1 = np.dot(data, p)
        mean2 = np.dot(data, p2)
        u = np.abs(mean1 - mean2)

    elif method == 'mannwhitney' or method == \
            'kruwallis' or method == 'stdmeandiff':
        if method == 'mannwhitney':
            method = mannwhitney
        if method == 'kruwallis':
            method = kruwallis
        if method == 'stdmeandiff':
            method = stdmeandiff
        tstat = method(data, labels)
        t = np.abs(tstat)
        u = np.zeros([numbact, numperm])
        for cperm in range(numperm):
            rlabels = np.random.permutation(labels)
            rt = method(data, rlabels)
            u[:, cperm] = rt

    elif method == 'spearman' or method == 'pearson':
        # fast matrix multiplication based correlation
        if method == 'spearman':
            data = rankdata(data)
            labels = sp.stats.rankdata(labels)
        meanval = np.mean(data, axis=1).reshape([data.shape[0], 1])
        data = data - np.repeat(meanval, data.shape[1], axis=1)
        labels = labels - np.mean(labels)
        tstat = np.dot(data, labels)
        t = np.abs(tstat)
        # calculate the normalized test statistic (reported as effect size)
        stdval = np.std(data, axis=1).reshape([data.shape[0], 1])
        # to fix problem with 0 std divide by zero (since we permute it's ok)
        # note we don't remove from multiple hypothesis - could be done better
        stdval[stdval == 0] = 1
        tdata = data / np.repeat(stdval, data.shape[1], axis=1)
        meanval = np.mean(tdata, axis=1).reshape([tdata.shape[0], 1])
        tdata = tdata - np.repeat(meanval, tdata.shape[1], axis=1)
        # NOTE(review): the mean of `data` (already centered above) is
        # subtracted a second time here -- looks like a leftover line; it only
        # affects the reported effect size tstat, not t or the p-values.
        # Kept as-is pending confirmation.
        meanval = np.mean(data, axis=1).reshape([data.shape[0], 1])
        tdata = tdata - np.repeat(meanval, tdata.shape[1], axis=1)
        tlabels = labels / np.std(labels)
        # fix for n since we multiply without normalizing for n
        tlabels = tlabels / len(tlabels)
        tlabels = tlabels - np.mean(tlabels)
        tstat = np.dot(tdata, tlabels)
        permlabels = np.zeros([len(labels), numperm])
        for cperm in range(numperm):
            rlabels = np.random.permutation(labels)
            permlabels[:, cperm] = rlabels
        u = np.abs(np.dot(data, permlabels))

    elif method == 'nonzerospearman' or method == 'nonzeropearson':
        t = np.zeros([numbact])
        tstat = np.zeros([numbact])
        u = np.zeros([numbact, numperm])
        for i in range(numbact):
            # correlate only over the samples where the OTU is present
            index = np.nonzero(data[i, :])
            label_nonzero = labels[index]
            sample_nonzero = data[i, :][index]
            if method == 'nonzerospearman':
                sample_nonzero = sp.stats.rankdata(sample_nonzero)
                label_nonzero = sp.stats.rankdata(label_nonzero)
            sample_nonzero = sample_nonzero - np.mean(sample_nonzero)
            label_nonzero = label_nonzero - np.mean(label_nonzero)
            tstat[i] = np.dot(sample_nonzero, label_nonzero)
            t[i] = np.abs(tstat[i])
            # normalize the reported effect size to a correlation coefficient
            tstat[i] = tstat[i] / (np.std(sample_nonzero) * np.std(label_nonzero) * len(sample_nonzero))
            permlabels = np.zeros([len(label_nonzero), numperm])
            for cperm in range(numperm):
                rlabels = np.random.permutation(label_nonzero)
                permlabels[:, cperm] = rlabels
            u[i, :] = np.abs(np.dot(sample_nonzero, permlabels))

    elif isinstance(method, types.FunctionType):
        # call the user-defined function of statistical test
        t = method(data, labels)
        tstat = t.copy()
        u = np.zeros([numbact, numperm])
        for cperm in range(numperm):
            rlabels = np.random.permutation(labels)
            rt = method(data, rlabels)
            u[:, cperm] = rt

    else:
        # NOTE(review): unlike the transform_type check above this returns
        # (None, None) instead of raising -- kept for backwards compatibility.
        print('unsupported method %s' % method)
        return None, None

    # fix floating point errors (important for permutation values!)
    # https://github.com/numpy/numpy/issues/8116
    for crow in range(numbact):
        closepos = np.isclose(t[crow], u[crow, :])
        u[crow, closepos] = t[crow]

    # calculate permutation p-vals
    pvals = np.zeros([numbact])  # p-value for original test statistic t
    pvals_u = np.zeros([numbact, numperm])
    # pseudo p-values for permutated test statistic u
    for crow in range(numbact):
        allstat = np.hstack([t[crow], u[crow, :]])
        stat_rank = sp.stats.rankdata(allstat, method='min')
        allstat = 1 - ((stat_rank - 1) / len(allstat))
        # assign ranks to t from biggest as 1
        pvals[crow] = allstat[0]
        pvals_u[crow, :] = allstat[1:]

    # calculate FDR
    if fdr_method == 'dsfdr':
        # sort unique p-values for original test statistics biggest to smallest
        pvals_unique = np.unique(pvals)
        sortp = pvals_unique[np.argsort(-pvals_unique)]
        # find a data-dependent threshold for the p-value
        foundit = False
        allfdr = []
        allt = []
        for cp in sortp:
            realnum = np.sum(pvals <= cp)
            fdr = (realnum + np.count_nonzero(
                pvals_u <= cp)) / (realnum * (numperm + 1))
            allfdr.append(fdr)
            allt.append(cp)
            if fdr <= alpha:
                realcp = cp
                foundit = True
                break
        if not foundit:
            # no good threshold was found
            reject = np.repeat([False], numbact)
            return reject, tstat, pvals
        # fill the reject null hypothesis
        reject = np.zeros(numbact, dtype=int)
        reject = (pvals <= realcp)
    elif fdr_method == 'bhfdr' or fdr_method == 'filterBH':
        t_star = np.array([t, ] * numperm).transpose()
        pvals = (np.sum(u >= t_star, axis=1) + 1) / (numperm + 1)
        reject = multipletests(pvals, alpha=alpha, method='fdr_bh')[0]
    elif fdr_method == 'byfdr':
        t_star = np.array([t, ] * numperm).transpose()
        pvals = (np.sum(u >= t_star, axis=1) + 1) / (numperm + 1)
        reject = multipletests(pvals, alpha=alpha, method='fdr_by')[0]
    else:
        raise ValueError('fdr method %s not supported' % fdr_method)

    return reject, tstat, pvals
|
import json
import logging
import requests
from datetime import datetime
from django.conf import settings
from optparse import make_option
from django.db import transaction, DEFAULT_DB_ALIAS
from django.core.management.base import BaseCommand
from requests.exceptions import SSLError, ConnectionError, RequestException
from varify.samples.models import Sample
log = logging.getLogger(__name__)
class Command(BaseCommand):
    """Management command that refreshes gene rankings for published samples.

    For every published ``Sample``, this fetches the sample's phenotype data
    (HPO annotations) from ``settings.PHENOTYPE_ENDPOINT``, posts the HPO
    terms together with the sample's gene list to the gene-ranking service at
    ``settings.GENE_RANK_BASE_URL``, and stores the returned rank/score on
    each of the sample's results.
    """

    option_list = BaseCommand.option_list + (
        make_option('--database', action='store', dest='database',
                    default=DEFAULT_DB_ALIAS,
                    help='Specifies the target database to load results.'),
        make_option('--force', action='store_true', dest='force',
                    default=False,
                    help='Forces recomputation of all gene rankings')
    )

    def handle(self, **options):
        """Fetch phenotype data and update gene rankings for all published samples.

        Bails out early (with an error log) if PHENOTYPE_ENDPOINT,
        GENE_RANK_BASE_URL, or the VARIFY_CERT/VARIFY_KEY pair are missing
        from settings. Individual samples that fail any retrieval or parsing
        step are logged and skipped rather than aborting the whole run.
        """
        if not settings.PHENOTYPE_ENDPOINT:
            log.error('PHENOTYPE_ENDPOINT must be defined in settings for '
                      'gene rankings to be updated.')
            return
        if not settings.GENE_RANK_BASE_URL:
            log.error('GENE_RANK_BASE_URL must be defined in settings for '
                      'gene rankings to be updated.')
            return
        if not settings.VARIFY_CERT or not settings.VARIFY_KEY:
            log.error('VARIFY_CERT and VARIFY_KEY must be defined in settings '
                      'for gene rankings to be updated.')
            return

        database = options.get('database')
        force = options.get('force')

        # Construct the cert from the setting to use in requests to the
        # phenotype endpoint.
        cert = (settings.VARIFY_CERT, settings.VARIFY_KEY)

        # We ignore all the samples that aren't published. They aren't visible
        # to the user so we don't bother updating related scores.
        samples = Sample.objects.filter(published=True)

        updated_samples = 0
        total_samples = 0

        for sample in samples:
            total_samples += 1

            # Construct the URL from the setting and the sample label. The
            # sample label is used to retrieve the phenotype info on the remote
            # endpoint.
            url = settings.PHENOTYPE_ENDPOINT % sample.label

            # Get the phenotype information for this sample. If the
            # phenotype is unavailable then we can skip this sample.
            try:
                # NOTE(review): verify=False disables SSL certificate
                # validation for this request -- confirm this is intended.
                response = requests.get(url, cert=cert, verify=False)
            except SSLError:
                log.exception('Skipping sample "{0}". An SSLError occurred '
                              'during phenotype retrieval request.'
                              .format(sample.label))
                continue
            except ConnectionError:
                log.exception('Skipping sample "{0}". A ConnectionError '
                              'occurred during phenotype retrieval request.'
                              .format(sample.label))
                continue
            except RequestException:
                log.exception('Skipping sample "{0}". The sample has no '
                              'phenotype data associated with it'
                              .format(sample.label))
                continue

            try:
                phenotype_data = json.loads(response.content)
            except ValueError:
                log.error("Could not parse response from {0}, skipping '{1}'."
                          .format(url, sample.label))
                continue

            # Parse the remote modification timestamp; fall back to
            # datetime.min when absent/unparsable so the staleness check
            # below still works. (Only ValueError is caught -- a missing
            # 'last_modified' key would raise KeyError.)
            try:
                phenotype_modified = datetime.strptime(
                    phenotype_data['last_modified'], "%Y-%m-%dT%H:%M:%S.%f")
            except ValueError:
                phenotype_modified = datetime.min
                log.warn("Could not parse 'last_modified' field on phenotype "
                         "data. Using datetime.min so that only unranked "
                         "samples will be ranked. If the 'force' flag was "
                         "used then all samples will be updated despite this "
                         "parsing failure.")

            # NOTE(review): this skips the sample when the locally stored
            # timestamp is *older* than the remote modification time, which
            # looks inverted (an older local timestamp usually means the
            # remote data changed since the last update) -- confirm.
            if (not force and sample.phenotype_modified and
                    sample.phenotype_modified < phenotype_modified):
                log.debug("Sample '{0}' is already up to date, skipping it."
                          .format(sample.label))
                continue

            # Extract the HPO terms from the data returned from the phenotype
            # endpoint. We need to modify the terms a bit because the phenotype
            # endpoint has terms in the form 'HP_0011263' and the gene ranking
            # endpoint expects them to be of the form 'HP:0011263'.
            hpo_terms = []
            for hpo_annotation in phenotype_data['hpoAnnotations']:
                hpo_id = str(hpo_annotation.get('hpo_id', ''))
                if hpo_id:
                    hpo_terms.append(hpo_id.replace('_', ':'))

            # If there are no HPO terms then there will be no rankings so skip
            # this sample to avoid any more computations and requests.
            if not hpo_terms:
                log.warning('Skipping "{0}" because it has no HPO terms '
                            'associated with it.'.format(sample.label))
                continue

            # Compute the unique gene list for the entire sample
            genes = set(sample.results.values_list(
                'variant__effects__transcript__gene__symbol', flat=True))

            # Obviously, if there are no genes then the gene ranking endpoint
            # will have nothing to do so we can safely skip this sample.
            if not genes:
                continue

            # We need to convert the genes to strings because the ranking
            # service is not prepared to handle the unicode format that the
            # gene symbols are in when we retrieve them from the models.
            gene_rank_url = "http://{0}?hpo={1}&genes={2}".format(
                settings.GENE_RANK_BASE_URL, ",".join(hpo_terms),
                ",".join([str(g) for g in genes]))

            try:
                gene_response = requests.get(gene_rank_url)
            except Exception:
                log.exception('Error retrieving gene rankings')
                continue

            gene_data = json.loads(gene_response.content)
            ranked_genes = gene_data['ranked_genes']

            # While all the results should have been updated at the
            # same time, we cannot guarantee that so we check if each
            # is stale or the force flag is on before updating the
            # results gene rank.
            updated_results = 0
            total_results = 0

            for result in sample.results.all():
                total_results += 1

                with transaction.commit_manually(database):
                    try:
                        # Get the gene for this result. Since a result can
                        # have more than one gene associated with it, we
                        # return the first gene symbol in the list. This is
                        # the same one that will be shown in the collapsed
                        # gene list on the variant row in the results table.
                        gene = result.variant.effects.values_list(
                            'transcript__gene__symbol', flat=True)[0]

                        # If there is no gene on this result or the gene is
                        # not found in the list of ranked genes then skip this
                        # result.
                        if not gene:
                            log.debug("Result with id {0} has no gene, "
                                      "skipping result.".format(result.id))
                            transaction.rollback()
                            continue

                        # Get the first item in the ranked gene list with a
                        # symbol matching the gene we looked up above for this
                        # result.
                        ranked_gene = next(
                            (r for r in ranked_genes if
                             r.get('symbol', '').lower() == gene.lower()),
                            None)

                        if not ranked_gene:
                            log.debug("Could not find '{0}' in ranked gene "
                                      "list, skipping result".format(gene))
                            transaction.rollback()
                            continue

                        result.score.rank = ranked_gene.get('rank', None)
                        result.score.score = ranked_gene.get('score', None)
                        # NOTE(review): rank/score are set on result.score but
                        # only result is saved -- confirm the score changes
                        # are actually persisted.
                        result.save()
                        updated_results += 1
                    except Exception:
                        log.exception("Error saving gene ranks and scores for "
                                      "sample '{0}'".format(sample.label))
                        transaction.rollback()

                    # NOTE(review): rollback()/commit() are called without
                    # using=database, so they act on the default alias even
                    # though commit_manually() was entered for `database` --
                    # confirm multi-database behavior.
                    transaction.commit()

            sample.phenotype_modified = datetime.now()
            sample.save()

            log.info("Updated {0} and skipped {1} results in sample '{2}'"
                     .format(updated_results, total_results - updated_results,
                             sample.label))
            updated_samples += 1

        log.info("Updated {0} and skipped {1} samples"
                 .format(updated_samples, total_samples-updated_samples))
Accept sample labels as arguments to the gene-ranks command
import json
import logging
import requests
from datetime import datetime
from django.conf import settings
from optparse import make_option
from django.db import transaction, DEFAULT_DB_ALIAS
from django.core.management.base import BaseCommand
from requests.exceptions import SSLError, ConnectionError, RequestException
from varify.samples.models import Sample
log = logging.getLogger(__name__)
class Command(BaseCommand):
    """Management command that refreshes gene rankings for published samples.

    Optionally accepts sample labels as positional arguments; when given,
    only those published samples are processed, otherwise all published
    samples are. For each sample the command fetches its phenotype data
    (HPO annotations) from ``settings.PHENOTYPE_ENDPOINT``, sends the HPO
    terms with the sample's gene list to the gene-ranking service at
    ``settings.GENE_RANK_BASE_URL``, and stores the returned rank/score on
    each of the sample's results.
    """

    args = '<sample_label sample_label ...>'

    option_list = BaseCommand.option_list + (
        make_option('--database', action='store', dest='database',
                    default=DEFAULT_DB_ALIAS,
                    help='Specifies the target database to load results.'),
        make_option('--force', action='store_true', dest='force',
                    default=False,
                    help='Forces recomputation of all gene rankings')
    )

    def handle(self, *args, **options):
        """Fetch phenotype data and update gene rankings.

        :param args: optional sample labels restricting which published
            samples are processed.

        Bails out early (with an error log) if PHENOTYPE_ENDPOINT,
        GENE_RANK_BASE_URL, or the VARIFY_CERT/VARIFY_KEY pair are missing
        from settings. Individual samples that fail any retrieval or parsing
        step are logged and skipped rather than aborting the whole run.
        """
        if not settings.PHENOTYPE_ENDPOINT:
            log.error('PHENOTYPE_ENDPOINT must be defined in settings for '
                      'gene rankings to be updated.')
            return
        if not settings.GENE_RANK_BASE_URL:
            log.error('GENE_RANK_BASE_URL must be defined in settings for '
                      'gene rankings to be updated.')
            return
        if not settings.VARIFY_CERT or not settings.VARIFY_KEY:
            log.error('VARIFY_CERT and VARIFY_KEY must be defined in settings '
                      'for gene rankings to be updated.')
            return

        database = options.get('database')
        force = options.get('force')

        # Construct the cert from the setting to use in requests to the
        # phenotype endpoint.
        cert = (settings.VARIFY_CERT, settings.VARIFY_KEY)

        # We ignore all the samples that aren't published. They aren't visible
        # to the user so we don't bother updating related scores. If there
        # were sample labels supplied as arguments then we limit the rankings
        # updates to those samples, otherwise we process all samples.
        samples = Sample.objects.filter(published=True)
        if args:
            samples = samples.filter(label__in=args)

        updated_samples = 0
        total_samples = 0

        for sample in samples:
            total_samples += 1

            # Construct the URL from the setting and the sample label. The
            # sample label is used to retrieve the phenotype info on the remote
            # endpoint.
            url = settings.PHENOTYPE_ENDPOINT % sample.label

            # Get the phenotype information for this sample. If the
            # phenotype is unavailable then we can skip this sample.
            try:
                # NOTE(review): verify=False disables SSL certificate
                # validation for this request -- confirm this is intended.
                response = requests.get(url, cert=cert, verify=False)
            except SSLError:
                log.exception('Skipping sample "{0}". An SSLError occurred '
                              'during phenotype retrieval request.'
                              .format(sample.label))
                continue
            except ConnectionError:
                log.exception('Skipping sample "{0}". A ConnectionError '
                              'occurred during phenotype retrieval request.'
                              .format(sample.label))
                continue
            except RequestException:
                log.exception('Skipping sample "{0}". The sample has no '
                              'phenotype data associated with it'
                              .format(sample.label))
                continue

            try:
                phenotype_data = json.loads(response.content)
            except ValueError:
                log.error("Could not parse response from {0}, skipping '{1}'."
                          .format(url, sample.label))
                continue

            # Parse the remote modification timestamp; fall back to
            # datetime.min when unparsable so the staleness check below
            # still works. (Only ValueError is caught -- a missing
            # 'last_modified' key would raise KeyError.)
            try:
                phenotype_modified = datetime.strptime(
                    phenotype_data['last_modified'], "%Y-%m-%dT%H:%M:%S.%f")
            except ValueError:
                phenotype_modified = datetime.min
                log.warn("Could not parse 'last_modified' field on phenotype "
                         "data. Using datetime.min so that only unranked "
                         "samples will be ranked. If the 'force' flag was "
                         "used then all samples will be updated despite this "
                         "parsing failure.")

            # NOTE(review): this skips the sample when the locally stored
            # timestamp is *older* than the remote modification time, which
            # looks inverted (an older local timestamp usually means the
            # remote data changed since the last update) -- confirm.
            if (not force and sample.phenotype_modified and
                    sample.phenotype_modified < phenotype_modified):
                log.debug("Sample '{0}' is already up to date, skipping it."
                          .format(sample.label))
                continue

            # Extract the HPO terms from the data returned from the phenotype
            # endpoint. We need to modify the terms a bit because the phenotype
            # endpoint has terms in the form 'HP_0011263' and the gene ranking
            # endpoint expects them to be of the form 'HP:0011263'.
            hpo_terms = []
            for hpo_annotation in phenotype_data['hpoAnnotations']:
                hpo_id = str(hpo_annotation.get('hpo_id', ''))
                if hpo_id:
                    hpo_terms.append(hpo_id.replace('_', ':'))

            # If there are no HPO terms then there will be no rankings so skip
            # this sample to avoid any more computations and requests.
            if not hpo_terms:
                log.warning('Skipping "{0}" because it has no HPO terms '
                            'associated with it.'.format(sample.label))
                continue

            # Compute the unique gene list for the entire sample
            genes = set(sample.results.values_list(
                'variant__effects__transcript__gene__symbol', flat=True))

            # Obviously, if there are no genes then the gene ranking endpoint
            # will have nothing to do so we can safely skip this sample.
            if not genes:
                continue

            # We need to convert the genes to strings because the ranking
            # service is not prepared to handle the unicode format that the
            # gene symbols are in when we retrieve them from the models.
            gene_rank_url = "http://{0}?hpo={1}&genes={2}".format(
                settings.GENE_RANK_BASE_URL, ",".join(hpo_terms),
                ",".join([str(g) for g in genes]))

            try:
                gene_response = requests.get(gene_rank_url)
            except Exception:
                log.exception('Error retrieving gene rankings')
                continue

            gene_data = json.loads(gene_response.content)
            ranked_genes = gene_data['ranked_genes']

            # While all the results should have been updated at the
            # same time, we cannot guarantee that so we check if each
            # is stale or the force flag is on before updating the
            # results gene rank.
            updated_results = 0
            total_results = 0

            for result in sample.results.all():
                total_results += 1

                with transaction.commit_manually(database):
                    try:
                        # Get the gene for this result. Since a result can
                        # have more than one gene associated with it, we
                        # return the first gene symbol in the list. This is
                        # the same one that will be shown in the collapsed
                        # gene list on the variant row in the results table.
                        gene = result.variant.effects.values_list(
                            'transcript__gene__symbol', flat=True)[0]

                        # If there is no gene on this result or the gene is
                        # not found in the list of ranked genes then skip this
                        # result.
                        if not gene:
                            log.debug("Result with id {0} has no gene, "
                                      "skipping result.".format(result.id))
                            transaction.rollback()
                            continue

                        # Get the first item in the ranked gene list with a
                        # symbol matching the gene we looked up above for this
                        # result.
                        ranked_gene = next(
                            (r for r in ranked_genes if
                             r.get('symbol', '').lower() == gene.lower()),
                            None)

                        if not ranked_gene:
                            log.debug("Could not find '{0}' in ranked gene "
                                      "list, skipping result".format(gene))
                            transaction.rollback()
                            continue

                        result.score.rank = ranked_gene.get('rank', None)
                        result.score.score = ranked_gene.get('score', None)
                        # NOTE(review): rank/score are set on result.score but
                        # only result is saved -- confirm the score changes
                        # are actually persisted.
                        result.save()
                        updated_results += 1
                    except Exception:
                        log.exception("Error saving gene ranks and scores for "
                                      "sample '{0}'".format(sample.label))
                        transaction.rollback()

                    # NOTE(review): rollback()/commit() are called without
                    # using=database, so they act on the default alias even
                    # though commit_manually() was entered for `database` --
                    # confirm multi-database behavior.
                    transaction.commit()

            sample.phenotype_modified = datetime.now()
            sample.save()

            log.info("Updated {0} and skipped {1} results in sample '{2}'"
                     .format(updated_results, total_results - updated_results,
                             sample.label))
            updated_samples += 1

        log.info("Updated {0} and skipped {1} samples"
                 .format(updated_samples, total_samples-updated_samples))
|
"""
The microstructure module provide elementary classes to describe a
crystallographic granular microstructure such as mostly present in
metallic materials.
It contains several classes which are used to describe a microstructure
composed of several grains, each one having its own crystallographic
orientation:
* :py:class:`~pymicro.crystal.microstructure.Microstructure`
* :py:class:`~pymicro.crystal.microstructure.Grain`
* :py:class:`~pymicro.crystal.microstructure.Orientation`
"""
import numpy as np
import os
import vtk
import h5py
from scipy import ndimage
from matplotlib import pyplot as plt, colors, cm
from xml.dom.minidom import Document, parse
from pymicro.crystal.lattice import Lattice, Symmetry
from pymicro.crystal.quaternion import Quaternion
from math import atan2, pi
class Orientation:
"""Crystallographic orientation class.
This follows the passive rotation definition which means that it brings
the sample coordinate system into coincidence with the crystal coordinate
system. Then one may express a vector :math:`V_c` in the crystal coordinate system
from the vector in the sample coordinate system :math:`V_s` by:
.. math::
V_c = g.V_s
and inversely (because :math:`g^{-1}=g^T`):
.. math::
V_s = g^T.V_c
Most of the code to handle rotations has been written to comply with the conventions
laid in :cite:`Rowenhorst2015`.
"""
def __init__(self, matrix):
    """Build an orientation from the 9 components of the orientation matrix.

    The 3x3 passive rotation matrix is stored internally; the Euler angles,
    Rodrigues vector and quaternion representations are computed once at
    construction time and cached as attributes.
    """
    om = np.array(matrix, dtype=np.float64).reshape((3, 3))
    self._matrix = om
    self.euler = Orientation.OrientationMatrix2Euler(om)
    self.rod = Orientation.OrientationMatrix2Rodrigues(om)
    self.quat = Orientation.OrientationMatrix2Quaternion(om, P=1)
def orientation_matrix(self):
    """Returns the orientation matrix in the form of a 3x3 numpy array.

    This is the passive rotation matrix g described in the class docstring.
    """
    return self._matrix
def __repr__(self):
    """Provide a string representation of the class."""
    # assemble the representation line by line, then join once
    parts = ['Crystal Orientation']
    parts.append('\norientation matrix = %s' % self._matrix.view())
    parts.append('\nEuler angles (degrees) = (%8.3f,%8.3f,%8.3f)' % (self.phi1(), self.Phi(), self.phi2()))
    parts.append('\nRodrigues vector = %s' % self.OrientationMatrix2Rodrigues(self._matrix))
    parts.append('\nQuaternion = %s' % self.OrientationMatrix2Quaternion(self._matrix, P=1))
    return ''.join(parts)
@staticmethod
def cube():
    """Create the particular crystal orientation called Cube and which
    corresponds to euler angles (0, 0, 0)."""
    return Orientation.from_euler((0., 0., 0.))
@staticmethod
def brass():
    """Create the particular crystal orientation called Brass and which
    corresponds to euler angles (35.264, 45, 0)."""
    return Orientation.from_euler((35.264, 45., 0.))
@staticmethod
def copper():
    """Create the particular crystal orientation called Copper and which
    corresponds to euler angles (90, 35.264, 45)."""
    return Orientation.from_euler((90., 35.264, 45.))
@staticmethod
def s3():
    """Create the particular crystal orientation called S3 and which
    corresponds approximately to euler angles (59, 37, 63)."""
    return Orientation.from_euler((58.980, 36.699, 63.435))
@staticmethod
def goss():
    """Create the particular crystal orientation called Goss and which
    corresponds to euler angles (0, 45, 0)."""
    return Orientation.from_euler((0., 45., 0.))
@staticmethod
def shear():
    """Create the particular crystal orientation called shear and which
    corresponds to euler angles (45, 0, 0)."""
    return Orientation.from_euler((45., 0., 0.))
@staticmethod
def random():
    """Create a random crystal orientation.

    phi1 and phi2 are drawn uniformly in [0, 360); Phi is drawn so that
    the orientation is uniform on the sphere (acos of a uniform variable).
    """
    from random import random
    from math import acos
    euler_deg = [random() * 360.,
                 180. * acos(2 * random() - 1) / np.pi,
                 random() * 360.]
    return Orientation.from_euler(euler_deg)
def get_ipf_colour(self, axis=np.array([0., 0., 1.]), symmetry=Symmetry.cubic):
    """Compute the IPF (inverse pole figure) colour for this orientation.

    Given a particular axis expressed in the laboratory coordinate system,
    one can compute the so called IPF colour based on that direction
    expressed in the crystal coordinate system as :math:`[x_c,y_c,z_c]`.
    There is only one tuple (u,v,w) such that:

    .. math::

      [x_c,y_c,z_c]=u.[0,0,1]+v.[0,1,1]+w.[1,1,1]

    and it is used to assign the RGB colour.

    :param axis: the direction in the laboratory coordinate system
        (does not need to be normalised).
    :param symmetry: the crystal `Symmetry` whose operators are searched
        for the one bringing the axis into the standard triangle.
    :return: a numpy array with the three colour components.
    """
    # fix: normalise a copy instead of dividing in place; the previous
    # in-place division mutated the caller's array and, worse, the mutable
    # default argument array shared across calls
    axis = axis / np.linalg.norm(axis)
    # find the axis lying in the fundamental zone
    for sym in symmetry.symmetry_operators():
        Osym = np.dot(sym, self.orientation_matrix())
        Vc = np.dot(Osym, axis)
        if Vc[2] < 0:
            Vc *= -1.  # using the upward direction
        uvw = np.array([Vc[2] - Vc[1], Vc[1] - Vc[0], Vc[0]])
        uvw /= np.linalg.norm(uvw)
        uvw /= max(uvw)
        # all three components in [0, 1] means we are inside the triangle
        if (uvw[0] >= 0. and uvw[0] <= 1.0) and (uvw[1] >= 0. and uvw[1] <= 1.0) and (
                uvw[2] >= 0. and uvw[2] <= 1.0):
            break
    # NOTE(review): if no operator brings the axis into the standard
    # triangle, the uvw from the last operator is returned -- confirm this
    # cannot happen for the supported symmetries.
    return uvw
@staticmethod
def fzDihedral(rod, n):
    """Check if the given Rodrigues vector is in the fundamental zone of
    the dihedral symmetry of order `n`.

    After book from Morawiecz.

    Fixes: the original referenced an undefined name `ro` (NameError) and
    called `math.sqrt` although only `atan2` and `pi` are imported from
    `math` at module level; `np.sqrt` is used instead. The method is also
    declared static since it does not use `self`.

    :param rod: the 3 component Rodrigues vector.
    :param int n: the order of the dihedral symmetry (2, 3, 4 or 6).
    :return bool: True if the vector lies in the fundamental zone.
    """
    # top and bottom face at +/-tan(pi/2n)
    t = np.tan(np.pi / (2 * n))
    if abs(rod[2]) > t:
        return False

    # 2n faces distance 1 from origin
    # y <= ((2+sqrt(2))*t - (1+sqrt(2))) * x + (1+sqrt(2))*(1-t)
    y, x = sorted([abs(rod[0]), abs(rod[1])])
    if x > 1:
        return False

    return {
        2: True,
        3: y / (1 + np.sqrt(2)) + (1 - np.sqrt(2 / 3)) * x < 1 - 1 / np.sqrt(3),
        4: y + x < np.sqrt(2),
        6: y / (1 + np.sqrt(2)) + (1 - 2 * np.sqrt(2) + np.sqrt(6)) * x < np.sqrt(3) - 1
    }[n]
def inFZ(self, symmetry=Symmetry.cubic):
    """Check if the given Orientation lies within the fundamental zone.

    For a given crystal symmetry, several rotations can describe the same
    physical crystallographic arrangement. The Rodrigues fundamental zone
    restricts the orientation space accordingly.
    """
    if symmetry != Symmetry.cubic:
        raise (ValueError('unsupported crystal symmetry: %s' % symmetry))
    abs_rod = np.abs(self.rod)
    # T23 (tetrahedral) condition: |r1| + |r2| + |r3| <= 1; for the full
    # cubic symmetry each component must additionally be <= sqrt(2) - 1
    return abs_rod.sum() <= 1.0 and abs_rod.max() <= 2 ** 0.5 - 1
def move_to_FZ(self, symmetry=Symmetry.cubic, verbose=False):
    """
    Compute the equivalent crystal orientation in the Fundamental Zone of a given symmetry.

    :param Symmetry symmetry: an instance of the `Symmetry` class.
    :param verbose: flag for verbose mode.
    :return: a new Orientation instance which lies in the fundamental zone.
    """
    g_fz = symmetry.move_rotation_to_FZ(self.orientation_matrix(), verbose=verbose)
    return Orientation(g_fz)
@staticmethod
def misorientation_MacKenzie(psi):
    """Return the fraction of the misorientations corresponding to the
    given :math:`\\psi` angle in the reference solution derived By MacKenzie in
    his 1958 paper :cite:`MacKenzie_1958`.

    :param psi: the misorientation angle in radians.
    :returns: the value in the cummulative distribution corresponding to psi.
    """
    from math import sqrt, sin, cos, tan, pi, acos
    psidg = 180 * psi / pi  # misorientation angle in degrees
    # piecewise analytical expression, one branch per angular range
    if 0 <= psidg <= 45:
        p = 2. / 15 * (1 - cos(psi))
    elif 45 < psidg <= 60:
        p = 2. / 15 * (3 * (sqrt(2) - 1) * sin(psi) - 2 * (1 - cos(psi)))
    elif 60 < psidg <= 60.72:
        p = 2. / 15 * ((3 * (sqrt(2) - 1) + 4. / sqrt(3)) * sin(psi) - 6. * (1 - cos(psi)))
    elif 60.72 < psidg <= 62.8:
        X = (sqrt(2) - 1) / (1 - (sqrt(2) - 1) ** 2 / tan(0.5 * psi) ** 2) ** 0.5
        Y = (sqrt(2) - 1) ** 2 / ((3 - 1 / tan(0.5 * psi) ** 2) ** 0.5)
        p = (2. / 15) * ((3 * (sqrt(2) - 1) + 4 / sqrt(3)) * sin(psi) - 6 * (1 - cos(psi))) \
            - 8. / (5 * pi) * (
            2 * (sqrt(2) - 1) * acos(X / tan(0.5 * psi)) + 1. / sqrt(3) * acos(Y / tan(0.5 * psi))) * sin(psi) \
            + 8. / (5 * pi) * (2 * acos((sqrt(2) + 1) * X / sqrt(2)) + acos((sqrt(2) + 1) * Y / sqrt(2))) * (
            1 - cos(psi))
    else:
        # the distribution vanishes above 62.8 degrees
        p = 0.
    return p
@staticmethod
def misorientation_axis_from_delta(delta):
    """Compute the misorientation axis from the misorientation matrix.

    The axis is built from the antisymmetric part of the matrix and
    normalised to unit length.

    :param delta: The 3x3 misorientation matrix.
    :returns: the misorientation axis (normalised vector).
    """
    axis = np.array([delta[1, 2] - delta[2, 1],
                     delta[2, 0] - delta[0, 2],
                     delta[0, 1] - delta[1, 0]])
    return axis / np.linalg.norm(axis)
def misorientation_axis(self, orientation):
    """Compute the misorientation axis with another crystal orientation.

    This vector is by definition common to both crystalline orientations.

    :param orientation: an instance of :py:class:`~pymicro.crystal.microstructure.Orientation` class.
    :returns: the misorientation axis (normalised vector).
    """
    delta_g = np.dot(self.orientation_matrix(),
                     orientation.orientation_matrix().T)
    return Orientation.misorientation_axis_from_delta(delta_g)
@staticmethod
def misorientation_angle_from_delta(delta):
    """Compute the misorientation angle from the misorientation matrix.

    The angle associated with the misorientation matrix :math:`\\Delta g`
    is defined as :math:`\\omega = \\arccos(\\text{trace}(\\Delta g)/2-1)`.
    To avoid float rounding errors, an argument within 10 float32 ulps
    above 1 is clipped to 1 before taking the arccos.

    .. note::

      This does not account for the crystal symmetries. If you want to
      find the disorientation between two orientations, use the
      :py:meth:`~pymicro.crystal.microstructure.Orientation.disorientation`
      method.

    :param delta: The 3x3 misorientation matrix.
    :returns float: the misorientation angle in radians.
    """
    cos_omega = 0.5 * (delta.trace() - 1)
    # clip values that exceed 1. by a rounding error only
    if cos_omega > 1. and cos_omega - 1. < 10 * np.finfo('float32').eps:
        cos_omega = 1.
    return np.arccos(cos_omega)
def disorientation(self, orientation, crystal_structure=Symmetry.triclinic):
    """Compute the disorientation another crystal orientation.

    Considering all the possible crystal symmetries, the disorientation
    is defined as the combination of the minimum misorientation angle
    and the misorientation axis lying in the fundamental zone, which
    can be used to bring the two lattices into coincidence.

    .. note::

      Both orientations are supposed to have the same symmetry. This is not necessarily the case in multi-phase
      materials.

    :param orientation: an instance of :py:class:`~pymicro.crystal.microstructure.Orientation` class describing the other crystal orientation from which to compute the angle.
    :param crystal_structure: an instance of the `Symmetry` class describing the crystal symmetry, triclinic (no symmetry) by default.
    :returns tuple: the misorientation angle in radians, the axis as a numpy vector (crystal coordinates), the axis as a numpy vector (sample coordinates).
    """
    the_angle = np.pi
    symmetries = crystal_structure.symmetry_operators()
    (gA, gB) = (self.orientation_matrix(), orientation.orientation_matrix())  # nicknames
    # try both (A, B) and (B, A) so the result does not depend on the order
    for (g1, g2) in [(gA, gB), (gB, gA)]:
        for j in range(symmetries.shape[0]):
            sym_j = symmetries[j]
            oj = np.dot(sym_j, g1)  # the crystal symmetry operator is left applied
            for i in range(symmetries.shape[0]):
                sym_i = symmetries[i]
                oi = np.dot(sym_i, g2)
                delta = np.dot(oi, oj.T)
                mis_angle = Orientation.misorientation_angle_from_delta(delta)
                if mis_angle < the_angle:
                    # now compute the misorientation axis, should check if it lies in the fundamental zone
                    mis_axis = Orientation.misorientation_axis_from_delta(delta)
                    # here we have np.dot(oi.T, mis_axis) = np.dot(oj.T, mis_axis)
                    the_angle = mis_angle
                    the_axis = mis_axis
                    the_axis_xyz = np.dot(oi.T, the_axis)
    # NOTE(review): if every candidate angle equals pi exactly, the_axis is
    # never assigned and the return below raises UnboundLocalError --
    # confirm whether this edge case can occur in practice.
    return (the_angle, the_axis, the_axis_xyz)
def phi1(self):
    """Convenience method to expose the first Euler angle (in degrees)."""
    return self.euler[0]
def Phi(self):
    """Convenience method to expose the second Euler angle (in degrees)."""
    return self.euler[1]
def phi2(self):
    """Convenience method to expose the third Euler angle (in degrees)."""
    return self.euler[2]
def compute_XG_angle(self, hkl, omega, verbose=False):
"""Compute the angle between the scattering vector :math:`\mathbf{G_{l}}`
and :math:`\mathbf{-X}` the X-ray unit vector at a given angular position :math:`\\omega`.
A given hkl plane defines the scattering vector :math:`\mathbf{G_{hkl}}` by
the miller indices in the reciprocal space. It is expressed in the
cartesian coordinate system by :math:`\mathbf{B}.\mathbf{G_{hkl}}` and in the
laboratory coordinate system accounting for the crystal orientation
by :math:`\mathbf{g}^{-1}.\mathbf{B}.\mathbf{G_{hkl}}`.
The crystal is assumed to be placed on a rotation stage around the
laboratory vertical axis. The scattering vector can finally be
written as :math:`\mathbf{G_l}=\mathbf{\\Omega}.\mathbf{g}^{-1}.\mathbf{B}.\mathbf{G_{hkl}}`.
The X-rays unit vector is :math:`\mathbf{X}=[1, 0, 0]`. So the computed angle
is :math:`\\alpha=acos(-\mathbf{X}.\mathbf{G_l}/||\mathbf{G_l}||`
The Bragg condition is fulfilled when :math:`\\alpha=\pi/2-\\theta_{Bragg}`
:param hkl: the hkl plane, an instance of :py:class:`~pymicro.crystal.lattice.HklPlane`
:param omega: the angle of rotation of the crystal around the laboratory vertical axis.
:param bool verbose: activate verbose mode (False by default).
:return float: the angle between :math:`-\mathbf{X}` and :math:`\mathbf{G_{l}}` in degrees.
"""
X = np.array([1., 0., 0.])
gt = self.orientation_matrix().transpose()
Gc = hkl.scattering_vector()
Gs = gt.dot(Gc) # in the cartesian sample CS
omegar = omega * np.pi / 180
R = np.array([[np.cos(omegar), -np.sin(omegar), 0], [np.sin(omegar), np.cos(omegar), 0], [0, 0, 1]])
Gl = R.dot(Gs)
alpha = np.arccos(np.dot(-X, Gl) / np.linalg.norm(Gl)) * 180 / np.pi
if verbose:
print('scattering vector in the crystal CS', Gc)
print('scattering vector in the sample CS', Gs)
print('scattering vector in the laboratory CS (including Omega rotation)', Gl)
print('angle (deg) between -X and G', alpha)
return alpha
@staticmethod
def solve_trig_equation(A, B, C, verbose=False):
"""Solve the trigonometric equation in the form of:
.. math::
A\cos\\theta + B\sin\\theta = C
:param float A: the A constant in the equation.
:param float B: the B constant in the equation.
:param float C: the C constant in the equation.
:return tuple: the two solutions angular values in degrees.
"""
Delta = 4 * (A ** 2 + B ** 2 - C ** 2)
if Delta < 0:
raise ValueError('Delta < 0 (%f)' % Delta)
if verbose:
print('A={0:.3f}, B={1:.3f}, C={2:.3f}, Delta={3:.1f}'.format(A, B, C, Delta))
theta_1 = 2 * np.arctan2(B - 0.5 * np.sqrt(Delta), A + C) * 180. / np.pi % 360
theta_2 = 2 * np.arctan2(B + 0.5 * np.sqrt(Delta), A + C) * 180. / np.pi % 360
return theta_1, theta_2
def dct_omega_angles(self, hkl, lambda_keV, verbose=False):
"""Compute the two omega angles which satisfy the Bragg condition.
For a given crystal orientation sitting on a vertical rotation axis,
there is exactly two :math:`\omega` positions in :math:`[0, 2\pi]` for which
a particular :math:`(hkl)` reflexion will fulfil Bragg's law.
According to the Bragg's law, a crystallographic plane of a given
grain will be in diffracting condition if:
.. math::
\sin\\theta=-[\mathbf{\Omega}.\mathbf{g}^{-1}\mathbf{G_c}]_1
with :math:`\mathbf{\Omega}` the matrix associated with the rotation
axis:
.. math::
\mathbf{\Omega}=\\begin{pmatrix}
\cos\omega & -\sin\omega & 0 \\\\
\sin\omega & \cos\omega & 0 \\\\
0 & 0 & 1 \\\\
\end{pmatrix}
This method solves the associated second order equation to return
the two corresponding omega angles.
:param hkl: The given cristallographic plane :py:class:`~pymicro.crystal.lattice.HklPlane`
:param float lambda_keV: The X-rays energy expressed in keV
:param bool verbose: Verbose mode (False by default)
:returns tuple: :math:`(\omega_1, \omega_2)` the two values of the \
rotation angle around the vertical axis (in degrees).
"""
(h, k, l) = hkl.miller_indices()
theta = hkl.bragg_angle(lambda_keV, verbose=verbose)
lambda_nm = 1.2398 / lambda_keV
gt = self.orientation_matrix().T # gt = g^{-1} in Poulsen 2004
Gc = hkl.scattering_vector()
A = np.dot(Gc, gt[0])
B = - np.dot(Gc, gt[1])
# A = h / a * gt[0, 0] + k / b * gt[0, 1] + l / c * gt[0, 2]
# B = -h / a * gt[1, 0] - k / b * gt[1, 1] - l / c * gt[1, 2]
C = -2 * np.sin(theta) ** 2 / lambda_nm # the minus sign comes from the main equation
omega_1, omega_2 = Orientation.solve_trig_equation(A, B, C, verbose=verbose)
if verbose:
print('the two omega values in degrees fulfilling the Bragg condition are (%.1f, %.1f)' % (omega_1, omega_2))
return omega_1, omega_2
def rotating_crystal(self, hkl, lambda_keV, omega_step=0.5, display=True, verbose=False):
    """Plot the diffraction conditions of a given hkl plane while rotating the crystal.

    The crystal is rotated a full turn around the laboratory vertical axis
    with the given angular step. For each position the method computes the
    angle between -X and the diffraction vector G, the angle between X and
    K = X + G and the magnitude of K. The three curves are plotted against
    omega together with the two analytical solutions given by
    :py:meth:`dct_omega_angles` for graphical verification.

    :param hkl: the hkl plane, an instance of :py:class:`~pymicro.crystal.lattice.HklPlane`
    :param float lambda_keV: the X-ray energy expressed in keV.
    :param float omega_step: the rotation angular step in degrees (0.5 by default).
    :param bool display: show the figure if True, save it to a pdf file otherwise.
    :param bool verbose: activate verbose mode (False by default).
    """
    from pymicro.xray.xray_utils import lambda_keV_to_nm
    lambda_nm = lambda_keV_to_nm(lambda_keV)
    X = np.array([1., 0., 0.]) / lambda_nm
    print('magnitude of X', np.linalg.norm(X))
    gt = self.orientation_matrix().transpose()
    (h, k, l) = hkl.miller_indices()
    theta = hkl.bragg_angle(lambda_keV) * 180. / np.pi
    print('bragg angle for %d%d%d reflection is %.1f' % (h, k, l, theta))
    Gc = hkl.scattering_vector()
    Gs = gt.dot(Gc)
    alphas = []
    twothetas = []
    magnitude_K = []
    # np.linspace requires an integer sample count (a float num raises TypeError)
    omegas = np.linspace(0.0, 360.0, num=int(360.0 / omega_step), endpoint=False)
    for omega in omegas:
        print('\n** COMPUTING AT OMEGA=%03.1f deg' % omega)
        # prepare rotation matrix
        omegar = omega * np.pi / 180
        R = np.array([[np.cos(omegar), -np.sin(omegar), 0], [np.sin(omegar), np.cos(omegar), 0], [0, 0, 1]])
        # R = R.dot(Rlt).dot(Rut) # with tilts
        Gl = R.dot(Gs)
        print('scattering vector in laboratory CS', Gl)
        n = R.dot(gt.dot(hkl.normal()))
        print('plane normal:', hkl.normal())
        print(R)
        print('rotated plane normal:', n, ' with a norm of', np.linalg.norm(n))
        G = n / hkl.interplanar_spacing()  # here G == N
        print('G vector:', G, ' with a norm of', np.linalg.norm(G))
        K = X + G
        print('X + G vector', K)
        magnitude_K.append(np.linalg.norm(K))
        print('magnitude of K', np.linalg.norm(K))
        alpha = np.arccos(np.dot(-X, G) / (np.linalg.norm(-X) * np.linalg.norm(G))) * 180 / np.pi
        print('angle between -X and G', alpha)
        alphas.append(alpha)
        twotheta = np.arccos(np.dot(K, X) / (np.linalg.norm(K) * np.linalg.norm(X))) * 180 / np.pi
        print('angle (deg) between K and X', twotheta)
        twothetas.append(twotheta)
    print('min alpha angle is ', min(alphas))
    # compute omega_1 and omega_2 to verify graphically
    (w1, w2) = self.dct_omega_angles(hkl, lambda_keV, verbose=False)
    # gather the results in a single figure
    fig = plt.figure(figsize=(12, 10))
    fig.add_subplot(311)
    plt.title('Looking for (%d%d%d) Bragg reflexions' % (h, k, l))
    plt.plot(omegas, alphas, 'k-')
    plt.xlim(0, 360)
    plt.ylim(0, 180)
    plt.xticks(np.arange(0, 390, 30))
    # add bragg condition
    plt.axhline(90 - theta, xmin=0, xmax=360, linewidth=2)
    plt.annotate('$\pi/2-\\theta_{Bragg}$', xycoords='data', xy=(360, 90 - theta), horizontalalignment='left',
                 verticalalignment='center', fontsize=16)
    # add omega solutions
    plt.axvline(w1 + 180, ymin=0, ymax=180, linewidth=2, linestyle='dashed', color='gray')
    plt.axvline(w2 + 180, ymin=0, ymax=180, linewidth=2, linestyle='dashed', color='gray')
    plt.annotate('$\\omega_1$', xycoords='data', xy=(w1 + 180, 0), horizontalalignment='center',
                 verticalalignment='bottom', fontsize=16)
    plt.annotate('$\\omega_2$', xycoords='data', xy=(w2 + 180, 0), horizontalalignment='center',
                 verticalalignment='bottom', fontsize=16)
    plt.ylabel(r'Angle between $-X$ and $\mathbf{G}$')
    fig.add_subplot(312)
    plt.plot(omegas, twothetas, 'k-')
    plt.xlim(0, 360)
    # plt.ylim(0,180)
    plt.xticks(np.arange(0, 390, 30))
    plt.axhline(2 * theta, xmin=0, xmax=360, linewidth=2)
    plt.annotate('$2\\theta_{Bragg}$', xycoords='data', xy=(360, 2 * theta), horizontalalignment='left',
                 verticalalignment='center', fontsize=16)
    plt.axvline(w1 + 180, linewidth=2, linestyle='dashed', color='gray')
    plt.axvline(w2 + 180, linewidth=2, linestyle='dashed', color='gray')
    plt.ylabel('Angle between $X$ and $K$')
    fig.add_subplot(313)
    plt.plot(omegas, magnitude_K, 'k-')
    plt.xlim(0, 360)
    plt.axhline(np.linalg.norm(X), xmin=0, xmax=360, linewidth=2)
    plt.annotate('$1/\\lambda$', xycoords='data', xy=(360, 1 / lambda_nm), horizontalalignment='left',
                 verticalalignment='center', fontsize=16)
    plt.axvline(w1 + 180, linewidth=2, linestyle='dashed', color='gray')
    plt.axvline(w2 + 180, linewidth=2, linestyle='dashed', color='gray')
    plt.xlabel(r'Angle of rotation $\omega$')
    plt.ylabel(r'Magnitude of $X+G$ (nm$^{-1}$)')
    plt.subplots_adjust(top=0.925, bottom=0.05, left=0.1, right=0.9)
    if display:
        plt.show()
    else:
        plt.savefig('rotating_crystal_plot_%d%d%d.pdf' % (h, k, l))
@staticmethod
def compute_instrument_transformation_matrix(rx_offset, ry_offset, rz_offset):
""" Compute the instrument transformation matrix for given rotation offset.
This function compute a 3x3 rotation matrix (passive convention) that transform the sample coordinate system
by rotating around the 3 cartesian axes in this order: rotation around X is applied first, then around Y and
finally around Z.
A sample vector :math:`V_s` is consequently transformed into :math:`V'_s` as:
.. math::
V'_s = T^T.V_s
:param double rx_offset: value to apply for the rotation around X.
:param double ry_offset: value to apply for the rotation around Y.
:param double rz_offset: value to apply for the rotation around Z.
:return: a 3x3 rotation matrix describing the transformation applied by the diffractometer.
"""
angle_zr = np.radians(rz_offset)
angle_yr = np.radians(ry_offset)
angle_xr = np.radians(rx_offset)
Rz = np.array([[np.cos(angle_zr), -np.sin(angle_zr), 0], [np.sin(angle_zr), np.cos(angle_zr), 0], [0, 0, 1]])
Ry = np.array([[np.cos(angle_yr), 0, np.sin(angle_yr)], [0, 1, 0], [-np.sin(angle_yr), 0, np.cos(angle_yr)]])
Rx = np.array([[1, 0, 0], [0, np.cos(angle_xr), -np.sin(angle_xr)], [0, np.sin(angle_xr), np.cos(angle_xr)]])
T = Rz.dot(np.dot(Ry, Rx))
return T
def topotomo_tilts(self, hkl, T=None, verbose=False):
"""Compute the tilts for topotomography alignment.
:param hkl: the hkl plane, an instance of :py:class:`~pymicro.crystal.lattice.HklPlane`
:param ndarray T: transformation matrix representing the diffractometer direction at omega=0.
:param bool verbose: activate verbose mode (False by default).
:returns tuple: (ut, lt) the two values of tilts to apply (in radians).
"""
if T is None:
T = np.eye(3) # identity be default
gt = self.orientation_matrix().transpose()
Gc = hkl.scattering_vector()
Gs = gt.dot(Gc) # in the cartesian sample CS
# apply instrument specific settings
Gs = np.dot(T.T, Gs)
# find topotomo tilts
ut = np.arctan(Gs[1] / Gs[2])
lt = np.arctan(-Gs[0] / (Gs[1] * np.sin(ut) + Gs[2] * np.cos(ut)))
if verbose:
print('up tilt (samrx) should be %.3f' % (ut * 180 / np.pi))
print('low tilt (samry) should be %.3f' % (lt * 180 / np.pi))
return ut, lt
def to_xml(self, doc):
"""
Returns an XML representation of the Orientation instance.
"""
print('deprecated as we are moving to hdf5 format')
orientation = doc.createElement('Orientation')
orientation_phi1 = doc.createElement('phi1')
orientation_phi1_text = doc.createTextNode('%f' % self.phi1())
orientation_phi1.appendChild(orientation_phi1_text)
orientation.appendChild(orientation_phi1)
orientation_Phi = doc.createElement('Phi')
orientation_Phi_text = doc.createTextNode('%f' % self.Phi())
orientation_Phi.appendChild(orientation_Phi_text)
orientation.appendChild(orientation_Phi)
orientation_phi2 = doc.createElement('phi2')
orientation_phi2_text = doc.createTextNode('%f' % self.phi2())
orientation_phi2.appendChild(orientation_phi2_text)
orientation.appendChild(orientation_phi2)
return orientation
@staticmethod
def from_xml(orientation_node):
    """Build an Orientation instance from its XML representation.

    The node is expected to carry three children holding the Euler angles
    in Bunge order (phi1, Phi, phi2), as produced by `to_xml`.

    :param orientation_node: the xml node to parse.
    :return: a new Orientation instance.
    """
    angles = [float(child.childNodes[0].nodeValue)
              for child in orientation_node.childNodes[:3]]
    return Orientation.from_euler(np.array(angles))
@staticmethod
def from_euler(euler, convention='Bunge'):
    """Create an Orientation from a set of Euler angles.

    This is the classical method to obtain an orientation matrix by 3 successive rotations. The result depends on
    the convention used (how the successive rotation axes are chosen). In the Bunge convention, the first rotation
    is around Z, the second around the new X and the third one around the new Z. In the Roe convention, the second
    one is around Y.

    :param euler: the 3 Euler angles in degrees.
    :param str convention: 'Bunge' (default) or 'Roe'.
    :return: a new Orientation instance.
    """
    if convention == 'Roe':
        # convert Roe angles to the equivalent Bunge angles
        angles = (euler[0] + 90, euler[1], euler[2] - 90)
    else:
        angles = (euler[0], euler[1], euler[2])
    return Orientation(Orientation.Euler2OrientationMatrix(angles))
@staticmethod
def from_rodrigues(rod):
    """Create an Orientation from a Rodrigues vector.

    :param rod: the Rodrigues vector as a 3 components array.
    :return: a new Orientation instance.
    """
    return Orientation(Orientation.Rodrigues2OrientationMatrix(rod))
@staticmethod
def from_Quaternion(q):
    """Create an Orientation from a Quaternion instance.

    :param q: a `Quaternion` instance.
    :return: a new Orientation instance.
    """
    return Orientation(Orientation.Quaternion2OrientationMatrix(q))
@staticmethod
def Zrot2OrientationMatrix(x1=None, x2=None, x3=None):
"""Compute the orientation matrix from the rotated coordinates given in the
.inp file for Zebulon's computations
Need at least two vectors to compute cross product
Still need some tests to validate this function
"""
if (x1 is None and x2 is None):
raise NameError('Need at least two vectors to compute the matrix')
elif (x1 == None and x3 == None):
raise NameError('Need at least two vectors to compute the matrix')
elif (x3 == None and x2 == None):
raise NameError('Need at least two vectors to compute the matrix')
if x1 == None:
x1 = np.cross(x2, x3)
elif x2 == None:
x2 = np.cross(x3, x1)
elif x3 == None:
x3 = np.cross(x1, x2)
x1 = x1 / np.linalg.norm(x1)
x2 = x2 / np.linalg.norm(x2)
x3 = x3 / np.linalg.norm(x3)
g = np.array([x1, x2, x3]).transpose()
return g
@staticmethod
def OrientationMatrix2EulerSF(g):
    """
    Compute the Euler angles (in degrees) from the orientation matrix
    in a similar way as done in Mandel_crystal.c

    :param g: the 3x3 orientation matrix.
    :return: the 3 Euler angles (Bunge convention) in degrees as a numpy array.
    """
    tol = 0.1  # tolerance used to pick the correct arcsin branch below
    r = np.zeros(9, dtype=np.float64)  # double precision here
    # Z-set order for tensor is 11 22 33 12 23 13 21 32 31
    r[0] = g[0, 0]
    r[1] = g[1, 1]
    r[2] = g[2, 2]
    r[3] = g[0, 1]
    r[4] = g[1, 2]
    r[5] = g[0, 2]
    r[6] = g[1, 0]
    r[7] = g[2, 1]
    r[8] = g[2, 0]
    phi = np.arccos(r[2])
    if phi == 0.:
        # degenerate case g33 == 1: only phi1 + phi2 is defined,
        # the whole rotation is attributed to phi1
        phi2 = 0.
        phi1 = np.arcsin(r[6])
        # arcsin is ambiguous: check consistency with cos(phi1) == g11
        if abs(np.cos(phi1) - r[0]) > tol:
            phi1 = np.pi - phi1
    else:
        # general case: recover phi1 and phi2 from g13 and g31
        x2 = r[5] / np.sin(phi)
        x1 = r[8] / np.sin(phi);
        # clamp to [-1, 1] to protect arcsin from rounding errors
        if x1 > 1.:
            x1 = 1.
        if x2 > 1.:
            x2 = 1.
        if x1 < -1.:
            x1 = -1.
        if x2 < -1.:
            x2 = -1.
        phi2 = np.arcsin(x2)
        phi1 = np.arcsin(x1)
        # pick the correct arcsin branch by checking g32 and g23
        if abs(np.cos(phi2) * np.sin(phi) - r[7]) > tol:
            phi2 = np.pi - phi2
        if abs(np.cos(phi1) * np.sin(phi) + r[4]) > tol:
            phi1 = np.pi - phi1
    return np.degrees(np.array([phi1, phi, phi2]))
@staticmethod
def OrientationMatrix2Euler(g):
"""
Compute the Euler angles from the orientation matrix.
This conversion follows the paper of Rowenhorst et al. :cite:`Rowenhorst2015`.
In particular when :math:`g_{33} = 1` within the machine precision,
there is no way to determine the values of :math:`\phi_1` and :math:`\phi_2`
(only their sum is defined). The convention is to attribute
the entire angle to :math:`\phi_1` and set :math:`\phi_2` to zero.
:param g: The 3x3 orientation matrix
:return: The 3 euler angles in degrees.
"""
eps = np.finfo('float').eps
(phi1, Phi, phi2) = (0.0, 0.0, 0.0)
# treat special case where g[2, 2] = 1
if np.abs(g[2, 2]) >= 1 - eps:
if g[2, 2] > 0.0:
phi1 = np.arctan2(g[0][1], g[0][0])
else:
phi1 = -np.arctan2(-g[0][1], g[0][0])
Phi = np.pi
else:
Phi = np.arccos(g[2][2])
zeta = 1.0 / np.sqrt(1.0 - g[2][2] ** 2)
phi1 = np.arctan2(g[2][0] * zeta, -g[2][1] * zeta)
phi2 = np.arctan2(g[0][2] * zeta, g[1][2] * zeta)
# ensure angles are in the range [0, 2*pi]
if phi1 < 0.0:
phi1 += 2 * np.pi
if Phi < 0.0:
Phi += 2 * np.pi
if phi2 < 0.0:
phi2 += 2 * np.pi
return np.degrees([phi1, Phi, phi2])
@staticmethod
def OrientationMatrix2Rodrigues(g):
"""
Compute the rodrigues vector from the orientation matrix.
:param g: The 3x3 orientation matrix representing the rotation.
:returns: The Rodrigues vector as a 3 components array.
"""
t = g.trace() + 1
if np.abs(t) < np.finfo(g.dtype).eps:
print('warning, returning [0., 0., 0.], consider using axis, angle representation instead')
return np.zeros(3)
else:
r1 = (g[1, 2] - g[2, 1]) / t
r2 = (g[2, 0] - g[0, 2]) / t
r3 = (g[0, 1] - g[1, 0]) / t
return np.array([r1, r2, r3])
@staticmethod
def OrientationMatrix2Quaternion(g, P=1):
    """Compute the quaternion from the orientation matrix.

    The conversion follows Rowenhorst et al. :cite:`Rowenhorst2015`: the
    magnitudes of the four components are obtained from the diagonal terms
    and the sign of each vector component is then fixed from the
    corresponding pair of off-diagonal terms.

    :param g: the 3x3 orientation matrix.
    :param int P: the quaternion convention (+1 by default).
    :return: the 4 quaternion components as an array.
    """
    q0 = 0.5 * np.sqrt(1 + g[0, 0] + g[1, 1] + g[2, 2])
    q1 = P * 0.5 * np.sqrt(1 + g[0, 0] - g[1, 1] - g[2, 2])
    q2 = P * 0.5 * np.sqrt(1 - g[0, 0] + g[1, 1] - g[2, 2])
    q3 = P * 0.5 * np.sqrt(1 - g[0, 0] - g[1, 1] + g[2, 2])
    # the three sign corrections are independent: using an if/elif chain
    # here (as previously done) could only ever flip a single component
    if g[2, 1] < g[1, 2]:
        q1 = q1 * -1
    if g[0, 2] < g[2, 0]:
        q2 = q2 * -1
    if g[1, 0] < g[0, 1]:
        q3 = q3 * -1
    q = Quaternion(np.array([q0, q1, q2, q3]), convention=P)
    return q.quat
@staticmethod
def Rodrigues2OrientationMatrix(rod):
"""
Compute the orientation matrix from the Rodrigues vector.
:param rod: The Rodrigues vector as a 3 components array.
:returns: The 3x3 orientation matrix representing the rotation.
"""
r = np.linalg.norm(rod)
I = np.diagflat(np.ones(3))
if r < np.finfo(r.dtype).eps:
return I
else:
theta = 2 * np.arctan(r)
n = rod / r
omega = np.array([[0.0, n[2], -n[1]], [-n[2], 0.0, n[0]], [n[1], -n[0], 0.0]])
return I + np.sin(theta) * omega + (1 - np.cos(theta)) * omega.dot(omega)
@staticmethod
def Rodrigues2Axis(rod):
"""
Compute the axis/angle representation from the Rodrigues vector.
:param rod: The Rodrigues vector as a 3 components array.
:returns: A tuple in the (axis, angle) form.
"""
r = np.linalg.norm(rod)
axis = rod / r
angle = 2 * np.arctan(r)
return axis, angle
@staticmethod
def Axis2OrientationMatrix(axis, angle):
"""
Compute the (passive) orientation matrix associated the rotation defined by the given (axis, angle) pair.
:param axis: the rotation axis.
:param angle: the rotation angle (degrees).
:returns: the 3x3 orientation matrix.
"""
omega = np.radians(angle)
c = np.cos(omega)
s = np.sin(omega)
g = np.array([[c + (1 - c) * axis[0] ** 2, (1 - c) * axis[0] * axis[1] + s * axis[2],
(1 - c) * axis[0] * axis[2] - s * axis[1]],
[(1 - c) * axis[0] * axis[1] - s * axis[2], c + (1 - c) * axis[1] ** 2,
(1 - c) * axis[1] * axis[2] + s * axis[0]],
[(1 - c) * axis[0] * axis[2] + s * axis[1], (1 - c) * axis[1] * axis[2] - s * axis[0],
c + (1 - c) * axis[2] ** 2]])
return g
@staticmethod
def Euler2Axis(euler):
"""
Compute the (axis, angle) representation associated to this (passive) rotation expressed by the Euler angles.
:param euler: 3 euler angles (in degrees)
:returns: a tuple containing the axis (a vector) and the angle (in radians).
"""
(phi1, Phi, phi2) = np.radians(euler)
t = np.tan(0.5 * Phi)
s = 0.5 * (phi1 + phi2)
d = 0.5 * (phi1 - phi2)
tau = np.sqrt(t ** 2 + np.sin(s) ** 2)
alpha = 2 * np.arctan2(tau, np.cos(s))
if alpha > np.pi:
axis = np.array([-t / tau * np.cos(d), -t / tau * np.sin(d), -1 / tau * np.sin(s)])
angle = 2 * np.pi - alpha
else:
axis = np.array([t / tau * np.cos(d), t / tau * np.sin(d), 1 / tau * np.sin(s)])
angle = alpha
return axis, angle
@staticmethod
def Euler2Quaternion(euler, P=1):
    """
    Compute the quaternion from the 3 euler angles (in degrees).

    :param tuple euler: the 3 euler angles in degrees.
    :param int P: +1 to compute an active quaternion (default), -1 for a passive quaternion.
    :return: a `Quaternion` instance representing the rotation.
    """
    phi1_r, Phi_r, phi2_r = np.radians(euler)
    half_sum = 0.5 * (phi1_r + phi2_r)
    half_diff = 0.5 * (phi1_r - phi2_r)
    cos_half_Phi = np.cos(0.5 * Phi_r)
    sin_half_Phi = np.sin(0.5 * Phi_r)
    components = np.array([np.cos(half_sum) * cos_half_Phi,
                           -P * np.cos(half_diff) * sin_half_Phi,
                           -P * np.sin(half_diff) * sin_half_Phi,
                           -P * np.sin(half_sum) * cos_half_Phi])
    return Quaternion(components, convention=P)
@staticmethod
def Euler2Rodrigues(euler):
"""
Compute the rodrigues vector from the 3 euler angles (in degrees)
"""
(phi1, Phi, phi2) = np.radians(euler)
a = 0.5 * (phi1 - phi2)
b = 0.5 * (phi1 + phi2)
r1 = np.tan(0.5 * Phi) * np.cos(a) / np.cos(b)
r2 = np.tan(0.5 * Phi) * np.sin(a) / np.cos(b)
r3 = np.tan(b)
return np.array([r1, r2, r3])
@staticmethod
def Euler2OrientationMatrix(euler):
"""
Compute the orientation matrix :math:`\mathbf{g}` associated with the 3 Euler angles
:math:`(\phi_1, \Phi, \phi_2)`. The matrix is calculated via (see the `euler_angles` recipe in the cookbook
for a detailed example):
.. math::
\mathbf{g}=\\begin{pmatrix}
\cos\phi_1\cos\phi_2 - \sin\phi_1\sin\phi_2\cos\Phi & \sin\phi_1\cos\phi_2 + \cos\phi_1\sin\phi_2\cos\Phi & \sin\phi_2\sin\Phi \\\\
-\cos\phi_1\sin\phi_2 - \sin\phi_1\cos\phi_2\cos\Phi & -\sin\phi_1\sin\phi_2 + \cos\phi_1\cos\phi_2\cos\Phi & \cos\phi_2\sin\Phi \\\\
\sin\phi_1\sin\Phi & -\cos\phi_1\sin\Phi & \cos\Phi \\\\
\end{pmatrix}
:param euler: The triplet of the Euler angles (in degrees).
:returns g: The 3x3 orientation matrix.
"""
(rphi1, rPhi, rphi2) = np.radians(euler)
c1 = np.cos(rphi1)
s1 = np.sin(rphi1)
c = np.cos(rPhi)
s = np.sin(rPhi)
c2 = np.cos(rphi2)
s2 = np.sin(rphi2)
# rotation matrix g
g11 = c1 * c2 - s1 * s2 * c
g12 = s1 * c2 + c1 * s2 * c
g13 = s2 * s
g21 = -c1 * s2 - s1 * c2 * c
g22 = -s1 * s2 + c1 * c2 * c
g23 = c2 * s
g31 = s1 * s
g32 = -c1 * s
g33 = c
g = np.array([[g11, g12, g13], [g21, g22, g23], [g31, g32, g33]])
return g
@staticmethod
def Quaternion2Euler(q):
"""
Compute Euler angles from a Quaternion
:param q: Quaternion
:return: Euler angles (in degrees, Bunge convention)
"""
P = q.convention
(q0, q1, q2, q3) = q.quat
q03 = q0 ** 2 + q3 ** 2
q12 = q1 ** 2 + q2 ** 2
chi = np.sqrt(q03 * q12)
if chi == 0.:
if q12 == 0.:
phi_1 = atan2(-2 * P * q0 * q3, q0 ** 2 - q3 ** 2)
Phi = 0.
else:
phi_1 = atan2(-2 * q1 * q2, q1 ** 2 - q2 ** 2)
Phi = pi
phi_2 = 0.
else:
phi_1 = atan2((q1 * q3 - P * q0 * q2) / chi, (-P * q0 * q1 - q2 * q3) / chi)
Phi = atan2(2 * chi, q03 - q12)
phi_2 = atan2((P * q0 * q2 + q1 * q3) / chi, (q2 * q3 - P * q0 * q1) / chi)
return np.degrees([phi_1, Phi, phi_2])
@staticmethod
def Quaternion2OrientationMatrix(q):
P = q.convention
(q0, q1, q2, q3) = q.quat
qbar = q0 ** 2 - q1 ** 2 - q2 ** 2 - q3 ** 2
g = np.array([[qbar + 2 * q1 ** 2, 2 * (q1 * q2 - P * q0 * q3), 2 * (q1 * q3 + P * q0 * q2)],
[2 * (q1 * q2 + P * q0 * q3), qbar + 2 * q2 ** 2, 2 * (q2 * q3 - P * q0 * q1)],
[2 * (q1 * q3 - P * q0 * q2), 2 * (q2 * q3 + P * q0 * q1), qbar + 2 * q3 ** 2]])
return g
@staticmethod
def read_euler_txt(txt_path):
    """
    Read a set of euler angles from an ascii file.

    Convenience wrapper around `read_orientations` with the default
    ('euler') data type.

    :param str txt_path: path to the text file containing the euler angles.
    :returns dict: a dictionary with the line number and the corresponding orientation.
    """
    orientations = Orientation.read_orientations(txt_path)
    return orientations
@staticmethod
def read_orientations(txt_path, data_type='euler', **kwargs):
    """
    Read a set of grain orientations from a text file.

    The text file must be organised in 3 columns (the other are ignored), corresponding to either the three euler
    angles or the three rodrigues vector components, depending on the data_type). Internally the ascii file is read
    by the genfromtxt function of numpy, additional keywords (such as the delimiter) can be passed to via the
    kwargs dictionnary.

    :param str txt_path: path to the text file containing the orientations.
    :param str data_type: 'euler' (default) or 'rodrigues'.
    :param dict kwargs: additional parameters passed to genfromtxt.
    :returns dict: a dictionary with the line number and the corresponding orientation.
    """
    data = np.genfromtxt(txt_path, **kwargs)
    orientations = {}
    # keys are 1-based line numbers
    for index in range(len(data)):
        values = np.array([float(data[index, 0]), float(data[index, 1]), float(data[index, 2])])
        if data_type == 'euler':
            orientations[index + 1] = Orientation.from_euler(values)
        elif data_type == 'rodrigues':
            orientations[index + 1] = Orientation.from_rodrigues(values)
    return orientations
@staticmethod
def read_euler_from_zset_inp(inp_path):
    """Read a set of grain orientations from a z-set input file.

    In z-set input files, the orientation data may be specified
    either using the rotation of two vector, euler angles or
    rodrigues components directly. For instance the following
    lines are extracted from a polycrystalline calculation file
    using the rotation keyword:

    ::

     **elset elset1 *file au.mat *integration theta_method_a 1.0 1.e-9 150 *rotation x1 0.438886 -1.028805 0.197933 x3 1.038339 0.893172 1.003888
     **elset elset2 *file au.mat *integration theta_method_a 1.0 1.e-9 150 *rotation x1 0.178825 -0.716937 1.043300 x3 0.954345 0.879145 1.153101

    :param str inp_path: the path to the ascii file to read.
    :returns dict: a dictionary of the orientations associated with the elset names.
    """
    # use a context manager so the file handle is always closed
    with open(inp_path) as inp:
        lines = inp.readlines()
    # locate the ***material block
    for i, line in enumerate(lines):
        if line.lstrip().startswith('***material'):
            break
    euler_lines = []
    for j, line in enumerate(lines[i + 1:]):
        # read until next *** block
        if line.lstrip().startswith('***'):
            break
        # skip commented lines, keep only **elset definitions
        if not line.lstrip().startswith('%') and line.find('**elset') >= 0:
            euler_lines.append(line)
    euler = []
    for l in euler_lines:
        tokens = l.split()
        elset = tokens[tokens.index('**elset') + 1]
        irot = tokens.index('*rotation')
        if tokens[irot + 1] == 'x1':
            # orientation given by the rotation of two vectors x1 and x3
            x1 = np.array([float(t) for t in tokens[irot + 2:irot + 5]])
            x3 = np.array([float(t) for t in tokens[irot + 6:irot + 9]])
            euler.append([elset, Orientation.Zrot2OrientationMatrix(x1=x1, x3=x3)])
        else:  # euler angles
            angles = np.array([float(t) for t in tokens[irot + 1:irot + 4]])
            euler.append([elset, Orientation.from_euler(angles)])
    return dict(euler)
def slip_system_orientation_tensor(self, s):
"""Compute the orientation strain tensor m^s for this :py:class:`~pymicro.crystal.microstructure.Orientation`
and the given slip system.
:param s: an instance of :py:class:`~pymicro.crystal.lattice.SlipSystem`
.. math::
M^s_{ij} = \left(l^s_i.n^s_j)
"""
gt = self.orientation_matrix().transpose()
plane = s.get_slip_plane()
n_rot = np.dot(gt, plane.normal())
slip = s.get_slip_direction()
l_rot = np.dot(gt, slip.direction())
return np.outer(l_rot, n_rot)
def slip_system_orientation_strain_tensor(self, s):
"""Compute the orientation strain tensor m^s for this :py:class:`~pymicro.crystal.microstructure.Orientation`
and the given slip system.
:param s: an instance of :py:class:`~pymicro.crystal.lattice.SlipSystem`
.. math::
m^s_{ij} = \\frac{1}{2}\left(l^s_i.n^s_j + l^s_j.n^s_i)
"""
gt = self.orientation_matrix().transpose()
plane = s.get_slip_plane()
n_rot = np.dot(gt, plane.normal())
slip = s.get_slip_direction()
l_rot = np.dot(gt, slip.direction())
m = 0.5 * (np.outer(l_rot, n_rot) + np.outer(n_rot, l_rot))
return m
def slip_system_orientation_rotation_tensor(self, s):
"""Compute the orientation rotation tensor q^s for this :py:class:`~pymicro.crystal.microstructure.Orientation`
and the given slip system.
:param s: an instance of :py:class:`~pymicro.crystal.lattice.SlipSystem`
.. math::
q^s_{ij} = \\frac{1}{2}\left(l^s_i.n^s_j - l^s_j.n^s_i)
"""
gt = self.orientation_matrix().transpose()
plane = s.get_slip_plane()
n_rot = np.dot(gt, plane.normal())
slip = s.get_slip_direction()
l_rot = np.dot(gt, slip.direction())
q = 0.5 * (np.outer(l_rot, n_rot) - np.outer(n_rot, l_rot))
return q
def schmid_factor(self, slip_system, load_direction=[0., 0., 1]):
"""Compute the Schmid factor for this crystal orientation and the
given slip system.
:param slip_system: a slip system instance.
:param load_direction: a unit vector describing the loading direction (default: vertical axis [0, 0, 1]).
:returns float: a number between 0 ad 0.5.
"""
plane = slip_system.get_slip_plane()
gt = self.orientation_matrix().transpose()
n_rot = np.dot(gt, plane.normal()) # plane.normal() is a unit vector
slip = slip_system.get_slip_direction().direction()
slip_rot = np.dot(gt, slip)
SF = np.abs(np.dot(n_rot, load_direction) * np.dot(slip_rot, load_direction))
return SF
def compute_all_schmid_factors(self, slip_systems, load_direction=[0., 0., 1], verbose=False):
"""Compute all Schmid factors for this crystal orientation and the
given list of slip systems.
:param slip_systems: a list of the slip system from which to compute the Schmid factor values.
:param load_direction: a unit vector describing the loading direction (default: vertical axis [0, 0, 1]).
:param bool verbose: activate verbose mode.
:returns list: a list of the schmid factors.
"""
SF_list = []
for ss in slip_systems:
sf = self.schmid_factor(ss, load_direction)
if verbose:
print('Slip system: %s, Schmid factor is %.3f' % (ss, sf))
SF_list.append(sf)
return SF_list
class Grain:
"""
Class defining a crystallographic grain.
A grain has its own crystallographic orientation.
An optional id for the grain may be specified.
The center attribute is the center of mass of the grain in world coordinates.
The volume of the grain is expressed in pixel/voxel unit.
"""
def __init__(self, grain_id, grain_orientation):
self.id = grain_id
self.orientation = grain_orientation
self.center = np.array([0., 0., 0.])
self.volume = 0 # warning not implemented
self.vtkmesh = None
self.hkl_planes = []
def __repr__(self):
"""Provide a string representation of the class."""
s = '%s\n * id = %d\n' % (self.__class__.__name__, self.id)
s += ' * %s\n' % (self.orientation)
s += ' * center %s\n' % np.array_str(self.center)
s += ' * has vtk mesh ? %s\n' % (self.vtkmesh != None)
return s
def schmid_factor(self, slip_system, load_direction=[0., 0., 1]):
"""Compute the Schmid factor of this grain for the given slip system.
**Parameters**:
*slip_system*: a slip system instance.
*load_direction*: a unit vector describing the loading direction.
**Returns**
The Schmid factor of this grain for the given slip system.
"""
plane = slip_system.get_slip_plane()
gt = self.orientation_matrix().transpose()
n_rot = np.dot(gt, plane.normal()) # plane.normal() is a unit vector
slip = slip_system.get_slip_direction().direction()
slip_rot = np.dot(gt, slip)
SF = np.abs(np.dot(n_rot, load_direction) * np.dot(slip_rot, load_direction))
return self.orientation.schmid_factor(slip_system, load_direction)
def SetVtkMesh(self, mesh):
"""Set the VTK mesh of this grain.
**Parameters:**
*mesh* The grain mesh in VTK format (typically vtkunstructuredgrid)
"""
self.vtkmesh = mesh
def add_vtk_mesh(self, array, contour=True, verbose=False):
    """Add a mesh to this grain.

    This method process a labeled array to extract the geometry of the grain. The grain shape is defined by
    the pixels with a value of the grain id. A vtkUniformGrid object is created and thresholded or contoured
    depending on the value of the flag `contour`.
    The resulting mesh is centered on the center of mass of the grain and
    stored via `SetVtkMesh`.

    :param ndarray array: a numpy array from which to extract the grain shape.
    :param bool contour: a flag to use contour mode for the shape.
    :param bool verbose: activate verbose mode.
    """
    label = self.id  # we use the grain id here...
    # create vtk structure
    from scipy import ndimage
    from vtk.util import numpy_support
    grain_size = np.shape(array)
    array_bin = (array == label).astype(np.uint8)
    local_com = ndimage.measurements.center_of_mass(array_bin, array)
    vtk_data_array = numpy_support.numpy_to_vtk(np.ravel(array_bin, order='F'), deep=1)
    grid = vtk.vtkUniformGrid()
    # center the grid on the grain center of mass
    grid.SetOrigin(-local_com[0], -local_com[1], -local_com[2])
    grid.SetSpacing(1, 1, 1)
    if vtk.vtkVersion().GetVTKMajorVersion() > 5:
        grid.SetScalarType(vtk.VTK_UNSIGNED_CHAR, vtk.vtkInformation())
    else:
        grid.SetScalarType(vtk.VTK_UNSIGNED_CHAR)
    if contour:
        grid.SetExtent(0, grain_size[0] - 1, 0, grain_size[1] - 1, 0, grain_size[2] - 1)
        grid.GetPointData().SetScalars(vtk_data_array)
        # contouring selected grain; a dedicated name is used here so the
        # filter does not shadow the `contour` boolean parameter
        contour_filter = vtk.vtkContourFilter()
        if vtk.vtkVersion().GetVTKMajorVersion() > 5:
            contour_filter.SetInputData(grid)
        else:
            contour_filter.SetInput(grid)
        contour_filter.SetValue(0, 0.5)
        contour_filter.Update()
        if verbose:
            print(contour_filter.GetOutput())
        self.SetVtkMesh(contour_filter.GetOutput())
    else:
        grid.SetExtent(0, grain_size[0], 0, grain_size[1], 0, grain_size[2])
        grid.GetCellData().SetScalars(vtk_data_array)
        # threshold selected grain
        thresh = vtk.vtkThreshold()
        thresh.ThresholdBetween(0.5, 1.5)
        # thresh.ThresholdBetween(label-0.5, label+0.5)
        if vtk.vtkVersion().GetVTKMajorVersion() > 5:
            thresh.SetInputData(grid)
        else:
            thresh.SetInput(grid)
        thresh.Update()
        if verbose:
            print('thresholding label: %d' % label)
            print(thresh.GetOutput())
        self.SetVtkMesh(thresh.GetOutput())
def to_xml(self, doc, file_name=None):
    """Build and return an XML element describing this grain.

    The element holds the grain id, its orientation, its position and the
    name of the vtk mesh file.

    :param doc: the xml Document used to create the elements.
    :param str file_name: optional mesh file name (defaults to `vtk_file_name`).
    :return: the xml element representing this grain.
    """
    grain_element = doc.createElement('Grain')
    id_element = doc.createElement('Id')
    id_element.appendChild(doc.createTextNode('%s' % self.id))
    grain_element.appendChild(id_element)
    grain_element.appendChild(self.orientation.to_xml(doc))
    position_element = doc.createElement('Position')
    for axis, value in zip(('X', 'Y', 'Z'), self.center):
        axis_element = doc.createElement(axis)
        position_element.appendChild(axis_element)
        axis_element.appendChild(doc.createTextNode('%f' % value))
    grain_element.appendChild(position_element)
    if not file_name:
        file_name = self.vtk_file_name()
    mesh_element = doc.createElement('Mesh')
    mesh_element.appendChild(doc.createTextNode('%s' % file_name))
    grain_element.appendChild(mesh_element)
    return grain_element
@staticmethod
def from_xml(grain_node, verbose=False):
    """Create a `Grain` instance from an xml node.

    :param grain_node: the xml node describing the grain (id, orientation,
        position and mesh file, in that order).
    :param bool verbose: flag to activate console output.
    :return: the new `Grain` instance.
    """
    id_node = grain_node.childNodes[0]
    orientation_node = grain_node.childNodes[1]
    position_node = grain_node.childNodes[2]
    mesh_node = grain_node.childNodes[3]
    orientation = Orientation.from_xml(orientation_node)
    grain_id = int(id_node.childNodes[0].nodeValue)
    grain = Grain(grain_id, orientation)
    coords = [float(position_node.childNodes[i].childNodes[0].nodeValue) for i in range(3)]
    grain.center = np.array(coords)
    mesh_file = mesh_node.childNodes[0].nodeValue
    if verbose:
        print(mesh_file)
    grain.load_vtk_repr(mesh_file, verbose)
    return grain
def vtk_file_name(self):
    """Return the default vtk file name for this grain, derived from its id."""
    return 'grain_%d.vtu' % self.id
def save_vtk_repr(self, file_name=None):
    """Write the vtk representation of this grain to disk as a .vtu file.

    :param str file_name: output file name (defaults to `vtk_file_name`).
    """
    import vtk
    if not file_name:
        file_name = self.vtk_file_name()
    print('writing ' + file_name)  # fixed typo in log message ('writting')
    writer = vtk.vtkXMLUnstructuredGridWriter()
    writer.SetFileName(file_name)
    if vtk.vtkVersion().GetVTKMajorVersion() > 5:
        writer.SetInputData(self.vtkmesh)
    else:
        writer.SetInput(self.vtkmesh)
    writer.Write()
def load_vtk_repr(self, file_name, verbose=False):
    """Read the vtk representation of this grain from a .vtu file.

    :param str file_name: the file to read.
    :param bool verbose: flag to activate console output.
    """
    import vtk
    if verbose:
        print('reading ' + file_name)
    mesh_reader = vtk.vtkXMLUnstructuredGridReader()
    mesh_reader.SetFileName(file_name)
    mesh_reader.Update()
    self.vtkmesh = mesh_reader.GetOutput()
def orientation_matrix(self):
    """Return the orientation matrix of this grain (delegated to its `Orientation`)."""
    return self.orientation.orientation_matrix()
def dct_omega_angles(self, hkl, lambda_keV, verbose=False):
    """Compute the two omega angles which satisfy the Bragg condition.

    For a grain with a given crystal orientation sitting on a vertical
    rotation axis, there are exactly two omega positions in [0, 2pi] for
    which a particular hkl reflexion will fulfil Bragg's law. The actual
    computation is delegated to
    :py:func:`~pymicro.crystal.microstructure.Orientation.dct_omega_angles`
    of the :py:class:`~pymicro.crystal.microstructure.Orientation` class.

    :param hkl: The given cristallographic :py:class:`~pymicro.crystal.lattice.HklPlane`
    :param float lambda_keV: The X-rays energy expressed in keV
    :param bool verbose: Verbose mode (False by default)
    :returns tuple: (w1, w2) the two values of the omega angle.
    """
    return self.orientation.dct_omega_angles(hkl, lambda_keV, verbose)
@staticmethod
def from_dct(label=1, data_dir='.'):
    """Create a `Grain` instance from a DCT grain file.

    :param int label: the grain id.
    :param str data_dir: the data root from where to fetch data files.
    :return: A new grain instance.
    """
    grain_path = os.path.join(data_dir, '4_grains', 'phase_01', 'grain_%04d.mat' % label)
    # use a context manager so the hdf5 file is closed, and modern h5py
    # indexing ([()]) instead of the .value attribute removed in h5py 3.0
    with h5py.File(grain_path, 'r') as grain_info:
        g = Grain(label, Orientation.from_rodrigues(grain_info['R_vector'][()]))
        g.center = grain_info['center'][()]
    # add spatial representation of the grain if reconstruction is available
    grain_map_path = os.path.join(data_dir, '5_reconstruction', 'phase_01_vol.mat')
    if os.path.exists(grain_map_path):
        with h5py.File(grain_map_path, 'r') as f:
            # because how matlab writes the data, we need to swap X and Z axes in the DCT volume
            vol = f['vol'][()].transpose(2, 1, 0)
        from scipy import ndimage
        grain_bin = (vol == label).astype(np.uint8)
        # find_objects expects an integer label array, not a boolean one
        grain_data = vol[ndimage.find_objects(grain_bin)[0]]
        # ndimage.sum replaces the deprecated ndimage.measurements.sum
        g.volume = ndimage.sum(grain_bin)
        # create the vtk representation of the grain
        g.add_vtk_mesh(grain_data, contour=False)
    return g
class Microstructure:
"""
Class used to manipulate a full microstructure.
It is typically defined as a list of grain objects and has an associated crystal `Lattice` instance.
A grain map and a mask can be added to the microstructure instance. For simplicity, a single field `voxel_size`
describes the spatial resolution of these maps.
"""
def __init__(self, name='empty', lattice=None):
    """Create a new microstructure with the given name and crystal lattice.

    If no lattice is provided, a default cubic lattice with unit cell size is used.
    """
    self.name = name
    # default to a unit cubic lattice when none is given
    self._lattice = Lattice.cubic(1.0) if lattice is None else lattice
    self.grains = []       # list of Grain instances
    self.grain_map = None  # labeled image of the grains
    self.mask = None       # binary mask of the sample
    self.voxel_size = 1.0  # unit is voxel by default
    self.vtkmesh = None    # vtk representation of the microstructure
def get_number_of_phases(self):
    """Return the number of phases in this microstructure.

    Only single phase microstructures are supported for now, so this
    always returns 1.
    """
    return 1
def get_number_of_grains(self):
    """Return how many grains this microstructure contains."""
    return len(self.grains)
def set_lattice(self, lattice):
    """Set the crystallographic lattice associated with this microstructure.

    :param Lattice lattice: an instance of the `Lattice` class.
    """
    self._lattice = lattice
def get_lattice(self):
    """Get the crystallographic lattice associated with this microstructure.

    :return: an instance of the `Lattice` class.
    """
    return self._lattice
def set_grain_map(self, grain_map, voxel_size):
    """Set the grain map (labeled image) for this microstructure.

    :param ndarray grain_map: a 2D or 3D numpy array.
    :param float voxel_size: the size of the voxels in mm unit.
    """
    self.grain_map = grain_map
    self.voxel_size = voxel_size
def set_mask(self, mask, voxel_size):
    """Set the sample mask for this microstructure.

    :param ndarray mask: a 2D or 3D numpy array.
    :param float voxel_size: the size of the voxels in mm unit.
    """
    self.mask = mask
    self.voxel_size = voxel_size
@staticmethod
def random_texture(n=100):
    """Generate a microstructure with n randomly oriented grains.

    :param int n: the number of grain orientations in the microstructure.
    :return: a new `Microstructure` instance with random grain orientations.
    """
    micro = Microstructure(name='random_texture')
    micro.grains = [Grain(i + 1, Orientation.random()) for i in range(n)]
    return micro
@staticmethod
def rand_cmap(N=4096, first_is_black=False):
    """Create a random colormap with N entries.

    The random seed is fixed so the very same colormap is produced on each
    call. The first color may be forced to black, which usually marks the
    background (value 0).
    """
    np.random.seed(13)  # fixed seed for reproducible colors
    color_table = np.random.rand(N, 3)
    if first_is_black:
        color_table[0] = [0., 0., 0.]  # value 0 rendered as black
    return colors.ListedColormap(color_table)
def ipf_cmap(self):
    """Return a colormap built from the IPF color of each grain.

    Each grain color is stored at the index given by the grain id; every
    other entry is black. (Removed the unused local variable N.)
    """
    ipf_colors = np.zeros((4096, 3))
    for g in self.grains:
        ipf_colors[g.id, :] = g.orientation.get_ipf_colour()
    return colors.ListedColormap(ipf_colors)
@staticmethod
def from_xml(xml_file_name, grain_ids=None, verbose=False):
    """Load a Microstructure object from an xml file.

    The grains which are loaded can be restricted by providing the list
    of ids of the grains of interest.
    """
    if verbose and grain_ids:
        print('loading only grain ids %s' % grain_ids)
    micro = Microstructure()
    root = parse(xml_file_name).childNodes[0]
    micro.name = root.childNodes[0].childNodes[0].nodeValue
    for grain_node in root.childNodes[1].childNodes:
        this_id = int(grain_node.childNodes[0].childNodes[0].nodeValue)
        if grain_ids and this_id not in grain_ids:
            continue
        if verbose:
            print(grain_node)
        micro.grains.append(Grain.from_xml(grain_node, verbose))
    return micro
def get_grain(self, gid):
    """Look up a grain by its id.

    :param int gid: the id of the grain of interest.
    :return: the `Grain` instance with the matching id.
    :raise ValueError: if no grain with this id exists in the microstructure.
    """
    match = next((g for g in self.grains if g.id == gid), None)
    if match is None:
        raise ValueError('grain %d not found in the microstructure' % gid)
    return match
def __repr__(self):
    """Provide a string representation of the class."""
    s = '%s\n' % self.__class__.__name__
    s += '* name: %s\n' % self.name
    for g in self.grains:
        # bug fix: the original formatted g.__repr__ (the bound method object)
        # instead of calling it, so the output showed '<bound method ...>'
        # rather than the grain description
        s += '* %s' % repr(g)
    return s
def SetVtkMesh(self, mesh):
    """Attach a vtk mesh representation to this microstructure."""
    self.vtkmesh = mesh
@staticmethod
def match_grains(micro1, micro2, use_grain_ids=None, verbose=False):
    # NOTE(review): this staticmethod is shadowed by the instance method of
    # the same name defined immediately after it, so this definition is
    # effectively dead code; it simply delegates to micro1's instance method.
    # Left unchanged because renaming it would alter the class interface.
    return micro1.match_grains(micro2, use_grain_ids=use_grain_ids, verbose=verbose)
def match_grains(self, micro2, mis_tol=1, use_grain_ids=None, verbose=False):
    """Match grains from a second microstructure to this microstructure.

    For every grain of this microstructure, all grains of `micro2` whose
    disorientation is below `mis_tol` are recorded as candidates and the
    closest one is kept as the match.

    .. warning::

      This function works only for microstructures with the same symmetry.

    :param micro2: the second instance of `Microstructure` from which to match grains.
    :param float mis_tol: the tolerance is misorientation to use to detect matches (in degrees).
    :param list use_grain_ids: a list of ids to restrict the grains in which to search for matches.
    :param bool verbose: activate verbose mode.
    :raise ValueError: if the microstructures do not have the same symmetry.
    :returns tuple: A tuple of three lists holding respectively the matches, the candidates for each match and
    the grains that were unmatched.
    """
    sym = self.get_lattice().get_symmetry()
    sym2 = micro2.get_lattice().get_symmetry()
    if not sym == sym2:
        raise ValueError('warning, microstructure should have the same symmetry, got: {} and {}'.
                         format(sym, sym2))
    candidates = []
    matched = []
    unmatched = []  # grains that could not be matched within the tolerance
    # restrict the grain ids to match if needed
    grains_to_match = [self.get_grain(gid) for gid in use_grain_ids] if use_grain_ids else self.grains
    for g1 in grains_to_match:
        g1_candidates = []
        best_misorientation = mis_tol
        best_match = -1
        for g2 in micro2.grains:
            # compute the disorientation between the two grains
            mis, _, _ = g1.orientation.disorientation(g2.orientation, crystal_structure=sym)
            mis_deg = np.degrees(mis)
            if mis_deg < mis_tol:
                if verbose:
                    print('grain %3d -- candidate: %3d, misorientation: %.2f deg' % (g1.id, g2.id, mis_deg))
                # record this grain as a candidate
                g1_candidates.append(g2.id)
                if mis_deg < best_misorientation:
                    best_misorientation = mis_deg
                    best_match = g2.id
        # record our best match or mark this grain as unmatched
        if best_match > 0:
            matched.append([g1.id, best_match])
        else:
            unmatched.append(g1.id)
        candidates.append(g1_candidates)
    if verbose:
        print('done with matching')
        print('%d/%d grains were matched ' % (len(matched), len(grains_to_match)))
    return matched, candidates, unmatched
def dilate_grains(self, dilation_steps=1, dilation_ids=None):
    """Dilate grains to fill the gap between them.

    This code is based on the gtDilateGrains function from the DCT code.
    Note the implementation indexes three axes, so a 3D grain map is expected.

    :param int dilation_steps: the number of dilation steps to apply.
    :param list dilation_ids: a list to restrict the dilation to the given ids.
    :raise ValueError: if the microstructure has no associated grain map.
    """
    from scipy import ndimage
    # bug fix: the attribute always exists (set in __init__), so test it for
    # None instead of hasattr; the unreachable 'return' that followed the
    # raise has also been removed
    if self.grain_map is None:
        raise ValueError('microstructure %s must have an associated grain_map attribute' % self.name)
    grain_map = self.grain_map.copy()
    # get rid of overlap regions flagged by -1
    grain_map[grain_map == -1] = 0
    # carry out dilation in iterative steps
    for step in range(dilation_steps):
        if dilation_ids:
            grains = np.isin(grain_map, dilation_ids).astype(np.uint8)
        else:
            grains = (grain_map > 0).astype(np.uint8)
        # ndimage.binary_dilation replaces the deprecated ndimage.morphology namespace
        grains_dil = ndimage.binary_dilation(grains).astype(np.uint8)
        # bug fix: only restrict the dilation when a mask is actually set
        # (self.mask is None by default and has no astype method)
        if self.mask is not None:
            # only dilate within the mask
            grains_dil *= self.mask.astype(np.uint8)
        todo = (grains_dil - grains)
        # get the list of voxels for this dilation step
        X, Y, Z = np.where(todo)
        xstart = X - 1
        xend = X + 1
        ystart = Y - 1
        yend = Y + 1
        zstart = Z - 1
        zend = Z + 1
        # check bounds
        xstart[xstart < 0] = 0
        ystart[ystart < 0] = 0
        zstart[zstart < 0] = 0
        xend[xend > grain_map.shape[0] - 1] = grain_map.shape[0] - 1
        yend[yend > grain_map.shape[1] - 1] = grain_map.shape[1] - 1
        zend[zend > grain_map.shape[2] - 1] = grain_map.shape[2] - 1
        dilation = np.zeros_like(X).astype(np.int16)
        print('%d voxels to replace' % len(X))
        for i in range(len(X)):
            neighbours = grain_map[xstart[i]:xend[i] + 1, ystart[i]:yend[i] + 1, zstart[i]:zend[i] + 1]
            if np.any(neighbours):
                # at least one neighboring voxel is non zero: assign the
                # smallest positive neighbouring grain id to this voxel
                dilation[i] = min(neighbours[neighbours > 0])
        grain_map[X, Y, Z] = dilation
        print('dilation step %d done' % (step + 1))
    # finally assign the dilated grain map to the microstructure
    self.grain_map = grain_map
def compute_grain_center(self, gid):
    """Compute the center of mass of a grain given its id.

    :param int gid: the grain id to consider.
    :return: a ndarray with the center of mass in mm units
        (or voxel units if the voxel_size is not specified).
    :raise ValueError: if the grain is not found in the grain map.
    """
    # local import so the method does not rely on a module level ndimage name
    from scipy import ndimage
    # isolate the grain within the complete grain map; find_objects expects
    # an integer label array, so cast the boolean mask to uint8
    grain_bin = (self.grain_map == gid).astype(np.uint8)
    slices = ndimage.find_objects(grain_bin)
    if not slices:
        raise ValueError('warning grain %d not found in grain map' % gid)
    sl = slices[0]
    offset = np.array([sl[0].start, sl[1].start, sl[2].start])
    # ndimage.center_of_mass replaces the deprecated ndimage.measurements namespace
    local_com = ndimage.center_of_mass(grain_bin[sl])
    com = self.voxel_size * (offset + local_com - 0.5 * np.array(self.grain_map.shape))
    return com
def recompute_grain_centers(self, verbose=False):
    """Compute and assign the center of mass of every grain in the microstructure.

    Each grain center is computed from the grain map using its center of mass
    and stored in the grain.center attribute (in mm if the voxel size is
    specified, in voxel units otherwise). Grains absent from the grain map
    are skipped.

    .. note::

      A grain map need to be associated with this microstructure instance
      for the method to run.

    :param bool verbose: flag for verbose mode.
    """
    if not hasattr(self, 'grain_map'):
        print('warning: need a grain map to recompute the center of mass of the grains')
        return
    for grain in self.grains:
        try:
            center = self.compute_grain_center(grain.id)
        except ValueError:
            print('skipping grain %d' % grain.id)
            continue
        if verbose:
            print('grain %d center: %.3f, %.3f, %.3f' % (grain.id, center[0], center[1], center[2]))
        grain.center = center
def print_zset_material_block(self, mat_file, grain_prefix='_ELSET'):
    """Write the z-set material block for this microstructure to 'elset_list.txt'.

    One line per grain is written, with the elset name, the material file
    and the three Euler angles of the grain orientation.

    :param str mat_file: The name of the file where the material behaviour is located
    :param str grain_prefix: The grain prefix used to name the elsets corresponding to the different grains
    """
    with open('elset_list.txt', 'w') as f:
        for g in self.grains:
            o = g.orientation
            f.write(' **elset %s%d *file %s *integration theta_method_a 1.0 1.e-9 150 *rotation %7.3f %7.3f %7.3f\n'
                    % (grain_prefix, g.id, mat_file, o.phi1(), o.Phi(), o.phi2()))
def to_h5(self):
    """Write the microstructure as a hdf5 file.

    The file is named '<name>.h5' and holds the crystal structure
    (EnsembleData), the per grain data (FeatureData) and, when present,
    the grain map and mask (CellData).
    """
    import time
    from pymicro import __version__ as pymicro_version
    print('opening file %s.h5 for writing' % self.name)
    # use a context manager so the file is closed even if an error occurs
    with h5py.File('%s.h5' % self.name, 'w') as f:
        # np.bytes_ replaces the np.string_ alias removed in numpy 2.0
        f.attrs['Pymicro_Version'] = np.bytes_(pymicro_version)
        f.attrs['HDF5_Version'] = h5py.version.hdf5_version
        f.attrs['h5py_version'] = h5py.version.version
        f.attrs['file_time'] = time.time()
        f.attrs['microstructure_name'] = self.name
        if hasattr(self, 'data_dir'):
            f.attrs['data_dir'] = self.data_dir
        # ensemble data
        ed = f.create_group('EnsembleData')
        cs = ed.create_group('CrystalStructure')
        sym = self.get_lattice().get_symmetry()
        cs.attrs['symmetry'] = sym.to_string()
        cs.create_dataset('LatticeParameters',
                          data=np.array(self.get_lattice().get_lattice_parameters(), dtype=np.float32))
        # feature data
        fd = f.create_group('FeatureData')
        # the builtin int replaces np.int, removed in numpy 1.24
        fd.create_dataset('grain_ids',
                          data=np.array([g.id for g in self.grains], dtype=int))
        fd.create_dataset('R_vectors',
                          data=np.array([g.orientation.rod for g in self.grains], dtype=np.float32))
        fd.create_dataset('centers',
                          data=np.array([g.center for g in self.grains], dtype=np.float32))
        # cell data
        cd = f.create_group('CellData')
        if hasattr(self, 'grain_map') and self.grain_map is not None:
            gm = cd.create_dataset('grain_ids', data=self.grain_map, compression='gzip', compression_opts=9)
            gm.attrs['voxel_size'] = self.voxel_size
        if hasattr(self, 'mask') and self.mask is not None:
            ma = cd.create_dataset('mask', data=self.mask, compression='gzip', compression_opts=9)
            ma.attrs['voxel_size'] = self.voxel_size
        print('done writing')
def from_h5(file_path):
    """read a microstructure object from a HDF5 file.

    :param str file_path: the path to the file to read.
    :return: the new `Microstructure` instance created from the file.
    """
    with h5py.File(file_path, 'r') as f:
        micro = Microstructure(name=f.attrs['microstructure_name'])
        if 'symmetry' in f['EnsembleData/CrystalStructure'].attrs:
            sym = f['EnsembleData/CrystalStructure'].attrs['symmetry']
            parameters = f['EnsembleData/CrystalStructure/LatticeParameters'][()]
            micro.set_lattice(Lattice.from_symmetry(Symmetry.from_string(sym), parameters))
        if 'data_dir' in f.attrs:
            micro.data_dir = f.attrs['data_dir']
        # load feature data
        if 'R_vectors' in f['FeatureData']:
            print('some grains')
            rods = f['FeatureData/R_vectors'][()]
            print(rods.shape)
            if 'grain_ids' in f['FeatureData']:
                ids = f['FeatureData/grain_ids'][()]
            else:
                ids = range(1, 1 + rods.shape[0])
            if 'centers' in f['FeatureData']:
                grain_centers = f['FeatureData/centers'][()]
            else:
                grain_centers = np.zeros_like(rods)
            for i in range(rods.shape[0]):
                grain = Grain(ids[i], Orientation.from_rodrigues(rods[i, :]))
                grain.center = grain_centers[i]
                micro.grains.append(grain)
        # load cell data
        if 'grain_ids' in f['CellData']:
            micro.grain_map = f['CellData/grain_ids'][()]
            if 'voxel_size' in f['CellData/grain_ids'].attrs:
                micro.voxel_size = f['CellData/grain_ids'].attrs['voxel_size']
        if 'mask' in f['CellData']:
            micro.mask = f['CellData/mask'][()]
            if 'voxel_size' in f['CellData/mask'].attrs:
                micro.voxel_size = f['CellData/mask'].attrs['voxel_size']
        return micro
def to_dream3d(self):
    """Write the microstructure as a hdf5 file compatible with DREAM3D.

    The file '<name>.h5' mimics the DREAM3D layout with Pipeline,
    DataContainers (EnsembleData, FeatureData, _SIMPL_GEOMETRY) and
    DataContainerBundles groups.
    """
    import time
    # use a context manager so the file is always closed; np.bytes_
    # replaces the np.string_ alias removed in numpy 2.0
    with h5py.File('%s.h5' % self.name, 'w') as f:
        f.attrs['FileVersion'] = np.bytes_('7.0')
        f.attrs['DREAM3D Version'] = np.bytes_('6.1.77.d28a796')
        f.attrs['HDF5_Version'] = h5py.version.hdf5_version
        f.attrs['h5py_version'] = h5py.version.version
        f.attrs['file_time'] = time.time()
        # pipeline group (empty here)
        pipeline = f.create_group('Pipeline')
        pipeline.attrs['Number_Filters'] = np.int32(0)
        # create the data container group
        data_containers = f.create_group('DataContainers')
        m = data_containers.create_group('DataContainer')
        # ensemble data
        ed = m.create_group('EnsembleData')
        ed.attrs['AttributeMatrixType'] = np.uint32(11)
        ed.attrs['TupleDimensions'] = np.uint64(2)
        cryst_structure = ed.create_dataset('CrystalStructures', data=np.array([[999], [1]], dtype=np.uint32))
        cryst_structure.attrs['ComponentDimensions'] = np.uint64(1)
        cryst_structure.attrs['DataArrayVersion'] = np.int32(2)
        cryst_structure.attrs['ObjectType'] = np.bytes_('DataArray<uint32_t>')
        cryst_structure.attrs['Tuple Axis Dimensions'] = np.bytes_('x=2')
        cryst_structure.attrs['TupleDimensions'] = np.uint64(2)
        mat_name = ed.create_dataset('MaterialName', data=[a.encode('utf8') for a in ['Invalid Phase', 'Unknown']])
        mat_name.attrs['ComponentDimensions'] = np.uint64(1)
        mat_name.attrs['DataArrayVersion'] = np.int32(2)
        mat_name.attrs['ObjectType'] = np.bytes_('StringDataArray')
        mat_name.attrs['Tuple Axis Dimensions'] = np.bytes_('x=2')
        mat_name.attrs['TupleDimensions'] = np.uint64(2)
        # feature data
        fd = m.create_group('FeatureData')
        fd.attrs['AttributeMatrixType'] = np.uint32(7)
        fd.attrs['TupleDimensions'] = np.uint64(len(self.grains))
        avg_euler = fd.create_dataset('AvgEulerAngles',
                                      data=np.array([g.orientation.euler for g in self.grains], dtype=np.float32))
        avg_euler.attrs['ComponentDimensions'] = np.uint64(3)
        avg_euler.attrs['DataArrayVersion'] = np.int32(2)
        avg_euler.attrs['ObjectType'] = np.bytes_('DataArray<float>')
        avg_euler.attrs['Tuple Axis Dimensions'] = np.bytes_('x=%d' % len(self.grains))
        avg_euler.attrs['TupleDimensions'] = np.uint64(len(self.grains))
        # geometry
        geom = m.create_group('_SIMPL_GEOMETRY')
        geom.attrs['GeometryType'] = np.uint32(999)
        geom.attrs['GeometryTypeName'] = np.bytes_('UnkownGeometry')
        # create the data container bundles group
        f.create_group('DataContainerBundles')
@staticmethod
def from_dream3d(file_path, main_key='DataContainers', data_container='DataContainer', grain_data='FeatureData',
                 grain_orientations='AvgEulerAngles', orientation_type='euler', grain_centroid='Centroids'):
    """Read a microstructure from a hdf5 file.

    :param str file_path: the path to the hdf5 file to read.
    :param str main_key: the string describing the root key.
    :param str data_container: the string describing the data container group in the hdf5 file.
    :param str grain_data: the string describing the grain data group in the hdf5 file.
    :param str grain_orientations: the string describing the average grain orientations in the hdf5 file.
    :param str orientation_type: the string describing the descriptor used for orientation data.
    :param str grain_centroid: the string describing the grain centroid in the hdf5 file.
    :return: a `Microstructure` instance created from the hdf5 file.
    :raise ValueError: if the orientation type is not supported.
    """
    micro = Microstructure()
    with h5py.File(file_path, 'r') as f:
        grain_data_path = '%s/%s/%s' % (main_key, data_container, grain_data)
        # modern h5py indexing ([()]) instead of the .value attribute removed in h5py 3.0
        orientations = f[grain_data_path][grain_orientations][()]
        if grain_centroid:
            centroids = f[grain_data_path][grain_centroid][()]
            offset = 0
            if len(centroids) < len(orientations):
                offset = 1  # if grain 0 has not a centroid
        for i in range(len(orientations)):
            if orientations[i, 0] == 0. and orientations[i, 1] == 0. and orientations[i, 2] == 0.:
                # skip grain 0 which is always (0., 0., 0.)
                print('skipping (0., 0., 0.)')
                continue
            if orientation_type == 'euler':
                g = Grain(i, Orientation.from_euler(orientations[i] * 180 / np.pi))
            elif orientation_type == 'rodrigues':
                g = Grain(i, Orientation.from_rodrigues(orientations[i]))
            else:
                # bug fix: an unknown descriptor previously crashed later with
                # a confusing NameError on g; fail fast with a clear message
                raise ValueError('unsupported orientation type: %s' % orientation_type)
            if grain_centroid:
                g.center = centroids[i - offset]
            micro.grains.append(g)
    return micro
@staticmethod
def from_dct(data_dir='.', grain_file='index.mat', vol_file='phase_01_vol.mat', mask_file='volume_mask.mat',
             use_dct_path=True, verbose=True):
    """Create a microstructure from a DCT reconstruction.

    DCT reconstructions are stored in several files. The indexed grain informations are stored in a matlab file in
    the '4_grains/phase_01' folder. Then, the reconstructed volume file (labeled image) is stored
    in the '5_reconstruction' folder as an hdf5 file, possibly stored alongside a mask file coming from the
    absorption reconstruction.

    :param str data_dir: the path to the folder containing the reconstruction data.
    :param str grain_file: the name of the file containing grains info.
    :param str vol_file: the name of the volume file.
    :param str mask_file: the name of the mask file.
    :param bool use_dct_path: if True, the grain_file should be located in 4_grains/phase_01 folder and the
    vol_file and mask_file in the 5_reconstruction folder.
    :param bool verbose: activate verbose mode.
    :return: a `Microstructure` instance created from the DCT reconstruction.
    :raise ValueError: if the grain file cannot be found.
    """
    if data_dir == '.':
        data_dir = os.getcwd()
    if data_dir.endswith(os.sep):
        data_dir = data_dir[:-1]
    scan = data_dir.split(os.sep)[-1]
    print('creating microstructure for DCT scan %s' % scan)
    micro = Microstructure(name=scan)
    micro.data_dir = data_dir
    if use_dct_path:
        index_path = os.path.join(data_dir, '4_grains', 'phase_01', grain_file)
    else:
        index_path = os.path.join(data_dir, grain_file)
    print(index_path)
    if not os.path.exists(index_path):
        # bug fix: removed the unreachable 'return None' that followed this raise
        raise ValueError('%s not found, please specify a valid path to the grain file.' % index_path)
    from scipy.io import loadmat
    index = loadmat(index_path)
    # hard coded indices into the matlab 'cryst' struct; assumes the DCT
    # file layout used by the reconstruction code — TODO confirm
    micro.voxel_size = index['cryst'][0][0][25][0][0]
    # grab the crystal lattice
    lattice_params = index['cryst'][0][0][3][0]
    sym = Symmetry.from_string(index['cryst'][0][0][7][0])
    print('creating crystal lattice {} ({}) with parameters {}'.format(index['cryst'][0][0][0][0], sym, lattice_params))
    lattice_params[:3] /= 10  # angstrom to nm
    lattice = Lattice.from_parameters(*lattice_params, symmetry=sym)
    micro.set_lattice(lattice)
    # add all grains to the microstructure
    for i in range(len(index['grain'][0])):
        gid = index['grain'][0][i][0][0][0][0][0]
        rod = index['grain'][0][i][0][0][3][0]
        g = Grain(gid, Orientation.from_rodrigues(rod))
        g.center = index['grain'][0][i][0][0][15][0]
        micro.grains.append(g)
    # load the grain map if available
    if use_dct_path:
        grain_map_path = os.path.join(data_dir, '5_reconstruction', vol_file)
    else:
        grain_map_path = os.path.join(data_dir, vol_file)
    if os.path.exists(grain_map_path):
        with h5py.File(grain_map_path, 'r') as f:
            # because how matlab writes the data, we need to swap X and Z axes in the DCT volume
            micro.grain_map = f['vol'][()].transpose(2, 1, 0)
            if verbose:
                print('loaded grain ids volume with shape: {}'.format(micro.grain_map.shape))
    # load the mask if available
    if use_dct_path:
        mask_path = os.path.join(data_dir, '5_reconstruction', mask_file)
    else:
        mask_path = os.path.join(data_dir, mask_file)
    if os.path.exists(mask_path):
        with h5py.File(mask_path, 'r') as f:
            micro.mask = f['vol'][()].transpose(2, 1, 0).astype(np.uint8)
            if verbose:
                print('loaded mask volume with shape: {}'.format(micro.mask.shape))
    return micro
def to_xml(self, doc):
    """Populate the given xml Document with a representation of this microstructure.

    A root 'Microstructure' element is appended to the document, holding the
    name and one child element per grain.
    """
    root = doc.createElement('Microstructure')
    doc.appendChild(root)
    name_element = doc.createElement('Name')
    root.appendChild(name_element)
    name_element.appendChild(doc.createTextNode(self.name))
    grains_element = doc.createElement('Grains')
    root.appendChild(grains_element)
    for index, grain in enumerate(self.grains):
        mesh_file = os.path.join(self.name, '%s_%d.vtu' % (self.name, index))
        grains_element.appendChild(grain.to_xml(doc, mesh_file))
def save(self):
"""Saving the microstructure to the disk.
Save the metadata as a XML file and when available, also save the
vtk representation of the grains.
"""
# save the microstructure instance as xml
doc = Document()
self.to_xml(doc)
xml_file_name = '%s.xml' % self.name
print('writing ' + xml_file_name)
f = open(xml_file_name, 'wb')
doc.writexml(f, encoding='utf-8')
f.close()
# now save the vtk representation
if self.vtkmesh != None:
import vtk
vtk_file_name = '%s.vtm' % self.name
print('writing ' + vtk_file_name)
writer = vtk.vtkXMLMultiBlockDataWriter()
writer.SetFileName(vtk_file_name)
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
writer.SetInputData(self.vtkmesh)
else:
writer.SetInput(self.vtkmesh)
writer.Write()
@staticmethod
def merge_microstructures(micros, overlap, plot=False):
"""Merge two `Microstructure` instances together.
The function works for two microstructures with grain maps and an overlap between them. Temporarily
`Microstructures` restricted to the overlap regions are created and grains are matched between the two based
on a disorientation tolerance.
.. note::
The two microstructure must have the same crystal lattice and the same voxel_size for this method to run.
:param list micros: a list containing the two microstructures to merge.
:param int overlap: the overlap to use.
:param bool plot: a flag to plot some results.
:return: a new `Microstructure`instance containing the merged microstructure.
"""
from scipy import ndimage
# perform some sanity checks
for i in range(2):
if not hasattr(micros[i], 'grain_map'):
raise ValueError('microstructure instance %s must have an associated grain_map attribute' % micros[i].name)
if micros[0].get_lattice() != micros[1].get_lattice():
raise ValueError('both microstructure must have the same crystal lattice')
lattice = micros[0].get_lattice()
if micros[0].voxel_size != micros[1].voxel_size:
raise ValueError('both microstructure must have the same voxel size')
voxel_size = micros[0].voxel_size
# create two microstructure of the overlapping regions: end slices in first scan and first slices in second scan
grain_ids_ol1 = micros[0].grain_map[:, :, micros[0].grain_map.shape[2] - overlap:]
grain_ids_ol2 = micros[1].grain_map[:, :, :overlap]
dims_ol1 = np.array(grain_ids_ol1.shape)
print(dims_ol1)
dims_ol2 = np.array(grain_ids_ol2.shape)
print(dims_ol2)
# build a microstructure for the overlap region in each volumes
grain_ids_ols = [grain_ids_ol1, grain_ids_ol2]
micros_ol = []
for i in range(2):
grain_ids_ol = grain_ids_ols[i]
ids_ol = np.unique(grain_ids_ol)
print(ids_ol)
# difference due to the crop (restricting the grain map to the overlap region)
#offset_mm = (2 * i - 1) * voxel_size * np.array([0., 0., grain_ids_ol.shape[2] - 0.5 * micros[i].grain_map.shape[2]])
# here we use an ad-hoc offset to voxel (0, 0, 0) in the full volume: offset is zero for the second volume
offset_px = (i - 1) * np.array([0., 0., grain_ids_ol.shape[2] - micros[i].grain_map.shape[2]])
offset_mm = voxel_size * offset_px
print('offset [px] is {}'.format(offset_px))
print('offset [mm] is {}'.format(offset_mm))
# make the microstructure
micro_ol = Microstructure(name='%sol_' % micros[i].name)
print('* building overlap microstructure %s' % micro_ol.name)
micro_ol.set_lattice(lattice)
micro_ol.grain_map = grain_ids_ol
for gid in ids_ol:
if gid < 1:
print('skipping %d' % gid)
continue
g = Grain(gid, micros[i].get_grain(gid).orientation)
array_bin = (grain_ids_ol == gid).astype(np.uint8)
local_com = ndimage.measurements.center_of_mass(array_bin, grain_ids_ol)
#print('local_com = {}'.format(local_com))
com_px = (local_com + offset_px - 0.5 * np.array(micros[i].grain_map.shape))
#print('com [px] = {}'.format(com_px))
com_mm = voxel_size * com_px
print('grain %2d center: %6.3f, %6.3f, %6.3f' % (gid, com_mm[0], com_mm[1], com_mm[2]))
#array_bin = (grain_ids_ol == gid).astype(np.uint8)
#local_com = ndimage.measurements.center_of_mass(array_bin, grain_ids_ol)
#com_mm = voxel_size * (local_com - 0.5 * np.array(grain_ids_ol.shape)) + offset
#print('grain %2d position: %6.3f, %6.3f, %6.3f' % (gid, com_mm[0], com_mm[1], com_mm[2]))
g.center = com_mm
micro_ol.grains.append(g)
#TODO recalculate position as we look at a truncated volume
'''
micro_ol.recompute_grain_centers(verbose=True)
for g in micro_ol.grains:
g.center += offset_mm
'''
# add the overlap microstructure to the list
micros_ol.append(micro_ol)
# match grain from micros_ol[1] to micros_ol[0] (the reference)
matched, _, unmatched = micros_ol[0].match_grains(micros_ol[1], verbose=True)
# the affine transform does not seem to work, using a simpler method here
delta_avg = np.zeros(3)
for i in range(len(matched)):
# look at the pair of grains
match = matched[i]
delta = micros_ol[0].get_grain(match[0]).center - micros_ol[1].get_grain(match[1]).center
delta_avg += delta
delta_avg /= len(matched)
print('average shift (pixels):')
print(delta_avg / voxel_size)
translation = delta_avg
translation_voxel = (delta_avg / voxel_size).astype(int)
print('translation is in mm: {}'.format(translation))
print('translation is in voxels {}'.format(translation_voxel))
"""
from pymicro.view.vol_utils import compute_affine_transform
# compute the affine transform
n_points = len(matched)
fixed = np.zeros((n_points, 3))
moving = np.zeros((n_points, 3))
moved = np.zeros_like(moving)
# markers in ref grain map
for i in range(n_points):
fixed[i] = micros_ol[0].get_grain(matched[i][0]).center
moving[i] = micros_ol[1].get_grain(matched[i][1]).center
# call the registration method
translation, transformation = compute_affine_transform(fixed, moving)
invt = np.linalg.inv(transformation)
# check what are now the points after transformation
fixed_centroid = np.average(fixed, axis=0)
moving_centroid = np.average(moving, axis=0)
print('fixed centroid: {}'.format(fixed_centroid))
print('moving centroid: {}'.format(moving_centroid))
for j in range(n_points):
moved[j] = fixed_centroid + np.dot(transformation, moving[j] - moving_centroid)
print('point %d will move to (%6.3f, %6.3f, %6.3f) to be compared with (%6.3f, %6.3f, %6.3f)' % (
j, moved[j, 0], moved[j, 1], moved[j, 2], fixed[j, 0], fixed[j, 1], fixed[j, 2]))
print('transformation is:')
print(invt)
# offset and translation, here we only look for rigid body translation
offset = -np.dot(invt, translation)
print(translation, offset)
translation_voxel = (translation / voxel_size).astype(int)
"""
print(translation_voxel)
# look at ids in the reference volume
ids_ref = np.unique(micros[0].grain_map)
ids_ref_list = ids_ref.tolist()
if -1 in ids_ref_list:
ids_ref_list.remove(-1) # grain overlap
if 0 in ids_ref_list:
ids_ref_list.remove(0) # background
print(ids_ref_list)
id_offset = max(ids_ref_list)
print('grain ids in volume %s will be offset by %d' % (micros[1].name, id_offset))
# gather ids in the merging volume (will be modified)
ids_mrg = np.unique(micros[1].grain_map)
ids_mrg_list = ids_mrg.tolist()
if -1 in ids_mrg_list:
ids_mrg_list.remove(-1) # grain overlap
if 0 in ids_mrg_list:
ids_mrg_list.remove(0) # background
print(ids_mrg_list)
# prepare a volume with the same size as the second grain map, with grain ids renumbered and (X, Y) translations applied.
grain_map_translated = micros[1].grain_map.copy()
print('renumbering grains in the overlap region of volume %s' % micros[1].name)
for match in matched:
ref_id, other_id = match
print('replacing %d by %d' % (other_id, ref_id))
#TODO should flag those grains so their center can be recomputed
grain_map_translated[micros[1].grain_map == other_id] = ref_id
try:
ids_mrg_list.remove(other_id)
except ValueError:
        # this can happen if a grain in the reference volume was matched to more than 1 grain
print('%d was not in list anymore' % other_id)
# also renumber the rest using the offset
renumbered_grains = []
for i, other_id in enumerate(ids_mrg_list):
new_id = id_offset + i + 1
grain_map_translated[micros[1].grain_map == other_id] = new_id
print('replacing %d by %d' % (other_id, new_id))
renumbered_grains.append([other_id, new_id])
# apply translation along the (X, Y) axes
grain_map_translated = np.roll(grain_map_translated, translation_voxel[:2], (0, 1))
check = overlap // 2
print(grain_map_translated.shape)
print(overlap)
print(translation_voxel[2] + check)
if plot:
fig = plt.figure(figsize=(15, 7))
ax1 = fig.add_subplot(1, 3, 1)
ax1.imshow(micros[0].grain_map[:, :, translation_voxel[2] + check].T, vmin=0)
plt.axis('off')
plt.title('micros[0].grain_map (ref)')
ax2 = fig.add_subplot(1, 3, 2)
ax2.imshow(grain_map_translated[:, :, check].T, vmin=0)
plt.axis('off')
plt.title('micros[1].grain_map (renumbered)')
ax3 = fig.add_subplot(1, 3, 3)
same_voxel = micros[0].grain_map[:, :, translation_voxel[2] + check] == grain_map_translated[:, :, check]
print(same_voxel)
#print(same_voxel.shape)
#ax3.imshow(same_voxel.T, vmin=0, vmax=2)
plt.axis('off')
plt.title('voxels that are identicals')
plt.savefig('merging_check1.pdf')
# start the merging: the first volume is the reference
overlap = micros[0].grain_map.shape[2] - translation_voxel[2]
print('overlap is %d voxels' % overlap)
z_shape = micros[0].grain_map.shape[2] + micros[1].grain_map.shape[2] - overlap
print('vertical size will be: %d + %d + %d = %d' % (
micros[0].grain_map.shape[2] - overlap, overlap, micros[1].grain_map.shape[2] - overlap, z_shape))
shape_merged = np.array(micros[0].grain_map.shape) + [0, 0, micros[1].grain_map.shape[2] - overlap]
print('initializing volume with shape {}'.format(shape_merged))
grain_ids_merged = np.zeros(shape_merged, dtype=np.int16)
print(micros[0].grain_map.shape)
print(micros[1].grain_map.shape)
# add the non-overlapping part of the 2 volumes as is
grain_ids_merged[:, :, :micros[0].grain_map.shape[2] - overlap] = micros[0].grain_map[:, :, :-overlap]
grain_ids_merged[:, :, micros[0].grain_map.shape[2]:] = grain_map_translated[:, :, overlap:]
# look at vertices with the same label
print(micros[0].grain_map[:, :, translation_voxel[2]:].shape)
print(grain_map_translated[:, :, :overlap].shape)
print('translation_voxel[2] = %d' % translation_voxel[2])
print('micros[0].grain_map.shape[2] - overlap = %d' % (micros[0].grain_map.shape[2] - overlap))
same_voxel = micros[0].grain_map[:, :, translation_voxel[2]:] == grain_map_translated[:, :, :overlap]
print(same_voxel.shape)
grain_ids_merged[:, :, translation_voxel[2]:micros[0].grain_map.shape[2]] = grain_map_translated[:, :, :overlap] * same_voxel
# look at vertices with a single label
single_voxels_0 = (micros[0].grain_map[:, :, translation_voxel[2]:] > 0) & (grain_map_translated[:, :, :overlap] == 0)
print(single_voxels_0.shape)
grain_ids_merged[:, :, translation_voxel[2]:micros[0].grain_map.shape[2]] += micros[0].grain_map[:, :, translation_voxel[2]:] * single_voxels_0
single_voxels_1 = (grain_map_translated[:, :, :overlap] > 0) & (micros[0].grain_map[:, :, translation_voxel[2]:] == 0)
print(single_voxels_1.shape)
grain_ids_merged[:, :, translation_voxel[2]:micros[0].grain_map.shape[2]] += grain_map_translated[:, :,
:overlap] * single_voxels_1
if plot:
fig = plt.figure(figsize=(14, 10))
ax1 = fig.add_subplot(1, 2, 1)
ax1.imshow(grain_ids_merged[:, 320, :].T)
plt.axis('off')
plt.title('XZ slice')
ax2 = fig.add_subplot(1, 2, 2)
ax2.imshow(grain_ids_merged[320, :, :].T)
plt.axis('off')
plt.title('YZ slice')
plt.savefig('merging_check2.pdf')
if hasattr(micros[0], 'mask') and hasattr(micros[1], 'mask'):
mask_translated = np.roll(micros[1].mask, translation_voxel[:2], (0, 1))
# merging the masks
mask_merged = np.zeros(shape_merged, dtype=np.uint8)
# add the non-overlapping part of the 2 volumes as is
mask_merged[:, :, :micros[0].mask.shape[2] - overlap] = micros[0].mask[:, :, :-overlap]
mask_merged[:, :, micros[0].grain_map.shape[2]:] = mask_translated[:, :, overlap:]
# look at vertices with the same label
same_voxel = micros[0].mask[:, :, translation_voxel[2]:] == mask_translated[:, :, :overlap]
print(same_voxel.shape)
mask_merged[:, :, translation_voxel[2]:micros[0].mask.shape[2]] = mask_translated[:, :, :overlap] * same_voxel
# look at vertices with a single label
single_voxels_0 = (micros[0].mask[:, :, translation_voxel[2]:] > 0) & (mask_translated[:, :, :overlap] == 0)
mask_merged[:, :, translation_voxel[2]:micros[0].mask.shape[2]] += (
micros[0].mask[:, :, translation_voxel[2]:] * single_voxels_0).astype(np.uint8)
single_voxels_1 = (mask_translated[:, :, :overlap] > 0) & (micros[0].mask[:, :, translation_voxel[2]:] == 0)
mask_merged[:, :, translation_voxel[2]:micros[0].mask.shape[2]] += (
mask_translated[:, :, :overlap] * single_voxels_1).astype(np.uint8)
if plot:
fig = plt.figure(figsize=(14, 10))
ax1 = fig.add_subplot(1, 2, 1)
ax1.imshow(mask_merged[:, 320, :].T)
plt.axis('off')
plt.title('XZ slice')
ax2 = fig.add_subplot(1, 2, 2)
ax2.imshow(mask_merged[320, :, :].T)
plt.axis('off')
plt.title('YZ slice')
plt.savefig('merging_check3.pdf')
# merging finished, build the new microstructure instance
merged_micro = Microstructure(name='%s-%s' % (micros[0].name, micros[1].name))
merged_micro.set_lattice(lattice)
# add all grains from the reference volume
merged_micro.grains = micros[0].grains
#TODO recompute center of masses of grains in the overlap region
print(renumbered_grains)
# add all new grains from the merged volume
for i in range(len(renumbered_grains)):
other_id, new_id = renumbered_grains[i]
g = micros[1].get_grain(other_id)
new_g = Grain(new_id, Orientation.from_rodrigues(g.orientation.rod))
new_g.center = g.center
print('adding grain with new id %d (was %d)' % (new_id, other_id))
merged_micro.grains.append(new_g)
print('%d grains in merged microstructure' % merged_micro.get_number_of_grains())
# add the full grain map
merged_micro.grain_map = grain_ids_merged
if hasattr(micros[0], 'mask') and hasattr(micros[1], 'mask'):
merged_micro.mask = mask_merged
return merged_micro
new method dilate_grain
"""
The microstructure module provides elementary classes to describe a
crystallographic granular microstructure such as mostly present in
metallic materials.
It contains several classes which are used to describe a microstructure
composed of several grains, each one having its own crystallographic
orientation:
* :py:class:`~pymicro.crystal.microstructure.Microstructure`
* :py:class:`~pymicro.crystal.microstructure.Grain`
* :py:class:`~pymicro.crystal.microstructure.Orientation`
"""
import numpy as np
import os
import vtk
import h5py
from scipy import ndimage
from matplotlib import pyplot as plt, colors, cm
from xml.dom.minidom import Document, parse
from pymicro.crystal.lattice import Lattice, Symmetry
from pymicro.crystal.quaternion import Quaternion
from math import atan2, pi
class Orientation:
"""Crystallographic orientation class.
This follows the passive rotation definition which means that it brings
the sample coordinate system into coincidence with the crystal coordinate
system. Then one may express a vector :math:`V_c` in the crystal coordinate system
from the vector in the sample coordinate system :math:`V_s` by:
.. math::
V_c = g.V_s
and inversely (because :math:`g^{-1}=g^T`):
.. math::
V_s = g^T.V_c
    Most of the code to handle rotations has been written to comply with the
    conventions laid out in :cite:`Rowenhorst2015`.
"""
def __init__(self, matrix):
"""Initialization from the 9 components of the orientation matrix."""
g = np.array(matrix, dtype=np.float64).reshape((3, 3))
self._matrix = g
self.euler = Orientation.OrientationMatrix2Euler(g)
self.rod = Orientation.OrientationMatrix2Rodrigues(g)
self.quat = Orientation.OrientationMatrix2Quaternion(g, P=1)
def orientation_matrix(self):
"""Returns the orientation matrix in the form of a 3x3 numpy array."""
return self._matrix
def __repr__(self):
"""Provide a string representation of the class."""
s = 'Crystal Orientation'
s += '\norientation matrix = %s' % self._matrix.view()
s += '\nEuler angles (degrees) = (%8.3f,%8.3f,%8.3f)' % (self.phi1(), self.Phi(), self.phi2())
s += '\nRodrigues vector = %s' % self.OrientationMatrix2Rodrigues(self._matrix)
s += '\nQuaternion = %s' % self.OrientationMatrix2Quaternion(self._matrix, P=1)
return s
@staticmethod
def cube():
"""Create the particular crystal orientation called Cube and which
corresponds to euler angle (0, 0, 0)."""
return Orientation.from_euler((0., 0., 0.))
@staticmethod
def brass():
"""Create the particular crystal orientation called Brass and which
corresponds to euler angle (35.264, 45, 0)."""
return Orientation.from_euler((35.264, 45., 0.))
@staticmethod
def copper():
"""Create the particular crystal orientation called Copper and which
corresponds to euler angle (90, 35.264, 45)."""
return Orientation.from_euler((90., 35.264, 45.))
@staticmethod
def s3():
"""Create the particular crystal orientation called S3 and which
corresponds to euler angle (59, 37, 63)."""
return Orientation.from_euler((58.980, 36.699, 63.435))
@staticmethod
def goss():
"""Create the particular crystal orientation called Goss and which
corresponds to euler angle (0, 45, 0)."""
return Orientation.from_euler((0., 45., 0.))
@staticmethod
def shear():
"""Create the particular crystal orientation called shear and which
corresponds to euler angle (45, 0, 0)."""
return Orientation.from_euler((45., 0., 0.))
@staticmethod
def random():
"""Create a random crystal orientation."""
from random import random
from math import acos
phi1 = random() * 360.
Phi = 180. * acos(2 * random() - 1) / np.pi
phi2 = random() * 360.
return Orientation.from_euler([phi1, Phi, phi2])
def get_ipf_colour(self, axis=np.array([0., 0., 1.]), symmetry=Symmetry.cubic):
"""Compute the IPF (inverse pole figure) colour for this orientation.
Given a particular axis expressed in the laboratory coordinate system,
one can compute the so called IPF colour based on that direction
expressed in the crystal coordinate system as :math:`[x_c,y_c,z_c]`.
There is only one tuple (u,v,w) such that:
.. math::
[x_c,y_c,z_c]=u.[0,0,1]+v.[0,1,1]+w.[1,1,1]
and it is used to assign the RGB colour.
"""
axis /= np.linalg.norm(axis)
# find the axis lying in the fundamental zone
for sym in symmetry.symmetry_operators():
Osym = np.dot(sym, self.orientation_matrix())
Vc = np.dot(Osym, axis)
if Vc[2] < 0:
Vc *= -1. # using the upward direction
uvw = np.array([Vc[2] - Vc[1], Vc[1] - Vc[0], Vc[0]])
uvw /= np.linalg.norm(uvw)
uvw /= max(uvw)
if (uvw[0] >= 0. and uvw[0] <= 1.0) and (uvw[1] >= 0. and uvw[1] <= 1.0) and (
uvw[2] >= 0. and uvw[2] <= 1.0):
# print('found sym for sst')
break
return uvw
def fzDihedral(rod, n):
"""check if the given Rodrigues vector is in the fundamental zone.
After book from Morawiecz.
"""
# top and bottom face at +/-tan(pi/2n)
t = np.tan(np.pi / (2 * n))
if abs(rod[2]) > t:
return False
# 2n faces distance 1 from origin
# y <= ((2+sqrt(2))*t - (1+sqrt(2))) * x + (1+sqrt(2))*(1-t)
y, x = sorted([abs(ro[0]), abs(ro[1])])
if x > 1:
return False
return {
2: True,
3: y / (1 + math.sqrt(2)) + (1 - math.sqrt(2 / 3)) * x < 1 - 1 / math.sqrt(3),
4: y + x < math.sqrt(2),
6: y / (1 + math.sqrt(2)) + (1 - 2 * math.sqrt(2) + math.sqrt(6)) * x < math.sqrt(3) - 1
}[n]
def inFZ(self, symmetry=Symmetry.cubic):
"""Check if the given Orientation lies within the fundamental zone.
For a given crystal symmetry, several rotations can describe the same
physcial crystllographic arangement. The Rodrigues fundamental zone
restrict the orientation space accordingly.
"""
r = self.rod
if symmetry == Symmetry.cubic:
inFZT23 = np.abs(r).sum() <= 1.0
# in the cubic symmetry, each component must be < 2 ** 0.5 - 1
inFZ = inFZT23 and np.abs(r).max() <= 2 ** 0.5 - 1
else:
raise (ValueError('unsupported crystal symmetry: %s' % symmetry))
return inFZ
def move_to_FZ(self, symmetry=Symmetry.cubic, verbose=False):
"""
Compute the equivalent crystal orientation in the Fundamental Zone of a given symmetry.
:param Symmetry symmetry: an instance of the `Symmetry` class
:param verbose: flag for verbose mode
:return: a new Orientation instance which lies in the fundamental zone.
"""
om = symmetry.move_rotation_to_FZ(self.orientation_matrix(), verbose=verbose)
return Orientation(om)
    @staticmethod
    def misorientation_MacKenzie(psi):
        """Return the fraction of the misorientations corresponding to the
        given :math:`\\psi` angle in the reference solution derived by MacKenzie in
        his 1958 paper :cite:`MacKenzie_1958`.

        The distribution is piecewise: the four branches below correspond to
        the angular ranges [0, 45], ]45, 60], ]60, 60.72] and ]60.72, 62.8]
        degrees; larger angles cannot occur for cubic symmetry so the density
        is zero there.

        :param psi: the misorientation angle in radians.
        :returns: the value in the cumulative distribution corresponding to psi.
        """
        from math import sqrt, sin, cos, tan, pi, acos
        # work with the angle expressed in degrees to select the branch
        psidg = 180 * psi / pi
        if 0 <= psidg <= 45:
            p = 2. / 15 * (1 - cos(psi))
        elif 45 < psidg <= 60:
            p = 2. / 15 * (3 * (sqrt(2) - 1) * sin(psi) - 2 * (1 - cos(psi)))
        elif 60 < psidg <= 60.72:
            p = 2. / 15 * ((3 * (sqrt(2) - 1) + 4. / sqrt(3)) * sin(psi) - 6. * (1 - cos(psi)))
        elif 60.72 < psidg <= 62.8:
            # intermediate quantities X and Y from MacKenzie's paper
            X = (sqrt(2) - 1) / (1 - (sqrt(2) - 1) ** 2 / tan(0.5 * psi) ** 2) ** 0.5
            Y = (sqrt(2) - 1) ** 2 / ((3 - 1 / tan(0.5 * psi) ** 2) ** 0.5)
            p = (2. / 15) * ((3 * (sqrt(2) - 1) + 4 / sqrt(3)) * sin(psi) - 6 * (1 - cos(psi))) \
                - 8. / (5 * pi) * (
                2 * (sqrt(2) - 1) * acos(X / tan(0.5 * psi)) + 1. / sqrt(3) * acos(Y / tan(0.5 * psi))) * sin(psi) \
                + 8. / (5 * pi) * (2 * acos((sqrt(2) + 1) * X / sqrt(2)) + acos((sqrt(2) + 1) * Y / sqrt(2))) * (
                1 - cos(psi))
        else:
            # angles beyond ~62.8 degrees are impossible for cubic symmetry
            p = 0.
        return p
@staticmethod
def misorientation_axis_from_delta(delta):
"""Compute the misorientation axis from the misorientation matrix.
:param delta: The 3x3 misorientation matrix.
:returns: the misorientation axis (normalised vector).
"""
n = np.array([delta[1, 2] - delta[2, 1], delta[2, 0] - delta[0, 2], delta[0, 1] - delta[1, 0]])
n /= np.sqrt(
(delta[1, 2] - delta[2, 1]) ** 2 + (delta[2, 0] - delta[0, 2]) ** 2 + (delta[0, 1] - delta[1, 0]) ** 2)
return n
def misorientation_axis(self, orientation):
"""Compute the misorientation axis with another crystal orientation.
This vector is by definition common to both crystalline orientations.
:param orientation: an instance of :py:class:`~pymicro.crystal.microstructure.Orientation` class.
:returns: the misorientation axis (normalised vector).
"""
delta = np.dot(self.orientation_matrix(), orientation.orientation_matrix().T)
return Orientation.misorientation_axis_from_delta(delta)
@staticmethod
def misorientation_angle_from_delta(delta):
"""Compute the misorientation angle from the misorientation matrix.
Compute the angle assocated with this misorientation matrix :math:`\\Delta g`.
It is defined as :math:`\\omega = \\arccos(\\text{trace}(\\Delta g)/2-1)`.
To avoid float rounding error, the argument is rounded to 1. if it is within 1 and 1 plus 32 bits floating
point precison.
.. note::
This does not account for the crystal symmetries. If you want to
find the disorientation between two orientations, use the
:py:meth:`~pymicro.crystal.microstructure.Orientation.disorientation`
method.
:param delta: The 3x3 misorientation matrix.
:returns float: the misorientation angle in radians.
"""
cw = 0.5 * (delta.trace() - 1)
if cw > 1. and cw - 1. < 10 * np.finfo('float32').eps:
#print('cw=%.20f, rounding to 1.' % cw)
cw = 1.
omega = np.arccos(cw)
return omega
    def disorientation(self, orientation, crystal_structure=Symmetry.triclinic):
        """Compute the disorientation with another crystal orientation.

        Considering all the possible crystal symmetries, the disorientation
        is defined as the combination of the minimum misorientation angle
        and the misorientation axis lying in the fundamental zone, which
        can be used to bring the two lattices into coincidence.

        .. note::

          Both orientations are supposed to have the same symmetry. This is
          not necessarily the case in multi-phase materials.

        :param orientation: an instance of :py:class:`~pymicro.crystal.microstructure.Orientation` class desribing the other crystal orientation from which to compute the angle.
        :param crystal_structure: an instance of the `Symmetry` class describing the crystal symmetry, triclinic (no symmetry) by default.
        :returns tuple: the misorientation angle in radians, the axis as a numpy vector (crystal coordinates), the axis as a numpy vector (sample coordinates).
        """
        the_angle = np.pi
        symmetries = crystal_structure.symmetry_operators()
        (gA, gB) = (self.orientation_matrix(), orientation.orientation_matrix())  # nicknames
        # the minimum is searched over both (gA, gB) orderings and all pairs
        # of symmetry operators applied to each orientation
        for (g1, g2) in [(gA, gB), (gB, gA)]:
            for j in range(symmetries.shape[0]):
                sym_j = symmetries[j]
                oj = np.dot(sym_j, g1)  # the crystal symmetry operator is left applied
                for i in range(symmetries.shape[0]):
                    sym_i = symmetries[i]
                    oi = np.dot(sym_i, g2)
                    delta = np.dot(oi, oj.T)
                    mis_angle = Orientation.misorientation_angle_from_delta(delta)
                    if mis_angle < the_angle:
                        # now compute the misorientation axis, should check if it lies in the fundamental zone
                        mis_axis = Orientation.misorientation_axis_from_delta(delta)
                        # here we have np.dot(oi.T, mis_axis) = np.dot(oj.T, mis_axis)
                        the_angle = mis_angle
                        the_axis = mis_axis
                        the_axis_xyz = np.dot(oi.T, the_axis)
        return (the_angle, the_axis, the_axis_xyz)
def phi1(self):
"""Convenience methode to expose the first Euler angle."""
return self.euler[0]
def Phi(self):
"""Convenience methode to expose the second Euler angle."""
return self.euler[1]
def phi2(self):
"""Convenience methode to expose the third Euler angle."""
return self.euler[2]
def compute_XG_angle(self, hkl, omega, verbose=False):
"""Compute the angle between the scattering vector :math:`\mathbf{G_{l}}`
and :math:`\mathbf{-X}` the X-ray unit vector at a given angular position :math:`\\omega`.
A given hkl plane defines the scattering vector :math:`\mathbf{G_{hkl}}` by
the miller indices in the reciprocal space. It is expressed in the
cartesian coordinate system by :math:`\mathbf{B}.\mathbf{G_{hkl}}` and in the
laboratory coordinate system accounting for the crystal orientation
by :math:`\mathbf{g}^{-1}.\mathbf{B}.\mathbf{G_{hkl}}`.
The crystal is assumed to be placed on a rotation stage around the
laboratory vertical axis. The scattering vector can finally be
written as :math:`\mathbf{G_l}=\mathbf{\\Omega}.\mathbf{g}^{-1}.\mathbf{B}.\mathbf{G_{hkl}}`.
The X-rays unit vector is :math:`\mathbf{X}=[1, 0, 0]`. So the computed angle
is :math:`\\alpha=acos(-\mathbf{X}.\mathbf{G_l}/||\mathbf{G_l}||`
The Bragg condition is fulfilled when :math:`\\alpha=\pi/2-\\theta_{Bragg}`
:param hkl: the hkl plane, an instance of :py:class:`~pymicro.crystal.lattice.HklPlane`
:param omega: the angle of rotation of the crystal around the laboratory vertical axis.
:param bool verbose: activate verbose mode (False by default).
:return float: the angle between :math:`-\mathbf{X}` and :math:`\mathbf{G_{l}}` in degrees.
"""
X = np.array([1., 0., 0.])
gt = self.orientation_matrix().transpose()
Gc = hkl.scattering_vector()
Gs = gt.dot(Gc) # in the cartesian sample CS
omegar = omega * np.pi / 180
R = np.array([[np.cos(omegar), -np.sin(omegar), 0], [np.sin(omegar), np.cos(omegar), 0], [0, 0, 1]])
Gl = R.dot(Gs)
alpha = np.arccos(np.dot(-X, Gl) / np.linalg.norm(Gl)) * 180 / np.pi
if verbose:
print('scattering vector in the crystal CS', Gc)
print('scattering vector in the sample CS', Gs)
print('scattering vector in the laboratory CS (including Omega rotation)', Gl)
print('angle (deg) between -X and G', alpha)
return alpha
@staticmethod
def solve_trig_equation(A, B, C, verbose=False):
"""Solve the trigonometric equation in the form of:
.. math::
A\cos\\theta + B\sin\\theta = C
:param float A: the A constant in the equation.
:param float B: the B constant in the equation.
:param float C: the C constant in the equation.
:return tuple: the two solutions angular values in degrees.
"""
Delta = 4 * (A ** 2 + B ** 2 - C ** 2)
if Delta < 0:
raise ValueError('Delta < 0 (%f)' % Delta)
if verbose:
print('A={0:.3f}, B={1:.3f}, C={2:.3f}, Delta={3:.1f}'.format(A, B, C, Delta))
theta_1 = 2 * np.arctan2(B - 0.5 * np.sqrt(Delta), A + C) * 180. / np.pi % 360
theta_2 = 2 * np.arctan2(B + 0.5 * np.sqrt(Delta), A + C) * 180. / np.pi % 360
return theta_1, theta_2
def dct_omega_angles(self, hkl, lambda_keV, verbose=False):
"""Compute the two omega angles which satisfy the Bragg condition.
For a given crystal orientation sitting on a vertical rotation axis,
there is exactly two :math:`\omega` positions in :math:`[0, 2\pi]` for which
a particular :math:`(hkl)` reflexion will fulfil Bragg's law.
According to the Bragg's law, a crystallographic plane of a given
grain will be in diffracting condition if:
.. math::
\sin\\theta=-[\mathbf{\Omega}.\mathbf{g}^{-1}\mathbf{G_c}]_1
with :math:`\mathbf{\Omega}` the matrix associated with the rotation
axis:
.. math::
\mathbf{\Omega}=\\begin{pmatrix}
\cos\omega & -\sin\omega & 0 \\\\
\sin\omega & \cos\omega & 0 \\\\
0 & 0 & 1 \\\\
\end{pmatrix}
This method solves the associated second order equation to return
the two corresponding omega angles.
:param hkl: The given cristallographic plane :py:class:`~pymicro.crystal.lattice.HklPlane`
:param float lambda_keV: The X-rays energy expressed in keV
:param bool verbose: Verbose mode (False by default)
:returns tuple: :math:`(\omega_1, \omega_2)` the two values of the \
rotation angle around the vertical axis (in degrees).
"""
(h, k, l) = hkl.miller_indices()
theta = hkl.bragg_angle(lambda_keV, verbose=verbose)
lambda_nm = 1.2398 / lambda_keV
gt = self.orientation_matrix().T # gt = g^{-1} in Poulsen 2004
Gc = hkl.scattering_vector()
A = np.dot(Gc, gt[0])
B = - np.dot(Gc, gt[1])
# A = h / a * gt[0, 0] + k / b * gt[0, 1] + l / c * gt[0, 2]
# B = -h / a * gt[1, 0] - k / b * gt[1, 1] - l / c * gt[1, 2]
C = -2 * np.sin(theta) ** 2 / lambda_nm # the minus sign comes from the main equation
omega_1, omega_2 = Orientation.solve_trig_equation(A, B, C, verbose=verbose)
if verbose:
print('the two omega values in degrees fulfilling the Bragg condition are (%.1f, %.1f)' % (omega_1, omega_2))
return omega_1, omega_2
def rotating_crystal(self, hkl, lambda_keV, omega_step=0.5, display=True, verbose=False):
from pymicro.xray.xray_utils import lambda_keV_to_nm
lambda_nm = lambda_keV_to_nm(lambda_keV)
X = np.array([1., 0., 0.]) / lambda_nm
print('magnitude of X', np.linalg.norm(X))
gt = self.orientation_matrix().transpose()
(h, k, l) = hkl.miller_indices()
theta = hkl.bragg_angle(lambda_keV) * 180. / np.pi
print('bragg angle for %d%d%d reflection is %.1f' % (h, k, l, theta))
Gc = hkl.scattering_vector()
Gs = gt.dot(Gc)
alphas = []
twothetas = []
magnitude_K = []
omegas = np.linspace(0.0, 360.0, num=360.0 / omega_step, endpoint=False)
for omega in omegas:
print('\n** COMPUTING AT OMEGA=%03.1f deg' % omega)
# prepare rotation matrix
omegar = omega * np.pi / 180
R = np.array([[np.cos(omegar), -np.sin(omegar), 0], [np.sin(omegar), np.cos(omegar), 0], [0, 0, 1]])
# R = R.dot(Rlt).dot(Rut) # with tilts
Gl = R.dot(Gs)
print('scattering vector in laboratory CS', Gl)
n = R.dot(gt.dot(hkl.normal()))
print('plane normal:', hkl.normal())
print(R)
print('rotated plane normal:', n, ' with a norm of', np.linalg.norm(n))
G = n / hkl.interplanar_spacing() # here G == N
print('G vector:', G, ' with a norm of', np.linalg.norm(G))
K = X + G
print('X + G vector', K)
magnitude_K.append(np.linalg.norm(K))
print('magnitude of K', np.linalg.norm(K))
alpha = np.arccos(np.dot(-X, G) / (np.linalg.norm(-X) * np.linalg.norm(G))) * 180 / np.pi
print('angle between -X and G', alpha)
alphas.append(alpha)
twotheta = np.arccos(np.dot(K, X) / (np.linalg.norm(K) * np.linalg.norm(X))) * 180 / np.pi
print('angle (deg) between K and X', twotheta)
twothetas.append(twotheta)
print('min alpha angle is ', min(alphas))
# compute omega_1 and omega_2 to verify graphically
(w1, w2) = self.dct_omega_angles(hkl, lambda_keV, verbose=False)
# gather the results in a single figure
fig = plt.figure(figsize=(12, 10))
fig.add_subplot(311)
plt.title('Looking for (%d%d%d) Bragg reflexions' % (h, k, l))
plt.plot(omegas, alphas, 'k-')
plt.xlim(0, 360)
plt.ylim(0, 180)
plt.xticks(np.arange(0, 390, 30))
# add bragg condition
plt.axhline(90 - theta, xmin=0, xmax=360, linewidth=2)
plt.annotate('$\pi/2-\\theta_{Bragg}$', xycoords='data', xy=(360, 90 - theta), horizontalalignment='left',
verticalalignment='center', fontsize=16)
# add omega solutions
plt.axvline(w1 + 180, ymin=0, ymax=180, linewidth=2, linestyle='dashed', color='gray')
plt.axvline(w2 + 180, ymin=0, ymax=180, linewidth=2, linestyle='dashed', color='gray')
plt.annotate('$\\omega_1$', xycoords='data', xy=(w1 + 180, 0), horizontalalignment='center',
verticalalignment='bottom', fontsize=16)
plt.annotate('$\\omega_2$', xycoords='data', xy=(w2 + 180, 0), horizontalalignment='center',
verticalalignment='bottom', fontsize=16)
plt.ylabel(r'Angle between $-X$ and $\mathbf{G}$')
fig.add_subplot(312)
plt.plot(omegas, twothetas, 'k-')
plt.xlim(0, 360)
# plt.ylim(0,180)
plt.xticks(np.arange(0, 390, 30))
plt.axhline(2 * theta, xmin=0, xmax=360, linewidth=2)
plt.annotate('$2\\theta_{Bragg}$', xycoords='data', xy=(360, 2 * theta), horizontalalignment='left',
verticalalignment='center', fontsize=16)
plt.axvline(w1 + 180, linewidth=2, linestyle='dashed', color='gray')
plt.axvline(w2 + 180, linewidth=2, linestyle='dashed', color='gray')
plt.ylabel('Angle between $X$ and $K$')
fig.add_subplot(313)
plt.plot(omegas, magnitude_K, 'k-')
plt.xlim(0, 360)
plt.axhline(np.linalg.norm(X), xmin=0, xmax=360, linewidth=2)
plt.annotate('$1/\\lambda$', xycoords='data', xy=(360, 1 / lambda_nm), horizontalalignment='left',
verticalalignment='center', fontsize=16)
plt.axvline(w1 + 180, linewidth=2, linestyle='dashed', color='gray')
plt.axvline(w2 + 180, linewidth=2, linestyle='dashed', color='gray')
plt.xlabel(r'Angle of rotation $\omega$')
plt.ylabel(r'Magnitude of $X+G$ (nm$^{-1}$)')
plt.subplots_adjust(top=0.925, bottom=0.05, left=0.1, right=0.9)
if display:
plt.show()
else:
plt.savefig('rotating_crystal_plot_%d%d%d.pdf' % (h, k, l))
@staticmethod
def compute_instrument_transformation_matrix(rx_offset, ry_offset, rz_offset):
""" Compute the instrument transformation matrix for given rotation offset.
This function compute a 3x3 rotation matrix (passive convention) that transform the sample coordinate system
by rotating around the 3 cartesian axes in this order: rotation around X is applied first, then around Y and
finally around Z.
A sample vector :math:`V_s` is consequently transformed into :math:`V'_s` as:
.. math::
V'_s = T^T.V_s
:param double rx_offset: value to apply for the rotation around X.
:param double ry_offset: value to apply for the rotation around Y.
:param double rz_offset: value to apply for the rotation around Z.
:return: a 3x3 rotation matrix describing the transformation applied by the diffractometer.
"""
angle_zr = np.radians(rz_offset)
angle_yr = np.radians(ry_offset)
angle_xr = np.radians(rx_offset)
Rz = np.array([[np.cos(angle_zr), -np.sin(angle_zr), 0], [np.sin(angle_zr), np.cos(angle_zr), 0], [0, 0, 1]])
Ry = np.array([[np.cos(angle_yr), 0, np.sin(angle_yr)], [0, 1, 0], [-np.sin(angle_yr), 0, np.cos(angle_yr)]])
Rx = np.array([[1, 0, 0], [0, np.cos(angle_xr), -np.sin(angle_xr)], [0, np.sin(angle_xr), np.cos(angle_xr)]])
T = Rz.dot(np.dot(Ry, Rx))
return T
def topotomo_tilts(self, hkl, T=None, verbose=False):
"""Compute the tilts for topotomography alignment.
:param hkl: the hkl plane, an instance of :py:class:`~pymicro.crystal.lattice.HklPlane`
:param ndarray T: transformation matrix representing the diffractometer direction at omega=0.
:param bool verbose: activate verbose mode (False by default).
:returns tuple: (ut, lt) the two values of tilts to apply (in radians).
"""
if T is None:
T = np.eye(3) # identity be default
gt = self.orientation_matrix().transpose()
Gc = hkl.scattering_vector()
Gs = gt.dot(Gc) # in the cartesian sample CS
# apply instrument specific settings
Gs = np.dot(T.T, Gs)
# find topotomo tilts
ut = np.arctan(Gs[1] / Gs[2])
lt = np.arctan(-Gs[0] / (Gs[1] * np.sin(ut) + Gs[2] * np.cos(ut)))
if verbose:
print('up tilt (samrx) should be %.3f' % (ut * 180 / np.pi))
print('low tilt (samry) should be %.3f' % (lt * 180 / np.pi))
return ut, lt
def to_xml(self, doc):
    """
    Return an XML (dom) element encoding this Orientation instance.

    The element contains three children (phi1, Phi, phi2) holding the Euler
    angles formatted with '%f'.

    :param doc: the xml document used to create the elements.
    :returns: the 'Orientation' xml element.
    """
    print('deprecated as we are moving to hdf5 format')
    root = doc.createElement('Orientation')
    # one child element per Euler angle, in Bunge order
    for tag, value in (('phi1', self.phi1()), ('Phi', self.Phi()), ('phi2', self.phi2())):
        child = doc.createElement(tag)
        child.appendChild(doc.createTextNode('%f' % value))
        root.appendChild(child)
    return root
@staticmethod
def from_xml(orientation_node):
    """Build an `Orientation` instance from its XML representation.

    The node is expected to carry three children holding the Euler angles
    (phi1, Phi, phi2) as produced by `to_xml`.

    :param orientation_node: the xml node encoding the orientation.
    :returns: a new `Orientation` instance.
    """
    angles = [float(child.childNodes[0].nodeValue)
              for child in orientation_node.childNodes[:3]]
    return Orientation.from_euler(np.array(angles))
@staticmethod
def from_euler(euler, convention='Bunge'):
    """Create an `Orientation` from a set of Euler angles.

    This is the classical way to obtain an orientation matrix by 3 successive
    rotations; the result depends on how the successive rotation axes are
    chosen. In the Bunge convention (default) the rotations are Z, new X, new
    Z; in the Roe convention the second rotation is around Y (accounted for by
    shifting phi1 by +90 and phi2 by -90 degrees).

    :param euler: the 3 Euler angles (in degrees).
    :param str convention: 'Bunge' (default) or 'Roe'.
    :returns: a new `Orientation` instance.
    """
    if convention == 'Roe':
        phi1, phi, phi2 = euler[0] + 90, euler[1], euler[2] - 90
    else:
        phi1, phi, phi2 = euler
    return Orientation(Orientation.Euler2OrientationMatrix((phi1, phi, phi2)))
@staticmethod
def from_rodrigues(rod):
    """Create an `Orientation` from a Rodrigues vector.

    :param rod: the Rodrigues vector as a 3 components array.
    :returns: a new `Orientation` instance.
    """
    return Orientation(Orientation.Rodrigues2OrientationMatrix(rod))
@staticmethod
def from_Quaternion(q):
    """Create an `Orientation` from a `Quaternion` instance.

    :param q: a `Quaternion` instance.
    :returns: a new `Orientation` instance.
    """
    return Orientation(Orientation.Quaternion2OrientationMatrix(q))
@staticmethod
def Zrot2OrientationMatrix(x1=None, x2=None, x3=None):
"""Compute the orientation matrix from the rotated coordinates given in the
.inp file for Zebulon's computations
Need at least two vectors to compute cross product
Still need some tests to validate this function
"""
if (x1 is None and x2 is None):
raise NameError('Need at least two vectors to compute the matrix')
elif (x1 == None and x3 == None):
raise NameError('Need at least two vectors to compute the matrix')
elif (x3 == None and x2 == None):
raise NameError('Need at least two vectors to compute the matrix')
if x1 == None:
x1 = np.cross(x2, x3)
elif x2 == None:
x2 = np.cross(x3, x1)
elif x3 == None:
x3 = np.cross(x1, x2)
x1 = x1 / np.linalg.norm(x1)
x2 = x2 / np.linalg.norm(x2)
x3 = x3 / np.linalg.norm(x3)
g = np.array([x1, x2, x3]).transpose()
return g
@staticmethod
def OrientationMatrix2EulerSF(g):
    """
    Compute the Euler angles (in degrees) from the orientation matrix
    in a similar way as done in Mandel_crystal.c

    :param g: the 3x3 orientation matrix.
    :returns: the 3 Euler angles (Bunge convention) in degrees as a numpy array.
    """
    tol = 0.1  # tolerance used to resolve the arcsin branch ambiguity below
    r = np.zeros(9, dtype=np.float64)  # double precision here
    # Z-set order for tensor is 11 22 33 12 23 13 21 32 31
    r[0] = g[0, 0]
    r[1] = g[1, 1]
    r[2] = g[2, 2]
    r[3] = g[0, 1]
    r[4] = g[1, 2]
    r[5] = g[0, 2]
    r[6] = g[1, 0]
    r[7] = g[2, 1]
    r[8] = g[2, 0]
    phi = np.arccos(r[2])  # Phi is recovered from g33
    if phi == 0.:
        # degenerate case Phi = 0: only phi1 + phi2 is defined,
        # the whole angle is attributed to phi1 and phi2 is set to zero
        phi2 = 0.
        phi1 = np.arcsin(r[6])
        if abs(np.cos(phi1) - r[0]) > tol:
            # arcsin picked the wrong branch, use the supplementary angle
            phi1 = np.pi - phi1
    else:
        # general case: sin(phi2) from g13 and sin(phi1) from g31
        x2 = r[5] / np.sin(phi)
        x1 = r[8] / np.sin(phi);
        # clamp both sines into [-1, 1] to guard against round-off
        if x1 > 1.:
            x1 = 1.
        if x2 > 1.:
            x2 = 1.
        if x1 < -1.:
            x1 = -1.
        if x2 < -1.:
            x2 = -1.
        phi2 = np.arcsin(x2)
        phi1 = np.arcsin(x1)
        # resolve the arcsin branch by checking consistency with g32 and g23
        if abs(np.cos(phi2) * np.sin(phi) - r[7]) > tol:
            phi2 = np.pi - phi2
        if abs(np.cos(phi1) * np.sin(phi) + r[4]) > tol:
            phi1 = np.pi - phi1
    return np.degrees(np.array([phi1, phi, phi2]))
@staticmethod
def OrientationMatrix2Euler(g):
"""
Compute the Euler angles from the orientation matrix.
This conversion follows the paper of Rowenhorst et al. :cite:`Rowenhorst2015`.
In particular when :math:`g_{33} = 1` within the machine precision,
there is no way to determine the values of :math:`\phi_1` and :math:`\phi_2`
(only their sum is defined). The convention is to attribute
the entire angle to :math:`\phi_1` and set :math:`\phi_2` to zero.
:param g: The 3x3 orientation matrix
:return: The 3 euler angles in degrees.
"""
eps = np.finfo('float').eps
(phi1, Phi, phi2) = (0.0, 0.0, 0.0)
# treat special case where g[2, 2] = 1
if np.abs(g[2, 2]) >= 1 - eps:
if g[2, 2] > 0.0:
phi1 = np.arctan2(g[0][1], g[0][0])
else:
phi1 = -np.arctan2(-g[0][1], g[0][0])
Phi = np.pi
else:
Phi = np.arccos(g[2][2])
zeta = 1.0 / np.sqrt(1.0 - g[2][2] ** 2)
phi1 = np.arctan2(g[2][0] * zeta, -g[2][1] * zeta)
phi2 = np.arctan2(g[0][2] * zeta, g[1][2] * zeta)
# ensure angles are in the range [0, 2*pi]
if phi1 < 0.0:
phi1 += 2 * np.pi
if Phi < 0.0:
Phi += 2 * np.pi
if phi2 < 0.0:
phi2 += 2 * np.pi
return np.degrees([phi1, Phi, phi2])
@staticmethod
def OrientationMatrix2Rodrigues(g):
"""
Compute the rodrigues vector from the orientation matrix.
:param g: The 3x3 orientation matrix representing the rotation.
:returns: The Rodrigues vector as a 3 components array.
"""
t = g.trace() + 1
if np.abs(t) < np.finfo(g.dtype).eps:
print('warning, returning [0., 0., 0.], consider using axis, angle representation instead')
return np.zeros(3)
else:
r1 = (g[1, 2] - g[2, 1]) / t
r2 = (g[2, 0] - g[0, 2]) / t
r3 = (g[0, 1] - g[1, 0]) / t
return np.array([r1, r2, r3])
@staticmethod
def OrientationMatrix2Quaternion(g, P=1):
    """Compute the quaternion from the orientation matrix.

    The magnitudes of the four components are computed from the diagonal of
    the matrix, then the sign of each vector component is fixed using the
    corresponding pair of off-diagonal terms, following the conversion of
    Rowenhorst et al. :cite:`Rowenhorst2015`.

    :param g: the 3x3 orientation matrix representing the rotation.
    :param int P: +1 (default) or -1, the sign convention of the quaternion.
    :returns: the 4 quaternion components as an array.
    """
    q0 = 0.5 * np.sqrt(1 + g[0, 0] + g[1, 1] + g[2, 2])
    q1 = P * 0.5 * np.sqrt(1 + g[0, 0] - g[1, 1] - g[2, 2])
    q2 = P * 0.5 * np.sqrt(1 - g[0, 0] + g[1, 1] - g[2, 2])
    q3 = P * 0.5 * np.sqrt(1 - g[0, 0] - g[1, 1] + g[2, 2])
    # the three sign checks are independent: an elif chain would flip
    # at most one component and produce a wrong quaternion in general
    if g[2, 1] < g[1, 2]:
        q1 = q1 * -1
    if g[0, 2] < g[2, 0]:
        q2 = q2 * -1
    if g[1, 0] < g[0, 1]:
        q3 = q3 * -1
    q = Quaternion(np.array([q0, q1, q2, q3]), convention=P)
    return q.quat
@staticmethod
def Rodrigues2OrientationMatrix(rod):
"""
Compute the orientation matrix from the Rodrigues vector.
:param rod: The Rodrigues vector as a 3 components array.
:returns: The 3x3 orientation matrix representing the rotation.
"""
r = np.linalg.norm(rod)
I = np.diagflat(np.ones(3))
if r < np.finfo(r.dtype).eps:
return I
else:
theta = 2 * np.arctan(r)
n = rod / r
omega = np.array([[0.0, n[2], -n[1]], [-n[2], 0.0, n[0]], [n[1], -n[0], 0.0]])
return I + np.sin(theta) * omega + (1 - np.cos(theta)) * omega.dot(omega)
@staticmethod
def Rodrigues2Axis(rod):
"""
Compute the axis/angle representation from the Rodrigues vector.
:param rod: The Rodrigues vector as a 3 components array.
:returns: A tuple in the (axis, angle) form.
"""
r = np.linalg.norm(rod)
axis = rod / r
angle = 2 * np.arctan(r)
return axis, angle
@staticmethod
def Axis2OrientationMatrix(axis, angle):
"""
Compute the (passive) orientation matrix associated the rotation defined by the given (axis, angle) pair.
:param axis: the rotation axis.
:param angle: the rotation angle (degrees).
:returns: the 3x3 orientation matrix.
"""
omega = np.radians(angle)
c = np.cos(omega)
s = np.sin(omega)
g = np.array([[c + (1 - c) * axis[0] ** 2, (1 - c) * axis[0] * axis[1] + s * axis[2],
(1 - c) * axis[0] * axis[2] - s * axis[1]],
[(1 - c) * axis[0] * axis[1] - s * axis[2], c + (1 - c) * axis[1] ** 2,
(1 - c) * axis[1] * axis[2] + s * axis[0]],
[(1 - c) * axis[0] * axis[2] + s * axis[1], (1 - c) * axis[1] * axis[2] - s * axis[0],
c + (1 - c) * axis[2] ** 2]])
return g
@staticmethod
def Euler2Axis(euler):
"""
Compute the (axis, angle) representation associated to this (passive) rotation expressed by the Euler angles.
:param euler: 3 euler angles (in degrees)
:returns: a tuple containing the axis (a vector) and the angle (in radians).
"""
(phi1, Phi, phi2) = np.radians(euler)
t = np.tan(0.5 * Phi)
s = 0.5 * (phi1 + phi2)
d = 0.5 * (phi1 - phi2)
tau = np.sqrt(t ** 2 + np.sin(s) ** 2)
alpha = 2 * np.arctan2(tau, np.cos(s))
if alpha > np.pi:
axis = np.array([-t / tau * np.cos(d), -t / tau * np.sin(d), -1 / tau * np.sin(s)])
angle = 2 * np.pi - alpha
else:
axis = np.array([t / tau * np.cos(d), t / tau * np.sin(d), 1 / tau * np.sin(s)])
angle = alpha
return axis, angle
@staticmethod
def Euler2Quaternion(euler, P=1):
    """
    Compute the quaternion from the 3 euler angles (in degrees).

    :param tuple euler: the 3 euler angles in degrees.
    :param int P: +1 to compute an active quaternion (default), -1 for a passive quaternion.
    :return: a `Quaternion` instance representing the rotation.
    """
    phi1, Phi, phi2 = np.radians(euler)
    half_sum = 0.5 * (phi1 + phi2)
    half_diff = 0.5 * (phi1 - phi2)
    cos_half_Phi = np.cos(0.5 * Phi)
    sin_half_Phi = np.sin(0.5 * Phi)
    components = np.array([np.cos(half_sum) * cos_half_Phi,
                           -P * np.cos(half_diff) * sin_half_Phi,
                           -P * np.sin(half_diff) * sin_half_Phi,
                           -P * np.sin(half_sum) * cos_half_Phi])
    return Quaternion(components, convention=P)
@staticmethod
def Euler2Rodrigues(euler):
"""
Compute the rodrigues vector from the 3 euler angles (in degrees)
"""
(phi1, Phi, phi2) = np.radians(euler)
a = 0.5 * (phi1 - phi2)
b = 0.5 * (phi1 + phi2)
r1 = np.tan(0.5 * Phi) * np.cos(a) / np.cos(b)
r2 = np.tan(0.5 * Phi) * np.sin(a) / np.cos(b)
r3 = np.tan(b)
return np.array([r1, r2, r3])
@staticmethod
def Euler2OrientationMatrix(euler):
"""
Compute the orientation matrix :math:`\mathbf{g}` associated with the 3 Euler angles
:math:`(\phi_1, \Phi, \phi_2)`. The matrix is calculated via (see the `euler_angles` recipe in the cookbook
for a detailed example):
.. math::
\mathbf{g}=\\begin{pmatrix}
\cos\phi_1\cos\phi_2 - \sin\phi_1\sin\phi_2\cos\Phi & \sin\phi_1\cos\phi_2 + \cos\phi_1\sin\phi_2\cos\Phi & \sin\phi_2\sin\Phi \\\\
-\cos\phi_1\sin\phi_2 - \sin\phi_1\cos\phi_2\cos\Phi & -\sin\phi_1\sin\phi_2 + \cos\phi_1\cos\phi_2\cos\Phi & \cos\phi_2\sin\Phi \\\\
\sin\phi_1\sin\Phi & -\cos\phi_1\sin\Phi & \cos\Phi \\\\
\end{pmatrix}
:param euler: The triplet of the Euler angles (in degrees).
:returns g: The 3x3 orientation matrix.
"""
(rphi1, rPhi, rphi2) = np.radians(euler)
c1 = np.cos(rphi1)
s1 = np.sin(rphi1)
c = np.cos(rPhi)
s = np.sin(rPhi)
c2 = np.cos(rphi2)
s2 = np.sin(rphi2)
# rotation matrix g
g11 = c1 * c2 - s1 * s2 * c
g12 = s1 * c2 + c1 * s2 * c
g13 = s2 * s
g21 = -c1 * s2 - s1 * c2 * c
g22 = -s1 * s2 + c1 * c2 * c
g23 = c2 * s
g31 = s1 * s
g32 = -c1 * s
g33 = c
g = np.array([[g11, g12, g13], [g21, g22, g23], [g31, g32, g33]])
return g
@staticmethod
def Quaternion2Euler(q):
"""
Compute Euler angles from a Quaternion
:param q: Quaternion
:return: Euler angles (in degrees, Bunge convention)
"""
P = q.convention
(q0, q1, q2, q3) = q.quat
q03 = q0 ** 2 + q3 ** 2
q12 = q1 ** 2 + q2 ** 2
chi = np.sqrt(q03 * q12)
if chi == 0.:
if q12 == 0.:
phi_1 = atan2(-2 * P * q0 * q3, q0 ** 2 - q3 ** 2)
Phi = 0.
else:
phi_1 = atan2(-2 * q1 * q2, q1 ** 2 - q2 ** 2)
Phi = pi
phi_2 = 0.
else:
phi_1 = atan2((q1 * q3 - P * q0 * q2) / chi, (-P * q0 * q1 - q2 * q3) / chi)
Phi = atan2(2 * chi, q03 - q12)
phi_2 = atan2((P * q0 * q2 + q1 * q3) / chi, (q2 * q3 - P * q0 * q1) / chi)
return np.degrees([phi_1, Phi, phi_2])
@staticmethod
def Quaternion2OrientationMatrix(q):
P = q.convention
(q0, q1, q2, q3) = q.quat
qbar = q0 ** 2 - q1 ** 2 - q2 ** 2 - q3 ** 2
g = np.array([[qbar + 2 * q1 ** 2, 2 * (q1 * q2 - P * q0 * q3), 2 * (q1 * q3 + P * q0 * q2)],
[2 * (q1 * q2 + P * q0 * q3), qbar + 2 * q2 ** 2, 2 * (q2 * q3 - P * q0 * q1)],
[2 * (q1 * q3 - P * q0 * q2), 2 * (q2 * q3 + P * q0 * q1), qbar + 2 * q3 ** 2]])
return g
@staticmethod
def read_euler_txt(txt_path):
    """
    Read a set of euler angles from an ascii file.

    Convenience wrapper around `Orientation.read_orientations` with the
    euler data type.

    :param str txt_path: path to the text file containing the euler angles.
    :returns dict: a dictionary with the line number and the corresponding orientation.
    """
    return Orientation.read_orientations(txt_path, data_type='euler')
@staticmethod
def read_orientations(txt_path, data_type='euler', **kwargs):
    """
    Read a set of grain orientations from a text file.

    The text file must be organised in 3 columns (the others are ignored),
    corresponding to either the three euler angles or the three rodrigues
    vector components, depending on the data_type). Internally the ascii
    file is read by the genfromtxt function of numpy, additional keywords
    (such as the delimiter) can be passed to via the kwargs dictionnary.

    :param str txt_path: path to the text file containing the orientations.
    :param str data_type: 'euler' (default) or 'rodrigues'.
    :param dict kwargs: additional parameters passed to genfromtxt.
    :returns dict: a dictionary with the line number and the corresponding orientation.
    """
    # atleast_2d lets a single line file behave like any other: genfromtxt
    # returns a 1D array in that case which would break the 2D indexing below
    data = np.atleast_2d(np.genfromtxt(txt_path, **kwargs))
    size = len(data)
    orientations = []
    for i in range(size):
        angles = np.array([float(data[i, 0]), float(data[i, 1]), float(data[i, 2])])
        if data_type == 'euler':
            orientations.append([i + 1, Orientation.from_euler(angles)])
        elif data_type == 'rodrigues':
            orientations.append([i + 1, Orientation.from_rodrigues(angles)])
    return dict(orientations)
@staticmethod
def read_euler_from_zset_inp(inp_path):
    """Read a set of grain orientations from a z-set input file.

    In z-set input files, the orientation data may be specified
    either using the rotation of two vector, euler angles or
    rodrigues components directly. For instance the following
    lines are extracted from a polycrystalline calculation file
    using the rotation keyword:

    ::

     **elset elset1 *file au.mat *integration theta_method_a 1.0 1.e-9 150 *rotation x1 0.438886 -1.028805 0.197933 x3 1.038339 0.893172 1.003888
     **elset elset2 *file au.mat *integration theta_method_a 1.0 1.e-9 150 *rotation x1 0.178825 -0.716937 1.043300 x3 0.954345 0.879145 1.153101

    :param str inp_path: the path to the ascii file to read.
    :returns dict: a dictionary of the orientations associated with the elset names.
    """
    # read the whole file, making sure the handle is closed afterwards
    # (the previous implementation leaked the open file object)
    with open(inp_path) as inp:
        lines = inp.readlines()
    # locate the ***material block
    for i, line in enumerate(lines):
        if line.lstrip().startswith('***material'):
            break
    euler_lines = []
    for j, line in enumerate(lines[i + 1:]):
        # read until next *** block
        if line.lstrip().startswith('***'):
            break
        if (not line.lstrip().startswith('%') and line.find('**elset') >= 0):
            euler_lines.append(line)
    euler = []
    for l in euler_lines:
        tokens = l.split()
        elset = tokens[tokens.index('**elset') + 1]
        irot = tokens.index('*rotation')
        if tokens[irot + 1] == 'x1':
            # orientation given by two rotated basis vectors x1 and x3
            x1 = np.empty(3, dtype=float)
            x1[0] = float(tokens[irot + 2])
            x1[1] = float(tokens[irot + 3])
            x1[2] = float(tokens[irot + 4])
            x3 = np.empty(3, dtype=float)
            x3[0] = float(tokens[irot + 6])
            x3[1] = float(tokens[irot + 7])
            x3[2] = float(tokens[irot + 8])
            euler.append([elset, Orientation.Zrot2OrientationMatrix(x1=x1, x3=x3)])
        else:  # euler angles
            phi1 = tokens[irot + 1]
            Phi = tokens[irot + 2]
            phi2 = tokens[irot + 3]
            angles = np.array([float(phi1), float(Phi), float(phi2)])
            euler.append([elset, Orientation.from_euler(angles)])
    return dict(euler)
def slip_system_orientation_tensor(self, s):
    """Compute the orientation tensor M^s for this :py:class:`~pymicro.crystal.microstructure.Orientation`
    and the given slip system.

    :param s: an instance of :py:class:`~pymicro.crystal.lattice.SlipSystem`

    .. math::

      M^s_{ij} = \left(l^s_i.n^s_j)
    """
    gt = self.orientation_matrix().transpose()
    # express the plane normal and the slip direction in the sample CS
    n_rot = gt.dot(s.get_slip_plane().normal())
    l_rot = gt.dot(s.get_slip_direction().direction())
    return np.outer(l_rot, n_rot)
def slip_system_orientation_strain_tensor(self, s):
    """Compute the orientation strain tensor m^s for this :py:class:`~pymicro.crystal.microstructure.Orientation`
    and the given slip system.

    :param s: an instance of :py:class:`~pymicro.crystal.lattice.SlipSystem`

    .. math::

      m^s_{ij} = \\frac{1}{2}\left(l^s_i.n^s_j + l^s_j.n^s_i)
    """
    gt = self.orientation_matrix().transpose()
    n_rot = gt.dot(s.get_slip_plane().normal())
    l_rot = gt.dot(s.get_slip_direction().direction())
    # symmetric part of the orientation tensor
    l_n = np.outer(l_rot, n_rot)
    return 0.5 * (l_n + l_n.T)
def slip_system_orientation_rotation_tensor(self, s):
    """Compute the orientation rotation tensor q^s for this :py:class:`~pymicro.crystal.microstructure.Orientation`
    and the given slip system.

    :param s: an instance of :py:class:`~pymicro.crystal.lattice.SlipSystem`

    .. math::

      q^s_{ij} = \\frac{1}{2}\left(l^s_i.n^s_j - l^s_j.n^s_i)
    """
    gt = self.orientation_matrix().transpose()
    n_rot = gt.dot(s.get_slip_plane().normal())
    l_rot = gt.dot(s.get_slip_direction().direction())
    # skew-symmetric part of the orientation tensor
    l_n = np.outer(l_rot, n_rot)
    return 0.5 * (l_n - l_n.T)
def schmid_factor(self, slip_system, load_direction=[0., 0., 1]):
    """Compute the Schmid factor for this crystal orientation and the
    given slip system.

    :param slip_system: a slip system instance.
    :param load_direction: a unit vector describing the loading direction (default: vertical axis [0, 0, 1]).
    :returns float: a number between 0 and 0.5.
    """
    gt = self.orientation_matrix().transpose()
    # express the plane normal (a unit vector) and slip direction in the sample CS
    n_rot = gt.dot(slip_system.get_slip_plane().normal())
    slip_rot = gt.dot(slip_system.get_slip_direction().direction())
    return np.abs(np.dot(n_rot, load_direction) * np.dot(slip_rot, load_direction))
def compute_all_schmid_factors(self, slip_systems, load_direction=[0., 0., 1], verbose=False):
    """Compute all Schmid factors for this crystal orientation and the
    given list of slip systems.

    :param slip_systems: a list of the slip system from which to compute the Schmid factor values.
    :param load_direction: a unit vector describing the loading direction (default: vertical axis [0, 0, 1]).
    :param bool verbose: activate verbose mode.
    :returns list: a list of the schmid factors.
    """
    if not verbose:
        return [self.schmid_factor(ss, load_direction) for ss in slip_systems]
    schmid_factor_list = []
    for ss in slip_systems:
        sf = self.schmid_factor(ss, load_direction)
        print('Slip system: %s, Schmid factor is %.3f' % (ss, sf))
        schmid_factor_list.append(sf)
    return schmid_factor_list
class Grain:
    """
    Class defining a crystallographic grain.

    A grain has its own crystallographic orientation.
    An optional id for the grain may be specified.
    The center attribute is the center of mass of the grain in world coordinates.
    The volume of the grain is expressed in pixel/voxel unit.
    """

    def __init__(self, grain_id, grain_orientation):
        self.id = grain_id
        self.orientation = grain_orientation
        self.center = np.array([0., 0., 0.])
        self.volume = 0  # warning not implemented
        self.vtkmesh = None
        self.hkl_planes = []

    def __repr__(self):
        """Provide a string representation of the class."""
        s = '%s\n * id = %d\n' % (self.__class__.__name__, self.id)
        s += ' * %s\n' % (self.orientation)
        s += ' * center %s\n' % np.array_str(self.center)
        # identity test with None, not the elementwise `!=`
        s += ' * has vtk mesh ? %s\n' % (self.vtkmesh is not None)
        return s

    def schmid_factor(self, slip_system, load_direction=[0., 0., 1]):
        """Compute the Schmid factor of this grain for the given slip system.

        This simply delegates to the grain crystal orientation (the previous
        implementation recomputed the factor locally and discarded the result).

        :param slip_system: a slip system instance.
        :param load_direction: a unit vector describing the loading direction.
        :returns float: the Schmid factor of this grain for the given slip system.
        """
        return self.orientation.schmid_factor(slip_system, load_direction)

    def SetVtkMesh(self, mesh):
        """Set the VTK mesh of this grain.

        :param mesh: the grain mesh in VTK format (typically vtkunstructuredgrid)
        """
        self.vtkmesh = mesh

    def add_vtk_mesh(self, array, contour=True, verbose=False):
        """Add a mesh to this grain.

        This method process a labeled array to extract the geometry of the grain. The grain shape is defined by
        the pixels with a value of the grain id. A vtkUniformGrid object is created and thresholded or contoured
        depending on the value of the flag `contour`.
        The resulting mesh is stored on the grain, centered on the center of mass of the grain.

        :param ndarray array: a numpy array from which to extract the grain shape.
        :param bool contour: a flag to use contour mode for the shape.
        :param bool verbose: activate verbose mode.
        """
        label = self.id  # we use the grain id here...
        # create vtk structure
        from scipy import ndimage
        from vtk.util import numpy_support
        grain_size = np.shape(array)
        array_bin = (array == label).astype(np.uint8)
        local_com = ndimage.measurements.center_of_mass(array_bin, array)
        vtk_data_array = numpy_support.numpy_to_vtk(np.ravel(array_bin, order='F'), deep=1)
        grid = vtk.vtkUniformGrid()
        # center the grid on the grain center of mass
        grid.SetOrigin(-local_com[0], -local_com[1], -local_com[2])
        grid.SetSpacing(1, 1, 1)
        if vtk.vtkVersion().GetVTKMajorVersion() > 5:
            grid.SetScalarType(vtk.VTK_UNSIGNED_CHAR, vtk.vtkInformation())
        else:
            grid.SetScalarType(vtk.VTK_UNSIGNED_CHAR)
        if contour:
            grid.SetExtent(0, grain_size[0] - 1, 0, grain_size[1] - 1, 0, grain_size[2] - 1)
            grid.GetPointData().SetScalars(vtk_data_array)
            # contouring selected grain (named so the filter does not shadow
            # the `contour` flag)
            contour_filter = vtk.vtkContourFilter()
            if vtk.vtkVersion().GetVTKMajorVersion() > 5:
                contour_filter.SetInputData(grid)
            else:
                contour_filter.SetInput(grid)
            contour_filter.SetValue(0, 0.5)
            contour_filter.Update()
            if verbose:
                print(contour_filter.GetOutput())
            self.SetVtkMesh(contour_filter.GetOutput())
        else:
            grid.SetExtent(0, grain_size[0], 0, grain_size[1], 0, grain_size[2])
            grid.GetCellData().SetScalars(vtk_data_array)
            # threshold selected grain
            thresh = vtk.vtkThreshold()
            thresh.ThresholdBetween(0.5, 1.5)
            # thresh.ThresholdBetween(label-0.5, label+0.5)
            if vtk.vtkVersion().GetVTKMajorVersion() > 5:
                thresh.SetInputData(grid)
            else:
                thresh.SetInput(grid)
            thresh.Update()
            if verbose:
                print('thresholding label: %d' % label)
                print(thresh.GetOutput())
            self.SetVtkMesh(thresh.GetOutput())

    def to_xml(self, doc, file_name=None):
        """
        Returns an XML representation of the Grain instance.

        :param doc: the xml document used to create the elements.
        :param str file_name: the mesh file name to reference (default: `vtk_file_name()`).
        """
        grain = doc.createElement('Grain')
        grain_id = doc.createElement('Id')
        grain_id_text = doc.createTextNode('%s' % self.id)
        grain_id.appendChild(grain_id_text)
        grain.appendChild(grain_id)
        grain.appendChild(self.orientation.to_xml(doc))
        grain_position = doc.createElement('Position')
        grain_position_x = doc.createElement('X')
        grain_position.appendChild(grain_position_x)
        grain_position_x_text = doc.createTextNode('%f' % self.center[0])
        grain_position_x.appendChild(grain_position_x_text)
        grain_position_y = doc.createElement('Y')
        grain_position.appendChild(grain_position_y)
        grain_position_y_text = doc.createTextNode('%f' % self.center[1])
        grain_position_y.appendChild(grain_position_y_text)
        grain_position_z = doc.createElement('Z')
        grain_position.appendChild(grain_position_z)
        grain_position_z_text = doc.createTextNode('%f' % self.center[2])
        grain_position_z.appendChild(grain_position_z_text)
        grain.appendChild(grain_position)
        grain_mesh = doc.createElement('Mesh')
        if not file_name:
            file_name = self.vtk_file_name()
        grain_mesh_text = doc.createTextNode('%s' % file_name)
        grain_mesh.appendChild(grain_mesh_text)
        grain.appendChild(grain_mesh)
        return grain

    @staticmethod
    def from_xml(grain_node, verbose=False):
        """Build a `Grain` instance from its XML representation.

        :param grain_node: the xml node encoding the grain (Id, Orientation, Position, Mesh).
        :param bool verbose: activate verbose mode.
        :returns: a new `Grain` instance.
        """
        grain_id = grain_node.childNodes[0]
        grain_orientation = grain_node.childNodes[1]
        orientation = Orientation.from_xml(grain_orientation)
        id = int(grain_id.childNodes[0].nodeValue)
        grain = Grain(id, orientation)
        grain_position = grain_node.childNodes[2]
        xg = float(grain_position.childNodes[0].childNodes[0].nodeValue)
        yg = float(grain_position.childNodes[1].childNodes[0].nodeValue)
        zg = float(grain_position.childNodes[2].childNodes[0].nodeValue)
        grain.center = np.array([xg, yg, zg])
        grain_mesh = grain_node.childNodes[3]
        grain_mesh_file = grain_mesh.childNodes[0].nodeValue
        if verbose:
            print(grain_mesh_file)
        grain.load_vtk_repr(grain_mesh_file, verbose)
        return grain

    def vtk_file_name(self):
        """Return the default vtk file name for this grain."""
        return 'grain_%d.vtu' % self.id

    def save_vtk_repr(self, file_name=None):
        """Write the vtk representation of this grain to disk.

        :param str file_name: the file to write (default: `vtk_file_name()`).
        """
        import vtk
        if not file_name:
            file_name = self.vtk_file_name()
        print('writting ' + file_name)
        writer = vtk.vtkXMLUnstructuredGridWriter()
        writer.SetFileName(file_name)
        if vtk.vtkVersion().GetVTKMajorVersion() > 5:
            writer.SetInputData(self.vtkmesh)
        else:
            writer.SetInput(self.vtkmesh)
        writer.Write()

    def load_vtk_repr(self, file_name, verbose=False):
        """Read the vtk representation of this grain from disk.

        :param str file_name: the file to read.
        :param bool verbose: activate verbose mode.
        """
        import vtk
        if verbose:
            print('reading ' + file_name)
        reader = vtk.vtkXMLUnstructuredGridReader()
        reader.SetFileName(file_name)
        reader.Update()
        self.vtkmesh = reader.GetOutput()

    def orientation_matrix(self):
        """Returns the grain orientation matrix."""
        return self.orientation.orientation_matrix()

    def dct_omega_angles(self, hkl, lambda_keV, verbose=False):
        """Compute the two omega angles which satisfy the Bragg condition.

        For a grain with a given crystal orientation sitting on a vertical
        rotation axis, there is exactly two omega positions in [0, 2pi] for
        which a particular hkl reflexion will fulfil Bragg's law.
        See :py:func:`~pymicro.crystal.microstructure.Orientation.dct_omega_angles`
        of the :py:class:`~pymicro.crystal.microstructure.Orientation` class.

        :param hkl: The given cristallographic :py:class:`~pymicro.crystal.lattice.HklPlane`
        :param float lambda_keV: The X-rays energy expressed in keV
        :param bool verbose: Verbose mode (False by default)
        :returns tuple: (w1, w2) the two values of the omega angle.
        """
        return self.orientation.dct_omega_angles(hkl, lambda_keV, verbose)

    @staticmethod
    def from_dct(label=1, data_dir='.'):
        """Create a `Grain` instance from a DCT grain file.

        :param int label: the grain id.
        :param str data_dir: the data root from where to fetch data files.
        :return: A new grain instance.
        """
        grain_path = os.path.join(data_dir, '4_grains', 'phase_01', 'grain_%04d.mat' % label)
        # open the file explicitly read-only and make sure it gets closed
        # NOTE(review): `.value` is the legacy h5py dataset accessor (removed
        # in h5py >= 3); kept to match the rest of the file — confirm version
        with h5py.File(grain_path, 'r') as grain_info:
            g = Grain(label, Orientation.from_rodrigues(grain_info['R_vector'].value))
            g.center = grain_info['center'].value
        # add spatial representation of the grain if reconstruction is available
        grain_map_path = os.path.join(data_dir, '5_reconstruction', 'phase_01_vol.mat')
        if os.path.exists(grain_map_path):
            with h5py.File(grain_map_path, 'r') as f:
                # because how matlab writes the data, we need to swap X and Z axes in the DCT volume
                vol = f['vol'].value.transpose(2, 1, 0)
                from scipy import ndimage
                grain_data = vol[ndimage.find_objects(vol == label)[0]]
                g.volume = ndimage.measurements.sum(vol == label)
                # create the vtk representation of the grain
                g.add_vtk_mesh(grain_data, contour=False)
        return g
class Microstructure:
"""
Class used to manipulate a full microstructure.
It is typically defined as a list of grains objects, has an associated crystal `Lattice` instance.
A grain map and a mask can be added to the microstructure instance. For simplicity a simple field `voxel_size`
describe the spatial resolution of teses maps.
"""
def __init__(self, name='empty', lattice=None):
    """Create a new microstructure.

    :param str name: a name for this microstructure ('empty' by default).
    :param lattice: a `Lattice` instance; a cubic lattice of unit cell size 1.0 is used by default.
    """
    self.name = name
    self._lattice = Lattice.cubic(1.0) if lattice is None else lattice
    self.grains = []
    self.grain_map = None
    self.mask = None
    self.voxel_size = 1.0  # unit is voxel by default
    self.vtkmesh = None
def get_number_of_phases(self):
    """Return the number of phases in this microstructure.

    Only a single phase is supported for the moment, so this always returns 1.
    """
    return 1
def get_number_of_grains(self):
    """Return how many grains this microstructure contains."""
    return len(self.grains)
def set_lattice(self, lattice):
    """Attach a crystallographic lattice to this microstructure.

    :param Lattice lattice: an instance of the `Lattice class`.
    """
    self._lattice = lattice
def get_lattice(self):
    """Return the crystallographic lattice associated with this microstructure.

    :return: an instance of the `Lattice class`.
    """
    return self._lattice
def set_grain_map(self, grain_map, voxel_size):
    """Set the grain map for this microstructure.

    :param ndarray grain_map: a 2D or 3D numpy array.
    :param float voxel_size: the size of the voxels in mm unit.
    """
    self.grain_map = grain_map
    self.voxel_size = voxel_size
def set_mask(self, mask, voxel_size):
    """Set the mask for this microstructure.

    :param ndarray mask: a 2D or 3D numpy array.
    :param float voxel_size: the size of the voxels in mm unit.
    """
    self.mask = mask
    self.voxel_size = voxel_size
@staticmethod
def random_texture(n=100):
    """Generate a random texture microstructure.

    :param int n: the number of grain orientations in the microstructure (100 by default).
    :returns: a new `Microstructure` instance holding n randomly oriented grains.
    """
    m = Microstructure(name='random_texture')
    m.grains = [Grain(i + 1, Orientation.random()) for i in range(n)]
    return m
@staticmethod
def rand_cmap(N=4096, first_is_black=False):
    """Create a random color map.

    The first color can be enforced to black and usually figure out the background.
    The random seed is fixed to consistently produce the same colormap.

    :param int N: the number of colors (4096 by default).
    :param bool first_is_black: force the first color to be black.
    :returns: a matplotlib `ListedColormap`.
    """
    np.random.seed(13)  # fixed seed so every call yields the same colormap
    random_colors = np.random.rand(N, 3)
    if first_is_black:
        random_colors[0] = [0., 0., 0.]  # enforce black background (value 0)
    return colors.ListedColormap(random_colors)
def ipf_cmap(self):
    """
    Return a colormap with ipf colors.

    The colormap has 4096 entries; the entry at each grain id carries that
    grain's ipf colour, every other entry is black.

    :returns: a matplotlib `ListedColormap`.
    """
    # (a previously computed local with the number of grains was unused)
    ipf_colors = np.zeros((4096, 3))
    for g in self.grains:
        ipf_colors[g.id, :] = g.orientation.get_ipf_colour()
    return colors.ListedColormap(ipf_colors)
@staticmethod
def from_xml(xml_file_name, grain_ids=None, verbose=False):
    """Load a Microstructure object from an xml file.

    It is possible to restrict the grains which are loaded by providing
    the list of ids of the grains of interest.

    :param str xml_file_name: the path to the xml file.
    :param list grain_ids: restrict loading to these grain ids (all by default).
    :param bool verbose: activate verbose mode.
    :returns: a new `Microstructure` instance.
    """
    if verbose and grain_ids:
        print('loading only grain ids %s' % grain_ids)
    micro = Microstructure()
    root = parse(xml_file_name).childNodes[0]
    micro.name = root.childNodes[0].childNodes[0].nodeValue
    for node in root.childNodes[1].childNodes:
        this_id = int(node.childNodes[0].childNodes[0].nodeValue)
        if grain_ids and this_id not in grain_ids:
            continue
        if verbose:
            print(node)
        micro.grains.append(Grain.from_xml(node, verbose))
    return micro
def get_grain(self, gid):
    """Get a particular grain given its id.

    This method browses the microstructure and return the grain
    corresponding to the given id. If the grain is not found, the
    method raises a `ValueError`.

    :param gid: the grain id.
    :returns: the `Grain` with the corresponding id.
    :raise ValueError: if no grain with this id exists.
    """
    for candidate in self.grains:
        if candidate.id == gid:
            return candidate
    raise ValueError('grain %d not found in the microstructure' % gid)
def __repr__(self):
    """Provide a string representation of the class."""
    s = '%s\n' % self.__class__.__name__
    s += '* name: %s\n' % self.name
    for g in self.grains:
        # the method must be called: interpolating the bound method object
        # itself printed '<bound method ...>' instead of the grain description
        s += '* %s' % repr(g)
    return s
def SetVtkMesh(self, mesh):
    """Attach a vtk mesh to this microstructure.

    :param mesh: the vtk mesh object to attach.
    """
    self.vtkmesh = mesh
@staticmethod
def match_grains(micro1, micro2, use_grain_ids=None, verbose=False):
return micro1.match_grains(micro2, use_grain_ids=use_grain_ids, verbose=verbose)
def match_grains(self, micro2, mis_tol=1, use_grain_ids=None, verbose=False):
    """Match grains from a second microstructure to this microstructure.

    This function try to find pair of grains based on their orientations.

    .. warning::

      This function works only for microstructures with the same symmetry.

    :param micro2: the second instance of `Microstructure` from which to match grains.
    :param float mis_tol: the tolerance is misorientation to use to detect matches (in degrees).
    :param list use_grain_ids: a list of ids to restrict the grains in which to search for matches.
    :param bool verbose: activate verbose mode.
    :raise ValueError: if the microstructures do not have the same symmetry.
    :returns tuple: A tuple of three lists holding respectively the matches, the candidates for each match and
        the grains that were unmatched.
    """
    # both microstructures must share the same crystal symmetry for the
    # disorientation computation below to be meaningful
    if not self.get_lattice().get_symmetry() == micro2.get_lattice().get_symmetry():
        raise ValueError('warning, microstructure should have the same symmetry, got: {} and {}'.
                         format(self.get_lattice().get_symmetry(), micro2.get_lattice().get_symmetry()))
    candidates = []  # one list of candidate ids per grain examined
    matched = []  # pairs [id in self, id in micro2]
    unmatched = []  # grain that were not matched within the given tolerance
    # restrict the grain ids to match if needed
    if use_grain_ids:
        grains_to_match = [self.get_grain(gid) for gid in use_grain_ids]
    else:
        grains_to_match = self.grains
    # look at each grain
    for i, g1 in enumerate(grains_to_match):
        cands_for_g1 = []
        # best_mis tracks the smallest misorientation seen so far,
        # starting at the tolerance so only better values are kept
        best_mis = mis_tol
        best_match = -1
        for g2 in micro2.grains:
            # compute disorientation (assumed returned in radians, since it
            # is converted with np.degrees below -- TODO confirm)
            mis, _, _ = g1.orientation.disorientation(g2.orientation, crystal_structure=self.get_lattice().get_symmetry())
            misd = np.degrees(mis)
            if misd < mis_tol:
                if verbose:
                    print('grain %3d -- candidate: %3d, misorientation: %.2f deg' % (g1.id, g2.id, misd))
                # add this grain to the list of candidates
                cands_for_g1.append(g2.id)
                if misd < best_mis:
                    best_mis = misd
                    best_match = g2.id
        # add our best match or mark this grain as unmatched
        # (note: assumes valid grain ids are strictly positive)
        if best_match > 0:
            matched.append([g1.id, best_match])
        else:
            unmatched.append(g1.id)
        candidates.append(cands_for_g1)
    if verbose:
        print('done with matching')
        print('%d/%d grains were matched ' % (len(matched), len(grains_to_match)))
    return matched, candidates, unmatched
def dilate_grain(self, grain_id, dilation_steps=1, use_mask=False):
    """Dilate a single grain overwriting the neighbors.

    :param int grain_id: the grain id to dilate.
    :param int dilation_steps: the number of dilation steps to apply.
    :param bool use_mask: if True and this microstructure has a mask,
        the dilation will be limited by it.
    """
    initial_volume = (self.grain_map == grain_id).sum()
    # binary image of this grain, dilated by the requested number of steps
    dilated = ndimage.binary_dilation(self.grain_map == grain_id,
                                      iterations=dilation_steps).astype(np.uint8)
    if use_mask and hasattr(self, 'mask'):
        # restrict the dilated region to the sample mask
        dilated *= self.mask.astype(np.uint8)
    # overwrite neighboring labels with this grain id
    self.grain_map[dilated == 1] = grain_id
    final_volume = (self.grain_map == grain_id).sum()
    print('grain %s was dilated by %d voxels' % (grain_id, final_volume - initial_volume))
def dilate_grains(self, dilation_steps=1, dilation_ids=None):
    """Dilate grains to fill the gap between them.

    This code is based on the gtDilateGrains function from the DCT code.
    Note: the neighbour search below indexes three axes, so a 3D grain
    map is assumed here -- TODO confirm the claimed 2D support.

    :param int dilation_steps: the number of dilation steps to apply.
    :param list dilation_ids: a list to restrict the dilation to the given ids.
    :raise ValueError: if the microstructure has no associated grain map.
    """
    from scipy import ndimage  # hoisted out of the loop
    if not hasattr(self, 'grain_map'):
        # note: the original code had an unreachable `return` after this raise
        raise ValueError('microstructure %s must have an associated grain_map attribute' % self.name)
    grain_map = self.grain_map.copy()
    # get rid of overlap regions flagged by -1
    grain_map[grain_map == -1] = 0
    # carry out dilation in iterative steps
    for step in range(dilation_steps):
        if dilation_ids:
            # cast to uint8 for consistency with the else branch (np.isin returns bool)
            grains = np.isin(grain_map, dilation_ids).astype(np.uint8)
        else:
            grains = (grain_map > 0).astype(np.uint8)
        # ndimage.morphology was deprecated and removed in scipy 1.15,
        # use the top-level binary_dilation instead
        grains_dil = ndimage.binary_dilation(grains).astype(np.uint8)
        if hasattr(self, 'mask'):
            # only dilate within the mask
            grains_dil *= self.mask.astype(np.uint8)
        # voxels gained by this dilation step
        todo = (grains_dil - grains)
        X, Y, Z = np.where(todo)
        # neighbourhood bounds for each voxel to assign, clipped to the volume
        xstart = np.maximum(X - 1, 0)
        ystart = np.maximum(Y - 1, 0)
        zstart = np.maximum(Z - 1, 0)
        xend = np.minimum(X + 1, grain_map.shape[0] - 1)
        yend = np.minimum(Y + 1, grain_map.shape[1] - 1)
        zend = np.minimum(Z + 1, grain_map.shape[2] - 1)
        dilation = np.zeros_like(X).astype(np.int16)
        print('%d voxels to replace' % len(X))
        for i in range(len(X)):
            neighbours = grain_map[xstart[i]:xend[i] + 1, ystart[i]:yend[i] + 1, zstart[i]:zend[i] + 1]
            if np.any(neighbours):
                # at least one neighboring voxel is non zero:
                # assign the smallest positive neighbouring label
                dilation[i] = min(neighbours[neighbours > 0])
        grain_map[X, Y, Z] = dilation
        print('dilation step %d done' % (step + 1))
    # finally assign the dilated grain map to the microstructure
    self.grain_map = grain_map
def compute_grain_center(self, gid):
    """Compute the center of mass of a grain given its id.

    :param int gid: the grain id to consider.
    :return: a 3 component array with the center of mass in mm units
        (or voxel if the voxel_size is not specified), expressed relative
        to the center of the grain map volume.
    :raise ValueError: if the grain is not present in the grain map.
    """
    # isolate the grain within the complete grain map
    slices = ndimage.find_objects(self.grain_map == gid)
    if not len(slices) > 0:
        raise ValueError('warning grain %d not found in grain map' % gid)
    sl = slices[0]
    offset = np.array([sl[0].start, sl[1].start, sl[2].start])
    grain_data_bin = (self.grain_map[sl] == gid).astype(np.uint8)
    # ndimage.measurements was deprecated and removed in scipy 1.15,
    # use the top-level center_of_mass function instead
    local_com = ndimage.center_of_mass(grain_data_bin)
    # express the center of mass relative to the center of the volume
    com = self.voxel_size * (offset + local_com - 0.5 * np.array(self.grain_map.shape))
    return com
def recompute_grain_centers(self, verbose=False):
    """Compute and assign the center of all grains using the grain map.

    Each grain center is computed using its center of mass. The value is
    assigned to the grain `center` attribute. If the voxel size is
    specified, the grain centers will be in mm unit, if not in voxel unit.

    .. note::

      A grain map need to be associated with this microstructure
      instance for the method to run.

    :param bool verbose: flag for verbose mode.
    """
    if not hasattr(self, 'grain_map'):
        print('warning: need a grain map to recompute the center of mass of the grains')
        return
    for grain in self.grains:
        try:
            center = self.compute_grain_center(grain.id)
        except ValueError:
            # grain absent from the grain map: leave its center untouched
            print('skipping grain %d' % grain.id)
            continue
        if verbose:
            print('grain %d center: %.3f, %.3f, %.3f' % (grain.id, center[0], center[1], center[2]))
        grain.center = center
def print_zset_material_block(self, mat_file, grain_prefix='_ELSET'):
    """Outputs the material block corresponding to this microstructure for
    a finite element calculation with z-set.

    The elset list is written to a file named 'elset_list.txt' in the
    current directory, one line per grain.

    :param str mat_file: The name of the file where the material behaviour is located
    :param str grain_prefix: The grain prefix used to name the elsets
        corresponding to the different grains
    """
    line_format = (' **elset %s%d *file %s *integration '
                   'theta_method_a 1.0 1.e-9 150 *rotation %7.3f %7.3f %7.3f\n')
    with open('elset_list.txt', 'w') as f:
        for g in self.grains:
            o = g.orientation
            f.write(line_format % (grain_prefix, g.id, mat_file,
                                   o.phi1(), o.Phi(), o.phi2()))
def to_h5(self):
    """Write the microstructure as a hdf5 file.

    The file is named '<name>.h5' and stores the crystal structure
    (EnsembleData group), the per grain data (FeatureData group) and,
    when available, the grain map and mask volumes (CellData group).
    """
    import time
    from pymicro import __version__ as pymicro_version
    print('opening file %s.h5 for writing' % self.name)
    f = h5py.File('%s.h5' % self.name, 'w')
    # file level attributes
    # np.string_ was removed in numpy 2.0; np.bytes_ is the same type
    f.attrs['Pymicro_Version'] = np.bytes_(pymicro_version)
    f.attrs['HDF5_Version'] = h5py.version.hdf5_version
    f.attrs['h5py_version'] = h5py.version.version
    f.attrs['file_time'] = time.time()
    f.attrs['microstructure_name'] = self.name
    if hasattr(self, 'data_dir'):
        f.attrs['data_dir'] = self.data_dir
    # ensemble data
    ed = f.create_group('EnsembleData')
    cs = ed.create_group('CrystalStructure')
    sym = self.get_lattice().get_symmetry()
    cs.attrs['symmetry'] = sym.to_string()
    lp = cs.create_dataset('LatticeParameters',
                           data=np.array(self.get_lattice().get_lattice_parameters(), dtype=np.float32))
    # feature data
    fd = f.create_group('FeatureData')
    # np.int was removed from numpy (1.24); it was an alias for the builtin int
    grain_ids = fd.create_dataset('grain_ids',
                                  data=np.array([g.id for g in self.grains], dtype=int))
    avg_rods = fd.create_dataset('R_vectors',
                                 data=np.array([g.orientation.rod for g in self.grains], dtype=np.float32))
    centers = fd.create_dataset('centers',
                                data=np.array([g.center for g in self.grains], dtype=np.float32))
    # cell data
    cd = f.create_group('CellData')
    if hasattr(self, 'grain_map') and self.grain_map is not None:
        gm = cd.create_dataset('grain_ids', data=self.grain_map, compression='gzip', compression_opts=9)
        gm.attrs['voxel_size'] = self.voxel_size
    if hasattr(self, 'mask') and self.mask is not None:
        ma = cd.create_dataset('mask', data=self.mask, compression='gzip', compression_opts=9)
        ma.attrs['voxel_size'] = self.voxel_size
    print('done writing')
    f.close()
@staticmethod
def from_h5(file_path):
    """Read a microstructure object from a HDF5 file.

    This is the counterpart of `to_h5` and expects the same layout
    (EnsembleData, FeatureData and CellData groups). Note: the original
    definition was missing the `@staticmethod` decorator every sibling
    constructor carries; leftover debug prints were also removed.

    :param str file_path: the path to the file to read.
    :return: the new `Microstructure` instance created from the file.
    """
    with h5py.File(file_path, 'r') as f:
        micro = Microstructure(name=f.attrs['microstructure_name'])
        if 'symmetry' in f['EnsembleData/CrystalStructure'].attrs:
            sym = f['EnsembleData/CrystalStructure'].attrs['symmetry']
            parameters = f['EnsembleData/CrystalStructure/LatticeParameters'][()]
            micro.set_lattice(Lattice.from_symmetry(Symmetry.from_string(sym), parameters))
        if 'data_dir' in f.attrs:
            micro.data_dir = f.attrs['data_dir']
        # load feature data
        if 'R_vectors' in f['FeatureData']:
            avg_rods = f['FeatureData/R_vectors'][()]
            if 'grain_ids' in f['FeatureData']:
                grain_ids = f['FeatureData/grain_ids'][()]
            else:
                # no ids stored: number the grains from 1
                grain_ids = range(1, 1 + avg_rods.shape[0])
            if 'centers' in f['FeatureData']:
                centers = f['FeatureData/centers'][()]
            else:
                centers = np.zeros_like(avg_rods)
            for i in range(avg_rods.shape[0]):
                g = Grain(grain_ids[i], Orientation.from_rodrigues(avg_rods[i, :]))
                g.center = centers[i]
                micro.grains.append(g)
        # load cell data
        if 'grain_ids' in f['CellData']:
            micro.grain_map = f['CellData/grain_ids'][()]
            if 'voxel_size' in f['CellData/grain_ids'].attrs:
                micro.voxel_size = f['CellData/grain_ids'].attrs['voxel_size']
        if 'mask' in f['CellData']:
            micro.mask = f['CellData/mask'][()]
            if 'voxel_size' in f['CellData/mask'].attrs:
                micro.voxel_size = f['CellData/mask'].attrs['voxel_size']
        return micro
def to_dream3d(self):
    """Write the microstructure as a hdf5 file compatible with DREAM3D.

    The file is named '<name>.h5' and follows the DREAM3D 6.x layout:
    an empty Pipeline group, a DataContainers group holding the
    EnsembleData and FeatureData attribute matrices, and an (unknown)
    geometry group.
    """
    import time
    f = h5py.File('%s.h5' % self.name, 'w')
    # np.string_ was removed in numpy 2.0; np.bytes_ is the same type
    f.attrs['FileVersion'] = np.bytes_('7.0')
    f.attrs['DREAM3D Version'] = np.bytes_('6.1.77.d28a796')
    f.attrs['HDF5_Version'] = h5py.version.hdf5_version
    f.attrs['h5py_version'] = h5py.version.version
    f.attrs['file_time'] = time.time()
    # pipeline group (empty here)
    pipeline = f.create_group('Pipeline')
    pipeline.attrs['Number_Filters'] = np.int32(0)
    # create the data container group
    data_containers = f.create_group('DataContainers')
    m = data_containers.create_group('DataContainer')
    # ensemble data
    ed = m.create_group('EnsembleData')
    ed.attrs['AttributeMatrixType'] = np.uint32(11)
    ed.attrs['TupleDimensions'] = np.uint64(2)
    cryst_structure = ed.create_dataset('CrystalStructures', data=np.array([[999], [1]], dtype=np.uint32))
    cryst_structure.attrs['ComponentDimensions'] = np.uint64(1)
    cryst_structure.attrs['DataArrayVersion'] = np.int32(2)
    cryst_structure.attrs['ObjectType'] = np.bytes_('DataArray<uint32_t>')
    cryst_structure.attrs['Tuple Axis Dimensions'] = np.bytes_('x=2')
    cryst_structure.attrs['TupleDimensions'] = np.uint64(2)
    mat_name = ed.create_dataset('MaterialName', data=[a.encode('utf8') for a in ['Invalid Phase', 'Unknown']])
    mat_name.attrs['ComponentDimensions'] = np.uint64(1)
    mat_name.attrs['DataArrayVersion'] = np.int32(2)
    mat_name.attrs['ObjectType'] = np.bytes_('StringDataArray')
    mat_name.attrs['Tuple Axis Dimensions'] = np.bytes_('x=2')
    mat_name.attrs['TupleDimensions'] = np.uint64(2)
    # feature data
    fd = m.create_group('FeatureData')
    fd.attrs['AttributeMatrixType'] = np.uint32(7)
    fd.attrs['TupleDimensions'] = np.uint64(len(self.grains))
    avg_euler = fd.create_dataset('AvgEulerAngles',
                                  data=np.array([g.orientation.euler for g in self.grains], dtype=np.float32))
    avg_euler.attrs['ComponentDimensions'] = np.uint64(3)
    avg_euler.attrs['DataArrayVersion'] = np.int32(2)
    avg_euler.attrs['ObjectType'] = np.bytes_('DataArray<float>')
    avg_euler.attrs['Tuple Axis Dimensions'] = np.bytes_('x=%d' % len(self.grains))
    avg_euler.attrs['TupleDimensions'] = np.uint64(len(self.grains))
    # geometry
    geom = m.create_group('_SIMPL_GEOMETRY')
    geom.attrs['GeometryType'] = np.uint32(999)
    geom.attrs['GeometryTypeName'] = np.bytes_('UnkownGeometry')
    # create the data container bundles group
    f.create_group('DataContainerBundles')
    f.close()
@staticmethod
def from_dream3d(file_path, main_key='DataContainers', data_container='DataContainer', grain_data='FeatureData',
                 grain_orientations='AvgEulerAngles', orientation_type='euler', grain_centroid='Centroids'):
    """Read a microstructure from a hdf5 file.

    :param str file_path: the path to the hdf5 file to read.
    :param str main_key: the string describing the root key.
    :param str data_container: the string describing the data container group in the hdf5 file.
    :param str grain_data: the string describing the grain data group in the hdf5 file.
    :param str grain_orientations: the string describing the average grain orientations in the hdf5 file.
    :param str orientation_type: the string describing the descriptor used for orientation data,
        one of 'euler' or 'rodrigues'.
    :param str grain_centroid: the string describing the grain centroid in the hdf5 file.
    :return: a `Microstructure` instance created from the hdf5 file.
    :raise ValueError: if `orientation_type` is not supported.
    """
    if orientation_type not in ('euler', 'rodrigues'):
        # the original code raised a confusing NameError in this case
        raise ValueError('unsupported orientation type: %s' % orientation_type)
    micro = Microstructure()
    with h5py.File(file_path, 'r') as f:
        grain_data_path = '%s/%s/%s' % (main_key, data_container, grain_data)
        # the Dataset.value attribute was removed in h5py 3.0, use [()] instead
        orientations = f[grain_data_path][grain_orientations][()]
        if grain_centroid:
            centroids = f[grain_data_path][grain_centroid][()]
            offset = 0
            if len(centroids) < len(orientations):
                offset = 1  # if grain 0 has not a centroid
        for i in range(len(orientations)):
            if orientations[i, 0] == 0. and orientations[i, 1] == 0. and orientations[i, 2] == 0.:
                # skip grain 0 which is always (0., 0., 0.)
                print('skipping (0., 0., 0.)')
                continue
            if orientation_type == 'euler':
                # assumes angles are stored in radians in the file -- TODO confirm
                g = Grain(i, Orientation.from_euler(orientations[i] * 180 / np.pi))
            else:  # rodrigues
                g = Grain(i, Orientation.from_rodrigues(orientations[i]))
            if grain_centroid:
                g.center = centroids[i - offset]
            micro.grains.append(g)
    return micro
@staticmethod
def from_dct(data_dir='.', grain_file='index.mat', vol_file='phase_01_vol.mat', mask_file='volume_mask.mat',
             use_dct_path=True, verbose=True):
    """Create a microstructure from a DCT reconstruction.

    DCT reconstructions are stored in several files. The indexed grain
    informations are stored in a matlab file in the '4_grains/phase_01'
    folder. Then, the reconstructed volume file (labeled image) is stored
    in the '5_reconstruction' folder as an hdf5 file, possibly stored
    alongside a mask file coming from the absorption reconstruction.

    :param str data_dir: the path to the folder containing the reconstruction data.
    :param str grain_file: the name of the file containing grains info.
    :param str vol_file: the name of the volume file.
    :param str mask_file: the name of the mask file.
    :param bool use_dct_path: if True, the grain_file should be located in 4_grains/phase_01 folder and the
        vol_file and mask_file in the 5_reconstruction folder.
    :param bool verbose: activate verbose mode.
    :return: a `Microstructure` instance created from the DCT reconstruction.
    :raise ValueError: if the grain file cannot be located.
    """
    if data_dir == '.':
        data_dir = os.getcwd()
    if data_dir.endswith(os.sep):
        data_dir = data_dir[:-1]
    # the scan name is the last folder in the path
    scan = data_dir.split(os.sep)[-1]
    print('creating microstructure for DCT scan %s' % scan)
    micro = Microstructure(name=scan)
    micro.data_dir = data_dir
    if use_dct_path:
        index_path = os.path.join(data_dir, '4_grains', 'phase_01', grain_file)
    else:
        index_path = os.path.join(data_dir, grain_file)
    print(index_path)
    if not os.path.exists(index_path):
        # note: the original code had an unreachable `return None` after this raise
        raise ValueError('%s not found, please specify a valid path to the grain file.' % index_path)
    from scipy.io import loadmat
    index = loadmat(index_path)
    # the numeric indices below follow the matlab 'cryst' cell layout -- TODO confirm
    micro.voxel_size = index['cryst'][0][0][25][0][0]
    # grab the crystal lattice
    lattice_params = index['cryst'][0][0][3][0]
    sym = Symmetry.from_string(index['cryst'][0][0][7][0])
    print('creating crystal lattice {} ({}) with parameters {}'.format(index['cryst'][0][0][0][0], sym, lattice_params))
    lattice_params[:3] /= 10  # angstrom to nm
    lattice = Lattice.from_parameters(*lattice_params, symmetry=sym)
    micro.set_lattice(lattice)
    # add all grains to the microstructure
    for i in range(len(index['grain'][0])):
        gid = index['grain'][0][i][0][0][0][0][0]
        rod = index['grain'][0][i][0][0][3][0]
        g = Grain(gid, Orientation.from_rodrigues(rod))
        g.center = index['grain'][0][i][0][0][15][0]
        micro.grains.append(g)
    # load the grain map if available
    if use_dct_path:
        grain_map_path = os.path.join(data_dir, '5_reconstruction', vol_file)
    else:
        grain_map_path = os.path.join(data_dir, vol_file)
    if os.path.exists(grain_map_path):
        with h5py.File(grain_map_path, 'r') as f:
            # because how matlab writes the data, we need to swap X and Z axes in the DCT volume
            micro.grain_map = f['vol'][()].transpose(2, 1, 0)
            if verbose:
                print('loaded grain ids volume with shape: {}'.format(micro.grain_map.shape))
    # load the mask if available
    if use_dct_path:
        mask_path = os.path.join(data_dir, '5_reconstruction', mask_file)
    else:
        mask_path = os.path.join(data_dir, mask_file)
    if os.path.exists(mask_path):
        with h5py.File(mask_path, 'r') as f:
            micro.mask = f['vol'][()].transpose(2, 1, 0).astype(np.uint8)
            if verbose:
                print('loaded mask volume with shape: {}'.format(micro.mask.shape))
    return micro
def to_xml(self, doc):
    """Returns an XML representation of the Microstructure instance.

    A 'Microstructure' root element is appended to the document with a
    'Name' child and a 'Grains' child containing one node per grain.

    :param doc: the `xml.dom.minidom.Document` instance to populate.
    """
    root = doc.createElement('Microstructure')
    doc.appendChild(root)
    name_element = doc.createElement('Name')
    root.appendChild(name_element)
    name_element.appendChild(doc.createTextNode(self.name))
    grains_element = doc.createElement('Grains')
    root.appendChild(grains_element)
    for index, grain in enumerate(self.grains):
        # each grain references its own vtu file
        vtu_name = os.path.join(self.name, '%s_%d.vtu' % (self.name, index))
        grains_element.appendChild(grain.to_xml(doc, vtu_name))
def save(self):
"""Saving the microstructure to the disk.
Save the metadata as a XML file and when available, also save the
vtk representation of the grains.
"""
# save the microstructure instance as xml
doc = Document()
self.to_xml(doc)
xml_file_name = '%s.xml' % self.name
print('writing ' + xml_file_name)
f = open(xml_file_name, 'wb')
doc.writexml(f, encoding='utf-8')
f.close()
# now save the vtk representation
if self.vtkmesh != None:
import vtk
vtk_file_name = '%s.vtm' % self.name
print('writing ' + vtk_file_name)
writer = vtk.vtkXMLMultiBlockDataWriter()
writer.SetFileName(vtk_file_name)
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
writer.SetInputData(self.vtkmesh)
else:
writer.SetInput(self.vtkmesh)
writer.Write()
@staticmethod
def merge_microstructures(micros, overlap, plot=False):
    """Merge two `Microstructure` instances together.

    The function works for two microstructures with grain maps and an
    overlap between them. Temporarily `Microstructures` restricted to the
    overlap regions are created and grains are matched between the two
    based on a disorientation tolerance.

    .. note::

      The two microstructure must have the same crystal lattice and the
      same voxel_size for this method to run.

    :param list micros: a list containing the two microstructures to merge.
    :param int overlap: the overlap to use (in voxels, along the Z axis).
    :param bool plot: a flag to plot some results.
    :return: a new `Microstructure` instance containing the merged microstructure.
    :raise ValueError: if the grain maps, lattices or voxel sizes are missing
        or inconsistent between the two microstructures.
    """
    from scipy import ndimage
    # perform some sanity checks
    for i in range(2):
        if not hasattr(micros[i], 'grain_map'):
            raise ValueError('microstructure instance %s must have an associated grain_map attribute' % micros[i].name)
    if micros[0].get_lattice() != micros[1].get_lattice():
        raise ValueError('both microstructure must have the same crystal lattice')
    lattice = micros[0].get_lattice()
    if micros[0].voxel_size != micros[1].voxel_size:
        raise ValueError('both microstructure must have the same voxel size')
    voxel_size = micros[0].voxel_size
    # create two microstructure of the overlapping regions: end slices in
    # first scan and first slices in second scan
    grain_ids_ol1 = micros[0].grain_map[:, :, micros[0].grain_map.shape[2] - overlap:]
    grain_ids_ol2 = micros[1].grain_map[:, :, :overlap]
    dims_ol1 = np.array(grain_ids_ol1.shape)
    print(dims_ol1)
    dims_ol2 = np.array(grain_ids_ol2.shape)
    print(dims_ol2)
    # build a microstructure for the overlap region in each volumes
    grain_ids_ols = [grain_ids_ol1, grain_ids_ol2]
    micros_ol = []
    for i in range(2):
        grain_ids_ol = grain_ids_ols[i]
        ids_ol = np.unique(grain_ids_ol)
        print(ids_ol)
        # difference due to the crop (restricting the grain map to the overlap region)
        #offset_mm = (2 * i - 1) * voxel_size * np.array([0., 0., grain_ids_ol.shape[2] - 0.5 * micros[i].grain_map.shape[2]])
        # here we use an ad-hoc offset to voxel (0, 0, 0) in the full
        # volume: offset is zero for the second volume
        offset_px = (i - 1) * np.array([0., 0., grain_ids_ol.shape[2] - micros[i].grain_map.shape[2]])
        offset_mm = voxel_size * offset_px
        print('offset [px] is {}'.format(offset_px))
        print('offset [mm] is {}'.format(offset_mm))
        # make the microstructure
        micro_ol = Microstructure(name='%sol_' % micros[i].name)
        print('* building overlap microstructure %s' % micro_ol.name)
        micro_ol.set_lattice(lattice)
        micro_ol.grain_map = grain_ids_ol
        for gid in ids_ol:
            # skip background (0) and overlap flag (-1) labels
            if gid < 1:
                print('skipping %d' % gid)
                continue
            g = Grain(gid, micros[i].get_grain(gid).orientation)
            array_bin = (grain_ids_ol == gid).astype(np.uint8)
            # NOTE(review): ndimage.measurements is deprecated (removed in
            # scipy 1.15); ndimage.center_of_mass is the replacement
            local_com = ndimage.measurements.center_of_mass(array_bin, grain_ids_ol)
            # center of mass relative to the center of the full volume
            com_px = (local_com + offset_px - 0.5 * np.array(micros[i].grain_map.shape))
            com_mm = voxel_size * com_px
            print('grain %2d center: %6.3f, %6.3f, %6.3f' % (gid, com_mm[0], com_mm[1], com_mm[2]))
            g.center = com_mm
            micro_ol.grains.append(g)
        #TODO recalculate position as we look at a truncated volume
        '''
        micro_ol.recompute_grain_centers(verbose=True)
        for g in micro_ol.grains:
            g.center += offset_mm
        '''
        # add the overlap microstructure to the list
        micros_ol.append(micro_ol)
    # match grain from micros_ol[1] to micros_ol[0] (the reference)
    matched, _, unmatched = micros_ol[0].match_grains(micros_ol[1], verbose=True)
    # the affine transform does not seem to work, using a simpler method
    # here: average the center shift over all matched grain pairs
    delta_avg = np.zeros(3)
    for i in range(len(matched)):
        # look at the pair of grains
        match = matched[i]
        delta = micros_ol[0].get_grain(match[0]).center - micros_ol[1].get_grain(match[1]).center
        delta_avg += delta
    delta_avg /= len(matched)
    print('average shift (pixels):')
    print(delta_avg / voxel_size)
    translation = delta_avg
    translation_voxel = (delta_avg / voxel_size).astype(int)
    print('translation is in mm: {}'.format(translation))
    print('translation is in voxels {}'.format(translation_voxel))
    """
    from pymicro.view.vol_utils import compute_affine_transform
    # compute the affine transform
    n_points = len(matched)
    fixed = np.zeros((n_points, 3))
    moving = np.zeros((n_points, 3))
    moved = np.zeros_like(moving)
    # markers in ref grain map
    for i in range(n_points):
        fixed[i] = micros_ol[0].get_grain(matched[i][0]).center
        moving[i] = micros_ol[1].get_grain(matched[i][1]).center
    # call the registration method
    translation, transformation = compute_affine_transform(fixed, moving)
    invt = np.linalg.inv(transformation)
    # check what are now the points after transformation
    fixed_centroid = np.average(fixed, axis=0)
    moving_centroid = np.average(moving, axis=0)
    print('fixed centroid: {}'.format(fixed_centroid))
    print('moving centroid: {}'.format(moving_centroid))
    for j in range(n_points):
        moved[j] = fixed_centroid + np.dot(transformation, moving[j] - moving_centroid)
        print('point %d will move to (%6.3f, %6.3f, %6.3f) to be compared with (%6.3f, %6.3f, %6.3f)' % (
            j, moved[j, 0], moved[j, 1], moved[j, 2], fixed[j, 0], fixed[j, 1], fixed[j, 2]))
    print('transformation is:')
    print(invt)
    # offset and translation, here we only look for rigid body translation
    offset = -np.dot(invt, translation)
    print(translation, offset)
    translation_voxel = (translation / voxel_size).astype(int)
    """
    print(translation_voxel)
    # look at ids in the reference volume
    ids_ref = np.unique(micros[0].grain_map)
    ids_ref_list = ids_ref.tolist()
    if -1 in ids_ref_list:
        ids_ref_list.remove(-1)  # grain overlap
    if 0 in ids_ref_list:
        ids_ref_list.remove(0)  # background
    print(ids_ref_list)
    id_offset = max(ids_ref_list)
    print('grain ids in volume %s will be offset by %d' % (micros[1].name, id_offset))
    # gather ids in the merging volume (will be modified)
    ids_mrg = np.unique(micros[1].grain_map)
    ids_mrg_list = ids_mrg.tolist()
    if -1 in ids_mrg_list:
        ids_mrg_list.remove(-1)  # grain overlap
    if 0 in ids_mrg_list:
        ids_mrg_list.remove(0)  # background
    print(ids_mrg_list)
    # prepare a volume with the same size as the second grain map, with
    # grain ids renumbered and (X, Y) translations applied.
    grain_map_translated = micros[1].grain_map.copy()
    print('renumbering grains in the overlap region of volume %s' % micros[1].name)
    for match in matched:
        ref_id, other_id = match
        print('replacing %d by %d' % (other_id, ref_id))
        #TODO should flag those grains so their center can be recomputed
        grain_map_translated[micros[1].grain_map == other_id] = ref_id
        try:
            ids_mrg_list.remove(other_id)
        except ValueError:
            # this can happen if a grain in reference volume was matched
            # to more than 1 grain
            print('%d was not in list anymore' % other_id)
    # also renumber the rest using the offset
    renumbered_grains = []
    for i, other_id in enumerate(ids_mrg_list):
        new_id = id_offset + i + 1
        grain_map_translated[micros[1].grain_map == other_id] = new_id
        print('replacing %d by %d' % (other_id, new_id))
        renumbered_grains.append([other_id, new_id])
    # apply translation along the (X, Y) axes
    grain_map_translated = np.roll(grain_map_translated, translation_voxel[:2], (0, 1))
    # slice in the middle of the overlap region, used for visual checks
    check = overlap // 2
    print(grain_map_translated.shape)
    print(overlap)
    print(translation_voxel[2] + check)
    if plot:
        # NOTE(review): slice index 320 below is hard-coded and assumes a
        # sufficiently large volume -- confirm before reuse
        fig = plt.figure(figsize=(15, 7))
        ax1 = fig.add_subplot(1, 3, 1)
        ax1.imshow(micros[0].grain_map[:, :, translation_voxel[2] + check].T, vmin=0)
        plt.axis('off')
        plt.title('micros[0].grain_map (ref)')
        ax2 = fig.add_subplot(1, 3, 2)
        ax2.imshow(grain_map_translated[:, :, check].T, vmin=0)
        plt.axis('off')
        plt.title('micros[1].grain_map (renumbered)')
        ax3 = fig.add_subplot(1, 3, 3)
        same_voxel = micros[0].grain_map[:, :, translation_voxel[2] + check] == grain_map_translated[:, :, check]
        print(same_voxel)
        #print(same_voxel.shape)
        #ax3.imshow(same_voxel.T, vmin=0, vmax=2)
        plt.axis('off')
        plt.title('voxels that are identicals')
        plt.savefig('merging_check1.pdf')
    # start the merging: the first volume is the reference
    overlap = micros[0].grain_map.shape[2] - translation_voxel[2]
    print('overlap is %d voxels' % overlap)
    z_shape = micros[0].grain_map.shape[2] + micros[1].grain_map.shape[2] - overlap
    print('vertical size will be: %d + %d + %d = %d' % (
        micros[0].grain_map.shape[2] - overlap, overlap, micros[1].grain_map.shape[2] - overlap, z_shape))
    shape_merged = np.array(micros[0].grain_map.shape) + [0, 0, micros[1].grain_map.shape[2] - overlap]
    print('initializing volume with shape {}'.format(shape_merged))
    grain_ids_merged = np.zeros(shape_merged, dtype=np.int16)
    print(micros[0].grain_map.shape)
    print(micros[1].grain_map.shape)
    # add the non-overlapping part of the 2 volumes as is
    grain_ids_merged[:, :, :micros[0].grain_map.shape[2] - overlap] = micros[0].grain_map[:, :, :-overlap]
    grain_ids_merged[:, :, micros[0].grain_map.shape[2]:] = grain_map_translated[:, :, overlap:]
    # look at vertices with the same label
    print(micros[0].grain_map[:, :, translation_voxel[2]:].shape)
    print(grain_map_translated[:, :, :overlap].shape)
    print('translation_voxel[2] = %d' % translation_voxel[2])
    print('micros[0].grain_map.shape[2] - overlap = %d' % (micros[0].grain_map.shape[2] - overlap))
    same_voxel = micros[0].grain_map[:, :, translation_voxel[2]:] == grain_map_translated[:, :, :overlap]
    print(same_voxel.shape)
    # voxels agreeing in both volumes keep their (common) label
    grain_ids_merged[:, :, translation_voxel[2]:micros[0].grain_map.shape[2]] = grain_map_translated[:, :, :overlap] * same_voxel
    # look at vertices with a single label
    single_voxels_0 = (micros[0].grain_map[:, :, translation_voxel[2]:] > 0) & (grain_map_translated[:, :, :overlap] == 0)
    print(single_voxels_0.shape)
    grain_ids_merged[:, :, translation_voxel[2]:micros[0].grain_map.shape[2]] += micros[0].grain_map[:, :, translation_voxel[2]:] * single_voxels_0
    single_voxels_1 = (grain_map_translated[:, :, :overlap] > 0) & (micros[0].grain_map[:, :, translation_voxel[2]:] == 0)
    print(single_voxels_1.shape)
    grain_ids_merged[:, :, translation_voxel[2]:micros[0].grain_map.shape[2]] += grain_map_translated[:, :,
                                                                                 :overlap] * single_voxels_1
    if plot:
        fig = plt.figure(figsize=(14, 10))
        ax1 = fig.add_subplot(1, 2, 1)
        ax1.imshow(grain_ids_merged[:, 320, :].T)
        plt.axis('off')
        plt.title('XZ slice')
        ax2 = fig.add_subplot(1, 2, 2)
        ax2.imshow(grain_ids_merged[320, :, :].T)
        plt.axis('off')
        plt.title('YZ slice')
        plt.savefig('merging_check2.pdf')
    if hasattr(micros[0], 'mask') and hasattr(micros[1], 'mask'):
        mask_translated = np.roll(micros[1].mask, translation_voxel[:2], (0, 1))
        # merging the masks with the same strategy as the grain maps
        mask_merged = np.zeros(shape_merged, dtype=np.uint8)
        # add the non-overlapping part of the 2 volumes as is
        mask_merged[:, :, :micros[0].mask.shape[2] - overlap] = micros[0].mask[:, :, :-overlap]
        mask_merged[:, :, micros[0].grain_map.shape[2]:] = mask_translated[:, :, overlap:]
        # look at vertices with the same label
        same_voxel = micros[0].mask[:, :, translation_voxel[2]:] == mask_translated[:, :, :overlap]
        print(same_voxel.shape)
        mask_merged[:, :, translation_voxel[2]:micros[0].mask.shape[2]] = mask_translated[:, :, :overlap] * same_voxel
        # look at vertices with a single label
        single_voxels_0 = (micros[0].mask[:, :, translation_voxel[2]:] > 0) & (mask_translated[:, :, :overlap] == 0)
        mask_merged[:, :, translation_voxel[2]:micros[0].mask.shape[2]] += (
            micros[0].mask[:, :, translation_voxel[2]:] * single_voxels_0).astype(np.uint8)
        single_voxels_1 = (mask_translated[:, :, :overlap] > 0) & (micros[0].mask[:, :, translation_voxel[2]:] == 0)
        mask_merged[:, :, translation_voxel[2]:micros[0].mask.shape[2]] += (
            mask_translated[:, :, :overlap] * single_voxels_1).astype(np.uint8)
        if plot:
            fig = plt.figure(figsize=(14, 10))
            ax1 = fig.add_subplot(1, 2, 1)
            ax1.imshow(mask_merged[:, 320, :].T)
            plt.axis('off')
            plt.title('XZ slice')
            ax2 = fig.add_subplot(1, 2, 2)
            ax2.imshow(mask_merged[320, :, :].T)
            plt.axis('off')
            plt.title('YZ slice')
            plt.savefig('merging_check3.pdf')
    # merging finished, build the new microstructure instance
    merged_micro = Microstructure(name='%s-%s' % (micros[0].name, micros[1].name))
    merged_micro.set_lattice(lattice)
    # add all grains from the reference volume
    merged_micro.grains = micros[0].grains
    #TODO recompute center of masses of grains in the overlap region
    print(renumbered_grains)
    # add all new grains from the merged volume
    for i in range(len(renumbered_grains)):
        other_id, new_id = renumbered_grains[i]
        g = micros[1].get_grain(other_id)
        new_g = Grain(new_id, Orientation.from_rodrigues(g.orientation.rod))
        new_g.center = g.center
        print('adding grain with new id %d (was %d)' % (new_id, other_id))
        merged_micro.grains.append(new_g)
    print('%d grains in merged microstructure' % merged_micro.get_number_of_grains())
    # add the full grain map
    merged_micro.grain_map = grain_ids_merged
    if hasattr(micros[0], 'mask') and hasattr(micros[1], 'mask'):
        merged_micro.mask = mask_merged
    return merged_micro
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities supporting experiments."""
import collections
import contextlib
import functools
import inspect
import itertools
import multiprocessing
import os.path
import shutil
import subprocess
import tempfile
from typing import Dict, Iterable, Iterator, List, Mapping, Optional, Sequence, Union
from absl import flags
from absl import logging
import numpy as np
import pandas as pd
import tensorflow as tf
def iter_grid(
    grid_dict: Mapping[str, Sequence[Union[int, float, str]]]
) -> Iterator[Dict[str, Union[int, float, str]]]:
  """Yields every combination of values in the provided dict-of-lists.

  Keys are visited in sorted order, so the cartesian product is enumerated
  deterministically regardless of the insertion order of `grid_dict`.

  Args:
    grid_dict: A dictionary mapping parameter names to iterables of values.

  Yields:
    `collections.OrderedDict` instances, one per element of the cartesian
    product of the value iterables, keyed by the sorted parameter names.
  """
  sorted_names = sorted(grid_dict)
  value_lists = [grid_dict[name] for name in sorted_names]
  for combination in itertools.product(*value_lists):
    yield collections.OrderedDict(zip(sorted_names, combination))
def atomic_write_to_csv(dataframe: pd.DataFrame,
                        output_file: str,
                        overwrite: bool = True) -> None:
  """Atomically writes `dataframe` to `output_file` as a (possibly zipped) CSV.

  The dataframe is first serialized to a temporary file on the local
  filesystem (at least when writing a zip, `.to_csv()` is not happy taking a
  gfile), then copied next to the final destination so the last step can be
  an atomic rename.

  Args:
    dataframe: A `pandas.DataFrame`.
    output_file: The final output file to write. The output will be
      compressed depending on the filename, see documentation for
      `pandas.DataFrame.to_csv(compression='infer')`.
    overwrite: Whether to overwrite `output_file` if it exists.
  """
  scratch_dir = tempfile.mkdtemp(prefix='atomic_write_to_csv_tmp')
  # Keep the destination's basename so pandas can infer the desired
  # compression from the extension. Files with a .zip extension (but not
  # .bz2, .gzip, or .xv) get unexpected internal filenames due to
  # https://github.com/pandas-dev/pandas/issues/26023, not because of
  # anything done here.
  local_path = os.path.join(scratch_dir, os.path.basename(output_file))
  assert not tf.io.gfile.exists(local_path), 'file [{!s}] exists'.format(local_path)
  dataframe.to_csv(local_path, header=True)
  # Stage the file under a unique temporary name in the destination
  # directory, then atomically rename it into place.
  staged_path = os.path.join(
      os.path.dirname(output_file), '{}.tmp{}'.format(
          os.path.basename(output_file),
          np.random.randint(0, 2**63, dtype=np.int64)))
  tf.io.gfile.copy(src=local_path, dst=staged_path, overwrite=overwrite)
  tf.io.gfile.rename(staged_path, output_file, overwrite=overwrite)
  shutil.rmtree(scratch_dir)
def atomic_read_from_csv(csv_file):
  """Reads a `pandas.DataFrame` from the (possibly zipped) `csv_file`.

  Format note: The CSV is expected to have an index column.

  Args:
    csv_file: A (possibly zipped) CSV file.

  Returns:
    A `pandas.Dataframe`.
  """
  # When reading from a zip, pandas.read_csv() is not happy taking a gfile,
  # so stage the file on the local filesystem first.
  scratch_dir = tempfile.mkdtemp(prefix='atomic_read_from_csv_tmp')
  # Keep the original basename so pandas can infer the compression format
  # from the extension. Files with a .zip extension (but not .bz2, .gzip, or
  # .xv) have unexpected internal filenames due to
  # https://github.com/pandas-dev/pandas/issues/26023, not because of
  # anything done here.
  local_path = os.path.join(scratch_dir, os.path.basename(csv_file))
  assert not tf.io.gfile.exists(local_path), 'file [{!s}] exists'.format(local_path)
  tf.io.gfile.copy(src=csv_file, dst=local_path, overwrite=True)
  frame = pd.read_csv(local_path, index_col=0)
  shutil.rmtree(scratch_dir)
  return frame
def _optimizer_canonical_name(optimizer_cls):
"""Return a short, canonical name for an optimizer for us in flags."""
return optimizer_cls.__name__.lower()
# Registry of the optimizers currently supported by the flag utilities below,
# keyed by canonical (lowercased class) name, e.g. 'sgd' -> tf.keras.optimizers.SGD.
_SUPPORTED_OPTIMIZERS = {
    _optimizer_canonical_name(cls): cls for cls in [
        tf.keras.optimizers.SGD, tf.keras.optimizers.Adagrad,
        tf.keras.optimizers.Adam
    ]
}
def define_optimizer_flags(prefix: str) -> None:
  """Defines flags with `prefix` to configure an optimizer.

  This method is intended to be paired with `create_optimizer_from_flags`
  using the same `prefix`, to allow Python binaries to construct TensorFlow
  optimizers parameterized by commandline flags.

  This creates two new flags:
    * `--<prefix>_optimizer=<optimizer name>`
    * `--<prefix>_learning_rate`

  In addition to a suite of flags for each optimizer:
    * `--<prefix>_<optimizer name>_<constructor_argument>`

  For example, given the prefix "client" this will create flags
  (non-exhaustive list):
    * `--client_optimizer`
    * `--client_learning_rate`
    * `--client_sgd_momentum`
    * `--client_sgd_nesterov`
    * `--client_adam_beta_1`
    * `--client_adam_beta_2`
    * `--client_adam_epsilon`

  Then calls to `create_optimizer_from_flags('client')` will construct an
  optimizer of the type named in `--client_optimizer`, parameterized by the
  flags prefixed with the matching optimizer name. For example, if
  `--client_optimizer=sgd`, `--client_sgd_*` flags will be used.

  IMPORTANT: For flags to be correctly parsed from the commandline, this
  method must be called before `absl.app.run(main)`, and is recommended to
  be called next to other flag definitions at the top of a py_binary.

  Note: This method does not create a flag for `kwargs` of the Optimizer
  constructor. However, `kwargs` can be set using the `overrides` parameter
  of `create_optimizer_from_flags` below.

  Args:
    prefix: A string (possibly empty) indicating which optimizer is being
      configured.
  """
  # Create top-level, non-optimizer specific flags for picking the optimizer
  # type and the learning rate.
  flags.DEFINE_enum(
      name='{!s}_optimizer'.format(prefix),
      default=None,
      enum_values=list(_SUPPORTED_OPTIMIZERS.keys()),
      help='The type of optimizer to construct for `{!s}`'.format(prefix))
  logging.info('Defined new flag: [%s]', '{!s}_optimizer'.format(prefix))
  flags.DEFINE_float(
      name='{!s}_learning_rate'.format(prefix),
      default=None,
      help='Learning rate for optimizer `{!s}`'.format(prefix))
  logging.info('Defined new flag: [%s]', '{!s}_learning_rate'.format(prefix))
  for optimizer_name, optimizer_cls in _SUPPORTED_OPTIMIZERS.items():
    # Pull out the constructor parameters except for `self`.
    constructor_signature = inspect.signature(optimizer_cls.__init__)
    constructor_params = list(constructor_signature.parameters.values())[1:]
    # `optimizer_name` is bound as a default argument so each iteration's
    # closure keeps its own value (avoids the late-binding closure pitfall).
    def prefixed(basename, optimizer_name=optimizer_name):
      if prefix:
        return '{!s}_{!s}_{!s}'.format(prefix, optimizer_name, basename)
      else:
        return '{!s}_{!s}'.format(optimizer_name, basename)
    for param in constructor_params:
      # `learning_rate` already has its own shared flag above; *args and
      # **kwargs cannot be expressed as individual flags.
      if param.name in ['kwargs', 'args', 'learning_rate']:
        continue
      # Infer the flag type from the constructor argument's default value.
      if isinstance(param.default, bool):
        define_flag_fn = flags.DEFINE_bool
      elif isinstance(param.default, float):
        define_flag_fn = flags.DEFINE_float
      elif isinstance(param.default, int):
        define_flag_fn = flags.DEFINE_integer
      elif isinstance(param.default, str):
        define_flag_fn = flags.DEFINE_string
      else:
        raise NotImplementedError('Cannot handle flag [{!s}] of type [{!s}] on '
                                  'optimizers [{!s}]'.format(
                                      param.name, type(param.default),
                                      optimizer_name))
      define_flag_fn(
          name=prefixed(param.name),
          default=param.default,
          help='{!s} argument for the {!s} optimizer.'.format(
              param.name, optimizer_name))
      logging.info('Defined new flag: [%s]', prefixed(param.name))
def create_optimizer_from_flags(
    prefix: str,
    overrides: Optional[Mapping[str, Union[str, float, int, bool]]] = None
) -> tf.keras.optimizers.Optimizer:
  """Returns an optimizer based on prefixed flags.

  This method is intended to be paired with `define_optimizer_flags` using
  the same `prefix`, to allow Python binaries to construct TensorFlow
  optimizers parameterized by commandline flags.

  This method expects at least two flags to have been defined:
    * `--<prefix>_optimizer=<optimizer name>`
    * `--<prefix>_learning_rate`

  In addition to suites of flags for each optimizer:
    * `--<prefix>_<optimizer name>_<constructor_argument>`

  For example, if `prefix='client'` this method first reads the flags:
    * `--client_optimizer`
    * `--client_learning_rate`

  If the optimizer flag is `'sgd'`, then a `tf.keras.optimizer.SGD` optimizer
  is constructed using the values in the flags prefixed with `--client_sgd_`.

  Note: `kwargs` can be set using the `overrides` parameter.

  Args:
    prefix: The same string prefix passed to `define_optimizer_flags`.
    overrides: A mapping of `(string, value)` pairs that should override
      default flag values (but not user specified values from the
      commandline).

  Returns:
    A `tf.keras.optimizers.Optimizer`.

  Raises:
    TypeError: If `overrides` is not a `Mapping`.
    ValueError: If the optimizer flag is unset, names an unsupported
      optimizer, or if flags of non-selected optimizers were set.
  """
  if overrides is not None:
    # BUGFIX: `collections.Mapping` was deprecated since Python 3.3 and
    # removed in 3.10; the ABC lives in `collections.abc`. Imported locally
    # to keep this change self-contained.
    from collections import abc as collections_abc
    if not isinstance(overrides, collections_abc.Mapping):
      raise TypeError(
          '`overrides` must be a value of type `collections.Mapping`, '
          'found type: {!s}'.format(type(overrides)))
  else:
    overrides = {}

  def prefixed(basename):
    return '{}_{}'.format(prefix, basename) if prefix else basename

  optimizer_flag_name = prefixed('optimizer')
  if flags.FLAGS[optimizer_flag_name] is None:
    raise ValueError('Must specify flag --{!s}'.format(optimizer_flag_name))
  optimizer_name = flags.FLAGS[optimizer_flag_name].value
  optimizer_cls = _SUPPORTED_OPTIMIZERS.get(optimizer_name)
  if optimizer_cls is None:
    # To support additional optimizers, implement it as a
    # `tf.keras.optimizers.Optimizer` and add to the `_SUPPORTED_OPTIMIZERS`
    # dict.
    logging.error(
        'Unknown optimizer [%s], known optimziers are [%s]. To add '
        'support for an optimizer, add the optimzier class to the '
        'utils_impl._SUPPORTED_OPTIMIZERS list.', optimizer_name,
        list(_SUPPORTED_OPTIMIZERS.keys()))
    raise ValueError('`{!s}` is not a valid optimizer for flag --{!s}, must be '
                     'one of {!s}. See error log for details.'.format(
                         optimizer_name, optimizer_flag_name,
                         list(_SUPPORTED_OPTIMIZERS.keys())))

  def _has_user_value(flag):
    """Check if a commandline flag has a user set value."""
    return flag.present or flag.value != flag.default

  # Validate that the optimizers that weren't picked don't have flag values
  # set. Settings that won't be used likely means there is an expectation gap
  # between the user and the system and we should notify them.
  unused_flag_prefixes = [
      prefixed(k) for k in _SUPPORTED_OPTIMIZERS.keys() if k != optimizer_name
  ]
  mistakenly_set_flags = []
  for flag_name in flags.FLAGS:
    if not _has_user_value(flags.FLAGS[flag_name]):
      # Flag was not set by the user, skip it.
      continue
    # Otherwise the flag has a value set by the user.
    for unused_prefix in unused_flag_prefixes:
      if flag_name.startswith(unused_prefix):
        mistakenly_set_flags.append(flag_name)
        break
  if mistakenly_set_flags:
    raise ValueError('Commandline flags for optimizers other than [{!s}] '
                     '(value of --{!s}) are set. These would be ignored, '
                     'were the flags set by mistake? Flags: {!s}'.format(
                         optimizer_name, optimizer_flag_name,
                         mistakenly_set_flags))

  flag_prefix = prefixed(optimizer_name)
  prefix_len = len(flag_prefix) + 1
  # `overrides` is guaranteed non-None at this point (normalized above), so
  # the previous redundant None-check is dropped.
  kwargs = dict(overrides)
  learning_rate_flag = flags.FLAGS[prefixed('learning_rate')]
  if _has_user_value(learning_rate_flag):
    # A user-specified learning rate wins over any `overrides` entry.
    kwargs['learning_rate'] = learning_rate_flag.value
  for flag_name in flags.FLAGS:
    if not flag_name.startswith(flag_prefix):
      continue
    arg_name = flag_name[prefix_len:]
    kwargs[arg_name] = flags.FLAGS[flag_name].value
  return optimizer_cls(**kwargs)
def remove_unused_flags(prefix, hparam_dict):
  """Removes unused optimizer flags with a given prefix.

  This method is intended to be used with `define_optimizer_flags`, and
  strips from `hparam_dict` the entries belonging to optimizers that were
  not selected via `--<prefix>_optimizer`.

  For example, given the prefix "client", `define_optimizer_flags` creates
  flags including `--client_optimizer`, `--client_learning_rate`,
  `--client_sgd_momentum`, `--client_sgd_nesterov`, `--client_adam_beta_1`,
  `--client_adam_beta_2` and `--client_adam_epsilon`. If
  `--client_optimizer=sgd` was set, calling this method with the prefix
  `client` keeps only the pairs associated with `--client_optimizer`,
  `--client_learning_rate`, `--client_sgd_momentum` and
  `--client_sgd_nesterov`.

  Args:
    prefix: A prefix used to define optimizer flags.
    hparam_dict: An ordered dictionary of (string, value) pairs
      corresponding to experiment hyperparameters.

  Returns:
    An ordered dictionary of (string, value) pairs from hparam_dict that
    omits any pairs where string = "<prefix>_<optimizer>*" but <optimizer>
    is not the one set via the flag --<prefix>_optimizer=...
  """

  def prefixed(basename):
    return '{}_{}'.format(prefix, basename) if prefix else basename

  optimizer_key = prefixed('optimizer')
  if optimizer_key not in hparam_dict.keys():
    raise ValueError('The flag {!s} was not defined.'.format(optimizer_key))
  optimizer_name = hparam_dict[optimizer_key]
  if not optimizer_name:
    raise ValueError('The flag {!s} was not set. Unable to determine the '
                     'relevant optimizer.'.format(optimizer_key))
  unused_prefixes = tuple(
      prefixed(name)
      for name in _SUPPORTED_OPTIMIZERS.keys()
      if name != optimizer_name)

  def _is_used_flag(flag_name):
    # Keep any flag that does not begin with the prefix of a non-selected
    # optimizer; this automatically retains flags not of the form
    # <prefix>_<optimizer>*. `str.startswith` accepts a tuple of prefixes.
    return not flag_name.startswith(unused_prefixes)

  return collections.OrderedDict(
      (flag_name, flag_value)
      for flag_name, flag_value in hparam_dict.items()
      if _is_used_flag(flag_name))
# Module-level accumulator of every flag name defined inside a
# `record_hparam_flags` scope; read back via `get_hparam_flags()`.
_all_hparam_flags = []
@contextlib.contextmanager
def record_hparam_flags():
  """Context manager recording flags defined in its scope into a global list.

  Useful for defining hyperparameter flags of an experiment, especially when
  the flags are partitioned across a number of modules. The total list of
  flags defined across modules can then be accessed via `get_hparam_flags()`.

  Example usage:

  ```python
  with record_hparam_flags() as optimizer_hparam_flags:
    flags.DEFINE_string('optimizer', 'sgd', 'Optimizer for training.')
  with record_hparam_flags() as evaluation_hparam_flags:
    flags.DEFINE_string('eval_metric', 'accuracy', 'Metric for evaluation.')
  experiment_hparam_flags = get_hparam_flags().
  ```

  Check `research/optimization/emnist/run_emnist.py` for more usage details.

  Yields:
    A list of the names of all flags newly defined inside the scope; the
    list is populated when the scope exits.
  """
  flags_before = set(iter(flags.FLAGS))
  recorded = []
  yield recorded
  recorded.extend(name for name in flags.FLAGS if name not in flags_before)
  _all_hparam_flags.extend(recorded)
def get_hparam_flags():
  """Returns a list of flags defined within the scope of record_hparam_flags.

  Note: this returns the module-level accumulator itself, not a copy, so
  callers should treat the result as read-only.
  """
  return _all_hparam_flags
@contextlib.contextmanager
def record_new_flags() -> Iterator[List[str]]:
  """A context manager yielding the names of all flags created in its scope.

  This is useful to define all of the flags which should be considered
  hyperparameters of the training run, without needing to repeat them.

  Example usage:

  ```python
  with record_new_flags() as hparam_flags:
    flags.DEFINE_string('exp_name', 'name', 'Unique name for the experiment.')
  ```

  Check `research/emnist/run_experiment.py` for more details about the usage.

  Yields:
    A list of the names of all newly created flags; the list is populated
    when the scope exits.
  """
  preexisting = set(iter(flags.FLAGS))
  created = []
  yield created
  for flag_name in flags.FLAGS:
    if flag_name not in preexisting:
      created.append(flag_name)
def lookup_flag_values(flag_list: Iterable[str]) -> collections.OrderedDict:
  """Returns an OrderedDict mapping each flag name in `flag_list` to its value.

  Args:
    flag_list: An iterable of flag name strings.

  Returns:
    A `collections.OrderedDict` of (flag_name, flag_value) pairs, in the
    iteration order of `flag_list`.

  Raises:
    ValueError: If an entry is not a string, or is not a defined flag.
  """
  values = collections.OrderedDict()
  for name in flag_list:
    if not isinstance(name, str):
      raise ValueError(
          'All flag names must be strings. Flag {} was of type {}.'.format(
              name, type(name)))
    if name not in flags.FLAGS:
      raise ValueError('"{}" is not a defined flag.'.format(name))
    values[name] = flags.FLAGS[name].value
  return values
def hparams_to_str(wid: int,
                   param_dict: Mapping[str, str],
                   short_names: Optional[Mapping[str, str]] = None) -> str:
  """Flattens work-unit hyperparameters into a single string.

  Used as mapping function for the WorkUnitCustomiser.

  Args:
    wid: Work unit id, int type.
    param_dict: A dict of parameters.
    short_names: A dict of mappings of parameter names.

  Returns:
    The hparam string.

  Raises:
    ValueError: If the resulting string exceeds 170 characters.
  """
  if not param_dict:
    return str(wid)
  short_names = short_names or {}
  pairs = [
      '{}={}'.format(short_names.get(key, key), str(value))
      for key, value in sorted(param_dict.items())
  ]
  result = '{}-{}'.format(str(wid), ','.join(pairs))
  # Escape some special characters.
  for old, new in (('\n', ','), (':', '='), ('\'', ''), ('"', '')):
    result = result.replace(old, new)
  for forbidden in ('\\', '/', '[', ']', '(', ')', '{', '}', '%'):
    result = result.replace(forbidden, '-')
  if len(result) > 170:
    raise ValueError(
        'hparams_str string is too long ({}). You can input a short_name dict '
        'to map the long parameter name to a short name. For example, '
        ' launch_experiment(executable, grid_iter, '
        ' {{server_learning_rate: s_lr}}) \n'
        'Received: {}'.format(len(result), result))
  return result
def launch_experiment(executable: str,
                      grid_iter: Iterable[Mapping[str, Union[int, float, str]]],
                      root_output_dir: str = '/tmp/exp',
                      short_names: Optional[Mapping[str, str]] = None,
                      max_workers: int = 1):
  """Launches grid-search experiments in parallel or sequentially.

  Example usage:

  ```python
  grid_iter = iter_grid({'a': [1, 2], 'b': [4.0, 5.0]})
  launch_experiment('run_exp.py', grid_iter)
  ```

  Args:
    executable: An executable which takes flags --root_output_dir
      and --exp_name, e.g., `bazel run //research/emnist:run_experiment --`.
    grid_iter: A sequence of dictionaries with keys from grid, and values
      corresponding to all combinations of items in the corresponding
      iterables.
    root_output_dir: The directory where all outputs are stored.
    short_names: Short name mapping for the parameter name used if parameter
      string length is too long.
    max_workers: The max number of commands to run in parallel.
  """
  short_names = short_names or {}
  commands = []
  for idx, param_dict in enumerate(grid_iter):
    arg_list = [
        '--{}={}'.format(key, str(value))
        for key, value in sorted(param_dict.items())
    ]
    exp_name = hparams_to_str(idx, param_dict, short_names)
    arg_list.append('--root_output_dir={}'.format(root_output_dir))
    arg_list.append('--exp_name={}'.format(exp_name))
    commands.append('{} {}'.format(executable, ' '.join(arg_list)))
  # Fan the commands out over a worker pool; each worker shells out to the
  # executable and waits for it to finish.
  pool = multiprocessing.Pool(processes=max_workers)
  run_command = functools.partial(subprocess.call, shell=True)
  for command in commands:
    pool.apply_async(run_command, (command,))
  pool.close()
  pool.join()
Skip unnecessary local copies by using a binary GFile object and explicitly specifying compression.
PiperOrigin-RevId: 331007906
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities supporting experiments."""
import collections
import contextlib
import functools
import inspect
import itertools
import multiprocessing
import os.path
import shutil
import subprocess
import tempfile
from typing import Dict, Iterable, Iterator, List, Mapping, Optional, Sequence, Union
from absl import flags
from absl import logging
import numpy as np
import pandas as pd
import tensorflow as tf
def iter_grid(
    grid_dict: Mapping[str, Sequence[Union[int, float, str]]]
) -> Iterator[Dict[str, Union[int, float, str]]]:
  """Iterates over all combinations of values in the provided dict-of-lists.

  Keys are visited in sorted order so the enumeration is deterministic.

  Args:
    grid_dict: A dictionary of iterables.

  Yields:
    `collections.OrderedDict`s with keys from `grid_dict` and values
    covering every combination of items in the corresponding iterables.
  """
  ordered = collections.OrderedDict(sorted(grid_dict.items()))
  keys = list(ordered.keys())
  for combo in itertools.product(*ordered.values()):
    yield collections.OrderedDict(zip(keys, combo))
def atomic_write_to_csv(dataframe: pd.DataFrame,
                        output_file: str,
                        overwrite: bool = True) -> None:
  """Atomically writes `dataframe` to `output_file` as a (possibly zipped) CSV.

  The dataframe is first serialized to a temporary file on the local
  filesystem (at least when writing a zip, `.to_csv()` is not happy taking a
  gfile), then copied next to the final destination so the last step can be
  an atomic rename.

  Args:
    dataframe: A `pandas.DataFrame`.
    output_file: The final output file to write. The output will be
      compressed depending on the filename, see documentation for
      `pandas.DataFrame.to_csv(compression='infer')`.
    overwrite: Whether to overwrite `output_file` if it exists.
  """
  scratch_dir = tempfile.mkdtemp(prefix='atomic_write_to_csv_tmp')
  # Keep the destination's basename so pandas can infer the desired
  # compression from the extension. Files with a .zip extension (but not
  # .bz2, .gzip, or .xv) get unexpected internal filenames due to
  # https://github.com/pandas-dev/pandas/issues/26023, not because of
  # anything done here.
  local_path = os.path.join(scratch_dir, os.path.basename(output_file))
  assert not tf.io.gfile.exists(local_path), 'file [{!s}] exists'.format(local_path)
  dataframe.to_csv(local_path, header=True)
  # Stage the file under a unique temporary name in the destination
  # directory, then atomically rename it into place.
  staged_path = os.path.join(
      os.path.dirname(output_file), '{}.tmp{}'.format(
          os.path.basename(output_file),
          np.random.randint(0, 2**63, dtype=np.int64)))
  tf.io.gfile.copy(src=local_path, dst=staged_path, overwrite=overwrite)
  tf.io.gfile.rename(staged_path, output_file, overwrite=overwrite)
  shutil.rmtree(scratch_dir)
def atomic_read_from_csv(csv_file):
  """Reads a `pandas.DataFrame` from the (possibly compressed) `csv_file`.

  Format note: The CSV is expected to have an index column.

  Args:
    csv_file: A (possibly compressed) CSV file. The compression codec is
      inferred from the filename extension (`.bz2`, `.zip`, `.gz`, `.xz`);
      any other extension is read as plain text.

  Returns:
    A `pandas.Dataframe`.
  """
  # pandas cannot infer the compression from a file *object* (inference only
  # works on paths), so map the extension to a codec explicitly before
  # handing the binary GFile to read_csv. Reading through the GFile directly
  # avoids staging a local copy of the file. The original code only handled
  # '.bz2' despite the docstring promising zipped files; generalize to the
  # codecs pandas supports.
  extension_to_compression = {
      '.bz2': 'bz2',
      '.zip': 'zip',
      '.gz': 'gzip',
      '.xz': 'xz',
  }
  _, extension = os.path.splitext(csv_file)
  return pd.read_csv(
      tf.io.gfile.GFile(csv_file, mode='rb'),
      compression=extension_to_compression.get(extension),
      engine='c',
      index_col=0)
def _optimizer_canonical_name(optimizer_cls):
"""Return a short, canonical name for an optimizer for us in flags."""
return optimizer_cls.__name__.lower()
# Registry of the optimizers currently supported by the flag utilities below,
# keyed by canonical (lowercased class) name, e.g. 'sgd' -> tf.keras.optimizers.SGD.
_SUPPORTED_OPTIMIZERS = {
    _optimizer_canonical_name(cls): cls for cls in [
        tf.keras.optimizers.SGD, tf.keras.optimizers.Adagrad,
        tf.keras.optimizers.Adam
    ]
}
def define_optimizer_flags(prefix: str) -> None:
  """Defines flags with `prefix` to configure an optimizer.

  This method is intended to be paired with `create_optimizer_from_flags`
  using the same `prefix`, to allow Python binaries to construct TensorFlow
  optimizers parameterized by commandline flags.

  This creates two new flags:
    * `--<prefix>_optimizer=<optimizer name>`
    * `--<prefix>_learning_rate`

  In addition to a suite of flags for each optimizer:
    * `--<prefix>_<optimizer name>_<constructor_argument>`

  For example, given the prefix "client" this will create flags
  (non-exhaustive list):
    * `--client_optimizer`
    * `--client_learning_rate`
    * `--client_sgd_momentum`
    * `--client_sgd_nesterov`
    * `--client_adam_beta_1`
    * `--client_adam_beta_2`
    * `--client_adam_epsilon`

  Then calls to `create_optimizer_from_flags('client')` will construct an
  optimizer of the type named in `--client_optimizer`, parameterized by the
  flags prefixed with the matching optimizer name. For example, if
  `--client_optimizer=sgd`, `--client_sgd_*` flags will be used.

  IMPORTANT: For flags to be correctly parsed from the commandline, this
  method must be called before `absl.app.run(main)`, and is recommended to
  be called next to other flag definitions at the top of a py_binary.

  Note: This method does not create a flag for `kwargs` of the Optimizer
  constructor. However, `kwargs` can be set using the `overrides` parameter
  of `create_optimizer_from_flags` below.

  Args:
    prefix: A string (possibly empty) indicating which optimizer is being
      configured.
  """
  # Create top-level, non-optimizer specific flags for picking the optimizer
  # type and the learning rate.
  flags.DEFINE_enum(
      name='{!s}_optimizer'.format(prefix),
      default=None,
      enum_values=list(_SUPPORTED_OPTIMIZERS.keys()),
      help='The type of optimizer to construct for `{!s}`'.format(prefix))
  logging.info('Defined new flag: [%s]', '{!s}_optimizer'.format(prefix))
  flags.DEFINE_float(
      name='{!s}_learning_rate'.format(prefix),
      default=None,
      help='Learning rate for optimizer `{!s}`'.format(prefix))
  logging.info('Defined new flag: [%s]', '{!s}_learning_rate'.format(prefix))
  for optimizer_name, optimizer_cls in _SUPPORTED_OPTIMIZERS.items():
    # Pull out the constructor parameters except for `self`.
    constructor_signature = inspect.signature(optimizer_cls.__init__)
    constructor_params = list(constructor_signature.parameters.values())[1:]
    # `optimizer_name` is bound as a default argument so each iteration's
    # closure keeps its own value (avoids the late-binding closure pitfall).
    def prefixed(basename, optimizer_name=optimizer_name):
      if prefix:
        return '{!s}_{!s}_{!s}'.format(prefix, optimizer_name, basename)
      else:
        return '{!s}_{!s}'.format(optimizer_name, basename)
    for param in constructor_params:
      # `learning_rate` already has its own shared flag above; *args and
      # **kwargs cannot be expressed as individual flags.
      if param.name in ['kwargs', 'args', 'learning_rate']:
        continue
      # Infer the flag type from the constructor argument's default value.
      if isinstance(param.default, bool):
        define_flag_fn = flags.DEFINE_bool
      elif isinstance(param.default, float):
        define_flag_fn = flags.DEFINE_float
      elif isinstance(param.default, int):
        define_flag_fn = flags.DEFINE_integer
      elif isinstance(param.default, str):
        define_flag_fn = flags.DEFINE_string
      else:
        raise NotImplementedError('Cannot handle flag [{!s}] of type [{!s}] on '
                                  'optimizers [{!s}]'.format(
                                      param.name, type(param.default),
                                      optimizer_name))
      define_flag_fn(
          name=prefixed(param.name),
          default=param.default,
          help='{!s} argument for the {!s} optimizer.'.format(
              param.name, optimizer_name))
      logging.info('Defined new flag: [%s]', prefixed(param.name))
def create_optimizer_from_flags(
    prefix: str,
    overrides: Optional[Mapping[str, Union[str, float, int, bool]]] = None
) -> tf.keras.optimizers.Optimizer:
  """Returns an optimizer based on prefixed flags.

  This method is intended to be paired with `define_optimizer_flags` using
  the same `prefix`, to allow Python binaries to construct TensorFlow
  optimizers parameterized by commandline flags.

  This method expects at least two flags to have been defined:
    * `--<prefix>_optimizer=<optimizer name>`
    * `--<prefix>_learning_rate`

  In addition to suites of flags for each optimizer:
    * `--<prefix>_<optimizer name>_<constructor_argument>`

  For example, if `prefix='client'` this method first reads the flags:
    * `--client_optimizer`
    * `--client_learning_rate`

  If the optimizer flag is `'sgd'`, then a `tf.keras.optimizer.SGD` optimizer
  is constructed using the values in the flags prefixed with `--client_sgd_`.

  Note: `kwargs` can be set using the `overrides` parameter.

  Args:
    prefix: The same string prefix passed to `define_optimizer_flags`.
    overrides: A mapping of `(string, value)` pairs that should override
      default flag values (but not user specified values from the
      commandline).

  Returns:
    A `tf.keras.optimizers.Optimizer`.

  Raises:
    TypeError: If `overrides` is not a `Mapping`.
    ValueError: If the optimizer flag is unset, names an unsupported
      optimizer, or if flags of non-selected optimizers were set.
  """
  if overrides is not None:
    # BUGFIX: `collections.Mapping` was deprecated since Python 3.3 and
    # removed in 3.10; the ABC lives in `collections.abc`. Imported locally
    # to keep this change self-contained.
    from collections import abc as collections_abc
    if not isinstance(overrides, collections_abc.Mapping):
      raise TypeError(
          '`overrides` must be a value of type `collections.Mapping`, '
          'found type: {!s}'.format(type(overrides)))
  else:
    overrides = {}

  def prefixed(basename):
    return '{}_{}'.format(prefix, basename) if prefix else basename

  optimizer_flag_name = prefixed('optimizer')
  if flags.FLAGS[optimizer_flag_name] is None:
    raise ValueError('Must specify flag --{!s}'.format(optimizer_flag_name))
  optimizer_name = flags.FLAGS[optimizer_flag_name].value
  optimizer_cls = _SUPPORTED_OPTIMIZERS.get(optimizer_name)
  if optimizer_cls is None:
    # To support additional optimizers, implement it as a
    # `tf.keras.optimizers.Optimizer` and add to the `_SUPPORTED_OPTIMIZERS`
    # dict.
    logging.error(
        'Unknown optimizer [%s], known optimziers are [%s]. To add '
        'support for an optimizer, add the optimzier class to the '
        'utils_impl._SUPPORTED_OPTIMIZERS list.', optimizer_name,
        list(_SUPPORTED_OPTIMIZERS.keys()))
    raise ValueError('`{!s}` is not a valid optimizer for flag --{!s}, must be '
                     'one of {!s}. See error log for details.'.format(
                         optimizer_name, optimizer_flag_name,
                         list(_SUPPORTED_OPTIMIZERS.keys())))

  def _has_user_value(flag):
    """Check if a commandline flag has a user set value."""
    return flag.present or flag.value != flag.default

  # Validate that the optimizers that weren't picked don't have flag values
  # set. Settings that won't be used likely means there is an expectation gap
  # between the user and the system and we should notify them.
  unused_flag_prefixes = [
      prefixed(k) for k in _SUPPORTED_OPTIMIZERS.keys() if k != optimizer_name
  ]
  mistakenly_set_flags = []
  for flag_name in flags.FLAGS:
    if not _has_user_value(flags.FLAGS[flag_name]):
      # Flag was not set by the user, skip it.
      continue
    # Otherwise the flag has a value set by the user.
    for unused_prefix in unused_flag_prefixes:
      if flag_name.startswith(unused_prefix):
        mistakenly_set_flags.append(flag_name)
        break
  if mistakenly_set_flags:
    raise ValueError('Commandline flags for optimizers other than [{!s}] '
                     '(value of --{!s}) are set. These would be ignored, '
                     'were the flags set by mistake? Flags: {!s}'.format(
                         optimizer_name, optimizer_flag_name,
                         mistakenly_set_flags))

  flag_prefix = prefixed(optimizer_name)
  prefix_len = len(flag_prefix) + 1
  # `overrides` is guaranteed non-None at this point (normalized above), so
  # the previous redundant None-check is dropped.
  kwargs = dict(overrides)
  learning_rate_flag = flags.FLAGS[prefixed('learning_rate')]
  if _has_user_value(learning_rate_flag):
    # A user-specified learning rate wins over any `overrides` entry.
    kwargs['learning_rate'] = learning_rate_flag.value
  for flag_name in flags.FLAGS:
    if not flag_name.startswith(flag_prefix):
      continue
    arg_name = flag_name[prefix_len:]
    kwargs[arg_name] = flags.FLAGS[flag_name].value
  return optimizer_cls(**kwargs)
def remove_unused_flags(prefix, hparam_dict):
  """Removes unused optimizer flags with a given prefix.

  This method is intended to be used with `define_optimizer_flags`, and is used
  to remove elements of hparam_dict associated with unused optimizer flags.

  For example, given the prefix "client", define_optimizer_flags will create
  flags including:
    *  `--client_optimizer`
    *  `--client_learning_rate`
    *  `--client_sgd_momentum`
    *  `--client_sgd_nesterov`
    *  `--client_adam_beta_1`
    *  `--client_adam_beta_2`
    *  `--client_adam_epsilon`

  However, for purposes of recording hyperparameters, we would like to only
  keep those that correspond to the optimizer selected in the flag
  --client_optimizer. For example, if `--client_optimizer=sgd` was set, then
  calling this method with the prefix `client` will remove all pairs in
  hparam_dict except those associated with the flags:
    *  `--client_optimizer`
    *  `--client_learning_rate`
    *  `--client_sgd_momentum`
    *  `--client_sgd_nesterov`

  Args:
    prefix: A prefix used to define optimizer flags.
    hparam_dict: An ordered dictionary of (string, value) pairs corresponding
      to experiment hyperparameters.

  Returns:
    An ordered dictionary of (string, value) pairs from hparam_dict that omits
    any pairs where string = "<prefix>_<optimizer>*" but <optimizer> is not the
    one set via the flag --<prefix>_optimizer=...

  Raises:
    ValueError: If the `<prefix>_optimizer` key is missing or its value is
      falsy (no optimizer was selected).
  """

  def prefixed(basename):
    return '{}_{}'.format(prefix, basename) if prefix else basename

  if prefixed('optimizer') not in hparam_dict.keys():
    raise ValueError('The flag {!s} was not defined.'.format(
        prefixed('optimizer')))

  optimizer_name = hparam_dict[prefixed('optimizer')]
  if not optimizer_name:
    raise ValueError('The flag {!s} was not set. Unable to determine the '
                     'relevant optimizer.'.format(prefixed('optimizer')))

  # Prefixes of flags that belong to optimizers the user did not select.
  # `str.startswith` accepts a tuple of prefixes, so the per-flag filter
  # collapses to a single call.
  unused_prefixes = tuple(
      prefixed(name)
      for name in _SUPPORTED_OPTIMIZERS.keys()
      if name != optimizer_name)

  return collections.OrderedDict(
      (name, value)
      for name, value in hparam_dict.items()
      if not name.startswith(unused_prefixes))
_all_hparam_flags = []


@contextlib.contextmanager
def record_hparam_flags():
  """A context manager that tracks hyperparameter flags created in its scope.

  Any flag defined while the context is active is appended to a module-level
  registry (readable afterwards via `get_hparam_flags()`) and is also made
  available through the yielded list.

  This is useful for defining hyperparameter flags of an experiment, especially
  when the flags are partitioned across a number of modules. The total list of
  flags defined across modules can then be accessed via get_hparam_flags().

  Example usage:
  ```python
  with record_hparam_flags() as optimizer_hparam_flags:
    flags.DEFINE_string('optimizer', 'sgd', 'Optimizer for training.')
  with record_hparam_flags() as evaluation_hparam_flags:
    flags.DEFINE_string('eval_metric', 'accuracy', 'Metric for evaluation.')
  experiment_hparam_flags = get_hparam_flags().
  ```

  Check `research/optimization/emnist/run_emnist.py` for more usage details.

  Yields:
    A list of all newly created flags.
  """
  preexisting = set(iter(flags.FLAGS))
  created = []
  yield created
  # Names present now but not before the `with` body ran are the new flags.
  created.extend(name for name in flags.FLAGS if name not in preexisting)
  _all_hparam_flags.extend(created)


def get_hparam_flags():
  """Returns a list of flags defined within the scope of record_hparam_flags."""
  return _all_hparam_flags
@contextlib.contextmanager
def record_new_flags() -> Iterator[List[str]]:
  """A context manager that returns all flags created in its scope.

  This is useful to define all of the flags which should be considered
  hyperparameters of the training run, without needing to repeat them.

  Example usage:
  ```python
  with record_new_flags() as hparam_flags:
      flags.DEFINE_string('exp_name', 'name', 'Unique name for the experiment.')
  ```

  Check `research/emnist/run_experiment.py` for more details about the usage.

  Yields:
    A list of all newly created flags.
  """
  preexisting = set(iter(flags.FLAGS))
  created = []
  yield created
  # Anything registered with absl since the snapshot is a new flag.
  created.extend(name for name in flags.FLAGS if name not in preexisting)
def lookup_flag_values(flag_list: Iterable[str]) -> collections.OrderedDict:
  """Returns a dictionary of (flag_name, flag_value) pairs for an iterable of flag names."""
  values = collections.OrderedDict()
  for name in flag_list:
    if not isinstance(name, str):
      raise ValueError(
          'All flag names must be strings. Flag {} was of type {}.'.format(
              name, type(name)))
    if name not in flags.FLAGS:
      raise ValueError('"{}" is not a defined flag.'.format(name))
    values[name] = flags.FLAGS[name].value
  return values
def hparams_to_str(wid: int,
                   param_dict: Mapping[str, str],
                   short_names: Optional[Mapping[str, str]] = None) -> str:
  """Flattens a work unit id and its hyperparameters into a single string.

  Used as mapping function for the WorkUnitCustomiser.

  Args:
    wid: Work unit id, int type.
    param_dict: A dict of parameters.
    short_names: A dict of mappings of parameter names.

  Returns:
    The hparam string, of the form '<wid>-<k1>=<v1>,<k2>=<v2>,...' with
    shell-hostile characters sanitized.

  Raises:
    ValueError: If the resulting string is longer than 170 characters.
  """
  if not param_dict:
    return str(wid)
  short_names = short_names or {}
  pairs = [
      '{}={}'.format(short_names.get(key, key), str(value))
      for key, value in sorted(param_dict.items())
  ]
  result = '{}-{}'.format(str(wid), ','.join(pairs))
  # Escape some special characters
  for old, new in (('\n', ','), (':', '='), ('\'', ''), ('"', '')):
    result = result.replace(old, new)
  for unsafe in '\\/[](){}%':
    result = result.replace(unsafe, '-')
  if len(result) > 170:
    raise ValueError(
        'hparams_str string is too long ({}). You can input a short_name dict '
        'to map the long parameter name to a short name. For example, '
        ' launch_experiment(executable, grid_iter, '
        ' {{server_learning_rate: s_lr}}) \n'
        'Received: {}'.format(len(result), result))
  return result
def launch_experiment(executable: str,
                      grid_iter: Iterable[Mapping[str, Union[int, float, str]]],
                      root_output_dir: str = '/tmp/exp',
                      short_names: Optional[Mapping[str, str]] = None,
                      max_workers: int = 1):
  """Launch experiments of grid search in parallel or sequentially.

  Example usage:
  ```python
  grid_iter = iter_grid({'a': [1, 2], 'b': [4.0, 5.0]))
  launch_experiment('run_exp.py', grid_iter)
  ```

  Args:
    executable: An executable which takes flags --root_output_dir
      and --exp_name, e.g., `bazel run //research/emnist:run_experiment --`.
    grid_iter: A sequence of dictionaries with keys from grid, and values
      corresponding to all combinations of items in the corresponding
      iterables.
    root_output_dir: The directory where all outputs are stored.
    short_names: Short name mapping for the parameter name used if parameter
      string length is too long.
    max_workers: The max number of commands to run in parallel.
  """
  commands = []
  for idx, param_dict in enumerate(grid_iter):
    flag_args = [
        '--{}={}'.format(key, str(value))
        for key, value in sorted(param_dict.items())
    ]
    short_names = short_names or {}
    exp_name = hparams_to_str(idx, param_dict, short_names)
    flag_args.append('--root_output_dir={}'.format(root_output_dir))
    flag_args.append('--exp_name={}'.format(exp_name))
    commands.append('{} {}'.format(executable, ' '.join(flag_args)))
  # Fan the shell commands out over a process pool, then wait for all of
  # them to finish.
  pool = multiprocessing.Pool(processes=max_workers)
  run_in_shell = functools.partial(subprocess.call, shell=True)
  for command in commands:
    pool.apply_async(run_in_shell, (command,))
  pool.close()
  pool.join()
# ---
#!/usr/bin/env AFDKOPython
# encoding: UTF-8
from __future__ import division, absolute_import, print_function, unicode_literals
import subprocess, os, argparse, collections
from fontTools.ttLib import TTFont
import mutatorMath.ufo.document, WriteFeaturesKernFDK, WriteFeaturesMarkFDK
import hindkit, hindkit.devanagari, hindkit.patches
import hindkit.constants as constants
class Resource(object):

    """A build artifact: an output path plus the way to (re)generate it."""

    def __init__(
        self,
        builder,
        output,
        generator,
        extensions = None,
    ):
        # The Builder instance that owns and prepares this resource.
        self.builder = builder
        # Abstract path of the main file this resource produces.
        self.output = output
        # Callable that writes the output, or None if the file is copied.
        self.generator = generator
        # Companion files copied alongside the main output.
        self.extensions = list(extensions) if extensions else []
class Builder(object):
def __init__(
self,
family,
fontrevision = '1.000',
vertical_metrics = {},
options = {},
):
self.family = family
self.fontrevision = fontrevision
self.vertical_metrics = {}
self.vertical_metrics['Ascender'] = vertical_metrics.get('Ascender', 800)
self.vertical_metrics['Descender'] = vertical_metrics.get('Descender', -200)
self.vertical_metrics['LineGap'] = vertical_metrics.get('LineGap', 0)
self.vertical_metrics['TypoAscender'] = vertical_metrics.get('TypoAscender', self.vertical_metrics['Ascender'])
self.vertical_metrics['TypoDescender'] = vertical_metrics.get('TypoDescender', self.vertical_metrics['Descender'])
self.vertical_metrics['TypoLineGap'] = vertical_metrics.get('TypoLineGap', self.vertical_metrics['LineGap'])
self.vertical_metrics['winAscent'] = vertical_metrics.get('winAscent', self.vertical_metrics['Ascender'])
self.vertical_metrics['winDescent'] = vertical_metrics.get('winDescent', abs(self.vertical_metrics['Descender']))
self.devanagari_offset_matrix = ((0, 0), (0, 0))
self.options = {
'prepare_kerning': self.family._has_kerning(),
'prepare_mark_positioning': self.family._has_mark_positioning(),
'prepare_mark_to_mark_positioning': True,
'match_mI_variants': self.family._has_mI_variants(),
'position_marks_for_mI_variants': False,
'postprocess_master': False,
'postprocess_kerning': False,
'postprocess_font_file': False,
'run_stage_prepare_styles': True,
'run_stage_prepare_features': True,
'run_stage_compile': True,
'run_makeinstances': len(self.family.masters) > len(self.family.styles),
'run_checkoutlines': True,
'run_autohint': False,
'build_ttf': False,
'override_GDEF': True,
'do_style_linking': False,
'use_os_2_version_4': False,
'prefer_typo_metrics': False,
'is_width_weight_slope_only': False,
}
self.options.update(options)
self.masters = Resource(
self,
constants.paths.MASTERS,
None,
)
self.designspace = Resource(
self,
constants.paths.DESIGNSPACE,
self._generate_designspace,
)
self.styles = Resource(
self,
constants.paths.STYLES,
self._prepare_styles,
)
self.features_classes = Resource(
self,
os.path.join(constants.paths.FEATURES, 'classes.fea'), #!
self._generate_features_classes,
extensions = [
os.path.join(constants.paths.FEATURES, 'classes_{}.fea'.format(i))
for i in ['suffixing']
],
)
self.features_tables = Resource(
self,
os.path.join(constants.paths.FEATURES, 'tables.fea'),
self._generate_features_tables,
)
self.features_languagesystems = Resource(
self,
os.path.join(constants.paths.FEATURES, 'languagesystems.fea'),
self._generate_features_languagesystems,
)
self.features_GSUB = Resource(
self,
os.path.join(constants.paths.FEATURES, 'GSUB.fea'), #!
None,
extensions = [
os.path.join(constants.paths.FEATURES, 'GSUB_{}.fea'.format(i))
for i in ['lookups', 'prefixing']
],
)
self.features_GPOS = Resource(
self,
os.path.join(constants.paths.FEATURES, 'GPOS.fea'),
self._generate_features_GPOS,
)
# self.features_weight_class = Resource(
# self,
# None,
# self._generate_features_weight_class,
# )
# self.features_references = Resource(
# self,
# None,
# self._generate_features_references,
# )
self.fmndb = Resource(
self,
constants.paths.FMNDB,
self._generate_fmndb,
)
self.goadb = Resource(
self,
constants.paths.GOADB + '_TRIMMED',
self._generate_goadb,
)
    def _prepare(self, resource, *args, **kwargs):
        """Materialize `resource` into the temp build tree.

        Tries, in order: copying an existing project file, running the
        resource's generator, or copying a premade file shipped with the
        package. Extension files are handled alongside the main output.
        """
        def _premade(abstract_path):
            # Resolve `abstract_path` against the packaged premade data for
            # this family's script; None passes through as None.
            if abstract_path:
                premade_prefix = hindkit._unwrap_path_relative_to_package_dir(
                    os.path.join('data/premade', self.family.script.lower())
                )
                premade_path = os.path.join(premade_prefix, abstract_path)
            else:
                premade_path = None
            return premade_path
        if resource.output:
            paths = [resource.output] + resource.extensions
            if os.path.exists(resource.output):
                # The project supplies the file(s): copy them into temp.
                for p in paths:
                    subprocess.call(['cp', '-fR', p, temp(p)])
            elif resource.generator:
                resource.generator(temp(resource.output), *args, **kwargs)
                # NOTE(review): after the generator writes into temp, the
                # original paths are copied over temp as well -- for a
                # generated main output the source file may not exist, so
                # this cp is expected to no-op with an error; confirm
                # intended (extensions may still be copied this way).
                for p in paths:
                    subprocess.call(['cp', '-fR', p, temp(p)])
            elif os.path.exists(_premade(resource.output)):
                # Fall back to the premade copies bundled with hindkit.
                for p in paths:
                    subprocess.call(['cp', '-fR', _premade(p), temp(p)])
            else:
                raise SystemExit("Can't prepare {}.".format(resource))
        else:
            raise SystemExit("Output is not set for {}.".format(resource))
    def postprocess_master(self, master):
        """Hook: tweak a master after it is prepared. No-op by default."""
        pass
    def postprocess_kerning(self, original):
        """Hook: rewrite generated kern feature text. Returns input unchanged."""
        return original
    def postprocess_font_file(self, original):
        """Hook: rewrite the compiled font before saving. Returns input unchanged."""
        return original
def _check_inputs(self, inputs):
results = collections.OrderedDict(
(path, os.path.exists(path))
for path in inputs
)
if not all(results.values()):
raise SystemExit(
'\n'.join('{}: {}'.format(k, v) for k, v in results.items())
)
# def _prepare_masters(self, output):
# pass
    def _generate_designspace(self, output):
        """Write the MutatorMath designspace document to `output`.

        Registers every master as a source (lib/groups/info copied from the
        first master only) and every style to be built as an instance.
        """
        doc = mutatorMath.ufo.document.DesignSpaceDocumentWriter(
            hindkit._unwrap_path_relative_to_cwd(output)
        )
        for i, master in enumerate(self.family.masters):
            doc.addSource(
                path = hindkit._unwrap_path_relative_to_cwd(temp(master.path)),
                name = 'master-' + master.name,
                location = {'weight': master.interpolation_value},
                # Only the first master contributes lib, groups and info.
                copyLib = i == 0,
                copyGroups = i == 0,
                copyInfo = i == 0,
                # muteInfo = False,
                # muteKerning = False,
                # mutedGlyphNames = None,
            )
        for style in self.styles_to_be_built:
            doc.startInstance(
                name = 'instance-' + style.name,
                location = {'weight': style.interpolation_value},
                familyName = self.family.output_name,
                styleName = style.name,
                fileName = hindkit._unwrap_path_relative_to_cwd(
                    temp(style.path)
                ),
                postScriptFontName = style.output_full_name_postscript,
                # styleMapFamilyName = None,
                # styleMapStyleName = None,
            )
            doc.writeInfo()
            # Kerning is only interpolated when the family has any.
            if self.options['prepare_kerning']:
                doc.writeKerning()
            doc.endInstance()
        doc.save()
    def _prepare_styles(self, output): # STAGE I
        """Produce the style UFOs in temp, by interpolation or by copying masters."""
        self._check_inputs([temp(i.path) for i in self.family.masters])
        for style in self.styles_to_be_built:
            make_dir(temp(style.directory))
        if self.options['run_makeinstances']:
            # Interpolate instances from the designspace via makeInstancesUFO.
            self._prepare(self.designspace)
            arguments = ['-d', temp(constants.paths.DESIGNSPACE)]
            # -c / -a tell makeInstancesUFO to *skip* checkoutlines/autohint.
            if not self.options['run_checkoutlines']:
                arguments.append('-c')
            if not self.options['run_autohint']:
                arguments.append('-a')
            subprocess.call(['makeInstancesUFO'] + arguments)
        else:
            # Styles map 1:1 onto masters: copy each master and rename it.
            for index, (master, style) in enumerate(zip(self.family.masters, self.styles_to_be_built)):
                subprocess.call(['cp', '-fR', temp(master.path), temp(style.path)])
                font = style.open_font(is_temp=True)
                font.info.postscriptFontName = style.output_full_name_postscript
                font.save()
            # Re-create what makeInstancesUFO would have done to each copy.
            for style in self.styles_to_be_built:
                self._simulate_makeInstancesUFO_postprocess(style)
    def _simulate_makeInstancesUFO_postprocess(self, style):
        """Run checkoutlines/autohint on a copied style, as makeInstancesUFO would."""
        self._check_inputs([temp(style.path)])
        if self.options['run_checkoutlines'] or self.options['run_autohint']:
            options = {
                'doOverlapRemoval': self.options['run_checkoutlines'],
                'doAutoHint': self.options['run_autohint'],
                'allowDecimalCoords': False,
            }
            hindkit.patches.updateInstance(options, temp(style.path))
    def _generate_features_classes(self, output):
        """Write classes.fea: glyph class definitions shared by all styles.

        Classes are built by filtering the first style's glyphs, sorted in
        the family GOADB order, then mirrored into every style's groups.
        """
        self._check_inputs([temp(i.path) for i in self.styles_to_be_built])
        lines = []
        if self.options['prepare_mark_positioning']:
            glyph_classes = []
            glyph_classes.extend([(WriteFeaturesMarkFDK.kCombMarksClassName, glyph_filter_marks)])
            if self.options['match_mI_variants']:
                glyph_classes.extend([
                    ('MATRA_I_ALTS', hindkit.devanagari.glyph_filter_matra_i_alts),
                    ('BASES_ALIVE', hindkit.devanagari.glyph_filter_bases_alive),
                    ('BASES_DEAD', hindkit.devanagari.glyph_filter_bases_dead),
                    # ('BASES_FOR_WIDE_MATRA_II', hindkit.devanagari.glyph_filter_bases_for_wide_matra_ii),
                ])
            style_0 = self.styles_to_be_built[0].open_font(is_temp=True)
            # GOADB rows are (production, development, unicode) triples; the
            # sort key uses development names.
            glyph_order = [
                development_name for
                production_name, development_name, unicode_mapping in
                self.family.goadb
            ]
            for class_name, filter_function in glyph_classes:
                glyph_names = [
                    glyph.name for glyph in filter(
                        lambda glyph: filter_function(self.family, glyph),
                        style_0,
                    )
                ]
                glyph_names = sort_glyphs(glyph_order, glyph_names)
                style_0.groups.update({class_name: glyph_names})
                lines.extend(
                    compose_glyph_class_def_lines(class_name, glyph_names)
                )
            style_0.save()
            # Propagate the computed groups to the remaining styles.
            for style in self.styles_to_be_built[1:]:
                font = style.open_font(is_temp=True)
                font.groups.update(style_0.groups)
                font.save()
        if lines:
            with open(output, 'w') as f:
                f.writelines(i + '\n' for i in lines)
    def _generate_features_tables(self, output):
        """Write tables.fea: hhea, OS/2, GDEF and name table overrides."""
        lines = []
        tables = collections.OrderedDict([
            ('hhea', []),
            ('OS/2', []),
            ('GDEF', []),
            ('name', []),
        ])
        tables['OS/2'].extend([
            'include (weightclass.fea);',
            'Vendor "{}";'.format(constants.clients.Client(self.family).table_OS_2['Vendor']),
        ])
        if self.vertical_metrics:
            tables['hhea'].extend(
                i.format(**self.vertical_metrics)
                for i in [
                    'Ascender {Ascender};',
                    'Descender {Descender};',
                    'LineGap {LineGap};',
                ]
            )
            tables['OS/2'].extend(
                i.format(**self.vertical_metrics)
                for i in [
                    'TypoAscender {TypoAscender};',
                    'TypoDescender {TypoDescender};',
                    'TypoLineGap {TypoLineGap};',
                    'winAscent {winAscent};',
                    'winDescent {winDescent};',
                ]
            )
        # tables['OS/2'].extend(self.generate_UnicodeRange)
        # tables['OS/2'].extend(self.generate_CodePageRange)
        if self.options['override_GDEF']:
            # Empty slots keep their position in the GlyphClassDef statement.
            GDEF_records = {
                'bases': '',
                'ligatures': '',
                'marks': '',
                'components': '',
            }
            if self.options['prepare_mark_positioning'] or os.path.exists(temp(os.path.join(constants.paths.FEATURES, 'classes.fea'))):
                GDEF_records['marks'] = '@{}'.format(WriteFeaturesMarkFDK.kCombMarksClassName)
            if os.path.exists(temp(os.path.join(constants.paths.FEATURES, 'classes_suffixing.fea'))):
                GDEF_records['marks'] = '@{}'.format('COMBINING_MARKS_GDEF')
            tables['GDEF'].extend([
                'GlyphClassDef {bases}, {ligatures}, {marks}, {components};'.format(**GDEF_records)
            ])
        # Escape non-ASCII name strings for FEA syntax.
        # NOTE(review): relies on Python 2 semantics -- under Python 3,
        # str.encode('unicode_escape') returns bytes and .replace(str, str)
        # would raise; this file targets AFDKOPython (2.7).
        tables['name'].extend(
            'nameid {} "{}";'.format(
                name_id,
                content.encode('unicode_escape').replace('\\x', '\\00').replace('\\u', '\\')
            )
            for name_id, content in constants.clients.Client(self.family).table_name.items()
            if content
        )
        # Emit only the tables that collected at least one entry.
        for name, entries in tables.items():
            if entries:
                lines.append('table {} {{'.format(name))
                lines.extend(' ' + i for i in entries)
                lines.append('}} {};'.format(name))
        if lines:
            with open(output, 'w') as f:
                f.writelines(i + '\n' for i in lines)
def _generate_features_languagesystems(self, output):
lines = ['languagesystem DFLT dflt;']
tag = constants.misc.SCRIPTS[self.family.script.lower()]['tag']
if isinstance(tag, tuple):
lines.append('languagesystem {} dflt;'.format(tag[1]))
lines.append('languagesystem {} dflt;'.format(tag[0]))
else:
lines.append('languagesystem {} dflt;'.format(tag))
if lines:
with open(output, 'w') as f:
f.writelines(i + '\n' for i in lines)
    def _generate_features_GSUB(self, output):
        """Hook: GSUB.fea has no generator; it comes from the project or premade data."""
        pass
    def _generate_features_GPOS(self, output, style):
        """Generate per-style GPOS data: kerning, mark features, mI matching.

        The FDK writer classes emit their own files into the style directory;
        `output` itself is not written here.
        """
        self._check_inputs([temp(style.path)])
        directory = temp(style.directory)
        if self.options['prepare_kerning']:
            WriteFeaturesKernFDK.KernDataClass(
                font = style.open_font(is_temp=True),
                folderPath = directory,
            )
            kern_path = os.path.join(directory, WriteFeaturesKernFDK.kKernFeatureFileName)
            # Optionally rewrite the generated kern feature via the hook.
            if self.options['postprocess_kerning'] and os.path.exists(kern_path):
                with open(kern_path) as f:
                    original = f.read()
                postprocessed = self.postprocess_kerning(original)
                with open(kern_path, 'w') as f:
                    f.write(postprocessed)
        if self.options['prepare_mark_positioning']:
            WriteFeaturesMarkFDK.MarkDataClass(
                font = style.open_font(is_temp=True),
                folderPath = directory,
                trimCasingTags = False,
                genMkmkFeature = self.options['prepare_mark_to_mark_positioning'],
                writeClassesFile = True,
                indianScriptsFormat = self.family.script.lower() in constants.misc.SCRIPTS,
            )
        if self.options['match_mI_variants']:
            hindkit.devanagari.prepare_features_devanagari(
                self.options['position_marks_for_mI_variants'],
                self,
                style,
            ) # NOTE: not pure GPOS
def _generate_features_weight_class(self, style):
directory = temp(style.directory)
with open(os.path.join(directory, 'WeightClass.fea'), 'w') as f:
f.write('WeightClass {};\n'.format(str(style.weight_class)))
    def _generate_features_references(self, style):
        """Write the per-style `features` file that includes every prepared .fea piece."""
        directory = temp(style.directory)
        with open(os.path.join(directory, 'features'), 'w') as f:
            lines = ['table head { FontRevision 1.000; } head;']
            # Shared feature files, included only if they were prepared.
            for file_name in [
                'classes',
                'classes_suffixing',
                'tables',
                'languagesystems',
                'GSUB_prefixing',
                'GSUB_lookups',
                'GSUB',
            ]:
                abstract_path = os.path.join(constants.paths.FEATURES, file_name + '.fea')
                if os.path.exists(temp(abstract_path)):
                    lines.append('include (../../{});'.format(abstract_path))
            # Kerning goes into `dist` for Indic scripts, `kern` otherwise.
            if os.path.exists(os.path.join(directory, WriteFeaturesKernFDK.kKernFeatureFileName)):
                if self.family.script.lower() in constants.misc.SCRIPTS:
                    kerning_feature_name = 'dist'
                else:
                    kerning_feature_name = 'kern'
                lines.append(
                    'feature {0} {{ include ({1}); }} {0};'.format(
                        kerning_feature_name,
                        WriteFeaturesKernFDK.kKernFeatureFileName,
                    )
                )
            if os.path.exists(os.path.join(directory, WriteFeaturesMarkFDK.kMarkClassesFileName)):
                lines.append('include ({});'.format(WriteFeaturesMarkFDK.kMarkClassesFileName))
            # Mark-related features, one include per generated file.
            for feature_name, file_name in [
                ('mark', WriteFeaturesMarkFDK.kMarkFeatureFileName),
                ('mkmk', WriteFeaturesMarkFDK.kMkmkFeatureFileName),
                ('abvm', WriteFeaturesMarkFDK.kAbvmFeatureFileName),
                ('blwm', WriteFeaturesMarkFDK.kBlwmFeatureFileName),
            ]:
                if os.path.exists(os.path.join(directory, file_name)):
                    lines.append('feature {0} {{ include ({1}); }} {0};'.format(feature_name, file_name))
            f.writelines(i + '\n' for i in lines)
    def _generate_fmndb(self, output):
        """Write the FontMenuNameDB consumed by makeotf (-mf)."""
        f_name = self.family.output_name
        lines = []
        for style in self.styles_to_be_built:
            lines.append('')
            # Section header keyed by the style's PostScript name.
            lines.append('[{}]'.format(style.output_full_name_postscript))
            lines.append(' f = {}'.format(f_name))
            lines.append(' s = {}'.format(style.name))
            l_name = style.output_full_name
            comment_lines = []
            if self.options['do_style_linking']:
                # With style linking, the linked long name drops the style
                # words that the Bold/Italic bits re-add.
                if style.name == 'Regular':
                    l_name = l_name.replace(' Regular', '')
                else:
                    if style.is_bold:
                        comment_lines.append(' # IsBoldStyle')
                        l_name = l_name.replace(' Bold', '')
                    if style.is_italic:
                        comment_lines.append(' # IsItalicStyle')
                        l_name = l_name.replace(' Italic', '')
            if l_name != f_name:
                lines.append(' l = {}'.format(l_name))
            lines.extend(comment_lines)
        with open(output, 'w') as f:
            f.write(constants.templates.FMNDB_HEAD)
            f.writelines(i + '\n' for i in lines)
    def _generate_goadb(self, output):
        """Write the trimmed GOADB: only family rows whose development glyph exists."""
        # The first style serves as the reference for which glyphs exist.
        reference_font = self.styles_to_be_built[0].open_font(is_temp=True)
        with open(output, 'w') as f:
            f.writelines([
                ' '.join(filter(None, row)) + '\n'
                for row in self.family.goadb
                # row[1] is the development glyph name.
                if row[1] in reference_font
            ])
    def _prepare_for_compiling_ttf(self):
        """Rewrite the GOADB for TTF glyph-name quirks and switch styles to TTF."""
        with open(temp(self.goadb.output)) as f:
            original_lines = f.readlines()
        modified_lines = []
        for line in original_lines:
            parts = line.split()
            # Map development names (column 2) that differ in TTF builds.
            alt_development_name = constants.misc.GLYPH_NAME_INCOSISTENCIES_IN_TTF.get(parts[1])
            if alt_development_name:
                parts[1] = alt_development_name
                modified_lines.append(' '.join(parts) + '\n')
            else:
                modified_lines.append(line)
        # Write the TTF variant next to the trimmed GOADB and point at it.
        self.goadb.output = self.goadb.output + '_TTF'
        with open(temp(self.goadb.output), 'w') as f:
            f.writelines(modified_lines)
        for style in self.styles_to_be_built:
            style.input_format = 'TTF'
            style.output_format = 'TTF'
    def _compile(self, style):
        """Run makeotf for `style`, then post-process and deploy the result."""
        self._check_inputs([temp(style.path), temp(self.fmndb.output), temp(self.goadb.output)])
        # if style.file_name.endswith('.ufo'):
        #     font = style.open_font(is_temp=True)
        #     if font.info.postscriptFontName != style.output_full_name_postscript:
        #         font.info.postscriptFontName = style.output_full_name_postscript
        #         font.save()
        font_path = style.font_path
        arguments = [
            '-f', temp(style.path),
            '-o', font_path,
            '-mf', temp(self.fmndb.output),
            '-gf', temp(self.goadb.output),
            '-rev', self.fontrevision,
            '-ga',
            '-omitMacNames',
        ]
        if not self.args.test:
            # Release mode, unless --test was passed on the command line.
            arguments.append('-r')
        if not self.options['run_autohint']:
            arguments.append('-shw')
        if self.options['do_style_linking']:
            if style.is_bold:
                arguments.append('-b')
            if style.is_italic:
                arguments.append('-i')
        if self.options['use_os_2_version_4']:
            # OS/2 version 4: set/clear fsSelection bits 7-9 explicitly.
            for digit, boolean in [
                ('7', self.options['prefer_typo_metrics']),
                ('8', self.options['is_width_weight_slope_only']),
                ('9', style.is_oblique),
            ]:
                arguments.append('-osbOn' if boolean else '-osbOff')
                arguments.append(digit)
        if not os.path.isdir(constants.paths.BUILD):
            os.makedirs(constants.paths.BUILD)
        subprocess.call(['makeotf'] + arguments)
        if self.options['postprocess_font_file'] and os.path.exists(font_path):
            original = TTFont(font_path)
            postprocessed = self.postprocess_font_file(original)
            # Keep the existing table order when saving.
            postprocessed.save(font_path, reorderTables=False)
            print('[NOTE] `postprocess_font_file` done.')
        destination = constants.paths.ADOBE_FONTS
        if os.path.exists(font_path) and os.path.isdir(destination):
            subprocess.call(['cp', '-f', font_path, destination])
    def _finalize_options(self):
        """Parse command-line arguments and derive the effective build options."""
        parser = argparse.ArgumentParser(
            description = 'execute `AFDKOPython build.py` to run stages as specified in build.py, or append arguments to override.'
        )
        parser.add_argument(
            '--test', action = 'store_true',
            help = 'run a minimum and fast build process.',
        )
        parser.add_argument(
            '--stages', action = 'store',
            help = '"1" for "prepare_styles", "2" for "prepare_features", and "3" for "compile".',
        )
        parser.add_argument(
            '--options', action = 'store',
            help = '"0" for none, "1" for "makeinstances", "2" for "checkoutlines", and "3" for "autohint".',
        )
        self.args = parser.parse_args()
        # Stage/option strings are matched by digit membership, so any
        # combination such as "13" is accepted.
        if self.args.stages:
            stages = str(self.args.stages)
            self.options['run_stage_prepare_styles'] = '1' in stages
            self.options['run_stage_prepare_features'] = '2' in stages
            self.options['run_stage_compile'] = '3' in stages
        if self.args.options:
            options = str(self.args.options)
            self.options['run_makeinstances'] = '1' in options
            self.options['run_checkoutlines'] = '2' in options
            self.options['run_autohint'] = '3' in options
        if self.args.test:
            # Test mode skips the slow steps regardless of --options.
            self.options['run_makeinstances'] = False
            self.options['run_checkoutlines'] = False
            self.options['run_autohint'] = False
        self.styles_to_be_built = self.family.styles
        if self.family.masters and (not self.options['run_makeinstances']):
            self.styles_to_be_built = self.family.get_styles_that_are_directly_derived_from_masters()
    def build(self):
        """Entry point: run the enabled build stages in order."""
        self._finalize_options()
        make_dir(constants.paths.TEMP)
        # Stage I: masters and style UFOs.
        if self.options['run_stage_prepare_styles']:
            reset_dir(temp(constants.paths.MASTERS))
            self._prepare(self.masters)
            for master in self.family.masters:
                master.update_glyph_order()
                if self.options['postprocess_master']:
                    self.postprocess_master(master)
            reset_dir(temp(constants.paths.STYLES))
            self._prepare(self.styles)
        # Stage II: feature files, shared and per style.
        if self.options['run_stage_prepare_features']:
            reset_dir(temp(constants.paths.FEATURES))
            self._prepare(self.features_classes)
            self._prepare(self.features_tables)
            self._prepare(self.features_languagesystems)
            self._prepare(self.features_GSUB)
            for style in self.styles_to_be_built:
                self._prepare(self.features_GPOS, style)
                self._generate_features_weight_class(style)
                self._generate_features_references(style)
        # Stage III: compile with makeotf, optionally again as TTF.
        if self.options['run_stage_compile']:
            self._prepare(self.fmndb)
            self._prepare(self.goadb)
            for style in self.styles_to_be_built:
                self._compile(style)
            if self.options['build_ttf']:
                self._prepare_for_compiling_ttf()
                for style in self.styles_to_be_built:
                    self._compile(style)
# ---
def sort_glyphs(glyph_order, glyph_names):
    """Order glyph_names by glyph_order; names absent from the order keep
    their relative position and are appended at the end."""
    ordered = [name for name in glyph_order if name in glyph_names]
    leftovers = [name for name in glyph_names if name not in glyph_order]
    return ordered + leftovers
def compose_glyph_class_def_lines(class_name, glyph_names):
    """Return FEA lines defining @class_name; a commented stub when empty."""
    if not glyph_names:
        return ['# @{} = [];'.format(class_name), '']
    lines = ['@{} = ['.format(class_name)]
    lines.extend('  {}'.format(glyph_name) for glyph_name in glyph_names)
    lines.extend(['];', ''])
    return lines
def glyph_filter_marks(family, glyph):
    """Return True if the glyph carries a mark anchor (name starting with '_')."""
    return any(
        anchor.name and anchor.name.startswith('_')
        for anchor in glyph.anchors
    )
# ---
def remove_files(path):
    """Delete `path` recursively via `rm -fR` (silently succeeds if missing)."""
    subprocess.call(['rm', '-fR', path])
def make_dir(path):
    """Create `path` and any missing parents via `mkdir -p`."""
    subprocess.call(['mkdir', '-p', path])
def reset_dir(path):
    """Empty `path`: remove it entirely, then recreate it."""
    remove_files(path)
    make_dir(path)
# ---
def overriding(abstract_path):
    """Identity hook: return the path unchanged (kept for API symmetry with temp())."""
    return abstract_path
def temp(abstract_path):
    """Map an abstract path into the temp build directory; falsy input yields None."""
    if abstract_path:
        return os.path.join(constants.paths.TEMP, abstract_path)
    return None
# Changelog: the `postprocess_master` hook below is renamed to `prepare_master`.
#!/usr/bin/env AFDKOPython
# encoding: UTF-8
from __future__ import division, absolute_import, print_function, unicode_literals
import subprocess, os, argparse, collections
from fontTools.ttLib import TTFont
import mutatorMath.ufo.document, WriteFeaturesKernFDK, WriteFeaturesMarkFDK
import hindkit, hindkit.devanagari, hindkit.patches
import hindkit.constants as constants
class Resource(object):
    """A build artifact: an output path plus the way to (re)generate it."""
    def __init__(
        self,
        builder,
        output,
        generator,
        extensions = None,
    ):
        # The Builder instance that owns and prepares this resource.
        self.builder = builder
        # Abstract path of the main file this resource produces.
        self.output = output
        # Callable that writes the output, or None if the file is copied.
        self.generator = generator
        # Companion files copied alongside the main output.
        self.extensions = []
        if extensions:
            self.extensions.extend(extensions)
class Builder(object):
    def __init__(
        self,
        family,
        fontrevision = '1.000',
        vertical_metrics = {},
        options = {},
    ):
        """Configure a font build for `family`.

        Args:
            family: the family object to build; queried for kerning, mark
                positioning and mI-variant support to derive default options.
            fontrevision: revision string handed to makeotf via `-rev`.
            vertical_metrics: dict overriding the default vertical metrics
                (Ascender, Descender, LineGap, Typo*, win*). Read-only here.
            options: dict overriding entries of `self.options`. Read-only here.
        """
        self.family = family
        self.fontrevision = fontrevision
        # Fill in metric fallbacks: Typo* values default to the hhea values,
        # win* values default to Ascender / |Descender|.
        self.vertical_metrics = {}
        self.vertical_metrics['Ascender'] = vertical_metrics.get('Ascender', 800)
        self.vertical_metrics['Descender'] = vertical_metrics.get('Descender', -200)
        self.vertical_metrics['LineGap'] = vertical_metrics.get('LineGap', 0)
        self.vertical_metrics['TypoAscender'] = vertical_metrics.get('TypoAscender', self.vertical_metrics['Ascender'])
        self.vertical_metrics['TypoDescender'] = vertical_metrics.get('TypoDescender', self.vertical_metrics['Descender'])
        self.vertical_metrics['TypoLineGap'] = vertical_metrics.get('TypoLineGap', self.vertical_metrics['LineGap'])
        self.vertical_metrics['winAscent'] = vertical_metrics.get('winAscent', self.vertical_metrics['Ascender'])
        self.vertical_metrics['winDescent'] = vertical_metrics.get('winDescent', abs(self.vertical_metrics['Descender']))
        self.devanagari_offset_matrix = ((0, 0), (0, 0))
        # Build switches; entries in `options` override these defaults.
        self.options = {
            'prepare_kerning': self.family._has_kerning(),
            'prepare_mark_positioning': self.family._has_mark_positioning(),
            'prepare_mark_to_mark_positioning': True,
            'match_mI_variants': self.family._has_mI_variants(),
            'position_marks_for_mI_variants': False,
            'prepare_master': False,
            'postprocess_kerning': False,
            'postprocess_font_file': False,
            'run_stage_prepare_styles': True,
            'run_stage_prepare_features': True,
            'run_stage_compile': True,
            'run_makeinstances': len(self.family.masters) > len(self.family.styles),
            'run_checkoutlines': True,
            'run_autohint': False,
            'build_ttf': False,
            'override_GDEF': True,
            'do_style_linking': False,
            'use_os_2_version_4': False,
            'prefer_typo_metrics': False,
            'is_width_weight_slope_only': False,
        }
        self.options.update(options)
        # Resources: each couples an output path with the generator used by
        # self._prepare() to materialize it in the temp build tree.
        self.masters = Resource(
            self,
            constants.paths.MASTERS,
            None,
        )
        self.designspace = Resource(
            self,
            constants.paths.DESIGNSPACE,
            self._generate_designspace,
        )
        self.styles = Resource(
            self,
            constants.paths.STYLES,
            self._prepare_styles,
        )
        self.features_classes = Resource(
            self,
            os.path.join(constants.paths.FEATURES, 'classes.fea'), #!
            self._generate_features_classes,
            extensions = [
                os.path.join(constants.paths.FEATURES, 'classes_{}.fea'.format(i))
                for i in ['suffixing']
            ],
        )
        self.features_tables = Resource(
            self,
            os.path.join(constants.paths.FEATURES, 'tables.fea'),
            self._generate_features_tables,
        )
        self.features_languagesystems = Resource(
            self,
            os.path.join(constants.paths.FEATURES, 'languagesystems.fea'),
            self._generate_features_languagesystems,
        )
        self.features_GSUB = Resource(
            self,
            os.path.join(constants.paths.FEATURES, 'GSUB.fea'), #!
            None,
            extensions = [
                os.path.join(constants.paths.FEATURES, 'GSUB_{}.fea'.format(i))
                for i in ['lookups', 'prefixing']
            ],
        )
        self.features_GPOS = Resource(
            self,
            os.path.join(constants.paths.FEATURES, 'GPOS.fea'),
            self._generate_features_GPOS,
        )
        # self.features_weight_class = Resource(
        #     self,
        #     None,
        #     self._generate_features_weight_class,
        # )
        # self.features_references = Resource(
        #     self,
        #     None,
        #     self._generate_features_references,
        # )
        self.fmndb = Resource(
            self,
            constants.paths.FMNDB,
            self._generate_fmndb,
        )
        self.goadb = Resource(
            self,
            constants.paths.GOADB + '_TRIMMED',
            self._generate_goadb,
        )
    def _prepare(self, resource, *args, **kwargs):
        """Materialize `resource` into the temp build tree.

        Tries, in order: copying an existing project file, running the
        resource's generator, or copying a premade file shipped with the
        package. Extension files are handled alongside the main output.
        """
        def _premade(abstract_path):
            # Resolve `abstract_path` against the packaged premade data for
            # this family's script; None passes through as None.
            if abstract_path:
                premade_prefix = hindkit._unwrap_path_relative_to_package_dir(
                    os.path.join('data/premade', self.family.script.lower())
                )
                premade_path = os.path.join(premade_prefix, abstract_path)
            else:
                premade_path = None
            return premade_path
        if resource.output:
            paths = [resource.output] + resource.extensions
            if os.path.exists(resource.output):
                # The project supplies the file(s): copy them into temp.
                for p in paths:
                    subprocess.call(['cp', '-fR', p, temp(p)])
            elif resource.generator:
                resource.generator(temp(resource.output), *args, **kwargs)
                # NOTE(review): after the generator writes into temp, the
                # original paths are copied over temp as well -- for a
                # generated main output the source file may not exist;
                # confirm this copy is intended.
                for p in paths:
                    subprocess.call(['cp', '-fR', p, temp(p)])
            elif os.path.exists(_premade(resource.output)):
                # Fall back to the premade copies bundled with hindkit.
                for p in paths:
                    subprocess.call(['cp', '-fR', _premade(p), temp(p)])
            else:
                raise SystemExit("Can't prepare {}.".format(resource))
        else:
            raise SystemExit("Output is not set for {}.".format(resource))
    def prepare_master(self, master):
        """Hook for subclasses: adjust a master before styles are built.

        Called once per master from build() when the `prepare_master`
        option is enabled. The default implementation does nothing.
        """
        pass
    def postprocess_kerning(self, original):
        """Hook for subclasses: transform generated kern feature text.

        Receives the kern feature file content as a string and returns
        the content to write back; the default returns it unchanged.
        """
        return original
    def postprocess_font_file(self, original):
        """Hook for subclasses: transform the compiled font object.

        Receives the TTFont opened from the build output (see _compile)
        and returns the object to save; the default is the identity.
        """
        return original
def _check_inputs(self, inputs):
results = collections.OrderedDict(
(path, os.path.exists(path))
for path in inputs
)
if not all(results.values()):
raise SystemExit(
'\n'.join('{}: {}'.format(k, v) for k, v in results.items())
)
# def _prepare_masters(self, output):
# pass
    def _generate_designspace(self, output):
        """Write the MutatorMath designspace document used for interpolation.

        Registers every master as a source (lib, groups and info are
        copied from the first master only) and every style to be built
        as an instance, all located on a single 'weight' axis.
        """
        doc = mutatorMath.ufo.document.DesignSpaceDocumentWriter(
            hindkit._unwrap_path_relative_to_cwd(output)
        )
        for i, master in enumerate(self.family.masters):
            doc.addSource(
                path = hindkit._unwrap_path_relative_to_cwd(temp(master.path)),
                name = 'master-' + master.name,
                location = {'weight': master.interpolation_value},
                # Only the first master contributes lib/groups/info.
                copyLib = i == 0,
                copyGroups = i == 0,
                copyInfo = i == 0,
                # muteInfo = False,
                # muteKerning = False,
                # mutedGlyphNames = None,
            )
        for style in self.styles_to_be_built:
            doc.startInstance(
                name = 'instance-' + style.name,
                location = {'weight': style.interpolation_value},
                familyName = self.family.output_name,
                styleName = style.name,
                fileName = hindkit._unwrap_path_relative_to_cwd(
                    temp(style.path)
                ),
                postScriptFontName = style.output_full_name_postscript,
                # styleMapFamilyName = None,
                # styleMapStyleName = None,
            )
            doc.writeInfo()
            # Kerning interpolation is optional per build options.
            if self.options['prepare_kerning']:
                doc.writeKerning()
            doc.endInstance()
        doc.save()
    def _prepare_styles(self, output): # STAGE I
        """Produce one style UFO per style in the temp directory.

        Either interpolates instances with makeInstancesUFO (driven by
        the designspace document), or -- when styles map one-to-one onto
        masters -- copies the masters directly and then simulates the
        tool's checkoutlines/autohint post-processing.
        """
        self._check_inputs([temp(i.path) for i in self.family.masters])
        for style in self.styles_to_be_built:
            make_dir(temp(style.directory))
        if self.options['run_makeinstances']:
            self._prepare(self.designspace)
            arguments = ['-d', temp(constants.paths.DESIGNSPACE)]
            # -c / -a are *skip* flags for checkoutlines / autohint.
            if not self.options['run_checkoutlines']:
                arguments.append('-c')
            if not self.options['run_autohint']:
                arguments.append('-a')
            subprocess.call(['makeInstancesUFO'] + arguments)
        else:
            # NOTE(review): `index` is unused; kept as-is to avoid code changes.
            for index, (master, style) in enumerate(zip(self.family.masters, self.styles_to_be_built)):
                subprocess.call(['cp', '-fR', temp(master.path), temp(style.path)])
                font = style.open_font(is_temp=True)
                font.info.postscriptFontName = style.output_full_name_postscript
                font.save()
            for style in self.styles_to_be_built:
                self._simulate_makeInstancesUFO_postprocess(style)
    def _simulate_makeInstancesUFO_postprocess(self, style):
        """Apply makeInstancesUFO's post-steps to a directly copied style.

        When instances are copied from masters instead of interpolated,
        overlap removal and autohinting still need to run; delegate to
        the patched updateInstance routine with options mirroring the
        build flags.
        """
        self._check_inputs([temp(style.path)])
        if self.options['run_checkoutlines'] or self.options['run_autohint']:
            options = {
                'doOverlapRemoval': self.options['run_checkoutlines'],
                'doAutoHint': self.options['run_autohint'],
                'allowDecimalCoords': False,
            }
            hindkit.patches.updateInstance(options, temp(style.path))
    def _generate_features_classes(self, output):
        """Generate classes.fea glyph class definitions.

        Builds glyph classes (combining marks; plus Devanagari mI-variant
        classes when `match_mI_variants` is on) by filtering the first
        style's glyphs, stores them as font groups in every style, and
        writes the class definitions to `output` when any were produced.
        """
        self._check_inputs([temp(i.path) for i in self.styles_to_be_built])
        lines = []
        if self.options['prepare_mark_positioning']:
            glyph_classes = []
            glyph_classes.extend([(WriteFeaturesMarkFDK.kCombMarksClassName, glyph_filter_marks)])
            if self.options['match_mI_variants']:
                glyph_classes.extend([
                    ('MATRA_I_ALTS', hindkit.devanagari.glyph_filter_matra_i_alts),
                    ('BASES_ALIVE', hindkit.devanagari.glyph_filter_bases_alive),
                    ('BASES_DEAD', hindkit.devanagari.glyph_filter_bases_dead),
                    # ('BASES_FOR_WIDE_MATRA_II', hindkit.devanagari.glyph_filter_bases_for_wide_matra_ii),
                ])
            # The first style serves as the reference for class membership.
            style_0 = self.styles_to_be_built[0].open_font(is_temp=True)
            # GOADB rows are (production, development, unicode); classes are
            # ordered by the development-name order of the GOADB.
            glyph_order = [
                development_name for
                production_name, development_name, unicode_mapping in
                self.family.goadb
            ]
            for class_name, filter_function in glyph_classes:
                glyph_names = [
                    glyph.name for glyph in filter(
                        lambda glyph: filter_function(self.family, glyph),
                        style_0,
                    )
                ]
                glyph_names = sort_glyphs(glyph_order, glyph_names)
                style_0.groups.update({class_name: glyph_names})
                lines.extend(
                    compose_glyph_class_def_lines(class_name, glyph_names)
                )
            style_0.save()
            # Propagate the reference style's groups to all other styles.
            for style in self.styles_to_be_built[1:]:
                font = style.open_font(is_temp=True)
                font.groups.update(style_0.groups)
                font.save()
        if lines:
            with open(output, 'w') as f:
                f.writelines(i + '\n' for i in lines)
    def _generate_features_tables(self, output):
        """Generate tables.fea with hhea, OS/2, GDEF and name table overrides.

        Vertical metrics and client-specific name/vendor entries come
        from the family's client configuration; GDEF glyph class
        definitions are emitted when `override_GDEF` is on. Only tables
        that received entries are written.
        """
        lines = []
        tables = collections.OrderedDict([
            ('hhea', []),
            ('OS/2', []),
            ('GDEF', []),
            ('name', []),
        ])
        tables['OS/2'].extend([
            'include (weightclass.fea);',
            'Vendor "{}";'.format(constants.clients.Client(self.family).table_OS_2['Vendor']),
        ])
        if self.vertical_metrics:
            tables['hhea'].extend(
                i.format(**self.vertical_metrics)
                for i in [
                    'Ascender {Ascender};',
                    'Descender {Descender};',
                    'LineGap {LineGap};',
                ]
            )
            tables['OS/2'].extend(
                i.format(**self.vertical_metrics)
                for i in [
                    'TypoAscender {TypoAscender};',
                    'TypoDescender {TypoDescender};',
                    'TypoLineGap {TypoLineGap};',
                    'winAscent {winAscent};',
                    'winDescent {winDescent};',
                ]
            )
        # tables['OS/2'].extend(self.generate_UnicodeRange)
        # tables['OS/2'].extend(self.generate_CodePageRange)
        if self.options['override_GDEF']:
            GDEF_records = {
                'bases': '',
                'ligatures': '',
                'marks': '',
                'components': '',
            }
            # The marks class may come from mark positioning or from a
            # premade classes.fea / classes_suffixing.fea already in temp.
            if self.options['prepare_mark_positioning'] or os.path.exists(temp(os.path.join(constants.paths.FEATURES, 'classes.fea'))):
                GDEF_records['marks'] = '@{}'.format(WriteFeaturesMarkFDK.kCombMarksClassName)
            if os.path.exists(temp(os.path.join(constants.paths.FEATURES, 'classes_suffixing.fea'))):
                GDEF_records['marks'] = '@{}'.format('COMBINING_MARKS_GDEF')
            tables['GDEF'].extend([
                'GlyphClassDef {bases}, {ligatures}, {marks}, {components};'.format(**GDEF_records)
            ])
        # NOTE(review): encode().replace() with str arguments assumes
        # Python 2 byte-string semantics (AFDKOPython); under Python 3
        # encode() returns bytes and this chain would raise -- confirm
        # the target runtime before porting.
        tables['name'].extend(
            'nameid {} "{}";'.format(
                name_id,
                content.encode('unicode_escape').replace('\\x', '\\00').replace('\\u', '\\')
            )
            for name_id, content in constants.clients.Client(self.family).table_name.items()
            if content
        )
        for name, entries in tables.items():
            if entries:
                lines.append('table {} {{'.format(name))
                lines.extend(' ' + i for i in entries)
                lines.append('}} {};'.format(name))
        if lines:
            with open(output, 'w') as f:
                f.writelines(i + '\n' for i in lines)
def _generate_features_languagesystems(self, output):
lines = ['languagesystem DFLT dflt;']
tag = constants.misc.SCRIPTS[self.family.script.lower()]['tag']
if isinstance(tag, tuple):
lines.append('languagesystem {} dflt;'.format(tag[1]))
lines.append('languagesystem {} dflt;'.format(tag[0]))
else:
lines.append('languagesystem {} dflt;'.format(tag))
if lines:
with open(output, 'w') as f:
f.writelines(i + '\n' for i in lines)
    def _generate_features_GSUB(self, output):
        """Placeholder: GSUB is never generated here.

        The features_GSUB Resource is registered with generator None, so
        _prepare falls back to copying the project's or the premade
        GSUB*.fea files instead of calling this method.
        """
        pass
    def _generate_features_GPOS(self, output, style):
        """Generate per-style GPOS data (kerning and mark positioning).

        Runs the AFDKO kern/mark feature writers into the style's temp
        directory, applies the optional kerning post-processing hook,
        and -- for Devanagari mI matching -- also triggers GSUB-side
        feature preparation.
        """
        self._check_inputs([temp(style.path)])
        directory = temp(style.directory)
        if self.options['prepare_kerning']:
            WriteFeaturesKernFDK.KernDataClass(
                font = style.open_font(is_temp=True),
                folderPath = directory,
            )
            kern_path = os.path.join(directory, WriteFeaturesKernFDK.kKernFeatureFileName)
            # Round-trip the generated kern file through the subclass hook.
            if self.options['postprocess_kerning'] and os.path.exists(kern_path):
                with open(kern_path) as f:
                    original = f.read()
                postprocessed = self.postprocess_kerning(original)
                with open(kern_path, 'w') as f:
                    f.write(postprocessed)
        if self.options['prepare_mark_positioning']:
            WriteFeaturesMarkFDK.MarkDataClass(
                font = style.open_font(is_temp=True),
                folderPath = directory,
                trimCasingTags = False,
                genMkmkFeature = self.options['prepare_mark_to_mark_positioning'],
                writeClassesFile = True,
                indianScriptsFormat = self.family.script.lower() in constants.misc.SCRIPTS,
            )
        if self.options['match_mI_variants']:
            hindkit.devanagari.prepare_features_devanagari(
                self.options['position_marks_for_mI_variants'],
                self,
                style,
            ) # NOTE: not pure GPOS
def _generate_features_weight_class(self, style):
directory = temp(style.directory)
with open(os.path.join(directory, 'WeightClass.fea'), 'w') as f:
f.write('WeightClass {};\n'.format(str(style.weight_class)))
    def _generate_features_references(self, style):
        """Write the style's top-level `features` file of include statements.

        References whichever shared .fea files exist in the temp features
        directory, then the per-style kern and mark/mkmk/abvm/blwm files
        that the feature writers produced. Kerning goes into the `dist`
        feature for Indic scripts, `kern` otherwise.
        """
        directory = temp(style.directory)
        with open(os.path.join(directory, 'features'), 'w') as f:
            lines = ['table head { FontRevision 1.000; } head;']
            # Shared feature files, included only when present in temp.
            for file_name in [
                'classes',
                'classes_suffixing',
                'tables',
                'languagesystems',
                'GSUB_prefixing',
                'GSUB_lookups',
                'GSUB',
            ]:
                abstract_path = os.path.join(constants.paths.FEATURES, file_name + '.fea')
                if os.path.exists(temp(abstract_path)):
                    lines.append('include (../../{});'.format(abstract_path))
            if os.path.exists(os.path.join(directory, WriteFeaturesKernFDK.kKernFeatureFileName)):
                if self.family.script.lower() in constants.misc.SCRIPTS:
                    kerning_feature_name = 'dist'
                else:
                    kerning_feature_name = 'kern'
                lines.append(
                    'feature {0} {{ include ({1}); }} {0};'.format(
                        kerning_feature_name,
                        WriteFeaturesKernFDK.kKernFeatureFileName,
                    )
                )
            if os.path.exists(os.path.join(directory, WriteFeaturesMarkFDK.kMarkClassesFileName)):
                lines.append('include ({});'.format(WriteFeaturesMarkFDK.kMarkClassesFileName))
            # Per-style mark positioning features, included when generated.
            for feature_name, file_name in [
                ('mark', WriteFeaturesMarkFDK.kMarkFeatureFileName),
                ('mkmk', WriteFeaturesMarkFDK.kMkmkFeatureFileName),
                ('abvm', WriteFeaturesMarkFDK.kAbvmFeatureFileName),
                ('blwm', WriteFeaturesMarkFDK.kBlwmFeatureFileName),
            ]:
                if os.path.exists(os.path.join(directory, file_name)):
                    lines.append('feature {0} {{ include ({1}); }} {0};'.format(feature_name, file_name))
            f.writelines(i + '\n' for i in lines)
    def _generate_fmndb(self, output):
        """Generate the FontMenuNameDB file for makeotf.

        One record per style: PostScript name header, family name (f),
        style name (s), and -- when style linking applies -- a long name
        (l) with the linked 'Regular'/'Bold'/'Italic' parts stripped,
        plus IsBoldStyle/IsItalicStyle comments.
        """
        f_name = self.family.output_name
        lines = []
        for style in self.styles_to_be_built:
            lines.append('')
            lines.append('[{}]'.format(style.output_full_name_postscript))
            lines.append(' f = {}'.format(f_name))
            lines.append(' s = {}'.format(style.name))
            l_name = style.output_full_name
            comment_lines = []
            if self.options['do_style_linking']:
                if style.name == 'Regular':
                    l_name = l_name.replace(' Regular', '')
                else:
                    # Strip the linked style words so the long name is the
                    # style-linked family name; note the replace order.
                    if style.is_bold:
                        comment_lines.append(' # IsBoldStyle')
                        l_name = l_name.replace(' Bold', '')
                    if style.is_italic:
                        comment_lines.append(' # IsItalicStyle')
                        l_name = l_name.replace(' Italic', '')
            # Only emit `l` when it differs from the family name.
            if l_name != f_name:
                lines.append(' l = {}'.format(l_name))
            lines.extend(comment_lines)
        with open(output, 'w') as f:
            f.write(constants.templates.FMNDB_HEAD)
            f.writelines(i + '\n' for i in lines)
def _generate_goadb(self, output):
reference_font = self.styles_to_be_built[0].open_font(is_temp=True)
with open(output, 'w') as f:
f.writelines([
' '.join(filter(None, row)) + '\n'
for row in self.family.goadb
if row[1] in reference_font
])
    def _prepare_for_compiling_ttf(self):
        """Rewrite the GOADB for TTF output and switch styles to TTF.

        Substitutes development glyph names that are inconsistent in TTF
        builds, writes the result to a new '<goadb>_TTF' file in temp,
        and flips every style's input/output format to TTF for the
        second _compile pass.
        """
        with open(temp(self.goadb.output)) as f:
            original_lines = f.readlines()
        modified_lines = []
        for line in original_lines:
            parts = line.split()
            # Column 1 is the development glyph name.
            alt_development_name = constants.misc.GLYPH_NAME_INCOSISTENCIES_IN_TTF.get(parts[1])
            if alt_development_name:
                parts[1] = alt_development_name
                modified_lines.append(' '.join(parts) + '\n')
            else:
                modified_lines.append(line)
        # Point the goadb resource at the TTF variant from now on.
        self.goadb.output = self.goadb.output + '_TTF'
        with open(temp(self.goadb.output), 'w') as f:
            f.writelines(modified_lines)
        for style in self.styles_to_be_built:
            style.input_format = 'TTF'
            style.output_format = 'TTF'
    def _compile(self, style):
        """Compile one style with makeotf and post-process/deploy the result.

        Assembles the makeotf argument list from build options and style
        flags, runs makeotf, optionally round-trips the binary through
        the postprocess_font_file hook, and copies the font to the Adobe
        fonts folder when that directory exists.
        """
        self._check_inputs([temp(style.path), temp(self.fmndb.output), temp(self.goadb.output)])
        # if style.file_name.endswith('.ufo'):
        # font = style.open_font(is_temp=True)
        # if font.info.postscriptFontName != style.output_full_name_postscript:
        # font.info.postscriptFontName = style.output_full_name_postscript
        # font.save()
        font_path = style.font_path
        arguments = [
            '-f', temp(style.path),
            '-o', font_path,
            '-mf', temp(self.fmndb.output),
            '-gf', temp(self.goadb.output),
            '-rev', self.fontrevision,
            '-ga',
            '-omitMacNames',
        ]
        # Release mode (-r) except for --test builds; -shw suppresses the
        # "unhinted glyphs" warnings when autohinting is skipped.
        if not self.args.test:
            arguments.append('-r')
        if not self.options['run_autohint']:
            arguments.append('-shw')
        if self.options['do_style_linking']:
            if style.is_bold:
                arguments.append('-b')
            if style.is_italic:
                arguments.append('-i')
        # OS/2 version 4: set/clear fsSelection bits 7, 8 and 9.
        if self.options['use_os_2_version_4']:
            for digit, boolean in [
                ('7', self.options['prefer_typo_metrics']),
                ('8', self.options['is_width_weight_slope_only']),
                ('9', style.is_oblique),
            ]:
                arguments.append('-osbOn' if boolean else '-osbOff')
                arguments.append(digit)
        if not os.path.isdir(constants.paths.BUILD):
            os.makedirs(constants.paths.BUILD)
        subprocess.call(['makeotf'] + arguments)
        if self.options['postprocess_font_file'] and os.path.exists(font_path):
            original = TTFont(font_path)
            postprocessed = self.postprocess_font_file(original)
            postprocessed.save(font_path, reorderTables=False)
            print('[NOTE] `postprocess_font_file` done.')
        # Best-effort deploy for local testing; skipped when absent.
        destination = constants.paths.ADOBE_FONTS
        if os.path.exists(font_path) and os.path.isdir(destination):
            subprocess.call(['cp', '-f', font_path, destination])
    def _finalize_options(self):
        """Parse CLI arguments and fold them into the build options.

        --stages / --options are digit strings toggling stage and step
        flags; --test forces the fastest possible build. Also decides
        which styles will be built: all styles when interpolating,
        otherwise only those directly derived from masters.
        """
        parser = argparse.ArgumentParser(
            description = 'execute `AFDKOPython build.py` to run stages as specified in build.py, or append arguments to override.'
        )
        parser.add_argument(
            '--test', action = 'store_true',
            help = 'run a minimum and fast build process.',
        )
        parser.add_argument(
            '--stages', action = 'store',
            help = '"1" for "prepare_styles", "2" for "prepare_features", and "3" for "compile".',
        )
        parser.add_argument(
            '--options', action = 'store',
            help = '"0" for none, "1" for "makeinstances", "2" for "checkoutlines", and "3" for "autohint".',
        )
        self.args = parser.parse_args()
        # Digit membership, not position, selects each stage/step.
        if self.args.stages:
            stages = str(self.args.stages)
            self.options['run_stage_prepare_styles'] = '1' in stages
            self.options['run_stage_prepare_features'] = '2' in stages
            self.options['run_stage_compile'] = '3' in stages
        if self.args.options:
            options = str(self.args.options)
            self.options['run_makeinstances'] = '1' in options
            self.options['run_checkoutlines'] = '2' in options
            self.options['run_autohint'] = '3' in options
        if self.args.test:
            self.options['run_makeinstances'] = False
            self.options['run_checkoutlines'] = False
            self.options['run_autohint'] = False
        self.styles_to_be_built = self.family.styles
        if self.family.masters and (not self.options['run_makeinstances']):
            self.styles_to_be_built = self.family.get_styles_that_are_directly_derived_from_masters()
    def build(self):
        """Run the enabled build stages: styles, features, compile.

        Each stage resets its temp subdirectory before preparing its
        resources. The compile stage runs makeotf per style, and again
        in TTF mode when `build_ttf` is on.
        """
        self._finalize_options()
        make_dir(constants.paths.TEMP)
        if self.options['run_stage_prepare_styles']:
            reset_dir(temp(constants.paths.MASTERS))
            self._prepare(self.masters)
            for master in self.family.masters:
                if self.options['prepare_master']:
                    self.prepare_master(master)
                master.update_glyph_order()
            reset_dir(temp(constants.paths.STYLES))
            self._prepare(self.styles)
        if self.options['run_stage_prepare_features']:
            reset_dir(temp(constants.paths.FEATURES))
            self._prepare(self.features_classes)
            self._prepare(self.features_tables)
            self._prepare(self.features_languagesystems)
            self._prepare(self.features_GSUB)
            for style in self.styles_to_be_built:
                self._prepare(self.features_GPOS, style)
                self._generate_features_weight_class(style)
                self._generate_features_references(style)
        if self.options['run_stage_compile']:
            self._prepare(self.fmndb)
            self._prepare(self.goadb)
            for style in self.styles_to_be_built:
                self._compile(style)
            # Second pass: rewrite the GOADB and recompile as TTF.
            if self.options['build_ttf']:
                self._prepare_for_compiling_ttf()
                for style in self.styles_to_be_built:
                    self._compile(style)
# ---
def sort_glyphs(glyph_order, glyph_names):
    """Sort glyph names by a reference glyph order.

    Names present in `glyph_order` come first, in that order; names
    unknown to `glyph_order` follow, keeping their original relative
    order. Set-based membership makes this O(n + m) instead of the
    original O(n * m) list scans; output is unchanged.
    """
    names = set(glyph_names)
    known = set(glyph_order)
    sorted_glyphs = (
        [i for i in glyph_order if i in names] +
        [i for i in glyph_names if i not in known]
    )
    return sorted_glyphs
def compose_glyph_class_def_lines(class_name, glyph_names):
    """Build .fea source lines defining @class_name with glyph_names.

    An empty glyph list yields a commented-out placeholder definition.
    Either way the final element is an empty string, which produces a
    blank separator line when the lines are joined with newlines.
    """
    if not glyph_names:
        return ['# @{} = [];'.format(class_name), '']
    lines = ['@{} = ['.format(class_name)]
    lines.extend(' {}'.format(name) for name in glyph_names)
    lines.append('];')
    lines.append('')
    return lines
def glyph_filter_marks(family, glyph):
    """Return True if the glyph carries at least one mark anchor.

    A mark anchor is an anchor whose name starts with an underscore
    (e.g. '_top'); anchors with empty/None names are ignored. The
    `family` argument is unused but kept for the shared filter-function
    signature used by _generate_features_classes. Rewritten with any()
    to replace the manual boolean-flag loop.
    """
    return any(
        anchor.name and anchor.name.startswith('_')
        for anchor in glyph.anchors
    )
# ---
def remove_files(path):
    """Force-remove the file or directory tree at `path` (rm -fR).

    Missing paths are silently ignored, matching rm -f semantics.
    """
    command = ['rm', '-fR', path]
    subprocess.call(command)
def make_dir(path):
    """Create `path` and any missing parents (mkdir -p); no-op if present."""
    command = ['mkdir', '-p', path]
    subprocess.call(command)
def reset_dir(path):
    """Empty `path` by deleting it entirely and recreating the directory."""
    remove_files(path)
    make_dir(path)
# ---
def overriding(abstract_path):
    """Identity placeholder: return the project-relative path unchanged."""
    return abstract_path
def temp(abstract_path):
    """Map an abstract project path into the temp build directory.

    Falsy input (None or empty string) maps to None.
    """
    if not abstract_path:
        return None
    return os.path.join(constants.paths.TEMP, abstract_path)
|
from command import GlimCommand
from termcolor import colored
from utils import copytree
import os
import traceback
class NewCommand(GlimCommand):
    """`glim new`: scaffold a fresh glim application.

    Copies the bundled project prototype into the directory two levels
    above this file; a failed copy is reported as an already-existing
    app. (Python 2 syntax: print statement, comma except clause.)
    """
    name = 'new'
    description = 'generates a new glim app'
    def run(self, app):
        # Prototype skeleton shipped with glim, resolved relative to cwd.
        proto_path = 'glim/proto/project'
        currentpath = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
        try:
            copytree(proto_path, currentpath)
            print colored('Created new glim app', 'green')
        except Exception, e:
            # NOTE(review): any exception is treated as "app exists";
            # the bound exception `e` is unused.
            print colored('App already exists', 'red')
class StartCommand(GlimCommand):
    """`glim start`: launch the app's development web server."""
    name = 'start'
    description = 'start the glim app web server'
    def configure(self):
        # CLI flags with local-development defaults.
        self.add_argument("--host", help = "enter host", default = '127.0.0.1')
        self.add_argument("--port", help = "enter port", default = '8080')
    def run(self, app):
        print colored('Glim server started on %s environment' % self.args.env, 'green')
        app.start(host = self.args.host, port = self.args.port)
Refactor: route command output through the Log facade instead of bare print statements.
from command import GlimCommand
from termcolor import colored
from utils import copytree
from glim.facades import Log
import os
import traceback
class NewCommand(GlimCommand):
    """`glim new`: scaffold a fresh glim application.

    Copies the bundled project prototype into the directory two levels
    above this file; a failed copy is reported via the Log facade as an
    already-existing app. (Python 2 syntax: comma except clause.)
    """
    name = 'new'
    description = 'generates a new glim app'
    def run(self, app):
        # Prototype skeleton shipped with glim, resolved relative to cwd.
        proto_path = 'glim/proto/project'
        currentpath = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
        try:
            copytree(proto_path, currentpath)
            Log.info('Created new glim app')
        except Exception, e:
            # NOTE(review): any exception is treated as "app exists";
            # the bound exception `e` is unused.
            Log.error('App already exists')
class StartCommand(GlimCommand):
    """`glim start`: launch the app's development web server."""
    name = 'start'
    description = 'start the glim app web server'
    def configure(self):
        # CLI flags with local-development defaults.
        self.add_argument("--host", help = "enter host", default = '127.0.0.1')
        self.add_argument("--port", help = "enter port", default = '8080')
    def run(self, app):
        Log.info('Glim server started on %s environment' % self.args.env)
        app.start(host = self.args.host, port = self.args.port)
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
#       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#       See the License for the specific language governing permissions and
#       limitations under the License.
__author__ = 'dan'
class Config(object):
    """Mutable holder for server configuration values.

    All values default to None (test_mode defaults to False) and are
    exposed through plain read/write properties.
    """

    def __init__(self):
        self._file_server_root = None
        self._file_server_base_uri = None
        self._workflow_service_base_uri = None
        self._file_server_blueprints_folder = None
        self._file_server_uploaded_blueprints_folder = None
        # Bug fix: this line was a bare attribute access
        # (`self._file_server_resources_uri`), which raised
        # AttributeError during construction and never initialized
        # the attribute.
        self._file_server_resources_uri = None
        self._test_mode = False

    @property
    def file_server_root(self):
        return self._file_server_root

    @file_server_root.setter
    def file_server_root(self, value):
        self._file_server_root = value

    @property
    def file_server_base_uri(self):
        return self._file_server_base_uri

    @file_server_base_uri.setter
    def file_server_base_uri(self, value):
        self._file_server_base_uri = value

    @property
    def file_server_blueprints_folder(self):
        return self._file_server_blueprints_folder

    @file_server_blueprints_folder.setter
    def file_server_blueprints_folder(self, value):
        self._file_server_blueprints_folder = value

    @property
    def file_server_uploaded_blueprints_folder(self):
        return self._file_server_uploaded_blueprints_folder

    @file_server_uploaded_blueprints_folder.setter
    def file_server_uploaded_blueprints_folder(self, value):
        self._file_server_uploaded_blueprints_folder = value

    @property
    def file_server_resources_uri(self):
        return self._file_server_resources_uri

    @file_server_resources_uri.setter
    def file_server_resources_uri(self, value):
        self._file_server_resources_uri = value

    @property
    def workflow_service_base_uri(self):
        return self._workflow_service_base_uri

    @workflow_service_base_uri.setter
    def workflow_service_base_uri(self, value):
        self._workflow_service_base_uri = value

    @property
    def test_mode(self):
        return self._test_mode

    @test_mode.setter
    def test_mode(self, value):
        self._test_mode = value
# Module-level singleton; swapped out via reset() (e.g. in tests).
_instance = Config()
def reset(configuration=None):
    """Replace the module-level Config singleton.

    With no argument a fresh default Config is installed; otherwise the
    given configuration object becomes the singleton.
    """
    global _instance
    _instance = Config() if configuration is None else configuration
def instance():
    """Return the current module-level Config singleton."""
    return _instance
CFY-196: fix Config.__init__ — initialize _file_server_resources_uri to None instead of a bare attribute access.
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
#       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#       See the License for the specific language governing permissions and
#       limitations under the License.
__author__ = 'dan'
class Config(object):
    """Mutable holder for server configuration values.

    All values default to None (test_mode defaults to False) and are
    exposed through plain read/write properties.
    """
    def __init__(self):
        self._file_server_root = None
        self._file_server_base_uri = None
        self._workflow_service_base_uri = None
        self._file_server_blueprints_folder = None
        self._file_server_uploaded_blueprints_folder = None
        self._file_server_resources_uri = None
        # Flag toggled by tests to alter server behavior.
        self._test_mode = False
    @property
    def file_server_root(self):
        return self._file_server_root
    @file_server_root.setter
    def file_server_root(self, value):
        self._file_server_root = value
    @property
    def file_server_base_uri(self):
        return self._file_server_base_uri
    @file_server_base_uri.setter
    def file_server_base_uri(self, value):
        self._file_server_base_uri = value
    @property
    def file_server_blueprints_folder(self):
        return self._file_server_blueprints_folder
    @file_server_blueprints_folder.setter
    def file_server_blueprints_folder(self, value):
        self._file_server_blueprints_folder = value
    @property
    def file_server_uploaded_blueprints_folder(self):
        return self._file_server_uploaded_blueprints_folder
    @file_server_uploaded_blueprints_folder.setter
    def file_server_uploaded_blueprints_folder(self, value):
        self._file_server_uploaded_blueprints_folder = value
    @property
    def file_server_resources_uri(self):
        return self._file_server_resources_uri
    @file_server_resources_uri.setter
    def file_server_resources_uri(self, value):
        self._file_server_resources_uri = value
    @property
    def workflow_service_base_uri(self):
        return self._workflow_service_base_uri
    @workflow_service_base_uri.setter
    def workflow_service_base_uri(self, value):
        self._workflow_service_base_uri = value
    @property
    def test_mode(self):
        return self._test_mode
    @test_mode.setter
    def test_mode(self, value):
        self._test_mode = value
# Module-level singleton; swapped out via reset() (e.g. in tests).
_instance = Config()
def reset(configuration=None):
    """Replace the module-level Config singleton.

    With no argument a fresh default Config is installed; otherwise the
    given configuration object becomes the singleton.
    """
    global _instance
    _instance = Config() if configuration is None else configuration
def instance():
    """Return the current module-level Config singleton."""
    return _instance
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import argparse
import multiprocessing as mp
import neblio_ci_libs as nci
# CI driver for the macOS Qt wallet build: pins Homebrew dependencies to
# High Sierra bottles, builds with qmake/make (ccache-accelerated), then
# either runs the test binary (--test) or packages a .dmg for deploy.
nci.setup_travis_or_gh_actions_env_vars()
working_dir = os.getcwd()
build_dir = "build"
deploy_dir = os.path.join(os.environ['BUILD_DIR'],'deploy', '')
parser = argparse.ArgumentParser()
parser.add_argument('--test', '-t', help='Only build and run tests', action='store_true')
args = parser.parse_args()
nci.mkdir_p(deploy_dir)
nci.mkdir_p(build_dir)
os.chdir(build_dir)
# do not auto update homebrew as it is very slow
os.environ['HOMEBREW_NO_AUTO_UPDATE'] = '1'
# remove existing deps that come pre installed
nci.call_with_err_code('brew uninstall --ignore-dependencies ccache || true')
nci.call_with_err_code('brew uninstall --ignore-dependencies qt || true')
nci.call_with_err_code('brew uninstall --ignore-dependencies berkeley-db@4 || true')
nci.call_with_err_code('brew uninstall --ignore-dependencies boost || true')
nci.call_with_err_code('brew uninstall --ignore-dependencies miniupnpc || true')
nci.call_with_err_code('brew uninstall --ignore-dependencies curl || true')
nci.call_with_err_code('brew uninstall --ignore-dependencies openssl || true')
nci.call_with_err_code('brew uninstall --ignore-dependencies openssl@1.1 || true')
nci.call_with_err_code('brew uninstall --ignore-dependencies qrencode || true')
nci.call_with_err_code('brew uninstall --ignore-dependencies libsodium || true')
# Install High Sierra versions of dependencies, that being the minimum macOS version we support
#ccache https://bintray.com/homebrew/bottles/download_file?file_path=ccache-3.7.6.high_sierra.bottle.tar.gz
nci.call_retry_on_fail('brew install --force https://assets.nebl.io/dependencies/macos/ccache-3.7.6.high_sierra.bottle.tar.gz')
#qt https://bintray.com/homebrew/bottles/download_file?file_path=qt-5.13.2.high_sierra.bottle.tar.gz
nci.call_retry_on_fail('brew install --force https://assets.nebl.io/dependencies/macos/qt-5.13.2.high_sierra.bottle.tar.gz')
#berkeley-db@4 https://bintray.com/homebrew/bottles/download_file?file_path=berkeley-db%404-4.8.30.high_sierra.bottle.1.tar.gz
nci.call_retry_on_fail('brew install --force https://assets.nebl.io/dependencies/macos/berkeley-db%404-4.8.30.high_sierra.bottle.1.tar.gz')
#boost https://homebrew.bintray.com/bottles/boost-1.72.0_3.high_sierra.bottle.tar.gz
nci.call_retry_on_fail('brew install --force https://assets.nebl.io/dependencies/macos/boost-1.72.0_3.high_sierra.bottle.tar.gz')
#miniupnpc https://bintray.com/homebrew/bottles/download_file?file_path=miniupnpc-2.1.high_sierra.bottle.tar.gz
nci.call_retry_on_fail('brew install --force https://assets.nebl.io/dependencies/macos/miniupnpc-2.1.high_sierra.bottle.tar.gz')
#curl https://bintray.com/homebrew/bottles/download_file?file_path=curl-7.67.0.high_sierra.bottle.tar.gz
nci.call_retry_on_fail('brew install --force https://assets.nebl.io/dependencies/macos/curl-7.67.0.high_sierra.bottle.tar.gz')
#openssl https://bintray.com/homebrew/bottles/download_file?file_path=openssl%401.1-1.1.1d.high_sierra.bottle.tar.gz
nci.call_retry_on_fail('brew install --force https://assets.nebl.io/dependencies/macos/openssl%401.1-1.1.1d.high_sierra.bottle.tar.gz')
#qrencode https://bintray.com/homebrew/bottles/download_file?file_path=qrencode-4.0.2.high_sierra.bottle.tar.gz
nci.call_retry_on_fail('brew install --force https://assets.nebl.io/dependencies/macos/qrencode-4.0.2.high_sierra.bottle.tar.gz')
#libsodium https://bintray.com/homebrew/bottles/download_file?file_path=libsodium-1.0.18_1.high_sierra.bottle.tar.gz
nci.call_retry_on_fail('brew install --force https://assets.nebl.io/dependencies/macos/libsodium-1.0.18_1.high_sierra.bottle.tar.gz')
# force relinking
nci.call_with_err_code('brew unlink qt && brew link --force --overwrite qt')
nci.call_with_err_code('brew unlink berkeley-db@4 && brew link --force --overwrite berkeley-db@4')
nci.call_with_err_code('brew unlink boost && brew link --force --overwrite boost')
nci.call_with_err_code('brew unlink miniupnpc && brew link --force --overwrite miniupnpc')
nci.call_with_err_code('brew unlink curl && brew link --force --overwrite curl')
nci.call_with_err_code('brew unlink python && brew link --force --overwrite python')
nci.call_with_err_code('brew unlink openssl@1.1 && brew link --force --overwrite openssl@1.1')
nci.call_with_err_code('brew unlink qrencode && brew link --force --overwrite qrencode')
nci.call_with_err_code('brew unlink libsodium && brew link --force --overwrite libsodium')
# show ccache statistics before the build for cache-hit diagnostics
nci.call_with_err_code('ccache -s')
# prepend ccache to the path, necessary since prior steps prepend things to the path
os.environ['PATH'] = '/usr/local/opt/ccache/libexec:' + os.environ['PATH']
if (args.test):
    # Test build: wallet disabled (NoWallet), NTP parse tests enabled.
    nci.call_with_err_code('qmake "QMAKE_CXX=ccache clang++" "USE_UPNP=1" "USE_QRCODE=1" "RELEASE=1" "DEFINES += UNITTEST_RUN_NTP_PARSE_TESTS" "DEFINES += UNITTEST_FORCE_DISABLE_PREMADE_DATA_DOWNLOAD" "NEBLIO_CONFIG += NoWallet" ../neblio-wallet.pro')
    nci.call_with_err_code("make -j" + str(mp.cpu_count()))
    # download test data
    nci.call_with_err_code('wget --no-check-certificate --progress=dot:giga https://assets.nebl.io/testdata/test_data_mainnet_tab.tar.xz -O ../wallet/test/data/test_data_mainnet_tab.tar.xz')
    nci.call_with_err_code('wget --no-check-certificate --progress=dot:giga https://assets.nebl.io/testdata/test_data_testnet_tab.tar.xz -O ../wallet/test/data/test_data_testnet_tab.tar.xz')
    nci.call_with_err_code('tar -xJvf ../wallet/test/data/test_data_mainnet_tab.tar.xz -C ../wallet/test/data')
    nci.call_with_err_code('tar -xJvf ../wallet/test/data/test_data_testnet_tab.tar.xz -C ../wallet/test/data')
    nci.call_with_err_code('rm ../wallet/test/data/*.tar.xz')
    # run tests
    nci.call_with_err_code("./wallet/test/neblio-Qt.app/Contents/MacOS/neblio-Qt")
else:
    # Release build: full wallet, then package and stage a zipped .dmg.
    nci.call_with_err_code('qmake "QMAKE_CXX=ccache clang++" "USE_UPNP=1" "USE_QRCODE=1" "RELEASE=1" ../neblio-wallet.pro')
    nci.call_with_err_code("make -j" + str(mp.cpu_count()))
    # build our .dmg
    nci.call_with_err_code('npm install -g appdmg')
    os.chdir("wallet")
    nci.call_with_err_code('../../contrib/macdeploy/macdeployqtplus ./neblio-Qt.app -add-qt-tr da,de,es,hu,ru,uk,zh_CN,zh_TW -verbose 1 -rpath /usr/local/opt/qt/lib')
    nci.call_with_err_code('appdmg ../../contrib/macdeploy/appdmg.json ./neblio-Qt.dmg')
    file_name = '$(date +%Y-%m-%d)---' + os.environ['BRANCH'] + '-' + os.environ['COMMIT'][:7] + '---neblio-Qt---macOS.zip'
    nci.call_with_err_code('zip -j ' + file_name + ' ./neblio-Qt.dmg')
    nci.call_with_err_code('mv ' + file_name + ' ' + deploy_dir)
    nci.call_with_err_code('echo "Binary package at ' + deploy_dir + file_name + '"')
    # set the SOURCE_DIR & SOURCE_PATH env vars, these point to the binary that will be uploaded
    nci.call_with_err_code('echo "SOURCE_DIR=' + deploy_dir + '" >> $GITHUB_ENV')
    nci.call_with_err_code('echo "SOURCE_PATH=' + deploy_dir + file_name + '" >> $GITHUB_ENV')
nci.call_with_err_code('ccache -s')
print("")
print("")
print("Building finished successfully.")
print("")
Update test_osx-gui_wallet.py: also uninstall the pre-installed brotli package before installing the pinned High Sierra bottles.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""CI build script for the neblio Qt wallet on macOS.

Installs pinned High Sierra Homebrew bottles, builds the wallet with
qmake/make (or, with --test, builds and runs the test suite instead),
and packages the resulting .app into a zipped .dmg placed in the deploy
directory for upload.
"""
import os
import argparse
import multiprocessing as mp
import neblio_ci_libs as nci

# Populate CI environment variables (works on both Travis and GitHub Actions).
nci.setup_travis_or_gh_actions_env_vars()

working_dir = os.getcwd()
build_dir = "build"
# The trailing '' keeps a trailing path separator on deploy_dir, so that
# string concatenation with a file name below yields a valid path.
deploy_dir = os.path.join(os.environ['BUILD_DIR'],'deploy', '')

parser = argparse.ArgumentParser()
parser.add_argument('--test', '-t', help='Only build and run tests', action='store_true')
args = parser.parse_args()

nci.mkdir_p(deploy_dir)
nci.mkdir_p(build_dir)
os.chdir(build_dir)

# do not auto update homebrew as it is very slow
os.environ['HOMEBREW_NO_AUTO_UPDATE'] = '1'

# remove existing deps that come pre installed, so the pinned bottles below win
nci.call_with_err_code('brew uninstall --ignore-dependencies ccache || true')
nci.call_with_err_code('brew uninstall --ignore-dependencies qt || true')
nci.call_with_err_code('brew uninstall --ignore-dependencies berkeley-db@4 || true')
nci.call_with_err_code('brew uninstall --ignore-dependencies boost || true')
nci.call_with_err_code('brew uninstall --ignore-dependencies miniupnpc || true')
nci.call_with_err_code('brew uninstall --ignore-dependencies curl || true')
nci.call_with_err_code('brew uninstall --ignore-dependencies openssl || true')
nci.call_with_err_code('brew uninstall --ignore-dependencies openssl@1.1 || true')
nci.call_with_err_code('brew uninstall --ignore-dependencies qrencode || true')
nci.call_with_err_code('brew uninstall --ignore-dependencies libsodium || true')
nci.call_with_err_code('brew uninstall --ignore-dependencies brotli || true')

# Install High Sierra versions of dependencies, due to that being the minimum version we support.
# The original bintray URLs are kept as comments next to each mirrored bottle.
#ccache https://bintray.com/homebrew/bottles/download_file?file_path=ccache-3.7.6.high_sierra.bottle.tar.gz
nci.call_retry_on_fail('brew install --force https://assets.nebl.io/dependencies/macos/ccache-3.7.6.high_sierra.bottle.tar.gz')
#qt https://bintray.com/homebrew/bottles/download_file?file_path=qt-5.13.2.high_sierra.bottle.tar.gz
nci.call_retry_on_fail('brew install --force https://assets.nebl.io/dependencies/macos/qt-5.13.2.high_sierra.bottle.tar.gz')
#berkeley-db@4 https://bintray.com/homebrew/bottles/download_file?file_path=berkeley-db%404-4.8.30.high_sierra.bottle.1.tar.gz
nci.call_retry_on_fail('brew install --force https://assets.nebl.io/dependencies/macos/berkeley-db%404-4.8.30.high_sierra.bottle.1.tar.gz')
#boost https://homebrew.bintray.com/bottles/boost-1.72.0_3.high_sierra.bottle.tar.gz
nci.call_retry_on_fail('brew install --force https://assets.nebl.io/dependencies/macos/boost-1.72.0_3.high_sierra.bottle.tar.gz')
#miniupnpc https://bintray.com/homebrew/bottles/download_file?file_path=miniupnpc-2.1.high_sierra.bottle.tar.gz
nci.call_retry_on_fail('brew install --force https://assets.nebl.io/dependencies/macos/miniupnpc-2.1.high_sierra.bottle.tar.gz')
#curl https://bintray.com/homebrew/bottles/download_file?file_path=curl-7.67.0.high_sierra.bottle.tar.gz
nci.call_retry_on_fail('brew install --force https://assets.nebl.io/dependencies/macos/curl-7.67.0.high_sierra.bottle.tar.gz')
#openssl https://bintray.com/homebrew/bottles/download_file?file_path=openssl%401.1-1.1.1d.high_sierra.bottle.tar.gz
nci.call_retry_on_fail('brew install --force https://assets.nebl.io/dependencies/macos/openssl%401.1-1.1.1d.high_sierra.bottle.tar.gz')
#qrencode https://bintray.com/homebrew/bottles/download_file?file_path=qrencode-4.0.2.high_sierra.bottle.tar.gz
nci.call_retry_on_fail('brew install --force https://assets.nebl.io/dependencies/macos/qrencode-4.0.2.high_sierra.bottle.tar.gz')
#libsodium https://bintray.com/homebrew/bottles/download_file?file_path=libsodium-1.0.18_1.high_sierra.bottle.tar.gz
nci.call_retry_on_fail('brew install --force https://assets.nebl.io/dependencies/macos/libsodium-1.0.18_1.high_sierra.bottle.tar.gz')
nci.call_retry_on_fail('brew install --force brotli')

# force relinking
nci.call_with_err_code('brew unlink qt && brew link --force --overwrite qt')
nci.call_with_err_code('brew unlink berkeley-db@4 && brew link --force --overwrite berkeley-db@4')
nci.call_with_err_code('brew unlink boost && brew link --force --overwrite boost')
nci.call_with_err_code('brew unlink miniupnpc && brew link --force --overwrite miniupnpc')
nci.call_with_err_code('brew unlink curl && brew link --force --overwrite curl')
nci.call_with_err_code('brew unlink python && brew link --force --overwrite python')
nci.call_with_err_code('brew unlink openssl@1.1 && brew link --force --overwrite openssl@1.1')
nci.call_with_err_code('brew unlink qrencode && brew link --force --overwrite qrencode')
nci.call_with_err_code('brew unlink libsodium && brew link --force --overwrite libsodium')
nci.call_with_err_code('brew unlink brotli && brew link --force --overwrite brotli')

# show ccache stats before the build
nci.call_with_err_code('ccache -s')

# prepend ccache to the path, necessary since prior steps prepend things to the path
os.environ['PATH'] = '/usr/local/opt/ccache/libexec:' + os.environ['PATH']

if (args.test):
    # Test build: no wallet GUI, NTP parse tests enabled, pre-made data download disabled.
    nci.call_with_err_code('qmake "QMAKE_CXX=ccache clang++" "USE_UPNP=1" "USE_QRCODE=1" "RELEASE=1" "DEFINES += UNITTEST_RUN_NTP_PARSE_TESTS" "DEFINES += UNITTEST_FORCE_DISABLE_PREMADE_DATA_DOWNLOAD" "NEBLIO_CONFIG += NoWallet" ../neblio-wallet.pro')
    nci.call_with_err_code("make -j" + str(mp.cpu_count()))
    # download test data
    nci.call_with_err_code('wget --no-check-certificate --progress=dot:giga https://assets.nebl.io/testdata/test_data_mainnet_tab.tar.xz -O ../wallet/test/data/test_data_mainnet_tab.tar.xz')
    nci.call_with_err_code('wget --no-check-certificate --progress=dot:giga https://assets.nebl.io/testdata/test_data_testnet_tab.tar.xz -O ../wallet/test/data/test_data_testnet_tab.tar.xz')
    nci.call_with_err_code('tar -xJvf ../wallet/test/data/test_data_mainnet_tab.tar.xz -C ../wallet/test/data')
    nci.call_with_err_code('tar -xJvf ../wallet/test/data/test_data_testnet_tab.tar.xz -C ../wallet/test/data')
    nci.call_with_err_code('rm ../wallet/test/data/*.tar.xz')
    # run tests
    nci.call_with_err_code("./wallet/test/neblio-Qt.app/Contents/MacOS/neblio-Qt")
else:
    # Release build of the full GUI wallet.
    nci.call_with_err_code('qmake "QMAKE_CXX=ccache clang++" "USE_UPNP=1" "USE_QRCODE=1" "RELEASE=1" ../neblio-wallet.pro')
    nci.call_with_err_code("make -j" + str(mp.cpu_count()))
    # build our .dmg
    nci.call_with_err_code('npm install -g appdmg')
    os.chdir("wallet")
    nci.call_with_err_code('../../contrib/macdeploy/macdeployqtplus ./neblio-Qt.app -add-qt-tr da,de,es,hu,ru,uk,zh_CN,zh_TW -verbose 1 -rpath /usr/local/opt/qt/lib')
    nci.call_with_err_code('appdmg ../../contrib/macdeploy/appdmg.json ./neblio-Qt.dmg')
    # Archive name embeds build date (expanded by the shell), branch and short commit.
    file_name = '$(date +%Y-%m-%d)---' + os.environ['BRANCH'] + '-' + os.environ['COMMIT'][:7] + '---neblio-Qt---macOS.zip'
    nci.call_with_err_code('zip -j ' + file_name + ' ./neblio-Qt.dmg')
    nci.call_with_err_code('mv ' + file_name + ' ' + deploy_dir)
    nci.call_with_err_code('echo "Binary package at ' + deploy_dir + file_name + '"')
    # set the SOURCE_DIR & SOURCE_PATH env vars, these point to the binary that will be uploaded
    nci.call_with_err_code('echo "SOURCE_DIR=' + deploy_dir + '" >> $GITHUB_ENV')
    nci.call_with_err_code('echo "SOURCE_PATH=' + deploy_dir + file_name + '" >> $GITHUB_ENV')

# show ccache stats after the build
nci.call_with_err_code('ccache -s')

print("")
print("")
print("Building finished successfully.")
print("")
|
# stdlib
from getpass import getpass
import json
import logging
import sys
import time
from typing import Dict
from typing import Optional
from typing import Type
from typing import Union
# third party
from nacl.encoding import HexEncoder
from nacl.signing import SigningKey
import requests
# syft absolute
import syft as sy
# relative
from .. import GridURL
from ...core.io.connection import ClientConnection
from ...core.io.route import SoloRoute
from ...core.node.common.client import Client
from ...core.node.domain_client import DomainClient
from ...core.node.network_client import NetworkClient
from ...util import bcolors
from ...util import verify_tls
from .grid_connection import GridHTTPConnection
DEFAULT_PYGRID_PORT = 80
DEFAULT_PYGRID_ADDRESS = f"http://127.0.0.1:{DEFAULT_PYGRID_PORT}"
def connect(
    url: Union[str, GridURL] = DEFAULT_PYGRID_ADDRESS,
    conn_type: Type[ClientConnection] = GridHTTPConnection,
    credentials: Optional[Dict] = None,
    user_key: Optional[SigningKey] = None,
    timeout: Optional[float] = None,
) -> Client:
    """Build and return a client for the node reachable at ``url``.

    Fetches the node's metadata, optionally logs in with ``credentials``,
    and instantiates a ``DomainClient`` or ``NetworkClient`` depending on
    the node type reported in the metadata.

    :param url: address of the PyGrid node.
    :param conn_type: connection class used to talk to the node.
    :param credentials: optional ``{"email": ..., "password": ...}`` dict;
        when non-empty a login is performed and the node-issued key is used.
    :param user_key: signing key to use when no credentials are given;
        a fresh key is generated when omitted.
    :param timeout: per-request timeout in seconds (None = library default).
    :returns: a connected ``Client`` subclass instance.
    """
    # Use Server metadata to build client route.
    # Normalize falsy credentials (None or {}) exactly once — the second,
    # "is not None" normalization in the original was dead code.
    credentials = credentials if credentials else {}
    conn = conn_type(url=GridURL.from_url(url))  # type: ignore

    # get metadata and check for https redirect so that login is sent over TLS
    metadata = conn._get_metadata(timeout=timeout)  # type: ignore

    if credentials:
        metadata, _user_key = conn.login(credentials=credentials)  # type: ignore
        _user_key = SigningKey(_user_key.encode(), encoder=HexEncoder)
    else:
        if not user_key:
            _user_key = SigningKey.generate()
        else:
            _user_key = user_key

    # Check node client type based on metadata response
    client_type: Union[Type[DomainClient], Type[NetworkClient]]
    if metadata.node_type == "Domain":
        client_type = DomainClient
    else:
        client_type = NetworkClient

    (
        spec_location,
        name,
        _client_id,  # part of the deserialized tuple, unused here
    ) = client_type.deserialize_client_metadata_from_node(metadata=metadata)

    # Create a new Solo Route using the selected connection type
    route = SoloRoute(destination=spec_location, connection=conn)
    kwargs = {
        "name": name,
        "routes": [route],
        "signing_key": _user_key,
        "version": metadata.version,
    }
    if client_type is NetworkClient:
        kwargs["network"] = spec_location
    elif client_type is DomainClient:
        kwargs["domain"] = spec_location
    else:
        raise NotImplementedError

    # Create a new client using the selected client type
    node = client_type(**kwargs)
    return node
def login(
    url: Optional[Union[str, GridURL]] = None,
    port: Optional[int] = None,
    email: Optional[str] = None,
    password: Optional[str] = None,
    conn_type: Type[ClientConnection] = GridHTTPConnection,
    verbose: Optional[bool] = True,
    timeout: Optional[float] = None,
    retry: Optional[int] = None,
) -> Optional[Client]:
    """Connect and log into a PyGrid domain/network node.

    :param url: node address; when omitted, docker-host is probed first and
        localhost is used as a fallback.
    :param port: node port; prompted for when neither ``port`` nor ``url``
        is supplied.
    :param email: account email; omitted -> connect as anonymous guest.
    :param password: account password; the literal "secret" triggers an
        interactive re-prompt so real passwords stay out of notebooks.
    :param conn_type: connection class used to talk to the node.
    :param verbose: when True, print connection progress.
    :param timeout: per-request timeout in seconds (defaults to 10).
    :param retry: number of connection attempts (defaults to 5).
    :returns: a connected ``Client``, or ``None`` when connecting fails.
    """
    retry = 5 if retry is None else retry  # Default to 5 retries
    timeout = 10 if timeout is None else timeout  # Default to 10 seconds

    # Nag users still running with the well-known default credentials.
    if password == "changethis":  # nosec
        if email == "info@openmined.org":
            print(
                f"{bcolors.YELLOW}WARNING:{bcolors.ENDC} CHANGE YOUR USERNAME AND PASSWORD!!! \n\nAnyone can login as an admin to your node"
                + " right now because your password is still the default PySyft username and password!!!\n"
            )
        else:
            print(
                f"{bcolors.YELLOW}WARNING:{bcolors.ENDC} CHANGE YOUR PASSWORD!!! \n\nAnyone can login into your account"
                + " right now because your password is the default PySyft password!!!\n"
            )

    # TRASK: please keep this so that people will stop putting their passwords in notebooks.
    if password == "secret":  # nosec
        print("Welcome " + str(email) + "!")
        # Typo fix: prompt said "you password".
        password = getpass(prompt="Please enter your password:")

    if port is None and not url:  # if url is used, we can ignore port
        port = int(input("Please specify the port of the domain you're logging into:"))

    # TODO: build multiple route objects and let the Client decide which one to use
    if isinstance(url, GridURL):
        grid_url = url
    elif url is None:
        # Probe the docker-host alias first; fall back to localhost on failure.
        grid_url = GridURL(host_or_ip="docker-host", port=port, path="/api/v1/status")
        try:
            requests.get(str(grid_url), verify=verify_tls())
        except Exception:
            grid_url.host_or_ip = "localhost"
    else:
        grid_url = GridURL(host_or_ip=url, port=port)

    grid_url = grid_url.with_path("/api/v1")

    if verbose:
        sys.stdout.write("\rConnecting to " + str(grid_url.host_or_ip) + "...")

    if email is None or password is None:
        credentials = {}
        logging.info(
            "\n\nNo email and password defined in login() - connecting as anonymous user!!!\n"
        )
    else:
        credentials = {"email": email, "password": password}

    # connecting to domain
    node = None
    timeout_btw_retries = timeout
    retry_attempt = 1
    while node is None and retry_attempt <= retry:
        try:
            node = connect(
                url=grid_url,
                credentials=credentials,
                conn_type=conn_type,
                timeout=timeout,
            )
        except requests.ConnectTimeout:
            # The exception object carried nothing we print; dropped the
            # unused "as e" binding. Also fixed the "agrument" typo below.
            print(
                f"""\n{bcolors.BOLD}{bcolors.RED}ConnectTimeout:{bcolors.ENDC}
                Connection to node with url: {grid_url.host_or_ip} timed out after {timeout} seconds.\t
                Please try the following options:\t
                - Please try increasing the timeout by passing it as an argument to the login method.
                `sy.login(email="", password="", url="", timeout="")`
                - The domain/network node you're trying to connect could be offline at the current moment. Please try again later.\t"""
            )
            return None
        except requests.ConnectionError as e:
            if retry_attempt <= retry:
                print(
                    f"\r{bcolors.BOLD}ConnectionError{bcolors.ENDC}: Retrying again.... Attempt: {retry_attempt}",
                    end="\r",
                )
                time.sleep(timeout_btw_retries)
            else:
                raise e
        retry_attempt += 1

    if node is None:
        print(
            f"""\n{bcolors.BOLD}{bcolors.RED}ConnectionError:{bcolors.ENDC}
            Oops !!! We can't seem to connect to the node: '{grid_url.host_or_ip}:{grid_url.port}'\t
            Please try the following options:\t
            - Are you sure the server at '{grid_url.host_or_ip}:{grid_url.port}' is running? Please check the `url`/`port` you entered are correct.\t
            - Are you sure you can connect to the server at '{grid_url.host_or_ip}:{grid_url.port}'? Perhaps there's a firewall between you and the server?\t
            - The domain/network node you're trying to connect could be offline at the current moment. Please try again later.\t"""
        )
        return None

    if verbose:
        # bit of fanciness
        sys.stdout.write("\rConnecting to " + str(grid_url.host_or_ip) + "...")
        sys.stdout.write(" done! \t Logging into")
        sys.stdout.write(" " + str(node.name) + "... ")
        if email is None or password is None:
            sys.stdout.write("as GUEST...")
        time.sleep(1)  # ok maybe too fancy... but c'mon don't you want to be fancy?
        print("done!")
    else:
        print("Logging into", str(node.name), "... done!")

    if sy.__version__ != node.version:
        print(
            "\n**Warning**: The syft version on your system and the node are different."
        )
        print(
            f"Version on your system: {sy.__version__}\nVersion on the node: {node.version}"
        )
        print()

    return node
def register(
    name: Optional[str] = None,
    email: Optional[str] = None,
    password: Optional[str] = None,
    url: Optional[str] = None,
    port: Optional[int] = None,
    verbose: Optional[bool] = True,
) -> Client:
    """Register a new user on a domain node, then log in as that user.

    Any argument left as ``None`` is collected interactively from stdin.

    :raises Exception: when the node's register endpoint reports an error.
    """
    # Interactively collect whatever was not supplied by the caller.
    name = input("Please enter your name:") if name is None else name
    email = input("Please enter your email:") if email is None else email
    if password is None:
        password = getpass("Please enter your password")
    if url is None:
        url = input("Please enter URL of domain (ex: 'localhost'):")
    if port is None:
        port = int(input("Please enter the port your domain is running on:"))

    grid_url = GridURL(host_or_ip=url, port=port)
    payload = {"name": name, "email": email, "password": password}
    response = requests.post(
        grid_url.url + "/api/v1/register", data=json.dumps(payload)
    )

    body = json.loads(response.text)
    if "error" in body:
        raise Exception(response.text)

    if verbose:
        print("Successfully registered! Logging in...")
    return login(
        url=grid_url, port=port, email=email, password=password, verbose=verbose
    )
improve messaging for ReadTimeout Error on sy.login
fix linting
# stdlib
from getpass import getpass
import json
import logging
import sys
import time
from typing import Dict
from typing import Optional
from typing import Type
from typing import Union
# third party
from nacl.encoding import HexEncoder
from nacl.signing import SigningKey
import requests
# syft absolute
import syft as sy
# relative
from .. import GridURL
from ...core.io.connection import ClientConnection
from ...core.io.route import SoloRoute
from ...core.node.common.client import Client
from ...core.node.domain_client import DomainClient
from ...core.node.network_client import NetworkClient
from ...util import bcolors
from ...util import verify_tls
from .grid_connection import GridHTTPConnection
DEFAULT_PYGRID_PORT = 80
DEFAULT_PYGRID_ADDRESS = f"http://127.0.0.1:{DEFAULT_PYGRID_PORT}"
def connect(
    url: Union[str, GridURL] = DEFAULT_PYGRID_ADDRESS,
    conn_type: Type[ClientConnection] = GridHTTPConnection,
    credentials: Optional[Dict] = None,
    user_key: Optional[SigningKey] = None,
    timeout: Optional[float] = None,
) -> Client:
    """Build and return a client for the node reachable at ``url``.

    Fetches the node's metadata, optionally logs in with ``credentials``,
    and instantiates a ``DomainClient`` or ``NetworkClient`` depending on
    the node type reported in the metadata.

    :param url: address of the PyGrid node.
    :param conn_type: connection class used to talk to the node.
    :param credentials: optional ``{"email": ..., "password": ...}`` dict;
        when non-empty a login is performed and the node-issued key is used.
    :param user_key: signing key to use when no credentials are given;
        a fresh key is generated when omitted.
    :param timeout: per-request timeout in seconds (None = library default).
    :returns: a connected ``Client`` subclass instance.
    """
    # Use Server metadata to build client route.
    # Normalize falsy credentials (None or {}) exactly once — the second,
    # "is not None" normalization in the original was dead code.
    credentials = credentials if credentials else {}
    conn = conn_type(url=GridURL.from_url(url))  # type: ignore

    # get metadata and check for https redirect so that login is sent over TLS
    metadata = conn._get_metadata(timeout=timeout)  # type: ignore

    if credentials:
        metadata, _user_key = conn.login(credentials=credentials)  # type: ignore
        _user_key = SigningKey(_user_key.encode(), encoder=HexEncoder)
    else:
        if not user_key:
            _user_key = SigningKey.generate()
        else:
            _user_key = user_key

    # Check node client type based on metadata response
    client_type: Union[Type[DomainClient], Type[NetworkClient]]
    if metadata.node_type == "Domain":
        client_type = DomainClient
    else:
        client_type = NetworkClient

    (
        spec_location,
        name,
        _client_id,  # part of the deserialized tuple, unused here
    ) = client_type.deserialize_client_metadata_from_node(metadata=metadata)

    # Create a new Solo Route using the selected connection type
    route = SoloRoute(destination=spec_location, connection=conn)
    kwargs = {
        "name": name,
        "routes": [route],
        "signing_key": _user_key,
        "version": metadata.version,
    }
    if client_type is NetworkClient:
        kwargs["network"] = spec_location
    elif client_type is DomainClient:
        kwargs["domain"] = spec_location
    else:
        raise NotImplementedError

    # Create a new client using the selected client type
    node = client_type(**kwargs)
    return node
def login(
    url: Optional[Union[str, GridURL]] = None,
    port: Optional[int] = None,
    email: Optional[str] = None,
    password: Optional[str] = None,
    conn_type: Type[ClientConnection] = GridHTTPConnection,
    verbose: Optional[bool] = True,
    timeout: Optional[float] = None,
    retry: Optional[int] = None,
) -> Client:
    """Connect and log into a PyGrid domain/network node.

    :param url: node address; when omitted, docker-host is probed first and
        localhost is used as a fallback.
    :param port: node port; prompted for when neither ``port`` nor ``url``
        is supplied.
    :param email: account email; omitted -> connect as anonymous guest.
    :param password: account password; the literal "secret" triggers an
        interactive re-prompt so real passwords stay out of notebooks.
    :param conn_type: connection class used to talk to the node.
    :param verbose: when True, print connection progress.
    :param timeout: per-request timeout in seconds (defaults to 10).
    :param retry: number of connection attempts (defaults to 5).
    :returns: a connected ``Client``; bare ``return`` (i.e. ``None``) on
        failure — hence the ``# type: ignore`` on those returns.
    """
    retry = 5 if retry is None else retry  # Default to 5 retries
    timeout = 10 if timeout is None else timeout  # Default to 10 seconds

    # Nag users still running with the well-known default credentials.
    if password == "changethis":  # nosec
        if email == "info@openmined.org":
            print(
                f"{bcolors.YELLOW}WARNING:{bcolors.ENDC} CHANGE YOUR USERNAME AND PASSWORD!!! \n\n"
                + "Anyone can login as an admin to your node"
                + " right now because your password is still the default PySyft username and password!!!\n"
            )
        else:
            print(
                f"{bcolors.YELLOW}WARNING:{bcolors.ENDC} CHANGE YOUR PASSWORD!!! \n\n"
                + "Anyone can login into your account"
                + " right now because your password is the default PySyft password!!!\n"
            )

    # TRASK: please keep this so that people will stop putting their passwords in notebooks.
    if password == "secret":  # nosec
        print("Welcome " + str(email) + "!")
        password = getpass(prompt="Please enter you password:")

    if port is None and not url:  # if url is used, we can ignore port
        port = int(input("Please specify the port of the domain you're logging into:"))

    # TODO: build multiple route objects and let the Client decide which one to use
    if isinstance(url, GridURL):
        grid_url = url
    elif url is None:
        # Probe the docker-host alias first; fall back to localhost on failure.
        grid_url = GridURL(host_or_ip="docker-host", port=port, path="/api/v1/status")
        try:
            requests.get(str(grid_url), verify=verify_tls())
        except Exception:
            grid_url.host_or_ip = "localhost"
    else:
        grid_url = GridURL(host_or_ip=url, port=port)

    grid_url = grid_url.with_path("/api/v1")

    if verbose:
        sys.stdout.write("\rConnecting to " + str(grid_url.host_or_ip) + "...")

    # Missing credentials mean an anonymous (guest) connection.
    if email is None or password is None:
        credentials = {}
        logging.info(
            "\n\nNo email and password defined in login() - connecting as anonymous user!!!\n"
        )
    else:
        credentials = {"email": email, "password": password}

    # connecting to domain — retry on ConnectionError, abort on timeouts
    node = None
    timeout_btw_retries = timeout
    retry_attempt = 1
    while node is None and retry_attempt <= retry:
        try:
            node = connect(
                url=grid_url,
                credentials=credentials,
                conn_type=conn_type,
                timeout=timeout,
            )
        except requests.ReadTimeout:
            print(
                f"\n{bcolors.BOLD}{bcolors.RED}ReadTimeout:{bcolors.ENDC}\n"
                f"\tConnection to node with url: {grid_url.host_or_ip}:{grid_url.port} "
                f"timed out after {timeout} seconds.\n"
                "\tPlease try the following options:\n"
                "\t- Please try increasing the timeout by passing it as an argument to the login method.\n"
                "\te.g. `sy.login(email='my@email.com', password='password', url='localhost', timeout=30)`\n"
                "\t- The domain/network node you're trying to connect could be offline "
                "at the current moment. Please try again later.\t"
            )
            return  # type: ignore
        except requests.ConnectionError as e:
            if retry_attempt <= retry:
                print(
                    f"\r{bcolors.BOLD}ConnectionError{bcolors.ENDC}: Retrying again.... Attempt: {retry_attempt}",
                    end="\r",
                )
                time.sleep(timeout_btw_retries)
            else:
                raise e
        retry_attempt += 1

    # All retries exhausted without a successful connection.
    if node is None:
        print(
            f"\n{bcolors.BOLD}{bcolors.RED}ConnectionError:{bcolors.ENDC}\n"
            f"\tOops !!! We can't seem to connect to the node: '{grid_url.host_or_ip}:{grid_url.port}'\n"
            "\tPlease try the following options:\n"
            f"\t- Are you sure the server at '{grid_url.host_or_ip}:{grid_url.port}' is running? "
            "Please check the `url`/`port` you entered are correct.\n"
            f"\t- Are you sure you can connect to the server at '{grid_url.host_or_ip}:{grid_url.port}'? "
            "Perhaps there's a firewall between you and the server?\n"
            "\t- The domain/network node you're trying to connect could be offline "
            "at the current moment. Please try again later.\n"
        )
        return  # type: ignore

    if verbose:
        # bit of fanciness
        sys.stdout.write("\rConnecting to " + str(grid_url.host_or_ip) + "...")
        sys.stdout.write(" done! \t Logging into")
        sys.stdout.write(" " + str(node.name) + "... ")
        if email is None or password is None:
            sys.stdout.write("as GUEST...")
        time.sleep(1)  # ok maybe too fancy... but c'mon don't you want to be fancy?
        print("done!")
    else:
        print("Logging into", str(node.name), "... done!")

    # Warn when the client and node run different syft versions.
    if sy.__version__ != node.version:
        print(
            "\n**Warning**: The syft version on your system and the node are different."
        )
        print(
            f"Version on your system: {sy.__version__}\nVersion on the node: {node.version}"
        )
        print()

    return node
def register(
    name: Optional[str] = None,
    email: Optional[str] = None,
    password: Optional[str] = None,
    url: Optional[str] = None,
    port: Optional[int] = None,
    verbose: Optional[bool] = True,
) -> Client:
    """Register a new user on a domain node, then log in as that user.

    Any argument left as ``None`` is collected interactively from stdin.

    :raises Exception: when the node's register endpoint reports an error.
    """
    if name is None:
        name = input("Please enter your name:")
    if email is None:
        email = input("Please enter your email:")
    if password is None:
        password = getpass("Please enter your password")
    if url is None:
        url = input("Please enter URL of domain (ex: 'localhost'):")
    if port is None:
        port = int(input("Please enter the port your domain is running on:"))

    grid_url = GridURL(host_or_ip=url, port=port)
    register_url = grid_url.url + "/api/v1/register"
    myobj = {"name": name, "email": email, "password": password}
    # NOTE(review): the payload is posted as a raw JSON string body without a
    # Content-Type header — presumably the server parses it anyway; confirm.
    response = requests.post(register_url, data=json.dumps(myobj))

    # Success path: no "error" key in the response body.
    if "error" not in json.loads(response.text):
        if verbose:
            print("Successfully registered! Logging in...")
        return login(
            url=grid_url, port=port, email=email, password=password, verbose=verbose
        )
    raise Exception(response.text)
|
"""
github3.repos
=============
This module contains the classes relating to repositories.
"""
from base64 import b64decode
from json import dumps
from requests import post
from github3.events import Event
from github3.issues import Issue, IssueEvent, Label, Milestone, issue_params
from github3.git import Blob, Commit, Reference, Tag, Tree
from github3.models import GitHubObject, GitHubCore, BaseComment, BaseCommit
from github3.pulls import PullRequest
from github3.users import User, Key
from github3.decorators import requires_auth
from github3.notifications import Subscription, Thread
class Repository(GitHubCore):
"""The :class:`Repository <Repository>` object. It represents how GitHub
sends information about repositories.
"""
    def __init__(self, repo, session=None):
        """Build a Repository from the decoded JSON dict ``repo``.

        :param repo: decoded JSON representation of a repository as
            returned by the GitHub API
        :param session: session/core object used for subsequent requests
        """
        super(Repository, self).__init__(repo, session)
        #: URL used to clone via HTTPS.
        self.clone_url = repo.get('clone_url')
        #: ``datetime`` object representing when the Repository was created.
        self.created_at = self._strptime(repo.get('created_at'))
        #: Description of the repository.
        self.description = repo.get('description')
        # The number of forks
        #: The number of forks made of this repository.
        self.forks = repo.get('forks')
        # Is this repository a fork?
        self._is_fork = repo.get('fork')
        # Clone url using git, e.g. git://github.com/sigmavirus24/github3.py
        #: Plain git url for an anonymous clone.
        self.git_url = repo.get('git_url')
        # Feature flags exposed via the has_* booleans on the API payload.
        self._has_dl = repo.get('has_downloads')
        self._has_issues = repo.get('has_issues')
        self._has_wiki = repo.get('has_wiki')
        # e.g. https://sigmavirus24.github.com/github3.py
        #: URL of the home page for the project.
        self.homepage = repo.get('homepage')
        # e.g. https://github.com/sigmavirus24/github3.py
        #: URL of the project at GitHub.
        self.html_url = repo.get('html_url')
        #: Unique id of the repository.
        self.id = repo.get('id')
        #: Language property.
        self.language = repo.get('language')
        #: Mirror property.
        self.mirror_url = repo.get('mirror_url')
        # Repository name, e.g. github3.py
        #: Name of the repository.
        self.name = repo.get('name')
        # Number of open issues
        #: Number of open issues on the repository.
        self.open_issues = repo.get('open_issues')
        # Repository owner's name
        #: :class:`User <github3.users.User>` object representing the
        #  repository owner.
        self.owner = User(repo.get('owner'), self._session)
        # Is this repository private?
        self._priv = repo.get('private')
        #: ``datetime`` object representing the last time commits were pushed
        #  to the repository.
        self.pushed_at = self._strptime(repo.get('pushed_at'))
        #: Size of the repository.
        self.size = repo.get('size')
        # SSH url e.g. git@github.com/sigmavirus24/github3.py
        #: URL to clone the repository via SSH.
        self.ssh_url = repo.get('ssh_url')
        #: If it exists, url to clone the repository via SVN.
        self.svn_url = repo.get('svn_url')
        #: ``datetime`` object representing the last time the repository was
        #  updated.
        self.updated_at = self._strptime(repo.get('updated_at'))
        self._api = repo.get('url', '')
        # The number of watchers
        #: Number of users watching the repository.
        self.watchers = repo.get('watchers')
        #: Parent of this fork, if it exists :class:`Repository`
        self.source = repo.get('source', None)
        if self.source:
            # NOTE(review): passes ``self`` (not ``self._session``) as the
            # session argument — confirm the base class accepts a core object.
            self.source = Repository(self.source, self)
        #: Parent of this fork, if it exists :class:`Repository`
        self.parent = repo.get('parent', None)
        if self.parent:
            # NOTE(review): same ``self``-as-session pattern as above.
            self.parent = Repository(self.parent, self)
        #: default branch for the repository
        self.master_branch = repo.get('master_branch', '')
def __repr__(self):
return '<Repository [{0}/{1}]>'.format(self.owner.login, self.name)
    def _update_(self, repo):
        """Re-initialize this instance in place from a fresh ``repo`` dict."""
        self.__init__(repo, self._session)
def _create_pull(self, data):
json = None
if data:
url = self._build_url('pulls', base_url=self._api)
json = self._json(self._post(url, data), 201)
return PullRequest(json, self._session) if json else None
@requires_auth
def add_collaborator(self, login):
"""Add ``login`` as a collaborator to a repository.
:param login: (required), login of the user
:type login: str
:returns: bool -- True if successful, False otherwise
"""
resp = False
if login:
url = self._build_url('collaborators', login, base_url=self._api)
resp = self._boolean(self._put(url), 204, 404)
return resp
def archive(self, format, path='', ref='master'):
"""Get the tarball or zipball archive for this repo at ref.
:param format: (required), accepted values: ('tarball',
'zipball')
:type format: str
:param path: (optional), path where the file should be saved
to, default is the filename provided in the headers and will be
written in the current directory.
it can take a file-like object as well
:type path: str, file
:param ref: (optional)
:type ref: str
:returns: bool -- True if successful, False otherwise
"""
resp = None
written = False
if format in ('tarball', 'zipball'):
url = self._build_url(format, ref, base_url=self._api)
resp = self._get(url, allow_redirects=True, prefetch=False)
fd = None
file_like = False
if resp and resp.ok:
if path:
if callable(getattr(path, 'write', None)):
file_like = True
fd = path
else:
fd = open(path, 'wb')
else:
header = resp.headers['content-disposition']
i = header.find('filename=') + len('filename=')
fd = open(header[i:], 'wb')
for chunk in resp.iter_content():
fd.write(chunk)
if not file_like:
fd.close()
written = True
return written
def blob(self, sha):
"""Get the blob indicated by ``sha``.
:param sha: (required), sha of the blob
:type sha: str
:returns: :class:`Blob <github3.git.Blob>` if successful, otherwise
None
"""
url = self._build_url('git', 'blobs', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return Blob(json) if json else None
def branch(self, name):
"""Get the branch ``name`` of this repository.
:param name: (required), branch name
:type name: str
:returns: :class:`Branch <Branch>`
"""
json = None
if name:
url = self._build_url('branches', name, base_url=self._api)
json = self._json(self._get(url), 200)
return Branch(json, self) if json else None
def commit(self, sha):
"""Get a single (repo) commit. See :func:`git_commit` for the Git Data
Commit.
:param sha: (required), sha of the commit
:type sha: str
:returns: :class:`RepoCommit <RepoCommit>` if successful, otherwise
None
"""
url = self._build_url('commits', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return RepoCommit(json, self) if json else None
def commit_comment(self, comment_id):
"""Get a single commit comment.
:param comment_id: (required), id of the comment used by GitHub
:type comment_id: int
:returns: :class:`RepoComment <RepoComment>` if successful, otherwise
None
"""
url = self._build_url('comments', str(comment_id), base_url=self._api)
json = self._json(self._get(url), 200)
return RepoComment(json, self) if json else None
def compare_commits(self, base, head):
"""Compare two commits.
:param base: (required), base for the comparison
:type base: str
:param head: (required), compare this against base
:type head: str
:returns: :class:`Comparison <Comparison>` if successful, else None
"""
url = self._build_url('compare', base + '...' + head,
base_url=self._api)
json = self._json(self._get(url), 200)
return Comparison(json) if json else None
def contents(self, path):
"""Get the contents of the file pointed to by ``path``.
:param path: (required), path to file, e.g.
github3/repo.py
:type path: str
:returns: :class:`Contents <Contents>` if successful, else None
"""
url = self._build_url('contents', path, base_url=self._api)
json = self._json(self._get(url), 200)
return Contents(json) if json else None
@requires_auth
def create_blob(self, content, encoding):
"""Create a blob with ``content``.
:param content: (required), content of the blob
:type content: str
:param encoding: (required), ('base64', 'utf-8')
:type encoding: str
:returns: string of the SHA returned
"""
sha = ''
if encoding in ('base64', 'utf-8') and content:
url = self._build_url('git', 'blobs', base_url=self._api)
data = dumps({'content': content, 'encoding': encoding})
json = self._json(self._post(url, data), 201)
if json:
sha = json.get('sha')
return sha
@requires_auth
def create_comment(self, body, sha, path='', position=1, line=1):
"""Create a comment on a commit.
:param str body: (required), body of the message
:param str sha: (required), commit id
:param str path: (optional), relative path of the file to comment
on
:param str position: (optional), line index in the diff to comment on
:param int line: (optional), line number of the file to comment on,
default: 1
:returns: :class:`RepoComment <RepoComment>` if successful else None
"""
line = int(line)
position = int(position)
json = None
if body and sha and line > 0:
data = dumps({'body': body, 'commit_id': sha, 'line': line,
'path': path, 'position': position})
url = self._build_url('commits', sha, 'comments',
base_url=self._api)
json = self._json(self._post(url, data), 201)
return RepoComment(json, self) if json else None
@requires_auth
def create_commit(self, message, tree, parents, author={}, committer={}):
"""Create a commit on this repository.
:param message: (required), commit message
:type message: str
:param tree: (required), SHA of the tree object this
commit points to
:type tree: str
:param parents: (required), SHAs of the commits that were parents of
this commit. If empty, the commit will be written as the root
commit. Even if there is only one parent, this should be an
array.
:type parents: list
:param author: (optional), if omitted, GitHub will
use the authenticated user's credentials and the current
time. Format: {'name': 'Committer Name', 'email':
'name@example.com', 'date': 'YYYY-MM-DDTHH:MM:SS+HH:00'}
:type author: dict
:param committer: (optional), if ommitted, GitHub will use the author
parameters. Should be the same format as the author parameter.
:type commiter: dict
:returns: :class:`Commit <github3.git.Commit>` if successful, else
None
"""
json = None
if message and tree and isinstance(parents, list):
url = self._build_url('git', 'commits', base_url=self._api)
data = dumps({'message': message, 'tree': tree, 'parents': parents,
'author': author, 'committer': committer})
json = self._json(self._post(url, data), 201)
return Commit(json, self) if json else None
@requires_auth
def create_download(self, name, path, description='',
content_type='text/plain'):
"""Create a new download on this repository.
I do not require you provide the size in bytes because it can be
determined by the operating system.
:param str name: (required), name of the file as it will appear
:param path: (required), path to the file
:type path: str
:param description: (optional), description of the file
:type description: str
:param content_type: (optional), e.g. 'text/plain'
:type content_type: str
:returns: :class:`Download <Download>` if successful, else None
"""
json = None
if name and path:
url = self._build_url('downloads', base_url=self._api)
from os import stat
info = stat(path)
data = dumps({'name': name, 'size': info.st_size,
'description': description,
'content_type': content_type})
json = self._json(self._post(url, data), 201)
if not json:
return None
form = [('key', json.get('path')),
('acl', json.get('acl')),
('success_action_status', '201'),
('Filename', json.get('name')),
('AWSAccessKeyId', json.get('accesskeyid')),
('Policy', json.get('policy')),
('Signature', json.get('signature')),
('Content-Type', json.get('mime_type'))]
file = [('file', open(path, 'rb').read())]
resp = post(json.get('s3_url'), data=form, files=file,
headers={'Accept-Charset': 'utf-8'})
return Download(json, self) if self._boolean(resp, 201, 404) else None
@requires_auth
def create_fork(self, organization=None):
"""Create a fork of this repository.
:param organization: (required), login for organization to create the
fork under
:type organization: str
:returns: :class:`Repository <Repository>` if successful, else None
"""
url = self._build_url('forks', base_url=self._api)
if organization:
resp = self._post(url, params={'org': organization})
else:
resp = self._post(url)
json = self._json(resp, 202)
return Repository(json, self) if json else None
@requires_auth
def create_hook(self, name, config, events=['push'], active=True):
"""Create a hook on this repository.
:param name: (required), name of the hook
:type name: str
:param config: (required), key-value pairs which act as settings
for this hook
:type config: dict
:param events: (optional), events the hook is triggered for
:type events: list
:param active: (optional), whether the hook is actually
triggered
:type active: bool
:returns: :class:`Hook <Hook>` if successful, else None
"""
json = None
if name and config and isinstance(config, dict):
url = self._build_url('hooks', base_url=self._api)
data = dumps({'name': name, 'config': config, 'events': events,
'active': active})
json = self._json(self._post(url, data), 201)
return Hook(json, self) if json else None
@requires_auth
def create_issue(self,
title,
body=None,
assignee=None,
milestone=None,
labels=[]):
"""Creates an issue on this repository.
:param str title: (required), title of the issue
:param str body: (optional), body of the issue
:param str assignee: (optional), login of the user to assign the
issue to
:param int milestone: (optional), number of the milestone to attribute
this issue to (e.g. ``m`` is a Milestone object, ``m.number`` is
what you pass here.)
:param labels: (optional), labels to apply to this
issue
:type labels: list of strings
:returns: :class:`Issue <github3.issues.Issue>` if successful, else
None
"""
issue = dumps({'title': title, 'body': body, 'assignee': assignee,
'milestone': milestone, 'labels': labels})
url = self._build_url('issues', base_url=self._api)
json = self._json(self._post(url, issue), 201)
return Issue(json, self) if json else None
@requires_auth
def create_key(self, title, key):
"""Create a deploy key.
:param title: (required), title of key
:type title: str
:param key: (required), key text
:type key: str
:returns: :class:`Key <github3.users.Key>` if successful, else None
"""
data = dumps({'title': title, 'key': key})
url = self._build_url('keys', base_url=self._api)
json = self._json(self._post(url, data), 201)
return Key(json, self) if json else None
@requires_auth
def create_label(self, name, color):
"""Create a label for this repository.
:param name: (required), name to give to the label
:type name: str
:param color: (required), value of the color to assign to the
label
:type color: str
:returns: :class:`Label <github3.issues.Label>` if successful, else
None
"""
data = dumps({'name': name, 'color': color.strip('#')})
url = self._build_url('labels', base_url=self._api)
json = self._json(self._post(url, data), 201)
return Label(json, self) if json else None
@requires_auth
def create_milestone(self, title, state=None, description=None,
due_on=None):
"""Create a milestone for this repository.
:param title: (required), title of the milestone
:type title: str
:param state: (optional), state of the milestone, accepted
values: ('open', 'closed'), default: 'open'
:type state: str
:param description: (optional), description of the milestone
:type description: str
:param due_on: (optional), ISO 8601 formatted due date
:type due_on: str
:returns: :class:`Milestone <github3.issues.Milestone>` if successful,
else None
"""
url = self._build_url('milestones', base_url=self._api)
if state not in ('open', 'closed'):
state = 'open'
data = dumps({'title': title, 'state': state,
'description': description, 'due_on': due_on})
json = self._json(self._post(url, data), 201)
return Milestone(json, self) if json else None
@requires_auth
def create_pull(self, title, base, head, body=''):
"""Create a pull request using commits from ``head`` and comparing
against ``base``.
:param title: (required)
:type title: str
:param base: (required), e.g., 'username:branch', or a sha
:type base: str
:param head: (required), e.g., 'master', or a sha
:type head: str
:param body: (optional), markdown formatted description
:type body: str
:returns: :class:`PullRequest <github3.pulls.PullRequest>` if
successful, else None
"""
data = dumps({'title': title, 'body': body, 'base': base,
'head': head})
return self._create_pull(data)
@requires_auth
def create_pull_from_issue(self, issue, base, head):
"""Create a pull request from issue #``issue``.
:param issue: (required), issue number
:type issue: int
:param base: (required), e.g., 'username:branch', or a sha
:type base: str
:param head: (required), e.g., 'master', or a sha
:type head: str
:returns: :class:`PullRequest <github3.pulls.PullRequest>` if
successful, else None
"""
data = dumps({'issue': issue, 'base': base, 'head': head})
return self._create_pull(data)
@requires_auth
def create_ref(self, ref, sha):
"""Create a reference in this repository.
:param ref: (required), fully qualified name of the reference,
e.g. ``refs/heads/master``. If it doesn't start with ``refs`` and
contain at least two slashes, GitHub's API will reject it.
:type ref: str
:param sha: (required), SHA1 value to set the reference to
:type sha: str
:returns: :class:`Reference <github3.git.Reference>` if successful
else None
"""
data = dumps({'ref': ref, 'sha': sha})
url = self._build_url('git', 'refs', base_url=self._api)
json = self._json(self._post(url, data), 201)
return Reference(json, self) if json else None
@requires_auth
def create_status(self, sha, state, target_url='', description=''):
"""Create a status object on a commit.
:param str sha: (required), SHA of the commit to create the status on
:param str state: (required), state of the test; only the following
are accepted: 'pending', 'success', 'error', 'failure'
:param str target_url: (optional), URL to associate with this status.
:param str description: (optional), short description of the status
"""
json = {}
if sha and state:
data = dumps({'state': state, 'target_url': target_url,
'description': description})
url = self._build_url('statuses', sha, base_url=self._api)
json = self._json(self._post(url, data=data), 201)
return Status(json) if json else None
@requires_auth
def create_tag(self, tag, message, sha, obj_type, tagger,
lightweight=False):
"""Create a tag in this repository.
:param tag: (required), name of the tag
:type tag: str
:param message: (required), tag message
:type message: str
:param sha: (required), SHA of the git object this is tagging
:type sha: str
:param obj_type: (required), type of object being tagged, e.g.,
'commit', 'tree', 'blob'
:type obj_type: str
:param tagger: (required), containing the name, email of the
tagger and the date it was tagged
:type tagger: dict
:param lightweight: (optional), if False, create an annotated
tag, otherwise create a lightweight tag (a Reference).
:type lightweight: bool
:returns: If lightweight == False: :class:`Tag <github3.git.Tag>` if
successful, else None. If lightweight == True: :class:`Reference
<Reference>`
"""
if lightweight and tag and sha:
return self.create_ref('refs/tags/' + tag, sha)
json = None
if tag and message and sha and obj_type and len(tagger) == 3:
data = dumps({'tag': tag, 'message': message, 'object': sha,
'type': obj_type, 'tagger': tagger})
url = self._build_url('git', 'tags', base_url=self._api)
json = self._json(self._post(url, data), 201)
if json:
self.create_ref('refs/tags/' + tag, sha)
return Tag(json) if json else None
@requires_auth
def create_tree(self, tree, base_tree=''):
"""Create a tree on this repository.
:param tree: (required), specifies the tree structure.
Format: [{'path': 'path/file', 'mode':
'filemode', 'type': 'blob or tree', 'sha': '44bfc6d...'}]
:type tree: list of dicts
:param base_tree: (optional), SHA1 of the tree you want
to update with new data
:type base_tree: str
:returns: :class:`Tree <github3.git.Tree>` if successful, else None
"""
json = None
if tree and isinstance(tree, list):
data = dumps({'tree': tree, 'base_tree': base_tree})
url = self._build_url('git', 'trees', base_url=self._api)
json = self._json(self._post(url, data), 201)
return Tree(json) if json else None
@requires_auth
def delete(self):
"""Delete this repository.
:returns: bool -- True if successful, False otherwise
"""
return self._boolean(self._delete(self._api), 204, 404)
@requires_auth
def delete_key(self, key_id):
"""Delete the key with the specified id from your deploy keys list.
:returns: bool -- True if successful, False otherwise
"""
if int(key_id) <= 0:
return False
url = self._build_url('keys', str(key_id), base_url=self._api)
return self._boolean(self._delete(url), 204, 404)
def download(self, id_num):
"""Get a single download object by its id.
:param id_num: (required), id of the download
:type id_num: int
:returns: :class:`Download <Download>` if successful, else None
"""
json = None
if int(id_num) > 0:
url = self._build_url('downloads', str(id_num),
base_url=self._api)
json = self._json(self._get(url), 200)
return Download(json, self) if json else None
@requires_auth
def edit(self,
name,
description='',
homepage='',
private=False,
has_issues=True,
has_wiki=True,
has_downloads=True,
default_branch=''):
"""Edit this repository.
:param str name: (required), name of the repository
:param str description: (optional)
:param str homepage: (optional)
:param bool private: (optional), If ``True``, create a
private repository. API default: ``False``
:param bool has_issues: (optional), If ``True``, enable
issues for this repository. API default: ``True``
:param bool has_wiki: (optional), If ``True``, enable the
wiki for this repository. API default: ``True``
:param bool has_downloads: (optional), If ``True``, enable
downloads for this repository. API default: ``True``
:param str default_branch: (optional), Update the default branch for
this repository
:returns: bool -- True if successful, False otherwise
"""
data = dumps({'name': name, 'description': description,
'homepage': homepage, 'private': private,
'has_issues': has_issues, 'has_wiki': has_wiki,
'has_downloads': has_downloads,
'default_branch': default_branch})
json = self._json(self._patch(self._api, data=data), 200)
if json:
self._update_(json)
return True
return False # (No coverage)
def is_collaborator(self, login):
"""Check to see if ``login`` is a collaborator on this repository.
:param login: (required), login for the user
:type login: str
:returns: bool -- True if successful, False otherwise
"""
if login:
url = self._build_url('collaborators', login, base_url=self._api)
return self._boolean(self._get(url), 204, 404)
return False
def is_fork(self):
"""Checks if this repository is a fork.
:returns: bool
"""
return self._is_fork
def is_private(self):
"""Checks if this repository is private.
:returns: bool
"""
return self._priv
def git_commit(self, sha):
"""Get a single (git) commit.
:param sha: (required), sha of the commit
:type sha: str
:returns: :class:`Commit <github3.git.Commit>` if successful,
otherwise None
"""
url = self._build_url('git', 'commits', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return Commit(json, self) if json else None
def has_downloads(self):
"""Checks if this repository has downloads.
:returns: bool
"""
return self._has_dl
def has_issues(self):
"""Checks if this repository has issues enabled.
:returns: bool
"""
return self._has_issues
def has_wiki(self):
"""Checks if this repository has a wiki.
:returns: bool
"""
return self._has_wiki
@requires_auth
def hook(self, id_num):
"""Get a single hook.
:param id_num: (required), id of the hook
:type id_num: int
:returns: :class:`Hook <Hook>` if successful, else None
"""
json = None
if int(id_num) > 0:
url = self._build_url('hooks', str(id_num), base_url=self._api)
json = self._json(self._get(url), 200)
return Hook(json, self) if json else None
def is_assignee(self, login):
"""Check if the user is a possible assignee for an issue on this
repository.
:returns: :class:`bool`
"""
url = self._build_url('assignees', login, base_url=self._api)
return self._boolean(self._get(url), 204, 404)
def issue(self, number):
"""Get the issue specified by ``number``.
:param number: (required), number of the issue on this repository
:type number: int
:returns: :class:`Issue <github3.issues.Issue>` if successful, else
None
"""
json = None
if int(number) > 0:
url = self._build_url('issues', str(number), base_url=self._api)
json = self._json(self._get(url), 200)
return Issue(json, self) if json else None
@requires_auth
def key(self, id_num):
"""Get the specified deploy key.
:param id_num: (required), id of the key
:type id_num: int
:returns: :class:`Key <Key>` if successful, else None
"""
json = None
if int(id_num) > 0:
url = self._build_url('keys', str(id_num), base_url=self._api)
json = self._json(self._get(url), 200)
return Key(json, self) if json else None
def label(self, name):
"""Get the label specified by ``name``
:param name: (required), name of the label
:type name: str
:returns: :class:`Label <github3.issues.Label>` if successful, else
None
"""
json = None
if name:
url = self._build_url('labels', name, base_url=self._api)
json = self._json(self._get(url), 200)
return Label(json, self) if json else None
def iter_assignees(self, number=-1):
"""Iterate over all available assignees to which an issue may be
assigned.
:param int number: (optional), number of assignees to return. Default:
-1 returns all available assignees
:returns: list of :class:`User <github3.users.User>`\ s
"""
url = self._build_url('assignees', base_url=self._api)
return self._iter(int(number), url, User)
def list_assignees(self):
"""List all available assignees to which an issue may be assigned.
:returns: list of :class:`User <github3.users.User>`\ s
"""
url = self._build_url('assignees', base_url=self._api)
json = self._json(self._get(url), 200)
return [User(u, self) for u in json]
def iter_branches(self, number=-1):
"""Iterate over the branches in this repository.
:param int number: (optional), number of branches to return. Default:
-1 returns all branches
:returns: list of :class:`Branch <Branch>`\ es
"""
# Paginate?
url = self._build_url('branches', base_url=self._api)
return self._iter(int(number), url, Branch)
def list_branches(self):
"""List the branches in this repository.
:returns: list of :class:`Branch <Branch>`\ es
"""
# Paginate?
url = self._build_url('branches', base_url=self._api)
json = self._json(self._get(url), 200)
return [Branch(b, self) for b in json]
def iter_comments(self, number=-1):
"""Iterate over comments on all commits in the repository.
:param int number: (optional), number of comments to return. Default:
-1 returns all comments
:returns: list of :class:`RepoComment <RepoComment>`\ s
"""
# Paginate?
url = self._build_url('comments', base_url=self._api)
return self._iter(int(number), url, RepoComment)
def list_comments(self):
"""List comments on all commits in the repository.
:returns: list of :class:`RepoComment <RepoComment>`\ s
"""
# Paginate?
url = self._build_url('comments', base_url=self._api)
json = self._json(self._get(url), 200)
return [RepoComment(comment, self) for comment in json]
def iter_comments_on_commit(self, sha, number=1):
"""Iterate over comments for a single commit.
:param sha: (required), sha of the commit to list comments on
:type sha: str
:param int number: (optional), number of comments to return. Default:
-1 returns all comments
:returns: list of :class:`RepoComment <RepoComment>`\ s
"""
url = self._build_url('commits', sha, 'comments', base_url=self._api)
return self._iter(int(number), url, RepoComment)
def list_comments_on_commit(self, sha):
"""List comments for a single commit.
:param sha: (required), sha of the commit to list comments on
:type sha: str
:returns: list of :class:`RepoComment <RepoComment>`\ s
"""
url = self._build_url('commits', sha, 'comments', base_url=self._api)
json = self._json(self._get(url), 200)
return [RepoComment(comm, self) for comm in json]
def iter_commits(self, sha='', path='', author='', number=-1):
"""Iterate over commits in this repository.
:param str sha: (optional), sha or branch to start listing commits
from
:param str path: (optional), commits containing this path will be
listed
:param str author: (optional), GitHub login, real name, or email to
filter commits by (using commit author)
:param int number: (optional), number of comments to return. Default:
-1 returns all comments
:returns: list of :class:`RepoCommit <RepoCommit>`\ s
"""
params = {}
if sha:
params['sha'] = sha
if path:
params['path'] = path
if author:
params['author'] = author
url = self._build_url('commits', base_url=self._api)
return self._iter(int(number), url, RepoCommit, params=params)
def list_commits(self, sha='', path='', author=''):
"""List commits in this repository.
:param str sha: (optional), sha or branch to start listing commits
from
:param str path: (optional), commits containing this path will be
listed
:param str author: (optional), GitHub login, real name, or email to
filter commits by (using commit author)
:returns: list of :class:`RepoCommit <RepoCommit>`\ s
"""
params = {}
if sha:
params['sha'] = sha
if path:
params['path'] = path
if author:
params['author'] = author
url = self._build_url('commits', base_url=self._api)
json = self._json(self._get(url, params=params), 200)
return [RepoCommit(commit, self) for commit in json]
def iter_contributors(self, anon=False, number=-1):
"""Iterate over the contributors to this repository.
:param anon: (optional), True lists anonymous contributors as well
:type anon: bool
:param number: (optional), number of contributors to return. Default:
-1 returns all contributors
:type number: int
:returns: list of :class:`User <github3.users.User>`\ s
"""
# Paginate
url = self._build_url('contributors', base_url=self._api)
params = {}
if anon:
params = {'anon': anon}
return self._iter(int(number), url, User, params=params)
def list_contributors(self, anon=False):
"""List the contributors to this repository.
:param anon: (optional), True lists anonymous contributors as well
:type anon: bool
:returns: list of :class:`User <github3.users.User>`\ s
"""
# Paginate
url = self._build_url('contributors', base_url=self._api)
params = {}
if anon:
params = {'anon': anon}
json = self._json(self._get(url, params=params), 200)
return [User(c, self) for c in json]
def iter_downloads(self, number=-1):
"""Iterate over available downloads for this repository.
:param int number: (optional), number of downloads to return. Default:
-1 returns all available downloads
:returns: list of :class:`Download <Download>`\ s
"""
url = self._build_url('downloads', base_url=self._api)
return self._iter(int(number), url, Download)
def list_downloads(self):
"""List available downloads for this repository.
:returns: list of :class:`Download <Download>`\ s
"""
url = self._build_url('downloads', base_url=self._api)
json = self._json(self._get(url), 200)
return [Download(dl, self) for dl in json]
def iter_events(self, number=-1):
"""Iterate over events on this repository.
:param int number: (optional), number of events to return. Default: -1
returns all available events
:returns: list of :class:`Event <github3.events.Event>`\ s
"""
url = self._build_url('events', base_url=self._api)
return self._iter(int(number), url, Event)
def list_events(self):
"""List events on this repository.
:returns: list of :class:`Event <github3.events.Event>`\ s
"""
url = self._build_url('events', base_url=self._api)
json = self._json(self._get(url), 200)
return [Event(e, self) for e in json]
def iter_forks(self, sort='', number=-1):
"""Iterate over forks of this repository.
:param sort: (optional), accepted values:
('newest', 'oldest', 'watchers'), API default: 'newest'
:type sort: str
:param number: (optional), number of forks to return. Default: -1
returns all forks
:type number: int
:returns: list of :class:`Repository <Repository>`
"""
url = self._build_url('forks', base_url=self._api)
params = {}
if sort in ('newest', 'oldest', 'watchers'):
params = {'sort': sort}
return self._iter(int(number), url, Repository, params=params)
def list_forks(self, sort=''):
"""List forks of this repository.
:param sort: (optional), accepted values:
('newest', 'oldest', 'watchers'), API default: 'newest'
:type sort: str
:returns: list of :class:`Repository <Repository>`
"""
url = self._build_url('forks', base_url=self._api)
params = {}
if sort in ('newest', 'oldest', 'watchers'):
params = {'sort': sort}
json = self._json(self._get(url, params=params), 200)
return [Repository(r, self) for r in json]
@requires_auth
def iter_hooks(self, number=-1):
"""Iterate over hooks registered on this repository.
:param int number: (optional), number of hoks to return. Default: -1
returns all hooks
:returns: list of :class:`Hook <Hook>`\ s
"""
url = self._build_url('hooks', base_url=self._api)
return self._iter(int(number), url, Hook)
@requires_auth
def list_hooks(self):
"""List hooks registered on this repository.
:returns: list of :class:`Hook <Hook>`\ s
"""
url = self._build_url('hooks', base_url=self._api)
json = self._json(self._get(url), 200)
return [Hook(h, self) for h in json]
def iter_issues(self,
milestone=None,
state=None,
assignee=None,
mentioned=None,
labels=None,
sort=None,
direction=None,
since=None,
number=-1):
"""Iterate over issues on this repo based upon parameters passed.
:param milestone: (optional), 'none', or '*'
:type milestone: int
:param state: (optional), accepted values: ('open', 'closed')
:type state: str
:param assignee: (optional), 'none', '*', or login name
:type assignee: str
:param mentioned: (optional), user's login name
:type mentioned: str
:param labels: (optional), comma-separated list of labels, e.g.
'bug,ui,@high' :param sort: accepted values:
('created', 'updated', 'comments', 'created')
:type labels: str
:param direction: (optional), accepted values: ('asc', 'desc')
:type direction: str
:param since: (optional), ISO 8601 format: YYYY-MM-DDTHH:MM:SSZ
:type since: str
:param number: (optional), Number of issues to return.
By default all issues are returned
:type since: int
:returns: list of :class:`Issue <github3.issues.Issue>`\ s
"""
url = self._build_url('issues', base_url=self._api)
params = {}
if milestone in ('*', 'none') or isinstance(milestone, int):
params['milestone'] = milestone
if assignee:
params['assignee'] = assignee
if mentioned:
params['mentioned'] = mentioned
params.update(issue_params(None, state, labels, sort, direction,
since)) # nopep8
return self._iter(int(number), url, Issue, params=params)
    def list_issues(self,
                    milestone=None,
                    state=None,
                    assignee=None,
                    mentioned=None,
                    labels=None,
                    sort=None,
                    direction=None,
                    since=None):
        """List issues on this repo based upon parameters passed.

        :param milestone: (optional), issue number of a milestone, 'none',
            or '*'
        :type milestone: int
        :param state: (optional), accepted values: ('open', 'closed')
        :type state: str
        :param assignee: (optional), 'none', '*', or login name
        :type assignee: str
        :param mentioned: (optional), user's login name
        :type mentioned: str
        :param labels: (optional), comma-separated list of labels, e.g.
            'bug,ui,@high'
        :type labels: str
        :param sort: (optional), accepted values:
            ('created', 'updated', 'comments')
        :type sort: str
        :param direction: (optional), accepted values: ('asc', 'desc')
        :type direction: str
        :param since: (optional), ISO 8601 format: YYYY-MM-DDTHH:MM:SSZ
        :type since: str
        :returns: list of :class:`Issue <github3.issues.Issue>`\ s
        """
        # Paginate
        url = self._build_url('issues', base_url=self._api)
        params = {}
        if milestone in ('*', 'none') or isinstance(milestone, int):
            params['milestone'] = str(milestone).lower()
            # str(None) = 'None' which is invalid, so .lower() it to make it
            # work.
        if assignee:
            params['assignee'] = assignee
        if mentioned:
            params['mentioned'] = mentioned
        params.update(issue_params(None, state, labels, sort, direction,
                                   since))  # nopep8
        request = self._get(url, params=params)
        json = self._json(request, 200)
        return [Issue(i, self) for i in json]
def iter_issue_events(self, number=-1):
"""Iterates over issue events on this repository.
:param int number: (optional), number of events to return. Default: -1
returns all available events
:returns: generator of
:class:`IssueEvent <github3.issues.IssueEvent>`\ s
"""
url = self._build_url('issues', 'events', base_url=self._api)
return self._iter(int(number), url, IssueEvent)
def list_issue_events(self):
"""List issue events on this repository.
:returns: list of :class:`IssueEvent <github3.issues.IssueEvent>`\ s
"""
# Paginate
url = self._build_url('issues', 'events', base_url=self._api)
json = self._json(self._get(url), 200)
return [IssueEvent(e, self) for e in json]
@requires_auth
def iter_keys(self, number=-1):
"""Iterates over deploy keys on this repository.
:param int number: (optional), number of keys to return. Default: -1
returns all available keys
:returns: generator of :class:`Key <github3.users.Key>`\ s
"""
url = self._build_url('keys', base_url=self._api)
return self._iter(int(number), url, Key)
@requires_auth
def list_keys(self):
"""List deploy keys on this repository.
:returns: list of :class:`Key <github3.users.Key>`\ s
"""
# Paginate?
url = self._build_url('keys', base_url=self._api)
json = self._json(self._get(url), 200)
return [Key(k, self) for k in json]
def iter_labels(self, number=-1):
"""Iterates over labels on this repository.
:param int number: (optional), number of labels to return. Default: -1
returns all available labels
:returns: generator of :class:`Label <github3.issues.Label>`\ s
"""
url = self._build_url('labels', base_url=self._api)
return self._iter(int(number), url, Label)
def list_labels(self):
"""List labels on this repository.
:returns: list of :class:`Label <github3.issues.Label>`\ s
"""
url = self._build_url('labels', base_url=self._api)
json = self._json(self._get(url), 200)
return [Label(label, self) for label in json]
def iter_languages(self, number=-1):
"""Iterate over the programming languages used in the repository.
:param int number: (optional), number of languages to return. Default:
-1 returns all used languages
:returns: list of tuples
"""
url = self._build_url('languages', base_url=self._api)
return self._iter(int(number), url, tuple)
def list_languages(self):
"""List the programming languages used in the repository.
:returns: list of tuples
"""
url = self._build_url('languages', base_url=self._api)
json = self._json(self._get(url), 200)
return [(k, v) for k, v in json.items()]
def iter_milestones(self, state=None, sort=None, direction=None,
number=-1):
"""Iterates over the milestones on this repository.
:param str state: (optional), state of the milestones, accepted
values: ('open', 'closed')
:param str sort: (optional), how to sort the milestones, accepted
values: ('due_date', 'completeness')
:param str direction: (optional), direction to sort the milestones,
accepted values: ('asc', 'desc')
:param int number: (optional), number of milestones to return.
Default: -1 returns all milestones
:returns: generator of
:class:`Milestone <github3.issues.Milestone>`\ s
"""
url = self._build_url('milestones', base_url=self._api)
accepted = {'state': ('open', 'closed'),
'sort': ('due_date', 'completeness'),
'direction': ('asc', 'desc')}
params = {'state': state, 'sort': sort, 'direction': direction}
for (k, v) in list(params.items()):
if not (v and (v in accepted[k])): # e.g., '' or None
del params[k]
if not params:
params = None
return self._iter(int(number), url, Milestone, params)
def list_milestones(self, state=None, sort=None, direction=None):
"""List the milestones on this repository.
:param state: (optional), state of the milestones, accepted
values: ('open', 'closed')
:type state: str
:param sort: (optional), how to sort the milestones, accepted
values: ('due_date', 'completeness')
:type sort: str
:param direction: (optional), direction to sort the milestones,
accepted values: ('asc', 'desc')
:type direction: str
:returns: list of :class:`Milestone <github3.issues.Milestone>`\ s
"""
# Paginate?
url = self._build_url('milestones', base_url=self._api)
params = {}
if state in ('open', 'closed'):
params['state'] = state
if sort in ('due_date', 'completeness'):
params['sort'] = sort
if direction in ('asc', 'desc'):
params['direction'] = direction
json = self._json(self._get(url, params=params), 200)
return [Milestone(mile, self) for mile in json]
def iter_network_events(self, number=-1):
"""Iterates over events on a network of repositories.
:param int number: (optional), number of events to return. Default: -1
returns all available events
:returns: generator of :class:`Event <github3.events.Event>`\ s
"""
base = self._api.replace('repos', 'networks', 1)
url = self._build_url('events', base_url=base)
return self._iter(int(number), url, Event)
def list_network_events(self):
"""Lists events on a network of repositories.
:returns: list of :class:`Event <github3.events.Event>`\ s
"""
# Paginate
base = self._api.replace('repos', 'networks', 1)
url = self._build_url('events', base_url=base)
json = self._json(self._get(url), 200)
return [Event(e, self) for e in json]
    def iter_notifications(self, all=False, participating=False, since='',
                           number=-1):
        """Iterates over the notifications for this repository.

        :param bool all: (optional), show all notifications, including ones
            marked as read
        :param bool participating: (optional), show only the notifications the
            user is participating in directly
        :param str since: (optional), filters out any notifications updated
            before the given time. The time should be passed in as UTC in the
            ISO 8601 format: ``YYYY-MM-DDTHH:MM:SSZ``. Example:
            "2012-10-09T23:39:01Z".
        :param int number: (optional), number of notifications to return.
            Default: -1 returns all available notifications
        :returns: generator of :class:`Thread <github3.notifications.Thread>`
        """
        url = self._build_url('notifications', base_url=self._api)
        params = {'all': all, 'participating': participating, 'since': since}
        # Drop falsy entries so defaults are not sent as query parameters.
        for (k, v) in list(params.items()):
            if not v:
                del params[k]
        return self._iter(int(number), url, Thread, params=params)
def iter_pulls(self, state=None, number=-1):
"""List pull requests on repository.
:param str state: (optional), accepted values: ('open', 'closed')
:param int number: (optional), number of pulls to return. Default: -1
returns all available pull requests
:returns: generator of
:class:`PullRequest <github3.pulls.PullRequest>`\ s
"""
url = self._build_url('pulls', base_url=self._api)
if state in ('open', 'closed'):
url = '{0}?{1}={2}'.format(url, 'state', state)
return self._iter(int(number), url, PullRequest)
def list_pulls(self, state=None):
"""List pull requests on repository.
:param state: (optional), accepted values: ('open', 'closed')
:type state: str
:returns: list of :class:`PullRequest <github3.pulls.PullRequest>`\ s
"""
# Paginate
url = self._build_url('pulls', base_url=self._api)
params = {}
if state in ('open', 'closed'):
params['state'] = state
json = self._json(self._get(url, params=params), 200)
return [PullRequest(pull, self) for pull in json]
def iter_refs(self, subspace='', number=-1):
"""Iterates over references for this repository.
:param str subspace: (optional), e.g. 'tags', 'stashes', 'notes'
:param int number: (optional), number of refs to return. Default: -1
returns all available refs
:returns: generator of :class:`Reference <github3.git.Reference>`\ s
"""
if subspace:
args = ('git', 'refs', subspace)
else:
args = ('git', 'refs')
url = self._build_url(*args, base_url=self._api)
return self._iter(int(number), url, Reference)
def list_refs(self, subspace=''):
"""List references for this repository.
:param subspace: (optional), e.g. 'tags', 'stashes', 'notes'
:type subspace: str
:returns: list of :class:`Reference <github3.git.Reference>`\ s
"""
# Paginate?
if subspace:
args = ('git', 'refs', subspace)
else:
args = ('git', 'refs')
url = self._build_url(*args, base_url=self._api)
json = self._json(self._get(url), 200)
return [Reference(r, self) for r in json]
def iter_stargazers(self, number=-1):
"""List users who have starred this repository.
:returns: generator of :class:`User <github3.users.User>`\ s
"""
url = self._build_url('stargazers', base_url=self._api)
return self._iter(int(number), url, User)
def list_stargazers(self):
"""List users who have starred this repository.
:returns: list of :class:`User <github3.users.User>`\ s
"""
url = self._build_url('stargazers', base_url=self._api)
json = self._json(self._get(url), 200)
return [User(u, self) for u in json]
def iter_subscribers(self, number=-1):
"""Iterates over users subscribed to this repository.
:param int number: (optional), number of subscribers to return.
Default: -1 returns all subscribers available
:returns: generator of :class:`User <github3.users.User>`
"""
url = self._build_url('subscribers', base_url=self._api)
return self._iter(int(number), url, User)
def list_subscribers(self):
"""List users subscribed to this repository.
:returns: list of :class:`User <github3.users.User>`
"""
url = self._build_url('subscribers', base_url=self._api)
json = self._json(self._get(url), 200)
return [User(u, self) for u in json]
def iter_statuses(self, sha, number=-1):
"""Iterates over the statuses for a specific SHA.
:param str sha: SHA of the commit to list the statuses of
:param int number: (optional), return up to number statuses. Default:
-1 returns all available statuses.
:returns: generator of :class:`Status <Status>`
"""
url = ''
if sha:
url = self._build_url('statuses', sha, base_url=self._api)
return self._iter(int(number), url, Status)
def list_statuses(self, sha):
"""List the statuses for a specific SHA.
:param str sha: SHA of the commit to list the statuses of
:returns: list of :class:`Status <Status>`
"""
json = []
if sha:
url = self._build_url('statuses', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return [Status(s) for s in json]
def iter_tags(self, number=-1):
"""Iterates over tags on this repository.
:param int number: (optional), return up to at most number tags.
Default: -1 returns all available tags.
:returns: generator of :class:`RepoTag <RepoTag>`\ s
"""
url = self._build_url('tags', base_url=self._api)
return self._iter(int(number), url, RepoTag)
def list_tags(self):
"""List tags on this repository.
:returns: list of :class:`RepoTag <RepoTag>`\ s
"""
url = self._build_url('tags', base_url=self._api)
json = self._json(self._get(url), 200)
return [RepoTag(tag) for tag in json]
@requires_auth
def iter_teams(self, number=-1):
"""Iterates over teams with access to this repository.
:param int number: (optional), return up to number Teams. Default: -1
returns all Teams.
:returns: generator of :class:`Team <github3.orgs.Team>`\ s
"""
from github3.orgs import Team
url = self._build_url('teams', base_url=self._api)
return self._iter(int(number), url, Team)
@requires_auth
def list_teams(self):
"""List teams with access to this repository.
:returns: list of :class:`Team <github3.orgs.Team>`\ s
"""
from github3.orgs import Team
url = self._build_url('teams', base_url=self._api)
return [Team(t, self) for t in self._json(self._get(url), 200)]
    def list_watchers(self):
        """DEPRECATED: Use list_stargazers() instead."""
        # Watching and starring are now distinct concepts on GitHub; this
        # method is intentionally a hard failure rather than a warning.
        raise DeprecationWarning('Use list_stargazers() instead.')
def mark_notifications(self, last_read=''):
"""Mark all notifications in this repository as read.
:param str last_read: (optional), Describes the last point that
notifications were checked. Anything updated since this time will
not be updated. Default: Now. Expected in ISO 8601 format:
``YYYY-MM-DDTHH:MM:SSZ``. Example: "2012-10-09T23:39:01Z".
:returns: bool
"""
url = self._build_url('notifications', base_url=self._api)
mark = {'read': True}
if last_read:
mark['last_read_at'] = last_read
return self._boolean(self._put(url, data=dumps(mark)),
205, 404)
def merge(self, base, head, message=''):
"""Perform a merge from ``head`` into ``base``.
:param str base: (required), where you're merging into
:param str head: (required), where you're merging from
:param str message: (optional), message to be used for the commit
:returns: :class:`RepoCommit <RepoCommit>`
"""
url = self._build_url('merges', base_url=self._api)
data = dumps({'base': base, 'head': head, 'commit_message': message})
json = self._json(self._post(url, data=data), 201)
return RepoCommit(json, self) if json else None
def milestone(self, number):
"""Get the milestone indicated by ``number``.
:param number: (required), unique id number of the milestone
:type number: int
:returns: :class:`Milestone <github3.issues.Milestone>`
"""
url = self._build_url('milestones', str(number), base_url=self._api)
json = self._json(self._get(url), 200)
return Milestone(json, self) if json else None
@requires_auth
def pubsubhubbub(self, mode, topic, callback, secret=''):
"""Create/update a pubsubhubbub hook.
:param mode: (required), accepted values: ('subscribe', 'unsubscribe')
:type mode: str
:param topic: (required), form:
https://github.com/:user/:repo/events/:event
:type topic: str
:param callback: (required), the URI that receives the updates
:type callback: str
:param secret: (optional), shared secret key that generates a
SHA1 HMAC of the payload content.
:type secret: str
:returns: bool
"""
from re import match
m = match('https://github\.com/\w+/[\w\._-]+/events/\w+', topic)
status = False
if mode and topic and callback and m:
data = [('hub.mode', mode), ('hub.topic', topic),
('hub.callback', callback), ('hub.secret', secret)]
url = self._build_url('hub')
status = self._boolean(self._post(url, data=data), 204, 404)
return status
def pull_request(self, number):
"""Get the pull request indicated by ``number``.
:param number: (required), number of the pull request.
:type number: int
:returns: :class:`PullRequest <github3.pulls.PullRequest>`
"""
json = None
if int(number) > 0:
url = self._build_url('pulls', str(number), base_url=self._api)
json = self._json(self._get(url), 200)
return PullRequest(json, self) if json else None
def readme(self):
"""Get the README for this repository.
:returns: :class:`Contents <Contents>`
"""
url = self._build_url('readme', base_url=self._api)
json = self._json(self._get(url), 200)
return Contents(json) if json else None
def ref(self, ref):
"""Get a reference pointed to by ``ref``.
The most common will be branches and tags. For a branch, you must
specify 'heads/branchname' and for a tag, 'tags/tagname'. Essentially,
the system should return any reference you provide it in the namespace,
including notes and stashes (provided they exist on the server).
:param ref: (required)
:type ref: str
:returns: :class:`Reference <github3.git.Reference>`
"""
url = self._build_url('git', 'refs', ref, base_url=self._api)
json = self._json(self._get(url), 200)
return Reference(json, self) if json else None
@requires_auth
def remove_collaborator(self, login):
"""Remove collaborator ``login`` from the repository.
:param login: (required), login name of the collaborator
:type login: str
:returns: bool
"""
resp = False
if login:
url = self._build_url('collaborators', login, base_url=self._api)
resp = self._boolean(self._delete(url), 204, 404)
return resp
    @requires_auth
    def set_subscription(self, subscribed, ignored):
        """Set the user's subscription for this repository.

        :param bool subscribed: (required), determines if notifications should
            be received from this repository.
        :param bool ignored: (required), determines if notifications should be
            ignored from this repository.
        :returns: :class:`Subscription <Subscription>`
        """
        sub = dumps({'subscribed': subscribed, 'ignored': ignored})
        url = self._build_url('subscription', base_url=self._api)
        json = self._json(self._put(url, data=sub), 200)
        return Subscription(json, self) if json else None
@requires_auth
def subscription(self):
"""Return subscription for this Repository.
:returns: :class:`Subscription <github3.notifications.Subscription>`
"""
url = self._build_url('subscription', base_url=self._api)
json = self._json(self._get(url), 200)
return Subscription(json, self) if json else None
def tag(self, sha):
"""Get an annotated tag.
http://learn.github.com/p/tagging.html
:param sha: (required), sha of the object for this tag
:type sha: str
:returns: :class:`Tag <github3.git.Tag>`
"""
url = self._build_url('git', 'tags', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return Tag(json) if json else None
def tree(self, sha):
"""Get a tree.
:param sha: (required), sha of the object for this tree
:type sha: str
:returns: :class:`Tree <github3.git.Tree>`
"""
url = self._build_url('git', 'trees', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return Tree(json, self) if json else None
def update_label(self, name, color, new_name=''):
"""Update the label ``name``.
:param name: (required), name of the label
:type name: str
:param color: (required), color code
:type color: str
:param new_name: (optional), new name of the label
:type new_name: str
:returns: bool
"""
label = self.label(name)
resp = False
if label:
upd = label.update
resp = upd(new_name, color) if new_name else upd(name, color)
return resp
class Branch(GitHubCore):
    """The :class:`Branch <Branch>` object. It holds the information GitHub
    returns about a branch on a :class:`Repository <Repository>`.
    """

    def __init__(self, branch, session=None):
        super(Branch, self).__init__(branch, session)
        #: Name of the branch.
        self.name = branch.get('name')
        #: Returns the branch's :class:`RepoCommit <RepoCommit>` or
        #  ``None``.
        commit = branch.get('commit')
        self.commit = RepoCommit(commit, self._session) if commit else commit
        #: Returns '_links' attribute.
        self.links = branch.get('_links', {})

    def __repr__(self):
        return '<Repository Branch [{0}]>'.format(self.name)
class Contents(GitHubObject):
    """The :class:`Contents <Contents>` object. It holds the information
    concerning any content in a repository requested via the API.
    """

    def __init__(self, content):
        super(Contents, self).__init__(content)
        # links
        self._api = content['_links'].get('self', '')
        #: Dictionary of links
        self.links = content.get('_links')
        # should always be 'base64'
        #: Returns encoding used on the content.
        self.encoding = content.get('encoding')
        # content, base64 encoded and decoded
        #: Base64-encoded content of the file.
        self.content = content.get('content')
        #: Decoded content of the file.
        self.decoded = self.content
        # Guard against a missing body (the API can send a null ``content``,
        # e.g. for submodule entries) before attempting to decode it.
        if self.encoding == 'base64' and self.content is not None:
            self.decoded = b64decode(self.content.encode())
        # file name, path, and size
        #: Name of the content.
        self.name = content.get('name')
        #: Path to the content.
        self.path = content.get('path')
        #: Size of the content
        self.size = content.get('size')
        #: SHA string.
        self.sha = content.get('sha')
        # should always be 'file'
        #: Type of content.
        self.type = content.get('type')

    def __repr__(self):
        return '<Content [{0}]>'.format(self.path)

    @property
    def git_url(self):
        """API URL for this blob"""
        return self.links['git']

    @property
    def html_url(self):
        """URL pointing to the content on GitHub."""
        return self.links['html']
class Download(GitHubCore):
    """The :class:`Download <Download>` object. It represents how GitHub sends
    information about files uploaded to the downloads section of a repository.
    """

    def __init__(self, download, session=None):
        super(Download, self).__init__(download, session)
        self._api = download.get('url', '')
        #: URL of the download at GitHub.
        self.html_url = download.get('html_url')
        #: Unique id of the download on GitHub.
        self.id = download.get('id')
        #: Name of the download.
        self.name = download.get('name')
        #: Description of the download.
        self.description = download.get('description')
        #: Size of the download.
        self.size = download.get('size')
        #: How many times this particular file has been downloaded.
        self.download_count = download.get('download_count')
        #: Content type of the download.
        self.content_type = download.get('content_type')

    def __repr__(self):
        return '<Download [{0}]>'.format(self.name)

    @requires_auth
    def delete(self):
        """Delete this download if authenticated"""
        return self._boolean(self._delete(self._api), 204, 404)

    def saveas(self, path=''):
        """Save this download to the path specified.

        :param path: (optional), if no path is specified, it will be
            saved in the current directory with the name specified by GitHub.
            it can take a file-like object as well
        :type path: str
        :returns: bool
        """
        if not path:
            path = self.name
        resp = self._get(self.html_url, allow_redirects=True, prefetch=False)
        if not self._boolean(resp, 200, 404):
            return False  # (No coverage)
        if callable(getattr(path, 'write', None)):
            # Caller supplied a file-like object; leave closing it to them.
            file_like = True
            fd = path
        else:
            file_like = False
            fd = open(path, 'wb')
        try:
            for chunk in resp.iter_content():
                fd.write(chunk)
        finally:
            # Close the file we opened even if the download stream raises
            # part-way through; previously the descriptor leaked on error.
            if not file_like:
                fd.close()
        return True
class Hook(GitHubCore):
    """The :class:`Hook <Hook>` object. This handles the information returned
    by GitHub about hooks set on a repository."""

    def __init__(self, hook, session=None):
        super(Hook, self).__init__(hook, session)
        self._api = hook.get('url', '')
        #: datetime object representing when this hook was last updated.
        self.updated_at = None
        if hook.get('updated_at'):
            self.updated_at = self._strptime(hook.get('updated_at'))
        #: datetime object representing the date the hook was created.
        self.created_at = self._strptime(hook.get('created_at'))
        #: The name of the hook.
        self.name = hook.get('name')
        #: Events which trigger the hook.
        self.events = hook.get('events')
        self._active = hook.get('active')
        #: Dictionary containing the configuration for the Hook.
        self.config = hook.get('config')
        #: Unique id of the hook.
        self.id = hook.get('id')

    def __repr__(self):
        return '<Hook [{0}]>'.format(self.name)

    def _update_(self, hook):
        # Re-initialize from the fresh payload returned by the API.
        self.__init__(hook, self._session)

    @requires_auth
    def delete(self):
        """Delete this hook.

        :returns: bool
        """
        return self._boolean(self._delete(self._api), 204, 404)

    @requires_auth
    def delete_subscription(self):
        """Delete the user's subscription to this repository.

        :returns: bool
        """
        url = self._build_url('subscription', base_url=self._api)
        return self._boolean(self._delete(url), 204, 404)

    @requires_auth
    def edit(self, name, config, events=None, add_events=None, rm_events=None,
             active=True):
        """Edit this hook.

        :param name: (required), name of the service being called
        :type name: str
        :param config: (required), key-value pairs of settings for this
            hook
        :type config: dict
        :param events: (optional), which events should this be triggered
            for
        :type events: list
        :param add_events: (optional), events to be added to the list of
            events that this hook triggers for
        :type add_events: list
        :param rm_events: (optional), events to be removed from the list
            of events that this hook triggers for
        :type rm_events: list
        :param active: (optional), should this event be active
        :type active: bool
        :returns: bool
        """
        # NOTE: the list parameters default to None rather than a mutable
        # ``[]``; an omitted argument behaves exactly as the old empty-list
        # default did (both are falsy), without sharing state across calls.
        json = None
        if name and config and isinstance(config, dict):
            data = {'name': name, 'config': config, 'active': active}
            if events:
                data['events'] = events
            if add_events:
                data['add_events'] = add_events
            if rm_events:
                data['remove_events'] = rm_events
            json = self._json(self._patch(self._api, data=dumps(data)), 200)
        if json:
            self._update_(json)
            return True
        return False

    def is_active(self):
        """Checks whether the hook is marked as active on GitHub or not.

        :returns: bool
        """
        return self._active

    @requires_auth
    def test(self):
        """Test this hook

        :returns: bool
        """
        return self._boolean(self._post(self._api + '/test'), 204, 404)
class RepoTag(GitHubObject):
    """The :class:`RepoTag <RepoTag>` object. This stores the information
    representing a tag that was created on a repository.
    """

    def __init__(self, tag):
        super(RepoTag, self).__init__(tag)
        #: Dictionary containing the SHA and URL of the commit.
        self.commit = tag.get('commit', {})
        #: Name of the tag.
        self.name = tag.get('name')
        #: URL for the GitHub generated tarball associated with the tag.
        self.tarball_url = tag.get('tarball_url')
        #: URL for the GitHub generated zipball associated with the tag.
        self.zipball_url = tag.get('zipball_url')

    def __repr__(self):
        return '<Repository Tag [{0}]>'.format(self.name)
class RepoComment(BaseComment):
    """The :class:`RepoComment <RepoComment>` object. This stores the
    information about a comment on a file in a repository.
    """

    def __init__(self, comment, session=None):
        super(RepoComment, self).__init__(comment, session)
        #: Commit id on which the comment was made.
        self.commit_id = comment.get('commit_id')
        #: URL of the comment on GitHub.
        self.html_url = comment.get('html_url')
        #: The line number where the comment is located.
        self.line = comment.get('line')
        #: The path to the file where the comment was made.
        self.path = comment.get('path')
        #: The position in the diff where the comment was made.
        self.position = comment.get('position')
        #: datetime object representing when the comment was updated.
        updated = comment.get('updated_at')
        self.updated_at = self._strptime(updated) if updated else updated
        #: Login of the user who left the comment.
        user = comment.get('user')
        self.user = User(user, self) if user else None

    def __repr__(self):
        login = self.user.login or ''
        return '<Repository Comment [{0}/{1}]>'.format(self.commit_id[:7],
                                                       login)

    def _update_(self, comment):
        super(RepoComment, self)._update_(comment)
        self.__init__(comment, self._session)

    @requires_auth
    def update(self, body, sha, line, path, position):
        """Update this comment.

        :param body: (required)
        :type body: str
        :param sha: (required), sha id of the commit to comment on
        :type sha: str
        :param line: (required), line number to comment on
        :type line: int
        :param path: (required), relative path of the file you're
            commenting on
        :type path: str
        :param position: (required), line index in the diff to comment on
        :type position: int
        :returns: bool
        """
        if not (body and sha and path and line > 0 and position > 0):
            return False
        payload = dumps({'body': body, 'commit_id': sha, 'line': line,
                         'path': path, 'position': position})
        json = self._json(self._post(self._api, payload), 200)
        if json:
            self._update_(json)
            return True
        return False
class RepoCommit(BaseCommit):
    """The :class:`RepoCommit <RepoCommit>` object. This represents a commit as
    viewed by a :class:`Repository`. This is different from a Commit object
    returned from the git data section.
    """

    def __init__(self, commit, session=None):
        super(RepoCommit, self).__init__(commit, session)
        self.sha = commit.get('sha')
        #: :class:`User <github3.users.User>` who authored the commit.
        author = commit.get('author')
        self.author = User(author, self._session) if author else author
        #: :class:`User <github3.users.User>` who committed the commit.
        committer = commit.get('committer')
        self.committer = (User(committer, self._session) if committer
                          else committer)
        #: :class:`Commit <github3.git.Commit>`.
        inner = commit.get('commit')
        self.commit = Commit(inner, self._session) if inner else inner
        stats = commit.get('stats')
        #: The number of additions made in the commit.
        self.additions = stats.get('additions') if stats else 0
        #: The number of deletions made in the commit.
        self.deletions = stats.get('deletions') if stats else 0
        #: Total number of changes in the files.
        self.total = stats.get('total') if stats else 0
        #: The files that were modified by this commit.
        self.files = commit.get('files', [])

    def __repr__(self):
        return '<Repository Commit [{0}]>'.format(self.sha[:7])
class Comparison(GitHubObject):
    """The :class:`Comparison <Comparison>` object. This encapsulates the
    information returned by GitHub comparing two commit objects in a
    repository."""

    def __init__(self, compare):
        super(Comparison, self).__init__(compare)
        self._api = compare.get('api', '')
        #: Number of commits ahead by.
        self.ahead_by = compare.get('ahead_by')
        #: :class:`RepoCommit <RepoCommit>` object representing the base of
        #  comparison.
        self.base_commit = RepoCommit(compare.get('base_commit'), None)
        #: Number of commits behind by.
        self.behind_by = compare.get('behind_by')
        #: List of :class:`RepoCommit <RepoCommit>` objects.
        self.commits = [RepoCommit(c) for c in compare.get('commits')]
        #: URL to see the diff between the two commits.
        self.diff_url = compare.get('diff_url')
        #: List of dicts describing the files modified.
        self.files = compare.get('files', [])
        #: URL to view the comparison at GitHub
        self.html_url = compare.get('html_url')
        #: Patch URL at GitHub for the comparison.
        self.patch_url = compare.get('patch_url')
        #: Permanent link to this comparison.
        self.permalink_url = compare.get('permalink_url')
        #: Behind or ahead.
        self.status = compare.get('status')
        #: Number of commits difference in the comparison.
        self.total_commits = compare.get('total_commits')

    def __repr__(self):
        return '<Comparison of {0} commits>'.format(self.total_commits)
class Status(GitHubObject):
    """The :class:`Status <Status>` object. This represents information from
    the Repo Status API."""

    def __init__(self, status):
        super(Status, self).__init__(status)
        #: datetime object representing the creation of the status object
        self.created_at = self._strptime(status.get('created_at'))
        #: :class:`User <github3.users.User>` who created the object
        self.creator = User(status.get('creator'))
        #: Short description of the Status
        self.description = status.get('description')
        #: GitHub ID for the status object
        self.id = status.get('id')
        #: State of the status, e.g., 'success', 'pending', 'failed', 'error'
        self.state = status.get('state')
        #: URL to view more information about the status
        self.target_url = status.get('target_url')
        #: datetime object representing the last time the status was updated
        updated = status.get('updated_at')
        self.updated_at = self._strptime(updated) if updated else None

    def __repr__(self):
        return '<Status [{s.id}:{s.state}]>'.format(s=self)
Repository.master_branch now defaults to ``None`` rather than ``''``, as per the documentation.
"""
github3.repos
=============
This module contains the classes relating to repositories.
"""
from base64 import b64decode
from json import dumps
from requests import post
from github3.events import Event
from github3.issues import Issue, IssueEvent, Label, Milestone, issue_params
from github3.git import Blob, Commit, Reference, Tag, Tree
from github3.models import GitHubObject, GitHubCore, BaseComment, BaseCommit
from github3.pulls import PullRequest
from github3.users import User, Key
from github3.decorators import requires_auth
from github3.notifications import Subscription, Thread
class Repository(GitHubCore):
"""The :class:`Repository <Repository>` object. It represents how GitHub
sends information about repositories.
"""
    def __init__(self, repo, session=None):
        """Populate this Repository from the ``repo`` JSON payload."""
        super(Repository, self).__init__(repo, session)
        #: URL used to clone via HTTPS.
        self.clone_url = repo.get('clone_url')
        #: ``datetime`` object representing when the Repository was created.
        self.created_at = self._strptime(repo.get('created_at'))
        #: Description of the repository.
        self.description = repo.get('description')
        # The number of forks
        #: The number of forks made of this repository.
        self.forks = repo.get('forks')
        # Is this repository a fork?
        self._is_fork = repo.get('fork')
        # Clone url using git, e.g. git://github.com/sigmavirus24/github3.py
        #: Plain git url for an anonymous clone.
        self.git_url = repo.get('git_url')
        # Private feature flags; presumably surfaced elsewhere through
        # has_* predicates on this class — confirm against the full file.
        self._has_dl = repo.get('has_downloads')
        self._has_issues = repo.get('has_issues')
        self._has_wiki = repo.get('has_wiki')
        # e.g. https://sigmavirus24.github.com/github3.py
        #: URL of the home page for the project.
        self.homepage = repo.get('homepage')
        # e.g. https://github.com/sigmavirus24/github3.py
        #: URL of the project at GitHub.
        self.html_url = repo.get('html_url')
        #: Unique id of the repository.
        self.id = repo.get('id')
        #: Language property.
        self.language = repo.get('language')
        #: Mirror property.
        self.mirror_url = repo.get('mirror_url')
        # Repository name, e.g. github3.py
        #: Name of the repository.
        self.name = repo.get('name')
        # Number of open issues
        #: Number of open issues on the repository.
        self.open_issues = repo.get('open_issues')
        # Repository owner's name
        #: :class:`User <github3.users.User>` object representing the
        # repository owner.
        self.owner = User(repo.get('owner'), self._session)
        # Is this repository private?
        self._priv = repo.get('private')
        #: ``datetime`` object representing the last time commits were pushed
        # to the repository.
        self.pushed_at = self._strptime(repo.get('pushed_at'))
        #: Size of the repository.
        self.size = repo.get('size')
        # SSH url e.g. git@github.com/sigmavirus24/github3.py
        #: URL to clone the repository via SSH.
        self.ssh_url = repo.get('ssh_url')
        #: If it exists, url to clone the repository via SVN.
        self.svn_url = repo.get('svn_url')
        #: ``datetime`` object representing the last time the repository was
        # updated.
        self.updated_at = self._strptime(repo.get('updated_at'))
        self._api = repo.get('url', '')
        # The number of watchers
        #: Number of users watching the repository.
        self.watchers = repo.get('watchers')
        #: Source of this fork, if it exists: :class:`Repository`
        #  (presumably the root of the fork network — verify against the
        #  GitHub API docs).
        self.source = repo.get('source', None)
        if self.source:
            self.source = Repository(self.source, self)
        #: Parent of this fork, if it exists: :class:`Repository`
        self.parent = repo.get('parent', None)
        if self.parent:
            self.parent = Repository(self.parent, self)
        #: default branch for the repository
        self.master_branch = repo.get('master_branch')
    def __repr__(self):
        """Return a short ``owner/name`` representation of the repository."""
        return '<Repository [{0}/{1}]>'.format(self.owner.login, self.name)
    def _update_(self, repo):
        # Re-run __init__ to refresh every attribute from the new payload.
        self.__init__(repo, self._session)
def _create_pull(self, data):
json = None
if data:
url = self._build_url('pulls', base_url=self._api)
json = self._json(self._post(url, data), 201)
return PullRequest(json, self._session) if json else None
@requires_auth
def add_collaborator(self, login):
"""Add ``login`` as a collaborator to a repository.
:param login: (required), login of the user
:type login: str
:returns: bool -- True if successful, False otherwise
"""
resp = False
if login:
url = self._build_url('collaborators', login, base_url=self._api)
resp = self._boolean(self._put(url), 204, 404)
return resp
def archive(self, format, path='', ref='master'):
"""Get the tarball or zipball archive for this repo at ref.
:param format: (required), accepted values: ('tarball',
'zipball')
:type format: str
:param path: (optional), path where the file should be saved
to, default is the filename provided in the headers and will be
written in the current directory.
it can take a file-like object as well
:type path: str, file
:param ref: (optional)
:type ref: str
:returns: bool -- True if successful, False otherwise
"""
resp = None
written = False
if format in ('tarball', 'zipball'):
url = self._build_url(format, ref, base_url=self._api)
resp = self._get(url, allow_redirects=True, prefetch=False)
fd = None
file_like = False
if resp and resp.ok:
if path:
if callable(getattr(path, 'write', None)):
file_like = True
fd = path
else:
fd = open(path, 'wb')
else:
header = resp.headers['content-disposition']
i = header.find('filename=') + len('filename=')
fd = open(header[i:], 'wb')
for chunk in resp.iter_content():
fd.write(chunk)
if not file_like:
fd.close()
written = True
return written
def blob(self, sha):
"""Get the blob indicated by ``sha``.
:param sha: (required), sha of the blob
:type sha: str
:returns: :class:`Blob <github3.git.Blob>` if successful, otherwise
None
"""
url = self._build_url('git', 'blobs', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return Blob(json) if json else None
def branch(self, name):
"""Get the branch ``name`` of this repository.
:param name: (required), branch name
:type name: str
:returns: :class:`Branch <Branch>`
"""
json = None
if name:
url = self._build_url('branches', name, base_url=self._api)
json = self._json(self._get(url), 200)
return Branch(json, self) if json else None
def commit(self, sha):
"""Get a single (repo) commit. See :func:`git_commit` for the Git Data
Commit.
:param sha: (required), sha of the commit
:type sha: str
:returns: :class:`RepoCommit <RepoCommit>` if successful, otherwise
None
"""
url = self._build_url('commits', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return RepoCommit(json, self) if json else None
def commit_comment(self, comment_id):
"""Get a single commit comment.
:param comment_id: (required), id of the comment used by GitHub
:type comment_id: int
:returns: :class:`RepoComment <RepoComment>` if successful, otherwise
None
"""
url = self._build_url('comments', str(comment_id), base_url=self._api)
json = self._json(self._get(url), 200)
return RepoComment(json, self) if json else None
def compare_commits(self, base, head):
"""Compare two commits.
:param base: (required), base for the comparison
:type base: str
:param head: (required), compare this against base
:type head: str
:returns: :class:`Comparison <Comparison>` if successful, else None
"""
url = self._build_url('compare', base + '...' + head,
base_url=self._api)
json = self._json(self._get(url), 200)
return Comparison(json) if json else None
def contents(self, path):
"""Get the contents of the file pointed to by ``path``.
:param path: (required), path to file, e.g.
github3/repo.py
:type path: str
:returns: :class:`Contents <Contents>` if successful, else None
"""
url = self._build_url('contents', path, base_url=self._api)
json = self._json(self._get(url), 200)
return Contents(json) if json else None
@requires_auth
def create_blob(self, content, encoding):
"""Create a blob with ``content``.
:param content: (required), content of the blob
:type content: str
:param encoding: (required), ('base64', 'utf-8')
:type encoding: str
:returns: string of the SHA returned
"""
sha = ''
if encoding in ('base64', 'utf-8') and content:
url = self._build_url('git', 'blobs', base_url=self._api)
data = dumps({'content': content, 'encoding': encoding})
json = self._json(self._post(url, data), 201)
if json:
sha = json.get('sha')
return sha
@requires_auth
def create_comment(self, body, sha, path='', position=1, line=1):
"""Create a comment on a commit.
:param str body: (required), body of the message
:param str sha: (required), commit id
:param str path: (optional), relative path of the file to comment
on
:param str position: (optional), line index in the diff to comment on
:param int line: (optional), line number of the file to comment on,
default: 1
:returns: :class:`RepoComment <RepoComment>` if successful else None
"""
line = int(line)
position = int(position)
json = None
if body and sha and line > 0:
data = dumps({'body': body, 'commit_id': sha, 'line': line,
'path': path, 'position': position})
url = self._build_url('commits', sha, 'comments',
base_url=self._api)
json = self._json(self._post(url, data), 201)
return RepoComment(json, self) if json else None
@requires_auth
def create_commit(self, message, tree, parents, author={}, committer={}):
"""Create a commit on this repository.
:param message: (required), commit message
:type message: str
:param tree: (required), SHA of the tree object this
commit points to
:type tree: str
:param parents: (required), SHAs of the commits that were parents of
this commit. If empty, the commit will be written as the root
commit. Even if there is only one parent, this should be an
array.
:type parents: list
:param author: (optional), if omitted, GitHub will
use the authenticated user's credentials and the current
time. Format: {'name': 'Committer Name', 'email':
'name@example.com', 'date': 'YYYY-MM-DDTHH:MM:SS+HH:00'}
:type author: dict
:param committer: (optional), if ommitted, GitHub will use the author
parameters. Should be the same format as the author parameter.
:type commiter: dict
:returns: :class:`Commit <github3.git.Commit>` if successful, else
None
"""
json = None
if message and tree and isinstance(parents, list):
url = self._build_url('git', 'commits', base_url=self._api)
data = dumps({'message': message, 'tree': tree, 'parents': parents,
'author': author, 'committer': committer})
json = self._json(self._post(url, data), 201)
return Commit(json, self) if json else None
@requires_auth
def create_download(self, name, path, description='',
content_type='text/plain'):
"""Create a new download on this repository.
I do not require you provide the size in bytes because it can be
determined by the operating system.
:param str name: (required), name of the file as it will appear
:param path: (required), path to the file
:type path: str
:param description: (optional), description of the file
:type description: str
:param content_type: (optional), e.g. 'text/plain'
:type content_type: str
:returns: :class:`Download <Download>` if successful, else None
"""
json = None
if name and path:
url = self._build_url('downloads', base_url=self._api)
from os import stat
info = stat(path)
data = dumps({'name': name, 'size': info.st_size,
'description': description,
'content_type': content_type})
json = self._json(self._post(url, data), 201)
if not json:
return None
form = [('key', json.get('path')),
('acl', json.get('acl')),
('success_action_status', '201'),
('Filename', json.get('name')),
('AWSAccessKeyId', json.get('accesskeyid')),
('Policy', json.get('policy')),
('Signature', json.get('signature')),
('Content-Type', json.get('mime_type'))]
file = [('file', open(path, 'rb').read())]
resp = post(json.get('s3_url'), data=form, files=file,
headers={'Accept-Charset': 'utf-8'})
return Download(json, self) if self._boolean(resp, 201, 404) else None
@requires_auth
def create_fork(self, organization=None):
"""Create a fork of this repository.
:param organization: (required), login for organization to create the
fork under
:type organization: str
:returns: :class:`Repository <Repository>` if successful, else None
"""
url = self._build_url('forks', base_url=self._api)
if organization:
resp = self._post(url, params={'org': organization})
else:
resp = self._post(url)
json = self._json(resp, 202)
return Repository(json, self) if json else None
@requires_auth
def create_hook(self, name, config, events=['push'], active=True):
"""Create a hook on this repository.
:param name: (required), name of the hook
:type name: str
:param config: (required), key-value pairs which act as settings
for this hook
:type config: dict
:param events: (optional), events the hook is triggered for
:type events: list
:param active: (optional), whether the hook is actually
triggered
:type active: bool
:returns: :class:`Hook <Hook>` if successful, else None
"""
json = None
if name and config and isinstance(config, dict):
url = self._build_url('hooks', base_url=self._api)
data = dumps({'name': name, 'config': config, 'events': events,
'active': active})
json = self._json(self._post(url, data), 201)
return Hook(json, self) if json else None
@requires_auth
def create_issue(self,
title,
body=None,
assignee=None,
milestone=None,
labels=[]):
"""Creates an issue on this repository.
:param str title: (required), title of the issue
:param str body: (optional), body of the issue
:param str assignee: (optional), login of the user to assign the
issue to
:param int milestone: (optional), number of the milestone to attribute
this issue to (e.g. ``m`` is a Milestone object, ``m.number`` is
what you pass here.)
:param labels: (optional), labels to apply to this
issue
:type labels: list of strings
:returns: :class:`Issue <github3.issues.Issue>` if successful, else
None
"""
issue = dumps({'title': title, 'body': body, 'assignee': assignee,
'milestone': milestone, 'labels': labels})
url = self._build_url('issues', base_url=self._api)
json = self._json(self._post(url, issue), 201)
return Issue(json, self) if json else None
@requires_auth
def create_key(self, title, key):
"""Create a deploy key.
:param title: (required), title of key
:type title: str
:param key: (required), key text
:type key: str
:returns: :class:`Key <github3.users.Key>` if successful, else None
"""
data = dumps({'title': title, 'key': key})
url = self._build_url('keys', base_url=self._api)
json = self._json(self._post(url, data), 201)
return Key(json, self) if json else None
@requires_auth
def create_label(self, name, color):
"""Create a label for this repository.
:param name: (required), name to give to the label
:type name: str
:param color: (required), value of the color to assign to the
label
:type color: str
:returns: :class:`Label <github3.issues.Label>` if successful, else
None
"""
data = dumps({'name': name, 'color': color.strip('#')})
url = self._build_url('labels', base_url=self._api)
json = self._json(self._post(url, data), 201)
return Label(json, self) if json else None
@requires_auth
def create_milestone(self, title, state=None, description=None,
due_on=None):
"""Create a milestone for this repository.
:param title: (required), title of the milestone
:type title: str
:param state: (optional), state of the milestone, accepted
values: ('open', 'closed'), default: 'open'
:type state: str
:param description: (optional), description of the milestone
:type description: str
:param due_on: (optional), ISO 8601 formatted due date
:type due_on: str
:returns: :class:`Milestone <github3.issues.Milestone>` if successful,
else None
"""
url = self._build_url('milestones', base_url=self._api)
if state not in ('open', 'closed'):
state = 'open'
data = dumps({'title': title, 'state': state,
'description': description, 'due_on': due_on})
json = self._json(self._post(url, data), 201)
return Milestone(json, self) if json else None
@requires_auth
def create_pull(self, title, base, head, body=''):
"""Create a pull request using commits from ``head`` and comparing
against ``base``.
:param title: (required)
:type title: str
:param base: (required), e.g., 'username:branch', or a sha
:type base: str
:param head: (required), e.g., 'master', or a sha
:type head: str
:param body: (optional), markdown formatted description
:type body: str
:returns: :class:`PullRequest <github3.pulls.PullRequest>` if
successful, else None
"""
data = dumps({'title': title, 'body': body, 'base': base,
'head': head})
return self._create_pull(data)
@requires_auth
def create_pull_from_issue(self, issue, base, head):
"""Create a pull request from issue #``issue``.
:param issue: (required), issue number
:type issue: int
:param base: (required), e.g., 'username:branch', or a sha
:type base: str
:param head: (required), e.g., 'master', or a sha
:type head: str
:returns: :class:`PullRequest <github3.pulls.PullRequest>` if
successful, else None
"""
data = dumps({'issue': issue, 'base': base, 'head': head})
return self._create_pull(data)
@requires_auth
def create_ref(self, ref, sha):
"""Create a reference in this repository.
:param ref: (required), fully qualified name of the reference,
e.g. ``refs/heads/master``. If it doesn't start with ``refs`` and
contain at least two slashes, GitHub's API will reject it.
:type ref: str
:param sha: (required), SHA1 value to set the reference to
:type sha: str
:returns: :class:`Reference <github3.git.Reference>` if successful
else None
"""
data = dumps({'ref': ref, 'sha': sha})
url = self._build_url('git', 'refs', base_url=self._api)
json = self._json(self._post(url, data), 201)
return Reference(json, self) if json else None
@requires_auth
def create_status(self, sha, state, target_url='', description=''):
"""Create a status object on a commit.
:param str sha: (required), SHA of the commit to create the status on
:param str state: (required), state of the test; only the following
are accepted: 'pending', 'success', 'error', 'failure'
:param str target_url: (optional), URL to associate with this status.
:param str description: (optional), short description of the status
"""
json = {}
if sha and state:
data = dumps({'state': state, 'target_url': target_url,
'description': description})
url = self._build_url('statuses', sha, base_url=self._api)
json = self._json(self._post(url, data=data), 201)
return Status(json) if json else None
    @requires_auth
    def create_tag(self, tag, message, sha, obj_type, tagger,
                   lightweight=False):
        """Create a tag in this repository.

        A lightweight tag is just a reference; an annotated tag is a tag
        object plus a reference pointing at it (two API calls).

        :param str tag: (required), name of the tag
        :param str message: (required), tag message
        :param str sha: (required), SHA of the git object this is tagging
        :param str obj_type: (required), type of object being tagged, e.g.,
            'commit', 'tree', 'blob'
        :param dict tagger: (required), containing the name, email of the
            tagger and the date it was tagged
        :param bool lightweight: (optional), if False, create an annotated
            tag, otherwise create a lightweight tag (a Reference).
        :returns: If lightweight == False: :class:`Tag <github3.git.Tag>` if
            successful, else None. If lightweight == True: :class:`Reference
            <Reference>`
        """
        # Lightweight tags skip the tag object entirely and only need a ref.
        if lightweight and tag and sha:
            return self.create_ref('refs/tags/' + tag, sha)
        json = None
        # len(tagger) == 3 enforces the name/email/date triple the API needs.
        if tag and message and sha and obj_type and len(tagger) == 3:
            data = dumps({'tag': tag, 'message': message, 'object': sha,
                          'type': obj_type, 'tagger': tagger})
            url = self._build_url('git', 'tags', base_url=self._api)
            json = self._json(self._post(url, data), 201)
            if json:
                # Annotated tags also need a ref so the tag is reachable.
                self.create_ref('refs/tags/' + tag, sha)
        return Tag(json) if json else None
@requires_auth
def create_tree(self, tree, base_tree=''):
"""Create a tree on this repository.
:param tree: (required), specifies the tree structure.
Format: [{'path': 'path/file', 'mode':
'filemode', 'type': 'blob or tree', 'sha': '44bfc6d...'}]
:type tree: list of dicts
:param base_tree: (optional), SHA1 of the tree you want
to update with new data
:type base_tree: str
:returns: :class:`Tree <github3.git.Tree>` if successful, else None
"""
json = None
if tree and isinstance(tree, list):
data = dumps({'tree': tree, 'base_tree': base_tree})
url = self._build_url('git', 'trees', base_url=self._api)
json = self._json(self._post(url, data), 201)
return Tree(json) if json else None
@requires_auth
def delete(self):
"""Delete this repository.
:returns: bool -- True if successful, False otherwise
"""
return self._boolean(self._delete(self._api), 204, 404)
@requires_auth
def delete_key(self, key_id):
"""Delete the key with the specified id from your deploy keys list.
:returns: bool -- True if successful, False otherwise
"""
if int(key_id) <= 0:
return False
url = self._build_url('keys', str(key_id), base_url=self._api)
return self._boolean(self._delete(url), 204, 404)
def download(self, id_num):
"""Get a single download object by its id.
:param id_num: (required), id of the download
:type id_num: int
:returns: :class:`Download <Download>` if successful, else None
"""
json = None
if int(id_num) > 0:
url = self._build_url('downloads', str(id_num),
base_url=self._api)
json = self._json(self._get(url), 200)
return Download(json, self) if json else None
@requires_auth
def edit(self,
name,
description='',
homepage='',
private=False,
has_issues=True,
has_wiki=True,
has_downloads=True,
default_branch=''):
"""Edit this repository.
:param str name: (required), name of the repository
:param str description: (optional)
:param str homepage: (optional)
:param bool private: (optional), If ``True``, create a
private repository. API default: ``False``
:param bool has_issues: (optional), If ``True``, enable
issues for this repository. API default: ``True``
:param bool has_wiki: (optional), If ``True``, enable the
wiki for this repository. API default: ``True``
:param bool has_downloads: (optional), If ``True``, enable
downloads for this repository. API default: ``True``
:param str default_branch: (optional), Update the default branch for
this repository
:returns: bool -- True if successful, False otherwise
"""
data = dumps({'name': name, 'description': description,
'homepage': homepage, 'private': private,
'has_issues': has_issues, 'has_wiki': has_wiki,
'has_downloads': has_downloads,
'default_branch': default_branch})
json = self._json(self._patch(self._api, data=data), 200)
if json:
self._update_(json)
return True
return False # (No coverage)
def is_collaborator(self, login):
"""Check to see if ``login`` is a collaborator on this repository.
:param login: (required), login for the user
:type login: str
:returns: bool -- True if successful, False otherwise
"""
if login:
url = self._build_url('collaborators', login, base_url=self._api)
return self._boolean(self._get(url), 204, 404)
return False
def is_fork(self):
"""Checks if this repository is a fork.
:returns: bool
"""
return self._is_fork
def is_private(self):
"""Checks if this repository is private.
:returns: bool
"""
return self._priv
def git_commit(self, sha):
"""Get a single (git) commit.
:param sha: (required), sha of the commit
:type sha: str
:returns: :class:`Commit <github3.git.Commit>` if successful,
otherwise None
"""
url = self._build_url('git', 'commits', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return Commit(json, self) if json else None
def has_downloads(self):
"""Checks if this repository has downloads.
:returns: bool
"""
return self._has_dl
def has_issues(self):
"""Checks if this repository has issues enabled.
:returns: bool
"""
return self._has_issues
def has_wiki(self):
"""Checks if this repository has a wiki.
:returns: bool
"""
return self._has_wiki
@requires_auth
def hook(self, id_num):
"""Get a single hook.
:param id_num: (required), id of the hook
:type id_num: int
:returns: :class:`Hook <Hook>` if successful, else None
"""
json = None
if int(id_num) > 0:
url = self._build_url('hooks', str(id_num), base_url=self._api)
json = self._json(self._get(url), 200)
return Hook(json, self) if json else None
def is_assignee(self, login):
"""Check if the user is a possible assignee for an issue on this
repository.
:returns: :class:`bool`
"""
url = self._build_url('assignees', login, base_url=self._api)
return self._boolean(self._get(url), 204, 404)
def issue(self, number):
"""Get the issue specified by ``number``.
:param number: (required), number of the issue on this repository
:type number: int
:returns: :class:`Issue <github3.issues.Issue>` if successful, else
None
"""
json = None
if int(number) > 0:
url = self._build_url('issues', str(number), base_url=self._api)
json = self._json(self._get(url), 200)
return Issue(json, self) if json else None
@requires_auth
def key(self, id_num):
"""Get the specified deploy key.
:param id_num: (required), id of the key
:type id_num: int
:returns: :class:`Key <Key>` if successful, else None
"""
json = None
if int(id_num) > 0:
url = self._build_url('keys', str(id_num), base_url=self._api)
json = self._json(self._get(url), 200)
return Key(json, self) if json else None
def label(self, name):
"""Get the label specified by ``name``
:param name: (required), name of the label
:type name: str
:returns: :class:`Label <github3.issues.Label>` if successful, else
None
"""
json = None
if name:
url = self._build_url('labels', name, base_url=self._api)
json = self._json(self._get(url), 200)
return Label(json, self) if json else None
def iter_assignees(self, number=-1):
"""Iterate over all available assignees to which an issue may be
assigned.
:param int number: (optional), number of assignees to return. Default:
-1 returns all available assignees
:returns: list of :class:`User <github3.users.User>`\ s
"""
url = self._build_url('assignees', base_url=self._api)
return self._iter(int(number), url, User)
def list_assignees(self):
"""List all available assignees to which an issue may be assigned.
:returns: list of :class:`User <github3.users.User>`\ s
"""
url = self._build_url('assignees', base_url=self._api)
json = self._json(self._get(url), 200)
return [User(u, self) for u in json]
def iter_branches(self, number=-1):
"""Iterate over the branches in this repository.
:param int number: (optional), number of branches to return. Default:
-1 returns all branches
:returns: list of :class:`Branch <Branch>`\ es
"""
# Paginate?
url = self._build_url('branches', base_url=self._api)
return self._iter(int(number), url, Branch)
def list_branches(self):
"""List the branches in this repository.
:returns: list of :class:`Branch <Branch>`\ es
"""
# Paginate?
url = self._build_url('branches', base_url=self._api)
json = self._json(self._get(url), 200)
return [Branch(b, self) for b in json]
def iter_comments(self, number=-1):
"""Iterate over comments on all commits in the repository.
:param int number: (optional), number of comments to return. Default:
-1 returns all comments
:returns: list of :class:`RepoComment <RepoComment>`\ s
"""
# Paginate?
url = self._build_url('comments', base_url=self._api)
return self._iter(int(number), url, RepoComment)
def list_comments(self):
"""List comments on all commits in the repository.
:returns: list of :class:`RepoComment <RepoComment>`\ s
"""
# Paginate?
url = self._build_url('comments', base_url=self._api)
json = self._json(self._get(url), 200)
return [RepoComment(comment, self) for comment in json]
def iter_comments_on_commit(self, sha, number=1):
"""Iterate over comments for a single commit.
:param sha: (required), sha of the commit to list comments on
:type sha: str
:param int number: (optional), number of comments to return. Default:
-1 returns all comments
:returns: list of :class:`RepoComment <RepoComment>`\ s
"""
url = self._build_url('commits', sha, 'comments', base_url=self._api)
return self._iter(int(number), url, RepoComment)
def list_comments_on_commit(self, sha):
"""List comments for a single commit.
:param sha: (required), sha of the commit to list comments on
:type sha: str
:returns: list of :class:`RepoComment <RepoComment>`\ s
"""
url = self._build_url('commits', sha, 'comments', base_url=self._api)
json = self._json(self._get(url), 200)
return [RepoComment(comm, self) for comm in json]
def iter_commits(self, sha='', path='', author='', number=-1):
"""Iterate over commits in this repository.
:param str sha: (optional), sha or branch to start listing commits
from
:param str path: (optional), commits containing this path will be
listed
:param str author: (optional), GitHub login, real name, or email to
filter commits by (using commit author)
:param int number: (optional), number of comments to return. Default:
-1 returns all comments
:returns: list of :class:`RepoCommit <RepoCommit>`\ s
"""
params = {}
if sha:
params['sha'] = sha
if path:
params['path'] = path
if author:
params['author'] = author
url = self._build_url('commits', base_url=self._api)
return self._iter(int(number), url, RepoCommit, params=params)
def list_commits(self, sha='', path='', author=''):
"""List commits in this repository.
:param str sha: (optional), sha or branch to start listing commits
from
:param str path: (optional), commits containing this path will be
listed
:param str author: (optional), GitHub login, real name, or email to
filter commits by (using commit author)
:returns: list of :class:`RepoCommit <RepoCommit>`\ s
"""
params = {}
if sha:
params['sha'] = sha
if path:
params['path'] = path
if author:
params['author'] = author
url = self._build_url('commits', base_url=self._api)
json = self._json(self._get(url, params=params), 200)
return [RepoCommit(commit, self) for commit in json]
def iter_contributors(self, anon=False, number=-1):
"""Iterate over the contributors to this repository.
:param anon: (optional), True lists anonymous contributors as well
:type anon: bool
:param number: (optional), number of contributors to return. Default:
-1 returns all contributors
:type number: int
:returns: list of :class:`User <github3.users.User>`\ s
"""
# Paginate
url = self._build_url('contributors', base_url=self._api)
params = {}
if anon:
params = {'anon': anon}
return self._iter(int(number), url, User, params=params)
def list_contributors(self, anon=False):
"""List the contributors to this repository.
:param anon: (optional), True lists anonymous contributors as well
:type anon: bool
:returns: list of :class:`User <github3.users.User>`\ s
"""
# Paginate
url = self._build_url('contributors', base_url=self._api)
params = {}
if anon:
params = {'anon': anon}
json = self._json(self._get(url, params=params), 200)
return [User(c, self) for c in json]
def iter_downloads(self, number=-1):
"""Iterate over available downloads for this repository.
:param int number: (optional), number of downloads to return. Default:
-1 returns all available downloads
:returns: list of :class:`Download <Download>`\ s
"""
url = self._build_url('downloads', base_url=self._api)
return self._iter(int(number), url, Download)
def list_downloads(self):
"""List available downloads for this repository.
:returns: list of :class:`Download <Download>`\ s
"""
url = self._build_url('downloads', base_url=self._api)
json = self._json(self._get(url), 200)
return [Download(dl, self) for dl in json]
def iter_events(self, number=-1):
"""Iterate over events on this repository.
:param int number: (optional), number of events to return. Default: -1
returns all available events
:returns: list of :class:`Event <github3.events.Event>`\ s
"""
url = self._build_url('events', base_url=self._api)
return self._iter(int(number), url, Event)
def list_events(self):
"""List events on this repository.
:returns: list of :class:`Event <github3.events.Event>`\ s
"""
url = self._build_url('events', base_url=self._api)
json = self._json(self._get(url), 200)
return [Event(e, self) for e in json]
def iter_forks(self, sort='', number=-1):
"""Iterate over forks of this repository.
:param sort: (optional), accepted values:
('newest', 'oldest', 'watchers'), API default: 'newest'
:type sort: str
:param number: (optional), number of forks to return. Default: -1
returns all forks
:type number: int
:returns: list of :class:`Repository <Repository>`
"""
url = self._build_url('forks', base_url=self._api)
params = {}
if sort in ('newest', 'oldest', 'watchers'):
params = {'sort': sort}
return self._iter(int(number), url, Repository, params=params)
def list_forks(self, sort=''):
"""List forks of this repository.
:param sort: (optional), accepted values:
('newest', 'oldest', 'watchers'), API default: 'newest'
:type sort: str
:returns: list of :class:`Repository <Repository>`
"""
url = self._build_url('forks', base_url=self._api)
params = {}
if sort in ('newest', 'oldest', 'watchers'):
params = {'sort': sort}
json = self._json(self._get(url, params=params), 200)
return [Repository(r, self) for r in json]
@requires_auth
def iter_hooks(self, number=-1):
"""Iterate over hooks registered on this repository.
:param int number: (optional), number of hoks to return. Default: -1
returns all hooks
:returns: list of :class:`Hook <Hook>`\ s
"""
url = self._build_url('hooks', base_url=self._api)
return self._iter(int(number), url, Hook)
@requires_auth
def list_hooks(self):
"""List hooks registered on this repository.
:returns: list of :class:`Hook <Hook>`\ s
"""
url = self._build_url('hooks', base_url=self._api)
json = self._json(self._get(url), 200)
return [Hook(h, self) for h in json]
def iter_issues(self,
milestone=None,
state=None,
assignee=None,
mentioned=None,
labels=None,
sort=None,
direction=None,
since=None,
number=-1):
"""Iterate over issues on this repo based upon parameters passed.
:param milestone: (optional), 'none', or '*'
:type milestone: int
:param state: (optional), accepted values: ('open', 'closed')
:type state: str
:param assignee: (optional), 'none', '*', or login name
:type assignee: str
:param mentioned: (optional), user's login name
:type mentioned: str
:param labels: (optional), comma-separated list of labels, e.g.
'bug,ui,@high' :param sort: accepted values:
('created', 'updated', 'comments', 'created')
:type labels: str
:param direction: (optional), accepted values: ('asc', 'desc')
:type direction: str
:param since: (optional), ISO 8601 format: YYYY-MM-DDTHH:MM:SSZ
:type since: str
:param number: (optional), Number of issues to return.
By default all issues are returned
:type since: int
:returns: list of :class:`Issue <github3.issues.Issue>`\ s
"""
url = self._build_url('issues', base_url=self._api)
params = {}
if milestone in ('*', 'none') or isinstance(milestone, int):
params['milestone'] = milestone
if assignee:
params['assignee'] = assignee
if mentioned:
params['mentioned'] = mentioned
params.update(issue_params(None, state, labels, sort, direction,
since)) # nopep8
return self._iter(int(number), url, Issue, params=params)
def list_issues(self,
milestone=None,
state=None,
assignee=None,
mentioned=None,
labels=None,
sort=None,
direction=None,
since=None):
"""List issues on this repo based upon parameters passed.
:param milestone: (optional), 'none', or '*'
:type milestone: int
:param state: (optional), accepted values: ('open', 'closed')
:type state: str
:param assignee: (optional), 'none', '*', or login name
:type assignee: str
:param mentioned: (optional), user's login name
:type mentioned: str
:param labels: (optional), comma-separated list of labels, e.g.
'bug,ui,@high' :param sort: accepted values:
('created', 'updated', 'comments', 'created')
:type labels: str
:param direction: (optional), accepted values: ('open', 'closed')
:type direction: str
:param since: (optional), ISO 8601 format: YYYY-MM-DDTHH:MM:SSZ
:type since: str
:returns: list of :class:`Issue <github3.issues.Issue>`\ s
"""
# Paginate
url = self._build_url('issues', base_url=self._api)
params = {}
if milestone in ('*', 'none') or isinstance(milestone, int):
params['milestone'] = str(milestone).lower()
# str(None) = 'None' which is invalid, so .lower() it to make it
# work.
if assignee:
params['assignee'] = assignee
if mentioned:
params['mentioned'] = mentioned
params.update(issue_params(None, state, labels, sort, direction,
since)) # nopep8
request = self._get(url, params=params)
json = self._json(request, 200)
return [Issue(i, self) for i in json]
def iter_issue_events(self, number=-1):
"""Iterates over issue events on this repository.
:param int number: (optional), number of events to return. Default: -1
returns all available events
:returns: generator of
:class:`IssueEvent <github3.issues.IssueEvent>`\ s
"""
url = self._build_url('issues', 'events', base_url=self._api)
return self._iter(int(number), url, IssueEvent)
def list_issue_events(self):
"""List issue events on this repository.
:returns: list of :class:`IssueEvent <github3.issues.IssueEvent>`\ s
"""
# Paginate
url = self._build_url('issues', 'events', base_url=self._api)
json = self._json(self._get(url), 200)
return [IssueEvent(e, self) for e in json]
@requires_auth
def iter_keys(self, number=-1):
"""Iterates over deploy keys on this repository.
:param int number: (optional), number of keys to return. Default: -1
returns all available keys
:returns: generator of :class:`Key <github3.users.Key>`\ s
"""
url = self._build_url('keys', base_url=self._api)
return self._iter(int(number), url, Key)
@requires_auth
def list_keys(self):
"""List deploy keys on this repository.
:returns: list of :class:`Key <github3.users.Key>`\ s
"""
# Paginate?
url = self._build_url('keys', base_url=self._api)
json = self._json(self._get(url), 200)
return [Key(k, self) for k in json]
def iter_labels(self, number=-1):
"""Iterates over labels on this repository.
:param int number: (optional), number of labels to return. Default: -1
returns all available labels
:returns: generator of :class:`Label <github3.issues.Label>`\ s
"""
url = self._build_url('labels', base_url=self._api)
return self._iter(int(number), url, Label)
def list_labels(self):
"""List labels on this repository.
:returns: list of :class:`Label <github3.issues.Label>`\ s
"""
url = self._build_url('labels', base_url=self._api)
json = self._json(self._get(url), 200)
return [Label(label, self) for label in json]
def iter_languages(self, number=-1):
"""Iterate over the programming languages used in the repository.
:param int number: (optional), number of languages to return. Default:
-1 returns all used languages
:returns: list of tuples
"""
url = self._build_url('languages', base_url=self._api)
return self._iter(int(number), url, tuple)
def list_languages(self):
"""List the programming languages used in the repository.
:returns: list of tuples
"""
url = self._build_url('languages', base_url=self._api)
json = self._json(self._get(url), 200)
return [(k, v) for k, v in json.items()]
def iter_milestones(self, state=None, sort=None, direction=None,
number=-1):
"""Iterates over the milestones on this repository.
:param str state: (optional), state of the milestones, accepted
values: ('open', 'closed')
:param str sort: (optional), how to sort the milestones, accepted
values: ('due_date', 'completeness')
:param str direction: (optional), direction to sort the milestones,
accepted values: ('asc', 'desc')
:param int number: (optional), number of milestones to return.
Default: -1 returns all milestones
:returns: generator of
:class:`Milestone <github3.issues.Milestone>`\ s
"""
url = self._build_url('milestones', base_url=self._api)
accepted = {'state': ('open', 'closed'),
'sort': ('due_date', 'completeness'),
'direction': ('asc', 'desc')}
params = {'state': state, 'sort': sort, 'direction': direction}
for (k, v) in list(params.items()):
if not (v and (v in accepted[k])): # e.g., '' or None
del params[k]
if not params:
params = None
return self._iter(int(number), url, Milestone, params)
def list_milestones(self, state=None, sort=None, direction=None):
"""List the milestones on this repository.
:param state: (optional), state of the milestones, accepted
values: ('open', 'closed')
:type state: str
:param sort: (optional), how to sort the milestones, accepted
values: ('due_date', 'completeness')
:type sort: str
:param direction: (optional), direction to sort the milestones,
accepted values: ('asc', 'desc')
:type direction: str
:returns: list of :class:`Milestone <github3.issues.Milestone>`\ s
"""
# Paginate?
url = self._build_url('milestones', base_url=self._api)
params = {}
if state in ('open', 'closed'):
params['state'] = state
if sort in ('due_date', 'completeness'):
params['sort'] = sort
if direction in ('asc', 'desc'):
params['direction'] = direction
json = self._json(self._get(url, params=params), 200)
return [Milestone(mile, self) for mile in json]
def iter_network_events(self, number=-1):
"""Iterates over events on a network of repositories.
:param int number: (optional), number of events to return. Default: -1
returns all available events
:returns: generator of :class:`Event <github3.events.Event>`\ s
"""
base = self._api.replace('repos', 'networks', 1)
url = self._build_url('events', base_url=base)
return self._iter(int(number), url, Event)
def list_network_events(self):
"""Lists events on a network of repositories.
:returns: list of :class:`Event <github3.events.Event>`\ s
"""
# Paginate
base = self._api.replace('repos', 'networks', 1)
url = self._build_url('events', base_url=base)
json = self._json(self._get(url), 200)
return [Event(e, self) for e in json]
def iter_notifications(self, all=False, participating=False, since='',
number=-1):
"""Iterates over the notifications for this repository.
:param bool all: (optional), show all notifications, including ones
marked as read
:param bool participating: (optional), show only the notifications the
user is participating in directly
:param str since: (optional), filters out any notifications updated
before the given time. The time should be passed in as UTC in the
ISO 8601 format: ``YYYY-MM-DDTHH:MM:SSZ``. Example:
"2012-10-09T23:39:01Z".
:returns: generator of :class:`Thread <github3.notifications.Thread>`
"""
url = self._build_url('notifications', base_url=self._api)
params = {'all': all, 'participating': participating, 'since': since}
for (k, v) in list(params.items()):
if not v:
del params[k]
return self._iter(int(number), url, Thread, params=params)
def iter_pulls(self, state=None, number=-1):
    """Iterate over pull requests on this repository.

    :param str state: (optional), accepted values: ('open', 'closed')
    :param int number: (optional), maximum number to yield; -1 (default)
        yields all
    :returns: generator of
        :class:`PullRequest <github3.pulls.PullRequest>`\ s
    """
    pulls_url = self._build_url('pulls', base_url=self._api)
    if state in ('open', 'closed'):
        pulls_url = '{0}?state={1}'.format(pulls_url, state)
    return self._iter(int(number), pulls_url, PullRequest)
def list_pulls(self, state=None):
    """List pull requests on this repository.

    :param str state: (optional), accepted values: ('open', 'closed')
    :returns: list of :class:`PullRequest <github3.pulls.PullRequest>`\ s
    """
    # Paginate
    url = self._build_url('pulls', base_url=self._api)
    params = {'state': state} if state in ('open', 'closed') else {}
    payload = self._json(self._get(url, params=params), 200)
    return [PullRequest(p, self) for p in payload]
def iter_refs(self, subspace='', number=-1):
    """Iterate over git references on this repository.

    :param str subspace: (optional), e.g. 'tags', 'stashes', 'notes'
    :param int number: (optional), maximum number to yield; -1 (default)
        yields all
    :returns: generator of :class:`Reference <github3.git.Reference>`\ s
    """
    parts = ('git', 'refs', subspace) if subspace else ('git', 'refs')
    return self._iter(int(number),
                      self._build_url(*parts, base_url=self._api),
                      Reference)
def list_refs(self, subspace=''):
    """List git references on this repository.

    :param str subspace: (optional), e.g. 'tags', 'stashes', 'notes'
    :returns: list of :class:`Reference <github3.git.Reference>`\ s
    """
    # Paginate?
    parts = ('git', 'refs', subspace) if subspace else ('git', 'refs')
    url = self._build_url(*parts, base_url=self._api)
    payload = self._json(self._get(url), 200)
    return [Reference(r, self) for r in payload]
def iter_stargazers(self, number=-1):
    """Iterate over users who have starred this repository.

    :param int number: (optional), maximum number to yield; -1 (default)
        yields all
    :returns: generator of :class:`User <github3.users.User>`\ s
    """
    stars_url = self._build_url('stargazers', base_url=self._api)
    return self._iter(int(number), stars_url, User)
def list_stargazers(self):
    """List users who have starred this repository.

    :returns: list of :class:`User <github3.users.User>`\ s
    """
    stars_url = self._build_url('stargazers', base_url=self._api)
    return [User(u, self) for u in self._json(self._get(stars_url), 200)]
def iter_subscribers(self, number=-1):
    """Iterate over users subscribed to this repository.

    :param int number: (optional), maximum number of subscribers to
        yield; -1 (default) yields all
    :returns: generator of :class:`User <github3.users.User>`
    """
    subs_url = self._build_url('subscribers', base_url=self._api)
    return self._iter(int(number), subs_url, User)
def list_subscribers(self):
    """List users subscribed to this repository.

    :returns: list of :class:`User <github3.users.User>`
    """
    subs_url = self._build_url('subscribers', base_url=self._api)
    return [User(u, self) for u in self._json(self._get(subs_url), 200)]
def iter_statuses(self, sha, number=-1):
    """Iterate over the statuses of a specific commit.

    :param str sha: SHA of the commit whose statuses to list
    :param int number: (optional), maximum number to yield; -1 (default)
        yields all
    :returns: generator of :class:`Status <Status>`
    """
    # An empty sha yields an empty URL, which produces no results.
    status_url = self._build_url('statuses', sha, base_url=self._api) if sha else ''
    return self._iter(int(number), status_url, Status)
def list_statuses(self, sha):
    """List the statuses of a specific commit.

    :param str sha: SHA of the commit whose statuses to list
    :returns: list of :class:`Status <Status>`
    """
    if not sha:
        return []
    status_url = self._build_url('statuses', sha, base_url=self._api)
    return [Status(s) for s in self._json(self._get(status_url), 200)]
def iter_tags(self, number=-1):
    """Iterate over tags on this repository.

    :param int number: (optional), maximum number of tags to yield; -1
        (default) yields all
    :returns: generator of :class:`RepoTag <RepoTag>`\ s
    """
    tags_url = self._build_url('tags', base_url=self._api)
    return self._iter(int(number), tags_url, RepoTag)
def list_tags(self):
    """List tags on this repository.

    :returns: list of :class:`RepoTag <RepoTag>`\ s
    """
    tags_url = self._build_url('tags', base_url=self._api)
    return [RepoTag(t) for t in self._json(self._get(tags_url), 200)]
@requires_auth
def iter_teams(self, number=-1):
    """Iterate over teams with access to this repository.

    :param int number: (optional), maximum number of teams to yield; -1
        (default) yields all
    :returns: generator of :class:`Team <github3.orgs.Team>`\ s
    """
    # Imported here to avoid a circular import with github3.orgs.
    from github3.orgs import Team
    teams_url = self._build_url('teams', base_url=self._api)
    return self._iter(int(number), teams_url, Team)
@requires_auth
def list_teams(self):
    """List teams with access to this repository.

    :returns: list of :class:`Team <github3.orgs.Team>`\ s
    """
    # Imported here to avoid a circular import with github3.orgs.
    from github3.orgs import Team
    teams_url = self._build_url('teams', base_url=self._api)
    payload = self._json(self._get(teams_url), 200)
    return [Team(t, self) for t in payload]
def list_watchers(self):
"""DEPRECATED: Use list_stargazers() instead."""
# Intentionally *raises* (not warns) so callers migrate immediately.
raise DeprecationWarning('Use list_stargazers() instead.')
def mark_notifications(self, last_read=''):
    """Mark all notifications in this repository as read.

    :param str last_read: (optional), the last point notifications were
        checked, as a UTC ISO 8601 timestamp (``YYYY-MM-DDTHH:MM:SSZ``),
        e.g. "2012-10-09T23:39:01Z". Anything updated since that time is
        left unread. Default: now.
    :returns: bool
    """
    url = self._build_url('notifications', base_url=self._api)
    payload = {'read': True}
    if last_read:
        payload['last_read_at'] = last_read
    return self._boolean(self._put(url, data=dumps(payload)), 205, 404)
def merge(self, base, head, message=''):
    """Perform a merge from ``head`` into ``base``.

    :param str base: (required), branch being merged into
    :param str head: (required), branch being merged from
    :param str message: (optional), message used for the merge commit
    :returns: :class:`RepoCommit <RepoCommit>`
    """
    merge_url = self._build_url('merges', base_url=self._api)
    payload = dumps({'base': base, 'head': head,
                     'commit_message': message})
    resp = self._json(self._post(merge_url, data=payload), 201)
    return RepoCommit(resp, self) if resp else None
def milestone(self, number):
    """Get the milestone indicated by ``number``.

    :param int number: (required), unique id number of the milestone
    :returns: :class:`Milestone <github3.issues.Milestone>`
    """
    mile_url = self._build_url('milestones', str(number),
                               base_url=self._api)
    resp = self._json(self._get(mile_url), 200)
    return Milestone(resp, self) if resp else None
@requires_auth
def pubsubhubbub(self, mode, topic, callback, secret=''):
    """Create/update a pubsubhubbub hook.

    :param str mode: (required), accepted values:
        ('subscribe', 'unsubscribe')
    :param str topic: (required), form:
        https://github.com/:user/:repo/events/:event
    :param str callback: (required), the URI that receives the updates
    :param str secret: (optional), shared secret key that generates a
        SHA1 HMAC of the payload content.
    :returns: bool
    """
    from re import match
    # BUG FIX: the pattern was a plain string containing '\.' and '\w',
    # which are invalid escape sequences (DeprecationWarning on Python
    # 3.6+, SyntaxError in future versions). Use a raw string.
    m = match(r'https://github\.com/\w+/[\w\._-]+/events/\w+', topic)
    status = False
    if mode and topic and callback and m:
        data = [('hub.mode', mode), ('hub.topic', topic),
                ('hub.callback', callback), ('hub.secret', secret)]
        url = self._build_url('hub')
        status = self._boolean(self._post(url, data=data), 204, 404)
    return status
def pull_request(self, number):
    """Get the pull request indicated by ``number``.

    :param int number: (required), number of the pull request.
    :returns: :class:`PullRequest <github3.pulls.PullRequest>`
    """
    if int(number) > 0:
        pull_url = self._build_url('pulls', str(number),
                                   base_url=self._api)
        resp = self._json(self._get(pull_url), 200)
    else:
        resp = None
    return PullRequest(resp, self) if resp else None
def readme(self):
    """Get the README for this repository.

    :returns: :class:`Contents <Contents>`
    """
    readme_url = self._build_url('readme', base_url=self._api)
    resp = self._json(self._get(readme_url), 200)
    return Contents(resp) if resp else None
def ref(self, ref):
    """Get a reference pointed to by ``ref``.

    The most common cases are branches ('heads/branchname') and tags
    ('tags/tagname'), but any reference namespace the server knows —
    notes, stashes, etc. — may be requested the same way.

    :param str ref: (required)
    :returns: :class:`Reference <github3.git.Reference>`
    """
    ref_url = self._build_url('git', 'refs', ref, base_url=self._api)
    resp = self._json(self._get(ref_url), 200)
    return Reference(resp, self) if resp else None
@requires_auth
def remove_collaborator(self, login):
    """Remove collaborator ``login`` from the repository.

    :param str login: (required), login name of the collaborator
    :returns: bool
    """
    if not login:
        return False
    collab_url = self._build_url('collaborators', login,
                                 base_url=self._api)
    return self._boolean(self._delete(collab_url), 204, 404)
@requires_auth
def set_subscription(self, subscribed, ignored):
    """Set the user's subscription for this repository.

    :param bool subscribed: (required), determines if notifications
        should be received from this repository.
    :param bool ignored: (required), determines if notifications should
        be ignored from this repository.
    :returns: :class:`Subscription <Subscription>`
    """
    # Docstring fix: original read ``:class;`` which breaks the reST role.
    payload = dumps({'subscribed': subscribed, 'ignored': ignored})
    url = self._build_url('subscription', base_url=self._api)
    resp = self._json(self._put(url, data=payload), 200)
    return Subscription(resp, self) if resp else None
@requires_auth
def subscription(self):
    """Return the subscription for this Repository.

    :returns: :class:`Subscription <github3.notifications.Subscription>`
    """
    sub_url = self._build_url('subscription', base_url=self._api)
    resp = self._json(self._get(sub_url), 200)
    return Subscription(resp, self) if resp else None
def tag(self, sha):
    """Get an annotated tag.

    http://learn.github.com/p/tagging.html

    :param str sha: (required), sha of the object for this tag
    :returns: :class:`Tag <github3.git.Tag>`
    """
    tag_url = self._build_url('git', 'tags', sha, base_url=self._api)
    resp = self._json(self._get(tag_url), 200)
    return Tag(resp) if resp else None
def tree(self, sha):
    """Get a git tree.

    :param str sha: (required), sha of the object for this tree
    :returns: :class:`Tree <github3.git.Tree>`
    """
    tree_url = self._build_url('git', 'trees', sha, base_url=self._api)
    resp = self._json(self._get(tree_url), 200)
    return Tree(resp, self) if resp else None
def update_label(self, name, color, new_name=''):
    """Update the label ``name``.

    :param str name: (required), current name of the label
    :param str color: (required), color code
    :param str new_name: (optional), new name for the label
    :returns: bool
    """
    label = self.label(name)
    if not label:
        return False
    # Rename only when a new name was supplied; otherwise keep the old one.
    if new_name:
        return label.update(new_name, color)
    return label.update(name, color)
class Branch(GitHubCore):
    """The :class:`Branch <Branch>` object. It holds the information GitHub
    returns about a branch on a :class:`Repository <Repository>`.
    """

    def __init__(self, branch, session=None):
        super(Branch, self).__init__(branch, session)
        #: Name of the branch.
        self.name = branch.get('name')
        #: The branch's :class:`RepoCommit <RepoCommit>`, or ``None``.
        self.commit = branch.get('commit')
        if self.commit:
            self.commit = RepoCommit(self.commit, self._session)
        #: The '_links' attribute (dict of related URLs).
        self.links = branch.get('_links', {})

    def __repr__(self):
        return '<Repository Branch [{0}]>'.format(self.name)
class Contents(GitHubObject):
    """The :class:`Contents <Contents>` object. It holds the information
    concerning any content in a repository requested via the API.
    """

    def __init__(self, content):
        super(Contents, self).__init__(content)
        # NOTE(review): assumes '_links' is always present in the payload;
        # a response without it raises KeyError here -- confirm upstream.
        self._api = content['_links'].get('self', '')
        #: Dictionary of links.
        self.links = content.get('_links')
        #: Encoding used on the content (should always be 'base64').
        self.encoding = content.get('encoding')
        #: Base64-encoded content of the file.
        self.content = content.get('content')
        #: Decoded content of the file.
        self.decoded = self.content
        if self.encoding == 'base64':
            self.decoded = b64decode(self.content.encode())
        #: Name of the content.
        self.name = content.get('name')
        #: Path to the content.
        self.path = content.get('path')
        #: Size of the content.
        self.size = content.get('size')
        #: SHA string.
        self.sha = content.get('sha')
        #: Type of content (should always be 'file').
        self.type = content.get('type')

    def __repr__(self):
        return '<Content [{0}]>'.format(self.path)

    @property
    def git_url(self):
        """API URL for this blob."""
        return self.links['git']

    @property
    def html_url(self):
        """URL pointing to the content on GitHub."""
        return self.links['html']
class Download(GitHubCore):
    """The :class:`Download <Download>` object. It represents how GitHub sends
    information about files uploaded to the downloads section of a repository.
    """

    def __init__(self, download, session=None):
        super(Download, self).__init__(download, session)
        self._api = download.get('url', '')
        #: URL of the download at GitHub.
        self.html_url = download.get('html_url')
        #: Unique id of the download on GitHub.
        self.id = download.get('id')
        #: Name of the download.
        self.name = download.get('name')
        #: Description of the download.
        self.description = download.get('description')
        #: Size of the download.
        self.size = download.get('size')
        #: How many times this particular file has been downloaded.
        self.download_count = download.get('download_count')
        #: Content type of the download.
        self.content_type = download.get('content_type')

    def __repr__(self):
        return '<Download [{0}]>'.format(self.name)

    @requires_auth
    def delete(self):
        """Delete this download if authenticated.

        :returns: bool
        """
        return self._boolean(self._delete(self._api), 204, 404)

    def saveas(self, path=''):
        """Save this download to the path specified.

        :param path: (optional), if no path is specified, it will be
            saved in the current directory with the name specified by
            GitHub. It can take a file-like object as well.
        :type path: str
        :returns: bool
        """
        if not path:
            path = self.name
        resp = self._get(self.html_url, allow_redirects=True,
                         prefetch=False)
        if self._boolean(resp, 200, 404):
            if callable(getattr(path, 'write', None)):
                file_like = True
                fd = path
            else:
                file_like = False
                fd = open(path, 'wb')
            # BUG FIX: the original leaked the file descriptor when a
            # write (or iter_content) raised; close it in a finally block.
            # Only close handles we opened ourselves.
            try:
                for chunk in resp.iter_content():
                    fd.write(chunk)
            finally:
                if not file_like:
                    fd.close()
            return True
        return False  # (No coverage)
class Hook(GitHubCore):
    """The :class:`Hook <Hook>` object. This handles the information returned
    by GitHub about hooks set on a repository."""

    def __init__(self, hook, session=None):
        super(Hook, self).__init__(hook, session)
        self._api = hook.get('url', '')
        #: datetime object representing when this hook was last updated.
        self.updated_at = None
        if hook.get('updated_at'):
            self.updated_at = self._strptime(hook.get('updated_at'))
        #: datetime object representing the date the hook was created.
        self.created_at = self._strptime(hook.get('created_at'))
        #: The name of the hook.
        self.name = hook.get('name')
        #: Events which trigger the hook.
        self.events = hook.get('events')
        self._active = hook.get('active')
        #: Dictionary containing the configuration for the Hook.
        self.config = hook.get('config')
        #: Unique id of the hook.
        self.id = hook.get('id')

    def __repr__(self):
        return '<Hook [{0}]>'.format(self.name)

    def _update_(self, hook):
        # Re-initialize in place from a fresh API payload.
        self.__init__(hook, self._session)

    @requires_auth
    def delete(self):
        """Delete this hook.

        :returns: bool
        """
        return self._boolean(self._delete(self._api), 204, 404)

    @requires_auth
    def delete_subscription(self):
        """Delete the user's subscription to this repository.

        :returns: bool
        """
        url = self._build_url('subscription', base_url=self._api)
        return self._boolean(self._delete(url), 204, 404)

    @requires_auth
    def edit(self, name, config, events=None, add_events=None,
             rm_events=None, active=True):
        """Edit this hook.

        :param str name: (required), name of the service being called
        :param dict config: (required), key-value pairs of settings for
            this hook
        :param list events: (optional), which events should this be
            triggered for
        :param list add_events: (optional), events to be added to the list
            of events that this hook triggers for
        :param list rm_events: (optional), events to be removed from the
            list of events that this hook triggers for
        :param bool active: (optional), should this event be active
        :returns: bool
        """
        # BUG FIX: the defaults were mutable lists ([]), which are shared
        # across calls. None sentinels are backward-compatible because the
        # values were only ever truthiness-tested.
        json = None
        if name and config and isinstance(config, dict):
            data = {'name': name, 'config': config, 'active': active}
            if events:
                data['events'] = events
            if add_events:
                data['add_events'] = add_events
            if rm_events:
                data['remove_events'] = rm_events
            json = self._json(self._patch(self._api, data=dumps(data)),
                              200)
        if json:
            self._update_(json)
            return True
        return False

    def is_active(self):
        """Check whether the hook is marked as active on GitHub or not.

        :returns: bool
        """
        return self._active

    @requires_auth
    def test(self):
        """Test this hook.

        :returns: bool
        """
        return self._boolean(self._post(self._api + '/test'), 204, 404)
class RepoTag(GitHubObject):
    """The :class:`RepoTag <RepoTag>` object. This stores the information
    representing a tag that was created on a repository.
    """

    def __init__(self, tag):
        super(RepoTag, self).__init__(tag)
        #: Name of the tag.
        self.name = tag.get('name')
        #: URL for the GitHub generated zipball associated with the tag.
        self.zipball_url = tag.get('zipball_url')
        #: URL for the GitHub generated tarball associated with the tag.
        self.tarball_url = tag.get('tarball_url')
        #: Dictionary containing the SHA and URL of the commit.
        self.commit = tag.get('commit', {})

    def __repr__(self):
        return '<Repository Tag [{0}]>'.format(self.name)
class RepoComment(BaseComment):
    """The :class:`RepoComment <RepoComment>` object. This stores the
    information about a comment on a file in a repository.
    """

    def __init__(self, comment, session=None):
        super(RepoComment, self).__init__(comment, session)
        #: Commit id on which the comment was made.
        self.commit_id = comment.get('commit_id')
        #: URL of the comment on GitHub.
        self.html_url = comment.get('html_url')
        #: The line number where the comment is located.
        self.line = comment.get('line')
        #: The path to the file where the comment was made.
        self.path = comment.get('path')
        #: The position in the diff where the comment was made.
        self.position = comment.get('position')
        #: datetime object representing when the comment was updated.
        self.updated_at = comment.get('updated_at')
        if self.updated_at:
            self.updated_at = self._strptime(self.updated_at)
        #: :class:`User <github3.users.User>` who left the comment, or None.
        self.user = None
        if comment.get('user'):
            self.user = User(comment.get('user'), self)

    def __repr__(self):
        # BUG FIX: the original evaluated ``self.user.login or ''`` which
        # raised AttributeError when the comment carried no user payload
        # (self.user is explicitly None in that case -- see __init__).
        login = self.user.login if self.user else ''
        return '<Repository Comment [{0}/{1}]>'.format(self.commit_id[:7],
                                                       login)

    def _update_(self, comment):
        super(RepoComment, self)._update_(comment)
        self.__init__(comment, self._session)

    @requires_auth
    def update(self, body, sha, line, path, position):
        """Update this comment.

        :param str body: (required)
        :param str sha: (required), sha id of the commit to comment on
        :param int line: (required), line number to comment on
        :param str path: (required), relative path of the file being
            commented on
        :param int position: (required), line index in the diff to
            comment on
        :returns: bool
        """
        json = None
        if body and sha and path and line > 0 and position > 0:
            data = dumps({'body': body, 'commit_id': sha, 'line': line,
                          'path': path, 'position': position})
            json = self._json(self._post(self._api, data), 200)
        if json:
            self._update_(json)
            return True
        return False
class RepoCommit(BaseCommit):
    """The :class:`RepoCommit <RepoCommit>` object. This represents a commit as
    viewed by a :class:`Repository`. This is different from a Commit object
    returned from the git data section.
    """

    def __init__(self, commit, session=None):
        super(RepoCommit, self).__init__(commit, session)
        #: :class:`User <github3.users.User>` who authored the commit.
        self.author = commit.get('author')
        if self.author:
            self.author = User(self.author, self._session)
        #: :class:`User <github3.users.User>` who committed the commit.
        self.committer = commit.get('committer')
        if self.committer:
            self.committer = User(self.committer, self._session)
        #: :class:`Commit <github3.git.Commit>`.
        self.commit = commit.get('commit')
        if self.commit:
            self.commit = Commit(self.commit, self._session)
        self.sha = commit.get('sha')
        #: The number of additions made in the commit.
        self.additions = 0
        #: The number of deletions made in the commit.
        self.deletions = 0
        #: Total number of changes in the files.
        self.total = 0
        stats = commit.get('stats')
        if stats:
            self.additions = stats.get('additions')
            self.deletions = stats.get('deletions')
            self.total = stats.get('total')
        #: The files that were modified by this commit.
        self.files = commit.get('files', [])

    def __repr__(self):
        return '<Repository Commit [{0}]>'.format(self.sha[:7])
class Comparison(GitHubObject):
    """The :class:`Comparison <Comparison>` object. This encapsulates the
    information returned by GitHub comparing two commit objects in a
    repository."""

    def __init__(self, compare):
        super(Comparison, self).__init__(compare)
        self._api = compare.get('api', '')
        #: URL to view the comparison at GitHub.
        self.html_url = compare.get('html_url')
        #: Permanent link to this comparison.
        self.permalink_url = compare.get('permalink_url')
        #: URL to see the diff between the two commits.
        self.diff_url = compare.get('diff_url')
        #: Patch URL at GitHub for the comparison.
        self.patch_url = compare.get('patch_url')
        #: :class:`RepoCommit <RepoCommit>` representing the comparison base.
        self.base_commit = RepoCommit(compare.get('base_commit'), None)
        #: Behind or ahead.
        self.status = compare.get('status')
        #: Number of commits ahead by.
        self.ahead_by = compare.get('ahead_by')
        #: Number of commits behind by.
        self.behind_by = compare.get('behind_by')
        #: Number of commits difference in the comparison.
        self.total_commits = compare.get('total_commits')
        #: List of :class:`RepoCommit <RepoCommit>` objects.
        self.commits = [RepoCommit(c) for c in compare.get('commits')]
        #: List of dicts describing the files modified.
        self.files = compare.get('files', [])

    def __repr__(self):
        return '<Comparison of {0} commits>'.format(self.total_commits)
class Status(GitHubObject):
    """The :class:`Status <Status>` object. This represents information from
    the Repo Status API."""

    def __init__(self, status):
        super(Status, self).__init__(status)
        #: datetime object representing the creation of the status object.
        self.created_at = self._strptime(status.get('created_at'))
        #: :class:`User <github3.users.User>` who created the object.
        self.creator = User(status.get('creator'))
        #: Short description of the Status.
        self.description = status.get('description')
        #: GitHub ID for the status object.
        self.id = status.get('id')
        #: State of the status, e.g., 'success', 'pending', 'failed',
        #: 'error'.
        self.state = status.get('state')
        #: URL to view more information about the status.
        self.target_url = status.get('target_url')
        #: datetime object representing the last time the status was
        #: updated, or None.
        self.updated_at = None
        if status.get('updated_at'):
            self.updated_at = self._strptime(status.get('updated_at'))

    def __repr__(self):
        return '<Status [{s.id}:{s.state}]>'.format(s=self)
|
# Capture a single frame from the first V4L2 webcam and display it.
import pygame
import pygame.camera
from pygame.locals import *
import numpy as np
import scipy.ndimage.filters as filt
import matplotlib.pyplot as plt
import matplotlib.cm as cm
pygame.camera.init()
cam = pygame.camera.Camera("/dev/video0",(640,480))
cam.start()
image = cam.get_image()
cam.stop()
# Reinterpret the surface's raw buffer as an HxWx3 uint8 array.
# NOTE(review): assumes the camera delivers 640x480 24-bit RGB frames --
# confirm; any other pixel format makes this reshape wrong.
npimages = np.frombuffer(image.get_buffer(), dtype = np.uint8).reshape(480, 640, 3)
plt.imshow(npimages)
plt.show()
# Takes a video and compares it to an initial image.
# Record an empty reference frame, then 100 frames of video, and show the
# pixel difference between the reference and frame 50.
import pygame
import pygame.camera
from pygame.locals import *
import numpy as np
import scipy.ndimage.filters as filt
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import time


def waitCountdown():
    """Pause for three seconds, printing 4, 3, 2 as a countdown."""
    for i in range(4, 1, -1):
        time.sleep(1)
        # FIX: ``print i`` was Python-2-only; print() works on 2 and 3.
        print(i)


pygame.camera.init()
cam = pygame.camera.Camera("/dev/video0", (640, 480))
cam.start()
waitCountdown()
lfreeimage = cam.get_image()  # reference frame (scene empty)
waitCountdown()
video = ["ERROR"] * 100
for i in range(100):
    video[i] = cam.get_image()
cam.stop()

# NOTE(review): assumes 640x480 24-bit RGB frames -- confirm.
lfreenpimage = np.frombuffer(lfreeimage.get_buffer(),
                             dtype=np.uint8).reshape(480, 640, 3)
npvideo = ["error"] * 100
for i in range(100):
    # BUG FIX: the original indexed the undefined name ``npimage``; the
    # captured frames live in ``video``.
    npvideo[i] = np.frombuffer(video[i].get_buffer(),
                               dtype=np.uint8).reshape(480, 640, 3)

plt.imshow(lfreenpimage - npvideo[50])
plt.show()
|
import pygame
import __main__
import text
import math
class Saper(object):
def __init__(self):
    """Place the sapper at grid square (1, 1), facing north, idle."""
    # starting grid position
    self.saper_x = 1
    self.saper_y = 1
    # sprite width and height in pixels
    self.saper_width = 32
    self.saper_height = 32
    # heading: {"North": 0, "South": 1, "East": 2, "West": 3}
    self.direction = 0
    self.walk = False
    self.how = 0
    self.head = pygame.Rect(self.saper_x * 32 + 8, self.saper_y * 32,
                            self.saper_width / 2, self.saper_height / 2)
    self.rect = pygame.Rect(self.saper_x * 32, self.saper_y * 32,
                            self.saper_width, self.saper_height)
    # carrying / dialogue state flags
    self.bomb = False
    self.bylo = False  # "already asked" guard for follow-up questions
    self.to_find_bomb = True
    self.to_answer = False
    self.answer = False
    self.answer1 = False
    self.answer2 = False
    self.answer3 = False
def Update(self):
# Per-frame update: scan for the bomb, drive toward it once ordered,
# advance any queued walk, drag a carried bomb along with the sapper,
# reposition the head rect to face the current heading, then execute
# the pending chat command via Polecenia().
# NOTE(review): indentation was lost in this dump; the nesting of the
# branches below is not recoverable from the flat text.
if self.to_find_bomb == True:
self.Find_bomb()
if self.answer == True:
self.Move_to_bomb()
if self.walk == True:
self.Walk()
# While carrying, keep the bomb centered on the sapper's rect.
if self.bomb == True:
__main__.bomb.rect.x = self.rect.x + __main__.bomb.bomb_width / 2
__main__.bomb.rect.y = self.rect.y + __main__.bomb.bomb_height / 2
# Head placement per heading: 0=North, 1=South, 2=East, 3=West.
if self.direction == 0:
self.head.x = self.rect.x + 8
self.head.y = self.rect.y
elif self.direction == 2:
self.head.x = self.rect.x + 16
self.head.y = self.rect.y + 8
elif self.direction == 1:
self.head.x = self.rect.x + 8
self.head.y = self.rect.y + 16
elif self.direction == 3:
self.head.x = self.rect.x
self.head.y = self.rect.y + 8
self.Polecenia(); # execute queued chat commands
def Polecenia(self):
# Execute the command currently parsed by the chat module
# (__main__.chat.saved_function_name and friends) and reply in the chat
# log. Branch groups, in priority order: a pending yes/no question
# (to_answer), three contextual answers (answer1..answer3), then the
# free-form commands: Pojedz (drive), Podnies (pick up), Obroc (turn),
# Upusc (drop), Rozbroj (defuse). ``bylo`` guards follow-up questions
# so each is asked only once; the trailing while-loop caps the chat log
# at 9 entries. Polish strings are runtime chat output and must stay.
# NOTE(review): indentation was lost in this dump; branch nesting below
# is not recoverable from the flat text.
#wykonywanie polecen  (execute commands)
if self.to_answer == True:
if __main__.chat.saved_function_name == "Zaprzeczenie":
__main__.chat.chat_log.append(text.Text("Ok.", __main__.chat.saper_color))
__main__.chat.saved_function_name = ""
self.to_answer = False
if __main__.chat.saved_function_name == "Zgoda":
__main__.chat.chat_log.append(text.Text("Jade do niej.", __main__.chat.saper_color))
__main__.chat.saved_function_name = ""
self.to_answer = False
self.answer = True
elif self.answer1 == True:
if __main__.chat.saved_function_name == "Podnies":
self.Pick_up()
self.answer1 = False
__main__.chat.chat_log.append(text.Text("Ale ciezka.", __main__.chat.saper_color))
__main__.chat.saved_function_name = ""
elif __main__.chat.saved_function_name == "Rozbroj":
self.Defuse(__main__.bomb)
self.answer1 = False
__main__.chat.chat_log.append(text.Text("Sie robi.", __main__.chat.saper_color))
__main__.chat.saved_function_name = ""
elif __main__.chat.saved_function_name == "Zaprzeczenie":
self.answer1 = False
__main__.chat.chat_log.append(text.Text("Nie to nie.", __main__.chat.saper_color))
__main__.chat.saved_function_name = ""
elif self.answer2 == True:
if __main__.chat.saved_function_name == "Podnies" or __main__.chat.saved_function_name == "Zgoda":
self.Pick_up()
self.answer2 = False
__main__.chat.chat_log.append(text.Text("Gotowe.", __main__.chat.saper_color))
__main__.chat.saved_function_name = ""
elif self.answer3 == True:
if __main__.chat.saved_function_name == "Rozbroj" or __main__.chat.saved_function_name == "Zgoda":
self.Defuse(__main__.bomb)
self.answer3 = False
__main__.chat.chat_log.append(text.Text("Uff, zyje!", __main__.chat.saper_color))
__main__.chat.saved_function_name = ""
# Commands preceded by a negation are refused outright.
elif __main__.chat.saved_function_name == "Zaprzeczenie":
__main__.chat.saved_function_name = ""
__main__.chat.saved_parameter_name = ""
__main__.chat.saved_number = 0
__main__.chat.found_number = False
__main__.chat.chat_log.append(text.Text("Nie wykonam rozkazu poprzedzonego zaprzeczeniem", __main__.chat.saper_color))
# "Pojedz" (drive N squares) needs a direction and a distance.
elif __main__.chat.saved_function_name == "Pojedz" and self.walk == False and __main__.chat.found_number == True:
self.Rotate_dir(__main__.chat.saved_parameter_name)
self.Move(__main__.chat.saved_number)
__main__.chat.saved_function_name = ""
__main__.chat.saved_parameter_name = ""
__main__.chat.saved_number = 0
__main__.chat.found_number = False
elif __main__.chat.saved_function_name == "Pojedz" and self.bylo == False and self.walk == False:
__main__.chat.chat_log.append(text.Text("O ile kratek mam sie przemiescic?", __main__.chat.saper_color))
self.bylo = True
# "Podnies" (pick up) requires a named object.
elif __main__.chat.saved_function_name == "Podnies" and __main__.chat.saved_object_name == "Bomba" and self.Pick_up():
__main__.chat.chat_log.append(text.Text("Podnioslem.", __main__.chat.saper_color))
__main__.chat.saved_function_name = ""
__main__.chat.saved_object_name = ""
elif __main__.chat.saved_function_name == "Podnies" and self.bylo == False:
__main__.chat.chat_log.append(text.Text("Nie wiem co mam podniesc.", __main__.chat.saper_color))
self.bylo = True
# "Obroc" (turn) requires a direction keyword.
elif __main__.chat.saved_function_name == "Obroc":
if self.Rotate_dir(__main__.chat.saved_parameter_name):
__main__.chat.chat_log.append(text.Text("Obrocilem sie.", __main__.chat.saper_color))
__main__.chat.saved_function_name = ""
__main__.chat.saved_parameter_name = ""
elif self.bylo == False:
__main__.chat.chat_log.append(text.Text("W ktora strone mam sie obrocic?", __main__.chat.saper_color))
self.bylo = True
# "Upusc" (drop) only works while carrying something.
elif __main__.chat.saved_function_name == "Upusc":
if self.Drop() == True:
__main__.chat.chat_log.append(text.Text("Gotowe.", __main__.chat.saper_color))
__main__.chat.saved_function_name = ""
elif self.bylo == False:
self.bylo = True
__main__.chat.chat_log.append(text.Text("Nie mam co upuscic.", __main__.chat.saper_color))
# "Rozbroj" (defuse) requires the bomb as its object.
elif __main__.chat.saved_function_name == "Rozbroj" and __main__.chat.saved_object_name == "Bomba":
self.Defuse(__main__.bomb)
__main__.chat.chat_log.append(text.Text("Rozborilem.", __main__.chat.saper_color))
__main__.chat.saved_function_name = ""
__main__.chat.saved_object_name = ""
elif __main__.chat.saved_function_name == "Rozbroj" and self.bylo == False:
__main__.chat.chat_log.append(text.Text("Nie wiem co mam rozbroic.", __main__.chat.saper_color))
self.bylo = True
elif __main__.chat.dont_understand:
__main__.chat.chat_log.append(text.Text("Nie rozumiem.", __main__.chat.saper_color))
__main__.chat.dont_understand = False
# Keep at most 9 chat lines on screen.
while len(__main__.chat.chat_log) > 9:
__main__.chat.chat_log.pop(0)
###############################################################################################################3
def Render(self):
    """Draw the sapper's body and head rectangles onto the game display."""
    display = __main__.gameDisplay
    pygame.draw.rect(display, __main__.saper_color, self.rect)
    pygame.draw.rect(display, __main__.saper_head, self.head)
def Collision(self):
# Resolve overlap with walls: snap the rect back against the wall face
# opposite the travel direction, report the hit in chat, and stop
# walking.
# NOTE(review): indentation was lost in this dump; how the chat message
# and ``self.walk = False`` nest relative to the wall loop is inferred,
# not certain.
for wall in __main__.walls:
if self.rect.colliderect(wall.rect):
if self.direction == 2: # Moving right; Hit the left side of the wall
self.rect.right = wall.rect.left
if self.direction == 3: # Moving left; Hit the right side of the wall
self.rect.left = wall.rect.right
if self.direction == 1: # Moving down; Hit the top side of the wall
self.rect.bottom = wall.rect.top
if self.direction == 0: # Moving up; Hit the bottom side of the wall
self.rect.top = wall.rect.bottom
__main__.chat.chat_log.append(text.Text("Twarda sciana.", __main__.chat.saper_color))
#if len(__main__.chat.chat_log) > 10:
#__main__.chat.chat_log.pop(0)
self.walk = False
def Move(self, meters):
    """Queue a walk of ``meters`` grid squares in the current heading."""
    self.walk = True
    self.how = meters
def Walk(self):
# Advance one grid square per call while ``how`` squares remain, then
# run collision resolution. An unrecognized heading aborts the walk;
# reaching zero clears the walk flag.
# NOTE(review): indentation was lost in this dump; where the decrement
# and the final else-branch nest is inferred, not certain.
if self.how != 0:
if self.direction == 0:
self.rect.y -= self.saper_height
elif self.direction == 1:
self.rect.y += self.saper_height
elif self.direction == 2:
self.rect.x += self.saper_width
elif self.direction == 3:
self.rect.x -= self.saper_width
else:
self.how = 0
self.walk = False
self.how -= 1
else:
self.walk = False
self.Collision()
def Rotate(self):
    """Turn 90 degrees clockwise (N->E->S->W->N) and cancel any walk order."""
    if self.direction == 0:
        self.direction = 2
    elif self.direction == 1:
        self.direction = 3
    elif self.direction == 2:
        self.direction = 1
    elif self.direction == 3:
        self.direction = 0
    self.walk = False
def Rotate_dir(self, direction):
    """Set the facing from a Polish direction word.

    'Lewo'/'Prawo'/'Dol'/'Gora' set an absolute facing, 'Przod' keeps the
    current one, 'Tyl' reverses it. Returns True when the word was
    understood, False otherwise."""
    absolute = {'Lewo': 3, 'Prawo': 2, 'Dol': 1, 'Gora': 0}
    if direction in absolute:
        self.direction = absolute[direction]
        return True
    if direction == 'Przod':
        return True
    if direction == 'Tyl':
        self.direction = {1: 0, 2: 3, 0: 1, 3: 2}.get(self.direction, self.direction)
        return True
    return False
def Distance(self, object):
    """Straight-line distance between *object* (anything with pixel x/y) and
    the sapper, expressed in 32px grid cells.

    NOTE(review): under Python 2 (this codebase's era) `/ 32` is integer
    division, so this is a whole-cell distance - confirm interpreter version."""
    return math.sqrt(pow(object.x / 32 - self.rect.x / 32, 2) + pow(object.y / 32 - self.rect.y / 32, 2))
    #(math.fabs(self.rect.x - __main__.bomb.rect.x) / 32 < 8 and int(self.rect.y / 32) == int(__main__.bomb.rect.y / 32)) or (math.fabs(self.rect.y - __main__.bomb.rect.y) / 32 < 8 and int(self.rect.x / 32) == int(__main__.bomb.rect.x / 32))
def Find_bomb(self):
    """While idle, spot the bomb at a range of 1..6 cells and ask the player
    whether to drive to it (moves the robot into the to_answer state)."""
    if self.Distance(__main__.bomb.rect) < 6 and self.Distance(__main__.bomb.rect) > 1 and self.walk == False:
        self.to_find_bomb = False
        self.to_answer = True
        # "I spotted a bomb, drive up to it?"
        __main__.chat.chat_log.append(text.Text("Zauwazylem bombe, podjechac do niej?", __main__.chat.saper_color))
def Move_to_bomb(self):
    """Step one cell toward the bomb (x axis first, then y); on arrival, ask
    the follow-up question matching the bomb type."""
    x1 = self.rect.x / 32
    x2 = __main__.bomb.rect.x / 32
    y1 = self.rect.y / 32
    y2 = __main__.bomb.rect.y / 32
    if self.Distance(__main__.bomb.rect) > 1:
        if x1 < x2:
            self.direction = 2
            self.rect.x += self.saper_width
        elif x1 > x2:
            self.direction = 3
            self.rect.x -= self.saper_width
        elif y1 > y2:
            self.direction = 0
            self.rect.y -= self.saper_height
        elif y1 < y2:
            self.direction = 1
            self.rect.y += self.saper_height
    else:
        self.answer = False
        # type 1: pick up or defuse; type 2: pick up only; type 3: defuse only
        if __main__.bomb.type == 1:
            __main__.chat.chat_log.append(text.Text("Podniesc ja, a moze rozbroic?", __main__.chat.saper_color))
            self.answer1 = True
        elif __main__.bomb.type == 2:
            __main__.chat.chat_log.append(text.Text("Mam podniesc bombe?", __main__.chat.saper_color))
            self.answer2 = True
        elif __main__.bomb.type == 3:
            __main__.chat.chat_log.append(text.Text("Sprobowac rozbroic?", __main__.chat.saper_color))
            self.answer3 = True
def Pick_up(self):
    """Try to pick the bomb up. Returns True on success; otherwise explains
    the failure in chat at most once (guarded by self.bylo) and returns False."""
    if self.Distance(__main__.bomb.rect) <= 1 and __main__.bomb.lifting == True:
        self.bomb = True
        return True
    elif self.bylo == False:
        if self.Distance(__main__.bomb.rect) > 1:
            # "My arms aren't that long."
            __main__.chat.chat_log.append(text.Text("Nie mam tak dlugich raczek.", __main__.chat.saper_color))
        elif __main__.bomb.lifting == False:
            # "I can't pick this bomb up."
            __main__.chat.chat_log.append(text.Text("Nie moge podniesc tej bomby.", __main__.chat.saper_color))
        self.bylo = True
    return False
def Drop(self):
    """Release the carried bomb. Returns True if one was actually held."""
    held = (self.bomb == True)
    if held:
        self.bomb = False
    return held
def Defuse(self, bomb):
    """Defuse *bomb* when it is defusable (type 1 or 3) and adjacent;
    otherwise explain why not, each excuse at most once (self.bylo)."""
    if (bomb.type == 1 or bomb.type == 3) and self.Distance(__main__.bomb.rect) <= 1:
        bomb.defused = True
    elif self.bylo == False and bomb.type == 2:
        # "I can't defuse this bomb."
        __main__.chat.chat_log.append(text.Text("Nie moge rozbroic tej bomby.", __main__.chat.saper_color))
        self.bylo = True
    else:
        # NOTE(review): this branch also fires for type 2 once bylo is True,
        # so the "need to be closer" message can be misleading - confirm intent.
        __main__.chat.chat_log.append(text.Text("Musze byc blizej bomby.", __main__.chat.saper_color))
        self.bylo = True
#def Detonate(self, bomb):
Mniej rygorystycznie "Nie rozumiem" pt2
"Nie rozumiem" obsługiwane przez sprawdzanie, czy jakakolwiek akcja została wykonana (chat.actiondone)
import pygame
import __main__
import text
import math
class Saper(object):
    """Bomb-disposal robot ("saper"): moves on a 32px grid, chats with the
    player in Polish, and can find, pick up, drop and defuse the bomb."""
    def __init__(self):
        # starting location (grid coordinates)
        self.saper_x = 1
        self.saper_y = 1
        # sapper width and height in pixels
        self.saper_width = 32
        self.saper_height = 32
        # facing {"North" : 0, "South" : 1, "East" : 2, "West" : 3}
        self.direction = 0
        self.walk = False          # currently executing a Move order
        self.how = 0               # remaining steps of the current Move order
        self.head = pygame.Rect(self.saper_x * 32 + 8, self.saper_y * 32, self.saper_width / 2, self.saper_height / 2)
        self.rect = pygame.Rect(self.saper_x * 32, self.saper_y * 32, self.saper_width, self.saper_height)
        self.bomb = False          # carrying the bomb
        self.bylo = False          # "already said it" flag to avoid repeating a reply
        self.to_find_bomb = True   # still scanning for the bomb
        self.to_answer = False     # awaiting yes/no to "drive to the bomb?"
        self.answer = False        # player agreed: drive toward the bomb
        self.answer1 = False       # awaiting pick-up/defuse choice (bomb type 1)
        self.answer2 = False       # awaiting pick-up confirmation (bomb type 2)
        self.answer3 = False       # awaiting defuse confirmation (bomb type 3)
def Update(self):
    """Per-frame update: scan for the bomb, drive to it if agreed, execute a
    pending walk, keep the carried bomb attached, and reposition the head
    marker on the facing edge of the body."""
    if self.to_find_bomb == True:
        self.Find_bomb()
    if self.answer == True:
        self.Move_to_bomb()
    if self.walk == True:
        self.Walk()
    if self.bomb == True:
        # carried bomb follows the sapper
        __main__.bomb.rect.x = self.rect.x + __main__.bomb.bomb_width / 2
        __main__.bomb.rect.y = self.rect.y + __main__.bomb.bomb_height / 2
    if self.direction == 0:
        self.head.x = self.rect.x + 8
        self.head.y = self.rect.y
    elif self.direction == 2:
        self.head.x = self.rect.x + 16
        self.head.y = self.rect.y + 8
    elif self.direction == 1:
        self.head.x = self.rect.x + 8
        self.head.y = self.rect.y + 16
    elif self.direction == 3:
        self.head.x = self.rect.x
        self.head.y = self.rect.y + 8
    #self.Polecenia(); # command execution - moved to recognize_words, run after processing
def Polecenia(self):
    """Execute the command parsed by the chat module ("Polecenia" = "Orders").

    Dispatch priority: pending yes/no questions first (to_answer, then
    answer1/answer2/answer3), then plain commands (Pojedz=drive,
    Podnies=pick up, Obroc=turn, Upusc=drop, Rozbroj=defuse). Each handled
    action sets chat.actiondone; if nothing matched, replies "Nie rozumiem"
    ("I don't understand"). Finally trims the chat log to 9 entries."""
    # command execution
    if self.to_answer == True:
        # answering "drive to the bomb?" - Zaprzeczenie = denial, Zgoda = consent
        if __main__.chat.saved_function_name == "Zaprzeczenie":
            __main__.chat.chat_log.append(text.Text("Ok.", __main__.chat.saper_color))
            __main__.chat.saved_function_name = ""
            self.to_answer = False
            __main__.chat.actiondone = True;
        if __main__.chat.saved_function_name == "Zgoda":
            __main__.chat.chat_log.append(text.Text("Jade do niej.", __main__.chat.saper_color))
            __main__.chat.saved_function_name = ""
            self.to_answer = False
            self.answer = True
            __main__.chat.actiondone = True;
    elif self.answer1 == True:
        # type-1 bomb: pick up, defuse, or refuse
        if __main__.chat.saved_function_name == "Podnies":
            self.Pick_up()
            self.answer1 = False
            __main__.chat.chat_log.append(text.Text("Ale ciezka.", __main__.chat.saper_color))
            __main__.chat.saved_function_name = ""
            __main__.chat.actiondone = True;
        elif __main__.chat.saved_function_name == "Rozbroj":
            self.Defuse(__main__.bomb)
            self.answer1 = False
            __main__.chat.chat_log.append(text.Text("Sie robi.", __main__.chat.saper_color))
            __main__.chat.saved_function_name = ""
            __main__.chat.actiondone = True;
        elif __main__.chat.saved_function_name == "Zaprzeczenie":
            self.answer1 = False
            __main__.chat.chat_log.append(text.Text("Nie to nie.", __main__.chat.saper_color))
            __main__.chat.saved_function_name = ""
            __main__.chat.actiondone = True;
    elif self.answer2 == True:
        # type-2 bomb: confirm pick-up
        if __main__.chat.saved_function_name == "Podnies" or __main__.chat.saved_function_name == "Zgoda":
            self.Pick_up()
            self.answer2 = False
            __main__.chat.chat_log.append(text.Text("Gotowe.", __main__.chat.saper_color))
            __main__.chat.saved_function_name = ""
            __main__.chat.actiondone = True;
    elif self.answer3 == True:
        # type-3 bomb: confirm defuse attempt
        if __main__.chat.saved_function_name == "Rozbroj" or __main__.chat.saved_function_name == "Zgoda":
            self.Defuse(__main__.bomb)
            self.answer3 = False
            __main__.chat.actiondone = True;
            __main__.chat.chat_log.append(text.Text("Uff, zyje!", __main__.chat.saper_color))
            __main__.chat.saved_function_name = ""
    elif __main__.chat.saved_function_name == "Zaprzeczenie":
        # a command preceded by a negation is refused and all parse state cleared
        __main__.chat.saved_function_name = ""
        __main__.chat.saved_parameter_name = ""
        __main__.chat.saved_number = 0
        __main__.chat.found_number = False
        __main__.chat.actiondone = True;
        __main__.chat.chat_log.append(text.Text("Nie wykonam rozkazu poprzedzonego zaprzeczeniem", __main__.chat.saper_color))
    elif __main__.chat.saved_function_name == "Pojedz" and self.walk == False and __main__.chat.found_number == True:
        # drive: direction word + cell count
        self.Rotate_dir(__main__.chat.saved_parameter_name)
        self.Move(__main__.chat.saved_number)
        __main__.chat.saved_function_name = ""
        __main__.chat.saved_parameter_name = ""
        __main__.chat.saved_number = 0
        __main__.chat.found_number = False
        __main__.chat.actiondone = True;
    elif __main__.chat.saved_function_name == "Pojedz" and self.bylo == False and self.walk == False:
        # drive without a count: ask how many cells (once)
        __main__.chat.chat_log.append(text.Text("O ile kratek mam sie przemiescic?", __main__.chat.saper_color))
        self.bylo = True
        __main__.chat.actiondone = True;
    elif __main__.chat.saved_function_name == "Podnies" and __main__.chat.saved_object_name == "Bomba" and self.Pick_up():
        __main__.chat.chat_log.append(text.Text("Podnioslem.", __main__.chat.saper_color))
        __main__.chat.saved_function_name = ""
        __main__.chat.saved_object_name = ""
        __main__.chat.actiondone = True;
    elif __main__.chat.saved_function_name == "Podnies" and self.bylo == False:
        __main__.chat.chat_log.append(text.Text("Nie wiem co mam podniesc.", __main__.chat.saper_color))
        self.bylo = True
        __main__.chat.actiondone = True;
    elif __main__.chat.saved_function_name == "Obroc":
        # turn: needs a recognized direction word
        if self.Rotate_dir(__main__.chat.saved_parameter_name):
            __main__.chat.chat_log.append(text.Text("Obrocilem sie.", __main__.chat.saper_color))
            __main__.chat.saved_function_name = ""
            __main__.chat.saved_parameter_name = ""
            __main__.chat.actiondone = True;
        elif self.bylo == False:
            __main__.chat.chat_log.append(text.Text("W ktora strone mam sie obrocic?", __main__.chat.saper_color))
            self.bylo = True
            __main__.chat.actiondone = True;
    elif __main__.chat.saved_function_name == "Upusc":
        if self.Drop() == True:
            __main__.chat.chat_log.append(text.Text("Gotowe.", __main__.chat.saper_color))
            __main__.chat.saved_function_name = ""
            __main__.chat.actiondone = True;
        elif self.bylo == False:
            self.bylo = True
            __main__.chat.actiondone = True;
            __main__.chat.chat_log.append(text.Text("Nie mam co upuscic.", __main__.chat.saper_color))
    elif __main__.chat.saved_function_name == "Rozbroj" and __main__.chat.saved_object_name == "Bomba":
        self.Defuse(__main__.bomb)
        __main__.chat.chat_log.append(text.Text("Rozborilem.", __main__.chat.saper_color))
        __main__.chat.saved_function_name = ""
        __main__.chat.saved_object_name = ""
        __main__.chat.actiondone = True;
    elif __main__.chat.saved_function_name == "Rozbroj" and self.bylo == False:
        __main__.chat.chat_log.append(text.Text("Nie wiem co mam rozbroic.", __main__.chat.saper_color))
        self.bylo = True
        __main__.chat.actiondone = True;
    elif __main__.chat.actiondone == False:
        # nothing above handled the input
        __main__.chat.chat_log.append(text.Text("Nie rozumiem.", __main__.chat.saper_color))
        __main__.chat.dont_understand = False
    # keep the on-screen chat log at 9 lines
    while len(__main__.chat.chat_log) > 9:
        __main__.chat.chat_log.pop(0)
###############################################################################################################3
def Render(self):
    """Draw the sapper's body and head rectangles onto the main display."""
    pygame.draw.rect(__main__.gameDisplay, __main__.saper_color, self.rect)
    pygame.draw.rect(__main__.gameDisplay, __main__.saper_head, self.head)
def Collision(self):
    """Resolve wall collisions: snap the sapper flush against the wall face it
    hit (chosen by current travel direction), report it in chat and stop
    walking."""
    for wall in __main__.walls:
        if self.rect.colliderect(wall.rect):
            if self.direction == 2:  # Moving right; Hit the left side of the wall
                self.rect.right = wall.rect.left
            if self.direction == 3:  # Moving left; Hit the right side of the wall
                self.rect.left = wall.rect.right
            if self.direction == 1:  # Moving down; Hit the top side of the wall
                self.rect.bottom = wall.rect.top
            if self.direction == 0:  # Moving up; Hit the bottom side of the wall
                self.rect.top = wall.rect.bottom
            # "Twarda sciana." = "Hard wall."
            __main__.chat.chat_log.append(text.Text("Twarda sciana.", __main__.chat.saper_color))
            #if len(__main__.chat.chat_log) > 10:
            #__main__.chat.chat_log.pop(0)
            self.walk = False
def Move(self, meters):
    """Order the sapper to advance *meters* grid cells in its current facing."""
    self.how = meters
    self.walk = True
def Walk(self):
    """Take one 32px step of the pending Move order, then resolve collisions.

    Decrements self.how per step; the order ends when it reaches 0."""
    if self.how != 0:
        if self.direction == 0:
            self.rect.y -= self.saper_height
        elif self.direction == 1:
            self.rect.y += self.saper_height
        elif self.direction == 2:
            self.rect.x += self.saper_width
        elif self.direction == 3:
            self.rect.x -= self.saper_width
        else:
            # unknown facing: abort the order
            self.how = 0
            self.walk = False
        # NOTE(review): this decrement also runs after the abort branch above,
        # leaving how == -1; harmless only because walk is already False - confirm.
        self.how -= 1
    else:
        self.walk = False
    self.Collision()
def Rotate(self):
    """Turn 90 degrees clockwise (N->E->S->W->N) and cancel any walk order."""
    if self.direction == 0:
        self.direction = 2
    elif self.direction == 1:
        self.direction = 3
    elif self.direction == 2:
        self.direction = 1
    elif self.direction == 3:
        self.direction = 0
    self.walk = False
def Rotate_dir(self, direction):
if(direction == 'Lewo'):
self.direction = 3
return True
elif (direction == 'Prawo'):
self.direction = 2
return True
elif (direction == 'Dol'):
self.direction = 1
return True
elif (direction == 'Przod'):
return True
elif (direction == 'Gora'):
self.direction = 0
return True
elif (direction == 'Tyl'):
if (self.direction == 1):
self.direction = 0;
elif (self.direction == 2):
self.direction = 3;
elif (self.direction == 0):
self.direction = 1;
elif (self.direction == 3):
self.direction = 2;
return True
return False
def Distance(self, object):
    """Straight-line distance between *object* (anything with pixel x/y) and
    the sapper, expressed in 32px grid cells.

    NOTE(review): under Python 2 (this codebase's era) `/ 32` is integer
    division, so this is a whole-cell distance - confirm interpreter version."""
    return math.sqrt(pow(object.x / 32 - self.rect.x / 32, 2) + pow(object.y / 32 - self.rect.y / 32, 2))
    #(math.fabs(self.rect.x - __main__.bomb.rect.x) / 32 < 8 and int(self.rect.y / 32) == int(__main__.bomb.rect.y / 32)) or (math.fabs(self.rect.y - __main__.bomb.rect.y) / 32 < 8 and int(self.rect.x / 32) == int(__main__.bomb.rect.x / 32))
def Find_bomb(self):
    """While idle, spot the bomb at a range of 1..6 cells and ask the player
    whether to drive to it (moves the robot into the to_answer state)."""
    if self.Distance(__main__.bomb.rect) < 6 and self.Distance(__main__.bomb.rect) > 1 and self.walk == False:
        self.to_find_bomb = False
        self.to_answer = True
        # "I spotted a bomb, drive up to it?"
        __main__.chat.chat_log.append(text.Text("Zauwazylem bombe, podjechac do niej?", __main__.chat.saper_color))
def Move_to_bomb(self):
    """Step one cell toward the bomb (x axis first, then y); on arrival, ask
    the follow-up question matching the bomb type."""
    x1 = self.rect.x / 32
    x2 = __main__.bomb.rect.x / 32
    y1 = self.rect.y / 32
    y2 = __main__.bomb.rect.y / 32
    if self.Distance(__main__.bomb.rect) > 1:
        if x1 < x2:
            self.direction = 2
            self.rect.x += self.saper_width
        elif x1 > x2:
            self.direction = 3
            self.rect.x -= self.saper_width
        elif y1 > y2:
            self.direction = 0
            self.rect.y -= self.saper_height
        elif y1 < y2:
            self.direction = 1
            self.rect.y += self.saper_height
    else:
        self.answer = False
        # type 1: pick up or defuse; type 2: pick up only; type 3: defuse only
        if __main__.bomb.type == 1:
            __main__.chat.chat_log.append(text.Text("Podniesc ja, a moze rozbroic?", __main__.chat.saper_color))
            self.answer1 = True
        elif __main__.bomb.type == 2:
            __main__.chat.chat_log.append(text.Text("Mam podniesc bombe?", __main__.chat.saper_color))
            self.answer2 = True
        elif __main__.bomb.type == 3:
            __main__.chat.chat_log.append(text.Text("Sprobowac rozbroic?", __main__.chat.saper_color))
            self.answer3 = True
def Pick_up(self):
    """Try to pick the bomb up. Returns True on success; otherwise explains
    the failure in chat at most once (guarded by self.bylo) and returns False."""
    if self.Distance(__main__.bomb.rect) <= 1 and __main__.bomb.lifting == True:
        self.bomb = True
        return True
    elif self.bylo == False:
        if self.Distance(__main__.bomb.rect) > 1:
            # "My arms aren't that long."
            __main__.chat.chat_log.append(text.Text("Nie mam tak dlugich raczek.", __main__.chat.saper_color))
        elif __main__.bomb.lifting == False:
            # "I can't pick this bomb up."
            __main__.chat.chat_log.append(text.Text("Nie moge podniesc tej bomby.", __main__.chat.saper_color))
        self.bylo = True
    return False
def Drop(self):
if self.bomb == True:
self.bomb = False
return True
return False
def Defuse(self, bomb):
    """Defuse *bomb* when it is defusable (type 1 or 3) and adjacent;
    otherwise explain why not, each excuse at most once (self.bylo)."""
    if (bomb.type == 1 or bomb.type == 3) and self.Distance(__main__.bomb.rect) <= 1:
        bomb.defused = True
    elif self.bylo == False and bomb.type == 2:
        # "I can't defuse this bomb."
        __main__.chat.chat_log.append(text.Text("Nie moge rozbroic tej bomby.", __main__.chat.saper_color))
        self.bylo = True
    else:
        # NOTE(review): this branch also fires for type 2 once bylo is True,
        # so the "need to be closer" message can be misleading - confirm intent.
        __main__.chat.chat_log.append(text.Text("Musze byc blizej bomby.", __main__.chat.saper_color))
        self.bylo = True
#def Detonate(self, bomb):
|
import os
import datetime
import requests
from flask import jsonify, request
from dateutil.parser import parse
from server import app, sqldb
from penn.base import APIError
from .models import StudySpacesBooking, User
from .penndata import studyspaces, wharton
from .base import cached_route
@app.route('/studyspaces/gsr', methods=['GET'])
def get_wharton_gsrs():
    """ Temporary endpoint to allow non-authenticated users to access the list of GSRs.

    Uses the 'sessionid' query arg (falling back to the GSR_SESSIONID env var)
    to authenticate against the Wharton GSR grid view, searching at the
    optional 'date' query arg (default: now, UTC).
    """
    sessionid = request.args.get('sessionid')
    if not sessionid:
        sessionid = os.environ.get('GSR_SESSIONID')
    if not sessionid:
        return jsonify({'error': 'No GSR session id is set!'})
    time = request.args.get('date')
    if time:
        # a bare date defaults to 05:00, matching the grid's "HH:MM" suffix
        time += " 05:00"
    else:
        # Bug fix: the format was "%H:%S" (hour:SECOND); the explicit-date
        # branch shows the expected shape is "YYYY-MM-DD HH:MM".
        time = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M")
    resp = requests.get('https://apps.wharton.upenn.edu/gsr/api/app/grid_view/', params={
        'search_time': time
    }, cookies={
        'sessionid': sessionid
    })
    if resp.status_code == 200:
        return jsonify(resp.json())
    else:
        return jsonify({'error': 'Remote server returned status code {}.'.format(resp.status_code)})
@app.route('/studyspaces/gsr/reservations', methods=['GET'])
def get_wharton_gsr_reservations():
    """
    Returns JSON containing a list of Wharton GSR reservations.
    """
    sessionid = request.args.get('sessionid')
    if sessionid:
        try:
            reservations = wharton.get_reservations(sessionid)
        except APIError as e:
            return jsonify({"error": str(e)})
        return jsonify({'reservations': reservations})
    return jsonify({'error': 'No Session ID provided.'})
@app.route('/studyspaces/availability/<int:building>', methods=['GET'])
def parse_times(building):
    """
    Returns JSON containing all rooms for a given building.
    Usage:
    /studyspaces/availability/<building> gives all rooms for the next 24 hours
    /studyspaces/availability/<building>?start=2018-25-01 gives all rooms in the start date
    /studyspaces/availability/<building>?start=...&end=... gives all rooms between the two days
    """
    # 'date' is a single-day shorthand for start == end
    if 'date' in request.args:
        start = request.args.get('date')
        end = request.args.get('date')
    else:
        start = request.args.get('start')
        end = request.args.get('end')
    try:
        rooms = studyspaces.get_rooms(building, start, end)
        # legacy support for old scraping method: flatten the category tree
        # into a top-level "rooms" list and rename fields to the old schema
        # (image->thumbnail, id->room_id, availability->times, from/to->start/end)
        rooms["location_id"] = rooms["id"]
        rooms["rooms"] = []
        for room_list in rooms["categories"]:
            for room in room_list["rooms"]:
                room["thumbnail"] = room["image"]
                del room["image"]
                room["room_id"] = room["id"]
                del room["id"]
                room["gid"] = room_list["cid"]
                room["lid"] = building
                room["times"] = room["availability"]
                del room["availability"]
                for time in room["times"]:
                    time["available"] = True
                    time["start"] = time["from"]
                    time["end"] = time["to"]
                    del time["from"]
                    del time["to"]
                rooms["rooms"].append(room)
    except APIError as e:
        return jsonify({"error": str(e)})
    return jsonify(rooms)
@app.route('/studyspaces/locations', methods=['GET'])
def display_id_pairs():
    """
    Returns JSON containing a list of buildings with their ids.

    The response is cached for one day under 'studyspaces:locations'.
    """
    def fetch():
        return {"locations": studyspaces.get_buildings()}
    one_day = datetime.timedelta(days=1)
    return cached_route('studyspaces:locations', one_day, fetch)
@app.route('/studyspaces/cancel', methods=['POST'])
def cancel_room():
    """
    Cancels a booked room.

    Expects a form field "booking_id" that may hold several comma-separated
    ids; every id must have been booked through this server or the whole
    request is rejected.
    """
    booking_id = request.form.get("booking_id")
    if not booking_id:
        return jsonify({"error": "No booking id sent to server!"})
    # ensure that the server was the one that booked the room
    for bid in booking_id.strip().split(","):
        exists = sqldb.session.query(sqldb.exists().where(StudySpacesBooking.booking_id == bid)).scalar()
        if not exists:
            return jsonify({"error": "Cancellation request aborted because of booking '{}'.".format(bid)})
    resp = studyspaces.cancel_room(booking_id)
    return jsonify(resp)
@app.route('/studyspaces/book', methods=['POST'])
def book_room():
    """
    Books a room.

    Required form fields: room, start, end, firstname, lastname, email,
    groupname. Optional: phone, size. Successful bookings are persisted so
    they can later be validated by /studyspaces/cancel.
    """
    try:
        room = int(request.form["room"])
    except (KeyError, ValueError):
        return jsonify({"results": False, "error": "Please specify a correct room id!"})
    try:
        start = parse(request.form["start"])
        end = parse(request.form["end"])
    except KeyError:
        return jsonify({"results": False, "error": "No start and end parameters passed to server!"})
    # required contact fields, mapped from the API's arg names to our form names
    contact = {}
    for arg, field in [("fname", "firstname"), ("lname", "lastname"), ("email", "email"), ("nickname", "groupname")]:
        try:
            contact[arg] = request.form[field]
        except KeyError:
            return jsonify({"results": False, "error": "'{}' is a required parameter!".format(field)})
    # optional custom question fields
    # NOTE(review): both q2533/q2555/q2537 are opaque question ids; q2555 and
    # q2537 both map from "size" - presumably different rooms use different
    # question ids; confirm against the booking backend.
    contact["custom"] = {}
    for arg, field in [("q2533", "phone"), ("q2555", "size"), ("q2537", "size")]:
        try:
            contact["custom"][arg] = request.form[field]
        except KeyError:
            pass
    resp = studyspaces.book_room(room, start.isoformat(), end.isoformat(), **contact)
    # record successful bookings so cancellations can be validated later
    if "error" not in resp:
        save_booking(
            rid=room,
            email=contact["email"],
            start=start.replace(tzinfo=None),
            end=end.replace(tzinfo=None),
            booking_id=resp.get("booking_id")
        )
    return jsonify(resp)
def save_booking(**info):
    """Persist a booking record, attributed to the current user.

    Silently does nothing when no user can be resolved."""
    try:
        user = User.get_or_create()
    except ValueError:
        return
    if user is None:
        return
    info['user'] = user.id
    sqldb.session.add(StudySpacesBooking(**info))
    sqldb.session.commit()
added reservations route
import os
import datetime
import requests
from flask import jsonify, request
from dateutil.parser import parse
from server import app, sqldb
from penn.base import APIError
from .models import StudySpacesBooking, User
from .penndata import studyspaces, wharton
from .base import cached_route
@app.route('/studyspaces/gsr', methods=['GET'])
def get_wharton_gsrs():
    """ Temporary endpoint to allow non-authenticated users to access the list of GSRs.

    Uses the 'sessionid' query arg (falling back to the GSR_SESSIONID env var)
    to authenticate against the Wharton GSR grid view, searching at the
    optional 'date' query arg (default: now, UTC).
    """
    sessionid = request.args.get('sessionid')
    if not sessionid:
        sessionid = os.environ.get('GSR_SESSIONID')
    if not sessionid:
        return jsonify({'error': 'No GSR session id is set!'})
    time = request.args.get('date')
    if time:
        # a bare date defaults to 05:00, matching the grid's "HH:MM" suffix
        time += " 05:00"
    else:
        # Bug fix: the format was "%H:%S" (hour:SECOND); the explicit-date
        # branch shows the expected shape is "YYYY-MM-DD HH:MM".
        time = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M")
    resp = requests.get('https://apps.wharton.upenn.edu/gsr/api/app/grid_view/', params={
        'search_time': time
    }, cookies={
        'sessionid': sessionid
    })
    if resp.status_code == 200:
        return jsonify(resp.json())
    else:
        return jsonify({'error': 'Remote server returned status code {}.'.format(resp.status_code)})
@app.route('/studyspaces/gsr/reservations', methods=['GET'])
def get_wharton_gsr_reservations():
    """
    Returns JSON containing a list of Wharton GSR reservations.

    Requires a 'sessionid' query arg identifying the Wharton session.
    """
    # Bug fix: this route/function was defined twice (an apparent merge
    # artifact); registering the same Flask endpoint twice raises
    # "View function mapping is overwriting an existing endpoint" at import.
    # The two bodies differed only in docstring and error message; the
    # documented variant is kept.
    sessionid = request.args.get('sessionid')
    if not sessionid:
        return jsonify({'error': 'No Session ID provided.'})
    try:
        reservations = wharton.get_reservations(sessionid)
    except APIError as e:
        return jsonify({"error": str(e)})
    return jsonify({'reservations': reservations})
@app.route('/studyspaces/availability/<int:building>', methods=['GET'])
def parse_times(building):
    """
    Returns JSON containing all rooms for a given building.
    Usage:
    /studyspaces/availability/<building> gives all rooms for the next 24 hours
    /studyspaces/availability/<building>?start=2018-25-01 gives all rooms in the start date
    /studyspaces/availability/<building>?start=...&end=... gives all rooms between the two days
    """
    # 'date' is a single-day shorthand for start == end
    if 'date' in request.args:
        start = request.args.get('date')
        end = request.args.get('date')
    else:
        start = request.args.get('start')
        end = request.args.get('end')
    try:
        rooms = studyspaces.get_rooms(building, start, end)
        # legacy support for old scraping method: flatten the category tree
        # into a top-level "rooms" list and rename fields to the old schema
        # (image->thumbnail, id->room_id, availability->times, from/to->start/end)
        rooms["location_id"] = rooms["id"]
        rooms["rooms"] = []
        for room_list in rooms["categories"]:
            for room in room_list["rooms"]:
                room["thumbnail"] = room["image"]
                del room["image"]
                room["room_id"] = room["id"]
                del room["id"]
                room["gid"] = room_list["cid"]
                room["lid"] = building
                room["times"] = room["availability"]
                del room["availability"]
                for time in room["times"]:
                    time["available"] = True
                    time["start"] = time["from"]
                    time["end"] = time["to"]
                    del time["from"]
                    del time["to"]
                rooms["rooms"].append(room)
    except APIError as e:
        return jsonify({"error": str(e)})
    return jsonify(rooms)
@app.route('/studyspaces/locations', methods=['GET'])
def display_id_pairs():
    """
    Returns JSON containing a list of buildings with their ids.

    The response is cached for one day under 'studyspaces:locations'.
    """
    def fetch():
        return {"locations": studyspaces.get_buildings()}
    one_day = datetime.timedelta(days=1)
    return cached_route('studyspaces:locations', one_day, fetch)
@app.route('/studyspaces/cancel', methods=['POST'])
def cancel_room():
    """
    Cancels a booked room.

    Expects a form field "booking_id" that may hold several comma-separated
    ids; every id must have been booked through this server or the whole
    request is rejected.
    """
    booking_id = request.form.get("booking_id")
    if not booking_id:
        return jsonify({"error": "No booking id sent to server!"})
    # ensure that the server was the one that booked the room
    for bid in booking_id.strip().split(","):
        exists = sqldb.session.query(sqldb.exists().where(StudySpacesBooking.booking_id == bid)).scalar()
        if not exists:
            return jsonify({"error": "Cancellation request aborted because of booking '{}'.".format(bid)})
    resp = studyspaces.cancel_room(booking_id)
    return jsonify(resp)
@app.route('/studyspaces/book', methods=['POST'])
def book_room():
    """
    Books a room.

    Required form fields: room, start, end, firstname, lastname, email,
    groupname. Optional: phone, size. Successful bookings are persisted so
    they can later be validated by /studyspaces/cancel.
    """
    try:
        room = int(request.form["room"])
    except (KeyError, ValueError):
        return jsonify({"results": False, "error": "Please specify a correct room id!"})
    try:
        start = parse(request.form["start"])
        end = parse(request.form["end"])
    except KeyError:
        return jsonify({"results": False, "error": "No start and end parameters passed to server!"})
    # required contact fields, mapped from the API's arg names to our form names
    contact = {}
    for arg, field in [("fname", "firstname"), ("lname", "lastname"), ("email", "email"), ("nickname", "groupname")]:
        try:
            contact[arg] = request.form[field]
        except KeyError:
            return jsonify({"results": False, "error": "'{}' is a required parameter!".format(field)})
    # optional custom question fields
    # NOTE(review): both q2533/q2555/q2537 are opaque question ids; q2555 and
    # q2537 both map from "size" - presumably different rooms use different
    # question ids; confirm against the booking backend.
    contact["custom"] = {}
    for arg, field in [("q2533", "phone"), ("q2555", "size"), ("q2537", "size")]:
        try:
            contact["custom"][arg] = request.form[field]
        except KeyError:
            pass
    resp = studyspaces.book_room(room, start.isoformat(), end.isoformat(), **contact)
    # record successful bookings so cancellations can be validated later
    if "error" not in resp:
        save_booking(
            rid=room,
            email=contact["email"],
            start=start.replace(tzinfo=None),
            end=end.replace(tzinfo=None),
            booking_id=resp.get("booking_id")
        )
    return jsonify(resp)
def save_booking(**info):
    """Persist a booking record, attributed to the current user.

    Silently does nothing when no user can be resolved."""
    try:
        user = User.get_or_create()
    except ValueError:
        return
    if user is None:
        return
    info['user'] = user.id
    sqldb.session.add(StudySpacesBooking(**info))
    sqldb.session.commit()
|
import json
import logging
import requests
from datetime import datetime
from django.conf import settings
from optparse import make_option
from django.db import transaction, DEFAULT_DB_ALIAS
from django.core.management.base import BaseCommand
from requests.exceptions import SSLError, ConnectionError, RequestException
from varify.samples.models import Sample, ResultScore
log = logging.getLogger(__name__)
class Command(BaseCommand):
    """
    Management command that refreshes per-result gene rankings for published
    samples using the remote phenotype and gene-ranking services.
    """
    args = '<sample_label sample_label ...>'
    option_list = BaseCommand.option_list + (
        make_option('--database', action='store', dest='database',
                    default=DEFAULT_DB_ALIAS,
                    help='Specifies the target database to load results.'),
        make_option('--force', action='store_true', dest='force',
                    default=False,
                    help='Forces recomputation of all gene rankings')
    )
def handle(self, *args, **options):
    """
    Update gene rankings (ResultScore rank/score) for published samples.

    For each published sample (optionally restricted to the labels passed as
    args), fetch its phenotype from PHENOTYPE_ENDPOINT, send its HPO terms
    and gene list to GENE_RANK_BASE_URL, and store the returned rank/score
    on each result. Samples lacking phenotype data, HPO terms or genes are
    skipped, as are samples already up to date (unless --force).

    Bug fix: the per-result gene lookup previously indexed `[0]` on the
    effects queryset, raising IndexError for results whose variant has no
    effects; it now falls back to None so the existing no-gene guard skips
    the result.
    """
    if not getattr(settings, 'PHENOTYPE_ENDPOINT', None):
        log.error('PHENOTYPE_ENDPOINT must be defined in settings for '
                  'gene rankings to be updated.')
        return
    if not getattr(settings, 'GENE_RANK_BASE_URL', None):
        log.error('GENE_RANK_BASE_URL must be defined in settings for '
                  'gene rankings to be updated.')
        return
    if (not getattr(settings, 'VARIFY_CERT', None) or
            not getattr(settings, 'VARIFY_KEY', None)):
        log.error('VARIFY_CERT and VARIFY_KEY must be defined in settings '
                  'for gene rankings to be updated.')
        return

    database = options.get('database')
    force = options.get('force')

    # Construct the cert from the setting to use in requests to the
    # phenotype endpoint.
    cert = (settings.VARIFY_CERT, settings.VARIFY_KEY)

    # We ignore all the samples that aren't published. They aren't visible
    # to the user so we don't bother updating related scores. If there
    # were sample labels supplied as arguments then we limit the rankings
    # updates to those samples, otherwise we process all samples.
    samples = Sample.objects.filter(published=True)
    if args:
        samples = samples.filter(label__in=args)

    updated_samples = 0
    total_samples = 0

    for sample in samples:
        total_samples += 1

        # Construct the URL from the setting and the sample label. The
        # sample label is used to retrieve the phenotype info on the remote
        # endpoint.
        url = settings.PHENOTYPE_ENDPOINT.format(sample.label)

        # Get the phenotype information for this sample. If the
        # phenotype is unavailable then we can skip this sample.
        try:
            response = requests.get(url, cert=cert, verify=False)
        except SSLError:
            log.exception('Skipping sample "{0}". An SSLError occurred '
                          'during phenotype retrieval request.'
                          .format(sample.label))
            continue
        except ConnectionError:
            log.exception('Skipping sample "{0}". A ConnectionError '
                          'occurred during phenotype retrieval request.'
                          .format(sample.label))
            continue
        except RequestException:
            log.exception('Skipping sample "{0}". The sample has no '
                          'phenotype data associated with it'
                          .format(sample.label))
            continue

        try:
            phenotype_data = json.loads(response.content)
        except ValueError:
            log.error("Could not parse response from {0}, skipping '{1}'."
                      .format(url, sample.label))
            continue

        try:
            phenotype_modified = datetime.strptime(
                phenotype_data['last_modified'], "%Y-%m-%dT%H:%M:%S.%f")
        except ValueError:
            phenotype_modified = datetime.min
            log.warning("Could not parse 'last_modified' field on phenotype "
                        "data. Using datetime.min so that only unranked "
                        "samples will be ranked. If the 'force' flag was "
                        "used then all samples will be updated despite this "
                        "parsing failure.")

        # If the parsed response doesn't contain any HPO terms then we can
        # skip this sample since we cannot rank genes without HPO terms.
        if not phenotype_data.get('hpoAnnotations'):
            log.error("Response from phenotype missing HPO Annotations, "
                      "skipping '{0}'.".format(sample.label))
            continue

        if (not force and sample.phenotype_modified and
                sample.phenotype_modified > phenotype_modified):
            log.debug("Sample '{0}' is already up to date, skipping it."
                      .format(sample.label))
            continue

        # Extract the HPO terms from the data returned from the phenotype
        # endpoint. We need to modify the terms a bit because the phenotype
        # endpoint has terms in the form 'HP_0011263' and the gene ranking
        # enpoint expects them to be of the form 'HP:0011263'.
        hpo_terms = []
        for hpo_annotation in phenotype_data['hpoAnnotations']:
            try:
                hpo_id = str(hpo_annotation.get('hpo_id'))
            except AttributeError:
                continue

            if hpo_id:
                hpo_terms.append(hpo_id.replace('_', ':'))

        # If there are no HPO terms then there will be no rankings so skip
        # this sample to avoid any more computations and requests.
        if not hpo_terms:
            log.warning('Skipping "{0}" because it has no HPO terms '
                        'associated with it.'.format(sample.label))
            continue

        # Compute the unique gene list for the entire sample
        genes = set(sample.results.values_list(
            'variant__effects__transcript__gene__symbol', flat=True))

        # Obviously, if there are no genes then the gene ranking endpoint
        # will have nothing to do so we can safely skip this sample.
        if not genes:
            log.warning('Skipping "{0}" because it has no genes '
                        'associated with it.'.format(sample.label))
            continue

        # We need to convert the genes to strings because the ranking
        # service is not prepared to handle the unicode format that the
        # gene symbols are in when we retrieve them from the models.
        gene_rank_url = "{0}?hpo={1}&genes={2}".format(
            settings.GENE_RANK_BASE_URL, ",".join(hpo_terms),
            ",".join([str(g) for g in genes]))

        try:
            gene_response = requests.get(gene_rank_url)
        except Exception:
            log.exception('Error retrieving gene rankings, skipping '
                          'sample "{0}".'.format(sample.label))
            continue

        gene_data = json.loads(gene_response.content)
        ranked_genes = gene_data['ranked_genes']

        updated_results = 0
        total_results = 0

        for result in sample.results.all():
            total_results += 1

            with transaction.commit_manually(database):
                try:
                    # Get the gene for this result. Since a result can
                    # have more than one gene associated with it, we
                    # return the first gene symbol in the list. This is
                    # the same one that will be shown in the collapsed
                    # gene list on the variant row in the results table.
                    # Guard against results whose variant has no effects:
                    # indexing [0] directly would raise IndexError.
                    gene_symbols = result.variant.effects.values_list(
                        'transcript__gene__symbol', flat=True)
                    gene = gene_symbols[0] if gene_symbols else None

                    # If there is no gene on this result or the gene is
                    # not found in the list of ranked genes then skip this
                    # result.
                    if not gene:
                        log.debug("Result with id {0} has no gene, "
                                  "skipping result.".format(result.id))
                        transaction.rollback()
                        continue

                    # Get the first item in the ranked gene list with a
                    # symbol matching the gene we looked up above for this
                    # result.
                    ranked_gene = next(
                        (r for r in ranked_genes if
                         r.get('symbol').lower() == gene.lower()),
                        None)

                    if not ranked_gene:
                        log.debug("Could not find '{0}' in ranked gene "
                                  "list, skipping result".format(gene))
                        transaction.rollback()
                        continue

                    try:
                        rs = ResultScore.objects.get(result=result)
                        rs.rank = ranked_gene.get('rank')
                        rs.score = ranked_gene.get('score')
                    except ResultScore.DoesNotExist:
                        rs = ResultScore(
                            result=result,
                            rank=ranked_gene.get('rank'),
                            score=ranked_gene.get('score'))
                    rs.save()
                    updated_results += 1
                except Exception:
                    log.exception("Error saving gene ranks and scores for "
                                  "sample '{0}'".format(sample.label))
                    transaction.rollback()
                    continue
                transaction.commit()

        sample.phenotype_modified = datetime.now()
        sample.save()
        log.info("Updated {0} and skipped {1} results in sample '{2}'"
                 .format(updated_results, total_results - updated_results,
                         sample.label))
        updated_samples += 1

    log.info("Updated {0} and skipped {1} samples"
             .format(updated_samples, total_samples-updated_samples))
Avoid possible index error when looking up gene for result
import json
import logging
import requests
from datetime import datetime
from django.conf import settings
from optparse import make_option
from django.db import transaction, DEFAULT_DB_ALIAS
from django.core.management.base import BaseCommand
from requests.exceptions import SSLError, ConnectionError, RequestException
from varify.samples.models import Sample, ResultScore
log = logging.getLogger(__name__)
class Command(BaseCommand):
    """Management command that updates gene rankings for published samples.

    For each published sample (optionally restricted to the labels passed as
    positional arguments), phenotype data is fetched from
    ``settings.PHENOTYPE_ENDPOINT``, its HPO terms are sent to the gene
    ranking service at ``settings.GENE_RANK_BASE_URL``, and the returned
    ranks/scores are stored as ``ResultScore`` rows for each of the sample's
    results.
    """
    args = '<sample_label sample_label ...>'
    option_list = BaseCommand.option_list + (
        make_option('--database', action='store', dest='database',
                    default=DEFAULT_DB_ALIAS,
                    help='Specifies the target database to load results.'),
        make_option('--force', action='store_true', dest='force',
                    default=False,
                    help='Forces recomputation of all gene rankings')
    )

    def handle(self, *args, **options):
        """Fetch phenotype data and update gene rankings for each sample.

        Requires PHENOTYPE_ENDPOINT, GENE_RANK_BASE_URL, VARIFY_CERT and
        VARIFY_KEY to be defined in settings; returns early (after logging)
        when any of them is missing. Individual sample failures are logged
        and skipped so one bad sample cannot abort the whole run.
        """
        if not getattr(settings, 'PHENOTYPE_ENDPOINT', None):
            log.error('PHENOTYPE_ENDPOINT must be defined in settings for '
                      'gene rankings to be updated.')
            return
        if not getattr(settings, 'GENE_RANK_BASE_URL', None):
            log.error('GENE_RANK_BASE_URL must be defined in settings for '
                      'gene rankings to be updated.')
            return
        if (not getattr(settings, 'VARIFY_CERT', None) or
                not getattr(settings, 'VARIFY_KEY', None)):
            log.error('VARIFY_CERT and VARIFY_KEY must be defined in settings '
                      'for gene rankings to be updated.')
            return

        database = options.get('database')
        force = options.get('force')

        # Construct the cert from the setting to use in requests to the
        # phenotype endpoint.
        cert = (settings.VARIFY_CERT, settings.VARIFY_KEY)

        # We ignore all the samples that aren't published. They aren't visible
        # to the user so we don't bother updating related scores. If there
        # were sample labels supplied as arguments then we limit the rankings
        # updates to those samples, otherwise we process all samples.
        samples = Sample.objects.filter(published=True)
        if args:
            samples = samples.filter(label__in=args)

        updated_samples = 0
        total_samples = 0
        for sample in samples:
            total_samples += 1

            # Construct the URL from the setting and the sample label. The
            # sample label is used to retrieve the phenotype info on the
            # remote endpoint.
            url = settings.PHENOTYPE_ENDPOINT.format(sample.label)

            # Get the phenotype information for this sample. If the
            # phenotype is unavailable then we can skip this sample.
            try:
                response = requests.get(url, cert=cert, verify=False)
            except SSLError:
                log.exception('Skipping sample "{0}". An SSLError occurred '
                              'during phenotype retrieval request.'
                              .format(sample.label))
                continue
            except ConnectionError:
                log.exception('Skipping sample "{0}". A ConnectionError '
                              'occurred during phenotype retrieval request.'
                              .format(sample.label))
                continue
            except RequestException:
                log.exception('Skipping sample "{0}". The sample has no '
                              'phenotype data associated with it'
                              .format(sample.label))
                continue

            try:
                phenotype_data = json.loads(response.content)
            except ValueError:
                log.error("Could not parse response from {0}, skipping '{1}'."
                          .format(url, sample.label))
                continue

            try:
                phenotype_modified = datetime.strptime(
                    phenotype_data['last_modified'], "%Y-%m-%dT%H:%M:%S.%f")
            except ValueError:
                phenotype_modified = datetime.min
                # Use log.warning: log.warn is a deprecated alias and the
                # rest of this method already uses warning().
                log.warning("Could not parse 'last_modified' field on "
                            "phenotype data. Using datetime.min so that only "
                            "unranked samples will be ranked. If the 'force' "
                            "flag was used then all samples will be updated "
                            "despite this parsing failure.")

            # If the parsed response doesn't contain any HPO terms then we can
            # skip this sample since we cannot rank genes without HPO terms.
            if not phenotype_data.get('hpoAnnotations'):
                log.error("Response from phenotype missing HPO Annotations, "
                          "skipping '{0}'.".format(sample.label))
                continue

            if (not force and sample.phenotype_modified and
                    sample.phenotype_modified > phenotype_modified):
                log.debug("Sample '{0}' is already up to date, skipping it."
                          .format(sample.label))
                continue

            # Extract the HPO terms from the data returned from the phenotype
            # endpoint. We need to modify the terms a bit because the
            # phenotype endpoint has terms in the form 'HP_0011263' and the
            # gene ranking endpoint expects them to be of the form
            # 'HP:0011263'.
            hpo_terms = []
            for hpo_annotation in phenotype_data['hpoAnnotations']:
                try:
                    hpo_id = hpo_annotation.get('hpo_id')
                except AttributeError:
                    continue
                if hpo_id:
                    hpo_terms.append(hpo_id.replace('_', ':'))

            # If there are no HPO terms then there will be no rankings so skip
            # this sample to avoid any more computations and requests.
            if not hpo_terms:
                log.warning('Skipping "{0}" because it has no HPO terms '
                            'associated with it.'.format(sample.label))
                continue

            # Compute the unique gene list for the entire sample
            genes = set(sample.results.values_list(
                'variant__effects__transcript__gene__symbol', flat=True))

            # Obviously, if there are no genes then the gene ranking endpoint
            # will have nothing to do so we can safely skip this sample.
            if not genes:
                log.warning('Skipping "{0}" because it has no genes '
                            'associated with it.'.format(sample.label))
                continue

            # We need to convert the genes to strings because the ranking
            # service is not prepared to handle the unicode format that the
            # gene symbols are in when we retrieve them from the models.
            gene_rank_url = "{0}?hpo={1}&genes={2}".format(
                settings.GENE_RANK_BASE_URL, ",".join(hpo_terms),
                ",".join([str(g) for g in genes]))
            try:
                gene_response = requests.get(gene_rank_url)
            except Exception:
                log.exception('Error retrieving gene rankings, skipping '
                              'sample "{0}".'.format(sample.label))
                continue

            # Parse the ranking response. Mirror the error handling used for
            # the phenotype response above: a malformed or incomplete payload
            # should skip this sample rather than abort the whole command.
            try:
                ranked_genes = json.loads(gene_response.content)['ranked_genes']
            except (ValueError, KeyError):
                log.error("Could not parse gene rank response from {0}, "
                          "skipping '{1}'.".format(gene_rank_url,
                                                   sample.label))
                continue

            updated_results = 0
            total_results = 0
            for result in sample.results.all():
                total_results += 1
                with transaction.commit_manually(database):
                    try:
                        genes = result.variant.effects\
                            .exclude(transcript__gene__symbol__isnull=True)\
                            .values_list('transcript__gene__symbol', flat=True)
                        # If there is no gene on this result or the gene is
                        # not found in the list of ranked genes then skip this
                        # result.
                        if not genes:
                            log.debug("Result with id {0} has no gene, "
                                      "skipping result.".format(result.id))
                            transaction.rollback()
                            continue
                        # Use the first gene from the list since a result can
                        # have more than one gene associated with it, we
                        # return the first gene symbol in the list. This is
                        # the same one that will be shown in the collapsed
                        # gene list on the variant row in the results table.
                        gene = genes[0]
                        # Get the first item in the ranked gene list with a
                        # symbol matching the gene we looked up above for this
                        # result.
                        ranked_gene = next(
                            (r for r in ranked_genes if
                             r.get('symbol').lower() == gene.lower()),
                            None)
                        if not ranked_gene:
                            log.debug("Could not find '{0}' in ranked gene "
                                      "list, skipping result".format(gene))
                            transaction.rollback()
                            continue
                        try:
                            rs = ResultScore.objects.get(result=result)
                            rs.rank = ranked_gene.get('rank')
                            rs.score = ranked_gene.get('score')
                        except ResultScore.DoesNotExist:
                            rs = ResultScore(
                                result=result,
                                rank=ranked_gene.get('rank'),
                                score=ranked_gene.get('score'))
                        rs.save()
                        updated_results += 1
                    except Exception:
                        log.exception("Error saving gene ranks and scores "
                                      "for sample '{0}'".format(sample.label))
                        transaction.rollback()
                        continue
                    transaction.commit()

            sample.phenotype_modified = datetime.now()
            sample.save()
            log.info("Updated {0} and skipped {1} results in sample '{2}'"
                     .format(updated_results, total_results - updated_results,
                             sample.label))
            updated_samples += 1

        log.info("Updated {0} and skipped {1} samples"
                 .format(updated_samples, total_samples-updated_samples))
|
import tempfile
import unittest
import shutil
import os
from molotov import quickstart, __version__, run
from molotov.tests.support import set_args
class TestQuickStart(unittest.TestCase):
    """Tests for the molotov quickstart ('molostart') console script."""

    def setUp(self):
        self._curdir = os.getcwd()
        self.tempdir = tempfile.mkdtemp()
        self._answers = ['y', 'welp', self.tempdir]

    def tearDown(self):
        os.chdir(self._curdir)
        shutil.rmtree(self.tempdir)

    def _input(self, text):
        # Canned replacement for quickstart's interactive prompt. Answers
        # are served from the end of the list and replenished when empty.
        if not self._answers:
            self._answers = ['y', 'welp', self.tempdir]
        return self._answers.pop()

    def test_version(self):
        quickstart._input = self._input
        with set_args('molostart', '--version') as streams:
            try:
                quickstart.main()
            except SystemExit:
                pass
        self.assertEqual(streams[0].read().strip(), __version__)

    def test_generate(self):
        quickstart._input = self._input
        with set_args('molostart'):
            quickstart.main()
        self.assertEqual(sorted(os.listdir(self.tempdir)),
                         ['Makefile', 'loadtest.py', 'molotov.json'])
        # A second run against the same directory must abort.
        with set_args('molostart'):
            try:
                quickstart.main()
                raise AssertionError()
            except SystemExit:
                pass

    def test_codeworks(self):
        quickstart._input = self._input
        with set_args('molostart'):
            quickstart.main()
        self.assertEqual(sorted(os.listdir(self.tempdir)),
                         ['Makefile', 'loadtest.py', 'molotov.json'])
        # The generated loadtest should be runnable by molotov itself.
        os.chdir(self.tempdir)
        with set_args('molotov', '-cxv', '--max-runs', '1'):
            try:
                run.main()
            except SystemExit:
                pass
Make sure the test fixtures are cleaned up.
import tempfile
import shutil
import os
from molotov import quickstart, __version__, run
from molotov.tests.support import set_args, TestLoop
class TestQuickStart(TestLoop):
    """Quickstart tests based on TestLoop so event-loop fixtures are reset."""

    def setUp(self):
        super(TestQuickStart, self).setUp()
        self._curdir = os.getcwd()
        self.tempdir = tempfile.mkdtemp()
        self._answers = ['y', 'welp', self.tempdir]

    def tearDown(self):
        os.chdir(self._curdir)
        shutil.rmtree(self.tempdir)
        super(TestQuickStart, self).tearDown()

    def _input(self, text):
        # Canned replacement for quickstart's interactive prompt. Answers
        # are served from the end of the list and replenished when empty.
        if not self._answers:
            self._answers = ['y', 'welp', self.tempdir]
        return self._answers.pop()

    def test_version(self):
        quickstart._input = self._input
        with set_args('molostart', '--version') as streams:
            try:
                quickstart.main()
            except SystemExit:
                pass
        self.assertEqual(streams[0].read().strip(), __version__)

    def test_generate(self):
        quickstart._input = self._input
        with set_args('molostart'):
            quickstart.main()
        self.assertEqual(sorted(os.listdir(self.tempdir)),
                         ['Makefile', 'loadtest.py', 'molotov.json'])
        # A second run against the same directory must abort.
        with set_args('molostart'):
            try:
                quickstart.main()
                raise AssertionError()
            except SystemExit:
                pass

    def test_codeworks(self):
        quickstart._input = self._input
        with set_args('molostart'):
            quickstart.main()
        self.assertEqual(sorted(os.listdir(self.tempdir)),
                         ['Makefile', 'loadtest.py', 'molotov.json'])
        # The generated loadtest should be runnable by molotov itself.
        os.chdir(self.tempdir)
        with set_args('molotov', '-cxv', '--max-runs', '1'):
            try:
                run.main()
            except SystemExit:
                pass
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api
class StockPickingWave(models.Model):
    _inherit = 'stock.picking.wave'

    @api.one
    def _count_confirmed_pickings(self):
        """Count the wave's pickings that are in the 'confirmed' state."""
        confirmed = [p for p in self.picking_ids if p.state == 'confirmed']
        self.num_confirmed = len(confirmed)

    @api.one
    def _count_assigned_pickings(self):
        """Count the wave's pickings that are in the 'assigned' state."""
        assigned = [p for p in self.picking_ids if p.state == 'assigned']
        self.num_assigned = len(assigned)

    pickings_products = fields.One2many(
        'stock.move', 'wave', string='Products', readonly=True)
    pickings_operations = fields.One2many(
        'stock.pack.operation', 'wave', string='Operations', readonly=True)
    num_confirmed = fields.Integer(
        compute="_count_confirmed_pickings", string="Confirmed pickings")
    num_assigned = fields.Integer(
        compute="_count_assigned_pickings", string="Assigned pickings")

    @api.one
    def button_check_disponibility(self):
        """Run the availability check on every confirmed picking."""
        to_assign = [p.id for p in self.picking_ids
                     if p.state == 'confirmed']
        self.env['stock.picking'].browse(to_assign).action_assign()

    def action_transfer(self, cr, uid, ids, context=None):
        """Open the transfer details wizard for the assigned pickings."""
        wave = self.browse(cr, uid, ids[0], context=context)
        assigned_ids = [p.id for p in wave.picking_ids
                        if p.state == 'assigned']
        return self.pool['stock.picking'].do_enter_transfer_details(
            cr, uid, assigned_ids, context=context)
class StockMove(models.Model):
    _inherit = 'stock.move'

    # Reverse link from a move to the wave of its picking. Stored so the
    # wave's One2many fields (e.g. 'pickings_products') can search on it.
    wave = fields.Many2one('stock.picking.wave', related='picking_id.wave_id',
                           string='Picking Wave', store=True)
[IMP] stock_picking_wave_transfer_button: add the id of the wave to the context.
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api
class StockPickingWave(models.Model):
    _inherit = 'stock.picking.wave'

    @api.one
    def _count_confirmed_pickings(self):
        """Count the wave's pickings that are in the 'confirmed' state."""
        self.num_confirmed = sum(1 for x in self.picking_ids if x.state ==
                                 'confirmed')

    @api.one
    def _count_assigned_pickings(self):
        """Count the wave's pickings that are in the 'assigned' state."""
        self.num_assigned = sum(1 for x in self.picking_ids if x.state ==
                                'assigned')

    pickings_products = fields.One2many(
        'stock.move', 'wave', string='Products', readonly=True)
    pickings_operations = fields.One2many(
        'stock.pack.operation', 'wave', string='Operations', readonly=True)
    num_confirmed = fields.Integer(
        compute="_count_confirmed_pickings", string="Confirmed pickings")
    num_assigned = fields.Integer(
        compute="_count_assigned_pickings", string="Assigned pickings")

    @api.one
    def button_check_disponibility(self):
        """Run the availability check on every confirmed picking."""
        picking_obj = self.env['stock.picking']
        picking_ids = [picking.id for picking in
                       self.picking_ids if picking.state == 'confirmed']
        pickings = picking_obj.browse(picking_ids)
        pickings.action_assign()

    def action_transfer(self, cr, uid, ids, context=None):
        """Open the transfer details wizard for the assigned pickings.

        The originating wave id is propagated in the context as
        'origin_wave' so the wizard can link back to this wave.
        """
        picking_obj = self.pool['stock.picking']
        wave = self.browse(cr, uid, ids[0], context=context)
        picking_ids = [picking.id for picking in
                       wave.picking_ids if picking.state == 'assigned']
        # 'context' defaults to None; calling None.copy() would raise
        # AttributeError, so normalize to a fresh dict first.
        c = dict(context or {})
        c.update({'origin_wave': wave.id})
        return picking_obj.do_enter_transfer_details(
            cr, uid, picking_ids, context=c)
class StockMove(models.Model):
    _inherit = 'stock.move'

    # Reverse link from a move to the wave of its picking. Stored so the
    # wave's One2many fields (e.g. 'pickings_products') can search on it.
    wave = fields.Many2one('stock.picking.wave', related='picking_id.wave_id',
                           string='Picking Wave', store=True)
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Sample Stats Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
@test_util.test_all_tf_execution_regimes
class _AutoCorrelationTest(object):
  """Shared test cases for `tfp.stats.auto_correlation`.

  Concrete subclasses mix this into `test_util.TestCase` and provide `dtype`
  (e.g. np.float32 or np.complex64) and `use_static_shape`, so the same
  cases run across dtypes and static/dynamic shape regimes.
  """

  @property
  def use_static_shape(self):
    # Whether inputs are fed with fully-known static shapes (True) or as
    # shape-unknown placeholders (False).
    raise NotImplementedError('Subclass failed to implement `use_static_shape`')

  @property
  def dtype(self):
    raise NotImplementedError('Subclass failed to implement `dtype`.')

  def test_constant_sequence_axis_0_max_lags_none_center_false(self):
    x_ = np.array([[0., 0., 0.], [1., 1., 1.]]).astype(self.dtype)
    x_ph = tf1.placeholder_with_default(
        x_, shape=x_.shape if self.use_static_shape else None)
    # Setting normalize = True means we divide by zero.
    auto_corr = tfp.stats.auto_correlation(
        x_ph, axis=1, center=False, normalize=False)
    if self.use_static_shape:
      self.assertEqual((2, 3), auto_corr.shape)
    auto_corr_ = self.evaluate(auto_corr)
    self.assertAllClose([[0., 0., 0.], [1., 1., 1.]], auto_corr_)

  def test_constant_sequence_axis_0_max_lags_none_center_true(self):
    x_ = np.array([[0., 0., 0.], [1., 1., 1.]]).astype(self.dtype)
    x_ph = tf1.placeholder_with_default(
        x_, shape=x_.shape if self.use_static_shape else None)
    # Setting normalize = True means we divide by zero.
    auto_corr = tfp.stats.auto_correlation(
        x_ph, axis=1, normalize=False, center=True)
    if self.use_static_shape:
      self.assertEqual((2, 3), auto_corr.shape)
    auto_corr_ = self.evaluate(auto_corr)
    # Centering a constant sequence yields zeros everywhere.
    self.assertAllClose([[0., 0., 0.], [0., 0., 0.]], auto_corr_)

  def check_results_versus_brute_force(self, x, axis, max_lags, center,
                                       normalize):
    """Compute auto-correlation by brute force, then compare to tf result."""
    # Brute force auto-corr -- avoiding fft and transpositions.
    axis_len = x.shape[axis]
    if max_lags is None:
      max_lags = axis_len - 1
    else:
      max_lags = min(axis_len - 1, max_lags)
    auto_corr_at_lag = []
    if center:
      # NOTE: this mutates the caller's array in place; every caller below
      # passes a freshly generated array, so no state leaks between tests.
      x -= x.mean(axis=axis, keepdims=True)
    for m in range(max_lags + 1):
      auto_corr_at_lag.append(
          (np.take(x, indices=range(0, axis_len - m), axis=axis) * np.conj(
              np.take(x, indices=range(m, axis_len), axis=axis))).mean(
                  axis=axis, keepdims=True))
    rxx = np.concatenate(auto_corr_at_lag, axis=axis)
    if normalize:
      rxx /= np.take(rxx, [0], axis=axis)
    x_ph = tf1.placeholder_with_default(
        x, shape=x.shape if self.use_static_shape else None)
    auto_corr = tfp.stats.auto_correlation(
        x_ph,
        axis=axis,
        max_lags=max_lags,
        center=center,
        normalize=normalize)
    if self.use_static_shape:
      output_shape = list(x.shape)
      output_shape[axis] = max_lags + 1
      self.assertAllEqual(output_shape, auto_corr.shape)
    self.assertAllClose(rxx, self.evaluate(auto_corr), rtol=1e-5, atol=1e-5)

  def test_axis_n1_center_false_max_lags_none(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    x = rng.randn(2, 3, 4).astype(self.dtype)
    if self.dtype in [np.complex64]:
      x = 1j * rng.randn(2, 3, 4).astype(self.dtype)
    self.check_results_versus_brute_force(
        x, axis=-1, max_lags=None, center=False, normalize=False)

  def test_axis_n2_center_false_max_lags_none(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    x = rng.randn(3, 4, 5).astype(self.dtype)
    if self.dtype in [np.complex64]:
      x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
    self.check_results_versus_brute_force(
        x, axis=-2, max_lags=None, center=False, normalize=False)

  def test_axis_n1_center_false_max_lags_none_normalize_true(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    x = rng.randn(2, 3, 4).astype(self.dtype)
    if self.dtype in [np.complex64]:
      x = 1j * rng.randn(2, 3, 4).astype(self.dtype)
    self.check_results_versus_brute_force(
        x, axis=-1, max_lags=None, center=False, normalize=True)

  def test_axis_n2_center_false_max_lags_none_normalize_true(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    x = rng.randn(3, 4, 5).astype(self.dtype)
    if self.dtype in [np.complex64]:
      x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
    self.check_results_versus_brute_force(
        x, axis=-2, max_lags=None, center=False, normalize=True)

  def test_axis_0_center_true_max_lags_none(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    x = rng.randn(3, 4, 5).astype(self.dtype)
    if self.dtype in [np.complex64]:
      x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
    self.check_results_versus_brute_force(
        x, axis=0, max_lags=None, center=True, normalize=False)

  def test_axis_2_center_true_max_lags_1(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    x = rng.randn(3, 4, 5).astype(self.dtype)
    if self.dtype in [np.complex64]:
      x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
    self.check_results_versus_brute_force(
        x, axis=2, max_lags=1, center=True, normalize=False)

  def test_axis_2_center_true_max_lags_100(self):
    # There are less than 100 elements in axis 2, so expect we get back an
    # array the same size as x, despite having asked for 100 lags.
    rng = np.random.RandomState(seed=test_util.test_seed())
    x = rng.randn(3, 4, 5).astype(self.dtype)
    if self.dtype in [np.complex64]:
      x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
    self.check_results_versus_brute_force(
        x, axis=2, max_lags=100, center=True, normalize=False)

  def test_long_orthonormal_sequence_has_corr_length_0(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    l = 10000
    x = rng.randn(l).astype(self.dtype)
    x_ph = tf1.placeholder_with_default(
        x, shape=(l,) if self.use_static_shape else None)
    rxx = tfp.stats.auto_correlation(
        x_ph, max_lags=l // 2, center=True, normalize=False)
    if self.use_static_shape:
      self.assertAllEqual((l // 2 + 1,), rxx.shape)
    rxx_ = self.evaluate(rxx)
    # OSS CPU FFT has some accuracy issues, so this tolerance is a bit bad.
    self.assertAllClose(1., rxx_[0], rtol=0.05)
    # The maximal error in the rest of the sequence is not great.
    self.assertAllClose(np.zeros(l // 2), rxx_[1:], atol=0.1)
    # The mean error in the rest is ok, actually 0.008 when I tested it.
    self.assertLess(np.abs(rxx_[1:]).mean(), 0.02)

  def test_step_function_sequence(self):
    if tf.executing_eagerly() and not self.use_static_shape:
      # TODO(b/122840816): Modify this test so that it runs in eager mode with
      # dynamic shapes, or document that this is the intended behavior.
      return
    rng = np.random.RandomState(seed=test_util.test_seed())
    # x jumps to new random value every 10 steps. So correlation length = 10.
    x = (rng.randint(-10, 10, size=(1000, 1)) * np.ones(
        (1, 10))).ravel().astype(self.dtype)
    x_ph = tf1.placeholder_with_default(
        x, shape=(1000 * 10,) if self.use_static_shape else None)
    rxx = tfp.stats.auto_correlation(
        x_ph, max_lags=1000 * 10 // 2, center=True, normalize=False)
    if self.use_static_shape:
      self.assertAllEqual((1000 * 10 // 2 + 1,), rxx.shape)
    rxx_ = self.evaluate(rxx)
    rxx_ /= rxx_[0]
    # Expect positive correlation for the first 10 lags, then significantly
    # smaller negative.
    self.assertGreater(rxx_[:10].min(), 0)
    # TODO(b/138375951): Re-enable this assertion once we know why its
    # failing.
    # self.assertGreater(rxx_[9], 5 * rxx_[10:20].mean())
    # RXX should be decreasing for the first 10 lags.
    diff = np.diff(rxx_)
    self.assertLess(diff[:10].max(), 0)

  def test_normalization(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    l = 10000
    x = 3 * rng.randn(l).astype(self.dtype)
    x_ph = tf1.placeholder_with_default(
        x, shape=(l,) if self.use_static_shape else None)
    rxx = tfp.stats.auto_correlation(
        x_ph, max_lags=l // 2, center=True, normalize=True)
    if self.use_static_shape:
      self.assertAllEqual((l // 2 + 1,), rxx.shape)
    rxx_ = self.evaluate(rxx)
    # Note that RXX[0] = 1, despite the fact that E[X^2] = 9, and this is
    # due to normalize=True.
    # OSS CPU FFT has some accuracy issues, so this tolerance is a bit bad.
    self.assertAllClose(1., rxx_[0], rtol=0.05)
    # The maximal error in the rest of the sequence is not great.
    self.assertAllClose(np.zeros(l // 2), rxx_[1:], atol=0.1)
    # The mean error in the rest is ok, actually 0.008 when I tested it.
    self.assertLess(np.abs(rxx_[1:]).mean(), 0.02)
@test_util.test_all_tf_execution_regimes
class AutoCorrelationTestStaticShapeFloat32(test_util.TestCase,
                                            _AutoCorrelationTest):
  """Runs `_AutoCorrelationTest` with float32 inputs and static shapes."""

  @property
  def use_static_shape(self):
    return True

  @property
  def dtype(self):
    return np.float32
@test_util.test_all_tf_execution_regimes
class AutoCorrelationTestStaticShapeComplex64(test_util.TestCase,
                                              _AutoCorrelationTest):
  """Runs `_AutoCorrelationTest` with complex64 inputs and static shapes."""

  @property
  def use_static_shape(self):
    return True

  @property
  def dtype(self):
    return np.complex64
@test_util.test_all_tf_execution_regimes
class AutoCorrelationTestDynamicShapeFloat32(test_util.TestCase,
                                             _AutoCorrelationTest):
  """Runs `_AutoCorrelationTest` with float32 inputs and dynamic shapes."""

  @property
  def use_static_shape(self):
    return False

  @property
  def dtype(self):
    return np.float32
@test_util.test_all_tf_execution_regimes
class CovarianceTest(test_util.TestCase):
  """Tests for `tfp.stats.covariance` against a brute-force NumPy baseline."""

  def _np_cov_1d(self, x, y):
    # Sample covariance of two 1-D sample vectors (normalized by N).
    return ((x - x.mean(axis=0)) * (y - y.mean(axis=0))).mean(axis=0)

  def test_batch_scalar(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    # X and Y are correlated, albeit less so in the first component.
    # They both are 100 samples of 3-batch scalars.
    x = rng.randn(100, 3)
    y = x + 0.1 * rng.randn(100, 3)
    x[:, 0] += 0.1 * rng.randn(100)
    cov = tfp.stats.covariance(x, y, sample_axis=0, event_axis=None)
    self.assertAllEqual((3,), cov.shape)
    cov = self.evaluate(cov)
    for i in range(3):  # Iterate over batch index.
      self.assertAllClose(self._np_cov_1d(x[:, i], y[:, i]), cov[i])

  def test_batch_vector_sampaxis0_eventaxisn1(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    # X and Y are correlated, albeit less so in the first component.
    # They both are both 100 samples of 3-batch vectors in R^2.
    x = rng.randn(100, 3, 2)
    y = x + 0.1 * rng.randn(100, 3, 2)
    x[:, :, 0] += 0.1 * rng.randn(100, 3)
    cov = tfp.stats.covariance(x, y, event_axis=-1)
    self.assertAllEqual((3, 2, 2), cov.shape)
    cov = self.evaluate(cov)
    # keepdims should retain a size-1 sample dimension, same values.
    cov_kd = tfp.stats.covariance(x, y, event_axis=-1, keepdims=True)
    self.assertAllEqual((1, 3, 2, 2), cov_kd.shape)
    cov_kd = self.evaluate(cov_kd)
    self.assertAllEqual(cov, cov_kd[0, ...])
    for i in range(3):  # Iterate over batch index.
      x_i = x[:, i, :]  # Pick out ith batch of samples.
      y_i = y[:, i, :]
      cov_i = cov[i, :, :]
      for m in range(2):  # Iterate over row of matrix
        for n in range(2):  # Iterate over column of matrix
          self.assertAllClose(
              self._np_cov_1d(x_i[:, m], y_i[:, n]), cov_i[m, n])

  def test_batch_vector_sampaxis13_eventaxis2(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    # x.shape = batch, sample, event, sample
    x = rng.randn(4, 10, 2, 10)
    y = x + 0.1 * rng.randn(10, 2, 10)
    x[:, :, 0, :] += 0.1 * rng.randn(4, 10, 10)
    cov = tfp.stats.covariance(x, y, sample_axis=[1, 3], event_axis=[2])
    self.assertAllEqual((4, 2, 2), cov.shape)
    cov = self.evaluate(cov)
    cov_kd = tfp.stats.covariance(
        x, y, sample_axis=[1, 3], event_axis=[2], keepdims=True)
    self.assertAllEqual((4, 1, 2, 2, 1), cov_kd.shape)
    cov_kd = self.evaluate(cov_kd)
    self.assertAllEqual(cov, cov_kd[:, 0, :, :, 0])
    for i in range(4):  # Iterate over batch index.
      # Get ith batch of samples, and permute/reshape to [n_samples, n_events]
      x_i = np.reshape(np.transpose(x[i, :, :, :], [0, 2, 1]), [10 * 10, 2])
      y_i = np.reshape(np.transpose(y[i, :, :, :], [0, 2, 1]), [10 * 10, 2])
      # Will compare with ith batch of covariance.
      cov_i = cov[i, :, :]
      for m in range(2):  # Iterate over row of matrix
        for n in range(2):  # Iterate over column of matrix
          self.assertAllClose(
              self._np_cov_1d(x_i[:, m], y_i[:, n]), cov_i[m, n])

  def test_batch_vector_sampaxis02_eventaxis1(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    # x.shape = sample, event, sample, batch
    x = rng.randn(2, 3, 4, 5)
    y = x + 0.1 * rng.randn(2, 3, 4, 5)
    cov = tfp.stats.covariance(x, y, sample_axis=[0, 2], event_axis=[1])
    self.assertAllEqual((3, 3, 5), cov.shape)
    cov = self.evaluate(cov)
    cov_kd = tfp.stats.covariance(
        x, y, sample_axis=[0, 2], event_axis=[1], keepdims=True)
    self.assertAllEqual((1, 3, 3, 1, 5), cov_kd.shape)
    cov_kd = self.evaluate(cov_kd)
    self.assertAllEqual(cov, cov_kd[0, :, :, 0, :])
    for i in range(5):  # Iterate over batch index.
      # Get ith batch of samples, and permute/reshape to [n_samples, n_events]
      x_i = np.reshape(np.transpose(x[:, :, :, i], [0, 2, 1]), [2 * 4, 3])
      y_i = np.reshape(np.transpose(y[:, :, :, i], [0, 2, 1]), [2 * 4, 3])
      # Will compare with ith batch of covariance.
      cov_i = cov[:, :, i]
      for m in range(3):  # Iterate over row of matrix
        for n in range(3):  # Iterate over column of matrix
          self.assertAllClose(
              self._np_cov_1d(x_i[:, m], y_i[:, n]), cov_i[m, n])

  def test_batch_vector_sampaxis03_eventaxis12_dynamic(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    # x.shape = sample, event, event, sample, batch
    x = rng.randn(2, 3, 4, 5, 6)
    y = x + 0.1 * rng.randn(2, 3, 4, 5, 6)
    x_ph = tf1.placeholder_with_default(x, shape=None)
    y_ph = tf1.placeholder_with_default(y, shape=None)
    cov = tfp.stats.covariance(
        x_ph, y_ph, sample_axis=[0, 3], event_axis=[1, 2])
    cov = self.evaluate(cov)
    self.assertAllEqual((3, 4, 3, 4, 6), cov.shape)
    cov_kd = tfp.stats.covariance(
        x_ph, y_ph, sample_axis=[0, 3], event_axis=[1, 2], keepdims=True)
    cov_kd = self.evaluate(cov_kd)
    self.assertAllEqual((1, 3, 4, 3, 4, 1, 6), cov_kd.shape)
    self.assertAllEqual(cov, cov_kd[0, :, :, :, :, 0, :])
    for i in range(6):  # Iterate over batch index.
      # Get ith batch of samples, and permute/reshape to [n_samples, n_events]
      x_i = np.reshape(
          np.transpose(x[:, :, :, :, i], [0, 3, 1, 2]), [2 * 5, 3 * 4])
      y_i = np.reshape(
          np.transpose(y[:, :, :, :, i], [0, 3, 1, 2]), [2 * 5, 3 * 4])
      # Will compare with ith batch of covariance.
      cov_i = np.reshape(cov[..., i], [3 * 4, 3 * 4])
      for m in range(0, 3 * 4, 3):  # Iterate over some rows of matrix
        for n in range(0, 3 * 4, 3):  # Iterate over some columns of matrix
          self.assertAllClose(
              self._np_cov_1d(x_i[:, m], y_i[:, n]), cov_i[m, n])

  def test_non_contiguous_event_axis_raises(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    # They both are both 100 samples of 3-batch vectors in R^2.
    x = rng.randn(100, 3, 2)
    y = x + 0.1 * rng.randn(100, 3, 2)
    with self.assertRaisesRegexp(ValueError, 'must be contiguous'):
      tfp.stats.covariance(x, y, sample_axis=1, event_axis=[0, 2])

  def test_overlapping_axis_raises(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    # They both are both 100 samples of 3-batch vectors in R^2.
    x = rng.randn(100, 3, 2)
    y = x + 0.1 * rng.randn(100, 3, 2)
    with self.assertRaisesRegexp(ValueError, 'overlapped'):
      tfp.stats.covariance(x, y, sample_axis=[0, 1], event_axis=[1, 2])

  def test_batch_vector_shape_dtype_ok(self):
    # Test addresses a particular bug.
    x = tf.ones((5, 2))
    # This next line failed, due to concatenating [float32, int32, int32]
    # traceback went to tf.concat((batch_axis, event_axis, sample_axis), 0)
    # Test passes when this does not fail.
    tfp.stats.covariance(x)
@test_util.test_all_tf_execution_regimes
class CorrelationTest(test_util.TestCase):
  """Tests for `tfp.stats.correlation` against a brute-force NumPy baseline."""

  def _np_corr_1d(self, x, y):
    # Pearson correlation of two 1-D sample vectors (normalized by N).
    assert x.ndim == y.ndim == 1
    x = x - x.mean()
    y = y - y.mean()
    sigma_x = np.sqrt((x**2).mean())
    sigma_y = np.sqrt((y**2).mean())
    return (x * y).mean() / (sigma_x * sigma_y)

  def test_batch_scalar(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    # X and Y are correlated, albeit less so in the first component.
    # They both are 100 samples of 3-batch scalars.
    x = rng.randn(100, 3)
    y = x + 0.1 * rng.randn(100, 3)
    x[:, 0] += 0.1 * rng.randn(100)
    corr = tfp.stats.correlation(x, y, sample_axis=0, event_axis=None)
    self.assertAllEqual((3,), corr.shape)
    corr = self.evaluate(corr)
    for i in range(3):  # Iterate over batch index.
      self.assertAllClose(self._np_corr_1d(x[:, i], y[:, i]), corr[i])

  def test_diagonal_of_correlation_matrix_x_with_x_is_one(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    # Some big numbers, to test stability.
    x = np.float32(1e10 * rng.randn(100, 3))
    corr = tfp.stats.correlation(x, sample_axis=0, event_axis=1)
    self.assertAllEqual((3, 3), corr.shape)
    corr = self.evaluate(corr)
    self.assertAllClose([1., 1., 1.], np.diag(corr))

  def test_batch_vector_sampaxis0_eventaxisn1(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    # X and Y are correlated, albeit less so in the first component.
    # They both are both 100 samples of 3-batch vectors in R^2.
    x = rng.randn(100, 3, 2)
    y = x + 0.1 * rng.randn(100, 3, 2)
    x[:, :, 0] += 0.1 * rng.randn(100, 3)
    corr = tfp.stats.correlation(x, y, event_axis=-1)
    self.assertAllEqual((3, 2, 2), corr.shape)
    corr = self.evaluate(corr)
    # keepdims should retain a size-1 sample dimension, same values.
    corr_kd = tfp.stats.correlation(x, y, event_axis=-1, keepdims=True)
    self.assertAllEqual((1, 3, 2, 2), corr_kd.shape)
    corr_kd = self.evaluate(corr_kd)
    self.assertAllEqual(corr, corr_kd[0, ...])
    for i in range(3):  # Iterate over batch index.
      x_i = x[:, i, :]  # Pick out ith batch of samples.
      y_i = y[:, i, :]
      corr_i = corr[i, :, :]
      for m in range(2):  # Iterate over row of matrix
        for n in range(2):  # Iterate over column of matrix
          self.assertAllClose(
              self._np_corr_1d(x_i[:, m], y_i[:, n]), corr_i[m, n])
@test_util.test_all_tf_execution_regimes
class CholeskyCovarianceTest(test_util.TestCase):
  """Tests for `tfp.stats.cholesky_covariance`."""

  def test_batch_vector_sampaxis1_eventaxis2(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    # x.shape = [2, 5000, 2],
    # 2-batch members, 5000 samples each, events in R^2.
    x0 = rng.randn(5000, 2)
    x1 = 2 * rng.randn(5000, 2)
    x = np.stack((x0, x1), axis=0)
    # chol.shape = [2 (batch), 2x2 (event x event)]
    chol = tfp.stats.cholesky_covariance(x, sample_axis=1)
    chol_kd = tfp.stats.cholesky_covariance(x, sample_axis=1, keepdims=True)
    # Make sure static shape of keepdims works
    self.assertAllEqual((2, 2, 2), chol.shape)
    self.assertAllEqual((2, 1, 2, 2), chol_kd.shape)
    chol, chol_kd = self.evaluate([chol, chol_kd])
    # keepdims should not change the numbers in the result.
    self.assertAllEqual(chol, np.squeeze(chol_kd, axis=1))
    # Covariance is trivial since these are independent normals.
    # Tolerance chosen to be 2x the lowest passing atol.
    self.assertAllClose(np.eye(2), chol[0, ...], atol=0.06)
    self.assertAllClose(2 * np.eye(2), chol[1, ...], atol=0.06)
@test_util.test_all_tf_execution_regimes
class VarianceTest(test_util.TestCase):
  """Light test: Most methods tested implicitly by CovarianceTest."""

  def test_independent_uniform_samples(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    x = rng.rand(10, 10, 10)
    # sample_axis=None reduces over all axes to a scalar.
    var = tfp.stats.variance(x, sample_axis=None)
    self.assertAllEqual((), var.shape)
    var_kd = tfp.stats.variance(x, sample_axis=None, keepdims=True)
    self.assertAllEqual((1, 1, 1), var_kd.shape)
    var, var_kd = self.evaluate([var, var_kd])
    # keepdims changes only the shape, never the value.
    self.assertAllEqual(var, var_kd.reshape(()))
    self.assertAllClose(np.var(x), var)
@test_util.test_all_tf_execution_regimes
class StddevTest(test_util.TestCase):
  """Light test: Most methods tested implicitly by VarianceTest."""

  def test_independent_uniform_samples(self):
    rng = np.random.RandomState(seed=test_util.test_seed())
    x = rng.rand(10, 10, 10)
    # Reduce over a mix of positive and negative axis indices.
    stddev = tfp.stats.stddev(x, sample_axis=[1, -1])
    self.assertAllEqual((10,), stddev.shape)
    stddev_kd = tfp.stats.stddev(x, sample_axis=[1, -1], keepdims=True)
    self.assertAllEqual((10, 1, 1), stddev_kd.shape)
    stddev, stddev_kd = self.evaluate([stddev, stddev_kd])
    # keepdims changes only the shape, never the value.
    self.assertAllEqual(stddev, stddev_kd.reshape((10,)))
    self.assertAllClose(np.std(x, axis=(1, -1)), stddev)
@test_util.test_all_tf_execution_regimes
class LogAverageProbsTest(test_util.TestCase):
  """Tests for tfp.stats.log_average_probs."""

  def test_mathematical_correctness_bernoulli(self):
    logits = tf.random.normal([10, 3, 4], seed=test_util.test_seed())
    # The "expected" calculation is numerically naive.
    probs = tf.math.sigmoid(logits)
    expected = tf.math.log(tf.reduce_mean(probs, axis=0))
    actual = tfp.stats.log_average_probs(logits, validate_args=True)
    self.assertAllClose(*self.evaluate([expected, actual]), rtol=1e-5, atol=0.)

  def test_mathematical_correctness_categorical(self):
    logits = tf.random.normal([10, 3, 4], seed=test_util.test_seed())
    # The "expected" calculation is numerically naive.
    probs = tf.math.softmax(logits, axis=-1)
    expected = tf.math.log(tf.reduce_mean(probs, axis=0))
    actual = tfp.stats.log_average_probs(
        logits, event_axis=-1, validate_args=True)
    self.assertAllClose(*self.evaluate([expected, actual]), rtol=1e-5, atol=0.)

  def test_bad_axis_static(self):
    logits = tf.random.normal([10, 3, 4], seed=test_util.test_seed())
    # assertRaisesRegex: `assertRaisesRegexp` is a deprecated alias that was
    # removed in Python 3.12.  Also escape the final period so it is matched
    # literally rather than as "any character".
    with self.assertRaisesRegex(ValueError, r'.*must be distinct\.'):
      tfp.stats.log_average_probs(
          logits,
          sample_axis=[0, 1, 2],
          event_axis=-1,
          validate_args=True)

  def test_bad_axis_dynamic(self):
    if tf.executing_eagerly():
      return
    # Use test_util.test_seed() for consistency with the other tests in this
    # class instead of a hard-coded seed.
    logits = tf.random.normal([10, 3, 4], seed=test_util.test_seed())
    event_axis = tf.Variable(-1)
    with self.assertRaisesOpError(
        r'Arguments `sample_axis` and `event_axis` must be distinct.'):
      self.evaluate(event_axis.initializer)
      self.evaluate(tfp.stats.log_average_probs(
          logits,
          sample_axis=[0, 1, 2],
          event_axis=event_axis,
          validate_args=True))
if __name__ == '__main__':
  # Discover and run all TensorFlow test cases defined in this module.
  tf.test.main()
Making tests more robust by using a NumPy-specific random state.
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Sample Stats Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
@test_util.test_all_tf_execution_regimes
# NOTE(review): the decorator is applied to a plain-`object` mixin rather
# than a TestCase subclass — confirm the decorator tolerates non-TestCases.
class _AutoCorrelationTest(object):
  """Shared tests for tfp.stats.auto_correlation.

  Concrete subclasses must supply `dtype` and `use_static_shape`.
  """

  @property
  def use_static_shape(self):
    # Whether placeholders are created with a fully-known static shape.
    raise NotImplementedError('Subclass failed to implement `use_static_shape`')

  @property
  def dtype(self):
    # Numpy dtype of the test data (e.g. np.float32 or np.complex64).
    raise NotImplementedError('Subclass failed to implement `dtype`.')

  def test_constant_sequence_axis_0_max_lags_none_center_false(self):
    x_ = np.array([[0., 0., 0.], [1., 1., 1.]]).astype(self.dtype)
    x_ph = tf1.placeholder_with_default(
        x_, shape=x_.shape if self.use_static_shape else None)
    # Setting normalize = True means we divide by zero.
    auto_corr = tfp.stats.auto_correlation(
        x_ph, axis=1, center=False, normalize=False)
    if self.use_static_shape:
      self.assertEqual((2, 3), auto_corr.shape)
    auto_corr_ = self.evaluate(auto_corr)
    # Uncentered auto-corr of a constant sequence is the constant squared.
    self.assertAllClose([[0., 0., 0.], [1., 1., 1.]], auto_corr_)

  def test_constant_sequence_axis_0_max_lags_none_center_true(self):
    x_ = np.array([[0., 0., 0.], [1., 1., 1.]]).astype(self.dtype)
    x_ph = tf1.placeholder_with_default(
        x_, shape=x_.shape if self.use_static_shape else None)
    # Setting normalize = True means we divide by zero.
    auto_corr = tfp.stats.auto_correlation(
        x_ph, axis=1, normalize=False, center=True)
    if self.use_static_shape:
      self.assertEqual((2, 3), auto_corr.shape)
    auto_corr_ = self.evaluate(auto_corr)
    # Centering a constant sequence leaves all zeros.
    self.assertAllClose([[0., 0., 0.], [0., 0., 0.]], auto_corr_)

  def check_results_versus_brute_force(self, x, axis, max_lags, center,
                                       normalize):
    """Compute auto-correlation by brute force, then compare to tf result."""
    # Brute-force auto-corr -- avoiding fft and transpositions.
    axis_len = x.shape[axis]
    if max_lags is None:
      max_lags = axis_len - 1
    else:
      max_lags = min(axis_len - 1, max_lags)
    auto_corr_at_lag = []
    if center:
      x -= x.mean(axis=axis, keepdims=True)
    for m in range(max_lags + 1):
      # Lag-m term: mean of x[0:N-m] * conj(x[m:N]) along `axis`.
      auto_corr_at_lag.append(
          (np.take(x, indices=range(0, axis_len - m), axis=axis) * np.conj(
              np.take(x, indices=range(m, axis_len), axis=axis))).mean(
                  axis=axis, keepdims=True))
    rxx = np.concatenate(auto_corr_at_lag, axis=axis)
    if normalize:
      rxx /= np.take(rxx, [0], axis=axis)
    x_ph = tf1.placeholder_with_default(
        x, shape=x.shape if self.use_static_shape else None)
    auto_corr = tfp.stats.auto_correlation(
        x_ph,
        axis=axis,
        max_lags=max_lags,
        center=center,
        normalize=normalize)
    if self.use_static_shape:
      output_shape = list(x.shape)
      output_shape[axis] = max_lags + 1
      self.assertAllEqual(output_shape, auto_corr.shape)
    self.assertAllClose(rxx, self.evaluate(auto_corr), rtol=1e-5, atol=1e-5)

  def test_axis_n1_center_false_max_lags_none(self):
    rng = test_util.test_np_rng()
    x = rng.randn(2, 3, 4).astype(self.dtype)
    if self.dtype in [np.complex64]:
      # Exercise the complex code path with purely imaginary data.
      x = 1j * rng.randn(2, 3, 4).astype(self.dtype)
    self.check_results_versus_brute_force(
        x, axis=-1, max_lags=None, center=False, normalize=False)

  def test_axis_n2_center_false_max_lags_none(self):
    rng = test_util.test_np_rng()
    x = rng.randn(3, 4, 5).astype(self.dtype)
    if self.dtype in [np.complex64]:
      x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
    self.check_results_versus_brute_force(
        x, axis=-2, max_lags=None, center=False, normalize=False)

  def test_axis_n1_center_false_max_lags_none_normalize_true(self):
    rng = test_util.test_np_rng()
    x = rng.randn(2, 3, 4).astype(self.dtype)
    if self.dtype in [np.complex64]:
      x = 1j * rng.randn(2, 3, 4).astype(self.dtype)
    self.check_results_versus_brute_force(
        x, axis=-1, max_lags=None, center=False, normalize=True)

  def test_axis_n2_center_false_max_lags_none_normalize_true(self):
    rng = test_util.test_np_rng()
    x = rng.randn(3, 4, 5).astype(self.dtype)
    if self.dtype in [np.complex64]:
      x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
    self.check_results_versus_brute_force(
        x, axis=-2, max_lags=None, center=False, normalize=True)

  def test_axis_0_center_true_max_lags_none(self):
    rng = test_util.test_np_rng()
    x = rng.randn(3, 4, 5).astype(self.dtype)
    if self.dtype in [np.complex64]:
      x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
    self.check_results_versus_brute_force(
        x, axis=0, max_lags=None, center=True, normalize=False)

  def test_axis_2_center_true_max_lags_1(self):
    rng = test_util.test_np_rng()
    x = rng.randn(3, 4, 5).astype(self.dtype)
    if self.dtype in [np.complex64]:
      x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
    self.check_results_versus_brute_force(
        x, axis=2, max_lags=1, center=True, normalize=False)

  def test_axis_2_center_true_max_lags_100(self):
    # There are less than 100 elements in axis 2, so expect we get back an array
    # the same size as x, despite having asked for 100 lags.
    rng = test_util.test_np_rng()
    x = rng.randn(3, 4, 5).astype(self.dtype)
    if self.dtype in [np.complex64]:
      x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
    self.check_results_versus_brute_force(
        x, axis=2, max_lags=100, center=True, normalize=False)

  def test_long_orthonormal_sequence_has_corr_length_0(self):
    rng = test_util.test_np_rng()
    l = 10000
    x = rng.randn(l).astype(self.dtype)
    x_ph = tf1.placeholder_with_default(
        x, shape=(l,) if self.use_static_shape else None)
    rxx = tfp.stats.auto_correlation(
        x_ph, max_lags=l // 2, center=True, normalize=False)
    if self.use_static_shape:
      self.assertAllEqual((l // 2 + 1,), rxx.shape)
    rxx_ = self.evaluate(rxx)
    # OSS CPU FFT has some accuracy issues, so this tolerance is a bit bad.
    self.assertAllClose(1., rxx_[0], rtol=0.05)
    # The maximal error in the rest of the sequence is not great.
    self.assertAllClose(np.zeros(l // 2), rxx_[1:], atol=0.1)
    # The mean error in the rest is ok, actually 0.008 when I tested it.
    self.assertLess(np.abs(rxx_[1:]).mean(), 0.02)

  def test_step_function_sequence(self):
    if tf.executing_eagerly() and not self.use_static_shape:
      # TODO(b/122840816): Modify this test so that it runs in eager mode with
      # dynamic shapes, or document that this is the intended behavior.
      return
    rng = test_util.test_np_rng()
    # x jumps to new random value every 10 steps. So correlation length = 10.
    x = (rng.randint(-10, 10, size=(1000, 1)) * np.ones(
        (1, 10))).ravel().astype(self.dtype)
    x_ph = tf1.placeholder_with_default(
        x, shape=(1000 * 10,) if self.use_static_shape else None)
    rxx = tfp.stats.auto_correlation(
        x_ph, max_lags=1000 * 10 // 2, center=True, normalize=False)
    if self.use_static_shape:
      self.assertAllEqual((1000 * 10 // 2 + 1,), rxx.shape)
    rxx_ = self.evaluate(rxx)
    rxx_ /= rxx_[0]
    # Expect positive correlation for the first 10 lags, then significantly
    # smaller negative.
    self.assertGreater(rxx_[:10].min(), 0)
    # TODO(b/138375951): Re-enable this assertion once we know why its
    # failing.
    # self.assertGreater(rxx_[9], 5 * rxx_[10:20].mean())
    # RXX should be decreasing for the first 10 lags.
    diff = np.diff(rxx_)
    self.assertLess(diff[:10].max(), 0)

  def test_normalization(self):
    rng = test_util.test_np_rng()
    l = 10000
    x = 3 * rng.randn(l).astype(self.dtype)
    x_ph = tf1.placeholder_with_default(
        x, shape=(l,) if self.use_static_shape else None)
    rxx = tfp.stats.auto_correlation(
        x_ph, max_lags=l // 2, center=True, normalize=True)
    if self.use_static_shape:
      self.assertAllEqual((l // 2 + 1,), rxx.shape)
    rxx_ = self.evaluate(rxx)
    # Note that RXX[0] = 1, despite the fact that E[X^2] = 9, and this is
    # due to normalize=True.
    # OSS CPU FFT has some accuracy issues, so this tolerance is a bit bad.
    self.assertAllClose(1., rxx_[0], rtol=0.05)
    # The maximal error in the rest of the sequence is not great.
    self.assertAllClose(np.zeros(l // 2), rxx_[1:], atol=0.1)
    # The mean error in the rest is ok, actually 0.008 when I tested it.
    self.assertLess(np.abs(rxx_[1:]).mean(), 0.02)
@test_util.test_all_tf_execution_regimes
class AutoCorrelationTestStaticShapeFloat32(test_util.TestCase,
                                            _AutoCorrelationTest):
  """Runs _AutoCorrelationTest with float32 data and static shapes."""

  @property
  def dtype(self):
    return np.float32

  @property
  def use_static_shape(self):
    return True
@test_util.test_all_tf_execution_regimes
class AutoCorrelationTestStaticShapeComplex64(test_util.TestCase,
                                              _AutoCorrelationTest):
  """Runs _AutoCorrelationTest with complex64 data and static shapes."""

  @property
  def dtype(self):
    return np.complex64

  @property
  def use_static_shape(self):
    return True
@test_util.test_all_tf_execution_regimes
class AutoCorrelationTestDynamicShapeFloat32(test_util.TestCase,
                                             _AutoCorrelationTest):
  """Runs _AutoCorrelationTest with float32 data and dynamic shapes."""

  @property
  def dtype(self):
    return np.float32

  @property
  def use_static_shape(self):
    return False
@test_util.test_all_tf_execution_regimes
class CovarianceTest(test_util.TestCase):
  """Tests for tfp.stats.covariance."""

  def _np_cov_1d(self, x, y):
    # Naive (population) covariance of two 1-D-per-batch sample arrays.
    return ((x - x.mean(axis=0)) * (y - y.mean(axis=0))).mean(axis=0)

  def test_batch_scalar(self):
    rng = test_util.test_np_rng()
    # X and Y are correlated, albeit less so in the first component.
    # They both are 100 samples of 3-batch scalars.
    x = rng.randn(100, 3)
    y = x + 0.1 * rng.randn(100, 3)
    x[:, 0] += 0.1 * rng.randn(100)
    cov = tfp.stats.covariance(x, y, sample_axis=0, event_axis=None)
    self.assertAllEqual((3,), cov.shape)
    cov = self.evaluate(cov)
    for i in range(3):  # Iterate over batch index.
      self.assertAllClose(self._np_cov_1d(x[:, i], y[:, i]), cov[i])

  def test_batch_vector_sampaxis0_eventaxisn1(self):
    rng = test_util.test_np_rng()
    # X and Y are correlated, albeit less so in the first component.
    # They both are both 100 samples of 3-batch vectors in R^2.
    x = rng.randn(100, 3, 2)
    y = x + 0.1 * rng.randn(100, 3, 2)
    x[:, :, 0] += 0.1 * rng.randn(100, 3)
    cov = tfp.stats.covariance(x, y, event_axis=-1)
    self.assertAllEqual((3, 2, 2), cov.shape)
    cov = self.evaluate(cov)
    # keepdims retains a size-1 sample dim but must not change values.
    cov_kd = tfp.stats.covariance(x, y, event_axis=-1, keepdims=True)
    self.assertAllEqual((1, 3, 2, 2), cov_kd.shape)
    cov_kd = self.evaluate(cov_kd)
    self.assertAllEqual(cov, cov_kd[0, ...])
    for i in range(3):  # Iterate over batch index.
      x_i = x[:, i, :]  # Pick out ith batch of samples.
      y_i = y[:, i, :]
      cov_i = cov[i, :, :]
      for m in range(2):  # Iterate over row of matrix
        for n in range(2):  # Iterate over column of matrix
          self.assertAllClose(
              self._np_cov_1d(x_i[:, m], y_i[:, n]), cov_i[m, n])

  def test_batch_vector_sampaxis13_eventaxis2(self):
    rng = test_util.test_np_rng()
    # x.shape = batch, sample, event, sample
    x = rng.randn(4, 10, 2, 10)
    y = x + 0.1 * rng.randn(10, 2, 10)
    x[:, :, 0, :] += 0.1 * rng.randn(4, 10, 10)
    cov = tfp.stats.covariance(x, y, sample_axis=[1, 3], event_axis=[2])
    self.assertAllEqual((4, 2, 2), cov.shape)
    cov = self.evaluate(cov)
    cov_kd = tfp.stats.covariance(
        x, y, sample_axis=[1, 3], event_axis=[2], keepdims=True)
    self.assertAllEqual((4, 1, 2, 2, 1), cov_kd.shape)
    cov_kd = self.evaluate(cov_kd)
    self.assertAllEqual(cov, cov_kd[:, 0, :, :, 0])
    for i in range(4):  # Iterate over batch index.
      # Get ith batch of samples, and permute/reshape to [n_samples, n_events]
      x_i = np.reshape(np.transpose(x[i, :, :, :], [0, 2, 1]), [10 * 10, 2])
      y_i = np.reshape(np.transpose(y[i, :, :, :], [0, 2, 1]), [10 * 10, 2])
      # Will compare with ith batch of covariance.
      cov_i = cov[i, :, :]
      for m in range(2):  # Iterate over row of matrix
        for n in range(2):  # Iterate over column of matrix
          self.assertAllClose(
              self._np_cov_1d(x_i[:, m], y_i[:, n]), cov_i[m, n])

  def test_batch_vector_sampaxis02_eventaxis1(self):
    rng = test_util.test_np_rng()
    # x.shape = sample, event, sample, batch
    x = rng.randn(2, 3, 4, 5)
    y = x + 0.1 * rng.randn(2, 3, 4, 5)
    cov = tfp.stats.covariance(x, y, sample_axis=[0, 2], event_axis=[1])
    self.assertAllEqual((3, 3, 5), cov.shape)
    cov = self.evaluate(cov)
    cov_kd = tfp.stats.covariance(
        x, y, sample_axis=[0, 2], event_axis=[1], keepdims=True)
    self.assertAllEqual((1, 3, 3, 1, 5), cov_kd.shape)
    cov_kd = self.evaluate(cov_kd)
    self.assertAllEqual(cov, cov_kd[0, :, :, 0, :])
    for i in range(5):  # Iterate over batch index.
      # Get ith batch of samples, and permute/reshape to [n_samples, n_events]
      x_i = np.reshape(np.transpose(x[:, :, :, i], [0, 2, 1]), [2 * 4, 3])
      y_i = np.reshape(np.transpose(y[:, :, :, i], [0, 2, 1]), [2 * 4, 3])
      # Will compare with ith batch of covariance.
      cov_i = cov[:, :, i]
      for m in range(3):  # Iterate over row of matrix
        for n in range(3):  # Iterate over column of matrix
          self.assertAllClose(
              self._np_cov_1d(x_i[:, m], y_i[:, n]), cov_i[m, n])

  def test_batch_vector_sampaxis03_eventaxis12_dynamic(self):
    rng = test_util.test_np_rng()
    # x.shape = sample, event, event, sample, batch
    x = rng.randn(2, 3, 4, 5, 6)
    y = x + 0.1 * rng.randn(2, 3, 4, 5, 6)
    x_ph = tf1.placeholder_with_default(x, shape=None)
    y_ph = tf1.placeholder_with_default(y, shape=None)
    cov = tfp.stats.covariance(
        x_ph, y_ph, sample_axis=[0, 3], event_axis=[1, 2])
    cov = self.evaluate(cov)
    self.assertAllEqual((3, 4, 3, 4, 6), cov.shape)
    cov_kd = tfp.stats.covariance(
        x_ph, y_ph, sample_axis=[0, 3], event_axis=[1, 2], keepdims=True)
    cov_kd = self.evaluate(cov_kd)
    self.assertAllEqual((1, 3, 4, 3, 4, 1, 6), cov_kd.shape)
    self.assertAllEqual(cov, cov_kd[0, :, :, :, :, 0, :])
    for i in range(6):  # Iterate over batch index.
      # Get ith batch of samples, and permute/reshape to [n_samples, n_events]
      x_i = np.reshape(
          np.transpose(x[:, :, :, :, i], [0, 3, 1, 2]), [2 * 5, 3 * 4])
      y_i = np.reshape(
          np.transpose(y[:, :, :, :, i], [0, 3, 1, 2]), [2 * 5, 3 * 4])
      # Will compare with ith batch of covariance.
      cov_i = np.reshape(cov[..., i], [3 * 4, 3 * 4])
      for m in range(0, 3 * 4, 3):  # Iterate over some rows of matrix
        for n in range(0, 3 * 4, 3):  # Iterate over some columns of matrix
          self.assertAllClose(
              self._np_cov_1d(x_i[:, m], y_i[:, n]), cov_i[m, n])

  def test_non_contiguous_event_axis_raises(self):
    rng = test_util.test_np_rng()
    # They both are both 100 samples of 3-batch vectors in R^2.
    x = rng.randn(100, 3, 2)
    y = x + 0.1 * rng.randn(100, 3, 2)
    # assertRaisesRegex: `assertRaisesRegexp` is a deprecated alias that was
    # removed in Python 3.12.
    with self.assertRaisesRegex(ValueError, 'must be contiguous'):
      tfp.stats.covariance(x, y, sample_axis=1, event_axis=[0, 2])

  def test_overlapping_axis_raises(self):
    rng = test_util.test_np_rng()
    # They both are both 100 samples of 3-batch vectors in R^2.
    x = rng.randn(100, 3, 2)
    y = x + 0.1 * rng.randn(100, 3, 2)
    # assertRaisesRegex: see note in test_non_contiguous_event_axis_raises.
    with self.assertRaisesRegex(ValueError, 'overlapped'):
      tfp.stats.covariance(x, y, sample_axis=[0, 1], event_axis=[1, 2])

  def test_batch_vector_shape_dtype_ok(self):
    # Test addresses a particular bug.
    x = tf.ones((5, 2))
    # This next line failed, due to concatenating [float32, int32, int32]
    # traceback went to tf.concat((batch_axis, event_axis, sample_axis), 0)
    # Test passes when this does not fail.
    tfp.stats.covariance(x)
@test_util.test_all_tf_execution_regimes
class CorrelationTest(test_util.TestCase):
  """Tests for tfp.stats.correlation."""

  def _np_corr_1d(self, x, y):
    # Naive Pearson correlation of two 1-D sample vectors.
    assert x.ndim == y.ndim == 1
    x = x - x.mean()
    y = y - y.mean()
    sigma_x = np.sqrt((x**2).mean())
    sigma_y = np.sqrt((y**2).mean())
    return (x * y).mean() / (sigma_x * sigma_y)

  def test_batch_scalar(self):
    rng = test_util.test_np_rng()
    # X and Y are correlated, albeit less so in the first component.
    # They both are 100 samples of 3-batch scalars.
    x = rng.randn(100, 3)
    y = x + 0.1 * rng.randn(100, 3)
    x[:, 0] += 0.1 * rng.randn(100)
    corr = tfp.stats.correlation(x, y, sample_axis=0, event_axis=None)
    self.assertAllEqual((3,), corr.shape)
    corr = self.evaluate(corr)
    for i in range(3):  # Iterate over batch index.
      self.assertAllClose(self._np_corr_1d(x[:, i], y[:, i]), corr[i])

  def test_diagonal_of_correlation_matrix_x_with_x_is_one(self):
    rng = test_util.test_np_rng()
    # Some big numbers, to test stability.
    x = np.float32(1e10 * rng.randn(100, 3))
    corr = tfp.stats.correlation(x, sample_axis=0, event_axis=1)
    self.assertAllEqual((3, 3), corr.shape)
    corr = self.evaluate(corr)
    self.assertAllClose([1., 1., 1.], np.diag(corr))

  def test_batch_vector_sampaxis0_eventaxisn1(self):
    rng = test_util.test_np_rng()
    # X and Y are correlated, albeit less so in the first component.
    # They both are both 100 samples of 3-batch vectors in R^2.
    x = rng.randn(100, 3, 2)
    y = x + 0.1 * rng.randn(100, 3, 2)
    x[:, :, 0] += 0.1 * rng.randn(100, 3)
    corr = tfp.stats.correlation(x, y, event_axis=-1)
    self.assertAllEqual((3, 2, 2), corr.shape)
    corr = self.evaluate(corr)
    # keepdims retains a size-1 sample dim but must not change values.
    corr_kd = tfp.stats.correlation(x, y, event_axis=-1, keepdims=True)
    self.assertAllEqual((1, 3, 2, 2), corr_kd.shape)
    corr_kd = self.evaluate(corr_kd)
    self.assertAllEqual(corr, corr_kd[0, ...])
    for i in range(3):  # Iterate over batch index.
      x_i = x[:, i, :]  # Pick out ith batch of samples.
      y_i = y[:, i, :]
      corr_i = corr[i, :, :]
      for m in range(2):  # Iterate over row of matrix
        for n in range(2):  # Iterate over column of matrix
          self.assertAllClose(
              self._np_corr_1d(x_i[:, m], y_i[:, n]), corr_i[m, n])
@test_util.test_all_tf_execution_regimes
class CholeskyCovarianceTest(test_util.TestCase):
  """Tests for tfp.stats.cholesky_covariance."""

  def test_batch_vector_sampaxis1_eventaxis2(self):
    rng = test_util.test_np_rng()
    # x.shape = [2, 5000, 2],
    # 2-batch members, 5000 samples each, events in R^2.
    x0 = rng.randn(5000, 2)
    x1 = 2 * rng.randn(5000, 2)
    x = np.stack((x0, x1), axis=0)
    # chol.shape = [2 (batch), 2x2 (event x event)]
    chol = tfp.stats.cholesky_covariance(x, sample_axis=1)
    chol_kd = tfp.stats.cholesky_covariance(x, sample_axis=1, keepdims=True)
    # Make sure static shape of keepdims works
    self.assertAllEqual((2, 2, 2), chol.shape)
    self.assertAllEqual((2, 1, 2, 2), chol_kd.shape)
    chol, chol_kd = self.evaluate([chol, chol_kd])
    # keepdims should not change the numbers in the result.
    self.assertAllEqual(chol, np.squeeze(chol_kd, axis=1))
    # Covariance is trivial since these are independent normals, so the
    # Cholesky factor is ~ sigma * I for each batch member.
    # Tolerance chosen to be 2x the lowest passing atol.
    self.assertAllClose(np.eye(2), chol[0, ...], atol=0.06)
    self.assertAllClose(2 * np.eye(2), chol[1, ...], atol=0.06)
@test_util.test_all_tf_execution_regimes
class VarianceTest(test_util.TestCase):
  """Light test: Most methods tested implicitly by CovarianceTest."""

  def test_independent_uniform_samples(self):
    rng = test_util.test_np_rng()
    x = rng.rand(10, 10, 10)
    # sample_axis=None reduces over all axes to a scalar.
    var = tfp.stats.variance(x, sample_axis=None)
    self.assertAllEqual((), var.shape)
    var_kd = tfp.stats.variance(x, sample_axis=None, keepdims=True)
    self.assertAllEqual((1, 1, 1), var_kd.shape)
    var, var_kd = self.evaluate([var, var_kd])
    # keepdims changes only the shape, never the value.
    self.assertAllEqual(var, var_kd.reshape(()))
    self.assertAllClose(np.var(x), var)
@test_util.test_all_tf_execution_regimes
class StddevTest(test_util.TestCase):
  """Light test: Most methods tested implicitly by VarianceTest."""

  def test_independent_uniform_samples(self):
    rng = test_util.test_np_rng()
    x = rng.rand(10, 10, 10)
    # Reduce over a mix of positive and negative axis indices.
    stddev = tfp.stats.stddev(x, sample_axis=[1, -1])
    self.assertAllEqual((10,), stddev.shape)
    stddev_kd = tfp.stats.stddev(x, sample_axis=[1, -1], keepdims=True)
    self.assertAllEqual((10, 1, 1), stddev_kd.shape)
    stddev, stddev_kd = self.evaluate([stddev, stddev_kd])
    # keepdims changes only the shape, never the value.
    self.assertAllEqual(stddev, stddev_kd.reshape((10,)))
    self.assertAllClose(np.std(x, axis=(1, -1)), stddev)
@test_util.test_all_tf_execution_regimes
class LogAverageProbsTest(test_util.TestCase):
  """Tests for tfp.stats.log_average_probs."""

  def test_mathematical_correctness_bernoulli(self):
    logits = tf.random.normal([10, 3, 4], seed=test_util.test_seed())
    # The "expected" calculation is numerically naive.
    probs = tf.math.sigmoid(logits)
    expected = tf.math.log(tf.reduce_mean(probs, axis=0))
    actual = tfp.stats.log_average_probs(logits, validate_args=True)
    self.assertAllClose(*self.evaluate([expected, actual]), rtol=1e-5, atol=0.)

  def test_mathematical_correctness_categorical(self):
    logits = tf.random.normal([10, 3, 4], seed=test_util.test_seed())
    # The "expected" calculation is numerically naive.
    probs = tf.math.softmax(logits, axis=-1)
    expected = tf.math.log(tf.reduce_mean(probs, axis=0))
    actual = tfp.stats.log_average_probs(
        logits, event_axis=-1, validate_args=True)
    self.assertAllClose(*self.evaluate([expected, actual]), rtol=1e-5, atol=0.)

  def test_bad_axis_static(self):
    logits = tf.random.normal([10, 3, 4], seed=test_util.test_seed())
    # assertRaisesRegex: `assertRaisesRegexp` is a deprecated alias that was
    # removed in Python 3.12.  Also escape the final period so it is matched
    # literally rather than as "any character".
    with self.assertRaisesRegex(ValueError, r'.*must be distinct\.'):
      tfp.stats.log_average_probs(
          logits,
          sample_axis=[0, 1, 2],
          event_axis=-1,
          validate_args=True)

  def test_bad_axis_dynamic(self):
    if tf.executing_eagerly():
      return
    # Use test_util.test_seed() for consistency with the other tests in this
    # class instead of a hard-coded seed.
    logits = tf.random.normal([10, 3, 4], seed=test_util.test_seed())
    event_axis = tf.Variable(-1)
    with self.assertRaisesOpError(
        r'Arguments `sample_axis` and `event_axis` must be distinct.'):
      self.evaluate(event_axis.initializer)
      self.evaluate(tfp.stats.log_average_probs(
          logits,
          sample_axis=[0, 1, 2],
          event_axis=event_axis,
          validate_args=True))
if __name__ == '__main__':
  # Discover and run all TensorFlow test cases defined in this module.
  tf.test.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Enjoy your glitch life!!
Replace: 任意の箇所のバイト列を、同サイズの任意のバイト列に置き換える
Increase: 任意の箇所のバイト列を、それより大きなサイズの任意のバイト列に置き換える
Decrease: 任意の箇所のバイト列を、削除する
Swap: 任意の箇所のバイト列と他の任意の箇所のバイト列を入れ替える
http://ucnv.org/openspace2013/map.html
Usage:
glitch [-h] -i=<input> [-o=<output>] [-n=<times>] [maximum] [hard] [-m=<mode>]
Options:
-h show this
-i=<input> '*.jpg' file
-o=<output> '*.jpg' glitched file[default: ./glitched.jpg]
-n=<times> output files N times[default: 10]
maximum create 62 files
hard hard glitch
-m=<mode> glitch mode, r: replace,
i: increase,
d: decreace,
s: swap
[default: r]
"""
import base64
import random
import string
import docopt
from contextlib import contextmanager
class Glitch:
    """Produce glitched JPEGs by corrupting their base64 encoding."""

    def __init__(self):
        # Dispatch table: mode letter -> glitching strategy.  Every strategy
        # is called by machine() as mode(infile, fan, most).
        self.glitch_mode = {
            'r': self.replace,
            'i': self.increase,
            'd': self.decrease,
            's': self.swap
        }

    def glitch(self, infile, outfile='glitched.jpg', times=10, maximum=False, hard=False, mode='r'):
        """Glitch `infile` `times` times, writing numbered files, and return a banner."""
        setting, mode, times = self.prepare_glitchfile(infile, hard, mode, times, maximum)
        self.factory(outfile, setting, mode, times)
        return self.enjoyglitch()

    def enjoyglitch(self):
        """Return a scrambled 'Enjoy Glitch.' banner."""
        njo, litc = map(list, ("njo", "litc"))
        # Shuffle in place; the previous `a = list(map(...))` binding was unused.
        for chunk in (njo, litc):
            random.shuffle(chunk)
        njo, litc = "".join(njo), "".join(litc)
        return "E" + njo + "y G" + litc + "h."

    def factory(self, outfile, setting, mode, times):
        """Write `times` glitched variants, numbering each output filename."""
        for i in range(times):
            filename = outfile.replace(".jpg", "{0}_{1}.jpg".format(i, mode.__name__))
            with open(filename, "wb") as f:
                f.write(self.machine(setting, mode))

    def prepare_glitchfile(self, infile, hard, mode, times, maximum):
        """Read `infile` and build the (text, generators, target-bytes) setting."""
        mode = self.glitch_mode[mode]
        times = self.set_glitch_times(times, maximum)
        with open(infile, 'rb') as f:
            # encodebytes replaces encodestring, which was removed in Py3.9.
            graphictext = base64.encodebytes(f.read())
        if hard:
            fan = [self.fetchAlphanumeric() for i in range(4)]
            most = None  # hard mode: machine() picks a fresh target per file
        else:
            fan = [self.fetchAlphanumeric() for i in range(2)]
            # Target the two most common bytes of the base64 text.
            most = self.mostbytes(graphictext)
            most += self.mostbytes(graphictext, most)
        return ((graphictext, fan, most), mode, times)

    def machine(self, setting, mode):
        """Run one glitch pass; in hard mode pick random target bytes."""
        infile, fan, most = setting
        if most is None:
            most = next(fan[2]) + next(fan[3])
        return mode(infile, fan, most)

    def set_glitch_times(self, times, maximum):
        """Return the output-file count: 62 for maximum, else int(times)."""
        if maximum:
            # One file per alphanumeric character.
            return len(string.ascii_letters + string.digits)
        else:
            return int(times)

    def mostbytes(self, text, remove_key=None):
        """Return the most frequent byte of `text`, skipping base64 syntax bytes."""
        if isinstance(remove_key, bytes):
            remove_key = ord(remove_key)
        # Keep duplicates: collapsing through set() made every count equal
        # to 1, so max(..., key=text.count) picked an arbitrary byte.
        text = list(text)
        # Bytes that are part of base64 syntax, not image data.
        ruleout = [ord(w) for w in ['+', '=', '/', '\n']]
        if remove_key is not None:
            text.remove(remove_key)
        most = max(text, key=text.count)
        if most in ruleout:
            most = self.mostbytes(text, most)
        if isinstance(most, int):
            return bytes([most])
        return most  # already bytes (came from a recursive call)

    def fetchAlphanumeric(self):
        """Yield the 62 alphanumeric characters as single bytes, shuffled."""
        an = list(string.ascii_letters + string.digits)
        random.shuffle(an)
        return (bytes([ord(c)]) for c in an)

    def replace(self, infile, fan, most):
        """Replace every occurrence of `most` with two random bytes, then decode."""
        glitched = infile.replace(most, next(fan[0]) + next(fan[1]))
        # decodebytes replaces decodestring, which was removed in Py3.9.
        return base64.decodebytes(glitched)

    def increase(self, infile=None, fan=None, most=None):
        # Not implemented.  Signature matches the mode(infile, fan, most)
        # dispatch call, and bytes are returned so factory()'s binary write
        # does not crash.
        return b'Not yet.'

    def decrease(self, infile=None, fan=None, most=None):
        # Not implemented; see increase().
        return b'Not yet.'

    def swap(self, infile=None, fan=None, most=None):
        # Not implemented; see increase().
        return b'Not yet.'
def main(*args):
    """Build a Glitch instance and run it with the CLI arguments."""
    return Glitch().glitch(*args)
if __name__ == '__main__':
    # Parse CLI options per the module docstring and run the glitcher.
    args = docopt.docopt(__doc__, version=1.0)
    print(main(args["-i"], args["-o"], args["-n"], args["maximum"], args["hard"], args["-m"]))
Fix: remove set() in mostbytes (it prevented counting the most frequent byte) and remove the unused contextlib import.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Enjoy your glitch life!!
Replace: 任意の箇所のバイト列を、同サイズの任意のバイト列に置き換える
Increase: 任意の箇所のバイト列を、それより大きなサイズの任意のバイト列に置き換える
Decrease: 任意の箇所のバイト列を、削除する
Swap: 任意の箇所のバイト列と他の任意の箇所のバイト列を入れ替える
http://ucnv.org/openspace2013/map.html
Usage:
glitch [-h] -i=<input> [-o=<output>] [-n=<times>] [maximum] [hard] [-m=<mode>]
Options:
-h show this
-i=<input> '*.jpg' file
-o=<output> '*.jpg' glitched file[default: ./glitched.jpg]
-n=<times> output files N times[default: 10]
maximum create 62 files
hard hard glitch
-m=<mode> glitch mode, r: replace,
i: increase,
d: decreace,
s: swap
[default: r]
"""
import base64
import random
import string
import docopt
class Glitch:
    """Produce glitched JPEGs by corrupting their base64 encoding."""

    def __init__(self):
        # Dispatch table: mode letter -> glitching strategy.  Every strategy
        # is called by machine() as mode(infile, fan, most).
        self.glitch_mode = {
            'r': self.replace,
            'i': self.increase,
            'd': self.decrease,
            's': self.swap
        }

    def glitch(self, infile, outfile='glitched.jpg', times=10, maximum=False, hard=False, mode='r'):
        """Glitch `infile` `times` times, writing numbered files, and return a banner."""
        setting, mode, times = self.prepare_glitchfile(infile, hard, mode, times, maximum)
        self.factory(outfile, setting, mode, times)
        return self.enjoyglitch()

    def enjoyglitch(self):
        """Return a scrambled 'Enjoy Glitch.' banner."""
        njo, litc = map(list, ("njo", "litc"))
        # Shuffle in place; the previous `a = list(map(...))` binding was unused.
        for chunk in (njo, litc):
            random.shuffle(chunk)
        njo, litc = "".join(njo), "".join(litc)
        return "E" + njo + "y G" + litc + "h."

    def factory(self, outfile, setting, mode, times):
        """Write `times` glitched variants, numbering each output filename."""
        for i in range(times):
            filename = outfile.replace(".jpg", "{0}_{1}.jpg".format(i, mode.__name__))
            with open(filename, "wb") as f:
                f.write(self.machine(setting, mode))

    def prepare_glitchfile(self, infile, hard, mode, times, maximum):
        """Read `infile` and build the (text, generators, target-bytes) setting."""
        mode = self.glitch_mode[mode]
        times = self.set_glitch_times(times, maximum)
        with open(infile, 'rb') as f:
            # encodebytes replaces encodestring, which was removed in Py3.9.
            graphictext = base64.encodebytes(f.read())
        if hard:
            fan = [self.fetchAlphanumeric() for i in range(4)]
            most = None  # hard mode: machine() picks a fresh target per file
        else:
            fan = [self.fetchAlphanumeric() for i in range(2)]
            # Target the two most common bytes of the base64 text.
            most = self.mostbytes(graphictext)
            most += self.mostbytes(graphictext, most)
        return ((graphictext, fan, most), mode, times)

    def machine(self, setting, mode):
        """Run one glitch pass; in hard mode pick random target bytes."""
        infile, fan, most = setting
        if most is None:
            most = next(fan[2]) + next(fan[3])
        return mode(infile, fan, most)

    def set_glitch_times(self, times, maximum):
        """Return the output-file count: 62 for maximum, else int(times)."""
        if maximum:
            # One file per alphanumeric character.
            return len(string.ascii_letters + string.digits)
        else:
            return int(times)

    def mostbytes(self, text, remove_key=None):
        """Return the most frequent byte of `text`, skipping base64 syntax bytes."""
        if isinstance(remove_key, bytes):
            remove_key = ord(remove_key)
        text = list(text)
        # Bytes that are part of base64 syntax, not image data.
        ruleout = [ord(w) for w in ['+', '=', '/', '\n']]
        if remove_key is not None:
            text.remove(remove_key)
        most = max(text, key=text.count)
        if most in ruleout:
            most = self.mostbytes(text, most)
        if isinstance(most, int):
            return bytes([most])
        return most  # already bytes (came from a recursive call)

    def fetchAlphanumeric(self):
        """Yield the 62 alphanumeric characters as single bytes, shuffled."""
        an = list(string.ascii_letters + string.digits)
        random.shuffle(an)
        return (bytes([ord(c)]) for c in an)

    def replace(self, infile, fan, most):
        """Replace every occurrence of `most` with two random bytes, then decode."""
        glitched = infile.replace(most, next(fan[0]) + next(fan[1]))
        # decodebytes replaces decodestring, which was removed in Py3.9.
        return base64.decodebytes(glitched)

    def increase(self, infile=None, fan=None, most=None):
        # Not implemented.  Signature matches the mode(infile, fan, most)
        # dispatch call, and bytes are returned so factory()'s binary write
        # does not crash.
        return b'Not yet.'

    def decrease(self, infile=None, fan=None, most=None):
        # Not implemented; see increase().
        return b'Not yet.'

    def swap(self, infile=None, fan=None, most=None):
        # Not implemented; see increase().
        return b'Not yet.'
def main(*args):
    """Build a Glitch instance and run it with the CLI arguments."""
    return Glitch().glitch(*args)
if __name__ == '__main__':
    # Parse CLI options per the module docstring and run the glitcher.
    args = docopt.docopt(__doc__, version=1.0)
    print(main(args["-i"], args["-o"], args["-n"], args["maximum"], args["hard"], args["-m"]))
|
#########
# Copyright (c) 2013-2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import StringIO
import traceback
import os
import yaml
from contextlib import contextmanager
from flask_restful import Api
from flask import Flask, jsonify, Blueprint, current_app
from flask_security import Security
from werkzeug.exceptions import InternalServerError
from manager_rest import config, premium_enabled, manager_exceptions
from manager_rest.storage import db, user_datastore
from manager_rest.security.user_handler import user_loader
from manager_rest.maintenance import maintenance_mode_handler
from manager_rest.rest.endpoint_mapper import setup_resources
from manager_rest.flask_utils import set_flask_security_config
from manager_rest.manager_exceptions import INTERNAL_SERVER_ERROR_CODE
from manager_rest.app_logging import (setup_logger,
log_request,
log_response)
if premium_enabled:
from cloudify_premium.authentication.extended_auth_handler \
import configure_auth
from cloudify_premium.license.license import LicenseHandler
# Dialect used when building SQLAlchemy connection URLs.
SQL_DIALECT = 'postgresql'
# Blueprint carrying the application-wide JSON error handlers below.
app_errors = Blueprint('app_errors', __name__)
@app_errors.app_errorhandler(manager_exceptions.ManagerException)
def manager_exception(error):
    """Render a known ManagerException as a JSON error response, using the
    status code and error code carried by the exception itself."""
    current_app.logger.error(error)
    return jsonify(
        message=str(error),
        error_code=error.error_code
    ), error.status_code
@app_errors.app_errorhandler(InternalServerError)
def internal_error(e):
    """Render an unexpected error as a JSON 500 response that includes the
    current exception traceback for debugging."""
    # traceback.format_exc() yields the same text as the previous
    # StringIO + traceback.print_exc() dance, without the buffer juggling.
    return jsonify(
        message="Internal error occurred in manager REST server - {0}: {1}"
                .format(type(e).__name__, e),
        error_code=INTERNAL_SERVER_ERROR_CODE,
        server_traceback=traceback.format_exc()
    ), 500
class CloudifyFlaskApp(Flask):
    """Flask application pre-wired for the Cloudify manager REST service:
    configuration loading, SQLAlchemy, logging, auth and REST resources."""

    def __init__(self, load_config=True):
        # Runs first so a remote debugger can attach before any setup work.
        _detect_debug_environment()
        super(CloudifyFlaskApp, self).__init__(__name__)
        if load_config:
            config.instance.load_configuration()
        self._set_sql_alchemy()

        # These two need to be called after the configuration was loaded
        if config.instance.rest_service_log_path:
            setup_logger(self.logger)
        if premium_enabled and config.instance.file_server_root:
            # Premium build: external auth plus license enforcement on
            # every request.
            self.external_auth = configure_auth(self.logger)
            self.before_request(LicenseHandler.check_license_expiration_date)
        else:
            self.external_auth = None

        self.before_request(log_request)
        self.before_request(maintenance_mode_handler)
        self.after_request(log_response)
        self._set_flask_security()

        with self.app_context():
            # Make sure every configured authorization role exists in the DB.
            roles = config.instance.authorization_roles
            if roles:
                for role in roles:
                    user_datastore.find_or_create_role(name=role['name'])
                user_datastore.commit()

        with self._prevent_flask_restful_error_handling():
            setup_resources(Api(self))
        self.register_blueprint(app_errors)

    def _set_flask_security(self):
        """Set Flask-Security specific configurations and init the extension
        """
        set_flask_security_config(self)
        Security(app=self, datastore=user_datastore)

        # Get the login manager and set our own callback to be the user getter
        login_manager = self.extensions['security'].login_manager
        login_manager.request_loader(user_loader)
        self.token_serializer = self.extensions[
            'security'].remember_token_serializer

    def _set_sql_alchemy(self):
        """
        Set SQLAlchemy specific configurations, init the db object and create
        the tables if necessary
        """
        self.config['SQLALCHEMY_DATABASE_URI'] = config.instance.db_url
        self.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
        db.init_app(self)  # Prepare the app for use with flask-sqlalchemy

    @contextmanager
    def _prevent_flask_restful_error_handling(self):
        """Add flask-restful under this, to avoid installing its errorhandlers

        Flask-restful's errorhandlers are both not flexible enough, and too
        complex. We want to simply use flask's error handling mechanism,
        so this will make sure that flask-restful's are overridden with the
        default ones.
        """
        orig_handle_exc = self.handle_exception
        orig_handle_user_exc = self.handle_user_exception
        yield
        # setup_resources() patched these; restore the plain Flask handlers
        # saved above.
        self.handle_exception = orig_handle_exc
        self.handle_user_exception = orig_handle_user_exc
def reset_app(configuration=None):
    """Reset the global config and rebuild the module-level ``app`` without
    re-loading configuration from disk."""
    global app
    config.reset(configuration)
    app = CloudifyFlaskApp(False)
def _detect_debug_environment():
    """
    Detect whether the server is running in a debug environment: if
    env[DEBUG_CONFIG] names a YAML file with ``is_debug_on`` set, attach to
    the pydevd debug server it describes (port hard-coded to 53100 below).
    """
    try:
        docl_debug_path = os.environ.get('DEBUG_CONFIG')
        if docl_debug_path and os.path.isfile(docl_debug_path):
            with open(docl_debug_path, 'r') as docl_debug_file:
                debug_config = yaml.safe_load(docl_debug_file)
                if debug_config.get('is_debug_on'):
                    # Imported lazily: pydevd is only present in debug images.
                    import pydevd
                    pydevd.settrace(
                        debug_config['host'], port=53100, stdoutToServer=True,
                        stderrToServer=True, suspend=False)
    # Python 2 except syntax -- this file targets Python 2 (see StringIO use).
    except BaseException, e:
        raise Exception('Failed to connect to debug server, {0}: {1}'.
                        format(type(e).__name__, str(e)))
ManagerExceptions: add server_traceback (#1922)
Add an empty server_traceback value so that the response does not break v1 and v2 API clients.
This has to be reverted once the v1 and v2 clients are removed.
#########
# Copyright (c) 2013-2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import StringIO
import traceback
import os
import yaml
from contextlib import contextmanager
from flask_restful import Api
from flask import Flask, jsonify, Blueprint, current_app
from flask_security import Security
from werkzeug.exceptions import InternalServerError
from manager_rest import config, premium_enabled, manager_exceptions
from manager_rest.storage import db, user_datastore
from manager_rest.security.user_handler import user_loader
from manager_rest.maintenance import maintenance_mode_handler
from manager_rest.rest.endpoint_mapper import setup_resources
from manager_rest.flask_utils import set_flask_security_config
from manager_rest.manager_exceptions import INTERNAL_SERVER_ERROR_CODE
from manager_rest.app_logging import (setup_logger,
log_request,
log_response)
if premium_enabled:
from cloudify_premium.authentication.extended_auth_handler \
import configure_auth
from cloudify_premium.license.license import LicenseHandler
# Dialect used when building SQLAlchemy connection URLs.
SQL_DIALECT = 'postgresql'
# Blueprint carrying the application-wide JSON error handlers below.
app_errors = Blueprint('app_errors', __name__)
@app_errors.app_errorhandler(manager_exceptions.ManagerException)
def manager_exception(error):
    """Render a known ManagerException as a JSON error response, using the
    status code and error code carried by the exception itself."""
    current_app.logger.error(error)
    return jsonify(
        message=str(error),
        error_code=error.error_code,
        # useless, but v1 and v2 api clients require server_traceback
        # remove this after dropping v1 and v2 api clients
        server_traceback=None
    ), error.status_code
@app_errors.app_errorhandler(InternalServerError)
def internal_error(e):
    """Render an unexpected error as a JSON 500 response that includes the
    current exception traceback for debugging."""
    # traceback.format_exc() yields the same text as the previous
    # StringIO + traceback.print_exc() dance, without the buffer juggling.
    return jsonify(
        message="Internal error occurred in manager REST server - {0}: {1}"
                .format(type(e).__name__, e),
        error_code=INTERNAL_SERVER_ERROR_CODE,
        server_traceback=traceback.format_exc()
    ), 500
class CloudifyFlaskApp(Flask):
    """Flask application pre-wired for the Cloudify manager REST service:
    configuration loading, SQLAlchemy, logging, auth and REST resources."""

    def __init__(self, load_config=True):
        # Runs first so a remote debugger can attach before any setup work.
        _detect_debug_environment()
        super(CloudifyFlaskApp, self).__init__(__name__)
        if load_config:
            config.instance.load_configuration()
        self._set_sql_alchemy()

        # These two need to be called after the configuration was loaded
        if config.instance.rest_service_log_path:
            setup_logger(self.logger)
        if premium_enabled and config.instance.file_server_root:
            # Premium build: external auth plus license enforcement on
            # every request.
            self.external_auth = configure_auth(self.logger)
            self.before_request(LicenseHandler.check_license_expiration_date)
        else:
            self.external_auth = None

        self.before_request(log_request)
        self.before_request(maintenance_mode_handler)
        self.after_request(log_response)
        self._set_flask_security()

        with self.app_context():
            # Make sure every configured authorization role exists in the DB.
            roles = config.instance.authorization_roles
            if roles:
                for role in roles:
                    user_datastore.find_or_create_role(name=role['name'])
                user_datastore.commit()

        with self._prevent_flask_restful_error_handling():
            setup_resources(Api(self))
        self.register_blueprint(app_errors)

    def _set_flask_security(self):
        """Set Flask-Security specific configurations and init the extension
        """
        set_flask_security_config(self)
        Security(app=self, datastore=user_datastore)

        # Get the login manager and set our own callback to be the user getter
        login_manager = self.extensions['security'].login_manager
        login_manager.request_loader(user_loader)
        self.token_serializer = self.extensions[
            'security'].remember_token_serializer

    def _set_sql_alchemy(self):
        """
        Set SQLAlchemy specific configurations, init the db object and create
        the tables if necessary
        """
        self.config['SQLALCHEMY_DATABASE_URI'] = config.instance.db_url
        self.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
        db.init_app(self)  # Prepare the app for use with flask-sqlalchemy

    @contextmanager
    def _prevent_flask_restful_error_handling(self):
        """Add flask-restful under this, to avoid installing its errorhandlers

        Flask-restful's errorhandlers are both not flexible enough, and too
        complex. We want to simply use flask's error handling mechanism,
        so this will make sure that flask-restful's are overridden with the
        default ones.
        """
        orig_handle_exc = self.handle_exception
        orig_handle_user_exc = self.handle_user_exception
        yield
        # setup_resources() patched these; restore the plain Flask handlers
        # saved above.
        self.handle_exception = orig_handle_exc
        self.handle_user_exception = orig_handle_user_exc
def reset_app(configuration=None):
    """Reset the global config and rebuild the module-level ``app`` without
    re-loading configuration from disk."""
    global app
    config.reset(configuration)
    app = CloudifyFlaskApp(False)
def _detect_debug_environment():
    """
    Detect whether the server is running in a debug environment: if
    env[DEBUG_CONFIG] names a YAML file with ``is_debug_on`` set, attach to
    the pydevd debug server it describes (port hard-coded to 53100 below).
    """
    try:
        docl_debug_path = os.environ.get('DEBUG_CONFIG')
        if docl_debug_path and os.path.isfile(docl_debug_path):
            with open(docl_debug_path, 'r') as docl_debug_file:
                debug_config = yaml.safe_load(docl_debug_file)
                if debug_config.get('is_debug_on'):
                    # Imported lazily: pydevd is only present in debug images.
                    import pydevd
                    pydevd.settrace(
                        debug_config['host'], port=53100, stdoutToServer=True,
                        stderrToServer=True, suspend=False)
    # Python 2 except syntax -- this file targets Python 2 (see StringIO use).
    except BaseException, e:
        raise Exception('Failed to connect to debug server, {0}: {1}'.
                        format(type(e).__name__, str(e)))
|
from citrination_client.base.base_client import BaseClient
from citrination_client.base.errors import *
from citrination_client.data import *
from citrination_client.data import routes as routes
from pypif import pif
import json
import os
import requests
class DataClient(BaseClient):
    """
    Client encapsulating data management behavior.
    """

    def __init__(self, api_key, host="https://citrination.com", suppress_warnings=False):
        """
        Constructor.

        :param api_key: A user's API key, as a string
        :type api_key: str
        :param host: The base URL of the citrination site, e.g. https://citrination.com
        :type host: str
        :param suppress_warnings: Whether or not usage warnings should be
            printed to stdout
        :type suppress_warnings: bool
        """
        # Public methods registered with BaseClient.
        members = [
            "upload",
            "list_files",
            "matched_file_count",
            "get_dataset_files",
            "get_dataset_file",
            "create_dataset",
            "create_dataset_version"
        ]
        super(DataClient, self).__init__(api_key, host, members, suppress_warnings=suppress_warnings)

    def upload(self, dataset_id, source_path, dest_path=None):
        """
        Upload a file, specifying source and dest paths (acts as the scp command).

        :param source_path: The path to the file on the source host
        :type source_path: str
        :param dest_path: The path to the file where the contents of the upload will be written (on the dest host)
        :type dest_path: str
        :return: The result of the upload process
        :rtype: :class:`UploadResult`
        """
        upload_result = UploadResult()
        source_path = str(source_path)
        if not dest_path:
            dest_path = source_path
        else:
            dest_path = str(dest_path)
        if os.path.isdir(source_path):
            # Recursively upload a directory, recording per-file outcomes.
            for path, subdirs, files in os.walk(source_path):
                for name in files:
                    path_without_root_dir = path.split("/")[-1:] + [name]
                    current_dest_path = os.path.join(dest_path, *path_without_root_dir)
                    current_source_path = os.path.join(path, name)
                    try:
                        if self.upload(dataset_id, current_source_path, current_dest_path).successful():
                            upload_result.add_success(current_source_path)
                        else:
                            upload_result.add_failure(current_source_path, "Upload failure")
                    except (CitrinationClientError, ValueError) as e:
                        # Bug fix: e.message does not exist on Python 3
                        # exceptions; str(e) works on both 2 and 3.
                        upload_result.add_failure(current_source_path, str(e))
            return upload_result
        elif os.path.isfile(source_path):
            # Ask the API for a presigned S3 destination, stream the file
            # there, then confirm the upload with Citrination.
            file_data = {"dest_path": str(dest_path), "src_path": str(source_path)}
            j = self._post_json(routes.upload_to_dataset(dataset_id), data=file_data).json()
            s3url = _get_s3_presigned_url(j)
            with open(source_path, 'rb') as f:
                r = requests.put(s3url, data=f, headers=j["required_headers"])
                if r.status_code == 200:
                    data = {'s3object': j['url']['path'], 's3bucket': j['bucket']}
                    self._post_json(routes.update_file(j['file_id']), data=data)
                    upload_result.add_success(source_path)
                    return upload_result
                else:
                    raise CitrinationClientError("Failure to upload {} to Citrination".format(source_path))
        else:
            raise ValueError("No file at specified path {}".format(source_path))

    def list_files(self, dataset_id, glob=".", is_dir=False):
        """
        List matched filenames in a dataset on Citrination.

        :param dataset_id: The ID of the dataset to search for files.
        :type dataset_id: int
        :param glob: A pattern which will be matched against files in the dataset.
        :type glob: str
        :param is_dir: A boolean indicating whether or not the pattern should match against the beginning of paths in the dataset.
        :type is_dir: bool
        :return: A list of filepaths in the dataset matching the provided glob.
        :rtype: str[]
        """
        data = {
            "list": {
                "glob": glob,
                "isDir": is_dir
            }
        }
        return self._post_json(routes.list_files(dataset_id), data, failure_message="Failed to list files for dataset {}".format(dataset_id)).json()['files']

    def matched_file_count(self, dataset_id, glob=".", is_dir=False):
        """
        Returns the number of files matching a pattern in a dataset.

        :param dataset_id: The ID of the dataset to search for files.
        :type dataset_id: int
        :param glob: A pattern which will be matched against files in the dataset.
        :type glob: str
        :param is_dir: A boolean indicating whether or not the pattern should match against the beginning of paths in the dataset.
        :type is_dir: bool
        :return: The number of matching files
        :rtype: int
        """
        list_result = self.list_files(dataset_id, glob, is_dir)
        return len(list_result)

    def get_dataset_files(self, dataset_id, glob=".", is_dir=False, version_number=None):
        """
        Retrieves URLs for the files matched by a glob or a path to a directory
        in a given dataset.

        :param dataset_id: The id of the dataset to retrieve files from
        :type dataset_id: int
        :param glob: A regex used to select one or more files in the dataset
        :type glob: str
        :param version_number: The version number of the dataset to retrieve files from
        :type version_number: int
        :return: A list of dataset files whose paths match the provided pattern.
        :rtype: list of :class:`DatasetFile`
        """
        latest = version_number is None
        data = {
            "download_request": {
                "glob": glob,
                "isDir": is_dir,
                "latest": latest
            }
        }
        failure_message = "Failed to get matched files in dataset {}".format(dataset_id)
        versions = self._post_json(routes.matched_files(dataset_id), data, failure_message=failure_message).json()['versions']
        # if you don't provide a version number, only the latest
        # will be included in the response body
        if version_number is None:
            version = versions[0]
        else:
            try:
                version = list(filter(lambda v: v['number'] == version_number, versions))[0]
            except IndexError:
                raise ResourceNotFoundException()
        return list(
            map(
                lambda f: DatasetFile(path=f['filename'], url=f['url']), version['files']
            )
        )

    def get_dataset_file(self, dataset_id, file_path, version=None):
        """
        Retrieves a dataset file matching a provided file path

        :param dataset_id: The id of the dataset to retrieve file from
        :type dataset_id: int
        :param file_path: The file path within the dataset
        :type file_path: str
        :param version: The dataset version to look for the file in. If nothing is supplied, the latest dataset version will be searched
        :type version: int
        :return: A dataset file matching the filepath provided
        :rtype: :class:`DatasetFile`
        """
        return self.get_dataset_files(dataset_id, "^{}$".format(file_path), version_number=version)[0]

    def get_pif(self, dataset_id, uid, version=None):
        """
        Retrieves a PIF from a given dataset.

        :param dataset_id: The id of the dataset to retrieve PIF from
        :type dataset_id: int
        :param uid: The uid of the PIF to retrieve
        :type uid: str
        :param version: The dataset version to look for the PIF in. If nothing is supplied, the latest dataset version will be searched
        :type version: int
        :return: A :class:`Pif` object
        :rtype: :class:`Pif`
        """
        failure_message = "An error occurred retrieving PIF {}".format(uid)
        if version is None:
            response = self._get(routes.pif_dataset_uid(dataset_id, uid), failure_message=failure_message)
        else:
            response = self._get(routes.pif_dataset_version_uid(dataset_id, uid, version), failure_message=failure_message)
        return pif.loads(response.content)

    def create_dataset(self, name=None, description=None, public=False):
        """
        Create a new data set.

        :param name: name of the dataset
        :type name: str
        :param description: description for the dataset
        :type description: str
        :param public: A boolean indicating whether or not the dataset should be public.
        :type public: bool
        :return: The newly created dataset.
        :rtype: :class:`Dataset`
        """
        data = {
            "public": _convert_bool_to_public_value(public)
        }
        if name:
            data["name"] = name
        if description:
            data["description"] = description
        dataset = {"dataset": data}
        failure_message = "Unable to create dataset"
        result = self._post_json(routes.create_dataset(), dataset, failure_message=failure_message).json()
        return _dataset_from_response_dict(result)

    def update_dataset(self, dataset_id, name=None, description=None, public=None):
        """
        Update a data set.

        :param dataset_id: The ID of the dataset to update
        :type dataset_id: int
        :param name: name of the dataset
        :type name: str
        :param description: description for the dataset
        :type description: str
        :param public: A boolean indicating whether or not the dataset should
            be public.
        :type public: bool
        :return: The updated dataset.
        :rtype: :class:`Dataset`
        """
        data = {
            "public": _convert_bool_to_public_value(public)
        }
        if name:
            data["name"] = name
        if description:
            data["description"] = description
        dataset = {"dataset": data}
        failure_message = "Failed to update dataset {}".format(dataset_id)
        response = self._post_json(routes.update_dataset(dataset_id), data=dataset, failure_message=failure_message).json()
        return _dataset_from_response_dict(response)

    def create_dataset_version(self, dataset_id):
        """
        Create a new data set version.

        :param dataset_id: The ID of the dataset for which the version must be bumped.
        :type dataset_id: int
        :return: The new dataset version.
        :rtype: :class:`DatasetVersion`
        """
        failure_message = "Failed to create dataset version for dataset {}".format(dataset_id)
        number = self._post_json(routes.create_dataset_version(dataset_id), data={}, failure_message=failure_message).json()['dataset_scoped_id']
        return DatasetVersion(number=number)
def _dataset_from_response_dict(dataset):
    # Build a Dataset model object from the raw JSON payload returned by
    # the dataset API endpoints.
    return Dataset(dataset['id'], name=dataset['name'],
                   description=dataset['description'], created_at=dataset['created_at'])
def _convert_bool_to_public_value(val):
if val == None:
return None
if val == False:
return '0'
if val == True:
return '1'
# for backwards compatability, support the old API #utahisrad
if val == '0' or val == '1':
return val
def _get_s3_presigned_url(response_dict):
"""
Helper method to create an S3 presigned url from the response dictionary.
"""
url = response_dict['url']
return url['scheme']+'://'+url['host']+url['path']+'?'+url['query']
Decode response content as UTF-8 so PIF parsing works under Python 3.
from citrination_client.base.base_client import BaseClient
from citrination_client.base.errors import *
from citrination_client.data import *
from citrination_client.data import routes as routes
from pypif import pif
import json
import os
import requests
class DataClient(BaseClient):
    """
    Client encapsulating data management behavior.
    """

    def __init__(self, api_key, host="https://citrination.com", suppress_warnings=False):
        """
        Constructor.

        :param api_key: A user's API key, as a string
        :type api_key: str
        :param host: The base URL of the citrination site, e.g. https://citrination.com
        :type host: str
        :param suppress_warnings: Whether or not usage warnings should be
            printed to stdout
        :type suppress_warnings: bool
        """
        # Public methods registered with BaseClient.
        members = [
            "upload",
            "list_files",
            "matched_file_count",
            "get_dataset_files",
            "get_dataset_file",
            "create_dataset",
            "create_dataset_version"
        ]
        super(DataClient, self).__init__(api_key, host, members, suppress_warnings=suppress_warnings)

    def upload(self, dataset_id, source_path, dest_path=None):
        """
        Upload a file, specifying source and dest paths (acts as the scp command).

        :param source_path: The path to the file on the source host
        :type source_path: str
        :param dest_path: The path to the file where the contents of the upload will be written (on the dest host)
        :type dest_path: str
        :return: The result of the upload process
        :rtype: :class:`UploadResult`
        """
        upload_result = UploadResult()
        source_path = str(source_path)
        if not dest_path:
            dest_path = source_path
        else:
            dest_path = str(dest_path)
        if os.path.isdir(source_path):
            # Recursively upload a directory, recording per-file outcomes.
            for path, subdirs, files in os.walk(source_path):
                for name in files:
                    path_without_root_dir = path.split("/")[-1:] + [name]
                    current_dest_path = os.path.join(dest_path, *path_without_root_dir)
                    current_source_path = os.path.join(path, name)
                    try:
                        if self.upload(dataset_id, current_source_path, current_dest_path).successful():
                            upload_result.add_success(current_source_path)
                        else:
                            upload_result.add_failure(current_source_path, "Upload failure")
                    except (CitrinationClientError, ValueError) as e:
                        # Bug fix: e.message does not exist on Python 3
                        # exceptions; str(e) works on both 2 and 3.
                        upload_result.add_failure(current_source_path, str(e))
            return upload_result
        elif os.path.isfile(source_path):
            # Ask the API for a presigned S3 destination, stream the file
            # there, then confirm the upload with Citrination.
            file_data = {"dest_path": str(dest_path), "src_path": str(source_path)}
            j = self._post_json(routes.upload_to_dataset(dataset_id), data=file_data).json()
            s3url = _get_s3_presigned_url(j)
            with open(source_path, 'rb') as f:
                r = requests.put(s3url, data=f, headers=j["required_headers"])
                if r.status_code == 200:
                    data = {'s3object': j['url']['path'], 's3bucket': j['bucket']}
                    self._post_json(routes.update_file(j['file_id']), data=data)
                    upload_result.add_success(source_path)
                    return upload_result
                else:
                    raise CitrinationClientError("Failure to upload {} to Citrination".format(source_path))
        else:
            raise ValueError("No file at specified path {}".format(source_path))

    def list_files(self, dataset_id, glob=".", is_dir=False):
        """
        List matched filenames in a dataset on Citrination.

        :param dataset_id: The ID of the dataset to search for files.
        :type dataset_id: int
        :param glob: A pattern which will be matched against files in the dataset.
        :type glob: str
        :param is_dir: A boolean indicating whether or not the pattern should match against the beginning of paths in the dataset.
        :type is_dir: bool
        :return: A list of filepaths in the dataset matching the provided glob.
        :rtype: str[]
        """
        data = {
            "list": {
                "glob": glob,
                "isDir": is_dir
            }
        }
        return self._post_json(routes.list_files(dataset_id), data, failure_message="Failed to list files for dataset {}".format(dataset_id)).json()['files']

    def matched_file_count(self, dataset_id, glob=".", is_dir=False):
        """
        Returns the number of files matching a pattern in a dataset.

        :param dataset_id: The ID of the dataset to search for files.
        :type dataset_id: int
        :param glob: A pattern which will be matched against files in the dataset.
        :type glob: str
        :param is_dir: A boolean indicating whether or not the pattern should match against the beginning of paths in the dataset.
        :type is_dir: bool
        :return: The number of matching files
        :rtype: int
        """
        list_result = self.list_files(dataset_id, glob, is_dir)
        return len(list_result)

    def get_dataset_files(self, dataset_id, glob=".", is_dir=False, version_number=None):
        """
        Retrieves URLs for the files matched by a glob or a path to a directory
        in a given dataset.

        :param dataset_id: The id of the dataset to retrieve files from
        :type dataset_id: int
        :param glob: A regex used to select one or more files in the dataset
        :type glob: str
        :param version_number: The version number of the dataset to retrieve files from
        :type version_number: int
        :return: A list of dataset files whose paths match the provided pattern.
        :rtype: list of :class:`DatasetFile`
        """
        latest = version_number is None
        data = {
            "download_request": {
                "glob": glob,
                "isDir": is_dir,
                "latest": latest
            }
        }
        failure_message = "Failed to get matched files in dataset {}".format(dataset_id)
        versions = self._post_json(routes.matched_files(dataset_id), data, failure_message=failure_message).json()['versions']
        # if you don't provide a version number, only the latest
        # will be included in the response body
        if version_number is None:
            version = versions[0]
        else:
            try:
                version = list(filter(lambda v: v['number'] == version_number, versions))[0]
            except IndexError:
                raise ResourceNotFoundException()
        return list(
            map(
                lambda f: DatasetFile(path=f['filename'], url=f['url']), version['files']
            )
        )

    def get_dataset_file(self, dataset_id, file_path, version=None):
        """
        Retrieves a dataset file matching a provided file path

        :param dataset_id: The id of the dataset to retrieve file from
        :type dataset_id: int
        :param file_path: The file path within the dataset
        :type file_path: str
        :param version: The dataset version to look for the file in. If nothing is supplied, the latest dataset version will be searched
        :type version: int
        :return: A dataset file matching the filepath provided
        :rtype: :class:`DatasetFile`
        """
        return self.get_dataset_files(dataset_id, "^{}$".format(file_path), version_number=version)[0]

    def get_pif(self, dataset_id, uid, version=None):
        """
        Retrieves a PIF from a given dataset.

        :param dataset_id: The id of the dataset to retrieve PIF from
        :type dataset_id: int
        :param uid: The uid of the PIF to retrieve
        :type uid: str
        :param version: The dataset version to look for the PIF in. If nothing is supplied, the latest dataset version will be searched
        :type version: int
        :return: A :class:`Pif` object
        :rtype: :class:`Pif`
        """
        failure_message = "An error occurred retrieving PIF {}".format(uid)
        if version is None:
            response = self._get(routes.pif_dataset_uid(dataset_id, uid), failure_message=failure_message)
        else:
            response = self._get(routes.pif_dataset_version_uid(dataset_id, uid, version), failure_message=failure_message)
        # Decode explicitly: on Python 3 response.content is bytes.
        return pif.loads(response.content.decode("utf-8"))

    def create_dataset(self, name=None, description=None, public=False):
        """
        Create a new data set.

        :param name: name of the dataset
        :type name: str
        :param description: description for the dataset
        :type description: str
        :param public: A boolean indicating whether or not the dataset should be public.
        :type public: bool
        :return: The newly created dataset.
        :rtype: :class:`Dataset`
        """
        data = {
            "public": _convert_bool_to_public_value(public)
        }
        if name:
            data["name"] = name
        if description:
            data["description"] = description
        dataset = {"dataset": data}
        failure_message = "Unable to create dataset"
        result = self._post_json(routes.create_dataset(), dataset, failure_message=failure_message).json()
        return _dataset_from_response_dict(result)

    def update_dataset(self, dataset_id, name=None, description=None, public=None):
        """
        Update a data set.

        :param dataset_id: The ID of the dataset to update
        :type dataset_id: int
        :param name: name of the dataset
        :type name: str
        :param description: description for the dataset
        :type description: str
        :param public: A boolean indicating whether or not the dataset should
            be public.
        :type public: bool
        :return: The updated dataset.
        :rtype: :class:`Dataset`
        """
        data = {
            "public": _convert_bool_to_public_value(public)
        }
        if name:
            data["name"] = name
        if description:
            data["description"] = description
        dataset = {"dataset": data}
        failure_message = "Failed to update dataset {}".format(dataset_id)
        response = self._post_json(routes.update_dataset(dataset_id), data=dataset, failure_message=failure_message).json()
        return _dataset_from_response_dict(response)

    def create_dataset_version(self, dataset_id):
        """
        Create a new data set version.

        :param dataset_id: The ID of the dataset for which the version must be bumped.
        :type dataset_id: int
        :return: The new dataset version.
        :rtype: :class:`DatasetVersion`
        """
        failure_message = "Failed to create dataset version for dataset {}".format(dataset_id)
        number = self._post_json(routes.create_dataset_version(dataset_id), data={}, failure_message=failure_message).json()['dataset_scoped_id']
        return DatasetVersion(number=number)
def _dataset_from_response_dict(dataset):
    # Build a Dataset model object from the raw JSON payload returned by
    # the dataset API endpoints.
    return Dataset(dataset['id'], name=dataset['name'],
                   description=dataset['description'], created_at=dataset['created_at'])
def _convert_bool_to_public_value(val):
if val == None:
return None
if val == False:
return '0'
if val == True:
return '1'
# for backwards compatability, support the old API #utahisrad
if val == '0' or val == '1':
return val
def _get_s3_presigned_url(response_dict):
"""
Helper method to create an S3 presigned url from the response dictionary.
"""
url = response_dict['url']
return url['scheme']+'://'+url['host']+url['path']+'?'+url['query'] |
#!/usr/bin/python
import sys
import grid
def addHandler(command, handler):
    """Register *handler* to be called for ladderlog *command* lines."""
    # setdefault replaces the `if not command in commands` membership test
    # plus manual list creation with the idiomatic one-liner.
    commands.setdefault(command, []).append(handler)
def removeHandler(command, handler):
    """Unregister *handler* from *command*; a no-op when either is unknown."""
    handlers = commands.get(command)
    if handlers is not None and handler in handlers:
        handlers.remove(handler)
def setChatHandler(command, handler):
    """Install *handler* as the sole handler for chat command *command*."""
    chatcommands[command] = handler
def removeChatHandler(command):
    """Remove the chat handler for *command*, if one is installed."""
    # dict.pop with a default replaces the membership test + del pair.
    chatcommands.pop(command, None)
def sendCommand(command):
    """Write one console command line to the armagetron input pipe."""
    armagetron.write(command + "\n")
def pauseBeforeRound():
    """Tell the server to hold the next round until the script releases it."""
    sendCommand("WAIT_FOR_EXTERNAL_SCRIPT 1")
def continueRound():
    """Release the round hold requested by pauseBeforeRound()."""
    sendCommand("WAIT_FOR_EXTERNAL_SCRIPT 0")
def chatCommand(command):
    """Dispatch an INVALID_COMMAND ladderlog line to a registered chat handler.

    command[1] is the chat command name, command[3] the player who issued it;
    unknown commands get a private error message.  (Stray trailing semicolon
    removed.)
    """
    if command[1] in chatcommands:
        chatcommands[command[1]]()
    else:
        sendCommand("PLAYER_MESSAGE " + command[3] + " Command " + command[1] + " not found.")
def init(command):
    """Handler for the ENCODING line: load the server-side config script."""
    sendCommand("INCLUDE script.cfg")
def run():
    """Main loop: poll ladderlog lines and dispatch them to handlers."""
    line = None
    while line != "QUIT":
        line = ladderlog.readline()
        # Empty string: no new data yet (the log is being tailed); retry.
        if not line:
            continue
        # NOTE(review): readline() keeps the trailing newline, so the
        # "QUIT" comparison above can never match -- confirm intent.
        command = line.split()
        if command[0] in commands:
            for handler in commands[command[0]]:
                handler(command)
# Ladderlog stream written by the armagetron server (first CLI argument).
ladderlog = open(sys.argv[1], 'r')
# Server command pipe (second CLI argument); buffering=1 -> line-buffered so
# each command is delivered immediately.
armagetron = open(sys.argv[2], 'a', 1)
# NOTE: rebinds the imported `grid` module name to a Grid instance.
grid = grid.Grid(sendCommand)
# Ladderlog event name -> list of handlers invoked with the split line.
commands = { "NEW_ROUND": [ grid.newRound ],
             "NEW_MATCH": [ grid.newMatch ],
             "ROUND_SCORE": [ grid.roundScore ],
             "ROUND_SCORE_TEAM": [ grid.roundScoreTeam ],
             "TEAM_CREATED": [ grid.teamCreated ],
             "TEAM_DESTROYED": [ grid.teamDestroyed ],
             "TEAM_RENAMED": [ grid.teamRenamed ],
             "TEAM_PLAYER_ADDED": [ grid.teamPlayerAdded ],
             "TEAM_PLAYER_REMOVED": [ grid.teamPlayerRemoved ],
             "PLAYER_ENTERED": [ grid.playerEntered ],
             "PLAYER_LEFT": [ grid.playerLeft ],
             "PLAYER_RENAMED": [ grid.playerRenamed ],
             "NUM_HUMANS": [ grid.numHumans ],
             "POSITIONS": [ grid.positions ],
             "ZONE_SPAWNED": [ grid.zoneSpawned ],
             "ZONE_COLLAPSED": [ grid.zoneCollapsed ],
             "GAME_END": [ grid.reset ],
             "ENCODING": [ init ],
             "INVALID_COMMAND": [ chatCommand ] }
# Chat command name -> handler; populated at runtime via setChatHandler().
chatcommands = {}
Reworked the main loop to use an explicit break on QUIT.
#!/usr/bin/python
import sys
import grid
def addHandler(command, handler):
    """Register *handler* to be called for ladderlog *command* lines."""
    # setdefault replaces the `if not command in commands` membership test
    # plus manual list creation with the idiomatic one-liner.
    commands.setdefault(command, []).append(handler)
def removeHandler(command, handler):
    """Unregister *handler* from *command*; a no-op when either is unknown."""
    handlers = commands.get(command)
    if handlers is not None and handler in handlers:
        handlers.remove(handler)
def setChatHandler(command, handler):
    """Install *handler* as the sole handler for chat command *command*."""
    chatcommands[command] = handler
def removeChatHandler(command):
    """Remove the chat handler for *command*, if one is installed."""
    # dict.pop with a default replaces the membership test + del pair.
    chatcommands.pop(command, None)
def sendCommand(command):
    """Write one console command line to the armagetron input pipe."""
    armagetron.write(command + "\n")
def pauseBeforeRound():
    """Tell the server to hold the next round until the script releases it."""
    sendCommand("WAIT_FOR_EXTERNAL_SCRIPT 1")
def continueRound():
    """Release the round hold requested by pauseBeforeRound()."""
    sendCommand("WAIT_FOR_EXTERNAL_SCRIPT 0")
def chatCommand(command):
    """Dispatch an INVALID_COMMAND ladderlog line to a registered chat handler.

    command[1] is the chat command name, command[3] the player who issued it;
    unknown commands get a private error message.  (Stray trailing semicolon
    removed.)
    """
    if command[1] in chatcommands:
        chatcommands[command[1]]()
    else:
        sendCommand("PLAYER_MESSAGE " + command[3] + " Command " + command[1] + " not found.")
def init(command):
    """Handle the ENCODING ladderlog line: load the server-side config script.

    `command` is the split ladderlog line; its contents are ignored here.
    """
    sendCommand("INCLUDE script.cfg")
def run():
    """Main event loop: dispatch ladderlog lines until QUIT is seen."""
    while True:
        raw = ladderlog.readline()
        if not raw:
            # Nothing buffered yet: keep polling the growing log file.
            continue
        # readline() keeps the trailing newline, which made the
        # `line == "QUIT"` comparison never match before.
        line = raw.strip()
        if line == "QUIT":
            break
        command = line.split()
        if not command:
            # Skip blank lines instead of crashing on command[0].
            continue
        if command[0] in commands:
            for handler in commands[command[0]]:
                handler(command)
# Input (ladderlog) and output (server command pipe) streams; the output
# stream is line-buffered (bufsize=1) so commands reach the server promptly.
ladderlog = open(sys.argv[1], 'r')
armagetron = open(sys.argv[2], 'a', 1)
# NOTE(review): rebinding `grid` shadows the imported `grid` module.
grid = grid.Grid(sendCommand)
# Dispatch table: ladderlog event name -> list of handler callables.
commands = { "NEW_ROUND": [ grid.newRound ],
             "NEW_MATCH": [ grid.newMatch ],
             "ROUND_SCORE": [ grid.roundScore ],
             "ROUND_SCORE_TEAM": [ grid.roundScoreTeam ],
             "TEAM_CREATED": [ grid.teamCreated ],
             "TEAM_DESTROYED": [ grid.teamDestroyed ],
             "TEAM_RENAMED": [ grid.teamRenamed ],
             "TEAM_PLAYER_ADDED": [ grid.teamPlayerAdded ],
             "TEAM_PLAYER_REMOVED": [ grid.teamPlayerRemoved ],
             "PLAYER_ENTERED": [ grid.playerEntered ],
             "PLAYER_LEFT": [ grid.playerLeft ],
             "PLAYER_RENAMED": [ grid.playerRenamed ],
             "NUM_HUMANS": [ grid.numHumans ],
             "POSITIONS": [ grid.positions ],
             "ZONE_SPAWNED": [ grid.zoneSpawned ],
             "ZONE_COLLAPSED": [ grid.zoneCollapsed ],
             "GAME_END": [ grid.reset ],
             "ENCODING": [ init ],
             "INVALID_COMMAND": [ chatCommand ] }
# Chat command name -> single handler callable.
chatcommands = {}
|
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from utils import *
class Handler(BaseHandler):
    """Renders the embed instructions page (embed.html)."""
    def get(self):
        env = self.env
        # NOTE(review): the gadget link is built from env.netloc only, so it
        # always points at the bare host — confirm this is the intended URL.
        self.render('embed.html', close_button=self.params.small,
                    gadget_link_html=anchor_start(
                        'http://%s/gadget?lang=%s' % (env.netloc, env.lang)),
                    apache_link_html=anchor_start(
                        'http://www.apache.org/licenses/LICENSE-2.0.html'),
                    developers_link_html=anchor_start(
                        'http://code.google.com/p/googlepersonfinder'),
                    link_end_html='</a>'
                    )
Fix the gadget link on the embed page: build it from the repository URL instead of the bare host name.
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from utils import *
class Handler(BaseHandler):
    """Renders the embed instructions page (embed.html)."""
    def get(self):
        env = self.env
        # The gadget link is repository-specific, built from env.repo_url.
        self.render('embed.html', close_button=self.params.small,
                    gadget_link_html=anchor_start(
                        '%s/gadget?lang=%s' % (env.repo_url, env.lang)),
                    apache_link_html=anchor_start(
                        'http://www.apache.org/licenses/LICENSE-2.0.html'),
                    developers_link_html=anchor_start(
                        'http://code.google.com/p/googlepersonfinder'),
                    link_end_html='</a>'
                    )
|
""" Enaml widget for editing a list of string
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
from atom.api import (Bool, List, ContainerList, observe, set_default, Unicode, Enum, Int, Signal, Callable)
from enaml.widgets.api import RawWidget
from enaml.core.declarative import d_
from enaml.qt.QtGui import QListWidget, QListWidgetItem, QAbstractItemView, QColor
from enaml.qt.QtCore import Qt
class QtListStrWidget(RawWidget):
    """ A Qt4 implementation of an Enaml ProxyListStrView.

    Wraps a QListWidget to show an editable, checkable list of strings,
    keeping `items` / `checked_states` in sync with the widget and
    re-emitting user edits through the `item_changed` / `enable_changed`
    signals.
    """
    __slots__ = '__weakref__'
    #: The list of str being viewed
    items = d_(List(Unicode()))
    #: Checked state of each entry, parallel to `items`.
    checked_states = d_(ContainerList(Bool()))
    #: The index of the currently selected str
    selected_index = d_(Int(-1))
    #: The currently selected str
    selected_item = d_(Unicode())
    #: Whether or not the items should be checkable
    checkable = d_(Bool(True))
    #: Whether or not the items should be editable
    editable = d_(Bool(True))
    #: Optional callable used to flag invalid labels (shown in red).
    validator = d_(Callable())
    #: Expand horizontally by default.
    hug_width = set_default('weak')
    #: Emitted as item_changed(old_label, new_label) after a label edit.
    item_changed = Signal()
    #: Emitted as enable_changed(label, checked) after a check toggle.
    enable_changed = Signal()

    #--------------------------------------------------------------------------
    # Initialization API
    #--------------------------------------------------------------------------
    def create_widget(self, parent):
        """ Create the QListWidget widget.
        """
        # Create the list model and accompanying controls:
        widget = QListWidget(parent)
        for item, checked in zip(self.items, self.checked_states):
            self.add_item(widget, item, checked)
        # set selected_item here so that first change fires an 'update' rather than 'create' event
        self.selected_item = ''
        if self.items:
            self.selected_index = 0
            self.selected_item = self.items[0]
            widget.setCurrentRow(0)
        widget.itemSelectionChanged.connect(self.on_selection)
        widget.itemChanged.connect(self.on_edit)
        return widget

    def add_item(self, widget, item, checked=True):
        """Append one list entry, applying checkable/editable flags."""
        itemWidget = QListWidgetItem(item)
        if self.checkable:
            itemWidget.setCheckState(Qt.Checked if checked else Qt.Unchecked)
        if self.editable:
            _set_item_flag(itemWidget, Qt.ItemIsEditable, True)
        widget.addItem(itemWidget)

    #--------------------------------------------------------------------------
    # Signal Handlers
    #--------------------------------------------------------------------------
    def on_selection(self):
        """
        The signal handler for the index changed signal.
        """
        widget = self.get_widget()
        self.selected_index = widget.currentRow()
        self.selected_item = self.items[widget.currentRow()] if self.selected_index >= 0 else u''

    def on_edit(self, item):
        """
        The signal handler for the item changed signal.

        A changed label means the user edited the text; an unchanged label
        means the check state was toggled.
        """
        widget = self.get_widget()
        itemRow = widget.indexFromItem(item).row()
        oldLabel = self.items[itemRow]
        newLabel = item.text()
        if oldLabel != newLabel:
            self.item_changed(oldLabel, newLabel)
            self.selected_item = item.text()
            self.items[itemRow] = item.text()
            # Invalid labels are rendered in red, valid ones in black.
            if self.validator and not self.validator(newLabel):
                item.setTextColor(QColor(255,0,0))
            else:
                item.setTextColor(QColor(0,0,0))
        else:
            self.checked_states[itemRow] = True if item.checkState() == Qt.Checked else False
            self.enable_changed(item.text(), self.checked_states[itemRow])

    #--------------------------------------------------------------------------
    # ProxyListStrView API
    #--------------------------------------------------------------------------
    def set_items(self, items, widget = None):
        """Synchronise the Qt widget with `items`, adding/removing rows as needed.
        """
        widget = self.get_widget()
        count = widget.count()
        nitems = len(items)
        for idx, item in enumerate(items[:count]):
            itemWidget = widget.item(idx)
            #Update checked state before the text so that we can distinguish a checked state change from a label change
            itemWidget.setCheckState(Qt.Checked if self.checked_states[idx] else Qt.Unchecked)
            itemWidget.setText(item)
        if nitems > count:
            for item in items[count:]:
                self.add_item(widget, item)
        elif nitems < count:
            for idx in reversed(xrange(nitems, count)):
                widget.takeItem(idx)

    #--------------------------------------------------------------------------
    # Observers
    #--------------------------------------------------------------------------
    @observe('items')
    def _update_proxy(self, change):
        """ An observer which sends state change to the proxy.

        Keeps `checked_states` the same length as `items` before pushing
        the new list into the Qt widget.
        """
        if self.get_widget() != None:
            if change["name"] == "items":
                if change["type"] == "update":
                    if len(change["oldvalue"]) > len(change["value"]):
                        #We've lost an item
                        removedKey = set(change["oldvalue"]) - set(change["value"])
                        removedIndex = change["oldvalue"].index(list(removedKey)[0])
                        del self.checked_states[removedIndex]
                    elif len(change["oldvalue"]) < len(change["value"]):
                        self.checked_states.append(True)
                    self.set_items(self.items)
# Helper methods
def _set_item_flag(item, flag, enabled):
""" Set or unset the given item flag for the item.
"""
flags = item.flags()
if enabled:
flags |= flag
else:
flags &= ~flag
item.setFlags(flags)
Added validation check to add_item and set_items, factored into an apply_validator helper.
""" Enaml widget for editing a list of string
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
from atom.api import (Bool, List, ContainerList, observe, set_default, Unicode, Enum, Int, Signal, Callable)
from enaml.widgets.api import RawWidget
from enaml.core.declarative import d_
from enaml.qt.QtGui import QListWidget, QListWidgetItem, QAbstractItemView, QColor
from enaml.qt.QtCore import Qt
class QtListStrWidget(RawWidget):
    """ A Qt4 implementation of an Enaml ProxyListStrView.

    Wraps a QListWidget to show an editable, checkable list of strings,
    keeping `items` / `checked_states` in sync with the widget and
    re-emitting user edits through the `item_changed` / `enable_changed`
    signals. Labels are validated via `apply_validator`.
    """
    __slots__ = '__weakref__'
    #: The list of str being viewed
    items = d_(List(Unicode()))
    #: Checked state of each entry, parallel to `items`.
    checked_states = d_(ContainerList(Bool()))
    #: The index of the currently selected str
    selected_index = d_(Int(-1))
    #: The currently selected str
    selected_item = d_(Unicode())
    #: Whether or not the items should be checkable
    checkable = d_(Bool(True))
    #: Whether or not the items should be editable
    editable = d_(Bool(True))
    #: Optional callable used to flag invalid labels (shown in red).
    validator = d_(Callable())
    #: Expand horizontally by default.
    hug_width = set_default('weak')
    #: Emitted as item_changed(old_label, new_label) after a label edit.
    item_changed = Signal()
    #: Emitted as enable_changed(label, checked) after a check toggle.
    enable_changed = Signal()

    #--------------------------------------------------------------------------
    # Initialization API
    #--------------------------------------------------------------------------
    def create_widget(self, parent):
        """ Create the QListWidget widget.
        """
        # Create the list model and accompanying controls:
        widget = QListWidget(parent)
        for item, checked in zip(self.items, self.checked_states):
            self.add_item(widget, item, checked)
        # set selected_item here so that first change fires an 'update' rather than 'create' event
        self.selected_item = ''
        if self.items:
            self.selected_index = 0
            self.selected_item = self.items[0]
            widget.setCurrentRow(0)
        widget.itemSelectionChanged.connect(self.on_selection)
        widget.itemChanged.connect(self.on_edit)
        return widget

    def add_item(self, widget, item, checked=True):
        """Append one list entry, applying flags and validation colour."""
        itemWidget = QListWidgetItem(item)
        if self.checkable:
            itemWidget.setCheckState(Qt.Checked if checked else Qt.Unchecked)
        if self.editable:
            _set_item_flag(itemWidget, Qt.ItemIsEditable, True)
        widget.addItem(itemWidget)
        self.apply_validator(itemWidget, itemWidget.text())

    #--------------------------------------------------------------------------
    # Signal Handlers
    #--------------------------------------------------------------------------
    def on_selection(self):
        """
        The signal handler for the index changed signal.
        """
        widget = self.get_widget()
        self.selected_index = widget.currentRow()
        self.selected_item = self.items[widget.currentRow()] if self.selected_index >= 0 else u''

    def on_edit(self, item):
        """
        The signal handler for the item changed signal.

        A changed label means the user edited the text; an unchanged label
        means the check state was toggled.
        """
        widget = self.get_widget()
        itemRow = widget.indexFromItem(item).row()
        oldLabel = self.items[itemRow]
        newLabel = item.text()
        if oldLabel != newLabel:
            self.item_changed(oldLabel, newLabel)
            self.selected_item = item.text()
            self.items[itemRow] = item.text()
            self.apply_validator(item, newLabel)
        else:
            self.checked_states[itemRow] = True if item.checkState() == Qt.Checked else False
            self.enable_changed(item.text(), self.checked_states[itemRow])

    #--------------------------------------------------------------------------
    # ProxyListStrView API
    #--------------------------------------------------------------------------
    def set_items(self, items, widget = None):
        """Synchronise the Qt widget with `items`, adding/removing rows as needed.
        """
        widget = self.get_widget()
        count = widget.count()
        nitems = len(items)
        for idx, item in enumerate(items[:count]):
            itemWidget = widget.item(idx)
            #Update checked state before the text so that we can distinguish a checked state change from a label change
            itemWidget.setCheckState(Qt.Checked if self.checked_states[idx] else Qt.Unchecked)
            itemWidget.setText(item)
            self.apply_validator(itemWidget, item)
        if nitems > count:
            for item in items[count:]:
                self.add_item(widget, item)
        elif nitems < count:
            for idx in reversed(xrange(nitems, count)):
                widget.takeItem(idx)

    #--------------------------------------------------------------------------
    # Utility methods
    #--------------------------------------------------------------------------
    def apply_validator(self, item, label):
        # Invalid labels are rendered in red, valid ones in black.
        if self.validator and not self.validator(label):
            item.setTextColor(QColor(255,0,0))
        else:
            item.setTextColor(QColor(0,0,0))

    #--------------------------------------------------------------------------
    # Observers
    #--------------------------------------------------------------------------
    @observe('items')
    def _update_proxy(self, change):
        """ An observer which sends state change to the proxy.

        Keeps `checked_states` the same length as `items` before pushing
        the new list into the Qt widget.
        """
        if self.get_widget() != None:
            if change["name"] == "items":
                if change["type"] == "update":
                    if len(change["oldvalue"]) > len(change["value"]):
                        #We've lost an item
                        removedKey = set(change["oldvalue"]) - set(change["value"])
                        removedIndex = change["oldvalue"].index(list(removedKey)[0])
                        del self.checked_states[removedIndex]
                    elif len(change["oldvalue"]) < len(change["value"]):
                        self.checked_states.append(True)
                    self.set_items(self.items)
# Helper methods
def _set_item_flag(item, flag, enabled):
""" Set or unset the given item flag for the item.
"""
flags = item.flags()
if enabled:
flags |= flag
else:
flags &= ~flag
item.setFlags(flags)
|
"""
The microstructure module provide elementary classes to describe a
crystallographic granular microstructure such as mostly present in
metallic materials.
It contains several classes which are used to describe a microstructure
composed of several grains, each one having its own crystallographic
orientation:
* :py:class:`~pymicro.crystal.microstructure.Microstructure`
* :py:class:`~pymicro.crystal.microstructure.Grain`
* :py:class:`~pymicro.crystal.microstructure.Orientation`
"""
import numpy as np
import os
import vtk
import h5py
from matplotlib import pyplot as plt, colors, cm
from xml.dom.minidom import Document, parse
from pymicro.crystal.lattice import Symmetry
from math import atan2, pi
class Orientation:
"""Crystallographic orientation class.
This follows the passive rotation definition which means that it brings
the sample coordinate system into coincidence with the crystal coordinate
system. Then one may express a vector :math:`V_c` in the crystal coordinate system
from the vector in the sample coordinate system :math:`V_s` by:
.. math::
V_c = g.V_s
and inversely (because :math:`g^{-1}=g^T`):
.. math::
V_s = g^T.V_c
Most of the code to handle rotations has been written to comply with the conventions
laid in :cite:`Rowenhorst2015`.
"""
def __init__(self, matrix):
    """Initialization from the 9 components of the orientation matrix."""
    # Store the 3x3 passive rotation matrix and cache the equivalent
    # Euler-angle and Rodrigues-vector representations.
    g = np.array(matrix, dtype=np.float64).reshape((3, 3))
    self._matrix = g
    self.euler = Orientation.OrientationMatrix2Euler(g)
    self.rod = Orientation.OrientationMatrix2Rodrigues(g)
def orientation_matrix(self):
    """Returns the orientation matrix in the form of a 3x3 numpy array."""
    # Note: returns the internal array itself, not a copy.
    return self._matrix
def __repr__(self):
    """Provide a string representation of the class.

    Shows the orientation matrix, the Euler angles (degrees) and the
    Rodrigues vector.
    """
    s = 'Crystal Orientation'
    s += '\norientation matrix = %s' % self._matrix.view()
    s += '\nEuler angles (degrees) = (%8.3f,%8.3f,%8.3f)' % (self.phi1(), self.Phi(), self.phi2())
    s += '\nRodrigues vector = %s' % self.OrientationMatrix2Rodrigues(self._matrix)
    return s
@staticmethod
def cube():
    """Create the particular crystal orientation called Cube which
    corresponds to euler angles (0, 0, 0)."""
    return Orientation.from_euler((0., 0., 0.))
@staticmethod
def brass():
    """Create the particular crystal orientation called Brass which
    corresponds to euler angles (35.264, 45, 0)."""
    return Orientation.from_euler((35.264, 45., 0.))
@staticmethod
def copper():
    """Create the particular crystal orientation called Copper which
    corresponds to euler angles (90, 35.264, 45)."""
    return Orientation.from_euler((90., 35.264, 45.))
@staticmethod
def s3():
    """Create the particular crystal orientation called S3 which
    corresponds approximately to euler angles (59, 37, 63)."""
    return Orientation.from_euler((58.980, 36.699, 63.435))
@staticmethod
def goss():
    """Create the particular crystal orientation called Goss which
    corresponds to euler angles (0, 45, 0)."""
    return Orientation.from_euler((0., 45., 0.))
@staticmethod
def shear():
    """Create the particular crystal orientation called shear which
    corresponds to euler angles (45, 0, 0)."""
    return Orientation.from_euler((45., 0., 0.))
def get_ipf_colour(self, axis=np.array([0., 0., 1.]), symmetry=Symmetry.cubic):
    """Compute the IPF (inverse pole figure) colour for this orientation.

    Given a particular axis expressed in the laboratory coordinate system,
    one can compute the so called IPF colour based on that direction
    expressed in the crystal coordinate system as :math:`[x_c,y_c,z_c]`.
    There is only one tuple (u,v,w) such that:

    .. math::

        [x_c,y_c,z_c]=u.[0,0,1]+v.[0,1,1]+w.[1,1,1]

    and it is used to assign the RGB colour.

    :param axis: laboratory-frame direction (normalised internally).
    :param symmetry: crystal `Symmetry` used to reach the fundamental zone.
    :return: the (u, v, w) components used as an RGB colour.
    """
    # Normalise a *copy*: the original `axis /= norm` mutated the caller's
    # array and the shared mutable default argument in place.
    axis = axis / np.linalg.norm(axis)
    # find the axis lying in the fundamental zone
    for sym in symmetry.symmetry_operators():
        Osym = np.dot(sym, self.orientation_matrix())
        Vc = np.dot(Osym, axis)
        if Vc[2] < 0:
            Vc *= -1.  # using the upward direction
        uvw = np.array([Vc[2] - Vc[1], Vc[1] - Vc[0], Vc[0]])
        uvw /= np.linalg.norm(uvw)
        uvw /= max(uvw)
        if (uvw[0] >= 0. and uvw[0] <= 1.0) and (uvw[1] >= 0. and uvw[1] <= 1.0) and (
                uvw[2] >= 0. and uvw[2] <= 1.0):
            # found the symmetry operator mapping into the standard triangle
            break
    return uvw
def fzDihedral(rod, n):
    """check if the given Rodrigues vector is in the fundamental zone.

    The fundamental zone of a dihedral group is a prism bounded by
    top/bottom faces at +/-tan(pi/2n) and 2n side faces.
    After book from Morawiecz.

    :param rod: the 3 components of the Rodrigues vector.
    :param int n: order parameter of the dihedral symmetry (2, 3, 4 or 6).
    :return: True if `rod` lies inside the fundamental zone.
    """
    # top and bottom face at +/-tan(pi/2n)
    t = np.tan(np.pi / (2 * n))
    if abs(rod[2]) > t:
        return False
    # 2n faces distance 1 from origin
    # y <= ((2+sqrt(2))*t - (1+sqrt(2))) * x + (1+sqrt(2))*(1-t)
    # fix: the original referenced the undefined names `ro` and `math`
    # (only atan2/pi are imported from math at file level); use np instead.
    y, x = sorted([abs(rod[0]), abs(rod[1])])
    if x > 1:
        return False
    return {
        2: True,
        3: y / (1 + np.sqrt(2)) + (1 - np.sqrt(2. / 3)) * x < 1 - 1 / np.sqrt(3),
        4: y + x < np.sqrt(2),
        6: y / (1 + np.sqrt(2)) + (1 - 2 * np.sqrt(2) + np.sqrt(6)) * x < np.sqrt(3) - 1
    }[n]
def inFZ(self, symmetry=Symmetry.cubic):
    """Check if the given Orientation lies within the fundamental zone.

    For a given crystal symmetry, several rotations can describe the same
    physical crystallographic arrangement. The Rodrigues fundamental zone
    restricts the orientation space accordingly.

    :param symmetry: the crystal `Symmetry`; only cubic is supported.
    :return: True if the cached Rodrigues vector is inside the zone.
    :raises ValueError: for an unsupported symmetry.
    """
    r = self.rod
    if symmetry == Symmetry.cubic:
        # octahedral (T23) condition: L1 norm of the Rodrigues vector <= 1
        inFZT23 = np.abs(r).sum() <= 1.0
        # in the cubic symmetry, each component must be < 2 ** 0.5 - 1
        inFZ = inFZT23 and np.abs(r).max() <= 2 ** 0.5 - 1
    else:
        raise (ValueError('unsupported crystal symmetry: %s' % symmetry))
    return inFZ
def move_to_FZ(self, symmetry=Symmetry.cubic, verbose=False):
    """
    Compute the equivalent crystal orientation in the Fundamental Zone of a given symmetry.

    :param Symmetry symmetry: an instance of the `Symmetry` class
    :param verbose: flag for verbose mode
    :return: a new Orientation instance which lies in the fundamental zone.
    """
    # Delegate the search for the equivalent rotation to the symmetry class.
    om = symmetry.move_rotation_to_FZ(self.orientation_matrix(), verbose=verbose)
    return Orientation(om)
@staticmethod
def misorientation_MacKenzie(psi):
    """Return the fraction of the misorientations corresponding to the
    given :math:`\\psi` angle in the reference solution derived By MacKenzie in
    his 1958 paper :cite:`MacKenzie_1958`.

    :param psi: the misorientation angle in radians.
    :returns: the value in the cummulative distribution corresponding to psi.
    """
    from math import sqrt, sin, cos, tan, pi, acos
    # the piecewise expression is selected on the angle in degrees
    psidg = 180 * psi / pi
    if 0 <= psidg <= 45:
        p = 2. / 15 * (1 - cos(psi))
    elif 45 < psidg <= 60:
        p = 2. / 15 * (3 * (sqrt(2) - 1) * sin(psi) - 2 * (1 - cos(psi)))
    elif 60 < psidg <= 60.72:
        p = 2. / 15 * ((3 * (sqrt(2) - 1) + 4. / sqrt(3)) * sin(psi) - 6. * (1 - cos(psi)))
    elif 60.72 < psidg <= 62.8:
        X = (sqrt(2) - 1) / (1 - (sqrt(2) - 1) ** 2 / tan(0.5 * psi) ** 2) ** 0.5
        Y = (sqrt(2) - 1) ** 2 / ((3 - 1 / tan(0.5 * psi) ** 2) ** 0.5)
        p = (2. / 15) * ((3 * (sqrt(2) - 1) + 4 / sqrt(3)) * sin(psi) - 6 * (1 - cos(psi))) \
            - 8. / (5 * pi) * (
            2 * (sqrt(2) - 1) * acos(X / tan(0.5 * psi)) + 1. / sqrt(3) * acos(Y / tan(0.5 * psi))) * sin(psi) \
            + 8. / (5 * pi) * (2 * acos((sqrt(2) + 1) * X / sqrt(2)) + acos((sqrt(2) + 1) * Y / sqrt(2))) * (
            1 - cos(psi))
    else:
        # beyond the maximum possible cubic misorientation angle
        p = 0.
    return p
@staticmethod
def misorientation_axis_from_delta(delta):
"""Compute the misorientation axis from the misorientation matrix.
:param delta: The 3x3 misorientation matrix.
:returns: the misorientation axis (normalised vector).
"""
n = np.array([delta[1, 2] - delta[2, 1], delta[2, 0] - delta[0, 2], delta[0, 1] - delta[1, 0]])
n /= np.sqrt(
(delta[1, 2] - delta[2, 1]) ** 2 + (delta[2, 0] - delta[0, 2]) ** 2 + (delta[0, 1] - delta[1, 0]) ** 2)
return n
def misorientation_axis(self, orientation):
    """Compute the misorientation axis with another crystal orientation.

    This vector is by definition common to both crystalline orientations.

    :param orientation: an instance of :py:class:`~pymicro.crystal.microstructure.Orientation` class.
    :returns: the misorientation axis (normalised vector).
    """
    # delta = g_self . g_other^T is the rotation bringing one onto the other
    delta = np.dot(self.orientation_matrix(), orientation.orientation_matrix().T)
    return Orientation.misorientation_axis_from_delta(delta)
@staticmethod
def misorientation_angle_from_delta(delta):
"""Compute the misorientation angle from the misorientation matrix.
Compute the angle assocated with this misorientation matrix :math:`\\Delta g`.
It is defined as :math:`\\omega = \\arccos(\\text{trace}(\\Delta g)/2-1)`.
To avoid float rounding error, the argument is rounded to 1. if it is within 1 and 1 plus 32 bits floating
point precison.
.. note::
This does not account for the crystal symmetries. If you want to
find the disorientation between two orientations, use the
:py:meth:`~pymicro.crystal.microstructure.Orientation.disorientation`
method.
:param delta: The 3x3 misorientation matrix.
:returns float: the misorientation angle in radians.
"""
cw = 0.5 * (delta.trace() - 1)
if cw > 1. and cw - 1. < 10 * np.finfo('float32').eps:
print('cw=%.20f, rounding to 1.' % cw)
cw = 1.
omega = np.arccos(cw)
return omega
def disorientation(self, orientation, crystal_structure=Symmetry.triclinic):
    """Compute the disorientation with another crystal orientation.

    Considering all the possible crystal symmetries, the disorientation
    is defined as the combination of the minimum misorientation angle
    and the misorientation axis lying in the fundamental zone, which
    can be used to bring the two lattices into coincidence.

    .. note::

      Both orientations are supposed to have the same symmetry. This is not necessarily the case in multi-phase
      materials.

    :param orientation: an instance of :py:class:`~pymicro.crystal.microstructure.Orientation` class desribing the other crystal orientation from which to compute the angle.
    :param crystal_structure: an instance of the `Symmetry` class describing the crystal symmetry, triclinic (no symmetry) by default.
    :returns tuple: the misorientation angle in radians, the axis as a numpy vector (crystal coordinates), the axis as a numpy vector (sample coordinates).
    """
    the_angle = np.pi
    symmetries = crystal_structure.symmetry_operators()
    (gA, gB) = (self.orientation_matrix(), orientation.orientation_matrix())  # nicknames
    for (g1, g2) in [(gA, gB), (gB, gA)]:
        for j in range(symmetries.shape[0]):
            sym_j = symmetries[j]
            oj = np.dot(sym_j, g1)  # the crystal symmetry operator is left applied
            for i in range(symmetries.shape[0]):
                sym_i = symmetries[i]
                oi = np.dot(sym_i, g2)
                delta = np.dot(oi, oj.T)
                # (removed leftover per-iteration debug prints of delta and
                # the angle which spammed stdout in this O(n^2) loop)
                mis_angle = Orientation.misorientation_angle_from_delta(delta)
                if mis_angle < the_angle:
                    # now compute the misorientation axis, should check if it lies in the fundamental zone
                    mis_axis = Orientation.misorientation_axis_from_delta(delta)
                    # here we have np.dot(oi.T, mis_axis) = np.dot(oj.T, mis_axis)
                    the_angle = mis_angle
                    the_axis = mis_axis
                    the_axis_xyz = np.dot(oi.T, the_axis)
    return (the_angle, the_axis, the_axis_xyz)
def phi1(self):
    """Convenience method to expose the first Euler angle."""
    return self.euler[0]
def Phi(self):
    """Convenience method to expose the second Euler angle."""
    return self.euler[1]
def phi2(self):
    """Convenience method to expose the third Euler angle."""
    return self.euler[2]
def compute_XG_angle(self, hkl, omega, verbose=False):
    """Compute the angle between the scattering vector :math:`\mathbf{G_{l}}`
    and :math:`\mathbf{-X}` the X-ray unit vector at a given angular position :math:`\\omega`.

    A given hkl plane defines the scattering vector :math:`\mathbf{G_{hkl}}` by
    the miller indices in the reciprocal space. It is expressed in the
    cartesian coordinate system by :math:`\mathbf{B}.\mathbf{G_{hkl}}` and in the
    laboratory coordinate system accounting for the crystal orientation
    by :math:`\mathbf{g}^{-1}.\mathbf{B}.\mathbf{G_{hkl}}`.

    The crystal is assumed to be placed on a rotation stage around the
    laboratory vertical axis. The scattering vector can finally be
    written as :math:`\mathbf{G_l}=\mathbf{\\Omega}.\mathbf{g}^{-1}.\mathbf{B}.\mathbf{G_{hkl}}`.
    The X-rays unit vector is :math:`\mathbf{X}=[1, 0, 0]`. So the computed angle
    is :math:`\\alpha=acos(-\mathbf{X}.\mathbf{G_l}/||\mathbf{G_l}||`

    The Bragg condition is fulfilled when :math:`\\alpha=\pi/2-\\theta_{Bragg}`

    :param hkl: the hkl plane, an instance of :py:class:`~pymicro.crystal.lattice.HklPlane`
    :param omega: the angle of rotation of the crystal around the laboratory vertical axis.
    :param bool verbose: activate verbose mode (False by default).
    :return float: the angle between :math:`-\mathbf{X}` and :math:`\mathbf{G_{l}}` in degrees.
    """
    X = np.array([1., 0., 0.])
    gt = self.orientation_matrix().transpose()
    Gc = hkl.scattering_vector()
    Gs = gt.dot(Gc)  # in the cartesian sample CS
    # rotation matrix for the omega turn around the vertical (z) axis
    omegar = omega * np.pi / 180
    R = np.array([[np.cos(omegar), -np.sin(omegar), 0], [np.sin(omegar), np.cos(omegar), 0], [0, 0, 1]])
    Gl = R.dot(Gs)
    alpha = np.arccos(np.dot(-X, Gl) / np.linalg.norm(Gl)) * 180 / np.pi
    if verbose:
        print('scattering vector in the crystal CS', Gc)
        print('scattering vector in the sample CS', Gs)
        print('scattering vector in the laboratory CS (including Omega rotation)', Gl)
        print('angle (deg) between -X and G', alpha)
    return alpha
@staticmethod
def solve_trig_equation(A, B, C, verbose=False):
"""Solve the trigonometric equation in the form of:
.. math::
A\cos\\theta + B\sin\\theta = C
:param float A: the A constant in the equation.
:param float B: the B constant in the equation.
:param float C: the C constant in the equation.
:return tuple: the two solutions angular values in degrees.
"""
Delta = 4 * (A ** 2 + B ** 2 - C ** 2)
if Delta < 0:
raise ValueError('Delta < 0 (%f)' % Delta)
if verbose:
print('A={0:.3f}, B={1:.3f}, C={2:.3f}, Delta={3:.1f}'.format(A, B, C, Delta))
theta_1 = 2 * np.arctan2(B - 0.5 * np.sqrt(Delta), A + C) * 180. / np.pi % 360
theta_2 = 2 * np.arctan2(B + 0.5 * np.sqrt(Delta), A + C) * 180. / np.pi % 360
return theta_1, theta_2
def dct_omega_angles(self, hkl, lambda_keV, verbose=False):
    """Compute the two omega angles which satisfy the Bragg condition.

    For a given crystal orientation sitting on a vertical rotation axis,
    there is exactly two :math:`\omega` positions in :math:`[0, 2\pi]` for which
    a particular :math:`(hkl)` reflexion will fulfil Bragg's law.

    According to the Bragg's law, a crystallographic plane of a given
    grain will be in diffracting condition if:

    .. math::

       \sin\\theta=-[\mathbf{\Omega}.\mathbf{g}^{-1}\mathbf{G_c}]_1

    with :math:`\mathbf{\Omega}` the matrix associated with the rotation
    axis:

    .. math::

       \mathbf{\Omega}=\\begin{pmatrix}
                       \cos\omega & -\sin\omega & 0 \\\\
                       \sin\omega & \cos\omega  & 0 \\\\
                       0          & 0           & 1 \\\\
                       \end{pmatrix}

    This method solves the associated second order equation to return
    the two corresponding omega angles.

    :param hkl: The given cristallographic plane :py:class:`~pymicro.crystal.lattice.HklPlane`
    :param float lambda_keV: The X-rays energy expressed in keV
    :param bool verbose: Verbose mode (False by default)
    :returns tuple: :math:`(\omega_1, \omega_2)` the two values of the \
    rotation angle around the vertical axis (in degrees).
    """
    (h, k, l) = hkl.miller_indices()
    theta = hkl.bragg_angle(lambda_keV, verbose=verbose)
    # energy (keV) to wavelength (nm) conversion
    lambda_nm = 1.2398 / lambda_keV
    gt = self.orientation_matrix().T  # gt = g^{-1} in Poulsen 2004
    Gc = hkl.scattering_vector()
    A = np.dot(Gc, gt[0])
    B = - np.dot(Gc, gt[1])
    # A = h / a * gt[0, 0] + k / b * gt[0, 1] + l / c * gt[0, 2]
    # B = -h / a * gt[1, 0] - k / b * gt[1, 1] - l / c * gt[1, 2]
    C = -2 * np.sin(theta) ** 2 / lambda_nm  # the minus sign comes from the main equation
    omega_1, omega_2 = Orientation.solve_trig_equation(A, B, C, verbose=verbose)
    if verbose:
        print('the two omega values in degrees fulfilling the Bragg condition are (%.1f, %.1f)' % (omega_1, omega_2))
    return omega_1, omega_2
def rotating_crystal(self, hkl, lambda_keV, omega_step=0.5, display=True, verbose=False):
    """Plot diffraction quantities over a full rotation of the crystal.

    The crystal is rotated around the vertical axis; for each omega step
    the angle between -X and the diffraction vector G, the angle between
    K = X + G and X and the magnitude of K are computed, then plotted
    against the two omega values fulfilling Bragg's law.

    :param hkl: the `HklPlane` instance to probe.
    :param float lambda_keV: the X-ray energy in keV.
    :param float omega_step: rotation step in degrees (0.5 by default).
    :param bool display: show the figure if True, otherwise save it as a pdf.
    :param bool verbose: print per-step diagnostics (False by default; the
        parameter previously existed but was ignored and everything printed).
    """
    from pymicro.xray.xray_utils import lambda_keV_to_nm
    lambda_nm = lambda_keV_to_nm(lambda_keV)
    X = np.array([1., 0., 0.]) / lambda_nm
    gt = self.orientation_matrix().transpose()
    (h, k, l) = hkl.miller_indices()
    theta = hkl.bragg_angle(lambda_keV) * 180. / np.pi
    if verbose:
        print('magnitude of X', np.linalg.norm(X))
        print('bragg angle for %d%d%d reflection is %.1f' % (h, k, l, theta))
    Gc = hkl.scattering_vector()
    Gs = gt.dot(Gc)
    alphas = []
    twothetas = []
    magnitude_K = []
    # num must be an integer: a float value raises TypeError in numpy
    omegas = np.linspace(0.0, 360.0, num=int(360.0 / omega_step), endpoint=False)
    for omega in omegas:
        # prepare rotation matrix
        omegar = omega * np.pi / 180
        R = np.array([[np.cos(omegar), -np.sin(omegar), 0], [np.sin(omegar), np.cos(omegar), 0], [0, 0, 1]])
        # R = R.dot(Rlt).dot(Rut) # with tilts
        Gl = R.dot(Gs)
        n = R.dot(gt.dot(hkl.normal()))
        G = n / hkl.interplanar_spacing()  # here G == N
        K = X + G
        magnitude_K.append(np.linalg.norm(K))
        alpha = np.arccos(np.dot(-X, G) / (np.linalg.norm(-X) * np.linalg.norm(G))) * 180 / np.pi
        alphas.append(alpha)
        twotheta = np.arccos(np.dot(K, X) / (np.linalg.norm(K) * np.linalg.norm(X))) * 180 / np.pi
        twothetas.append(twotheta)
        if verbose:
            print('\n** COMPUTING AT OMEGA=%03.1f deg' % omega)
            print('scattering vector in laboratory CS', Gl)
            print('plane normal:', hkl.normal())
            print(R)
            print('rotated plane normal:', n, ' with a norm of', np.linalg.norm(n))
            print('G vector:', G, ' with a norm of', np.linalg.norm(G))
            print('X + G vector', K)
            print('magnitude of K', np.linalg.norm(K))
            print('angle between -X and G', alpha)
            print('angle (deg) between K and X', twotheta)
    if verbose:
        print('min alpha angle is ', min(alphas))
    # compute omega_1 and omega_2 to verify graphically
    (w1, w2) = self.dct_omega_angles(hkl, lambda_keV, verbose=False)
    # gather the results in a single figure
    fig = plt.figure(figsize=(12, 10))
    fig.add_subplot(311)
    plt.title('Looking for (%d%d%d) Bragg reflexions' % (h, k, l))
    plt.plot(omegas, alphas, 'k-')
    plt.xlim(0, 360)
    plt.ylim(0, 180)
    plt.xticks(np.arange(0, 390, 30))
    # add bragg condition
    plt.axhline(90 - theta, xmin=0, xmax=360, linewidth=2)
    plt.annotate('$\pi/2-\\theta_{Bragg}$', xycoords='data', xy=(360, 90 - theta), horizontalalignment='left',
                 verticalalignment='center', fontsize=16)
    # add omega solutions
    plt.axvline(w1 + 180, ymin=0, ymax=180, linewidth=2, linestyle='dashed', color='gray')
    plt.axvline(w2 + 180, ymin=0, ymax=180, linewidth=2, linestyle='dashed', color='gray')
    plt.annotate('$\\omega_1$', xycoords='data', xy=(w1 + 180, 0), horizontalalignment='center',
                 verticalalignment='bottom', fontsize=16)
    plt.annotate('$\\omega_2$', xycoords='data', xy=(w2 + 180, 0), horizontalalignment='center',
                 verticalalignment='bottom', fontsize=16)
    plt.ylabel(r'Angle between $-X$ and $\mathbf{G}$')
    fig.add_subplot(312)
    plt.plot(omegas, twothetas, 'k-')
    plt.xlim(0, 360)
    # plt.ylim(0,180)
    plt.xticks(np.arange(0, 390, 30))
    plt.axhline(2 * theta, xmin=0, xmax=360, linewidth=2)
    plt.annotate('$2\\theta_{Bragg}$', xycoords='data', xy=(360, 2 * theta), horizontalalignment='left',
                 verticalalignment='center', fontsize=16)
    plt.axvline(w1 + 180, linewidth=2, linestyle='dashed', color='gray')
    plt.axvline(w2 + 180, linewidth=2, linestyle='dashed', color='gray')
    plt.ylabel('Angle between $X$ and $K$')
    fig.add_subplot(313)
    plt.plot(omegas, magnitude_K, 'k-')
    plt.xlim(0, 360)
    plt.axhline(np.linalg.norm(X), xmin=0, xmax=360, linewidth=2)
    plt.annotate('$1/\\lambda$', xycoords='data', xy=(360, 1 / lambda_nm), horizontalalignment='left',
                 verticalalignment='center', fontsize=16)
    plt.axvline(w1 + 180, linewidth=2, linestyle='dashed', color='gray')
    plt.axvline(w2 + 180, linewidth=2, linestyle='dashed', color='gray')
    plt.xlabel(r'Angle of rotation $\omega$')
    plt.ylabel(r'Magnitude of $X+G$ (nm$^{-1}$)')
    plt.subplots_adjust(top=0.925, bottom=0.05, left=0.1, right=0.9)
    if display:
        plt.show()
    else:
        plt.savefig('rotating_crystal_plot_%d%d%d.pdf' % (h, k, l))
def topotomo_tilts(self, hkl, verbose=False):
    """Compute the tilts for topotomography alignment.

    :param hkl: the hkl plane, an instance of :py:class:`~pymicro.crystal.lattice.HklPlane`
    :param bool verbose: activate verbose mode (False by default).
    :returns tuple: (ut, lt) the two values of tilts to apply (in radians).
    """
    # express the scattering vector in the cartesian sample CS
    gt = self.orientation_matrix().transpose()
    Gs = gt.dot(hkl.scattering_vector())
    # up tilt brings G into the (Y, Z) plane, low tilt aligns it with -X
    ut = np.arctan(-Gs[0] / Gs[2])
    lt = np.arctan(Gs[1] / (Gs[2] * np.cos(ut) - Gs[0] * np.sin(ut)))
    if verbose:
        print('up tilt (samry) should be %.3f' % (ut * 180 / np.pi))
        print('low tilt (samrx) should be %.3f' % (lt * 180 / np.pi))
    return ut, lt
def to_xml(self, doc):
    """
    Returns an XML representation of the Orientation instance.

    One child element is created per Euler angle (phi1, Phi, phi2).
    """
    print('deprecated as we are moving to hdf5 format')
    orientation = doc.createElement('Orientation')
    for tag, value in (('phi1', self.phi1()),
                       ('Phi', self.Phi()),
                       ('phi2', self.phi2())):
        angle_node = doc.createElement(tag)
        angle_node.appendChild(doc.createTextNode('%f' % value))
        orientation.appendChild(angle_node)
    return orientation
@staticmethod
def from_xml(orientation_node):
    """Build an Orientation instance from an XML node holding the 3 Euler angles."""
    # the three children are expected in (phi1, Phi, phi2) order
    angles = [float(orientation_node.childNodes[i].childNodes[0].nodeValue)
              for i in range(3)]
    return Orientation.from_euler(np.array(angles))
@staticmethod
def from_euler(euler, convention='Bunge'):
    """Rotation matrix from Euler angles.

    This is the classical method to obtain an orientation matrix by 3 successive rotations. The result depends on
    the convention used (how the successive rotation axes are chosen). In the Bunge convention, the first rotation
    is around Z, the second around the new X and the third one around the new Z. In the Roe convention, the second
    one is around Y.
    """
    if convention == 'Roe':
        # shift the first and last angles to map Roe onto Bunge
        angles = (euler[0] + 90, euler[1], euler[2] - 90)
    else:
        angles = (euler[0], euler[1], euler[2])
    return Orientation(Orientation.Euler2OrientationMatrix(angles))
@staticmethod
def from_rodrigues(rod):
    """Create an Orientation from a Rodrigues vector."""
    return Orientation(Orientation.Rodrigues2OrientationMatrix(rod))
@staticmethod
def from_quaternion(quat):
    """Create an Orientation from a quaternion.

    :param quat: the 4 component quaternion [q0, q1, q2, q3].
    :returns: a new Orientation instance.
    """
    # fixed: the method is named Quaternion2OrientationMatrix (singular);
    # the previous call to Quaternions2OrientationMatrix raised AttributeError
    g = Orientation.Quaternion2OrientationMatrix(quat)
    o = Orientation(g)
    return o
@staticmethod
def Euler2OrientationMatrix(euler):
"""
Compute the orientation matrix :math:`\mathbf{g}` associated with the 3 Euler angles
:math:`(\phi_1, \Phi, \phi_2)`. The matrix is calculated via (see the `euler_angles` recipe in the cookbook
for a detailed example):
.. math::
\mathbf{g}=\\begin{pmatrix}
\cos\phi_1\cos\phi_2 - \sin\phi_1\sin\phi_2\cos\Phi & \sin\phi_1\cos\phi_2 + \cos\phi_1\sin\phi_2\cos\Phi & \sin\phi_2\sin\Phi \\\\
-\cos\phi_1\sin\phi_2 - \sin\phi_1\cos\phi_2\cos\Phi & -\sin\phi_1\sin\phi_2 + \cos\phi_1\cos\phi_2\cos\Phi & \cos\phi_2\sin\Phi \\\\
\sin\phi_1\sin\Phi & -\cos\phi_1\sin\Phi & \cos\Phi \\\\
\end{pmatrix}
:param euler: The triplet of the Euler angles (in degrees).
:returns g: The 3x3 orientation matrix.
"""
(rphi1, rPhi, rphi2) = np.radians(euler)
c1 = np.cos(rphi1)
s1 = np.sin(rphi1)
c = np.cos(rPhi)
s = np.sin(rPhi)
c2 = np.cos(rphi2)
s2 = np.sin(rphi2)
# rotation matrix g
g11 = c1 * c2 - s1 * s2 * c
g12 = s1 * c2 + c1 * s2 * c
g13 = s2 * s
g21 = -c1 * s2 - s1 * c2 * c
g22 = -s1 * s2 + c1 * c2 * c
g23 = c2 * s
g31 = s1 * s
g32 = -c1 * s
g33 = c
g = np.array([[g11, g12, g13], [g21, g22, g23], [g31, g32, g33]])
return g
@staticmethod
def Zrot2OrientationMatrix(x1=None, x2=None, x3=None):
"""Compute the orientation matrix from the rotated coordinates given in the
.inp file for Zebulon's computations
Need at least two vectors to compute cross product
Still need some tests to validate this function
"""
if (x1 is None and x2 is None):
raise NameError('Need at least two vectors to compute the matrix')
elif (x1 == None and x3 == None):
raise NameError('Need at least two vectors to compute the matrix')
elif (x3 == None and x2 == None):
raise NameError('Need at least two vectors to compute the matrix')
if x1 == None:
x1 = np.cross(x2, x3)
elif x2 == None:
x2 = np.cross(x3, x1)
elif x3 == None:
x3 = np.cross(x1, x2)
x1 = x1 / np.linalg.norm(x1)
x2 = x2 / np.linalg.norm(x2)
x3 = x3 / np.linalg.norm(x3)
g = np.array([x1, x2, x3]).transpose()
return g
@staticmethod
def OrientationMatrix2EulerSF(g):
"""
Compute the Euler angles (in degrees) from the orientation matrix
in a similar way as done in Mandel_crystal.c
"""
tol = 0.1
r = np.zeros(9, dtype=np.float64) # double precision here
# Z-set order for tensor is 11 22 33 12 23 13 21 32 31
r[0] = g[0, 0]
r[1] = g[1, 1]
r[2] = g[2, 2]
r[3] = g[0, 1]
r[4] = g[1, 2]
r[5] = g[0, 2]
r[6] = g[1, 0]
r[7] = g[2, 1]
r[8] = g[2, 0]
phi = np.arccos(r[2])
if phi == 0.:
phi2 = 0.
phi1 = np.arcsin(r[6])
if abs(np.cos(phi1) - r[0]) > tol:
phi1 = np.pi - phi1
else:
x2 = r[5] / np.sin(phi)
x1 = r[8] / np.sin(phi);
if x1 > 1.:
x1 = 1.
if x2 > 1.:
x2 = 1.
if x1 < -1.:
x1 = -1.
if x2 < -1.:
x2 = -1.
phi2 = np.arcsin(x2)
phi1 = np.arcsin(x1)
if abs(np.cos(phi2) * np.sin(phi) - r[7]) > tol:
phi2 = np.pi - phi2
if abs(np.cos(phi1) * np.sin(phi) + r[4]) > tol:
phi1 = np.pi - phi1
return np.degrees(np.array([phi1, phi, phi2]))
@staticmethod
def OrientationMatrix2Euler(g):
"""
Compute the Euler angles from the orientation matrix.
This conversion follows the paper of Rowenhorst et al. :cite:`Rowenhorst2015`.
In particular when :math:`g_{33} = 1` within the machine precision,
there is no way to determine the values of :math:`\phi_1` and :math:`\phi_2`
(only their sum is defined). The convention is to attribute
the entire angle to :math:`\phi_1` and set :math:`\phi_2` to zero.
:param g: The 3x3 orientation matrix
:return: The 3 euler angles in degrees.
"""
eps = np.finfo('float').eps
(phi1, Phi, phi2) = (0.0, 0.0, 0.0)
# treat special case where g[2, 2] = 1
if np.abs(g[2, 2]) >= 1 - eps:
if g[2, 2] > 0.0:
phi1 = np.arctan2(g[0][1], g[0][0])
else:
phi1 = -np.arctan2(-g[0][1], g[0][0])
Phi = np.pi
else:
Phi = np.arccos(g[2][2])
zeta = 1.0 / np.sqrt(1.0 - g[2][2] ** 2)
phi1 = np.arctan2(g[2][0] * zeta, -g[2][1] * zeta)
phi2 = np.arctan2(g[0][2] * zeta, g[1][2] * zeta)
# ensure angles are in the range [0, 2*pi]
if phi1 < 0.0:
phi1 += 2 * np.pi
if Phi < 0.0:
Phi += 2 * np.pi
if phi2 < 0.0:
phi2 += 2 * np.pi
return np.degrees([phi1, Phi, phi2])
@staticmethod
def OrientationMatrix2Rodrigues(g):
"""
Compute the rodrigues vector from the orientation matrix.
:param g: The 3x3 orientation matrix representing the rotation.
:returns: The Rodrigues vector as a 3 components array.
"""
t = g.trace() + 1
if np.abs(t) < np.finfo(g.dtype).eps:
print('warning, returning [0., 0., 0.], consider using axis, angle representation instead')
return np.zeros(3)
else:
r1 = (g[1, 2] - g[2, 1]) / t
r2 = (g[2, 0] - g[0, 2]) / t
r3 = (g[0, 1] - g[1, 0]) / t
return np.array([r1, r2, r3])
@staticmethod
def OrientationMatrix2Quaternion(g):
P = -1 # passif
q0 = 0.5 * np.sqrt(1 + g[0, 0] + g[1, 1] + g[2, 2])
q1 = P * 0.5 * np.sqrt(1 + g[0, 0] - g[1, 1] - g[2, 2])
q2 = P * 0.5 * np.sqrt(1 - g[0, 0] + g[1, 1] - g[2, 2])
q3 = P * 0.5 * np.sqrt(1 - g[0, 0] - g[1, 1] + g[2, 2])
if g[2, 1] < g[1, 2]:
q1 = q1 * -1
elif g[0, 2] < g[2, 0]:
q2 = q2 * -1
elif g[1, 0] < g[0, 1]:
q3 = q3 * -1
qn = np.sqrt(q0 ** 2 + q1 ** 2 + q2 ** 2 + q3 ** 2)
q = np.array([q0, q1, q2, q3]) / qn
return q
@staticmethod
def Rodrigues2OrientationMatrix(rod):
"""
Compute the orientation matrix from the Rodrigues vector.
:param rod: The Rodrigues vector as a 3 components array.
:returns: The 3x3 orientation matrix representing the rotation.
"""
r = np.linalg.norm(rod)
I = np.diagflat(np.ones(3))
if r < np.finfo(r.dtype).eps:
return I
else:
theta = 2 * np.arctan(r)
n = rod / r
omega = np.array([[0.0, n[2], -n[1]], [-n[2], 0.0, n[0]], [n[1], -n[0], 0.0]])
return I + np.sin(theta) * omega + (1 - np.cos(theta)) * omega.dot(omega)
@staticmethod
def Rodrigues2Axis(rod):
"""
Compute the axis/angle representation from the Rodrigues vector.
:param rod: The Rodrigues vector as a 3 components array.
:returns: A tuple in the (axis, angle) form.
"""
r = np.linalg.norm(rod)
axis = rod / r
angle = 2 * np.arctan(r)
return axis, angle
@staticmethod
def Axis2OrientationMatrix(axis, angle):
"""
Compute the (passive) orientation matrix associated the rotation defined by the given (axis, angle) pair.
:param axis: the rotation axis.
:param angle: the rotation angle (degrees).
:returns: the 3x3 orientation matrix.
"""
omega = np.radians(angle)
c = np.cos(omega)
s = np.sin(omega)
g = np.array([[c + (1 - c) * axis[0] ** 2, (1 - c) * axis[0] * axis[1] + s * axis[2],
(1 - c) * axis[0] * axis[2] - s * axis[1]],
[(1 - c) * axis[0] * axis[1] - s * axis[2], c + (1 - c) * axis[1] ** 2,
(1 - c) * axis[1] * axis[2] + s * axis[0]],
[(1 - c) * axis[0] * axis[2] + s * axis[1], (1 - c) * axis[1] * axis[2] - s * axis[0],
c + (1 - c) * axis[2] ** 2]])
return g
@staticmethod
def Euler2Axis(euler):
"""
Compute the (axis, angle) representation associated to this (passive) rotation expressed by the Euler angles.
:param euler: 3 euler angles (in degrees)
:returns: a tuple containing the axis (a vector) and the angle (in radians).
"""
(phi1, Phi, phi2) = np.radians(euler)
t = np.tan(0.5 * Phi)
s = 0.5 * (phi1 + phi2)
d = 0.5 * (phi1 - phi2)
tau = np.sqrt(t ** 2 + np.sin(s) ** 2)
alpha = 2 * np.arctan2(tau, np.cos(s))
if alpha > np.pi:
axis = np.array([-t / tau * np.cos(d), -t / tau * np.sin(d), -1 / tau * np.sin(s)])
angle = 2 * np.pi - alpha
else:
axis = np.array([t / tau * np.cos(d), t / tau * np.sin(d), 1 / tau * np.sin(s)])
angle = alpha
return axis, angle
@staticmethod
def Euler2Quaternion(euler):
"""
Compute the quaternion from the 3 euler angles (in degrees)
"""
(phi1, Phi, phi2) = np.radians(euler)
q0 = np.cos(0.5 * (phi1 + phi2)) * np.cos(0.5 * Phi)
q1 = np.cos(0.5 * (phi1 - phi2)) * np.sin(0.5 * Phi)
q2 = np.sin(0.5 * (phi1 - phi2)) * np.sin(0.5 * Phi)
q3 = np.sin(0.5 * (phi1 + phi2)) * np.cos(0.5 * Phi)
return np.array([q0, q1, q2, q3])
@staticmethod
def Euler2Rodrigues(euler):
"""
Compute the rodrigues vector from the 3 euler angles (in degrees)
"""
(phi1, Phi, phi2) = np.radians(euler)
a = 0.5 * (phi1 - phi2)
b = 0.5 * (phi1 + phi2)
r1 = np.tan(0.5 * Phi) * np.cos(a) / np.cos(b)
r2 = np.tan(0.5 * Phi) * np.sin(a) / np.cos(b)
r3 = np.tan(b)
return np.array([r1, r2, r3])
@staticmethod
def Quaternion2Euler(quat, convention):
"""
Compute Euler angles (in radians) from Quaternions
:param quat: 4 values defining a quaternion
:param convention: 'A' for active, 'P' for passive rotations
:return: 3 Euler angles (in degrees, Bunge convention)
"""
(q0, q1, q2, q3) = quat
if convention == 'A':
#Uses active rotation convention like in EMSphInx
P = 1.
elif convention == 'P':
#Uses passive rotation convention
P = -1.
q03 = q0**2 + q3**2
q12 = q1**2 + q2**2
chi = np.sqrt(q03 * q12)
if chi == 0.:
if q12 == 0.:
phi_1 = atan2(-2 * P * q0 * q3, q0**2 - q3**2)
Phi = 0.
else:
phi_1 = atan2(-2 * q1 * q2, q1**2 - q2**2)
Phi = pi
phi_2 = 0.
else:
phi_1 = atan2((q1 * q3 - P * q0 * q2) / chi, (-P * q0 * q1 - q2 * q3) / chi)
Phi = atan2(2 * chi, q03 - q12)
phi_2 = atan2((P * q0 * q2 + q1 * q3) / chi, (q2 * q3 - P * q0 * q1) / chi)
return np.degrees([phi_1, Phi, phi_2])
@staticmethod
def Quaternion2OrientationMatrix(quat):
#Passive convention
P = -1
(q0, q1, q2, q3) = quat
qbar = q0**2 - q1**2 - q2**2 - q3**2
g = np.array([[qbar + 2 * q1**2, 2 * (q1 * q2 - P * q0 * q3), 2 * (q1 * q3 + P * q0 * q2)],
[2 * (q1 * q2 + P * q0 * q3), qbar + 2 * q2**2, 2 * (q2 * q3 - P * q0 * q1)],
[2 * (q1 * q3 - P * q0 * q2), 2 * (q2 * q3 + P * q0 * q1), qbar + 2 * q3**2]])
return g
@staticmethod
def read_euler_txt(txt_path):
    """
    Read a set of euler angles from an ascii file.

    Thin wrapper around :py:meth:`read_orientations` kept for backward
    compatibility (euler is its default data type).

    :param str txt_path: path to the text file containing the euler angles.
    :returns dict: a dictionary with the line number and the corresponding orientation.
    """
    return Orientation.read_orientations(txt_path)
@staticmethod
def read_orientations(txt_path, data_type='euler', **kwargs):
    """
    Read a set of grain orientations from a text file.

    The text file must be organised in 3 columns (the other are ignored), corresponding to either the three euler
    angles or the three rodrigues vector components, depending on the data_type). Internally the ascii file is read
    by the genfromtxt function of numpy, additional keywords (such as the delimiter) can be passed to via the
    kwargs dictionnary.

    :param str txt_path: path to the text file containing the orientations.
    :param str data_type: 'euler' (default) or 'rodrigues'.
    :param dict kwargs: additional parameters passed to genfromtxt.
    :raises ValueError: if data_type is neither 'euler' nor 'rodrigues'.
    :returns dict: a dictionary with the line number and the corresponding orientation.
    """
    # validate upfront: the previous version silently dropped every row
    # when an unknown data_type was given
    if data_type == 'euler':
        factory = Orientation.from_euler
    elif data_type == 'rodrigues':
        factory = Orientation.from_rodrigues
    else:
        raise ValueError('unsupported data_type: %s' % data_type)
    # atleast_2d guards against files holding a single line of data,
    # for which genfromtxt returns a 1-D array
    data = np.atleast_2d(np.genfromtxt(txt_path, **kwargs))
    orientations = {}
    for i in range(len(data)):
        angles = np.array([float(data[i, 0]), float(data[i, 1]), float(data[i, 2])])
        orientations[i + 1] = factory(angles)
    return orientations
@staticmethod
def read_euler_from_zset_inp(inp_path):
    """Read a set of grain orientations from a z-set input file.

    In z-set input files, the orientation data may be specified
    either using the rotation of two vector, euler angles or
    rodrigues components directly. For instance the following
    lines are extracted from a polycrystalline calculation file
    using the rotation keyword:

    ::

     **elset elset1 *file au.mat *integration theta_method_a 1.0 1.e-9 150 *rotation x1 0.438886 -1.028805 0.197933 x3 1.038339 0.893172 1.003888
     **elset elset2 *file au.mat *integration theta_method_a 1.0 1.e-9 150 *rotation x1 0.178825 -0.716937 1.043300 x3 0.954345 0.879145 1.153101
     **elset elset3 *file au.mat *integration theta_method_a 1.0 1.e-9 150 *rotation x1 -0.540479 -0.827319 1.534062 x3 1.261700 1.284318 1.004174
     **elset elset4 *file au.mat *integration theta_method_a 1.0 1.e-9 150 *rotation x1 -0.941278 0.700996 0.034552 x3 1.000816 1.006824 0.885212
     **elset elset5 *file au.mat *integration theta_method_a 1.0 1.e-9 150 *rotation x1 -2.383786 0.479058 -0.488336 x3 0.899545 0.806075 0.984268

    :param str inp_path: the path to the ascii file to read.
    :returns dict: a dictionary of the orientations associated with the elset names.
    """
    # NOTE(review): the file handle is never closed; consider a `with` block
    inp = open(inp_path)
    lines = inp.readlines()
    # skip everything up to the ***material block
    for i, line in enumerate(lines):
        if line.lstrip().startswith('***material'):
            break
    # collect the **elset lines (ignoring '%' comments) until the next *** block
    euler_lines = []
    for j, line in enumerate(lines[i + 1:]):
        # read until next *** block
        if line.lstrip().startswith('***'):
            break
        if (not line.lstrip().startswith('%') and line.find('**elset') >= 0):
            euler_lines.append(line)
    euler = []
    for l in euler_lines:
        tokens = l.split()
        # the elset name directly follows the **elset keyword
        elset = tokens[tokens.index('**elset') + 1]
        irot = tokens.index('*rotation')
        if tokens[irot + 1] == 'x1':
            # two-vector form: x1 components, then (after the 'x3' token) x3 components
            x1 = np.empty(3, dtype=float)
            x1[0] = float(tokens[irot + 2])
            x1[1] = float(tokens[irot + 3])
            x1[2] = float(tokens[irot + 4])
            x3 = np.empty(3, dtype=float)
            x3[0] = float(tokens[irot + 6])
            x3[1] = float(tokens[irot + 7])
            x3[2] = float(tokens[irot + 8])
            euler.append([elset, Orientation.Zrot2OrientationMatrix(x1=x1, x3=x3)])
        else:  # euler angles
            phi1 = tokens[irot + 1]
            Phi = tokens[irot + 2]
            phi2 = tokens[irot + 3]
            angles = np.array([float(phi1), float(Phi), float(phi2)])
            euler.append([elset, Orientation.from_euler(angles)])
    return dict(euler)
def slip_system_orientation_tensor(self, s):
    """Compute the orientation tensor M^s for this :py:class:`~pymicro.crystal.microstructure.Orientation`
    and the given slip system.

    :param s: an instance of :py:class:`~pymicro.crystal.lattice.SlipSystem`

    .. math::

      M^s_{ij} = \\left(l^s_i.n^s_j)
    """
    gt = self.orientation_matrix().transpose()
    # rotate both the plane normal and the slip direction to the sample frame
    n_rot = np.dot(gt, s.get_slip_plane().normal())
    l_rot = np.dot(gt, s.get_slip_direction().direction())
    return np.outer(l_rot, n_rot)
def slip_system_orientation_strain_tensor(self, s):
    """Compute the orientation strain tensor m^s for this :py:class:`~pymicro.crystal.microstructure.Orientation`
    and the given slip system.

    :param s: an instance of :py:class:`~pymicro.crystal.lattice.SlipSystem`

    .. math::

      m^s_{ij} = \\frac{1}{2}\\left(l^s_i.n^s_j + l^s_j.n^s_i)
    """
    gt = self.orientation_matrix().transpose()
    n_rot = np.dot(gt, s.get_slip_plane().normal())
    l_rot = np.dot(gt, s.get_slip_direction().direction())
    # symmetric part of the orientation tensor
    outer = np.outer(l_rot, n_rot)
    return 0.5 * (outer + outer.transpose())
def slip_system_orientation_rotation_tensor(self, s):
    """Compute the orientation rotation tensor q^s for this :py:class:`~pymicro.crystal.microstructure.Orientation`
    and the given slip system.

    :param s: an instance of :py:class:`~pymicro.crystal.lattice.SlipSystem`

    .. math::

      q^s_{ij} = \\frac{1}{2}\\left(l^s_i.n^s_j - l^s_j.n^s_i)
    """
    gt = self.orientation_matrix().transpose()
    n_rot = np.dot(gt, s.get_slip_plane().normal())
    l_rot = np.dot(gt, s.get_slip_direction().direction())
    # antisymmetric part of the orientation tensor
    outer = np.outer(l_rot, n_rot)
    return 0.5 * (outer - outer.transpose())
def schmid_factor(self, slip_system, load_direction=[0., 0., 1]):
    """Compute the Schmid factor for this crystal orientation and the
    given slip system.

    :param slip_system: a slip system instance.
    :param load_direction: a unit vector describing the loading direction (default: vertical axis [0, 0, 1]).
    :returns float: a number between 0 ad 0.5.
    """
    gt = self.orientation_matrix().transpose()
    # rotate plane normal (a unit vector) and slip direction to the sample frame
    n_rot = np.dot(gt, slip_system.get_slip_plane().normal())
    slip_rot = np.dot(gt, slip_system.get_slip_direction().direction())
    return np.abs(np.dot(n_rot, load_direction) * np.dot(slip_rot, load_direction))
def compute_all_schmid_factors(self, slip_systems, load_direction=[0., 0., 1], verbose=False):
    """Compute all Schmid factors for this crystal orientation and the
    given list of slip systems.

    :param slip_systems: a list of the slip system from which to compute the Schmid factor values.
    :param load_direction: a unit vector describing the loading direction (default: vertical axis [0, 0, 1]).
    :param bool verbose: activate verbose mode.
    :returns list: a list of the schmid factors.
    """
    factors = []
    for system in slip_systems:
        value = self.schmid_factor(system, load_direction)
        if verbose:
            print('Slip system: %s, Schmid factor is %.3f' % (system, value))
        factors.append(value)
    return factors
class Grain:
    """
    Class defining a crystallographic grain.

    A grain has its own crystallographic orientation.
    An optional id for the grain may be specified.
    The position field is the center of mass of the grain in world coordinates.
    The volume of the grain is expressed in pixel/voxel unit.
    """

    def __init__(self, grain_id, grain_orientation):
        self.id = grain_id  # integer id of the grain
        self.orientation = grain_orientation  # the grain crystal orientation
        self.position = np.array([0., 0., 0.])  # center of mass in world coordinates
        self.volume = 0  # warning not implemented
        self.vtkmesh = None  # optional vtk mesh of the grain

    def __repr__(self):
        """Provide a string representation of the class."""
        s = '%s\n * id = %d\n' % (self.__class__.__name__, self.id)
        s += ' * %s\n' % (self.orientation)
        s += ' * position %s\n' % np.array_str(self.position)
        # `is not None` is the idiomatic (and numpy-safe) identity test
        s += ' * has vtk mesh ? %s\n' % (self.vtkmesh is not None)
        return s

    def schmid_factor(self, slip_system, load_direction=[0., 0., 1]):
        """Compute the Schmid factor of this grain for the given slip system.

        The computation is delegated to the grain orientation; the previous
        version duplicated the whole computation locally and discarded its
        result before delegating anyway.

        :param slip_system: a slip system instance.
        :param load_direction: a unit vector describing the loading direction.
        :returns: the Schmid factor of this grain for the given slip system.
        """
        return self.orientation.schmid_factor(slip_system, load_direction)

    def SetVtkMesh(self, mesh):
        """Set the VTK mesh of this grain.

        :param mesh: The grain mesh in VTK format (typically vtkunstructuredgrid)
        """
        self.vtkmesh = mesh

    def add_vtk_mesh(self, array, contour=True, verbose=False):
        """Add a mesh to this grain.

        This method process a labeled array to extract the geometry of the grain. The grain shape is defined by
        the pixels with a value of the grain id. A vtkUniformGrid object is created and thresholded or contoured
        depending on the value of the flag `contour`.
        The resulting mesh is stored on the grain, centered on the center of mass of the grain.

        :param ndarray array: a numpy array from which to extract the grain shape.
        :param bool contour: a flag to use contour mode for the shape.
        :param bool verbose: activate verbose mode.
        """
        label = self.id  # we use the grain id here...
        # create vtk structure
        from scipy import ndimage
        from vtk.util import numpy_support
        grain_size = np.shape(array)
        array_bin = (array == label).astype(np.uint8)
        local_com = ndimage.measurements.center_of_mass(array_bin, array)
        vtk_data_array = numpy_support.numpy_to_vtk(np.ravel(array_bin, order='F'), deep=1)
        grid = vtk.vtkUniformGrid()
        # center the grid on the grain center of mass
        grid.SetOrigin(-local_com[0], -local_com[1], -local_com[2])
        grid.SetSpacing(1, 1, 1)
        if vtk.vtkVersion().GetVTKMajorVersion() > 5:
            grid.SetScalarType(vtk.VTK_UNSIGNED_CHAR, vtk.vtkInformation())
        else:
            grid.SetScalarType(vtk.VTK_UNSIGNED_CHAR)
        if contour:
            # point data + contour filter at the 0.5 iso-value
            grid.SetExtent(0, grain_size[0] - 1, 0, grain_size[1] - 1, 0, grain_size[2] - 1)
            grid.GetPointData().SetScalars(vtk_data_array)
            # contouring selected grain
            contour = vtk.vtkContourFilter()
            if vtk.vtkVersion().GetVTKMajorVersion() > 5:
                contour.SetInputData(grid)
            else:
                contour.SetInput(grid)
            contour.SetValue(0, 0.5)
            contour.Update()
            if verbose:
                print(contour.GetOutput())
            self.SetVtkMesh(contour.GetOutput())
        else:
            # cell data + threshold on the binary mask
            grid.SetExtent(0, grain_size[0], 0, grain_size[1], 0, grain_size[2])
            grid.GetCellData().SetScalars(vtk_data_array)
            # threshold selected grain
            thresh = vtk.vtkThreshold()
            thresh.ThresholdBetween(0.5, 1.5)
            # thresh.ThresholdBetween(label-0.5, label+0.5)
            if vtk.vtkVersion().GetVTKMajorVersion() > 5:
                thresh.SetInputData(grid)
            else:
                thresh.SetInput(grid)
            thresh.Update()
            if verbose:
                print('thresholding label: %d' % label)
                print(thresh.GetOutput())
            self.SetVtkMesh(thresh.GetOutput())

    def to_xml(self, doc, file_name=None):
        """
        Returns an XML representation of the Grain instance.

        :param doc: the xml Document used to create elements.
        :param str file_name: an optional mesh file name (defaults to `vtk_file_name`).
        """
        grain = doc.createElement('Grain')
        grain_id = doc.createElement('Id')
        grain_id.appendChild(doc.createTextNode('%s' % self.id))
        grain.appendChild(grain_id)
        grain.appendChild(self.orientation.to_xml(doc))
        grain_position = doc.createElement('Position')
        # one child element per coordinate of the grain position
        for axis, value in zip(('X', 'Y', 'Z'), self.position):
            coord = doc.createElement(axis)
            grain_position.appendChild(coord)
            coord.appendChild(doc.createTextNode('%f' % value))
        grain.appendChild(grain_position)
        grain_mesh = doc.createElement('Mesh')
        if not file_name:
            file_name = self.vtk_file_name()
        grain_mesh.appendChild(doc.createTextNode('%s' % file_name))
        grain.appendChild(grain_mesh)
        return grain

    @staticmethod
    def from_xml(grain_node, verbose=False):
        """Build a Grain instance from an XML node (inverse of `to_xml`)."""
        grain_id_node = grain_node.childNodes[0]
        orientation = Orientation.from_xml(grain_node.childNodes[1])
        # renamed local (was `id`, shadowing the builtin)
        gid = int(grain_id_node.childNodes[0].nodeValue)
        grain = Grain(gid, orientation)
        grain_position = grain_node.childNodes[2]
        xg = float(grain_position.childNodes[0].childNodes[0].nodeValue)
        yg = float(grain_position.childNodes[1].childNodes[0].nodeValue)
        zg = float(grain_position.childNodes[2].childNodes[0].nodeValue)
        grain.position = np.array([xg, yg, zg])
        grain_mesh = grain_node.childNodes[3]
        grain_mesh_file = grain_mesh.childNodes[0].nodeValue
        if verbose:
            print(grain_mesh_file)
        grain.load_vtk_repr(grain_mesh_file, verbose)
        return grain

    def vtk_file_name(self):
        """Return the default vtk mesh file name for this grain."""
        return 'grain_%d.vtu' % self.id

    def save_vtk_repr(self, file_name=None):
        """Save the vtk representation of the grain to file."""
        import vtk
        if not file_name:
            file_name = self.vtk_file_name()
        print('writting ' + file_name)
        writer = vtk.vtkXMLUnstructuredGridWriter()
        writer.SetFileName(file_name)
        # NOTE(review): VTK > 5 usually requires SetInputData here -- confirm
        writer.SetInput(self.vtkmesh)
        writer.Write()

    def load_vtk_repr(self, file_name, verbose=False):
        """Load the vtk representation of the grain from file."""
        import vtk
        if verbose:
            print('reading ' + file_name)
        reader = vtk.vtkXMLUnstructuredGridReader()
        reader.SetFileName(file_name)
        reader.Update()
        self.vtkmesh = reader.GetOutput()

    def orientation_matrix(self):
        """Returns the grain orientation matrix."""
        return self.orientation.orientation_matrix()

    def dct_omega_angles(self, hkl, lambda_keV, verbose=False):
        """Compute the two omega angles which satisfy the Bragg condition.

        For a grain with a given crystal orientation sitting on a vertical
        rotation axis, there is exactly two omega positions in [0, 2pi] for
        which a particular hkl reflexion will fulfil Bragg's law.
        See :py:func:`~pymicro.crystal.microstructure.Orientation.dct_omega_angles`
        of the :py:class:`~pymicro.crystal.microstructure.Orientation` class.

        :param hkl: The given cristallographic :py:class:`~pymicro.crystal.lattice.HklPlane`
        :param float lambda_keV: The X-rays energy expressed in keV
        :param bool verbose: Verbose mode (False by default)
        :returns tuple: (w1, w2) the two values of the omega angle.
        """
        return self.orientation.dct_omega_angles(hkl, lambda_keV, verbose)
class Microstructure:
"""
Class used to manipulate a full microstructure.
It is typically defined as a list of grains objects.
"""
def __init__(self, name='empty'):
    """Initialize an empty microstructure.

    :param str name: a name for this microstructure ('empty' by default).
    """
    self.name = name
    self.grains = []  # list of Grain instances
    self.vtkmesh = None  # optional vtk mesh of the whole microstructure
@staticmethod
def random_texture(n=100):
    """Generate a random texture microstructure.

    **parameters:**

    *n* The number of grain orientations in the microstructure.
    """
    from random import random
    from math import acos
    micro = Microstructure(name='random_texture')
    for grain_id in range(1, n + 1):
        # draw phi1, Phi, phi2 in that order to keep the RNG sequence stable
        euler = [random() * 360.,
                 180. * acos(2 * random() - 1) / np.pi,
                 random() * 360.]
        micro.grains.append(Grain(grain_id, Orientation.from_euler(euler)))
    return micro
@staticmethod
def rand_cmap(N=4096, first_is_black=False):
    """Creates a random color map.

    The first color can be enforced to black and usually figure out the background.
    The random seed is fixed to consistently produce the same colormap.

    :param int N: the number of colors (4096 by default).
    :param bool first_is_black: force the first color to be black.
    :returns: a matplotlib ListedColormap with N random colors.
    """
    # a dedicated RandomState yields the exact same color sequence as
    # np.random.seed(13) did, without clobbering the global RNG state
    rng = np.random.RandomState(13)
    rand_colors = rng.rand(N, 3)
    if first_is_black:
        rand_colors[0] = [0., 0., 0.]  # enforce black background (value 0)
    return colors.ListedColormap(rand_colors)
def ipf_cmap(self):
    """
    Return a colormap with ipf colors.

    The colormap has 4096 entries indexed by grain id; entries without a
    corresponding grain keep the default black color.
    """
    # removed an unused local (the number of grains was computed but never used)
    ipf_colors = np.zeros((4096, 3))
    for g in self.grains:
        ipf_colors[g.id, :] = g.orientation.get_ipf_colour()
    return colors.ListedColormap(ipf_colors)
@staticmethod
def from_xml(xml_file_name, grain_ids=None, verbose=False):
    """Load a Microstructure object from an xml file.

    It is possible to restrict the grains which are loaded by providing
    the list of ids of the grains of interest.
    """
    if verbose and grain_ids:
        print('loading only grain ids %s' % grain_ids)
    micro = Microstructure()
    root = parse(xml_file_name).childNodes[0]
    micro.name = root.childNodes[0].childNodes[0].nodeValue
    for node in root.childNodes[1].childNodes:
        # filter on the grain id when a restriction list is given
        this_id = int(node.childNodes[0].childNodes[0].nodeValue)
        if grain_ids and this_id not in grain_ids:
            continue
        if verbose:
            print(node)
        micro.grains.append(Grain.from_xml(node, verbose))
    return micro
def get_grain(self, gid):
    """Get a particular grain given its id.

    This method browses the microstructure and return the grain
    corresponding to the given id. If the grain is not found, the
    method raises a `ValueError`.

    :param gid: the grain id.
    :returns: the `Grain` with the corresponding id.
    :raises ValueError: if no grain has the given id.
    """
    for candidate in self.grains:
        if candidate.id == gid:
            return candidate
    raise ValueError('grain %d not found in the microstructure' % gid)
def __repr__(self):
    """Provide a string representation of the class."""
    s = '%s\n' % self.__class__.__name__
    s += '* name: %s\n' % self.name
    for g in self.grains:
        # call repr(g): the previous code formatted the bound `g.__repr__`
        # method object itself instead of its result
        s += '* %s' % repr(g)
    return s
def SetVtkMesh(self, mesh):
    """Attach a VTK mesh to this microstructure.

    :param mesh: the mesh (VTK format) describing the microstructure.
    """
    self.vtkmesh = mesh
def print_zset_material_block(self, mat_file, grain_prefix='_ELSET'):
    """
    Outputs the material block corresponding to this microstructure for
    a finite element calculation with z-set.

    :param str mat_file: The name of the file where the material behaviour is located
    :param str grain_prefix: The grain prefix used to name the elsets corresponding to the different grains
    """
    # the context manager guarantees the file is closed even if an
    # orientation accessor raises (the previous version leaked the handle)
    with open('elset_list.txt', 'w') as f:
        for g in self.grains:
            o = g.orientation
            f.write(
                ' **elset %s%d *file %s *integration theta_method_a 1.0 1.e-9 150 *rotation %7.3f %7.3f %7.3f\n' % (
                    grain_prefix, g.id, mat_file, o.phi1(), o.Phi(), o.phi2()))
def to_h5(self):
    """Write the microstructure as a hdf5 file compatible with DREAM3D.

    The file is named after the microstructure name ('<name>.h5') and
    contains the DREAM3D-style layout: a (empty) Pipeline group and a
    DataContainers group holding ensemble data, per-grain feature data
    and a geometry group.
    """
    import time
    f = h5py.File('%s.h5' % self.name, 'w')
    # file level attributes expected by DREAM3D
    f.attrs['FileVersion'] = np.string_('7.0')
    f.attrs['DREAM3D Version'] = np.string_('6.1.77.d28a796')
    f.attrs['HDF5_Version'] = h5py.version.hdf5_version
    f.attrs['h5py_version'] = h5py.version.version
    f.attrs['file_time'] = time.time()
    # pipeline group (empty here)
    pipeline = f.create_group('Pipeline')
    pipeline.attrs['Number_Filters'] = np.int32(0)
    # create the data container group
    data_containers = f.create_group('DataContainers')
    m = data_containers.create_group('DataContainer')
    # ensemble data
    ed = m.create_group('EnsembleData')
    ed.attrs['AttributeMatrixType'] = np.uint32(11)
    ed.attrs['TupleDimensions'] = np.uint64(2)
    cryst_structure = ed.create_dataset('CrystalStructures', data=np.array([[999], [1]], dtype=np.uint32))
    cryst_structure.attrs['ComponentDimensions'] = np.uint64(1)
    cryst_structure.attrs['DataArrayVersion'] = np.int32(2)
    cryst_structure.attrs['ObjectType'] = np.string_('DataArray<uint32_t>')
    cryst_structure.attrs['Tuple Axis Dimensions'] = np.string_('x=2')
    cryst_structure.attrs['TupleDimensions'] = np.uint64(2)
    mat_name = ed.create_dataset('MaterialName', data=[a.encode('utf8') for a in ['Invalid Phase', 'Unknown']])
    mat_name.attrs['ComponentDimensions'] = np.uint64(1)
    mat_name.attrs['DataArrayVersion'] = np.int32(2)
    mat_name.attrs['ObjectType'] = np.string_('StringDataArray')
    mat_name.attrs['Tuple Axis Dimensions'] = np.string_('x=2')
    mat_name.attrs['TupleDimensions'] = np.uint64(2)
    # feature data
    fd = m.create_group('FeatureData')
    fd.attrs['AttributeMatrixType'] = np.uint32(7)
    fd.attrs['TupleDimensions'] = np.uint64(len(self.grains))
    # NOTE(review): this reads g.orientation.euler as an attribute -- confirm
    # the Orientation class exposes it (other methods use euler angle accessors)
    avg_euler = fd.create_dataset('AvgEulerAngles',
                                  data=np.array([g.orientation.euler for g in self.grains], dtype=np.float32))
    avg_euler.attrs['ComponentDimensions'] = np.uint64(3)
    avg_euler.attrs['DataArrayVersion'] = np.int32(2)
    avg_euler.attrs['ObjectType'] = np.string_('DataArray<float>')
    avg_euler.attrs['Tuple Axis Dimensions'] = np.string_('x=%d' % len(self.grains))
    avg_euler.attrs['TupleDimensions'] = np.uint64(len(self.grains))
    # geometry
    geom = m.create_group('_SIMPL_GEOMETRY')
    geom.attrs['GeometryType'] = np.uint32(999)
    geom.attrs['GeometryTypeName'] = np.string_('UnkownGeometry')
    # create the data container bundles group
    f.create_group('DataContainerBundles')
    f.close()
@staticmethod
def from_h5(file_path, main_key='DataContainers', data_container='DataContainer', grain_data='FeatureData',
            grain_orientations='AvgEulerAngles', orientation_type='euler', grain_centroid='Centroids'):
    """Read a microstructure from a hdf5 file.

    :param str file_path: the path to the hdf5 file to read.
    :param str main_key: the string describing the root key.
    :param str data_container: the string describing the data container group in the hdf5 file.
    :param str grain_data: the string describing the grain data group in the hdf5 file.
    :param str grain_orientations: the string describing the average grain orientations in the hdf5 file.
    :param str orientation_type: the string describing the descriptor used for orientation data.
    :param str grain_centroid: the string describing the grain centroid in the hdf5 file.
    :raises ValueError: if orientation_type is neither 'euler' nor 'rodrigues'.
    :return: a `Microstructure` instance created from the hdf5 file.
    """
    # validate upfront: an unknown type previously caused an UnboundLocalError
    if orientation_type not in ('euler', 'rodrigues'):
        raise ValueError('unsupported orientation type: %s' % orientation_type)
    micro = Microstructure()
    with h5py.File(file_path, 'r') as f:
        grain_data_path = '%s/%s/%s' % (main_key, data_container, grain_data)
        # dataset[()] reads the full dataset; the legacy `.value` attribute
        # was removed in h5py 3.0
        orientations = f[grain_data_path][grain_orientations][()]
        centroids = None
        offset = 0
        if grain_centroid:
            centroids = f[grain_data_path][grain_centroid][()]
            if len(centroids) < len(orientations):
                offset = 1  # if grain 0 has not a centroid
        for i in range(len(orientations)):
            if orientations[i, 0] == 0. and orientations[i, 1] == 0. and orientations[i, 2] == 0.:
                # skip grain 0 which is always (0., 0., 0.)
                print('skipping (0., 0., 0.)')
                continue
            if orientation_type == 'euler':
                g = Grain(i, Orientation.from_euler(orientations[i] * 180 / np.pi))
            else:  # rodrigues
                g = Grain(i, Orientation.from_rodrigues(orientations[i]))
            if grain_centroid:
                g.position = centroids[i - offset]
            micro.grains.append(g)
    return micro
@staticmethod
def from_dct(data_root='.', vol_file='phase_01_vol.mat', grain_ids=None, verbose=True):
"""Create a microstructure from a DCT reconstruction.
DCT reconstructions are stored in hdf5 matlab files. the reconstructed volume file (labeled image) is stored
in the '5_reconstruction' folder and the individual grain files are stored in the '4_grains/phase_01' folder.
:param str data_root: the path to the folder containing the data.
:param str vol_file: the name of the volume file.
:param list grain_ids: a list of grain ids to load into the `Microstructure` instance.
:param bool verbose: activate verbose mode.
:return: a `Microstructure` instance created from the DCT reconstruction.
"""
from scipy import ndimage
micro = Microstructure()
micro.data_root = data_root
vol_file = os.path.join(data_root, '5_reconstruction', vol_file)
with h5py.File(vol_file, 'r') as f:
# choose weather or not to load the volume into memory here
vol = f['vol'].value.transpose(2, 1, 0) # Because how matlab writes the data, we need to swap X and Z axes in the DCT volume
if verbose:
print('loaded volume with shape: %d x %d x %d' % (vol.shape[0], vol.shape[1], vol.shape[2]))
all_grain_ids = np.unique(vol)
if not grain_ids:
grain_ids = all_grain_ids
else:
# check that all requested grain ids are present
for label in [x for x in grain_ids if x not in all_grain_ids]:
print('warning, requested grain %d is not present in the data file' % label)
micro_mesh = vtk.vtkMultiBlockDataSet()
micro_mesh.SetNumberOfBlocks(len(grain_ids))
for i, label in enumerate(grain_ids):
if label <= 0:
continue
grain_path = os.path.join('4_grains', 'phase_01', 'grain_%04d.mat' % label)
grain_file = os.path.join(micro.data_root, grain_path)
grain_info = h5py.File(grain_file)
g = Grain(label, Orientation.from_rodrigues(grain_info['R_vector'].value))
g.position = grain_info['center'].value
grain_data = vol[ndimage.find_objects(vol == label)[0]]
g.volume = ndimage.measurements.sum(vol == label)
# create the vtk representation of the grain
g.add_vtk_mesh(grain_data, contour=False)
if verbose:
print('loading grain %d' % label)
print('adding block %d to mesh for grain %d' % (i, label))
micro_mesh.SetBlock(i, g.vtkmesh)
micro.grains.append(g)
micro.SetVtkMesh(micro_mesh)
return micro
def to_xml(self, doc):
"""
Returns an XML representation of the Microstructure instance.
"""
root = doc.createElement('Microstructure')
doc.appendChild(root)
name = doc.createElement('Name')
root.appendChild(name)
name_text = doc.createTextNode(self.name)
name.appendChild(name_text)
grains = doc.createElement('Grains')
root.appendChild(grains)
for i, grain in enumerate(self.grains):
file_name = os.path.join(self.name, '%s_%d.vtu' % (self.name, i))
grains.appendChild(grain.to_xml(doc, file_name))
def save(self):
"""Saving the microstructure to the disk.
Save the metadata as a XML file and when available, also save the
vtk representation of the grains.
"""
# save the microstructure instance as xml
doc = Document()
self.to_xml(doc)
xml_file_name = '%s.xml' % self.name
print('writing ' + xml_file_name)
f = open(xml_file_name, 'wb')
doc.writexml(f, encoding='utf-8')
f.close()
# now save the vtk representation
if self.vtkmesh != None:
import vtk
vtk_file_name = '%s.vtm' % self.name
print('writing ' + vtk_file_name)
writer = vtk.vtkXMLMultiBlockDataWriter()
writer.SetFileName(vtk_file_name)
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
writer.SetInputData(self.vtkmesh)
else:
writer.SetInput(self.vtkmesh)
writer.Write()
def dct_projection(self, data, lattice, omega, dif_grains, lambda_keV, d, ps, det_npx=np.array([2048, 2048]), ds=1,
display=False, verbose=False):
"""Compute the detector image in dct configuration.
:params np.ndarray data: The 3d data set from which to compute the projection.
:params lattice: The crystal lattice of the material.
:params float omega: The rotation angle at which the projection is computed.
"""
lambda_nm = 1.2398 / lambda_keV
# prepare rotation matrix
omegar = omega * np.pi / 180
R = np.array([[np.cos(omegar), -np.sin(omegar), 0], [np.sin(omegar), np.cos(omegar), 0], [0, 0, 1]])
data_abs = np.where(data > 0, 1, 0)
x_max = np.ceil(max(data_abs.shape[0], data_abs.shape[1]) * 2 ** 0.5)
proj = np.zeros((np.shape(data_abs)[2], x_max), dtype=np.float)
if verbose:
print('diffracting grains', dif_grains)
print('proj size is ', np.shape(proj))
# handle each grain in Bragg condition
for (gid, (h, k, l)) in dif_grains:
mask_dif = (data == gid)
data_abs[mask_dif] = 0 # remove this grain from the absorption
from skimage.transform import radon
for i in range(np.shape(data_abs)[2]):
proj[i, :] = radon(data_abs[:, :, i], [omega])[:, 0]
# create the detector image (larger than the FOV) by padding the transmission image with zeros
full_proj = np.zeros(det_npx / ds, dtype=np.float)
if verbose:
print('full proj size is ', np.shape(full_proj))
print('max proj', proj.max())
# here we could use np.pad with numpy version > 1.7
print(int(0.5 * det_npx[0] / ds - proj.shape[0] / 2.))
print(int(0.5 * det_npx[0] / ds + proj.shape[0] / 2.))
print(int(0.5 * det_npx[1] / ds - proj.shape[1] / 2.))
print(int(0.5 * det_npx[1] / ds + proj.shape[1] / 2.))
# let's moderate the direct beam so we see nicely the spots with a 8 bits scale
att = 6.0 / ds # 1.0
full_proj[int(0.5 * det_npx[0] / ds - proj.shape[0] / 2.):int(0.5 * det_npx[0] / ds + proj.shape[0] / 2.), \
int(0.5 * det_npx[1] / ds - proj.shape[1] / 2.):int(0.5 * det_npx[1] / ds + proj.shape[1] / 2.)] += proj / att
# add diffraction spots
from pymicro.crystal.lattice import HklPlane
from scipy import ndimage
for (gid, (h, k, l)) in dif_grains:
# compute scattering vector
gt = self.get_grain(gid).orientation_matrix().transpose()
p = HklPlane(h, k, l, lattice)
X = np.array([1., 0., 0.]) / lambda_nm
n = R.dot(gt.dot(p.normal()))
G = n / p.interplanar_spacing() # also G = R.dot(gt.dot(h*astar + k*bstar + l*cstar))
K = X + G
# TODO explain the - signs, account for grain position in the rotated sample
(u, v) = (d * K[1] / K[0], d * K[2] / K[0]) # unit is mm
(u_mic, v_mic) = (1000 * u, 1000 * v) # unit is micron
(up, vp) = (0.5 * det_npx[0] / ds + u_mic / (ps * ds),
0.5 * det_npx[1] / ds + v_mic / (ps * ds)) # unit is pixel on the detector
if verbose:
print('plane normal:', p.normal())
print(R)
print('rotated plane normal:', n)
print('scattering vector:', G)
print('K = X + G vector', K)
print('lenght X', np.linalg.norm(X))
print('lenght K', np.linalg.norm(K))
print('angle between X and K', np.arccos(
np.dot(K, X) / (np.linalg.norm(K) * np.linalg.norm(X))) * 180 / np.pi)
print('diffracted beam will hit the detector at (%.3f,%.3f) mm or (%d,%d) pixels' % (u, v, up, vp))
grain_data = np.where(data == gid, 1, 0)
data_dif = grain_data[ndimage.find_objects(data == gid)[0]]
x_max = np.ceil(max(data_dif.shape[0], data_dif.shape[1]) * 2 ** 0.5)
proj_dif = np.zeros((np.shape(data_dif)[2], x_max), dtype=np.float)
for i in range(np.shape(data_dif)[2]):
a = radon(data_dif[:, :, i], [omega])
proj_dif[i, :] = a[:, 0]
if verbose:
print('* proj_dif size is ', np.shape(proj_dif))
print(int(up - proj_dif.shape[0] / 2.))
print(int(up + proj_dif.shape[0] / 2.))
print(int(vp - proj_dif.shape[1] / 2.))
print(int(vp + proj_dif.shape[1] / 2.))
print('max proj_dif', proj_dif.max())
# add diffraction spot to the image detector
try:
# warning full_proj image is transposed (we could fix that and plot with .T since pyplot plots images like (y,x))
full_proj[int(vp - proj_dif.shape[0] / 2.):int(vp + proj_dif.shape[0] / 2.), \
int(up - proj_dif.shape[1] / 2.):int(up + proj_dif.shape[1] / 2.)] += proj_dif
# full_proj[int(up - proj_dif.shape[0]/2.):int(up + proj_dif.shape[0]/2.), \
# int(vp - proj_dif.shape[1]/2.):int(vp + proj_dif.shape[1]/2.)] += proj_dif
except:
print('error occured') # grain diffracts outside the detector
pass
plt.imsave('proj_dif/proj_dif_grain%d_omega=%05.1f.png' % (gid, omega), proj_dif, cmap=cm.gray,
origin='lower')
if display:
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111)
ax.imshow(full_proj[:, ::-1], cmap=cm.gray, vmin=0, vmax=255, origin='lower') # check origin
for (h, k, l) in [(1, 1, 0), (2, 0, 0), (2, 1, 1), (2, 2, 0), (2, 2, 2), (3, 1, 0), (3, 2, 1), (3, 3, 0),
(3, 3, 2)]:
hkl = HklPlane(h, k, l, lattice)
theta = hkl.bragg_angle(lambda_keV)
print('bragg angle for %s reflection is %.2f deg' % (hkl.miller_indices(), theta * 180. / np.pi))
t = np.linspace(0.0, 2 * np.pi, num=37)
L = d * 1000 / ps / ds * np.tan(2 * theta) # 2 theta distance on the detector
ax.plot(0.5 * det_npx[0] / ds + L * np.cos(t), 0.5 * det_npx[1] / ds + L * np.sin(t), 'g--')
ax.annotate(str(h) + str(k) + str(l), xy=(0.5 * det_npx[0] / ds, 0.5 * det_npx[1] / ds + L),
xycoords='data', color='green', horizontalalignment='center', verticalalignment='bottom',
fontsize=16)
plt.xlim(0, det_npx[0] / ds)
plt.ylim(0, det_npx[1] / ds)
plt.show()
else:
# save projection image with origin = lower since Z-axis is upwards
plt.imsave('proj/proj_omega=%05.1f.png' % omega, full_proj, cmap=cm.gray, vmin=0, vmax=100, origin='lower')
# New Quaternion functions
"""
The microstructure module provides elementary classes to describe a
crystallographic granular microstructure such as mostly present in
metallic materials.
It contains several classes which are used to describe a microstructure
composed of several grains, each one having its own crystallographic
orientation:
* :py:class:`~pymicro.crystal.microstructure.Microstructure`
* :py:class:`~pymicro.crystal.microstructure.Grain`
* :py:class:`~pymicro.crystal.microstructure.Orientation`
"""
import numpy as np
import os
import vtk
import h5py
from matplotlib import pyplot as plt, colors, cm
from xml.dom.minidom import Document, parse
from pymicro.crystal.lattice import Symmetry
from pymicro.crystal.quaternion import Quaternion
from math import atan2, pi
class Orientation:
"""Crystallographic orientation class.
This follows the passive rotation definition which means that it brings
the sample coordinate system into coincidence with the crystal coordinate
system. Then one may express a vector :math:`V_c` in the crystal coordinate system
from the vector in the sample coordinate system :math:`V_s` by:
.. math::
V_c = g.V_s
and inversely (because :math:`g^{-1}=g^T`):
.. math::
V_s = g^T.V_c
Most of the code to handle rotations has been written to comply with the conventions
laid in :cite:`Rowenhorst2015`.
"""
def __init__(self, matrix):
"""Initialization from the 9 components of the orientation matrix."""
g = np.array(matrix, dtype=np.float64).reshape((3, 3))
self._matrix = g
self.euler = Orientation.OrientationMatrix2Euler(g)
self.rod = Orientation.OrientationMatrix2Rodrigues(g)
self.quat = Orientation.OrientationMatrix2Quaternion(g, P=1)
    def orientation_matrix(self):
        """Returns the orientation matrix in the form of a 3x3 numpy array.

        The matrix follows the passive rotation convention described in the
        class docstring (it brings the sample coordinate system into
        coincidence with the crystal coordinate system).
        """
        return self._matrix
def __repr__(self):
"""Provide a string representation of the class."""
s = 'Crystal Orientation'
s += '\norientation matrix = %s' % self._matrix.view()
s += '\nEuler angles (degrees) = (%8.3f,%8.3f,%8.3f)' % (self.phi1(), self.Phi(), self.phi2())
s += '\nRodrigues vector = %s' % self.OrientationMatrix2Rodrigues(self._matrix)
s += '\nQuaternion = %s' % self.OrientationMatrix2Quaternion(self._matrix, P=1)
return s
    @staticmethod
    def cube():
        """Create the particular crystal orientation called Cube and which
        corresponds to euler angle (0, 0, 0).

        :return: an `Orientation` instance built from these Euler angles.
        """
        return Orientation.from_euler((0., 0., 0.))
    @staticmethod
    def brass():
        """Create the particular crystal orientation called Brass and which
        corresponds to euler angle (35.264, 45, 0).

        :return: an `Orientation` instance built from these Euler angles.
        """
        return Orientation.from_euler((35.264, 45., 0.))
    @staticmethod
    def copper():
        """Create the particular crystal orientation called Copper and which
        corresponds to euler angle (90, 35.264, 45).

        :return: an `Orientation` instance built from these Euler angles.
        """
        return Orientation.from_euler((90., 35.264, 45.))
    @staticmethod
    def s3():
        """Create the particular crystal orientation called S3 and which
        corresponds to euler angle (59, 37, 63).

        :return: an `Orientation` instance built from the exact Euler angles
            (58.980, 36.699, 63.435).
        """
        return Orientation.from_euler((58.980, 36.699, 63.435))
    @staticmethod
    def goss():
        """Create the particular crystal orientation called Goss and which
        corresponds to euler angle (0, 45, 0).

        :return: an `Orientation` instance built from these Euler angles.
        """
        return Orientation.from_euler((0., 45., 0.))
    @staticmethod
    def shear():
        """Create the particular crystal orientation called shear and which
        corresponds to euler angle (45, 0, 0).

        :return: an `Orientation` instance built from these Euler angles.
        """
        return Orientation.from_euler((45., 0., 0.))
def get_ipf_colour(self, axis=np.array([0., 0., 1.]), symmetry=Symmetry.cubic):
"""Compute the IPF (inverse pole figure) colour for this orientation.
Given a particular axis expressed in the laboratory coordinate system,
one can compute the so called IPF colour based on that direction
expressed in the crystal coordinate system as :math:`[x_c,y_c,z_c]`.
There is only one tuple (u,v,w) such that:
.. math::
[x_c,y_c,z_c]=u.[0,0,1]+v.[0,1,1]+w.[1,1,1]
and it is used to assign the RGB colour.
"""
axis /= np.linalg.norm(axis)
# find the axis lying in the fundamental zone
for sym in symmetry.symmetry_operators():
Osym = np.dot(sym, self.orientation_matrix())
Vc = np.dot(Osym, axis)
if Vc[2] < 0:
Vc *= -1. # using the upward direction
uvw = np.array([Vc[2] - Vc[1], Vc[1] - Vc[0], Vc[0]])
uvw /= np.linalg.norm(uvw)
uvw /= max(uvw)
if (uvw[0] >= 0. and uvw[0] <= 1.0) and (uvw[1] >= 0. and uvw[1] <= 1.0) and (
uvw[2] >= 0. and uvw[2] <= 1.0):
# print('found sym for sst')
break
return uvw
def fzDihedral(rod, n):
"""check if the given Rodrigues vector is in the fundamental zone.
After book from Morawiecz.
"""
# top and bottom face at +/-tan(pi/2n)
t = np.tan(np.pi / (2 * n))
if abs(rod[2]) > t:
return False
# 2n faces distance 1 from origin
# y <= ((2+sqrt(2))*t - (1+sqrt(2))) * x + (1+sqrt(2))*(1-t)
y, x = sorted([abs(ro[0]), abs(ro[1])])
if x > 1:
return False
return {
2: True,
3: y / (1 + math.sqrt(2)) + (1 - math.sqrt(2 / 3)) * x < 1 - 1 / math.sqrt(3),
4: y + x < math.sqrt(2),
6: y / (1 + math.sqrt(2)) + (1 - 2 * math.sqrt(2) + math.sqrt(6)) * x < math.sqrt(3) - 1
}[n]
def inFZ(self, symmetry=Symmetry.cubic):
"""Check if the given Orientation lies within the fundamental zone.
For a given crystal symmetry, several rotations can describe the same
physcial crystllographic arangement. The Rodrigues fundamental zone
restrict the orientation space accordingly.
"""
r = self.rod
if symmetry == Symmetry.cubic:
inFZT23 = np.abs(r).sum() <= 1.0
# in the cubic symmetry, each component must be < 2 ** 0.5 - 1
inFZ = inFZT23 and np.abs(r).max() <= 2 ** 0.5 - 1
else:
raise (ValueError('unsupported crystal symmetry: %s' % symmetry))
return inFZ
def move_to_FZ(self, symmetry=Symmetry.cubic, verbose=False):
"""
Compute the equivalent crystal orientation in the Fundamental Zone of a given symmetry.
:param Symmetry symmetry: an instance of the `Symmetry` class
:param verbose: flag for verbose mode
:return: a new Orientation instance which lies in the fundamental zone.
"""
om = symmetry.move_rotation_to_FZ(self.orientation_matrix(), verbose=verbose)
return Orientation(om)
@staticmethod
def misorientation_MacKenzie(psi):
"""Return the fraction of the misorientations corresponding to the
given :math:`\\psi` angle in the reference solution derived By MacKenzie in
his 1958 paper :cite:`MacKenzie_1958`.
:param psi: the misorientation angle in radians.
:returns: the value in the cummulative distribution corresponding to psi.
"""
from math import sqrt, sin, cos, tan, pi, acos
psidg = 180 * psi / pi
if 0 <= psidg <= 45:
p = 2. / 15 * (1 - cos(psi))
elif 45 < psidg <= 60:
p = 2. / 15 * (3 * (sqrt(2) - 1) * sin(psi) - 2 * (1 - cos(psi)))
elif 60 < psidg <= 60.72:
p = 2. / 15 * ((3 * (sqrt(2) - 1) + 4. / sqrt(3)) * sin(psi) - 6. * (1 - cos(psi)))
elif 60.72 < psidg <= 62.8:
X = (sqrt(2) - 1) / (1 - (sqrt(2) - 1) ** 2 / tan(0.5 * psi) ** 2) ** 0.5
Y = (sqrt(2) - 1) ** 2 / ((3 - 1 / tan(0.5 * psi) ** 2) ** 0.5)
p = (2. / 15) * ((3 * (sqrt(2) - 1) + 4 / sqrt(3)) * sin(psi) - 6 * (1 - cos(psi))) \
- 8. / (5 * pi) * (
2 * (sqrt(2) - 1) * acos(X / tan(0.5 * psi)) + 1. / sqrt(3) * acos(Y / tan(0.5 * psi))) * sin(psi) \
+ 8. / (5 * pi) * (2 * acos((sqrt(2) + 1) * X / sqrt(2)) + acos((sqrt(2) + 1) * Y / sqrt(2))) * (
1 - cos(psi))
else:
p = 0.
return p
@staticmethod
def misorientation_axis_from_delta(delta):
"""Compute the misorientation axis from the misorientation matrix.
:param delta: The 3x3 misorientation matrix.
:returns: the misorientation axis (normalised vector).
"""
n = np.array([delta[1, 2] - delta[2, 1], delta[2, 0] - delta[0, 2], delta[0, 1] - delta[1, 0]])
n /= np.sqrt(
(delta[1, 2] - delta[2, 1]) ** 2 + (delta[2, 0] - delta[0, 2]) ** 2 + (delta[0, 1] - delta[1, 0]) ** 2)
return n
def misorientation_axis(self, orientation):
"""Compute the misorientation axis with another crystal orientation.
This vector is by definition common to both crystalline orientations.
:param orientation: an instance of :py:class:`~pymicro.crystal.microstructure.Orientation` class.
:returns: the misorientation axis (normalised vector).
"""
delta = np.dot(self.orientation_matrix(), orientation.orientation_matrix().T)
return Orientation.misorientation_axis_from_delta(delta)
@staticmethod
def misorientation_angle_from_delta(delta):
"""Compute the misorientation angle from the misorientation matrix.
Compute the angle assocated with this misorientation matrix :math:`\\Delta g`.
It is defined as :math:`\\omega = \\arccos(\\text{trace}(\\Delta g)/2-1)`.
To avoid float rounding error, the argument is rounded to 1. if it is within 1 and 1 plus 32 bits floating
point precison.
.. note::
This does not account for the crystal symmetries. If you want to
find the disorientation between two orientations, use the
:py:meth:`~pymicro.crystal.microstructure.Orientation.disorientation`
method.
:param delta: The 3x3 misorientation matrix.
:returns float: the misorientation angle in radians.
"""
cw = 0.5 * (delta.trace() - 1)
if cw > 1. and cw - 1. < 10 * np.finfo('float32').eps:
print('cw=%.20f, rounding to 1.' % cw)
cw = 1.
omega = np.arccos(cw)
return omega
def disorientation(self, orientation, crystal_structure=Symmetry.triclinic):
"""Compute the disorientation another crystal orientation.
Considering all the possible crystal symmetries, the disorientation
is defined as the combination of the minimum misorientation angle
and the misorientation axis lying in the fundamental zone, which
can be used to bring the two lattices into coincidence.
.. note::
Both orientations are supposed to have the same symmetry. This is not necessarily the case in multi-phase
materials.
:param orientation: an instance of :py:class:`~pymicro.crystal.microstructure.Orientation` class desribing the other crystal orientation from which to compute the angle.
:param crystal_structure: an instance of the `Symmetry` class describing the crystal symmetry, triclinic (no symmetry) by default.
:returns tuple: the misorientation angle in radians, the axis as a numpy vector (crystal coordinates), the axis as a numpy vector (sample coordinates).
"""
the_angle = np.pi
symmetries = crystal_structure.symmetry_operators()
(gA, gB) = (self.orientation_matrix(), orientation.orientation_matrix()) # nicknames
for (g1, g2) in [(gA, gB), (gB, gA)]:
for j in range(symmetries.shape[0]):
sym_j = symmetries[j]
oj = np.dot(sym_j, g1) # the crystal symmetry operator is left applied
for i in range(symmetries.shape[0]):
sym_i = symmetries[i]
oi = np.dot(sym_i, g2)
delta = np.dot(oi, oj.T)
print('delta={}'.format(delta))
mis_angle = Orientation.misorientation_angle_from_delta(delta)
print(np.degrees(mis_angle))
if mis_angle < the_angle:
# now compute the misorientation axis, should check if it lies in the fundamental zone
mis_axis = Orientation.misorientation_axis_from_delta(delta)
# here we have np.dot(oi.T, mis_axis) = np.dot(oj.T, mis_axis)
# print(mis_axis, mis_angle*180/np.pi, np.dot(oj.T, mis_axis))
the_angle = mis_angle
the_axis = mis_axis
the_axis_xyz = np.dot(oi.T, the_axis)
return (the_angle, the_axis, the_axis_xyz)
    def phi1(self):
        """Convenience method to expose the first Euler angle (degrees)."""
        return self.euler[0]
    def Phi(self):
        """Convenience method to expose the second Euler angle (degrees)."""
        return self.euler[1]
    def phi2(self):
        """Convenience method to expose the third Euler angle (degrees)."""
        return self.euler[2]
def compute_XG_angle(self, hkl, omega, verbose=False):
"""Compute the angle between the scattering vector :math:`\mathbf{G_{l}}`
and :math:`\mathbf{-X}` the X-ray unit vector at a given angular position :math:`\\omega`.
A given hkl plane defines the scattering vector :math:`\mathbf{G_{hkl}}` by
the miller indices in the reciprocal space. It is expressed in the
cartesian coordinate system by :math:`\mathbf{B}.\mathbf{G_{hkl}}` and in the
laboratory coordinate system accounting for the crystal orientation
by :math:`\mathbf{g}^{-1}.\mathbf{B}.\mathbf{G_{hkl}}`.
The crystal is assumed to be placed on a rotation stage around the
laboratory vertical axis. The scattering vector can finally be
written as :math:`\mathbf{G_l}=\mathbf{\\Omega}.\mathbf{g}^{-1}.\mathbf{B}.\mathbf{G_{hkl}}`.
The X-rays unit vector is :math:`\mathbf{X}=[1, 0, 0]`. So the computed angle
is :math:`\\alpha=acos(-\mathbf{X}.\mathbf{G_l}/||\mathbf{G_l}||`
The Bragg condition is fulfilled when :math:`\\alpha=\pi/2-\\theta_{Bragg}`
:param hkl: the hkl plane, an instance of :py:class:`~pymicro.crystal.lattice.HklPlane`
:param omega: the angle of rotation of the crystal around the laboratory vertical axis.
:param bool verbose: activate verbose mode (False by default).
:return float: the angle between :math:`-\mathbf{X}` and :math:`\mathbf{G_{l}}` in degrees.
"""
X = np.array([1., 0., 0.])
gt = self.orientation_matrix().transpose()
Gc = hkl.scattering_vector()
Gs = gt.dot(Gc) # in the cartesian sample CS
omegar = omega * np.pi / 180
R = np.array([[np.cos(omegar), -np.sin(omegar), 0], [np.sin(omegar), np.cos(omegar), 0], [0, 0, 1]])
Gl = R.dot(Gs)
alpha = np.arccos(np.dot(-X, Gl) / np.linalg.norm(Gl)) * 180 / np.pi
if verbose:
print('scattering vector in the crystal CS', Gc)
print('scattering vector in the sample CS', Gs)
print('scattering vector in the laboratory CS (including Omega rotation)', Gl)
print('angle (deg) between -X and G', alpha)
return alpha
@staticmethod
def solve_trig_equation(A, B, C, verbose=False):
"""Solve the trigonometric equation in the form of:
.. math::
A\cos\\theta + B\sin\\theta = C
:param float A: the A constant in the equation.
:param float B: the B constant in the equation.
:param float C: the C constant in the equation.
:return tuple: the two solutions angular values in degrees.
"""
Delta = 4 * (A ** 2 + B ** 2 - C ** 2)
if Delta < 0:
raise ValueError('Delta < 0 (%f)' % Delta)
if verbose:
print('A={0:.3f}, B={1:.3f}, C={2:.3f}, Delta={3:.1f}'.format(A, B, C, Delta))
theta_1 = 2 * np.arctan2(B - 0.5 * np.sqrt(Delta), A + C) * 180. / np.pi % 360
theta_2 = 2 * np.arctan2(B + 0.5 * np.sqrt(Delta), A + C) * 180. / np.pi % 360
return theta_1, theta_2
    def dct_omega_angles(self, hkl, lambda_keV, verbose=False):
        """Compute the two omega angles which satisfy the Bragg condition.

        For a given crystal orientation sitting on a vertical rotation axis,
        there is exactly two :math:`\omega` positions in :math:`[0, 2\pi]` for which
        a particular :math:`(hkl)` reflexion will fulfil Bragg's law.

        According to the Bragg's law, a crystallographic plane of a given
        grain will be in diffracting condition if:

        .. math::

           \sin\\theta=-[\mathbf{\Omega}.\mathbf{g}^{-1}\mathbf{G_c}]_1

        with :math:`\mathbf{\Omega}` the matrix associated with the rotation
        axis:

        .. math::

           \mathbf{\Omega}=\\begin{pmatrix}
           \cos\omega & -\sin\omega & 0 \\\\
           \sin\omega & \cos\omega & 0 \\\\
           0 & 0 & 1 \\\\
           \end{pmatrix}

        This method solves the associated second order equation to return
        the two corresponding omega angles.

        :param hkl: The given cristallographic plane :py:class:`~pymicro.crystal.lattice.HklPlane`
        :param float lambda_keV: The X-rays energy expressed in keV
        :param bool verbose: Verbose mode (False by default)
        :returns tuple: :math:`(\omega_1, \omega_2)` the two values of the \
        rotation angle around the vertical axis (in degrees).
        """
        (h, k, l) = hkl.miller_indices()  # kept for reference in the commented formulas below
        theta = hkl.bragg_angle(lambda_keV, verbose=verbose)
        lambda_nm = 1.2398 / lambda_keV  # energy (keV) to wavelength (nm) conversion
        gt = self.orientation_matrix().T  # gt = g^{-1} in Poulsen 2004
        Gc = hkl.scattering_vector()
        # coefficients of the A*cos(omega) + B*sin(omega) = C equation
        A = np.dot(Gc, gt[0])
        B = - np.dot(Gc, gt[1])
        # A = h / a * gt[0, 0] + k / b * gt[0, 1] + l / c * gt[0, 2]
        # B = -h / a * gt[1, 0] - k / b * gt[1, 1] - l / c * gt[1, 2]
        C = -2 * np.sin(theta) ** 2 / lambda_nm  # the minus sign comes from the main equation
        omega_1, omega_2 = Orientation.solve_trig_equation(A, B, C, verbose=verbose)
        if verbose:
            print('the two omega values in degrees fulfilling the Bragg condition are (%.1f, %.1f)' % (omega_1, omega_2))
        return omega_1, omega_2
def rotating_crystal(self, hkl, lambda_keV, omega_step=0.5, display=True, verbose=False):
from pymicro.xray.xray_utils import lambda_keV_to_nm
lambda_nm = lambda_keV_to_nm(lambda_keV)
X = np.array([1., 0., 0.]) / lambda_nm
print('magnitude of X', np.linalg.norm(X))
gt = self.orientation_matrix().transpose()
(h, k, l) = hkl.miller_indices()
theta = hkl.bragg_angle(lambda_keV) * 180. / np.pi
print('bragg angle for %d%d%d reflection is %.1f' % (h, k, l, theta))
Gc = hkl.scattering_vector()
Gs = gt.dot(Gc)
alphas = []
twothetas = []
magnitude_K = []
omegas = np.linspace(0.0, 360.0, num=360.0 / omega_step, endpoint=False)
for omega in omegas:
print('\n** COMPUTING AT OMEGA=%03.1f deg' % omega)
# prepare rotation matrix
omegar = omega * np.pi / 180
R = np.array([[np.cos(omegar), -np.sin(omegar), 0], [np.sin(omegar), np.cos(omegar), 0], [0, 0, 1]])
# R = R.dot(Rlt).dot(Rut) # with tilts
Gl = R.dot(Gs)
print('scattering vector in laboratory CS', Gl)
n = R.dot(gt.dot(hkl.normal()))
print('plane normal:', hkl.normal())
print(R)
print('rotated plane normal:', n, ' with a norm of', np.linalg.norm(n))
G = n / hkl.interplanar_spacing() # here G == N
print('G vector:', G, ' with a norm of', np.linalg.norm(G))
K = X + G
print('X + G vector', K)
magnitude_K.append(np.linalg.norm(K))
print('magnitude of K', np.linalg.norm(K))
alpha = np.arccos(np.dot(-X, G) / (np.linalg.norm(-X) * np.linalg.norm(G))) * 180 / np.pi
print('angle between -X and G', alpha)
alphas.append(alpha)
twotheta = np.arccos(np.dot(K, X) / (np.linalg.norm(K) * np.linalg.norm(X))) * 180 / np.pi
print('angle (deg) between K and X', twotheta)
twothetas.append(twotheta)
print('min alpha angle is ', min(alphas))
# compute omega_1 and omega_2 to verify graphically
(w1, w2) = self.dct_omega_angles(hkl, lambda_keV, verbose=False)
# gather the results in a single figure
fig = plt.figure(figsize=(12, 10))
fig.add_subplot(311)
plt.title('Looking for (%d%d%d) Bragg reflexions' % (h, k, l))
plt.plot(omegas, alphas, 'k-')
plt.xlim(0, 360)
plt.ylim(0, 180)
plt.xticks(np.arange(0, 390, 30))
# add bragg condition
plt.axhline(90 - theta, xmin=0, xmax=360, linewidth=2)
plt.annotate('$\pi/2-\\theta_{Bragg}$', xycoords='data', xy=(360, 90 - theta), horizontalalignment='left',
verticalalignment='center', fontsize=16)
# add omega solutions
plt.axvline(w1 + 180, ymin=0, ymax=180, linewidth=2, linestyle='dashed', color='gray')
plt.axvline(w2 + 180, ymin=0, ymax=180, linewidth=2, linestyle='dashed', color='gray')
plt.annotate('$\\omega_1$', xycoords='data', xy=(w1 + 180, 0), horizontalalignment='center',
verticalalignment='bottom', fontsize=16)
plt.annotate('$\\omega_2$', xycoords='data', xy=(w2 + 180, 0), horizontalalignment='center',
verticalalignment='bottom', fontsize=16)
plt.ylabel(r'Angle between $-X$ and $\mathbf{G}$')
fig.add_subplot(312)
plt.plot(omegas, twothetas, 'k-')
plt.xlim(0, 360)
# plt.ylim(0,180)
plt.xticks(np.arange(0, 390, 30))
plt.axhline(2 * theta, xmin=0, xmax=360, linewidth=2)
plt.annotate('$2\\theta_{Bragg}$', xycoords='data', xy=(360, 2 * theta), horizontalalignment='left',
verticalalignment='center', fontsize=16)
plt.axvline(w1 + 180, linewidth=2, linestyle='dashed', color='gray')
plt.axvline(w2 + 180, linewidth=2, linestyle='dashed', color='gray')
plt.ylabel('Angle between $X$ and $K$')
fig.add_subplot(313)
plt.plot(omegas, magnitude_K, 'k-')
plt.xlim(0, 360)
plt.axhline(np.linalg.norm(X), xmin=0, xmax=360, linewidth=2)
plt.annotate('$1/\\lambda$', xycoords='data', xy=(360, 1 / lambda_nm), horizontalalignment='left',
verticalalignment='center', fontsize=16)
plt.axvline(w1 + 180, linewidth=2, linestyle='dashed', color='gray')
plt.axvline(w2 + 180, linewidth=2, linestyle='dashed', color='gray')
plt.xlabel(r'Angle of rotation $\omega$')
plt.ylabel(r'Magnitude of $X+G$ (nm$^{-1}$)')
plt.subplots_adjust(top=0.925, bottom=0.05, left=0.1, right=0.9)
if display:
plt.show()
else:
plt.savefig('rotating_crystal_plot_%d%d%d.pdf' % (h, k, l))
def topotomo_tilts(self, hkl, verbose=False):
"""Compute the tilts for topotomography alignment.
:param hkl: the hkl plane, an instance of :py:class:`~pymicro.crystal.lattice.HklPlane`
:param bool verbose: activate verbose mode (False by default).
:returns tuple: (ut, lt) the two values of tilts to apply (in radians).
"""
gt = self.orientation_matrix().transpose()
Gc = hkl.scattering_vector()
Gs = gt.dot(Gc) # in the cartesian sample CS
# find topotomo tilts
ut = np.arctan(-Gs[0] / Gs[2])
lt = np.arctan(Gs[1] / (-Gs[0] * np.sin(ut) + Gs[2] * np.cos(ut)))
if verbose:
print('up tilt (samry) should be %.3f' % (ut * 180 / np.pi))
print('low tilt (samrx) should be %.3f' % (lt * 180 / np.pi))
return (ut, lt)
def to_xml(self, doc):
"""
Returns an XML representation of the Orientation instance.
"""
print('deprecated as we are moving to hdf5 format')
orientation = doc.createElement('Orientation')
orientation_phi1 = doc.createElement('phi1')
orientation_phi1_text = doc.createTextNode('%f' % self.phi1())
orientation_phi1.appendChild(orientation_phi1_text)
orientation.appendChild(orientation_phi1)
orientation_Phi = doc.createElement('Phi')
orientation_Phi_text = doc.createTextNode('%f' % self.Phi())
orientation_Phi.appendChild(orientation_Phi_text)
orientation.appendChild(orientation_Phi)
orientation_phi2 = doc.createElement('phi2')
orientation_phi2_text = doc.createTextNode('%f' % self.phi2())
orientation_phi2.appendChild(orientation_phi2_text)
orientation.appendChild(orientation_phi2)
return orientation
@staticmethod
def from_xml(orientation_node):
    """Build an Orientation instance from an XML node.

    The node is expected to have three children holding the phi1, Phi and
    phi2 Euler angles (in that order), as written by :py:meth:`to_xml`.
    """
    angles = [float(orientation_node.childNodes[i].childNodes[0].nodeValue)
              for i in range(3)]
    return Orientation.from_euler(np.array(angles))
@staticmethod
def from_euler(euler, convention='Bunge'):
    """Create an Orientation from 3 Euler angles.

    This is the classical method to obtain an orientation matrix by 3 successive rotations. The result depends on
    the convention used (how the successive rotation axes are chosen). In the Bunge convention, the first rotation
    is around Z, the second around the new X and the third one around the new Z. In the Roe convention, the second
    one is around Y.
    """
    if convention == 'Roe':
        # convert Roe angles to the equivalent Bunge angles
        phi1, phi, phi2 = euler[0] + 90, euler[1], euler[2] - 90
    else:
        phi1, phi, phi2 = euler
    return Orientation(Orientation.Euler2OrientationMatrix((phi1, phi, phi2)))
@staticmethod
def from_rodrigues(rod):
    """Create an Orientation from a Rodrigues vector."""
    return Orientation(Orientation.Rodrigues2OrientationMatrix(rod))
@staticmethod
def from_Quaternion(q):
    """Create an Orientation from a Quaternion instance."""
    return Orientation(Orientation.Quaternion2OrientationMatrix(q))
@staticmethod
def Zrot2OrientationMatrix(x1=None, x2=None, x3=None):
"""Compute the orientation matrix from the rotated coordinates given in the
.inp file for Zebulon's computations
Need at least two vectors to compute cross product
Still need some tests to validate this function
"""
if (x1 is None and x2 is None):
raise NameError('Need at least two vectors to compute the matrix')
elif (x1 == None and x3 == None):
raise NameError('Need at least two vectors to compute the matrix')
elif (x3 == None and x2 == None):
raise NameError('Need at least two vectors to compute the matrix')
if x1 == None:
x1 = np.cross(x2, x3)
elif x2 == None:
x2 = np.cross(x3, x1)
elif x3 == None:
x3 = np.cross(x1, x2)
x1 = x1 / np.linalg.norm(x1)
x2 = x2 / np.linalg.norm(x2)
x3 = x3 / np.linalg.norm(x3)
g = np.array([x1, x2, x3]).transpose()
return g
@staticmethod
def OrientationMatrix2EulerSF(g):
    """
    Compute the Euler angles (in degrees) from the orientation matrix
    in a similar way as done in Mandel_crystal.c

    :param g: the 3x3 orientation matrix.
    :returns: the three Euler angles in degrees as a numpy array.
    """
    # tolerance used to decide whether a branch of arcsin must be flipped
    tol = 0.1
    r = np.zeros(9, dtype=np.float64)  # double precision here
    # Z-set order for tensor is 11 22 33 12 23 13 21 32 31
    r[0] = g[0, 0]
    r[1] = g[1, 1]
    r[2] = g[2, 2]
    r[3] = g[0, 1]
    r[4] = g[1, 2]
    r[5] = g[0, 2]
    r[6] = g[1, 0]
    r[7] = g[2, 1]
    r[8] = g[2, 0]
    phi = np.arccos(r[2])
    if phi == 0.:
        # degenerate case: only phi1 + phi2 is defined; set phi2 to zero
        phi2 = 0.
        phi1 = np.arcsin(r[6])
        # pick the arcsin branch consistent with cos(phi1) = g11
        if abs(np.cos(phi1) - r[0]) > tol:
            phi1 = np.pi - phi1
    else:
        # general case: recover phi1 and phi2 from the third row/column
        x2 = r[5] / np.sin(phi)
        x1 = r[8] / np.sin(phi);
        # clamp to [-1, 1] to guard arcsin against rounding errors
        if x1 > 1.:
            x1 = 1.
        if x2 > 1.:
            x2 = 1.
        if x1 < -1.:
            x1 = -1.
        if x2 < -1.:
            x2 = -1.
        phi2 = np.arcsin(x2)
        phi1 = np.arcsin(x1)
        # flip arcsin branches when inconsistent with the remaining entries
        if abs(np.cos(phi2) * np.sin(phi) - r[7]) > tol:
            phi2 = np.pi - phi2
        if abs(np.cos(phi1) * np.sin(phi) + r[4]) > tol:
            phi1 = np.pi - phi1
    return np.degrees(np.array([phi1, phi, phi2]))
@staticmethod
def OrientationMatrix2Euler(g):
"""
Compute the Euler angles from the orientation matrix.
This conversion follows the paper of Rowenhorst et al. :cite:`Rowenhorst2015`.
In particular when :math:`g_{33} = 1` within the machine precision,
there is no way to determine the values of :math:`\phi_1` and :math:`\phi_2`
(only their sum is defined). The convention is to attribute
the entire angle to :math:`\phi_1` and set :math:`\phi_2` to zero.
:param g: The 3x3 orientation matrix
:return: The 3 euler angles in degrees.
"""
eps = np.finfo('float').eps
(phi1, Phi, phi2) = (0.0, 0.0, 0.0)
# treat special case where g[2, 2] = 1
if np.abs(g[2, 2]) >= 1 - eps:
if g[2, 2] > 0.0:
phi1 = np.arctan2(g[0][1], g[0][0])
else:
phi1 = -np.arctan2(-g[0][1], g[0][0])
Phi = np.pi
else:
Phi = np.arccos(g[2][2])
zeta = 1.0 / np.sqrt(1.0 - g[2][2] ** 2)
phi1 = np.arctan2(g[2][0] * zeta, -g[2][1] * zeta)
phi2 = np.arctan2(g[0][2] * zeta, g[1][2] * zeta)
# ensure angles are in the range [0, 2*pi]
if phi1 < 0.0:
phi1 += 2 * np.pi
if Phi < 0.0:
Phi += 2 * np.pi
if phi2 < 0.0:
phi2 += 2 * np.pi
return np.degrees([phi1, Phi, phi2])
@staticmethod
def OrientationMatrix2Rodrigues(g):
"""
Compute the rodrigues vector from the orientation matrix.
:param g: The 3x3 orientation matrix representing the rotation.
:returns: The Rodrigues vector as a 3 components array.
"""
t = g.trace() + 1
if np.abs(t) < np.finfo(g.dtype).eps:
print('warning, returning [0., 0., 0.], consider using axis, angle representation instead')
return np.zeros(3)
else:
r1 = (g[1, 2] - g[2, 1]) / t
r2 = (g[2, 0] - g[0, 2]) / t
r3 = (g[0, 1] - g[1, 0]) / t
return np.array([r1, r2, r3])
@staticmethod
def OrientationMatrix2Quaternion(g, P=1):
    """Compute the quaternion from the orientation matrix.

    The conversion follows Rowenhorst et al. (2015): the component magnitudes
    come from the diagonal, the signs from the off-diagonal differences.

    :param g: the 3x3 orientation matrix.
    :param int P: the convention (+1 or -1) passed to the Quaternion.
    :returns: the 4 quaternion components as an array.
    """
    q0 = 0.5 * np.sqrt(1 + g[0, 0] + g[1, 1] + g[2, 2])
    q1 = P * 0.5 * np.sqrt(1 + g[0, 0] - g[1, 1] - g[2, 2])
    q2 = P * 0.5 * np.sqrt(1 - g[0, 0] + g[1, 1] - g[2, 2])
    q3 = P * 0.5 * np.sqrt(1 - g[0, 0] - g[1, 1] + g[2, 2])
    # the three sign corrections are independent (Rowenhorst et al. 2015);
    # an elif chain here would fix at most one component
    if g[2, 1] < g[1, 2]:
        q1 = q1 * -1
    if g[0, 2] < g[2, 0]:
        q2 = q2 * -1
    if g[1, 0] < g[0, 1]:
        q3 = q3 * -1
    q = Quaternion(np.array([q0, q1, q2, q3]), convention=P)
    return q.quat
@staticmethod
def Rodrigues2OrientationMatrix(rod):
"""
Compute the orientation matrix from the Rodrigues vector.
:param rod: The Rodrigues vector as a 3 components array.
:returns: The 3x3 orientation matrix representing the rotation.
"""
r = np.linalg.norm(rod)
I = np.diagflat(np.ones(3))
if r < np.finfo(r.dtype).eps:
return I
else:
theta = 2 * np.arctan(r)
n = rod / r
omega = np.array([[0.0, n[2], -n[1]], [-n[2], 0.0, n[0]], [n[1], -n[0], 0.0]])
return I + np.sin(theta) * omega + (1 - np.cos(theta)) * omega.dot(omega)
@staticmethod
def Rodrigues2Axis(rod):
"""
Compute the axis/angle representation from the Rodrigues vector.
:param rod: The Rodrigues vector as a 3 components array.
:returns: A tuple in the (axis, angle) form.
"""
r = np.linalg.norm(rod)
axis = rod / r
angle = 2 * np.arctan(r)
return axis, angle
@staticmethod
def Axis2OrientationMatrix(axis, angle):
"""
Compute the (passive) orientation matrix associated the rotation defined by the given (axis, angle) pair.
:param axis: the rotation axis.
:param angle: the rotation angle (degrees).
:returns: the 3x3 orientation matrix.
"""
omega = np.radians(angle)
c = np.cos(omega)
s = np.sin(omega)
g = np.array([[c + (1 - c) * axis[0] ** 2, (1 - c) * axis[0] * axis[1] + s * axis[2],
(1 - c) * axis[0] * axis[2] - s * axis[1]],
[(1 - c) * axis[0] * axis[1] - s * axis[2], c + (1 - c) * axis[1] ** 2,
(1 - c) * axis[1] * axis[2] + s * axis[0]],
[(1 - c) * axis[0] * axis[2] + s * axis[1], (1 - c) * axis[1] * axis[2] - s * axis[0],
c + (1 - c) * axis[2] ** 2]])
return g
@staticmethod
def Euler2Axis(euler):
"""
Compute the (axis, angle) representation associated to this (passive) rotation expressed by the Euler angles.
:param euler: 3 euler angles (in degrees)
:returns: a tuple containing the axis (a vector) and the angle (in radians).
"""
(phi1, Phi, phi2) = np.radians(euler)
t = np.tan(0.5 * Phi)
s = 0.5 * (phi1 + phi2)
d = 0.5 * (phi1 - phi2)
tau = np.sqrt(t ** 2 + np.sin(s) ** 2)
alpha = 2 * np.arctan2(tau, np.cos(s))
if alpha > np.pi:
axis = np.array([-t / tau * np.cos(d), -t / tau * np.sin(d), -1 / tau * np.sin(s)])
angle = 2 * np.pi - alpha
else:
axis = np.array([t / tau * np.cos(d), t / tau * np.sin(d), 1 / tau * np.sin(s)])
angle = alpha
return axis, angle
@staticmethod
def Euler2Quaternion(euler, P=1):
    """
    Compute the quaternion from the 3 euler angles (in degrees).

    :param tuple euler: the 3 euler angles in degrees.
    :param int P: +1 to compute an active quaternion (default), -1 for a passive quaternion.
    :return: a `Quaternion` instance representing the rotation.
    """
    phi1, Phi, phi2 = np.radians(euler)
    half_sum = 0.5 * (phi1 + phi2)
    half_diff = 0.5 * (phi1 - phi2)
    cos_half_Phi = np.cos(0.5 * Phi)
    sin_half_Phi = np.sin(0.5 * Phi)
    q0 = np.cos(half_sum) * cos_half_Phi
    q1 = np.cos(half_diff) * sin_half_Phi
    q2 = np.sin(half_diff) * sin_half_Phi
    q3 = np.sin(half_sum) * cos_half_Phi
    return Quaternion(np.array([q0, -P * q1, -P * q2, -P * q3]), convention=P)
@staticmethod
def Euler2Rodrigues(euler):
"""
Compute the rodrigues vector from the 3 euler angles (in degrees)
"""
(phi1, Phi, phi2) = np.radians(euler)
a = 0.5 * (phi1 - phi2)
b = 0.5 * (phi1 + phi2)
r1 = np.tan(0.5 * Phi) * np.cos(a) / np.cos(b)
r2 = np.tan(0.5 * Phi) * np.sin(a) / np.cos(b)
r3 = np.tan(b)
return np.array([r1, r2, r3])
@staticmethod
def Euler2OrientationMatrix(euler):
"""
Compute the orientation matrix :math:`\mathbf{g}` associated with the 3 Euler angles
:math:`(\phi_1, \Phi, \phi_2)`. The matrix is calculated via (see the `euler_angles` recipe in the cookbook
for a detailed example):
.. math::
\mathbf{g}=\\begin{pmatrix}
\cos\phi_1\cos\phi_2 - \sin\phi_1\sin\phi_2\cos\Phi & \sin\phi_1\cos\phi_2 + \cos\phi_1\sin\phi_2\cos\Phi & \sin\phi_2\sin\Phi \\\\
-\cos\phi_1\sin\phi_2 - \sin\phi_1\cos\phi_2\cos\Phi & -\sin\phi_1\sin\phi_2 + \cos\phi_1\cos\phi_2\cos\Phi & \cos\phi_2\sin\Phi \\\\
\sin\phi_1\sin\Phi & -\cos\phi_1\sin\Phi & \cos\Phi \\\\
\end{pmatrix}
:param euler: The triplet of the Euler angles (in degrees).
:returns g: The 3x3 orientation matrix.
"""
(rphi1, rPhi, rphi2) = np.radians(euler)
c1 = np.cos(rphi1)
s1 = np.sin(rphi1)
c = np.cos(rPhi)
s = np.sin(rPhi)
c2 = np.cos(rphi2)
s2 = np.sin(rphi2)
# rotation matrix g
g11 = c1 * c2 - s1 * s2 * c
g12 = s1 * c2 + c1 * s2 * c
g13 = s2 * s
g21 = -c1 * s2 - s1 * c2 * c
g22 = -s1 * s2 + c1 * c2 * c
g23 = c2 * s
g31 = s1 * s
g32 = -c1 * s
g33 = c
g = np.array([[g11, g12, g13], [g21, g22, g23], [g31, g32, g33]])
return g
@staticmethod
def Quaternion2Euler(q):
"""
Compute Euler angles from a Quaternion
:param q: Quaternion
:return: Euler angles (in degrees, Bunge convention)
"""
P = q.convention
(q0, q1, q2, q3) = q.quat
q03 = q0 ** 2 + q3 ** 2
q12 = q1 ** 2 + q2 ** 2
chi = np.sqrt(q03 * q12)
if chi == 0.:
if q12 == 0.:
phi_1 = atan2(-2 * P * q0 * q3, q0 ** 2 - q3 ** 2)
Phi = 0.
else:
phi_1 = atan2(-2 * q1 * q2, q1 ** 2 - q2 ** 2)
Phi = pi
phi_2 = 0.
else:
phi_1 = atan2((q1 * q3 - P * q0 * q2) / chi, (-P * q0 * q1 - q2 * q3) / chi)
Phi = atan2(2 * chi, q03 - q12)
phi_2 = atan2((P * q0 * q2 + q1 * q3) / chi, (q2 * q3 - P * q0 * q1) / chi)
return np.degrees([phi_1, Phi, phi_2])
@staticmethod
def Quaternion2OrientationMatrix(q):
P = q.convention
(q0, q1, q2, q3) = q.quat
qbar = q0 ** 2 - q1 ** 2 - q2 ** 2 - q3 ** 2
g = np.array([[qbar + 2 * q1 ** 2, 2 * (q1 * q2 - P * q0 * q3), 2 * (q1 * q3 + P * q0 * q2)],
[2 * (q1 * q2 + P * q0 * q3), qbar + 2 * q2 ** 2, 2 * (q2 * q3 - P * q0 * q1)],
[2 * (q1 * q3 - P * q0 * q2), 2 * (q2 * q3 + P * q0 * q1), qbar + 2 * q3 ** 2]])
return g
@staticmethod
def read_euler_txt(txt_path):
    """
    Read a set of euler angles from an ascii file.

    Thin wrapper kept for backward compatibility: it forwards to
    :py:meth:`read_orientations` with its default ('euler') data type.

    :param str txt_path: path to the text file containing the euler angles.
    :returns dict: a dictionary with the line number and the corresponding orientation.
    """
    return Orientation.read_orientations(txt_path)
@staticmethod
def read_orientations(txt_path, data_type='euler', **kwargs):
    """
    Read a set of grain orientations from a text file.

    The text file must be organised in 3 columns (the others are ignored), corresponding to either the three euler
    angles or the three rodrigues vector components, depending on the data_type. Internally the ascii file is read
    by the genfromtxt function of numpy, additional keywords (such as the delimiter) can be passed via the
    kwargs dictionary.

    :param str txt_path: path to the text file containing the orientations.
    :param str data_type: 'euler' (default) or 'rodrigues'.
    :param dict kwargs: additional parameters passed to genfromtxt.
    :returns dict: a dictionary with the line number and the corresponding orientation.
    """
    data = np.genfromtxt(txt_path, **kwargs)
    orientations = {}
    # keys are 1-based line numbers
    for i in range(len(data)):
        components = np.array([float(data[i, 0]), float(data[i, 1]), float(data[i, 2])])
        if data_type == 'euler':
            orientations[i + 1] = Orientation.from_euler(components)
        elif data_type == 'rodrigues':
            orientations[i + 1] = Orientation.from_rodrigues(components)
    return orientations
@staticmethod
def read_euler_from_zset_inp(inp_path):
    """Read a set of grain orientations from a z-set input file.

    In z-set input files, the orientation data may be specified
    either using the rotation of two vectors, euler angles or
    rodrigues components directly. For instance the following
    line is extracted from a polycrystalline calculation file
    using the rotation keyword:

    ::

     **elset elset1 *file au.mat *integration theta_method_a 1.0 1.e-9 150 *rotation x1 0.438886 -1.028805 0.197933 x3 1.038339 0.893172 1.003888

    :param str inp_path: the path to the ascii file to read.
    :returns dict: a dictionary of the orientations associated with the elset names.
    """
    # use a context manager so the file handle is always released
    # (the previous version never closed the file)
    with open(inp_path) as inp:
        lines = inp.readlines()
    for i, line in enumerate(lines):
        if line.lstrip().startswith('***material'):
            break
    euler_lines = []
    for j, line in enumerate(lines[i + 1:]):
        # read until next *** block
        if line.lstrip().startswith('***'):
            break
        if not line.lstrip().startswith('%') and line.find('**elset') >= 0:
            euler_lines.append(line)
    euler = []
    for l in euler_lines:
        tokens = l.split()
        elset = tokens[tokens.index('**elset') + 1]
        irot = tokens.index('*rotation')
        if tokens[irot + 1] == 'x1':
            # orientation given by two rotated basis vectors x1 and x3
            x1 = np.array([float(tokens[irot + k]) for k in (2, 3, 4)])
            x3 = np.array([float(tokens[irot + k]) for k in (6, 7, 8)])
            euler.append([elset, Orientation.Zrot2OrientationMatrix(x1=x1, x3=x3)])
        else:  # euler angles
            angles = np.array([float(tokens[irot + k]) for k in (1, 2, 3)])
            euler.append([elset, Orientation.from_euler(angles)])
    return dict(euler)
def slip_system_orientation_tensor(self, s):
    """Compute the orientation tensor M^s for this :py:class:`~pymicro.crystal.microstructure.Orientation`
    and the given slip system.

    :param s: an instance of :py:class:`~pymicro.crystal.lattice.SlipSystem`

    .. math::

      M^s_{ij} = \left(l^s_i.n^s_j)
    """
    gt = self.orientation_matrix().transpose()
    # rotate the slip plane normal and the slip direction into the sample CS
    normal_rot = np.dot(gt, s.get_slip_plane().normal())
    direction_rot = np.dot(gt, s.get_slip_direction().direction())
    return np.outer(direction_rot, normal_rot)
def slip_system_orientation_strain_tensor(self, s):
    """Compute the orientation strain tensor m^s for this :py:class:`~pymicro.crystal.microstructure.Orientation`
    and the given slip system.

    :param s: an instance of :py:class:`~pymicro.crystal.lattice.SlipSystem`

    .. math::

      m^s_{ij} = \\frac{1}{2}\left(l^s_i.n^s_j + l^s_j.n^s_i)
    """
    gt = self.orientation_matrix().transpose()
    # rotate the slip plane normal and the slip direction into the sample CS
    normal_rot = np.dot(gt, s.get_slip_plane().normal())
    direction_rot = np.dot(gt, s.get_slip_direction().direction())
    # symmetric part of the orientation tensor
    return 0.5 * (np.outer(direction_rot, normal_rot) + np.outer(normal_rot, direction_rot))
def slip_system_orientation_rotation_tensor(self, s):
    """Compute the orientation rotation tensor q^s for this :py:class:`~pymicro.crystal.microstructure.Orientation`
    and the given slip system.

    :param s: an instance of :py:class:`~pymicro.crystal.lattice.SlipSystem`

    .. math::

      q^s_{ij} = \\frac{1}{2}\left(l^s_i.n^s_j - l^s_j.n^s_i)
    """
    gt = self.orientation_matrix().transpose()
    # rotate the slip plane normal and the slip direction into the sample CS
    normal_rot = np.dot(gt, s.get_slip_plane().normal())
    direction_rot = np.dot(gt, s.get_slip_direction().direction())
    # skew-symmetric part of the orientation tensor
    return 0.5 * (np.outer(direction_rot, normal_rot) - np.outer(normal_rot, direction_rot))
def schmid_factor(self, slip_system, load_direction=(0., 0., 1.)):
    """Compute the Schmid factor for this crystal orientation and the
    given slip system.

    :param slip_system: a slip system instance.
    :param load_direction: a unit vector describing the loading direction
        (default: vertical axis (0, 0, 1)); an immutable tuple is used as
        default to avoid the mutable default argument pitfall.
    :returns float: a number between 0 and 0.5.
    """
    plane = slip_system.get_slip_plane()
    gt = self.orientation_matrix().transpose()
    n_rot = np.dot(gt, plane.normal())  # plane.normal() is a unit vector
    slip = slip_system.get_slip_direction().direction()
    slip_rot = np.dot(gt, slip)
    SF = np.abs(np.dot(n_rot, load_direction) * np.dot(slip_rot, load_direction))
    return SF
def compute_all_schmid_factors(self, slip_systems, load_direction=(0., 0., 1.), verbose=False):
    """Compute all Schmid factors for this crystal orientation and the
    given list of slip systems.

    :param slip_systems: a list of the slip systems from which to compute the Schmid factor values.
    :param load_direction: a unit vector describing the loading direction
        (default: vertical axis (0, 0, 1)); an immutable tuple is used as
        default to avoid the mutable default argument pitfall.
    :param bool verbose: activate verbose mode.
    :returns list: a list of the schmid factors.
    """
    SF_list = []
    for ss in slip_systems:
        sf = self.schmid_factor(ss, load_direction)
        if verbose:
            print('Slip system: %s, Schmid factor is %.3f' % (ss, sf))
        SF_list.append(sf)
    return SF_list
class Grain:
    """
    Class defining a crystallographic grain.

    A grain has its own crystallographic orientation.
    An optional id for the grain may be specified.
    The position field is the center of mass of the grain in world coordinates.
    The volume of the grain is expressed in pixel/voxel unit.
    """

    def __init__(self, grain_id, grain_orientation):
        self.id = grain_id
        self.orientation = grain_orientation
        self.position = np.array([0., 0., 0.])
        self.volume = 0  # warning not implemented
        self.vtkmesh = None

    def __repr__(self):
        """Provide a string representation of the class."""
        s = '%s\n * id = %d\n' % (self.__class__.__name__, self.id)
        s += ' * %s\n' % (self.orientation)
        s += ' * position %s\n' % np.array_str(self.position)
        # identity test with None instead of `!= None`
        s += ' * has vtk mesh ? %s\n' % (self.vtkmesh is not None)
        return s

    def schmid_factor(self, slip_system, load_direction=(0., 0., 1.)):
        """Compute the Schmid factor of this grain for the given slip system.

        This simply delegates to the grain orientation's `schmid_factor`
        method (the previous version recomputed the factor locally and then
        discarded the result, which was dead code).

        :param slip_system: a slip system instance.
        :param load_direction: a unit vector describing the loading direction.
        :returns: the Schmid factor of this grain for the given slip system.
        """
        return self.orientation.schmid_factor(slip_system, load_direction)

    def SetVtkMesh(self, mesh):
        """Set the VTK mesh of this grain.

        :param mesh: the grain mesh in VTK format (typically vtkUnstructuredGrid).
        """
        self.vtkmesh = mesh

    def add_vtk_mesh(self, array, contour=True, verbose=False):
        """Add a mesh to this grain.

        This method processes a labeled array to extract the geometry of the grain. The grain shape is defined by
        the pixels with a value of the grain id. A vtkUniformGrid object is created and thresholded or contoured
        depending on the value of the flag `contour`.
        The resulting mesh is stored on the grain, centered on the center of mass of the grain.

        :param ndarray array: a numpy array from which to extract the grain shape.
        :param bool contour: a flag to use contour mode for the shape.
        :param bool verbose: activate verbose mode.
        """
        label = self.id  # we use the grain id here...
        # create vtk structure
        from scipy import ndimage
        from vtk.util import numpy_support
        grain_size = np.shape(array)
        array_bin = (array == label).astype(np.uint8)
        local_com = ndimage.measurements.center_of_mass(array_bin, array)
        vtk_data_array = numpy_support.numpy_to_vtk(np.ravel(array_bin, order='F'), deep=1)
        grid = vtk.vtkUniformGrid()
        grid.SetOrigin(-local_com[0], -local_com[1], -local_com[2])
        grid.SetSpacing(1, 1, 1)
        if vtk.vtkVersion().GetVTKMajorVersion() > 5:
            grid.SetScalarType(vtk.VTK_UNSIGNED_CHAR, vtk.vtkInformation())
        else:
            grid.SetScalarType(vtk.VTK_UNSIGNED_CHAR)
        if contour:
            # point data + contour filter at the 0.5 iso-value
            grid.SetExtent(0, grain_size[0] - 1, 0, grain_size[1] - 1, 0, grain_size[2] - 1)
            grid.GetPointData().SetScalars(vtk_data_array)
            # renamed so the filter does not shadow the `contour` flag parameter
            contour_filter = vtk.vtkContourFilter()
            if vtk.vtkVersion().GetVTKMajorVersion() > 5:
                contour_filter.SetInputData(grid)
            else:
                contour_filter.SetInput(grid)
            contour_filter.SetValue(0, 0.5)
            contour_filter.Update()
            if verbose:
                print(contour_filter.GetOutput())
            self.SetVtkMesh(contour_filter.GetOutput())
        else:
            # cell data + threshold around the binary value 1
            grid.SetExtent(0, grain_size[0], 0, grain_size[1], 0, grain_size[2])
            grid.GetCellData().SetScalars(vtk_data_array)
            # threshold selected grain
            thresh = vtk.vtkThreshold()
            thresh.ThresholdBetween(0.5, 1.5)
            # thresh.ThresholdBetween(label-0.5, label+0.5)
            if vtk.vtkVersion().GetVTKMajorVersion() > 5:
                thresh.SetInputData(grid)
            else:
                thresh.SetInput(grid)
            thresh.Update()
            if verbose:
                print('thresholding label: %d' % label)
                print(thresh.GetOutput())
            self.SetVtkMesh(thresh.GetOutput())

    def to_xml(self, doc, file_name=None):
        """
        Returns an XML representation of the Grain instance.

        :param doc: the xml `Document` used to create the elements.
        :param str file_name: optional mesh file name (defaults to 'grain_<id>.vtu').
        """
        grain = doc.createElement('Grain')
        grain_id = doc.createElement('Id')
        grain_id_text = doc.createTextNode('%s' % self.id)
        grain_id.appendChild(grain_id_text)
        grain.appendChild(grain_id)
        grain.appendChild(self.orientation.to_xml(doc))
        # write the grain center of mass as X, Y, Z children
        grain_position = doc.createElement('Position')
        for tag, value in (('X', self.position[0]), ('Y', self.position[1]), ('Z', self.position[2])):
            coord = doc.createElement(tag)
            grain_position.appendChild(coord)
            coord.appendChild(doc.createTextNode('%f' % value))
        grain.appendChild(grain_position)
        grain_mesh = doc.createElement('Mesh')
        if not file_name:
            file_name = self.vtk_file_name()
        grain_mesh.appendChild(doc.createTextNode('%s' % file_name))
        grain.appendChild(grain_mesh)
        return grain

    @staticmethod
    def from_xml(grain_node, verbose=False):
        """Build a `Grain` from an XML node written by :py:meth:`to_xml`.

        The node children are expected in order: Id, Orientation, Position, Mesh.
        """
        grain_id = grain_node.childNodes[0]
        grain_orientation = grain_node.childNodes[1]
        orientation = Orientation.from_xml(grain_orientation)
        id = int(grain_id.childNodes[0].nodeValue)
        grain = Grain(id, orientation)
        grain_position = grain_node.childNodes[2]
        xg = float(grain_position.childNodes[0].childNodes[0].nodeValue)
        yg = float(grain_position.childNodes[1].childNodes[0].nodeValue)
        zg = float(grain_position.childNodes[2].childNodes[0].nodeValue)
        grain.position = np.array([xg, yg, zg])
        grain_mesh = grain_node.childNodes[3]
        grain_mesh_file = grain_mesh.childNodes[0].nodeValue
        if verbose:
            print(grain_mesh_file)
        grain.load_vtk_repr(grain_mesh_file, verbose)
        return grain

    def vtk_file_name(self):
        """Return the conventional vtk file name for this grain."""
        return 'grain_%d.vtu' % self.id

    def save_vtk_repr(self, file_name=None):
        """Write the grain mesh to a vtk XML unstructured grid file."""
        import vtk
        if not file_name:
            file_name = self.vtk_file_name()
        print('writing ' + file_name)
        writer = vtk.vtkXMLUnstructuredGridWriter()
        writer.SetFileName(file_name)
        # NOTE(review): SetInput only exists in VTK <= 5; newer versions need SetInputData
        writer.SetInput(self.vtkmesh)
        writer.Write()

    def load_vtk_repr(self, file_name, verbose=False):
        """Read the grain mesh from a vtk XML unstructured grid file."""
        import vtk
        if verbose:
            print('reading ' + file_name)
        reader = vtk.vtkXMLUnstructuredGridReader()
        reader.SetFileName(file_name)
        reader.Update()
        self.vtkmesh = reader.GetOutput()

    def orientation_matrix(self):
        """Returns the grain orientation matrix."""
        return self.orientation.orientation_matrix()

    def dct_omega_angles(self, hkl, lambda_keV, verbose=False):
        """Compute the two omega angles which satisfy the Bragg condition.

        For a grain with a given crystal orientation sitting on a vertical
        rotation axis, there is exactly two omega positions in [0, 2pi] for
        which a particular hkl reflexion will fulfil Bragg's law.
        See :py:func:`~pymicro.crystal.microstructure.Orientation.dct_omega_angles`
        of the :py:class:`~pymicro.crystal.microstructure.Orientation` class.

        :param hkl: The given cristallographic :py:class:`~pymicro.crystal.lattice.HklPlane`
        :param float lambda_keV: The X-rays energy expressed in keV
        :param bool verbose: Verbose mode (False by default)
        :returns tuple: (w1, w2) the two values of the omega angle.
        """
        return self.orientation.dct_omega_angles(hkl, lambda_keV, verbose)
class Microstructure:
"""
Class used to manipulate a full microstructure.
It is typically defined as a list of grains objects.
"""
def __init__(self, name='empty'):
    # name of the microstructure, used when writing files
    self.name = name
    # list of Grain instances composing the microstructure
    self.grains = []
    # optional vtk mesh of the microstructure (None until set)
    self.vtkmesh = None
@staticmethod
def random_texture(n=100):
    """Generate a random texture microstructure.

    :param int n: the number of grain orientations in the microstructure.
    :returns: a new `Microstructure` instance with n randomly oriented grains.
    """
    from random import random
    from math import acos
    micro = Microstructure(name='random_texture')
    for gid in range(1, n + 1):
        # draw uniform Euler angles; acos sampling gives a uniform
        # distribution of the second angle on the sphere
        angles = [360. * random(),
                  180. * acos(2 * random() - 1) / np.pi,
                  360. * random()]
        micro.grains.append(Grain(gid, Orientation.from_euler(angles)))
    return micro
@staticmethod
def rand_cmap(N=4096, first_is_black=False):
    """Create a random color map with N entries.

    The first color can be enforced to black, which usually figures out the
    background. The random seed is fixed so that repeated calls consistently
    produce the same colormap.
    """
    np.random.seed(13)
    palette = np.random.rand(N, 3)
    if first_is_black:
        # enforce black background (value 0)
        palette[0] = [0., 0., 0.]
    return colors.ListedColormap(palette)
def ipf_cmap(self):
    """
    Return a colormap with ipf colors.

    The color table has a fixed size of 4096 entries indexed by grain id
    (grains with id >= 4096 are therefore not represented). The unused
    local variable of the previous version was removed.
    """
    ipf_colors = np.zeros((4096, 3))
    for g in self.grains:
        ipf_colors[g.id, :] = g.orientation.get_ipf_colour()
    return colors.ListedColormap(ipf_colors)
@staticmethod
def from_xml(xml_file_name, grain_ids=None, verbose=False):
    """Load a Microstructure object from an xml file.

    It is possible to restrict the grains which are loaded by providing
    the list of ids of the grains of interest.

    :param str xml_file_name: path to the xml file to parse.
    :param list grain_ids: optional list of grain ids to load (all grains by default).
    :param bool verbose: activate verbose mode.
    :return: the new `Microstructure` instance.
    """
    if verbose and grain_ids:
        print('loading only grain ids %s' % grain_ids)
    micro = Microstructure()
    # expected layout: root node with a name child then a grains child
    dom = parse(xml_file_name)
    root = dom.childNodes[0]
    name = root.childNodes[0]
    micro.name = name.childNodes[0].nodeValue
    grains = root.childNodes[1]
    for node in grains.childNodes:
        # the first child of each grain node holds its id
        if grain_ids and not (int(node.childNodes[0].childNodes[0].nodeValue) in grain_ids): continue
        if verbose:
            print(node)
        micro.grains.append(Grain.from_xml(node, verbose))
    return micro
def get_grain(self, gid):
"""Get a particular grain given its id.
This method browses the microstructure and return the grain
corresponding to the given id. If the grain is not found, the
method raises a `ValueError`.
*Parameters*
**gid**: the grain id.
*Returns*
The method return a `Grain` with the corresponding id.
"""
for grain in self.grains:
if grain.id == gid:
return grain
raise ValueError('grain %d not found in the microstructure' % gid)
def __repr__(self):
"""Provide a string representation of the class."""
s = '%s\n' % self.__class__.__name__
s += '* name: %s\n' % self.name
for g in self.grains:
s += '* %s' % g.__repr__
return s
def SetVtkMesh(self, mesh):
    """Set the vtk mesh representing this microstructure."""
    self.vtkmesh = mesh
def print_zset_material_block(self, mat_file, grain_prefix='_ELSET'):
"""
Outputs the material block corresponding to this microstructure for
a finite element calculation with z-set.
:param str mat_file: The name of the file where the material behaviour is located
:param str grain_prefix: The grain prefix used to name the elsets corresponding to the different grains
"""
f = open('elset_list.txt', 'w')
for g in self.grains:
o = g.orientation
f.write(
' **elset %s%d *file %s *integration theta_method_a 1.0 1.e-9 150 *rotation %7.3f %7.3f %7.3f\n' % (
grain_prefix, g.id, mat_file, o.phi1(), o.Phi(), o.phi2()))
f.close()
def to_h5(self):
    """Write the microstructure as a hdf5 file compatible with DREAM3D.

    The file is named '<name>.h5' and written to the current directory,
    overwriting any existing file with the same name.
    """
    import time
    f = h5py.File('%s.h5' % self.name, 'w')
    # file-level attributes expected by DREAM3D
    f.attrs['FileVersion'] = np.string_('7.0')
    f.attrs['DREAM3D Version'] = np.string_('6.1.77.d28a796')
    f.attrs['HDF5_Version'] = h5py.version.hdf5_version
    f.attrs['h5py_version'] = h5py.version.version
    f.attrs['file_time'] = time.time()
    # pipeline group (empty here)
    pipeline = f.create_group('Pipeline')
    pipeline.attrs['Number_Filters'] = np.int32(0)
    # create the data container group
    data_containers = f.create_group('DataContainers')
    m = data_containers.create_group('DataContainer')
    # ensemble data: two phases (invalid + unknown) with their crystal structures
    ed = m.create_group('EnsembleData')
    ed.attrs['AttributeMatrixType'] = np.uint32(11)
    ed.attrs['TupleDimensions'] = np.uint64(2)
    cryst_structure = ed.create_dataset('CrystalStructures', data=np.array([[999], [1]], dtype=np.uint32))
    cryst_structure.attrs['ComponentDimensions'] = np.uint64(1)
    cryst_structure.attrs['DataArrayVersion'] = np.int32(2)
    cryst_structure.attrs['ObjectType'] = np.string_('DataArray<uint32_t>')
    cryst_structure.attrs['Tuple Axis Dimensions'] = np.string_('x=2')
    cryst_structure.attrs['TupleDimensions'] = np.uint64(2)
    mat_name = ed.create_dataset('MaterialName', data=[a.encode('utf8') for a in ['Invalid Phase', 'Unknown']])
    mat_name.attrs['ComponentDimensions'] = np.uint64(1)
    mat_name.attrs['DataArrayVersion'] = np.int32(2)
    mat_name.attrs['ObjectType'] = np.string_('StringDataArray')
    mat_name.attrs['Tuple Axis Dimensions'] = np.string_('x=2')
    mat_name.attrs['TupleDimensions'] = np.uint64(2)
    # feature data: one tuple per grain with its average Euler angles
    # NOTE(review): assumes each grain orientation exposes a `euler` attribute -- confirm
    fd = m.create_group('FeatureData')
    fd.attrs['AttributeMatrixType'] = np.uint32(7)
    fd.attrs['TupleDimensions'] = np.uint64(len(self.grains))
    avg_euler = fd.create_dataset('AvgEulerAngles',
                                  data=np.array([g.orientation.euler for g in self.grains], dtype=np.float32))
    avg_euler.attrs['ComponentDimensions'] = np.uint64(3)
    avg_euler.attrs['DataArrayVersion'] = np.int32(2)
    avg_euler.attrs['ObjectType'] = np.string_('DataArray<float>')
    avg_euler.attrs['Tuple Axis Dimensions'] = np.string_('x=%d' % len(self.grains))
    avg_euler.attrs['TupleDimensions'] = np.uint64(len(self.grains))
    # geometry (unknown here)
    geom = m.create_group('_SIMPL_GEOMETRY')
    geom.attrs['GeometryType'] = np.uint32(999)
    geom.attrs['GeometryTypeName'] = np.string_('UnkownGeometry')
    # create the data container bundles group
    f.create_group('DataContainerBundles')
    f.close()
@staticmethod
def from_h5(file_path, main_key='DataContainers', data_container='DataContainer', grain_data='FeatureData',
            grain_orientations='AvgEulerAngles', orientation_type='euler', grain_centroid='Centroids'):
    """Read a microstructure from a hdf5 file.

    :param str file_path: the path to the hdf5 file to read.
    :param str main_key: the string describing the root key.
    :param str data_container: the string describing the data container group in the hdf5 file.
    :param str grain_data: the string describing the grain data group in the hdf5 file.
    :param str grain_orientations: the string describing the average grain orientations in the hdf5 file.
    :param str orientation_type: the string describing the descriptor used for orientation data.
    :param str grain_centroid: the string describing the grain centroid in the hdf5 file.
    :return: a `Microstructure` instance created from the hdf5 file.
    """
    micro = Microstructure()
    with h5py.File(file_path, 'r') as f:
        grain_data_path = '%s/%s/%s' % (main_key, data_container, grain_data)
        # Dataset.value was removed in h5py 3.0; [()] reads the full dataset
        orientations = f[grain_data_path][grain_orientations][()]
        if grain_centroid:
            centroids = f[grain_data_path][grain_centroid][()]
            offset = 0
            if len(centroids) < len(orientations):
                offset = 1  # if grain 0 has not a centroid
        for i in range(len(orientations)):
            if orientations[i, 0] == 0. and orientations[i, 1] == 0. and orientations[i, 2] == 0.:
                # skip grain 0 which is always (0., 0., 0.)
                print('skipping (0., 0., 0.)')
                continue
            if orientation_type == 'euler':
                g = Grain(i, Orientation.from_euler(orientations[i] * 180 / np.pi))
            elif orientation_type == 'rodrigues':
                g = Grain(i, Orientation.from_rodrigues(orientations[i]))
            if grain_centroid:
                g.position = centroids[i - offset]
            micro.grains.append(g)
    return micro
@staticmethod
def from_dct(data_root='.', vol_file='phase_01_vol.mat', grain_ids=None, verbose=True):
"""Create a microstructure from a DCT reconstruction.
DCT reconstructions are stored in hdf5 matlab files. the reconstructed volume file (labeled image) is stored
in the '5_reconstruction' folder and the individual grain files are stored in the '4_grains/phase_01' folder.
:param str data_root: the path to the folder containing the data.
:param str vol_file: the name of the volume file.
:param list grain_ids: a list of grain ids to load into the `Microstructure` instance.
:param bool verbose: activate verbose mode.
:return: a `Microstructure` instance created from the DCT reconstruction.
"""
from scipy import ndimage
micro = Microstructure()
micro.data_root = data_root
vol_file = os.path.join(data_root, '5_reconstruction', vol_file)
with h5py.File(vol_file, 'r') as f:
# choose weather or not to load the volume into memory here
vol = f['vol'].value.transpose(2, 1, 0) # Because how matlab writes the data, we need to swap X and Z axes in the DCT volume
if verbose:
print('loaded volume with shape: %d x %d x %d' % (vol.shape[0], vol.shape[1], vol.shape[2]))
all_grain_ids = np.unique(vol)
if not grain_ids:
grain_ids = all_grain_ids
else:
# check that all requested grain ids are present
for label in [x for x in grain_ids if x not in all_grain_ids]:
print('warning, requested grain %d is not present in the data file' % label)
micro_mesh = vtk.vtkMultiBlockDataSet()
micro_mesh.SetNumberOfBlocks(len(grain_ids))
for i, label in enumerate(grain_ids):
if label <= 0:
continue
grain_path = os.path.join('4_grains', 'phase_01', 'grain_%04d.mat' % label)
grain_file = os.path.join(micro.data_root, grain_path)
grain_info = h5py.File(grain_file)
g = Grain(label, Orientation.from_rodrigues(grain_info['R_vector'].value))
g.position = grain_info['center'].value
grain_data = vol[ndimage.find_objects(vol == label)[0]]
g.volume = ndimage.measurements.sum(vol == label)
# create the vtk representation of the grain
g.add_vtk_mesh(grain_data, contour=False)
if verbose:
print('loading grain %d' % label)
print('adding block %d to mesh for grain %d' % (i, label))
micro_mesh.SetBlock(i, g.vtkmesh)
micro.grains.append(g)
micro.SetVtkMesh(micro_mesh)
return micro
def to_xml(self, doc):
"""
Returns an XML representation of the Microstructure instance.
"""
root = doc.createElement('Microstructure')
doc.appendChild(root)
name = doc.createElement('Name')
root.appendChild(name)
name_text = doc.createTextNode(self.name)
name.appendChild(name_text)
grains = doc.createElement('Grains')
root.appendChild(grains)
for i, grain in enumerate(self.grains):
file_name = os.path.join(self.name, '%s_%d.vtu' % (self.name, i))
grains.appendChild(grain.to_xml(doc, file_name))
def save(self):
"""Saving the microstructure to the disk.
Save the metadata as a XML file and when available, also save the
vtk representation of the grains.
"""
# save the microstructure instance as xml
doc = Document()
self.to_xml(doc)
xml_file_name = '%s.xml' % self.name
print('writing ' + xml_file_name)
f = open(xml_file_name, 'wb')
doc.writexml(f, encoding='utf-8')
f.close()
# now save the vtk representation
if self.vtkmesh != None:
import vtk
vtk_file_name = '%s.vtm' % self.name
print('writing ' + vtk_file_name)
writer = vtk.vtkXMLMultiBlockDataWriter()
writer.SetFileName(vtk_file_name)
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
writer.SetInputData(self.vtkmesh)
else:
writer.SetInput(self.vtkmesh)
writer.Write()
    def dct_projection(self, data, lattice, omega, dif_grains, lambda_keV, d, ps, det_npx=np.array([2048, 2048]), ds=1,
                       display=False, verbose=False):
        """Compute the detector image in dct configuration.

        Builds the transmission (absorption) radiograph of the sample at
        rotation angle `omega`, removes the grains that are in Bragg
        condition from it, then adds their diffraction spots at the computed
        detector positions. The image is either displayed or saved to disk.

        :params np.ndarray data: The 3d data set from which to compute the projection.
        :params lattice: The crystal lattice of the material.
        :params float omega: The rotation angle at which the projection is computed.
        :params dif_grains: iterable of (grain id, (h, k, l)) pairs in Bragg condition.
        :params float lambda_keV: the X-ray energy in keV.
        :params float d: sample to detector distance, in mm (used as d * K[1] / K[0] below).
        :params ps: detector pixel size -- presumably microns, given the 1000 * u conversion; TODO confirm.
        :params det_npx: detector size in pixels along both directions.
        :params ds: down-sampling factor applied to the detector image.
        :params bool display: show the image interactively instead of saving a png.
        :params bool verbose: activate verbose mode.
        """
        # wavelength (nm) from energy (keV) via lambda = 1.2398 / E
        lambda_nm = 1.2398 / lambda_keV
        # prepare rotation matrix (rotation by omega about the third axis)
        omegar = omega * np.pi / 180
        R = np.array([[np.cos(omegar), -np.sin(omegar), 0], [np.sin(omegar), np.cos(omegar), 0], [0, 0, 1]])
        # binary absorption volume: 1 where there is material, 0 elsewhere
        data_abs = np.where(data > 0, 1, 0)
        # projection width: diagonal of the sample cross section
        # NOTE(review): np.float below was removed in numpy >= 1.24; confirm pinned numpy
        x_max = np.ceil(max(data_abs.shape[0], data_abs.shape[1]) * 2 ** 0.5)
        proj = np.zeros((np.shape(data_abs)[2], x_max), dtype=np.float)
        if verbose:
            print('diffracting grains', dif_grains)
            print('proj size is ', np.shape(proj))
        # handle each grain in Bragg condition: a diffracting grain scatters
        # the beam away, so it leaves a hole in the transmission image
        for (gid, (h, k, l)) in dif_grains:
            mask_dif = (data == gid)
            data_abs[mask_dif] = 0  # remove this grain from the absorption
        from skimage.transform import radon
        # build the radiograph slice by slice (one detector row per z-slice)
        for i in range(np.shape(data_abs)[2]):
            proj[i, :] = radon(data_abs[:, :, i], [omega])[:, 0]
        # create the detector image (larger than the FOV) by padding the transmission image with zeros
        full_proj = np.zeros(det_npx / ds, dtype=np.float)
        if verbose:
            print('full proj size is ', np.shape(full_proj))
            print('max proj', proj.max())
        # here we could use np.pad with numpy version > 1.7
        # NOTE(review): these four prints look like leftover debug output
        print(int(0.5 * det_npx[0] / ds - proj.shape[0] / 2.))
        print(int(0.5 * det_npx[0] / ds + proj.shape[0] / 2.))
        print(int(0.5 * det_npx[1] / ds - proj.shape[1] / 2.))
        print(int(0.5 * det_npx[1] / ds + proj.shape[1] / 2.))
        # let's moderate the direct beam so we see nicely the spots with a 8 bits scale
        att = 6.0 / ds  # 1.0
        # paste the (attenuated) transmission image at the detector center
        full_proj[int(0.5 * det_npx[0] / ds - proj.shape[0] / 2.):int(0.5 * det_npx[0] / ds + proj.shape[0] / 2.), \
            int(0.5 * det_npx[1] / ds - proj.shape[1] / 2.):int(0.5 * det_npx[1] / ds + proj.shape[1] / 2.)] += proj / att
        # add diffraction spots
        from pymicro.crystal.lattice import HklPlane
        from scipy import ndimage
        for (gid, (h, k, l)) in dif_grains:
            # compute scattering vector
            gt = self.get_grain(gid).orientation_matrix().transpose()
            p = HklPlane(h, k, l, lattice)
            # incident wave vector along X with norm 1/lambda
            X = np.array([1., 0., 0.]) / lambda_nm
            # plane normal rotated into the lab frame at this omega
            n = R.dot(gt.dot(p.normal()))
            G = n / p.interplanar_spacing()  # also G = R.dot(gt.dot(h*astar + k*bstar + l*cstar))
            # diffracted wave vector
            K = X + G
            # TODO explain the - signs, account for grain position in the rotated sample
            (u, v) = (d * K[1] / K[0], d * K[2] / K[0])  # unit is mm
            (u_mic, v_mic) = (1000 * u, 1000 * v)  # unit is micron
            (up, vp) = (0.5 * det_npx[0] / ds + u_mic / (ps * ds),
                        0.5 * det_npx[1] / ds + v_mic / (ps * ds))  # unit is pixel on the detector
            if verbose:
                print('plane normal:', p.normal())
                print(R)
                print('rotated plane normal:', n)
                print('scattering vector:', G)
                print('K = X + G vector', K)
                print('lenght X', np.linalg.norm(X))
                print('lenght K', np.linalg.norm(K))
                print('angle between X and K', np.arccos(
                    np.dot(K, X) / (np.linalg.norm(K) * np.linalg.norm(X))) * 180 / np.pi)
                print('diffracted beam will hit the detector at (%.3f,%.3f) mm or (%d,%d) pixels' % (u, v, up, vp))
            # project only this grain to build its diffraction spot
            grain_data = np.where(data == gid, 1, 0)
            data_dif = grain_data[ndimage.find_objects(data == gid)[0]]
            x_max = np.ceil(max(data_dif.shape[0], data_dif.shape[1]) * 2 ** 0.5)
            proj_dif = np.zeros((np.shape(data_dif)[2], x_max), dtype=np.float)
            for i in range(np.shape(data_dif)[2]):
                a = radon(data_dif[:, :, i], [omega])
                proj_dif[i, :] = a[:, 0]
            if verbose:
                print('* proj_dif size is ', np.shape(proj_dif))
                print(int(up - proj_dif.shape[0] / 2.))
                print(int(up + proj_dif.shape[0] / 2.))
                print(int(vp - proj_dif.shape[1] / 2.))
                print(int(vp + proj_dif.shape[1] / 2.))
                print('max proj_dif', proj_dif.max())
            # add diffraction spot to the image detector
            try:
                # warning full_proj image is transposed (we could fix that and plot with .T since pyplot plots images like (y,x))
                full_proj[int(vp - proj_dif.shape[0] / 2.):int(vp + proj_dif.shape[0] / 2.), \
                    int(up - proj_dif.shape[1] / 2.):int(up + proj_dif.shape[1] / 2.)] += proj_dif
                # full_proj[int(up - proj_dif.shape[0]/2.):int(up + proj_dif.shape[0]/2.), \
                #    int(vp - proj_dif.shape[1]/2.):int(vp + proj_dif.shape[1]/2.)] += proj_dif
            except:
                # NOTE(review): bare except also hides unrelated errors; narrow it
                print('error occured')  # grain diffracts outside the detector
                pass
            plt.imsave('proj_dif/proj_dif_grain%d_omega=%05.1f.png' % (gid, omega), proj_dif, cmap=cm.gray,
                       origin='lower')
        if display:
            fig = plt.figure(figsize=(10, 10))
            ax = fig.add_subplot(111)
            ax.imshow(full_proj[:, ::-1], cmap=cm.gray, vmin=0, vmax=255, origin='lower')  # check origin
            # overlay rings at the 2-theta radius of a fixed list of reflections
            for (h, k, l) in [(1, 1, 0), (2, 0, 0), (2, 1, 1), (2, 2, 0), (2, 2, 2), (3, 1, 0), (3, 2, 1), (3, 3, 0),
                              (3, 3, 2)]:
                hkl = HklPlane(h, k, l, lattice)
                theta = hkl.bragg_angle(lambda_keV)
                print('bragg angle for %s reflection is %.2f deg' % (hkl.miller_indices(), theta * 180. / np.pi))
                t = np.linspace(0.0, 2 * np.pi, num=37)
                L = d * 1000 / ps / ds * np.tan(2 * theta)  # 2 theta distance on the detector
                ax.plot(0.5 * det_npx[0] / ds + L * np.cos(t), 0.5 * det_npx[1] / ds + L * np.sin(t), 'g--')
                ax.annotate(str(h) + str(k) + str(l), xy=(0.5 * det_npx[0] / ds, 0.5 * det_npx[1] / ds + L),
                            xycoords='data', color='green', horizontalalignment='center', verticalalignment='bottom',
                            fontsize=16)
            plt.xlim(0, det_npx[0] / ds)
            plt.ylim(0, det_npx[1] / ds)
            plt.show()
        else:
            # save projection image with origin = lower since Z-axis is upwards
            plt.imsave('proj/proj_omega=%05.1f.png' % omega, full_proj, cmap=cm.gray, vmin=0, vmax=100, origin='lower')
|
from board import *
from pos import *
# TODO: cloned in GUI
class IllegalMoveException(Exception):
    """Raised when a move cannot be played, e.g. the position is occupied."""
    pass
class GameState():
    """ This is for the state of a game as of a particular move.

    Holds the board, the captured-stone counts per player, the winner (if
    any) and the current move number; states chain to their predecessor via
    `parent`.
    """
    def __init__(self, game, parent=None):
        # game: the owning game object; parent: preceding state or None for
        # the initial position.
        self.game = game
        self.parent = parent
        self.observers = []
        if parent == None:
            self.board = Board(game.size())
            # 3 for convenience, should only use [1] and [2]
            self.captured = [0,0,0]
            self.won_by = False
            self.move_number = 1
        else:
            self.board = parent.board # TODO: Clone
            self.captured = parent.captured[:]
            self.won_by = parent.won_by
            self.move_number = parent.move_number # not + 1, that will be triggered by a move
        # TODO: copy AI observer manually

    def get_move_number(self):
        """Return the current move number (starts at 1)."""
        return self.move_number

    def get_captured(self, player_num):
        """Return the number of stones captured by player_num (1 or 2)."""
        return self.captured[player_num]

    # these two should only be used for testing
    def set_move_number(self, turn):
        self.move_number = turn

    def set_captured(self, player_num, pieces):
        self.captured[player_num] = pieces

    def make_move(self, move):
        """Place a stone at move.pos, process pente captures and detect a win.

        Raises IllegalMoveException if the target position is occupied.
        """
        move_pos = move.pos
        if self.board.get_occ(move_pos) > 0:
            raise IllegalMoveException("That position is already occupied")
        other_colour = self.to_move_colour()
        # Place a stone
        self.move_number += 1
        # FIXME: this should go before the move inc., but it breaks
        my_colour = self.to_move_colour()
        self.board.set_occ(move_pos, my_colour)
        board_size = self.board.get_size()
        MC = my_colour
        OC = other_colour
        # Process captures
        # TODO: keryo pente capture 3s
        for direction in DIRECTIONS:
            # pattern mine-theirs-theirs-mine captures the enclosed pair
            occs = self.board.get_occs_in_a_line_for_capture_test(move_pos, direction, 4)
            if occs == [MC, OC, OC, MC]:
                capture_pos1 = move_pos.shift(direction, 1)
                capture_pos2 = move_pos.shift(direction, 2)
                # Remove stones
                self.board.set_occ(capture_pos1, EMPTY)
                self.board.set_occ(capture_pos2, EMPTY)
                # Keep track of capture count
                self.captured[my_colour] += 2
                # ten captured stones (five pairs) wins
                if self.captured[my_colour] >= 10:
                    self.won_by = MC
        # Check for a win by checking all the lines that run through
        # the move position.
        # We only need to check half of the directions,
        # because for each we need to check the opposite direction
        # in case the last stone was not placed at an end.
        for direction in DIRECTIONS[:4]:
            # walk outward in the positive direction while we see our colour
            l = 1
            while l < 5:
                test_pos = move_pos.shift(direction, l)
                if test_pos[0] < 0 or \
                   test_pos[0] > board_size or \
                   test_pos[1] < 0 or \
                   test_pos[1] > board_size:
                    # Other end of a potential line is off the edge of the board
                    break
                next_col = self.board.get_occ(test_pos)
                if next_col != my_colour:
                    break
                l += 1
            # Now see how far the line goes in the opposite direction.
            m = -1
            while m > -5:
                test_pos = move_pos.shift(direction, m)
                if test_pos[0] < 0 or \
                   test_pos[0] > board_size or \
                   test_pos[1] < 0 or \
                   test_pos[1] > board_size:
                    # Other end of a potential line is off the edge of the board
                    break
                next_col = self.board.get_occ(test_pos)
                if next_col != my_colour:
                    break
                m -= 1
            # l and m each stop one step past the line; combine both arms
            total_line_length = 1 + (l-1) - (m+1)
            # TODO: check rules to see if lines longer than 5 also win
            if total_line_length >= 5:
                self.won_by = my_colour

    def to_move(self):
        # True on even move numbers -- NOTE(review): meaning of the boolean
        # (which player it designates) is not visible here; confirm callers.
        return not self.move_number % 2

    def to_move_colour(self):
        """Return the colour (1 or 2) of the player whose turn it is."""
        return (self.move_number % 2) + 1

    def successors(self):
        """Return (move, resulting state) pairs for every legal move.

        NOTE(review): references self.BOARD_SIZE, Move and State, none of
        which are defined in this module -- this method appears stale/broken.
        """
        succ = []
        for x in range(self.BOARD_SIZE):
            for y in range(self.BOARD_SIZE):
                pos = Pos(x, y)
                action = Move(pos)
                try:
                    succ.append((action, State(self, action)))
                except IllegalMoveException:
                    pass
        return succ

    '''
    # TODO: Move this to ABState, maybe use Rules object
    def utility(self, player):
        # 5+ in a row or 5+ pairs captured = infinity
        if self.captured[BLACK] >= 10 or self.won_by == BLACK:
            return alpha_beta.infinity
        if self.captured[WHITE] >= 10 or self.won_by == WHITE:
            return -alpha_beta.infinity
        return self.captured[0] - self.captured[1]

    def score(self):
        return self.utility(None)
    '''
# TODO: clean up
from board import *
from pos import *
# TODO: cloned in GUI
class IllegalMoveException(Exception):
    """Raised when a move cannot be played, e.g. the position is occupied."""
    pass
class GameState():
    """ This is for the state of a game as of a particular move.

    Holds the board, per-player capture counts, the winner (if any) and the
    move number; states chain to their predecessor via `parent`.
    """
    def __init__(self, game, parent=None):
        self.game = game
        self.parent = parent
        if parent == None:
            self.board = Board(game.size())
            # 3 for convenience, should only use [1] and [2]
            self.captured = [0,0,0]
            self.won_by = False
            self.move_number = 1
        else:
            self.board = parent.board # TODO: Clone
            self.captured = parent.captured[:]
            self.won_by = parent.won_by
            self.move_number = parent.move_number # not + 1, that will be triggered by a move
        # TODO: copy AI observer manually

    def get_move_number(self):
        """Return the current move number (starts at 1)."""
        return self.move_number

    def get_captured(self, player_num):
        """Return the number of stones captured by player_num (1 or 2)."""
        return self.captured[player_num]

    # these two should only be used for testing
    def set_move_number(self, turn):
        self.move_number = turn

    def set_captured(self, player_num, pieces):
        self.captured[player_num] = pieces

    def make_move(self, move):
        """Place a stone at move.pos, process pente captures and detect a win.

        Raises IllegalMoveException if the target position is occupied.
        """
        move_pos = move.pos
        if self.board.get_occ(move_pos) > 0:
            raise IllegalMoveException("That position is already occupied")
        other_colour = self.to_move_colour()
        # Place a stone
        self.move_number += 1
        # FIXME: this should go before the move inc., but it breaks
        my_colour = self.to_move_colour()
        self.board.set_occ(move_pos, my_colour)
        board_size = self.board.get_size()
        MC = my_colour
        OC = other_colour
        # Process captures
        # TODO: keryo pente capture 3s
        for direction in DIRECTIONS:
            # pattern mine-theirs-theirs-mine captures the enclosed pair
            occs = self.board.get_occs_in_a_line_for_capture_test(move_pos, direction, 4)
            if occs == [MC, OC, OC, MC]:
                capture_pos1 = move_pos.shift(direction, 1)
                capture_pos2 = move_pos.shift(direction, 2)
                # Remove stones
                self.board.set_occ(capture_pos1, EMPTY)
                self.board.set_occ(capture_pos2, EMPTY)
                # Keep track of capture count
                self.captured[my_colour] += 2
                # ten captured stones (five pairs) wins
                if self.captured[my_colour] >= 10:
                    self.won_by = MC
        # Check for a win by checking all the lines that run through
        # the move position.
        # We only need to check half of the directions,
        # because for each we need to check the opposite direction
        # in case the last stone was not placed at an end.
        for direction in DIRECTIONS[:4]:
            l = 1
            while l < 5:
                test_pos = move_pos.shift(direction, l)
                if test_pos[0] < 0 or \
                   test_pos[0] > board_size or \
                   test_pos[1] < 0 or \
                   test_pos[1] > board_size:
                    # Other end of a potential line is off the edge of the board
                    break
                next_col = self.board.get_occ(test_pos)
                if next_col != my_colour:
                    break
                l += 1
            # Now see how far the line goes in the opposite direction.
            m = -1
            while m > -5:
                test_pos = move_pos.shift(direction, m)
                if test_pos[0] < 0 or \
                   test_pos[0] > board_size or \
                   test_pos[1] < 0 or \
                   test_pos[1] > board_size:
                    # Other end of a potential line is off the edge of the board
                    break
                next_col = self.board.get_occ(test_pos)
                if next_col != my_colour:
                    break
                m -= 1
            # l and m each stop one step past the line; combine both arms
            total_line_length = 1 + (l-1) - (m+1)
            # TODO: check rules to see if lines longer than 5 also win
            if total_line_length >= 5:
                self.won_by = my_colour

    def to_move(self):
        # True on even move numbers -- NOTE(review): confirm which player
        # this boolean designates.
        return not self.move_number % 2

    def to_move_colour(self):
        """Return the colour (1 or 2) of the player whose turn it is."""
        return (self.move_number % 2) + 1

    def successors(self):
        """Return (move, resulting state) pairs for every legal move.

        NOTE(review): references self.BOARD_SIZE, Move and State, none of
        which are defined in this module -- this method appears stale/broken.
        """
        succ = []
        for x in range(self.BOARD_SIZE):
            for y in range(self.BOARD_SIZE):
                pos = Pos(x, y)
                action = Move(pos)
                try:
                    succ.append((action, State(self, action)))
                except IllegalMoveException:
                    pass
        return succ

    '''
    # TODO: Move this to ABState, maybe use Rules object
    def utility(self, player):
        # 5+ in a row or 5+ pairs captured = infinity
        if self.captured[BLACK] >= 10 or self.won_by == BLACK:
            return alpha_beta.infinity
        if self.captured[WHITE] >= 10 or self.won_by == WHITE:
            return -alpha_beta.infinity
        return self.captured[0] - self.captured[1]

    def score(self):
        return self.utility(None)
    '''
|
import os
import sys
import time
import re
import functools
from termcolor import colored, cprint
from itertools import chain
from .Locus import Locus
from .Config import cf
from apsw import CantOpenError
import camoco as co
import matplotlib.pylab as pylab
import numpy as np
import pandas as pd
import statsmodels.api as sm
import gzip
import bz2
def available_datasets(type='%', name='%'):
    """List registered Camoco datasets matching SQL LIKE patterns.

    :param type: LIKE pattern for the dataset type ('%' matches all).
    :param name: LIKE pattern for the dataset name ('%' matches all).
    :return: a DataFrame of matches indexed by Type; when both patterns are
        exact (contain no '%'), a bool telling whether that dataset exists.
        Returns False if the Camoco registry database cannot be opened.
    """
    try:
        cur = co.Camoco("Camoco", type='Camoco').db.cursor()
        datasets = cur.execute('''
            SELECT type, name, description, added
            FROM datasets
            WHERE type LIKE ?
            AND name LIKE ?
            ORDER BY type;''', (type, name)).fetchall()
        if datasets:
            datasets = pd.DataFrame(
                datasets,
                columns=["Type", "Name", "Description", "Date Added"],
            ).set_index('Type')
        else:
            datasets = pd.DataFrame(
                columns=["Type", "Name", "Description", "Date Added"]
            )
        # Check to see if we are looking for a specific dataset
        if '%' not in type and '%' not in name:
            # membership test is already a bool; no ternary needed
            return name in datasets['Name'].values
        else:
            return datasets
    except CantOpenError:
        return False
def available(type=None, name=None):
    """Convenience alias for :func:`available_datasets`."""
    return available_datasets(type=type, name=name)
def del_dataset(type, name, safe=True):
    """Remove a dataset's registry entry and its on-disk database files.

    :param type: dataset type (e.g. 'Expr', 'RefGen').
    :param name: dataset name.
    :param safe: when True, ask for interactive confirmation first.
    :return: True on completion (also when the registry cannot be opened);
        None when the user declines the confirmation prompt.
    """
    try:
        c = co.Camoco("Camoco")
    except CantOpenError:
        return True
    if safe:
        df = available(type=type, name=name)
        c.log("Are you sure you want to delete:\n {}", df)
        if input("(Notice CAPS)[Y/n]:") != 'Y':
            c.log("Nothing Deleted")
            return
    c.log("Deleting {}", name)
    try:
        # bound parameters instead of str.format: the old query was open to
        # SQL injection through the dataset name/type
        c.db.cursor().execute('''
            DELETE FROM datasets
            WHERE name LIKE ?
            AND type LIKE ?;''', (name, type)
        )
    except CantOpenError:
        pass
    # remove the sqlite database file, ignoring a missing file
    try:
        os.remove(
            os.path.expanduser(os.path.join(
                cf.options.basedir,
                'databases',
                '{}.{}.db'.format(type, name)
            )
            )
        )
    except FileNotFoundError:
        pass
    # remove the hdf5 file, ignoring a missing file
    try:
        os.remove(
            os.path.expanduser(os.path.join(
                cf.options.basedir,
                'databases',
                '{}.{}.hd5'.format(type, name)
            )
            )
        )
    except FileNotFoundError:
        pass
    if type == 'Expr':
        # also have to remove the COB specific refgen
        del_dataset('RefGen', 'Filtered' + name, safe=safe)
    return True
def mv_dataset(type, name, new_name):
    """Rename a dataset in the registry and move its sqlite file on disk."""
    c = co.Camoco("Camoco")
    c.db.cursor().execute('''
        UPDATE datasets SET name = ?
        WHERE name = ? AND
        type = ?''',(new_name,name,type)
    )
    old_path = c._resource('databases', '.'.join([type, name]) + ".db")
    new_path = c._resource('databases', ".".join([type, new_name]) + ".db")
    os.rename(old_path, new_path)
class rawFile(object):
    """Context manager that transparently opens plain, gzip ('.gz') or
    bzip2 ('bz2') files for text reading."""

    def __init__(self, filename):
        self.filename = filename
        # choose an opener and mode from the file extension
        if filename.endswith('.gz'):
            opener, mode = gzip.open, 'rt'
        elif filename.endswith('bz2'):
            opener, mode = bz2.open, 'rt'
        else:
            opener, mode = open, 'r'
        self.handle = opener(filename, mode)

    def __enter__(self):
        return self.handle

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.handle.close()
def redescribe_dataset(type, name, new_desc):
    """Replace the stored description of the (type, name) dataset."""
    registry = co.Camoco("Camoco")
    registry.db.cursor().execute('''
        UPDATE datasets SET description = ?
        WHERE name = ? AND type = ?''',
        (new_desc, name, type)
    )
def memoize(obj):
    """Decorator caching obj's results, keyed on the stringified arguments.

    The cache dict is exposed as the ``cache`` attribute of the wrapped
    callable. Passing ``clear_cache=<anything>`` to the memoized callable
    flushes the cache before the call.
    """
    cache = obj.cache = {}

    @functools.wraps(obj)
    def memoizer(*args, **kwargs):
        # Give us a way to clear the cache; pop the flag so it is not
        # forwarded to obj (forwarding it raised TypeError on callables
        # that do not accept a 'clear_cache' keyword).
        if 'clear_cache' in kwargs:
            kwargs.pop('clear_cache')
            cache.clear()
        # This wraps the calling of the memoized object
        key = str(args) + str(kwargs)
        if key not in cache:
            cache[key] = obj(*args, **kwargs)
        return cache[key]
    return memoizer
class log(object):
    """Tiny stderr logger gated on the config log level being 'verbose'.

    Instantiating with a message logs it once; the instance itself is also
    callable for subsequent messages.
    """

    def __init__(self, msg=None, *args, color='green'):
        if msg is not None and cf.logging.log_level == 'verbose':
            self._emit(msg, args, color)
        self.quiet = False

    @classmethod
    def warn(cls, msg, *args):
        """Log msg in red."""
        cls(msg, *args, color='red')

    def __call__(self, msg, *args, color='green'):
        if cf.logging.log_level == 'verbose':
            self._emit(msg, args, color)

    @staticmethod
    def _emit(msg, args, color):
        # single formatting/printing path shared by __init__ and __call__
        print(
            colored(
                " ".join(["[LOG]", time.ctime(), '-', msg.format(*args)]),
                color=color
            ), file=sys.stderr
        )
def plot_flanking_vs_inter(cob):
    """Plot KDE densities of coexpression scores for flanking gene pairs
    versus non-finite-distance pairs of network `cob`; the figure is saved
    as <cob.name>_flank_inter.png.
    """
    import numpy as np
    from scipy import stats
    import statsmodels.api as sm
    import matplotlib.pyplot as plt
    from statsmodels.distributions.mixture_rvs import mixture_rvs
    log('Getting genes')
    genes = sorted(list(cob.refgen.iter_genes()))
    # score between each gene and its immediate predecessor in sorted order
    flanking = np.array([cob.coexpression(genes[i], genes[i-1]).score for i in range(1, len(genes))])
    # pairs whose distance is not finite -- presumably trans pairs; confirm
    inter = cob.coex[~np.isfinite(cob.coex.distance)].score.values
    log('Getting flanking KDE')
    # get the KDEs
    flanking_kde = sm.nonparametric.KDEUnivariate(flanking)
    flanking_kde.fit()
    log('Getting Inter KDE')
    inter_kde = sm.nonparametric.KDEUnivariate(inter)
    inter_kde.fit()
    log('Plotting')
    plt.clf()
    fig = plt.figure(figsize=(8, 4))
    # NOTE(review): Figure.hold was removed in matplotlib 3.0; this call
    # fails on modern matplotlib -- confirm the pinned version
    fig.hold(True)
    ax = fig.add_subplot(1, 1, 1)
    ax.set_xlim([-4, 4])
    ax.set_ylim([0, 0.5])
    ax.plot(flanking_kde.support, flanking_kde.density, lw=2, color='black', alpha=1)
    ax.fill(flanking_kde.support, flanking_kde.density, color='red', alpha=0.3, label='Cis Interactions')
    # diamond marker at the median, just below the x axis
    ax.scatter(np.median(flanking), -0.05, marker='D', color='red')
    ax.set_xlim([-4, 4])
    ax.set_ylim([0, 0.5])
    ax.plot(inter_kde.support, inter_kde.density, lw=2, color='black')
    ax.fill(inter_kde.support, inter_kde.density, color='blue', alpha=0.3, label='Trans Interactions')
    ax.scatter(np.median(inter), -0.05, marker='D', color='blue')
    ax.set_xlabel('CoExpression Interaction (Z-Score)')
    ax.set_ylabel('Distribution Density')
    fig.tight_layout()
    fig.savefig("{}_flank_inter.png".format(cob.name))
def plot_local_global_degree(term, filename=None, bootstraps=1):
    """Scatter local vs. global degree of a term's flanking genes (red) over
    bootstrapped backgrounds (faint), saving the figure to `filename`."""
    root = co.COB("ROOT")
    refgen = root.refgen  # use root specific for bootstraps
    hood = root.neighborhood(term.flanking_genes(refgen))
    bootstrap_frames = [root.neighborhood(term.bootstrap_flanking_genes(refgen))
                        for _ in range(0, bootstraps)]
    bshood = pd.concat(bootstrap_frames)
    pylab.clf()
    pylab.scatter(bshood['local'], bshood['global'], alpha=0.05)
    pylab.scatter(hood['local'], hood['global'], c='r')
    pylab.xlabel('Local Degree')
    pylab.ylabel('Global Degree')
    pylab.title('{} Locality'.format(term.id))
    if filename is None:
        filename = "{}_locality.png".format(term.id)
    pylab.savefig(filename)
def plot_local_vs_cc(term, filename=None, bootstraps=1):
    """Scatter local degree vs. clustering coefficient for a term's flanking
    genes (solid) over bootstrapped backgrounds (faint), saved to `filename`.
    """
    RZM = co.COB('ROOT').refgen  # use root specific for bootstraps
    pylab.clf()
    for _ in range(0, bootstraps):
        graph = co.COB('ROOT').graph(term.bootstrap_flanking_genes(RZM))
        degree = np.array(graph.degree())
        cc = np.array(graph.transitivity_local_undirected(weights='weight'))
        nan_mask = np.isnan(cc)
        pylab.scatter(degree[~nan_mask], cc[~nan_mask], alpha=0.05)
    # plot empirical
    # fix: was bare COB('ROOT'), a NameError -- only the 'co' module is imported
    graph = co.COB('ROOT').graph(term.flanking_genes(RZM))
    degree = np.array(graph.degree())
    cc = np.array(graph.transitivity_local_undirected(weights='weight'))
    nan_mask = np.isnan(cc)
    pylab.scatter(degree[~nan_mask], cc[~nan_mask])
    pylab.xlabel('Local Degree')
    pylab.ylabel('Clustering Coefficient')
    if filename is None:
        filename = "{}_cc.png".format(term.id)
    pylab.savefig(filename)
# TODO: modulate verbosity.
import os
import sys
import time
import re
import functools
from termcolor import colored, cprint
from itertools import chain
from .Locus import Locus
from .Config import cf
from apsw import CantOpenError
import camoco as co
import matplotlib.pylab as pylab
import numpy as np
import pandas as pd
import statsmodels.api as sm
import gzip
import bz2
def available_datasets(type='%', name='%'):
    """List registered Camoco datasets matching SQL LIKE patterns.

    :param type: LIKE pattern for the dataset type ('%' matches all).
    :param name: LIKE pattern for the dataset name ('%' matches all).
    :return: a DataFrame of matches indexed by Type; when both patterns are
        exact (contain no '%'), a bool telling whether that dataset exists.
        Returns False if the Camoco registry database cannot be opened.
    """
    try:
        cur = co.Camoco("Camoco", type='Camoco').db.cursor()
        datasets = cur.execute('''
            SELECT type, name, description, added
            FROM datasets
            WHERE type LIKE ?
            AND name LIKE ?
            ORDER BY type;''', (type, name)).fetchall()
        if datasets:
            datasets = pd.DataFrame(
                datasets,
                columns=["Type", "Name", "Description", "Date Added"],
            ).set_index('Type')
        else:
            datasets = pd.DataFrame(
                columns=["Type", "Name", "Description", "Date Added"]
            )
        # Check to see if we are looking for a specific dataset
        if '%' not in type and '%' not in name:
            # membership test is already a bool; no ternary needed
            return name in datasets['Name'].values
        else:
            return datasets
    except CantOpenError:
        return False
def available(type=None, name=None):
    """Convenience alias for :func:`available_datasets`."""
    return available_datasets(type=type, name=name)
def del_dataset(type, name, safe=True):
    """Remove a dataset's registry entry and its on-disk database files.

    :param type: dataset type (e.g. 'Expr', 'RefGen').
    :param name: dataset name.
    :param safe: when True, ask for interactive confirmation first.
    :return: True on completion (also when the registry cannot be opened);
        None when the user declines the confirmation prompt.
    """
    try:
        c = co.Camoco("Camoco")
    except CantOpenError:
        return True
    if safe:
        df = available(type=type, name=name)
        c.log("Are you sure you want to delete:\n {}", df)
        if input("(Notice CAPS)[Y/n]:") != 'Y':
            c.log("Nothing Deleted")
            return
    c.log("Deleting {}", name)
    try:
        # bound parameters instead of str.format: the old query was open to
        # SQL injection through the dataset name/type
        c.db.cursor().execute('''
            DELETE FROM datasets
            WHERE name LIKE ?
            AND type LIKE ?;''', (name, type)
        )
    except CantOpenError:
        pass
    # remove the sqlite database file, ignoring a missing file
    try:
        os.remove(
            os.path.expanduser(os.path.join(
                cf.options.basedir,
                'databases',
                '{}.{}.db'.format(type, name)
            )
            )
        )
    except FileNotFoundError:
        pass
    # remove the hdf5 file, ignoring a missing file
    try:
        os.remove(
            os.path.expanduser(os.path.join(
                cf.options.basedir,
                'databases',
                '{}.{}.hd5'.format(type, name)
            )
            )
        )
    except FileNotFoundError:
        pass
    if type == 'Expr':
        # also have to remove the COB specific refgen
        del_dataset('RefGen', 'Filtered' + name, safe=safe)
    return True
def mv_dataset(type, name, new_name):
    """Rename a dataset in the registry and move its sqlite file on disk."""
    c = co.Camoco("Camoco")
    c.db.cursor().execute('''
        UPDATE datasets SET name = ?
        WHERE name = ? AND
        type = ?''',(new_name,name,type)
    )
    old_path = c._resource('databases', '.'.join([type, name]) + ".db")
    new_path = c._resource('databases', ".".join([type, new_name]) + ".db")
    os.rename(old_path, new_path)
class rawFile(object):
    """Context manager that transparently opens plain, gzip ('.gz') or
    bzip2 ('bz2') files for text reading."""

    def __init__(self, filename):
        self.filename = filename
        # choose an opener and mode from the file extension
        if filename.endswith('.gz'):
            opener, mode = gzip.open, 'rt'
        elif filename.endswith('bz2'):
            opener, mode = bz2.open, 'rt'
        else:
            opener, mode = open, 'r'
        self.handle = opener(filename, mode)

    def __enter__(self):
        return self.handle

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.handle.close()
def redescribe_dataset(type, name, new_desc):
    """Replace the stored description of the (type, name) dataset."""
    registry = co.Camoco("Camoco")
    registry.db.cursor().execute('''
        UPDATE datasets SET description = ?
        WHERE name = ? AND type = ?''',
        (new_desc, name, type)
    )
def memoize(obj):
    """Decorator caching obj's results, keyed on the stringified arguments.

    The cache dict is exposed as the ``cache`` attribute of the wrapped
    callable. Passing ``clear_cache=<anything>`` to the memoized callable
    flushes the cache before the call.
    """
    cache = obj.cache = {}

    @functools.wraps(obj)
    def memoizer(*args, **kwargs):
        # Give us a way to clear the cache; pop the flag so it is not
        # forwarded to obj (forwarding it raised TypeError on callables
        # that do not accept a 'clear_cache' keyword).
        if 'clear_cache' in kwargs:
            kwargs.pop('clear_cache')
            cache.clear()
        # This wraps the calling of the memoized object
        key = str(args) + str(kwargs)
        if key not in cache:
            cache[key] = obj(*args, **kwargs)
        return cache[key]
    return memoizer
class log(object):
    """Tiny stderr logger gated on the config log level being 'verbose'.

    Instantiating with a message logs it once; the instance itself is also
    callable for subsequent messages.
    """

    def __init__(self, msg=None, *args, color='green'):
        if msg is not None and cf.logging.log_level == 'verbose':
            self._emit(msg, args, color)

    @classmethod
    def warn(cls, msg, *args):
        """Log msg in red."""
        cls(msg, *args, color='red')

    def __call__(self, msg, *args, color='green'):
        if cf.logging.log_level == 'verbose':
            self._emit(msg, args, color)

    @staticmethod
    def _emit(msg, args, color):
        # single formatting/printing path shared by __init__ and __call__
        print(
            colored(
                " ".join(["[LOG]", time.ctime(), '-', msg.format(*args)]),
                color=color
            ), file=sys.stderr
        )
def plot_flanking_vs_inter(cob):
    """Plot KDE densities of coexpression scores for flanking gene pairs
    versus non-finite-distance pairs of network `cob`; the figure is saved
    as <cob.name>_flank_inter.png.
    """
    import numpy as np
    from scipy import stats
    import statsmodels.api as sm
    import matplotlib.pyplot as plt
    from statsmodels.distributions.mixture_rvs import mixture_rvs
    log('Getting genes')
    genes = sorted(list(cob.refgen.iter_genes()))
    # score between each gene and its immediate predecessor in sorted order
    flanking = np.array([cob.coexpression(genes[i], genes[i-1]).score for i in range(1, len(genes))])
    # pairs whose distance is not finite -- presumably trans pairs; confirm
    inter = cob.coex[~np.isfinite(cob.coex.distance)].score.values
    log('Getting flanking KDE')
    # get the KDEs
    flanking_kde = sm.nonparametric.KDEUnivariate(flanking)
    flanking_kde.fit()
    log('Getting Inter KDE')
    inter_kde = sm.nonparametric.KDEUnivariate(inter)
    inter_kde.fit()
    log('Plotting')
    plt.clf()
    fig = plt.figure(figsize=(8, 4))
    # NOTE(review): Figure.hold was removed in matplotlib 3.0; this call
    # fails on modern matplotlib -- confirm the pinned version
    fig.hold(True)
    ax = fig.add_subplot(1, 1, 1)
    ax.set_xlim([-4, 4])
    ax.set_ylim([0, 0.5])
    ax.plot(flanking_kde.support, flanking_kde.density, lw=2, color='black', alpha=1)
    ax.fill(flanking_kde.support, flanking_kde.density, color='red', alpha=0.3, label='Cis Interactions')
    # diamond marker at the median, just below the x axis
    ax.scatter(np.median(flanking), -0.05, marker='D', color='red')
    ax.set_xlim([-4, 4])
    ax.set_ylim([0, 0.5])
    ax.plot(inter_kde.support, inter_kde.density, lw=2, color='black')
    ax.fill(inter_kde.support, inter_kde.density, color='blue', alpha=0.3, label='Trans Interactions')
    ax.scatter(np.median(inter), -0.05, marker='D', color='blue')
    ax.set_xlabel('CoExpression Interaction (Z-Score)')
    ax.set_ylabel('Distribution Density')
    fig.tight_layout()
    fig.savefig("{}_flank_inter.png".format(cob.name))
def plot_local_global_degree(term, filename=None, bootstraps=1):
    """Scatter local vs. global degree of a term's flanking genes (red) over
    bootstrapped backgrounds (faint), saving the figure to `filename`."""
    root = co.COB("ROOT")
    refgen = root.refgen  # use root specific for bootstraps
    hood = root.neighborhood(term.flanking_genes(refgen))
    bootstrap_frames = [root.neighborhood(term.bootstrap_flanking_genes(refgen))
                        for _ in range(0, bootstraps)]
    bshood = pd.concat(bootstrap_frames)
    pylab.clf()
    pylab.scatter(bshood['local'], bshood['global'], alpha=0.05)
    pylab.scatter(hood['local'], hood['global'], c='r')
    pylab.xlabel('Local Degree')
    pylab.ylabel('Global Degree')
    pylab.title('{} Locality'.format(term.id))
    if filename is None:
        filename = "{}_locality.png".format(term.id)
    pylab.savefig(filename)
def plot_local_vs_cc(term, filename=None, bootstraps=1):
    """Scatter local degree vs. clustering coefficient for a term's flanking
    genes (solid) over bootstrapped backgrounds (faint), saved to `filename`.
    """
    RZM = co.COB('ROOT').refgen  # use root specific for bootstraps
    pylab.clf()
    for _ in range(0, bootstraps):
        graph = co.COB('ROOT').graph(term.bootstrap_flanking_genes(RZM))
        degree = np.array(graph.degree())
        cc = np.array(graph.transitivity_local_undirected(weights='weight'))
        nan_mask = np.isnan(cc)
        pylab.scatter(degree[~nan_mask], cc[~nan_mask], alpha=0.05)
    # plot empirical
    # fix: was bare COB('ROOT'), a NameError -- only the 'co' module is imported
    graph = co.COB('ROOT').graph(term.flanking_genes(RZM))
    degree = np.array(graph.degree())
    cc = np.array(graph.transitivity_local_undirected(weights='weight'))
    nan_mask = np.isnan(cc)
    pylab.scatter(degree[~nan_mask], cc[~nan_mask])
    pylab.xlabel('Local Degree')
    pylab.ylabel('Clustering Coefficient')
    if filename is None:
        filename = "{}_cc.png".format(term.id)
    pylab.savefig(filename)
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from skimage import color
import time
import math
import argparse
# A basic Python implementation of ImageFilters
def numpy_boxblur(image, blur_mask, iterations=3):
''' boxblur using numpy '''
blur_mask = blur_mask.astype(np.float)
self_blur_mask = (9 - (blur_mask * 8)) / 9.0
other_blur_mask = blur_mask / 9.0
red = image[...,0].astype(np.float)
green = image[...,1].astype(np.float)
blue = image[...,2].astype(np.float)
blur_weights = np.dstack((other_blur_mask, other_blur_mask, other_blur_mask,
other_blur_mask, self_blur_mask, other_blur_mask,
other_blur_mask, other_blur_mask, other_blur_mask))
iterations = 1
print blur_weights[200,300,:]
for i in range(iterations):
red_padded = np.pad(red, 1, mode='edge')
green_padded = np.pad(green, 1, mode='edge')
blue_padded = np.pad(blue, 1, mode='edge')
red_stacked = np.dstack((red_padded[:-2, :-2], red_padded[:-2, 1:-1], red_padded[:-2, 2:],
red_padded[1:-1, :-2], red_padded[1:-1, 1:-1], red_padded[1:-1, 2:],
red_padded[2:, :-2], red_padded[2:, 1:-1], red_padded[2:, 2:]))
green_stacked = np.dstack((green_padded[:-2, :-2], green_padded[:-2, 1:-1], green_padded[:-2, 2:],
green_padded[1:-1, :-2], green_padded[1:-1, 1:-1], green_padded[1:-1, 2:],
green_padded[2:, :-2], green_padded[2:, 1:-1], green_padded[2:, 2:]))
blue_stacked = np.dstack((blue_padded[:-2, :-2], blue_padded[:-2, 1:-1], blue_padded[:-2, 2:],
blue_padded[1:-1, :-2], blue_padded[1:-1, 1:-1], blue_padded[1:-1, 2:],
blue_padded[2:, :-2], blue_padded[2:, 1:-1], blue_padded[2:, 2:]))
red = np.average(red_stacked, axis=2, weights=blur_weights).astype(np.uint8)
green = np.average(green_stacked, axis=2, weights=blur_weights).astype(np.uint8)
blue = np.average(blue_stacked, axis=2, weights=blur_weights).astype(np.uint8)
image = np.dstack((red, green, blue))
return image
# Adjusts the brightness on a pixel
def brightness(p, value):
    """Return [r, g, b] with `value` added to each channel, clamped via truncate."""
    return [truncate(channel + value) for channel in (p[0], p[1], p[2])]
# Adjusts the saturation of an image
def saturation(image, value):
    """Scale the color saturation of an RGB image.

    0.0 creates a black-and-white image, 0.5 halves the saturation,
    1.0 leaves the image unchanged, 2.0 doubles the saturation.

    :param image: HxWx3 array.
    :param value: saturation multiplier.
    :return: HxWx3 float array (values are not clamped).
    """
    # per-channel weights used to compute the perceived brightness P
    # (np.float was removed from modern numpy; plain floats are equivalent)
    Pr = 0.299
    Pg = 0.587
    Pb = 0.114
    red = image[..., 0].astype(float)
    green = image[..., 1].astype(float)
    blue = image[..., 2].astype(float)
    P = np.sqrt(red * red * Pr + green * green * Pg + blue * blue * Pb)
    # move each channel toward (value < 1) or away from (value > 1) P
    red_v = (P + ((red - P) * value))
    green_v = (P + ((green - P) * value))
    blue_v = (P + ((blue - P) * value))
    return np.dstack((red_v, green_v, blue_v))
# Adjusts the contrast on a pixel
def contrast(image, value):
    """Adjust the contrast of an HxWx3 image by *value* in [-255, 255].

    Standard contrast formula: each channel is stretched (value > 0) or
    compressed (value < 0) around the mid-grey 128.  value == 0 leaves
    the image unchanged.  Returns a float HxWx3 array (unclamped).
    """
    factor = (259 * (value + 255)) / float(255 * (259 - value))
    # BUG FIX: np.float was removed from recent NumPy; use float.
    red = image[..., 0].astype(float)
    green = image[..., 1].astype(float)
    blue = image[..., 2].astype(float)
    red_v = factor * (red - 128) + 128
    green_v = factor * (green - 128) + 128
    blue_v = factor * (blue - 128) + 128
    return np.dstack((red_v, green_v, blue_v))
# Increases the warmth or coolness of a pixel
def temperature(p, value):
    """Warm (value > 0: more red) or cool (value < 0: more blue) pixel *p*."""
    red, green, blue = p[0], p[1], p[2]
    if value > 0:
        red = truncate(red + value)
    elif value < 0:
        blue = truncate(blue + value)
    return [red, green, blue]
# Inverts the colors, producing the same image that would be found in a film negative
def invert(p, value):
    """Return the film-negative of pixel *p* when *value* is truthy."""
    if not value:
        return [int(p[0]), int(p[1]), int(p[2])]
    return [truncate(255 - channel) for channel in p[:3]]
# Binarise a pixel against a threshold value
def threshold(p, value, apply):
    """Threshold pixel *p* when *apply* is truthy.

    A pixel whose channel average exceeds *value* becomes white
    (255, 255, 255); otherwise it becomes black.  With *apply* falsy
    the pixel is returned unchanged.
    """
    if not apply:
        return [p[0], p[1], p[2]]
    level = 255 if (p[0] + p[1] + p[2]) / 3.0 > value else 0
    return [level, level, level]
# Ensures a pixel's value for a color is between 0 and 255
def truncate(image):
    """Clamp every element of *image* to the displayable range [0, 255].

    Accepts any NumPy array (or scalar); the input dtype is preserved.
    """
    # BUG FIX: dropped leftover debug prints of row 250, which spammed
    # stdout and crashed on images shorter than 251 rows.
    return np.clip(image, 0, 255)
# generates horizontal blur mask using focus middle, focus radius, and image height,
# and stores the blur mask in the blur_mask parameter (np.array)
def generate_horizontal_blur_mask(blur_mask, middle_in_focus, in_focus_radius, height):
    """Fill *blur_mask* in place with a horizontal tilt-shift profile.

    Rows within 80% of *in_focus_radius* of *middle_in_focus* get blur
    amount 0; the outer 20% of the radius fades linearly from 0 to 1 so
    there is no abrupt sharp/blurry transition.  Rows outside the
    radius keep whatever value *blur_mask* already holds (the caller
    initialises it to all 1s, i.e. fully blurry).

    Args:
        blur_mask: HxW array, modified in place.
        middle_in_focus: y index of the centre of the in-focus band.
        in_focus_radius: half-height of the in-focus band, in pixels.
        height: image height, used for bounds checking.
    """
    # Fade out over the outer 20% of the radius.
    no_blur_region = .8 * in_focus_radius
    blur_row = np.zeros_like(blur_mask[0], dtype=float)
    # The centre row is perfectly in focus.
    blur_mask[middle_in_focus] = blur_row
    # Walk the rows above the middle; mirror each blur value to the row
    # the same distance below the middle.
    for y in range(middle_in_focus - in_focus_radius, middle_in_focus):
        distance_to_m = abs(y - middle_in_focus)
        if distance_to_m > no_blur_region:
            # Linear fade between the no-blur core and the radius edge.
            blur_amount = (1.0 / (in_focus_radius - no_blur_region)) * (distance_to_m - no_blur_region)
        else:
            # No blur
            blur_amount = 0.0
        blur_row.fill(blur_amount)
        # BUG FIX: was `y > 0`, which skipped the top row (y == 0).
        if y >= 0:
            blur_mask[y] = blur_row
        if middle_in_focus + distance_to_m < height:
            blur_mask[middle_in_focus + distance_to_m] = blur_row
# Generates a circular blur mask using the x and y coordinates of the focus middle,
# focus radius, and image size, and stores the blur mask in the blur_mask parameter (np.array)
def generate_circular_blur_mask(blur_mask, middle_in_focus_x, middle_in_focus_y, in_focus_radius, width, height):
    """Fill *blur_mask* in place with a circular tilt-shift profile.

    Pixels within 80% of *in_focus_radius* of the centre get blur 0,
    the outer 20% fades linearly to 1, and sampled pixels beyond the
    radius are set to 1.  Pixels outside the sampled bounding square
    keep their existing value (the caller initialises the mask to 1s).
    """
    # Fade out over the outer 20% of the radius.
    no_blur_region = .8 * in_focus_radius
    # All integer coordinates in the bounding square of the focus circle.
    xs, ys = np.meshgrid(
        np.arange(middle_in_focus_x - in_focus_radius, middle_in_focus_x + in_focus_radius + 1),
        np.arange(middle_in_focus_y - in_focus_radius, middle_in_focus_y + in_focus_radius + 1))
    xs = xs.ravel()
    ys = ys.ravel()
    # BUG FIX: keep only coordinate *pairs* inside the image.  The old
    # code applied an element-wise (n, 2) boolean mask, which flattens
    # the array and misaligns x with y; it also rejected the valid
    # coordinate 0 by testing `> 0` instead of `>= 0`.
    in_bounds = (xs >= 0) & (xs < width) & (ys >= 0) & (ys < height)
    xs = xs[in_bounds]
    ys = ys[in_bounds]
    # Vectorised distance-to-centre and piecewise blur amount (replaces
    # the slow np.vectorize'd per-pixel helper).
    distance = np.sqrt((xs - middle_in_focus_x) ** 2.0 + (ys - middle_in_focus_y) ** 2.0)
    fade = (distance - no_blur_region) / (in_focus_radius - no_blur_region)
    blur_mask[ys, xs] = np.where(distance < no_blur_region, 0.0,
                                 np.where(distance < in_focus_radius, fade, 1.0))
def ciruclar_blur_mask_helper(x, y, middle_in_focus_x, middle_in_focus_y, in_focus_radius, no_blur_region):
    """Blur amount for pixel (x, y): 0 inside the no-blur core, a linear
    fade towards 1 across the outer rim, and 1 beyond the focus radius."""
    dx = np.absolute(x - middle_in_focus_x)
    dy = np.absolute(y - middle_in_focus_y)
    distance = (dx ** 2 + dy ** 2) ** 0.5
    if distance < no_blur_region:
        # Inside the fully sharp core.
        return 0.0
    if distance < in_focus_radius:
        # Linear fade between core edge and focus radius.
        return (1.0 / (in_focus_radius - no_blur_region)) * (distance - no_blur_region)
    return 1.0
def cartesian(arrays, out=None):
    # This code was adapted from
    # http://stackoverflow.com/questions/1208118/using-numpy-to-build-an-array-of-all-combinations-of-two-arrays
    """
    Generate a cartesian product of input arrays.
    Parameters
    ----------
    arrays : list of array-like
        1-D arrays to form the cartesian product of.
    out : ndarray
        Array to place the cartesian product in.
    Returns
    -------
    out : ndarray
        2-D array of shape (M, len(arrays)) containing cartesian products
        formed of input arrays.
    Examples
    --------
    >>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
    array([[1, 4, 6],
           [1, 4, 7],
           [1, 5, 6],
           [1, 5, 7],
           [2, 4, 6],
           [2, 4, 7],
           [2, 5, 6],
           [2, 5, 7],
           [3, 4, 6],
           [3, 4, 7],
           [3, 5, 6],
           [3, 5, 7]])
    """
    # Accept plain lists/tuples as the docstring example shows; the old
    # code required ndarrays (it read .dtype/.size immediately).
    arrays = [np.asarray(x) for x in arrays]
    dtype = arrays[0].dtype
    n = np.prod([x.size for x in arrays])
    if out is None:
        out = np.zeros([n, len(arrays)], dtype=dtype)
    # BUG FIX: floor division -- plain `/` yields a float under
    # Python 3, which cannot be used as a slice bound.
    m = n // arrays[0].size
    out[:, 0] = np.repeat(arrays[0], m)
    if arrays[1:]:
        # Fill the first stripe recursively, then tile it for the
        # remaining values of arrays[0].
        cartesian(arrays[1:], out=out[0:m, 1:])
        for j in range(1, arrays[0].size):
            out[j * m:(j + 1) * m, 1:] = out[0:m, 1:]
    return out
# Run a Python implementation of ImageFilters
if __name__ == '__main__':
    # Start the clock
    start_time = time.time()
    setup_time = time.time()
    #==============================================================================
    # Setup for parsing Command Line Args
    #==============================================================================
    parser = argparse.ArgumentParser(description='Image Effects (in pure Python)')
    parser.add_argument('-i', '--input', help='Input image file name', required=True)
    parser.add_argument('-o', '--output', help='Output image file name (required to save new image)', required=False)
    parser.add_argument('-n', '--n_passes', help='Number of box blur passes', required=False)
    parser.add_argument('-b', '--bright', help='Brightness Level (between -100.0 and 100.0)', required=False)
    parser.add_argument('-s', '--sat', help='Saturation Level (between 0.0 and 2.0)', required=False)
    parser.add_argument('-c', '--con', help='Contrast Level (between 0 and 50)', required=False)
    parser.add_argument('-t', '--temp', help='Temperature Level (between -255 and 255)', required=False)
    parser.add_argument('-v', '--inv', help='Invert Colors (0 -> Normal, 1 -> Inverted)', required=False)
    parser.add_argument('-z', '--thresh', help='Threshold Level (between 0 and 255, default is None)', required=False)
    parser.add_argument('-w', '--inverse', help='Invert Colors (0 -> Normal, 1 -> Inverted)', required=False)
    parser.add_argument('-x', '--x_center', help='X coord of center of focus region', required=False)
    parser.add_argument('-y', '--y_center', help='Y coord of center of focus region', required=False)
    parser.add_argument('-r', '--radius', help='Radius of focus region', required=False)
    parser.add_argument('-m', '--blur_mask', help='Blur mask file name', required=False)
    parser.add_argument('-f', '--focus', help='Focus (0 -> In Focus, 1 -> Consistent Blur, 2 -> Circular Tilt Shift, 3 -> Horizontal Tilt Shift', required=False)
    #==============================================================================
    # Parse Command Line Args
    #==============================================================================
    args = parser.parse_args()
    # Load the image
    try:
        input_image = mpimg.imread(args.input, 0)
    except (OSError, IOError):
        parser.error('Valid input image file name required')
    width = np.int32(input_image.shape[1])
    height = np.int32(input_image.shape[0])
    # Output image file name (None -> display instead of save)
    out_filename = args.output if args.output is not None else None
    output_image = np.zeros_like(input_image)
    # Number of Passes - 3 passes approximates Gaussian Blur
    num_passes = np.int32(args.n_passes) if args.n_passes is not None else np.int32(3)
    if num_passes < 0:
        parser.error('Number of passes must be greater than 0')
    # Brightness - Between -100 and 100
    bright = np.float32(args.bright) if args.bright is not None else np.float32(0.0)
    if bright > 100 or bright < -100:
        parser.error('Brightness must be between -100 and 100')
    # Saturation - Between 0 and 5, 1.0 does not produce any effect
    sat = np.float32(args.sat) if args.sat is not None else np.float32(1.0)
    # BUG FIX: the lower bound previously re-checked `bright`, so a
    # negative saturation slipped through validation.
    if sat > 5 or sat < 0:
        parser.error('Saturation must be between 0 and 5')
    # Contrast - Between -255 and 255
    con = np.float32(args.con) if args.con is not None else np.float32(0.0)
    if con > 255 or con < -255:
        parser.error('Contrast must be between -255 and 255')
    # Temperature - Between -255 and 255
    temp = np.int32(args.temp) if args.temp is not None else np.int32(0)
    if temp > 255 or temp < -255:
        parser.error('Temperature must be between -255 and 255')
    # Invert - True or False.  BUG FIX: the old validation compared with
    # `is np.bool(False)` -- np.bool no longer exists and the identity
    # test never matched, so the check was dead code (and its member
    # list would have rejected the legal value '0').
    inv = args.inverse == '1'
    if args.inverse not in ('0', '1', None):
        parser.error('Inverse must be 0 or 1')
    # Threshold - Between 0 and 255
    thresh = np.float32(-1.0)
    apply_thresh = False
    if args.thresh is not None:
        thresh = np.float32(args.thresh)
        apply_thresh = True
        if thresh > 255 or thresh < 0:
            parser.error('Threshold must be between 0 and 255')
    # Focus Type (default None)
    # Consistent blur
    consistent_blur = args.focus == '1'
    # Circle in-focus region
    focused_circle = args.focus == '2'
    # Horizontal in-focus region
    focused_hor = args.focus == '3'
    # The y-index of the center of the in-focus region.
    middle_in_focus_y = np.int32(args.y_center) if args.y_center is not None else np.int32(height / 2)
    # BUG FIX: use >=; valid rows are 0..height-1 (matches the message).
    if middle_in_focus_y >= height or middle_in_focus_y < 0:
        parser.error('Y coord of center of focus region must be between 0 and {} for this image'.format(height - 1))
    # The x-index of the center of the in-focus region
    # Note: this only matters for circular in-focus region
    middle_in_focus_x = np.int32(args.x_center) if args.x_center is not None else np.int32(width / 2)
    if middle_in_focus_x >= width or middle_in_focus_x < 0:
        parser.error('X coord of center of focus region must be between 0 and {} for this image'.format(width - 1))
    # The number of pixels distance from middle_in_focus to keep in focus
    in_focus_radius = np.int32(args.radius) if args.radius is not None else np.int32(min(width, height) / 2)
    if in_focus_radius < 0:
        parser.error('Radius of focus region must be positive')
    # If Tilt Shift is enabled, load or build the per-pixel blur mask
    # (one float blur amount per pixel)
    if consistent_blur or focused_circle or focused_hor or args.blur_mask is not None:
        if args.blur_mask is not None:
            # User-supplied mask file
            blur_mask = mpimg.imread(args.blur_mask, 0)
        else:
            # Initialize blur mask to be all 1's (completely blurry)
            blur_mask = np.ones(input_image.shape[:2], dtype=np.float32)
        # Generate the blur mask
        if focused_circle:
            print("Creating circular blur mask")
            generate_circular_blur_mask(blur_mask, middle_in_focus_x, middle_in_focus_y, in_focus_radius, width, height)
        elif focused_hor:
            print("Creating horizontal blur mask")
            generate_horizontal_blur_mask(blur_mask, middle_in_focus_y, in_focus_radius, height)
    else:
        # No blurring: zero mask and a single (no-op) pass
        blur_mask = np.zeros(input_image.shape[:2], dtype=np.float32)
        num_passes = 1
    if blur_mask.shape != input_image.shape[:2]:
        parser.error('The specified blur mask\'s shape did not match the input image\'s shape')
    #==============================================================================
    # End Parsing Command Line Args
    #==============================================================================
    setup_end_time = time.time()
    print("Took {} seconds to setup".format(setup_end_time - setup_time))
    print("Image Width %s" % width)
    print("Image Height %s" % height)
    #p4 = brightness(p4,bright)
    input_image = saturation(input_image, sat)
    input_image = contrast(input_image, con)
    #p4 = temperature(p4,temp)
    #p4 = invert(p4, inv)
    #p4 = threshold(p4, thresh, apply_thresh)
    # Clamp to the displayable range before blurring
    input_image = truncate(input_image)
    blur_time = time.time()
    print("Performing boxblur")
    input_image = numpy_boxblur(input_image, blur_mask, num_passes)
    end_time = time.time()
    print("TOTAL - Took %s seconds to run %s passes" % (end_time - start_time, num_passes))
    print("Blur time %s" % (end_time - blur_time))
    if out_filename is not None:
        # Save image
        mpimg.imsave(out_filename, input_image)
    else:
        # Display the new image
        plt.imshow(input_image)
        plt.show()
# More Cleanup Numpy
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from skimage import color
import time
import math
import argparse
# A basic Python implementation of ImageFilters
def numpy_boxblur(image, blur_mask, iterations=3):
    """Apply an iterated, mask-weighted 3x3 box blur to an RGB image.

    blur_mask holds one float per pixel in [0, 1]: 0 keeps the pixel
    sharp, 1 blurs it fully.  Each pass replaces every pixel with a
    weighted average of its 3x3 neighbourhood; the weights always sum
    to 1 (self weight (9 - 8*m)/9, each of the 8 neighbours m/9, where
    m is that pixel's mask value).

    Args:
        image: HxWxC uint8 array; only the first 3 channels are used.
        blur_mask: HxW array of per-pixel blur amounts.
        iterations: number of blur passes; 3 approximate a Gaussian.

    Returns:
        HxWx3 uint8 array with the blur applied.
    """
    # np.float was removed from recent NumPy; plain float is equivalent.
    blur_mask = blur_mask.astype(float)
    self_blur_mask = (9 - (blur_mask * 8)) / 9.0
    other_blur_mask = blur_mask / 9.0
    red = image[..., 0].astype(float)
    green = image[..., 1].astype(float)
    blue = image[..., 2].astype(float)
    blur_weights = np.dstack((other_blur_mask, other_blur_mask, other_blur_mask,
                              other_blur_mask, self_blur_mask, other_blur_mask,
                              other_blur_mask, other_blur_mask, other_blur_mask))

    def _stack_neighbourhood(chan):
        # Edge-pad, then stack the 9 shifted views so that axis 2 holds
        # the 3x3 neighbourhood of every pixel.
        p = np.pad(chan, 1, mode='edge')
        return np.dstack((p[:-2, :-2], p[:-2, 1:-1], p[:-2, 2:],
                          p[1:-1, :-2], p[1:-1, 1:-1], p[1:-1, 2:],
                          p[2:, :-2], p[2:, 1:-1], p[2:, 2:]))

    # BUG FIX: a leftover `iterations = 1` silently ignored the
    # caller's pass count.
    for _ in range(iterations):
        red = np.average(_stack_neighbourhood(red), axis=2,
                         weights=blur_weights).astype(np.uint8)
        green = np.average(_stack_neighbourhood(green), axis=2,
                           weights=blur_weights).astype(np.uint8)
        blue = np.average(_stack_neighbourhood(blue), axis=2,
                          weights=blur_weights).astype(np.uint8)
    return np.dstack((red, green, blue))
# Adjusts the brightness on a pixel
def brightness(p, value):
    """Shift every RGB channel of pixel *p* by *value*, clamped with truncate."""
    return [truncate(channel + value) for channel in p[:3]]
# Adjusts the saturation of an image
# 0.0 creates a black-and-white image.
# 0.5 reduces the color saturation by half.
# 1.0 causes no change.
# 2.0 doubles the color saturation.
def saturation(image, value):
    """Scale the colour saturation of an HxWx3 image by *value*.

    Uses perceived-luminance weights (0.299, 0.587, 0.114) to compute a
    per-pixel grey level P, then moves each channel towards (value < 1)
    or away from (value > 1) that grey level.

    Returns a float HxWx3 array; values may leave [0, 255] and should
    be clamped by the caller (see truncate).
    """
    # BUG FIX: np.float was removed from recent NumPy releases; these
    # are plain Python floats anyway.
    p_red = 0.299
    p_green = 0.587
    p_blue = 0.114
    red = image[..., 0].astype(float)
    green = image[..., 1].astype(float)
    blue = image[..., 2].astype(float)
    # Per-pixel perceived brightness.
    P = np.sqrt(red * red * p_red + green * green * p_green + blue * blue * p_blue)
    red_v = (P + ((red - P) * value))
    green_v = (P + ((green - P) * value))
    blue_v = (P + ((blue - P) * value))
    return np.dstack((red_v, green_v, blue_v))
# Adjusts the contrast on a pixel
def contrast(image, value):
    """Adjust the contrast of an HxWx3 image by *value* in [-255, 255].

    Standard contrast formula: each channel is stretched (value > 0) or
    compressed (value < 0) around the mid-grey 128.  value == 0 leaves
    the image unchanged.  Returns a float HxWx3 array (unclamped).
    """
    factor = (259 * (value + 255)) / float(255 * (259 - value))
    # BUG FIX: np.float was removed from recent NumPy; use float.
    red = image[..., 0].astype(float)
    green = image[..., 1].astype(float)
    blue = image[..., 2].astype(float)
    red_v = factor * (red - 128) + 128
    green_v = factor * (green - 128) + 128
    blue_v = factor * (blue - 128) + 128
    return np.dstack((red_v, green_v, blue_v))
# Increases the warmth or coolness of a pixel
def temperature(p, value):
    """Warm (value > 0: more red) or cool (value < 0: more blue) pixel *p*."""
    red, green, blue = p[0], p[1], p[2]
    if value > 0:
        red = truncate(red + value)
    elif value < 0:
        blue = truncate(blue + value)
    return [red, green, blue]
# Inverts the colors, producing the same image that would be found in a film negative
def invert(p, value):
    """Return the film-negative of pixel *p* when *value* is truthy."""
    if not value:
        return [int(p[0]), int(p[1]), int(p[2])]
    return [truncate(255 - channel) for channel in p[:3]]
# Binarise a pixel against a threshold value
def threshold(p, value, apply):
    """Threshold pixel *p* when *apply* is truthy.

    A pixel whose channel average exceeds *value* becomes white
    (255, 255, 255); otherwise it becomes black.  With *apply* falsy
    the pixel is returned unchanged.
    """
    if not apply:
        return [p[0], p[1], p[2]]
    level = 255 if (p[0] + p[1] + p[2]) / 3.0 > value else 0
    return [level, level, level]
# Ensures a pixel's value for a color is between 0 and 255
def truncate(image):
    """Clamp every element of *image* to the displayable range [0, 255]."""
    lower = np.zeros_like(image)
    upper = np.full_like(image, 255)
    return np.minimum(np.maximum(image, lower), upper)
# generates horizontal blur mask using focus middle, focus radius, and image height,
# and stores the blur mask in the blur_mask parameter (np.array)
def generate_horizontal_blur_mask(blur_mask, middle_in_focus, in_focus_radius, height):
    """Fill *blur_mask* in place with a horizontal tilt-shift profile.

    Rows within 80% of *in_focus_radius* of *middle_in_focus* get blur
    amount 0; the outer 20% of the radius fades linearly from 0 to 1 so
    there is no abrupt sharp/blurry transition.  Rows outside the
    radius keep whatever value *blur_mask* already holds (the caller
    initialises it to all 1s, i.e. fully blurry).

    Args:
        blur_mask: HxW array, modified in place.
        middle_in_focus: y index of the centre of the in-focus band.
        in_focus_radius: half-height of the in-focus band, in pixels.
        height: image height, used for bounds checking.
    """
    # Fade out over the outer 20% of the radius.
    no_blur_region = .8 * in_focus_radius
    blur_row = np.zeros_like(blur_mask[0], dtype=float)
    # The centre row is perfectly in focus.
    blur_mask[middle_in_focus] = blur_row
    # Walk the rows above the middle; mirror each blur value to the row
    # the same distance below the middle.
    for y in range(middle_in_focus - in_focus_radius, middle_in_focus):
        distance_to_m = abs(y - middle_in_focus)
        if distance_to_m > no_blur_region:
            # Linear fade between the no-blur core and the radius edge.
            blur_amount = (1.0 / (in_focus_radius - no_blur_region)) * (distance_to_m - no_blur_region)
        else:
            # No blur
            blur_amount = 0.0
        blur_row.fill(blur_amount)
        # BUG FIX: was `y > 0`, which skipped the top row (y == 0).
        if y >= 0:
            blur_mask[y] = blur_row
        if middle_in_focus + distance_to_m < height:
            blur_mask[middle_in_focus + distance_to_m] = blur_row
# Generates a circular blur mask using the x and y coordinates of the focus middle,
# focus radius, and image size, and stores the blur mask in the blur_mask parameter (np.array)
def generate_circular_blur_mask(blur_mask, middle_in_focus_x, middle_in_focus_y, in_focus_radius, width, height):
    """Fill *blur_mask* in place with a circular tilt-shift profile.

    Pixels within 80% of *in_focus_radius* of the centre get blur 0,
    the outer 20% fades linearly to 1, and sampled pixels beyond the
    radius are set to 1.  Pixels outside the sampled bounding square
    keep their existing value (the caller initialises the mask to 1s).
    """
    # Fade out over the outer 20% of the radius.
    no_blur_region = .8 * in_focus_radius
    # All integer coordinates in the bounding square of the focus circle.
    xs, ys = np.meshgrid(
        np.arange(middle_in_focus_x - in_focus_radius, middle_in_focus_x + in_focus_radius + 1),
        np.arange(middle_in_focus_y - in_focus_radius, middle_in_focus_y + in_focus_radius + 1))
    xs = xs.ravel()
    ys = ys.ravel()
    # BUG FIX: keep only coordinate *pairs* inside the image.  The old
    # code applied an element-wise (n, 2) boolean mask, which flattens
    # the array and misaligns x with y; it also rejected the valid
    # coordinate 0 by testing `> 0` instead of `>= 0`.
    in_bounds = (xs >= 0) & (xs < width) & (ys >= 0) & (ys < height)
    xs = xs[in_bounds]
    ys = ys[in_bounds]
    # Vectorised distance-to-centre and piecewise blur amount (replaces
    # the slow np.vectorize'd per-pixel helper).
    distance = np.sqrt((xs - middle_in_focus_x) ** 2.0 + (ys - middle_in_focus_y) ** 2.0)
    fade = (distance - no_blur_region) / (in_focus_radius - no_blur_region)
    blur_mask[ys, xs] = np.where(distance < no_blur_region, 0.0,
                                 np.where(distance < in_focus_radius, fade, 1.0))
def ciruclar_blur_mask_helper(x, y, middle_in_focus_x, middle_in_focus_y, in_focus_radius, no_blur_region):
    """Blur amount for pixel (x, y): 0 inside the no-blur core, a linear
    fade towards 1 across the outer rim, and 1 beyond the focus radius."""
    dx = np.absolute(x - middle_in_focus_x)
    dy = np.absolute(y - middle_in_focus_y)
    distance = (dx ** 2 + dy ** 2) ** 0.5
    if distance < no_blur_region:
        # Inside the fully sharp core.
        return 0.0
    if distance < in_focus_radius:
        # Linear fade between core edge and focus radius.
        return (1.0 / (in_focus_radius - no_blur_region)) * (distance - no_blur_region)
    return 1.0
def cartesian(arrays, out=None):
    # This code was adapted from
    # http://stackoverflow.com/questions/1208118/using-numpy-to-build-an-array-of-all-combinations-of-two-arrays
    """
    Generate a cartesian product of input arrays.
    Parameters
    ----------
    arrays : list of array-like
        1-D arrays to form the cartesian product of.
    out : ndarray
        Array to place the cartesian product in.
    Returns
    -------
    out : ndarray
        2-D array of shape (M, len(arrays)) containing cartesian products
        formed of input arrays.
    Examples
    --------
    >>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
    array([[1, 4, 6],
           [1, 4, 7],
           [1, 5, 6],
           [1, 5, 7],
           [2, 4, 6],
           [2, 4, 7],
           [2, 5, 6],
           [2, 5, 7],
           [3, 4, 6],
           [3, 4, 7],
           [3, 5, 6],
           [3, 5, 7]])
    """
    # Accept plain lists/tuples as the docstring example shows; the old
    # code required ndarrays (it read .dtype/.size immediately).
    arrays = [np.asarray(x) for x in arrays]
    dtype = arrays[0].dtype
    n = np.prod([x.size for x in arrays])
    if out is None:
        out = np.zeros([n, len(arrays)], dtype=dtype)
    # BUG FIX: floor division -- plain `/` yields a float under
    # Python 3, which cannot be used as a slice bound.
    m = n // arrays[0].size
    out[:, 0] = np.repeat(arrays[0], m)
    if arrays[1:]:
        # Fill the first stripe recursively, then tile it for the
        # remaining values of arrays[0].
        cartesian(arrays[1:], out=out[0:m, 1:])
        for j in range(1, arrays[0].size):
            out[j * m:(j + 1) * m, 1:] = out[0:m, 1:]
    return out
# Run a Python implementation of ImageFilters
if __name__ == '__main__':
    # Start the clock
    start_time = time.time()
    setup_time = time.time()
    #==============================================================================
    # Setup for parsing Command Line Args
    #==============================================================================
    parser = argparse.ArgumentParser(description='Image Effects (in pure Python)')
    parser.add_argument('-i', '--input', help='Input image file name', required=True)
    parser.add_argument('-o', '--output', help='Output image file name (required to save new image)', required=False)
    parser.add_argument('-n', '--n_passes', help='Number of box blur passes', required=False)
    parser.add_argument('-b', '--bright', help='Brightness Level (between -100.0 and 100.0)', required=False)
    parser.add_argument('-s', '--sat', help='Saturation Level (between 0.0 and 2.0)', required=False)
    parser.add_argument('-c', '--con', help='Contrast Level (between 0 and 50)', required=False)
    parser.add_argument('-t', '--temp', help='Temperature Level (between -255 and 255)', required=False)
    parser.add_argument('-v', '--inv', help='Invert Colors (0 -> Normal, 1 -> Inverted)', required=False)
    parser.add_argument('-z', '--thresh', help='Threshold Level (between 0 and 255, default is None)', required=False)
    parser.add_argument('-w', '--inverse', help='Invert Colors (0 -> Normal, 1 -> Inverted)', required=False)
    parser.add_argument('-x', '--x_center', help='X coord of center of focus region', required=False)
    parser.add_argument('-y', '--y_center', help='Y coord of center of focus region', required=False)
    parser.add_argument('-r', '--radius', help='Radius of focus region', required=False)
    parser.add_argument('-m', '--blur_mask', help='Blur mask file name', required=False)
    parser.add_argument('-f', '--focus', help='Focus (0 -> In Focus, 1 -> Consistent Blur, 2 -> Circular Tilt Shift, 3 -> Horizontal Tilt Shift', required=False)
    #==============================================================================
    # Parse Command Line Args
    #==============================================================================
    args = parser.parse_args()
    # Load the image
    try:
        input_image = mpimg.imread(args.input, 0)
    except (OSError, IOError):
        parser.error('Valid input image file name required')
    width = np.int32(input_image.shape[1])
    height = np.int32(input_image.shape[0])
    # Output image file name (None -> display instead of save)
    out_filename = args.output if args.output is not None else None
    output_image = np.zeros_like(input_image)
    # Number of Passes - 3 passes approximates Gaussian Blur
    num_passes = np.int32(args.n_passes) if args.n_passes is not None else np.int32(3)
    if num_passes < 0:
        parser.error('Number of passes must be greater than 0')
    # Brightness - Between -100 and 100
    bright = np.float32(args.bright) if args.bright is not None else np.float32(0.0)
    if bright > 100 or bright < -100:
        parser.error('Brightness must be between -100 and 100')
    # Saturation - Between 0 and 5, 1.0 does not produce any effect
    sat = np.float32(args.sat) if args.sat is not None else np.float32(1.0)
    # BUG FIX: the lower bound previously re-checked `bright`, so a
    # negative saturation slipped through validation.
    if sat > 5 or sat < 0:
        parser.error('Saturation must be between 0 and 5')
    # Contrast - Between -255 and 255
    con = np.float32(args.con) if args.con is not None else np.float32(0.0)
    if con > 255 or con < -255:
        parser.error('Contrast must be between -255 and 255')
    # Temperature - Between -255 and 255
    temp = np.int32(args.temp) if args.temp is not None else np.int32(0)
    if temp > 255 or temp < -255:
        parser.error('Temperature must be between -255 and 255')
    # Invert - True or False.  BUG FIX: the old validation compared with
    # `is np.bool(False)` -- np.bool no longer exists and the identity
    # test never matched, so the check was dead code (and its member
    # list would have rejected the legal value '0').
    inv = args.inverse == '1'
    if args.inverse not in ('0', '1', None):
        parser.error('Inverse must be 0 or 1')
    # Threshold - Between 0 and 255
    thresh = np.float32(-1.0)
    apply_thresh = False
    if args.thresh is not None:
        thresh = np.float32(args.thresh)
        apply_thresh = True
        if thresh > 255 or thresh < 0:
            parser.error('Threshold must be between 0 and 255')
    # Focus Type (default None)
    # Consistent blur
    consistent_blur = args.focus == '1'
    # Circle in-focus region
    focused_circle = args.focus == '2'
    # Horizontal in-focus region
    focused_hor = args.focus == '3'
    # The y-index of the center of the in-focus region.
    middle_in_focus_y = np.int32(args.y_center) if args.y_center is not None else np.int32(height / 2)
    # BUG FIX: use >=; valid rows are 0..height-1 (matches the message).
    if middle_in_focus_y >= height or middle_in_focus_y < 0:
        parser.error('Y coord of center of focus region must be between 0 and {} for this image'.format(height - 1))
    # The x-index of the center of the in-focus region
    # Note: this only matters for circular in-focus region
    middle_in_focus_x = np.int32(args.x_center) if args.x_center is not None else np.int32(width / 2)
    if middle_in_focus_x >= width or middle_in_focus_x < 0:
        parser.error('X coord of center of focus region must be between 0 and {} for this image'.format(width - 1))
    # The number of pixels distance from middle_in_focus to keep in focus
    in_focus_radius = np.int32(args.radius) if args.radius is not None else np.int32(min(width, height) / 2)
    if in_focus_radius < 0:
        parser.error('Radius of focus region must be positive')
    # If Tilt Shift is enabled, load or build the per-pixel blur mask
    # (one float blur amount per pixel)
    if consistent_blur or focused_circle or focused_hor or args.blur_mask is not None:
        if args.blur_mask is not None:
            # User-supplied mask file
            blur_mask = mpimg.imread(args.blur_mask, 0)
        else:
            # Initialize blur mask to be all 1's (completely blurry)
            blur_mask = np.ones(input_image.shape[:2], dtype=np.float32)
        # Generate the blur mask
        if focused_circle:
            print("Creating circular blur mask")
            generate_circular_blur_mask(blur_mask, middle_in_focus_x, middle_in_focus_y, in_focus_radius, width, height)
        elif focused_hor:
            print("Creating horizontal blur mask")
            generate_horizontal_blur_mask(blur_mask, middle_in_focus_y, in_focus_radius, height)
    else:
        # No blurring: zero mask and a single (no-op) pass
        blur_mask = np.zeros(input_image.shape[:2], dtype=np.float32)
        num_passes = 1
    if blur_mask.shape != input_image.shape[:2]:
        parser.error('The specified blur mask\'s shape did not match the input image\'s shape')
    #==============================================================================
    # End Parsing Command Line Args
    #==============================================================================
    setup_end_time = time.time()
    print("Took {} seconds to setup".format(setup_end_time - setup_time))
    print("Image Width %s" % width)
    print("Image Height %s" % height)
    #p4 = brightness(p4,bright)
    input_image = saturation(input_image, sat)
    input_image = contrast(input_image, con)
    #p4 = temperature(p4,temp)
    #p4 = invert(p4, inv)
    #p4 = threshold(p4, thresh, apply_thresh)
    # Clamp to the displayable range before blurring
    input_image = truncate(input_image)
    blur_time = time.time()
    print("Performing boxblur")
    input_image = numpy_boxblur(input_image, blur_mask, num_passes)
    end_time = time.time()
    print("TOTAL - Took %s seconds to run %s passes" % (end_time - start_time, num_passes))
    print("Blur time %s" % (end_time - blur_time))
    if out_filename is not None:
        # Save image
        mpimg.imsave(out_filename, input_image)
    else:
        # Display the new image
        plt.imshow(input_image)
        plt.show()
#!/usr/bin/python
import glob
import csv
import xlwt
import os
import sys
import psycopg2
import psycopg2.extras
import pprint
import collections
import getopt
# Reading parameters
def read_params():
    """Parse command-line options with getopt.

    Recognised flags: -y year, -d datatype, -r region, -f filename,
    -p output path, -D (enable debug; also set as a module global).
    Returns the tuple (year, datatype, region, filename, path, debug).
    """
    year = 0
    datatype = 0
    filename = 'output.xls'
    region = 0
    # BUG FIX: `path` was never initialised, so omitting -p crashed the
    # return statement with UnboundLocalError.
    path = ''
    global debug
    debug = 0
    try:
        myopts, args = getopt.getopt(sys.argv[1:], "y:d:h:r:f:Dp:")
    except getopt.GetoptError as e:
        print(str(e))
        print("Usage: %s -y year -d datatype -r region -f filename -DDEBUG -o output" % sys.argv[0])
        sys.exit(2)
    for o, a in myopts:
        if o == '-y':
            year = a
        elif o == '-d':
            datatype = a
        elif o == '-r':
            region = a
        elif o == '-f':
            filename = a
        elif o == '-p':
            path = a
        elif o == '-D':
            debug = 1
    if debug:
        print(filename + "\n")
    return (year, datatype, region, filename, path, debug)
def load_data(year, datatype, region, debug):
    """Fetch russianrepository rows filtered by year/datatype/region.

    Returns (row_count, data), where data maps 1-based row index to the
    row tuple, ordered by territory.
    """
    # SECURITY NOTE: database credentials are hard-coded in source;
    # they should be moved to a config file (cf. the rusrep.config
    # variant of this function).
    conn_string = "host='10.24.63.148' dbname='russian_pilot1' user='clioweb' password='clio-dev-911'"
    # get a connection; if a connection cannot be made an exception
    # will be raised here
    conn = psycopg2.connect(conn_string)
    cursor = conn.cursor()
    # Build the query with bound parameters rather than string
    # interpolation (SECURITY: prevents SQL injection via CLI args).
    query = "select * from russianrepository WHERE 1 = 1"
    params = []
    if year:
        query += " AND year = %s"
        params.append(year)
    if datatype:
        query += " AND datatype = %s"
        params.append(datatype)
    if region:
        query += " AND territory = %s"
        params.append(region)
    if debug:
        print(query + " TEST <br>\n")
    # BUG FIX: the original appended 'order by' with no separating
    # space, producing invalid SQL whenever any filter was present.
    query += " order by territory asc"
    cursor.execute(query, params)
    # retrieve the records from the database, keyed 1..row_count
    records = cursor.fetchall()
    data = {}
    i = 0
    for row in records:
        i = i + 1
        data[i] = row
    return (i, data)
def main():
    """Export the selected dataset to an .xls file and print its path."""
    datadir = "/home/clio-infra/public_html/tmp/"
    (year, datatype, region, filename, datadir, debug) = read_params()
    (row_count, dataset) = load_data(year, datatype, region, debug)
    wb = xlwt.Workbook(encoding='utf')
    ws = wb.add_sheet("Data")
    # BUG FIX: range(1, row_count) silently dropped the last record;
    # dataset is keyed 1..row_count inclusive.
    for i in range(1, row_count + 1):
        for j in range(len(dataset[i])):
            ws.write(i, j, dataset[i][j])
    wb.save(datadir + "/" + filename)
    print(datadir + "/" + filename)
    if debug:
        print(datadir + filename)

main()
# Added configuration file
#!/usr/bin/python
import glob
import csv
import xlwt
import os
import sys
import psycopg2
import psycopg2.extras
import pprint
import collections
import getopt
import ConfigParser
# Reading parameters
def read_params():
    """Parse command-line options.

    Recognized flags: -y year, -d datatype, -r region, -f filename,
    -p path, -D (debug).  Returns the tuple
    (year, datatype, region, filename, path, debug) and sets the
    module-level `debug` flag as a side effect.
    """
    year = 0
    datatype = 0
    filename = 'output.xls'
    region = 0
    # BUG FIX: `path` was only assigned when -p was given, so returning it
    # without that flag raised NameError.  Default to the empty string.
    path = ''
    global debug
    debug = 0
    try:
        myopts, args = getopt.getopt(sys.argv[1:], "y:d:h:r:f:Dp:")
    except getopt.GetoptError as e:
        print(str(e))
        # BUG FIX: the usage text advertised a nonexistent -o option and
        # omitted -p; describe the flags that are actually parsed.
        print("Usage: %s -y year -d datatype -r region -f filename -D -p path" % sys.argv[0])
        sys.exit(2)
    for o, a in myopts:
        if o == '-y':
            year = a
        elif o == '-d':
            datatype = a
        elif o == '-r':
            region = a
        elif o == '-f':
            filename = a
        elif o == '-p':
            path = a
        elif o == '-D':
            debug = 1
    if debug:
        print(filename + "\n")
    return (year, datatype, region, filename, path, debug)
def load_data(year, datatype, region, debug):
    """Fetch rows from the russianrepository table, optionally filtered.

    Connection settings are read from /etc/apache2/rusrep.config.  Returns
    (row_count, data) where data maps 1-based row numbers to record tuples.
    """
    # Read database credentials from the shared config file.
    cparser = ConfigParser.RawConfigParser()
    cpath = "/etc/apache2/rusrep.config"
    cparser.read(cpath)
    conn_string = "host='%s' dbname='%s' user='%s' password='%s'" % (
        cparser.get('config', 'dbhost'),
        cparser.get('config', 'dbname'),
        cparser.get('config', 'dblogin'),
        cparser.get('config', 'dbpassword'))
    data = {}
    # get a connection; if a connect cannot be made an exception is raised here
    conn = psycopg2.connect(conn_string)
    cursor = conn.cursor()
    # Build the query with bound parameters instead of interpolating the
    # command-line values into the SQL text (avoids SQL injection).
    query = "select * from russianrepository WHERE 1 = 1"
    params = []
    if year:
        query += " AND year = %s"
        params.append(year)
    if datatype:
        query += " AND datatype = %s"
        params.append(datatype)
    if region:
        query += " AND territory = %s"
        params.append(region)
    if debug:
        print(query + " TEST <br>\n")
    # BUG FIX: a leading space is required before ORDER BY; the old
    # concatenation produced "...'order by" (invalid SQL) whenever at
    # least one filter clause had been appended.
    query += " order by territory asc"
    cursor.execute(query, params)
    # retrieve the records from the database, keyed 1..n as main() expects
    records = cursor.fetchall()
    i = 0
    for row in records:
        i = i + 1
        data[i] = row
    return (i, data)
def main():
    """Read CLI parameters, query the database and write an .xls file."""
    # Initialization; most of these are overwritten by read_params().
    row_count = 0
    year = 0
    datatype = 0
    dataset = {}
    filename = ''
    # Default output directory, overridden by the -p parameter.
    datadir = "/home/clio-infra/public_html/tmp/"
    (year, datatype, region, filename, datadir, debug) = read_params()
    (row_count, dataset) = load_data(year, datatype, region, debug)
    wb = xlwt.Workbook(encoding='utf')
    f_short_name = "Data"
    ws = wb.add_sheet(str(f_short_name))
    # BUG FIX: dataset keys run from 1 to row_count inclusive, so the upper
    # bound must be row_count + 1 -- the old range(1, row_count) silently
    # dropped the last record.
    for i in range(1, row_count + 1):
        for j in range(len(dataset[i])):
            value = dataset[i][j]
            ws.write(i, j, value)
    wb.save(datadir + "/" + filename)
    print(datadir + "/" + filename)
    if debug:
        print(datadir + filename)
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
#
# autoprop_tests.py: testing automatic properties
#
# Subversion is a tool for revision control.
# See http://subversion.tigris.org for more information.
#
# ====================================================================
# Copyright (c) 2000-2003 CollabNet. All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://subversion.tigris.org/license-1.html.
# If newer versions of this license are posted there, you may use a
# newer version instead, at your option.
#
######################################################################
# General modules
import string, sys, re, os, os.path, shutil
# Our testing module
import svntest
# (abbreviation)
Skip = svntest.testcase.Skip
XFail = svntest.testcase.XFail
Item = svntest.wc.StateItem
# Helper functions
def check_prop(name, path, exp_out):
  """Verify that property NAME on PATH has a value of EXP_OUT.

  EXP_OUT is a list of output lines as produced by 'svn propget --strict'.
  Raises svntest.Failure (after printing both values) on mismatch."""
  # Not using run_svn because binary_mode must be set
  out, err = svntest.main.run_svn(None, 'pg', '--strict', name, path)
  # exact, order-sensitive comparison of the raw output lines
  if out != exp_out:
    print "Expected standard output: ", exp_out, "\n"
    print "Actual standard output: ", out, "\n"
    raise svntest.Failure
def check_proplist(path, exp_out):
  """Verify that property list on PATH has a value of EXP_OUT.

  EXP_OUT is a list of property names; comparison is order-insensitive.
  Raises svntest.Failure (after printing both values) on mismatch."""
  # Not using run_svn because binary_mode must be set
  out, err = svntest.main.run_svn(None, 'proplist', path)
  if len(out) == 0 and len(exp_out) == 0:
    # no properties expected and svn didn't output anything so it's ok
    return
  if len(out) < 1:
    # properties were expected but svn printed nothing at all
    print "Expected result: ", exp_out, "\n"
    print "Actual standard output: ", out, "\n"
    raise svntest.Failure
  # The first output line is the "Properties on ..." header; collect the
  # remaining lines (one property name each), stripped of whitespace.
  out2 = []
  if len(out) > 1:
    for line in out[1:]:
      out2 = out2 + [string.strip(line)]
  # Compare order-insensitively.
  # NOTE(review): exp_out is sorted in place here, mutating the caller's
  # list -- confirm no caller relies on its original order.
  out2.sort()
  exp_out.sort()
  if out2 != exp_out:
    print "Expected result: ", exp_out, "\n"
    print "Actual result: ", out2, "\n"
    print "Actual standard output: ", out, "\n"
    raise svntest.Failure
######################################################################
# Tests
#----------------------------------------------------------------------
def create_config(config_dir, enable_flag):
  "create config directories and files"

  # Paths of the two files this helper produces.
  cfgfile_cfg = os.path.join(config_dir, 'config')
  cfgfile_srv = os.path.join(config_dir, 'server')

  # Make sure the target directory exists before writing into it.
  if not os.path.isdir(config_dir):
    os.makedirs(config_dir)

  # Write the 'config' file in one shot; ENABLE_FLAG is the literal value
  # ('yes'/'no') of the enable-auto-props setting.
  config_lines = [
    '[miscellany]\n',
    'enable-auto-props = ' + enable_flag + '\n',
    '\n',
    '[auto-props]\n',
    '*.c = cfile=yes\n',
    '*.jpg = jpgfile=ja\n',
    'fubar* = tarfile=si\n',
    'foobar.lha = lhafile=da;lzhfile=niet\n',
    'spacetest = a b c = d e f ; g h i = j k l ; m n o = ; = p \n',
    '* = auto=oui\n',
    '\n',
  ]
  fd = open(cfgfile_cfg, 'w')
  fd.writelines(config_lines)
  fd.close()

  # Write a minimal 'server' file.
  fd = open(cfgfile_srv, 'w')
  fd.write('#\n')
  fd.close()
#----------------------------------------------------------------------
def create_test_file(dir, name):
  """Create a small three-line test file named NAME inside directory DIR."""
  # BUG FIX: the old code called open(..., 'w', 0644); open()'s third
  # positional argument is the buffer size, not a permission mode, so 0644
  # silently requested a 420-byte buffer.  Drop the bogus argument.
  fd = open(os.path.join(dir, name), 'w')
  fd.write('foo\nbar\nbaz\n')
  fd.close()
#----------------------------------------------------------------------
def autoprops_test(sbox, cmd, cfgtype, paramtype, subdir):
  """configurable autoprops test.

  CMD is the subcommand to test: 'import' or 'add'
  if CFGTYPE == 1 autoprops are enabled in the config file, else disabled
  if PARAMTYPE == 1 --auto-props is added to the commandline, if it is
  2 --no-auto-props is added, else neither is added
  if string SUBDIR is not empty files are created in that subdir and the
  directory is added/imported"""

  # some directories
  wc_dir = sbox.wc_dir
  tmp_dir = os.path.abspath(svntest.main.temp_dir)
  config_dir = os.path.join(tmp_dir, 'autoprops_config')
  repos_url = svntest.main.current_repo_url
  svntest.main.set_config_dir(config_dir)

  # initialize parameters
  parameters = []

  # add svn command.
  # BUG FIX: callers pass CMD as a string ('add'/'import') but the old code
  # compared it against the integer 1, so 'import' was never actually tested.
  if cmd == 'import':
    # BUG FIX: 'main' was not in scope here; qualify with svntest.main.
    parameters = parameters + ['import',
                               '--username', svntest.main.wc_author,
                               '--password', svntest.main.wc_passwd,
                               '-m', 'bla']
    need_checkout = 1
    files_dir = tmp_dir
  else:
    parameters = parameters + ['add']
    need_checkout = 0
    files_dir = wc_dir

  parameters = parameters + ['--config-dir', config_dir]

  # set config flags
  if cfgtype == 1:
    create_config(config_dir, 'yes')
    enable_flag = 1
  else:
    create_config(config_dir, 'no')
    enable_flag = 0

  # add commandline flags; they take precedence over the config setting
  if paramtype == 1:
    parameters = parameters + ['--auto-props']
    enable_flag = 1
  elif paramtype == 2:
    parameters = parameters + ['--no-auto-props']
    enable_flag = 0

  # setup subdirectory if needed
  if len(subdir) > 0:
    files_dir = os.path.join(files_dir, subdir)
    files_wc_dir = os.path.join(wc_dir, subdir)
    os.makedirs(files_dir)
  else:
    files_wc_dir = wc_dir

  # create test files
  filenames = ['foo.h', 'foo.c', 'foo.jpg', 'fubar.tar', 'foobar.lha',
               'spacetest']
  for filename in filenames:
    create_test_file(files_dir, filename)

  if len(subdir) == 0:
    # add/import the files one by one
    for filename in filenames:
      path = os.path.join(files_dir, filename)
      if cmd == 'import':
        # BUG FIX: join the repository URL with the bare file name; the old
        # code joined it with the absolute local path, which made
        # os.path.join discard the URL completely.
        tmp_params = parameters + [path, os.path.join(repos_url, filename)]
      else:
        tmp_params = parameters + [path]
      svntest.main.run_svn(None, *tmp_params)
  else:
    # add/import the whole subdirectory at once
    if cmd == 'import':
      parameters = parameters + [files_dir, repos_url]
    else:
      parameters = parameters + [files_wc_dir]
    svntest.main.run_svn(None, *parameters)

  # BUG FIX: import leaves nothing in the working copy, so check out the
  # imported files to have something to inspect (the old bare 'svn up' had
  # no working copy to update).
  if need_checkout:
    svntest.main.run_svn(None, 'checkout', repos_url, files_wc_dir)

  # check the properties
  if enable_flag:
    filename = os.path.join(files_wc_dir, 'foo.h')
    check_proplist(filename, ['auto'])
    check_prop('auto', filename, ['oui'])

    filename = os.path.join(files_wc_dir, 'foo.c')
    check_proplist(filename, ['cfile', 'auto'])
    check_prop('auto', filename, ['oui'])
    check_prop('cfile', filename, ['yes'])

    filename = os.path.join(files_wc_dir, 'foo.jpg')
    check_proplist(filename, ['jpgfile', 'auto'])
    check_prop('auto', filename, ['oui'])
    check_prop('jpgfile', filename, ['ja'])

    filename = os.path.join(files_wc_dir, 'fubar.tar')
    check_proplist(filename, ['tarfile', 'auto'])
    check_prop('auto', filename, ['oui'])
    check_prop('tarfile', filename, ['si'])

    filename = os.path.join(files_wc_dir, 'foobar.lha')
    check_proplist(filename, ['lhafile', 'lzhfile', 'auto'])
    check_prop('auto', filename, ['oui'])
    check_prop('lhafile', filename, ['da'])
    check_prop('lzhfile', filename, ['niet'])

    filename = os.path.join(files_wc_dir, 'spacetest')
    check_proplist(filename, ['a b c', 'g h i', 'm n o', 'auto'])
    check_prop('auto', filename, ['oui'])
    check_prop('a b c', filename, ['d e f'])
    check_prop('g h i', filename, ['j k l'])
    check_prop('m n o', filename, [])
  else:
    # with autoprops disabled, no file may have any property
    for filename in filenames:
      check_proplist(os.path.join(files_wc_dir, filename), [])
#----------------------------------------------------------------------
def autoprops_add_no_none(sbox):
  "add: config=no, commandline=none"
  # Bootstrap a fresh repository and working copy for this test.
  sbox.build()
  # cmd=add, config=no, commandline=none
  autoprops_test(sbox, 'add', 0, 0, '')
#----------------------------------------------------------------------
def autoprops_add_yes_none(sbox):
  "add: config=yes, commandline=none"
  # Bootstrap a fresh repository and working copy for this test.
  sbox.build()
  # cmd=add, config=yes, commandline=none
  autoprops_test(sbox, 'add', 1, 0, '')
#----------------------------------------------------------------------
def autoprops_add_no_yes(sbox):
  "add: config=no, commandline=yes"
  # Bootstrap a fresh repository and working copy for this test.
  sbox.build()
  # cmd=add, config=no, commandline=yes
  autoprops_test(sbox, 'add', 0, 1, '')
#----------------------------------------------------------------------
def autoprops_add_yes_yes(sbox):
  "add: config=yes, commandline=yes"
  # Bootstrap a fresh repository and working copy for this test.
  sbox.build()
  # cmd=add, config=yes, commandline=yes
  autoprops_test(sbox, 'add', 1, 1, '')
#----------------------------------------------------------------------
def autoprops_add_no_no(sbox):
  "add: config=no, commandline=no"
  # Bootstrap a fresh repository and working copy for this test.
  sbox.build()
  # cmd=add, config=no, commandline=no
  autoprops_test(sbox, 'add', 0, 2, '')
#----------------------------------------------------------------------
def autoprops_add_yes_no(sbox):
  "add: config=yes, commandline=no"
  # Bootstrap a fresh repository and working copy for this test.
  sbox.build()
  # cmd=add, config=yes, commandline=no
  autoprops_test(sbox, 'add', 1, 2, '')
#----------------------------------------------------------------------
def autoprops_imp_no_none(sbox):
  "import: config=no, commandline=none"
  # Bootstrap a fresh repository and working copy for this test.
  sbox.build()
  # cmd=import, config=no, commandline=none
  autoprops_test(sbox, 'import', 0, 0, '')
#----------------------------------------------------------------------
def autoprops_imp_yes_none(sbox):
  "import: config=yes, commandline=none"
  # Bootstrap a fresh repository and working copy for this test.
  sbox.build()
  # cmd=import, config=yes, commandline=none
  autoprops_test(sbox, 'import', 1, 0, '')
#----------------------------------------------------------------------
def autoprops_imp_no_yes(sbox):
  "import: config=no, commandline=yes"
  # Bootstrap a fresh repository and working copy for this test.
  sbox.build()
  # cmd=import, config=no, commandline=yes
  autoprops_test(sbox, 'import', 0, 1, '')
#----------------------------------------------------------------------
def autoprops_imp_yes_yes(sbox):
  "import: config=yes, commandline=yes"
  # Bootstrap a fresh repository and working copy for this test.
  sbox.build()
  # cmd=import, config=yes, commandline=yes
  autoprops_test(sbox, 'import', 1, 1, '')
#----------------------------------------------------------------------
def autoprops_imp_no_no(sbox):
  "import: config=no, commandline=no"
  # Bootstrap a fresh repository and working copy for this test.
  sbox.build()
  # cmd=import, config=no, commandline=no
  autoprops_test(sbox, 'import', 0, 2, '')
#----------------------------------------------------------------------
def autoprops_imp_yes_no(sbox):
  "import: config=yes, commandline=no"
  # Bootstrap a fresh repository and working copy for this test.
  sbox.build()
  # cmd=import, config=yes, commandline=no
  autoprops_test(sbox, 'import', 1, 2, '')
#----------------------------------------------------------------------
def autoprops_add_dir(sbox):
  "add directory"
  # Bootstrap a fresh repository and working copy for this test.
  sbox.build()
  # cmd=add, config=yes, commandline=none, files in subdirectory 'autodir'
  # (the previous comment wrongly described this as an import/no test)
  autoprops_test(sbox, 'add', 1, 0, 'autodir')
#----------------------------------------------------------------------
def autoprops_imp_dir(sbox):
  "import directory"
  # Bootstrap a fresh repository and working copy for this test.
  sbox.build()
  # cmd=import, config=yes, commandline=none, files in subdirectory 'autodir'
  # (the previous comment wrongly described the commandline as 'no')
  autoprops_test(sbox, 'import', 1, 0, 'autodir')
########################################################################
# Run the tests
# list all tests here, starting with None:
# list all tests here, starting with None (svntest numbers tests from 1):
test_list = [ None,
              autoprops_add_no_none,
              autoprops_add_yes_none,
              autoprops_add_no_yes,
              autoprops_add_yes_yes,
              autoprops_add_no_no,
              autoprops_add_yes_no,
              autoprops_imp_no_none,
              autoprops_imp_yes_none,
              autoprops_imp_no_yes,
              autoprops_imp_yes_yes,
              autoprops_imp_no_no,
              autoprops_imp_yes_no,
              autoprops_add_dir,
              autoprops_imp_dir,
             ]

if __name__ == '__main__':
  # run_tests exits the process itself.
  svntest.main.run_tests(test_list)
# NOTREACHED
### End of file.
Patch from Matthew Hambley (modified):
Various corrections and tidying up of autoprop test script.
* subversion/tests/clients/cmdline/autoprop_tests.py
(create_config): Change 'enable_flag' parameter to a Boolean to hide
configuration file nature. [MH]
(autoprops_test): Change parameter names and types in an attempt to
reflect better their nature. Fix: only 'add' was being tested even
when 'import' was specified, and 'import' test had bugs. [MH]
Reduce repetition of code. [JAF]
git-svn-id: f8a4e5e023278da1e04e203c7fe051e3c4285d88@847466 13f79535-47bb-0310-9956-ffa450edef68
#!/usr/bin/env python
#
# autoprop_tests.py: testing automatic properties
#
# Subversion is a tool for revision control.
# See http://subversion.tigris.org for more information.
#
# ====================================================================
# Copyright (c) 2000-2003 CollabNet. All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://subversion.tigris.org/license-1.html.
# If newer versions of this license are posted there, you may use a
# newer version instead, at your option.
#
######################################################################
# General modules
import string, sys, re, os, os.path, shutil
# Our testing module
import svntest
# (abbreviation)
Skip = svntest.testcase.Skip
XFail = svntest.testcase.XFail
Item = svntest.wc.StateItem
# Helper functions
def check_prop(name, path, exp_out):
  """Verify that property NAME on PATH has a value of EXP_OUT.

  EXP_OUT is a list of output lines as produced by 'svn propget --strict'.
  Raises svntest.Failure (after printing both values) on mismatch."""
  # Not using run_svn because binary_mode must be set
  out, err = svntest.main.run_svn(None, 'pg', '--strict', name, path)
  # exact, order-sensitive comparison of the raw output lines
  if out != exp_out:
    print "Expected standard output: ", exp_out, "\n"
    print "Actual standard output: ", out, "\n"
    raise svntest.Failure
def check_proplist(path, exp_out):
  """Verify that property list on PATH has a value of EXP_OUT.

  EXP_OUT is a list of property names; comparison is order-insensitive.
  Raises svntest.Failure (after printing both values) on mismatch."""
  # Not using run_svn because binary_mode must be set
  out, err = svntest.main.run_svn(None, 'proplist', path)
  if len(out) == 0 and len(exp_out) == 0:
    # no properties expected and svn didn't output anything so it's ok
    return
  if len(out) < 1:
    # properties were expected but svn printed nothing at all
    print "Expected result: ", exp_out, "\n"
    print "Actual standard output: ", out, "\n"
    raise svntest.Failure
  # The first output line is the "Properties on ..." header; collect the
  # remaining lines (one property name each), stripped of whitespace.
  out2 = []
  if len(out) > 1:
    for line in out[1:]:
      out2 = out2 + [string.strip(line)]
  # Compare order-insensitively.
  # NOTE(review): exp_out is sorted in place here, mutating the caller's
  # list -- confirm no caller relies on its original order.
  out2.sort()
  exp_out.sort()
  if out2 != exp_out:
    print "Expected result: ", exp_out, "\n"
    print "Actual result: ", out2, "\n"
    print "Actual standard output: ", out, "\n"
    raise svntest.Failure
######################################################################
# Tests
#----------------------------------------------------------------------
def create_config(config_dir, enable_flag):
  "create config directories and files"

  # Make sure the target directory exists before writing into it.
  if not os.path.isdir(config_dir):
    os.makedirs(config_dir)

  # Translate the boolean ENABLE_FLAG into the config-file setting line.
  if enable_flag:
    autoprop_line = 'enable-auto-props = yes\n'
  else:
    autoprop_line = 'enable-auto-props = no\n'

  # Write the 'config' file in one shot.
  fd = open(os.path.join(config_dir, 'config'), 'w')
  fd.writelines(['[miscellany]\n',
                 autoprop_line,
                 '\n',
                 '[auto-props]\n',
                 '*.c = cfile=yes\n',
                 '*.jpg = jpgfile=ja\n',
                 'fubar* = tarfile=si\n',
                 'foobar.lha = lhafile=da;lzhfile=niet\n',
                 'spacetest = a b c = d e f ; g h i = j k l ; m n o = ; = p \n',
                 '* = auto=oui\n',
                 '\n'])
  fd.close()

  # Write a minimal 'server' file.
  fd = open(os.path.join(config_dir, 'server'), 'w')
  fd.write('#\n')
  fd.close()
#----------------------------------------------------------------------
def create_test_file(dir, name):
  """Create a small three-line test file named NAME inside directory DIR."""
  # BUG FIX: the old code called open(..., 'w', 0644); open()'s third
  # positional argument is the buffer size, not a permission mode, so 0644
  # silently requested a 420-byte buffer.  Drop the bogus argument.
  fd = open(os.path.join(dir, name), 'w')
  fd.write('foo\nbar\nbaz\n')
  fd.close()
#----------------------------------------------------------------------
def autoprops_test(sbox, cmd, cfgenable, clienable, subdir):
  """configurable autoprops test.

  CMD is the subcommand to test: 'import' or 'add'
  if CFGENABLE is true, enable autoprops in the config file, else disable
  if CLIENABLE == 1: --auto-props is added to the command line
                 0: nothing is added
                -1: --no-auto-props is added to command line
  if string SUBDIR is not empty files are created in that subdir and the
  directory is added/imported"""
  # Bootstrap a fresh repository and working copy.
  sbox.build()
  # some directories
  wc_dir = sbox.wc_dir
  tmp_dir = os.path.abspath(svntest.main.temp_dir)
  config_dir = os.path.join(tmp_dir, 'autoprops_config')
  repos_url = svntest.main.current_repo_url
  svntest.main.set_config_dir(config_dir)
  # initialize parameters; 'import' needs credentials and a log message,
  # and operates on files outside the working copy
  if cmd == 'import':
    parameters = ['import', '--username', svntest.main.wc_author,
                  '--password', svntest.main.wc_passwd, '-m', 'bla']
    files_dir = tmp_dir
  else:
    parameters = ['add']
    files_dir = wc_dir
  parameters = parameters + ['--config-dir', config_dir]
  create_config(config_dir, cfgenable)
  # add commandline flags; they take precedence over the config setting
  if clienable == 1:
    parameters = parameters + ['--auto-props']
    enable_flag = 1
  elif clienable == -1:
    parameters = parameters + ['--no-auto-props']
    enable_flag = 0
  else:
    # no override given: the config file setting decides
    enable_flag = cfgenable
  # setup subdirectory if needed
  if len(subdir) > 0:
    files_dir = os.path.join(files_dir, subdir)
    files_wc_dir = os.path.join(wc_dir, subdir)
    os.makedirs(files_dir)
  else:
    files_wc_dir = wc_dir
  # create test files, one per auto-props pattern in create_config()
  filenames = ['foo.h',
               'foo.c',
               'foo.jpg',
               'fubar.tar',
               'foobar.lha',
               'spacetest']
  for filename in filenames:
    create_test_file(files_dir, filename)
  if len(subdir) == 0:
    # add/import the files one at a time
    for filename in filenames:
      path = os.path.join(files_dir, filename)
      if cmd == 'import':
        # import needs both the local path and the destination URL
        tmp_params = parameters + [path, os.path.join(repos_url, filename)]
      else:
        tmp_params = parameters + [path]
      svntest.main.run_svn(None, *tmp_params)
  else:
    # add/import the whole subdirectory at once
    if cmd == 'import':
      parameters = parameters + [files_dir, repos_url]
    else:
      parameters = parameters + [files_wc_dir]
    svntest.main.run_svn(None, *parameters)
  # do an svn co if needed: import leaves nothing in the working copy,
  # so check out the imported files to have something to inspect
  if cmd == 'import':
    svntest.main.run_svn(None, 'checkout', repos_url, files_wc_dir)
  # check the properties
  if enable_flag:
    filename = os.path.join(files_wc_dir, 'foo.h' )
    check_proplist(filename,['auto'])
    check_prop('auto', filename, ['oui'])
    filename = os.path.join(files_wc_dir, 'foo.c' )
    check_proplist(filename,['cfile', 'auto'])
    check_prop('auto', filename, ['oui'])
    check_prop('cfile', filename, ['yes'])
    filename = os.path.join(files_wc_dir, 'foo.jpg' )
    check_proplist(filename,['jpgfile', 'auto'])
    check_prop('auto', filename, ['oui'])
    check_prop('jpgfile', filename, ['ja'])
    filename = os.path.join(files_wc_dir, 'fubar.tar' )
    check_proplist(filename,['tarfile', 'auto'])
    check_prop('auto', filename, ['oui'])
    check_prop('tarfile', filename, ['si'])
    filename = os.path.join(files_wc_dir, 'foobar.lha' )
    check_proplist(filename,['lhafile', 'lzhfile', 'auto'])
    check_prop('auto', filename, ['oui'])
    check_prop('lhafile', filename, ['da'])
    check_prop('lzhfile', filename, ['niet'])
    filename = os.path.join(files_wc_dir, 'spacetest' )
    check_proplist(filename,['a b c', 'g h i', 'm n o', 'auto'])
    check_prop('auto', filename, ['oui'])
    check_prop('a b c', filename, ['d e f'])
    check_prop('g h i', filename, ['j k l'])
    check_prop('m n o', filename, [])
  else:
    # with autoprops disabled, no file may have any property
    for filename in filenames:
      check_proplist(os.path.join(files_wc_dir, filename), [])
#----------------------------------------------------------------------
def autoprops_add_no_none(sbox):
  "add: config=no, commandline=none"
  # config disabled, no command-line override
  autoprops_test(sbox, 'add', 0, 0, '')
#----------------------------------------------------------------------
def autoprops_add_yes_none(sbox):
  "add: config=yes, commandline=none"
  # config enabled, no command-line override
  autoprops_test(sbox, 'add', 1, 0, '')
#----------------------------------------------------------------------
def autoprops_add_no_yes(sbox):
  "add: config=no, commandline=yes"
  # --auto-props overrides the disabled config
  autoprops_test(sbox, 'add', 0, 1, '')
#----------------------------------------------------------------------
def autoprops_add_yes_yes(sbox):
  "add: config=yes, commandline=yes"
  # config and command line both enable autoprops
  autoprops_test(sbox, 'add', 1, 1, '')
#----------------------------------------------------------------------
def autoprops_add_no_no(sbox):
  "add: config=no, commandline=no"
  # --no-auto-props on top of a disabled config
  autoprops_test(sbox, 'add', 0, -1, '')
#----------------------------------------------------------------------
def autoprops_add_yes_no(sbox):
  "add: config=yes, commandline=no"
  # --no-auto-props overrides the enabled config
  autoprops_test(sbox, 'add', 1, -1, '')
#----------------------------------------------------------------------
def autoprops_imp_no_none(sbox):
  "import: config=no, commandline=none"
  # config disabled, no command-line override
  autoprops_test(sbox, 'import', 0, 0, '')
#----------------------------------------------------------------------
def autoprops_imp_yes_none(sbox):
  "import: config=yes, commandline=none"
  # config enabled, no command-line override
  autoprops_test(sbox, 'import', 1, 0, '')
#----------------------------------------------------------------------
def autoprops_imp_no_yes(sbox):
  "import: config=no, commandline=yes"
  # --auto-props overrides the disabled config
  autoprops_test(sbox, 'import', 0, 1, '')
#----------------------------------------------------------------------
def autoprops_imp_yes_yes(sbox):
  "import: config=yes, commandline=yes"
  # config and command line both enable autoprops
  autoprops_test(sbox, 'import', 1, 1, '')
#----------------------------------------------------------------------
def autoprops_imp_no_no(sbox):
  "import: config=no, commandline=no"
  # --no-auto-props on top of a disabled config
  autoprops_test(sbox, 'import', 0, -1, '')
#----------------------------------------------------------------------
def autoprops_imp_yes_no(sbox):
  "import: config=yes, commandline=no"
  # --no-auto-props overrides the enabled config
  autoprops_test(sbox, 'import', 1, -1, '')
#----------------------------------------------------------------------
def autoprops_add_dir(sbox):
  "add directory"
  # config enabled, files created in subdirectory 'autodir'
  autoprops_test(sbox, 'add', 1, 0, 'autodir')
#----------------------------------------------------------------------
def autoprops_imp_dir(sbox):
  "import directory"
  # config enabled, files created in subdirectory 'autodir'
  autoprops_test(sbox, 'import', 1, 0, 'autodir')
########################################################################
# Run the tests
# list all tests here, starting with None:
# list all tests here, starting with None (svntest numbers tests from 1):
test_list = [ None,
              autoprops_add_no_none,
              autoprops_add_yes_none,
              autoprops_add_no_yes,
              autoprops_add_yes_yes,
              autoprops_add_no_no,
              autoprops_add_yes_no,
              autoprops_imp_no_none,
              autoprops_imp_yes_none,
              autoprops_imp_no_yes,
              autoprops_imp_yes_yes,
              autoprops_imp_no_no,
              autoprops_imp_yes_no,
              autoprops_add_dir,
              autoprops_imp_dir,
             ]

if __name__ == '__main__':
  # run_tests exits the process itself.
  svntest.main.run_tests(test_list)
# NOTREACHED
### End of file.
|
# TessuMod: Mod for integrating TeamSpeak into World of Tanks
# Copyright (C) 2016 Janne Hakonen
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from lib import pydash as _
from lib import logutils, gameapi
from lib.littletable.littletable import Table, DataObject
import os
import json
import ConfigParser
import csv
import shutil
logger = logutils.logger.getChild("migrate")
res_mods_dirpath = os.path.normpath(os.path.join(gameapi.find_res_mods_version_path(), ".."))
def migrate():
	"""Migrate 0.6-era config files into the new tessumod config directory."""
	src_dirpath = os.path.join(res_mods_dirpath, "configs", "tessu_mod")
	dst_dirpath = os.path.join(res_mods_dirpath, "configs", "tessumod")
	if not os.path.isdir(dst_dirpath):
		os.makedirs(dst_dirpath)
	# migrate caches first, then settings
	migrate_user_cache_0_6_to_0_7(src_dirpath, dst_dirpath)
	migrate_settings_0_6_to_0_7(src_dirpath, dst_dirpath)
	# drop the old config dir once nothing is left inside it
	if os.path.isdir(src_dirpath):
		has_files = False
		for root, dirs, files in os.walk(src_dirpath):
			if files:
				has_files = True
				break
		if not has_files:
			shutil.rmtree(src_dirpath)
def migrate_user_cache_0_6_to_0_7(source_dirpath, dest_dirpath):
	"""
	This function migrates tessu_mod_cache.ini to following files:
	 * users_cache.v1.json
	 * players_cache.v1.json
	 * pairings_cache.v1.json
	"""
	source_filepath = os.path.join(source_dirpath, "tessu_mod_cache.ini")
	users_filepath = os.path.join(dest_dirpath, "users_cache.v1.json")
	players_filepath = os.path.join(dest_dirpath, "players_cache.v1.json")
	pairings_filepath = os.path.join(dest_dirpath, "pairings_cache.v1.json")
	# NOTE(review): this value is never read; backup_filepath is recomputed
	# to the same path further below -- dead store.
	backup_filepath = os.path.join(dest_dirpath, "tessu_mod_cache.ini.old-0.6")
	source_exists = os.path.isfile(source_filepath)
	dest_exists = all(map(os.path.isfile, [users_filepath, players_filepath, pairings_filepath]))
	# migrate only once: skip when the source is gone or the new caches exist
	if source_exists and not dest_exists:
		logger.info("Migrating caches from version 0.6 to 0.7")
		# Schema for new caches
		users = Table()
		users.create_index('unique_id', unique=True)
		players = Table()
		players.create_index('id', unique=True)
		pairings = Table()
		pairings.create_index('player_id')
		pairings.create_index('user_unique_id')
		# Load old 0.6.x cache file
		parser = ConfigParser.ConfigParser()
		with open(source_filepath, "rb") as file:
			parser.readfp(file)
		# Build new cache structures.
		# NOTE(review): per the follow-up fix commit, this version migrates
		# users/players that have no pairing at all, and keeps player ids as
		# strings although the 0.7 model expects ints -- verify downstream.
		users.insert_many(DataObject(unique_id=id, name=name) for name, id in parser.items("TeamSpeakUsers"))
		players.insert_many(DataObject(id=id, name=name) for name, id in parser.items("GamePlayers"))
		for user_name, player_names in parser.items("UserPlayerPairings"):
			userid = _.head(users.where(name=user_name)).unique_id
			# player_names is a comma-separated (CSV-quoted) list of names
			for player_name in list(csv.reader([player_names]))[0]:
				playerid = _.head(players.where(name=player_name)).id
				pairings.insert(DataObject(player_id=playerid, user_unique_id=userid))
		# create destination directory if it doesn't exist yet
		if not os.path.isdir(dest_dirpath):
			os.makedirs(dest_dirpath)
		# write out the new cache files
		users.json_export(users_filepath)
		players.json_export(players_filepath)
		pairings.json_export(pairings_filepath)
		# backup and remove old cache file
		backup_filepath = os.path.join(dest_dirpath, os.path.basename(source_filepath)) + ".old-0.6"
		if os.path.isfile(backup_filepath):
			os.remove(backup_filepath)
		os.rename(source_filepath, backup_filepath)
def migrate_settings_0_6_to_0_7(source_dirpath, dest_dirpath):
	"""
	This function migrates following files into settings.v1.json:
	 * tessu_mod.ini
	 * ignored_plugin_version
	"""
	source_settings_path = os.path.join(source_dirpath, "tessu_mod.ini")
	source_states_dirpath = os.path.join(source_dirpath, "states")
	source_states_path = os.path.join(source_states_dirpath, "ignored_plugin_version")
	dest_filepath = os.path.join(dest_dirpath, "settings.v1.json")
	dest_structure = { "version": 1 }
	# If destination already exists, load it so we can override values in it
	# with values from the old settings file
	if os.path.isfile(dest_filepath):
		with open(dest_filepath, "rb") as file:
			dest_structure = json.loads(file.read())
	# Only migrate into a version-1 structure we understand.
	if os.path.isfile(source_settings_path) and dest_structure["version"] == 1:
		logger.info("Migrating settings from version 0.6 to 0.7")
		parser = ConfigParser.ConfigParser()
		with open(source_settings_path, "rb") as file:
			parser.readfp(file)
		for section in parser.sections():
			for option in parser.options(section):
				if section == "General":
					if option == "speak_stop_delay":
						dest_structure.setdefault("General", {})["speak_stop_delay"] = parser.getint(section, option)
					elif option == "get_wot_nick_from_ts_metadata":
						dest_structure.setdefault("General", {})["get_wot_nick_from_ts_metadata"] = parser.getboolean(section, option)
					elif option == "update_cache_in_replays":
						dest_structure.setdefault("General", {})["update_cache_in_replays"] = parser.getboolean(section, option)
					elif option == "ts_nick_search_enabled":
						dest_structure.setdefault("General", {})["ts_nick_search_enabled"] = parser.getboolean(section, option)
					elif option == "nick_extract_patterns":
						# stored as a comma-separated list in the old ini
						dest_structure.setdefault("General", {})["nick_extract_patterns"] = parser.get(section, option).split(",")
				elif section == "NameMappings":
					# free-form mapping section: copy every option through
					dest_structure.setdefault("NameMappings", {})[option] = parser.get(section, option)
				elif section == "TSClientQueryService":
					if option == "api_key":
						dest_structure.setdefault("TSClientQueryService", {})["api_key"] = parser.get(section, option)
					if option == "host":
						dest_structure.setdefault("TSClientQueryService", {})["host"] = parser.get(section, option)
					elif option == "port":
						dest_structure.setdefault("TSClientQueryService", {})["port"] = parser.getint(section, option)
					elif option == "polling_interval":
						dest_structure.setdefault("TSClientQueryService", {})["polling_interval"] = parser.getfloat(section, option)
				elif section == "VoiceChatNotifications":
					if option == "enabled":
						dest_structure.setdefault("VoiceChatNotifications", {})["enabled"] = parser.getboolean(section, option)
					elif option == "self_enabled":
						dest_structure.setdefault("VoiceChatNotifications", {})["self_enabled"] = parser.getboolean(section, option)
				elif section == "MinimapNotifications":
					# BUG FIX: these values used to be written under the
					# "VoiceChatNotifications" key, clobbering that section's
					# settings instead of creating "MinimapNotifications".
					if option == "enabled":
						dest_structure.setdefault("MinimapNotifications", {})["enabled"] = parser.getboolean(section, option)
					elif option == "self_enabled":
						dest_structure.setdefault("MinimapNotifications", {})["self_enabled"] = parser.getboolean(section, option)
					elif option == "action":
						dest_structure.setdefault("MinimapNotifications", {})["action"] = parser.get(section, option)
					elif option == "repeat_interval":
						dest_structure.setdefault("MinimapNotifications", {})["repeat_interval"] = parser.getfloat(section, option)
	# presence of the old ignored_plugin_version file means the user opted
	# out of the TS plugin install prompt
	if os.path.isfile(source_states_path) and dest_structure["version"] == 1:
		logger.info("Migrating plugin install opt-out from version 0.6 to 0.7")
		dest_structure.setdefault("General", {})["tsplugin_opt_out"] = True
	# create destination directory if it doesn't exist yet
	dest_dirpath = os.path.dirname(dest_filepath)
	if not os.path.isdir(dest_dirpath):
		os.makedirs(dest_dirpath)
	# write out the settings file
	with open(dest_filepath, "wb") as out_file:
		out_file.write(json.dumps(dest_structure, indent=4))
	# backup and remove old settings file
	if os.path.isfile(source_settings_path):
		backup_filepath = os.path.join(dest_dirpath, os.path.basename(source_settings_path)) + ".old-0.6"
		if os.path.isfile(backup_filepath):
			os.remove(backup_filepath)
		os.rename(source_settings_path, backup_filepath)
	# remove old plugin opt-out dir
	if os.path.isdir(source_states_dirpath):
		shutil.rmtree(source_states_dirpath)
model refactoring: Migration script fixes.
- Fixed migration leaving unpaired users and players in the cache.
- Fixed the type of player ids: changed from string to int.
# TessuMod: Mod for integrating TeamSpeak into World of Tanks
# Copyright (C) 2016 Janne Hakonen
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from lib import pydash as _
from lib import logutils, gameapi
from lib.littletable.littletable import Table, DataObject
import os
import json
import ConfigParser
import csv
import shutil
# Module-scoped logger for the migration routines.
logger = logutils.logger.getChild("migrate")
# Absolute path of the game's "res_mods" directory: one level up from the
# version-specific path reported by the game API.
res_mods_dirpath = os.path.normpath(os.path.join(gameapi.find_res_mods_version_path(), ".."))
def migrate():
	"""Migrate TessuMod 0.6 configuration files to the 0.7 layout.

	Creates the new "configs/tessumod" directory, converts the old cache
	and settings files from "configs/tessu_mod" into it, and removes the
	old directory once it no longer contains any files.
	"""
	old_dirpath = os.path.join(res_mods_dirpath, "configs", "tessu_mod")
	new_dirpath = os.path.join(res_mods_dirpath, "configs", "tessumod")
	if not os.path.isdir(new_dirpath):
		os.makedirs(new_dirpath)
	# Convert caches first, then the settings file
	migrate_user_cache_0_6_to_0_7(old_dirpath, new_dirpath)
	migrate_settings_0_6_to_0_7(old_dirpath, new_dirpath)
	# Drop the legacy directory once nothing is left inside it
	if os.path.isdir(old_dirpath):
		has_files = any(filenames for _path, _dirs, filenames in os.walk(old_dirpath))
		if not has_files:
			shutil.rmtree(old_dirpath)
def migrate_user_cache_0_6_to_0_7(source_dirpath, dest_dirpath):
	"""Migrate the 0.6 cache file into the 0.7 cache files.

	Converts "tessu_mod_cache.ini" into:
	 * users_cache.v1.json
	 * players_cache.v1.json
	 * pairings_cache.v1.json

	The old file is kept as a ".old-0.6" backup in the destination
	directory. Does nothing when the source file is missing or all of the
	destination files already exist.
	"""
	source_filepath = os.path.join(source_dirpath, "tessu_mod_cache.ini")
	users_filepath = os.path.join(dest_dirpath, "users_cache.v1.json")
	players_filepath = os.path.join(dest_dirpath, "players_cache.v1.json")
	pairings_filepath = os.path.join(dest_dirpath, "pairings_cache.v1.json")
	source_exists = os.path.isfile(source_filepath)
	dest_exists = all(map(os.path.isfile, [users_filepath, players_filepath, pairings_filepath]))
	# Guard clause: nothing to migrate, or migration already done
	if not source_exists or dest_exists:
		return
	logger.info("Migrating caches from version 0.6 to 0.7")
	# Schema for the new caches
	users = Table()
	users.create_index('unique_id', unique=True)
	players = Table()
	players.create_index('id', unique=True)
	pairings = Table()
	pairings.create_index('player_id')
	pairings.create_index('user_unique_id')
	# Load the old 0.6.x cache file
	parser = ConfigParser.ConfigParser()
	with open(source_filepath, "rb") as in_file:
		parser.readfp(in_file)
	# Build the new cache structures (player ids become ints in 0.7)
	users.insert_many(DataObject(unique_id=uid, name=name) for name, uid in parser.items("TeamSpeakUsers"))
	players.insert_many(DataObject(id=int(pid), name=name) for name, pid in parser.items("GamePlayers"))
	for user_name, player_names in parser.items("UserPlayerPairings"):
		userid = _.head(users.where(name=user_name)).unique_id
		# player_names is a csv-quoted list of player names
		for player_name in list(csv.reader([player_names]))[0]:
			playerid = _.head(players.where(name=player_name)).id
			pairings.insert(DataObject(player_id=int(playerid), user_unique_id=userid))
	# Remove users & players which are not referenced by any pairing, so
	# the migrated caches contain only paired entries
	for user in users.clone():
		if not pairings.where(user_unique_id=user.unique_id):
			users.remove(user)
	for player in players.clone():
		if not pairings.where(player_id=player.id):
			players.remove(player)
	# Create destination directory if it doesn't exist yet
	if not os.path.isdir(dest_dirpath):
		os.makedirs(dest_dirpath)
	# Write out the new cache files
	users.json_export(users_filepath)
	players.json_export(players_filepath)
	pairings.json_export(pairings_filepath)
	# Backup and remove the old cache file (single computation; the old
	# code also computed this path, unused, at the top of the function)
	backup_filepath = os.path.join(dest_dirpath, os.path.basename(source_filepath)) + ".old-0.6"
	if os.path.isfile(backup_filepath):
		os.remove(backup_filepath)
	os.rename(source_filepath, backup_filepath)
def migrate_settings_0_6_to_0_7(source_dirpath, dest_dirpath):
	"""Migrate 0.6 settings into the 0.7 settings file.

	Merges these sources into "settings.v1.json":
	 * tessu_mod.ini (option values, converted to typed JSON values)
	 * states/ignored_plugin_version (its mere presence marks the user as
	   having opted out of the TS plugin install)

	The old ini file is kept as a ".old-0.6" backup in the destination
	directory and the old "states" directory is removed.
	"""
	source_settings_path = os.path.join(source_dirpath, "tessu_mod.ini")
	source_states_dirpath = os.path.join(source_dirpath, "states")
	source_states_path = os.path.join(source_states_dirpath, "ignored_plugin_version")
	dest_filepath = os.path.join(dest_dirpath, "settings.v1.json")
	dest_structure = { "version": 1 }
	# If destination already exists, load it so we can override values in it
	# with values from the old settings file
	if os.path.isfile(dest_filepath):
		with open(dest_filepath, "rb") as in_file:
			dest_structure = json.loads(in_file.read())
	if os.path.isfile(source_settings_path) and dest_structure["version"] == 1:
		logger.info("Migrating settings from version 0.6 to 0.7")
		parser = ConfigParser.ConfigParser()
		with open(source_settings_path, "rb") as in_file:
			parser.readfp(in_file)
		for section in parser.sections():
			for option in parser.options(section):
				if section == "General":
					if option == "speak_stop_delay":
						dest_structure.setdefault("General", {})["speak_stop_delay"] = parser.getint(section, option)
					elif option == "get_wot_nick_from_ts_metadata":
						dest_structure.setdefault("General", {})["get_wot_nick_from_ts_metadata"] = parser.getboolean(section, option)
					elif option == "update_cache_in_replays":
						dest_structure.setdefault("General", {})["update_cache_in_replays"] = parser.getboolean(section, option)
					elif option == "ts_nick_search_enabled":
						dest_structure.setdefault("General", {})["ts_nick_search_enabled"] = parser.getboolean(section, option)
					elif option == "nick_extract_patterns":
						dest_structure.setdefault("General", {})["nick_extract_patterns"] = parser.get(section, option).split(",")
				elif section == "NameMappings":
					# Every option in this section is copied verbatim
					dest_structure.setdefault("NameMappings", {})[option] = parser.get(section, option)
				elif section == "TSClientQueryService":
					if option == "api_key":
						dest_structure.setdefault("TSClientQueryService", {})["api_key"] = parser.get(section, option)
					elif option == "host":
						dest_structure.setdefault("TSClientQueryService", {})["host"] = parser.get(section, option)
					elif option == "port":
						dest_structure.setdefault("TSClientQueryService", {})["port"] = parser.getint(section, option)
					elif option == "polling_interval":
						dest_structure.setdefault("TSClientQueryService", {})["polling_interval"] = parser.getfloat(section, option)
				elif section == "VoiceChatNotifications":
					if option == "enabled":
						dest_structure.setdefault("VoiceChatNotifications", {})["enabled"] = parser.getboolean(section, option)
					elif option == "self_enabled":
						dest_structure.setdefault("VoiceChatNotifications", {})["self_enabled"] = parser.getboolean(section, option)
				elif section == "MinimapNotifications":
					# Bug fix: these options were previously written under
					# the "VoiceChatNotifications" key, clobbering the voice
					# chat settings migrated above.
					if option == "enabled":
						dest_structure.setdefault("MinimapNotifications", {})["enabled"] = parser.getboolean(section, option)
					elif option == "self_enabled":
						dest_structure.setdefault("MinimapNotifications", {})["self_enabled"] = parser.getboolean(section, option)
					elif option == "action":
						dest_structure.setdefault("MinimapNotifications", {})["action"] = parser.get(section, option)
					elif option == "repeat_interval":
						dest_structure.setdefault("MinimapNotifications", {})["repeat_interval"] = parser.getfloat(section, option)
	if os.path.isfile(source_states_path) and dest_structure["version"] == 1:
		logger.info("Migrating plugin install opt-out from version 0.6 to 0.7")
		dest_structure.setdefault("General", {})["tsplugin_opt_out"] = True
	# create destination directory if it doesn't exist yet
	dest_dirpath = os.path.dirname(dest_filepath)
	if not os.path.isdir(dest_dirpath):
		os.makedirs(dest_dirpath)
	# write out the settings file
	with open(dest_filepath, "wb") as out_file:
		out_file.write(json.dumps(dest_structure, indent=4))
	# backup and remove old settings file
	if os.path.isfile(source_settings_path):
		backup_filepath = os.path.join(dest_dirpath, os.path.basename(source_settings_path)) + ".old-0.6"
		if os.path.isfile(backup_filepath):
			os.remove(backup_filepath)
		os.rename(source_settings_path, backup_filepath)
	# remove old plugin opt-out dir
	if os.path.isdir(source_states_dirpath):
		shutil.rmtree(source_states_dirpath)
|
# -*- coding: utf-8 -*-
"""
Display the current active keyboard layout.
Configuration parameters:
- cache_timeout: check for keyboard layout change every seconds
- color: a single color value for all layouts. eg: "#FCE94F"
- colors: a comma separated string of color values for each layout,
eg: "us=#FCE94F, fr=#729FCF".
- format : see placeholders below
Format of status string placeholders:
{layout} - currently active keyboard layout
Requires:
- xkblayout-state
or
- setxkbmap and xset (works for the first two predefined layouts.)
@author shadowprince, tuxitop
@license Eclipse Public License
"""
from subprocess import check_output
from time import time
import re
class Py3status:
    """
    i3status module that displays the currently active keyboard layout.
    """
    # available configuration parameters
    cache_timeout = 10
    color = None
    colors = 'us=#729FCF, fr=#268BD2, ua=#FCE94F, ru=#F75252'
    format = '{layout}'

    def __init__(self):
        """
        Find the best implementation to get the keyboard's layout.

        Prefers xkblayout-state; falls back to setxkbmap/xset when it is
        missing or fails.
        """
        try:
            self._xkblayout()
            self._command = self._xkblayout
        except Exception:
            # Narrowed from a bare except: never swallow SystemExit or
            # KeyboardInterrupt.
            self._command = self._xset

    def keyboard_layout(self, i3s_output_list, i3s_config):
        """
        Build the i3status response dict for the current layout.
        """
        response = {
            'cached_until': time() + self.cache_timeout,
            'full_text': ''
        }
        if not self.color:
            # Parse the per-layout color table, e.g. "us=#FCE94F, fr=#729FCF"
            self.colors_dict = dict((k.strip(), v.strip()) for k, v in (
                layout.split('=') for layout in self.colors.split(',')))
        lang = self._command().strip() or '??'
        lang_color = self.color if self.color else self.colors_dict.get(lang)
        if lang_color:
            response['color'] = lang_color
        response['full_text'] = self.format.format(layout=lang)
        return response

    def _get_layouts(self):
        """
        Return the list of predefined keyboard layouts from setxkbmap.
        """
        layouts_re = re.compile(r".*layout:\s*((\w+,?)+).*", flags=re.DOTALL)
        out = check_output(["setxkbmap", "-query"]).decode("utf-8")
        layouts = re.match(layouts_re, out).group(1).split(",")
        return layouts

    def _xkblayout(self):
        """
        Check the current layout using xkblayout-state.
        """
        return check_output(["xkblayout-state", "print", "%s"]).decode('utf-8')

    def _xset(self):
        """
        Check the current layout using setxkbmap >= 1.3.0 and xset.

        This method works only for the first two predefined layouts.
        """
        ledmask_re = re.compile(r".*LED\smask:\s*(\d+).*", flags=re.DOTALL)
        layouts = self._get_layouts()
        if len(layouts) == 1:
            return layouts[0]
        xset_output = check_output(["xset", "-q"]).decode("utf-8")
        led_mask = re.match(ledmask_re, xset_output).group(1)
        if len(led_mask) == 8:
            lang = int(led_mask[4], 16)
            # Bug fix: guard the index instead of "layouts[lang] is not
            # None", which raised IndexError when the LED group pointed
            # past the predefined layouts.
            if lang < len(layouts):
                return layouts[lang]
        return "Err"
if __name__ == "__main__":
    """
    Test this module by calling it directly.
    """
    from time import sleep
    module = Py3status()
    test_config = {
        'color_good': '#00FF00',
        'color_degraded': '#FFFF00',
        'color_bad': '#FF0000',
    }
    # Poll once per second forever, printing the module's output.
    while True:
        print(module.keyboard_layout([], test_config))
        sleep(1)
Avoid IndexError when the LED mask index points past the predefined layouts.
# -*- coding: utf-8 -*-
"""
Display the current active keyboard layout.
Configuration parameters:
- cache_timeout: check for keyboard layout change every seconds
- color: a single color value for all layouts. eg: "#FCE94F"
- colors: a comma separated string of color values for each layout,
eg: "us=#FCE94F, fr=#729FCF".
- format : see placeholders below
Format of status string placeholders:
{layout} - currently active keyboard layout
Requires:
- xkblayout-state
or
- setxkbmap and xset (works for the first two predefined layouts.)
@author shadowprince, tuxitop
@license Eclipse Public License
"""
from subprocess import check_output
from time import time
import re
class Py3status:
    """
    i3status module that displays the currently active keyboard layout.
    """
    # available configuration parameters
    cache_timeout = 10
    color = None
    colors = 'us=#729FCF, fr=#268BD2, ua=#FCE94F, ru=#F75252'
    format = '{layout}'

    def __init__(self):
        """
        Find the best implementation to get the keyboard's layout.

        Prefers xkblayout-state; falls back to setxkbmap/xset when it is
        missing or fails.
        """
        try:
            self._xkblayout()
            self._command = self._xkblayout
        except Exception:
            # Narrowed from a bare except: never swallow SystemExit or
            # KeyboardInterrupt.
            self._command = self._xset

    def keyboard_layout(self, i3s_output_list, i3s_config):
        """
        Build the i3status response dict for the current layout.
        """
        response = {
            'cached_until': time() + self.cache_timeout,
            'full_text': ''
        }
        if not self.color:
            # Parse the per-layout color table, e.g. "us=#FCE94F, fr=#729FCF"
            self.colors_dict = dict((k.strip(), v.strip()) for k, v in (
                layout.split('=') for layout in self.colors.split(',')))
        lang = self._command().strip() or '??'
        lang_color = self.color if self.color else self.colors_dict.get(lang)
        if lang_color:
            response['color'] = lang_color
        response['full_text'] = self.format.format(layout=lang)
        return response

    def _get_layouts(self):
        """
        Return the list of predefined keyboard layouts from setxkbmap.
        """
        layouts_re = re.compile(r".*layout:\s*((\w+,?)+).*", flags=re.DOTALL)
        out = check_output(["setxkbmap", "-query"]).decode("utf-8")
        layouts = re.match(layouts_re, out).group(1).split(",")
        return layouts

    def _xkblayout(self):
        """
        Check the current layout using xkblayout-state.
        """
        return check_output(["xkblayout-state", "print", "%s"]).decode('utf-8')

    def _xset(self):
        """
        Check the current layout using setxkbmap >= 1.3.0 and xset.

        This method works only for the first two predefined layouts.
        """
        ledmask_re = re.compile(r".*LED\smask:\s*(\d+).*", flags=re.DOTALL)
        layouts = self._get_layouts()
        if len(layouts) == 1:
            return layouts[0]
        xset_output = check_output(["xset", "-q"]).decode("utf-8")
        led_mask = re.match(ledmask_re, xset_output).group(1)
        if len(led_mask) == 8:
            lang = int(led_mask[4], 16)
            # Guard the index so an out-of-range LED group falls through
            # to the error value instead of raising IndexError.
            if lang < len(layouts):
                return layouts[lang]
        return "Err"
if __name__ == "__main__":
    """
    Test this module by calling it directly.
    """
    from time import sleep
    status = Py3status()
    demo_config = {
        'color_good': '#00FF00',
        'color_degraded': '#FFFF00',
        'color_bad': '#FF0000',
    }
    # Print the module output once per second until interrupted.
    while True:
        print(status.keyboard_layout([], demo_config))
        sleep(1)
|
import re
import os
from django.conf import settings
from django.utils import six
from django.utils.importlib import import_module
from django.core.urlresolvers import RegexURLResolver, RegexURLPattern
from django.contrib.admindocs.views import simplify_regex
from rest_framework.views import APIView
from .apidocview import APIDocView
class UrlParser(object):
def get_apis(self, patterns=None, urlconf=None, filter_path=None, exclude_namespaces=[]):
"""
Returns all the DRF APIViews found in the project URLs
patterns -- supply list of patterns (optional)
exclude_namespaces -- list of namespaces to ignore (optional)
"""
if patterns is None and urlconf is not None:
if isinstance(urlconf, six.string_types):
urls = import_module(urlconf)
else:
urls = urlconf
patterns = urls.urlpatterns
elif patterns is None and urlconf is None:
urls = import_module(settings.ROOT_URLCONF)
patterns = urls.urlpatterns
apis = self.__flatten_patterns_tree__(
patterns,
filter_path=filter_path,
exclude_namespaces=exclude_namespaces,
)
if filter_path is not None:
return self.get_filtered_apis(apis, filter_path)
return apis
def get_filtered_apis(self, apis, filter_path):
filtered_list = []
for api in apis:
if filter_path in api['path'].strip('/'):
filtered_list.append(api)
return filtered_list
def get_top_level_apis(self, apis):
"""
Returns the 'top level' APIs (ie. swagger 'resources')
apis -- list of APIs as returned by self.get_apis
"""
root_paths = set()
api_paths = [endpoint['path'].strip("/") for endpoint in apis]
for path in api_paths:
# If a URLs /resource/ and /resource/{pk} exist, use the base
# as the resource. If there is no base resource URL, then include
path_base = path.split('/{')[0]
if '{' in path and path_base in api_paths:
continue
root_paths.add(path_base)
top_level_apis = self.__filter_top_level_apis__(root_paths)
return sorted(top_level_apis, key=self.__get_last_element__)
def __filter_top_level_apis__(self, root_paths):
"""
Returns top level APIs
"""
filtered_paths = set()
base_path = self.__get_base_path__(root_paths)
for path in root_paths:
resource = path.replace(base_path, '').split('/')[0]
filtered_paths.add(base_path + resource)
return list(filtered_paths)
def __get_base_path__(self, root_paths):
base_path = os.path.commonprefix(root_paths)
slash_index = base_path.rfind('/') + 1
base_path = base_path[:slash_index]
return base_path
def __get_last_element__(self, paths):
split_paths = paths.split('/')
return split_paths[len(split_paths) - 1]
def __assemble_endpoint_data__(self, pattern, prefix='', filter_path=None):
"""
Creates a dictionary for matched API urls
pattern -- the pattern to parse
prefix -- the API path prefix (used by recursion)
"""
callback = self.__get_pattern_api_callback__(pattern)
if callback is None or self.__exclude_router_api_root__(callback):
return
path = simplify_regex(prefix + pattern.regex.pattern)
if filter_path is not None:
if re.match('^/?%s(/.*)?$' % filter_path, path) is None:
return None
path = path.replace('<', '{').replace('>', '}')
if self.__exclude_format_endpoints__(path):
return
return {
'path': path,
'pattern': pattern,
'callback': callback,
}
def __flatten_patterns_tree__(self, patterns, prefix='', filter_path=None, exclude_namespaces=[]):
"""
Uses recursion to flatten url tree.
patterns -- urlpatterns list
prefix -- (optional) Prefix for URL pattern
"""
pattern_list = []
for pattern in patterns:
if isinstance(pattern, RegexURLPattern):
endpoint_data = self.__assemble_endpoint_data__(pattern, prefix, filter_path=filter_path)
if endpoint_data is None:
continue
pattern_list.append(endpoint_data)
elif isinstance(pattern, RegexURLResolver):
if pattern.namespace in exclude_namespaces:
continue
pref = prefix + pattern.regex.pattern
pattern_list.extend(self.__flatten_patterns_tree__(
pattern.url_patterns,
pref,
filter_path=filter_path,
exclude_namespaces=exclude_namespaces,
))
return pattern_list
def __get_pattern_api_callback__(self, pattern):
"""
Verifies that pattern callback is a subclass of APIView, and returns the class
Handles older django & django rest 'cls_instance'
"""
if not hasattr(pattern, 'callback'):
return
if (hasattr(pattern.callback, 'cls') and
issubclass(pattern.callback.cls, APIView) and
not issubclass(pattern.callback.cls, APIDocView)):
return pattern.callback.cls
elif (hasattr(pattern.callback, 'cls_instance') and
isinstance(pattern.callback.cls_instance, APIView) and
not issubclass(pattern.callback.cls_instance, APIDocView)):
return pattern.callback.cls_instance
def __exclude_router_api_root__(self, callback):
"""
Returns True if the URL's callback is rest_framework.routers.APIRoot
"""
if callback.__module__ == 'rest_framework.routers':
return True
return False
def __exclude_format_endpoints__(self, path):
"""
Excludes URL patterns that contain .{format}
"""
if '.{format}' in path:
return True
return False
Escape filter_path with re.escape before embedding it in the URL-matching regex.
import re
import os
from django.conf import settings
from django.utils import six
from django.utils.importlib import import_module
from django.core.urlresolvers import RegexURLResolver, RegexURLPattern
from django.contrib.admindocs.views import simplify_regex
from rest_framework.views import APIView
from .apidocview import APIDocView
class UrlParser(object):
def get_apis(self, patterns=None, urlconf=None, filter_path=None, exclude_namespaces=[]):
"""
Returns all the DRF APIViews found in the project URLs
patterns -- supply list of patterns (optional)
exclude_namespaces -- list of namespaces to ignore (optional)
"""
if patterns is None and urlconf is not None:
if isinstance(urlconf, six.string_types):
urls = import_module(urlconf)
else:
urls = urlconf
patterns = urls.urlpatterns
elif patterns is None and urlconf is None:
urls = import_module(settings.ROOT_URLCONF)
patterns = urls.urlpatterns
apis = self.__flatten_patterns_tree__(
patterns,
filter_path=filter_path,
exclude_namespaces=exclude_namespaces,
)
if filter_path is not None:
return self.get_filtered_apis(apis, filter_path)
return apis
def get_filtered_apis(self, apis, filter_path):
filtered_list = []
for api in apis:
if filter_path in api['path'].strip('/'):
filtered_list.append(api)
return filtered_list
def get_top_level_apis(self, apis):
"""
Returns the 'top level' APIs (ie. swagger 'resources')
apis -- list of APIs as returned by self.get_apis
"""
root_paths = set()
api_paths = [endpoint['path'].strip("/") for endpoint in apis]
for path in api_paths:
# If a URLs /resource/ and /resource/{pk} exist, use the base
# as the resource. If there is no base resource URL, then include
path_base = path.split('/{')[0]
if '{' in path and path_base in api_paths:
continue
root_paths.add(path_base)
top_level_apis = self.__filter_top_level_apis__(root_paths)
return sorted(top_level_apis, key=self.__get_last_element__)
def __filter_top_level_apis__(self, root_paths):
"""
Returns top level APIs
"""
filtered_paths = set()
base_path = self.__get_base_path__(root_paths)
for path in root_paths:
resource = path.replace(base_path, '').split('/')[0]
filtered_paths.add(base_path + resource)
return list(filtered_paths)
def __get_base_path__(self, root_paths):
base_path = os.path.commonprefix(root_paths)
slash_index = base_path.rfind('/') + 1
base_path = base_path[:slash_index]
return base_path
def __get_last_element__(self, paths):
split_paths = paths.split('/')
return split_paths[len(split_paths) - 1]
def __assemble_endpoint_data__(self, pattern, prefix='', filter_path=None):
"""
Creates a dictionary for matched API urls
pattern -- the pattern to parse
prefix -- the API path prefix (used by recursion)
"""
callback = self.__get_pattern_api_callback__(pattern)
if callback is None or self.__exclude_router_api_root__(callback):
return
path = simplify_regex(prefix + pattern.regex.pattern)
if filter_path is not None:
if re.match('^/?%s(/.*)?$' % re.escape(filter_path), path) is None:
return None
path = path.replace('<', '{').replace('>', '}')
if self.__exclude_format_endpoints__(path):
return
return {
'path': path,
'pattern': pattern,
'callback': callback,
}
def __flatten_patterns_tree__(self, patterns, prefix='', filter_path=None, exclude_namespaces=[]):
"""
Uses recursion to flatten url tree.
patterns -- urlpatterns list
prefix -- (optional) Prefix for URL pattern
"""
pattern_list = []
for pattern in patterns:
if isinstance(pattern, RegexURLPattern):
endpoint_data = self.__assemble_endpoint_data__(pattern, prefix, filter_path=filter_path)
if endpoint_data is None:
continue
pattern_list.append(endpoint_data)
elif isinstance(pattern, RegexURLResolver):
if pattern.namespace in exclude_namespaces:
continue
pref = prefix + pattern.regex.pattern
pattern_list.extend(self.__flatten_patterns_tree__(
pattern.url_patterns,
pref,
filter_path=filter_path,
exclude_namespaces=exclude_namespaces,
))
return pattern_list
def __get_pattern_api_callback__(self, pattern):
"""
Verifies that pattern callback is a subclass of APIView, and returns the class
Handles older django & django rest 'cls_instance'
"""
if not hasattr(pattern, 'callback'):
return
if (hasattr(pattern.callback, 'cls') and
issubclass(pattern.callback.cls, APIView) and
not issubclass(pattern.callback.cls, APIDocView)):
return pattern.callback.cls
elif (hasattr(pattern.callback, 'cls_instance') and
isinstance(pattern.callback.cls_instance, APIView) and
not issubclass(pattern.callback.cls_instance, APIDocView)):
return pattern.callback.cls_instance
def __exclude_router_api_root__(self, callback):
"""
Returns True if the URL's callback is rest_framework.routers.APIRoot
"""
if callback.__module__ == 'rest_framework.routers':
return True
return False
def __exclude_format_endpoints__(self, path):
"""
Excludes URL patterns that contain .{format}
"""
if '.{format}' in path:
return True
return False
|
修改类型转换
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of CKAN Private Dataset Extension.
# CKAN Private Dataset Extension is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# CKAN Private Dataset Extension is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with CKAN Private Dataset Extension. If not, see <http://www.gnu.org/licenses/>.
import ckan.lib.search as search
import ckan.plugins as p
import ckan.plugins.toolkit as tk
import auth
import actions
import constants
import converters_validators as conv_val
import db
import helpers as helpers
class PrivateDatasets(p.SingletonPlugin, tk.DefaultDatasetForm):
    """CKAN plugin that adds acquisition metadata to private datasets.

    Extends the dataset form with extra fields (allowed users, acquire
    URL, searchable flag), overrides the related auth functions, exposes
    the "package_acquired" API action and keeps the allowed-users table
    and the search index in sync with dataset changes.
    """
    p.implements(p.IDatasetForm)
    p.implements(p.IAuthFunctions)
    p.implements(p.IConfigurer)
    p.implements(p.IRoutes, inherit=True)
    p.implements(p.IActions)
    p.implements(p.IPackageController, inherit=True)
    p.implements(p.ITemplateHelpers)
    ######################################################################
    ############################ DATASET FORM ############################
    ######################################################################
    def __init__(self, name=None):
        # Indexer used to refresh a dataset's search-index entry when its
        # allowed-users list changes (see after_create)
        self.indexer = search.PackageSearchIndex()
    def _modify_package_schema(self):
        """Return the validator/converter overrides shared by the create
        and update schemas."""
        return {
            # remove datasets_with_no_organization_cannot_be_private validator
            'private': [tk.get_validator('ignore_missing'),
                        tk.get_validator('boolean_validator')],
            constants.ALLOWED_USERS_STR: [tk.get_validator('ignore_missing'),
                                          conv_val.private_datasets_metadata_checker],
            constants.ALLOWED_USERS: [conv_val.allowed_users_convert,
                                      tk.get_validator('ignore_missing'),
                                      conv_val.private_datasets_metadata_checker],
            constants.ACQUIRE_URL: [tk.get_validator('ignore_missing'),
                                    conv_val.private_datasets_metadata_checker,
                                    conv_val.url_checker,
                                    tk.get_converter('convert_to_extras')],
            constants.SEARCHABLE: [tk.get_validator('ignore_missing'),
                                   conv_val.private_datasets_metadata_checker,
                                   tk.get_converter('convert_to_extras'),
                                   tk.get_validator('boolean_validator')]
        }
    def create_package_schema(self):
        """Schema used when creating a dataset."""
        # grab the default schema in our plugin
        schema = super(PrivateDatasets, self).create_package_schema()
        schema.update(self._modify_package_schema())
        return schema
    def update_package_schema(self):
        """Schema used when updating a dataset."""
        # grab the default schema in our plugin
        schema = super(PrivateDatasets, self).update_package_schema()
        schema.update(self._modify_package_schema())
        return schema
    def show_package_schema(self):
        """Schema used when reading a dataset back out."""
        schema = super(PrivateDatasets, self).show_package_schema()
        schema.update({
            constants.ALLOWED_USERS: [conv_val.get_allowed_users,
                                      tk.get_validator('ignore_missing')],
            constants.ACQUIRE_URL: [tk.get_converter('convert_from_extras'),
                                    tk.get_validator('ignore_missing')],
            constants.SEARCHABLE: [tk.get_converter('convert_from_extras'),
                                   tk.get_validator('ignore_missing')]
        })
        return schema
    def is_fallback(self):
        # Return True to register this plugin as the default handler for
        # package types not handled by any other IDatasetForm plugin.
        return True
    def package_types(self):
        # This plugin doesn't handle any special package types, it just
        # registers itself as the default (above).
        return []
    ######################################################################
    ########################### AUTH FUNCTIONS ###########################
    ######################################################################
    def get_auth_functions(self):
        """Override CKAN auth functions so private-dataset rules apply."""
        return {'package_show': auth.package_show,
                'package_update': auth.package_update,
                'resource_show': auth.resource_show,
                constants.PACKAGE_ACQUIRED: auth.package_acquired}
    ######################################################################
    ############################ ICONFIGURER #############################
    ######################################################################
    def update_config(self, config):
        # Add this plugin's templates dir to CKAN's extra_template_paths, so
        # that CKAN will use this plugin's custom templates.
        tk.add_template_directory(config, 'templates')
        # Register this plugin's fanstatic directory with CKAN.
        tk.add_resource('fanstatic', 'privatedatasets')
    ######################################################################
    ############################## IROUTES ###############################
    ######################################################################
    def before_map(self, m):
        # DataSet acquired notification
        m.connect('user_acquired_datasets', '/dashboard/acquired', ckan_icon='shopping-cart',
                  controller='ckanext.privatedatasets.controllers.ui_controller:AcquiredDatasetsControllerUI',
                  action='user_acquired_datasets', conditions=dict(method=['GET']))
        return m
    ######################################################################
    ############################## IACTIONS ##############################
    ######################################################################
    def get_actions(self):
        """Register the API action used to mark datasets as acquired."""
        return {constants.PACKAGE_ACQUIRED: actions.package_acquired}
    ######################################################################
    ######################### IPACKAGECONTROLLER #########################
    ######################################################################
    def before_index(self, pkg_dict):
        # Map the "searchable" extra onto the index capacity field so that
        # non-searchable datasets are indexed as private.
        # NOTE(review): the guard uses constants.SEARCHABLE but the value
        # is read through the hard-coded key 'extras_searchable' --
        # assumes constants.SEARCHABLE == 'searchable'; confirm.
        if 'extras_' + constants.SEARCHABLE in pkg_dict:
            if pkg_dict['extras_searchable'] == 'False':
                pkg_dict['capacity'] = 'private'
            else:
                pkg_dict['capacity'] = 'public'
        return pkg_dict
    def after_create(self, context, pkg_dict):
        """Synchronize the dataset's allowed users into the database.

        Deletes DB rows for users no longer in the dataset's allowed list
        and inserts rows for new ones; when anything changed, re-indexes
        the dataset so searches do not return stale information.
        """
        session = context['session']
        update_cache = False
        db.init_db(context['model'])
        # Get the users and the package ID
        if constants.ALLOWED_USERS in pkg_dict:
            allowed_users = pkg_dict[constants.ALLOWED_USERS]
            package_id = pkg_dict['id']
            # Get current users
            users = db.AllowedUser.get(package_id=package_id)
            # Delete users and save the list of current users
            current_users = []
            for user in users:
                current_users.append(user.user_name)
                if user.user_name not in allowed_users:
                    session.delete(user)
                    update_cache = True
            # Add non existing users
            for user_name in allowed_users:
                if user_name not in current_users:
                    out = db.AllowedUser()
                    out.package_id = package_id
                    out.user_name = user_name
                    out.save()
                    session.add(out)
                    update_cache = True
            session.commit()
            # The cache should be updated. Otherwise, the system may return
            # outdated information in future requests
            if update_cache:
                new_pkg_dict = tk.get_action('package_show')(
                    {'model': context['model'],
                     'ignore_auth': True,
                     'validate': False,
                     'use_cache': False},
                    {'id': package_id})
                self.indexer.update_dict(new_pkg_dict)
        return pkg_dict
    def after_update(self, context, pkg_dict):
        # Updating needs the same allowed-users bookkeeping as creating
        return self.after_create(context, pkg_dict)
    def after_show(self, context, pkg_dict):
        """Strip acquisition metadata from datasets the caller may not inspect."""
        user_obj = context.get('auth_user_obj')
        updating_via_api = context.get(constants.CONTEXT_CALLBACK, False)
        # allowed_users, searchable and acquire_url fields can be only viewed by (and only if the dataset is private):
        # * the dataset creator
        # * the sysadmin
        # * users allowed to update the allowed_users list via the notification API
        # (operator precedence: public-dataset check OR
        #  (not-via-API AND not creator/sysadmin))
        if pkg_dict.get('private') is False or not updating_via_api and (not user_obj or (pkg_dict['creator_user_id'] != user_obj.id and not user_obj.sysadmin)):
            attrs = [constants.ALLOWED_USERS, constants.SEARCHABLE, constants.ACQUIRE_URL]
            for attr in attrs:
                if attr in pkg_dict:
                    del pkg_dict[attr]
        return pkg_dict
    def after_delete(self, context, pkg_dict):
        """Remove all allowed-user DB rows of the deleted dataset."""
        session = context['session']
        package_id = pkg_dict['id']
        # Get current users
        db.init_db(context['model'])
        users = db.AllowedUser.get(package_id=package_id)
        # Delete all the users
        for user in users:
            session.delete(user)
        session.commit()
        return pkg_dict
    ######################################################################
    ######################### ITEMPLATESHELPER ###########################
    ######################################################################
    def get_helpers(self):
        """Expose this plugin's template helper functions."""
        return {'is_dataset_acquired': helpers.is_dataset_acquired,
                'get_allowed_users_str': helpers.get_allowed_users_str,
                'is_owner': helpers.is_owner,
                'can_read': helpers.can_read,
                'show_acquire_url_on_create': helpers.show_acquire_url_on_create,
                'show_acquire_url_on_edit': helpers.show_acquire_url_on_edit
                }
Fix #14: Acquired datasets jumps to the first position
# -*- coding: utf-8 -*-
# Copyright (c) 2014 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of CKAN Private Dataset Extension.
# CKAN Private Dataset Extension is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# CKAN Private Dataset Extension is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with CKAN Private Dataset Extension. If not, see <http://www.gnu.org/licenses/>.
import ckan.lib.search as search
import ckan.plugins as p
import ckan.plugins.toolkit as tk
import auth
import actions
import constants
import converters_validators as conv_val
import db
import helpers as helpers
class PrivateDatasets(p.SingletonPlugin, tk.DefaultDatasetForm):
    """CKAN plugin that lets private datasets be shared with selected users.

    The plugin extends the dataset schema with ``allowed_users``,
    ``searchable`` and ``acquire_url`` fields, restricts who may read those
    fields, keeps the allowed-users list in the extension's own database
    table, and adjusts search indexing so "searchable" private datasets can
    still be found.
    """

    p.implements(p.IDatasetForm)
    p.implements(p.IAuthFunctions)
    p.implements(p.IConfigurer)
    p.implements(p.IRoutes, inherit=True)
    p.implements(p.IActions)
    p.implements(p.IPackageController, inherit=True)
    p.implements(p.ITemplateHelpers)

    ######################################################################
    ############################ DATASET FORM ############################
    ######################################################################

    def __init__(self, name=None):
        # Indexer used to refresh the search index after allowed-users changes.
        self.indexer = search.PackageSearchIndex()

    def _modify_package_schema(self):
        """Return the schema entries shared by the create and update schemas."""
        return {
            # remove datasets_with_no_organization_cannot_be_private validator
            'private': [tk.get_validator('ignore_missing'),
                        tk.get_validator('boolean_validator')],
            constants.ALLOWED_USERS_STR: [tk.get_validator('ignore_missing'),
                                          conv_val.private_datasets_metadata_checker],
            constants.ALLOWED_USERS: [conv_val.allowed_users_convert,
                                      tk.get_validator('ignore_missing'),
                                      conv_val.private_datasets_metadata_checker],
            constants.ACQUIRE_URL: [tk.get_validator('ignore_missing'),
                                    conv_val.private_datasets_metadata_checker,
                                    conv_val.url_checker,
                                    tk.get_converter('convert_to_extras')],
            constants.SEARCHABLE: [tk.get_validator('ignore_missing'),
                                   conv_val.private_datasets_metadata_checker,
                                   tk.get_converter('convert_to_extras'),
                                   tk.get_validator('boolean_validator')]
        }

    def create_package_schema(self):
        """Schema used when a dataset is created."""
        # grab the default schema in our plugin
        schema = super(PrivateDatasets, self).create_package_schema()
        schema.update(self._modify_package_schema())
        return schema

    def update_package_schema(self):
        """Schema used when a dataset is updated."""
        # grab the default schema in our plugin
        schema = super(PrivateDatasets, self).update_package_schema()
        schema.update(self._modify_package_schema())
        return schema

    def show_package_schema(self):
        """Schema used when a dataset is shown (extras converted back)."""
        schema = super(PrivateDatasets, self).show_package_schema()
        schema.update({
            constants.ALLOWED_USERS: [conv_val.get_allowed_users,
                                      tk.get_validator('ignore_missing')],
            constants.ACQUIRE_URL: [tk.get_converter('convert_from_extras'),
                                    tk.get_validator('ignore_missing')],
            constants.SEARCHABLE: [tk.get_converter('convert_from_extras'),
                                   tk.get_validator('ignore_missing')]
        })
        return schema

    def is_fallback(self):
        # Return True to register this plugin as the default handler for
        # package types not handled by any other IDatasetForm plugin.
        return True

    def package_types(self):
        # This plugin doesn't handle any special package types, it just
        # registers itself as the default (above).
        return []

    ######################################################################
    ########################### AUTH FUNCTIONS ###########################
    ######################################################################

    def get_auth_functions(self):
        """Override CKAN auth functions so access honours allowed_users."""
        return {'package_show': auth.package_show,
                'package_update': auth.package_update,
                'resource_show': auth.resource_show,
                constants.PACKAGE_ACQUIRED: auth.package_acquired}

    ######################################################################
    ############################ ICONFIGURER #############################
    ######################################################################

    def update_config(self, config):
        # Add this plugin's templates dir to CKAN's extra_template_paths, so
        # that CKAN will use this plugin's custom templates.
        tk.add_template_directory(config, 'templates')

        # Register this plugin's fanstatic directory with CKAN.
        tk.add_resource('fanstatic', 'privatedatasets')

    ######################################################################
    ############################## IROUTES ###############################
    ######################################################################

    def before_map(self, m):
        # DataSet acquired notification
        m.connect('user_acquired_datasets', '/dashboard/acquired', ckan_icon='shopping-cart',
                  controller='ckanext.privatedatasets.controllers.ui_controller:AcquiredDatasetsControllerUI',
                  action='user_acquired_datasets', conditions=dict(method=['GET']))
        return m

    ######################################################################
    ############################## IACTIONS ##############################
    ######################################################################

    def get_actions(self):
        """Register the package_acquired API action."""
        return {constants.PACKAGE_ACQUIRED: actions.package_acquired}

    ######################################################################
    ######################### IPACKAGECONTROLLER #########################
    ######################################################################

    def before_index(self, pkg_dict):
        """Map the ``searchable`` extra onto Solr's ``capacity`` field.

        Non-searchable datasets are indexed as private so they stay hidden
        from search results, while searchable ones remain publicly listed.
        """
        # Use the same computed key for both the membership test and the
        # lookup (previously the lookup hard-coded 'extras_searchable').
        searchable_key = 'extras_' + constants.SEARCHABLE
        if searchable_key in pkg_dict:
            # Extras are serialized as text, so the boolean arrives as 'False'.
            if pkg_dict[searchable_key] == 'False':
                pkg_dict['capacity'] = 'private'
            else:
                pkg_dict['capacity'] = 'public'
        return pkg_dict

    def after_create(self, context, pkg_dict):
        """Synchronise the allowed-users table with the dataset just saved.

        Deletes users no longer allowed, inserts new ones, and re-indexes the
        package when the list changed so searches do not return stale data.
        """
        session = context['session']
        update_cache = False

        db.init_db(context['model'])

        # Get the users and the package ID
        if constants.ALLOWED_USERS in pkg_dict:
            allowed_users = pkg_dict[constants.ALLOWED_USERS]
            package_id = pkg_dict['id']

            # Get current users
            users = db.AllowedUser.get(package_id=package_id)

            # Delete users and save the list of current users
            current_users = []
            for user in users:
                current_users.append(user.user_name)
                if user.user_name not in allowed_users:
                    session.delete(user)
                    update_cache = True

            # Add non existing users
            for user_name in allowed_users:
                if user_name not in current_users:
                    out = db.AllowedUser()
                    out.package_id = package_id
                    out.user_name = user_name
                    out.save()
                    session.add(out)
                    update_cache = True

            session.commit()

            # The cache should be updated. Otherwise, the system may return
            # outdated information in future requests
            if update_cache:
                new_pkg_dict = tk.get_action('package_show')(
                    {'model': context['model'],
                     'ignore_auth': True,
                     'validate': False,
                     'use_cache': False},
                    {'id': package_id})

                # Prevent acquired datasets jumping to the first position:
                # keep the original modification time when re-indexing.
                new_pkg_dict['metadata_modified'] = new_pkg_dict['revision_timestamp']
                self.indexer.update_dict(new_pkg_dict)

        return pkg_dict

    def after_update(self, context, pkg_dict):
        """Updates use the same allowed-users synchronisation as creates."""
        return self.after_create(context, pkg_dict)

    def after_show(self, context, pkg_dict):
        """Strip acquire-related fields from users who must not see them."""
        user_obj = context.get('auth_user_obj')
        updating_via_api = context.get(constants.CONTEXT_CALLBACK, False)

        # allowed_users, searchable and acquire_url fields can be only viewed
        # by (and only if the dataset is private):
        # * the dataset creator
        # * the sysadmin
        # * users allowed to update the allowed_users list via the notification API
        # (parentheses added to make the operator precedence explicit;
        # semantics unchanged)
        if pkg_dict.get('private') is False or (
                not updating_via_api and
                (not user_obj or
                 (pkg_dict['creator_user_id'] != user_obj.id and
                  not user_obj.sysadmin))):
            attrs = [constants.ALLOWED_USERS, constants.SEARCHABLE, constants.ACQUIRE_URL]
            for attr in attrs:
                if attr in pkg_dict:
                    del pkg_dict[attr]

        return pkg_dict

    def after_delete(self, context, pkg_dict):
        """Remove every allowed-user record attached to the deleted dataset."""
        session = context['session']
        package_id = pkg_dict['id']

        # Get current users
        db.init_db(context['model'])
        users = db.AllowedUser.get(package_id=package_id)

        # Delete all the users
        for user in users:
            session.delete(user)

        session.commit()

        return pkg_dict

    ######################################################################
    ######################### ITEMPLATESHELPER ###########################
    ######################################################################

    def get_helpers(self):
        """Expose this extension's template helper functions to CKAN."""
        return {'is_dataset_acquired': helpers.is_dataset_acquired,
                'get_allowed_users_str': helpers.get_allowed_users_str,
                'is_owner': helpers.is_owner,
                'can_read': helpers.can_read,
                'show_acquire_url_on_create': helpers.show_acquire_url_on_create,
                'show_acquire_url_on_edit': helpers.show_acquire_url_on_edit
                }
|
# future
from __future__ import annotations
# stdlib
from collections.abc import Sequence
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
# third party
import numpy as np
from numpy.typing import NDArray
from scipy.ndimage.interpolation import rotate
# relative
from .... import lib
from ....ast.klass import pointerize_args_and_kwargs
from ....core.adp.data_subject_ledger import DataSubjectLedger
from ....core.adp.data_subject_list import DataSubjectArray
from ....core.adp.data_subject_list import dslarraytonumpyutf8
from ....core.adp.data_subject_list import numpyutf8todslarray
from ....core.node.common.action.get_or_set_property_action import (
GetOrSetPropertyAction,
)
from ....core.node.common.action.get_or_set_property_action import PropertyActions
from ....lib.numpy.array import capnp_deserialize
from ....lib.numpy.array import capnp_serialize
from ....lib.python.util import upcast
from ....util import inherit_tags
from ...common.serde.capnp import CapnpModule
from ...common.serde.capnp import chunk_bytes
from ...common.serde.capnp import combine_bytes
from ...common.serde.capnp import get_capnp_schema
from ...common.serde.capnp import serde_magic_header
from ...common.serde.deserialize import _deserialize as deserialize
from ...common.serde.serializable import serializable
from ...common.serde.serialize import _serialize as serialize
from ...common.uid import UID
from ...node.abstract.node import AbstractNodeClient
from ...node.common.action.run_class_method_action import RunClassMethodAction
from ...pointer.pointer import Pointer
from ..broadcastable import is_broadcastable
from ..config import DEFAULT_INT_NUMPY_TYPE
from ..fixed_precision_tensor import FixedPrecisionTensor
from ..lazy_repeat_array import compute_min_max
from ..lazy_repeat_array import lazyrepeatarray
from ..passthrough import AcceptableSimpleType # type: ignore
from ..passthrough import PassthroughTensor # type: ignore
from ..passthrough import SupportedChainType # type: ignore
from ..passthrough import is_acceptable_simple_type # type: ignore
from ..smpc import utils
from ..smpc.mpc_tensor import MPCTensor
from ..smpc.utils import TYPE_TO_RING_SIZE
from ..util import implements
from .adp_tensor import ADPTensor
from .gamma_tensor import GammaTensor
from .gamma_tensor import TensorWrappedGammaTensorPointer
@serializable(recursive_serde=True)
class TensorWrappedPhiTensorPointer(Pointer, PassthroughTensor):
__name__ = "TensorWrappedPhiTensorPointer"
__module__ = "syft.core.tensor.autodp.phi_tensor"
__attr_allowlist__ = [
# default pointer attrs
"client",
"id_at_location",
"object_type",
"tags",
"description",
# phi_tensor attrs
"data_subjects",
"min_vals",
"max_vals",
"public_dtype",
"public_shape",
]
__serde_overrides__ = {
"client": [lambda x: x.address, lambda y: y],
"public_shape": [lambda x: x, lambda y: upcast(y)],
"data_subjects": [dslarraytonumpyutf8, numpyutf8todslarray],
}
_exhausted = False
is_enum = False
    def __init__(
        self,
        data_subjects: np.ndarray,
        min_vals: np.typing.ArrayLike,
        max_vals: np.typing.ArrayLike,
        client: Any,
        id_at_location: Optional[UID] = None,
        object_type: str = "",
        tags: Optional[List[str]] = None,
        description: str = "",
        public_shape: Optional[Tuple[int, ...]] = None,
        public_dtype: Optional[np.dtype] = None,
    ):
        """Pointer to a remote PhiTensor, plus locally-known public metadata.

        Args:
            data_subjects: array of data subjects of the remote tensor.
            min_vals: lower bounds of the remote data (used e.g. for
                synthetic display and bound propagation).
            max_vals: upper bounds of the remote data.
            client: client used to communicate with the node that holds
                the real tensor.
            id_at_location: UID of the object on the remote node.
            object_type: string describing the pointed-to type.
            tags: searchable tags inherited from the remote object.
            description: human-readable description.
            public_shape: shape of the remote tensor, if publicly known.
            public_dtype: dtype of the remote tensor, if publicly known.
        """
        super().__init__(
            client=client,
            id_at_location=id_at_location,
            object_type=object_type,
            tags=tags,
            description=description,
        )
        self.min_vals = min_vals
        self.max_vals = max_vals
        self.data_subjects = data_subjects
        self.public_shape = public_shape
        self.public_dtype = public_dtype
# TODO: Modify for large arrays
    @property
    def synthetic(self) -> np.ndarray:
        """Fake data drawn uniformly from [min_vals, max_vals].

        Used for display (__repr__) without revealing the private values.
        Assumes min_vals/max_vals expose ``to_numpy()`` and that public_shape
        and public_dtype are set — TODO confirm for all construction paths.
        """
        # public_dtype may be a project type with an ``upcast`` method; fall
        # back to the value itself when it is a plain numpy dtype.
        public_dtype_func = getattr(
            self.public_dtype, "upcast", lambda: self.public_dtype
        )
        return (
            np.random.rand(*list(self.public_shape))  # type: ignore
            * (self.max_vals.to_numpy() - self.min_vals.to_numpy())
            + self.min_vals.to_numpy()
        ).astype(public_dtype_func())
def __repr__(self) -> str:
return (
self.synthetic.__repr__()
+ "\n\n (The data printed above is synthetic - it's an imitation of the real data.)"
)
def share(self, *parties: Tuple[AbstractNodeClient, ...]) -> MPCTensor:
all_parties = list(parties) + [self.client]
ring_size = TYPE_TO_RING_SIZE.get(self.public_dtype, None)
self_mpc = MPCTensor(
secret=self,
shape=self.public_shape,
ring_size=ring_size,
parties=all_parties,
)
return self_mpc
@property
def shape(self) -> Optional[Tuple[int, ...]]:
if hasattr(self, "public_shape"):
return self.public_shape
else:
return None
def _apply_tensor_op(self, other: Any, op_str: str) -> Any:
# we want to get the return type which matches the attr_path_and_name
# so we ask lib_ast for the return type name that matches out
# attr_path_and_name and then use that to get the actual pointer klass
# then set the result to that pointer klass
# We always maintain a Tensor hierarchy Tensor ---> PT--> Actual Data
attr_path_and_name = f"syft.core.tensor.tensor.Tensor.{op_str}"
min_vals, max_vals = compute_min_max(
self.min_vals, self.max_vals, other, op_str
)
result = TensorWrappedPhiTensorPointer(
data_subjects=self.data_subjects,
min_vals=min_vals,
max_vals=max_vals,
client=self.client,
)
# QUESTION can the id_at_location be None?
result_id_at_location = getattr(result, "id_at_location", None)
if result_id_at_location is not None:
# first downcast anything primitive which is not already PyPrimitive
(
downcast_args,
downcast_kwargs,
) = lib.python.util.downcast_args_and_kwargs(args=[other], kwargs={})
# then we convert anything which isnt a pointer into a pointer
pointer_args, pointer_kwargs = pointerize_args_and_kwargs(
args=downcast_args,
kwargs=downcast_kwargs,
client=self.client,
gc_enabled=False,
)
cmd = RunClassMethodAction(
path=attr_path_and_name,
_self=self,
args=pointer_args,
kwargs=pointer_kwargs,
id_at_location=result_id_at_location,
address=self.client.address,
)
self.client.send_immediate_msg_without_reply(msg=cmd)
inherit_tags(
attr_path_and_name=attr_path_and_name,
result=result,
self_obj=self,
args=[other],
kwargs={},
)
result_public_shape = None
if isinstance(other, TensorWrappedPhiTensorPointer):
other_shape = other.public_shape
other_dtype = other.public_dtype
elif isinstance(other, (int, float)):
other_shape = (1,)
other_dtype = DEFAULT_INT_NUMPY_TYPE
elif isinstance(other, bool):
other_shape = (1,)
other_dtype = np.dtype("bool")
elif isinstance(other, np.ndarray):
other_shape = other.shape
other_dtype = other.dtype
else:
raise ValueError(
f"Invalid Type for TensorWrappedPhiTensorPointer:{type(other)}"
)
if self.public_shape is not None and other_shape is not None:
result_public_shape = utils.get_shape(
op_str, self.public_shape, other_shape
)
if self.public_dtype is None or other_dtype is None:
if self.public_dtype != other_dtype:
raise ValueError(
f"Dtype for self: {self.public_dtype} and other :{other_dtype} should not be None"
)
result_public_dtype = self.public_dtype
result.public_shape = result_public_shape
result.public_dtype = result_public_dtype
return result
    @property
    def gamma(self) -> TensorWrappedGammaTensorPointer:
        """View this pointer as a GammaTensor pointer.

        Returns a new pointer to the SAME remote object (same id_at_location
        and client), carrying over all metadata; used when an operation may
        mix data from multiple data subjects.
        """
        return TensorWrappedGammaTensorPointer(
            data_subjects=self.data_subjects,
            client=self.client,
            id_at_location=self.id_at_location,
            object_type=self.object_type,
            tags=self.tags,
            description=self.description,
            min_vals=self.min_vals,
            max_vals=self.max_vals,
            public_shape=getattr(self, "public_shape", None),
            public_dtype=getattr(self, "public_dtype", None),
        )
    @staticmethod
    def _apply_op(
        self: TensorWrappedPhiTensorPointer,
        other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
        op_str: str,
    ) -> Union[MPCTensor, TensorWrappedPhiTensorPointer]:
        """Dispatch ``op_str`` to the right execution path for the operand pair.

        Routing: phi pointers with differing data subjects -> gamma path;
        gamma operand -> gamma path; pointers on different clients -> MPC
        (secret sharing); MPCTensor operand -> delegate to it; simple types
        or same-client pointers -> plain remote tensor op.

        Args:
            other (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]): second operand.
        Returns:
            Tuple[MPCTensor,Union[MPCTensor,int,float,np.ndarray]] : Result of the operation
        """
        if isinstance(other, TensorWrappedPhiTensorPointer):
            # NOTE(review): `.all()` takes the gamma path only when EVERY
            # element's data subjects differ; partially-overlapping subjects
            # fall through to the phi path — confirm `.any()` was not intended.
            if (self.data_subjects != other.data_subjects).all():  # type: ignore
                return getattr(self.gamma, op_str)(other.gamma)
        elif isinstance(other, TensorWrappedGammaTensorPointer):
            return getattr(self.gamma, op_str)(other)

        if (
            isinstance(other, TensorWrappedPhiTensorPointer)
            and self.client != other.client
        ):
            # Operands live on different nodes: secret-share both and run the
            # op under MPC.
            parties = [self.client, other.client]

            self_mpc = MPCTensor(secret=self, shape=self.public_shape, parties=parties)
            other_mpc = MPCTensor(
                secret=other, shape=other.public_shape, parties=parties
            )

            return getattr(self_mpc, op_str)(other_mpc)

        elif isinstance(other, MPCTensor):
            # Let the MPC tensor drive the computation.
            return getattr(other, op_str)(self)
        elif is_acceptable_simple_type(other) or isinstance(
            other, TensorWrappedPhiTensorPointer
        ):
            return self._apply_tensor_op(other=other, op_str=op_str)
        else:
            print("Type is unsupported:" + str(type(other)))
            raise NotImplementedError
def __add__(
self,
other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
"""Apply the "add" operation between "self" and "other"
Args:
y (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.
Returns:
Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
"""
return TensorWrappedPhiTensorPointer._apply_op(self, other, "__add__")
def __sub__(
self,
other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
"""Apply the "sub" operation between "self" and "other"
Args:
y (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.
Returns:
Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
"""
return TensorWrappedPhiTensorPointer._apply_op(self, other, "__sub__")
def __mul__(
self,
other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
"""Apply the "mul" operation between "self" and "other"
Args:
y (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.
Returns:
Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
"""
return TensorWrappedPhiTensorPointer._apply_op(self, other, "__mul__")
def __matmul__(
self,
other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
"""Apply the "matmul" operation between "self" and "other"
Args:
y (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.
Returns:
Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
"""
return TensorWrappedPhiTensorPointer._apply_op(self, other, "__matmul__")
def __rmatmul__(
self,
other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
"""Apply the "matmul" operation between "self" and "other"
Args:
y (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.
Returns:
Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
"""
return TensorWrappedPhiTensorPointer._apply_op(self, other, "__rmatmul__")
def __lt__(
self,
other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
"""Apply the "lt" operation between "self" and "other"
Args:
y (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.
Returns:
Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
"""
return TensorWrappedPhiTensorPointer._apply_op(self, other, "__lt__")
def __gt__(
self,
other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
"""Apply the "gt" operation between "self" and "other"
Args:
y (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.
Returns:
Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
"""
return TensorWrappedPhiTensorPointer._apply_op(self, other, "__gt__")
def __ge__(
self,
other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
"""Apply the "ge" operation between "self" and "other"
Args:
y (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.
Returns:
Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
"""
return TensorWrappedPhiTensorPointer._apply_op(self, other, "__ge__")
def __le__(
self,
other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
"""Apply the "le" operation between "self" and "other"
Args:
y (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.
Returns:
Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
"""
return TensorWrappedPhiTensorPointer._apply_op(self, other, "__le__")
def __eq__( # type: ignore
self,
other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
"""Apply the "eq" operation between "self" and "other"
Args:
y (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.
Returns:
Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
"""
return TensorWrappedPhiTensorPointer._apply_op(self, other, "__eq__")
def __ne__( # type: ignore
self,
other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
"""Apply the "ne" operation between "self" and "other"
Args:
y (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.
Returns:
Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
"""
return TensorWrappedPhiTensorPointer._apply_op(self, other, "__ne__")
    def concatenate(
        self,
        other: TensorWrappedPhiTensorPointer,
        *args: List[Any],
        **kwargs: Dict[str, Any],
    ) -> MPCTensor:
        """Concatenate ``self`` with ``other`` via secret sharing.

        Args:
            other: second operand; must be a TensorWrappedPhiTensorPointer
                owned by a DIFFERENT client.
            *args: forwarded to ``MPCTensor.concatenate``.
            **kwargs: forwarded to ``MPCTensor.concatenate``.

        Returns:
            MPCTensor holding the concatenated result.

        Raises:
            ValueError: if ``other`` is not a TensorWrappedPhiTensorPointer,
                or both pointers belong to the same client.
        """
        if not isinstance(other, TensorWrappedPhiTensorPointer):
            raise ValueError(
                f"Concatenate works only for TensorWrappedPhiTensorPointer got type: {type(other)}"
            )

        if self.client != other.client:
            # Different owners: secret-share both sides and concatenate under MPC.
            parties = [self.client, other.client]

            self_mpc = MPCTensor(secret=self, shape=self.public_shape, parties=parties)
            other_mpc = MPCTensor(
                secret=other, shape=other.public_shape, parties=parties
            )

            return self_mpc.concatenate(other_mpc, *args, **kwargs)

        else:
            raise ValueError(
                "Concatenate method currently works only between two different clients."
            )
def __truediv__(
self,
other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
"""Apply the "truediv" operation between "self" and "other"
Args:
y (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.
Returns:
Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
"""
return TensorWrappedPhiTensorPointer._apply_op(self, other, "__truediv__")
    def sum(
        self,
        *args: Tuple[Any, ...],
        **kwargs: Any,
    ) -> Union[
        TensorWrappedPhiTensorPointer, MPCTensor, TensorWrappedGammaTensorPointer
    ]:
        """Sum the remote tensor.

        Delegates to the gamma representation, since summing can combine
        values belonging to several data subjects.

        Args:
            *args: forwarded to ``gamma.sum`` (e.g. axis).
            **kwargs: forwarded to ``gamma.sum``.

        Returns:
            Pointer to the summed remote tensor.
        """
        return self.gamma.sum(*args, **kwargs)
    def __getitem__(
        self, key: Union[int, bool, slice]
    ) -> TensorWrappedPhiTensorPointer:
        """Index/slice the remote tensor.

        Args:
            key: index or slice applied remotely; also applied locally to
                min_vals/max_vals so the bounds track the result.

        Returns:
            TensorWrappedPhiTensorPointer to the sliced remote result.
        """
        attr_path_and_name = "syft.core.tensor.tensor.Tensor.__getitem__"
        result: TensorWrappedPhiTensorPointer
        # Slice the local bounds the same way the remote data will be sliced.
        min_vals = self.min_vals.__getitem__(key)
        max_vals = self.max_vals.__getitem__(key)

        result = TensorWrappedPhiTensorPointer(
            data_subjects=self.data_subjects,
            min_vals=min_vals,
            max_vals=max_vals,
            client=self.client,
        )

        # QUESTION can the id_at_location be None?
        result_id_at_location = getattr(result, "id_at_location", None)

        if result_id_at_location is not None:
            # first downcast anything primitive which is not already PyPrimitive
            (
                downcast_args,
                downcast_kwargs,
            ) = lib.python.util.downcast_args_and_kwargs(args=[key], kwargs={})

            # then we convert anything which isnt a pointer into a pointer
            pointer_args, pointer_kwargs = pointerize_args_and_kwargs(
                args=downcast_args,
                kwargs=downcast_kwargs,
                client=self.client,
                gc_enabled=False,
            )

            cmd = RunClassMethodAction(
                path=attr_path_and_name,
                _self=self,
                args=pointer_args,
                kwargs=pointer_kwargs,
                id_at_location=result_id_at_location,
                address=self.client.address,
            )
            self.client.send_immediate_msg_without_reply(msg=cmd)

        inherit_tags(
            attr_path_and_name=attr_path_and_name,
            result=result,
            self_obj=self,
            args=[key],
            kwargs={},
        )
        # A dummy empty array lets us compute the public result shape locally
        # without touching the private data.
        dummy_res = np.empty(self.public_shape).__getitem__(key)
        result.public_shape = dummy_res.shape
        result.public_dtype = self.public_dtype

        return result
    def ones_like(
        self,
        *args: Tuple[Any, ...],
        **kwargs: Any,
    ) -> TensorWrappedPhiTensorPointer:
        """Return a pointer to a remote tensor of ones with this tensor's shape.

        Args:
            *args: forwarded to the remote ``ones_like`` and to the bound
                computation.
            **kwargs: forwarded likewise.

        Returns:
            TensorWrappedPhiTensorPointer to the remote all-ones tensor.
        """
        attr_path_and_name = "syft.core.tensor.tensor.Tensor.ones_like"
        result: TensorWrappedPhiTensorPointer
        # Bounds of an all-ones tensor: delegate to the bound containers.
        min_vals = self.min_vals.ones_like(*args, **kwargs)
        max_vals = self.max_vals.ones_like(*args, **kwargs)

        result = TensorWrappedPhiTensorPointer(
            data_subjects=self.data_subjects,
            min_vals=min_vals,
            max_vals=max_vals,
            client=self.client,
        )

        # QUESTION can the id_at_location be None?
        result_id_at_location = getattr(result, "id_at_location", None)

        if result_id_at_location is not None:
            # first downcast anything primitive which is not already PyPrimitive
            (
                downcast_args,
                downcast_kwargs,
            ) = lib.python.util.downcast_args_and_kwargs(args=args, kwargs=kwargs)

            # then we convert anything which isnt a pointer into a pointer
            pointer_args, pointer_kwargs = pointerize_args_and_kwargs(
                args=downcast_args,
                kwargs=downcast_kwargs,
                client=self.client,
                gc_enabled=False,
            )

            cmd = RunClassMethodAction(
                path=attr_path_and_name,
                _self=self,
                args=pointer_args,
                kwargs=pointer_kwargs,
                id_at_location=result_id_at_location,
                address=self.client.address,
            )
            self.client.send_immediate_msg_without_reply(msg=cmd)

        inherit_tags(
            attr_path_and_name=attr_path_and_name,
            result=result,
            self_obj=self,
            args=[],
            kwargs={},
        )
        # Compute the public result shape locally via a dummy array.
        dummy_res = np.ones_like(np.empty(self.public_shape), *args, **kwargs)
        result.public_shape = dummy_res.shape
        result.public_dtype = self.public_dtype

        return result
    def exp(
        self,
    ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
        """Element-wise exponential of the remote tensor.

        Returns:
            TensorWrappedPhiTensorPointer to exp(tensor); local min/max
            bounds are passed through ``exp_reduction`` below.
        """
        attr_path_and_name = "syft.core.tensor.tensor.Tensor.exp"

        # TODO: should modify to log reduction.
        def exp_reduction(val: np.ndarray) -> np.ndarray:
            # NOTE(review): this maps x >= 0 to exp(-x) and x < 0 to -exp(x),
            # i.e. values bounded in [-1, 1] — confirm this is the intended
            # bound transform rather than a plain exp().
            pos_index = val >= 0
            neg_index = val < 0
            exp = np.exp((pos_index * val * -1) + (neg_index * val))
            pos_values = (pos_index) * exp
            neg_values = (neg_index) * exp * -1
            return pos_values + neg_values

        min_vals = self.min_vals.copy()
        min_vals.data = np.array(exp_reduction(min_vals.data))
        max_vals = self.max_vals.copy()
        max_vals.data = np.array(exp_reduction(max_vals.data))

        result = TensorWrappedPhiTensorPointer(
            data_subjects=self.data_subjects,
            min_vals=min_vals,
            max_vals=max_vals,
            client=self.client,
        )

        # QUESTION can the id_at_location be None?
        result_id_at_location = getattr(result, "id_at_location", None)

        if result_id_at_location is not None:
            # first downcast anything primitive which is not already PyPrimitive
            (
                downcast_args,
                downcast_kwargs,
            ) = lib.python.util.downcast_args_and_kwargs(args=[], kwargs={})

            # then we convert anything which isnt a pointer into a pointer
            pointer_args, pointer_kwargs = pointerize_args_and_kwargs(
                args=downcast_args,
                kwargs=downcast_kwargs,
                client=self.client,
                gc_enabled=False,
            )

            cmd = RunClassMethodAction(
                path=attr_path_and_name,
                _self=self,
                args=pointer_args,
                kwargs=pointer_kwargs,
                id_at_location=result_id_at_location,
                address=self.client.address,
            )
            self.client.send_immediate_msg_without_reply(msg=cmd)

        inherit_tags(
            attr_path_and_name=attr_path_and_name,
            result=result,
            self_obj=self,
            args=[],
            kwargs={},
        )

        result.public_shape = self.public_shape
        result.public_dtype = self.public_dtype

        return result
    def reciprocal(
        self,
    ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
        """Element-wise reciprocal (1/x) of the remote tensor.

        Returns:
            TensorWrappedPhiTensorPointer to 1/tensor, with local bounds
            also inverted element-wise.
        """
        attr_path_and_name = "syft.core.tensor.tensor.Tensor.reciprocal"

        # NOTE(review): 1/min and 1/max are not swapped, yet for a positive
        # range [a, b], 1/b <= 1/a, so min/max may end up inverted; ranges
        # spanning zero would also divide by values near 0 — confirm intended.
        min_vals = self.min_vals.copy()
        min_vals.data = np.array(1 / min_vals.data)
        max_vals = self.max_vals.copy()
        max_vals.data = np.array(1 / max_vals.data)

        result = TensorWrappedPhiTensorPointer(
            data_subjects=self.data_subjects,
            min_vals=min_vals,
            max_vals=max_vals,
            client=self.client,
        )

        # QUESTION can the id_at_location be None?
        result_id_at_location = getattr(result, "id_at_location", None)

        if result_id_at_location is not None:
            # first downcast anything primitive which is not already PyPrimitive
            (
                downcast_args,
                downcast_kwargs,
            ) = lib.python.util.downcast_args_and_kwargs(args=[], kwargs={})

            # then we convert anything which isnt a pointer into a pointer
            pointer_args, pointer_kwargs = pointerize_args_and_kwargs(
                args=downcast_args,
                kwargs=downcast_kwargs,
                client=self.client,
                gc_enabled=False,
            )

            cmd = RunClassMethodAction(
                path=attr_path_and_name,
                _self=self,
                args=pointer_args,
                kwargs=pointer_kwargs,
                id_at_location=result_id_at_location,
                address=self.client.address,
            )
            self.client.send_immediate_msg_without_reply(msg=cmd)

        inherit_tags(
            attr_path_and_name=attr_path_and_name,
            result=result,
            self_obj=self,
            args=[],
            kwargs={},
        )

        result.public_shape = self.public_shape
        result.public_dtype = self.public_dtype

        return result
    def softmax(
        self,
    ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
        """Softmax over the remote tensor.

        Returns:
            TensorWrappedPhiTensorPointer to softmax(tensor); local bounds
            are transformed with the same (numerically stabilised) softmax.
        """
        attr_path_and_name = "syft.core.tensor.tensor.Tensor.softmax"

        # TODO: should modify to log reduction.
        def softmax(val: np.ndarray) -> np.ndarray:
            # Subtract the max first for numerical stability of exp().
            logits = val - val.max()
            numerator = np.exp(logits)
            inv = 1 / numerator.sum()
            return numerator * inv

        min_vals = self.min_vals.copy()
        min_vals.data = np.array(softmax(min_vals.data))
        max_vals = self.max_vals.copy()
        max_vals.data = np.array(softmax(max_vals.data))

        result = TensorWrappedPhiTensorPointer(
            data_subjects=self.data_subjects,
            min_vals=min_vals,
            max_vals=max_vals,
            client=self.client,
        )

        # QUESTION can the id_at_location be None?
        result_id_at_location = getattr(result, "id_at_location", None)

        if result_id_at_location is not None:
            # first downcast anything primitive which is not already PyPrimitive
            (
                downcast_args,
                downcast_kwargs,
            ) = lib.python.util.downcast_args_and_kwargs(args=[], kwargs={})

            # then we convert anything which isnt a pointer into a pointer
            pointer_args, pointer_kwargs = pointerize_args_and_kwargs(
                args=downcast_args,
                kwargs=downcast_kwargs,
                client=self.client,
                gc_enabled=False,
            )

            cmd = RunClassMethodAction(
                path=attr_path_and_name,
                _self=self,
                args=pointer_args,
                kwargs=pointer_kwargs,
                id_at_location=result_id_at_location,
                address=self.client.address,
            )
            self.client.send_immediate_msg_without_reply(msg=cmd)

        inherit_tags(
            attr_path_and_name=attr_path_and_name,
            result=result,
            self_obj=self,
            args=[],
            kwargs={},
        )

        result.public_shape = self.public_shape
        result.public_dtype = self.public_dtype

        return result
@property
def T(self) -> TensorWrappedPhiTensorPointer:
    """Remote transpose of the pointed-to tensor.

    Sends a GetOrSetPropertyAction (GET) for `Tensor.T`; the local bounds
    are transposed eagerly and the public shape is recomputed via a
    placeholder np.empty array (no private data involved).
    """
    # We always maintain a Tensor hierarchy Tensor ---> PT--> Actual Data
    attr_path_and_name = "syft.core.tensor.tensor.Tensor.T"

    result = TensorWrappedPhiTensorPointer(
        data_subjects=self.data_subjects,
        min_vals=self.min_vals.transpose(),
        max_vals=self.max_vals.transpose(),
        client=self.client,
    )

    # QUESTION can the id_at_location be None?
    result_id_at_location = getattr(result, "id_at_location", None)

    if result_id_at_location is not None:
        # first downcast anything primitive which is not already PyPrimitive
        (
            downcast_args,
            downcast_kwargs,
        ) = lib.python.util.downcast_args_and_kwargs(args=[], kwargs={})

        # then we convert anything which isnt a pointer into a pointer
        pointer_args, pointer_kwargs = pointerize_args_and_kwargs(
            args=downcast_args,
            kwargs=downcast_kwargs,
            client=self.client,
            gc_enabled=False,
        )

        cmd = GetOrSetPropertyAction(
            path=attr_path_and_name,
            id_at_location=result_id_at_location,
            address=self.client.address,
            _self=self,
            args=pointer_args,
            kwargs=pointer_kwargs,
            action=PropertyActions.GET,
            map_to_dyn=False,
        )
        # Fire-and-forget: no reply is expected from the remote node.
        self.client.send_immediate_msg_without_reply(msg=cmd)

    inherit_tags(
        attr_path_and_name=attr_path_and_name,
        result=result,
        self_obj=self,
        args=[],
        kwargs={},
    )

    # Compute the transposed public shape without touching private data.
    result_public_shape = np.empty(self.public_shape).T.shape

    result.public_shape = result_public_shape
    result.public_dtype = self.public_dtype
    return result
def one_hot(self: TensorWrappedPhiTensorPointer) -> np.array:
    """Remote one-hot encoding of the pointed-to tensor.

    The number of classes is derived from the public upper bound
    (``self.max_vals.data[0] + 1``) — assumes max_vals holds the largest
    label value; TODO confirm against callers.

    NOTE(review): min_vals/max_vals are forwarded unchanged, although a
    one-hot result is bounded by [0, 1] — verify whether the bounds
    should be reset here.
    """
    # Placeholder used purely to compute the public output shape locally.
    tensor_size = np.empty(self.public_shape).size
    one_hot_Y = np.zeros((tensor_size, self.max_vals.data[0] + 1))
    one_hot_Y = one_hot_Y.T

    attr_path_and_name = "syft.core.tensor.tensor.Tensor.one_hot"

    result = TensorWrappedPhiTensorPointer(
        data_subjects=self.data_subjects,
        min_vals=self.min_vals,
        max_vals=self.max_vals,
        client=self.client,
    )

    # QUESTION can the id_at_location be None?
    result_id_at_location = getattr(result, "id_at_location", None)

    if result_id_at_location is not None:
        # first downcast anything primitive which is not already PyPrimitive
        (
            downcast_args,
            downcast_kwargs,
        ) = lib.python.util.downcast_args_and_kwargs(args=[], kwargs={})

        # then we convert anything which isnt a pointer into a pointer
        pointer_args, pointer_kwargs = pointerize_args_and_kwargs(
            args=downcast_args,
            kwargs=downcast_kwargs,
            client=self.client,
            gc_enabled=False,
        )

        cmd = RunClassMethodAction(
            path=attr_path_and_name,
            _self=self,
            args=pointer_args,
            kwargs=pointer_kwargs,
            id_at_location=result_id_at_location,
            address=self.client.address,
        )
        # Fire-and-forget: no reply is expected from the remote node.
        self.client.send_immediate_msg_without_reply(msg=cmd)

    inherit_tags(
        attr_path_and_name=attr_path_and_name,
        result=result,
        self_obj=self,
        args=[],
        kwargs={},
    )

    result.public_shape = one_hot_Y.shape
    result.public_dtype = self.public_dtype
    return result
def to_local_object_without_private_data_child(self) -> PhiTensor:
    """Convert this pointer into a partial version of the PhiTensor but without
    any of the private data therein.

    The child is replaced by an uninitialized placeholder array shaped like
    data_subjects (np.empty), wrapped in a FixedPrecisionTensor; only the
    public metadata (bounds, shape, dtype) is carried over.
    """
    # relative
    from ..tensor import Tensor

    public_shape = getattr(self, "public_shape", None)
    public_dtype = getattr(self, "public_dtype", None)
    return Tensor(
        child=PhiTensor(
            # np.empty is a placeholder — contents are garbage by design.
            child=FixedPrecisionTensor(value=np.empty(self.data_subjects.shape)),
            data_subjects=self.data_subjects,
            min_vals=self.min_vals,  # type: ignore
            max_vals=self.max_vals,  # type: ignore
        ),
        public_shape=public_shape,
        public_dtype=public_dtype,
    )
@implements(TensorWrappedPhiTensorPointer, np.ones_like)
def ones_like(
    tensor: TensorWrappedPhiTensorPointer,
    *args: Tuple[Any, ...],
    **kwargs: Dict[Any, Any],
) -> TensorWrappedPhiTensorPointer:
    """Route np.ones_like(pointer) to the pointer's own ones_like method."""
    return tensor.ones_like(*args, **kwargs)
@serializable(capnp_bytes=True)
class PhiTensor(PassthroughTensor, ADPTensor):
    """Private tensor tracking per-element data subjects and public value bounds.

    Slots:
        child: the actual private data (stored by PassthroughTensor).
        min_vals / max_vals: lazyrepeatarray bounds matching child's shape.
        data_subjects: DataSubjectArray mapping elements to their owners.
    """

    # Pointers to this type are materialized as TensorWrappedPhiTensorPointer.
    PointerClassOverride = TensorWrappedPhiTensorPointer
    # __attr_allowlist__ = ["child", "min_vals", "max_vals", "data_subjects"]
    __slots__ = (
        "child",
        "min_vals",
        "max_vals",
        "data_subjects",
    )
def __init__(
    self,
    child: Union[Sequence, NDArray],
    data_subjects: Union[DataSubjectArray, NDArray],
    min_vals: Union[np.ndarray, lazyrepeatarray],
    max_vals: Union[np.ndarray, lazyrepeatarray],
) -> None:
    """Wrap private data with its data-subject mapping and public bounds.

    Args:
        child: the actual private data.
        data_subjects: per-element owners; must match child's shape.
        min_vals / max_vals: bounds; raw arrays are wrapped into
            lazyrepeatarray with child's shape.

    Raises:
        ValueError: if the data_subjects shape differs from the data shape.
    """
    # self.data_subjects: Union[DataSubjectList, np.ndarray]
    # child = the actual private data
    super().__init__(child)

    # lazyrepeatarray matching the shape of child
    if not isinstance(min_vals, lazyrepeatarray):
        min_vals = lazyrepeatarray(data=min_vals, shape=child.shape)  # type: ignore
    if not isinstance(max_vals, lazyrepeatarray):
        max_vals = lazyrepeatarray(data=max_vals, shape=child.shape)  # type: ignore
    self.min_vals = min_vals
    self.max_vals = max_vals

    numpy_data_subjects: np.ndarray = DataSubjectArray.from_objs(data_subjects)
    self.data_subjects = numpy_data_subjects
    if numpy_data_subjects.shape != self.shape:
        raise ValueError(
            f"DataSubjects shape: {numpy_data_subjects.shape} should match data shape: {self.shape}"
        )
@property
def proxy_public_kwargs(self) -> Dict[str, Any]:
    """Public metadata forwarded when a proxy for this tensor is built."""
    public_metadata: Dict[str, Any] = {}
    public_metadata["min_vals"] = self.min_vals
    public_metadata["max_vals"] = self.max_vals
    public_metadata["data_subjects"] = self.data_subjects
    return public_metadata
# def init_pointer(
# self,
# client: Any,
# id_at_location: Optional[UID] = None,
# object_type: str = "",
# tags: Optional[List[str]] = None,
# description: str = "",
# ) -> TensorWrappedPhiTensorPointer:
# return TensorWrappedPhiTensorPointer(
# # Arguments specifically for SEPhiTensor
# data_subjects=self.data_subjects,
# min_vals=self.min_vals,
# max_vals=self.max_vals,
# # Arguments required for a Pointer to work
# client=client,
# id_at_location=id_at_location,
# object_type=object_type,
# tags=tags,
# description=description,
# )
@property
def gamma(self) -> GammaTensor:
    """Property to cast this tensor into a GammaTensor (see create_gamma)."""
    return self.create_gamma()
def copy(self, order: Optional[str] = "K") -> PhiTensor:
    """Return a copy of this tensor; `order` is forwarded to each component copy."""
    copied_child = self.child.copy(order=order)
    copied_min = self.min_vals.copy(order=order)
    copied_max = self.max_vals.copy(order=order)
    copied_subjects = self.data_subjects.copy()
    return PhiTensor(
        child=copied_child,
        min_vals=copied_min,
        max_vals=copied_max,
        data_subjects=copied_subjects,
    )
def all(self) -> bool:
    """True iff every element of the private data is truthy."""
    return self.child.all()
def any(self) -> bool:
    """True iff at least one element of the private data is truthy."""
    return self.child.any()
def copy_with(self, child: np.ndarray) -> PhiTensor:
    """Copy of self whose private data is replaced by `child` (metadata kept)."""
    duplicate = self.copy()
    duplicate.child = child
    return duplicate
def __getitem__(self, item: Union[str, int, slice, PassthroughTensor]) -> PhiTensor:
    """Index into the private data.

    A PassthroughTensor index is unwrapped to its child first. The result's
    bounds collapse to the selected values themselves (both min and max are
    the selected data).
    """
    key = item.child if isinstance(item, PassthroughTensor) else item
    selected = self.child[key]
    bounds = lazyrepeatarray(data=selected, shape=selected.shape)
    return PhiTensor(
        child=selected,
        min_vals=bounds,
        max_vals=bounds,
        data_subjects=self.data_subjects[key],
    )
def zeros_like(
    self,
    *args: Tuple[Any, ...],
    **kwargs: Any,
) -> Union[PhiTensor, GammaTensor]:
    """Return an all-zeros PhiTensor with self's shape and metadata.

    Bug fix: the non-ndarray fallback previously called
    ``self.child.ones_like(...)``, filling the result with ones.
    """
    # TODO: Add support for axes arguments later
    min_vals = self.min_vals.zeros_like(*args, **kwargs)
    max_vals = self.max_vals.zeros_like(*args, **kwargs)
    child = (
        np.zeros_like(self.child, *args, **kwargs)
        if isinstance(self.child, np.ndarray)
        # FIX: was self.child.ones_like(...), which produced ones, not zeros.
        else self.child.zeros_like(*args, **kwargs)
    )
    return PhiTensor(
        child=child,
        min_vals=min_vals,
        max_vals=max_vals,
        data_subjects=self.data_subjects,
    )
def __setitem__(
    self,
    key: Union[int, slice, NDArray],
    value: Union[PhiTensor, GammaTensor, np.ndarray],
) -> Union[PhiTensor, GammaTensor]:
    """Assign into the private data, widening bounds as needed.

    Side effects: mutates self.child in place and may widen (never shrink)
    min_vals/max_vals. Returns a GammaTensor when `value` is a
    PhiTensor/GammaTensor, otherwise a PhiTensor sharing self's buffers.
    """
    if isinstance(value, PhiTensor):
        self.child[key] = value.child
        minv = value.child.min()
        maxv = value.child.max()

        # Only widen the bounds; existing bounds already cover old data.
        if minv < self.min_vals.data.min():
            self.min_vals.data = minv

        if maxv > self.max_vals.data.max():
            self.max_vals.data = maxv

        gamma_output = self.gamma
        gamma_output[key] = value.gamma
        # print("It's on the right track")
        return gamma_output
    elif isinstance(value, GammaTensor):
        gamma = self.gamma
        gamma[key] = value
        return gamma
    elif isinstance(value, np.ndarray):
        self.child[key] = value
        minv = value.min()
        maxv = value.max()

        # Only widen the bounds; existing bounds already cover old data.
        if minv < self.min_vals.data.min():
            self.min_vals.data = minv

        if maxv > self.max_vals.data.max():
            self.max_vals.data = maxv

        return PhiTensor(
            child=self.child,
            data_subjects=self.data_subjects,
            min_vals=self.min_vals,
            max_vals=self.max_vals,
        )
    else:
        raise NotImplementedError
def abs(self) -> PhiTensor:
    """Elementwise absolute value with correct |x| bounds.

    Bug fix: max_vals was previously built from ``self.min_vals.data``
    (copy-paste), so the upper bound ignored the true maximum. For
    x in [a, b] the bounds of |x| are:
        min = 0 if a <= 0 <= b else min(|a|, |b|)
        max = max(|a|, |b|)
    """
    output = np.abs(self.child)
    abs_lo = np.abs(self.min_vals.data)
    abs_hi = np.abs(self.max_vals.data)
    # If the range straddles zero, |x| can reach 0.
    spans_zero = (self.min_vals.data <= 0) & (self.max_vals.data >= 0)
    min_bound = np.where(spans_zero, 0, np.minimum(abs_lo, abs_hi))
    max_bound = np.maximum(abs_lo, abs_hi)
    return PhiTensor(
        child=output,
        data_subjects=self.data_subjects,
        min_vals=min_bound,
        max_vals=max_bound,
    )
def reshape(self, *shape: Tuple[int, ...]) -> PhiTensor:
    """Reshape data and data_subjects to `shape`.

    NOTE(review): the new bounds are taken from the private data itself
    (output_data.min()/max()) rather than the existing public bounds —
    possible privacy leak; same pattern as pad/random_rotation. Confirm.
    """
    data = self.child
    output_data = np.reshape(data, *shape)
    return PhiTensor(
        child=output_data,
        data_subjects=np.reshape(self.data_subjects, *shape),
        min_vals=output_data.min(),
        max_vals=output_data.max(),
    )
def pad(self, width: int, padding_mode: str = "reflect") -> PhiTensor:
    """Pad the data (and data_subjects) by `width` on each side.

    Only "reflect" mode is implemented. 3-D input is treated as an RGB
    image (channels untouched), 2-D as grayscale; anything else falls back
    to np.pad's scalar-width behavior.

    Raises:
        NotImplementedError: for any padding_mode other than "reflect".

    NOTE(review): output bounds come from the padded private data —
    data-dependent bounds, same pattern as reshape. Confirm.
    """
    data = self.child

    if padding_mode == "reflect":
        pad_left = pad_right = pad_top = pad_bottom = width
        # RGB image
        if len(data.shape) == 3:
            output_data = np.pad(
                data,
                ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)),
                padding_mode,
            )
            output_data_subjects = np.pad(
                self.data_subjects,
                ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)),
                padding_mode,
            )
        # Grayscale image
        elif len(data.shape) == 2:
            output_data = np.pad(
                data, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode
            )
            output_data_subjects = np.pad(
                self.data_subjects,
                ((pad_top, pad_bottom), (pad_left, pad_right)),
                padding_mode,
            )
        else:
            output_data = np.pad(data, width, padding_mode)
            output_data_subjects = np.pad(self.data_subjects, width, padding_mode)
    else:
        raise NotImplementedError

    output_min_val, output_max_val = output_data.min(), output_data.max()
    return PhiTensor(
        child=output_data,
        data_subjects=output_data_subjects,
        min_vals=output_min_val,
        max_vals=output_max_val,
    )
def ravel(self) -> PhiTensor:
    """Flatten to 1-D, carrying the same scalar bounds over the new shape."""
    flat_child = self.child.ravel()
    flat_subjects = self.data_subjects.ravel()
    flat_shape = flat_child.shape
    return PhiTensor(
        child=flat_child,
        data_subjects=flat_subjects,
        min_vals=lazyrepeatarray(data=self.min_vals.data, shape=flat_shape),
        max_vals=lazyrepeatarray(data=self.max_vals.data, shape=flat_shape),
    )
def random_horizontal_flip(self, p: float = 0.5) -> PhiTensor:
    """Flip left-right with probability p; otherwise return self unchanged.

    (Could make more efficient by not encoding/decoding FPT.)
    """
    if np.random.random() > p:
        return self
    return PhiTensor(
        child=np.fliplr(self.child),
        data_subjects=self.data_subjects,
        min_vals=self.min_vals.horizontal_flip(),
        max_vals=self.max_vals.horizontal_flip(),
    )
def random_vertical_flip(self, p: float = 0.5) -> PhiTensor:
    """Flip up-down with probability p; otherwise return self unchanged.

    (Could make more efficient by not encoding/decoding FPT.)
    """
    if np.random.random() > p:
        return self
    return PhiTensor(
        child=np.flipud(self.child),
        data_subjects=self.data_subjects,
        min_vals=self.min_vals.vertical_flip(),
        max_vals=self.max_vals.vertical_flip(),
    )
def random_rotation(self, degrees: Union[int, Tuple]) -> PhiTensor:
    """Rotate the data by a random angle.

    Args:
        degrees: int d -> angle drawn from [-d, d); tuple (lo, hi) ->
            angle drawn from [lo, hi).

    Raises:
        TypeError: for any other degrees type (previously this fell
            through and crashed with UnboundLocalError on `angle`).

    NOTE(review): bounds come from the rotated private data — data-dependent
    bounds, consistent with reshape/pad in this file; confirm.
    """
    if isinstance(degrees, int):
        angle = np.random.randint(low=-degrees, high=degrees)
    elif isinstance(degrees, tuple):
        angle = np.random.randint(low=degrees[0], high=degrees[1])
    else:
        # FIX: explicit error instead of UnboundLocalError below.
        raise TypeError(f"degrees must be int or tuple, got {type(degrees)}")

    rotated_data_value = rotate(self.child, angle)

    return PhiTensor(
        child=rotated_data_value,
        data_subjects=self.data_subjects,
        min_vals=rotated_data_value.min(),
        max_vals=rotated_data_value.max(),
    )
def max(self, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> PhiTensor:
    """Maximum along `axis`; data_subjects follow the argmax positions.

    NOTE(review): for axis=None argmax yields a flat index into the raveled
    array — verify that indexing data_subjects with it (and with the
    per-axis index arrays when axis is set) selects the intended subjects.
    """
    indices = self.child.argmax(axis)
    result = self.child.max(axis)
    return PhiTensor(
        child=result,
        data_subjects=self.data_subjects[indices],
        min_vals=lazyrepeatarray(data=result.min(), shape=result.shape),
        max_vals=lazyrepeatarray(data=result.max(), shape=result.shape),
    )
def min(self, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> PhiTensor:
    """Minimum along `axis`; data_subjects follow the argmin positions.

    NOTE(review): same indexing caveat as max() — confirm data_subjects
    selection for axis=None and axis set.
    """
    indices = self.child.argmin(axis)
    result = self.child.min(axis)
    return PhiTensor(
        child=result,
        data_subjects=self.data_subjects[indices],
        min_vals=lazyrepeatarray(data=result.min(), shape=result.shape),
        max_vals=lazyrepeatarray(data=result.max(), shape=result.shape),
    )
def _argmax(self, axis: Optional[int]) -> np.ndarray:
    """Raw argmax indices of the private data (annotation corrected from
    PhiTensor: ndarray.argmax returns a plain index array)."""
    return self.child.argmax(axis)
def unravel_argmax(
    self, axis: Optional[int] = None
) -> Tuple[np.ndarray]:  # possible privacy violation?
    """Return the argmax as multi-dimensional coordinates into self.shape."""
    flat_indices = self._argmax(axis=axis)
    return np.unravel_index(flat_indices, self.shape)
def mean(
    self,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    **kwargs: Any,
) -> PhiTensor:
    """Mean along `axis`.

    The public bounds are reused unchanged (reshaped to the result): the
    mean of values in [min, max] stays within [min, max].
    """
    result = self.child.mean(axis, **kwargs)

    return PhiTensor(
        child=result,
        data_subjects=self.data_subjects.mean(axis, **kwargs),
        min_vals=lazyrepeatarray(data=self.min_vals.data, shape=result.shape),
        max_vals=lazyrepeatarray(data=self.max_vals.data, shape=result.shape),
    )
def std(
    self,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    **kwargs: Any,
) -> PhiTensor:
    """Standard deviation along `axis`.

    Bounds: std is non-negative, so min is 0; the max uses
    0.25 * (max - min)^2 as a rough variance bound (author-noted
    approximation — may be off, and it bounds variance rather than std;
    TODO confirm intended units).
    """
    result = self.child.std(axis, **kwargs)
    return PhiTensor(
        child=result,
        data_subjects=self.data_subjects.std(axis, **kwargs),
        min_vals=lazyrepeatarray(data=0, shape=result.shape),
        max_vals=lazyrepeatarray(
            data=0.25
            * (self.max_vals.data - self.min_vals.data)
            ** 2,  # rough approximation, could be off
            shape=result.shape,
        ),
    )
def sqrt(self) -> PhiTensor:
    """Elementwise square root; bounds follow since sqrt is monotonic.

    NOTE(review): np.sqrt is also applied to data_subjects — presumably
    DataSubjectArray defines a sqrt ufunc hook that keeps ownership
    intact; verify.
    """
    result = np.sqrt(self.child)
    return PhiTensor(
        child=result,
        data_subjects=np.sqrt(self.data_subjects),
        min_vals=lazyrepeatarray(
            data=np.sqrt(self.min_vals.data), shape=result.shape
        ),
        max_vals=lazyrepeatarray(
            data=np.sqrt(self.max_vals.data), shape=result.shape
        ),
    )
def normalize(
    self, mean: Union[float, Sequence[float]], std: Union[float, Sequence[float]]
) -> PhiTensor:
    """Standardize the data: (x - mean) / std, with bounds shifted the same way.

    Only scalar (float) mean/std are implemented; per-channel sequences
    raise NotImplementedError.
    """
    # TODO: Double check if normalization bounds are correct; they might be data dependent
    if isinstance(mean, float) and isinstance(std, float):
        return PhiTensor(
            child=(self.child - mean) / std,
            data_subjects=self.data_subjects,
            min_vals=(self.min_vals - mean) * (1 / std),
            max_vals=(self.max_vals - mean) * (1 / std),
        )
    else:
        # This is easily doable in the future
        raise NotImplementedError
def create_gamma(self) -> GammaTensor:
    """Build the GammaTensor equivalent of this PhiTensor (same data/metadata)."""
    return GammaTensor(
        child=self.child,
        data_subjects=self.data_subjects,
        min_vals=self.min_vals,
        max_vals=self.max_vals,
    )
def view(self, *args: List[Any]) -> PhiTensor:
    """Reshape the data (despite the name, this calls reshape, not a view).

    Bounds collapse to the global scalar min/max repeated over the new shape.
    """
    # TODO: Figure out how to fix lazyrepeatarray reshape
    data = self.child.reshape(*args)
    return PhiTensor(
        child=data,
        data_subjects=self.data_subjects,
        min_vals=lazyrepeatarray(data=self.min_vals.data.min(), shape=data.shape),
        max_vals=lazyrepeatarray(data=self.max_vals.data.max(), shape=data.shape),
    )
def publish(
    self,
    get_budget_for_user: Callable,
    deduct_epsilon_for_user: Callable,
    ledger: DataSubjectLedger,
    sigma: float,
) -> AcceptableSimpleType:
    """Release a noised, privacy-budgeted value via the gamma representation.

    Args:
        get_budget_for_user / deduct_epsilon_for_user: budget callbacks.
        ledger: the data-subject ledger charged for this release.
        sigma: noise scale forwarded to GammaTensor.publish.
    """
    # NOTE(review): these prints expose the raw private data in server
    # logs — confirm they are debug leftovers and remove.
    print("PUBLISHING TO GAMMA:")
    print(self.child)

    gamma = self.gamma
    # gamma.func = lambda x: x
    # Register this node in the gamma computation graph state.
    gamma.state[gamma.id] = gamma

    res = gamma.publish(
        get_budget_for_user=get_budget_for_user,
        deduct_epsilon_for_user=deduct_epsilon_for_user,
        ledger=ledger,
        sigma=sigma,
    )

    print("Final Values", res)

    return res
@property
def value(self) -> np.ndarray:
    """Direct access to the raw private data (no privacy accounting here)."""
    return self.child
def astype(self, np_type: np.dtype) -> PhiTensor:
    """Cast data and bounds to `np_type`; data_subjects are unchanged."""
    return self.__class__(
        child=self.child.astype(np_type),
        data_subjects=self.data_subjects,
        min_vals=self.min_vals.astype(np_type),
        max_vals=self.max_vals.astype(np_type),
        # scalar_manager=self.scalar_manager,
    )
@property
def shape(self) -> Tuple[Any, ...]:
    """Shape of the private data; empty tuple when no child is set."""
    data = self.child
    return () if data is None else data.shape
def __repr__(self) -> str:
    """Pretty print some information, optimized for Jupyter notebook viewing."""
    cls_name = self.__class__.__name__
    return f"{cls_name}(child={self.child}, min_vals={self.min_vals}, max_vals={self.max_vals})"
def __eq__(self, other: Any) -> Union[PhiTensor, GammaTensor]:  # type: ignore
    """Elementwise equality; returns a tensor, not a bool.

    The result keeps self's bounds/subjects (copy_with); it is upgraded to
    gamma only when the elementwise comparison itself produced a
    GammaTensor.
    """
    # TODO: what about data_subjects and min / max values?
    if is_acceptable_simple_type(other) or len(self.child) == len(other.child):
        gamma_output = False
        if is_acceptable_simple_type(other):
            result = self.child == other
        else:
            # check data_subjects match, if they dont gamma_output = True
            result = self.child == other.child
            if isinstance(result, GammaTensor):  # TODO: Check this
                gamma_output = True
        if not gamma_output:
            # min_vals=self.min_vals * 0.0,
            # max_vals=self.max_vals * 0.0 + 1.0,
            return self.copy_with(child=result)
        else:
            return self.copy_with(child=result).gamma
    else:
        raise Exception(
            "Tensor dims do not match for __eq__: "
            + f"{len(self.child)} != {len(other.child)}"
        )
def __add__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]:
    """Add a private tensor or a public scalar/array.

    Private + private always goes through the gamma representation
    (a PhiTensor fast-path existed but was disabled — see git history).
    Public addition shifts both bounds by the same amount.
    """
    # if the tensor being added is also private
    if isinstance(other, PhiTensor):
        return self.gamma + other.gamma

    # if the tensor being added is a public tensor / int / float / etc.
    elif is_acceptable_simple_type(other):
        return PhiTensor(
            child=self.child + other,
            min_vals=self.min_vals + other,
            max_vals=self.max_vals + other,
            data_subjects=self.data_subjects,
        )

    elif isinstance(other, GammaTensor):
        return self.gamma + other
    else:
        print("Type is unsupported:" + str(type(other)))
        raise NotImplementedError
def __sub__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]:
    """Subtract a private tensor or a public scalar/array.

    Private - private always goes through gamma (a PhiTensor fast-path with
    interval arithmetic was disabled — see git history). The final return
    is only reached from the public-operand branch.
    """
    if isinstance(other, PhiTensor):
        return self.gamma - other.gamma
    elif is_acceptable_simple_type(other):
        if isinstance(other, np.ndarray):
            if not is_broadcastable(other.shape, self.child.shape):  # type: ignore
                raise Exception(
                    f"Shapes do not match for subtraction: {self.child.shape} and {other.shape}"
                )
        # Public subtraction shifts both bounds by the same amount.
        data = self.child - other
        min_vals = self.min_vals - other
        max_vals = self.max_vals - other
        data_subjects = self.data_subjects
    elif isinstance(other, GammaTensor):
        return self.gamma - other
    else:
        print("Type is unsupported:" + str(type(other)))
        raise NotImplementedError

    return PhiTensor(
        child=data,
        data_subjects=data_subjects,
        min_vals=min_vals,
        max_vals=max_vals,
    )
def __mul__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]:
    """Multiply by a private tensor or a public scalar/array.

    Bug fix: the PhiTensor branch previously returned
    ``self.gamma + other.gamma`` — it ADDED the tensors instead of
    multiplying them. Now mirrors __add__/__sub__ structure with ``*``.
    """
    if isinstance(other, PhiTensor):
        # FIX: was `self.gamma + other.gamma`.
        return self.gamma * other.gamma
    elif is_acceptable_simple_type(other):
        data = self.child * other
        # For a public multiplier the only bound candidates are
        # min*other and max*other; which is smaller depends on the
        # sign of `other`, so take the elementwise min/max of both.
        lo_candidate = self.min_vals.data * other
        hi_candidate = self.max_vals.data * other
        _min_vals = np.min([lo_candidate, hi_candidate], axis=0)  # type: ignore
        _max_vals = np.max([lo_candidate, hi_candidate], axis=0)  # type: ignore
        min_vals = self.min_vals.copy()
        min_vals.data = _min_vals
        max_vals = self.max_vals.copy()
        max_vals.data = _max_vals
        data_subjects = self.data_subjects

        return PhiTensor(
            child=data,
            data_subjects=data_subjects,
            min_vals=min_vals,
            max_vals=max_vals,
        )
    elif isinstance(other, GammaTensor):
        return self.gamma * other
    else:
        print("Type is unsupported:" + str(type(other)))
        raise NotImplementedError
def __rtruediv__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]:
    """Compute other / self (reciprocal of self scaled by other).

    NOTE(review): the bounds are 1/min and 1/max in the same slots —
    for positive bounds, 1/min >= 1/max, so min_vals/max_vals may be
    swapped; and bounds straddling zero make the reciprocal unbounded.
    Confirm intended semantics.
    """
    if is_acceptable_simple_type(other):
        return PhiTensor(
            child=(1 / self.child) * other,
            min_vals=(1 / self.min_vals) * other,
            max_vals=(1 / self.max_vals) * other,
            data_subjects=self.data_subjects,
            # scalar_manager=self.scalar_manager,
        )
    elif isinstance(other, GammaTensor):
        return (1 / self.gamma) * other
    else:
        print("Type is unsupported:" + str(type(other)))
        raise NotImplementedError
def __matmul__(
    self, other: Union[np.ndarray, PhiTensor]
) -> Union[PhiTensor, GammaTensor]:
    """Matrix multiplication.

    Public ndarray operands produce a PhiTensor with bounds matmul'd by
    the same operand; private/gamma operands delegate to gamma. The
    broadcast-compatibility check is currently disabled (`if False`) —
    author-marked "Modify before merge".
    """
    if not isinstance(other, (np.ndarray, PhiTensor, GammaTensor)):
        raise Exception(
            f"Matrix multiplication not yet implemented for type {type(other)}"
        )
    else:
        # Modify before merge, to know is broadcast is actually necessary
        if False:  # and not is_broadcastable(self.shape, other.shape):
            raise Exception(
                f"Shapes not broadcastable: {self.shape} and {other.shape}"
            )
        else:
            if isinstance(other, np.ndarray):
                data = self.child.__matmul__(other)
                min_vals = self.min_vals.__matmul__(other)
                max_vals = self.max_vals.__matmul__(other)
                output_ds = self.data_subjects @ other
            elif isinstance(other, PhiTensor):
                # Private @ private goes through gamma (a PhiTensor
                # fast-path was disabled — see git history).
                return self.gamma @ other.gamma
            elif isinstance(other, GammaTensor):
                return self.gamma @ other
            else:
                print("Type is unsupported:" + str(type(other)))
                raise NotImplementedError

            return PhiTensor(
                child=data,
                max_vals=max_vals,
                min_vals=min_vals,
                data_subjects=output_ds,
            )
def __rmatmul__(
    self, other: Union[np.ndarray, PhiTensor]
) -> Union[PhiTensor, GammaTensor]:
    """Reflected matrix multiplication (other @ self).

    Mirrors __matmul__: public ndarray operands build a PhiTensor with
    bounds rmatmul'd by the same operand; private operands delegate to
    gamma. The broadcast check is disabled (`if False`), same as
    __matmul__.
    """
    if not isinstance(other, (np.ndarray, PhiTensor, GammaTensor)):
        raise Exception(
            f"Matrix multiplication not yet implemented for type {type(other)}"
        )
    else:
        # Modify before merge, to know is broadcast is actually necessary
        if False:  # and not is_broadcastable(self.shape, other.shape):
            raise Exception(
                f"Shapes not broadcastable: {self.shape} and {other.shape}"
            )
        else:
            if isinstance(other, np.ndarray):
                data = self.child.__rmatmul__(other)
                min_vals = self.min_vals.__rmatmul__(other)
                max_vals = self.max_vals.__rmatmul__(other)
                output_ds = self.data_subjects.__rmatmul__(other)
            elif isinstance(other, PhiTensor):
                # Private case delegates to gamma (a PhiTensor fast-path
                # was disabled — see git history).
                return self.gamma.__rmatmul__(other.gamma)
            else:
                print("Type is unsupported:" + str(type(other)))
                raise NotImplementedError

            return PhiTensor(
                child=data,
                max_vals=max_vals,
                min_vals=min_vals,
                data_subjects=output_ds,
            )
def clip(self, a_min: float, a_max: float) -> PhiTensor:
    """Clamp the data and both bounds into [a_min, a_max]."""
    clipped = np.clip(self.child, a_min, a_max)
    clipped_shape = clipped.shape
    clipped_lo = np.clip(self.min_vals.data, a_min, a_max)
    clipped_hi = np.clip(self.max_vals.data, a_min, a_max)
    return PhiTensor(
        child=clipped,
        data_subjects=self.data_subjects,
        min_vals=lazyrepeatarray(data=clipped_lo, shape=clipped_shape),
        max_vals=lazyrepeatarray(data=clipped_hi, shape=clipped_shape),
    )
def transpose(self, *args: Any, **kwargs: Any) -> PhiTensor:
    """Transposes self.child, min_vals, and max_vals if these can be transposed, otherwise doesn't change them.

    NOTE(review): when the stored bounds are not plain scalars, they are
    replaced by data.min()/data.max() of the private data — data-dependent
    bounds, same pattern as reshape/pad; confirm.
    """
    data: np.ndarray
    if (
        isinstance(self.child, int)
        or isinstance(self.child, float)
        or isinstance(self.child, bool)
    ):
        # For these data types, the transpose operation is meaningless, so don't change them.
        data = self.child  # type: ignore
        print(
            f"Warning: Tensor data was of type {type(data)}, transpose operation had no effect."
        )
    else:
        data = self.child.transpose(*args)

    # TODO: Should we give warnings for min_vals and max_vals being single floats/integers/booleans too?
    if (
        isinstance(self.min_vals, int)
        or isinstance(self.min_vals, float)
        or isinstance(self.min_vals, bool)
    ):
        # For these data types, the transpose operation is meaningless, so don't change them.
        min_vals = self.min_vals
        # print(f'Warning: Tensor data was of type {type(data)}, transpose operation had no effect.')
    else:
        min_vals = data.min()

    if (
        isinstance(self.max_vals, int)
        or isinstance(self.max_vals, float)
        or isinstance(self.max_vals, bool)
    ):
        # For these data types, the transpose operation is meaningless, so don't change them.
        max_vals = self.max_vals
        # print(f'Warning: Tensor data was of type {type(data)}, transpose operation had no effect.')
    else:
        max_vals = data.max()

    output_ds = self.data_subjects.transpose(*args)

    return PhiTensor(
        child=data,
        data_subjects=output_ds,
        min_vals=min_vals,
        max_vals=max_vals,
    )
def concatenate(
    self,
    other: Union[np.ndarray, PhiTensor],
    *args: List[Any],
    **kwargs: Dict[str, Any],
) -> Union[PhiTensor, GammaTensor]:
    """Concatenate with another private tensor.

    NOTE(review): when data_subjects differ this returns
    ``self.gamma + other.gamma`` — an ADDITION, which looks like a
    copy-paste from __add__ rather than a concatenation; confirm the
    intended gamma operation.
    """
    # if the tensor being added is also private
    if isinstance(other, PhiTensor):
        if self.data_subjects != other.data_subjects:
            return self.gamma + other.gamma

        return PhiTensor(
            child=self.child.concatenate(other.child, *args, **kwargs),
            min_vals=self.min_vals.concatenate(other.min_vals, *args, **kwargs),
            max_vals=self.max_vals.concatenate(other.max_vals, *args, **kwargs),
            data_subjects=self.data_subjects,
        )

    elif is_acceptable_simple_type(other):
        raise NotImplementedError
    else:
        print("Type is unsupported:" + str(type(other)))
        raise NotImplementedError
def __lt__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]:
    """Elementwise less-than; comparison results are bounded by [0, 1].

    Bug fix: the unsupported-type branch previously *returned* the
    NotImplementedError class instead of raising it (unlike __gt__,
    which raises). Now raises, consistent with __gt__.
    """
    # if the tensor being compared is also private
    if isinstance(other, PhiTensor):
        # Private vs private comparison goes through gamma (a PhiTensor
        # fast-path was disabled — see git history).
        return self.gamma.__lt__(other.gamma)

    # if the tensor being compared is a public tensor / int / float / etc.
    elif is_acceptable_simple_type(other):
        data = self.child < other
        # Boolean results: lower bound 0, upper bound 1.
        min_vals = self.min_vals * 0
        max_vals = (self.max_vals * 0) + 1
        data_subjects = self.data_subjects

        return PhiTensor(
            child=data,
            data_subjects=data_subjects,
            min_vals=min_vals,
            max_vals=max_vals,
        )

    else:
        # FIX: was `return NotImplementedError` (returned the class).
        raise NotImplementedError
def __le__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]:
    """Elementwise less-than-or-equal; comparison results are bounded by [0, 1].

    Bug fix: the unsupported-type branch previously *returned* the
    NotImplementedError class instead of raising it (unlike __gt__,
    which raises). Now raises, consistent with __gt__.
    """
    # if the tensor being compared is also private
    if isinstance(other, PhiTensor):
        # Private vs private comparison goes through gamma (a PhiTensor
        # fast-path was disabled — see git history).
        return self.gamma.__le__(other.gamma)

    # if the tensor being compared is a public tensor / int / float / etc.
    elif is_acceptable_simple_type(other):
        data = self.child <= other
        # Boolean results: lower bound 0, upper bound 1.
        min_vals = self.min_vals * 0
        max_vals = (self.max_vals * 0) + 1
        data_subjects = self.data_subjects

        return PhiTensor(
            child=data,
            data_subjects=data_subjects,
            min_vals=min_vals,
            max_vals=max_vals,
        )

    else:
        # FIX: was `return NotImplementedError` (returned the class).
        raise NotImplementedError
def __gt__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]:
    """Elementwise greater-than; comparison results are bounded by [0, 1]."""
    if isinstance(other, PhiTensor):
        # Private vs private comparison is delegated to gamma.
        return self.gamma.__gt__(other.gamma)
    elif is_acceptable_simple_type(other):
        comparison = self.child > other
        # Boolean results: lower bound 0, upper bound 1.
        zero_bound = self.min_vals * 0
        one_bound = (self.max_vals * 0) + 1
        return PhiTensor(
            child=comparison,
            data_subjects=self.data_subjects,
            min_vals=zero_bound,
            max_vals=one_bound,
        )
    else:
        raise NotImplementedError  # type: ignore
# Re enable after testing
def dot(
    self, other: Union[PhiTensor, GammaTensor, np.ndarray]
) -> Union[PhiTensor, GammaTensor]:
    """Dot product.

    Public ndarray operands build a PhiTensor with bounds (and
    data_subjects) dotted by the same operand; private/gamma operands
    delegate to gamma (a same-subjects PhiTensor fast-path was disabled —
    see git history).
    """
    if isinstance(other, np.ndarray):
        return PhiTensor(
            child=np.dot(self.child, other),
            min_vals=np.dot(self.min_vals, other),
            max_vals=np.dot(self.max_vals, other),
            data_subjects=np.dot(self.data_subjects, other),
        )
    elif isinstance(other, PhiTensor):
        return self.gamma.dot(other.gamma)
    elif isinstance(other, GammaTensor):
        return self.gamma.dot(other)
    else:
        raise NotImplementedError
def sum(
    self,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    **kwargs: Any,
) -> Union[PhiTensor, GammaTensor]:
    """Sum over `axis`, delegated entirely to the gamma representation.

    (A PhiTensor fast-path for single-subject tensors was disabled —
    see git history.)
    """
    return self.gamma.sum(axis, **kwargs)
def expand_dims(self, axis: int) -> PhiTensor:
    """Insert a new axis at `axis`, resizing the lazy bounds to match."""
    expanded = np.expand_dims(self.child, axis=axis)
    lower = self.min_vals.copy()
    upper = self.max_vals.copy()
    lower.shape = expanded.shape
    upper.shape = expanded.shape
    return PhiTensor(
        child=expanded,
        min_vals=lower,
        max_vals=upper,
        data_subjects=np.expand_dims(self.data_subjects, axis=axis),
    )
def ones_like(
self,
*args: Tuple[Any, ...],
**kwargs: Any,
) -> Union[PhiTensor, GammaTensor]:
# TODO: Add support for axes arguments later
min_vals = self.min_vals.ones_like(*args, **kwargs)
max_vals = self.max_vals.ones_like(*args, **kwargs)
child = (
np.ones_like(self.child, *args, **kwargs)
if isinstance(self.child, np.ndarray)
else self.child.ones_like(*args, **kwargs)
)
return PhiTensor(
child=child,
min_vals=min_vals,
max_vals=max_vals,
data_subjects=self.data_subjects,
)
def __ne__(self, other: Any) -> Union[PhiTensor, GammaTensor]: # type: ignore
# TODO: what about data_subjects and min / max values?
if is_acceptable_simple_type(other) or len(self.child) == len(other.child):
gamma_output = False
if is_acceptable_simple_type(other):
result = self.child != other
else:
# check data_subjects match, if they dont gamma_output = True
#
result = self.child != other.child
if isinstance(result, GammaTensor):
gamma_output = True
if not gamma_output:
return self.copy_with(child=result)
else:
return self.copy_with(child=result).gamma
else:
raise Exception(
"Tensor dims do not match for __eq__: "
+ f"{len(self.child)} != {len(other.child)}"
)
def __neg__(self) -> PhiTensor:
return PhiTensor(
child=self.child * -1,
min_vals=self.max_vals * -1,
max_vals=self.min_vals * -1,
data_subjects=self.data_subjects,
)
def __pos__(self) -> PhiTensor:
return PhiTensor(
child=self.child,
min_vals=self.min_vals,
max_vals=self.max_vals,
data_subjects=self.data_subjects,
)
    def exp(self) -> PhiTensor:
        """Element-wise exponential using the SMPC polynomial approximation.

        The child is transformed with the smpc ``exp`` approximation; the
        min/max bounds are transformed with ``exp_reduction`` below.
        """
        # relative
        from ...smpc.approximations import exp
        def exp_reduction(val: np.ndarray) -> np.ndarray:
            # Maps val -> sign(val) * exp(-|val|) (val == 0 maps to +1).
            # NOTE(review): this is not exp(val); presumably a deliberate
            # bound reduction matched to the approximation — confirm intent.
            pos_index = val >= 0
            neg_index = val < 0
            exp = np.exp((pos_index * val * -1) + (neg_index * val))
            pos_values = (pos_index) * exp
            neg_values = (neg_index) * exp * -1
            return pos_values + neg_values
        min_vals = self.min_vals.copy()
        min_vals.data = np.array(exp_reduction(min_vals.data))
        max_vals = self.max_vals.copy()
        max_vals.data = np.array(exp_reduction(max_vals.data))
        return PhiTensor(
            child=exp(self.child),  # type: ignore
            min_vals=min_vals,
            max_vals=max_vals,
            data_subjects=self.data_subjects,
        )
    def softmax(self) -> PhiTensor:
        """Softmax over the whole tensor using SMPC-friendly approximations.

        The child is shifted by its max for numerical stability, exponentiated
        with the smpc ``exp`` approximation, and normalised with the smpc
        ``reciprocal`` approximation.

        Raises:
            ValueError: if the child's inner data is not a numpy array.
        """
        # relative
        from ...smpc.approximations import exp
        from ...smpc.approximations import reciprocal
        def softmax(val: np.ndarray) -> np.ndarray:
            # Plain (non-approximated) softmax used only to update the bounds.
            # NOTE(review): applied to lazyrepeatarray scalar bound data this
            # always yields 1.0 — confirm the bounds are intended to collapse.
            logits = val - val.max()
            numerator = np.exp(logits)
            inv = 1 / numerator.sum()
            return numerator * inv
        min_vals = self.min_vals.copy()
        min_vals.data = np.array(softmax(min_vals.data))
        max_vals = self.max_vals.copy()
        max_vals.data = np.array(softmax(max_vals.data))
        fpt = self.child.copy()
        if not isinstance(fpt.child, np.ndarray):
            raise ValueError("Softmax currently works only for numpy child")
        fpt.child = fpt.child - fpt.child.max()
        numerator = exp(fpt)
        inv = reciprocal(numerator.sum())  # type: ignore
        return PhiTensor(
            child=numerator * inv,  # type: ignore
            min_vals=min_vals,
            max_vals=max_vals,
            data_subjects=self.data_subjects,
        )
    def reciprocal(self) -> PhiTensor:
        """Element-wise 1/x using the SMPC reciprocal approximation.

        The bounds are replaced by their plain reciprocals.
        """
        # relative
        from ...smpc.approximations import reciprocal
        min_vals = self.min_vals.copy()
        # NOTE(review): 1/min >= 1/max for positive ranges, so min/max are not
        # re-ordered here, and a range crossing zero makes 1/x unbounded —
        # confirm these bounds are intended.
        min_vals.data = np.array(1 / min_vals.data)
        max_vals = self.max_vals.copy()
        max_vals.data = np.array(1 / max_vals.data)
        return PhiTensor(
            child=reciprocal(self.child),
            min_vals=min_vals,
            max_vals=max_vals,
            data_subjects=self.data_subjects,
        )
def one_hot(self) -> PhiTensor:
one_hot_child = self.child.one_hot()
return PhiTensor(
child=one_hot_child,
min_vals=self.min_vals,
max_vals=self.max_vals,
data_subjects=self.data_subjects,
)
def repeat(
self, repeats: Union[int, Tuple[int, ...]], axis: Optional[int] = None
) -> PhiTensor:
"""
Repeat elements of an array.
Parameters
repeats: int or array of ints
The number of repetitions for each element. repeats is broadcasted to fit the shape of the given axis.
axis: int, optional
The axis along which to repeat values. By default, use the flattened input array, and return a flat output array.
Returns
repeated_array: PhiTensor
Output array which has the same shape as a, except along the given axis.
"""
result = self.child.repeat(repeats, axis)
if isinstance(self.min_vals, lazyrepeatarray):
minv = lazyrepeatarray(data=self.min_vals.data.min(), shape=result.shape)
maxv = lazyrepeatarray(data=self.max_vals.data.max(), shape=result.shape)
else:
minv = self.min_vals
maxv = self.max_vals
return PhiTensor(
child=result,
data_subjects=self.data_subjects.repeat(repeats, axis),
min_vals=minv,
max_vals=maxv,
)
    def choose(
        self,
        choices: Sequence[Union[PassthroughTensor, np.ndarray]],
        out: Optional[np.ndarray] = None,
        mode: Optional[str] = "raise",
    ) -> PhiTensor:
        """Construct an array from this index tensor and a list of choice arrays.

        Mirrors :func:`numpy.choose`, using ``self.child`` as the index array.

        Args:
            choices: choice arrays; the index array and all choices must be
                broadcastable to a common shape.
            out: accepted for numpy API compatibility but currently unused —
                the result is always returned as a new PhiTensor.
            mode: how out-of-range indices are treated, as in numpy:
                'raise' (default) raises, 'wrap' applies value mod n,
                'clip' clamps to [0, n-1].

        Returns:
            PhiTensor: the merged result; lazy bounds are collapsed to their
            scalar extremes and re-expanded to the result shape.

        Raises:
            ValueError: if the index and choice arrays are not broadcastable
                to a common shape.
        """
        result = self.child.choose(choices, mode=mode)
        if isinstance(self.min_vals, lazyrepeatarray):
            minv = lazyrepeatarray(data=self.min_vals.data.min(), shape=result.shape)
            maxv = lazyrepeatarray(data=self.max_vals.data.max(), shape=result.shape)
        else:
            minv, maxv = self.min_vals, self.max_vals
        return PhiTensor(
            child=result,
            # NOTE(review): subjects are indexed by the choice arrays via
            # ``take`` rather than by ``self.child`` — confirm this is the
            # intended data-subject propagation for choose().
            data_subjects=self.data_subjects.take(choices),
            min_vals=minv,
            max_vals=maxv,
        )
    def _object2bytes(self) -> bytes:
        """Serialize this PhiTensor to packed Cap'n Proto bytes.

        The child is capnp-serialized when it is a numpy array or scalar
        (``isNumpy`` flag set), otherwise serialized via syft serde; bounds
        and data subjects are serialized into their own fields.
        """
        schema = get_capnp_schema(schema_file="phi_tensor.capnp")
        pt_struct: CapnpModule = schema.PT  # type: ignore
        pt_msg = pt_struct.new_message()
        # this is how we dispatch correct deserialization of bytes
        pt_msg.magicHeader = serde_magic_header(type(self))
        if isinstance(self.child, np.ndarray) or np.isscalar(self.child):
            # Large payloads are split across capnp fields by chunk_bytes.
            chunk_bytes(capnp_serialize(np.array(self.child), to_bytes=True), "child", pt_msg)  # type: ignore
            pt_msg.isNumpy = True
        else:
            chunk_bytes(serialize(self.child, to_bytes=True), "child", pt_msg)  # type: ignore
            pt_msg.isNumpy = False
        pt_msg.minVals = serialize(self.min_vals, to_bytes=True)
        pt_msg.maxVals = serialize(self.max_vals, to_bytes=True)
        # Data subjects are converted to a utf8 numpy array before capnp.
        chunk_bytes(
            capnp_serialize(dslarraytonumpyutf8(self.data_subjects), to_bytes=True),
            "dataSubjects",
            pt_msg,
        )
        # to pack or not to pack?
        # to_bytes = pt_msg.to_bytes()
        return pt_msg.to_bytes_packed()
    @staticmethod
    def _bytes2object(buf: bytes) -> PhiTensor:
        """Deserialize packed Cap'n Proto bytes back into a PhiTensor.

        Inverse of ``_object2bytes``: the ``isNumpy`` flag selects capnp vs
        syft-serde deserialization of the child.
        """
        schema = get_capnp_schema(schema_file="phi_tensor.capnp")
        pt_struct: CapnpModule = schema.PT  # type: ignore
        # https://stackoverflow.com/questions/48458839/capnproto-maximum-filesize
        MAX_TRAVERSAL_LIMIT = 2**64 - 1
        # to pack or not to pack?
        # pt_msg = pt_struct.from_bytes(buf, traversal_limit_in_words=2 ** 64 - 1)
        pt_msg = pt_struct.from_bytes_packed(
            buf, traversal_limit_in_words=MAX_TRAVERSAL_LIMIT
        )
        if pt_msg.isNumpy:
            child = capnp_deserialize(combine_bytes(pt_msg.child), from_bytes=True)
        else:
            child = deserialize(combine_bytes(pt_msg.child), from_bytes=True)
        min_vals = deserialize(pt_msg.minVals, from_bytes=True)
        max_vals = deserialize(pt_msg.maxVals, from_bytes=True)
        # Data subjects were stored as a utf8 numpy array; convert back.
        data_subjects = numpyutf8todslarray(
            capnp_deserialize(combine_bytes(pt_msg.dataSubjects), from_bytes=True)
        )
        return PhiTensor(
            child=child,
            min_vals=min_vals,
            max_vals=max_vals,
            data_subjects=data_subjects,
        )
# added implementation for resize, compress and squeeze
# future
from __future__ import annotations
# stdlib
from collections.abc import Sequence
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
# third party
import numpy as np
from numpy.typing import NDArray
from scipy.ndimage.interpolation import rotate
# relative
from .... import lib
from ....ast.klass import pointerize_args_and_kwargs
from ....core.adp.data_subject_ledger import DataSubjectLedger
from ....core.adp.data_subject_list import DataSubjectArray
from ....core.adp.data_subject_list import dslarraytonumpyutf8
from ....core.adp.data_subject_list import numpyutf8todslarray
from ....core.node.common.action.get_or_set_property_action import (
GetOrSetPropertyAction,
)
from ....core.node.common.action.get_or_set_property_action import PropertyActions
from ....lib.numpy.array import capnp_deserialize
from ....lib.numpy.array import capnp_serialize
from ....lib.python.util import upcast
from ....util import inherit_tags
from ...common.serde.capnp import CapnpModule
from ...common.serde.capnp import chunk_bytes
from ...common.serde.capnp import combine_bytes
from ...common.serde.capnp import get_capnp_schema
from ...common.serde.capnp import serde_magic_header
from ...common.serde.deserialize import _deserialize as deserialize
from ...common.serde.serializable import serializable
from ...common.serde.serialize import _serialize as serialize
from ...common.uid import UID
from ...node.abstract.node import AbstractNodeClient
from ...node.common.action.run_class_method_action import RunClassMethodAction
from ...pointer.pointer import Pointer
from ..broadcastable import is_broadcastable
from ..config import DEFAULT_INT_NUMPY_TYPE
from ..fixed_precision_tensor import FixedPrecisionTensor
from ..lazy_repeat_array import compute_min_max
from ..lazy_repeat_array import lazyrepeatarray
from ..passthrough import AcceptableSimpleType # type: ignore
from ..passthrough import PassthroughTensor # type: ignore
from ..passthrough import SupportedChainType # type: ignore
from ..passthrough import is_acceptable_simple_type # type: ignore
from ..smpc import utils
from ..smpc.mpc_tensor import MPCTensor
from ..smpc.utils import TYPE_TO_RING_SIZE
from ..util import implements
from .adp_tensor import ADPTensor
from .gamma_tensor import GammaTensor
from .gamma_tensor import TensorWrappedGammaTensorPointer
@serializable(recursive_serde=True)
class TensorWrappedPhiTensorPointer(Pointer, PassthroughTensor):
    """Client-side pointer to a remote PhiTensor.

    Carries the public metadata (bounds, data subjects, public shape/dtype)
    locally so operations can be dispatched without touching private data.
    """
    __name__ = "TensorWrappedPhiTensorPointer"
    __module__ = "syft.core.tensor.autodp.phi_tensor"
    # Attributes included in recursive serde.
    __attr_allowlist__ = [
        # default pointer attrs
        "client",
        "id_at_location",
        "object_type",
        "tags",
        "description",
        # phi_tensor attrs
        "data_subjects",
        "min_vals",
        "max_vals",
        "public_dtype",
        "public_shape",
    ]
    # Per-attribute (serialize, deserialize) overrides for serde.
    __serde_overrides__ = {
        "client": [lambda x: x.address, lambda y: y],
        "public_shape": [lambda x: x, lambda y: upcast(y)],
        "data_subjects": [dslarraytonumpyutf8, numpyutf8todslarray],
    }
    _exhausted = False
    is_enum = False
    def __init__(
        self,
        data_subjects: np.ndarray,
        min_vals: np.typing.ArrayLike,
        max_vals: np.typing.ArrayLike,
        client: Any,
        id_at_location: Optional[UID] = None,
        object_type: str = "",
        tags: Optional[List[str]] = None,
        description: str = "",
        public_shape: Optional[Tuple[int, ...]] = None,
        public_dtype: Optional[np.dtype] = None,
    ):
        """Store pointer metadata plus the PhiTensor's public bounds/subjects.

        Args:
            data_subjects: per-element data subject array for the remote tensor.
            min_vals / max_vals: public lower/upper bounds of the remote data.
            client: the domain client holding the remote tensor.
            id_at_location: UID of the remote object.
            object_type / tags / description: standard Pointer metadata.
            public_shape / public_dtype: publicly known shape and dtype, if any.
        """
        super().__init__(
            client=client,
            id_at_location=id_at_location,
            object_type=object_type,
            tags=tags,
            description=description,
        )
        self.min_vals = min_vals
        self.max_vals = max_vals
        self.data_subjects = data_subjects
        self.public_shape = public_shape
        self.public_dtype = public_dtype
    # TODO: Modify for large arrays
    @property
    def synthetic(self) -> np.ndarray:
        """Random data uniformly drawn within [min_vals, max_vals].

        Used to display an imitation of the private data (see ``__repr__``);
        requires ``public_shape`` and the bounds to be known.
        """
        # Some dtypes wrap a concrete numpy dtype behind .upcast().
        public_dtype_func = getattr(
            self.public_dtype, "upcast", lambda: self.public_dtype
        )
        return (
            np.random.rand(*list(self.public_shape))  # type: ignore
            * (self.max_vals.to_numpy() - self.min_vals.to_numpy())
            + self.min_vals.to_numpy()
        ).astype(public_dtype_func())
def __repr__(self) -> str:
return (
self.synthetic.__repr__()
+ "\n\n (The data printed above is synthetic - it's an imitation of the real data.)"
)
    def share(self, *parties: Tuple[AbstractNodeClient, ...]) -> MPCTensor:
        """Secret-share this tensor across ``parties`` plus its own client.

        Args:
            parties: the other domain clients participating in the share.

        Returns:
            MPCTensor wrapping this pointer, with ring size derived from the
            public dtype when available.
        """
        all_parties = list(parties) + [self.client]
        ring_size = TYPE_TO_RING_SIZE.get(self.public_dtype, None)
        self_mpc = MPCTensor(
            secret=self,
            shape=self.public_shape,
            ring_size=ring_size,
            parties=all_parties,
        )
        return self_mpc
@property
def shape(self) -> Optional[Tuple[int, ...]]:
if hasattr(self, "public_shape"):
return self.public_shape
else:
return None
def _apply_tensor_op(self, other: Any, op_str: str) -> Any:
# we want to get the return type which matches the attr_path_and_name
# so we ask lib_ast for the return type name that matches out
# attr_path_and_name and then use that to get the actual pointer klass
# then set the result to that pointer klass
# We always maintain a Tensor hierarchy Tensor ---> PT--> Actual Data
attr_path_and_name = f"syft.core.tensor.tensor.Tensor.{op_str}"
min_vals, max_vals = compute_min_max(
self.min_vals, self.max_vals, other, op_str
)
result = TensorWrappedPhiTensorPointer(
data_subjects=self.data_subjects,
min_vals=min_vals,
max_vals=max_vals,
client=self.client,
)
# QUESTION can the id_at_location be None?
result_id_at_location = getattr(result, "id_at_location", None)
if result_id_at_location is not None:
# first downcast anything primitive which is not already PyPrimitive
(
downcast_args,
downcast_kwargs,
) = lib.python.util.downcast_args_and_kwargs(args=[other], kwargs={})
# then we convert anything which isnt a pointer into a pointer
pointer_args, pointer_kwargs = pointerize_args_and_kwargs(
args=downcast_args,
kwargs=downcast_kwargs,
client=self.client,
gc_enabled=False,
)
cmd = RunClassMethodAction(
path=attr_path_and_name,
_self=self,
args=pointer_args,
kwargs=pointer_kwargs,
id_at_location=result_id_at_location,
address=self.client.address,
)
self.client.send_immediate_msg_without_reply(msg=cmd)
inherit_tags(
attr_path_and_name=attr_path_and_name,
result=result,
self_obj=self,
args=[other],
kwargs={},
)
result_public_shape = None
if isinstance(other, TensorWrappedPhiTensorPointer):
other_shape = other.public_shape
other_dtype = other.public_dtype
elif isinstance(other, (int, float)):
other_shape = (1,)
other_dtype = DEFAULT_INT_NUMPY_TYPE
elif isinstance(other, bool):
other_shape = (1,)
other_dtype = np.dtype("bool")
elif isinstance(other, np.ndarray):
other_shape = other.shape
other_dtype = other.dtype
else:
raise ValueError(
f"Invalid Type for TensorWrappedPhiTensorPointer:{type(other)}"
)
if self.public_shape is not None and other_shape is not None:
result_public_shape = utils.get_shape(
op_str, self.public_shape, other_shape
)
if self.public_dtype is None or other_dtype is None:
if self.public_dtype != other_dtype:
raise ValueError(
f"Dtype for self: {self.public_dtype} and other :{other_dtype} should not be None"
)
result_public_dtype = self.public_dtype
result.public_shape = result_public_shape
result.public_dtype = result_public_dtype
return result
    @property
    def gamma(self) -> TensorWrappedGammaTensorPointer:
        """View this phi pointer as a gamma pointer (same remote object).

        Reuses the same ``id_at_location`` — no remote work is performed;
        only the local pointer type changes.
        """
        return TensorWrappedGammaTensorPointer(
            data_subjects=self.data_subjects,
            client=self.client,
            id_at_location=self.id_at_location,
            object_type=self.object_type,
            tags=self.tags,
            description=self.description,
            min_vals=self.min_vals,
            max_vals=self.max_vals,
            public_shape=getattr(self, "public_shape", None),
            public_dtype=getattr(self, "public_dtype", None),
        )
@staticmethod
def _apply_op(
self: TensorWrappedPhiTensorPointer,
other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
op_str: str,
) -> Union[MPCTensor, TensorWrappedPhiTensorPointer]:
"""Performs the operation based on op_str
Args:
other (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]): second operand.
Returns:
Tuple[MPCTensor,Union[MPCTensor,int,float,np.ndarray]] : Result of the operation
"""
if isinstance(other, TensorWrappedPhiTensorPointer):
if (self.data_subjects != other.data_subjects).all(): # type: ignore
return getattr(self.gamma, op_str)(other.gamma)
elif isinstance(other, TensorWrappedGammaTensorPointer):
return getattr(self.gamma, op_str)(other)
if (
isinstance(other, TensorWrappedPhiTensorPointer)
and self.client != other.client
):
parties = [self.client, other.client]
self_mpc = MPCTensor(secret=self, shape=self.public_shape, parties=parties)
other_mpc = MPCTensor(
secret=other, shape=other.public_shape, parties=parties
)
return getattr(self_mpc, op_str)(other_mpc)
elif isinstance(other, MPCTensor):
return getattr(other, op_str)(self)
elif is_acceptable_simple_type(other) or isinstance(
other, TensorWrappedPhiTensorPointer
):
return self._apply_tensor_op(other=other, op_str=op_str)
else:
print("Type is unsupported:" + str(type(other)))
raise NotImplementedError
    def __add__(
        self,
        other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
    ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
        """Apply the "add" operation between "self" and "other"

        Args:
            other (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.

        Returns:
            Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
        """
        return TensorWrappedPhiTensorPointer._apply_op(self, other, "__add__")
    def __sub__(
        self,
        other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
    ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
        """Apply the "sub" operation between "self" and "other"

        Args:
            other (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.

        Returns:
            Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
        """
        return TensorWrappedPhiTensorPointer._apply_op(self, other, "__sub__")
    def __mul__(
        self,
        other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
    ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
        """Apply the "mul" operation between "self" and "other"

        Args:
            other (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.

        Returns:
            Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
        """
        return TensorWrappedPhiTensorPointer._apply_op(self, other, "__mul__")
    def __matmul__(
        self,
        other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
    ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
        """Apply the "matmul" operation between "self" and "other"

        Args:
            other (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.

        Returns:
            Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
        """
        return TensorWrappedPhiTensorPointer._apply_op(self, other, "__matmul__")
    def __rmatmul__(
        self,
        other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
    ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
        """Apply the reflected "matmul" operation between "self" and "other"

        Args:
            other (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.

        Returns:
            Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
        """
        return TensorWrappedPhiTensorPointer._apply_op(self, other, "__rmatmul__")
    def __lt__(
        self,
        other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
    ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
        """Apply the "lt" operation between "self" and "other"

        Args:
            other (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.

        Returns:
            Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
        """
        return TensorWrappedPhiTensorPointer._apply_op(self, other, "__lt__")
    def __gt__(
        self,
        other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
    ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
        """Apply the "gt" operation between "self" and "other"

        Args:
            other (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.

        Returns:
            Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
        """
        return TensorWrappedPhiTensorPointer._apply_op(self, other, "__gt__")
    def __ge__(
        self,
        other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
    ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
        """Apply the "ge" operation between "self" and "other"

        Args:
            other (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.

        Returns:
            Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
        """
        return TensorWrappedPhiTensorPointer._apply_op(self, other, "__ge__")
    def __le__(
        self,
        other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
    ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
        """Apply the "le" operation between "self" and "other"

        Args:
            other (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.

        Returns:
            Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
        """
        return TensorWrappedPhiTensorPointer._apply_op(self, other, "__le__")
    def __eq__(  # type: ignore
        self,
        other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
    ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
        """Apply the "eq" operation between "self" and "other"

        Args:
            other (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.

        Returns:
            Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
        """
        return TensorWrappedPhiTensorPointer._apply_op(self, other, "__eq__")
    def __ne__(  # type: ignore
        self,
        other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
    ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
        """Apply the "ne" operation between "self" and "other"

        Args:
            other (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.

        Returns:
            Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
        """
        return TensorWrappedPhiTensorPointer._apply_op(self, other, "__ne__")
    def concatenate(
        self,
        other: TensorWrappedPhiTensorPointer,
        *args: List[Any],
        **kwargs: Dict[str, Any],
    ) -> MPCTensor:
        """Concatenate "self" with "other" via MPC.

        Args:
            other (TensorWrappedPhiTensorPointer) : the tensor pointer to
                concatenate with; must live on a different client.

        Returns:
            MPCTensor : the secret-shared concatenation result.

        Raises:
            ValueError: if ``other`` is not a TensorWrappedPhiTensorPointer,
                or if both operands live on the same client (unsupported).
        """
        if not isinstance(other, TensorWrappedPhiTensorPointer):
            raise ValueError(
                f"Concatenate works only for TensorWrappedPhiTensorPointer got type: {type(other)}"
            )
        if self.client != other.client:
            parties = [self.client, other.client]
            self_mpc = MPCTensor(secret=self, shape=self.public_shape, parties=parties)
            other_mpc = MPCTensor(
                secret=other, shape=other.public_shape, parties=parties
            )
            return self_mpc.concatenate(other_mpc, *args, **kwargs)
        else:
            raise ValueError(
                "Concatenate method currently works only between two different clients."
            )
    def __truediv__(
        self,
        other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],
    ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
        """Apply the "truediv" operation between "self" and "other"

        Args:
            other (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand.

        Returns:
            Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation.
        """
        return TensorWrappedPhiTensorPointer._apply_op(self, other, "__truediv__")
    def sum(
        self,
        *args: Tuple[Any, ...],
        **kwargs: Any,
    ) -> Union[
        TensorWrappedPhiTensorPointer, MPCTensor, TensorWrappedGammaTensorPointer
    ]:
        """Sum the remote tensor's elements by delegating to the gamma pointer.

        Args:
            *args / **kwargs: forwarded to the gamma pointer's ``sum``.

        Returns:
            The pointer resulting from the remote sum.
        """
        return self.gamma.sum(*args, **kwargs)
    def __getitem__(
        self, key: Union[int, bool, slice]
    ) -> TensorWrappedPhiTensorPointer:
        """Apply the slice/index operation on the remote tensor.

        Args:
            key (Union[int,bool,slice]) : index or slice to apply.

        Returns:
            TensorWrappedPhiTensorPointer : pointer to the sliced result; its
            public shape is derived locally by slicing a dummy array.
        """
        attr_path_and_name = "syft.core.tensor.tensor.Tensor.__getitem__"
        result: TensorWrappedPhiTensorPointer
        # Bounds can be sliced locally — they are public metadata.
        min_vals = self.min_vals.__getitem__(key)
        max_vals = self.max_vals.__getitem__(key)
        result = TensorWrappedPhiTensorPointer(
            data_subjects=self.data_subjects,
            min_vals=min_vals,
            max_vals=max_vals,
            client=self.client,
        )
        # QUESTION can the id_at_location be None?
        result_id_at_location = getattr(result, "id_at_location", None)
        if result_id_at_location is not None:
            # first downcast anything primitive which is not already PyPrimitive
            (
                downcast_args,
                downcast_kwargs,
            ) = lib.python.util.downcast_args_and_kwargs(args=[key], kwargs={})
            # then we convert anything which isnt a pointer into a pointer
            pointer_args, pointer_kwargs = pointerize_args_and_kwargs(
                args=downcast_args,
                kwargs=downcast_kwargs,
                client=self.client,
                gc_enabled=False,
            )
            cmd = RunClassMethodAction(
                path=attr_path_and_name,
                _self=self,
                args=pointer_args,
                kwargs=pointer_kwargs,
                id_at_location=result_id_at_location,
                address=self.client.address,
            )
            self.client.send_immediate_msg_without_reply(msg=cmd)
        inherit_tags(
            attr_path_and_name=attr_path_and_name,
            result=result,
            self_obj=self,
            args=[key],
            kwargs={},
        )
        # Derive the result's public shape by slicing an empty local array.
        dummy_res = np.empty(self.public_shape).__getitem__(key)
        result.public_shape = dummy_res.shape
        result.public_dtype = self.public_dtype
        return result
    def ones_like(
        self,
        *args: Tuple[Any, ...],
        **kwargs: Any,
    ) -> TensorWrappedPhiTensorPointer:
        """Create a remote tensor of ones with the same shape as this one.

        Args:
            *args / **kwargs: forwarded to the remote ``ones_like``.

        Returns:
            TensorWrappedPhiTensorPointer : pointer to the ones tensor.
        """
        attr_path_and_name = "syft.core.tensor.tensor.Tensor.ones_like"
        result: TensorWrappedPhiTensorPointer
        min_vals = self.min_vals.ones_like(*args, **kwargs)
        max_vals = self.max_vals.ones_like(*args, **kwargs)
        result = TensorWrappedPhiTensorPointer(
            data_subjects=self.data_subjects,
            min_vals=min_vals,
            max_vals=max_vals,
            client=self.client,
        )
        # QUESTION can the id_at_location be None?
        result_id_at_location = getattr(result, "id_at_location", None)
        if result_id_at_location is not None:
            # first downcast anything primitive which is not already PyPrimitive
            (
                downcast_args,
                downcast_kwargs,
            ) = lib.python.util.downcast_args_and_kwargs(args=args, kwargs=kwargs)
            # then we convert anything which isnt a pointer into a pointer
            pointer_args, pointer_kwargs = pointerize_args_and_kwargs(
                args=downcast_args,
                kwargs=downcast_kwargs,
                client=self.client,
                gc_enabled=False,
            )
            cmd = RunClassMethodAction(
                path=attr_path_and_name,
                _self=self,
                args=pointer_args,
                kwargs=pointer_kwargs,
                id_at_location=result_id_at_location,
                address=self.client.address,
            )
            self.client.send_immediate_msg_without_reply(msg=cmd)
        inherit_tags(
            attr_path_and_name=attr_path_and_name,
            result=result,
            self_obj=self,
            args=[],
            kwargs={},
        )
        # Derive the public shape locally from a dummy ones_like call.
        dummy_res = np.ones_like(np.empty(self.public_shape), *args, **kwargs)
        result.public_shape = dummy_res.shape
        result.public_dtype = self.public_dtype
        return result
    def exp(
        self,
    ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
        """Apply the element-wise "exp" operation to the remote tensor.

        Returns:
            Union[TensorWrappedPhiTensorPointer,MPCTensor] : pointer to the
            result; local bounds are updated via ``exp_reduction`` below.
        """
        attr_path_and_name = "syft.core.tensor.tensor.Tensor.exp"
        # TODO: should modify to log reduction.
        def exp_reduction(val: np.ndarray) -> np.ndarray:
            # Maps val -> sign(val) * exp(-|val|) (val == 0 maps to +1).
            # NOTE(review): this is not exp(val); presumably a deliberate
            # bound reduction — confirm intent.
            pos_index = val >= 0
            neg_index = val < 0
            exp = np.exp((pos_index * val * -1) + (neg_index * val))
            pos_values = (pos_index) * exp
            neg_values = (neg_index) * exp * -1
            return pos_values + neg_values
        min_vals = self.min_vals.copy()
        min_vals.data = np.array(exp_reduction(min_vals.data))
        max_vals = self.max_vals.copy()
        max_vals.data = np.array(exp_reduction(max_vals.data))
        result = TensorWrappedPhiTensorPointer(
            data_subjects=self.data_subjects,
            min_vals=min_vals,
            max_vals=max_vals,
            client=self.client,
        )
        # QUESTION can the id_at_location be None?
        result_id_at_location = getattr(result, "id_at_location", None)
        if result_id_at_location is not None:
            # first downcast anything primitive which is not already PyPrimitive
            (
                downcast_args,
                downcast_kwargs,
            ) = lib.python.util.downcast_args_and_kwargs(args=[], kwargs={})
            # then we convert anything which isnt a pointer into a pointer
            pointer_args, pointer_kwargs = pointerize_args_and_kwargs(
                args=downcast_args,
                kwargs=downcast_kwargs,
                client=self.client,
                gc_enabled=False,
            )
            cmd = RunClassMethodAction(
                path=attr_path_and_name,
                _self=self,
                args=pointer_args,
                kwargs=pointer_kwargs,
                id_at_location=result_id_at_location,
                address=self.client.address,
            )
            self.client.send_immediate_msg_without_reply(msg=cmd)
        inherit_tags(
            attr_path_and_name=attr_path_and_name,
            result=result,
            self_obj=self,
            args=[],
            kwargs={},
        )
        result.public_shape = self.public_shape
        result.public_dtype = self.public_dtype
        return result
    def reciprocal(
        self,
    ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
        """Apply the element-wise "reciprocal" operation to the remote tensor.

        Returns:
            Union[TensorWrappedPhiTensorPointer,MPCTensor] : pointer to the
            result; local bounds are replaced by their plain reciprocals.
        """
        attr_path_and_name = "syft.core.tensor.tensor.Tensor.reciprocal"
        min_vals = self.min_vals.copy()
        # NOTE(review): 1/min >= 1/max for positive ranges, so min/max are not
        # re-ordered here — confirm these bounds are intended.
        min_vals.data = np.array(1 / min_vals.data)
        max_vals = self.max_vals.copy()
        max_vals.data = np.array(1 / max_vals.data)
        result = TensorWrappedPhiTensorPointer(
            data_subjects=self.data_subjects,
            min_vals=min_vals,
            max_vals=max_vals,
            client=self.client,
        )
        # QUESTION can the id_at_location be None?
        result_id_at_location = getattr(result, "id_at_location", None)
        if result_id_at_location is not None:
            # first downcast anything primitive which is not already PyPrimitive
            (
                downcast_args,
                downcast_kwargs,
            ) = lib.python.util.downcast_args_and_kwargs(args=[], kwargs={})
            # then we convert anything which isnt a pointer into a pointer
            pointer_args, pointer_kwargs = pointerize_args_and_kwargs(
                args=downcast_args,
                kwargs=downcast_kwargs,
                client=self.client,
                gc_enabled=False,
            )
            cmd = RunClassMethodAction(
                path=attr_path_and_name,
                _self=self,
                args=pointer_args,
                kwargs=pointer_kwargs,
                id_at_location=result_id_at_location,
                address=self.client.address,
            )
            self.client.send_immediate_msg_without_reply(msg=cmd)
        inherit_tags(
            attr_path_and_name=attr_path_and_name,
            result=result,
            self_obj=self,
            args=[],
            kwargs={},
        )
        result.public_shape = self.public_shape
        result.public_dtype = self.public_dtype
        return result
    def softmax(
        self,
    ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:
        """Apply the "softmax" operation on the remote tensor.

        Returns:
            Union[TensorWrappedPhiTensorPointer,MPCTensor] : pointer to the
            result; local bounds are updated by the plain softmax below.
        """
        attr_path_and_name = "syft.core.tensor.tensor.Tensor.softmax"
        # TODO: should modify to log reduction.
        def softmax(val: np.ndarray) -> np.ndarray:
            # NOTE(review): applied to lazyrepeatarray scalar bound data this
            # always yields 1.0 — confirm the bounds are intended to collapse.
            logits = val - val.max()
            numerator = np.exp(logits)
            inv = 1 / numerator.sum()
            return numerator * inv
        min_vals = self.min_vals.copy()
        min_vals.data = np.array(softmax(min_vals.data))
        max_vals = self.max_vals.copy()
        max_vals.data = np.array(softmax(max_vals.data))
        result = TensorWrappedPhiTensorPointer(
            data_subjects=self.data_subjects,
            min_vals=min_vals,
            max_vals=max_vals,
            client=self.client,
        )
        # QUESTION can the id_at_location be None?
        result_id_at_location = getattr(result, "id_at_location", None)
        if result_id_at_location is not None:
            # first downcast anything primitive which is not already PyPrimitive
            (
                downcast_args,
                downcast_kwargs,
            ) = lib.python.util.downcast_args_and_kwargs(args=[], kwargs={})
            # then we convert anything which isnt a pointer into a pointer
            pointer_args, pointer_kwargs = pointerize_args_and_kwargs(
                args=downcast_args,
                kwargs=downcast_kwargs,
                client=self.client,
                gc_enabled=False,
            )
            cmd = RunClassMethodAction(
                path=attr_path_and_name,
                _self=self,
                args=pointer_args,
                kwargs=pointer_kwargs,
                id_at_location=result_id_at_location,
                address=self.client.address,
            )
            self.client.send_immediate_msg_without_reply(msg=cmd)
        inherit_tags(
            attr_path_and_name=attr_path_and_name,
            result=result,
            self_obj=self,
            args=[],
            kwargs={},
        )
        result.public_shape = self.public_shape
        result.public_dtype = self.public_dtype
        return result
@property
def T(self) -> TensorWrappedPhiTensorPointer:
    """Pointer-level transpose: transposes bound metadata locally and
    schedules the real transpose on the remote node via a GET property action.
    """
    # We always maintain a Tensor hierarchy Tensor ---> PT--> Actual Data
    attr_path_and_name = "syft.core.tensor.tensor.Tensor.T"

    result = TensorWrappedPhiTensorPointer(
        data_subjects=self.data_subjects,
        min_vals=self.min_vals.transpose(),
        max_vals=self.max_vals.transpose(),
        client=self.client,
    )

    # QUESTION can the id_at_location be None?
    result_id_at_location = getattr(result, "id_at_location", None)

    if result_id_at_location is not None:
        # first downcast anything primitive which is not already PyPrimitive
        (
            downcast_args,
            downcast_kwargs,
        ) = lib.python.util.downcast_args_and_kwargs(args=[], kwargs={})

        # then we convert anything which isnt a pointer into a pointer
        pointer_args, pointer_kwargs = pointerize_args_and_kwargs(
            args=downcast_args,
            kwargs=downcast_kwargs,
            client=self.client,
            gc_enabled=False,
        )

        # Property access (not a method call) — GET action on the remote node.
        cmd = GetOrSetPropertyAction(
            path=attr_path_and_name,
            id_at_location=result_id_at_location,
            address=self.client.address,
            _self=self,
            args=pointer_args,
            kwargs=pointer_kwargs,
            action=PropertyActions.GET,
            map_to_dyn=False,
        )
        self.client.send_immediate_msg_without_reply(msg=cmd)

    inherit_tags(
        attr_path_and_name=attr_path_and_name,
        result=result,
        self_obj=self,
        args=[],
        kwargs={},
    )

    # Compute the transposed public shape cheaply via an uninitialized array.
    result_public_shape = np.empty(self.public_shape).T.shape

    result.public_shape = result_public_shape
    result.public_dtype = self.public_dtype

    return result
def one_hot(self: TensorWrappedPhiTensorPointer) -> np.array:
    """Schedule a remote one-hot encoding of this tensor.

    The local work only computes the resulting public shape:
    (max_vals.data[0] + 1, tensor_size) after the transpose below.

    NOTE(review): assumes max_vals.data is indexable and that its first
    entry is the largest class label — TODO confirm.
    NOTE(review): min_vals/max_vals are forwarded unchanged even though a
    one-hot output is bounded by [0, 1] — TODO confirm intended.
    """
    tensor_size = np.empty(self.public_shape).size
    one_hot_Y = np.zeros((tensor_size, self.max_vals.data[0] + 1))
    one_hot_Y = one_hot_Y.T

    attr_path_and_name = "syft.core.tensor.tensor.Tensor.one_hot"

    result = TensorWrappedPhiTensorPointer(
        data_subjects=self.data_subjects,
        min_vals=self.min_vals,
        max_vals=self.max_vals,
        client=self.client,
    )

    # QUESTION can the id_at_location be None?
    result_id_at_location = getattr(result, "id_at_location", None)

    if result_id_at_location is not None:
        # first downcast anything primitive which is not already PyPrimitive
        (
            downcast_args,
            downcast_kwargs,
        ) = lib.python.util.downcast_args_and_kwargs(args=[], kwargs={})

        # then we convert anything which isnt a pointer into a pointer
        pointer_args, pointer_kwargs = pointerize_args_and_kwargs(
            args=downcast_args,
            kwargs=downcast_kwargs,
            client=self.client,
            gc_enabled=False,
        )

        # Fire-and-forget remote execution of Tensor.one_hot.
        cmd = RunClassMethodAction(
            path=attr_path_and_name,
            _self=self,
            args=pointer_args,
            kwargs=pointer_kwargs,
            id_at_location=result_id_at_location,
            address=self.client.address,
        )
        self.client.send_immediate_msg_without_reply(msg=cmd)

    inherit_tags(
        attr_path_and_name=attr_path_and_name,
        result=result,
        self_obj=self,
        args=[],
        kwargs={},
    )

    result.public_shape = one_hot_Y.shape
    result.public_dtype = self.public_dtype

    return result
def to_local_object_without_private_data_child(self) -> PhiTensor:
    """Convert this pointer into a partial version of the PhiTensor but without
    any of the private data therein."""
    # relative
    from ..tensor import Tensor

    # Placeholder data with the right shape stands in for the private child.
    placeholder = np.empty(self.data_subjects.shape)
    inner = PhiTensor(
        child=FixedPrecisionTensor(value=placeholder),
        data_subjects=self.data_subjects,
        min_vals=self.min_vals,  # type: ignore
        max_vals=self.max_vals,  # type: ignore
    )
    return Tensor(
        child=inner,
        public_shape=getattr(self, "public_shape", None),
        public_dtype=getattr(self, "public_dtype", None),
    )
@implements(TensorWrappedPhiTensorPointer, np.ones_like)
def ones_like(
    tensor: TensorWrappedPhiTensorPointer,
    *args: Tuple[Any, ...],
    **kwargs: Dict[Any, Any],
) -> TensorWrappedPhiTensorPointer:
    """Dispatch ``np.ones_like(pointer)`` to the pointer's own ``ones_like``."""
    return tensor.ones_like(*args, **kwargs)
@serializable(capnp_bytes=True)
class PhiTensor(PassthroughTensor, ADPTensor):
    """DP tensor wrapping private ``child`` data together with per-element
    ``data_subjects`` and ``min_vals``/``max_vals`` bound metadata."""

    PointerClassOverride = TensorWrappedPhiTensorPointer
    # __attr_allowlist__ = ["child", "min_vals", "max_vals", "data_subjects"]
    # __slots__ avoids a per-instance __dict__ for these heavily-used fields.
    __slots__ = (
        "child",
        "min_vals",
        "max_vals",
        "data_subjects",
    )
def __init__(
    self,
    child: Union[Sequence, NDArray],
    data_subjects: Union[DataSubjectArray, NDArray],
    min_vals: Union[np.ndarray, lazyrepeatarray],
    max_vals: Union[np.ndarray, lazyrepeatarray],
) -> None:
    """Create a PhiTensor.

    Args:
        child: the actual private data.
        data_subjects: per-element data subject ownership; coerced to a
            DataSubjectArray ndarray and required to match child's shape.
        min_vals / max_vals: element-wise bounds; coerced to
            lazyrepeatarray matching child's shape when given as raw arrays.

    Raises:
        ValueError: when the data_subjects shape differs from the data shape.
    """
    # self.data_subjects: Union[DataSubjectList, np.ndarray]
    # child = the actual private data
    super().__init__(child)

    # lazyrepeatarray matching the shape of child
    if not isinstance(min_vals, lazyrepeatarray):
        min_vals = lazyrepeatarray(data=min_vals, shape=child.shape)  # type: ignore
    if not isinstance(max_vals, lazyrepeatarray):
        max_vals = lazyrepeatarray(data=max_vals, shape=child.shape)  # type: ignore
    self.min_vals = min_vals
    self.max_vals = max_vals

    numpy_data_subjects: np.ndarray = DataSubjectArray.from_objs(data_subjects)
    self.data_subjects = numpy_data_subjects
    if numpy_data_subjects.shape != self.shape:
        raise ValueError(
            f"DataSubjects shape: {numpy_data_subjects.shape} should match data shape: {self.shape}"
        )
@property
def proxy_public_kwargs(self) -> Dict[str, Any]:
    """Public (non-private) metadata forwarded when building a pointer proxy."""
    return dict(
        min_vals=self.min_vals,
        max_vals=self.max_vals,
        data_subjects=self.data_subjects,
    )
# def init_pointer(
# self,
# client: Any,
# id_at_location: Optional[UID] = None,
# object_type: str = "",
# tags: Optional[List[str]] = None,
# description: str = "",
# ) -> TensorWrappedPhiTensorPointer:
# return TensorWrappedPhiTensorPointer(
# # Arguments specifically for SEPhiTensor
# data_subjects=self.data_subjects,
# min_vals=self.min_vals,
# max_vals=self.max_vals,
# # Arguments required for a Pointer to work
# client=client,
# id_at_location=id_at_location,
# object_type=object_type,
# tags=tags,
# description=description,
# )
@property
def gamma(self) -> GammaTensor:
    """Property to cast this tensor into a GammaTensor (delegates to create_gamma)."""
    return self.create_gamma()
def copy(self, order: Optional[str] = "K") -> PhiTensor:
    """Return a copy of this tensor (child, bounds, and data subjects)."""
    duplicate = PhiTensor(
        child=self.child.copy(order=order),
        data_subjects=self.data_subjects.copy(),
        min_vals=self.min_vals.copy(order=order),
        max_vals=self.max_vals.copy(order=order),
    )
    return duplicate
def all(self) -> bool:
    """True iff every element of the private data is truthy."""
    return self.child.all()
def any(self) -> bool:
    """True iff at least one element of the private data is truthy."""
    return self.child.any()
def copy_with(self, child: np.ndarray) -> PhiTensor:
    """Return a copy of this tensor whose child is replaced by ``child``."""
    clone = self.copy()
    clone.child = child
    return clone
def __getitem__(self, item: Union[str, int, slice, PassthroughTensor]) -> PhiTensor:
    """Index into the private data, returning a PhiTensor over the selection.

    Improvement: the two original branches duplicated identical construction
    logic; the index is normalized once (unwrapping a PassthroughTensor's
    child) and a single construction path is used.

    NOTE(review): min/max bounds are built from the selected *private* data
    itself rather than from self.min_vals/max_vals — this mirrors the
    original behavior but looks data-dependent; TODO confirm intended.
    """
    key = item.child if isinstance(item, PassthroughTensor) else item
    data = self.child[key]
    return PhiTensor(
        child=data,
        min_vals=lazyrepeatarray(data=data, shape=data.shape),
        max_vals=lazyrepeatarray(data=data, shape=data.shape),
        data_subjects=self.data_subjects[key],
    )
def zeros_like(
    self,
    *args: Tuple[Any, ...],
    **kwargs: Any,
) -> Union[PhiTensor, GammaTensor]:
    """Return a tensor of zeros with the same shape/metadata as self.

    Bug fix: the non-ndarray fallback previously called
    ``self.child.ones_like`` — it now calls ``zeros_like`` as the method
    name promises (compare ``ones_like`` below, which uses ``ones_like``
    in both arms).
    """
    # TODO: Add support for axes arguments later
    min_vals = self.min_vals.zeros_like(*args, **kwargs)
    max_vals = self.max_vals.zeros_like(*args, **kwargs)
    child = (
        np.zeros_like(self.child, *args, **kwargs)
        if isinstance(self.child, np.ndarray)
        else self.child.zeros_like(*args, **kwargs)
    )
    return PhiTensor(
        child=child,
        min_vals=min_vals,
        max_vals=max_vals,
        data_subjects=self.data_subjects,
    )
def __setitem__(
    self,
    key: Union[int, slice, NDArray],
    value: Union[PhiTensor, GammaTensor, np.ndarray],
) -> Union[PhiTensor, GammaTensor]:
    """Assign ``value`` into the private data at ``key``.

    Side effects: mutates ``self.child`` in place and widens
    ``self.min_vals``/``self.max_vals`` when the assigned values fall
    outside the current bounds.

    Returns:
        A GammaTensor (for PhiTensor/GammaTensor values) or a PhiTensor
        sharing this tensor's (already mutated) child (for ndarray values).

    NOTE(review): data_subjects are not updated for the assigned region —
    TODO confirm the returned gamma handles ownership correctly.
    """
    if isinstance(value, PhiTensor):
        self.child[key] = value.child
        minv = value.child.min()
        maxv = value.child.max()

        # widen bounds so they still cover the newly written values
        if minv < self.min_vals.data.min():
            self.min_vals.data = minv

        if maxv > self.max_vals.data.max():
            self.max_vals.data = maxv

        gamma_output = self.gamma
        gamma_output[key] = value.gamma
        # print("It's on the right track")
        return gamma_output

    elif isinstance(value, GammaTensor):
        gamma = self.gamma
        gamma[key] = value
        return gamma
    elif isinstance(value, np.ndarray):
        self.child[key] = value
        minv = value.min()
        maxv = value.max()

        # same bound-widening as the PhiTensor branch above
        if minv < self.min_vals.data.min():
            self.min_vals.data = minv

        if maxv > self.max_vals.data.max():
            self.max_vals.data = maxv
        return PhiTensor(
            child=self.child,
            data_subjects=self.data_subjects,
            min_vals=self.min_vals,
            max_vals=self.max_vals,
        )
    else:
        raise NotImplementedError
def abs(self) -> PhiTensor:
    """Return the element-wise absolute value as a new PhiTensor.

    Bug fix: the original passed ``np.abs(self.min_vals.data)`` for BOTH
    min_vals and max_vals. The upper bound now uses max_vals, and the
    bounds are computed correctly for ranges that span zero:
    |x| in [0, max(|lo|,|hi|)] when lo <= 0 <= hi, otherwise
    [min(|lo|,|hi|), max(|lo|,|hi|)].
    """
    lo = self.min_vals.data
    hi = self.max_vals.data
    abs_lo = np.abs(lo)
    abs_hi = np.abs(hi)
    new_min = np.where((lo <= 0) & (hi >= 0), 0, np.minimum(abs_lo, abs_hi))
    new_max = np.maximum(abs_lo, abs_hi)
    return PhiTensor(
        child=np.abs(self.child),
        data_subjects=self.data_subjects,
        min_vals=new_min,
        max_vals=new_max,
    )
def reshape(self, *shape: Tuple[int, ...]) -> PhiTensor:
    """Reshape the private data (and data subjects) to ``shape``.

    Improvement: the original derived min/max bounds from the *private*
    data (``output_data.min()``/``.max()``), leaking data-dependent values
    into public metadata and diverging from ``ravel`` which reuses the
    existing bound metadata. Bounds are now carried over from the metadata,
    reshaped to the new shape — reshape cannot change the value range.
    """
    output_data = np.reshape(self.child, *shape)
    return PhiTensor(
        child=output_data,
        data_subjects=np.reshape(self.data_subjects, *shape),
        min_vals=lazyrepeatarray(data=self.min_vals.data, shape=output_data.shape),
        max_vals=lazyrepeatarray(data=self.max_vals.data, shape=output_data.shape),
    )
def pad(self, width: int, padding_mode: str = "reflect") -> PhiTensor:
    """Pad the private data (and data subjects) by ``width`` on each side.

    Only ``padding_mode="reflect"`` is supported. 3-D input is treated as an
    RGB image (channel axis unpadded); 2-D as grayscale; anything else is
    padded uniformly by ``width`` on every axis.

    NOTE(review): output bounds are taken from the padded *private* data
    (min/max), not from the bound metadata — TODO confirm intended.
    """
    data = self.child

    if padding_mode == "reflect":
        pad_left = pad_right = pad_top = pad_bottom = width
        # RGB image
        if len(data.shape) == 3:
            output_data = np.pad(
                data,
                ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)),
                padding_mode,
            )
            output_data_subjects = np.pad(
                self.data_subjects,
                ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)),
                padding_mode,
            )
        # Grayscale image
        elif len(data.shape) == 2:
            output_data = np.pad(
                data, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode
            )
            output_data_subjects = np.pad(
                self.data_subjects,
                ((pad_top, pad_bottom), (pad_left, pad_right)),
                padding_mode,
            )
        else:
            output_data = np.pad(data, width, padding_mode)
            output_data_subjects = np.pad(self.data_subjects, width, padding_mode)
    else:
        raise NotImplementedError

    output_min_val, output_max_val = output_data.min(), output_data.max()
    return PhiTensor(
        child=output_data,
        data_subjects=output_data_subjects,
        min_vals=output_min_val,
        max_vals=output_max_val,
    )
def ravel(self) -> PhiTensor:
    """Return a flattened (1-D) view of this tensor with bounds reshaped to match."""
    flat_child = self.child.ravel()
    flat_subjects = self.data_subjects.ravel()
    bounds_shape = flat_child.shape
    return PhiTensor(
        child=flat_child,
        data_subjects=flat_subjects,
        min_vals=lazyrepeatarray(data=self.min_vals.data, shape=bounds_shape),
        max_vals=lazyrepeatarray(data=self.max_vals.data, shape=bounds_shape),
    )
def random_horizontal_flip(self, p: float = 0.5) -> PhiTensor:
    """Could make more efficient by not encoding/decoding FPT"""
    # With probability (1 - p) return self unchanged; otherwise flip left-right.
    if np.random.random() > p:
        return self
    return PhiTensor(
        child=np.fliplr(self.child),
        data_subjects=self.data_subjects,
        min_vals=self.min_vals.horizontal_flip(),
        max_vals=self.max_vals.horizontal_flip(),
    )
def random_vertical_flip(self, p: float = 0.5) -> PhiTensor:
    """Could make more efficient by not encoding/decoding FPT"""
    # With probability (1 - p) return self unchanged; otherwise flip up-down.
    if np.random.random() > p:
        return self
    return PhiTensor(
        child=np.flipud(self.child),
        data_subjects=self.data_subjects,
        min_vals=self.min_vals.vertical_flip(),
        max_vals=self.max_vals.vertical_flip(),
    )
def random_rotation(self, degrees: Union[int, Tuple]) -> PhiTensor:
    """Rotate the private data by a random angle.

    Args:
        degrees: int -> angle drawn from [-degrees, degrees);
                 tuple -> angle drawn from [degrees[0], degrees[1]).

    Raises:
        ValueError: for any other type (the original fell through and
        crashed with UnboundLocalError on ``angle``).

    NOTE(review): output bounds come from the rotated *private* data —
    TODO confirm intended.
    """
    if isinstance(degrees, int):
        angle = np.random.randint(low=-degrees, high=degrees)
    elif isinstance(degrees, tuple):
        angle = np.random.randint(low=degrees[0], high=degrees[1])
    else:
        raise ValueError(f"degrees must be an int or a tuple, got {type(degrees)}")

    rotated_data_value = rotate(self.child, angle)

    return PhiTensor(
        child=rotated_data_value,
        data_subjects=self.data_subjects,
        min_vals=rotated_data_value.min(),
        max_vals=rotated_data_value.max(),
    )
def max(self, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> PhiTensor:
    """Maximum over ``axis``, with data subjects taken from the argmax positions.

    NOTE(review): ``self.data_subjects[indices]`` is only shape-correct for
    axis=None (flat argmax); for an axis argument the indexing looks
    suspect — TODO confirm.
    NOTE(review): bounds are derived from the private result values.
    """
    indices = self.child.argmax(axis)
    result = self.child.max(axis)
    return PhiTensor(
        child=result,
        data_subjects=self.data_subjects[indices],
        min_vals=lazyrepeatarray(data=result.min(), shape=result.shape),
        max_vals=lazyrepeatarray(data=result.max(), shape=result.shape),
    )
def min(self, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> PhiTensor:
    """Minimum over ``axis``, with data subjects taken from the argmin positions.

    NOTE(review): same caveats as ``max`` above — indexing with argmin
    output for a non-None axis looks suspect, and bounds are derived from
    the private result values.
    """
    indices = self.child.argmin(axis)
    result = self.child.min(axis)
    return PhiTensor(
        child=result,
        data_subjects=self.data_subjects[indices],
        min_vals=lazyrepeatarray(data=result.min(), shape=result.shape),
        max_vals=lazyrepeatarray(data=result.max(), shape=result.shape),
    )
def _argmax(self, axis: Optional[int]) -> np.ndarray:
    """Indices of the maxima of the private data (annotation corrected:
    ``argmax`` returns indices, not a PhiTensor)."""
    return self.child.argmax(axis)
def unravel_argmax(
    self, axis: Optional[int] = None
) -> Tuple[np.ndarray]:  # possible privacy violation?
    """Argmax indices of the private data, unraveled into per-axis coordinates."""
    flat_indices = self._argmax(axis=axis)
    return np.unravel_index(flat_indices, self.shape)
def mean(
    self,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    **kwargs: Any,
) -> PhiTensor:
    """Mean over ``axis``; bounds are carried over unchanged (a mean of
    values in [lo, hi] stays in [lo, hi]) and data subjects are averaged."""
    result = self.child.mean(axis, **kwargs)

    return PhiTensor(
        child=result,
        data_subjects=self.data_subjects.mean(axis, **kwargs),
        min_vals=lazyrepeatarray(data=self.min_vals.data, shape=result.shape),
        max_vals=lazyrepeatarray(data=self.max_vals.data, shape=result.shape),
    )
def std(
    self,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    **kwargs: Any,
) -> PhiTensor:
    """Standard deviation over ``axis``.

    Output bounds are [0, (max-min)^2 / 4]. The upper value is the
    Popoviciu bound on the *variance*; the code's own comment flags it as
    a rough approximation for std — TODO confirm whether std should use
    its square root instead.
    """
    result = self.child.std(axis, **kwargs)
    return PhiTensor(
        child=result,
        data_subjects=self.data_subjects.std(axis, **kwargs),
        min_vals=lazyrepeatarray(data=0, shape=result.shape),
        max_vals=lazyrepeatarray(
            data=0.25
        * (self.max_vals.data - self.min_vals.data)
        ** 2,  # rough approximation, could be off
            shape=result.shape,
        ),
    )
def sqrt(self) -> PhiTensor:
    """Element-wise square root of data and bounds (sqrt is monotone, so
    bound transformation is exact for non-negative bounds)."""
    result = np.sqrt(self.child)
    # NOTE(review): np.sqrt on the data_subjects array presumably delegates
    # to DataSubjectArray ufunc handling — verify against its implementation.
    return PhiTensor(
        child=result,
        data_subjects=np.sqrt(self.data_subjects),
        min_vals=lazyrepeatarray(
            data=np.sqrt(self.min_vals.data), shape=result.shape
        ),
        max_vals=lazyrepeatarray(
            data=np.sqrt(self.max_vals.data), shape=result.shape
        ),
    )
def normalize(
    self, mean: Union[float, Sequence[float]], std: Union[float, Sequence[float]]
) -> PhiTensor:
    """Normalize data and bounds as (x - mean) / std; scalar mean/std only."""
    # TODO: Double check if normalization bounds are correct; they might be data dependent
    if not (isinstance(mean, float) and isinstance(std, float)):
        # This is easily doable in the future
        raise NotImplementedError

    inv_std = 1 / std
    return PhiTensor(
        child=(self.child - mean) / std,
        data_subjects=self.data_subjects,
        min_vals=(self.min_vals - mean) * inv_std,
        max_vals=(self.max_vals - mean) * inv_std,
    )
def create_gamma(self) -> GammaTensor:
    """Return a new Gamma tensor carrying this phi tensor's data and metadata."""
    return GammaTensor(
        child=self.child,
        data_subjects=self.data_subjects,
        min_vals=self.min_vals,
        max_vals=self.max_vals,
    )
def view(self, *args: List[Any]) -> PhiTensor:
    """Reshape the private data; bounds collapse to the global scalar min/max."""
    # TODO: Figure out how to fix lazyrepeatarray reshape
    reshaped = self.child.reshape(*args)
    lower = self.min_vals.data.min()
    upper = self.max_vals.data.max()
    return PhiTensor(
        child=reshaped,
        data_subjects=self.data_subjects,
        min_vals=lazyrepeatarray(data=lower, shape=reshaped.shape),
        max_vals=lazyrepeatarray(data=upper, shape=reshaped.shape),
    )
def publish(
    self,
    get_budget_for_user: Callable,
    deduct_epsilon_for_user: Callable,
    ledger: DataSubjectLedger,
    sigma: float,
) -> AcceptableSimpleType:
    """Publish a noised result via the gamma tensor's DP mechanism.

    Casts to a GammaTensor, registers it in its own state map, and
    delegates to GammaTensor.publish with the given budget callbacks,
    ledger, and noise scale ``sigma``.

    NOTE(review): the print statements below emit the *private* child and
    the published values to stdout — debug leftovers, consider removing.
    """
    print("PUBLISHING TO GAMMA:")
    print(self.child)

    gamma = self.gamma
    # gamma.func = lambda x: x
    gamma.state[gamma.id] = gamma

    res = gamma.publish(
        get_budget_for_user=get_budget_for_user,
        deduct_epsilon_for_user=deduct_epsilon_for_user,
        ledger=ledger,
        sigma=sigma,
    )

    print("Final Values", res)

    return res
@property
def value(self) -> np.ndarray:
    """The raw private data (no DP protection applied) — handle with care."""
    return self.child
def astype(self, np_type: np.dtype) -> PhiTensor:
    """Cast the private data and both bound arrays to ``np_type``."""
    casted_child = self.child.astype(np_type)
    return self.__class__(
        child=casted_child,
        data_subjects=self.data_subjects,
        min_vals=self.min_vals.astype(np_type),
        max_vals=self.max_vals.astype(np_type),
        # scalar_manager=self.scalar_manager,
    )
@property
def shape(self) -> Tuple[Any, ...]:
    """Shape of the private data; an empty tuple when there is no child."""
    child = self.child
    return () if child is None else child.shape
def __repr__(self) -> str:
    """Pretty print some information, optimized for Jupyter notebook viewing."""
    cls_name = self.__class__.__name__
    return f"{cls_name}(child={self.child}, min_vals={self.min_vals}, max_vals={self.max_vals})"
def __eq__(self, other: Any) -> Union[PhiTensor, GammaTensor]:  # type: ignore
    """Element-wise equality; escalates to GammaTensor when the comparison
    of children produces one.

    Raises:
        Exception: when both operands are tensors of mismatched length.
    """
    # TODO: what about data_subjects and min / max values?
    if is_acceptable_simple_type(other) or len(self.child) == len(other.child):
        gamma_output = False
        if is_acceptable_simple_type(other):
            result = self.child == other
        else:
            # check data_subjects match, if they dont gamma_output = True
            result = self.child == other.child
            if isinstance(result, GammaTensor):  # TODO: Check this
                gamma_output = True
        if not gamma_output:
            # NOTE(review): bounds of a boolean output should arguably be
            # [0, 1]; copy_with keeps the originals (see commented lines).
            # min_vals=self.min_vals * 0.0,
            # max_vals=self.max_vals * 0.0 + 1.0,
            return self.copy_with(child=result)
        else:
            return self.copy_with(child=result).gamma
    else:
        raise Exception(
            "Tensor dims do not match for __eq__: "
            + f"{len(self.child)} != {len(other.child)}"
        )
def __add__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]:
    """Element-wise addition.

    Phi + Phi always escalates to GammaTensor addition; public scalars and
    arrays shift data and bounds directly; Gamma operands escalate.
    """
    # if the tensor being added is also private
    if isinstance(other, PhiTensor):
        return self.gamma + other.gamma
        # if self.data_subjects != other.data_subjects:
        #     return self.gamma + other.gamma

        # return PhiTensor(
        #     child=self.child + other.child,
        #     min_vals=self.min_vals + other.min_vals,
        #     max_vals=self.max_vals + other.max_vals,
        #     data_subjects=self.data_subjects,
        #     # scalar_manager=self.scalar_manager,
        # )

    # if the tensor being added is a public tensor / int / float / etc.
    elif is_acceptable_simple_type(other):
        return PhiTensor(
            child=self.child + other,
            min_vals=self.min_vals + other,
            max_vals=self.max_vals + other,
            data_subjects=self.data_subjects,
        )

    elif isinstance(other, GammaTensor):
        return self.gamma + other
    else:
        print("Type is unsupported:" + str(type(other)))
        raise NotImplementedError
def __sub__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]:
    """Element-wise subtraction.

    Phi - Phi escalates to GammaTensor subtraction; public operands shift
    data and bounds (with a broadcastability check for ndarrays); Gamma
    operands escalate. Only the public-operand path reaches the final
    PhiTensor construction — the other branches return or raise first.
    """
    if isinstance(other, PhiTensor):
        return self.gamma - other.gamma
        # diff_data_subjects = (
        #     self.data_subjects.one_hot_lookup != other.data_subjects.one_hot_lookup
        # )
        # diff_data_subjects = (
        #     diff_data_subjects
        #     if isinstance(diff_data_subjects, bool)
        #     else diff_data_subjects.any()
        # )
        # if diff_data_subjects:
        #     return self.gamma - other.gamma
        #     # raise NotImplementedError

        # data = self.child - other.child

        # min_min = self.min_vals.data - other.min_vals.data
        # min_max = self.min_vals.data - other.max_vals.data
        # max_min = self.max_vals.data - other.min_vals.data
        # max_max = self.max_vals.data - other.max_vals.data
        # _min_vals = np.minimum.reduce([min_min, min_max, max_min, max_max])
        # _max_vals = np.maximum.reduce([min_min, min_max, max_min, max_max])
        # min_vals = self.min_vals.copy()
        # min_vals.data = _min_vals
        # max_vals = self.max_vals.copy()
        # max_vals.data = _max_vals

        # data_subjects = self.data_subjects

    elif is_acceptable_simple_type(other):
        if isinstance(other, np.ndarray):
            if not is_broadcastable(other.shape, self.child.shape):  # type: ignore
                raise Exception(
                    f"Shapes do not match for subtraction: {self.child.shape} and {other.shape}"
                )
        data = self.child - other
        min_vals = self.min_vals - other
        max_vals = self.max_vals - other
        data_subjects = self.data_subjects
    elif isinstance(other, GammaTensor):
        return self.gamma - other
    else:
        print("Type is unsupported:" + str(type(other)))
        raise NotImplementedError
    return PhiTensor(
        child=data,
        data_subjects=data_subjects,
        min_vals=min_vals,
        max_vals=max_vals,
    )
def __mul__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]:
    """Element-wise multiplication.

    Bug fix: multiplying two PhiTensors previously computed
    ``self.gamma + other.gamma`` (an *addition* — compare ``__add__``);
    it now multiplies the gamma tensors.

    Public operands scale the data; new bounds are the element-wise
    min/max of the two scaled bound arrays (with a possibly negative
    multiplier either bound can become the new minimum). This replaces the
    original's four candidate products, two of which were duplicates.
    """
    if isinstance(other, PhiTensor):
        return self.gamma * other.gamma

    elif is_acceptable_simple_type(other):
        data = self.child * other

        lo = self.min_vals.data * other
        hi = self.max_vals.data * other

        min_vals = self.min_vals.copy()
        min_vals.data = np.minimum(lo, hi)
        max_vals = self.max_vals.copy()
        max_vals.data = np.maximum(lo, hi)

        return PhiTensor(
            child=data,
            data_subjects=self.data_subjects,
            min_vals=min_vals,
            max_vals=max_vals,
        )
    elif isinstance(other, GammaTensor):
        return self.gamma * other
    else:
        print("Type is unsupported:" + str(type(other)))
        raise NotImplementedError
def __rtruediv__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]:
    """Right division (``other / self``), computed as (1 / self) * other."""
    if is_acceptable_simple_type(other):
        return PhiTensor(
            child=(1 / self.child) * other,
            min_vals=(1 / self.min_vals) * other,
            max_vals=(1 / self.max_vals) * other,
            data_subjects=self.data_subjects,
            # scalar_manager=self.scalar_manager,
        )
    if isinstance(other, GammaTensor):
        return (1 / self.gamma) * other
    print("Type is unsupported:" + str(type(other)))
    raise NotImplementedError
def __matmul__(
    self, other: Union[np.ndarray, PhiTensor]
) -> Union[PhiTensor, GammaTensor]:
    """Matrix multiplication with an ndarray (stays Phi), a PhiTensor or a
    GammaTensor (escalates to Gamma). Only the ndarray path reaches the
    final PhiTensor construction.

    NOTE(review): the ``if False`` branch below is intentionally dead —
    the broadcast check is disabled pending the "Modify before merge" TODO.
    """
    if not isinstance(other, (np.ndarray, PhiTensor, GammaTensor)):
        raise Exception(
            f"Matrix multiplication not yet implemented for type {type(other)}"
        )
    else:
        # Modify before merge, to know is broadcast is actually necessary
        if False:  # and not is_broadcastable(self.shape, other.shape):
            raise Exception(
                f"Shapes not broadcastable: {self.shape} and {other.shape}"
            )
        else:
            if isinstance(other, np.ndarray):
                data = self.child.__matmul__(other)
                min_vals = self.min_vals.__matmul__(other)
                max_vals = self.max_vals.__matmul__(other)
                output_ds = self.data_subjects @ other
            elif isinstance(other, PhiTensor):
                return self.gamma @ other.gamma
                # if self.data_subjects != other.data_subjects:
                #     return self.gamma @ other.gamma
                # else:
                #     data = self.child.__matmul__(other.child)
                #     min_vals = self.min_vals.__matmul__(other.min_vals)
                #     max_vals = self.max_vals.__matmul__(other.max_vals)
                #     output_ds = DataSubjectList(
                #         one_hot_lookup=np.concatenate(
                #             (
                #                 self.data_subjects.one_hot_lookup,
                #                 other.data_subjects.one_hot_lookup,
                #             )
                #         ),
                #         data_subjects_indexed=np.concatenate(
                #             (np.zeros_like(data), np.ones_like(data))
                #         ),  # replace with (1, *data.shape) if inc shape
                #     )

            elif isinstance(other, GammaTensor):
                return self.gamma @ other
            else:
                print("Type is unsupported:" + str(type(other)))
                raise NotImplementedError

            return PhiTensor(
                child=data,
                max_vals=max_vals,
                min_vals=min_vals,
                data_subjects=output_ds,
            )
def __rmatmul__(
    self, other: Union[np.ndarray, PhiTensor]
) -> Union[PhiTensor, GammaTensor]:
    """Reflected matrix multiplication (``other @ self``); mirrors
    ``__matmul__`` with the reflected dunder on each component.

    NOTE(review): the ``if False`` branch below is intentionally dead —
    see the "Modify before merge" TODO.
    """
    if not isinstance(other, (np.ndarray, PhiTensor, GammaTensor)):
        raise Exception(
            f"Matrix multiplication not yet implemented for type {type(other)}"
        )
    else:
        # Modify before merge, to know is broadcast is actually necessary
        if False:  # and not is_broadcastable(self.shape, other.shape):
            raise Exception(
                f"Shapes not broadcastable: {self.shape} and {other.shape}"
            )
        else:
            if isinstance(other, np.ndarray):
                data = self.child.__rmatmul__(other)
                min_vals = self.min_vals.__rmatmul__(other)
                max_vals = self.max_vals.__rmatmul__(other)
                output_ds = self.data_subjects.__rmatmul__(other)
            elif isinstance(other, PhiTensor):
                return self.gamma.__rmatmul__(other.gamma)
                # if self.data_subjects != other.data_subjects:
                #     # return convert_to_gamma_tensor(self).__matmul__(convert_to_gamma_tensor(other))
                #     raise NotImplementedError
                # else:
                #     data = self.child.__rmatmul__(other.child)
                #     # _min_vals = np.array(
                #     #     [self.min_vals.data.__matmul__(other.min_vals.data)]
                #     # )
                #     # _max_vals = np.array(
                #     #     [self.max_vals.data.__matmul__(other.max_vals.data)]
                #     # )
                #     # min_vals = self.min_vals.copy()
                #     # min_vals.data = _min_vals
                #     # max_vals = self.max_vals.copy()
                #     # max_vals.data = _max_vals
                #     min_vals = self.min_vals.__rmatmul__(other.min_vals)
                #     max_vals = self.max_vals.__rmatmul__(other.max_vals)

            else:
                print("Type is unsupported:" + str(type(other)))
                raise NotImplementedError

            return PhiTensor(
                child=data,
                max_vals=max_vals,
                min_vals=min_vals,
                data_subjects=output_ds,
            )
def clip(self, a_min: float, a_max: float) -> PhiTensor:
    """Clip data and bound metadata into [a_min, a_max]."""
    clipped = np.clip(self.child, a_min, a_max)
    clipped_lo = np.clip(self.min_vals.data, a_min, a_max)
    clipped_hi = np.clip(self.max_vals.data, a_min, a_max)
    return PhiTensor(
        child=clipped,
        data_subjects=self.data_subjects,
        min_vals=lazyrepeatarray(data=clipped_lo, shape=clipped.shape),
        max_vals=lazyrepeatarray(data=clipped_hi, shape=clipped.shape),
    )
def transpose(self, *args: Any, **kwargs: Any) -> PhiTensor:
    """Transposes self.child, min_vals, and max_vals if these can be transposed, otherwise doesn't change them.

    NOTE(review): ``**kwargs`` is accepted but never forwarded to
    ``self.child.transpose`` — TODO confirm intended.
    NOTE(review): when the bounds are transposable, they are recomputed
    from the *private* data (``data.min()``/``data.max()``) rather than
    transposed — data-dependent; TODO confirm intended.
    NOTE(review): min_vals/max_vals are normally lazyrepeatarray, so the
    int/float/bool guards below likely never trigger — verify.
    """
    data: np.ndarray
    if (
        isinstance(self.child, int)
        or isinstance(self.child, float)
        or isinstance(self.child, bool)
    ):
        # For these data types, the transpose operation is meaningless, so don't change them.
        data = self.child  # type: ignore
        print(
            f"Warning: Tensor data was of type {type(data)}, transpose operation had no effect."
        )
    else:
        data = self.child.transpose(*args)

    # TODO: Should we give warnings for min_vals and max_vals being single floats/integers/booleans too?
    if (
        isinstance(self.min_vals, int)
        or isinstance(self.min_vals, float)
        or isinstance(self.min_vals, bool)
    ):
        # For these data types, the transpose operation is meaningless, so don't change them.
        min_vals = self.min_vals
        # print(f'Warning: Tensor data was of type {type(data)}, transpose operation had no effect.')
    else:
        min_vals = data.min()

    if (
        isinstance(self.max_vals, int)
        or isinstance(self.max_vals, float)
        or isinstance(self.max_vals, bool)
    ):
        # For these data types, the transpose operation is meaningless, so don't change them.
        max_vals = self.max_vals
        # print(f'Warning: Tensor data was of type {type(data)}, transpose operation had no effect.')
    else:
        max_vals = data.max()

    output_ds = self.data_subjects.transpose(*args)

    return PhiTensor(
        child=data,
        data_subjects=output_ds,
        min_vals=min_vals,
        max_vals=max_vals,
    )
def concatenate(
    self,
    other: Union[np.ndarray, PhiTensor],
    *args: List[Any],
    **kwargs: Dict[str, Any],
) -> Union[PhiTensor, GammaTensor]:
    """Concatenate with another PhiTensor (public operands unsupported).

    NOTE(review): the mismatched-subjects branch returns
    ``self.gamma + other.gamma`` — an *addition*, not a concatenation —
    and ``self.data_subjects != other.data_subjects`` on ndarrays yields
    an array whose truthiness is ambiguous. Both look like bugs; left
    unchanged because the GammaTensor API is not visible here — TODO fix.
    """
    # if the tensor being added is also private
    if isinstance(other, PhiTensor):
        if self.data_subjects != other.data_subjects:
            return self.gamma + other.gamma

        return PhiTensor(
            child=self.child.concatenate(other.child, *args, **kwargs),
            min_vals=self.min_vals.concatenate(other.min_vals, *args, **kwargs),
            max_vals=self.max_vals.concatenate(other.max_vals, *args, **kwargs),
            data_subjects=self.data_subjects,
        )

    elif is_acceptable_simple_type(other):
        raise NotImplementedError
    else:
        print("Type is unsupported:" + str(type(other)))
        raise NotImplementedError
def __lt__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]:
    """Element-wise ``<`` comparison.

    Bug fix: the unsupported-type branch previously *returned* the
    NotImplementedError class instead of raising it; it now raises,
    matching ``__gt__``.
    """
    # private-vs-private comparisons escalate to GammaTensor
    if isinstance(other, PhiTensor):
        return self.gamma.__lt__(other.gamma)

    # if the tensor being compared is a public tensor / int / float / etc.
    elif is_acceptable_simple_type(other):
        data = self.child < other
        # boolean output: bounds collapse to [0, 1]
        min_vals = self.min_vals * 0
        max_vals = (self.max_vals * 0) + 1
        data_subjects = self.data_subjects

        return PhiTensor(
            child=data,
            data_subjects=data_subjects,
            min_vals=min_vals,
            max_vals=max_vals,
        )

    else:
        raise NotImplementedError
def __le__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]:
    """Element-wise ``<=`` comparison.

    Bug fix: the unsupported-type branch previously *returned* the
    NotImplementedError class instead of raising it; it now raises,
    matching ``__gt__``.
    """
    # private-vs-private comparisons escalate to GammaTensor
    if isinstance(other, PhiTensor):
        return self.gamma.__le__(other.gamma)

    # if the tensor being compared is a public tensor / int / float / etc.
    elif is_acceptable_simple_type(other):
        data = self.child <= other
        # boolean output: bounds collapse to [0, 1]
        min_vals = self.min_vals * 0
        max_vals = (self.max_vals * 0) + 1
        data_subjects = self.data_subjects

        return PhiTensor(
            child=data,
            data_subjects=data_subjects,
            min_vals=min_vals,
            max_vals=max_vals,
        )

    else:
        raise NotImplementedError
def __gt__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]:
    """Element-wise ``>`` comparison; private operands escalate to Gamma,
    public operands produce a boolean PhiTensor with bounds [0, 1]."""
    # if the tensor being compared is also private
    if isinstance(other, PhiTensor):
        return self.gamma.__gt__(other.gamma)

        # if self.data_subjects != other.data_subjects:
        #     # return self.gamma < other.gamma
        #     raise NotImplementedError

        # if len(self.child) != len(other.child):
        #     raise Exception(
        #         f"Tensor dims do not match for __gt__: {len(self.child)} != {len(other.child)}"  # type: ignore
        #     )

        # data = (
        #     self.child > other.child
        # )  # the * 1 just makes sure it returns integers instead of True/False
        # min_vals = self.min_vals * 0
        # max_vals = (self.max_vals * 0) + 1
        # data_subjects = self.data_subjects

        # return PhiTensor(
        #     child=data,
        #     data_subjects=data_subjects,
        #     min_vals=min_vals,
        #     max_vals=max_vals,
        # )

    # if the tensor being compared is a public tensor / int / float / etc.
    elif is_acceptable_simple_type(other):
        data = self.child > other
        # boolean output: bounds collapse to [0, 1]
        min_vals = self.min_vals * 0
        max_vals = (self.max_vals * 0) + 1
        data_subjects = self.data_subjects
        return PhiTensor(
            child=data,
            data_subjects=data_subjects,
            min_vals=min_vals,
            max_vals=max_vals,
        )
    else:
        raise NotImplementedError  # type: ignore
# Re enable after testing
def dot(
    self, other: Union[PhiTensor, GammaTensor, np.ndarray]
) -> Union[PhiTensor, GammaTensor]:
    """Dot product; ndarray operands stay Phi, tensor operands escalate
    to GammaTensor.

    NOTE(review): np.dot on min_vals/max_vals (lazyrepeatarray) presumably
    relies on its array protocol — verify against lazyrepeatarray.
    """
    if isinstance(other, np.ndarray):
        return PhiTensor(
            child=np.dot(self.child, other),
            min_vals=np.dot(self.min_vals, other),
            max_vals=np.dot(self.max_vals, other),
            data_subjects=np.dot(self.data_subjects, other),
        )
    elif isinstance(other, PhiTensor):
        return self.gamma.dot(other.gamma)
        # if self.data_subjects.one_hot_lookup == other.data_subjects.one_hot_lookup:
        #     return PhiTensor(
        #         child=np.dot(self.child, other.child),
        #         min_vals=np.dot(self.min_vals, other.min_vals),
        #         max_vals=np.dot(self.max_vals, other.max_vals),
        #         data_subjects=self.data_subjects,
        #     )
        # else:
        #     return self.gamma.dot(other.gamma)
    elif isinstance(other, GammaTensor):
        return self.gamma.dot(other)
    else:
        raise NotImplementedError
def sum(
    self,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    **kwargs: Any,
) -> Union[PhiTensor, GammaTensor]:
    """Sum over the given axis/axes.

    Delegates entirely to the gamma representation, which owns the
    privacy bookkeeping for reductions.
    """
    return self.gamma.sum(axis, **kwargs)
def expand_dims(self, axis: int) -> PhiTensor:
    """Insert a new size-1 axis at ``axis`` (np.expand_dims semantics).

    Child, bounds, and data subjects are all expanded in lockstep so the
    metadata keeps matching the data shape.
    """
    expanded = np.expand_dims(self.child, axis=axis)
    lower = self.min_vals.copy()
    lower.shape = expanded.shape
    upper = self.max_vals.copy()
    upper.shape = expanded.shape
    return PhiTensor(
        child=expanded,
        min_vals=lower,
        max_vals=upper,
        data_subjects=np.expand_dims(self.data_subjects, axis=axis),
    )
def ones_like(
    self,
    *args: Tuple[Any, ...],
    **kwargs: Any,
) -> Union[PhiTensor, GammaTensor]:
    """Return a PhiTensor of ones with the same shape and metadata.

    Bounds use their own ``ones_like``; data subjects are reused as-is.
    """
    # ndarray children use the numpy helper; other child types provide
    # their own ones_like implementation.
    if isinstance(self.child, np.ndarray):
        new_child = np.ones_like(self.child, *args, **kwargs)
    else:
        new_child = self.child.ones_like(*args, **kwargs)
    return PhiTensor(
        child=new_child,
        min_vals=self.min_vals.ones_like(*args, **kwargs),
        max_vals=self.max_vals.ones_like(*args, **kwargs),
        data_subjects=self.data_subjects,
    )
def __ne__(self, other: Any) -> Union[PhiTensor, GammaTensor]:  # type: ignore
    """Element-wise inequality against a public value or another tensor.

    Returns a PhiTensor when the comparison stays within one data-subject
    context, otherwise falls through to the gamma representation.

    Raises:
        Exception: If the two tensors' lengths differ.
    """
    # TODO: what about data_subjects and min / max values?
    if is_acceptable_simple_type(other) or len(self.child) == len(other.child):
        gamma_output = False
        if is_acceptable_simple_type(other):
            result = self.child != other
        else:
            # TODO: check data_subjects match; if they don't, force gamma output.
            result = self.child != other.child
            if isinstance(result, GammaTensor):
                gamma_output = True
        if not gamma_output:
            return self.copy_with(child=result)
        else:
            return self.copy_with(child=result).gamma
    else:
        # BUG FIX: the message previously referenced __eq__ even though
        # this is the __ne__ implementation.
        raise Exception(
            "Tensor dims do not match for __ne__: "
            + f"{len(self.child)} != {len(other.child)}"
        )
def __neg__(self) -> PhiTensor:
    """Unary minus: negate the data; the bounds swap roles after negation."""
    negated = self.child * -1
    return PhiTensor(
        child=negated,
        # -max becomes the new lower bound, -min the new upper bound.
        min_vals=self.max_vals * -1,
        max_vals=self.min_vals * -1,
        data_subjects=self.data_subjects,
    )
def __pos__(self) -> PhiTensor:
    """Unary plus: a fresh PhiTensor wrapper over the unchanged parts."""
    return PhiTensor(
        child=self.child,
        min_vals=self.min_vals,
        max_vals=self.max_vals,
        data_subjects=self.data_subjects,
    )
def exp(self) -> PhiTensor:
    """Approximate element-wise exponential via the SMPC approximation."""
    # relative
    from ...smpc.approximations import exp

    def _bounds_exp(val: np.ndarray) -> np.ndarray:
        # Mirror of the approximation applied to the public bound metadata:
        # non-negative entries map through exp(-v), negative ones through
        # -exp(v). (Renamed locals so nothing shadows the imported exp.)
        nonneg = val >= 0
        negative = val < 0
        magnitude = np.exp((nonneg * val * -1) + (negative * val))
        return (nonneg) * magnitude + (negative) * magnitude * -1

    lower = self.min_vals.copy()
    lower.data = np.array(_bounds_exp(lower.data))
    upper = self.max_vals.copy()
    upper.data = np.array(_bounds_exp(upper.data))
    return PhiTensor(
        child=exp(self.child),  # type: ignore
        min_vals=lower,
        max_vals=upper,
        data_subjects=self.data_subjects,
    )
def softmax(self) -> PhiTensor:
    """Approximate softmax over the child using SMPC-safe primitives."""
    # relative
    from ...smpc.approximations import exp
    from ...smpc.approximations import reciprocal

    def _plain_softmax(val: np.ndarray) -> np.ndarray:
        # Numerically-stable softmax used only for the public bound
        # metadata. (Renamed so it no longer shadows the method name.)
        shifted = val - val.max()
        exps = np.exp(shifted)
        return exps * (1 / exps.sum())

    lower = self.min_vals.copy()
    lower.data = np.array(_plain_softmax(lower.data))
    upper = self.max_vals.copy()
    upper.data = np.array(_plain_softmax(upper.data))

    fpt = self.child.copy()
    if not isinstance(fpt.child, np.ndarray):
        raise ValueError("Softmax currently works only for numpy child")
    # Shift for numerical stability before the fixed-point exponential.
    fpt.child = fpt.child - fpt.child.max()
    numerator = exp(fpt)
    inv = reciprocal(numerator.sum())  # type: ignore
    return PhiTensor(
        child=numerator * inv,  # type: ignore
        min_vals=lower,
        max_vals=upper,
        data_subjects=self.data_subjects,
    )
def reciprocal(self) -> PhiTensor:
    """Approximate element-wise reciprocal (1/x) via the SMPC approximation."""
    # relative
    from ...smpc.approximations import reciprocal

    min_vals = self.min_vals.copy()
    # NOTE(review): 1/x is decreasing for x > 0, so 1/min bounds the result
    # from above, not below — these bounds look swapped, and neither sign
    # changes nor division by zero is handled. TODO confirm intent.
    min_vals.data = np.array(1 / min_vals.data)
    max_vals = self.max_vals.copy()
    max_vals.data = np.array(1 / max_vals.data)
    return PhiTensor(
        child=reciprocal(self.child),
        min_vals=min_vals,
        max_vals=max_vals,
        data_subjects=self.data_subjects,
    )
def one_hot(self) -> PhiTensor:
    """One-hot encode the child tensor, keeping the existing metadata."""
    one_hot_child = self.child.one_hot()
    # NOTE(review): min/max bounds and data_subjects are carried over
    # unchanged even though the one-hot output is 0/1-valued and gains an
    # extra dimension — TODO confirm this is intended.
    return PhiTensor(
        child=one_hot_child,
        min_vals=self.min_vals,
        max_vals=self.max_vals,
        data_subjects=self.data_subjects,
    )
def resize(self, new_shape: Union[int, Tuple[int, ...]]) -> PhiTensor:
    """Resize the underlying data to ``new_shape`` (np.resize semantics)."""
    resized = np.resize(self.child.data, new_shape)
    # Bounds collapse to the overall min/max broadcast over the new shape.
    return PhiTensor(
        child=resized,
        min_vals=lazyrepeatarray(self.min_vals.data.min(), resized.shape),
        max_vals=lazyrepeatarray(self.max_vals.data.max(), resized.shape),
        data_subjects=np.resize(self.data_subjects, new_shape),
    )
def compress(self, condition: List[bool], axis: Optional[int] = None) -> PhiTensor:
    """Select slices along ``axis`` where ``condition`` is True.

    Args:
        condition: Boolean mask; only entries where it is True are kept.
        axis: Axis along which to select; operates on the flattened
            input when None.
            (FIX: annotation was the invalid implicit-Optional ``int = None``.)

    Returns:
        PhiTensor with compressed child and data_subjects; bounds collapse
        to the overall min/max broadcast over the new shape.
    """
    out_child = self.child.compress(condition, axis)
    return PhiTensor(
        child=out_child,
        min_vals=lazyrepeatarray(self.min_vals.data.min(), out_child.shape),
        max_vals=lazyrepeatarray(self.max_vals.data.max(), out_child.shape),
        data_subjects=self.data_subjects.compress(condition, axis),
    )
def squeeze(self, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> PhiTensor:
    """Drop size-1 dimensions — all of them, or only those in ``axis``."""
    squeezed = self.child.squeeze(axis)
    return PhiTensor(
        child=squeezed,
        # Bounds are reshaped (not recomputed) to track the new shape.
        min_vals=self.min_vals.reshape(squeezed.shape),
        max_vals=self.max_vals.reshape(squeezed.shape),
        data_subjects=np.squeeze(self.data_subjects, axis),
    )
def repeat(
    self, repeats: Union[int, Tuple[int, ...]], axis: Optional[int] = None
) -> PhiTensor:
    """Repeat elements of the tensor.

    Args:
        repeats: Number of repetitions for each element; broadcast to fit
            the shape of the given axis.
        axis: Axis along which to repeat values. By default the flattened
            input is used and a flat output is returned.

    Returns:
        PhiTensor with repeated child and data_subjects; lazyrepeatarray
        bounds collapse to the overall min/max over the new shape.
    """
    repeated = self.child.repeat(repeats, axis)
    if isinstance(self.min_vals, lazyrepeatarray):
        new_min = lazyrepeatarray(data=self.min_vals.data.min(), shape=repeated.shape)
        new_max = lazyrepeatarray(data=self.max_vals.data.max(), shape=repeated.shape)
    else:
        new_min, new_max = self.min_vals, self.max_vals
    return PhiTensor(
        child=repeated,
        data_subjects=self.data_subjects.repeat(repeats, axis),
        min_vals=new_min,
        max_vals=new_max,
    )
def choose(
    self,
    choices: Sequence[Union[PassthroughTensor, np.ndarray]],
    out: Optional[np.ndarray] = None,
    mode: Optional[str] = "raise",
) -> PhiTensor:
    """Construct a tensor by using ``self`` as an index array into ``choices``.

    Mirrors :func:`numpy.choose`: each element of ``self`` selects which of
    the ``choices`` arrays supplies the value at that position, after
    broadcasting ``self`` and all choices to a common shape.

    Parameters
    ----------
    choices : sequence of arrays
        Choice arrays; ``self`` and all of the choices must be
        broadcastable to the same shape.
    out : array, optional
        NOTE(review): currently ignored — it is never forwarded to the
        underlying ``choose`` call. Confirm whether it should be.
    mode : {'raise' (default), 'wrap', 'clip'}, optional
        Specifies how indices outside [0, n-1] are treated:
        'raise' raises an exception, 'wrap' uses value mod n, and 'clip'
        saturates to the [0, n-1] range.

    Returns
    -------
    merged_array : PhiTensor
        The merged result; lazyrepeatarray bounds collapse to the overall
        min/max over the result shape.

    Raises
    ------
    ValueError
        If ``self`` and each choice array are not all broadcastable to the
        same shape.
    """
    result = self.child.choose(choices, mode=mode)
    if isinstance(self.min_vals, lazyrepeatarray):
        minv = lazyrepeatarray(data=self.min_vals.data.min(), shape=result.shape)
        maxv = lazyrepeatarray(data=self.max_vals.data.max(), shape=result.shape)
    else:
        minv, maxv = self.min_vals, self.max_vals
    return PhiTensor(
        child=result,
        # NOTE(review): taking from data_subjects with ``choices`` (the
        # choice arrays themselves) rather than with index values looks
        # suspicious — confirm intended semantics.
        data_subjects=self.data_subjects.take(choices),
        min_vals=minv,
        max_vals=maxv,
    )
def _object2bytes(self) -> bytes:
    """Serialize this PhiTensor into packed capnp bytes."""
    schema = get_capnp_schema(schema_file="phi_tensor.capnp")
    pt_struct: CapnpModule = schema.PT  # type: ignore
    pt_msg = pt_struct.new_message()
    # this is how we dispatch correct deserialization of bytes
    pt_msg.magicHeader = serde_magic_header(type(self))
    # ndarray/scalar children are capnp-serialized directly; everything
    # else goes through the generic serde path. isNumpy records which
    # branch was used so _bytes2object can mirror it.
    if isinstance(self.child, np.ndarray) or np.isscalar(self.child):
        chunk_bytes(capnp_serialize(np.array(self.child), to_bytes=True), "child", pt_msg)  # type: ignore
        pt_msg.isNumpy = True
    else:
        chunk_bytes(serialize(self.child, to_bytes=True), "child", pt_msg)  # type: ignore
        pt_msg.isNumpy = False
    pt_msg.minVals = serialize(self.min_vals, to_bytes=True)
    pt_msg.maxVals = serialize(self.max_vals, to_bytes=True)
    # Data subjects are converted to a numpy utf8 representation, then
    # chunked to respect capnp message size limits.
    chunk_bytes(
        capnp_serialize(dslarraytonumpyutf8(self.data_subjects), to_bytes=True),
        "dataSubjects",
        pt_msg,
    )
    # to pack or not to pack?
    # to_bytes = pt_msg.to_bytes()
    return pt_msg.to_bytes_packed()
@staticmethod
def _bytes2object(buf: bytes) -> PhiTensor:
    """Deserialize packed capnp bytes (from _object2bytes) into a PhiTensor."""
    schema = get_capnp_schema(schema_file="phi_tensor.capnp")
    pt_struct: CapnpModule = schema.PT  # type: ignore
    # https://stackoverflow.com/questions/48458839/capnproto-maximum-filesize
    # Lift the default traversal cap so large tensors can be read back.
    MAX_TRAVERSAL_LIMIT = 2**64 - 1
    # to pack or not to pack?
    # pt_msg = pt_struct.from_bytes(buf, traversal_limit_in_words=2 ** 64 - 1)
    pt_msg = pt_struct.from_bytes_packed(
        buf, traversal_limit_in_words=MAX_TRAVERSAL_LIMIT
    )
    # Mirror the isNumpy branch chosen during serialization.
    if pt_msg.isNumpy:
        child = capnp_deserialize(combine_bytes(pt_msg.child), from_bytes=True)
    else:
        child = deserialize(combine_bytes(pt_msg.child), from_bytes=True)
    min_vals = deserialize(pt_msg.minVals, from_bytes=True)
    max_vals = deserialize(pt_msg.maxVals, from_bytes=True)
    data_subjects = numpyutf8todslarray(
        capnp_deserialize(combine_bytes(pt_msg.dataSubjects), from_bytes=True)
    )
    return PhiTensor(
        child=child,
        min_vals=min_vals,
        max_vals=max_vals,
        data_subjects=data_subjects,
    )
|
"""
glc.animation
=============
Abstract definition for Animation objects.
(c) 2016 LeoV
https://github.com/leovoel/
"""
from .render_list import RenderList
class Animation:
    """Base class for animations.

    Shouldn't be instantiated. You should instead use :class:`GifAnimation`.

    Parameters
    ----------
    width : int
        Width of the drawing surface, in pixels. Defaults to 500.
    height : int
        Height of the drawing surface, in pixels. Defaults to 500.
    duration : float
        Duration of the animation, in seconds. Defaults to 2.
    fps : float
        The frame rate of the animation. Defaults to 30.
    ease : string
        The overall easing function of the animation. Defaults to ``'sine'``.
    loop : bool
        Whether the animation should loop. Defaults to ``True``.

    Attributes
    ----------
    render_list : :class:`RenderList`
        List of renderables/shapes. It's what you use to create the actual animation.
    """

    def __init__(self, filename, *args, **kwargs):
        # filename is where the finished animation is saved on context exit.
        self.filename = filename
        # NOTE(review): kwargs still contains duration/fps when forwarded to
        # RenderList — confirm RenderList tolerates (or consumes) them.
        self.render_list = RenderList(*args, **kwargs)
        # Mirror the render list's geometry/easing/looping settings locally.
        self.width = self.render_list.width
        self.height = self.render_list.height
        self.ease = self.render_list.ease
        self.loop = self.render_list.loop
        self.duration = kwargs.pop("duration", 2.0)
        self.fps = kwargs.pop("fps", 30)

    def set_size(self, width, height):
        """Changes the size of the drawing surface.

        Please note that this creates an entirely new drawing surface/context.

        Parameters
        ----------
        width : int
            New width of the surface.
        height : int
            New height of the surface.

        Returns
        -------
        self : :class:`Animation`
            For method chaining.
        """
        self.width = width
        self.height = height
        self.render_list.size(width, height)
        return self

    def set_ease(self, ease):
        """Sets the overall easing function for this animation.

        Parameters
        ----------
        ease : str
            The easing function to use.

        Returns
        -------
        self : :class:`Animation`
            For method chaining.
        """
        self.ease = ease
        self.render_list.ease = ease
        return self

    def set_loop(self, loop=None):
        """Sets the looping mode to the specified value.

        If a value for ``loop`` is not passed in, then the loop
        value is toggled - as in, if it's ``False``, then
        it will become ``True``. And vice-versa.

        Parameters
        ----------
        loop : bool
            Whether the animation should loop or not.

        Returns
        -------
        self : :class:`Animation`
            For method chaining.
        """
        if loop is None:
            self.loop = self.render_list.loop = not self.loop
        else:
            self.loop = loop
            self.render_list.loop = loop
        return self

    def set_fps(self, fps):
        """Sets the frame rate for this animation.

        Parameters
        ----------
        fps : float
            The frame rate to apply.

        Returns
        -------
        self : :class:`Animation`
            For method chaining.
        """
        self.fps = fps
        self.render_list.fps = fps
        return self

    def set_duration(self, duration):
        """Sets the duration for this animation.

        Parameters
        ----------
        duration : float
            The duration to apply.

        Returns
        -------
        self : :class:`Animation`
            For method chaining.
        """
        self.duration = duration
        self.render_list.duration = duration
        return self

    def set_default_style(self, name, value):
        """Sets a default style to the specified value.

        Parameters
        ----------
        name : str
            The name of the style attribute to change.
        value : str
            The value to apply.

        Returns
        -------
        self : :class:`Animation`
            For method chaining.
        """
        # Silently ignores unknown style names.
        if name in self.render_list.default_styles:
            self.render_list.default_styles[name] = value
        return self

    def set_bg_color(self, color):
        """Shortcut to set the background color to the specified color.

        See the documentation on colors to know what kind of
        value you should pass as a parameter to this function.

        Parameters
        ----------
        color : :class:`Color`
            The color to use in the background.

        Returns
        -------
        self : :class:`Animation`
            For method chaining.
        """
        self.set_default_style("bg_color", color)
        return self

    def render_all(self):
        """Renders all the necessary frames for this animation to numpy arrays.

        Returns
        -------
        frames : list of numpy arrays
        """
        frames = []
        total_frames = self.duration * self.fps
        # Normalized time step per frame; t runs over [0, 1).
        speed = 1 / total_frames
        t = 0
        rendering = True
        while rendering:
            frames.append(self.render_list.render(t))
            t += speed
            # Round to 4 decimals so float accumulation error cannot
            # overshoot the t >= 1 stop condition.
            if round(t * 10000) / 10000 >= 1:
                t = 0
                rendering = False
        return frames

    @property
    def w(self):
        # Shorthand for width.
        return self.width

    @property
    def h(self):
        # Shorthand for height.
        return self.height

    # context management

    def __enter__(self):
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        # Only save when the with-block exited cleanly.
        # NOTE(review): render_and_save is not defined on this class —
        # presumably provided by subclasses such as GifAnimation; confirm.
        if not exception_type:
            self.render_and_save(self.filename)
expose context from Animation
"""
glc.animation
=============
Abstract definition for Animation objects.
(c) 2016 LeoV
https://github.com/leovoel/
"""
from .render_list import RenderList
class Animation:
    """Base class for animations.

    Shouldn't be instantiated. You should instead use :class:`GifAnimation`.

    Parameters
    ----------
    width : int
        Width of the drawing surface, in pixels. Defaults to 500.
    height : int
        Height of the drawing surface, in pixels. Defaults to 500.
    duration : float
        Duration of the animation, in seconds. Defaults to 2.
    fps : float
        The frame rate of the animation. Defaults to 30.
    ease : string
        The overall easing function of the animation. Defaults to ``'sine'``.
    loop : bool
        Whether the animation should loop. Defaults to ``True``.

    Attributes
    ----------
    render_list : :class:`RenderList`
        List of renderables/shapes. It's what you use to create the actual animation.
    """

    def __init__(self, filename, *args, **kwargs):
        # filename is where the finished animation is saved on context exit.
        self.filename = filename
        # NOTE(review): kwargs still contains duration/fps when forwarded to
        # RenderList — confirm RenderList tolerates (or consumes) them.
        self.render_list = RenderList(*args, **kwargs)
        # Mirror the render list's geometry/easing/looping settings locally.
        self.width = self.render_list.width
        self.height = self.render_list.height
        self.ease = self.render_list.ease
        self.loop = self.render_list.loop
        self.duration = kwargs.pop("duration", 2.0)
        self.fps = kwargs.pop("fps", 30)

    def set_size(self, width, height):
        """Changes the size of the drawing surface.

        Please note that this creates an entirely new drawing surface/context.

        Parameters
        ----------
        width : int
            New width of the surface.
        height : int
            New height of the surface.

        Returns
        -------
        self : :class:`Animation`
            For method chaining.
        """
        self.width = width
        self.height = height
        self.render_list.size(width, height)
        return self

    def set_ease(self, ease):
        """Sets the overall easing function for this animation.

        Parameters
        ----------
        ease : str
            The easing function to use.

        Returns
        -------
        self : :class:`Animation`
            For method chaining.
        """
        self.ease = ease
        self.render_list.ease = ease
        return self

    def set_loop(self, loop=None):
        """Sets the looping mode to the specified value.

        If a value for ``loop`` is not passed in, then the loop
        value is toggled - as in, if it's ``False``, then
        it will become ``True``. And vice-versa.

        Parameters
        ----------
        loop : bool
            Whether the animation should loop or not.

        Returns
        -------
        self : :class:`Animation`
            For method chaining.
        """
        if loop is None:
            self.loop = self.render_list.loop = not self.loop
        else:
            self.loop = loop
            self.render_list.loop = loop
        return self

    def set_fps(self, fps):
        """Sets the frame rate for this animation.

        Parameters
        ----------
        fps : float
            The frame rate to apply.

        Returns
        -------
        self : :class:`Animation`
            For method chaining.
        """
        self.fps = fps
        self.render_list.fps = fps
        return self

    def set_duration(self, duration):
        """Sets the duration for this animation.

        Parameters
        ----------
        duration : float
            The duration to apply.

        Returns
        -------
        self : :class:`Animation`
            For method chaining.
        """
        self.duration = duration
        self.render_list.duration = duration
        return self

    def set_default_style(self, name, value):
        """Sets a default style to the specified value.

        Parameters
        ----------
        name : str
            The name of the style attribute to change.
        value : str
            The value to apply.

        Returns
        -------
        self : :class:`Animation`
            For method chaining.
        """
        # Silently ignores unknown style names.
        if name in self.render_list.default_styles:
            self.render_list.default_styles[name] = value
        return self

    def set_bg_color(self, color):
        """Shortcut to set the background color to the specified color.

        See the documentation on colors to know what kind of
        value you should pass as a parameter to this function.

        Parameters
        ----------
        color : :class:`Color`
            The color to use in the background.

        Returns
        -------
        self : :class:`Animation`
            For method chaining.
        """
        self.set_default_style("bg_color", color)
        return self

    def render_all(self):
        """Renders all the necessary frames for this animation to numpy arrays.

        Returns
        -------
        frames : list of numpy arrays
        """
        frames = []
        total_frames = self.duration * self.fps
        # Normalized time step per frame; t runs over [0, 1).
        speed = 1 / total_frames
        t = 0
        rendering = True
        while rendering:
            frames.append(self.render_list.render(t))
            t += speed
            # Round to 4 decimals so float accumulation error cannot
            # overshoot the t >= 1 stop condition.
            if round(t * 10000) / 10000 >= 1:
                t = 0
                rendering = False
        return frames

    @property
    def context(self):
        # Expose the underlying drawing context of the render list.
        return self.render_list.context

    @property
    def w(self):
        # Shorthand for width.
        return self.width

    @property
    def h(self):
        # Shorthand for height.
        return self.height

    # context management

    def __enter__(self):
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        # Only save when the with-block exited cleanly.
        # NOTE(review): render_and_save is not defined on this class —
        # presumably provided by subclasses such as GifAnimation; confirm.
        if not exception_type:
            self.render_and_save(self.filename)
|
import numpy as np
from skimage.util import img_as_float
from skimage.filters import gabor_kernel, sobel, scharr, prewitt, roberts, rank
from skimage.morphology import disk
from skimage.transform import resize
from skimage import data, io, filters
from scipy import ndimage as ndi
import scipy.ndimage.filters as filter
from skimage.color import rgb2gray
from skimage.feature import hog, daisy
from skimage import exposure
from numpy import arange
import skimage
import math
import sys
from scipy.stats import entropy
from skimage.feature import ORB, match_descriptors, local_binary_pattern
from skimage.feature import match_template
from sklearn.cluster import KMeans
class Features:
    """Bag-of-visual-words feature extractor over a set of images.

    Computes per-image descriptor vectors (HOG blocks or DAISY, optionally
    after channel selection, equalization, edge detection and template
    correlation), fits a KMeans codebook on the training descriptors, and
    encodes each image as a histogram ('hist') or sequence ('seq') of
    codebook center indices.
    """

    def __init__(self,
                 docs,
                 resize=(225, 225),
                 equalize=False,
                 edges='none',
                 pixels_per_cell=(8, 8),
                 cells_per_block=(3, 3),
                 contrast='none',
                 features='hog-vow',
                 channels='rgb',
                 correlation='yes',
                 sample_size=1000000,
                 num_centers=1000,
                 encoding='hist',
                 **kwargs):
        # NOTE(review): the default features='hog-vow' is not handled by
        # compute_features (only 'hog', 'hog-bovw', 'daisy') — probably a
        # typo for 'hog-bovw'; left unchanged to preserve the interface.
        self.resize = resize
        self.equalize = equalize
        self.pixels_per_cell = pixels_per_cell
        self.cells_per_block = cells_per_block
        self.edges = edges
        self.contrast = contrast
        self.features = features
        self.channels = channels
        self.correlation = correlation
        self.encoding = encoding
        self.train_features = {}  # a hack to avoid the training double processing of the whole dataset
        vectors = []
        for filename in docs:
            V = []
            for vec in self.compute_features(filename):
                V.append(vec)
                vectors.append(vec)
            self.train_features[filename] = V
        print("the training set contains {0} vectors".format(len(vectors)), file=sys.stderr)
        if len(vectors) > sample_size:
            # BUG FIX: np.random.choice requires a 1-D array and cannot
            # sample a list of descriptor vectors directly; sample indices
            # instead and keep the corresponding vectors.
            keep = np.random.choice(len(vectors), sample_size, replace=False)
            vectors = [vectors[i] for i in keep]
            print("we kept {0} vectors, randomly selected".format(len(vectors)), file=sys.stderr)
        self.model = KMeans(num_centers, verbose=1)
        print("preparing to fit our codebook with {0} centers".format(num_centers), file=sys.stderr)
        self.model.fit(vectors)
        vectors = None  # release the training descriptors
        print("encoding our training vectors", file=sys.stderr)
        for filename, veclist in self.train_features.items():
            self.train_features[filename] = self.encode(veclist)
        print("our feature module is fitted", file=sys.stderr)

    def encode(self, veclist):
        """Encode a list of descriptor vectors per the configured scheme."""
        if self.encoding == 'hist':
            return self.hist(veclist)
        elif self.encoding == 'seq':
            return self.sequence(veclist)
        else:
            raise Exception("Unknown feature encoding {0}".format(self.encoding))

    def sequence(self, veclist):
        """Map each descriptor to its nearest codebook center index."""
        seq = []
        for vec in veclist:
            # NOTE(review): sklearn's transform expects a 2-D array; vec may
            # need reshaping to (1, -1) — confirm against sklearn version.
            c = np.argmin(self.model.transform(vec))
            seq.append(c)
        return seq

    def hist(self, veclist):
        """Build a histogram of nearest-codebook-center counts."""
        h = np.zeros(self.model.n_clusters)
        for vec in veclist:
            c = np.argmin(self.model.transform(vec))
            h[c] += 1
        return h

    def __getitem__(self, filename):
        """Return the encoded feature vector for one image file."""
        # Training files were pre-encoded in __init__; serve them from cache.
        if self.train_features:
            s = self.train_features.get(filename, None)
            if s:
                return s
            self.train_features = None  # if we reach this code we are beyond the training phase
        return self.encode(self.compute_features(filename))

    def compute_features(self, path_file):
        """Load one image and return its list of descriptor vectors."""
        imagen = io.imread(path_file)
        imagen = resize(imagen, self.resize, mode='edge')
        if self.contrast == 'sub-mean':
            # Zero-center each color channel independently.
            for i in range(3):
                imagen[:, :, i] = imagen[:, :, i] - imagen[:, :, i].mean()
        if self.channels == "green":
            imagen = imagen[:, :, 1]
        elif self.channels == "blue":
            # BUG FIX: previously sliced column 1 (imagen[:, 1, :]) instead
            # of selecting the blue color channel.
            imagen = imagen[:, :, 2]
        elif self.channels == "red":
            # BUG FIX: previously sliced row 1 (imagen[1, :, :]) instead of
            # selecting the red color channel.
            imagen = imagen[:, :, 0]
        else:
            imagen = rgb2gray(imagen)
        if self.equalize and self.equalize != 'none':
            # BUG FIX: the default equalize=False used to fall into this
            # branch and crash on False.split(':').
            if self.equalize == 'global':
                imagen = exposure.equalize_hist(imagen)
            else:
                # e.g. 'local:30' -> local equalization with disk radius 30
                d = int(self.equalize.split(':')[-1])
                imagen = rank.equalize(imagen, selem=disk(d))
        if self.edges != 'none':
            if self.edges == 'sobel':
                imagen = sobel(imagen)
            elif self.edges == 'scharr':
                imagen = scharr(imagen)
            elif self.edges == 'prewitt':
                imagen = prewitt(imagen)
            elif self.edges == 'roberts':
                imagen = roberts(imagen)
            else:
                raise Exception("Unknown edge detector {0}".format(self.edges))
        if self.correlation:
            # NOTE(review): correlation is treated as a template image path
            # here (the default 'yes' would fail to load) — confirm callers
            # always pass a real path.
            mascara = io.imread(self.correlation)
            mascara = rgb2gray(mascara)
            mascara = np.array(mascara)
            mascara = skimage.transform.resize(mascara, (25, 25), mode='edge')
            resultado = match_template(imagen, mascara)
            resultado = resultado + abs(resultado.min())
            # Keep only very low / very high correlation responses and
            # flatten the mid-range to a constant.
            resultado[~((resultado < 0.2) | (resultado > resultado.max() - 0.2))] = 0.6
            img = resultado
        else:
            img = img_as_float(imagen)
        if self.features.startswith('hog'):
            orientations = 8
            vec = hog(img, orientations=orientations, pixels_per_cell=self.pixels_per_cell, cells_per_block=self.cells_per_block, block_norm='L2-Hys')
            if self.features == 'hog':
                return [vec]
            elif self.features == 'hog-bovw':
                # Split the flat HOG vector into per-block descriptors.
                m = orientations * self.cells_per_block[0] * self.cells_per_block[1]
                XX = np.split(vec, len(vec) // m)
                return XX
            else:
                raise Exception("Unknown feature detection {0}".format(self.features))
        elif self.features == 'daisy':
            # NOTE(review): returns a single flat vector, while callers
            # iterate over a list of descriptors — confirm intended.
            return daisy(img, step=32, radius=16, rings=3).flatten()
        else:
            raise Exception("Unknown feature detection {0}".format(self.features))
change to bovw
import numpy as np
from skimage.util import img_as_float
from skimage.filters import gabor_kernel, sobel, scharr, prewitt, roberts, rank
from skimage.morphology import disk
from skimage.transform import resize
from skimage import data, io, filters
from scipy import ndimage as ndi
import scipy.ndimage.filters as filter
from skimage.color import rgb2gray
from skimage.feature import hog, daisy
from skimage import exposure
from numpy import arange
import skimage
import math
import sys
from scipy.stats import entropy
from skimage.feature import ORB, match_descriptors, local_binary_pattern
from skimage.feature import match_template
from sklearn.cluster import KMeans
class Features:
    """Bag-of-visual-words feature extractor over a set of images.

    Computes per-image descriptor vectors (HOG blocks or DAISY, optionally
    after channel selection, equalization, edge detection and template
    correlation), fits a KMeans codebook on the training descriptors, and
    encodes each image as a histogram ('hist') or sequence ('seq') of
    codebook center indices.
    """

    def __init__(self,
                 docs,
                 resize=(225, 225),
                 equalize=False,
                 edges='none',
                 pixels_per_cell=(8, 8),
                 cells_per_block=(3, 3),
                 contrast='none',
                 features='hog-vow',
                 channels='rgb',
                 correlation='yes',
                 sample_size=50000,
                 num_centers=223,
                 encoding='hist',
                 **kwargs):
        # NOTE(review): the default features='hog-vow' is not handled by
        # compute_features (only 'hog', 'hog-bovw', 'daisy') — probably a
        # typo for 'hog-bovw'; left unchanged to preserve the interface.
        self.resize = resize
        self.equalize = equalize
        self.pixels_per_cell = pixels_per_cell
        self.cells_per_block = cells_per_block
        self.edges = edges
        self.contrast = contrast
        self.features = features
        self.channels = channels
        self.correlation = correlation
        self.encoding = encoding
        self.train_features = {}  # a hack to avoid the training double processing of the whole dataset
        vectors = []
        for filename in docs:
            V = []
            for vec in self.compute_features(filename):
                V.append(vec)
                vectors.append(vec)
            self.train_features[filename] = V
        print("the training set contains {0} vectors".format(len(vectors)), file=sys.stderr)
        if len(vectors) > sample_size:
            # BUG FIX: np.random.choice requires a 1-D array and cannot
            # sample a list of descriptor vectors directly; sample indices
            # instead and keep the corresponding vectors.
            keep = np.random.choice(len(vectors), sample_size, replace=False)
            vectors = [vectors[i] for i in keep]
            print("we kept {0} vectors, randomly selected".format(len(vectors)), file=sys.stderr)
        self.model = KMeans(num_centers, verbose=1)
        print("preparing to fit our codebook with {0} centers".format(num_centers), file=sys.stderr)
        self.model.fit(vectors)
        vectors = None  # release the training descriptors
        print("encoding our training vectors", file=sys.stderr)
        for filename, veclist in self.train_features.items():
            self.train_features[filename] = self.encode(veclist)
        print("our feature module is fitted", file=sys.stderr)

    def encode(self, veclist):
        """Encode a list of descriptor vectors per the configured scheme."""
        if self.encoding == 'hist':
            return self.hist(veclist)
        elif self.encoding == 'seq':
            return self.sequence(veclist)
        else:
            raise Exception("Unknown feature encoding {0}".format(self.encoding))

    def sequence(self, veclist):
        """Map each descriptor to its nearest codebook center index."""
        seq = []
        for vec in veclist:
            # NOTE(review): sklearn's transform expects a 2-D array; vec may
            # need reshaping to (1, -1) — confirm against sklearn version.
            c = np.argmin(self.model.transform(vec))
            seq.append(c)
        return seq

    def hist(self, veclist):
        """Build a histogram of nearest-codebook-center counts."""
        h = np.zeros(self.model.n_clusters)
        for vec in veclist:
            c = np.argmin(self.model.transform(vec))
            h[c] += 1
        return h

    def __getitem__(self, filename):
        """Return the encoded feature vector for one image file."""
        # Training files were pre-encoded in __init__; serve them from cache.
        if self.train_features:
            s = self.train_features.get(filename, None)
            if s:
                return s
            self.train_features = None  # if we reach this code we are beyond the training phase
        return self.encode(self.compute_features(filename))

    def compute_features(self, path_file):
        """Load one image and return its list of descriptor vectors."""
        imagen = io.imread(path_file)
        imagen = resize(imagen, self.resize, mode='edge')
        if self.contrast == 'sub-mean':
            # Zero-center each color channel independently.
            for i in range(3):
                imagen[:, :, i] = imagen[:, :, i] - imagen[:, :, i].mean()
        if self.channels == "green":
            imagen = imagen[:, :, 1]
        elif self.channels == "blue":
            # BUG FIX: previously sliced column 1 (imagen[:, 1, :]) instead
            # of selecting the blue color channel.
            imagen = imagen[:, :, 2]
        elif self.channels == "red":
            # BUG FIX: previously sliced row 1 (imagen[1, :, :]) instead of
            # selecting the red color channel.
            imagen = imagen[:, :, 0]
        else:
            imagen = rgb2gray(imagen)
        if self.equalize and self.equalize != 'none':
            # BUG FIX: the default equalize=False used to fall into this
            # branch and crash on False.split(':').
            if self.equalize == 'global':
                imagen = exposure.equalize_hist(imagen)
            else:
                # e.g. 'local:30' -> local equalization with disk radius 30
                d = int(self.equalize.split(':')[-1])
                imagen = rank.equalize(imagen, selem=disk(d))
        if self.edges != 'none':
            if self.edges == 'sobel':
                imagen = sobel(imagen)
            elif self.edges == 'scharr':
                imagen = scharr(imagen)
            elif self.edges == 'prewitt':
                imagen = prewitt(imagen)
            elif self.edges == 'roberts':
                imagen = roberts(imagen)
            else:
                raise Exception("Unknown edge detector {0}".format(self.edges))
        if self.correlation:
            # NOTE(review): correlation is treated as a template image path
            # here (the default 'yes' would fail to load) — confirm callers
            # always pass a real path.
            mascara = io.imread(self.correlation)
            mascara = rgb2gray(mascara)
            mascara = np.array(mascara)
            mascara = skimage.transform.resize(mascara, (25, 25), mode='edge')
            resultado = match_template(imagen, mascara)
            resultado = resultado + abs(resultado.min())
            # Keep only very low / very high correlation responses and
            # flatten the mid-range to a constant.
            resultado[~((resultado < 0.2) | (resultado > resultado.max() - 0.2))] = 0.6
            img = resultado
        else:
            img = img_as_float(imagen)
        if self.features.startswith('hog'):
            orientations = 8
            vec = hog(img, orientations=orientations, pixels_per_cell=self.pixels_per_cell, cells_per_block=self.cells_per_block, block_norm='L2-Hys')
            if self.features == 'hog':
                return [vec]
            elif self.features == 'hog-bovw':
                # Split the flat HOG vector into per-block descriptors.
                m = orientations * self.cells_per_block[0] * self.cells_per_block[1]
                XX = np.split(vec, len(vec) // m)
                return XX
            else:
                raise Exception("Unknown feature detection {0}".format(self.features))
        elif self.features == 'daisy':
            # NOTE(review): returns a single flat vector, while callers
            # iterate over a list of descriptors — confirm intended.
            return daisy(img, step=32, radius=16, rings=3).flatten()
        else:
            raise Exception("Unknown feature detection {0}".format(self.features))
#!/usr/bin/env python
#
# Copyright (c) 2009-2013, Fortylines LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of fortylines nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Fortylines LLC ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Fortylines LLC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Implements workspace management.
The workspace manager script is used to setup a local machine
with third-party prerequisites and source code under revision
control such that it is possible to execute a development cycle
(edit/build/run) on a local machine.
The script will email build reports when the --mailto command line option
is specified. There are no sensible default values for the following
variables thus those should be set in the shell environment before
invoking the script.
dwsEmail=
smtpHost=
smtpPort=
dwsSmtpLogin=
dwsSmtpPasswd=
"""
# Primary Author(s): Sebastien Mirolo <smirolo@fortylines.com>
#
# Requires Python 2.7 or above.
__version__ = None
import datetime, hashlib, inspect, logging, logging.config, re, optparse
import os, shutil, socket, subprocess, sys, tempfile, urllib2, urlparse
import xml.dom.minidom, xml.sax
import cStringIO
# \todo executable used to return a password compatible with sudo. This is used
# temporarily while the sudo implementation is broken when invoked with no tty.
ASK_PASS = ''
# When True, all commands invoked through shell_command() are printed
# but not executed.
DO_NOT_EXECUTE = False
# Global variable that accumulates all encountered errors.
ERRORS = []
# When processing a project dependency index file, all project names matching
# one of the *EXCLUDE_PATS* will be considered non-existent.
EXCLUDE_PATS = []
# Log commands output
LOGGER = None
LOGGER_BUFFER = None
LOGGER_BUFFERING_COUNT = 0
# Pattern used to search for logs to report through email.
LOG_PAT = None
# When True, the log object is not used and output is only
# done on sys.stdout.
NO_LOG = False
# Addresses to email log reports to.
MAILTO = []
# When True, *find_lib* will prefer static libraries over dynamic ones if both
# exist for a specific libname. This should match .LIBPATTERNS in prefix.mk.
STATIC_LIB_FIRST = True
# When True, the script runs in batch mode and assumes the default answer
# for every question where it would have prompted the user for an answer.
USE_DEFAULT_ANSWER = False
# Directories where things get installed
INSTALL_DIRS = [ 'bin', 'include', 'lib', 'libexec', 'etc', 'share' ]
# distributions per native package managers
APT_DISTRIBS = [ 'Debian', 'Ubuntu' ]
YUM_DISTRIBS = [ 'Fedora' ]
PORT_DISTRIBS = [ 'Darwin' ]
# Global workspace configuration (a *Context* instance) and global project
# dependency index (an *IndexProjects* instance); both initialized at startup.
CONTEXT = None
INDEX = None
class Error(RuntimeError):
    '''This type of exception is used to identify "expected"
    error conditions and will lead to a useful message.
    Other exceptions are not caught when *__main__* executes,
    and an internal stack trace will be displayed. Exceptions
    which are not *Error*s are considered bugs in the workspace
    management script.'''

    def __init__(self, msg='unknown error', code=1, project_name=None):
        # *msg* is the human-readable explanation, *code* the process exit
        # status, and *project_name* optionally ties the error to a project.
        RuntimeError.__init__(self)
        self.code = code
        self.msg = msg
        self.project_name = project_name

    def __str__(self):
        # When a project is known, prefix the message with "project:code:".
        if self.project_name:
            return ':'.join([self.project_name, str(self.code), ' error']) \
                + ' ' + self.msg + '\n'
        return 'error: ' + self.msg + ' (error ' + str(self.code) + ')\n'
class CircleError(Error):
    '''Raised when a cycle is detected during a topological
    traversal of the dependency graph.'''

    def __init__(self, connected):
        # *connected* is the iterable of vertex names forming the cycle.
        joined = ' '.join(connected)
        Error.__init__(self, msg="detected a circle within %s" % joined)
class MissingError(Error):
    '''This error is thrown whenever a project has missing prerequisites.'''

    def __init__(self, project_name, prerequisites):
        # *prerequisites* is the list of prerequisite names that could not
        # be found; exit code 2 distinguishes this from generic errors.
        # Fixed typo in the user-facing message ("prerequisistes").
        Error.__init__(self, 'The following prerequisites are missing: ' \
            + ' '.join(prerequisites), 2, project_name)
class Context:
    '''The workspace configuration file contains environment variables used
    to update, build and package projects. The environment variables are roots
    of the general dependency graph as most other routines depend on srcTop
    and buildTop at the least.'''
    # Basename of the workspace configuration file.
    config_name = 'dws.mk'
    # Basename of the project dependency index file.
    indexName = 'dws.xml'
    def __init__(self):
        '''Declares all workspace environment variables (*self.environ*)
        with their descriptions and defaults. No value is read from disk
        here; see *locate* and *load_context* for that.'''
        # Two following variables are used by interactively change the make
        # command-line.
        self.tunnel_point = None
        self.targets = []
        self.overrides = []
        site_top = Pathname('siteTop',
            { 'description':
              'Root of the tree where the website is generated\n'\
              ' and thus where *remoteSiteTop* is cached\n'\
              ' on the local system',
              'default':os.getcwd()})
        remote_site_top = Pathname('remoteSiteTop',
            { 'description':
              'Root of the remote tree that holds the published website\n'
              ' (ex: url:/var/cache).',
              'default':''})
        install_top = Pathname('installTop',
            { 'description':'Root of the tree for installed bin/,'\
              ' include/, lib/, ...',
              'base':'siteTop','default':''})
        # We use installTop (previously siteTop), such that a command like
        # "dws build *remoteIndex* *siteTop*" run from a local build
        # directory creates intermediate and installed files there while
        # checking out the sources under siteTop.
        # It might just be my preference...
        build_top = Pathname('buildTop',
            { 'description':'Root of the tree where intermediate'\
              ' files are created.',
              'base':'siteTop','default':'build'})
        src_top = Pathname('srcTop',
            { 'description':
              'Root of the tree where the source code under revision\n'
              ' control lives on the local machine.',
              'base': 'siteTop',
              'default':'reps'})
        self.environ = { 'buildTop': build_top,
                         'srcTop' : src_top,
                         'patchTop': Pathname('patchTop',
             {'description':'Root of the tree where patches are stored',
              'base':'siteTop',
              'default':'patch'}),
                         'binDir': Pathname('binDir',
             {'description':'Root of the tree where executables are installed',
              'base':'installTop'}),
                         'installTop': install_top,
                         'includeDir': Pathname('includeDir',
             {'description':'Root of the tree where include files are installed',
              'base':'installTop'}),
                         'libDir': Pathname('libDir',
             {'description':'Root of the tree where libraries are installed',
              'base':'installTop'}),
                         'libexecDir': Pathname('libexecDir',
             {'description':'Root of the tree where executable helpers'\
              ' are installed',
              'base':'installTop'}),
                         'etcDir': Pathname('etcDir',
             {'description':
              'Root of the tree where configuration files for the local\n'
              ' system are installed',
              'base':'installTop'}),
                         'shareDir': Pathname('shareDir',
             {'description':'Directory where the shared files are installed.',
              'base':'installTop'}),
                         'siteTop': site_top,
                         'logDir': Pathname('logDir',
             {'description':'Directory where the generated log files are'\
              ' created',
              'base':'siteTop',
              'default':'log'}),
                         'indexFile': Pathname('indexFile',
             {'description':'Index file with projects dependencies information',
              'base':'siteTop',
              'default':os.path.join('resources',
                  os.path.basename(sys.argv[0]) + '.xml')}),
                         'remoteSiteTop': remote_site_top,
                         'remoteSrcTop': Pathname('remoteSrcTop',
             {'description':
              'Root of the tree on the remote machine where repositories\n'\
              ' are located.',
              'base':'remoteSiteTop',
              'default':'reps'}),
                         'remoteIndex': Pathname('remoteIndex',
             {'description':
              'Url to the remote index file with projects dependencies\n'\
              ' information',
              'base':'remoteSiteTop',
              'default':'reps/dws.git/dws.xml'}),
                         'darwinTargetVolume': Single('darwinTargetVolume',
             { 'description':
               'Destination of installed packages on a Darwin local\n'\
               ' machine. Installing on the "LocalSystem" requires\n'\
               ' administrator privileges.',
              'choices': {'LocalSystem':
                         'install packages on the system root for all users',
                        'CurrentUserHomeDirectory':
                         'install packages for the current user only'} }),
                         'distHost': HostPlatform('distHost'),
                         'smtpHost': Variable('smtpHost',
             { 'description':'Hostname for the SMTP server through'\
               ' which logs are sent.',
               'default':'localhost'}),
                         'smtpPort': Variable('smtpPort',
             { 'description':'Port for the SMTP server through'\
               ' which logs are sent.',
               'default':'5870'}),
                         'dwsSmtpLogin': Variable('dwsSmtpLogin',
             { 'description':
               'Login on the SMTP server for the user through which\n'\
               ' logs are sent.'}),
                         'dwsSmtpPasswd': Variable('dwsSmtpPasswd',
             { 'description':
               'Password on the SMTP server for the user through which\n'\
               ' logs are sent.'}),
                         'dwsEmail': Variable('dwsEmail',
             { 'description':
               'dws occasionally emails build reports (see --mailto\n'
               ' command line option). This is the address that will\n'\
               ' be shown in the *From* field.',
               'default':os.environ['LOGNAME'] + '@localhost'}) }
        self.build_top_relative_cwd = None
        self.config_filename = None
    def base(self, name):
        '''Returns a basename of the uri/path specified in variable *name*.
        We do not use os.path.basename directly because it wasn't designed
        to handle uri nor does urlparse was designed to handle git/ssh locators.
        '''
        locator = self.value(name)
        # git/ssh style locator: user@host:path
        look = re.match('\S+@\S+:(\S+)', locator)
        if look:
            return os.path.splitext(os.path.basename(look.group(1)))[0]
        look = re.match('https?:(\S+)', locator)
        if look:
            uri = urlparse.urlparse(locator)
            return os.path.splitext(os.path.basename(uri.path))[0]
        return os.path.splitext(os.path.basename(locator))[0]
    def bin_build_dir(self):
        '''Returns the bin/ directory located inside buildTop.'''
        return os.path.join(self.value('buildTop'), 'bin')
    def derived_helper(self, name):
        '''Absolute path to a file which is part of drop helper files
        located in the share/dws subdirectory. The absolute directory
        name to share/dws is derived from the path of the script
        being executed as such: dirname(sys.argv[0])/../share/dws.'''
        return os.path.join(
            os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))),
            'share','dws', name)
        # That code does not work when we are doing dws make (no recurse).
        # return os.path.join(self.value('buildTop'),'share','dws',name)
    def log_path(self, name):
        '''Absolute path to a file in the local system log
        directory hierarchy.'''
        return os.path.join(self.value('logDir'), name)
    def remote_src_path(self, name):
        '''Absolute path to access a repository on the remote machine.'''
        return os.path.join(self.value('remoteSrcTop'), name)
    def remote_host(self):
        '''Returns the host pointed by *remoteSiteTop*'''
        uri = urlparse.urlparse(CONTEXT.value('remoteSiteTop'))
        hostname = uri.netloc
        if not uri.netloc:
            # If there is no protocol specified, the hostname
            # will be in uri.scheme (That seems like a bug in urlparse).
            hostname = uri.scheme
        return hostname
    def cwd_project(self):
        '''Returns a project name derived out of the current directory.'''
        if not self.build_top_relative_cwd:
            self.environ['buildTop'].default = os.path.dirname(os.getcwd())
            log_info('no workspace configuration file could be ' \
               + 'found from ' + os.getcwd() \
               + ' all the way up to /. A new one, called ' + self.config_name\
               + ', will be created in *buildTop* after that path is set.')
            self.config_filename = os.path.join(self.value('buildTop'),
                                               self.config_name)
            self.save()
            self.locate()
        # Determine which tree (buildTop or srcTop) the cwd lives under and
        # strip that prefix to obtain the relative project name.
        if os.path.realpath(os.getcwd()).startswith(
            os.path.realpath(self.value('buildTop'))):
            top = os.path.realpath(self.value('buildTop'))
        elif os.path.realpath(os.getcwd()).startswith(
            os.path.realpath(self.value('srcTop'))):
            top = os.path.realpath(self.value('srcTop'))
        else:
            raise Error("You must run dws from within a subdirectory of "\
                            "buildTop or srcTop")
        prefix = os.path.commonprefix([top, os.getcwd()])
        return os.getcwd()[len(prefix) + 1:]
    def db_pathname(self):
        '''Absolute pathname to the project index file.'''
        if not str(self.environ['indexFile']):
            filtered = filter_rep_ext(CONTEXT.value('remoteIndex'))
            if filtered != CONTEXT.value('remoteIndex'):
                prefix = CONTEXT.value('remoteSrcTop')
                if not prefix.endswith(':') and not prefix.endswith(os.sep):
                    prefix = prefix + os.sep
                self.environ['indexFile'].default = \
                    CONTEXT.src_dir(filtered.replace(prefix, ''))
            else:
                self.environ['indexFile'].default = \
                    CONTEXT.local_dir(CONTEXT.value('remoteIndex'))
        return self.value('indexFile')
    def host(self):
        '''Returns the distribution of the local system
        on which the script is running.'''
        return self.value('distHost')
    def local_dir(self, name):
        '''Returns the path on the local system to a directory.'''
        site_top = self.value('siteTop')
        pos = name.rfind('./')
        if pos >= 0:
            localname = os.path.join(site_top, name[pos + 2:])
        elif (str(self.environ['remoteSiteTop'])
              and name.startswith(self.value('remoteSiteTop'))):
            localname = filter_rep_ext(name)
            remote_site_top = self.value('remoteSiteTop')
            if remote_site_top.endswith(':'):
                site_top = site_top + '/'
            localname = localname.replace(remote_site_top, site_top)
        elif ':' in name:
            # Remote locator with no matching remoteSiteTop prefix: cache
            # it under siteTop/resources.
            localname = os.path.join(
                site_top,'resources', os.path.basename(name))
        elif not name.startswith(os.sep):
            localname = os.path.join(site_top, name)
        else:
            localname = name.replace(
                self.value('remoteSiteTop'), site_top)
        return localname
    def remote_dir(self, name):
        '''Returns the absolute path on the remote system that corresponds
        to *name*, the absolute path of a file or directory on the local
        system.'''
        if name.startswith(self.value('siteTop')):
            return name.replace(self.value('siteTop'),
                                self.value('remoteSiteTop'))
        return None
    def load_context(self, filename):
        '''Reads "key=value" lines from the configuration file *filename*
        into *self.environ*. Returns True when a siteTop definition was
        found in the file.'''
        site_top_found = False
        config_file = open(filename)
        line = config_file.readline()
        while line != '':
            look = re.match(r'(\S+)\s*=\s*(\S+)', line)
            if look != None:
                if look.group(1) == 'siteTop':
                    site_top_found = True
                if (look.group(1) in self.environ
                    and isinstance(self.environ[look.group(1)], Variable)):
                    self.environ[look.group(1)].value = look.group(2)
                else:
                    self.environ[look.group(1)] = look.group(2)
            line = config_file.readline()
        config_file.close()
        return site_top_found
    def locate(self, config_filename=None):
        '''Locate the workspace configuration file and derive the project
        name out of its location.'''
        try:
            if config_filename:
                self.config_filename = config_filename
                self.config_name = os.path.basename(config_filename)
                self.build_top_relative_cwd = os.path.dirname(config_filename)
            else:
                self.build_top_relative_cwd, self.config_filename \
                    = search_back_to_root(self.config_name)
        except IOError:
            self.build_top_relative_cwd = None
            self.environ['buildTop'].configure(self)
            build_top = str(self.environ['buildTop'])
            site_top = str(self.environ['siteTop'])
            if build_top.startswith(site_top):
                # When build_top is inside the site_top, we create the config
                # file in site_top for convinience so dws commands can be run
                # anywhere from within site_top (i.e. both build_top
                # and src_top).
                self.config_filename = os.path.join(site_top, self.config_name)
            else:
                # When we have a split hierarchy we can build the same src_top
                # multiple different ways but dws commands should exclusively
                # be run from within the build_top.
                self.config_filename = os.path.join(build_top, self.config_name)
            if not os.path.isfile(self.config_filename):
                self.save()
        if self.build_top_relative_cwd == '.':
            self.build_top_relative_cwd = os.path.basename(os.getcwd())
            # \todo is this code still relevent?
            look = re.match('([^-]+)-.*', self.build_top_relative_cwd)
            if look:
                # Change of project name in *indexName* on "make dist-src".
                # self.build_top_relative_cwd = look.group(1)
                pass
        # -- Read the environment variables set in the config file.
        home_dir = os.environ['HOME']
        if 'SUDO_USER' in os.environ:
            home_dir = home_dir.replace(os.environ['SUDO_USER'],
                                        os.environ['LOGNAME'])
        user_default_config = os.path.join(home_dir, '.dws')
        if os.path.exists(user_default_config):
            self.load_context(user_default_config)
        site_top_found = self.load_context(self.config_filename)
        if not site_top_found:
            # By default we set *siteTop* to be the directory
            # where the configuration file was found since basic paths
            # such as *buildTop* and *srcTop* defaults are based on it.
            self.environ['siteTop'].value = os.path.dirname(
                self.config_filename)
    def logname(self):
        '''Name of the XML tagged log file where sys.stdout is captured.'''
        filename = os.path.basename(self.config_name)
        filename = os.path.splitext(filename)[0] + '.log'
        filename = self.log_path(filename)
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        return filename
    def logbuildname(self):
        '''Name of the log file for build summary.'''
        filename = os.path.basename(self.config_name)
        filename = os.path.splitext(filename)[0] + '-build.log'
        filename = self.log_path(filename)
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        return filename
    def obj_dir(self, name):
        '''Absolute path to the intermediate build directory for *name*.'''
        return os.path.join(self.value('buildTop'), name)
    def patch_dir(self, name):
        '''Absolute path to the patch directory for *name*.'''
        return os.path.join(self.value('patchTop'), name)
    def from_remote_index(self, remote_path):
        '''We need to set the *remoteIndex* to a realpath when we are dealing
        with a local file else links could end-up generating a different prefix
        than *remoteSiteTop* for *remoteIndex*/*indexName*.'''
        if search_repo_pat(remote_path):
            remote_path = os.path.join(remote_path, self.indexName)
        # Set remoteIndex.value instead of remoteIndex.default because
        # we don't want to trigger a configure of logDir before we have
        # a chance to set the siteTop.
        look = re.match(r'(\S+@)?(\S+):(.*)', remote_path)
        if look:
            self.tunnel_point = look.group(2)
            src_base = look.group(3)
            site_base = src_base
            remote_path_list = look.group(3).split(os.sep)
            host_prefix = self.tunnel_point + ':'
            if look.group(1):
                host_prefix = look.group(1) + host_prefix
        else:
            # We compute *base* here through the same algorithm as done
            # in *local_dir*. We do not call *local_dir* because remoteSiteTop
            # is not yet defined at this point.
            src_base = os.path.dirname(remote_path)
            while not os.path.isdir(src_base):
                src_base = os.path.dirname(src_base)
            remote_path_list = remote_path.split(os.sep)
            site_base = os.path.dirname(src_base)
            host_prefix = ''
        for i in range(0, len(remote_path_list)):
            if remote_path_list[i] == '.':
                site_base = os.sep.join(remote_path_list[0:i])
                src_base = os.path.join(site_base, remote_path_list[i + 1])
                break
            look = search_repo_pat(remote_path_list[i])
            if look:
                # splitext does not return any extensions when the path
                # starts with dot.
                rep_ext = look.group(1)
                if not rep_ext.startswith('.'):
                    _, rep_ext = os.path.splitext(look.group(1))
                if remote_path_list[i] == rep_ext:
                    i = i - 1
                if i > 2:
                    src_base = os.sep.join(remote_path_list[0:i])
                    site_base = os.sep.join(remote_path_list[0:i-1])
                elif i > 1:
                    src_base = remote_path_list[0]
                    site_base = ''
                else:
                    src_base = ''
                    site_base = ''
                break
        if not self.tunnel_point:
            # We can't use realpath before we figured out where the '.'
            # delimiter is in remote_path.
            remote_path = remote_path.replace(
                src_base, os.path.realpath(src_base))
            src_base = os.path.realpath(src_base)
            site_base = os.path.realpath(site_base)
        self.environ['remoteIndex'].value = remote_path
        self.environ['remoteSrcTop'].default = host_prefix + src_base
        # Note: We used to set the context[].default field which had for side
        # effect to print the value the first time the variable was used.
        # The problem is that we need to make sure remoteSiteTop is defined
        # before calling *local_dir*, otherwise the resulting indexFile value
        # will be different from the place the remoteIndex is fetched to.
        self.environ['remoteSiteTop'].value = host_prefix + site_base
    def save(self):
        '''Write the config back to a file.'''
        if not self.config_filename:
            # No config_filename means we are still figuring out siteTop,
            # so we don't know where to store the config file.
            return
        if not os.path.exists(os.path.dirname(self.config_filename)):
            os.makedirs(os.path.dirname(self.config_filename))
        config_file = open(self.config_filename, 'w')
        keys = sorted(self.environ.keys())
        config_file.write('# configuration for development workspace\n\n')
        for key in keys:
            val = self.environ[key]
            if len(str(val)) > 0:
                config_file.write(key + '=' + str(val) + '\n')
        config_file.close()
    def search_path(self, name, variant=None):
        '''Derives a list of directory names based on the PATH
        environment variable, *name* and a *variant* triplet.'''
        dirs = []
        # We want the actual value of *name*Dir and not one derived from binDir
        dirname = CONTEXT.value(name + 'Dir')
        if os.path.isdir(dirname):
            if variant:
                for subdir in os.listdir(dirname):
                    if re.match(variant, subdir):
                        dirs += [ os.path.join(dirname, subdir) ]
            else:
                dirs += [ dirname ]
        for path in os.environ['PATH'].split(':'):
            base = os.path.dirname(path)
            if name == 'lib':
                # On mixed 32/64-bit system, libraries also get installed
                # in lib64/. This is also true for 64-bit native python modules.
                for subpath in [ name, 'lib64' ]:
                    dirname = os.path.join(base, subpath)
                    if os.path.isdir(dirname):
                        if variant:
                            for subdir in os.listdir(dirname):
                                if re.match(variant, subdir):
                                    dirs += [ os.path.join(dirname, subdir) ]
                        else:
                            dirs += [ dirname ]
            elif name == 'bin':
                # Especially on Fedora, /sbin, /usr/sbin, etc. are many times
                # not in the PATH.
                if os.path.isdir(path):
                    dirs += [ path ]
                sbin = os.path.join(base, 'sbin')
                if (not sbin in os.environ['PATH'].split(':')
                    and os.path.isdir(sbin)):
                    dirs += [ sbin ]
            else:
                if os.path.isdir(os.path.join(base, name)):
                    dirs += [ os.path.join(base, name) ]
        if name == 'lib' and self.host() in PORT_DISTRIBS:
            # Just because python modules do not get installed
            # in /opt/local/lib/python2.7/site-packages
            dirs += [ '/opt/local/Library/Frameworks' ]
        return dirs
    def src_dir(self, name):
        '''Absolute path to the source directory for *name* under srcTop.'''
        return os.path.join(self.value('srcTop'), name)
    def value(self, name):
        '''returns the value of the workspace variable *name*. If the variable
        has no value yet, a prompt is displayed for it.'''
        if not name in self.environ:
            raise Error("Trying to read unknown variable " + name + ".")
        if (isinstance(self.environ[name], Variable)
            and self.environ[name].configure(self)):
            self.save()
        # recursively resolve any variables that might appear
        # in the variable value. We do this here and not while loading
        # the context because those names can have been defined later.
        value = str(self.environ[name])
        look = re.match(r'(.*)\${(\S+)}(.*)', value)
        while look:
            indirect = ''
            if look.group(2) in self.environ:
                indirect = self.value(look.group(2))
            elif look.group(2) in os.environ:
                indirect = os.environ[look.group(2)]
            value = look.group(1) + indirect + look.group(3)
            look = re.match(r'(.*)\${(\S+)}(.*)', value)
        return value
# Formats help for script commands. The necessity for this class
# can be understood by the following posts on the internet:
# - http://groups.google.com/group/comp.lang.python/browse_thread/thread/6df6e
# - http://www.alexonlinux.com/pythons-optparse-for-human-beings
#
# \todo The argparse (http://code.google.com/p/argparse/) might be part
# of the standard python library and address the issue at some point.
class CommandsFormatter(optparse.IndentedHelpFormatter):
    '''Help formatter that wraps each line of the epilog to the available
    width while preserving the explicit line breaks of the epilog text.'''

    def format_epilog(self, description):
        import textwrap
        if not description:
            return ""
        wrap_width = self.width - self.current_indent
        wrapped = []
        for chunk in description.split('\n'):
            wrapped.append(textwrap.fill(chunk,
                                         wrap_width,
                                         initial_indent="",
                                         subsequent_indent="  "))
        return "\n".join(wrapped) + "\n"
class IndexProjects:
    '''Index file containing the graph dependency for all projects.'''
    def __init__(self, context, source = None):
        # *context* is the workspace Context; *source* is either a path to
        # the index file or an inline XML string starting with '<?xml'.
        self.context = context
        self.parser = XMLDbParser(context)
        self.source = source
    def closure(self, dgen):
        '''Find out all dependencies from a root set of projects as defined
        by the dependency generator *dgen*.'''
        # Keep re-parsing until the generator stops discovering new
        # prerequisites, then return the topologically sorted steps.
        while dgen.more():
            self.parse(dgen)
        return dgen.topological()
    def parse(self, dgen):
        '''Parse the project index and generates callbacks to *dgen*'''
        self.validate()
        self.parser.parse(self.source, dgen)
    def validate(self, force=False):
        '''Create the project index file if it does not exist
        either by fetching it from a remote server or collecting
        projects indices locally.'''
        if not self.source:
            self.source = self.context.db_pathname()
        if not self.source.startswith('<?xml'):
            # The source is a filename (not an inline XML string), so it may
            # need to be fetched or regenerated when missing.
            if not os.path.exists(self.source) or force:
                selection = ''
                if not force:
                    # Ask the user whether to fetch the index from the remote
                    # server or to rebuild it from local project indices.
                    selection = select_one(
                        'The project index file could not '
                        + 'be found at "' + self.source \
                            + '". It can be regenerated through one ' \
                            + 'of the two following method:',
                        [ [ 'fetching', 'from remote server' ],
                          [ 'indexing',
                            'local projects in the workspace' ] ],
                        False)
                if selection == 'indexing':
                    pub_collect([])
                elif selection == 'fetching' or force:
                    remote_index = self.context.value('remoteIndex')
                    vcs = Repository.associate(remote_index)
                    # XXX Does not matter here for rsync.
                    #     What about other repos?
                    vcs.update(None, self.context)
            if not os.path.exists(self.source):
                raise Error(self.source + ' does not exist.')
class PdbHandler(object):
    '''Callback interface invoked while parsing a project index with an
    *xmlDbParser*. This base implementation ignores every event; it is the
    responsibility of subclasses to handle the callbacks they care about.'''

    def __init__(self):
        pass

    def end_parse(self):
        '''Called once parsing of the index is complete.'''
        pass

    def project(self, proj):
        '''Called for each project definition found in the index.'''
        pass
class Unserializer(PdbHandler):
    '''Builds *Project* instances for every project that matches *include_pats*
    and not *exclude_pats*. See *filters*() for implementation.'''

    def __init__(self, include_pats=None, exclude_pats=None, custom_steps=None):
        PdbHandler.__init__(self)
        self.projects = {}
        self.first_project = None
        # Bug fix: *include_pats* was only assigned when the argument was
        # truthy, so *filters*() raised AttributeError when the caller
        # passed None or an empty list. Always initialize it, mirroring
        # the handling of *exclude_pats* below.
        if include_pats:
            self.include_pats = set(include_pats)
        else:
            self.include_pats = set([])
        # Project which either fullfil all prerequisites or that have been
        # explicitely excluded from installation by the user will be added
        # to *exclude_pats*.
        if exclude_pats:
            self.exclude_pats = set(exclude_pats)
        else:
            self.exclude_pats = set([])
        if custom_steps:
            self.custom_steps = dict(custom_steps)
        else:
            self.custom_steps = {}

    def as_project(self, name):
        '''Returns the *Project* recorded under *name*, raising an *Error*
        when the project is not present in the index.'''
        if not name in self.projects:
            raise Error("unable to find " + name + " in the index file.",
                        project_name=name)
        return self.projects[name]

    def filters(self, project_name):
        '''True when *project_name* matches one of *include_pats* and none
        of *exclude_pats*. '+' is escaped since patterns are regexps.'''
        for inc in self.include_pats:
            inc = inc.replace('+','\\+')
            if re.match(inc, project_name):
                for exc in self.exclude_pats:
                    if re.match(exc.replace('+','\\+'), project_name):
                        return False
                return True
        return False

    def project(self, proj_obj):
        '''Callback for the parser.'''
        if (not proj_obj.name in self.projects) and self.filters(proj_obj.name):
            # Remember the first matching project; it is used as the
            # default target elsewhere.
            if not self.first_project:
                self.first_project = proj_obj
            self.projects[proj_obj.name] = proj_obj
class DependencyGenerator(Unserializer):
'''*DependencyGenerator* implements a breath-first search of the project
dependencies index with a specific twist.
At each iteration, if all prerequisites for a project can be found
on the local system, the dependency edge is cut from the next iteration.
Missing prerequisite executables, headers and libraries require
the installation of prerequisite projects as stated by the *missings*
list of edges. The user will be prompt for *candidates*() and through
the options available will choose to install prerequisites through
compiling them out of a source controlled repository or a binary
distribution package.
*DependencyGenerator.end_parse*() is at the heart of the workspace
bootstrapping and other "recurse" features.
'''
def __init__(self, repositories, packages, exclude_pats = None,
custom_steps = None, force_update = False):
'''*repositories* will be installed from compiling
a source controlled repository while *packages* will be installed
from a binary distribution package.
*exclude_pats* is a list of projects which should be removed from
the final topological order.'''
self.roots = packages + repositories
Unserializer.__init__(self, self.roots, exclude_pats, custom_steps)
# When True, an exception will stop the recursive make
# and exit with an error code, otherwise it moves on to
# the next project.
self.stop_make_after_error = False
self.packages = set(packages)
self.repositories = set(repositories)
self.active_prerequisites = {}
for prereq_name in repositories + packages:
self.active_prerequisites[prereq_name] = (
prereq_name, 0, TargetStep(0, prereq_name) )
self.levels = {}
self.levels[0] = set([])
for rep in repositories + packages:
self.levels[0] |= set([ TargetStep(0, rep) ])
# Vertices in the dependency tree
self.vertices = {}
self.force_update = force_update
def __str__(self):
return "vertices:\n%s" % str(self.vertices)
def connect_to_setup(self, name, step):
if name in self.vertices:
self.vertices[name].prerequisites += [ step ]
def add_config_make(self, variant, configure, make, prerequisites):
config = None
config_name = ConfigureStep.genid(variant.project, variant.target)
if not config_name in self.vertices:
config = configure.associate(variant.target)
self.vertices[config_name] = config
else:
config = self.vertices[config_name]
make_name = BuildStep.genid(variant.project, variant.target)
if not make_name in self.vertices:
make = make.associate(variant.target)
make.force_update = self.force_update
self.vertices[make_name] = make
for prereq in prerequisites:
make.prerequisites += [ prereq ]
if config:
make.prerequisites += [ config ]
setup_name = SetupStep.genid(variant.project, variant.target)
self.connect_to_setup(setup_name, make)
return self.vertices[make_name]
def add_install(self, project_name):
flavor = None
install_step = None
managed_name = project_name.split(os.sep)[-1]
install_name = InstallStep.genid(managed_name)
if install_name in self.vertices:
# We already decided to install this project, nothing more to add.
return self.vertices[install_name], flavor
# We do not know the target at this point so we can't build a fully
# qualified setup_name and index into *vertices* directly. Since we
# are trying to install projects through the local package manager,
# it is doubtful we should either know or care about the target.
# That's a primary reason why target got somewhat slightly overloaded.
# We used runtime="python" instead of target="python" in an earlier
# design.
setup = None
setup_name = SetupStep.genid(project_name)
for name, step in self.vertices.iteritems():
if name.endswith(setup_name):
setup = step
if (setup and not setup.run(CONTEXT)):
install_step = create_managed(
managed_name, setup.versions, setup.target)
if not install_step and project_name in self.projects:
project = self.projects[project_name]
if CONTEXT.host() in project.packages:
filenames = []
flavor = project.packages[CONTEXT.host()]
for remote_path in flavor.update.fetches:
filenames += [ CONTEXT.local_dir(remote_path) ]
install_step = create_package_file(project_name, filenames)
update_s = self.add_update(project_name, flavor.update)
# package files won't install without prerequisites already
# on the local system.
install_step.prerequisites += self.add_setup(setup.target,
flavor.prerequisites([CONTEXT.host()]))
if update_s:
install_step.prerequisites += [ update_s ]
elif project.patch:
# build and install from source
flavor = project.patch
prereqs = self.add_setup(setup.target,
flavor.prerequisites([CONTEXT.host()]))
update_s = self.add_update(
project_name, project.patch.update)
if update_s:
prereqs += [ update_s ]
install_step = self.add_config_make(
TargetStep(0, project_name, setup.target),
flavor.configure, flavor.make, prereqs)
if not install_step:
# Remove special case install_step is None; replace it with
# a placeholder instance that will throw an exception
# when the *run* method is called.
install_step = InstallStep(project_name, target=setup.target)
self.connect_to_setup(setup_name, install_step)
return install_step, flavor
def add_setup(self, target, deps):
targets = []
for dep in deps:
target_name = dep.target
if not dep.target:
target_name = target
cap = SetupStep.genid(dep.name)
if cap in self.custom_steps:
setup = self.custom_steps[cap](dep.name, dep.files)
else:
setup = SetupStep(
dep.name, dep.files, dep.versions, target_name)
if not setup.name in self.vertices:
self.vertices[setup.name] = setup
else:
self.vertices[setup.name].insert(setup)
targets += [ self.vertices[setup.name] ]
return targets
def add_update(self, project_name, update, update_rep=True):
update_name = UpdateStep.genid(project_name)
if update_name in self.vertices:
return self.vertices[update_name]
update_s = None
fetches = {}
if len(update.fetches) > 0:
# We could unconditionally add all source tarball since
# the *fetch* function will perform a *find_cache* before
# downloading missing files. Unfortunately this would
# interfere with *pub_configure* which checks there are
# no missing prerequisites whithout fetching anything.
fetches = find_cache(CONTEXT, update.fetches)
rep = None
if update_rep or not os.path.isdir(CONTEXT.src_dir(project_name)):
rep = update.rep
if update.rep or len(fetches) > 0:
update_s = UpdateStep(project_name, rep, fetches)
self.vertices[update_s.name] = update_s
return update_s
def contextual_targets(self, variant):
raise Error("DependencyGenerator should not be instantiated directly")
    def end_parse(self):
        '''Finish one breadth-first pass over the dependency graph:
        compute the next set of active prerequisites and, when the
        traversal cannot go any further, prompt the user to choose
        between repository, patch or package installs.'''
        further = False
        next_active_prerequisites = {}
        for prereq_name in self.active_prerequisites:
            # Each edge is a triplet source: (color, depth, variant)
            # Gather next active Edges.
            color = self.active_prerequisites[prereq_name][0]
            depth = self.active_prerequisites[prereq_name][1]
            variant = self.active_prerequisites[prereq_name][2]
            next_depth = depth + 1
            # The algorithm to select targets depends on the command semantic.
            # The build, make and install commands differ in behavior there
            # in the presence of repository, patch and package tags.
            need_prompt, targets = self.contextual_targets(variant)
            if need_prompt:
                # Keep the prerequisite active at the same depth; it will
                # be resolved after the user has been prompted below.
                next_active_prerequisites[prereq_name] = (color, depth, variant)
            else:
                for target in targets:
                    further = True
                    target_name = str(target.project)
                    if target_name in next_active_prerequisites:
                        if next_active_prerequisites[target_name][0] > color:
                            # We propagate a color attribute through
                            # the constructed DAG to detect cycles later on.
                            next_active_prerequisites[target_name] = (color,
                                                                      next_depth,
                                                                      target)
                    else:
                        next_active_prerequisites[target_name] = (color,
                                                                  next_depth,
                                                                  target)
                    if not next_depth in self.levels:
                        self.levels[next_depth] = set([])
                    self.levels[ next_depth ] |= set([target])
        self.active_prerequisites = next_active_prerequisites
        if not further:
            # This is an opportunity to prompt the user.
            # The user's selection will decide, when available, if the project
            # should be installed from a repository, a patch, a binary package
            # or just purely skipped.
            reps = []
            packages = []
            for name in self.active_prerequisites:
                if (not os.path.isdir(CONTEXT.src_dir(name))
                    and self.filters(name)):
                    # If a prerequisite project is not defined as an explicit
                    # package, we will assume the prerequisite name is
                    # enough to install the required tools for the prerequisite.
                    row = [ name ]
                    if name in self.projects:
                        project = self.as_project(name)
                        if project.installed_version:
                            row += [ project.installed_version ]
                        if project.repository:
                            reps += [ row ]
                        if not project.repository:
                            packages += [ row ]
                    else:
                        packages += [ row ]
            # Prompt to choose amongst installing from repository,
            # patch or package when those tags are available.
            reps, packages = select_checkout(reps, packages)
            self.repositories |= set(reps)
            self.packages |= set(packages)
        # Add all these in the include_pats such that we load project
        # information the next time around.
        for name in self.active_prerequisites:
            if not name in self.include_pats:
                self.include_pats |= set([ name ])
def more(self):
'''True if there are more iterations to conduct.'''
return len(self.active_prerequisites) > 0
    def topological(self):
        '''Returns a topological ordering of projects selected.'''
        ordered = []
        remains = []
        for name in self.packages:
            # We have to wait until here to create the install steps. Before
            # then, we do not know if they will be required nor if prerequisites
            # are repository projects in the index file or not.
            install_step, _ = self.add_install(name)
            if install_step and not install_step.name in self.vertices:
                remains += [ install_step ]
        for step in self.vertices:
            remains += [ self.vertices[step] ]
        next_remains = []
        # Debugging aid: dump the steps that are not ordered yet.
        if False:
            log_info('!!!remains:')
            for step in remains:
                is_vert = ''
                if step.name in self.vertices:
                    is_vert = '*'
                log_info('!!!\t%s %s %s'
                         % (step.name, str(is_vert),
                            str([ pre.name for pre in step.prerequisites])))
        while len(remains) > 0:
            for step in remains:
                ready = True
                insert_point = 0
                # A step is ready once all of its prerequisites are already
                # in *ordered*; *insert_point* tracks the position right
                # after the last prerequisite found.
                for prereq in step.prerequisites:
                    index = 0
                    found = False
                    for ordered_step in ordered:
                        index = index + 1
                        if prereq.name == ordered_step.name:
                            found = True
                            break
                    if not found:
                        ready = False
                        break
                    else:
                        if index > insert_point:
                            insert_point = index
                if ready:
                    # Among the positions allowed by the prerequisites,
                    # keep lower-priority steps before higher-priority ones.
                    for ordered_step in ordered[insert_point:]:
                        if ordered_step.priority > step.priority:
                            break
                        insert_point = insert_point + 1
                    ordered.insert(insert_point, step)
                else:
                    next_remains += [ step ]
            # No step became ready in this pass: the graph has a cycle.
            if len(remains) <= len(next_remains):
                raise CircleError([vert.name for vert in next_remains])
            remains = next_remains
            next_remains = []
        if False:
            log_info("!!! => ordered:")
            for ordered_step in ordered:
                log_info(" " + ordered_step.name)
        return ordered
class BuildGenerator(DependencyGenerator):
    '''Forces selection of installing from repository when that tag
    is available in a project.'''
    def contextual_targets(self, variant):
        '''At this point we want to add all prerequisites which are either
        a repository or a patch/package for which the dependencies are not
        complete. Returns a (need_prompt, targets) pair; *need_prompt* is
        always False for this generator.'''
        targets = []
        name = variant.project
        if name in self.projects:
            tags = [ CONTEXT.host() ]
            project = self.as_project(name)
            if project.repository:
                # Build from source: create setup, update and
                # configure/make steps.
                self.repositories |= set([name])
                targets = self.add_setup(variant.target,
                    project.repository.prerequisites(tags))
                update_s = self.add_update(name, project.repository.update)
                prereqs = targets
                if update_s:
                    prereqs = [ update_s ] + targets
                self.add_config_make(variant,
                    project.repository.configure,
                    project.repository.make,
                    prereqs)
            else:
                # No repository tag: install as a patch/binary package.
                self.packages |= set([name])
                install_step, flavor = self.add_install(name)
                if flavor:
                    targets = self.add_setup(variant.target,
                        flavor.prerequisites(tags))
        else:
            # We leave the native host package manager to deal with this one...
            self.packages |= set([ name ])
            self.add_install(name)
        return (False, targets)
class MakeGenerator(DependencyGenerator):
    '''Forces selection of installing from repository when that tag
    is available in a project.'''
    def __init__(self, repositories, packages,
                 exclude_pats = None, custom_steps = None):
        # force_update=True: make steps always run, even when no
        # prerequisite was updated.
        DependencyGenerator.__init__(
            self, repositories, packages,
            exclude_pats, custom_steps, force_update=True)
        self.stop_make_after_error = True
    def contextual_targets(self, variant):
        '''Returns a (need_prompt, targets) pair for *variant*. The user
        is prompted only when there is more than one way to install the
        project (repository, patch, package) and no choice has been
        recorded yet.'''
        name = variant.project
        if not name in self.projects:
            # Unknown project: leave it to the host package manager.
            self.packages |= set([ name ])
            return (False, [])
        need_prompt = True
        project = self.as_project(name)
        if os.path.isdir(CONTEXT.src_dir(name)):
            # If there is already a local source directory in *srcTop*, it is
            # also a no brainer - invoke make.
            nb_choices = 1
        else:
            # First, compute how many potential installation tags we have here.
            nb_choices = 0
            if project.repository:
                nb_choices = nb_choices + 1
            if project.patch:
                nb_choices = nb_choices + 1
            if len(project.packages) > 0:
                nb_choices = nb_choices + 1
        targets = []
        tags = [ CONTEXT.host() ]
        if nb_choices == 1:
            # Only one choice is easy. We just have to make sure we won't
            # put the project in two different sets.
            chosen = self.repositories | self.packages
            if project.repository:
                need_prompt = False
                targets = self.add_setup(variant.target,
                    project.repository.prerequisites(tags))
                update_s = self.add_update(
                    name, project.repository.update, False)
                prereqs = targets
                if update_s:
                    prereqs = [ update_s ] + targets
                self.add_config_make(variant,
                                     project.repository.configure,
                                     project.repository.make,
                                     prereqs)
                if not name in chosen:
                    self.repositories |= set([name])
            elif len(project.packages) > 0 or project.patch:
                need_prompt = False
                install_step, flavor = self.add_install(name)
                if flavor:
                    # XXX This will already have been done in add_install ...
                    targets = self.add_setup(variant.target,
                                             flavor.prerequisites(tags))
                if not name in chosen:
                    self.packages |= set([name])
        # At this point there is more than one choice to install the project.
        # When the repository, patch or package tag to follow through has
        # already been decided, let's check if we need to go deeper through
        # the prerequisites.
        if need_prompt:
            if name in self.repositories:
                need_prompt = False
                targets = self.add_setup(variant.target,
                    project.repository.prerequisites(tags))
                update_s = self.add_update(
                    name, project.repository.update, False)
                prereqs = targets
                if update_s:
                    prereqs = [ update_s ] + targets
                self.add_config_make(variant,
                                     project.repository.configure,
                                     project.repository.make,
                                     prereqs)
            elif len(project.packages) > 0 or project.patch:
                need_prompt = False
                install_step, flavor = self.add_install(name)
                if flavor:
                    targets = self.add_setup(variant.target,
                                             flavor.prerequisites(tags))
        return (need_prompt, targets)
    def topological(self):
        '''Filter out the roots from the topological ordering in order
        for 'make recurse' to behave as expected (i.e. not compiling roots).'''
        vertices = DependencyGenerator.topological(self)
        results = []
        roots = set([ MakeStep.genid(root) for root in self.roots ])
        for project in vertices:
            if not project.name in roots:
                results += [ project ]
        return results
class MakeDepGenerator(MakeGenerator):
    '''Generate the set of prerequisite projects regardless of the executables,
    libraries, etc. which are already installed.'''

    def add_install(self, name):
        # A "no-op" install step: we are not interested in prerequisites
        # past the repository projects and their direct dependencies.
        return InstallStep(name), None

    def add_setup(self, target, deps):
        '''Create or merge a plain *SetupStep* vertex for every dependency
        in *deps*, ignoring custom steps, and return the resulting list.'''
        steps = []
        for dep in deps:
            dep_target = dep.target if dep.target else target
            setup = SetupStep(dep.name, dep.files, dep.versions, dep_target)
            if setup.name in self.vertices:
                setup = self.vertices[setup.name].insert(setup)
            else:
                self.vertices[setup.name] = setup
            steps.append(self.vertices[setup.name])
        return steps
class DerivedSetsGenerator(PdbHandler):
    '''Compute the set of projects on which no other project depends.'''

    def __init__(self):
        PdbHandler.__init__(self)
        self.roots = []
        self.nonroots = []

    def project(self, proj):
        '''Callback invoked for each project parsed out of the index.'''
        for dep_name in proj.prerequisite_names([CONTEXT.host()]):
            # A project that shows up as someone's prerequisite can no
            # longer be a root.
            if dep_name in self.roots:
                self.roots.remove(dep_name)
            if dep_name not in self.nonroots:
                self.nonroots.append(dep_name)
        if proj.name not in self.nonroots and proj.name not in self.roots:
            self.roots.append(proj.name)
# =============================================================================
# Writers are used to save *Project* instances to persistent storage
# in different formats.
# =============================================================================
class NativeWriter(PdbHandler):
    '''Serialize *Project* instances as xml formatted text that the script
    itself can load back.'''

    def __init__(self):
        PdbHandler.__init__(self)
class Variable:
    '''Variable that ends up being defined in the workspace make
    fragment and thus in Makefile.

    *pairs* is either a plain value or a dictionary with optional
    'description', 'value' and 'default' keys.'''
    def __init__(self, name, pairs):
        self.name = name
        self.value = None
        self.descr = None
        self.default = None
        if isinstance(pairs, dict):
            # `items` instead of `iteritems` keeps this code working
            # on both Python 2 and Python 3.
            for key, val in pairs.items():
                if key == 'description':
                    self.descr = val
                elif key == 'value':
                    self.value = val
                elif key == 'default':
                    self.default = val
        else:
            # A plain value serves as both current value and default.
            self.value = pairs
            self.default = self.value
        self.constrains = {}
    def __str__(self):
        if self.value:
            return str(self.value)
        else:
            return ''
    def constrain(self, variables):
        '''Hook for subclasses to derive a value from other *variables*.'''
        pass
    def configure(self, context):
        '''Set value to the string entered at the prompt.
        We used to define a *Pathname* base field as a pointer to a *Pathname*
        instance instead of a string to index context.environ[]. That only
        worked the first time (before dws.mk is created) and when the base
        functionality wasn't used later on. As a result we need to pass the
        *context* as a parameter here.

        Returns True when the value was (interactively) set, False when
        it was already known.'''
        if self.name in os.environ:
            # In case the variable was set in the environment,
            # we do not print its value on the terminal, as a very
            # rudimentary way to avoid leaking sensitive information.
            self.value = os.environ[self.name]
        # `is not None` instead of `!= None`: identity test is the
        # correct idiom for the None sentinel.
        if self.value is not None:
            return False
        log_info('\n' + self.name + ':')
        log_info(self.descr)
        if USE_DEFAULT_ANSWER:
            self.value = self.default
        else:
            default_prompt = ""
            if self.default:
                default_prompt = " [" + self.default + "]"
            self.value = prompt("Enter a string %s: " % default_prompt)
        log_info("%s set to %s" % (self.name, str(self.value)))
        return True
class HostPlatform(Variable):
    '''Variable whose value is the distribution the script runs on.'''
    def __init__(self, name, pairs=None):
        '''Initialize an HostPlatform variable. *pairs* is a dictionnary.'''
        Variable.__init__(self, name, pairs)
        # Codename/qualified release of the distribution when detected.
        self.dist_codename = None
    def configure(self, context):
        '''Set value to the distribution on which the script is running.'''
        if self.value is not None:
            return False
        sysname = os.uname()[0]
        if sysname == 'Darwin':
            self.value = 'Darwin'
        elif sysname == 'Linux':
            # Let's try to determine the host platform by scanning
            # well-known release files in turn.
            for version_path in [ '/etc/system-release', '/etc/lsb-release',
                                  '/etc/debian_version', '/proc/version' ]:
                if not os.path.exists(version_path):
                    continue
                # A context manager guarantees the file handle is closed
                # even when an exception is raised while parsing (the
                # previous code leaked the handle in that case and also
                # shadowed the *version* result of os.uname()).
                with open(version_path) as version_file:
                    for line in version_file:
                        for dist in [ 'Debian', 'Ubuntu', 'Fedora' ]:
                            look = re.match('.*' + dist + '.*', line)
                            if look:
                                self.value = dist
                            look = re.match('.*' + dist.lower() + '.*', line)
                            if look:
                                self.value = dist
                            if not self.dist_codename:
                                look = re.match(
                                    r'DISTRIB_CODENAME=\s*(\S+)', line)
                                if look:
                                    self.dist_codename = look.group(1)
                                elif self.value:
                                    # First time around the loop we will
                                    # match this pattern but not the previous
                                    # one that sets value to 'Fedora'.
                                    look = re.match(r'.*release (\d+)', line)
                                    if look:
                                        self.dist_codename = \
                                            self.value + look.group(1)
                if self.value:
                    break
        if self.value:
            self.value = self.value.capitalize()
        return True
class Pathname(Variable):
    '''Workspace variable whose value is a filesystem path, optionally
    defined relative to another *base* variable.'''
    def __init__(self, name, pairs):
        Variable.__init__(self, name, pairs)
        # Name of the variable this pathname defaults relative to, if any.
        self.base = None
        if 'base' in pairs:
            self.base = pairs['base']
    def configure(self, context):
        '''Generate an interactive prompt to enter a workspace variable
        *var* value and returns True if the variable value as been set.'''
        if self.value != None:
            return False
        # compute the default leaf directory from the variable name
        # (the prefix before the first uppercase letter, ex. 'log'
        # for 'logDir').
        leaf_dir = self.name
        for last in range(0, len(self.name)):
            if self.name[last] in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
                leaf_dir = self.name[:last]
                break
        # Placeholder; *dirname* is overwritten before being used below.
        dirname = self
        base_value = None
        off_base_chosen = False
        default = self.default
        # We buffer the text and delay writing to log because we can get
        # here to find out where the log resides!
        if self.name == 'logDir':
            global LOGGER_BUFFERING_COUNT
            LOGGER_BUFFERING_COUNT = LOGGER_BUFFERING_COUNT + 1
        log_info('\n%s:\n%s' % (self.name, self.descr))
        if (not default
            or (not ((':' in default) or default.startswith(os.sep)))):
            # If there are no default values or the default is not
            # an absolute pathname.
            if self.base:
                base_value = str(context.environ[self.base])
                if default != None:
                    # Because '' will evaluates to False
                    show_default = '*' + self.base + '*/' + default
                else:
                    show_default = '*' + self.base + '*/' + leaf_dir
                if not base_value:
                    directly = 'Enter *' + self.name + '* directly ?'
                    offbase = 'Enter *' + self.base + '*, *' + self.name \
                        + '* will defaults to ' + show_default \
                        + ' ?'
                    selection= select_one(
                        '%s is based on *%s* by default. Would you like to ... '
                        % (self.name, self.base),
                        [ [ offbase ], [ directly ] ], False)
                    if selection == offbase:
                        off_base_chosen = True
                        if isinstance(context.environ[self.base], Pathname):
                            # Configure the base variable first, recursively.
                            context.environ[self.base].configure(context)
                        base_value = str(context.environ[self.base])
            else:
                base_value = os.getcwd()
            if default != None:
                # Because '' will evaluates to False
                default = os.path.join(base_value, default)
            else:
                default = os.path.join(base_value, leaf_dir)
        if not default:
            default = os.getcwd()
        dirname = default
        if off_base_chosen:
            base_value = str(context.environ[self.base])
            if self.default:
                dirname = os.path.join(base_value, self.default)
            else:
                dirname = os.path.join(base_value, leaf_dir)
        else:
            if not USE_DEFAULT_ANSWER:
                dirname = prompt("Enter a pathname [%s]: " % default)
            if dirname == '':
                dirname = default
        # NOTE(review): a ':' in the pathname seems to mark a non-local
        # path that must not be normalized — confirm against callers.
        if not ':' in dirname:
            dirname = os.path.normpath(os.path.abspath(dirname))
        self.value = dirname
        if not ':' in dirname:
            if not os.path.exists(self.value):
                log_info(self.value + ' does not exist.')
                # We should not assume the pathname is a directory,
                # hence we do not issue a os.makedirs(self.value)
        # Now it should be safe to write to the logfile.
        if self.name == 'logDir':
            LOGGER_BUFFERING_COUNT = LOGGER_BUFFERING_COUNT - 1
        log_info('%s set to %s' % (self.name, self.value))
        return True
class Metainfo(Variable):
    '''Plain variable carrying meta information; no extra behavior.'''

    def __init__(self, name, pairs):
        Variable.__init__(self, name, pairs)
class Multiple(Variable):
    '''Variable that holds a list of values selected among a set
    of choices.'''
    def __init__(self, name, pairs):
        # A space-separated string is accepted as shorthand for a list.
        if pairs and isinstance(pairs, str):
            pairs = pairs.split(' ')
        Variable.__init__(self, name, pairs)
        self.choices = {}
        if 'choices' in pairs:
            self.choices = pairs['choices']
    def __str__(self):
        return ' '.join(self.value)
    def configure(self, context):
        '''Generate an interactive prompt to enter a workspace variable
        *var* value and returns True if the variable value as been set.'''
        # There is no point to propose a choice already constrained by
        # other variables values.
        choices = []
        # `items` instead of `iteritems` keeps this code working
        # on both Python 2 and Python 3.
        for key, descr in self.choices.items():
            if not key in self.value:
                choices += [ [key, descr] ]
        if len(choices) == 0:
            return False
        descr = self.descr
        if len(self.value) > 0:
            descr += " (constrained: " + ", ".join(self.value) + ")"
        self.value = select_multiple(descr, choices)
        # Interpolate with '%' as everywhere else in this file; the
        # previous code passed the tuple as a second positional argument
        # so the message was never formatted.
        log_info('%s set to %s' % (self.name, ', '.join(self.value)))
        self.choices = []
        return True
    def constrain(self, variables):
        '''Append values contributed by other *variables* constraints.'''
        if not self.value:
            self.value = []
        for var in variables:
            if isinstance(variables[var], Variable) and variables[var].value:
                if isinstance(variables[var].value, list):
                    for val in variables[var].value:
                        if (val in variables[var].constrains
                            and self.name in variables[var].constrains[val]):
                            self.value += \
                                variables[var].constrains[val][self.name]
                else:
                    val = variables[var].value
                    if (val in variables[var].constrains
                        and self.name in variables[var].constrains[val]):
                        self.value += variables[var].constrains[val][self.name]
class Single(Variable):
    '''Variable that holds a single value selected among a set
    of choices.'''
    def __init__(self, name, pairs):
        Variable.__init__(self, name, pairs)
        self.choices = None
        if 'choices' in pairs:
            self.choices = []
            # `items` instead of `iteritems` keeps this code working
            # on both Python 2 and Python 3.
            for key, descr in pairs['choices'].items():
                self.choices += [ [key, descr] ]
    def configure(self, context):
        '''Generate an interactive prompt to enter a workspace variable
        *var* value and returns True if the variable value as been set.'''
        if self.value:
            return False
        self.value = select_one(self.descr, self.choices)
        # Fixed the missing space in the log message ("set to%s").
        log_info('%s set to %s' % (self.name, self.value))
        return True
    def constrain(self, variables):
        '''Derive this variable's value from other *variables* constraints.'''
        for var in variables:
            if isinstance(variables[var], Variable) and variables[var].value:
                if isinstance(variables[var].value, list):
                    for val in variables[var].value:
                        if (val in variables[var].constrains
                            and self.name in variables[var].constrains[val]):
                            self.value = \
                                variables[var].constrains[val][self.name]
                else:
                    val = variables[var].value
                    if (val in variables[var].constrains
                        and self.name in variables[var].constrains[val]):
                        self.value = variables[var].constrains[val][self.name]
class Dependency:
    '''Dependency of a project onto files (executables, libraries, etc.)
    provided by another project.'''
    def __init__(self, name, pairs):
        '''*pairs* maps tag names ('bin', 'lib', ...) to file patterns,
        with the special keys 'includes', 'excludes' and 'target'.'''
        self.versions = { 'includes': [], 'excludes': [] }
        self.target = None
        self.files = {}
        self.name = name
        # `items` instead of `iteritems` keeps this code working
        # on both Python 2 and Python 3.
        for key, val in pairs.items():
            if key == 'excludes':
                # NOTE(review): *val* is evaluated as a Python expression,
                # so index files must be trusted input.
                self.versions['excludes'] = eval(val)
            elif key == 'includes':
                self.versions['includes'] = [ val ]
            elif key == 'target':
                # The index file loader will have generated fully-qualified
                # names to avoid key collisions when a project depends on both
                # proj and target/proj. We need to revert the name back to
                # the actual project name here.
                self.target = val
                self.name = os.sep.join(self.name.split(os.sep)[1:])
            else:
                # File patterns start unresolved: (pattern, None).
                if isinstance(val, list):
                    self.files[key] = [ (filename, None) for filename in val ]
                else:
                    self.files[key] = [ (val, None) ]
    def populate(self, build_deps):
        '''Fill in resolved paths for our file patterns from *build_deps*,
        a dictionary of already built dependencies.'''
        if self.name in build_deps:
            deps = build_deps[self.name].files
            for dep in deps:
                if dep in self.files:
                    files = []
                    for look_pat, look_path in self.files[dep]:
                        found = False
                        if not look_path:
                            # Only unresolved patterns pick up a path.
                            for pat, path in deps[dep]:
                                if pat == look_pat:
                                    files += [ (look_pat, path) ]
                                    found = True
                                    break
                        if not found:
                            files += [ (look_pat, look_path) ]
                    self.files[dep] = files
    def prerequisites(self, tags):
        '''A simple dependency is its own single prerequisite.'''
        return [ self ]
class Alternates(Dependency):
    '''Provides a set of dependencies where one of them is enough
    to fullfil the prerequisite condition. This is used to allow
    differences in packaging between distributions.'''
    def __init__(self, name, pairs):
        Dependency.__init__(self, name, pairs)
        # Maps a distribution tag to the list of alternative dependencies.
        self.by_tags = {}
        # `items` instead of `iteritems` keeps this code working
        # on both Python 2 and Python 3.
        for key, val in pairs.items():
            self.by_tags[key] = []
            for dep_key, dep_val in val.items():
                self.by_tags[key] += [ Dependency(dep_key, dep_val) ]
    def __str__(self):
        return 'alternates: ' + str(self.by_tags)
    def populate(self, build_deps=None):
        '''Populate every alternative from *build_deps*, a dictionary.'''
        for tag in self.by_tags:
            for dep in self.by_tags[tag]:
                dep.populate(build_deps)
    def prerequisites(self, tags):
        '''Flatten the prerequisites of the alternatives matching *tags*.'''
        prereqs = []
        for tag in tags:
            if tag in self.by_tags:
                for dep in self.by_tags[tag]:
                    prereqs += dep.prerequisites(tags)
        return prereqs
class Maintainer:
    '''Information about the maintainer of a project.'''

    def __init__(self, fullname, email):
        self.fullname = fullname
        self.email = email

    def __str__(self):
        # "Full Name <email>" as commonly used in changelogs.
        return '%s <%s>' % (self.fullname, self.email)
class Step:
    '''Step in the build DAG.'''
    # Priorities: lower numbers are ordered earlier in the DAG.
    configure = 1
    install_native = 2
    install_lang = 3
    install = 4
    update = 5
    setup = 6
    make = 7

    def __init__(self, priority, project_name):
        self.project = project_name
        self.prerequisites = []
        self.priority = priority
        self.name = self.__class__.genid(project_name)
        # Becomes True once the step has effectively run.
        self.updated = False

    def __str__(self):
        return self.name

    def qualified_project_name(self, target_name=None):
        if target_name:
            return os.path.join(target_name, self.project)
        return self.project

    @classmethod
    def genid(cls, project_name, target_name=None):
        '''Derive a unique step identifier from project and target names.'''
        name = unicode(project_name.replace(os.sep, '_').replace('-', '_'))
        if target_name:
            name = target_name + '_' + name
        if issubclass(cls, ConfigureStep):
            return 'configure_' + name
        if issubclass(cls, InstallStep):
            return 'install_' + name
        if issubclass(cls, UpdateStep):
            return 'update_' + name
        if issubclass(cls, SetupStep):
            return name + 'Setup'
        return name
class TargetStep(Step):
    '''Step further qualified by an optional target.'''

    def __init__(self, prefix, project_name, target=None):
        self.target = target
        Step.__init__(self, prefix, project_name)
        # Recompute the id so it includes the target qualifier.
        self.name = self.__class__.genid(project_name, target)
class ConfigureStep(TargetStep):
    '''The *configure* step in the development cycle initializes variables
    that drive the make step such as compiler flags, where files are installed,
    etc.'''

    def __init__(self, project_name, envvars, target=None):
        TargetStep.__init__(self, Step.configure, project_name, target)
        self.envvars = envvars

    def associate(self, target):
        '''Clone this step for a specific *target*.'''
        return ConfigureStep(self.project, self.envvars, target)

    def run(self, context):
        self.updated = config_var(context, self.envvars)
class InstallStep(Step):
    '''The *install* step in the development cycle installs prerequisites
    to a project.'''
    def __init__(self, project_name, managed = None, target = None,
                 priority=Step.install):
        Step.__init__(self, priority, project_name)
        # Default to managing the project itself when no packages are
        # given. The previous test `managed and len(managed) == 0` could
        # never be true (a truthy sequence has a non-zero length), so
        # self.managed stayed None and *insert* below raised a TypeError.
        if not managed:
            self.managed = [ project_name ]
        else:
            self.managed = managed
        self.target = target
    def insert(self, install_step):
        '''Merge the packages managed by *install_step* into this step.'''
        if install_step.managed:
            self.managed += install_step.managed
    def run(self, context):
        raise Error("Does not know how to install '%s' on %s for %s"
                    % (str(self.managed), context.host(), self.name))
    def info(self):
        raise Error(
            "Does not know how to search package manager for '%s' on %s for %s"
            % (str(self.managed), CONTEXT.host(), self.name))
class AptInstallStep(InstallStep):
    ''' Install a prerequisite to a project through apt (Debian, Ubuntu).'''

    def __init__(self, project_name, target=None):
        managed = [ project_name ]
        if target and target.startswith('python'):
            # Prefix with the python interpreter name (ex. python3-foo).
            packages = [ target + '-' + man for man in managed ]
        else:
            packages = managed
        InstallStep.__init__(self, project_name, packages,
                             priority=Step.install_native)

    def run(self, context):
        # Add DEBIAN_FRONTEND=noninteractive such that interactive
        # configuration of packages do not pop up in the middle
        # of installation. We are going to update the configuration
        # in /etc afterwards anyway.
        # Emit only one shell command so that we can find out what the script
        # tried to do when we did not get priviledge access.
        shell_command(['sh', '-c',
                       '"/usr/bin/apt-get update'\
 ' && DEBIAN_FRONTEND=noninteractive /usr/bin/apt-get -y install '
                       + ' '.join(self.managed) + '"'],
                      admin=True)
        self.updated = True

    def info(self):
        installable = []
        unmanaged = []
        try:
            # apt-cache showpkg will return 0 even when the package cannot
            # be found, so scan the output for an actual package record.
            # Apparently we are not able to get error messages
            # from stderr here ...
            cmdline = ['apt-cache', 'showpkg'] + self.managed
            manager_output = subprocess.check_output(
                ' '.join(cmdline), shell=True, stderr=subprocess.STDOUT)
            found = any(re.match('^Package:', line)
                        for line in manager_output.splitlines())
            if found:
                installable = self.managed
            else:
                unmanaged = self.managed
        except subprocess.CalledProcessError:
            unmanaged = self.managed
        return installable, unmanaged
class DarwinInstallStep(InstallStep):
    ''' Install a prerequisite to a project through pkg (Darwin, OSX).'''
    def __init__(self, project_name, filenames, target = None):
        InstallStep.__init__(self, project_name, managed=filenames,
                             priority=Step.install_native)
    def run(self, context):
        '''Mount *image*, a pathnme to a .dmg file and use the Apple installer
        to install the *pkg*, a .pkg package onto the platform through the Apple
        installer.'''
        for filename in self.managed:
            try:
                volume = None
                if filename.endswith('.dmg'):
                    # Mount the disk image; the .pkg will be searched
                    # on the mounted volume.
                    base, _ = os.path.splitext(filename)
                    volume = os.path.join('/Volumes', os.path.basename(base))
                    shell_command(['hdiutil', 'attach', filename])
                target = context.value('darwinTargetVolume')
                if target != 'CurrentUserHomeDirectory':
                    message = 'ATTENTION: You need administrator privileges '\
                        + 'on the local machine to execute the following cmmand\n'
                    log_info(message)
                    admin = True
                else:
                    admin = False
                pkg = filename
                if not filename.endswith('.pkg'):
                    pkgs = find_files(volume, r'\.pkg')
                    if len(pkgs) != 1:
                        raise RuntimeError(
                            'ambiguous: not exactly one .pkg to install')
                    pkg = pkgs[0]
                shell_command(['installer', '-pkg', os.path.join(volume, pkg),
                               '-target "' + target + '"'], admin)
                if filename.endswith('.dmg'):
                    shell_command(['hdiutil', 'detach', volume])
            except Exception:
                # A bare `except:` also swallowed KeyboardInterrupt and
                # SystemExit; only translate real errors.
                raise Error('failure to install darwin package ' + filename)
        self.updated = True
class DpkgInstallStep(InstallStep):
    ''' Install a prerequisite to a project through dpkg (Debian, Ubuntu).'''

    def __init__(self, project_name, filenames, target=None):
        InstallStep.__init__(self, project_name, managed=filenames,
                             priority=Step.install_native)

    def run(self, context):
        # The package filenames are joined into one argument, mirroring
        # RpmInstallStep.run.
        shell_command(['dpkg', '-i', ' '.join(self.managed)], admin=True)
        self.updated = True
class MacPortInstallStep(InstallStep):
    ''' Install a prerequisite to a project through Macports.'''

    def __init__(self, project_name, target=None):
        managed = [ project_name ]
        packages = managed
        if target:
            look = re.match(r'python(\d(\.\d)?)?', target)
            if look:
                # Macports python packages use a 'pyXY-' prefix.
                if look.group(1):
                    prefix = 'py%s-' % look.group(1).replace('.', '')
                else:
                    prefix = 'py27-'
                packages = [ prefix + man for man in managed ]
        # translation of package names. It is simpler than
        # creating an <alternates> node even if it look more hacky.
        darwin_names = { 'libicu-dev': 'icu' }
        packages = [ darwin_names.get(package, package)
                     for package in packages ]
        InstallStep.__init__(self, project_name, packages,
                             priority=Step.install_native)

    def run(self, context):
        shell_command(['/opt/local/bin/port', 'install'] + self.managed,
                      admin=True)
        self.updated = True

    def info(self):
        installable = []
        unmanaged = []
        try:
            shell_command(['port', 'info'] + self.managed)
            installable = self.managed
        except Error:
            unmanaged = self.managed
        return installable, unmanaged
class NpmInstallStep(InstallStep):
    ''' Install a prerequisite to a project through npm (Node.js manager).'''

    def __init__(self, project_name, target=None):
        InstallStep.__init__(self, project_name, [project_name],
                             priority=Step.install_lang)

    def _manager(self):
        # nodejs is not available as a package on Fedora 17 or rather,
        # it was until the repo site went down, so use our local copy.
        find_npm(CONTEXT)
        return os.path.join(CONTEXT.value('buildTop'), 'bin', 'npm')

    def run(self, context):
        shell_command([self._manager(), 'install'] + self.managed, admin=True)
        self.updated = True

    def info(self):
        installable = []
        unmanaged = []
        try:
            shell_command([self._manager(), 'search'] + self.managed)
            installable = self.managed
        except Error:
            unmanaged = self.managed
        return installable, unmanaged
class PipInstallStep(InstallStep):
    ''' Install a prerequisite to a project through pip (Python eggs).'''

    def __init__(self, project_name, versions=None, target=None):
        install_name = project_name
        # Pin the first explicitly included version, if any.
        if versions and len(versions.get('includes', [])) > 0:
            install_name = '%s==%s' % (project_name, versions['includes'][0])
        InstallStep.__init__(self, project_name, [install_name],
                             priority=Step.install_lang)

    def collect(self, context):
        """Collect prerequisites from requirements.txt"""
        filepath = context.src_dir(
            os.path.join(self.project, 'requirements.txt'))
        with open(filepath) as file_obj:
            for line in file_obj.readlines():
                look = re.match(r'([\w\-_]+)((>=|==)(\S+))?', line)
                if look:
                    prerequisite = look.group(1)
                    sys.stdout.write('''<dep name="%s">
    <lib>.*/(%s)/__init__.py</lib>
  </dep>
''' % (prerequisite, prerequisite))

    def run(self, context):
        # In most cases, when installing through pip, we should be running
        # under virtualenv. This is only true for development machines though.
        admin = 'VIRTUAL_ENV' not in os.environ
        shell_command([find_pip(context), 'install'] + self.managed,
                      admin=admin)
        self.updated = True

    def info(self):
        installable = []
        unmanaged = []
        try:
            # XXX There is no pip info command, search is the closest we get.
            # Pip search might match other packages and thus returns zero
            # inadvertently but it is the closest we get so far.
            shell_command([find_pip(CONTEXT), 'search'] + self.managed)
            installable = self.managed
        except Error:
            unmanaged = self.managed
        return installable, unmanaged
class RpmInstallStep(InstallStep):
    ''' Install a prerequisite to a project through rpm (Fedora).'''

    def __init__(self, project_name, filenames, target=None):
        InstallStep.__init__(self, project_name, managed=filenames,
                             priority=Step.install_native)

    def run(self, context):
        # --nodeps because rpm looks stupid and can't figure out that
        # the vcd package provides the libvcd.so required by the executable.
        cmdline = ['rpm', '-i', '--force', ' '.join(self.managed), '--nodeps']
        shell_command(cmdline, admin=True)
        self.updated = True
class YumInstallStep(InstallStep):
    ''' Install a prerequisite to a project through yum (Fedora).'''

    def __init__(self, project_name, target=None):
        managed = [project_name]
        packages = managed
        if target and target.startswith('python'):
            packages = [target + '-' + man for man in managed]
        # Translate Debian-style package names into Fedora equivalents.
        fedora_names = {
            'libbz2-dev': 'bzip2-devel',
            'python-all-dev': 'python-devel',
            'zlib1g-dev': 'zlib-devel' }
        renamed = []
        for package in packages:
            if package in fedora_names:
                renamed += [fedora_names[package]]
            elif package.endswith('-dev'):
                # '-dev' suffixes become '-devel' on Fedora.
                renamed += [package + 'el']
            else:
                renamed += [package]
        InstallStep.__init__(self, project_name, renamed,
                             priority=Step.install_native)

    def run(self, context):
        shell_command(['yum', '-y', 'update'], admin=True)
        filtered = shell_command(['yum', '-y', 'install'] + self.managed,
                                 admin=True, pat='No package (.*) available')
        if len(filtered) > 0:
            # yum exits successfully even when a package is unknown;
            # detect that case from the filtered output instead.
            look = re.match('No package (.*) available', filtered[0])
            if look:
                unmanaged = look.group(1).split(' ')
                if len(unmanaged) > 0:
                    raise Error("yum cannot install " + ' '.join(unmanaged))
        self.updated = True

    def info(self):
        installable = []
        unmanaged = []
        try:
            filtered = shell_command(['yum', 'info'] + self.managed,
                                     pat=r'Name\s*:\s*(\S+)')
            if filtered:
                installable = self.managed
            else:
                unmanaged = self.managed
        except Error:
            unmanaged = self.managed
        return installable, unmanaged
class BuildStep(TargetStep):
    '''Build a project running make, executing a script, etc.'''

    def __init__(self, project_name, target=None, force_update=True):
        TargetStep.__init__(self, Step.make, project_name, target)
        self.force_update = force_update

    def _should_run(self):
        # Run when forced, or when at least one prerequisite was updated.
        return self.force_update or any(
            prereq.updated for prereq in self.prerequisites)
class MakeStep(BuildStep):
    '''The *make* step in the development cycle builds executable binaries,
    libraries and other files necessary to install the project.'''
    def associate(self, target):
        '''Returns a MakeStep for the same project bound to *target*.'''
        return MakeStep(self.project, target)
    def run(self, context):
        '''Run make on the project Makefile when prerequisites were updated
        or a rebuild is forced; marks the step as updated afterwards.'''
        if self._should_run():
            # We include the configfile (i.e. variable=value) before
            # the project Makefile for convenience. Adding a statement
            # include $(shell dws context) at the top of the Makefile
            # is still a good idea to permit "make" from the command line.
            # Otherwise it just duplicates setting some variables.
            context = localize_context(context, self.project, self.target)
            makefile = context.src_dir(os.path.join(self.project, 'Makefile'))
            if os.path.isfile(makefile):
                cmdline = ['make',
                           '-f', context.config_filename,
                           '-f', makefile]
                # If we do not set PATH to *bin_build_dir*:*binDir*:${PATH}
                # and the install directory is not in PATH, then we cannot
                # build a package for drop because 'make dist' depends
                # on executables installed in *binDir* (dws, dbldpkg, ...)
                # that are not linked into *binBuildDir* at the time
                # 'cd drop ; make dist' is run. Note that it is not an issue
                # for other projects since those can be explicitely depending
                # on drop as a prerequisite.
                # XXX We should only have to include binBuildDir is PATH
                # but that fails because of "/usr/bin/env python" statements
                # and other little tools like hostname, date, etc.
                shell_command(cmdline + context.targets + context.overrides,
                              search_path=[context.bin_build_dir()]
                              + context.search_path('bin'))
            self.updated = True
class ShellStep(BuildStep):
    '''Run a shell script to *make* a step in the development cycle.'''
    def __init__(self, project_name, script, target = None):
        BuildStep.__init__(self, project_name, target)
        # script: text of the shell commands to execute.
        self.script = script
    def associate(self, target):
        '''Returns a ShellStep running the same script bound to *target*.'''
        return ShellStep(self.project, self.script, target)
    def run(self, context):
        '''Write the script to a temporary file, prefixed by the context
        configuration, execute it with 'sh -x -e', then delete it.'''
        if self._should_run() and self.script:
            context = localize_context(context, self.project, self.target)
            # delete=False so the closed file can be re-opened by 'sh'
            # and is removed explicitly below after it ran.
            script = tempfile.NamedTemporaryFile(mode='w+t', delete=False)
            script.write('#!/bin/sh\n\n')
            script.write('. ' + context.config_filename + '\n\n')
            script.write(self.script)
            script.close()
            # -e aborts on the first failing command, -x traces execution.
            shell_command([ 'sh', '-x', '-e', script.name ],
                search_path=[context.bin_build_dir()]
                + context.search_path('bin'))
            os.remove(script.name)
            self.updated = True
class SetupStep(TargetStep):
    '''The *setup* step in the development cycle installs third-party
    prerequisites. This steps gathers all the <dep> statements referring
    to a specific prerequisite.'''
    def __init__(self, project_name, files, versions=None, target=None):
        '''We keep a reference to the project because we want to decide
        to add native installer/made package/patch right after run'''
        TargetStep.__init__(self, Step.setup, project_name, target)
        # files: maps a directory kind (bin, include, lib, ...) to a list
        # of (name_pat, absolute_path) pairs to locate on the local system.
        self.files = files
        self.updated = False
        if versions:
            self.versions = versions
        else:
            self.versions = {'includes': [], 'excludes': [] }
    def insert(self, setup):
        '''We only add prerequisites from *dep* which are not already present
        in *self*. This is important because *find_prerequisites* will initialize
        tuples (name_pat,absolute_path).'''
        # *files* accumulates only the entries newly added to *self* and
        # seeds the SetupStep returned to the caller.
        files = {}
        for dirname in setup.files:
            if not dirname in self.files:
                self.files[dirname] = setup.files[dirname]
                files[dirname] = setup.files[dirname]
            else:
                for prereq_1 in setup.files[dirname]:
                    # Entries are compared by name pattern (first element
                    # of the tuple) only.
                    found = False
                    for prereq_2 in self.files[dirname]:
                        if prereq_2[0] == prereq_1[0]:
                            found = True
                            break
                    if not found:
                        self.files[dirname] += [ prereq_1 ]
                        if not dirname in files:
                            files[dirname] = []
                        files[dirname] += [ prereq_1 ]
        self.versions['excludes'] += setup.versions['excludes']
        self.versions['includes'] += setup.versions['includes']
        return SetupStep(self.project, files, self.versions, self.target)
    def run(self, context):
        '''Locate the prerequisite files then link them into the build
        tree; returns True when every prerequisite was found.'''
        self.files, complete = find_prerequisites(
            self.files, self.versions, self.target)
        if complete:
            self.files, complete = link_prerequisites(
                self.files, self.versions, self.target)
        self.updated = True
        return complete
class UpdateStep(Step):
    '''The *update* step in the development cycle fetches files and source
    repositories from remote server onto the local system.'''
    # Class-wide map of project name to the revision pulled; filled in
    # as update steps run so the tool can report what changed.
    updated_sources = {}
    def __init__(self, project_name, rep, fetches):
        # rep:     Repository instance to synchronize from (may be None).
        # fetches: dictionary of remote files to download.
        Step.__init__(self, Step.update, project_name)
        self.rep = rep
        self.fetches = fetches
        self.updated = False
    def run(self, context):
        '''Download the fetched files then update the source repository,
        applying local patches afterwards.'''
        try:
            fetch(context, self.fetches)
        except IOError:
            raise Error("unable to fetch " + str(self.fetches))
        if self.rep:
            # try:
            self.updated = self.rep.update(self.project, context)
            if self.updated:
                UpdateStep.updated_sources[self.project] = self.rep.rev
            self.rep.apply_patches(self.project, context)
            # except:
            #     raise Error('cannot update repository or apply patch for %s\n'
            #         % str(self.project))
class Repository:
    '''All prerequisites information to install a project
    from a source control system.'''
    # Regex fragment matching the directory entries that identify
    # a checkout (.git, .svn) or a CVS working copy.
    dirPats = r'(\.git|\.svn|CVS)'
    def __init__(self, sync, rev):
        # sync: url or pathname used to synchronize the local copy.
        # rev:  revision to check out, or None for the head revision.
        self.type = None
        self.url = sync
        self.rev = rev
    def __str__(self):
        result = '\t\tsync repository from ' + self.url + '\n'
        if self.rev:
            # NOTE(review): there is no space between "revision" and
            # the revision number in the generated text.
            result = result + '\t\t\tat revision' + str(self.rev) + '\n'
        else:
            result = result + '\t\t\tat head\n'
        return result
    def apply_patches(self, name, context):
        '''Apply the *.patch files found in the patch directory
        for project *name* to its source tree.'''
        if os.path.isdir(context.patch_dir(name)):
            patches = []
            for pathname in os.listdir(context.patch_dir(name)):
                if pathname.endswith('.patch'):
                    patches += [ pathname ]
            if len(patches) > 0:
                log_info('######## patching ' + name + '...')
                prev = os.getcwd()
                os.chdir(context.src_dir(name))
                # NOTE(review): the '< path' argument only works if
                # shell_command runs the command through a shell that
                # interprets the redirection -- TODO confirm.
                shell_command(['patch',
                    '< ' + os.path.join(context.patch_dir(name),
                                     '*.patch')])
                os.chdir(prev)
    @staticmethod
    def associate(pathname):
        '''This methods returns a boiler plate *Repository* that does
        nothing in case an empty sync url is specified. This is different
        from an absent sync field which would use rsync as a "Repository".
        '''
        rev = None
        if pathname and len(pathname) > 0:
            repos = { '.git': GitRepository,
                      '.svn': SvnRepository }
            sync = pathname
            # Split off an optional "@revision" suffix from the url.
            look = search_repo_pat(pathname)
            if look:
                sync = look.group(1)
                rev = look.group(4)
            path_list = sync.split(os.sep)
            for i in range(0, len(path_list)):
                for ext, repo_class in repos.iteritems():
                    if path_list[i].endswith(ext):
                        if path_list[i] == ext:
                            # The marker is a path component on its own
                            # (ex. "proj/.git"); the repository root is
                            # the parent component.
                            i = i - 1
                        return repo_class(os.sep.join(path_list[:i + 1]), rev)
            # We will guess, assuming the repository is on the local system
            for ext, repo_class in repos.iteritems():
                if os.path.isdir(os.path.join(pathname, ext)):
                    return repo_class(pathname, rev)
            return RsyncRepository(pathname, rev)
        return Repository("", rev)
    def update(self, name, context, force=False):
        '''Base implementation: nothing to synchronize.'''
        return False
class GitRepository(Repository):
    '''All prerequisites information to install a project
    from a git source control repository.'''
    def apply_patches(self, name, context):
        '''Apply patches that can be found in the *obj_dir* for the project.'''
        prev = os.getcwd()
        if os.path.isdir(context.patch_dir(name)):
            patches = []
            for pathname in os.listdir(context.patch_dir(name)):
                if pathname.endswith('.patch'):
                    patches += [ pathname ]
            if len(patches) > 0:
                log_info('######## patching ' + name + '...')
                os.chdir(context.src_dir(name))
                # 'git am -3 -k' applies the patches as commits, falling
                # back to a 3-way merge and keeping the subject line.
                shell_command([ find_git(context), 'am', '-3', '-k',
                                os.path.join(context.patch_dir(name),
                                             '*.patch')])
        os.chdir(prev)
    def push(self, pathname):
        '''Push local commits in the checkout at *pathname* upstream.'''
        prev = os.getcwd()
        os.chdir(pathname)
        shell_command([ find_git(CONTEXT), 'push' ])
        os.chdir(prev)
    def tarball(self, name, version='HEAD'):
        '''Create a name-version.tar.bz2 archive of the project sources
        in the current working directory.'''
        local = CONTEXT.src_dir(name)
        gitexe = find_git(CONTEXT)
        cwd = os.getcwd()
        os.chdir(local)
        if version == 'HEAD':
            # NOTE(review): the rev-parse output is not captured here;
            # presumably run only to validate the revision -- confirm.
            shell_command([ gitexe, 'rev-parse', version ])
        prefix = name + '-' + version
        output_name = os.path.join(cwd, prefix + '.tar.bz2')
        shell_command([ gitexe, 'archive', '--prefix', prefix + os.sep,
                      '-o', output_name, 'HEAD'])
        os.chdir(cwd)
    def update(self, name, context, force=False):
        '''Clone the remote repository, or fetch and checkout the requested
        revision. Returns True when the local sources were modified.'''
        # If the path to the remote repository is not absolute,
        # derive it from *remoteTop*. Binding any sooner will
        # trigger a potentially unnecessary prompt for remote_cache_path.
        if not ':' in self.url and context:
            self.url = context.remote_src_path(self.url)
        if not name:
            # Derive the project name from the url, relative to
            # *remoteSrcTop* and without the .git extension.
            prefix = context.value('remoteSrcTop')
            if not prefix.endswith(':') and not prefix.endswith(os.sep):
                prefix = prefix + os.sep
            name = self.url.replace(prefix, '')
        if name.endswith('.git'):
            name = name[:-4]
        local = context.src_dir(name)
        pulled = False
        updated = False
        cwd = os.getcwd()
        git_executable = find_git(context)
        if not os.path.exists(os.path.join(local, '.git')):
            shell_command([ git_executable, 'clone', self.url, local])
            updated = True
        else:
            pulled = True
            os.chdir(local)
            cmdline = ' '.join([git_executable, 'fetch'])
            log_info(cmdline)
            cmd = subprocess.Popen(cmdline,
                                   shell=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
            line = cmd.stdout.readline()
            # Scan the fetch output for an "updating" line to detect
            # whether new commits were brought in.
            while line != '':
                log_info(line)
                look = re.match('^updating', line)
                if look:
                    updated = True
                line = cmd.stdout.readline()
            cmd.wait()
            if cmd.returncode != 0:
                # It is ok to get an error in case we are running
                # this on the server machine.
                pass
        cof = '-m'
        if force:
            cof = '-f'
        cmd = [ git_executable, 'checkout', cof ]
        if self.rev:
            cmd += [ self.rev ]
        if self.rev or pulled:
            os.chdir(local)
            shell_command(cmd)
        # Print HEAD
        if updated:
            # Just the commit: cmd = [git_executable, 'rev-parse', 'HEAD']
            cmd = [git_executable, 'log', '-1', '--pretty=oneline' ]
            os.chdir(local)
            logline = subprocess.check_output(cmd)
            log_info(logline)
            # Remember the revision actually checked out.
            self.rev = logline.split(' ')[0]
        os.chdir(cwd)
        return updated
class SvnRepository(Repository):
    '''All prerequisites information to install a project
    from a svn source control repository.'''
    def __init__(self, sync, rev):
        Repository.__init__(self, sync, rev)
    def update(self, name, context, force=False):
        '''Check out, or update, the local svn working copy for *name*.'''
        # A relative url is rooted at *remoteTop*; binding any sooner
        # would trigger a potentially unnecessary prompt
        # for remote_cache_path.
        if context and not ':' in self.url:
            self.url = context.remote_src_path(self.url)
        local = context.src_dir(name)
        if os.path.exists(os.path.join(local, '.svn')):
            prev = os.getcwd()
            os.chdir(local)
            shell_command(['svn', 'update'])
            os.chdir(prev)
        else:
            shell_command(['svn', 'co', self.url, local])
        # \todo figure out how any updates is signaled by svn.
        return True
class RsyncRepository(Repository):
    '''All prerequisites information to install a project
    from a remote directory.'''
    def __init__(self, sync, rev):
        Repository.__init__(self, sync, rev)
    def update(self, name, context, force=False):
        '''Mirror the remote directory onto the local system.'''
        # A relative url is rooted at *remoteTop*; binding any sooner
        # would trigger a potentially unnecessary prompt
        # for remote_cache_path.
        remote = self.url
        if context and not ':' in remote:
            remote = context.remote_src_path(remote)
            self.url = remote
        fetch(context, {remote: ''}, force=True)
        return True
class InstallFlavor:
    '''All information necessary to install a project on the local system.'''
    def __init__(self, name, pairs):
        # *pairs* is the parsed content of a project element; each key
        # selects how the value is interpreted below.
        rep = None
        fetches = {}
        variables = {}
        self.deps = {}
        self.make = None
        for key, val in pairs.iteritems():
            if isinstance(val, Variable):
                variables[key] = val
                # XXX Hack? We add the variable in the context here
                # because it might be needed by the setup step even though
                # no configure step has run.
                if CONTEXT and not key in CONTEXT.environ:
                    CONTEXT.environ[key] = val
            elif key == 'sync':
                rep = Repository.associate(val)
            elif key == 'shell':
                self.make = ShellStep(name, val)
            elif key == 'fetch':
                # A fetch block is a url plus extra attributes (sha1, ...);
                # normalize a single block into a list of blocks.
                if isinstance(val, list):
                    blocks = val
                else:
                    blocks = [ val ]
                for blk in blocks:
                    file_url = blk['url']
                    blk.pop('url')
                    fetches[file_url] = blk
            elif key == 'alternates':
                self.deps[key] = Alternates(key, val)
            else:
                self.deps[key] = Dependency(key, val)
        self.update = UpdateStep(name, rep, fetches)
        self.configure = ConfigureStep(name, variables, None)
        if not self.make:
            self.make = MakeStep(name)
    def __str__(self):
        result = ''
        if len(self.update.fetches) > 0:
            result = result + '\t\tfetch archives\n'
            for archive in self.update.fetches:
                result = result + '\t\t\t' + archive + '\n'
        if len(self.deps) > 0:
            result = result + '\t\tdependencies from local system\n'
            for dep in self.deps:
                result = result + '\t\t\t' + str(dep) + '\n'
        if len(self.configure.envvars) > 0:
            result = result + '\t\tenvironment variables\n'
            for var in self.configure.envvars:
                result = result + '\t\t\t' + str(var) + '\n'
        return result
    def fetches(self):
        '''Returns the dictionary of remote files to download.'''
        return self.update.fetches
    def prerequisites(self, tags):
        '''Returns the list of prerequisites matching *tags*.'''
        prereqs = []
        for dep in self.deps.itervalues():
            prereqs += dep.prerequisites(tags)
        return prereqs
    def prerequisite_names(self, tags):
        '''same as *prerequisites* except only returns the names
        of the prerequisite projects.'''
        names = []
        for dep in self.deps.itervalues():
            names += [ prereq.name for prereq in dep.prerequisites(tags) ]
        return names
    def vars(self):
        '''Returns the environment variables of the configure step.'''
        return self.configure.envvars
class Project:
    '''Definition of a project with its prerequisites.'''
    def __init__(self, name, pairs):
        self.name = name
        self.title = None
        self.descr = None
        # *packages* maps a set of tags to *Package* instances. A *Package*
        # contains dependencies to install a project from a binary distribution.
        # Default update.rep is relative to *remoteSrcTop*. We initialize
        # to a relative path instead of an absolute path here such that it
        # does not trigger a prompt for *remoteSrcTop* until we actually
        # do the repository pull.
        self.packages = {}
        self.patch = None
        self.repository = None
        self.installed_version = None
        # NOTE(review): the *version* and *maintainer* attributes only
        # exist when the corresponding keys are present in *pairs*.
        for key, val in pairs.iteritems():
            if key == 'title':
                self.title = val
            elif key == 'version':
                self.version = val
            elif key == 'description':
                self.descr = val
            elif key == 'maintainer':
                self.maintainer = Maintainer(val['personname'], val['email'])
            elif key == 'patch':
                self.patch = InstallFlavor(name, val)
                if not self.patch.update.rep:
                    self.patch.update.rep = Repository.associate(name+'.git')
            elif key == 'repository':
                self.repository = InstallFlavor(name, val)
                if not self.repository.update.rep:
                    self.repository.update.rep = Repository.associate(name+'.git')
            else:
                # Any other key is a binary distribution tag.
                self.packages[key] = InstallFlavor(name, val)
    def __str__(self):
        result = 'project ' + self.name + '\n' \
            + '\t' + str(self.title) + '\n' \
            + '\tfound version ' + str(self.installed_version) \
            + ' installed locally\n'
        if len(self.packages) > 0:
            result = result + '\tpackages\n'
            for package_name in self.packages:
                result = result + '\t[' + package_name + ']\n'
                result = result + str(self.packages[package_name]) + '\n'
        if self.patch:
            result = result + '\tpatch\n' + str(self.patch) + '\n'
        if self.repository:
            result = result + '\trepository\n' + str(self.repository) + '\n'
        return result
    def prerequisites(self, tags):
        '''returns a set of *Dependency* instances for the project based
        on the provided tags. It enables choosing between alternate
        prerequisites set based on the local machine operating system, etc.'''
        prereqs = []
        if self.repository:
            prereqs += self.repository.prerequisites(tags)
        if self.patch:
            prereqs += self.patch.prerequisites(tags)
        for tag in self.packages:
            if tag in tags:
                prereqs += self.packages[tag].prerequisites(tags)
        return prereqs
    def prerequisite_names(self, tags):
        '''same as *prerequisites* except only returns the names
        of the prerequisite projects.'''
        names = []
        for prereq in self.prerequisites(tags):
            names += [ prereq.name ]
        return names
class XMLDbParser(xml.sax.ContentHandler):
    '''Parse a project index database stored as an XML file on disc
    and generate callbacks on a PdbHandler. The handler will update
    its state based on the callback sequence.'''
    # Global Constants for the database parser
    tagDb = 'projects'
    tagProject = 'project'
    tagPattern = '.*<' + tagProject + r'\s+name="(.*)"'
    trailerTxt = '</' + tagDb + '>'
    # For dbldpkg
    tagPackage = 'package'
    tagTag = 'tag'
    tagFetch = 'fetch'
    tagHash = 'sha1'
    def __init__(self, context):
        xml.sax.ContentHandler.__init__(self)
        self.context = context
        self.handler = None
        # stack used to reconstruct the tree.
        self.nodes = []
        self.text = ""
    def startElement(self, name, attrs):
        '''Start populating an element.'''
        self.text = ""
        # The 'name' attribute (optionally prefixed by 'target') becomes
        # the dictionary key; all other attributes are stored as values.
        key = name
        elems = {}
        for attr in attrs.keys():
            if attr == 'name':
                # \todo have to conserve name if just for fetches.
                # key = Step.genid(Step, attrs['name'], target)
                if 'target' in attrs.keys():
                    target = attrs['target']
                    key = os.path.join(target, attrs['name'])
                else:
                    key = attrs['name']
            else:
                elems[attr] = attrs[attr]
        self.nodes += [ (name, {key:elems}) ]
    def characters(self, characters):
        '''Accumulate character data for the current element.'''
        self.text += characters
    def endElement(self, name):
        '''Once the element is fully populated, call back the simplified
        interface on the handler.'''
        node_name, pairs = self.nodes.pop()
        self.text = self.text.strip()
        if self.text:
            aggregate = self.text
            self.text = ""
        else:
            aggregate = {}
        # Pop child nodes off the stack, merging them into *aggregate*,
        # until we find the matching start tag for *name*.
        while node_name != name:
            # We are keeping the structure as simple as possible,
            # only introducing lists when there are more than one element.
            for k in pairs.keys():
                if not k in aggregate:
                    aggregate[k] = pairs[k]
                elif isinstance(aggregate[k], list):
                    if isinstance(pairs[k], list):
                        aggregate[k] += pairs[k]
                    else:
                        aggregate[k] += [ pairs[k] ]
                else:
                    if isinstance(pairs[k], list):
                        aggregate[k] = [ aggregate[k] ] + pairs[k]
                    else:
                        aggregate[k] = [ aggregate[k], pairs[k] ]
            node_name, pairs = self.nodes.pop()
        key = pairs.keys()[0]
        cap = name.capitalize()
        # Some elements map directly onto classes of the same name
        # defined at module level.
        if cap in [ 'Metainfo', 'Multiple',
                    'Pathname', 'Single', 'Variable' ]:
            aggregate = getattr(sys.modules[__name__], cap)(key, aggregate)
        if isinstance(aggregate, dict):
            pairs[key].update(aggregate)
        else:
            pairs[key] = aggregate
        if name == 'project':
            self.handler.project(Project(key, pairs[key]))
        elif name == 'projects':
            self.handler.end_parse()
        self.nodes += [ (name, pairs) ]
    def parse(self, source, handler):
        '''This is the public interface for one pass through the database
        that generates callbacks on the handler interface.'''
        self.handler = handler
        parser = xml.sax.make_parser()
        parser.setFeature(xml.sax.handler.feature_namespaces, 0)
        parser.setContentHandler(self)
        if source.startswith('<?xml'):
            # *source* can be the XML text itself rather than a filename.
            parser.parse(cStringIO.StringIO(source))
        else:
            parser.parse(source)
    # The following methods are used to merge multiple databases together.
    def copy(self, db_next, db_prev, remove_project_end_tag=False):
        '''Copy lines in the db_prev file until hitting the definition
        of a package and return the name of the package.'''
        name = None
        line = db_prev.readline()
        while line != '':
            look = re.match(self.tagPattern, line)
            if look != None:
                name = look.group(1)
                break
            write_line = True
            # Strip the closing </projects> trailer; trailer() re-adds it
            # once when the merged file is complete.
            look = re.match('.*' + self.trailerTxt, line)
            if look:
                write_line = False
            if remove_project_end_tag:
                look = re.match('.*</' + self.tagProject + '>', line)
                if look:
                    write_line = False
            if write_line:
                db_next.write(line)
            line = db_prev.readline()
        return name
    def next(self, db_prev):
        '''Skip lines in the db_prev file until hitting the definition
        of a package and return the name of the package.'''
        name = None
        line = db_prev.readline()
        while line != '':
            look = re.match(self.tagPattern, line)
            if look != None:
                name = look.group(1)
                break
            line = db_prev.readline()
        return name
    def start_project(self, db_next, name):
        '''Write the start tag for project *name* into db_next.'''
        db_next.write(' <' + self.tagProject + ' name="' + name + '">\n')
    def trailer(self, db_next):
        '''XML files need a finish tag. We make sure to remove it while
        processing Upd and Prev then add it back before closing
        the final file.'''
        db_next.write(self.trailerTxt)
def basenames(pathnames):
    '''return the basename of all pathnames in a list.'''
    return [os.path.basename(pathname) for pathname in pathnames]
def search_repo_pat(sync_path):
    '''returns a RegexMatch if *sync_path* refers to a repository url/path.

    On a match, group(1) is the path up to and including the repository
    marker (.git/.svn/CVS) and group(4) is the optional @revision suffix.
    '''
    # Raw string: the previous non-raw literal relied on '\S' passing
    # through unchanged, which raises an invalid-escape warning on
    # modern Python.
    return re.search(r'(\S*%s)(@(\S+))?$' % Repository.dirPats, sync_path)
def filter_rep_ext(name):
    '''Filters the repository type indication from a pathname.'''
    components = name.split(os.sep)
    for index, component in enumerate(components):
        look = search_repo_pat(component)
        if not look:
            continue
        _, rep_ext = os.path.splitext(look.group(1))
        if component == rep_ext:
            # The component is only the repository marker (ex. ".git"):
            # drop the whole component.
            trimmed = components[:index] + components[index+1:]
        else:
            # Strip the trailing marker from the component
            # (ex. "proj.git" becomes "proj").
            trimmed = (components[:index]
                       + [ component[:-len(rep_ext)] ]
                       + components[index+1:])
        return os.sep.join(trimmed)
    # No repository marker anywhere in the path.
    return name
def mark(filename, suffix):
    '''Insert '-suffix' between the stem and extension of *filename*.'''
    stem, ext = os.path.splitext(filename)
    return '%s-%s%s' % (stem, suffix, ext)
def stamp(date=None):
    '''Returns a 'YYYY_MM_DD-HH' timestamp string for *date*.

    *date* defaults to the current time. The previous default,
    ``date=datetime.datetime.now()``, was evaluated once at module load
    time, so a long-running process kept stamping with a stale date.
    '''
    if date is None:
        date = datetime.datetime.now()
    return str(date.year) \
        + ('_%02d' % (date.month)) \
        + ('_%02d' % (date.day)) \
        + ('-%02d' % (date.hour))
def stampfile(filename):
    '''Returns *filename*'s basename marked with the 'buildstamp' value
    stored in the context, creating and saving the stamp if needed.'''
    global CONTEXT
    if not CONTEXT:
        # This code here is very special. dstamp.py relies on some dws
        # functions all of them do not rely on a context except
        # this special case here.
        CONTEXT = Context()
        CONTEXT.locate()
    if not 'buildstamp' in CONTEXT.environ:
        CONTEXT.environ['buildstamp'] = stamp(datetime.datetime.now())
        CONTEXT.save()
    return mark(os.path.basename(filename), CONTEXT.value('buildstamp'))
def create_index_pathname(db_index_pathname, db_pathnames):
    '''create a global dependency database (i.e. project index file) out of
    a set local dependency index files.'''
    parser = XMLDbParser(CONTEXT)
    dirname = os.path.dirname(db_index_pathname)
    # Guard against dirname == '' (index created in the current directory)
    # which would make os.makedirs raise.
    if dirname and not os.path.isdir(dirname):
        os.makedirs(dirname)
    db_next = sort_build_conf_list(db_pathnames, parser)
    try:
        db_next.seek(0)
        # 'wb' because the merged database is copied back as raw bytes.
        with open(db_index_pathname, 'wb') as db_index:
            shutil.copyfileobj(db_next, db_index)
    finally:
        # Close the temporary merged database even when the copy fails,
        # so we do not leak file descriptors.
        db_next.close()
def find_bin(names, search_path, build_top, versions=None, variant=None):
    '''Search for a list of binaries that can be executed from $PATH.

    *names* is a list of (pattern,absolute_path) pairs where the absolutePat
    can be None and in which case pattern will be used to search
    for an executable. *versions['excludes']* is a list of versions
    that are concidered false positive and need to be excluded, usually
    as a result of incompatibilities.

    This function returns a triple (results, version, complete):
    *results* is a list of populated (pattern,absolute_path) pairs,
    *version* is a version number retrieved through a command line flag
    (--version and -V are tried out) and *complete* is False when at
    least one executable could not be found.

    This function differs from findInclude() and find_lib() in its
    search algorithm. find_bin() strictly behave like $PATH and
    always returns the FIRST executable reachable from $PATH regardless
    of version number, unless the version is excluded, in which case
    the result is the same as if the executable hadn't been found.

    Implementation Note: *names* and *excludes* are two lists instead
    of a dictionary indexed by executale name for two reasons:
    1. Most times find_bin() is called with *names* of executables
    from the same project. It is cumbersome to specify exclusion
    per executable instead of per-project.
    2. The prototype of find_bin() needs to match the ones of
    findInclude() and find_lib().

    Implementation Note: Since the boostrap relies on finding rsync,
    it is possible we invoke this function with log == None hence
    the tests for it.
    '''
    version = None
    if versions and 'excludes' in versions:
        excludes = versions['excludes']
    else:
        excludes = []
    results = []
    droots = search_path
    complete = True
    for name_pat, absolute_path in names:
        if absolute_path != None and os.path.exists(absolute_path):
            # absolute paths only occur when the search has already been
            # executed and completed successfuly.
            results.append((name_pat, absolute_path))
            continue
        link_name, suffix = link_build_name(name_pat, 'bin', variant)
        if os.path.islink(link_name):
            # If we already have a symbolic link in the binBuildDir,
            # we will assume it is the one to use in order to cut off
            # recomputing of things that hardly change.
            results.append((name_pat,
                os.path.realpath(os.path.join(link_name, suffix))))
            continue
        if variant:
            log_interactive(variant + '/')
        log_interactive(name_pat + '... ')
        found = False
        if name_pat.endswith('.app'):
            # OSX application bundles live under /Applications and are
            # directories; they cannot be executed to query a version.
            binpath = os.path.join('/Applications', name_pat)
            if os.path.isdir(binpath):
                found = True
                log_info('yes')
                results.append((name_pat, binpath))
        else:
            for path in droots:
                for binname in find_first_files(path, name_pat):
                    binpath = os.path.join(path, binname)
                    if (os.path.isfile(binpath)
                        and os.access(binpath, os.X_OK)):
                        # We found an executable with the appropriate name,
                        # let's find out if we can retrieve a version number.
                        numbers = []
                        if not (variant and len(variant) > 0):
                            # When looking for a specific *variant*, we do not
                            # try to execute executables as they are surely
                            # not meant to be run on the native system.
                            # We run the help flag before --version, -V
                            # because bzip2 would wait on stdin for data
                            # otherwise.
                            # XXX semilla --help is broken :(
                            for flag in [ '--version', '-V' ]:
                                numbers = []
                                cmdline = [ binpath, flag ]
                                try:
                                    output = subprocess.check_output(
                                        cmdline, stderr=subprocess.STDOUT)
                                    for line in output.splitlines():
                                        numbers += version_candidates(line)
                                except subprocess.CalledProcessError:
                                    # When the command returns with an error
                                    # code, we assume we passed an incorrect
                                    # flag to retrieve the version number.
                                    numbers = []
                                if len(numbers) > 0:
                                    break
                        # At this point *numbers* contains a list that can
                        # interpreted as versions. Hopefully, there is only
                        # one candidate.
                        if len(numbers) == 1:
                            excluded = False
                            if excludes:
                                # An exclude is a (low, high) half-open
                                # version range; None bounds are open.
                                for exclude in list(excludes):
                                    if ((not exclude[0]
                                         or version_compare(
                                            exclude[0], numbers[0]) <= 0)
                                        and (not exclude[1]
                                         or version_compare(
                                            numbers[0], exclude[1]) < 0)):
                                        excluded = True
                                        break
                            if not excluded:
                                version = numbers[0]
                                log_info(str(version))
                                results.append((name_pat, binpath))
                            else:
                                log_info('excluded (' +str(numbers[0])+ ')')
                        else:
                            log_info('yes')
                            results.append((name_pat, binpath))
                        found = True
                        break
                if found:
                    break
        if not found:
            log_info('no')
            results.append((name_pat, None))
            complete = False
    return results, version, complete
def find_cache(context, names):
    '''Search for the presence of files in the cache directory. *names*
    is a dictionnary of file names used as key and the associated checksum.

    Returns the subset of *names* that is absent from the local cache
    and thus still needs to be downloaded.'''
    results = {}
    for pathname in names:
        name = os.path.basename(urlparse.urlparse(pathname).path)
        log_interactive(name + "... ")
        local_name = context.local_dir(pathname)
        if os.path.exists(local_name):
            if isinstance(names[pathname], dict):
                if 'sha1' in names[pathname]:
                    expected = names[pathname]['sha1']
                    with open(local_name, 'rb') as local_file:
                        sha1sum = hashlib.sha1(local_file.read()).hexdigest()
                    if sha1sum == expected:
                        # checksum are matching
                        log_info("matched (sha1)")
                    else:
                        # NOTE(review): a corrupted file is only logged;
                        # it is not added to *results* for re-download.
                        log_info("corrupted? (sha1)")
                else:
                    log_info("yes")
            else:
                log_info("yes")
        else:
            results[ pathname ] = names[pathname]
            log_info("no")
    return results
def find_files(base, name_pat, recurse=True):
    '''Search the directory tree rooted at *base* for files matching *name_pat*
    and returns a list of absolute pathnames to those files.'''
    matches = []
    # The pattern is anchored at the end of the full pathname.
    pattern = re.compile('.*' + name_pat + '$')
    try:
        if os.path.exists(base):
            for entry in os.listdir(base):
                full_path = os.path.join(base, entry)
                if pattern.match(full_path):
                    matches.append(full_path)
                elif recurse and os.path.isdir(full_path):
                    matches.extend(find_files(full_path, name_pat))
    except OSError:
        # In case permission to execute os.listdir is denied.
        pass
    return sorted(matches, reverse=True)
def find_first_files(base, name_pat, subdir=''):
    '''Search the directory tree rooted at *base* for files matching pattern
    *name_pat* and returns a list of relative pathnames to those files
    from *base*.
    If .*/ is part of pattern, base is searched recursively in breadth search
    order until at least one result is found.'''
    try:
        subdirs = []
        results = []
        # Compare the depth of the pattern with the depth of the current
        # subdir to decide whether to keep descending.
        pat_num_sub_dirs = len(name_pat.split(os.sep))
        sub_num_sub_dirs = len(subdir.split(os.sep))
        candidate_dir = os.path.join(base, subdir)
        if os.path.exists(candidate_dir):
            for filename in os.listdir(candidate_dir):
                relative = os.path.join(subdir, filename)
                path = os.path.join(base, relative)
                regex = name_pat_regex(name_pat)
                look = regex.match(path)
                if look != None:
                    results += [ relative ]
                elif (((('.*' + os.sep) in name_pat)
                      or (sub_num_sub_dirs < pat_num_sub_dirs))
                      and os.path.isdir(path)):
                    # When we see .*/, it means we are looking for a pattern
                    # that can be matched by files in subdirectories
                    # of the base.
                    subdirs += [ relative ]
        if len(results) == 0:
            # Breadth-first: only descend into subdirectories when nothing
            # matched at the current level.
            for subdir in subdirs:
                results += find_first_files(base, name_pat, subdir)
    except OSError:
        # Permission to a subdirectory might be denied.
        pass
    return sorted(results, reverse=True)
def find_data(dirname, names,
              search_path, build_top, versions=None, variant=None):
    '''Search for a list of extra files that can be found from $PATH
    where bin was replaced by *dir*.

    Returns a triple (results, version, complete) where *version* is
    always None and *complete* is False when at least one file could
    not be found.'''
    results = []
    droots = search_path
    complete = True
    # NOTE(review): *excludes* is computed here but never used below.
    if versions and 'excludes' in versions:
        excludes = versions['excludes']
    else:
        excludes = []
    if variant:
        build_dir = os.path.join(build_top, variant, dirname)
    else:
        build_dir = os.path.join(build_top, dirname)
    for name_pat, absolute_path in names:
        if absolute_path != None and os.path.exists(absolute_path):
            # absolute paths only occur when the search has already been
            # executed and completed successfuly.
            results.append((name_pat, absolute_path))
            continue
        link_name, suffix = link_build_name(name_pat, dirname, variant)
        if os.path.islink(link_name):
            # If we already have a symbolic link in the dataBuildDir,
            # we will assume it is the one to use in order to cut off
            # recomputing of things that hardly change.
            # XXX Be careful if suffix starts with '/'
            results.append((name_pat,
                os.path.realpath(os.path.join(link_name, suffix))))
            continue
        if variant:
            log_interactive(variant + '/')
        log_interactive(name_pat + '... ')
        # Number of leading path components matched by a '.*/' prefix;
        # used to derive the directory that would be linked.
        link_num = 0
        if name_pat.startswith('.*' + os.sep):
            link_num = len(name_pat.split(os.sep)) - 2
        found = False
        # The structure of share/ directories is not as standard as others
        # and requires a recursive search for prerequisites. As a result,
        # it might take a lot of time to update unmodified links.
        # We thus first check links in build_dir are still valid.
        full_names = find_files(build_dir, name_pat)
        if len(full_names) > 0:
            try:
                os.stat(full_names[0])
                log_info('yes')
                results.append((name_pat, full_names[0]))
                found = True
            except IOError:
                # Dangling link: fall through to the search below.
                pass
        if not found:
            for base in droots:
                full_names = find_files(base, name_pat)
                if len(full_names) > 0:
                    log_info('yes')
                    tokens = full_names[0].split(os.sep)
                    linked = os.sep.join(tokens[:len(tokens) - link_num])
                    # DEPRECATED: results.append((name_pat, linked))
                    results.append((name_pat, full_names[0]))
                    found = True
                    break
        if not found:
            log_info('no')
            results.append((name_pat, None))
            complete = False
    return results, None, complete
def find_etc(names, search_path, build_top, versions=None, variant=None):
    '''Search for a list of configuration files that can be found from $PATH
    where bin was replaced by etc. Thin wrapper around find_data.'''
    # Forward *variant* too; it was previously accepted but dropped,
    # so variant-specific searches behaved like native ones.
    return find_data('etc', names, search_path, build_top, versions, variant)
def find_include(names, search_path, build_top, versions=None, variant=None):
    '''Search for a list of headers that can be found from $PATH
    where bin was replaced by include.

    *names* is a list of (pattern,absolute_path) pairs where the absolutePat
    can be None and in which case pattern will be used to search
    for a header filename patterns. *excludes* is a list
    of versions that are concidered false positive and need to be
    excluded, usually as a result of incompatibilities.

    This function returns a populated list of (pattern,absolute_path) pairs
    and a version number if available.

    This function differs from find_bin() and find_lib() in its search
    algorithm. find_include() might generate a breadth search based
    out of a derived root of $PATH. It opens found header files
    and look for a "#define.*VERSION" pattern in order to deduce
    a version number.'''
    results = []
    version = None
    if versions and 'excludes' in versions:
        excludes = versions['excludes']
    else:
        excludes = []
    complete = True
    # *prefix* accumulates the directory part of a previously matched
    # pattern so later patterns are searched relative to the same root.
    prefix = ''
    include_sys_dirs = search_path
    for name_pat, absolute_path in names:
        if absolute_path != None and os.path.exists(absolute_path):
            # absolute paths only occur when the search has already been
            # executed and completed successfuly.
            results.append((name_pat, absolute_path))
            continue
        link_name, suffix = link_build_name(name_pat, 'include', variant)
        if os.path.islink(link_name):
            # If we already have a symbolic link in the binBuildDir,
            # we will assume it is the one to use in order to cut off
            # recomputing of things that hardly change.
            # XXX Be careful if suffix starts with '/'
            results.append(
                (name_pat, os.path.realpath(os.path.join(link_name, suffix))))
            continue
        if variant:
            log_interactive(variant + '/')
        log_interactive(name_pat + '... ')
        found = False
        for include_sys_dir in include_sys_dirs:
            includes = []
            for header in find_first_files(include_sys_dir,
                                           name_pat.replace(prefix, '')):
                # Open the header file and search for all defines
                # that end in VERSION.
                numbers = []
                # First parse the pathname for a version number...
                # (reversed so the most specific path component wins)
                parts = os.path.dirname(header).split(os.sep)
                parts.reverse()
                for part in parts:
                    for ver in version_candidates(part):
                        if not ver in numbers:
                            numbers += [ ver ]
                # Second open the file and search for a version identifier...
                header = os.path.join(include_sys_dir, header)
                with open(header, 'rt') as header_file:
                    line = header_file.readline()
                    while line != '':
                        look = re.match(r'\s*#define.*VERSION\s+(\S+)', line)
                        if look != None:
                            for ver in version_candidates(look.group(1)):
                                if not ver in numbers:
                                    numbers += [ ver ]
                        line = header_file.readline()
                # At this point *numbers* contains a list that can
                # interpreted as versions. Hopefully, there is only
                # one candidate.
                if len(numbers) >= 1:
                    # With more than one version number, we assume the first
                    # one found is the most relevent and use it regardless.
                    # This is different from previously assumption that more
                    # than one number was an error in the version detection
                    # algorithm. As it turns out, boost packages sources
                    # in a -1_41_0.tar.gz file while version.hpp says 1_41.
                    excluded = False
                    if excludes:
                        for exclude in list(excludes):
                            # exclude is a (low, high) version range;
                            # None on either side means unbounded.
                            if ((not exclude[0]
                                 or version_compare(
                                        exclude[0], numbers[0]) <= 0)
                                and (not exclude[1]
                                     or version_compare(
                                            numbers[0], exclude[1]) < 0)):
                                excluded = True
                                break
                    if not excluded:
                        # Insert in decreasing-version order; None-version
                        # entries sort last.
                        index = 0
                        for include in includes:
                            if ((not include[1])
                                or version_compare(
                                       include[1], numbers[0]) < 0):
                                break
                            index = index + 1
                        includes.insert(index, (header, numbers[0]))
                else:
                    # If we find no version number, we append the header
                    # at the end of the list with 'None' for version.
                    includes.append((header, None))
            if len(includes) > 0:
                if includes[0][1]:
                    version = includes[0][1]
                    log_info(version)
                else:
                    log_info('yes')
                results.append((name_pat, includes[0][0]))
                # Strip the common trailing components between the pattern
                # and the found header to derive a search root for the
                # remaining patterns.
                name_pat_parts = name_pat.split(os.sep)
                include_file_parts = includes[0][0].split(os.sep)
                while (len(name_pat_parts) > 0
                       and name_pat_parts[len(name_pat_parts)-1]
                       == include_file_parts[len(include_file_parts)-1]):
                    name_pat_part = name_pat_parts.pop()
                    include_file_part = include_file_parts.pop()
                prefix = os.sep.join(name_pat_parts)
                if prefix and len(prefix) > 0:
                    prefix = prefix + os.sep
                    include_sys_dirs = [ os.sep.join(include_file_parts) ]
                else:
                    include_sys_dirs = [ os.path.dirname(includes[0][0]) ]
                found = True
                break
        if not found:
            log_info('no')
            results.append((name_pat, None))
            complete = False
    return results, version, complete
def find_lib(names, search_path, build_top, versions=None, variant=None):
    '''Search for a list of libraries that can be found from $PATH
    where bin was replaced by lib.

    *names* is a list of (pattern,absolute_path) pairs where the absolutePat
    can be None and in which case pattern will be used to search
    for library names with neither a 'lib' prefix
    nor a '.a', '.so', etc. suffix. *excludes* is a list
    of versions that are concidered false positive and need to be
    excluded, usually as a result of incompatibilities.

    This function returns a populated list of (pattern,absolute_path) pairs
    and a version number if available.

    This function differs from find_bin() and find_include() in its
    search algorithm. find_lib() might generate a breadth search based
    out of a derived root of $PATH. It uses the full library name
    in order to deduce a version number if possible.'''
    results = []
    version = None
    if versions and 'excludes' in versions:
        excludes = versions['excludes']
    else:
        excludes = []
    complete = True
    # We used to look for lib suffixes '-version' and '_version'. Unfortunately
    # it picked up libldap_r.so when we were looking for libldap.so. Looking
    # through /usr/lib on Ubuntu does not show any libraries ending with
    # a '_version' suffix so we will remove it from the regular expression.
    suffix = '(-.+)?(\\' + lib_static_suffix() \
        + '|\\' + lib_dyn_suffix() + r'(\\.\S+)?)'
    if not variant and CONTEXT.host() in APT_DISTRIBS:
        # Ubuntu 12.04+: host libraries are not always installed
        # in /usr/lib. Sometimes they end-up in /usr/lib/x86_64-linux-gnu
        # like libgmp.so for example.
        droots = []
        for path in search_path:
            droots += [ path, os.path.join(path, 'x86_64-linux-gnu') ]
    else:
        droots = search_path
    for name_pat, absolute_path in names:
        if absolute_path != None and os.path.exists(absolute_path):
            # absolute paths only occur when the search has already been
            # executed and completed successfuly.
            results.append((name_pat, absolute_path))
            continue
        lib_base_pat = lib_prefix() + name_pat
        if '.*' in name_pat:
            # Dealing with a regular expression already
            lib_suffix_by_priority = []
            link_pats = [ name_pat ]
        elif lib_base_pat.endswith('.so'):
            # local override to select dynamic library.
            lib_base_pat = lib_base_pat[:-3]
            lib_suffix_by_priority = [ lib_dyn_suffix(), lib_static_suffix() ]
            link_pats = [ lib_base_pat + '.so',
                          lib_base_pat + lib_static_suffix() ]
        elif STATIC_LIB_FIRST:
            lib_suffix_by_priority = [ lib_static_suffix(), lib_dyn_suffix() ]
            link_pats = [ lib_base_pat + lib_static_suffix(),
                          lib_base_pat + '.so' ]
        else:
            lib_suffix_by_priority = [ lib_dyn_suffix(), lib_static_suffix() ]
            link_pats = [ lib_base_pat + '.so',
                          lib_base_pat + lib_static_suffix() ]
        found = False
        for link_pat in link_pats:
            link_name, link_suffix = link_build_name(link_pat, 'lib', variant)
            if os.path.islink(link_name):
                # If we already have a symbolic link in the libBuildDir,
                # we will assume it is the one to use in order to cut off
                # recomputing of things that hardly change.
                results.append((name_pat, os.path.realpath(os.path.join(
                    link_name, link_suffix))))
                found = True
                break
        if found:
            continue
        if variant:
            log_interactive(variant + '/')
        log_interactive(name_pat + '... ')
        found = False
        for lib_sys_dir in droots:
            libs = []
            if '.*' in name_pat:
                # We were already given a regular expression.
                # If we are not dealing with a honest to god library, let's
                # just use the pattern we were given. This is because, python,
                # ruby, etc. also put their stuff in libDir.
                # ex patterns for things also in libDir:
                #   - ruby/.*/json.rb
                #   - cgi-bin/awstats.pl
                #   - .*/registration/__init__.py
                lib_pat = name_pat
            else:
                lib_pat = lib_base_pat + suffix
            for libname in find_first_files(lib_sys_dir, lib_pat):
                numbers = version_candidates(libname)
                absolute_path = os.path.join(lib_sys_dir, libname)
                absolute_path_base = os.path.dirname(absolute_path)
                # NOTE(review): split('.')[1] keeps only the first extension
                # component (e.g. '.so' out of 'libfoo.so.1.2') — confirm
                # that is intended for multi-dot basenames.
                absolute_path_ext = '.' \
                    + os.path.basename(absolute_path).split('.')[1]
                if len(numbers) == 1:
                    excluded = False
                    if excludes:
                        for exclude in list(excludes):
                            # exclude is a (low, high) version range;
                            # None on either side means unbounded.
                            if ((not exclude[0]
                                 or version_compare(
                                        exclude[0], numbers[0]) <= 0)
                                and (not exclude[1]
                                     or version_compare(
                                            numbers[0], exclude[1]) < 0)):
                                excluded = True
                                break
                    if not excluded:
                        # Insert candidate into a sorted list. First to last,
                        # higher version number, dynamic libraries.
                        index = 0
                        for lib in libs:
                            lib_path_base = os.path.dirname(lib[0])
                            if ((not lib[1])
                                or version_compare(lib[1], numbers[0]) < 0):
                                break
                            elif (absolute_path_base == lib_path_base
                                  and absolute_path_ext
                                  == lib_suffix_by_priority[0]):
                                break
                            index = index + 1
                        libs.insert(index, (absolute_path, numbers[0]))
                else:
                    # Insert candidate into a sorted list. First to last,
                    # higher version number, shortest name, dynamic libraries.
                    index = 0
                    for lib in libs:
                        lib_path_base = os.path.dirname(lib[0])
                        if lib[1]:
                            # versioned candidates always sort first
                            pass
                        elif absolute_path_base == lib_path_base:
                            if absolute_path_ext == lib_suffix_by_priority[0]:
                                break
                        elif lib_path_base.startswith(absolute_path_base):
                            break
                        index = index + 1
                    libs.insert(index, (absolute_path, None))
            if len(libs) > 0:
                # Best candidate sits at the head of the sorted list.
                candidate = libs[0][0]
                version = libs[0][1]
                look = re.match('.*%s(.+)' % lib_base_pat, candidate)
                if look:
                    suffix = look.group(1)
                    log_info(suffix)
                else:
                    log_info('yes (no suffix?)')
                results.append((name_pat, candidate))
                found = True
                break
        if not found:
            log_info('no')
            results.append((name_pat, None))
            complete = False
    return results, version, complete
def find_prerequisites(deps, versions=None, variant=None):
    '''Find a set of executables, headers, libraries, etc. on a local machine.

    *deps* is a dictionary where each key associates an install directory
    (bin, include, lib, etc.) to a pair (pattern,absolute_path) as required
    by *find_bin*(), *find_lib*(), *find_include*(), etc.
    *excludes* contains a list of excluded version ranges because they are
    concidered false positive, usually as a result of incompatibilities.

    This function will try to find the latest version of each file which
    was not excluded.

    This function will return a dictionnary matching *deps* where each found
    file will be replaced by an absolute pathname and each file not found
    will not be present. This function returns True if all files in *deps*
    can be fulfilled and returns False if any file cannot be found.'''
    version = None
    installed = {}
    complete = True
    for dep in deps:
        # Make sure the extras do not get filtered out.
        if not dep in INSTALL_DIRS:
            installed[dep] = deps[dep]
    for dirname in INSTALL_DIRS:
        # The search order "bin, include, lib, etc" will determine
        # how excluded versions apply.
        if dirname in deps:
            # Dispatch to the matching sibling in this module
            # (find_bin, find_include, find_lib, ...).
            command = 'find_' + dirname
            # First time ever *find* is called, libDir will surely not defined
            # in the workspace make fragment and thus we will trigger
            # interactive input from the user.
            # We want to make sure the output of the interactive session does
            # not mangle the search for a library so we preemptively trigger
            # an interactive session.
            # deprecated: done in search_path. context.value(dir + 'Dir')
            installed[dirname], installed_version, installed_complete = \
                getattr(sys.modules[__name__], command)(deps[dirname],
                    CONTEXT.search_path(dirname, variant),
                    CONTEXT.value('buildTop'),
                    versions, variant)
            # Once we have selected a version out of the installed
            # local system, we lock it down and only search for
            # that specific version.
            if not version and installed_version:
                version = installed_version
                versions = { 'excludes':
                    [ (None, version), (version_incr(version), None) ] }
            if not installed_complete:
                complete = False
    return installed, complete
def find_libexec(names, search_path, build_top, versions=None, variant=None):
    '''Search for the files listed in *names* under the libexec/ directory.

    Convenience wrapper delegating to find_data() with a 'libexec' prefix;
    *versions* may carry an 'excludes' list of version ranges to reject.'''
    return find_data('libexec', names, search_path, build_top,
                     versions, variant)
def find_share(names, search_path, build_top, versions=None, variant=None):
    '''Search for the files listed in *names* under the share/ directory.

    Convenience wrapper delegating to find_data() with a 'share' prefix;
    *versions* may carry an 'excludes' list of version ranges to reject.'''
    return find_data('share', names, search_path, build_top,
                     versions, variant)
def find_boot_bin(context, name, package=None, dbindex=None):
    '''This script needs a few tools to be installed to bootstrap itself,
    most noticeably the initial source control tool used to checkout
    the projects dependencies index file.

    Returns the path to the *name* executable linked under the bin build
    directory, installing *package* (defaults to *name*) through *dbindex*
    first when the link does not exist yet.'''
    executable = os.path.join(context.bin_build_dir(), name)
    if not os.path.exists(executable):
        # We do not use *validate_controls* here because dws in not
        # a project in *srcTop* and does not exist on the remote machine.
        # We use find_bin() and link_context() directly also because it looks
        # weird when the script prompts for installing a non-existent dws
        # project before looking for the rsync prerequisite.
        if not package:
            package = name
        if not dbindex:
            # Minimal in-memory project index describing the package
            # that provides *name*.
            dbindex = IndexProjects(context,
'''<?xml version="1.0" ?>
<projects>
  <project name="dws">
    <repository>
      <dep name="%s">
        <bin>%s</bin>
      </dep>
    </repository>
  </project>
</projects>
''' % (package, name))
        executables, version, complete = find_bin([ [ name, None ] ],
                                                  context.search_path('bin'),
                                                  context.value('buildTop'))
        if len(executables) == 0 or not executables[0][1]:
            # Not found on the system: install the package, then retry.
            install([package], dbindex)
            executables, version, complete = find_bin([ [ name, None ] ],
                context.search_path('bin'), context.value('buildTop'))
        name, absolute_path = executables.pop()
        link_pat_path(name, absolute_path, 'bin')
        executable = os.path.join(context.bin_build_dir(), name)
    return executable
def find_git(context):
    '''Make sure a git executable is linked under *buildTop*/bin,
    installing the git-all package when it is not, and return the
    command name to invoke.'''
    git_link = os.path.join(context.value('buildTop'), 'bin', 'git')
    if not os.path.lexists(git_link):
        # git-core support files live under share/ on apt-based
        # distributions and under libexec/ everywhere else.
        core_dir = 'share' if context.host() in APT_DISTRIBS else 'libexec'
        prereqs = {'bin': [('git', None)], core_dir: [('git-core', None)]}
        SetupStep('git-all', files=prereqs).run(context)
    return 'git'
def find_npm(context):
    '''Bootstrap npm/node under *buildTop*/bin through nvm and return
    the command name to invoke.'''
    build_npm = os.path.join(context.value('buildTop'), 'bin', 'npm')
    if not os.path.lexists(build_npm):
        # Build nvm from source and use it to install a pinned
        # node/npm version.
        dbindex = IndexProjects(context,
'''<?xml version="1.0" ?>
<projects>
  <project name="nvm">
    <repository>
      <sync>https://github.com/creationix/nvm.git</sync>
      <shell>
export NVM_DIR=${buildTop}
. ${srcTop}/nvm/nvm.sh
nvm install 0.8.14
      </shell>
    </repository>
  </project>
</projects>
''')
        validate_controls(
            BuildGenerator([ 'nvm' ], [], force_update = True), dbindex)
        # Create relative symlinks so the workspace stays relocatable.
        prev = os.getcwd()
        os.chdir(os.path.join(context.value('buildTop'), 'bin'))
        os.symlink('../v0.8.14/bin/npm', 'npm')
        os.symlink('../v0.8.14/bin/node', 'node')
        os.chdir(prev)
    return 'npm'
def find_pip(context):
    '''Bootstrap a pip executable and return the path of the link
    created under *buildTop*/bin.'''
    # yum-based distributions package pip under a different name.
    package = 'python-pip' if context.host() in YUM_DISTRIBS else None
    find_boot_bin(context, '(pip).*', package)
    return os.path.join(context.value('buildTop'), 'bin', 'pip')
def find_rsync(context, host, relative=True, admin=False,
               username=None, key=None):
    '''Check if rsync is present and install it through the package
    manager if it is not. rsync is a little special since it is used
    directly by this script and the script is not always installed
    through a project.

    Returns an rsync command line (as a list) and the remote-path
    prefix ("user@host:") to prepend to remote pathnames.'''
    rsync = find_boot_bin(context, 'rsync')
    prefix = ""
    if username:
        prefix = prefix + username + '@'
    # -a is equivalent to -rlptgoD, we are only interested in -r (recursive),
    # -p (permissions), -t (times)
    flags = '-qrptuzR' if relative else '-qrptuz'
    cmdline = [rsync, flags]
    if host:
        # We are accessing the remote machine through ssh
        prefix = prefix + host + ':'
        ssh_parts = ['--rsh="ssh -q']
        if admin:
            ssh_parts.append(' -t')
        if key:
            ssh_parts.append(' -i ' + str(key))
        ssh_parts.append('"')
        cmdline += [''.join(ssh_parts)]
    if admin and username != 'root':
        cmdline += ['--rsync-path "sudo rsync"']
    return cmdline, prefix
def name_pat_regex(name_pat):
    '''Compile *name_pat* into a regular expression matching a full
    pathname.

    Many C++ tools contain ++ in their name which might trip
    the regular expression parser.
    We must postpend the '$' sign to the regular expression
    otherwise "makeconv" and "makeinfo" will be picked up by
    a match for the "make" executable.'''
    # BUG FIX: use a raw string for the escaped replacement. The previous
    # plain-string '\+\+' relied on Python's undefined-escape fallback,
    # which is a warning and eventually a SyntaxError on modern Python.
    pat = name_pat.replace('++', r'\+\+')
    if not pat.startswith('.*'):
        # If we don't add the separator here we will end-up with unrelated
        # links to automake, pkmake, etc. when we are looking for "make".
        pat = '.*' + os.sep + pat
    return re.compile(pat + '$')
def config_var(context, variables):
    '''Look up the workspace configuration file the workspace make fragment
    for definition of variables *variables*, instances of classes derived from
    Variable (ex. Pathname, Single).
    If those do not exist, prompt the user for input.'''
    modified = False
    for key, variable in variables.items():
        # apply constrains where necessary
        variable.constrain(context.environ)
        if key not in context.environ:
            # Variables that are not added to the context will not
            # be saved in the workspace make fragment.
            context.environ[key] = variable
        modified |= variable.configure(context)
    if modified:
        # Persist any value the user was prompted for.
        context.save()
    return modified
def cwd_projects(reps, recurse=False):
    '''returns a list of projects based on the current directory
    and/or a list passed as argument.'''
    if not reps:
        # Derive project names from the current directory whenever it
        # is a subdirectory of buildTop or srcTop.
        cwd = os.path.realpath(os.getcwd())
        build_top = os.path.realpath(CONTEXT.value('buildTop'))
        src_top = os.path.realpath(CONTEXT.value('srcTop'))
        project_name = None
        src_dir = src_top
        if os.path.commonprefix([cwd, src_top]) == src_top:
            # Below srcTop: the relative path is the project name.
            src_dir = cwd
            project_name = src_dir[len(src_top) + 1:]
        elif os.path.commonprefix([cwd, build_top]) == build_top:
            # Below buildTop: map back into the source tree first.
            src_dir = cwd.replace(build_top, src_top)
            project_name = src_dir[len(src_top) + 1:]
        if project_name:
            reps = [project_name]
        else:
            # Not inside a single project: scan for source-control
            # directories beneath src_dir.
            for repdir in find_files(src_dir, Repository.dirPats):
                reps += [os.path.dirname(
                    repdir.replace(src_top + os.sep, ''))]
    if recurse:
        raise NotImplementedError()
    return reps
def ordered_prerequisites(roots, index):
    '''returns the dependencies in topological order for a set of project
    names in *roots*.'''
    dgen = MakeDepGenerator(roots, [], exclude_pats=EXCLUDE_PATS)
    # XXX this is an ugly little hack! Only keep steps that represent
    # an install or a build of a project.
    return [step.qualified_project_name()
            for step in index.closure(dgen)
            if isinstance(step, (InstallStep, BuildStep))]
def fetch(context, filenames,
          force=False, admin=False, relative=True):
    '''download *filenames*, typically a list of distribution packages,
    from the remote server into *cacheDir*. See the upload function
    for uploading files to the remote server.
    When the files to fetch require sudo permissions on the remote
    machine, set *admin* to true.

    NOTE(review): *filenames* is indexed as ``filenames[name]`` below,
    so despite the docstring it appears to be a mapping of pathname to
    per-file metadata — confirm against callers.
    '''
    if filenames and len(filenames) > 0:
        # Expand filenames to absolute urls
        remote_site_top = context.value('remoteSiteTop')
        uri = urlparse.urlparse(remote_site_top)
        pathnames = {}
        for name in filenames:
            # Absolute path to access a file on the remote machine.
            remote_path = ''
            if name:
                if name.startswith('http') or ':' in name:
                    # Already a fully-qualified URL.
                    remote_path = name
                elif len(uri.path) > 0 and name.startswith(uri.path):
                    remote_path = os.path.join(remote_site_top,
                        '.' + name.replace(uri.path, ''))
                elif name.startswith('/'):
                    remote_path = '/.' + name
                else:
                    remote_path = os.path.join(remote_site_top, './' + name)
            pathnames[ remote_path ] = filenames[name]
        # Check the local cache
        if force:
            downloads = pathnames
        else:
            downloads = find_cache(context, pathnames)
        # Make sure the local directories exist before fetching.
        for filename in downloads:
            local_filename = context.local_dir(filename)
            dirname = os.path.dirname(local_filename)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
        # Split fetches by protocol
        https = []
        sshs = []
        for package in downloads:
            # Splits between files downloaded through http and ssh.
            if package.startswith('http'):
                https += [ package ]
            else:
                sshs += [ package ]
        # fetch https
        for remotename in https:
            localname = context.local_dir(remotename)
            if not os.path.exists(os.path.dirname(localname)):
                os.makedirs(os.path.dirname(localname))
            log_info('fetching ' + remotename + '...')
            remote = urllib2.urlopen(urllib2.Request(remotename))
            local = open(localname, 'w')
            local.write(remote.read())
            local.close()
            remote.close()
        # fetch sshs
        if len(sshs) > 0:
            sources = []
            hostname = uri.netloc
            if not uri.netloc:
                # If there is no protocol specified, the hostname
                # will be in uri.scheme (That seems like a bug in urlparse).
                hostname = uri.scheme
            for ssh in sshs:
                sources += [ ssh.replace(hostname + ':', '') ]
            if len(sources) > 0:
                if admin:
                    # Pre-authenticate sudo on the remote machine so the
                    # password prompt does not garble rsync's stream.
                    shell_command(['stty -echo;', 'ssh', hostname,
                                   'sudo', '-v', '; stty echo'])
                cmdline, prefix = find_rsync(context, context.remote_host(),
                                             relative, admin)
                shell_command(cmdline + ["'" + prefix + ' '.join(sources) + "'",
                                         context.value('siteTop') ])
def create_managed(project_name, versions, target):
    '''Create a step that will install *project_name* through the local
    package manager.
    If the target is pure python, we will try pip before native package
    manager because we prefer to install in the virtualenv. We solely rely
    on the native package manager for python with C bindings.'''
    if target and target.startswith('python'):
        return PipInstallStep(project_name, versions, target)
    if target and target.startswith('nodejs'):
        return NpmInstallStep(project_name, target)
    # Fall back on the native package manager of the host distribution.
    host = CONTEXT.host()
    if host in APT_DISTRIBS:
        return AptInstallStep(project_name, target)
    if host in PORT_DISTRIBS:
        return MacPortInstallStep(project_name, target)
    if host in YUM_DISTRIBS:
        return YumInstallStep(project_name, target)
    return None
def create_package_file(project_name, filenames):
    '''Returns an install step able to install the pre-built package
    files in *filenames* through the host package manager, or None when
    the host distribution is not recognized.'''
    host = CONTEXT.host()
    if host in APT_DISTRIBS:
        return DpkgInstallStep(project_name, filenames)
    if host in PORT_DISTRIBS:
        return DarwinInstallStep(project_name, filenames)
    if host in YUM_DISTRIBS:
        return RpmInstallStep(project_name, filenames)
    return None
def elapsed_duration(start, finish):
    '''Returns elapsed time between start and finish, rounded up to
    at least one whole second.'''
    duration = finish - start
    # XXX until most system move to python 2.7, we compute
    # the number of seconds ourselves. +1 insures we run for
    # at least a second.
    # BUG FIX: use floor division ('//') so the result stays an integral
    # number of seconds. Under Python 3 (or `from __future__ import
    # division`) the previous '/' silently produced a float.
    return datetime.timedelta(seconds=((duration.microseconds
                                        + (duration.seconds
                                           + duration.days * 24 * 3600)
                                        * 10**6) // 10**6) + 1)
def install(packages, dbindex):
    '''install a pre-built (also pre-fetched) package.

    *packages* is a list mixing project names (installed through the
    package manager or the index *dbindex*) and paths to package files
    already present on the local machine.
    '''
    projects = []
    local_files = []
    package_files = None
    for name in packages:
        if os.path.isfile(name):
            local_files += [ name ]
        else:
            projects += [ name ]
    if len(local_files) > 0:
        package_files = create_package_file(local_files[0], local_files)
    if len(projects) > 0:
        handler = Unserializer(projects)
        dbindex.parse(handler)
        managed = []
        for name in projects:
            # *name* is definitely handled by the local system package manager
            # whenever there is no associated project.
            if name in handler.projects:
                package = handler.as_project(name).packages[CONTEXT.host()]
                if package:
                    step = create_package_file(name, package.fetches())
                    if package_files:
                        package_files.insert(step)
                    else:
                        # BUG FIX: previously package_files.insert() was
                        # called unconditionally, raising AttributeError
                        # on None when no local package file was given.
                        package_files = step
                else:
                    managed += [ name ]
            else:
                managed += [ name ]
        if len(managed) > 0:
            step = create_managed(managed[0], versions=None, target=None)
            for package in managed[1:]:
                step.insert(create_managed(package, versions=None, target=None))
            step.run(CONTEXT)
    if package_files:
        package_files.run(CONTEXT)
def help_book(help_string):
    '''Print a text string help message as formatted docbook.

    *help_string* is a file-like buffer (``getvalue()`` is used) holding
    an optparse/argparse-style usage text. The state machine below maps:
    "Usage:" line -> refentry header, "Section:" lines -> refsection,
    option/command lines -> varlistentry items.'''
    first_term = True
    first_section = True
    lines = help_string.getvalue().split('\n')
    while len(lines) > 0:
        line = lines.pop(0)
        if line.strip().startswith('Usage'):
            look = re.match(r'Usage: (\S+)', line.strip())
            cmdname = look.group(1)
            # /usr/share/xml/docbook/schema/dtd/4.5/docbookx.dtd
            # dtd/docbook-xml/docbookx.dtd
            sys.stdout.write("""<?xml version="1.0"?>
<refentry xmlns="http://docbook.org/ns/docbook"
 xmlns:xlink="http://www.w3.org/1999/xlink"
 xml:id=\"""" + cmdname + """">
<info>
<author>
<personname>Sebastien Mirolo <smirolo@fortylines.com></personname>
</author>
</info>
<refmeta>
<refentrytitle>""" + cmdname + """</refentrytitle>
<manvolnum>1</manvolnum>
<refmiscinfo class="manual">User Commands</refmiscinfo>
<refmiscinfo class="source">drop</refmiscinfo>
<refmiscinfo class="version">""" + str(__version__) + """</refmiscinfo>
</refmeta>
<refnamediv>
<refname>""" + cmdname + """</refname>
<refpurpose>inter-project dependencies tool</refpurpose>
</refnamediv>
<refsynopsisdiv>
<cmdsynopsis>
<command>""" + cmdname + """</command>
<arg choice="opt">
<option>options</option>
</arg>
<arg>command</arg>
</cmdsynopsis>
</refsynopsisdiv>
""")
        elif (line.strip().startswith('Version')
              or re.match(r'\S+ version', line.strip())):
            # Version banners are dropped from the docbook output.
            pass
        elif line.strip().endswith(':'):
            # A section header: close any open item/section first.
            if not first_term:
                sys.stdout.write("</para>\n")
                sys.stdout.write("</listitem>\n")
                sys.stdout.write("</varlistentry>\n")
            if not first_section:
                sys.stdout.write("</variablelist>\n")
                sys.stdout.write("</refsection>\n")
            first_section = False
            sys.stdout.write("<refsection>\n")
            sys.stdout.write('<title>' + line.strip() + '</title>\n')
            sys.stdout.write("<variablelist>")
            first_term = True
        elif len(line) > 0 and (re.search("[a-z]", line[0])
                                or line.startswith(" -")):
            # An option or command definition line.
            stmt = line.strip().split(' ')
            if not first_term:
                sys.stdout.write("</para>\n")
                sys.stdout.write("</listitem>\n")
                sys.stdout.write("</varlistentry>\n")
            first_term = False
            # Find the first non-empty word after the term.
            # NOTE(review): *word* stays unbound/stale when stmt has no
            # non-empty tail — confirm the help text guarantees one.
            for word in stmt[1:]:
                if len(word) > 0:
                    break
            if line.startswith(" -h,"):
                # Hack because "show" does not start
                # with uppercase.
                sys.stdout.write("<varlistentry>\n<term>" + ' '.join(stmt[0:2])
                                 + "</term>\n")
                word = 'S'
                stmt = stmt[1:]
            elif not re.search("[A-Z]", word[0]):
                sys.stdout.write("<varlistentry>\n<term>" + line + "</term>\n")
            else:
                if not stmt[0].startswith('-'):
                    # Commands (not options) get a stable xml:id anchor.
                    sys.stdout.write("<varlistentry xml:id=\"dws." \
                                     + stmt[0] + "\">\n")
                else:
                    sys.stdout.write("<varlistentry>\n")
                sys.stdout.write("<term>" + stmt[0] + "</term>\n")
            sys.stdout.write("<listitem>\n")
            sys.stdout.write("<para>\n")
            if re.search("[A-Z]", word[0]):
                sys.stdout.write(' '.join(stmt[1:]) + '\n')
            else:
                sys.stdout.write(line + '\n')
    # Close whatever item/section is still open before the footer.
    if not first_term:
        sys.stdout.write("</para>\n")
        sys.stdout.write("</listitem>\n")
        sys.stdout.write("</varlistentry>\n")
    if not first_section:
        sys.stdout.write("</variablelist>\n")
        sys.stdout.write("</refsection>\n")
    sys.stdout.write("</refentry>\n")
def lib_prefix():
    '''Returns the prefix for library names.'''
    # Most platforms prepend 'lib'; Cygwin uses no prefix at all.
    overrides = {
        'Cygwin': ''
    }
    return overrides.get(CONTEXT.host(), 'lib')
def lib_static_suffix():
    '''Returns the suffix for static library names.'''
    # No per-host overrides known so far; '.a' everywhere.
    overrides = {}
    return overrides.get(CONTEXT.host(), '.a')
def lib_dyn_suffix():
    '''Returns the suffix for dynamic library names.'''
    # Default to ELF '.so'; Windows/Mac toolchains differ.
    overrides = {
        'Cygwin': '.dll',
        'Darwin': '.dylib'
    }
    return overrides.get(CONTEXT.host(), '.so')
def link_prerequisites(files, versions=None, target=None):
    '''All projects which are dependencies but are not part of *srcTop*
    are not under development in the current workspace. Links to
    the required executables, headers, libraries, etc. will be added to
    the install directories such that projects in *srcTop* can build.
    *excludes* is a list of versions to exclude.'''
    def _relink_all(candidates):
        # Create a link for every (pattern, path) pair and report
        # whether all of them could be resolved.
        resolved = True
        for dirname in INSTALL_DIRS:
            if dirname in candidates:
                for name_pat, absolute_path in candidates[dirname]:
                    resolved &= link_pat_path(name_pat, absolute_path,
                                              dirname, target)
        return resolved

    # First check whether the existing links are already sufficient;
    # rerun the prerequisite search only when at least one link in
    # [bin|include|lib|...]Dir is unresolved.
    complete = _relink_all(files)
    if not complete:
        files, complete = find_prerequisites(files, versions, target)
        if complete:
            complete = _relink_all(files)
    return files, complete
def link_context(path, link_name):
    '''link a *path* into the workspace.'''
    if not path:
        log_error('There is no target for link ' + link_name + '\n')
        return
    if os.path.realpath(path) == os.path.realpath(link_name):
        # Nothing to do: the link already resolves to the target.
        return
    link_dir = os.path.dirname(link_name)
    if not os.path.exists(link_dir):
        os.makedirs(link_dir)
    # In the following two 'if' statements, we are very careful to only
    # remove/update symlinks and leave other files present in
    # [bin|lib|...]Dir 'as is'.
    if os.path.islink(link_name):
        os.remove(link_name)
    if not os.path.exists(link_name) and os.path.exists(path):
        os.symlink(path, link_name)
def link_build_name(name_pat, subdir, target=None):
    '''Derive the pathname of the symlink under *buildTop* for *name_pat*
    and the pathname suffix trailing the matched group (if any).

    We normalize the library link name such as to make use of the default
    definitions of .LIBPATTERNS and search paths in make. It also avoids
    having to prefix and suffix library names in Makefile with complex
    variable substitution logic.'''
    suffix = ''
    if name_pat_regex(name_pat).groups == 0:
        # Pattern without a capture group: the link is named after the
        # last path component, with regex escapes stripped.
        unescaped = name_pat.replace('\\', '')
        name = unescaped.split(os.sep)[-1]
    else:
        # Pattern with a capture group: the first alternative inside the
        # group becomes the link name.
        group_match = re.search(r'\((.+)\)', name_pat)
        name = group_match.group(1).split('|')[0]
        # XXX +1 ')', +2 '/'
        suffix = name_pat[group_match.end(1) + 2:]
    subpath = os.path.join(target, subdir) if target else subdir
    link_build = os.path.join(CONTEXT.value('buildTop'), subpath, name)
    return link_build, suffix
def link_pat_path(name_pat, absolute_path, subdir, target=None):
    '''Create a link in the build directory.

    Returns True when the link target is known (or already linked) and
    False when *absolute_path* is missing and no link exists yet.'''
    link_path = absolute_path
    ext = ''
    if absolute_path:
        _, ext = os.path.splitext(absolute_path)
    subpath = subdir
    if target:
        subpath = os.path.join(target, subdir)
    # Normalize library patterns: drop an explicit '.a'/'.so' suffix so
    # the link name can be rebuilt from the found file's extension.
    if name_pat.endswith('.a') or name_pat.endswith('.so'):
        name_pat, _ = os.path.splitext(name_pat)
    if ext == lib_static_suffix():
        # Static library: link as lib<name>.a for make's .LIBPATTERNS.
        name = 'lib' + name_pat + '.a'
        link_name = os.path.join(CONTEXT.value('buildTop'), subpath, name)
    elif ext == lib_dyn_suffix():
        # Dynamic library: link as lib<name>.so.
        name = 'lib' + name_pat + '.so'
        link_name = os.path.join(CONTEXT.value('buildTop'), subpath, name)
    else:
        # \todo if the dynamic lib suffix ends with .so.X we will end-up here.
        # This is wrong since at that time we won't create a lib*name*.so link.
        link_name, suffix = link_build_name(name_pat, subdir, target)
        if absolute_path and len(suffix) > 0 and absolute_path.endswith(suffix):
            # Interestingly absolute_path[:-0] returns an empty string.
            link_path = absolute_path[:-len(suffix)]
    # create links
    complete = True
    if link_path:
        if not os.path.isfile(link_name):
            link_context(link_path, link_name)
    else:
        if not os.path.isfile(link_name):
            complete = False
    return complete
def localize_context(context, name, target):
    '''Create the environment in *buildTop* necessary to make a project
    from source.

    When *target* is set, a per-target sub-context rooted at
    buildTop/target is loaded (or created and saved); otherwise the
    passed *context* is used as-is. Also chdir()s into the project's
    object directory.'''
    if target:
        local_context = Context()
        local_context.environ['buildTop'] \
            = os.path.join(context.value('buildTop'), target)
        local_context.config_filename \
            = os.path.join(local_context.value('buildTop'),
                           context.config_name)
        if os.path.exists(local_context.config_filename):
            # Reuse a previously saved per-target configuration.
            local_context.locate(local_context.config_filename)
        else:
            local_context.environ['srcTop'] = context.value('srcTop')
            local_context.environ['siteTop'] = context.value('siteTop')
            local_context.environ['installTop'].default \
                = os.path.join(context.value('installTop'), target)
            local_context.save()
    else:
        local_context = context
    obj_dir = context.obj_dir(name)
    if obj_dir != os.getcwd():
        if not os.path.exists(obj_dir):
            os.makedirs(obj_dir)
        os.chdir(obj_dir)
    # prefix.mk and suffix.mk expects these variables to be defined
    # in the workspace make fragment. If they are not you might get
    # some strange errors where a g++ command-line appears with
    # -I <nothing> or -L <nothing> for example.
    # This code was moved to be executed right before the issue
    # of a "make" subprocess in order to let the project index file
    # a change to override defaults for installTop, etc.
    for dir_name in [ 'include', 'lib', 'bin', 'etc', 'share' ]:
        # value() is called for its side effect (forcing the variable
        # to be defined, possibly prompting the user); the result is
        # deliberately discarded. NOTE(review): this rebinds *name*.
        name = local_context.value(dir_name + 'Dir')
    # \todo save local context only when necessary
    local_context.save()
    return local_context
def merge_unique(left, right):
    '''Merge a list of additions into a previously existing list.

    Appends to *left* (in place) every element of *right* that *left*
    does not already contain, then returns *left*.
    '''
    for element in right:
        if element not in left:
            left.append(element)
    return left
def merge_build_conf(db_prev, db_upd, parser):
    '''Merge an updated project dependency database into an existing
    project dependency database. The existing database has been
    augmented by user-supplied information such as "use source
    controlled repository", "skip version X dependency", etc. Hence
    we do a merge instead of a complete replace.

    *db_prev* and *db_upd* are open file objects (either may be None,
    in which case the other one is returned as-is). *parser* supplies
    the copy/next/start_project/trailer primitives over the database
    format. Returns a temporary file holding the merged database.
    '''
    if db_prev == None:
        return db_upd
    elif db_upd == None:
        return db_prev
    else:
        # We try to keep user-supplied information in the prev
        # database whenever possible.
        # Both databases supply packages in alphabetical order,
        # so the merge can be done in a single pass.
        db_next = tempfile.TemporaryFile()
        proj_prev = parser.copy(db_next, db_prev)
        proj_upd = parser.next(db_upd)
        # Classic sorted-merge loop: advance whichever side currently
        # holds the alphabetically smaller project name.
        while proj_prev != None and proj_upd != None:
            if proj_prev < proj_upd:
                parser.start_project(db_next, proj_prev)
                proj_prev = parser.copy(db_next, db_prev)
            elif proj_prev > proj_upd:
                parser.start_project(db_next, proj_upd)
                proj_upd = parser.copy(db_next, db_upd)
            elif proj_prev == proj_upd:
                # when names are equals, we need to import user-supplied
                # information as appropriate. For now, there are only one
                # user supplied-information, the install mode for the package.
                # Package name is a unique key so we can increment
                # both iterators.
                parser.start_project(db_next, proj_upd)
                #installMode, version = parser.installMode(proj_prev)
                #parser.setInstallMode(db_next,installMode,version)
                # It is critical this line appears after we set the installMode
                # because it guarentees that the install mode will always be
                # the first line after the package tag.
                proj_upd = parser.copy(db_next, db_upd, True)
                proj_prev = parser.copy(db_next, db_prev)
        # Drain whichever side still has projects left over.
        while proj_prev != None:
            parser.start_project(db_next, proj_prev)
            proj_prev = parser.copy(db_next, db_prev)
        while proj_upd != None:
            parser.start_project(db_next, proj_upd)
            proj_upd = parser.copy(db_next, db_upd)
        parser.trailer(db_next)
        return db_next
def upload(filenames, cache_dir=None):
    '''upload *filenames*, typically a list of result logs,
    to the remote server. See the fetch function for downloading
    files from the remote server.
    '''
    remote_path = CONTEXT.remote_dir(CONTEXT.log_path(''))
    # find_rsync's relative flag is disabled when a cache_dir is given.
    cmdline, _ = find_rsync(CONTEXT, CONTEXT.remote_host(), not cache_dir)
    shell_command(cmdline + [' '.join(filenames), remote_path])
def createmail(subject, filenames=None):
    '''Returns an e-mail with *filenames* as attachments.

    The sender address is read from the *dwsEmail* configuration
    variable; each file in *filenames* becomes a text attachment.
    '''
    from email.mime.text import MIMEText
    from email.mime.multipart import MIMEMultipart
    message = MIMEMultipart()
    message['Subject'] = subject
    message['From'] = CONTEXT.value('dwsEmail')
    message.preamble = 'The contents of %s' % ', '.join(filenames)
    for pathname in list(filenames):
        with open(pathname, 'rb') as attachment_file:
            part = MIMEText(attachment_file.read())
        part.add_header('Content-Disposition', 'attachment',
            filename=os.path.basename(pathname))
        message.attach(part)
    return message.as_string()
def sendmail(msgtext, dests):
    '''Send a formatted email *msgtext* through the default smtp server.

    *dests* is a list of recipient addresses. When *smtpHost* is
    'localhost' but nothing answers on *smtpPort*, the smtp server is
    assumed to sit behind an ssh tunnel which is established first.
    '''
    if len(dests) > 0:
        if CONTEXT.value('smtpHost') == 'localhost':
            try:
                # Probe the smtp port on the local machine.
                # socket.connect() takes a single (host, port) address
                # tuple; the previous code passed host and port as two
                # separate arguments, which raises a TypeError that the
                # socket.error handler below would never catch.
                # NOTE(review): assumes smtpPort resolves to a numeric
                # port value — confirm against Context.value().
                session = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                session.connect(
                    (CONTEXT.value('smtpHost'), CONTEXT.value('smtpPort')))
                session.shutdown(2)
            except socket.error:
                # Can't connect to that port on local host, we will thus assume
                # we are accessing the smtp server through a ssh tunnel.
                ssh_tunnels(CONTEXT.tunnel_point,
                            [ CONTEXT.value('smtpPort')[:-1] ])
        import smtplib
        # Send the message via our own SMTP server, but don't include the
        # envelope header.
        session = smtplib.SMTP(
            CONTEXT.value('smtpHost'), CONTEXT.value('smtpPort'))
        session.set_debuglevel(1)
        session.ehlo()
        session.starttls()
        session.ehlo()
        session.login(
            CONTEXT.value('dwsSmtpLogin'), CONTEXT.value('dwsSmtpPasswd'))
        session.sendmail(CONTEXT.value('dwsEmail'), dests,
                         'To:' + ', '.join(dests) + '\r\n' + msgtext)
        session.close()
def search_back_to_root(filename, root=os.sep):
    '''Search recursively from the current directory to the *root*
    of the directory hierarchy for a specified *filename*.
    This function returns the relative path from *filename* to pwd
    and the absolute path to *filename* if found.'''
    scanned = os.getcwd()
    trail = []
    # Climb towards *root*, remembering each directory name we leave
    # behind so the relative path can be reconstructed afterwards.
    while (not os.path.samefile(scanned, root)
           and not os.path.isfile(os.path.join(scanned, filename))):
        trail.insert(0, os.path.basename(scanned))
        scanned = os.path.dirname(scanned)
    if not os.path.isfile(os.path.join(scanned, filename)):
        raise IOError(1, "cannot find file", filename)
    relative = os.path.join(*trail) if trail else '.'
    return relative, os.path.join(scanned, filename)
def shell_command(execute, admin=False, search_path=None, pat=None):
    '''Execute a shell command and throws an exception when the command fails.
    sudo is used when *admin* is True.
    the text output is filtered and returned when pat exists.

    *execute* is a list of command-line words which are joined and run
    through a shell. When *search_path* is given, it replaces PATH in
    the subprocess environment. Returns the list of output lines that
    matched *pat* (empty when *pat* is None). Raises Error carrying
    the subprocess return code on failure.
    '''
    filtered_output = []
    if admin:
        if False:
            # \todo cannot do this simple check because of a shell variable
            # setup before call to apt-get.
            if not execute.startswith('/'):
                raise Error("admin command without a fully quaified path: " \
                    + execute)
        # ex: su username -c 'sudo port install icu'
        cmdline = [ '/usr/bin/sudo' ]
        if USE_DEFAULT_ANSWER:
            # Error out if sudo prompts for a password because this should
            # never happen in non-interactive mode.
            if ASK_PASS:
                # XXX Workaround while sudo is broken
                # http://groups.google.com/group/comp.lang.python/\
                # browse_thread/thread/4c2bb14c12d31c29
                cmdline = [ 'SUDO_ASKPASS="' + ASK_PASS + '"' ] \
                    + cmdline + [ '-A' ]
            else:
                # -n makes sudo fail rather than prompt.
                cmdline += [ '-n' ]
        cmdline += execute
    else:
        cmdline = execute
    log_info(' '.join(cmdline))
    if not DO_NOT_EXECUTE:
        env = os.environ.copy()
        if search_path:
            env['PATH'] = ':'.join(search_path)
        cmd = subprocess.Popen(' '.join(cmdline),
                               shell=True,
                               env=env,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               close_fds=True)
        # Stream output line by line so it is logged as it appears.
        # NOTE(review): readline() returns bytes on Python 3 so the
        # sentinel comparison below assumes Python 2 str output.
        line = cmd.stdout.readline()
        while line != '':
            if pat and re.match(pat, line):
                filtered_output += [ line ]
            # Strip the trailing newline before logging.
            log_info(line[:-1])
            line = cmd.stdout.readline()
        cmd.wait()
        if cmd.returncode != 0:
            raise Error("unable to complete: " + ' '.join(cmdline) \
                        + '\n' + '\n'.join(filtered_output),
                        cmd.returncode)
    return filtered_output
def sort_build_conf_list(db_pathnames, parser):
    '''Sort/Merge projects defined in a list of files, *db_pathnames*.
    *parser* is the parser used to read the projects files in.

    Returns an open file object positioned at the beginning of the
    merged database, or None when *db_pathnames* is empty.
    '''
    db_prev = None
    db_upd = None
    if len(db_pathnames) == 0:
        return None
    elif len(db_pathnames) == 1:
        # A single file is already "merged"; hand it back as-is.
        db_prev = open(db_pathnames[0])
        return db_prev
    elif len(db_pathnames) == 2:
        db_prev = open(db_pathnames[0])
        db_upd = open(db_pathnames[1])
    else:
        # Divide-and-conquer: merge each half recursively, then merge
        # the two intermediate results below. Floor division keeps the
        # split index an integer under both Python 2 and Python 3
        # (plain '/' yields a float on Python 3 and breaks slicing).
        half = len(db_pathnames) // 2
        db_prev = sort_build_conf_list(db_pathnames[:half], parser)
        db_upd = sort_build_conf_list(db_pathnames[half:], parser)
    db_next = merge_build_conf(db_prev, db_upd, parser)
    db_next.seek(0)
    db_prev.close()
    db_upd.close()
    return db_next
def ssh_tunnels(hostname, ports):
    '''Create ssh tunnels from localhost to a remote host when they don't
    already exist.'''
    if len(ports) > 0:
        # Inspect the process list for ssh connections already running.
        ps_output = subprocess.check_output('ps xwww', shell=True,
            stderr=subprocess.STDOUT)
        connections = [line for line in ps_output.splitlines()
                       if re.match('ssh', line)]
        # Collect -L forwarding specs for every port not yet tunneled.
        tunnels = []
        for port in ports:
            spec = port + '0:localhost:' + port
            if not any(re.match(spec, conn) for conn in connections):
                tunnels += [ '-L', spec ]
        if len(tunnels) > 0:
            err = os.system(' '.join(['ssh', '-fN' ] + tunnels + [hostname]))
            if err:
                raise Error("attempt to create ssh tunnels to " \
                    + hostname + " failed.")
def validate_controls(dgen, dbindex,
                      graph=False, priorities = [ 1, 2, 3, 4, 5, 6, 7 ]):
    '''Checkout source code files, install packages such that
    the projects specified in *repositories* can be built.
    *dbindex* is the project index that contains the dependency
    information to use. If None, the global index fetched from
    the remote machine will be used.
    This function returns a topologicaly sorted list of projects
    in *srcTop* and an associated dictionary of Project instances.
    By iterating through the list, it is possible to 'make'
    each prerequisite project in order.

    Only steps whose priority is listed in *priorities* are run.
    When *graph* is True, a graphviz .dot file of the dependency
    graph is written next to the log file.
    '''
    dbindex.validate()
    global ERRORS
    # Add deep dependencies
    vertices = dbindex.closure(dgen)
    if graph:
        gph_filename = os.path.splitext(CONTEXT.logname())[0] + '.dot'
        gph_file = open(gph_filename,'w')
        gph_file.write("digraph structural {\n")
        for vertex in vertices:
            for project in vertex.prerequisites:
                gph_file.write(
                    "\t%s -> %s;\n" % (vertex.name, project.name))
        gph_file.write("}\n")
        gph_file.close()
    while len(vertices) > 0:
        # Gather a run ("glob") of consecutive steps of the same class.
        # Steps exposing an insert() method absorb their followers;
        # otherwise the followers are queued and run one by one below.
        first = vertices.pop(0)
        glob = [ first ]
        while len(vertices) > 0:
            vertex = vertices.pop(0)
            if vertex.__class__ != first.__class__:
                # Different kind of step: push it back for the next glob.
                vertices.insert(0, vertex)
                break
            if 'insert' in dir(first):
                first.insert(vertex)
            else:
                glob += [ vertex ]
        # \todo "make recurse" should update only projects which are missing
        # from *srcTop* and leave other projects in whatever state they are in.
        # This is different from "build" which should update all projects.
        if first.priority in priorities:
            for vertex in glob:
                errcode = 0
                elapsed = 0
                log_header(vertex.name)
                start = datetime.datetime.now()
                try:
                    vertex.run(CONTEXT)
                    finish = datetime.datetime.now()
                    elapsed = elapsed_duration(start, finish)
                except Error, err:
                    # (Python 2 syntax.) Record the failure; abort right
                    # away or keep going depending on the generator.
                    if True:
                        import traceback
                        traceback.print_exc()
                    errcode = err.code
                    ERRORS += [ str(vertex) ]
                    if dgen.stop_make_after_error:
                        finish = datetime.datetime.now()
                        elapsed = elapsed_duration(start, finish)
                        log_footer(vertex.name, elapsed, errcode)
                        raise err
                    else:
                        log_error(str(err))
                log_footer(vertex.name, elapsed, errcode)
    nb_updated_projects = len(UpdateStep.updated_sources)
    if nb_updated_projects > 0:
        log_info('%d updated project(s).' % nb_updated_projects)
    else:
        log_info('all project(s) are up-to-date.')
    return nb_updated_projects
def version_candidates(line):
    '''Extract patterns from *line* that could be interpreted as a
    version numbers. That is every pattern that is a set of digits
    separated by dots and/or underscores.

    Returns all such patterns found in *line*, in order of appearance
    (ex: "gcc 4.2_1 and 3.5" => ['4.2_1', '3.5']). Lone numbers
    without a '.' or '_' separator are skipped.
    '''
    part = line
    candidates = []
    while part != '':
        # numbers should be full, i.e. including '.'
        look = re.match(r'[^0-9]*([0-9].*)', part)
        if look:
            part = look.group(1)
            look = re.match(r'[^0-9]*([0-9]+([_\.][0-9]+)+)+(.*)', part)
            if look:
                candidates += [ look.group(1) ]
                # Continue scanning the remainder of the line (group(3))
                # so every version pattern is collected. The previous
                # code used group(2), which only holds the last '._'
                # component of the version just matched and therefore
                # silently dropped the rest of the line.
                part = look.group(3)
            else:
                # Skip over a lone number with no separator.
                while (len(part) > 0
                       and part[0] in ['0', '1', '2', '3', '4', '5',
                                       '6', '7', '8', '9' ]):
                    part = part[1:]
        else:
            part = ''
    return candidates
def version_compare(left, right):
    '''Compare version numbers
    This function returns -1 if a *left* is less than *right*, 0 if *left
    is equal to *right* and 1 if *left* is greater than *right*.
    It is suitable as a custom comparaison function for sorted().

    Components are compared numerically so that '1.10' ranks above
    '1.9'; the previous string comparison ordered them the other way
    around. Non-numeric components fall back to string comparison.
    '''
    left_remain = left.replace('_', '.').split('.')
    right_remain = right.replace('_', '.').split('.')
    while len(left_remain) > 0 and len(right_remain) > 0:
        left_num = left_remain.pop(0)
        right_num = right_remain.pop(0)
        if left_num.isdigit() and right_num.isdigit():
            # Numeric comparison, otherwise '10' would sort before '9'.
            left_num = int(left_num)
            right_num = int(right_num)
        if left_num < right_num:
            return -1
        elif left_num > right_num:
            return 1
    # All shared components are equal; the longer version wins.
    if len(left_remain) < len(right_remain):
        return -1
    elif len(left_remain) > len(right_remain):
        return 1
    return 0
def version_incr(ver_num):
    '''returns the version number with the smallest increment
    that is greater than *v*.'''
    # Appending a new '.1' component is the minimal strictly-greater step.
    return '%s.1' % ver_num
def build_subcommands_parser(parser, module):
    '''Returns a parser for the subcommands defined in the *module*
    (i.e. commands starting with a 'pub_' prefix).

    Each pub_* function becomes a sub-command named after it (without
    the prefix). Leading positional arguments map to command-line
    positionals — the last one collecting the remaining words — while
    keyword arguments become optional flags (append for list defaults,
    store_true for False defaults, plain value otherwise).
    '''
    mdefs = module.__dict__
    # sorted() accepts both a Python 2 list and a Python 3 view object;
    # the previous keys.sort() fails on dict_keys under Python 3.
    keys = sorted(mdefs.keys())
    # inspect.getargspec() was removed in Python 3.11; fall back to it
    # only when getfullargspec() is unavailable (Python 2).
    get_argspec = getattr(inspect, 'getfullargspec', None)
    if get_argspec is None:
        get_argspec = inspect.getargspec
    subparsers = parser.add_subparsers(help='sub-command help')
    for command in keys:
        if command.startswith('pub_'):
            func = module.__dict__[command]
            parser = subparsers.add_parser(command[4:], help=func.__doc__)
            parser.set_defaults(func=func)
            argspec = get_argspec(func)
            flags = len(argspec.args)
            if argspec.defaults:
                # Arguments with defaults become optional flags.
                flags = len(argspec.args) - len(argspec.defaults)
            if flags >= 1:
                # All but the last mandatory argument take a single
                # value; the last one swallows the remaining words.
                for arg in argspec.args[:flags - 1]:
                    parser.add_argument(arg)
                parser.add_argument(argspec.args[flags - 1], nargs='*')
            for idx, arg in enumerate(argspec.args[flags:]):
                if isinstance(argspec.defaults[idx], list):
                    parser.add_argument('-%s' % arg[0], '--%s' % arg,
                        action='append')
                elif argspec.defaults[idx] is False:
                    parser.add_argument('-%s' % arg[0], '--%s' % arg,
                        action='store_true')
                else:
                    parser.add_argument('-%s' % arg[0], '--%s' % arg)
def filter_subcommand_args(func, options):
    '''Filter out all options which are not part of the function *func*
    prototype and returns a set that can be used as kwargs for calling func.

    *options* is typically an argparse.Namespace; only the attributes
    whose name matches a parameter of *func* are kept.
    '''
    kwargs = {}
    # inspect.getargspec() was removed in Python 3.11; fall back to it
    # only when getfullargspec() is unavailable (Python 2).
    get_argspec = getattr(inspect, 'getfullargspec', None)
    if get_argspec is None:
        get_argspec = inspect.getargspec
    argspec = get_argspec(func)
    for arg in argspec.args:
        if arg in options:
            kwargs.update({ arg: getattr(options, arg)})
    return kwargs
def integrate(srcdir, pchdir, verbose=True):
    '''Replaces files in srcdir with links to files in pchdir
    for all files that match in the directory hierarchy.

    Directories are recursed into (except those matching
    Repository.dirPats — presumably source-control bookkeeping
    directories; confirm against the Repository class). For every
    other file a relative symlink is created in *srcdir*, the previous
    file being preserved under a '~' suffix.
    '''
    for name in os.listdir(pchdir):
        srcname = os.path.join(srcdir, name)
        pchname = os.path.join(pchdir, name)
        if (os.path.isdir(pchname)
            and not re.match(Repository.dirPats, os.path.basename(name))):
            integrate(srcname, pchname, verbose)
        else:
            # Skip editor backup files and files already patched (links).
            if not name.endswith('~'):
                if not os.path.islink(srcname):
                    if verbose:
                        # Use sys.stdout and not log as the integrate command
                        # will mostly be emitted from a Makefile and thus
                        # trigger a "recursive" call to dws. We thus do not
                        # want nor need to open a new log file.
                        sys.stdout.write(srcname + '... patched\n')
                    # Change directory such that relative paths are computed
                    # correctly.
                    prev = os.getcwd()
                    dirname = os.path.dirname(srcname)
                    basename = os.path.basename(srcname)
                    if not os.path.isdir(dirname):
                        os.makedirs(dirname)
                    os.chdir(dirname)
                    if os.path.exists(basename):
                        # Keep the original file around as a backup.
                        shutil.move(basename, basename + '~')
                    os.symlink(os.path.relpath(pchname), basename)
                    os.chdir(prev)
def wait_until_ssh_up(hostname,
                      login=None, keyfile=None, port=None, timeout=120):
    '''wait until an ssh connection can be established to *hostname*
    or the attempt timed out after *timeout* seconds.'''
    waited = 0
    is_up = False
    # Non-interactive probe: fail fast rather than prompt.
    cmdline = ['ssh',
               '-v',
               '-o', 'ConnectTimeout 30',
               '-o', 'BatchMode yes',
               '-o', 'StrictHostKeyChecking no' ]
    if port:
        cmdline += [ '-p', str(port) ]
    if keyfile:
        cmdline += [ '-i', keyfile ]
    destination = login + '@' + hostname if login else hostname
    cmdline += [ destination, 'echo' ]
    while not is_up and waited <= timeout:
        try:
            subprocess.check_call(cmdline)
            is_up = True
        except subprocess.CalledProcessError:
            waited = waited + 30
            sys.stdout.write("waiting 30 more seconds (" \
                + str(waited) + " so far)...\n")
    if waited > timeout:
        raise Error("ssh connection attempt to " + hostname + " timed out.")
def prompt(message):
    '''If the script is run through a ssh command, the message would not
    appear if passed directly in raw_input.

    log_interactive() writes and flushes *message* to stdout (and the
    log buffer) before reading a line of user input.
    '''
    log_interactive(message)
    # raw_input is Python 2 only (renamed input() in Python 3).
    return raw_input("")
def log_init():
    '''Lazily initialize the module-level LOGGER: one handler for the
    general log file and a separate 'build' logger for build output.'''
    global LOGGER
    if not LOGGER:
        logging.config.dictConfig({
            'version': 1,
            'disable_existing_loggers': False,
            'formatters': {
                'simple': {
                    'format': '[%(asctime)s] [%(levelname)s] %(message)s',
                    'datefmt': '%d/%b/%Y:%H:%M:%S %z'
                },
            },
            'handlers': {
                # WatchedFileHandler reopens the file if an external
                # log rotation moves it out from under us.
                'logfile':{
                    'level': 'INFO',
                    'class':'logging.handlers.WatchedFileHandler',
                    'filename': CONTEXT.logname(),
                    'formatter': 'simple'
                },
                'logbuild':{
                    'level': 'INFO',
                    'class':'logging.handlers.WatchedFileHandler',
                    'filename': CONTEXT.logbuildname(),
                    'formatter': 'simple'
                },
            },
            'loggers': {
                __name__: {
                    'handlers': [ 'logfile' ],
                    'level': 'INFO',
                    'propagate': True,
                },
                'build': {
                    'handlers': [ 'logbuild' ],
                    'level': 'INFO',
                    'propagate': True,
                }
            },
        })
        LOGGER = logging.getLogger(__name__)
def log_footer(prefix, elapsed=datetime.timedelta(), errcode=0):
    '''Write a footer into the log file.'''
    if NO_LOG:
        return
    if not LOGGER:
        log_init()
    # NOTE(review): negative error codes are reported as success here.
    if errcode > 0:
        LOGGER.info('%s: error (%d) after %s' % (prefix, errcode, elapsed))
    else:
        LOGGER.info('%s: completed in %s' % (prefix, elapsed))
def log_header(message, *args, **kwargs):
    '''Write a header into the log file'''
    banner = '######## ' + message + '...'
    sys.stdout.write(banner + '\n')
    if not NO_LOG:
        if not LOGGER:
            log_init()
        LOGGER.info(banner)
def log_error(message, *args, **kwargs):
    '''Write an error message onto stdout and into the log file'''
    # NOTE(review): no trailing newline is appended here, unlike
    # log_info — presumably callers include one; confirm.
    sys.stderr.write('error: ' + message)
    if NO_LOG:
        return
    if not LOGGER:
        log_init()
    LOGGER.error(message, *args, **kwargs)
def log_interactive(message):
    '''Write a message that should absolutely end up on the screen
    even when no newline is present at the end of the message.'''
    sys.stdout.write(message)
    sys.stdout.flush()
    if NO_LOG:
        return
    global LOGGER_BUFFER
    # Mirror the message into the in-memory log buffer, creating it
    # on first use.
    if LOGGER_BUFFER is None:
        LOGGER_BUFFER = cStringIO.StringIO()
    LOGGER_BUFFER.write(message)
def log_info(message, *args, **kwargs):
    '''Write a info message onto stdout and into the log file

    *args* are applied to *message* with the %% operator, mirroring
    the logging module's lazy formatting. The previous code computed
    (message + '\n' %% args) %% kwargs which — by operator precedence —
    applied %% to '\n' alone and raised whenever *args* was non-empty.
    '''
    sys.stdout.write(message + '\n')
    if not NO_LOG:
        global LOGGER_BUFFER
        # Format once, the same way LOGGER.info(message, *args) would.
        # (kwargs are passed through to logging below; they are logging
        # keywords such as exc_info, not formatting parameters.)
        text = message % args if args else message
        if LOGGER_BUFFERING_COUNT > 0:
            # Buffering requested (e.g. while the log file is about to
            # be deleted): accumulate in memory instead.
            if not LOGGER_BUFFER:
                LOGGER_BUFFER = cStringIO.StringIO()
            LOGGER_BUFFER.write(text + '\n')
        else:
            if not LOGGER:
                log_init()
            if LOGGER_BUFFER:
                # Flush the buffered backlog into the log file first.
                LOGGER_BUFFER.write(text + '\n')
                for line in LOGGER_BUFFER.getvalue().splitlines():
                    LOGGER.info(line)
                LOGGER_BUFFER = None
            else:
                LOGGER.info(message, *args, **kwargs)
def pub_build(args, graph=False, noclean=False):
    '''remoteIndex [ siteTop [ buildTop ] ]
    This command executes a complete build cycle:
    - (optional) delete all files in *siteTop*,
      *buildTop* and *installTop*.
    - fetch the build dependency file *remoteIndex*
    - setup third-party prerequisites through
      the local package manager.
    - update a local source tree from remote
      repositories
    - (optional) apply local patches
    - configure required environment variables
    - make libraries, executables and tests.
    - (optional) send a report email.
    As such, this command is most useful as part
    of a cron job on build servers. Thus it is designed
    to run to completion with no human interaction.
    To be really useful in an automatic build system,
    authentication to the remote server (if required)
    should also be setup to run with no human
    interaction.
    ex: dws build http://hostname/everything.git
    --graph Generate a .dot graph of
            the dependencies
    --noclean Do not remove any directory before
              executing a build command.
    '''
    global USE_DEFAULT_ANSWER
    USE_DEFAULT_ANSWER = True
    CONTEXT.from_remote_index(args[0])
    if len(args) > 1:
        site_top = os.path.abspath(args[1])
    else:
        # Default siteTop: a directory named after the remote index
        # under the current directory.
        site_top = os.path.join(os.getcwd(), CONTEXT.base('remoteIndex'))
    CONTEXT.environ['siteTop'].value = site_top
    if not noclean:
        # We don't want to remove the log we just created
        # so we buffer until it is safe to flush.
        global LOGGER_BUFFERING_COUNT
        LOGGER_BUFFERING_COUNT = LOGGER_BUFFERING_COUNT + 1
    if len(args) > 2:
        CONTEXT.environ['buildTop'].value = args[2]
    else:
        # Can't call *configure* before *locate*, otherwise config_filename
        # is set to be inside the buildTop on the first save.
        CONTEXT.environ['buildTop'].value = os.path.join(site_top, 'build')
    build_top = str(CONTEXT.environ['buildTop'])
    prevcwd = os.getcwd()
    if not os.path.exists(build_top):
        os.makedirs(build_top)
    os.chdir(build_top)
    CONTEXT.locate()
    if not str(CONTEXT.environ['installTop']):
        CONTEXT.environ['installTop'].configure(CONTEXT)
    install_top = str(CONTEXT.environ['installTop'])
    if not noclean:
        # First we backup everything in siteTop, buildTop and installTop
        # as we are about to remove those directories - just in case.
        tardirs = []
        for path in [site_top, build_top, install_top]:
            if os.path.isdir(path):
                tardirs += [ path ]
        if len(tardirs) > 0:
            prefix = os.path.commonprefix(tardirs)
            tarname = os.path.basename(site_top) + '-' + stamp() + '.tar.bz2'
            if os.path.samefile(prefix, site_top):
                # optimize common case: *buildTop* and *installTop* are within
                # *siteTop*. We cd into the parent directory to create the tar
                # in order to avoid 'Removing leading /' messages. Those do
                # not display the same on Darwin and Ubuntu, creating false
                # positive regressions between both systems.
                shell_command(['cd', os.path.dirname(site_top),
                               '&&', 'tar', 'jcf', tarname,
                               os.path.basename(site_top) ])
            else:
                shell_command(['cd', os.path.dirname(site_top),
                               '&&', 'tar', 'jcf', tarname ] + tardirs)
        # Step out of buildTop before deleting it.
        os.chdir(prevcwd)
        for dirpath in [ build_top, install_top]:
            # we only remove build_top and installTop. Can neither be too
            # prudent.
            if os.path.isdir(dirpath):
                # Test directory exists, in case it is a subdirectory
                # of another one we already removed.
                sys.stdout.write('removing ' + dirpath + '...\n')
                shutil.rmtree(dirpath)
        if not os.path.exists(build_top):
            os.makedirs(build_top)
        os.chdir(build_top)
        # Safe to flush buffered log lines from now on.
        LOGGER_BUFFERING_COUNT = LOGGER_BUFFERING_COUNT - 1
    rgen = DerivedSetsGenerator()
    # If we do not force the update of the index file, the dependency
    # graph might not reflect the latest changes in the repository server.
    INDEX.validate(True)
    INDEX.parse(rgen)
    # note that *EXCLUDE_PATS* is global.
    dgen = BuildGenerator(rgen.roots, [], EXCLUDE_PATS)
    CONTEXT.targets = [ 'install' ]
    # Set the buildstamp that will be use by all "install" commands.
    if not 'buildstamp' in CONTEXT.environ:
        CONTEXT.environ['buildstamp'] = '-'.join([socket.gethostname(),
                                            stamp(datetime.datetime.now())])
    CONTEXT.save()
    if validate_controls(dgen, INDEX, graph=graph):
        # Once we have built the repository, let's report the results
        # back to the remote server. We stamp the logfile such that
        # it gets a unique name before uploading it.
        logstamp = stampfile(CONTEXT.logname())
        if not os.path.exists(os.path.dirname(CONTEXT.log_path(logstamp))):
            os.makedirs(os.path.dirname(CONTEXT.log_path(logstamp)))
        if LOGGER:
            for handler in LOGGER.handlers:
                handler.flush()
        shell_command(['install', '-m', '644', CONTEXT.logname(),
                       CONTEXT.log_path(logstamp)])
        logging.getLogger('build').info(
            'build %s'% str(UpdateStep.updated_sources))
        look = re.match(r'.*(-.+-\d\d\d\d_\d\d_\d\d-\d\d\.log)', logstamp)
        global LOG_PAT
        LOG_PAT = look.group(1)
    if len(ERRORS) > 0:
        raise Error("Found errors while making " + ' '.join(ERRORS))
def pub_collect(args, output=None):
    '''[ project ... ]
    Consolidate local dependencies information
    into a global dependency database. Copy all
    distribution packages built into a platform
    distribution directory.
    (example: dws --exclude test collect)
    '''
    # Collect cannot log or it will prompt for index file.
    roots = []
    if len(args) > 0:
        for dir_name in args:
            roots += [ os.path.join(CONTEXT.value('srcTop'), dir_name) ]
    else:
        roots = [ CONTEXT.value('srcTop') ]
    # Name of the output index file generated by collect commands.
    collected_index = output
    if not collected_index:
        collected_index = CONTEXT.db_pathname()
    else:
        collected_index = os.path.abspath(collected_index)
    # Create the distribution directory, i.e. where packages are stored.
    package_dir = CONTEXT.local_dir('./resources/' + CONTEXT.host())
    if not os.path.exists(package_dir):
        os.makedirs(package_dir)
    src_package_dir = CONTEXT.local_dir('./resources/srcs')
    if not os.path.exists(src_package_dir):
        os.makedirs(src_package_dir)
    # Create the project index file
    # and copy the packages in the distribution directory.
    # Per-platform pair: (index file suffix, binary package suffix).
    extensions = { 'Darwin': (r'\.dsx', r'\.dmg'),
                   'Fedora': (r'\.spec', r'\.rpm'),
                   'Debian': (r'\.dsc', r'\.deb'),
                   'Ubuntu': (r'\.dsc', r'\.deb')
                 }
    # collect index files and packages
    indices = []
    for root in roots:
        pre_exclude_indices = find_files(root, CONTEXT.indexName)
        for index in pre_exclude_indices:
            # We exclude any project index files that has been determined
            # to be irrelevent to the collection being built.
            found = False
            if index == collected_index:
                found = True
            else:
                for exclude_pat in EXCLUDE_PATS:
                    if re.match('.*' + exclude_pat + '.*', index):
                        found = True
                        break
            if not found:
                indices += [ index ]
    pkg_indices = []
    cpy_src_packages = None
    copy_bin_packages = None
    if str(CONTEXT.environ['buildTop']):
        # If there are no build directory, then don't bother to look
        # for built packages and avoid prompty for an unncessary value
        # for buildTop.
        for index in indices:
            buildr = os.path.dirname(index.replace(CONTEXT.value('buildTop'),
                                                   CONTEXT.value('srcTop')))
            src_packages = find_files(buildr, '.tar.bz2')
            if len(src_packages) > 0:
                cmdline, prefix = find_rsync(CONTEXT, CONTEXT.remote_host())
                cpy_src_packages = cmdline + [ ' '.join(src_packages),
                                               src_package_dir]
            if CONTEXT.host() in extensions:
                ext = extensions[CONTEXT.host()]
                pkg_indices += find_files(buildr, ext[0])
                bin_packages = find_files(buildr, ext[1])
                if len(bin_packages) > 0:
                    cmdline, prefix = find_rsync(CONTEXT, CONTEXT.remote_host())
                    copy_bin_packages = cmdline + [ ' '.join(bin_packages),
                                                    package_dir ]
    # Create the index and checks it is valid according to the schema.
    create_index_pathname(collected_index, indices + pkg_indices)
    shell_command(['xmllint', '--noout', '--schema ',
                   CONTEXT.derived_helper('index.xsd'), collected_index])
    # We should only copy the index file after we created it.
    if copy_bin_packages:
        shell_command(copy_bin_packages)
    if cpy_src_packages:
        shell_command(cpy_src_packages)
def pub_configure(args):
    '''Locate direct dependencies of a project on
    the local machine and create the appropriate
    symbolic links such that the project can be made
    later on.
    '''
    CONTEXT.environ['indexFile'].value = CONTEXT.src_dir(
        os.path.join(CONTEXT.cwd_project(), CONTEXT.indexName))
    project_name = CONTEXT.cwd_project()
    dgen = MakeGenerator([ project_name ], [])
    dbindex = IndexProjects(CONTEXT, CONTEXT.value('indexFile'))
    dbindex.parse(dgen)
    # Run every Setup step and collect whatever could not be satisfied.
    missing = set([])
    for key in dgen.vertices:
        if key.endswith('Setup'):
            setup = dgen.vertices[key]
            if not setup.run(CONTEXT):
                missing.add(str(setup.project))
        elif key.startswith('update_'):
            update = dgen.vertices[key]
            for fetch_name in update.fetches:
                missing.add(fetch_name)
    if missing:
        raise MissingError(project_name, missing)
def pub_context(args):
    '''[ file ]
    Prints the absolute pathname to a *file*.
    If the file cannot be found from the current
    directory up to the workspace root, i.e where
    the .mk fragment is located (usually *buildTop*,
    it assumes the file is in *shareDir* alongside
    other make helpers.
    '''
    pathname = CONTEXT.config_filename
    if len(args) >= 1:
        workspace_root = os.path.dirname(CONTEXT.config_filename)
        try:
            _, pathname = search_back_to_root(args[0], workspace_root)
        except IOError:
            # Not in the workspace tree: fall back to the helpers dir.
            pathname = CONTEXT.derived_helper(args[0])
    sys.stdout.write(pathname)
def pub_deps(args):
    ''' Prints the dependency graph for a project.
    '''
    top = os.path.realpath(os.getcwd())
    def _strictly_inside(var_name):
        # True when cwd lies strictly below the tree rooted at *var_name*.
        if not str(CONTEXT.environ[var_name]):
            return False
        tree_top = os.path.realpath(CONTEXT.value(var_name))
        return top.startswith(tree_top) and top != tree_top
    if _strictly_inside('buildTop') or _strictly_inside('srcTop'):
        roots = [ CONTEXT.cwd_project() ]
    else:
        # make from the top directory makes every project in the index file.
        rgen = DerivedSetsGenerator()
        INDEX.parse(rgen)
        roots = rgen.roots
    sys.stdout.write(' '.join(ordered_prerequisites(roots, INDEX)) + '\n')
def pub_export(args):
    '''rootpath
    Exports the project index file in a format
    compatible with Jenkins. [experimental]

    For every exported project a Jenkins job directory is created
    under *rootpath* with builds/, workspace/, nextBuildNumber and a
    config.xml pointing at the project's git repository.
    '''
    rootpath = args[0]
    top = os.path.realpath(os.getcwd())
    if (top == os.path.realpath(CONTEXT.value('buildTop'))
        or top == os.path.realpath(CONTEXT.value('srcTop'))):
        # From the top of the tree, export every project in the index.
        rgen = DerivedSetsGenerator()
        INDEX.parse(rgen)
        roots = rgen.roots
    else:
        roots = [ CONTEXT.cwd_project() ]
    handler = Unserializer(roots)
    if os.path.isfile(CONTEXT.db_pathname()):
        INDEX.parse(handler)
    for name in roots:
        jobdir = os.path.join(rootpath, name)
        if not os.path.exists(jobdir):
            # Lay out the skeleton Jenkins expects for a job.
            os.makedirs(os.path.join(jobdir, 'builds'))
            os.makedirs(os.path.join(jobdir, 'workspace'))
            with open(os.path.join(jobdir, 'nextBuildNumber'), 'w') as \
                next_build_number:
                next_build_number.write('0\n')
        project = handler.projects[name]
        rep = project.repository.update.rep
        config = open(os.path.join(jobdir, 'config.xml'), 'w')
        config.write('''<?xml version='1.0' encoding='UTF-8'?>
<project>
<actions/>
<description>''' + project.descr + '''</description>
<keepDependencies>false</keepDependencies>
<properties/>
<scm class="hudson.plugins.git.GitSCM">
<configVersion>2</configVersion>
<userRemoteConfigs>
<hudson.plugins.git.UserRemoteConfig>
<name>origin</name>
<refspec>+refs/heads/*:refs/remotes/origin/*</refspec>
<url>''' + rep.url + '''</url>
</hudson.plugins.git.UserRemoteConfig>
</userRemoteConfigs>
<branches>
<hudson.plugins.git.BranchSpec>
<name>**</name>
</hudson.plugins.git.BranchSpec>
</branches>
<recursiveSubmodules>false</recursiveSubmodules>
<doGenerateSubmoduleConfigurations>false</doGenerateSubmoduleConfigurations>
<authorOrCommitter>false</authorOrCommitter>
<clean>false</clean>
<wipeOutWorkspace>false</wipeOutWorkspace>
<pruneBranches>false</pruneBranches>
<remotePoll>false</remotePoll>
<buildChooser class="hudson.plugins.git.util.DefaultBuildChooser"/>
<gitTool>Default</gitTool>
<submoduleCfg class="list"/>
<relativeTargetDir>''' + os.path.join('reps', name)+ '''</relativeTargetDir>
<excludedRegions></excludedRegions>
<excludedUsers></excludedUsers>
<gitConfigName></gitConfigName>
<gitConfigEmail></gitConfigEmail>
<skipTag>false</skipTag>
<scmName></scmName>
</scm>
<canRoam>true</canRoam>
<disabled>false</disabled>
<blockBuildWhenDownstreamBuilding>true</blockBuildWhenDownstreamBuilding>
<blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
<triggers class="vector">
<hudson.triggers.SCMTrigger>
<spec></spec>
</hudson.triggers.SCMTrigger>
</triggers>
<concurrentBuild>false</concurrentBuild>
<builders>
<hudson.tasks.Shell>
<command>
cd ''' + os.path.join('build', name) + '''
dws configure
dws make
</command>
</hudson.tasks.Shell>
</builders>
<publishers />
<buildWrappers/>
</project>
''')
        config.close()
def pub_find(args):
    '''bin|lib filename ...
    Search through a set of directories derived
    from PATH for *filename*.
    '''
    dir_name = args[0]
    # Dispatch to the find_bin / find_lib / ... helper in this module.
    finder = getattr(sys.modules[__name__], 'find_' + dir_name)
    searches = [ (filename, None) for filename in args[1:] ]
    installed, _, complete = finder(
        searches, CONTEXT.search_path(dir_name), CONTEXT.value('buildTop'))
    if len(installed) != len(searches):
        sys.exit(1)
def pub_init(args):
    ''' Prompt for variables which have not been
    initialized in the workspace make fragment.
    (This will fetch the project index).
    '''
    # Interactively resolve any unset workspace configuration variable.
    config_var(CONTEXT, CONTEXT.environ)
    # Make sure a local copy of the project index is present.
    INDEX.validate()
def pub_install(args):
    ''' [ binPackage | project ... ]
    Install a package *binPackage* on the local system
    or a binary package associated to *project*
    through either a *package* or *patch* node in the
    index database or through the local package
    manager.
    '''
    # Refresh the index before resolving the names passed in *args*.
    INDEX.validate()
    install(args, INDEX)
def pub_integrate(args):
    '''[ srcPackage ... ]
    Integrate a patch into a source package
    '''
    # Consume *args* one package at a time (the list is emptied).
    while len(args) > 0:
        # unpack() extracts the source package and returns the directory
        # it was expanded into; the matching patch directory is expected
        # under the current project as "<srcdir>-patch".
        srcdir = unpack(args.pop(0))
        pchdir = CONTEXT.src_dir(os.path.join(CONTEXT.cwd_project(),
                                              srcdir + '-patch'))
        integrate(srcdir, pchdir)
class FilteredList(PdbHandler):
    '''Print a list binary package files specified in an index file.'''
    # Note: This code is used by dservices.

    def __init__(self):
        PdbHandler.__init__(self)
        self.first_time = True
        # Accumulates the package files to fetch across project() calls.
        self.fetches = []

    def project(self, proj_obj):
        '''Record the binary package fetches for the current host.'''
        host = CONTEXT.host()
        if host in proj_obj.packages and proj_obj.packages[host]:
            fetched = proj_obj.packages[host].update.fetches
            if len(fetched) > 0:
                self.fetches.extend(fetched)
class ListPdbHandler(PdbHandler):
    '''List project available in the workspace.'''

    def __init__(self):
        PdbHandler.__init__(self)
        # Controls printing of the column header on the first project.
        self.first_time = True

    def project(self, proj):
        '''Print the git HEAD revision (when checked out) and name.'''
        if self.first_time:
            sys.stdout.write('HEAD name\n')
            self.first_time = False
        src_path = CONTEXT.src_dir(proj.name)
        if os.path.exists(src_path):
            saved_cwd = os.getcwd()
            os.chdir(src_path)
            lines = subprocess.check_output('git rev-parse HEAD',
                shell=True, stderr=subprocess.STDOUT).splitlines()
            sys.stdout.write(' '.join(lines).strip() + ' ')
            os.chdir(saved_cwd)
        sys.stdout.write(proj.name + '\n')
def pub_list(args):
    ''' List available projects
    '''
    # Walk the index; ListPdbHandler prints one line per project.
    INDEX.parse(ListPdbHandler())
def pub_make(args, graph=False):
    ''' Make projects. "make recurse" will build
    all dependencies required before a project
    can be itself built.
    '''
    # \todo That should not be required:
    # context.environ['siteTop'].default = os.path.dirname(os.path.dirname(
    #     os.path.realpath(os.getcwd())))
    CONTEXT.targets = []
    recurse = False
    top = os.path.realpath(os.getcwd())
    if (top == os.path.realpath(CONTEXT.value('buildTop'))
        or top == os.path.realpath(CONTEXT.value('srcTop'))):
        # make from the top directory makes every project in the index file.
        rgen = DerivedSetsGenerator()
        INDEX.parse(rgen)
        roots = rgen.roots
        recurse = True
    else:
        roots = [ CONTEXT.cwd_project() ]
    # Command-line tokens are either the 'recurse' keyword, VAR=value
    # overrides, or plain make targets.
    for opt in args:
        if opt == 'recurse':
            CONTEXT.targets += [ 'install' ]
            recurse = True
        elif re.match(r'\S+=.*', opt):
            CONTEXT.overrides += [ opt ]
        else:
            CONTEXT.targets += [ opt ]
    if recurse:
        # note that *EXCLUDE_PATS* is global.
        validate_controls(MakeGenerator(roots, [], EXCLUDE_PATS), INDEX,
                          graph=graph)
    else:
        handler = Unserializer(roots)
        if os.path.isfile(CONTEXT.db_pathname()):
            INDEX.parse(handler)
        for name in roots:
            make = None
            src_dir = CONTEXT.src_dir(name)
            if os.path.exists(src_dir):
                if name in handler.projects:
                    rep = handler.as_project(name).repository
                    if not rep:
                        rep = handler.as_project(name).patch
                    # NOTE(review): if a project has neither a *repository*
                    # nor a *patch* node, *rep* is None here and the next
                    # line raises AttributeError -- confirm this cannot
                    # happen for projects listed in the index.
                    make = rep.make
                else:
                    # No luck we do not have any more information than
                    # the directory name. Let's do with that.
                    make = MakeStep(name)
            if make:
                make.run(CONTEXT)
    if len(ERRORS) > 0:
        raise Error("Found errors while making " + ' '.join(ERRORS))
def pub_patch(args):
    ''' Generate patches vs. the last pull from a remote
    repository, optionally send it to a list
    of recipients.
    '''
    reps = args
    recurse = False
    if 'recurse' in args:
        recurse = True
        reps.remove('recurse')
    # Expand the remaining arguments into project names rooted at cwd.
    reps = cwd_projects(reps, recurse)
    prev = os.getcwd()
    for rep in reps:
        patches = []
        log_info('######## generating patch for project ' + rep)
        os.chdir(CONTEXT.src_dir(rep))
        patch_dir = CONTEXT.patch_dir(rep)
        if not os.path.exists(patch_dir):
            os.makedirs(patch_dir)
        # *git format-patch* prints the pathname of each patch file
        # it generates, one per line.
        cmdline = ['git', 'format-patch', '-o', patch_dir, 'origin']
        for line in subprocess.check_output(' '.join(cmdline), shell=True,
                stderr=subprocess.STDOUT).splitlines():
            patches += [ line.strip() ]
            sys.stdout.write(line)
        # E-mail each patch, stripping the first line ("From <sha> ...")
        # of the mbox-formatted file.
        for patch in patches:
            with open(patch) as msgfile:
                msg = msgfile.readlines()
                msg = ''.join(msg[1:])
            sendmail(msg, MAILTO)
    os.chdir(prev)
def pub_push(args):
    ''' Push commits to projects checked out
    in the workspace.
    '''
    project_names = args
    recurse = False
    if 'recurse' in args:
        recurse = True
        project_names.remove('recurse')
    # Push every selected project through its repository backend.
    for name in cwd_projects(project_names, recurse):
        sys.stdout.write('######## pushing project ' + str(name) + '\n')
        local_sources = CONTEXT.src_dir(name)
        Repository.associate(local_sources).push(local_sources)
def pub_status(args, recurse=False):
    ''' Show status of projects checked out
    in the workspace with regards to commits.
    '''
    reps = cwd_projects(args, recurse)
    cmdline = 'git status'
    prev = os.getcwd()
    for rep in reps:
        os.chdir(CONTEXT.src_dir(rep))
        try:
            output = subprocess.check_output(cmdline, shell=True,
                stderr=subprocess.STDOUT)
            untracked = False
            # Reformat "git status" output into "<S> <project> <path>"
            # rows, where <S> is the first letter of the change state
            # (e.g. 'M' for modified) or '?' for untracked files.
            for line in output.splitlines():
                look = re.match(r'#\s+([a-z]+):\s+(\S+)', line)
                if look:
                    sys.stdout.write(' '.join([
                        look.group(1).capitalize()[0],
                        rep, look.group(2)]) + '\n')
                elif re.match('# Untracked files:', line):
                    untracked = True
                elif untracked:
                    # Lines after the "Untracked files:" header name
                    # files unknown to git.
                    look = re.match(r'# (\S+)', line)
                    if look:
                        sys.stdout.write(' '.join(['?', rep,
                            look.group(1)]) + '\n')
        except subprocess.CalledProcessError:
            # It is ok. git will return error code 1 when no changes
            # are to be committed.
            pass
    os.chdir(prev)
def pub_update(args):
    '''[ project ... ]
    Update projects that have a *repository* or *patch*
    node in the index database and are also present in
    the workspace by pulling changes from the remote
    server. "update recurse" will recursively update all
    dependencies for *project*.
    If a project only contains a *package* node in
    the index database, the local system will be
    modified only if the version provided is greater
    than the version currently installed.
    '''
    reps = args
    recurse = False
    if 'recurse' in args:
        recurse = True
        reps.remove('recurse')
    INDEX.validate(True)
    reps = cwd_projects(reps)
    if recurse:
        # note that *EXCLUDE_PATS* is global.
        dgen = MakeGenerator(reps, [], EXCLUDE_PATS)
        validate_controls(dgen, INDEX)
    else:
        global ERRORS
        handler = Unserializer(reps)
        INDEX.parse(handler)
        for name in reps:
            # The project is present in *srcTop*, so we will update the source
            # code from a repository.
            update = None
            if not name in handler.projects:
                # We found a directory that contains source control information
                # but which is not in the interdependencies index file.
                src_dir = CONTEXT.src_dir(name)
                if os.path.exists(src_dir):
                    update = UpdateStep(
                        name, Repository.associate(src_dir), None)
            else:
                # Prefer the *repository* node; fall back to *patch*.
                update = handler.as_project(name).repository.update
                if not update:
                    update = handler.as_project(name).patch.update
            if update:
                # Not every project is made a first-class citizen. If there are
                # no rep structure for a project, it must depend on a project
                # that does in order to have a source controlled repository.
                # This is a simple way to specify inter-related projects
                # with complex dependency set and barely any code.
                # \todo We do not propagate force= here to avoid messing up
                # the local checkouts on pubUpdate()
                try:
                    log_header(update.name)
                    update.run(CONTEXT)
                    log_footer(update.name)
                except Error, err:
                    # A failed pull is reported but does not abort the
                    # update of the remaining projects.
                    log_info('warning: cannot update repository from ' \
                        + str(update.rep.url))
                    log_footer(update.name, errcode=err.code)
            else:
                ERRORS += [ name ]
        if len(ERRORS) > 0:
            raise Error('%s is/are not project(s) under source control.'
                % ' '.join(ERRORS))
    # Summarize how many projects actually pulled changes.
    nb_updated_projects = len(UpdateStep.updated_sources)
    if nb_updated_projects > 0:
        log_info('%d updated project(s).' % nb_updated_projects)
    else:
        log_info('all project(s) are up-to-date.')
def pub_upstream(args):
    '''[ srcPackage ... ]
    Generate a patch to submit to upstream
    maintainer out of a source package and
    a -patch subdirectory in a project src_dir.
    '''
    while len(args) > 0:
        pkgfilename = args.pop(0)
        srcdir = unpack(pkgfilename)
        # Keep a pristine copy of the unpacked sources in *srcdir*.orig
        # so a recursive diff can be generated after patching.
        orgdir = srcdir + '.orig'
        if os.path.exists(orgdir):
            shutil.rmtree(orgdir, ignore_errors=True)
        shutil.move(srcdir, orgdir)
        srcdir = unpack(pkgfilename)
        pchdir = CONTEXT.src_dir(os.path.join(CONTEXT.cwd_project(),
                                              srcdir + '-patch'))
        integrate(srcdir, pchdir)
        # In the common case, no variables will be added to the workspace
        # make fragment when the upstream command is run. Hence sys.stdout
        # will only display the patched information. This is important to be
        # able to execute:
        #   dws upstream > patch
        # NOTE(review): stdout=subprocess.PIPE below captures the diff
        # output into an unread pipe instead of forwarding it to
        # sys.stdout -- confirm this is intentional.
        cmdline = [ 'diff', '-ruNa', orgdir, srcdir ]
        subprocess.call(' '.join(cmdline), shell=True,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def select_checkout(rep_candidates, package_candidates):
    '''Interactive prompt for a selection of projects to checkout.
    *rep_candidates* contains a list of rows describing projects available
    for selection. This function will return a list of projects to checkout
    from a source repository and a list of projects to install through
    a package manager.'''
    reps = []
    if len(rep_candidates) > 0:
        reps = select_multiple(
            '''The following dependencies need to be present on your system.
You have now the choice to install them from a source repository. You will
later have the choice to install them from either a patch, a binary package
or not at all.''',
            rep_candidates)
        # NOTE(review): *select_string* is not defined in this function
        # nor visible in this module chunk -- confirm it is a module
        # global, otherwise this line raises NameError at runtime.
        log_info(select_string)
    # Filters out the dependencies which the user has decided to install
    # from a repository.
    packages = []
    for row in package_candidates:
        if not row[0] in reps:
            packages += [ row ]
    packages = select_install(packages)
    return reps, packages
def select_install(package_candidates):
    '''Interactive prompt for a selection of projects to install
    as binary packages. *package_candidates* contains a list of rows
    describing projects available for selection. This function will
    return a list of projects to install through a package manager. '''
    packages = []
    if len(package_candidates) > 0:
        packages = select_multiple(
            '''The following dependencies need to be present on your system.
You have now the choice to install them from a binary package. You can skip
this step if you know those dependencies will be resolved correctly later on.
''', package_candidates)
        # NOTE(review): *select_string* is undefined in this chunk (see
        # select_checkout) -- confirm it exists as a module global.
        log_info(select_string)
    return packages
def select_one(description, choices, sort=True):
    '''Prompt an interactive list of choices and return the first field
    of the row picked by the user. *description* explains the reason for
    the prompt. *choices* is a list of rows; only the first column of a
    row is significant, the other columns are displayed as context to
    help the user make an informed choice.'''
    if sort:
        # We should not sort 'Enter ...' choices for pathnames else we will
        # end-up selecting unexpected pathnames by default.
        choices.sort()
    picked = None
    while True:
        show_multiple(description, choices)
        if USE_DEFAULT_ANSWER:
            selection = "1"
        else:
            selection = prompt("Enter a single number [1]: ")
        if selection == "":
            selection = "1"
        try:
            index = int(selection)
        except (TypeError, ValueError):
            # Not a number: prompt again.
            continue
        if 1 <= index <= len(choices):
            picked = choices[index - 1][0]
            break
    return picked
def select_multiple(description, selects):
    '''Prompt an interactive list of choices and returns elements selected
    by the user. *description* is a text that explains the reason for the
    prompt. *choices* is a list of elements to choose from. Each element is
    in itself a list. Only the first value of each element is of significance
    and returned by this function. The other values are only use as textual
    context to help the user make an informed choice.'''
    result = []
    done = False
    selects.sort()
    # Prepend a synthetic 'all' row; picking number 1 selects every
    # remaining candidate at once.
    choices = [ [ 'all' ] ] + selects
    while len(choices) > 1 and not done:
        show_multiple(description, choices)
        # One extra, implicit choice terminates the selection loop.
        log_info("%d) done", len(choices) + 1)
        if USE_DEFAULT_ANSWER:
            selection = "1"
        else:
            selection = prompt(
                "Enter a list of numbers separated by spaces [1]: ")
        if len(selection) == 0:
            selection = "1"
        # parse the answer for valid inputs
        selection = selection.split(' ')
        for sel in selection:
            try:
                choice = int(sel)
            except TypeError:
                choice = 0
            except ValueError:
                choice = 0
            if choice > 1 and choice <= len(choices):
                result += [ choices[choice - 1][0] ]
            elif choice == 1:
                # 'all': select every remaining candidate and stop.
                result = []
                for choice_value in choices[1:]:
                    result += [ choice_value[0] ]
                done = True
            elif choice == len(choices) + 1:
                done = True
        # remove selected items from list of choices
        remains = []
        for row in choices:
            if not row[0] in result:
                remains += [ row ]
        choices = remains
    return result
def select_yes_no(description):
    '''Prompt for a yes/no answer.'''
    if USE_DEFAULT_ANSWER:
        return True
    answer = prompt(description + " [Y/n]? ")
    # An empty reply defaults to yes.
    return answer in ('', 'Y', 'y')
def show_multiple(description, choices):
    '''Returns a list of choices on the user interface as a string.
    We do this instead of printing directly because this function
    is called to configure CONTEXT variables, including *logDir*.'''
    # Compute display layout
    widths = []
    displayed = []
    for item, row in enumerate(choices, start=1):
        line = []
        # First column is the 1-based selection number ("1)", "2)", ...).
        for col_index, column in enumerate([ str(item) + ')' ] + row):
            col = column
            if isinstance(col, dict):
                if 'description' in column:
                    col = column['description'] # { description: ... }
                else:
                    col = ""
            line += [ col ]
            # Track the widest cell seen in each column (+2 for padding).
            if len(widths) <= col_index:
                widths += [ 2 ]
            widths[col_index] = max(widths[col_index], len(col) + 2)
        displayed += [ line ]
    # Ask user to review selection
    log_info('%s' % description)
    for project in displayed:
        for col_index, col in enumerate(project):
            # NOTE(review): each cell is passed to log_info separately;
            # confirm log_info does not emit a newline per call, else
            # the computed column layout collapses to one cell per line.
            log_info(col.ljust(widths[col_index]))
def unpack(pkgfilename):
    '''unpack a tar[.gz|.bz2] source distribution package.

    Returns the name of the directory the package unpacks into, i.e.
    *pkgfilename* with its directory part and up to two extensions
    (".tar.gz", ".tar.bz2", ".tar") stripped.'''
    if pkgfilename.endswith('.bz2'):
        pkgflag = 'j'
    elif pkgfilename.endswith('.gz'):
        pkgflag = 'z'
    else:
        # Plain .tar (or unrecognized) archive: no compression flag.
        # Previously this path raised NameError because *pkgflag* was
        # never assigned.
        pkgflag = ''
    shell_command(['tar', pkgflag + 'xf', pkgfilename])
    # Strip up to two extensions: "pkg-1.0.tar.gz" -> "pkg-1.0".
    return os.path.basename(os.path.splitext(
        os.path.splitext(pkgfilename)[0])[0])
def main(args):
'''Main Entry Point'''
# XXX use of this code?
# os.setuid(int(os.getenv('SUDO_UID')))
# os.setgid(int(os.getenv('SUDO_GID')))
exit_code = 0
try:
import __main__
import argparse
global CONTEXT
CONTEXT = Context()
keys = CONTEXT.environ.keys()
keys.sort()
epilog = 'Variables defined in the workspace make fragment (' \
+ CONTEXT.config_name + '):\n'
for varname in keys:
var = CONTEXT.environ[varname]
if var.descr:
epilog += (' ' + var.name).ljust(23, ' ') + var.descr + '\n'
parser = argparse.ArgumentParser(
usage='%(prog)s [options] command\n\nVersion\n %(prog)s version '
+ str(__version__),
formatter_class=argparse.RawTextHelpFormatter,
epilog=epilog)
parser.add_argument('--version', action='version',
version='%(prog)s ' + str(__version__))
parser.add_argument('--config', dest='config', action='store',
help='Set the path to the config file instead of deriving it'\
' from the current directory.')
parser.add_argument('--default', dest='default', action='store_true',
help='Use default answer for every interactive prompt.')
parser.add_argument('--exclude', dest='exclude_pats', action='append',
help='The specified command will not be applied to projects'\
' matching the name pattern.')
parser.add_argument('--nolog', dest='nolog', action='store_true',
help='Do not generate output in the log file')
parser.add_argument('--patch', dest='patchTop', action='store',
help='Set *patchTop* the root where local patches can be found.')
parser.add_argument('--prefix', dest='installTop', action='store',
help='Set the root for installed bin, include, lib, etc. ')
parser.add_argument('--mailto', dest='mailto', action='append',
help='Add an email address to send log reports to')
build_subcommands_parser(parser, __main__)
if len(args) <= 1:
parser.print_help()
return 1
if args[1] == 'help-book':
# Print help in docbook format.
# We need the parser here so we can't create a pub_ function
# for this command.
help_str = cStringIO.StringIO()
parser.print_help(help_str)
help_book(help_str)
return 0
options = parser.parse_args(args[1:])
# Find the build information
global USE_DEFAULT_ANSWER
USE_DEFAULT_ANSWER = options.default
global NO_LOG
NO_LOG = options.nolog
if options.exclude_pats:
global EXCLUDE_PATS
EXCLUDE_PATS = options.exclude_pats
if not options.func in [ pub_build ]:
# The *build* command is special in that it does not rely
# on locating a pre-existing context file.
try:
CONTEXT.locate(options.config)
except IOError:
pass
except:
raise
if options.installTop:
CONTEXT.environ['installTop'] = os.path.abspath(options.installTop)
if options.patchTop:
CONTEXT.environ['patchTop'] = os.path.abspath(options.patchTop)
global INDEX
INDEX = IndexProjects(CONTEXT)
# Filter out options with are not part of the function prototype.
func_args = filter_subcommand_args(options.func, options)
options.func(**func_args)
except Error, err:
log_error(str(err))
exit_code = err.code
if options.mailto and len(options.mailto) > 0 and LOG_PAT:
logs = find_files(CONTEXT.log_path(''), LOG_PAT)
log_info('forwarding logs ' + ' '.join(logs) + '...')
sendmail(createmail('build report', logs), options.mailto)
return exit_code
# Script entry point: dispatch the raw command line to main().
if __name__ == '__main__':
    sys.exit(main(sys.argv))
# add gems installer
#!/usr/bin/env python
#
# Copyright (c) 2009-2013, Fortylines LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of fortylines nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Fortylines LLC ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Fortylines LLC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Implements workspace management.
The workspace manager script is used to setup a local machine
with third-party prerequisites and source code under revision
control such that it is possible to execute a development cycle
(edit/build/run) on a local machine.
The script will email build reports when the --mailto command line option
is specified. There are no sensible default values for the following
variables thus those should be set in the shell environment before
invoking the script.
dwsEmail=
smtpHost=
smtpPort=
dwsSmtpLogin=
dwsSmtpPasswd=
"""
# Primary Author(s): Sebastien Mirolo <smirolo@fortylines.com>
#
# Requires Python 2.7 or above.
__version__ = None
import datetime, hashlib, inspect, logging, logging.config, re, optparse
import os, shutil, socket, subprocess, sys, tempfile, urllib2, urlparse
import xml.dom.minidom, xml.sax
import cStringIO
# \todo executable used to return a password compatible with sudo. This is used
# temporarly while sudo implementation is broken when invoked with no tty.
ASK_PASS = ''
# When True, all commands invoked through shell_command() are printed
# but not executed.
DO_NOT_EXECUTE = False
# Global variables that contain all encountered errors.
ERRORS = []
# When processing a project dependency index file, all project names matching
# one of the *EXCLUDE_PATS* will be considered non-existant.
EXCLUDE_PATS = []
# Log commands output
LOGGER = None
LOGGER_BUFFER = None
LOGGER_BUFFERING_COUNT = 0
# Pattern used to search for logs to report through email.
LOG_PAT = None
# When True, the log object is not used and output is only
# done on sys.stdout.
NO_LOG = False
# Address to email log reports to.
MAILTO = []
# When True, *find_lib* will prefer static libraries over dynamic ones if both
# exist for a specific libname. This should match .LIBPATTERNS in prefix.mk.
STATIC_LIB_FIRST = True
# When True, the script runs in batch mode and assumes the default answer
# for every question where it would have prompted the user for an answer.
USE_DEFAULT_ANSWER = False
# Directories where things get installed
INSTALL_DIRS = [ 'bin', 'include', 'lib', 'libexec', 'etc', 'share' ]
# distributions per native package managers
APT_DISTRIBS = [ 'Debian', 'Ubuntu' ]
YUM_DISTRIBS = [ 'Fedora' ]
PORT_DISTRIBS = [ 'Darwin' ]
# Workspace configuration (a Context instance), initialized in main().
CONTEXT = None
# Project dependency index (an IndexProjects instance), set in main().
INDEX = None
class Error(RuntimeError):
    '''Base exception for "expected" failure conditions that lead to a
    useful message. Exceptions of other types are not caught when
    *__main__* executes and display an internal stack trace; they are
    concidered bugs in the workspace management script.'''

    def __init__(self, msg='unknow error', code=1, project_name=None):
        super(Error, self).__init__()
        self.code = code
        self.msg = msg
        self.project_name = project_name

    def __str__(self):
        # When a project is associated, prefix the message with
        # "<project>:<code>: error"; otherwise use the generic form.
        if self.project_name:
            prefix = ':'.join([self.project_name, str(self.code), ' error'])
            return prefix + ' ' + self.msg + '\n'
        return 'error: ' + self.msg + ' (error ' + str(self.code) + ')\n'
class CircleError(Error):
    '''Raised when a cycle is detected during a topological
    traversal of the dependency graph.'''

    def __init__(self, connected):
        message = "detected a circle within %s" % ' '.join(connected)
        Error.__init__(self, msg=message)
class MissingError(Error):
    '''This error is thrown whenever a project has missing prerequisites.'''

    def __init__(self, project_name, prerequisites):
        # Error code 2 identifies missing-prerequisite failures.
        message = 'The following prerequisistes are missing: ' \
            + ' '.join(prerequisites)
        Error.__init__(self, message, 2, project_name)
class Context:
'''The workspace configuration file contains environment variables used
to update, build and package projects. The environment variables are roots
of the general dependency graph as most other routines depend on srcTop
and buildTop at the least.'''
config_name = 'dws.mk'
indexName = 'dws.xml'
    def __init__(self):
        '''Declare every workspace environment variable (siteTop,
        buildTop, srcTop, install directories, remote mirrors, SMTP
        settings, ...) with its description, base variable and default
        value.'''
        # Two following variables are used by interactively change the make
        # command-line.
        self.tunnel_point = None
        self.targets = []
        self.overrides = []
        site_top = Pathname('siteTop',
            { 'description':
                  'Root of the tree where the website is generated\n'\
                  ' and thus where *remoteSiteTop* is cached\n'\
                  ' on the local system',
              'default':os.getcwd()})
        remote_site_top = Pathname('remoteSiteTop',
            { 'description':
                  'Root of the remote tree that holds the published website\n'
                  ' (ex: url:/var/cache).',
              'default':''})
        install_top = Pathname('installTop',
            { 'description':'Root of the tree for installed bin/,'\
                  ' include/, lib/, ...',
              'base':'siteTop','default':''})
        # We use installTop (previously siteTop), such that a command like
        # "dws build *remoteIndex* *siteTop*" run from a local build
        # directory creates intermediate and installed files there while
        # checking out the sources under siteTop.
        # It might just be my preference...
        build_top = Pathname('buildTop',
            { 'description':'Root of the tree where intermediate'\
                  ' files are created.',
              'base':'siteTop','default':'build'})
        src_top = Pathname('srcTop',
            { 'description':
                  'Root of the tree where the source code under revision\n'
                  ' control lives on the local machine.',
              'base': 'siteTop',
              'default':'reps'})
        self.environ = { 'buildTop': build_top,
                         'srcTop' : src_top,
                         'patchTop': Pathname('patchTop',
            {'description':'Root of the tree where patches are stored',
             'base':'siteTop',
             'default':'patch'}),
                         'binDir': Pathname('binDir',
            {'description':'Root of the tree where executables are installed',
             'base':'installTop'}),
                         'installTop': install_top,
                         'includeDir': Pathname('includeDir',
            {'description':'Root of the tree where include files are installed',
             'base':'installTop'}),
                         'libDir': Pathname('libDir',
            {'description':'Root of the tree where libraries are installed',
             'base':'installTop'}),
                         'libexecDir': Pathname('libexecDir',
            {'description':'Root of the tree where executable helpers'\
                 ' are installed',
             'base':'installTop'}),
                         'etcDir': Pathname('etcDir',
            {'description':
                 'Root of the tree where configuration files for the local\n'
                 ' system are installed',
             'base':'installTop'}),
                         'shareDir': Pathname('shareDir',
            {'description':'Directory where the shared files are installed.',
             'base':'installTop'}),
                         'siteTop': site_top,
                         'logDir': Pathname('logDir',
            {'description':'Directory where the generated log files are'\
                 ' created',
             'base':'siteTop',
             'default':'log'}),
                         'indexFile': Pathname('indexFile',
            {'description':'Index file with projects dependencies information',
             'base':'siteTop',
             'default':os.path.join('resources',
                 os.path.basename(sys.argv[0]) + '.xml')}),
                         'remoteSiteTop': remote_site_top,
                         'remoteSrcTop': Pathname('remoteSrcTop',
            {'description':
                 'Root of the tree on the remote machine where repositories\n'\
                 ' are located.',
             'base':'remoteSiteTop',
             'default':'reps'}),
                         'remoteIndex': Pathname('remoteIndex',
            {'description':
                 'Url to the remote index file with projects dependencies\n'\
                 ' information',
             'base':'remoteSiteTop',
             'default':'reps/dws.git/dws.xml'}),
                         'darwinTargetVolume': Single('darwinTargetVolume',
            { 'description':
                  'Destination of installed packages on a Darwin local\n'\
                  ' machine. Installing on the "LocalSystem" requires\n'\
                  ' administrator privileges.',
              'choices': {'LocalSystem':
                  'install packages on the system root for all users',
                          'CurrentUserHomeDirectory':
                  'install packages for the current user only'} }),
                         'distHost': HostPlatform('distHost'),
                         'smtpHost': Variable('smtpHost',
            { 'description':'Hostname for the SMTP server through'\
                  ' which logs are sent.',
              'default':'localhost'}),
                         'smtpPort': Variable('smtpPort',
            { 'description':'Port for the SMTP server through'\
                  ' which logs are sent.',
              'default':'5870'}),
                         'dwsSmtpLogin': Variable('dwsSmtpLogin',
            { 'description':
                  'Login on the SMTP server for the user through which\n'\
                  ' logs are sent.'}),
                         'dwsSmtpPasswd': Variable('dwsSmtpPasswd',
            { 'description':
                  'Password on the SMTP server for the user through which\n'\
                  ' logs are sent.'}),
                         'dwsEmail': Variable('dwsEmail',
            { 'description':
                  'dws occasionally emails build reports (see --mailto\n'
                  ' command line option). This is the address that will\n'\
                  ' be shown in the *From* field.',
              'default':os.environ['LOGNAME'] + '@localhost'}) }
        # Set by locate(); cwd relative to *buildTop*/*srcTop*.
        self.build_top_relative_cwd = None
        # Absolute path of the workspace make fragment, set by locate().
        self.config_filename = None
def base(self, name):
'''Returns a basename of the uri/path specified in variable *name*.
We do not use os.path.basename directly because it wasn't designed
to handle uri nor does urlparse was designed to handle git/ssh locators.
'''
locator = self.value(name)
look = re.match('\S+@\S+:(\S+)', locator)
if look:
return os.path.splitext(os.path.basename(look.group(1)))[0]
look = re.match('https?:(\S+)', locator)
if look:
uri = urlparse.urlparse(locator)
return os.path.splitext(os.path.basename(uri.path))[0]
return os.path.splitext(os.path.basename(locator))[0]
def bin_build_dir(self):
'''Returns the bin/ directory located inside buildTop.'''
return os.path.join(self.value('buildTop'), 'bin')
    def derived_helper(self, name):
        '''Absolute path to a file which is part of drop helper files
        located in the share/dws subdirectory. The absolute directory
        name to share/dws is derived from the path of the script
        being executed as such: dirname(sys.argv[0])/../share/dws.'''
        # i.e. <prefix-of-running-script>/share/dws/<name>
        return os.path.join(
            os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))),
            'share','dws', name)
        # That code does not work when we are doing dws make (no recurse).
        # return os.path.join(self.value('buildTop'),'share','dws',name)
def log_path(self, name):
'''Absolute path to a file in the local system log
directory hierarchy.'''
return os.path.join(self.value('logDir'), name)
def remote_src_path(self, name):
'''Absolute path to access a repository on the remote machine.'''
return os.path.join(self.value('remoteSrcTop'), name)
def remote_host(self):
'''Returns the host pointed by *remoteSiteTop*'''
uri = urlparse.urlparse(CONTEXT.value('remoteSiteTop'))
hostname = uri.netloc
if not uri.netloc:
# If there is no protocol specified, the hostname
# will be in uri.scheme (That seems like a bug in urlparse).
hostname = uri.scheme
return hostname
    def cwd_project(self):
        '''Returns a project name derived out of the current directory.'''
        if not self.build_top_relative_cwd:
            # No configuration file found anywhere up the tree: bootstrap
            # one in *buildTop* and re-run the location logic.
            self.environ['buildTop'].default = os.path.dirname(os.getcwd())
            log_info('no workspace configuration file could be ' \
               + 'found from ' + os.getcwd() \
               + ' all the way up to /. A new one, called ' + self.config_name\
               + ', will be created in *buildTop* after that path is set.')
            self.config_filename = os.path.join(self.value('buildTop'),
                                                self.config_name)
            self.save()
            self.locate()
        # The project name is the path of the current directory relative
        # to whichever of *buildTop* or *srcTop* contains it.
        if os.path.realpath(os.getcwd()).startswith(
            os.path.realpath(self.value('buildTop'))):
            top = os.path.realpath(self.value('buildTop'))
        elif os.path.realpath(os.getcwd()).startswith(
            os.path.realpath(self.value('srcTop'))):
            top = os.path.realpath(self.value('srcTop'))
        else:
            raise Error("You must run dws from within a subdirectory of "\
                            "buildTop or srcTop")
        prefix = os.path.commonprefix([top, os.getcwd()])
        return os.getcwd()[len(prefix) + 1:]
    def db_pathname(self):
        '''Absolute pathname to the project index file.'''
        # NOTE(review): this method reads the CONTEXT global rather than
        # *self*; it only behaves as expected when invoked on the global
        # Context instance -- confirm whether that is intentional.
        if not str(self.environ['indexFile']):
            # Derive *indexFile* from *remoteIndex* on first use.
            filtered = filter_rep_ext(CONTEXT.value('remoteIndex'))
            if filtered != CONTEXT.value('remoteIndex'):
                prefix = CONTEXT.value('remoteSrcTop')
                if not prefix.endswith(':') and not prefix.endswith(os.sep):
                    prefix = prefix + os.sep
                self.environ['indexFile'].default = \
                    CONTEXT.src_dir(filtered.replace(prefix, ''))
            else:
                self.environ['indexFile'].default = \
                    CONTEXT.local_dir(CONTEXT.value('remoteIndex'))
        return self.value('indexFile')
    def host(self):
        '''Returns the distribution of the local system
        on which the script is running.'''
        # *distHost* is declared as a HostPlatform variable in __init__.
        return self.value('distHost')
    def local_dir(self, name):
        '''Returns the path on the local system to a directory.'''
        site_top = self.value('siteTop')
        pos = name.rfind('./')
        if pos >= 0:
            # Explicit "./" marker: everything after it is relative
            # to *siteTop*.
            localname = os.path.join(site_top, name[pos + 2:])
        elif (str(self.environ['remoteSiteTop'])
              and name.startswith(self.value('remoteSiteTop'))):
            # Path under *remoteSiteTop*: map it to the same location
            # under *siteTop*, stripping any repository extension.
            localname = filter_rep_ext(name)
            remote_site_top = self.value('remoteSiteTop')
            if remote_site_top.endswith(':'):
                site_top = site_top + '/'
            localname = localname.replace(remote_site_top, site_top)
        elif ':' in name:
            # Other remote locators are cached under siteTop/resources.
            localname = os.path.join(
                site_top,'resources', os.path.basename(name))
        elif not name.startswith(os.sep):
            # Relative path: anchor it at *siteTop*.
            localname = os.path.join(site_top, name)
        else:
            localname = name.replace(
                self.value('remoteSiteTop'), site_top)
        return localname
def remote_dir(self, name):
'''Returns the absolute path on the remote system that corresponds
to *name*, the absolute path of a file or directory on the local
system.'''
if name.startswith(self.value('siteTop')):
return name.replace(self.value('siteTop'),
self.value('remoteSiteTop'))
return None
def load_context(self, filename):
site_top_found = False
config_file = open(filename)
line = config_file.readline()
while line != '':
look = re.match(r'(\S+)\s*=\s*(\S+)', line)
if look != None:
if look.group(1) == 'siteTop':
site_top_found = True
if (look.group(1) in self.environ
and isinstance(self.environ[look.group(1)], Variable)):
self.environ[look.group(1)].value = look.group(2)
else:
self.environ[look.group(1)] = look.group(2)
line = config_file.readline()
config_file.close()
return site_top_found
    def locate(self, config_filename=None):
        '''Locate the workspace configuration file and derive the project
        name out of its location.'''
        try:
            if config_filename:
                # An explicit config path overrides the upward search.
                self.config_filename = config_filename
                self.config_name = os.path.basename(config_filename)
                self.build_top_relative_cwd = os.path.dirname(config_filename)
            else:
                self.build_top_relative_cwd, self.config_filename \
                    = search_back_to_root(self.config_name)
        except IOError:
            # No config file all the way up to /: configure *buildTop*
            # interactively and decide where to create the file.
            self.build_top_relative_cwd = None
            self.environ['buildTop'].configure(self)
            build_top = str(self.environ['buildTop'])
            site_top = str(self.environ['siteTop'])
            if build_top.startswith(site_top):
                # When build_top is inside the site_top, we create the config
                # file in site_top for convinience so dws commands can be run
                # anywhere from within site_top (i.e. both build_top
                # and src_top).
                self.config_filename = os.path.join(site_top,
                                                    self.config_name)
            else:
                # When we have a split hierarchy we can build the same src_top
                # multiple different ways but dws commands should exclusively
                # be run from within the build_top.
                self.config_filename = os.path.join(build_top,
                                                    self.config_name)
            if not os.path.isfile(self.config_filename):
                self.save()
        if self.build_top_relative_cwd == '.':
            self.build_top_relative_cwd = os.path.basename(os.getcwd())
        # \todo is this code still relevent?
        look = re.match('([^-]+)-.*', self.build_top_relative_cwd)
        if look:
            # Change of project name in *indexName* on "make dist-src".
            # self.build_top_relative_cwd = look.group(1)
            pass
        # -- Read the environment variables set in the config file.
        # When running under sudo, read the invoking user's ~/.dws
        # defaults, not root's.
        home_dir = os.environ['HOME']
        if 'SUDO_USER' in os.environ:
            home_dir = home_dir.replace(os.environ['SUDO_USER'],
                                        os.environ['LOGNAME'])
        user_default_config = os.path.join(home_dir, '.dws')
        if os.path.exists(user_default_config):
            self.load_context(user_default_config)
        site_top_found = self.load_context(self.config_filename)
        if not site_top_found:
            # By default we set *siteTop* to be the directory
            # where the configuration file was found since basic paths
            # such as *buildTop* and *srcTop* defaults are based on it.
            self.environ['siteTop'].value = os.path.dirname(
                self.config_filename)
def logname(self):
'''Name of the XML tagged log file where sys.stdout is captured.'''
filename = os.path.basename(self.config_name)
filename = os.path.splitext(filename)[0] + '.log'
filename = self.log_path(filename)
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
return filename
def logbuildname(self):
'''Name of the log file for build summary.'''
filename = os.path.basename(self.config_name)
filename = os.path.splitext(filename)[0] + '-build.log'
filename = self.log_path(filename)
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
return filename
def obj_dir(self, name):
return os.path.join(self.value('buildTop'), name)
def patch_dir(self, name):
return os.path.join(self.value('patchTop'), name)
    def from_remote_index(self, remote_path):
        '''We need to set the *remoteIndex* to a realpath when we are dealing
        with a local file else links could end-up generating a different prefix
        than *remoteSiteTop* for *remoteIndex*/*indexName*.

        Derives and sets *remoteIndex*, *remoteSrcTop* and *remoteSiteTop*
        in self.environ from *remote_path*.'''
        if search_repo_pat(remote_path):
            # *remote_path* points at a repository; the index file lives
            # inside it under the configured index name.
            remote_path = os.path.join(remote_path, self.indexName)
        # Set remoteIndex.value instead of remoteIndex.default because
        # we don't want to trigger a configure of logDir before we have
        # a chance to set the siteTop.
        look = re.match(r'(\S+@)?(\S+):(.*)', remote_path)
        if look:
            # The path has the [user@]host:path shape of an ssh location.
            self.tunnel_point = look.group(2)
            src_base = look.group(3)
            site_base = src_base
            remote_path_list = look.group(3).split(os.sep)
            host_prefix = self.tunnel_point + ':'
            if look.group(1):
                host_prefix = look.group(1) + host_prefix
        else:
            # We compute *base* here through the same algorithm as done
            # in *local_dir*. We do not call *local_dir* because remoteSiteTop
            # is not yet defined at this point.
            src_base = os.path.dirname(remote_path)
            while not os.path.isdir(src_base):
                src_base = os.path.dirname(src_base)
            remote_path_list = remote_path.split(os.sep)
            site_base = os.path.dirname(src_base)
            host_prefix = ''
        for i in range(0, len(remote_path_list)):
            if remote_path_list[i] == '.':
                # An explicit '.' component marks the boundary between
                # the site directory and the source directory.
                site_base = os.sep.join(remote_path_list[0:i])
                src_base = os.path.join(site_base, remote_path_list[i + 1])
                break
            look = search_repo_pat(remote_path_list[i])
            if look:
                # splitext does not return any extensions when the path
                # starts with dot.
                rep_ext = look.group(1)
                if not rep_ext.startswith('.'):
                    _, rep_ext = os.path.splitext(look.group(1))
                if remote_path_list[i] == rep_ext:
                    # The component is only the repository extension, so
                    # the repository name is the previous path component.
                    i = i - 1
                if i > 2:
                    src_base = os.sep.join(remote_path_list[0:i])
                    site_base = os.sep.join(remote_path_list[0:i-1])
                elif i > 1:
                    src_base = remote_path_list[0]
                    site_base = ''
                else:
                    src_base = ''
                    site_base = ''
                break
        if not self.tunnel_point:
            # We can't use realpath before we figured out where the '.'
            # delimiter is in remote_path.
            remote_path = remote_path.replace(
                src_base, os.path.realpath(src_base))
            src_base = os.path.realpath(src_base)
            site_base = os.path.realpath(site_base)
        self.environ['remoteIndex'].value = remote_path
        self.environ['remoteSrcTop'].default = host_prefix + src_base
        # Note: We used to set the context[].default field which had for side
        # effect to print the value the first time the variable was used.
        # The problem is that we need to make sure remoteSiteTop is defined
        # before calling *local_dir*, otherwise the resulting indexFile value
        # will be different from the place the remoteIndex is fetched to.
        self.environ['remoteSiteTop'].value = host_prefix + site_base
def save(self):
'''Write the config back to a file.'''
if not self.config_filename:
# No config_filename means we are still figuring out siteTop,
# so we don't know where to store the config file.
return
if not os.path.exists(os.path.dirname(self.config_filename)):
os.makedirs(os.path.dirname(self.config_filename))
config_file = open(self.config_filename, 'w')
keys = sorted(self.environ.keys())
config_file.write('# configuration for development workspace\n\n')
for key in keys:
val = self.environ[key]
if len(str(val)) > 0:
config_file.write(key + '=' + str(val) + '\n')
config_file.close()
    def search_path(self, name, variant=None):
        '''Derives a list of directory names based on the PATH
        environment variable, *name* and a *variant* triplet.

        *name* is a directory category such as 'bin', 'lib' or 'share';
        *variant*, when given, is a regular expression used to select
        matching subdirectories. Only directories that exist on the local
        system are returned.'''
        dirs = []
        # We want the actual value of *name*Dir and not one derived from binDir
        dirname = CONTEXT.value(name + 'Dir')
        if os.path.isdir(dirname):
            if variant:
                # Keep only the subdirectories matching *variant*.
                for subdir in os.listdir(dirname):
                    if re.match(variant, subdir):
                        dirs += [ os.path.join(dirname, subdir) ]
            else:
                dirs += [ dirname ]
        for path in os.environ['PATH'].split(':'):
            # *path* is typically a .../bin directory; *base* is its parent
            # under which lib/, share/, etc. siblings are searched.
            base = os.path.dirname(path)
            if name == 'lib':
                # On mixed 32/64-bit system, libraries also get installed
                # in lib64/. This is also true for 64-bit native python modules.
                for subpath in [ name, 'lib64' ]:
                    dirname = os.path.join(base, subpath)
                    if os.path.isdir(dirname):
                        if variant:
                            for subdir in os.listdir(dirname):
                                if re.match(variant, subdir):
                                    dirs += [ os.path.join(dirname, subdir) ]
                        else:
                            dirs += [ dirname ]
            elif name == 'bin':
                # Especially on Fedora, /sbin, /usr/sbin, etc. are many times
                # not in the PATH.
                if os.path.isdir(path):
                    dirs += [ path ]
                sbin = os.path.join(base, 'sbin')
                if (not sbin in os.environ['PATH'].split(':')
                    and os.path.isdir(sbin)):
                    dirs += [ sbin ]
            else:
                if os.path.isdir(os.path.join(base, name)):
                    dirs += [ os.path.join(base, name) ]
        if name == 'lib' and self.host() in PORT_DISTRIBS:
            # Just because python modules do not get installed
            # in /opt/local/lib/python2.7/site-packages
            dirs += [ '/opt/local/Library/Frameworks' ]
        if name == 'share' and self.host() in APT_DISTRIBS:
            dirs += [ '/var/lib/gems' ]
        return dirs
def src_dir(self, name):
return os.path.join(self.value('srcTop'), name)
    def value(self, name):
        '''returns the value of the workspace variable *name*. If the variable
        has no value yet, a prompt is displayed for it.

        ${var} references embedded in the value are expanded recursively,
        first against self.environ, then against os.environ.

        Raises:
            Error: when *name* is not a known workspace variable.'''
        if not name in self.environ:
            raise Error("Trying to read unknown variable " + name + ".")
        if (isinstance(self.environ[name], Variable)
            and self.environ[name].configure(self)):
            # configure() returned True, i.e. a value was just obtained,
            # so persist the configuration right away.
            self.save()
        # recursively resolve any variables that might appear
        # in the variable value. We do this here and not while loading
        # the context because those names can have been defined later.
        value = str(self.environ[name])
        look = re.match(r'(.*)\${(\S+)}(.*)', value)
        while look:
            # Unknown names expand to the empty string.
            indirect = ''
            if look.group(2) in self.environ:
                indirect = self.value(look.group(2))
            elif look.group(2) in os.environ:
                indirect = os.environ[look.group(2)]
            value = look.group(1) + indirect + look.group(3)
            look = re.match(r'(.*)\${(\S+)}(.*)', value)
        return value
# Formats help for script commands. The necessity for this class
# can be understood by the following posts on the internet:
# - http://groups.google.com/group/comp.lang.python/browse_thread/thread/6df6e
# - http://www.alexonlinux.com/pythons-optparse-for-human-beings
#
# \todo The argparse (http://code.google.com/p/argparse/) might be part
# of the standard python library and address the issue at some point.
class CommandsFormatter(optparse.IndentedHelpFormatter):
    '''Help formatter that keeps the line breaks present in the epilog,
    wrapping each resulting chunk to the available width.'''

    def format_epilog(self, description):
        import textwrap
        if not description:
            return ""
        wrap_width = self.width - self.current_indent
        chunks = description.split('\n')
        wrapped = [
            textwrap.fill(chunk, wrap_width,
                          initial_indent="",
                          subsequent_indent=" ")
            for chunk in chunks]
        return "\n".join(wrapped) + "\n"
class IndexProjects:
    '''Index file containing the graph dependency for all projects.'''
    def __init__(self, context, source = None):
        # *context* is the workspace configuration; *source* is either
        # a path to the index file or the literal XML text itself.
        self.context = context
        self.parser = XMLDbParser(context)
        self.source = source
    def closure(self, dgen):
        '''Find out all dependencies from a root set of projects as defined
        by the dependency generator *dgen*.'''
        while dgen.more():
            self.parse(dgen)
        return dgen.topological()
    def parse(self, dgen):
        '''Parse the project index and generates callbacks to *dgen*'''
        self.validate()
        self.parser.parse(self.source, dgen)
    def validate(self, force=False):
        '''Create the project index file if it does not exist
        either by fetching it from a remote server or collecting
        projects indices locally.

        When *force* is True the index is re-fetched even if it already
        exists on disk.'''
        if not self.source:
            self.source = self.context.db_pathname()
        if not self.source.startswith('<?xml'):
            # The source is a file path; when it starts with '<?xml' it is
            # the index content itself and there is nothing to fetch.
            if not os.path.exists(self.source) or force:
                selection = ''
                if not force:
                    # index or copy.
                    selection = select_one(
                        'The project index file could not '
                        + 'be found at "' + self.source \
                        + '". It can be regenerated through one ' \
                        + 'of the two following method:',
                        [ [ 'fetching', 'from remote server' ],
                          [ 'indexing',
                            'local projects in the workspace' ] ],
                        False)
                if selection == 'indexing':
                    pub_collect([])
                elif selection == 'fetching' or force:
                    remote_index = self.context.value('remoteIndex')
                    vcs = Repository.associate(remote_index)
                    # XXX Does not matter here for rsync.
                    # What about other repos?
                    vcs.update(None, self.context)
            if not os.path.exists(self.source):
                raise Error(self.source + ' does not exist.')
class PdbHandler(object):
    '''Callback interface for a project index as generated by an *xmlDbParser*.
    The generic handler does not do anything. It is the responsibility of
    implementing classes to filter callback events they care about.'''

    def __init__(self):
        pass

    def end_parse(self):
        '''Called once the index has been fully parsed.'''
        pass

    def project(self, proj):
        '''Called for each project description found in the index.'''
        pass
class Unserializer(PdbHandler):
    '''Builds *Project* instances for every project that matches *include_pats*
    and not *exclude_pats*. See *filters*() for implementation.'''

    def __init__(self, include_pats=None, exclude_pats=None, custom_steps=None):
        PdbHandler.__init__(self)
        self.projects = {}
        self.first_project = None
        if include_pats:
            self.include_pats = set(include_pats)
        else:
            # Bug fix: *include_pats* used to be left undefined when no
            # patterns were passed in, raising AttributeError the first
            # time *filters*() ran. Default to the empty set.
            self.include_pats = set([])
        # Project which either fullfil all prerequisites or that have been
        # explicitely excluded from installation by the user will be added
        # to *exclude_pats*.
        if exclude_pats:
            self.exclude_pats = set(exclude_pats)
        else:
            self.exclude_pats = set([])
        if custom_steps:
            self.custom_steps = dict(custom_steps)
        else:
            self.custom_steps = {}

    def as_project(self, name):
        '''Returns the *Project* instance for *name*, raising *Error*
        when the name was never seen by *project*().'''
        if not name in self.projects:
            raise Error("unable to find " + name + " in the index file.",
                        project_name=name)
        return self.projects[name]

    def filters(self, project_name):
        '''True when *project_name* matches one of *include_pats* and
        none of *exclude_pats* ('+' characters are escaped before the
        patterns are used as regular expressions).'''
        for inc in self.include_pats:
            inc = inc.replace('+','\\+')
            if re.match(inc, project_name):
                for exc in self.exclude_pats:
                    if re.match(exc.replace('+','\\+'), project_name):
                        return False
                return True
        return False

    def project(self, proj_obj):
        '''Callback for the parser.'''
        if (not proj_obj.name in self.projects) and self.filters(proj_obj.name):
            # Remember the first project seen; it is used elsewhere as
            # the default project.
            if not self.first_project:
                self.first_project = proj_obj
            self.projects[proj_obj.name] = proj_obj
class DependencyGenerator(Unserializer):
    '''*DependencyGenerator* implements a breadth-first search of the project
    dependencies index with a specific twist.
    At each iteration, if all prerequisites for a project can be found
    on the local system, the dependency edge is cut from the next iteration.
    Missing prerequisite executables, headers and libraries require
    the installation of prerequisite projects as stated by the *missings*
    list of edges. The user will be prompted for *candidates*() and through
    the options available will choose to install prerequisites through
    compiling them out of a source controlled repository or a binary
    distribution package.
    *DependencyGenerator.end_parse*() is at the heart of the workspace
    bootstrapping and other "recurse" features.
    '''
    def __init__(self, repositories, packages, exclude_pats = None,
                 custom_steps = None, force_update = False):
        '''*repositories* will be installed from compiling
        a source controlled repository while *packages* will be installed
        from a binary distribution package.
        *exclude_pats* is a list of projects which should be removed from
        the final topological order.'''
        self.roots = packages + repositories
        Unserializer.__init__(self, self.roots, exclude_pats, custom_steps)
        # When True, an exception will stop the recursive make
        # and exit with an error code, otherwise it moves on to
        # the next project.
        self.stop_make_after_error = False
        self.packages = set(packages)
        self.repositories = set(repositories)
        # Maps a prerequisite name to a (color, depth, variant) triplet
        # driving the breadth-first traversal in *end_parse* (seeded with
        # the project name itself as the color).
        self.active_prerequisites = {}
        for prereq_name in repositories + packages:
            self.active_prerequisites[prereq_name] = (
                prereq_name, 0, TargetStep(0, prereq_name) )
        # Set of TargetStep instances indexed by traversal depth.
        self.levels = {}
        self.levels[0] = set([])
        for rep in repositories + packages:
            self.levels[0] |= set([ TargetStep(0, rep) ])
        # Vertices in the dependency tree
        self.vertices = {}
        self.force_update = force_update
    def __str__(self):
        return "vertices:\n%s" % str(self.vertices)
    def connect_to_setup(self, name, step):
        '''Appends *step* as a prerequisite of the vertex called *name*
        when that vertex already exists.'''
        if name in self.vertices:
            self.vertices[name].prerequisites += [ step ]
    def add_config_make(self, variant, configure, make, prerequisites):
        '''Adds a configure step and a build (make) step for *variant*
        to the graph and returns the build step vertex.'''
        config = None
        config_name = ConfigureStep.genid(variant.project, variant.target)
        if not config_name in self.vertices:
            config = configure.associate(variant.target)
            self.vertices[config_name] = config
        else:
            config = self.vertices[config_name]
        make_name = BuildStep.genid(variant.project, variant.target)
        if not make_name in self.vertices:
            make = make.associate(variant.target)
            make.force_update = self.force_update
            self.vertices[make_name] = make
            for prereq in prerequisites:
                make.prerequisites += [ prereq ]
            if config:
                make.prerequisites += [ config ]
            setup_name = SetupStep.genid(variant.project, variant.target)
            self.connect_to_setup(setup_name, make)
        return self.vertices[make_name]
    def add_install(self, project_name):
        '''Adds the step that installs *project_name* to the graph and
        returns an (install_step, flavor) pair where *flavor* is the
        package/patch alternative that was selected, or None.'''
        flavor = None
        install_step = None
        managed_name = project_name.split(os.sep)[-1]
        install_name = InstallStep.genid(managed_name)
        if install_name in self.vertices:
            # We already decided to install this project, nothing more to add.
            return self.vertices[install_name], flavor
        # We do not know the target at this point so we can't build a fully
        # qualified setup_name and index into *vertices* directly. Since we
        # are trying to install projects through the local package manager,
        # it is doubtful we should either know or care about the target.
        # That's a primary reason why target got somewhat slightly overloaded.
        # We used runtime="python" instead of target="python" in an earlier
        # design.
        setup = None
        setup_name = SetupStep.genid(project_name)
        for name, step in self.vertices.iteritems():
            if name.endswith(setup_name):
                setup = step
        # NOTE(review): *setup* can still be None here when no matching
        # setup vertex exists; the setup.target accesses below would then
        # raise — confirm callers always register the setup step first.
        if (setup and not setup.run(CONTEXT)):
            install_step = create_managed(
                managed_name, setup.versions, setup.target)
        if not install_step and project_name in self.projects:
            project = self.projects[project_name]
            if CONTEXT.host() in project.packages:
                # A binary package is declared for the host platform.
                filenames = []
                flavor = project.packages[CONTEXT.host()]
                for remote_path in flavor.update.fetches:
                    filenames += [ CONTEXT.local_dir(remote_path) ]
                install_step = create_package_file(project_name, filenames)
                update_s = self.add_update(project_name, flavor.update)
                # package files won't install without prerequisites already
                # on the local system.
                install_step.prerequisites += self.add_setup(setup.target,
                    flavor.prerequisites([CONTEXT.host()]))
                if update_s:
                    install_step.prerequisites += [ update_s ]
            elif project.patch:
                # build and install from source
                flavor = project.patch
                prereqs = self.add_setup(setup.target,
                    flavor.prerequisites([CONTEXT.host()]))
                update_s = self.add_update(
                    project_name, project.patch.update)
                if update_s:
                    prereqs += [ update_s ]
                install_step = self.add_config_make(
                    TargetStep(0, project_name, setup.target),
                    flavor.configure, flavor.make, prereqs)
        if not install_step:
            # Remove special case install_step is None; replace it with
            # a placeholder instance that will throw an exception
            # when the *run* method is called.
            install_step = InstallStep(project_name, target=setup.target)
        self.connect_to_setup(setup_name, install_step)
        return install_step, flavor
    def add_setup(self, target, deps):
        '''Creates (or merges into) a SetupStep vertex for each dependency
        in *deps* and returns the list of those vertices.'''
        targets = []
        for dep in deps:
            # Fall back to the caller-provided *target* when the
            # dependency does not specify one.
            target_name = dep.target
            if not dep.target:
                target_name = target
            cap = SetupStep.genid(dep.name)
            if cap in self.custom_steps:
                # A custom step class was registered for this dependency.
                setup = self.custom_steps[cap](dep.name, dep.files)
            else:
                setup = SetupStep(
                    dep.name, dep.files, dep.versions, target_name)
            if not setup.name in self.vertices:
                self.vertices[setup.name] = setup
            else:
                self.vertices[setup.name].insert(setup)
            targets += [ self.vertices[setup.name] ]
        return targets
    def add_update(self, project_name, update, update_rep=True):
        '''Adds an UpdateStep vertex for *project_name* when there is
        anything to fetch or check out; returns it, or None.'''
        update_name = UpdateStep.genid(project_name)
        if update_name in self.vertices:
            return self.vertices[update_name]
        update_s = None
        fetches = {}
        if len(update.fetches) > 0:
            # We could unconditionally add all source tarball since
            # the *fetch* function will perform a *find_cache* before
            # downloading missing files. Unfortunately this would
            # interfere with *pub_configure* which checks there are
            # no missing prerequisites whithout fetching anything.
            fetches = find_cache(CONTEXT, update.fetches)
        rep = None
        if update_rep or not os.path.isdir(CONTEXT.src_dir(project_name)):
            rep = update.rep
        # NOTE(review): the condition tests update.rep while the step is
        # built with the possibly-None local *rep* — confirm this is the
        # intended behavior.
        if update.rep or len(fetches) > 0:
            update_s = UpdateStep(project_name, rep, fetches)
            self.vertices[update_s.name] = update_s
        return update_s
    def contextual_targets(self, variant):
        # Subclasses decide, per command semantics, which prerequisite
        # targets to follow for *variant*.
        raise Error("DependencyGenerator should not be instantiated directly")
    def end_parse(self):
        '''Called after each parsing pass: advances the breadth-first
        traversal one level; when no edge could be cut automatically,
        prompts the user to choose how to install the remaining
        prerequisites.'''
        further = False
        next_active_prerequisites = {}
        for prereq_name in self.active_prerequisites:
            # Each edge is a triplet source: (color, depth, variant)
            # Gather next active Edges.
            color = self.active_prerequisites[prereq_name][0]
            depth = self.active_prerequisites[prereq_name][1]
            variant = self.active_prerequisites[prereq_name][2]
            next_depth = depth + 1
            # The algorithm to select targets depends on the command semantic.
            # The build, make and install commands differ in behavior there
            # in the presence of repository, patch and package tags.
            need_prompt, targets = self.contextual_targets(variant)
            if need_prompt:
                next_active_prerequisites[prereq_name] = (color, depth, variant)
            else:
                for target in targets:
                    further = True
                    target_name = str(target.project)
                    if target_name in next_active_prerequisites:
                        if next_active_prerequisites[target_name][0] > color:
                            # We propagate a color attribute through
                            # the constructed DAG to detect cycles later on.
                            next_active_prerequisites[target_name] = (color,
                                next_depth, target)
                    else:
                        next_active_prerequisites[target_name] = (color,
                            next_depth, target)
                    if not next_depth in self.levels:
                        self.levels[next_depth] = set([])
                    self.levels[ next_depth ] |= set([target])
        self.active_prerequisites = next_active_prerequisites
        if not further:
            # This is an opportunity to prompt the user.
            # The user's selection will decide, when available, if the project
            # should be installed from a repository, a patch, a binary package
            # or just purely skipped.
            reps = []
            packages = []
            for name in self.active_prerequisites:
                if (not os.path.isdir(CONTEXT.src_dir(name))
                    and self.filters(name)):
                    # If a prerequisite project is not defined as an explicit
                    # package, we will assume the prerequisite name is
                    # enough to install the required tools for the prerequisite.
                    row = [ name ]
                    if name in self.projects:
                        project = self.as_project(name)
                        if project.installed_version:
                            row += [ project.installed_version ]
                        if project.repository:
                            reps += [ row ]
                        if not project.repository:
                            packages += [ row ]
                    else:
                        packages += [ row ]
            # Prompt to choose amongst installing from repository,
            # patch or package when those tags are available.
            reps, packages = select_checkout(reps, packages)
            self.repositories |= set(reps)
            self.packages |= set(packages)
        # Add all these in the include_pats such that we load project
        # information the next time around.
        for name in self.active_prerequisites:
            if not name in self.include_pats:
                self.include_pats |= set([ name ])
    def more(self):
        '''True if there are more iterations to conduct.'''
        return len(self.active_prerequisites) > 0
    def topological(self):
        '''Returns a topological ordering of projects selected.'''
        ordered = []
        remains = []
        for name in self.packages:
            # We have to wait until here to create the install steps. Before
            # then, we do not know if they will be required nor if prerequisites
            # are repository projects in the index file or not.
            install_step, _ = self.add_install(name)
            if install_step and not install_step.name in self.vertices:
                remains += [ install_step ]
        for step in self.vertices:
            remains += [ self.vertices[step] ]
        next_remains = []
        if False:
            # Debugging aid, normally disabled: dump the steps that
            # remain to be ordered.
            log_info('!!!remains:')
            for step in remains:
                is_vert = ''
                if step.name in self.vertices:
                    is_vert = '*'
                log_info('!!!\t%s %s %s'
                    % (step.name, str(is_vert),
                       str([ pre.name for pre in step.prerequisites])))
        while len(remains) > 0:
            for step in remains:
                # A step is ready once all its prerequisites already appear
                # in *ordered*; it is inserted after the last of them.
                ready = True
                insert_point = 0
                for prereq in step.prerequisites:
                    index = 0
                    found = False
                    for ordered_step in ordered:
                        index = index + 1
                        if prereq.name == ordered_step.name:
                            found = True
                            break
                    if not found:
                        ready = False
                        break
                    else:
                        if index > insert_point:
                            insert_point = index
                if ready:
                    # Skip past already-ordered steps whose priority does
                    # not exceed this step's priority.
                    for ordered_step in ordered[insert_point:]:
                        if ordered_step.priority > step.priority:
                            break
                        insert_point = insert_point + 1
                    ordered.insert(insert_point, step)
                else:
                    next_remains += [ step ]
            if len(remains) <= len(next_remains):
                # No step became ready in this pass: a dependency cycle.
                raise CircleError([vert.name for vert in next_remains])
            remains = next_remains
            next_remains = []
        if False:
            # Debugging aid, normally disabled: dump the final ordering.
            log_info("!!! => ordered:")
            for ordered_step in ordered:
                log_info(" " + ordered_step.name)
        return ordered
class BuildGenerator(DependencyGenerator):
    '''Forces selection of installing from repository when that tag
    is available in a project.'''
    def contextual_targets(self, variant):
        '''At this point we want to add all prerequisites which are either
        a repository or a patch/package for which the dependencies are not
        complete.

        Never prompts: always returns (False, targets).'''
        targets = []
        name = variant.project
        if name in self.projects:
            tags = [ CONTEXT.host() ]
            project = self.as_project(name)
            if project.repository:
                # A source repository is declared: build from source.
                self.repositories |= set([name])
                targets = self.add_setup(variant.target,
                    project.repository.prerequisites(tags))
                update_s = self.add_update(name, project.repository.update)
                prereqs = targets
                if update_s:
                    prereqs = [ update_s ] + targets
                self.add_config_make(variant,
                    project.repository.configure,
                    project.repository.make,
                    prereqs)
            else:
                # No repository tag: install as a binary package.
                self.packages |= set([name])
                install_step, flavor = self.add_install(name)
                if flavor:
                    targets = self.add_setup(variant.target,
                        flavor.prerequisites(tags))
        else:
            # We leave the native host package manager to deal with this one...
            self.packages |= set([ name ])
            self.add_install(name)
        return (False, targets)
class MakeGenerator(DependencyGenerator):
    '''Forces selection of installing from repository when that tag
    is available in a project. Unlike the base class, a build error
    aborts the recursion and root projects are filtered out of the
    topological ordering.'''
    def __init__(self, repositories, packages,
                 exclude_pats = None, custom_steps = None):
        DependencyGenerator.__init__(
            self, repositories, packages,
            exclude_pats, custom_steps, force_update=True)
        # A failed build stops the whole recursive make.
        self.stop_make_after_error = True
    def contextual_targets(self, variant):
        '''Decides how to install *variant*'s project, prompting the user
        only when several installation alternatives (repository, patch,
        package) are available and none was chosen yet. Returns a
        (need_prompt, targets) pair.'''
        name = variant.project
        if not name in self.projects:
            # Unknown project: defer to the host package manager.
            self.packages |= set([ name ])
            return (False, [])
        need_prompt = True
        project = self.as_project(name)
        if os.path.isdir(CONTEXT.src_dir(name)):
            # If there is already a local source directory in *srcTop*, it is
            # also a no brainer - invoke make.
            nb_choices = 1
        else:
            # First, compute how many potential installation tags we have here.
            nb_choices = 0
            if project.repository:
                nb_choices = nb_choices + 1
            if project.patch:
                nb_choices = nb_choices + 1
            if len(project.packages) > 0:
                nb_choices = nb_choices + 1
        targets = []
        tags = [ CONTEXT.host() ]
        if nb_choices == 1:
            # Only one choice is easy. We just have to make sure we won't
            # put the project in two different sets.
            chosen = self.repositories | self.packages
            if project.repository:
                need_prompt = False
                targets = self.add_setup(variant.target,
                    project.repository.prerequisites(tags))
                update_s = self.add_update(
                    name, project.repository.update, False)
                prereqs = targets
                if update_s:
                    prereqs = [ update_s ] + targets
                self.add_config_make(variant,
                    project.repository.configure,
                    project.repository.make,
                    prereqs)
                if not name in chosen:
                    self.repositories |= set([name])
            elif len(project.packages) > 0 or project.patch:
                need_prompt = False
                install_step, flavor = self.add_install(name)
                if flavor:
                    # XXX This will already have been done in add_install ...
                    targets = self.add_setup(variant.target,
                        flavor.prerequisites(tags))
                if not name in chosen:
                    self.packages |= set([name])
        # At this point there is more than one choice to install the project.
        # When the repository, patch or package tag to follow through has
        # already been decided, let's check if we need to go deeper through
        # the prerequisistes.
        if need_prompt:
            if name in self.repositories:
                need_prompt = False
                targets = self.add_setup(variant.target,
                    project.repository.prerequisites(tags))
                update_s = self.add_update(
                    name, project.repository.update, False)
                prereqs = targets
                if update_s:
                    prereqs = [ update_s ] + targets
                self.add_config_make(variant,
                    project.repository.configure,
                    project.repository.make,
                    prereqs)
            elif len(project.packages) > 0 or project.patch:
                need_prompt = False
                install_step, flavor = self.add_install(name)
                if flavor:
                    targets = self.add_setup(variant.target,
                        flavor.prerequisites(tags))
        return (need_prompt, targets)
    def topological(self):
        '''Filter out the roots from the topological ordering in order
        for 'make recurse' to behave as expected (i.e. not compiling roots).'''
        vertices = DependencyGenerator.topological(self)
        results = []
        roots = set([ MakeStep.genid(root) for root in self.roots ])
        for project in vertices:
            if not project.name in roots:
                results += [ project ]
        return results
class MakeDepGenerator(MakeGenerator):
    '''Generate the set of prerequisite projects regardless of the executables,
    libraries, etc. which are already installed.'''

    def add_install(self, name):
        # Deliberate "no-op" install: this generator is not interested in
        # prerequisites past the repository projects and their direct
        # dependencies, so never recurse into package installation.
        return InstallStep(name), None

    def add_setup(self, target, deps):
        '''Register a plain SetupStep for every dependency in *deps*
        and return the list of the corresponding vertices.'''
        results = []
        for dep in deps:
            step = SetupStep(
                dep.name, dep.files, dep.versions, dep.target or target)
            known = self.vertices.get(step.name)
            if known is None:
                self.vertices[step.name] = step
            else:
                step = known.insert(step)
            results.append(self.vertices[step.name])
        return results
class DerivedSetsGenerator(PdbHandler):
    '''Generate the set of projects which are not dependency
    for any other project.'''

    def __init__(self):
        PdbHandler.__init__(self)
        # Projects no other project (seen so far) depends upon.
        self.roots = []
        # Projects known to be a prerequisite of some other project.
        self.nonroots = []

    def project(self, proj):
        '''Parser callback: reclassify *proj* and its prerequisites.'''
        for prereq in proj.prerequisite_names([ CONTEXT.host() ]):
            # Once something depends on it, it cannot be a root anymore.
            if prereq in self.roots:
                self.roots.remove(prereq)
            if prereq not in self.nonroots:
                self.nonroots.append(prereq)
        if proj.name not in self.nonroots and proj.name not in self.roots:
            self.roots.append(proj.name)
# =============================================================================
# Writers are used to save *Project* instances to persistent storage
# in different formats.
# =============================================================================
class NativeWriter(PdbHandler):
    '''Write *Project* objects as xml formatted text that can be loaded back
    by the script itself.'''

    def __init__(self):
        # No state of its own beyond the base handler.
        PdbHandler.__init__(self)
class Variable:
    '''Variable that ends up being defined in the workspace make
    fragment and thus in Makefile.'''
    def __init__(self, name, pairs):
        self.name = name
        # Current value; None until configured.
        self.value = None
        # Description shown when prompting the user.
        self.descr = None
        # Value used when running with default answers.
        self.default = None
        if isinstance(pairs, dict):
            for key, val in pairs.iteritems():
                if key == 'description':
                    self.descr = val
                elif key == 'value':
                    self.value = val
                elif key == 'default':
                    self.default = val
        else:
            # A non-dict initializer is used as both value and default.
            self.value = pairs
            self.default = self.value
        # Per-value constraints imposed on other variables
        # (see Multiple.constrain).
        self.constrains = {}
    def __str__(self):
        if self.value:
            return str(self.value)
        else:
            return ''
    def constrain(self, variables):
        # Hook for subclasses to narrow their value from other
        # *variables*; the base class does nothing.
        pass
    def configure(self, context):
        '''Set value to the string entered at the prompt.
        We used to define a *Pathname* base field as a pointer to a *Pathname*
        instance instead of a string to index context.environ[]. That only
        worked the first time (before dws.mk is created) and when the base
        functionality wasn't used later on. As a result we need to pass the
        *context* as a parameter here.

        Returns True when a value was newly obtained (from defaults or an
        interactive prompt) and should be persisted; False when the value
        was already set or came from the process environment.'''
        if self.name in os.environ:
            # In case the variable was set in the environment,
            # we do not print its value on the terminal, as a very
            # rudimentary way to avoid leaking sensitive information.
            self.value = os.environ[self.name]
        if self.value != None:
            return False
        log_info('\n' + self.name + ':')
        log_info(self.descr)
        if USE_DEFAULT_ANSWER:
            self.value = self.default
        else:
            default_prompt = ""
            if self.default:
                default_prompt = " [" + self.default + "]"
            self.value = prompt("Enter a string %s: " % default_prompt)
        log_info("%s set to %s" % (self.name, str(self.value)))
        return True
class HostPlatform(Variable):
    '''Variable whose value is the distribution the script runs on.'''
    def __init__(self, name, pairs=None):
        '''Initialize an HostPlatform variable. *pairs* is a dictionnary.'''
        Variable.__init__(self, name, pairs)
        # Release codename: taken from DISTRIB_CODENAME on LSB systems,
        # or built as distribution name + release number otherwise.
        self.dist_codename = None
    def configure(self, context):
        '''Set value to the distribution on which the script is running.'''
        if self.value != None:
            return False
        # sysname, nodename, release, version, machine
        sysname, _, _, version, _ = os.uname()
        if sysname == 'Darwin':
            self.value = 'Darwin'
        elif sysname == 'Linux':
            # Let's try to determine the host platform
            for version_path in [ '/etc/system-release', '/etc/lsb-release',
                                  '/etc/debian_version', '/proc/version' ]:
                if os.path.exists(version_path):
                    version = open(version_path)
                    line = version.readline()
                    while line != '':
                        for dist in [ 'Debian', 'Ubuntu', 'Fedora' ]:
                            look = re.match('.*' + dist + '.*', line)
                            if look:
                                self.value = dist
                            # Also match the all-lowercase spelling.
                            look = re.match('.*' + dist.lower() + '.*', line)
                            if look:
                                self.value = dist
                            if not self.dist_codename:
                                look = re.match(
                                    r'DISTRIB_CODENAME=\s*(\S+)', line)
                                if look:
                                    self.dist_codename = look.group(1)
                                elif self.value:
                                    # First time around the loop we will
                                    # match this pattern but not the previous
                                    # one that sets value to 'Fedora'.
                                    look = re.match(r'.*release (\d+)', line)
                                    if look:
                                        self.dist_codename = \
                                            self.value + look.group(1)
                        line = version.readline()
                    version.close()
                    if self.value:
                        break
        if self.value:
            self.value = self.value.capitalize()
        return True
class Pathname(Variable):
    '''Variable whose value is a filesystem path, possibly entered
    relative to another ("base") workspace variable.'''
    def __init__(self, name, pairs):
        Variable.__init__(self, name, pairs)
        # Name of the variable this pathname defaults to being under.
        self.base = None
        if 'base' in pairs:
            self.base = pairs['base']
    def configure(self, context):
        '''Generate an interactive prompt to enter a workspace variable
        *var* value and returns True if the variable value as been set.'''
        if self.value != None:
            return False
        # compute the default leaf directory from the variable name:
        # everything up to the first uppercase letter (e.g. 'buildTop'
        # yields 'build').
        leaf_dir = self.name
        for last in range(0, len(self.name)):
            if self.name[last] in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
                leaf_dir = self.name[:last]
                break
        dirname = self
        base_value = None
        off_base_chosen = False
        default = self.default
        # We buffer the text and delay writing to log because we can get
        # here to find out where the log resides!
        if self.name == 'logDir':
            global LOGGER_BUFFERING_COUNT
            LOGGER_BUFFERING_COUNT = LOGGER_BUFFERING_COUNT + 1
        log_info('\n%s:\n%s' % (self.name, self.descr))
        if (not default
            or (not ((':' in default) or default.startswith(os.sep)))):
            # If there are no default values or the default is not
            # an absolute pathname.
            if self.base:
                base_value = str(context.environ[self.base])
                if default != None:
                    # Because '' will evaluates to False
                    show_default = '*' + self.base + '*/' + default
                else:
                    show_default = '*' + self.base + '*/' + leaf_dir
                if not base_value:
                    # The base variable has no value yet; ask whether to
                    # configure the base or enter this path directly.
                    directly = 'Enter *' + self.name + '* directly ?'
                    offbase = 'Enter *' + self.base + '*, *' + self.name \
                        + '* will defaults to ' + show_default \
                        + ' ?'
                    selection= select_one(
                        '%s is based on *%s* by default. Would you like to ... '
                        % (self.name, self.base),
                        [ [ offbase ], [ directly ] ], False)
                    if selection == offbase:
                        off_base_chosen = True
                        if isinstance(context.environ[self.base], Pathname):
                            context.environ[self.base].configure(context)
                        base_value = str(context.environ[self.base])
            else:
                base_value = os.getcwd()
            if default != None:
                # Because '' will evaluates to False
                default = os.path.join(base_value, default)
            else:
                default = os.path.join(base_value, leaf_dir)
        if not default:
            default = os.getcwd()
        dirname = default
        if off_base_chosen:
            # Derive the value from the (just configured) base variable.
            base_value = str(context.environ[self.base])
            if self.default:
                dirname = os.path.join(base_value, self.default)
            else:
                dirname = os.path.join(base_value, leaf_dir)
        else:
            if not USE_DEFAULT_ANSWER:
                dirname = prompt("Enter a pathname [%s]: " % default)
            if dirname == '':
                dirname = default
        # Paths containing ':' (e.g. host:path) are left as-is.
        if not ':' in dirname:
            dirname = os.path.normpath(os.path.abspath(dirname))
        self.value = dirname
        if not ':' in dirname:
            if not os.path.exists(self.value):
                log_info(self.value + ' does not exist.')
                # We should not assume the pathname is a directory,
                # hence we do not issue a os.makedirs(self.value)
        # Now it should be safe to write to the logfile.
        if self.name == 'logDir':
            LOGGER_BUFFERING_COUNT = LOGGER_BUFFERING_COUNT - 1
        log_info('%s set to %s' % (self.name, self.value))
        return True
class Metainfo(Variable):
    '''Variable subclass used to tag meta-information entries in the index;
    it adds no behavior of its own on top of *Variable*.'''
    def __init__(self, name, pairs):
        Variable.__init__(self, name, pairs)
class Multiple(Variable):
    '''Workspace variable whose value is a list of items selected among
    a set of *choices*.'''
    def __init__(self, name, pairs):
        # Accept a space-separated string as shorthand for a list of values.
        if pairs and isinstance(pairs, str):
            pairs = pairs.split(' ')
        Variable.__init__(self, name, pairs)
        self.choices = {}
        if 'choices' in pairs:
            self.choices = pairs['choices']
    def __str__(self):
        return ' '.join(self.value)
    def configure(self, context):
        '''Generate an interactive prompt to enter a workspace variable
        *var* value and returns True if the variable value as been set.'''
        # There is no point to propose a choice already constraint by other
        # variables values.
        choices = []
        for key, descr in self.choices.items():
            if key not in self.value:
                choices += [ [key, descr] ]
        if len(choices) == 0:
            return False
        descr = self.descr
        if len(self.value) > 0:
            descr += " (constrained: " + ", ".join(self.value) + ")"
        self.value = select_multiple(descr, choices)
        # BUG FIX: message and arguments were previously passed as two
        # separate parameters; log_info expects a single formatted string
        # (compare Single.configure).
        log_info('%s set to %s' % (self.name, ', '.join(self.value)))
        self.choices = []
        return True
    def constrain(self, variables):
        '''Seed this variable's value from the *constrains* declared by
        other variables that have already been set.'''
        if not self.value:
            self.value = []
        for var in variables:
            if isinstance(variables[var], Variable) and variables[var].value:
                if isinstance(variables[var].value, list):
                    for val in variables[var].value:
                        if (val in variables[var].constrains
                            and self.name in variables[var].constrains[val]):
                            self.value += \
                                variables[var].constrains[val][self.name]
                else:
                    val = variables[var].value
                    if (val in variables[var].constrains
                        and self.name in variables[var].constrains[val]):
                        self.value += variables[var].constrains[val][self.name]
class Single(Variable):
    '''Workspace variable set to a single value chosen among *choices*.'''
    def __init__(self, name, pairs):
        Variable.__init__(self, name, pairs)
        self.choices = None
        if 'choices' in pairs:
            # Flatten the {name: description} mapping into the
            # [[name, description], ...] shape select_one expects.
            self.choices = []
            for key, descr in pairs['choices'].items():
                self.choices += [ [key, descr] ]
    def configure(self, context):
        '''Generate an interactive prompt to enter a workspace variable
        *var* value and returns True if the variable value as been set.'''
        if self.value:
            return False
        self.value = select_one(self.descr, self.choices)
        # BUG FIX: restored the missing space in the log message
        # ("set toVALUE" -> "set to VALUE"), consistent with the other
        # variable classes.
        log_info('%s set to %s' % (self.name, self.value))
        return True
    def constrain(self, variables):
        '''Derive this variable's value from the *constrains* declared by
        other variables that have already been set.'''
        for var in variables:
            if isinstance(variables[var], Variable) and variables[var].value:
                if isinstance(variables[var].value, list):
                    for val in variables[var].value:
                        if (val in variables[var].constrains
                            and self.name in variables[var].constrains[val]):
                            self.value = \
                                variables[var].constrains[val][self.name]
                else:
                    val = variables[var].value
                    if (val in variables[var].constrains
                        and self.name in variables[var].constrains[val]):
                        self.value = variables[var].constrains[val][self.name]
class Dependency:
    '''A prerequisite project and the files (headers, libraries, binaries,
    etc.) that must be present on the local system to satisfy it.

    *self.files* maps a file category (e.g. bin, lib, include) to a list of
    (pattern, resolved_path_or_None) tuples.'''

    def __init__(self, name, pairs):
        self.versions = { 'includes': [], 'excludes': [] }
        self.target = None
        self.files = {}
        self.name = name
        for key, val in pairs.items():
            if key == 'excludes':
                # SECURITY: eval() of text straight out of the index file.
                # Acceptable for trusted indices only; never feed this
                # untrusted input.
                self.versions['excludes'] = eval(val)
            elif key == 'includes':
                self.versions['includes'] = [ val ]
            elif key == 'target':
                # The index file loader will have generated fully-qualified
                # names to avoid key collisions when a project depends on both
                # proj and target/proj. We need to revert the name back to
                # the actual project name here.
                self.target = val
                self.name = os.sep.join(self.name.split(os.sep)[1:])
            elif isinstance(val, list):
                self.files[key] = [ (filename, None) for filename in val ]
            else:
                self.files[key] = [ (val, None) ]

    def populate(self, build_deps):
        '''Copy already-resolved file paths out of *build_deps*, a dictionary
        of project name to Dependency-like instances, into *self.files*.
        Only patterns that do not yet carry a path pick one up.'''
        if self.name in build_deps:
            deps = build_deps[self.name].files
            for dep in deps:
                if dep in self.files:
                    files = []
                    for look_pat, look_path in self.files[dep]:
                        found = False
                        if not look_path:
                            for pat, path in deps[dep]:
                                if pat == look_pat:
                                    files += [ (look_pat, path) ]
                                    found = True
                                    break
                        if not found:
                            files += [ (look_pat, look_path) ]
                    self.files[dep] = files

    def prerequisites(self, tags):
        '''A plain dependency is its own single prerequisite.'''
        return [ self ]
class Alternates(Dependency):
    '''Provides a set of dependencies where one of them is enough
    to fullfil the prerequisite condition. This is used to allow
    differences in packaging between distributions.'''

    def __init__(self, name, pairs):
        Dependency.__init__(self, name, pairs)
        # Maps a distribution tag to its list of alternate dependencies.
        self.by_tags = {}
        for key, val in pairs.items():
            self.by_tags[key] = []
            for dep_key, dep_val in val.items():
                self.by_tags[key] += [ Dependency(dep_key, dep_val) ]

    def __str__(self):
        return 'alternates: ' + str(self.by_tags)

    def populate(self, build_deps=None):
        '''Populate every alternate dependency from *build_deps*,
        a dictionary of project name to resolved dependencies.'''
        # BUG FIX: guard the None default -- Dependency.populate performs
        # an ``in build_deps`` membership test which raises on None.
        if build_deps is None:
            build_deps = {}
        for tag in self.by_tags:
            for dep in self.by_tags[tag]:
                dep.populate(build_deps)

    def prerequisites(self, tags):
        '''Returns prerequisites of the alternates matching one of *tags*.'''
        prereqs = []
        for tag in tags:
            if tag in self.by_tags:
                for dep in self.by_tags[tag]:
                    prereqs += dep.prerequisites(tags)
        return prereqs
class Maintainer:
    '''Contact information for the person maintaining a project.'''

    def __init__(self, fullname, email):
        self.fullname = fullname
        self.email = email

    def __str__(self):
        # RFC 2822-style "Full Name <address>" rendering.
        return '%s <%s>' % (self.fullname, self.email)
class Step:
    '''Node in the build DAG.'''
    # Scheduling priorities; lower values run earlier.
    configure = 1
    install_native = 2
    install_lang = 3
    install = 4
    update = 5
    setup = 6
    make = 7

    def __init__(self, priority, project_name):
        self.project = project_name
        self.prerequisites = []
        self.priority = priority
        self.name = self.__class__.genid(project_name)
        self.updated = False

    def __str__(self):
        return self.name

    def qualified_project_name(self, target_name = None):
        '''Project name, prefixed by *target_name* when one is given.'''
        if target_name:
            return os.path.join(target_name, self.project)
        return self.project

    @classmethod
    def genid(cls, project_name, target_name = None):
        '''Unique step identifier derived from the project name, the
        optional target and the concrete Step subclass.'''
        name = unicode(project_name.replace(os.sep, '_').replace('-', '_'))
        if target_name:
            name = target_name + '_' + name
        if issubclass(cls, ConfigureStep):
            return 'configure_' + name
        if issubclass(cls, InstallStep):
            return 'install_' + name
        if issubclass(cls, UpdateStep):
            return 'update_' + name
        if issubclass(cls, SetupStep):
            return name + 'Setup'
        return name
class TargetStep(Step):
    '''Step additionally qualified by an optional *target*, so the same
    project can appear once per target in the build graph.'''
    def __init__(self, prefix, project_name, target = None ):
        # Assign *target* before the base constructor runs.
        self.target = target
        Step.__init__(self, prefix, project_name)
        # Recompute the identifier, this time including the target.
        self.name = self.__class__.genid(project_name, target)
class ConfigureStep(TargetStep):
    '''The *configure* step in the development cycle initializes variables
    that drive the make step such as compiler flags, where files are installed,
    etc.'''
    def __init__(self, project_name, envvars, target = None):
        TargetStep.__init__(self, Step.configure, project_name, target)
        # Environment variables (name to Variable) this project configures.
        self.envvars = envvars
    def associate(self, target):
        '''Returns a copy of this step bound to *target*.'''
        return ConfigureStep(self.project, self.envvars, target)
    def run(self, context):
        # *updated* reflects config_var's return value -- presumably True
        # when at least one variable changed; confirm against config_var.
        self.updated = config_var(context, self.envvars)
class InstallStep(Step):
    '''The *install* step in the development cycle installs prerequisites
    to a project.'''

    def __init__(self, project_name, managed = None, target = None,
                 priority=Step.install):
        Step.__init__(self, priority, project_name)
        # BUG FIX: the previous test, ``managed and len(managed) == 0``,
        # could never evaluate True, so an absent or empty *managed* left
        # self.managed set to None which later crashes insert()
        # (None += list). Default to managing the project itself.
        if not managed:
            self.managed = [ project_name ]
        else:
            self.managed = managed
        self.target = target

    def insert(self, install_step):
        '''Merge the packages managed by *install_step* into this step.'''
        if install_step.managed:
            self.managed += install_step.managed

    def run(self, context):
        # Subclasses override with the actual package-manager invocation.
        raise Error("Does not know how to install '%s' on %s for %s"
                    % (str(self.managed), context.host(), self.name))

    def info(self):
        '''Returns (info, unmanaged): packages known to the package manager
        and those it cannot find. Subclasses override.'''
        raise Error(
            "Does not know how to search package manager for '%s' on %s for %s"
            % (str(self.managed), CONTEXT.host(), self.name))
class AptInstallStep(InstallStep):
    ''' Install a prerequisite to a project through apt (Debian, Ubuntu).'''
    def __init__(self, project_name, target = None):
        managed = [ project_name ]
        packages = managed
        # Python prerequisites are packaged as "pythonX.Y-<name>" on Debian.
        if target and target.startswith('python'):
            packages = [ target + '-' + man for man in managed ]
        InstallStep.__init__(self, project_name, packages,
                             priority=Step.install_native)
    def run(self, context):
        '''Install the managed packages through apt-get (requires admin).'''
        # Add DEBIAN_FRONTEND=noninteractive such that interactive
        # configuration of packages do not pop up in the middle
        # of installation. We are going to update the configuration
        # in /etc afterwards anyway.
        # Emit only one shell command so that we can find out what the script
        # tried to do when we did not get priviledge access.
        shell_command(['sh', '-c',
                       '"/usr/bin/apt-get update'\
                       ' && DEBIAN_FRONTEND=noninteractive /usr/bin/apt-get -y install '
                       + ' '.join(self.managed) + '"'],
                      admin=True)
        self.updated = True
    def info(self):
        '''Returns (info, unmanaged): packages apt knows about and those
        it cannot find.'''
        info = []
        unmanaged = []
        try:
            # apt-cache showpkg will return 0 even when the package cannot
            # be found.
            cmdline = ['apt-cache', 'showpkg' ] + self.managed
            manager_output = subprocess.check_output(
                ' '.join(cmdline), shell=True, stderr=subprocess.STDOUT)
            found = False
            for line in manager_output.splitlines():
                if re.match('^Package:', line):
                    # Apparently we are not able to get error messages
                    # from stderr here ...
                    found = True
            if not found:
                unmanaged = self.managed
            else:
                info = self.managed
        except subprocess.CalledProcessError:
            unmanaged = self.managed
        return info, unmanaged
class DarwinInstallStep(InstallStep):
    ''' Install a prerequisite to a project through pkg (Darwin, OSX).'''
    def __init__(self, project_name, filenames, target = None):
        InstallStep.__init__(self, project_name, managed=filenames,
                             priority=Step.install_native)
    def run(self, context):
        '''Mount *image*, a pathnme to a .dmg file and use the Apple installer
        to install the *pkg*, a .pkg package onto the platform through the Apple
        installer.'''
        for filename in self.managed:
            try:
                volume = None
                if filename.endswith('.dmg'):
                    # Mount the disk image; the volume name mirrors the
                    # image basename under /Volumes.
                    base, ext = os.path.splitext(filename)
                    volume = os.path.join('/Volumes', os.path.basename(base))
                    shell_command(['hdiutil', 'attach', filename])
                target = context.value('darwinTargetVolume')
                if target != 'CurrentUserHomeDirectory':
                    message = 'ATTENTION: You need administrator privileges '\
                      + 'on the local machine to execute the following cmmand\n'
                    log_info(message)
                    admin = True
                else:
                    admin = False
                pkg = filename
                if not filename.endswith('.pkg'):
                    # Locate the single .pkg inside the mounted image.
                    pkgs = find_files(volume, r'\.pkg')
                    if len(pkgs) != 1:
                        raise RuntimeError(
                            'ambiguous: not exactly one .pkg to install')
                    pkg = pkgs[0]
                shell_command(['installer', '-pkg', os.path.join(volume, pkg),
                              '-target "' + target + '"'], admin)
                if filename.endswith('.dmg'):
                    shell_command(['hdiutil', 'detach', volume])
            except:
                # NOTE(review): bare except deliberately converts any failure
                # (including detach errors) into a single Error; original
                # exception context is lost.
                raise Error('failure to install darwin package ' + filename)
        self.updated = True
class DpkgInstallStep(InstallStep):
    ''' Install a prerequisite to a project through dpkg (Debian, Ubuntu).'''
    def __init__(self, project_name, filenames, target = None):
        InstallStep.__init__(self, project_name, managed=filenames,
                             priority=Step.install_native)
    def run(self, context):
        '''Install the managed .deb files with dpkg (requires admin).'''
        # NOTE(review): the filenames are joined into a single argv entry;
        # this only works if shell_command re-splits its arguments or a
        # single file is passed -- verify against shell_command.
        shell_command(['dpkg', '-i', ' '.join(self.managed)], admin=True)
        self.updated = True
class GemInstallStep(InstallStep):
    '''Install a prerequisite to a project through gem (Ruby).'''
    def __init__(self, project_name, versions=None, target=None):
        install_name = project_name
        # Pin the first explicitly included version when one is given.
        if (versions and 'includes' in versions
            and len(versions['includes']) > 0):
            install_name = '%s==%s' % (project_name, versions['includes'][0])
        InstallStep.__init__(self, project_name, [install_name],
                             priority=Step.install_lang)
    def collect(self, context):
        """Collect prerequisites from Gemfile"""
        sys.stdout.write('''XXX collect from Gemfile NotYetImplemented!\n''')
    def run(self, context):
        '''Install the managed gems (requires admin).'''
        shell_command(
            [find_gem(context), 'install' ] + self.managed, admin=True)
        self.updated = True
    def info(self):
        '''Returns (info, unmanaged): gems the manager can find and those
        it cannot.'''
        info = []
        unmanaged = []
        try:
            # There is no direct "gem info" for remote gems; search is the
            # closest we get and may match more packages than asked,
            # returning success inadvertently.
            shell_command([find_gem(CONTEXT), 'search' ] + self.managed)
            info = self.managed
        except Error:
            unmanaged = self.managed
        return info, unmanaged
class MacPortInstallStep(InstallStep):
    ''' Install a prerequisite to a project through Macports.'''
    def __init__(self, project_name, target = None):
        managed = [ project_name ]
        packages = managed
        if target:
            # MacPorts names python packages "pyXY-<name>".
            look = re.match(r'python(\d(\.\d)?)?', target)
            if look:
                if look.group(1):
                    prefix = 'py%s-' % look.group(1).replace('.', '')
                else:
                    prefix = 'py27-'
                packages = []
                for man in managed:
                    packages += [ prefix + man ]
        darwin_names = {
            # translation of package names. It is simpler than
            # creating an <alternates> node even if it look more hacky.
            'libicu-dev': 'icu' }
        pre_packages = packages
        packages = []
        for package in pre_packages:
            if package in darwin_names:
                packages += [ darwin_names[package] ]
            else:
                packages += [ package ]
        InstallStep.__init__(self, project_name, packages,
                             priority=Step.install_native)
    def run(self, context):
        '''Install the managed ports (requires admin).'''
        shell_command(['/opt/local/bin/port', 'install' ] + self.managed,
                      admin=True)
        self.updated = True
    def info(self):
        '''Returns (info, unmanaged): ports MacPorts knows about and those
        it cannot find.'''
        info = []
        unmanaged = []
        try:
            shell_command(['port', 'info' ] + self.managed)
            info = self.managed
        except Error:
            unmanaged = self.managed
        return info, unmanaged
class NpmInstallStep(InstallStep):
    ''' Install a prerequisite to a project through npm (Node.js manager).'''
    def __init__(self, project_name, target = None):
        InstallStep.__init__(self, project_name, [project_name ],
                             priority=Step.install_lang)
    def _manager(self):
        '''Path to the npm executable inside *buildTop*.'''
        # nodejs is not available as a package on Fedora 17 or rather,
        # it was until the repo site went down.
        find_npm(CONTEXT)
        return os.path.join(CONTEXT.value('buildTop'), 'bin', 'npm')
    def run(self, context):
        '''Install the managed node packages (requires admin).'''
        shell_command([self._manager(), 'install' ] + self.managed, admin=True)
        self.updated = True
    def info(self):
        '''Returns (info, unmanaged): packages npm can find and those
        it cannot.'''
        info = []
        unmanaged = []
        try:
            shell_command([self._manager(), 'search' ] + self.managed)
            info = self.managed
        except Error:
            unmanaged = self.managed
        return info, unmanaged
class PipInstallStep(InstallStep):
    ''' Install a prerequisite to a project through pip (Python eggs).'''
    def __init__(self, project_name, versions=None, target=None):
        install_name = project_name
        # Pin the first explicitly included version when one is given.
        if (versions and 'includes' in versions
            and len(versions['includes']) > 0):
            install_name = '%s==%s' % (project_name, versions['includes'][0])
        InstallStep.__init__(self, project_name, [install_name],
                             priority=Step.install_lang)
    def collect(self, context):
        """Collect prerequisites from requirements.txt"""
        filepath = context.src_dir(
            os.path.join(self.project, 'requirements.txt'))
        with open(filepath) as file_obj:
            for line in file_obj.readlines():
                # Matches "name", "name==version" and "name>=version".
                look = re.match('([\w\-_]+)((>=|==)(\S+))?', line)
                if look:
                    prerequisite = look.group(1)
                    # Emit a <dep> XML fragment for each prerequisite found.
                    sys.stdout.write('''<dep name="%s">
    <lib>.*/(%s)/__init__.py</lib>
</dep>
''' % (prerequisite, prerequisite))
    def run(self, context):
        '''Install the managed python packages through pip.'''
        # In most cases, when installing through pip, we should be running
        # under virtualenv. This is only true for development machines though.
        admin = False
        if not 'VIRTUAL_ENV' in os.environ:
            admin = True
        shell_command([find_pip(context), 'install' ] + self.managed,
                      admin=admin)
        self.updated = True
    def info(self):
        '''Returns (info, unmanaged): packages pip can find and those
        it cannot.'''
        info = []
        unmanaged = []
        try:
            # XXX There are no pip info command, search is the closest we get.
            # Pip search might match other packages and thus returns zero
            # inadvertently but it is the closest we get so far.
            shell_command([find_pip(CONTEXT), 'search' ] + self.managed)
            info = self.managed
        except Error:
            unmanaged = self.managed
        return info, unmanaged
class RpmInstallStep(InstallStep):
    ''' Install a prerequisite to a project through rpm (Fedora).'''
    def __init__(self, project_name, filenames, target = None):
        InstallStep.__init__(self, project_name, managed=filenames,
                             priority=Step.install_native)
    def run(self, context):
        '''Install the managed .rpm files (requires admin).'''
        # --nodeps because rpm looks stupid and can't figure out that
        # the vcd package provides the libvcd.so required by the executable.
        shell_command(['rpm', '-i', '--force',
                       ' '.join(self.managed), '--nodeps'],
                      admin=True)
        self.updated = True
class YumInstallStep(InstallStep):
    ''' Install a prerequisite to a project through yum (Fedora).'''
    def __init__(self, project_name, target = None):
        managed = [project_name ]
        packages = managed
        if target:
            # Python prerequisites are packaged as "<target>-<name>".
            if target.startswith('python'):
                packages = []
                for man in managed:
                    packages += [ target + '-' + man ]
        # Translate Debian-style package names to their Fedora equivalents.
        fedora_names = {
            'libbz2-dev': 'bzip2-devel',
            'python-all-dev': 'python-devel',
            'zlib1g-dev': 'zlib-devel' }
        pre_packages = packages
        packages = []
        for package in pre_packages:
            if package in fedora_names:
                packages += [ fedora_names[package] ]
            elif package.endswith('-dev'):
                # Debian "-dev" suffix maps to Fedora "-devel".
                packages += [ package + 'el' ]
            else:
                packages += [ package ]
        InstallStep.__init__(self, project_name, packages,
                             priority=Step.install_native)
    def run(self, context):
        '''Install the managed packages through yum (requires admin);
        raises Error when yum reports a package as unavailable.'''
        shell_command(['yum', '-y', 'update'], admin=True)
        filtered = shell_command(['yum', '-y', 'install' ] + self.managed,
            admin=True, pat='No package (.*) available')
        if len(filtered) > 0:
            look = re.match('No package (.*) available', filtered[0])
            if look:
                unmanaged = look.group(1).split(' ')
                if len(unmanaged) > 0:
                    raise Error("yum cannot install " + ' '.join(unmanaged))
        self.updated = True
    def info(self):
        '''Returns (info, unmanaged): packages yum knows about and those
        it cannot find.'''
        info = []
        unmanaged = []
        try:
            filtered = shell_command(['yum', 'info' ] + self.managed,
                pat=r'Name\s*:\s*(\S+)')
            if filtered:
                info = self.managed
            else:
                unmanaged = self.managed
        except Error:
            unmanaged = self.managed
        return info, unmanaged
class BuildStep(TargetStep):
    '''Build a project running make, executing a script, etc.'''

    def __init__(self, project_name, target = None, force_update = True):
        TargetStep.__init__(self, Step.make, project_name, target)
        self.force_update = force_update

    def _should_run(self):
        '''True when the build is forced or any prerequisite was updated.'''
        if self.force_update:
            return True
        return any(prereq.updated for prereq in self.prerequisites)
class MakeStep(BuildStep):
    '''The *make* step in the development cycle builds executable binaries,
    libraries and other files necessary to install the project.'''
    def associate(self, target):
        '''Returns a copy of this step bound to *target*.'''
        return MakeStep(self.project, target)
    def run(self, context):
        '''Run make on the project Makefile when prerequisites changed
        or the build is forced; a no-op when no Makefile exists.'''
        if self._should_run():
            # We include the configfile (i.e. variable=value) before
            # the project Makefile for convenience. Adding a statement
            # include $(shell dws context) at the top of the Makefile
            # is still a good idea to permit "make" from the command line.
            # Otherwise it just duplicates setting some variables.
            context = localize_context(context, self.project, self.target)
            makefile = context.src_dir(os.path.join(self.project, 'Makefile'))
            if os.path.isfile(makefile):
                cmdline = ['make',
                           '-f', context.config_filename,
                           '-f', makefile]
                # If we do not set PATH to *bin_build_dir*:*binDir*:${PATH}
                # and the install directory is not in PATH, then we cannot
                # build a package for drop because 'make dist' depends
                # on executables installed in *binDir* (dws, dbldpkg, ...)
                # that are not linked into *binBuildDir* at the time
                # 'cd drop ; make dist' is run. Note that it is not an issue
                # for other projects since those can be explicitely depending
                # on drop as a prerequisite.
                # XXX We should only have to include binBuildDir is PATH
                # but that fails because of "/usr/bin/env python" statements
                # and other little tools like hostname, date, etc.
                shell_command(cmdline + context.targets + context.overrides,
                              search_path=[context.bin_build_dir()]
                              + context.search_path('bin'))
            self.updated = True
class ShellStep(BuildStep):
    '''Run a shell script to *make* a step in the development cycle.'''
    def __init__(self, project_name, script, target = None):
        BuildStep.__init__(self, project_name, target)
        # Shell snippet to execute (text of the script body).
        self.script = script
    def associate(self, target):
        '''Returns a copy of this step bound to *target*.'''
        return ShellStep(self.project, self.script, target)
    def run(self, context):
        '''Write the script to a temporary file, prefixed by the workspace
        configuration, and execute it with "sh -x -e".'''
        if self._should_run() and self.script:
            context = localize_context(context, self.project, self.target)
            script = tempfile.NamedTemporaryFile(mode='w+t', delete=False)
            script.write('#!/bin/sh\n\n')
            # Source the configuration so variables are in scope.
            script.write('. ' + context.config_filename + '\n\n')
            script.write(self.script)
            script.close()
            try:
                shell_command([ 'sh', '-x', '-e', script.name ],
                              search_path=[context.bin_build_dir()]
                              + context.search_path('bin'))
            finally:
                # BUG FIX: remove the temporary script even when the command
                # fails, so failed builds do not leak files in TMPDIR.
                os.remove(script.name)
            self.updated = True
class SetupStep(TargetStep):
    '''The *setup* step in the development cycle installs third-party
    prerequisites. This steps gathers all the <dep> statements referring
    to a specific prerequisite.'''
    def __init__(self, project_name, files, versions=None, target=None):
        '''We keep a reference to the project because we want to decide
        to add native installer/made package/patch right after run'''
        TargetStep.__init__(self, Step.setup, project_name, target)
        # Maps a file category to a list of (pattern, path) tuples.
        self.files = files
        self.updated = False
        if versions:
            self.versions = versions
        else:
            self.versions = {'includes': [], 'excludes': [] }
    def insert(self, setup):
        '''We only add prerequisites from *dep* which are not already present
        in *self*. This is important because *find_prerequisites* will initialize
        tuples (name_pat,absolute_path).'''
        # *files* accumulates only the entries actually added, and is
        # returned as a new SetupStep describing the increment.
        files = {}
        for dirname in setup.files:
            if not dirname in self.files:
                self.files[dirname] = setup.files[dirname]
                files[dirname] = setup.files[dirname]
            else:
                for prereq_1 in setup.files[dirname]:
                    found = False
                    for prereq_2 in self.files[dirname]:
                        if prereq_2[0] == prereq_1[0]:
                            # Same pattern already tracked; skip duplicate.
                            found = True
                            break
                    if not found:
                        self.files[dirname] += [ prereq_1 ]
                        if not dirname in files:
                            files[dirname] = []
                        files[dirname] += [ prereq_1 ]
        self.versions['excludes'] += setup.versions['excludes']
        self.versions['includes'] += setup.versions['includes']
        return SetupStep(self.project, files, self.versions, self.target)
    def run(self, context):
        '''Resolve and link prerequisite files; returns True when all
        prerequisites were found.'''
        self.files, complete = find_prerequisites(
            self.files, self.versions, self.target)
        if complete:
            self.files, complete = link_prerequisites(
                self.files, self.versions, self.target)
        self.updated = True
        return complete
class UpdateStep(Step):
    '''The *update* step in the development cycle fetches files and source
    repositories from remote server onto the local system.'''
    # Class-wide record of project name -> revision for sources that
    # were actually updated during this run.
    updated_sources = {}
    def __init__(self, project_name, rep, fetches):
        Step.__init__(self, Step.update, project_name)
        # *rep* is a Repository (or None); *fetches* maps URL -> metadata.
        self.rep = rep
        self.fetches = fetches
        self.updated = False
    def run(self, context):
        '''Fetch remote files then update the source repository,
        applying local patches afterwards.'''
        try:
            fetch(context, self.fetches)
        except IOError:
            raise Error("unable to fetch " + str(self.fetches))
        if self.rep:
#            try:
            self.updated = self.rep.update(self.project, context)
            if self.updated:
                UpdateStep.updated_sources[self.project] = self.rep.rev
            self.rep.apply_patches(self.project, context)
#            except:
#               raise Error('cannot update repository or apply patch for %s\n'
#                             % str(self.project))
class Repository:
    '''All prerequisites information to install a project
    from a source control system.'''
    # Pattern of directory names identifying a source control checkout.
    dirPats = r'(\.git|\.svn|CVS)'
    def __init__(self, sync, rev):
        self.type = None
        self.url = sync
        self.rev = rev
    def __str__(self):
        result = '\t\tsync repository from ' + self.url + '\n'
        if self.rev:
            result = result + '\t\t\tat revision' + str(self.rev) + '\n'
        else:
            result = result + '\t\t\tat head\n'
        return result
    def apply_patches(self, name, context):
        '''Apply *.patch files found in the project's patch directory.'''
        if os.path.isdir(context.patch_dir(name)):
            patches = []
            for pathname in os.listdir(context.patch_dir(name)):
                if pathname.endswith('.patch'):
                    patches += [ pathname ]
            if len(patches) > 0:
                log_info('######## patching ' + name + '...')
                prev = os.getcwd()
                os.chdir(context.src_dir(name))
                # NOTE(review): '< path' is passed as an argv entry; this
                # relies on shell_command running through a shell so the
                # redirection takes effect -- verify against shell_command.
                shell_command(['patch',
                               '< ' + os.path.join(context.patch_dir(name),
                                            '*.patch')])
                os.chdir(prev)
    @staticmethod
    def associate(pathname):
        '''This methods returns a boiler plate *Repository* that does
        nothing in case an empty sync url is specified. This is different
        from an absent sync field which would use rsync as a "Repository".
        '''
        rev = None
        if pathname and len(pathname) > 0:
            repos = { '.git': GitRepository,
                      '.svn': SvnRepository }
            sync = pathname
            look = search_repo_pat(pathname)
            if look:
                sync = look.group(1)
                rev = look.group(4)
            path_list = sync.split(os.sep)
            for i in range(0, len(path_list)):
                for ext, repo_class in repos.iteritems():
                    if path_list[i].endswith(ext):
                        # Truncate the path at the component carrying the
                        # source-control extension.
                        if path_list[i] == ext:
                            i = i - 1
                        return repo_class(os.sep.join(path_list[:i + 1]), rev)
            # We will guess, assuming the repository is on the local system
            for ext, repo_class in repos.iteritems():
                if os.path.isdir(os.path.join(pathname, ext)):
                    return repo_class(pathname, rev)
            return RsyncRepository(pathname, rev)
        return Repository("", rev)
    def update(self, name, context, force=False):
        '''Base implementation never updates anything.'''
        return False
class GitRepository(Repository):
    '''All prerequisites information to install a project
    from a git source control repository.'''
    def apply_patches(self, name, context):
        '''Apply patches that can be found in the *obj_dir* for the project.'''
        prev = os.getcwd()
        if os.path.isdir(context.patch_dir(name)):
            patches = []
            for pathname in os.listdir(context.patch_dir(name)):
                if pathname.endswith('.patch'):
                    patches += [ pathname ]
            if len(patches) > 0:
                log_info('######## patching ' + name + '...')
                os.chdir(context.src_dir(name))
                # 'git am -3 -k' applies mailbox-formatted patches with
                # three-way merge, keeping the subject intact.
                shell_command([ find_git(context), 'am', '-3', '-k',
                                os.path.join(context.patch_dir(name),
                                             '*.patch')])
        os.chdir(prev)
    def push(self, pathname):
        '''Push local commits in *pathname* to the remote repository.'''
        prev = os.getcwd()
        os.chdir(pathname)
        shell_command([ find_git(CONTEXT), 'push' ])
        os.chdir(prev)
    def tarball(self, name, version='HEAD'):
        '''Create a <name>-<version>.tar.bz2 archive of the checkout in
        the current working directory.'''
        local = CONTEXT.src_dir(name)
        gitexe = find_git(CONTEXT)
        cwd = os.getcwd()
        os.chdir(local)
        if version == 'HEAD':
            shell_command([ gitexe, 'rev-parse', version ])
        prefix = name + '-' + version
        output_name = os.path.join(cwd, prefix + '.tar.bz2')
        shell_command([ gitexe, 'archive', '--prefix', prefix + os.sep,
                      '-o', output_name, 'HEAD'])
        os.chdir(cwd)
    def update(self, name, context, force=False):
        '''Clone or fetch the repository then check out *self.rev* (or the
        branch head). Returns True when the local checkout changed.'''
        # If the path to the remote repository is not absolute,
        # derive it from *remoteTop*. Binding any sooner will
        # trigger a potentially unnecessary prompt for remote_cache_path.
        if not ':' in self.url and context:
            self.url = context.remote_src_path(self.url)
        if not name:
            # Derive the project name from the URL relative to remoteSrcTop.
            prefix = context.value('remoteSrcTop')
            if not prefix.endswith(':') and not prefix.endswith(os.sep):
                prefix = prefix + os.sep
            name = self.url.replace(prefix, '')
        if name.endswith('.git'):
            name = name[:-4]
        local = context.src_dir(name)
        pulled = False
        updated = False
        cwd = os.getcwd()
        git_executable = find_git(context)
        if not os.path.exists(os.path.join(local, '.git')):
            shell_command([ git_executable, 'clone', self.url, local])
            updated = True
        else:
            pulled = True
            os.chdir(local)
            cmdline = ' '.join([git_executable, 'fetch'])
            log_info(cmdline)
            # Scan fetch output for an "updating" line to detect changes.
            cmd = subprocess.Popen(cmdline,
                                   shell=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
            line = cmd.stdout.readline()
            while line != '':
                log_info(line)
                look = re.match('^updating', line)
                if look:
                    updated = True
                line = cmd.stdout.readline()
            cmd.wait()
            if cmd.returncode != 0:
                # It is ok to get an error in case we are running
                # this on the server machine.
                pass
        cof = '-m'
        if force:
            cof = '-f'
        cmd = [ git_executable, 'checkout', cof ]
        if self.rev:
            cmd += [ self.rev ]
        if self.rev or pulled:
            os.chdir(local)
            shell_command(cmd)
        # Print HEAD
        if updated:
            # Just the commit: cmd = [git_executable, 'rev-parse', 'HEAD']
            cmd = [git_executable, 'log', '-1', '--pretty=oneline' ]
            os.chdir(local)
            logline = subprocess.check_output(cmd)
            log_info(logline)
            # Record the commit sha as the current revision.
            self.rev = logline.split(' ')[0]
        os.chdir(cwd)
        return updated
class SvnRepository(Repository):
    '''All prerequisites information to install a project
    from a svn source control repository.'''
    def __init__(self, sync, rev):
        Repository.__init__(self, sync, rev)
    def update(self, name, context, force=False):
        '''Check out or update the working copy; always reports True.'''
        # If the path to the remote repository is not absolute,
        # derive it from *remoteTop*. Binding any sooner will
        # trigger a potentially unnecessary prompt for remote_cache_path.
        if not ':' in self.url and context:
            self.url = context.remote_src_path(self.url)
        local = context.src_dir(name)
        if not os.path.exists(os.path.join(local, '.svn')):
            shell_command(['svn', 'co', self.url, local])
        else:
            cwd = os.getcwd()
            os.chdir(local)
            shell_command(['svn', 'update'])
            os.chdir(cwd)
        # \todo figure out how any updates is signaled by svn.
        return True
class RsyncRepository(Repository):
    '''All prerequisites information to install a project
    from a remote directory.'''
    def __init__(self, sync, rev):
        Repository.__init__(self, sync, rev)
    def update(self, name, context, force=False):
        '''Mirror the remote directory locally; always reports True.'''
        # If the path to the remote repository is not absolute,
        # derive it from *remoteTop*. Binding any sooner will
        # trigger a potentially unnecessary prompt for remote_cache_path.
        if not ':' in self.url and context:
            self.url = context.remote_src_path(self.url)
        fetch(context, {self.url: ''}, force=True)
        return True
class InstallFlavor:
    '''All information necessary to install a project on the local system.'''
    def __init__(self, name, pairs):
        rep = None
        fetches = {}
        variables = {}
        # Maps dependency name to Dependency/Alternates instances.
        self.deps = {}
        self.make = None
        for key, val in pairs.iteritems():
            if isinstance(val, Variable):
                variables[key] = val
                # XXX Hack? We add the variable in the context here
                # because it might be needed by the setup step even though
                # no configure step has run.
                if CONTEXT and not key in CONTEXT.environ:
                    CONTEXT.environ[key] = val
            elif key == 'sync':
                rep = Repository.associate(val)
            elif key == 'shell':
                self.make = ShellStep(name, val)
            elif key == 'fetch':
                # Normalize to a list of {url: ..., ...} blocks, keyed
                # by URL in *fetches* with the remaining metadata as value.
                if isinstance(val, list):
                    blocks = val
                else:
                    blocks = [ val ]
                for blk in blocks:
                    file_url = blk['url']
                    blk.pop('url')
                    fetches[file_url] = blk
            elif key == 'alternates':
                self.deps[key] = Alternates(key, val)
            else:
                self.deps[key] = Dependency(key, val)
        self.update = UpdateStep(name, rep, fetches)
        self.configure = ConfigureStep(name, variables, None)
        if not self.make:
            # Default to a plain make invocation when no shell script given.
            self.make = MakeStep(name)
    def __str__(self):
        result = ''
        if len(self.update.fetches) > 0:
            result = result + '\t\tfetch archives\n'
            for archive in self.update.fetches:
                result = result + '\t\t\t' + archive + '\n'
        if len(self.deps) > 0:
            result = result + '\t\tdependencies from local system\n'
            for dep in self.deps:
                result = result + '\t\t\t' + str(dep) + '\n'
        if len(self.configure.envvars) > 0:
            result = result + '\t\tenvironment variables\n'
            for var in self.configure.envvars:
                result = result + '\t\t\t' + str(var) + '\n'
        return result
    def fetches(self):
        '''Returns the URL -> metadata mapping of files to fetch.'''
        return self.update.fetches
    def prerequisites(self, tags):
        '''Returns *Dependency* instances matching *tags*.'''
        prereqs = []
        for dep in self.deps.itervalues():
            prereqs += dep.prerequisites(tags)
        return prereqs
    def prerequisite_names(self, tags):
        '''same as *prerequisites* except only returns the names
        of the prerequisite projects.'''
        names = []
        for dep in self.deps.itervalues():
            names += [ prereq.name for prereq in dep.prerequisites(tags) ]
        return names
    def vars(self):
        '''Returns the environment variables configured by this flavor.'''
        return self.configure.envvars
class Project:
    '''Definition of a project with its prerequisites.'''
    def __init__(self, name, pairs):
        self.name = name
        self.title = None
        self.descr = None
        # *packages* maps a set of tags to *Package* instances. A *Package*
        # contains dependencies to install a project from a binary distribution.
        # Default update.rep is relative to *remoteSrcTop*. We initialize
        # to a relative path instead of an absolute path here such that it
        # does not trigger a prompt for *remoteSrcTop* until we actually
        # do the repository pull.
        self.packages = {}
        self.patch = None
        self.repository = None
        self.installed_version = None
        # NOTE(review): self.version and self.maintainer are only bound when
        # the corresponding keys appear in *pairs*; accessing them otherwise
        # raises AttributeError -- confirm callers guard for that.
        for key, val in pairs.iteritems():
            if key == 'title':
                self.title = val
            elif key == 'version':
                self.version = val
            elif key == 'description':
                self.descr = val
            elif key == 'maintainer':
                self.maintainer = Maintainer(val['personname'], val['email'])
            elif key == 'patch':
                self.patch = InstallFlavor(name, val)
                if not self.patch.update.rep:
                    self.patch.update.rep = Repository.associate(name+'.git')
            elif key == 'repository':
                self.repository = InstallFlavor(name, val)
                if not self.repository.update.rep:
                    self.repository.update.rep = Repository.associate(name+'.git')
            else:
                # Any other key is a tag naming a binary package flavor.
                self.packages[key] = InstallFlavor(name, val)
    def __str__(self):
        result = 'project ' + self.name + '\n' \
            + '\t' + str(self.title) + '\n' \
            + '\tfound version ' + str(self.installed_version) \
            + ' installed locally\n'
        if len(self.packages) > 0:
            result = result + '\tpackages\n'
            for package_name in self.packages:
                result = result + '\t[' + package_name + ']\n'
                result = result + str(self.packages[package_name]) + '\n'
        if self.patch:
            result = result + '\tpatch\n' + str(self.patch) + '\n'
        if self.repository:
            result = result + '\trepository\n' + str(self.repository) + '\n'
        return result
    def prerequisites(self, tags):
        '''returns a set of *Dependency* instances for the project based
        on the provided tags. It enables choosing between alternate
        prerequisites set based on the local machine operating system, etc.'''
        prereqs = []
        if self.repository:
            prereqs += self.repository.prerequisites(tags)
        if self.patch:
            prereqs += self.patch.prerequisites(tags)
        for tag in self.packages:
            if tag in tags:
                prereqs += self.packages[tag].prerequisites(tags)
        return prereqs
    def prerequisite_names(self, tags):
        '''same as *prerequisites* except only returns the names
        of the prerequisite projects.'''
        names = []
        for prereq in self.prerequisites(tags):
            names += [ prereq.name ]
        return names
class XMLDbParser(xml.sax.ContentHandler):
    '''Parse a project index database stored as an XML file on disc
    and generate callbacks on a PdbHandler. The handler will update
    its state based on the callback sequence.'''

    # Global Constants for the database parser
    tagDb = 'projects'
    tagProject = 'project'
    # Regex that finds a <project ...> opening tag and captures its name.
    tagPattern = '.*<' + tagProject + r'\s+name="(.*)"'
    trailerTxt = '</' + tagDb + '>'
    # For dbldpkg
    tagPackage = 'package'
    tagTag = 'tag'
    tagFetch = 'fetch'
    tagHash = 'sha1'

    def __init__(self, context):
        """*context* is kept for configuration lookups during parsing."""
        xml.sax.ContentHandler.__init__(self)
        self.context = context
        # Handler receiving project() / end_parse() callbacks; set by parse().
        self.handler = None
        # stack used to reconstruct the tree.
        self.nodes = []
        # Character data accumulated for the element currently being parsed.
        self.text = ""

    def startElement(self, name, attrs):
        '''Start populating an element.

        Pushes a (tag_name, {key: attributes}) pair on the *nodes* stack.
        The key is the element's "name" attribute when present (prefixed
        by its "target" attribute if any), else the tag name itself.'''
        self.text = ""
        key = name
        elems = {}
        for attr in attrs.keys():
            if attr == 'name':
                # \todo have to conserve name if just for fetches.
                # key = Step.genid(Step, attrs['name'], target)
                if 'target' in attrs.keys():
                    target = attrs['target']
                    key = os.path.join(target, attrs['name'])
                else:
                    key = attrs['name']
            else:
                elems[attr] = attrs[attr]
        self.nodes += [ (name, {key:elems}) ]

    def characters(self, characters):
        # Accumulate character data; SAX may deliver it in several chunks.
        self.text += characters

    def endElement(self, name):
        '''Once the element is fully populated, call back the simplified
        interface on the handler.

        Pops child nodes off the stack until this element's own node is
        found, merging children into an *aggregate* dict (or the stripped
        text when the element contained only character data).'''
        node_name, pairs = self.nodes.pop()
        self.text = self.text.strip()
        if self.text:
            aggregate = self.text
            self.text = ""
        else:
            aggregate = {}
        while node_name != name:
            # We are keeping the structure as simple as possible,
            # only introducing lists when there are more than one element.
            for k in pairs.keys():
                if not k in aggregate:
                    aggregate[k] = pairs[k]
                elif isinstance(aggregate[k], list):
                    if isinstance(pairs[k], list):
                        aggregate[k] += pairs[k]
                    else:
                        aggregate[k] += [ pairs[k] ]
                else:
                    if isinstance(pairs[k], list):
                        aggregate[k] = [ aggregate[k] ] + pairs[k]
                    else:
                        aggregate[k] = [ aggregate[k], pairs[k] ]
            node_name, pairs = self.nodes.pop()
        # NOTE(review): keys()[0] relies on Python 2 where dict.keys()
        # returns a list; under Python 3 a view is not subscriptable.
        key = pairs.keys()[0]
        cap = name.capitalize()
        if cap in [ 'Metainfo', 'Multiple',
                    'Pathname', 'Single', 'Variable' ]:
            # Promote the raw aggregate into the matching variable class
            # defined at module level (looked up by capitalized tag name).
            aggregate = getattr(sys.modules[__name__], cap)(key, aggregate)
        if isinstance(aggregate, dict):
            pairs[key].update(aggregate)
        else:
            pairs[key] = aggregate
        if name == 'project':
            self.handler.project(Project(key, pairs[key]))
        elif name == 'projects':
            self.handler.end_parse()
        self.nodes += [ (name, pairs) ]

    def parse(self, source, handler):
        '''This is the public interface for one pass through the database
        that generates callbacks on the handler interface.

        *source* is either a literal XML document (starting with '<?xml')
        or a pathname/stream accepted by xml.sax.'''
        self.handler = handler
        parser = xml.sax.make_parser()
        parser.setFeature(xml.sax.handler.feature_namespaces, 0)
        parser.setContentHandler(self)
        if source.startswith('<?xml'):
            # Inline document: wrap it in a file-like object (Python 2
            # cStringIO).
            parser.parse(cStringIO.StringIO(source))
        else:
            parser.parse(source)

    # The following methods are used to merge multiple databases together.

    def copy(self, db_next, db_prev, remove_project_end_tag=False):
        '''Copy lines in the db_prev file until hitting the definition
        of a package and return the name of the package.

        The closing </projects> trailer (and optionally </project> tags)
        are skipped so files can be concatenated.'''
        name = None
        line = db_prev.readline()
        while line != '':
            look = re.match(self.tagPattern, line)
            if look != None:
                name = look.group(1)
                break
            write_line = True
            look = re.match('.*' + self.trailerTxt, line)
            if look:
                write_line = False
            if remove_project_end_tag:
                look = re.match('.*</' + self.tagProject + '>', line)
                if look:
                    write_line = False
            if write_line:
                db_next.write(line)
            line = db_prev.readline()
        return name

    def next(self, db_prev):
        '''Skip lines in the db_prev file until hitting the definition
        of a package and return the name of the package.'''
        name = None
        line = db_prev.readline()
        while line != '':
            look = re.match(self.tagPattern, line)
            if look != None:
                name = look.group(1)
                break
            line = db_prev.readline()
        return name

    def start_project(self, db_next, name):
        # Emit an opening <project name="..."> tag into the merged output.
        db_next.write('  <' + self.tagProject + ' name="' + name + '">\n')

    def trailer(self, db_next):
        '''XML files need a finish tag. We make sure to remove it while
        processing Upd and Prev then add it back before closing
        the final file.'''
        db_next.write(self.trailerTxt)
def basenames(pathnames):
    '''Return the basename of each pathname in *pathnames*, as a list
    in the same order.'''
    # A comprehension replaces the manual accumulate-and-append loop.
    return [os.path.basename(pathname) for pathname in pathnames]
def search_repo_pat(sync_path):
    '''returns a RegexMatch if *sync_path* refers to a repository url/path.'''
    # Matches a path whose tail fits one of the repository patterns in
    # Repository.dirPats, optionally followed by an '@<something>' suffix
    # (presumably a revision/branch selector — confirm against callers).
    return re.search('(\S*%s)(@(\S+))?$' % Repository.dirPats, sync_path)
def filter_rep_ext(name):
    '''Filters the repository type indication from a pathname.'''
    components = name.split(os.sep)
    for idx, component in enumerate(components):
        look = search_repo_pat(component)
        if not look:
            continue
        _, rep_ext = os.path.splitext(look.group(1))
        if component == rep_ext:
            # The component is nothing but the extension: drop it entirely.
            kept = components[:idx] + components[idx + 1:]
        else:
            # Strip the trailing repository extension from the component.
            kept = components[:idx] \
                + [component[:-len(rep_ext)]] \
                + components[idx + 1:]
        # Only the first matching component is filtered.
        return os.sep.join(kept)
    return name
def mark(filename, suffix):
    '''Insert '-<suffix>' between the stem and extension of *filename*,
    e.g. mark("a.txt", "v1") returns "a-v1.txt".'''
    stem, extension = os.path.splitext(filename)
    return '%s-%s%s' % (stem, suffix, extension)
def stamp(date=None):
    '''Return a 'YYYY_MM_DD-HH' timestamp string for *date*.

    *date* defaults to the current time, resolved at call time. The
    previous implementation used `date=datetime.datetime.now()` as the
    default, which Python evaluates once at module import, so every
    default call silently returned the import time.
    '''
    if date is None:
        date = datetime.datetime.now()
    return str(date.year) \
        + ('_%02d' % (date.month)) \
        + ('_%02d' % (date.day)) \
        + ('-%02d' % (date.hour))
def stampfile(filename):
    '''Return the basename of *filename* marked with the workspace-wide
    'buildstamp' value (see mark()).

    Lazily creates the global CONTEXT and persists a new 'buildstamp'
    into it the first time one is needed.'''
    global CONTEXT
    if not CONTEXT:
        # This code here is very special. dstamp.py relies on some dws
        # functions all of them do not rely on a context except
        # this special case here.
        CONTEXT = Context()
        CONTEXT.locate()
    if not 'buildstamp' in CONTEXT.environ:
        # First use: generate a stamp from the current time and save it
        # so subsequent calls (and processes) reuse the same stamp.
        CONTEXT.environ['buildstamp'] = stamp(datetime.datetime.now())
        CONTEXT.save()
    return mark(os.path.basename(filename), CONTEXT.value('buildstamp'))
def create_index_pathname(db_index_pathname, db_pathnames):
    '''create a global dependency database (i.e. project index file) out of
    a set local dependency index files.

    *db_index_pathname* is the output file; *db_pathnames* the inputs
    merged through sort_build_conf_list().'''
    parser = XMLDbParser(CONTEXT)
    dirname = os.path.dirname(db_index_pathname)
    # *dirname* is empty when writing into the current directory;
    # os.makedirs('') would raise in that case.
    if dirname and not os.path.isdir(dirname):
        os.makedirs(dirname)
    db_next = sort_build_conf_list(db_pathnames, parser)
    try:
        db_next.seek(0)
        # *with* guarantees the output file is closed even if the copy
        # fails part-way through.
        with open(db_index_pathname, 'wb') as db_index:
            shutil.copyfileobj(db_next, db_index)
    finally:
        # Always release the intermediate merged file.
        db_next.close()
def find_bin(names, search_path, build_top, versions=None, variant=None):
    '''Search for a list of binaries that can be executed from $PATH.

    *names* is a list of (pattern,absolute_path) pairs where the absolutePat
    can be None and in which case pattern will be used to search
    for an executable. *versions['excludes']* is a list of versions
    that are concidered false positive and need to be excluded, usually
    as a result of incompatibilities.

    This function returns a list of populated (pattern,absolute_path)
    and a version number. The version number is retrieved
    through a command line flag. --version and -V are tried out.

    This function differs from findInclude() and find_lib() in its
    search algorithm. find_bin() strictly behave like $PATH and
    always returns the FIRST executable reachable from $PATH regardless
    of version number, unless the version is excluded, in which case
    the result is the same as if the executable hadn't been found.

    Implementation Note:

    *names* and *excludes* are two lists instead of a dictionary
    indexed by executale name for two reasons:
    1. Most times find_bin() is called with *names* of executables
    from the same project. It is cumbersome to specify exclusion
    per executable instead of per-project.
    2. The prototype of find_bin() needs to match the ones of
    findInclude() and find_lib().

    Implementation Note: Since the boostrap relies on finding rsync,
    it is possible we invoke this function with log == None hence
    the tests for it.
    '''
    version = None
    if versions and 'excludes' in versions:
        excludes = versions['excludes']
    else:
        excludes = []
    results = []
    droots = search_path
    complete = True
    for name_pat, absolute_path in names:
        if absolute_path != None and os.path.exists(absolute_path):
            # absolute paths only occur when the search has already been
            # executed and completed successfuly.
            results.append((name_pat, absolute_path))
            continue
        link_name, suffix = link_build_name(name_pat, 'bin', variant)
        if os.path.islink(link_name):
            # If we already have a symbolic link in the binBuildDir,
            # we will assume it is the one to use in order to cut off
            # recomputing of things that hardly change.
            results.append((name_pat,
                os.path.realpath(os.path.join(link_name, suffix))))
            continue
        if variant:
            log_interactive(variant + '/')
        log_interactive(name_pat + '... ')
        found = False
        if name_pat.endswith('.app'):
            # OSX application bundles live under /Applications and are
            # directories, so they are located by name, never executed
            # for a version number.
            binpath = os.path.join('/Applications', name_pat)
            if os.path.isdir(binpath):
                found = True
                log_info('yes')
                results.append((name_pat, binpath))
        else:
            for path in droots:
                for binname in find_first_files(path, name_pat):
                    binpath = os.path.join(path, binname)
                    if (os.path.isfile(binpath)
                        and os.access(binpath, os.X_OK)):
                        # We found an executable with the appropriate name,
                        # let's find out if we can retrieve a version number.
                        numbers = []
                        if not (variant and len(variant) > 0):
                            # When looking for a specific *variant*, we do not
                            # try to execute executables as they are surely
                            # not meant to be run on the native system.
                            # We run the help flag before --version, -V
                            # because bzip2 would wait on stdin for data
                            # otherwise.
                            # XXX semilla --help is broken :(
                            for flag in [ '--version', '-V' ]:
                                numbers = []
                                cmdline = [ binpath, flag ]
                                try:
                                    output = subprocess.check_output(
                                        cmdline, stderr=subprocess.STDOUT)
                                    for line in output.splitlines():
                                        numbers += version_candidates(line)
                                except subprocess.CalledProcessError:
                                    # When the command returns with an error
                                    # code, we assume we passed an incorrect
                                    # flag to retrieve the version number.
                                    numbers = []
                                if len(numbers) > 0:
                                    break
                        # At this point *numbers* contains a list that can
                        # interpreted as versions. Hopefully, there is only
                        # one candidate.
                        if len(numbers) == 1:
                            excluded = False
                            if excludes:
                                for exclude in list(excludes):
                                    # exclude is a (low, high) half-open
                                    # range; None means unbounded on
                                    # that side.
                                    if ((not exclude[0]
                                        or version_compare(
                                            exclude[0], numbers[0]) <= 0)
                                        and (not exclude[1]
                                        or version_compare(
                                            numbers[0], exclude[1]) < 0)):
                                        excluded = True
                                        break
                            if not excluded:
                                version = numbers[0]
                                log_info(str(version))
                                results.append((name_pat, binpath))
                            else:
                                log_info('excluded (' +str(numbers[0])+ ')')
                            # NOTE(review): an excluded executable sets
                            # found=True below without appending a result,
                            # so *complete* stays True and *results* is
                            # shorter than *names* — confirm intended.
                        else:
                            # Zero or ambiguous version candidates: accept
                            # the executable without pinning a version.
                            log_info('yes')
                            results.append((name_pat, binpath))
                        found = True
                        break
                if found:
                    break
        if not found:
            log_info('no')
            results.append((name_pat, None))
            complete = False
    return results, version, complete
def find_cache(context, names):
    '''Search for the presence of files in the cache directory. *names*
    is a dictionnary of file names used as key and the associated checksum.

    Returns the subset of *names* that is missing from the local cache
    (present files, even ones whose sha1 does not match, are not
    returned — they are only reported as "corrupted?").'''
    results = {}
    for pathname in names:
        # *pathname* may be a URL; only its basename is displayed.
        name = os.path.basename(urlparse.urlparse(pathname).path)
        log_interactive(name + "... ")
        local_name = context.local_dir(pathname)
        if os.path.exists(local_name):
            if isinstance(names[pathname], dict):
                if 'sha1' in names[pathname]:
                    expected = names[pathname]['sha1']
                    with open(local_name, 'rb') as local_file:
                        # Reads the whole file into memory to hash it.
                        sha1sum = hashlib.sha1(local_file.read()).hexdigest()
                        if sha1sum == expected:
                            # checksum are matching
                            log_info("matched (sha1)")
                        else:
                            log_info("corrupted? (sha1)")
                else:
                    log_info("yes")
            else:
                log_info("yes")
        else:
            # Missing from the cache: report it so the caller can fetch it.
            results[ pathname ] = names[pathname]
            log_info("no")
    return results
def find_files(base, name_pat, recurse=True):
    '''Search the directory tree rooted at *base* for files matching
    *name_pat* and return a list of absolute pathnames to those files,
    sorted in reverse lexicographic order.'''
    matches = []
    pattern = '.*' + name_pat + '$'
    try:
        if os.path.exists(base):
            for entry in os.listdir(base):
                full_path = os.path.join(base, entry)
                if re.match(pattern, full_path):
                    matches.append(full_path)
                elif recurse and os.path.isdir(full_path):
                    matches.extend(find_files(full_path, name_pat))
    except OSError:
        # In case permission to execute os.listdir is denied.
        pass
    return sorted(matches, reverse=True)
def find_first_files(base, name_pat, subdir=''):
    '''Search the directory tree rooted at *base* for files matching pattern
    *name_pat* and returns a list of relative pathnames to those files
    from *base*.
    If .*/ is part of pattern, base is searched recursively in breadth search
    order until at least one result is found.'''
    try:
        subdirs = []
        results = []
        # The compiled regex is loop-invariant; previously it was rebuilt
        # through name_pat_regex() for every directory entry.
        regex = name_pat_regex(name_pat)
        pat_num_sub_dirs = len(name_pat.split(os.sep))
        sub_num_sub_dirs = len(subdir.split(os.sep))
        candidate_dir = os.path.join(base, subdir)
        if os.path.exists(candidate_dir):
            for filename in os.listdir(candidate_dir):
                relative = os.path.join(subdir, filename)
                path = os.path.join(base, relative)
                if regex.match(path) is not None:
                    results += [ relative ]
                elif (((('.*' + os.sep) in name_pat)
                      or (sub_num_sub_dirs < pat_num_sub_dirs))
                      and os.path.isdir(path)):
                    # When we see .*/, it means we are looking for a pattern
                    # that can be matched by files in subdirectories
                    # of the base.
                    subdirs += [ relative ]
        if len(results) == 0:
            # Breadth-first: only descend when the current level yielded
            # nothing. (*child* avoids shadowing the *subdir* parameter.)
            for child in subdirs:
                results += find_first_files(base, name_pat, child)
    except OSError:
        # Permission to a subdirectory might be denied.
        pass
    return sorted(results, reverse=True)
def find_data(dirname, names,
              search_path, build_top, versions=None, variant=None):
    '''Search for a list of extra files that can be found from $PATH
    where bin was replaced by *dir*.

    Returns (results, None, complete): the version slot is always None
    for data files. *results* is a list of (pattern, absolute_path)
    pairs; *complete* is False when any pattern could not be resolved.'''
    results = []
    droots = search_path
    complete = True
    if versions and 'excludes' in versions:
        excludes = versions['excludes']
    else:
        # NOTE(review): *excludes* is computed but never used in this
        # function — data files carry no version to exclude.
        excludes = []
    if variant:
        build_dir = os.path.join(build_top, variant, dirname)
    else:
        build_dir = os.path.join(build_top, dirname)
    for name_pat, absolute_path in names:
        if absolute_path != None and os.path.exists(absolute_path):
            # absolute paths only occur when the search has already been
            # executed and completed successfuly.
            results.append((name_pat, absolute_path))
            continue
        link_name, suffix = link_build_name(name_pat, dirname, variant)
        if os.path.islink(link_name):
            # If we already have a symbolic link in the dataBuildDir,
            # we will assume it is the one to use in order to cut off
            # recomputing of things that hardly change.
            # XXX Be careful if suffix starts with '/'
            results.append((name_pat,
                os.path.realpath(os.path.join(link_name, suffix))))
            continue
        if variant:
            log_interactive(variant + '/')
        log_interactive(name_pat + '... ')
        # Number of leading '.*/' components to strip when deriving the
        # link target (see *linked* below).
        link_num = 0
        if name_pat.startswith('.*' + os.sep):
            link_num = len(name_pat.split(os.sep)) - 2
        found = False
        # The structure of share/ directories is not as standard as others
        # and requires a recursive search for prerequisites. As a result,
        # it might take a lot of time to update unmodified links.
        # We thus first check links in build_dir are still valid.
        full_names = find_files(build_dir, name_pat)
        if len(full_names) > 0:
            try:
                os.stat(full_names[0])
                log_info('yes')
                results.append((name_pat, full_names[0]))
                found = True
            except IOError:
                # NOTE(review): os.stat raises OSError; on Python 2
                # IOError is a distinct class, so a broken link may not
                # be caught here — confirm intended.
                pass
        if not found:
            for base in droots:
                full_names = find_files(base, name_pat)
                if len(full_names) > 0:
                    log_info('yes')
                    tokens = full_names[0].split(os.sep)
                    # *linked* was the old link target computation, kept
                    # only for reference.
                    linked = os.sep.join(tokens[:len(tokens) - link_num])
                    # DEPRECATED: results.append((name_pat, linked))
                    results.append((name_pat, full_names[0]))
                    found = True
                    break
        if not found:
            log_info('no')
            results.append((name_pat, None))
            complete = False
    return results, None, complete
def find_etc(names, search_path, build_top, versions=None, variant=None):
    '''find files specificed in names inside the etc/ directory.'''
    # Forward *variant* to find_data(), matching find_libexec() and
    # find_share(); it was previously dropped so variant-specific
    # searches silently fell back to the default build directory.
    return find_data('etc', names, search_path, build_top, versions, variant)
def find_include(names, search_path, build_top, versions=None, variant=None):
    '''Search for a list of headers that can be found from $PATH
    where bin was replaced by include.

    *names* is a list of (pattern,absolute_path) pairs where the absolutePat
    can be None and in which case pattern will be used to search
    for a header filename patterns. *excludes* is a list
    of versions that are concidered false positive and need to be
    excluded, usually as a result of incompatibilities.

    This function returns a populated list of (pattern,absolute_path) pairs
    and a version number if available.

    This function differs from find_bin() and find_lib() in its search
    algorithm. find_include() might generate a breadth search based
    out of a derived root of $PATH. It opens found header files
    and look for a "#define.*VERSION" pattern in order to deduce
    a version number.'''
    results = []
    version = None
    if versions and 'excludes' in versions:
        excludes = versions['excludes']
    else:
        excludes = []
    complete = True
    # Once a first header is found, *prefix*/*include_sys_dirs* narrow
    # subsequent searches to the same include root so all headers of a
    # project come from one installation.
    prefix = ''
    include_sys_dirs = search_path
    for name_pat, absolute_path in names:
        if absolute_path != None and os.path.exists(absolute_path):
            # absolute paths only occur when the search has already been
            # executed and completed successfuly.
            results.append((name_pat, absolute_path))
            continue
        link_name, suffix = link_build_name(name_pat, 'include', variant)
        if os.path.islink(link_name):
            # If we already have a symbolic link in the binBuildDir,
            # we will assume it is the one to use in order to cut off
            # recomputing of things that hardly change.
            # XXX Be careful if suffix starts with '/'
            results.append(
                (name_pat, os.path.realpath(os.path.join(link_name, suffix))))
            continue
        if variant:
            log_interactive(variant + '/')
        log_interactive(name_pat + '... ')
        found = False
        for include_sys_dir in include_sys_dirs:
            # *includes* is kept sorted: highest version first, headers
            # with no detected version appended at the end.
            includes = []
            for header in find_first_files(include_sys_dir,
                                           name_pat.replace(prefix, '')):
                # Open the header file and search for all defines
                # that end in VERSION.
                numbers = []
                # First parse the pathname for a version number...
                parts = os.path.dirname(header).split(os.sep)
                parts.reverse()
                for part in parts:
                    for ver in version_candidates(part):
                        if not ver in numbers:
                            numbers += [ ver ]
                # Second open the file and search for a version identifier...
                header = os.path.join(include_sys_dir, header)
                with open(header, 'rt') as header_file:
                    line = header_file.readline()
                    while line != '':
                        look = re.match(r'\s*#define.*VERSION\s+(\S+)', line)
                        if look != None:
                            for ver in version_candidates(look.group(1)):
                                if not ver in numbers:
                                    numbers += [ ver ]
                        line = header_file.readline()
                # At this point *numbers* contains a list that can
                # interpreted as versions. Hopefully, there is only
                # one candidate.
                if len(numbers) >= 1:
                    # With more than one version number, we assume the first
                    # one found is the most relevent and use it regardless.
                    # This is different from previously assumption that more
                    # than one number was an error in the version detection
                    # algorithm. As it turns out, boost packages sources
                    # in a -1_41_0.tar.gz file while version.hpp says 1_41.
                    excluded = False
                    if excludes:
                        for exclude in list(excludes):
                            # (low, high) half-open range; None means
                            # unbounded on that side.
                            if ((not exclude[0]
                                or version_compare(
                                    exclude[0], numbers[0]) <= 0)
                                and (not exclude[1]
                                or version_compare(
                                    numbers[0], exclude[1]) < 0)):
                                excluded = True
                                break
                    if not excluded:
                        # Insertion sort by descending version number.
                        index = 0
                        for include in includes:
                            if ((not include[1])
                                or version_compare(include[1], numbers[0]) < 0):
                                break
                            index = index + 1
                        includes.insert(index, (header, numbers[0]))
                else:
                    # If we find no version number, we append the header
                    # at the end of the list with 'None' for version.
                    includes.append((header, None))
            if len(includes) > 0:
                if includes[0][1]:
                    version = includes[0][1]
                    log_info(version)
                else:
                    log_info('yes')
                results.append((name_pat, includes[0][0]))
                # Derive the include root shared by pattern and found
                # header by stripping their common trailing components.
                name_pat_parts = name_pat.split(os.sep)
                include_file_parts = includes[0][0].split(os.sep)
                while (len(name_pat_parts) > 0
                       and name_pat_parts[len(name_pat_parts)-1]
                       == include_file_parts[len(include_file_parts)-1]):
                    name_pat_part = name_pat_parts.pop()
                    include_file_part = include_file_parts.pop()
                prefix = os.sep.join(name_pat_parts)
                if prefix and len(prefix) > 0:
                    prefix = prefix + os.sep
                    include_sys_dirs = [ os.sep.join(include_file_parts) ]
                else:
                    include_sys_dirs = [ os.path.dirname(includes[0][0]) ]
                found = True
                break
        if not found:
            log_info('no')
            results.append((name_pat, None))
            complete = False
    return results, version, complete
def find_lib(names, search_path, build_top, versions=None, variant=None):
    '''Search for a list of libraries that can be found from $PATH
    where bin was replaced by lib.

    *names* is a list of (pattern,absolute_path) pairs where the absolutePat
    can be None and in which case pattern will be used to search
    for library names with neither a 'lib' prefix
    nor a '.a', '.so', etc. suffix. *excludes* is a list
    of versions that are concidered false positive and need to be
    excluded, usually as a result of incompatibilities.

    This function returns a populated list of (pattern,absolute_path) pairs
    and a version number if available.

    This function differs from find_bin() and find_include() in its
    search algorithm. find_lib() might generate a breadth search based
    out of a derived root of $PATH. It uses the full library name
    in order to deduce a version number if possible.'''
    results = []
    version = None
    if versions and 'excludes' in versions:
        excludes = versions['excludes']
    else:
        excludes = []
    complete = True
    # We used to look for lib suffixes '-version' and '_version'. Unfortunately
    # it picked up libldap_r.so when we were looking for libldap.so. Looking
    # through /usr/lib on Ubuntu does not show any libraries ending with
    # a '_version' suffix so we will remove it from the regular expression.
    suffix = '(-.+)?(\\' + lib_static_suffix() \
        + '|\\' + lib_dyn_suffix() + r'(\\.\S+)?)'
    if not variant and CONTEXT.host() in APT_DISTRIBS:
        # Ubuntu 12.04+: host libraries are not always installed
        # in /usr/lib. Sometimes they end-up in /usr/lib/x86_64-linux-gnu
        # like libgmp.so for example.
        droots = []
        for path in search_path:
            droots += [ path, os.path.join(path, 'x86_64-linux-gnu') ]
    else:
        droots = search_path
    for name_pat, absolute_path in names:
        if absolute_path != None and os.path.exists(absolute_path):
            # absolute paths only occur when the search has already been
            # executed and completed successfuly.
            results.append((name_pat, absolute_path))
            continue
        lib_base_pat = lib_prefix() + name_pat
        # *link_pats* are the candidate symlink names to check in the
        # build area, ordered by preference (static vs. dynamic).
        if '.*' in name_pat:
            # Dealing with a regular expression already
            lib_suffix_by_priority = []
            link_pats = [ name_pat ]
        elif lib_base_pat.endswith('.so'):
            # local override to select dynamic library.
            lib_base_pat = lib_base_pat[:-3]
            lib_suffix_by_priority = [ lib_dyn_suffix(), lib_static_suffix() ]
            link_pats = [ lib_base_pat + '.so',
                          lib_base_pat + lib_static_suffix() ]
        elif STATIC_LIB_FIRST:
            lib_suffix_by_priority = [ lib_static_suffix(), lib_dyn_suffix() ]
            link_pats = [ lib_base_pat + lib_static_suffix(),
                          lib_base_pat + '.so' ]
        else:
            lib_suffix_by_priority = [ lib_dyn_suffix(), lib_static_suffix() ]
            link_pats = [ lib_base_pat + '.so',
                          lib_base_pat + lib_static_suffix() ]
        found = False
        for link_pat in link_pats:
            link_name, link_suffix = link_build_name(link_pat, 'lib', variant)
            if os.path.islink(link_name):
                # If we already have a symbolic link in the libBuildDir,
                # we will assume it is the one to use in order to cut off
                # recomputing of things that hardly change.
                results.append((name_pat, os.path.realpath(os.path.join(
                    link_name, link_suffix))))
                found = True
                break
        if found:
            continue
        if variant:
            log_interactive(variant + '/')
        log_interactive(name_pat + '... ')
        found = False
        for lib_sys_dir in droots:
            # *libs* is kept sorted best-first (see insertion sorts below).
            libs = []
            if '.*' in name_pat:
                # We were already given a regular expression.
                # If we are not dealing with a honest to god library, let's
                # just use the pattern we were given. This is because, python,
                # ruby, etc. also put their stuff in libDir.
                # ex patterns for things also in libDir:
                #   - ruby/.*/json.rb
                #   - cgi-bin/awstats.pl
                #   - .*/registration/__init__.py
                lib_pat = name_pat
            else:
                lib_pat = lib_base_pat + suffix
            for libname in find_first_files(lib_sys_dir, lib_pat):
                numbers = version_candidates(libname)
                absolute_path = os.path.join(lib_sys_dir, libname)
                absolute_path_base = os.path.dirname(absolute_path)
                # NOTE(review): split('.')[1] raises IndexError for a
                # basename without a '.' — presumably lib_pat guarantees
                # an extension; confirm.
                absolute_path_ext = '.' \
                    + os.path.basename(absolute_path).split('.')[1]
                if len(numbers) == 1:
                    excluded = False
                    if excludes:
                        for exclude in list(excludes):
                            # (low, high) half-open range; None means
                            # unbounded on that side.
                            if ((not exclude[0]
                                or version_compare(
                                    exclude[0], numbers[0]) <= 0)
                                and (not exclude[1]
                                or version_compare(
                                    numbers[0], exclude[1]) < 0)):
                                excluded = True
                                break
                    if not excluded:
                        # Insert candidate into a sorted list. First to last,
                        # higher version number, dynamic libraries.
                        index = 0
                        for lib in libs:
                            lib_path_base = os.path.dirname(lib[0])
                            if ((not lib[1])
                                or version_compare(lib[1], numbers[0]) < 0):
                                break
                            elif (absolute_path_base == lib_path_base
                                  and absolute_path_ext
                                  == lib_suffix_by_priority[0]):
                                break
                            index = index + 1
                        libs.insert(index, (absolute_path, numbers[0]))
                else:
                    # Insert candidate into a sorted list. First to last,
                    # higher version number, shortest name, dynamic libraries.
                    index = 0
                    for lib in libs:
                        lib_path_base = os.path.dirname(lib[0])
                        if lib[1]:
                            pass
                        elif absolute_path_base == lib_path_base:
                            if absolute_path_ext == lib_suffix_by_priority[0]:
                                break
                        elif lib_path_base.startswith(absolute_path_base):
                            break
                        index = index + 1
                    libs.insert(index, (absolute_path, None))
            if len(libs) > 0:
                candidate = libs[0][0]
                version = libs[0][1]
                # Report the actual suffix matched after the base pattern.
                look = re.match('.*%s(.+)' % lib_base_pat, candidate)
                if look:
                    suffix = look.group(1)
                    log_info(suffix)
                else:
                    log_info('yes (no suffix?)')
                results.append((name_pat, candidate))
                found = True
                break
        if not found:
            log_info('no')
            results.append((name_pat, None))
            complete = False
    return results, version, complete
def find_prerequisites(deps, versions=None, variant=None):
    '''Find a set of executables, headers, libraries, etc. on a local machine.

    *deps* is a dictionary where each key associates an install directory
    (bin, include, lib, etc.) to a pair (pattern,absolute_path) as required
    by *find_bin*(), *find_lib*(), *find_include*(), etc.

    *excludes* contains a list of excluded version ranges because they are
    concidered false positive, usually as a result of incompatibilities.

    This function will try to find the latest version of each file which
    was not excluded.

    Returns an (installed, complete) pair: *installed* is a dictionnary
    matching *deps* where each found file is replaced by an absolute
    pathname and each file not found is absent; *complete* is True when
    every file in *deps* could be fulfilled, False otherwise.'''
    version = None
    installed = {}
    complete = True
    for dep in deps:
        # Make sure the extras do not get filtered out.
        if not dep in INSTALL_DIRS:
            installed[dep] = deps[dep]
    for dirname in INSTALL_DIRS:
        # The search order "bin, include, lib, etc" will determine
        # how excluded versions apply.
        if dirname in deps:
            # Dispatch to find_bin / find_include / find_lib / ... by name.
            command = 'find_' + dirname
            # First time ever *find* is called, libDir will surely not defined
            # in the workspace make fragment and thus we will trigger
            # interactive input from the user.
            # We want to make sure the output of the interactive session does
            # not mangle the search for a library so we preemptively trigger
            # an interactive session.
            # deprecated: done in search_path. context.value(dir + 'Dir')
            installed[dirname], installed_version, installed_complete = \
                getattr(sys.modules[__name__], command)(deps[dirname],
                    CONTEXT.search_path(dirname,variant),
                    CONTEXT.value('buildTop'),
                    versions, variant)
            # Once we have selected a version out of the installed
            # local system, we lock it down and only search for
            # that specific version.
            if not version and installed_version:
                version = installed_version
                versions = { 'excludes': [ (None, version), (version_incr(version), None) ] }
            if not installed_complete:
                complete = False
    return installed, complete
def find_libexec(names, search_path, build_top, versions=None, variant=None):
    '''Find the files specified in *names* inside the libexec/ directory.
    *versions['excludes']* lists versions to exclude from the matches.'''
    return find_data('libexec', names, search_path, build_top,
                     versions, variant)
def find_share(names, search_path, build_top, versions=None, variant=None):
    '''Find the files specified in *names* inside the share/ directory.
    *versions['excludes']* lists versions to exclude from the matches.'''
    return find_data('share', names, search_path, build_top,
                     versions, variant)
def find_boot_bin(context, name, package=None, dbindex=None):
    '''This script needs a few tools to be installed to bootstrap itself,
    most noticeably the initial source control tool used to checkout
    the projects dependencies index file.

    Returns the pathname of *name* inside the bin build directory,
    searching $PATH and installing *package* (defaults to *name*)
    when the executable cannot be found.'''
    executable = os.path.join(context.bin_build_dir(), name)
    if not os.path.exists(executable):
        # We do not use *validate_controls* here because dws in not
        # a project in *srcTop* and does not exist on the remote machine.
        # We use find_bin() and link_context() directly also because it looks
        # weird when the script prompts for installing a non-existent dws
        # project before looking for the rsync prerequisite.
        if not package:
            package = name
        if not dbindex:
            # Synthesize a minimal one-project index so install() can
            # resolve *package* without a real index file.
            dbindex = IndexProjects(context,
'''<?xml version="1.0" ?>
<projects>
  <project name="dws">
    <repository>
      <dep name="%s">
        <bin>%s</bin>
      </dep>
    </repository>
  </project>
</projects>
''' % (package, name))
        executables, version, complete = find_bin([ [ name, None ] ],
            context.search_path('bin'), context.value('buildTop'))
        if len(executables) == 0 or not executables[0][1]:
            # Not on the system yet: install the package then search again.
            install([package], dbindex)
            executables, version, complete = find_bin([ [ name, None ] ],
                context.search_path('bin'), context.value('buildTop'))
        name, absolute_path = executables.pop()
        # Record the result as a symlink in the bin build directory so
        # subsequent lookups short-circuit.
        link_pat_path(name, absolute_path, 'bin')
        executable = os.path.join(context.bin_build_dir(), name)
    return executable
def find_gem(context):
    '''Locate the ruby "gem" executable, bootstrapping the rubygems
    package on apt-based distributions when necessary, and return its
    expected path under buildTop/bin.'''
    gem_package = 'rubygems' if context.host() in APT_DISTRIBS else None
    find_boot_bin(context, '(gem).*', gem_package)
    return os.path.join(context.value('buildTop'), 'bin', 'gem')
def find_git(context):
    '''Ensure a git executable is linked under buildTop/bin, running a
    SetupStep for the git-all project when the link does not exist.'''
    git_link = os.path.join(context.value('buildTop'), 'bin', 'git')
    if not os.path.lexists(git_link):
        files = {'bin': [('git', None)]}
        # apt-based distributions ship git-core support files in share/,
        # others in libexec/.
        if context.host() in APT_DISTRIBS:
            files['share'] = [('git-core', None)]
        else:
            files['libexec'] = [('git-core', None)]
        setup = SetupStep('git-all', files=files)
        setup.run(context)
    return 'git'
def find_npm(context):
    '''Return the name of the npm executable, bootstrapping node/npm
    through nvm (linked under buildTop/bin) when they are not present.'''
    build_npm = os.path.join(context.value('buildTop'), 'bin', 'npm')
    if not os.path.lexists(build_npm):
        dbindex = IndexProjects(context,
'''<?xml version="1.0" ?>
<projects>
  <project name="nvm">
    <repository>
      <sync>https://github.com/creationix/nvm.git</sync>
      <shell>
export NVM_DIR=${buildTop}
. ${srcTop}/nvm/nvm.sh
nvm install 0.8.14
      </shell>
    </repository>
  </project>
</projects>
''')
        validate_controls(
            BuildGenerator([ 'nvm' ], [], force_update = True), dbindex)
        prev = os.getcwd()
        os.chdir(os.path.join(context.value('buildTop'), 'bin'))
        try:
            os.symlink('../v0.8.14/bin/npm', 'npm')
            os.symlink('../v0.8.14/bin/node', 'node')
        finally:
            # Restore the previous working directory even when creating
            # the symlinks fails (previously an exception left the
            # process stranded in buildTop/bin).
            os.chdir(prev)
    return 'npm'
def find_pip(context):
    '''Locate the pip executable, bootstrapping the python-pip package
    on yum-based distributions when necessary, and return its expected
    path under buildTop/bin.'''
    pip_package = 'python-pip' if context.host() in YUM_DISTRIBS else None
    find_boot_bin(context, '(pip).*', pip_package)
    return os.path.join(context.value('buildTop'), 'bin', 'pip')
def find_rsync(context, host, relative=True, admin=False,
               username=None, key=None):
    '''Check if rsync is present and install it through the package
    manager if it is not. rsync is a little special since it is used
    directly by this script and the script is not always installed
    through a project.

    Returns an (cmdline, prefix) pair: the rsync command line as a list
    and the remote prefix ("[user@]host:" or empty for local access).'''
    rsync = find_boot_bin(context, 'rsync')
    # We are accessing the remote machine through a mounted
    # drive or through ssh.
    prefix = ""
    if username:
        prefix += username + '@'
    # -a is equivalent to -rlptgoD, we are only interested in -r (recursive),
    # -p (permissions), -t (times)
    flags = '-qrptuzR' if relative else '-qrptuz'
    cmdline = [ rsync, flags ]
    if host:
        # We are accessing the remote machine through ssh
        prefix += host + ':'
        ssh_parts = [ 'ssh -q' ]
        if admin:
            ssh_parts.append(' -t')
        if key:
            ssh_parts.append(' -i ' + str(key))
        cmdline += [ '--rsh="' + ''.join(ssh_parts) + '"' ]
    if admin and username != 'root':
        cmdline += [ '--rsync-path "sudo rsync"' ]
    return cmdline, prefix
def name_pat_regex(name_pat):
    '''Compile *name_pat* into a regular expression that matches
    executable pathnames.

    Escapes '+' so C++ tool names (g++, clang++) do not trip the regular
    expression parser, anchors at a path separator so a search for
    "make" does not also pick up "automake" or "pkmake", and appends '$'
    so "makeconv" and "makeinfo" are not matched either.
    '''
    # Raw string for the replacement: the former '\+\+' literal relies on
    # an unrecognized escape and raises a DeprecationWarning on Python 3.
    pat = name_pat.replace('++', r'\+\+')
    if not pat.startswith('.*'):
        # If we don't add the separator here we will end-up with unrelated
        # links to automake, pkmake, etc. when we are looking for "make".
        pat = '.*' + os.sep + pat
    return re.compile(pat + '$')
def config_var(context, variables):
    '''Look up the workspace configuration file / make fragment for
    definitions of *variables*, instances of classes derived from
    Variable (ex. Pathname, Single), prompting the user for any
    missing input. Returns True when at least one variable had to be
    configured (the context is then saved).'''
    found = False
    for key, val in variables.iteritems():
        # Apply constraints where necessary.
        val.constrain(context.environ)
        if key not in context.environ:
            # Variables must be added to the context, otherwise they
            # won't be saved in the workspace make fragment.
            context.environ[key] = val
        found |= val.configure(context)
    if found:
        context.save()
    return found
def cwd_projects(reps, recurse=False):
    '''Returns a list of projects based on the current directory
    and/or the list passed as argument.'''
    if len(reps) == 0:
        # Derive project names from the current directory whenever it
        # is a subdirectory of buildTop or srcTop.
        cwd = os.path.realpath(os.getcwd())
        build_top = os.path.realpath(CONTEXT.value('buildTop'))
        src_top = os.path.realpath(CONTEXT.value('srcTop'))
        project_name = None
        src_dir = src_top
        if os.path.commonprefix([cwd, src_top]) == src_top:
            src_dir = cwd
            project_name = src_dir[len(src_top) + 1:]
        elif os.path.commonprefix([cwd, build_top]) == build_top:
            src_dir = cwd.replace(build_top, src_top)
            project_name = src_dir[len(src_top) + 1:]
        if project_name:
            reps = [project_name]
        else:
            # Not inside a specific project: pick up every repository
            # found under src_dir.
            for repdir in find_files(src_dir, Repository.dirPats):
                reps += [os.path.dirname(
                    repdir.replace(src_top + os.sep, ''))]
    if recurse:
        raise NotImplementedError()
    return reps
def ordered_prerequisites(roots, index):
    '''Returns the dependencies in topological order for a set of
    project names in *roots*.'''
    dgen = MakeDepGenerator(roots, [], exclude_pats=EXCLUDE_PATS)
    results = []
    for step in index.closure(dgen):
        # XXX this is an ugly little hack: only keep install/build steps.
        if isinstance(step, (InstallStep, BuildStep)):
            results += [step.qualified_project_name()]
    return results
def fetch(context, filenames,
          force=False, admin=False, relative=True):
    '''download *filenames*, typically a list of distribution packages,
    from the remote server into *cacheDir*. See the upload function
    for uploading files to the remote server.
    When the files to fetch require sudo permissions on the remote
    machine, set *admin* to true.
    '''
    # NOTE(review): *filenames* is indexed by name below
    # (``filenames[name]``), so despite the docstring it looks like a
    # dict mapping names to some per-file value — confirm with callers.
    if filenames and len(filenames) > 0:
        # Expand filenames to absolute urls
        remote_site_top = context.value('remoteSiteTop')
        uri = urlparse.urlparse(remote_site_top)
        pathnames = {}
        for name in filenames:
            # Absolute path to access a file on the remote machine.
            remote_path = ''
            if name:
                if name.startswith('http') or ':' in name:
                    # Already an absolute url.
                    remote_path = name
                elif len(uri.path) > 0 and name.startswith(uri.path):
                    remote_path = os.path.join(remote_site_top,
                        '.' + name.replace(uri.path, ''))
                elif name.startswith('/'):
                    remote_path = '/.' + name
                else:
                    remote_path = os.path.join(remote_site_top, './' + name)
            pathnames[ remote_path ] = filenames[name]
        # Check the local cache
        if force:
            downloads = pathnames
        else:
            downloads = find_cache(context, pathnames)
        # Make sure the local directories exist for everything we are
        # about to download.
        for filename in downloads:
            local_filename = context.local_dir(filename)
            dirname = os.path.dirname(local_filename)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
        # Split fetches by protocol
        https = []
        sshs = []
        for package in downloads:
            # Splits between files downloaded through http and ssh.
            if package.startswith('http'):
                https += [ package ]
            else:
                sshs += [ package ]
        # fetch https
        for remotename in https:
            localname = context.local_dir(remotename)
            if not os.path.exists(os.path.dirname(localname)):
                os.makedirs(os.path.dirname(localname))
            log_info('fetching ' + remotename + '...')
            remote = urllib2.urlopen(urllib2.Request(remotename))
            local = open(localname, 'w')
            local.write(remote.read())
            local.close()
            remote.close()
        # fetch sshs
        if len(sshs) > 0:
            sources = []
            hostname = uri.netloc
            if not uri.netloc:
                # If there is no protocol specified, the hostname
                # will be in uri.scheme (That seems like a bug in urlparse).
                hostname = uri.scheme
            for ssh in sshs:
                sources += [ ssh.replace(hostname + ':', '') ]
            if len(sources) > 0:
                if admin:
                    # Prime sudo credentials on the remote host so rsync
                    # does not echo the password prompt.
                    shell_command(['stty -echo;', 'ssh', hostname,
                                   'sudo', '-v', '; stty echo'])
                cmdline, prefix = find_rsync(context, context.remote_host(),
                                             relative, admin)
                shell_command(cmdline + ["'" + prefix + ' '.join(sources) + "'",
                                         context.value('siteTop') ])
def create_managed(project_name, versions, target):
    '''Create a step that will install *project_name* through the local
    package manager.
    If the target is pure python, pip is tried before the native package
    manager because installing in the virtualenv is preferred. Python
    packages with C bindings rely solely on the native package manager.
    Returns None when no suitable package manager is available.'''
    if target and target.startswith('python'):
        return PipInstallStep(project_name, versions, target)
    if target and target.startswith('gems'):
        return GemInstallStep(project_name, versions, target)
    if target and target.startswith('nodejs'):
        return NpmInstallStep(project_name, target)
    host = CONTEXT.host()
    if host in APT_DISTRIBS:
        return AptInstallStep(project_name, target)
    if host in PORT_DISTRIBS:
        return MacPortInstallStep(project_name, target)
    if host in YUM_DISTRIBS:
        return YumInstallStep(project_name, target)
    return None
def create_package_file(project_name, filenames):
    '''Returns the install step that installs the package files
    *filenames* through the local package manager, or None when the
    host platform is not recognized.'''
    host = CONTEXT.host()
    if host in APT_DISTRIBS:
        return DpkgInstallStep(project_name, filenames)
    if host in PORT_DISTRIBS:
        return DarwinInstallStep(project_name, filenames)
    if host in YUM_DISTRIBS:
        return RpmInstallStep(project_name, filenames)
    return None
def elapsed_duration(start, finish):
    '''Returns the time elapsed between *start* and *finish*.'''
    duration = finish - start
    # XXX Until most systems move to python 2.7, compute the number of
    # seconds ourselves. The +1 insures we run for at least a second.
    total_micros = (duration.days * 24 * 3600 + duration.seconds) * 10**6 \
        + duration.microseconds
    return datetime.timedelta(seconds=total_micros / 10**6 + 1)
def install(packages, dbindex):
    '''install a pre-built (also pre-fetched) package.

    *packages* mixes local package files (paths that exist on disk) and
    project names; *dbindex* is the project index used to resolve
    project names.
    '''
    projects = []
    local_files = []
    package_files = None
    # Separate local package files from project names.
    for name in packages:
        if os.path.isfile(name):
            local_files += [ name ]
        else:
            projects += [ name ]
    if len(local_files) > 0:
        package_files = create_package_file(local_files[0], local_files)
    if len(projects) > 0:
        handler = Unserializer(projects)
        dbindex.parse(handler)
        managed = []
        for name in projects:
            # *name* is definitely handled by the local system package manager
            # whenever there is no associated project.
            if name in handler.projects:
                package = handler.as_project(name).packages[CONTEXT.host()]
                if package:
                    # NOTE(review): *package_files* is None here when no
                    # local files were passed in — this .insert would then
                    # raise AttributeError; confirm intended call paths.
                    package_files.insert(create_package_file(name,
                        package.fetches()))
                else:
                    managed += [ name ]
            else:
                managed += [ name ]
        if len(managed) > 0:
            # Chain all managed installs onto a single step.
            step = create_managed(managed[0], versions=None, target=None)
            for package in managed[1:]:
                step.insert(create_managed(package, versions=None, target=None))
            step.run(CONTEXT)
    if package_files:
        package_files.run(CONTEXT)
def help_book(help_string):
    '''Print a text string help message as formatted docbook.

    *help_string* is a file-like buffer (read with getvalue()) holding
    the plain-text --help output. Section headings (lines ending with
    ':') become refsections, option/command lines become varlistentry
    elements.'''
    first_term = True
    first_section = True
    lines = help_string.getvalue().split('\n')
    while len(lines) > 0:
        line = lines.pop(0)
        if line.strip().startswith('Usage'):
            # The "Usage: cmd" line produces the docbook preamble.
            look = re.match(r'Usage: (\S+)', line.strip())
            cmdname = look.group(1)
            # /usr/share/xml/docbook/schema/dtd/4.5/docbookx.dtd
            # dtd/docbook-xml/docbookx.dtd
            sys.stdout.write("""<?xml version="1.0"?>
<refentry xmlns="http://docbook.org/ns/docbook"
 xmlns:xlink="http://www.w3.org/1999/xlink"
 xml:id=\"""" + cmdname + """">
<info>
<author>
<personname>Sebastien Mirolo <smirolo@fortylines.com></personname>
</author>
</info>
<refmeta>
<refentrytitle>""" + cmdname + """</refentrytitle>
<manvolnum>1</manvolnum>
<refmiscinfo class="manual">User Commands</refmiscinfo>
<refmiscinfo class="source">drop</refmiscinfo>
<refmiscinfo class="version">""" + str(__version__) + """</refmiscinfo>
</refmeta>
<refnamediv>
<refname>""" + cmdname + """</refname>
<refpurpose>inter-project dependencies tool</refpurpose>
</refnamediv>
<refsynopsisdiv>
<cmdsynopsis>
<command>""" + cmdname + """</command>
<arg choice="opt">
  <option>options</option>
</arg>
<arg>command</arg>
</cmdsynopsis>
</refsynopsisdiv>
""")
        elif (line.strip().startswith('Version')
              or re.match(r'\S+ version', line.strip())):
            # Version banners are dropped from the docbook output.
            pass
        elif line.strip().endswith(':'):
            # A section heading: close any open entry/section, open a
            # new refsection + variablelist.
            if not first_term:
                sys.stdout.write("</para>\n")
                sys.stdout.write("</listitem>\n")
                sys.stdout.write("</varlistentry>\n")
            if not first_section:
                sys.stdout.write("</variablelist>\n")
                sys.stdout.write("</refsection>\n")
            first_section = False
            sys.stdout.write("<refsection>\n")
            sys.stdout.write('<title>' + line.strip() + '</title>\n')
            sys.stdout.write("<variablelist>")
            first_term = True
        elif len(line) > 0 and (re.search("[a-z]", line[0])
                                or line.startswith(" -")):
            # A term line (command or option): open a varlistentry.
            stmt = line.strip().split(' ')
            if not first_term:
                sys.stdout.write("</para>\n")
                sys.stdout.write("</listitem>\n")
                sys.stdout.write("</varlistentry>\n")
            first_term = False
            # Find the first non-empty word after the term itself.
            for word in stmt[1:]:
                if len(word) > 0:
                    break
            if line.startswith(" -h,"):
                # Hack because "show" does not start
                # with uppercase.
                sys.stdout.write("<varlistentry>\n<term>" + ' '.join(stmt[0:2])
                                 + "</term>\n")
                word = 'S'
                stmt = stmt[1:]
            elif not re.search("[A-Z]", word[0]):
                sys.stdout.write("<varlistentry>\n<term>" + line + "</term>\n")
            else:
                if not stmt[0].startswith('-'):
                    sys.stdout.write("<varlistentry xml:id=\"dws." \
                        + stmt[0] + "\">\n")
                else:
                    sys.stdout.write("<varlistentry>\n")
                sys.stdout.write("<term>" + stmt[0] + "</term>\n")
            sys.stdout.write("<listitem>\n")
            sys.stdout.write("<para>\n")
            if re.search("[A-Z]", word[0]):
                sys.stdout.write(' '.join(stmt[1:]) + '\n')
            else:
                sys.stdout.write(line + '\n')
    # Close whatever entry/section is still open, then the refentry.
    if not first_term:
        sys.stdout.write("</para>\n")
        sys.stdout.write("</listitem>\n")
        sys.stdout.write("</varlistentry>\n")
    if not first_section:
        sys.stdout.write("</variablelist>\n")
        sys.stdout.write("</refsection>\n")
    sys.stdout.write("</refentry>\n")
def lib_prefix():
    '''Returns the prefix for library names.'''
    # Cygwin libraries carry no "lib" prefix.
    return { 'Cygwin': '' }.get(CONTEXT.host(), 'lib')
def lib_static_suffix():
    '''Returns the suffix for static library names.'''
    # No known host overrides the default '.a' suffix.
    return {}.get(CONTEXT.host(), '.a')
def lib_dyn_suffix():
    '''Returns the suffix for dynamic library names.'''
    return {
        'Cygwin': '.dll',
        'Darwin': '.dylib',
    }.get(CONTEXT.host(), '.so')
def link_prerequisites(files, versions=None, target=None):
    '''All projects which are dependencies but are not part of *srcTop*
    are not under development in the current workspace. Links to the
    required executables, headers, libraries, etc. are added to the
    install directories such that projects in *srcTop* can build.
    *excludes* is a list of versions to exclude.'''
    # find_prerequisites only needs to be rerun when some link in
    # [bin|include|lib|...]Dir does not exist and its pathname in
    # build_deps is not an absolute path.
    complete = True
    for dirname in INSTALL_DIRS:
        if dirname in files:
            for name_pat, absolute_path in files[dirname]:
                complete &= link_pat_path(name_pat, absolute_path,
                                          dirname, target)
    if not complete:
        files, complete = find_prerequisites(files, versions, target)
        if complete:
            # Re-create the links now that prerequisites were found.
            for dirname in INSTALL_DIRS:
                if dirname in files:
                    for name_pat, absolute_path in files[dirname]:
                        complete &= link_pat_path(
                            name_pat, absolute_path, dirname, target)
    return files, complete
def link_context(path, link_name):
    '''link a *path* into the workspace.'''
    if not path:
        log_error('There is no target for link ' + link_name + '\n')
        return
    if os.path.realpath(path) == os.path.realpath(link_name):
        # Nothing to do: the link already points to the target.
        return
    link_dir = os.path.dirname(link_name)
    if not os.path.exists(link_dir):
        os.makedirs(link_dir)
    # Be very careful to only remove/update symlinks and leave other
    # files present in [bin|lib|...]Dir 'as is'.
    if os.path.islink(link_name):
        os.remove(link_name)
    if not os.path.exists(link_name) and os.path.exists(path):
        os.symlink(path, link_name)
def link_build_name(name_pat, subdir, target=None):
    '''Derive the pathname of the link to create under *buildTop* for a
    file matching *name_pat*, plus the suffix matched past the group.

    The library link name is normalized so that make's default
    .LIBPATTERNS and search paths apply, avoiding complex prefix/suffix
    variable substitution logic in Makefiles.'''
    suffix = ''
    regex = name_pat_regex(name_pat)
    if regex.groups == 0:
        # Plain pattern: the link is named after the last path component.
        unescaped = name_pat.replace('\\', '')
        parts = unescaped.split(os.sep)
        name = parts[-1] if len(parts) > 0 else unescaped
    else:
        grouped = re.search(r'\((.+)\)', name_pat)
        name = grouped.group(1)
        if '|' in name:
            name = name.split('|')[0]
        # XXX +1 skips ')', +2 skips '/'
        suffix = name_pat[grouped.end(1) + 2:]
    subpath = os.path.join(target, subdir) if target else subdir
    link_build = os.path.join(CONTEXT.value('buildTop'), subpath, name)
    return link_build, suffix
def link_pat_path(name_pat, absolute_path, subdir, target=None):
    '''Create a link in the build directory.

    *name_pat* is the file name pattern the prerequisite was searched
    with, *absolute_path* where it was actually found (or None).
    Returns True when the link exists or was created.'''
    link_path = absolute_path
    ext = ''
    if absolute_path:
        _, ext = os.path.splitext(absolute_path)
    subpath = subdir
    if target:
        subpath = os.path.join(target, subdir)
    # Strip an explicit library extension off the pattern; the link name
    # is rebuilt from the extension of the file actually found.
    if name_pat.endswith('.a') or name_pat.endswith('.so'):
        name_pat, _ = os.path.splitext(name_pat)
    if ext == lib_static_suffix():
        name = 'lib' + name_pat + '.a'
        link_name = os.path.join(CONTEXT.value('buildTop'), subpath, name)
    elif ext == lib_dyn_suffix():
        name = 'lib' + name_pat + '.so'
        link_name = os.path.join(CONTEXT.value('buildTop'), subpath, name)
    else:
        # \todo if the dynamic lib suffix ends with .so.X we will end-up here.
        # This is wrong since at that time we won't create a lib*name*.so link.
        link_name, suffix = link_build_name(name_pat, subdir, target)
        if absolute_path and len(suffix) > 0 and absolute_path.endswith(suffix):
            # Interestingly absolute_path[:-0] returns an empty string.
            link_path = absolute_path[:-len(suffix)]
    # create links
    complete = True
    if link_path:
        if not os.path.isfile(link_name):
            link_context(link_path, link_name)
    else:
        # No file found for this prerequisite: the link set is incomplete.
        if not os.path.isfile(link_name):
            complete = False
    return complete
def localize_context(context, name, target):
    '''Create the environment in *buildTop* necessary to make a project
    from source.

    When *target* is set, a per-target sub-context is created (or
    loaded) under buildTop/*target*; otherwise the passed *context* is
    used as is. The current directory is switched to the project's
    object directory. Returns the (possibly new) context.'''
    if target:
        local_context = Context()
        local_context.environ['buildTop'] \
            = os.path.join(context.value('buildTop'), target)
        local_context.config_filename \
            = os.path.join(local_context.value('buildTop'), context.config_name)
        if os.path.exists(local_context.config_filename):
            # Reuse the previously saved per-target configuration.
            local_context.locate(local_context.config_filename)
        else:
            local_context.environ['srcTop'] = context.value('srcTop')
            local_context.environ['siteTop'] = context.value('siteTop')
            local_context.environ['installTop'].default \
                = os.path.join(context.value('installTop'), target)
            local_context.save()
    else:
        local_context = context
    obj_dir = context.obj_dir(name)
    if obj_dir != os.getcwd():
        if not os.path.exists(obj_dir):
            os.makedirs(obj_dir)
        os.chdir(obj_dir)
    # prefix.mk and suffix.mk expects these variables to be defined
    # in the workspace make fragment. If they are not you might get
    # some strange errors where a g++ command-line appears with
    # -I <nothing> or -L <nothing> for example.
    # This code was moved to be executed right before the issue
    # of a "make" subprocess in order to let the project index file
    # a change to override defaults for installTop, etc.
    for dir_name in [ 'include', 'lib', 'bin', 'etc', 'share' ]:
        # NOTE(review): value() is presumably called for its side effect
        # of defining the *Dir variables; the result (which clobbers the
        # *name* parameter) is unused — confirm.
        name = local_context.value(dir_name + 'Dir')
    # \todo save local context only when necessary
    local_context.save()
    return local_context
def merge_unique(left, right):
    '''Merge a list of additions into a previously existing list.
    Elements of *right* not already present in *left* are appended,
    in order, to the end of *left* (which is mutated and returned).'''
    for element in right:
        if element not in left:
            left.append(element)
    return left
def merge_build_conf(db_prev, db_upd, parser):
    '''Merge an updated project dependency database into an existing
    project dependency database. The existing database has been
    augmented by user-supplied information such as "use source
    controlled repository", "skip version X dependency", etc. Hence
    we do a merge instead of a complete replace.

    Both inputs are open file objects (or None); the merged result is
    returned as a TemporaryFile positioned wherever the parser left it.'''
    if db_prev == None:
        return db_upd
    elif db_upd == None:
        return db_prev
    else:
        # We try to keep user-supplied information in the prev
        # database whenever possible.
        # Both databases supply packages in alphabetical order,
        # so the merge can be done in a single pass.
        db_next = tempfile.TemporaryFile()
        proj_prev = parser.copy(db_next, db_prev)
        proj_upd = parser.next(db_upd)
        while proj_prev != None and proj_upd != None:
            if proj_prev < proj_upd:
                parser.start_project(db_next, proj_prev)
                proj_prev = parser.copy(db_next, db_prev)
            elif proj_prev > proj_upd:
                parser.start_project(db_next, proj_upd)
                proj_upd = parser.copy(db_next, db_upd)
            elif proj_prev == proj_upd:
                # when names are equals, we need to import user-supplied
                # information as appropriate. For now, there are only one
                # user supplied-information, the install mode for the package.
                # Package name is a unique key so we can increment
                # both iterators.
                parser.start_project(db_next, proj_upd)
                #installMode, version = parser.installMode(proj_prev)
                #parser.setInstallMode(db_next,installMode,version)
                # It is critical this line appears after we set the installMode
                # because it guarentees that the install mode will always be
                # the first line after the package tag.
                proj_upd = parser.copy(db_next, db_upd, True)
                proj_prev = parser.copy(db_next, db_prev)
        # Drain whichever database still has projects left.
        while proj_prev != None:
            parser.start_project(db_next, proj_prev)
            proj_prev = parser.copy(db_next, db_prev)
        while proj_upd != None:
            parser.start_project(db_next, proj_upd)
            proj_upd = parser.copy(db_next, db_upd)
        parser.trailer(db_next)
        return db_next
def upload(filenames, cache_dir=None):
    '''upload *filenames*, typically a list of result logs, to the
    remote server. See the fetch function for downloading files from
    the remote server.'''
    remote_cache_path = CONTEXT.remote_dir(CONTEXT.log_path(''))
    # Relative paths (-R) only make sense when no cache dir is forced.
    cmdline, _ = find_rsync(CONTEXT, CONTEXT.remote_host(), not cache_dir)
    shell_command(cmdline + [' '.join(filenames), remote_cache_path])
def createmail(subject, filenames=None):
    '''Returns an e-mail (as a string) with *filenames* as attachments.
    '''
    from email.mime.text import MIMEText
    from email.mime.multipart import MIMEMultipart
    msg = MIMEMultipart()
    msg['Subject'] = subject
    msg['From'] = CONTEXT.value('dwsEmail')
    msg.preamble = 'The contents of %s' % ', '.join(filenames)
    for attachment in list(filenames):
        with open(attachment, 'rb') as attached_file:
            part = MIMEText(attached_file.read())
        part.add_header('Content-Disposition', 'attachment',
                        filename=os.path.basename(attachment))
        msg.attach(part)
    return msg.as_string()
def sendmail(msgtext, dests):
    '''Send a formatted email *msgtext* through the default smtp server.

    *dests* is the list of recipient addresses. When *smtpHost* is
    localhost, the smtp port is probed first; if nothing answers, the
    server is assumed to be reachable through a ssh tunnel which is
    then created.'''
    if len(dests) > 0:
        if CONTEXT.value('smtpHost') == 'localhost':
            try:
                # Probe the smtp port to check a server is listening.
                session = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                # BUG FIX: socket.connect takes a single (host, port)
                # address tuple; passing host and port as two positional
                # arguments raises TypeError, which `except socket.error`
                # would not catch.
                # NOTE(review): smtpPort may be a string and need int() —
                # confirm against the Context configuration.
                session.connect(
                    (CONTEXT.value('smtpHost'), CONTEXT.value('smtpPort')))
                session.shutdown(2)
            except socket.error:
                # Can't connect to that port on local host, we will thus assume
                # we are accessing the smtp server through a ssh tunnel.
                ssh_tunnels(CONTEXT.tunnel_point,
                            [ CONTEXT.value('smtpPort')[:-1] ])
        import smtplib
        # Send the message via our own SMTP server, but don't include the
        # envelope header.
        session = smtplib.SMTP(
            CONTEXT.value('smtpHost'), CONTEXT.value('smtpPort'))
        session.set_debuglevel(1)
        session.ehlo()
        session.starttls()
        session.ehlo()
        session.login(
            CONTEXT.value('dwsSmtpLogin'), CONTEXT.value('dwsSmtpPasswd'))
        session.sendmail(CONTEXT.value('dwsEmail'), dests,
                         'To:' + ', '.join(dests) + '\r\n' + msgtext)
        session.close()
def search_back_to_root(filename, root=os.sep):
    '''Search recursively from the current directory up to *root* of
    the directory hierarchy for a specified *filename*.
    Returns the relative path from *filename* to pwd and the absolute
    path to *filename* if found; raises IOError otherwise.'''
    cur_dir = os.getcwd()
    trail = []
    # Walk up, remembering each directory name we climb out of.
    while not os.path.samefile(cur_dir, root):
        if os.path.isfile(os.path.join(cur_dir, filename)):
            break
        trail.insert(0, os.path.basename(cur_dir))
        cur_dir = os.path.dirname(cur_dir)
    candidate = os.path.join(cur_dir, filename)
    if not os.path.isfile(candidate):
        raise IOError(1, "cannot find file", filename)
    dirname = os.path.join(*trail) if trail else '.'
    return dirname, candidate
def shell_command(execute, admin=False, search_path=None, pat=None):
    '''Execute a shell command and throws an exception when the command fails.
    sudo is used when *admin* is True.
    the text output is filtered and returned when pat exists.

    *execute* is a list of command words; *search_path* replaces PATH
    in the subprocess environment when given.
    '''
    filtered_output = []
    if admin:
        if False:
            # \todo cannot do this simple check because of a shell variable
            # setup before call to apt-get.
            if not execute.startswith('/'):
                raise Error("admin command without a fully quaified path: " \
                    + execute)
        # ex: su username -c 'sudo port install icu'
        cmdline = [ '/usr/bin/sudo' ]
        if USE_DEFAULT_ANSWER:
            # Error out if sudo prompts for a password because this should
            # never happen in non-interactive mode.
            if ASK_PASS:
                # XXX Workaround while sudo is broken
                # http://groups.google.com/group/comp.lang.python/\
                # browse_thread/thread/4c2bb14c12d31c29
                cmdline = [ 'SUDO_ASKPASS="' + ASK_PASS + '"' ] \
                    + cmdline + [ '-A' ]
            else:
                cmdline += [ '-n' ]
        cmdline += execute
    else:
        cmdline = execute
    log_info(' '.join(cmdline))
    if not DO_NOT_EXECUTE:
        env = os.environ.copy()
        if search_path:
            env['PATH'] = ':'.join(search_path)
        cmd = subprocess.Popen(' '.join(cmdline),
                               shell=True,
                               env=env,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               close_fds=True)
        # Stream output line by line, logging everything and keeping the
        # lines that match *pat*.
        # NOTE(review): readline() returns bytes under python3, so the
        # `!= ''` sentinel and re.match only work under python2 — confirm
        # before porting.
        line = cmd.stdout.readline()
        while line != '':
            if pat and re.match(pat, line):
                filtered_output += [ line ]
            log_info(line[:-1])
            line = cmd.stdout.readline()
        cmd.wait()
        if cmd.returncode != 0:
            raise Error("unable to complete: " + ' '.join(cmdline) \
                + '\n' + '\n'.join(filtered_output),
                cmd.returncode)
    return filtered_output
def sort_build_conf_list(db_pathnames, parser):
    '''Sort/Merge projects defined in a list of files, *db_pathnames*.
    *parser* is the parser used to read the projects files in.
    Returns an open file object on the merged result, or None.'''
    if len(db_pathnames) == 0:
        return None
    if len(db_pathnames) == 1:
        return open(db_pathnames[0])
    if len(db_pathnames) == 2:
        db_prev = open(db_pathnames[0])
        db_upd = open(db_pathnames[1])
    else:
        # Divide-and-conquer: merge each half recursively.
        half = len(db_pathnames) / 2
        db_prev = sort_build_conf_list(db_pathnames[:half], parser)
        db_upd = sort_build_conf_list(db_pathnames[half:], parser)
    db_next = merge_build_conf(db_prev, db_upd, parser)
    db_next.seek(0)
    db_prev.close()
    db_upd.close()
    return db_next
def ssh_tunnels(hostname, ports):
    '''Create ssh tunnels from localhost to a remote host when they don't
    already exist.'''
    if len(ports) == 0:
        return
    # Collect running ssh processes.
    output = subprocess.check_output('ps xwww', shell=True,
                                     stderr=subprocess.STDOUT)
    connections = [line for line in output.splitlines()
                   if re.match('ssh', line)]
    tunnels = []
    for port in ports:
        # Local port is the remote port with a '0' appended.
        tunnel = port + '0:localhost:' + port
        already_up = False
        for connection in connections:
            if re.match(tunnel, connection):
                already_up = True
                break
        if not already_up:
            tunnels += ['-L', tunnel]
    if len(tunnels) > 0:
        err = os.system(' '.join(['ssh', '-fN'] + tunnels + [hostname]))
        if err:
            raise Error("attempt to create ssh tunnels to " \
                + hostname + " failed.")
def validate_controls(dgen, dbindex,
                      graph=False, priorities = [ 1, 2, 3, 4, 5, 6, 7 ]):
    '''Checkout source code files, install packages such that
    the projects specified in *repositories* can be built.
    *dbindex* is the project index that contains the dependency
    information to use. If None, the global index fetched from
    the remote machine will be used.
    This function returns a topologicaly sorted list of projects
    in *srcTop* and an associated dictionary of Project instances.
    By iterating through the list, it is possible to 'make'
    each prerequisite project in order.'''
    dbindex.validate()
    global ERRORS
    # Add deep dependencies
    vertices = dbindex.closure(dgen)
    if graph:
        # Dump the dependency graph in graphviz .dot format next to the log.
        gph_filename = os.path.splitext(CONTEXT.logname())[0] + '.dot'
        gph_file = open(gph_filename,'w')
        gph_file.write("digraph structural {\n")
        for vertex in vertices:
            for project in vertex.prerequisites:
                gph_file.write(
                    "\t%s -> %s;\n" % (vertex.name, project.name))
        gph_file.write("}\n")
        gph_file.close()
    while len(vertices) > 0:
        # Group consecutive steps of the same class; steps supporting
        # 'insert' are coalesced into the first one.
        first = vertices.pop(0)
        glob = [ first ]
        while len(vertices) > 0:
            vertex = vertices.pop(0)
            if vertex.__class__ != first.__class__:
                vertices.insert(0, vertex)
                break
            if 'insert' in dir(first):
                first.insert(vertex)
            else:
                glob += [ vertex ]
        # \todo "make recurse" should update only projects which are missing
        # from *srcTop* and leave other projects in whatever state they are in.
        # This is different from "build" which should update all projects.
        if first.priority in priorities:
            for vertex in glob:
                errcode = 0
                elapsed = 0
                log_header(vertex.name)
                start = datetime.datetime.now()
                try:
                    vertex.run(CONTEXT)
                    finish = datetime.datetime.now()
                    elapsed = elapsed_duration(start, finish)
                except Error, err:
                    if True:
                        import traceback
                        traceback.print_exc()
                    errcode = err.code
                    ERRORS += [ str(vertex) ]
                    if dgen.stop_make_after_error:
                        finish = datetime.datetime.now()
                        elapsed = elapsed_duration(start, finish)
                        log_footer(vertex.name, elapsed, errcode)
                        raise err
                    else:
                        log_error(str(err))
                log_footer(vertex.name, elapsed, errcode)
    nb_updated_projects = len(UpdateStep.updated_sources)
    if nb_updated_projects > 0:
        log_info('%d updated project(s).' % nb_updated_projects)
    else:
        log_info('all project(s) are up-to-date.')
    return nb_updated_projects
def version_candidates(line):
    '''Extract patterns from *line* that could be interpreted as
    version numbers. That is every pattern that is a set of digits
    separated by dots and/or underscores.

    Returns the list of version strings in order of appearance.'''
    part = line
    candidates = []
    while part != '':
        # numbers should be full, i.e. including '.'
        look = re.match(r'[^0-9]*([0-9].*)', part)
        if look:
            part = look.group(1)
            look = re.match(r'[^0-9]*([0-9]+([_\.][0-9]+)+)+(.*)', part)
            if look:
                candidates += [ look.group(1) ]
                # BUG FIX: continue scanning the remainder of the line
                # (group(3)); the previous code used group(2) — the last
                # separator/digit pair — which discarded the rest of the
                # line and thus found at most one version per line.
                part = look.group(3)
            else:
                # A bare integer is not a version number; skip its digits.
                while (len(part) > 0
                       and part[0] in ['0', '1', '2', '3', '4', '5',
                                       '6', '7', '8', '9' ]):
                    part = part[1:]
        else:
            part = ''
    return candidates
def version_compare(left, right):
    '''Compare version numbers.

    This function returns -1 if a *left* is less than *right*, 0 if *left*
    is equal to *right* and 1 if *left* is greater than *right*.
    It is suitable as a custom comparaison function for sorted().'''
    left_remain = left.replace('_', '.').split('.')
    right_remain = right.replace('_', '.').split('.')
    while len(left_remain) > 0 and len(right_remain) > 0:
        left_num = left_remain.pop(0)
        right_num = right_remain.pop(0)
        # BUG FIX: compare numerically when both components are digits;
        # string comparison would sort '10' before '9'. Non-numeric
        # components fall back to the original string comparison.
        if left_num.isdigit() and right_num.isdigit():
            left_num = int(left_num)
            right_num = int(right_num)
        if left_num < right_num:
            return -1
        elif left_num > right_num:
            return 1
    # All shared components are equal; the longer version is greater.
    if len(left_remain) < len(right_remain):
        return -1
    elif len(left_remain) > len(right_remain):
        return 1
    return 0
def version_incr(ver_num):
    '''Returns the version number with the smallest increment
    that is greater than *ver_num*.'''
    # Appending a new least-significant component is the smallest
    # possible increase.
    return '%s.1' % ver_num
def build_subcommands_parser(parser, module):
    '''Returns a parser for the subcommands defined in the *module*
    (i.e. commands starting with a 'pub_' prefix).

    Positional arguments of each pub_* function become positional
    command-line arguments (the last one greedy), keyword arguments
    become options whose action depends on the default value.'''
    mdefs = module.__dict__
    # NOTE(review): keys() returning a sortable list is python2-only;
    # python3 would need sorted(mdefs) — confirm before porting.
    keys = mdefs.keys()
    keys.sort()
    subparsers = parser.add_subparsers(help='sub-command help')
    for command in keys:
        if command.startswith('pub_'):
            func = module.__dict__[command]
            parser = subparsers.add_parser(command[4:], help=func.__doc__)
            parser.set_defaults(func=func)
            argspec = inspect.getargspec(func)
            # *flags* is the number of positional (non-defaulted) args.
            flags = len(argspec.args)
            if argspec.defaults:
                flags = len(argspec.args) - len(argspec.defaults)
            if flags >= 1:
                for arg in argspec.args[:flags - 1]:
                    parser.add_argument(arg)
                # The last positional argument swallows the remainder.
                parser.add_argument(argspec.args[flags - 1], nargs='*')
            for idx, arg in enumerate(argspec.args[flags:]):
                # Map the default value's type to an argparse action.
                if isinstance(argspec.defaults[idx], list):
                    parser.add_argument('-%s' % arg[0], '--%s' % arg,
                        action='append')
                elif argspec.defaults[idx] is False:
                    parser.add_argument('-%s' % arg[0], '--%s' % arg,
                        action='store_true')
                else:
                    parser.add_argument('-%s' % arg[0], '--%s' % arg)
def filter_subcommand_args(func, options):
    '''Filter out all options which are not part of the function *func*
    prototype and returns a set that can be used as kwargs for calling func.'''
    argspec = inspect.getargspec(func)
    # Keep only the options that match a named parameter of *func*.
    return dict([(arg, getattr(options, arg))
                 for arg in argspec.args if arg in options])
def integrate(srcdir, pchdir, verbose=True):
    '''Replaces files in srcdir with links to files in pchdir
    for all files that match in the directory hierarchy.'''
    for entry in os.listdir(pchdir):
        srcname = os.path.join(srcdir, entry)
        pchname = os.path.join(pchdir, entry)
        if (os.path.isdir(pchname)
            and not re.match(Repository.dirPats, os.path.basename(entry))):
            # Recurse into matching subdirectories.
            integrate(srcname, pchname, verbose)
            continue
        # Skip backup files and files already patched into links.
        if entry.endswith('~') or os.path.islink(srcname):
            continue
        if verbose:
            # Use sys.stdout and not log as the integrate command
            # will mostly be emitted from a Makefile and thus
            # trigger a "recursive" call to dws. We thus do not
            # want nor need to open a new log file.
            sys.stdout.write(srcname + '... patched\n')
        # Change directory such that relative paths are computed
        # correctly.
        prev = os.getcwd()
        dirname = os.path.dirname(srcname)
        basename = os.path.basename(srcname)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        os.chdir(dirname)
        if os.path.exists(basename):
            # Keep the original around as a backup.
            shutil.move(basename, basename + '~')
        os.symlink(os.path.relpath(pchname), basename)
        os.chdir(prev)
def wait_until_ssh_up(hostname,
                      login=None, keyfile=None, port=None, timeout=120):
    '''wait until an ssh connection can be established to *hostname*
    or the attempt timed out after *timeout* seconds.'''
    cmdline = ['ssh',
               '-v',
               '-o', 'ConnectTimeout 30',
               '-o', 'BatchMode yes',
               '-o', 'StrictHostKeyChecking no' ]
    if port:
        cmdline += [ '-p', str(port) ]
    if keyfile:
        cmdline += [ '-i', keyfile ]
    ssh_connect = login + '@' + hostname if login else hostname
    cmdline += [ ssh_connect, 'echo' ]
    is_up = False
    waited = 0
    while not is_up and waited <= timeout:
        try:
            subprocess.check_call(cmdline)
            is_up = True
        except subprocess.CalledProcessError:
            # ConnectTimeout above makes each failed attempt last ~30s.
            waited = waited + 30
            sys.stdout.write("waiting 30 more seconds (" \
                + str(waited) + " so far)...\n")
    if waited > timeout:
        raise Error("ssh connection attempt to " + hostname + " timed out.")
def prompt(message):
    '''Display *message* and return the line entered by the user.

    If the script is run through a ssh command, the message would not
    appear if passed directly in raw_input, hence it is written (and
    flushed) through log_interactive first.'''
    log_interactive(message)
    return raw_input("")
def log_init():
    '''Initialize the module logging configuration (idempotent).

    Sets up two WatchedFileHandler-backed loggers: one for this module
    writing to CONTEXT.logname(), one named 'build' writing to
    CONTEXT.logbuildname().'''
    global LOGGER
    if not LOGGER:
        logging.config.dictConfig({
            'version': 1,
            'disable_existing_loggers': False,
            'formatters': {
                'simple': {
                    'format': '[%(asctime)s] [%(levelname)s] %(message)s',
                    'datefmt': '%d/%b/%Y:%H:%M:%S %z'
                },
            },
            'handlers': {
                'logfile':{
                    'level': 'INFO',
                    'class':'logging.handlers.WatchedFileHandler',
                    'filename': CONTEXT.logname(),
                    'formatter': 'simple'
                },
                'logbuild':{
                    'level': 'INFO',
                    'class':'logging.handlers.WatchedFileHandler',
                    'filename': CONTEXT.logbuildname(),
                    'formatter': 'simple'
                },
            },
            'loggers': {
                __name__: {
                    'handlers': [ 'logfile' ],
                    'level': 'INFO',
                    'propagate': True,
                },
                'build': {
                    'handlers': [ 'logbuild' ],
                    'level': 'INFO',
                    'propagate': True,
                }
            },
        })
        LOGGER = logging.getLogger(__name__)
def log_footer(prefix, elapsed=datetime.timedelta(), errcode=0):
    '''Write a footer into the log file.'''
    if NO_LOG:
        return
    if not LOGGER:
        log_init()
    if errcode > 0:
        LOGGER.info('%s: error (%d) after %s' % (prefix, errcode, elapsed))
    else:
        LOGGER.info('%s: completed in %s' % (prefix, elapsed))
def log_header(message, *args, **kwargs):
    '''Write a header onto stdout and into the log file.'''
    banner = '######## ' + message + '...'
    sys.stdout.write(banner + '\n')
    if NO_LOG:
        return
    if not LOGGER:
        log_init()
    LOGGER.info(banner)
def log_error(message, *args, **kwargs):
    '''Write an error message onto stderr and into the log file.'''
    sys.stderr.write('error: ' + message)
    if NO_LOG:
        return
    if not LOGGER:
        log_init()
    LOGGER.error(message, *args, **kwargs)
def log_interactive(message):
    '''Print *message* immediately, flushed, without requiring a trailing
    newline, and buffer it so it also ends up in the log file.'''
    sys.stdout.write(message)
    sys.stdout.flush()
    if NO_LOG:
        return
    global LOGGER_BUFFER
    if not LOGGER_BUFFER:
        LOGGER_BUFFER = cStringIO.StringIO()
    LOGGER_BUFFER.write(message)
def log_info(message, *args, **kwargs):
    '''Write an info message onto stdout and into the log file.

    While LOGGER_BUFFERING_COUNT is positive (i.e. before it is safe to
    create the log file, see pub_build), messages are accumulated in
    LOGGER_BUFFER; the buffer is flushed into LOGGER on the first call
    made after buffering ends.
    '''
    # Bug fix: the previous code computed `message + '\n' % args`, where
    # '%' binds tighter than '+', so `'\n' % args` was evaluated alone and
    # raised TypeError as soon as positional *args* were passed (e.g. from
    # select_multiple's `log_info("%d) done", ...)` call). Format the
    # message once, up front, with whichever of args/kwargs was supplied.
    if args:
        formatted = message % args
    elif kwargs:
        formatted = message % kwargs
    else:
        formatted = message
    sys.stdout.write(formatted + '\n')
    if not NO_LOG:
        global LOGGER_BUFFER
        if LOGGER_BUFFERING_COUNT > 0:
            if not LOGGER_BUFFER:
                LOGGER_BUFFER = cStringIO.StringIO()
            LOGGER_BUFFER.write(formatted + '\n')
        else:
            if not LOGGER:
                log_init()
            if LOGGER_BUFFER:
                # Flush the backlog accumulated while buffering.
                LOGGER_BUFFER.write(formatted + '\n')
                for line in LOGGER_BUFFER.getvalue().splitlines():
                    LOGGER.info(line)
                LOGGER_BUFFER = None
            else:
                LOGGER.info(message, *args, **kwargs)
def pub_build(args, graph=False, noclean=False):
    '''remoteIndex [ siteTop [ buildTop ] ]
    This command executes a complete build cycle:
      - (optional) delete all files in *siteTop*,
        *buildTop* and *installTop*.
      - fetch the build dependency file *remoteIndex*
      - setup third-party prerequisites through
        the local package manager.
      - update a local source tree from remote
        repositories
      - (optional) apply local patches
      - configure required environment variables
      - make libraries, executables and tests.
      - (optional) send a report email.
    As such, this command is most useful as part
    of a cron job on build servers. Thus it is designed
    to run to completion with no human interaction.
    To be really useful in an automatic build system,
    authentication to the remote server (if required)
    should also be setup to run with no human
    interaction.
    ex: dws build http://hostname/everything.git
    --graph        Generate a .dot graph of
                   the dependencies
    --noclean      Do not remove any directory before
                   executing a build command.
    '''
    # Non-interactive mode: every prompt falls back to its default answer.
    global USE_DEFAULT_ANSWER
    USE_DEFAULT_ANSWER = True
    CONTEXT.from_remote_index(args[0])
    # siteTop: second positional argument, or cwd/<remoteIndex basename>.
    if len(args) > 1:
        site_top = os.path.abspath(args[1])
    else:
        site_top = os.path.join(os.getcwd(), CONTEXT.base('remoteIndex'))
    CONTEXT.environ['siteTop'].value = site_top
    if not noclean:
        # We don't want to remove the log we just created
        # so we buffer until it is safe to flush.
        global LOGGER_BUFFERING_COUNT
        LOGGER_BUFFERING_COUNT = LOGGER_BUFFERING_COUNT + 1
    if len(args) > 2:
        CONTEXT.environ['buildTop'].value = args[2]
    else:
        # Can't call *configure* before *locate*, otherwise config_filename
        # is set to be inside the buildTop on the first save.
        CONTEXT.environ['buildTop'].value = os.path.join(site_top, 'build')
    build_top = str(CONTEXT.environ['buildTop'])
    prevcwd = os.getcwd()
    if not os.path.exists(build_top):
        os.makedirs(build_top)
    os.chdir(build_top)
    CONTEXT.locate()
    if not str(CONTEXT.environ['installTop']):
        CONTEXT.environ['installTop'].configure(CONTEXT)
    install_top = str(CONTEXT.environ['installTop'])
    if not noclean:
        # First we backup everything in siteTop, buildTop and installTop
        # as we are about to remove those directories - just in case.
        tardirs = []
        for path in [site_top, build_top, install_top]:
            if os.path.isdir(path):
                tardirs += [ path ]
        if len(tardirs) > 0:
            prefix = os.path.commonprefix(tardirs)
            tarname = os.path.basename(site_top) + '-' + stamp() + '.tar.bz2'
            if os.path.samefile(prefix, site_top):
                # optimize common case: *buildTop* and *installTop* are within
                # *siteTop*. We cd into the parent directory to create the tar
                # in order to avoid 'Removing leading /' messages. Those do
                # not display the same on Darwin and Ubuntu, creating false
                # positive regressions between both systems.
                shell_command(['cd', os.path.dirname(site_top),
                               '&&', 'tar', 'jcf', tarname,
                               os.path.basename(site_top) ])
            else:
                shell_command(['cd', os.path.dirname(site_top),
                               '&&', 'tar', 'jcf', tarname ] + tardirs)
        os.chdir(prevcwd)
        for dirpath in [ build_top, install_top]:
            # we only remove build_top and installTop. Can neither be too
            # prudent.
            if os.path.isdir(dirpath):
                # Test directory exists, in case it is a subdirectory
                # of another one we already removed.
                sys.stdout.write('removing ' + dirpath + '...\n')
                shutil.rmtree(dirpath)
        if not os.path.exists(build_top):
            os.makedirs(build_top)
        os.chdir(build_top)
        # Safe to create the log file now that cleanup is done.
        LOGGER_BUFFERING_COUNT = LOGGER_BUFFERING_COUNT - 1
    rgen = DerivedSetsGenerator()
    # If we do not force the update of the index file, the dependency
    # graph might not reflect the latest changes in the repository server.
    INDEX.validate(True)
    INDEX.parse(rgen)
    # note that *EXCLUDE_PATS* is global.
    dgen = BuildGenerator(rgen.roots, [], EXCLUDE_PATS)
    CONTEXT.targets = [ 'install' ]
    # Set the buildstamp that will be use by all "install" commands.
    if not 'buildstamp' in CONTEXT.environ:
        CONTEXT.environ['buildstamp'] = '-'.join([socket.gethostname(),
                                            stamp(datetime.datetime.now())])
    CONTEXT.save()
    if validate_controls(dgen, INDEX, graph=graph):
        # Once we have built the repository, let's report the results
        # back to the remote server. We stamp the logfile such that
        # it gets a unique name before uploading it.
        logstamp = stampfile(CONTEXT.logname())
        if not os.path.exists(os.path.dirname(CONTEXT.log_path(logstamp))):
            os.makedirs(os.path.dirname(CONTEXT.log_path(logstamp)))
        if LOGGER:
            for handler in LOGGER.handlers:
                handler.flush()
        shell_command(['install', '-m', '644', CONTEXT.logname(),
                      CONTEXT.log_path(logstamp)])
        logging.getLogger('build').info(
            'build %s'% str(UpdateStep.updated_sources))
        # Remember the stamped log name so main() can mail the report.
        look = re.match(r'.*(-.+-\d\d\d\d_\d\d_\d\d-\d\d\.log)', logstamp)
        global LOG_PAT
        LOG_PAT = look.group(1)
    if len(ERRORS) > 0:
        raise Error("Found errors while making " + ' '.join(ERRORS))
def pub_collect(args, output=None):
    '''[ project ... ]
    Consolidate local dependencies information
    into a global dependency database. Copy all
    distribution packages built into a platform
    distribution directory.
    (example: dws --exclude test collect)
    '''
    # Collect cannot log or it will prompt for index file.
    # Directories under which to search for project index files.
    roots = []
    if len(args) > 0:
        for dir_name in args:
            roots += [ os.path.join(CONTEXT.value('srcTop'), dir_name) ]
    else:
        roots = [ CONTEXT.value('srcTop') ]
    # Name of the output index file generated by collect commands.
    collected_index = output
    if not collected_index:
        collected_index = CONTEXT.db_pathname()
    else:
        collected_index = os.path.abspath(collected_index)
    # Create the distribution directory, i.e. where packages are stored.
    package_dir = CONTEXT.local_dir('./resources/' + CONTEXT.host())
    if not os.path.exists(package_dir):
        os.makedirs(package_dir)
    src_package_dir = CONTEXT.local_dir('./resources/srcs')
    if not os.path.exists(src_package_dir):
        os.makedirs(src_package_dir)
    # Create the project index file
    # and copy the packages in the distribution directory.
    # Per-platform (index suffix, binary package suffix) patterns.
    extensions = { 'Darwin': (r'\.dsx', r'\.dmg'),
                   'Fedora': (r'\.spec', r'\.rpm'),
                   'Debian': (r'\.dsc', r'\.deb'),
                   'Ubuntu': (r'\.dsc', r'\.deb')
                 }
    # collect index files and packages
    indices = []
    for root in roots:
        pre_exclude_indices = find_files(root, CONTEXT.indexName)
        for index in pre_exclude_indices:
            # We exclude any project index files that has been determined
            # to be irrelevant to the collection being built.
            found = False
            if index == collected_index:
                found = True
            else:
                for exclude_pat in EXCLUDE_PATS:
                    if re.match('.*' + exclude_pat + '.*', index):
                        found = True
                        break
            if not found:
                indices += [ index ]
    pkg_indices = []
    cpy_src_packages = None
    copy_bin_packages = None
    if str(CONTEXT.environ['buildTop']):
        # If there are no build directory, then don't bother to look
        # for built packages and avoid prompting for an unnecessary value
        # for buildTop.
        for index in indices:
            buildr = os.path.dirname(index.replace(CONTEXT.value('buildTop'),
                                                   CONTEXT.value('srcTop')))
            src_packages = find_files(buildr, '.tar.bz2')
            if len(src_packages) > 0:
                cmdline, prefix = find_rsync(CONTEXT, CONTEXT.remote_host())
                cpy_src_packages = cmdline + [ ' '.join(src_packages),
                                              src_package_dir]
            if CONTEXT.host() in extensions:
                ext = extensions[CONTEXT.host()]
                pkg_indices += find_files(buildr, ext[0])
                bin_packages = find_files(buildr, ext[1])
                if len(bin_packages) > 0:
                    cmdline, prefix = find_rsync(CONTEXT, CONTEXT.remote_host())
                    copy_bin_packages = cmdline + [ ' '.join(bin_packages),
                                                    package_dir ]
    # Create the index and checks it is valid according to the schema.
    # NOTE(review): '--schema ' carries a trailing space in the literal;
    # verify xmllint still parses the flag as intended.
    create_index_pathname(collected_index, indices + pkg_indices)
    shell_command(['xmllint', '--noout', '--schema ',
                  CONTEXT.derived_helper('index.xsd'), collected_index])
    # We should only copy the index file after we created it.
    if copy_bin_packages:
        shell_command(copy_bin_packages)
    if cpy_src_packages:
        shell_command(cpy_src_packages)
def pub_configure(args):
    '''Locate direct dependencies of a project on
    the local machine and create the appropriate
    symbolic links such that the project can be made
    later on.
    '''
    CONTEXT.environ['indexFile'].value = CONTEXT.src_dir(
        os.path.join(CONTEXT.cwd_project(), CONTEXT.indexName))
    project_name = CONTEXT.cwd_project()
    generator = MakeGenerator([project_name], [])
    dbindex = IndexProjects(CONTEXT, CONTEXT.value('indexFile'))
    dbindex.parse(generator)
    missing = set()
    for key in generator.vertices:
        step = generator.vertices[key]
        if key.endswith('Setup'):
            # A Setup step that does not run successfully marks its
            # project as an unresolved prerequisite.
            if not step.run(CONTEXT):
                missing.add(str(step.project))
        elif key.startswith('update_'):
            for fetched in step.fetches:
                missing.add(fetched)
    if missing:
        raise MissingError(project_name, missing)
def pub_context(args):
    '''[ file ]
    Prints the absolute pathname to a *file*.
    If the file cannot be found from the current
    directory up to the workspace root, i.e where
    the .mk fragment is located (usually *buildTop*,
    it assumes the file is in *shareDir* alongside
    other make helpers.
    '''
    pathname = CONTEXT.config_filename
    if args:
        try:
            _, pathname = search_back_to_root(
                args[0], os.path.dirname(CONTEXT.config_filename))
        except IOError:
            # Not found between cwd and the workspace root; fall back
            # to the shared make helpers directory.
            pathname = CONTEXT.derived_helper(args[0])
    sys.stdout.write(pathname)
def pub_deps(args):
    ''' Prints the dependency graph for a project.
    '''
    top = os.path.realpath(os.getcwd())

    def _strictly_inside(var_name):
        # True when cwd is below (but not equal to) the named root.
        root = os.path.realpath(CONTEXT.value(var_name))
        return top.startswith(root) and top != root

    if ((str(CONTEXT.environ['buildTop']) and _strictly_inside('buildTop'))
        or (str(CONTEXT.environ['srcTop']) and _strictly_inside('srcTop'))):
        roots = [CONTEXT.cwd_project()]
    else:
        # make from the top directory makes every project in the index file.
        rgen = DerivedSetsGenerator()
        INDEX.parse(rgen)
        roots = rgen.roots
    sys.stdout.write(' '.join(ordered_prerequisites(roots, INDEX)) + '\n')
def pub_export(args):
    '''rootpath
    Exports the project index file in a format
    compatible with Jenkins. [experimental]
    '''
    rootpath = args[0]
    top = os.path.realpath(os.getcwd())
    # From buildTop/srcTop export every project in the index; otherwise
    # only the project the current directory belongs to.
    if (top == os.path.realpath(CONTEXT.value('buildTop'))
        or top == os.path.realpath(CONTEXT.value('srcTop'))):
        rgen = DerivedSetsGenerator()
        INDEX.parse(rgen)
        roots = rgen.roots
    else:
        roots = [ CONTEXT.cwd_project() ]
    handler = Unserializer(roots)
    if os.path.isfile(CONTEXT.db_pathname()):
        INDEX.parse(handler)
    for name in roots:
        # Create the Jenkins job directory layout for the project.
        jobdir = os.path.join(rootpath, name)
        if not os.path.exists(jobdir):
            os.makedirs(os.path.join(jobdir, 'builds'))
            os.makedirs(os.path.join(jobdir, 'workspace'))
            with open(os.path.join(jobdir, 'nextBuildNumber'), 'w') as \
                    next_build_number:
                next_build_number.write('0\n')
        project = handler.projects[name]
        rep = project.repository.update.rep
        # NOTE(review): *config* is closed manually below; an exception
        # while writing the template would leak the file handle.
        config = open(os.path.join(jobdir, 'config.xml'), 'w')
        config.write('''<?xml version='1.0' encoding='UTF-8'?>
<project>
  <actions/>
  <description>''' + project.descr + '''</description>
  <keepDependencies>false</keepDependencies>
  <properties/>
  <scm class="hudson.plugins.git.GitSCM">
    <configVersion>2</configVersion>
    <userRemoteConfigs>
      <hudson.plugins.git.UserRemoteConfig>
        <name>origin</name>
        <refspec>+refs/heads/*:refs/remotes/origin/*</refspec>
        <url>''' + rep.url + '''</url>
      </hudson.plugins.git.UserRemoteConfig>
    </userRemoteConfigs>
    <branches>
      <hudson.plugins.git.BranchSpec>
        <name>**</name>
      </hudson.plugins.git.BranchSpec>
    </branches>
    <recursiveSubmodules>false</recursiveSubmodules>
    <doGenerateSubmoduleConfigurations>false</doGenerateSubmoduleConfigurations>
    <authorOrCommitter>false</authorOrCommitter>
    <clean>false</clean>
    <wipeOutWorkspace>false</wipeOutWorkspace>
    <pruneBranches>false</pruneBranches>
    <remotePoll>false</remotePoll>
    <buildChooser class="hudson.plugins.git.util.DefaultBuildChooser"/>
    <gitTool>Default</gitTool>
    <submoduleCfg class="list"/>
    <relativeTargetDir>''' + os.path.join('reps', name)+ '''</relativeTargetDir>
    <excludedRegions></excludedRegions>
    <excludedUsers></excludedUsers>
    <gitConfigName></gitConfigName>
    <gitConfigEmail></gitConfigEmail>
    <skipTag>false</skipTag>
    <scmName></scmName>
  </scm>
  <canRoam>true</canRoam>
  <disabled>false</disabled>
  <blockBuildWhenDownstreamBuilding>true</blockBuildWhenDownstreamBuilding>
  <blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
  <triggers class="vector">
    <hudson.triggers.SCMTrigger>
      <spec></spec>
    </hudson.triggers.SCMTrigger>
  </triggers>
  <concurrentBuild>false</concurrentBuild>
  <builders>
    <hudson.tasks.Shell>
      <command>
cd ''' + os.path.join('build', name) + '''
dws configure
dws make
      </command>
    </hudson.tasks.Shell>
  </builders>
  <publishers />
  <buildWrappers/>
</project>
''')
        config.close()
def pub_find(args):
    '''bin|lib filename ...
    Search through a set of directories derived
    from PATH for *filename*.
    '''
    dir_name = args[0]
    # Dispatch to the module-level find_bin/find_lib/... helper.
    finder = getattr(sys.modules[__name__], 'find_' + dir_name)
    searches = [(filename, None) for filename in args[1:]]
    installed, _, complete = finder(
        searches, CONTEXT.search_path(dir_name), CONTEXT.value('buildTop'))
    # Exit with an error code when some files could not be located.
    if len(installed) != len(searches):
        sys.exit(1)
def pub_init(args):
    ''' Prompt for variables which have not been
    initialized in the workspace make fragment.
    (This will fetch the project index).
    '''
    # Interactively resolve unset workspace variables, then make sure
    # the project index file is available locally.
    config_var(CONTEXT, CONTEXT.environ)
    INDEX.validate()
def pub_install(args):
    ''' [ binPackage | project ... ]
    Install a package *binPackage* on the local system
    or a binary package associated to *project*
    through either a *package* or *patch* node in the
    index database or through the local package
    manager.
    '''
    # Make sure the index file is present before resolving packages.
    INDEX.validate()
    install(args, INDEX)
def pub_integrate(args):
    '''[ srcPackage ... ]
    Integrate a patch into a source package
    '''
    # Consume *args* one package at a time: unpack the source tarball,
    # then apply the matching '<srcdir>-patch' directory from the
    # current project.
    while len(args) > 0:
        srcdir = unpack(args.pop(0))
        pchdir = CONTEXT.src_dir(os.path.join(CONTEXT.cwd_project(),
                                              srcdir + '-patch'))
        integrate(srcdir, pchdir)
class FilteredList(PdbHandler):
    '''Print a list binary package files specified in an index file.'''
    # Note: This code is used by dservices.

    def __init__(self):
        PdbHandler.__init__(self)
        self.first_time = True
        # Accumulates the package files to fetch for the current host.
        self.fetches = []

    def project(self, proj_obj):
        host = CONTEXT.host()
        packages = proj_obj.packages
        if host in packages and packages[host]:
            for file_to_fetch in packages[host].update.fetches:
                self.fetches.append(file_to_fetch)
class ListPdbHandler(PdbHandler):
    '''List project available in the workspace.'''

    def __init__(self):
        PdbHandler.__init__(self)
        # Used to print the column header once, before the first row.
        self.first_time = True

    def project(self, proj):
        if self.first_time:
            sys.stdout.write('HEAD name\n')
            self.first_time = False
        src_dir = CONTEXT.src_dir(proj.name)
        if os.path.exists(src_dir):
            prev = os.getcwd()
            os.chdir(src_dir)
            # Show the commit currently checked out for the project.
            lines = subprocess.check_output(
                'git rev-parse HEAD', shell=True,
                stderr=subprocess.STDOUT).splitlines()
            sys.stdout.write(' '.join(lines).strip() + ' ')
            os.chdir(prev)
        sys.stdout.write(proj.name + '\n')
def pub_list(args):
    ''' List available projects
    '''
    # Walk the index and let ListPdbHandler print one row per project.
    INDEX.parse(ListPdbHandler())
def pub_make(args, graph=False):
    ''' Make projects. "make recurse" will build
    all dependencies required before a project
    can be itself built.
    '''
    # \todo That should not be required:
    # context.environ['siteTop'].default = os.path.dirname(os.path.dirname(
    #    os.path.realpath(os.getcwd())))
    CONTEXT.targets = []
    recurse = False
    top = os.path.realpath(os.getcwd())
    if (top == os.path.realpath(CONTEXT.value('buildTop'))
        or top == os.path.realpath(CONTEXT.value('srcTop'))):
        # make from the top directory makes every project in the index file.
        rgen = DerivedSetsGenerator()
        INDEX.parse(rgen)
        roots = rgen.roots
        recurse = True
    else:
        roots = [ CONTEXT.cwd_project() ]
    # Sort command-line words into targets, VAR=value overrides and the
    # special 'recurse' keyword.
    for opt in args:
        if opt == 'recurse':
            CONTEXT.targets += [ 'install' ]
            recurse = True
        elif re.match(r'\S+=.*', opt):
            CONTEXT.overrides += [ opt ]
        else:
            CONTEXT.targets += [ opt ]
    if recurse:
        # note that *EXCLUDE_PATS* is global.
        validate_controls(MakeGenerator(roots, [], EXCLUDE_PATS), INDEX,
                          graph=graph)
    else:
        handler = Unserializer(roots)
        if os.path.isfile(CONTEXT.db_pathname()):
            INDEX.parse(handler)
        for name in roots:
            make = None
            src_dir = CONTEXT.src_dir(name)
            if os.path.exists(src_dir):
                if name in handler.projects:
                    rep = handler.as_project(name).repository
                    if not rep:
                        rep = handler.as_project(name).patch
                    make = rep.make
                else:
                    # No luck we do not have any more information than
                    # the directory name. Let's do with that.
                    make = MakeStep(name)
            if make:
                make.run(CONTEXT)
    if len(ERRORS) > 0:
        raise Error("Found errors while making " + ' '.join(ERRORS))
def pub_patch(args):
    ''' Generate patches vs. the last pull from a remote
    repository, optionally send it to a list
    of receipients.
    '''
    reps = args
    recurse = 'recurse' in args
    if recurse:
        reps.remove('recurse')
    reps = cwd_projects(reps, recurse)
    prev = os.getcwd()
    for rep in reps:
        log_info('######## generating patch for project ' + rep)
        os.chdir(CONTEXT.src_dir(rep))
        patch_dir = CONTEXT.patch_dir(rep)
        if not os.path.exists(patch_dir):
            os.makedirs(patch_dir)
        # git prints the pathname of each patch file it generates.
        cmdline = ' '.join(['git', 'format-patch', '-o', patch_dir, 'origin'])
        output = subprocess.check_output(
            cmdline, shell=True, stderr=subprocess.STDOUT)
        patches = []
        for line in output.splitlines():
            patches.append(line.strip())
            sys.stdout.write(line)
        # Mail each patch, skipping its first line.
        for patch in patches:
            with open(patch) as msgfile:
                msg = ''.join(msgfile.readlines()[1:])
            sendmail(msg, MAILTO)
    os.chdir(prev)
def pub_push(args):
    ''' Push commits to projects checked out
    in the workspace.
    '''
    reps = args
    recurse = 'recurse' in args
    if recurse:
        reps.remove('recurse')
    for rep in cwd_projects(reps, recurse):
        sys.stdout.write('######## pushing project ' + str(rep) + '\n')
        src_dir = CONTEXT.src_dir(rep)
        # Let the source-control backend associated with the checkout
        # perform the actual push.
        Repository.associate(src_dir).push(src_dir)
def pub_status(args, recurse=False):
    ''' Show status of projects checked out
    in the workspace with regards to commits.
    '''
    prev = os.getcwd()
    for rep in cwd_projects(args, recurse):
        os.chdir(CONTEXT.src_dir(rep))
        try:
            output = subprocess.check_output(
                'git status', shell=True, stderr=subprocess.STDOUT)
            untracked = False
            for line in output.splitlines():
                look = re.match(r'#\s+([a-z]+):\s+(\S+)', line)
                if look:
                    # e.g. "M <project> <path>" for a modified file.
                    sys.stdout.write(' '.join([
                        look.group(1).capitalize()[0],
                        rep, look.group(2)]) + '\n')
                elif re.match('# Untracked files:', line):
                    untracked = True
                elif untracked:
                    look = re.match(r'# (\S+)', line)
                    if look:
                        sys.stdout.write(
                            ' '.join(['?', rep, look.group(1)]) + '\n')
        except subprocess.CalledProcessError:
            # It is ok. git will return error code 1 when no changes
            # are to be committed.
            pass
    os.chdir(prev)
def pub_update(args):
    '''[ project ... ]
    Update projects that have a *repository* or *patch*
    node in the index database and are also present in
    the workspace by pulling changes from the remote
    server. "update recurse" will recursively update all
    dependencies for *project*.
    If a project only contains a *package* node in
    the index database, the local system will be
    modified only if the version provided is greater
    than the version currently installed.
    '''
    reps = args
    recurse = False
    if 'recurse' in args:
        recurse = True
        reps.remove('recurse')
    # Force-refresh the index so the dependency data is current.
    INDEX.validate(True)
    reps = cwd_projects(reps)
    if recurse:
        # note that *EXCLUDE_PATS* is global.
        dgen = MakeGenerator(reps, [], EXCLUDE_PATS)
        validate_controls(dgen, INDEX)
    else:
        global ERRORS
        handler = Unserializer(reps)
        INDEX.parse(handler)
        for name in reps:
            # The project is present in *srcTop*, so we will update the source
            # code from a repository.
            update = None
            if not name in handler.projects:
                # We found a directory that contains source control information
                # but which is not in the interdependencies index file.
                src_dir = CONTEXT.src_dir(name)
                if os.path.exists(src_dir):
                    update = UpdateStep(
                        name, Repository.associate(src_dir), None)
            else:
                update = handler.as_project(name).repository.update
                if not update:
                    update = handler.as_project(name).patch.update
            if update:
                # Not every project is made a first-class citizen. If there are
                # no rep structure for a project, it must depend on a project
                # that does in order to have a source repled repository.
                # This is a simple way to specify inter-related projects
                # with complex dependency set and barely any code.
                # \todo We do not propagate force= here to avoid messing up
                # the local checkouts on pubUpdate()
                try:
                    log_header(update.name)
                    update.run(CONTEXT)
                    log_footer(update.name)
                except Error, err:
                    log_info('warning: cannot update repository from ' \
                        + str(update.rep.url))
                    log_footer(update.name, errcode=err.code)
            else:
                ERRORS += [ name ]
        if len(ERRORS) > 0:
            raise Error('%s is/are not project(s) under source control.'
                        % ' '.join(ERRORS))
    nb_updated_projects = len(UpdateStep.updated_sources)
    if nb_updated_projects > 0:
        log_info('%d updated project(s).' % nb_updated_projects)
    else:
        log_info('all project(s) are up-to-date.')
def pub_upstream(args):
    '''[ srcPackage ... ]
    Generate a patch to submit to upstream
    maintainer out of a source package and
    a -patch subdirectory in a project src_dir.
    '''
    while args:
        pkgfilename = args.pop(0)
        srcdir = unpack(pkgfilename)
        # Keep a pristine copy of the unpacked sources in *orgdir*,
        # then unpack again and apply the local -patch directory.
        orgdir = srcdir + '.orig'
        if os.path.exists(orgdir):
            shutil.rmtree(orgdir, ignore_errors=True)
        shutil.move(srcdir, orgdir)
        srcdir = unpack(pkgfilename)
        pchdir = CONTEXT.src_dir(
            os.path.join(CONTEXT.cwd_project(), srcdir + '-patch'))
        integrate(srcdir, pchdir)
        # In the common case, no variables will be added to the workspace
        # make fragment when the upstream command is run. Hence sys.stdout
        # will only display the patched information. This is important to be
        # able to execute:
        #   dws upstream > patch
        subprocess.call(' '.join(['diff', '-ruNa', orgdir, srcdir]),
                        shell=True,
                        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def select_checkout(rep_candidates, package_candidates):
    '''Interactive prompt for a selection of projects to checkout.
    *rep_candidates* contains a list of rows describing projects available
    for selection. This function will return a list of projects to checkout
    from a source repository and a list of projects to install through
    a package manager.'''
    reps = []
    if len(rep_candidates) > 0:
        reps = select_multiple(
'''The following dependencies need to be present on your system.
You have now the choice to install them from a source repository. You will
later have the choice to install them from either a patch, a binary package
or not at all.''',
        rep_candidates)
    # NOTE(review): *select_string* is not defined in this part of the
    # file; confirm it is a module global set elsewhere.
    log_info(select_string)
    # Filters out the dependencies which the user has decided to install
    # from a repository.
    packages = []
    for row in package_candidates:
        if not row[0] in reps:
            packages += [ row ]
    packages = select_install(packages)
    return reps, packages
def select_install(package_candidates):
    '''Interactive prompt for a selection of projects to install
    as binary packages. *package_candidates* contains a list of rows
    describing projects available for selection. This function will
    return a list of projects to install through a package manager. '''
    packages = []
    if len(package_candidates) > 0:
        packages = select_multiple(
'''The following dependencies need to be present on your system.
You have now the choice to install them from a binary package. You can skip
this step if you know those dependencies will be resolved correctly later on.
''', package_candidates)
    # NOTE(review): *select_string* is not defined in this part of the
    # file; confirm it is a module global set elsewhere.
    log_info(select_string)
    return packages
def select_one(description, choices, sort=True):
    '''Prompt an interactive list of choices and returns the element selected
    by the user. *description* is a text that explains the reason for the
    prompt. *choices* is a list of elements to choose from. Each element is
    in itself a list. Only the first value of each element is of significance
    and returned by this function. The other values are only use as textual
    context to help the user make an informed choice.'''
    if sort:
        # We should not sort 'Enter ...' choices for pathnames else we will
        # end-up selecting unexpected pathnames by default.
        choices.sort()
    while True:
        show_multiple(description, choices)
        if USE_DEFAULT_ANSWER:
            selection = "1"
        else:
            selection = prompt("Enter a single number [1]: ")
        if selection == "":
            selection = "1"
        try:
            index = int(selection)
        except (TypeError, ValueError):
            # Not a number; ask again.
            continue
        if 1 <= index <= len(choices):
            return choices[index - 1][0]
def select_multiple(description, selects):
    '''Prompt an interactive list of choices and returns elements selected
    by the user. *description* is a text that explains the reason for the
    prompt. *choices* is a list of elements to choose from. Each element is
    in itself a list. Only the first value of each element is of significance
    and returned by this function. The other values are only use as textual
    context to help the user make an informed choice.'''
    result = []
    done = False
    selects.sort()
    # Entry 1 is the synthetic 'all' choice; real choices start at 2.
    choices = [ [ 'all' ] ] + selects
    while len(choices) > 1 and not done:
        show_multiple(description, choices)
        # NOTE(review): passing positional args trips log_info's
        # `'\n' % args` precedence issue while output is buffered —
        # confirm against log_info.
        log_info("%d) done", len(choices) + 1)
        if USE_DEFAULT_ANSWER:
            selection = "1"
        else:
            selection = prompt(
                "Enter a list of numbers separated by spaces [1]: ")
        if len(selection) == 0:
            selection = "1"
        # parse the answer for valid inputs
        selection = selection.split(' ')
        for sel in selection:
            try:
                choice = int(sel)
            except TypeError:
                choice = 0
            except ValueError:
                choice = 0
            if choice > 1 and choice <= len(choices):
                result += [ choices[choice - 1][0] ]
            elif choice == 1:
                # 'all': select every remaining choice and stop.
                result = []
                for choice_value in choices[1:]:
                    result += [ choice_value[0] ]
                done = True
            elif choice == len(choices) + 1:
                done = True
        # remove selected items from list of choices
        remains = []
        for row in choices:
            if not row[0] in result:
                remains += [ row ]
        choices = remains
    return result
def select_yes_no(description):
    '''Prompt for a yes/no answer; empty input counts as yes.'''
    if USE_DEFAULT_ANSWER:
        return True
    answer = prompt(description + " [Y/n]? ")
    return answer in ('', 'Y', 'y')
def show_multiple(description, choices):
    '''Returns a list of choices on the user interface as a string.
    We do this instead of printing directly because this function
    is called to configure CONTEXT variables, including *logDir*.'''
    # Compute display layout
    widths = []
    displayed = []
    for item, row in enumerate(choices, start=1):
        line = []
        for col_index, column in enumerate([ str(item) + ')' ] + row):
            col = column
            if isinstance(col, dict):
                if 'description' in column:
                    col = column['description'] # { description: ... }
                else:
                    col = ""
            line += [ col ]
            # Track the widest cell per column (minimum width 2).
            if len(widths) <= col_index:
                widths += [ 2 ]
            widths[col_index] = max(widths[col_index], len(col) + 2)
        displayed += [ line ]
    # Ask user to review selection
    log_info('%s' % description)
    for project in displayed:
        for col_index, col in enumerate(project):
            # NOTE(review): log_info appends a newline per call, so each
            # column of a row lands on its own line — confirm this is the
            # intended layout rather than one padded line per row.
            log_info(col.ljust(widths[col_index]))
def unpack(pkgfilename):
    '''Unpack a tar[|.gz|.bz2] source distribution package.

    Returns the name of the directory the package expands into, obtained
    by stripping up to two extensions (e.g. '.tar.bz2') from the filename.
    '''
    # Pick the decompression flag from the extension. Previously any
    # filename not ending in '.bz2' or '.gz' left *pkgflag* unbound and
    # raised NameError; fall back to a plain 'tar xf' for '.tar' files.
    if pkgfilename.endswith('.bz2'):
        pkgflag = 'j'
    elif pkgfilename.endswith('.gz'):
        pkgflag = 'z'
    else:
        pkgflag = ''
    shell_command(['tar', pkgflag + 'xf', pkgfilename])
    return os.path.basename(os.path.splitext(
        os.path.splitext(pkgfilename)[0])[0])
def main(args):
    '''Main Entry Point'''
    # XXX use of this code?
    # os.setuid(int(os.getenv('SUDO_UID')))
    # os.setgid(int(os.getenv('SUDO_GID')))
    exit_code = 0
    try:
        import __main__
        import argparse
        global CONTEXT
        CONTEXT = Context()
        # Build the --help epilog out of the documented workspace variables.
        keys = CONTEXT.environ.keys()
        keys.sort()
        epilog = 'Variables defined in the workspace make fragment (' \
            + CONTEXT.config_name + '):\n'
        for varname in keys:
            var = CONTEXT.environ[varname]
            if var.descr:
                epilog += (' ' + var.name).ljust(23, ' ') + var.descr + '\n'
        parser = argparse.ArgumentParser(
            usage='%(prog)s [options] command\n\nVersion\n  %(prog)s version '
            + str(__version__),
            formatter_class=argparse.RawTextHelpFormatter,
            epilog=epilog)
        parser.add_argument('--version', action='version',
                            version='%(prog)s ' + str(__version__))
        parser.add_argument('--config', dest='config', action='store',
            help='Set the path to the config file instead of deriving it'\
                ' from the current directory.')
        parser.add_argument('--default', dest='default', action='store_true',
            help='Use default answer for every interactive prompt.')
        parser.add_argument('--exclude', dest='exclude_pats', action='append',
            help='The specified command will not be applied to projects'\
                ' matching the name pattern.')
        parser.add_argument('--nolog', dest='nolog', action='store_true',
            help='Do not generate output in the log file')
        parser.add_argument('--patch', dest='patchTop', action='store',
            help='Set *patchTop* the root where local patches can be found.')
        parser.add_argument('--prefix', dest='installTop', action='store',
            help='Set the root for installed bin, include, lib, etc. ')
        parser.add_argument('--mailto', dest='mailto', action='append',
            help='Add an email address to send log reports to')
        # One sub-command per pub_* function defined in this module.
        build_subcommands_parser(parser, __main__)
        if len(args) <= 1:
            parser.print_help()
            return 1
        if args[1] == 'help-book':
            # Print help in docbook format.
            # We need the parser here so we can't create a pub_ function
            # for this command.
            help_str = cStringIO.StringIO()
            parser.print_help(help_str)
            help_book(help_str)
            return 0
        options = parser.parse_args(args[1:])
        # Find the build information
        global USE_DEFAULT_ANSWER
        USE_DEFAULT_ANSWER = options.default
        global NO_LOG
        NO_LOG = options.nolog
        if options.exclude_pats:
            global EXCLUDE_PATS
            EXCLUDE_PATS = options.exclude_pats
        if not options.func in [ pub_build ]:
            # The *build* command is special in that it does not rely
            # on locating a pre-existing context file.
            try:
                CONTEXT.locate(options.config)
            except IOError:
                pass
            except:
                raise
        if options.installTop:
            CONTEXT.environ['installTop'] = os.path.abspath(options.installTop)
        if options.patchTop:
            CONTEXT.environ['patchTop'] = os.path.abspath(options.patchTop)
        global INDEX
        INDEX = IndexProjects(CONTEXT)
        # Filter out options with are not part of the function prototype.
        func_args = filter_subcommand_args(options.func, options)
        options.func(**func_args)
    except Error, err:
        log_error(str(err))
        exit_code = err.code
    # NOTE(review): if Error is raised before parse_args completes,
    # *options* is unbound here and this block raises NameError — confirm.
    if options.mailto and len(options.mailto) > 0 and LOG_PAT:
        logs = find_files(CONTEXT.log_path(''), LOG_PAT)
        log_info('forwarding logs ' + ' '.join(logs) + '...')
        sendmail(createmail('build report', logs), options.mailto)
    return exit_code
# Script entry point: propagate main()'s return value as the exit code.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
"""
Copyright 2015-2017 Knights Lab, Regents of the University of Minnesota.
This software is released under the GNU Affero General Public License (AGPL) v3.0 License.
"""
import csv
import pandas as pd
from collections import defaultdict
import numpy as np
from shogun import logger
class Taxonomy:
    """In-memory id -> taxonomy lookup, loaded from a two-column TSV file."""

    def __init__(self, filename: str):
        # Mapping of record id to its taxonomy string.
        self.tax = self.parse_taxonomy(filename)

    @classmethod
    def parse_taxonomy(cls, filename: str) -> dict:
        """Read a tab-delimited file of (id, taxonomy) rows into a dict."""
        with open(filename) as handle:
            rows = csv.reader(handle, delimiter='\t')
            return dict(rows)

    def __call__(self, id: str):
        """Return the taxonomy string for *id* (KeyError if unknown)."""
        return self.tax[id]
# Taxonomic rank prefixes, ordered kingdom -> phylum -> class -> order ->
# family -> genus -> species -> strain.
TAX_LEVELS = ['k', 'p', 'c', 'o', 'f', 'g', 's', 't']
def tree():
    """Return an autovivifying nested mapping: accessing a missing key
    creates a new subtree."""
    return defaultdict(tree)
def add_tree(t, path):
    """Insert a semicolon-delimited *path* into the autovivifying tree *t*."""
    node = t
    for label in path.split(';'):
        # Indexing creates the child when it does not exist yet.
        node = node[label]
def longest_path_tree(t, path):
    """Return the longest prefix of semicolon-delimited *path* whose labels
    all exist in tree *t*, as a semicolon-joined string."""
    matched = []
    node = t
    for label in path.split(';'):
        if label not in node:
            break
        node = node[label]
        matched.append(label)
    return ';'.join(matched)
def parse_bayes(filename: str) -> pd.DataFrame:
    """Load a bayes counts table (tab-separated, no header) indexed by taxonomy.

    Columns are the eight per-rank counts plus ``genome_length``; spaces in
    taxonomy strings are replaced by underscores for legacy compatibility.
    """
    header = ["tax"] + TAX_LEVELS + ["genome_length"]
    frame = pd.read_csv(filename, sep="\t", header=None, names=header, index_col=0)
    # Remove spaces in taxonomy for legacy reasons
    frame.index = [name.replace(" ", "_") for name in frame.index]
    return frame.sort_index()
def redistribute_taxatable(filename: str, counts_bayes: pd.DataFrame, level=8):
    """Redistribute taxatable counts down to ``level`` taxonomic ranks.

    Reads a tab-separated taxatable, collapses rows onto the longest taxonomy
    prefix known to ``counts_bayes``, then pushes counts observed at ranks
    shallower than ``level`` down onto their leaf taxa in proportion to
    per-genome uniqueness estimates derived from the bayes counts.

    Returns a DataFrame of counts indexed by taxonomy truncated to ``level``.
    """
    df = pd.read_csv(filename, sep="\t", index_col=0)
    # Drop rows whose index is not a string (e.g. NaN from a blank line).
    df = df[[type(_) == str for _ in df.index]]
    # Prefix tree of every taxonomy present in counts_bayes.
    cb_index = tree()
    _ = [add_tree(cb_index, v) for v in counts_bayes.index]
    # Remove spaces in taxonomy for legacy reasons
    df.index = [_.replace(" ", "_") for _ in df.index]
    # Collapse each row onto its longest prefix found in the bayes tree.
    df['summary'] = [longest_path_tree(cb_index, v) for v in df.index]
    df = df.groupby('summary').sum()
    # Number of taxonomic ranks in each collapsed name (0 for non-strings).
    df['level'] = [_.count(';') + 1 if type(_) == str else 0 for _ in df.index]
    # summarize up
    below_level = df['level'] >= level
    leaf_counts_df = df[below_level].copy()
    # Truncate leaf names to exactly ``level`` ranks, then merge duplicates.
    leaf_counts_df['taxa_name'] = [';'.join(v.split(';')[:level]) for v in df[below_level].index]
    leaf_counts_df = leaf_counts_df.groupby('taxa_name').sum()
    leaf_counts_df = leaf_counts_df.drop('level', axis=1)
    # summarize bayes to level
    counts_bayes_sum = _summarize_bayes_at_level(counts_bayes, leaf_counts_df.index, level=level)
    # summarize down
    # Walk shallow rows deepest-first so parents see already-updated leaves.
    for i, row in df[~below_level].sort_values('level', ascending=False).iterrows():
        # Get all children of item
        tmp_name = row.name
        leave_filter = _filter_leaves_for_tax(leaf_counts_df, tmp_name)
        num_leaves = np.sum(leave_filter)
        # NOTE(review): ``num_leaves is None`` can never be True for np.sum's
        # return value; the check looks vestigial — confirm.
        if num_leaves == 0 or num_leaves is None:
            if row.name == "":
                logger.debug("Conflict found for sequence at the kingdom level, skipping.")
                continue
            # Filter back row names until in counts_bayes
            # Pad missing ranks with empty placeholders (k__ .. t__).
            # NOTE(review): the inner loop variable ``i`` shadows the outer
            # iterrows() index — harmless here but fragile.
            blank = ['k__', 'p__', 'c__', 'o__', 'f__', 'g__', 's__', 't__']
            for i, _ in enumerate(row.name.split(';')):
                blank[i] = _
            tmp_counts_bayes_row = _summarize_bayes_at_level(counts_bayes, row.name, level=row.name.count(';') + 1)
            tmp_counts_bayes_row.name = ';'.join(blank[:level])
            row.name = tmp_counts_bayes_row.name
            # Keep the row as a new leaf; row[:-1] drops the 'level' column.
            # NOTE(review): DataFrame.append is removed in pandas >= 2.0.
            leaf_counts_df = leaf_counts_df.append(row[:-1])
            if tmp_counts_bayes_row.name not in counts_bayes_sum.index:
                counts_bayes_sum = counts_bayes_sum.append(tmp_counts_bayes_row)
                counts_bayes_sum = counts_bayes_sum.fillna(0)
        elif num_leaves == 1:
            # Exactly one leaf below this taxon: give it all the counts.
            leaf_counts_df.loc[leave_filter] += row.values[:-1]
        elif num_leaves > 1:
            tmp_level = row.name.count(';')
            tmp_leaves = leaf_counts_df[leave_filter].sort_index()
            tmp_bayes = counts_bayes_sum.loc[tmp_leaves.index]
            # Series 1xn where n is the number of leave nodes below tax
            prob_tax_given_level = (tmp_bayes.iloc[:,tmp_level] + 1)/(tmp_bayes['genome_length'] + 1)
            prob_tax_given_level = prob_tax_given_level/np.sum(prob_tax_given_level)
            # Series 1xn where n is the number of unique reads for a given taxa
            uniqueness_per_genome = tmp_bayes.iloc[:,level-1]/tmp_bayes['genome_length']
            # Matrix divide each observed count by uniqueness
            counts_over_uniqueness = tmp_leaves.T / uniqueness_per_genome.values
            # Matrix divide each uniqueness count by sum of sample
            prob_tax = counts_over_uniqueness.T / counts_over_uniqueness.sum(axis=1)
            # Get the redistribution parameters
            # Should be taxa by samples same as the tmp_leaves
            # Each column should sum to 1
            redistribution_params = prob_tax.apply(lambda x: x*prob_tax_given_level.values, axis=0).apply(lambda x: x/x.sum(), axis=0)
            redistribution_numbers = (redistribution_params * row.values[:-1]).round()
            # Add the number back to the dataframe
            leaf_counts_df = leaf_counts_df.add(redistribution_numbers, fill_value=0)
    return leaf_counts_df
def _summarize_bayes_at_level(counts_bayes: pd.DataFrame, leave_names, level=7):
    """Collapse the bayes counts table to ``level`` taxonomic ranks.

    For level <= 8: rows are grouped by their first ``level`` ranks, counts
    from ranks at or below ``level`` are folded into the ``level``-th rank
    column, the deeper rank columns are dropped, a ``genome_length_median``
    column (median of grouped genome lengths) is added, and the result is
    restricted to ``leave_names``.

    NOTE(review): mutates the caller's frame by adding a 'summary_taxa'
    column before grouping — confirm callers tolerate this side effect.
    """
    if level <= 8:
        # Group rows by taxonomy truncated to ``level`` ranks.
        counts_bayes['summary_taxa'] = [';'.join(_.split(';')[:level]) for _ in counts_bayes.index]
        _counts_bayes = counts_bayes.groupby('summary_taxa').sum()
        _counts_bayes['genome_length_median'] = counts_bayes.groupby('summary_taxa')['genome_length'].median().astype(int)
        counts_bayes = _counts_bayes
        # Fold per-rank counts from ranks level..8 into the level-th column.
        counts = counts_bayes.iloc[:, level-1:8].sum(axis=1)
        counts_bayes.iloc[:, level-1] = counts
        counts_bayes = counts_bayes.drop(counts_bayes.columns[level:8], axis=1)
        counts_bayes = counts_bayes.loc[leave_names]
    else:
        # Deeper than strain level: nothing to collapse; copy genome length.
        counts_bayes['genome_length_median'] = counts_bayes['genome_length']
    return counts_bayes
def summarize_bayes_at_level(counts_bayes, leave_names=None, level=7):
    """Public wrapper around ``_summarize_bayes_at_level``.

    When ``leave_names`` is not supplied, it is derived as the unique set of
    taxonomy strings truncated to ``level`` ranks.

    Bug fix: the original tested ``if not leave_names:``, which raises
    ``ValueError`` for a numpy array or pandas Index (ambiguous truth value)
    and silently recomputes names for an explicitly-passed empty sequence.
    Testing against ``None`` keeps the default behavior while accepting
    array-like arguments.
    """
    if leave_names is None:
        leave_names = np.unique(np.array([';'.join(_.split(';')[:level]) for _ in counts_bayes.index]))
    return _summarize_bayes_at_level(counts_bayes, leave_names, level=level)
def _filter_leaves_for_tax(leaf_counts_df, taxa):
return np.array([_.startswith(taxa + ';') for _ in leaf_counts_df.index])
Updated for strain.
"""
Copyright 2015-2017 Knights Lab, Regents of the University of Minnesota.
This software is released under the GNU Affero General Public License (AGPL) v3.0 License.
"""
import csv
import pandas as pd
from collections import defaultdict
import numpy as np
from shogun import logger
class Taxonomy:
    """Lookup table mapping sequence ids to taxonomy strings, loaded from a TSV file."""

    def __init__(self, filename: str):
        # Load the id -> taxonomy mapping eagerly at construction time.
        self.tax = self.parse_taxonomy(filename)

    @classmethod
    def parse_taxonomy(cls, filename: str) -> dict:
        """Read a two-column tab-separated file and return it as a dict."""
        with open(filename) as handle:
            rows = csv.reader(handle, delimiter='\t')
            return dict(rows)

    def __call__(self, id: str):
        # Calling the instance looks up the taxonomy for ``id`` (KeyError if absent).
        return self.tax[id]
# Single-letter prefixes for the eight taxonomic ranks:
# kingdom, phylum, class, order, family, genus, species, strain.
TAX_LEVELS = ['k', 'p', 'c', 'o', 'f', 'g', 's', 't']
# Autovivifying nested mapping: accessing a missing key creates a new sub-tree.
def tree(): return defaultdict(tree)
def add_tree(t, path):
    """Insert a semicolon-delimited taxonomy ``path`` into the nested tree ``t``."""
    # Walk down one level per rank label; autovivification creates nodes.
    cursor = t
    for segment in path.split(';'):
        cursor = cursor[segment]
def longest_path_tree(t, path):
    """Return the longest ';'-joined prefix of ``path`` present in tree ``t``."""
    prefix = []
    node = t
    for part in path.split(';'):
        if part not in node:
            break
        node = node[part]
        prefix.append(part)
    return ';'.join(prefix)
def parse_bayes(filename: str) -> pd.DataFrame:
    """Load a bayes counts table (tab-separated, no header) indexed by taxonomy.

    Columns are the eight per-rank counts plus ``genome_length``; spaces in
    taxonomy strings are replaced by underscores for legacy compatibility.
    """
    header = ["tax"] + TAX_LEVELS + ["genome_length"]
    frame = pd.read_csv(filename, sep="\t", header=None, names=header, index_col=0)
    # Remove spaces in taxonomy for legacy reasons
    frame.index = [name.replace(" ", "_") for name in frame.index]
    return frame.sort_index()
def redistribute_taxatable(filename: str, counts_bayes: pd.DataFrame, level=8):
    """Redistribute taxatable counts down to ``level`` taxonomic ranks.

    Reads a tab-separated taxatable, collapses rows onto the longest taxonomy
    prefix known to ``counts_bayes``, then pushes counts observed at ranks
    shallower than ``level`` down onto their leaf taxa in proportion to
    per-genome uniqueness estimates derived from the bayes counts.

    Returns a DataFrame of counts indexed by taxonomy truncated to ``level``.
    """
    df = pd.read_csv(filename, sep="\t", index_col=0)
    # Drop rows whose index is not a string (e.g. NaN from a blank line).
    df = df[[type(_) == str for _ in df.index]]
    # Prefix tree of every taxonomy present in counts_bayes.
    cb_index = tree()
    _ = [add_tree(cb_index, v) for v in counts_bayes.index]
    # Remove spaces in taxonomy for legacy reasons
    df.index = [_.replace(" ", "_") for _ in df.index]
    # Collapse each row onto its longest prefix found in the bayes tree.
    df['summary'] = [longest_path_tree(cb_index, v) for v in df.index]
    df = df.groupby('summary').sum()
    # Number of taxonomic ranks in each collapsed name (0 for non-strings).
    df['level'] = [_.count(';') + 1 if type(_) == str else 0 for _ in df.index]
    # summarize up
    below_level = df['level'] >= level
    leaf_counts_df = df[below_level].copy()
    # Truncate leaf names to exactly ``level`` ranks, then merge duplicates.
    leaf_counts_df['taxa_name'] = [';'.join(v.split(';')[:level]) for v in df[below_level].index]
    leaf_counts_df = leaf_counts_df.groupby('taxa_name').sum()
    leaf_counts_df = leaf_counts_df.drop('level', axis=1)
    # summarize bayes to level
    counts_bayes_sum = _summarize_bayes_at_level(counts_bayes, leaf_counts_df.index, level=level)
    # summarize down
    # Walk shallow rows deepest-first so parents see already-updated leaves.
    for i, row in df[~below_level].sort_values('level', ascending=False).iterrows():
        # Get all children of item
        tmp_name = row.name
        leave_filter = _filter_leaves_for_tax(leaf_counts_df, tmp_name)
        num_leaves = np.sum(leave_filter)
        # NOTE(review): ``num_leaves is None`` can never be True for np.sum's
        # return value; the check looks vestigial — confirm.
        if num_leaves == 0 or num_leaves is None:
            if row.name == "":
                logger.debug("Conflict found for sequence at the kingdom level, skipping.")
                continue
            # Filter back row names until in counts_bayes
            # Pad missing ranks with empty placeholders (k__ .. t__).
            # NOTE(review): the inner loop variable ``i`` shadows the outer
            # iterrows() index — harmless here but fragile.
            blank = ['k__', 'p__', 'c__', 'o__', 'f__', 'g__', 's__', 't__']
            for i, _ in enumerate(row.name.split(';')):
                blank[i] = _
            tmp_counts_bayes_row = _summarize_bayes_at_level(counts_bayes, row.name, level=row.name.count(';') + 1)
            tmp_counts_bayes_row.name = ';'.join(blank[:level])
            row.name = tmp_counts_bayes_row.name
            # Keep the row as a new leaf; row[:-1] drops the 'level' column.
            # NOTE(review): DataFrame.append is removed in pandas >= 2.0.
            leaf_counts_df = leaf_counts_df.append(row[:-1])
            if tmp_counts_bayes_row.name not in counts_bayes_sum.index:
                counts_bayes_sum = counts_bayes_sum.append(tmp_counts_bayes_row)
                counts_bayes_sum = counts_bayes_sum.fillna(0)
        elif num_leaves == 1:
            # Exactly one leaf below this taxon: give it all the counts.
            leaf_counts_df.loc[leave_filter] += row.values[:-1]
        elif num_leaves > 1:
            tmp_level = row.name.count(';')
            tmp_leaves = leaf_counts_df[leave_filter].sort_index()
            tmp_bayes = counts_bayes_sum.loc[tmp_leaves.index]
            # Series 1xn where n is the number of leave nodes below tax
            prob_tax_given_level = (tmp_bayes.iloc[:,tmp_level] + 1)/(tmp_bayes['genome_length'] + 1)
            prob_tax_given_level = prob_tax_given_level/np.sum(prob_tax_given_level)
            # Series 1xn where n is the number of unique reads for a given taxa
            uniqueness_per_genome = tmp_bayes.iloc[:,level-1]/tmp_bayes['genome_length']
            # Matrix divide each observed count by uniqueness
            counts_over_uniqueness = tmp_leaves.T / uniqueness_per_genome.values
            # Matrix divide each uniqueness count by sum of sample
            prob_tax = counts_over_uniqueness.T / counts_over_uniqueness.sum(axis=1)
            # Get the redistribution parameters
            # Should be taxa by samples same as the tmp_leaves
            # Each column should sum to 1
            redistribution_params = prob_tax.apply(lambda x: x*prob_tax_given_level.values, axis=0).apply(lambda x: x/x.sum(), axis=0)
            redistribution_numbers = (redistribution_params * row.values[:-1]).round()
            # Add the number back to the dataframe
            leaf_counts_df = leaf_counts_df.add(redistribution_numbers, fill_value=0)
    return leaf_counts_df
def _summarize_bayes_at_level(counts_bayes: pd.DataFrame, leave_names, level=7):
    """Collapse the bayes counts table to ``level`` taxonomic ranks.

    Rows are grouped by their first ``level`` ranks, counts from ranks at or
    below ``level`` are folded into the ``level``-th rank column, the deeper
    rank columns are dropped, a ``genome_length_median`` column (median of
    grouped genome lengths) is added, and the result is restricted to
    ``leave_names``.

    NOTE(review): mutates the caller's frame by adding a 'summary_taxa'
    column before grouping.  Unlike the sibling implementation elsewhere in
    this file, there is no ``level <= 8`` guard here, so level > 8 behaves
    differently — confirm which version is intended (original comment at this
    spot read "Something odd happened here").
    """
    # Group rows by taxonomy truncated to ``level`` ranks.
    counts_bayes['summary_taxa'] = [';'.join(_.split(';')[:level]) for _ in counts_bayes.index]
    _counts_bayes = counts_bayes.groupby('summary_taxa').sum()
    _counts_bayes['genome_length_median'] = counts_bayes.groupby('summary_taxa')['genome_length'].median().astype(int)
    counts_bayes = _counts_bayes
    # Fold per-rank counts from ranks level..8 into the level-th column.
    counts = counts_bayes.iloc[:, level-1:8].sum(axis=1)
    counts_bayes.iloc[:, level-1] = counts
    counts_bayes = counts_bayes.drop(counts_bayes.columns[level:8], axis=1)
    counts_bayes = counts_bayes.loc[leave_names]
    return counts_bayes
def summarize_bayes_at_level(counts_bayes, leave_names=None, level=7):
    """Public wrapper around ``_summarize_bayes_at_level``.

    When ``leave_names`` is not supplied, it is derived as the unique set of
    taxonomy strings truncated to ``level`` ranks.

    Bug fix: the original tested ``if not leave_names:``, which raises
    ``ValueError`` for a numpy array or pandas Index (ambiguous truth value)
    and silently recomputes names for an explicitly-passed empty sequence.
    Testing against ``None`` keeps the default behavior while accepting
    array-like arguments.
    """
    if leave_names is None:
        leave_names = np.unique(np.array([';'.join(_.split(';')[:level]) for _ in counts_bayes.index]))
    return _summarize_bayes_at_level(counts_bayes, leave_names, level=level)
def _filter_leaves_for_tax(leaf_counts_df, taxa):
return np.array([_.startswith(taxa + ';') for _ in leaf_counts_df.index])
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-05-09 09:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``submission_date`` to Attempt/HistoricalAttempt and backfill it.

    The field is first added as nullable, backfilled with raw SQL from the
    history table, and only then tightened to its final definition.
    """

    dependencies = [
        ('attempts', '0007_auto_20161004_0927'),
    ]

    operations = [
        # Step 1: add the columns as nullable so existing rows stay valid.
        migrations.AddField(
            model_name='attempt',
            name='submission_date',
            field=models.DateTimeField(null=True),
        ),
        migrations.AddField(
            model_name='historicalattempt',
            name='submission_date',
            field=models.DateTimeField(null=True),
        ),
        # Step 2: historical rows take their own history timestamp.
        migrations.RunSQL(
            'UPDATE attempts_historicalattempt SET submission_date = history_date'
        ),
        # Step 3: each attempt takes the newest timestamp among its matching
        # history rows (correlated subquery — portable, SQLite-compatible).
        migrations.RunSQL(
            '''UPDATE attempts_attempt
            SET submission_date = (
                SELECT max(history_date)
                FROM attempts_historicalattempt
                WHERE attempts_attempt.user_id = user_id
                AND attempts_attempt.part_id = part_id
            )
            '''
        ),
        # Step 4: final field definitions (auto_now on Attempt; plain
        # non-editable field on the historical copy).
        migrations.AlterField(
            model_name='attempt',
            name='submission_date',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='historicalattempt',
            name='submission_date',
            field=models.DateTimeField(blank=True, editable=False),
        ),
    ]
Revert "Make migration SQLite compatible"
This reverts commit 768d85cccb17c8757dd8d14dad220d0b87568264.
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-05-09 09:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``submission_date`` to Attempt/HistoricalAttempt and backfill it.

    Fields are added as nullable, backfilled with raw SQL from the history
    table, then tightened to their final definitions.

    NOTE(review): the backfill uses ``UPDATE ... FROM`` syntax (PostgreSQL
    style); it will not run on SQLite — confirm the target database.
    """

    dependencies = [
        ('attempts', '0007_auto_20161004_0927'),
    ]

    operations = [
        # Step 1: add the columns as nullable so existing rows stay valid.
        migrations.AddField(
            model_name='attempt',
            name='submission_date',
            field=models.DateTimeField(null=True),
        ),
        migrations.AddField(
            model_name='historicalattempt',
            name='submission_date',
            field=models.DateTimeField(null=True),
        ),
        # Step 2: historical rows take their own history timestamp.
        migrations.RunSQL(
            'UPDATE attempts_historicalattempt SET submission_date = history_date'
        ),
        # Step 3: each attempt takes the newest timestamp among its matching
        # history rows, joined per (user_id, part_id).
        migrations.RunSQL(
            '''UPDATE attempts_attempt
            SET submission_date = subquery.submission_date
            FROM (
                SELECT user_id, part_id, max(history_date) AS submission_date
                FROM attempts_historicalattempt
                GROUP BY user_id, part_id
            ) AS subquery
            WHERE attempts_attempt.user_id = subquery.user_id
            AND attempts_attempt.part_id = subquery.part_id
            '''
        ),
        # Step 4: final field definitions (auto_now on Attempt; plain
        # non-editable field on the historical copy).
        migrations.AlterField(
            model_name='attempt',
            name='submission_date',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='historicalattempt',
            name='submission_date',
            field=models.DateTimeField(blank=True, editable=False),
        ),
    ]
|
#coding: utf-8
from dicts import morph_ru
from pymorphy.morph_tests.base import MorphTestCase, unittest2
class TestPluraliseRu(MorphTestCase):
    """Tests for Russian pluralisation (``assertPlural``)."""

    def test_nouns(self):
        self.assertPlural(u'ГОРОД', u'ГОРОДА')
        self.assertPlural(u'СТАЛЬ', u'СТАЛИ')
        self.assertPlural(u'СТАЛЕВАРОМ', u'СТАЛЕВАРАМИ')

    def test_predictor_nouns(self):
        # Out-of-dictionary word: the plural must be predicted.
        self.assertPlural(u'БУТЯВКОЙ', u'БУТЯВКАМИ')

    def test_verbs(self):
        self.assertPlural(u'ГУЛЯЛ', u'ГУЛЯЛИ')
        self.assertPlural(u'ГУЛЯЛА', u'ГУЛЯЛИ')
        self.assertPlural(u'РАСПРЫГИВАЕТСЯ', u'РАСПРЫГИВАЮТСЯ')

    def test_prefix(self):
        # Word formed with a productive prefix ("СУПЕР-").
        self.assertPlural(u'СУПЕРКОТ', u'СУПЕРКОТЫ')

    def test_predict_by_suffix(self):
        self.assertPlural(u'ДЕПЫРТАМЕНТ', u'ДЕПЫРТАМЕНТЫ')
        self.assertPlural(u'ХАБР', u'ХАБРЫ')

    def test_invalid_word(self):
        # Non-letter input is expected to pass through unchanged.
        self.assertPlural(u'123', u'123')

    def test_invalid_graminfo(self):
        self.assertPlural(u'НАЧАЛО', u'НАЧАЛА', gram_class=u'С')


class TestInflectRu(MorphTestCase):
    """Tests for inflecting Russian words into a requested grammatical form."""

    def test_inflect(self):
        self.assertInflected(u"СУСЛИКОВ", u"дт", u"СУСЛИКАМ")
        self.assertInflected(u"СУСЛИК", u"дт", u"СУСЛИКУ")
        self.assertInflected(u"СУСЛИКА", u"дт", u"СУСЛИКУ")
        self.assertInflected(u"СУСЛИК", u"мн,дт", u"СУСЛИКАМ")

    def test_verbs(self):
        self.assertInflected(u"ГУЛЯЮ", u"прш", u"ГУЛЯЛ")
        self.assertInflected(u"ГУЛЯЛ", u"нст", u"ГУЛЯЮ")

    def test_loc2(self):
        self.assertInflected(u'ЛЕС', u'пр', u'ЛЕСЕ')  # plain prepositional: "about the forest"
        self.assertInflected(u'ЛЕС', u'пр,2', u'ЛЕСУ')  # second (locative) prepositional: "in the forest"
        # plain prepositional: "about the bicycle"
        self.assertInflected(u'ВЕЛОСИПЕД', u'пр', u'ВЕЛОСИПЕДЕ')
        # this word has no second prepositional form, so the plain one is used
        self.assertInflected(u'ВЕЛОСИПЕД', u'пр,2', u'ВЕЛОСИПЕДЕ')

    def test_decline_bug(self):
        # Regression test: fleeting vowel in the stem (ОРЕЛ -> ОРЛА).
        self.assertInflected(u'ОРЕЛ', u'рд', u'ОРЛА')


class TestPluralizeInflected(MorphTestCase):
    """Tests for agreeing a noun with a numeral (Russian 1/2/5 plural forms)."""

    def assertInflectedPlural(self, word, count, result, *args, **kwargs):
        # Helper: pluralize ``word`` to agree with ``count`` and compare.
        morphed_word = morph_ru.pluralize_inflected_ru(word, count, *args, **kwargs)
        self.assertEqualRu(morphed_word, result)

    def test_parrots(self):
        self.assertInflectedPlural(u"ПОПУГАЙ", 1, u"ПОПУГАЙ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 2, u"ПОПУГАЯ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 3, u"ПОПУГАЯ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 4, u"ПОПУГАЯ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 5, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 7, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 9, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 0, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 10, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 11, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 12, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 15, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 19, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 21, u"ПОПУГАЙ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 32, u"ПОПУГАЯ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 38, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 232, u"ПОПУГАЯ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 111, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 101, u"ПОПУГАЙ")

    def test_butyavka(self):
        self.assertInflectedPlural(u"БУТЯВКА", 1, u"БУТЯВКА")
        self.assertInflectedPlural(u"БУТЯВКА", 2, u"БУТЯВКИ")
        self.assertInflectedPlural(u"БУТЯВКА", 5, u"БУТЯВОК")

    def test_adjective(self):
        self.assertInflectedPlural(u'АКТИВНЫЙ', 1, u'АКТИВНЫЙ')
        self.assertInflectedPlural(u'АКТИВНЫЙ', 2, u'АКТИВНЫХ')
        self.assertInflectedPlural(u'АКТИВНЫЙ', 5, u'АКТИВНЫХ')
        self.assertInflectedPlural(u'АКТИВНАЯ', 1, u'АКТИВНАЯ')
        self.assertInflectedPlural(u'АКТИВНАЯ', 2, u'АКТИВНЫХ')
        self.assertInflectedPlural(u'АКТИВНАЯ', 5, u'АКТИВНЫХ')

    def test_gerund(self):
        self.assertInflectedPlural(u'ИДУЩИЙ', 1, u'ИДУЩИЙ')
        self.assertInflectedPlural(u'ИДУЩИЙ', 2, u'ИДУЩИХ')
        self.assertInflectedPlural(u'ИДУЩИЙ', 5, u'ИДУЩИХ')


# Allow running this test module directly.
if __name__ == '__main__':
    unittest2.main()
A couple more word-declension tests. See #36.
#coding: utf-8
from dicts import morph_ru
from pymorphy.morph_tests.base import MorphTestCase, unittest2
class TestPluraliseRu(MorphTestCase):
    """Tests for Russian pluralisation (``assertPlural``)."""

    def test_nouns(self):
        self.assertPlural(u'ГОРОД', u'ГОРОДА')
        self.assertPlural(u'СТАЛЬ', u'СТАЛИ')
        self.assertPlural(u'СТАЛЕВАРОМ', u'СТАЛЕВАРАМИ')

    def test_predictor_nouns(self):
        # Out-of-dictionary word: the plural must be predicted.
        self.assertPlural(u'БУТЯВКОЙ', u'БУТЯВКАМИ')

    def test_verbs(self):
        self.assertPlural(u'ГУЛЯЛ', u'ГУЛЯЛИ')
        self.assertPlural(u'ГУЛЯЛА', u'ГУЛЯЛИ')
        self.assertPlural(u'РАСПРЫГИВАЕТСЯ', u'РАСПРЫГИВАЮТСЯ')

    def test_prefix(self):
        # Word formed with a productive prefix ("СУПЕР-").
        self.assertPlural(u'СУПЕРКОТ', u'СУПЕРКОТЫ')

    def test_predict_by_suffix(self):
        self.assertPlural(u'ДЕПЫРТАМЕНТ', u'ДЕПЫРТАМЕНТЫ')
        self.assertPlural(u'ХАБР', u'ХАБРЫ')

    def test_invalid_word(self):
        # Non-letter input is expected to pass through unchanged.
        self.assertPlural(u'123', u'123')

    def test_invalid_graminfo(self):
        self.assertPlural(u'НАЧАЛО', u'НАЧАЛА', gram_class=u'С')


class TestInflectRu(MorphTestCase):
    """Tests for inflecting Russian words into a requested grammatical form."""

    def test_inflect(self):
        self.assertInflected(u"СУСЛИК", u"дт", u"СУСЛИКУ")
        self.assertInflected(u"СУСЛИКИ", u"дт", u"СУСЛИКАМ")
        self.assertInflected(u"СУСЛИКОВ", u"дт", u"СУСЛИКАМ")
        self.assertInflected(u"СУСЛИКА", u"дт", u"СУСЛИКУ")
        self.assertInflected(u"СУСЛИК", u"мн,дт", u"СУСЛИКАМ")

    def test_verbs(self):
        self.assertInflected(u"ГУЛЯЮ", u"прш", u"ГУЛЯЛ")
        self.assertInflected(u"ГУЛЯЛ", u"нст", u"ГУЛЯЮ")

    def test_loc2(self):
        self.assertInflected(u'ЛЕС', u'пр', u'ЛЕСЕ')  # plain prepositional: "about the forest"
        self.assertInflected(u'ЛЕС', u'пр,2', u'ЛЕСУ')  # second (locative) prepositional: "in the forest"
        # plain prepositional: "about the bicycle"
        self.assertInflected(u'ВЕЛОСИПЕД', u'пр', u'ВЕЛОСИПЕДЕ')
        # this word has no second prepositional form, so the plain one is used
        self.assertInflected(u'ВЕЛОСИПЕД', u'пр,2', u'ВЕЛОСИПЕДЕ')

    def test_decline_bug(self):
        # Regression test: fleeting vowel in the stem (ОРЕЛ -> ОРЛА).
        self.assertInflected(u'ОРЕЛ', u'рд', u'ОРЛА')

    # see https://bitbucket.org/kmike/pymorphy/issue/36/
    @unittest2.expectedFailure
    def test_improper_guess(self):
        self.assertInflected(u'ОСТРОВА', u'дт', u'ОСТРОВАМ')

    # see https://bitbucket.org/kmike/pymorphy/issue/36/
    @unittest2.expectedFailure
    def test_improper_guess2(self):
        self.assertInflected(u'КИЕВ', u'пр', u'КИЕВЕ')


class TestPluralizeInflected(MorphTestCase):
    """Tests for agreeing a noun with a numeral (Russian 1/2/5 plural forms)."""

    def assertInflectedPlural(self, word, count, result, *args, **kwargs):
        # Helper: pluralize ``word`` to agree with ``count`` and compare.
        morphed_word = morph_ru.pluralize_inflected_ru(word, count, *args, **kwargs)
        self.assertEqualRu(morphed_word, result)

    def test_parrots(self):
        self.assertInflectedPlural(u"ПОПУГАЙ", 1, u"ПОПУГАЙ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 2, u"ПОПУГАЯ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 3, u"ПОПУГАЯ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 4, u"ПОПУГАЯ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 5, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 7, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 9, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 0, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 10, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 11, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 12, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 15, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 19, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 21, u"ПОПУГАЙ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 32, u"ПОПУГАЯ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 38, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 232, u"ПОПУГАЯ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 111, u"ПОПУГАЕВ")
        self.assertInflectedPlural(u"ПОПУГАЙ", 101, u"ПОПУГАЙ")

    def test_butyavka(self):
        self.assertInflectedPlural(u"БУТЯВКА", 1, u"БУТЯВКА")
        self.assertInflectedPlural(u"БУТЯВКА", 2, u"БУТЯВКИ")
        self.assertInflectedPlural(u"БУТЯВКА", 5, u"БУТЯВОК")

    def test_adjective(self):
        self.assertInflectedPlural(u'АКТИВНЫЙ', 1, u'АКТИВНЫЙ')
        self.assertInflectedPlural(u'АКТИВНЫЙ', 2, u'АКТИВНЫХ')
        self.assertInflectedPlural(u'АКТИВНЫЙ', 5, u'АКТИВНЫХ')
        self.assertInflectedPlural(u'АКТИВНАЯ', 1, u'АКТИВНАЯ')
        self.assertInflectedPlural(u'АКТИВНАЯ', 2, u'АКТИВНЫХ')
        self.assertInflectedPlural(u'АКТИВНАЯ', 5, u'АКТИВНЫХ')

    def test_gerund(self):
        self.assertInflectedPlural(u'ИДУЩИЙ', 1, u'ИДУЩИЙ')
        self.assertInflectedPlural(u'ИДУЩИЙ', 2, u'ИДУЩИХ')
        self.assertInflectedPlural(u'ИДУЩИЙ', 5, u'ИДУЩИХ')


# Allow running this test module directly.
if __name__ == '__main__':
    unittest2.main()
|
"""
Atmosphere service exceptions.
"""
from ansible.errors import AnsibleError
class ActionNotAllowed(Exception):
    """Raised when a requested action is rejected; carries HTTP 409 Conflict."""

    def __init__(self, message):
        self.message = message
        self.status_code = 409
        super(ActionNotAllowed, self).__init__()


class UnderThresholdError(Exception):
    """Raised when a value falls below a required threshold; carries HTTP 400."""

    def __init__(self, message):
        self.message = message
        self.status_code = 400
        super(UnderThresholdError, self).__init__()


class SecurityGroupNotCreated(Exception):
    """Raised when security group creation times out; carries HTTP 504."""

    def __init__(self):
        self.message = "Gateway Timeout! Security Group(s) could not be created. Please try again later"
        self.status_code = 504
        super(SecurityGroupNotCreated, self).__init__()

    def __str__(self):
        return "%s" % (self.message, )


class HypervisorCapacityError(Exception):
    """Raised when a hypervisor lacks capacity for the requested operation."""

    def __init__(self, hypervisor, message):
        self.hypervisor = hypervisor
        self.message = message
        super(HypervisorCapacityError, self).__init__(self.message)


class OverAllocationError(Exception):
    """Raised when an instance exceeds its time allocation."""

    def __init__(self, amount_exceeded):
        self.overage = amount_exceeded
        self.message = "Time allocation exceeded: Instance usage is over by "\
            "%s."\
            % (self.overage,)
        super(OverAllocationError, self).__init__(self.message)

    def __str__(self):
        return "%s" % (self.message, )


class OverQuotaError(Exception):
    """Raised when a resource request would exceed the user's quota."""

    def __init__(self, resource=None, requested=None,
                 used=None, limit=None, message=None):
        # A caller-supplied message overrides the default quota description.
        if not message:
            self.message = "Quota exceeded: Requested %s %s but already used "\
                "%s/%s %s."\
                % (requested, resource, used, limit, resource)
        else:
            self.message = message
        super(OverQuotaError, self).__init__(self.message)

    def __str__(self):
        return "%s" % (self.message, )


class DeviceBusyException(Exception):
    """Raised when a volume cannot be unmounted because processes hold it open.

    Bug fix: ``__str__`` reads ``self.process_list``, but ``__init__`` never
    assigned it, so formatting the exception raised ``AttributeError``.  The
    attribute is now stored.
    """

    def __init__(self, mount_loc, process_list):
        # Keep the offending (name, pid) pairs for __str__ and for callers.
        self.process_list = process_list
        proc_str = ''
        for proc_name, pid in process_list:
            proc_str += '\nProcess name:%s process id:%s' % (proc_name, pid)
        message = "Volume mount location is: %s\nRunning processes that"\
            " are accessing that directory must be closed before "\
            "unmounting. All offending processes names and IDs are "\
            "listed below:%s" % (mount_loc, proc_str)
        self.message = message
        super(DeviceBusyException, self).__init__(mount_loc, process_list)

    def __str__(self):
        return "%s:\n%s" % (self.message, repr(self.process_list))


class SizeNotAvailable(Exception):
    """Raised when the requested instance size is not available."""

    def __init__(self):
        self.message = "Size Not Available."
        super(SizeNotAvailable, self).__init__()

    def __str__(self):
        return "%s" % (self.message, )


class VolumeAttachConflict(Exception):
    """Raised when a volume is still attached to an instance."""

    def __init__(self, instance_id, volume_id):
        self.message = "Volume %s is still attached to instance %s"\
            % (volume_id, instance_id)
        super(VolumeAttachConflict, self).__init__()

    def __str__(self):
        return "%s" % (self.message, )


class VolumeMountConflict(Exception):
    """Raised when a volume could not be auto-mounted to an instance."""

    def __init__(self, instance_id, volume_id, extra=None):
        self.message = "Volume %s could not be auto-mounted to %s. %s"\
            " See Available Volumes -> Mounting a Volume "\
            " to learn how to mount the device manually"\
            % (volume_id, instance_id, "Reason:%s" % extra)
        super(VolumeMountConflict, self).__init__()

    def __str__(self):
        return "%s" % (self.message, )
# Service-specific Ansible error type; the name suggests it marks deploy
# failures, but no raise sites are visible in this file — confirm usage.
class AnsibleDeployException(AnsibleError):
    pass
[ATMO-917] inherit from base exception
# -*- coding: utf-8 -*-
"""
Atmosphere service exceptions.
"""
from ansible.errors import AnsibleError
class ServiceException(Exception):
    """
    Base Service exception class
    """


class ActionNotAllowed(ServiceException):
    """Raised when a requested action is rejected; carries HTTP 409 Conflict."""

    def __init__(self, message):
        self.message = message
        self.status_code = 409
        super(ActionNotAllowed, self).__init__()


class UnderThresholdError(ServiceException):
    """Raised when a value falls below a required threshold; carries HTTP 400."""

    def __init__(self, message):
        self.message = message
        self.status_code = 400
        super(UnderThresholdError, self).__init__()


class SecurityGroupNotCreated(ServiceException):
    """Raised when security group creation times out; carries HTTP 504."""

    def __init__(self):
        self.message = ("Gateway Timeout! Security Group(s) could not be "
                        "created. Please try again later")
        self.status_code = 504
        super(SecurityGroupNotCreated, self).__init__()

    def __str__(self):
        return "%s" % (self.message, )


class HypervisorCapacityError(ServiceException):
    """Raised when a hypervisor lacks capacity for the requested operation."""

    def __init__(self, hypervisor, message):
        self.hypervisor = hypervisor
        self.message = message
        super(HypervisorCapacityError, self).__init__(self.message)


class OverAllocationError(ServiceException):
    """Raised when an instance exceeds its time allocation."""

    def __init__(self, amount_exceeded):
        self.overage = amount_exceeded
        self.message = "Time allocation exceeded: Instance usage is over by "\
            "%s."\
            % (self.overage,)
        super(OverAllocationError, self).__init__(self.message)

    def __str__(self):
        return "%s" % (self.message, )


class OverQuotaError(ServiceException):
    """Raised when a resource request would exceed the user's quota."""

    def __init__(self, resource=None, requested=None,
                 used=None, limit=None, message=None):
        # A caller-supplied message overrides the default quota description.
        if not message:
            self.message = "Quota exceeded: Requested %s %s but already used "\
                "%s/%s %s."\
                % (requested, resource, used, limit, resource)
        else:
            self.message = message
        super(OverQuotaError, self).__init__(self.message)

    def __str__(self):
        return "%s" % (self.message, )


class DeviceBusyException(ServiceException):
    """Raised when a volume cannot be unmounted because processes hold it open.

    Bug fix: ``__str__`` reads ``self.process_list``, but ``__init__`` never
    assigned it, so formatting the exception raised ``AttributeError``.  The
    attribute is now stored.
    """

    def __init__(self, mount_loc, process_list):
        # Keep the offending (name, pid) pairs for __str__ and for callers.
        self.process_list = process_list
        proc_str = ''
        for proc_name, pid in process_list:
            proc_str += '\nProcess name:%s process id:%s' % (proc_name, pid)
        message = "Volume mount location is: %s\nRunning processes that"\
            " are accessing that directory must be closed before "\
            "unmounting. All offending processes names and IDs are "\
            "listed below:%s" % (mount_loc, proc_str)
        self.message = message
        super(DeviceBusyException, self).__init__(mount_loc, process_list)

    def __str__(self):
        return "%s:\n%s" % (self.message, repr(self.process_list))


class SizeNotAvailable(ServiceException):
    """Raised when the requested instance size is not available."""

    def __init__(self):
        self.message = "Size Not Available."
        super(SizeNotAvailable, self).__init__()

    def __str__(self):
        return "%s" % (self.message, )


class VolumeAttachConflict(ServiceException):
    """Raised when a volume is still attached to an instance."""

    def __init__(self, instance_id, volume_id):
        self.message = "Volume %s is still attached to instance %s"\
            % (volume_id, instance_id)
        super(VolumeAttachConflict, self).__init__()

    def __str__(self):
        return "%s" % (self.message, )


class VolumeMountConflict(ServiceException):
    """Raised when a volume could not be auto-mounted to an instance."""

    def __init__(self, instance_id, volume_id, extra=None):
        self.message = "Volume %s could not be auto-mounted to %s. %s"\
            " See Available Volumes -> Mounting a Volume "\
            " to learn how to mount the device manually"\
            % (volume_id, instance_id, "Reason:%s" % extra)
        super(VolumeMountConflict, self).__init__()

    def __str__(self):
        return "%s" % (self.message, )
# Service-specific Ansible error type; inherits both AnsibleError and the
# service base so either hierarchy can catch it.  The name suggests deploy
# failures, but no raise sites are visible in this file — confirm usage.
class AnsibleDeployException(AnsibleError, ServiceException):
    pass
|
#!/usr/bin/env python
import urllib2
import ping_me
import hashlib
import sys
import subprocess
from ping_me.utils import cryptex
# Python 2 script: poll the ping-me server once and show a desktop
# notification for any message addressed to this user.
# The country code selects which per-country file on the server to read;
# the filename is the first two plus last two characters of the code.
country = ping_me.authenticate.extract_phone()[2]
filename = country[:2] + country[-2:]
del country
target = "http://www.himanshumishra.in/pingme/cron/" + filename + '.txt'
# The server file maps md5(email) -> encrypted message; the raw email is
# discarded immediately after hashing.
email = ping_me.authenticate.extract_email()
password = ping_me.authenticate.extract_password()
hashed_email = hashlib.md5(email).hexdigest()
del email
data = urllib2.urlopen(target)
found = False
for line in data:
    line = line.split()
    if line[0] == hashed_email:
        found = True
        # Decrypt the pending message using the stored password as key.
        message = cryptex.decryptor(password, line[1])
        if sys.platform == 'linux2':
            subprocess.call(['notify-send', message])
        elif sys.platform == 'darwin':
            # NOTE(review): unlike the linux branch, this shows a generic
            # "Ping!" instead of the decrypted message — confirm intended.
            subprocess.call(['terminal-notifier', '-title', 'ping-me', 'Ping!'])
        elif sys.platform in ['win32', 'win64']:
            # Do things for windows
            pass
# If not found in the country's name, search in XXXX.txt
if not found:
    target = 'http://www.himanshumishra.in/pingme/cron/XXXX.txt'
    data = urllib2.urlopen(target)
    for line in data:
        line = line.split()
        if line[0] == hashed_email:
            found = True
            message = cryptex.decryptor(password, line[1])
            if sys.platform == 'linux2':
                subprocess.call(['notify-send', message])
            elif sys.platform == 'darwin':
                subprocess.call(['terminal-notifier', '-title', message])
            elif sys.platform in ['win32', 'win64']:
                # Do things for windows
                pass
del target
del data
del hashed_email
Modify GET.py
#!/usr/bin/env python
"""Receive request to show notification"""
import urllib2
import hashlib
import sys
import subprocess
import time
from ping_me.utils import cryptex
import ping_me
# Poll the server every 5 seconds and show a notification when a message
# addressed to this user (keyed by md5(email)) appears.
while(True):
    # The try/except is for the case when the file might not exist
    # (or any transient network/config failure): wait and retry.
    try:
        country = ping_me.authenticate.extract_phone()[2]
        filename = country[:2] + country[-2:] + '.txt'
        target = "http://www.himanshumishra.in/pingme/cron/" + filename
        email = ping_me.authenticate.extract_email()
        hashed_email = hashlib.md5(email).hexdigest()
        key = ping_me.authenticate.extract_password()
        # Bug fix: ``found`` was never initialized, so the ``if not found:``
        # branch below raised NameError on every pass (silently swallowed by
        # the except clause) and the catch-all file was never consulted.
        found = False
        data = urllib2.urlopen(target)
        for line in data:
            line = line.split()
            if line and line[0] == hashed_email:
                # ping time!
                found = True
                message = cryptex.decryptor(key, line[1])
                if sys.platform == 'linux2':
                    subprocess.call(['notify-send', message])
                elif sys.platform == 'darwin':
                    subprocess.call(['terminal-notifier', '-title', 'ping-me', 'Ping!'])
                elif sys.platform in ['win32', 'win64']:
                    # Do things for windows
                    pass
        # If not found in the country's file, search the catch-all XXXX.txt
        if not found:
            target = 'http://www.himanshumishra.in/pingme/cron/XXXX.txt'
            # Bug fix: was ``urllib.urlopen`` — ``urllib`` is never imported
            # here; use the same urllib2 opener as above.
            data = urllib2.urlopen(target)
            for line in data:
                line = line.split()
                if line and line[0] == hashed_email:
                    found = True
                    message = cryptex.decryptor(key, line[1])
                    if sys.platform == 'linux2':
                        subprocess.call(['notify-send', message])
                    elif sys.platform == 'darwin':
                        subprocess.call(['terminal-notifier', '-title', message])
                    elif sys.platform in ['win32', 'win64']:
                        # Do things for windows
                        pass
        time.sleep(5)
    except Exception:
        # Bug fix: narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit can still terminate the daemon loop.
        time.sleep(5)
|
from __future__ import print_function
import math
import numpy as np
from scipy.fftpack import fft
import matplotlib.pyplot as plt
from scipy.signal import lfilter
from scipy.fftpack.realtransforms import dct
# Small constant guarding against division by zero and log2(0).
eps = 0.00000001
def zero_crossing_rate(frame):
    """Fraction of sign changes between consecutive samples of ``frame``."""
    num_samples = len(frame)
    # Each sign flip contributes |diff(sign)| == 2, hence the halving.
    sign_changes = np.sum(np.abs(np.diff(np.sign(frame)))) / 2
    return np.float64(sign_changes) / np.float64(num_samples - 1.0)
def energy(frame):
    """Mean of squared sample values (signal energy) of ``frame``."""
    total = np.sum(frame ** 2)
    return total / np.float64(len(frame))
def energy_entropy(frame, n_short_blocks=10):
    """Entropy of the normalized sub-frame energies of ``frame``."""
    # total frame energy
    total_energy = np.sum(frame ** 2)
    n_samples = len(frame)
    sub_len = int(np.floor(n_samples / n_short_blocks))
    # Truncate so the frame divides evenly into n_short_blocks sub-frames.
    if n_samples != sub_len * n_short_blocks:
        frame = frame[0:sub_len * n_short_blocks]
    # Column-major reshape: one column per sub-frame.
    sub_frames = frame.reshape(sub_len, n_short_blocks, order='F').copy()
    # Normalized sub-frame energies (eps guards division by zero).
    probs = np.sum(sub_frames ** 2, axis=0) / (total_energy + eps)
    # Entropy of the normalized sub-frame energies.
    return -np.sum(probs * np.log2(probs + eps))
""" Frequency-domain audio features """
def spectral_centroid_spread(fft_magnitude, sampling_rate):
    """Spectral centroid and spread of a frame given abs(FFT).

    Both values are normalized by the Nyquist frequency (sampling_rate / 2).
    """
    n_bins = len(fft_magnitude)
    # Center frequency associated with each FFT bin.
    bin_freqs = (np.arange(1, n_bins + 1)) * \
        (sampling_rate / (2.0 * n_bins))
    spectrum = fft_magnitude.copy()
    spectrum = spectrum / spectrum.max()
    denominator = np.sum(spectrum) + eps
    # Centroid: magnitude-weighted mean frequency.
    centroid = np.sum(bin_freqs * spectrum) / denominator
    # Spread: magnitude-weighted standard deviation around the centroid.
    spread = np.sqrt(np.sum(((bin_freqs - centroid) ** 2) * spectrum) / denominator)
    # Normalize both to [0, 1] by the Nyquist frequency.
    centroid = centroid / (sampling_rate / 2.0)
    spread = spread / (sampling_rate / 2.0)
    return centroid, spread
def spectral_entropy(signal, n_short_blocks=10):
    """Entropy of the normalized spectral sub-frame energies of ``signal``."""
    n_bins = len(signal)
    # total spectral energy
    total_energy = np.sum(signal ** 2)
    sub_len = int(np.floor(n_bins / n_short_blocks))
    # Truncate so the spectrum divides evenly into n_short_blocks sub-frames.
    if n_bins != sub_len * n_short_blocks:
        signal = signal[0:sub_len * n_short_blocks]
    # One column per sub-frame (column-major reshape).
    sub_frames = signal.reshape(sub_len, n_short_blocks, order='F').copy()
    # Normalized spectral sub-energies (eps guards division by zero).
    probs = np.sum(sub_frames ** 2, axis=0) / (total_energy + eps)
    # Entropy of the normalized sub-energies.
    return -np.sum(probs * np.log2(probs + eps))
def spectral_flux(fft_magnitude, previous_fft_magnitude):
    """
    Compute the spectral flux between the current and previous frame.

    ARGUMENTS:
        fft_magnitude:            the abs(fft) of the current frame
        previous_fft_magnitude:   the abs(fft) of the previous frame

    Returns the sum of squared differences between the sum-normalized
    spectra of the two frames.
    """
    # normalize each spectrum by its (eps-padded) total magnitude
    current = fft_magnitude / np.sum(fft_magnitude + eps)
    previous = previous_fft_magnitude / np.sum(previous_fft_magnitude + eps)
    return np.sum((current - previous) ** 2)
def spectral_rolloff(signal, c):
    """Compute spectral roll-off: the normalized bin position below which
    a fraction ``c`` of the total spectral energy is concentrated."""
    fft_length = len(signal)
    power = signal ** 2
    threshold = c * np.sum(power)
    # cumulative spectral energy; locate the first bin past the threshold
    cumulative_sum = np.cumsum(power) + eps
    above = np.nonzero(cumulative_sum > threshold)[0]
    if len(above) > 0:
        return np.float64(above[0]) / float(fft_length)
    return 0.0
def harmonic(frame, sampling_rate):
    """
    Compute the harmonic ratio and fundamental frequency (pitch) of a frame.

    ARGUMENTS:
        frame:          audio samples of a single short-term frame
        sampling_rate:  sampling frequency in Hz
    RETURNS:
        (hr, f0): harmonic ratio and pitch in Hz; both 0.0 when the frame
        is judged unvoiced (noisy normalized autocorrelation, very low
        harmonic ratio, or implausible pitch > 5000 Hz).
    """
    # maximum lag: 16 ms worth of samples.
    # BUGFIX: np.round returns a float; m is used below as an array size
    # and a slice index, which requires an int on modern NumPy.
    m = int(np.round(0.016 * sampling_rate)) - 1
    r = np.correlate(frame, frame, mode='full')

    g = r[len(frame) - 1]          # zero-lag autocorrelation (frame energy)
    r = r[len(frame):-1]           # positive-lag part only

    # estimate m0 as the first zero crossing of the autocorrelation
    [a, ] = np.nonzero(np.diff(np.sign(r)))

    if len(a) == 0:
        m0 = len(r) - 1
    else:
        m0 = a[0]
    if m > len(r):
        m = len(r) - 1

    # normalized autocorrelation over lags [m0, m)
    gamma = np.zeros((m), dtype=np.float64)
    cumulative_sum = np.cumsum(frame ** 2)
    gamma[m0:m] = r[m0:m] / (np.sqrt((g * cumulative_sum[m:m0:-1])) + eps)

    zcr = zero_crossing_rate(gamma)

    if zcr > 0.15:
        # too noisy to be voiced
        hr = 0.0
        f0 = 0.0
    else:
        if len(gamma) == 0:
            hr = 1.0
            blag = 0.0
            gamma = np.zeros((m), dtype=np.float64)
        else:
            hr = np.max(gamma)
            blag = np.argmax(gamma)

        # fundamental frequency from the best lag
        f0 = sampling_rate / (blag + eps)
        if f0 > 5000:
            f0 = 0.0
        if hr < 0.1:
            f0 = 0.0
    return hr, f0
def mfcc_filter_banks(sampling_rate, num_fft, lowfreq=133.33, linc=200 / 3,
                      logsc=1.0711703, num_lin_filt=13, num_log_filt=27):
    """
    Compute the triangular filterbank for MFCC computation
    (used in the stFeatureExtraction function before the stMFCC call).
    Adapted from the scikits.talkbox library (MIT Licence):
    https://pypi.python.org/pypi/scikits.talkbox

    ARGUMENTS:
        sampling_rate:  sampling frequency in Hz
        num_fft:        number of FFT bins
        lowfreq:        lowest band edge (Hz) of the linear filters
        linc:           linear spacing (Hz) of the low-frequency filters
        logsc:          geometric spacing factor of the log filters
        num_lin_filt:   number of linearly spaced filters
        num_log_filt:   number of log-spaced filters
    RETURNS:
        (fbank, frequencies): filterbank of shape
        (num_lin_filt + num_log_filt, num_fft) and the band-edge
        frequencies used to build it.
    """
    if sampling_rate < 8000:
        # BUGFIX: the original assigned an unused local `nlogfil` here, so
        # the intended reduction of log filters for low-rate audio never
        # took effect; assign the parameter actually used below.
        num_log_filt = 5

    # Total number of filters
    num_filt_total = num_lin_filt + num_log_filt

    # Compute frequency points of the triangles: linear then geometric
    frequencies = np.zeros(num_filt_total + 2)
    frequencies[:num_lin_filt] = lowfreq + np.arange(num_lin_filt) * linc
    frequencies[num_lin_filt:] = frequencies[num_lin_filt - 1] * logsc ** \
                                 np.arange(1, num_log_filt + 3)
    heights = 2. / (frequencies[2:] - frequencies[0:-2])

    # Compute filterbank coeff (in fft domain, in bins)
    fbank = np.zeros((num_filt_total, num_fft))
    nfreqs = np.arange(num_fft) / (1. * num_fft) * sampling_rate

    for i in range(num_filt_total):
        low_freqs = frequencies[i]
        cent_freqs = frequencies[i + 1]
        high_freqs = frequencies[i + 2]

        # rising slope bins.  BUGFIX: np.int was removed from NumPy 1.24;
        # the builtin int is the documented replacement.
        lid = np.arange(np.floor(low_freqs * num_fft / sampling_rate) + 1,
                        np.floor(cent_freqs * num_fft / sampling_rate) + 1,
                        dtype=int)
        lslope = heights[i] / (cent_freqs - low_freqs)
        # falling slope bins
        rid = np.arange(np.floor(cent_freqs * num_fft / sampling_rate) + 1,
                        np.floor(high_freqs * num_fft / sampling_rate) + 1,
                        dtype=int)
        rslope = heights[i] / (high_freqs - cent_freqs)
        fbank[i][lid] = lslope * (nfreqs[lid] - low_freqs)
        fbank[i][rid] = rslope * (high_freqs - nfreqs[rid])

    return fbank, frequencies
def mfcc(fft_magnitude, fbank, num_mfcc_feats):
    """
    Compute the MFCCs of a frame, given its FFT magnitude spectrum.

    ARGUMENTS:
        fft_magnitude:   fft magnitude abs(FFT)
        fbank:           filter bank (see mfcc_filter_banks)
        num_mfcc_feats:  number of cepstral coefficients to keep
    RETURN:
        ceps: MFCC vector of length num_mfcc_feats

    Based on the scikits.talkbox library (MIT Licence), with small
    modifications for pyAudioAnalysis.
    """
    # log-energy in each mel band, then a type-II DCT to decorrelate
    band_energies = np.dot(fft_magnitude, fbank.T)
    mspec = np.log10(band_energies + eps)
    return dct(mspec, type=2, norm='ortho', axis=-1)[:num_mfcc_feats]
def chroma_features_init(num_fft, sampling_rate):
    """
    Initialize the chroma matrices used in chroma feature computation.

    RETURNS:
        num_chroma:           chroma index (semitones above A0) per FFT bin
        num_freqs_per_chroma: for each bin, how many bins share its chroma
                              index (used later for averaging)
    """
    # center frequency (Hz) of each FFT bin (1-based)
    freqs = np.array([((f + 1) * sampling_rate) /
                      (2 * num_fft) for f in range(num_fft)])
    cp = 27.50  # frequency of A0, the chroma reference pitch
    num_chroma = np.round(12.0 * np.log2(freqs / cp)).astype(int)

    num_freqs_per_chroma = np.zeros((num_chroma.shape[0],))
    for u in np.unique(num_chroma):
        idx = np.nonzero(num_chroma == u)
        num_freqs_per_chroma[idx] = idx[0].shape
    return num_chroma, num_freqs_per_chroma
def chroma_features(signal, sampling_rate, num_fft):
    # TODO: 1 complexity
    # TODO: 2 bug with large windows
    """Compute a 12-bin chroma (pitch-class energy) vector of a frame.

    ARGUMENTS:
        signal:         magnitude spectrum of the frame (length num_fft)
        sampling_rate:  sampling frequency in Hz
        num_fft:        number of FFT bins
    RETURNS:
        (chroma_names, final_matrix): note names and a 12x1 matrix of
        pitch-class energies normalized by total spectral energy.
    """
    num_chroma, num_freqs_per_chroma = \
        chroma_features_init(num_fft, sampling_rate)
    chroma_names = ['A', 'A#', 'B', 'C', 'C#', 'D',
                    'D#', 'E', 'F', 'F#', 'G', 'G#']
    spec = signal ** 2
    # scatter per-bin power onto chroma indices; the branch guards against
    # chroma indices exceeding the number of bins
    if num_chroma.max() < num_chroma.shape[0]:
        C = np.zeros((num_chroma.shape[0],))
        C[num_chroma] = spec
        C /= num_freqs_per_chroma[num_chroma]
    else:
        # NOTE(review): this branch truncates at the first out-of-range
        # chroma index — likely the "bug with large windows" in the TODO;
        # confirm before relying on it for long analysis windows
        I = np.nonzero(num_chroma > num_chroma.shape[0])[0][0]
        C = np.zeros((num_chroma.shape[0],))
        C[num_chroma[0:I - 1]] = spec
        C /= num_freqs_per_chroma
    final_matrix = np.zeros((12, 1))
    # zero-pad C to a multiple of 12, then fold into 12 pitch classes
    newD = int(np.ceil(C.shape[0] / 12.0) * 12)
    C2 = np.zeros((newD,))
    C2[0:C.shape[0]] = C
    C2 = C2.reshape(int(C2.shape[0] / 12), 12)
    # for i in range(12):
    #    finalC[i] = np.sum(C[i:C.shape[0]:12])
    final_matrix = np.matrix(np.sum(C2, axis=0)).T
    # normalize by total spectral energy
    final_matrix /= spec.sum()
    # ax = plt.gca()
    # plt.hold(False)
    # plt.plot(finalC)
    # ax.set_xticks(range(len(chromaNames)))
    # ax.set_xticklabels(chromaNames)
    # xaxis = np.arange(0, 0.02, 0.01);
    # ax.set_yticks(range(len(xaxis)))
    # ax.set_yticklabels(xaxis)
    # plt.show(block=False)
    # plt.draw()
    return chroma_names, final_matrix
def chromagram(signal, sampling_rate, window, step, plot=False):
    """
    Short-term chromagram computation.
    Returns:
        a np array (numOfShortTermWindows x 12) of per-frame chroma vectors
    ARGUMENTS:
        signal:         the input signal samples
        sampling_rate:  the sampling freq (in Hz)
        window:         the short-term window size (in samples)
        step:           the short-term window step (in samples)
        plot:           flag, True if results are to be plotted
    RETURNS:
        (chromogram, time_axis, freq_axis)
    """
    window = int(window)
    step = int(step)
    # normalize from int16 range and remove DC offset
    signal = np.double(signal)
    signal = signal / (2.0 ** 15)
    dc_offset = signal.mean()
    maximum = (np.abs(signal)).max()
    signal = (signal - dc_offset) / (maximum - dc_offset)

    num_samples = len(signal)  # total number of samples
    cur_position = 0
    count_fr = 0
    num_fft = int(window / 2)
    chromogram = np.array([], dtype=np.float64)

    while cur_position + window - 1 < num_samples:
        count_fr += 1
        x = signal[cur_position:cur_position + window]
        cur_position = cur_position + step
        # magnitude spectrum of the current frame (first num_fft bins)
        X = abs(fft(x))
        X = X[0:num_fft]
        X = X / len(X)
        chroma_names, chroma_feature_matrix = chroma_features(X, sampling_rate,
                                                              num_fft)
        chroma_feature_matrix = chroma_feature_matrix[:, 0]
        # stack per-frame chroma vectors into the chromagram matrix
        if count_fr == 1:
            chromogram = chroma_feature_matrix.T
        else:
            chromogram = np.vstack((chromogram, chroma_feature_matrix.T))
    freq_axis = chroma_names
    time_axis = [(t * step) / sampling_rate
                 for t in range(chromogram.shape[0])]

    if plot:
        # stretch the 12-row chromagram vertically for readability
        fig, ax = plt.subplots()
        chromogram_plot = chromogram.transpose()[::-1, :]
        ratio = int(chromogram_plot.shape[1] / (3 * chromogram_plot.shape[0]))
        if ratio < 1:
            ratio = 1
        chromogram_plot = np.repeat(chromogram_plot, ratio, axis=0)
        imgplot = plt.imshow(chromogram_plot)

        ax.set_yticks(range(int(ratio / 2), len(freq_axis) * ratio, ratio))
        ax.set_yticklabels(freq_axis[::-1])
        # NOTE(review): t_step is 0 when fewer than 3 frames exist, which
        # would make range() raise ValueError — confirm callers always pass
        # signals longer than 3 windows before plotting
        t_step = int(count_fr / 3)
        time_ticks = range(0, count_fr, t_step)
        time_ticks_labels = ['%.2f' % (float(t * step) / sampling_rate)
                             for t in time_ticks]
        ax.set_xticks(time_ticks)
        ax.set_xticklabels(time_ticks_labels)
        ax.set_xlabel('time (secs)')
        imgplot.set_cmap('jet')
        plt.colorbar()
        plt.show()
    return chromogram, time_axis, freq_axis
def spectrogram(signal, sampling_rate, window, step, plot=False):
    """
    Short-term FFT magnitude for spectrogram estimation.
    Returns:
        a np array (numOfShortTermWindows x num_fft)
    ARGUMENTS:
        signal:         the input signal samples
        sampling_rate:  the sampling freq (in Hz)
        window:         the short-term window size (in samples)
        step:           the short-term window step (in samples)
        plot:           flag, True if results are to be plotted
    RETURNS:
        (specgram, time_axis, freq_axis): magnitude spectrogram, frame
        times in seconds and bin frequencies in Hz.
    """
    window = int(window)
    step = int(step)

    # signal normalization: scale from int16 range, remove DC offset
    signal = np.double(signal)
    signal = signal / (2.0 ** 15)
    dc_offset = signal.mean()
    maximum = (np.abs(signal)).max()
    signal = (signal - dc_offset) / (maximum - dc_offset)

    num_samples = len(signal)  # total number of samples
    cur_p = 0
    count_fr = 0
    num_fft = int(window / 2)
    specgram = np.array([], dtype=np.float64)

    while cur_p + window - 1 < num_samples:
        count_fr += 1
        x = signal[cur_p:cur_p + window]
        cur_p = cur_p + step
        X = abs(fft(x))
        X = X[0:num_fft]
        X = X / len(X)

        if count_fr == 1:
            # BUGFIX: the first frame used to be stored squared (X ** 2)
            # while every later frame was stored as a plain magnitude;
            # store magnitudes consistently for all frames
            specgram = X
        else:
            specgram = np.vstack((specgram, X))

    freq_axis = [float((f + 1) * sampling_rate) / (2 * num_fft)
                 for f in range(specgram.shape[1])]
    time_axis = [float(t * step) / sampling_rate
                 for t in range(specgram.shape[0])]

    if plot:
        fig, ax = plt.subplots()
        imgplot = plt.imshow(specgram.transpose()[::-1, :])
        fstep = int(num_fft / 5.0)
        frequency_ticks = range(0, int(num_fft) + fstep, fstep)
        frequency_tick_labels = \
            [str(sampling_rate / 2 -
                 int((f * sampling_rate) / (2 * num_fft)))
             for f in frequency_ticks]
        ax.set_yticks(frequency_ticks)
        ax.set_yticklabels(frequency_tick_labels)
        # BUGFIX: guard against a zero tick step for very short signals
        t_step = max(1, int(count_fr / 3))
        time_ticks = range(0, count_fr, t_step)
        time_ticks_labels = \
            ['%.2f' % (float(t * step) / sampling_rate) for t in time_ticks]
        ax.set_xticks(time_ticks)
        ax.set_xticklabels(time_ticks_labels)
        ax.set_xlabel('time (secs)')
        ax.set_ylabel('freq (Hz)')
        imgplot.set_cmap('jet')
        plt.colorbar()
        plt.show()

    return specgram, time_axis, freq_axis
# TODO
def speed_feature(signal, sampling_rate, window, step):
    """Experimental speech-speed feature: per-frame harmonic ratio / pitch.

    ARGUMENTS:
        signal:         the input signal samples
        sampling_rate:  the sampling freq (in Hz)
        window:         the short-term window size (in samples)
        step:           the short-term window step (in samples)
    RETURNS:
        np.array of (hr, f0) pairs, one per short-term frame
        (see harmonic()).
    """
    # signal normalization
    signal = np.double(signal)
    signal = signal / (2.0 ** 15)
    dc_offset = signal.mean()
    maximum = (np.abs(signal)).max()
    signal = (signal - dc_offset) / maximum

    num_samples = len(signal)  # total number of samples
    cur_p = 0
    count_fr = 0

    lowfreq = 133.33
    linsc = 200 / 3.
    logsc = 1.0711703
    nlinfil = 13
    nlogfil = 27
    n_mfcc_feats = 13
    nfil = nlinfil + nlogfil
    # BUGFIX: window / 2 is a float on Python 3 and was later used as a
    # slice bound; force an integer number of FFT bins
    num_fft = int(window / 2)
    if sampling_rate < 8000:
        nlogfil = 5
        nfil = nlinfil + nlogfil
        num_fft = int(window / 2)

    # compute filter banks for mfcc:
    fbank, freqs = mfcc_filter_banks(sampling_rate, num_fft, lowfreq, linsc,
                                     logsc, nlinfil, nlogfil)
    n_time_spectral_feats = 8
    n_harmonic_feats = 1
    n_total_feats = n_time_spectral_feats + n_mfcc_feats + n_harmonic_feats
    st_features = []

    while cur_p + window - 1 < num_samples:
        count_fr += 1
        x = signal[cur_p:cur_p + window]
        cur_p = cur_p + step
        fft_magnitude = abs(fft(x))
        fft_magnitude = fft_magnitude[0:num_fft]
        fft_magnitude = fft_magnitude / len(fft_magnitude)
        # suppress DC and near-DC bins before the harmonic analysis
        fft_magnitude[0:4] = 0
        st_features.append(harmonic(x, sampling_rate))

    return np.array(st_features)
def phormants(x, sampling_rate):
    """Estimate formant frequencies of a frame via LPC root-finding.

    ARGUMENTS:
        x:              audio samples of a single frame
        sampling_rate:  sampling frequency in Hz
    RETURNS:
        Sorted list of candidate formant frequencies in Hz.

    NOTE: relies on an external ``lpc`` implementation that must be in
    scope (e.g. from scikits.talkbox); it is not imported in this module.
    """
    N = len(x)
    w = np.hamming(N)

    # Apply window and high pass filter.
    x1 = x * w
    x1 = lfilter([1], [1., 0.63], x1)

    # Get LPC. Rule of thumb for the model order: 2 + fs/1000.
    # BUGFIX: sampling_rate / 1000 is a float on Python 3; lpc expects an
    # integer model order.
    ncoeff = 2 + int(sampling_rate / 1000)
    A, e, k = lpc(x1, ncoeff)

    # Get roots of the prediction polynomial; keep the upper half-plane.
    rts = np.roots(A)
    rts = [r for r in rts if np.imag(r) >= 0]

    # Get angles of the roots and convert to frequencies (Hz).
    angz = np.arctan2(np.imag(rts), np.real(rts))
    frqs = sorted(angz * (sampling_rate / (2 * math.pi)))

    return frqs
""" Windowing and feature extraction """
def feature_extraction(signal, sampling_rate, window, step, deltas=True):
    """
    This function implements the short-term windowing process.
    For each short-term window a set of features is extracted.
    This results in a sequence of feature vectors, stored in a np matrix.
    ARGUMENTS
        signal:         the input signal samples
        sampling_rate:  the sampling freq (in Hz)
        window:         the short-term window size (in samples)
        step:           the short-term window step (in samples)
        deltas:         (opt) True/False if delta features are to be
                        computed (default True, matching previous behavior)
    RETURNS
        features (numpy.ndarray):       contains features
                                        (n_feats x numOfShortTermWindows)
        feature_names (list of str):    contains feature names
    """
    window = int(window)
    step = int(step)

    # signal normalization
    signal = np.double(signal)
    signal = signal / (2.0 ** 15)
    dc_offset = signal.mean()
    signal_max = (np.abs(signal)).max()
    signal = (signal - dc_offset) / (signal_max + 0.0000000001)

    number_of_samples = len(signal)  # total number of samples
    current_position = 0
    count_fr = 0
    num_fft = int(window / 2)

    # compute the triangular filter banks used in the mfcc calculation
    fbank, freqs = mfcc_filter_banks(sampling_rate, num_fft)

    n_time_spectral_feats = 8
    n_harmonic_feats = 0
    n_mfcc_feats = 13
    n_chroma_feats = 13
    n_total_feats = n_time_spectral_feats + n_mfcc_feats + n_harmonic_feats + \
                    n_chroma_feats

    # define list of feature names
    feature_names = ["zcr", "energy", "energy_entropy"]
    feature_names += ["spectral_centroid", "spectral_spread"]
    feature_names.append("spectral_entropy")
    feature_names.append("spectral_flux")
    feature_names.append("spectral_rolloff")
    feature_names += ["mfcc_{0:d}".format(mfcc_i)
                      for mfcc_i in range(1, n_mfcc_feats + 1)]
    feature_names += ["chroma_{0:d}".format(chroma_i)
                      for chroma_i in range(1, n_chroma_feats)]
    feature_names.append("chroma_std")

    # add names for delta features (only when requested):
    if deltas:
        feature_names += ["delta " + f for f in feature_names]

    features = []
    # for each short-term window to end of signal
    while current_position + window - 1 < number_of_samples:
        count_fr += 1
        # get current window
        x = signal[current_position:current_position + window]
        # update window position
        current_position = current_position + step
        # get fft magnitude
        fft_magnitude = abs(fft(x))
        # normalize fft
        fft_magnitude = fft_magnitude[0:num_fft]
        fft_magnitude = fft_magnitude / len(fft_magnitude)
        # keep previous fft mag (used in spectral flux)
        if count_fr == 1:
            fft_magnitude_previous = fft_magnitude.copy()
        feature_vector = np.zeros((n_total_feats, 1))

        # zero crossing rate
        feature_vector[0] = zero_crossing_rate(x)
        # short-term energy
        feature_vector[1] = energy(x)
        # short-term entropy of energy
        feature_vector[2] = energy_entropy(x)
        # sp centroid/spread
        [feature_vector[3], feature_vector[4]] = \
            spectral_centroid_spread(fft_magnitude, sampling_rate)
        # spectral entropy
        feature_vector[5] = spectral_entropy(fft_magnitude)
        # spectral flux
        feature_vector[6] = spectral_flux(fft_magnitude,
                                          fft_magnitude_previous)
        # spectral rolloff
        feature_vector[7] = spectral_rolloff(fft_magnitude, 0.90)
        # MFCCs
        mffc_feats_end = n_time_spectral_feats + n_mfcc_feats
        feature_vector[n_time_spectral_feats:mffc_feats_end, 0] = \
            mfcc(fft_magnitude, fbank, n_mfcc_feats).copy()
        # chroma features
        chroma_names, chroma_feature_matrix = \
            chroma_features(fft_magnitude, sampling_rate, num_fft)
        chroma_features_end = n_time_spectral_feats + n_mfcc_feats + \
                              n_chroma_feats - 1
        feature_vector[mffc_feats_end:chroma_features_end] = \
            chroma_feature_matrix
        feature_vector[chroma_features_end] = chroma_feature_matrix.std()

        if not deltas:
            features.append(feature_vector)
        else:
            # delta features: difference from the previous frame's vector
            # (zeros for the very first frame)
            if count_fr > 1:
                delta = feature_vector - feature_vector_prev
                feature_vector_2 = np.concatenate((feature_vector, delta))
            else:
                feature_vector_2 = np.concatenate(
                    (feature_vector, np.zeros(feature_vector.shape)))
            feature_vector_prev = feature_vector
            features.append(feature_vector_2)

        fft_magnitude_previous = fft_magnitude.copy()

    features = np.concatenate(features, 1)
    return features, feature_names
# TODO: make deltas a parameter in feature_extraction()
from __future__ import print_function
import math
import numpy as np
from scipy.fftpack import fft
import matplotlib.pyplot as plt
from scipy.signal import lfilter
from scipy.fftpack.realtransforms import dct
# small additive constant used throughout to avoid division by zero / log(0)
eps = 0.00000001
def zero_crossing_rate(frame):
    """Return the fraction of sign changes between consecutive samples."""
    n = len(frame)
    # each sign flip contributes 2 to |diff(sign)|, hence the halving
    crossings = np.sum(np.abs(np.diff(np.sign(frame)))) / 2
    return np.float64(crossings) / np.float64(n - 1.0)
def energy(frame):
    """Return the mean energy (sum of squared samples over length) of a frame."""
    squared = frame ** 2
    return np.sum(squared) / np.float64(len(frame))
def energy_entropy(frame, n_short_blocks=10):
    """Compute the entropy of the sub-frame energy distribution.

    The frame is split into ``n_short_blocks`` sub-windows; each sub-window
    energy is normalized by the total frame energy, and the base-2 Shannon
    entropy of the resulting distribution is returned.
    """
    total_energy = np.sum(frame ** 2)
    n_samples = len(frame)
    # truncate so the frame divides evenly into sub-windows
    block_len = int(np.floor(n_samples / n_short_blocks))
    if n_samples != block_len * n_short_blocks:
        frame = frame[0:block_len * n_short_blocks]
    # columns are sub-windows (Fortran order keeps consecutive samples together)
    blocks = frame.reshape(block_len, n_short_blocks, order='F').copy()
    # normalized per-block energies form a probability-like distribution
    probs = np.sum(blocks ** 2, axis=0) / (total_energy + eps)
    # base-2 Shannon entropy of that distribution
    return -np.sum(probs * np.log2(probs + eps))
""" Frequency-domain audio features """
def spectral_centroid_spread(fft_magnitude, sampling_rate):
    """Compute spectral centroid and spread of a frame, given abs(FFT).

    Both values are normalized by half the sampling rate.
    """
    bin_count = len(fft_magnitude)
    # frequency (Hz) associated with each FFT bin (1-based)
    ind = np.arange(1, bin_count + 1) * (sampling_rate / (2.0 * bin_count))
    # work on a max-normalized copy of the magnitudes
    Xt = fft_magnitude.copy()
    Xt = Xt / Xt.max()
    DEN = np.sum(Xt) + eps
    # centroid: magnitude-weighted mean frequency
    centroid = np.sum(ind * Xt) / DEN
    # spread: magnitude-weighted standard deviation around the centroid
    spread = np.sqrt(np.sum(((ind - centroid) ** 2) * Xt) / DEN)
    half_rate = sampling_rate / 2.0
    return centroid / half_rate, spread / half_rate
def spectral_entropy(signal, n_short_blocks=10):
    """Compute the spectral entropy of a magnitude-spectrum frame."""
    total_energy = np.sum(signal ** 2)
    # split the spectrum into n_short_blocks equal sub-bands
    sub_len = int(np.floor(len(signal) / n_short_blocks))
    if len(signal) != sub_len * n_short_blocks:
        signal = signal[0:sub_len * n_short_blocks]
    # each column of the reshaped matrix is one sub-band
    sub_bands = signal.reshape(sub_len, n_short_blocks, order='F').copy()
    # normalized sub-band energies form a probability-like distribution
    probs = np.sum(sub_bands ** 2, axis=0) / (total_energy + eps)
    # base-2 Shannon entropy
    return -np.sum(probs * np.log2(probs + eps))
def spectral_flux(fft_magnitude, previous_fft_magnitude):
    """
    Compute the spectral flux between the current and previous frame.

    ARGUMENTS:
        fft_magnitude:            the abs(fft) of the current frame
        previous_fft_magnitude:   the abs(fft) of the previous frame

    Returns the sum of squared differences between the sum-normalized
    spectra of the two frames.
    """
    # normalize each spectrum by its (eps-padded) total magnitude
    current = fft_magnitude / np.sum(fft_magnitude + eps)
    previous = previous_fft_magnitude / np.sum(previous_fft_magnitude + eps)
    return np.sum((current - previous) ** 2)
def spectral_rolloff(signal, c):
    """Compute spectral roll-off: the normalized bin position below which
    a fraction ``c`` of the total spectral energy is concentrated."""
    fft_length = len(signal)
    power = signal ** 2
    threshold = c * np.sum(power)
    # cumulative spectral energy; locate the first bin past the threshold
    cumulative_sum = np.cumsum(power) + eps
    above = np.nonzero(cumulative_sum > threshold)[0]
    if len(above) > 0:
        return np.float64(above[0]) / float(fft_length)
    return 0.0
def harmonic(frame, sampling_rate):
    """
    Compute the harmonic ratio and fundamental frequency (pitch) of a frame.

    ARGUMENTS:
        frame:          audio samples of a single short-term frame
        sampling_rate:  sampling frequency in Hz
    RETURNS:
        (hr, f0): harmonic ratio and pitch in Hz; both 0.0 when the frame
        is judged unvoiced (noisy normalized autocorrelation, very low
        harmonic ratio, or implausible pitch > 5000 Hz).
    """
    # maximum lag: 16 ms worth of samples.
    # BUGFIX: np.round returns a float; m is used below as an array size
    # and a slice index, which requires an int on modern NumPy.
    m = int(np.round(0.016 * sampling_rate)) - 1
    r = np.correlate(frame, frame, mode='full')

    g = r[len(frame) - 1]          # zero-lag autocorrelation (frame energy)
    r = r[len(frame):-1]           # positive-lag part only

    # estimate m0 as the first zero crossing of the autocorrelation
    [a, ] = np.nonzero(np.diff(np.sign(r)))

    if len(a) == 0:
        m0 = len(r) - 1
    else:
        m0 = a[0]
    if m > len(r):
        m = len(r) - 1

    # normalized autocorrelation over lags [m0, m)
    gamma = np.zeros((m), dtype=np.float64)
    cumulative_sum = np.cumsum(frame ** 2)
    gamma[m0:m] = r[m0:m] / (np.sqrt((g * cumulative_sum[m:m0:-1])) + eps)

    zcr = zero_crossing_rate(gamma)

    if zcr > 0.15:
        # too noisy to be voiced
        hr = 0.0
        f0 = 0.0
    else:
        if len(gamma) == 0:
            hr = 1.0
            blag = 0.0
            gamma = np.zeros((m), dtype=np.float64)
        else:
            hr = np.max(gamma)
            blag = np.argmax(gamma)

        # fundamental frequency from the best lag
        f0 = sampling_rate / (blag + eps)
        if f0 > 5000:
            f0 = 0.0
        if hr < 0.1:
            f0 = 0.0
    return hr, f0
def mfcc_filter_banks(sampling_rate, num_fft, lowfreq=133.33, linc=200 / 3,
                      logsc=1.0711703, num_lin_filt=13, num_log_filt=27):
    """
    Compute the triangular filterbank for MFCC computation
    (used in the stFeatureExtraction function before the stMFCC call).
    Adapted from the scikits.talkbox library (MIT Licence):
    https://pypi.python.org/pypi/scikits.talkbox

    ARGUMENTS:
        sampling_rate:  sampling frequency in Hz
        num_fft:        number of FFT bins
        lowfreq:        lowest band edge (Hz) of the linear filters
        linc:           linear spacing (Hz) of the low-frequency filters
        logsc:          geometric spacing factor of the log filters
        num_lin_filt:   number of linearly spaced filters
        num_log_filt:   number of log-spaced filters
    RETURNS:
        (fbank, frequencies): filterbank of shape
        (num_lin_filt + num_log_filt, num_fft) and the band-edge
        frequencies used to build it.
    """
    if sampling_rate < 8000:
        # BUGFIX: the original assigned an unused local `nlogfil` here, so
        # the intended reduction of log filters for low-rate audio never
        # took effect; assign the parameter actually used below.
        num_log_filt = 5

    # Total number of filters
    num_filt_total = num_lin_filt + num_log_filt

    # Compute frequency points of the triangles: linear then geometric
    frequencies = np.zeros(num_filt_total + 2)
    frequencies[:num_lin_filt] = lowfreq + np.arange(num_lin_filt) * linc
    frequencies[num_lin_filt:] = frequencies[num_lin_filt - 1] * logsc ** \
                                 np.arange(1, num_log_filt + 3)
    heights = 2. / (frequencies[2:] - frequencies[0:-2])

    # Compute filterbank coeff (in fft domain, in bins)
    fbank = np.zeros((num_filt_total, num_fft))
    nfreqs = np.arange(num_fft) / (1. * num_fft) * sampling_rate

    for i in range(num_filt_total):
        low_freqs = frequencies[i]
        cent_freqs = frequencies[i + 1]
        high_freqs = frequencies[i + 2]

        # rising slope bins.  BUGFIX: np.int was removed from NumPy 1.24;
        # the builtin int is the documented replacement.
        lid = np.arange(np.floor(low_freqs * num_fft / sampling_rate) + 1,
                        np.floor(cent_freqs * num_fft / sampling_rate) + 1,
                        dtype=int)
        lslope = heights[i] / (cent_freqs - low_freqs)
        # falling slope bins
        rid = np.arange(np.floor(cent_freqs * num_fft / sampling_rate) + 1,
                        np.floor(high_freqs * num_fft / sampling_rate) + 1,
                        dtype=int)
        rslope = heights[i] / (high_freqs - cent_freqs)
        fbank[i][lid] = lslope * (nfreqs[lid] - low_freqs)
        fbank[i][rid] = rslope * (high_freqs - nfreqs[rid])

    return fbank, frequencies
def mfcc(fft_magnitude, fbank, num_mfcc_feats):
    """
    Compute the MFCCs of a frame, given its FFT magnitude spectrum.

    ARGUMENTS:
        fft_magnitude:   fft magnitude abs(FFT)
        fbank:           filter bank (see mfcc_filter_banks)
        num_mfcc_feats:  number of cepstral coefficients to keep
    RETURN:
        ceps: MFCC vector of length num_mfcc_feats

    Based on the scikits.talkbox library (MIT Licence), with small
    modifications for pyAudioAnalysis.
    """
    # log-energy in each mel band, then a type-II DCT to decorrelate
    band_energies = np.dot(fft_magnitude, fbank.T)
    mspec = np.log10(band_energies + eps)
    return dct(mspec, type=2, norm='ortho', axis=-1)[:num_mfcc_feats]
def chroma_features_init(num_fft, sampling_rate):
    """
    Initialize the chroma matrices used in chroma feature computation.

    RETURNS:
        num_chroma:           chroma index (semitones above A0) per FFT bin
        num_freqs_per_chroma: for each bin, how many bins share its chroma
                              index (used later for averaging)
    """
    # center frequency (Hz) of each FFT bin (1-based)
    freqs = np.array([((f + 1) * sampling_rate) /
                      (2 * num_fft) for f in range(num_fft)])
    cp = 27.50  # frequency of A0, the chroma reference pitch
    num_chroma = np.round(12.0 * np.log2(freqs / cp)).astype(int)

    num_freqs_per_chroma = np.zeros((num_chroma.shape[0],))
    for u in np.unique(num_chroma):
        idx = np.nonzero(num_chroma == u)
        num_freqs_per_chroma[idx] = idx[0].shape
    return num_chroma, num_freqs_per_chroma
def chroma_features(signal, sampling_rate, num_fft):
    # TODO: 1 complexity
    # TODO: 2 bug with large windows
    """Compute a 12-bin chroma (pitch-class energy) vector of a frame.

    ARGUMENTS:
        signal:         magnitude spectrum of the frame (length num_fft)
        sampling_rate:  sampling frequency in Hz
        num_fft:        number of FFT bins
    RETURNS:
        (chroma_names, final_matrix): note names and a 12x1 matrix of
        pitch-class energies normalized by total spectral energy.
    """
    num_chroma, num_freqs_per_chroma = \
        chroma_features_init(num_fft, sampling_rate)
    chroma_names = ['A', 'A#', 'B', 'C', 'C#', 'D',
                    'D#', 'E', 'F', 'F#', 'G', 'G#']
    spec = signal ** 2
    # scatter per-bin power onto chroma indices; the branch guards against
    # chroma indices exceeding the number of bins
    if num_chroma.max() < num_chroma.shape[0]:
        C = np.zeros((num_chroma.shape[0],))
        C[num_chroma] = spec
        C /= num_freqs_per_chroma[num_chroma]
    else:
        # NOTE(review): this branch truncates at the first out-of-range
        # chroma index — likely the "bug with large windows" in the TODO;
        # confirm before relying on it for long analysis windows
        I = np.nonzero(num_chroma > num_chroma.shape[0])[0][0]
        C = np.zeros((num_chroma.shape[0],))
        C[num_chroma[0:I - 1]] = spec
        C /= num_freqs_per_chroma
    final_matrix = np.zeros((12, 1))
    # zero-pad C to a multiple of 12, then fold into 12 pitch classes
    newD = int(np.ceil(C.shape[0] / 12.0) * 12)
    C2 = np.zeros((newD,))
    C2[0:C.shape[0]] = C
    C2 = C2.reshape(int(C2.shape[0] / 12), 12)
    # for i in range(12):
    #    finalC[i] = np.sum(C[i:C.shape[0]:12])
    final_matrix = np.matrix(np.sum(C2, axis=0)).T
    # normalize by total spectral energy
    final_matrix /= spec.sum()
    # ax = plt.gca()
    # plt.hold(False)
    # plt.plot(finalC)
    # ax.set_xticks(range(len(chromaNames)))
    # ax.set_xticklabels(chromaNames)
    # xaxis = np.arange(0, 0.02, 0.01);
    # ax.set_yticks(range(len(xaxis)))
    # ax.set_yticklabels(xaxis)
    # plt.show(block=False)
    # plt.draw()
    return chroma_names, final_matrix
def chromagram(signal, sampling_rate, window, step, plot=False):
    """
    Short-term chromagram computation.
    Returns:
        a np array (numOfShortTermWindows x 12) of per-frame chroma vectors
    ARGUMENTS:
        signal:         the input signal samples
        sampling_rate:  the sampling freq (in Hz)
        window:         the short-term window size (in samples)
        step:           the short-term window step (in samples)
        plot:           flag, True if results are to be plotted
    RETURNS:
        (chromogram, time_axis, freq_axis)
    """
    window = int(window)
    step = int(step)
    # normalize from int16 range and remove DC offset
    signal = np.double(signal)
    signal = signal / (2.0 ** 15)
    dc_offset = signal.mean()
    maximum = (np.abs(signal)).max()
    signal = (signal - dc_offset) / (maximum - dc_offset)

    num_samples = len(signal)  # total number of samples
    cur_position = 0
    count_fr = 0
    num_fft = int(window / 2)
    chromogram = np.array([], dtype=np.float64)

    while cur_position + window - 1 < num_samples:
        count_fr += 1
        x = signal[cur_position:cur_position + window]
        cur_position = cur_position + step
        # magnitude spectrum of the current frame (first num_fft bins)
        X = abs(fft(x))
        X = X[0:num_fft]
        X = X / len(X)
        chroma_names, chroma_feature_matrix = chroma_features(X, sampling_rate,
                                                              num_fft)
        chroma_feature_matrix = chroma_feature_matrix[:, 0]
        # stack per-frame chroma vectors into the chromagram matrix
        if count_fr == 1:
            chromogram = chroma_feature_matrix.T
        else:
            chromogram = np.vstack((chromogram, chroma_feature_matrix.T))
    freq_axis = chroma_names
    time_axis = [(t * step) / sampling_rate
                 for t in range(chromogram.shape[0])]

    if plot:
        # stretch the 12-row chromagram vertically for readability
        fig, ax = plt.subplots()
        chromogram_plot = chromogram.transpose()[::-1, :]
        ratio = int(chromogram_plot.shape[1] / (3 * chromogram_plot.shape[0]))
        if ratio < 1:
            ratio = 1
        chromogram_plot = np.repeat(chromogram_plot, ratio, axis=0)
        imgplot = plt.imshow(chromogram_plot)

        ax.set_yticks(range(int(ratio / 2), len(freq_axis) * ratio, ratio))
        ax.set_yticklabels(freq_axis[::-1])
        # NOTE(review): t_step is 0 when fewer than 3 frames exist, which
        # would make range() raise ValueError — confirm callers always pass
        # signals longer than 3 windows before plotting
        t_step = int(count_fr / 3)
        time_ticks = range(0, count_fr, t_step)
        time_ticks_labels = ['%.2f' % (float(t * step) / sampling_rate)
                             for t in time_ticks]
        ax.set_xticks(time_ticks)
        ax.set_xticklabels(time_ticks_labels)
        ax.set_xlabel('time (secs)')
        imgplot.set_cmap('jet')
        plt.colorbar()
        plt.show()
    return chromogram, time_axis, freq_axis
def spectrogram(signal, sampling_rate, window, step, plot=False):
    """
    Short-term FFT mag for spectrogram estimation.
    Returns:
        a np array (numOfShortTermWindows x num_fft)
    ARGUMENTS:
        signal:         the input signal samples
        sampling_rate:  the sampling freq (in Hz)
        window:         the short-term window size (in samples)
        step:           the short-term window step (in samples)
        plot:           flag, True if results are to be plotted
    RETURNS:
        (specgram, time_axis, freq_axis): magnitude spectrogram, frame
        times in seconds and bin frequencies in Hz.
    """
    window = int(window)
    step = int(step)

    # signal normalization: scale from int16 range, remove DC offset
    signal = np.double(signal)
    signal = signal / (2.0 ** 15)
    dc_offset = signal.mean()
    maximum = (np.abs(signal)).max()
    signal = (signal - dc_offset) / (maximum - dc_offset)

    num_samples = len(signal)  # total number of samples
    cur_p = 0
    count_fr = 0
    num_fft = int(window / 2)
    specgram = np.array([], dtype=np.float64)

    while cur_p + window - 1 < num_samples:
        count_fr += 1
        x = signal[cur_p:cur_p + window]
        cur_p = cur_p + step
        X = abs(fft(x))
        X = X[0:num_fft]
        X = X / len(X)

        if count_fr == 1:
            # BUGFIX: the first frame used to be stored squared (X ** 2)
            # while every later frame was stored as a plain magnitude;
            # store magnitudes consistently for all frames
            specgram = X
        else:
            specgram = np.vstack((specgram, X))

    freq_axis = [float((f + 1) * sampling_rate) / (2 * num_fft)
                 for f in range(specgram.shape[1])]
    time_axis = [float(t * step) / sampling_rate
                 for t in range(specgram.shape[0])]

    if plot:
        fig, ax = plt.subplots()
        imgplot = plt.imshow(specgram.transpose()[::-1, :])
        fstep = int(num_fft / 5.0)
        frequency_ticks = range(0, int(num_fft) + fstep, fstep)
        frequency_tick_labels = \
            [str(sampling_rate / 2 -
                 int((f * sampling_rate) / (2 * num_fft)))
             for f in frequency_ticks]
        ax.set_yticks(frequency_ticks)
        ax.set_yticklabels(frequency_tick_labels)
        # BUGFIX: guard against a zero tick step for very short signals
        t_step = max(1, int(count_fr / 3))
        time_ticks = range(0, count_fr, t_step)
        time_ticks_labels = \
            ['%.2f' % (float(t * step) / sampling_rate) for t in time_ticks]
        ax.set_xticks(time_ticks)
        ax.set_xticklabels(time_ticks_labels)
        ax.set_xlabel('time (secs)')
        ax.set_ylabel('freq (Hz)')
        imgplot.set_cmap('jet')
        plt.colorbar()
        plt.show()

    return specgram, time_axis, freq_axis
# TODO
def speed_feature(signal, sampling_rate, window, step):
    """Experimental speech-speed feature: per-frame harmonic ratio / pitch.

    ARGUMENTS:
        signal:         the input signal samples
        sampling_rate:  the sampling freq (in Hz)
        window:         the short-term window size (in samples)
        step:           the short-term window step (in samples)
    RETURNS:
        np.array of (hr, f0) pairs, one per short-term frame
        (see harmonic()).
    """
    # signal normalization
    signal = np.double(signal)
    signal = signal / (2.0 ** 15)
    dc_offset = signal.mean()
    maximum = (np.abs(signal)).max()
    signal = (signal - dc_offset) / maximum

    num_samples = len(signal)  # total number of samples
    cur_p = 0
    count_fr = 0

    lowfreq = 133.33
    linsc = 200 / 3.
    logsc = 1.0711703
    nlinfil = 13
    nlogfil = 27
    n_mfcc_feats = 13
    nfil = nlinfil + nlogfil
    # BUGFIX: window / 2 is a float on Python 3 and was later used as a
    # slice bound; force an integer number of FFT bins
    num_fft = int(window / 2)
    if sampling_rate < 8000:
        nlogfil = 5
        nfil = nlinfil + nlogfil
        num_fft = int(window / 2)

    # compute filter banks for mfcc:
    fbank, freqs = mfcc_filter_banks(sampling_rate, num_fft, lowfreq, linsc,
                                     logsc, nlinfil, nlogfil)
    n_time_spectral_feats = 8
    n_harmonic_feats = 1
    n_total_feats = n_time_spectral_feats + n_mfcc_feats + n_harmonic_feats
    st_features = []

    while cur_p + window - 1 < num_samples:
        count_fr += 1
        x = signal[cur_p:cur_p + window]
        cur_p = cur_p + step
        fft_magnitude = abs(fft(x))
        fft_magnitude = fft_magnitude[0:num_fft]
        fft_magnitude = fft_magnitude / len(fft_magnitude)
        # suppress DC and near-DC bins before the harmonic analysis
        fft_magnitude[0:4] = 0
        st_features.append(harmonic(x, sampling_rate))

    return np.array(st_features)
def phormants(x, sampling_rate):
    """Estimate formant frequencies of a frame via LPC root-finding.

    ARGUMENTS:
        x:              audio samples of a single frame
        sampling_rate:  sampling frequency in Hz
    RETURNS:
        Sorted list of candidate formant frequencies in Hz.

    NOTE: relies on an external ``lpc`` implementation that must be in
    scope (e.g. from scikits.talkbox); it is not imported in this module.
    """
    N = len(x)
    w = np.hamming(N)

    # Apply window and high pass filter.
    x1 = x * w
    x1 = lfilter([1], [1., 0.63], x1)

    # Get LPC. Rule of thumb for the model order: 2 + fs/1000.
    # BUGFIX: sampling_rate / 1000 is a float on Python 3; lpc expects an
    # integer model order.
    ncoeff = 2 + int(sampling_rate / 1000)
    A, e, k = lpc(x1, ncoeff)

    # Get roots of the prediction polynomial; keep the upper half-plane.
    rts = np.roots(A)
    rts = [r for r in rts if np.imag(r) >= 0]

    # Get angles of the roots and convert to frequencies (Hz).
    angz = np.arctan2(np.imag(rts), np.real(rts))
    frqs = sorted(angz * (sampling_rate / (2 * math.pi)))

    return frqs
""" Windowing and feature extraction """
def feature_extraction(signal, sampling_rate, window, step, deltas=True):
    """
    This function implements the short-term windowing process.
    For each short-term window a set of features is extracted.
    This results in a sequence of feature vectors, stored in a np matrix.
    ARGUMENTS
        signal:        the input signal samples
        sampling_rate: the sampling freq (in Hz)
        window:        the short-term window size (in samples)
        step:          the short-term window step (in samples)
        deltas:        (opt) True/False if delta features are to be
                       computed
    RETURNS
        features (numpy.ndarray):      contains features
                                       (n_feats x numOfShortTermWindows)
        feature_names (list):          contains feature names
                                       (one entry per feature row)
    """
    window = int(window)
    step = int(step)
    # signal normalization: scale 16-bit samples to [-1, 1], remove the DC
    # offset, then normalize by the peak (epsilon guards against silence).
    signal = np.double(signal)
    signal = signal / (2.0 ** 15)
    dc_offset = signal.mean()
    signal_max = (np.abs(signal)).max()
    signal = (signal - dc_offset) / (signal_max + 0.0000000001)
    number_of_samples = len(signal)  # total number of samples
    current_position = 0
    count_fr = 0
    num_fft = int(window / 2)
    # compute the triangular filter banks used in the mfcc calculation
    fbank, freqs = mfcc_filter_banks(sampling_rate, num_fft)
    n_time_spectral_feats = 8
    n_harmonic_feats = 0
    n_mfcc_feats = 13
    n_chroma_feats = 13
    n_total_feats = n_time_spectral_feats + n_mfcc_feats + n_harmonic_feats + \
        n_chroma_feats
    # n_total_feats = n_time_spectral_feats + n_mfcc_feats +
    # n_harmonic_feats
    # define list of feature names (order must match feature_vector below)
    feature_names = ["zcr", "energy", "energy_entropy"]
    feature_names += ["spectral_centroid", "spectral_spread"]
    feature_names.append("spectral_entropy")
    feature_names.append("spectral_flux")
    feature_names.append("spectral_rolloff")
    feature_names += ["mfcc_{0:d}".format(mfcc_i)
                      for mfcc_i in range(1, n_mfcc_feats + 1)]
    # 12 named chroma bins plus the "chroma_std" row appended below.
    feature_names += ["chroma_{0:d}".format(chroma_i)
                      for chroma_i in range(1, n_chroma_feats)]
    feature_names.append("chroma_std")
    # add names for delta features:
    if deltas:
        feature_names_2 = feature_names + ["delta " + f for f in feature_names]
        feature_names = feature_names_2
    features = []
    # for each short-term window to end of signal
    while current_position + window - 1 < number_of_samples:
        count_fr += 1
        # get current window
        x = signal[current_position:current_position + window]
        # update window position
        current_position = current_position + step
        # get fft magnitude
        fft_magnitude = abs(fft(x))
        # keep only the first half of the (symmetric) spectrum, normalized
        fft_magnitude = fft_magnitude[0:num_fft]
        fft_magnitude = fft_magnitude / len(fft_magnitude)
        # keep previous fft mag (used in spectral flux)
        if count_fr == 1:
            fft_magnitude_previous = fft_magnitude.copy()
        feature_vector = np.zeros((n_total_feats, 1))
        # zero crossing rate
        feature_vector[0] = zero_crossing_rate(x)
        # short-term energy
        feature_vector[1] = energy(x)
        # short-term entropy of energy
        feature_vector[2] = energy_entropy(x)
        # sp centroid/spread
        [feature_vector[3], feature_vector[4]] = \
            spectral_centroid_spread(fft_magnitude,
                                     sampling_rate)
        # spectral entropy
        feature_vector[5] = \
            spectral_entropy(fft_magnitude)
        # spectral flux
        feature_vector[6] = \
            spectral_flux(fft_magnitude,
                          fft_magnitude_previous)
        # spectral rolloff
        feature_vector[7] = \
            spectral_rolloff(fft_magnitude, 0.90)
        # MFCCs
        mffc_feats_end = n_time_spectral_feats + n_mfcc_feats
        feature_vector[n_time_spectral_feats:mffc_feats_end, 0] = \
            mfcc(fft_magnitude, fbank, n_mfcc_feats).copy()
        # chroma features (12 bins plus their std as the last chroma row)
        chroma_names, chroma_feature_matrix = \
            chroma_features(fft_magnitude, sampling_rate, num_fft)
        chroma_features_end = n_time_spectral_feats + n_mfcc_feats + \
            n_chroma_feats - 1
        feature_vector[mffc_feats_end:chroma_features_end] = \
            chroma_feature_matrix
        feature_vector[chroma_features_end] = chroma_feature_matrix.std()
        if not deltas:
            features.append(feature_vector)
        else:
            # delta features: first frame gets zeros since there is no
            # previous frame to difference against
            if count_fr > 1:
                delta = feature_vector - feature_vector_prev
                feature_vector_2 = np.concatenate((feature_vector, delta))
            else:
                feature_vector_2 = np.concatenate((feature_vector,
                                                   np.zeros(feature_vector.
                                                            shape)))
            feature_vector_prev = feature_vector
            features.append(feature_vector_2)
        fft_magnitude_previous = fft_magnitude.copy()
    # stack the per-frame column vectors into an (n_feats x n_frames) matrix
    features = np.concatenate(features, 1)
    return features, feature_names
|
import base64
from odoo import api, fields, models, exceptions, _
class ComunicazioneLiquidazioneExportFile(models.TransientModel):
    """Wizard that exports one selected VAT statement communication as XML."""
    _name = "comunicazione.liquidazione.export.file"
    _description = "Export VAT statement communication XML file"
    # Generated XML payload, base64-encoded for the Binary field.
    file_export = fields.Binary('File', readonly=True)
    # File name offered for download.
    name = fields.Char('File Name', readonly=True, default='liquidazione.xml')
    @api.multi
    def export(self):
        """Build the XML of the single selected communication and reopen the
        wizard with the download ("exit") form view.
        Raises:
            exceptions.Warning: if no record, or more than one record, is
                selected (read from ``active_ids`` in the context).
        """
        comunicazione_ids = self._context.get('active_ids')
        if not comunicazione_ids:
            raise exceptions.Warning(_(
                "No communication selected"
            ))
        if len(comunicazione_ids) > 1:
            raise exceptions.Warning(_(
                'You can export only 1 communication at a time'
            ))
        for wizard in self:
            for comunicazione in self.env['comunicazione.liquidazione'].\
                    browse(comunicazione_ids):
                # NOTE(review): base64.encodestring is deprecated and removed
                # in Python 3.9+; base64.encodebytes is the modern spelling.
                out = base64.encodestring(comunicazione.get_export_xml())
                wizard.file_export = out
            model_data_obj = self.env['ir.model.data']
            view_rec = model_data_obj.get_object_reference(
                'l10n_it_vat_statement_communication',
                'wizard_liquidazione_export_file_exit'
            )
            view_id = view_rec and view_rec[1] or False
            # Reopen this same wizard record in the "exit" form so the user
            # can download the generated file.
            return {
                'view_type': 'form',
                'view_id': [view_id],
                'view_mode': 'form',
                'res_model': 'comunicazione.liquidazione.export.file',
                'res_id': wizard.id,
                'type': 'ir.actions.act_window',
                'target': 'new',
            }
IMP l10n_it_vat_statement_communication: set correct XML file name
import base64
from odoo import api, fields, models, exceptions, _
class ComunicazioneLiquidazioneExportFile(models.TransientModel):
    """Wizard that exports one selected VAT statement communication as XML."""
    _name = "comunicazione.liquidazione.export.file"
    _description = "Export VAT statement communication XML file"
    # Generated XML payload, base64-encoded for the Binary field.
    file_export = fields.Binary('File', readonly=True)
    # File name offered for download; overwritten in export() with the
    # declarant fiscal code and the communication number.
    name = fields.Char('File Name', readonly=True, default='liquidazione.xml')
    @api.multi
    def export(self):
        """Build the XML of the single selected communication, set the
        conventional file name, and reopen the wizard with the download
        ("exit") form view.
        Raises:
            exceptions.Warning: if no record, or more than one record, is
                selected (read from ``active_ids`` in the context).
        """
        comunicazione_ids = self._context.get('active_ids')
        if not comunicazione_ids:
            raise exceptions.Warning(_(
                "No communication selected"
            ))
        if len(comunicazione_ids) > 1:
            raise exceptions.Warning(_(
                'You can export only 1 communication at a time'
            ))
        for wizard in self:
            for comunicazione in self.env['comunicazione.liquidazione'].\
                    browse(comunicazione_ids):
                # NOTE(review): base64.encodestring is deprecated and removed
                # in Python 3.9+; base64.encodebytes is the modern spelling.
                out = base64.encodestring(comunicazione.get_export_xml())
                wizard.file_export = out
                # <fiscalcode>_LI_<5-digit zero-padded progressive>.xml
                wizard.name = "%s_LI_%s.xml" % (
                    comunicazione.declarant_fiscalcode,
                    str(comunicazione.identificativo).rjust(5, '0'))
            model_data_obj = self.env['ir.model.data']
            view_rec = model_data_obj.get_object_reference(
                'l10n_it_vat_statement_communication',
                'wizard_liquidazione_export_file_exit'
            )
            view_id = view_rec and view_rec[1] or False
            # Reopen this same wizard record in the "exit" form so the user
            # can download the generated file.
            return {
                'view_type': 'form',
                'view_id': [view_id],
                'view_mode': 'form',
                'res_model': 'comunicazione.liquidazione.export.file',
                'res_id': wizard.id,
                'type': 'ir.actions.act_window',
                'target': 'new',
            }
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Danilo Bargen
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function, division, absolute_import, unicode_literals
import time
from collections import namedtuple
import RPIO
from flufl.enum import Enum
### PYTHON 3 COMPAT ###
try:
range = xrange
except NameError:
pass
### BIT PATTERNS ###
# HD44780 instruction bytes and their option flags. Fix: the
# "Flags for display/cursor shift" group was defined twice; the duplicate
# LCD_DISPLAYMOVE / LCD_CURSORMOVE assignments have been removed.
# Commands
LCD_CLEARDISPLAY = 0x01
LCD_RETURNHOME = 0x02
LCD_ENTRYMODESET = 0x04
LCD_DISPLAYCONTROL = 0x08
LCD_CURSORSHIFT = 0x10
LCD_FUNCTIONSET = 0x20
LCD_SETCGRAMADDR = 0x40
LCD_SETDDRAMADDR = 0x80
# Flags for display entry mode
LCD_ENTRYRIGHT = 0x00
LCD_ENTRYLEFT = 0x02
LCD_ENTRYSHIFTINCREMENT = 0x01
LCD_ENTRYSHIFTDECREMENT = 0x00
# Flags for display on/off control
LCD_DISPLAYON = 0x04
LCD_DISPLAYOFF = 0x00
LCD_CURSORON = 0x02
LCD_CURSOROFF = 0x00
LCD_BLINKON = 0x01
LCD_BLINKOFF = 0x00
# Flags for display/cursor shift
LCD_DISPLAYMOVE = 0x08
LCD_CURSORMOVE = 0x00
LCD_MOVERIGHT = 0x04
LCD_MOVELEFT = 0x00
# Flags for function set
LCD_8BITMODE = 0x10
LCD_4BITMODE = 0x00
LCD_2LINE = 0x08
LCD_1LINE = 0x00
LCD_5x10DOTS = 0x04
LCD_5x8DOTS = 0x00
# Flags for RS pin modes
RS_INSTRUCTION = 0x00
RS_DATA = 0x01
### NAMEDTUPLES ###
# PinConfig: GPIO assignment for the display -- register select, read/write,
# enable, the eight data-bus pins (d0-d3 are None in 4 bit mode) and the
# GPIO numbering mode.
PinConfig = namedtuple('PinConfig', 'rs rw e d0 d1 d2 d3 d4 d5 d6 d7 mode')
# LCDConfig: display geometry (rows x cols) and font dot height (8 or 10).
LCDConfig = namedtuple('LCDConfig', 'rows cols dotsize')
### ENUMS ###
class Alignment(Enum):
    """Text alignment options, mapped to the entry-mode flag bits."""
    left = LCD_ENTRYLEFT
    right = LCD_ENTRYRIGHT
class ShiftMode(Enum):
    """What moves on write: the cursor, or the whole display."""
    cursor = LCD_ENTRYSHIFTDECREMENT
    display = LCD_ENTRYSHIFTINCREMENT
class CursorMode(Enum):
    """Cursor rendering options, mapped to display-control flag bits."""
    hide = LCD_CURSOROFF | LCD_BLINKOFF
    line = LCD_CURSORON | LCD_BLINKOFF
    blink = LCD_CURSOROFF | LCD_BLINKON
### HELPER FUNCTIONS ###
def msleep(milliseconds):
    """Block the calling thread for the given number of milliseconds."""
    seconds = milliseconds / 1000.0
    time.sleep(seconds)
def usleep(microseconds):
    """Block the calling thread for the given number of microseconds."""
    seconds = microseconds / 1000000.0
    time.sleep(seconds)
### MAIN ###
class CharLCD(object):
    """Driver for an HD44780-style character LCD on Raspberry Pi GPIO."""
    # Init, setup, teardown
    def __init__(self, pin_rs=15, pin_rw=18, pin_e=16, pins_data=[21, 22, 23, 24],
                 numbering_mode=RPIO.BOARD):
        """
        Character LCD controller.
        The default pin numbers are based on the BOARD numbering scheme (1-26).
        You can save 1 pin by not using RW. Set ``pin_rw`` to ``None`` if you
        want this.
        Args:
            pin_rs:
                Pin for register select (RS). Default: 15.
            pin_rw:
                Pin for selecting read or write mode (R/W). Set this to
                ``None`` for read only mode. Default: 18.
            pin_e:
                Pin to start data read or write (E). Default: 16.
            pins_data:
                List of data bus pins in 8 bit mode (DB0-DB7) or in 4 bit mode
                (DB4-DB7) in ascending order. Default: [21, 22, 23, 24].
            numbering_mode:
                Which scheme to use for numbering the GPIO pins.
                Default: RPIO.BOARD (1-26).
        Returns:
            A :class:`CharLCD` instance.
        """
        # Set attributes
        # NOTE(review): ``pins_data`` is a mutable default argument; it is
        # only read here, so this is harmless, but a tuple would be safer.
        self.numbering_mode = numbering_mode
        if len(pins_data) == 4:  # 4 bit mode
            self.data_bus_mode = LCD_4BITMODE
            # No DB0-DB3 pins in 4 bit mode; pad so PinConfig stays uniform.
            block1 = [None] * 4
        elif len(pins_data) == 8:  # 8 bit mode
            self.data_bus_mode = LCD_8BITMODE
            block1 = pins_data[:4]
        else:
            raise ValueError('There should be exactly 4 or 8 data pins.')
        # Upper data nibble (DB4-DB7) is used in both bus modes.
        block2 = pins_data[-4:]
        self.pins = PinConfig(rs=pin_rs, rw=pin_rw, e=pin_e,
                              d0=block1[0], d1=block1[1], d2=block1[2], d3=block1[3],
                              d4=block2[0], d5=block2[1], d6=block2[2], d7=block2[3],
                              mode=numbering_mode)
        # Setup GPIO: configure every assigned pin (all but the trailing
        # ``mode`` field) as an output.
        # NOTE(review): ``filter(None, ...)`` also drops a pin numbered 0 --
        # confirm no pin is GPIO 0.
        RPIO.setmode(self.numbering_mode)
        for pin in list(filter(None, self.pins))[:-1]:
            RPIO.setup(pin, RPIO.OUT)
    def setup(self, rows=4, cols=20, dotsize=8):
        """Initialize display with the specified configuration.
        Args:
            rows:
                Number of display rows (usually 1, 2 or 4). Default: 4.
            cols:
                Number of columns per row (usually 16 or 20). Default 20.
            dotsize:
                Some 1 line displays allow a font height of 10px.
                Allowed: 8 or 10. Default: 8.
        """
        # Set attributes
        self.lcd = LCDConfig(rows=rows, cols=cols, dotsize=dotsize)
        displayfunction = self.data_bus_mode | LCD_1LINE | LCD_5x8DOTS
        # LCD only uses two lines on 4 row displays
        if rows == 4:
            displayfunction |= LCD_2LINE
        # For some 1 line displays you can select a 10px font.
        assert dotsize in [8, 10], 'The ``dotsize`` argument should be either 8 or 10.'
        if dotsize == 10:
            displayfunction |= LCD_5x10DOTS
        # Initialization: drive control lines low, then run the bus-width
        # selection sequence from the Hitachi datasheet.
        msleep(50)
        RPIO.output(self.pins.rs, 0)
        RPIO.output(self.pins.e, 0)
        if self.pins.rw is not None:
            RPIO.output(self.pins.rw, 0)
        # Choose 4 or 8 bit mode
        if self.data_bus_mode == LCD_4BITMODE:
            # Hitachi manual page 46
            self._write4bits(0x03)
            msleep(4.5)
            self._write4bits(0x03)
            msleep(4.5)
            self._write4bits(0x03)
            usleep(100)
            self._write4bits(0x02)
        elif self.data_bus_mode == LCD_8BITMODE:
            # Hitachi manual page 45
            self._write8bits(0x30)
            msleep(4.5)
            self._write8bits(0x30)
            usleep(100)
            self._write8bits(0x30)
        else:
            raise ValueError('Invalid data bus mode: {}'.format(self.data_bus_mode))
        # Write configuration to display
        self.command(LCD_FUNCTIONSET | displayfunction)
        usleep(50)
        # Configure display mode
        self._display_mode = LCD_DISPLAYON
        self._cursor_mode = int(CursorMode.hide)
        self.command(LCD_DISPLAYCONTROL | self._display_mode | self._cursor_mode)
        usleep(50)
        # Clear display
        self.clear()
        # Configure entry mode
        self._text_align_mode = int(Alignment.left)
        self._display_shift_mode = int(ShiftMode.cursor)
        self._cursor_pos = (0, 0)
        self.command(LCD_ENTRYMODESET | self._text_align_mode | self._display_shift_mode)
        usleep(50)
    def close(self, clear=False):
        """Release the GPIO pins, optionally blanking the display first."""
        if clear:
            self.clear()
        RPIO.cleanup()
    # Properties
    def _get_cursor_pos(self):
        # Returns the cached position; the hardware address is not read back.
        return self._cursor_pos
    def _set_cursor_pos(self, value):
        # NOTE(review): self._cursor_pos is not updated here, so the getter
        # keeps returning the previously stored value -- confirm intended.
        if not hasattr(value, '__getitem__') or len(value) != 2:
            raise ValueError('Cursor position should be determined by a 2-tuple.')
        # DDRAM addresses of row starts on a 20x4 display.
        row_offsets = [0x00, 0x40, 0x14, 0x54]  # TODO handle smaller displays
        self.command(LCD_SETDDRAMADDR | row_offsets[value[0]] + value[1])
        usleep(50)
    cursor_pos = property(_get_cursor_pos, _set_cursor_pos,
                          doc='The cursor position as a 2-tuple (row, col).')
    def _get_text_align_mode(self):
        try:
            return Alignment[self._text_align_mode]
        except ValueError:
            raise ValueError('Internal _text_align_mode has invalid value.')
    def _set_text_align_mode(self, value):
        if not value in Alignment:
            raise ValueError('Cursor move mode must be of ``Alignment`` type.')
        self._text_align_mode = int(value)
        self.command(LCD_ENTRYMODESET | self._text_align_mode | self._display_shift_mode)
        usleep(50)
    text_align_mode = property(_get_text_align_mode, _set_text_align_mode,
                               doc='The text alignment (``Alignment.left`` or ``Alignment.right``).')
    def _get_write_shift_mode(self):
        try:
            return ShiftMode[self._display_shift_mode]
        except ValueError:
            raise ValueError('Internal _display_shift_mode has invalid value.')
    def _set_write_shift_mode(self, value):
        if not value in ShiftMode:
            raise ValueError('Write shift mode must be of ``ShiftMode`` type.')
        self._display_shift_mode = int(value)
        self.command(LCD_ENTRYMODESET | self._text_align_mode | self._display_shift_mode)
        usleep(50)
    write_shift_mode = property(_get_write_shift_mode, _set_write_shift_mode,
                                doc='The shift mode when writing (``ShiftMode.cursor`` or ``ShiftMode.display``).')
    def _get_display_enabled(self):
        return self._display_mode == LCD_DISPLAYON
    def _set_display_enabled(self, value):
        self._display_mode = LCD_DISPLAYON if value else LCD_DISPLAYOFF
        self.command(LCD_DISPLAYCONTROL | self._display_mode | self._cursor_mode)
        usleep(50)
    display_enabled = property(_get_display_enabled, _set_display_enabled,
                               doc='Whether or not to display any characters.')
    def _get_cursor_mode(self):
        try:
            return CursorMode[self._cursor_mode]
        except ValueError:
            raise ValueError('Internal _cursor_mode has invalid value.')
    def _set_cursor_mode(self, value):
        if not value in CursorMode:
            raise ValueError('Cursor mode must be of ``CursorMode`` type.')
        self._cursor_mode = int(value)
        self.command(LCD_DISPLAYCONTROL | self._display_mode | self._cursor_mode)
        usleep(50)
    cursor_mode = property(_get_cursor_mode, _set_cursor_mode,
                           doc='How the cursor should behave (``CursorMode.hide``, ' + \
                               '``CursorMode.line`` or ``CursorMode.blink``).')
    # High level commands
    def write_string(self, value):
        """Write the specified string to the display."""
        for char in value:
            self.write(ord(char))
    def clear(self):
        """Overwrite display with blank characters and reset cursor position."""
        self.command(LCD_CLEARDISPLAY)
        msleep(2)
    def home(self):
        """Set cursor to initial position and reset any shifting."""
        self.command(LCD_RETURNHOME)
        msleep(2)
    def shift_display(self, amount):
        """Shift the display. Use negative amounts to shift left and positive
        amounts to shift right."""
        if amount == 0:
            return
        direction = LCD_MOVERIGHT if amount > 0 else LCD_MOVELEFT
        for i in range(abs(amount)):
            self.command(LCD_CURSORSHIFT | LCD_DISPLAYMOVE | direction)
            usleep(50)
    # Mid level commands
    def command(self, value):
        """Send a raw command to the LCD."""
        self._send(value, RS_INSTRUCTION)
    def write(self, value):
        """Write a raw byte to the LCD."""
        self._send(value, RS_DATA)
    # Low level commands
    def _send(self, value, mode):
        """Send the specified value to the display with automatic 4bit / 8bit
        selection. The rs_mode is either ``RS_DATA`` or ``RS_INSTRUCTION``."""
        # Choose instruction or data mode
        # NOTE(review): RPIO.setup normally configures pin *direction*;
        # driving the RS level would usually be RPIO.output -- confirm
        # against the RPIO API.
        RPIO.setup(self.pins.rs, mode)
        # If the RW pin is used, set it to low in order to write.
        if self.pins.rw is not None:
            RPIO.output(self.pins.rw, 0)
        # Write data out in chunks of 4 or 8 bit
        if self.data_bus_mode == LCD_8BITMODE:
            self._write8bits(value)
        else:
            # High nibble first, then low nibble.
            self._write4bits(value >> 4)
            self._write4bits(value)
    def _write4bits(self, value):
        """Write 4 bits of data into the data bus."""
        # pins[7..10] are d4-d7 (see PinConfig field order).
        for i in range(4):
            bit = (value >> i) & 0x01
            RPIO.output(self.pins[i + 7], bit)
        self._pulse_enable()
    def _write8bits(self, value):
        """Write 8 bits of data into the data bus."""
        # pins[3..10] are d0-d7 (see PinConfig field order).
        for i in range(8):
            bit = (value >> i) & 0x01
            RPIO.output(self.pins[i + 3], bit)
        self._pulse_enable()
    def _pulse_enable(self):
        """Pulse the `enable` flag to process data."""
        RPIO.output(self.pins.e, 0)
        usleep(1)
        RPIO.output(self.pins.e, 1)
        usleep(1)
        RPIO.output(self.pins.e, 0)
        usleep(100)  # commands need > 37us to settle
Removed redundant backslash
# -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Danilo Bargen
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function, division, absolute_import, unicode_literals
import time
from collections import namedtuple
import RPIO
from flufl.enum import Enum
### PYTHON 3 COMPAT ###
try:
range = xrange
except NameError:
pass
### BIT PATTERNS ###
# HD44780 instruction bytes and their option flags. Fix: the
# "Flags for display/cursor shift" group was defined twice; the duplicate
# LCD_DISPLAYMOVE / LCD_CURSORMOVE assignments have been removed.
# Commands
LCD_CLEARDISPLAY = 0x01
LCD_RETURNHOME = 0x02
LCD_ENTRYMODESET = 0x04
LCD_DISPLAYCONTROL = 0x08
LCD_CURSORSHIFT = 0x10
LCD_FUNCTIONSET = 0x20
LCD_SETCGRAMADDR = 0x40
LCD_SETDDRAMADDR = 0x80
# Flags for display entry mode
LCD_ENTRYRIGHT = 0x00
LCD_ENTRYLEFT = 0x02
LCD_ENTRYSHIFTINCREMENT = 0x01
LCD_ENTRYSHIFTDECREMENT = 0x00
# Flags for display on/off control
LCD_DISPLAYON = 0x04
LCD_DISPLAYOFF = 0x00
LCD_CURSORON = 0x02
LCD_CURSOROFF = 0x00
LCD_BLINKON = 0x01
LCD_BLINKOFF = 0x00
# Flags for display/cursor shift
LCD_DISPLAYMOVE = 0x08
LCD_CURSORMOVE = 0x00
LCD_MOVERIGHT = 0x04
LCD_MOVELEFT = 0x00
# Flags for function set
LCD_8BITMODE = 0x10
LCD_4BITMODE = 0x00
LCD_2LINE = 0x08
LCD_1LINE = 0x00
LCD_5x10DOTS = 0x04
LCD_5x8DOTS = 0x00
# Flags for RS pin modes
RS_INSTRUCTION = 0x00
RS_DATA = 0x01
### NAMEDTUPLES ###
# PinConfig: GPIO assignment for the display -- register select, read/write,
# enable, the eight data-bus pins (d0-d3 are None in 4 bit mode) and the
# GPIO numbering mode.
PinConfig = namedtuple('PinConfig', 'rs rw e d0 d1 d2 d3 d4 d5 d6 d7 mode')
# LCDConfig: display geometry (rows x cols) and font dot height (8 or 10).
LCDConfig = namedtuple('LCDConfig', 'rows cols dotsize')
### ENUMS ###
class Alignment(Enum):
    """Text alignment options, mapped to the entry-mode flag bits."""
    left = LCD_ENTRYLEFT
    right = LCD_ENTRYRIGHT
class ShiftMode(Enum):
    """What moves on write: the cursor, or the whole display."""
    cursor = LCD_ENTRYSHIFTDECREMENT
    display = LCD_ENTRYSHIFTINCREMENT
class CursorMode(Enum):
    """Cursor rendering options, mapped to display-control flag bits."""
    hide = LCD_CURSOROFF | LCD_BLINKOFF
    line = LCD_CURSORON | LCD_BLINKOFF
    blink = LCD_CURSOROFF | LCD_BLINKON
### HELPER FUNCTIONS ###
def msleep(milliseconds):
    """Block the calling thread for the given number of milliseconds."""
    seconds = milliseconds / 1000.0
    time.sleep(seconds)
def usleep(microseconds):
    """Block the calling thread for the given number of microseconds."""
    seconds = microseconds / 1000000.0
    time.sleep(seconds)
### MAIN ###
class CharLCD(object):
    """Driver for an HD44780-style character LCD on Raspberry Pi GPIO."""
    # Init, setup, teardown
    def __init__(self, pin_rs=15, pin_rw=18, pin_e=16, pins_data=[21, 22, 23, 24],
                 numbering_mode=RPIO.BOARD):
        """
        Character LCD controller.
        The default pin numbers are based on the BOARD numbering scheme (1-26).
        You can save 1 pin by not using RW. Set ``pin_rw`` to ``None`` if you
        want this.
        Args:
            pin_rs:
                Pin for register select (RS). Default: 15.
            pin_rw:
                Pin for selecting read or write mode (R/W). Set this to
                ``None`` for read only mode. Default: 18.
            pin_e:
                Pin to start data read or write (E). Default: 16.
            pins_data:
                List of data bus pins in 8 bit mode (DB0-DB7) or in 4 bit mode
                (DB4-DB7) in ascending order. Default: [21, 22, 23, 24].
            numbering_mode:
                Which scheme to use for numbering the GPIO pins.
                Default: RPIO.BOARD (1-26).
        Returns:
            A :class:`CharLCD` instance.
        """
        # Set attributes
        # NOTE(review): ``pins_data`` is a mutable default argument; it is
        # only read here, so this is harmless, but a tuple would be safer.
        self.numbering_mode = numbering_mode
        if len(pins_data) == 4:  # 4 bit mode
            self.data_bus_mode = LCD_4BITMODE
            # No DB0-DB3 pins in 4 bit mode; pad so PinConfig stays uniform.
            block1 = [None] * 4
        elif len(pins_data) == 8:  # 8 bit mode
            self.data_bus_mode = LCD_8BITMODE
            block1 = pins_data[:4]
        else:
            raise ValueError('There should be exactly 4 or 8 data pins.')
        # Upper data nibble (DB4-DB7) is used in both bus modes.
        block2 = pins_data[-4:]
        self.pins = PinConfig(rs=pin_rs, rw=pin_rw, e=pin_e,
                              d0=block1[0], d1=block1[1], d2=block1[2], d3=block1[3],
                              d4=block2[0], d5=block2[1], d6=block2[2], d7=block2[3],
                              mode=numbering_mode)
        # Setup GPIO: configure every assigned pin (all but the trailing
        # ``mode`` field) as an output.
        # NOTE(review): ``filter(None, ...)`` also drops a pin numbered 0 --
        # confirm no pin is GPIO 0.
        RPIO.setmode(self.numbering_mode)
        for pin in list(filter(None, self.pins))[:-1]:
            RPIO.setup(pin, RPIO.OUT)
    def setup(self, rows=4, cols=20, dotsize=8):
        """Initialize display with the specified configuration.
        Args:
            rows:
                Number of display rows (usually 1, 2 or 4). Default: 4.
            cols:
                Number of columns per row (usually 16 or 20). Default 20.
            dotsize:
                Some 1 line displays allow a font height of 10px.
                Allowed: 8 or 10. Default: 8.
        """
        # Set attributes
        self.lcd = LCDConfig(rows=rows, cols=cols, dotsize=dotsize)
        displayfunction = self.data_bus_mode | LCD_1LINE | LCD_5x8DOTS
        # LCD only uses two lines on 4 row displays
        if rows == 4:
            displayfunction |= LCD_2LINE
        # For some 1 line displays you can select a 10px font.
        assert dotsize in [8, 10], 'The ``dotsize`` argument should be either 8 or 10.'
        if dotsize == 10:
            displayfunction |= LCD_5x10DOTS
        # Initialization: drive control lines low, then run the bus-width
        # selection sequence from the Hitachi datasheet.
        msleep(50)
        RPIO.output(self.pins.rs, 0)
        RPIO.output(self.pins.e, 0)
        if self.pins.rw is not None:
            RPIO.output(self.pins.rw, 0)
        # Choose 4 or 8 bit mode
        if self.data_bus_mode == LCD_4BITMODE:
            # Hitachi manual page 46
            self._write4bits(0x03)
            msleep(4.5)
            self._write4bits(0x03)
            msleep(4.5)
            self._write4bits(0x03)
            usleep(100)
            self._write4bits(0x02)
        elif self.data_bus_mode == LCD_8BITMODE:
            # Hitachi manual page 45
            self._write8bits(0x30)
            msleep(4.5)
            self._write8bits(0x30)
            usleep(100)
            self._write8bits(0x30)
        else:
            raise ValueError('Invalid data bus mode: {}'.format(self.data_bus_mode))
        # Write configuration to display
        self.command(LCD_FUNCTIONSET | displayfunction)
        usleep(50)
        # Configure display mode
        self._display_mode = LCD_DISPLAYON
        self._cursor_mode = int(CursorMode.hide)
        self.command(LCD_DISPLAYCONTROL | self._display_mode | self._cursor_mode)
        usleep(50)
        # Clear display
        self.clear()
        # Configure entry mode
        self._text_align_mode = int(Alignment.left)
        self._display_shift_mode = int(ShiftMode.cursor)
        self._cursor_pos = (0, 0)
        self.command(LCD_ENTRYMODESET | self._text_align_mode | self._display_shift_mode)
        usleep(50)
    def close(self, clear=False):
        """Release the GPIO pins, optionally blanking the display first."""
        if clear:
            self.clear()
        RPIO.cleanup()
    # Properties
    def _get_cursor_pos(self):
        # Returns the cached position; the hardware address is not read back.
        return self._cursor_pos
    def _set_cursor_pos(self, value):
        # NOTE(review): self._cursor_pos is not updated here, so the getter
        # keeps returning the previously stored value -- confirm intended.
        if not hasattr(value, '__getitem__') or len(value) != 2:
            raise ValueError('Cursor position should be determined by a 2-tuple.')
        # DDRAM addresses of row starts on a 20x4 display.
        row_offsets = [0x00, 0x40, 0x14, 0x54]  # TODO handle smaller displays
        self.command(LCD_SETDDRAMADDR | row_offsets[value[0]] + value[1])
        usleep(50)
    cursor_pos = property(_get_cursor_pos, _set_cursor_pos,
                          doc='The cursor position as a 2-tuple (row, col).')
    def _get_text_align_mode(self):
        try:
            return Alignment[self._text_align_mode]
        except ValueError:
            raise ValueError('Internal _text_align_mode has invalid value.')
    def _set_text_align_mode(self, value):
        if not value in Alignment:
            raise ValueError('Cursor move mode must be of ``Alignment`` type.')
        self._text_align_mode = int(value)
        self.command(LCD_ENTRYMODESET | self._text_align_mode | self._display_shift_mode)
        usleep(50)
    text_align_mode = property(_get_text_align_mode, _set_text_align_mode,
                               doc='The text alignment (``Alignment.left`` or ``Alignment.right``).')
    def _get_write_shift_mode(self):
        try:
            return ShiftMode[self._display_shift_mode]
        except ValueError:
            raise ValueError('Internal _display_shift_mode has invalid value.')
    def _set_write_shift_mode(self, value):
        if not value in ShiftMode:
            raise ValueError('Write shift mode must be of ``ShiftMode`` type.')
        self._display_shift_mode = int(value)
        self.command(LCD_ENTRYMODESET | self._text_align_mode | self._display_shift_mode)
        usleep(50)
    write_shift_mode = property(_get_write_shift_mode, _set_write_shift_mode,
                                doc='The shift mode when writing (``ShiftMode.cursor`` or ``ShiftMode.display``).')
    def _get_display_enabled(self):
        return self._display_mode == LCD_DISPLAYON
    def _set_display_enabled(self, value):
        self._display_mode = LCD_DISPLAYON if value else LCD_DISPLAYOFF
        self.command(LCD_DISPLAYCONTROL | self._display_mode | self._cursor_mode)
        usleep(50)
    display_enabled = property(_get_display_enabled, _set_display_enabled,
                               doc='Whether or not to display any characters.')
    def _get_cursor_mode(self):
        try:
            return CursorMode[self._cursor_mode]
        except ValueError:
            raise ValueError('Internal _cursor_mode has invalid value.')
    def _set_cursor_mode(self, value):
        if not value in CursorMode:
            raise ValueError('Cursor mode must be of ``CursorMode`` type.')
        self._cursor_mode = int(value)
        self.command(LCD_DISPLAYCONTROL | self._display_mode | self._cursor_mode)
        usleep(50)
    cursor_mode = property(_get_cursor_mode, _set_cursor_mode,
                           doc='How the cursor should behave (``CursorMode.hide``, ' +
                               '``CursorMode.line`` or ``CursorMode.blink``).')
    # High level commands
    def write_string(self, value):
        """Write the specified string to the display."""
        for char in value:
            self.write(ord(char))
    def clear(self):
        """Overwrite display with blank characters and reset cursor position."""
        self.command(LCD_CLEARDISPLAY)
        msleep(2)
    def home(self):
        """Set cursor to initial position and reset any shifting."""
        self.command(LCD_RETURNHOME)
        msleep(2)
    def shift_display(self, amount):
        """Shift the display. Use negative amounts to shift left and positive
        amounts to shift right."""
        if amount == 0:
            return
        direction = LCD_MOVERIGHT if amount > 0 else LCD_MOVELEFT
        for i in range(abs(amount)):
            self.command(LCD_CURSORSHIFT | LCD_DISPLAYMOVE | direction)
            usleep(50)
    # Mid level commands
    def command(self, value):
        """Send a raw command to the LCD."""
        self._send(value, RS_INSTRUCTION)
    def write(self, value):
        """Write a raw byte to the LCD."""
        self._send(value, RS_DATA)
    # Low level commands
    def _send(self, value, mode):
        """Send the specified value to the display with automatic 4bit / 8bit
        selection. The rs_mode is either ``RS_DATA`` or ``RS_INSTRUCTION``."""
        # Choose instruction or data mode
        # NOTE(review): RPIO.setup normally configures pin *direction*;
        # driving the RS level would usually be RPIO.output -- confirm
        # against the RPIO API.
        RPIO.setup(self.pins.rs, mode)
        # If the RW pin is used, set it to low in order to write.
        if self.pins.rw is not None:
            RPIO.output(self.pins.rw, 0)
        # Write data out in chunks of 4 or 8 bit
        if self.data_bus_mode == LCD_8BITMODE:
            self._write8bits(value)
        else:
            # High nibble first, then low nibble.
            self._write4bits(value >> 4)
            self._write4bits(value)
    def _write4bits(self, value):
        """Write 4 bits of data into the data bus."""
        # pins[7..10] are d4-d7 (see PinConfig field order).
        for i in range(4):
            bit = (value >> i) & 0x01
            RPIO.output(self.pins[i + 7], bit)
        self._pulse_enable()
    def _write8bits(self, value):
        """Write 8 bits of data into the data bus."""
        # pins[3..10] are d0-d7 (see PinConfig field order).
        for i in range(8):
            bit = (value >> i) & 0x01
            RPIO.output(self.pins[i + 3], bit)
        self._pulse_enable()
    def _pulse_enable(self):
        """Pulse the `enable` flag to process data."""
        RPIO.output(self.pins.e, 0)
        usleep(1)
        RPIO.output(self.pins.e, 1)
        usleep(1)
        RPIO.output(self.pins.e, 0)
        usleep(100)  # commands need > 37us to settle
|
#! /usr/env/python
"""
This module attempts to "component-ify" GT's Fastscape stream power erosion.
Created DEJH, March 2014.
"""
import numpy
from landlab import ModelParameterDictionary
from landlab.core.model_parameter_dictionary import MissingKeyError, ParameterValueError
from landlab.field.scalar_data_fields import FieldError
from scipy import weave
from scipy.weave.build_tools import CompileError
UNDEFINED_INDEX = numpy.iinfo(numpy.int32).max
class SPEroder(object):
'''
This class uses the Braun-Willett Fastscape approach to calculate the amount
of erosion at each node in a grid, following a stream power framework.
On initialization, it takes *grid*, a reference to a ModelGrid, and
*input_stream*, a string giving the filename (and optionally, path) of the
required input file.
It needs to be supplied with the key variables:
*K_sp*
*m_sp*
...which it will draw from the supplied input file. *n_sp* has to be 1 for
the BW algorithm to work. If you want n!=1., try calling the explicit
"stream_power" component.
If you want to supply a spatial variation in K, set K_sp to the string
'array', and pass a field name or array to the erode method's K_if_used
argument.
*dt*, *rainfall_intensity*, and *value_field* are optional variables.
*dt* is a fixed timestep, and *rainfall_intensity* is a parameter which
modulates K_sp (by a product, r_i**m_sp) to reflect the direct influence of
rainfall intensity on erosivity. *value_field* is a string giving the name
of the field containing the elevation data in the grid. It defaults to
'topographic_elevation' if not supplied.
This module assumes you have already run
:func:`landlab.components.flow_routing.route_flow_dn.FlowRouter.route_flow`
in the same timestep. It looks for 'upstream_ID_order',
'links_to_flow_receiver', 'drainage_area', 'flow_receiver', and
'topographic_elevation' at the nodes in the grid. 'drainage_area' should
be in area upstream, not volume (i.e., set runoff_rate=1.0 when calling
FlowRouter.route_flow).
If dt is not supplied, you must call gear_timestep(dt_in, rain_intensity_in)
each iteration to set these variables on-the-fly (rainfall_intensity will be
overridden if supplied in the input file).
If dt is supplied but rainfall_intensity is not, the module will assume you
mean r_i = 1.
The primary method of this class is :func:`erode`.
'''
    def __init__(self, grid, input_stream):
        """Configure the eroder from an input parameter file.
        Args:
            grid: a landlab ModelGrid instance.
            input_stream: filename (or stream) readable by
                ModelParameterDictionary.
        """
        self.grid = grid
        inputs = ModelParameterDictionary(input_stream)
        # User-supplied parameters:
        try:
            self.K = inputs.read_float('K_sp')
        except ParameterValueError:
            # K_sp was not a float (the 'array' case): a per-node K array
            # must then be passed to erode() via K_if_used.
            self.use_K = True
        else:
            self.use_K = False
        self.m = inputs.read_float('m_sp')
        try:
            self.n = inputs.read_float('n_sp')
        except:
            self.n = 1.
        try:
            self.dt = inputs.read_float('dt')
        except:  # if dt isn't supplied, it must be set by another module, so look in the grid
            print 'Set dynamic timestep from the grid. You must call gear_timestep() to set dt each iteration.'
        else:
            # NOTE(review): rainfall_intensity is only read when dt was
            # supplied in the file; otherwise r_i must come from
            # gear_timestep() -- confirm this asymmetry is intended.
            try:
                self.r_i = inputs.read_float('rainfall_intensity')
            except:
                self.r_i = 1.
        try:
            self.value_field = inputs.read_str('value_field')
        except:
            self.value_field = 'topographic_elevation'
        # make storage variables
        self.A_to_the_m = grid.create_node_array_zeros()
        self.alpha = grid.empty(centering='node')
        # The implicit Fastscape solver below is only valid for n == 1.
        if self.n != 1.:
            raise ValueError('The Braun Willett stream power algorithm requires n==1. at the moment, sorry...')
def gear_timestep(self, dt_in, rainfall_intensity_in=None):
    """Set the timestep for this iteration, optionally also the rainfall
    intensity, and return the (dt, r_i) pair now in effect."""
    self.dt = dt_in
    if rainfall_intensity_in is None:
        return self.dt, self.r_i
    self.r_i = rainfall_intensity_in
    return self.dt, self.r_i
def erode(self, grid_in, K_if_used=None):
    """
    This method implements the stream power erosion, following the Braun-
    Willett (2013) implicit Fastscape algorithm. This should allow it to
    be stable against larger timesteps than an explicit stream power scheme.

    The method takes *grid_in*, a reference to the model grid (currently
    unused: the grid stored at initialization is operated on instead).

    Set 'K_if_used' as a field name or nnodes-long array if you set K_sp as
    'array' during initialization.

    It returns the grid, in which it will have modified the value of
    *value_field*, as specified in component initialization.
    """
    upstream_order_IDs = self.grid['node']['upstream_ID_order']
    z = self.grid['node'][self.value_field]
    # Boundary nodes carry UNDEFINED_INDEX instead of a receiver link;
    # exclude them from the implicit solve.
    defined_flow_receivers = numpy.not_equal(
        self.grid['node']['links_to_flow_receiver'], UNDEFINED_INDEX)
    flow_link_lengths = self.grid.link_length[
        self.grid['node']['links_to_flow_receiver'][defined_flow_receivers]]
    # BUGFIX: was `K_if_used != None`, which performs elementwise comparison
    # (and raises on truth-testing) when K_if_used is a numpy array.
    if K_if_used is not None:
        assert self.use_K, "An array of erodabilities was provided, but you didn't set K_sp to 'array' in your input file! Aborting..."
        try:
            # Treat K_if_used as the name of an at-node field first...
            self.K = self.grid.at_node[K_if_used][defined_flow_receivers]
        except TypeError:
            # ...otherwise it is already an nnodes-long array.
            self.K = K_if_used[defined_flow_receivers]
    numpy.power(self.grid['node']['drainage_area'], self.m, out=self.A_to_the_m)
    # alpha = r_i**m * K * dt * A**m / dx: the nondimensional factor in the
    # implicit node update (Braun & Willett 2013).
    self.alpha[defined_flow_receivers] = (
        self.r_i**self.m * self.K * self.dt *
        self.A_to_the_m[defined_flow_receivers] / flow_link_lengths)
    flow_receivers = self.grid['node']['flow_receiver']
    n_nodes = upstream_order_IDs.size
    alpha = self.alpha
    # Sweep from baselevel upstream; each node is solved implicitly against
    # its already-updated receiver j: z_i <- (z_i + a_i*z_j) / (1 + a_i).
    code = """
        int current_node;
        int j;
        for (int i = 0; i < n_nodes; i++) {
            current_node = upstream_order_IDs[i];
            j = flow_receivers[current_node];
            if (current_node != j) {
                z[current_node] = (z[current_node] + alpha[current_node]*z[j])/(1.0+alpha[current_node]);
            }
        }
        """
    try:
        # Fast path: compile the inner loop with scipy.weave.
        weave.inline(code, ['n_nodes', 'upstream_order_IDs', 'flow_receivers', 'z', 'alpha'])
    except CompileError:
        # Pure-Python fallback when no working C compiler is available.
        for i in upstream_order_IDs:
            j = flow_receivers[i]
            if i != j:
                z[i] = (z[i] + alpha[i]*z[j])/(1.0+alpha[i])
    return self.grid
Bug in fastscape_sp
Charlie reported a bug where fastscape_stream_power complained if
neither timestep nor rainfall_intensity was specified; should now be
resolved.
Also implemented a quick-and-dirty fix that should resolve the
PC-specific problem with scipy.weave, where a compiler error was
printed every timestep.
#!/usr/bin/env python
"""
This module attempts to "component-ify" GT's Fastscape stream power erosion.
Created DEJH, March 2014.
"""
import numpy
from landlab import ModelParameterDictionary
from landlab.core.model_parameter_dictionary import MissingKeyError, ParameterValueError
from landlab.field.scalar_data_fields import FieldError
from scipy import weave
from scipy.weave.build_tools import CompileError
UNDEFINED_INDEX = numpy.iinfo(numpy.int32).max
class SPEroder(object):
    '''
    This class uses the Braun-Willett Fastscape approach to calculate the
    amount of erosion at each node in a grid, following a stream power
    framework.

    On initialization, it takes *grid*, a reference to a ModelGrid, and
    *input_stream*, a string giving the filename (and optionally, path) of the
    required input file.

    It needs to be supplied with the key variables:
        *K_sp*
        *m_sp*
    ...which it will draw from the supplied input file. *n_sp* has to be 1 for
    the BW algorithm to work. If you want n!=1., try calling the explicit
    "stream_power" component.

    If you want to supply a spatial variation in K, set K_sp to the string
    'array', and pass a field name or array to the erode method's K_if_used
    argument.

    *dt*, *rainfall_intensity*, and *value_field* are optional variables.
    *dt* is a fixed timestep, and *rainfall_intensity* is a parameter which
    modulates K_sp (by a product, r_i**m_sp) to reflect the direct influence
    of rainfall intensity on erosivity. *value_field* is a string giving the
    name of the field containing the elevation data in the grid. It defaults
    to 'topographic_elevation' if not supplied.

    This module assumes you have already run
    :func:`landlab.components.flow_routing.route_flow_dn.FlowRouter.route_flow`
    in the same timestep. It looks for 'upstream_ID_order',
    'links_to_flow_receiver', 'drainage_area', 'flow_receiver', and
    'topographic_elevation' at the nodes in the grid. 'drainage_area' should
    be in area upstream, not volume (i.e., set runoff_rate=1.0 when calling
    FlowRouter.route_flow).

    If dt is not supplied, you must call gear_timestep(dt_in, rain_intensity_in)
    each iteration to set these variables on-the-fly (rainfall_intensity will
    be overridden if supplied in the input file).
    If dt is supplied but rainfall_intensity is not, the module will assume
    you mean r_i = 1.

    The primary method of this class is :func:`erode`.
    '''
    def __init__(self, grid, input_stream):
        """Configure the eroder from *input_stream* and allocate storage.

        Raises ValueError if n_sp != 1 (the implicit scheme requires n == 1).
        """
        self.grid = grid
        inputs = ModelParameterDictionary(input_stream)
        # User sets:
        try:
            self.K = inputs.read_float('K_sp')
        except ParameterValueError:
            # K_sp was non-numeric (e.g. 'array'): per-node K must be passed
            # to erode() through K_if_used.
            self.use_K = True
        else:
            self.use_K = False
        self.m = inputs.read_float('m_sp')
        try:
            self.n = inputs.read_float('n_sp')
        except (MissingKeyError, ParameterValueError):
            self.n = 1.
        try:
            self.dt = inputs.read_float('dt')
        except (MissingKeyError, ParameterValueError):
            # if dt isn't supplied, it must be set by another module each
            # iteration via gear_timestep()
            print('Set dynamic timestep from the grid. You must call gear_timestep() to set dt each iteration.')
        try:
            self.r_i = inputs.read_float('rainfall_intensity')
        except (MissingKeyError, ParameterValueError):
            self.r_i = 1.
        try:
            self.value_field = inputs.read_str('value_field')
        except (MissingKeyError, ParameterValueError):
            self.value_field = 'topographic_elevation'
        # make storage variables
        self.A_to_the_m = grid.create_node_array_zeros()
        self.alpha = grid.empty(centering='node')
        if self.n != 1.:
            raise ValueError('The Braun Willett stream power algorithm requires n==1. at the moment, sorry...')
        # Perform a test compile to see if weave will work up front;
        # necessary this way due to PC idiosyncracies where the compiler
        # only fails at runtime.
        try:
            weave.inline('', [])
        except CompileError:
            self.weave_flag = False
        else:
            self.weave_flag = True

    def gear_timestep(self, dt_in, rainfall_intensity_in=None):
        """Set the timestep (and optionally rainfall intensity) for this
        iteration; returns the (dt, r_i) pair now in effect."""
        self.dt = dt_in
        if rainfall_intensity_in is not None:
            self.r_i = rainfall_intensity_in
        return self.dt, self.r_i

    def erode(self, grid_in, K_if_used=None):
        """
        This method implements the stream power erosion, following the Braun-
        Willett (2013) implicit Fastscape algorithm. This should allow it to
        be stable against larger timesteps than an explicit stream power
        scheme.

        The method takes *grid_in*, a reference to the model grid (currently
        unused: the grid stored at initialization is operated on instead).

        Set 'K_if_used' as a field name or nnodes-long array if you set K_sp
        as 'array' during initialization.

        It returns the grid, in which it will have modified the value of
        *value_field*, as specified in component initialization.
        """
        upstream_order_IDs = self.grid['node']['upstream_ID_order']
        z = self.grid['node'][self.value_field]
        # Boundary nodes carry UNDEFINED_INDEX instead of a receiver link;
        # exclude them from the implicit solve.
        defined_flow_receivers = numpy.not_equal(
            self.grid['node']['links_to_flow_receiver'], UNDEFINED_INDEX)
        flow_link_lengths = self.grid.link_length[
            self.grid['node']['links_to_flow_receiver'][defined_flow_receivers]]
        # BUGFIX: was `K_if_used != None`, which performs elementwise
        # comparison (and raises on truth-testing) when K_if_used is an array.
        if K_if_used is not None:
            assert self.use_K, "An array of erodabilities was provided, but you didn't set K_sp to 'array' in your input file! Aborting..."
            try:
                # Treat K_if_used as the name of an at-node field first...
                self.K = self.grid.at_node[K_if_used][defined_flow_receivers]
            except TypeError:
                # ...otherwise it is already an nnodes-long array.
                self.K = K_if_used[defined_flow_receivers]
        numpy.power(self.grid['node']['drainage_area'], self.m, out=self.A_to_the_m)
        # alpha = r_i**m * K * dt * A**m / dx: the nondimensional factor in
        # the implicit node update (Braun & Willett 2013).
        self.alpha[defined_flow_receivers] = (
            self.r_i**self.m * self.K * self.dt *
            self.A_to_the_m[defined_flow_receivers] / flow_link_lengths)
        flow_receivers = self.grid['node']['flow_receiver']
        n_nodes = upstream_order_IDs.size
        alpha = self.alpha
        # Sweep from baselevel upstream; each node is solved implicitly
        # against its already-updated receiver j:
        #   z_i <- (z_i + a_i*z_j) / (1 + a_i)
        if self.weave_flag:
            code = """
                int current_node;
                int j;
                for (int i = 0; i < n_nodes; i++) {
                    current_node = upstream_order_IDs[i];
                    j = flow_receivers[current_node];
                    if (current_node != j) {
                        z[current_node] = (z[current_node] + alpha[current_node]*z[j])/(1.0+alpha[current_node]);
                    }
                }
                """
            weave.inline(code, ['n_nodes', 'upstream_order_IDs', 'flow_receivers', 'z', 'alpha'])
        else:
            # Pure-Python fallback when no working C compiler is available.
            for i in upstream_order_IDs:
                j = flow_receivers[i]
                if i != j:
                    z[i] = (z[i] + alpha[i]*z[j])/(1.0+alpha[i])
        return self.grid
|
from morepath import Config
import reg
from reg import ClassIndex, KeyIndex
import morepath
def setup_module(module):
    # Pytest module-level setup: disable morepath's implicit global lookup so
    # every dispatch call in this module must pass lookup= explicitly.
    morepath.disable_implicit()
def test_dispatch():
    """Plain reg.dispatch: per-app registrations resolve via the app lookup."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch('obj')
    def f(obj):
        return "fallback"

    @App.function(f, obj=Foo)
    def handle_foo(obj):
        return "foo"

    @App.function(f, obj=Bar)
    def handle_bar(obj):
        return "bar"

    config.commit()

    lookup = App().lookup
    assert f(Other(), lookup=lookup) == 'fallback'
    assert f(Foo(), lookup=lookup) == 'foo'
    assert f(Bar(), lookup=lookup) == 'bar'
def test_dispatch_external_predicates():
    """A predicate registered on the app drives dispatch_external_predicates."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj):
        return "fallback"

    @App.predicate(f, name='model', default=None, index=ClassIndex)
    def model_of(obj):
        return obj.__class__

    @App.function(f, model=Foo)
    def handle_foo(obj):
        return "foo"

    @App.function(f, model=Bar)
    def handle_bar(obj):
        return "bar"

    config.commit()

    lookup = App().lookup
    assert f(Other(), lookup=lookup) == 'fallback'
    assert f(Foo(), lookup=lookup) == 'foo'
    assert f(Bar(), lookup=lookup) == 'bar'
def test_dispatch_external_predicates_predicate_fallback():
    """predicate_fallback supplies the result when no registration matches."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj):
        return "dispatch function"

    @App.predicate(f, name='model', default=None, index=ClassIndex)
    def f_obj(obj):
        return obj.__class__

    @App.predicate_fallback(f, f_obj)
    def unmatched(obj):
        return "f_obj_fallback"

    @App.function(f, model=Foo)
    def handle_foo(obj):
        return "foo"

    @App.function(f, model=Bar)
    def handle_bar(obj):
        return "bar"

    config.commit()

    lookup = App().lookup
    assert f(Other(), lookup=lookup) == 'f_obj_fallback'
    assert f(Foo(), lookup=lookup) == 'foo'
    assert f(Bar(), lookup=lookup) == 'bar'
def test_dispatch_external_predicates_ordering_after():
    """Predicates ordered with after= combine into (model, name) dispatch."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj, name):
        return "fallback"

    @App.predicate(f, name='model', default=None, index=ClassIndex)
    def pred_obj(obj):
        return obj.__class__

    @App.predicate(f, name='name', default='', index=KeyIndex, after=pred_obj)
    def pred_name(name):
        return name

    @App.function(f, model=Foo, name='')
    def foo_default(obj, name):
        return "foo default"

    @App.function(f, model=Foo, name='edit')
    def foo_edit(obj, name):
        return "foo edit"

    @App.function(f, model=Bar, name='')
    def bar_default(obj, name):
        return "bar default"

    @App.function(f, model=Bar, name='edit')
    def bar_edit(obj, name):
        return "bar edit"

    config.commit()

    lookup = App().lookup
    for obj, view, expected in [
            (Foo(), '', 'foo default'),
            (Bar(), '', 'bar default'),
            (Foo(), 'edit', 'foo edit'),
            (Bar(), 'edit', 'bar edit'),
            (Other(), '', 'fallback'),
            (Other(), 'edit', 'fallback')]:
        assert f(obj, view, lookup=lookup) == expected
def test_dispatch_external_predicates_ordering_before():
    """Predicates ordered with before= give the same (model, name) dispatch."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj, name):
        return "fallback"

    @App.predicate(f, name='name', default='', index=KeyIndex)
    def pred_name(name):
        return name

    @App.predicate(f, name='model', default=None, index=ClassIndex,
                   before=pred_name)
    def pred_obj(obj):
        return obj.__class__

    @App.function(f, model=Foo, name='')
    def foo_default(obj, name):
        return "foo default"

    @App.function(f, model=Foo, name='edit')
    def foo_edit(obj, name):
        return "foo edit"

    @App.function(f, model=Bar, name='')
    def bar_default(obj, name):
        return "bar default"

    @App.function(f, model=Bar, name='edit')
    def bar_edit(obj, name):
        return "bar edit"

    config.commit()

    lookup = App().lookup
    for obj, view, expected in [
            (Foo(), '', 'foo default'),
            (Bar(), '', 'bar default'),
            (Foo(), 'edit', 'foo edit'),
            (Bar(), 'edit', 'bar edit'),
            (Other(), '', 'fallback'),
            (Other(), 'edit', 'fallback')]:
        assert f(obj, view, lookup=lookup) == expected
Overriding predicates and fallbacks works just fine. Closes #245.
from morepath import Config
import reg
from reg import ClassIndex, KeyIndex
import morepath
def setup_module(module):
    # Pytest module-level setup: disable morepath's implicit global lookup so
    # every dispatch call in this module must pass lookup= explicitly.
    morepath.disable_implicit()
def test_dispatch():
    """Plain reg.dispatch: per-app registrations resolve via the app lookup."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch('obj')
    def f(obj):
        return "fallback"

    @App.function(f, obj=Foo)
    def handle_foo(obj):
        return "foo"

    @App.function(f, obj=Bar)
    def handle_bar(obj):
        return "bar"

    config.commit()

    lookup = App().lookup
    assert f(Other(), lookup=lookup) == 'fallback'
    assert f(Foo(), lookup=lookup) == 'foo'
    assert f(Bar(), lookup=lookup) == 'bar'
def test_dispatch_external_predicates():
    """A predicate registered on the app drives dispatch_external_predicates."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj):
        return "fallback"

    @App.predicate(f, name='model', default=None, index=ClassIndex)
    def model_of(obj):
        return obj.__class__

    @App.function(f, model=Foo)
    def handle_foo(obj):
        return "foo"

    @App.function(f, model=Bar)
    def handle_bar(obj):
        return "bar"

    config.commit()

    lookup = App().lookup
    assert f(Other(), lookup=lookup) == 'fallback'
    assert f(Foo(), lookup=lookup) == 'foo'
    assert f(Bar(), lookup=lookup) == 'bar'
def test_dispatch_external_predicates_predicate_fallback():
    """predicate_fallback supplies the result when no registration matches."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj):
        return "dispatch function"

    @App.predicate(f, name='model', default=None, index=ClassIndex)
    def f_obj(obj):
        return obj.__class__

    @App.predicate_fallback(f, f_obj)
    def unmatched(obj):
        return "f_obj_fallback"

    @App.function(f, model=Foo)
    def handle_foo(obj):
        return "foo"

    @App.function(f, model=Bar)
    def handle_bar(obj):
        return "bar"

    config.commit()

    lookup = App().lookup
    assert f(Other(), lookup=lookup) == 'f_obj_fallback'
    assert f(Foo(), lookup=lookup) == 'foo'
    assert f(Bar(), lookup=lookup) == 'bar'
def test_dispatch_external_predicates_ordering_after():
    """Predicates ordered with after= combine into (model, name) dispatch."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj, name):
        return "fallback"

    @App.predicate(f, name='model', default=None, index=ClassIndex)
    def pred_obj(obj):
        return obj.__class__

    @App.predicate(f, name='name', default='', index=KeyIndex, after=pred_obj)
    def pred_name(name):
        return name

    @App.function(f, model=Foo, name='')
    def foo_default(obj, name):
        return "foo default"

    @App.function(f, model=Foo, name='edit')
    def foo_edit(obj, name):
        return "foo edit"

    @App.function(f, model=Bar, name='')
    def bar_default(obj, name):
        return "bar default"

    @App.function(f, model=Bar, name='edit')
    def bar_edit(obj, name):
        return "bar edit"

    config.commit()

    lookup = App().lookup
    for obj, view, expected in [
            (Foo(), '', 'foo default'),
            (Bar(), '', 'bar default'),
            (Foo(), 'edit', 'foo edit'),
            (Bar(), 'edit', 'bar edit'),
            (Other(), '', 'fallback'),
            (Other(), 'edit', 'fallback')]:
        assert f(obj, view, lookup=lookup) == expected
def test_dispatch_external_predicates_ordering_before():
    """Predicates ordered with before= give the same (model, name) dispatch."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj, name):
        return "fallback"

    @App.predicate(f, name='name', default='', index=KeyIndex)
    def pred_name(name):
        return name

    @App.predicate(f, name='model', default=None, index=ClassIndex,
                   before=pred_name)
    def pred_obj(obj):
        return obj.__class__

    @App.function(f, model=Foo, name='')
    def foo_default(obj, name):
        return "foo default"

    @App.function(f, model=Foo, name='edit')
    def foo_edit(obj, name):
        return "foo edit"

    @App.function(f, model=Bar, name='')
    def bar_default(obj, name):
        return "bar default"

    @App.function(f, model=Bar, name='edit')
    def bar_edit(obj, name):
        return "bar edit"

    config.commit()

    lookup = App().lookup
    for obj, view, expected in [
            (Foo(), '', 'foo default'),
            (Bar(), '', 'bar default'),
            (Foo(), 'edit', 'foo edit'),
            (Bar(), 'edit', 'bar edit'),
            (Other(), '', 'fallback'),
            (Other(), 'edit', 'fallback')]:
        assert f(obj, view, lookup=lookup) == expected
def test_dispatch_external_override_fallback():
    """A subclass app can override a predicate_fallback; the base keeps its own."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Sub(App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj):
        return "dispatch function"

    @App.predicate(f, name='model', default=None, index=ClassIndex)
    def f_obj(obj):
        return obj.__class__

    @App.predicate_fallback(f, f_obj)
    def base_fallback(obj):
        return "f_obj_fallback"

    @Sub.predicate_fallback(f, f_obj)
    def sub_fallback(obj):
        return "f_obj_fallback sub"

    @App.function(f, model=Foo)
    def base_foo(obj):
        return "foo"

    @Sub.function(f, model=Foo)
    def sub_foo(obj):
        return "foo sub"

    @App.function(f, model=Bar)
    def base_bar(obj):
        return "bar"

    config.commit()

    sub_lookup = Sub().lookup
    assert f(Foo(), lookup=sub_lookup) == 'foo sub'
    assert f(Bar(), lookup=sub_lookup) == 'bar'
    assert f(Other(), lookup=sub_lookup) == 'f_obj_fallback sub'

    # The base app is unaffected by the subclass overrides.
    base_lookup = App().lookup
    assert f(Foo(), lookup=base_lookup) == 'foo'
    assert f(Bar(), lookup=base_lookup) == 'bar'
    assert f(Other(), lookup=base_lookup) == 'f_obj_fallback'
def test_dispatch_external_override_predicate():
    """A subclass app can replace a predicate wholesale; the base keeps its own."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Sub(App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj):
        return "dispatch function"

    @App.predicate(f, name='model', default=None, index=ClassIndex)
    def f_obj(obj):
        return obj.__class__

    @Sub.predicate(f, name='model', default=None, index=ClassIndex)
    def constant_model(obj):
        return Bar  # ridiculous, but lets us test this

    @App.predicate_fallback(f, f_obj)
    def base_fallback(obj):
        return "f_obj_fallback"

    @App.function(f, model=Foo)
    def base_foo(obj):
        return "foo"

    @Sub.function(f, model=Foo)
    def sub_foo(obj):
        return "foo"

    @App.function(f, model=Bar)
    def base_bar(obj):
        return "bar"

    @Sub.function(f, model=Bar)
    def sub_bar(obj):
        return "bar sub"

    config.commit()

    # In Sub every object classifies as Bar, so everything hits sub_bar.
    sub_lookup = Sub().lookup
    assert f(Foo(), lookup=sub_lookup) == 'bar sub'
    assert f(Bar(), lookup=sub_lookup) == 'bar sub'
    assert f(Other(), lookup=sub_lookup) == 'bar sub'

    base_lookup = App().lookup
    assert f(Foo(), lookup=base_lookup) == 'foo'
    assert f(Bar(), lookup=base_lookup) == 'bar'
    assert f(Other(), lookup=base_lookup) == 'f_obj_fallback'
|
# flake8: noqa
"""
Tests copied from cpython test suite:
https://github.com/python/cpython/blob/3.9/Lib/test/test_range.py
"""
# stdlib
import itertools
import pickle
import sys
import unittest
try:
    # stdlib (only shipped with CPython's own test suite, 3.9+)
    from test.support import ALWAYS_EQ
except ImportError:
    class _ALWAYS_EQ:
        """
        https://github.com/python/cpython/blob/3.9/Lib/test/support/__init__.py
        Object that is equal to anything.
        """

        def __ne__(self, other):
            return False

        def __eq__(self, other):
            return True

    ALWAYS_EQ = _ALWAYS_EQ()
# pure Python implementations (3 args only), for comparison
def pyrange(start, stop, step):
    """Pure-Python reference generator for range(start, stop, step)."""
    if (start - stop) // step < 0:
        # Replace stop with the next element in the sequence of integers
        # congruent to start modulo step, so `!=` safely ends the loop.
        stop += (start - stop) % step
        value = start
        while value != stop:
            yield value
            value += step
def pyrange_reversed(start, stop, step):
    """Pure-Python reference generator for reversed(range(start, stop, step))."""
    # Normalize stop onto the progression, then walk it backwards.
    stop += (start - stop) % step
    return pyrange(stop - step, start - step, -step)
class RangeTest(unittest.TestCase):
def assert_iterators_equal(self, xs, ys, test_id, limit=None):
# check that an iterator xs matches the expected results ys,
# up to a given limit.
if limit is not None:
xs = itertools.islice(xs, limit)
ys = itertools.islice(ys, limit)
sentinel = object()
pairs = itertools.zip_longest(xs, ys, fillvalue=sentinel)
for i, (x, y) in enumerate(pairs):
if x == y:
continue
elif x == sentinel:
self.fail(
"{}: iterator ended unexpectedly "
"at position {}; expected {}".format(test_id, i, y)
)
elif y == sentinel:
self.fail(f"{test_id}: unexpected excess element {x} at position {i}")
else:
self.fail(
f"{test_id}: wrong element at position {i}; expected {y}, got {x}"
)
def test_range(self):
    """Basic construction, contents, argument validation, and len()."""
    self.assertEqual(list(range(3)), [0, 1, 2])
    self.assertEqual(list(range(1, 5)), [1, 2, 3, 4])
    self.assertEqual(list(range(0)), [])
    self.assertEqual(list(range(-3)), [])
    self.assertEqual(list(range(1, 10, 3)), [1, 4, 7])
    self.assertEqual(list(range(5, -5, -3)), [5, 2, -1, -4])

    a = 10
    b = 100
    c = 50

    self.assertEqual(list(range(a, a + 2)), [a, a + 1])
    self.assertEqual(list(range(a + 2, a, -1)), [a + 2, a + 1])
    self.assertEqual(list(range(a + 4, a, -2)), [a + 4, a + 2])

    # Two-element progressions: start is included, stop excluded.
    seq = list(range(a, b, c))
    self.assertIn(a, seq)
    self.assertNotIn(b, seq)
    self.assertEqual(len(seq), 2)

    seq = list(range(b, a, -c))
    self.assertIn(b, seq)
    self.assertNotIn(a, seq)
    self.assertEqual(len(seq), 2)

    seq = list(range(-a, -b, -c))
    self.assertIn(-a, seq)
    self.assertNotIn(-b, seq)
    self.assertEqual(len(seq), 2)

    # Bad arity, zero step, and non-integer arguments are rejected.
    self.assertRaises(TypeError, range)
    self.assertRaises(TypeError, range, 1, 2, 3, 4)
    self.assertRaises(ValueError, range, 1, 2, 0)
    self.assertRaises(TypeError, range, 0.0, 2, 1)
    self.assertRaises(TypeError, range, 1, 2.0, 1)
    self.assertRaises(TypeError, range, 1, 2, 1.0)
    self.assertRaises(TypeError, range, 1e100, 1e101, 1e101)
    self.assertRaises(TypeError, range, 0, "spam")
    self.assertRaises(TypeError, range, 0, 42, "spam")

    # len() works right up to sys.maxsize elements.
    self.assertEqual(len(range(0, sys.maxsize, sys.maxsize - 1)), 2)
    r = range(-sys.maxsize, sys.maxsize, 2)
    self.assertEqual(len(r), sys.maxsize)
def test_range_constructor_error_messages(self):
with self.assertRaisesRegex(
(TypeError, AssertionError),
".*range expected.*",
):
range()
with self.assertRaisesRegex(
TypeError, "range expected at most 3 arguments, got 6"
):
range(1, 2, 3, 4, 5, 6)
def test_large_operands(self):
    """Ranges whose endpoints exceed the machine word size still behave."""
    x = range(10 ** 20, 10 ** 20 + 10, 3)
    self.assertEqual(len(x), 4)
    self.assertEqual(len(list(x)), 4)

    # Wrong-direction steps give empty (and falsy) ranges.
    x = range(10 ** 20 + 10, 10 ** 20, 3)
    self.assertEqual(len(x), 0)
    self.assertEqual(len(list(x)), 0)
    self.assertFalse(x)

    x = range(10 ** 20, 10 ** 20 + 10, -3)
    self.assertEqual(len(x), 0)
    self.assertEqual(len(list(x)), 0)
    self.assertFalse(x)

    x = range(10 ** 20 + 10, 10 ** 20, -3)
    self.assertEqual(len(x), 4)
    self.assertEqual(len(list(x)), 4)
    self.assertTrue(x)

    # Now test range() with longs
    for x in [range(-(2 ** 100)), range(0, -(2 ** 100)), range(0, 2 ** 100, -1)]:
        self.assertEqual(list(x), [])
        self.assertFalse(x)

    a = int(10 * sys.maxsize)
    b = int(100 * sys.maxsize)
    c = int(50 * sys.maxsize)

    self.assertEqual(list(range(a, a + 2)), [a, a + 1])
    self.assertEqual(list(range(a + 2, a, -1)), [a + 2, a + 1])
    self.assertEqual(list(range(a + 4, a, -2)), [a + 4, a + 2])

    # Two-element progressions with huge members: check both endpoints.
    seq = list(range(a, b, c))
    self.assertIn(a, seq)
    self.assertNotIn(b, seq)
    self.assertEqual(len(seq), 2)
    self.assertEqual(seq[0], a)
    self.assertEqual(seq[-1], a + c)

    seq = list(range(b, a, -c))
    self.assertIn(b, seq)
    self.assertNotIn(a, seq)
    self.assertEqual(len(seq), 2)
    self.assertEqual(seq[0], b)
    self.assertEqual(seq[-1], b - c)

    seq = list(range(-a, -b, -c))
    self.assertIn(-a, seq)
    self.assertNotIn(-b, seq)
    self.assertEqual(len(seq), 2)
    self.assertEqual(seq[0], -a)
    self.assertEqual(seq[-1], -a - c)
def test_large_range(self):
    # Check long ranges (len > sys.maxsize)
    # len() is expected to fail due to limitations of the __len__ protocol
    def _range_len(x):
        # Compute the length without len(), which raises OverflowError
        # once the true length no longer fits in a Py_ssize_t.
        try:
            length = len(x)
        except OverflowError:
            step = x[1] - x[0]
            length = 1 + ((x[-1] - x[0]) // step)
        return length

    # Case 1: span the whole signed word, [-maxsize, maxsize).
    a = -sys.maxsize
    b = sys.maxsize
    expected_len = b - a
    x = range(a, b)
    self.assertIn(a, x)
    self.assertNotIn(b, x)
    self.assertRaises(OverflowError, len, x)
    self.assertTrue(x)
    self.assertEqual(_range_len(x), expected_len)
    self.assertEqual(x[0], a)
    idx = sys.maxsize + 1
    self.assertEqual(x[idx], a + idx)
    self.assertEqual(x[idx : idx + 1][0], a + idx)
    with self.assertRaises(IndexError):
        x[-expected_len - 1]
    with self.assertRaises(IndexError):
        x[expected_len]

    # Case 2: length exactly 2 * maxsize.
    a = 0
    b = 2 * sys.maxsize
    expected_len = b - a
    x = range(a, b)
    self.assertIn(a, x)
    self.assertNotIn(b, x)
    self.assertRaises(OverflowError, len, x)
    self.assertTrue(x)
    self.assertEqual(_range_len(x), expected_len)
    self.assertEqual(x[0], a)
    idx = sys.maxsize + 1
    self.assertEqual(x[idx], a + idx)
    self.assertEqual(x[idx : idx + 1][0], a + idx)
    with self.assertRaises(IndexError):
        x[-expected_len - 1]
    with self.assertRaises(IndexError):
        x[expected_len]

    # Case 3: astronomically large stop with a large positive step.
    a = 0
    b = sys.maxsize ** 10
    c = 2 * sys.maxsize
    expected_len = 1 + (b - a) // c
    x = range(a, b, c)
    self.assertIn(a, x)
    self.assertNotIn(b, x)
    self.assertRaises(OverflowError, len, x)
    self.assertTrue(x)
    self.assertEqual(_range_len(x), expected_len)
    self.assertEqual(x[0], a)
    idx = sys.maxsize + 1
    self.assertEqual(x[idx], a + (idx * c))
    self.assertEqual(x[idx : idx + 1][0], a + (idx * c))
    with self.assertRaises(IndexError):
        x[-expected_len - 1]
    with self.assertRaises(IndexError):
        x[expected_len]

    # Case 4: the same shape, descending.
    a = sys.maxsize ** 10
    b = 0
    c = -2 * sys.maxsize
    expected_len = 1 + (b - a) // c
    x = range(a, b, c)
    self.assertIn(a, x)
    self.assertNotIn(b, x)
    self.assertRaises(OverflowError, len, x)
    self.assertTrue(x)
    self.assertEqual(_range_len(x), expected_len)
    self.assertEqual(x[0], a)
    idx = sys.maxsize + 1
    self.assertEqual(x[idx], a + (idx * c))
    self.assertEqual(x[idx : idx + 1][0], a + (idx * c))
    with self.assertRaises(IndexError):
        x[-expected_len - 1]
    with self.assertRaises(IndexError):
        x[expected_len]
def test_invalid_invocation(self):
self.assertRaises(TypeError, range)
self.assertRaises(TypeError, range, 1, 2, 3, 4)
self.assertRaises(ValueError, range, 1, 2, 0)
a = int(10 * sys.maxsize)
self.assertRaises(ValueError, range, a, a + 1, int(0))
self.assertRaises(TypeError, range, 1.0, 1.0, 1.0)
self.assertRaises(TypeError, range, 1e100, 1e101, 1e101)
self.assertRaises(TypeError, range, 0, "spam")
self.assertRaises(TypeError, range, 0, 42, "spam")
# Exercise various combinations of bad arguments, to check
# refcounting logic
self.assertRaises(TypeError, range, 0.0)
self.assertRaises(TypeError, range, 0, 0.0)
self.assertRaises(TypeError, range, 0.0, 0)
self.assertRaises(TypeError, range, 0.0, 0.0)
self.assertRaises(TypeError, range, 0, 0, 1.0)
self.assertRaises(TypeError, range, 0, 0.0, 1)
self.assertRaises(TypeError, range, 0, 0.0, 1.0)
self.assertRaises(TypeError, range, 0.0, 0, 1)
self.assertRaises(TypeError, range, 0.0, 0, 1.0)
self.assertRaises(TypeError, range, 0.0, 0.0, 1)
self.assertRaises(TypeError, range, 0.0, 0.0, 1.0)
def test_index(self):
    """index() finds members, raises ValueError otherwise, and propagates
    exceptions raised by user __eq__ during the scan."""
    u = range(2)
    self.assertEqual(u.index(0), 0)
    self.assertEqual(u.index(1), 1)
    self.assertRaises(ValueError, u.index, 2)

    u = range(-2, 3)
    self.assertEqual(u.count(0), 1)
    self.assertEqual(u.index(0), 2)
    self.assertRaises(TypeError, u.index)

    class BadExc(Exception):
        pass

    class BadCmp:
        # An __eq__ that blows up part-way through the scan.
        def __eq__(self, other):
            if other == 2:
                raise BadExc()
            return False

    a = range(4)
    self.assertRaises(BadExc, a.index, BadCmp())
    a = range(-2, 3)
    self.assertEqual(a.index(0), 2)
    self.assertEqual(range(1, 10, 3).index(4), 1)
    self.assertEqual(range(1, -10, -3).index(-5), 2)

    # Huge ranges: index() must not materialize the sequence.
    self.assertEqual(range(10 ** 20).index(1), 1)
    self.assertEqual(range(10 ** 20).index(10 ** 20 - 1), 10 ** 20 - 1)

    self.assertRaises(ValueError, range(1, 2 ** 100, 2).index, 2 ** 87)
    self.assertEqual(range(1, 2 ** 100, 2).index(2 ** 87 + 1), 2 ** 86)

    # Membership is governed by equality, not identity.
    self.assertEqual(range(10).index(ALWAYS_EQ), 0)
def test_user_index_method(self):
    """Arguments are converted through __index__, and its errors propagate."""
    bignum = 2 * sys.maxsize
    smallnum = 42

    # User-defined class with an __index__ method
    class I:
        def __init__(self, n):
            self.n = int(n)

        def __index__(self):
            return self.n

    self.assertEqual(list(range(I(bignum), I(bignum + 1))), [bignum])
    self.assertEqual(list(range(I(smallnum), I(smallnum + 1))), [smallnum])

    # User-defined class with a failing __index__ method
    class IX:
        def __index__(self):
            raise RuntimeError

    self.assertRaises(RuntimeError, range, IX())

    # User-defined class with an invalid __index__ method
    class IN:
        def __index__(self):
            return "not a number"

    self.assertRaises(TypeError, range, IN())

    # Test use of user-defined classes in slice indices.
    self.assertEqual(range(10)[: I(5)], range(5))

    with self.assertRaises(RuntimeError):
        range(0, 10)[: IX()]

    with self.assertRaises(TypeError):
        range(0, 10)[: IN()]
def test_count(self):
self.assertEqual(range(3).count(-1), 0)
self.assertEqual(range(3).count(0), 1)
self.assertEqual(range(3).count(1), 1)
self.assertEqual(range(3).count(2), 1)
self.assertEqual(range(3).count(3), 0)
self.assertIs(type(range(3).count(-1)), int)
self.assertIs(type(range(3).count(1)), int)
self.assertEqual(range(10 ** 20).count(1), 1)
self.assertEqual(range(10 ** 20).count(10 ** 20), 0)
self.assertEqual(range(3).index(1), 1)
self.assertEqual(range(1, 2 ** 100, 2).count(2 ** 87), 0)
self.assertEqual(range(1, 2 ** 100, 2).count(2 ** 87 + 1), 1)
self.assertEqual(range(10).count(ALWAYS_EQ), 10)
self.assertEqual(len(range(sys.maxsize, sys.maxsize + 10)), 10)
    def test_repr(self):
        # repr always spells out start, even when it is the default 0,
        # and omits step only when it is the default 1.
        self.assertEqual(repr(range(1)), "range(0, 1)")
        self.assertEqual(repr(range(1, 2)), "range(1, 2)")
        self.assertEqual(repr(range(1, 2, 3)), "range(1, 2, 3)")
def test_pickling(self):
testcases = [
(13,),
(0, 11),
(-22, 10),
(20, 3, -1),
(13, 21, 3),
(-2, 2, 2),
(2 ** 65, 2 ** 65 + 2),
]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for t in testcases:
with self.subTest(proto=proto, test=t):
r = range(*t)
self.assertEqual(
list(pickle.loads(pickle.dumps(r, proto))), list(r)
)
def test_iterator_pickling(self):
testcases = [
(13,),
(0, 11),
(-22, 10),
(20, 3, -1),
(13, 21, 3),
(-2, 2, 2),
(2 ** 65, 2 ** 65 + 2),
]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for t in testcases:
it = itorg = iter(range(*t))
data = list(range(*t))
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(type(itorg), type(it))
self.assertEqual(list(it), data)
it = pickle.loads(d)
try:
next(it)
except StopIteration:
continue
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(list(it), data[1:])
def test_exhausted_iterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
r = range(2 ** 65, 2 ** 65 + 2)
i = iter(r)
while True:
r = next(i)
if r == 2 ** 65 + 1:
break
d = pickle.dumps(i, proto)
i2 = pickle.loads(d)
self.assertEqual(list(i), [])
self.assertEqual(list(i2), [])
def test_large_exhausted_iterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
r = range(20)
i = iter(r)
while True:
r = next(i)
if r == 19:
break
d = pickle.dumps(i, proto)
i2 = pickle.loads(d)
self.assertEqual(list(i), [])
self.assertEqual(list(i2), [])
    def test_odd_bug(self):
        # Regression test: a non-integer start must surface TypeError.
        # This used to raise a "SystemError: NULL result without error"
        # because the range validation step was eating the exception
        # before NULL was returned.
        with self.assertRaises(TypeError):
            range([], 1, -1)
def test_types(self):
# Non-integer objects *equal* to any of the range's items are supposed
# to be contained in the range.
self.assertIn(1.0, range(3))
self.assertIn(True, range(3))
self.assertIn(1 + 0j, range(3))
self.assertIn(ALWAYS_EQ, range(3))
# Objects are never coerced into other types for comparison.
class C2:
def __int__(self):
return 1
def __index__(self):
return 1
self.assertNotIn(C2(), range(3))
# ..except if explicitly told so.
self.assertIn(int(C2()), range(3))
# Check that the range.__contains__ optimization is only
# used for ints, not for instances of subclasses of int.
class C3(int):
def __eq__(self, other):
return True
self.assertIn(C3(11), range(10))
self.assertIn(C3(11), list(range(10)))
def test_strided_limits(self):
r = range(0, 101, 2)
self.assertIn(0, r)
self.assertNotIn(1, r)
self.assertIn(2, r)
self.assertNotIn(99, r)
self.assertIn(100, r)
self.assertNotIn(101, r)
r = range(0, -20, -1)
self.assertIn(0, r)
self.assertIn(-1, r)
self.assertIn(-19, r)
self.assertNotIn(-20, r)
r = range(0, -20, -2)
self.assertIn(-18, r)
self.assertNotIn(-19, r)
self.assertNotIn(-20, r)
def test_empty(self):
r = range(0)
self.assertNotIn(0, r)
self.assertNotIn(1, r)
r = range(0, -10)
self.assertNotIn(0, r)
self.assertNotIn(-1, r)
self.assertNotIn(1, r)
def test_range_iterators(self):
# exercise 'fast' iterators, that use a rangeiterobject internally.
# see issue 7298
limits = [
base + jiggle
for M in (2 ** 32, 2 ** 64)
for base in (-M, -M // 2, 0, M // 2, M)
for jiggle in (-2, -1, 0, 1, 2)
]
test_ranges = [
(start, end, step)
for start in limits
for end in limits
for step in (-(2 ** 63), -(2 ** 31), -2, -1, 1, 2)
]
for start, end, step in test_ranges:
iter1 = range(start, end, step)
iter2 = pyrange(start, end, step)
test_id = f"range({start}, {end}, {step})"
# check first 100 entries
self.assert_iterators_equal(iter1, iter2, test_id, limit=100)
iter1 = reversed(range(start, end, step))
iter2 = pyrange_reversed(start, end, step)
test_id = f"reversed(range({start}, {end}, {step}))"
self.assert_iterators_equal(iter1, iter2, test_id, limit=100)
def test_range_iterators_invocation(self):
# verify range iterators instances cannot be created by
# calling their type
rangeiter_type = type(iter(range(0)))
if sys.version_info >= (3, 7):
self.assertRaises(TypeError, rangeiter_type, 1, 3, 1)
long_rangeiter_type = type(iter(range(1 << 1000)))
self.assertRaises(TypeError, long_rangeiter_type, 1, 3, 1)
def test_slice(self):
def check(start, stop, step=None):
i = slice(start, stop, step)
self.assertEqual(list(r[i]), list(r)[i])
self.assertEqual(len(r[i]), len(list(r)[i]))
for r in [
range(10),
range(0),
range(1, 9, 3),
range(8, 0, -3),
range(sys.maxsize + 1, sys.maxsize + 10),
]:
check(0, 2)
check(0, 20)
check(1, 2)
check(20, 30)
check(-30, -20)
check(-1, 100, 2)
check(0, -1)
check(-1, -3, -1)
def test_contains(self):
r = range(10)
self.assertIn(0, r)
self.assertIn(1, r)
self.assertIn(5.0, r)
self.assertNotIn(5.1, r)
self.assertNotIn(-1, r)
self.assertNotIn(10, r)
self.assertNotIn("", r)
r = range(9, -1, -1)
self.assertIn(0, r)
self.assertIn(1, r)
self.assertIn(5.0, r)
self.assertNotIn(5.1, r)
self.assertNotIn(-1, r)
self.assertNotIn(10, r)
self.assertNotIn("", r)
r = range(0, 10, 2)
self.assertIn(0, r)
self.assertNotIn(1, r)
self.assertNotIn(5.0, r)
self.assertNotIn(5.1, r)
self.assertNotIn(-1, r)
self.assertNotIn(10, r)
self.assertNotIn("", r)
r = range(9, -1, -2)
self.assertNotIn(0, r)
self.assertIn(1, r)
self.assertIn(5.0, r)
self.assertNotIn(5.1, r)
self.assertNotIn(-1, r)
self.assertNotIn(10, r)
self.assertNotIn("", r)
def test_reverse_iteration(self):
for r in [
range(10),
range(0),
range(1, 9, 3),
range(8, 0, -3),
range(sys.maxsize + 1, sys.maxsize + 10),
]:
self.assertEqual(list(reversed(r)), list(r)[::-1])
def test_issue11845(self):
r = range(*slice(1, 18, 2).indices(20))
values = {
None,
0,
1,
-1,
2,
-2,
5,
-5,
19,
-19,
20,
-20,
21,
-21,
30,
-30,
99,
-99,
}
for i in values:
for j in values:
for k in values - {0}:
r[i:j:k]
def test_comparison(self):
test_ranges = [
range(0),
range(0, -1),
range(1, 1, 3),
range(1),
range(5, 6),
range(5, 6, 2),
range(5, 7, 2),
range(2),
range(0, 4, 2),
range(0, 5, 2),
range(0, 6, 2),
]
test_tuples = list(map(tuple, test_ranges))
# Check that equality of ranges matches equality of the corresponding
# tuples for each pair from the test lists above.
ranges_eq = [a == b for a in test_ranges for b in test_ranges]
tuples_eq = [a == b for a in test_tuples for b in test_tuples]
self.assertEqual(ranges_eq, tuples_eq)
# Check that != correctly gives the logical negation of ==
ranges_ne = [a != b for a in test_ranges for b in test_ranges]
self.assertEqual(ranges_ne, [not x for x in ranges_eq])
# Equal ranges should have equal hashes.
for a in test_ranges:
for b in test_ranges:
if a == b:
self.assertEqual(hash(a), hash(b))
# Ranges are unequal to other types (even sequence types)
self.assertIs(range(0) == (), False)
self.assertIs(() == range(0), False)
self.assertIs(range(2) == [0, 1], False)
# Huge integers aren't a problem.
self.assertEqual(range(0, 2 ** 100 - 1, 2), range(0, 2 ** 100, 2))
self.assertEqual(hash(range(0, 2 ** 100 - 1, 2)), hash(range(0, 2 ** 100, 2)))
self.assertNotEqual(range(0, 2 ** 100, 2), range(0, 2 ** 100 + 1, 2))
self.assertEqual(
range(2 ** 200, 2 ** 201 - 2 ** 99, 2 ** 100),
range(2 ** 200, 2 ** 201, 2 ** 100),
)
self.assertEqual(
hash(range(2 ** 200, 2 ** 201 - 2 ** 99, 2 ** 100)),
hash(range(2 ** 200, 2 ** 201, 2 ** 100)),
)
self.assertNotEqual(
range(2 ** 200, 2 ** 201, 2 ** 100), range(2 ** 200, 2 ** 201 + 1, 2 ** 100)
)
# Order comparisons are not implemented for ranges.
with self.assertRaises(TypeError):
range(0) < range(0)
with self.assertRaises(TypeError):
range(0) > range(0)
with self.assertRaises(TypeError):
range(0) <= range(0)
with self.assertRaises(TypeError):
range(0) >= range(0)
def test_attributes(self):
# test the start, stop and step attributes of range objects
self.assert_attrs(range(0), 0, 0, 1)
self.assert_attrs(range(10), 0, 10, 1)
self.assert_attrs(range(-10), 0, -10, 1)
self.assert_attrs(range(0, 10, 1), 0, 10, 1)
self.assert_attrs(range(0, 10, 3), 0, 10, 3)
self.assert_attrs(range(10, 0, -1), 10, 0, -1)
self.assert_attrs(range(10, 0, -3), 10, 0, -3)
self.assert_attrs(range(True), 0, 1, 1)
self.assert_attrs(range(False, True), 0, 1, 1)
self.assert_attrs(range(False, True, True), 0, 1, 1)
def assert_attrs(self, rangeobj, start, stop, step):
self.assertEqual(rangeobj.start, start)
self.assertEqual(rangeobj.stop, stop)
self.assertEqual(rangeobj.step, step)
if sys.version_info >= (3, 9):
self.assertIs(type(rangeobj.start), int)
self.assertIs(type(rangeobj.stop), int)
self.assertIs(type(rangeobj.step), int)
with self.assertRaises(AttributeError):
rangeobj.start = 0
with self.assertRaises(AttributeError):
rangeobj.stop = 10
with self.assertRaises(AttributeError):
rangeobj.step = 1
with self.assertRaises(AttributeError):
del rangeobj.start
with self.assertRaises(AttributeError):
del rangeobj.stop
with self.assertRaises(AttributeError):
del rangeobj.step
if __name__ == "__main__":
unittest.main()
Debugging failing range test
# flake8: noqa
"""
Tests copied from cpython test suite:
https://github.com/python/cpython/blob/3.9/Lib/test/test_range.py
"""
# stdlib
import itertools
import pickle
import sys
import unittest
# Prefer the canonical helper from CPython's test-support package; fall
# back to a local copy when `test.support` is unavailable (some
# distributions strip the stdlib test suite).
try:
    # stdlib
    from test.support import ALWAYS_EQ
except ImportError:
    class _ALWAYS_EQ:
        """
        https://github.com/python/cpython/blob/3.9/Lib/test/support/__init__.py
        Object that is equal to anything.
        """
        def __eq__(self, other):
            return True
        def __ne__(self, other):
            return False
    ALWAYS_EQ = _ALWAYS_EQ()
# pure Python implementations (3 args only), for comparison
def pyrange(start, stop, step):
    """Pure-Python generator equivalent to ``range(start, stop, step)``
    (3-argument form only); used as a reference implementation."""
    if (start - stop) // step < 0:
        # Normalise `stop` to the first value past the range that is
        # congruent to `start` modulo `step`, so the `!=` test below
        # terminates exactly even for huge ints.
        stop += (start - stop) % step
        current = start
        while current != stop:
            yield current
            current += step
def pyrange_reversed(start, stop, step):
    """Reference implementation of ``reversed(range(start, stop, step))``."""
    # Align `stop` onto the start-modulo-step lattice, then walk the
    # sequence backwards by delegating to pyrange with direction flipped.
    aligned_stop = stop + (start - stop) % step
    return pyrange(aligned_stop - step, start - step, -step)
class RangeTest(unittest.TestCase):
def assert_iterators_equal(self, xs, ys, test_id, limit=None):
# check that an iterator xs matches the expected results ys,
# up to a given limit.
if limit is not None:
xs = itertools.islice(xs, limit)
ys = itertools.islice(ys, limit)
sentinel = object()
pairs = itertools.zip_longest(xs, ys, fillvalue=sentinel)
for i, (x, y) in enumerate(pairs):
if x == y:
continue
elif x == sentinel:
self.fail(
"{}: iterator ended unexpectedly "
"at position {}; expected {}".format(test_id, i, y)
)
elif y == sentinel:
self.fail(f"{test_id}: unexpected excess element {x} at position {i}")
else:
self.fail(
f"{test_id}: wrong element at position {i}; expected {y}, got {x}"
)
def test_range(self):
self.assertEqual(list(range(3)), [0, 1, 2])
self.assertEqual(list(range(1, 5)), [1, 2, 3, 4])
self.assertEqual(list(range(0)), [])
self.assertEqual(list(range(-3)), [])
self.assertEqual(list(range(1, 10, 3)), [1, 4, 7])
self.assertEqual(list(range(5, -5, -3)), [5, 2, -1, -4])
a = 10
b = 100
c = 50
self.assertEqual(list(range(a, a + 2)), [a, a + 1])
self.assertEqual(list(range(a + 2, a, -1)), [a + 2, a + 1])
self.assertEqual(list(range(a + 4, a, -2)), [a + 4, a + 2])
seq = list(range(a, b, c))
self.assertIn(a, seq)
self.assertNotIn(b, seq)
self.assertEqual(len(seq), 2)
seq = list(range(b, a, -c))
self.assertIn(b, seq)
self.assertNotIn(a, seq)
self.assertEqual(len(seq), 2)
seq = list(range(-a, -b, -c))
self.assertIn(-a, seq)
self.assertNotIn(-b, seq)
self.assertEqual(len(seq), 2)
self.assertRaises(TypeError, range)
self.assertRaises(TypeError, range, 1, 2, 3, 4)
self.assertRaises(ValueError, range, 1, 2, 0)
self.assertRaises(TypeError, range, 0.0, 2, 1)
self.assertRaises(TypeError, range, 1, 2.0, 1)
self.assertRaises(TypeError, range, 1, 2, 1.0)
self.assertRaises(TypeError, range, 1e100, 1e101, 1e101)
self.assertRaises(TypeError, range, 0, "spam")
self.assertRaises(TypeError, range, 0, 42, "spam")
self.assertEqual(len(range(0, sys.maxsize, sys.maxsize - 1)), 2)
r = range(-sys.maxsize, sys.maxsize, 2)
self.assertEqual(len(r), sys.maxsize)
def test_range_constructor_error_messages(self):
with self.assertRaisesRegex(
(TypeError, AssertionError),
".*range expected.*",
):
range()
with self.assertRaisesRegex(
TypeError, "range expected at most 3 arguments, got 6"
):
range(1, 2, 3, 4, 5, 6)
def test_large_operands(self):
x = range(10 ** 20, 10 ** 20 + 10, 3)
self.assertEqual(len(x), 4)
self.assertEqual(len(list(x)), 4)
x = range(10 ** 20 + 10, 10 ** 20, 3)
self.assertEqual(len(x), 0)
self.assertEqual(len(list(x)), 0)
self.assertFalse(x)
x = range(10 ** 20, 10 ** 20 + 10, -3)
self.assertEqual(len(x), 0)
self.assertEqual(len(list(x)), 0)
self.assertFalse(x)
x = range(10 ** 20 + 10, 10 ** 20, -3)
self.assertEqual(len(x), 4)
self.assertEqual(len(list(x)), 4)
self.assertTrue(x)
# Now test range() with longs
for x in [range(-(2 ** 100)), range(0, -(2 ** 100)), range(0, 2 ** 100, -1)]:
self.assertEqual(list(x), [])
self.assertFalse(x)
a = int(10 * sys.maxsize)
b = int(100 * sys.maxsize)
c = int(50 * sys.maxsize)
self.assertEqual(list(range(a, a + 2)), [a, a + 1])
self.assertEqual(list(range(a + 2, a, -1)), [a + 2, a + 1])
self.assertEqual(list(range(a + 4, a, -2)), [a + 4, a + 2])
seq = list(range(a, b, c))
self.assertIn(a, seq)
self.assertNotIn(b, seq)
self.assertEqual(len(seq), 2)
self.assertEqual(seq[0], a)
self.assertEqual(seq[-1], a + c)
seq = list(range(b, a, -c))
self.assertIn(b, seq)
self.assertNotIn(a, seq)
self.assertEqual(len(seq), 2)
self.assertEqual(seq[0], b)
self.assertEqual(seq[-1], b - c)
seq = list(range(-a, -b, -c))
self.assertIn(-a, seq)
self.assertNotIn(-b, seq)
self.assertEqual(len(seq), 2)
self.assertEqual(seq[0], -a)
self.assertEqual(seq[-1], -a - c)
def test_large_range(self):
# Check long ranges (len > sys.maxsize)
# len() is expected to fail due to limitations of the __len__ protocol
def _range_len(x):
try:
length = len(x)
except OverflowError:
step = x[1] - x[0]
length = 1 + ((x[-1] - x[0]) // step)
return length
a = -sys.maxsize
b = sys.maxsize
expected_len = b - a
x = range(a, b)
self.assertIn(a, x)
self.assertNotIn(b, x)
self.assertRaises(OverflowError, len, x)
self.assertTrue(x)
self.assertEqual(_range_len(x), expected_len)
self.assertEqual(x[0], a)
idx = sys.maxsize + 1
self.assertEqual(x[idx], a + idx)
self.assertEqual(x[idx : idx + 1][0], a + idx)
with self.assertRaises(IndexError):
x[-expected_len - 1]
with self.assertRaises(IndexError):
x[expected_len]
a = 0
b = 2 * sys.maxsize
expected_len = b - a
x = range(a, b)
self.assertIn(a, x)
self.assertNotIn(b, x)
self.assertRaises(OverflowError, len, x)
self.assertTrue(x)
self.assertEqual(_range_len(x), expected_len)
self.assertEqual(x[0], a)
idx = sys.maxsize + 1
self.assertEqual(x[idx], a + idx)
self.assertEqual(x[idx : idx + 1][0], a + idx)
with self.assertRaises(IndexError):
x[-expected_len - 1]
with self.assertRaises(IndexError):
x[expected_len]
a = 0
b = sys.maxsize ** 10
c = 2 * sys.maxsize
expected_len = 1 + (b - a) // c
x = range(a, b, c)
self.assertIn(a, x)
self.assertNotIn(b, x)
self.assertRaises(OverflowError, len, x)
self.assertTrue(x)
self.assertEqual(_range_len(x), expected_len)
self.assertEqual(x[0], a)
idx = sys.maxsize + 1
self.assertEqual(x[idx], a + (idx * c))
self.assertEqual(x[idx : idx + 1][0], a + (idx * c))
with self.assertRaises(IndexError):
x[-expected_len - 1]
with self.assertRaises(IndexError):
x[expected_len]
a = sys.maxsize ** 10
b = 0
c = -2 * sys.maxsize
expected_len = 1 + (b - a) // c
x = range(a, b, c)
self.assertIn(a, x)
self.assertNotIn(b, x)
self.assertRaises(OverflowError, len, x)
self.assertTrue(x)
self.assertEqual(_range_len(x), expected_len)
self.assertEqual(x[0], a)
idx = sys.maxsize + 1
self.assertEqual(x[idx], a + (idx * c))
self.assertEqual(x[idx : idx + 1][0], a + (idx * c))
with self.assertRaises(IndexError):
x[-expected_len - 1]
with self.assertRaises(IndexError):
x[expected_len]
def test_invalid_invocation(self):
self.assertRaises(TypeError, range)
self.assertRaises(TypeError, range, 1, 2, 3, 4)
self.assertRaises(ValueError, range, 1, 2, 0)
a = int(10 * sys.maxsize)
self.assertRaises(ValueError, range, a, a + 1, int(0))
self.assertRaises(TypeError, range, 1.0, 1.0, 1.0)
self.assertRaises(TypeError, range, 1e100, 1e101, 1e101)
self.assertRaises(TypeError, range, 0, "spam")
self.assertRaises(TypeError, range, 0, 42, "spam")
# Exercise various combinations of bad arguments, to check
# refcounting logic
self.assertRaises(TypeError, range, 0.0)
self.assertRaises(TypeError, range, 0, 0.0)
self.assertRaises(TypeError, range, 0.0, 0)
self.assertRaises(TypeError, range, 0.0, 0.0)
self.assertRaises(TypeError, range, 0, 0, 1.0)
self.assertRaises(TypeError, range, 0, 0.0, 1)
self.assertRaises(TypeError, range, 0, 0.0, 1.0)
self.assertRaises(TypeError, range, 0.0, 0, 1)
self.assertRaises(TypeError, range, 0.0, 0, 1.0)
self.assertRaises(TypeError, range, 0.0, 0.0, 1)
self.assertRaises(TypeError, range, 0.0, 0.0, 1.0)
def test_index(self):
u = range(2)
self.assertEqual(u.index(0), 0)
self.assertEqual(u.index(1), 1)
self.assertRaises(ValueError, u.index, 2)
u = range(-2, 3)
self.assertEqual(u.count(0), 1)
self.assertEqual(u.index(0), 2)
self.assertRaises(TypeError, u.index)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
a = range(4)
self.assertRaises(BadExc, a.index, BadCmp())
a = range(-2, 3)
self.assertEqual(a.index(0), 2)
self.assertEqual(range(1, 10, 3).index(4), 1)
self.assertEqual(range(1, -10, -3).index(-5), 2)
self.assertEqual(range(10 ** 20).index(1), 1)
self.assertEqual(range(10 ** 20).index(10 ** 20 - 1), 10 ** 20 - 1)
self.assertRaises(ValueError, range(1, 2 ** 100, 2).index, 2 ** 87)
self.assertEqual(range(1, 2 ** 100, 2).index(2 ** 87 + 1), 2 ** 86)
self.assertEqual(range(10).index(ALWAYS_EQ), 0)
def test_user_index_method(self):
bignum = 2 * sys.maxsize
smallnum = 42
# User-defined class with an __index__ method
class I:
def __init__(self, n):
self.n = int(n)
def __index__(self):
return self.n
self.assertEqual(list(range(I(bignum), I(bignum + 1))), [bignum])
self.assertEqual(list(range(I(smallnum), I(smallnum + 1))), [smallnum])
# User-defined class with a failing __index__ method
class IX:
def __index__(self):
raise RuntimeError
self.assertRaises(RuntimeError, range, IX())
# User-defined class with an invalid __index__ method
class IN:
def __index__(self):
return "not a number"
self.assertRaises(TypeError, range, IN())
# Test use of user-defined classes in slice indices.
self.assertEqual(range(10)[: I(5)], range(5))
with self.assertRaises(RuntimeError):
range(0, 10)[: IX()]
with self.assertRaises(TypeError):
range(0, 10)[: IN()]
def test_count(self):
self.assertEqual(range(3).count(-1), 0)
self.assertEqual(range(3).count(0), 1)
self.assertEqual(range(3).count(1), 1)
self.assertEqual(range(3).count(2), 1)
self.assertEqual(range(3).count(3), 0)
self.assertIs(type(range(3).count(-1)), int)
self.assertIs(type(range(3).count(1)), int)
self.assertEqual(range(10 ** 20).count(1), 1)
self.assertEqual(range(10 ** 20).count(10 ** 20), 0)
self.assertEqual(range(3).index(1), 1)
self.assertEqual(range(1, 2 ** 100, 2).count(2 ** 87), 0)
self.assertEqual(range(1, 2 ** 100, 2).count(2 ** 87 + 1), 1)
self.assertEqual(range(10).count(ALWAYS_EQ), 10)
self.assertEqual(len(range(sys.maxsize, sys.maxsize + 10)), 10)
    def test_repr(self):
        # repr always spells out start, even when it is the default 0,
        # and omits step only when it is the default 1.
        self.assertEqual(repr(range(1)), "range(0, 1)")
        self.assertEqual(repr(range(1, 2)), "range(1, 2)")
        self.assertEqual(repr(range(1, 2, 3)), "range(1, 2, 3)")
def test_pickling(self):
testcases = [
(13,),
(0, 11),
(-22, 10),
(20, 3, -1),
(13, 21, 3),
(-2, 2, 2),
(2 ** 65, 2 ** 65 + 2),
]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for t in testcases:
with self.subTest(proto=proto, test=t):
r = range(*t)
self.assertEqual(
list(pickle.loads(pickle.dumps(r, proto))), list(r)
)
def test_iterator_pickling(self):
testcases = [
(13,),
(0, 11),
(-22, 10),
(20, 3, -1),
(13, 21, 3),
(-2, 2, 2),
(2 ** 65, 2 ** 65 + 2),
]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for t in testcases:
it = itorg = iter(range(*t))
data = list(range(*t))
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(type(itorg), type(it))
self.assertEqual(list(it), data)
it = pickle.loads(d)
try:
next(it)
except StopIteration:
continue
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(list(it), data[1:])
def test_exhausted_iterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
r = range(2 ** 65, 2 ** 65 + 2)
i = iter(r)
while True:
r = next(i)
if r == 2 ** 65 + 1:
break
d = pickle.dumps(i, proto)
i2 = pickle.loads(d)
self.assertEqual(list(i), [])
self.assertEqual(list(i2), [])
def test_large_exhausted_iterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
r = range(20)
i = iter(r)
while True:
r = next(i)
if r == 19:
break
d = pickle.dumps(i, proto)
i2 = pickle.loads(d)
self.assertEqual(list(i), [])
self.assertEqual(list(i2), [])
    def test_odd_bug(self):
        # Regression test: a non-integer start must surface TypeError.
        # This used to raise a "SystemError: NULL result without error"
        # because the range validation step was eating the exception
        # before NULL was returned.
        with self.assertRaises(TypeError):
            range([], 1, -1)
def test_types(self):
# Non-integer objects *equal* to any of the range's items are supposed
# to be contained in the range.
self.assertIn(1.0, range(3))
self.assertIn(True, range(3))
self.assertIn(1 + 0j, range(3))
self.assertIn(ALWAYS_EQ, range(3))
# Objects are never coerced into other types for comparison.
class C2:
def __int__(self):
return 1
def __index__(self):
return 1
self.assertNotIn(C2(), range(3))
# ..except if explicitly told so.
self.assertIn(int(C2()), range(3))
# Check that the range.__contains__ optimization is only
# used for ints, not for instances of subclasses of int.
class C3(int):
def __eq__(self, other):
return True
self.assertIn(C3(11), range(10))
self.assertIn(C3(11), list(range(10)))
def test_strided_limits(self):
r = range(0, 101, 2)
self.assertIn(0, r)
self.assertNotIn(1, r)
self.assertIn(2, r)
self.assertNotIn(99, r)
self.assertIn(100, r)
self.assertNotIn(101, r)
r = range(0, -20, -1)
self.assertIn(0, r)
self.assertIn(-1, r)
self.assertIn(-19, r)
self.assertNotIn(-20, r)
r = range(0, -20, -2)
self.assertIn(-18, r)
self.assertNotIn(-19, r)
self.assertNotIn(-20, r)
def test_empty(self):
r = range(0)
self.assertNotIn(0, r)
self.assertNotIn(1, r)
r = range(0, -10)
self.assertNotIn(0, r)
self.assertNotIn(-1, r)
self.assertNotIn(1, r)
def test_range_iterators(self):
# exercise 'fast' iterators, that use a rangeiterobject internally.
# see issue 7298
limits = [
base + jiggle
for M in (2 ** 32, 2 ** 64)
for base in (-M, -M // 2, 0, M // 2, M)
for jiggle in (-2, -1, 0, 1, 2)
]
test_ranges = [
(start, end, step)
for start in limits
for end in limits
for step in (-(2 ** 63), -(2 ** 31), -2, -1, 1, 2)
]
for start, end, step in test_ranges:
iter1 = range(start, end, step)
iter2 = pyrange(start, end, step)
test_id = f"range({start}, {end}, {step})"
# check first 100 entries
self.assert_iterators_equal(iter1, iter2, test_id, limit=100)
iter1 = reversed(range(start, end, step))
iter2 = pyrange_reversed(start, end, step)
test_id = f"reversed(range({start}, {end}, {step}))"
self.assert_iterators_equal(iter1, iter2, test_id, limit=100)
def test_range_iterators_invocation(self):
# verify range iterators instances cannot be created by
# calling their type
rangeiter_type = type(iter(range(0)))
if sys.version_info >= (3, 7):
self.assertRaises(TypeError, rangeiter_type, 1, 3, 1)
long_rangeiter_type = type(iter(range(1 << 1000)))
self.assertRaises(TypeError, long_rangeiter_type, 1, 3, 1)
def test_slice(self):
def check(start, stop, step=None):
i = slice(start, stop, step)
self.assertEqual(list(r[i]), list(r)[i])
self.assertEqual(len(r[i]), len(list(r)[i]))
for r in [
range(10),
range(0),
range(1, 9, 3),
range(8, 0, -3),
range(sys.maxsize + 1, sys.maxsize + 10),
]:
check(0, 2)
check(0, 20)
check(1, 2)
check(20, 30)
check(-30, -20)
check(-1, 100, 2)
check(0, -1)
check(-1, -3, -1)
def test_contains(self):
r = range(10)
self.assertIn(0, r)
self.assertIn(1, r)
self.assertIn(5.0, r)
self.assertNotIn(5.1, r)
self.assertNotIn(-1, r)
self.assertNotIn(10, r)
self.assertNotIn("", r)
r = range(9, -1, -1)
self.assertIn(0, r)
self.assertIn(1, r)
self.assertIn(5.0, r)
self.assertNotIn(5.1, r)
self.assertNotIn(-1, r)
self.assertNotIn(10, r)
self.assertNotIn("", r)
r = range(0, 10, 2)
self.assertIn(0, r)
self.assertNotIn(1, r)
self.assertNotIn(5.0, r)
self.assertNotIn(5.1, r)
self.assertNotIn(-1, r)
self.assertNotIn(10, r)
self.assertNotIn("", r)
r = range(9, -1, -2)
self.assertNotIn(0, r)
self.assertIn(1, r)
self.assertIn(5.0, r)
self.assertNotIn(5.1, r)
self.assertNotIn(-1, r)
self.assertNotIn(10, r)
self.assertNotIn("", r)
def test_reverse_iteration(self):
for r in [
range(10),
range(0),
range(1, 9, 3),
range(8, 0, -3),
range(sys.maxsize + 1, sys.maxsize + 10),
]:
self.assertEqual(list(reversed(r)), list(r)[::-1])
def test_issue11845(self):
r = range(*slice(1, 18, 2).indices(20))
values = {
None,
0,
1,
-1,
2,
-2,
5,
-5,
19,
-19,
20,
-20,
21,
-21,
30,
-30,
99,
-99,
}
for i in values:
for j in values:
for k in values - {0}:
r[i:j:k]
def test_comparison(self):
test_ranges = [
range(0),
range(0, -1),
range(1, 1, 3),
range(1),
range(5, 6),
range(5, 6, 2),
range(5, 7, 2),
range(2),
range(0, 4, 2),
range(0, 5, 2),
range(0, 6, 2),
]
test_tuples = list(map(tuple, test_ranges))
# Check that equality of ranges matches equality of the corresponding
# tuples for each pair from the test lists above.
ranges_eq = [a == b for a in test_ranges for b in test_ranges]
tuples_eq = [a == b for a in test_tuples for b in test_tuples]
self.assertEqual(ranges_eq, tuples_eq)
# Check that != correctly gives the logical negation of ==
ranges_ne = [a != b for a in test_ranges for b in test_ranges]
self.assertEqual(ranges_ne, [not x for x in ranges_eq])
# Equal ranges should have equal hashes.
for a in test_ranges:
for b in test_ranges:
if a == b:
self.assertEqual(hash(a), hash(b))
# Ranges are unequal to other types (even sequence types)
self.assertIs(range(0) == (), False)
self.assertIs(() == range(0), False)
self.assertIs(range(2) == [0, 1], False)
# Huge integers aren't a problem.
self.assertEqual(range(0, 2 ** 100 - 1, 2), range(0, 2 ** 100, 2))
self.assertEqual(hash(range(0, 2 ** 100 - 1, 2)), hash(range(0, 2 ** 100, 2)))
self.assertNotEqual(range(0, 2 ** 100, 2), range(0, 2 ** 100 + 1, 2))
self.assertEqual(
range(2 ** 200, 2 ** 201 - 2 ** 99, 2 ** 100),
range(2 ** 200, 2 ** 201, 2 ** 100),
)
self.assertEqual(
hash(range(2 ** 200, 2 ** 201 - 2 ** 99, 2 ** 100)),
hash(range(2 ** 200, 2 ** 201, 2 ** 100)),
)
self.assertNotEqual(
range(2 ** 200, 2 ** 201, 2 ** 100), range(2 ** 200, 2 ** 201 + 1, 2 ** 100)
)
# Order comparisons are not implemented for ranges.
with self.assertRaises(TypeError):
range(0) < range(0)
with self.assertRaises(TypeError):
range(0) > range(0)
with self.assertRaises(TypeError):
range(0) <= range(0)
with self.assertRaises(TypeError):
range(0) >= range(0)
def test_attributes(self):
# test the start, stop and step attributes of range objects
self.assert_attrs(range(0), 0, 0, 1)
self.assert_attrs(range(10), 0, 10, 1)
self.assert_attrs(range(-10), 0, -10, 1)
self.assert_attrs(range(0, 10, 1), 0, 10, 1)
self.assert_attrs(range(0, 10, 3), 0, 10, 3)
self.assert_attrs(range(10, 0, -1), 10, 0, -1)
self.assert_attrs(range(10, 0, -3), 10, 0, -3)
self.assert_attrs(range(True), 0, 1, 1)
self.assert_attrs(range(False, True), 0, 1, 1)
self.assert_attrs(range(False, True, True), 0, 1, 1)
    def assert_attrs(self, rangeobj, start, stop, step):
        """Assert that `rangeobj` exposes the given start/stop/step values
        and that those attributes are read-only."""
        self.assertEqual(rangeobj.start, start)
        self.assertEqual(rangeobj.stop, stop)
        self.assertEqual(rangeobj.step, step)
        # From 3.9 the constructor normalises bool (and other int
        # subclasses) arguments to exact int attributes.
        if sys.version_info >= (3, 9):
            self.assertIs(type(rangeobj.start), int)
            self.assertIs(type(rangeobj.stop), int)
            self.assertIs(type(rangeobj.step), int)
        # All three attributes reject both assignment and deletion.
        with self.assertRaises(AttributeError):
            rangeobj.start = 0
        with self.assertRaises(AttributeError):
            rangeobj.stop = 10
        with self.assertRaises(AttributeError):
            rangeobj.step = 1
        with self.assertRaises(AttributeError):
            del rangeobj.start
        with self.assertRaises(AttributeError):
            del rangeobj.stop
        with self.assertRaises(AttributeError):
            del rangeobj.step
if __name__ == "__main__":
unittest.main()
|
"""
This module is responsible for the wsgi part of glim framework.
"""
from werkzeug.wrappers import Request, Response
from werkzeug.routing import Map, Rule
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.utils import redirect
from werkzeug.contrib.sessions import FilesystemSessionStore
from glim.utils import import_module
import glim.paths as paths
# Py2/Py3 compatibility shim: on Python 3 the `basestring` builtin no
# longer exists, so probe for it and substitute the tuple of Python 3
# string-like types.
try:
    basestring
except NameError:
    # 'basestring' is undefined, so it must be Python 3
    basestring = (str, bytes)
class Glim(object):
"""
The class that holds the wsgi app of glim framework.
Attributes
----------
config (dict): The 'glim' key of app.config.<env>.
session_store (werkzeug.contrib.sessions.FilesystemSessionStore):
The session store in case of session usage.
url_map (werkzeug.routing.Map): The url map of wsgi app.
Usage
-----
app = Glim(urls, config)
# start the web server
run_simple(host, int(port), app, use_debugger=True, use_reloader=True)
"""
def __init__(self, urls={}, config={}):
self.config = config
try:
self.session_store = FilesystemSessionStore(
self.config['sessions']['path']
)
except:
self.session_store = None
ruleset = self.flatten_urls(urls)
rule_map = []
for url, rule in ruleset.items():
rule_map.append(Rule(url, endpoint=rule))
self.url_map = Map(rule_map)
def flatten_urls(self, urls, current_key="", ruleset={}):
"""
Function flatten urls for route grouping feature of glim. Thanks
for the stackoverflow guy!
Args
----
urls (dict): a dict of url definitions.
current_key (unknown type): a dict or a string marking the
current key that is used for recursive calls.
ruleset (dict): the ruleset that is eventually returned to
dispatcher.
Returns
-------
ruleset (dict): the ruleset to be bound.
"""
for key in urls:
# If the value is of type `dict`, then recurse with the
# value
if isinstance(urls[key], dict):
self.flatten_urls(urls[key], current_key + key)
# Else if the value is type of list, meaning it is a filter
elif isinstance(urls[key], (list, tuple)):
k = ','.join(urls[key])
ruleset[current_key + key] = k
else:
ruleset[current_key + key] = urls[key]
return ruleset
def dispatch_request(self, request):
"""
Function dispatches the request. It also handles route
filtering.
Args
----
request (werkzeug.wrappers.Request): the request
object.
Returns
-------
response (werkzeug.wrappers.Response): the response
object.
"""
adapter = self.url_map.bind_to_environ(request.environ)
try:
endpoint, values = adapter.match()
mcontroller = import_module('app.controllers')
endpoint_pieces = endpoint.split('.')
cls = endpoint_pieces[0]
restful = False
fnc = None
if len(endpoint_pieces) is 1:
restful = True
fnc = None
fnc = endpoint_pieces[1]
obj = getattr(mcontroller, cls)
instance = obj(request)
raw = None
if restful:
raw = getattr(instance, request.method.lower())(**values)
else:
raw = getattr(instance, fnc)(** values)
if isinstance(raw, Response):
return raw
else:
return Response(raw)
except HTTPException as e:
return e
def wsgi_app(self, environ, start_response):
"""
Function returns the wsgi app of glim framework.
Args
----
environ (unknown type): The werkzeug environment.
start_response (function): The werkzeug's start_response
function.
Returns
-------
response (werkzeug.wrappers.Response): the dispatched response
object.
"""
request = Request(environ)
if self.session_store is not None:
sid = request.cookies.get(self.config['sessions']['id_header'])
if sid is None:
request.session = self.session_store.new()
else:
request.session = self.session_store.get(sid)
response = self.dispatch_request(request)
if self.session_store is not None:
if request.session.should_save:
self.session_store.save(request.session)
response.set_cookie(
self.config['sessions']['id_header'],
request.session.sid
)
return response(environ, start_response)
    def __call__(self, environ, start_response):
        # WSGI entry point: delegate to wsgi_app so middleware can wrap it.
        return self.wsgi_app(environ, start_response)
improve restful detection
"""
This module is responsible for the WSGI part of the glim framework.
"""
from werkzeug.wrappers import Request, Response
from werkzeug.routing import Map, Rule
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.utils import redirect
from werkzeug.contrib.sessions import FilesystemSessionStore
from glim.utils import import_module
import glim.paths as paths
# Python 2/3 compatibility shim: on Python 3 `basestring` does not exist,
# so define it as the tuple of string-like types for isinstance checks.
try:
    basestring
except NameError:
    # 'basestring' is undefined, so it must be Python 3
    basestring = (str, bytes)
class Glim(object):
    """
    The WSGI application of the glim framework.

    Attributes
    ----------
    config (dict): The 'glim' key of app.config.<env>.
    session_store (werkzeug.contrib.sessions.FilesystemSessionStore):
        The session store in case of session usage, else None.
    url_map (werkzeug.routing.Map): The url map of the wsgi app.

    Usage
    -----
    app = Glim(urls, config)
    # start the web server
    run_simple(host, int(port), app, use_debugger=True, use_reloader=True)
    """

    def __init__(self, urls=None, config=None):
        # BUG FIX: `urls={}` / `config={}` were mutable default arguments
        # shared across instances; use None sentinels instead.
        self.config = {} if config is None else config
        try:
            self.session_store = FilesystemSessionStore(
                self.config['sessions']['path']
            )
        except Exception:
            # Sessions are optional: missing/invalid sessions config simply
            # disables the store.  (Was a bare `except:`, which also caught
            # SystemExit/KeyboardInterrupt.)
            self.session_store = None
        ruleset = self.flatten_urls({} if urls is None else urls)
        rule_map = [Rule(url, endpoint=rule) for url, rule in ruleset.items()]
        self.url_map = Map(rule_map)

    def flatten_urls(self, urls, current_key="", ruleset=None):
        """
        Flatten nested url definitions for glim's route grouping feature.

        Args
        ----
        urls (dict): a dict of url definitions.
        current_key (str): url prefix accumulated by recursive calls.
        ruleset (dict): accumulator; a fresh dict is created when omitted.
            BUG FIX: this was a mutable default argument, and the recursion
            relied on the *shared* default dict to accumulate — leaking
            rules between separate calls and instances.  The accumulator is
            now passed explicitly.

        Returns
        -------
        ruleset (dict): the flattened ruleset to be bound.
        """
        if ruleset is None:
            ruleset = {}
        for key in urls:
            value = urls[key]
            if isinstance(value, dict):
                # Nested route group: recurse with the accumulator.
                self.flatten_urls(value, current_key + key, ruleset)
            elif isinstance(value, (list, tuple)):
                # A list/tuple value marks a route filter chain.
                ruleset[current_key + key] = ','.join(value)
            else:
                ruleset[current_key + key] = value
        return ruleset

    def dispatch_request(self, request):
        """
        Dispatch the request to the matching controller.

        An endpoint is either ``'Class'`` (RESTful: the lowercased HTTP
        method name is the handler) or ``'Class.function'``.

        Args
        ----
        request (werkzeug.wrappers.Request): the request object.

        Returns
        -------
        response (werkzeug.wrappers.Response): the response object, or the
            HTTPException itself (a valid WSGI app) on routing errors.
        """
        adapter = self.url_map.bind_to_environ(request.environ)
        try:
            endpoint, values = adapter.match()
            mcontroller = import_module('app.controllers')
            endpoint_pieces = endpoint.split('.')
            cls = endpoint_pieces[0]
            restful = False
            fnc = None
            # Use `==`, not the identity check `is`, for int comparison.
            if len(endpoint_pieces) == 1:
                restful = True
            else:
                fnc = endpoint_pieces[1]
            obj = getattr(mcontroller, cls)
            instance = obj(request)
            if restful:
                raw = getattr(instance, request.method.lower())(**values)
            else:
                raw = getattr(instance, fnc)(**values)
            if isinstance(raw, Response):
                return raw
            return Response(raw)
        except HTTPException as e:
            return e

    def wsgi_app(self, environ, start_response):
        """
        Run the glim WSGI application for one request.

        Args
        ----
        environ (dict): the WSGI environment.
        start_response (function): werkzeug's start_response function.

        Returns
        -------
        response (werkzeug.wrappers.Response): the dispatched response
            object, invoked as a WSGI application.
        """
        request = Request(environ)
        if self.session_store is not None:
            sid = request.cookies.get(self.config['sessions']['id_header'])
            if sid is None:
                request.session = self.session_store.new()
            else:
                request.session = self.session_store.get(sid)
        response = self.dispatch_request(request)
        if self.session_store is not None:
            # Save and (re)issue the cookie only when werkzeug marks the
            # session as modified.
            if request.session.should_save:
                self.session_store.save(request.session)
                response.set_cookie(
                    self.config['sessions']['id_header'],
                    request.session.sid
                )
        return response(environ, start_response)

    def __call__(self, environ, start_response):
        # WSGI entry point: delegate to wsgi_app so middleware can wrap it.
        return self.wsgi_app(environ, start_response)
|
from contextlib import contextmanager
from casexml.apps.case.mock import CaseFactory
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.signals import case_post_save
from corehq.apps.data_interfaces.models import (AutomaticUpdateRule,
AutomaticUpdateRuleCriteria, AutomaticUpdateAction)
from corehq.apps.data_interfaces.tasks import run_case_update_rules_for_domain
from datetime import datetime, date
from corehq.form_processor.backends.sql.dbaccessors import CaseAccessorSQL
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.tests.utils import (run_with_all_backends, FormProcessorTestUtils,
set_case_property_directly)
from corehq.form_processor.utils.general import should_use_sql_backend
from corehq.util.test_utils import set_parent_case as set_actual_parent_case
from django.test import TestCase
from mock import patch
from corehq.util.context_managers import drop_connected_signals
class AutomaticCaseUpdateTest(TestCase):
    """
    Integration tests for AutomaticUpdateRule: criteria matching, rule
    organization/boundary dates, and end-to-end rule runs against both the
    Couch and SQL case backends.
    """

    def setUp(self):
        # rule: updates + closes 'test-case-type' cases whose
        # last_visit_date is >= 30 days old (and server_modified_on is
        # older than the 30-day boundary).
        self.domain = 'auto-update-test'
        self.case_db = CaseAccessors(self.domain)
        self.factory = CaseFactory(self.domain)
        self.rule = AutomaticUpdateRule(
            domain=self.domain,
            name='test-rule',
            case_type='test-case-type',
            active=True,
            server_modified_boundary=30,
        )
        self.rule.save()
        self.rule.automaticupdaterulecriteria_set = [
            AutomaticUpdateRuleCriteria(
                property_name='last_visit_date',
                property_value='30',
                match_type=AutomaticUpdateRuleCriteria.MATCH_DAYS_SINCE,
            ),
        ]
        self.rule.automaticupdateaction_set = [
            AutomaticUpdateAction(
                action=AutomaticUpdateAction.ACTION_UPDATE,
                property_name='update_flag',
                property_value='Y',
            ),
            AutomaticUpdateAction(
                action=AutomaticUpdateAction.ACTION_CLOSE,
            ),
        ]
        # rule2 and rule3: criteria-less close rules on 'test-case-type-2'
        # with different boundaries (used by the matching/boundary tests).
        self.rule2 = AutomaticUpdateRule(
            domain=self.domain,
            name='test-rule-2',
            case_type='test-case-type-2',
            active=True,
            server_modified_boundary=30,
        )
        self.rule2.save()
        self.rule2.automaticupdateaction_set = [
            AutomaticUpdateAction(
                action=AutomaticUpdateAction.ACTION_CLOSE,
            ),
        ]
        self.rule3 = AutomaticUpdateRule(
            domain=self.domain,
            name='test-rule-3',
            case_type='test-case-type-2',
            active=True,
            server_modified_boundary=50,
        )
        self.rule3.save()
        self.rule3.automaticupdateaction_set = [
            AutomaticUpdateAction(
                action=AutomaticUpdateAction.ACTION_CLOSE,
            ),
        ]
        # Create the case without firing post-save signal handlers.
        with drop_connected_signals(case_post_save):
            case = self.factory.create_case(case_type='test-case-type')
        self.case_id = case.case_id

    def tearDown(self):
        # Delete children before parents to satisfy FK constraints.
        AutomaticUpdateRuleCriteria.objects.all().delete()
        AutomaticUpdateAction.objects.all().delete()
        AutomaticUpdateRule.objects.all().delete()
        FormProcessorTestUtils.delete_all_cases(self.domain)

    def _get_case_ids(self, *args, **kwargs):
        # Patched in place of AutomaticUpdateRule.get_case_ids in test_rule.
        return [self.case_id]

    def _get_case(self):
        return self.case_db.get_case(self.case_id)

    def _assert_case_revision(self, rev_number, last_modified, expect_modified=False):
        """Backend-agnostic "was the case modified?" assertion: SQL checks
        modification since ``last_modified``; Couch checks the doc _rev
        counter."""
        if should_use_sql_backend(self.domain):
            self.assertEqual(
                expect_modified,
                CaseAccessorSQL.case_modified_since(self.case_id, last_modified)
            )
        else:
            doc = self._get_case()
            self.assertTrue(doc['_rev'].startswith('%s-' % rev_number))

    @run_with_all_backends
    def test_rule(self):
        """Rule only fires when BOTH server_modified_on and the
        last_visit_date criterion are past their boundaries."""
        now = datetime(2015, 10, 22, 0, 0)
        with patch('corehq.apps.data_interfaces.models.AutomaticUpdateRule.get_case_ids', new=self._get_case_ids):
            # No update: both dates are 27 days away
            last_modified = datetime(2015, 9, 25, 12, 0)
            _update_case(self.domain, self.case_id, last_modified, date(2015, 9, 25))
            self._assert_case_revision(2, last_modified)
            run_case_update_rules_for_domain(self.domain, now=now)
            self._assert_case_revision(2, last_modified)

            # No update: server_modified_on is 32 days away but last_visit_date is 27 days away
            last_modified = datetime(2015, 9, 20, 12, 0)
            _update_case(self.domain, self.case_id, last_modified, date(2015, 9, 25))
            self._assert_case_revision(3, last_modified)
            run_case_update_rules_for_domain(self.domain, now=now)
            self._assert_case_revision(3, last_modified)

            # No update: last_visit_date is 32 days away but server_modified_on is 27 days away
            last_modified = datetime(2015, 9, 25, 12, 0)
            _update_case(self.domain, self.case_id, last_modified, date(2015, 9, 20))
            self._assert_case_revision(4, last_modified)
            run_case_update_rules_for_domain(self.domain, now=now)
            self._assert_case_revision(4, last_modified)

            # Perform update: both dates are 32 days away
            last_modified = datetime(2015, 9, 20, 12, 0)
            _update_case(self.domain, self.case_id, last_modified, date(2015, 9, 20))
            self._assert_case_revision(5, last_modified)
            with drop_connected_signals(case_post_save):
                run_case_update_rules_for_domain(self.domain, now=now)
            self._assert_case_revision(6, last_modified, True)

            case = self._get_case()
            self.assertEqual(case.get_case_property('update_flag'), 'Y')
            self.assertEqual(case.closed, True)

    @run_with_all_backends
    def test_match_days_since(self):
        """MATCH_DAYS_SINCE: matches when the date property is at least
        N days before the reference time."""
        with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:
            self.rule2.automaticupdaterulecriteria_set = [
                AutomaticUpdateRuleCriteria(
                    property_name='last_visit_date',
                    property_value='30',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_DAYS_SINCE,
                ),
            ]
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'last_visit_date', '2015-12-30')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'last_visit_date', '2015-12-03')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            # Exactly 30 days is the inclusive boundary.
            set_case_property_directly(case, 'last_visit_date', '2015-12-02')
            self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'last_visit_date', '2015-11-01')
            self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

    @run_with_all_backends
    def test_match_equal(self):
        """MATCH_EQUAL: matches only on exact property value."""
        with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:
            self.rule2.automaticupdaterulecriteria_set = [
                AutomaticUpdateRuleCriteria(
                    property_name='property1',
                    property_value='value1',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_EQUAL,
                ),
            ]
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property1', 'x')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property1', 'value1')
            self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

    @run_with_all_backends
    def test_match_not_equal(self):
        """MATCH_NOT_EQUAL: matches when property is absent or differs."""
        with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:
            self.rule2.automaticupdaterulecriteria_set = [
                AutomaticUpdateRuleCriteria(
                    property_name='property2',
                    property_value='value2',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_NOT_EQUAL,
                ),
            ]
            self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property2', 'value2')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property2', 'x')
            self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

    @run_with_all_backends
    def test_date_case_properties_for_equality(self):
        """
        Date case properties are automatically converted from string to date
        when fetching from the db, so here we want to make sure this doesn't
        interfere with our ability to compare dates for equality.
        """
        with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:
            self.rule2.automaticupdaterulecriteria_set = [
                AutomaticUpdateRuleCriteria(
                    property_name='property1',
                    property_value='2016-02-24',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_EQUAL,
                ),
            ]
            set_case_property_directly(case, 'property1', '2016-02-24')
            self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property1', '2016-02-25')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

    @run_with_all_backends
    def test_date_case_properties_for_inequality(self):
        # Mirror of the equality test above for MATCH_NOT_EQUAL.
        with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:
            self.rule2.automaticupdaterulecriteria_set = [
                AutomaticUpdateRuleCriteria(
                    property_name='property1',
                    property_value='2016-02-24',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_NOT_EQUAL,
                ),
            ]
            set_case_property_directly(case, 'property1', '2016-02-24')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property1', '2016-02-25')
            self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

    @run_with_all_backends
    def test_match_has_value(self):
        """MATCH_HAS_VALUE: empty string counts as "no value"."""
        with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:
            self.rule2.automaticupdaterulecriteria_set = [
                AutomaticUpdateRuleCriteria(
                    property_name='property3',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_HAS_VALUE,
                ),
            ]
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property3', 'x')
            self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property3', '')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

    @run_with_all_backends
    def test_and_criteria(self):
        """Multiple criteria are ANDed: flipping any one to non-matching
        makes the whole rule non-matching."""
        with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:

            self.rule2.automaticupdaterulecriteria_set = [
                AutomaticUpdateRuleCriteria(
                    property_name='last_visit_date',
                    property_value='30',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_DAYS_SINCE,
                ),
                AutomaticUpdateRuleCriteria(
                    property_name='property1',
                    property_value='value1',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_EQUAL,
                ),
                AutomaticUpdateRuleCriteria(
                    property_name='property2',
                    property_value='value2',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_NOT_EQUAL,
                ),
                AutomaticUpdateRuleCriteria(
                    property_name='property3',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_HAS_VALUE,
                ),
            ]
            set_case_property_directly(case, 'last_visit_date', '2015-11-01')
            set_case_property_directly(case, 'property1', 'value1')
            set_case_property_directly(case, 'property2', 'x')
            set_case_property_directly(case, 'property3', 'x')
            self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'last_visit_date', '2015-12-30')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'last_visit_date', '2015-11-01')
            set_case_property_directly(case, 'property1', 'x')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property1', 'value1')
            set_case_property_directly(case, 'property2', 'value2')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property2', 'x')
            set_case_property_directly(case, 'property3', '')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property3', 'x')
            self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

    def test_get_rules_from_domain(self):
        """organize_rules_by_case_type groups the domain's rules correctly."""
        rules = AutomaticUpdateRule.by_domain(self.domain)
        rules_by_case_type = AutomaticUpdateRule.organize_rules_by_case_type(rules)

        expected_case_types = ['test-case-type', 'test-case-type-2']
        actual_case_types = rules_by_case_type.keys()
        self.assertEqual(set(expected_case_types), set(actual_case_types))

        expected_rule_ids = [self.rule.pk]
        actual_rule_ids = [rule.pk for rule in rules_by_case_type['test-case-type']]
        self.assertEqual(set(expected_rule_ids), set(actual_rule_ids))

        expected_rule_ids = [self.rule2.pk, self.rule3.pk]
        actual_rule_ids = [rule.pk for rule in rules_by_case_type['test-case-type-2']]
        self.assertEqual(set(expected_rule_ids), set(actual_rule_ids))

    def test_boundary_date(self):
        # The boundary date is driven by the smallest server_modified_boundary
        # among the case type's rules (30 in both groups here).
        rules = AutomaticUpdateRule.by_domain(self.domain)
        rules_by_case_type = AutomaticUpdateRule.organize_rules_by_case_type(rules)

        boundary_date = AutomaticUpdateRule.get_boundary_date(
            rules_by_case_type['test-case-type'], datetime(2016, 1, 1))
        self.assertEqual(boundary_date, datetime(2015, 12, 2))

        boundary_date = AutomaticUpdateRule.get_boundary_date(
            rules_by_case_type['test-case-type-2'], datetime(2016, 1, 1))
        self.assertEqual(boundary_date, datetime(2015, 12, 2))

    @run_with_all_backends
    def test_parent_case_lookup(self):
        """Criteria may reference parent case properties via 'parent/<name>'."""
        with _with_case(self.domain, 'test-child-case-type', datetime(2016, 1, 1)) as child, \
                _with_case(self.domain, 'test-parent-case-type', datetime(2016, 1, 1), case_name='abc') as parent:

            # Set the parent case relationship
            child = set_parent_case(self.domain, child, parent)

            # Create a rule that references parent/name which should match
            rule = AutomaticUpdateRule(
                domain=self.domain,
                name='test-parent-rule',
                case_type='test-child-case-type',
                active=True,
                server_modified_boundary=30,
            )
            rule.save()
            self.addCleanup(rule.delete)
            rule.automaticupdaterulecriteria_set = [
                AutomaticUpdateRuleCriteria(
                    property_name='parent/name',
                    property_value='abc',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_EQUAL,
                ),
            ]
            self.assertTrue(rule.rule_matches_case(child, datetime(2016, 3, 1)))

            # Update the rule to match on a different name and now it shouldn't match
            rule.automaticupdaterulecriteria_set.all().delete()
            rule.automaticupdaterulecriteria_set = [
                AutomaticUpdateRuleCriteria(
                    property_name='parent/name',
                    property_value='def',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_EQUAL,
                ),
            ]
            self.assertFalse(rule.rule_matches_case(child, datetime(2016, 3, 1)))
@contextmanager
def _with_case(domain, case_type, last_modified, **kwargs):
    """Create a case with the given server_modified_on, yield the fresh
    copy from the db, and hard-delete it on exit."""
    with drop_connected_signals(case_post_save):
        created = CaseFactory(domain).create_case(case_type=case_type, **kwargs)

    _update_case(domain, created.case_id, last_modified)
    case = CaseAccessors(domain).get_case(created.case_id)
    try:
        yield case
    finally:
        # Clean up with the backend-appropriate deletion mechanism.
        if not should_use_sql_backend(domain):
            case.delete()
        else:
            CaseAccessorSQL.hard_delete_cases(domain, [case.case_id])
def _save_case(domain, case):
    """Persist *case* without clobbering its server_modified_on."""
    if not should_use_sql_backend(domain):
        # can't call case.save() since it overrides the server_modified_on property
        CommCareCase.get_db().save_doc(case.to_json())
    else:
        CaseAccessorSQL.save_case(case)
def _update_case(domain, case_id, server_modified_on, last_visit_date=None):
    """Set server_modified_on (and optionally last_visit_date) directly on
    the stored case, bypassing normal form processing."""
    accessors = CaseAccessors(domain)
    case = accessors.get_case(case_id)
    case.server_modified_on = server_modified_on
    if last_visit_date:
        visit_str = last_visit_date.strftime('%Y-%m-%d')
        set_case_property_directly(case, 'last_visit_date', visit_str)
    _save_case(domain, case)
def set_parent_case(domain, child_case, parent_case):
    """Link *child_case* to *parent_case*, preserving the child's original
    server_modified_on, and return the refreshed child case."""
    accessors = CaseAccessors(domain)
    original_modified_on = child_case.server_modified_on

    set_actual_parent_case(domain, child_case, parent_case)

    refreshed_child = accessors.get_case(child_case.case_id)
    refreshed_child.server_modified_on = original_modified_on
    _save_case(domain, refreshed_child)
    return accessors.get_case(refreshed_child.case_id)
adding tests for auto update enhancements
from contextlib import contextmanager
from casexml.apps.case.mock import CaseFactory
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.signals import case_post_save
from corehq.apps.data_interfaces.models import (AutomaticUpdateRule, AutomaticUpdateRuleCriteria,
AutomaticUpdateAction, PropertyTypeChoices)
from corehq.apps.data_interfaces.tasks import run_case_update_rules_for_domain
from datetime import datetime, date
from corehq.form_processor.backends.sql.dbaccessors import CaseAccessorSQL
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.tests.utils import (run_with_all_backends, FormProcessorTestUtils,
set_case_property_directly)
from corehq.form_processor.utils.general import should_use_sql_backend
from corehq.form_processor.signals import sql_case_post_save
from corehq.util.test_utils import set_parent_case as set_actual_parent_case, update_case
from django.test import TestCase
from mock import patch
from corehq.util.context_managers import drop_connected_signals
from toggle.shortcuts import update_toggle_cache
from corehq.toggles import NAMESPACE_DOMAIN, AUTO_CASE_UPDATE_ENHANCEMENTS
class AutomaticCaseUpdateTest(TestCase):
    def setUp(self):
        self.domain = 'auto-update-test'
        # Enable the enhancements feature flag for this domain.
        update_toggle_cache(AUTO_CASE_UPDATE_ENHANCEMENTS.slug, self.domain, True, NAMESPACE_DOMAIN)
        self.case_db = CaseAccessors(self.domain)
        self.factory = CaseFactory(self.domain)
        # rule: update-only (no close) on 'test-case-type', 30-day visit gap.
        self.rule = AutomaticUpdateRule(
            domain=self.domain,
            name='test-rule',
            case_type='test-case-type',
            active=True,
            server_modified_boundary=30,
        )
        self.rule.save()
        self.rule.automaticupdaterulecriteria_set = [
            AutomaticUpdateRuleCriteria(
                property_name='last_visit_date',
                property_value='30',
                match_type=AutomaticUpdateRuleCriteria.MATCH_DAYS_SINCE,
            ),
        ]
        self.rule.automaticupdateaction_set = [
            AutomaticUpdateAction(
                action=AutomaticUpdateAction.ACTION_UPDATE,
                property_name='update_flag',
                property_value='Y',
            )
        ]
        # rule2 / rule3: criteria-less close rules on 'test-case-type-2'.
        self.rule2 = AutomaticUpdateRule(
            domain=self.domain,
            name='test-rule-2',
            case_type='test-case-type-2',
            active=True,
            server_modified_boundary=30,
        )
        self.rule2.save()
        self.rule2.automaticupdateaction_set = [
            AutomaticUpdateAction(
                action=AutomaticUpdateAction.ACTION_CLOSE,
            ),
        ]
        self.rule3 = AutomaticUpdateRule(
            domain=self.domain,
            name='test-rule-3',
            case_type='test-case-type-2',
            active=True,
            server_modified_boundary=50,
        )
        self.rule3.save()
        self.rule3.automaticupdateaction_set = [
            AutomaticUpdateAction(
                action=AutomaticUpdateAction.ACTION_CLOSE,
            ),
        ]
        # rule4: update + close on 'test-case-type' with a longer (40-day)
        # visit gap, so it fires later than self.rule.
        self.rule4 = AutomaticUpdateRule(
            domain=self.domain,
            name='test-rule-4',
            case_type='test-case-type',
            active=True,
            server_modified_boundary=30,
        )
        self.rule4.save()
        self.rule4.automaticupdaterulecriteria_set = [
            AutomaticUpdateRuleCriteria(
                property_name='last_visit_date',
                property_value='40',
                match_type=AutomaticUpdateRuleCriteria.MATCH_DAYS_SINCE,
            ),
        ]
        self.rule4.automaticupdateaction_set = [
            AutomaticUpdateAction(
                action=AutomaticUpdateAction.ACTION_UPDATE,
                property_name='update_flag',
                property_value='C',
            ),
            AutomaticUpdateAction(
                action=AutomaticUpdateAction.ACTION_CLOSE,
            ),
        ]
        # rule5: no server_modified filtering; matches on case name.
        self.rule5 = AutomaticUpdateRule(
            domain=self.domain,
            name='test-rule-5',
            case_type='test-case-type-3',
            active=True,
            filter_on_server_modified=False
        )
        self.rule5.save()
        self.rule5.automaticupdaterulecriteria_set = [
            AutomaticUpdateRuleCriteria(
                property_name='name',
                property_value='signal',
                match_type=AutomaticUpdateRuleCriteria.MATCH_EQUAL,
            ),
        ]
        self.rule5.automaticupdateaction_set = [
            AutomaticUpdateAction(
                action=AutomaticUpdateAction.ACTION_UPDATE,
                property_name='after_save',
                property_value='updated',
            ),
        ]
        # Create the test case without firing post-save signal handlers.
        with drop_connected_signals(case_post_save):
            case = self.factory.create_case(case_type='test-case-type')
        self.case_id = case.case_id
    def tearDown(self):
        # Delete criteria/actions before rules to satisfy FK constraints.
        AutomaticUpdateRuleCriteria.objects.all().delete()
        AutomaticUpdateAction.objects.all().delete()
        AutomaticUpdateRule.objects.all().delete()
        FormProcessorTestUtils.delete_all_cases(self.domain)
    def _get_case_ids(self, *args, **kwargs):
        # Patched in place of AutomaticUpdateRule.get_case_ids in test_rule.
        return [self.case_id]
    def _get_case(self):
        # Fetch the canonical test case through the backend-agnostic accessor.
        return self.case_db.get_case(self.case_id)
    def _assert_case_revision(self, rev_number, last_modified, expect_modified=False):
        """Backend-agnostic "was the case modified?" assertion: SQL checks
        modification since ``last_modified``; Couch checks the doc _rev
        counter prefix."""
        if should_use_sql_backend(self.domain):
            self.assertEqual(
                expect_modified,
                CaseAccessorSQL.case_modified_since(self.case_id, last_modified)
            )
        else:
            doc = self._get_case()
            self.assertTrue(doc['_rev'].startswith('%s-' % rev_number))
    @run_with_all_backends
    def test_rule(self):
        """End-to-end run: rule fires only when both dates pass their
        boundaries, is idempotent once the case is in the target state, and
        rule4 closes the case when the visit gap reaches 40+ days."""
        now = datetime(2015, 10, 22, 0, 0)
        with patch('corehq.apps.data_interfaces.models.AutomaticUpdateRule.get_case_ids', new=self._get_case_ids):
            # No update: both dates are 27 days away
            last_modified = datetime(2015, 9, 25, 12, 0)
            _update_case(self.domain, self.case_id, last_modified, date(2015, 9, 25))
            self._assert_case_revision(2, last_modified)
            run_case_update_rules_for_domain(self.domain, now=now)
            self._assert_case_revision(2, last_modified)

            # No update: server_modified_on is 32 days away but last_visit_date is 27 days away
            last_modified = datetime(2015, 9, 20, 12, 0)
            _update_case(self.domain, self.case_id, last_modified, date(2015, 9, 25))
            self._assert_case_revision(3, last_modified)
            run_case_update_rules_for_domain(self.domain, now=now)
            self._assert_case_revision(3, last_modified)

            # No update: last_visit_date is 32 days away but server_modified_on is 27 days away
            last_modified = datetime(2015, 9, 25, 12, 0)
            _update_case(self.domain, self.case_id, last_modified, date(2015, 9, 20))
            self._assert_case_revision(4, last_modified)
            run_case_update_rules_for_domain(self.domain, now=now)
            self._assert_case_revision(4, last_modified)

            # Perform update: both dates are 32 days away
            last_modified = datetime(2015, 9, 20, 12, 0)
            _update_case(self.domain, self.case_id, last_modified, date(2015, 9, 20))
            self._assert_case_revision(5, last_modified)
            with drop_connected_signals(case_post_save):
                run_case_update_rules_for_domain(self.domain, now=now)
            self._assert_case_revision(6, last_modified, True)

            case = self._get_case()
            self.assertEqual(case.get_case_property('update_flag'), 'Y')

            # No update: case state matches final state
            _update_case(self.domain, self.case_id, last_modified, date(2015, 9, 20))
            self._assert_case_revision(7, last_modified)
            with drop_connected_signals(case_post_save):
                run_case_update_rules_for_domain(self.domain, now=now)
            self._assert_case_revision(7, last_modified)

            # Perform update: case closed because date is 42 days away
            _update_case(self.domain, self.case_id, last_modified, date(2015, 9, 10))
            with drop_connected_signals(case_post_save):
                run_case_update_rules_for_domain(self.domain, now=now)

            case = self._get_case()
            self.assertEqual(case.get_case_property('update_flag'), 'C')
            self.assertEqual(case.closed, True)
    @run_with_all_backends
    def test_match_days_since(self):
        """MATCH_DAYS_SINCE: matches once the date property is at least
        N days before the reference time (inclusive boundary)."""
        with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:
            self.rule2.automaticupdaterulecriteria_set = [
                AutomaticUpdateRuleCriteria(
                    property_name='last_visit_date',
                    property_value='30',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_DAYS_SINCE,
                ),
            ]
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'last_visit_date', '2015-12-30')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'last_visit_date', '2015-12-03')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'last_visit_date', '2015-12-02')
            self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'last_visit_date', '2015-11-01')
            self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
    @run_with_all_backends
    def test_match_equal(self):
        """MATCH_EQUAL: matches only on the exact property value."""
        with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:
            self.rule2.automaticupdaterulecriteria_set = [
                AutomaticUpdateRuleCriteria(
                    property_name='property1',
                    property_value='value1',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_EQUAL,
                ),
            ]
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property1', 'x')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property1', 'value1')
            self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
    @run_with_all_backends
    def test_match_not_equal(self):
        """MATCH_NOT_EQUAL: matches when the property is absent or differs."""
        with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:
            self.rule2.automaticupdaterulecriteria_set = [
                AutomaticUpdateRuleCriteria(
                    property_name='property2',
                    property_value='value2',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_NOT_EQUAL,
                ),
            ]
            self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property2', 'value2')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property2', 'x')
            self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
    @run_with_all_backends
    def test_date_case_properties_for_equality(self):
        """
        Date case properties are automatically converted from string to date
        when fetching from the db, so here we want to make sure this doesn't
        interfere with our ability to compare dates for equality.
        """
        with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:
            self.rule2.automaticupdaterulecriteria_set = [
                AutomaticUpdateRuleCriteria(
                    property_name='property1',
                    property_value='2016-02-24',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_EQUAL,
                ),
            ]
            set_case_property_directly(case, 'property1', '2016-02-24')
            self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property1', '2016-02-25')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
    @run_with_all_backends
    def test_date_case_properties_for_inequality(self):
        # Mirror of the equality test above for MATCH_NOT_EQUAL.
        with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:
            self.rule2.automaticupdaterulecriteria_set = [
                AutomaticUpdateRuleCriteria(
                    property_name='property1',
                    property_value='2016-02-24',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_NOT_EQUAL,
                ),
            ]
            set_case_property_directly(case, 'property1', '2016-02-24')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property1', '2016-02-25')
            self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
    @run_with_all_backends
    def test_match_has_value(self):
        """MATCH_HAS_VALUE: an empty string counts as "no value"."""
        with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:
            self.rule2.automaticupdaterulecriteria_set = [
                AutomaticUpdateRuleCriteria(
                    property_name='property3',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_HAS_VALUE,
                ),
            ]
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property3', 'x')
            self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property3', '')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
    @run_with_all_backends
    def test_and_criteria(self):
        """Multiple criteria are ANDed: flipping any single criterion to
        non-matching makes the whole rule non-matching."""
        with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:

            self.rule2.automaticupdaterulecriteria_set = [
                AutomaticUpdateRuleCriteria(
                    property_name='last_visit_date',
                    property_value='30',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_DAYS_SINCE,
                ),
                AutomaticUpdateRuleCriteria(
                    property_name='property1',
                    property_value='value1',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_EQUAL,
                ),
                AutomaticUpdateRuleCriteria(
                    property_name='property2',
                    property_value='value2',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_NOT_EQUAL,
                ),
                AutomaticUpdateRuleCriteria(
                    property_name='property3',
                    match_type=AutomaticUpdateRuleCriteria.MATCH_HAS_VALUE,
                ),
            ]
            set_case_property_directly(case, 'last_visit_date', '2015-11-01')
            set_case_property_directly(case, 'property1', 'value1')
            set_case_property_directly(case, 'property2', 'x')
            set_case_property_directly(case, 'property3', 'x')
            self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'last_visit_date', '2015-12-30')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'last_visit_date', '2015-11-01')
            set_case_property_directly(case, 'property1', 'x')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property1', 'value1')
            set_case_property_directly(case, 'property2', 'value2')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property2', 'x')
            set_case_property_directly(case, 'property3', '')
            self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))

            set_case_property_directly(case, 'property3', 'x')
            self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
    def test_get_rules_from_domain(self):
        """organize_rules_by_case_type groups the domain's rules correctly."""
        rules = AutomaticUpdateRule.by_domain(self.domain)
        rules_by_case_type = AutomaticUpdateRule.organize_rules_by_case_type(rules)

        expected_case_types = ['test-case-type', 'test-case-type-2', 'test-case-type-3']
        actual_case_types = rules_by_case_type.keys()
        self.assertEqual(set(expected_case_types), set(actual_case_types))

        expected_rule_ids = [self.rule.pk, self.rule4.pk]
        actual_rule_ids = [rule.pk for rule in rules_by_case_type['test-case-type']]
        self.assertEqual(set(expected_rule_ids), set(actual_rule_ids))

        expected_rule_ids = [self.rule2.pk, self.rule3.pk]
        actual_rule_ids = [rule.pk for rule in rules_by_case_type['test-case-type-2']]
        self.assertEqual(set(expected_rule_ids), set(actual_rule_ids))

        expected_rule_ids = [self.rule5.pk]
        actual_rule_ids = [rule.pk for rule in rules_by_case_type['test-case-type-3']]
        self.assertEqual(set(expected_rule_ids), set(actual_rule_ids))
def test_boundary_date(self):
    """get_boundary_date yields the same cutoff for both rule groups."""
    rules = AutomaticUpdateRule.by_domain(self.domain)
    rules_by_case_type = AutomaticUpdateRule.organize_rules_by_case_type(rules)
    now = datetime(2016, 1, 1)
    for case_type in ('test-case-type', 'test-case-type-2'):
        boundary_date = AutomaticUpdateRule.get_boundary_date(
            rules_by_case_type[case_type], now)
        self.assertEqual(boundary_date, datetime(2015, 12, 2))
@run_with_all_backends
def test_parent_cases(self):
    """Rule criteria and actions can reference parent-case properties
    through the 'parent/' path prefix.

    NOTE(review): indentation reconstructed from a flattened source;
    verify nesting against the upstream file.
    """
    with _with_case(self.domain, 'test-child-case-type', datetime(2016, 1, 1)) as child, \
            _with_case(self.domain, 'test-parent-case-type', datetime(2016, 1, 1), case_name='abc') as parent:
        # Set the parent case relationship
        child = set_parent_case(self.domain, child, parent)

        # Create a rule that references parent/name which should match
        rule = AutomaticUpdateRule(
            domain=self.domain,
            name='test-parent-rule',
            case_type='test-child-case-type',
            active=True,
            server_modified_boundary=30,
        )
        rule.save()
        self.addCleanup(rule.delete)
        rule.automaticupdaterulecriteria_set = [
            AutomaticUpdateRuleCriteria(
                property_name='parent/name',
                property_value='abc',
                match_type=AutomaticUpdateRuleCriteria.MATCH_EQUAL,
            ),
        ]
        rule.automaticupdateaction_set = [
            # Write a constant value onto a parent-case property.
            AutomaticUpdateAction(
                action=AutomaticUpdateAction.ACTION_UPDATE,
                property_name='parent/update_flag',
                property_value='P',
            ),
            # Copy the parent's name onto the child case.
            AutomaticUpdateAction(
                action=AutomaticUpdateAction.ACTION_UPDATE,
                property_name='parent_name',
                property_value='parent/name',
                property_value_type=PropertyTypeChoices.CASE_PROPERTY
            )
        ]

        # rule should match on parent case property and update parent case
        rule.apply_rule(child, datetime(2016, 3, 1))
        updated_parent = self.case_db.get_case(parent.case_id)
        updated_child = self.case_db.get_case(child.case_id)
        self.assertEqual(updated_parent.get_case_property('update_flag'), 'P')
        self.assertEqual(updated_child.get_case_property('parent_name'), 'abc')

        # Update the rule to match on a different name and now it shouldn't match
        rule.automaticupdaterulecriteria_set.all().delete()
        rule.automaticupdaterulecriteria_set = [
            AutomaticUpdateRuleCriteria(
                property_name='parent/name',
                property_value='def',
                match_type=AutomaticUpdateRuleCriteria.MATCH_EQUAL,
            ),
        ]
        self.assertFalse(rule.rule_matches_case(child, datetime(2016, 3, 1)))
@run_with_all_backends
def test_no_server_boundary(self):
    """rule5 has no server-modified-date filter, so a case modified on
    the check date itself still matches."""
    check_date = datetime(2016, 1, 1)
    with _with_case(self.domain, 'test-case-type-3', check_date,
                    case_name='signal') as case:
        # no filtering on server modified date so same day matches
        self.assertTrue(self.rule5.rule_matches_case(case, check_date))
@run_with_all_backends
def test_run_on_save(self):
    """A case update triggers the save signal, which sets 'after_save'."""
    with _with_case(self.domain, 'test-case-type-3', datetime(2016, 1, 1),
                    case_name='signal') as case:
        # Nothing has fired yet.
        self.assertIsNone(case.get_case_property('after_save'))
        # Force a save via a case update; the save signal then updates
        # the property.
        update_case(self.domain, case.case_id, {'saving': 'saved'})
        refreshed = self.case_db.get_case(case.case_id)
        self.assertEqual(refreshed.get_case_property('after_save'), 'updated')
@contextmanager
def _with_case(domain, case_type, last_modified, **kwargs):
    """Yield a freshly created case with a fixed server_modified_on,
    deleting it on exit.

    NOTE(review): indentation reconstructed from a flattened source —
    confirm which statements sit inside the signal-suppression block.
    """
    # Suppress post-save signals so case creation does not trigger the
    # connected rule-processing handlers.
    with drop_connected_signals(case_post_save), drop_connected_signals(sql_case_post_save):
        case = CaseFactory(domain).create_case(case_type=case_type, **kwargs)
        _update_case(domain, case.case_id, last_modified)
    # Re-fetch so the yielded object reflects the saved state.
    accessors = CaseAccessors(domain)
    case = accessors.get_case(case.case_id)
    try:
        yield case
    finally:
        # Clean up via the backend-appropriate deletion path.
        if should_use_sql_backend(domain):
            CaseAccessorSQL.hard_delete_cases(domain, [case.case_id])
        else:
            case.delete()
def _save_case(domain, case):
    """Persist *case* without disturbing its server_modified_on value."""
    if not should_use_sql_backend(domain):
        # can't call case.save() since it overrides the server_modified_on
        # property, so write the raw document through couch instead
        CommCareCase.get_db().save_doc(case.to_json())
    else:
        CaseAccessorSQL.save_case(case)
def _update_case(domain, case_id, server_modified_on, last_visit_date=None):
    """Set server_modified_on (and optionally last_visit_date) on a case
    and persist it."""
    case = CaseAccessors(domain).get_case(case_id)
    case.server_modified_on = server_modified_on
    if last_visit_date:
        # Stored as an ISO-style date string, matching the rule criteria.
        set_case_property_directly(
            case, 'last_visit_date', last_visit_date.strftime('%Y-%m-%d'))
    _save_case(domain, case)
def set_parent_case(domain, child_case, parent_case):
    """Link *child_case* to *parent_case*, preserving the child's original
    server_modified_on, and return the refreshed child case."""
    original_modified_on = child_case.server_modified_on
    set_actual_parent_case(domain, child_case, parent_case)
    # Re-fetch and restore the timestamp, since linking re-saves the case.
    refreshed = CaseAccessors(domain).get_case(child_case.case_id)
    refreshed.server_modified_on = original_modified_on
    _save_case(domain, refreshed)
    return CaseAccessors(domain).get_case(refreshed.case_id)
|
#!/usr/bin/env python
"""Tests for transys.labeled_graphs (part of transys subpackage)"""
from __future__ import print_function
from nose.tools import raises, assert_raises
from tulip.transys import labeled_graphs
from tulip.transys.mathset import PowerSet, MathSet
from tulip.transys.transys import FTS
def str2singleton_test():
    """A bare string is wrapped in a singleton set; a set passes through."""
    for arg, expected in (("p", set(["p"])), (set(["Cal"]), set(["Cal"]))):
        assert labeled_graphs.str2singleton(arg) == expected
def prepend_with_check(states, prepend_str, expected):
    """Assert that prepend_with(states, prepend_str) equals *expected*."""
    result = labeled_graphs.prepend_with(states, prepend_str)
    assert result == expected
def prepend_with_test():
    """Generator test: prepend_with over several (states, prefix) pairs."""
    cases = [
        ([0, 1], "s", ['s0', 's1']),
        ([], "s", []),
        ([0], "Cal", ["Cal0"]),
        # A None prefix leaves the states untouched.
        ([0, 1], None, [0, 1]),
    ]
    for states, prefix, expected in cases:
        yield prepend_with_check, states, prefix, expected
class States_test(object):
    """Unit tests for labeled_graphs.States bound to a LabeledDiGraph."""

    def setUp(self):
        # Fresh, empty States container for every test.
        self.S = labeled_graphs.States(labeled_graphs.LabeledDiGraph())

    def tearDown(self):
        self.S = None

    def test_contains(self):
        # This also serves as a test for len
        self.S.add(1)
        self.S.add(2)
        assert len(self.S) == 2
        assert (2 in self.S) and (1 in self.S)
        assert 3 not in self.S

    def test_ior(self):
        # In-place union (|=) merges states from another container.
        self.S.add(1)
        other_S = labeled_graphs.States(labeled_graphs.LabeledDiGraph())
        other_S.add(0)
        assert len(self.S) == 1
        assert set([s for s in self.S]) == {1}
        self.S |= other_S
        assert len(self.S) == 2
        assert set([s for s in self.S]) == {1, 0}

    def test_add(self):
        # States may be ints or strings.
        self.S.add(1)
        assert set([s for s in self.S]) == set([1])
        self.S.add(2)
        assert set([s for s in self.S]) == set([1, 2])
        self.S.add("Cal")
        assert set([s for s in self.S]) == set([1, 2, "Cal"])

    def test_add_from(self):
        # add_from accepts any iterable of states.
        self.S.add_from(range(3))
        assert len(self.S) == 3
        assert set([s for s in self.S]) == set(range(3))
        self.S.add_from(["Cal", "tech"])
        assert len(self.S) == 5
        assert set([s for s in self.S]) == set(list(range(3))+["Cal", "tech"])

    def test_remove(self):
        # This also tests remove_from
        self.S.add_from(range(4))
        self.S.remove(1)
        assert set([s for s in self.S]) == set([0, 2, 3])
        self.S.remove_from([0, 3])
        assert len(self.S) == 1 and 2 in self.S

    def test_call(self):
        # Calling States(data=True) yields (state, attr_dict) pairs.
        self.S.add_from([-1, "Cal"])
        S_imm_dat = self.S(data=True)
        assert (len(S_imm_dat) == 2) and ((-1, dict()) in S_imm_dat) and \
            (("Cal", dict()) in S_imm_dat)

    def test_postpre(self):
        # post/pre follow edges forward/backward; both accept a single
        # state or an iterable of states.
        self.S.add_from(range(5))
        self.S.graph.add_edges_from([(0, 1), (0, 2), (1, 3), (3, 4)])
        assert self.S.post(0) == {1, 2}
        assert self.S.post([0, 1]) == {1, 2, 3}
        assert self.S.pre(4) == {3}
        assert self.S.pre([1, 2, 4]) == {0, 3}

    def test_is_terminal(self):
        # A state with no outgoing edges is terminal.
        self.S.add_from([0, 1])
        self.S.graph.add_edge(0, 1)
        assert not self.S.is_terminal(0)
        assert self.S.is_terminal(1)
class Transitions_test(object):
    """Unit tests for labeled_graphs.Transitions (edge container)."""

    def setUp(self):
        # Graph with a Transitions container and five pre-added states.
        G = labeled_graphs.LabeledDiGraph()
        self.T = labeled_graphs.Transitions(G)
        G.transitions = self.T
        self.T.graph.states.add_from([1, 2, 3, 4, 5])

    def tearDown(self):
        self.T = None

    def test_len(self):
        assert len(self.T) == 0
        self.T.add(1, 2)
        assert len(self.T) == 1
        self.T.add(2, 3)
        assert len(self.T) == 2
        # Transitions should be unaffected by new states
        self.T.graph.states.add_from([10])
        assert len(self.T) == 2

    @raises(Exception)
    def test_missing_states(self):
        # With check=True, adding an edge between unknown states raises.
        self.T.add(10, 11, check=True)

    def test_add_from(self):
        self.T.add(1, 4)
        assert len(self.T) == 1 and set([t for t in self.T()]) == set([(1, 4)])
        assert_raises(Exception, self.T.add, 1, 4)
        assert len(self.T) == 1  # Edge already exists, so not added
        self.T.add_from([(5, 2), (4, 3)])
        assert len(self.T) == 3
        assert set([t for t in self.T()]) == set([(1, 4), (5, 2), (4, 3)])

    def test_add_comb(self):
        # add_comb adds the full cartesian product of sources x targets.
        self.T.add_comb([1, 2], [3, 4])
        assert len(self.T) == 4 and set([t for t in self.T()]) == set([(1, 3),
                                                                      (2, 3),
                                                                      (1, 4),
                                                                      (2, 4)])

    def test_remove(self):
        # This also tests remove_from
        self.T.add_from([(1, 2), (1, 3), (4, 3), (3, 2)], check=False)
        assert len(self.T) == 4
        self.T.remove(1, 2)
        assert len(self.T) == 3
        assert set([t for t in self.T()]) == set([(1, 3), (4, 3), (3, 2)])
        self.T.remove_from([(1, 2), (4, 3), (3, 2)])
        assert set([t for t in self.T()]) == set([(1, 3)])
class States_labeling_test(object):
    """Tests for States with a typed 'ap' (atomic propositions) label."""

    def setUp(self):
        # 'ap' labels take values from the power set of these propositions.
        node_label_def = [{
            'name': 'ap',
            'values': PowerSet({'p', 'q', 'r', 'x', 'a', 'b'})}]
        G = labeled_graphs.LabeledDiGraph(node_label_def)
        self.S_ap = labeled_graphs.States(G)
        G.states = self.S_ap

    def tearDown(self):
        self.S_ap = None

    @raises(Exception)
    def test_add_untyped_keys(self):
        # 'foo' is not a declared label name, so check=True must raise.
        self.S_ap.add(1, foo=MathSet(['p']), check=True)

    def test_add(self):
        self.S_ap.add(1, ap={'p'} )
        assert len(self.S_ap) == 1
        self.S_ap.add(2, ap={'p'} )
        assert len(self.S_ap) == 2
        self.S_ap.add(3, ap={'q', 'r'} )
        assert len(self.S_ap) == 3
        # Subscripting a state returns its label dict.
        assert self.S_ap[1] == {'ap': {'p'} }
        assert self.S_ap[3] == {'ap': {'q', 'r'} }
        # find filters states by label value.
        nodes = {u for u,l in self.S_ap.find(
            with_attr_dict={'ap': {'p'} })}
        assert nodes == set([1, 2])

    def test_add_from(self):
        self.S_ap.add_from([(0, {'ap':{'p'}}), (1, {'ap':{'q'} })])
        assert len(self.S_ap) == 2
        current_labels = [l["ap"] for (s,l) in self.S_ap(data=True)]
        assert len(current_labels) == 2 and \
            MathSet(current_labels) == MathSet([{'p'}, {'q'}])
        # check=False allows adding without label validation.
        self.S_ap.add_from([(10, {'ap':{'r'} }),
                            (11, {'ap':{'x'} })],
                           check=False)
        assert len(self.S_ap) == 4
        # Re-adding existing states overwrites labels, not the state count.
        self.S_ap.add_from([(10, {'ap':{'a'} }),
                            (11, {'ap':{'b'} })])
        assert len(self.S_ap) == 4
        current_labels = [l["ap"] for (s,l) in self.S_ap(data=True)]
        assert len(current_labels) == 4 and \
            MathSet(current_labels)== MathSet([{'p'}, {'q'}, {'a'}, {'b'}])
        assert MathSet([l["ap"] for (s,l) in self.S_ap(data=True)]) == MathSet(current_labels)

    def test_find(self):
        state_list = ["state"+str(i) for i in range(4)]
        state_list = zip(state_list,
                         [{"ap": L} for L in [{'p'}, {'q'}, {'p'}, {'q'}]])
        self.S_ap.add_from(state_list, check=False)
        # Lookup by a single state name.
        result = self.S_ap.find("state1")
        assert len(result) == 1 and result[0] == ("state1", {"ap": set(['q'])})
        # Lookup by a list of state names.
        result = self.S_ap.find(["state1", "state0"])
        assert len(result) == 2 and \
            ("state1", {"ap": set(['q'])}) in result and \
            ("state0", {"ap": set(['p'])}) in result
        # Lookup by label value.
        result = self.S_ap.find(with_attr_dict={"ap": {'p'}})
        print(result)
        assert len(result) == 2 and \
            set([s for (s, l) in result]) == set(["state0", "state2"])
        # Keyword form is equivalent to with_attr_dict.
        same_result = self.S_ap.find(ap={'p'})
        assert(same_result == result)
class LabeledDiGraph_test(object):
    """Tests for typed node/edge labels on LabeledDiGraph."""

    def setUp(self):
        p = PowerSet({1, 2})
        # Three label types: two with list-valued domains, one backed by
        # a PowerSet with an explicit setter.
        node_labeling = [
            {
                'name': 'month',
                'values': ['Jan', 'Feb']
            },
            {
                'name': 'day',
                'values': ['Mon', 'Tue']
            },
            {
                'name': 'comb',
                'values': p,
                'setter': p.math_set
            }
        ]
        edge_labeling = node_labeling
        G = labeled_graphs.LabeledDiGraph(node_labeling, edge_labeling)
        G.states.add_from({1, 2})
        G.transitions.add(1, 2, month='Jan', day='Mon')
        # An illegal 'day' value must be rejected.
        assert_raises(Exception, G.transitions.add,
                      1, 2, {'month': 'Jan', 'day': 'abc'})
        # note how untyped keys can be set directly via assignment,
        # whereas check=False is needed for G.add_node
        G.nodes[1]['mont'] = 'Feb'
        assert(G.nodes[1] == {'mont':'Feb'})
        # Multigraph edge access: G[u][v][key] is the edge attr dict.
        G[1][2][0]['day'] = 'Tue'
        assert(G[1][2][0] == {'month':'Jan', 'day':'Tue'})
        self.G = G

    @raises(AttributeError)
    def test_add_edge_only_typed(self):
        """check that untyped attribute keys are caught"""
        self.G.add_edge(1, 2, mo='Jan')

    def test_add_edge_untyped(self):
        """the untyped attribute key 'mo' should be allowed,
        because check=False
        """
        self.G.add_edge(1, 2, mo='Jan', check=False)
        assert(self.G[1][2][1] == {'mo':'Jan'})

    @raises(ValueError)
    def test_add_edge_illegal_value(self):
        self.G.add_edge(1, 2, month='haha')

    # @raises(ValueError)
    # def test_node_subscript_assign_illegal_value(self):
    #     self.G.nodes[1]['month'] = 'abc'

    # @raises(ValueError)
    # def test_edge_subscript_assign_illegal_value(self):
    #     self.G[1][2][0]['day'] = 'abc'
def open_fts_multiple_env_actions_test():
    """FTS attaches an env action set as an attribute when 'setter' is given."""
    env_modes = MathSet({'up', 'down'})
    env_choice = MathSet({'left', 'right'})
    action_defs = [
        dict(name='env_modes', values=env_modes, setter=True),
        dict(name='env_choices', values=env_choice),
    ]
    ts = FTS(action_defs)
    # With setter=True the very same MathSet object is exposed.
    assert ts.env_modes is env_modes
    # Without a setter no attribute is created.
    assert not hasattr(ts, 'env_choices')
    assert ts.sys_actions == MathSet()
def test_remove_deadends():
    """remove_deadends keeps cycles and self-loops, prunes dangling chains."""
    g = labeled_graphs.LabeledDiGraph()
    n = 5
    # Cycle 0 -> 1 -> ... -> n-1 -> 0: every node has a successor, so
    # nothing is removed.
    g.add_nodes_from(range(n))
    for u in range(n):
        g.add_edge(u, (u + 1) % n)
    g.remove_deadends()
    assert(len(g) == n)
    # Append a simple chain n .. 2n-1; the whole chain is a dead end
    # and gets pruned back to the cycle.
    g.add_nodes_from(range(n, 2*n))
    for u in range(n, 2*n-1):
        g.add_edge(u, u+1)
    assert(len(g) == 2*n)
    g.remove_deadends()
    assert(len(g) == n)
    # Break the cycle and add a self-loop at 0: only node 0 survives.
    g.remove_edge(4, 0)
    g.add_edge(0, 0)
    g.remove_deadends()
    assert(len(g) == 1)
STY: reformat long statements as shorter lines
#!/usr/bin/env python
"""Tests for transys.labeled_graphs (part of transys subpackage)"""
from __future__ import print_function
from nose.tools import raises, assert_raises
from tulip.transys import labeled_graphs
from tulip.transys.mathset import PowerSet, MathSet
from tulip.transys.transys import FTS
def str2singleton_test():
    """A bare string becomes a singleton set; a set passes through unchanged."""
    for arg, expected in (("p", set(["p"])), (set(["Cal"]), set(["Cal"]))):
        assert labeled_graphs.str2singleton(arg) == expected
def prepend_with_check(states, prepend_str, expected):
assert labeled_graphs.prepend_with(states, prepend_str) == expected
def prepend_with_test():
for (states, prepend_str, expected) in [([0,1], "s", ['s0', 's1']),
([], "s", []),
([0], "Cal", ["Cal0"]),
([0, 1], None, [0, 1])]:
yield prepend_with_check, states, prepend_str, expected
class States_test(object):
def setUp(self):
self.S = labeled_graphs.States(labeled_graphs.LabeledDiGraph())
def tearDown(self):
self.S = None
def test_contains(self):
# This also serves as a test for len
self.S.add(1)
self.S.add(2)
assert len(self.S) == 2
assert (2 in self.S) and (1 in self.S)
assert 3 not in self.S
def test_ior(self):
self.S.add(1)
other_S = labeled_graphs.States(labeled_graphs.LabeledDiGraph())
other_S.add(0)
assert len(self.S) == 1
assert set([s for s in self.S]) == {1}
self.S |= other_S
assert len(self.S) == 2
assert set([s for s in self.S]) == {1, 0}
def test_add(self):
self.S.add(1)
assert set([s for s in self.S]) == set([1])
self.S.add(2)
assert set([s for s in self.S]) == set([1, 2])
self.S.add("Cal")
assert set([s for s in self.S]) == set([1, 2, "Cal"])
def test_add_from(self):
self.S.add_from(range(3))
assert len(self.S) == 3
assert set([s for s in self.S]) == set(range(3))
self.S.add_from(["Cal", "tech"])
assert len(self.S) == 5
assert set([s for s in self.S]) == set(list(range(3))+["Cal", "tech"])
def test_remove(self):
# This also tests remove_from
self.S.add_from(range(4))
self.S.remove(1)
assert set([s for s in self.S]) == set([0, 2, 3])
self.S.remove_from([0, 3])
assert len(self.S) == 1 and 2 in self.S
def test_call(self):
self.S.add_from([-1, "Cal"])
S_imm_dat = self.S(data=True)
assert (len(S_imm_dat) == 2) and ((-1, dict()) in S_imm_dat) and \
(("Cal", dict()) in S_imm_dat)
def test_postpre(self):
self.S.add_from(range(5))
self.S.graph.add_edges_from([(0, 1), (0, 2), (1, 3), (3, 4)])
assert self.S.post(0) == {1, 2}
assert self.S.post([0, 1]) == {1, 2, 3}
assert self.S.pre(4) == {3}
assert self.S.pre([1, 2, 4]) == {0, 3}
def test_is_terminal(self):
self.S.add_from([0, 1])
self.S.graph.add_edge(0, 1)
assert not self.S.is_terminal(0)
assert self.S.is_terminal(1)
class Transitions_test(object):
def setUp(self):
G = labeled_graphs.LabeledDiGraph()
self.T = labeled_graphs.Transitions(G)
G.transitions = self.T
self.T.graph.states.add_from([1, 2, 3, 4, 5])
def tearDown(self):
self.T = None
def test_len(self):
assert len(self.T) == 0
self.T.add(1, 2)
assert len(self.T) == 1
self.T.add(2, 3)
assert len(self.T) == 2
# Transitions should be unaffected by new states
self.T.graph.states.add_from([10])
assert len(self.T) == 2
@raises(Exception)
def test_missing_states(self):
self.T.add(10, 11, check=True)
def test_add_from(self):
self.T.add(1, 4)
assert len(self.T) == 1 and set([t for t in self.T()]) == set([(1, 4)])
assert_raises(Exception, self.T.add, 1, 4)
assert len(self.T) == 1 # Edge already exists, so not added
self.T.add_from([(5, 2), (4, 3)])
assert len(self.T) == 3
assert set([t for t in self.T()]) == set([(1, 4), (5, 2), (4, 3)])
def test_add_comb(self):
self.T.add_comb([1, 2], [3, 4])
assert len(self.T) == 4 and set([t for t in self.T()]) == set([(1, 3),
(2, 3),
(1, 4),
(2, 4)])
def test_remove(self):
# This also tests remove_from
self.T.add_from([(1, 2), (1, 3), (4, 3), (3, 2)], check=False)
assert len(self.T) == 4
self.T.remove(1, 2)
assert len(self.T) == 3
assert set([t for t in self.T()]) == set([(1, 3), (4, 3), (3, 2)])
self.T.remove_from([(1, 2), (4, 3), (3, 2)])
assert set([t for t in self.T()]) == set([(1, 3)])
class States_labeling_test(object):
def setUp(self):
node_label_def = [{
'name': 'ap',
'values': PowerSet({'p', 'q', 'r', 'x', 'a', 'b'})}]
G = labeled_graphs.LabeledDiGraph(node_label_def)
self.S_ap = labeled_graphs.States(G)
G.states = self.S_ap
def tearDown(self):
self.S_ap = None
@raises(Exception)
def test_add_untyped_keys(self):
self.S_ap.add(1, foo=MathSet(['p']), check=True)
def test_add(self):
self.S_ap.add(1, ap={'p'} )
assert len(self.S_ap) == 1
self.S_ap.add(2, ap={'p'} )
assert len(self.S_ap) == 2
self.S_ap.add(3, ap={'q', 'r'} )
assert len(self.S_ap) == 3
assert self.S_ap[1] == {'ap': {'p'} }
assert self.S_ap[3] == {'ap': {'q', 'r'} }
nodes = {u for u,l in self.S_ap.find(
with_attr_dict={'ap': {'p'} })}
assert nodes == set([1, 2])
def test_add_from(self):
self.S_ap.add_from(
[(0, {'ap':{'p'}}),
(1, {'ap':{'q'} })])
assert len(self.S_ap) == 2
current_labels = [l["ap"] for (s, l) in self.S_ap(data=True)]
assert len(current_labels) == 2
assert MathSet(current_labels) == MathSet([{'p'}, {'q'}])
self.S_ap.add_from(
[(10, {'ap': {'r'}}),
(11, {'ap': {'x'}})],
check=False)
assert len(self.S_ap) == 4
self.S_ap.add_from(
[(10, {'ap': {'a'}}),
(11, {'ap': {'b'}})])
assert len(self.S_ap) == 4
current_labels = [l["ap"] for (s, l) in self.S_ap(data=True)]
assert len(current_labels) == 4
a = MathSet(current_labels)
b = MathSet([{'p'}, {'q'}, {'a'}, {'b'}])
assert a == b, (a, b)
a = MathSet([l["ap"] for (s, l) in self.S_ap(data=True)])
b = MathSet(current_labels)
assert a == b, (a, b)
def test_find(self):
state_list = ["state"+str(i) for i in range(4)]
state_list = zip(state_list,
[{"ap": L} for L in [{'p'}, {'q'}, {'p'}, {'q'}]])
self.S_ap.add_from(state_list, check=False)
result = self.S_ap.find("state1")
assert len(result) == 1 and result[0] == ("state1", {"ap": set(['q'])})
result = self.S_ap.find(["state1", "state0"])
assert len(result) == 2 and \
("state1", {"ap": set(['q'])}) in result and \
("state0", {"ap": set(['p'])}) in result
result = self.S_ap.find(with_attr_dict={"ap": {'p'}})
print(result)
assert len(result) == 2 and \
set([s for (s, l) in result]) == set(["state0", "state2"])
same_result = self.S_ap.find(ap={'p'})
assert(same_result == result)
class LabeledDiGraph_test(object):
def setUp(self):
p = PowerSet({1, 2})
node_labeling = [
{
'name': 'month',
'values': ['Jan', 'Feb']
},
{
'name': 'day',
'values': ['Mon', 'Tue']
},
{
'name': 'comb',
'values': p,
'setter': p.math_set
}
]
edge_labeling = node_labeling
G = labeled_graphs.LabeledDiGraph(node_labeling, edge_labeling)
G.states.add_from({1, 2})
G.transitions.add(1, 2, month='Jan', day='Mon')
assert_raises(Exception, G.transitions.add,
1, 2, {'month': 'Jan', 'day': 'abc'})
# note how untyped keys can be set directly via assignment,
# whereas check=False is needed for G.add_node
G.nodes[1]['mont'] = 'Feb'
assert(G.nodes[1] == {'mont':'Feb'})
G[1][2][0]['day'] = 'Tue'
assert(G[1][2][0] == {'month':'Jan', 'day':'Tue'})
self.G = G
@raises(AttributeError)
def test_add_edge_only_typed(self):
"""check that untyped attribute keys are caught"""
self.G.add_edge(1, 2, mo='Jan')
def test_add_edge_untyped(self):
"""the untyped attribute key 'mo' should be allowed,
because check=False
"""
self.G.add_edge(1, 2, mo='Jan', check=False)
assert(self.G[1][2][1] == {'mo':'Jan'})
@raises(ValueError)
def test_add_edge_illegal_value(self):
self.G.add_edge(1, 2, month='haha')
# @raises(ValueError)
# def test_node_subscript_assign_illegal_value(self):
# self.G.nodes[1]['month'] = 'abc'
# @raises(ValueError)
# def test_edge_subscript_assign_illegal_value(self):
# self.G[1][2][0]['day'] = 'abc'
def open_fts_multiple_env_actions_test():
env_modes = MathSet({'up', 'down'})
env_choice = MathSet({'left', 'right'})
env_actions = [
{
'name': 'env_modes',
'values': env_modes,
'setter': True},
{
'name': 'env_choices',
'values': env_choice}]
ts = FTS(env_actions)
assert(ts.env_modes is env_modes)
assert(not hasattr(ts, 'env_choices') )
assert(ts.sys_actions == MathSet() )
def test_remove_deadends():
    """remove_deadends keeps cycles and self-loops, prunes dangling chains."""
    g = labeled_graphs.LabeledDiGraph()
    n = 5
    # A pure cycle has no dead ends, so nothing is removed.
    g.add_nodes_from(range(n))
    for u in range(n):
        g.add_edge(u, (u + 1) % n)
    g.remove_deadends()
    assert(len(g) == n)
    # Attach a chain n .. 2n-1; the entire chain is pruned away.
    g.add_nodes_from(range(n, 2*n))
    for u in range(n, 2*n-1):
        g.add_edge(u, u+1)
    assert(len(g) == 2*n)
    g.remove_deadends()
    assert(len(g) == n)
    # Break the cycle and self-loop node 0: only node 0 survives.
    g.remove_edge(4, 0)
    g.add_edge(0, 0)
    g.remove_deadends()
    assert(len(g) == 1)
|
import copy
import logging
import bqplot.marks
import bqplot as bq
import bqplot.interacts
import ipywidgets as widgets
import vaex
from . import bqplot_image
import bqplot.pyplot as plt
import numpy as np
import vaex.events
from .plot import BackendBase
from .utils import debounced
logger = logging.getLogger("vaex.nb.bqplot")
class BqplotBackend(BackendBase):
    """Notebook plot backend that renders a vaex grid as a bqplot figure.

    NOTE(review): the source arrived with indentation stripped; the
    nesting below is reconstructed and should be verified upstream.
    """

    def __init__(self, figure=None, figure_key=None):
        # Apply the custom Image-mark patch before any figure is built.
        bqplot_image.patch()
        self._dirty = False
        self.figure_key = figure_key
        self.figure = figure
        self.signal_limits = vaex.events.Signal()
        self._cleanups = []

    def update_image(self, rgb_image):
        """Push a new RGBA frame into the image mark and stretch it over
        the current axis limits."""
        src = vaex.image.rgba_to_url(rgb_image)
        self.image.src = src
        # self.scale_x.min, self.scale_x.max = self.limits[0]
        # self.scale_y.min, self.scale_y.max = self.limits[1]
        # Anchor top-left; the negative height makes the image fill
        # downward from y = max.
        self.image.x = self.scale_x.min
        self.image.y = self.scale_y.max
        self.image.width = self.scale_x.max - self.scale_x.min
        self.image.height = -(self.scale_y.max - self.scale_y.min)

    def create_widget(self, output, plot, dataset, limits):
        """Build the bqplot figure, scales, image mark, pan/zoom
        interaction, and the containing ipywidgets layout."""
        self.plot = plot
        self.output = output
        self.dataset = dataset
        self.limits = np.array(limits).tolist()
        self.scale_x = bqplot.LinearScale(min=limits[0][0], max=limits[0][1])
        self.scale_y = bqplot.LinearScale(min=limits[1][0], max=limits[1][1])
        # Auxiliary unit scales used by the scatter marks below.
        self.scale_rotation = bqplot.LinearScale(min=0, max=1)
        self.scale_size = bqplot.LinearScale(min=0, max=1)
        self.scale_opacity = bqplot.LinearScale(min=0, max=1)
        self.scales = {'x': self.scale_x, 'y': self.scale_y, 'rotation': self.scale_rotation,
                       'size': self.scale_size, 'opacity': self.scale_opacity}
        margin = {'bottom': 30, 'left': 60, 'right': 0, 'top': 0}
        self.figure = plt.figure(self.figure_key, fig=self.figure, scales=self.scales, fig_margin=margin)
        plt.figure(fig=self.figure)
        self.figure.padding_y = 0
        x = np.arange(0, 10)
        y = x ** 2
        # Invisible scatter; presumably forces bqplot to register all
        # scales — TODO confirm why this is needed.
        self._fix_scatter = s = plt.scatter(x, y, visible=False, rotation=x, scales=self.scales)
        self._fix_scatter.visible = False
        # self.scale_rotation = self.scales['rotation']
        src = ""  # vaex.image.rgba_to_url(self._create_rgb_grid())
        # self.scale_x.min, self.scale_x.max = self.limits[0]
        # self.scale_y.min, self.scale_y.max = self.limits[1]
        self.image = bqplot_image.Image(scales=self.scales, src=src, x=self.scale_x.min, y=self.scale_y.max,
                                        width=self.scale_x.max - self.scale_x.min, height=-(self.scale_y.max - self.scale_y.min))
        self.figure.marks = self.figure.marks + [self.image]
        # self.figure.animation_duration = 500
        self.figure.layout.width = '100%'
        self.figure.layout.max_width = '500px'
        self.scatter = s = plt.scatter(x, y, visible=False, rotation=x, scales=self.scales, size=x, marker="arrow")
        self.panzoom = bqplot.PanZoom(scales={'x': [self.scale_x], 'y': [self.scale_y]})
        self.figure.interaction = self.panzoom
        # self.figure.axes[0].label = self.x
        # self.figure.axes[1].label = self.y
        # Keep self.limits in sync with user pan/zoom, and vice versa.
        self.scale_x.observe(self._update_limits, "min")
        self.scale_x.observe(self._update_limits, "max")
        self.scale_y.observe(self._update_limits, "min")
        self.scale_y.observe(self._update_limits, "max")
        self.observe(self._update_scales, "limits")
        self.image.observe(self._on_view_count_change, 'view_count')
        self.control_widget = widgets.VBox()
        self.widget = widgets.VBox(children=[self.control_widget, self.figure])
        self.create_tools()

    def _update_limits(self, *args):
        """Scale-trait observer: copy the current x/y ranges into self.limits."""
        with self.output:
            limits = copy.deepcopy(self.limits)
            # Only entries 0 and 1 (x and y) track the scales.
            limits[0:2] = [[scale.min, scale.max] for scale in [self.scale_x, self.scale_y]]
            self.limits = limits

    def _update_scales(self, *args):
        """Limits-trait observer: push self.limits back into the scales."""
        with self.scale_x.hold_trait_notifications():
            self.scale_x.min = self.limits[0][0]
            self.scale_x.max = self.limits[0][1]
        with self.scale_y.hold_trait_notifications():
            self.scale_y.min = self.limits[1][0]
            self.scale_y.max = self.limits[1][1]
        # self.update_grid()

    def create_tools(self):
        """Assemble the toolbar: pan/zoom toggle, brush select, clear
        selection, selection-mode dropdown."""
        self.tools = []
        tool_actions = []
        tool_actions_map = {u"pan/zoom": self.panzoom}
        tool_actions.append(u"pan/zoom")

        # self.control_widget.set_title(0, "Main")
        self._main_widget = widgets.VBox()
        self._main_widget_1 = widgets.HBox()
        self._main_widget_2 = widgets.HBox()
        if 1:  # tool_select:
            self.brush = bqplot.interacts.BrushSelector(x_scale=self.scale_x, y_scale=self.scale_y, color="green")
            tool_actions_map["select"] = self.brush
            tool_actions.append("select")
            self.brush.observe(self.update_brush, ["selected", "selected_x"])
            # fig.interaction = brush
            # callback = self.dataset.signal_selection_changed.connect(lambda dataset: update_image())
            # callback = self.dataset.signal_selection_changed.connect(lambda *x: self.update_grid())

            # def cleanup(callback=callback):
            #     self.dataset.signal_selection_changed.disconnect(callback=callback)
            # self._cleanups.append(cleanup)

            self.button_select_nothing = widgets.Button(description="", icon="trash-o")
            self.button_reset = widgets.Button(description="", icon="refresh")
            import copy
            self.start_limits = copy.deepcopy(self.limits)

            def reset(*args):
                # Restore the initial limits and re-render the grid.
                self.limits = copy.deepcopy(self.start_limits)
                with self.scale_y.hold_trait_notifications():
                    self.scale_y.min, self.scale_y.max = self.limits[1]
                with self.scale_x.hold_trait_notifications():
                    self.scale_x.min, self.scale_x.max = self.limits[0]
                self.plot.update_grid()
            self.button_reset.on_click(reset)

            self.button_select_nothing.on_click(lambda *ignore: self.plot.select_nothing())
            self.tools.append(self.button_select_nothing)
            self.modes_names = "replace and or xor subtract".split()
            self.modes_labels = "replace and or xor subtract".split()
            self.button_selection_mode = widgets.Dropdown(description='select', options=self.modes_labels)
            self.tools.append(self.button_selection_mode)

            def change_interact(*args):
                # print "change", args
                self.figure.interaction = tool_actions_map[self.button_action.value]

            tool_actions = ["pan/zoom", "select"]
            # tool_actions = [("m", "m"), ("b", "b")]
            self.button_action = widgets.ToggleButtons(description='', options=[(action, action) for action in tool_actions],
                                                       icons=["arrows", "pencil-square-o"])
            self.button_action.observe(change_interact, "value")
            self.tools.insert(0, self.button_action)
            self.button_action.value = "pan/zoom"  # tool_actions[-1]
            if len(self.tools) == 1:
                tools = []
            # self._main_widget_1.children += (self.button_reset,)
            self._main_widget_1.children += (self.button_action,)
            self._main_widget_1.children += (self.button_select_nothing,)
            # self._main_widget_2.children += (self.button_selection_mode,)
        self._main_widget.children = [self._main_widget_1, self._main_widget_2]
        self.control_widget.children += (self._main_widget,)
        self._update_grid_counter = 0  # keep track of t
        self._update_grid_counter_scheduled = 0  # keep track of t

    def _on_view_count_change(self, *args):
        """When the image becomes visible again, flush any pending update."""
        with self.output:
            logger.debug("views: %d", self.image.view_count)
            if self._dirty and self.image.view_count > 0:
                try:
                    logger.debug("was dirty, and needs an update")
                    self.update()
                finally:
                    self._dirty = False

    @debounced(0.5, method=True)
    def update_brush(self, *args):
        """Debounced brush callback: translate the brushed rectangle into
        a dataset selection."""
        with self.output:
            if not self.brush.brushing:  # if we ended brushing, reset it
                self.figure.interaction = None
            if self.brush.selected is not None:
                (x1, y1), (x2, y2) = self.brush.selected
                mode = self.modes_names[self.modes_labels.index(self.button_selection_mode.value)]
                self.plot.select_rectangle(x1, y1, x2, y2, mode=mode)
            else:
                self.dataset.select_nothing()
            if not self.brush.brushing:  # but then put it back again so the rectangle is gone,
                self.figure.interaction = self.brush
vaex-jupyter: set a min width for the bqplot figure
import copy
import logging
import bqplot.marks
import bqplot as bq
import bqplot.interacts
import ipywidgets as widgets
import vaex
from . import bqplot_image
import bqplot.pyplot as plt
import numpy as np
import vaex.events
from .plot import BackendBase
from .utils import debounced
logger = logging.getLogger("vaex.nb.bqplot")
class BqplotBackend(BackendBase):
def __init__(self, figure=None, figure_key=None):
    """Backend rendering vaex plots with bqplot in the notebook."""
    # Apply the custom Image-mark patch before any figure is created.
    bqplot_image.patch()
    self.figure_key = figure_key
    self.figure = figure
    self._dirty = False
    self._cleanups = []
    self.signal_limits = vaex.events.Signal()
def update_image(self, rgb_image):
    """Push a new RGBA frame into the image mark and stretch it over
    the current axis limits."""
    src = vaex.image.rgba_to_url(rgb_image)
    self.image.src = src
    # self.scale_x.min, self.scale_x.max = self.limits[0]
    # self.scale_y.min, self.scale_y.max = self.limits[1]
    # Anchor at the top-left corner; the negative height makes the
    # image fill downward from y = max.
    self.image.x = self.scale_x.min
    self.image.y = self.scale_y.max
    self.image.width = self.scale_x.max - self.scale_x.min
    self.image.height = -(self.scale_y.max - self.scale_y.min)
def create_widget(self, output, plot, dataset, limits):
    """Build the bqplot figure, scales, image mark, pan/zoom interaction,
    and the containing ipywidgets layout.

    NOTE(review): indentation reconstructed from a flattened source.
    """
    self.plot = plot
    self.output = output
    self.dataset = dataset
    self.limits = np.array(limits).tolist()
    self.scale_x = bqplot.LinearScale(min=limits[0][0], max=limits[0][1])
    self.scale_y = bqplot.LinearScale(min=limits[1][0], max=limits[1][1])
    # Auxiliary unit scales consumed by the scatter marks below.
    self.scale_rotation = bqplot.LinearScale(min=0, max=1)
    self.scale_size = bqplot.LinearScale(min=0, max=1)
    self.scale_opacity = bqplot.LinearScale(min=0, max=1)
    self.scales = {'x': self.scale_x, 'y': self.scale_y, 'rotation': self.scale_rotation,
                   'size': self.scale_size, 'opacity': self.scale_opacity}
    margin = {'bottom': 30, 'left': 60, 'right': 0, 'top': 0}
    self.figure = plt.figure(self.figure_key, fig=self.figure, scales=self.scales, fig_margin=margin)
    # Keep the figure from collapsing below a usable size.
    self.figure.layout.min_width = '900px'
    plt.figure(fig=self.figure)
    self.figure.padding_y = 0
    x = np.arange(0, 10)
    y = x ** 2
    # Invisible scatter; presumably forces bqplot to register all the
    # scales — TODO confirm why this workaround is needed.
    self._fix_scatter = s = plt.scatter(x, y, visible=False, rotation=x, scales=self.scales)
    self._fix_scatter.visible = False
    # self.scale_rotation = self.scales['rotation']
    src = ""  # vaex.image.rgba_to_url(self._create_rgb_grid())
    # self.scale_x.min, self.scale_x.max = self.limits[0]
    # self.scale_y.min, self.scale_y.max = self.limits[1]
    self.image = bqplot_image.Image(scales=self.scales, src=src, x=self.scale_x.min, y=self.scale_y.max,
                                    width=self.scale_x.max - self.scale_x.min, height=-(self.scale_y.max - self.scale_y.min))
    self.figure.marks = self.figure.marks + [self.image]
    # self.figure.animation_duration = 500
    self.figure.layout.width = '100%'
    self.figure.layout.max_width = '500px'
    self.scatter = s = plt.scatter(x, y, visible=False, rotation=x, scales=self.scales, size=x, marker="arrow")
    self.panzoom = bqplot.PanZoom(scales={'x': [self.scale_x], 'y': [self.scale_y]})
    self.figure.interaction = self.panzoom
    # self.figure.axes[0].label = self.x
    # self.figure.axes[1].label = self.y
    # Two-way sync between the scales and self.limits.
    self.scale_x.observe(self._update_limits, "min")
    self.scale_x.observe(self._update_limits, "max")
    self.scale_y.observe(self._update_limits, "min")
    self.scale_y.observe(self._update_limits, "max")
    self.observe(self._update_scales, "limits")
    self.image.observe(self._on_view_count_change, 'view_count')
    self.control_widget = widgets.VBox()
    self.widget = widgets.VBox(children=[self.control_widget, self.figure])
    self.create_tools()
def _update_limits(self, *args):
    """Scale-trait observer: copy the current x/y ranges into self.limits."""
    with self.output:
        limits = copy.deepcopy(self.limits)
        # Only entries 0 and 1 (x and y) mirror the scales.
        limits[0:2] = [[scale.min, scale.max] for scale in [self.scale_x, self.scale_y]]
        self.limits = limits
def _update_scales(self, *args):
    """Trait observer: push self.limits into the bqplot x/y scales.

    hold_trait_notifications batches the min/max updates so observers fire once.
    """
    with self.scale_x.hold_trait_notifications():
        self.scale_x.min, self.scale_x.max = self.limits[0][0], self.limits[0][1]
    with self.scale_y.hold_trait_notifications():
        self.scale_y.min, self.scale_y.max = self.limits[1][0], self.limits[1][1]
    # self.update_grid()
def create_tools(self):
    """Build the toolbar: pan/zoom and rectangle-select interactions, the
    clear-selection button, the selection-mode dropdown, and the action
    toggle, then attach everything to self.control_widget.
    """
    self.tools = []
    tool_actions = []
    tool_actions_map = {u"pan/zoom": self.panzoom}
    tool_actions.append(u"pan/zoom")
    # self.control_widget.set_title(0, "Main")
    self._main_widget = widgets.VBox()
    self._main_widget_1 = widgets.HBox()
    self._main_widget_2 = widgets.HBox()
    if 1:  # tool_select:
        # rectangle selector working in data coordinates of the two scales
        self.brush = bqplot.interacts.BrushSelector(x_scale=self.scale_x, y_scale=self.scale_y, color="green")
        tool_actions_map["select"] = self.brush
        tool_actions.append("select")
        self.brush.observe(self.update_brush, ["selected", "selected_x"])
        # fig.interaction = brush
        # callback = self.dataset.signal_selection_changed.connect(lambda dataset: update_image())
        # callback = self.dataset.signal_selection_changed.connect(lambda *x: self.update_grid())
        # def cleanup(callback=callback):
        #     self.dataset.signal_selection_changed.disconnect(callback=callback)
        # self._cleanups.append(cleanup)
        self.button_select_nothing = widgets.Button(description="", icon="trash-o")
        self.button_reset = widgets.Button(description="", icon="refresh")
        import copy  # function-local import, used to snapshot the limits
        self.start_limits = copy.deepcopy(self.limits)

        def reset(*args):
            # restore the initial limits and push them into both scales
            self.limits = copy.deepcopy(self.start_limits)
            with self.scale_y.hold_trait_notifications():
                self.scale_y.min, self.scale_y.max = self.limits[1]
            with self.scale_x.hold_trait_notifications():
                self.scale_x.min, self.scale_x.max = self.limits[0]
            self.plot.update_grid()
        self.button_reset.on_click(reset)
        self.button_select_nothing.on_click(lambda *ignore: self.plot.select_nothing())
        self.tools.append(self.button_select_nothing)
        # parallel lists: internal mode names and the labels shown in the dropdown
        self.modes_names = "replace and or xor subtract".split()
        self.modes_labels = "replace and or xor subtract".split()
        self.button_selection_mode = widgets.Dropdown(description='select', options=self.modes_labels)
        self.tools.append(self.button_selection_mode)

        def change_interact(*args):
            # print "change", args
            self.figure.interaction = tool_actions_map[self.button_action.value]
        # NOTE(review): rebinds (shadows) the tool_actions list built above
        tool_actions = ["pan/zoom", "select"]
        # tool_actions = [("m", "m"), ("b", "b")]
        self.button_action = widgets.ToggleButtons(description='', options=[(action, action) for action in tool_actions],
                                                   icons=["arrows", "pencil-square-o"])
        self.button_action.observe(change_interact, "value")
        self.tools.insert(0, self.button_action)
        self.button_action.value = "pan/zoom"  # tool_actions[-1]
        if len(self.tools) == 1:
            # NOTE(review): dead in practice -- self.tools holds 3 widgets here
            tools = []
        # self._main_widget_1.children += (self.button_reset,)
        self._main_widget_1.children += (self.button_action,)
        self._main_widget_1.children += (self.button_select_nothing,)
        # self._main_widget_2.children += (self.button_selection_mode,)
        self._main_widget.children = [self._main_widget_1, self._main_widget_2]
    self.control_widget.children += (self._main_widget,)
    self._update_grid_counter = 0  # keep track of t
    self._update_grid_counter_scheduled = 0  # keep track of t
def _on_view_count_change(self, *args):
    """Run any pending update once the image mark actually becomes visible."""
    with self.output:
        views = self.image.view_count
        logger.debug("views: %d", views)
        if self._dirty and views > 0:
            try:
                logger.debug("was dirty, and needs an update")
                self.update()
            finally:
                # clear the flag even if update() raises, so we don't retry forever
                self._dirty = False
@debounced(0.5, method=True)
def update_brush(self, *args):
    """Debounced handler for brush (rectangle) selection changes.

    Applies the selected rectangle to the plot using the mode chosen in the
    dropdown; when the drag has ended, the brush interaction is detached and
    re-attached so the selection rectangle disappears from the figure.
    """
    with self.output:
        if not self.brush.brushing:  # if we ended brushing, reset it
            self.figure.interaction = None
        if self.brush.selected is not None:
            # selected is ((x1, y1), (x2, y2)) in data coordinates
            (x1, y1), (x2, y2) = self.brush.selected
            # map the dropdown label back to the internal mode name
            mode = self.modes_names[self.modes_labels.index(self.button_selection_mode.value)]
            self.plot.select_rectangle(x1, y1, x2, y2, mode=mode)
        else:
            self.dataset.select_nothing()
        if not self.brush.brushing:  # but then put it back again so the rectangle is gone,
            self.figure.interaction = self.brush
|
"""
Example usage:
km = KM('my-api-key')
km.identify('simon')
km.record('an event', {'attr': '1'})
"""
import urllib
import socket
import httplib
from datetime import datetime
class KM(object):
    """Minimal KISSmetrics tracking client.

    Example usage:
        km = KM('my-api-key')
        km.identify('simon')
        km.record('an event', {'attr': '1'})
    """

    def __init__(self, key, host='trk.kissmetrics.com:80', http_timeout=2, logging=True):
        # `logging` is a plain bool flag here (it shadows the stdlib module
        # name only within this signature), not the logging module.
        self._key = key
        self._host = host
        self._http_timeout = http_timeout
        self._logging = logging
        # initialize so check_identify() raises the intended Exception
        # instead of an AttributeError when identify() was never called
        self._id = None

    def identify(self, id):
        """Associate all subsequent events with this user id."""
        self._id = id

    def record(self, action, props=None):
        """Record a named event with optional properties.

        `props=None` replaces the old mutable default `props={}`, which was
        shared across calls so one call's '_n' entry leaked into the next;
        copying also avoids mutating the caller's dict.
        """
        self.check_id_key()
        props = dict(props) if props else {}
        if isinstance(action, dict):
            self.set(action)
        props.update({'_n': action})
        self.request('e', props)

    def set(self, data):
        """Set properties on the identified user."""
        self.check_id_key()
        self.request('s', data)

    def alias(self, name, alias_to):
        """Alias one identity to another; does not require identify()."""
        self.check_init()
        self.request('a', {'_n': alias_to, '_p': name}, False)

    def log_file(self):
        return '/tmp/kissmetrics_error.log'

    def reset(self):
        """Forget both the user id and the API key."""
        self._id = None
        self._key = None

    def check_identify(self):
        if self._id is None:
            # parenthesized raise: valid on both Python 2 and Python 3
            raise Exception("Need to identify first (KM.identify <user>)")

    def check_init(self):
        if self._key is None:
            raise Exception("Need to initialize first (KM.init <your_key>)")

    def now(self):
        return datetime.utcnow()

    def check_id_key(self):
        self.check_init()
        self.check_identify()

    def logm(self, msg):
        """Best-effort append to the error log; never raises on I/O failure."""
        if not self._logging:
            return
        msg = self.now().strftime('<%c> ') + msg
        try:
            fh = open(self.log_file(), 'a')
            try:
                fh.write(msg)
            finally:
                fh.close()  # close the handle even if write() fails
        except IOError:
            pass  # just discard at this point

    def request(self, type, data, update=True):
        """Fire a GET to the tracking endpoint; failures are logged, not raised.

        The connection is now closed in a `finally`, so the socket is no
        longer leaked when request()/getresponse() raises.
        """
        # if user has defined their own _t, then include necessary _d
        if '_t' in data:
            data['_d'] = 1
        else:
            data['_t'] = self.now().strftime('%s')
        # add customer key to data sent
        data['_k'] = self._key
        if update:
            data['_p'] = self._id
        connection = None
        try:
            connection = httplib.HTTPConnection(self._host, timeout=self._http_timeout)
            connection.request('GET', '/%s?%s' % (type, urllib.urlencode(data)))
            connection.getresponse()
        except Exception:
            self.logm("Could not transmit to " + self._host)
        finally:
            # guard: connection stays None if HTTPConnection() itself raised
            if connection is not None:
                connection.close()
Close the connection even if we don't reach the end of the `try` block.
"""
Example usage:
km = KM('my-api-key')
km.identify('simon')
km.record('an event', {'attr': '1'})
"""
import urllib
import socket
import httplib
from datetime import datetime
class KM(object):
    """Minimal KISSmetrics tracking client.

    Example usage:
        km = KM('my-api-key')
        km.identify('simon')
        km.record('an event', {'attr': '1'})
    """

    def __init__(self, key, host='trk.kissmetrics.com:80', http_timeout=2, logging=True):
        # `logging` is a plain bool flag here (it shadows the stdlib module
        # name only within this signature), not the logging module.
        self._key = key
        self._host = host
        self._http_timeout = http_timeout
        self._logging = logging
        # initialize so check_identify() raises the intended Exception
        # instead of an AttributeError when identify() was never called
        self._id = None

    def identify(self, id):
        """Associate all subsequent events with this user id."""
        self._id = id

    def record(self, action, props=None):
        """Record a named event with optional properties.

        `props=None` replaces the old mutable default `props={}`, which was
        shared across calls so one call's '_n' entry leaked into the next;
        copying also avoids mutating the caller's dict.
        """
        self.check_id_key()
        props = dict(props) if props else {}
        if isinstance(action, dict):
            self.set(action)
        props.update({'_n': action})
        self.request('e', props)

    def set(self, data):
        """Set properties on the identified user."""
        self.check_id_key()
        self.request('s', data)

    def alias(self, name, alias_to):
        """Alias one identity to another; does not require identify()."""
        self.check_init()
        self.request('a', {'_n': alias_to, '_p': name}, False)

    def log_file(self):
        return '/tmp/kissmetrics_error.log'

    def reset(self):
        """Forget both the user id and the API key."""
        self._id = None
        self._key = None

    def check_identify(self):
        if self._id is None:
            # parenthesized raise: valid on both Python 2 and Python 3
            raise Exception("Need to identify first (KM.identify <user>)")

    def check_init(self):
        if self._key is None:
            raise Exception("Need to initialize first (KM.init <your_key>)")

    def now(self):
        return datetime.utcnow()

    def check_id_key(self):
        self.check_init()
        self.check_identify()

    def logm(self, msg):
        """Best-effort append to the error log; never raises on I/O failure."""
        if not self._logging:
            return
        msg = self.now().strftime('<%c> ') + msg
        try:
            fh = open(self.log_file(), 'a')
            try:
                fh.write(msg)
            finally:
                fh.close()  # close the handle even if write() fails
        except IOError:
            pass  # just discard at this point

    def request(self, type, data, update=True):
        """Fire a GET to the tracking endpoint; failures are logged, not raised.

        The previous `finally: connection.close()` raised a NameError when
        HTTPConnection() itself failed, because `connection` was never bound;
        the None guard below fixes that while still always closing the socket.
        """
        # if user has defined their own _t, then include necessary _d
        if '_t' in data:
            data['_d'] = 1
        else:
            data['_t'] = self.now().strftime('%s')
        # add customer key to data sent
        data['_k'] = self._key
        if update:
            data['_p'] = self._id
        connection = None
        try:
            connection = httplib.HTTPConnection(self._host, timeout=self._http_timeout)
            connection.request('GET', '/%s?%s' % (type, urllib.urlencode(data)))
            connection.getresponse()
        except Exception:
            self.logm("Could not transmit to " + self._host)
        finally:
            if connection is not None:
                connection.close()
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2015-2020 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys

import numpy

from openquake.qa_tests_data.scenario_risk import (
    case_1, case_2, case_2d, case_1g, case_1h, case_3, case_4, case_5,
    case_6a, case_7, case_8, case_10, occupants, case_master,
    case_shakemap)
from openquake.baselib.general import gettemp
from openquake.hazardlib import InvalidFile
from openquake.commonlib.logictree import InvalidLogicTree
from openquake.calculators.tests import CalculatorTestCase
from openquake.calculators.views import view
from openquake.calculators.export import export
from openquake.calculators.extract import extract
aac = numpy.testing.assert_allclose
def tot_loss(dstore):
    """Return the total loss summed over all rows of loss_data/data."""
    return dstore['loss_data/data']['loss'].sum(axis=0)
class ScenarioRiskTestCase(CalculatorTestCase):
    """Scenario-risk QA tests: each test runs a small calculation and
    compares exported/extracted outputs against the expected files."""

    def test_case_1(self):
        out = self.run_calc(case_1.__file__, 'job_risk.ini', exports='csv')
        [fname] = out['agglosses', 'csv']
        self.assertEqualFiles('expected/agg.csv', fname)

        # check the exported GMFs
        [fname, sitefile] = export(('gmf_data', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/gmf-FromFile.csv', fname)
        self.assertEqualFiles('expected/sites.csv', sitefile)
        [fname] = out['losses_by_event', 'csv']
        self.assertEqualFiles('expected/losses_by_event.csv', fname)

    def test_case_2(self):
        out = self.run_calc(case_2.__file__, 'job_risk.ini', exports='csv')
        [fname] = out['agglosses', 'csv']
        self.assertEqualFiles('expected/agg.csv', fname)

    def test_case_2d(self):
        # time_event not specified in job_h.ini but specified in job_r.ini
        out = self.run_calc(case_2d.__file__, 'job_h.ini,job_r.ini',
                            exports='csv')
        # this is also a case with a single site but an exposure grid,
        # to test a corner case
        [fname] = out['avg_losses-rlzs', 'csv']
        self.assertEqualFiles('expected/losses_by_asset.csv', fname)

        # test agglosses
        tot = extract(self.calc.datastore, 'agg_losses/occupants')
        aac(tot.array, [0.031716], atol=1E-5)

        # test agglosses with *
        tbl = extract(self.calc.datastore, 'agg_losses/occupants?taxonomy=*')
        self.assertEqual(tbl.array.shape, (1, 1))  # 1 taxonomy, 1 rlz

    def test_case_3(self):
        # a4 has a missing cost
        out = self.run_calc(case_3.__file__, 'job.ini', exports='csv')
        [fname] = out['avg_losses-rlzs', 'csv']
        self.assertEqualFiles('expected/asset-loss.csv', fname)
        [fname] = out['agglosses', 'csv']
        self.assertEqualFiles('expected/agg_loss.csv', fname)

    def test_case_4(self):
        # this test is sensitive to the ordering of the epsilons
        # in openquake.riskinput.make_eps
        out = self.run_calc(case_4.__file__, 'job.ini', exports='csv')
        fname = gettemp(view('totlosses', self.calc.datastore))
        self.assertEqualFiles('expected/totlosses.txt', fname)
        [fname] = out['agglosses', 'csv']
        self.assertEqualFiles('expected/agglosses.csv', fname, delta=1E-6)

    def test_occupants(self):
        out = self.run_calc(occupants.__file__, 'job_haz.ini,job_risk.ini',
                            exports='csv')
        [fname] = out['avg_losses-rlzs', 'csv']
        self.assertEqualFiles('expected/asset-loss.csv', fname)
        [fname] = out['agglosses', 'csv']
        self.assertEqualFiles('expected/agg_loss.csv', fname)

    def test_case_5(self):
        # case with site model and 11 sites filled out of 17
        out = self.run_calc(case_5.__file__, 'job.ini', exports='csv')
        [fname] = out['avg_losses-rlzs', 'csv']
        self.assertEqualFiles('expected/losses_by_asset.csv', fname,
                              delta=2E-6)
        # check pandas
        df = self.calc.datastore.read_df('avg_losses-rlzs', 'asset_id')
        self.assertEqual(list(df.columns), ['rlz', 'loss_type', 'value'])

    def test_case_6a(self):
        # case with two gsims
        self.run_calc(case_6a.__file__, 'job_haz.ini,job_risk.ini',
                      exports='csv')
        [f] = export(('agglosses', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/agg_structural.csv', f)

        # testing the totlosses view
        dstore = self.calc.datastore
        fname = gettemp(view('totlosses', dstore))
        self.assertEqualFiles('expected/totlosses.txt', fname)

        # testing portfolio_losses
        fname = gettemp(view('portfolio_losses', dstore))
        self.assertEqualFiles('expected/portfolio_losses.txt', fname)

        # two equal gsims
        with self.assertRaises(InvalidLogicTree):
            self.run_calc(case_6a.__file__, 'job_haz.ini',
                          gsim_logic_tree_file='wrong_gmpe_logic_tree.xml')

    def test_case_1g(self):
        out = self.run_calc(case_1g.__file__, 'job_haz.ini,job_risk.ini',
                            exports='csv')
        [fname] = out['agglosses', 'csv']
        self.assertEqualFiles('expected/agg-gsimltp_@.csv', fname)

    def test_case_1h(self):
        # this is a case with 2 assets spawning 2 tasks
        out = self.run_calc(case_1h.__file__, 'job.ini', exports='csv')
        [fname] = out['avg_losses-rlzs', 'csv']
        self.assertEqualFiles('expected/losses_by_asset.csv', fname)

        # with a single task
        out = self.run_calc(case_1h.__file__, 'job.ini', exports='csv',
                            concurrent_tasks='0')
        [fname] = out['avg_losses-rlzs', 'csv']
        self.assertEqualFiles('expected/losses_by_asset.csv', fname)

    def test_case_master(self):
        # a case with two GSIMs
        self.run_calc(case_master.__file__, 'job.ini', exports='npz')
        # check realizations
        [fname] = export(('realizations', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/realizations.csv', fname)

        # check losses by taxonomy
        agglosses = extract(self.calc.datastore, 'agg_losses/structural?'
                            'taxonomy=*').array  # shape (T, R) = (3, 2)
        # skip the str(array) comparison on macOS, where the printed numpy
        # representation differs and makes this check fail spuriously
        if sys.platform != 'darwin':
            self.assertEqualFiles('expected/agglosses_taxo.txt',
                                  gettemp(str(agglosses)))

        # extract agglosses with a * and a selection
        obj = extract(self.calc.datastore, 'agg_losses/structural?'
                      'state=*&cresta=0.11')
        self.assertEqual(obj.selected, [b'state=*', b'cresta=0.11'])
        self.assertEqual(obj.tags, [b'state=01'])
        aac(obj.array, [[2611.7139]])  # extracted from avg_losses-stats

    def test_case_7(self):
        # check independence from concurrent_tasks
        self.run_calc(case_7.__file__, 'job.ini', concurrent_tasks='10')
        tot10 = tot_loss(self.calc.datastore)
        self.run_calc(case_7.__file__, 'job.ini', concurrent_tasks='20')
        tot20 = tot_loss(self.calc.datastore)
        aac(tot10, tot20, atol=.0001)  # must be around 230.0107

    def test_case_8(self):
        # a complex scenario_risk from GMFs where the hazard sites are
        # not in the asset locations
        self.run_calc(case_8.__file__, 'job.ini')
        agglosses = extract(self.calc.datastore, 'agg_losses/structural')
        aac(agglosses.array, [1159325.6])

        # make sure the fullreport can be extracted
        view('fullreport', self.calc.datastore)

    def test_case_10(self):
        # missing occupants in the exposure
        with self.assertRaises(InvalidFile):
            self.run_calc(case_10.__file__, 'job.ini')

    def test_case_shakemap(self):
        self.run_calc(case_shakemap.__file__, 'pre-job.ini')
        self.run_calc(case_shakemap.__file__, 'job.ini',
                      hazard_calculation_id=str(self.calc.datastore.calc_id))
        sitecol = self.calc.datastore['sitecol']
        self.assertEqual(len(sitecol), 9)
        gmfdict = dict(extract(self.calc.datastore, 'gmf_data'))
        gmfa = gmfdict['rlz-000']
        self.assertEqual(gmfa.shape, (9,))
        self.assertEqual(gmfa.dtype.names,
                         ('lon', 'lat', 'PGA', 'SA(0.3)', 'SA(1.0)'))
        [fname] = export(('agglosses', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/agglosses.csv', fname)
        [fname] = export(('realizations', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/realizations.csv', fname)
Skip a platform-dependent test assertion on Darwin (macOS).
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2015-2020 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import numpy
from openquake.qa_tests_data.scenario_risk import (
case_1, case_2, case_2d, case_1g, case_1h, case_3, case_4, case_5,
case_6a, case_7, case_8, case_10, occupants, case_master,
case_shakemap)
from openquake.baselib.general import gettemp
from openquake.hazardlib import InvalidFile
from openquake.commonlib.logictree import InvalidLogicTree
from openquake.calculators.tests import CalculatorTestCase
from openquake.calculators.views import view
from openquake.calculators.export import export
from openquake.calculators.extract import extract
aac = numpy.testing.assert_allclose
def tot_loss(dstore):
    """Sum the 'loss' column of loss_data/data over axis 0 (all rows)."""
    losses = dstore['loss_data/data']['loss']
    return losses.sum(axis=0)
class ScenarioRiskTestCase(CalculatorTestCase):
    """Scenario-risk QA tests: each test runs a small calculation and
    compares exported/extracted outputs against the expected files."""

    def test_case_1(self):
        out = self.run_calc(case_1.__file__, 'job_risk.ini', exports='csv')
        [fname] = out['agglosses', 'csv']
        self.assertEqualFiles('expected/agg.csv', fname)

        # check the exported GMFs
        [fname, sitefile] = export(('gmf_data', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/gmf-FromFile.csv', fname)
        self.assertEqualFiles('expected/sites.csv', sitefile)
        [fname] = out['losses_by_event', 'csv']
        self.assertEqualFiles('expected/losses_by_event.csv', fname)

    def test_case_2(self):
        out = self.run_calc(case_2.__file__, 'job_risk.ini', exports='csv')
        [fname] = out['agglosses', 'csv']
        self.assertEqualFiles('expected/agg.csv', fname)

    def test_case_2d(self):
        # time_event not specified in job_h.ini but specified in job_r.ini
        out = self.run_calc(case_2d.__file__, 'job_h.ini,job_r.ini',
                            exports='csv')
        # this is also a case with a single site but an exposure grid,
        # to test a corner case
        [fname] = out['avg_losses-rlzs', 'csv']
        self.assertEqualFiles('expected/losses_by_asset.csv', fname)

        # test agglosses
        tot = extract(self.calc.datastore, 'agg_losses/occupants')
        aac(tot.array, [0.031716], atol=1E-5)

        # test agglosses with *
        tbl = extract(self.calc.datastore, 'agg_losses/occupants?taxonomy=*')
        self.assertEqual(tbl.array.shape, (1, 1))  # 1 taxonomy, 1 rlz

    def test_case_3(self):
        # a4 has a missing cost
        out = self.run_calc(case_3.__file__, 'job.ini', exports='csv')
        [fname] = out['avg_losses-rlzs', 'csv']
        self.assertEqualFiles('expected/asset-loss.csv', fname)
        [fname] = out['agglosses', 'csv']
        self.assertEqualFiles('expected/agg_loss.csv', fname)

    def test_case_4(self):
        # this test is sensitive to the ordering of the epsilons
        # in openquake.riskinput.make_eps
        out = self.run_calc(case_4.__file__, 'job.ini', exports='csv')
        fname = gettemp(view('totlosses', self.calc.datastore))
        self.assertEqualFiles('expected/totlosses.txt', fname)
        [fname] = out['agglosses', 'csv']
        self.assertEqualFiles('expected/agglosses.csv', fname, delta=1E-6)

    def test_occupants(self):
        out = self.run_calc(occupants.__file__, 'job_haz.ini,job_risk.ini',
                            exports='csv')
        [fname] = out['avg_losses-rlzs', 'csv']
        self.assertEqualFiles('expected/asset-loss.csv', fname)
        [fname] = out['agglosses', 'csv']
        self.assertEqualFiles('expected/agg_loss.csv', fname)

    def test_case_5(self):
        # case with site model and 11 sites filled out of 17
        out = self.run_calc(case_5.__file__, 'job.ini', exports='csv')
        [fname] = out['avg_losses-rlzs', 'csv']
        self.assertEqualFiles('expected/losses_by_asset.csv', fname,
                              delta=2E-6)
        # check pandas
        df = self.calc.datastore.read_df('avg_losses-rlzs', 'asset_id')
        self.assertEqual(list(df.columns), ['rlz', 'loss_type', 'value'])

    def test_case_6a(self):
        # case with two gsims
        self.run_calc(case_6a.__file__, 'job_haz.ini,job_risk.ini',
                      exports='csv')
        [f] = export(('agglosses', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/agg_structural.csv', f)

        # testing the totlosses view
        dstore = self.calc.datastore
        fname = gettemp(view('totlosses', dstore))
        self.assertEqualFiles('expected/totlosses.txt', fname)

        # testing portfolio_losses
        fname = gettemp(view('portfolio_losses', dstore))
        self.assertEqualFiles('expected/portfolio_losses.txt', fname)

        # two equal gsims
        with self.assertRaises(InvalidLogicTree):
            self.run_calc(case_6a.__file__, 'job_haz.ini',
                          gsim_logic_tree_file='wrong_gmpe_logic_tree.xml')

    def test_case_1g(self):
        out = self.run_calc(case_1g.__file__, 'job_haz.ini,job_risk.ini',
                            exports='csv')
        [fname] = out['agglosses', 'csv']
        self.assertEqualFiles('expected/agg-gsimltp_@.csv', fname)

    def test_case_1h(self):
        # this is a case with 2 assets spawning 2 tasks
        out = self.run_calc(case_1h.__file__, 'job.ini', exports='csv')
        [fname] = out['avg_losses-rlzs', 'csv']
        self.assertEqualFiles('expected/losses_by_asset.csv', fname)

        # with a single task
        out = self.run_calc(case_1h.__file__, 'job.ini', exports='csv',
                            concurrent_tasks='0')
        [fname] = out['avg_losses-rlzs', 'csv']
        self.assertEqualFiles('expected/losses_by_asset.csv', fname)

    def test_case_master(self):
        # a case with two GSIMs
        self.run_calc(case_master.__file__, 'job.ini', exports='npz')
        # check realizations
        [fname] = export(('realizations', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/realizations.csv', fname)

        # check losses by taxonomy
        agglosses = extract(self.calc.datastore, 'agg_losses/structural?'
                            'taxonomy=*').array  # shape (T, R) = (3, 2)
        # the str(array) comparison is skipped on macOS, where numpy's
        # printed representation differs and the check fails spuriously
        if sys.platform != 'darwin':
            self.assertEqualFiles('expected/agglosses_taxo.txt',
                                  gettemp(str(agglosses)))

        # extract agglosses with a * and a selection
        obj = extract(self.calc.datastore, 'agg_losses/structural?'
                      'state=*&cresta=0.11')
        self.assertEqual(obj.selected, [b'state=*', b'cresta=0.11'])
        self.assertEqual(obj.tags, [b'state=01'])
        aac(obj.array, [[2611.7139]])  # extracted from avg_losses-stats

    def test_case_7(self):
        # check independence from concurrent_tasks
        self.run_calc(case_7.__file__, 'job.ini', concurrent_tasks='10')
        tot10 = tot_loss(self.calc.datastore)
        self.run_calc(case_7.__file__, 'job.ini', concurrent_tasks='20')
        tot20 = tot_loss(self.calc.datastore)
        aac(tot10, tot20, atol=.0001)  # must be around 230.0107

    def test_case_8(self):
        # a complex scenario_risk from GMFs where the hazard sites are
        # not in the asset locations
        self.run_calc(case_8.__file__, 'job.ini')
        agglosses = extract(self.calc.datastore, 'agg_losses/structural')
        aac(agglosses.array, [1159325.6])

        # make sure the fullreport can be extracted
        view('fullreport', self.calc.datastore)

    def test_case_10(self):
        # missing occupants in the exposure
        with self.assertRaises(InvalidFile):
            self.run_calc(case_10.__file__, 'job.ini')

    def test_case_shakemap(self):
        self.run_calc(case_shakemap.__file__, 'pre-job.ini')
        self.run_calc(case_shakemap.__file__, 'job.ini',
                      hazard_calculation_id=str(self.calc.datastore.calc_id))
        sitecol = self.calc.datastore['sitecol']
        self.assertEqual(len(sitecol), 9)
        gmfdict = dict(extract(self.calc.datastore, 'gmf_data'))
        gmfa = gmfdict['rlz-000']
        self.assertEqual(gmfa.shape, (9,))
        self.assertEqual(gmfa.dtype.names,
                         ('lon', 'lat', 'PGA', 'SA(0.3)', 'SA(1.0)'))
        [fname] = export(('agglosses', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/agglosses.csv', fname)
        [fname] = export(('realizations', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/realizations.csv', fname)
|
#!/usr/bin/env python
import subprocess
import os
import sys
import argparse
import time
from subprocess import PIPE, STDOUT
null = open("/dev/null", "wb")
def wait(p):
    """Wait for process *p*; on failure play the failure chime and exit
    with p's return code.

    The old version reassigned the parameter to the chime process and then
    re-waited/asserted on it, which play() had already done internally.
    """
    rc = p.wait()
    if rc != 0:
        play("sound/misc/compiler-failure.ogg")  # play() waits and asserts success
        sys.exit(rc)
def play(soundfile):
    """Play *soundfile* via SoX's `play`, discarding its output.

    Blocks until playback finishes, asserts a clean exit, and returns
    the finished process object.
    """
    proc = subprocess.Popen(["play", soundfile], stdout=null, stderr=null)
    assert proc.wait() == 0
    return proc
def stage1():
    """Stage 1: build the tgui web interface, then chime on success."""
    build = subprocess.Popen("(cd tgui; /bin/bash ./build.sh)", shell=True)
    wait(build)
    play("sound/misc/compiler-stage1.ogg")
def stage2(map):
    """Stage 2: compile tgstation.dme, optionally forcing a map via -M<map>."""
    map_flag = "-M{}".format(map) if map else ''
    args = "bash tools/travis/dm.sh {} tgstation.dme".format(map_flag)
    print(args)
    wait(subprocess.Popen(args, shell=True))
def stage3(profile_mode=False):
    """Stage 3: run the server, chime when it is ready, mirror its output.

    Returns the seconds until the map is loaded when profile_mode is True,
    otherwise None.
    """
    start_time = time.time()
    play("sound/misc/compiler-stage2.ogg")
    logfile = open('server.log~', 'w')
    # universal_newlines so readline() yields str on Python 3 as well
    p = subprocess.Popen(
        "DreamDaemon tgstation.dmb 25001 -trusted",
        shell=True, stdout=PIPE, stderr=STDOUT, universal_newlines=True)
    try:
        while True:
            stdout = p.stdout.readline()
            # the old `while p.returncode is None` never terminated: returncode
            # is only set by poll()/wait(), so readline() returned '' forever
            if not stdout and p.poll() is not None:
                break
            # match without the trailing period: the server's message does not
            # end with '.', so "Initializations complete." never matched and
            # the ready chime never played
            if "Initializations complete" in stdout:
                play("sound/misc/server-ready.ogg")
                time_taken = time.time() - start_time
                print("{} seconds taken to fully start".format(time_taken))
            if "Map is ready." in stdout:
                time_taken = time.time() - start_time
                print("{} seconds for initial map loading".format(time_taken))
                if profile_mode:
                    return time_taken
            sys.stdout.write(stdout)
            sys.stdout.flush()
            logfile.write(stdout)
    finally:
        # flush the log to disk and stop the server even on Ctrl-C / return
        logfile.flush()
        os.fsync(logfile.fileno())
        logfile.close()
        p.kill()
def main():
    """Parse CLI flags and run build stages 1-3 (continuing unless --only)."""
    parser = argparse.ArgumentParser()
    # was '---stage' (triple dash, a typo); argparse derived dest 'stage'
    # either way, but the intended long flag is '--stage'
    parser.add_argument('-s', '--stage', default=1, type=int)
    parser.add_argument('--only', action='store_true')
    parser.add_argument('-m', '--map', type=str)
    parser.add_argument('--profile-mode', action='store_true')
    args = parser.parse_args()
    stage = args.stage
    assert stage in (1, 2, 3)
    if stage == 1:
        stage1()
        if not args.only:
            stage = 2
    if stage == 2:
        stage2(args.map)
        if not args.only:
            stage = 3
    if stage == 3:
        value = stage3(profile_mode=args.profile_mode)
        if args.profile_mode:
            # only record a timing when stage3 actually returned one;
            # the old code unconditionally appended "None" to profile~
            with open('profile~', 'a') as f:
                f.write("{}\n".format(value))
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        pass  # Ctrl-C is the normal way to stop the server stage
Fix the Linux build script's "server ready" chime (the match string had a trailing period the server never prints).
#!/usr/bin/env python
import subprocess
import os
import sys
import argparse
import time
from subprocess import PIPE, STDOUT
null = open("/dev/null", "wb")
def wait(p):
    """Wait for process *p*; on failure play the failure chime and exit
    with p's return code.

    The old version reassigned the parameter to the chime process and then
    re-waited/asserted on it, which play() had already done internally.
    """
    rc = p.wait()
    if rc != 0:
        play("sound/misc/compiler-failure.ogg")  # play() waits and asserts success
        sys.exit(rc)
def play(soundfile):
    """Play *soundfile* synchronously via SoX's `play`, discarding output;
    asserts a clean exit and returns the finished process."""
    p = subprocess.Popen(["play", soundfile], stdout=null, stderr=null)
    assert p.wait() == 0
    return p
def stage1():
    """Stage 1: build the tgui web interface, then chime on success."""
    p = subprocess.Popen("(cd tgui; /bin/bash ./build.sh)", shell=True)
    wait(p)
    play("sound/misc/compiler-stage1.ogg")
def stage2(map):
    """Stage 2: compile tgstation.dme, optionally forcing a map via -M<map>."""
    if map:
        txt = "-M{}".format(map)
    else:
        txt = ''
    args = "bash tools/travis/dm.sh {} tgstation.dme".format(txt)
    print(args)
    p = subprocess.Popen(args, shell=True)
    wait(p)
def stage3(profile_mode=False):
    """Stage 3: run the server, chime when it is ready, mirror its output.

    Returns the seconds until the map is loaded when profile_mode is True,
    otherwise None.
    """
    start_time = time.time()
    play("sound/misc/compiler-stage2.ogg")
    logfile = open('server.log~', 'w')
    # universal_newlines so readline() yields str on Python 3 as well
    p = subprocess.Popen(
        "DreamDaemon tgstation.dmb 25001 -trusted",
        shell=True, stdout=PIPE, stderr=STDOUT, universal_newlines=True)
    try:
        while True:
            stdout = p.stdout.readline()
            # the old `while p.returncode is None` never terminated: returncode
            # is only set by poll()/wait(), so readline() returned '' forever
            # after the server exited
            if not stdout and p.poll() is not None:
                break
            if "Initializations complete" in stdout:
                play("sound/misc/server-ready.ogg")
                time_taken = time.time() - start_time
                print("{} seconds taken to fully start".format(time_taken))
            if "Map is ready." in stdout:
                time_taken = time.time() - start_time
                print("{} seconds for initial map loading".format(time_taken))
                if profile_mode:
                    return time_taken
            sys.stdout.write(stdout)
            sys.stdout.flush()
            logfile.write(stdout)
    finally:
        # flush the log to disk and stop the server even on Ctrl-C / return
        logfile.flush()
        os.fsync(logfile.fileno())
        logfile.close()
        p.kill()
def main():
    """Parse CLI flags and run build stages 1-3 (continuing unless --only)."""
    parser = argparse.ArgumentParser()
    # was '---stage' (triple dash, a typo); argparse derived dest 'stage'
    # either way, but the intended long flag is '--stage'
    parser.add_argument('-s', '--stage', default=1, type=int)
    parser.add_argument('--only', action='store_true')
    parser.add_argument('-m', '--map', type=str)
    parser.add_argument('--profile-mode', action='store_true')
    args = parser.parse_args()
    stage = args.stage
    assert stage in (1, 2, 3)
    if stage == 1:
        stage1()
        if not args.only:
            stage = 2
    if stage == 2:
        stage2(args.map)
        if not args.only:
            stage = 3
    if stage == 3:
        value = stage3(profile_mode=args.profile_mode)
        if args.profile_mode:
            # only record a timing when stage3 actually returned one;
            # the old code unconditionally appended "None" to profile~
            with open('profile~', 'a') as f:
                f.write("{}\n".format(value))
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        pass  # Ctrl-C is the normal way to stop the server stage
|
"""
pynamodb attributes tests
"""
import six
import json
from base64 import b64encode
from datetime import datetime
from delorean import Delorean
from pynamodb.compat import CompatTestCase as TestCase
from pynamodb.constants import UTC, DATETIME_FORMAT
from pynamodb.models import Model
from pynamodb.attributes import (
BinarySetAttribute, BinaryAttribute, NumberSetAttribute, NumberAttribute,
UnicodeAttribute, UnicodeSetAttribute, UTCDateTimeAttribute, BooleanAttribute,
JSONAttribute, DEFAULT_ENCODING, NUMBER, STRING, STRING_SET, NUMBER_SET, BINARY_SET,
BINARY)
class AttributeTestModel(Model):
    """PynamoDB model declaring one attribute of every supported type,
    used by the descriptor tests below."""
    class Meta:
        # local DynamoDB endpoint; never a real AWS host
        host = 'http://localhost:8000'
        table_name = 'test'
    binary_attr = BinaryAttribute()
    binary_set_attr = BinarySetAttribute()
    number_attr = NumberAttribute()
    number_set_attr = NumberSetAttribute()
    unicode_attr = UnicodeAttribute()
    unicode_set_attr = UnicodeSetAttribute()
    datetime_attr = UTCDateTimeAttribute()
    bool_attr = BooleanAttribute()
    json_attr = JSONAttribute()
class AttributeDescriptorTestCase(TestCase):
    """Round-trip every attribute descriptor: set a value, read it back."""

    def setUp(self):
        self.instance = AttributeTestModel()

    def test_binary_attr(self):
        """Binary attribute descriptor"""
        model = self.instance
        model.binary_attr = b'test'
        self.assertEqual(model.binary_attr, b'test')

    def test_binary_set_attr(self):
        """Binary set attribute descriptor"""
        model = self.instance
        model.binary_set_attr = {b'test', b'test2'}
        self.assertEqual(model.binary_set_attr, {b'test', b'test2'})

    def test_number_attr(self):
        """Number attribute descriptor"""
        model = self.instance
        model.number_attr = 42
        self.assertEqual(model.number_attr, 42)

    def test_number_set_attr(self):
        """Number set attribute descriptor"""
        model = self.instance
        model.number_set_attr = {1, 2}
        self.assertEqual(model.number_set_attr, {1, 2})

    def test_unicode_attr(self):
        """Unicode attribute descriptor"""
        model = self.instance
        model.unicode_attr = u"test"
        self.assertEqual(model.unicode_attr, u"test")

    def test_unicode_set_attr(self):
        """Unicode set attribute descriptor"""
        model = self.instance
        model.unicode_set_attr = {u"test", u"test2"}
        self.assertEqual(model.unicode_set_attr, {u"test", u"test2"})

    def test_datetime_attr(self):
        """Datetime attribute descriptor"""
        now = datetime.now()
        self.instance.datetime_attr = now
        self.assertEqual(self.instance.datetime_attr, now)

    def test_bool_attr(self):
        """Boolean attribute descriptor"""
        self.instance.bool_attr = True
        self.assertEqual(self.instance.bool_attr, True)

    def test_json_attr(self):
        """JSON attribute descriptor"""
        payload = {'foo': 'bar', 'bar': 42}
        self.instance.json_attr = payload
        self.assertEqual(self.instance.json_attr, {'foo': 'bar', 'bar': 42})
class UTCDateTimeAttributeTestCase(TestCase):
    """
    Tests UTCDateTime attributes
    """
    def test_utc_datetime_attribute(self):
        """
        UTCDateTimeAttribute.default
        """
        attr = UTCDateTimeAttribute()
        self.assertIsNotNone(attr)
        # datetimes are stored as DynamoDB strings
        self.assertEqual(attr.attr_type, STRING)
        tstamp = datetime.now()
        attr = UTCDateTimeAttribute(default=tstamp)
        self.assertEqual(attr.default, tstamp)

    def test_utc_date_time_deserialize(self):
        """
        UTCDateTimeAttribute.deserialize
        """
        # Delorean provides a timezone-aware UTC datetime for the fixture
        tstamp = Delorean(timezone=UTC).datetime
        attr = UTCDateTimeAttribute()
        self.assertEqual(
            tstamp,
            attr.deserialize(Delorean(tstamp, timezone=UTC).datetime.strftime(DATETIME_FORMAT)),
        )

    def test_utc_date_time_serialize(self):
        """
        UTCDateTimeAttribute.serialize
        """
        tstamp = datetime.now()
        attr = UTCDateTimeAttribute()
        # serialization formats via DATETIME_FORMAT after UTC conversion
        self.assertEqual(attr.serialize(tstamp), Delorean(tstamp, timezone=UTC).datetime.strftime(DATETIME_FORMAT))
class BinaryAttributeTestCase(TestCase):
    """Serialization tests for BinaryAttribute and BinarySetAttribute
    (values travel as base64-encoded strings)."""

    def test_binary_attribute(self):
        """BinaryAttribute.default"""
        attr = BinaryAttribute()
        self.assertIsNotNone(attr)
        self.assertEqual(attr.attr_type, BINARY)
        attr = BinaryAttribute(default=b'foo')
        self.assertEqual(attr.default, b'foo')

    def test_binary_round_trip(self):
        """BinaryAttribute round trip"""
        attr = BinaryAttribute()
        payload = b'foo'
        self.assertEqual(attr.deserialize(attr.serialize(payload)), payload)

    def test_binary_serialize(self):
        """BinaryAttribute.serialize"""
        expected = b64encode(b'foo').decode(DEFAULT_ENCODING)
        self.assertEqual(BinaryAttribute().serialize(b'foo'), expected)

    def test_binary_deserialize(self):
        """BinaryAttribute.deserialize"""
        serial = b64encode(b'foo').decode(DEFAULT_ENCODING)
        self.assertEqual(BinaryAttribute().deserialize(serial), b'foo')

    def test_binary_set_serialize(self):
        """BinarySetAttribute.serialize"""
        attr = BinarySetAttribute()
        self.assertEqual(attr.attr_type, BINARY_SET)
        values = {b'foo', b'bar'}
        expected = [b64encode(v).decode(DEFAULT_ENCODING) for v in sorted(values)]
        self.assertEqual(attr.serialize(values), expected)
        self.assertEqual(attr.serialize(None), None)

    def test_binary_set_round_trip(self):
        """BinarySetAttribute round trip"""
        attr = BinarySetAttribute()
        values = {b'foo', b'bar'}
        self.assertEqual(attr.deserialize(attr.serialize(values)), values)

    def test_binary_set_deserialize(self):
        """BinarySetAttribute.deserialize"""
        values = {b'foo', b'bar'}
        serial = [b64encode(v).decode(DEFAULT_ENCODING) for v in sorted(values)]
        self.assertEqual(BinarySetAttribute().deserialize(serial), values)

    def test_binary_set_attribute(self):
        """BinarySetAttribute.serialize"""
        attr = BinarySetAttribute()
        self.assertIsNotNone(attr)
        attr = BinarySetAttribute(default={b'foo', b'bar'})
        self.assertEqual(attr.default, {b'foo', b'bar'})
class NumberAttributeTestCase(TestCase):
    """Unit tests for NumberAttribute and NumberSetAttribute."""

    def test_number_attribute(self):
        """A default can be supplied and the attribute type is NUMBER."""
        attribute = NumberAttribute()
        self.assertIsNotNone(attribute)
        self.assertEqual(attribute.attr_type, NUMBER)
        attribute = NumberAttribute(default=1)
        self.assertEqual(attribute.default, 1)

    def test_number_serialize(self):
        """Numbers serialize to their string form."""
        attribute = NumberAttribute()
        self.assertEqual(attribute.serialize(3.141), '3.141')
        self.assertEqual(attribute.serialize(1), '1')

    def test_number_deserialize(self):
        """Number strings deserialize back to int or float."""
        attribute = NumberAttribute()
        self.assertEqual(attribute.deserialize('1'), 1)
        self.assertEqual(attribute.deserialize('3.141'), 3.141)

    def test_number_set_deserialize(self):
        """Encoded number lists deserialize to a set."""
        attribute = NumberSetAttribute()
        self.assertEqual(attribute.attr_type, NUMBER_SET)
        encoded = [json.dumps(value) for value in sorted(set([1, 2]))]
        self.assertEqual(attribute.deserialize(encoded), set([1, 2]))

    def test_number_set_serialize(self):
        """Number sets serialize to sorted JSON strings; None stays None."""
        attribute = NumberSetAttribute()
        expected = [json.dumps(value) for value in sorted(set([1, 2]))]
        self.assertEqual(attribute.serialize(set([1, 2])), expected)
        self.assertEqual(attribute.serialize(None), None)

    def test_number_set_attribute(self):
        """A default set can be supplied at construction."""
        attribute = NumberSetAttribute()
        self.assertIsNotNone(attribute)
        attribute = NumberSetAttribute(default=set([1, 2]))
        self.assertEqual(attribute.default, set([1, 2]))
class UnicodeAttributeTestCase(TestCase):
    """Unit tests for UnicodeAttribute and UnicodeSetAttribute."""

    def test_unicode_attribute(self):
        """A default can be supplied and the attribute type is STRING."""
        attribute = UnicodeAttribute()
        self.assertIsNotNone(attribute)
        self.assertEqual(attribute.attr_type, STRING)
        attribute = UnicodeAttribute(default=six.u('foo'))
        self.assertEqual(attribute.default, six.u('foo'))

    def test_unicode_serialize(self):
        """Strings pass through; empty string and None serialize to None."""
        attribute = UnicodeAttribute()
        self.assertEqual(attribute.serialize('foo'), six.u('foo'))
        self.assertEqual(attribute.serialize(u'foo'), six.u('foo'))
        self.assertEqual(attribute.serialize(u''), None)
        self.assertEqual(attribute.serialize(None), None)

    def test_unicode_deserialize(self):
        """Strings deserialize unchanged."""
        attribute = UnicodeAttribute()
        self.assertEqual(attribute.deserialize('foo'), six.u('foo'))
        self.assertEqual(attribute.deserialize(u'foo'), six.u('foo'))

    def test_unicode_set_serialize(self):
        """String sets serialize to sorted JSON strings.

        NOTE(review): this test also exercises deserialize(None) despite its
        name; kept as-is to preserve coverage.
        """
        attribute = UnicodeSetAttribute()
        self.assertEqual(attribute.attr_type, STRING_SET)
        self.assertEqual(attribute.deserialize(None), None)
        values = set([six.u('foo'), six.u('bar')])
        expected = [json.dumps(value) for value in sorted(values)]
        self.assertEqual(attribute.serialize(values), expected)

    def test_unicode_set_deserialize(self):
        """Sorted JSON strings deserialize back to the set."""
        attribute = UnicodeSetAttribute()
        expected = set([six.u('foo'), six.u('bar')])
        encoded = [json.dumps(value) for value in sorted(expected)]
        self.assertEqual(attribute.deserialize(encoded), expected)

    def test_unicode_set_attribute(self):
        """A default set can be supplied and the attribute type is STRING_SET."""
        attribute = UnicodeSetAttribute()
        self.assertIsNotNone(attribute)
        self.assertEqual(attribute.attr_type, STRING_SET)
        attribute = UnicodeSetAttribute(default=set([six.u('foo'), six.u('bar')]))
        self.assertEqual(attribute.default, set([six.u('foo'), six.u('bar')]))
class BooleanAttributeTestCase(TestCase):
    """Unit tests for BooleanAttribute."""

    def test_boolean_attribute(self):
        """A default can be supplied; booleans are stored as NUMBER."""
        attribute = BooleanAttribute()
        self.assertIsNotNone(attribute)
        self.assertEqual(attribute.attr_type, NUMBER)
        attribute = BooleanAttribute(default=True)
        self.assertEqual(attribute.default, True)

    def test_boolean_serialize(self):
        """True/False serialize to JSON 1/0; None stays None."""
        attribute = BooleanAttribute()
        self.assertEqual(attribute.serialize(True), json.dumps(1))
        self.assertEqual(attribute.serialize(False), json.dumps(0))
        self.assertEqual(attribute.serialize(None), None)

    def test_boolean_deserialize(self):
        """'1'/'0' deserialize to True/False."""
        attribute = BooleanAttribute()
        self.assertEqual(attribute.deserialize('1'), True)
        self.assertEqual(attribute.deserialize('0'), False)
class JSONAttributeTestCase(TestCase):
    """Unit tests for JSONAttribute."""

    def test_json_attribute(self):
        """A default can be supplied and the attribute type is STRING."""
        attribute = JSONAttribute()
        self.assertIsNotNone(attribute)
        self.assertEqual(attribute.attr_type, STRING)
        attribute = JSONAttribute(default={})
        self.assertEqual(attribute.default, {})

    def test_json_serialize(self):
        """Objects serialize to their JSON text; None stays None."""
        attribute = JSONAttribute()
        document = {'foo': 'bar', 'bool': True, 'number': 3.141}
        self.assertEqual(attribute.serialize(document), six.u(json.dumps(document)))
        self.assertEqual(attribute.serialize({}), six.u('{}'))
        self.assertEqual(attribute.serialize(None), None)

    def test_json_deserialize(self):
        """JSON text deserializes back to the original object."""
        attribute = JSONAttribute()
        document = {'foo': 'bar', 'bool': True, 'number': 3.141}
        self.assertEqual(attribute.deserialize(six.u(json.dumps(document))), document)

    def test_control_chars(self):
        """Control characters in keys and values survive a round trip."""
        attribute = JSONAttribute()
        document = {'foo\t': 'bar\n', 'bool': True, 'number': 3.141}
        self.assertEqual(attribute.deserialize(six.u(json.dumps(document))), document)
Added test for round-tripping a unicode set
"""
pynamodb attributes tests
"""
import six
import json
from base64 import b64encode
from datetime import datetime
from delorean import Delorean
from pynamodb.compat import CompatTestCase as TestCase
from pynamodb.constants import UTC, DATETIME_FORMAT
from pynamodb.models import Model
from pynamodb.attributes import (
BinarySetAttribute, BinaryAttribute, NumberSetAttribute, NumberAttribute,
UnicodeAttribute, UnicodeSetAttribute, UTCDateTimeAttribute, BooleanAttribute,
JSONAttribute, DEFAULT_ENCODING, NUMBER, STRING, STRING_SET, NUMBER_SET, BINARY_SET,
BINARY)
class AttributeTestModel(Model):
    # Model that hosts one attribute of each supported type so the
    # descriptor tests can exercise assignment and retrieval.
    class Meta:
        # Points at a local DynamoDB endpoint; tests never reach AWS.
        host = 'http://localhost:8000'
        table_name = 'test'
    binary_attr = BinaryAttribute()
    binary_set_attr = BinarySetAttribute()
    number_attr = NumberAttribute()
    number_set_attr = NumberSetAttribute()
    unicode_attr = UnicodeAttribute()
    unicode_set_attr = UnicodeSetAttribute()
    datetime_attr = UTCDateTimeAttribute()
    bool_attr = BooleanAttribute()
    json_attr = JSONAttribute()
class AttributeDescriptorTestCase(TestCase):
    """Verify each attribute descriptor round-trips assigned values."""

    def setUp(self):
        self.instance = AttributeTestModel()

    def _check_descriptor(self, name, value):
        # Assign through the descriptor, then read back and compare.
        setattr(self.instance, name, value)
        self.assertEqual(getattr(self.instance, name), value)

    def test_binary_attr(self):
        """Binary attribute descriptor"""
        self._check_descriptor('binary_attr', b'test')

    def test_binary_set_attr(self):
        """Binary set attribute descriptor"""
        self._check_descriptor('binary_set_attr', set([b'test', b'test2']))

    def test_number_attr(self):
        """Number attribute descriptor"""
        self._check_descriptor('number_attr', 42)

    def test_number_set_attr(self):
        """Number set attribute descriptor"""
        self._check_descriptor('number_set_attr', set([1, 2]))

    def test_unicode_attr(self):
        """Unicode attribute descriptor"""
        self._check_descriptor('unicode_attr', u"test")

    def test_unicode_set_attr(self):
        """Unicode set attribute descriptor"""
        self._check_descriptor('unicode_set_attr', set([u"test", u"test2"]))

    def test_datetime_attr(self):
        """Datetime attribute descriptor"""
        self._check_descriptor('datetime_attr', datetime.now())

    def test_bool_attr(self):
        """Boolean attribute descriptor"""
        self._check_descriptor('bool_attr', True)

    def test_json_attr(self):
        """JSON attribute descriptor"""
        self._check_descriptor('json_attr', {'foo': 'bar', 'bar': 42})
class UTCDateTimeAttributeTestCase(TestCase):
    """Unit tests for UTCDateTimeAttribute."""

    def test_utc_datetime_attribute(self):
        """A default can be supplied and the attribute type is STRING."""
        attribute = UTCDateTimeAttribute()
        self.assertIsNotNone(attribute)
        self.assertEqual(attribute.attr_type, STRING)
        timestamp = datetime.now()
        attribute = UTCDateTimeAttribute(default=timestamp)
        self.assertEqual(attribute.default, timestamp)

    def test_utc_date_time_deserialize(self):
        """A UTC-formatted string deserializes to the original datetime."""
        timestamp = Delorean(timezone=UTC).datetime
        attribute = UTCDateTimeAttribute()
        formatted = Delorean(timestamp, timezone=UTC).datetime.strftime(DATETIME_FORMAT)
        self.assertEqual(attribute.deserialize(formatted), timestamp)

    def test_utc_date_time_serialize(self):
        """A datetime serializes to the expected UTC-formatted string."""
        timestamp = datetime.now()
        attribute = UTCDateTimeAttribute()
        expected = Delorean(timestamp, timezone=UTC).datetime.strftime(DATETIME_FORMAT)
        self.assertEqual(attribute.serialize(timestamp), expected)
class BinaryAttributeTestCase(TestCase):
    """Unit tests for BinaryAttribute and BinarySetAttribute."""

    def test_binary_attribute(self):
        """A default can be supplied and the attribute type is BINARY."""
        attribute = BinaryAttribute()
        self.assertIsNotNone(attribute)
        self.assertEqual(attribute.attr_type, BINARY)
        attribute = BinaryAttribute(default=b'foo')
        self.assertEqual(attribute.default, b'foo')

    def test_binary_round_trip(self):
        """serialize followed by deserialize returns the original bytes."""
        attribute = BinaryAttribute()
        original = b'foo'
        self.assertEqual(attribute.deserialize(attribute.serialize(original)), original)

    def test_binary_serialize(self):
        """Bytes serialize to base64 text."""
        attribute = BinaryAttribute()
        expected = b64encode(b'foo').decode(DEFAULT_ENCODING)
        self.assertEqual(attribute.serialize(b'foo'), expected)

    def test_binary_deserialize(self):
        """Base64 text deserializes back to bytes."""
        attribute = BinaryAttribute()
        encoded = b64encode(b'foo').decode(DEFAULT_ENCODING)
        self.assertEqual(attribute.deserialize(encoded), b'foo')

    def test_binary_set_serialize(self):
        """A set of bytes serializes to sorted base64 strings; None stays None."""
        attribute = BinarySetAttribute()
        self.assertEqual(attribute.attr_type, BINARY_SET)
        values = set([b'foo', b'bar'])
        expected = [b64encode(value).decode(DEFAULT_ENCODING) for value in sorted(values)]
        self.assertEqual(attribute.serialize(values), expected)
        self.assertEqual(attribute.serialize(None), None)

    def test_binary_set_round_trip(self):
        """serialize followed by deserialize returns the original set."""
        attribute = BinarySetAttribute()
        original = set([b'foo', b'bar'])
        self.assertEqual(attribute.deserialize(attribute.serialize(original)), original)

    def test_binary_set_deserialize(self):
        """Sorted base64 strings deserialize back to the set of bytes."""
        attribute = BinarySetAttribute()
        expected = set([b'foo', b'bar'])
        encoded = [b64encode(value).decode(DEFAULT_ENCODING) for value in sorted(expected)]
        self.assertEqual(attribute.deserialize(encoded), expected)

    def test_binary_set_attribute(self):
        """A default set can be supplied at construction."""
        attribute = BinarySetAttribute()
        self.assertIsNotNone(attribute)
        attribute = BinarySetAttribute(default=set([b'foo', b'bar']))
        self.assertEqual(attribute.default, set([b'foo', b'bar']))
class NumberAttributeTestCase(TestCase):
    """Unit tests for NumberAttribute and NumberSetAttribute."""

    def test_number_attribute(self):
        """A default can be supplied and the attribute type is NUMBER."""
        attribute = NumberAttribute()
        self.assertIsNotNone(attribute)
        self.assertEqual(attribute.attr_type, NUMBER)
        attribute = NumberAttribute(default=1)
        self.assertEqual(attribute.default, 1)

    def test_number_serialize(self):
        """Numbers serialize to their string form."""
        attribute = NumberAttribute()
        self.assertEqual(attribute.serialize(3.141), '3.141')
        self.assertEqual(attribute.serialize(1), '1')

    def test_number_deserialize(self):
        """Number strings deserialize back to int or float."""
        attribute = NumberAttribute()
        self.assertEqual(attribute.deserialize('1'), 1)
        self.assertEqual(attribute.deserialize('3.141'), 3.141)

    def test_number_set_deserialize(self):
        """Encoded number lists deserialize to a set."""
        attribute = NumberSetAttribute()
        self.assertEqual(attribute.attr_type, NUMBER_SET)
        encoded = [json.dumps(value) for value in sorted(set([1, 2]))]
        self.assertEqual(attribute.deserialize(encoded), set([1, 2]))

    def test_number_set_serialize(self):
        """Number sets serialize to sorted JSON strings; None stays None."""
        attribute = NumberSetAttribute()
        expected = [json.dumps(value) for value in sorted(set([1, 2]))]
        self.assertEqual(attribute.serialize(set([1, 2])), expected)
        self.assertEqual(attribute.serialize(None), None)

    def test_number_set_attribute(self):
        """A default set can be supplied at construction."""
        attribute = NumberSetAttribute()
        self.assertIsNotNone(attribute)
        attribute = NumberSetAttribute(default=set([1, 2]))
        self.assertEqual(attribute.default, set([1, 2]))
class UnicodeAttributeTestCase(TestCase):
    """Unit tests for UnicodeAttribute and UnicodeSetAttribute."""

    def test_unicode_attribute(self):
        """A default can be supplied and the attribute type is STRING."""
        attribute = UnicodeAttribute()
        self.assertIsNotNone(attribute)
        self.assertEqual(attribute.attr_type, STRING)
        attribute = UnicodeAttribute(default=six.u('foo'))
        self.assertEqual(attribute.default, six.u('foo'))

    def test_unicode_serialize(self):
        """Strings pass through; empty string and None serialize to None."""
        attribute = UnicodeAttribute()
        self.assertEqual(attribute.serialize('foo'), six.u('foo'))
        self.assertEqual(attribute.serialize(u'foo'), six.u('foo'))
        self.assertEqual(attribute.serialize(u''), None)
        self.assertEqual(attribute.serialize(None), None)

    def test_unicode_deserialize(self):
        """Strings deserialize unchanged."""
        attribute = UnicodeAttribute()
        self.assertEqual(attribute.deserialize('foo'), six.u('foo'))
        self.assertEqual(attribute.deserialize(u'foo'), six.u('foo'))

    def test_unicode_set_serialize(self):
        """String sets serialize to sorted JSON strings.

        NOTE(review): this test also exercises deserialize(None) despite its
        name; kept as-is to preserve coverage.
        """
        attribute = UnicodeSetAttribute()
        self.assertEqual(attribute.attr_type, STRING_SET)
        self.assertEqual(attribute.deserialize(None), None)
        values = set([six.u('foo'), six.u('bar')])
        expected = [json.dumps(value) for value in sorted(values)]
        self.assertEqual(attribute.serialize(values), expected)

    def test_round_trip_unicode_set(self):
        """serialize followed by deserialize returns the original set."""
        attribute = UnicodeSetAttribute()
        original = set([six.u('foo'), six.u('bar')])
        self.assertEqual(attribute.deserialize(attribute.serialize(original)), original)

    def test_unicode_set_deserialize(self):
        """Sorted JSON strings deserialize back to the set."""
        attribute = UnicodeSetAttribute()
        expected = set([six.u('foo'), six.u('bar')])
        encoded = [json.dumps(value) for value in sorted(expected)]
        self.assertEqual(attribute.deserialize(encoded), expected)

    def test_unicode_set_attribute(self):
        """A default set can be supplied and the attribute type is STRING_SET."""
        attribute = UnicodeSetAttribute()
        self.assertIsNotNone(attribute)
        self.assertEqual(attribute.attr_type, STRING_SET)
        attribute = UnicodeSetAttribute(default=set([six.u('foo'), six.u('bar')]))
        self.assertEqual(attribute.default, set([six.u('foo'), six.u('bar')]))
class BooleanAttributeTestCase(TestCase):
    """Unit tests for BooleanAttribute."""

    def test_boolean_attribute(self):
        """A default can be supplied; booleans are stored as NUMBER."""
        attribute = BooleanAttribute()
        self.assertIsNotNone(attribute)
        self.assertEqual(attribute.attr_type, NUMBER)
        attribute = BooleanAttribute(default=True)
        self.assertEqual(attribute.default, True)

    def test_boolean_serialize(self):
        """True/False serialize to JSON 1/0; None stays None."""
        attribute = BooleanAttribute()
        self.assertEqual(attribute.serialize(True), json.dumps(1))
        self.assertEqual(attribute.serialize(False), json.dumps(0))
        self.assertEqual(attribute.serialize(None), None)

    def test_boolean_deserialize(self):
        """'1'/'0' deserialize to True/False."""
        attribute = BooleanAttribute()
        self.assertEqual(attribute.deserialize('1'), True)
        self.assertEqual(attribute.deserialize('0'), False)
class JSONAttributeTestCase(TestCase):
    """Unit tests for JSONAttribute."""

    def test_json_attribute(self):
        """A default can be supplied and the attribute type is STRING."""
        attribute = JSONAttribute()
        self.assertIsNotNone(attribute)
        self.assertEqual(attribute.attr_type, STRING)
        attribute = JSONAttribute(default={})
        self.assertEqual(attribute.default, {})

    def test_json_serialize(self):
        """Objects serialize to their JSON text; None stays None."""
        attribute = JSONAttribute()
        document = {'foo': 'bar', 'bool': True, 'number': 3.141}
        self.assertEqual(attribute.serialize(document), six.u(json.dumps(document)))
        self.assertEqual(attribute.serialize({}), six.u('{}'))
        self.assertEqual(attribute.serialize(None), None)

    def test_json_deserialize(self):
        """JSON text deserializes back to the original object."""
        attribute = JSONAttribute()
        document = {'foo': 'bar', 'bool': True, 'number': 3.141}
        self.assertEqual(attribute.deserialize(six.u(json.dumps(document))), document)

    def test_control_chars(self):
        """Control characters in keys and values survive a round trip."""
        attribute = JSONAttribute()
        document = {'foo\t': 'bar\n', 'bool': True, 'number': 3.141}
        self.assertEqual(attribute.deserialize(six.u(json.dumps(document))), document)
|
"""##TODO: Docstring."""
##TODO: Standard lib imports.
import logging
##TODO: Third-party imports.
##TODO: Local imports.
import arcpy
LOG = logging.getLogger(__name__)
class Toolbox(object):  # pylint: disable=too-few-public-methods
    """Define the toolbox.

    Toolbox class is required for constructing an ArcGIS Python toolbox.
    The name of toolbox is the basename of this file.
    """

    def __init__(self):
        self.label = "##TODO: Toolbox label."
        # Alias becomes the toolbox namespace when attached to ArcPy
        # (arcpy.{alias}); attach using arcpy.AddToolbox().
        self.alias = '##TODO: Toolbox alias.'
        # Tool classes visible in the toolbox; must be a real list, not any
        # other iterable. Add tool classes here to expose them.
        self.tools = [ToolExample]
class ToolExample(object):
    """Example of an individual tool in an ArcGIS Python toolbox."""

    def __init__(self):
        # Label is how tool is named within toolbox.
        self.label = "##TODO: Label."
        # Category is name of sub-toolset tool will be in (optional).
        self.category = None
        # Description is longer text for tool, shown in side panel.
        self.description = """
            ##TODO: Description.
            """
        # Sets whether the tool controls ArcGIS focus while running.
        self.canRunInBackground = False  # pylint: disable=invalid-name

    def getParameterInfo(self):  # pylint: disable=invalid-name,no-self-use
        """Load parameters into toolbox; return value must be a list."""
        # Create the parameters in a separate place (allows reusability),
        # then add them here. Recommended: use parameter_from_attributes
        # to allow initial definition to be a dictionary/attribute map.
        parameters = [
            parameter_from_attributes(
                {'name': 'example_parameter',
                 'displayName': "Example Parameter",
                 # Direction: 'Input' or 'Output'.
                 'direction': 'Input',
                 # datatype: http://desktop.arcgis.com/en/arcmap/latest/analyze/creating-tools/defining-parameter-data-types-in-a-python-toolbox.htm
                 'datatype': 'GPBoolean',
                 # parameterType: 'Required', 'Optional', or 'Derived'.
                 'parameterType': 'Required',
                 # enabled: True or False.
                 'enabled': True,
                 # category (optional). Note having one will collapse category on open.
                 'category': None,
                 'multiValue': False,
                 # Value type must be Python type match for datatype.
                 'value': True,
                 # symbology (optional): Path to layer file for drawing output.
                 'symbology': None}
            ),
        ]
        return parameters

    def isLicensed(self):  # pylint: disable=invalid-name,no-self-use
        """Set whether tool is licensed to execute."""
        # If tool needs extra licensing, returning False prevents execution.
        return True

    def updateMessages(self, parameters):  # pylint: disable=invalid-name,no-self-use
        """Modify messages created by internal validation for each parameter.

        This method is called after internal validation.
        """
        # No update requirements at this time.
        pass

    def updateParameters(self, parameters):  # pylint: disable=invalid-name,no-self-use
        """Modify parameters before internal validation is performed.

        This method is called whenever a parameter has been changed.
        """
        # Follow the below format for checking for changes.
        # Same code can be used for updateMessages.
        # Remove code if not needed.
        parameter_map = {parameter.name: parameter for parameter in parameters}
        # BUG FIX: the template indexed parameter_map['a_parameter'], but the
        # only parameter defined in getParameterInfo is 'example_parameter',
        # so every change event raised KeyError. Look the name up safely and
        # only react when the parameter actually exists.
        changed_parameter = parameter_map.get('a_parameter')
        if changed_parameter is not None and parameter_changed(changed_parameter):
            # Do something.
            pass

    def execute(self, parameters, messages):  # pylint: disable=no-self-use
        """Procedural code of the tool."""
        # Set up logger-like object; logs to both ArcPy and file's logger.
        log = ArcLogger(loggers=[LOG])
        # value_map contains dictionary with parameter name/value key/values.
        value_map = parameter_value_map(parameters)
        log.info("TODO: Steps of the tool here.")
# Tool-specific helpers.
##TODO: Put objects specific to tool(s) only in this toolbox here.
# Helpers.
##TODO: Put more generic objects here.
class ArcLogger(object):
    """Faux-logger for logging to ArcPy/ArcGIS messaging system."""

    # Map of stdlib logging levels to ArcPy messaging functions. The Arc
    # messaging system has no debug channel, so NOTSET/DEBUG are discarded
    # and CRITICAL is routed to the error channel.
    arc_function = {
        logging.NOTSET: (lambda msg: None),
        logging.DEBUG: (lambda msg: None),
        logging.INFO: arcpy.AddMessage,
        logging.WARNING: arcpy.AddWarning,
        logging.ERROR: arcpy.AddError,
        logging.CRITICAL: arcpy.AddError,
    }

    def __init__(self, loggers=None):
        """Instance initialization.

        Args:
            loggers: optional sequence of stdlib loggers to mirror output to.
        """
        self.loggers = loggers if loggers else []

    def debug(self, msg):
        """Emit msg at DEBUG severity."""
        self.log(logging.DEBUG, msg)

    def info(self, msg):
        """Emit msg at INFO severity."""
        self.log(logging.INFO, msg)

    def warning(self, msg):
        """Emit msg at WARNING severity."""
        self.log(logging.WARNING, msg)

    def error(self, msg):
        """Emit msg at ERROR severity."""
        self.log(logging.ERROR, msg)

    def critical(self, msg):
        """Emit msg at CRITICAL severity."""
        self.log(logging.CRITICAL, msg)

    def log(self, lvl, msg):
        """Send msg to the ArcPy channel for lvl, then to attached loggers."""
        self.arc_function[lvl](msg)
        for logger in self.loggers:
            logger.log(lvl, msg)
def parameter_changed(parameter):
    """Return True if parameter is in a pre-validation changed state."""
    return bool(parameter.altered) and not parameter.hasBeenValidated
def parameter_from_attributes(attribute_map):
    """Create ArcPy parameter object using an attribute mapping.

    Note that this doesn't check if the attribute exists in the default
    parameter instance, so a misspelled key may simply attach a new,
    ignored attribute (depends on how the class implements setattr).
    """
    parameter = arcpy.Parameter()
    # 'filter.*' keys are handled separately below; everything else is set
    # directly on the parameter.
    for attribute_name, attribute_value in attribute_map.items():
        if not attribute_name.startswith('filter.'):
            setattr(parameter, attribute_name, attribute_value)
    # Filter attributes don't stick using setattr; assign them explicitly.
    if 'filter.type' in attribute_map:
        parameter.filter.type = attribute_map['filter.type']
    if 'filter.list' in attribute_map:
        parameter.filter.list = attribute_map['filter.list']
    return parameter
def parameter_value(parameter):
    """Return value of parameter."""
    def unwrap(value_object):
        """Return the actual value; some values are wrapped in a value
        object (.value), others are not."""
        return getattr(value_object, 'value', value_object)

    # Multivalue parameters keep their values in .values (.value holds a
    # ValueTable object instead).
    if parameter.multiValue:
        return [unwrap(value) for value in parameter.values]
    return unwrap(parameter.value)
def parameter_value_map(parameters):
    """Create value map from ArcPy parameter objects."""
    value_map = {}
    for parameter in parameters:
        value_map[parameter.name] = parameter_value(parameter)
    return value_map
Improve guidance comment.
"""##TODO: Docstring."""
##TODO: Standard lib imports.
import logging
##TODO: Third-party imports.
##TODO: Local imports.
import arcpy
LOG = logging.getLogger(__name__)
class Toolbox(object):  # pylint: disable=too-few-public-methods
    """Define the toolbox.

    Toolbox class is required for constructing an ArcGIS Python toolbox.
    The name of toolbox is the basename of this file.
    """

    def __init__(self):
        self.label = "##TODO: Toolbox label."
        # Alias becomes the toolbox namespace when attached to ArcPy
        # (arcpy.{alias}); attach using arcpy.AddToolbox().
        self.alias = '##TODO: Toolbox alias.'
        # Tool classes visible in the toolbox; must be a real list, not any
        # other iterable. Add tool classes here to expose them.
        self.tools = [ToolExample]
class ToolExample(object):
    """Example of an individual tool in an ArcGIS Python toolbox."""

    def __init__(self):
        # Label is how tool is named within toolbox.
        self.label = "##TODO: Label."
        # Category is name of sub-toolset tool will be in (optional).
        self.category = None
        # Description is longer text for tool, shown in side panel.
        self.description = """
            ##TODO: Description.
            """
        # Sets whether the tool controls ArcGIS focus while running.
        self.canRunInBackground = False  # pylint: disable=invalid-name

    def getParameterInfo(self):  # pylint: disable=invalid-name,no-self-use
        """Load parameters into toolbox; return value must be a list."""
        # Create the parameters in a separate place (allows reusability),
        # then add them here. Recommended: use parameter_from_attributes
        # to allow initial definition to be a dictionary/attribute map.
        parameters = [
            parameter_from_attributes(
                {'name': 'example_parameter',
                 'displayName': "Example Parameter",
                 # Direction: 'Input' or 'Output'.
                 'direction': 'Input',
                 # datatype: http://desktop.arcgis.com/en/arcmap/latest/analyze/creating-tools/defining-parameter-data-types-in-a-python-toolbox.htm
                 'datatype': 'GPBoolean',
                 # parameterType: 'Required', 'Optional', or 'Derived'.
                 'parameterType': 'Required',
                 # enabled: True or False.
                 'enabled': True,
                 # category (optional). Note having one will collapse category on open.
                 'category': None,
                 'multiValue': False,
                 # Value type must be Python type match for datatype.
                 'value': True,
                 # symbology (optional): Path to layer file for drawing output.
                 'symbology': None}
            ),
        ]
        return parameters

    def isLicensed(self):  # pylint: disable=invalid-name,no-self-use
        """Set whether tool is licensed to execute."""
        # If tool needs extra licensing, returning False prevents execution.
        return True

    def updateMessages(self, parameters):  # pylint: disable=invalid-name,no-self-use
        """Modify messages created by internal validation for each parameter.

        This method is called after internal validation.
        """
        # No update requirements at this time.
        pass

    def updateParameters(self, parameters):  # pylint: disable=invalid-name,no-self-use
        """Modify parameters before internal validation is performed.

        This method is called whenever a parameter has been changed.
        """
        # Follow the below format for checking for changes.
        # Same code can be used for updateMessages.
        # Remove code if not needed.
        parameter_map = {parameter.name: parameter for parameter in parameters}
        # BUG FIX: the template indexed parameter_map['a_parameter'], but the
        # only parameter defined in getParameterInfo is 'example_parameter',
        # so every change event raised KeyError. Look the name up safely and
        # only react when the parameter actually exists.
        changed_parameter = parameter_map.get('a_parameter')
        if changed_parameter is not None and parameter_changed(changed_parameter):
            # Do something.
            pass

    def execute(self, parameters, messages):  # pylint: disable=no-self-use
        """Procedural code of the tool."""
        # Set up logger-like object; logs to both ArcPy and file's logger.
        log = ArcLogger(loggers=[LOG])
        # value_map contains dictionary with parameter name/value key/values.
        value_map = parameter_value_map(parameters)
        log.info("TODO: Steps of the tool here.")
# Tool-specific helpers.
##TODO: Put objects specific to tool(s) only in this toolbox here.
# Helpers.
##TODO: Put more generic objects here.
class ArcLogger(object):
    """Faux-logger for logging to ArcPy/ArcGIS messaging system."""

    # Map of stdlib logging levels to ArcPy messaging functions. The Arc
    # messaging system has no debug channel, so NOTSET/DEBUG are discarded
    # and CRITICAL is routed to the error channel.
    arc_function = {
        logging.NOTSET: (lambda msg: None),
        logging.DEBUG: (lambda msg: None),
        logging.INFO: arcpy.AddMessage,
        logging.WARNING: arcpy.AddWarning,
        logging.ERROR: arcpy.AddError,
        logging.CRITICAL: arcpy.AddError,
    }

    def __init__(self, loggers=None):
        """Instance initialization.

        Args:
            loggers: optional sequence of stdlib loggers to mirror output to.
        """
        self.loggers = loggers if loggers else []

    def debug(self, msg):
        """Emit msg at DEBUG severity."""
        self.log(logging.DEBUG, msg)

    def info(self, msg):
        """Emit msg at INFO severity."""
        self.log(logging.INFO, msg)

    def warning(self, msg):
        """Emit msg at WARNING severity."""
        self.log(logging.WARNING, msg)

    def error(self, msg):
        """Emit msg at ERROR severity."""
        self.log(logging.ERROR, msg)

    def critical(self, msg):
        """Emit msg at CRITICAL severity."""
        self.log(logging.CRITICAL, msg)

    def log(self, lvl, msg):
        """Send msg to the ArcPy channel for lvl, then to attached loggers."""
        self.arc_function[lvl](msg)
        for logger in self.loggers:
            logger.log(lvl, msg)
def parameter_changed(parameter):
    """Return True if parameter is in a pre-validation changed state."""
    return bool(parameter.altered) and not parameter.hasBeenValidated
def parameter_from_attributes(attribute_map):
    """Create ArcPy parameter object using an attribute mapping.

    Note that this doesn't check if the attribute exists in the default
    parameter instance, so a misspelled key may simply attach a new,
    ignored attribute (depends on how the class implements setattr).
    """
    parameter = arcpy.Parameter()
    # 'filter.*' keys are handled separately below; everything else is set
    # directly on the parameter.
    for attribute_name, attribute_value in attribute_map.items():
        if not attribute_name.startswith('filter.'):
            setattr(parameter, attribute_name, attribute_value)
    # Filter attributes don't stick using setattr; assign them explicitly.
    if 'filter.type' in attribute_map:
        parameter.filter.type = attribute_map['filter.type']
    if 'filter.list' in attribute_map:
        parameter.filter.list = attribute_map['filter.list']
    return parameter
def parameter_value(parameter):
    """Return value of parameter."""
    def unwrap(value_object):
        """Return the actual value; some values are wrapped in a value
        object (.value), others are not."""
        return getattr(value_object, 'value', value_object)

    # Multivalue parameters keep their values in .values (.value holds a
    # ValueTable object instead).
    if parameter.multiValue:
        return [unwrap(value) for value in parameter.values]
    return unwrap(parameter.value)
def parameter_value_map(parameters):
    """Create value map from ArcPy parameter objects."""
    value_map = {}
    for parameter in parameters:
        value_map[parameter.name] = parameter_value(parameter)
    return value_map
|
#!python
# coding=utf-8
import os
import random
import bisect
import calendar
from datetime import datetime
import netCDF4
import numpy as np
import pandas as pd
from pyaxiom import logger
class TimeSeries(object):
@staticmethod
def from_dataframe(df, output_directory, output_filename, latitude, longitude, station_name, global_attributes, variable_name, variable_attributes, sensor_vertical_datum=None, fillvalue=None, data_column=None, vertical_axis_name=None, vertical_positive=None):
    """Build a TimeSeries netCDF file from a pandas DataFrame.

    Expects df to contain 'time' and 'depth' columns plus a data column
    (named by data_column, default 'value'). NaNs in the data and depth
    columns are replaced with fillvalue (default -9999.9).

    When the raw times/depths cannot index the data (TimeSeries or
    add_variable raises ValueError), falls back through progressively
    more tolerant strategies: unique times, unique depths, unique both,
    and finally slow per-row index matching with raise_on_error=False.
    """
    if fillvalue is None:
        fillvalue = -9999.9
    if data_column is None:
        data_column = 'value'
    # Replace missing data and depth values with the fill value up front.
    df[data_column] = df[data_column].fillna(fillvalue)
    # Epoch seconds for each timestamp; utctimetuple() drops sub-second
    # precision and treats naive datetimes as UTC -- TODO confirm caller intent.
    times = np.asarray([ calendar.timegm(x.utctimetuple()) for x in df['time'] ])
    df['depth'] = df['depth'].fillna(fillvalue)
    depths = df['depth'].values
    try:
        # First attempt: use times and depths exactly as provided.
        ts = TimeSeries(output_directory, latitude, longitude, station_name, global_attributes, times=times, verticals=depths, output_filename=output_filename, vertical_fill=fillvalue, vertical_axis_name=vertical_axis_name, vertical_positive=vertical_positive)
        ts.add_variable(variable_name, df[data_column].values, attributes=variable_attributes, sensor_vertical_datum=sensor_vertical_datum, raise_on_error=True)
    except ValueError:
        logger.warning("Failed first attempt, trying again with unique times.")
        try:
            # Try uniquing time
            newtimes = np.unique(times)
            ts = TimeSeries(output_directory, latitude, longitude, station_name, global_attributes, times=newtimes, verticals=depths, output_filename=output_filename, vertical_fill=fillvalue, vertical_axis_name=vertical_axis_name, vertical_positive=vertical_positive)
            ts.add_variable(variable_name, df[data_column].values, attributes=variable_attributes, sensor_vertical_datum=sensor_vertical_datum, raise_on_error=True)
        except ValueError:
            logger.warning("Failed second attempt, trying again with unique depths.")
            try:
                # Try uniquing depths
                newdepths = np.unique(df['depth'].values)
                ts = TimeSeries(output_directory, latitude, longitude, station_name, global_attributes, times=times, verticals=newdepths, output_filename=output_filename, vertical_fill=fillvalue, vertical_axis_name=vertical_axis_name, vertical_positive=vertical_positive)
                ts.add_variable(variable_name, df[data_column].values, attributes=variable_attributes, sensor_vertical_datum=sensor_vertical_datum, raise_on_error=True)
            except ValueError:
                logger.warning("Failed third attempt, uniquing time and depth.")
                try:
                    # Unique both time and depth. (newtimes is guaranteed to
                    # be bound here: the second attempt assigned it before
                    # its constructor call could raise.)
                    newdepths = np.unique(df['depth'].values)
                    ts = TimeSeries(output_directory, latitude, longitude, station_name, global_attributes, times=newtimes, verticals=newdepths, output_filename=output_filename, vertical_fill=fillvalue, vertical_axis_name=vertical_axis_name, vertical_positive=vertical_positive)
                    ts.add_variable(variable_name, df[data_column].values, attributes=variable_attributes, sensor_vertical_datum=sensor_vertical_datum, raise_on_error=True)
                except ValueError:
                    logger.warning("Failed fourth attempt, manually matching indexes (this is slow).")
                    # Manually match
                    ts = TimeSeries(output_directory, latitude, longitude, station_name, global_attributes, times=times, verticals=depths, output_filename=output_filename, vertical_fill=fillvalue, vertical_axis_name=vertical_axis_name, vertical_positive=vertical_positive)
                    ts.add_variable(variable_name, df[data_column].values, attributes=variable_attributes, times=times, verticals=depths, sensor_vertical_datum=sensor_vertical_datum, raise_on_error=False)
    return ts
# Create the netCDF file for one fixed station: global attributes, the
# station identifier, scalar lat/lon, crs/platform metadata variables, and
# the time and vertical axes (via setup_times_and_verticals).
def __init__(self, output_directory, latitude, longitude, station_name, global_attributes, times=None, verticals=None, vertical_fill=None, output_filename=None, vertical_axis_name=None, vertical_positive=None):
# Parameters:
#   output_directory: directory for the output file (created if missing)
#   latitude / longitude: scalar station position, degrees north / east
#   station_name: the timeseries_id; also used in the default filename
#   global_attributes: dict written as global attrs (managed keys skipped)
#   times: epoch seconds; verticals: sensor depths/heights
#   vertical_fill: fill value for the vertical axis (default -9999.9)
#   output_filename: optional; a random name is generated when omitted
#   vertical_axis_name: defaults to 'z'; vertical_positive: defaults to 'down'
if output_filename is None:
# Random suffix makes collisions with an existing file unlikely.
output_filename = '{}_{}.nc'.format(station_name, int(random.random()*100000))
logger.info("No output filename specified, saving as {}".format(output_filename))
self.vertical_positive = vertical_positive or 'down'
self.vertical_axis_name = vertical_axis_name or 'z'
self.time_axis_name = 'time'
# Make directory
if not os.path.exists(output_directory):
os.makedirs(output_directory)
out_file = os.path.abspath(os.path.join(output_directory, output_filename))
self.nc = netCDF4.Dataset(out_file, 'w')
self.time = None
# Global attributes
# These are managed by this class; we don't want someone to be able to set them manually
global_skips = ["time_coverage_start", "time_coverage_end", "time_coverage_duration", "time_coverage_resolution",
"featureType", "geospatial_vertical_positive", "geospatial_vertical_min", "geospatial_vertical_max",
"geospatial_lat_min", "geospatial_lon_min", "geospatial_lat_max", "geospatial_lon_max",
"geospatial_vertical_resolution", "Conventions", "date_created"]
for k, v in global_attributes.iteritems():
if v is None:
# netCDF attributes cannot be None; store the literal string instead.
v = "None"
if k not in global_skips:
self.nc.setncattr(k, v)
self.nc.setncattr("Conventions", "CF-1.6")
self.nc.setncattr("date_created", datetime.utcnow().strftime("%Y-%m-%dT%H:%M:00Z"))
# Station name, stored per-character as the CF timeseries_id variable
self.nc.createDimension("feature_type_instance", len(station_name))
name = self.nc.createVariable("feature_type_instance", "S1", ("feature_type_instance",))
name.cf_role = "timeseries_id"
name.long_name = "Identifier for each feature type instance"
name[:] = list(station_name)
# Location (scalar variables, mirrored into the geospatial_* attributes)
lat = self.nc.createVariable("latitude", "f8")
lat.units = "degrees_north"
lat.standard_name = "latitude"
lat.long_name = "sensor latitude"
lat[:] = latitude
self.nc.setncattr("geospatial_lat_min", latitude)
self.nc.setncattr("geospatial_lat_max", latitude)
lon = self.nc.createVariable("longitude", "f8")
lon.units = "degrees_east"
lon.standard_name = "longitude"
lon.long_name = "sensor longitude"
lon[:] = longitude
self.nc.setncattr("geospatial_lon_min", longitude)
self.nc.setncattr("geospatial_lon_max", longitude)
# Metadata variables: WGS84 (EPSG:4326) grid mapping and the platform
self.crs = self.nc.createVariable("crs", "i4")
self.crs.long_name = "http://www.opengis.net/def/crs/EPSG/0/4326"
self.crs.grid_mapping_name = "latitude_longitude"
self.crs.epsg_code = "EPSG:4326"
self.crs.semi_major_axis = float(6378137.0)
self.crs.inverse_flattening = float(298.257223563)
platform = self.nc.createVariable("platform", "i4")
platform.ioos_code = station_name
platform.short_name = global_attributes.get("title", station_name)
platform.long_name = global_attributes.get("description", station_name)
platform.definition = "http://mmisw.org/ont/ioos/definition/stationID"
if vertical_fill is None:
vertical_fill = -9999.9
self.vertical_fill = vertical_fill
# Build the time and z coordinate variables from the supplied arrays.
self.setup_times_and_verticals(times, verticals)
logger.info("Created file at '{}'".format(out_file))
def add_instrument_metadata(self, urn):
    """Create the scalar 'instrument' metadata variable for this sensor.

    The variable carries the IOOS sensor definition URL plus the given URN
    as both its long_name and ioos_code attributes.
    """
    sensor = self.nc.createVariable("instrument", "i4")
    for attr, value in (("definition", "http://mmisw.org/ont/ioos/definition/sensorID"),
                        ("long_name", urn),
                        ("ioos_code", urn)):
        setattr(sensor, attr, value)
def add_time_bounds(self, delta=None, position=None):
    """Create a '<time>_bounds' variable describing the measurement
    interval around each stored time value.

    Parameters:
        delta: datetime.timedelta width of each interval.
        position: where the recorded time sits within its interval —
            'start', 'middle' or 'end'.

    Raises:
        ValueError: if position is not one of the supported values.
            (The original silently created an empty, unfilled bounds
            variable in that case.)
    """
    if position not in ("start", "middle", "end"):
        raise ValueError("position must be 'start', 'middle' or 'end', got {!r}".format(position))
    self.nc.createDimension("bounds", 2)
    time_bounds = self.nc.createVariable('{}_bounds'.format(self.time_axis_name), "f8", ("time", "bounds",), chunksizes=(1000, 2,))
    time_bounds.units = "seconds since 1970-01-01T00:00:00Z"
    time_bounds.calendar = "gregorian"
    time_objs = netCDF4.num2date(self.time[:], units=self.time.units, calendar=self.time.calendar)
    bounds_kwargs = dict(units=time_bounds.units, calendar=time_bounds.calendar)
    # list(zip(...)) so this also works on Python 3, where zip() is a lazy
    # iterator that np.asarray would wrap in a useless 0-d object array.
    if position == "start":
        time_bounds[:] = np.asarray(list(zip(self.time[:], netCDF4.date2num(time_objs + delta, **bounds_kwargs))))
    elif position == "middle":
        time_bounds[:] = np.asarray(list(zip(netCDF4.date2num(time_objs - delta/2, **bounds_kwargs), netCDF4.date2num(time_objs + delta/2, **bounds_kwargs))))
    else:  # "end"
        time_bounds[:] = np.asarray(list(zip(netCDF4.date2num(time_objs - delta, **bounds_kwargs), self.time[:])))
# Write one data variable onto the (time[, z]) axes created by
# setup_times_and_verticals and return the new netCDF variable.
def add_variable(self, variable_name, values, times=None, verticals=None, sensor_vertical_datum=None, attributes=None, unlink_from_profile=None, fillvalue=None, raise_on_error=False):
# Parameters:
#   variable_name: name of the netCDF variable to create
#   values: data; must reshape to (time,) or (time, z), otherwise the
#       'times'/'verticals' arrays are used for slow manual matching
#   times / verticals: per-value coordinates for the manual fallback
#   sensor_vertical_datum: optional datum copied onto the crs variable
#   attributes: dict of attributes to set on the new variable
#   unlink_from_profile: True for a sensor not on the profile's z axis;
#       data is stored 1-D plus a 'sensor_depth' scalar metadata variable
#   fillvalue: _FillValue for the new variable (default -9999.9)
#   raise_on_error: if True, close the file and re-raise reshape errors
# Returns: the created netCDF variable.
if isinstance(values, (list, tuple,)) and values:
values = np.asarray(values)
if isinstance(times, (list, tuple,)) and times:
times = np.asarray(times)
if isinstance(verticals, (list, tuple,)) and verticals:
verticals = np.asarray(verticals)
# Set vertical datum on the CRS variable
if sensor_vertical_datum is not None:
try:
self.crs.geoid_name = sensor_vertical_datum
self.crs.vertical_datum = sensor_vertical_datum
self.crs.water_surface_reference_datum = sensor_vertical_datum
except AttributeError:
pass
# Set default fillvalue for new variables
if fillvalue is None:
fillvalue = -9999.9
used_values = None
# First try cheap reshapes, sorting rows into stored time order via
# self.time_indexes (and columns via self.vertical_indexes for 2-D data).
try:
if unlink_from_profile is True:
used_values = np.ma.reshape(values, (self.time.size, ))
used_values = used_values[self.time_indexes]
# These next two cases should work for all but a few cases, which are caught below
elif self.z.size == 1:
used_values = np.ma.reshape(values, (self.time.size, ))
used_values = used_values[self.time_indexes]
else:
used_values = np.ma.reshape(values, (self.time.size, self.z.size, ))
used_values = used_values[self.time_indexes]
try:
used_values = used_values[:, self.vertical_indexes]
except IndexError:
# The vertical values most likely had duplicates. Ignore the
# faulty index here and try to save the values as is.
pass
except ValueError:
if raise_on_error is True:
self.close()
raise
else:
logger.exception("Could not do a simple reshape of data, trying to match manually! Time:{!s}, Heights:{!s}, Values:{!s}".format(self.time.size, self.z.size, values.size))
if self.z.size > 1:
if times is not None and verticals is not None:
# Hmmm, we have two actual height values for this station.
# Not cool man, not cool.
# Reindex the entire values array. This is slow.
indexed = ((bisect.bisect_left(self.time[:], times[i]), bisect.bisect_left(self.z[:], verticals[i]), values[i]) for i in xrange(values.size))
used_values = np.ndarray((self.time.size, self.z.size, ), dtype=np.float64)
used_values.fill(float(fillvalue))
for (tzi, zzi, vz) in indexed:
if zzi < self.z.size and tzi < self.time.size:
used_values[tzi, zzi] = vz
else:
self.close()
raise ValueError("You need to pass in both 'times' and 'verticals' parameters that matches the size of the 'values' parameter.")
else:
if times is not None:
# Ugh, find the time indexes manually
indexed = ((bisect.bisect_left(self.time[:], times[i]), values[i]) for i in xrange(values.size))
used_values = np.ndarray((self.time.size, ), dtype=np.float64)
used_values.fill(float(fillvalue))
for (tzi, vz) in indexed:
if tzi < self.time.size:
used_values[tzi] = vz
else:
self.close()
raise ValueError("You need to pass in a 'times' parameter that matches the size of the 'values' parameter.")
logger.info("Setting values for {}...".format(variable_name))
if len(used_values.shape) == 1:
var = self.nc.createVariable(variable_name, "f8", ("time",), fill_value=fillvalue, chunksizes=(1000,), zlib=True)
if self.z.size == 1:
var.coordinates = "{} {} latitude longitude".format(self.time_axis_name, self.vertical_axis_name)
else:
# This is probably a bottom sensor on an ADCP or something, don't add the height coordinate
var.coordinates = "{} latitude longitude".format(self.time_axis_name)
if unlink_from_profile is True:
# Create metadata variable for the sensor_depth
if self.nc.variables.get('sensor_depth') is None:
logger.info("Setting the special case 'sensor_depth' metadata variable")
inst_depth = self.nc.createVariable('sensor_depth', 'f4')
inst_depth.units = 'm'
inst_depth.standard_name = 'surface_altitude'
inst_depth.positive = self.vertical_positive
if self.vertical_positive.lower() == 'down':
inst_depth.long_name = 'sensor depth below datum'
elif self.vertical_positive.lower() == 'up':
inst_depth.long_name = 'sensor height above datum'
inst_depth.datum = sensor_vertical_datum or 'Unknown'
# NOTE(review): only the first vertical is recorded here — assumes
# an unlinked sensor sits at a single depth. TODO confirm.
inst_depth[:] = verticals[0]
elif len(used_values.shape) == 2:
var = self.nc.createVariable(variable_name, "f8", ("time", "z",), fill_value=fillvalue, chunksizes=(1000, self.z.size,), zlib=True)
var.coordinates = "{} {} latitude longitude".format(self.time_axis_name, self.vertical_axis_name)
else:
raise ValueError("Could not create variable. Shape of data is {!s}. Expected a dimension of 1 or 2, not {!s}.".format(used_values.shape, len(used_values.shape)))
# Set the variable attributes as passed in
if attributes:
for k, v in attributes.iteritems():
if k == 'vertical_datum' and sensor_vertical_datum is None and v is not None:
# Use this as the vertical datum if it is specified and we didn't already have one
try:
self.crs.geoid_name = v
self.crs.vertical_datum = v
self.crs.water_surface_reference_datum = v
except AttributeError:
pass
if k not in ['coordinates', '_FillValue'] and v is not None:
try:
var.setncattr(k, v)
except BaseException:
logger.info('Could not add attribute {}: {}, skipping.'.format(k, v))
var.grid_mapping = 'crs'
var[:] = used_values
return var
# Build the 'time' (unlimited) and vertical coordinate variables from the
# supplied arrays, and set the derived global attributes (coverage extents,
# duration, resolution, featureType).
def setup_times_and_verticals(self, times, verticals):
# Parameters:
#   times: epoch seconds (list/tuple/ndarray); duplicates are allowed
#   verticals: depths/heights; None/empty becomes a single masked fill value
if isinstance(times, (list, tuple,)):
times = np.asarray(times)
# If nothing is passed in, set to the vertical_fill value.
if not isinstance(verticals, np.ndarray) and not verticals:
verticals = np.ma.masked_values([self.vertical_fill], self.vertical_fill)
# Convert to masked array
if isinstance(verticals, (list, tuple,)) or isinstance(verticals, np.ndarray):
verticals = np.ma.masked_values(verticals, self.vertical_fill)
# Don't unique Time... rely on the person submitting the data correctly.
# That means we allow duplicate times, as long as the data contains duplicate times as well.
# time_indexes is the sort order later applied to data rows in add_variable.
self.time_indexes = np.argsort(times)
unique_times = times[self.time_indexes]
# Unique the vertical values
# Special case for all zeros. Added here for greater readability.
if np.isclose(verticals, 0).all():
# Temporarily clear the mask so np.ma.unique sees every value, then
# restore it through the returned indexes.
save_mask = verticals.mask
verticals.mask = False
unique_verticals, self.vertical_indexes = np.ma.unique(verticals, return_index=True)
if save_mask.size > 1:
unique_verticals.mask = save_mask[self.vertical_indexes]
elif verticals is not None and verticals.any():
save_mask = verticals.mask
verticals.mask = False
unique_verticals, self.vertical_indexes = np.ma.unique(verticals, return_index=True)
if save_mask.size > 1:
unique_verticals.mask = save_mask[self.vertical_indexes]
else:
unique_verticals = verticals
self.vertical_indexes = np.arange(len(verticals))
starting = datetime.utcfromtimestamp(unique_times[0])
ending = datetime.utcfromtimestamp(unique_times[-1])
logger.debug("Setting up time...")
# Time extents
self.nc.setncattr("time_coverage_start", starting.isoformat())
self.nc.setncattr("time_coverage_end", ending.isoformat())
# duration (ISO8601 format)
self.nc.setncattr("time_coverage_duration", "P%sS" % unicode(int(round((ending - starting).total_seconds()))))
# resolution (ISO8601 format)
# subtract adjacent times to produce an array of differences, then get the most common occurrence
diffs = unique_times[1:] - unique_times[:-1]
uniqs, inverse = np.unique(diffs, return_inverse=True)
if uniqs.size > 1:
time_diffs = diffs[np.bincount(inverse).argmax()]
self.nc.setncattr("time_coverage_resolution", "P%sS" % unicode(int(round(time_diffs))))
# Time axis: unlimited dimension, stored as 64-bit float epoch seconds
self.nc.createDimension("time")
self.time = self.nc.createVariable(self.time_axis_name, "f8", ("time",), chunksizes=(1000,))
self.time.units = "seconds since 1970-01-01T00:00:00Z"
self.time.standard_name = "time"
self.time.long_name = "time of measurement"
self.time.calendar = "gregorian"
self.time[:] = unique_times
logger.debug("Setting up {}...".format(self.vertical_axis_name))
# Figure out if we are creating a Profile or just a TimeSeries
if unique_verticals.size <= 1:
# TIMESERIES
self.nc.setncattr("featureType", "timeSeries")
# Fill in variable if we have an actual height. Else, the fillvalue remains.
if unique_verticals.any() and unique_verticals.size == 1:
# Vertical extents
self.nc.setncattr("geospatial_vertical_positive", self.vertical_positive)
self.nc.setncattr("geospatial_vertical_min", unique_verticals[0])
self.nc.setncattr("geospatial_vertical_max", unique_verticals[0])
# Scalar z variable (no dimension) for the single-depth case
self.z = self.nc.createVariable(self.vertical_axis_name, "f8", fill_value=self.vertical_fill)
elif unique_verticals.size > 1:
# TIMESERIES PROFILE
self.nc.setncattr("featureType", "timeSeriesProfile")
# Vertical extents
minvertical = float(np.min(unique_verticals))
maxvertical = float(np.max(unique_verticals))
vertical_diffs = unique_verticals[1:] - unique_verticals[:-1]
self.nc.setncattr("geospatial_vertical_positive", self.vertical_positive)
self.nc.setncattr("geospatial_vertical_min", minvertical)
self.nc.setncattr("geospatial_vertical_max", maxvertical)
self.nc.setncattr("geospatial_vertical_resolution", " ".join(map(unicode, list(vertical_diffs))))
# There is more than one vertical value for this variable, we need to create a vertical dimension
self.nc.createDimension("z", unique_verticals.size)
self.z = self.nc.createVariable(self.vertical_axis_name, "f8", ("z", ), fill_value=self.vertical_fill)
self.z.grid_mapping = 'crs'
self.z.long_name = "{} of the sensor relative to the water surface".format(self.vertical_axis_name)
if self.vertical_positive == 'up':
self.z.standard_name = 'height'
elif self.vertical_positive == 'down':
self.z.standard_name = 'depth'
self.z.positive = self.vertical_positive
self.z.units = "m"
self.z.axis = "Z"
self.z[:] = unique_verticals
self.nc.sync()
@property
def ncd(self):
    """Read-only accessor for the underlying netCDF4.Dataset handle."""
    dataset_handle = self.nc
    return dataset_handle
def close(self):
    """Close the underlying netCDF file, ignoring errors if it is already
    closed or was never opened.

    Only Exception subclasses are swallowed; the original bare 'except:'
    also swallowed KeyboardInterrupt/SystemExit, which should propagate.
    """
    try:
        self.nc.close()
    except Exception:
        pass
def get_dataframe_from_variable(nc, data_var):
    """Return a pandas DataFrame (indexed by time and depth) for one data
    variable of an open netCDF4.Dataset.

    Parameters:
        nc: open Dataset supporting get_variables_by_attributes.
        data_var: the netCDF variable holding the data values.
    """
    time_var = nc.get_variables_by_attributes(standard_name='time')[0]
    # Candidate depth coordinates: anything with a Z axis, plus variables
    # with a vertical standard_name and a 'positive' attribute.
    depth_vars = nc.get_variables_by_attributes(axis=lambda v: v is not None and v.lower() == 'z')
    # BUG FIX: the original list was ['height', 'depth' 'surface_altitude'] —
    # the missing comma concatenated the literals into 'depthsurface_altitude',
    # so 'depth' and 'surface_altitude' variables were never matched.
    depth_vars += nc.get_variables_by_attributes(standard_name=lambda v: v in ['height', 'depth', 'surface_altitude'], positive=lambda x: x is not None)
    # Pick the candidate actually referenced by the data variable's
    # 'coordinates' attribute or its dimensions.
    depth_var = None
    for d in depth_vars:
        try:
            if d._name in data_var.coordinates.split(" ") or d._name in data_var.dimensions:
                depth_var = d
                break
        except AttributeError:
            # data_var has no 'coordinates' attribute; try the next candidate.
            continue
    times = netCDF4.num2date(time_var[:], units=time_var.units)
    original_times_size = times.size
    if depth_var is None and hasattr(data_var, 'sensor_depth'):
        # No depth coordinate, but the variable records its own sensor depth.
        depths = np.asarray([data_var.sensor_depth] * len(times)).flatten()
        values = data_var[:].flatten()
    elif depth_var is None:
        depths = np.asarray([np.nan] * len(times)).flatten()
        values = data_var[:].flatten()
    else:
        depths = depth_var[:]
        if len(data_var.shape) > 1:
            # 2-D (time, depth) data: expand to one row per (time, depth) pair.
            times = np.repeat(times, depths.size)
            depths = np.tile(depths, original_times_size)
            values = data_var[:, :].flatten()
        else:
            values = data_var[:].flatten()
    df = pd.DataFrame({'time': times,
                       'value': values,
                       'unit': data_var.units,
                       'depth': depths})
    df = df.set_index([pd.DatetimeIndex(df['time']), pd.Float64Index(df['depth'])])
    return df
# Set an in-place index so we don't copy the entire dataframe
#!python
# coding=utf-8
import os
import random
import bisect
import calendar
from datetime import datetime
import netCDF4
import numpy as np
import pandas as pd
from pyaxiom import logger
# Writes a CF-1.6 Discrete Sampling Geometry netCDF file (timeSeries or
# timeSeriesProfile) for a single fixed station.
class TimeSeries(object):
@staticmethod
# Build a TimeSeries file from a DataFrame with 'time', 'depth' and a data
# column (default 'value'). NaNs are replaced with 'fillvalue'. If the data
# does not reshape cleanly onto a (time, depth) grid, progressively retry
# with uniqued times, uniqued depths, both, and finally slow manual index
# matching. Returns the TimeSeries instance (its netCDF file stays open).
def from_dataframe(df, output_directory, output_filename, latitude, longitude, station_name, global_attributes, variable_name, variable_attributes, sensor_vertical_datum=None, fillvalue=None, data_column=None, vertical_axis_name=None, vertical_positive=None):
if fillvalue is None:
fillvalue = -9999.9
if data_column is None:
data_column = 'value'
df[data_column] = df[data_column].fillna(fillvalue)
# Convert datetimes to UTC epoch seconds
times = np.asarray([ calendar.timegm(x.utctimetuple()) for x in df['time'] ])
df['depth'] = df['depth'].fillna(fillvalue)
depths = df['depth'].values
try:
ts = TimeSeries(output_directory, latitude, longitude, station_name, global_attributes, times=times, verticals=depths, output_filename=output_filename, vertical_fill=fillvalue, vertical_axis_name=vertical_axis_name, vertical_positive=vertical_positive)
ts.add_variable(variable_name, df[data_column].values, attributes=variable_attributes, sensor_vertical_datum=sensor_vertical_datum, raise_on_error=True)
except ValueError:
logger.warning("Failed first attempt, trying again with unique times.")
try:
# Try uniquing time
newtimes = np.unique(times)
ts = TimeSeries(output_directory, latitude, longitude, station_name, global_attributes, times=newtimes, verticals=depths, output_filename=output_filename, vertical_fill=fillvalue, vertical_axis_name=vertical_axis_name, vertical_positive=vertical_positive)
ts.add_variable(variable_name, df[data_column].values, attributes=variable_attributes, sensor_vertical_datum=sensor_vertical_datum, raise_on_error=True)
except ValueError:
logger.warning("Failed second attempt, trying again with unique depths.")
try:
# Try uniquing depths
newdepths = np.unique(df['depth'].values)
ts = TimeSeries(output_directory, latitude, longitude, station_name, global_attributes, times=times, verticals=newdepths, output_filename=output_filename, vertical_fill=fillvalue, vertical_axis_name=vertical_axis_name, vertical_positive=vertical_positive)
ts.add_variable(variable_name, df[data_column].values, attributes=variable_attributes, sensor_vertical_datum=sensor_vertical_datum, raise_on_error=True)
except ValueError:
logger.warning("Failed third attempt, uniquing time and depth.")
try:
# Unique both time and depth
newdepths = np.unique(df['depth'].values)
ts = TimeSeries(output_directory, latitude, longitude, station_name, global_attributes, times=newtimes, verticals=newdepths, output_filename=output_filename, vertical_fill=fillvalue, vertical_axis_name=vertical_axis_name, vertical_positive=vertical_positive)
ts.add_variable(variable_name, df[data_column].values, attributes=variable_attributes, sensor_vertical_datum=sensor_vertical_datum, raise_on_error=True)
except ValueError:
logger.warning("Failed fourth attempt, manually matching indexes (this is slow).")
# Manually match: pass per-value times/verticals so add_variable
# can bisect each value into place; errors no longer raise.
ts = TimeSeries(output_directory, latitude, longitude, station_name, global_attributes, times=times, verticals=depths, output_filename=output_filename, vertical_fill=fillvalue, vertical_axis_name=vertical_axis_name, vertical_positive=vertical_positive)
ts.add_variable(variable_name, df[data_column].values, attributes=variable_attributes, times=times, verticals=depths, sensor_vertical_datum=sensor_vertical_datum, raise_on_error=False)
return ts
# Create the netCDF file for one fixed station: global attributes, the
# station identifier, scalar lat/lon, crs/platform metadata variables, and
# the time and vertical axes (via setup_times_and_verticals).
def __init__(self, output_directory, latitude, longitude, station_name, global_attributes, times=None, verticals=None, vertical_fill=None, output_filename=None, vertical_axis_name=None, vertical_positive=None):
# Parameters:
#   output_directory: directory for the output file (created if missing)
#   latitude / longitude: scalar station position, degrees north / east
#   station_name: the timeseries_id; also used in the default filename
#   global_attributes: dict written as global attrs (managed keys skipped)
#   times: epoch seconds; verticals: sensor depths/heights
#   vertical_fill: fill value for the vertical axis (default -9999.9)
#   output_filename: optional; a random name is generated when omitted
#   vertical_axis_name: defaults to 'z'; vertical_positive: defaults to 'down'
if output_filename is None:
# Random suffix makes collisions with an existing file unlikely.
output_filename = '{}_{}.nc'.format(station_name, int(random.random()*100000))
logger.info("No output filename specified, saving as {}".format(output_filename))
self.vertical_positive = vertical_positive or 'down'
self.vertical_axis_name = vertical_axis_name or 'z'
self.time_axis_name = 'time'
# Make directory
if not os.path.exists(output_directory):
os.makedirs(output_directory)
out_file = os.path.abspath(os.path.join(output_directory, output_filename))
self.nc = netCDF4.Dataset(out_file, 'w')
self.time = None
# Global attributes
# These are managed by this class; we don't want someone to be able to set them manually
global_skips = ["time_coverage_start", "time_coverage_end", "time_coverage_duration", "time_coverage_resolution",
"featureType", "geospatial_vertical_positive", "geospatial_vertical_min", "geospatial_vertical_max",
"geospatial_lat_min", "geospatial_lon_min", "geospatial_lat_max", "geospatial_lon_max",
"geospatial_vertical_resolution", "Conventions", "date_created"]
for k, v in global_attributes.iteritems():
if v is None:
# netCDF attributes cannot be None; store the literal string instead.
v = "None"
if k not in global_skips:
self.nc.setncattr(k, v)
self.nc.setncattr("Conventions", "CF-1.6")
self.nc.setncattr("date_created", datetime.utcnow().strftime("%Y-%m-%dT%H:%M:00Z"))
# Station name, stored per-character as the CF timeseries_id variable
self.nc.createDimension("feature_type_instance", len(station_name))
name = self.nc.createVariable("feature_type_instance", "S1", ("feature_type_instance",))
name.cf_role = "timeseries_id"
name.long_name = "Identifier for each feature type instance"
name[:] = list(station_name)
# Location (scalar variables, mirrored into the geospatial_* attributes)
lat = self.nc.createVariable("latitude", "f8")
lat.units = "degrees_north"
lat.standard_name = "latitude"
lat.long_name = "sensor latitude"
lat[:] = latitude
self.nc.setncattr("geospatial_lat_min", latitude)
self.nc.setncattr("geospatial_lat_max", latitude)
lon = self.nc.createVariable("longitude", "f8")
lon.units = "degrees_east"
lon.standard_name = "longitude"
lon.long_name = "sensor longitude"
lon[:] = longitude
self.nc.setncattr("geospatial_lon_min", longitude)
self.nc.setncattr("geospatial_lon_max", longitude)
# Metadata variables: WGS84 (EPSG:4326) grid mapping and the platform
self.crs = self.nc.createVariable("crs", "i4")
self.crs.long_name = "http://www.opengis.net/def/crs/EPSG/0/4326"
self.crs.grid_mapping_name = "latitude_longitude"
self.crs.epsg_code = "EPSG:4326"
self.crs.semi_major_axis = float(6378137.0)
self.crs.inverse_flattening = float(298.257223563)
platform = self.nc.createVariable("platform", "i4")
platform.ioos_code = station_name
platform.short_name = global_attributes.get("title", station_name)
platform.long_name = global_attributes.get("description", station_name)
platform.definition = "http://mmisw.org/ont/ioos/definition/stationID"
if vertical_fill is None:
vertical_fill = -9999.9
self.vertical_fill = vertical_fill
# Build the time and z coordinate variables from the supplied arrays.
self.setup_times_and_verticals(times, verticals)
logger.info("Created file at '{}'".format(out_file))
def add_instrument_metadata(self, urn):
    """Record the sensor URN on a scalar 'instrument' metadata variable."""
    instrument_var = self.nc.createVariable("instrument", "i4")
    instrument_var.definition = "http://mmisw.org/ont/ioos/definition/sensorID"
    instrument_var.ioos_code = urn
    instrument_var.long_name = urn
def add_time_bounds(self, delta=None, position=None):
    """Create a '<time>_bounds' variable describing the measurement
    interval around each stored time value.

    Parameters:
        delta: datetime.timedelta width of each interval.
        position: where the recorded time sits within its interval —
            'start', 'middle' or 'end'.

    Raises:
        ValueError: if position is not one of the supported values.
            (The original silently created an empty, unfilled bounds
            variable in that case.)
    """
    if position not in ("start", "middle", "end"):
        raise ValueError("position must be 'start', 'middle' or 'end', got {!r}".format(position))
    self.nc.createDimension("bounds", 2)
    time_bounds = self.nc.createVariable('{}_bounds'.format(self.time_axis_name), "f8", ("time", "bounds",), chunksizes=(1000, 2,))
    time_bounds.units = "seconds since 1970-01-01T00:00:00Z"
    time_bounds.calendar = "gregorian"
    time_objs = netCDF4.num2date(self.time[:], units=self.time.units, calendar=self.time.calendar)
    bounds_kwargs = dict(units=time_bounds.units, calendar=time_bounds.calendar)
    # list(zip(...)) so this also works on Python 3, where zip() is a lazy
    # iterator that np.asarray would wrap in a useless 0-d object array.
    if position == "start":
        time_bounds[:] = np.asarray(list(zip(self.time[:], netCDF4.date2num(time_objs + delta, **bounds_kwargs))))
    elif position == "middle":
        time_bounds[:] = np.asarray(list(zip(netCDF4.date2num(time_objs - delta/2, **bounds_kwargs), netCDF4.date2num(time_objs + delta/2, **bounds_kwargs))))
    else:  # "end"
        time_bounds[:] = np.asarray(list(zip(netCDF4.date2num(time_objs - delta, **bounds_kwargs), self.time[:])))
# Write one data variable onto the (time[, z]) axes created by
# setup_times_and_verticals and return the new netCDF variable.
def add_variable(self, variable_name, values, times=None, verticals=None, sensor_vertical_datum=None, attributes=None, unlink_from_profile=None, fillvalue=None, raise_on_error=False):
# Parameters:
#   variable_name: name of the netCDF variable to create
#   values: data; must reshape to (time,) or (time, z), otherwise the
#       'times'/'verticals' arrays are used for slow manual matching
#   times / verticals: per-value coordinates for the manual fallback
#   sensor_vertical_datum: optional datum copied onto the crs variable
#   attributes: dict of attributes to set on the new variable
#   unlink_from_profile: True for a sensor not on the profile's z axis;
#       data is stored 1-D plus a 'sensor_depth' scalar metadata variable
#   fillvalue: _FillValue for the new variable (default -9999.9)
#   raise_on_error: if True, close the file and re-raise reshape errors
# Returns: the created netCDF variable.
if isinstance(values, (list, tuple,)) and values:
values = np.asarray(values)
if isinstance(times, (list, tuple,)) and times:
times = np.asarray(times)
if isinstance(verticals, (list, tuple,)) and verticals:
verticals = np.asarray(verticals)
# Set vertical datum on the CRS variable
if sensor_vertical_datum is not None:
try:
self.crs.geoid_name = sensor_vertical_datum
self.crs.vertical_datum = sensor_vertical_datum
self.crs.water_surface_reference_datum = sensor_vertical_datum
except AttributeError:
pass
# Set default fillvalue for new variables
if fillvalue is None:
fillvalue = -9999.9
used_values = None
# First try cheap reshapes, sorting rows into stored time order via
# self.time_indexes (and columns via self.vertical_indexes for 2-D data).
try:
if unlink_from_profile is True:
used_values = np.ma.reshape(values, (self.time.size, ))
used_values = used_values[self.time_indexes]
# These next two cases should work for all but a few cases, which are caught below
elif self.z.size == 1:
used_values = np.ma.reshape(values, (self.time.size, ))
used_values = used_values[self.time_indexes]
else:
used_values = np.ma.reshape(values, (self.time.size, self.z.size, ))
used_values = used_values[self.time_indexes]
try:
used_values = used_values[:, self.vertical_indexes]
except IndexError:
# The vertical values most likely had duplicates. Ignore the
# faulty index here and try to save the values as is.
pass
except ValueError:
if raise_on_error is True:
self.close()
raise
else:
logger.exception("Could not do a simple reshape of data, trying to match manually! Time:{!s}, Heights:{!s}, Values:{!s}".format(self.time.size, self.z.size, values.size))
if self.z.size > 1:
if times is not None and verticals is not None:
# Hmmm, we have two actual height values for this station.
# Not cool man, not cool.
# Reindex the entire values array. This is slow.
indexed = ((bisect.bisect_left(self.time[:], times[i]), bisect.bisect_left(self.z[:], verticals[i]), values[i]) for i in xrange(values.size))
used_values = np.ndarray((self.time.size, self.z.size, ), dtype=np.float64)
used_values.fill(float(fillvalue))
for (tzi, zzi, vz) in indexed:
if zzi < self.z.size and tzi < self.time.size:
used_values[tzi, zzi] = vz
else:
self.close()
raise ValueError("You need to pass in both 'times' and 'verticals' parameters that matches the size of the 'values' parameter.")
else:
if times is not None:
# Ugh, find the time indexes manually
indexed = ((bisect.bisect_left(self.time[:], times[i]), values[i]) for i in xrange(values.size))
used_values = np.ndarray((self.time.size, ), dtype=np.float64)
used_values.fill(float(fillvalue))
for (tzi, vz) in indexed:
if tzi < self.time.size:
used_values[tzi] = vz
else:
self.close()
raise ValueError("You need to pass in a 'times' parameter that matches the size of the 'values' parameter.")
logger.info("Setting values for {}...".format(variable_name))
if len(used_values.shape) == 1:
var = self.nc.createVariable(variable_name, "f8", ("time",), fill_value=fillvalue, chunksizes=(1000,), zlib=True)
if self.z.size == 1:
var.coordinates = "{} {} latitude longitude".format(self.time_axis_name, self.vertical_axis_name)
else:
# This is probably a bottom sensor on an ADCP or something, don't add the height coordinate
var.coordinates = "{} latitude longitude".format(self.time_axis_name)
if unlink_from_profile is True:
# Create metadata variable for the sensor_depth
if self.nc.variables.get('sensor_depth') is None:
logger.info("Setting the special case 'sensor_depth' metadata variable")
inst_depth = self.nc.createVariable('sensor_depth', 'f4')
inst_depth.units = 'm'
inst_depth.standard_name = 'surface_altitude'
inst_depth.positive = self.vertical_positive
if self.vertical_positive.lower() == 'down':
inst_depth.long_name = 'sensor depth below datum'
elif self.vertical_positive.lower() == 'up':
inst_depth.long_name = 'sensor height above datum'
inst_depth.datum = sensor_vertical_datum or 'Unknown'
# NOTE(review): only the first vertical is recorded here — assumes
# an unlinked sensor sits at a single depth. TODO confirm.
inst_depth[:] = verticals[0]
elif len(used_values.shape) == 2:
var = self.nc.createVariable(variable_name, "f8", ("time", "z",), fill_value=fillvalue, chunksizes=(1000, self.z.size,), zlib=True)
var.coordinates = "{} {} latitude longitude".format(self.time_axis_name, self.vertical_axis_name)
else:
raise ValueError("Could not create variable. Shape of data is {!s}. Expected a dimension of 1 or 2, not {!s}.".format(used_values.shape, len(used_values.shape)))
# Set the variable attributes as passed in
if attributes:
for k, v in attributes.iteritems():
if k == 'vertical_datum' and sensor_vertical_datum is None and v is not None:
# Use this as the vertical datum if it is specified and we didn't already have one
try:
self.crs.geoid_name = v
self.crs.vertical_datum = v
self.crs.water_surface_reference_datum = v
except AttributeError:
pass
if k not in ['coordinates', '_FillValue'] and v is not None:
try:
var.setncattr(k, v)
except BaseException:
logger.info('Could not add attribute {}: {}, skipping.'.format(k, v))
var.grid_mapping = 'crs'
var[:] = used_values
return var
# Build the 'time' (unlimited) and vertical coordinate variables from the
# supplied arrays, and set the derived global attributes (coverage extents,
# duration, resolution, featureType).
def setup_times_and_verticals(self, times, verticals):
# Parameters:
#   times: epoch seconds (list/tuple/ndarray); duplicates are allowed
#   verticals: depths/heights; None/empty becomes a single masked fill value
if isinstance(times, (list, tuple,)):
times = np.asarray(times)
# If nothing is passed in, set to the vertical_fill value.
if not isinstance(verticals, np.ndarray) and not verticals:
verticals = np.ma.masked_values([self.vertical_fill], self.vertical_fill)
# Convert to masked array
if isinstance(verticals, (list, tuple,)) or isinstance(verticals, np.ndarray):
verticals = np.ma.masked_values(verticals, self.vertical_fill)
# Don't unique Time... rely on the person submitting the data correctly.
# That means we allow duplicate times, as long as the data contains duplicate times as well.
# time_indexes is the sort order later applied to data rows in add_variable.
self.time_indexes = np.argsort(times)
unique_times = times[self.time_indexes]
# Unique the vertical values
# Special case for all zeros. Added here for greater readability.
if np.isclose(verticals, 0).all():
# Temporarily clear the mask so np.ma.unique sees every value, then
# restore it through the returned indexes.
save_mask = verticals.mask
verticals.mask = False
unique_verticals, self.vertical_indexes = np.ma.unique(verticals, return_index=True)
if save_mask.size > 1:
unique_verticals.mask = save_mask[self.vertical_indexes]
elif verticals is not None and verticals.any():
save_mask = verticals.mask
verticals.mask = False
unique_verticals, self.vertical_indexes = np.ma.unique(verticals, return_index=True)
if save_mask.size > 1:
unique_verticals.mask = save_mask[self.vertical_indexes]
else:
unique_verticals = verticals
self.vertical_indexes = np.arange(len(verticals))
starting = datetime.utcfromtimestamp(unique_times[0])
ending = datetime.utcfromtimestamp(unique_times[-1])
logger.debug("Setting up time...")
# Time extents
self.nc.setncattr("time_coverage_start", starting.isoformat())
self.nc.setncattr("time_coverage_end", ending.isoformat())
# duration (ISO8601 format)
self.nc.setncattr("time_coverage_duration", "P%sS" % unicode(int(round((ending - starting).total_seconds()))))
# resolution (ISO8601 format)
# subtract adjacent times to produce an array of differences, then get the most common occurrence
diffs = unique_times[1:] - unique_times[:-1]
uniqs, inverse = np.unique(diffs, return_inverse=True)
if uniqs.size > 1:
time_diffs = diffs[np.bincount(inverse).argmax()]
self.nc.setncattr("time_coverage_resolution", "P%sS" % unicode(int(round(time_diffs))))
# Time axis: unlimited dimension, stored as 64-bit float epoch seconds
self.nc.createDimension("time")
self.time = self.nc.createVariable(self.time_axis_name, "f8", ("time",), chunksizes=(1000,))
self.time.units = "seconds since 1970-01-01T00:00:00Z"
self.time.standard_name = "time"
self.time.long_name = "time of measurement"
self.time.calendar = "gregorian"
self.time[:] = unique_times
logger.debug("Setting up {}...".format(self.vertical_axis_name))
# Figure out if we are creating a Profile or just a TimeSeries
if unique_verticals.size <= 1:
# TIMESERIES
self.nc.setncattr("featureType", "timeSeries")
# Fill in variable if we have an actual height. Else, the fillvalue remains.
if unique_verticals.any() and unique_verticals.size == 1:
# Vertical extents
self.nc.setncattr("geospatial_vertical_positive", self.vertical_positive)
self.nc.setncattr("geospatial_vertical_min", unique_verticals[0])
self.nc.setncattr("geospatial_vertical_max", unique_verticals[0])
# Scalar z variable (no dimension) for the single-depth case
self.z = self.nc.createVariable(self.vertical_axis_name, "f8", fill_value=self.vertical_fill)
elif unique_verticals.size > 1:
# TIMESERIES PROFILE
self.nc.setncattr("featureType", "timeSeriesProfile")
# Vertical extents
minvertical = float(np.min(unique_verticals))
maxvertical = float(np.max(unique_verticals))
vertical_diffs = unique_verticals[1:] - unique_verticals[:-1]
self.nc.setncattr("geospatial_vertical_positive", self.vertical_positive)
self.nc.setncattr("geospatial_vertical_min", minvertical)
self.nc.setncattr("geospatial_vertical_max", maxvertical)
self.nc.setncattr("geospatial_vertical_resolution", " ".join(map(unicode, list(vertical_diffs))))
# There is more than one vertical value for this variable, we need to create a vertical dimension
self.nc.createDimension("z", unique_verticals.size)
self.z = self.nc.createVariable(self.vertical_axis_name, "f8", ("z", ), fill_value=self.vertical_fill)
self.z.grid_mapping = 'crs'
self.z.long_name = "{} of the sensor relative to the water surface".format(self.vertical_axis_name)
if self.vertical_positive == 'up':
self.z.standard_name = 'height'
elif self.vertical_positive == 'down':
self.z.standard_name = 'depth'
self.z.positive = self.vertical_positive
self.z.units = "m"
self.z.axis = "Z"
self.z[:] = unique_verticals
self.nc.sync()
@property
def ncd(self):
    """Read-only accessor exposing the wrapped netCDF4 Dataset handle."""
    dataset = self.nc
    return dataset
def close(self):
    """Close the underlying netCDF dataset, ignoring close-time errors.

    Catches ``Exception`` instead of the original bare ``except:`` so that
    ``KeyboardInterrupt``/``SystemExit`` are no longer swallowed; closing is
    still best-effort because the dataset may already be closed.
    """
    try:
        self.nc.close()
    except Exception:
        # Deliberate best-effort cleanup: nothing useful to do on failure.
        pass
def get_dataframe_from_variable(nc, data_var):
    """Return a long-format pandas DataFrame for *data_var*.

    Finds the time coordinate and a matching depth coordinate in *nc*, then
    flattens the variable into rows with ``time``, ``depth``, ``value`` and
    ``unit`` columns, indexed by (time, depth).
    """
    time_var = nc.get_variables_by_attributes(standard_name='time')[0]

    depth_vars = nc.get_variables_by_attributes(axis=lambda v: v is not None and v.lower() == 'z')
    # Bug fix: the original list was missing a comma, so 'depth' and
    # 'surface_altitude' fused into the single string 'depthsurface_altitude'
    # and neither standard_name could ever match.
    depth_vars += nc.get_variables_by_attributes(standard_name=lambda v: v in ['height', 'depth', 'surface_altitude'], positive=lambda x: x is not None)

    # Pick the depth variable referenced by the data variable's coordinates
    # attribute or dimensions, if any.
    depth_var = None
    for d in depth_vars:
        try:
            if d._name in data_var.coordinates.split(" ") or d._name in data_var.dimensions:
                depth_var = d
                break
        except AttributeError:
            # data_var has no 'coordinates' attribute; try the next candidate.
            continue

    times = netCDF4.num2date(time_var[:], units=time_var.units)
    original_times_size = times.size

    if depth_var is None and hasattr(data_var, 'sensor_depth'):
        # No depth coordinate, but the sensor depth is recorded as metadata.
        depths = np.asarray([data_var.sensor_depth] * len(times)).flatten()
        values = data_var[:].flatten()
    elif depth_var is None:
        depths = np.asarray([np.nan] * len(times)).flatten()
        values = data_var[:].flatten()
    else:
        depths = depth_var[:]
        if len(data_var.shape) > 1:
            # 2-D (time x depth): expand both axes to long format.
            times = np.repeat(times, depths.size)
            depths = np.tile(depths, original_times_size)
            values = data_var[:, :].flatten()
        else:
            values = data_var[:].flatten()

    df = pd.DataFrame({'time': times,
                       'value': values,
                       'unit': data_var.units,
                       'depth': depths})
    df.set_index([pd.DatetimeIndex(df['time']), pd.Float64Index(df['depth'])], inplace=True)
    return df
|
#!/usr/bin/python
import os
import numpy as np
import shutil
import common
from segment import normalizefile, segmentfile
def runAll(args):
    """Normalize (and optionally segment) bincounts files.

    Builds the output directory layout, GC/method-normalizes every sample in
    parallel via ``common.daemon``, and -- unless ``normalizeonly`` was
    requested -- runs CBS segmentation on the normalized data.
    Returns 0 when stopping after normalization only.
    """
    print('\n\n\nYou have requested to normalize and segment bincounts files')
    print('\tWARNING:')
    print('\t\tIF USING ANY REFERENCES OTHER THAN THOSE I PROVIDE I CANNOT GUARANTEE RESULT ACCURACY')
    print('\n')

    # Set up environment #
    args.CountDirectory = common.fixDirName(args.CountDirectory)
    lowessDir = os.path.dirname(args.CountDirectory[:-1]) + '/LowessBinCounts/'
    segmentDir = os.path.dirname(args.CountDirectory[:-1]) + '/Segments/'
    tempDir = os.path.dirname(args.CountDirectory[:-1]) + '/Temp/'
    if args.output:
        lowessDir = common.fixDirName(args.output) + 'LowessBinCounts/'
        segmentDir = common.fixDirName(args.output) + 'Segments/'
    common.makeDir(lowessDir)
    if not args.normalizeonly:
        common.makeDir(segmentDir)
    common.makeDir(tempDir)

    sampleFiles = common.getSampleList(args.CountDirectory, args.samples, 'bincounts')
    info = common.importInfoFile(args.infofile, args.columns, 'normalize')
    if args.infofile:
        refArray = info
    else:
        thisDtype = info
        # Bug fix: 'basename' was called unqualified (NameError; only the
        # 'os' module is imported here).
        refArray = np.array(
            [(os.path.basename(x)[:-14], 'unk', 1,) for x in sampleFiles],
            dtype=thisDtype)
    # Map each sample name to the single bincounts file whose basename
    # starts with that name.
    sampleDict = {x: [y for y in sampleFiles if x == os.path.basename(y)[:len(x)]][0] for x in refArray['name']}

    # Run normalization for all samples #
    methodDict = {x: False for x in np.unique(refArray['method'])}
    methodDict['NA'] = False
    # Bug fix: the original iterated methodDict (plain method-name strings)
    # and indexed x['name'], raising TypeError; this map must be keyed by
    # sample name (it is looked up per sample below).
    sampleNormMethodDict = {name: 'NA' for name in refArray['name']}
    if not args.gconly:
        for i in methodDict:
            refSlice = refArray[(refArray['method'] == i) & (refArray['cells'] == 1)]
            methodSamples = [sampleDict[x] for x in refSlice['name']]
            methodDict[i] = normalizefile.runMakeMethodRef(args.species, methodSamples, i, lowessDir)
            if methodDict[i] != False:
                for j in refSlice['name']:
                    sampleNormMethodDict[j] = i

    # Run multiprocessing for gc (+ method) correction.
    normArgs = [(args.species, sampleDict[x], methodDict[sampleNormMethodDict[x]], lowessDir + x + '.lowess.txt') for x in sampleDict.keys()]
    common.daemon(normalizefile.runNormalizeOne, normArgs, 'normalize bincount files')
    print('\nNormalization complete\n\n\n')
    if args.normalizeonly:
        shutil.rmtree(tempDir[:-1])
        return 0

    # Run CBS for all samples #
    segArgs = [(x, args.species, tempDir, lowessDir, segmentDir) for x in refArray['name']]
    common.daemon(segmentfile.segmentOne, segArgs, 'segment bincount data')
    shutil.rmtree(tempDir[:-1])
    print('\nSegmentation complete\n\n\n')
# Update runsegment.py
# Debugging temp changes
#!/usr/bin/python
import os
import numpy as np
import shutil
import common
from segment import normalizefile, segmentfile
def runAll(args):
    """Normalize (and optionally segment) bincounts files.

    Builds the output directory layout, GC/method-normalizes every sample in
    parallel via ``common.daemon``, and -- unless ``normalizeonly`` was
    requested -- runs CBS segmentation on the normalized data.
    Returns 0 when stopping after normalization only.

    Fixes relative to the original: removed the leftover debug statements
    (``print refArray``/``print sampleDict``/``raise SystemExit``) that
    aborted the function and made everything after them unreachable,
    qualified ``basename`` as ``os.path.basename``, and keyed
    ``sampleNormMethodDict`` by sample name instead of indexing strings.
    """
    print('\n\n\nYou have requested to normalize and segment bincounts files')
    print('\tWARNING:')
    print('\t\tIF USING ANY REFERENCES OTHER THAN THOSE I PROVIDE I CANNOT GUARANTEE RESULT ACCURACY')
    print('\n')

    # Set up environment #
    args.CountDirectory = common.fixDirName(args.CountDirectory)
    lowessDir = os.path.dirname(args.CountDirectory[:-1]) + '/LowessBinCounts/'
    segmentDir = os.path.dirname(args.CountDirectory[:-1]) + '/Segments/'
    tempDir = os.path.dirname(args.CountDirectory[:-1]) + '/Temp/'
    if args.output:
        lowessDir = common.fixDirName(args.output) + 'LowessBinCounts/'
        segmentDir = common.fixDirName(args.output) + 'Segments/'
    common.makeDir(lowessDir)
    if not args.normalizeonly:
        common.makeDir(segmentDir)
    common.makeDir(tempDir)

    sampleFiles = common.getSampleList(args.CountDirectory, args.samples, 'bincounts')
    info = common.importInfoFile(args.infofile, args.columns, 'normalize')
    if args.infofile:
        refArray = info
    else:
        thisDtype = info
        refArray = np.array(
            [(os.path.basename(x)[:-14], 'unk', 1,) for x in sampleFiles],
            dtype=thisDtype)
    # Map each sample name to the single bincounts file whose basename
    # starts with that name.
    sampleDict = {x: [y for y in sampleFiles if x == os.path.basename(y)[:len(x)]][0] for x in refArray['name']}

    # Run normalization for all samples #
    methodDict = {x: False for x in np.unique(refArray['method'])}
    methodDict['NA'] = False
    sampleNormMethodDict = {name: 'NA' for name in refArray['name']}
    if not args.gconly:
        for i in methodDict:
            refSlice = refArray[(refArray['method'] == i) & (refArray['cells'] == 1)]
            methodSamples = [sampleDict[x] for x in refSlice['name']]
            methodDict[i] = normalizefile.runMakeMethodRef(args.species, methodSamples, i, lowessDir)
            if methodDict[i] != False:
                for j in refSlice['name']:
                    sampleNormMethodDict[j] = i

    # Run multiprocessing for gc (+ method) correction.
    normArgs = [(args.species, sampleDict[x], methodDict[sampleNormMethodDict[x]], lowessDir + x + '.lowess.txt') for x in sampleDict.keys()]
    common.daemon(normalizefile.runNormalizeOne, normArgs, 'normalize bincount files')
    print('\nNormalization complete\n\n\n')
    if args.normalizeonly:
        shutil.rmtree(tempDir[:-1])
        return 0

    # Run CBS for all samples #
    segArgs = [(x, args.species, tempDir, lowessDir, segmentDir) for x in refArray['name']]
    common.daemon(segmentfile.segmentOne, segArgs, 'segment bincount data')
    shutil.rmtree(tempDir[:-1])
    print('\nSegmentation complete\n\n\n')
|
#!/usr/bin/python
# import urllib for url calling
import urllib
#import webbrowser to open auth webpage if necissary
import webbrowser
#import ElementTree to parse / work with XML
from xml.etree import ElementTree as ET
#import NSDictionary and NSString from Cocoa to work with the plist that will hold the token
from Cocoa import NSDictionary, NSString
#import os to work with paths. Needed to see if file exists, and to write to XML for dev.
import os
#Import hashlib for MD5 encoding
import hashlib
#import time to be used for the pause during auth. This should be done more gracefully.
import time
def main():
# Define some main variables. Don't change these.
api_url='http://api.rememberthemilk.com/services/rest/?'
auth_url='http://www.rememberthemilk.com/services/auth/?'
api_key='60a9369798aa92cc5cc291b2280422f1'
api_secret='6fdf8ca0e501715f'
the_plist='~/Library/Preferences/com.google.RTM-QSB.plist'
the_plist=NSString.stringByExpandingTildeInPath(the_plist)
m=hashlib.md5()
# Get the location of the response XML file. May not need this.
xml_resp = os.path.abspath(__file__)
xml_resp = os.path.dirname(xml_resp)
xml_resp = os.path.join(xml_resp, 'resp.xml')
def getLocalToken():
if os.path.exists(the_plist):
mydict = NSDictionary.dictionaryWithContentsOfFile_(the_plist)
token = mydict['Token']
return token
pass
else:
print 'No Token Found'
return 0
#Function to call specified URL, get response
def checkToken(token):
method = 'rtm.auth.checkToken'
url=api_url+'method='+method+'&api_key='+api_key+'&auth_token='+token
page = urllib.urlopen(url)
#Seperate the variable from the file. Used to write the Resp to disk. Used for development. May not need this.
the_resp=ET.parse(page)
tree=the_resp.getroot()
#Parse the XML
#the_resp=ET.parse(page).getroot()
#Grab the response message
var = 0
for element in tree.findall('token/'):
var = element.text
#Write the response to the local XML file. Used for Dev only.
the_resp.write(xml_resp)
return var
pass
#Function to write to the Plist. Only sets the token for now. Could add in more parameters later.
def writePlist(token):
mydict = {}
mydict['Token']=token
NSDictionary.dictionaryWithDictionary_(mydict).writeToFile_atomically_(the_plist, True)
pass
#get the Frob to begin auth process, because the token came back false
def getFrob():
method = 'rtm.auth.getFrob'
the_sig = api_secret+'api_key'+api_key+'method'+method
hashed_sig= createMD5(the_sig)
url=api_url+'method='+method+'&api_key='+api_key+'&api_sig='+(str(hashed_sig))
page = urllib.urlopen(url)
the_resp=ET.parse(page).getroot()
var = 0
for element in the_resp.findall('frob/'):
var = element.text
return var
pass
def createMD5(the_string):
m.update(the_string)
the_hash = m.hexdigest()
return the_hash
pass
def doAuth(the_frob):
the_sig = api_secret+'api_key'+api_key+'frob'+the_frob
hashed_sig= createMD5(the_sig)
url=auth_url+'api_key='+api_key+'&perms=delete&frob='+the_frob+'&api_sig='+(str(hashed_sig))
webbrowser.open(url)
print 'Website Opened:'
print url
#sleep for 30 seconds to allow user to grant auth before proceeding with getting the token. This needs to be implimented better.
time.sleep(30)
method = 'rtm.auth.getToken'
the_sig = api_secret+'api_key'+api_key+'frob'+the_frob+'method'+method
hashed_sig= createMD5(the_sig)
url=api_url+'method='+method+'&api_key='+api_key+'&frob='+the_frob+'&api_sig='+(str(hashed_sig))
page = urllib.urlopen(url)
#Seperate the variable from the file. Used to write the Resp to disk. Used for development. May not need this.
the_resp=ET.parse(page)
tree=the_resp.getroot()
#Parse the XML
#the_resp=ET.parse(page).getroot()
#Grab the response message
var = 0
for element in tree.findall('token/'):
var = element.text
#Write the response to the local XML file. Used for Dev only.
the_resp.write(xml_resp)
return var
pass
#define the method to be used, use rtm.test.echo for testing
#method = 'rtm.test.echo'
#Read the plist, grab the Token (using test string for dev)
token=getLocalToken()
#see if token var contains actual value
if token != 0:
#call the URL, pass the resp back to variable result
result = checkToken(str(token))
if result == 0:
the_frob=getFrob()
the_token=doAuth(the_frob)
if the_token != 0:
print 'Sucess'
else:
print 'Failure'
# Script entry point: run the RTM auth flow only when executed directly.
if __name__ == '__main__':
    main()
# Progress on auth
#!/usr/bin/python
# import urllib for url calling
import urllib
#import webbrowser to open auth webpage if necissary
import webbrowser
#import ElementTree to parse / work with XML
from xml.etree import ElementTree as ET
#import NSDictionary and NSString from Cocoa to work with the plist that will hold the token
from Cocoa import NSDictionary, NSString
#import os to work with paths. Needed to see if file exists, and to write to XML for dev.
import os
#Import hashlib for MD5 encoding
import hashlib
#import time to be used for the pause during auth. This should be done more gracefully.
import time
def main():
# Define some main variables. Don't change these.
api_url='http://api.rememberthemilk.com/services/rest/?'
auth_url='http://www.rememberthemilk.com/services/auth/?'
api_key='60a9369798aa92cc5cc291b2280422f1'
api_secret='6fdf8ca0e501715f'
the_plist='~/Library/Preferences/com.google.RTM-QSB.plist'
the_plist=NSString.stringByExpandingTildeInPath(the_plist)
m=hashlib.md5()
# Get the location of the response XML file. May not need this.
xml_resp = os.path.abspath(__file__)
xml_resp = os.path.dirname(xml_resp)
xml_resp = os.path.join(xml_resp, 'resp.xml')
def getLocalToken():
if os.path.exists(the_plist):
mydict = NSDictionary.dictionaryWithContentsOfFile_(the_plist)
token = mydict['Token']
return token
pass
else:
print 'No Token Found'
return 0
#Function to call specified URL, get response
def checkToken(token):
method = 'rtm.auth.checkToken'
url=api_url+'method='+method+'&api_key='+api_key+'&auth_token='+token
page = urllib.urlopen(url)
#Seperate the variable from the file. Used to write the Resp to disk. Used for development. May not need this.
the_resp=ET.parse(page)
tree=the_resp.getroot()
#Parse the XML
#the_resp=ET.parse(page).getroot()
#Grab the response message
var = 0
for element in tree.findall('token/'):
var = element.text
#Write the response to the local XML file. Used for Dev only.
the_resp.write(xml_resp)
return var
pass
#Function to write to the Plist. Only sets the token for now. Could add in more parameters later.
def writePlist(token):
mydict = {}
mydict['Token']=token
NSDictionary.dictionaryWithDictionary_(mydict).writeToFile_atomically_(the_plist, True)
pass
#get the Frob to begin auth process, because the token came back false
def getFrob():
method = 'rtm.auth.getFrob'
the_sig = api_secret+'api_key'+api_key+'method'+method
hashed_sig= createMD5(the_sig)
url=api_url+'method='+method+'&api_key='+api_key+'&api_sig='+(str(hashed_sig))
page = urllib.urlopen(url)
the_resp=ET.parse(page).getroot()
var = 0
for element in the_resp.findall('frob/'):
var = element.text
return var
pass
def createMD5(the_string):
m.update(the_string)
the_hash = m.hexdigest()
return the_hash
pass
def doAuth(the_frob):
the_sig = api_secret+'api_key'+api_key+'frob'+the_frob+'permswrite'
hashed_sig= createMD5(the_sig)
url=auth_url+'api_key='+api_key+'&perms=write&frob='+the_frob+'&api_sig='+(str(hashed_sig))
webbrowser.open(url)
print 'Website Opened:'
print url
#sleep for 30 seconds to allow user to grant auth before proceeding with getting the token. This needs to be implimented better.
time.sleep(30)
method = 'rtm.auth.getToken'
the_sig = api_secret+'api_key'+api_key+'frob'+the_frob+'method'+method
hashed_sig= createMD5(the_sig)
url=api_url+'method='+method+'&api_key='+api_key+'&frob='+the_frob+'&api_sig='+(str(hashed_sig))
page = urllib.urlopen(url)
#Seperate the variable from the file. Used to write the Resp to disk. Used for development. May not need this.
the_resp=ET.parse(page)
tree=the_resp.getroot()
#Parse the XML
#the_resp=ET.parse(page).getroot()
#Grab the response message
var = 0
for element in tree.findall('token/'):
var = element.text
#Write the response to the local XML file. Used for Dev only.
the_resp.write(xml_resp)
return var
pass
#define the method to be used, use rtm.test.echo for testing
#method = 'rtm.test.echo'
#Read the plist, grab the Token (using test string for dev)
token=getLocalToken()
#see if token var contains actual value
if token != 0:
#call the URL, pass the resp back to variable result
result = checkToken(str(token))
if result == 0:
the_frob=getFrob()
the_token=doAuth(the_frob)
if the_token != 0:
print 'Sucess'
print 'Token: '+the_token
else:
print 'Failure'
# Script entry point: run the RTM auth flow only when executed directly.
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
# #############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
_logger = logging.getLogger(__name__)
from openerp.osv import fields, osv
from openerp.tools.translate import _
import time
import unicodedata
class doctor_professional(osv.osv):
    """Healthcare professional linked to a ``res.partner`` record."""
    _name = "doctor.professional"
    _description = "Information about the healthcare professional"
    _rec_name = 'username'

    def write(self, cr, uid, ids, vals, context=None):
        """Rebuild the denormalized ``nombreUsuario`` (uppercased full name)
        whenever any name component is written, then delegate to the
        standard write."""
        datos = {'lastname': '', 'surname': '', 'firstname': '', 'middlename': ''}
        nombre = ''
        if context is None:
            context = {}
        for professional in self.browse(cr, uid, ids, context=context):
            if 'lastname' in vals:
                datos['lastname'] = vals['lastname'] or ' '
            if 'surname' in vals:
                datos['surname'] = vals['surname'] or ' '
            if 'firstname' in vals:
                datos['firstname'] = vals['firstname'] or ' '
            if 'middlename' in vals:
                datos['middlename'] = vals['middlename'] or ' '
            # Prefer the incoming value, fall back to the stored record.
            # NOTE(review): 'firtsname' (sic) mirrors the misspelled field
            # used on res.partner elsewhere in this module -- confirm before
            # renaming.
            nombre = "%s %s %s %s" % (datos['lastname'] or professional.lastname, datos['surname'] or professional.surname or '',
                                      datos['firstname'] or professional.firtsname, datos['middlename'] or professional.middlename or '')
            vals['nombreUsuario'] = nombre.upper()
        return super(doctor_professional, self).write(cr, uid, ids, vals, context=context)

    _columns = {
        'professional': fields.many2one('res.partner', 'Healthcare Professional', ondelete='cascade',
                                        domain=[('is_company', '=', False)]),
        'username': fields.char('Username', size=64, required=True),
        'photo': fields.binary('patient'),
        'speciality_id': fields.many2one('doctor.speciality', 'Speciality', required=True),
        'professional_card': fields.char('Professional card', size=64, required=True),
        'authority': fields.char('Authority', size=64, required=True),
        'work_phone': fields.char('Work Phone', size=64),
        'work_mobile': fields.char('Work Mobile', size=64),
        'work_email': fields.char('Work Email', size=240),
        'user_id': fields.many2one('res.users', 'User', help='Related user name', required=False, ondelete='cascade'),
        'active': fields.boolean('Active'),
        'procedures_ids': fields.many2many('product.product', id1='professional_ids', id2='procedures_ids',
                                           string='My health procedures', required=False, ondelete='restrict'),
    }
    _defaults = {
        'active': lambda *a: 1,
    }

    def name_get(self, cr, uid, ids, context=None):
        """Display records by their linked partner's name.

        Bug fix: replaced the mutable default ``context={}`` with ``None``.
        """
        if not len(ids):
            return []
        rec_name = 'professional'
        res = [(r['id'], r[rec_name][1])
               for r in self.read(cr, uid, ids, [rec_name], context)]
        return res

    def onchange_photo(self, cr, uid, ids, professional, photo, context=None):
        """Copy the selected partner's medium image into the photo field."""
        values = {}
        if not professional:
            return values
        professional_data = self.pool.get('res.partner').browse(cr, uid, professional, context=context)
        professional_img = professional_data.image_medium
        values.update({
            'photo': professional_img,
        })
        return {'value': values}

    def onchange_user(self, cr, uid, ids, user_id, context=None):
        """Default the work e-mail from the selected user's e-mail."""
        work_email = False
        if user_id:
            work_email = self.pool.get('res.users').browse(cr, uid, user_id, context=context).email
        return {'value': {'work_email': work_email}}

    def onchange_username(self, cr, uid, ids, username, context=None):
        """Warn when the login already exists; otherwise create the
        ``res.users`` record immediately and link it to this professional."""
        if self.pool.get('res.users').search(cr, uid, [('login', '=', username), ], context):
            return {'value': {'username': False, }, 'warning': {'title': 'The username exists', 'message': "Please change the username"}}
        else:
            user_id = self.pool.get('res.users').create(cr, uid, {'login': username, 'name': username})
            return {'value': {'user_id': user_id, }, }


doctor_professional()
class doctor_patient(osv.osv):
    """Patient record mirrored onto a linked ``res.partner``."""
    _name = "doctor.patient"
    _description = "Information about the patient"

    def write(self, cr, uid, ids, vals, context=None):
        """Mirror patient name changes onto the linked ``res.partner``.

        Rebuilds the display name from the four name components (new values
        from ``vals``, falling back to the stored record), ASCII-folds and
        uppercases them, and writes the result to the partner before
        delegating to the standard write.

        Fixes relative to the original: the two references to the undefined
        local name 'firtsname' (NameError -- the local is 'firstname') and
        removal of four leftover debug ``_logger.info(type(...) is unicode)``
        statements.
        """
        datos = {'lastname': '', 'surname': '', 'firstname': '', 'middlename': ''}
        nombre = ''
        u = {}
        if context is None:
            context = {}
        for patient in self.browse(cr, uid, ids, context=context):
            partner_id = patient.patient
            if 'birth_date' in vals:
                birth_date = vals['birth_date']
                current_date = time.strftime('%Y-%m-%d')
                # ISO date strings compare chronologically.
                if birth_date > current_date:
                    raise osv.except_osv(_('Warning !'), _("Birth Date Can not be a future date "))
            if 'lastname' in vals:
                datos['lastname'] = vals['lastname'] or ' '
            if 'surname' in vals:
                datos['surname'] = vals['surname'] or ' '
            if 'firstname' in vals:
                datos['firstname'] = vals['firstname'] or ' '
            if 'middlename' in vals:
                datos['middlename'] = vals['middlename'] or ' '
            nombre = "%s %s %s %s" % (datos['lastname'] or patient.lastname, datos['surname'] or patient.surname or '',
                                      datos['firstname'] or patient.firstname, datos['middlename'] or patient.middlename or '')
            # Prefer incoming values, else the current partner values.
            # NOTE(review): 'firtsname' (sic) is the actual field name on
            # res.partner in this module -- do not "fix" its spelling.
            firstname = vals['firstname'] if 'firstname' in vals else partner_id.firtsname
            lastname = vals['lastname'] if 'lastname' in vals else partner_id.lastname
            surname = vals['surname'] if 'surname' in vals else partner_id.surname
            middlename = vals['middlename'] if 'middlename' in vals else partner_id.middlename
            u['name'] = unicodedata.normalize('NFKD', nombre).encode('ASCII', 'ignore').upper()
            u['display_name'] = unicodedata.normalize('NFKD', nombre).encode('ASCII', 'ignore').upper()
            vals['nombre'] = unicodedata.normalize('NFKD', nombre).encode('ASCII', 'ignore').upper()
            if type(firstname) is unicode:
                u['firtsname'] = unicodedata.normalize('NFKD', firstname).encode('ASCII', 'ignore').upper()
            elif type(firstname) is str:
                u['firtsname'] = firstname.upper()
            if type(lastname) is unicode:
                u['lastname'] = unicodedata.normalize('NFKD', lastname).encode('ASCII', 'ignore').upper()
            elif type(lastname) is str:
                u['lastname'] = lastname.upper()
            if type(surname) is unicode:
                u['surname'] = unicodedata.normalize('NFKD', surname).encode('ASCII', 'ignore').upper()
            elif type(surname) is str:
                u['surname'] = surname.upper()
            if type(middlename) is unicode:
                u['middlename'] = unicodedata.normalize('NFKD', middlename).encode('ASCII', 'ignore').upper()
            elif type(middlename) is str:
                u['middlename'] = middlename.upper()
            _logger.info(u)
            self.pool.get('res.partner').write(cr, uid, partner_id.id, u, context=context)
        return super(doctor_patient, self).write(cr, uid, ids, vals, context=context)

    def create(self, cr, uid, vals, context=None):
        """Create the patient: validate the birth date, uppercase the name
        parts, compute the display name, and create (or flag) the linked
        ``res.partner``.

        Uses ``vals.get`` for the optional name parts; the original indexed
        them directly and raised KeyError when they were absent.
        """
        if 'birth_date' in vals:
            birth_date = vals['birth_date']
            current_date = time.strftime('%Y-%m-%d')
            # ISO date strings compare chronologically.
            if birth_date > current_date:
                raise osv.except_osv(_('Warning !'), _("Birth Date Can not be a future date "))
        if vals.get('middlename'):
            vals.update({'middlename': vals['middlename'].upper()})
        if vals.get('surname'):
            vals.update({'surname': vals['surname'].upper()})
        vals.update({'lastname': vals['lastname'].upper()})
        vals.update({'firstname': vals['firstname'].upper()})
        vals.update({'name': "%s %s %s %s" % (vals['lastname'], vals.get('surname') or '', vals['firstname'], vals.get('middlename') or '')})
        vals.update({'nombre': vals['name'].upper()})
        if not vals['es_profesionalsalud']:
            # Not an existing health professional: create a new partner
            # carrying the personal data.
            # NOTE(review): 'firtsname' (sic) is the real res.partner field.
            id_tercero = self.pool.get('res.partner').create(cr, uid, {'ref': vals['ref'], 'tdoc': vals['tdoc'], 'middlename': vals.get('middlename') or '', 'surname': vals.get('surname') or '', 'lastname': vals['lastname'], 'firtsname': vals['firstname'], 'name': vals['name'], 'image': vals['photo'], 'city_id': vals['city_id'], 'state_id': vals['state_id'], 'street': vals['street'], 'phone': vals['telefono'], 'mobile': vals['movil'], 'email': vals['email'], 'es_paciente': True, 'es_profesional_salud': False}, context)
            # Once the partner exists we can link it into the patient field.
            vals.update({'patient': id_tercero})
        else:
            # Reuse the professional's partner; just flag it as a patient.
            partner_id = self.pool.get('res.partner').search(cr, uid, [('ref', '=', vals['ref'])])
            self.pool.get('res.partner').write(cr, uid, partner_id, {'es_paciente': True}, context=context)
        return super(doctor_patient, self).create(cr, uid, vals, context=context)

    def _get_profesional_id(self, cr, uid, ids, field_name, arg, context=None):
        """Functional field: id of the doctor.professional linked to the
        current user, or False."""
        res = {}
        # The lookup depends only on uid, so run it once instead of once per
        # record as the original did.
        doctor_id = self.pool.get('doctor.professional').search(cr, uid, [('user_id', '=', uid)], context=context)
        value = doctor_id[0] if doctor_id else False
        for datos in self.browse(cr, uid, ids):
            res[datos.id] = value
        return res

    _columns = {
        'patient': fields.many2one('res.partner', 'Paciente', ondelete='cascade',
                                   domain=[('is_company', '=', False)]),
        'firstname': fields.char('Primer Nombre', size=15, required=True),
        'middlename': fields.char('Segundo Nombre', size=15),
        'lastname': fields.char('Primer Apellido', size=15, required=True),
        'surname': fields.char('Segundo Apellido', size=15),
        'photo': fields.binary('patient'),
        'birth_date': fields.date('Date of Birth', required=True),
        'sex': fields.selection([('m', 'Male'), ('f', 'Female'), ], 'Sex', select=True, required=True),
        'blood_type': fields.selection([('A', 'A'), ('B', 'B'), ('AB', 'AB'), ('O', 'O'), ], 'Blood Type'),
        'rh': fields.selection([('+', '+'), ('-', '-'), ], 'Rh'),
        'insurer': fields.many2one('doctor.insurer', 'Insurer', required=False, help='Insurer'),
        'deceased': fields.boolean('Deceased', help="Mark if the patient has died"),
        'death_date': fields.date('Date of Death'),
        'death_cause': fields.many2one('doctor.diseases', 'Cause of Death'),
        'attentions_ids': fields.one2many('doctor.attentions', 'patient_id', 'Attentions'),
        'appointments_ids': fields.one2many('doctor.appointment', 'patient_id', 'Attentions'),
        'get_professional_id': fields.function(_get_profesional_id, type="integer", store=False,
                                               readonly=True, method=True),
    }

    def name_get(self, cr, uid, ids, context=None):
        """Display patients by their computed 'nombre'."""
        if context is None:
            context = {}
        if not ids:
            return []
        if isinstance(ids, (long, int)):
            ids = [ids]
        res = []
        for record in self.browse(cr, uid, ids):
            res.append((record['id'], record.nombre or ''))
        return res

    def onchange_patient_data(self, cr, uid, ids, patient, photo, context=None):
        """Copy the selected partner's medium image into the photo field."""
        values = {}
        if not patient:
            return values
        patient_data = self.pool.get('res.partner').browse(cr, uid, patient, context=context)
        patient_img = patient_data.image_medium
        values.update({
            'photo': patient_img,
        })
        return {'value': values}


doctor_patient()
# Adding changes to write()  (original note: "agregando cambios a write")
# -*- coding: utf-8 -*-
# #############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
_logger = logging.getLogger(__name__)
from openerp.osv import fields, osv
from openerp.tools.translate import _
import time
import unicodedata
class doctor_professional(osv.osv):
    """Healthcare professional linked to a ``res.partner`` record."""
    _name = "doctor.professional"
    _description = "Information about the healthcare professional"
    _rec_name = 'username'

    def write(self, cr, uid, ids, vals, context=None):
        """Rebuild the denormalized ``nombreUsuario`` (uppercased full name)
        whenever any name component is written, then delegate to the
        standard write."""
        datos = {'lastname': '', 'surname': '', 'firstname': '', 'middlename': ''}
        nombre = ''
        if context is None:
            context = {}
        for professional in self.browse(cr, uid, ids, context=context):
            if 'lastname' in vals:
                datos['lastname'] = vals['lastname'] or ' '
            if 'surname' in vals:
                datos['surname'] = vals['surname'] or ' '
            if 'firstname' in vals:
                datos['firstname'] = vals['firstname'] or ' '
            if 'middlename' in vals:
                datos['middlename'] = vals['middlename'] or ' '
            # Prefer the incoming value, fall back to the stored record.
            # NOTE(review): 'firtsname' (sic) mirrors the misspelled field
            # used on res.partner elsewhere in this module -- confirm before
            # renaming.
            nombre = "%s %s %s %s" % (datos['lastname'] or professional.lastname, datos['surname'] or professional.surname or '',
                                      datos['firstname'] or professional.firtsname, datos['middlename'] or professional.middlename or '')
            vals['nombreUsuario'] = nombre.upper()
        return super(doctor_professional, self).write(cr, uid, ids, vals, context=context)

    _columns = {
        'professional': fields.many2one('res.partner', 'Healthcare Professional', ondelete='cascade',
                                        domain=[('is_company', '=', False)]),
        'username': fields.char('Username', size=64, required=True),
        'photo': fields.binary('patient'),
        'speciality_id': fields.many2one('doctor.speciality', 'Speciality', required=True),
        'professional_card': fields.char('Professional card', size=64, required=True),
        'authority': fields.char('Authority', size=64, required=True),
        'work_phone': fields.char('Work Phone', size=64),
        'work_mobile': fields.char('Work Mobile', size=64),
        'work_email': fields.char('Work Email', size=240),
        'user_id': fields.many2one('res.users', 'User', help='Related user name', required=False, ondelete='cascade'),
        'active': fields.boolean('Active'),
        'procedures_ids': fields.many2many('product.product', id1='professional_ids', id2='procedures_ids',
                                           string='My health procedures', required=False, ondelete='restrict'),
    }
    _defaults = {
        'active': lambda *a: 1,
    }

    def name_get(self, cr, uid, ids, context=None):
        """Display records by their linked partner's name.

        Bug fix: replaced the mutable default ``context={}`` with ``None``.
        """
        if not len(ids):
            return []
        rec_name = 'professional'
        res = [(r['id'], r[rec_name][1])
               for r in self.read(cr, uid, ids, [rec_name], context)]
        return res

    def onchange_photo(self, cr, uid, ids, professional, photo, context=None):
        """Copy the selected partner's medium image into the photo field."""
        values = {}
        if not professional:
            return values
        professional_data = self.pool.get('res.partner').browse(cr, uid, professional, context=context)
        professional_img = professional_data.image_medium
        values.update({
            'photo': professional_img,
        })
        return {'value': values}

    def onchange_user(self, cr, uid, ids, user_id, context=None):
        """Default the work e-mail from the selected user's e-mail."""
        work_email = False
        if user_id:
            work_email = self.pool.get('res.users').browse(cr, uid, user_id, context=context).email
        return {'value': {'work_email': work_email}}

    def onchange_username(self, cr, uid, ids, username, context=None):
        """Warn when the login already exists; otherwise create the
        ``res.users`` record immediately and link it to this professional."""
        if self.pool.get('res.users').search(cr, uid, [('login', '=', username), ], context):
            return {'value': {'username': False, }, 'warning': {'title': 'The username exists', 'message': "Please change the username"}}
        else:
            user_id = self.pool.get('res.users').create(cr, uid, {'login': username, 'name': username})
            return {'value': {'user_id': user_id, }, }


doctor_professional()
class doctor_patient(osv.osv):
_name = "doctor.patient"
_description = "Information about the patient"
def write(self, cr, uid, ids, vals, context=None):
    """Mirror patient name changes onto the linked ``res.partner``.

    Rebuilds the display name from the four name components (new values from
    ``vals``, falling back to the stored record), ASCII-folds and uppercases
    them, and writes the result to the partner before delegating to the
    standard write.

    Bug fix: the ``str`` branch for the first name referenced the undefined
    local 'firtsname' (NameError); the local variable is 'firstname'.
    """
    datos = {'lastname': '', 'surname': '', 'firstname': '', 'middlename': ''}
    nombre = ''
    u = {}
    if context is None:
        context = {}
    for patient in self.browse(cr, uid, ids, context=context):
        partner_id = patient.patient
        if 'birth_date' in vals:
            birth_date = vals['birth_date']
            current_date = time.strftime('%Y-%m-%d')
            # ISO date strings compare chronologically.
            if birth_date > current_date:
                raise osv.except_osv(_('Warning !'), _("Birth Date Can not be a future date "))
        if 'lastname' in vals:
            datos['lastname'] = vals['lastname'] or ' '
        if 'surname' in vals:
            datos['surname'] = vals['surname'] or ' '
        if 'firstname' in vals:
            datos['firstname'] = vals['firstname'] or ' '
        if 'middlename' in vals:
            datos['middlename'] = vals['middlename'] or ' '
        nombre = "%s %s %s %s" % (datos['lastname'] or patient.lastname, datos['surname'] or patient.surname or '',
                                  datos['firstname'] or patient.firstname, datos['middlename'] or patient.middlename or '')
        # Prefer incoming values, else the current partner values.
        # NOTE(review): 'firtsname' (sic) is the actual field name on
        # res.partner in this module -- do not "fix" its spelling.
        firstname = vals['firstname'] if 'firstname' in vals else partner_id.firtsname
        lastname = vals['lastname'] if 'lastname' in vals else partner_id.lastname
        surname = vals['surname'] if 'surname' in vals else partner_id.surname
        middlename = vals['middlename'] if 'middlename' in vals else partner_id.middlename
        u['name'] = unicodedata.normalize('NFKD', nombre).encode('ASCII', 'ignore').upper()
        u['display_name'] = unicodedata.normalize('NFKD', nombre).encode('ASCII', 'ignore').upper()
        vals['nombre'] = unicodedata.normalize('NFKD', nombre).encode('ASCII', 'ignore').upper()
        if type(firstname) is unicode:
            u['firtsname'] = unicodedata.normalize('NFKD', firstname).encode('ASCII', 'ignore').upper()
        elif type(firstname) is str:
            u['firtsname'] = firstname.upper()
        if type(lastname) is unicode:
            u['lastname'] = unicodedata.normalize('NFKD', lastname).encode('ASCII', 'ignore').upper()
        elif type(lastname) is str:
            u['lastname'] = lastname.upper()
        if type(surname) is unicode:
            u['surname'] = unicodedata.normalize('NFKD', surname).encode('ASCII', 'ignore').upper()
        elif type(surname) is str:
            u['surname'] = surname.upper()
        if type(middlename) is unicode:
            u['middlename'] = unicodedata.normalize('NFKD', middlename).encode('ASCII', 'ignore').upper()
        elif type(middlename) is str:
            u['middlename'] = middlename.upper()
        _logger.info(u)
        self.pool.get('res.partner').write(cr, uid, partner_id.id, u, context=context)
    return super(doctor_patient, self).write(cr, uid, ids, vals, context=context)
def create(self, cr, uid, vals, context=None):
    """Create a patient record.

    Validates that the birth date is not in the future, upper-cases the
    name parts, builds the composed display name, and either creates a
    linked ``res.partner`` (plain patient) or flags the existing partner
    (health professional).

    Raises:
        osv.except_osv: when ``birth_date`` lies in the future.
    """
    if 'birth_date' in vals:
        birth_date = vals['birth_date']
        current_date = time.strftime('%Y-%m-%d')
        # ISO-8601 date strings compare chronologically, so a plain
        # string comparison is sufficient here.
        if birth_date > current_date:
            raise osv.except_osv(_('Warning !'), _("Birth Date Can not be a future date "))
    # Optional name parts: use .get() so a caller that omits the key does
    # not crash with KeyError (the original indexed vals[...] directly).
    if vals.get('middlename'):
        vals['middlename'] = vals['middlename'].upper()
    if vals.get('surname'):
        vals['surname'] = vals['surname'].upper()
    # firstname / lastname are required fields, so direct indexing is safe.
    vals['lastname'] = vals['lastname'].upper()
    vals['firstname'] = vals['firstname'].upper()
    vals['name'] = "%s %s %s %s" % (vals['lastname'], vals.get('surname') or '',
                                    vals['firstname'], vals.get('middlename') or '')
    vals['nombre'] = vals['name'].upper()
    if not vals.get('es_profesionalsalud'):
        # Plain patient: create the matching res.partner record.
        # NOTE: 'firtsname' is the partner model's actual (misspelled)
        # field name used elsewhere in this module — do not "fix" it here.
        id_tercero = self.pool.get('res.partner').create(cr, uid, {
            'ref': vals['ref'], 'tdoc': vals['tdoc'],
            'middlename': vals.get('middlename') or '',
            'surname': vals.get('surname') or '',
            'lastname': vals['lastname'], 'firtsname': vals['firstname'],
            'name': vals['name'], 'image': vals.get('photo'),
            'city_id': vals.get('city_id'), 'state_id': vals.get('state_id'),
            'street': vals.get('street'), 'phone': vals.get('telefono'),
            'mobile': vals.get('movil'), 'email': vals.get('email'),
            'es_paciente': True, 'es_profesional_salud': False}, context)
        # Once the partner exists it becomes the value of the 'patient' field.
        vals['patient'] = id_tercero
    else:
        # Health professional: the partner already exists, just flag it.
        partner_id = self.pool.get('res.partner').search(cr, uid, [('ref', '=', vals['ref'])])
        self.pool.get('res.partner').write(cr, uid, partner_id, {'es_paciente': True}, context=context)
    return super(doctor_patient, self).create(cr, uid, vals, context=context)
def _get_profesional_id(self, cr, uid, ids, field_name, arg, context=None):
    """Function-field getter: the doctor.professional linked to the current user.

    Returns a dict mapping each record id in ``ids`` to the id of the
    ``doctor.professional`` whose ``user_id`` is the calling user, or
    ``False`` when no such professional record exists.
    """
    # The lookup depends only on ``uid``, so run the search once instead
    # of once per record (the original repeated it inside the loop).
    doctor_ids = self.pool.get('doctor.professional').search(
        cr, uid, [('user_id', '=', uid)], context=context)
    professional_id = doctor_ids[0] if doctor_ids else False
    # ``ids`` already contains the record ids; no need to browse them.
    return dict((rec_id, professional_id) for rec_id in ids)
# OpenERP 7 style column declarations for the patient model.
_columns = {
    # Linked res.partner holding the person's contact data (people only).
    'patient': fields.many2one('res.partner', 'Paciente', ondelete='cascade',
                               domain=[('is_company', '=', False)]),
    # Name parts; the composed display name is built in create()/write().
    'firstname': fields.char('Primer Nombre', size=15, required=True),
    'middlename': fields.char('Segundo Nombre', size=15),
    'lastname': fields.char('Primer Apellido', size=15, required=True),
    'surname': fields.char('Segundo Apellido', size=15),
    'photo': fields.binary('patient'),
    'birth_date': fields.date('Date of Birth', required=True),
    'sex': fields.selection([('m', 'Male'), ('f', 'Female'), ], 'Sex', select=True, required=True),
    'blood_type': fields.selection([('A', 'A'), ('B', 'B'), ('AB', 'AB'), ('O', 'O'), ], 'Blood Type'),
    'rh': fields.selection([('+', '+'), ('-', '-'), ], 'Rh'),
    'insurer': fields.many2one('doctor.insurer', 'Insurer', required=False, help='Insurer'),
    'deceased': fields.boolean('Deceased', help="Mark if the patient has died"),
    'death_date': fields.date('Date of Death'),
    'death_cause': fields.many2one('doctor.diseases', 'Cause of Death'),
    'attentions_ids': fields.one2many('doctor.attentions', 'patient_id', 'Attentions'),
    # NOTE(review): label reads 'Attentions' — likely a copy-paste from the
    # line above and probably meant 'Appointments'; it is a UI string, so
    # confirm with the views before changing it.
    'appointments_ids': fields.one2many('doctor.appointment', 'patient_id', 'Attentions'),
    # Computed, non-stored id of the doctor.professional tied to the
    # current user (see _get_profesional_id above).
    'get_professional_id': fields.function(_get_profesional_id, type="integer", store=False,
                                           readonly=True, method=True),
}
def name_get(self, cr, uid, ids, context=None):
    """Display each patient as its pre-computed ``nombre`` field (or '')."""
    if context is None:
        context = {}
    if not ids:
        return []
    # Accept a bare id as well as a list of ids (Python 2: int or long).
    if isinstance(ids, (long, int)):
        ids = [ids]
    return [(rec.id, rec.nombre or '') for rec in self.browse(cr, uid, ids)]
def onchange_patient_data(self, cr, uid, ids, patient, photo, context=None):
    """On patient selection, copy the partner's medium image into ``photo``.

    Returns an onchange dict ``{'value': {...}}``; when no partner is
    selected it returns a plain empty dict (no 'value' key), matching the
    original behavior.
    """
    if not patient:
        return {}
    partner = self.pool.get('res.partner').browse(cr, uid, patient, context=context)
    return {'value': {'photo': partner.image_medium}}
doctor_patient()
|
from morepath import Config
import reg
from reg import ClassIndex, KeyIndex
import morepath
def setup_module(module):
    """Module-level test hook: disable morepath's implicit behavior so the
    tests below must pass a ``lookup`` explicitly."""
    morepath.disable_implicit()
def test_dispatch():
    """Class-based dispatch: App.function registrations win over the fallback."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch('obj')
    def f(obj):
        return "fallback"

    @App.function(f, obj=Foo)
    def handle_foo(obj):
        return "foo"

    @App.function(f, obj=Bar)
    def handle_bar(obj):
        return "bar"

    config.commit()

    app = App()
    assert f(Foo(), lookup=app.lookup) == 'foo'
    assert f(Bar(), lookup=app.lookup) == 'bar'
    # Unregistered class falls through to the dispatch function's body.
    assert f(Other(), lookup=app.lookup) == 'fallback'
def test_dispatch_external_predicates():
    """An externally declared 'model' predicate drives class-based dispatch."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj):
        return "fallback"

    @App.predicate(f, name='model', default=None, index=ClassIndex)
    def get_model(obj):
        return obj.__class__

    @App.function(f, model=Foo)
    def handle_foo(obj):
        return "foo"

    @App.function(f, model=Bar)
    def handle_bar(obj):
        return "bar"

    config.commit()

    app = App()
    assert f(Foo(), lookup=app.lookup) == 'foo'
    assert f(Bar(), lookup=app.lookup) == 'bar'
    # No registration for Other: the dispatch function's body is used.
    assert f(Other(), lookup=app.lookup) == 'fallback'
def test_dispatch_external_predicates_predicate_fallback():
    """A registered predicate_fallback is used instead of the dispatch
    function's own body when the 'model' predicate does not match."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj):
        return "dispatch function"

    @App.predicate(f, name='model', default=None, index=ClassIndex)
    def f_obj(obj):
        return obj.__class__

    # Fallback tied to the 'model' predicate declared just above.
    @App.predicate_fallback(f, f_obj)
    def f_obj_fallback(obj):
        return "f_obj_fallback"

    @App.function(f, model=Foo)
    def f_foo(obj):
        return "foo"

    @App.function(f, model=Bar)
    def f_bar(obj):
        return "bar"

    config.commit()

    a = App()
    lookup = a.lookup
    assert f(Foo(), lookup=lookup) == 'foo'
    assert f(Bar(), lookup=lookup) == 'bar'
    # Other hits the predicate fallback, not "dispatch function".
    assert f(Other(), lookup=lookup) == 'f_obj_fallback'
def test_dispatch_external_predicates_ordering_after():
    """Predicates declared with ``after=`` are ordered behind the referenced
    one, so dispatch keys here are (model, name)."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj, name):
        return "fallback"

    @App.predicate(f, name='model', default=None, index=ClassIndex)
    def pred_obj(obj):
        return obj.__class__

    # 'name' predicate explicitly ordered after the 'model' predicate.
    @App.predicate(f, name='name', default='', index=KeyIndex, after=pred_obj)
    def pred_name(name):
        return name

    @App.function(f, model=Foo, name='')
    def f_foo_default(obj, name):
        return "foo default"

    @App.function(f, model=Foo, name='edit')
    def f_foo_edit(obj, name):
        return "foo edit"

    @App.function(f, model=Bar, name='')
    def f_bar_default(obj, name):
        return "bar default"

    @App.function(f, model=Bar, name='edit')
    def f_bar_edit(obj, name):
        return "bar edit"

    config.commit()

    a = App()
    lookup = a.lookup
    assert f(Foo(), '', lookup=lookup) == 'foo default'
    assert f(Bar(), '', lookup=lookup) == 'bar default'
    assert f(Foo(), 'edit', lookup=lookup) == 'foo edit'
    assert f(Bar(), 'edit', lookup=lookup) == 'bar edit'
    # Unregistered model falls back regardless of the name value.
    assert f(Other(), '', lookup=lookup) == 'fallback'
    assert f(Other(), 'edit', lookup=lookup) == 'fallback'
def test_dispatch_external_predicates_ordering_before():
    """Same scenario as the ``after=`` test, but the 'model' predicate is
    declared second and pushed ahead with ``before=``."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj, name):
        return "fallback"

    @App.predicate(f, name='name', default='', index=KeyIndex)
    def pred_name(name):
        return name

    # Declared later, but ordered in front of the 'name' predicate.
    @App.predicate(f, name='model', default=None, index=ClassIndex,
                   before=pred_name)
    def pred_obj(obj):
        return obj.__class__

    @App.function(f, model=Foo, name='')
    def f_foo_default(obj, name):
        return "foo default"

    @App.function(f, model=Foo, name='edit')
    def f_foo_edit(obj, name):
        return "foo edit"

    @App.function(f, model=Bar, name='')
    def f_bar_default(obj, name):
        return "bar default"

    @App.function(f, model=Bar, name='edit')
    def f_bar_edit(obj, name):
        return "bar edit"

    config.commit()

    a = App()
    lookup = a.lookup
    assert f(Foo(), '', lookup=lookup) == 'foo default'
    assert f(Bar(), '', lookup=lookup) == 'bar default'
    assert f(Foo(), 'edit', lookup=lookup) == 'foo edit'
    assert f(Bar(), 'edit', lookup=lookup) == 'bar edit'
    assert f(Other(), '', lookup=lookup) == 'fallback'
    assert f(Other(), 'edit', lookup=lookup) == 'fallback'
def test_dispatch_external_override_fallback():
    """A sub-app can override a predicate fallback (and a registration)
    without affecting the base app."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Sub(App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj):
        return "dispatch function"

    @App.predicate(f, name='model', default=None, index=ClassIndex)
    def f_obj(obj):
        return obj.__class__

    @App.predicate_fallback(f, f_obj)
    def f_obj_fallback(obj):
        return "f_obj_fallback"

    # Sub overrides only the fallback for the 'model' predicate.
    @Sub.predicate_fallback(f, f_obj)
    def f_obj_fallback_sub(obj):
        return "f_obj_fallback sub"

    @App.function(f, model=Foo)
    def f_foo(obj):
        return "foo"

    @Sub.function(f, model=Foo)
    def f_foo_sub(obj):
        return "foo sub"

    @App.function(f, model=Bar)
    def f_bar(obj):
        return "bar"

    config.commit()

    s = Sub()
    lookup = s.lookup
    assert f(Foo(), lookup=lookup) == 'foo sub'
    # Bar registration is inherited unchanged from App.
    assert f(Bar(), lookup=lookup) == 'bar'
    assert f(Other(), lookup=lookup) == 'f_obj_fallback sub'
    # original is unaffected
    a = App()
    lookup = a.lookup
    assert f(Foo(), lookup=lookup) == 'foo'
    assert f(Bar(), lookup=lookup) == 'bar'
    assert f(Other(), lookup=lookup) == 'f_obj_fallback'
def test_dispatch_external_override_predicate():
    """A sub-app can override the predicate function itself; Sub's predicate
    always answers Bar, so every call dispatches to the Bar handler."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Sub(App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj):
        return "dispatch function"

    @App.predicate(f, name='model', default=None, index=ClassIndex)
    def f_obj(obj):
        return obj.__class__

    # Sub replaces the predicate: it ignores the argument entirely.
    @Sub.predicate(f, name='model', default=None, index=ClassIndex)
    def f_obj_sub(obj):
        return Bar  # ridiculous, but lets us test this

    @App.predicate_fallback(f, f_obj)
    def f_obj_fallback(obj):
        return "f_obj_fallback"

    @App.function(f, model=Foo)
    def f_foo(obj):
        return "foo"

    @Sub.function(f, model=Foo)
    def f_foo_sub(obj):
        return "foo"

    @App.function(f, model=Bar)
    def f_bar(obj):
        return "bar"

    @Sub.function(f, model=Bar)
    def f_bar_sub(obj):
        return "bar sub"

    config.commit()

    s = Sub()
    lookup = s.lookup
    # Every lookup resolves to Bar because Sub's predicate returns Bar.
    assert f(Foo(), lookup=lookup) == 'bar sub'
    assert f(Bar(), lookup=lookup) == 'bar sub'
    assert f(Other(), lookup=lookup) == 'bar sub'
    # The base app still dispatches on the real class.
    a = App()
    lookup = a.lookup
    assert f(Foo(), lookup=lookup) == 'foo'
    assert f(Bar(), lookup=lookup) == 'bar'
    assert f(Other(), lookup=lookup) == 'f_obj_fallback'
Demonstrate that additional, wrong predicate keys are ignored by Reg.
Requires the master branch of Reg.
from morepath import Config
import reg
from reg import ClassIndex, KeyIndex
import morepath
def setup_module(module):
    """Module-level test hook: disable morepath's implicit behavior so the
    tests below must pass a ``lookup`` explicitly."""
    morepath.disable_implicit()
def test_dispatch():
    """Class-based dispatch: App.function registrations win over the fallback."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch('obj')
    def f(obj):
        return "fallback"

    @App.function(f, obj=Foo)
    def handle_foo(obj):
        return "foo"

    @App.function(f, obj=Bar)
    def handle_bar(obj):
        return "bar"

    config.commit()

    app = App()
    assert f(Foo(), lookup=app.lookup) == 'foo'
    assert f(Bar(), lookup=app.lookup) == 'bar'
    # Unregistered class falls through to the dispatch function's body.
    assert f(Other(), lookup=app.lookup) == 'fallback'
def test_dispatch_external_predicates():
    """An externally declared 'model' predicate drives class-based dispatch."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj):
        return "fallback"

    @App.predicate(f, name='model', default=None, index=ClassIndex)
    def get_model(obj):
        return obj.__class__

    @App.function(f, model=Foo)
    def handle_foo(obj):
        return "foo"

    @App.function(f, model=Bar)
    def handle_bar(obj):
        return "bar"

    config.commit()

    app = App()
    assert f(Foo(), lookup=app.lookup) == 'foo'
    assert f(Bar(), lookup=app.lookup) == 'bar'
    # No registration for Other: the dispatch function's body is used.
    assert f(Other(), lookup=app.lookup) == 'fallback'
def test_dispatch_external_predicates_predicate_fallback():
    """A registered predicate_fallback is used instead of the dispatch
    function's own body when the 'model' predicate does not match."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj):
        return "dispatch function"

    @App.predicate(f, name='model', default=None, index=ClassIndex)
    def f_obj(obj):
        return obj.__class__

    # Fallback tied to the 'model' predicate declared just above.
    @App.predicate_fallback(f, f_obj)
    def f_obj_fallback(obj):
        return "f_obj_fallback"

    @App.function(f, model=Foo)
    def f_foo(obj):
        return "foo"

    @App.function(f, model=Bar)
    def f_bar(obj):
        return "bar"

    config.commit()

    a = App()
    lookup = a.lookup
    assert f(Foo(), lookup=lookup) == 'foo'
    assert f(Bar(), lookup=lookup) == 'bar'
    # Other hits the predicate fallback, not "dispatch function".
    assert f(Other(), lookup=lookup) == 'f_obj_fallback'
def test_dispatch_external_predicates_ordering_after():
    """Predicates declared with ``after=`` are ordered behind the referenced
    one, so dispatch keys here are (model, name)."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj, name):
        return "fallback"

    @App.predicate(f, name='model', default=None, index=ClassIndex)
    def pred_obj(obj):
        return obj.__class__

    # 'name' predicate explicitly ordered after the 'model' predicate.
    @App.predicate(f, name='name', default='', index=KeyIndex, after=pred_obj)
    def pred_name(name):
        return name

    @App.function(f, model=Foo, name='')
    def f_foo_default(obj, name):
        return "foo default"

    @App.function(f, model=Foo, name='edit')
    def f_foo_edit(obj, name):
        return "foo edit"

    @App.function(f, model=Bar, name='')
    def f_bar_default(obj, name):
        return "bar default"

    @App.function(f, model=Bar, name='edit')
    def f_bar_edit(obj, name):
        return "bar edit"

    config.commit()

    a = App()
    lookup = a.lookup
    assert f(Foo(), '', lookup=lookup) == 'foo default'
    assert f(Bar(), '', lookup=lookup) == 'bar default'
    assert f(Foo(), 'edit', lookup=lookup) == 'foo edit'
    assert f(Bar(), 'edit', lookup=lookup) == 'bar edit'
    # Unregistered model falls back regardless of the name value.
    assert f(Other(), '', lookup=lookup) == 'fallback'
    assert f(Other(), 'edit', lookup=lookup) == 'fallback'
def test_dispatch_external_predicates_ordering_before():
    """Same scenario as the ``after=`` test, but the 'model' predicate is
    declared second and pushed ahead with ``before=``."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj, name):
        return "fallback"

    @App.predicate(f, name='name', default='', index=KeyIndex)
    def pred_name(name):
        return name

    # Declared later, but ordered in front of the 'name' predicate.
    @App.predicate(f, name='model', default=None, index=ClassIndex,
                   before=pred_name)
    def pred_obj(obj):
        return obj.__class__

    @App.function(f, model=Foo, name='')
    def f_foo_default(obj, name):
        return "foo default"

    @App.function(f, model=Foo, name='edit')
    def f_foo_edit(obj, name):
        return "foo edit"

    @App.function(f, model=Bar, name='')
    def f_bar_default(obj, name):
        return "bar default"

    @App.function(f, model=Bar, name='edit')
    def f_bar_edit(obj, name):
        return "bar edit"

    config.commit()

    a = App()
    lookup = a.lookup
    assert f(Foo(), '', lookup=lookup) == 'foo default'
    assert f(Bar(), '', lookup=lookup) == 'bar default'
    assert f(Foo(), 'edit', lookup=lookup) == 'foo edit'
    assert f(Bar(), 'edit', lookup=lookup) == 'bar edit'
    assert f(Other(), '', lookup=lookup) == 'fallback'
    assert f(Other(), 'edit', lookup=lookup) == 'fallback'
def test_dispatch_external_override_fallback():
    """A sub-app can override a predicate fallback (and a registration)
    without affecting the base app."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Sub(App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj):
        return "dispatch function"

    @App.predicate(f, name='model', default=None, index=ClassIndex)
    def f_obj(obj):
        return obj.__class__

    @App.predicate_fallback(f, f_obj)
    def f_obj_fallback(obj):
        return "f_obj_fallback"

    # Sub overrides only the fallback for the 'model' predicate.
    @Sub.predicate_fallback(f, f_obj)
    def f_obj_fallback_sub(obj):
        return "f_obj_fallback sub"

    @App.function(f, model=Foo)
    def f_foo(obj):
        return "foo"

    @Sub.function(f, model=Foo)
    def f_foo_sub(obj):
        return "foo sub"

    @App.function(f, model=Bar)
    def f_bar(obj):
        return "bar"

    config.commit()

    s = Sub()
    lookup = s.lookup
    assert f(Foo(), lookup=lookup) == 'foo sub'
    # Bar registration is inherited unchanged from App.
    assert f(Bar(), lookup=lookup) == 'bar'
    assert f(Other(), lookup=lookup) == 'f_obj_fallback sub'
    # original is unaffected
    a = App()
    lookup = a.lookup
    assert f(Foo(), lookup=lookup) == 'foo'
    assert f(Bar(), lookup=lookup) == 'bar'
    assert f(Other(), lookup=lookup) == 'f_obj_fallback'
def test_dispatch_external_override_predicate():
    """A sub-app can override the predicate function itself; Sub's predicate
    always answers Bar, so every call dispatches to the Bar handler."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    class Sub(App):
        testing_config = config

    class Foo(object):
        pass

    class Bar(object):
        pass

    class Other(object):
        pass

    @reg.dispatch_external_predicates()
    def f(obj):
        return "dispatch function"

    @App.predicate(f, name='model', default=None, index=ClassIndex)
    def f_obj(obj):
        return obj.__class__

    # Sub replaces the predicate: it ignores the argument entirely.
    @Sub.predicate(f, name='model', default=None, index=ClassIndex)
    def f_obj_sub(obj):
        return Bar  # ridiculous, but lets us test this

    @App.predicate_fallback(f, f_obj)
    def f_obj_fallback(obj):
        return "f_obj_fallback"

    @App.function(f, model=Foo)
    def f_foo(obj):
        return "foo"

    @Sub.function(f, model=Foo)
    def f_foo_sub(obj):
        return "foo"

    @App.function(f, model=Bar)
    def f_bar(obj):
        return "bar"

    @Sub.function(f, model=Bar)
    def f_bar_sub(obj):
        return "bar sub"

    config.commit()

    s = Sub()
    lookup = s.lookup
    # Every lookup resolves to Bar because Sub's predicate returns Bar.
    assert f(Foo(), lookup=lookup) == 'bar sub'
    assert f(Bar(), lookup=lookup) == 'bar sub'
    assert f(Other(), lookup=lookup) == 'bar sub'
    # The base app still dispatches on the real class.
    a = App()
    lookup = a.lookup
    assert f(Foo(), lookup=lookup) == 'foo'
    assert f(Bar(), lookup=lookup) == 'bar'
    assert f(Other(), lookup=lookup) == 'f_obj_fallback'
def test_wrong_predicate_arguments_single():
    """Registering with an unknown predicate key ('wrong') on a single-arg
    dispatch function must not break config.commit()."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    @reg.dispatch('obj')
    def f(obj):
        return "fallback"

    class Foo(object):
        pass

    def bar(object):
        pass

    # 'wrong' is not a predicate of f; Reg is expected to ignore it.
    @App.function(f, wrong=Foo)
    def f_foo(obj):
        return "foo"

    config.commit()
def test_wrong_predicate_arguments_multi():
    """Registering with an unknown predicate key ('wrong') on a multi-arg
    dispatch function must not break config.commit()."""
    config = Config()

    class App(morepath.App):
        testing_config = config

    @reg.dispatch('a', 'b')
    def f(a, b):
        return "fallback"

    class Foo(object):
        pass

    def bar(object):
        pass

    # 'wrong' matches neither 'a' nor 'b'; Reg is expected to ignore it.
    @App.function(f, wrong=Foo)
    def f_foo(a, b):
        return "foo"

    config.commit()
|
from django import forms
from django.utils.translation import ugettext_noop, ugettext as _
from bootstrap3_crispy import bootstrap as twbs
from bootstrap3_crispy.helper import FormHelper
from bootstrap3_crispy import layout as crispy
from corehq.apps.style.crispy import FormActions
class ExampleUserLoginForm(forms.Form):
    """
    This is an EXAMPLE form that demonstrates the use of Crispy Forms in HQ.
    """
    full_name = forms.CharField(
        label=ugettext_noop("Full Name"),
    )
    email = forms.EmailField(
        label=ugettext_noop("Email"),
    )
    password = forms.CharField(
        label=ugettext_noop("Password"),
        widget=forms.PasswordInput(),
    )
    password_repeat = forms.CharField(
        label=ugettext_noop("Password (Repeat)"),
        widget=forms.PasswordInput(),
    )
    phone_number = forms.CharField(
        label=ugettext_noop("Phone Number"),
        required=False,
    )
    is_staff = forms.BooleanField(
        label=ugettext_noop("Has Staff Privileges"),
        required=False,
    )

    def __init__(self, *args, **kwargs):
        """Attach a crispy FormHelper and a two-fieldset horizontal layout."""
        super(ExampleUserLoginForm, self).__init__(*args, **kwargs)

        # Here's what makes the form a Crispy Form:
        self.helper = FormHelper()

        # This is necessary to make the form a horizontal form
        self.helper.form_class = 'form-horizontal'
        self.helper.label_class = 'col-lg-3'
        self.helper.field_class = 'col-lg-6'

        # This is the layout of the form where we can explicitly specify the
        # order of fields and group fields into fieldsets.
        self.helper.layout = crispy.Layout(
            crispy.Fieldset(
                # This is the title for the group of fields that follows:
                _("Basic Information"),
                'full_name',  # crispy.Field is used as the default display component
                crispy.Field('email'),  # effectively the same as the line above
                'password',
                'password_repeat',
            ),
            crispy.Fieldset(
                _("Advanced Information"),
                'is_staff',
                twbs.PrependedText('phone_number', '+',
                                   placeholder='15555555555'),
            ),
            # Submit/cancel buttons rendered through HQ's FormActions wrapper.
            FormActions(
                twbs.StrictButton(_("Create User"),
                                  type='submit',
                                  css_class='btn-primary'),
                twbs.StrictButton(_("Cancel"), css_class='btn-default'),
            ),
        )
Update the documentation for the crispy form example.
from django import forms
from django.utils.translation import ugettext_noop, ugettext as _
from bootstrap3_crispy.helper import FormHelper
from bootstrap3_crispy import layout as crispy
from bootstrap3_crispy import bootstrap as twbscrispy
from corehq.apps.style import crispy as hqcrispy
class ExampleUserLoginForm(forms.Form):
    """
    This is an EXAMPLE form that demonstrates the use of Crispy Forms in HQ.
    """
    full_name = forms.CharField(
        label=ugettext_noop("Full Name"),
    )
    email = forms.EmailField(
        label=ugettext_noop("Email"),
    )
    password = forms.CharField(
        label=ugettext_noop("Password"),
        widget=forms.PasswordInput(),
    )
    password_repeat = forms.CharField(
        label=ugettext_noop("Password (Repeat)"),
        widget=forms.PasswordInput(),
    )
    phone_number = forms.CharField(
        label=ugettext_noop("Phone Number"),
        required=False,
    )
    is_staff = forms.BooleanField(
        label=ugettext_noop("Has Staff Privileges"),
        required=False,
    )
    # NOTE: declared with no choices here; populate choices in the view/init
    # before rendering if a real language picker is needed.
    language = forms.ChoiceField(
        label=ugettext_noop("Language"),
        required=False,
    )

    def __init__(self, *args, **kwargs):
        """Attach a crispy FormHelper and a two-fieldset horizontal layout."""
        super(ExampleUserLoginForm, self).__init__(*args, **kwargs)

        # Here's what makes the form a Crispy Form:
        self.helper = FormHelper()

        # This is necessary to make the form a horizontal form
        self.helper.form_class = 'form-horizontal'

        # What do all these col-sm-3, col-md-2, col-lg-6 things mean? They
        # specify the column sizes for the label and field columns depending
        # on what the screen size is. This is called Responsive Design, and
        # you should visit
        # [Bootstrap 3's Responsive Docs](http://getbootstrap.com/css/#responsive-utilities)
        # for more information.
        self.helper.label_class = 'col-sm-3 col-md-2 col-lg-2'
        self.helper.field_class = 'col-sm-9 col-md-8 col-lg-6'

        # This is the layout of the form where we can explicitly specify the
        # order of fields and group fields into fieldsets.
        self.helper.layout = crispy.Layout(
            crispy.Fieldset(
                # This is the title for the group of fields that follows:
                _("Basic Information"),
                'full_name',  # crispy.Field is used as the default display component
                crispy.Field('email'),  # effectively the same as the line above
                'password',
                'password_repeat',
            ),
            crispy.Fieldset(
                _("Advanced Information"),
                'is_staff',
                twbscrispy.PrependedText('phone_number', '+',
                                         placeholder='15555555555'),
            ),
            # Submit/cancel buttons rendered through HQ's FormActions wrapper.
            hqcrispy.FormActions(
                twbscrispy.StrictButton(_("Create User"),
                                        type='submit',
                                        css_class='btn-primary'),
                twbscrispy.StrictButton(_("Cancel"), css_class='btn-default'),
            ),
        )
|
import os
import platform
import ssl
import docker
import pytest
import compose
from compose.cli import errors
from compose.cli.docker_client import docker_client
from compose.cli.docker_client import get_tls_version
from compose.cli.docker_client import tls_config_from_options
from compose.config.environment import Environment
from tests import mock
from tests import unittest
class DockerClientTestCase(unittest.TestCase):
    """Tests for compose.cli.docker_client.docker_client construction."""

    def test_docker_client_no_home(self):
        """Client construction must not crash when $HOME is unset."""
        with mock.patch.dict(os.environ):
            try:
                del os.environ['HOME']
            except KeyError:
                pass
            docker_client(os.environ)

    @mock.patch.dict(os.environ)
    def test_docker_client_with_custom_timeout(self):
        """COMPOSE_HTTP_TIMEOUT is propagated to the client's timeout."""
        os.environ['COMPOSE_HTTP_TIMEOUT'] = '123'
        client = docker_client(os.environ)
        assert client.timeout == 123

    @mock.patch.dict(os.environ)
    def test_custom_timeout_error(self):
        """Timeout errors are logged with the configured timeout value."""
        os.environ['COMPOSE_HTTP_TIMEOUT'] = '123'
        client = docker_client(os.environ)

        with mock.patch('compose.cli.errors.log') as fake_log:
            with pytest.raises(errors.ConnectionError):
                with errors.handle_connection_errors(client):
                    raise errors.RequestsConnectionError(
                        errors.ReadTimeoutError(None, None, None))

        assert fake_log.error.call_count == 1
        assert '123' in fake_log.error.call_args[0][0]

        with mock.patch('compose.cli.errors.log') as fake_log:
            with pytest.raises(errors.ConnectionError):
                with errors.handle_connection_errors(client):
                    raise errors.ReadTimeout()

        assert fake_log.error.call_count == 1
        assert '123' in fake_log.error.call_args[0][0]

    def test_user_agent(self):
        """User-Agent advertises compose, docker-py and platform versions."""
        client = docker_client(os.environ)
        expected = "docker-compose/{} docker-py/{} {}/{}".format(
            compose.__version__,
            docker.__version__,
            platform.system(),
            platform.release()
        )
        assert client.headers['User-Agent'] == expected
class TLSConfigTestCase(unittest.TestCase):
    """Tests for tls_config_from_options: CLI flags, environment variables,
    and their precedence when both are supplied."""

    # Fixture certificate files used throughout.
    cert_path = 'tests/fixtures/tls/'
    ca_cert = os.path.join(cert_path, 'ca.pem')
    client_cert = os.path.join(cert_path, 'cert.pem')
    key = os.path.join(cert_path, 'key.pem')

    def test_simple_tls(self):
        """--tls alone yields the literal True, not a TLSConfig object."""
        options = {'--tls': True}
        result = tls_config_from_options(options)
        assert result is True

    def test_tls_ca_cert(self):
        options = {
            '--tlscacert': self.ca_cert, '--tlsverify': True
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.ca_cert == options['--tlscacert']
        assert result.verify is True

    def test_tls_ca_cert_explicit(self):
        """Adding an explicit --tls does not change the CA-cert result."""
        options = {
            '--tlscacert': self.ca_cert, '--tls': True,
            '--tlsverify': True
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.ca_cert == options['--tlscacert']
        assert result.verify is True

    def test_tls_client_cert(self):
        options = {
            '--tlscert': self.client_cert, '--tlskey': self.key
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.cert == (options['--tlscert'], options['--tlskey'])

    def test_tls_client_cert_explicit(self):
        options = {
            '--tlscert': self.client_cert, '--tlskey': self.key,
            '--tls': True
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.cert == (options['--tlscert'], options['--tlskey'])

    def test_tls_client_and_ca(self):
        options = {
            '--tlscert': self.client_cert, '--tlskey': self.key,
            '--tlsverify': True, '--tlscacert': self.ca_cert
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.cert == (options['--tlscert'], options['--tlskey'])
        assert result.ca_cert == options['--tlscacert']
        assert result.verify is True

    def test_tls_client_and_ca_explicit(self):
        options = {
            '--tlscert': self.client_cert, '--tlskey': self.key,
            '--tlsverify': True, '--tlscacert': self.ca_cert,
            '--tls': True
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.cert == (options['--tlscert'], options['--tlskey'])
        assert result.ca_cert == options['--tlscacert']
        assert result.verify is True

    def test_tls_client_missing_key(self):
        """A client cert without a key (and vice versa) is rejected."""
        options = {'--tlscert': self.client_cert}
        with pytest.raises(docker.errors.TLSParameterError):
            tls_config_from_options(options)

        options = {'--tlskey': self.key}
        with pytest.raises(docker.errors.TLSParameterError):
            tls_config_from_options(options)

    def test_assert_hostname_explicit_skip(self):
        """--skip-hostname-check maps to assert_hostname=False."""
        options = {'--tlscacert': self.ca_cert, '--skip-hostname-check': True}
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.assert_hostname is False

    def test_tls_client_and_ca_quoted_paths(self):
        """Surrounding quotes in path options are stripped."""
        options = {
            '--tlscacert': '"{}"'.format(self.ca_cert),
            '--tlscert': '"{}"'.format(self.client_cert),
            '--tlskey': '"{}"'.format(self.key),
            '--tlsverify': True
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.cert == (self.client_cert, self.key)
        assert result.ca_cert == self.ca_cert
        assert result.verify is True

    def test_tls_simple_with_tls_version(self):
        """COMPOSE_TLS_VERSION selects the ssl protocol constant."""
        tls_version = 'TLSv1'
        options = {'--tls': True}
        environment = Environment({'COMPOSE_TLS_VERSION': tls_version})
        result = tls_config_from_options(options, environment)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.ssl_version == ssl.PROTOCOL_TLSv1

    def test_tls_mixed_environment_and_flags(self):
        """DOCKER_CERT_PATH env supplies the cert files; flags supply verify."""
        options = {'--tls': True, '--tlsverify': False}
        environment = Environment({'DOCKER_CERT_PATH': 'tests/fixtures/tls/'})
        result = tls_config_from_options(options, environment)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.cert == (self.client_cert, self.key)
        assert result.ca_cert == self.ca_cert
        assert result.verify is False

    def test_tls_flags_override_environment(self):
        """Explicit CLI flags beat conflicting environment variables."""
        environment = Environment({
            'DOCKER_CERT_PATH': '/completely/wrong/path',
            'DOCKER_TLS_VERIFY': 'false'
        })
        options = {
            '--tlscacert': '"{}"'.format(self.ca_cert),
            '--tlscert': '"{}"'.format(self.client_cert),
            '--tlskey': '"{}"'.format(self.key),
            '--tlsverify': True
        }
        result = tls_config_from_options(options, environment)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.cert == (self.client_cert, self.key)
        assert result.ca_cert == self.ca_cert
        assert result.verify is True

    def test_tls_verify_flag_no_override(self):
        environment = Environment({
            'DOCKER_TLS_VERIFY': 'true',
            'COMPOSE_TLS_VERSION': 'TLSv1',
            'DOCKER_CERT_PATH': self.cert_path
        })
        options = {'--tls': True, '--tlsverify': False}

        result = tls_config_from_options(options, environment)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.ssl_version == ssl.PROTOCOL_TLSv1
        # verify is a special case - since `--tlsverify` = False means it
        # wasn't used, we set it if either the environment or the flag is True
        # see https://github.com/docker/compose/issues/5632
        assert result.verify is True

    def test_tls_verify_env_falsy_value(self):
        """DOCKER_TLS_VERIFY='0' counts as unset: plain --tls gives True."""
        environment = Environment({'DOCKER_TLS_VERIFY': '0'})
        options = {'--tls': True}
        assert tls_config_from_options(options, environment) is True

    def test_tls_verify_default_cert_path(self):
        """With verify on and no cert path, the default cert path is used."""
        environment = Environment({'DOCKER_TLS_VERIFY': '1'})
        options = {'--tls': True}
        with mock.patch('compose.cli.docker_client.default_cert_path') as dcp:
            dcp.return_value = 'tests/fixtures/tls/'
            result = tls_config_from_options(options, environment)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.verify is True
        assert result.ca_cert == self.ca_cert
        assert result.cert == (self.client_cert, self.key)
class TestGetTlsVersion:
    """Tests for compose.cli.docker_client.get_tls_version."""

    def test_get_tls_version_default(self):
        """No COMPOSE_TLS_VERSION means no explicit ssl version."""
        environment = {}
        assert get_tls_version(environment) is None

    @pytest.mark.skipif(not hasattr(ssl, 'PROTOCOL_TLSv1_2'), reason='TLS v1.2 unsupported')
    def test_get_tls_version_upgrade(self):
        """A known version name maps to the matching ssl protocol constant."""
        environment = {'COMPOSE_TLS_VERSION': 'TLSv1_2'}
        assert get_tls_version(environment) == ssl.PROTOCOL_TLSv1_2

    def test_get_tls_version_unavailable(self):
        """An unknown version name logs a warning and falls back to None."""
        environment = {'COMPOSE_TLS_VERSION': 'TLSv5_5'}
        with mock.patch('compose.cli.docker_client.log') as mock_log:
            tls_version = get_tls_version(environment)
        mock_log.warning.assert_called_once_with(mock.ANY)
        assert tls_version is None
Update tests to pass the API version to the docker client explicitly.
Signed-off-by: Ulysses Souza <9b58b28cc7619bff4119b8572e41bbb4dd363aab@gmail.com>
import os
import platform
import ssl
import docker
import pytest
import compose
from compose import const
from compose.cli import errors
from compose.cli.docker_client import docker_client
from compose.cli.docker_client import get_tls_version
from compose.cli.docker_client import tls_config_from_options
from compose.config.environment import Environment
from tests import mock
from tests import unittest
class DockerClientTestCase(unittest.TestCase):
    """Tests for docker_client construction, pinning the API version to the
    one mapped from the compose spec in compose.const."""

    def test_docker_client_no_home(self):
        """Client construction must not crash when $HOME is unset."""
        with mock.patch.dict(os.environ):
            try:
                del os.environ['HOME']
            except KeyError:
                pass
            docker_client(os.environ, version=const.API_VERSIONS[const.COMPOSE_SPEC])

    @mock.patch.dict(os.environ)
    def test_docker_client_with_custom_timeout(self):
        """COMPOSE_HTTP_TIMEOUT is propagated to the client's timeout."""
        os.environ['COMPOSE_HTTP_TIMEOUT'] = '123'
        client = docker_client(os.environ, version=const.API_VERSIONS[const.COMPOSE_SPEC])
        assert client.timeout == 123

    @mock.patch.dict(os.environ)
    def test_custom_timeout_error(self):
        """Timeout errors are logged with the configured timeout value."""
        os.environ['COMPOSE_HTTP_TIMEOUT'] = '123'
        client = docker_client(os.environ, version=const.API_VERSIONS[const.COMPOSE_SPEC])

        with mock.patch('compose.cli.errors.log') as fake_log:
            with pytest.raises(errors.ConnectionError):
                with errors.handle_connection_errors(client):
                    raise errors.RequestsConnectionError(
                        errors.ReadTimeoutError(None, None, None))

        assert fake_log.error.call_count == 1
        assert '123' in fake_log.error.call_args[0][0]

        with mock.patch('compose.cli.errors.log') as fake_log:
            with pytest.raises(errors.ConnectionError):
                with errors.handle_connection_errors(client):
                    raise errors.ReadTimeout()

        assert fake_log.error.call_count == 1
        assert '123' in fake_log.error.call_args[0][0]

    def test_user_agent(self):
        """User-Agent advertises compose, docker-py and platform versions."""
        client = docker_client(os.environ, version=const.API_VERSIONS[const.COMPOSE_SPEC])
        expected = "docker-compose/{} docker-py/{} {}/{}".format(
            compose.__version__,
            docker.__version__,
            platform.system(),
            platform.release()
        )
        assert client.headers['User-Agent'] == expected
class TLSConfigTestCase(unittest.TestCase):
    """Checks for tls_config_from_options across flag/env combinations."""

    cert_path = 'tests/fixtures/tls/'
    ca_cert = os.path.join(cert_path, 'ca.pem')
    client_cert = os.path.join(cert_path, 'cert.pem')
    key = os.path.join(cert_path, 'key.pem')

    def test_simple_tls(self):
        # A bare --tls flag yields the boolean True, not a TLSConfig.
        cfg = tls_config_from_options({'--tls': True})
        assert cfg is True

    def test_tls_ca_cert(self):
        opts = {'--tlsverify': True, '--tlscacert': self.ca_cert}
        cfg = tls_config_from_options(opts)
        assert isinstance(cfg, docker.tls.TLSConfig)
        assert cfg.verify is True
        assert cfg.ca_cert == opts['--tlscacert']

    def test_tls_ca_cert_explicit(self):
        opts = {
            '--tls': True,
            '--tlscacert': self.ca_cert,
            '--tlsverify': True,
        }
        cfg = tls_config_from_options(opts)
        assert isinstance(cfg, docker.tls.TLSConfig)
        assert cfg.verify is True
        assert cfg.ca_cert == opts['--tlscacert']

    def test_tls_client_cert(self):
        opts = {'--tlscert': self.client_cert, '--tlskey': self.key}
        cfg = tls_config_from_options(opts)
        assert isinstance(cfg, docker.tls.TLSConfig)
        assert cfg.cert == (opts['--tlscert'], opts['--tlskey'])

    def test_tls_client_cert_explicit(self):
        opts = {
            '--tls': True,
            '--tlscert': self.client_cert,
            '--tlskey': self.key,
        }
        cfg = tls_config_from_options(opts)
        assert isinstance(cfg, docker.tls.TLSConfig)
        assert cfg.cert == (opts['--tlscert'], opts['--tlskey'])

    def test_tls_client_and_ca(self):
        opts = {
            '--tlscacert': self.ca_cert,
            '--tlscert': self.client_cert,
            '--tlskey': self.key,
            '--tlsverify': True,
        }
        cfg = tls_config_from_options(opts)
        assert isinstance(cfg, docker.tls.TLSConfig)
        assert cfg.verify is True
        assert cfg.ca_cert == opts['--tlscacert']
        assert cfg.cert == (opts['--tlscert'], opts['--tlskey'])

    def test_tls_client_and_ca_explicit(self):
        opts = {
            '--tls': True,
            '--tlscacert': self.ca_cert,
            '--tlscert': self.client_cert,
            '--tlskey': self.key,
            '--tlsverify': True,
        }
        cfg = tls_config_from_options(opts)
        assert isinstance(cfg, docker.tls.TLSConfig)
        assert cfg.verify is True
        assert cfg.ca_cert == opts['--tlscacert']
        assert cfg.cert == (opts['--tlscert'], opts['--tlskey'])

    def test_tls_client_missing_key(self):
        # A client cert without its key (and vice versa) must be rejected.
        for partial in ({'--tlscert': self.client_cert},
                        {'--tlskey': self.key}):
            with pytest.raises(docker.errors.TLSParameterError):
                tls_config_from_options(partial)

    def test_assert_hostname_explicit_skip(self):
        opts = {'--tlscacert': self.ca_cert, '--skip-hostname-check': True}
        cfg = tls_config_from_options(opts)
        assert isinstance(cfg, docker.tls.TLSConfig)
        assert cfg.assert_hostname is False

    def test_tls_client_and_ca_quoted_paths(self):
        # Surrounding quotes in the option values must be stripped.
        opts = {
            '--tlscacert': '"' + self.ca_cert + '"',
            '--tlscert': '"' + self.client_cert + '"',
            '--tlskey': '"' + self.key + '"',
            '--tlsverify': True,
        }
        cfg = tls_config_from_options(opts)
        assert isinstance(cfg, docker.tls.TLSConfig)
        assert cfg.verify is True
        assert cfg.ca_cert == self.ca_cert
        assert cfg.cert == (self.client_cert, self.key)

    def test_tls_simple_with_tls_version(self):
        env = Environment({'COMPOSE_TLS_VERSION': 'TLSv1'})
        cfg = tls_config_from_options({'--tls': True}, env)
        assert isinstance(cfg, docker.tls.TLSConfig)
        assert cfg.ssl_version == ssl.PROTOCOL_TLSv1

    def test_tls_mixed_environment_and_flags(self):
        env = Environment({'DOCKER_CERT_PATH': 'tests/fixtures/tls/'})
        cfg = tls_config_from_options({'--tls': True, '--tlsverify': False}, env)
        assert isinstance(cfg, docker.tls.TLSConfig)
        assert cfg.verify is False
        assert cfg.ca_cert == self.ca_cert
        assert cfg.cert == (self.client_cert, self.key)

    def test_tls_flags_override_environment(self):
        env = Environment({
            'DOCKER_CERT_PATH': '/completely/wrong/path',
            'DOCKER_TLS_VERIFY': 'false',
        })
        opts = {
            '--tlscacert': '"' + self.ca_cert + '"',
            '--tlscert': '"' + self.client_cert + '"',
            '--tlskey': '"' + self.key + '"',
            '--tlsverify': True,
        }
        cfg = tls_config_from_options(opts, env)
        assert isinstance(cfg, docker.tls.TLSConfig)
        assert cfg.verify is True
        assert cfg.ca_cert == self.ca_cert
        assert cfg.cert == (self.client_cert, self.key)

    def test_tls_verify_flag_no_override(self):
        env = Environment({
            'DOCKER_TLS_VERIFY': 'true',
            'COMPOSE_TLS_VERSION': 'TLSv1',
            'DOCKER_CERT_PATH': self.cert_path,
        })
        cfg = tls_config_from_options({'--tls': True, '--tlsverify': False}, env)
        assert isinstance(cfg, docker.tls.TLSConfig)
        assert cfg.ssl_version == ssl.PROTOCOL_TLSv1
        # verify is a special case - since `--tlsverify` = False means it
        # wasn't used, we set it if either the environment or the flag is True
        # see https://github.com/docker/compose/issues/5632
        assert cfg.verify is True

    def test_tls_verify_env_falsy_value(self):
        env = Environment({'DOCKER_TLS_VERIFY': '0'})
        assert tls_config_from_options({'--tls': True}, env) is True

    def test_tls_verify_default_cert_path(self):
        env = Environment({'DOCKER_TLS_VERIFY': '1'})
        with mock.patch('compose.cli.docker_client.default_cert_path') as dcp:
            dcp.return_value = 'tests/fixtures/tls/'
            cfg = tls_config_from_options({'--tls': True}, env)
        assert isinstance(cfg, docker.tls.TLSConfig)
        assert cfg.verify is True
        assert cfg.ca_cert == self.ca_cert
        assert cfg.cert == (self.client_cert, self.key)
class TestGetTlsVersion:
    """Resolution of COMPOSE_TLS_VERSION into an ssl protocol constant."""

    def test_get_tls_version_default(self):
        # Unset variable -> no explicit TLS version.
        assert get_tls_version({}) is None

    @pytest.mark.skipif(not hasattr(ssl, 'PROTOCOL_TLSv1_2'), reason='TLS v1.2 unsupported')
    def test_get_tls_version_upgrade(self):
        env = {'COMPOSE_TLS_VERSION': 'TLSv1_2'}
        assert get_tls_version(env) == ssl.PROTOCOL_TLSv1_2

    def test_get_tls_version_unavailable(self):
        # An unknown version warns and falls back to None.
        env = {'COMPOSE_TLS_VERSION': 'TLSv5_5'}
        with mock.patch('compose.cli.docker_client.log') as fake_log:
            version = get_tls_version(env)
        fake_log.warning.assert_called_once_with(mock.ANY)
        assert version is None
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2015-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`HassaniAtkinson2020SInter`
:class:`HassaniAtkinson2020SSlab`
:class:`HassaniAtkinson2020Asc`
"""
import math
import numpy as np
from openquake.hazardlib import const
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib.imt import PGA, SA, PGV
# Constants shared by the factor functions below:
#  - mlf0/mlf1: magnitude hinge and cap used by _clf
#  - f1/f3 and vmin: non-linear site term (_fsnonlin_ss14)
#  - b1/b2: geometric spreading slopes (_fz_ha18)
#  - v0/v1/v2 and vref: vs30 segment edges / reference velocity (_fvs30)
#  - zx0/zx1/zx2: z2pt5 segment edges (_fz2pt5)
#  - cfp0..cfp3 and x0/x1/x2: fpeak factor segments (_ffpeak)
CONSTANTS = {"mlf0": 5.5, "mlf1": 7, "f1": 0, "f3": 98.1,
             "b1": -1.3, "b2": -0.5, "v0": 100, "v1": 250, "v2": 1000,
             "zx0": 150, "zx1": 800, "zx2": 4200,
             "cfp0": -0.011, "cfp1": 0.421, "cfp2": -0.604, "cfp3": 0.086,
             "x0": 0.5, "x1": 1, "x2": 2.5, "vref": 760, "vmin": 360}
def _clf(suffix, C, mag):
    """
    Low frequency calibration factor for the given region suffix.
    """
    base = C['clf0' + suffix]
    slope = C['clf1' + suffix]
    hinge = CONSTANTS["mlf0"]
    if mag <= hinge:
        return base
    capped_mag = min(mag, CONSTANTS["mlf1"])
    return base + slope * (capped_mag - hinge)
def _dsigma(creg, hypo_depth):
"""
Hypocentre depth factor.
"""
out = creg['cd0']
dp0 = creg['dp0']
if hypo_depth > dp0:
out += creg['cd1'] * (min(hypo_depth, creg['dp1']) - dp0)
return 10 ** out
def _fds_ha18(C, mag, dsigma):
"""
Dsigma factor.
"""
eds1 = np.polyval([C['g14'], C['g13'], C['g12'], C['g11'], C['g10']],
mag)
eds2 = np.polyval([C['g24'], C['g23'], C['g22'], C['g21'], C['g20']],
mag)
eds0 = -2 * eds1 - 4 * eds2
return eds0 + eds1 * math.log10(dsigma) \
+ eds2 * math.log10(dsigma) ** 2
def _ffpeak(C, imt, fpeak):
"""
Fpeak factor.
"""
if imt.string[:2] != "SA" or max(fpeak) <= 0:
# pgv, pga or unknown fpeak
return 0
s = CONSTANTS
x = fpeak / 10 ** C['f']
ffpeak = np.where(fpeak <= 0, 0, s['cfp0'])
idx = np.where((s['x0'] < x) & (x <= s['x1']))
ffpeak[idx] = s['cfp0'] + s['cfp1'] * np.log10(x[idx] / s['x0'])
idx = np.where((s['x1'] < x) & (x <= s['x2']))
ffpeak[idx] = s['cfp0'] + s['cfp1'] * math.log10(s['x1'] / s['x0']) \
+ s['cfp2'] * np.log10(x[idx] / s['x1'])
idx = np.where(s['x2'] < x)
ffpeak[idx] = s['cfp0'] + s['cfp1'] * math.log10(s['x1'] / s['x0']) \
+ s['cfp2'] * math.log10(s['x2'] / s['x1']) \
+ s['cfp3'] * np.log10(x[idx] / s['x2'])
return ffpeak
def _fgamma(suffix, backarc, forearc_ne, forearc_sw, C, rrup):
"""
Gamma factor.
"""
# proportion sum for normalised values with rrup factor
p_sum = rrup / (backarc + forearc_ne + forearc_sw)
return C['barc' + suffix] * backarc * p_sum \
+ C['farc_ne' + suffix] * forearc_ne * p_sum \
+ C['farc_sw' + suffix] * forearc_sw * p_sum
def _fkp_ha18(kappa, C, mag, dsigma):
"""
Kappa factor for B/C site condition of Japan.
"""
l10kp = math.log10(kappa)
p = np.zeros(4)
ek0 = np.zeros(4)
for i in range(4):
for j in range(4):
p[j] = np.polyval([C[f'd{i}{j}2'], C[f'd{i}{j}1'],
C[f'd{i}{j}0']], math.log10(dsigma))
ek0[i] = np.polyval(p[::-1], math.log10(mag))
return 3 * ek0[0] - 9 * ek0[1] + 27 * ek0[2] - 81 * ek0[3] \
+ ek0[0] * l10kp + ek0[1] * l10kp ** 2 \
+ ek0[2] * l10kp ** 3 + ek0[3] * l10kp ** 4
def _fm_ha18(C, mag):
"""
Magnitude factor.
"""
if mag <= C['mh']:
return C['e0'] + C['e1'] * (mag - C['mh']) \
+ C['e2'] * (mag - C['mh']) ** 2
return C['e0'] + C['e3'] * (mag - C['mh'])
def _fsnonlin_ss14(C, vs30, pga_rock):
    """
    Non-linear site factor, driven by the reference rock PGA and
    saturating with vs30 capped at the reference velocity.
    """
    cst = CONSTANTS
    capped_vs = np.minimum(vs30, cst['vref'])
    slope = C['f4'] * (np.exp(C['f5'] * (capped_vs - cst['vmin']))
                       - math.exp(C['f5'] * (cst['vref'] - cst['vmin'])))
    return cst['f1'] + slope * np.log((pga_rock + cst['f3']) / cst['f3'])
def _fvs30(C, vs30):
    """
    Vs30 factor, piecewise in log10(vs30) between v0, v1 and v2,
    clamped to the segment-edge values outside [v0, v2].
    """
    cst = CONSTANTS
    # Values at the outer edges, hoisted out of the selections.
    low_clamp = C['cv1'] * math.log10(cst['v0'] / cst['vref']) \
        + (C['cv2'] - C['cv1']) * math.log10(cst['v1'] / cst['vref'])
    high_clamp = C['cv2'] * math.log10(cst['v2'] / cst['vref'])
    out = np.where(vs30 <= cst['v0'], low_clamp, high_clamp)
    out = np.where((cst['v0'] < vs30) & (vs30 <= cst['v1']),
                   C['cv1'] * np.log10(vs30 / cst['vref'])
                   + (C['cv2'] - C['cv1'])
                   * math.log10(cst['v1'] / cst['vref']), out)
    return np.where((cst['v1'] < vs30) & (vs30 <= cst['v2']),
                    C['cv2'] * np.log10(vs30 / cst['vref']), out)
def _fz2pt5(C, z2pt5):
    """
    Z2pt5 (basin depth) factor, piecewise in log10(z2pt5); negative
    depths keep a zero contribution.
    """
    cst = CONSTANTS
    # Offsets at the segment boundaries, hoisted from each branch
    # (same left-to-right accumulation as the per-branch sums).
    seg1 = C['cz0'] + C['cz1'] * math.log10(cst['zx1'] / cst['zx0'])
    seg2 = seg1 + C['cz2'] * math.log10(cst['zx2'] / cst['zx1'])
    out = np.where(z2pt5 >= 0, C['cz0'], 0)
    sel = np.where((cst['zx0'] < z2pt5) & (z2pt5 <= cst['zx1']))
    out[sel] = C['cz0'] + C['cz1'] * np.log10(z2pt5[sel] / cst['zx0'])
    sel = np.where((cst['zx1'] < z2pt5) & (z2pt5 <= cst['zx2']))
    out[sel] = seg1 + C['cz2'] * np.log10(z2pt5[sel] / cst['zx1'])
    sel = np.where(cst['zx2'] < z2pt5)
    out[sel] = seg2
    return out
def _fz_ha18(rt, C, mag, rrup):
    """
    Z (geometric spreading) factor with a transition at distance rt,
    using an effective distance with a magnitude-dependent pseudo-depth.
    """
    cst = CONSTANTS
    pseudo_depth = 10 ** (-0.405 + 0.235 * mag)
    reff = np.sqrt(rrup ** 2 + pseudo_depth ** 2)
    reff_ref = math.sqrt(1 ** 2 + pseudo_depth ** 2)
    # Magnitude-dependent term shared by both branches.
    md_term = (C['b3'] + C['b4'] * mag) * np.log10(reff / reff_ref)
    near = cst['b1'] * np.log10(reff) + md_term
    far = cst['b1'] * math.log10(rt) + cst['b2'] * np.log10(reff / rt) \
        + md_term
    return np.where(reff <= rt, near, far)
def get_stddevs(suffix, C, stddev_types):
    """
    Collect the requested standard deviations.

    Total and inter-event (tau) values come straight from the
    coefficient table; the intra-event value combines the site-to-site
    and within-site components in quadrature. Unrecognised types are
    skipped, as in the original loop.
    """
    out = []
    for requested in stddev_types:
        if requested == const.StdDev.TOTAL:
            out.append(C['s' + suffix])
        elif requested == const.StdDev.INTER_EVENT:
            out.append(C['tau'])
        elif requested == const.StdDev.INTRA_EVENT:
            out.append(math.sqrt(C['ps2s'] ** 2 + C['pss' + suffix] ** 2))
    return out
class HassaniAtkinson2020SInter(GMPE):
    """
    Hassani Atkinson (2020) for Subduction Interface.
    """
    DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.SUBDUCTION_INTERFACE
    #: Supported intensity measure types are spectral acceleration,
    #: peak ground acceleration and peak ground velocity
    DEFINED_FOR_INTENSITY_MEASURE_TYPES = {PGV, PGA, SA}
    #: Supported intensity measure component is the geometric mean component
    DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.AVERAGE_HORIZONTAL
    #: Total, inter-event and intra-event standard deviations are supported
    DEFINED_FOR_STANDARD_DEVIATION_TYPES = {
        const.StdDev.TOTAL, const.StdDev.INTER_EVENT, const.StdDev.INTRA_EVENT}
    #: Only rupture distance is required (see _fz_ha18 and _fgamma)
    REQUIRES_DISTANCES = {'rrup'}
    #: Hypocentral depth drives _dsigma; magnitude drives several factors
    REQUIRES_RUPTURE_PARAMETERS = {'hypo_depth', 'mag'}
    #: Site terms use fpeak (_ffpeak), vs30 (_fvs30, _fsnonlin_ss14)
    #: and z2pt5 (_fz2pt5)
    REQUIRES_SITES_PARAMETERS = {'fpeak', 'vs30', 'z2pt5'}
    #: Extra constructor attributes: kappa and the arc path proportions
    REQUIRES_ATTRIBUTES = {'kappa', 'backarc', 'forearc_ne', 'forearc_sw'}
    def __init__(self, kappa=0.04, backarc=0, forearc_ne=1, forearc_sw=0,
                 **kwargs):
        """
        Additional parameters.

        :param kappa:
            Kappa value fed to :func:`_fkp_ha18` (site attenuation for
            the B/C reference condition, per that function's docstring).
        :param backarc:
        :param forearc_ne:
        :param forearc_sw:
            Proportions of the rupture distance travelled in the
            backarc, north-east forearc and south-west forearc regions,
            used by :func:`_fgamma`.
        """
        super().__init__(kappa=kappa, backarc=backarc, forearc_ne=forearc_ne,
                         forearc_sw=forearc_sw, **kwargs)
        # kappa parameter
        self.kappa = kappa
        # set proportion of rrups in backarc, forearc_ne and forearc_sw
        self.backarc = backarc
        self.forearc_ne = forearc_ne
        self.forearc_sw = forearc_sw
    def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
        """
        See :meth:`superclass method
        <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
        for spec of input and result values.

        All factors are accumulated in log10 units; the final mean is
        converted to natural log (of g, except PGV which stays in cm/s).
        """
        # extract dictionaries of coefficients specific to required
        # intensity measure type
        C = self.COEFFS[imt]
        C_PGA = self.COEFFS[PGA()]
        # NOTE(review): CONST_REGION and SUFFIX are class attributes not
        # visible in this block -- presumably set per region/subclass;
        # confirm against the rest of the module.
        dsigma = _dsigma(self.CONST_REGION, rup.hypo_depth)
        # each factor is computed twice: with the coefficients of the
        # requested IMT and with the PGA coefficients, because the
        # reference rock PGA below drives the non-linear site term
        fm = _fm_ha18(C, rup.mag)
        fm_pga = _fm_ha18(C_PGA, rup.mag)
        fz = _fz_ha18(self.CONST_REGION['rt'], C, rup.mag, dists.rrup)
        fz_pga = _fz_ha18(self.CONST_REGION['rt'], C_PGA, rup.mag, dists.rrup)
        fdsigma = _fds_ha18(C, rup.mag, dsigma)
        fdsigma_pga = _fds_ha18(C_PGA, rup.mag, dsigma)
        fkappa = _fkp_ha18(self.kappa, C, rup.mag, dsigma)
        fkappa_pga = _fkp_ha18(self.kappa, C_PGA, rup.mag, dsigma)
        fgamma = _fgamma(self.SUFFIX, self.backarc, self.forearc_ne,
                         self.forearc_sw, C, dists.rrup)
        fgamma_pga = _fgamma(self.SUFFIX, self.backarc, self.forearc_ne,
                             self.forearc_sw, C_PGA, dists.rrup)
        clf = _clf(self.SUFFIX, C, rup.mag)
        clf_pga = _clf(self.SUFFIX, C_PGA, rup.mag)
        # reference (rock) PGA: all PGA-coefficient factors summed in
        # log10 space, then converted to linear units
        pga_rock = 10 ** (fm_pga + fz_pga + fdsigma_pga +
                          fkappa_pga + fgamma_pga + self.CONST_REGION['cc'] +
                          clf_pga + C_PGA['chf'] + C_PGA['amp_cr'])
        # site terms: non-linear (needs rock PGA), vs30, basin depth, fpeak
        fsnonlin = _fsnonlin_ss14(C, sites.vs30, pga_rock)
        fvs30 = _fvs30(C, sites.vs30)
        fz2pt5 = _fz2pt5(C, sites.z2pt5)
        ffpeak = _ffpeak(C, imt, sites.fpeak)
        mean = 10 ** (fm + fdsigma + fz + fkappa + fgamma
                      + self.CONST_REGION['cc'] + clf + C['chf']
                      + C['amp_cr'] + fvs30 + fz2pt5 + ffpeak + fsnonlin)
        if imt.string != "PGV":
            # pgv in cm/s
            # sa and psa in cm/s^2
            # divide by 981 to express accelerations in g
            mean = mean / 981
        mean = np.log(mean)
        stddevs = get_stddevs(self.SUFFIX, C, stddev_types)
        return mean, stddevs
# periods given by 1 / 10 ** COEFFS['f']
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT f amp_cr b3 b4 chf clf0_if clf1_if clf0_is clf1_is clf0_cr clf1_cr cv1 cv2 cz0 cz1 cz2 barc_if farc_ne_if farc_sw_if barc_is farc_ne_is farc_sw_is barc_cr farc_ne_cr farc_sw_cr d000 d001 d002 d010 d011 d012 d020 d021 d022 d030 d031 d032 d100 d101 d102 d110 d111 d112 d120 d121 d122 d130 d131 d132 d200 d201 d202 d210 d211 d212 d220 d221 d222 d230 d231 d232 d300 d301 d302 d310 d311 d312 d320 d321 d322 d330 d331 d332 mh e0 e1 e2 e3 f4 f5 g10 g11 g12 g13 g14 g20 g21 g22 g23 g24 tau ps2s pss_if pss_is pss_cr s_if s_is s_cr
pgv -9 0 -0.4414 0.0394 0.204 0 0 0 0 0 0 -0.667 -0.866 -0.046 0.166 -0.068 -0.00281 -0.0015 -0.00048 -0.00327 -0.00148 -0.0024 -0.00406 -0.00242 -0.00278 -40.826164 40.113039 -5.102129 155.252313 -182.36868 27.033349 -196.671754 258.210795 -42.36533 82.357479 -116.454408 20.537609 -31.558927 36.874774 -5.739549 125.277771 -161.306518 27.874045 -163.436869 223.266447 -41.391833 69.903157 -99.25266 19.350221 -10.37628 13.051401 -2.256509 41.867738 -56.055028 10.530332 -55.192594 76.696014 -15.238462 23.77607 -33.832713 6.996974 -1.229433 1.604051 -0.296017 4.993482 -6.816711 1.350202 -6.609082 9.262415 -1.92463 2.854701 -4.066423 0.874373 8.8 3.30208 0.43897 -0.01672 0.41464 -0.0434 -0.0084 0.444251 0.056972 0.003644 -0.004046 0.000353 -0.094203 0.053343 -0.014163 0.001919 -0.0001 0.482 0.293 0.386 0.376 0.401 0.683 0.678 0.692
pga -9 0 -0.4565 0.0382 0.574 0 0 0 0 0 0 -0.16 -0.612 0 0 0 -0.00684 -0.00273 -0.00456 -0.00541 -0.00167 -0.00396 -0.00688 -0.00476 -0.00484 -51.098718 32.578796 -0.602711 193.376939 -154.536656 8.970141 -246.377014 224.30934 -19.056039 103.867407 -102.862801 10.767404 -44.562063 38.708279 -3.922374 176.725188 -170.793471 21.015529 -230.932652 237.86616 -33.083719 99.136459 -106.274886 16.078647 -16.422438 15.847931 -2.168642 65.997737 -67.973039 10.385514 -87.070182 93.033435 -15.329737 37.638662 -41.09486 7.148396 -2.134042 2.150893 -0.337012 8.618758 -9.089596 1.542771 -11.416684 12.32449 -2.213236 4.950046 -5.410116 1.012283 7.4 4.37376 0.33001 -0.0094 0.30153 -0.0651 -0.007 0.709663 -0.038392 0.013081 -0.001138 0.000024 -0.032385 0.019043 -0.004517 0.000375 -0.000009 0.513 0.422 0.411 0.452 0.496 0.781 0.803 0.829
0.01 2.0 0.678 -0.4321 0.0353 -0.09 0 0 0 0 0 0 -0.163 -0.598 0 0 0 -0.00686 -0.00274 -0.00457 -0.00544 -0.00168 -0.00398 -0.00693 -0.0048 -0.00489 -41.010798 20.984834 4.332932 143.725915 -112.573964 -8.798757 -185.502717 174.262586 2.145698 78.793199 -83.128703 2.380509 -35.56513 27.797776 0.50508 130.2847 -131.140942 4.976749 -173.646134 190.435518 -13.863902 75.476551 -87.53478 8.453463 -13.145747 11.601671 -0.552587 48.068218 -52.451102 4.478831 -64.765944 74.387032 -8.20745 28.388967 -33.704532 4.310521 -1.715884 1.576223 -0.13206 6.206072 -6.977314 0.786463 -8.387874 9.776758 -1.295275 3.688342 -4.397405 0.644874 7.15 4.64514 0.34404 -0.00816 0.30565 -0.0644 -0.007 1.472416 -0.541245 0.133779 -0.013606 0.000492 -0.222277 0.146276 -0.035353 0.00358 -0.00013 0.513 0.423 0.411 0.450 0.496 0.782 0.803 0.830
0.01259 1.9 0.677 -0.4335 0.036 -0.089 0 0 0 0 0 0 -0.165 -0.593 0 0 0 -0.00688 -0.00275 -0.00458 -0.00546 -0.00169 -0.00399 -0.00696 -0.00482 -0.00492 -36.828522 19.569252 6.125909 133.799687 -108.928441 -14.579428 -175.108029 171.43759 8.453326 74.844597 -82.543522 0.054735 -31.17228 25.351334 2.531652 118.752325 -123.338464 -1.805442 -161.173356 182.153805 -6.194116 70.665151 -84.601815 5.529234 -11.281178 10.204519 0.356264 42.754302 -47.641396 1.35097 -58.876236 68.854243 -4.5789 26.088198 -31.574179 2.895149 -1.449797 1.338067 0.003661 5.403172 -6.129093 0.310717 -7.485634 8.768873 -0.734349 3.333472 -3.997333 0.422956 7.4 4.69775 0.33848 -0.00825 0.3049 -0.0642 -0.0071 1.554224 -0.561608 0.1314 -0.012766 0.000443 -0.26475 0.165203 -0.037932 0.003683 -0.000129 0.513 0.424 0.411 0.451 0.496 0.782 0.804 0.830
0.01585 1.8 0.676 -0.4282 0.0351 -0.092 0 0 0 0 0 0 -0.168 -0.587 0 0 0 -0.0069 -0.00276 -0.00459 -0.00547 -0.0017 -0.004 -0.00699 -0.00485 -0.00494 -33.843087 21.852772 6.415627 132.533545 -119.657522 -14.289425 -178.466424 186.886925 6.835534 77.884983 -89.602175 1.106684 -28.577173 27.547838 3.040521 118.556209 -134.307003 -2.241058 -166.146429 198.547813 -7.035861 74.454737 -92.292678 6.313616 -10.409129 11.098325 0.623752 43.180509 -52.297863 0.97445 -61.611402 75.987898 -4.711912 27.946807 -34.97869 3.144502 -1.350461 1.465128 0.046401 5.522435 -6.807942 0.240613 -7.948998 9.823776 -0.734948 3.628394 -4.505746 0.452039 7.5 4.69913 0.33602 -0.00894 0.30604 -0.0641 -0.0072 1.714652 -0.6427 0.145153 -0.013649 0.000459 -0.34998 0.21312 -0.047606 0.004516 -0.000155 0.513 0.424 0.412 0.451 0.496 0.783 0.805 0.831
0.01995 1.7 0.672 -0.4209 0.0344 -0.102 0 0 0 0 0 0 -0.171 -0.578 0 0 0 -0.00693 -0.00277 -0.00461 -0.00549 -0.00171 -0.00402 -0.00704 -0.00488 -0.00498 -42.827733 41.107588 1.767006 175.999485 -195.196015 4.995207 -233.844386 282.760331 -18.604775 100.440836 -129.207506 11.927987 -36.362289 45.183985 -0.914573 157.333408 -204.021974 14.524812 -216.204378 287.545597 -29.45818 95.015702 -129.226338 15.940451 -13.041058 17.526987 -0.713597 56.776641 -77.875531 6.778782 -79.377281 108.793041 -12.585143 35.289638 -48.639106 6.556052 -1.659777 2.280063 -0.111625 7.178509 -10.068016 0.942713 -10.137835 14.021054 -1.700233 4.53801 -6.258265 0.873887 7.8 4.75561 0.32694 -0.01031 0.30339 -0.0639 -0.0073 1.912143 -0.751222 0.167414 -0.015674 0.000528 -0.447888 0.269464 -0.059636 0.005644 -0.000194 0.515 0.426 0.412 0.452 0.496 0.785 0.806 0.832
0.02512 1.6 0.667 -0.4098 0.0331 -0.109 0 0 0 0 0 0 -0.174 -0.557 0 0 0 -0.007 -0.0028 -0.00466 -0.00554 -0.00173 -0.00405 -0.00715 -0.00497 -0.00508 -48.479722 55.7785 -4.013399 203.174441 -245.465754 25.938538 -266.708211 340.075945 -43.592027 113.93429 -151.062403 21.79371 -42.838283 60.841219 -6.458254 188.720316 -259.25022 35.007115 -255.708778 352.33067 -54.311583 111.649687 -154.593101 25.898767 -15.836581 23.981785 -2.828785 70.439152 -101.059928 14.712066 -96.989307 136.457275 -22.332544 42.812057 -59.638179 10.503095 -2.064808 3.184205 -0.391809 9.16487 -13.355305 2.005714 -12.736528 17.987595 -3.018564 5.657377 -7.850825 1.411863 8.95 5.06565 0.29638 -0.01193 0.29694 -0.0653 -0.0074 2.153914 -0.892759 0.198231 -0.018627 0.000633 -0.571979 0.342742 -0.075675 0.007183 -0.000249 0.519 0.428 0.414 0.455 0.496 0.790 0.812 0.836
0.03162 1.5 0.658 -0.4075 0.0327 -0.105 0 0 0 0 0 0 -0.17 -0.522 0 0 0 -0.00712 -0.00285 -0.00474 -0.00565 -0.00177 -0.00411 -0.00737 -0.00513 -0.00524 -36.454347 50.747067 -7.854934 150.78109 -212.767887 36.872429 -186.167731 284.265373 -53.190934 75.382423 -122.580834 24.242413 -31.514516 58.500243 -10.430031 140.047747 -237.762319 46.97275 -180.764568 311.480621 -65.709208 75.653022 -132.580092 29.251673 -11.52124 23.815783 -4.438532 52.126256 -95.730629 19.745268 -68.777242 124.527905 -27.359875 29.228937 -52.757417 12.088909 -1.492253 3.233297 -0.61419 6.757361 -12.929789 2.718016 -9.027904 16.764317 -3.750698 3.868932 -7.087305 1.651818 8.25 4.80422 0.30666 -0.01375 0.30784 -0.068 -0.0073 2.277944 -0.964799 0.213693 -0.020064 0.000681 -0.682212 0.407086 -0.089573 0.008493 -0.000294 0.528 0.433 0.417 0.459 0.496 0.800 0.822 0.844
0.03981 1.4 0.643 -0.3943 0.0311 -0.087 0 0 0 0 0 0 -0.154 -0.466 0 0 0 -0.00733 -0.00291 -0.00488 -0.0058 -0.00182 -0.0042 -0.00769 -0.00536 -0.00547 -10.329681 9.721835 -2.428972 43.883537 -42.554849 10.081315 -39.435885 56.809793 -13.528548 10.662441 -23.902673 5.852527 -11.523864 27.277669 -7.084519 59.612501 -107.034131 29.134065 -69.920435 135.78203 -38.279502 26.646272 -56.063821 16.245126 -5.065915 13.83251 -3.600758 26.701714 -53.571284 14.804109 -33.683484 67.573516 -19.42278 13.70416 -27.873688 8.233201 -0.742827 2.08603 -0.542555 3.869187 -8.046313 2.231618 -5.039166 10.136535 -2.928032 2.104843 -4.183466 1.24144 8.1 4.71025 0.30748 -0.01584 0.30733 -0.0727 -0.007 2.425762 -1.048628 0.232494 -0.021999 0.000756 -0.811617 0.481302 -0.105652 0.010042 -0.00035 0.544 0.441 0.422 0.466 0.498 0.818 0.841 0.860
0.05012 1.3 0.62 -0.3872 0.0303 -0.037 0 0 0 0 0 0 -0.134 -0.368 0 0 0 -0.00761 -0.00299 -0.00508 -0.00598 -0.00187 -0.00431 -0.00806 -0.0056 -0.00571 27.085064 -63.719099 13.834259 -111.853248 249.144665 -59.288528 168.949914 -319.851084 80.35663 -78.758592 134.893708 -35.057929 23.637977 -38.463885 6.9208 -85.747788 155.869439 -31.159347 125.072682 -205.760151 43.923771 -57.313362 88.712074 -19.794897 8.194186 -10.055622 1.357385 -27.794144 42.435636 -6.686058 39.56707 -57.711116 10.038086 -17.922068 25.449342 -4.743307 0.997066 -0.962233 0.078463 -3.249763 4.250549 -0.473888 4.544393 -5.964419 0.796484 -2.04153 2.690324 -0.404916 7.4 4.42934 0.31661 -0.02109 0.31654 -0.0835 -0.0065 2.215697 -0.886227 0.191129 -0.017643 0.000592 -0.856308 0.494428 -0.106361 0.009953 -0.000343 0.563 0.455 0.426 0.474 0.501 0.840 0.865 0.881
0.06310 1.2 0.59 -0.3816 0.0302 0.076 0 0 0 0 0 0 -0.097 -0.249 0 0 0 -0.00794 -0.00307 -0.00535 -0.00618 -0.00188 -0.00445 -0.00842 -0.0058 -0.00595 51.166641 -130.666484 34.967023 -210.719519 498.951885 -142.564784 294.373089 -626.397294 186.575608 -129.806949 258.885062 -79.262114 46.691884 -99.668192 25.418002 -179.966129 386.435965 -104.697277 245.410515 -490.923198 138.370241 -106.562235 204.801216 -59.317817 16.994651 -32.642702 8.000598 -63.51458 127.996417 -33.238646 85.315364 -163.991495 44.279226 -36.683227 68.862481 -19.117712 2.163192 -3.880007 0.920976 -7.954353 15.342591 -3.853207 10.577232 -19.779754 5.165791 -4.517738 8.345031 -2.242844 6.85 4.18536 0.32679 -0.02767 0.32638 -0.0951 -0.006 1.501368 -0.385625 0.067907 -0.004809 0.00011 -0.780202 0.424442 -0.086695 0.00774 -0.000255 0.577 0.476 0.430 0.482 0.506 0.863 0.890 0.903
0.07943 1.1 0.55 -0.3694 0.0284 0.179 0 0 0 0 0 0 -0.01 -0.205 0 0 0 -0.00819 -0.00314 -0.00558 -0.0063 -0.00186 -0.00457 -0.0086 -0.00589 -0.00603 -7.763642 -66.149606 26.952316 8.97581 219.649317 -103.379076 15.405799 -240.457636 127.104568 -12.572634 86.735333 -50.71788 0.064604 -52.779583 20.630675 -6.928645 179.893747 -79.818698 26.556561 -201.956797 99.041055 -14.74867 74.692232 -39.890352 1.371378 -17.938684 6.785619 -5.622 62.274894 -26.43558 12.308399 -71.110282 33.03583 -6.09636 26.730934 -13.400089 0.278993 -2.198066 0.810585 -0.973691 7.731894 -3.174988 1.792855 -8.934275 3.98935 -0.841024 3.395967 -1.626842 6.1 3.86847 0.33558 -0.04859 0.33554 -0.1038 -0.0057 0.234847 0.470479 -0.13762 0.016237 -0.000672 -0.55048 0.252179 -0.042792 0.003069 -0.000077 0.577 0.498 0.434 0.490 0.512 0.877 0.906 0.918
0.1 1.0 0.501 -0.3524 0.0265 0.204 0 0 0 0 0 0 0.119 -0.304 0 0 0 -0.00829 -0.00318 -0.00571 -0.00633 -0.00183 -0.00457 -0.00852 -0.00582 -0.00584 -68.458481 28.829689 4.605562 237.82803 -164.266697 -8.836444 -274.764841 260.022051 -0.933937 107.138761 -125.374848 5.317074 -49.29738 21.631191 3.518638 177.983317 -122.615957 -7.061507 -206.833056 194.017768 0.06255 81.241286 -93.659822 3.599962 -15.600124 6.982888 1.149204 57.790512 -39.46235 -2.385281 -67.51015 62.458687 0.211002 26.679145 -30.191159 1.065238 -1.812877 0.816535 0.13729 6.835188 -4.612347 -0.294438 -8.019372 7.308278 0.048217 3.184593 -3.538278 0.113795 5.9 3.73063 0.34123 -0.06587 0.34086 -0.1082 -0.0056 -1.574873 1.682296 -0.42639 0.045587 -0.001754 -0.151856 -0.03276 0.027751 -0.004278 0.000198 0.560 0.512 0.436 0.494 0.520 0.875 0.905 0.920
0.12589 0.9 0.448 -0.3418 0.0252 0.167 0 0 0 0 0 0 0.239 -0.531 0 0 0 -0.00823 -0.00317 -0.00571 -0.00622 -0.00178 -0.00447 -0.00818 -0.00565 -0.00541 -57.661409 52.210153 -6.534009 177.243465 -234.368311 33.479926 -183.23805 322.707297 -52.189627 62.323522 -140.821243 25.32931 -42.110184 41.044747 -5.388708 133.944972 -182.937031 27.078094 -138.757168 251.119517 -41.730651 47.425905 -109.454024 20.112694 -13.483786 13.758383 -1.88179 43.870741 -61.008017 9.304197 -45.579787 83.569682 -14.2053 15.662299 -36.39955 6.807954 -1.58149 1.661143 -0.233866 5.224073 -7.340905 1.143415 -5.441877 10.042552 -1.734476 1.878435 -4.372726 0.828058 5.7 3.59587 0.34574 -0.0912 0.34554 -0.111 -0.0057 -3.703106 3.066794 -0.749185 0.077859 -0.002928 0.381196 -0.399304 0.116176 -0.013321 0.000533 0.534 0.509 0.435 0.493 0.528 0.856 0.887 0.907
0.15849 0.8 0.4 -0.3386 0.0252 0.116 0 0 0 0 0 0 0.298 -0.774 0 0 0 -0.00805 -0.00312 -0.00563 -0.00601 -0.00174 -0.00426 -0.00772 -0.00535 -0.00491 -24.177127 31.505749 -6.301202 42.856647 -131.442129 28.590153 -12.536823 166.623715 -40.386786 -9.725772 -66.403364 18.060282 -17.00014 26.159992 -5.545691 31.632532 -107.972584 24.720766 -8.170103 136.316367 -34.518011 -7.964177 -54.294589 15.318878 -5.341652 9.099537 -2.01801 10.347768 -37.283796 8.875047 -2.606959 46.936269 -12.284179 -2.643744 -18.685157 5.420392 -0.62009 1.12699 -0.257915 1.236343 -4.595344 1.123957 -0.31152 5.773783 -1.546427 -0.314799 -2.297616 0.679679 5.5 3.44976 0.35261 -0.12539 0.35251 -0.1113 -0.0059 -5.461329 4.154378 -0.991766 0.101168 -0.003747 0.890829 -0.737591 0.195269 -0.021186 0.000816 0.509 0.492 0.434 0.485 0.532 0.830 0.858 0.886
0.19953 0.7 0.356 -0.3262 0.0244 0.072 0 0 0 0 0 0 0.245 -0.963 0 0 0 -0.0077 -0.00304 -0.00544 -0.0057 -0.00171 -0.00401 -0.00712 -0.00496 -0.00445 -23.660344 26.464031 -5.426108 55.791231 -112.060621 24.636964 -47.626855 146.51806 -35.041156 11.280835 -60.888577 15.837994 -16.828254 21.447259 -4.63974 41.810074 -89.728015 20.704709 -35.876491 116.590896 -29.106723 8.681616 -48.298949 13.051859 -5.288994 7.319696 -1.66621 13.58268 -30.351731 7.324407 -11.670177 39.245374 -10.194418 2.853302 -16.212637 4.540574 -0.611269 0.893067 -0.211358 1.602063 -3.68013 0.919167 -1.373911 4.741356 -1.2702 0.336447 -1.954443 0.562998 5.55 3.39236 0.3603 -0.14134 0.35792 -0.1071 -0.0061 -6.701839 4.842848 -1.128498 0.112821 -0.004109 1.341265 -1.022197 0.25856 -0.027186 0.001023 0.498 0.470 0.433 0.475 0.531 0.810 0.834 0.866
0.25119 0.6 0.318 -0.3078 0.0219 0.043 0 0 0 0 0 0 0.104 -1.076 0 0 0 -0.00721 -0.00292 -0.00517 -0.00535 -0.00167 -0.00375 -0.0064 -0.00452 -0.00404 -14.445389 18.496667 -3.731559 24.548144 -76.141144 16.60388 -11.975991 97.214791 -23.169101 -2.324617 -39.552576 10.302406 -9.917548 15.168281 -3.258594 17.609536 -61.529618 14.221533 -8.005212 77.893013 -19.573251 -2.102768 -31.522354 8.616295 -2.996519 5.146787 -1.183173 5.366689 -20.638785 5.071155 -2.11815 25.924107 -6.892427 -0.886377 -10.42874 3.006621 -0.33248 0.62014 -0.150501 0.586776 -2.465349 0.636599 -0.184214 3.076553 -0.85724 -0.133498 -1.230828 0.371403 5.75 3.40241 0.35949 -0.14483 0.35761 -0.1022 -0.0064 -7.161937 4.9757 -1.125765 0.109686 -0.003908 1.647322 -1.197509 0.293011 -0.030007 0.001106 0.501 0.443 0.429 0.463 0.527 0.795 0.813 0.851
0.31623 0.5 0.287 -0.3085 0.0223 0.006 0 0 0 0 0 0 -0.117 -1.124 0 0 0 -0.00661 -0.00276 -0.00482 -0.00496 -0.00161 -0.00348 -0.00568 -0.00406 -0.00374 -12.457198 13.452604 -2.296076 25.041919 -56.590695 10.665132 -20.429261 74.083436 -15.332033 4.098787 -30.944402 6.964706 -8.496994 11.193186 -2.133387 17.892765 -46.107003 9.553985 -14.466532 59.589844 -13.399191 2.838654 -24.677585 5.981079 -2.575012 3.845386 -0.820948 5.531501 -15.578438 3.558698 -4.345622 19.906482 -4.882163 0.78274 -8.173392 2.145381 -0.287494 0.467366 -0.108764 0.618599 -1.870106 0.461095 -0.465766 2.367969 -0.622809 0.072099 -0.965107 0.270594 5.9 3.39048 0.36101 -0.14953 0.36028 -0.0931 -0.0068 -6.541656 4.34703 -0.933851 0.086626 -0.002949 1.717464 -1.199601 0.282951 -0.028015 0.001001 0.510 0.416 0.424 0.453 0.522 0.783 0.799 0.840
0.39810 0.4 0.26 -0.2966 0.0212 -0.036 0 0 0 0 0 0 -0.39 -1.125 0 0 0 -0.006 -0.00254 -0.00444 -0.00454 -0.00152 -0.00327 -0.00504 -0.00363 -0.00348 -11.171429 13.42476 -2.312676 23.828545 -55.212966 10.512492 -20.516676 71.266493 -14.856756 4.629921 -29.469083 6.663018 -8.78464 12.532116 -2.431218 21.431952 -50.473874 10.609339 -20.202742 64.49718 -14.605681 5.595161 -26.559974 6.436415 -2.889597 4.488225 -0.964385 7.435233 -17.804979 4.085591 -7.148481 22.53445 -5.513494 2.060159 -9.219452 2.395702 -0.339177 0.554542 -0.128487 0.890425 -2.174981 0.533644 -0.853481 2.73019 -0.710181 0.245231 -1.109827 0.305441 6.2 3.43447 0.35471 -0.14217 0.35926 -0.0852 -0.0071 -4.815439 2.969329 -0.56384 0.045648 -0.001339 1.511291 -1.006868 0.22483 -0.02103 0.00071 0.511 0.393 0.420 0.440 0.512 0.769 0.780 0.823
0.50119 0.3 0.236 -0.2813 0.0202 -0.068 0 0 0 0 0 0 -0.682 -1.105 -0.044 0.19 -0.202 -0.00534 -0.00228 -0.00397 -0.00411 -0.00142 -0.00295 -0.00443 -0.00316 -0.00318 -11.812408 13.810381 -2.093803 31.444778 -57.894833 9.877971 -33.699963 76.336005 -14.273426 11.435244 -32.281562 6.49964 -9.513158 13.27323 -2.411077 27.756507 -54.194612 10.699909 -30.847394 70.408832 -14.890386 11.001245 -29.52527 6.614863 -3.121738 4.754601 -0.982266 9.388295 -19.088831 4.203363 -10.460817 24.522232 -5.710879 3.74871 -10.200835 2.494789 -0.363552 0.584758 -0.132423 1.10234 -2.319098 0.55367 -1.219101 2.952493 -0.740247 0.433316 -1.219535 0.319613 6.45 3.4499 0.36074 -0.13591 0.36323 -0.0759 -0.0074 -2.478112 1.203996 -0.107894 -0.003352 0.000541 1.121871 -0.689355 0.136977 -0.011065 0.000311 0.503 0.380 0.417 0.426 0.497 0.756 0.761 0.803
0.63096 0.2 0.217 -0.265 0.0185 -0.123 0 0 0 0 0 0 -0.992 -1.079 -0.064 0.244 -0.193 -0.00468 -0.00201 -0.00345 -0.0037 -0.00135 -0.00258 -0.0037 -0.00276 -0.00294 -13.540851 17.055776 -2.804665 41.258829 -70.294928 12.710257 -48.22679 92.592344 -18.045488 18.39273 -39.458509 8.170882 -10.562035 16.115678 -3.036013 33.662132 -64.896749 13.201129 -39.691195 84.206632 -18.222828 15.298954 -35.52086 8.087504 -3.348491 5.651546 -1.17536 10.766342 -22.430135 4.978746 -12.609158 28.801371 -6.748344 4.830425 -12.053701 2.95528 -0.376572 0.680627 -0.152351 1.204459 -2.672388 0.633989 -1.39319 3.40324 -0.848446 0.527028 -1.414898 0.368028 6.8 3.52557 0.36154 -0.1252 0.36002 -0.0669 -0.0078 0.52648 -0.970882 0.435757 -0.060272 0.002678 0.511349 -0.224278 0.014578 0.002319 -0.000209 0.491 0.374 0.414 0.418 0.476 0.743 0.745 0.779
0.79433 0.1 0.201 -0.265 0.0188 -0.158 0 0 0 0 0 0 -1.223 -1.056 -0.083 0.296 -0.175 -0.00402 -0.00175 -0.00294 -0.0033 -0.00127 -0.00216 -0.00298 -0.00231 -0.00274 -9.791213 13.769991 -2.058023 28.968177 -57.064269 9.664295 -33.509414 75.202024 -13.947098 12.611617 -31.974638 6.360024 -8.121296 14.810735 -2.766638 25.532426 -59.413122 12.080842 -29.81416 76.696416 -16.65731 11.338682 -32.161415 7.365446 -2.735425 5.545806 -1.166749 8.765384 -21.891175 4.931566 -10.186372 27.94221 -6.652381 3.856514 -11.620005 2.896002 -0.324788 0.698059 -0.158691 1.044729 -2.726708 0.657992 -1.203449 3.453349 -0.875747 0.451513 -1.427377 0.377501 7.1 3.57136 0.35653 -0.11932 0.35702 -0.0576 -0.0082 3.425185 -2.991325 0.921437 -0.10922 0.00445 -0.149513 0.260358 -0.108515 0.015353 -0.000702 0.483 0.375 0.409 0.416 0.458 0.736 0.740 0.764
1.0 0.0 0.189 -0.2662 0.0201 -0.177 0 0 0 0 0 0 -1.308 -1.028 -0.103 0.341 -0.168 -0.00341 -0.00154 -0.00251 -0.0029 -0.00121 -0.00183 -0.00234 -0.00187 -0.00261 -7.325605 13.614002 -2.386626 20.485389 -55.648435 10.84517 -22.683851 72.724816 -15.358254 8.102878 -30.762067 6.924623 -6.60002 15.889768 -3.329883 20.220607 -62.917428 14.24876 -22.94835 80.523509 -19.391391 8.442674 -33.570142 8.501097 -2.399851 6.205665 -1.42624 7.626841 -24.238636 5.9471 -8.706471 30.726631 -7.950366 3.229578 -12.720858 3.440594 -0.304166 0.805239 -0.196305 0.983223 -3.119945 0.806391 -1.12337 3.932134 -1.066722 0.417681 -1.620806 0.458069 7.4 3.61643 0.35977 -0.11193 0.35949 -0.0457 -0.0084 5.949047 -4.671327 1.304611 -0.14567 0.00569 -0.807548 0.726202 -0.222759 0.027034 -0.001128 0.472 0.383 0.404 0.413 0.439 0.730 0.735 0.750
1.25892 -0.1 0.179 -0.2782 0.0225 -0.177 -0.007 -0.052 0.019 -0.034 -0.017 0 -1.209 -1.015 -0.12 0.388 -0.146 -0.00282 -0.00137 -0.00209 -0.00255 -0.00118 -0.00147 -0.002 -0.00136 -0.00225 -8.939958 14.674685 -2.50588 28.48766 -60.533728 11.447177 -33.74954 79.839562 -16.272449 13.110558 -34.093951 7.363532 -6.823299 16.349183 -3.378252 21.945586 -64.788429 14.473168 -25.43702 83.014323 -19.700187 9.622979 -34.666492 8.636822 -2.230057 6.227992 -1.418814 7.139707 -24.239425 5.910868 -8.089198 30.632422 -7.8882 2.98057 -12.650068 3.407453 -0.262857 0.794673 -0.192817 0.833591 -3.058636 0.790049 -0.924593 3.831317 -1.041746 0.332274 -1.570612 0.445901 7.7 3.65455 0.3576 -0.10572 0.35653 -0.0344 -0.0083 7.368131 -5.499974 1.459253 -0.156522 0.00591 -1.284003 1.045681 -0.29604 0.033957 -0.001359 0.457 0.390 0.403 0.408 0.429 0.724 0.727 0.738
1.58489 -0.2 0.168 -0.2895 0.0246 -0.177 0.017 -0.118 -0.006 -0.033 -0.015 0 -1.038 -1.003 -0.134 0.411 -0.126 -0.00227 -0.00124 -0.00162 -0.00228 -0.0012 -0.00102 -0.00185 -0.00098 -0.00195 -15.626776 19.508059 -3.560531 56.985234 -81.272413 15.949639 -71.946571 108.418298 -22.436338 29.788741 -46.830402 10.092356 -11.768413 20.485075 -4.373144 43.018477 -82.174059 18.668326 -53.780421 106.598489 -25.382767 22.029363 -45.049168 11.130096 -3.820418 7.664231 -1.778515 13.91816 -30.2218 7.420629 -17.228546 38.688876 -9.924873 6.988037 -16.176321 4.297893 -0.449459 0.970762 -0.237817 1.629299 -3.788807 0.978539 -1.999441 4.811048 -1.295541 0.804229 -1.998174 0.556679 7 3.28577 0.60973 -0.08858 0.3841 -0.0247 -0.0073 7.648531 -5.478711 1.393184 -0.143506 0.00521 -1.556335 1.20724 -0.326813 0.03613 -0.001401 0.435 0.394 0.407 0.400 0.422 0.714 0.710 0.723
1.99526 -0.3 0.155 -0.3002 0.0273 -0.177 0.029 -0.167 -0.038 -0.022 0.012 0 -0.877 -1.005 -0.141 0.422 -0.074 -0.00174 -0.00116 -0.0011 -0.00209 -0.00121 -0.00055 -0.00183 -0.00081 -0.00177 -20.196064 21.605585 -3.967159 76.955882 -91.937133 18.009102 -99.17849 124.888604 -25.625691 41.854937 -54.793264 11.641878 -14.237646 21.4583 -4.630733 54.204954 -87.447024 19.980398 -69.387392 115.040466 -27.41286 29.065915 -49.224908 12.113529 -4.450964 7.879438 -1.854116 16.869748 -31.468194 7.806779 -21.431777 40.755464 -10.521754 8.912003 -17.220691 4.586453 -0.512949 0.990385 -0.246374 1.934245 -3.908465 1.022173 -2.440762 5.014605 -1.362831 1.0086 -2.102644 0.589139 7.05 3.205 0.68673 -0.07303 0.40216 -0.0158 -0.0048 6.500901 -4.413972 1.059609 -0.101708 0.003404 -1.529644 1.14411 -0.298233 0.031732 -0.001184 0.421 0.392 0.409 0.394 0.421 0.706 0.697 0.713
2.51189 -0.4 0.142 -0.3243 0.0323 -0.177 0.046 -0.200 -0.067 -0.014 0.070 0 -0.787 -0.989 -0.145 0.405 -0.012 -0.00127 -0.0011 -0.00071 -0.00188 -0.00117 -0.00018 -0.00176 -0.00081 -0.00169 -21.025654 20.914681 -3.65759 80.254159 -89.929962 16.838693 -102.867295 122.982399 -24.163864 43.215191 -54.20406 11.039272 -14.058824 20.33818 -4.315139 53.578857 -83.280809 18.729752 -68.270807 109.860542 -25.776517 28.477607 -47.081453 11.408887 -4.220327 7.384426 -1.728543 16.004347 -29.551045 7.303437 -20.246004 38.293665 -9.856116 8.382355 -16.175944 4.297066 -0.471012 0.920307 -0.229126 1.77601 -3.633509 0.952913 -2.231373 4.65817 -1.271094 0.917647 -1.950334 0.549199 7.2 3.15761 0.71829 -0.06331 0.42226 -0.0092 -0.0027 4.471238 -2.731619 0.575555 -0.044609 0.001042 -1.296131 0.930304 -0.231209 0.023231 -0.000811 0.422 0.390 0.401 0.386 0.413 0.701 0.693 0.708
3.16228 -0.5 0.129 -0.3363 0.0348 -0.177 0.068 -0.233 -0.029 -0.039 0.146 0 -0.739 -0.957 -0.147 0.38 0.088 -0.00087 -0.00108 -0.00052 -0.00167 -0.00112 0 -0.00177 -0.00082 -0.00153 -22.741065 22.411323 -4.096293 86.63212 -97.020411 18.794135 -110.234252 133.365048 -26.940861 46.034113 -59.021914 12.307843 -15.263972 21.425521 -4.717232 58.109433 -88.201374 20.443717 -73.576682 116.846948 -28.124593 30.529589 -50.252 12.45084 -4.61871 7.768048 -1.881403 17.508884 -31.215017 7.940036 -22.024665 40.586946 -10.711395 9.075048 -17.193783 4.670423 -0.520072 0.970347 -0.249365 1.961472 -3.844432 1.036151 -2.451769 4.94279 -1.381728 1.003782 -2.074601 0.597048 7.25 3.07444 0.79168 -0.0485 0.43945 -0.005 -0.0017 1.70041 -0.551013 -0.02409 0.023559 -0.001692 -0.856979 0.570437 -0.12792 0.010975 -0.0003 0.427 0.387 0.386 0.381 0.401 0.694 0.691 0.702
3.98107 -0.6 0.114 -0.353 0.0379 -0.177 0.097 -0.252 0.047 -0.071 0.222 0 -0.715 -0.892 -0.148 0.331 0.199 -0.0006 -0.00104 -0.00046 -0.00149 -0.0011 0 -0.00188 -0.00064 -0.00117 -26.264268 23.771252 -4.060054 100.472301 -103.646308 18.814282 -127.685929 143.161093 -27.132822 53.278586 -63.57275 12.444219 -17.751774 21.96526 -4.627096 68.116276 -91.178818 20.183777 -86.467332 121.521772 -27.87899 35.975417 -52.500411 12.374512 -5.401081 7.859048 -1.842007 20.706062 -31.830584 7.810706 -26.201897 41.632888 -10.56744 10.860052 -17.718161 4.616053 -0.611228 0.975707 -0.244152 2.338044 -3.894667 1.018348 -2.948565 5.036198 -1.361074 1.217733 -2.123459 0.588934 7.35 3.00688 0.83749 -0.03816 0.45682 -0.0015 -0.0015 -1.21286 1.659951 -0.609563 0.087637 -0.004167 -0.330958 0.159703 -0.015464 -0.001793 0.000211 0.429 0.381 0.368 0.373 0.395 0.681 0.684 0.697
5.01187 -0.7 0.101 -0.3689 0.041 -0.177 0.104 -0.261 0.119 -0.088 0.298 0 -0.727 -0.81 -0.14 0.266 0.313 -0.00045 -0.00095 -0.00026 -0.00133 -0.00109 0 -0.00191 -0.00048 -0.00078 -25.705775 24.076487 -3.870359 96.629913 -105.020322 18.090218 -120.70367 145.026697 -26.228237 49.610109 -64.360305 12.071078 -16.84009 21.159053 -4.272283 63.439586 -87.991892 18.758983 -79.104292 117.348296 -26.011167 32.390672 -50.693008 11.572278 -4.974984 7.359398 -1.677937 18.699921 -29.825945 7.145804 -23.214685 38.994195 -9.688467 9.45493 -16.577788 4.235971 -0.5486 0.896211 -0.220551 2.05407 -3.574324 0.922361 -2.537028 4.613769 -1.233766 1.027578 -1.940914 0.533737 7.4 2.8864 0.87617 -0.03008 0.49447 -0.0001 -0.0014 -3.889662 3.647243 -1.123764 0.142588 -0.006239 0.20708 -0.251394 0.094773 -0.014072 0.000694 0.416 0.368 0.350 0.356 0.384 0.657 0.660 0.676
6.30957 -0.8 0.089 -0.3897 0.0446 -0.177 0.127 -0.265 0.151 -0.101 0.370 0 -0.744 -0.721 -0.126 0.2 0.395 -0.00035 -0.00088 -0.00008 -0.00115 -0.00105 0 -0.00168 -0.00035 -0.00054 -24.77411 26.224526 -4.200479 90.85638 -112.942284 19.297359 -110.881403 154.63744 -27.703051 44.643788 -68.197225 12.669506 -16.399063 22.499296 -4.454105 60.312115 -92.857 19.391601 -73.538359 123.20744 -26.757826 29.522271 -53.029484 11.868641 -4.882647 7.738961 -1.723739 17.92392 -31.186636 7.297028 -21.769156 40.625276 -9.860528 8.69684 -17.22899 4.30281 -0.541658 0.936975 -0.225009 1.98114 -3.719234 0.936385 -2.394693 4.787192 -1.249196 0.951699 -2.010391 0.539632 7.4 2.71458 0.91369 -0.02248 0.54244 0.0001 -0.0014 -6.070844 5.214049 -1.513383 0.182277 -0.007654 0.699129 -0.617005 0.189848 -0.024315 0.001083 0.402 0.349 0.331 0.340 0.367 0.627 0.631 0.647
7.94328 -0.9 0.08 -0.4051 0.0472 -0.177 0.161 -0.270 0.184 -0.115 0.422 0 -0.758 -0.623 -0.111 0.151 0.42 -0.00032 -0.00091 -0.00003 -0.00093 -0.00098 0 -0.00124 -0.00031 -0.00049 -23.2554 27.269174 -4.355136 83.882593 -116.925661 19.896133 -100.723717 159.512369 -28.450802 39.959542 -70.128341 12.972515 -15.584863 23.227516 -4.54862 56.449509 -95.598076 19.739125 -67.769774 126.51015 -27.164746 26.812408 -54.31465 12.021431 -4.678472 7.958405 -1.748796 16.923356 -32.002733 7.384073 -20.239549 41.594188 -9.954634 7.966198 -17.599496 4.334423 -0.52156 0.961043 -0.22741 1.879887 -3.807686 0.944166 -2.236969 4.890666 -1.256707 0.87537 -2.049251 0.541691 7.45 2.59172 0.94723 -0.0155 0.57479 0 -0.0014 -7.967855 6.53621 -1.828753 0.212558 -0.008649 1.149224 -0.944491 0.272771 -0.032948 0.001398 0.392 0.321 0.312 0.327 0.334 0.596 0.603 0.607
10.000 -1.0 0.073 -0.4203 0.0498 -0.177 0.223 -0.271 0.246 -0.139 0.451 0 -0.729 -0.514 -0.098 0.121 0.395 -0.0003 -0.00106 -0.00022 -0.00072 -0.00092 0 -0.00076 -0.00018 -0.00062 -21.116565 26.950596 -4.295478 74.840821 -115.458924 19.652997 -88.397431 157.343534 -28.131669 34.546793 -69.08876 12.833902 -14.309344 23.080364 -4.515178 51.065975 -94.912402 19.60508 -60.439985 125.48986 -26.99314 23.597517 -53.822052 11.948725 -4.333707 7.93206 -1.740815 15.469595 -31.875118 7.352615 -18.261556 41.39963 -9.915528 7.099148 -17.503328 4.318339 -0.486169 0.95965 -0.226702 1.730713 -3.800122 0.941414 -2.034029 4.878348 -1.253388 0.786412 -2.042806 0.540368 7.45 2.41501 0.97695 -0.00945 0.61307 0 -0.0014 -8.935422 7.144835 -1.952403 0.221535 -0.008813 1.461981 -1.163289 0.32556 -0.038128 0.001573 0.392 0.289 0.293 0.324 0.303 0.568 0.585 0.573
""")
# constant table suffix
SUFFIX = "_if"
CONST_REGION = {"cc": 0.85, "rt": 150,
"cd0": 1.606, "cd1": 0.0097, "dp0": 25, "dp1": 55}
class HassaniAtkinson2020SSlab(HassaniAtkinson2020SInter):
"""
Hassani Atkinson (2020) for Subduction IntraSlab.
"""
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.SUBDUCTION_INTRASLAB
# constant table suffix
SUFFIX = "_is"
CONST_REGION = {"cc": 0.9, "rt": 250,
"cd0": 1.9241, "cd1": 0.0133, "dp0": 40, "dp1": 90}
class HassaniAtkinson2020Asc(HassaniAtkinson2020SInter):
"""
Hassani Atkinson (2020) for Crustal.
"""
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST
# constant table suffix
SUFFIX = "_cr"
CONST_REGION = {"cc": 0.45, "rt": 50,
"cd0": 2.5011, "cd1": 0, "dp0": 0, "dp1": 30}
Ported more GMPEs
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2015-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`HassaniAtkinson2020SInter`
:class:`HassaniAtkinson2020SSlab`
:class:`HassaniAtkinson2020Asc`
"""
import math
import numpy as np
from openquake.hazardlib import const
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib.imt import PGA, SA, PGV
CONSTANTS = {"mlf0": 5.5, "mlf1": 7, "f1": 0, "f3": 98.1,
"b1": -1.3, "b2": -0.5, "v0": 100, "v1": 250, "v2": 1000,
"zx0": 150, "zx1": 800, "zx2": 4200,
"cfp0": -0.011, "cfp1": 0.421, "cfp2": -0.604, "cfp3": 0.086,
"x0": 0.5, "x1": 1, "x2": 2.5, "vref": 760, "vmin": 360}
def _clf(suffix, C, mag):
"""
Low frequency calibration factor.
"""
clf0 = C['clf0' + suffix]
clf1 = C['clf1' + suffix]
mlf0 = CONSTANTS["mlf0"]
if mag > mlf0:
return clf0 + clf1 * (min(mag, CONSTANTS["mlf1"]) - mlf0)
return clf0
def _dsigma(creg, hypo_depth):
"""
Hypocentre depth factor.
"""
out = creg['cd0']
dp0 = creg['dp0']
if hypo_depth > dp0:
out += creg['cd1'] * (min(hypo_depth, creg['dp1']) - dp0)
return 10 ** out
def _fds_ha18(C, mag, dsigma):
"""
Dsigma factor.
"""
eds1 = np.polyval([C['g14'], C['g13'], C['g12'], C['g11'], C['g10']],
mag)
eds2 = np.polyval([C['g24'], C['g23'], C['g22'], C['g21'], C['g20']],
mag)
eds0 = -2 * eds1 - 4 * eds2
return eds0 + eds1 * math.log10(dsigma) \
+ eds2 * math.log10(dsigma) ** 2
def _ffpeak(C, imt, fpeak):
"""
Fpeak factor.
"""
if imt.string[:2] != "SA" or max(fpeak) <= 0:
# pgv, pga or unknown fpeak
return 0
s = CONSTANTS
x = fpeak / 10 ** C['f']
ffpeak = np.where(fpeak <= 0, 0, s['cfp0'])
idx = np.where((s['x0'] < x) & (x <= s['x1']))
ffpeak[idx] = s['cfp0'] + s['cfp1'] * np.log10(x[idx] / s['x0'])
idx = np.where((s['x1'] < x) & (x <= s['x2']))
ffpeak[idx] = s['cfp0'] + s['cfp1'] * math.log10(s['x1'] / s['x0']) \
+ s['cfp2'] * np.log10(x[idx] / s['x1'])
idx = np.where(s['x2'] < x)
ffpeak[idx] = s['cfp0'] + s['cfp1'] * math.log10(s['x1'] / s['x0']) \
+ s['cfp2'] * math.log10(s['x2'] / s['x1']) \
+ s['cfp3'] * np.log10(x[idx] / s['x2'])
return ffpeak
def _fgamma(suffix, backarc, forearc_ne, forearc_sw, C, rrup):
"""
Gamma factor.
"""
# proportion sum for normalised values with rrup factor
p_sum = rrup / (backarc + forearc_ne + forearc_sw)
return C['barc' + suffix] * backarc * p_sum \
+ C['farc_ne' + suffix] * forearc_ne * p_sum \
+ C['farc_sw' + suffix] * forearc_sw * p_sum
def _fkp_ha18(kappa, C, mag, dsigma):
"""
Kappa factor for B/C site condition of Japan.
"""
l10kp = math.log10(kappa)
p = np.zeros(4)
ek0 = np.zeros(4)
for i in range(4):
for j in range(4):
p[j] = np.polyval([C[f'd{i}{j}2'], C[f'd{i}{j}1'],
C[f'd{i}{j}0']], math.log10(dsigma))
ek0[i] = np.polyval(p[::-1], math.log10(mag))
return 3 * ek0[0] - 9 * ek0[1] + 27 * ek0[2] - 81 * ek0[3] \
+ ek0[0] * l10kp + ek0[1] * l10kp ** 2 \
+ ek0[2] * l10kp ** 3 + ek0[3] * l10kp ** 4
def _fm_ha18(C, mag):
"""
Magnitude factor.
"""
if mag <= C['mh']:
return C['e0'] + C['e1'] * (mag - C['mh']) \
+ C['e2'] * (mag - C['mh']) ** 2
return C['e0'] + C['e3'] * (mag - C['mh'])
def _fsnonlin_ss14(C, vs30, pga_rock):
"""
Non-linear factor.
"""
s = CONSTANTS
f2 = C['f4'] * (np.exp(C['f5']
* (np.minimum(vs30, s['vref']) - s['vmin']))
- math.exp(C['f5'] * (s['vref'] - s['vmin'])))
return s['f1'] + f2 * np.log((pga_rock + s['f3']) / s['f3'])
def _fvs30(C, vs30):
"""
Vs30 factor.
"""
s = CONSTANTS
fvs30 = np.where(vs30 <= s['v0'],
C['cv1'] * math.log10(s['v0'] / s['vref'])
+ (C['cv2'] - C['cv1'])
* math.log10(s['v1'] / s['vref']),
C['cv2'] * math.log10(s['v2'] / s['vref']))
fvs30 = np.where((s['v0'] < vs30) & (vs30 <= s['v1']),
C['cv1'] * np.log10(vs30 / s['vref'])
+ (C['cv2'] - C['cv1'])
* math.log10(s['v1'] / s['vref']), fvs30)
return np.where((s['v1'] < vs30) & (vs30 <= s['v2']),
C['cv2'] * np.log10(vs30 / s['vref']), fvs30)
def _fz2pt5(C, z2pt5):
"""
Z2pt5 factor.
"""
s = CONSTANTS
fz2pt5 = np.where(z2pt5 >= 0, C['cz0'], 0)
idx = np.where((s['zx0'] < z2pt5) & (z2pt5 <= s['zx1']))
fz2pt5[idx] = C['cz0'] + C['cz1'] * np.log10(z2pt5[idx] / s['zx0'])
idx = np.where((s['zx1'] < z2pt5) & (z2pt5 <= s['zx2']))
fz2pt5[idx] = C['cz0'] + C['cz1'] * math.log10(s['zx1'] / s['zx0']) \
+ C['cz2'] * np.log10(z2pt5[idx] / s['zx1'])
idx = np.where(s['zx2'] < z2pt5)
fz2pt5[idx] = C['cz0'] + C['cz1'] * math.log10(s['zx1'] / s['zx0']) \
+ C['cz2'] * math.log10(s['zx2'] / s['zx1'])
return fz2pt5
def _fz_ha18(rt, C, mag, rrup):
"""
Z factor.
"""
s = CONSTANTS
h = 10 ** (-0.405 + 0.235 * mag)
ref = np.sqrt(rrup ** 2 + h ** 2)
rref = math.sqrt(1 ** 2 + h ** 2)
return np.where(ref <= rt, s['b1'] * np.log10(ref)
+ (C['b3'] + C['b4'] * mag) * np.log10(ref / rref),
s['b1'] * math.log10(rt)
+ s['b2'] * np.log10(ref / rt)
+ (C['b3'] + C['b4'] * mag) * np.log10(ref / rref))
def get_stddevs(suffix, C):
"""
Between event standard deviations as tau.
Intra event from site to site stddev and within site stddev.
Total given in COEFFS to 3dp.
"""
return [C['s' + suffix], C['tau'],
math.sqrt(C['ps2s'] ** 2 + C['pss' + suffix] ** 2)]
class HassaniAtkinson2020SInter(GMPE):
"""
Hassani Atkinson (2020) for Subduction Interface.
"""
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.SUBDUCTION_INTERFACE
#: Supported intensity measure types are spectral acceleration,
#: peak ground acceleration and peak ground velocity
DEFINED_FOR_INTENSITY_MEASURE_TYPES = {PGV, PGA, SA}
#: Supported intensity measure component is the geometric mean component
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.AVERAGE_HORIZONTAL
DEFINED_FOR_STANDARD_DEVIATION_TYPES = {
const.StdDev.TOTAL, const.StdDev.INTER_EVENT, const.StdDev.INTRA_EVENT}
REQUIRES_DISTANCES = {'rrup'}
REQUIRES_RUPTURE_PARAMETERS = {'hypo_depth', 'mag'}
REQUIRES_SITES_PARAMETERS = {'fpeak', 'vs30', 'z2pt5'}
REQUIRES_ATTRIBUTES = {'kappa', 'backarc', 'forearc_ne', 'forearc_sw'}
def __init__(self, kappa=0.04, backarc=0, forearc_ne=1, forearc_sw=0,
**kwargs):
"""
Aditional parameters.
"""
super().__init__(kappa=kappa, backarc=backarc, forearc_ne=forearc_ne,
forearc_sw=forearc_sw, **kwargs)
# kappa parameter
self.kappa = kappa
# set proportion of rrups in backarc, forearc_ne and forearc_sw
self.backarc = backarc
self.forearc_ne = forearc_ne
self.forearc_sw = forearc_sw
def compute(self, ctx, imts, mean, sig, tau, phi):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.compute>`
for spec of input and result values.
"""
C_PGA = self.COEFFS[PGA()]
dsigma = _dsigma(self.CONST_REGION, ctx.hypo_depth)
fm_pga = _fm_ha18(C_PGA, ctx.mag)
fz_pga = _fz_ha18(self.CONST_REGION['rt'], C_PGA, ctx.mag, ctx.rrup)
fdsigma_pga = _fds_ha18(C_PGA, ctx.mag, dsigma)
fgamma_pga = _fgamma(self.SUFFIX, self.backarc, self.forearc_ne,
self.forearc_sw, C_PGA, ctx.rrup)
fkappa_pga = _fkp_ha18(self.kappa, C_PGA, ctx.mag, dsigma)
clf_pga = _clf(self.SUFFIX, C_PGA, ctx.mag)
pga_rock = 10 ** (fm_pga + fz_pga + fdsigma_pga + fkappa_pga +
fgamma_pga + self.CONST_REGION['cc'] +
clf_pga + C_PGA['chf'] + C_PGA['amp_cr'])
for m, imt in enumerate(imts):
C = self.COEFFS[imt]
fm = _fm_ha18(C, ctx.mag)
fz = _fz_ha18(self.CONST_REGION['rt'], C, ctx.mag, ctx.rrup)
fdsigma = _fds_ha18(C, ctx.mag, dsigma)
fkappa = _fkp_ha18(self.kappa, C, ctx.mag, dsigma)
fgamma = _fgamma(self.SUFFIX, self.backarc, self.forearc_ne,
self.forearc_sw, C, ctx.rrup)
clf = _clf(self.SUFFIX, C, ctx.mag)
fsnonlin = _fsnonlin_ss14(C, ctx.vs30, pga_rock)
fvs30 = _fvs30(C, ctx.vs30)
fz2pt5 = _fz2pt5(C, ctx.z2pt5)
ffpeak = _ffpeak(C, imt, ctx.fpeak)
mean[m] = 10 ** (fm + fdsigma + fz + fkappa + fgamma
+ self.CONST_REGION['cc'] + clf + C['chf']
+ C['amp_cr'] + fvs30 + fz2pt5 + ffpeak +
fsnonlin)
if imt.string != "PGV":
# pgv in cm/s
# sa and psa in cm/s^2
mean[m] /= 981.
mean[m] = np.log(mean[m])
sig[m], tau[m], phi[m] = get_stddevs(self.SUFFIX, C)
# periods given by 1 / 10 ** COEFFS['f']
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT f amp_cr b3 b4 chf clf0_if clf1_if clf0_is clf1_is clf0_cr clf1_cr cv1 cv2 cz0 cz1 cz2 barc_if farc_ne_if farc_sw_if barc_is farc_ne_is farc_sw_is barc_cr farc_ne_cr farc_sw_cr d000 d001 d002 d010 d011 d012 d020 d021 d022 d030 d031 d032 d100 d101 d102 d110 d111 d112 d120 d121 d122 d130 d131 d132 d200 d201 d202 d210 d211 d212 d220 d221 d222 d230 d231 d232 d300 d301 d302 d310 d311 d312 d320 d321 d322 d330 d331 d332 mh e0 e1 e2 e3 f4 f5 g10 g11 g12 g13 g14 g20 g21 g22 g23 g24 tau ps2s pss_if pss_is pss_cr s_if s_is s_cr
pgv -9 0 -0.4414 0.0394 0.204 0 0 0 0 0 0 -0.667 -0.866 -0.046 0.166 -0.068 -0.00281 -0.0015 -0.00048 -0.00327 -0.00148 -0.0024 -0.00406 -0.00242 -0.00278 -40.826164 40.113039 -5.102129 155.252313 -182.36868 27.033349 -196.671754 258.210795 -42.36533 82.357479 -116.454408 20.537609 -31.558927 36.874774 -5.739549 125.277771 -161.306518 27.874045 -163.436869 223.266447 -41.391833 69.903157 -99.25266 19.350221 -10.37628 13.051401 -2.256509 41.867738 -56.055028 10.530332 -55.192594 76.696014 -15.238462 23.77607 -33.832713 6.996974 -1.229433 1.604051 -0.296017 4.993482 -6.816711 1.350202 -6.609082 9.262415 -1.92463 2.854701 -4.066423 0.874373 8.8 3.30208 0.43897 -0.01672 0.41464 -0.0434 -0.0084 0.444251 0.056972 0.003644 -0.004046 0.000353 -0.094203 0.053343 -0.014163 0.001919 -0.0001 0.482 0.293 0.386 0.376 0.401 0.683 0.678 0.692
pga -9 0 -0.4565 0.0382 0.574 0 0 0 0 0 0 -0.16 -0.612 0 0 0 -0.00684 -0.00273 -0.00456 -0.00541 -0.00167 -0.00396 -0.00688 -0.00476 -0.00484 -51.098718 32.578796 -0.602711 193.376939 -154.536656 8.970141 -246.377014 224.30934 -19.056039 103.867407 -102.862801 10.767404 -44.562063 38.708279 -3.922374 176.725188 -170.793471 21.015529 -230.932652 237.86616 -33.083719 99.136459 -106.274886 16.078647 -16.422438 15.847931 -2.168642 65.997737 -67.973039 10.385514 -87.070182 93.033435 -15.329737 37.638662 -41.09486 7.148396 -2.134042 2.150893 -0.337012 8.618758 -9.089596 1.542771 -11.416684 12.32449 -2.213236 4.950046 -5.410116 1.012283 7.4 4.37376 0.33001 -0.0094 0.30153 -0.0651 -0.007 0.709663 -0.038392 0.013081 -0.001138 0.000024 -0.032385 0.019043 -0.004517 0.000375 -0.000009 0.513 0.422 0.411 0.452 0.496 0.781 0.803 0.829
0.01 2.0 0.678 -0.4321 0.0353 -0.09 0 0 0 0 0 0 -0.163 -0.598 0 0 0 -0.00686 -0.00274 -0.00457 -0.00544 -0.00168 -0.00398 -0.00693 -0.0048 -0.00489 -41.010798 20.984834 4.332932 143.725915 -112.573964 -8.798757 -185.502717 174.262586 2.145698 78.793199 -83.128703 2.380509 -35.56513 27.797776 0.50508 130.2847 -131.140942 4.976749 -173.646134 190.435518 -13.863902 75.476551 -87.53478 8.453463 -13.145747 11.601671 -0.552587 48.068218 -52.451102 4.478831 -64.765944 74.387032 -8.20745 28.388967 -33.704532 4.310521 -1.715884 1.576223 -0.13206 6.206072 -6.977314 0.786463 -8.387874 9.776758 -1.295275 3.688342 -4.397405 0.644874 7.15 4.64514 0.34404 -0.00816 0.30565 -0.0644 -0.007 1.472416 -0.541245 0.133779 -0.013606 0.000492 -0.222277 0.146276 -0.035353 0.00358 -0.00013 0.513 0.423 0.411 0.450 0.496 0.782 0.803 0.830
0.01259 1.9 0.677 -0.4335 0.036 -0.089 0 0 0 0 0 0 -0.165 -0.593 0 0 0 -0.00688 -0.00275 -0.00458 -0.00546 -0.00169 -0.00399 -0.00696 -0.00482 -0.00492 -36.828522 19.569252 6.125909 133.799687 -108.928441 -14.579428 -175.108029 171.43759 8.453326 74.844597 -82.543522 0.054735 -31.17228 25.351334 2.531652 118.752325 -123.338464 -1.805442 -161.173356 182.153805 -6.194116 70.665151 -84.601815 5.529234 -11.281178 10.204519 0.356264 42.754302 -47.641396 1.35097 -58.876236 68.854243 -4.5789 26.088198 -31.574179 2.895149 -1.449797 1.338067 0.003661 5.403172 -6.129093 0.310717 -7.485634 8.768873 -0.734349 3.333472 -3.997333 0.422956 7.4 4.69775 0.33848 -0.00825 0.3049 -0.0642 -0.0071 1.554224 -0.561608 0.1314 -0.012766 0.000443 -0.26475 0.165203 -0.037932 0.003683 -0.000129 0.513 0.424 0.411 0.451 0.496 0.782 0.804 0.830
0.01585 1.8 0.676 -0.4282 0.0351 -0.092 0 0 0 0 0 0 -0.168 -0.587 0 0 0 -0.0069 -0.00276 -0.00459 -0.00547 -0.0017 -0.004 -0.00699 -0.00485 -0.00494 -33.843087 21.852772 6.415627 132.533545 -119.657522 -14.289425 -178.466424 186.886925 6.835534 77.884983 -89.602175 1.106684 -28.577173 27.547838 3.040521 118.556209 -134.307003 -2.241058 -166.146429 198.547813 -7.035861 74.454737 -92.292678 6.313616 -10.409129 11.098325 0.623752 43.180509 -52.297863 0.97445 -61.611402 75.987898 -4.711912 27.946807 -34.97869 3.144502 -1.350461 1.465128 0.046401 5.522435 -6.807942 0.240613 -7.948998 9.823776 -0.734948 3.628394 -4.505746 0.452039 7.5 4.69913 0.33602 -0.00894 0.30604 -0.0641 -0.0072 1.714652 -0.6427 0.145153 -0.013649 0.000459 -0.34998 0.21312 -0.047606 0.004516 -0.000155 0.513 0.424 0.412 0.451 0.496 0.783 0.805 0.831
0.01995 1.7 0.672 -0.4209 0.0344 -0.102 0 0 0 0 0 0 -0.171 -0.578 0 0 0 -0.00693 -0.00277 -0.00461 -0.00549 -0.00171 -0.00402 -0.00704 -0.00488 -0.00498 -42.827733 41.107588 1.767006 175.999485 -195.196015 4.995207 -233.844386 282.760331 -18.604775 100.440836 -129.207506 11.927987 -36.362289 45.183985 -0.914573 157.333408 -204.021974 14.524812 -216.204378 287.545597 -29.45818 95.015702 -129.226338 15.940451 -13.041058 17.526987 -0.713597 56.776641 -77.875531 6.778782 -79.377281 108.793041 -12.585143 35.289638 -48.639106 6.556052 -1.659777 2.280063 -0.111625 7.178509 -10.068016 0.942713 -10.137835 14.021054 -1.700233 4.53801 -6.258265 0.873887 7.8 4.75561 0.32694 -0.01031 0.30339 -0.0639 -0.0073 1.912143 -0.751222 0.167414 -0.015674 0.000528 -0.447888 0.269464 -0.059636 0.005644 -0.000194 0.515 0.426 0.412 0.452 0.496 0.785 0.806 0.832
0.02512 1.6 0.667 -0.4098 0.0331 -0.109 0 0 0 0 0 0 -0.174 -0.557 0 0 0 -0.007 -0.0028 -0.00466 -0.00554 -0.00173 -0.00405 -0.00715 -0.00497 -0.00508 -48.479722 55.7785 -4.013399 203.174441 -245.465754 25.938538 -266.708211 340.075945 -43.592027 113.93429 -151.062403 21.79371 -42.838283 60.841219 -6.458254 188.720316 -259.25022 35.007115 -255.708778 352.33067 -54.311583 111.649687 -154.593101 25.898767 -15.836581 23.981785 -2.828785 70.439152 -101.059928 14.712066 -96.989307 136.457275 -22.332544 42.812057 -59.638179 10.503095 -2.064808 3.184205 -0.391809 9.16487 -13.355305 2.005714 -12.736528 17.987595 -3.018564 5.657377 -7.850825 1.411863 8.95 5.06565 0.29638 -0.01193 0.29694 -0.0653 -0.0074 2.153914 -0.892759 0.198231 -0.018627 0.000633 -0.571979 0.342742 -0.075675 0.007183 -0.000249 0.519 0.428 0.414 0.455 0.496 0.790 0.812 0.836
0.03162 1.5 0.658 -0.4075 0.0327 -0.105 0 0 0 0 0 0 -0.17 -0.522 0 0 0 -0.00712 -0.00285 -0.00474 -0.00565 -0.00177 -0.00411 -0.00737 -0.00513 -0.00524 -36.454347 50.747067 -7.854934 150.78109 -212.767887 36.872429 -186.167731 284.265373 -53.190934 75.382423 -122.580834 24.242413 -31.514516 58.500243 -10.430031 140.047747 -237.762319 46.97275 -180.764568 311.480621 -65.709208 75.653022 -132.580092 29.251673 -11.52124 23.815783 -4.438532 52.126256 -95.730629 19.745268 -68.777242 124.527905 -27.359875 29.228937 -52.757417 12.088909 -1.492253 3.233297 -0.61419 6.757361 -12.929789 2.718016 -9.027904 16.764317 -3.750698 3.868932 -7.087305 1.651818 8.25 4.80422 0.30666 -0.01375 0.30784 -0.068 -0.0073 2.277944 -0.964799 0.213693 -0.020064 0.000681 -0.682212 0.407086 -0.089573 0.008493 -0.000294 0.528 0.433 0.417 0.459 0.496 0.800 0.822 0.844
0.03981 1.4 0.643 -0.3943 0.0311 -0.087 0 0 0 0 0 0 -0.154 -0.466 0 0 0 -0.00733 -0.00291 -0.00488 -0.0058 -0.00182 -0.0042 -0.00769 -0.00536 -0.00547 -10.329681 9.721835 -2.428972 43.883537 -42.554849 10.081315 -39.435885 56.809793 -13.528548 10.662441 -23.902673 5.852527 -11.523864 27.277669 -7.084519 59.612501 -107.034131 29.134065 -69.920435 135.78203 -38.279502 26.646272 -56.063821 16.245126 -5.065915 13.83251 -3.600758 26.701714 -53.571284 14.804109 -33.683484 67.573516 -19.42278 13.70416 -27.873688 8.233201 -0.742827 2.08603 -0.542555 3.869187 -8.046313 2.231618 -5.039166 10.136535 -2.928032 2.104843 -4.183466 1.24144 8.1 4.71025 0.30748 -0.01584 0.30733 -0.0727 -0.007 2.425762 -1.048628 0.232494 -0.021999 0.000756 -0.811617 0.481302 -0.105652 0.010042 -0.00035 0.544 0.441 0.422 0.466 0.498 0.818 0.841 0.860
0.05012 1.3 0.62 -0.3872 0.0303 -0.037 0 0 0 0 0 0 -0.134 -0.368 0 0 0 -0.00761 -0.00299 -0.00508 -0.00598 -0.00187 -0.00431 -0.00806 -0.0056 -0.00571 27.085064 -63.719099 13.834259 -111.853248 249.144665 -59.288528 168.949914 -319.851084 80.35663 -78.758592 134.893708 -35.057929 23.637977 -38.463885 6.9208 -85.747788 155.869439 -31.159347 125.072682 -205.760151 43.923771 -57.313362 88.712074 -19.794897 8.194186 -10.055622 1.357385 -27.794144 42.435636 -6.686058 39.56707 -57.711116 10.038086 -17.922068 25.449342 -4.743307 0.997066 -0.962233 0.078463 -3.249763 4.250549 -0.473888 4.544393 -5.964419 0.796484 -2.04153 2.690324 -0.404916 7.4 4.42934 0.31661 -0.02109 0.31654 -0.0835 -0.0065 2.215697 -0.886227 0.191129 -0.017643 0.000592 -0.856308 0.494428 -0.106361 0.009953 -0.000343 0.563 0.455 0.426 0.474 0.501 0.840 0.865 0.881
0.06310 1.2 0.59 -0.3816 0.0302 0.076 0 0 0 0 0 0 -0.097 -0.249 0 0 0 -0.00794 -0.00307 -0.00535 -0.00618 -0.00188 -0.00445 -0.00842 -0.0058 -0.00595 51.166641 -130.666484 34.967023 -210.719519 498.951885 -142.564784 294.373089 -626.397294 186.575608 -129.806949 258.885062 -79.262114 46.691884 -99.668192 25.418002 -179.966129 386.435965 -104.697277 245.410515 -490.923198 138.370241 -106.562235 204.801216 -59.317817 16.994651 -32.642702 8.000598 -63.51458 127.996417 -33.238646 85.315364 -163.991495 44.279226 -36.683227 68.862481 -19.117712 2.163192 -3.880007 0.920976 -7.954353 15.342591 -3.853207 10.577232 -19.779754 5.165791 -4.517738 8.345031 -2.242844 6.85 4.18536 0.32679 -0.02767 0.32638 -0.0951 -0.006 1.501368 -0.385625 0.067907 -0.004809 0.00011 -0.780202 0.424442 -0.086695 0.00774 -0.000255 0.577 0.476 0.430 0.482 0.506 0.863 0.890 0.903
0.07943 1.1 0.55 -0.3694 0.0284 0.179 0 0 0 0 0 0 -0.01 -0.205 0 0 0 -0.00819 -0.00314 -0.00558 -0.0063 -0.00186 -0.00457 -0.0086 -0.00589 -0.00603 -7.763642 -66.149606 26.952316 8.97581 219.649317 -103.379076 15.405799 -240.457636 127.104568 -12.572634 86.735333 -50.71788 0.064604 -52.779583 20.630675 -6.928645 179.893747 -79.818698 26.556561 -201.956797 99.041055 -14.74867 74.692232 -39.890352 1.371378 -17.938684 6.785619 -5.622 62.274894 -26.43558 12.308399 -71.110282 33.03583 -6.09636 26.730934 -13.400089 0.278993 -2.198066 0.810585 -0.973691 7.731894 -3.174988 1.792855 -8.934275 3.98935 -0.841024 3.395967 -1.626842 6.1 3.86847 0.33558 -0.04859 0.33554 -0.1038 -0.0057 0.234847 0.470479 -0.13762 0.016237 -0.000672 -0.55048 0.252179 -0.042792 0.003069 -0.000077 0.577 0.498 0.434 0.490 0.512 0.877 0.906 0.918
0.1 1.0 0.501 -0.3524 0.0265 0.204 0 0 0 0 0 0 0.119 -0.304 0 0 0 -0.00829 -0.00318 -0.00571 -0.00633 -0.00183 -0.00457 -0.00852 -0.00582 -0.00584 -68.458481 28.829689 4.605562 237.82803 -164.266697 -8.836444 -274.764841 260.022051 -0.933937 107.138761 -125.374848 5.317074 -49.29738 21.631191 3.518638 177.983317 -122.615957 -7.061507 -206.833056 194.017768 0.06255 81.241286 -93.659822 3.599962 -15.600124 6.982888 1.149204 57.790512 -39.46235 -2.385281 -67.51015 62.458687 0.211002 26.679145 -30.191159 1.065238 -1.812877 0.816535 0.13729 6.835188 -4.612347 -0.294438 -8.019372 7.308278 0.048217 3.184593 -3.538278 0.113795 5.9 3.73063 0.34123 -0.06587 0.34086 -0.1082 -0.0056 -1.574873 1.682296 -0.42639 0.045587 -0.001754 -0.151856 -0.03276 0.027751 -0.004278 0.000198 0.560 0.512 0.436 0.494 0.520 0.875 0.905 0.920
0.12589 0.9 0.448 -0.3418 0.0252 0.167 0 0 0 0 0 0 0.239 -0.531 0 0 0 -0.00823 -0.00317 -0.00571 -0.00622 -0.00178 -0.00447 -0.00818 -0.00565 -0.00541 -57.661409 52.210153 -6.534009 177.243465 -234.368311 33.479926 -183.23805 322.707297 -52.189627 62.323522 -140.821243 25.32931 -42.110184 41.044747 -5.388708 133.944972 -182.937031 27.078094 -138.757168 251.119517 -41.730651 47.425905 -109.454024 20.112694 -13.483786 13.758383 -1.88179 43.870741 -61.008017 9.304197 -45.579787 83.569682 -14.2053 15.662299 -36.39955 6.807954 -1.58149 1.661143 -0.233866 5.224073 -7.340905 1.143415 -5.441877 10.042552 -1.734476 1.878435 -4.372726 0.828058 5.7 3.59587 0.34574 -0.0912 0.34554 -0.111 -0.0057 -3.703106 3.066794 -0.749185 0.077859 -0.002928 0.381196 -0.399304 0.116176 -0.013321 0.000533 0.534 0.509 0.435 0.493 0.528 0.856 0.887 0.907
0.15849 0.8 0.4 -0.3386 0.0252 0.116 0 0 0 0 0 0 0.298 -0.774 0 0 0 -0.00805 -0.00312 -0.00563 -0.00601 -0.00174 -0.00426 -0.00772 -0.00535 -0.00491 -24.177127 31.505749 -6.301202 42.856647 -131.442129 28.590153 -12.536823 166.623715 -40.386786 -9.725772 -66.403364 18.060282 -17.00014 26.159992 -5.545691 31.632532 -107.972584 24.720766 -8.170103 136.316367 -34.518011 -7.964177 -54.294589 15.318878 -5.341652 9.099537 -2.01801 10.347768 -37.283796 8.875047 -2.606959 46.936269 -12.284179 -2.643744 -18.685157 5.420392 -0.62009 1.12699 -0.257915 1.236343 -4.595344 1.123957 -0.31152 5.773783 -1.546427 -0.314799 -2.297616 0.679679 5.5 3.44976 0.35261 -0.12539 0.35251 -0.1113 -0.0059 -5.461329 4.154378 -0.991766 0.101168 -0.003747 0.890829 -0.737591 0.195269 -0.021186 0.000816 0.509 0.492 0.434 0.485 0.532 0.830 0.858 0.886
0.19953 0.7 0.356 -0.3262 0.0244 0.072 0 0 0 0 0 0 0.245 -0.963 0 0 0 -0.0077 -0.00304 -0.00544 -0.0057 -0.00171 -0.00401 -0.00712 -0.00496 -0.00445 -23.660344 26.464031 -5.426108 55.791231 -112.060621 24.636964 -47.626855 146.51806 -35.041156 11.280835 -60.888577 15.837994 -16.828254 21.447259 -4.63974 41.810074 -89.728015 20.704709 -35.876491 116.590896 -29.106723 8.681616 -48.298949 13.051859 -5.288994 7.319696 -1.66621 13.58268 -30.351731 7.324407 -11.670177 39.245374 -10.194418 2.853302 -16.212637 4.540574 -0.611269 0.893067 -0.211358 1.602063 -3.68013 0.919167 -1.373911 4.741356 -1.2702 0.336447 -1.954443 0.562998 5.55 3.39236 0.3603 -0.14134 0.35792 -0.1071 -0.0061 -6.701839 4.842848 -1.128498 0.112821 -0.004109 1.341265 -1.022197 0.25856 -0.027186 0.001023 0.498 0.470 0.433 0.475 0.531 0.810 0.834 0.866
0.25119 0.6 0.318 -0.3078 0.0219 0.043 0 0 0 0 0 0 0.104 -1.076 0 0 0 -0.00721 -0.00292 -0.00517 -0.00535 -0.00167 -0.00375 -0.0064 -0.00452 -0.00404 -14.445389 18.496667 -3.731559 24.548144 -76.141144 16.60388 -11.975991 97.214791 -23.169101 -2.324617 -39.552576 10.302406 -9.917548 15.168281 -3.258594 17.609536 -61.529618 14.221533 -8.005212 77.893013 -19.573251 -2.102768 -31.522354 8.616295 -2.996519 5.146787 -1.183173 5.366689 -20.638785 5.071155 -2.11815 25.924107 -6.892427 -0.886377 -10.42874 3.006621 -0.33248 0.62014 -0.150501 0.586776 -2.465349 0.636599 -0.184214 3.076553 -0.85724 -0.133498 -1.230828 0.371403 5.75 3.40241 0.35949 -0.14483 0.35761 -0.1022 -0.0064 -7.161937 4.9757 -1.125765 0.109686 -0.003908 1.647322 -1.197509 0.293011 -0.030007 0.001106 0.501 0.443 0.429 0.463 0.527 0.795 0.813 0.851
0.31623 0.5 0.287 -0.3085 0.0223 0.006 0 0 0 0 0 0 -0.117 -1.124 0 0 0 -0.00661 -0.00276 -0.00482 -0.00496 -0.00161 -0.00348 -0.00568 -0.00406 -0.00374 -12.457198 13.452604 -2.296076 25.041919 -56.590695 10.665132 -20.429261 74.083436 -15.332033 4.098787 -30.944402 6.964706 -8.496994 11.193186 -2.133387 17.892765 -46.107003 9.553985 -14.466532 59.589844 -13.399191 2.838654 -24.677585 5.981079 -2.575012 3.845386 -0.820948 5.531501 -15.578438 3.558698 -4.345622 19.906482 -4.882163 0.78274 -8.173392 2.145381 -0.287494 0.467366 -0.108764 0.618599 -1.870106 0.461095 -0.465766 2.367969 -0.622809 0.072099 -0.965107 0.270594 5.9 3.39048 0.36101 -0.14953 0.36028 -0.0931 -0.0068 -6.541656 4.34703 -0.933851 0.086626 -0.002949 1.717464 -1.199601 0.282951 -0.028015 0.001001 0.510 0.416 0.424 0.453 0.522 0.783 0.799 0.840
0.39810 0.4 0.26 -0.2966 0.0212 -0.036 0 0 0 0 0 0 -0.39 -1.125 0 0 0 -0.006 -0.00254 -0.00444 -0.00454 -0.00152 -0.00327 -0.00504 -0.00363 -0.00348 -11.171429 13.42476 -2.312676 23.828545 -55.212966 10.512492 -20.516676 71.266493 -14.856756 4.629921 -29.469083 6.663018 -8.78464 12.532116 -2.431218 21.431952 -50.473874 10.609339 -20.202742 64.49718 -14.605681 5.595161 -26.559974 6.436415 -2.889597 4.488225 -0.964385 7.435233 -17.804979 4.085591 -7.148481 22.53445 -5.513494 2.060159 -9.219452 2.395702 -0.339177 0.554542 -0.128487 0.890425 -2.174981 0.533644 -0.853481 2.73019 -0.710181 0.245231 -1.109827 0.305441 6.2 3.43447 0.35471 -0.14217 0.35926 -0.0852 -0.0071 -4.815439 2.969329 -0.56384 0.045648 -0.001339 1.511291 -1.006868 0.22483 -0.02103 0.00071 0.511 0.393 0.420 0.440 0.512 0.769 0.780 0.823
0.50119 0.3 0.236 -0.2813 0.0202 -0.068 0 0 0 0 0 0 -0.682 -1.105 -0.044 0.19 -0.202 -0.00534 -0.00228 -0.00397 -0.00411 -0.00142 -0.00295 -0.00443 -0.00316 -0.00318 -11.812408 13.810381 -2.093803 31.444778 -57.894833 9.877971 -33.699963 76.336005 -14.273426 11.435244 -32.281562 6.49964 -9.513158 13.27323 -2.411077 27.756507 -54.194612 10.699909 -30.847394 70.408832 -14.890386 11.001245 -29.52527 6.614863 -3.121738 4.754601 -0.982266 9.388295 -19.088831 4.203363 -10.460817 24.522232 -5.710879 3.74871 -10.200835 2.494789 -0.363552 0.584758 -0.132423 1.10234 -2.319098 0.55367 -1.219101 2.952493 -0.740247 0.433316 -1.219535 0.319613 6.45 3.4499 0.36074 -0.13591 0.36323 -0.0759 -0.0074 -2.478112 1.203996 -0.107894 -0.003352 0.000541 1.121871 -0.689355 0.136977 -0.011065 0.000311 0.503 0.380 0.417 0.426 0.497 0.756 0.761 0.803
0.63096 0.2 0.217 -0.265 0.0185 -0.123 0 0 0 0 0 0 -0.992 -1.079 -0.064 0.244 -0.193 -0.00468 -0.00201 -0.00345 -0.0037 -0.00135 -0.00258 -0.0037 -0.00276 -0.00294 -13.540851 17.055776 -2.804665 41.258829 -70.294928 12.710257 -48.22679 92.592344 -18.045488 18.39273 -39.458509 8.170882 -10.562035 16.115678 -3.036013 33.662132 -64.896749 13.201129 -39.691195 84.206632 -18.222828 15.298954 -35.52086 8.087504 -3.348491 5.651546 -1.17536 10.766342 -22.430135 4.978746 -12.609158 28.801371 -6.748344 4.830425 -12.053701 2.95528 -0.376572 0.680627 -0.152351 1.204459 -2.672388 0.633989 -1.39319 3.40324 -0.848446 0.527028 -1.414898 0.368028 6.8 3.52557 0.36154 -0.1252 0.36002 -0.0669 -0.0078 0.52648 -0.970882 0.435757 -0.060272 0.002678 0.511349 -0.224278 0.014578 0.002319 -0.000209 0.491 0.374 0.414 0.418 0.476 0.743 0.745 0.779
0.79433 0.1 0.201 -0.265 0.0188 -0.158 0 0 0 0 0 0 -1.223 -1.056 -0.083 0.296 -0.175 -0.00402 -0.00175 -0.00294 -0.0033 -0.00127 -0.00216 -0.00298 -0.00231 -0.00274 -9.791213 13.769991 -2.058023 28.968177 -57.064269 9.664295 -33.509414 75.202024 -13.947098 12.611617 -31.974638 6.360024 -8.121296 14.810735 -2.766638 25.532426 -59.413122 12.080842 -29.81416 76.696416 -16.65731 11.338682 -32.161415 7.365446 -2.735425 5.545806 -1.166749 8.765384 -21.891175 4.931566 -10.186372 27.94221 -6.652381 3.856514 -11.620005 2.896002 -0.324788 0.698059 -0.158691 1.044729 -2.726708 0.657992 -1.203449 3.453349 -0.875747 0.451513 -1.427377 0.377501 7.1 3.57136 0.35653 -0.11932 0.35702 -0.0576 -0.0082 3.425185 -2.991325 0.921437 -0.10922 0.00445 -0.149513 0.260358 -0.108515 0.015353 -0.000702 0.483 0.375 0.409 0.416 0.458 0.736 0.740 0.764
1.0 0.0 0.189 -0.2662 0.0201 -0.177 0 0 0 0 0 0 -1.308 -1.028 -0.103 0.341 -0.168 -0.00341 -0.00154 -0.00251 -0.0029 -0.00121 -0.00183 -0.00234 -0.00187 -0.00261 -7.325605 13.614002 -2.386626 20.485389 -55.648435 10.84517 -22.683851 72.724816 -15.358254 8.102878 -30.762067 6.924623 -6.60002 15.889768 -3.329883 20.220607 -62.917428 14.24876 -22.94835 80.523509 -19.391391 8.442674 -33.570142 8.501097 -2.399851 6.205665 -1.42624 7.626841 -24.238636 5.9471 -8.706471 30.726631 -7.950366 3.229578 -12.720858 3.440594 -0.304166 0.805239 -0.196305 0.983223 -3.119945 0.806391 -1.12337 3.932134 -1.066722 0.417681 -1.620806 0.458069 7.4 3.61643 0.35977 -0.11193 0.35949 -0.0457 -0.0084 5.949047 -4.671327 1.304611 -0.14567 0.00569 -0.807548 0.726202 -0.222759 0.027034 -0.001128 0.472 0.383 0.404 0.413 0.439 0.730 0.735 0.750
1.25892 -0.1 0.179 -0.2782 0.0225 -0.177 -0.007 -0.052 0.019 -0.034 -0.017 0 -1.209 -1.015 -0.12 0.388 -0.146 -0.00282 -0.00137 -0.00209 -0.00255 -0.00118 -0.00147 -0.002 -0.00136 -0.00225 -8.939958 14.674685 -2.50588 28.48766 -60.533728 11.447177 -33.74954 79.839562 -16.272449 13.110558 -34.093951 7.363532 -6.823299 16.349183 -3.378252 21.945586 -64.788429 14.473168 -25.43702 83.014323 -19.700187 9.622979 -34.666492 8.636822 -2.230057 6.227992 -1.418814 7.139707 -24.239425 5.910868 -8.089198 30.632422 -7.8882 2.98057 -12.650068 3.407453 -0.262857 0.794673 -0.192817 0.833591 -3.058636 0.790049 -0.924593 3.831317 -1.041746 0.332274 -1.570612 0.445901 7.7 3.65455 0.3576 -0.10572 0.35653 -0.0344 -0.0083 7.368131 -5.499974 1.459253 -0.156522 0.00591 -1.284003 1.045681 -0.29604 0.033957 -0.001359 0.457 0.390 0.403 0.408 0.429 0.724 0.727 0.738
1.58489 -0.2 0.168 -0.2895 0.0246 -0.177 0.017 -0.118 -0.006 -0.033 -0.015 0 -1.038 -1.003 -0.134 0.411 -0.126 -0.00227 -0.00124 -0.00162 -0.00228 -0.0012 -0.00102 -0.00185 -0.00098 -0.00195 -15.626776 19.508059 -3.560531 56.985234 -81.272413 15.949639 -71.946571 108.418298 -22.436338 29.788741 -46.830402 10.092356 -11.768413 20.485075 -4.373144 43.018477 -82.174059 18.668326 -53.780421 106.598489 -25.382767 22.029363 -45.049168 11.130096 -3.820418 7.664231 -1.778515 13.91816 -30.2218 7.420629 -17.228546 38.688876 -9.924873 6.988037 -16.176321 4.297893 -0.449459 0.970762 -0.237817 1.629299 -3.788807 0.978539 -1.999441 4.811048 -1.295541 0.804229 -1.998174 0.556679 7 3.28577 0.60973 -0.08858 0.3841 -0.0247 -0.0073 7.648531 -5.478711 1.393184 -0.143506 0.00521 -1.556335 1.20724 -0.326813 0.03613 -0.001401 0.435 0.394 0.407 0.400 0.422 0.714 0.710 0.723
1.99526 -0.3 0.155 -0.3002 0.0273 -0.177 0.029 -0.167 -0.038 -0.022 0.012 0 -0.877 -1.005 -0.141 0.422 -0.074 -0.00174 -0.00116 -0.0011 -0.00209 -0.00121 -0.00055 -0.00183 -0.00081 -0.00177 -20.196064 21.605585 -3.967159 76.955882 -91.937133 18.009102 -99.17849 124.888604 -25.625691 41.854937 -54.793264 11.641878 -14.237646 21.4583 -4.630733 54.204954 -87.447024 19.980398 -69.387392 115.040466 -27.41286 29.065915 -49.224908 12.113529 -4.450964 7.879438 -1.854116 16.869748 -31.468194 7.806779 -21.431777 40.755464 -10.521754 8.912003 -17.220691 4.586453 -0.512949 0.990385 -0.246374 1.934245 -3.908465 1.022173 -2.440762 5.014605 -1.362831 1.0086 -2.102644 0.589139 7.05 3.205 0.68673 -0.07303 0.40216 -0.0158 -0.0048 6.500901 -4.413972 1.059609 -0.101708 0.003404 -1.529644 1.14411 -0.298233 0.031732 -0.001184 0.421 0.392 0.409 0.394 0.421 0.706 0.697 0.713
2.51189 -0.4 0.142 -0.3243 0.0323 -0.177 0.046 -0.200 -0.067 -0.014 0.070 0 -0.787 -0.989 -0.145 0.405 -0.012 -0.00127 -0.0011 -0.00071 -0.00188 -0.00117 -0.00018 -0.00176 -0.00081 -0.00169 -21.025654 20.914681 -3.65759 80.254159 -89.929962 16.838693 -102.867295 122.982399 -24.163864 43.215191 -54.20406 11.039272 -14.058824 20.33818 -4.315139 53.578857 -83.280809 18.729752 -68.270807 109.860542 -25.776517 28.477607 -47.081453 11.408887 -4.220327 7.384426 -1.728543 16.004347 -29.551045 7.303437 -20.246004 38.293665 -9.856116 8.382355 -16.175944 4.297066 -0.471012 0.920307 -0.229126 1.77601 -3.633509 0.952913 -2.231373 4.65817 -1.271094 0.917647 -1.950334 0.549199 7.2 3.15761 0.71829 -0.06331 0.42226 -0.0092 -0.0027 4.471238 -2.731619 0.575555 -0.044609 0.001042 -1.296131 0.930304 -0.231209 0.023231 -0.000811 0.422 0.390 0.401 0.386 0.413 0.701 0.693 0.708
3.16228 -0.5 0.129 -0.3363 0.0348 -0.177 0.068 -0.233 -0.029 -0.039 0.146 0 -0.739 -0.957 -0.147 0.38 0.088 -0.00087 -0.00108 -0.00052 -0.00167 -0.00112 0 -0.00177 -0.00082 -0.00153 -22.741065 22.411323 -4.096293 86.63212 -97.020411 18.794135 -110.234252 133.365048 -26.940861 46.034113 -59.021914 12.307843 -15.263972 21.425521 -4.717232 58.109433 -88.201374 20.443717 -73.576682 116.846948 -28.124593 30.529589 -50.252 12.45084 -4.61871 7.768048 -1.881403 17.508884 -31.215017 7.940036 -22.024665 40.586946 -10.711395 9.075048 -17.193783 4.670423 -0.520072 0.970347 -0.249365 1.961472 -3.844432 1.036151 -2.451769 4.94279 -1.381728 1.003782 -2.074601 0.597048 7.25 3.07444 0.79168 -0.0485 0.43945 -0.005 -0.0017 1.70041 -0.551013 -0.02409 0.023559 -0.001692 -0.856979 0.570437 -0.12792 0.010975 -0.0003 0.427 0.387 0.386 0.381 0.401 0.694 0.691 0.702
3.98107 -0.6 0.114 -0.353 0.0379 -0.177 0.097 -0.252 0.047 -0.071 0.222 0 -0.715 -0.892 -0.148 0.331 0.199 -0.0006 -0.00104 -0.00046 -0.00149 -0.0011 0 -0.00188 -0.00064 -0.00117 -26.264268 23.771252 -4.060054 100.472301 -103.646308 18.814282 -127.685929 143.161093 -27.132822 53.278586 -63.57275 12.444219 -17.751774 21.96526 -4.627096 68.116276 -91.178818 20.183777 -86.467332 121.521772 -27.87899 35.975417 -52.500411 12.374512 -5.401081 7.859048 -1.842007 20.706062 -31.830584 7.810706 -26.201897 41.632888 -10.56744 10.860052 -17.718161 4.616053 -0.611228 0.975707 -0.244152 2.338044 -3.894667 1.018348 -2.948565 5.036198 -1.361074 1.217733 -2.123459 0.588934 7.35 3.00688 0.83749 -0.03816 0.45682 -0.0015 -0.0015 -1.21286 1.659951 -0.609563 0.087637 -0.004167 -0.330958 0.159703 -0.015464 -0.001793 0.000211 0.429 0.381 0.368 0.373 0.395 0.681 0.684 0.697
5.01187 -0.7 0.101 -0.3689 0.041 -0.177 0.104 -0.261 0.119 -0.088 0.298 0 -0.727 -0.81 -0.14 0.266 0.313 -0.00045 -0.00095 -0.00026 -0.00133 -0.00109 0 -0.00191 -0.00048 -0.00078 -25.705775 24.076487 -3.870359 96.629913 -105.020322 18.090218 -120.70367 145.026697 -26.228237 49.610109 -64.360305 12.071078 -16.84009 21.159053 -4.272283 63.439586 -87.991892 18.758983 -79.104292 117.348296 -26.011167 32.390672 -50.693008 11.572278 -4.974984 7.359398 -1.677937 18.699921 -29.825945 7.145804 -23.214685 38.994195 -9.688467 9.45493 -16.577788 4.235971 -0.5486 0.896211 -0.220551 2.05407 -3.574324 0.922361 -2.537028 4.613769 -1.233766 1.027578 -1.940914 0.533737 7.4 2.8864 0.87617 -0.03008 0.49447 -0.0001 -0.0014 -3.889662 3.647243 -1.123764 0.142588 -0.006239 0.20708 -0.251394 0.094773 -0.014072 0.000694 0.416 0.368 0.350 0.356 0.384 0.657 0.660 0.676
6.30957 -0.8 0.089 -0.3897 0.0446 -0.177 0.127 -0.265 0.151 -0.101 0.370 0 -0.744 -0.721 -0.126 0.2 0.395 -0.00035 -0.00088 -0.00008 -0.00115 -0.00105 0 -0.00168 -0.00035 -0.00054 -24.77411 26.224526 -4.200479 90.85638 -112.942284 19.297359 -110.881403 154.63744 -27.703051 44.643788 -68.197225 12.669506 -16.399063 22.499296 -4.454105 60.312115 -92.857 19.391601 -73.538359 123.20744 -26.757826 29.522271 -53.029484 11.868641 -4.882647 7.738961 -1.723739 17.92392 -31.186636 7.297028 -21.769156 40.625276 -9.860528 8.69684 -17.22899 4.30281 -0.541658 0.936975 -0.225009 1.98114 -3.719234 0.936385 -2.394693 4.787192 -1.249196 0.951699 -2.010391 0.539632 7.4 2.71458 0.91369 -0.02248 0.54244 0.0001 -0.0014 -6.070844 5.214049 -1.513383 0.182277 -0.007654 0.699129 -0.617005 0.189848 -0.024315 0.001083 0.402 0.349 0.331 0.340 0.367 0.627 0.631 0.647
7.94328 -0.9 0.08 -0.4051 0.0472 -0.177 0.161 -0.270 0.184 -0.115 0.422 0 -0.758 -0.623 -0.111 0.151 0.42 -0.00032 -0.00091 -0.00003 -0.00093 -0.00098 0 -0.00124 -0.00031 -0.00049 -23.2554 27.269174 -4.355136 83.882593 -116.925661 19.896133 -100.723717 159.512369 -28.450802 39.959542 -70.128341 12.972515 -15.584863 23.227516 -4.54862 56.449509 -95.598076 19.739125 -67.769774 126.51015 -27.164746 26.812408 -54.31465 12.021431 -4.678472 7.958405 -1.748796 16.923356 -32.002733 7.384073 -20.239549 41.594188 -9.954634 7.966198 -17.599496 4.334423 -0.52156 0.961043 -0.22741 1.879887 -3.807686 0.944166 -2.236969 4.890666 -1.256707 0.87537 -2.049251 0.541691 7.45 2.59172 0.94723 -0.0155 0.57479 0 -0.0014 -7.967855 6.53621 -1.828753 0.212558 -0.008649 1.149224 -0.944491 0.272771 -0.032948 0.001398 0.392 0.321 0.312 0.327 0.334 0.596 0.603 0.607
10.000 -1.0 0.073 -0.4203 0.0498 -0.177 0.223 -0.271 0.246 -0.139 0.451 0 -0.729 -0.514 -0.098 0.121 0.395 -0.0003 -0.00106 -0.00022 -0.00072 -0.00092 0 -0.00076 -0.00018 -0.00062 -21.116565 26.950596 -4.295478 74.840821 -115.458924 19.652997 -88.397431 157.343534 -28.131669 34.546793 -69.08876 12.833902 -14.309344 23.080364 -4.515178 51.065975 -94.912402 19.60508 -60.439985 125.48986 -26.99314 23.597517 -53.822052 11.948725 -4.333707 7.93206 -1.740815 15.469595 -31.875118 7.352615 -18.261556 41.39963 -9.915528 7.099148 -17.503328 4.318339 -0.486169 0.95965 -0.226702 1.730713 -3.800122 0.941414 -2.034029 4.878348 -1.253388 0.786412 -2.042806 0.540368 7.45 2.41501 0.97695 -0.00945 0.61307 0 -0.0014 -8.935422 7.144835 -1.952403 0.221535 -0.008813 1.461981 -1.163289 0.32556 -0.038128 0.001573 0.392 0.289 0.293 0.324 0.303 0.568 0.585 0.573
""")
# constant table suffix
SUFFIX = "_if"
CONST_REGION = {"cc": 0.85, "rt": 150,
"cd0": 1.606, "cd1": 0.0097, "dp0": 25, "dp1": 55}
class HassaniAtkinson2020SSlab(HassaniAtkinson2020SInter):
    """
    Hassani & Atkinson (2020) ground-motion model for subduction
    in-slab events. All behaviour is inherited from the interface
    implementation; only the coefficient-table suffix and the
    region-dependent constants differ.
    """
    DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.SUBDUCTION_INTRASLAB

    # Suffix selecting the in-slab coefficient tables.
    SUFFIX = "_is"

    # Region-dependent constants for the in-slab model.
    CONST_REGION = {
        "cc": 0.9,
        "rt": 250,
        "cd0": 1.9241,
        "cd1": 0.0133,
        "dp0": 40,
        "dp1": 90,
    }
class HassaniAtkinson2020Asc(HassaniAtkinson2020SInter):
    """
    Hassani & Atkinson (2020) ground-motion model for active shallow
    crustal events. Inherits the full implementation and overrides only
    the coefficient-table suffix and the regional constants.
    """
    DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST

    # Suffix selecting the crustal coefficient tables.
    SUFFIX = "_cr"

    # Region-dependent constants for the crustal model.
    CONST_REGION = {
        "cc": 0.45,
        "rt": 50,
        "cd0": 2.5011,
        "cd1": 0,
        "dp0": 0,
        "dp1": 30,
    }
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Seq and secondary structure prediction"""
import os
import tempfile
import shutil
# Absolute path to the local VARNA installation (must contain VARNA.jar).
# NOTE(review): machine-specific hard-coded path — consider moving to config.
VARNA_PATH = '/Users/magnus/skills/rnax/varna_tut/'
def draw_ss(title, seq, ss, img_out):
    """Render a secondary-structure drawing with VARNA.

    Runs the VARNA command-line applet on ``seq``/``ss`` (dot-bracket
    notation) and moves the resulting PNG to ``img_out``.

    :param title: title drawn on the image
    :param seq: RNA sequence
    :param ss: secondary structure in dot-bracket notation
    :param img_out: destination path for the generated PNG
    """
    curr = os.getcwd()
    # VARNA.jar is referenced relative to its install directory, so move there.
    os.chdir(VARNA_PATH)
    print(os.getcwd())
    t = tempfile.NamedTemporaryFile(delete=False)
    t.name += '.png'
    # BUG FIX: the title is now quoted (like ss) so multi-word titles do not
    # break the command line.
    # NOTE(review): arguments are still interpolated into a shell string;
    # a list-based subprocess call would be safer for untrusted input.
    os.system('java -cp VARNA.jar fr.orsay.lri.varna.applications.VARNAcmd'
              ' -sequenceDBN ' + seq + " -structureDBN '" + ss + "'"
              " -o " + t.name + " -title '" + title + "' -resolution '2.0'")
    os.chdir(curr)
    print(img_out)
    shutil.move(t.name, img_out)
if __name__ == '__main__':
    # Demo run: render a tiny hairpin to out.png.
    demo_seq = 'AAAAAAA'
    demo_ss = '((...))'
    out_path = 'out.png'
    draw_ss('rna', demo_seq, demo_ss, out_path)
ss: use rpt_config
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Seq and secondary structure prediction"""
import os
import tempfile
import shutil
import subprocess
from rpt_config import *
def draw_ss(title, seq, ss, img_out):
    """Draw a secondary structure image using VARNA.

    Requires a correct ``rpt_config`` (``VARNA_PATH``, ``VARNA_JAR_NAME``).
    If everything is OK, return None; if VARNA raised a Java exception,
    return its stderr output instead of moving the image.
    Can be used with http://geekbook.readthedocs.io/en/latest/rna.html
    """
    curr = os.getcwd()
    # VARNA.jar is referenced relative to its install directory.
    os.chdir(VARNA_PATH)
    t = tempfile.NamedTemporaryFile(delete=False)
    t.name += '.png'
    cmd = ('java -cp ' + VARNA_JAR_NAME +
           ' fr.orsay.lri.varna.applications.VARNAcmd -sequenceDBN ' + seq +
           " -structureDBN '" + ss + "' -o " + t.name +
           " -title " + title + " -resolution '2.0'")
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    # BUG FIX: communicate() replaces wait() + stderr.read(); the old form
    # can deadlock if the child fills its pipe buffer before exiting.
    _, err = p.communicate()
    out = err.strip()
    os.chdir(curr)
    # NOTE(review): under Python 3 ``err`` is bytes and would need .decode().
    if out.find('Exception') > -1:
        # BUG FIX: the original returned the undefined name ``stderr``
        # (NameError); return the captured stderr text as documented.
        return out
    else:
        shutil.move(t.name, img_out)
if __name__ == '__main__':
    # Demo run: render a tiny hairpin to demo.png.
    demo_seq = 'AAAAAAA'
    demo_ss = '((...))'
    img_out = 'demo.png'
    draw_ss('rna', demo_seq, demo_ss, img_out)
    print('Made %s' % img_out)
|
"""Methods for plotting Fourier spectra."""
import matplotlib.pylab as plt
import numpy as np
def plot_rapsd(fft_freq, fft_power, x_units=None, y_units=None,
               wavelength_ticks=None, color='k', lw=1.0, label=None, ax=None):
    """
    Plot a radially averaged Fourier power spectrum on log-log (dB) axes.

    Parameters
    ----------
    fft_freq: array-like
        1d array containing the Fourier frequencies computed by the
        function 'rapsd' in utils/spectral.py.
    fft_power: array-like
        1d array containing the radially averaged Fourier power spectrum
        computed by the function 'rapsd' in utils/spectral.py.
    x_units: str
        Units of the X variable (distance, e.g. km).
    y_units: str
        Units of the Y variable (amplitude, e.g. dBR).
    wavelength_ticks: array-like
        List of wavelengths where to show xticklabels.
    color: str
        Line color.
    lw: float
        Line width.
    label: str
        Label (for legend).
    ax: Axes
        Plot axes; a new subplot is created when None.

    Returns
    -------
    ax: Axes
        Plot axes.
    """
    # Check input dimensions.
    n_freq = len(fft_freq)
    n_pow = len(fft_power)
    if n_freq != n_pow:
        raise ValueError(
            "Dimensions of the 1d input arrays must be equal. %s vs %s"
            % (n_freq, n_pow))
    if ax is None:
        ax = plt.subplot(111)

    # Plot spectrum in log-log scale (10*log10 on both axes).
    ax.plot(10.0 * np.log10(fft_freq), 10.0 * np.log10(fft_power),
            color=color, linewidth=lw, label=label)

    # X-axis: label by wavelength when ticks are given, else by frequency.
    if wavelength_ticks is not None:
        wavelength_ticks = np.array(wavelength_ticks)
        freq_ticks = 1.0 / wavelength_ticks
        ax.set_xticks(10.0 * np.log10(freq_ticks))
        ax.set_xticklabels(wavelength_ticks)
        if x_units is not None:
            ax.set_xlabel('Wavelength [' + x_units + ']')
    else:
        if x_units is not None:
            ax.set_xlabel('Frequency [1/' + x_units + ']')

    # Y-axis. BUG FIX: the original concatenated ``x_units`` even when it
    # was None (TypeError); fall back to a label without the denominator.
    if y_units is not None:
        if x_units is not None:
            ax.set_ylabel(r'Power [10log$_{10}(\frac{' + y_units + '^2}{'
                          + x_units + '})$]')
        else:
            ax.set_ylabel(r'Power [10log$_{10}(' + y_units + '^2)$]')
    return ax
Add kwargs function arguments for more flexible choice of plot parameters.
"""Methods for plotting Fourier spectra."""
import matplotlib.pylab as plt
import numpy as np
def plot_rapsd(fft_freq, fft_power, x_units=None, y_units=None,
               wavelength_ticks=None, color='k', lw=1.0, label=None, ax=None,
               **kwargs):
    """
    Plot a radially averaged Fourier power spectrum on log-log (dB) axes.

    Parameters
    ----------
    fft_freq: array-like
        1d array containing the Fourier frequencies computed by the
        function 'rapsd' in utils/spectral.py.
    fft_power: array-like
        1d array containing the radially averaged Fourier power spectrum
        computed by the function 'rapsd' in utils/spectral.py.
    x_units: str
        Units of the X variable (distance, e.g. km).
    y_units: str
        Units of the Y variable (amplitude, e.g. dBR).
    wavelength_ticks: array-like
        List of wavelengths where to show xticklabels.
    color: str
        Line color.
    lw: float
        Line width.
    label: str
        Label (for legend).
    ax: Axes
        Plot axes; a new subplot is created when None.
    kwargs: dict
        Additional keyword arguments forwarded to ax.plot for a more
        flexible choice of plot parameters.

    Returns
    -------
    ax: Axes
        Plot axes.
    """
    # Check input dimensions.
    n_freq = len(fft_freq)
    n_pow = len(fft_power)
    if n_freq != n_pow:
        raise ValueError(
            "Dimensions of the 1d input arrays must be equal. %s vs %s"
            % (n_freq, n_pow))
    if ax is None:
        ax = plt.subplot(111)

    # Plot spectrum in log-log scale (10*log10 on both axes).
    ax.plot(10.0 * np.log10(fft_freq), 10.0 * np.log10(fft_power),
            color=color, linewidth=lw, label=label, **kwargs)

    # X-axis: label by wavelength when ticks are given, else by frequency.
    if wavelength_ticks is not None:
        wavelength_ticks = np.array(wavelength_ticks)
        freq_ticks = 1.0 / wavelength_ticks
        ax.set_xticks(10.0 * np.log10(freq_ticks))
        ax.set_xticklabels(wavelength_ticks)
        if x_units is not None:
            ax.set_xlabel('Wavelength [' + x_units + ']')
    else:
        if x_units is not None:
            ax.set_xlabel('Frequency [1/' + x_units + ']')

    # Y-axis. BUG FIX: the original concatenated ``x_units`` even when it
    # was None (TypeError); fall back to a label without the denominator.
    if y_units is not None:
        if x_units is not None:
            ax.set_ylabel(r'Power [10log$_{10}(\frac{' + y_units + '^2}{'
                          + x_units + '})$]')
        else:
            ax.set_ylabel(r'Power [10log$_{10}(' + y_units + '^2)$]')
    return ax
|
# -*- coding: utf-8 -*-
"""
Created on Thursday July 27 2017
@author: Rama Vasudevan, Suhas Somnath, Chris R. Smith
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import sys
from os import path, remove, listdir # File Path formatting
import re
import numpy as np # For array operations
import h5py
from scipy.io import loadmat
from pyUSID.io.translator import Translator
from pyUSID.io.write_utils import Dimension
from pyUSID.io.hdf_utils import write_simple_attrs, write_main_dataset, \
create_indexed_group
# Py2/3 compatibility: Python 3 has no ``unicode`` builtin, so alias it to
# ``str`` for code below that checks against it.
if sys.version_info.major == 3:
    unicode = str
class TRKPFMTranslator(Translator):
    """
    Translates trKPFM (time-resolved Kelvin probe force microscopy)
    datasets from .mat parameter and .dat raw data files to .h5
    """
def __init__(self, *args, **kwargs):
    """Forward all arguments to the base Translator.

    The raw channel datasets are created later, during translate().
    """
    super(TRKPFMTranslator, self).__init__(*args, **kwargs)
    # Populated by translate(); holds one entry per raw .dat file.
    self.raw_datasets = None
@staticmethod
def is_valid_file(data_path):
"""
Checks whether the provided file can be read by this translator
Parameters
----------
data_path : str
Path to folder or any data / parameter file within the folder
Returns
-------
obj : str
Path to file that will be accepted by the translate() function if
this translator is indeed capable of translating the provided file.
Otherwise, None will be returned
"""
def get_chan_ind(line):
match_obj = re.match(r'(.*)_ch(..).dat', line, re.M | re.I)
type_list = [str, int]
if match_obj:
return \
[type_caster(match_obj.group(ind)) for ind, type_caster in
zip(range(1, 1 + len(type_list)), type_list)][-1]
else:
return None
if path.isfile(data_path):
# Assume that the file is amongst all other data files
folder_path, _ = path.split(data_path)
else:
folder_path = data_path
# Now looking at the folder with all necessary files:
file_list = listdir(path=folder_path)
parm_file_name = None
raw_data_paths = list()
for item in file_list:
if item.endswith('parm.mat'):
parm_file_name = item
elif isinstance(get_chan_ind(item), int):
raw_data_paths.append(item)
# Both the parameter and data files MUST be found:
if parm_file_name is not None and len(raw_data_paths) > 0:
# Returning the path to the parameter file since this is what the translate() expects:
return path.join(folder_path, parm_file_name)
return None
def _parse_file_path(self, input_path):
    """Derive the output .h5 path and collect the raw .dat files.

    ``input_path`` is the '...parm.mat' file; the trailing 8 characters
    ('parm.mat') are stripped to obtain the dataset base name. Sets
    self.h5_path (removing any stale file at that location) and the
    sorted self.file_list.
    """
    folder_path, base_name = path.split(input_path)
    # Drop the trailing 'parm.mat' to get the dataset base name.
    stem = base_name[:-8]
    target = path.join(folder_path, stem + '.h5')
    # Start from a clean slate; a leftover file from an earlier run would
    # otherwise be appended to.
    if path.exists(target):
        remove(target)
    self.h5_path = target
    # Until a better method is provided: every .dat file in the folder is
    # assumed to belong to this dataset.
    self.file_list = sorted(
        path.join(folder_path, item)
        for item in listdir(folder_path) if '.dat' in item)
@staticmethod
def _parse_spectrogram_size(file_handle):
"""
Parameters
----------
file_handle
Returns
-------
data_length: int, size of the spectrogram
count: int, number of pixels in dataset +1
""""""
"""
f = file_handle
cont_cond = True
count = 0
data_lengths = []
while cont_cond:
#print(count, f.tell())
count += 1
data_length = np.fromfile(f, dtype=np.float32, count=1)
if data_length > 0:
data_lengths.append(int(data_length))
f.seek(int(data_length - 1) * 4, 1)
else:
cont_cond = False
if len(np.unique(np.array(data_lengths))) > 1:
print("Unequal data lengths! Cannot continue")
else:
print("Equal data lengths")
return data_lengths[0], count
def translate(self, parm_path):
    """
    The main function that translates the provided file into a .h5 file.

    Parameters
    ------------
    parm_path : string / unicode
        Absolute file path of the parameters .mat file.

    Returns
    ----------
    h5_path : string / unicode
        Absolute path of the translated h5 file
    """
    parm_path = path.abspath(parm_path)
    parm_dict, excit_wfm = self._read_parms(parm_path)
    # Keep every other DC value only.
    # NOTE(review): presumably dc_amp_vec interleaves on/off-field steps —
    # confirm against the acquisition code.
    excit_wfm = excit_wfm[1::2]
    self._parse_file_path(parm_path)
    num_dat_files = len(self.file_list)

    # Measure the record size from the first raw file.
    # NOTE(review): this handle is never closed.
    f = open(self.file_list[0], 'rb')
    spectrogram_size, count_vals = self._parse_spectrogram_size(f)
    print("Excitation waveform shape: ", excit_wfm.shape)
    print("spectrogram size:", spectrogram_size)
    num_pixels = parm_dict['grid_num_rows'] * parm_dict['grid_num_cols']
    print('Number of pixels: ', num_pixels)
    print('Count Values: ', count_vals)
    #if (num_pixels + 1) != count_vals:
    #    print("Data size does not match number of pixels expected. Cannot continue")

    # Find how many channels we have to make
    num_ai_chans = num_dat_files // 2  # Division by 2 due to real/imaginary

    # Now start creating datasets and populating:
    # Start with getting an h5 file.
    # NOTE(review): no explicit mode is passed; newer h5py versions require
    # one — confirm the intended open mode (historically 'a').
    h5_file = h5py.File(self.h5_path)

    # First create a measurement group
    h5_meas_group = create_indexed_group(h5_file, 'Measurement')

    # Set up some parameters that will be written as attributes to this
    # Measurement group.
    global_parms = dict()
    global_parms['data_type'] = 'trKPFM'
    global_parms['translator'] = 'trKPFM'
    write_simple_attrs(h5_meas_group, global_parms)
    write_simple_attrs(h5_meas_group, parm_dict)

    # Now start building the position and spectroscopic dimension containers.
    # The excit_wfm only has the DC values without any information on cycles,
    # time, etc., so the time component must be added: for every DC step
    # there are some time steps. The // 2 accounts for on and off field; the
    # - 5 excludes the 5 header values of each record.
    num_time_steps = (spectrogram_size-5) //excit_wfm.size //2
    # Three spectroscopic axes; in order of fastest to slowest varying:
    # time, field, bias.
    time_vec = np.linspace(0, parm_dict['IO_time'], num_time_steps)
    print('Num time steps: {}'.format(num_time_steps))
    print('DC Vec size: {}'.format(excit_wfm.shape))
    print('Spectrogram size: {}'.format(spectrogram_size))
    field_vec = np.array([0,1])
    spec_dims = [Dimension ('Time', 's', time_vec),Dimension('Field', 'Binary', field_vec),
                 Dimension('Bias', 'V', excit_wfm)]
    # NOTE(review): 'Cols' is sized by grid_num_rows and 'Rows' by
    # grid_num_cols — this looks swapped; verify against the fixed version.
    pos_dims = [Dimension('Cols', 'nm', parm_dict['grid_num_rows']),
                Dimension('Rows', 'um', parm_dict['grid_num_cols'])]

    self.raw_datasets = list()
    for chan_index in range(num_ai_chans):
        chan_grp = create_indexed_group(h5_meas_group,'Channel')
        # First channel carries the 1st harmonic, all others the 2nd.
        if chan_index == 0:
            write_simple_attrs(chan_grp,{'Harmonic': 1})
        else:
            write_simple_attrs(chan_grp,{'Harmonic': 2})
        h5_raw = write_main_dataset(chan_grp,  # parent HDF5 group
                                    (num_pixels, spectrogram_size - 5),
                                    # shape of Main dataset
                                    'Raw_Data',  # Name of main dataset
                                    'Deflection',  # Physical quantity contained in Main dataset
                                    'V',  # Units for the physical quantity
                                    pos_dims,  # Position dimensions
                                    spec_dims,  # Spectroscopic dimensions
                                    dtype=np.complex64,  # data type / precision
                                    compression='gzip',
                                    chunks=(1, spectrogram_size - 5),
                                    main_dset_attrs={'quantity': 'Complex'})
        #h5_refs = hdf.write(chan_grp, print_log=False)
        #h5_raw = get_h5_obj_refs(['Raw_Data'], h5_refs)[0]
        #link_h5_objects_as_attrs(h5_raw, get_h5_obj_refs(aux_ds_names, h5_refs))
        # The SAME dataset is appended twice on purpose: the .dat files come
        # in real/imaginary pairs, and _read_data() indexes raw_datasets once
        # per file, so both halves of a pair must resolve to one dataset.
        self.raw_datasets.append(h5_raw)
        self.raw_datasets.append(h5_raw)

    # Now that the N channels have been made, populate them with the actual data....
    self._read_data(parm_dict, parm_path, spectrogram_size)
    h5_file.file.close()
    #hdf.close()
    return self.h5_path
def _read_data(self, parm_dict, parm_path, data_length):
    """
    Reads the raw .dat files and populates the HDF5 Raw_Data datasets.

    Parameters
    ----------
    parm_dict : dict
        Dictionary containing parameters for this data.
    parm_path : string / unicode
        Absolute path of the parameters file (currently unused here;
        kept for interface compatibility with translate()).
    data_length : int
        Size of one spectrogram record, as returned by
        _parse_spectrogram_size().
    """
    # The .dat files alternate real / imaginary parts for each harmonic
    # (e.g. [re1, im1, re2, im2]); even-indexed files hold the real parts.
    # Scan through all the .dat files available.
    for ifile, file_path in enumerate(self.file_list):
        # read_file() closes this handle when it hits EOF.
        f = open(file_path, 'rb')
        results_p = self.read_file(data_length, f)
        spectrogram_matrix = np.array(results_p[:])
        b_axis = spectrogram_matrix.shape[2]
        dall = np.transpose(spectrogram_matrix, (0, 2, 1)).reshape(-1, b_axis)
        # Blank out repeated rows (duplicated pixels) with NaN.
        _, ia, ic = np.unique(dall, axis=0, return_index=True,
                              return_inverse=True)
        reprowind = np.setdiff1d(ic, ia)
        # BUG FIX: the original tested ``len(reprowind > 0)`` — the length
        # of a boolean array — which only worked by accident; test the
        # number of repeated rows directly.
        if len(reprowind) > 0:
            dall[reprowind, :] = np.nan

        # Write to the matching dataset (real part sets, imaginary adds).
        h5_main = self.raw_datasets[ifile]
        if ifile % 2 == 0:
            print('Dall Size is: ', dall.shape)
            # Error catching: if the last pixel is absent, just ignore it.
            try:
                h5_main[:, :] = dall.reshape(h5_main.shape) + 1j * 0
            except ValueError:
                h5_main[:-1, :] = dall.reshape(h5_main.shape[0] - 1,
                                               h5_main.shape[1]) + 1j * 0
        else:
            # Error catching: if the last pixel is absent, just ignore it.
            try:
                h5_main[:, :] += 0 + 1j * dall.reshape(h5_main.shape)
            except ValueError:
                h5_main[:-1, :] += 0 + 1j * dall.reshape(
                    h5_main.shape[0] - 1, h5_main.shape[1])
        h5_main.file.flush()
@staticmethod
def read_file(data_length, f):
start_point = 0
count = 0
count_vals = []
f.seek(start_point * 4, 0)
cont_cond = True
results_p = []
while cont_cond:
count_vals.append(count)
count += 1
data_vec = np.fromfile(f, dtype=np.float32, count=int(data_length))
data_vec1 = data_vec[5:int(data_length)]
if len(data_vec) > 1:
s1 = data_vec[3]
s2 = data_vec[4]
# print('Data_mat and s1,s2:', data_vec1.shape, s1, s2)
data_mat1 = data_vec1.reshape(int(s2), int(s1)).T
results_p.append(data_mat1)
else:
cont_cond = False
f.close()
return results_p
@staticmethod
def _read_parms(parm_path):
"""
Copies experimental parameters from the .mat file to a dictionary
Parameters
----------
parm_path : string / unicode
Absolute path of the parameters file
Returns
-------
parm_dict : dictionary
Dictionary containing all relevant parameters
excit_wfm : 1d numpy float array
Excitation waveform containing the full DC amplitude vector
"""
h5_f = loadmat(parm_path)
parm_dict = dict()
parm_dict['IO_samp_rate_[Hz]'] = np.uint32(h5_f['IO_rate'][0][0])
parm_dict['IO_time'] = np.float32(h5_f['IO_time'][0][0])
excit_wfm = np.float32(np.squeeze(h5_f['dc_amp_vec']))
parm_dict['grid_num_rows'] = np.int(h5_f['num_rows'][0][0])
parm_dict['grid_num_cols'] = np.int(h5_f['num_cols'][0][0])
return parm_dict, excit_wfm
Fixed Spec and Position Dims in TRKPFM translator
# -*- coding: utf-8 -*-
"""
Created on Thursday July 27 2017
@author: Rama Vasudevan, Suhas Somnath, Chris R. Smith
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import sys
from os import path, remove, listdir # File Path formatting
import re
import numpy as np # For array operations
import h5py
from scipy.io import loadmat
from pyUSID.io.translator import Translator
from pyUSID.io.write_utils import Dimension
from pyUSID.io.hdf_utils import write_simple_attrs, write_main_dataset, \
create_indexed_group
if sys.version_info.major == 3:
unicode = str
class TRKPFMTranslator(Translator):
    """
    Translates trKPFM datasets from .mat and .dat files to .h5
    """

    def __init__(self, *args, **kwargs):
        super(TRKPFMTranslator, self).__init__(*args, **kwargs)
        # Filled in by translate(): one HDF5 dataset reference per raw .dat
        # file. Each channel's dataset appears twice -- once for the file
        # holding the real part and once for the imaginary part.
        self.raw_datasets = None

    @staticmethod
    def is_valid_file(data_path):
        """
        Checks whether the provided file can be read by this translator

        Parameters
        ----------
        data_path : str
            Path to folder or any data / parameter file within the folder

        Returns
        -------
        obj : str
            Path to file that will be accepted by the translate() function if
            this translator is indeed capable of translating the provided file.
            Otherwise, None will be returned
        """

        def get_chan_ind(line):
            # Pull the integer channel index out of names like '<base>_ch01.dat';
            # returns None when the name does not match.
            match_obj = re.match(r'(.*)_ch(..).dat', line, re.M | re.I)
            type_list = [str, int]
            if match_obj:
                return \
                    [type_caster(match_obj.group(ind)) for ind, type_caster in
                     zip(range(1, 1 + len(type_list)), type_list)][-1]
            else:
                return None

        if path.isfile(data_path):
            # Assume that the file is amongst all other data files
            folder_path, _ = path.split(data_path)
        else:
            folder_path = data_path

        # Now looking at the folder with all necessary files:
        file_list = listdir(path=folder_path)

        parm_file_name = None
        raw_data_paths = list()
        for item in file_list:
            if item.endswith('parm.mat'):
                parm_file_name = item
            elif isinstance(get_chan_ind(item), int):
                raw_data_paths.append(item)

        # Both the parameter and data files MUST be found:
        if parm_file_name is not None and len(raw_data_paths) > 0:
            # Returning the path to the parameter file since this is what the translate() expects:
            return path.join(folder_path, parm_file_name)
        return None

    def _parse_file_path(self, input_path):
        """
        Derives the output .h5 path (removing any stale copy) and collects
        the sorted list of raw .dat files next to the parameter file.

        Parameters
        ----------
        input_path : str
            Absolute path of the '*parm.mat' parameters file.
        """
        folder_path, base_name = path.split(input_path)
        # Strip the trailing 'parm.mat' (8 characters) from the file name.
        base_name = base_name[:-8]

        h5_path = path.join(folder_path, base_name + '.h5')
        if path.exists(h5_path):
            remove(h5_path)

        self.h5_path = h5_path
        # Until a better method is provided....
        self.file_list = list()
        for file in listdir(folder_path):
            if '.dat' in file:
                self.file_list.append(path.join(folder_path, file))
        self.file_list = sorted(self.file_list)

    @staticmethod
    def _parse_spectrogram_size(file_handle):
        """
        Walks the record headers of a raw .dat file to determine the record
        (spectrogram) size and the number of records.

        Parameters
        ----------
        file_handle : file object
            Binary file handle opened for reading.

        Returns
        -------
        data_length : int
            Size of the spectrogram (float32 values per record).
        count : int
            Number of read attempts, i.e. number of pixels in dataset + 1.
        """
        f = file_handle
        cont_cond = True
        count = 0
        data_lengths = []
        while cont_cond:
            count += 1
            data_length = np.fromfile(f, dtype=np.float32, count=1)
            # BUG FIX: at end of file np.fromfile returns an EMPTY array and
            # truth-testing it is deprecated (an error on recent NumPy);
            # check the size explicitly before comparing.
            if data_length.size > 0 and data_length[0] > 0:
                data_lengths.append(int(data_length[0]))
                # Jump over the record body to the next header.
                f.seek(int(data_length[0] - 1) * 4, 1)
            else:
                cont_cond = False

        if len(np.unique(np.array(data_lengths))) > 1:
            print("Unequal data lengths! Cannot continue")
        else:
            print("Equal data lengths")

        return data_lengths[0], count

    def translate(self, parm_path):
        """
        The main function that translates the provided file into a .h5 file

        Parameters
        ------------
        parm_path : string / unicode
            Absolute file path of the parameters .mat file.

        Returns
        ----------
        h5_path : string / unicode
            Absolute path of the translated h5 file
        """
        parm_path = path.abspath(parm_path)
        parm_dict, excit_wfm = self._read_parms(parm_path)
        # Keep every other DC value of the excitation vector.
        excit_wfm = excit_wfm[1::2]
        self._parse_file_path(parm_path)

        num_dat_files = len(self.file_list)

        f = open(self.file_list[0], 'rb')
        spectrogram_size, count_vals = self._parse_spectrogram_size(f)
        # BUG FIX: this handle was previously never closed (leak).
        f.close()
        print("Excitation waveform shape: ", excit_wfm.shape)
        print("spectrogram size:", spectrogram_size)
        num_pixels = parm_dict['grid_num_rows'] * parm_dict['grid_num_cols']
        print('Number of pixels: ', num_pixels)
        print('Count Values: ', count_vals)
        # if (num_pixels + 1) != count_vals:
        #     print("Data size does not match number of pixels expected. Cannot continue")

        # Find how many channels we have to make
        num_ai_chans = num_dat_files // 2  # Division by 2 due to real/imaginary

        # Now start creating datasets and populating:
        # 'a' was the implicit default mode in h5py < 3; spell it out so the
        # call keeps working on h5py >= 3, which requires an explicit mode.
        h5_file = h5py.File(self.h5_path, mode='a')

        # First create a measurement group
        h5_meas_group = create_indexed_group(h5_file, 'Measurement')

        # Set up some parameters that will be written as attributes to this Measurement group
        global_parms = dict()
        global_parms['data_type'] = 'trKPFM'
        global_parms['translator'] = 'trKPFM'
        write_simple_attrs(h5_meas_group, global_parms)
        write_simple_attrs(h5_meas_group, parm_dict)

        # Now start building the position and spectroscopic dimension containers
        # There's only one spectroscopic dimension and two position dimensions
        # The excit_wfm only has the DC values without any information on cycles, time, etc.
        # What we really need is to add the time component. For every DC step there are some time steps.
        # Need to divide by 2 because it considers on and off field:
        num_time_steps = (spectrogram_size - 5) // excit_wfm.size // 2

        # There should be three spectroscopic axes.
        # In order of fastest to slowest varying, we have time, field, voltage.
        time_vec = np.linspace(0, parm_dict['IO_time'], num_time_steps)
        print('Num time steps: {}'.format(num_time_steps))
        print('DC Vec size: {}'.format(excit_wfm.shape))
        print('Spectrogram size: {}'.format(spectrogram_size))
        field_vec = np.array([0, 1])

        spec_dims = [Dimension('Time', 's', time_vec), Dimension('Field', 'Binary', field_vec),
                     Dimension('Bias', 'V', excit_wfm)]
        # BUG FIX: the rows dimension previously read the non-existent key
        # 'grid_num_nums' (KeyError); _read_parms only defines
        # 'grid_num_rows' and 'grid_num_cols'.
        pos_dims = [Dimension('Cols', 'm', int(parm_dict['grid_num_cols'])),
                    Dimension('Rows', 'm', int(parm_dict['grid_num_rows']))]

        self.raw_datasets = list()

        for chan_index in range(num_ai_chans):
            chan_grp = create_indexed_group(h5_meas_group, 'Channel')

            if chan_index == 0:
                write_simple_attrs(chan_grp, {'Harmonic': 1})
            else:
                write_simple_attrs(chan_grp, {'Harmonic': 2})

            h5_raw = write_main_dataset(chan_grp,  # parent HDF5 group
                                        (num_pixels, spectrogram_size - 5),
                                        # shape of Main dataset
                                        'Raw_Data',  # Name of main dataset
                                        'Deflection',  # Physical quantity contained in Main dataset
                                        'V',  # Units for the physical quantity
                                        pos_dims,  # Position dimensions
                                        spec_dims,  # Spectroscopic dimensions
                                        dtype=np.complex64,  # data type / precision
                                        compression='gzip',
                                        chunks=(1, spectrogram_size - 5),
                                        main_dset_attrs={'quantity': 'Complex'})

            # NOTE: each channel is intentionally appended TWICE. The sorted
            # file list interleaves real- and imaginary-part .dat files and
            # _read_data() indexes raw_datasets by file, so consecutive files
            # (real, imaginary) must resolve to the same dataset.
            self.raw_datasets.append(h5_raw)
            self.raw_datasets.append(h5_raw)

        # Now that the N channels have been made, populate them with the actual data....
        self._read_data(parm_dict, parm_path, spectrogram_size)

        h5_file.file.close()

        return self.h5_path

    def _read_data(self, parm_dict, parm_path, data_length):
        """
        Reads raw data and populates the h5 datasets

        Parameters
        ----------
        parm_dict : Dictionary
            dictionary containing parameters for this data
        parm_path : string / unicode
            Absolute path of the parameters file
        data_length : int
            Size (in float32 values) of a single spectrogram record.
        """
        # Determine number of pixels
        num_pixels = parm_dict['grid_num_rows'] * parm_dict['grid_num_cols']

        # The four files in TRKPFM are for real and imaginary parts for 1st, 2nd harmonic.
        # Build [True, False, True, False, ...] so each file is routed to the
        # real (True) or imaginary (False) write path of its channel.
        real_cond = [ifile % 2 == 0 for ifile in range(len(self.file_list))]

        # Scan through all the .dat files available
        for ifile, file_path in enumerate(self.file_list):
            f = open(file_path, 'rb')
            # read_file() closes the handle when done.
            results_p = self.read_file(data_length, f)
            spectrogram_matrix = np.array(results_p[:])
            b_axis = spectrogram_matrix.shape[2]

            dall = np.transpose(spectrogram_matrix, (0, 2, 1)).reshape(-1, b_axis)

            # Blank out repeated rows (duplicated pixels) with NaN.
            _, ia, ic = np.unique(dall, axis=0, return_index=True, return_inverse=True)
            reprowind = np.setdiff1d(ic, ia)
            # BUG FIX: was 'len(reprowind > 0)', which measures the length of
            # a boolean array; test the number of repeated rows instead.
            if len(reprowind) > 0:
                dall[reprowind, :] = np.nan

            # Write to the datasets
            h5_main = self.raw_datasets[ifile]
            if real_cond[ifile]:
                print('Dall Size is: ', dall.shape)
                # Do some error catching. In case the last pixel is absent, then just ignore it.
                try:
                    h5_main[:, :] = dall.reshape(h5_main.shape) + 1j * 0
                except ValueError:
                    h5_main[:-1, :] = dall.reshape(h5_main.shape[0] - 1, h5_main.shape[1]) + 1j * 0
            else:
                # Error catching. In case the last pixel is absent, then just ignore it.
                try:
                    h5_main[:, :] += 0 + 1j * dall.reshape(h5_main.shape)
                except ValueError:
                    h5_main[:-1, :] += 0 + 1j * dall.reshape(h5_main.shape[0] - 1, h5_main.shape[1])

            h5_main.file.flush()

    @staticmethod
    def read_file(data_length, f):
        """
        Read all spectrogram records from an open raw .dat file.

        Each record is ``data_length`` float32 values: a 5-value header
        (entries 3 and 4 hold the matrix dimensions s1 and s2) followed by
        the s1 * s2 spectrogram values. Reading stops at the first read that
        yields fewer than two values (end of file). The file handle is
        closed before returning.

        Parameters
        ----------
        data_length : int
            Number of float32 values per record, including the 5-value header.
        f : file object
            Binary file handle opened for reading.

        Returns
        -------
        results_p : list of numpy.ndarray
            One (s1, s2)-shaped float32 matrix per record, in file order.
        """
        # Always start reading from the beginning of the file.
        f.seek(0, 0)
        results_p = []
        while True:
            data_vec = np.fromfile(f, dtype=np.float32, count=int(data_length))
            if len(data_vec) <= 1:
                # Incomplete (or empty) record: no more data.
                break
            # Header entries 3 and 4 carry the matrix shape.
            s1 = int(data_vec[3])
            s2 = int(data_vec[4])
            data_vec1 = data_vec[5:int(data_length)]
            results_p.append(data_vec1.reshape(s2, s1).T)
        f.close()
        return results_p

    @staticmethod
    def _read_parms(parm_path):
        """
        Copies experimental parameters from the .mat file to a dictionary

        Parameters
        ----------
        parm_path : string / unicode
            Absolute path of the parameters file

        Returns
        -------
        parm_dict : dictionary
            Dictionary containing all relevant parameters
        excit_wfm : 1d numpy float array
            Excitation waveform containing the full DC amplitude vector
        """
        h5_f = loadmat(parm_path)
        parm_dict = dict()
        parm_dict['IO_samp_rate_[Hz]'] = np.uint32(h5_f['IO_rate'][0][0])
        parm_dict['IO_time'] = np.float32(h5_f['IO_time'][0][0])

        excit_wfm = np.float32(np.squeeze(h5_f['dc_amp_vec']))

        # BUG FIX: np.int was removed in NumPy >= 1.20; builtin int is equivalent.
        parm_dict['grid_num_rows'] = int(h5_f['num_rows'][0][0])
        parm_dict['grid_num_cols'] = int(h5_f['num_cols'][0][0])

        return parm_dict, excit_wfm
# |
import json
import random
from utils import RangedNum, RangedInt, RangedJSONEncoder, flip_coin
from builder.network_builder import Network
import os
import math
import copy
import logging
class CandidateNN:
    """
    One candidate network topology for the genetic search.

    Holds a mutable ``network_spec`` (hyperparameters plus an ordered list of
    layer dicts) and supports crossover with another candidate, mutation,
    structural diversity comparison and fitness evaluation through the
    ``Network`` builder.
    """

    # ---------------------- Static class attributes ----------------------
    OPTIMIZER_CHOICES = ('AdamOptimizer', 'AdadeltaOptimizer', 'AdagradOptimizer',
                         'FtrlOptimizer', 'ProximalGradientDescentOptimizer', 'ProximalAdagradOptimizer',
                         'RMSPropOptimizer', 'GradientDescentOptimizer')
    ACTIVATION_CHOICES = ('relu', 'relu6', 'sigmoid', 'tanh', 'elu', 'softplus', 'softsign')
    LAYER_TYPES = ("conv_layer", "maxpool_layer", "feedforward_layer")
    # Term weights of the 's1' fitness function.
    ACCURACY_WEIGHT = 20
    LAYER_CNT_WEIGHT = 2
    WEIGHTS_CNT_WEIGHT = 0.1

    # Per layer type: parameter key (a tuple of nested layer-dict keys, or a
    # bare string for a top-level key) -> allowed range and value type.
    OPTIMIZING_PARMS = {
        'conv_layer': {
            ('filter', 'height'): {
                'min': 1,
                'max': 5,
                'type': RangedInt
            },
            ('filter', 'width'): {
                'min': 1,
                'max': 5,
                'type': RangedInt
            },
            # BUG FIX: this key was ('filter', 'outchannel') while the layer
            # dicts and _create_conv_layer use 'outchannels' (KeyError).
            ('filter', 'outchannels'): {
                'min': 1,
                'max': 64,
                'type': RangedInt
            },
            ('strides', 'x'): {
                'min': 1,
                'max': 2,
                'type': RangedInt
            },
            ('strides', 'y'): {
                'min': 1,
                'max': 2,
                'type': RangedInt
            },
            ('strides', 'inchannels'): {
                'min': 1,
                'max': 1,
                'type': RangedInt
            },
            ('strides', 'batch'): {
                'min': 1,
                'max': 1,
                'type': RangedInt
            }
        },
        'maxpool_layer': {
            ('kernel', 'height'): {
                'min': 1,
                'max': 5,
                'type': RangedInt
            },
            ('kernel', 'width'): {
                'min': 1,
                'max': 5,
                'type': RangedInt
            },
            ('kernel', 'outchannels'): {
                'min': 1,
                'max': 1,
                'type': RangedInt
            },
            ('strides', 'x'): {
                'min': 1,
                'max': 5,
                'type': RangedInt
            },
            ('strides', 'y'): {
                'min': 1,
                'max': 5,
                'type': RangedInt
            },
            ('strides', 'inchannels'): {
                'min': 1,
                'max': 1,
                'type': RangedInt
            },
            ('strides', 'batch'): {
                'min': 1,
                'max': 1,
                'type': RangedInt
            }
        },
        'feedforward_layer': {
            'size': {
                'min': 256,
                'max': 2048,
                'type': RangedInt
            }
        }
    }

    def __init__(self, candidate_id, start_time_str, runtime_spec, network_spec=None):
        """Create a candidate; a random network spec is generated when none is given."""
        self.runtime_spec = copy.deepcopy(runtime_spec)
        self._base_logdir = os.path.join(self.runtime_spec['logdir'], str(start_time_str))
        self._candidate_id = candidate_id
        # Cached fitness; None means "not evaluated yet".
        self._fitness = None

        if network_spec is None:
            network_spec = self._create_random_network()
        self.network_spec = network_spec

    def to_next_generation(self, generation):
        """Transfer the candidate to the next generation."""
        generation_dir = 'generation_{}/'.format(generation)
        id_dir = '{}/'.format(self._candidate_id)
        self.runtime_spec['logdir'] = os.path.join(self._base_logdir, generation_dir, id_dir)
        self.network_spec.update(self.runtime_spec)

    def crossover(self, crossover_parms, other_candidate):
        """Recombine this candidate with another one; invalidates cached fitness.

        Raises
        ------
        ValueError
            If the requested crossover strategy is not implemented.
        """
        self._fitness = None
        if crossover_parms['strategy'] == 'uniform_crossover':
            self._crossover_uniform(crossover_rate=crossover_parms['rate'],
                                    other_candidate=other_candidate,
                                    uniform_method=crossover_parms['uniform_method'])
        else:
            raise ValueError('not implemented crossover strategy')

    def _crossover_uniform2(self, crossover_rate, other_candidate, uniform_method):
        """Perform a uniform crossover between two candidates (index-aligned variant)."""
        if uniform_method == 'swap':
            min_layers = min(len(self.network_spec['layers']), len(other_candidate.network_spec['layers']))
            for layer_idx, layer in enumerate(self.network_spec['layers'][:min_layers]):
                layer_dict = layer
                other_layer_dict = other_candidate.network_spec['layers'][layer_idx]
                # Cross whole layer (with a reduced probability).
                if flip_coin(crossover_rate / 5):
                    tmp = copy.deepcopy(other_layer_dict)
                    other_candidate.network_spec['layers'][layer_idx] = copy.deepcopy(layer)
                    self.network_spec['layers'][layer_idx] = tmp
                else:
                    if ('activation_function' in layer_dict and
                            'activation_function' in other_layer_dict and
                            flip_coin(crossover_rate)):
                        layer_dict['activation_function'] = other_layer_dict['activation_function']
                    if layer_dict['type'] == other_layer_dict['type']:
                        self._swap_values(layer_dict, other_layer_dict, crossover_rate)
        else:
            raise NotImplementedError('Not implemented uniform_crossover_method')

    def _crossover_uniform(self, crossover_rate, other_candidate, uniform_method):
        """Cross randomly chosen layer pairs between the two candidates."""
        min_layers = min(len(self.network_spec['layers']), len(other_candidate.network_spec['layers']))
        num_layer_crossover = max(1, int(min_layers * crossover_rate))
        for swap_idx in range(num_layer_crossover):
            layer_idx1 = random.randint(0, len(self.network_spec['layers']) - 1)
            layer_idx2 = random.randint(0, len(other_candidate.network_spec['layers']) - 1)
            # If type is the same
            if self.network_spec['layers'][layer_idx1]['type'] == other_candidate.network_spec['layers'][layer_idx2][
                    'type']:
                # Make complete or parm cross
                if flip_coin():  # Cross complete layer with lower probability
                    logging.info("crossing:sameType:layer")
                    tmp = self.network_spec['layers'][layer_idx1]
                    self.network_spec['layers'][layer_idx1] = other_candidate.network_spec['layers'][layer_idx2]
                    other_candidate.network_spec['layers'][layer_idx2] = tmp
                else:  # Same Type and cross elementwise
                    logging.info("crossing:sameType:parms")
                    self._swap_values(self.network_spec['layers'][layer_idx1],
                                      other_candidate.network_spec['layers'][layer_idx2], crossover_rate)
                    # Cross activation function
                    if ('activation_function' in self.network_spec['layers'][layer_idx1]
                            and 'activation_function' in other_candidate.network_spec['layers'][layer_idx2]
                            and flip_coin(crossover_rate)):
                        self.network_spec['layers'][layer_idx1]['activation_function'] \
                            = other_candidate.network_spec['layers'][layer_idx2]['activation_function']
            else:  # not the same, swap layer
                logging.info("crossing:layer")
                tmp = self.network_spec['layers'][layer_idx1]
                self.network_spec['layers'][layer_idx1] = other_candidate.network_spec['layers'][layer_idx2]
                other_candidate.network_spec['layers'][layer_idx2] = tmp

    @staticmethod
    def _as_hierarchy(parm_key):
        """Normalise an OPTIMIZING_PARMS key to a tuple of nested layer-dict keys."""
        return parm_key if isinstance(parm_key, tuple) else (parm_key,)

    @staticmethod
    def _get_parm(layer_dict, hierarchy):
        """Fetch a (possibly nested) parameter value from a layer dict."""
        value = layer_dict
        for key in hierarchy:
            value = value[key]
        return value

    @staticmethod
    def _set_parm(layer_dict, hierarchy, value):
        """Set a (possibly nested) parameter value in a layer dict."""
        target = layer_dict
        for key in hierarchy[:-1]:
            target = target[key]
        target[hierarchy[-1]] = value

    def _swap_values(self, dict, other_dict, rate):
        """Swap individual parameter values between two layers of the same
        type, each with probability `rate`.

        (The first parameter keeps its historical name 'dict' -- which
        shadows the builtin -- to preserve the existing call signature.)
        """
        # BUG FIX: the original iterated the OPTIMIZING_PARMS *keys* and then
        # subscripted them like dicts (parm['parms']['hierarchy']), raising a
        # TypeError on first use. The keys themselves ARE the hierarchy.
        for parm_key in self.OPTIMIZING_PARMS[dict['type']]:
            if flip_coin(rate):
                hierarchy = self._as_hierarchy(parm_key)
                mine = self._get_parm(dict, hierarchy)
                theirs = self._get_parm(other_dict, hierarchy)
                self._set_parm(dict, hierarchy, theirs)
                self._set_parm(other_dict, hierarchy, mine)

    def mutation(self, mutation_rate):
        # TODO: Check the mutation of a layer and the mutation of properties, layer mutation can hide value mutation
        """
        Mutate properties (layer structure and layer values) of a Candidate.

        With probability `mutation_rate` a random layer is inserted (while
        below `max_layer`) or one is deleted; afterwards each layer is either
        replaced wholesale (probability `mutation_rate` / 10) or has its
        values mutated. Invalidates the cached fitness.
        """
        self._fitness = None
        # Determine whether to change number of layers.
        if flip_coin(mutation_rate):
            if flip_coin():
                if len(self.network_spec['layers']) < self.runtime_spec['max_layer']:
                    # Get random index for insertion.
                    insertion_idx = random.randint(0, len(self.network_spec['layers']))
                    # Add random layer.
                    self.network_spec['layers'].insert(insertion_idx, self._create_randomize_layer())
            else:
                # Get random index for deletion.
                deletion_idx = random.randint(0, len(self.network_spec['layers']) - 1)
                # Delete one of the layers.
                del self.network_spec['layers'][deletion_idx]

        # Mutate layer
        for i, layer_spec in enumerate(self.network_spec['layers']):
            # Mutate complete layer.
            if flip_coin(mutation_rate / 10):
                self.network_spec['layers'][i] = self._create_randomize_layer()
            else:
                # Only mutate Values if no new random layer
                self._mutate_layer_values(layer_spec=self.network_spec['layers'][i], mutation_rate=mutation_rate)

    def _mutate_layer_values(self, layer_spec, mutation_rate):
        """
        Mutate the activation function with probability `mutation_rate` and
        perturb every mutable (min != max) parameter of the layer.
        """
        if flip_coin(mutation_rate):
            layer_spec['activation_function'] = random.choice(self.ACTIVATION_CHOICES)
        # BUG FIX: the original iterated only the keys and subscripted them
        # like dicts; iterate items() to get the range specs as well.
        for parm_key, parm_spec in self.OPTIMIZING_PARMS[layer_spec['type']].items():
            if parm_spec['max'] == parm_spec['min']:
                continue  # fixed parameter, nothing to mutate
            variance = (parm_spec['max'] - parm_spec['min']) / 2
            if variance == 0:
                variance = 1
            if parm_spec['type'] is RangedInt:
                variance = int(variance)
            hierarchy = self._as_hierarchy(parm_key)
            old_value = self._get_parm(layer_spec, hierarchy)
            self._set_parm(layer_spec, hierarchy,
                           self._mutation_value_strategy(old_value=old_value, variance=variance))

    def _mutation_value_strategy(self, old_value, variance):
        """ sub/add a number between -variance and variance"""
        return old_value + old_value.__class__(-variance, variance).value

    def get_diversity(self, other_candidate):
        """Return a normalised structural distance between the two candidates."""
        div = 0
        div += abs(len(self.network_spec['layers']) - len(other_candidate.network_spec['layers']))
        min_layers = min(len(self.network_spec['layers']), len(other_candidate.network_spec['layers']))
        for layer_idx, layer_dict in enumerate(self.network_spec['layers'][:min_layers]):
            other_layer_dict = other_candidate.network_spec['layers'][layer_idx]
            if layer_dict['type'] == other_layer_dict['type']:
                # make deeper compare
                mutable_parms = 0
                div_parms = 0
                for parm_key, parm_spec in self.OPTIMIZING_PARMS[layer_dict['type']].items():
                    if parm_spec['max'] == parm_spec['min']:
                        # BUG FIX: was 'break', which skipped ALL remaining
                        # parameters; non-mutable parameters should merely be skipped.
                        continue
                    mutable_parms += 1
                    hierarchy = self._as_hierarchy(parm_key)
                    if self._get_parm(layer_dict, hierarchy) != self._get_parm(other_layer_dict, hierarchy):
                        div_parms += 1
                # Guard against a layer type with no mutable parameters.
                if mutable_parms > 0:
                    div += (div_parms / mutable_parms)
            else:
                div += 1
        max_layers = max(len(self.network_spec['layers']), len(other_candidate.network_spec['layers']))
        return div / max_layers

    def get_fitness(self):
        """Get fitness of the candidate. If not yet tested, test the fitness based on the network specificaton."""
        if self._fitness is None:
            network = Network(self._serialize_network_spec())
            extended_spec_json = network.evaluate()
            extended_spec = json.loads(extended_spec_json)
            result_spec = extended_spec['results']
            print(result_spec)
            del network
            if self.runtime_spec['fitness_strategy'] == 'accuracy':
                self._fitness = self._fitness_function_accuracy(result_spec, self.runtime_spec['fitness_power'])
            elif self.runtime_spec['fitness_strategy'] == 's1':
                self._fitness = self._fitness_function_s1(result_spec)
            else:
                raise ValueError('fitnesss strategy {} is not implemented.'.format(self.runtime_spec['fitness_strategy']))
        return self._fitness

    def _fitness_function_accuracy(self, results, power=1):
        """Fitness is the raw accuracy raised to `power`."""
        return results['accuracy'] ** power

    def _fitness_function_s1(self, results):
        """Calculate the fitness based on the network evaluation."""
        # TODO: get the number of weights as penalty?
        return 1 / (- self.ACCURACY_WEIGHT * math.log(results['accuracy'])
                    + self.LAYER_CNT_WEIGHT * len(self.network_spec['layers'])
                    + self.WEIGHTS_CNT_WEIGHT * results['n_weights'])

    def _create_random_network(self):
        """Construct a random network specification."""
        # Finalize runtime specification.
        layer_cnt = RangedInt(1, self.runtime_spec['max_layer'])

        network_spec = {
            'hyperparameters': {
                'learningrate': RangedNum(1e-4, 1e-3),
                'optimizer': random.choice(self.OPTIMIZER_CHOICES),
                'batchsize': 100  # Fixed batch size for comparability.
            },
            'layers': []
        }

        # Split the layer budget among conv / maxpool / feedforward; the
        # feedforward layers always come last.
        cnt_layer_conv = RangedInt(0, layer_cnt.value)
        cnt_layer_max_pool = RangedInt(0, layer_cnt.value - cnt_layer_conv.value)
        cnt_layer_ff = layer_cnt.value - cnt_layer_conv.value - cnt_layer_max_pool.value

        layer_types = ['conv_layer' for _ in range(cnt_layer_conv.value)]
        layer_types += ['maxpool_layer' for _ in range(cnt_layer_max_pool.value)]
        random.shuffle(layer_types)
        layer_types += ['feedforward_layer' for _ in range(cnt_layer_ff)]

        for layer_type in layer_types:
            layer_spec = self._create_randomize_layer(layer_type=layer_type)
            # Add layer to the network spec.
            network_spec['layers'].append(layer_spec)

        return network_spec

    def _create_randomize_layer(self, layer_type=None):
        """
        Create a layer based on layer_type

        Raises
        ------
        ValueError
            If `layer_type` is not one of LAYER_TYPES.
        """
        if layer_type is None:
            layer_type = random.choice(self.LAYER_TYPES)

        if layer_type == 'conv_layer':
            layer_spec = self._create_conv_layer()
        elif layer_type == 'maxpool_layer':
            layer_spec = self._create_maxpool_layer()
        elif layer_type == 'feedforward_layer':
            layer_spec = self._create_ff_layer()
        else:
            raise ValueError('Invalid layer type {}'.format(layer_type))
        return layer_spec

    def _new_ranged(self, layer_type, parm_key):
        """Instantiate a random ranged value for the given OPTIMIZING_PARMS entry."""
        spec = self.OPTIMIZING_PARMS[layer_type][parm_key]
        return spec['type'](spec['min'], spec['max'])

    def _create_ff_layer(self):
        """
        Create dict for a random initialized Feedforward network
        :return:
        """
        # BUG FIX: the range spec lives under the 'size' key;
        # OPTIMIZING_PARMS['feedforward_layer']['type'] does not exist (KeyError).
        layer = {
            'type': 'feedforward_layer',
            'size': self._new_ranged('feedforward_layer', 'size'),
            'activation_function': random.choice(self.ACTIVATION_CHOICES)
        }
        return layer

    def _create_conv_layer(self):
        """
        Create dict for a random initialized convolutional Layer
        """
        layer = {
            'type': 'conv_layer',
            'filter': {
                'height': self._new_ranged('conv_layer', ('filter', 'height')),
                'width': self._new_ranged('conv_layer', ('filter', 'width')),
                'outchannels': self._new_ranged('conv_layer', ('filter', 'outchannels'))
            },
            'strides': {
                'x': self._new_ranged('conv_layer', ('strides', 'x')),
                'y': self._new_ranged('conv_layer', ('strides', 'y')),
                'inchannels': 1,  # Must be 1. See https://www.tensorflow.org/api_docs/python/tf/nn/conv2d
                'batch': 1
            },
            'activation_function': random.choice(self.ACTIVATION_CHOICES)
        }
        return layer

    def _create_maxpool_layer(self):
        """
        Create dict for a random initialized Maxpool-layer
        """
        layer = {
            'type': 'maxpool_layer',
            'kernel': {
                'height': self._new_ranged('maxpool_layer', ('kernel', 'height')),
                'width': self._new_ranged('maxpool_layer', ('kernel', 'width')),
                'outchannels': 1,
            },
            'strides': {
                'y': self._new_ranged('maxpool_layer', ('strides', 'y')),
                'x': self._new_ranged('maxpool_layer', ('strides', 'x')),
                'inchannels': 1,
                # Must probably be 1 as well. See https://www.tensorflow.org/api_docs/python/tf/nn/conv2d
                'batch': 1
            }
        }
        return layer

    def _serialize_network_spec(self):
        """Serialize the network spec (including Ranged* values) to JSON."""
        return RangedJSONEncoder().encode(self.network_spec)
# Optimized OPTIMIZING_PARMS
import json
import random
from utils import RangedNum, RangedInt, RangedJSONEncoder, flip_coin
from builder.network_builder import Network
import os
import math
import copy
import logging
class CandidateNN:
# ---------------------- Static class attributes ----------------------
OPTIMIZER_CHOICES = ('AdamOptimizer', 'AdadeltaOptimizer', 'AdagradOptimizer',
'FtrlOptimizer', 'ProximalGradientDescentOptimizer', 'ProximalAdagradOptimizer',
'RMSPropOptimizer', 'GradientDescentOptimizer')
ACTIVATION_CHOICES = ('relu', 'relu6', 'sigmoid', 'tanh', 'elu', 'softplus', 'softsign')
LAYER_TYPES = ("conv_layer", "maxpool_layer", "feedforward_layer")
ACCURACY_WEIGHT = 20
LAYER_CNT_WEIGHT = 2
WEIGHTS_CNT_WEIGHT = 0.1
OPTIMIZING_PARMS = {
'conv_layer':{
('filter','height'):{
'min': 1,
'max': 5,
'type': RangedInt
},
('filter','width'):{
'min': 1,
'max': 5,
'type': RangedInt
},
('filter','outchannel'):{
'min': 1,
'max': 64,
'type': RangedInt
},
('strides', 'x'): {
'min': 1,
'max': 2,
'type': RangedInt
},
('strides', 'y'): {
'min': 1,
'max': 2,
'type': RangedInt
},
('strides','inchannels'): {
'min': 1,
'max': 1,
'type': RangedInt
},
('strides', 'batch'): {
'min': 1,
'max': 1,
'type': RangedInt
}
},
'maxpool_layer':{
('kernel', 'height'): {
'min': 1,
'max': 5,
'type': RangedInt
},
('kernel', 'width'): {
'min': 1,
'max': 5,
'type': RangedInt
},
('kernel', 'outchannels'): {
'min': 1,
'max': 1,
'type': RangedInt
},
('strides', 'x'): {
'min': 1,
'max': 5,
'type': RangedInt
},
('strides', 'y'): {
'min': 1,
'max': 5,
'type': RangedInt
},
('strides', 'inchannels'): {
'min': 1,
'max': 1,
'type': RangedInt
},
('strides', 'batch'): {
'min': 1,
'max': 1,
'type': RangedInt
}
},
'feedforward_layer': {
('size'): {
'min': 256,
'max': 2048,
'type': RangedInt
}
}
}
def __init__(self, candidate_id, start_time_str, runtime_spec, network_spec=None):
self.runtime_spec = copy.deepcopy(runtime_spec)
self._base_logdir = os.path.join(self.runtime_spec['logdir'], str(start_time_str))
self._candidate_id = candidate_id
self._fitness = None
if network_spec is None:
network_spec = self._create_random_network()
self.network_spec = network_spec
def to_next_generation(self, generation):
"""Transfer the candidate to the next generation."""
generation_dir = 'generation_{}/'.format(generation)
id_dir = '{}/'.format(self._candidate_id)
self.runtime_spec['logdir'] = os.path.join(self._base_logdir, generation_dir, id_dir)
self.network_spec.update(self.runtime_spec)
def crossover(self, crossover_parms, other_candidate):
self._fitness = None
if crossover_parms['strategy'] == 'uniform_crossover':
self._crossover_uniform(crossover_rate=crossover_parms['rate'],
other_candidate=other_candidate,
uniform_method=crossover_parms['uniform_method'])
else:
raise ValueError('not implemented crossover strategy')
    def _crossover_uniform(self, crossover_rate, other_candidate, uniform_method):
        """Cross randomly chosen layer pairs between the two candidates.

        Picks max(1, min_layers * crossover_rate) random layer pairs. Layers
        of the same type are either swapped wholesale or crossed value-wise
        (plus a chance to swap the activation function); layers of different
        types are always swapped wholesale. Mutates BOTH candidates in place.
        NOTE: `uniform_method` is accepted for signature compatibility but is
        not consulted here.
        """
        min_layers = min(len(self.network_spec['layers']), len(other_candidate.network_spec['layers']))
        num_layer_crossover = max(1, int(min_layers * crossover_rate))
        for swap_idx in range(num_layer_crossover):
            # Independent random picks on each side; an index can repeat.
            layer_idx1 = random.randint(0, len(self.network_spec['layers']) - 1)
            layer_idx2 = random.randint(0, len(other_candidate.network_spec['layers']) - 1)
            # If type is the same
            if self.network_spec['layers'][layer_idx1]['type'] == other_candidate.network_spec['layers'][layer_idx2][
                'type']:
                # Make complete or parm cross
                if flip_coin():  # Cross complete layer with lower probability
                    logging.info("crossing:sameType:layer")
                    tmp = self.network_spec['layers'][layer_idx1]
                    self.network_spec['layers'][layer_idx1] = other_candidate.network_spec['layers'][layer_idx2]
                    other_candidate.network_spec['layers'][layer_idx2] = tmp
                else:  # Same Type and cross elementwise
                    logging.info("crossing:sameType:parms")
                    self._swap_values(self.network_spec['layers'][layer_idx1],
                                      other_candidate.network_spec['layers'][layer_idx2], crossover_rate)
                    # Cross activation function
                    if ('activation_function' in self.network_spec['layers'][layer_idx1]
                            and 'activation_function' in other_candidate.network_spec['layers'][layer_idx2]
                            and flip_coin(crossover_rate)):
                        self.network_spec['layers'][layer_idx1]['activation_function'] \
                            = other_candidate.network_spec['layers'][layer_idx2]['activation_function']
            else:  # not the same, swap layer
                logging.info("crossing:layer")
                tmp = self.network_spec['layers'][layer_idx1]
                self.network_spec['layers'][layer_idx1] = other_candidate.network_spec['layers'][layer_idx2]
                other_candidate.network_spec['layers'][layer_idx2] = tmp
def _swap_values(self, dict, other_dict, rate):
"""Swaps Properties between two Layers of the same type with Propapility rate"""
for idx, layer_parm in enumerate(self.OPTIMIZING_PARMS[dict['type']]):
if flip_coin(rate):
if len(layer_parm) == 1:
# Save old own
tmp = dict[layer_parm[0]]
# own in other
dict[layer_parm[0]] = other_dict[layer_parm[0]]
# saved in own
other_dict[layer_parm[0]] = tmp
elif len(layer_parm) == 2:
# Save old own
tmp = dict[layer_parm[0]][layer_parm[1]]
# own in other
dict[layer_parm[0]][layer_parm[1]] = other_dict[layer_parm[0]][layer_parm[1]]
# saved in own
other_dict[layer_parm[0]][layer_parm[1]] = tmp
else:
raise ValueError('length of hierarchy must 1 or 2')
    def mutation(self, mutation_rate):
        # TODO: Check the mutation of a layer and the mutation of properties, layer mutation can hide value mutation
        """
        Mutate properties (layer structure and layer values) of a Candidate.

        With probability `mutation_rate`, a random layer is either inserted
        (while below `max_layer`) or deleted. Afterwards, each layer is
        either replaced wholesale (probability `mutation_rate` / 10) or has
        its individual values mutated. Invalidates the cached fitness.
        """
        # Any structural change invalidates the previously computed fitness.
        self._fitness = None
        # Determine whether to change number of layers.
        if flip_coin(mutation_rate):
            if flip_coin():
                if len(self.network_spec['layers']) < self.runtime_spec['max_layer']:
                    # Get random index for insertion.
                    insertion_idx = random.randint(0, len(self.network_spec['layers']))
                    # Add random layer.
                    self.network_spec['layers'].insert(insertion_idx, self._create_randomize_layer())
            else:
                # Get random index for deletion.
                deletion_idx = random.randint(0, len(self.network_spec['layers']) - 1)
                # Delete one of the layers.
                del self.network_spec['layers'][deletion_idx]

        # Mutate layer
        for i, layer_spec in enumerate(self.network_spec['layers']):
            # Mutate complete layer.
            if flip_coin(mutation_rate / 10):
                self.network_spec['layers'][i] = self._create_randomize_layer()
            else:
                # Only mutate Values if no new random layer
                self._mutate_layer_values(layer_spec=self.network_spec['layers'][i], mutation_rate=mutation_rate)
def _mutate_layer_values(self, layer_spec, mutation_rate):
    """
    Mutate the values of a layer in place.

    The activation function is re-rolled with probability `mutation_rate`;
    every mutable numeric parameter (those with max != min) is then shifted
    by a random amount via `_mutation_value_strategy`.

    NOTE(review): `layer_parm` is indexed both like a mapping
    (`layer_parm['max']`, `['min']`, `['type']`) and like a sequence
    (`layer_parm[0]`, `len(layer_parm)`) — this only works if
    OPTIMIZING_PARMS entries are a project-specific hybrid type; confirm
    against its definition.
    """
    if flip_coin(mutation_rate):
        layer_spec['activation_function'] = random.choice(self.ACTIVATION_CHOICES)
    for idx, layer_parm in enumerate(self.OPTIMIZING_PARMS[layer_spec['type']]):
        if layer_parm['max'] != layer_parm['min']:
            # Half the parameter range is used as the mutation variance.
            variance = (layer_parm['max'] - layer_parm['min']) / 2
            if variance == 0:
                variance = 1
            if layer_parm['type'] is RangedInt:
                # Integer-valued parameters get an integer variance.
                variance = round(variance,0)
            if len(layer_parm) == 1:
                layer_spec[layer_parm[0]] = self._mutation_value_strategy(
                    old_value=layer_spec[layer_parm[0]],
                    variance=variance)
            elif len(layer_parm) == 2:
                layer_spec[layer_parm[0]][layer_parm[1]] = self._mutation_value_strategy(
                    old_value=layer_spec[layer_parm[0]][layer_parm[1]],
                    variance=variance)
            else:
                raise ValueError('length of hierarchy must 1 or 2')
def _mutation_value_strategy(self, old_value, variance):
""" sub/add a number between -variance and variance"""
return old_value + old_value.__class__(-variance, variance).value
def get_diversity(self, other_candidate):
    """Compute a structural diversity score against another candidate.

    The score sums the difference in layer counts, one point per layer
    whose type differs, and (for layers of the same type) the fraction of
    mutable parameters whose values differ; the total is normalised by the
    larger layer count.

    Args:
        other_candidate: the candidate to compare against.

    Returns:
        float: 0 for structurally identical candidates, larger values for
            more diverse pairs.

    Raises:
        ValueError: if a parameter hierarchy is deeper than two levels.
    """
    div = 0
    # Penalise differing network depth.
    div += abs(len(self.network_spec['layers']) - len(other_candidate.network_spec['layers']))
    min_layers = min(len(self.network_spec['layers']), len(other_candidate.network_spec['layers']))
    for layer_idx, layer in enumerate(self.network_spec['layers'][:min_layers]):
        layer_dict = layer
        other_layer_dict = other_candidate.network_spec['layers'][layer_idx]
        if layer_dict['type'] == other_layer_dict['type']:
            # Same type: make a deeper, per-parameter comparison.
            mutable_parms = 0
            div_parms = 0
            for layer_parm in self.OPTIMIZING_PARMS[layer_dict['type']]:
                if layer_parm['max'] == layer_parm['min']:
                    # Not mutable: skip this parameter.  (Was `break`,
                    # which ignored every parameter after the first
                    # immutable one and then divided by zero below.)
                    continue
                mutable_parms += 1
                if len(layer_parm) == 1:
                    if layer_dict[layer_parm[0]] != other_layer_dict[layer_parm[0]]:
                        div_parms += 1
                elif len(layer_parm) == 2:
                    if layer_dict[layer_parm[0]][layer_parm[1]] != other_layer_dict[layer_parm[0]][layer_parm[1]]:
                        div_parms += 1
                else:
                    raise ValueError('length of hierarchy must 1 or 2')
            # Guard against layer types with no mutable parameters at all.
            if mutable_parms:
                div += (div_parms / mutable_parms)
        else:
            # Different layer types count as fully diverse.
            div += 1
    max_layers = max(len(self.network_spec['layers']), len(other_candidate.network_spec['layers']))
    return div / max_layers
def get_fitness(self, ):
    """Get the fitness of the candidate, evaluating the network on demand.

    On the first call the network spec is serialised, a `Network` is built
    and evaluated, and the configured fitness strategy ('accuracy' or
    's1') is applied to the results; the value is cached in `_fitness`
    until the spec mutates.

    Returns:
        The (cached) fitness value.

    Raises:
        ValueError: if `runtime_spec['fitness_strategy']` is unknown.
    """
    if self._fitness is None:
        network = Network(self._serialize_network_spec())
        extended_spec_json = network.evaluate()
        extended_spec = json.loads(extended_spec_json)
        result_spec = extended_spec['results']
        # NOTE(review): stray debug print — consider logging instead.
        print(result_spec)
        # Release the evaluated network before computing fitness.
        del network
        if self.runtime_spec['fitness_strategy'] == 'accuracy':
            self._fitness = self._fitness_function_accuracy(result_spec, self.runtime_spec['fitness_power'])
        elif self.runtime_spec['fitness_strategy'] == 's1':
            self._fitness = self._fitness_function_s1(result_spec)
        else:
            raise ValueError('fitnesss strategy {} is not implemented.'.format(self.runtime_spec['fitness_strategy']))
    return self._fitness
def _fitness_function_accuracy(self, results, power=1):
return results['accuracy'] ** power
def _fitness_function_s1(self, results):
"""Calculate the fitness based on the network evaluation."""
# TODO: get the number of weights as penalty?
return 1 / (- self.ACCURACY_WEIGHT * math.log(results['accuracy'])
+ self.LAYER_CNT_WEIGHT * len(self.network_spec['layers'])
+ self.WEIGHTS_CNT_WEIGHT * results['n_weights'])
def _create_random_network(self):
    """Construct a random network specification.

    Draws a random layer count, splits it into conv / maxpool /
    feedforward layers, shuffles the conv/maxpool portion and appends the
    feedforward layers at the end (so dense layers always come last).

    Returns:
        dict: network spec with 'hyperparameters' and 'layers' entries.
            (RangedInt/RangedNum are project types; presumably they draw a
            uniform random value in [min, max] — TODO confirm.)
    """
    # Finalize runtime specification.
    layer_cnt = RangedInt(1, self.runtime_spec['max_layer'])
    network_spec = {
        'hyperparameters': {
            'learningrate': RangedNum(1e-4, 1e-3),
            'optimizer': random.choice(self.OPTIMIZER_CHOICES),
            'batchsize': 100  # Fixed batch size for comparability.
        },
        'layers': []
    }
    # Partition the layer budget between the three layer kinds.
    cnt_layer_conv = RangedInt(0, layer_cnt.value)
    cnt_layer_max_pool = RangedInt(0, layer_cnt.value - cnt_layer_conv.value)
    cnt_layer_ff = layer_cnt.value - cnt_layer_conv.value - cnt_layer_max_pool.value
    layer_types = ['conv_layer' for _ in range(cnt_layer_conv.value)]
    layer_types += ['maxpool_layer' for _ in range(cnt_layer_max_pool.value)]
    # Shuffle only conv/maxpool; feedforward layers stay at the tail.
    random.shuffle(layer_types)
    layer_types += ['feedforward_layer' for _ in range(cnt_layer_ff)]
    for layer_type in layer_types:
        layer_spec = self._create_randomize_layer(layer_type=layer_type)
        # Add layer to the network spec.
        network_spec['layers'].append(layer_spec)
    return network_spec
def _create_randomize_layer(self, layer_type=None):
"""
Create a layer based on layer_type
"""
if layer_type is None:
layer_type = random.choice(self.LAYER_TYPES)
if layer_type == 'conv_layer':
layer_spec = self._create_conv_layer()
elif layer_type == 'maxpool_layer':
layer_spec = self._create_maxpool_layer()
elif layer_type == 'feedforward_layer':
layer_spec = self._create_ff_layer()
else:
raise ValueError('Invalid layer type {}'.format(layer_type))
return layer_spec
def _create_ff_layer(self):
    """
    Create the dict for a randomly initialized feedforward layer.

    NOTE(review): the size is drawn via
    OPTIMIZING_PARMS['feedforward_layer']['type'], whereas the conv and
    maxpool creators use the per-parameter path (e.g.
    [...]['size']['type']).  This looks inconsistent — confirm against the
    OPTIMIZING_PARMS definition whether ['type'] here should be
    ['size']['type'].

    :return: dict with 'type', 'size' and 'activation_function'.
    """
    layer = {
        'type': 'feedforward_layer',
        'size': self.OPTIMIZING_PARMS['feedforward_layer']['type'](
            self.OPTIMIZING_PARMS['feedforward_layer']['size']['min'],
            self.OPTIMIZING_PARMS['feedforward_layer']['size']['max']),
        'activation_function': random.choice(self.ACTIVATION_CHOICES)
    }
    return layer
def _create_conv_layer(self):
    """
    Create the dict for a randomly initialized convolutional layer.

    OPTIMIZING_PARMS['conv_layer'] is indexed with tuple keys such as
    ('filter', 'height'); each entry supplies 'type' (a Ranged* class)
    plus 'min'/'max' bounds for the random draw — presumably uniform in
    [min, max], TODO confirm against the Ranged* implementation.
    """
    layer = {
        'type': 'conv_layer',
        'filter': {
            'height': self.OPTIMIZING_PARMS['conv_layer']['filter','height']['type'](
                self.OPTIMIZING_PARMS['conv_layer']['filter','height']['min'],
                self.OPTIMIZING_PARMS['conv_layer']['filter','height']['max']),
            'width': self.OPTIMIZING_PARMS['conv_layer']['filter','width']['type'](
                self.OPTIMIZING_PARMS['conv_layer']['filter','width']['min'],
                self.OPTIMIZING_PARMS['conv_layer']['filter','width']['max']),
            'outchannels': self.OPTIMIZING_PARMS['conv_layer']['filter','outchannels']['type'](
                self.OPTIMIZING_PARMS['conv_layer']['filter','outchannels']['min'],
                self.OPTIMIZING_PARMS['conv_layer']['filter','outchannels']['max'])
        },
        'strides': {
            'x': self.OPTIMIZING_PARMS['conv_layer']['strides','x']['type'](
                self.OPTIMIZING_PARMS['conv_layer']['strides','x']['min'],
                self.OPTIMIZING_PARMS['conv_layer']['strides','x']['max']),
            'y': self.OPTIMIZING_PARMS['conv_layer']['strides','y']['type'](
                self.OPTIMIZING_PARMS['conv_layer']['strides','y']['min'],
                self.OPTIMIZING_PARMS['conv_layer']['strides','y']['max']),
            'inchannels': 1,  # Must be 1. See https://www.tensorflow.org/api_docs/python/tf/nn/conv2d
            'batch': 1
        },
        'activation_function': random.choice(self.ACTIVATION_CHOICES)
    }
    return layer
def _create_maxpool_layer(self):
    """
    Create the dict for a randomly initialized maxpool layer.

    Kernel and stride sizes are drawn from the tuple-keyed
    OPTIMIZING_PARMS['maxpool_layer'] entries (same scheme as
    `_create_conv_layer`).
    """
    layer = {
        'type': 'maxpool_layer',
        'kernel': {
            'height': self.OPTIMIZING_PARMS['maxpool_layer']['kernel','height']['type'](
                self.OPTIMIZING_PARMS['maxpool_layer']['kernel','height']['min'],
                self.OPTIMIZING_PARMS['maxpool_layer']['kernel','height']['max']),
            'width': self.OPTIMIZING_PARMS['maxpool_layer']['kernel','width']['type'](
                self.OPTIMIZING_PARMS['maxpool_layer']['kernel','width']['min'],
                self.OPTIMIZING_PARMS['maxpool_layer']['kernel','width']['max']),
            'outchannels': 1,
        },
        'strides': {
            'y': self.OPTIMIZING_PARMS['maxpool_layer']['strides','y']['type'](
                self.OPTIMIZING_PARMS['maxpool_layer']['strides','y']['min'],
                self.OPTIMIZING_PARMS['maxpool_layer']['strides','y']['max']),
            'x': self.OPTIMIZING_PARMS['maxpool_layer']['strides','x']['type'](
                self.OPTIMIZING_PARMS['maxpool_layer']['strides','x']['min'],
                self.OPTIMIZING_PARMS['maxpool_layer']['strides','x']['max']),
            'inchannels': 1,
            # Must probably be 1 as well. See https://www.tensorflow.org/api_docs/python/tf/nn/conv2d
            'batch': 1
        }
    }
    return layer
def _serialize_network_spec(self):
    """Serialise the network spec to JSON via the Ranged*-aware encoder."""
    encoder = RangedJSONEncoder()
    return encoder.encode(self.network_spec)
|
# -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" originally licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Package version, kept as a tuple for programmatic comparison.
version_info = (1, 0, 23)
__version__ = ".".join(str(part) for part in version_info)
# bump version
# -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" originally licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Package version, kept as a tuple for programmatic comparison.
version_info = (1, 0, 24)
__version__ = ".".join(str(part) for part in version_info)
|
# -*- coding: utf-8 -*-
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import random
import time
import attr
from netaddr import IPAddress
from zope.interface import implementer
from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.internet.interfaces import IStreamClientEndpoint
from twisted.web.client import URI, Agent, HTTPConnectionPool, RedirectAgent, readBody
from twisted.web.http import stringToDatetime
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent
from synapse.http.federation.srv_resolver import SrvResolver, pick_server_from_list
from synapse.util.caches.ttlcache import TTLCache
from synapse.util.logcontext import make_deferred_yieldable
# period to cache .well-known results for by default (24 hours, in seconds)
WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600

# jitter to add to the .well-known default cache ttl (10 minutes)
WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 10 * 60

# period to cache failure to fetch .well-known for (1 hour)
WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600

# cap for .well-known cache period (48 hours)
WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600

logger = logging.getLogger(__name__)

# process-wide cache of .well-known lookup results, shared by default
# between all MatrixFederationAgent instances
well_known_cache = TTLCache('well-known')
@implementer(IAgent)
class MatrixFederationAgent(object):
    """An Agent-like thing which provides a `request` method which will look up a matrix
    server and send an HTTP request to it.

    Doesn't implement any retries. (Those are done in MatrixFederationHttpClient.)

    Args:
        reactor (IReactor): twisted reactor to use for underlying requests

        tls_client_options_factory (ClientTLSOptionsFactory|None):
            factory to use for fetching client tls options, or none to disable TLS.

        _well_known_tls_policy (IPolicyForHTTPS|None):
            TLS policy to use for fetching .well-known files. None to use a default
            (browser-like) implementation.

        srv_resolver (SrvResolver|None):
            SRVResolver impl to use for looking up SRV records. None to use a default
            implementation.
    """

    def __init__(
        self, reactor, tls_client_options_factory,
        _well_known_tls_policy=None,
        _srv_resolver=None,
        _well_known_cache=well_known_cache,
    ):
        self._reactor = reactor
        self._tls_client_options_factory = tls_client_options_factory
        if _srv_resolver is None:
            _srv_resolver = SrvResolver()
        self._srv_resolver = _srv_resolver

        # Connection pool shared by federation and .well-known requests.
        self._pool = HTTPConnectionPool(reactor)
        self._pool.retryAutomatically = False
        self._pool.maxPersistentPerHost = 5
        self._pool.cachedConnectionTimeout = 2 * 60

        agent_args = {}
        if _well_known_tls_policy is not None:
            # the param is called 'contextFactory', but actually passing a
            # contextfactory is deprecated, and it expects an IPolicyForHTTPS.
            agent_args['contextFactory'] = _well_known_tls_policy
        # .well-known fetches follow redirects, unlike federation requests.
        _well_known_agent = RedirectAgent(
            Agent(self._reactor, pool=self._pool, **agent_args),
        )
        self._well_known_agent = _well_known_agent

        # cache of .well-known lookup results: server name -> delegated
        # server name (bytes), or None when there is no .well-known file.
        self._well_known_cache = _well_known_cache

    @defer.inlineCallbacks
    def request(self, method, uri, headers=None, bodyProducer=None):
        """
        Args:
            method (bytes): HTTP method: GET/POST/etc

            uri (bytes): Absolute URI to be retrieved

            headers (twisted.web.http_headers.Headers|None):
                HTTP headers to send with the request, or None to
                send no extra headers.

            bodyProducer (twisted.web.iweb.IBodyProducer|None):
                An object which can generate bytes to make up the
                body of this request (for example, the properly encoded contents of
                a file for a file upload). Or None if the request is to have
                no body.

        Returns:
            Deferred[twisted.web.iweb.IResponse]:
                fires when the header of the response has been received (regardless of the
                response status code). Fails if there is any problem which prevents that
                response from being received (including problems that prevent the request
                from being sent).
        """
        parsed_uri = URI.fromBytes(uri, defaultPort=-1)
        res = yield self._route_matrix_uri(parsed_uri)

        # set up the TLS connection params
        #
        # XXX disabling TLS is really only supported here for the benefit of the
        # unit tests. We should make the UTs cope with TLS rather than having to make
        # the code support the unit tests.
        if self._tls_client_options_factory is None:
            tls_options = None
        else:
            tls_options = self._tls_client_options_factory.get_options(
                res.tls_server_name.decode("ascii")
            )

        # make sure that the Host header is set correctly
        if headers is None:
            headers = Headers()
        else:
            headers = headers.copy()

        if not headers.hasHeader(b'host'):
            headers.addRawHeader(b'host', res.host_header)

        # Endpoint factory which ignores the URI's own host/port and always
        # connects to the routed target (closure over `res`/`tls_options`).
        class EndpointFactory(object):
            @staticmethod
            def endpointForURI(_uri):
                ep = LoggingHostnameEndpoint(
                    self._reactor, res.target_host, res.target_port,
                )
                if tls_options is not None:
                    ep = wrapClientTLS(tls_options, ep)
                return ep

        agent = Agent.usingEndpointFactory(self._reactor, EndpointFactory(), self._pool)
        res = yield make_deferred_yieldable(
            agent.request(method, uri, headers, bodyProducer)
        )
        defer.returnValue(res)

    @defer.inlineCallbacks
    def _route_matrix_uri(self, parsed_uri, lookup_well_known=True):
        """Helper for `request`: determine the routing for a Matrix URI

        Args:
            parsed_uri (twisted.web.client.URI): uri to route. Note that it should be
                parsed with URI.fromBytes(uri, defaultPort=-1) to set the `port` to -1
                if there is no explicit port given.

            lookup_well_known (bool): True if we should look up the .well-known file if
                there is no SRV record.

        Returns:
            Deferred[_RoutingResult]
        """
        # check for an IP literal
        try:
            ip_address = IPAddress(parsed_uri.host.decode("ascii"))
        except Exception:
            # not an IP address
            ip_address = None

        # IP literals and explicit ports bypass .well-known and SRV lookup
        # entirely (defer.returnValue raises, so these return early).
        if ip_address:
            port = parsed_uri.port
            if port == -1:
                port = 8448
            defer.returnValue(_RoutingResult(
                host_header=parsed_uri.netloc,
                tls_server_name=parsed_uri.host,
                target_host=parsed_uri.host,
                target_port=port,
            ))

        if parsed_uri.port != -1:
            # there is an explicit port
            defer.returnValue(_RoutingResult(
                host_header=parsed_uri.netloc,
                tls_server_name=parsed_uri.host,
                target_host=parsed_uri.host,
                target_port=parsed_uri.port,
            ))

        if lookup_well_known:
            # try a .well-known lookup
            well_known_server = yield self._get_well_known(parsed_uri.host)

            if well_known_server:
                # if we found a .well-known, start again, but don't do another
                # .well-known lookup.

                # parse the server name in the .well-known response into host/port.
                # (This code is lifted from twisted.web.client.URI.fromBytes).
                if b':' in well_known_server:
                    well_known_host, well_known_port = well_known_server.rsplit(b':', 1)
                    try:
                        well_known_port = int(well_known_port)
                    except ValueError:
                        # the part after the colon could not be parsed as an int
                        # - we assume it is an IPv6 literal with no port (the closing
                        # ']' stops it being parsed as an int)
                        well_known_host, well_known_port = well_known_server, -1
                else:
                    well_known_host, well_known_port = well_known_server, -1

                new_uri = URI(
                    scheme=parsed_uri.scheme,
                    netloc=well_known_server,
                    host=well_known_host,
                    port=well_known_port,
                    path=parsed_uri.path,
                    params=parsed_uri.params,
                    query=parsed_uri.query,
                    fragment=parsed_uri.fragment,
                )

                res = yield self._route_matrix_uri(new_uri, lookup_well_known=False)
                defer.returnValue(res)

        # try a SRV lookup
        service_name = b"_matrix._tcp.%s" % (parsed_uri.host,)
        server_list = yield self._srv_resolver.resolve_service(service_name)

        if not server_list:
            # no SRV record: fall back to the hostname and default port 8448
            target_host = parsed_uri.host
            port = 8448
            logger.debug(
                "No SRV record for %s, using %s:%i",
                parsed_uri.host.decode("ascii"), target_host.decode("ascii"), port,
            )
        else:
            target_host, port = pick_server_from_list(server_list)
            logger.debug(
                "Picked %s:%i from SRV records for %s",
                target_host.decode("ascii"), port, parsed_uri.host.decode("ascii"),
            )

        defer.returnValue(_RoutingResult(
            host_header=parsed_uri.netloc,
            tls_server_name=parsed_uri.host,
            target_host=target_host,
            target_port=port,
        ))

    @defer.inlineCallbacks
    def _get_well_known(self, server_name):
        """Attempt to fetch and parse a .well-known file for the given server

        Args:
            server_name (bytes): name of the server, from the requested url

        Returns:
            Deferred[bytes|None]: either the new server name, from the .well-known, or
                None if there was no .well-known file.
        """
        try:
            cached = self._well_known_cache[server_name]
            defer.returnValue(cached)
        except KeyError:
            pass

        # TODO: should we linearise so that we don't end up doing two .well-known requests
        # for the same server in parallel?
        uri = b"https://%s/.well-known/matrix/server" % (server_name, )
        uri_str = uri.decode("ascii")
        logger.info("Fetching %s", uri_str)
        try:
            response = yield make_deferred_yieldable(
                self._well_known_agent.request(b"GET", uri),
            )
            body = yield make_deferred_yieldable(readBody(response))
            if response.code != 200:
                raise Exception("Non-200 response %s" % (response.code, ))
        except Exception as e:
            logger.info("Error fetching %s: %s", uri_str, e)

            # add some randomness to the TTL to avoid a stampeding herd every hour
            # after startup
            cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD
            cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)

            self._well_known_cache.set(server_name, None, cache_period)
            defer.returnValue(None)

        try:
            parsed_body = json.loads(body.decode('utf-8'))
            logger.info("Response from .well-known: %s", parsed_body)
            if not isinstance(parsed_body, dict):
                raise Exception("not a dict")
            if "m.server" not in parsed_body:
                raise Exception("Missing key 'm.server'")
        except Exception as e:
            # NOTE(review): unlike fetch errors above, a parse failure is
            # re-raised here WITHOUT being cached, so a persistently broken
            # .well-known is re-fetched on every request.
            raise Exception("invalid .well-known response from %s: %s" % (uri_str, e,))

        result = parsed_body["m.server"].encode("ascii")

        cache_period = _cache_period_from_headers(
            response.headers,
            time_now=self._reactor.seconds,
        )
        if cache_period is None:
            cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD
            # add some randomness to the TTL to avoid a stampeding herd every 24 hours
            # after startup
            cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
        else:
            cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD)

        if cache_period > 0:
            self._well_known_cache.set(server_name, result, cache_period)

        defer.returnValue(result)
@implementer(IStreamClientEndpoint)
class LoggingHostnameEndpoint(object):
    """A wrapper for HostnameEndpoint which logs when it connects"""

    def __init__(self, reactor, host, port, *args, **kwargs):
        # Keep host/port for logging; delegate everything else to the
        # wrapped HostnameEndpoint.
        self.host = host
        self.port = port
        self.ep = HostnameEndpoint(reactor, host, port, *args, **kwargs)

    def connect(self, protocol_factory):
        logger.info("Connecting to %s:%i", self.host, self.port)
        return self.ep.connect(protocol_factory)
def _cache_period_from_headers(headers, time_now=time.time):
cache_controls = _parse_cache_control(headers)
if b'no-store' in cache_controls:
return 0
if b'max-age' in cache_controls:
try:
max_age = int(cache_controls[b'max-age'])
return max_age
except ValueError:
pass
expires = headers.getRawHeaders(b'expires')
if expires is not None:
try:
expires_date = stringToDatetime(expires[-1])
return expires_date - time_now()
except ValueError:
# RFC7234 says 'A cache recipient MUST interpret invalid date formats,
# especially the value "0", as representing a time in the past (i.e.,
# "already expired").
return 0
return None
def _parse_cache_control(headers):
cache_controls = {}
for hdr in headers.getRawHeaders(b'cache-control', []):
for directive in hdr.split(b','):
splits = [x.strip() for x in directive.split(b'=', 1)]
k = splits[0].lower()
v = splits[1] if len(splits) > 1 else None
cache_controls[k] = v
return cache_controls
@attr.s
class _RoutingResult(object):
    """The result returned by `_route_matrix_uri`.

    Contains the parameters needed to direct a federation connection to a particular
    server.

    Where a SRV record points to several servers, this object contains a single server
    chosen from the list.
    """

    host_header = attr.ib()
    """
    The value we should assign to the Host header (host:port from the matrix
    URI, or .well-known).

    :type: bytes
    """

    tls_server_name = attr.ib()
    """
    The server name we should set in the SNI (typically host, without port, from the
    matrix URI or .well-known)

    :type: bytes
    """

    target_host = attr.ib()
    """
    The hostname (or IP literal) we should route the TCP connection to (the target of the
    SRV record, or the hostname from the URL/.well-known)

    :type: bytes
    """

    target_port = attr.ib()
    """
    The port we should route the TCP connection to (the target of the SRV record, or
    the port from the URL/.well-known, or 8448)

    :type: int
    """
# Cache failures to parse .well-known
# Also add a Measure block around the .well-known fetch
# -*- coding: utf-8 -*-
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import random
import time
import attr
from netaddr import IPAddress
from zope.interface import implementer
from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.internet.interfaces import IStreamClientEndpoint
from twisted.web.client import URI, Agent, HTTPConnectionPool, RedirectAgent, readBody
from twisted.web.http import stringToDatetime
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent
from synapse.http.federation.srv_resolver import SrvResolver, pick_server_from_list
from synapse.util import Clock
from synapse.util.caches.ttlcache import TTLCache
from synapse.util.logcontext import make_deferred_yieldable
from synapse.util.metrics import Measure
# period to cache .well-known results for by default (24 hours, in seconds)
WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600

# jitter to add to the .well-known default cache ttl (10 minutes)
WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 10 * 60

# period to cache failure to fetch .well-known for (1 hour)
WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600

# cap for .well-known cache period (48 hours)
WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600

# magic value to mark an invalid well-known; a sentinel compared by identity
INVALID_WELL_KNOWN = object()

logger = logging.getLogger(__name__)

# process-wide cache of .well-known lookup results, shared by default
# between all MatrixFederationAgent instances
well_known_cache = TTLCache('well-known')
@implementer(IAgent)
class MatrixFederationAgent(object):
"""An Agent-like thing which provides a `request` method which will look up a matrix
server and send an HTTP request to it.
Doesn't implement any retries. (Those are done in MatrixFederationHttpClient.)
Args:
reactor (IReactor): twisted reactor to use for underlying requests
tls_client_options_factory (ClientTLSOptionsFactory|None):
factory to use for fetching client tls options, or none to disable TLS.
_well_known_tls_policy (IPolicyForHTTPS|None):
TLS policy to use for fetching .well-known files. None to use a default
(browser-like) implementation.
srv_resolver (SrvResolver|None):
SRVResolver impl to use for looking up SRV records. None to use a default
implementation.
"""
def __init__(
    self, reactor, tls_client_options_factory,
    _well_known_tls_policy=None,
    _srv_resolver=None,
    _well_known_cache=well_known_cache,
):
    self._reactor = reactor
    # Clock wrapper used for Measure blocks around .well-known fetches.
    self._clock = Clock(reactor)
    self._tls_client_options_factory = tls_client_options_factory
    if _srv_resolver is None:
        _srv_resolver = SrvResolver()
    self._srv_resolver = _srv_resolver

    # Connection pool shared by federation and .well-known requests.
    self._pool = HTTPConnectionPool(reactor)
    self._pool.retryAutomatically = False
    self._pool.maxPersistentPerHost = 5
    self._pool.cachedConnectionTimeout = 2 * 60

    agent_args = {}
    if _well_known_tls_policy is not None:
        # the param is called 'contextFactory', but actually passing a
        # contextfactory is deprecated, and it expects an IPolicyForHTTPS.
        agent_args['contextFactory'] = _well_known_tls_policy
    # .well-known fetches follow redirects, unlike federation requests.
    _well_known_agent = RedirectAgent(
        Agent(self._reactor, pool=self._pool, **agent_args),
    )
    self._well_known_agent = _well_known_agent

    # our cache of .well-known lookup results, mapping from server name
    # to delegated name. The values can be:
    #   `bytes`:            a valid server-name
    #   `None`:             there is no .well-known here
    #   INVALID_WELL_KNOWN: the .well-known here is invalid
    self._well_known_cache = _well_known_cache
@defer.inlineCallbacks
def request(self, method, uri, headers=None, bodyProducer=None):
    """
    Args:
        method (bytes): HTTP method: GET/POST/etc

        uri (bytes): Absolute URI to be retrieved

        headers (twisted.web.http_headers.Headers|None):
            HTTP headers to send with the request, or None to
            send no extra headers.

        bodyProducer (twisted.web.iweb.IBodyProducer|None):
            An object which can generate bytes to make up the
            body of this request (for example, the properly encoded contents of
            a file for a file upload). Or None if the request is to have
            no body.

    Returns:
        Deferred[twisted.web.iweb.IResponse]:
            fires when the header of the response has been received (regardless of the
            response status code). Fails if there is any problem which prevents that
            response from being received (including problems that prevent the request
            from being sent).
    """
    parsed_uri = URI.fromBytes(uri, defaultPort=-1)
    res = yield self._route_matrix_uri(parsed_uri)

    # set up the TLS connection params
    #
    # XXX disabling TLS is really only supported here for the benefit of the
    # unit tests. We should make the UTs cope with TLS rather than having to make
    # the code support the unit tests.
    if self._tls_client_options_factory is None:
        tls_options = None
    else:
        tls_options = self._tls_client_options_factory.get_options(
            res.tls_server_name.decode("ascii")
        )

    # make sure that the Host header is set correctly
    if headers is None:
        headers = Headers()
    else:
        headers = headers.copy()

    if not headers.hasHeader(b'host'):
        headers.addRawHeader(b'host', res.host_header)

    # Endpoint factory which ignores the URI's own host/port and always
    # connects to the routed target (closure over `res`/`tls_options`).
    class EndpointFactory(object):
        @staticmethod
        def endpointForURI(_uri):
            ep = LoggingHostnameEndpoint(
                self._reactor, res.target_host, res.target_port,
            )
            if tls_options is not None:
                ep = wrapClientTLS(tls_options, ep)
            return ep

    agent = Agent.usingEndpointFactory(self._reactor, EndpointFactory(), self._pool)
    res = yield make_deferred_yieldable(
        agent.request(method, uri, headers, bodyProducer)
    )
    defer.returnValue(res)
@defer.inlineCallbacks
def _route_matrix_uri(self, parsed_uri, lookup_well_known=True):
    """Helper for `request`: determine the routing for a Matrix URI

    Args:
        parsed_uri (twisted.web.client.URI): uri to route. Note that it should be
            parsed with URI.fromBytes(uri, defaultPort=-1) to set the `port` to -1
            if there is no explicit port given.

        lookup_well_known (bool): True if we should look up the .well-known file if
            there is no SRV record.

    Returns:
        Deferred[_RoutingResult]
    """
    # check for an IP literal
    try:
        ip_address = IPAddress(parsed_uri.host.decode("ascii"))
    except Exception:
        # not an IP address
        ip_address = None

    # IP literals and explicit ports bypass .well-known and SRV lookup
    # entirely (defer.returnValue raises, so these return early).
    if ip_address:
        port = parsed_uri.port
        if port == -1:
            port = 8448
        defer.returnValue(_RoutingResult(
            host_header=parsed_uri.netloc,
            tls_server_name=parsed_uri.host,
            target_host=parsed_uri.host,
            target_port=port,
        ))

    if parsed_uri.port != -1:
        # there is an explicit port
        defer.returnValue(_RoutingResult(
            host_header=parsed_uri.netloc,
            tls_server_name=parsed_uri.host,
            target_host=parsed_uri.host,
            target_port=parsed_uri.port,
        ))

    if lookup_well_known:
        # try a .well-known lookup
        well_known_server = yield self._get_well_known(parsed_uri.host)

        if well_known_server:
            # if we found a .well-known, start again, but don't do another
            # .well-known lookup.

            # parse the server name in the .well-known response into host/port.
            # (This code is lifted from twisted.web.client.URI.fromBytes).
            if b':' in well_known_server:
                well_known_host, well_known_port = well_known_server.rsplit(b':', 1)
                try:
                    well_known_port = int(well_known_port)
                except ValueError:
                    # the part after the colon could not be parsed as an int
                    # - we assume it is an IPv6 literal with no port (the closing
                    # ']' stops it being parsed as an int)
                    well_known_host, well_known_port = well_known_server, -1
            else:
                well_known_host, well_known_port = well_known_server, -1

            new_uri = URI(
                scheme=parsed_uri.scheme,
                netloc=well_known_server,
                host=well_known_host,
                port=well_known_port,
                path=parsed_uri.path,
                params=parsed_uri.params,
                query=parsed_uri.query,
                fragment=parsed_uri.fragment,
            )

            res = yield self._route_matrix_uri(new_uri, lookup_well_known=False)
            defer.returnValue(res)

    # try a SRV lookup
    service_name = b"_matrix._tcp.%s" % (parsed_uri.host,)
    server_list = yield self._srv_resolver.resolve_service(service_name)

    if not server_list:
        # no SRV record: fall back to the hostname and default port 8448
        target_host = parsed_uri.host
        port = 8448
        logger.debug(
            "No SRV record for %s, using %s:%i",
            parsed_uri.host.decode("ascii"), target_host.decode("ascii"), port,
        )
    else:
        target_host, port = pick_server_from_list(server_list)
        logger.debug(
            "Picked %s:%i from SRV records for %s",
            target_host.decode("ascii"), port, parsed_uri.host.decode("ascii"),
        )

    defer.returnValue(_RoutingResult(
        host_header=parsed_uri.netloc,
        tls_server_name=parsed_uri.host,
        target_host=target_host,
        target_port=port,
    ))
@defer.inlineCallbacks
def _get_well_known(self, server_name):
    """Attempt to fetch and parse a .well-known file for the given server

    Checks the cache first; on a miss, delegates to `_do_get_well_known`
    (timed under a Measure block) and caches the outcome — including
    negative (`None`) and invalid (INVALID_WELL_KNOWN) results.

    Args:
        server_name (bytes): name of the server, from the requested url

    Returns:
        Deferred[bytes|None]: either the new server name, from the .well-known, or
            None if there was no .well-known file.

    Raises:
        Exception: if the server publishes an invalid .well-known file
            (including when that fact is served from the cache).
    """
    try:
        result = self._well_known_cache[server_name]
    except KeyError:
        # TODO: should we linearise so that we don't end up doing two .well-known
        # requests for the same server in parallel?
        with Measure(self._clock, "get_well_known"):
            result, cache_period = yield self._do_get_well_known(server_name)

        if cache_period > 0:
            self._well_known_cache.set(server_name, result, cache_period)

    # INVALID_WELL_KNOWN is an `object()` sentinel: compare by identity.
    # (`==` happened to work via the equality fallback to identity, but
    # `is` states the intent and cannot be broken by a bytes/str __eq__.)
    if result is INVALID_WELL_KNOWN:
        raise Exception("invalid .well-known on this server")

    defer.returnValue(result)
    @defer.inlineCallbacks
    def _do_get_well_known(self, server_name):
        """Actually fetch and parse a .well-known, without checking the cache

        Args:
            server_name (bytes): name of the server, from the requested url

        Returns:
            Deferred[Tuple[bytes|None|object],int]:
                result, cache period, where result is one of:
                 - the new server name from the .well-known (as a `bytes`)
                 - None if there was no .well-known file.
                 - INVALID_WELL_KNOWN if the .well-known was invalid
        """
        uri = b"https://%s/.well-known/matrix/server" % (server_name, )
        uri_str = uri.decode("ascii")
        logger.info("Fetching %s", uri_str)
        try:
            response = yield make_deferred_yieldable(
                self._well_known_agent.request(b"GET", uri),
            )
            # read the body before checking the status code, so the connection
            # is fully consumed either way
            body = yield make_deferred_yieldable(readBody(response))
            if response.code != 200:
                raise Exception("Non-200 response %s" % (response.code, ))
        except Exception as e:
            # any fetch failure (connection error, non-200, ...) is treated as
            # "no .well-known file" and cached as None for a short period
            logger.info("Error fetching %s: %s", uri_str, e)

            # add some randomness to the TTL to avoid a stampeding herd every hour
            # after startup
            cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD
            cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
            defer.returnValue((None, cache_period))

        try:
            parsed_body = json.loads(body.decode('utf-8'))
            logger.info("Response from .well-known: %s", parsed_body)
            if not isinstance(parsed_body, dict):
                raise Exception("not a dict")
            if "m.server" not in parsed_body:
                raise Exception("Missing key 'm.server'")
        except Exception as e:
            # a 200 response with an unparseable/invalid body is distinct from
            # a fetch failure: it is cached as INVALID_WELL_KNOWN, not None
            logger.info("invalid .well-known response from %s: %s", uri_str, e)
            cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD
            cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
            defer.returnValue((INVALID_WELL_KNOWN, cache_period))

        result = parsed_body["m.server"].encode("ascii")

        # honour the response's caching headers; fall back to the default TTL
        cache_period = _cache_period_from_headers(
            response.headers,
            time_now=self._reactor.seconds,
        )
        if cache_period is None:
            cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD
            # add some randomness to the TTL to avoid a stampeding herd every 24 hours
            # after startup
            cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
        else:
            cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD)

        defer.returnValue((result, cache_period))
@implementer(IStreamClientEndpoint)
class LoggingHostnameEndpoint(object):
    """A wrapper for HostnameEndpoint which logs when it connects"""

    def __init__(self, reactor, host, port, *args, **kwargs):
        # host/port are kept only for the log line; the wrapped endpoint does
        # the actual connecting.
        self.host = host
        self.port = port
        self.ep = HostnameEndpoint(reactor, host, port, *args, **kwargs)

    def connect(self, protocol_factory):
        logger.info("Connecting to %s:%i", self.host, self.port)
        return self.ep.connect(protocol_factory)
def _cache_period_from_headers(headers, time_now=time.time):
    """Derive a cache lifetime (in seconds) from HTTP response headers.

    Precedence: ``Cache-Control: no-store`` -> 0; ``max-age`` -> its value;
    ``Expires`` -> seconds until that date (0 if unparseable, per RFC7234);
    otherwise None (caller picks a default).
    """
    directives = _parse_cache_control(headers)

    if b'no-store' in directives:
        return 0

    if b'max-age' in directives:
        try:
            return int(directives[b'max-age'])
        except ValueError:
            # malformed max-age: fall through to the Expires header
            pass

    expires = headers.getRawHeaders(b'expires')
    if expires is None:
        return None

    try:
        # last Expires header wins if there are several
        return stringToDatetime(expires[-1]) - time_now()
    except ValueError:
        # RFC7234 says 'A cache recipient MUST interpret invalid date formats,
        # especially the value "0", as representing a time in the past (i.e.,
        # "already expired").
        return 0
def _parse_cache_control(headers):
cache_controls = {}
for hdr in headers.getRawHeaders(b'cache-control', []):
for directive in hdr.split(b','):
splits = [x.strip() for x in directive.split(b'=', 1)]
k = splits[0].lower()
v = splits[1] if len(splits) > 1 else None
cache_controls[k] = v
return cache_controls
@attr.s
class _RoutingResult(object):
    """The result returned by `_route_matrix_uri`.

    Contains the parameters needed to direct a federation connection to a particular
    server.

    Where a SRV record points to several servers, this object contains a single server
    chosen from the list.
    """

    host_header = attr.ib()
    """
    The value we should assign to the Host header (host:port from the matrix
    URI, or .well-known).

    :type: bytes
    """

    tls_server_name = attr.ib()
    """
    The server name we should set in the SNI (typically host, without port, from the
    matrix URI or .well-known)

    :type: bytes
    """

    target_host = attr.ib()
    """
    The hostname (or IP literal) we should route the TCP connection to (the target of the
    SRV record, or the hostname from the URL/.well-known)

    :type: bytes
    """

    target_port = attr.ib()
    """
    The port we should route the TCP connection to (the target of the SRV record, or
    the port from the URL/.well-known, or 8448)

    :type: int
    """
|
from __future__ import print_function, absolute_import
import contextlib
from functools import partial
import itertools
from Qt import QtWidgets
from Qt.QtCore import Qt
from pymel.core import cmds
from ... import add
from . import cardRigging
'''
class Cell(QtWidgets.QTableWidgetItem):
def __init__(self, label='', checked=None):
QtWidgets.QTableWidgetItem.__init__(self, label)
self.setFlags( Qt.ItemIsEnabled | Qt.ItemIsSelectable )
if checked is not None:
self.setCheckState(Qt.Checked if checked else Qt.Unchecked)
'''
class Label(QtWidgets.QTableWidgetItem):
    """A display-only table cell: enabled, but neither selectable nor editable.

    ``checked`` is currently unused; it is kept so existing callers that pass
    it keep working.
    """

    def __init__(self, label='', checked=None):
        super(Label, self).__init__(label)
        self.setFlags(Qt.ItemIsEnabled)
class NOT_FOUND:
    # Sentinel used as a dict.get() default to distinguish "key absent" from a
    # stored falsy value; compared with `is not NOT_FOUND` (the class object
    # itself is the sentinel).
    pass
class CardParams(QtWidgets.QTableWidget):
def __init__(self, *args, **kwargs):
QtWidgets.QTableWidget.__init__(self, *args, **kwargs)
self._disabled = False
self._prevState = []
self.currentCard = None
self.params = []
self.paramSceneOptions = {}
def clearContents(self):
try:
self.cellChanged.disconnect(self.dataChange)
except:
pass
QtWidgets.QTableWidget.clearContents(self)
@contextlib.contextmanager
def disableChangeCallback(self):
'''
Wrap programmatic changes to the ui to prevent it from updating the object.
'''
self._prevState.append( self._disabled )
self._disabled = True
yield
self._disabled = self._prevState.pop()
def dataChange(self, row, col):
'''
Callback for when data changes.
'''
if self._disabled or not self.params:
return
rigData = self.card.rigData
params = rigData.setdefault('ikParams', {})
#newVal = self.item(row, col).text()
param = self.params[row]
if param.type == cardRigging.ParamInfo.BOOL:
params[param.kwargName] = bool(self.cellWidget(row, col).state() == Qt.Checked)
elif param.type == cardRigging.ParamInfo.INT:
try:
params[param.kwargName] = int( self.item(row, col).text() )
except Exception:
with self.disableChangeCallback():
self.item(row, col).setText( params.get(param.kwargName, param.default))
elif param.type == cardRigging.ParamInfo.FLOAT:
try:
params[param.kwargName] = float( self.item(row, col).text() )
except Exception:
with self.disableChangeCallback():
self.item(row, col).setText( params.get(param.kwargName, param.default))
elif param.type == cardRigging.ParamInfo.ENUM:
params[param.kwargName] = param.enum.values[self.cellWidget(row, col).currentIndex()]
elif param.type == cardRigging.ParamInfo.STR:
params[param.kwargName] = self.item(row, col).text()
elif param.type == cardRigging.ParamInfo.NODE_0:
self.card.extraNode[0] = self.paramSceneOptions[param][self.cellWidget(row, col).currentIndex()].message
params[param.kwargName] = 'NODE_0'
self.card.rigData = rigData
def setInputField(self, card, row, param):
'''
Given a `Param`, build, and place, the correct ui element for the param's data type.
'''
self.params.append(param)
if param.type == param.BOOL:
checkBox = QtWidgets.QTableWidgetItem()
state = Qt.Checked if card.rigData.get('ikParams', {}).get(param.kwargName, param.default) else Qt.Unchecked
checkBox.setCheckState( state )
self.setItem( row, 1, checkBox )
elif param.type == param.INT:
self.setItem( row, 1, QtWidgets.QTableWidgetItem(str(0 if not card.rigData.get('ikParams', {}).get(param.kwargName, param.default) else param.default)) )
elif param.type == param.FLOAT:
self.setItem( row, 1, QtWidgets.QTableWidgetItem(str(0.0 if not card.rigData.get('ikParams', {}).get(param.kwargName, param.default) else param.default)) )
elif param.type == param.ENUM:
dropdown = QtWidgets.QComboBox()
dropdown.addItems(param.enum.keys())
#dropdown.currentIndexChanged.connect( partial(self.enumChange, param) )
#for key, val in param.enum.items():
# dropdown.addItem( key ).triggered.connect( partial(self.changeEnum, param.kwargName, val) )
self.setCellWidget(row, 1, dropdown)
try:
enumVal = param.enum.values().index( card.rigData.get('ikParams', {}).get(param.kwargName, param.default) )
dropdown.setCurrentIndex(enumVal)
dropdown.currentIndexChanged.connect( partial(self.enumChange, param=param) )
except:
print( 'oerror with', param.kwargName, param.default, card, row )
elif param.type == param.STR:
val = card.rigData.get('ikParams', {}).get(param.kwargName, param.default)
self.setItem( row, 1, QtWidgets.QTableWidgetItem(val) )
elif param.type in (param.CURVE, param.NODE_0): # Just accept curves, they are all I connect to
dropdown = QtWidgets.QComboBox()
# Get all the curve transforms under the skeletonBlueprint
curves = cmds.listRelatives( cmds.listRelatives('skeletonBlueprint', type='nurbsCurve', f=True, ad=True), p=True, f=True)
self.paramSceneOptions[param] = curves
dropdown.addItems( curves )
self.setCellWidget(row, 1, dropdown)
def enumChange(self, index, param):
rigData = self.card.rigData
key, val = param.enum.items()[index]
rigData.get('ikParams')[param.kwargName] = val
self.card.rigData = rigData
def addParams(self, card):
self.card = card
self.clearContents()
self.params = []
self.paramSceneOptions = {}
#cardSettings = cardRigging.ParamInfo.toDict( card.rigParams )
#cardSettings = card.rigData.get('ikParams', {})
metaControl = cardRigging.availableControlTypes[card.rigData['rigCmd']]
# &&& I'm looking at ik and fk args, but all the data is set to "ik", does fk have anything?
totalCount = len( metaControl.ikInput ) + len(metaControl.fkInput)
if totalCount == 0:
self.setRowCount( 1 )
self.setItem(0, 0, Label('No options'))
self.setItem(0, 1, Label(''))
return
self.setRowCount( totalCount )
# &&& I don't think there is a shared param #for kwargName, param in metaControl.shared.items() + metaControl.ikInput.items():
for row, (kwargName, param) in enumerate(itertools.chain(metaControl.ikInput.items(), metaControl.fkInput.items())):
#with columnLayout( p=self.controlSpecificParams, co=('both', 9)) as paramLayout:
# Param takes multiple input types
if isinstance( param, list ):
dropdown = QtWidgets.QComboBox()
dropdown.addItems( [p.name for p in param] )
self.setCellWidget(row, 0, dropdown)
value = card.rigData.get('ikKwargs', {}).get(kwargName, NOT_FOUND)
if value is not NOT_FOUND:
type = cardRigging.ParamInfo.determineDataType(value)
for i, p in enumerate(param):
if p.type == type:
dropdown.setCurrentIndex(i)
self.setInputField(card, row, p)
break
else:
self.setInputField(card, row, param[0])
else:
self.setInputField(card, row, param[0])
'''
menu = optionMenu(h=20, cc=alt.Callback(self.changeInputType, paramLayout, param, kwargName))
for p in param:
menuItem( l=p.name )
# Figure out which kind of input the existing data is if the card has the setting
if kwargName in cardSettings:
type = cardRigging.ParamInfo.determineDataType(cardSettings[kwargName])
for p in param:
if p.type == type:
menu.setValue(p.name)
p.buildUI(card)
break
else:
p.buildUI(card)
else:
param[0].buildUI(card)
'''
# Param only takes one data type
else:
self.setItem(row, 0, Label(param.name))
self.setInputField(card, row, param)
self.cellChanged.connect(self.dataChange)
    def update(self, card):
        """Refresh the card info panel (name, description, type, params) for `card`.

        NOTE(review): this reads ``self.ui``, so it appears to be a method of
        the ui-owning controller rather than of CardParams — confirm the
        enclosing class.
        """
        # Avoid rebuilding the table when the selection has not changed.
        if self.ui.cardParams.currentCard == card:
            return
        else:
            self.ui.cardParams.currentCard = card

        if card:
            self.ui.cardName.setText( add.shortName(card) )
            try:
                rigClass = cardRigging.availableControlTypes[ card.rigData.get('rigCmd') ]
            except Exception:
                rigClass = None

            if rigClass:
                if rigClass.__doc__:
                    # NOTE(review): the description is only updated when the rig
                    # class has a docstring; a previous card's description may
                    # linger otherwise — confirm whether that is intended.
                    self.ui.cardDescription.setText(rigClass.__doc__)
        else:
            self.ui.cardName.setText( '<None selected>' )

        if card and card.rigData.get('rigCmd') in cardRigging.availableControlTypes:
            self.ui.cardType.setText( card.rigData['rigCmd'] )
            metaControl = cardRigging.availableControlTypes[card.rigData['rigCmd']]
            # Suppress the cellChanged callback while the table is repopulated.
            with self.ui.cardParams.disableChangeCallback():
                if metaControl.ik or metaControl.fk:
                    self.ui.cardParams.addParams(card)
                else:
                    self.ui.cardParams.clearContents()
        else:
            # No recognised rig command: show a placeholder row.
            self.ui.cardParams.clearContents()
            self.ui.cardParams.setRowCount( 1 )
            self.ui.cardParams.setItem(0, 0, Label('No options'))
            self.ui.cardParams.setItem(0, 1, Label(''))
Fixed boolean params: read the check state from the table item (item().checkState()) instead of a nonexistent cell widget
from __future__ import print_function, absolute_import
import contextlib
from functools import partial
import itertools
from Qt import QtWidgets
from Qt.QtCore import Qt
from pymel.core import cmds
from ... import add
from . import cardRigging
'''
class Cell(QtWidgets.QTableWidgetItem):
def __init__(self, label='', checked=None):
QtWidgets.QTableWidgetItem.__init__(self, label)
self.setFlags( Qt.ItemIsEnabled | Qt.ItemIsSelectable )
if checked is not None:
self.setCheckState(Qt.Checked if checked else Qt.Unchecked)
'''
class Label(QtWidgets.QTableWidgetItem):
    """A display-only table cell: enabled, but neither selectable nor editable.

    ``checked`` is currently unused; it is kept so existing callers that pass
    it keep working.
    """

    def __init__(self, label='', checked=None):
        super(Label, self).__init__(label)
        self.setFlags(Qt.ItemIsEnabled)
class NOT_FOUND:
    # Sentinel used as a dict.get() default to distinguish "key absent" from a
    # stored falsy value; compared with `is not NOT_FOUND` (the class object
    # itself is the sentinel).
    pass
class CardParams(QtWidgets.QTableWidget):
    """Table widget that edits a card's rig parameters ('ikParams').

    Each row is one parameter: column 0 is the label (or, for multi-type
    params, a type-choosing dropdown) and column 1 is the value editor.
    Edits are written back to ``card.rigData`` by the ``dataChange`` callback.
    """

    def __init__(self, *args, **kwargs):
        QtWidgets.QTableWidget.__init__(self, *args, **kwargs)
        self._disabled = False       # when True, dataChange() is a no-op
        self._prevState = []         # stack so disableChangeCallback() can nest
        self.currentCard = None
        self.params = []             # row index -> param shown on that row
        self.paramSceneOptions = {}  # param -> scene nodes listed in its dropdown

    def clearContents(self):
        # Detach the change callback first so clearing cells does not write
        # spurious edits back to the card.
        try:
            self.cellChanged.disconnect(self.dataChange)
        except Exception:  # was not connected; nothing to detach
            pass
        QtWidgets.QTableWidget.clearContents(self)

    @contextlib.contextmanager
    def disableChangeCallback(self):
        '''
        Wrap programmatic changes to the ui to prevent it from updating the object.
        '''
        self._prevState.append(self._disabled)
        self._disabled = True
        yield
        self._disabled = self._prevState.pop()

    def dataChange(self, row, col):
        '''
        Callback for when data changes: writes the edited cell back into
        ``self.card.rigData['ikParams']``.
        '''
        if self._disabled or not self.params:
            return

        rigData = self.card.rigData
        params = rigData.setdefault('ikParams', {})
        param = self.params[row]

        if param.type == cardRigging.ParamInfo.BOOL:
            params[param.kwargName] = bool(self.item(row, col).checkState() == Qt.Checked)

        elif param.type == cardRigging.ParamInfo.INT:
            try:
                params[param.kwargName] = int( self.item(row, col).text() )
            except Exception:
                # Invalid input: revert the cell to the stored (or default) value.
                with self.disableChangeCallback():
                    # BUGFIX: setText() requires a string; the stored value is numeric.
                    self.item(row, col).setText( str(params.get(param.kwargName, param.default)) )

        elif param.type == cardRigging.ParamInfo.FLOAT:
            try:
                params[param.kwargName] = float( self.item(row, col).text() )
            except Exception:
                with self.disableChangeCallback():
                    # BUGFIX: setText() requires a string; the stored value is numeric.
                    self.item(row, col).setText( str(params.get(param.kwargName, param.default)) )

        elif param.type == cardRigging.ParamInfo.ENUM:
            # BUGFIX: .values is a method (it is called elsewhere in this class);
            # list() also makes py3 dict views indexable.
            params[param.kwargName] = list(param.enum.values())[self.cellWidget(row, col).currentIndex()]

        elif param.type == cardRigging.ParamInfo.STR:
            params[param.kwargName] = self.item(row, col).text()

        elif param.type == cardRigging.ParamInfo.NODE_0:
            self.card.extraNode[0] = self.paramSceneOptions[param][self.cellWidget(row, col).currentIndex()].message
            params[param.kwargName] = 'NODE_0'

        # Reassign so any rigData property setter sees the updated dict.
        self.card.rigData = rigData

    def setInputField(self, card, row, param):
        '''
        Given a `Param`, build, and place, the correct ui element for the param's data type.
        '''
        self.params.append(param)

        # Stored value for this param, falling back to the param's default.
        stored = card.rigData.get('ikParams', {}).get(param.kwargName, param.default)

        if param.type == param.BOOL:
            checkBox = QtWidgets.QTableWidgetItem()
            checkBox.setCheckState( Qt.Checked if stored else Qt.Unchecked )
            self.setItem( row, 1, checkBox )

        elif param.type == param.INT:
            # NOTE(review): shows param.default (not the stored value) whenever
            # the stored value is truthy — preserved as-is; confirm intent.
            self.setItem( row, 1, QtWidgets.QTableWidgetItem(str(0 if not stored else param.default)) )

        elif param.type == param.FLOAT:
            # NOTE(review): same stored-vs-default oddity as INT above.
            self.setItem( row, 1, QtWidgets.QTableWidgetItem(str(0.0 if not stored else param.default)) )

        elif param.type == param.ENUM:
            dropdown = QtWidgets.QComboBox()
            # list() so py3 dict views work with addItems/index.
            dropdown.addItems( list(param.enum.keys()) )
            self.setCellWidget(row, 1, dropdown)
            try:
                enumVal = list(param.enum.values()).index( stored )
                dropdown.setCurrentIndex(enumVal)
                dropdown.currentIndexChanged.connect( partial(self.enumChange, param=param) )
            except Exception:
                print( 'error with', param.kwargName, param.default, card, row )

        elif param.type == param.STR:
            self.setItem( row, 1, QtWidgets.QTableWidgetItem(stored) )

        elif param.type in (param.CURVE, param.NODE_0):  # Just accept curves, they are all I connect to
            dropdown = QtWidgets.QComboBox()
            # Get all the curve transforms under the skeletonBlueprint
            curves = cmds.listRelatives( cmds.listRelatives('skeletonBlueprint', type='nurbsCurve', f=True, ad=True), p=True, f=True)
            self.paramSceneOptions[param] = curves
            dropdown.addItems( curves )
            self.setCellWidget(row, 1, dropdown)

    def enumChange(self, index, param):
        '''Write the enum value picked in the dropdown back to the card.'''
        rigData = self.card.rigData
        # BUGFIX: list() so indexing works on py3 dict views; setdefault guards
        # against a card with no 'ikParams' dict yet (original .get() could
        # return None and crash).
        key, val = list(param.enum.items())[index]
        rigData.setdefault('ikParams', {})[param.kwargName] = val
        self.card.rigData = rigData

    def addParams(self, card):
        '''Rebuild the table to show the parameters of `card`'s rig command.'''
        self.card = card
        self.clearContents()
        self.params = []
        self.paramSceneOptions = {}

        metaControl = cardRigging.availableControlTypes[card.rigData['rigCmd']]

        # &&& I'm looking at ik and fk args, but all the data is set to "ik", does fk have anything?
        totalCount = len( metaControl.ikInput ) + len(metaControl.fkInput)

        if totalCount == 0:
            self.setRowCount( 1 )
            self.setItem(0, 0, Label('No options'))
            self.setItem(0, 1, Label(''))
            return

        self.setRowCount( totalCount )

        for row, (kwargName, param) in enumerate(itertools.chain(metaControl.ikInput.items(), metaControl.fkInput.items())):
            if isinstance( param, list ):
                # Param accepts several input types: column 0 becomes a dropdown
                # to pick the type, column 1 the matching editor.
                dropdown = QtWidgets.QComboBox()
                dropdown.addItems( [p.name for p in param] )
                self.setCellWidget(row, 0, dropdown)

                value = card.rigData.get('ikKwargs', {}).get(kwargName, NOT_FOUND)
                if value is not NOT_FOUND:
                    # Preselect the type that matches the stored data, if any.
                    valueType = cardRigging.ParamInfo.determineDataType(value)
                    for i, p in enumerate(param):
                        if p.type == valueType:
                            dropdown.setCurrentIndex(i)
                            self.setInputField(card, row, p)
                            break
                    else:
                        self.setInputField(card, row, param[0])
                else:
                    self.setInputField(card, row, param[0])
            else:
                # Param only takes one data type
                self.setItem(row, 0, Label(param.name))
                self.setInputField(card, row, param)

        self.cellChanged.connect(self.dataChange)
    def update(self, card):
        """Refresh the card info panel (name, description, type, params) for `card`.

        NOTE(review): this reads ``self.ui``, so it appears to be a method of
        the ui-owning controller rather than of CardParams — confirm the
        enclosing class.
        """
        # Avoid rebuilding the table when the selection has not changed.
        if self.ui.cardParams.currentCard == card:
            return
        else:
            self.ui.cardParams.currentCard = card

        if card:
            self.ui.cardName.setText( add.shortName(card) )
            try:
                rigClass = cardRigging.availableControlTypes[ card.rigData.get('rigCmd') ]
            except Exception:
                rigClass = None

            if rigClass:
                if rigClass.__doc__:
                    # NOTE(review): the description is only updated when the rig
                    # class has a docstring; a previous card's description may
                    # linger otherwise — confirm whether that is intended.
                    self.ui.cardDescription.setText(rigClass.__doc__)
        else:
            self.ui.cardName.setText( '<None selected>' )

        if card and card.rigData.get('rigCmd') in cardRigging.availableControlTypes:
            self.ui.cardType.setText( card.rigData['rigCmd'] )
            metaControl = cardRigging.availableControlTypes[card.rigData['rigCmd']]
            # Suppress the cellChanged callback while the table is repopulated.
            with self.ui.cardParams.disableChangeCallback():
                if metaControl.ik or metaControl.fk:
                    self.ui.cardParams.addParams(card)
                else:
                    self.ui.cardParams.clearContents()
        else:
            # No recognised rig command: show a placeholder row.
            self.ui.cardParams.clearContents()
            self.ui.cardParams.setRowCount( 1 )
            self.ui.cardParams.setItem(0, 0, Label('No options'))
            self.ui.cardParams.setItem(0, 1, Label(''))
from django.test import TestCase
from corehq.apps.accounting.bootstrap.config.testing import BOOTSTRAP_CONFIG_TESTING
from corehq.apps.accounting.models import SoftwarePlanEdition, FeatureType
from corehq.apps.accounting.tests.utils import DomainSubscriptionMixin
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.users.models import CommCareUser
from corehq.apps.users.util import format_username
from corehq.project_limits.rate_limiter import get_n_users_for_rate_limiting
class GetNUsersForRateLimitingTest(TestCase, DomainSubscriptionMixin):
    """Tests for get_n_users_for_rate_limiting.

    The function appears to be cached per domain (it exposes ``.clear``), so
    each assertion clears the cached value first.
    """

    def test_no_subscription(self):
        # Without a subscription, rate limiting counts only real users.
        domain = 'domain-no-subscription'
        domain_obj = create_domain(domain)
        self.addCleanup(domain_obj.delete)

        get_n_users_for_rate_limiting.clear(domain)
        self.assertEqual(get_n_users_for_rate_limiting(domain), 0)

        self._set_n_users(domain, 1)

        get_n_users_for_rate_limiting.clear(domain)
        self.assertEqual(get_n_users_for_rate_limiting(domain), 1)

    def test_with_subscription(self):
        domain = 'domain-with-subscription'

        def _setup():
            # Create the domain with an ADVANCED subscription; cleanup is
            # registered in reverse order of creation.
            domain_obj = create_domain(domain)
            self.setup_subscription(domain_obj.name, SoftwarePlanEdition.ADVANCED)
            self.addCleanup(lambda: self.teardown_subscription(domain))
            self.addCleanup(domain_obj.delete)
            assert CommCareUser.total_by_domain(domain, is_active=True) == 0

        def _get_included_in_subscription():
            # Monthly user limit baked into the testing bootstrap config for
            # the ADVANCED edition (expected to be 8).
            n = (
                BOOTSTRAP_CONFIG_TESTING[(SoftwarePlanEdition.ADVANCED, False, False)]
                ['feature_rates'][FeatureType.USER]['monthly_limit']
            )
            assert n == 8
            return n

        _setup()

        # With no real users, it's the number of users in the subscription
        get_n_users_for_rate_limiting.clear(domain)
        self.assertEqual(get_n_users_for_rate_limiting(domain),
                         _get_included_in_subscription())

        self._set_n_users(domain, 9)

        # With more users than included in subscription, it's the number of users
        get_n_users_for_rate_limiting.clear(domain)
        self.assertEqual(get_n_users_for_rate_limiting(domain), 9)

    def _set_n_users(self, domain, n_users):
        # Create active CommCare users until the domain has exactly n_users.
        start_n_users = CommCareUser.total_by_domain(domain, is_active=True)
        assert n_users >= start_n_users, 'this helper can only add users'

        for i in range(start_n_users, n_users):
            user = CommCareUser.create(domain, format_username('user{}'.format(i), domain),
                                       password='123')
            user.is_active = True
            user.save()
            self.addCleanup(user.delete)
        assert CommCareUser.total_by_domain(domain, is_active=True) == n_users
Add test for customer plans in get_n_users_for_rate_limiting
from django.test import TestCase
from corehq.apps.accounting.bootstrap.config.testing import BOOTSTRAP_CONFIG_TESTING
from corehq.apps.accounting.models import SoftwarePlanEdition, FeatureType, Subscription
from corehq.apps.accounting.tests.utils import DomainSubscriptionMixin
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.users.models import CommCareUser
from corehq.apps.users.util import format_username
from corehq.project_limits.rate_limiter import get_n_users_for_rate_limiting
class GetNUsersForRateLimitingTest(TestCase, DomainSubscriptionMixin):
    """Tests for get_n_users_for_rate_limiting, including customer-level plans.

    The function appears to be cached per domain (it exposes ``.clear``);
    _assert_value_equals clears the cache before each check.
    """

    def test_no_subscription(self):
        # Without a subscription, rate limiting counts only real users.
        domain = 'domain-no-subscription'
        domain_obj = create_domain(domain)
        self.addCleanup(domain_obj.delete)

        self._assert_value_equals(domain, 0)
        self._set_n_users(domain, 1)
        self._assert_value_equals(domain, 1)

    def test_with_subscription(self):
        domain_1 = 'domain-with-subscription'
        domain_2 = 'other-domain-in-same-customer-account'

        def _setup(domain):
            # Create the domain with an ADVANCED subscription; cleanup is
            # registered in reverse order of creation.
            domain_obj = create_domain(domain)
            self.setup_subscription(domain_obj.name, SoftwarePlanEdition.ADVANCED)
            self.addCleanup(lambda: self.teardown_subscription(domain))
            self.addCleanup(domain_obj.delete)
            assert CommCareUser.total_by_domain(domain, is_active=True) == 0

        def _get_included_in_subscription():
            # Monthly user limit baked into the testing bootstrap config for
            # the ADVANCED edition (expected to be 8).
            n = (
                BOOTSTRAP_CONFIG_TESTING[(SoftwarePlanEdition.ADVANCED, False, False)]
                ['feature_rates'][FeatureType.USER]['monthly_limit']
            )
            assert n == 8
            return n

        def _link_domains(domain, other_domain):
            # Mark domain's plan as a customer-level plan and point
            # other_domain's subscription at the same plan version, so both
            # domains share a single customer software plan.
            plan_version = Subscription.get_active_subscription_by_domain(domain).plan_version
            plan = plan_version.plan
            plan.is_customer_software_plan = True
            plan.save()
            other_subscription = Subscription.get_active_subscription_by_domain(other_domain)
            other_subscription.plan_version = plan_version
            other_subscription.save()

        _setup(domain_1)

        # With no real users, it's the number of users in the subscription
        self._assert_value_equals(domain_1, _get_included_in_subscription())

        self._set_n_users(domain_1, 9)

        # With more users than included in subscription, it's the number of users
        self._assert_value_equals(domain_1, 9)

        _setup(domain_2)
        _link_domains(domain_1, domain_2)

        # No change on the original domain
        self._assert_value_equals(domain_1, 9)
        # The new domain should get half the total included users for the shared account
        self._assert_value_equals(domain_2, 4)

    def _assert_value_equals(self, domain, value):
        # Clear the per-domain cache so the assertion sees a fresh value.
        get_n_users_for_rate_limiting.clear(domain)
        self.assertEqual(get_n_users_for_rate_limiting(domain), value)

    def _set_n_users(self, domain, n_users):
        # Create active CommCare users until the domain has exactly n_users.
        start_n_users = CommCareUser.total_by_domain(domain, is_active=True)
        assert n_users >= start_n_users, 'this helper can only add users'

        for i in range(start_n_users, n_users):
            user = CommCareUser.create(domain, format_username('user{}'.format(i), domain),
                                       password='123')
            user.is_active = True
            user.save()
            self.addCleanup(user.delete)
        assert CommCareUser.total_by_domain(domain, is_active=True) == n_users
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rupesh Tare <rupesht@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
Mock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
try:
import pytest
except ImportError as import_error:
pytest = None
# Import Salt Libs
import salt.modules.localemod as localemod
from salt.exceptions import CommandExecutionError
@skipIf(not pytest, False)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class LocalemodTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.localemod
'''
    def setup_loader_modules(self):
        # Load localemod with no extra module globals; individual tests patch
        # __salt__/__grains__/__context__ as needed.
        return {localemod: {}}
    def test_list_avail(self):
        '''
        Test for Lists available (compiled) locales
        '''
        # `cmd.run` returns one locale per line; list_avail should split that
        # output into a list.
        with patch.dict(localemod.__salt__,
                        {'cmd.run': MagicMock(return_value='A\nB')}):
            self.assertEqual(localemod.list_avail(), ['A', 'B'])
    @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl"))
    @patch('salt.modules.localemod.__grains__', {'os_family': 'Ubuntu', 'osmajorrelease': 42})
    @patch('salt.modules.localemod.HAS_DBUS', False)
    @patch('salt.modules.localemod._parse_dbus_locale', MagicMock(return_value={'LANG': 'en_US.utf8'}))
    @patch('salt.modules.localemod._localectl_status', MagicMock(return_value={'system_locale': {'LANG': 'de_DE.utf8'}}))
    @patch('salt.utils.systemd.booted', MagicMock(return_value=True))
    def test_get_locale_with_systemd_nodbus(self):
        '''
        Test getting current system locale with systemd but no dbus available.
        :return:
        '''
        # With HAS_DBUS False, the dbus parser must be skipped and the value
        # must come from the `localectl status` output instead.
        assert localemod.get_locale() == 'de_DE.utf8'
    @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl"))
    @patch('salt.modules.localemod.__grains__', {'os_family': 'Ubuntu', 'osmajorrelease': 42})
    @patch('salt.modules.localemod.HAS_DBUS', True)
    @patch('salt.modules.localemod._parse_dbus_locale', MagicMock(return_value={'LANG': 'en_US.utf8'}))
    @patch('salt.modules.localemod._localectl_status', MagicMock(return_value={'system_locale': {'LANG': 'de_DE.utf8'}}))
    @patch('salt.utils.systemd.booted', MagicMock(return_value=True))
    def test_get_locale_with_systemd_and_dbus(self):
        '''
        Test getting current system locale with systemd and dbus available.
        :return:
        '''
        # When dbus is available its locale takes precedence over the
        # `localectl status` value.
        assert localemod.get_locale() == 'en_US.utf8'
    @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl"))
    @patch('salt.modules.localemod.__grains__', {'os_family': 'Suse', 'osmajorrelease': 12})
    @patch('salt.modules.localemod.HAS_DBUS', True)
    @patch('salt.modules.localemod._parse_dbus_locale', MagicMock(return_value={'LANG': 'en_US.utf8'}))
    @patch('salt.modules.localemod._localectl_status', MagicMock(return_value={'system_locale': {'LANG': 'de_DE.utf8'}}))
    @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()})
    @patch('salt.utils.systemd.booted', MagicMock(return_value=True))
    def test_get_locale_with_systemd_and_dbus_sle12(self):
        '''
        Test getting current system locale with systemd and dbus available on SLE12.
        :return:
        '''
        # SLE12 is special-cased: even with systemd/dbus, the locale must be
        # read via cmd.run from RC_LANG in /etc/sysconfig/language.
        localemod.get_locale()
        assert localemod.__salt__['cmd.run'].call_args[0][0] == 'grep "^RC_LANG" /etc/sysconfig/language'
    @patch('salt.utils.which', MagicMock(return_value=None))
    @patch('salt.modules.localemod.__grains__', {'os_family': 'RedHat', 'osmajorrelease': 12})
    @patch('salt.modules.localemod.HAS_DBUS', False)
    @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()})
    @patch('salt.utils.systemd.booted', MagicMock(return_value=False))
    def test_get_locale_with_no_systemd_redhat(self):
        '''
        Test getting the current system locale on RedHat without systemd or
        dbus (localectl unavailable): falls back to /etc/sysconfig/i18n.
        '''
        localemod.get_locale()
        assert localemod.__salt__['cmd.run'].call_args[0][0] == 'grep "^LANG=" /etc/sysconfig/i18n'
    @patch('salt.utils.which', MagicMock(return_value=None))
    @patch('salt.modules.localemod.__grains__', {'os_family': 'Debian', 'osmajorrelease': 12})
    @patch('salt.modules.localemod.HAS_DBUS', False)
    @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()})
    @patch('salt.utils.systemd.booted', MagicMock(return_value=False))
    def test_get_locale_with_no_systemd_debian(self):
        '''
        Test getting the current system locale on Debian without systemd or
        dbus (localectl unavailable): falls back to /etc/default/locale.
        '''
        localemod.get_locale()
        assert localemod.__salt__['cmd.run'].call_args[0][0] == 'grep "^LANG=" /etc/default/locale'
    @patch('salt.utils.which', MagicMock(return_value=None))
    @patch('salt.modules.localemod.__grains__', {'os_family': 'Gentoo', 'osmajorrelease': 12})
    @patch('salt.modules.localemod.HAS_DBUS', False)
    @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()})
    @patch('salt.utils.systemd.booted', MagicMock(return_value=False))
    def test_get_locale_with_no_systemd_gentoo(self):
        '''
        Test getting the current system locale on Gentoo without systemd or
        dbus (localectl unavailable): falls back to `eselect locale`.
        '''
        localemod.get_locale()
        assert localemod.__salt__['cmd.run'].call_args[0][0] == 'eselect --brief locale show'
    @patch('salt.utils.which', MagicMock(return_value=None))
    @patch('salt.modules.localemod.__grains__', {'os_family': 'Solaris', 'osmajorrelease': 12})
    @patch('salt.modules.localemod.HAS_DBUS', False)
    @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()})
    @patch('salt.utils.systemd.booted', MagicMock(return_value=False))
    def test_get_locale_with_no_systemd_slowlaris(self):
        '''
        Test getting the current system locale on Solaris without systemd or
        dbus (localectl unavailable): falls back to /etc/default/init.
        '''
        localemod.get_locale()
        assert localemod.__salt__['cmd.run'].call_args[0][0] == 'grep "^LANG=" /etc/default/init'
    def test_get_locale(self):
        '''
        Test for Get the current system locale
        '''
        # systemd booted: dbus parser is preferred when HAS_DBUS, otherwise
        # the localectl parser is used.
        with patch.dict(localemod.__context__, {'salt.utils.systemd.booted': True}):
            with patch.dict(localemod.__grains__, {'os_family': ['Unknown']}):
                with patch.multiple(localemod,
                                    _parse_dbus_locale=MagicMock(return_value={'LANG': 'A'}),
                                    HAS_DBUS=True):
                    self.assertEqual('A', localemod.get_locale())
                    localemod._parse_dbus_locale.assert_called_once_with()

                with patch.multiple(localemod,
                                    _parse_localectl=MagicMock(return_value={'LANG': 'A'}),
                                    HAS_DBUS=False):
                    self.assertEqual('A', localemod.get_locale())
                    localemod._parse_localectl.assert_called_once_with()

        # not booted under systemd: per-os_family fallbacks, and an unknown
        # family raises CommandExecutionError.
        with patch.dict(localemod.__context__, {'salt.utils.systemd.booted': False}):
            with patch.dict(localemod.__grains__, {'os_family': ['Gentoo']}):
                with patch.dict(localemod.__salt__, {'cmd.run': MagicMock(return_value='A')}):
                    with patch.object(localemod,
                                      '_parse_localectl',
                                      return_value={'LANG': 'A'}):
                        self.assertEqual(localemod.get_locale(), 'A')

            with patch.dict(localemod.__grains__, {'os_family': ['RedHat']}):
                with patch.dict(localemod.__salt__, {'cmd.run': MagicMock(return_value='A=B')}):
                    with patch.object(localemod,
                                      '_parse_localectl',
                                      return_value={'LANG': 'B'}):
                        self.assertEqual(localemod.get_locale(), 'B')

            with patch.dict(localemod.__grains__, {'os_family': ['Unknown']}):
                with patch.dict(localemod.__salt__, {'cmd.run': MagicMock(return_value='A=B')}):
                    self.assertRaises(CommandExecutionError, localemod.get_locale)
    def test_set_locale(self):
        '''
        Test for Sets the current system locale.

        Covers the systemd delegation to _localectl_set, the Gentoo
        eselect failure path, and the unsupported platform error case.
        '''
        with patch.dict(localemod.__context__, {'salt.utils.systemd.booted': True}):
            with patch.dict(localemod.__grains__, {'os_family': ['Unknown']}):
                # With systemd, setting the locale is delegated to _localectl_set.
                with patch.object(localemod, '_localectl_set', return_value=True):
                    self.assertTrue(localemod.set_locale('l'))
        with patch.dict(localemod.__context__, {'salt.utils.systemd.booted': False}):
            with patch.dict(localemod.__grains__, {'os_family': ['Gentoo']}):
                # A non-zero retcode from the eselect call means failure.
                with patch.dict(localemod.__salt__, {'cmd.retcode': MagicMock(return_value='A')}):
                    with patch.object(localemod,
                                      '_parse_localectl',
                                      return_value={'LANG': 'B'}):
                        self.assertFalse(localemod.set_locale('l'))
            with patch.dict(localemod.__grains__, {'os_family': ['A']}):
                with patch.dict(localemod.__salt__, {'cmd.retcode': MagicMock(return_value=0)}):
                    with patch('salt.utils.systemd.booted', return_value=False):
                        # An unknown platform without systemd is unsupported.
                        self.assertRaises(CommandExecutionError, localemod.set_locale, 'A')
def test_avail(self):
'''
Test for Check if a locale is available
'''
with patch('salt.utils.locales.normalize_locale',
MagicMock(return_value='en_US.UTF-8 UTF-8')):
with patch.dict(localemod.__salt__,
{'locale.list_avail':
MagicMock(return_value=['A', 'B'])}):
self.assertTrue(localemod.avail('locale'))
    def test_gen_locale_not_valid(self):
        '''
        Tests the return of gen_locale when the provided locale is not found.

        file.search returning False means the locale is not present in the
        locale definition file, so gen_locale must return False.
        '''
        with patch.dict(localemod.__grains__, {'os': 'Debian'}), \
                patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \
                patch.dict(localemod.__salt__,
                           {'file.search': MagicMock(return_value=False)}):
            self.assertFalse(localemod.gen_locale('foo'))
    def test_gen_locale_debian(self):
        '''
        Tests the return of successful gen_locale on Debian system.
        '''
        # Mocked result of a successful locale-gen run.
        ret = {'stdout': 'saltines', 'stderr': 'biscuits', 'retcode': 0, 'pid': 1337}
        with patch.dict(localemod.__grains__, {'os': 'Debian'}), \
                patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \
                patch.dict(localemod.__salt__,
                           {'file.search': MagicMock(return_value=True),
                            'file.replace': MagicMock(return_value=True),
                            'cmd.run_all': MagicMock(return_value=ret)}):
            self.assertTrue(localemod.gen_locale('en_US.UTF-8 UTF-8'))
    def test_gen_locale_debian_no_charmap(self):
        '''
        Tests the return of successful gen_locale on Debian system without a charmap.
        '''
        def file_search(search, pattern, flags):
            '''
            mock file.search: pretend the bare locale (no charmap) is
            absent but the locale-with-charmap form is present.
            '''
            if len(pattern.split()) == 1:
                return False
            else:  # charmap was supplied
                return True
        # Mocked result of a successful locale-gen run.
        ret = {'stdout': 'saltines', 'stderr': 'biscuits', 'retcode': 0, 'pid': 1337}
        with patch.dict(localemod.__grains__, {'os': 'Debian'}), \
                patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \
                patch.dict(localemod.__salt__,
                           {'file.search': file_search,
                            'file.replace': MagicMock(return_value=True),
                            'cmd.run_all': MagicMock(return_value=ret)}):
            self.assertTrue(localemod.gen_locale('en_US.UTF-8'))
    def test_gen_locale_ubuntu(self):
        '''
        Test the return of successful gen_locale on Ubuntu system.
        '''
        # Mocked result of a successful locale-gen run.
        ret = {'stdout': 'saltines', 'stderr': 'biscuits', 'retcode': 0, 'pid': 1337}
        with patch.dict(localemod.__salt__,
                        {'file.replace': MagicMock(return_value=True),
                         'file.touch': MagicMock(return_value=None),
                         'file.append': MagicMock(return_value=None),
                         'cmd.run_all': MagicMock(return_value=ret)}), \
                patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \
                patch('os.listdir', MagicMock(return_value=['en_US'])), \
                patch.dict(localemod.__grains__, {'os': 'Ubuntu'}):
            self.assertTrue(localemod.gen_locale('en_US.UTF-8'))
    def test_gen_locale_gentoo(self):
        '''
        Tests the return of successful gen_locale on Gentoo system.
        '''
        # Mocked result of a successful locale-gen run.
        ret = {'stdout': 'saltines', 'stderr': 'biscuits', 'retcode': 0, 'pid': 1337}
        with patch.dict(localemod.__grains__, {'os_family': 'Gentoo'}), \
                patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \
                patch('os.listdir', MagicMock(return_value=['en_US.UTF-8'])), \
                patch.dict(localemod.__salt__,
                           {'file.search': MagicMock(return_value=True),
                            'file.replace': MagicMock(return_value=True),
                            'cmd.run_all': MagicMock(return_value=ret)}):
            self.assertTrue(localemod.gen_locale('en_US.UTF-8 UTF-8'))
    def test_gen_locale_gentoo_no_charmap(self):
        '''
        Tests the return of successful gen_locale on Gentoo system without a charmap.
        '''
        def file_search(search, pattern, flags):
            '''
            mock file.search: pretend the bare locale (no charmap) is
            absent but the locale-with-charmap form is present.
            '''
            if len(pattern.split()) == 1:
                return False
            else:  # charmap was supplied
                return True
        # Mocked result of a successful locale-gen run.
        ret = {'stdout': 'saltines', 'stderr': 'biscuits', 'retcode': 0, 'pid': 1337}
        with patch.dict(localemod.__grains__, {'os_family': 'Gentoo'}), \
                patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \
                patch('os.listdir', MagicMock(return_value=['en_US.UTF-8'])), \
                patch.dict(localemod.__salt__,
                           {'file.search': file_search,
                            'file.replace': MagicMock(return_value=True),
                            'cmd.run_all': MagicMock(return_value=ret)}):
            self.assertTrue(localemod.gen_locale('en_US.UTF-8'))
    def test_gen_locale(self):
        '''
        Tests the return of successful gen_locale (generic platform).
        '''
        # Mocked result of a successful locale-gen run.
        ret = {'stdout': 'saltines', 'stderr': 'biscuits', 'retcode': 0, 'pid': 1337}
        with patch.dict(localemod.__salt__,
                        {'cmd.run_all': MagicMock(return_value=ret),
                         'file.replace': MagicMock()}), \
                patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \
                patch('os.listdir', MagicMock(return_value=['en_US'])):
            self.assertTrue(localemod.gen_locale('en_US.UTF-8'))
    def test_gen_locale_verbose(self):
        '''
        Tests that gen_locale with verbose=True returns the full
        cmd.run_all result dictionary instead of a boolean.
        '''
        ret = {'stdout': 'saltines', 'stderr': 'biscuits', 'retcode': 0, 'pid': 1337}
        with patch.dict(localemod.__salt__,
                        {'cmd.run_all': MagicMock(return_value=ret),
                         'file.replace': MagicMock()}), \
                patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \
                patch('os.listdir', MagicMock(return_value=['en_US'])):
            self.assertEqual(localemod.gen_locale('en_US.UTF-8', verbose=True), ret)
    def test_parse_localectl(self):
        '''
        ``_parse_localectl`` must extract the locale variables from
        localectl output and ignore non-locale lines such as "VC Keymap".
        '''
        localectl_out = ('    System Locale: LANG=en_US.UTF-8\n'
                         '                   LANGUAGE=en_US:en\n'
                         '        VC Keymap: n/a')
        mock_cmd = Mock(return_value=localectl_out)
        with patch.dict(localemod.__salt__, {'cmd.run': mock_cmd}):
            ret = localemod._parse_localectl()
            self.assertEqual({'LANG': 'en_US.UTF-8', 'LANGUAGE': 'en_US:en'}, ret)
Add a unit test for calling get_locale on an unknown platform.
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rupesh Tare <rupesht@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
Mock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
try:
import pytest
except ImportError as import_error:
pytest = None
# Import Salt Libs
import salt.modules.localemod as localemod
from salt.exceptions import CommandExecutionError
from salt.ext import six
@skipIf(not pytest, False)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class LocalemodTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.localemod
'''
    def setup_loader_modules(self):
        '''
        Run ``localemod`` with an empty loader context (no pre-set dunders).
        '''
        return {localemod: {}}
def test_list_avail(self):
'''
Test for Lists available (compiled) locales
'''
with patch.dict(localemod.__salt__,
{'cmd.run': MagicMock(return_value='A\nB')}):
self.assertEqual(localemod.list_avail(), ['A', 'B'])
    @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl"))
    @patch('salt.modules.localemod.__grains__', {'os_family': 'Ubuntu', 'osmajorrelease': 42})
    @patch('salt.modules.localemod.HAS_DBUS', False)
    @patch('salt.modules.localemod._parse_dbus_locale', MagicMock(return_value={'LANG': 'en_US.utf8'}))
    @patch('salt.modules.localemod._localectl_status', MagicMock(return_value={'system_locale': {'LANG': 'de_DE.utf8'}}))
    @patch('salt.utils.systemd.booted', MagicMock(return_value=True))
    def test_get_locale_with_systemd_nodbus(self):
        '''
        Test getting current system locale with systemd but no dbus available.
        :return:
        '''
        # Without dbus, the locale must come from _localectl_status, not the dbus parser.
        assert localemod.get_locale() == 'de_DE.utf8'
    @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl"))
    @patch('salt.modules.localemod.__grains__', {'os_family': 'Ubuntu', 'osmajorrelease': 42})
    @patch('salt.modules.localemod.HAS_DBUS', True)
    @patch('salt.modules.localemod._parse_dbus_locale', MagicMock(return_value={'LANG': 'en_US.utf8'}))
    @patch('salt.modules.localemod._localectl_status', MagicMock(return_value={'system_locale': {'LANG': 'de_DE.utf8'}}))
    @patch('salt.utils.systemd.booted', MagicMock(return_value=True))
    def test_get_locale_with_systemd_and_dbus(self):
        '''
        Test getting current system locale with systemd and dbus available.
        :return:
        '''
        # With dbus present, the dbus-reported locale wins over localectl.
        assert localemod.get_locale() == 'en_US.utf8'
    @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl"))
    @patch('salt.modules.localemod.__grains__', {'os_family': 'Suse', 'osmajorrelease': 12})
    @patch('salt.modules.localemod.HAS_DBUS', True)
    @patch('salt.modules.localemod._parse_dbus_locale', MagicMock(return_value={'LANG': 'en_US.utf8'}))
    @patch('salt.modules.localemod._localectl_status', MagicMock(return_value={'system_locale': {'LANG': 'de_DE.utf8'}}))
    @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()})
    @patch('salt.utils.systemd.booted', MagicMock(return_value=True))
    def test_get_locale_with_systemd_and_dbus_sle12(self):
        '''
        Test getting current system locale with systemd and dbus available on SLE12.
        :return:
        '''
        localemod.get_locale()
        # SLE12 is special-cased: it reads RC_LANG from /etc/sysconfig/language
        # even when systemd and dbus are available.
        assert localemod.__salt__['cmd.run'].call_args[0][0] == 'grep "^RC_LANG" /etc/sysconfig/language'
    @patch('salt.utils.which', MagicMock(return_value=None))
    @patch('salt.modules.localemod.__grains__', {'os_family': 'RedHat', 'osmajorrelease': 12})
    @patch('salt.modules.localemod.HAS_DBUS', False)
    @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()})
    @patch('salt.utils.systemd.booted', MagicMock(return_value=False))
    def test_get_locale_with_no_systemd_redhat(self):
        '''
        Test getting the current system locale on RedHat when neither
        systemd nor dbus is available.
        :return:
        '''
        localemod.get_locale()
        # Without systemd, RedHat must fall back to grepping /etc/sysconfig/i18n.
        assert localemod.__salt__['cmd.run'].call_args[0][0] == 'grep "^LANG=" /etc/sysconfig/i18n'
    @patch('salt.utils.which', MagicMock(return_value=None))
    @patch('salt.modules.localemod.__grains__', {'os_family': 'Debian', 'osmajorrelease': 12})
    @patch('salt.modules.localemod.HAS_DBUS', False)
    @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()})
    @patch('salt.utils.systemd.booted', MagicMock(return_value=False))
    def test_get_locale_with_no_systemd_debian(self):
        '''
        Test getting the current system locale on Debian when neither
        systemd nor dbus is available.
        :return:
        '''
        localemod.get_locale()
        # Without systemd, Debian must fall back to grepping /etc/default/locale.
        assert localemod.__salt__['cmd.run'].call_args[0][0] == 'grep "^LANG=" /etc/default/locale'
    @patch('salt.utils.which', MagicMock(return_value=None))
    @patch('salt.modules.localemod.__grains__', {'os_family': 'Gentoo', 'osmajorrelease': 12})
    @patch('salt.modules.localemod.HAS_DBUS', False)
    @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()})
    @patch('salt.utils.systemd.booted', MagicMock(return_value=False))
    def test_get_locale_with_no_systemd_gentoo(self):
        '''
        Test getting the current system locale on Gentoo when neither
        systemd nor dbus is available.
        :return:
        '''
        localemod.get_locale()
        # Without systemd, Gentoo must query eselect for the active locale.
        assert localemod.__salt__['cmd.run'].call_args[0][0] == 'eselect --brief locale show'
    @patch('salt.utils.which', MagicMock(return_value=None))
    @patch('salt.modules.localemod.__grains__', {'os_family': 'Solaris', 'osmajorrelease': 12})
    @patch('salt.modules.localemod.HAS_DBUS', False)
    @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()})
    @patch('salt.utils.systemd.booted', MagicMock(return_value=False))
    def test_get_locale_with_no_systemd_slowlaris(self):
        '''
        Test getting the current system locale on Solaris when neither
        systemd nor dbus is available.
        :return:
        '''
        localemod.get_locale()
        # Without systemd, Solaris must fall back to grepping /etc/default/init.
        assert localemod.__salt__['cmd.run'].call_args[0][0] == 'grep "^LANG=" /etc/default/init'
    @patch('salt.utils.which', MagicMock(return_value=None))
    @patch('salt.modules.localemod.__grains__', {'os_family': 'BSD', 'osmajorrelease': 8, 'oscodename': 'DrunkDragon'})
    @patch('salt.modules.localemod.HAS_DBUS', False)
    @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()})
    @patch('salt.utils.systemd.booted', MagicMock(return_value=False))
    def test_get_locale_with_no_systemd_unknown(self):
        '''
        Test that getting the locale on an unsupported platform (BSD)
        without systemd raises a CommandExecutionError.
        :return:
        '''
        with pytest.raises(CommandExecutionError) as err:
            localemod.get_locale()
        # The error message must name the unsupported platform's codename.
        assert '"DrunkDragon" is unsupported' in six.text_type(err)
    def test_set_locale(self):
        '''
        Test for Sets the current system locale.

        Covers the systemd delegation to _localectl_set, the Gentoo
        eselect failure path, and the unsupported platform error case.
        '''
        with patch.dict(localemod.__context__, {'salt.utils.systemd.booted': True}):
            with patch.dict(localemod.__grains__, {'os_family': ['Unknown']}):
                # With systemd, setting the locale is delegated to _localectl_set.
                with patch.object(localemod, '_localectl_set', return_value=True):
                    self.assertTrue(localemod.set_locale('l'))
        with patch.dict(localemod.__context__, {'salt.utils.systemd.booted': False}):
            with patch.dict(localemod.__grains__, {'os_family': ['Gentoo']}):
                # A non-zero retcode from the eselect call means failure.
                with patch.dict(localemod.__salt__, {'cmd.retcode': MagicMock(return_value='A')}):
                    with patch.object(localemod,
                                      '_parse_localectl',
                                      return_value={'LANG': 'B'}):
                        self.assertFalse(localemod.set_locale('l'))
            with patch.dict(localemod.__grains__, {'os_family': ['A']}):
                with patch.dict(localemod.__salt__, {'cmd.retcode': MagicMock(return_value=0)}):
                    with patch('salt.utils.systemd.booted', return_value=False):
                        # An unknown platform without systemd is unsupported.
                        self.assertRaises(CommandExecutionError, localemod.set_locale, 'A')
def test_avail(self):
'''
Test for Check if a locale is available
'''
with patch('salt.utils.locales.normalize_locale',
MagicMock(return_value='en_US.UTF-8 UTF-8')):
with patch.dict(localemod.__salt__,
{'locale.list_avail':
MagicMock(return_value=['A', 'B'])}):
self.assertTrue(localemod.avail('locale'))
    def test_gen_locale_not_valid(self):
        '''
        Tests the return of gen_locale when the provided locale is not found.

        file.search returning False means the locale is not present in the
        locale definition file, so gen_locale must return False.
        '''
        with patch.dict(localemod.__grains__, {'os': 'Debian'}), \
                patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \
                patch.dict(localemod.__salt__,
                           {'file.search': MagicMock(return_value=False)}):
            self.assertFalse(localemod.gen_locale('foo'))
    def test_gen_locale_debian(self):
        '''
        Tests the return of successful gen_locale on Debian system.
        '''
        # Mocked result of a successful locale-gen run.
        ret = {'stdout': 'saltines', 'stderr': 'biscuits', 'retcode': 0, 'pid': 1337}
        with patch.dict(localemod.__grains__, {'os': 'Debian'}), \
                patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \
                patch.dict(localemod.__salt__,
                           {'file.search': MagicMock(return_value=True),
                            'file.replace': MagicMock(return_value=True),
                            'cmd.run_all': MagicMock(return_value=ret)}):
            self.assertTrue(localemod.gen_locale('en_US.UTF-8 UTF-8'))
    def test_gen_locale_debian_no_charmap(self):
        '''
        Tests the return of successful gen_locale on Debian system without a charmap.
        '''
        def file_search(search, pattern, flags):
            '''
            mock file.search: pretend the bare locale (no charmap) is
            absent but the locale-with-charmap form is present.
            '''
            if len(pattern.split()) == 1:
                return False
            else:  # charmap was supplied
                return True
        # Mocked result of a successful locale-gen run.
        ret = {'stdout': 'saltines', 'stderr': 'biscuits', 'retcode': 0, 'pid': 1337}
        with patch.dict(localemod.__grains__, {'os': 'Debian'}), \
                patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \
                patch.dict(localemod.__salt__,
                           {'file.search': file_search,
                            'file.replace': MagicMock(return_value=True),
                            'cmd.run_all': MagicMock(return_value=ret)}):
            self.assertTrue(localemod.gen_locale('en_US.UTF-8'))
    def test_gen_locale_ubuntu(self):
        '''
        Test the return of successful gen_locale on Ubuntu system.
        '''
        # Mocked result of a successful locale-gen run.
        ret = {'stdout': 'saltines', 'stderr': 'biscuits', 'retcode': 0, 'pid': 1337}
        with patch.dict(localemod.__salt__,
                        {'file.replace': MagicMock(return_value=True),
                         'file.touch': MagicMock(return_value=None),
                         'file.append': MagicMock(return_value=None),
                         'cmd.run_all': MagicMock(return_value=ret)}), \
                patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \
                patch('os.listdir', MagicMock(return_value=['en_US'])), \
                patch.dict(localemod.__grains__, {'os': 'Ubuntu'}):
            self.assertTrue(localemod.gen_locale('en_US.UTF-8'))
    def test_gen_locale_gentoo(self):
        '''
        Tests the return of successful gen_locale on Gentoo system.
        '''
        # Mocked result of a successful locale-gen run.
        ret = {'stdout': 'saltines', 'stderr': 'biscuits', 'retcode': 0, 'pid': 1337}
        with patch.dict(localemod.__grains__, {'os_family': 'Gentoo'}), \
                patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \
                patch('os.listdir', MagicMock(return_value=['en_US.UTF-8'])), \
                patch.dict(localemod.__salt__,
                           {'file.search': MagicMock(return_value=True),
                            'file.replace': MagicMock(return_value=True),
                            'cmd.run_all': MagicMock(return_value=ret)}):
            self.assertTrue(localemod.gen_locale('en_US.UTF-8 UTF-8'))
    def test_gen_locale_gentoo_no_charmap(self):
        '''
        Tests the return of successful gen_locale on Gentoo system without a charmap.
        '''
        def file_search(search, pattern, flags):
            '''
            mock file.search: pretend the bare locale (no charmap) is
            absent but the locale-with-charmap form is present.
            '''
            if len(pattern.split()) == 1:
                return False
            else:  # charmap was supplied
                return True
        # Mocked result of a successful locale-gen run.
        ret = {'stdout': 'saltines', 'stderr': 'biscuits', 'retcode': 0, 'pid': 1337}
        with patch.dict(localemod.__grains__, {'os_family': 'Gentoo'}), \
                patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \
                patch('os.listdir', MagicMock(return_value=['en_US.UTF-8'])), \
                patch.dict(localemod.__salt__,
                           {'file.search': file_search,
                            'file.replace': MagicMock(return_value=True),
                            'cmd.run_all': MagicMock(return_value=ret)}):
            self.assertTrue(localemod.gen_locale('en_US.UTF-8'))
    def test_gen_locale(self):
        '''
        Tests the return of successful gen_locale (generic platform).
        '''
        # Mocked result of a successful locale-gen run.
        ret = {'stdout': 'saltines', 'stderr': 'biscuits', 'retcode': 0, 'pid': 1337}
        with patch.dict(localemod.__salt__,
                        {'cmd.run_all': MagicMock(return_value=ret),
                         'file.replace': MagicMock()}), \
                patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \
                patch('os.listdir', MagicMock(return_value=['en_US'])):
            self.assertTrue(localemod.gen_locale('en_US.UTF-8'))
    def test_gen_locale_verbose(self):
        '''
        Tests that gen_locale with verbose=True returns the full
        cmd.run_all result dictionary instead of a boolean.
        '''
        ret = {'stdout': 'saltines', 'stderr': 'biscuits', 'retcode': 0, 'pid': 1337}
        with patch.dict(localemod.__salt__,
                        {'cmd.run_all': MagicMock(return_value=ret),
                         'file.replace': MagicMock()}), \
                patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \
                patch('os.listdir', MagicMock(return_value=['en_US'])):
            self.assertEqual(localemod.gen_locale('en_US.UTF-8', verbose=True), ret)
    def test_parse_localectl(self):
        '''
        ``_parse_localectl`` must extract the locale variables from
        localectl output and ignore non-locale lines such as "VC Keymap".
        '''
        localectl_out = ('    System Locale: LANG=en_US.UTF-8\n'
                         '                   LANGUAGE=en_US:en\n'
                         '        VC Keymap: n/a')
        mock_cmd = Mock(return_value=localectl_out)
        with patch.dict(localemod.__salt__, {'cmd.run': mock_cmd}):
            ret = localemod._parse_localectl()
            self.assertEqual({'LANG': 'en_US.UTF-8', 'LANGUAGE': 'en_US:en'}, ret)
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
Tests the behaviour of the local_storage storage implementation.
Note that this test suite is meant to also test the behaviour of concurrent
operations to the file system. This requires actual file writes. This sort of
thing is very dependent on the operating system the tests are run on, and the
atomicity of operations even more so. The concurrency tests should therefore be
assumed to only hold for the operating system that the tests are running on.
"""
import functools #For partialmethod, to wrap arbitrary method calls with the __getattr__ function.
import io #To get the default buffer size.
import os #Cleaning up test files afterwards, and getting file size to design a good test.
import unittest.mock #To replace file reading/writing with something that simulates external influence.
import luna.tests #To get parametrised tests.
import localstorage.local_storage #The module we're testing.
_unsafe_target_file = "test.txt"
"""
A file that is being used by multiple threads at a time, in a simulation.
The mock functions simulate file I/O being applied to this file concurrently
with the actual operation. Interaction with this file is considered
thread-unsafe. Interaction with other files is considered thread-safe, and
should only be done in tests as long as it can be (almost) guaranteed that
no other process will interfere with the file.
"""
_concurrent_write_bytes_written = 0
"""
How many bytes the concurrent writing mock has written so far.
It writes up to 10 bytes.
"""
_original_open = open
"""
The original open function that opens a file normally.
This is stored to write normally to a file within the ``ConcurrentIOWrapper``,
even if the ``open`` function is patched to return a ``ConcurrentIOWrapper``
instead of its normal behaviour.
"""
class ConcurrentIOWrapper:
    """
    Simulates concurrent writes to the I/O stream being wrapped.

    Every method call on the wrapped stream is bracketed by byte-writes to
    the module's unsafe target file, imitating another process writing to
    that file at the same time as the code under test.
    """

    _written_bytes = 0
    """
    How many bytes are written in this test.

    Stop writing if the whole string is written. This allows algorithms that
    are not wait-free to still pass the test.
    """

    @classmethod
    def reset(cls):
        """
        Resets the written bytes count, to appear as if newly constructed.

        This causes the inserted bytes to start from the beginning again.
        """
        cls._written_bytes = 0

    def __init__(self, stream, write_string):
        """
        Creates a new I/O wrapper around a specified stream.

        All calls to this wrapper are passed on to the stream, but around each
        call extra bytes are written to the unsafe target file to simulate a
        concurrent writer.

        :param stream: The stream to wrap around.
        :param write_string: A string of characters to write during I/O
            operations with the stream. Note that this string is always written
            to the unsafe file, which is not necessarily the file for this
            stream.
        """
        self._stream = stream
        self._write_string = write_string

    def __getattr__(self, item):
        """
        Writes data to the concurrent stream, gets an attribute from the actual
        stream, then writes more data to the concurrent stream.

        :param item: The name of the attribute to get.
        :return: The value of the requested attribute.
        """
        if hasattr(self._stream.__getattribute__(item), "__self__"): #Only catch method calls.
            if item == "read": #Catch ``read`` with a special function that also interjects halfway.
                return self._concurrent_write_and_read
            else:
                return functools.partial(self._concurrent_write_and_call, self._stream.__getattribute__(item))
        else: #Plain attribute access: pass through untouched.
            return self._stream.__getattribute__(item)

    def __enter__(self, *args, **kwargs):
        """
        Enters the scope of the I/O stream.

        :param args: Positional arguments to pass to the I/O stream.
        :param kwargs: Key-word arguments to pass to the I/O stream.
        :return: The wrapping I/O stream.
        """
        self._stream.__enter__(*args, **kwargs) #Return self instead of the stream's enter result, so I/O operations inside a with-clause happen on the wrapper.
        return self

    def __exit__(self, *args, **kwargs):
        """
        Exits the scope of the I/O stream.

        This needs to exist for the with-clause to allow being called on the
        wrapper. It is a completely transparent wrapper around the actual I/O
        stream.

        :param args: Positional arguments to pass to the I/O stream.
        :param kwargs: Key-word arguments to pass to the I/O stream.
        :return: The result of exiting the I/O stream's scope.
        """
        return self._stream.__exit__(*args, **kwargs)

    def _concurrent_write_and_call(self, function, *args, **kwargs):
        """
        Calls a function, but concurrently writes to the unsafe target file.

        A byte is written to the unsafe target file before and after a function
        call.

        :param function: The function to call.
        :param args: The positional arguments to call the function with.
        :param kwargs: The key-word arguments to call the function with.
        :return: The result of the function call.
        """
        if ConcurrentIOWrapper._written_bytes == 0: #The first time, completely overwrite the original file.
            with _original_open(_unsafe_target_file, "wb", buffering=0) as concurrent_handle:
                concurrent_handle.write(b"") #Clear the file.
        if ConcurrentIOWrapper._written_bytes < len(self._write_string): #Append one byte.
            with _original_open(_unsafe_target_file, "ab", buffering=0) as concurrent_handle:
                concurrent_handle.write(self._write_string[ConcurrentIOWrapper._written_bytes:ConcurrentIOWrapper._written_bytes + 1])
            ConcurrentIOWrapper._written_bytes += 1
        result = function(*args, **kwargs) #The actual call in between.
        if ConcurrentIOWrapper._written_bytes < len(self._write_string): #Append one byte again.
            with _original_open(_unsafe_target_file, "ab", buffering=0) as concurrent_handle:
                concurrent_handle.write(self._write_string[ConcurrentIOWrapper._written_bytes:ConcurrentIOWrapper._written_bytes + 1])
            ConcurrentIOWrapper._written_bytes += 1
        return result

    def _concurrent_write_and_read(self, *args, **kwargs):
        """
        Calls the ``read`` function twice and inserts a concurrent write in
        between.

        One byte is read first, then a byte is appended to the unsafe target
        file, then the rest of the stream is read.

        :param args: The positional arguments passed to the ``read`` function.
        :param kwargs: The key-word arguments passed to the ``read`` function.
        :return: The result of the ``read`` function.
        """
        if ConcurrentIOWrapper._written_bytes < len(self._write_string):
            first_part = self._stream.read(1) #If this fails, the file is empty. That is really a wrong way to test read atomicity with.
            if ConcurrentIOWrapper._written_bytes < len(self._write_string): #Append one byte.
                if ConcurrentIOWrapper._written_bytes == 0: #The first time, completely overwrite the original file.
                    with _original_open(_unsafe_target_file, "wb", buffering=0) as concurrent_handle:
                        concurrent_handle.write(b"") #Clear the file.
                with _original_open(_unsafe_target_file, "ab", buffering=0) as concurrent_handle:
                    concurrent_handle.write(self._write_string[ConcurrentIOWrapper._written_bytes:ConcurrentIOWrapper._written_bytes + 1])
                ConcurrentIOWrapper._written_bytes += 1
            second_part = self._stream.read(*args, **kwargs) #Read the rest of the file.
            return first_part + second_part
        else: #Don't do the concurrent write. After some amount of calls the "writing" is done. We assume that there comes a time where this is the case in real situations.
            return self._stream.read(*args, **kwargs)
def _open_simulate_concurrency(file, *args, **kwargs):
    """
    Opens a file, but simulates concurrent reads/writes to some files.

    The call to ``open`` is made transparently, but the resulting I/O stream is
    wrapped around by a class that is completely transparent, except that it
    writes data to the unsafe target file each time you call a method. This
    simulates concurrent writes to the file.

    The arguments and key-word arguments are explicitly not specified in this
    function, as they must be transparent towards the real ``open`` function,
    even when the real ``open`` function changes.

    :param file: The path to the file to open.
    :param args: Any additional arguments supplied to the open function.
    :param kwargs: Any additional key-word arguments supplied to the open
        function.
    :return: A wrapped I/O stream that simulates concurrent writes to the
        unsafe target file.
    """
    #Force unbuffered I/O so every read/write hits the file immediately and
    #the simulated interleaving is deterministic.
    #NOTE: buffering=0 is only valid for binary mode — callers are expected to
    #open in "rb"/"wb"/"ab" (as the tests in this module do).
    if len(args) >= 2: #"buffering" was passed positionally: open(file, mode, buffering, ...).
        args_list = list(args)
        args_list[1] = 0 #Change the "buffering" parameter.
        args = tuple(args_list)
        original_io_stream = _original_open(file, *args, **kwargs)
    else: #Provide our own "buffering" parameter, overriding any keyword one.
        kwargs.pop("buffering", None)
        original_io_stream = _original_open(file, *args, buffering=0, **kwargs)
    return ConcurrentIOWrapper(original_io_stream, b"1234567890")
class TestLocalStorage(luna.tests.TestCase):
"""
Tests the behaviour of the local_storage storage implementation.
"""
_bad_uris = {
"http": {
"uri": "http://www.example.com/file.txt"
},
"empty": {
"uri": ""
},
"parse error": {
"uri": "http://[invalid/file.txt"
}
}
_good_uris = {
"unix file": {
"uri": "file:///home/username/file.txt"
},
"windows file": {
"uri": "file://C:/Users/username/file.txt"
},
"windows network file": {
"uri": "file://server/file.txt"
}
}
_test_bytes = {
"word": {
"content": b"Test"
},
"empty": {
"content": b""
},
"null_character": {
"content": b"null\x00character"
},
"last_character": {
"content": b"last\xFFcharacter"
},
"long": {
"content": b"x" * (io.DEFAULT_BUFFER_SIZE + 10) #Be larger than the default buffer size so it has to do at least 2 reads or writes.
}
}
"""
Simple sequences of bytes to write and read from files to test with.
These include some of the special cases that may result in problems, such as
empty content and null characters.
"""
    def setUp(self):
        """
        Resets the number of bytes written concurrently in this test.

        This makes each test see the simulated concurrent writer start from
        the beginning of its byte string.
        """
        ConcurrentIOWrapper.reset()
    def tearDown(self):
        """
        Removes any files that may have been written during these tests.
        """
        if os.path.isfile(_unsafe_target_file):
            os.remove(_unsafe_target_file)
    @luna.tests.parametrise(_good_uris)
    def test_can_read(self, uri):
        """
        Tests whether the plug-in says it can read files that it should be able
        to read.

        :param uri: A URI of a file that the local storage plug-in should be
            able to read.
        """
        self.assertTrue(localstorage.local_storage.can_read(uri))
    @luna.tests.parametrise(_good_uris)
    def test_can_write(self, uri):
        """
        Tests whether the plug-in says it can write files that it should be able
        to write.

        :param uri: A URI of a file that the local storage plug-in should be
            able to write.
        """
        self.assertTrue(localstorage.local_storage.can_write(uri))
    @luna.tests.parametrise(_bad_uris)
    def test_cannot_read(self, uri):
        """
        Tests whether the plug-in says it cannot read files that it should not
        be able to read.

        :param uri: A URI of a resource that the local storage plug-in should
            not be able to read.
        """
        self.assertFalse(localstorage.local_storage.can_read(uri))
    @luna.tests.parametrise(_bad_uris)
    def test_cannot_write(self, uri):
        """
        Tests whether the plug-in says it cannot write files that it should not
        be able to write.

        :param uri: A URI of a resource that the local storage plug-in should
            not be able to write.
        """
        self.assertFalse(localstorage.local_storage.can_write(uri))
def test_delete(self):
"""
Tests deleting a file.
"""
with open(_unsafe_target_file, "w") as file_handle: #Create the file.
file_handle.write("Test!")
localstorage.local_storage.delete(_unsafe_target_file)
self.assertFalse(os.path.isfile(_unsafe_target_file))
    def test_exists_after_deleting(self):
        """
        Tests whether a file is said to exist if it was just deleted.
        """
        with open(_unsafe_target_file, "w") as file_handle: #Create the file.
            file_handle.write("Test!")
        os.remove(_unsafe_target_file) #Delete it.
        self.assertFalse(localstorage.local_storage.exists(_unsafe_target_file), msg="The file {file_name} was reported to exist, but it was just deleted.".format(file_name=_unsafe_target_file))
    def test_exists_just_created(self):
        """
        Tests whether a file is said to exist if it has just been created.
        """
        with open(_unsafe_target_file, "w") as file_handle: #Create the file.
            file_handle.write("Test!")
        self.assertTrue(localstorage.local_storage.exists(_unsafe_target_file), msg="The file {file_name} was reported to not exist, but it was just created.".format(file_name=_unsafe_target_file))
def test_exists_never_created(self):
"""
Tests whether a file is said to not exist if it was never created.
"""
self.assertFalse(localstorage.local_storage.exists(_unsafe_target_file), msg="The file {file_name} was reported to be existing, though it shouldn't exist.".format(file_name=_unsafe_target_file)) #If stuff was cleaned up properly after each test, this should not exist.
    @luna.tests.parametrise(_test_bytes)
    def test_open_read(self, content):
        """
        Tests whether reading a simple file is successful.

        This writes some content to a file, reads it back and sees whether it is
        the same.

        :param content: The content to put in the file before reading.
        """
        with open(_unsafe_target_file, "wb") as file_handle: #Create the file with simple content.
            file_handle.write(content)
        with localstorage.local_storage.open_read(_unsafe_target_file) as file_handle:
            result = file_handle.read()
        self.assertEqual(result, content, "Read must be exactly equal to what was written to the file.")
    def test_move(self):
        """
        Tests moving a file.

        This is just a simple move: origin must vanish, destination must
        appear. Cleanup happens in ``finally`` because these file names are
        not covered by ``tearDown``.
        """
        with open("start.txt", "w") as file_handle: #Create a file.
            file_handle.write("Test!")
        #Assumes that the file exists now.
        try:
            localstorage.local_storage.move("start.txt", "end.txt")
            self.assertFalse(os.path.isfile("start.txt"), msg="Move origin file may no longer exist after the move.")
            self.assertTrue(os.path.isfile("end.txt"), msg="Move destination file must exist after the move.")
        finally: #Clean up.
            if os.path.isfile("start.txt"):
                os.remove("start.txt")
            if os.path.isfile("end.txt"):
                os.remove("end.txt")
def test_open_read_atomicity(self):
    """
    Tests the ``open_read`` function to see whether it is an atomic read.

    While ``open`` is patched, every method call on a file handle causes the
    mock to clear the unsafe target file and append successive digits of
    ``b"1234567890"`` to it, simulating a concurrent writer. An atomic read
    must therefore return either the original content or a complete snapshot
    taken at some single point during that simulated write.
    """
    with open(_unsafe_target_file, "wb") as unsafe_file_handle:
        unsafe_file_handle.write(b"Test") #Some initial data to test with. This is not tested.
    with unittest.mock.patch("builtins.open", _open_simulate_concurrency):
        with localstorage.local_storage.open_read(_unsafe_target_file) as tested_file_handle:
            result = tested_file_handle.read()
    #Every acceptable snapshot: the untouched original, or any prefix the
    #concurrent writer could have produced.
    self.assertIn(result, [ #At any stage during the writing.
        b"Test",
        b"1",
        b"12",
        b"123",
        b"1234",
        b"12345",
        b"123456",
        b"1234567",
        b"12345678",
        b"123456789",
        b"1234567890"
    ], result.decode("utf-8") + " is not a snapshot of the file at any point in time, and as such is not atomic.")
@luna.tests.parametrise(_test_bytes)
def test_write(self, content):
    """
    Tests that writing a file via ``open_write`` stores its exact contents.

    The content is written through the storage implementation under test and
    read back with the plain built-in ``open`` for verification.
    :param content: The content to write to the file, as `bytes`.
    """
    with localstorage.local_storage.open_write(_unsafe_target_file) as tested_handle:
        tested_handle.write(content)
    with open(_unsafe_target_file, "rb") as raw_handle:
        read_back = raw_handle.read()
    self.assertEqual(read_back, content, "Write must put the exact content in the file.")
def test_write_atomicity(self):
    """
    Tests the ``open_write`` function to see whether it is an atomic write.

    While ``open`` is patched, the mock interleaves writes of digit bytes
    into the unsafe target file around every method call on the handle. If
    the implementation writes atomically, none of that interleaved data may
    end up in the final file: it must contain exactly the tested bytes.
    """
    test_bytes = b"Test"
    with unittest.mock.patch("builtins.open", _open_simulate_concurrency):
        with localstorage.local_storage.open_write(_unsafe_target_file) as tested_file_handle:
            tested_file_handle.write(test_bytes)
    #Read back with the real, unpatched open to inspect the end result.
    with open(_unsafe_target_file, "rb") as written_file_handle:
        result = written_file_handle.read()
    self.assertEqual(result, test_bytes, "File write is not atomic.")
@luna.tests.parametrise(_test_bytes)
def test_write_existing(self, content):
    """
    Tests that writing to an already existing file overwrites it completely.
    :param content: The content to write to the file, as `bytes`.
    """
    #Seed the file with old data so there is something to overwrite.
    with open(_unsafe_target_file, "w") as handle:
        handle.write("Original file contents.")
    with localstorage.local_storage.open_write(_unsafe_target_file) as tested_handle:
        tested_handle.write(content)
    with open(_unsafe_target_file, "rb") as raw_handle:
        read_back = raw_handle.read()
    self.assertEqual(read_back, content, "Write must overwrite the old file and put the exact content in the file.")
Reorder the tests alphabetically, which puts the tests for the open_read function properly together.
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
Tests the behaviour of the local_storage storage implementation.
Note that this test suite is meant to also test the behaviour of concurrent
operations to the file system. This requires actual file writes. This sort of
thing is very dependent on the operating system the tests are run on, and the
atomicity of operations even more so. The concurrency tests should therefore be
assumed to only hold for the operating system that the tests are running on.
"""
import functools #For partialmethod, to wrap arbitrary method calls with the __getattr__ function.
import io #To get the default buffer size.
import os #Cleaning up test files afterwards, and getting file size to design a good test.
import unittest.mock #To replace file reading/writing with something that simulates external influence.
import luna.tests #To get parametrised tests.
import localstorage.local_storage #The module we're testing.
#Path of the file that the concurrency-simulation machinery targets.
_unsafe_target_file = "test.txt"
"""
A file that is being used by multiple threads at a time, in a simulation.
The mock functions simulate file I/O being applied to this file concurrently
with the actual operation. Interaction with this file is considered
thread-unsafe. Interaction with other files is considered thread-safe, and
should only be done in tests as long as it can be (almost) guaranteed that
no other process will interfere with the file.
"""

#NOTE(review): this module-level counter appears to be unused; the actual
#bookkeeping happens via ConcurrentIOWrapper._written_bytes. Confirm before
#removing it.
_concurrent_write_bytes_written = 0
"""
How many bytes the concurrent writing mock has written so far.
It writes up to 10 bytes.
"""

#Captured before unittest.mock.patch replaces the built-in ``open``.
_original_open = open
"""
The original open function that opens a file normally.
This is stored to write normally to a file within the ``ConcurrentIOWrapper``,
even if the ``open`` function is patched to return a ``ConcurrentIOWrapper``
instead of its normal behaviour.
"""
class ConcurrentIOWrapper:
    """
    Simulates concurrent writes to the I/O stream being wrapped.
    """

    #Shared across all wrapper instances within one test; reset() clears it.
    _written_bytes = 0
    """
    How many bytes are written in this test.
    Stop writing if the whole string is written. This allows algorithms that are
    not wait-free to still pass the test.
    """

    @classmethod
    def reset(cls):
        """
        Resets the written bytes count, to appear as if newly constructed.
        This causes the inserted bytes to start from the beginning again.
        """
        cls._written_bytes = 0

    def __init__(self, stream, write_string):
        """
        Creates a new I/O wrapper around a specified stream.

        All calls to this wrapper are passed on to the stream, but around each
        call some bytes of ``write_string`` are concurrently written to the
        unsafe target file.
        :param stream: The stream to wrap around.
        :param write_string: A string of characters to write during I/O
            operations with the stream. Note that this string is always written
            to the unsafe file, which is not necessarily the file for this
            stream.
        """
        self._stream = stream
        self._write_string = write_string

    def __getattr__(self, item):
        """
        Writes data to the concurrent stream, gets an attribute from the actual
        stream, then writes more data to the concurrent stream.
        :param item: The name of the attribute to get.
        :return: The value of the requested attribute.
        """
        #Bound methods have a __self__ attribute; plain data attributes don't.
        if hasattr(self._stream.__getattribute__(item), "__self__"): #Only catch method calls.
            if item == "read": #Catch ``read`` with a special function that also interjects halfway.
                return self._concurrent_write_and_read
            else:
                return functools.partial(self._concurrent_write_and_call, self._stream.__getattribute__(item))
        else:
            return self._stream.__getattribute__(item)

    def __enter__(self, *args, **kwargs):
        """
        Enters the scope of the I/O stream.
        :param args: Positional arguments to pass to the I/O stream.
        :param kwargs: Key-word arguments to pass to the I/O stream.
        :return: The wrapping I/O stream.
        """
        self._stream.__enter__(*args, **kwargs) #Return self instead of the stream's enter result, so I/O operations inside a with-clause happen on the wrapper.
        return self

    def __exit__(self, *args, **kwargs):
        """
        Exits the scope of the I/O stream.
        This needs to exist for the with-clause to allow being called on the
        wrapper. It is a completely transparent wrapper around the actual I/O
        stream.
        :param args: Positional arguments to pass to the I/O stream.
        :param kwargs: Key-word arguments to pass to the I/O stream.
        :return: The result of exiting the I/O stream's scope.
        """
        return self._stream.__exit__(*args, **kwargs)

    def _concurrent_write_and_call(self, function, *args, **kwargs):
        """
        Calls a function, but concurrently writes to the unsafe target file.
        A byte is written to the unsafe target file before and after a function
        call.
        :param function: The function to call.
        :param args: The positional arguments to call the function with.
        :param kwargs: The key-word arguments to call the function with.
        :return: The result of the function call.
        """
        if ConcurrentIOWrapper._written_bytes == 0: #The first time, completely overwrite the original file.
            with _original_open(_unsafe_target_file, "wb", buffering=0) as concurrent_handle:
                concurrent_handle.write(b"") #Clear the file.
        if ConcurrentIOWrapper._written_bytes < len(self._write_string): #Append one byte.
            with _original_open(_unsafe_target_file, "ab", buffering=0) as concurrent_handle:
                #Slicing (rather than indexing) keeps the single byte as bytes.
                concurrent_handle.write(self._write_string[ConcurrentIOWrapper._written_bytes:ConcurrentIOWrapper._written_bytes + 1])
            ConcurrentIOWrapper._written_bytes += 1
        result = function(*args, **kwargs) #The actual call in between.
        if ConcurrentIOWrapper._written_bytes < len(self._write_string): #Append one byte again.
            with _original_open(_unsafe_target_file, "ab", buffering=0) as concurrent_handle:
                concurrent_handle.write(self._write_string[ConcurrentIOWrapper._written_bytes:ConcurrentIOWrapper._written_bytes + 1])
            ConcurrentIOWrapper._written_bytes += 1
        return result

    def _concurrent_write_and_read(self, *args, **kwargs):
        """
        Calls the ``read`` function twice and inserts a concurrent write in
        between.
        :param args: The positional arguments passed to the ``read`` function.
        :param kwargs: The key-word arguments passed to the ``read`` function.
        :return: The result of the ``read`` function.
        """
        if ConcurrentIOWrapper._written_bytes < len(self._write_string):
            first_part = self._stream.read(1) #If this fails, the file is empty. That is really a wrong way to test read atomicity with.
            if ConcurrentIOWrapper._written_bytes < len(self._write_string): #Append one byte.
                if ConcurrentIOWrapper._written_bytes == 0: #The first time, completely overwrite the original file.
                    with _original_open(_unsafe_target_file, "wb", buffering=0) as concurrent_handle:
                        concurrent_handle.write(b"") #Clear the file.
                with _original_open(_unsafe_target_file, "ab", buffering=0) as concurrent_handle:
                    concurrent_handle.write(self._write_string[ConcurrentIOWrapper._written_bytes:ConcurrentIOWrapper._written_bytes + 1])
                ConcurrentIOWrapper._written_bytes += 1
            second_part = self._stream.read(*args, **kwargs) #Read the rest of the file.
            return first_part + second_part
        else: #Don't do the concurrent write. After some amount of calls the "writing" is done. We assume that there comes a time where this is the case in real situations.
            return self._stream.read(*args, **kwargs)
def _open_simulate_concurrency(file, *args, **kwargs):
    """
    Opens a file, but simulates concurrent reads/writes to some files.

    The call to ``open`` is made transparently, but the resulting I/O stream is
    wrapped around by a class that is completely transparent, except that it
    writes data to the unsafe target file each time you call a method. This
    simulates concurrent writes to the file.

    The arguments and key-word arguments are explicitly not specified in this
    function, as they must be transparent towards the real ``open`` function,
    even when the real ``open`` function changes.
    :param file: The path to the file to open.
    :param args: Any additional arguments supplied to the open function.
    :param kwargs: Any additional key-word arguments supplied to the open
        function.
    :return: A wrapped I/O stream that simulates concurrent writes to the
        unsafe target file.
    """
    #Force unbuffered I/O (buffering=0) so every read/write on the wrapped
    #stream hits the file immediately, keeping the simulated interleaving
    #deterministic.
    if len(args) >= 2: #"buffering" was supplied positionally: (mode, buffering, ...).
        args_list = list(args)
        args_list[1] = 0 #Overwrite the "buffering" parameter.
        args = tuple(args_list)
        original_io_stream = _original_open(file, *args, **kwargs)
    else: #Provide our own "buffering" parameter.
        kwargs.pop("buffering", None) #Discard any keyword "buffering" so ours wins.
        original_io_stream = _original_open(file, buffering=0, *args, **kwargs)
    return ConcurrentIOWrapper(original_io_stream, b"1234567890")
class TestLocalStorage(luna.tests.TestCase):
    """
    Tests the behaviour of the local_storage storage implementation.
    """

    #URIs the plug-in must refuse to handle (wrong scheme, empty, unparseable).
    _bad_uris = {
        "http": {
            "uri": "http://www.example.com/file.txt"
        },
        "empty": {
            "uri": ""
        },
        "parse error": {
            "uri": "http://[invalid/file.txt"
        }
    }

    #file:// URIs the plug-in must accept, in several platform flavours.
    _good_uris = {
        "unix file": {
            "uri": "file:///home/username/file.txt"
        },
        "windows file": {
            "uri": "file://C:/Users/username/file.txt"
        },
        "windows network file": {
            "uri": "file://server/file.txt"
        }
    }

    _test_bytes = {
        "word": {
            "content": b"Test"
        },
        "empty": {
            "content": b""
        },
        "null_character": {
            "content": b"null\x00character"
        },
        "last_character": {
            "content": b"last\xFFcharacter"
        },
        "long": {
            "content": b"x" * (io.DEFAULT_BUFFER_SIZE + 10) #Be larger than the default buffer size so it has to do at least 2 reads or writes.
        }
    }
    """
    Simple sequences of bytes to write and read from files to test with.
    These include some of the special cases that may result in problems, such as
    empty content and null characters.
    """

    def setUp(self):
        """
        Resets the number of bytes written concurrently in this test.
        """
        ConcurrentIOWrapper.reset()

    def tearDown(self):
        """
        Removes any files that may have been written during these tests.
        """
        if os.path.isfile(_unsafe_target_file):
            os.remove(_unsafe_target_file)

    @luna.tests.parametrise(_good_uris)
    def test_can_read(self, uri):
        """
        Tests whether the plug-in says it can read files that it should be able
        to read.
        :param uri: A URI of a file that the local storage plug-in should be
        able to read.
        """
        self.assertTrue(localstorage.local_storage.can_read(uri))

    @luna.tests.parametrise(_good_uris)
    def test_can_write(self, uri):
        """
        Tests whether the plug-in says it can write files that it should be able
        to write.
        :param uri: A URI of a file that the local storage plug-in should be
        able to write.
        """
        self.assertTrue(localstorage.local_storage.can_write(uri))

    @luna.tests.parametrise(_bad_uris)
    def test_cannot_read(self, uri):
        """
        Tests whether the plug-in says it cannot read files that it should not
        be able to read.
        :param uri: A URI of a resource that the local storage plug-in should
        not be able to read.
        """
        self.assertFalse(localstorage.local_storage.can_read(uri))

    @luna.tests.parametrise(_bad_uris)
    def test_cannot_write(self, uri):
        """
        Tests whether the plug-in says it cannot write files that it should not
        be able to write.
        :param uri: A URI of a resource that the local storage plug-in should
        not be able to write.
        """
        self.assertFalse(localstorage.local_storage.can_write(uri))

    def test_delete(self):
        """
        Tests deleting a file.
        """
        with open(_unsafe_target_file, "w") as file_handle: #Create the file.
            file_handle.write("Test!")
        localstorage.local_storage.delete(_unsafe_target_file)
        self.assertFalse(os.path.isfile(_unsafe_target_file))

    def test_exists_after_deleting(self):
        """
        Tests whether a file is said to exist if it was just deleted.
        """
        with open(_unsafe_target_file, "w") as file_handle: #Create the file.
            file_handle.write("Test!")
        os.remove(_unsafe_target_file) #Delete it.
        self.assertFalse(localstorage.local_storage.exists(_unsafe_target_file), msg="The file {file_name} was reported to exist, but it was just deleted.".format(file_name=_unsafe_target_file))

    def test_exists_just_created(self):
        """
        Tests whether a file is said to exist if it has just been created.
        """
        with open(_unsafe_target_file, "w") as file_handle: #Create the file.
            file_handle.write("Test!")
        self.assertTrue(localstorage.local_storage.exists(_unsafe_target_file), msg="The file {file_name} was reported to not exist, but it was just created.".format(file_name=_unsafe_target_file))

    def test_exists_never_created(self):
        """
        Tests whether a file is said to not exist if it was never created.
        """
        self.assertFalse(localstorage.local_storage.exists(_unsafe_target_file), msg="The file {file_name} was reported to be existing, though it shouldn't exist.".format(file_name=_unsafe_target_file)) #If stuff was cleaned up properly after each test, this should not exist.

    def test_move(self):
        """
        Tests moving a file.
        This is just a simple move.
        """
        with open("start.txt", "w") as file_handle: #Create a file.
            file_handle.write("Test!")
        #Assumes that the file exists now.
        try:
            localstorage.local_storage.move("start.txt", "end.txt")
            self.assertFalse(os.path.isfile("start.txt"), msg="Move origin file may no longer exist after the move.")
            self.assertTrue(os.path.isfile("end.txt"), msg="Move destination file must exist after the move.")
        finally: #Clean up.
            if os.path.isfile("start.txt"):
                os.remove("start.txt")
            if os.path.isfile("end.txt"):
                os.remove("end.txt")

    @luna.tests.parametrise(_test_bytes)
    def test_open_read(self, content):
        """
        Tests whether reading a simple file is successful.
        This writes some content to a file, reads it back and sees whether it is
        the same.
        :param content: The content to put in the file before reading.
        """
        with open(_unsafe_target_file, "wb") as file_handle: #Create the file with simple content.
            file_handle.write(content)
        with localstorage.local_storage.open_read(_unsafe_target_file) as file_handle:
            result = file_handle.read()
        self.assertEqual(result, content, "Read must be exactly equal to what was written to the file.")

    def test_open_read_atomicity(self):
        """
        Tests the ``open_read`` function to see whether it is an atomic read.
        """
        with open(_unsafe_target_file, "wb") as unsafe_file_handle:
            unsafe_file_handle.write(b"Test") #Some initial data to test with. This is not tested.
        #While patched, the mock interleaves writes of b"1234567890" into the
        #file; an atomic read must return a snapshot from some single moment.
        with unittest.mock.patch("builtins.open", _open_simulate_concurrency):
            with localstorage.local_storage.open_read(_unsafe_target_file) as tested_file_handle:
                result = tested_file_handle.read()
        self.assertIn(result, [ #At any stage during the writing.
            b"Test",
            b"1",
            b"12",
            b"123",
            b"1234",
            b"12345",
            b"123456",
            b"1234567",
            b"12345678",
            b"123456789",
            b"1234567890"
        ], result.decode("utf-8") + " is not a snapshot of the file at any point in time, and as such is not atomic.")

    @luna.tests.parametrise(_test_bytes)
    def test_write(self, content):
        """
        Tests whether writing a simple file is successful.
        This uses the write functionality to write content to a file, then reads
        it back to verify that the content was written correctly.
        :param content: The content to write to the file, as `bytes`.
        """
        with localstorage.local_storage.open_write(_unsafe_target_file) as file_handle:
            file_handle.write(content)
        with open(_unsafe_target_file, "rb") as file_handle:
            result = file_handle.read()
        self.assertEqual(result, content, "Write must put the exact content in the file.")

    def test_write_atomicity(self):
        """
        Tests the ``open_write`` function to see whether it is an atomic write.
        """
        test_bytes = b"Test"
        #While patched, the mock interleaves concurrent writes into the same
        #file; an atomic write must leave exactly test_bytes behind.
        with unittest.mock.patch("builtins.open", _open_simulate_concurrency):
            with localstorage.local_storage.open_write(_unsafe_target_file) as tested_file_handle:
                tested_file_handle.write(test_bytes)
        with open(_unsafe_target_file, "rb") as written_file_handle:
            result = written_file_handle.read()
        self.assertEqual(result, test_bytes, "File write is not atomic.")

    @luna.tests.parametrise(_test_bytes)
    def test_write_existing(self, content):
        """
        Tests writing to a file that already exists.
        The old file must get overwritten by the new file.
        :param content: The content to write to the file, as `bytes`.
        """
        with open(_unsafe_target_file, "w") as file_handle: #Make sure the file exists.
            file_handle.write("Original file contents.")
        with localstorage.local_storage.open_write(_unsafe_target_file) as file_handle:
            file_handle.write(content) #Overwrite with new data.
        with open(_unsafe_target_file, "rb") as file_handle:
            result = file_handle.read()
        self.assertEqual(result, content, "Write must overwrite the old file and put the exact content in the file.")
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2013-2021 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.allen_2012 import Allen2012
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
import numpy
# Test data generated from EQRM implementation.
class Allen2012TestCase(BaseGSIMTestCase):
    """Verification tests for the Allen (2012) GSIM against EQRM-generated data."""

    GSIM_CLASS = Allen2012

    def test_mean(self):
        #Mean values are allowed a slightly larger discrepancy than the sigmas.
        self.check('A12/ALLEN2012_MEAN.csv', max_discrep_percentage=0.4)

    def test_std_total(self):
        self.check('A12/ALLEN2012_STD_TOTAL.csv', max_discrep_percentage=0.1)
Ported more GSIM test cases to the new combined `check_all` API.
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2013-2021 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.allen_2012 import Allen2012
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
# Test data generated from EQRM implementation.
class Allen2012TestCase(BaseGSIMTestCase):
    """Verification tests for the Allen (2012) GSIM against EQRM-generated data."""

    GSIM_CLASS = Allen2012

    def test_all(self):
        #Verify means and total standard deviations in a single pass.
        mean_table = 'A12/ALLEN2012_MEAN.csv'
        std_table = 'A12/ALLEN2012_STD_TOTAL.csv'
        self.check_all(mean_table, std_table,
                       mean_discrep_percentage=0.4,
                       std_discrep_percentage=0.1)
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import gc
import functools
import IECore
import Gaffer
import GafferUI
from Qt import QtCore
from Qt import QtGui
from Qt import QtWidgets
## \todo Implement an option to float in a new window, and an option to anchor back - drag and drop of tabs?
class CompoundEditor( GafferUI.EditorWidget ) :
__transitionDuration = 400
def __init__( self, scriptNode, children=None, **kw ) :

	# The root of the recursive splitter hierarchy that holds all the editors.
	self.__splitContainer = _SplitContainer()

	GafferUI.EditorWidget.__init__( self, self.__splitContainer, scriptNode, **kw )

	# Start with a single, unsplit panel.
	self.__splitContainer.append( _TabbedContainer() )

	# Keyboard shortcuts (space to collapse/expand panels, Ctrl+T for tab
	# visibility) are handled centrally in __keyPress.
	self.__keyPressConnection = self.__splitContainer.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPress ) )

	self.__editorAddedSignal = Gaffer.Signal2()

	# `children` is the serialised layout produced by __serialise/__repr__.
	if children :
		self.__addChildren( self.__splitContainer, children )
## Returns all the editors that comprise this CompoundEditor, optionally
# filtered by type.
def editors( self, type = GafferUI.EditorWidget ) :

	# Depth-first walk of the splitter hierarchy, gathering matching editors.
	def __gather( container ) :

		assert( isinstance( container, GafferUI.SplitContainer ) )

		if len( container ) > 1 :
			# a split panel - descend into both halves in order
			return __gather( container[0] ) + __gather( container[1] )

		# an unsplit panel - a single tabbed container holding editors
		return [ editor for editor in container[0] if isinstance( editor, type ) ]

	return __gather( self.__splitContainer )
## Adds an editor to the layout, trying to place it in the same place
# as editors of the same type.
def addEditor( self, editor ) :

	# Returns ( ideal, backup ) : `ideal` is the first leaf container already
	# holding an editor of this type (or None), `backup` is the last leaf
	# visited, used as a fallback placement.
	def __findContainer( w, editorType ) :
		if len( w ) > 1 :
			ideal, backup = __findContainer( w[0], editorType )
			if ideal is not None :
				return ideal, backup
			return __findContainer( w[1], editorType )
		else :
			for e in w[0] :
				if isinstance( e, editorType ) :
					return w, w
			return None, w

	ideal, backup = __findContainer( self.__splitContainer, editor.__class__ )

	container = ideal if ideal is not None else backup

	self.__addChild( container, editor )
## A signal emitted whenever an editor is added -
# the signature is ( compoundEditor, childEditor ).
def editorAddedSignal( self ) :

	return self.__editorAddedSignal
# Recursively serialises the splitter hierarchy rooted at `w` into a Python
# expression string, suitable for the `children` constructor argument
# (see __repr__ / __addChildren for the round trip).
def __serialise( self, w ) :

	assert( isinstance( w, GafferUI.SplitContainer ) )

	if len( w ) > 1 :
		# it's split
		sizes = w.getSizes()
		# Guard against a zero total size to avoid division by zero.
		splitPosition = ( float( sizes[0] ) / sum( sizes ) ) if sum( sizes ) else 0
		return "( GafferUI.SplitContainer.Orientation.%s, %f, ( %s, %s ) )" % ( str( w.getOrientation() ), splitPosition, self.__serialise( w[0] ), self.__serialise( w[1] ) )
	else :
		# not split - a tabbed container full of editors
		tabbedContainer = w[0]
		tabDict = { "tabs" : tuple( tabbedContainer[:] ) }
		if tabbedContainer.getCurrent() is not None :
			tabDict["currentTab"] = tabbedContainer.index( tabbedContainer.getCurrent() )
		tabDict["tabsVisible"] = tabbedContainer.getTabsVisible()
		# An editor counts as "pinned" when its node set is decoupled from the
		# script's selection; non-NodeSetEditors get a None placeholder.
		tabDict["pinned"] = []
		for editor in tabbedContainer :
			if isinstance( editor, GafferUI.NodeSetEditor ) :
				tabDict["pinned"].append( not editor.getNodeSet().isSame( self.scriptNode().selection() ) )
			else :
				tabDict["pinned"].append( None )
		return repr( tabDict )
# Produces a constructor expression used to persist the layout; the nested
# `children` structure comes from __serialise.
def __repr__( self ) :

	return "GafferUI.CompoundEditor( scriptNode, children = %s )" % self.__serialise( self.__splitContainer )
# Builds the per-panel layout menu: one item per registered editor type,
# panel/tab removal items, tab visibility toggling and the four split actions.
def _layoutMenuDefinition( self, tabbedContainer ) :

	splitContainer = tabbedContainer.ancestor( _SplitContainer )

	m = IECore.MenuDefinition()

	layouts = GafferUI.Layouts.acquire( self.scriptNode().applicationRoot() )
	for c in layouts.registeredEditors() :
		m.append( "/" + IECore.CamelCase.toSpaced( c ), { "command" : functools.partial( self.__addChild, splitContainer, c ) } )

	m.append( "/divider", { "divider" : True } )

	removeItemAdded = False

	splitContainerParent = splitContainer.parent()
	if isinstance( splitContainerParent, GafferUI.SplitContainer ) :
		# NOTE(review): this path lacks the leading "/" that every other item
		# here uses - confirm whether that is intentional.
		m.append( "Remove Panel", { "command" : functools.partial( self.__join, splitContainerParent, 1 - splitContainerParent.index( splitContainer ) ) } )
		removeItemAdded = True

	currentTab = tabbedContainer.getCurrent()
	if currentTab :
		m.append( "/Remove " + tabbedContainer.getLabel( currentTab ), { "command" : functools.partial( self.__removeCurrentTab, tabbedContainer ) } )
		removeItemAdded = True

	if removeItemAdded :
		m.append( "/divider2", { "divider" : True } )

	tabsVisible = tabbedContainer.getTabsVisible()
	# because the menu isn't visible most of the time, the Ctrl+T shortcut doesn't work - it's just there to let
	# users know it exists. it is actually implemented directly in __keyPress.
	m.append( "/Hide Tabs" if tabsVisible else "/Show Tabs", { "command" : functools.partial( Gaffer.WeakMethod( tabbedContainer.setTabsVisible ), not tabsVisible ), "shortCut" : "Ctrl+T" } )
	m.append( "/TabsDivider", { "divider" : True } )

	m.append( "/Split Left", { "command" : functools.partial( self.__split, splitContainer, GafferUI.SplitContainer.Orientation.Horizontal, 0 ) } )
	m.append( "/Split Right", { "command" : functools.partial( self.__split, splitContainer, GafferUI.SplitContainer.Orientation.Horizontal, 1 ) } )
	m.append( "/Split Bottom", { "command" : functools.partial( self.__split, splitContainer, GafferUI.SplitContainer.Orientation.Vertical, 1 ) } )
	m.append( "/Split Top", { "command" : functools.partial( self.__split, splitContainer, GafferUI.SplitContainer.Orientation.Vertical, 0 ) } )

	return m
# Handles the space shortcut (collapse/expand the panel under the cursor,
# animated) and Ctrl+T (toggle tab visibility of the container under the
# cursor).
def __keyPress( self, unused, event ) :

	if event.key == "Space" :

		# we receive the event for whichever SplitContainer has keyboard focus, but that's not
		# necessarily the one we want to modify. examine the splitter hierarchy and find
		# the target container we want to modify, and the new state we want to put it in.

		## \todo Decide how and where we provide this widget-under-the-cursor functionality in
		# the public api.
		qWidget = QtWidgets.QApplication.instance().widgetAt( QtGui.QCursor.pos() )
		widget = GafferUI.Widget._owner( qWidget )

		# NOTE(review): `State.None` attribute access is only valid under
		# Python 2 (this file also uses `basestring`); it is a syntax error
		# on Python 3.
		State = IECore.Enum.create( "None", "Open", "Closed", "Opening", "Closing" )

		targetContainer, targetState, targetIndex = None, State.None, -1
		prevContainer, prevState, prevIndex = None, State.None, -1
		while widget is not None :
			widgetParent = widget.parent()
			# Only containers created by __split carry __preferredHandlePosition
			# (mangled name checked explicitly because `widgetParent` is not `self`).
			if isinstance( widgetParent, GafferUI.SplitContainer ) and hasattr( widgetParent, "_CompoundEditor__preferredHandlePosition" ) :
				currentContainer = widgetParent
				currentIndex = 1 - currentContainer.index( widget )
				currentDestSizes = currentContainer.targetSizes()
				# Classify the current container's visual state, taking any
				# in-flight resize animation (targetSizes) into account.
				if len( currentContainer ) == 1 :
					currentState = State.None
				elif 0 in currentContainer.getSizes() :
					currentState = State.Closed
				elif currentDestSizes is not None :
					if currentDestSizes[currentIndex] == 0 :
						currentState = State.Closing
					else :
						currentState = State.Opening
				else :
					currentState = State.Open

				if prevState in ( State.Closing, State.None ) and currentState in ( State.Open, State.Opening ) :
					targetContainer, targetState, targetIndex = currentContainer, State.Closing, currentIndex
					break
				if prevState == State.Closed and currentState in ( State.Open, State.Opening ) :
					targetContainer, targetState, targetIndex = prevContainer, State.Opening, prevIndex
					break
				elif currentState == State.Closed and currentContainer.parent() is self :
					targetContainer, targetState, targetIndex = currentContainer, State.Opening, currentIndex
					break

				prevContainer, prevState, prevIndex = currentContainer, currentState, currentIndex

			widget = widgetParent

		if targetContainer is None :
			return False

		if targetState == State.Closing :
			newSizes = [ 0, 1 ]
			if targetIndex :
				newSizes.reverse()
		else :
			# Reopen to wherever the user last dragged the splitter handle.
			newSizes = [ targetContainer.__preferredHandlePosition, 1 - targetContainer.__preferredHandlePosition ]

		targetContainer.setSizes( newSizes, self.__transitionDuration )
		for child in targetContainer :
			child.__enterConnection = None

		return True

	elif event.key == "T" and event.modifiers == event.Modifiers.Control :

		tabbedContainer = GafferUI.Widget.widgetAt( GafferUI.Widget.mousePosition(), _TabbedContainer )
		if tabbedContainer is not None :
			tabbedContainer.setTabsVisible( not tabbedContainer.getTabsVisible() )

	return False
# Recursively rebuilds the layout from a serialised `children` structure
# (the inverse of __serialise). A tuple starting with an Orientation encodes
# a split; anything else encodes the contents of a single tabbed panel.
def __addChildren( self, splitContainer, children ) :

	if isinstance( children, tuple ) and len( children ) and isinstance( children[0], GafferUI.SplitContainer.Orientation ) :

		# children == ( orientation, splitPosition, ( leftChildren, rightChildren ) )
		self.__split( splitContainer, children[0], 0 )
		self.__addChildren( splitContainer[0], children[2][0] )
		self.__addChildren( splitContainer[1], children[2][1] )
		splitContainer.setSizes( [ children[1], 1.0 - children[1] ] )

	else :
		if isinstance( children, tuple ) :
			# backwards compatibility - tabs provided as a tuple
			for c in children :
				self.__addChild( splitContainer, c )
		else :
			# new format - various fields provided by a dictionary
			for i, c in enumerate( children["tabs"] ) :
				editor = self.__addChild( splitContainer, c )
				# A "pinned" editor gets its own empty node set, decoupled
				# from the script's selection.
				if "pinned" in children and isinstance( editor, GafferUI.NodeSetEditor ) and children["pinned"][i] :
					editor.setNodeSet( Gaffer.StandardSet() )
			if "currentTab" in children :
				splitContainer[0].setCurrent( splitContainer[0][children["currentTab"]] )
			splitContainer[0].setTabsVisible( children.get( "tabsVisible", True ) )

		# this is a shame-faced hack to make sure the timeline in the default layout can't be compressed
		# or stretched vertically. fixing this properly is quite involved, because we'd need to find a sensible
		# generic way for TabbedContainer to set a min/max height based on it's children, and then a sensible
		# generic rule for what SplitContainer should do in its __applySizePolicy() method.
		if len( splitContainer[0] ) == 1 and isinstance( splitContainer[0][0], GafferUI.Timeline ) :
			splitContainer[0]._qtWidget().setFixedHeight( splitContainer[0][0]._qtWidget().sizeHint().height() )
			splitContainer._qtWidget().setSizePolicy( QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed )
def __addChild( self, splitContainer, nameOrEditor ) :

	# Appends an editor to the tabbed container held by the (unsplit)
	# `splitContainer`, creating it by type name if necessary, and makes
	# it the current tab. Returns the editor.
	assert( len( splitContainer ) == 1 )

	tabs = splitContainer[0]

	if not isinstance( nameOrEditor, basestring ) :
		editor = nameOrEditor
		assert( editor.scriptNode().isSame( self.scriptNode() ) )
	else :
		editor = GafferUI.EditorWidget.create( nameOrEditor, self.scriptNode() )

	tabs.insert( len( tabs ), editor )
	tabs.setCurrent( editor )

	return editor
def __split( self, splitContainer, orientation, subPanelIndex ) :

	# Splits a leaf `splitContainer` in two along `orientation`, keeping
	# the existing tabbed container in the sub-panel with index
	# `subPanelIndex` and creating a fresh tabbed container in the other.
	assert( len( splitContainer ) == 1 ) # we should not be split already

	sc1 = _SplitContainer()
	sc1.append( splitContainer[0] ) # reparents the existing tabbed container

	assert( len( splitContainer ) == 0 )

	sc2 = _SplitContainer()
	sc2.append( _TabbedContainer() )

	if subPanelIndex==1 :
		splitContainer.append( sc1 )
		splitContainer.append( sc2 )
	else :
		splitContainer.append( sc2 )
		splitContainer.append( sc1 )

	assert( len( splitContainer ) == 2 )

	# track interaction with the splitter handle, so collapsed panels can
	# be revealed on hover and the preferred handle position remembered.
	# NOTE : these attributes are name-mangled to _CompoundEditor__...
	# because this code runs inside the CompoundEditor class.
	handle = splitContainer.handle( 0 )
	splitContainer.__handleEnterConnection = handle.enterSignal().connect( CompoundEditor.__handleEnter )
	splitContainer.__handleButtonReleaseConnection = handle.buttonReleaseSignal().connect( CompoundEditor.__handleButtonRelease )
	splitContainer.__preferredHandlePosition = 0.5 # where the user put it last

	splitContainer.setOrientation( orientation )
def __join( self, splitContainer, subPanelIndex ) :

	# Collapses a split : replaces the contents of `splitContainer` with
	# the contents of its sub-panel at `subPanelIndex`, discarding the
	# other half of the split.
	subPanelToKeepFrom = splitContainer[subPanelIndex]
	del splitContainer[:]
	for w in subPanelToKeepFrom[:] :
		splitContainer.append( w )

	splitContainer.setOrientation( subPanelToKeepFrom.getOrientation() )

	# schedule some garbage collection to hoover up the remains. we do this in a delayed
	# way in case the menu we're called from is holding on to references to the ui elements
	# which are going to die.
	## \todo I don't think this should be necessary now we're using WeakMethods for slots. It
	# may be a good idea to remove it, as it may otherwise mask problems temporarily.
	GafferUI.EventLoop.addIdleCallback( self.__collect )
def __removeCurrentTab( self, tabbedContainer ) :

	# Removes whichever editor is currently frontmost in `tabbedContainer`.
	tabbedContainer.remove( tabbedContainer.getCurrent() )

	# Garbage collection is deferred to an idle event, in case the menu
	# we were invoked from still holds references to the ui elements
	# which are going to die.
	GafferUI.EventLoop.addIdleCallback( self.__collect )
@staticmethod
def __handlePosition( splitContainer ) :

	# Returns the fractional position (0-1) of the splitter handle,
	# computed from the two current panel sizes.
	assert( len( splitContainer ) == 2 )

	first, second = splitContainer.getSizes()
	return float( first ) / ( first + second )
## Used to remember where people like the handle to be.
@staticmethod
def __handleButtonRelease( handle, event ) :

	splitContainer = handle.parent()
	handlePosition = CompoundEditor.__handlePosition( splitContainer )
	# only remember positions where both panels remain visible - a fully
	# collapsed panel shouldn't become the preferred state.
	if handlePosition != 0 and handlePosition != 1 :
		splitContainer.__preferredHandlePosition = handlePosition
		# the user has chosen a position explicitly, so drop any auto-hide
		# connections made by __handleEnter.
		for child in splitContainer :
			child.__enterConnection = None

	return False
## Used to dynamically show collapsed editors when the handle is entered
@staticmethod
def __handleEnter( handle ) :

	splitContainer = handle.parent()
	sizes = splitContainer.getSizes()
	if 0 in sizes :
		# one panel is fully collapsed - animate back to the preferred
		# handle position to reveal it.
		preferredContainer = splitContainer[ 1 - sizes.index( 0 ) ]
		# arrange for the reveal to be undone when the pointer enters the
		# previously visible panel (see __preferredIndexEnter).
		preferredContainer.__enterConnection = preferredContainer.enterSignal().connect( CompoundEditor.__preferredIndexEnter )
		sizes = [ splitContainer.__preferredHandlePosition, 1 - splitContainer.__preferredHandlePosition ]
		splitContainer.setSizes( sizes, CompoundEditor.__transitionDuration )

	return False
## Used to dynamically hide editors automatically after being dynamically shown
@staticmethod
def __preferredIndexEnter( splitContainer ) :

	parent = splitContainer.parent()
	index = parent.index( splitContainer )
	# give this panel everything, collapsing its sibling back to zero.
	sizes = [ 1 if index==0 else 0, 0 if index==0 else 1 ]
	parent.setSizes( sizes, CompoundEditor.__transitionDuration )

	# this was a one-shot connection made when the panel was revealed - drop it.
	splitContainer.__enterConnection = None
@staticmethod
def __collect() :

	# Idle callback used to mop up widgets after a join or tab removal.
	# Collect repeatedly until nothing further is freed, so that chains
	# of collectable objects are fully reclaimed.
	try :
		while gc.collect() :
			pass
	except Exception :
		# Collection may fail (e.g. during interpreter shutdown). This is
		# purely a cleanup optimisation, so failures are safe to ignore.
		# Narrowed from a bare `except :` so SystemExit/KeyboardInterrupt
		# are not swallowed.
		pass

	# False presumably removes the idle callback - TODO confirm against
	# GafferUI.EventLoop.addIdleCallback() semantics.
	return False
# The internal class used to allow hierarchical splitting of the layout.
class _SplitContainer( GafferUI.SplitContainer ) :

	# Trivial subclass, used so that layout code can identify the
	# compound editor's own panels by type.
	def __init__( self, **kw ) :

		GafferUI.SplitContainer.__init__( self, **kw )
# The internal class used to keep a bunch of editors in tabs, updating the titles
# when appropriate, and keeping a track of the pinning of nodes.
class _TabbedContainer( GafferUI.TabbedContainer ) :

	def __init__( self, cornerWidget=None, **kw ) :

		GafferUI.TabbedContainer.__init__( self, cornerWidget, **kw )

		# the corner widget holds the pinning button and the layout menu button.
		with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 2, borderWidth=1 ) as cornerWidget :

			self.__pinningButton = GafferUI.Button( image="targetNodesUnlocked.png", hasFrame=False )

			layoutButton = GafferUI.MenuButton( image="layoutButton.png", hasFrame=False )
			layoutButton.setMenu( GafferUI.Menu( Gaffer.WeakMethod( self.__layoutMenuDefinition ) ) )
			layoutButton.setToolTip( "Click to modify the layout" )

		self.setCornerWidget( cornerWidget )

		self.__pinningButtonClickedConnection = self.__pinningButton.clickedSignal().connect( Gaffer.WeakMethod( self.__pinningButtonClicked ) )
		self.__currentTabChangedConnection = self.currentChangedSignal().connect( Gaffer.WeakMethod( self.__currentTabChanged ) )
		self.__dragEnterConnection = self.dragEnterSignal().connect( Gaffer.WeakMethod( self.__dragEnter ) )
		self.__dragLeaveConnection = self.dragLeaveSignal().connect( Gaffer.WeakMethod( self.__dragLeave ) )
		self.__dropConnection = self.dropSignal().connect( Gaffer.WeakMethod( self.__drop ) )

	## \todo We're overriding this so we can connect to titleChanged(). This works OK
	# because we know that CompoundEditor only uses insert(), and not any other means of
	# adding a child. A more general solution might be to have a Container.childAddedSignal()
	# method so we can get called no matter how we get a new child.
	def insert( self, index, editor ) :

		GafferUI.TabbedContainer.insert( self, index, editor )
		self.setLabel( editor, editor.getTitle() )
		# keep the tab label in sync with the editor's title.
		editor.__titleChangedConnection = editor.titleChangedSignal().connect( Gaffer.WeakMethod( self.__titleChanged ) )

		self.ancestor( CompoundEditor ).editorAddedSignal()( self.ancestor( CompoundEditor ), editor )

	def __layoutMenuDefinition( self ) :

		# the menu definition for the layout is dealt with by the CompoundEditor, because only it
		# has the high-level view necessary to do splitting of layouts and so on.
		return self.ancestor( CompoundEditor )._layoutMenuDefinition( self )

	def __titleChanged( self, editor ) :

		self.setLabel( editor, editor.getTitle() )

	def __currentTabChanged( self, tabbedContainer, currentEditor ) :

		# follow node set changes on the current editor only, so the
		# pinning button always reflects the frontmost tab.
		if isinstance( currentEditor, GafferUI.NodeSetEditor ) :
			self.__nodeSetChangedConnection = currentEditor.nodeSetChangedSignal().connect( Gaffer.WeakMethod( self.__updatePinningButton ) )
		else :
			self.__nodeSetChangedConnection = None

		self.__updatePinningButton()

	def __updatePinningButton( self, *unused ) :

		editor = self.getCurrent()
		if isinstance( editor, GafferUI.NodeSetEditor ) and editor.scriptNode() is not None :

			self.__pinningButton.setVisible( True )

			if editor.getNodeSet().isSame( editor.scriptNode().selection() ) :
				self.__pinningButton.setToolTip( "Click to lock view to current selection" )
				self.__pinningButton.setImage( "targetNodesUnlocked.png" )
			else :
				self.__pinningButton.setToolTip( "Click to unlock view and follow selection" )
				self.__pinningButton.setImage( "targetNodesLocked.png" )

		else :

			self.__pinningButton.setVisible( False )

	def __pinningButtonClicked( self, button ) :

		editor = self.getCurrent()
		assert( isinstance( editor, GafferUI.NodeSetEditor ) )

		nodeSet = editor.getNodeSet()
		selectionSet = editor.scriptNode().selection()
		if nodeSet.isSame( selectionSet ) :
			# lock : snapshot the current selection into a static set.
			nodeSet = Gaffer.StandardSet( list( nodeSet ) )
		else :
			# unlock : follow the live selection again.
			nodeSet = selectionSet
		editor.setNodeSet( nodeSet )

	def __dragEnter( self, tabbedContainer, event ) :

		currentEditor = self.getCurrent()
		if not isinstance( currentEditor, GafferUI.NodeSetEditor ) :
			return False

		if currentEditor.isAncestorOf( event.sourceWidget ) :
			return False

		# accept a single node, or a set whose first member is a node.
		result = False
		if isinstance( event.data, Gaffer.Node ) :
			result = True
		elif isinstance( event.data, Gaffer.Set ) :
			if event.data.size() and isinstance( event.data[0], Gaffer.Node ) :
				result = True

		if result :
			self.setHighlighted( True )
			self.__pinningButton.setHighlighted( True )

		return result

	def __dragLeave( self, tabbedContainer, event ) :

		self.setHighlighted( False )
		self.__pinningButton.setHighlighted( False )

	def __drop( self, tabbedContainer, event ) :

		if isinstance( event.data, Gaffer.Node ) :
			nodeSet = Gaffer.StandardSet( [ event.data ] )
		else :
			nodeSet = Gaffer.StandardSet( [ x for x in event.data if isinstance( x, Gaffer.Node ) ] )

		if event.modifiers & event.Modifiers.Shift :
			# shift-drop : open the dropped nodes in a new editor of the
			# same type as the current one.
			currentEditor = self.getCurrent()
			newEditor = currentEditor.__class__( currentEditor.scriptNode() )
			newEditor.setNodeSet( nodeSet )
			self.insert( 0, newEditor )
			self.setCurrent( newEditor )
		else :
			self.getCurrent().setNodeSet( nodeSet )

		self.setHighlighted( False )
		self.__pinningButton.setHighlighted( False )

		return True
CompoundEditor : Make Timeline sizing more robust
There were two problems here :
- We were applying the timeline size hack only when loading a layout. This meant that the layout behaved differently to when it was freshly authored.
- We were applying the timeline size hack even when the tabs were visible. This resulted in a vertically cropped timeline, and also meant that if other tabs were added later, they too would have a useless size.
We now apply the timeline sizing whenever tab visibility changes, and keep the standard sizing whenever the tabs are visible, so that other tabs may be added.
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import gc
import functools
import IECore
import Gaffer
import GafferUI
from Qt import QtCore
from Qt import QtGui
from Qt import QtWidgets
## \todo Implement an option to float in a new window, and an option to anchor back - drag and drop of tabs?
class CompoundEditor( GafferUI.EditorWidget ) :

	# duration passed to SplitContainer.setSizes() for the animated
	# show/hide transitions.
	__transitionDuration = 400

	def __init__( self, scriptNode, children=None, **kw ) :

		self.__splitContainer = _SplitContainer()

		GafferUI.EditorWidget.__init__( self, self.__splitContainer, scriptNode, **kw )

		self.__splitContainer.append( _TabbedContainer() )

		self.__keyPressConnection = self.__splitContainer.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPress ) )

		self.__editorAddedSignal = Gaffer.Signal2()

		# `children` is an optional serialised layout description, as
		# produced by __serialise() / __repr__().
		if children :
			self.__addChildren( self.__splitContainer, children )

	## Returns all the editors that comprise this CompoundEditor, optionally
	# filtered by type.
	def editors( self, type = GafferUI.EditorWidget ) :

		def __recurse( w ) :
			assert( isinstance( w, GafferUI.SplitContainer ) )
			if len( w ) > 1 :
				# it's split
				return __recurse( w[0] ) + __recurse( w[1] )
			else :
				return [ e for e in w[0] if isinstance( e, type ) ]

		return __recurse( self.__splitContainer )

	## Adds an editor to the layout, trying to place it in the same place
	# as editors of the same type.
	def addEditor( self, editor ) :

		# returns ( idealContainer, backupContainer ) - ideal holds an
		# editor of the same type; backup is any leaf container.
		def __findContainer( w, editorType ) :
			if len( w ) > 1 :
				ideal, backup = __findContainer( w[0], editorType )
				if ideal is not None :
					return ideal, backup
				return __findContainer( w[1], editorType )
			else :
				for e in w[0] :
					if isinstance( e, editorType ) :
						return w, w
				return None, w

		ideal, backup = __findContainer( self.__splitContainer, editor.__class__ )
		container = ideal if ideal is not None else backup

		self.__addChild( container, editor )

	## A signal emitted whenever an editor is added -
	# the signature is ( compoundEditor, childEditor ).
	def editorAddedSignal( self ) :

		return self.__editorAddedSignal

	# Serialises the layout rooted at `w` to a repr() string, suitable
	# for passing back as the `children` constructor argument.
	def __serialise( self, w ) :

		assert( isinstance( w, GafferUI.SplitContainer ) )

		if len( w ) > 1 :
			# it's split
			sizes = w.getSizes()
			# guard against a zero total size to avoid division by zero.
			splitPosition = ( float( sizes[0] ) / sum( sizes ) ) if sum( sizes ) else 0
			return "( GafferUI.SplitContainer.Orientation.%s, %f, ( %s, %s ) )" % ( str( w.getOrientation() ), splitPosition, self.__serialise( w[0] ), self.__serialise( w[1] ) )
		else :
			# not split - a tabbed container full of editors
			tabbedContainer = w[0]
			tabDict = { "tabs" : tuple( tabbedContainer[:] ) }
			if tabbedContainer.getCurrent() is not None :
				tabDict["currentTab"] = tabbedContainer.index( tabbedContainer.getCurrent() )
			tabDict["tabsVisible"] = tabbedContainer.getTabsVisible()

			# a pinned editor is one that is not following the selection.
			tabDict["pinned"] = []
			for editor in tabbedContainer :
				if isinstance( editor, GafferUI.NodeSetEditor ) :
					tabDict["pinned"].append( not editor.getNodeSet().isSame( self.scriptNode().selection() ) )
				else :
					tabDict["pinned"].append( None )

			return repr( tabDict )

	def __repr__( self ) :

		return "GafferUI.CompoundEditor( scriptNode, children = %s )" % self.__serialise( self.__splitContainer )

	# Builds the layout menu shown by _TabbedContainer's corner button.
	# Lives here because only the CompoundEditor has the high-level view
	# needed to split/join panels.
	def _layoutMenuDefinition( self, tabbedContainer ) :

		splitContainer = tabbedContainer.ancestor( _SplitContainer )

		m = IECore.MenuDefinition()

		layouts = GafferUI.Layouts.acquire( self.scriptNode().applicationRoot() )
		for c in layouts.registeredEditors() :
			m.append( "/" + IECore.CamelCase.toSpaced( c ), { "command" : functools.partial( self.__addChild, splitContainer, c ) } )

		m.append( "/divider", { "divider" : True } )

		removeItemAdded = False

		splitContainerParent = splitContainer.parent()
		if isinstance( splitContainerParent, GafferUI.SplitContainer ) :
			m.append( "Remove Panel", { "command" : functools.partial( self.__join, splitContainerParent, 1 - splitContainerParent.index( splitContainer ) ) } )
			removeItemAdded = True

		currentTab = tabbedContainer.getCurrent()
		if currentTab :
			m.append( "/Remove " + tabbedContainer.getLabel( currentTab ), { "command" : functools.partial( self.__removeCurrentTab, tabbedContainer ) } )
			removeItemAdded = True

		if removeItemAdded :
			m.append( "/divider2", { "divider" : True } )

		tabsVisible = tabbedContainer.getTabsVisible()
		# because the menu isn't visible most of the time, the Ctrl+T shortcut doesn't work - it's just there to let
		# users know it exists. it is actually implemented directly in __keyPress.
		m.append( "/Hide Tabs" if tabsVisible else "/Show Tabs", { "command" : functools.partial( Gaffer.WeakMethod( self.__updateTabVisibility ), tabbedContainer, not tabsVisible ), "shortCut" : "Ctrl+T" } )
		m.append( "/TabsDivider", { "divider" : True } )

		m.append( "/Split Left", { "command" : functools.partial( self.__split, splitContainer, GafferUI.SplitContainer.Orientation.Horizontal, 0 ) } )
		m.append( "/Split Right", { "command" : functools.partial( self.__split, splitContainer, GafferUI.SplitContainer.Orientation.Horizontal, 1 ) } )
		m.append( "/Split Bottom", { "command" : functools.partial( self.__split, splitContainer, GafferUI.SplitContainer.Orientation.Vertical, 1 ) } )
		m.append( "/Split Top", { "command" : functools.partial( self.__split, splitContainer, GafferUI.SplitContainer.Orientation.Vertical, 0 ) } )

		return m

	# Space toggles collapse/expand of the split under the pointer;
	# Ctrl+T toggles tab visibility for the tabbed container under the
	# pointer.
	def __keyPress( self, unused, event ) :

		if event.key == "Space" :

			# we receive the event for whichever SplitContainer has keyboard focus, but that's not
			# necessarily the one we want to modify. examine the splitter hierarchy and find
			# the target container we want to modify, and the new state we want to put it in.
			## \todo Decide how and where we provide this widget-under-the-cursor functionality in
			# the public api.
			qWidget = QtWidgets.QApplication.instance().widgetAt( QtGui.QCursor.pos() )
			widget = GafferUI.Widget._owner( qWidget )

			# NOTE : "None" is a legal attribute name in Python 2 only.
			State = IECore.Enum.create( "None", "Open", "Closed", "Opening", "Closing" )

			targetContainer, targetState, targetIndex = None, State.None, -1
			prevContainer, prevState, prevIndex = None, State.None, -1
			# walk up from the widget under the cursor, classifying each
			# of our own split containers (identified by the mangled
			# __preferredHandlePosition attribute set in __split()).
			while widget is not None :
				widgetParent = widget.parent()
				if isinstance( widgetParent, GafferUI.SplitContainer ) and hasattr( widgetParent, "_CompoundEditor__preferredHandlePosition" ) :

					currentContainer = widgetParent
					# index of the sub-panel we came *from*; 1 - that is the sibling.
					currentIndex = 1 - currentContainer.index( widget )
					currentDestSizes = currentContainer.targetSizes()
					if len( currentContainer ) == 1 :
						currentState = State.None
					elif 0 in currentContainer.getSizes() :
						currentState = State.Closed
					elif currentDestSizes is not None :
						# an animation is in flight - classify by destination.
						if currentDestSizes[currentIndex] == 0 :
							currentState = State.Closing
						else :
							currentState = State.Opening
					else :
						currentState = State.Open

					if prevState in ( State.Closing, State.None ) and currentState in ( State.Open, State.Opening ) :
						targetContainer, targetState, targetIndex = currentContainer, State.Closing, currentIndex
						break
					if prevState == State.Closed and currentState in ( State.Open, State.Opening ) :
						targetContainer, targetState, targetIndex = prevContainer, State.Opening, prevIndex
						break
					elif currentState == State.Closed and currentContainer.parent() is self :
						targetContainer, targetState, targetIndex = currentContainer, State.Opening, currentIndex
						break

					prevContainer, prevState, prevIndex = currentContainer, currentState, currentIndex

				widget = widgetParent

			if targetContainer is None :
				return False

			if targetState == State.Closing :
				newSizes = [ 0, 1 ]
				if targetIndex :
					newSizes.reverse()
			else :
				newSizes = [ targetContainer.__preferredHandlePosition, 1 - targetContainer.__preferredHandlePosition ]

			targetContainer.setSizes( newSizes, self.__transitionDuration )
			# drop any auto-hide connections made by __handleEnter.
			for child in targetContainer :
				child.__enterConnection = None

			return True

		elif event.key == "T" and event.modifiers == event.Modifiers.Control :

			tabbedContainer = GafferUI.Widget.widgetAt( GafferUI.Widget.mousePosition(), _TabbedContainer )
			if tabbedContainer is not None :
				self.__updateTabVisibility( tabbedContainer, not tabbedContainer.getTabsVisible() )

		return False

	# Recursively populates `splitContainer` from a serialised layout
	# description (see __serialise()).
	def __addChildren( self, splitContainer, children ) :

		if isinstance( children, tuple ) and len( children ) and isinstance( children[0], GafferUI.SplitContainer.Orientation ) :

			# it's a split - divide and recurse into each half.
			self.__split( splitContainer, children[0], 0 )
			self.__addChildren( splitContainer[0], children[2][0] )
			self.__addChildren( splitContainer[1], children[2][1] )
			splitContainer.setSizes( [ children[1], 1.0 - children[1] ] )

		else :

			if isinstance( children, tuple ) :
				# backwards compatibility - tabs provided as a tuple
				for c in children :
					self.__addChild( splitContainer, c )
			else :
				# new format - various fields provided by a dictionary
				for i, c in enumerate( children["tabs"] ) :
					editor = self.__addChild( splitContainer, c )
					if "pinned" in children and isinstance( editor, GafferUI.NodeSetEditor ) and children["pinned"][i] :
						# pinned editors keep their own node set rather
						# than following the script selection.
						editor.setNodeSet( Gaffer.StandardSet() )

				if "currentTab" in children :
					splitContainer[0].setCurrent( splitContainer[0][children["currentTab"]] )

				self.__updateTabVisibility( splitContainer[0], children.get( "tabsVisible", True ) )

	# Appends an editor (by instance or by type name) to the tabbed
	# container held by the unsplit `splitContainer`, and makes it current.
	def __addChild( self, splitContainer, nameOrEditor ) :

		assert( len( splitContainer ) == 1 )

		tabbedContainer = splitContainer[0]

		if isinstance( nameOrEditor, basestring ) :
			editor = GafferUI.EditorWidget.create( nameOrEditor, self.scriptNode() )
		else :
			editor = nameOrEditor
			assert( editor.scriptNode().isSame( self.scriptNode() ) )

		tabbedContainer.insert( len( tabbedContainer ), editor )
		tabbedContainer.setCurrent( editor )

		return editor

	# Splits a leaf `splitContainer` in two, keeping the existing tabbed
	# container in the sub-panel indexed by `subPanelIndex`.
	def __split( self, splitContainer, orientation, subPanelIndex ) :

		assert( len( splitContainer ) == 1 ) # we should not be split already

		sc1 = _SplitContainer()
		sc1.append( splitContainer[0] ) # reparents the existing tabbed container

		assert( len( splitContainer ) == 0 )

		sc2 = _SplitContainer()
		sc2.append( _TabbedContainer() )

		if subPanelIndex==1 :
			splitContainer.append( sc1 )
			splitContainer.append( sc2 )
		else :
			splitContainer.append( sc2 )
			splitContainer.append( sc1 )

		assert( len( splitContainer ) == 2 )

		# track interaction with the handle for auto show/hide, and to
		# remember the preferred handle position.
		handle = splitContainer.handle( 0 )
		splitContainer.__handleEnterConnection = handle.enterSignal().connect( CompoundEditor.__handleEnter )
		splitContainer.__handleButtonReleaseConnection = handle.buttonReleaseSignal().connect( CompoundEditor.__handleButtonRelease )
		splitContainer.__preferredHandlePosition = 0.5 # where the user put it last

		splitContainer.setOrientation( orientation )

	# Collapses a split, keeping the contents of the sub-panel at
	# `subPanelIndex` and discarding the other half.
	def __join( self, splitContainer, subPanelIndex ) :

		subPanelToKeepFrom = splitContainer[subPanelIndex]
		del splitContainer[:]
		for w in subPanelToKeepFrom[:] :
			splitContainer.append( w )

		splitContainer.setOrientation( subPanelToKeepFrom.getOrientation() )

		# schedule some garbage collection to hoover up the remains. we do this in a delayed
		# way in case the menu we're called from is holding on to references to the ui elements
		# which are going to die.
		## \todo I don't think this should be necessary now we're using WeakMethods for slots. It
		# may be a good idea to remove it, as it may otherwise mask problems temporarily.
		GafferUI.EventLoop.addIdleCallback( self.__collect )

	def __removeCurrentTab( self, tabbedContainer ) :

		currentTab = tabbedContainer.getCurrent()
		tabbedContainer.remove( currentTab )

		# schedule some garbage collection to hoover up the remains. we do this in a delayed
		# way in case the menu we're called from is holding on to references to the ui elements
		# which are going to die.
		GafferUI.EventLoop.addIdleCallback( self.__collect )

	# Single point through which tab visibility is changed, so the
	# timeline sizing hack below is applied consistently.
	def __updateTabVisibility( self, tabbedContainer, tabsVisible ) :

		tabbedContainer.setTabsVisible( tabsVisible )

		# This is a shame-faced hack to make sure the timeline in the default layout can't be compressed
		# or stretched vertically. Fixing this properly is quite involved, because we'd need to find a sensible
		# generic way for TabbedContainer to set a min/max height based on its children, and then a sensible
		# generic rule for what SplitContainer should do in its __applySizePolicy() method.
		if len( tabbedContainer ) == 1 and isinstance( tabbedContainer[0], GafferUI.Timeline ) :
			if not tabsVisible :
				# Fix height so timeline is not resizable
				tabbedContainer._qtWidget().setFixedHeight( tabbedContainer[0]._qtWidget().sizeHint().height() )
				tabbedContainer.parent()._qtWidget().setSizePolicy( QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Fixed )
			else :
				# Undo the above
				QWIDGETSIZE_MAX = 16777215 # Macro not exposed by Qt.py, but needed to remove fixed height
				tabbedContainer._qtWidget().setFixedHeight( QWIDGETSIZE_MAX )
				tabbedContainer.parent()._qtWidget().setSizePolicy( QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Ignored )

	# Returns the fractional (0-1) position of the splitter handle.
	@staticmethod
	def __handlePosition( splitContainer ) :

		assert( len( splitContainer ) == 2 )

		sizes = splitContainer.getSizes()
		return float( sizes[0] ) / sum( sizes )

	## Used to remember where people like the handle to be.
	@staticmethod
	def __handleButtonRelease( handle, event ) :

		splitContainer = handle.parent()
		handlePosition = CompoundEditor.__handlePosition( splitContainer )
		# only remember positions where both panels remain visible.
		if handlePosition != 0 and handlePosition != 1 :
			splitContainer.__preferredHandlePosition = handlePosition
			# the user has chosen explicitly - drop any auto-hide connections.
			for child in splitContainer :
				child.__enterConnection = None

		return False

	## Used to dynamically show collapsed editors when the handle is entered
	@staticmethod
	def __handleEnter( handle ) :

		splitContainer = handle.parent()
		sizes = splitContainer.getSizes()
		if 0 in sizes :
			# one panel is collapsed - animate back to the preferred
			# position, and arrange for the reveal to be undone when the
			# pointer enters the previously visible panel.
			preferredContainer = splitContainer[ 1 - sizes.index( 0 ) ]
			preferredContainer.__enterConnection = preferredContainer.enterSignal().connect( CompoundEditor.__preferredIndexEnter )
			sizes = [ splitContainer.__preferredHandlePosition, 1 - splitContainer.__preferredHandlePosition ]
			splitContainer.setSizes( sizes, CompoundEditor.__transitionDuration )

		return False

	## Used to dynamically hide editors automatically after being dynamically shown
	@staticmethod
	def __preferredIndexEnter( splitContainer ) :

		parent = splitContainer.parent()
		index = parent.index( splitContainer )
		# give this panel everything, collapsing its sibling back to zero.
		sizes = [ 1 if index==0 else 0, 0 if index==0 else 1 ]
		parent.setSizes( sizes, CompoundEditor.__transitionDuration )

		# this was a one-shot connection made in __handleEnter - drop it.
		splitContainer.__enterConnection = None

	# Idle callback used to mop up widgets after a join or tab removal.
	@staticmethod
	def __collect() :

		try :
			# collect repeatedly until nothing further is freed.
			while gc.collect() :
				pass
		except :
			pass

		return False
# The internal class used to allow hierarchical splitting of the layout.
class _SplitContainer( GafferUI.SplitContainer ) :

	# Trivial subclass, used so that layout code can identify the
	# compound editor's own panels by type (see _layoutMenuDefinition).
	def __init__( self, **kw ) :

		GafferUI.SplitContainer.__init__( self, **kw )
# The internal class used to keep a bunch of editors in tabs, updating the titles
# when appropriate, and keeping a track of the pinning of nodes.
class _TabbedContainer( GafferUI.TabbedContainer ) :

	def __init__( self, cornerWidget=None, **kw ) :

		GafferUI.TabbedContainer.__init__( self, cornerWidget, **kw )

		# the corner widget holds the pinning button and the layout menu button.
		with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 2, borderWidth=1 ) as cornerWidget :

			self.__pinningButton = GafferUI.Button( image="targetNodesUnlocked.png", hasFrame=False )

			layoutButton = GafferUI.MenuButton( image="layoutButton.png", hasFrame=False )
			layoutButton.setMenu( GafferUI.Menu( Gaffer.WeakMethod( self.__layoutMenuDefinition ) ) )
			layoutButton.setToolTip( "Click to modify the layout" )

		self.setCornerWidget( cornerWidget )

		self.__pinningButtonClickedConnection = self.__pinningButton.clickedSignal().connect( Gaffer.WeakMethod( self.__pinningButtonClicked ) )
		self.__currentTabChangedConnection = self.currentChangedSignal().connect( Gaffer.WeakMethod( self.__currentTabChanged ) )
		self.__dragEnterConnection = self.dragEnterSignal().connect( Gaffer.WeakMethod( self.__dragEnter ) )
		self.__dragLeaveConnection = self.dragLeaveSignal().connect( Gaffer.WeakMethod( self.__dragLeave ) )
		self.__dropConnection = self.dropSignal().connect( Gaffer.WeakMethod( self.__drop ) )

	## \todo We're overriding this so we can connect to titleChanged(). This works OK
	# because we know that CompoundEditor only uses insert(), and not any other means of
	# adding a child. A more general solution might be to have a Container.childAddedSignal()
	# method so we can get called no matter how we get a new child.
	def insert( self, index, editor ) :

		GafferUI.TabbedContainer.insert( self, index, editor )
		self.setLabel( editor, editor.getTitle() )
		# keep the tab label in sync with the editor's title.
		editor.__titleChangedConnection = editor.titleChangedSignal().connect( Gaffer.WeakMethod( self.__titleChanged ) )

		self.ancestor( CompoundEditor ).editorAddedSignal()( self.ancestor( CompoundEditor ), editor )

	def __layoutMenuDefinition( self ) :

		# the menu definition for the layout is dealt with by the CompoundEditor, because only it
		# has the high-level view necessary to do splitting of layouts and so on.
		return self.ancestor( CompoundEditor )._layoutMenuDefinition( self )

	def __titleChanged( self, editor ) :

		self.setLabel( editor, editor.getTitle() )

	def __currentTabChanged( self, tabbedContainer, currentEditor ) :

		# follow node set changes on the current editor only, so the
		# pinning button always reflects the frontmost tab.
		if isinstance( currentEditor, GafferUI.NodeSetEditor ) :
			self.__nodeSetChangedConnection = currentEditor.nodeSetChangedSignal().connect( Gaffer.WeakMethod( self.__updatePinningButton ) )
		else :
			self.__nodeSetChangedConnection = None

		self.__updatePinningButton()

	def __updatePinningButton( self, *unused ) :

		editor = self.getCurrent()
		if isinstance( editor, GafferUI.NodeSetEditor ) and editor.scriptNode() is not None :

			self.__pinningButton.setVisible( True )

			if editor.getNodeSet().isSame( editor.scriptNode().selection() ) :
				self.__pinningButton.setToolTip( "Click to lock view to current selection" )
				self.__pinningButton.setImage( "targetNodesUnlocked.png" )
			else :
				self.__pinningButton.setToolTip( "Click to unlock view and follow selection" )
				self.__pinningButton.setImage( "targetNodesLocked.png" )

		else :

			self.__pinningButton.setVisible( False )

	def __pinningButtonClicked( self, button ) :

		editor = self.getCurrent()
		assert( isinstance( editor, GafferUI.NodeSetEditor ) )

		nodeSet = editor.getNodeSet()
		selectionSet = editor.scriptNode().selection()
		if nodeSet.isSame( selectionSet ) :
			# lock : snapshot the current selection into a static set.
			nodeSet = Gaffer.StandardSet( list( nodeSet ) )
		else :
			# unlock : follow the live selection again.
			nodeSet = selectionSet
		editor.setNodeSet( nodeSet )

	def __dragEnter( self, tabbedContainer, event ) :

		currentEditor = self.getCurrent()
		if not isinstance( currentEditor, GafferUI.NodeSetEditor ) :
			return False

		if currentEditor.isAncestorOf( event.sourceWidget ) :
			return False

		# accept a single node, or a set whose first member is a node.
		result = False
		if isinstance( event.data, Gaffer.Node ) :
			result = True
		elif isinstance( event.data, Gaffer.Set ) :
			if event.data.size() and isinstance( event.data[0], Gaffer.Node ) :
				result = True

		if result :
			self.setHighlighted( True )
			self.__pinningButton.setHighlighted( True )

		return result

	def __dragLeave( self, tabbedContainer, event ) :

		self.setHighlighted( False )
		self.__pinningButton.setHighlighted( False )

	def __drop( self, tabbedContainer, event ) :

		if isinstance( event.data, Gaffer.Node ) :
			nodeSet = Gaffer.StandardSet( [ event.data ] )
		else :
			nodeSet = Gaffer.StandardSet( [ x for x in event.data if isinstance( x, Gaffer.Node ) ] )

		if event.modifiers & event.Modifiers.Shift :
			# shift-drop : open the dropped nodes in a new editor of the
			# same type as the current one.
			currentEditor = self.getCurrent()
			newEditor = currentEditor.__class__( currentEditor.scriptNode() )
			newEditor.setNodeSet( nodeSet )
			self.insert( 0, newEditor )
			self.setCurrent( newEditor )
		else :
			self.getCurrent().setNodeSet( nodeSet )

		self.setHighlighted( False )
		self.__pinningButton.setHighlighted( False )

		return True
|
# -*- coding: utf-8 -*-
"""
Created on Thursday July 27 2017
@author: Rama Vasudevan, Suhas Somnath, Chris R. Smith
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import sys
from os import path, remove, listdir # File Path formatting
import re
import numpy as np # For array operations
import h5py
from scipy.io import loadmat
from pyUSID.io.translator import Translator
from pyUSID.io.write_utils import Dimension
from pyUSID.io.hdf_utils import write_simple_attrs, write_main_dataset, \
create_indexed_group
if sys.version_info.major == 3:
unicode = str
class TRKPFMTranslator(Translator):
"""
Translates trKPFM datasets from .mat and .dat files to .h5
"""
def __init__(self, *args, **kwargs):
    """
    Create a TRKPFMTranslator.

    All arguments are forwarded unchanged to pyUSID's Translator base class.
    """
    super(TRKPFMTranslator, self).__init__(*args, **kwargs)
    # Placeholder for the raw channel datasets - presumably populated
    # later during translation (not set anywhere visible here; confirm
    # against translate()).
    self.raw_datasets = None
@staticmethod
def is_valid_file(data_path):
    """
    Checks whether the provided file can be read by this translator

    Parameters
    ----------
    data_path : str
        Path to folder or any data / parameter file within the folder

    Returns
    -------
    obj : str
        Path to file that will be accepted by the translate() function if
        this translator is indeed capable of translating the provided file.
        Otherwise, None will be returned
    """

    def get_chan_ind(line):
        # "<base>_ch<NN>.dat" -> channel index as an int, None otherwise.
        match_obj = re.match(r'(.*)_ch(..).dat', line, re.M | re.I)
        if not match_obj:
            return None
        return int(match_obj.group(2))

    # Work from the containing folder whether we were handed a file or a folder.
    if path.isfile(data_path):
        folder_path = path.split(data_path)[0]
    else:
        folder_path = data_path

    # Scan the folder for the parameter file and the per-channel data files.
    parm_file_name = None
    raw_data_paths = []
    for item in listdir(path=folder_path):
        if item.endswith('parm.mat'):
            parm_file_name = item
        elif isinstance(get_chan_ind(item), int):
            raw_data_paths.append(item)

    # Both the parameter and data files MUST be found:
    if parm_file_name is not None and len(raw_data_paths) > 0:
        # translate() expects the path to the parameter file.
        return path.join(folder_path, parm_file_name)

    return None
def _parse_file_path(self, input_path):
folder_path, base_name = path.split(input_path)
base_name = base_name[:-8]
h5_path = path.join(folder_path, base_name + '.h5')
if path.exists(h5_path):
remove(h5_path)
self.h5_path = h5_path
# Until a better method is provided....
self.file_list = list()
for file in listdir(folder_path):
if '.dat' in file:
self.file_list.append(path.join(folder_path, file))
self.file_list = sorted(self.file_list)
@staticmethod
def _parse_spectrogram_size(file_handle):
"""
Parameters
----------
file_handle
Returns
-------
data_length: int, size of the spectrogram
count: int, number of pixels in dataset +1
""""""
"""
f = file_handle
cont_cond = True
count = 0
data_lengths = []
while cont_cond:
#print(count, f.tell())
count += 1
data_length = np.fromfile(f, dtype=np.float32, count=1)
if data_length > 0:
data_lengths.append(int(data_length))
f.seek(int(data_length - 1) * 4, 1)
else:
cont_cond = False
if len(np.unique(np.array(data_lengths))) > 1:
print("Unequal data lengths! Cannot continue")
else:
print("Equal data lengths")
return data_lengths[0], count
def translate(self, parm_path):
"""
The main function that translates the provided file into a .h5 file
Parameters
------------
parm_path : string / unicode
Absolute file path of the parameters .mat file.
Returns
----------
h5_path : string / unicode
Absolute path of the translated h5 file
"""
parm_path = path.abspath(parm_path)
parm_dict, excit_wfm = self._read_parms(parm_path)
excit_wfm = excit_wfm[1::2]
self._parse_file_path(parm_path)
num_dat_files = len(self.file_list)
f = open(self.file_list[0], 'rb')
spectrogram_size, count_vals = self._parse_spectrogram_size(f)
print("Excitation waveform shape: ", excit_wfm.shape)
print("spectrogram size:", spectrogram_size)
num_pixels = parm_dict['grid_num_rows'] * parm_dict['grid_num_cols']
print('Number of pixels: ', num_pixels)
print('Count Values: ', count_vals)
#if (num_pixels + 1) != count_vals:
# print("Data size does not match number of pixels expected. Cannot continue")
#Find how many channels we have to make
num_ai_chans = num_dat_files // 2 # Division by 2 due to real/imaginary
# Now start creating datasets and populating:
#Start with getting an h5 file
h5_file = h5py.File(self.h5_path)
#First create a measurement group
h5_meas_group = create_indexed_group(h5_file, 'Measurement')
#Set up some parameters that will be written as attributes to this Measurement group
global_parms = dict()
global_parms['data_type'] = 'trKPFM'
global_parms['translator'] = 'trKPFM'
write_simple_attrs(h5_meas_group, global_parms)
write_simple_attrs(h5_meas_group, parm_dict)
#Now start building the position and spectroscopic dimension containers
#There's only one spectroscpoic dimension and two position dimensions
#The excit_wfm only has the DC values without any information on cycles, time, etc.
#What we really need is to add the time component. For every DC step there are some time steps.
num_time_steps = (spectrogram_size-5) //excit_wfm.size //2 #Need to divide by 2 because it considers on and off field
#There should be three spectroscopic axes
#In order of fastest to slowest varying, we have
#time, voltage, field
time_vec = np.linspace(0, parm_dict['IO_time'], num_time_steps)
print('Num time steps: {}'.format(num_time_steps))
print('DC Vec size: {}'.format(excit_wfm.shape))
print('Spectrogram size: {}'.format(spectrogram_size))
field_vec = np.array([0,1])
spec_dims = [Dimension ('Time', 's', time_vec),Dimension('Field', 'Binary', field_vec),
Dimension('Bias', 'V', excit_wfm)]
pos_dims = [Dimension('Cols', 'nm', np.arrange(parm_dict['grid_num_rows']),
Dimension('Rows', 'um', np.arrange(parm_dict['grid_num_cols'])]
self.raw_datasets = list()
for chan_index in range(num_ai_chans):
chan_grp = create_indexed_group(h5_meas_group,'Channel')
if chan_index == 0:
write_simple_attrs(chan_grp,{'Harmonic': 1})
else:
write_simple_attrs(chan_grp,{'Harmonic': 2})
h5_raw = write_main_dataset(chan_grp, # parent HDF5 group
(num_pixels, spectrogram_size - 5),
# shape of Main dataset
'Raw_Data', # Name of main dataset
'Deflection', # Physical quantity contained in Main dataset
'V', # Units for the physical quantity
pos_dims, # Position dimensions
spec_dims, # Spectroscopic dimensions
dtype=np.complex64, # data type / precision
compression='gzip',
chunks=(1, spectrogram_size - 5),
main_dset_attrs={'quantity': 'Complex'})
#h5_refs = hdf.write(chan_grp, print_log=False)
#h5_raw = get_h5_obj_refs(['Raw_Data'], h5_refs)[0]
#link_h5_objects_as_attrs(h5_raw, get_h5_obj_refs(aux_ds_names, h5_refs))
self.raw_datasets.append(h5_raw)
self.raw_datasets.append(h5_raw)
# Now that the N channels have been made, populate them with the actual data....
self._read_data(parm_dict, parm_path, spectrogram_size)
h5_file.file.close()
#hdf.close()
return self.h5_path
def _read_data(self, parm_dict, parm_path, data_length):
"""
Reads raw data and populates the h5 datasets
Parameters
----------
parm_dict : Dictionary
dictionary containing parameters for this data
folder_path : string / unicode
Absolute path of folder containing the data
"""
# Determine number of pixels
num_pixels = parm_dict['grid_num_rows'] * parm_dict['grid_num_cols']
# The four files in TRKPFM are for real and imaginary parts for 1st, 2nd harmonic
# Create a list of [True,False,True,False] so files can be written to
# the appropraite channel
real_imag = np.zeros(shape=(len(self.file_list), 1))
real_imag[::2] = 1
real_cond = []
for entry in real_imag:
if entry > 0:
real_cond.append(True)
else:
real_cond.append(False)
# Scan through all the .dat files available
for ifile, file_path in enumerate(self.file_list):
f = open(file_path, 'rb')
results_p = self.read_file(data_length, f)
spectrogram_matrix = np.array(results_p[:])
b_axis = spectrogram_matrix.shape[2]
c_axis = spectrogram_matrix.shape[1]
# dall = np.transpose(spectrogram_matrix, (0, 2, 1)).reshape(num_pixels * c_axis, b_axis)
dall = np.transpose(spectrogram_matrix, (0, 2, 1)).reshape(-1, b_axis)
_, ia, ic = np.unique(dall, axis=0, return_index=True, return_inverse=True)
reprowind = np.setdiff1d(ic, ia)
if len(reprowind > 0):
dall[reprowind, :] = np.nan
# Write to the datasets
h5_main = self.raw_datasets[ifile]
if real_cond[ifile]:
print('Dall Size is: ', dall.shape)
# Do some error catching. In case the last pixel is absent, then just ignore it.
try:
h5_main[:, :] = dall.reshape(h5_main.shape) + 1j * 0
except ValueError:
h5_main[:-1, :] = dall.reshape(h5_main.shape[0] - 1, h5_main.shape[1]) + 1j * 0
else:
# Error catching. In case the last pixel is absent, then just ignore it.
try:
h5_main[:, :] += 0 + 1j * dall.reshape(h5_main.shape)
except ValueError:
h5_main[:-1, :] += 0 + 1j * dall.reshape(h5_main.shape[0] - 1, h5_main.shape[1])
h5_main.file.flush()
@staticmethod
def read_file(data_length, f):
start_point = 0
count = 0
count_vals = []
f.seek(start_point * 4, 0)
cont_cond = True
results_p = []
while cont_cond:
count_vals.append(count)
count += 1
data_vec = np.fromfile(f, dtype=np.float32, count=int(data_length))
data_vec1 = data_vec[5:int(data_length)]
if len(data_vec) > 1:
s1 = data_vec[3]
s2 = data_vec[4]
# print('Data_mat and s1,s2:', data_vec1.shape, s1, s2)
data_mat1 = data_vec1.reshape(int(s2), int(s1)).T
results_p.append(data_mat1)
else:
cont_cond = False
f.close()
return results_p
@staticmethod
def _read_parms(parm_path):
"""
Copies experimental parameters from the .mat file to a dictionary
Parameters
----------
parm_path : string / unicode
Absolute path of the parameters file
Returns
-------
parm_dict : dictionary
Dictionary containing all relevant parameters
excit_wfm : 1d numpy float array
Excitation waveform containing the full DC amplitude vector
"""
h5_f = loadmat(parm_path)
parm_dict = dict()
parm_dict['IO_samp_rate_[Hz]'] = np.uint32(h5_f['IO_rate'][0][0])
parm_dict['IO_time'] = np.float32(h5_f['IO_time'][0][0])
excit_wfm = np.float32(np.squeeze(h5_f['dc_amp_vec']))
parm_dict['grid_num_rows'] = np.int(h5_f['num_rows'][0][0])
parm_dict['grid_num_cols'] = np.int(h5_f['num_cols'][0][0])
return parm_dict, excit_wfm
Fixed Spectroscopic and Position Dims
# -*- coding: utf-8 -*-
"""
Created on Thursday July 27 2017
@author: Rama Vasudevan, Suhas Somnath, Chris R. Smith
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import sys
from os import path, remove, listdir # File Path formatting
import re
import numpy as np # For array operations
import h5py
from scipy.io import loadmat
from pyUSID.io.translator import Translator
from pyUSID.io.write_utils import Dimension
from pyUSID.io.hdf_utils import write_simple_attrs, write_main_dataset, \
create_indexed_group
if sys.version_info.major == 3:
unicode = str
class TRKPFMTranslator(Translator):
    """
    Translates trKPFM datasets from .mat (parameter) and .dat (raw data)
    files to a single USID-formatted .h5 file.
    """

    def __init__(self, *args, **kwargs):
        super(TRKPFMTranslator, self).__init__(*args, **kwargs)
        # Filled by translate(): each channel's HDF5 dataset appears twice so
        # the list lines up 1:1 with the (real, imaginary) raw file pairs.
        self.raw_datasets = None

    @staticmethod
    def is_valid_file(data_path):
        """
        Checks whether the provided file can be read by this translator

        Parameters
        ----------
        data_path : str
            Path to folder or any data / parameter file within the folder

        Returns
        -------
        obj : str
            Path to file that will be accepted by the translate() function if
            this translator is indeed capable of translating the provided file.
            Otherwise, None will be returned
        """

        def get_chan_ind(line):
            # Extract the integer channel index from names like "xyz_ch01.dat";
            # returns None when the name does not match that pattern.
            match_obj = re.match(r'(.*)_ch(..).dat', line, re.M | re.I)
            if match_obj:
                # group(1) is the base name (str), group(2) the channel index
                # (int); only the channel index is of interest here.
                type_list = [str, int]
                return [type_caster(match_obj.group(ind))
                        for ind, type_caster
                        in zip(range(1, 1 + len(type_list)), type_list)][-1]
            return None

        if path.isfile(data_path):
            # Assume that the file is amongst all other data files
            folder_path, _ = path.split(data_path)
        else:
            folder_path = data_path

        # Now looking at the folder with all necessary files:
        file_list = listdir(path=folder_path)

        parm_file_name = None
        raw_data_paths = list()
        for item in file_list:
            if item.endswith('parm.mat'):
                parm_file_name = item
            elif isinstance(get_chan_ind(item), int):
                raw_data_paths.append(item)

        # Both the parameter and data files MUST be found:
        if parm_file_name is not None and len(raw_data_paths) > 0:
            # Return the parameter file path - it is what translate() expects
            return path.join(folder_path, parm_file_name)
        return None

    def _parse_file_path(self, input_path):
        """
        Derives the output .h5 path from the parameter file path and collects
        every .dat file in the same folder (sorted) into self.file_list.

        Parameters
        ----------
        input_path : str
            Absolute path of the xxx_parm.mat parameter file
        """
        folder_path, base_name = path.split(input_path)
        base_name = base_name[:-8]  # strip the trailing "parm.mat"

        h5_path = path.join(folder_path, base_name + '.h5')
        # Start from a clean output file when re-translating
        if path.exists(h5_path):
            remove(h5_path)
        self.h5_path = h5_path

        # Until a better method is provided....
        self.file_list = list()
        for file in listdir(folder_path):
            if '.dat' in file:
                self.file_list.append(path.join(folder_path, file))
        self.file_list = sorted(self.file_list)

    @staticmethod
    def _parse_spectrogram_size(file_handle):
        """
        Scans a raw .dat file to determine the per-pixel spectrogram size.

        Each pixel record begins with a float32 giving its own length; the
        scan hops record to record until a non-positive or missing length.

        Parameters
        ----------
        file_handle : file
            Handle to a raw .dat file opened in binary mode

        Returns
        -------
        data_length : int
            size of the spectrogram
        count : int
            number of pixels in dataset + 1
        """
        f = file_handle
        cont_cond = True
        count = 0
        data_lengths = []
        while cont_cond:
            count += 1
            data_length = np.fromfile(f, dtype=np.float32, count=1)
            # BUGFIX: guard against an empty read at EOF before comparing;
            # truth-testing an empty array is ambiguous in modern numpy.
            if data_length.size > 0 and data_length[0] > 0:
                data_lengths.append(int(data_length[0]))
                # Skip over the record body to the next length marker
                f.seek(int(data_length[0] - 1) * 4, 1)
            else:
                cont_cond = False

        # NOTE(review): unequal lengths are only reported, not rejected -
        # translation proceeds with the first length regardless.
        if len(np.unique(np.array(data_lengths))) > 1:
            print("Unequal data lengths! Cannot continue")
        else:
            print("Equal data lengths")

        return data_lengths[0], count

    def translate(self, parm_path):
        """
        The main function that translates the provided file into a .h5 file

        Parameters
        ----------
        parm_path : string / unicode
            Absolute file path of the parameters .mat file.

        Returns
        -------
        h5_path : string / unicode
            Absolute path of the translated h5 file
        """
        parm_path = path.abspath(parm_path)
        parm_dict, excit_wfm = self._read_parms(parm_path)
        excit_wfm = excit_wfm[1::2]  # keep every other DC value
        self._parse_file_path(parm_path)

        num_dat_files = len(self.file_list)

        # BUGFIX: close the probe file once the spectrogram size is known
        with open(self.file_list[0], 'rb') as f:
            spectrogram_size, count_vals = self._parse_spectrogram_size(f)
        print("Excitation waveform shape: ", excit_wfm.shape)
        print("spectrogram size:", spectrogram_size)
        num_pixels = parm_dict['grid_num_rows'] * parm_dict['grid_num_cols']
        print('Number of pixels: ', num_pixels)
        print('Count Values: ', count_vals)

        # Each channel is stored as a (real, imaginary) pair of .dat files,
        # hence the division by 2.
        num_ai_chans = num_dat_files // 2

        # BUGFIX: h5py >= 3 requires an explicit mode; 'a' matches the legacy
        # default (the file was removed by _parse_file_path if it existed).
        h5_file = h5py.File(self.h5_path, mode='a')

        # First create a measurement group
        h5_meas_group = create_indexed_group(h5_file, 'Measurement')

        # Parameters written as attributes to this Measurement group
        global_parms = dict()
        global_parms['data_type'] = 'trKPFM'
        global_parms['translator'] = 'trKPFM'
        write_simple_attrs(h5_meas_group, global_parms)
        write_simple_attrs(h5_meas_group, parm_dict)

        # The excit_wfm only has the DC values; the time axis must be
        # reconstructed. For every DC step there are num_time_steps points,
        # recorded both on- and off-field (hence the extra // 2). The first 5
        # values of each spectrogram are header, not data.
        num_time_steps = (spectrogram_size - 5) // excit_wfm.size // 2

        # Three spectroscopic axes; fastest to slowest: time, field, bias
        time_vec = np.linspace(0, parm_dict['IO_time'], num_time_steps)
        print('Num time steps: {}'.format(num_time_steps))
        print('DC Vec size: {}'.format(excit_wfm.shape))
        print('Spectrogram size: {}'.format(spectrogram_size))
        field_vec = np.array([0, 1])

        spec_dims = [Dimension('Time', 's', time_vec),
                     Dimension('Field', 'Binary', field_vec),
                     Dimension('Bias', 'V', excit_wfm)]
        # BUGFIX: replaced the non-existent np.arrange with np.arange.
        # NOTE(review): 'Cols' is sized by grid_num_rows and 'Rows' by
        # grid_num_cols - looks swapped; confirm against real data.
        pos_dims = [Dimension('Cols', 'nm',
                              np.arange(parm_dict['grid_num_rows'])),
                    Dimension('Rows', 'um',
                              np.arange(parm_dict['grid_num_cols']))]

        self.raw_datasets = list()
        for chan_index in range(num_ai_chans):
            chan_grp = create_indexed_group(h5_meas_group, 'Channel')
            if chan_index == 0:
                write_simple_attrs(chan_grp, {'Harmonic': 1})
            else:
                write_simple_attrs(chan_grp, {'Harmonic': 2})

            h5_raw = write_main_dataset(chan_grp,  # parent HDF5 group
                                        (num_pixels, spectrogram_size - 5),
                                        'Raw_Data',  # Name of main dataset
                                        'Deflection',  # Physical quantity
                                        'V',  # Units
                                        pos_dims,  # Position dimensions
                                        spec_dims,  # Spectroscopic dimensions
                                        dtype=np.complex64,
                                        compression='gzip',
                                        chunks=(1, spectrogram_size - 5),
                                        main_dset_attrs={'quantity': 'Complex'})
            # Appended twice on purpose: the real and imaginary .dat files of
            # a channel both map (by index) to this dataset in _read_data().
            self.raw_datasets.append(h5_raw)
            self.raw_datasets.append(h5_raw)

        # Now that the N channels have been made, populate them with data
        self._read_data(parm_dict, parm_path, spectrogram_size)

        h5_file.file.close()
        return self.h5_path

    def _read_data(self, parm_dict, parm_path, data_length):
        """
        Reads raw data and populates the h5 datasets

        Parameters
        ----------
        parm_dict : dict
            dictionary containing parameters for this data
        parm_path : string / unicode
            Absolute path of the parameters .mat file (kept for interface
            compatibility; not used directly here)
        data_length : int
            Per-pixel spectrogram length (header values included)
        """
        # Determine number of pixels
        num_pixels = parm_dict['grid_num_rows'] * parm_dict['grid_num_cols']

        # The .dat files alternate real / imaginary parts for each harmonic;
        # build a [True, False, True, False, ...] mask so each file is written
        # into the appropriate component of its channel.
        real_imag = np.zeros(shape=(len(self.file_list), 1))
        real_imag[::2] = 1
        real_cond = []
        for entry in real_imag:
            real_cond.append(bool(entry > 0))

        # Scan through all the .dat files available
        for ifile, file_path in enumerate(self.file_list):
            f = open(file_path, 'rb')
            results_p = self.read_file(data_length, f)
            spectrogram_matrix = np.array(results_p[:])
            b_axis = spectrogram_matrix.shape[2]

            # Flatten pixels x time into rows, one spectral vector per row
            dall = np.transpose(spectrogram_matrix,
                                (0, 2, 1)).reshape(-1, b_axis)

            # Blank out repeated rows with NaN
            _, ia, ic = np.unique(dall, axis=0, return_index=True,
                                  return_inverse=True)
            reprowind = np.setdiff1d(ic, ia)
            # BUGFIX: parenthesis was misplaced - len(reprowind > 0) happened
            # to have the same truthiness, but this is what was meant:
            if len(reprowind) > 0:
                dall[reprowind, :] = np.nan

            # Write to the datasets
            h5_main = self.raw_datasets[ifile]
            if real_cond[ifile]:
                print('Dall Size is: ', dall.shape)
                # In case the last pixel is absent, just ignore it.
                try:
                    h5_main[:, :] = dall.reshape(h5_main.shape) + 1j * 0
                except ValueError:
                    h5_main[:-1, :] = dall.reshape(h5_main.shape[0] - 1,
                                                   h5_main.shape[1]) + 1j * 0
            else:
                # In case the last pixel is absent, just ignore it.
                try:
                    h5_main[:, :] += 0 + 1j * dall.reshape(h5_main.shape)
                except ValueError:
                    h5_main[:-1, :] += 0 + 1j * dall.reshape(
                        h5_main.shape[0] - 1, h5_main.shape[1])
            h5_main.file.flush()

    @staticmethod
    def read_file(data_length, f):
        """
        Reads one raw .dat file into a list of per-pixel matrices.

        Parameters
        ----------
        data_length : int
            Per-pixel record length in float32 values (header included)
        f : file
            Handle to the .dat file, opened in binary mode. Closed on return.

        Returns
        -------
        results_p : list
            One 2D numpy array per pixel record found in the file
        """
        f.seek(0, 0)
        cont_cond = True
        results_p = []
        while cont_cond:
            data_vec = np.fromfile(f, dtype=np.float32, count=int(data_length))
            if len(data_vec) > 1:
                # Header values 3 and 4 give the matrix shape; the payload
                # starts after the 5-value header.
                data_vec1 = data_vec[5:int(data_length)]
                s1 = data_vec[3]
                s2 = data_vec[4]
                data_mat1 = data_vec1.reshape(int(s2), int(s1)).T
                results_p.append(data_mat1)
            else:
                cont_cond = False
        f.close()
        return results_p

    @staticmethod
    def _read_parms(parm_path):
        """
        Copies experimental parameters from the .mat file to a dictionary

        Parameters
        ----------
        parm_path : string / unicode
            Absolute path of the parameters file

        Returns
        -------
        parm_dict : dictionary
            Dictionary containing all relevant parameters
        excit_wfm : 1d numpy float array
            Excitation waveform containing the full DC amplitude vector
        """
        h5_f = loadmat(parm_path)
        parm_dict = dict()
        parm_dict['IO_samp_rate_[Hz]'] = np.uint32(h5_f['IO_rate'][0][0])
        parm_dict['IO_time'] = np.float32(h5_f['IO_time'][0][0])
        excit_wfm = np.float32(np.squeeze(h5_f['dc_amp_vec']))
        # BUGFIX: np.int was removed in numpy 1.24; the builtin is equivalent
        parm_dict['grid_num_rows'] = int(h5_f['num_rows'][0][0])
        parm_dict['grid_num_cols'] = int(h5_f['num_cols'][0][0])
        return parm_dict, excit_wfm
|
import numpy as np
from .small_classes import Strings, Numbers
from .posyarray import PosyArray
from .varkey import VarKey
from .small_scripts import diff, mono_approx
from .small_scripts import latex_num
from .small_scripts import sort_and_simplify
from .small_scripts import locate_vars
from .small_scripts import invalid_types_for_oper
from .small_scripts import mag, unitstr
from . import units as ureg
from . import DimensionalityError
# pint Quantities (dimensioned numbers) are accepted anywhere a plain number
# is, so register the Quantity type with the Numbers isinstance-check tuple.
Quantity = ureg.Quantity
Numbers += (Quantity,)
def vkSortBy(exp_item):
    "Returns description from exps.items() elements, for sorting by exponent."
    varkey = exp_item[0]
    return list(varkey.descr.items())
class Signomial(object):
    """A representation of a signomial.

    Parameters
    ----------
    exps: tuple of dicts
        Exponent dicts for each monomial term
    cs: tuple
        Coefficient values for each monomial term
    varlocsandkeys: dict
        mapping from variable name to list of indices of monomial terms
        that variable appears in
    require_positive: bool
        If True and signomials not enabled, c <= 0 will raise ValueError

    Returns
    -------
    Signomial
    Posynomial (if the input has only positive cs)
    Monomial (if the input has one term and only positive cs)
    """

    def __init__(self, exps=None, cs=1, varlocsandkeys=None,
                 require_positive=True, **descr):
        # Normalizes the many accepted input forms (number, string/VarKey,
        # dict, Signomial, or parallel exps/cs sequences) into sorted and
        # simplified exps/cs, then downcasts self to Posynomial / Monomial
        # where applicable.
        units = None
        if isinstance(exps, Numbers):
            # Signomial(3) is a constant: treat the number as cs
            cs = exps
            exps = {}
        if (isinstance(cs, Numbers)
                and (exps is None or isinstance(exps, Strings + (VarKey, dict)))):
            # building a Monomial
            if isinstance(exps, VarKey):
                exp = {exps: 1}
                units = exps.units
            elif exps is None or isinstance(exps, Strings):
                # create a fresh variable described by **descr
                exp = {VarKey(exps, **descr): 1}
                descr = list(exp)[0].descr
                units = descr["units"] if "units" in descr else None
            elif isinstance(exps, dict):
                # replace string keys with VarKeys
                exp = dict(exps)
                for key in exps:
                    if isinstance(key, Strings):
                        exp[VarKey(key)] = exp.pop(key)
            else:
                raise TypeError("could not make Monomial with %s" % type(exps))
            if isinstance(units, Quantity):
                cs = cs * units
            cs = [cs]
            exps = [exp]
        elif isinstance(exps, Signomial):
            # copy-construct from another Signomial
            cs = exps.cs
            varlocs = exps.varlocs
            exps = exps.exps
        else:
            # test for presence of length and identical lengths
            try:
                assert len(cs) == len(exps)
                exps_ = list(range(len(exps)))
                if not isinstance(cs[0], Quantity):
                    try:
                        cs = np.array(cs, dtype='float')
                    except ValueError:
                        raise ValueError("cannot add dimensioned and"
                                         " dimensionless monomials together.")
                else:
                    # convert all coefficients into the first one's units
                    units = cs[0]/cs[0].magnitude
                    if units.dimensionless:
                        cs = [c * ureg.dimensionless for c in cs]
                    cs = [c.to(units).magnitude for c in cs] * units
                    if not all([c.dimensionality == units.dimensionality
                                for c in cs]):
                        raise ValueError("cannot add monomials of"
                                         " different units together")
                # replace string / Monomial keys in each exponent dict
                for i in range(len(exps)):
                    exps_[i] = dict(exps[i])
                    for key in exps_[i]:
                        if isinstance(key, Strings+(Monomial,)):
                            exps_[i][VarKey(key)] = exps_[i].pop(key)
                exps = exps_
            except AssertionError:
                raise TypeError("cs and exps must have the same length.")
        exps, cs = sort_and_simplify(exps, cs)
        if isinstance(cs, Quantity):
            any_negative = any((c.magnitude <= 0 for c in cs))
        else:
            any_negative = any((c <= 0 for c in cs))
        if any_negative:
            from . import SIGNOMIALS_ENABLED
            if require_positive and not SIGNOMIALS_ENABLED:
                raise ValueError("each c must be positive.")
        else:
            # all-positive coefficients: this is a Posynomial
            self.__class__ = Posynomial
        if isinstance(cs[0], Quantity):
            units = cs[0]/cs[0].magnitude
        elif "units" in descr:
            units = descr["units"]
            if isinstance(units, Quantity):
                cs = cs*units
        else:
            units = None
        self.cs = cs
        self.exps = exps
        self.units = units
        if len(exps) == 1:
            # single term: downcast to Monomial and expose .exp / .c
            if self.__class__ is Posynomial:
                self.__class__ = Monomial
            self.exp = exps[0]
            self.c = cs[0]
        if varlocsandkeys is None:
            varlocsandkeys = locate_vars(exps)
        self.varlocs, self.varkeys = varlocsandkeys
        self._hashvalue = hash(tuple(zip(self.exps, tuple(self.cs))))

    @property
    def value(self):
        """Substitutes any stored variable values; returns a plain number
        when fully evaluated, else the partially-substituted Signomial."""
        values = {vk: vk.descr["value"] for vk in self.varlocs.keys()
                  if "value" in vk.descr}
        p = self.sub(values)
        if isinstance(p, Monomial):
            if not p.exp:
                return p.c
        return p

    def to(self, arg):
        "Returns a copy whose coefficients are converted via cs.to(arg)."
        return Signomial(self.exps, self.cs.to(arg).tolist())

    def diff(self, var):
        "Returns the derivative of this Signomial with respect to var."
        if var in self.varkeys:
            var = self.varkeys[var]
        elif isinstance(var, Monomial):
            # accept a single-variable Monomial in place of its VarKey
            vks = list(var.exp)
            if len(vks) == 1:
                var = vks[0]
        exps, cs = diff(self, var)
        return Signomial(exps, cs, require_positive=False)

    def mono_approximation(self, x0):
        "Returns the Monomial approximation of self about the point x0."
        if isinstance(self, Monomial):
            raise TypeError("making a Monomial approximation of %s"
                            " is unnecessary; it's already a Monomial."
                            "" % str(self))
        else:
            c, exp = mono_approx(self, getsubs(self.varkeys, self.varlocs, x0))
            return Monomial(exp, c)

    def sub(self, substitutions, val=None, require_positive=True):
        """Make substitutions and return a new object, without modifying self.

        Parameters
        ----------
        substitutions: dict
            Mapping from variables to substituted values or monomials
        val: number
            Optional way to substitute singlet variables
        require_positive: bool
            If True, require positive cs for returned Signomial.
        """
        # substitution / getsubs are imported at the bottom of this module
        # to avoid a circular import
        varlocs, exps, cs, subs = substitution(self.varlocs, self.varkeys,
                                               self.exps, self.cs,
                                               substitutions, val)
        return Signomial(exps, cs, units=self.units,
                         require_positive=require_positive)

    def subcmag(self, substitutions, val=None):
        "Substitutes for all variables; returns the resulting magnitude."
        varlocs, exps, cs, subs = substitution(self.varlocs, self.varkeys,
                                               self.exps, mag(self.cs),
                                               substitutions, val)
        if any(exps):
            raise ValueError("could not substitute for all variables.")
        return mag(cs).sum()

    def prod(self):
        "Returns self."
        return self

    def sum(self):
        "Returns self."
        return self

    # hashing, immutability, Signomial inequality
    def __hash__(self):
        return self._hashvalue

    def __ne__(self, other):
        # NOTE(review): returns False (not NotImplemented) for non-Signomials;
        # confirm that asymmetry is intended.
        if isinstance(other, Signomial):
            return not (self.exps == other.exps and self.cs == other.cs)
        else:
            return False

    # constraint generation
    def __eq__(self, other):
        # if at least one is a monomial, return a constraint
        mons = Numbers+(Monomial,)
        if isinstance(other, mons) and isinstance(self, mons):
            return MonoEQConstraint(self, other)
        elif isinstance(other, Signomial) and isinstance(self, Signomial):
            if self.exps == other.exps:
                if isinstance(self.cs, Quantity):
                    return all(self.cs.magnitude <= other.cs)
                else:
                    return all(self.cs <= other.cs)
            else:
                return False
        else:
            return False

    def __le__(self, other):
        # defer to PosyArray's elementwise comparison
        if isinstance(other, PosyArray):
            return NotImplemented
        else:
            return Constraint(self, other)

    def __ge__(self, other):
        if isinstance(other, PosyArray):
            return NotImplemented
        else:
            return Constraint(other, self)

    def __lt__(self, other):
        # strict inequalities have no meaning for signomial constraints
        invalid_types_for_oper("<", self, other)

    def __gt__(self, other):
        invalid_types_for_oper(">", self, other)

    def __str__(self, mult_symbol='*'):
        "Returns e.g. '3*x**2 + y'; terms are sorted alphabetically."
        mstrs = []
        for c, exp in zip(self.cs, self.exps):
            varstrs = ['%s**%.2g' % (var, x) if x != 1 else "%s" % var
                       for (var, x) in sorted(exp.items(), key=vkSortBy) if x != 0]
            c = mag(c)
            cstr = "%.2g" % c
            if cstr == "-1":
                mstrs.append("-" + mult_symbol.join(varstrs))
            else:
                # drop a coefficient of exactly "1" unless the term is constant
                cstr = [cstr] if cstr != "1" or not varstrs else []
                mstrs.append(mult_symbol.join(cstr + varstrs))
        return " + ".join(sorted(mstrs)) + unitstr(self.units, ", units='%s'")

    def descr(self, descr):
        # NOTE(review): assigning to self.descr shadows this method on the
        # instance after the first call - confirm that is intended.
        self.descr = descr
        return self

    def __repr__(self):
        return "gpkit.%s(%s)" % (self.__class__.__name__, str(self))

    def _latex(self, unused=None):
        "For pretty printing with Sympy"
        mstrs = []
        for c, exp in zip(self.cs, self.exps):
            # split variables into numerator (x > 0) and denominator (x < 0)
            pos_vars, neg_vars = [], []
            for var, x in sorted(exp.items(), key=vkSortBy):
                if x > 0:
                    pos_vars.append((var._latex(), x))
                elif x < 0:
                    neg_vars.append((var._latex(), x))
            pvarstrs = ['%s^{%.2g}' % (varl, x) if "%.2g" % x != "1" else varl
                        for (varl, x) in pos_vars]
            nvarstrs = ['%s^{%.2g}' % (varl, -x)
                        if "%.2g" % -x != "1" else varl
                        for (varl, x) in neg_vars]
            pvarstr = ' '.join(pvarstrs)
            nvarstr = ' '.join(nvarstrs)
            c = mag(c)
            cstr = "%.2g" % c
            if pos_vars and (cstr == "1" or cstr == "-1"):
                # keep only the sign of a unit coefficient
                cstr = cstr[:-1]
            else:
                cstr = latex_num(c)
            if not pos_vars and not neg_vars:
                mstrs.append("%s" % cstr)
            elif pos_vars and not neg_vars:
                mstrs.append("%s%s" % (cstr, pvarstr))
            elif neg_vars and not pos_vars:
                mstrs.append("\\frac{%s}{%s}" % (cstr, nvarstr))
            elif pos_vars and neg_vars:
                mstrs.append("%s\\frac{%s}{%s}" % (cstr, pvarstr, nvarstr))
        units = unitstr(self.units, "\mathrm{\\left[ %s \\right]}", "L~")
        units_tf = units.replace("frac", "tfrac").replace("\\cdot", "\\cdot ")
        return " + ".join(sorted(mstrs)) + units_tf

    # posynomial arithmetic
    def __add__(self, other):
        if isinstance(other, Numbers):
            if other == 0:
                return Signomial(self.exps, self.cs,
                                 (self.varlocs, self.varkeys))
            else:
                # a number is a monomial with an empty exponent dict
                return Signomial(self.exps + ({},),
                                 self.cs.tolist() + [other])
        elif isinstance(other, Signomial):
            return Signomial(self.exps + other.exps,
                             self.cs.tolist() + other.cs.tolist())
        elif isinstance(other, PosyArray):
            return np.array(self)+other
        else:
            return NotImplemented

    def __radd__(self, other):
        return self + other

    def __mul__(self, other):
        if isinstance(other, Numbers):
            if not other:
                # assume other is multiplicative zero
                return other
            return Signomial(self.exps, other*self.cs,
                             (self.varlocs, self.varkeys))
        elif isinstance(other, Signomial):
            # every term of self multiplies every term of other
            C = np.outer(self.cs, other.cs)
            if isinstance(self.cs, Quantity) or isinstance(other.cs, Quantity):
                if not isinstance(self.cs, Quantity):
                    sunits = ureg.dimensionless
                else:
                    sunits = self.cs[0]/self.cs[0].magnitude
                if not isinstance(other.cs, Quantity):
                    ounits = ureg.dimensionless
                else:
                    ounits = other.cs[0]/other.cs[0].magnitude
                # hack fix for pint not working with np.outer
                C = C * sunits * ounits
            Exps = np.empty((len(self.exps), len(other.exps)), dtype="object")
            for i, exp_s in enumerate(self.exps):
                for j, exp_o in enumerate(other.exps):
                    Exps[i, j] = exp_s + exp_o
            return Signomial(Exps.flatten(), C.flatten())
        elif isinstance(other, PosyArray):
            return np.array(self)*other
        else:
            return NotImplemented

    def __rmul__(self, other):
        return self * other

    def __div__(self, other):
        "Division is only defined by Numbers and Monomials."
        if isinstance(other, Numbers):
            return Signomial(self.exps, self.cs/other)
        elif isinstance(other, Monomial):
            exps = [exp - other.exp for exp in self.exps]
            return Signomial(exps, self.cs/other.c)
        elif isinstance(other, PosyArray):
            return np.array(self)/other
        else:
            return NotImplemented

    def __truediv__(self, other):
        return self.__div__(other)

    def __pow__(self, x):
        if isinstance(x, int):
            if x >= 0:
                # repeated multiplication starting from the identity Monomial
                p = Monomial({}, 1)
                while x > 0:
                    p *= self
                    x -= 1
                return p
            else:
                raise ValueError("Signomial are only closed under"
                                 " nonnegative integer exponents.")
        else:
            return NotImplemented

    def __neg__(self):
        # negation only makes sense when signomials are enabled
        from . import SIGNOMIALS_ENABLED
        if not SIGNOMIALS_ENABLED:
            return NotImplemented
        else:
            return -1*self

    def __sub__(self, other):
        from . import SIGNOMIALS_ENABLED
        if not SIGNOMIALS_ENABLED:
            return NotImplemented
        else:
            return self + -other

    def __rsub__(self, other):
        from . import SIGNOMIALS_ENABLED
        if not SIGNOMIALS_ENABLED:
            return NotImplemented
        else:
            return other + -self

    def __float__(self):
        if len(self.exps) == 1:
            if not self.exps[0]:
                return mag(self.c)
        raise AttributeError("float() can only be called"
                             " on monomials with no variable terms")
class Posynomial(Signomial):
    """A Signomial whose coefficients are all positive (Signomial.__init__
    downcasts to this class when no negative cs are found)."""
    pass
class Monomial(Posynomial):
    """A Posynomial with a single term, exposing .exp and .c."""

    def __rdiv__(self, other):
        "other / self: valid because the reciprocal of a Monomial is one too."
        if not isinstance(other, Numbers + (Posynomial,)):
            return NotImplemented
        return other * self**-1

    def __rtruediv__(self, other):
        "Python 3 spelling of __rdiv__."
        return self.__rdiv__(other)

    def __pow__(self, other):
        "Raise to any numeric power by scaling exponents and the coefficient."
        if not isinstance(other, Numbers):
            return NotImplemented
        return Monomial(self.exp*other, self.c**other)
class Constraint(Posynomial):
    """A posynomial constraint p1 <= p2, stored in normalized form.

    The normalized posynomial (p1/p2, or p1 - p2 + 1 when signomials are
    enabled) must evaluate to <= 1 for the constraint to hold.
    """

    def _set_operator(self, p1, p2):
        # Record printable operators depending on which side ended up as left
        if self.left is p1:
            self.oper_s = " <= "
            self.oper_l = " \\leq "
        else:
            self.oper_s = " >= "
            self.oper_l = " \\geq "

    def __str__(self):
        return str(self.left) + self.oper_s + str(self.right)

    def __repr__(self):
        return repr(self.left) + self.oper_s + repr(self.right)

    def _latex(self, unused=None):
        return self.left._latex() + self.oper_l + self.right._latex()

    def __init__(self, p1, p2):
        """Creates the normalized constraint p1 <= p2.

        Parameters
        ----------
        p1: Signomial (or anything Signomial() accepts)
            the lesser side
        p2: Signomial (or anything Signomial() accepts)
            the greater side
        """
        p1 = Signomial(p1)
        p2 = Signomial(p2)
        from . import SIGNOMIALS_ENABLED
        if SIGNOMIALS_ENABLED and not isinstance(p2, Monomial):
            # signomial form: p1 - p2 + 1 (normalized by p1's units if any)
            if p1.units:
                p = (p1 - p2)/p1.units + 1.0
            else:
                p = (p1 - p2) + 1.0
        else:
            # posynomial form: p1/p2
            p = p1 / p2
        if isinstance(p.cs, Quantity):
            try:
                p = p.to('dimensionless')
            except DimensionalityError:
                raise ValueError("constraints must have the same units"
                                 " on both sides: '%s' and '%s' can not"
                                 " be converted into each other."
                                 "" % (p1.units.units, p2.units.units))
        p1.units = None if all(p1.exps) else p1.units
        p2.units = None if all(p2.exps) else p2.units
        # fold any constant (empty-exponent) term into the rest
        for i, exp in enumerate(p.exps):
            if not exp:
                if p.cs[i] < 1:
                    if SIGNOMIALS_ENABLED:
                        const = p.cs[i]
                        p -= const
                        p /= (1-const)
                    else:
                        coeff = float(1 - p.cs[i])
                        p.cs = np.hstack((p.cs[:i], p.cs[i+1:]))
                        p.exps = p.exps[:i] + p.exps[i+1:]
                        p = p/coeff
                elif p.cs[i] > 1 and not SIGNOMIALS_ENABLED:
                    raise ValueError("infeasible constraint:"
                                     "constant term too large.")
        self.cs = p.cs
        self.exps = p.exps
        self.varlocs = p.varlocs
        # keep the side with fewer terms on the left for printing
        if len(p1.exps) == len(p2.exps):
            if len(p1.exps[0]) <= len(p2.exps[0]):
                self.left, self.right = p1, p2
            else:
                self.left, self.right = p2, p1
        elif len(p1.exps) < len(p2.exps):
            self.left, self.right = p1, p2
        else:
            self.left, self.right = p2, p1
        self._set_operator(p1, p2)
class MonoEQConstraint(Constraint):
    """An equality constraint between two monomials, held as a <=/>= pair."""

    def _set_operator(self, p1, p2):
        # equality prints with "=" and carries both one-sided constraints
        self.oper_l = " = "
        self.oper_s = " == "
        self.leq = Constraint(p2, p1)
        self.geq = Constraint(p1, p2)

    def __nonzero__(self):
        # a constraint not guaranteed to be satisfied evaluates as "False";
        # it holds only when the normalized form reduces to exactly 1
        coeff_is_unity = mag(self.cs[0]) == 1.0
        exponent_is_empty = self.exps[0] == {}
        return bool(coeff_is_unity and exponent_is_empty)

    def __bool__(self):
        # Python 3 name for __nonzero__
        return self.__nonzero__()
from .substitution import substitution, getsubs
Replace nomials.sub docstring with the master version to enable auto-merge
import numpy as np
from .small_classes import Strings, Numbers
from .posyarray import PosyArray
from .varkey import VarKey
from .small_scripts import diff, mono_approx
from .small_scripts import latex_num
from .small_scripts import sort_and_simplify
from .small_scripts import locate_vars
from .small_scripts import invalid_types_for_oper
from .small_scripts import mag, unitstr
from . import units as ureg
from . import DimensionalityError
Quantity = ureg.Quantity
Numbers += (Quantity,)
def vkSortBy(exp_item):
    """Sort key for exps.items() pairs: the VarKey's description items.

    ``exp_item`` is a (VarKey, exponent) pair; the key is built solely
    from the VarKey's ``descr`` dictionary.
    """
    varkey, _ = exp_item
    return list(varkey.descr.items())
class Signomial(object):
    """A representation of a signomial.

    Parameters
    ----------
    exps: tuple of dicts
        Exponent dicts for each monomial term
    cs: tuple
        Coefficient values for each monomial term
    varlocsandkeys: dict
        mapping from variable name to list of indices of monomial terms
        that variable appears in
    require_positive: bool
        If True and signomials not enabled, c <= 0 will raise ValueError

    Returns
    -------
    Signomial
    Posynomial (if the input has only positive cs)
    Monomial (if the input has one term and only positive cs)
    """
    def __init__(self, exps=None, cs=1, varlocsandkeys=None,
                 require_positive=True, **descr):
        # units of the resulting nomial; stays None for dimensionless input
        units = None
        if isinstance(exps, Numbers):
            # a bare number: treat it as the coefficient of a constant term
            cs = exps
            exps = {}
        if (isinstance(cs, Numbers)
                and (exps is None or isinstance(exps, Strings + (VarKey, dict)))):
            # building a Monomial
            if isinstance(exps, VarKey):
                exp = {exps: 1}
                units = exps.units
            elif exps is None or isinstance(exps, Strings):
                # a (possibly anonymous) variable described by **descr
                exp = {VarKey(exps, **descr): 1}
                descr = list(exp)[0].descr
                units = descr["units"] if "units" in descr else None
            elif isinstance(exps, dict):
                # copy so we can replace string keys with VarKeys in place
                exp = dict(exps)
                for key in exps:
                    if isinstance(key, Strings):
                        exp[VarKey(key)] = exp.pop(key)
            else:
                raise TypeError("could not make Monomial with %s" % type(exps))
            if isinstance(units, Quantity):
                cs = cs * units
            cs = [cs]
            exps = [exp]
        elif isinstance(exps, Signomial):
            # copy-construct from another nomial
            cs = exps.cs
            # NOTE(review): 'varlocs' is captured here but never used below
            # (varlocsandkeys is recomputed when None) -- verify
            varlocs = exps.varlocs
            exps = exps.exps
        else:
            # test for presence of length and identical lengths
            try:
                assert len(cs) == len(exps)
                exps_ = list(range(len(exps)))
                if not isinstance(cs[0], Quantity):
                    try:
                        cs = np.array(cs, dtype='float')
                    except ValueError:
                        raise ValueError("cannot add dimensioned and"
                                         " dimensionless monomials together.")
                else:
                    # derive a unit quantity of magnitude 1 from the first c
                    units = cs[0]/cs[0].magnitude
                    if units.dimensionless:
                        cs = [c * ureg.dimensionless for c in cs]
                    cs = [c.to(units).magnitude for c in cs] * units
                    if not all([c.dimensionality == units.dimensionality
                                for c in cs]):
                        raise ValueError("cannot add monomials of"
                                         " different units together")
                # normalize every exponent key to a VarKey
                for i in range(len(exps)):
                    exps_[i] = dict(exps[i])
                    for key in exps_[i]:
                        if isinstance(key, Strings+(Monomial,)):
                            exps_[i][VarKey(key)] = exps_[i].pop(key)
                exps = exps_
            except AssertionError:
                raise TypeError("cs and exps must have the same length.")
        # merge duplicate terms and establish a canonical ordering
        exps, cs = sort_and_simplify(exps, cs)
        if isinstance(cs, Quantity):
            any_negative = any((c.magnitude <= 0 for c in cs))
        else:
            any_negative = any((c <= 0 for c in cs))
        if any_negative:
            from . import SIGNOMIALS_ENABLED
            if require_positive and not SIGNOMIALS_ENABLED:
                raise ValueError("each c must be positive.")
        else:
            # all-positive coefficients: this is a Posynomial
            self.__class__ = Posynomial
        if isinstance(cs[0], Quantity):
            units = cs[0]/cs[0].magnitude
        elif "units" in descr:
            units = descr["units"]
            if isinstance(units, Quantity):
                cs = cs*units
            else:
                units = None
        self.cs = cs
        self.exps = exps
        self.units = units
        if len(exps) == 1:
            # single positive term: narrow further to Monomial
            if self.__class__ is Posynomial:
                self.__class__ = Monomial
            self.exp = exps[0]
            self.c = cs[0]
        if varlocsandkeys is None:
            varlocsandkeys = locate_vars(exps)
        self.varlocs, self.varkeys = varlocsandkeys
        # precomputed hash used by __hash__ (nomials are immutable by convention)
        self._hashvalue = hash(tuple(zip(self.exps, tuple(self.cs))))

    @property
    def value(self):
        """Substitute all variables that carry a 'value' in their descr.

        Returns a number if the result is a constant Monomial, else a nomial.
        """
        values = {vk: vk.descr["value"] for vk in self.varlocs.keys()
                  if "value" in vk.descr}
        p = self.sub(values)
        if isinstance(p, Monomial):
            if not p.exp:
                return p.c
        return p

    def to(self, arg):
        "Convert the coefficients' units via pint and rebuild the nomial."
        return Signomial(self.exps, self.cs.to(arg).tolist())

    def diff(self, var):
        "Differentiate with respect to a variable (name, VarKey, or Monomial)."
        if var in self.varkeys:
            var = self.varkeys[var]
        elif isinstance(var, Monomial):
            # unwrap a single-variable Monomial to its VarKey
            vks = list(var.exp)
            if len(vks) == 1:
                var = vks[0]
        exps, cs = diff(self, var)
        # derivatives of posynomials may have negative terms
        return Signomial(exps, cs, require_positive=False)

    def mono_approximation(self, x0):
        "Return the local Monomial approximation of this nomial at point x0."
        if isinstance(self, Monomial):
            raise TypeError("making a Monomial approximation of %s"
                            " is unnecessary; it's already a Monomial."
                            "" % str(self))
        else:
            c, exp = mono_approx(self, getsubs(self.varkeys, self.varlocs, x0))
            return Monomial(exp, c)

    def sub(self, substitutions, val=None, require_positive=True):
        """Returns a nomial with substitued values.

        Usage
        -----
        3 == (x**2 + y).sub({'x': 1, y: 2})
        3 == (x).gp.sub(x, 3)

        Arguments
        ---------
        substitutions : dict or key
            Either a dictionary whose keys are strings, Variables, or VarKeys,
            and whose values are numbers, or a string, Variable or Varkey.
        val : number (optional)
            If the substitutions entry is a single key, val holds the value
        require_positive : boolean (optional, default is True)
            Controls whether the returned value can be a signomial.

        Returns
        -------
        Returns substituted nomial.
        """
        varlocs, exps, cs, subs = substitution(self.varlocs, self.varkeys,
                                               self.exps, self.cs,
                                               substitutions, val)
        return Signomial(exps, cs, units=self.units,
                         require_positive=require_positive)

    def subcmag(self, substitutions, val=None):
        """Substitute everything and return the resulting scalar magnitude.

        Raises ValueError if any variable is left unsubstituted.
        """
        varlocs, exps, cs, subs = substitution(self.varlocs, self.varkeys,
                                               self.exps, mag(self.cs),
                                               substitutions, val)
        if any(exps):
            raise ValueError("could not substitute for all variables.")
        return mag(cs).sum()

    def prod(self):
        # scalar nomials are their own product (PosyArray compatibility)
        return self

    def sum(self):
        # scalar nomials are their own sum (PosyArray compatibility)
        return self

    # hashing, immutability, Signomial inequality
    def __hash__(self):
        return self._hashvalue

    def __ne__(self, other):
        # two nomials differ when either exponents or coefficients differ;
        # comparisons with non-nomials are never "not equal" here
        if isinstance(other, Signomial):
            return not (self.exps == other.exps and self.cs == other.cs)
        else:
            return False

    # constraint generation
    def __eq__(self, other):
        # if at least one is a monomial, return a constraint
        mons = Numbers+(Monomial,)
        if isinstance(other, mons) and isinstance(self, mons):
            return MonoEQConstraint(self, other)
        elif isinstance(other, Signomial) and isinstance(self, Signomial):
            if self.exps == other.exps:
                if isinstance(self.cs, Quantity):
                    return all(self.cs.magnitude <= other.cs)
                else:
                    return all(self.cs <= other.cs)
            else:
                return False
        else:
            return False

    def __le__(self, other):
        # self <= other becomes the posynomial constraint self/other <= 1
        if isinstance(other, PosyArray):
            return NotImplemented
        else:
            return Constraint(self, other)

    def __ge__(self, other):
        # self >= other becomes the posynomial constraint other/self <= 1
        if isinstance(other, PosyArray):
            return NotImplemented
        else:
            return Constraint(other, self)

    def __lt__(self, other):
        # strict inequalities are not meaningful for nomial constraints
        invalid_types_for_oper("<", self, other)

    def __gt__(self, other):
        invalid_types_for_oper(">", self, other)

    def __str__(self, mult_symbol='*'):
        "Human-readable form, terms sorted alphabetically, units appended."
        mstrs = []
        for c, exp in zip(self.cs, self.exps):
            varstrs = ['%s**%.2g' % (var, x) if x != 1 else "%s" % var
                       for (var, x) in sorted(exp.items(), key=vkSortBy) if x != 0]
            c = mag(c)
            cstr = "%.2g" % c
            if cstr == "-1":
                mstrs.append("-" + mult_symbol.join(varstrs))
            else:
                # hide a coefficient of 1 unless the term is a bare constant
                cstr = [cstr] if cstr != "1" or not varstrs else []
                mstrs.append(mult_symbol.join(cstr + varstrs))
        return " + ".join(sorted(mstrs)) + unitstr(self.units, ", units='%s'")

    def descr(self, descr):
        # NOTE(review): this rebinds self.descr to the argument, shadowing
        # this method on the instance from then on -- verify intended
        self.descr = descr
        return self

    def __repr__(self):
        return "gpkit.%s(%s)" % (self.__class__.__name__, str(self))

    def _latex(self, unused=None):
        "For pretty printing with Sympy"
        mstrs = []
        for c, exp in zip(self.cs, self.exps):
            # split variables by sign of exponent so negatives render as a fraction
            pos_vars, neg_vars = [], []
            for var, x in sorted(exp.items(), key=vkSortBy):
                if x > 0:
                    pos_vars.append((var._latex(), x))
                elif x < 0:
                    neg_vars.append((var._latex(), x))
            pvarstrs = ['%s^{%.2g}' % (varl, x) if "%.2g" % x != "1" else varl
                        for (varl, x) in pos_vars]
            nvarstrs = ['%s^{%.2g}' % (varl, -x)
                        if "%.2g" % -x != "1" else varl
                        for (varl, x) in neg_vars]
            pvarstr = ' '.join(pvarstrs)
            nvarstr = ' '.join(nvarstrs)
            c = mag(c)
            cstr = "%.2g" % c
            if pos_vars and (cstr == "1" or cstr == "-1"):
                # drop the trailing "1"; keeps a lone "-" for -1 coefficients
                cstr = cstr[:-1]
            else:
                cstr = latex_num(c)
            if not pos_vars and not neg_vars:
                mstrs.append("%s" % cstr)
            elif pos_vars and not neg_vars:
                mstrs.append("%s%s" % (cstr, pvarstr))
            elif neg_vars and not pos_vars:
                mstrs.append("\\frac{%s}{%s}" % (cstr, nvarstr))
            elif pos_vars and neg_vars:
                mstrs.append("%s\\frac{%s}{%s}" % (cstr, pvarstr, nvarstr))
        units = unitstr(self.units, "\mathrm{\\left[ %s \\right]}", "L~")
        units_tf = units.replace("frac", "tfrac").replace("\\cdot", "\\cdot ")
        return " + ".join(sorted(mstrs)) + units_tf

    # posynomial arithmetic
    def __add__(self, other):
        if isinstance(other, Numbers):
            if other == 0:
                # adding zero: return an equivalent nomial, reusing varlocs
                return Signomial(self.exps, self.cs,
                                 (self.varlocs, self.varkeys))
            else:
                # append a constant term (empty exponent dict)
                return Signomial(self.exps + ({},),
                                 self.cs.tolist() + [other])
        elif isinstance(other, Signomial):
            return Signomial(self.exps + other.exps,
                             self.cs.tolist() + other.cs.tolist())
        elif isinstance(other, PosyArray):
            return np.array(self)+other
        else:
            return NotImplemented

    def __radd__(self, other):
        return self + other

    def __mul__(self, other):
        if isinstance(other, Numbers):
            if not other:
                # assume other is multiplicative zero
                return other
            return Signomial(self.exps, other*self.cs,
                             (self.varlocs, self.varkeys))
        elif isinstance(other, Signomial):
            # coefficients of the product: all pairwise products
            C = np.outer(self.cs, other.cs)
            if isinstance(self.cs, Quantity) or isinstance(other.cs, Quantity):
                if not isinstance(self.cs, Quantity):
                    sunits = ureg.dimensionless
                else:
                    sunits = self.cs[0]/self.cs[0].magnitude
                if not isinstance(other.cs, Quantity):
                    ounits = ureg.dimensionless
                else:
                    ounits = other.cs[0]/other.cs[0].magnitude
                # hack fix for pint not working with np.outer
                C = C * sunits * ounits
            Exps = np.empty((len(self.exps), len(other.exps)), dtype="object")
            for i, exp_s in enumerate(self.exps):
                for j, exp_o in enumerate(other.exps):
                    Exps[i, j] = exp_s + exp_o
            return Signomial(Exps.flatten(), C.flatten())
        elif isinstance(other, PosyArray):
            return np.array(self)*other
        else:
            return NotImplemented

    def __rmul__(self, other):
        return self * other

    def __div__(self, other):
        # division is only closed when the divisor is a number or Monomial
        if isinstance(other, Numbers):
            return Signomial(self.exps, self.cs/other)
        elif isinstance(other, Monomial):
            exps = [exp - other.exp for exp in self.exps]
            return Signomial(exps, self.cs/other.c)
        elif isinstance(other, PosyArray):
            return np.array(self)/other
        else:
            return NotImplemented

    def __truediv__(self, other):
        # Python 3 division delegates to the Python 2 implementation
        return self.__div__(other)

    def __pow__(self, x):
        # general signomials support only nonnegative integer powers,
        # implemented by repeated multiplication
        if isinstance(x, int):
            if x >= 0:
                p = Monomial({}, 1)
                while x > 0:
                    p *= self
                    x -= 1
                return p
            else:
                raise ValueError("Signomial are only closed under"
                                 " nonnegative integer exponents.")
        else:
            return NotImplemented

    def __neg__(self):
        # negation only makes sense when signomials are enabled
        from . import SIGNOMIALS_ENABLED
        if not SIGNOMIALS_ENABLED:
            return NotImplemented
        else:
            return -1*self

    def __sub__(self, other):
        from . import SIGNOMIALS_ENABLED
        if not SIGNOMIALS_ENABLED:
            return NotImplemented
        else:
            return self + -other

    def __rsub__(self, other):
        from . import SIGNOMIALS_ENABLED
        if not SIGNOMIALS_ENABLED:
            return NotImplemented
        else:
            return other + -self

    def __float__(self):
        # only a constant Monomial (single term, no variables) has a float value
        if len(self.exps) == 1:
            if not self.exps[0]:
                return mag(self.c)
        raise AttributeError("float() can only be called"
                             " on monomials with no variable terms")
class Posynomial(Signomial):
    """A Signomial with strictly positive coefficients.

    Instances are produced by Signomial.__init__, which narrows the class
    when no coefficient is negative or zero.
    """
    pass
class Monomial(Posynomial):
    """A Posynomial with a single term (one exponent dict, one coefficient).

    Instances are produced by Signomial.__init__, which narrows the class
    when the simplified nomial has exactly one positive term.
    """
    def __rdiv__(self, other):
        # number/Monomial and Posynomial/Monomial stay closed: invert and multiply
        if isinstance(other, Numbers+(Posynomial,)):
            return other * self**-1
        else:
            return NotImplemented
    def __rtruediv__(self, other):
        # Python 3 division delegates to the Python 2 implementation
        return self.__rdiv__(other)
    def __pow__(self, other):
        # unlike general Signomials, Monomials are closed under any numeric power
        if isinstance(other, Numbers):
            return Monomial(self.exp*other, self.c**other)
        else:
            return NotImplemented
class Constraint(Posynomial):
    """A constraint of the form p1 <= p2, stored in normalized form.

    The initializer folds both sides into a single posynomial ``p`` with
    the implicit meaning p <= 1; the original sides are kept as
    ``self.left`` / ``self.right`` for display.
    """
    def _set_operator(self, p1, p2):
        # choose display operators based on which side ended up as 'left'
        if self.left is p1:
            self.oper_s = " <= "
            self.oper_l = " \\leq "
        else:
            self.oper_s = " >= "
            self.oper_l = " \\geq "
    def __str__(self):
        return str(self.left) + self.oper_s + str(self.right)
    def __repr__(self):
        return repr(self.left) + self.oper_s + repr(self.right)
    def _latex(self, unused=None):
        return self.left._latex() + self.oper_l + self.right._latex()
    def __init__(self, p1, p2):
        # coerce both sides to nomials
        p1 = Signomial(p1)
        p2 = Signomial(p2)
        from . import SIGNOMIALS_ENABLED
        if SIGNOMIALS_ENABLED and not isinstance(p2, Monomial):
            # signomial constraint: normalize to (p1 - p2) + 1 <= 1
            if p1.units:
                p = (p1 - p2)/p1.units + 1.0
            else:
                p = (p1 - p2) + 1.0
        else:
            # posynomial constraint: normalize to p1/p2 <= 1
            p = p1 / p2
        if isinstance(p.cs, Quantity):
            # both sides must share dimensionality for p <= 1 to make sense
            try:
                p = p.to('dimensionless')
            except DimensionalityError:
                raise ValueError("constraints must have the same units"
                                 " on both sides: '%s' and '%s' can not"
                                 " be converted into each other."
                                 "" % (p1.units.units, p2.units.units))
        # NOTE(review): drops stored units when every term has variables
        # (all exps truthy); verify this matches the intended display logic
        p1.units = None if all(p1.exps) else p1.units
        p2.units = None if all(p2.exps) else p2.units
        for i, exp in enumerate(p.exps):
            if not exp:
                # a constant term: fold it into the "<= 1" normalization
                if p.cs[i] < 1:
                    if SIGNOMIALS_ENABLED:
                        const = p.cs[i]
                        p -= const
                        p /= (1-const)
                    else:
                        # remove the constant term and rescale the rest
                        coeff = float(1 - p.cs[i])
                        p.cs = np.hstack((p.cs[:i], p.cs[i+1:]))
                        p.exps = p.exps[:i] + p.exps[i+1:]
                        p = p/coeff
                elif p.cs[i] > 1 and not SIGNOMIALS_ENABLED:
                    raise ValueError("infeasible constraint:"
                                     "constant term too large.")
        self.cs = p.cs
        self.exps = p.exps
        self.varlocs = p.varlocs
        # keep the "simpler" side on the left for display purposes
        if len(p1.exps) == len(p2.exps):
            if len(p1.exps[0]) <= len(p2.exps[0]):
                self.left, self.right = p1, p2
            else:
                self.left, self.right = p2, p1
        elif len(p1.exps) < len(p2.exps):
            self.left, self.right = p1, p2
        else:
            self.left, self.right = p2, p1
        self._set_operator(p1, p2)
class MonoEQConstraint(Constraint):
    """An equality constraint between two monomials.

    Represented as the pair of one-sided posynomial constraints
    ``self.leq`` and ``self.geq``.
    """
    def _set_operator(self, p1, p2):
        self.oper_l = " = "
        self.oper_s = " == "
        self.leq = Constraint(p2, p1)
        self.geq = Constraint(p1, p2)
    def __nonzero__(self):
        # a constraint not guaranteed to be satisfied
        # evaluates as "False"
        return bool(mag(self.cs[0]) == 1.0 and self.exps[0] == {})
    def __bool__(self):
        # Python 3 truth protocol delegates to __nonzero__
        return self.__nonzero__()
from .substitution import substitution, getsubs
|
#!/usr/bin/python
## Binary Analysis Tool
## Copyright 2009-2015 Armijn Hemel for Tjaldur Software Governance Solutions
## Licensed under Apache 2.0, see LICENSE file for details
'''
This script tries to analyse binary blobs, using a "brute force" approach
and pretty print the analysis in a simple XML format.
The script has a few separate scanning phases:
1. marker scanning phase, to search for specific markers (compression, file systems,
media formats), if available. This information is later used to filter scans and to
carve files.
2. prerun phase for tagging files. This is a first big rough sweep determining what kind of
files these are, to prevent spending too much time on useless scanning in the following phases.
Some things that are tagged here are text files, XML files, various graphics formats and
some other files.
3. unpack phase for unpacking files. In this phase several methods for unpacking files are
run, using the information from the marker scanning phase (if a file system file or
compressed file actually uses markers, which is not always the case). Also some simple
metadata about files is recorded in this phase. This method runs recursively: if a file
system was found and unpacked all the scans from steps 1, 2, 3 are run on the files that
were unpacked.
4. individual file scanning phase. Here each file will be inspected individually. Based on
the configuration that was given this could be basically anything.
5. output phase. Using a pretty printer a report is pretty printed. The pretty printer is
set in the configuration file and is optional.
6. postrun phase. In this phase methods that are not necessary for generating output, but
which should be run anyway, are run. Examples are generating pictures or running statistics.
7. packing phase. In this phase several datafiles, plus the state of the running program,
are packed in a tar file.
'''
import sys, os, os.path, magic, hashlib, subprocess, tempfile, shutil, stat, multiprocessing, cPickle, glob, tarfile, copy, gzip, Queue
from optparse import OptionParser
import datetime, re
import extractor
import prerun, fsmagic
from multiprocessing import Process, Lock
from multiprocessing.sharedctypes import Value, Array
ms = magic.open(magic.MAGIC_NONE)
ms.load()
## convenience method to merge ranges that overlap in a blacklist
## We do multiple passes to make sure everything is correctly merged
## Example:
## [(1,3), (2,4), (5,7), (3,7)] would result in [(1,7)]
def mergeBlacklist(blacklist):
    ## Repeatedly merge (lower, upper) ranges that connect to the range
    ## built so far, passing over the list until the result stops
    ## changing so chains of overlaps collapse into a single range.
    ## Example: [(1,3), (2,4), (5,7), (3,7)] becomes [(1,7)]
    if not blacklist:
        return []
    previous = []
    while previous != blacklist:
        merged = [blacklist[0]]
        for candidate in blacklist[1:]:
            lower, upper = merged[-1]
            if upper >= candidate[0] or lower >= candidate[0]:
                ## candidate connects to the last merged range: widen it
                if upper <= candidate[1]:
                    upper = candidate[1]
                if lower >= candidate[0]:
                    lower = candidate[0]
                merged[-1] = (lower, upper)
                continue
            ## disjoint range, so just append
            merged.append(candidate)
        previous = blacklist
        blacklist = merged
    return blacklist
def runSetup(setupscan, debug=False):
    ## Run the 'setup' method of one scan module once, before scanning starts.
    ## Returns whatever the setup method returns (typically a result status
    ## plus an adjusted environment).
    module = setupscan['module']
    method = setupscan['setup']
    if debug:
        print >>sys.stderr, module, method
        sys.stderr.flush()
    ## dynamic import/dispatch: scan modules are only known from configuration
    exec "from %s import %s as bat_%s" % (module, method, method)
    scanres = eval("bat_%s(setupscan['environment'], debug=debug)" % (method))
    return scanres
## method to filter scans, based on the tags that were found for a
## file, plus a list of tags that the scan should skip.
## This is done to avoid scans running unnecessarily.
def filterScans(scans, tags):
    ## Drop scans that should not run for a file with the given tags:
    ## a scan with 'scanonly' set only survives when at least one of its
    ## colon-separated tags is present, and a scan with 'noscan' set is
    ## dropped when any of its colon-separated tags is present.
    tagset = set(tags)
    kept = []
    for candidate in scans:
        scanonly = candidate['scanonly']
        if scanonly is not None and not tagset.intersection(scanonly.split(':')):
            continue
        noscan = candidate['noscan']
        if noscan is not None and tagset.intersection(noscan.split(':')):
            continue
        kept.append(candidate)
    return kept
## compute a SHA256 hash. This is done in chunks to prevent a big file from
## being read in its entirety at once, slowing down a machine.
def gethash(path, filename):
    ## Return the SHA256 hex digest of path/filename. The file is read in
    ## binary mode in 10 MB chunks so hashing a big file does not pull it
    ## into memory all at once, slowing down a machine.
    ## Binary mode is required: text mode would corrupt the digest on
    ## platforms that translate line endings (and on Python 3 the old
    ## "!= ''" sentinel never matches the bytes read() returns).
    h = hashlib.new('sha256')
    scanfile = open(os.path.join(path, filename), 'rb')
    try:
        hashdata = scanfile.read(10000000)
        while hashdata:
            h.update(hashdata)
            hashdata = scanfile.read(10000000)
    finally:
        scanfile.close()
    return h.hexdigest()
## tag files based on extension and a few simple tests and possibly skip
## the generic marker search based on the results. This is to prevent
## a lot of I/O for large files.
## Example: ZIP files and JAR files often have a known extension. With
## a few simple tests it is easy to see if the entire file is a ZIP file
## or not.
## returns a dictionary with offsets
## TODO: refactor so code can be shared with fwunpack.py
def tagKnownExtension(filename):
    ## Tag a file based on its extension plus a few cheap checks, so the
    ## expensive generic marker search can be skipped for common formats.
    ## For ZIP-like extensions (zip/jar/apk) the file is verified with
    ## zipinfo; a whole-file ZIP is tagged 'zip' (and possibly 'encrypted').
    ## Returns a (tags, offsets) tuple.
    ## TODO: refactor so code can be shared with fwunpack.py
    offsets = {}
    tags = []
    nameparts = filename.rsplit('.', 1)
    if len(nameparts) == 1:
        ## no extension at all, nothing to conclude
        return (tags, offsets)
    extension = nameparts[-1].lower()
    if extension in ('zip', 'jar', 'apk'):
        infile = open(filename, 'rb')
        header = infile.read(10)
        infile.close()
        ## the ZIP magic has to be at the very start of the file
        if header.find(fsmagic.fsmagic['zip']) != 0:
            return (tags, offsets)
        p = subprocess.Popen(['zipinfo', '-v', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
        (stanout, stanerr) = p.communicate()
        res = re.search("Actual[\w\s]*end-(?:of-)?cent(?:ral)?-dir record[\w\s]*:\s*(\d+) \(", stanout)
        if res == None:
            return (tags, offsets)
        endofcentraldir = int(res.groups(0)[0])
        ## TODO: determine commentsize
        commentsize = 0
        if endofcentraldir + 22 + commentsize == os.stat(filename).st_size:
            ## the central directory ends where the file ends, so the
            ## whole file is one ZIP archive
            offsets['zip'] = [0]
            tags.append('zip')
            ## check if the file is encrypted, if so bail out
            res = re.search("file security status:\s+(\w*)\sencrypted", stanout)
            if res == None:
                return ([], offsets)
            if res.groups(0)[0] != 'not':
                tags.append('encrypted')
            return (tags, offsets)
    return (tags, offsets)
## scan a single file, possibly unpack and recurse
def scan(scanqueue, reportqueue, leafqueue, scans, prerunscans, magicscans, optmagicscans, processid, hashdict, llock, template, unpacktempdir):
    ## Worker loop: pull (path, filename, ...) tasks from scanqueue, tag and
    ## possibly unpack each file (queueing newly unpacked files back onto
    ## scanqueue for recursion), and emit leaf-scan tasks on leafqueue plus
    ## unpack reports on reportqueue.
    ## Build per-scan lookup tables for the prerun scans once, up front:
    ## prerunignore maps scan name -> tags that disable it, prerunmagic maps
    ## scan name -> marker types that enable it.
    prerunignore = {}
    prerunmagic = {}
    for prerunscan in prerunscans:
        if prerunscan.has_key('noscan'):
            if not prerunscan['noscan'] == None:
                noscans = prerunscan['noscan'].split(':')
                prerunignore[prerunscan['name']] = noscans
        if prerunscan.has_key('magic'):
            if not prerunscan['magic'] == None:
                magics = prerunscan['magic'].split(':')
                if not prerunmagic.has_key(prerunscan['name']):
                    prerunmagic[prerunscan['name']] = magics
                else:
                    prerunmagic[prerunscan['name']] = prerunmagic[prerunscan['name']] + magics
        if prerunscan.has_key('optmagic'):
            if not prerunscan['optmagic'] == None:
                magics = prerunscan['optmagic'].split(':')
                if not prerunmagic.has_key(prerunscan['name']):
                    prerunmagic[prerunscan['name']] = magics
                else:
                    prerunmagic[prerunscan['name']] = prerunmagic[prerunscan['name']] + magics
    while True:
        ## reset the reports, blacklist, offsets and tags for each new scan
        leaftasks = []
        unpackreports = {}
        blacklist = []
        (path, filename, lenscandir, tempdir, debug, tags) = scanqueue.get()
        lentempdir = len(tempdir)
        ## absolute path of the file in the file system (so including temporary dir)
        filetoscan = os.path.join(path, filename)
        ## relative path of the file in the temporary dir
        relfiletoscan = filetoscan[lentempdir:]
        if relfiletoscan.startswith('/'):
            relfiletoscan = relfiletoscan[1:]
        unpackreports[relfiletoscan] = {}
        unpackreports[relfiletoscan]['name'] = filename
        ## libmagic description of the file (shadows the 'magic' module here)
        magic = ms.file(filetoscan)
        unpackreports[relfiletoscan]['magic'] = magic
        ## Add both the path to indicate the position inside the file sytem
        ## or file that was unpacked, as well as the position of the files as unpacked
        ## by BAT, convenient for later analysis of binaries.
        ## In case of squashfs remove the "squashfs-root" part of the temporary
        ## directory too, if it is present (not always).
        ## TODO: validate if this is still needed
        storepath = path[lenscandir:].replace("/squashfs-root", "")
        unpackreports[relfiletoscan]['path'] = storepath
        unpackreports[relfiletoscan]['realpath'] = path
        ## symlinks are only recorded, never scanned further
        if os.path.islink(filetoscan):
            tags.append('symlink')
            unpackreports[relfiletoscan]['tags'] = tags
            for l in leaftasks:
                leafqueue.put(l)
            for u in unpackreports:
                reportqueue.put({u: unpackreports[u]})
            scanqueue.task_done()
            continue
        ## no use checking pipes, sockets, device files, etcetera
        if not os.path.isfile(filetoscan) and not os.path.isdir(filetoscan):
            for l in leaftasks:
                leafqueue.put(l)
            for u in unpackreports:
                reportqueue.put({u: unpackreports[u]})
            scanqueue.task_done()
            continue
        filesize = os.lstat(filetoscan).st_size
        unpackreports[relfiletoscan]['size'] = filesize
        ## empty file, not interested in further scanning
        if filesize == 0:
            tags.append('empty')
            unpackreports[relfiletoscan]['tags'] = tags
            for l in leaftasks:
                leafqueue.put(l)
            for u in unpackreports:
                reportqueue.put({u: unpackreports[u]})
            scanqueue.task_done()
            continue
        ## Store the hash of the file for identification and for possibly
        ## querying the knowledgebase later on.
        filehash = gethash(path, filename)
        unpackreports[relfiletoscan]['sha256'] = filehash
        ## scan for markers
        tagOffsets = tagKnownExtension(filetoscan)
        (newtags, offsets) = tagOffsets
        tags = tags + newtags
        if offsets == {}:
            ## extension check was inconclusive: do the generic marker search
            offsets = prerun.genericMarkerSearch(filetoscan, magicscans, optmagicscans)
        if "encrypted" in tags:
            ## encrypted files cannot be unpacked, only leaf-scanned
            leaftasks.append((filetoscan, magic, tags, blacklist, filehash, filesize))
            for l in leaftasks:
                leafqueue.put(l)
            unpackreports[relfiletoscan]['tags'] = tags
            for u in unpackreports:
                reportqueue.put({u: unpackreports[u]})
            scanqueue.task_done()
            ## NOTE(review): no 'continue' here, so execution falls through
            ## and task_done() is reached again at the bottom -- verify
        ## we have all offsets with markers here, so scans that are not needed
        ## can be filtered out.
        ## Also keep track of the "most promising" scans (offset 0) to try
        ## them first.
        filterscans = set()
        zerooffsets = set()
        for magictype in offsets:
            if offsets[magictype] != []:
                filterscans.add(magictype)
                if offsets[magictype][0] - fsmagic.correction.get(magictype, 0) == 0:
                    zerooffsets.add(magictype)
        ## acquire the lock for the shared dictionary to see if this file was already
        ## scanned, or is in the process of being scanned.
        llock.acquire()
        if hashdict.has_key(filehash):
            ## if the hash is alreay there, return
            unpackreports[relfiletoscan]['tags'] = ['duplicate']
            for u in unpackreports:
                reportqueue.put({u: unpackreports[u]})
            llock.release()
            scanqueue.task_done()
            continue
        else:
            hashdict[filehash] = relfiletoscan
            llock.release()
        ## prerun scans should be run before any of the other scans
        for prerunscan in prerunscans:
            ignore = False
            if prerunscan.has_key('extensionsignore'):
                extensionsignore = prerunscan['extensionsignore'].split(':')
                for e in extensionsignore:
                    if filetoscan.endswith(e):
                        ignore = True
                        break
            if ignore:
                continue
            if prerunignore.has_key(prerunscan['name']):
                if set(tags).intersection(set(prerunignore[prerunscan['name']])) != set():
                    continue
            if prerunmagic.has_key(prerunscan['name']):
                if set(prerunmagic[prerunscan['name']]).intersection(filterscans) == set():
                    continue
            module = prerunscan['module']
            method = prerunscan['method']
            if debug:
                print >>sys.stderr, module, method, filetoscan, datetime.datetime.utcnow().isoformat()
                sys.stderr.flush()
            ## dynamic dispatch: scan modules are only known from configuration
            exec "from %s import %s as bat_%s" % (module, method, method)
            scantags = eval("bat_%s(filetoscan, tempdir, tags, offsets, prerunscan['environment'], debug=debug, unpacktempdir=unpacktempdir)" % (method))
            ## append the tag results. These will be used later to be able to specifically filter
            ## out files
            if scantags != []:
                tags = tags + scantags
        ## Reorder the scans based on information about offsets. If one scan has a
        ## match for offset 0 (after correction of the offset, like for tar, gzip,
        ## iso9660, etc.) make sure it is run first.
        unpackscans = []
        scanfirst = []
        ## Filter scans
        filteredscans = filterScans(scans, tags)
        for unpackscan in filteredscans:
            if unpackscan['magic'] != None:
                scanmagic = unpackscan['magic'].split(':')
                if set(scanmagic).intersection(filterscans) != set():
                    if set(scanmagic).intersection(zerooffsets) != set():
                        scanfirst.append(unpackscan)
                    else:
                        unpackscans.append(unpackscan)
            else:
                unpackscans.append(unpackscan)
        ## sort 'unpackscans' in decreasing priority, so highest
        ## priority scans are run first.
        ## TODO: sort per priority per offset for scans that are the most promising
        ## but only for files that are fairly big, otherwise it has no use at all
        ## since scanning smaller files is very fast.
        unpackscans = sorted(unpackscans, key=lambda x: x['priority'], reverse=True)
        '''
        if unpackscans != [] and filesize > 10000000:
                ## first determine the priorities
                prios = map(lambda x: x['priority'], unpackscans)

                ## sort them in reverse order
                prios = sorted(prios, reverse=True)

                ## sort per priority based on first offset for each scan
                for p in prios:
                        sortprios = filter(lambda x: x['priority'] == p, unpackscans)
                        ## now sort sortprios based on value of the first offset
        '''
        ## prepend the most promising scans at offset 0 (if any)
        scanfirst = sorted(scanfirst, key=lambda x: x['priority'], reverse=True)
        unpackscans = scanfirst + unpackscans
        unpackreports[relfiletoscan]['scans'] = []
        unpacked = False
        for unpackscan in unpackscans:
            ## the whole file has already been scanned by other scans, so
            ## continue with the leaf scans.
            if extractor.inblacklist(0, blacklist) == filesize:
                break
            if unpackscan['noscan'] != None:
                noscans = unpackscan['noscan'].split(':')
                if list(set(tags).intersection(set(noscans))) != []:
                    continue
            ignore = False
            if unpackscan.has_key('extensionsignore'):
                extensionsignore = unpackscan['extensionsignore'].split(':')
                for e in extensionsignore:
                    if filetoscan.endswith(e):
                        ignore = True
                        break
            if ignore:
                continue
            module = unpackscan['module']
            method = unpackscan['method']
            if debug:
                print >>sys.stderr, module, method, filetoscan, datetime.datetime.utcnow().isoformat()
                sys.stderr.flush()
            ## make a copy before changing the environment
            newenv = copy.deepcopy(unpackscan['environment'])
            newenv['BAT_UNPACKED'] = unpacked
            if template != None:
                ## fill in the naming template with as many fields as it has
                templen = len(re.findall('%s', template))
                if templen == 2:
                    newenv['TEMPLATE'] = template % (os.path.basename(filetoscan), unpackscan['name'])
                elif templen == 1:
                    newenv['TEMPLATE'] = template % unpackscan['name']
                else:
                    newenv['TEMPLATE'] = template
            ## return value is the temporary dir, plus offset in the parent file
            ## plus a blacklist containing blacklisted ranges for the *original*
            ## file and a hash with offsets for each marker.
            exec "from %s import %s as bat_%s" % (module, method, method)
            scanres = eval("bat_%s(filetoscan, tempdir, blacklist, offsets, newenv, debug=debug)" % (method))
            ## result is either empty, or contains offsets, tags and hints
            if len(scanres) == 4:
                (diroffsets, blacklist, scantags, hints) = scanres
                tags = list(set(tags + scantags))
            if len(diroffsets) == 0:
                continue
            #blacklist = mergeBlacklist(blacklist)
            ## each diroffset is a (path, offset) tuple
            for diroffset in diroffsets:
                report = {}
                if diroffset == None:
                    continue
                unpacked = True
                scandir = diroffset[0]
                ## recursively scan all files in the directory
                osgen = os.walk(scandir)
                scanreports = []
                scantasks = []
                try:
                    while True:
                        i = osgen.next()
                        ## make sure all directories can be accessed
                        for d in i[1]:
                            directoryname = os.path.join(i[0], d)
                            if not os.path.islink(directoryname):
                                os.chmod(directoryname, stat.S_IRUSR|stat.S_IWUSR|stat.S_IXUSR)
                        for p in i[2]:
                            try:
                                if not os.path.islink("%s/%s" % (i[0], p)):
                                    os.chmod("%s/%s" % (i[0], p), stat.S_IRUSR|stat.S_IWUSR|stat.S_IXUSR)
                                ## propagate the 'temporary' tag when the unpack
                                ## covered the whole parent file
                                if "temporary" in tags and diroffset[1] == 0 and diroffset[2] == filesize:
                                    scantasks.append((i[0], p, len(scandir), tempdir, debug, ['temporary']))
                                else:
                                    scantasks.append((i[0], p, len(scandir), tempdir, debug, []))
                                relscanpath = "%s/%s" % (i[0][lentempdir:], p)
                                if relscanpath.startswith('/'):
                                    relscanpath = relscanpath[1:]
                                scanreports.append(relscanpath)
                            except Exception, e:
                                pass
                except StopIteration:
                    ## directory walk finished: queue the new files for scanning
                    for s in scantasks:
                        scanqueue.put(s)
                unpackreports[relfiletoscan]['scans'].append({'scanname': unpackscan['name'], 'scanreports': scanreports, 'offset': diroffset[1], 'size': diroffset[2]})
        unpackreports[relfiletoscan]['tags'] = tags
        if not unpacked and 'temporary' in tags:
            ## a temporary carved file that yielded nothing: remove it
            os.unlink(filetoscan)
            for l in leaftasks:
                leafqueue.put(l)
            for u in unpackreports:
                reportqueue.put({u: unpackreports[u]})
        else:
            leaftasks.append((filetoscan, magic, tags, blacklist, filehash, filesize))
            for l in leaftasks:
                leafqueue.put(l)
            for u in unpackreports:
                reportqueue.put({u: unpackreports[u]})
        scanqueue.task_done()
def leafScan((filetoscan, magic, scans, tags, blacklist, filehash, topleveldir, debug, unpacktempdir)):
    ## Run every configured leaf scan on one file and pickle the combined
    ## report to <topleveldir>/filereports/<sha256>-filereport.pickle
    ## (only if no report for this hash exists yet).
    ## Returns (filehash, newly added tags).
    reports = {}
    newtags = []
    for leafscan in scans:
        ignore = False
        if leafscan.has_key('extensionsignore'):
            extensionsignore = leafscan['extensionsignore'].split(':')
            for e in extensionsignore:
                if filetoscan.endswith(e):
                    ignore = True
                    break
        if ignore:
            continue
        report = {}
        module = leafscan['module']
        method = leafscan['method']
        scandebug = False
        if leafscan.has_key('debug'):
            scandebug = True
            debug = True
        if debug:
            print >>sys.stderr, method, filetoscan, datetime.datetime.utcnow().isoformat()
            sys.stderr.flush()
            scandebug = True
        ## dynamic dispatch: scan modules are only known from configuration
        exec "from %s import %s as bat_%s" % (module, method, method)
        res = eval("bat_%s(filetoscan, tags, blacklist, leafscan['environment'], scandebug=scandebug, unpacktempdir=unpacktempdir)" % (method))
        if res != None:
            (nt, leafres) = res
            reports[leafscan['name']] = leafres
            newtags = newtags + nt
    tags += list(set(newtags))
    reports['tags'] = list(set(tags))
    ## write pickles with information to disk here to reduce memory usage
    ## NOTE(review): bare except here swallows any os.stat error, not just
    ## "file does not exist" -- verify that is intended
    try:
        os.stat('%s/filereports/%s-filereport.pickle' % (topleveldir,filehash))
    except:
        picklefile = open('%s/filereports/%s-filereport.pickle' % (topleveldir,filehash), 'wb')
        cPickle.dump(reports, picklefile)
        picklefile.close()
    return (filehash, list(set(newtags)))
def aggregatescan(unpackreports, scans, scantempdir, topleveldir, scan_binary, debug, unpacktempdir):
    ## aggregate scans look at the entire result and possibly modify it.
    ## The best example is JAR files: individual .class files will not be
    ## very significant (or even insignificant), but combined results are.
    ## Because aggregate scans have to look at everything as a whole, these
    ## cannot be run in parallel.
    if scans['batconfig'].has_key('processors'):
        processors = scans['batconfig']['processors']
    else:
        try:
            processors = multiprocessing.cpu_count()
        except NotImplementedError:
            processors = None
    filehash = unpackreports[scan_binary]['sha256']
    leaf_file_path = os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash)
    ## first record what the top level element is. This will be used by other scans
    leaf_file = open(leaf_file_path, 'rb')
    leafreports = cPickle.load(leaf_file)
    leaf_file.close()
    unpackreports[scan_binary]['tags'].append('toplevel')
    leafreports['tags'].append('toplevel')
    leaf_file = open(leaf_file_path, 'wb')
    ## NOTE(review): cPickle.dump returns None, so this rebinds 'leafreports'
    ## to None after writing; it is reloaded before the next use -- verify
    leafreports = cPickle.dump(leafreports, leaf_file)
    leaf_file.close()
    for aggregatescan in scans['aggregatescans']:
        module = aggregatescan['module']
        method = aggregatescan['method']
        scandebug = False
        if aggregatescan.has_key('debug'):
            scandebug = True
            debug = True
        if debug:
            print >>sys.stderr, "AGGREGATE BEGIN", method, datetime.datetime.utcnow().isoformat()
            sys.stderr.flush()
            scandebug = True
        ## dynamic dispatch: scan modules are only known from configuration
        exec "from %s import %s as bat_%s" % (module, method, method)
        res = eval("bat_%s(unpackreports, scantempdir, topleveldir, processors, aggregatescan['environment'], scandebug=scandebug, unpacktempdir=unpacktempdir)" % (method))
        if res != None:
            if res.keys() != []:
                ## merge the aggregate results into the top-level file report
                leaf_file = open(leaf_file_path, 'rb')
                leafreports = cPickle.load(leaf_file)
                leaf_file.close()
                for reskey in set(res.keys()):
                    leafreports[reskey] = res[reskey]
                    unpackreports[scan_binary]['tags'].append(reskey)
                    leafreports['tags'].append(reskey)
                leaf_file = open(leaf_file_path, 'wb')
                leafreports = cPickle.dump(leafreports, leaf_file)
                leaf_file.close()
        if debug:
            print >>sys.stderr, "AGGREGATE END", method, datetime.datetime.utcnow().isoformat()
def postrunscan((filetoscan, unpackreports, scans, scantempdir, topleveldir, debug)):
	'''Run all postrun scans for a single file.

	Takes one tuple argument (so it can be fed to pool.map):
	(filetoscan, unpackreports, scans, scantempdir, topleveldir, debug)

	Postrun scans only have side effects (reports, pictures, statistics,
	...); their return value is currently ignored.
	'''
	for postrunscan in scans:
		## skip files whose extension this scan explicitly ignores
		ignore = False
		if postrunscan.has_key('extensionsignore'):
			extensionsignore = postrunscan['extensionsignore'].split(':')
			for e in extensionsignore:
				if filetoscan.endswith(e):
					ignore = True
					break
		if ignore:
			continue
		module = postrunscan['module']
		method = postrunscan['method']
		if debug:
			print >>sys.stderr, module, method, filetoscan, datetime.datetime.utcnow().isoformat()
			sys.stderr.flush()
		## dynamically import the configured scan method and invoke it
		exec "from %s import %s as bat_%s" % (module, method, method)
		res = eval("bat_%s(filetoscan, unpackreports, scantempdir, topleveldir, postrunscan['environment'], debug=debug)" % (method))
		## TODO: find out what to do with this
		if res != None:
			pass
## arrays for storing data for the scans
## unpackscans: {name, module, method, ppoutput, priority}
## These are sorted by priority
## leafscans: {name, module, method, ppoutput}
def readconfig(config):
	'''Parse the BAT configuration into a dict of scan descriptions.

	config: ConfigParser instance for the main BAT configuration file.

	The "batconfig" section is processed first and fills the global
	configuration dict (batconf). Every other section describes one scan
	and, depending on its 'type' option, ends up in one of the unpackscans,
	leafscans, prerunscans, postrunscans or aggregatescans lists. Each
	scan gets a private copy of a scrubbed environment, possibly amended
	with per-scan and global 'envvars' settings.

	Returns {'batconfig', 'unpackscans', 'leafscans', 'prerunscans',
	'postrunscans', 'aggregatescans'}.
	'''
	unpackscans = []
	leafscans = []
	prerunscans = []
	postrunscans = []
	aggregatescans = []
	batconf = {}
	## phases for which at least one scan enabled debugging
	tmpbatconfdebug = set()
	## first create an environment so every scan has the same one
	oldenv = os.environ.copy()
	scanenv = {}
	## only propagate a small whitelist of environment variables
	for i in ['PATH', 'PWD', 'HOME', 'HOSTNAME', 'LANG', 'USER']:
		if i in oldenv:
			scanenv[i] = copy.deepcopy(oldenv[i])
	sectionstoprocess = set()
	## process sections, make sure that the global configuration is
	## always processed first.
	for section in config.sections():
		if section != "batconfig":
			sectionstoprocess.add(section)
			continue
		## first set the environment
		newenv = copy.deepcopy(scanenv)
		try:
			## global set of environment variables
			envvars = config.get(section, 'envvars')
			if envvars == None:
				pass
			else:
				## envvars is a colon separated list of NAME=value pairs
				for en in envvars.split(':'):
					try:
						(envname, envvalue) = en.split('=')
						newenv[envname] = envvalue
					except Exception, e:
						pass
		except:
			pass
		batconf['environment'] = newenv
		try:
			mp = config.get(section, 'multiprocessing')
			if mp == 'yes':
				batconf['multiprocessing'] = True
			else:
				batconf['multiprocessing'] = False
		except:
			batconf['multiprocessing'] = False
		try:
			## pretty printer configuration
			batconf['output'] = config.get(section, 'output')
			batconf['module'] = config.get(section, 'module')
			batconf['method'] = config.get(section, 'method')
		except:
			pass
		try:
			reporthash = config.get(section, 'reporthash')
			## TODO: make more configurable, perform checks, etc. etc.
			if reporthash in ['sha256', 'sha1', 'md5', 'crc32']:
				batconf['reporthash'] = reporthash
		except:
			pass
		try:
			batconf['processors'] = int(config.get(section, 'processors'))
		except:
			pass
		try:
			extrapack = config.get(section, 'extrapack')
			batconf['extrapack'] = extrapack.split(':')
		except:
			batconf['extrapack'] = []
		try:
			scrub = config.get(section, 'scrub')
			batconf['scrub'] = scrub.split(':')
		except:
			batconf['scrub'] = []
		try:
			dbbackend = config.get(section, 'dbbackend')
			if dbbackend in ['sqlite3', 'postgresql']:
				batconf['dbbackend'] = dbbackend
				if dbbackend == 'postgresql':
					## PostgreSQL needs credentials; if any of them is
					## missing the backend setting is dropped again.
					try:
						postgresql_user = config.get(section, 'postgresql_user')
						postgresql_password = config.get(section, 'postgresql_password')
						postgresql_db = config.get(section, 'postgresql_db')
						batconf['environment']['POSTGRESQL_USER'] = postgresql_user
						batconf['environment']['POSTGRESQL_PASSWORD'] = postgresql_password
						batconf['environment']['POSTGRESQL_DB'] = postgresql_db
					except:
						del batconf['dbbackend']
		except:
			pass
		try:
			reportendofphase = config.get(section, 'reportendofphase')
			if reportendofphase == 'yes':
				batconf['reportendofphase'] = True
			else:
				batconf['reportendofphase'] = False
		except:
			batconf['reportendofphase'] = False
		try:
			debug = config.get(section, 'debug')
			if debug == 'yes':
				batconf['debug'] = True
			else:
				batconf['debug'] = False
		except:
			batconf['debug'] = False
		try:
			debugphases = config.get(section, 'debugphases')
			if debugphases.strip() == "":
				batconf['debugphases'] = []
			else:
				batconf['debugphases'] = debugphases.split(':')
		except:
			batconf['debugphases'] = []
		try:
			outputlite = config.get(section, 'outputlite')
			if outputlite == 'yes':
				batconf['outputlite'] = True
			else:
				batconf['outputlite'] = False
		except:
			batconf['outputlite'] = False
		try:
			unpacktempdir = config.get(section, 'tempdir')
			if not os.path.isdir(unpacktempdir):
				batconf['tempdir'] = None
			else:
				batconf['tempdir'] = unpacktempdir
				## TODO: try to create a temporary directory
				## to see if the directory is writable
		except:
			batconf['tempdir'] = None
		try:
			template = config.get(section, 'template')
			## check for certain values and reset template if necessary
			if '/' in template:
				template = None
				batconf['template'] = None
				continue
			## NOTE(review): when '%' is in the template the value set here
			## is immediately overwritten below — confirm against upstream
			## whether an 'else' or 'continue' was intended.
			if '%' in template:
				batconf['template'] = template
			template = template + "-%s"
			batconf['template'] = template
		except Exception, e:
			batconf['template'] = None
	## now process the individual scan sections
	for section in sectionstoprocess:
		if config.has_option(section, 'type'):
			debug = False
			## scans have to be explicitely enabled
			if not config.has_option(section, 'enabled'):
				continue
			if config.get(section, 'enabled') == 'no':
				continue
			conf = {}
			conf['module'] = config.get(section, 'module')
			conf['method'] = config.get(section, 'method')
			## some scans might, or might not, have these defined
			try:
				conf['name'] = config.get(section, 'name')
			except:
				conf['name'] = section
			## see if a dbbackend is defined. If not, check if the
			## top level configuration has it defined.
			## NOTE(review): only 'sqlite3' is accepted per scan while the
			## global section also accepts 'postgresql' — confirm intended.
			try:
				dbbackend = config.get(section, 'dbbackend')
				if dbbackend in ['sqlite3']:
					conf['dbbackend'] = dbbackend
			except:
				if 'dbbackend' in batconf:
					conf['dbbackend'] = copy.deepcopy(batconf['dbbackend'])
			## deal with the environment
			newenv = copy.deepcopy(scanenv)
			try:
				envvars = config.get(section, 'envvars')
				if envvars == None:
					pass
				else:
					for en in envvars.split(':'):
						try:
							(envname, envvalue) = en.split('=')
							newenv[envname] = envvalue
						except Exception, e:
							print >>sys.stderr, "EXCEPTION", e
							pass
			except:
				pass
			conf['environment'] = newenv
			try:
				conf['magic'] = config.get(section, 'magic')
			except:
				conf['magic'] = None
			try:
				conf['optmagic'] = config.get(section, 'optmagic')
			except:
				conf['optmagic'] = None
			try:
				conf['noscan'] = config.get(section, 'noscan')
			except:
				conf['noscan'] = None
			try:
				conf['scanonly'] = config.get(section, 'scanonly')
			except:
				conf['scanonly'] = None
			try:
				conf['extensionsignore'] = config.get(section, 'extensionsignore')
			except:
				pass
			try:
				scandebug = config.get(section, 'debug')
				if scandebug == 'yes':
					debug = True
					conf['debug'] = True
			except:
				pass
			try:
				parallel = config.get(section, 'parallel')
				if parallel == 'yes':
					conf['parallel'] = True
				else:
					conf['parallel'] = False
			except:
				conf['parallel'] = True
			try:
				conf['priority'] = int(config.get(section, 'priority'))
			except:
				conf['priority'] = 0
			try:
				conf['ppoutput'] = config.get(section, 'ppoutput')
			except:
				pass
			try:
				conf['ppmodule'] = config.get(section, 'ppmodule')
			except:
				pass
			try:
				conf['setup'] = config.get(section, 'setup')
			except:
				pass
			try:
				conf['conflicts'] = config.get(section, 'conflicts').split(':')
			except:
				pass
			## some things only make sense in a particular context
			if config.get(section, 'type') == 'postrun' or config.get(section, 'type') == 'aggregate':
				try:
					## all three parameters should be there together
					conf['storedir'] = config.get(section, 'storedir')
					conf['storetarget'] = config.get(section, 'storetarget')
					conf['storetype'] = config.get(section, 'storetype')
					try:
						cleanup = config.get(section, 'cleanup')
						if cleanup == 'yes':
							conf['cleanup'] = True
						else:
							conf['cleanup'] = False
					except:
						conf['cleanup'] = False
				except:
					conf['storedir'] = None
					conf['storetarget'] = None
					conf['storetype'] = None
					conf['cleanup'] = False
			if config.get(section, 'type') == 'leaf':
				leafscans.append(conf)
				if debug:
					tmpbatconfdebug.add('leaf')
			elif config.get(section, 'type') == 'unpack':
				unpackscans.append(conf)
				if debug:
					tmpbatconfdebug.add('unpack')
			elif config.get(section, 'type') == 'prerun':
				prerunscans.append(conf)
				if debug:
					tmpbatconfdebug.add('prerun')
			elif config.get(section, 'type') == 'postrun':
				postrunscans.append(conf)
				if debug:
					tmpbatconfdebug.add('postrun')
			elif config.get(section, 'type') == 'aggregate':
				aggregatescans.append(conf)
				if debug:
					tmpbatconfdebug.add('aggregate')
	## per-scan debug settings enable debugging for the whole phase
	if tmpbatconfdebug != set():
		tmpbatconfdebug.update(batconf['debugphases'])
		batconf['debugphases'] = list(tmpbatconfdebug)
	## set and/or amend environment for prerun scans
	for s in prerunscans:
		if not 'environment' in s:
			s['environment'] = copy.deepcopy(scanenv)
		else:
			for e in batconf['environment']:
				if not e in s['environment']:
					s['environment'][e] = copy.deepcopy(batconf['environment'][e])
		if 'dbbackend' in s:
			s['environment']['DBBACKEND'] = s['dbbackend']
	## set and/or amend environment for unpack scans
	for s in unpackscans:
		if not 'environment' in s:
			s['environment'] = copy.deepcopy(scanenv)
		else:
			for e in batconf['environment']:
				if not e in s['environment']:
					s['environment'][e] = copy.deepcopy(batconf['environment'][e])
		if 'dbbackend' in s:
			s['environment']['DBBACKEND'] = s['dbbackend']
	## set and/or amend environment for leaf scans
	for s in leafscans:
		if not 'environment' in s:
			s['environment'] = copy.deepcopy(scanenv)
		else:
			for e in batconf['environment']:
				if not e in s['environment']:
					s['environment'][e] = copy.deepcopy(batconf['environment'][e])
		if 'dbbackend' in s:
			s['environment']['DBBACKEND'] = s['dbbackend']
	## set and/or amend environment for aggregate scans
	for s in aggregatescans:
		if not 'environment' in s:
			s['environment'] = copy.deepcopy(scanenv)
		else:
			for e in batconf['environment']:
				if not e in s['environment']:
					s['environment'][e] = copy.deepcopy(batconf['environment'][e])
		if 'dbbackend' in s:
			s['environment']['DBBACKEND'] = s['dbbackend']
		if s['cleanup']:
			## this is an ugly hack *cringe*
			s['environment']['overridedir'] = True
		if 'reporthash' in batconf:
			s['environment']['OUTPUTHASH'] = batconf['reporthash']
		if 'template' in batconf:
			s['environment']['TEMPLATE'] = batconf['template']
	## set and/or amend environment for postrun scans
	for s in postrunscans:
		if not 'environment' in s:
			s['environment'] = copy.deepcopy(scanenv)
		else:
			for e in batconf['environment']:
				if not e in s['environment']:
					s['environment'][e] = copy.deepcopy(batconf['environment'][e])
		if s['cleanup']:
			## this is an ugly hack *cringe*
			s['environment']['overridedir'] = True
		if 'dbbackend' in s:
			s['environment']['DBBACKEND'] = s['dbbackend']
	## sort scans on priority (highest priority first)
	prerunscans = sorted(prerunscans, key=lambda x: x['priority'], reverse=True)
	leafscans = sorted(leafscans, key=lambda x: x['priority'], reverse=True)
	aggregatescans = sorted(aggregatescans, key=lambda x: x['priority'], reverse=True)
	return {'batconfig': batconf, 'unpackscans': unpackscans, 'leafscans': leafscans, 'prerunscans': prerunscans, 'postrunscans': postrunscans, 'aggregatescans': aggregatescans}
def prettyprint(batconf, res, scandate, scans, toplevelfile, topleveldir):
	'''Load the output module configured in the global BAT configuration
	and invoke its pretty printing method on the scan results.

	The module name lives under batconf['module'], the name of the pretty
	printing method under batconf['output']. Returns whatever the
	configured pretty printer returns.
	'''
	outputmodule = __import__(batconf['module'], fromlist=[batconf['output']])
	prettyprinter = getattr(outputmodule, batconf['output'])
	return prettyprinter(res, scandate, scans, toplevelfile, topleveldir, batconf['environment'])
def dumpData(unpackreports, scans, tempdir):
	'''Collect everything that should end up in the result archive
	into tempdir: per-scan output files (from postrun/aggregate scans
	that define storedir/storetarget/storetype) plus a pickle of the
	scan data. Files belonging to other scanned binaries are left alone.
	Optionally removes copied/stale files when a scan sets 'cleanup'.
	'''
	## a dump of all the result contains:
	## * a copy of all the unpacked data
	## * whatever results from postrunscans that should be stored (defined in the config file)
	## * a pickle of all data, it saves parsing the XML report (or any other format for that matter),
	## minus the data from the ranking scan
	## * separate pickles of the data of the ranking scan
	## collect the checksums of every file seen in this scan
	sha256spack = set([])
	for p in unpackreports:
		if unpackreports[p].has_key('sha256'):
			sha256spack.add(unpackreports[p]['sha256'])
	oldstoredir = None
	oldlistdir = []
	for i in (scans['postrunscans'] + scans['aggregatescans']):
		## use parameters from configuration file. This assumes that the names of the
		## all output files of a particular scan start with the checksum of the scanned
		## file and have a common suffix.
		if i['storedir'] != None and i['storetarget'] != None and i['storetype'] != None:
			if not os.path.exists(i['storedir']):
				continue
			if not os.path.exists(os.path.join(tempdir, i['storetarget'])):
				os.mkdir(os.path.join(tempdir, i['storetarget']))
			target = os.path.join(tempdir, i['storetarget'])
			copyfiles = []
			filetypes = i['storetype'].split(':')
			## in case the storedir was also used in the previous run just reuse
			## the data instead of rereading it using os.listdir.
			if oldstoredir == i['storedir']:
				listdir = oldlistdir
			else:
				listdir = os.listdir(i['storedir'])
				oldstoredir = i['storedir']
				oldlistdir = listdir
			for f in filetypes:
				dirlisting = filter(lambda x: x.endswith(f), listdir)
				## apply a few filters to more efficiently grab only the files
				## that are really needed. This pays off in case there are tons
				## of files that need to be copied.
				dirfilter = set(map(lambda x: x.split('-')[0], dirlisting))
				inter = sha256spack.intersection(dirfilter)
				for s in inter:
					copyfiles = filter(lambda x: s in x, dirlisting)
					## shrink the listing so later passes skip these files
					for c in copyfiles:
						dirlisting.remove(c)
					for c in set(copyfiles):
						shutil.copy(os.path.join(i['storedir'], c), target)
						if i['cleanup']:
							try:
								os.unlink(os.path.join(i['storedir'],c))
							except Exception, e:
								print >>sys.stderr, "dumpData: removing failed", c, e
		else:
			## nothing will be dumped if one of the three parameters is missing
			pass
		## Remove any results for which 'cleanup' has been set to True. For this at least 'storedir'
		## and 'storetype' have to be specified and 'cleanup' has to be set to True. For example, this
		## could be fluff from a previous run.
		if i['storedir'] != None and i['storetype'] != None and i['cleanup']:
			removefiles = []
			filetypes = i['storetype'].split(':')
			listdir = os.listdir(i['storedir'])
			for f in filetypes:
				dirlisting = filter(lambda x: x.endswith(f), listdir)
				for s in sha256spack:
					removefiles = removefiles + filter(lambda x: x.startswith(s), dirlisting)
				for r in set(removefiles):
					try:
						os.unlink(os.path.join(i['storedir'],r))
					except Exception, e:
						print >>sys.stderr, "dumpData: removing failed", r, e
						pass
	## finally dump the scan data itself for later (GUI) consumption
	picklefile = open(os.path.join(tempdir, 'scandata.pickle'), 'wb')
	cPickle.dump(unpackreports, picklefile)
	picklefile.close()
def compressPickle(infile):
	'''Gzip-compress a single file to "<infile>.gz" and remove the original.

	infile: path of the file to compress (a filereport pickle).
	Intended to run in a worker process via pool.map, hence the single
	argument. The original had a redundant, Python 3-hostile parenthesized
	parameter '((infile))' and leaked both file handles on error; the
	handles are now closed via try/finally and the data is copied in
	chunks instead of being read into memory at once.
	'''
	fin = open(infile, 'rb')
	try:
		fout = gzip.open("%s.gz" % infile, 'wb')
		try:
			## chunked copy instead of fout.write(fin.read())
			shutil.copyfileobj(fin, fout)
		finally:
			fout.close()
	finally:
		fin.close()
	## only remove the original once the compressed copy is complete
	os.unlink(infile)
## Write everything to a dump file. A few directories that always should be
## packed are hardcoded, the other files are determined from the configuration.
## The configuration option 'lite' allows to leave out the extracted data, to
## speed up extraction of data in the GUI.
def writeDumpfile(unpackreports, scans, outputfile, configfile, tempdir, lite=False, debug=False):
	'''Pack the scan results from tempdir into a gzipped tar archive.

	unpackreports: scan results to pickle and pack
	scans: configuration dict as returned by readconfig()
	outputfile: path of the tar.gz archive to create
	configfile: path of the configuration file (copied into tempdir)
	tempdir: directory holding all scan output; the process chdirs into
	it while building the archive and restores the old cwd afterwards
	lite: when True the unpacked 'data' directory is left out
	debug: print archive errors to stderr
	'''
	dumpData(unpackreports, scans, tempdir)
	dumpfile = tarfile.open(outputfile, 'w:gz')
	oldcwd = os.getcwd()
	os.chdir(tempdir)
	if scans['batconfig']['scrub'] != []:
		## TODO pretty print the configuration file, scrubbed of
		## any of the values in 'scrub'
		pass
	## NOTE(review): the copied configfile is not add()ed to the archive
	## here — confirm whether that is intentional.
	shutil.copy(configfile, '.')
	dumpfile.add('scandata.pickle')
	if scans['batconfig']['extrapack'] != []:
		## pack extra files, but only safe relative, non-symlink paths
		for e in scans['batconfig']['extrapack']:
			if os.path.isabs(e):
				continue
			if os.path.islink(e):
				continue
			## TODO: many more checks
			if os.path.exists(e):
				dumpfile.add(e)
	if not lite:
		dumpfile.add('data')
	try:
		os.stat('filereports')
		## compress pickle files in parallel
		filereports = os.listdir('filereports')
		if scans['batconfig'].has_key('processors'):
			pool = multiprocessing.Pool(processes=scans['batconfig']['processors'])
		else:
			pool = multiprocessing.Pool()
		fnames = map(lambda x: os.path.join(tempdir, "filereports", x), filereports)
		pool.map(compressPickle, fnames, 1)
		pool.terminate()
		dumpfile.add('filereports')
	except Exception,e:
		if debug:
			print >>sys.stderr, "writeDumpfile", e
			sys.stderr.flush()
	## add the per-scan output directories that dumpData prepared
	dumpadds = set()
	for i in (scans['postrunscans'] + scans['aggregatescans']):
		if i['storedir'] != None and i['storetarget'] != None and i['storetype'] != None:
			try:
				os.stat(i['storetarget'])
				dumpadds.add(i['storetarget'])
			except Exception, e:
				if debug:
					print >>sys.stderr, "writeDumpfile:", e
					sys.stderr.flush()
		else:
			pass
	for i in dumpadds:
		dumpfile.add(i)
	dumpfile.close()
	os.chdir(oldcwd)
def runscan(scans, scan_binary):
	'''Drive a complete scan of a single binary.

	scans: configuration dict as returned by readconfig()
	scan_binary: path of the binary to scan

	Phases: prerun/unpack (worker processes fed via queues), leaf scans
	(deduplicated per SHA256, largest files first), aggregate scans and
	postrun scans. Parallelism per phase is controlled by the
	'multiprocessing', 'parallel' and debug settings in the configuration.

	Returns (topleveldir, unpackreports).
	'''
	unpacktempdir = scans['batconfig']['tempdir']
	if unpacktempdir != None:
		if not os.path.exists(unpacktempdir):
			unpacktempdir = None
	try:
		## test if unpacktempdir is actually writable
		topleveldir = tempfile.mkdtemp(dir=unpacktempdir)
	except:
		unpacktempdir = None
		topleveldir = tempfile.mkdtemp(dir=unpacktempdir)
	os.makedirs("%s/data" % (topleveldir,))
	scantempdir = "%s/data" % (topleveldir,)
	shutil.copy(scan_binary, scantempdir)
	debug = scans['batconfig']['debug']
	debugphases = scans['batconfig']['debugphases']
	## collect all magic markers (mandatory and optional) of all scans
	magicscans = []
	optmagicscans = []
	for k in ["prerunscans", "unpackscans", "leafscans", "postrunscans"]:
		for s in scans[k]:
			if s['magic'] != None:
				magicscans = magicscans + s['magic'].split(':')
			if s['optmagic'] != None:
				optmagicscans = optmagicscans + s['optmagic'].split(':')
	magicscans = list(set(magicscans))
	optmagicscans = list(set(optmagicscans))
	## Per binary scanned we get a list with results.
	## Each file system or compressed file we can unpack gives a list with
	## reports back as its result, so we have a list of lists
	## within the inner list there is a result tuple, which could contain
	## more lists in some fields, like libraries, or more result lists if
	## the file inside a file system we looked at was in fact a file system.
	leaftasks = []
	unpackreports_tmp = []
	unpackreports = {}
	tmpdebug = False
	if debug:
		tmpdebug = True
		if debugphases != []:
			if not ('prerun' in debugphases or 'unpack' in debugphases):
				tmpdebug = False
	tags = []
	scantasks = [(scantempdir, os.path.basename(scan_binary), len(scantempdir), scantempdir, tmpdebug, tags)]
	## Use multithreading to speed up scanning. Sometimes we hit http://bugs.python.org/issue9207
	## Threading can be configured in the configuration file, but
	## often it is wise to have it set to 'no'. This is because ranking writes
	## to databases and you don't want concurrent writes.
	## some categories of scans can still be run in parallel. For example
	## if only one of the leaf scans has a side effect, then prerun, unpack
	## and unpack scans can still be run in parallel.
	## By setting 'multiprocessing' to 'yes' and indicating that some scans should
	## not be run in parallel (which will be for the whole category of scans) it is
	## possible to have partial parallel scanning.
	parallel = True
	if scans['batconfig']['multiprocessing']:
		if False in map(lambda x: x['parallel'], scans['unpackscans'] + scans['prerunscans']):
			parallel = False
	else:
		parallel = False
	if debug:
		if debugphases == []:
			parallel = False
		else:
			if 'unpack' in debugphases or 'prerun' in debugphases:
				parallel = False
	if parallel:
		if scans['batconfig'].has_key('processors'):
			processamount = min(multiprocessing.cpu_count(),scans['batconfig']['processors'])
		else:
			processamount = multiprocessing.cpu_count()
	else:
		processamount = 1
	template = scans['batconfig']['template']
	## use a queue made with a manager to avoid some issues, see:
	## http://docs.python.org/2/library/multiprocessing.html#pipes-and-queues
	if debug:
		print >>sys.stderr, "PRERUN UNPACK BEGIN", datetime.datetime.utcnow().isoformat()
	lock = Lock()
	scanmanager = multiprocessing.Manager()
	scanqueue = multiprocessing.JoinableQueue(maxsize=0)
	reportqueue = scanmanager.Queue(maxsize=0)
	leafqueue = scanmanager.Queue(maxsize=0)
	processpool = []
	hashdict = scanmanager.dict()
	map(lambda x: scanqueue.put(x), scantasks)
	## start the prerun/unpack worker processes
	for i in range(0,processamount):
		p = multiprocessing.Process(target=scan, args=(scanqueue,reportqueue,leafqueue, scans['unpackscans'], scans['prerunscans'], magicscans, optmagicscans, i, hashdict, lock, template, unpacktempdir))
		processpool.append(p)
		p.start()
	scanqueue.join()
	## drain the report and leaf task queues
	while True:
		try:
			val = reportqueue.get_nowait()
			unpackreports_tmp.append(val)
			reportqueue.task_done()
		except Queue.Empty, e:
			## Queue is empty
			break
	while True:
		try:
			val = leafqueue.get_nowait()
			leaftasks.append(val)
			leafqueue.task_done()
		except Queue.Empty, e:
			## Queue is empty
			break
	leafqueue.join()
	reportqueue.join()
	for p in processpool:
		p.terminate()
	if debug:
		print >>sys.stderr, "PRERUN UNPACK END", datetime.datetime.utcnow().isoformat()
	if scans['batconfig']['reportendofphase']:
		print "PRERUN UNPACK END %s" % os.path.basename(scan_binary), datetime.datetime.utcnow().isoformat()
	if debug:
		print >>sys.stderr, "LEAF BEGIN", datetime.datetime.utcnow().isoformat()
	poolresult = []
	tagdict = {}
	finalscans = []
	if scans['leafscans'] != []:
		if scans['batconfig']['multiprocessing']:
			parallel = True
		else:
			parallel = False
		tmpdebug=False
		if debug:
			tmpdebug = True
			if debugphases != []:
				if not 'leaf' in debugphases:
					tmpdebug = False
		## First run the 'setup' hooks for the scans and pass
		## results via the environment. This should keep the
		## code cleaner.
		for sscan in scans['leafscans']:
			if not sscan.has_key('setup'):
				finalscans.append(sscan)
				continue
			setupres = runSetup(sscan, tmpdebug)
			(setuprun, newenv) = setupres
			if not setuprun:
				continue
			## 'parallel' can be used to modify whether or not the
			## scans should be run in parallel. This is right now
			## the only 'special' keyword.
			if newenv.has_key('parallel'):
				if newenv['parallel'] == False:
					parallel = False
			sscan['environment'] = newenv
			finalscans.append(sscan)
		## Sometimes there are identical files inside a blob.
		## To minimize time spent on scanning these should only be
		## scanned once. Since the results are independent anyway (the
		## unpacking phase is where unique paths are determined after all)
		## each sha256 can be scanned only once. If there are more files
		## with the same sha256 the result can simply be copied.
		##
		## * keep a list of which sha256 have duplicates.
		## * filter out the checksums
		## * for each sha256 scan once
		## * copy results in case there are duplicates
		sha256leaf = {}
		for i in leaftasks:
			if sha256leaf.has_key(i[-2]):
				sha256leaf[i[-2]].append(i[0])
			else:
				sha256leaf[i[-2]] = [i[0]]
		sha256_tmp = {}
		for i in sha256leaf:
			if len(sha256leaf[i]) > 0:
				sha256_tmp[i] = sha256leaf[i][0]
		## keep only the first task per unique checksum
		leaftasks_tmp = []
		for i in leaftasks:
			if sha256_tmp[i[-2]] == i[0]:
				leaftasks_tmp.append(i)
		## reverse sort on size: scan largest files first
		leaftasks_tmp.sort(key=lambda x: x[-1], reverse=True)
		leaftasks_tmp = map(lambda x: x[:2] + (filterScans(finalscans, x[2]),) + x[2:-1] + (topleveldir, tmpdebug, unpacktempdir), leaftasks_tmp)
		if scans['batconfig']['multiprocessing']:
			if False in map(lambda x: x['parallel'], finalscans):
				parallel = False
		else:
			parallel = False
		if debug:
			if debugphases == []:
				parallel = False
			else:
				if 'leaf' in debugphases:
					parallel = False
		if parallel:
			if scans['batconfig'].has_key('processors'):
				pool = multiprocessing.Pool(scans['batconfig']['processors'])
			else:
				pool = multiprocessing.Pool()
		else:
			pool = multiprocessing.Pool(processes=1)
		if not os.path.exists(os.path.join(topleveldir, 'filereports')):
			os.mkdir(os.path.join(topleveldir, 'filereports'))
		poolresult = pool.map(leafScan, leaftasks_tmp, 1)
		pool.terminate()
		## filter the results for the leafscans. These are the ones that
		## returned tags so need to be merged into unpackreports.
		mergetags = filter(lambda x: x[1] != [], poolresult)
		for m in mergetags:
			tagdict[m[0]] = m[1]
	dupes = []
	## the result is a list of dicts which needs to be turned into one dict
	for i in unpackreports_tmp:
		for k in i:
			if i[k].has_key('tags'):
				## the file is a duplicate, store for later
				if 'duplicate' in i[k]['tags']:
					dupes.append(i)
					continue
			unpackreports[k] = i[k]
	for i in dupes:
		for k in i:
			dupesha256 = i[k]['sha256']
			origname = i[k]['name']
			origrealpath = i[k]['realpath']
			origpath = i[k]['path']
			## keep: name, realpath, path, copy the rest of the original
			dupecopy = copy.deepcopy(unpackreports[hashdict[dupesha256]])
			dupecopy['name'] = origname
			dupecopy['path'] = origpath
			dupecopy['realpath'] = origrealpath
			dupecopy['tags'].append('duplicate')
			unpackreports[k] = dupecopy
	## merge tags produced by leaf scans back into the unpack reports
	for i in unpackreports.keys():
		if not unpackreports[i].has_key('sha256'):
			continue
		unpacksha256 = unpackreports[i]['sha256']
		if tagdict.has_key(unpacksha256):
			if unpackreports[i].has_key('tags'):
				unpackreports[i]['tags'] = list(set(unpackreports[i]['tags'] + tagdict[unpacksha256]))
	if debug:
		print >>sys.stderr, "LEAF END", datetime.datetime.utcnow().isoformat()
	if scans['batconfig']['reportendofphase']:
		print "LEAF END %s" % os.path.basename(scan_binary), datetime.datetime.utcnow().isoformat()
	if debug:
		print >>sys.stderr, "AGGREGATE BEGIN", datetime.datetime.utcnow().isoformat()
	if scans['aggregatescans'] != []:
		tmpdebug=False
		if debug:
			tmpdebug = True
			if debugphases != []:
				if not 'aggregate' in debugphases:
					tmpdebug = False
		aggregatescan(unpackreports, scans, scantempdir, topleveldir, os.path.basename(scan_binary), tmpdebug, unpacktempdir)
	if debug:
		print >>sys.stderr, "AGGREGATE END", datetime.datetime.utcnow().isoformat()
	if scans['batconfig']['reportendofphase']:
		print "AGGREGATE END %s" % os.path.basename(scan_binary), datetime.datetime.utcnow().isoformat()
	for i in unpackreports:
		if unpackreports[i].has_key('tags'):
			unpackreports[i]['tags'] = list(set(unpackreports[i]['tags']))
	if debug:
		print >>sys.stderr, "POSTRUN BEGIN", datetime.datetime.utcnow().isoformat()
	## run postrunscans here, again in parallel, if needed/wanted
	## These scans typically only have a few side effects, but don't change
	## the reporting/scanning, just process the results. Examples: generate
	## fancier reports, use microblogging to post scan results, etc.
	## Duplicates that are tagged as 'duplicate' are not processed.
	if scans['postrunscans'] != [] and unpackreports != {}:
		## if unpackreports != {} since deduplication has already been done
		dedupes = filter(lambda x: 'duplicate' not in unpackreports[x]['tags'], filter(lambda x: unpackreports[x].has_key('tags'), filter(lambda x: unpackreports[x].has_key('sha256'), unpackreports.keys())))
		postrunscans = []
		for i in dedupes:
			## results might have been changed by aggregate scans, so check if it still exists
			if unpackreports.has_key(i):
				tmpdebug = False
				if debug:
					tmpdebug = True
					if debugphases != []:
						if not 'postrun' in debugphases:
							tmpdebug = False
				postrunscans.append((i, unpackreports[i], scans['postrunscans'], scantempdir, topleveldir, tmpdebug))
		parallel = True
		if scans['batconfig']['multiprocessing']:
			if False in map(lambda x: x['parallel'], scans['postrunscans']):
				parallel = False
		else:
			parallel = False
		if debug:
			if debugphases == []:
				parallel = False
			else:
				if 'postrun' in debugphases:
					parallel = False
		if parallel:
			if scans['batconfig'].has_key('processors'):
				pool = multiprocessing.Pool(scans['batconfig']['processors'])
			else:
				pool = multiprocessing.Pool()
		else:
			pool = multiprocessing.Pool(processes=1)
		postrunresults = pool.map(postrunscan, postrunscans, 1)
		pool.terminate()
	if debug:
		print >>sys.stderr, "POSTRUN END", datetime.datetime.utcnow().isoformat()
	if scans['batconfig']['reportendofphase']:
		print "POSTRUN END %s" % os.path.basename(scan_binary), datetime.datetime.utcnow().isoformat()
	return (topleveldir, unpackreports)
## more environment fixes
#!/usr/bin/python
## Binary Analysis Tool
## Copyright 2009-2015 Armijn Hemel for Tjaldur Software Governance Solutions
## Licensed under Apache 2.0, see LICENSE file for details
'''
This script tries to analyse binary blobs, using a "brute force" approach
and pretty print the analysis in a simple XML format.
The script has a few separate scanning phases:
1. marker scanning phase, to search for specific markers (compression, file systems,
media formats), if available. This information is later used to filter scans and to
carve files.
2. prerun phase for tagging files. This is a first big rough sweep to determine what the
files are, in order to prevent spending too much time on useless scanning in the following phases.
Some things that are tagged here are text files, XML files, various graphics formats and
some other files.
3. unpack phase for unpacking files. In this phase several methods for unpacking files are
run, using the information from the marker scanning phase (if a file system file or
compressed file actually uses markers, which is not always the case). Also some simple
metadata about files is recorded in this phase. This method runs recursively: if a file
system was found and unpacked all the scans from steps 1, 2, 3 are run on the files that
were unpacked.
4. individual file scanning phase. Here each file will be inspected individually. Based on
the configuration that was given this could be basically anything.
5. output phase. Using a pretty printer a report is pretty printed. The pretty printer is
set in the configuration file and is optional.
6. postrun phase. In this phase methods that are not necessary for generating output, but
which should be run anyway, are run. Examples are generating pictures or running statistics.
7. packing phase. In this phase several datafiles, plus the state of the running program,
are packed in a tar file.
'''
import sys, os, os.path, magic, hashlib, subprocess, tempfile, shutil, stat, multiprocessing, cPickle, glob, tarfile, copy, gzip, Queue
from optparse import OptionParser
import datetime, re
import extractor
import prerun, fsmagic
from multiprocessing import Process, Lock
from multiprocessing.sharedctypes import Value, Array
## module-level libmagic handle, loaded once and shared by the scanning code
ms = magic.open(magic.MAGIC_NONE)
ms.load()
## convenience method to merge ranges that overlap in a blacklist
## We do multiple passes to make sure everything is correctly merged
## Example:
## [(1,3), (2,4), (5,7), (3,7)] would result in [(1,7)]
def mergeBlacklist(blacklist):
	'''Merge overlapping (lower, upper) ranges in a blacklist.

	Repeated merge passes are made until the list no longer changes, so
	ranges that only become adjacent after an earlier merge are folded in
	as well. Example: [(1,3), (2,4), (5,7), (3,7)] -> [(1,7)].

	Note: each pass only compares a candidate against the most recently
	merged range, so the input is expected in the order produced by the
	scanning code.
	'''
	if blacklist == []:
		return []
	previous = []
	## keep merging until a pass produces no further changes
	while previous != blacklist:
		merged = [blacklist[0]]
		for candidate in blacklist[1:]:
			(lower, upper) = merged[-1]
			if upper >= candidate[0] or lower >= candidate[0]:
				## overlapping range: widen the last merged range
				if upper <= candidate[1]:
					upper = candidate[1]
				if lower >= candidate[0]:
					lower = candidate[0]
				merged[-1] = (lower, upper)
			else:
				## disjoint range, so just append
				merged.append(candidate)
		previous = blacklist
		blacklist = merged
	return blacklist
def runSetup(setupscan, debug=False):
	'''Run the 'setup' hook of a single scan and return its result.

	setupscan: scan description dict with 'module', 'setup' and
	'environment' keys.
	debug: when True the module and method names are printed to stderr.

	Returns whatever the setup hook returns (a (success, environment)
	tuple by convention of the callers).
	'''
	modulename = setupscan['module']
	methodname = setupscan['setup']
	if debug:
		sys.stderr.write("%s %s\n" % (modulename, methodname))
		sys.stderr.flush()
	## dynamically load the configured setup hook and invoke it
	setupmodule = __import__(modulename, fromlist=[methodname])
	setupmethod = getattr(setupmodule, methodname)
	return setupmethod(setupscan['environment'], debug=debug)
## method to filter scans, based on the tags that were found for a
## file, plus a list of tags that the scan should skip.
## This is done to avoid scans running unnecessarily.
def filterScans(scans, tags):
	'''Return the subset of scans that applies to a file with these tags.

	A scan with 'scanonly' set is kept only when at least one of its
	colon separated tags matches; a scan with 'noscan' set is dropped
	when any of its colon separated tags matches. This avoids running
	scans unnecessarily.
	'''
	tagset = set(tags)
	applicable = []
	for candidate in scans:
		## 'scanonly': the scan is restricted to files with these tags
		if candidate['scanonly'] != None:
			if tagset.intersection(candidate['scanonly'].split(':')) == set():
				continue
		## 'noscan': files with any of these tags must be skipped
		if candidate['noscan'] != None:
			if set(candidate['noscan'].split(':')).intersection(tagset) != set():
				continue
		applicable.append(candidate)
	return applicable
## compute a SHA256 hash. This is done in chunks to prevent a big file from
## being read in its entirety at once, slowing down a machine.
def gethash(path, filename):
    """Compute the SHA256 hex digest of path/filename.

    Reads the file in 10 MB chunks so a huge file is never loaded into
    memory at once.

    Fixes: the file is now opened in binary mode ('rb' instead of 'r'),
    which is required for hashing arbitrary binary content, and the handle
    is closed via try/finally so it is not leaked if a read fails.
    """
    h = hashlib.new('sha256')
    scanfile = open(os.path.join(path, filename), 'rb')
    try:
        hashdata = scanfile.read(10000000)
        while hashdata:
            h.update(hashdata)
            hashdata = scanfile.read(10000000)
    finally:
        scanfile.close()
    return h.hexdigest()
## tag files based on extension and a few simple tests and possibly skip
## the generic marker search based on the results. This is to prevent
## a lot of I/O for large files.
## Example: ZIP files and JAR files often have a known extension. With
## a few simple tests it is easy to see if the entire file is a ZIP file
## or not.
## returns a dictionary with offsets
## TODO: refactor so code can be shared with fwunpack.py
def tagKnownExtension(filename):
    """Tag a file based on its extension plus a few cheap sanity checks.

    Currently only handles ZIP-like extensions (zip/jar/apk): verifies the
    ZIP magic at offset 0, then uses the external 'zipinfo' tool to check
    whether the entire file is a single ZIP archive, and whether it is
    encrypted. The goal is to skip the expensive generic marker search for
    large, well-known files.

    Returns a (tags, offsets) tuple: a list of tags and a dict mapping
    marker name to a list of offsets.
    """
    offsets = {}
    tags = []
    extensions = filename.rsplit('.', 1)
    if len(extensions) == 1:
        ## no extension at all, nothing to conclude
        return (tags, offsets)
    extension = extensions[-1].lower()
    if extension == 'zip' or extension == 'jar' or extension == 'apk':
        ## cheap check first: ZIP magic must be at offset 0
        datafile = open(filename, 'rb')
        databuffer = datafile.read(10)
        datafile.close()
        if databuffer.find(fsmagic.fsmagic['zip']) != 0:
            return (tags, offsets)
        ## ask zipinfo where the end-of-central-directory record lives
        p = subprocess.Popen(['zipinfo', '-v', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
        (stanout, stanerr) = p.communicate()
        res = re.search("Actual[\w\s]*end-(?:of-)?cent(?:ral)?-dir record[\w\s]*:\s*(\d+) \(", stanout)
        if res != None:
            endofcentraldir = int(res.groups(0)[0])
        else:
            return (tags, offsets)
        ## TODO: determine commentsize
        commentsize = 0
        ## EOCD record is 22 bytes; if EOCD + record + comment equals the
        ## file size the whole file is one ZIP archive
        if endofcentraldir + 22 + commentsize == os.stat(filename).st_size:
            offsets['zip'] = [0]
            tags.append('zip')
            ## check if the file is encrypted, if so bail out
            res = re.search("file security status:\s+(\w*)\sencrypted", stanout)
            if res == None:
                ## NOTE(review): this discards the 'zip' tag just set while
                ## keeping the offsets -- looks deliberate (unknown security
                ## status -> fall back to full scanning), but verify.
                return ([], offsets)
            if res.groups(0)[0] != 'not':
                tags.append('encrypted')
            return (tags, offsets)
    return (tags, offsets)
## scan a single file, possibly unpack and recurse
def scan(scanqueue, reportqueue, leafqueue, scans, prerunscans, magicscans, optmagicscans, processid, hashdict, llock, template, unpacktempdir):
prerunignore = {}
prerunmagic = {}
for prerunscan in prerunscans:
if prerunscan.has_key('noscan'):
if not prerunscan['noscan'] == None:
noscans = prerunscan['noscan'].split(':')
prerunignore[prerunscan['name']] = noscans
if prerunscan.has_key('magic'):
if not prerunscan['magic'] == None:
magics = prerunscan['magic'].split(':')
if not prerunmagic.has_key(prerunscan['name']):
prerunmagic[prerunscan['name']] = magics
else:
prerunmagic[prerunscan['name']] = prerunmagic[prerunscan['name']] + magics
if prerunscan.has_key('optmagic'):
if not prerunscan['optmagic'] == None:
magics = prerunscan['optmagic'].split(':')
if not prerunmagic.has_key(prerunscan['name']):
prerunmagic[prerunscan['name']] = magics
else:
prerunmagic[prerunscan['name']] = prerunmagic[prerunscan['name']] + magics
while True:
## reset the reports, blacklist, offsets and tags for each new scan
leaftasks = []
unpackreports = {}
blacklist = []
(path, filename, lenscandir, tempdir, debug, tags) = scanqueue.get()
lentempdir = len(tempdir)
## absolute path of the file in the file system (so including temporary dir)
filetoscan = os.path.join(path, filename)
## relative path of the file in the temporary dir
relfiletoscan = filetoscan[lentempdir:]
if relfiletoscan.startswith('/'):
relfiletoscan = relfiletoscan[1:]
unpackreports[relfiletoscan] = {}
unpackreports[relfiletoscan]['name'] = filename
magic = ms.file(filetoscan)
unpackreports[relfiletoscan]['magic'] = magic
## Add both the path to indicate the position inside the file sytem
## or file that was unpacked, as well as the position of the files as unpacked
## by BAT, convenient for later analysis of binaries.
## In case of squashfs remove the "squashfs-root" part of the temporary
## directory too, if it is present (not always).
## TODO: validate if this is stil needed
storepath = path[lenscandir:].replace("/squashfs-root", "")
unpackreports[relfiletoscan]['path'] = storepath
unpackreports[relfiletoscan]['realpath'] = path
if os.path.islink(filetoscan):
tags.append('symlink')
unpackreports[relfiletoscan]['tags'] = tags
for l in leaftasks:
leafqueue.put(l)
for u in unpackreports:
reportqueue.put({u: unpackreports[u]})
scanqueue.task_done()
continue
## no use checking pipes, sockets, device files, etcetera
if not os.path.isfile(filetoscan) and not os.path.isdir(filetoscan):
for l in leaftasks:
leafqueue.put(l)
for u in unpackreports:
reportqueue.put({u: unpackreports[u]})
scanqueue.task_done()
continue
filesize = os.lstat(filetoscan).st_size
unpackreports[relfiletoscan]['size'] = filesize
## empty file, not interested in further scanning
if filesize == 0:
tags.append('empty')
unpackreports[relfiletoscan]['tags'] = tags
for l in leaftasks:
leafqueue.put(l)
for u in unpackreports:
reportqueue.put({u: unpackreports[u]})
scanqueue.task_done()
continue
## Store the hash of the file for identification and for possibly
## querying the knowledgebase later on.
filehash = gethash(path, filename)
unpackreports[relfiletoscan]['sha256'] = filehash
## scan for markers
tagOffsets = tagKnownExtension(filetoscan)
(newtags, offsets) = tagOffsets
tags = tags + newtags
if offsets == {}:
offsets = prerun.genericMarkerSearch(filetoscan, magicscans, optmagicscans)
if "encrypted" in tags:
leaftasks.append((filetoscan, magic, tags, blacklist, filehash, filesize))
for l in leaftasks:
leafqueue.put(l)
unpackreports[relfiletoscan]['tags'] = tags
for u in unpackreports:
reportqueue.put({u: unpackreports[u]})
scanqueue.task_done()
## we have all offsets with markers here, so sscans that are not needed
## can be filtered out.
## Also keep track of the "most promising" scans (offset 0) to try
## them first.
filterscans = set()
zerooffsets = set()
for magictype in offsets:
if offsets[magictype] != []:
filterscans.add(magictype)
if offsets[magictype][0] - fsmagic.correction.get(magictype, 0) == 0:
zerooffsets.add(magictype)
## acquire the lock for the shared dictionary to see if this file was already
## scanned, or is in the process of being scanned.
llock.acquire()
if hashdict.has_key(filehash):
## if the hash is alreay there, return
unpackreports[relfiletoscan]['tags'] = ['duplicate']
for u in unpackreports:
reportqueue.put({u: unpackreports[u]})
llock.release()
scanqueue.task_done()
continue
else:
hashdict[filehash] = relfiletoscan
llock.release()
## prerun scans should be run before any of the other scans
for prerunscan in prerunscans:
ignore = False
if prerunscan.has_key('extensionsignore'):
extensionsignore = prerunscan['extensionsignore'].split(':')
for e in extensionsignore:
if filetoscan.endswith(e):
ignore = True
break
if ignore:
continue
if prerunignore.has_key(prerunscan['name']):
if set(tags).intersection(set(prerunignore[prerunscan['name']])) != set():
continue
if prerunmagic.has_key(prerunscan['name']):
if set(prerunmagic[prerunscan['name']]).intersection(filterscans) == set():
continue
module = prerunscan['module']
method = prerunscan['method']
if debug:
print >>sys.stderr, module, method, filetoscan, datetime.datetime.utcnow().isoformat()
sys.stderr.flush()
exec "from %s import %s as bat_%s" % (module, method, method)
scantags = eval("bat_%s(filetoscan, tempdir, tags, offsets, prerunscan['environment'], debug=debug, unpacktempdir=unpacktempdir)" % (method))
## append the tag results. These will be used later to be able to specifically filter
## out files
if scantags != []:
tags = tags + scantags
## Reorder the scans based on information about offsets. If one scan has a
## match for offset 0 (after correction of the offset, like for tar, gzip,
## iso9660, etc.) make sure it is run first.
unpackscans = []
scanfirst = []
## Filter scans
filteredscans = filterScans(scans, tags)
for unpackscan in filteredscans:
if unpackscan['magic'] != None:
scanmagic = unpackscan['magic'].split(':')
if set(scanmagic).intersection(filterscans) != set():
if set(scanmagic).intersection(zerooffsets) != set():
scanfirst.append(unpackscan)
else:
unpackscans.append(unpackscan)
else:
unpackscans.append(unpackscan)
## sort 'unpackscans' in decreasing priority, so highest
## priority scans are run first.
## TODO: sort per priority per offset for scans that are the most promising
## but only for files that are fairly big, otherwise it has no use at all
## since scanning smaller files is very fast.
unpackscans = sorted(unpackscans, key=lambda x: x['priority'], reverse=True)
'''
if unpackscans != [] and filesize > 10000000:
## first determine the priorities
prios = map(lambda x: x['priority'], unpackscans)
## sort them in reverse order
prios = sorted(prios, reverse=True)
## sort per priority based on first offset for each scan
for p in prios:
sortprios = filter(lambda x: x['priority'] == p, unpackscans)
## now sort sortprios based on value of the first offset
'''
## prepend the most promising scans at offset 0 (if any)
scanfirst = sorted(scanfirst, key=lambda x: x['priority'], reverse=True)
unpackscans = scanfirst + unpackscans
unpackreports[relfiletoscan]['scans'] = []
unpacked = False
for unpackscan in unpackscans:
## the whole file has already been scanned by other scans, so
## continue with the leaf scans.
if extractor.inblacklist(0, blacklist) == filesize:
break
if unpackscan['noscan'] != None:
noscans = unpackscan['noscan'].split(':')
if list(set(tags).intersection(set(noscans))) != []:
continue
ignore = False
if unpackscan.has_key('extensionsignore'):
extensionsignore = unpackscan['extensionsignore'].split(':')
for e in extensionsignore:
if filetoscan.endswith(e):
ignore = True
break
if ignore:
continue
module = unpackscan['module']
method = unpackscan['method']
if debug:
print >>sys.stderr, module, method, filetoscan, datetime.datetime.utcnow().isoformat()
sys.stderr.flush()
## make a copy before changing the environment
newenv = copy.deepcopy(unpackscan['environment'])
newenv['BAT_UNPACKED'] = unpacked
if template != None:
templen = len(re.findall('%s', template))
if templen == 2:
newenv['TEMPLATE'] = template % (os.path.basename(filetoscan), unpackscan['name'])
elif templen == 1:
newenv['TEMPLATE'] = template % unpackscan['name']
else:
newenv['TEMPLATE'] = template
## return value is the temporary dir, plus offset in the parent file
## plus a blacklist containing blacklisted ranges for the *original*
## file and a hash with offsets for each marker.
exec "from %s import %s as bat_%s" % (module, method, method)
scanres = eval("bat_%s(filetoscan, tempdir, blacklist, offsets, newenv, debug=debug)" % (method))
## result is either empty, or contains offsets, tags and hints
if len(scanres) == 4:
(diroffsets, blacklist, scantags, hints) = scanres
tags = list(set(tags + scantags))
if len(diroffsets) == 0:
continue
#blacklist = mergeBlacklist(blacklist)
## each diroffset is a (path, offset) tuple
for diroffset in diroffsets:
report = {}
if diroffset == None:
continue
unpacked = True
scandir = diroffset[0]
## recursively scan all files in the directory
osgen = os.walk(scandir)
scanreports = []
scantasks = []
try:
while True:
i = osgen.next()
## make sure all directories can be accessed
for d in i[1]:
directoryname = os.path.join(i[0], d)
if not os.path.islink(directoryname):
os.chmod(directoryname, stat.S_IRUSR|stat.S_IWUSR|stat.S_IXUSR)
for p in i[2]:
try:
if not os.path.islink("%s/%s" % (i[0], p)):
os.chmod("%s/%s" % (i[0], p), stat.S_IRUSR|stat.S_IWUSR|stat.S_IXUSR)
if "temporary" in tags and diroffset[1] == 0 and diroffset[2] == filesize:
scantasks.append((i[0], p, len(scandir), tempdir, debug, ['temporary']))
else:
scantasks.append((i[0], p, len(scandir), tempdir, debug, []))
relscanpath = "%s/%s" % (i[0][lentempdir:], p)
if relscanpath.startswith('/'):
relscanpath = relscanpath[1:]
scanreports.append(relscanpath)
except Exception, e:
pass
except StopIteration:
for s in scantasks:
scanqueue.put(s)
unpackreports[relfiletoscan]['scans'].append({'scanname': unpackscan['name'], 'scanreports': scanreports, 'offset': diroffset[1], 'size': diroffset[2]})
unpackreports[relfiletoscan]['tags'] = tags
if not unpacked and 'temporary' in tags:
os.unlink(filetoscan)
for l in leaftasks:
leafqueue.put(l)
for u in unpackreports:
reportqueue.put({u: unpackreports[u]})
else:
leaftasks.append((filetoscan, magic, tags, blacklist, filehash, filesize))
for l in leaftasks:
leafqueue.put(l)
for u in unpackreports:
reportqueue.put({u: unpackreports[u]})
scanqueue.task_done()
def leafScan((filetoscan, magic, scans, tags, blacklist, filehash, topleveldir, debug, unpacktempdir)):
    ## Run all leaf scans on a single (already unpacked) file and pickle the
    ## combined results to <topleveldir>/filereports/<sha256>-filereport.pickle.
    ## Takes one tuple argument (Python 2 style) so it can be used with
    ## multiprocessing map-style APIs.
    ## Returns (filehash, new tags discovered by the leaf scans).
    reports = {}
    newtags = []
    for leafscan in scans:
        ignore = False
        if leafscan.has_key('extensionsignore'):
            extensionsignore = leafscan['extensionsignore'].split(':')
            for e in extensionsignore:
                if filetoscan.endswith(e):
                    ignore = True
                    break
        if ignore:
            continue
        report = {}
        module = leafscan['module']
        method = leafscan['method']
        scandebug = False
        if leafscan.has_key('debug'):
            scandebug = True
            ## NOTE(review): enabling debug here keeps it on for all
            ## subsequent leaf scans in this loop -- confirm intended
            debug = True
        if debug:
            print >>sys.stderr, method, filetoscan, datetime.datetime.utcnow().isoformat()
            sys.stderr.flush()
            scandebug = True
        ## exec/eval on configuration-supplied names; config must be trusted
        exec "from %s import %s as bat_%s" % (module, method, method)
        res = eval("bat_%s(filetoscan, tags, blacklist, leafscan['environment'], scandebug=scandebug, unpacktempdir=unpacktempdir)" % (method))
        if res != None:
            (nt, leafres) = res
            reports[leafscan['name']] = leafres
            newtags = newtags + nt
    tags += list(set(newtags))
    reports['tags'] = list(set(tags))
    ## write pickles with information to disk here to reduce memory usage;
    ## only write if no report exists yet for this hash
    try:
        os.stat('%s/filereports/%s-filereport.pickle' % (topleveldir,filehash))
    except:
        picklefile = open('%s/filereports/%s-filereport.pickle' % (topleveldir,filehash), 'wb')
        cPickle.dump(reports, picklefile)
        picklefile.close()
    return (filehash, list(set(newtags)))
def aggregatescan(unpackreports, scans, scantempdir, topleveldir, scan_binary, debug, unpacktempdir):
    ## aggregate scans look at the entire result and possibly modify it.
    ## The best example is JAR files: individual .class files will not be
    ## very significant (or even insignificant), but combined results are.
    ## Because aggregate scans have to look at everything as a whole, these
    ## cannot be run in parallel.
    if scans['batconfig'].has_key('processors'):
        processors = scans['batconfig']['processors']
    else:
        try:
            processors = multiprocessing.cpu_count()
        except NotImplementedError:
            processors = None
    filehash = unpackreports[scan_binary]['sha256']
    leaf_file_path = os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash)
    ## first record what the top level element is. This will be used by other scans
    leaf_file = open(leaf_file_path, 'rb')
    leafreports = cPickle.load(leaf_file)
    leaf_file.close()
    unpackreports[scan_binary]['tags'].append('toplevel')
    leafreports['tags'].append('toplevel')
    leaf_file = open(leaf_file_path, 'wb')
    ## NOTE(review): cPickle.dump returns None, so this assignment clobbers
    ## 'leafreports'; harmless here because it is reloaded before reuse
    leafreports = cPickle.dump(leafreports, leaf_file)
    leaf_file.close()
    ## loop variable shadows this function's name; kept as-is
    for aggregatescan in scans['aggregatescans']:
        module = aggregatescan['module']
        method = aggregatescan['method']
        scandebug = False
        if aggregatescan.has_key('debug'):
            scandebug = True
            debug = True
        if debug:
            print >>sys.stderr, "AGGREGATE BEGIN", method, datetime.datetime.utcnow().isoformat()
            sys.stderr.flush()
            scandebug = True
        ## exec/eval on configuration-supplied names; config must be trusted
        exec "from %s import %s as bat_%s" % (module, method, method)
        res = eval("bat_%s(unpackreports, scantempdir, topleveldir, processors, aggregatescan['environment'], scandebug=scandebug, unpacktempdir=unpacktempdir)" % (method))
        if res != None:
            if res.keys() != []:
                ## merge the aggregate scan's results into the pickled
                ## leaf report and tag the top level entry
                leaf_file = open(leaf_file_path, 'rb')
                leafreports = cPickle.load(leaf_file)
                leaf_file.close()
                for reskey in set(res.keys()):
                    leafreports[reskey] = res[reskey]
                    unpackreports[scan_binary]['tags'].append(reskey)
                    leafreports['tags'].append(reskey)
                leaf_file = open(leaf_file_path, 'wb')
                leafreports = cPickle.dump(leafreports, leaf_file)
                leaf_file.close()
        if debug:
            print >>sys.stderr, "AGGREGATE END", method, datetime.datetime.utcnow().isoformat()
def postrunscan((filetoscan, unpackreports, scans, scantempdir, topleveldir, debug)):
    ## Run all postrun scans for a single file. Takes one tuple argument
    ## (Python 2 style) for use with multiprocessing map-style APIs.
    ## Postrun results are currently discarded (see TODO below).
    for postrunscan in scans:
        ignore = False
        if postrunscan.has_key('extensionsignore'):
            extensionsignore = postrunscan['extensionsignore'].split(':')
            for e in extensionsignore:
                if filetoscan.endswith(e):
                    ignore = True
                    break
        if ignore:
            continue
        module = postrunscan['module']
        method = postrunscan['method']
        if debug:
            print >>sys.stderr, module, method, filetoscan, datetime.datetime.utcnow().isoformat()
            sys.stderr.flush()
        ## exec/eval on configuration-supplied names; config must be trusted
        exec "from %s import %s as bat_%s" % (module, method, method)
        res = eval("bat_%s(filetoscan, unpackreports, scantempdir, topleveldir, postrunscan['environment'], debug=debug)" % (method))
        ## TODO: find out what to do with this
        if res != None:
            pass
## arrays for storing data for the scans
## unpackscans: {name, module, method, ppoutput, priority}
## These are sorted by priority
## leafscans: {name, module, method, ppoutput}
def readconfig(config):
    ## Parse the BAT configuration (a ConfigParser object) into the global
    ## 'batconfig' settings plus per-type lists of scan descriptions.
    ## Returns a dict with keys: batconfig, unpackscans, leafscans,
    ## prerunscans, postrunscans, aggregatescans.
    unpackscans = []
    leafscans = []
    prerunscans = []
    postrunscans = []
    aggregatescans = []
    batconf = {}
    tmpbatconfdebug = set()
    ## first create an environment so every scan has the same one: a
    ## whitelisted subset of the current process environment
    oldenv = os.environ.copy()
    scanenv = {}
    for i in ['PATH', 'PWD', 'HOME', 'HOSTNAME', 'LANG', 'USER']:
        if i in oldenv:
            scanenv[i] = copy.deepcopy(oldenv[i])
    sectionstoprocess = set()
    ## process sections, make sure that the global configuration is
    ## always processed first.
    for section in config.sections():
        if section != "batconfig":
            sectionstoprocess.add(section)
            continue
        ## first set the environment
        newenv = copy.deepcopy(scanenv)
        try:
            ## global set of environment variables, colon-separated
            ## name=value pairs; malformed entries are silently skipped
            envvars = config.get(section, 'envvars')
            if envvars == None:
                pass
            else:
                for en in envvars.split(':'):
                    try:
                        (envname, envvalue) = en.split('=')
                        newenv[envname] = envvalue
                    except Exception, e:
                        pass
        except:
            pass
        batconf['environment'] = newenv
        try:
            mp = config.get(section, 'multiprocessing')
            if mp == 'yes':
                batconf['multiprocessing'] = True
            else:
                batconf['multiprocessing'] = False
        except:
            batconf['multiprocessing'] = False
        try:
            batconf['output'] = config.get(section, 'output')
            batconf['module'] = config.get(section, 'module')
            batconf['method'] = config.get(section, 'method')
        except:
            pass
        try:
            reporthash = config.get(section, 'reporthash')
            ## TODO: make more configurable, perform checks, etc. etc.
            if reporthash in ['sha256', 'sha1', 'md5', 'crc32']:
                batconf['reporthash'] = reporthash
        except:
            pass
        try:
            batconf['processors'] = int(config.get(section, 'processors'))
        except:
            pass
        try:
            extrapack = config.get(section, 'extrapack')
            batconf['extrapack'] = extrapack.split(':')
        except:
            batconf['extrapack'] = []
        try:
            scrub = config.get(section, 'scrub')
            batconf['scrub'] = scrub.split(':')
        except:
            batconf['scrub'] = []
        try:
            dbbackend = config.get(section, 'dbbackend')
            if dbbackend in ['sqlite3', 'postgresql']:
                batconf['dbbackend'] = dbbackend
                if dbbackend == 'postgresql':
                    try:
                        ## postgresql needs all three credentials; if any is
                        ## missing the backend setting is dropped again
                        postgresql_user = config.get(section, 'postgresql_user')
                        postgresql_password = config.get(section, 'postgresql_password')
                        postgresql_db = config.get(section, 'postgresql_db')
                        batconf['environment']['POSTGRESQL_USER'] = postgresql_user
                        batconf['environment']['POSTGRESQL_PASSWORD'] = postgresql_password
                        batconf['environment']['POSTGRESQL_DB'] = postgresql_db
                    except:
                        del batconf['dbbackend']
        except:
            pass
        try:
            reportendofphase = config.get(section, 'reportendofphase')
            if reportendofphase == 'yes':
                batconf['reportendofphase'] = True
            else:
                batconf['reportendofphase'] = False
        except:
            batconf['reportendofphase'] = False
        try:
            debug = config.get(section, 'debug')
            if debug == 'yes':
                batconf['debug'] = True
            else:
                batconf['debug'] = False
        except:
            batconf['debug'] = False
        try:
            debugphases = config.get(section, 'debugphases')
            if debugphases.strip() == "":
                batconf['debugphases'] = []
            else:
                batconf['debugphases'] = debugphases.split(':')
        except:
            batconf['debugphases'] = []
        try:
            outputlite = config.get(section, 'outputlite')
            if outputlite == 'yes':
                batconf['outputlite'] = True
            else:
                batconf['outputlite'] = False
        except:
            batconf['outputlite'] = False
        try:
            unpacktempdir = config.get(section, 'tempdir')
            if not os.path.isdir(unpacktempdir):
                batconf['tempdir'] = None
            else:
                batconf['tempdir'] = unpacktempdir
                ## TODO: try to create a temporary directory
                ## to see if the directory is writable
        except:
            batconf['tempdir'] = None
        try:
            template = config.get(section, 'template')
            ## check for certain values and reset template if necessary
            if '/' in template:
                template = None
                batconf['template'] = None
                continue
            if '%' in template:
                batconf['template'] = template
            ## NOTE(review): the final value is always template + "-%s",
            ## overwriting the assignment above -- looks suspicious, confirm
            ## against upstream intent
            template = template + "-%s"
            batconf['template'] = template
        except Exception, e:
            batconf['template'] = None
    ## now process the individual scan sections
    for section in sectionstoprocess:
        if config.has_option(section, 'type'):
            debug = False
            ## scans have to be explicitly enabled
            if not config.has_option(section, 'enabled'):
                continue
            if config.get(section, 'enabled') == 'no':
                continue
            conf = {}
            conf['module'] = config.get(section, 'module')
            conf['method'] = config.get(section, 'method')
            ## some scans might, or might not, have these defined
            try:
                conf['name'] = config.get(section, 'name')
            except:
                conf['name'] = section
            ## see if a dbbackend is defined. If not, check if the
            ## top level configuration has it defined.
            try:
                dbbackend = config.get(section, 'dbbackend')
                if dbbackend in ['sqlite3', 'postgresql']:
                    conf['dbbackend'] = dbbackend
                    if dbbackend == 'postgresql':
                        try:
                            postgresql_user = config.get(section, 'postgresql_user')
                            postgresql_password = config.get(section, 'postgresql_password')
                            postgresql_db = config.get(section, 'postgresql_db')
                            ## NOTE(review): conf['environment'] is only
                            ## assigned further below, so these writes raise
                            ## KeyError and the except deletes 'dbbackend' --
                            ## likely a latent bug, verify
                            conf['environment']['POSTGRESQL_USER'] = postgresql_user
                            conf['environment']['POSTGRESQL_PASSWORD'] = postgresql_password
                            conf['environment']['POSTGRESQL_DB'] = postgresql_db
                        except:
                            del conf['dbbackend']
            except:
                if 'dbbackend' in batconf:
                    conf['dbbackend'] = copy.deepcopy(batconf['dbbackend'])
                    dbbackend = conf['dbbackend']
                    if dbbackend in ['sqlite3', 'postgresql']:
                        conf['dbbackend'] = dbbackend
                        if dbbackend == 'postgresql':
                            try:
                                postgresql_user = config.get(section, 'postgresql_user')
                                postgresql_password = config.get(section, 'postgresql_password')
                                postgresql_db = config.get(section, 'postgresql_db')
                                conf['environment']['POSTGRESQL_USER'] = postgresql_user
                                conf['environment']['POSTGRESQL_PASSWORD'] = postgresql_password
                                conf['environment']['POSTGRESQL_DB'] = postgresql_db
                            except:
                                del conf['dbbackend']
            ## deal with the environment
            newenv = copy.deepcopy(scanenv)
            try:
                envvars = config.get(section, 'envvars')
                if envvars == None:
                    pass
                else:
                    for en in envvars.split(':'):
                        try:
                            (envname, envvalue) = en.split('=')
                            newenv[envname] = envvalue
                        except Exception, e:
                            print >>sys.stderr, "EXCEPTION", e
                            pass
            except:
                pass
            conf['environment'] = newenv
            try:
                conf['magic'] = config.get(section, 'magic')
            except:
                conf['magic'] = None
            try:
                conf['optmagic'] = config.get(section, 'optmagic')
            except:
                conf['optmagic'] = None
            try:
                conf['noscan'] = config.get(section, 'noscan')
            except:
                conf['noscan'] = None
            try:
                conf['scanonly'] = config.get(section, 'scanonly')
            except:
                conf['scanonly'] = None
            try:
                conf['extensionsignore'] = config.get(section, 'extensionsignore')
            except:
                pass
            try:
                scandebug = config.get(section, 'debug')
                if scandebug == 'yes':
                    debug = True
                    conf['debug'] = True
            except:
                pass
            try:
                parallel = config.get(section, 'parallel')
                if parallel == 'yes':
                    conf['parallel'] = True
                else:
                    conf['parallel'] = False
            except:
                conf['parallel'] = True
            try:
                conf['priority'] = int(config.get(section, 'priority'))
            except:
                conf['priority'] = 0
            try:
                conf['ppoutput'] = config.get(section, 'ppoutput')
            except:
                pass
            try:
                conf['ppmodule'] = config.get(section, 'ppmodule')
            except:
                pass
            try:
                conf['setup'] = config.get(section, 'setup')
            except:
                pass
            try:
                conf['conflicts'] = config.get(section, 'conflicts').split(':')
            except:
                pass
            ## some things only make sense in a particular context
            if config.get(section, 'type') == 'postrun' or config.get(section, 'type') == 'aggregate':
                try:
                    ## all three parameters should be there together
                    conf['storedir'] = config.get(section, 'storedir')
                    conf['storetarget'] = config.get(section, 'storetarget')
                    conf['storetype'] = config.get(section, 'storetype')
                    try:
                        cleanup = config.get(section, 'cleanup')
                        if cleanup == 'yes':
                            conf['cleanup'] = True
                        else:
                            conf['cleanup'] = False
                    except:
                        conf['cleanup'] = False
                except:
                    conf['storedir'] = None
                    conf['storetarget'] = None
                    conf['storetype'] = None
                    conf['cleanup'] = False
            ## dispatch the scan description to the list for its type
            if config.get(section, 'type') == 'leaf':
                leafscans.append(conf)
                if debug:
                    tmpbatconfdebug.add('leaf')
            elif config.get(section, 'type') == 'unpack':
                unpackscans.append(conf)
                if debug:
                    tmpbatconfdebug.add('unpack')
            elif config.get(section, 'type') == 'prerun':
                prerunscans.append(conf)
                if debug:
                    tmpbatconfdebug.add('prerun')
            elif config.get(section, 'type') == 'postrun':
                postrunscans.append(conf)
                if debug:
                    tmpbatconfdebug.add('postrun')
            elif config.get(section, 'type') == 'aggregate':
                aggregatescans.append(conf)
                if debug:
                    tmpbatconfdebug.add('aggregate')
    ## any per-scan debug flags extend the global debug phases
    if tmpbatconfdebug != set():
        tmpbatconfdebug.update(batconf['debugphases'])
        batconf['debugphases'] = list(tmpbatconfdebug)
    ## set and/or amend environment for prerun scans
    for s in prerunscans:
        if not 'environment' in s:
            s['environment'] = copy.deepcopy(scanenv)
        else:
            for e in batconf['environment']:
                if not e in s['environment']:
                    s['environment'][e] = copy.deepcopy(batconf['environment'][e])
        if 'dbbackend' in s:
            s['environment']['DBBACKEND'] = s['dbbackend']
    ## set and/or amend environment for unpack scans
    for s in unpackscans:
        if not 'environment' in s:
            s['environment'] = copy.deepcopy(scanenv)
        else:
            for e in batconf['environment']:
                if not e in s['environment']:
                    s['environment'][e] = copy.deepcopy(batconf['environment'][e])
        if 'dbbackend' in s:
            s['environment']['DBBACKEND'] = s['dbbackend']
    ## set and/or amend environment for leaf scans
    for s in leafscans:
        if not 'environment' in s:
            s['environment'] = copy.deepcopy(scanenv)
        else:
            for e in batconf['environment']:
                if not e in s['environment']:
                    s['environment'][e] = copy.deepcopy(batconf['environment'][e])
        if 'dbbackend' in s:
            s['environment']['DBBACKEND'] = s['dbbackend']
    ## set and/or amend environment for aggregate scans
    for s in aggregatescans:
        if not 'environment' in s:
            s['environment'] = copy.deepcopy(scanenv)
        else:
            for e in batconf['environment']:
                if not e in s['environment']:
                    s['environment'][e] = copy.deepcopy(batconf['environment'][e])
        if 'dbbackend' in s:
            s['environment']['DBBACKEND'] = s['dbbackend']
        if s['cleanup']:
            ## this is an ugly hack *cringe*
            s['environment']['overridedir'] = True
        if 'reporthash' in batconf:
            s['environment']['OUTPUTHASH'] = batconf['reporthash']
        if 'template' in batconf:
            s['environment']['TEMPLATE'] = batconf['template']
    ## set and/or amend environment for postrun scans
    for s in postrunscans:
        if not 'environment' in s:
            s['environment'] = copy.deepcopy(scanenv)
        else:
            for e in batconf['environment']:
                if not e in s['environment']:
                    s['environment'][e] = copy.deepcopy(batconf['environment'][e])
        if s['cleanup']:
            ## this is an ugly hack *cringe*
            s['environment']['overridedir'] = True
        if 'dbbackend' in s:
            s['environment']['DBBACKEND'] = s['dbbackend']
    ## sort scans on priority (highest priority first)
    prerunscans = sorted(prerunscans, key=lambda x: x['priority'], reverse=True)
    leafscans = sorted(leafscans, key=lambda x: x['priority'], reverse=True)
    aggregatescans = sorted(aggregatescans, key=lambda x: x['priority'], reverse=True)
    return {'batconfig': batconf, 'unpackscans': unpackscans, 'leafscans': leafscans, 'prerunscans': prerunscans, 'postrunscans': postrunscans, 'aggregatescans': aggregatescans}
def prettyprint(batconf, res, scandate, scans, toplevelfile, topleveldir):
    ## Run the configured output/pretty-printing method (batconfig 'module'
    ## and 'output' options) over the scan results and return its output.
    module = batconf['module']
    method = batconf['output']
    ## exec/eval on configuration-supplied names; config must be trusted
    exec "from %s import %s as bat_%s" % (module, method, method)
    output = eval("bat_%s(res, scandate, scans, toplevelfile, topleveldir, batconf['environment'])" % (method))
    return output
def dumpData(unpackreports, scans, tempdir):
    ## a dump of all the result contains:
    ## * a copy of all the unpacked data
    ## * whatever results from postrunscans that should be stored (defined in the config file)
    ## * a pickle of all data, it saves parsing the XML report (or any other format for that matter),
    ##   minus the data from the ranking scan
    ## * separate pickles of the data of the ranking scan
    sha256spack = set([])
    for p in unpackreports:
        if unpackreports[p].has_key('sha256'):
            sha256spack.add(unpackreports[p]['sha256'])
    oldstoredir = None
    oldlistdir = []
    for i in (scans['postrunscans'] + scans['aggregatescans']):
        ## use parameters from configuration file. This assumes that the names of the
        ## all output files of a particular scan start with the checksum of the scanned
        ## file and have a common suffix.
        if i['storedir'] != None and i['storetarget'] != None and i['storetype'] != None:
            if not os.path.exists(i['storedir']):
                continue
            if not os.path.exists(os.path.join(tempdir, i['storetarget'])):
                os.mkdir(os.path.join(tempdir, i['storetarget']))
            target = os.path.join(tempdir, i['storetarget'])
            copyfiles = []
            filetypes = i['storetype'].split(':')
            ## in case the storedir was also used in the previous run just reuse
            ## the data instead of rereading it using os.listdir.
            if oldstoredir == i['storedir']:
                listdir = oldlistdir
            else:
                listdir = os.listdir(i['storedir'])
                oldstoredir = i['storedir']
                oldlistdir = listdir
            for f in filetypes:
                dirlisting = filter(lambda x: x.endswith(f), listdir)
                ## apply a few filters to more efficiently grab only the files
                ## that are really needed. This pays off in case there are tons
                ## of files that need to be copied.
                dirfilter = set(map(lambda x: x.split('-')[0], dirlisting))
                inter = sha256spack.intersection(dirfilter)
                for s in inter:
                    ## NOTE(review): 'copyfiles' is reassigned per hash, so
                    ## the copy loop below must stay inside this loop for all
                    ## matching files to be copied -- confirm nesting against
                    ## upstream
                    copyfiles = filter(lambda x: s in x, dirlisting)
                    for c in copyfiles:
                        dirlisting.remove(c)
                    for c in set(copyfiles):
                        shutil.copy(os.path.join(i['storedir'], c), target)
                        if i['cleanup']:
                            try:
                                os.unlink(os.path.join(i['storedir'],c))
                            except Exception, e:
                                print >>sys.stderr, "dumpData: removing failed", c, e
        else:
            ## nothing will be dumped if one of the three parameters is missing
            pass
        ## Remove any results for which 'cleanup' has been set to True. For this at least 'storedir'
        ## and 'storetype' have to be specified and 'cleanup' has to be set to True. For example, this
        ## could be fluff from a previous run.
        if i['storedir'] != None and i['storetype'] != None and i['cleanup']:
            removefiles = []
            filetypes = i['storetype'].split(':')
            listdir = os.listdir(i['storedir'])
            for f in filetypes:
                dirlisting = filter(lambda x: x.endswith(f), listdir)
                for s in sha256spack:
                    removefiles = removefiles + filter(lambda x: x.startswith(s), dirlisting)
            for r in set(removefiles):
                try:
                    os.unlink(os.path.join(i['storedir'],r))
                except Exception, e:
                    print >>sys.stderr, "dumpData: removing failed", r, e
                    pass
    ## finally pickle the whole unpack report for later reloading
    picklefile = open(os.path.join(tempdir, 'scandata.pickle'), 'wb')
    cPickle.dump(unpackreports, picklefile)
    picklefile.close()
def compressPickle(infile):
    """Gzip-compress infile to infile.gz and remove the original.

    Used via multiprocessing map over the per-file report pickles.

    Fixes: the file is now streamed with shutil.copyfileobj instead of
    reading the whole pickle into memory at once, and both handles are
    closed via try/finally even if the copy fails. (The redundant
    parentheses around the single parameter were dropped; in Python 2
    'def f((x)):' is identical to 'def f(x):'.)
    """
    fin = open(infile, 'rb')
    try:
        fout = gzip.open("%s.gz" % infile, 'wb')
        try:
            ## chunked copy: constant memory regardless of pickle size
            shutil.copyfileobj(fin, fout)
        finally:
            fout.close()
    finally:
        fin.close()
    os.unlink(infile)
## Write everything to a dump file. A few directories that always should be
## packed are hardcoded, the other files are determined from the configuration.
## The configuration option 'lite' allows to leave out the extracted data, to
## speed up extraction of data in the GUI.
## Write everything to a dump file. A few directories that always should be
## packed are hardcoded, the other files are determined from the configuration.
## The configuration option 'lite' allows to leave out the extracted data, to
## speed up extraction of data in the GUI.
def writeDumpfile(unpackreports, scans, outputfile, configfile, tempdir, lite=False, debug=False):
    dumpData(unpackreports, scans, tempdir)
    dumpfile = tarfile.open(outputfile, 'w:gz')
    oldcwd = os.getcwd()
    ## NOTE(review): chdir is only restored at the end; an uncaught
    ## exception leaves the process in tempdir -- confirm acceptable
    os.chdir(tempdir)
    if scans['batconfig']['scrub'] != []:
        ## TODO pretty print the configuration file, scrubbed of
        ## any of the values in 'scrub'
        pass
    shutil.copy(configfile, '.')
    dumpfile.add('scandata.pickle')
    if scans['batconfig']['extrapack'] != []:
        for e in scans['batconfig']['extrapack']:
            ## only relative, non-symlink, existing paths are packed
            if os.path.isabs(e):
                continue
            if os.path.islink(e):
                continue
            ## TODO: many more checks
            if os.path.exists(e):
                dumpfile.add(e)
    if not lite:
        dumpfile.add('data')
    try:
        os.stat('filereports')
        ## compress pickle files in parallel
        filereports = os.listdir('filereports')
        if scans['batconfig'].has_key('processors'):
            pool = multiprocessing.Pool(processes=scans['batconfig']['processors'])
        else:
            pool = multiprocessing.Pool()
        fnames = map(lambda x: os.path.join(tempdir, "filereports", x), filereports)
        pool.map(compressPickle, fnames, 1)
        pool.terminate()
        dumpfile.add('filereports')
    except Exception,e:
        if debug:
            print >>sys.stderr, "writeDumpfile", e
            sys.stderr.flush()
    ## also pack the store targets of postrun/aggregate scans, if present
    dumpadds = set()
    for i in (scans['postrunscans'] + scans['aggregatescans']):
        if i['storedir'] != None and i['storetarget'] != None and i['storetype'] != None:
            try:
                os.stat(i['storetarget'])
                dumpadds.add(i['storetarget'])
            except Exception, e:
                if debug:
                    print >>sys.stderr, "writeDumpfile:", e
                    sys.stderr.flush()
        else:
            pass
    for i in dumpadds:
        dumpfile.add(i)
    dumpfile.close()
    os.chdir(oldcwd)
def runscan(scans, scan_binary):
    """Run the full scan pipeline on one binary.

    Phases, in order: prerun/unpack (parallel worker processes fed via
    queues), leaf scans (deduplicated by sha256, run via a Pool), aggregate
    scans, and postrun scans. Parallelism for each phase is controlled by the
    'batconfig' section and may be disabled per phase when debugging.

    :param scans: scan configuration dict with keys 'batconfig',
                  'prerunscans', 'unpackscans', 'leafscans',
                  'aggregatescans', 'postrunscans'
    :param scan_binary: path of the firmware/binary to scan
    :returns: tuple (topleveldir, unpackreports)

    NOTE(review): this is Python 2 code — map()/filter() return lists here,
    which later code relies on.
    """
    unpacktempdir = scans['batconfig']['tempdir']
    if unpacktempdir != None:
        if not os.path.exists(unpacktempdir):
            unpacktempdir = None
    try:
        ## test if unpacktempdir is actually writable
        topleveldir = tempfile.mkdtemp(dir=unpacktempdir)
    except:
        # fall back to the system default temporary directory
        unpacktempdir = None
        topleveldir = tempfile.mkdtemp(dir=unpacktempdir)
    os.makedirs("%s/data" % (topleveldir,))
    scantempdir = "%s/data" % (topleveldir,)
    shutil.copy(scan_binary, scantempdir)
    debug = scans['batconfig']['debug']
    debugphases = scans['batconfig']['debugphases']
    # collect the libmagic identifiers used by any of the configured scans
    magicscans = []
    optmagicscans = []
    for k in ["prerunscans", "unpackscans", "leafscans", "postrunscans"]:
        for s in scans[k]:
            if s['magic'] != None:
                magicscans = magicscans + s['magic'].split(':')
            if s['optmagic'] != None:
                optmagicscans = optmagicscans + s['optmagic'].split(':')
    magicscans = list(set(magicscans))
    optmagicscans = list(set(optmagicscans))
    ## Per binary scanned we get a list with results.
    ## Each file system or compressed file we can unpack gives a list with
    ## reports back as its result, so we have a list of lists
    ## within the inner list there is a result tuple, which could contain
    ## more lists in some fields, like libraries, or more result lists if
    ## the file inside a file system we looked at was in fact a file system.
    leaftasks = []
    unpackreports_tmp = []
    unpackreports = {}
    # per-phase debug flag: only honour 'debug' for the phases being debugged
    tmpdebug = False
    if debug:
        tmpdebug = True
        if debugphases != []:
            if not ('prerun' in debugphases or 'unpack' in debugphases):
                tmpdebug = False
    tags = []
    scantasks = [(scantempdir, os.path.basename(scan_binary), len(scantempdir), scantempdir, tmpdebug, tags)]
    ## Use multithreading to speed up scanning. Sometimes we hit http://bugs.python.org/issue9207
    ## Threading can be configured in the configuration file, but
    ## often it is wise to have it set to 'no'. This is because ranking writes
    ## to databases and you don't want concurrent writes.
    ## some categories of scans can still be run in parallel. For example
    ## if only one of the leaf scans has a side effect, then prerun, unpack
    ## and unpack scans can still be run in parallel.
    ## By setting 'multiprocessing' to 'yes' and indicating that some scans should
    ## not be run in parallel (which will be for the whole category of scans) it is
    ## possible to have partial parallel scanning.
    parallel = True
    if scans['batconfig']['multiprocessing']:
        if False in map(lambda x: x['parallel'], scans['unpackscans'] + scans['prerunscans']):
            parallel = False
    else:
        parallel = False
    if debug:
        if debugphases == []:
            parallel = False
        else:
            if 'unpack' in debugphases or 'prerun' in debugphases:
                parallel = False
    if parallel:
        if scans['batconfig'].has_key('processors'):
            processamount = min(multiprocessing.cpu_count(),scans['batconfig']['processors'])
        else:
            processamount = multiprocessing.cpu_count()
    else:
        processamount = 1
    template = scans['batconfig']['template']
    ## use a queue made with a manager to avoid some issues, see:
    ## http://docs.python.org/2/library/multiprocessing.html#pipes-and-queues
    if debug:
        print >>sys.stderr, "PRERUN UNPACK BEGIN", datetime.datetime.utcnow().isoformat()
    lock = Lock()
    scanmanager = multiprocessing.Manager()
    scanqueue = multiprocessing.JoinableQueue(maxsize=0)
    reportqueue = scanmanager.Queue(maxsize=0)
    leafqueue = scanmanager.Queue(maxsize=0)
    processpool = []
    hashdict = scanmanager.dict()
    # seed the work queue, then start the worker processes
    map(lambda x: scanqueue.put(x), scantasks)
    for i in range(0,processamount):
        p = multiprocessing.Process(target=scan, args=(scanqueue,reportqueue,leafqueue, scans['unpackscans'], scans['prerunscans'], magicscans, optmagicscans, i, hashdict, lock, template, unpacktempdir))
        processpool.append(p)
        p.start()
    scanqueue.join()
    # drain the result queues after all unpack tasks have been processed
    while True:
        try:
            val = reportqueue.get_nowait()
            unpackreports_tmp.append(val)
            reportqueue.task_done()
        except Queue.Empty, e:
            ## Queue is empty
            break
    while True:
        try:
            val = leafqueue.get_nowait()
            leaftasks.append(val)
            leafqueue.task_done()
        except Queue.Empty, e:
            ## Queue is empty
            break
    leafqueue.join()
    reportqueue.join()
    for p in processpool:
        p.terminate()
    if debug:
        print >>sys.stderr, "PRERUN UNPACK END", datetime.datetime.utcnow().isoformat()
    if scans['batconfig']['reportendofphase']:
        print "PRERUN UNPACK END %s" % os.path.basename(scan_binary), datetime.datetime.utcnow().isoformat()
    if debug:
        print >>sys.stderr, "LEAF BEGIN", datetime.datetime.utcnow().isoformat()
    poolresult = []
    tagdict = {}
    finalscans = []
    if scans['leafscans'] != []:
        if scans['batconfig']['multiprocessing']:
            parallel = True
        else:
            parallel = False
        tmpdebug=False
        if debug:
            tmpdebug = True
            if debugphases != []:
                if not 'leaf' in debugphases:
                    tmpdebug = False
        ## First run the 'setup' hooks for the scans and pass
        ## results via the environment. This should keep the
        ## code cleaner.
        for sscan in scans['leafscans']:
            if not sscan.has_key('setup'):
                finalscans.append(sscan)
                continue
            setupres = runSetup(sscan, tmpdebug)
            (setuprun, newenv) = setupres
            if not setuprun:
                # setup failed: drop this scan entirely
                continue
            ## 'parallel' can be used to modify whether or not the
            ## scans should be run in parallel. This is right now
            ## the only 'special' keyword.
            if newenv.has_key('parallel'):
                if newenv['parallel'] == False:
                    parallel = False
            sscan['environment'] = newenv
            finalscans.append(sscan)
        ## Sometimes there are identical files inside a blob.
        ## To minimize time spent on scanning these should only be
        ## scanned once. Since the results are independent anyway (the
        ## unpacking phase is where unique paths are determined after all)
        ## each sha256 can be scanned only once. If there are more files
        ## with the same sha256 the result can simply be copied.
        ##
        ## * keep a list of which sha256 have duplicates.
        ## * filter out the checksums
        ## * for each sha256 scan once
        ## * copy results in case there are duplicates
        sha256leaf = {}
        for i in leaftasks:
            if sha256leaf.has_key(i[-2]):
                sha256leaf[i[-2]].append(i[0])
            else:
                sha256leaf[i[-2]] = [i[0]]
        sha256_tmp = {}
        for i in sha256leaf:
            if len(sha256leaf[i]) > 0:
                sha256_tmp[i] = sha256leaf[i][0]
        # keep only the first task per sha256
        leaftasks_tmp = []
        for i in leaftasks:
            if sha256_tmp[i[-2]] == i[0]:
                leaftasks_tmp.append(i)
        ## reverse sort on size: scan largest files first
        leaftasks_tmp.sort(key=lambda x: x[-1], reverse=True)
        leaftasks_tmp = map(lambda x: x[:2] + (filterScans(finalscans, x[2]),) + x[2:-1] + (topleveldir, tmpdebug, unpacktempdir), leaftasks_tmp)
        if scans['batconfig']['multiprocessing']:
            if False in map(lambda x: x['parallel'], finalscans):
                parallel = False
        else:
            parallel = False
        if debug:
            if debugphases == []:
                parallel = False
            else:
                if 'leaf' in debugphases:
                    parallel = False
        if parallel:
            if scans['batconfig'].has_key('processors'):
                pool = multiprocessing.Pool(scans['batconfig']['processors'])
            else:
                pool = multiprocessing.Pool()
        else:
            pool = multiprocessing.Pool(processes=1)
        if not os.path.exists(os.path.join(topleveldir, 'filereports')):
            os.mkdir(os.path.join(topleveldir, 'filereports'))
        poolresult = pool.map(leafScan, leaftasks_tmp, 1)
        pool.terminate()
        ## filter the results for the leafscans. These are the ones that
        ## returned tags so need to be merged into unpackreports.
        mergetags = filter(lambda x: x[1] != [], poolresult)
        for m in mergetags:
            tagdict[m[0]] = m[1]
    dupes = []
    ## the result is a list of dicts which needs to be turned into one dict
    for i in unpackreports_tmp:
        for k in i:
            if i[k].has_key('tags'):
                ## the file is a duplicate, store for later
                if 'duplicate' in i[k]['tags']:
                    dupes.append(i)
                    continue
            unpackreports[k] = i[k]
    # rebuild reports for duplicate files from the scanned original
    for i in dupes:
        for k in i:
            dupesha256 = i[k]['sha256']
            origname = i[k]['name']
            origrealpath = i[k]['realpath']
            origpath = i[k]['path']
            ## keep: name, realpath, path, copy the rest of the original
            dupecopy = copy.deepcopy(unpackreports[hashdict[dupesha256]])
            dupecopy['name'] = origname
            dupecopy['path'] = origpath
            dupecopy['realpath'] = origrealpath
            dupecopy['tags'].append('duplicate')
            unpackreports[k] = dupecopy
    # merge the tags returned by the leaf scans into the unpack reports
    for i in unpackreports.keys():
        if not unpackreports[i].has_key('sha256'):
            continue
        unpacksha256 = unpackreports[i]['sha256']
        if tagdict.has_key(unpacksha256):
            if unpackreports[i].has_key('tags'):
                unpackreports[i]['tags'] = list(set(unpackreports[i]['tags'] + tagdict[unpacksha256]))
    if debug:
        print >>sys.stderr, "LEAF END", datetime.datetime.utcnow().isoformat()
    if scans['batconfig']['reportendofphase']:
        print "LEAF END %s" % os.path.basename(scan_binary), datetime.datetime.utcnow().isoformat()
    if debug:
        print >>sys.stderr, "AGGREGATE BEGIN", datetime.datetime.utcnow().isoformat()
    if scans['aggregatescans'] != []:
        tmpdebug=False
        if debug:
            tmpdebug = True
            if debugphases != []:
                if not 'aggregate' in debugphases:
                    tmpdebug = False
        aggregatescan(unpackreports, scans, scantempdir, topleveldir, os.path.basename(scan_binary), tmpdebug, unpacktempdir)
    if debug:
        print >>sys.stderr, "AGGREGATE END", datetime.datetime.utcnow().isoformat()
    if scans['batconfig']['reportendofphase']:
        print "AGGREGATE END %s" % os.path.basename(scan_binary), datetime.datetime.utcnow().isoformat()
    # deduplicate the tag lists
    for i in unpackreports:
        if unpackreports[i].has_key('tags'):
            unpackreports[i]['tags'] = list(set(unpackreports[i]['tags']))
    if debug:
        print >>sys.stderr, "POSTRUN BEGIN", datetime.datetime.utcnow().isoformat()
    ## run postrunscans here, again in parallel, if needed/wanted
    ## These scans typically only have a few side effects, but don't change
    ## the reporting/scanning, just process the results. Examples: generate
    ## fancier reports, use microblogging to post scan results, etc.
    ## Duplicates that are tagged as 'duplicate' are not processed.
    if scans['postrunscans'] != [] and unpackreports != {}:
        ## if unpackreports != {} since deduplication has already been done
        dedupes = filter(lambda x: 'duplicate' not in unpackreports[x]['tags'], filter(lambda x: unpackreports[x].has_key('tags'), filter(lambda x: unpackreports[x].has_key('sha256'), unpackreports.keys())))
        postrunscans = []
        for i in dedupes:
            ## results might have been changed by aggregate scans, so check if it still exists
            if unpackreports.has_key(i):
                tmpdebug = False
                if debug:
                    tmpdebug = True
                    if debugphases != []:
                        if not 'postrun' in debugphases:
                            tmpdebug = False
                postrunscans.append((i, unpackreports[i], scans['postrunscans'], scantempdir, topleveldir, tmpdebug))
        parallel = True
        if scans['batconfig']['multiprocessing']:
            if False in map(lambda x: x['parallel'], scans['postrunscans']):
                parallel = False
        else:
            parallel = False
        if debug:
            if debugphases == []:
                parallel = False
            else:
                if 'postrun' in debugphases:
                    parallel = False
        if parallel:
            if scans['batconfig'].has_key('processors'):
                pool = multiprocessing.Pool(scans['batconfig']['processors'])
            else:
                pool = multiprocessing.Pool()
        else:
            pool = multiprocessing.Pool(processes=1)
        postrunresults = pool.map(postrunscan, postrunscans, 1)
        pool.terminate()
    if debug:
        print >>sys.stderr, "POSTRUN END", datetime.datetime.utcnow().isoformat()
    if scans['batconfig']['reportendofphase']:
        print "POSTRUN END %s" % os.path.basename(scan_binary), datetime.datetime.utcnow().isoformat()
    return (topleveldir, unpackreports)
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from cosmo_tester.framework.testenv import TestCase
class MonitoringTest(TestCase):
    """System test: deploy the 'monitoring' blueprint and verify that the
    expected service name and metric value show up in the deployment outputs.
    """

    def test_monitoring(self):
        blueprint_path = self.copy_blueprint('monitoring')
        self.blueprint_yaml = blueprint_path / 'blueprint.yaml'
        expected_service_contains = 'example'
        expected_metric = 42.0
        self.upload_deploy_and_execute_install(inputs={
            'image_name': self.env.ubuntu_image_name,
            'flavor_name': self.env.flavor_name,
        })
        self.wait_for_expected_outputs(
            expected_service_contains,
            expected_metric,
            timeout=300)
        self.execute_uninstall()

    def wait_for_expected_outputs(self,
                                  expected_service_contains,
                                  expected_metric,
                                  timeout):
        """Poll the deployment outputs until the assertions hold or *timeout*
        (seconds) expires.
        """
        def assertion():
            outputs = self.client.deployments.outputs.get(self.test_id)
            outputs = outputs['outputs']
            # BUGFIX: 'service' may still be None before the agent reports;
            # assertIn on None raises TypeError instead of retrying, so fall
            # back to an empty string.
            self.assertIn(expected_service_contains, outputs['service'] or '')
            self.assertEqual(expected_metric, outputs['metric'])
        self.repetitive(assertion, timeout=timeout)
# fix test
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from cosmo_tester.framework.testenv import TestCase
class MonitoringTest(TestCase):
    """Deploys the 'monitoring' blueprint and checks its reported outputs."""

    def test_monitoring(self):
        blueprint_dir = self.copy_blueprint('monitoring')
        self.blueprint_yaml = blueprint_dir / 'blueprint.yaml'
        install_inputs = {
            'image_name': self.env.ubuntu_image_name,
            'flavor_name': self.env.flavor_name,
        }
        self.upload_deploy_and_execute_install(inputs=install_inputs)
        # the deployment should expose an 'example' service and a 42.0 metric
        self.wait_for_expected_outputs('example', 42.0, timeout=300)
        self.execute_uninstall()

    def wait_for_expected_outputs(self,
                                  expected_service_contains,
                                  expected_metric,
                                  timeout):
        """Repeatedly check the deployment outputs until they match or the
        timeout (in seconds) is reached.
        """
        def assertion():
            deployment_outputs = self.client.deployments.outputs.get(
                self.test_id)['outputs']
            # 'service' may be None until reported; substitute '' for assertIn
            self.assertIn(expected_service_contains,
                          deployment_outputs['service'] or '')
            self.assertEqual(expected_metric, deployment_outputs['metric'])
        self.repetitive(assertion, timeout=timeout)
|
# -*- coding: utf-8 -*-
import csv, datetime, json, logging, pprint
from django.http import HttpResponse
class CatalogingCSVwriter( object ):
    """Stub writer for the cataloging CSV download (response only, no rows yet)."""

    def __init__( self ):
        pass

    def get_csv_response( self, context ):
        """Return an empty UTF-8 CSV HttpResponse for the given context."""
        resp = HttpResponse( content_type='text/csv; charset=utf-8' )
        return resp
        ## end def get_csv_response()

## end class CatalogingCSVwriter()
# implements cataloging download.
# -*- coding: utf-8 -*-
import csv, datetime, json, logging, pprint
from django.http import HttpResponse
from tech_services_reports.lib.cataloging_report_view_helper import CatalogingReport
class CatalogingCSVwriter( object ):
    """Builds the cataloging-statistics report as a downloadable CSV attachment."""

    def __init__( self ):
        pass

    def get_csv_response( self, context ):
        """Return an HttpResponse containing the cataloging report as CSV.

        Expects `context` to provide 'report_header', 'start', 'end' and
        'last_updated'. The filename of the attachment carries the date range.
        """
        header = context['report_header']
        header_details = "%s to %s" % (context['start'], context['end'])
        # Prep CSV response with HTTP mimetype.
        response = HttpResponse( content_type='text/csv; charset=utf-8' )
        response['Content-Disposition'] = 'attachment; filename=cataloging_%s.csv'\
            % header_details.replace(' to ', '_')
        rw = csv.writer(response, dialect='excel')
        # Begin csv creation: title row, then a details row with last-updated.
        rw.writerow([header])
        header_details = [header_details]
        header_details += ['', '', 'Last updated: %s' % context['last_updated']]
        rw.writerow(header_details)
        # Report object that computes each breakdown for the date range.
        cr = CatalogingReport(context['start'], context['end'])
        # By format
        rw.writerow([])
        rw.writerow(['By format'])
        data = cr.by_format(report_format='csv')
        for row in data:
            rw.writerow(row)
        # By format and type
        rw.writerow([])
        rw.writerow(['By format and type'])
        data = cr.by_format_and_type(report_format='csv')
        for row in data:
            rw.writerow(row)
        # By cataloger
        rw.writerow([])
        rw.writerow(['By cataloger'])
        by_cataloger = cr.by_cataloger(report_format='csv')
        for row in by_cataloger:
            rw.writerow(row)
        # By cataloging edit type
        rw.writerow([])
        rw.writerow(['By cataloging edit type'])
        by_edit_type = cr.by_edit_type(report_format='csv')
        for row in by_edit_type:
            rw.writerow(row)
        # By cataloger and edit type
        # BUGFIX: this section writes by_cataloger_and_edit_type data but its
        # header row was a copy/paste of 'By cataloger and format'; corrected.
        rw.writerow([])
        rw.writerow(['By cataloger and edit type'])
        data = cr.by_cataloger_and_edit_type(report_format='csv')
        for row in data:
            rw.writerow(row)
        # By cataloger and format
        rw.writerow([])
        rw.writerow(['By cataloger and format'])
        data = cr.by_cataloger_and_format(report_format='csv')
        for row in data:
            rw.writerow(row)
        return response
        ## end def get_csv_response()

## end class CatalogingCSVwriter()
"""
from utility_code import CatalogingReport
import csv
report = context['report']
header = context['report_header']
header_details = "%s to %s" % (context['start'], context['end'])
# location_sort_order, format_sort_order = utility_code.load_sort_orders()
#Prep CSV response with HTTP mimetype.
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=cataloging_%s.csv'\
% header_details.replace(' to ', '_')
rw = csv.writer(response, dialect='excel')
#Begin csv creation
rw.writerow([header])
#Add last updated to header_details and convert to list
header_details = [header_details]
header_details += ['', '', 'Last updated: %s' % context['last_updated']]
rw.writerow(header_details)
#By type
#rw.writerow([])
#rw.writerow(['By cataloging type'])
#report object
cr = CatalogingReport(context['start'], context['end'])
#By type
#by_type_data = cr.by_type(report_format='csv')
#for row in by_type_data:
# rw.writerow(row)
#By format
rw.writerow([])
rw.writerow(['By format'])
#report object
data = cr.by_format(report_format='csv')
for row in data:
rw.writerow(row)
#By format and type
rw.writerow([])
rw.writerow(['By format and type'])
#report object
data = cr.by_format_and_type(report_format='csv')
for row in data:
rw.writerow(row)
#By cataloger
rw.writerow([])
rw.writerow(['By cataloger'])
by_cataloger = cr.by_cataloger(report_format='csv')
for row in by_cataloger:
rw.writerow(row)
#By cataloging edit type
rw.writerow([])
rw.writerow(['By cataloging edit type'])
by_cataloger = cr.by_edit_type(report_format='csv')
for row in by_cataloger:
rw.writerow(row)
#By cataloger and edit type
#By cataloger and format
rw.writerow([])
rw.writerow(['By cataloger and format'])
#report object
data = cr.by_cataloger_and_edit_type(report_format='csv')
for row in data:
rw.writerow(row)
#By cataloger and format
rw.writerow([])
rw.writerow(['By cataloger and format'])
#report object
data = cr.by_cataloger_and_format(report_format='csv')
for row in data:
rw.writerow(row)
return response
"""
|
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import, unicode_literals, print_function
import os
# Import Salt Testing libs
from tests.support.helpers import requires_network
from tests.support.paths import FILES
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
# Import Salt libs
import salt.utils.path
from tests.unit.modules.test_zcbuildout import Base, KNOWN_VIRTUALENV_BINARY_NAMES
import salt.modules.zcbuildout as modbuildout
import salt.states.zcbuildout as buildout
import salt.modules.cmdmod as cmd
ROOT = os.path.join(FILES, 'file/base/buildout')
@skipIf(salt.utils.path.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES) is None,
        "The 'virtualenv' packaged needs to be installed")
class BuildoutTestCase(Base):
    """Integration tests for salt.states.zcbuildout (buildout.installed)."""

    def setup_loader_modules(self):
        # Minimal __salt__ wiring so the state module can shell out and call
        # the buildout execution module.
        module_globals = {
            '__env__': 'base',
            '__opts__': {'test': False},
            '__salt__': {
                'cmd.run_all': cmd.run_all,
                'cmd.run': cmd.run,
                'cmd.retcode': cmd.retcode,
                'buildout.buildout': modbuildout.buildout,
            }
        }
        return {buildout: module_globals, modbuildout: module_globals}

    # I don't have the time to invest in learning more about buildout,
    # and given we don't have support yet, and there are other priorities
    # I'm going to punt on this for now - WW
    @requires_network()
    @skipIf(True, "Buildout is still in beta. Test needs fixing.")
    def test_quiet(self):
        c_dir = os.path.join(self.tdir, 'c')
        assert False, os.listdir(self.rdir)
        modbuildout.upgrade_bootstrap(c_dir)
        cret = buildout.installed(c_dir, python=self.py_st)
        self.assertFalse('OUTPUT:' in cret['comment'], cret['comment'])
        self.assertFalse('Log summary:' in cret['comment'], cret['comment'])
        self.assertTrue(cret['result'], cret['comment'])

    @requires_network()
    def test_error(self):
        b_dir = os.path.join(self.tdir, 'e')
        ret = buildout.installed(b_dir, python=self.py_st)
        self.assertTrue(
            'We did not get any expectable answer from buildout'
            in ret['comment']
        )
        self.assertFalse(ret['result'])

    @requires_network()
    def test_installed(self):
        b_dir = os.path.join(self.tdir, 'b')
        ret = buildout.installed(b_dir,
                                 python=self.py_st,
                                 onlyif=RUNTIME_VARS.SHELL_FALSE_PATH)
        self.assertEqual(ret['comment'], '\nonlyif condition is false')
        self.assertEqual(ret['result'], True)
        # BUGFIX: use os.sep instead of a hard-coded '/' so the assertion
        # also holds on Windows paths.
        self.assertTrue(os.sep + 'b' in ret['name'])
        b_dir = os.path.join(self.tdir, 'b')
        ret = buildout.installed(b_dir,
                                 python=self.py_st,
                                 unless=RUNTIME_VARS.SHELL_TRUE_PATH)
        self.assertEqual(ret['comment'], '\nunless condition is true')
        self.assertEqual(ret['result'], True)
        self.assertTrue(os.sep + 'b' in ret['name'])
        ret = buildout.installed(b_dir, python=self.py_st)
        self.assertEqual(ret['result'], True)
        self.assertTrue('OUTPUT:' in ret['comment'])
        self.assertTrue('Log summary:' in ret['comment'])
# Add ch3ll's changes
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import, unicode_literals, print_function
import os
# Import Salt Testing libs
from tests.support.helpers import requires_network
from tests.support.paths import FILES
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
# Import Salt libs
import salt.utils.path
from tests.unit.modules.test_zcbuildout import Base, KNOWN_VIRTUALENV_BINARY_NAMES
import salt.modules.zcbuildout as modbuildout
import salt.states.zcbuildout as buildout
import salt.modules.cmdmod as cmd
ROOT = os.path.join(FILES, 'file/base/buildout')
@skipIf(salt.utils.path.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES) is None,
        "The 'virtualenv' packaged needs to be installed")
class BuildoutTestCase(Base):
    """Exercises salt.states.zcbuildout against the fixture buildout trees."""

    def setup_loader_modules(self):
        # Shared dunder globals for both the state and execution modules.
        globs = {
            '__env__': 'base',
            '__opts__': {'test': False},
            '__salt__': {
                'cmd.run_all': cmd.run_all,
                'cmd.run': cmd.run,
                'cmd.retcode': cmd.retcode,
                'buildout.buildout': modbuildout.buildout,
            }
        }
        return {buildout: globs, modbuildout: globs}

    # I don't have the time to invest in learning more about buildout,
    # and given we don't have support yet, and there are other priorities
    # I'm going to punt on this for now - WW
    @requires_network()
    @skipIf(True, "Buildout is still in beta. Test needs fixing.")
    def test_quiet(self):
        quiet_dir = os.path.join(self.tdir, 'c')
        assert False, os.listdir(self.rdir)
        modbuildout.upgrade_bootstrap(quiet_dir)
        cret = buildout.installed(quiet_dir, python=self.py_st)
        self.assertNotIn('OUTPUT:', cret['comment'], cret['comment'])
        self.assertNotIn('Log summary:', cret['comment'], cret['comment'])
        self.assertTrue(cret['result'], cret['comment'])

    @requires_network()
    def test_error(self):
        # 'e' is the fixture tree that makes buildout fail
        err_dir = os.path.join(self.tdir, 'e')
        ret = buildout.installed(err_dir, python=self.py_st)
        self.assertIn(
            'We did not get any expectable answer from buildout',
            ret['comment'])
        self.assertFalse(ret['result'])

    @requires_network()
    def test_installed(self):
        target = os.path.join(self.tdir, 'b')
        name_fragment = os.sep + 'b'
        # onlyif=false: the state is a no-op but still succeeds
        ret = buildout.installed(target,
                                 python=self.py_st,
                                 onlyif=RUNTIME_VARS.SHELL_FALSE_PATH)
        self.assertEqual(ret['comment'], '\nonlyif condition is false')
        self.assertEqual(ret['result'], True)
        self.assertIn(name_fragment, ret['name'])
        # unless=true: also a successful no-op
        ret = buildout.installed(target,
                                 python=self.py_st,
                                 unless=RUNTIME_VARS.SHELL_TRUE_PATH)
        self.assertEqual(ret['comment'], '\nunless condition is true')
        self.assertEqual(ret['result'], True)
        self.assertIn(name_fragment, ret['name'])
        # unconditional run actually executes buildout
        ret = buildout.installed(target, python=self.py_st)
        self.assertEqual(ret['result'], True)
        self.assertIn('OUTPUT:', ret['comment'])
        self.assertIn('Log summary:', ret['comment'])
|
from subprocess import *
from time import sleep
from timeit import default_timer as clock
from tempfile import TemporaryFile
#from Queue import Queue
from collections import deque
from Tester import Tester
from signal import SIGTERM
import os, sys
## This class provides an interface to run commands in parallel
#
# To use this class, call the .run() method with the command and the test
# options. When the test is finished running it will call harness.testOutputAndFinish
# to complete the test. Be sure to call join() to make sure all the tests are finished.
#
class RunParallel:
## Return this return code if the process must be killed because of timeout
TIMEOUT = -999999
    def __init__(self, harness, max_processes=None, average_load=64.0):
        """Set up the job-slot bookkeeping and work queues.

        :param harness: TestHarness instance used for result callbacks
        :param max_processes: hard limit on concurrent job slots; None means
                              a soft limit of 1 (single jobs may exceed it)
        :param average_load: load average above which no new jobs are started
        """
        ## The test harness to run callbacks on
        self.harness = harness

        # Retrieve and store the TestHarness options for use in this object
        self.options = harness.getOptions()

        # For backwards compatibility the RunParallel class can be initialized
        # with no "max_processes" argument and it'll default to a soft limit.
        # If however a max_processes is passed we'll treat it as a hard limit.
        # The difference is whether or not we allow single jobs to exceed
        # the number of slots.
        if max_processes == None:
            self.soft_limit = True
            self.job_slots = 1
        else:
            self.soft_limit = False
            self.job_slots = max_processes # hard limit

        # Current slots in use
        self.slots_in_use = 0

        ## List of currently running jobs as (Popen instance, command, test, time when expires, slots) tuples
        # None means no job is running in this slot
        self.jobs = [None] * self.job_slots

        # Requested average load level to stay below
        self.average_load = average_load

        # queue for jobs needing a prereq
        self.queue = deque()

        # queue for jobs that are always too big (can run at the end if we have soft limits)
        self.big_queue = deque()

        # Jobs that have been finished
        self.finished_jobs = set()

        # List of skipped jobs to resolve prereq issues for tests that never run
        self.skipped_jobs = set()

        # Jobs we are reporting as taking longer then 10% of MAX_TIME
        self.reported_jobs = set()

        # Reporting timer which resets when ever data is printed to the screen.
        self.reported_timer = clock()
    ## run the command asynchronously and call testharness.testOutputAndFinish when complete
    def run(self, tester, command, recurse=True, slot_check=True):
        """Launch *command* for *tester* asynchronously, or queue it.

        A job is queued instead of launched when its prereqs are unfinished
        or when it does not currently fit in the available job slots.

        :param recurse: when True, first try to start previously queued jobs
        :param slot_check: when False, slot-size limits are ignored (used for
                           the oversized-job pass at the end of a run)
        """
        # First see if any of the queued jobs can be run but only if recursion is allowed on this run
        if recurse:
            self.startReadyJobs(slot_check)

        # Get the number of slots that this job takes
        slots = tester.getProcs(self.options) * tester.getThreads(self.options)

        # Is this job always too big?
        if slot_check and slots > self.job_slots:
            if self.soft_limit:
                # defer: oversized jobs run at the end under soft limits
                self.big_queue.append([tester, command, os.getcwd()])
            else:
                self.harness.handleTestResult(tester.specs, '', 'skipped (Insufficient slots)')
            return

        # Now make sure that this job doesn't have an unsatisfied prereq
        if tester.specs['prereq'] != None and len(set(tester.specs['prereq']) - self.finished_jobs) and self.options.pbs is None:
            self.queue.append([tester, command, os.getcwd()])
            return

        # Make sure we are complying with the requested load average
        self.satisfyLoad()

        # Wait for a job to finish if the jobs queue is full
        while self.jobs.count(None) == 0 or self.slots_in_use >= self.job_slots:
            self.spinwait()

        # Will this new job fit without exceeding the available job slots?
        if slot_check and self.slots_in_use + slots > self.job_slots:
            self.queue.append([tester, command, os.getcwd()])
            return

        # Pre-run preperation
        tester.prepare()

        job_index = self.jobs.index(None) # find an empty slot
        log( 'Command %d started: %s' % (job_index, command) )

        # It seems that using PIPE doesn't work very well when launching multiple jobs.
        # It deadlocks rather easy. Instead we will use temporary files
        # to hold the output as it is produced
        try:
            if self.options.dry_run or not tester.shouldExecute():
                # substitute a harmless command but remember the real one
                tmp_command = command
                command = "echo"
            f = TemporaryFile()
            # On Windows, there is an issue with path translation when the command is passed in
            # as a list.
            p = Popen(command,stdout=f,stderr=f,close_fds=False, shell=True)
            if self.options.dry_run or not tester.shouldExecute():
                command = tmp_command
        except:
            print "Error in launching a new task"
            raise

        self.jobs[job_index] = (p, command, tester, clock(), f, slots)
        self.slots_in_use = self.slots_in_use + slots
    def startReadyJobs(self, slot_check):
        """Re-attempt every currently queued job once, from its own directory.

        Each job is popped and re-submitted via run() (with recursion off to
        avoid mutual recursion); run() will re-queue it if it still cannot
        start.
        """
        queue_items = len(self.queue)
        for i in range(0, queue_items):
            (tester, command, dirpath) = self.queue.popleft()
            saved_dir = os.getcwd()
            sys.path.append(os.path.abspath(dirpath))
            os.chdir(dirpath)
            # We want to avoid "dual" recursion so pass a False flag here
            self.run(tester, command, recurse=False, slot_check=slot_check)
            os.chdir(saved_dir)
            sys.path.pop()
    ## Return control to the test harness by finalizing the test output and calling the callback
    def returnToTestHarness(self, job_index):
        """Finish the job in slot *job_index* (normally or by timeout),
        report its output to the harness, and free the slot.
        """
        (p, command, tester, time, f, slots) = self.jobs[job_index]

        log( 'Command %d done: %s' % (job_index, command) )
        did_pass = True

        output = 'Working Directory: ' + tester.specs['test_dir'] + '\nRunning command: ' + command + '\n'
        output += self.readOutput(f)
        if p.poll() == None: # process has not completed, it timed out
            output += '\n' + "#"*80 + '\nProcess terminated by test harness. Max time exceeded (' + str(tester.specs['max_time']) + ' seconds)\n' + "#"*80 + '\n'
            f.close()
            os.kill(p.pid, SIGTERM) # Python 2.4 compatibility
            #p.terminate() # Python 2.6+

            if not self.harness.testOutputAndFinish(tester, RunParallel.TIMEOUT, output, time, clock()):
                did_pass = False
        else:
            f.close()

            if tester in self.reported_jobs:
                # mark long-running tests that were reported as RUNNING...
                tester.specs.addParam('caveats', ['FINISHED'], "")

            if not self.harness.testOutputAndFinish(tester, p.returncode, output, time, clock()):
                did_pass = False

        if did_pass:
            self.finished_jobs.add(tester.specs['test_name'])
        else:
            # record the failure so dependent tests can be skipped
            self.skipped_jobs.add(tester.specs['test_name'])

        self.jobs[job_index] = None
        self.slots_in_use = self.slots_in_use - slots
    ## Don't return until one of the running processes exits.
    #
    # When a process exits (or times out) call returnToTestHarness and return from
    # this function.
    def spinwait(self, time_to_wait=0.05):
        """Poll the running jobs once; reap any finished or timed-out job.

        Sleeps for *time_to_wait* seconds only when no slot was freed, so
        callers loop on this to wait for capacity.
        """
        now = clock()
        job_index = 0
        slot_freed = False
        for tuple in self.jobs:
            if tuple != None:
                (p, command, tester, start_time, f, slots) = tuple
                if p.poll() != None or now > (start_time + float(tester.specs['max_time'])):
                    # finish up as many jobs as possible, don't sleep until
                    # we've cleared all of the finished jobs
                    self.returnToTestHarness(job_index)
                    # We just output to the screen so reset the test harness "activity" timer
                    self.reported_timer = now
                    slot_freed = True
                    # We just reset the timer so no need to check if we've been waiting for awhile in
                    # this iteration

                # Has the TestHarness done nothing for awhile
                elif now > (self.reported_timer + 10.0):
                    # Has the current test been previously reported?
                    if tester not in self.reported_jobs:
                        if tester.specs.isValid('min_reported_time'):
                            start_min_threshold = start_time + float(tester.specs['min_reported_time'])
                        else:
                            # default: report after 10% of the allowed max_time
                            start_min_threshold = start_time + (0.1 * float(tester.specs['max_time']))

                        threshold = max(start_min_threshold, (0.1 * float(tester.specs['max_time'])))

                        if now >= threshold:
                            self.harness.handleTestResult(tester.specs, '', 'RUNNING...', start_time, now, False)
                            self.reported_jobs.add(tester)
                            self.reported_timer = now

            job_index += 1

        if not slot_freed:
            sleep(time_to_wait)
    def satisfyLoad(self):
        """Block (via spinwait) while jobs are running and the system load
        average is at or above the requested limit.
        """
        # Get the current load average, or zero if it isn't available for some reason (such as being
        # run on a non-posix operating system)
        loadAverage = 0.0
        try:
            loadAverage = os.getloadavg()[0]
        except AttributeError:
            pass      # getloadavg() not available in this implementation of os

        # We'll always run at least one job regardless of load or we'll starve!
        # NOTE(review): loadAverage is read once and never refreshed inside the
        # loop, so this effectively waits for running jobs to finish rather
        # than for the load to drop — confirm whether that is intended.
        while self.jobs.count(None) < len(self.jobs) and loadAverage >= self.average_load:
#            print "DEBUG: Sleeping... ", len(self.jobs) - self.jobs.count(None), " jobs running (load average: ", os.getloadavg()[0], ")\n"
            self.spinwait(0.5) # If the load average is high we'll sleep longer here to let things clear out
#        print "DEBUG: Ready to run (load average: ", os.getloadavg()[0], ")\n"
## Wait until all processes are done, then return
def join(self):
    """Block until every running and queued job has been handled.

    After draining the running slots, oversized jobs are run (soft limit
    only), then any remaining queued jobs are resolved as slot-starved,
    skipped-dependency, or cyclic/invalid-dependency failures.
    """
    # Drain everything currently running, launching queued work as slots free.
    while self.jobs.count(None) != len(self.jobs):
        self.spinwait()
        self.startReadyJobs(slot_check=True)

    # At this point there are no running jobs but there may still be jobs in queue
    # for three reasons:
    # 1) There are testers that require more slots than were available for this run.
    # 2) There is a tester that is waiting on a prereq that was skipped.
    # 3) There is an invalid or cyclic dependency in one or more test specifications

    # Handle the first case if the user has not explicitly provided a jobs argument
    # We'll allow larger jobs if the TestHarness is run without any jobs argument
    if len(self.big_queue) and self.soft_limit:
        print "\nOversized Jobs:\n"

        # Dump the big jobs into the front of the queue
        self.queue.extendleft(self.big_queue)

        # Run the queue again without the slot check
        self.startReadyJobs(slot_check=False)
        while self.jobs.count(None) != len(self.jobs):
            self.spinwait()
            self.startReadyJobs(slot_check=False)

    # If we had a soft limit then we'll have run the oversized jobs but we still
    # have three cases (see note above) of jobs left to handle. We'll do that here
    if len(self.queue) != 0:
        keep_going = True
        # Keep sweeping the queue until a full pass skips nothing new.
        while keep_going:
            keep_going = False
            queue_items = len(self.queue)
            for i in range(0, queue_items):
                (tester, command, dirpath) = self.queue.popleft()
                slots = tester.getProcs(self.options) * tester.getThreads(self.options)
                # If the user is running the script with no options, we'll just exceed the slots for
                # these remaining big jobs. Otherwise, we'll skip them
                if not self.soft_limit and slots > self.job_slots:
                    self.harness.handleTestResult(tester.specs, '', 'skipped (Insufficient slots)')
                    self.skipped_jobs.add(tester.specs['test_name'])
                    keep_going = True
                # Do we have unsatisfied dependencies left?
                elif len(set(tester.specs['prereq']) & self.skipped_jobs):
                    self.harness.handleTestResult(tester.specs, '', 'skipped (skipped dependency)')
                    self.skipped_jobs.add(tester.specs['test_name'])
                    keep_going = True
                # We need to keep trying in case there is a chain of unresolved dependencies
                # and we hit them out of order in this loop
                else:
                    self.queue.append([tester, command, dirpath])

        # Anything left is a cyclic dependency
        if len(self.queue) != 0:
            print "\nCyclic or Invalid Dependency Detected!"
            for (tester, command, dirpath) in self.queue:
                print tester.specs['test_name']
            sys.exit(1)
# This function reads output from the file (i.e. the test output)
# but trims it down to the specified size. It'll save the first two thirds
# of the requested size and the last third trimming from the middle
def readOutput(self, f, max_size=100000):
    """Return up to roughly ``max_size`` characters of output from ``f``.

    Keeps the first two thirds and the last third of the requested size,
    dropping the middle and inserting a banner where text was trimmed.
    Returns an empty string when separate output files were requested
    (--sep-files), since the output lives on disk in that case.
    """
    first_part = int(max_size*(2.0/3.0))
    second_part = int(max_size*(1.0/3.0))
    output = ''

    f.seek(0)
    if self.harness.options.sep_files != True:
        output = f.read(first_part)     # Read the head of the output
        if len(output) == first_part:   # This means we didn't read the whole file yet
            output += "\n" + "#"*80 + "\n\nOutput trimmed\n\n" + "#"*80 + "\n"
            f.seek(-second_part, 2)     # Jump to the start of the tail portion
            if (f.tell() <= first_part):  # Don't re-read some of what you've already read
                # BUG FIX: resume exactly where the head ended. The previous
                # code seeked to first_part + 1, silently dropping the single
                # character at offset first_part.
                f.seek(first_part, 0)
            output += f.read()          # Now read the rest
    return output
# Add a skipped job to the list
def jobSkipped(self, name):
    """Record ``name`` as a skipped test so prereq resolution can see it."""
    self.skipped_jobs |= {name}
## Static logging string for debugging
# Module-level list of accumulated log messages, shared by all users.
LOG = []
# Master switch: when False, log() is a no-op.
LOG_ON = False

# Append ``msg`` to the module-level LOG and echo it to stdout,
# but only when LOG_ON is enabled.
def log(msg):
    if LOG_ON:
        LOG.append(msg)
        print msg
Create a process group for each test to allow for easier killing.

Closes #7293.
from subprocess import *
from time import sleep
from timeit import default_timer as clock
from tempfile import TemporaryFile
#from Queue import Queue
from collections import deque
from Tester import Tester
from signal import SIGTERM
import platform
import os, sys
## This class provides an interface to run commands in parallel
#
# To use this class, call the .run() method with the command and the test
# options. When the test is finished running it will call harness.testOutputAndFinish
# to complete the test. Be sure to call join() to make sure all the tests are finished.
#
class RunParallel:

    ## Return this return code if the process must be killed because of timeout
    TIMEOUT = -999999

    def __init__(self, harness, max_processes=None, average_load=64.0):
        """Set up slot bookkeeping and job queues.

        harness       -- the TestHarness that receives result callbacks
        max_processes -- hard limit on job slots; None means a soft limit of
                         one slot where oversized jobs may still run at the end
        average_load  -- system load average above which launching pauses
        """
        ## The test harness to run callbacks on
        self.harness = harness

        # Retrieve and store the TestHarness options for use in this object
        self.options = harness.getOptions()

        # For backwards compatibility the RunParallel class can be initialized
        # with no "max_processes" argument and it'll default to a soft limit.
        # If however a max_processes is passed we'll treat it as a hard limit.
        # The difference is whether or not we allow single jobs to exceed
        # the number of slots.
        if max_processes == None:
            self.soft_limit = True
            self.job_slots = 1
        else:
            self.soft_limit = False
            self.job_slots = max_processes # hard limit

        # Current slots in use
        self.slots_in_use = 0

        ## List of currently running jobs as (Popen instance, command, test, time when expires, slots) tuples
        # None means no job is running in this slot
        self.jobs = [None] * self.job_slots

        # Requested average load level to stay below
        self.average_load = average_load

        # queue for jobs needing a prereq
        self.queue = deque()

        # queue for jobs that are always too big (can run at the end if we have soft limits)
        self.big_queue = deque()

        # Jobs that have been finished
        self.finished_jobs = set()

        # List of skipped jobs to resolve prereq issues for tests that never run
        self.skipped_jobs = set()

        # Jobs we are reporting as taking longer then 10% of MAX_TIME
        self.reported_jobs = set()

        # Reporting timer which resets when ever data is printed to the screen.
        self.reported_timer = clock()
## run the command asynchronously and call testharness.testOutputAndFinish when complete
def run(self, tester, command, recurse=True, slot_check=True):
    """Launch ``command`` for ``tester`` asynchronously.

    recurse    -- when True, first try to start previously queued jobs
    slot_check -- when False, allow this job to exceed the slot limit
    Jobs with unmet prereqs, or too large for the free slots, are queued
    instead of launched.
    """
    # First see if any of the queued jobs can be run but only if recursion is allowed on this run
    if recurse:
        self.startReadyJobs(slot_check)

    # Get the number of slots that this job takes
    slots = tester.getProcs(self.options) * tester.getThreads(self.options)

    # Is this job always too big?
    if slot_check and slots > self.job_slots:
        if self.soft_limit:
            self.big_queue.append([tester, command, os.getcwd()])
        else:
            self.harness.handleTestResult(tester.specs, '', 'skipped (Insufficient slots)')
        return

    # Now make sure that this job doesn't have an unsatisfied prereq
    if tester.specs['prereq'] != None and len(set(tester.specs['prereq']) - self.finished_jobs) and self.options.pbs is None:
        self.queue.append([tester, command, os.getcwd()])
        return

    # Make sure we are complying with the requested load average
    self.satisfyLoad()

    # Wait for a job to finish if the jobs queue is full
    while self.jobs.count(None) == 0 or self.slots_in_use >= self.job_slots:
        self.spinwait()

    # Will this new job fit without exceeding the available job slots?
    if slot_check and self.slots_in_use + slots > self.job_slots:
        self.queue.append([tester, command, os.getcwd()])
        return

    # Pre-run preparation
    tester.prepare()

    job_index = self.jobs.index(None) # find an empty slot
    log( 'Command %d started: %s' % (job_index, command) )

    # It seems that using PIPE doesn't work very well when launching multiple jobs.
    # It deadlocks rather easy. Instead we will use temporary files
    # to hold the output as it is produced
    try:
        # For dry runs (or testers that should not execute) substitute a
        # harmless command, restoring the original afterwards for bookkeeping.
        if self.options.dry_run or not tester.shouldExecute():
            tmp_command = command
            command = "echo"

        f = TemporaryFile()
        # On Windows, there is an issue with path translation when the command is passed in
        # as a list.
        if platform.system() == "Windows":
            p = Popen(command,stdout=f,stderr=f,close_fds=False, shell=True, creationflags=CREATE_NEW_PROCESS_GROUP)
        else:
            # Start the child in its own process group so the whole process
            # tree can be killed together on timeout (see returnToTestHarness).
            p = Popen(command,stdout=f,stderr=f,close_fds=False, shell=True, preexec_fn=os.setsid)

        if self.options.dry_run or not tester.shouldExecute():
            command = tmp_command
    except:
        print "Error in launching a new task"
        raise

    self.jobs[job_index] = (p, command, tester, clock(), f, slots)
    self.slots_in_use = self.slots_in_use + slots
def startReadyJobs(self, slot_check):
    """Make one pass over the pending queue, attempting to launch each
    queued job from its own directory.

    Jobs that still cannot run are re-queued by run() itself, so each
    entry is examined exactly once per call.
    """
    pending = len(self.queue)
    for _ in range(pending):
        tester, command, dirpath = self.queue.popleft()
        original_dir = os.getcwd()
        sys.path.append(os.path.abspath(dirpath))
        os.chdir(dirpath)
        # recurse=False prevents run() from calling back into this method.
        self.run(tester, command, recurse=False, slot_check=slot_check)
        os.chdir(original_dir)
        sys.path.pop()
## Return control to the test harness by finalizing the test output and calling the callback
def returnToTestHarness(self, job_index):
    """Finalize the job in slot ``job_index``: collect its output, kill it
    if it timed out, report the result, and free the slot."""
    (p, command, tester, time, f, slots) = self.jobs[job_index]

    log( 'Command %d done: %s' % (job_index, command) )
    did_pass = True

    output = 'Working Directory: ' + tester.specs['test_dir'] + '\nRunning command: ' + command + '\n'
    output += self.readOutput(f)

    if p.poll() == None: # process has not completed, it timed out
        output += '\n' + "#"*80 + '\nProcess terminated by test harness. Max time exceeded (' + str(tester.specs['max_time']) + ' seconds)\n' + "#"*80 + '\n'
        f.close()
        if platform.system() == "Windows":
            p.terminate()
        else:
            # Kill the entire process group (the child was started with
            # os.setsid), so grandchildren die too.
            pgid = os.getpgid(p.pid)
            os.killpg(pgid, SIGTERM)

        if not self.harness.testOutputAndFinish(tester, RunParallel.TIMEOUT, output, time, clock()):
            did_pass = False
    else:
        f.close()
        # Tests previously reported as RUNNING... get a FINISHED caveat.
        if tester in self.reported_jobs:
            tester.specs.addParam('caveats', ['FINISHED'], "")

        if not self.harness.testOutputAndFinish(tester, p.returncode, output, time, clock()):
            did_pass = False

    if did_pass:
        self.finished_jobs.add(tester.specs['test_name'])
    else:
        # Treat failures like skips so dependent tests are skipped as well.
        self.skipped_jobs.add(tester.specs['test_name'])

    self.jobs[job_index] = None
    self.slots_in_use = self.slots_in_use - slots
## Don't return until one of the running processes exits.
#
# When a process exits (or times out) call returnToTestHarness and return from
# this function.
def spinwait(self, time_to_wait=0.05):
    """Poll the running jobs once: reap every finished or timed-out job,
    report long-running ones, and sleep ``time_to_wait`` seconds only if
    nothing was reaped."""
    now = clock()

    job_index = 0
    slot_freed = False
    # NOTE(review): "tuple" shadows the builtin of the same name.
    for tuple in self.jobs:
        if tuple != None:
            (p, command, tester, start_time, f, slots) = tuple
            # Reap the job if its process exited or its max_time expired.
            if p.poll() != None or now > (start_time + float(tester.specs['max_time'])):
                # finish up as many jobs as possible, don't sleep until
                # we've cleared all of the finished jobs
                self.returnToTestHarness(job_index)
                # We just output to the screen so reset the test harness "activity" timer
                self.reported_timer = now
                slot_freed = True
                # We just reset the timer so no need to check if we've been waiting for awhile in
                # this iteration

            # Has the TestHarness done nothing for awhile
            elif now > (self.reported_timer + 10.0):
                # Has the current test been previously reported?
                if tester not in self.reported_jobs:
                    # Report once the test has run past min_reported_time,
                    # or past 10% of its max_time when no minimum is given.
                    if tester.specs.isValid('min_reported_time'):
                        start_min_threshold = start_time + float(tester.specs['min_reported_time'])
                    else:
                        start_min_threshold = start_time + (0.1 * float(tester.specs['max_time']))

                    threshold = max(start_min_threshold, (0.1 * float(tester.specs['max_time'])))

                    if now >= threshold:
                        self.harness.handleTestResult(tester.specs, '', 'RUNNING...', start_time, now, False)
                        self.reported_jobs.add(tester)
                        self.reported_timer = now

        job_index += 1

    if not slot_freed:
        sleep(time_to_wait)
def satisfyLoad(self):
    """Throttle job launching: wait while the sampled system load average
    meets or exceeds self.average_load and at least one job is running."""
    # Sample the load once up front. Non-posix implementations of os have
    # no getloadavg(); fall back to zero so we never throttle there.
    load_now = 0.0
    if hasattr(os, 'getloadavg'):
        load_now = os.getloadavg()[0]

    # We always keep at least one job running regardless of load,
    # otherwise we would starve.
    while self.jobs.count(None) < len(self.jobs) and load_now >= self.average_load:
        self.spinwait(0.5)  # longer sleep here to let things clear out
## Wait until all processes are done, then return
def join(self):
    """Block until every running and queued job has been handled.

    After draining the running slots, oversized jobs are run (soft limit
    only), then any remaining queued jobs are resolved as slot-starved,
    skipped-dependency, or cyclic/invalid-dependency failures.
    """
    # Drain everything currently running, launching queued work as slots free.
    while self.jobs.count(None) != len(self.jobs):
        self.spinwait()
        self.startReadyJobs(slot_check=True)

    # At this point there are no running jobs but there may still be jobs in queue
    # for three reasons:
    # 1) There are testers that require more slots than were available for this run.
    # 2) There is a tester that is waiting on a prereq that was skipped.
    # 3) There is an invalid or cyclic dependency in one or more test specifications

    # Handle the first case if the user has not explicitly provided a jobs argument
    # We'll allow larger jobs if the TestHarness is run without any jobs argument
    if len(self.big_queue) and self.soft_limit:
        print "\nOversized Jobs:\n"

        # Dump the big jobs into the front of the queue
        self.queue.extendleft(self.big_queue)

        # Run the queue again without the slot check
        self.startReadyJobs(slot_check=False)
        while self.jobs.count(None) != len(self.jobs):
            self.spinwait()
            self.startReadyJobs(slot_check=False)

    # If we had a soft limit then we'll have run the oversized jobs but we still
    # have three cases (see note above) of jobs left to handle. We'll do that here
    if len(self.queue) != 0:
        keep_going = True
        # Keep sweeping the queue until a full pass skips nothing new.
        while keep_going:
            keep_going = False
            queue_items = len(self.queue)
            for i in range(0, queue_items):
                (tester, command, dirpath) = self.queue.popleft()
                slots = tester.getProcs(self.options) * tester.getThreads(self.options)
                # If the user is running the script with no options, we'll just exceed the slots for
                # these remaining big jobs. Otherwise, we'll skip them
                if not self.soft_limit and slots > self.job_slots:
                    self.harness.handleTestResult(tester.specs, '', 'skipped (Insufficient slots)')
                    self.skipped_jobs.add(tester.specs['test_name'])
                    keep_going = True
                # Do we have unsatisfied dependencies left?
                elif len(set(tester.specs['prereq']) & self.skipped_jobs):
                    self.harness.handleTestResult(tester.specs, '', 'skipped (skipped dependency)')
                    self.skipped_jobs.add(tester.specs['test_name'])
                    keep_going = True
                # We need to keep trying in case there is a chain of unresolved dependencies
                # and we hit them out of order in this loop
                else:
                    self.queue.append([tester, command, dirpath])

        # Anything left is a cyclic dependency
        if len(self.queue) != 0:
            print "\nCyclic or Invalid Dependency Detected!"
            for (tester, command, dirpath) in self.queue:
                print tester.specs['test_name']
            sys.exit(1)
# This function reads output from the file (i.e. the test output)
# but trims it down to the specified size. It'll save the first two thirds
# of the requested size and the last third trimming from the middle
def readOutput(self, f, max_size=100000):
    """Return up to roughly ``max_size`` characters of output from ``f``.

    Keeps the first two thirds and the last third of the requested size,
    dropping the middle and inserting a banner where text was trimmed.
    Returns an empty string when separate output files were requested
    (--sep-files), since the output lives on disk in that case.
    """
    first_part = int(max_size*(2.0/3.0))
    second_part = int(max_size*(1.0/3.0))
    output = ''

    f.seek(0)
    if self.harness.options.sep_files != True:
        output = f.read(first_part)     # Read the head of the output
        if len(output) == first_part:   # This means we didn't read the whole file yet
            output += "\n" + "#"*80 + "\n\nOutput trimmed\n\n" + "#"*80 + "\n"
            f.seek(-second_part, 2)     # Jump to the start of the tail portion
            if (f.tell() <= first_part):  # Don't re-read some of what you've already read
                # BUG FIX: resume exactly where the head ended. The previous
                # code seeked to first_part + 1, silently dropping the single
                # character at offset first_part.
                f.seek(first_part, 0)
            output += f.read()          # Now read the rest
    return output
# Add a skipped job to the list
def jobSkipped(self, name):
    """Register ``name`` as skipped so dependent tests are skipped too."""
    self.skipped_jobs.update((name,))
## Static logging string for debugging
# Module-level list of accumulated log messages, shared by all users.
LOG = []
# Master switch: when False, log() is a no-op.
LOG_ON = False

# Append ``msg`` to the module-level LOG and echo it to stdout,
# but only when LOG_ON is enabled.
def log(msg):
    if LOG_ON:
        LOG.append(msg)
        print msg
|
from nltk import corpus
from collections import Counter
from math import log, pow, sqrt
from re import sub
class PreprocessLog:
    """Preprocess an auth log file into per-event tf-idf representations."""

    def __init__(self, logfile):
        # Path of the log file to process.
        self.logfile = logfile
        # Raw lines read from the log file (filled by read_log()).
        self.logs = []
        # Number of lines in the log file.
        self.loglength = 0
        # Every event in file order (may contain duplicates).
        self.events_list = []
        # One entry per distinct preprocessed event, with tf-idf metadata.
        self.events_unique = []
def read_log(self):
    """Load every line of the log file into memory and record the count."""
    with open(self.logfile, 'r') as handle:
        lines = handle.readlines()
    self.logs = lines
    self.loglength = len(lines)
def get_logs(self):
    """Return the raw log lines read by read_log()."""
    return self.logs

def get_loglength(self):
    """Return the number of lines in the log file."""
    return self.loglength
def get_wordindocs(self, word, docs):
    """Return, as a float, the number of documents that contain ``word``.

    Matching is by substring, mirroring the original counting loop.
    """
    return float(sum(1 for document in docs if word in document))
def get_tfidf(self, doc, total_docs, docs):
    """Return ``(preprocessed_doc, tfidf)`` for one event string.

    ``doc`` is stripped of non-letters and stop words, then each remaining
    word is scored with normalized term frequency times inverse document
    frequency computed over ``docs``.
    """
    # Remove numbers/punctuation. This also turns underscores into spaces,
    # so no separate underscore handling is needed.
    doc = sub('[^a-zA-Z]', ' ', doc)
    additional_stopwords = ['preauth', 'from', 'xxxxx', 'for', 'port', 'sshd', 'ssh']
    for a in additional_stopwords:
        doc = doc.replace(a, '')
    doc = ' '.join(doc.split())
    stopwords = corpus.stopwords.words('english')
    stopwords_result = [w.lower() for w in doc.split() if w.lower() not in stopwords]

    # count word frequency (tf)
    tf = Counter(stopwords_result)
    words_total = len(stopwords_result)
    tfidf = []
    for t in tf.most_common():
        normalized_tf = float(t[1]) / float(words_total)  # normalized word frequency
        wid = self.get_wordindocs(t[0], docs)  # word occurrence across all documents
        # BUG FIX: a processed token may never occur verbatim in the raw
        # documents, making wid zero; fall back to an idf of 1 instead of
        # crashing with a ZeroDivisionError.
        try:
            idf = 1 + log(total_docs / wid)  # calculate idf
        except ZeroDivisionError:
            idf = 1
        tfidf_val = normalized_tf * idf  # calculate tf-idf
        tfidf.append((t[0], tfidf_val))
    return doc, tfidf
def get_doclength(self, tfidf):
    """Return the Euclidean norm of the tf-idf weight vector
    (used for cosine similarity)."""
    return sqrt(sum(weight ** 2 for _, weight in tfidf))
def do_preprocess(self):
    """Read the log file and build self.events_list / self.events_unique.

    Each log line becomes an event string; distinct preprocessed events are
    collected with their tf-idf vector, vector length, member line indices,
    frequency, and first/last timestamps.
    """
    # read log file
    self.read_log()

    # convert to lower, count total logs; the first five whitespace-separated
    # fields of each line (timestamp/host prefix) are dropped
    logs_lower = [' '.join(l.lower().split()[5:]) for l in self.logs[:]]
    logs_total = self.loglength

    # preprocess logs, add to ordinary list and unique list
    events_list, events_unique = [], []
    index, index_log = 0, 0
    for l in logs_lower:
        auth_split = l.split()
        # event type is the first field with any "[pid]" suffix removed
        event_type, event_desc = auth_split[0].split('[')[0], ' '.join(auth_split[1:])
        event = event_type + ' ' + event_desc
        events_list.append(event)
        preprocessed_event, tfidf = self.get_tfidf(event, logs_total, logs_lower)
        check_events_unique = [e[1]['preprocessed_event'] for e in events_unique]
        # if not exist, add new element
        if preprocessed_event not in check_events_unique:
            length = self.get_doclength(tfidf)
            events_unique.append([index, {'event': event, 'tf-idf': tfidf, 'length': length, 'status': '',
                                          'cluster': 0, 'frequency': 1, 'member': [index_log],
                                          'preprocessed_event':preprocessed_event}])
            index += 1
        # if exist, increment the frequency
        else:
            for e in events_unique:
                if preprocessed_event == e[1]['preprocessed_event']:
                    member = e[1]['member']
                    member.append(index_log)
                    e[1]['member'] = member
                    e[1]['frequency'] += 1
        index_log += 1

    # get inter-arrival time of unique event: collect the timestamp prefix
    # (first three fields) of every raw line containing the event text
    timestamps = {}
    for e in events_unique:
        timestamps[e[1]['event']] = [' '.join(l.split()[:3]) for l in self.logs
                                     if e[1]['event'] in ' '.join(l.lower().split())]
    # NOTE(review): iteritems() is Python 2 only.
    for e in events_unique:
        for k, v in timestamps.iteritems():
            if e[1]['event'] == k:
                e[1]['start'], e[1]['end'] = v[0], v[-1]
    self.events_list = events_list
    self.events_unique = events_unique
def get_eventslist(self):
    """Return every event in file order (may contain duplicates)."""
    return self.events_list

def get_eventsunique(self):
    """Return the unique-event entries built by do_preprocess()."""
    return self.events_unique
Add exception handling for the idf calculation to avoid a ZeroDivisionError.
from nltk import corpus
from collections import Counter
from math import log, pow, sqrt
from re import sub
class PreprocessLog:
    """Turn an auth log file into per-event tf-idf representations."""

    def __init__(self, logfile):
        # Path of the log file to process.
        self.logfile = logfile
        # Raw lines read from the log file (filled by read_log()).
        self.logs = []
        # Number of lines in the log file.
        self.loglength = 0
        # Every event in file order (may contain duplicates).
        self.events_list = []
        # One entry per distinct preprocessed event, with tf-idf metadata.
        self.events_unique = []
def read_log(self):
    """Read the whole log file into self.logs and cache the line count."""
    with open(self.logfile, 'r') as log_handle:
        contents = log_handle.readlines()
    self.logs = contents
    self.loglength = len(contents)
def get_logs(self):
    """Return the raw log lines read by read_log()."""
    return self.logs

def get_loglength(self):
    """Return the number of lines in the log file."""
    return self.loglength
def get_wordindocs(self, word, docs):
    """Return, as a float, how many documents contain ``word``
    (substring match)."""
    hits = 0
    for entry in docs:
        if word in entry:
            hits += 1
    return float(hits)
def get_tfidf(self, doc, total_docs, docs):
    """Return ``(preprocessed_doc, tfidf)`` for one event string.

    ``doc`` is stripped of non-letters and stop words, then each remaining
    word is scored with normalized term frequency times inverse document
    frequency computed over ``docs``.
    """
    # remove number, stopwords
    doc = sub('[^a-zA-Z]', ' ', doc)
    additional_stopwords = ['preauth', 'from', 'xxxxx', 'for', 'port', 'sshd', 'ssh']
    for a in additional_stopwords:
        doc = doc.replace(a, '')
    # NOTE(review): the return value of replace() is discarded, so this line
    # is a no-op; underscores were already turned into spaces by the sub()
    # call above.
    doc.replace('_', ' ')
    doc = ' '.join(doc.split())
    stopwords = corpus.stopwords.words('english')
    stopwords_result = [w.lower() for w in doc.split() if w.lower() not in stopwords]

    # count word frequency (tf)
    tf = Counter(stopwords_result)
    words_total = len(stopwords_result)
    tfidf = []
    for t in tf.most_common():
        normalized_tf = float(t[1]) / float(words_total) # normalized word frequency
        wid = self.get_wordindocs(t[0], docs) # calculate word occurrence in all documents
        # A processed token may never occur verbatim in the raw documents,
        # making wid zero; fall back to an idf of 1 rather than crashing.
        try:
            idf = 1 + log(total_docs / wid) # calculate idf
        except ZeroDivisionError:
            idf = 1
        tfidf_val = normalized_tf * idf # calculate tf-idf
        tfidf.append((t[0], tfidf_val))
    return doc, tfidf
def get_doclength(self, tfidf):
    """Compute the vector length (Euclidean norm) of the tf-idf weights,
    as needed for cosine similarity."""
    squared_sum = 0
    for _, weight in tfidf:
        squared_sum += pow(weight, 2)
    return sqrt(squared_sum)
def do_preprocess(self):
    """Read the log file and build self.events_list / self.events_unique.

    Each log line becomes an event string; distinct preprocessed events are
    collected with their tf-idf vector, vector length, member line indices,
    frequency, and first/last timestamps.
    """
    # read log file
    self.read_log()

    # convert to lower, count total logs; the first five whitespace-separated
    # fields of each line (timestamp/host prefix) are dropped
    logs_lower = [' '.join(l.lower().split()[5:]) for l in self.logs[:]]
    logs_total = self.loglength

    # preprocess logs, add to ordinary list and unique list
    events_list, events_unique = [], []
    index, index_log = 0, 0
    for l in logs_lower:
        auth_split = l.split()
        # event type is the first field with any "[pid]" suffix removed
        event_type, event_desc = auth_split[0].split('[')[0], ' '.join(auth_split[1:])
        event = event_type + ' ' + event_desc
        events_list.append(event)
        preprocessed_event, tfidf = self.get_tfidf(event, logs_total, logs_lower)
        check_events_unique = [e[1]['preprocessed_event'] for e in events_unique]
        # if not exist, add new element
        if preprocessed_event not in check_events_unique:
            # Debug/progress output (Python 2 print statement).
            print index, preprocessed_event
            length = self.get_doclength(tfidf)
            events_unique.append([index, {'event': event, 'tf-idf': tfidf, 'length': length, 'status': '',
                                          'cluster': 0, 'frequency': 1, 'member': [index_log],
                                          'preprocessed_event':preprocessed_event}])
            index += 1
        # if exist, increment the frequency
        else:
            for e in events_unique:
                if preprocessed_event == e[1]['preprocessed_event']:
                    member = e[1]['member']
                    member.append(index_log)
                    e[1]['member'] = member
                    e[1]['frequency'] += 1
        index_log += 1

    # get inter-arrival time of unique event: collect the timestamp prefix
    # (first three fields) of every raw line containing the event text
    timestamps = {}
    for e in events_unique:
        timestamps[e[1]['event']] = [' '.join(l.split()[:3]) for l in self.logs
                                     if e[1]['event'] in ' '.join(l.lower().split())]
    # NOTE(review): iteritems() is Python 2 only.
    for e in events_unique:
        for k, v in timestamps.iteritems():
            if e[1]['event'] == k:
                e[1]['start'], e[1]['end'] = v[0], v[-1]
    self.events_list = events_list
    self.events_unique = events_unique
def get_eventslist(self):
    """Return every event in file order (may contain duplicates)."""
    return self.events_list

def get_eventsunique(self):
    """Return the unique-event entries built by do_preprocess()."""
    return self.events_unique
|
#!/usr/bin/env python
from pupa.utils import JSONEncoderPlus
from contextlib import contextmanager
from pymongo import Connection
import argparse
import json
import os
# Command-line options: Mongo connection parameters plus the dump root.
parser = argparse.ArgumentParser(description='Re-convert a jurisdiction.')
parser.add_argument('--server', type=str, help='Mongo Server',
                    default="localhost")
parser.add_argument('--database', type=str, help='Mongo Database',
                    default="opencivicdata")
parser.add_argument('--port', type=int, help='Mongo Server Port',
                    default=27017)
parser.add_argument('--output', type=str, help='Output Directory',
                    default="dump")
parser.add_argument('root', type=str, help='root', default='dump')
args = parser.parse_args()

# Open the Mongo connection and select the requested database by name.
connection = Connection(args.server, args.port)
db = getattr(connection, args.database)
# BUG FIX: removed `jurisdiction = args.jurisdiction`. No --jurisdiction
# argument is ever defined on the parser, so that attribute access raised
# AttributeError on every run, and the value was never used below.
@contextmanager
def cd(path):
    """Context manager: chdir into ``path`` (yielding it) and always
    restore the previous working directory on exit."""
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield path
    finally:
        os.chdir(previous)
# Change into the requested root directory; for now this only echoes the
# working directory (placeholder for the real restore logic).
with cd(args.root):
    print os.getcwd()
Add more to the restore script.
#!/usr/bin/env python
from pupa.utils import JSONEncoderPlus
from contextlib import contextmanager
from pymongo import Connection
import argparse
import json
import sys
import os
# Command-line options: Mongo connection parameters plus the dump root.
parser = argparse.ArgumentParser(description='Re-convert a jurisdiction.')
parser.add_argument('--server', type=str, help='Mongo Server',
                    default="localhost")
parser.add_argument('--database', type=str, help='Mongo Database',
                    default="opencivicdata")
parser.add_argument('--port', type=int, help='Mongo Server Port',
                    default=27017)
parser.add_argument('--output', type=str, help='Output Directory',
                    default="dump")
parser.add_argument('root', type=str, help='root', default='dump')
args = parser.parse_args()

# Open the Mongo connection and select the requested database by name.
connection = Connection(args.server, args.port)
db = getattr(connection, args.database)

# Maps the prefix of an ocd id (the part before the first '/') to the
# collection that stores that entity type; used by insert() below.
TABLES = {
    "ocd-jurisdiction": db.jurisdictions,
    "ocd-bill": db.bills,
    "ocd-organization": db.organizations,
    "ocd-person": db.people,
    "ocd-vote": db.votes,
}
@contextmanager
def cd(path):
    """Temporarily run with ``path`` as the current working directory.

    Yields the target path and guarantees the original directory is
    restored even if the body raises.
    """
    return_to = os.getcwd()
    os.chdir(path)
    try:
        yield path
    finally:
        os.chdir(return_to)
def insert(obj):
    """Save ``obj`` into the collection matching its ocd id prefix.

    Echoes one progress character (the first letter of the entity type)
    so long restores show activity.
    """
    id_ = obj['_id']
    etype, _ = id_.split("/", 1)
    progress_char = etype.split("-")[1][0].lower()
    sys.stdout.write(progress_char)
    sys.stdout.flush()
    return TABLES[etype].save(obj)
# Restore every dumped JSON document found under the given root directory.
with cd(args.root):
    # OK. Let's load stuff up.
    for path, dirs, nodes in os.walk("."):
        for entry in (os.path.join(path, x) for x in nodes):
            # NOTE(review): this file handle is never closed explicitly;
            # it is only reclaimed by garbage collection.
            data = json.load(open(entry, 'r'))
            insert(data)
|
http->https for element queries
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.