# CombinedText stringlengths 4 3.42M |
# |---|
# -*- coding: UTF-8 -*-
from flask import render_template, redirect, request, flash, abort
from flask.ext.login import login_user, logout_user, current_user, login_required
from flask.ext.babel import gettext
from wtforms.validators import NumberRange
from app import app, babel, db, lm
import json
from datetime import datetime
from .forms import *
from config import LANGUAGES, SERVER_IP, BABEL_DEFAULT_LOCALE, WEEKDAYS
from .system import *
from .data import *
@babel.localeselector
def get_locale():
    """Pick the UI locale for the current request (currently pinned to 'pl')."""
    #return request.accept_languages.best_match(LANGUAGES.keys())
    supported = ['pl']
    return request.accept_languages.best_match(supported)
@app.errorhandler(400)
def catch_server_errors(e):
    """On a 400 (e.g. a rejected login redirect target) send the client home."""
    return redirect('/')
#@app.before_request
#def before_request():
# print(request.remote_addr)
# pass
@lm.user_loader
def load_user(id):
    """Flask-Login user loader: resolve a stored session id to a User object."""
    # NOTE(review): `id` shadows the builtin; `User` comes from a wildcard import.
    return User.get(id)
def next_is_valid(next):
    """Return True when *next* is a safe post-login redirect target.

    A missing target (None) is accepted; anything else must match one of
    the known application pages once surrounding slashes are removed.
    """
    if next is None:
        return True
    allowed = {'solar', 'tank', 'circulation', 'heater',
               'schedule', 'options', 'dashboard', 'index'}
    return next.strip('/') in allowed
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in via LoginForm; already-authenticated users go to '/'.

    On a valid POST the credentials are checked against the stored user
    record and, on success, the browser is redirected to the validated
    `next` page (or the dashboard).
    """
    if current_user.is_authenticated:
        print("redirecting")
        return redirect('/')
    form = LoginForm()
    if form.validate_on_submit():
        #user = User.get(request.form['username'])
        user = User.get(form.username.data)
        # NOTE(review): passwords are compared in plain text -- they appear
        # to be stored unhashed; confirm and consider hashing.
        #if (user and user.password == request.form['password']):
        if (user and user.password == form.password.data):
            #login_user(user)
            login_user(user, remember = form.remember.data)
            # Open-redirect guard: only follow whitelisted targets.
            next = request.args.get('next')
            if not next_is_valid(next):
                return abort(400)
            return redirect(next or '/')
        else:
            flash(gettext('Username or password incorrect'))
    return render_template('forms/login.html',
                           active='login',
                           title=gettext('Log in'),
                           form=form)
@app.route("/logout")
@login_required
def logout():
logout_user()
return redirect('/')
@app.route('/')
@app.route('/index')
@app.route('/dashboard')
def dashboard():
    """Render the main dashboard with an initial data snapshot."""
    # dashboard_data() is called directly here (returns a dict); the page
    # then refreshes itself via POST /dashboard/get_data.
    return render_template("content/dashboard.html",
                           active='dashboard',
                           title='',
                           refresh_rate=0.5,
                           data=dashboard_data())
#@app.route('/change-schedule_override_temp', methods=['GET', 'POST'])
#@app.route('/set-room-temp', methods=['GET', 'POST'])
#def room_temp():
# # get those data from SQL(name):
# slider = {'min' : 17,
# 'max' : 28,
# 'value' : round(float(get_SQL_value(SolarControlHeaterSettingsExpected)),1),
# 'step' : 0.1,
# 'unit' : u'°C'}
# description = {'title' : gettext('Example modal'),
# 'info' : gettext('move it'),
# 's_info' : gettext('Temperature') + ':',
# 'cancel' : gettext('Cancel'),
# 'submit' : gettext('Save')}
#
# form = RangeForm()
# from wtforms.validators import NumberRange
# form.slider.validate(form,[NumberRange(slider['min'],slider['max'])])
#
# if form.validate_on_submit():
# val = request.form['slider']
# print(val)
# return redirect('/')
#
# return render_template("forms/modal-range.html",
# action=request.path,
# slider=slider,
# desc=description,
# form=form)
@app.route('/status')
def status():
    """Render the sensor/state overview page.

    Pulls one snapshot of all sensor data and formats it into two lists:
    numeric 'sensors' rows and on/off 'states' rows for the template.
    """
    sensors = get_full_data('sensors','all')
    data = {'sensors' : [
        {'title' : gettext('Outside temperature'), 'name' : 'outside_temp',
         'unit' : u'°C', 'value' : sensors['outside']['real_temp']},
        {'title' : gettext('Inside temperature'), 'name' : 'inside_temperature',
         'unit' : u'°C', 'value' : sensors['room']['inside_temperature'] },
        {'title' : gettext('Apparent temperature'), 'name' : 'apparent_temperature',
         'unit' : u'°C', 'value' : sensors['room']['apparent_temperature'] },
        {'title' : gettext('Humidity'), 'name' : 'humidity',
         'unit' : u'%', 'value' : sensors['room']['humidity'] },
        {'title' : gettext('Pressure'), 'name' : 'pressure',
         'unit' : u'hPa', 'value' : sensors['room']['pressure'] },
        {'title' : gettext('Solar temperature'), 'name' : 'solar_temp',
         'unit' : u'°C', 'value' : sensors['solar']['temp']},
        {'title' : gettext('Solar input'), 'name' : 'solar_in',
         'unit' : u'°C', 'value' : sensors['solar']['temp_in'] },
        {'title' : gettext('Solar output'), 'name' : 'solar_out',
         'unit' : u'°C', 'value' : sensors['solar']['temp_out'] },
        {'title' : gettext('Solar difference'), 'name' : 'solar_diff',
         'unit' : u'°C', 'value' : sensors['solar']['temp_diff'] },
        {'title' : gettext('Tank'), 'name' : 'tank_up',
         'unit' : u'°C', 'value' : sensors['tank']['temp_up'] },
        {'title' : gettext('Heater input'), 'name' : 'heater_in',
         'unit' : u'°C', 'value' : sensors['heater']['temp_in'] },
        {'title' : gettext('Heater output'), 'name' : 'heater_out',
         'unit' : u'°C', 'value' : sensors['heater']['temp_out'] },
        # NOTE(review): msgid typo 'diffrence' kept -- fixing it would break
        # the existing translation catalog entry.
        {'title' : gettext('Heater diffrence'), 'name' : 'heater_diff',
         'unit' : u'°C', 'value' : sensors['heater']['temp_diff'] }],
        'states' : [
        {'title' : gettext('Burner'), 'name' : 'burner',
         'value' : gettext('ON') if sensors['state']['burner'] else gettext('OFF') },
        {'title' : gettext('DHW/CH actuator'), 'name' : 'heater_switch',
         'value' : gettext('DHW') if sensors['state']['heater_switch'] else gettext('CH') },
        {'title' : gettext('Solar pump'), 'name' : 'solar_pump',
         'value' : gettext('ON') if sensors['state']['solar_pump'] else gettext('OFF') },
        {'title' : gettext('Solar system actuator'), 'name' : 'solar_switch',
         'value' : gettext('ON') if sensors['state']['solar_switch'] else gettext('OFF') },
        {'title' : gettext('Solar circuit flow'), 'name' : 'solar_flow',
         'value' : sensors['solar']['flow'] },
        # Bug fix: was gettext(gettext('OFF')) -- a double translation call.
        {'title' : gettext('Circulation'), 'name' : 'circulation',
         'value' : gettext('ON') if sensors['state']['circulation'] else gettext('OFF') } ]
        }
    title=gettext('Sensors data')
    return render_template("/content/status.html",
                           active='status',
                           data=data,
                           title=title)
#@app.route('/scheme')
#def scheme():
# return render_template("/content/scheme.html",
# active='scheme',
# data=get_full_data('sensors','all'),
# title=gettext('Scheme'))
@app.route('/heater/')
@app.route('/tank/')
@app.route('/solar/')
@app.route('/circulation/')
@login_required
def data_rows():
    """Render the settings rows for one subsystem page.

    The subsystem name (heater/tank/solar/circulation) is recovered from
    the request path; `order` selects which settings rows to display and
    in which order.
    """
    uri = request.base_url.replace(request.url_root,'').replace('/','')
    data = []
    # Bug fix: defensive defaults.  The routes above restrict `uri`, but
    # without these an unexpected path raised UnboundLocalError on
    # `order`/`title` below.
    order = []
    title = ''
    if uri == 'circulation':
        order = ['time_on', 'interval']
        title = gettext('Circulation')
    elif uri == 'solar':
        order = ['critical','temp_on','temp_off']
        data = refresh_data('solar','temp')
        title = gettext('Solar')
    elif uri == 'tank':
        order = ['solar_max', 'heater_max', 'heater_min']
        data = refresh_data('tank','temp_up')
        title = gettext('Water')
    elif uri == 'heater':
        order = ['expected', 'critical', 'hysteresis']
        title = gettext('Heater')
    data += get_description(order,get_full_data('settings',uri))
    return render_template("data_rows.html",
                           active=uri,
                           data=data,
                           refresh_rate=0.5,
                           #refresh_rate=settings['refresh_rate'],
                           title=title)
@app.route('/schedule/change', methods=['POST'])
@login_required
def schedule_validate():
    """Receive the heater schedule as JSON, normalise it and persist it.

    Incoming "HH:MM" strings become [hour, minute] int pairs, weekday flags
    are coerced to 0/1 and temperatures are rounded to two decimals before
    the JSON blob is stored via change_setting().
    """
    print("----SCHEDULE RECEIVED---")
    data = request.get_json(force=True)
    #TODO exceptions
    print(data)
    try:
        data['other'] = round(float(data['other']),2)
        for i in range(7):
            data['week'][i] = int(bool(int(data['week'][i])))
        for day in ('work','free'):
            for i in range(len(data[day])):
                for when in ('from', 'to'):
                    time = data[day][i][when].split(":")
                    # IndexError here (missing ':') is swallowed below.
                    data[day][i][when] = [int(time[0]), int(time[1])]
                data[day][i]['temp'] = round(float(data[day][i]['temp']),2)
    except (KeyError,IndexError):
        # Malformed payloads are stored as-is; TODO: reject with a 400.
        pass
        #return schedule(error=True)
    change_setting('"'+json.dumps(data,separators=(',', ':'))+'"','schedule','heater')
    print("--NEW SCHEDULE POSTED--")
    #return schedule()
    # Bug fix: this view returned None, which Flask rejects with a 500;
    # answer the AJAX caller with an empty 204 instead.
    return ('', 204)
@app.route('/schedule', methods=['GET','POST'])
@login_required
def schedule():
    """Render the heater schedule editor.

    Loads the stored schedule (or a zeroed default when none exists) and
    computes whether a temporary temperature override is still active.
    """
    save = True
    schedule = get_data('schedule','heater','settings')
    if schedule is None:
        # Nothing stored yet: show an empty editable schedule but disable
        # saving until the user supplies one.
        save=False
        schedule = {"week":[0,0,0,0,0,0,0],
                    "work":[{"to":[0,0],"from":[0,0],"temp":0}],
                    "free":[{"to":[0,0],"from":[0,0],"temp":0}],
                    "other":0}
    try:
        # An override stays active for `duration` minutes after its start;
        # KeyError means no override entry exists in the schedule blob.
        diff = datetime.now() - datetime(*schedule['override']['start'])
        duration = schedule['override']['duration']
        if diff.seconds < duration * 60:
            override_temp = schedule['override']['temp']
        else:
            override_temp = None
    except KeyError:
        override_temp = None
    # Three editor tabs: work-day table, free-day table, weekday flags.
    values = [{'title' : gettext('Work day'),
               'id' : 'work_day',
               'table' : {
                   'title' : gettext('Heating schedule'),
                   'col_names' : [gettext('FROM'),gettext('TO'),u'T [°C]'],
                   'data' : schedule['work'],
                   'footer' : [gettext('Other'),gettext('Hours'),schedule['other']]}},
              {'title' : gettext('Free day'),
               'id' : 'free_day',
               'table' : {
                   'title' : gettext('Heating schedule'),
                   'col_names' : ['OD','DO',u'T [°C]'],
                   'data' : schedule['free'],
                   'footer' : [gettext('Other'),gettext('Hours'),schedule['other']]}},
              {'title' : gettext('Week'),
               'id' : 'week',
               'states' : schedule['week'],
               'days' : WEEKDAYS}
              ]
    #Validator (move it to client-side JS)
    # for i in range(len(list_FROM)):
    #     if list_TO[i] < list_FROM[i]:
    #         print("Error - hour_TO is earlier than hour_FROM")
    #         break
    #     for j in range(len(list_FROM)-i):
    #         if list_FROM[j] < list_FROM[i] < list_TO[j]:
    #             print("Error - conflicting ranges")
    #         if list_FROM[j] < list_TO[i] < list_TO[j]:
    #             print("Error - conflicting ranges")
    return render_template("content/schedule_new.html",
                           active='schedule',
                           tabs=values,
                           save=save,
                           init_tab=1,
                           override=override_temp,
                           title=gettext('Heater'))
@app.route('/get/<category>/<subcategory>_<name>', methods=['POST'])
@app.route('/get/<category>/<subcategory>/<name>', methods=['POST'])
def refresh_data(subcategory,name,category='sensors'):
    """Return the current value of one sensor/setting.

    Serves AJAX POST refreshes as a JSON string; when called directly from
    data_rows() it returns a one-element list of the same dict instead.
    """
    #failsafe
    # If the underscore route split the path segment at the wrong '_',
    # move the surplus parts from `subcategory` back onto `name`.
    if '_' in subcategory:
        t = subcategory.split('_')
        subcategory = t[0]
        for i in t[1:]:
            name = i + '_' + name
    data = {'title' : gettext('Current temperature'),
            'value' : round(get_data(name,subcategory,category),1),
            'name' : subcategory+'_'+name }
    if request.method == "POST":
        return json.dumps(data)
    return([data])
@app.route('/dashboard/get_data', methods=['POST'])
def dashboard_data():
    """Collect the dashboard summary values.

    Returns a JSON string for AJAX POST refreshes; when called directly
    from dashboard() it returns the plain dict.
    """
    day = datetime.today().weekday()
    try:
        # 'week' flags mark which weekdays follow the free-day schedule;
        # TypeError means no schedule is stored yet (get_data returned None).
        schedule_day = bool(int(get_data('schedule','heater','settings')['week'][day]))
    except TypeError:
        schedule_day = False
    data = { "inside_temperature" : round(get_data('inside_temperature','room'),1),
             "apparent_temperature" : round(get_data('apparent_temperature','room'),1),
             "use_apparent" : get_data('use_apparent','room','settings'),
             "humidity" : int(get_data('humidity','room')),
             "pressure" : int(get_data('pressure','room')),
             "outside_temperature" : round(get_data('real_temp','outside'),1),
             "tank_temp_up" : round(get_data('temp_up','tank'),1),
             "heater_schedule" : gettext('Free') if schedule_day else gettext('Normal'),
             "heater_status" : gettext('ON') if get_data('burner','state') else gettext('OFF')}
    if request.method == "POST":
        return json.dumps(data)
    return(data)
@app.route('/change-<name>', methods=['GET', 'POST'])
@app.route('/<category>/change-<name>', methods=['GET', 'POST'])
@login_required
def set_value(name,category=None):
    """Show a slider modal for one setting and store the chosen value.

    GET renders the modal; a valid POST writes the value back.  Any name
    starting with 'schedule' is treated as a temporary heater-temperature
    override stored inside the schedule blob.
    """
    if name.startswith('schedule'):
        category = 'heater'
        name = 'expected'
    elif '_' in name:
        # NOTE(review): this mirrors the failsafe in refresh_data() but
        # skips t[1] (t[2:] vs t[1:]) and `subcategory` is never used --
        # looks suspicious; confirm the intended behaviour.
        t = name.split('_')
        subcategory = t[0]
        for i in t[2:]:
            name = i + '_' + name
    print(name)
    val = get_description(name,category)[0]
    val['value'] = get_data(name,category,'settings')
    if 'step' not in val:
        val['step'] = 1
    slider = {'min' : val['range'][0],
              'max' : val['range'][1],
              'value' : val['value'],
              'step' : val['step'],
              'unit' : val['unit']}
    description = {key:val[key] for key in ['title','desc']}
    form = RangeForm()
    form.slider.validate(form,[NumberRange(slider['min'],slider['max'])])
    if form.validate_on_submit():
        val = request.form['slider']
        #save to SQL
        if name == 'expected' and category == 'heater':
            #FIXME insert new into schedule
            # Store a 60-minute override entry inside the schedule blob.
            schedule = get_data('schedule','heater','settings')
            time = list(datetime.today().timetuple())[0:6]
            schedule['override'] = {'temp' : float(val), 'start' : time, 'duration': 60}
            s = '"' + json.dumps(schedule) + '"'
            change_setting(s,'schedule','heater')
        else:
            change_setting(val,name,category)
        # NOTE(review): `name` cannot be None here -- this guard looks
        # unreachable; confirm before removing.
        if name is None:
            return redirect('/')
        return redirect('/' + request.path.split('/')[-2])
    return render_template("forms/modal-range.html",
                           action=request.path,
                           slider=slider,
                           desc=description,
                           form=form)
@app.route('/options', methods=['GET', 'POST'])
@login_required
def options():
    """Options page: password change for remote clients, toggles locally.

    Remote browsers only get the password-change form; the local (server)
    client gets the full options form (apparent-temperature toggle,
    password reset, reboot).
    """
    if request.remote_addr != SERVER_IP:
        password = PasswordForm()
        if password.validate_on_submit():
            #TODO save password
            pass_change(password.password.data)
            flash(gettext("Password changed"))
        # Bug fix: the template lives under forms/, not content/ (keeps this
        # branch consistent with the local-client branch below).
        return render_template("forms/options.html",
                               active='options',
                               options = None,
                               password = password,
                               refresh_rate = 0.5)
    else:
        options = OptionsForm()
        # `description` doubles as storage for the current toggle state.
        options.apparent.description = get_data('use_apparent','room','settings')
        if not options.apparent.description:
            options.apparent.label.text = gettext('Use apparent temperature')
        else:
            options.apparent.label.text = gettext('Use real temperature')
        if options.validate_on_submit():
            if options.data['apparent'] is not None and options.data['apparent']:
                options.apparent.description = not options.apparent.description
                if not options.apparent.description:
                    options.apparent.label.text = gettext('Use apparent temperature')
                else:
                    options.apparent.label.text = gettext('Use real temperature')
                change_setting(options.apparent.description,'room')
            if options.data['reset_pass']:
                pass_change("password")
            if options.data['reboot']:
                reboot()
            # if options.data['reboot_mcu']:
            #     reboot_mcu()
        return render_template("forms/options.html",
                               active='options',
                               options = options,
                               password = None,
                               refresh_rate = 0.5)
#TODO
def pass_change(new_pass):
    """Stub: change the web-UI password.  Currently only logs the value."""
    # NOTE(review): prints the plaintext password -- remove before production.
    print(new_pass)
# fixed template path in options view
# -*- coding: UTF-8 -*-
from flask import render_template, redirect, request, flash, abort
from flask.ext.login import login_user, logout_user, current_user, login_required
from flask.ext.babel import gettext
from wtforms.validators import NumberRange
from app import app, babel, db, lm
import json
from datetime import datetime
from .forms import *
from config import LANGUAGES, SERVER_IP, BABEL_DEFAULT_LOCALE, WEEKDAYS
from .system import *
from .data import *
@babel.localeselector
def get_locale():
    """Pick the UI locale for the current request (currently pinned to 'pl')."""
    #return request.accept_languages.best_match(LANGUAGES.keys())
    return request.accept_languages.best_match(['pl'])
@app.errorhandler(400)
def catch_server_errors(e):
    """On a 400 (e.g. a rejected login redirect target) send the client home."""
    return redirect('/')
#@app.before_request
#def before_request():
# print(request.remote_addr)
# pass
@lm.user_loader
def load_user(id):
    """Flask-Login user loader: resolve a stored session id to a User object."""
    # NOTE(review): `id` shadows the builtin; `User` comes from a wildcard import.
    return User.get(id)
def next_is_valid(next):
    """Accept None, or a path whose slash-stripped form is a known page."""
    if next is None:
        return True
    valid_pages = ('solar', 'tank', 'circulation', 'heater',
                   'schedule', 'options', 'dashboard', 'index')
    return next.strip('/') in valid_pages
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in; authenticated users are redirected to '/'."""
    if current_user.is_authenticated:
        print("redirecting")
        return redirect('/')
    form = LoginForm()
    if form.validate_on_submit():
        #user = User.get(request.form['username'])
        user = User.get(form.username.data)
        # NOTE(review): plain-text password comparison; confirm storage format.
        #if (user and user.password == request.form['password']):
        if (user and user.password == form.password.data):
            #login_user(user)
            login_user(user, remember = form.remember.data)
            # Open-redirect guard: only follow whitelisted targets.
            next = request.args.get('next')
            if not next_is_valid(next):
                return abort(400)
            return redirect(next or '/')
        else:
            flash(gettext('Username or password incorrect'))
    return render_template('forms/login.html',
                           active='login',
                           title=gettext('Log in'),
                           form=form)
@app.route("/logout")
@login_required
def logout():
logout_user()
return redirect('/')
@app.route('/')
@app.route('/index')
@app.route('/dashboard')
def dashboard():
    """Render the main dashboard with an initial data snapshot."""
    # dashboard_data() returns a dict when called directly like this.
    return render_template("content/dashboard.html",
                           active='dashboard',
                           title='',
                           refresh_rate=0.5,
                           data=dashboard_data())
#@app.route('/change-schedule_override_temp', methods=['GET', 'POST'])
#@app.route('/set-room-temp', methods=['GET', 'POST'])
#def room_temp():
# # get those data from SQL(name):
# slider = {'min' : 17,
# 'max' : 28,
# 'value' : round(float(get_SQL_value(SolarControlHeaterSettingsExpected)),1),
# 'step' : 0.1,
# 'unit' : u'°C'}
# description = {'title' : gettext('Example modal'),
# 'info' : gettext('move it'),
# 's_info' : gettext('Temperature') + ':',
# 'cancel' : gettext('Cancel'),
# 'submit' : gettext('Save')}
#
# form = RangeForm()
# from wtforms.validators import NumberRange
# form.slider.validate(form,[NumberRange(slider['min'],slider['max'])])
#
# if form.validate_on_submit():
# val = request.form['slider']
# print(val)
# return redirect('/')
#
# return render_template("forms/modal-range.html",
# action=request.path,
# slider=slider,
# desc=description,
# form=form)
@app.route('/status')
def status():
    """Render the sensor/state overview page from one data snapshot."""
    sensors = get_full_data('sensors','all')
    data = {'sensors' : [
        {'title' : gettext('Outside temperature'), 'name' : 'outside_temp',
         'unit' : u'°C', 'value' : sensors['outside']['real_temp']},
        {'title' : gettext('Inside temperature'), 'name' : 'inside_temperature',
         'unit' : u'°C', 'value' : sensors['room']['inside_temperature'] },
        {'title' : gettext('Apparent temperature'), 'name' : 'apparent_temperature',
         'unit' : u'°C', 'value' : sensors['room']['apparent_temperature'] },
        {'title' : gettext('Humidity'), 'name' : 'humidity',
         'unit' : u'%', 'value' : sensors['room']['humidity'] },
        {'title' : gettext('Pressure'), 'name' : 'pressure',
         'unit' : u'hPa', 'value' : sensors['room']['pressure'] },
        {'title' : gettext('Solar temperature'), 'name' : 'solar_temp',
         'unit' : u'°C', 'value' : sensors['solar']['temp']},
        {'title' : gettext('Solar input'), 'name' : 'solar_in',
         'unit' : u'°C', 'value' : sensors['solar']['temp_in'] },
        {'title' : gettext('Solar output'), 'name' : 'solar_out',
         'unit' : u'°C', 'value' : sensors['solar']['temp_out'] },
        {'title' : gettext('Solar difference'), 'name' : 'solar_diff',
         'unit' : u'°C', 'value' : sensors['solar']['temp_diff'] },
        {'title' : gettext('Tank'), 'name' : 'tank_up',
         'unit' : u'°C', 'value' : sensors['tank']['temp_up'] },
        {'title' : gettext('Heater input'), 'name' : 'heater_in',
         'unit' : u'°C', 'value' : sensors['heater']['temp_in'] },
        {'title' : gettext('Heater output'), 'name' : 'heater_out',
         'unit' : u'°C', 'value' : sensors['heater']['temp_out'] },
        # NOTE(review): msgid typo 'diffrence' kept to preserve translations.
        {'title' : gettext('Heater diffrence'), 'name' : 'heater_diff',
         'unit' : u'°C', 'value' : sensors['heater']['temp_diff'] }],
        'states' : [
        {'title' : gettext('Burner'), 'name' : 'burner',
         'value' : gettext('ON') if sensors['state']['burner'] else gettext('OFF') },
        {'title' : gettext('DHW/CH actuator'), 'name' : 'heater_switch',
         'value' : gettext('DHW') if sensors['state']['heater_switch'] else gettext('CH') },
        {'title' : gettext('Solar pump'), 'name' : 'solar_pump',
         'value' : gettext('ON') if sensors['state']['solar_pump'] else gettext('OFF') },
        {'title' : gettext('Solar system actuator'), 'name' : 'solar_switch',
         'value' : gettext('ON') if sensors['state']['solar_switch'] else gettext('OFF') },
        {'title' : gettext('Solar circuit flow'), 'name' : 'solar_flow',
         'value' : sensors['solar']['flow'] },
        # Bug fix: was gettext(gettext('OFF')) -- a double translation call.
        {'title' : gettext('Circulation'), 'name' : 'circulation',
         'value' : gettext('ON') if sensors['state']['circulation'] else gettext('OFF') } ]
        }
    title=gettext('Sensors data')
    return render_template("/content/status.html",
                           active='status',
                           data=data,
                           title=title)
#@app.route('/scheme')
#def scheme():
# return render_template("/content/scheme.html",
# active='scheme',
# data=get_full_data('sensors','all'),
# title=gettext('Scheme'))
@app.route('/heater/')
@app.route('/tank/')
@app.route('/solar/')
@app.route('/circulation/')
@login_required
def data_rows():
    """Render the settings rows for one subsystem page (path selects it)."""
    uri = request.base_url.replace(request.url_root,'').replace('/','')
    data = []
    # Bug fix: defensive defaults; an unexpected path previously raised
    # UnboundLocalError on `order`/`title` below.
    order = []
    title = ''
    if uri == 'circulation':
        order = ['time_on', 'interval']
        title = gettext('Circulation')
    elif uri == 'solar':
        order = ['critical','temp_on','temp_off']
        data = refresh_data('solar','temp')
        title = gettext('Solar')
    elif uri == 'tank':
        order = ['solar_max', 'heater_max', 'heater_min']
        data = refresh_data('tank','temp_up')
        title = gettext('Water')
    elif uri == 'heater':
        order = ['expected', 'critical', 'hysteresis']
        title = gettext('Heater')
    data += get_description(order,get_full_data('settings',uri))
    return render_template("data_rows.html",
                           active=uri,
                           data=data,
                           refresh_rate=0.5,
                           #refresh_rate=settings['refresh_rate'],
                           title=title)
@app.route('/schedule/change', methods=['POST'])
@login_required
def schedule_validate():
    """Receive the heater schedule as JSON, normalise it and persist it."""
    print("----SCHEDULE RECEIVED---")
    data = request.get_json(force=True)
    #TODO exceptions
    print(data)
    try:
        data['other'] = round(float(data['other']),2)
        for i in range(7):
            data['week'][i] = int(bool(int(data['week'][i])))
        for day in ('work','free'):
            for i in range(len(data[day])):
                for when in ('from', 'to'):
                    time = data[day][i][when].split(":")
                    # IndexError here (missing ':') is swallowed below.
                    data[day][i][when] = [int(time[0]), int(time[1])]
                data[day][i]['temp'] = round(float(data[day][i]['temp']),2)
    except (KeyError,IndexError):
        # Malformed payloads are stored as-is; TODO: reject with a 400.
        pass
        #return schedule(error=True)
    change_setting('"'+json.dumps(data,separators=(',', ':'))+'"','schedule','heater')
    print("--NEW SCHEDULE POSTED--")
    #return schedule()
    # Bug fix: the view returned None (Flask 500); return an empty 204.
    return ('', 204)
@app.route('/schedule', methods=['GET','POST'])
@login_required
def schedule():
    """Render the heater schedule editor (stored schedule or zeroed default)."""
    save = True
    schedule = get_data('schedule','heater','settings')
    if schedule is None:
        # No schedule stored yet: editable empty schedule, saving disabled.
        save=False
        schedule = {"week":[0,0,0,0,0,0,0],
                    "work":[{"to":[0,0],"from":[0,0],"temp":0}],
                    "free":[{"to":[0,0],"from":[0,0],"temp":0}],
                    "other":0}
    try:
        # An override stays active for `duration` minutes after its start;
        # KeyError means no override entry exists.
        diff = datetime.now() - datetime(*schedule['override']['start'])
        duration = schedule['override']['duration']
        if diff.seconds < duration * 60:
            override_temp = schedule['override']['temp']
        else:
            override_temp = None
    except KeyError:
        override_temp = None
    # Three editor tabs: work-day table, free-day table, weekday flags.
    values = [{'title' : gettext('Work day'),
               'id' : 'work_day',
               'table' : {
                   'title' : gettext('Heating schedule'),
                   'col_names' : [gettext('FROM'),gettext('TO'),u'T [°C]'],
                   'data' : schedule['work'],
                   'footer' : [gettext('Other'),gettext('Hours'),schedule['other']]}},
              {'title' : gettext('Free day'),
               'id' : 'free_day',
               'table' : {
                   'title' : gettext('Heating schedule'),
                   'col_names' : ['OD','DO',u'T [°C]'],
                   'data' : schedule['free'],
                   'footer' : [gettext('Other'),gettext('Hours'),schedule['other']]}},
              {'title' : gettext('Week'),
               'id' : 'week',
               'states' : schedule['week'],
               'days' : WEEKDAYS}
              ]
    #Validator (move it to client-side JS)
    # for i in range(len(list_FROM)):
    #     if list_TO[i] < list_FROM[i]:
    #         print("Error - hour_TO is earlier than hour_FROM")
    #         break
    #     for j in range(len(list_FROM)-i):
    #         if list_FROM[j] < list_FROM[i] < list_TO[j]:
    #             print("Error - conflicting ranges")
    #         if list_FROM[j] < list_TO[i] < list_TO[j]:
    #             print("Error - conflicting ranges")
    return render_template("content/schedule_new.html",
                           active='schedule',
                           tabs=values,
                           save=save,
                           init_tab=1,
                           override=override_temp,
                           title=gettext('Heater'))
@app.route('/get/<category>/<subcategory>_<name>', methods=['POST'])
@app.route('/get/<category>/<subcategory>/<name>', methods=['POST'])
def refresh_data(subcategory,name,category='sensors'):
    """Return one sensor/setting value: JSON for POST, list for direct calls."""
    #failsafe
    # If the underscore route split the segment at the wrong '_', move the
    # surplus parts from `subcategory` back onto `name`.
    if '_' in subcategory:
        t = subcategory.split('_')
        subcategory = t[0]
        for i in t[1:]:
            name = i + '_' + name
    data = {'title' : gettext('Current temperature'),
            'value' : round(get_data(name,subcategory,category),1),
            'name' : subcategory+'_'+name }
    if request.method == "POST":
        return json.dumps(data)
    return([data])
@app.route('/dashboard/get_data', methods=['POST'])
def dashboard_data():
    """Dashboard summary values: JSON string for POST, plain dict otherwise."""
    day = datetime.today().weekday()
    try:
        # TypeError means no schedule stored yet (get_data returned None).
        schedule_day = bool(int(get_data('schedule','heater','settings')['week'][day]))
    except TypeError:
        schedule_day = False
    data = { "inside_temperature" : round(get_data('inside_temperature','room'),1),
             "apparent_temperature" : round(get_data('apparent_temperature','room'),1),
             "use_apparent" : get_data('use_apparent','room','settings'),
             "humidity" : int(get_data('humidity','room')),
             "pressure" : int(get_data('pressure','room')),
             "outside_temperature" : round(get_data('real_temp','outside'),1),
             "tank_temp_up" : round(get_data('temp_up','tank'),1),
             "heater_schedule" : gettext('Free') if schedule_day else gettext('Normal'),
             "heater_status" : gettext('ON') if get_data('burner','state') else gettext('OFF')}
    if request.method == "POST":
        return json.dumps(data)
    return(data)
@app.route('/change-<name>', methods=['GET', 'POST'])
@app.route('/<category>/change-<name>', methods=['GET', 'POST'])
@login_required
def set_value(name,category=None):
    """Slider modal for one setting; POST stores it ('schedule*' = override)."""
    if name.startswith('schedule'):
        category = 'heater'
        name = 'expected'
    elif '_' in name:
        # NOTE(review): mirrors refresh_data()'s failsafe but uses t[2:]
        # (vs t[1:]) and never uses `subcategory` -- confirm intent.
        t = name.split('_')
        subcategory = t[0]
        for i in t[2:]:
            name = i + '_' + name
    print(name)
    val = get_description(name,category)[0]
    val['value'] = get_data(name,category,'settings')
    if 'step' not in val:
        val['step'] = 1
    slider = {'min' : val['range'][0],
              'max' : val['range'][1],
              'value' : val['value'],
              'step' : val['step'],
              'unit' : val['unit']}
    description = {key:val[key] for key in ['title','desc']}
    form = RangeForm()
    form.slider.validate(form,[NumberRange(slider['min'],slider['max'])])
    if form.validate_on_submit():
        val = request.form['slider']
        #save to SQL
        if name == 'expected' and category == 'heater':
            #FIXME insert new into schedule
            # Store a 60-minute override entry inside the schedule blob.
            schedule = get_data('schedule','heater','settings')
            time = list(datetime.today().timetuple())[0:6]
            schedule['override'] = {'temp' : float(val), 'start' : time, 'duration': 60}
            s = '"' + json.dumps(schedule) + '"'
            change_setting(s,'schedule','heater')
        else:
            change_setting(val,name,category)
        # NOTE(review): `name` cannot be None here -- guard looks unreachable.
        if name is None:
            return redirect('/')
        return redirect('/' + request.path.split('/')[-2])
    return render_template("forms/modal-range.html",
                           action=request.path,
                           slider=slider,
                           desc=description,
                           form=form)
@app.route('/options', methods=['GET', 'POST'])
@login_required
def options():
    """Options page: password change for remote clients, full form locally."""
    if request.remote_addr != SERVER_IP:
        password = PasswordForm()
        if password.validate_on_submit():
            #TODO save password
            pass_change(password.password.data)
            flash(gettext("Password changed"))
        return render_template("forms/options.html",
                               active='options',
                               options = None,
                               password = password,
                               refresh_rate = 0.5)
    else:
        options = OptionsForm()
        # `description` doubles as storage for the current toggle state.
        options.apparent.description = get_data('use_apparent','room','settings')
        if not options.apparent.description:
            options.apparent.label.text = gettext('Use apparent temperature')
        else:
            options.apparent.label.text = gettext('Use real temperature')
        if options.validate_on_submit():
            if options.data['apparent'] is not None and options.data['apparent']:
                options.apparent.description = not options.apparent.description
                if not options.apparent.description:
                    options.apparent.label.text = gettext('Use apparent temperature')
                else:
                    options.apparent.label.text = gettext('Use real temperature')
                change_setting(options.apparent.description,'room')
            if options.data['reset_pass']:
                pass_change("password")
            if options.data['reboot']:
                reboot()
            # if options.data['reboot_mcu']:
            #     reboot_mcu()
        return render_template("forms/options.html",
                               active='options',
                               options = options,
                               password = None,
                               refresh_rate = 0.5)
#TODO
def pass_change(new_pass):
    """Stub: change the web-UI password.  Currently only logs the value."""
    # NOTE(review): prints the plaintext password -- remove before production.
    print(new_pass)
# |
"""
fs.remote: utilities for interfacing with remote filesystems
This module provides reusable utility functions that can be used to construct
FS subclasses interfacing with a remote filesystem. These include:
RemoteFileBuffer: a file-like object that locally buffers the contents
of a remote file, writing them back on flush() or close().
ConnectionManagerFS: a WrapFS subclass that tracks the connection state
of a remote FS, and allows client code to wait for
a connection to be re-established.
CacheFS: a WrapFS subclass that caches file and directory meta-data in
memory, to speed access to a remote FS.
"""
import time
import copy
from fs.base import FS, threading
from fs.wrapfs import WrapFS, wrap_fs_methods
from fs.path import *
from fs.errors import *
try:
from tempfile import SpooledTemporaryFile as TempFile
except ImportError:
from tempfile import NamedTemporaryFile as TempFile
class RemoteFileBuffer(object):
    """File-like object providing buffer for local file operations.

    Instances of this class manage a local tempfile buffer corresponding
    to the contents of a remote file.  All reads and writes happen locally,
    with the content being copied to the remote file only on flush() or
    close().  Writes to the remote file are performed using the
    setcontents() method on the owning FS object.

    The intended use-case is for a remote filesystem (e.g. S3FS) to return
    instances of this class from its open() method, and to provide the
    file-uploading logic in its setcontents() method, as in the following
    pseudo-code:

        def open(self,path,mode="r"):
            rf = self._get_remote_file(path)
            return RemoteFileBuffer(self,path,mode,rf)

        def setcontents(self,path,file):
            self._put_remote_file(path,file)

    The current implementation reads the entire contents of the file into
    the buffer before returning.  Future implementations may pull data into
    the buffer on demand.
    """

    def __init__(self,fs,path,mode,rfile=None):
        """RemoteFileBuffer constructor.

        The owning filesystem, path and mode must be provided.  If the
        optional argument 'rfile' is provided, it must be a read()-able
        object or a string containing the initial file contents.
        """
        self.file = TempFile()
        self.fs = fs
        self.path = path
        self.mode = mode
        self.closed = False
        self._flushed = False
        # Reuse the owning FS's lock class when it has one so buffers share
        # the filesystem's locking semantics; otherwise a plain RLock.
        if getattr(fs,"_lock",None) is not None:
            self._lock = fs._lock.__class__()
        else:
            self._lock = threading.RLock()
        # For readable/append modes, pre-populate the buffer from `rfile`
        # in 256KB chunks (or from a string).
        if "r" in mode or "+" in mode or "a" in mode:
            if rfile is not None:
                if hasattr(rfile,"read"):
                    data = rfile.read(1024*256)
                    while data:
                        self.file.write(data)
                        data = rfile.read(1024*256)
                else:
                    self.file.write(str(rfile))
                if "a" not in mode:
                    # Reads start at the beginning; appends stay at the end.
                    self.file.seek(0)

    def __del__(self):
        # Best-effort upload-on-GC: close() flushes unwritten content.
        if not self.closed:
            self.close()

    def __getattr__(self,name):
        # Delegate unknown attributes to the underlying temp file.  Callables
        # are wrapped so they run under the buffer lock (and writes mark the
        # buffer dirty), then cached on the instance via setattr.
        file = self.__dict__['file']
        a = getattr(file, name)
        if not callable(a):
            return a
        # NOTE(review): `wraps` is not imported explicitly in this module's
        # visible imports -- presumably supplied by a wildcard import; confirm.
        @wraps(a)
        def call_with_lock(*args,**kwds):
            self._lock.acquire()
            try:
                if "write" in name:
                    self._flushed = False
                return a(*args,**kwds)
            finally:
                self._lock.release()
        setattr(self, name, call_with_lock)
        return call_with_lock

    def __enter__(self):
        self.file.__enter__()
        return self

    def __exit__(self,exc,value,tb):
        self.close()
        return False

    def __iter__(self):
        return iter(self.file)

    def truncate(self,size=None):
        # Truncate the local buffer and immediately push to the remote FS.
        self._lock.acquire()
        try:
            self.file.truncate(size)
            self.flush()
        finally:
            self._lock.release()

    def flush(self):
        # Upload buffered content (once per dirty cycle) for writable modes,
        # preserving the current file position.
        self._lock.acquire()
        try:
            self.file.flush()
            if "w" in self.mode or "a" in self.mode or "+" in self.mode:
                if not self._flushed:
                    pos = self.file.tell()
                    self.file.seek(0)
                    self.fs.setcontents(self.path,self.file)
                    self.file.seek(pos)
                    self._flushed = True
        finally:
            self._lock.release()

    def close(self):
        # Upload any unflushed content, then close the local buffer.
        self._lock.acquire()
        try:
            if not self.closed:
                self.closed = True
                if "w" in self.mode or "a" in self.mode or "+" in self.mode:
                    if not self._flushed:
                        self.file.seek(0)
                        # NOTE(review): duplicated seek(0) -- harmless but
                        # likely unintended.
                        self.file.seek(0)
                        self.fs.setcontents(self.path,self.file)
                self.file.close()
        finally:
            self._lock.release()
class ConnectionManagerFS(WrapFS):
    """FS wrapper providing simple connection management of a remote FS.

    The ConnectionManagerFS class is designed to wrap a remote FS object
    and provide some convenience methods for dealing with its remote
    connection state.

    The boolean attribute 'connected' indicates whether the remote
    filesystem has an active connection, and is initially True.  If any of
    the remote filesystem methods raises a RemoteConnectionError,
    'connected' will switch to False and remain so until a successful
    remote method call.

    Application code can use the method 'wait_for_connection' to block
    until the connection is re-established.  Currently this reconnection
    is checked by a simple polling loop; eventually more sophisticated
    operating-system integration may be added.

    Since some remote FS classes can raise RemoteConnectionError during
    initialisation, this class also provides a simple "lazy initialisation"
    facility.  The remote FS can be specified as an FS instance, an FS
    subclass, or a (class,args) or (class,args,kwds) tuple.  For example:

        >>> fs = ConnectionManagerFS(MyRemoteFS("http://www.example.com/"))
        Traceback (most recent call last):
            ...
        RemoteConnectionError: couldn't connect to "http://www.example.com/"
        >>> fs = ConnectionManagerFS((MyRemoteFS,["http://www.example.com/"]))
        >>> fs.connected
        False
        >>>
    """

    # Seconds between reconnection probes in the polling thread.
    poll_interval = 1

    def __init__(self,fs,poll_interval=None,connected=True):
        if poll_interval is not None:
            self.poll_interval = poll_interval
        # `fs` may be an instance (wrap directly) or a class / (class,args)
        # / (class,args,kwds) tuple for lazy construction in `wrapped_fs`.
        if isinstance(fs,FS):
            self.__dict__["wrapped_fs"] = fs
        elif isinstance(fs,type):
            self._fsclass = fs
            self._fsargs = []
            self._fskwds = {}
        else:
            self._fsclass = fs[0]
            try:
                self._fsargs = fs[1]
            except IndexError:
                self._fsargs = []
            try:
                self._fskwds = fs[2]
            except IndexError:
                self._fskwds = {}
        self._connection_cond = threading.Condition()
        self._poll_thread = None
        self._poll_sleeper = threading.Event()
        self.connected = connected

    @property
    def wrapped_fs(self):
        # Lazily construct the wrapped FS on first access, double-checking
        # under the condition lock so only one thread builds it.
        try:
            return self.__dict__["wrapped_fs"]
        except KeyError:
            self._connection_cond.acquire()
            try:
                try:
                    return self.__dict__["wrapped_fs"]
                except KeyError:
                    fs = self._fsclass(*self._fsargs,**self._fskwds)
                    self.__dict__["wrapped_fs"] = fs
                    return fs
            finally:
                self._connection_cond.release()

    def setcontents(self,path,data):
        self.wrapped_fs.setcontents(path,data)

    def __getstate__(self):
        # Locks/events are not picklable; they are rebuilt in __setstate__.
        state = super(ConnectionManagerFS,self).__getstate__()
        del state["_connection_cond"]
        del state["_poll_sleeper"]
        state["_poll_thread"] = None
        return state

    def __setstate__(self,state):
        super(ConnectionManagerFS,self).__setstate__(state)
        self._connection_cond = threading.Condition()
        self._poll_sleeper = threading.Event()

    def wait_for_connection(self,timeout=None):
        """Block (up to `timeout`) until the remote connection is back."""
        self._connection_cond.acquire()
        try:
            if not self.connected:
                # Start a single background poller; it notifies the
                # condition once the remote FS answers again.
                if not self._poll_thread:
                    target = self._poll_connection
                    self._poll_thread = threading.Thread(target=target)
                    self._poll_thread.start()
                self._connection_cond.wait(timeout)
        finally:
            self._connection_cond.release()

    def _poll_connection(self):
        # Probe the remote FS with a cheap isdir("") call until it stops
        # raising RemoteConnectionError (any other FSError ends polling too).
        while not self.connected:
            try:
                self.wrapped_fs.isdir("")
            except RemoteConnectionError:
                self._poll_sleeper.wait(self.poll_interval)
                self._poll_sleeper.clear()
            except FSError:
                break
            else:
                break
        self._connection_cond.acquire()
        try:
            self.connected = True
            self._poll_thread = None
            self._connection_cond.notifyAll()
        finally:
            self._connection_cond.release()

    def close(self):
        # Don't close if we haven't created it
        try:
            fs = self.__dict__["wrapped_fs"]
        except KeyError:
            pass
        else:
            try:
                fs.close()
            except (RemoteConnectionError,AttributeError):
                pass
        # Wake and join the poller thread so it does not outlive the FS.
        if self._poll_thread:
            self.connected = True
            self._poll_sleeper.set()
            self._poll_thread.join()
            self._poll_thread = None
def _ConnectionManagerFS_method_wrapper(func):
"""Method wrapper for ConnectionManagerFS.
This method wrapper keeps an eye out for RemoteConnectionErrors and
adjusts self.connected accordingly.
"""
@wraps(func)
def wrapper(self,*args,**kwds):
try:
result = func(self,*args,**kwds)
except RemoteConnectionError:
self.connected = False
raise
except FSError:
self.connected = True
raise
else:
self.connected = True
return result
return wrapper
# Apply the connection-tracking wrapper to every FS method of
# ConnectionManagerFS so the 'connected' flag stays up to date.
wrap_fs_methods(_ConnectionManagerFS_method_wrapper,ConnectionManagerFS)
def _cached_method(func):
"""Method decorator that caches results for CacheFS."""
@wraps(func)
def wrapper(self,path="",*args,**kwds):
try:
(success,result) = self._cache_get(path,func.__name__,args,kwds)
except KeyError:
try:
res = func(self,path,*args,**kwds)
except Exception, e:
self._cache_set(path,func.__name__,args,kwds,(False,e))
raise
else:
self._cache_set(path,func.__name__,args,kwds,(True,res))
return copy.copy(res)
else:
if not success:
raise result
else:
return copy.copy(result)
return wrapper
class CacheFS(WrapFS):
    """Simple wrapper to cache meta-data of a remote filesystem.

    This FS wrapper implements a simplistic cache that can help speed up
    access to a remote filesystem.  File and directory meta-data is cached
    but the actual file contents are not.
    """

    def __init__(self,fs,timeout=1):
        """CacheFS constructor.

        The optional argument 'timeout' specifies the cache timeout in
        seconds.  The default timeout is 1 second.  To prevent cache
        entries from ever timing out, set it to None.
        """
        self.timeout = timeout
        # The cache is a tree of dicts mirroring the directory layout.
        # Each node maps child names to child nodes, and the empty string
        # to its own cached results:
        #     {"": {func_name: {args_key: (timestamp, value)}}}
        self._cache = {"":{}}
        super(CacheFS,self).__init__(fs)

    def _path_cache(self,path):
        # Walk (creating as needed) the cache nodes down to 'path'.
        cache = self._cache
        for name in iteratepath(path):
            cache = cache.setdefault(name,{"":{}})
        return cache

    def _cache_get(self,path,func,args,kwds):
        # Fetch a cached (success, value) pair; raises KeyError on a
        # miss or when the entry has outlived the timeout.
        now = time.time()
        cache = self._path_cache(path)
        # items() (rather than Python-2-only iteritems()) keeps this
        # working unchanged on both Python 2 and Python 3.
        key = (tuple(args),tuple(sorted(kwds.items())))
        (t,v) = cache[""][func][key]
        if self.timeout is not None:
            if t < now - self.timeout:
                raise KeyError
        return v

    def _cache_set(self,path,func,args,kwds,v):
        # Record a result along with the time it was obtained.
        t = time.time()
        cache = self._path_cache(path)
        key = (tuple(args),tuple(sorted(kwds.items())))
        cache[""].setdefault(func,{})[key] = (t,v)

    def _uncache(self,path,added=False,removed=False,unmoved=False):
        # Invalidate cached data affected by a change at 'path'.
        cache = self._cache
        names = list(iteratepath(path))
        # If it's not the root dir, also clear some items for ancestors
        if names:
            # Clear cached 'getinfo' and 'getsize' for all ancestors
            for name in names[:-1]:
                cache[""].pop("getinfo",None)
                cache[""].pop("getsize",None)
                cache = cache.get(name,None)
                if cache is None:
                    return
            # Adjust cached 'listdir' for parent directory.
            # TODO: account for whether it was added, removed, or unmoved
            cache[""].pop("getinfo",None)
            cache[""].pop("getsize",None)
            cache[""].pop("listdir",None)
            # Clear all cached info for the path itself.
            cache[names[-1]] = {"":{}}

    @_cached_method
    def exists(self,path):
        return super(CacheFS,self).exists(path)

    @_cached_method
    def isdir(self,path):
        return super(CacheFS,self).isdir(path)

    @_cached_method
    def isfile(self,path):
        return super(CacheFS,self).isfile(path)

    @_cached_method
    def listdir(self,path="",**kwds):
        return super(CacheFS,self).listdir(path,**kwds)

    @_cached_method
    def getinfo(self,path):
        return super(CacheFS,self).getinfo(path)

    @_cached_method
    def getsize(self,path):
        return super(CacheFS,self).getsize(path)

    @_cached_method
    def getxattr(self,path,name,default=None):
        # BUGFIX: accept (and forward) a default value instead of
        # raising TypeError when a caller supplies one; this matches
        # the signature used by the other xattr methods' callers.
        return super(CacheFS,self).getxattr(path,name,default)

    @_cached_method
    def listxattrs(self,path):
        return super(CacheFS,self).listxattrs(path)

    def open(self,path,mode="r"):
        f = super(CacheFS,self).open(path,mode)
        self._uncache(path,unmoved=True)
        return f

    def setcontents(self,path,contents):
        res = super(CacheFS,self).setcontents(path,contents)
        self._uncache(path,unmoved=True)
        return res

    def getcontents(self,path):
        res = super(CacheFS,self).getcontents(path)
        self._uncache(path,unmoved=True)
        return res

    def makedir(self,path,**kwds):
        super(CacheFS,self).makedir(path,**kwds)
        self._uncache(path,added=True)

    def remove(self,path):
        super(CacheFS,self).remove(path)
        self._uncache(path,removed=True)

    def removedir(self,path,**kwds):
        super(CacheFS,self).removedir(path,**kwds)
        self._uncache(path,removed=True)

    def rename(self,src,dst):
        super(CacheFS,self).rename(src,dst)
        self._uncache(src,removed=True)
        self._uncache(dst,added=True)

    def copy(self,src,dst,**kwds):
        super(CacheFS,self).copy(src,dst,**kwds)
        self._uncache(dst,added=True)

    def copydir(self,src,dst,**kwds):
        super(CacheFS,self).copydir(src,dst,**kwds)
        self._uncache(dst,added=True)

    def move(self,src,dst,**kwds):
        super(CacheFS,self).move(src,dst,**kwds)
        self._uncache(src,removed=True)
        self._uncache(dst,added=True)

    def movedir(self,src,dst,**kwds):
        super(CacheFS,self).movedir(src,dst,**kwds)
        self._uncache(src,removed=True)
        self._uncache(dst,added=True)

    def setxattr(self,path,name,value):
        self._uncache(path,unmoved=True)
        return super(CacheFS,self).setxattr(path,name,value)

    def delxattr(self,path,name):
        self._uncache(path,unmoved=True)
        return super(CacheFS,self).delxattr(path,name)
include default argument for getxattr
git-svn-id: 74b2def6592cf29d88d1a5d33b5c4a2732d8507c@268 67cdc799-7952-0410-af00-57a81ceafa0f
"""
fs.remote: utilities for interfacing with remote filesystems
This module provides reusable utility functions that can be used to construct
FS subclasses interfacing with a remote filesystem. These include:
RemoteFileBuffer: a file-like object that locally buffers the contents
of a remote file, writing them back on flush() or close().
ConnectionManagerFS: a WrapFS subclass that tracks the connection state
of a remote FS, and allows client code to wait for
a connection to be re-established.
CacheFS: a WrapFS subclass that caches file and directory meta-data in
memory, to speed access to a remote FS.
"""
import time
import copy
from functools import wraps

from fs.base import FS, threading
from fs.wrapfs import WrapFS, wrap_fs_methods
from fs.path import *
from fs.errors import *

try:
    from tempfile import SpooledTemporaryFile as TempFile
except ImportError:
    from tempfile import NamedTemporaryFile as TempFile
class RemoteFileBuffer(object):
    """File-like object providing buffer for local file operations.

    Instances of this class manage a local tempfile buffer corresponding
    to the contents of a remote file.  All reads and writes happen locally,
    with the content being copied to the remote file only on flush() or
    close().  Writes to the remote file are performed using the setcontents()
    method on the owning FS object.

    The intended use-case is for a remote filesystem (e.g. S3FS) to return
    instances of this class from its open() method, and to provide the
    file-uploading logic in its setcontents() method, as in the following
    pseudo-code:

        def open(self,path,mode="r"):
            rf = self._get_remote_file(path)
            return RemoteFileBuffer(self,path,mode,rf)

        def setcontents(self,path,file):
            self._put_remote_file(path,file)

    The current implementation reads the entire contents of the file into
    the buffer before returning.  Future implementations may pull data into
    the buffer on demand.
    """

    def __init__(self,fs,path,mode,rfile=None):
        """RemoteFileBuffer constructor.

        The owning filesystem, path and mode must be provided.  If the
        optional argument 'rfile' is provided, it must be a read()-able
        object or a string containing the initial file contents.
        """
        self.file = TempFile()
        self.fs = fs
        self.path = path
        self.mode = mode
        self.closed = False
        # True while the buffer and the remote copy are known to agree;
        # any write (see __getattr__) resets it to False.
        self._flushed = False
        # Mirror the owning FS's lock class if it has one, so buffer
        # locking is of the same kind the FS itself uses.
        if getattr(fs,"_lock",None) is not None:
            self._lock = fs._lock.__class__()
        else:
            self._lock = threading.RLock()
        # For readable or append modes, pre-populate the local buffer
        # with the supplied remote contents.
        if "r" in mode or "+" in mode or "a" in mode:
            if rfile is not None:
                if hasattr(rfile,"read"):
                    # Copy in 256KB chunks to bound memory use.
                    data = rfile.read(1024*256)
                    while data:
                        self.file.write(data)
                        data = rfile.read(1024*256)
                else:
                    self.file.write(str(rfile))
                if "a" not in mode:
                    # Non-append modes start positioned at the beginning.
                    self.file.seek(0)

    def __del__(self):
        # Best-effort close so buffered writes are not silently lost.
        if not self.closed:
            self.close()

    def __getattr__(self,name):
        # Delegate unknown attributes to the underlying tempfile.
        # Callable attributes are wrapped to run under the buffer's
        # lock, and any method whose name contains "write" marks the
        # buffer as needing a flush.  The wrapper is memoized onto the
        # instance so this fallback only runs once per method name.
        file = self.__dict__['file']
        a = getattr(file, name)
        if not callable(a):
            return a
        @wraps(a)
        def call_with_lock(*args,**kwds):
            self._lock.acquire()
            try:
                if "write" in name:
                    self._flushed = False
                return a(*args,**kwds)
            finally:
                self._lock.release()
        setattr(self, name, call_with_lock)
        return call_with_lock

    def __enter__(self):
        self.file.__enter__()
        return self

    def __exit__(self,exc,value,tb):
        self.close()
        # Never suppress exceptions raised inside the 'with' block.
        return False

    def __iter__(self):
        return iter(self.file)

    def truncate(self,size=None):
        # Truncate the local buffer, then push the change remotely.
        self._lock.acquire()
        try:
            self.file.truncate(size)
            self.flush()
        finally:
            self._lock.release()

    def flush(self):
        # Flush the local buffer and, for writable modes, upload the
        # full contents via fs.setcontents() while preserving the
        # current file position.
        self._lock.acquire()
        try:
            self.file.flush()
            if "w" in self.mode or "a" in self.mode or "+" in self.mode:
                if not self._flushed:
                    pos = self.file.tell()
                    self.file.seek(0)
                    self.fs.setcontents(self.path,self.file)
                    self.file.seek(pos)
                    self._flushed = True
        finally:
            self._lock.release()

    def close(self):
        # Idempotent close: upload any unflushed writes, then close the
        # local tempfile.
        self._lock.acquire()
        try:
            if not self.closed:
                self.closed = True
                if "w" in self.mode or "a" in self.mode or "+" in self.mode:
                    if not self._flushed:
                        self.file.seek(0)
                        # NOTE(review): the second seek(0) below looks
                        # redundant -- confirm before removing.
                        self.file.seek(0)
                        self.fs.setcontents(self.path,self.file)
                self.file.close()
        finally:
            self._lock.release()
class ConnectionManagerFS(WrapFS):
    """FS wrapper providing simple connection management of a remote FS.

    The ConnectionManagerFS class is designed to wrap a remote FS object
    and provide some convenience methods for dealing with its remote
    connection state.

    The boolean attribute 'connected' indicates whether the remote filesystem
    has an active connection, and is initially True.  If any of the remote
    filesystem methods raises a RemoteConnectionError, 'connected' will
    switch to False and remain so until a successful remote method call.

    Application code can use the method 'wait_for_connection' to block
    until the connection is re-established.  Currently this reconnection
    is checked by a simple polling loop; eventually more sophisticated
    operating-system integration may be added.

    Since some remote FS classes can raise RemoteConnectionError during
    initialisation, this class also provides a simple "lazy initialisation"
    facility.  The remote FS can be specified as an FS instance, an FS
    subclass, or a (class,args) or (class,args,kwds) tuple.  For example:

        >>> fs = ConnectionManagerFS(MyRemoteFS("http://www.example.com/"))
        Traceback (most recent call last):
            ...
        RemoteConnectionError: couldn't connect to "http://www.example.com/"
        >>> fs = ConnectionManagerFS((MyRemoteFS,["http://www.example.com/"]))
        >>> fs.connected
        False
        >>>

    """

    # Default delay in seconds between reconnection probes made by the
    # background polling thread; may be overridden per instance.
    poll_interval = 1

    def __init__(self,fs,poll_interval=None,connected=True):
        # 'fs' may be an FS instance, an FS subclass, or a
        # (class,args[,kwds]) tuple for lazy construction.
        # NOTE(review): WrapFS.__init__ is not called here; 'wrapped_fs'
        # is managed by hand via the lazy property below -- confirm this
        # matches WrapFS's expectations.
        if poll_interval is not None:
            self.poll_interval = poll_interval
        if isinstance(fs,FS):
            # Ready-made instance: store directly, bypassing the property.
            self.__dict__["wrapped_fs"] = fs
        elif isinstance(fs,type):
            # Bare class: construct lazily with no arguments.
            self._fsclass = fs
            self._fsargs = []
            self._fskwds = {}
        else:
            # (class,args[,kwds]) tuple; absent entries default to empty.
            self._fsclass = fs[0]
            try:
                self._fsargs = fs[1]
            except IndexError:
                self._fsargs = []
            try:
                self._fskwds = fs[2]
            except IndexError:
                self._fskwds = {}
        self._connection_cond = threading.Condition()
        self._poll_thread = None
        self._poll_sleeper = threading.Event()
        self.connected = connected

    @property
    def wrapped_fs(self):
        # Lazily construct the wrapped FS on first access, using a
        # double-checked pattern under the connection condition so that
        # only one thread performs the construction.
        try:
            return self.__dict__["wrapped_fs"]
        except KeyError:
            self._connection_cond.acquire()
            try:
                try:
                    return self.__dict__["wrapped_fs"]
                except KeyError:
                    fs = self._fsclass(*self._fsargs,**self._fskwds)
                    self.__dict__["wrapped_fs"] = fs
                    return fs
            finally:
                self._connection_cond.release()

    def setcontents(self,path,data):
        # Pass straight through to the wrapped filesystem.
        self.wrapped_fs.setcontents(path,data)

    def __getstate__(self):
        # Threading primitives can't be pickled; drop them here and
        # recreate them in __setstate__.
        state = super(ConnectionManagerFS,self).__getstate__()
        del state["_connection_cond"]
        del state["_poll_sleeper"]
        state["_poll_thread"] = None
        return state

    def __setstate__(self,state):
        super(ConnectionManagerFS,self).__setstate__(state)
        self._connection_cond = threading.Condition()
        self._poll_sleeper = threading.Event()

    def wait_for_connection(self,timeout=None):
        # Block until 'connected' becomes True (or the timeout elapses),
        # starting the background polling thread if it isn't running.
        self._connection_cond.acquire()
        try:
            if not self.connected:
                if not self._poll_thread:
                    target = self._poll_connection
                    self._poll_thread = threading.Thread(target=target)
                    self._poll_thread.start()
                self._connection_cond.wait(timeout)
        finally:
            self._connection_cond.release()

    def _poll_connection(self):
        # Probe the remote FS until it responds.  RemoteConnectionError
        # means "still down": sleep and retry.  Any other FSError means
        # the server answered, so the connection is considered restored.
        while not self.connected:
            try:
                self.wrapped_fs.isdir("")
            except RemoteConnectionError:
                self._poll_sleeper.wait(self.poll_interval)
                self._poll_sleeper.clear()
            except FSError:
                break
            else:
                break
        self._connection_cond.acquire()
        try:
            self.connected = True
            self._poll_thread = None
            self._connection_cond.notifyAll()
        finally:
            self._connection_cond.release()

    def close(self):
        # Don't close if we haven't created it
        try:
            fs = self.__dict__["wrapped_fs"]
        except KeyError:
            pass
        else:
            try:
                fs.close()
            except (RemoteConnectionError,AttributeError):
                pass
        # Wake and retire the polling thread, if any is running.
        if self._poll_thread:
            self.connected = True
            self._poll_sleeper.set()
            self._poll_thread.join()
            self._poll_thread = None
def _ConnectionManagerFS_method_wrapper(func):
    """Method wrapper for ConnectionManagerFS.

    This method wrapper keeps an eye out for RemoteConnectionErrors and
    adjusts self.connected accordingly.
    """
    @wraps(func)
    def wrapper(self,*args,**kwds):
        try:
            result = func(self,*args,**kwds)
        except RemoteConnectionError:
            # Couldn't reach the server at all.
            self.connected = False
            raise
        except FSError:
            # Any other FS error means the server responded, so the
            # connection itself is alive; re-raise unchanged.
            # (Non-FSError exceptions propagate without touching the flag.)
            self.connected = True
            raise
        else:
            self.connected = True
            return result
    return wrapper
# Apply the connection-tracking wrapper to every FS method of
# ConnectionManagerFS so the 'connected' flag stays up to date.
wrap_fs_methods(_ConnectionManagerFS_method_wrapper,ConnectionManagerFS)
def _cached_method(func):
"""Method decorator that caches results for CacheFS."""
@wraps(func)
def wrapper(self,path="",*args,**kwds):
try:
(success,result) = self._cache_get(path,func.__name__,args,kwds)
except KeyError:
try:
res = func(self,path,*args,**kwds)
except Exception, e:
self._cache_set(path,func.__name__,args,kwds,(False,e))
raise
else:
self._cache_set(path,func.__name__,args,kwds,(True,res))
return copy.copy(res)
else:
if not success:
raise result
else:
return copy.copy(result)
return wrapper
class CacheFS(WrapFS):
    """Simple wrapper to cache meta-data of a remote filesystems.

    This FS wrapper implements a simplistic cache that can help speed up
    access to a remote filesystem.  File and directory meta-data is cached
    but the actual file contents are not.
    """

    def __init__(self,fs,timeout=1):
        """CacheFS constructor.

        The optional argument 'timeout' specifies the cache timeout in
        seconds.  The default timeout is 1 second.  To prevent cache
        entries from ever timing out, set it to None.
        """
        self.timeout = timeout
        # The cache is a tree of dicts mirroring the directory layout.
        # Each node maps child names to child nodes, and the empty string
        # to its own cached results:
        #     {"": {func_name: {args_key: (timestamp, value)}}}
        self._cache = {"":{}}
        super(CacheFS,self).__init__(fs)

    def _path_cache(self,path):
        # Walk (creating as needed) the cache nodes down to 'path'.
        cache = self._cache
        for name in iteratepath(path):
            cache = cache.setdefault(name,{"":{}})
        return cache

    def _cache_get(self,path,func,args,kwds):
        # Fetch a cached (success, value) pair; raises KeyError on a
        # miss or when the entry has outlived the timeout.
        now = time.time()
        cache = self._path_cache(path)
        key = (tuple(args),tuple(sorted(kwds.iteritems())))
        (t,v) = cache[""][func][key]
        if self.timeout is not None:
            if t < now - self.timeout:
                raise KeyError
        return v

    def _cache_set(self,path,func,args,kwds,v):
        # Record a result along with the time it was obtained.
        t = time.time()
        cache = self._path_cache(path)
        key = (tuple(args),tuple(sorted(kwds.iteritems())))
        cache[""].setdefault(func,{})[key] = (t,v)

    def _uncache(self,path,added=False,removed=False,unmoved=False):
        # Invalidate cached data affected by a change at 'path'.
        cache = self._cache
        names = list(iteratepath(path))
        # If it's not the root dir, also clear some items for ancestors
        if names:
            # Clear cached 'getinfo' and 'getsize' for all ancestors
            for name in names[:-1]:
                cache[""].pop("getinfo",None)
                cache[""].pop("getsize",None)
                cache = cache.get(name,None)
                if cache is None:
                    return
            # Adjust cached 'listdir' for parent directory.
            # TODO: account for whether it was added, removed, or unmoved
            cache[""].pop("getinfo",None)
            cache[""].pop("getsize",None)
            cache[""].pop("listdir",None)
            # Clear all cached info for the path itself.
            cache[names[-1]] = {"":{}}

    @_cached_method
    def exists(self,path):
        return super(CacheFS,self).exists(path)

    @_cached_method
    def isdir(self,path):
        return super(CacheFS,self).isdir(path)

    @_cached_method
    def isfile(self,path):
        return super(CacheFS,self).isfile(path)

    @_cached_method
    def listdir(self,path="",**kwds):
        return super(CacheFS,self).listdir(path,**kwds)

    @_cached_method
    def getinfo(self,path):
        return super(CacheFS,self).getinfo(path)

    @_cached_method
    def getsize(self,path):
        return super(CacheFS,self).getsize(path)

    @_cached_method
    def getxattr(self,path,name,default=None):
        return super(CacheFS,self).getxattr(path,name,default)

    @_cached_method
    def listxattrs(self,path):
        return super(CacheFS,self).listxattrs(path)

    def open(self,path,mode="r"):
        f = super(CacheFS,self).open(path,mode)
        self._uncache(path,unmoved=True)
        return f

    def setcontents(self,path,contents):
        res = super(CacheFS,self).setcontents(path,contents)
        self._uncache(path,unmoved=True)
        return res

    def getcontents(self,path):
        res = super(CacheFS,self).getcontents(path)
        self._uncache(path,unmoved=True)
        return res

    def makedir(self,path,**kwds):
        super(CacheFS,self).makedir(path,**kwds)
        self._uncache(path,added=True)

    def remove(self,path):
        super(CacheFS,self).remove(path)
        self._uncache(path,removed=True)

    def removedir(self,path,**kwds):
        super(CacheFS,self).removedir(path,**kwds)
        self._uncache(path,removed=True)

    def rename(self,src,dst):
        super(CacheFS,self).rename(src,dst)
        self._uncache(src,removed=True)
        self._uncache(dst,added=True)

    def copy(self,src,dst,**kwds):
        super(CacheFS,self).copy(src,dst,**kwds)
        self._uncache(dst,added=True)

    def copydir(self,src,dst,**kwds):
        super(CacheFS,self).copydir(src,dst,**kwds)
        self._uncache(dst,added=True)

    def move(self,src,dst,**kwds):
        super(CacheFS,self).move(src,dst,**kwds)
        self._uncache(src,removed=True)
        self._uncache(dst,added=True)

    def movedir(self,src,dst,**kwds):
        super(CacheFS,self).movedir(src,dst,**kwds)
        self._uncache(src,removed=True)
        self._uncache(dst,added=True)

    def setxattr(self,path,name,value):
        self._uncache(path,unmoved=True)
        return super(CacheFS,self).setxattr(path,name,value)

    def delxattr(self,path,name):
        self._uncache(path,unmoved=True)
        return super(CacheFS,self).delxattr(path,name)
|
__version__ = '2.2.1'
bump version to 3.0.0
__version__ = '3.0.0'
|
#!/usr/bin/env python
from ion.util.agent_launcher import AgentLauncher
from ion.services.sa.instrument.agent_configuration_builder import InstrumentAgentConfigurationBuilder, \
PlatformAgentConfigurationBuilder
from ion.util.enhanced_resource_registry_client import EnhancedResourceRegistryClient
from ion.util.resource_lcs_policy import AgentPolicy, ResourceLCSPolicy, ModelPolicy, DevicePolicy
__author__ = 'Maurice Manning, Ian Katz, Michael Meisinger'
import os
import pwd
import json
from datetime import datetime, timedelta
import time
from ooi.logging import log
from pyon.agent.agent import ResourceAgentClient
from pyon.core.bootstrap import IonObject
from pyon.core.exception import Inconsistent,BadRequest, NotFound
from pyon.ion.resource import ExtendedResourceContainer
from pyon.util.ion_time import IonTime
from pyon.public import LCE
from pyon.public import RT, PRED, OT
from pyon.util.containers import get_ion_ts
from pyon.agent.agent import ResourceAgentState
from coverage_model.parameter import ParameterDictionary
from ion.services.dm.inventory.dataset_management_service import DatasetManagementService
from ion.services.sa.instrument.flag import KeywordFlag
from ion.services.sa.observatory.observatory_util import ObservatoryUtil
from ion.util.module_uploader import RegisterModulePreparerEgg
from ion.util.qa_doc_parser import QADocParser
from ion.agents.port.port_agent_process import PortAgentProcess
from interface.objects import AttachmentType, ComputedValueAvailability, ComputedIntValue, StatusType, ProcessDefinition
from interface.services.sa.iinstrument_management_service import BaseInstrumentManagementService
from ion.services.sa.observatory.observatory_management_service import INSTRUMENT_OPERATOR_ROLE, OBSERVATORY_OPERATOR_ROLE
from pyon.core.governance import ORG_MANAGER_ROLE, GovernanceHeaderValues, has_org_role, is_system_actor, has_exclusive_resource_commitment
from pyon.core.governance import has_shared_resource_commitment, is_resource_owner
class InstrumentManagementService(BaseInstrumentManagementService):
"""
@brief Service to manage instrument, platform, and sensor resources, their relationships, and direct access
"""
def on_init(self):
#suppress a few "variable declared but not used" annoying pyflakes errors
IonObject("Resource")
self.override_clients(self.clients)
self.outil = ObservatoryUtil(self)
self.extended_resource_handler = ExtendedResourceContainer(self)
self.init_module_uploader()
# set up all of the policy interceptions
if self.container and self.container.governance_controller:
reg_precondition = self.container.governance_controller.register_process_operation_precondition
no_policy = ResourceLCSPolicy(self.clients)
agent_policy = AgentPolicy(self.clients)
model_policy = ModelPolicy(self.clients)
device_policy = DevicePolicy(self.clients)
#LCS
reg_precondition(self, 'execute_instrument_agent_lifecycle',
agent_policy.policy_fn_lcs_precondition("instrument_agent_id"))
reg_precondition(self, 'execute_instrument_agent_instance_lifecycle',
no_policy.policy_fn_lcs_precondition("instrument_agent_instance_id"))
reg_precondition(self, 'execute_instrument_model_lifecycle',
model_policy.policy_fn_lcs_precondition("instrument_model_id"))
reg_precondition(self, 'execute_instrument_device_lifecycle',
device_policy.policy_fn_lcs_precondition("instrument_device_id"))
reg_precondition(self, 'execute_platform_agent_lifecycle',
agent_policy.policy_fn_lcs_precondition("platform_agent_id"))
reg_precondition(self, 'execute_platform_agent_instance_lifecycle',
no_policy.policy_fn_lcs_precondition("platform_agent_instance_id"))
reg_precondition(self, 'execute_platform_model_lifecycle',
model_policy.policy_fn_lcs_precondition("platform_model_id"))
reg_precondition(self, 'execute_platform_device_lifecycle',
device_policy.policy_fn_lcs_precondition("platform_device_id"))
reg_precondition(self, 'execute_sensor_model_lifecycle',
model_policy.policy_fn_lcs_precondition("sensor_model_id"))
reg_precondition(self, 'execute_sensor_device_lifecycle',
device_policy.policy_fn_lcs_precondition("sensor_device_id"))
#Delete
reg_precondition(self, 'force_delete_instrument_agent',
agent_policy.policy_fn_delete_precondition("instrument_agent_id"))
reg_precondition(self, 'force_delete_instrument_agent_instance',
no_policy.policy_fn_delete_precondition("instrument_agent_instance_id"))
reg_precondition(self, 'force_delete_instrument_model',
model_policy.policy_fn_delete_precondition("instrument_model_id"))
reg_precondition(self, 'force_delete_instrument_device',
device_policy.policy_fn_delete_precondition("instrument_device_id"))
reg_precondition(self, 'force_delete_platform_agent',
agent_policy.policy_fn_delete_precondition("platform_agent_id"))
reg_precondition(self, 'force_delete_platform_agent_instance',
no_policy.policy_fn_delete_precondition("platform_agent_instance_id"))
reg_precondition(self, 'force_delete_platform_model',
model_policy.policy_fn_delete_precondition("platform_model_id"))
reg_precondition(self, 'force_delete_platform_device',
device_policy.policy_fn_delete_precondition("platform_device_id"))
reg_precondition(self, 'force_delete_sensor_model',
model_policy.policy_fn_delete_precondition("sensor_model_id"))
reg_precondition(self, 'force_delete_sensor_device',
device_policy.policy_fn_delete_precondition("sensor_device_id"))
    def init_module_uploader(self):
        # Configure the uploader used to publish driver eggs to the
        # release host.  NOTE(review): when self.CFG is falsy,
        # self.module_uploader is never assigned -- confirm callers
        # tolerate the attribute being absent.
        if self.CFG:
            # looking for forms like host=amoeba.ucsd.edu, remotepath=/var/www/release, user=steve
            cfg_host = self.CFG.get_safe("service.instrument_management.driver_release_host", None)
            cfg_remotepath = self.CFG.get_safe("service.instrument_management.driver_release_directory", None)
            # defaults to the current OS user when no release user is configured
            cfg_user = self.CFG.get_safe("service.instrument_management.driver_release_user",
                                         pwd.getpwuid(os.getuid())[0])
            cfg_wwwprefix = self.CFG.get_safe("service.instrument_management.driver_release_wwwprefix", None)
            if cfg_host is None or cfg_remotepath is None or cfg_wwwprefix is None:
                raise BadRequest("Missing configuration items; host='%s', directory='%s', wwwprefix='%s'" %
                                 (cfg_host, cfg_remotepath, cfg_wwwprefix))
            self.module_uploader = RegisterModulePreparerEgg(dest_user=cfg_user,
                                                            dest_host=cfg_host,
                                                            dest_path=cfg_remotepath,
                                                            dest_wwwprefix=cfg_wwwprefix)
def override_clients(self, new_clients):
"""
Replaces the service clients with a new set of them... and makes sure they go to the right places
"""
self.RR2 = EnhancedResourceRegistryClient(new_clients.resource_registry)
#shortcut names for the import sub-services
# we hide these behind checks even though we expect them so that
# the resource_impl_metatests will work
if hasattr(new_clients, "resource_registry"):
self.RR = new_clients.resource_registry
if hasattr(new_clients, "data_acquisition_management"):
self.DAMS = new_clients.data_acquisition_management
if hasattr(new_clients, "data_product_management"):
self.DPMS = new_clients.data_product_management
if hasattr(new_clients, "pubsub_management"):
self.PSMS = new_clients.pubsub_management
if hasattr(new_clients, "data_retriever"):
self.DRS = new_clients.data_retriever
def restore_resource_state(self, instrument_device_id='', attachment_id=''):
"""
restore a snapshot of an instrument agent instance config
"""
instrument_device_obj = self.RR.read(instrument_device_id)
resource_type = type(instrument_device_obj).__name__
if not RT.InstrumentDevice == resource_type:
raise BadRequest("Can only restore resource states for %s resources, got %s" %
(RT.InstrumentDevice, resource_type))
instrument_agent_instance_obj = self.RR2.find_instrument_agent_instance_of_instrument_device(instrument_device_id)
attachment = self.RR2.read_attachment(attachment_id, include_content=True)
if not KeywordFlag.CONFIG_SNAPSHOT in attachment.keywords:
raise BadRequest("Attachment '%s' does not seem to be a config snapshot" % attachment_id)
if not 'application/json' == attachment.content_type:
raise BadRequest("Attachment '%s' is not labeled as json")
snapshot = json.loads(attachment.content)
driver_config = snapshot["driver_config"]
instrument_agent_instance_obj.driver_config["comms_config"] = driver_config["comms_config"]
instrument_agent_instance_obj.driver_config["pagent_pid"] = driver_config["pagent_pid"]
self.RR2.update(instrument_agent_instance_obj)
#todo
#agent.set_config(snapshot["running_config"])
#todo
# re-launch agent?
    def save_resource_state(self, instrument_device_id='', name=''):
        """
        take a snapshot of the current instrument agent instance config for this instrument,
        and save it as an attachment

        @param instrument_device_id  the InstrumentDevice to snapshot
        @param name  optional attachment name; a timestamped default is used when empty
        @retval the id of the created attachment
        @throws BadRequest if the resource is not an InstrumentDevice
        """
        config_builder = InstrumentAgentConfigurationBuilder(self.clients)
        instrument_device_obj = self.RR.read(instrument_device_id)
        resource_type = type(instrument_device_obj).__name__
        if not RT.InstrumentDevice == resource_type:
            raise BadRequest("Can only save resource states for %s resources, got %s" %
                             (RT.InstrumentDevice, resource_type))

        inst_agent_instance_obj = self.RR2.find_instrument_agent_instance_of_instrument_device(instrument_device_id)
        config_builder.set_agent_instance_object(inst_agent_instance_obj)
        # will_launch=False: build the config for inspection only.
        agent_config = config_builder.prepare(will_launch=False)

        epoch = time.mktime(datetime.now().timetuple())
        snapshot_name = name or "Running Config Snapshot %s.js" % epoch

        snapshot = {}
        snapshot["driver_config"] = agent_config['driver_config']
        snapshot["agent_config"] = agent_config

        #todo
        # Start a resource agent client to talk with the instrument agent.
#        self._ia_client = ResourceAgentClient(instrument_device_id,
#                                              to_name=inst_agent_instance_obj.agent_process_id,
#                                              process=FakeProcess())
        snapshot["running_config"] = {} #agent.get_config()

        #make an attachment for the snapshot
        attachment = IonObject(RT.Attachment,
                               name=snapshot_name,
                               description="Config snapshot at time %s" % epoch,
                               content=json.dumps(snapshot),
                               content_type="application/json", # RFC 4627
                               keywords=[KeywordFlag.CONFIG_SNAPSHOT],
                               attachment_type=AttachmentType.ASCII)

        # return the attachment id
        return self.RR2.create_attachment(instrument_device_id, attachment)
##########################################################################
#
# INSTRUMENT AGENT INSTANCE
#
##########################################################################
def create_instrument_agent_instance(self, instrument_agent_instance=None, instrument_agent_id="", instrument_device_id=""):
"""
create a new instance
@param instrument_agent_instance the object to be created as a resource
@retval instrument_agent_instance_id the id of the new object
@throws BadRequest if the incoming _id field is set
@throws BadReqeust if the incoming name already exists
"""
instrument_agent_instance_id = self.RR2.create(instrument_agent_instance, RT.InstrumentAgentInstance)
if instrument_agent_id:
self.assign_instrument_agent_to_instrument_agent_instance(instrument_agent_id, instrument_agent_instance_id)
if instrument_device_id:
self.assign_instrument_agent_instance_to_instrument_device(instrument_agent_instance_id, instrument_device_id)
log.debug("device %s now connected to instrument agent instance %s (L4-CI-SA-RQ-363)",
str(instrument_device_id), str(instrument_agent_instance_id))
return instrument_agent_instance_id
def update_instrument_agent_instance(self, instrument_agent_instance=None):
"""
update an existing instance
@param instrument_agent_instance the object to be created as a resource
@retval success whether we succeeded
@throws BadRequest if the incoming _id field is not set
@throws BadReqeust if the incoming name already exists
"""
return self.RR2.update(instrument_agent_instance, RT.InstrumentAgentInstance)
def read_instrument_agent_instance(self, instrument_agent_instance_id=''):
"""
fetch a resource by ID
@param instrument_agent_instance_id the id of the object to be fetched
@retval InstrumentAgentInstance resource
"""
return self.RR2.read(instrument_agent_instance_id, RT.InstrumentAgentInstance)
def delete_instrument_agent_instance(self, instrument_agent_instance_id=''):
"""
delete a resource, including its history (for less ominous deletion, use retire)
@param instrument_agent_instance_id the id of the object to be deleted
@retval success whether it succeeded
"""
self.RR2.retire(instrument_agent_instance_id, RT.InstrumentAgentInstance)
    def force_delete_instrument_agent_instance(self, instrument_agent_instance_id=''):
        """Hard-delete an InstrumentAgentInstance resource via RR2.pluck_delete."""
        self.RR2.pluck_delete(instrument_agent_instance_id, RT.InstrumentAgentInstance)
def record_instrument_producer_activation(self, instrument_device_id, instrument_agent_instance_id):
    """
    Stamp provenance information onto the device's data producer at agent activation.

    Records the activation time and the (freshly re-read) agent instance's
    agent_config; when the device is currently deployed at a site, the site id
    is recorded as well.

    @param instrument_device_id          device whose producer context is updated
    @param instrument_agent_instance_id  agent instance that was just launched
    """
    log.debug("update the producer context for provenance")
    #todo: should get the time from process dispatcher
    producer_obj = self._get_instrument_producer(instrument_device_id)
    if OT.InstrumentProducerContext == producer_obj.producer_context.type_:
        # reload resource as it has been updated by the launch function
        instrument_agent_instance_obj = self.RR2.read(instrument_agent_instance_id)
        producer_obj.producer_context.activation_time = IonTime().to_string()
        producer_obj.producer_context.configuration = instrument_agent_instance_obj.agent_config
        # record the site where this device is currently deployed, if any
        try:
            site_id = self.RR2.find_instrument_site_id_by_instrument_device(instrument_device_id)
            producer_obj.producer_context.deployed_site_id = site_id
        except NotFound:
            # device is not deployed at a site; provenance simply omits the site.
            # (the former bare "except: raise" clause was a no-op and was removed)
            pass
        self.RR2.update(producer_obj)
def start_instrument_agent_instance(self, instrument_agent_instance_id=''):
    """
    Launch the instrument agent instance and return the process id.

    The agent instance must first be created and associated with an instrument
    device. A port agent is started first when the driver config has no
    comms_config, or when its comms_config addr is 'localhost'.

    @param instrument_agent_instance_id id of the agent instance to launch
    @retval process_id id of the launched agent process
    """
    instrument_agent_instance_obj = self.read_instrument_agent_instance(instrument_agent_instance_id)
    # launch the port agent before verifying anything.
    # if agent instance doesn't validate, port agent won't care and will be available for when it does validate
    # if no comms_config specified in the driver config then we need to start a port agent
    if 'comms_config' not in instrument_agent_instance_obj.driver_config:
        log.info("IMS:start_instrument_agent_instance no comms_config specified in the driver_config so call _start_port_agent")
        instrument_agent_instance_obj = self._start_port_agent(instrument_agent_instance_obj) # <-- this updates agent instance obj!
    # if the comms_config host addr in the driver config is localhost
    elif 'addr' in instrument_agent_instance_obj.driver_config.get('comms_config') and\
         instrument_agent_instance_obj.driver_config['comms_config']['addr'] == 'localhost':
        log.info("IMS:start_instrument_agent_instance comms_config host addr in the driver_config is localhost so call _start_port_agent")
        instrument_agent_instance_obj = self._start_port_agent(instrument_agent_instance_obj) # <-- this updates agent instance obj!
    config_builder = InstrumentAgentConfigurationBuilder(self.clients)
    launcher = AgentLauncher(self.clients.process_dispatcher)
    try:
        config_builder.set_agent_instance_object(instrument_agent_instance_obj)
        config = config_builder.prepare()
    except:
        # config failed: tear the port agent back down before propagating
        self._stop_port_agent(instrument_agent_instance_obj.port_agent_config)
        raise
    process_id = launcher.launch(config, config_builder._get_process_definition()._id)
    config_builder.record_launch_parameters(config, process_id)
    self.record_instrument_producer_activation(config_builder._get_device()._id, instrument_agent_instance_id)
    launcher.await_launch(20)
    return process_id
def _start_port_agent(self, instrument_agent_instance_obj=None):
    """
    Construct and start the port agent, ONLY NEEDED FOR INSTRUMENT AGENTS.

    Launches a PortAgentProcess from the instance's port_agent_config, then
    rewrites the instance's driver_config comms_config with the port agent's
    data/command ports and records the port agent pid.

    @param instrument_agent_instance_obj agent instance whose port agent to start
    @retval freshly re-read InstrumentAgentInstance resource
    """
    pagent_config = instrument_agent_instance_obj.port_agent_config
    #todo: ask bill if this blocks
    # It blocks until the port agent starts up or a timeout
    log.info("IMS:_start_pagent calling PortAgentProcess.launch_process ")
    port_agent = PortAgentProcess.launch_process(pagent_config, test_mode = True)
    pagent_pid = port_agent.get_pid()
    data_port = port_agent.get_data_port()
    command_port = port_agent.get_command_port()
    log.info("IMS:_start_pagent returned from PortAgentProcess.launch_process pid: %s ", pagent_pid)
    # Hack to get ready for DEMO. Further thought needs to be put into
    # how we pass this config info around.
    host = 'localhost'
    driver_config = instrument_agent_instance_obj.driver_config
    comms_config = driver_config.get('comms_config')
    if comms_config:
        host = comms_config.get('addr')
    else:
        log.warn("No comms_config specified, using '%s'" % host)
    # Configure driver to use port agent port number.
    instrument_agent_instance_obj.driver_config['comms_config'] = {
        'addr'     : host,
        'cmd_port' : command_port,
        'port'     : data_port
    }
    instrument_agent_instance_obj.driver_config['pagent_pid'] = pagent_pid
    self.update_instrument_agent_instance(instrument_agent_instance_obj)
    return self.read_instrument_agent_instance(instrument_agent_instance_obj._id)
def _stop_port_agent(self, port_agent_config):
    """
    Stop the port agent described by port_agent_config.

    A missing port agent process (NotFound) is not an error -- the agent may
    already be stopped.

    @param port_agent_config config dict identifying the port agent process
    """
    log.debug("Stopping port agent")
    try:
        process = PortAgentProcess.get_process(port_agent_config, test_mode=True)
        process.stop()
    except NotFound:
        log.debug("No port agent process found")
    except Exception:
        # bare raise preserves the original traceback ("raise e" would truncate it)
        raise
    else:
        log.debug("Success stopping port agent")
def stop_instrument_agent_instance(self, instrument_agent_instance_id=''):
    """
    Deactivate the instrument agent instance: cancel its agent process, stop its
    port agent, and stamp the deactivation time onto the device's producer context.
    @param instrument_agent_instance_id id of the agent instance to stop
    """
    instance_obj, device_id = self.stop_agent_instance(instrument_agent_instance_id, RT.InstrumentDevice)
    self._stop_port_agent(instance_obj.port_agent_config)
    # update the producer context for provenance
    producer_obj = self._get_instrument_producer(device_id)
    if producer_obj.producer_context.type_ == OT.InstrumentProducerContext:
        producer_obj.producer_context.deactivation_time = IonTime().to_string()
        self.RR2.update(producer_obj)
def stop_agent_instance(self, agent_instance_id, device_type):
    """
    Deactivate an agent instance: cancel its agent process and clear the
    recorded process ids.

    @param agent_instance_id id of the agent instance resource
    @param device_type       resource type of the associated device (e.g. RT.InstrumentDevice)
    @retval (agent_instance_obj, device_id) the updated instance and its device's id
    @throws BadRequest if the instance has no agent_process_id (already stopped?)
    """
    agent_instance_obj = self.RR2.read(agent_instance_id)
    device_id = self.RR2.find_subject(subject_type=device_type,
                                      predicate=PRED.hasAgentInstance,
                                      object=agent_instance_id,
                                      id_only=True)
    log.debug("Canceling the execution of agent's process ID")
    if agent_instance_obj.agent_process_id is None:
        raise BadRequest("Agent Instance '%s' does not have an agent_process_id. Stopped already?"
                         % agent_instance_id)
    try:
        self.clients.process_dispatcher.cancel_process(process_id=agent_instance_obj.agent_process_id)
    except NotFound:
        # process already gone; treat as stopped
        log.debug("No agent process found")
    except Exception:
        # bare raise preserves the original traceback ("raise e" would truncate it)
        raise
    else:
        log.debug("Success cancelling agent process")
    # reset the process ids.
    agent_instance_obj.agent_process_id = None
    if "pagent_pid" in agent_instance_obj.driver_config:
        agent_instance_obj.driver_config['pagent_pid'] = None
    self.RR2.update(agent_instance_obj)
    return agent_instance_obj, device_id
def _get_instrument_producer(self, instrument_device_id=""):
    """
    Return the first DataProducer object associated with the given device.
    @param instrument_device_id id of the instrument device
    @retval DataProducer resource
    @throws NotFound if the device has no associated DataProducer
    """
    producers, _ = self.clients.resource_registry.find_objects(subject=instrument_device_id,
                                                               predicate=PRED.hasDataProducer,
                                                               object_type=RT.DataProducer,
                                                               id_only=False)
    if not producers:
        raise NotFound("No Producers created for this Instrument Device " + str(instrument_device_id))
    return producers[0]
##########################################################################
#
# INSTRUMENT AGENT
#
##########################################################################
def create_instrument_agent(self, instrument_agent=None):
    """
    Register a new InstrumentAgent resource and create the process definition
    used to launch it.
    @param instrument_agent the object to be created as a resource
    @retval instrument_agent_id the id of the new object
    @throws BadRequest if the incoming _id field is set
    @throws BadRequest if the incoming name already exists
    """
    instrument_agent_id = self.RR2.create(instrument_agent, RT.InstrumentAgent)
    # Create the process definition to launch the agent
    process_definition = ProcessDefinition()
    process_definition.executable['module'] = 'ion.agents.instrument.instrument_agent'
    process_definition.executable['class'] = 'InstrumentAgent'
    dispatcher = self.clients.process_dispatcher
    process_definition_id = dispatcher.create_process_definition(process_definition=process_definition)
    # associate the agent and the process def
    self.RR2.assign_process_definition_to_instrument_agent(process_definition_id, instrument_agent_id)
    return instrument_agent_id
def update_instrument_agent(self, instrument_agent=None):
    """
    Persist changes to an existing InstrumentAgent resource.
    @param instrument_agent the modified resource object (its _id must be set)
    @retval success whether we succeeded
    @throws BadRequest if the incoming _id field is not set
    @throws BadRequest if the incoming name already exists
    """
    updated = self.RR2.update(instrument_agent, RT.InstrumentAgent)
    return updated
def read_instrument_agent(self, instrument_agent_id=''):
    """
    Look up a single InstrumentAgent resource by its id.
    @param instrument_agent_id the id of the object to be fetched
    @retval InstrumentAgent resource
    """
    resource = self.RR2.read(instrument_agent_id, RT.InstrumentAgent)
    return resource
def delete_instrument_agent(self, instrument_agent_id=''):
    """
    Retire (soft-delete) the InstrumentAgent with the given id.
    For a hard delete, see force_delete_instrument_agent.
    @param instrument_agent_id the id of the object to be retired
    """
    self.RR2.retire(instrument_agent_id, RT.InstrumentAgent)
def force_delete_instrument_agent(self, instrument_agent_id=''):
    """
    Hard-delete the InstrumentAgent: first unassign and delete each associated
    process definition, then pluck-delete the agent resource itself.
    @param instrument_agent_id the id of the object to be deleted
    """
    for process_def in self.RR2.find_process_definitions_of_instrument_agent(instrument_agent_id):
        self.RR2.unassign_process_definition_from_instrument_agent(process_def._id, instrument_agent_id)
        self.clients.process_dispatcher.delete_process_definition(process_def._id)
    self.RR2.pluck_delete(instrument_agent_id, RT.InstrumentAgent)
def register_instrument_agent(self, instrument_agent_id='', agent_egg='', qa_documents=''):
    """
    register an instrument driver by putting it in a web-accessible location

    Validates the QA documents and the egg, uploads the egg, attaches the QA
    docs plus a URL shortcut to the egg, then advances the agent's lifecycle
    state to INTEGRATED. Validation happens before the upload so a bad input
    never leaves a partially-uploaded egg behind.

    @instrument_agent_id the agent receiving the driver
    @agent_egg a base64-encoded egg file
    @qa_documents a base64-encoded zip file containing a MANIFEST.csv file
    MANIFEST.csv fields:
    - filename
    - name
    - description
    - content_type
    - keywords
    @throws BadRequest if QA doc parsing, egg validation, or upload fails
    """
    # retrieve the resource (read is for existence check only; result unused)
    self.read_instrument_agent(instrument_agent_id)
    qa_doc_parser = QADocParser()
    #process the input files (base64-encoded qa documents)
    qa_parse_result, err = qa_doc_parser.prepare(qa_documents)
    if not qa_parse_result:
        raise BadRequest("Processing qa_documents file failed: %s" % err)
    #process the input files (base64-encoded egg)
    uploader_obj, err = self.module_uploader.prepare(agent_egg)
    if None is uploader_obj:
        raise BadRequest("Egg failed validation: %s" % err)
    attachments, err = qa_doc_parser.convert_to_attachments()
    if None is attachments:
        raise BadRequest("QA Docs processing failed: %s" % err)
    # actually upload
    up_success, err = uploader_obj.upload()
    if not up_success:
        raise BadRequest("Upload failed: %s" % err)
    #now we can do the ION side of things
    #make an attachment for the url
    attachments.append(IonObject(RT.Attachment,
                                 name=uploader_obj.get_egg_urlfile_name(),
                                 description="url to egg",
                                 content="[InternetShortcut]\nURL=%s" % uploader_obj.get_destination_url(),
                                 content_type="text/url",
                                 keywords=[KeywordFlag.EGG_URL],
                                 attachment_type=AttachmentType.ASCII))
    #insert all attachments
    for att in attachments:
        self.RR2.create_attachment(instrument_agent_id, att)
    #updates the state of this InstAgent to integrated
    self.RR2.advance_lcs(instrument_agent_id, LCE.INTEGRATE)
##########################################################################
#
# INSTRUMENT MODEL
#
##########################################################################
def create_instrument_model(self, instrument_model=None):
    """
    Register a new InstrumentModel resource.
    @param instrument_model the object to be created as a resource
    @retval instrument_model_id the id of the new object
    @throws BadRequest if the incoming _id field is set
    @throws BadRequest if the incoming name already exists
    """
    new_id = self.RR2.create(instrument_model, RT.InstrumentModel)
    return new_id
def update_instrument_model(self, instrument_model=None):
    """
    Persist changes to an existing InstrumentModel resource.
    @param instrument_model the modified resource object (its _id must be set)
    @retval success whether we succeeded
    @throws BadRequest if the incoming _id field is not set
    @throws BadRequest if the incoming name already exists
    """
    updated = self.RR2.update(instrument_model, RT.InstrumentModel)
    return updated
def read_instrument_model(self, instrument_model_id=''):
    """
    Look up a single InstrumentModel resource by its id.
    @param instrument_model_id the id of the object to be fetched
    @retval InstrumentModel resource
    """
    resource = self.RR2.read(instrument_model_id, RT.InstrumentModel)
    return resource
def delete_instrument_model(self, instrument_model_id=''):
    """
    Retire (soft-delete) the InstrumentModel with the given id.
    For a hard delete, see force_delete_instrument_model.
    @param instrument_model_id the id of the object to be retired
    """
    self.RR2.retire(instrument_model_id, RT.InstrumentModel)
def force_delete_instrument_model(self, instrument_model_id=''):
    """Hard-delete the InstrumentModel (and its associations) via RR2.pluck_delete."""
    self.RR2.pluck_delete(instrument_model_id, RT.InstrumentModel)
##########################################################################
#
# PHYSICAL INSTRUMENT
#
##########################################################################
def create_instrument_device(self, instrument_device=None):
    """
    Register a new InstrumentDevice resource and register it with DAMS as a
    data producer.
    @param instrument_device the object to be created as a resource
    @retval instrument_device_id the id of the new object
    @throws BadRequest if the incoming _id field is set
    @throws BadRequest if the incoming name already exists
    """
    new_id = self.RR2.create(instrument_device, RT.InstrumentDevice)
    # register the instrument as a data producer
    self.DAMS.register_instrument(new_id)
    return new_id
def update_instrument_device(self, instrument_device=None):
    """
    Persist changes to an existing InstrumentDevice resource.
    @param instrument_device the modified resource object (its _id must be set)
    @retval success whether we succeeded
    @throws BadRequest if the incoming _id field is not set
    @throws BadRequest if the incoming name already exists
    """
    updated = self.RR2.update(instrument_device, RT.InstrumentDevice)
    return updated
def read_instrument_device(self, instrument_device_id=''):
    """
    Look up a single InstrumentDevice resource by its id.
    @param instrument_device_id the id of the object to be fetched
    @retval InstrumentDevice resource
    """
    resource = self.RR2.read(instrument_device_id, RT.InstrumentDevice)
    return resource
def delete_instrument_device(self, instrument_device_id=''):
    """
    Retire (soft-delete) the InstrumentDevice with the given id.
    For a hard delete, see force_delete_instrument_device.
    @param instrument_device_id the id of the object to be retired
    """
    self.RR2.retire(instrument_device_id, RT.InstrumentDevice)
def force_delete_instrument_device(self, instrument_device_id=''):
    """Hard-delete the InstrumentDevice (and its associations) via RR2.pluck_delete."""
    self.RR2.pluck_delete(instrument_device_id, RT.InstrumentDevice)
##
##
## PRECONDITION FUNCTIONS
##
##
def check_direct_access_policy(self, msg, headers):
    """
    Policy precondition: direct access is allowed only for the system actor or
    for a user holding an exclusive commitment on the resource.

    @param msg     the operation message (unused here)
    @param headers message headers carrying governance values
    @retval (allowed, reason) tuple; reason is '' when allowed
    """
    try:
        gov_values = GovernanceHeaderValues(headers)
    except Inconsistent as ex:  # "except X as ex" is valid Py2.6+/Py3 (was Py2-only "except X, ex")
        return False, ex.message
    # The system actor can do anything
    if is_system_actor(gov_values.actor_id):
        return True, ''
    #TODO - this shared commitment might not be with the right Org - may have to relook at how this is working.
    if not has_exclusive_resource_commitment(gov_values.actor_id, gov_values.resource_id):
        return False, '%s(%s) has been denied since the user %s has not acquired the resource exclusively' % (self.name, gov_values.op, gov_values.actor_id)
    return True, ''
def check_device_lifecycle_policy(self, msg, headers):
    """
    Policy precondition for device lifecycle transitions.

    INTEGRATE/DEPLOY/RETIRE require an observatory-operator or org-manager role
    in some Org the device is shared with; other transitions are allowed to the
    resource owner, or to a user with a shared commitment plus an operator/
    manager role. The system actor is always allowed.

    @param msg     the operation message; must contain 'lifecycle_event'
    @param headers message headers carrying governance values
    @retval (allowed, reason) tuple; reason is '' when allowed
    @throws Inconsistent if msg lacks 'lifecycle_event'
    """
    try:
        gov_values = GovernanceHeaderValues(headers)
    except Inconsistent as ex:  # "except X as ex" is valid Py2.6+/Py3 (was Py2-only "except X, ex")
        return False, ex.message
    # The system actor can do anything
    if is_system_actor(gov_values.actor_id):
        return True, ''
    # "in" replaces the deprecated dict.has_key()
    if 'lifecycle_event' in msg:
        lifecycle_event = msg['lifecycle_event']
    else:
        raise Inconsistent('%s(%s) has been denied since the lifecycle_event can not be found in the message'% (self.name, gov_values.op))
    orgs,_ = self.clients.resource_registry.find_subjects(RT.Org, PRED.hasResource, gov_values.resource_id)
    if not orgs:
        return False, '%s(%s) has been denied since the resource id %s has not been shared with any Orgs' % (self.name, gov_values.op, gov_values.resource_id)
    #Handle these lifecycle transitions first
    if lifecycle_event == LCE.INTEGRATE or lifecycle_event == LCE.DEPLOY or lifecycle_event == LCE.RETIRE:
        #Check across Orgs which have shared this device for role which as proper level to allow lifecycle transition
        for org in orgs:
            if has_org_role(gov_values.actor_roles, org.org_governance_name, [OBSERVATORY_OPERATOR_ROLE,ORG_MANAGER_ROLE]):
                return True, ''
    else:
        #The owner can do any of these other lifecycle transitions
        is_owner = is_resource_owner(gov_values.actor_id, gov_values.resource_id)
        if is_owner:
            return True, ''
        #TODO - this shared commitment might not be with the right Org - may have to relook at how this is working.
        is_shared = has_shared_resource_commitment(gov_values.actor_id, gov_values.resource_id)
        #Check across Orgs which have shared this device for role which as proper level to allow lifecycle transition
        for org in orgs:
            if has_org_role(gov_values.actor_roles, org.org_governance_name, [INSTRUMENT_OPERATOR_ROLE, OBSERVATORY_OPERATOR_ROLE,ORG_MANAGER_ROLE] ) and is_shared:
                return True, ''
    return False, '%s(%s) has been denied since the user %s has not acquired the resource or is not the proper role for this transition: %s' % (self.name, gov_values.op, gov_values.actor_id, lifecycle_event)
##
##
## DIRECT ACCESS
##
##
def request_direct_access(self, instrument_device_id=''):
    """
    Request a direct-access channel to an instrument. NOT YET IMPLEMENTED.

    Planned steps:
    - determine whether id is for physical or logical instrument, look up if not
    - validate request: current instrument state, policy, and other
    - retrieve and save current instrument settings
    - request DA channel, save reference
    - return direct access channel

    @param instrument_device_id id of the target instrument device
    @throws NotImplementedError always (stub)
    """
    # removed unreachable "pass" that followed the raise
    raise NotImplementedError()
def stop_direct_access(self, instrument_device_id=''):
    """
    Tear down a direct-access channel to an instrument. NOT YET IMPLEMENTED.

    @param instrument_device_id id of the target instrument device
    @retval (planned) {success: true}
    @throws NotImplementedError always (stub)
    """
    # removed unreachable "pass" that followed the raise
    raise NotImplementedError()
##########################################################################
#
# PLATFORM AGENT INSTANCE
#
##########################################################################
def create_platform_agent_instance(self, platform_agent_instance=None, platform_agent_id="", platform_device_id=""):
    """
    Register a new PlatformAgentInstance resource, optionally wiring it to an
    agent and/or a device.
    @param platform_agent_instance the object to be created as a resource
    @param platform_agent_id  optional agent to associate with the new instance
    @param platform_device_id optional device to associate with the new instance
    @retval platform_agent_instance_id the id of the new object
    @throws BadRequest if the incoming _id field is set
    @throws BadRequest if the incoming name already exists
    """
    new_id = self.RR2.create(platform_agent_instance, RT.PlatformAgentInstance)
    if platform_agent_id:
        self.assign_platform_agent_to_platform_agent_instance(platform_agent_id, new_id)
    if platform_device_id:
        self.assign_platform_agent_instance_to_platform_device(new_id, platform_device_id)
    return new_id
def update_platform_agent_instance(self, platform_agent_instance=None):
    """
    Persist changes to an existing PlatformAgentInstance resource.
    @param platform_agent_instance the modified resource object (its _id must be set)
    @retval success whether we succeeded
    @throws BadRequest if the incoming _id field is not set
    @throws BadRequest if the incoming name already exists
    """
    updated = self.RR2.update(platform_agent_instance, RT.PlatformAgentInstance)
    return updated
def read_platform_agent_instance(self, platform_agent_instance_id=''):
    """
    Look up a single PlatformAgentInstance resource by its id.
    @param platform_agent_instance_id the id of the object to be fetched
    @retval PlatformAgentInstance resource
    """
    resource = self.RR2.read(platform_agent_instance_id, RT.PlatformAgentInstance)
    return resource
def delete_platform_agent_instance(self, platform_agent_instance_id=''):
    """
    Retire (soft-delete) the PlatformAgentInstance with the given id.
    For a hard delete, see force_delete_platform_agent_instance.
    @param platform_agent_instance_id the id of the object to be retired
    """
    self.RR2.retire(platform_agent_instance_id, RT.PlatformAgentInstance)
def force_delete_platform_agent_instance(self, platform_agent_instance_id=''):
    """Hard-delete the PlatformAgentInstance (and its associations) via RR2.pluck_delete."""
    self.RR2.pluck_delete(platform_agent_instance_id, RT.PlatformAgentInstance)
# def _get_child_platforms(self, platform_device_id):
# """ recursively trace hasDevice relationships, return list of all PlatformDevice objects
# TODO: how to get platform ID from platform device?
# """
# children = [] # find by hasDevice relationship
# out = children[:]
# for obj in children:
# descendents = self._get_child_platforms(obj._id)
# out[0:] = descendents
# return out
def start_platform_agent_instance(self, platform_agent_instance_id=''):
    """
    Launch the platform agent instance and return the process id.
    The agent instance must first be created and associated with a platform device.
    @param platform_agent_instance_id id of the platform agent instance to launch
    @retval process_id id of the launched agent process
    """
    config_builder = PlatformAgentConfigurationBuilder(self.clients)
    launcher = AgentLauncher(self.clients.process_dispatcher)
    instance_obj = self.read_platform_agent_instance(platform_agent_instance_id)
    config_builder.set_agent_instance_object(instance_obj)
    config = config_builder.prepare()
    device_obj = config_builder._get_device()
    log.debug("start_platform_agent_instance: device is %s connected to platform agent instance %s (L4-CI-SA-RQ-363)",
              str(device_obj._id), str(platform_agent_instance_id))
    # TODO: retrieve the stream info for this model once stream info is added
    # to the platform model create (was: platform_model_obj.custom_attributes['streams'])
    process_id = launcher.launch(config, config_builder._get_process_definition()._id)
    config_builder.record_launch_parameters(config, process_id)
    launcher.await_launch(20)
    return process_id
def stop_platform_agent_instance(self, platform_agent_instance_id=''):
    """
    Deactivate the platform agent instance by cancelling its agent process.
    @param platform_agent_instance_id id of the platform agent instance to stop
    """
    self.stop_agent_instance(platform_agent_instance_id, RT.PlatformDevice)
##########################################################################
#
# PLATFORM AGENT
#
##########################################################################
def create_platform_agent(self, platform_agent=None):
    """
    Register a new PlatformAgent resource and create the process definition
    used to launch it.
    @param platform_agent the object to be created as a resource
    @retval platform_agent_id the id of the new object
    @throws BadRequest if the incoming _id field is set
    @throws BadRequest if the incoming name already exists
    """
    platform_agent_id = self.RR2.create(platform_agent, RT.PlatformAgent)
    # Create the process definition to launch the agent
    process_definition = ProcessDefinition()
    process_definition.executable['module'] = 'ion.agents.platform.platform_agent'
    process_definition.executable['class'] = 'PlatformAgent'
    dispatcher = self.clients.process_dispatcher
    process_definition_id = dispatcher.create_process_definition(process_definition=process_definition)
    # associate the agent and the process def
    self.RR2.assign_process_definition_to_platform_agent(process_definition_id, platform_agent_id)
    return platform_agent_id
def update_platform_agent(self, platform_agent=None):
    """
    Persist changes to an existing PlatformAgent resource.
    @param platform_agent the modified resource object (its _id must be set)
    @retval success whether we succeeded
    @throws BadRequest if the incoming _id field is not set
    @throws BadRequest if the incoming name already exists
    """
    updated = self.RR2.update(platform_agent, RT.PlatformAgent)
    return updated
def read_platform_agent(self, platform_agent_id=''):
    """
    Look up a single PlatformAgent resource by its id.
    @param platform_agent_id the id of the object to be fetched
    @retval PlatformAgent resource
    """
    resource = self.RR2.read(platform_agent_id, RT.PlatformAgent)
    return resource
def delete_platform_agent(self, platform_agent_id=''):
    """
    Retire (soft-delete) the PlatformAgent with the given id.
    For a hard delete, see force_delete_platform_agent.
    @param platform_agent_id the id of the object to be retired
    """
    self.RR2.retire(platform_agent_id, RT.PlatformAgent)
def force_delete_platform_agent(self, platform_agent_id=''):
    """Hard-delete the PlatformAgent (and its associations) via RR2.pluck_delete."""
    self.RR2.pluck_delete(platform_agent_id, RT.PlatformAgent)
##########################################################################
#
# PLATFORM MODEL
#
##########################################################################
def create_platform_model(self, platform_model=None):
    """
    Register a new PlatformModel resource.
    @param platform_model the object to be created as a resource
    @retval platform_model_id the id of the new object
    @throws BadRequest if the incoming _id field is set
    @throws BadRequest if the incoming name already exists
    """
    new_id = self.RR2.create(platform_model, RT.PlatformModel)
    return new_id
def update_platform_model(self, platform_model=None):
    """
    Persist changes to an existing PlatformModel resource.
    @param platform_model the modified resource object (its _id must be set)
    @retval success whether we succeeded
    @throws BadRequest if the incoming _id field is not set
    @throws BadRequest if the incoming name already exists
    """
    updated = self.RR2.update(platform_model, RT.PlatformModel)
    return updated
def read_platform_model(self, platform_model_id=''):
    """
    Look up a single PlatformModel resource by its id.
    @param platform_model_id the id of the object to be fetched
    @retval PlatformModel resource
    """
    resource = self.RR2.read(platform_model_id, RT.PlatformModel)
    return resource
def delete_platform_model(self, platform_model_id=''):
    """
    Retire (soft-delete) the PlatformModel with the given id.
    For a hard delete, see force_delete_platform_model.
    @param platform_model_id the id of the object to be retired
    """
    self.RR2.retire(platform_model_id, RT.PlatformModel)
def force_delete_platform_model(self, platform_model_id=''):
    """Hard-delete the PlatformModel (and its associations) via RR2.pluck_delete."""
    self.RR2.pluck_delete(platform_model_id, RT.PlatformModel)
##########################################################################
#
# PHYSICAL PLATFORM
#
##########################################################################
def create_platform_device(self, platform_device=None):
    """
    Register a new PlatformDevice resource and register it with DAMS as a
    data producer.
    @param platform_device the object to be created as a resource
    @retval platform_device_id the id of the new object
    @throws BadRequest if the incoming _id field is set
    @throws BadRequest if the incoming name already exists
    """
    new_id = self.RR2.create(platform_device, RT.PlatformDevice)
    # register the platform as a data producer
    self.DAMS.register_instrument(new_id)
    return new_id
def update_platform_device(self, platform_device=None):
    """
    Persist changes to an existing PlatformDevice resource.
    @param platform_device the modified resource object (its _id must be set)
    @retval success whether we succeeded
    @throws BadRequest if the incoming _id field is not set
    @throws BadRequest if the incoming name already exists
    """
    updated = self.RR2.update(platform_device, RT.PlatformDevice)
    return updated
def read_platform_device(self, platform_device_id=''):
    """
    Look up a single PlatformDevice resource by its id.
    @param platform_device_id the id of the object to be fetched
    @retval PlatformDevice resource
    """
    resource = self.RR2.read(platform_device_id, RT.PlatformDevice)
    return resource
def delete_platform_device(self, platform_device_id=''):
    """
    Retire (soft-delete) the PlatformDevice with the given id.
    For a hard delete, see force_delete_platform_device.
    @param platform_device_id the id of the object to be retired
    """
    self.RR2.retire(platform_device_id, RT.PlatformDevice)
def force_delete_platform_device(self, platform_device_id=''):
    """Hard-delete the PlatformDevice (and its associations) via RR2.pluck_delete."""
    self.RR2.pluck_delete(platform_device_id, RT.PlatformDevice)
##########################################################################
#
# SENSOR MODEL
#
##########################################################################
def create_sensor_model(self, sensor_model=None):
    """
    Register a new SensorModel resource.
    @param sensor_model the object to be created as a resource
    @retval sensor_model_id the id of the new object
    @throws BadRequest if the incoming _id field is set
    @throws BadRequest if the incoming name already exists
    """
    new_id = self.RR2.create(sensor_model, RT.SensorModel)
    return new_id
def update_sensor_model(self, sensor_model=None):
    """
    Persist changes to an existing SensorModel resource.
    @param sensor_model the modified resource object (its _id must be set)
    @retval success whether we succeeded
    @throws BadRequest if the incoming _id field is not set
    @throws BadRequest if the incoming name already exists
    """
    updated = self.RR2.update(sensor_model, RT.SensorModel)
    return updated
def read_sensor_model(self, sensor_model_id=''):
    """
    Look up a single SensorModel resource by its id.
    @param sensor_model_id the id of the object to be fetched
    @retval SensorModel resource
    """
    resource = self.RR2.read(sensor_model_id, RT.SensorModel)
    return resource
def delete_sensor_model(self, sensor_model_id=''):
    """
    Retire (soft-delete) the SensorModel with the given id.
    For a hard delete, see force_delete_sensor_model.
    @param sensor_model_id the id of the object to be retired
    """
    self.RR2.retire(sensor_model_id, RT.SensorModel)
def force_delete_sensor_model(self, sensor_model_id=''):
    """Hard-delete the SensorModel (and its associations) via RR2.pluck_delete."""
    self.RR2.pluck_delete(sensor_model_id, RT.SensorModel)
##########################################################################
#
# PHYSICAL SENSOR
#
##########################################################################
def create_sensor_device(self, sensor_device=None):
    """
    Register a new SensorDevice resource.
    @param sensor_device the object to be created as a resource
    @retval sensor_device_id the id of the new object
    @throws BadRequest if the incoming _id field is set
    @throws BadRequest if the incoming name already exists
    """
    new_id = self.RR2.create(sensor_device, RT.SensorDevice)
    return new_id
def update_sensor_device(self, sensor_device=None):
    """
    Persist changes to an existing SensorDevice resource.
    @param sensor_device the modified resource object (its _id must be set)
    @retval success whether we succeeded
    @throws BadRequest if the incoming _id field is not set
    @throws BadRequest if the incoming name already exists
    """
    updated = self.RR2.update(sensor_device, RT.SensorDevice)
    return updated
def read_sensor_device(self, sensor_device_id=''):
    """
    Look up a single SensorDevice resource by its id.
    @param sensor_device_id the id of the object to be fetched
    @retval SensorDevice resource
    """
    resource = self.RR2.read(sensor_device_id, RT.SensorDevice)
    return resource
def delete_sensor_device(self, sensor_device_id=''):
    """
    Retire (soft-delete) the SensorDevice with the given id.
    For a hard delete, see force_delete_sensor_device.
    @param sensor_device_id the id of the object to be retired
    """
    self.RR2.retire(sensor_device_id, RT.SensorDevice)
def force_delete_sensor_device(self, sensor_device_id=''):
    """Hard-delete the SensorDevice (and its associations) via RR2.pluck_delete."""
    self.RR2.pluck_delete(sensor_device_id, RT.SensorDevice)
##########################################################################
#
# ASSOCIATIONS
#
##########################################################################
def assign_instrument_model_to_instrument_device(self, instrument_model_id='', instrument_device_id=''):
    """
    Associate an InstrumentModel with an InstrumentDevice.

    Custom attributes on the device that are not declared by the model are
    logged as warnings (deliberately not rejected -- see commented-out raise).

    @param instrument_model_id  id of the model
    @param instrument_device_id id of the device
    """
    instrument_model_obj = self.RR2.read(instrument_model_id)
    instrument_device_obj = self.RR2.read(instrument_device_id)
    for k, v in instrument_device_obj.custom_attributes.iteritems():
        # "k not in" replaces the non-idiomatic "not k in"
        if k not in instrument_model_obj.custom_attributes:
            err_msg = ("InstrumentDevice '%s' contains custom attribute '%s' (value '%s'), but this attribute"
                       + " is not defined by associated InstrumentModel '%s'") % (instrument_device_id,
                                                                                 k, v,
                                                                                 instrument_model_id)
            #raise BadRequest(err_msg)
            log.warn(err_msg)
    self.RR2.assign_one_instrument_model_to_instrument_device(instrument_model_id, instrument_device_id)
def unassign_instrument_model_from_instrument_device(self, instrument_model_id='', instrument_device_id=''):
    """Remove the association between an InstrumentModel and an InstrumentDevice."""
    self.RR2.unassign_instrument_model_from_instrument_device(instrument_model_id, instrument_device_id)
def assign_instrument_model_to_instrument_agent(self, instrument_model_id='', instrument_agent_id=''):
    """Associate an InstrumentModel with an InstrumentAgent."""
    self.RR2.assign_instrument_model_to_instrument_agent(instrument_model_id, instrument_agent_id)
def unassign_instrument_model_from_instrument_agent(self, instrument_model_id='', instrument_agent_id=''):
    """Remove the association between an InstrumentModel and an InstrumentAgent."""
    # BUGFIX: arguments were previously passed in (agent_id, model_id) order,
    # inconsistent with assign_instrument_model_to_instrument_agent and with
    # every other assign/unassign pair in this section.
    self.RR2.unassign_instrument_model_from_instrument_agent(instrument_model_id, instrument_agent_id)
def assign_platform_model_to_platform_agent(self, platform_model_id='', platform_agent_id=''):
    """Associate a PlatformModel with a PlatformAgent."""
    self.RR2.assign_platform_model_to_platform_agent(platform_model_id, platform_agent_id)
def unassign_platform_model_from_platform_agent(self, platform_model_id='', platform_agent_id=''):
    """Remove the association between a PlatformModel and a PlatformAgent."""
    self.RR2.unassign_platform_model_from_platform_agent(platform_model_id, platform_agent_id)
def assign_sensor_model_to_sensor_device(self, sensor_model_id='', sensor_device_id=''):
    """Associate a SensorModel with a SensorDevice (one model per device)."""
    self.RR2.assign_one_sensor_model_to_sensor_device(sensor_model_id, sensor_device_id)
def unassign_sensor_model_from_sensor_device(self, sensor_model_id='', sensor_device_id=''):
    """Remove the association between a SensorModel and a SensorDevice."""
    # BUGFIX: 'self' was previously passed as an extra positional argument to the
    # RR2 helper, shifting sensor_model_id/sensor_device_id into the wrong slots.
    self.RR2.unassign_sensor_model_from_sensor_device(sensor_model_id, sensor_device_id)
def assign_platform_model_to_platform_device(self, platform_model_id='', platform_device_id=''):
    """Associate a PlatformModel with a PlatformDevice (one model per device)."""
    self.RR2.assign_one_platform_model_to_platform_device(platform_model_id, platform_device_id)
def unassign_platform_model_from_platform_device(self, platform_model_id='', platform_device_id=''):
    """Remove the association between a PlatformModel and a PlatformDevice."""
    self.RR2.unassign_platform_model_from_platform_device(platform_model_id, platform_device_id)
def assign_instrument_device_to_platform_device(self, instrument_device_id='', platform_device_id=''):
    """Attach an instrument device to (at most one) platform device (RR2 enforces cardinality)."""
    self.RR2.assign_instrument_device_to_one_platform_device(instrument_device_id, platform_device_id)
def unassign_instrument_device_from_platform_device(self, instrument_device_id='', platform_device_id=''):
    """Detach an instrument device from a platform device (RR2 pass-through)."""
    self.RR2.unassign_instrument_device_from_platform_device(instrument_device_id, platform_device_id)
def assign_platform_device_to_platform_device(self, child_platform_device_id='', platform_device_id=''):
    """Attach a child platform device to (at most one) parent platform device (RR2 enforces cardinality)."""
    self.RR2.assign_platform_device_to_one_platform_device(child_platform_device_id, platform_device_id)
def unassign_platform_device_from_platform_device(self, child_platform_device_id='', platform_device_id=''):
    """Detach a child platform device from its parent platform device (RR2 pass-through)."""
    self.RR2.unassign_platform_device_from_platform_device(child_platform_device_id, platform_device_id)
def assign_platform_agent_to_platform_agent_instance(self, platform_agent_id='', platform_agent_instance_id=''):
    """Associate a single platform agent with a platform agent instance (RR2 enforces cardinality)."""
    self.RR2.assign_one_platform_agent_to_platform_agent_instance(platform_agent_id, platform_agent_instance_id)
def unassign_platform_agent_from_platform_agent_instance(self, platform_agent_id='', platform_agent_instance_id=''):
    """Remove the agent association from a platform agent instance (RR2 pass-through)."""
    self.RR2.unassign_platform_agent_from_platform_agent_instance(platform_agent_id, platform_agent_instance_id)
def assign_instrument_agent_to_instrument_agent_instance(self, instrument_agent_id='', instrument_agent_instance_id=''):
    """Associate a single instrument agent with an instrument agent instance (RR2 enforces cardinality)."""
    self.RR2.assign_one_instrument_agent_to_instrument_agent_instance(instrument_agent_id, instrument_agent_instance_id)
def unassign_instrument_agent_from_instrument_agent_instance(self, instrument_agent_id='', instrument_agent_instance_id=''):
    """Remove the agent association from an instrument agent instance (RR2 pass-through)."""
    self.RR2.unassign_instrument_agent_from_instrument_agent_instance(instrument_agent_id, instrument_agent_instance_id)
def assign_instrument_agent_instance_to_instrument_device(self, instrument_agent_instance_id='', instrument_device_id=''):
    """Associate a single agent instance with an instrument device (RR2 enforces cardinality)."""
    self.RR2.assign_one_instrument_agent_instance_to_instrument_device(instrument_agent_instance_id, instrument_device_id)
def unassign_instrument_agent_instance_from_instrument_device(self, instrument_agent_instance_id='', instrument_device_id=''):
    """Remove the agent-instance association from an instrument device (RR2 pass-through)."""
    self.RR2.unassign_instrument_agent_instance_from_instrument_device(instrument_agent_instance_id, instrument_device_id)
def assign_platform_agent_instance_to_platform_device(self, platform_agent_instance_id='', platform_device_id=''):
    """Associate a single platform agent instance with a platform device (RR2 enforces cardinality)."""
    self.RR2.assign_one_platform_agent_instance_to_platform_device(platform_agent_instance_id, platform_device_id)
def unassign_platform_agent_instance_from_platform_device(self, platform_agent_instance_id='', platform_device_id=''):
    """Remove the agent-instance association from a platform device (RR2 pass-through)."""
    self.RR2.unassign_platform_agent_instance_from_platform_device(platform_agent_instance_id, platform_device_id)
def assign_sensor_device_to_instrument_device(self, sensor_device_id='', instrument_device_id=''):
    """Attach a sensor device to (at most one) instrument device (RR2 enforces cardinality)."""
    self.RR2.assign_sensor_device_to_one_instrument_device(sensor_device_id, instrument_device_id)
def unassign_sensor_device_from_instrument_device(self, sensor_device_id='', instrument_device_id=''):
    """Detach a sensor device from an instrument device (RR2 pass-through)."""
    self.RR2.unassign_sensor_device_from_instrument_device(sensor_device_id, instrument_device_id)
##########################################################################
#
# DEPLOYMENTS
#
##########################################################################
def deploy_instrument_device(self, instrument_device_id='', deployment_id=''):
    """Associate a Deployment resource with an instrument device.

    NOTE(review): an earlier, now-removed implementation also verified that only
    one site-device-deployment triangle existed at a time (device on at most one
    site, site with at most one deployment, no other deployed device on the
    site); that validation is not performed here.
    """
    self.RR2.assign_deployment_to_instrument_device(deployment_id, instrument_device_id)
def undeploy_instrument_device(self, instrument_device_id='', deployment_id=''):
    """Remove the deployment association from an instrument device (RR2 pass-through)."""
    self.RR2.unassign_deployment_from_instrument_device(deployment_id, instrument_device_id)
def deploy_platform_device(self, platform_device_id='', deployment_id=''):
    """Associate a Deployment resource with a platform device.

    NOTE(review): an earlier, now-removed implementation also verified that only
    one site-device-deployment triangle existed at a time (device on at most one
    site, site with at most one deployment, no other deployed device on the
    site); that validation is not performed here.
    """
    self.RR2.assign_deployment_to_platform_device(deployment_id, platform_device_id)
def undeploy_platform_device(self, platform_device_id='', deployment_id=''):
    """Remove the deployment association from a platform device (RR2 pass-through).

    Fix: the RR2 method name was misspelled ('unassign_deployent_...'), which
    would raise AttributeError at call time; corrected to 'unassign_deployment_...'
    to match the generated RR2 association API used everywhere else.
    """
    self.RR2.unassign_deployment_from_platform_device(deployment_id, platform_device_id)
############################
#
# ASSOCIATION FIND METHODS
#
############################
def find_instrument_model_by_instrument_device(self, instrument_device_id=''):
    """Return the instrument model(s) associated with a device (RR2 pass-through)."""
    return self.RR2.find_instrument_models_of_instrument_device(instrument_device_id)
def find_instrument_device_by_instrument_model(self, instrument_model_id=''):
    """Return the instrument device(s) associated with a model (RR2 pass-through)."""
    return self.RR2.find_instrument_devices_by_instrument_model(instrument_model_id)
def find_platform_model_by_platform_device(self, platform_device_id=''):
    """Return the platform model(s) associated with a device (RR2 pass-through)."""
    return self.RR2.find_platform_models_of_platform_device(platform_device_id)
def find_platform_device_by_platform_model(self, platform_model_id=''):
    """Return the platform device(s) associated with a model (RR2 pass-through)."""
    return self.RR2.find_platform_devices_by_platform_model(platform_model_id)
def find_instrument_model_by_instrument_agent(self, instrument_agent_id=''):
    """Return the instrument model(s) associated with an agent (RR2 pass-through)."""
    return self.RR2.find_instrument_models_of_instrument_agent(instrument_agent_id)
def find_instrument_agent_by_instrument_model(self, instrument_model_id=''):
    """Return the instrument agent(s) associated with a model (RR2 pass-through)."""
    return self.RR2.find_instrument_agents_by_instrument_model(instrument_model_id)
def find_instrument_device_by_instrument_agent_instance(self, instrument_agent_instance_id=''):
    """Return the instrument device(s) associated with an agent instance (RR2 pass-through)."""
    return self.RR2.find_instrument_devices_by_instrument_agent_instance(instrument_agent_instance_id)
def find_instrument_agent_instance_by_instrument_device(self, instrument_device_id=''):
    """Return the instrument agent instance(s) associated with a device.

    Logs requirement L4-CI-SA-RQ-363 when at least one agent instance is found.
    """
    agent_instances = self.RR2.find_instrument_agent_instances_of_instrument_device(instrument_device_id)
    if agent_instances:
        log.debug("L4-CI-SA-RQ-363: device %s is connected to instrument agent instance %s",
                  str(instrument_device_id),
                  str(agent_instances[0]._id))
    return agent_instances
def find_instrument_device_by_platform_device(self, platform_device_id=''):
    """Return the instrument device(s) attached to a platform device (RR2 pass-through)."""
    return self.RR2.find_instrument_devices_of_platform_device(platform_device_id)
def find_platform_device_by_instrument_device(self, instrument_device_id=''):
    """Return the platform device(s) an instrument device is attached to (RR2 pass-through)."""
    return self.RR2.find_platform_devices_by_instrument_device(instrument_device_id)
def find_instrument_device_by_logical_instrument(self, logical_instrument_id=''):
    """Deprecated stub: scheduled for removal; always raises NotImplementedError."""
    raise NotImplementedError("TODO: this function will be removed")
def find_logical_instrument_by_instrument_device(self, instrument_device_id=''):
    """Deprecated stub: scheduled for removal; always raises NotImplementedError."""
    raise NotImplementedError("TODO: this function will be removed")
def find_platform_device_by_logical_platform(self, logical_platform_id=''):
    """Deprecated stub: scheduled for removal; always raises NotImplementedError."""
    raise NotImplementedError("TODO: this function will be removed")
def find_logical_platform_by_platform_device(self, platform_device_id=''):
    """Deprecated stub: scheduled for removal; always raises NotImplementedError."""
    raise NotImplementedError("TODO: this function will be removed")
def find_data_product_by_instrument_device(self, instrument_device_id=''):
    """Deprecated stub: scheduled for removal; always raises NotImplementedError."""
    raise NotImplementedError("TODO: this function will be removed")
def find_instrument_device_by_data_product(self, data_product_id=''):
    """Deprecated stub: scheduled for removal; always raises NotImplementedError."""
    raise NotImplementedError("TODO: this function will be removed")
############################
#
# SPECIALIZED FIND METHODS
#
############################
def find_data_product_by_platform_device(self, platform_device_id=''):
    """Collect the unique data products of all instrument devices on a platform.

    Order is preserved: products appear in the order first encountered.
    NOTE(review): find_data_product_by_instrument_device currently raises
    NotImplementedError, so this method will propagate that until it is restored.
    """
    data_products = []
    for device_id in self.find_instrument_device_by_platform_device(platform_device_id):
        for product in self.find_data_product_by_instrument_device(device_id):
            if product not in data_products:
                data_products.append(product)
    return data_products
############################
#
# LIFECYCLE TRANSITIONS
#
############################
def execute_instrument_agent_lifecycle(self, instrument_agent_id="", lifecycle_event=""):
    """
    declare an instrument_agent to be in a given state
    @param instrument_agent_id the resource id
    @param lifecycle_event the lifecycle transition to execute
    """
    return self.RR2.advance_lcs(instrument_agent_id, lifecycle_event)
def execute_instrument_agent_instance_lifecycle(self, instrument_agent_instance_id="", lifecycle_event=""):
    """
    declare an instrument_agent_instance to be in a given state
    @param instrument_agent_instance_id the resource id
    @param lifecycle_event the lifecycle transition to execute
    """
    return self.RR2.advance_lcs(instrument_agent_instance_id, lifecycle_event)
def execute_instrument_model_lifecycle(self, instrument_model_id="", lifecycle_event=""):
    """
    declare an instrument_model to be in a given state
    @param instrument_model_id the resource id
    @param lifecycle_event the lifecycle transition to execute
    """
    return self.RR2.advance_lcs(instrument_model_id, lifecycle_event)
def execute_instrument_device_lifecycle(self, instrument_device_id="", lifecycle_event=""):
    """
    declare an instrument_device to be in a given state
    @param instrument_device_id the resource id
    @param lifecycle_event the lifecycle transition to execute
    """
    return self.RR2.advance_lcs(instrument_device_id, lifecycle_event)
def execute_platform_agent_lifecycle(self, platform_agent_id="", lifecycle_event=""):
    """
    declare a platform_agent to be in a given state
    @param platform_agent_id the resource id
    @param lifecycle_event the lifecycle transition to execute
    """
    return self.RR2.advance_lcs(platform_agent_id, lifecycle_event)
def execute_platform_agent_instance_lifecycle(self, platform_agent_instance_id="", lifecycle_event=""):
    """
    declare a platform_agent_instance to be in a given state
    @param platform_agent_instance_id the resource id
    @param lifecycle_event the lifecycle transition to execute
    """
    return self.RR2.advance_lcs(platform_agent_instance_id, lifecycle_event)
def execute_platform_model_lifecycle(self, platform_model_id="", lifecycle_event=""):
    """
    declare a platform_model to be in a given state
    @param platform_model_id the resource id
    @param lifecycle_event the lifecycle transition to execute
    """
    return self.RR2.advance_lcs(platform_model_id, lifecycle_event)
def execute_platform_device_lifecycle(self, platform_device_id="", lifecycle_event=""):
    """
    declare a platform_device to be in a given state
    @param platform_device_id the resource id
    @param lifecycle_event the lifecycle transition to execute
    """
    return self.RR2.advance_lcs(platform_device_id, lifecycle_event)
def execute_sensor_model_lifecycle(self, sensor_model_id="", lifecycle_event=""):
    """
    declare a sensor_model to be in a given state
    @param sensor_model_id the resource id
    @param lifecycle_event the lifecycle transition to execute
    """
    return self.RR2.advance_lcs(sensor_model_id, lifecycle_event)
def execute_sensor_device_lifecycle(self, sensor_device_id="", lifecycle_event=""):
    """
    declare a sensor_device to be in a given state
    @param sensor_device_id the resource id
    @param lifecycle_event the lifecycle transition to execute
    """
    return self.RR2.advance_lcs(sensor_device_id, lifecycle_event)
############################
#
# EXTENDED RESOURCES
#
############################
def get_instrument_device_extension(self, instrument_device_id='', ext_associations=None, ext_exclude=None, user_id=''):
    """Returns an InstrumentDeviceExtension object containing additional related information
    @param instrument_device_id str
    @param ext_associations dict
    @param ext_exclude list
    @retval instrument_device InstrumentDeviceExtension
    @throws BadRequest A parameter is missing
    @throws NotFound An object with the specified instrument_device_id does not exist
    """
    if not instrument_device_id:
        raise BadRequest("The instrument_device_id parameter is empty")

    extended_resource_handler = ExtendedResourceContainer(self)
    extended_instrument = extended_resource_handler.create_extended_resource_container(
        OT.InstrumentDeviceExtension,
        instrument_device_id,
        OT.InstrumentDeviceComputedAttributes,
        ext_associations=ext_associations,
        ext_exclude=ext_exclude,
        user_id=user_id)

    # the InstAgent list sometimes includes the device itself; keep only real agents
    extended_instrument.instrument_agent = [agent for agent in extended_instrument.instrument_agent
                                            if agent.type_ == 'InstrumentAgent']

    # Status computation
    status_rollups = self.outil.get_status_roll_ups(instrument_device_id, RT.InstrumentDevice)

    def short_status_rollup(key):
        # wrap one rollup value (default STATUS_UNKNOWN) as a ComputedIntValue
        return ComputedIntValue(status=ComputedValueAvailability.PROVIDED,
                                value=status_rollups[instrument_device_id].get(key, StatusType.STATUS_UNKNOWN))

    computed = extended_instrument.computed
    computed.communications_status_roll_up = short_status_rollup("comms")
    computed.power_status_roll_up          = short_status_rollup("power")
    computed.data_status_roll_up           = short_status_rollup("data")
    computed.location_status_roll_up       = short_status_rollup("loc")
    computed.aggregated_status             = short_status_rollup("agg")

    return extended_instrument
# TODO: this causes a problem because an instrument agent must be running in order to look up extended attributes.
def obtain_agent_handle(self, device_id):
    """Return a ResourceAgentClient for the given device id.

    NOTE(review): requires the agent process to be running; raises otherwise
    (callers such as obtain_agent_calculation catch NotFound).
    """
    ia_client = ResourceAgentClient(device_id, process=self)
    log.debug("got the instrument agent client here: %s for the device id: %s and process: %s", ia_client, device_id, self)

    # #todo: any validation?
    # cmd = AgentCommand(command='get_current_state')
    # retval = self._ia_client.execute_agent(cmd)
    # state = retval.result
    # self.assertEqual(state, InstrumentAgentState.UNINITIALIZED)
    #
    return ia_client
def obtain_agent_calculation(self, device_id, result_container):
    """Prepare a computed-attribute container and try to get an agent client.

    @param device_id        the device whose agent to contact
    @param result_container the IonObject type name for the computed value
    @retval (agent_client_or_None, container) -- container.status is PROVIDED when
            the agent client was obtained, NOTAVAILABLE when the agent is not running
    """
    ret = IonObject(result_container)
    a_client = None
    try:
        a_client = self.obtain_agent_handle(device_id)
        ret.status = ComputedValueAvailability.PROVIDED
    except NotFound:
        ret.status = ComputedValueAvailability.NOTAVAILABLE
        ret.reason = "Could not connect to instrument agent instance -- may not be running"
    # fix: removed redundant 'except Exception as e: raise e', which only
    # re-raised the exception; other exceptions now propagate unchanged.
    return a_client, ret
#functions for INSTRUMENT computed attributes -- currently bogus values returned
def get_firmware_version(self, instrument_device_id):
    """Computed attribute: firmware version. Placeholder -- returns 0.0 when the agent is reachable."""
    ia_client, ret = self.obtain_agent_calculation(instrument_device_id, OT.ComputedFloatValue)
    if ia_client:
        ret.value = 0.0 #todo: use ia_client
    return ret
def get_last_data_received_datetime(self, instrument_device_id):
    """Computed attribute: last data received time. Placeholder -- returns 0.0 when the agent is reachable."""
    ia_client, ret = self.obtain_agent_calculation(instrument_device_id, OT.ComputedFloatValue)
    if ia_client:
        ret.value = 0.0 #todo: use ia_client
    return ret
def get_operational_state(self, taskable_resource_id):   # from Device
    """Computed attribute: operational state. Placeholder -- returns '' when the agent is reachable."""
    ia_client, ret = self.obtain_agent_calculation(taskable_resource_id, OT.ComputedStringValue)
    if ia_client:
        ret.value = "" #todo: use ia_client
    return ret
def get_last_calibration_datetime(self, instrument_device_id):
    """Computed attribute: last calibration time. Placeholder until the agent client is used.

    Fix: assign 0.0 (float) rather than int 0, for consistency with the other
    ComputedFloatValue placeholders in this class.
    """
    ia_client, ret = self.obtain_agent_calculation(instrument_device_id, OT.ComputedFloatValue)
    if ia_client:
        ret.value = 0.0 #todo: use ia_client
    return ret
def get_uptime(self, device_id):
    """Computed attribute: uptime string for an instrument or platform device.

    Searches recent ResourceAgentStateEvent events for the device. If the most
    recent relevant event shows the device streaming/monitoring, uptime is the
    elapsed time since that event; if it shows a shutdown state, uptime is 0.

    Fix: the event-extraction loop variable previously shadowed the builtin
    'tuple'; renamed without behavior change.
    """
    ia_client, ret = self.obtain_agent_calculation(device_id, OT.ComputedStringValue)

    if ia_client:
        # Find events in the event repo that were published when changes of state occurred for the instrument or the platform
        # The Instrument Agent publishes events of a particular type, ResourceAgentStateEvent, and origin_type. So we query the events db for those.

        #----------------------------------------------------------------------------------------------
        # Check whether it is a platform or an instrument
        #----------------------------------------------------------------------------------------------
        device = self.RR.read(device_id)

        #----------------------------------------------------------------------------------------------
        # These below are the possible new event states while taking the instrument off streaming mode or the platform off monitoring mode
        # This is info got from possible actions to wind down the instrument or platform that one can take in the UI when the device is already streaming/monitoring
        #----------------------------------------------------------------------------------------------
        event_state = ''
        not_streaming_states = [ResourceAgentState.COMMAND, ResourceAgentState.INACTIVE, ResourceAgentState.UNINITIALIZED]

        if device.type_ == 'InstrumentDevice':
            event_state = ResourceAgentState.STREAMING
        elif device.type_ == 'PlatformDevice':
            event_state = 'PLATFORM_AGENT_STATE_MONITORING'

        #----------------------------------------------------------------------------------------------
        # Get events associated with device from the events db
        #----------------------------------------------------------------------------------------------
        log.debug("For uptime, we are checking the device with id: %s, type_: %s, and searching recent events for the following event_state: %s",device_id, device.type_, event_state)
        event_tuples = self.container.event_repository.find_events(origin=device_id, event_type='ResourceAgentStateEvent', descending=True)

        # each tuple's third element is the event object
        recent_events = [event_tuple[2] for event_tuple in event_tuples]

        #----------------------------------------------------------------------------------------------
        # We assume below that the events have been sorted in time, with most recent events first in the list
        #----------------------------------------------------------------------------------------------
        for evt in recent_events:
            log.debug("Got a recent event with event_state: %s", evt.state)
            if evt.state == event_state:  # "RESOURCE_AGENT_STATE_STREAMING"
                current_time = get_ion_ts() # this is in milliseconds
                log.debug("Got most recent streaming event with ts_created: %s. Got the current time: %s", evt.ts_created, current_time)

                return self._convert_to_string(ret, int(current_time)/1000 - int(evt.ts_created)/1000 )
            elif evt.state in not_streaming_states:
                log.debug("Got a most recent event state that means instrument is not streaming anymore: %s", evt.state)
                # The instrument has been recently shut down. This has happened recently and no need to look further whether it was streaming earlier
                return self._convert_to_string(ret, 0)

    return self._convert_to_string(ret, 0)
def _convert_to_string(self, ret, value):
    """
    A helper method to put it in a string value into a ComputedStringValue object that will be returned

    @param ret ComputedStringValue object
    @param value int (elapsed seconds)
    @retval ret The ComputedStringValue with a value that is of type String
    """
    # convert a second count into days/hours/minutes via datetime arithmetic
    elapsed = datetime(1, 1, 1) + timedelta(seconds=value)
    ret.value = "%s days, %s hours, %s minutes" % (elapsed.day - 1, elapsed.hour, elapsed.minute)
    log.debug("Returning the computed attribute for uptime with value: %s", ret.value)
    return ret
#functions for INSTRUMENT computed attributes -- currently bogus values returned
def get_platform_device_extension(self, platform_device_id='', ext_associations=None, ext_exclude=None, user_id=''):
    """Returns a PlatformDeviceExtension object containing additional related information

    @param platform_device_id str
    @param ext_associations dict
    @param ext_exclude list
    @retval PlatformDeviceExtension
    @throws BadRequest if platform_device_id is empty

    Fix: platform status rollup used 'status_rollups(pdev._id, {})' -- calling
    the dict -- instead of 'status_rollups.get(...)'; the resulting TypeError was
    silently swallowed by the broad except and statuses stayed UNKNOWN.
    """
    if not platform_device_id:
        raise BadRequest("The platform_device_id parameter is empty")

    extended_resource_handler = ExtendedResourceContainer(self)
    extended_platform = extended_resource_handler.create_extended_resource_container(
        OT.PlatformDeviceExtension,
        platform_device_id,
        OT.PlatformDeviceComputedAttributes,
        ext_associations=ext_associations,
        ext_exclude=ext_exclude,
        user_id=user_id)

    # lookup all hasModel predicates
    # lookup is a 2d associative array of [subject type][subject id] -> object id
    lookup = dict([(rt, {}) for rt in [RT.PlatformDevice, RT.InstrumentDevice]])
    for a in self.RR.find_associations(predicate=PRED.hasModel, id_only=False):
        if a.st in lookup:
            lookup[a.st][a.s] = a.o

    def retrieve_model_objs(rsrc_list, object_type):
        # rsrc_list is devices that need models looked up.  object_type is the resource type (a device)
        # not all devices have models (represented as None), which kills read_mult.  so, extract the models ids,
        #  look up all the model ids, then create the proper output
        model_list = [lookup[object_type].get(r._id) for r in rsrc_list]
        model_uniq = list(set([m for m in model_list if m is not None]))
        model_objs = self.clients.resource_registry.read_mult(model_uniq)
        model_dict = dict(zip(model_uniq, model_objs))
        return [model_dict.get(m) for m in model_list]

    extended_platform.instrument_models = retrieve_model_objs(extended_platform.instrument_devices,
                                                              RT.InstrumentDevice)
    extended_platform.platform_models = retrieve_model_objs(extended_platform.platforms,
                                                            RT.PlatformDevice)

    s_unknown = StatusType.STATUS_UNKNOWN

    # Status computation -- default everything to UNKNOWN, then try the rollup
    extended_platform.computed.instrument_status = [s_unknown] * len(extended_platform.instrument_devices)
    extended_platform.computed.platform_status = [s_unknown] * len(extended_platform.platforms)

    def status_unknown():
        return ComputedIntValue(status=ComputedValueAvailability.PROVIDED, value=StatusType.STATUS_UNKNOWN)

    extended_platform.computed.communications_status_roll_up = status_unknown()
    extended_platform.computed.power_status_roll_up = status_unknown()
    extended_platform.computed.data_status_roll_up = status_unknown()
    extended_platform.computed.location_status_roll_up = status_unknown()
    extended_platform.computed.aggregated_status = status_unknown()

    try:
        status_rollups = self.outil.get_status_roll_ups(platform_device_id, RT.PlatformDevice)
        extended_platform.computed.instrument_status = [status_rollups.get(idev._id, {}).get("agg", s_unknown)
                                                        for idev in extended_platform.instrument_devices]
        extended_platform.computed.platform_status = [status_rollups.get(pdev._id, {}).get("agg", s_unknown)
                                                      for pdev in extended_platform.platforms]

        def short_status_rollup(key):
            return ComputedIntValue(status=ComputedValueAvailability.PROVIDED,
                                    value=status_rollups[platform_device_id].get(key, StatusType.STATUS_UNKNOWN))

        extended_platform.computed.communications_status_roll_up = short_status_rollup("comms")
        extended_platform.computed.power_status_roll_up = short_status_rollup("power")
        extended_platform.computed.data_status_roll_up = short_status_rollup("data")
        extended_platform.computed.location_status_roll_up = short_status_rollup("loc")
        extended_platform.computed.aggregated_status = short_status_rollup("agg")
    except Exception:
        log.exception("Computed attribute failed for %s" % platform_device_id)

    return extended_platform
def get_data_product_parameters_set(self, resource_id=''):
    """Return the parameter contexts of each output data product of a resource.

    Result is a ComputedDictValue keyed by each product's processing_level_code,
    mapping parameter name -> dumped ParameterContext.
    @param resource_id the resource whose hasOutputProduct associations to follow
    @throws BadRequest if resource_id is empty, or a product has no stream or
            stream definition
    """
    # return the set of data product with the processing_level_code as the key to identify
    ret = IonObject(OT.ComputedDictValue)
    log.debug("get_data_product_parameters_set: resource_id is %s ", str(resource_id))
    if not resource_id:
        raise BadRequest("The resource_id parameter is empty")

    # retrieve the output products (ids only)
    data_product_ids, _ = self.clients.resource_registry.find_objects(resource_id,
                                                                      PRED.hasOutputProduct,
                                                                      RT.DataProduct,
                                                                      True)
    log.debug("get_data_product_parameters_set: data_product_ids is %s ", str(data_product_ids))
    if not data_product_ids:
        ret.status = ComputedValueAvailability.NOTAVAILABLE
    else:
        for data_product_id in data_product_ids:
            data_product_obj = self.clients.resource_registry.read(data_product_id)

            # retrieve the stream for this data product
            data_product_stream_ids, _ = self.clients.resource_registry.find_objects(data_product_id,
                                                                                     PRED.hasStream,
                                                                                     RT.Stream,
                                                                                     True)
            if not data_product_stream_ids:
                raise BadRequest("The data product has no stream associated")

            # retrieve the stream definitions for this stream (only the first stream is consulted)
            stream_def_ids, _ = self.clients.resource_registry.find_objects(data_product_stream_ids[0],
                                                                            PRED.hasStreamDefinition,
                                                                            RT.StreamDefinition,
                                                                            True)
            if not stream_def_ids:
                raise BadRequest("The data product stream has no stream definition associated")

            # build {parameter name: dumped context} for every key in the parameter dictionary
            context_dict = {}
            pdict = self.clients.pubsub_management.read_stream_definition(stream_def_ids[0]).parameter_dictionary
            log.debug("get_data_product_parameters_set: pdict %s ", str(pdict) )

            pdict_full = ParameterDictionary.load(pdict)

            for key in pdict_full.keys():
                log.debug("get_data_product_parameters_set: key %s ", str(key))
                context = DatasetManagementService.get_parameter_context_by_name(key)
                log.debug("get_data_product_parameters_set: context %s ", str(context))
                context_dict[key] = context.dump()

            ret.value[data_product_obj.processing_level_code] = context_dict
        ret.status = ComputedValueAvailability.PROVIDED
    return ret
# OOIION-741 remove hardcoded timeout for agent spawning
#!/usr/bin/env python
from ion.util.agent_launcher import AgentLauncher
from ion.services.sa.instrument.agent_configuration_builder import InstrumentAgentConfigurationBuilder, \
PlatformAgentConfigurationBuilder
from ion.util.enhanced_resource_registry_client import EnhancedResourceRegistryClient
from ion.util.resource_lcs_policy import AgentPolicy, ResourceLCSPolicy, ModelPolicy, DevicePolicy
__author__ = 'Maurice Manning, Ian Katz, Michael Meisinger'
import os
import pwd
import json
from datetime import datetime, timedelta
import time
from ooi.logging import log
from pyon.agent.agent import ResourceAgentClient
from pyon.core.bootstrap import IonObject
from pyon.core.exception import Inconsistent,BadRequest, NotFound
from pyon.ion.resource import ExtendedResourceContainer
from pyon.util.ion_time import IonTime
from pyon.public import LCE
from pyon.public import RT, PRED, OT
from pyon.util.containers import get_ion_ts
from pyon.agent.agent import ResourceAgentState
from coverage_model.parameter import ParameterDictionary
from ion.services.dm.inventory.dataset_management_service import DatasetManagementService
from ion.services.sa.instrument.flag import KeywordFlag
from ion.services.sa.observatory.observatory_util import ObservatoryUtil
from ion.util.module_uploader import RegisterModulePreparerEgg
from ion.util.qa_doc_parser import QADocParser
from ion.agents.port.port_agent_process import PortAgentProcess
from interface.objects import AttachmentType, ComputedValueAvailability, ComputedIntValue, StatusType, ProcessDefinition
from interface.services.sa.iinstrument_management_service import BaseInstrumentManagementService
from ion.services.sa.observatory.observatory_management_service import INSTRUMENT_OPERATOR_ROLE, OBSERVATORY_OPERATOR_ROLE
from pyon.core.governance import ORG_MANAGER_ROLE, GovernanceHeaderValues, has_org_role, is_system_actor, has_exclusive_resource_commitment
from pyon.core.governance import has_shared_resource_commitment, is_resource_owner
class InstrumentManagementService(BaseInstrumentManagementService):
"""
@brief Service to manage instrument, platform, and sensor resources, their relationships, and direct access
"""
def on_init(self):
    """Initialize service helpers and register lifecycle/delete policy preconditions."""
    # suppress a few "variable declared but not used" annoying pyflakes errors
    IonObject("Resource")
    self.override_clients(self.clients)
    self.outil = ObservatoryUtil(self)
    self.extended_resource_handler = ExtendedResourceContainer(self)

    self.init_module_uploader()

    # set up all of the policy interceptions
    if self.container and self.container.governance_controller:
        reg_precondition = self.container.governance_controller.register_process_operation_precondition

        no_policy     = ResourceLCSPolicy(self.clients)
        agent_policy  = AgentPolicy(self.clients)
        model_policy  = ModelPolicy(self.clients)
        device_policy = DevicePolicy(self.clients)

        # (resource name, governing policy) pairs, in the original registration order
        policed_resources = [("instrument_agent",          agent_policy),
                             ("instrument_agent_instance", no_policy),
                             ("instrument_model",          model_policy),
                             ("instrument_device",         device_policy),
                             ("platform_agent",            agent_policy),
                             ("platform_agent_instance",   no_policy),
                             ("platform_model",            model_policy),
                             ("platform_device",           device_policy),
                             ("sensor_model",              model_policy),
                             ("sensor_device",             device_policy)]

        # LCS transition preconditions
        for rsrc_name, policy in policed_resources:
            reg_precondition(self, "execute_%s_lifecycle" % rsrc_name,
                             policy.policy_fn_lcs_precondition("%s_id" % rsrc_name))

        # force-delete preconditions
        for rsrc_name, policy in policed_resources:
            reg_precondition(self, "force_delete_%s" % rsrc_name,
                             policy.policy_fn_delete_precondition("%s_id" % rsrc_name))
def init_module_uploader(self):
    """Build the driver-egg uploader from service configuration, if configuration exists.

    Reads host/directory/user/wwwprefix from the
    service.instrument_management.driver_release_* config keys; the user
    defaults to the current OS account.
    @throws BadRequest if host, directory, or wwwprefix is missing
    """
    if not self.CFG:
        return

    # looking for forms like host=amoeba.ucsd.edu, remotepath=/var/www/release, user=steve
    get_cfg = self.CFG.get_safe
    cfg_host      = get_cfg("service.instrument_management.driver_release_host", None)
    cfg_remotepath = get_cfg("service.instrument_management.driver_release_directory", None)
    cfg_user      = get_cfg("service.instrument_management.driver_release_user",
                            pwd.getpwuid(os.getuid())[0])
    cfg_wwwprefix = get_cfg("service.instrument_management.driver_release_wwwprefix", None)

    if cfg_host is None or cfg_remotepath is None or cfg_wwwprefix is None:
        raise BadRequest("Missing configuration items; host='%s', directory='%s', wwwprefix='%s'" %
                         (cfg_host, cfg_remotepath, cfg_wwwprefix))

    self.module_uploader = RegisterModulePreparerEgg(dest_user=cfg_user,
                                                     dest_host=cfg_host,
                                                     dest_path=cfg_remotepath,
                                                     dest_wwwprefix=cfg_wwwprefix)
def override_clients(self, new_clients):
    """
    Replaces the service clients with a new set of them... and makes sure they go to the right places
    """
    self.RR2 = EnhancedResourceRegistryClient(new_clients.resource_registry)

    # shortcut names for the import sub-services; we hide these behind hasattr
    # checks even though we expect them so that the resource_impl_metatests work
    client_shortcuts = {"resource_registry":           "RR",
                        "data_acquisition_management": "DAMS",
                        "data_product_management":     "DPMS",
                        "pubsub_management":           "PSMS",
                        "data_retriever":              "DRS"}
    for client_attr, shortcut in client_shortcuts.items():
        if hasattr(new_clients, client_attr):
            setattr(self, shortcut, getattr(new_clients, client_attr))
def restore_resource_state(self, instrument_device_id='', attachment_id=''):
    """
    Restore a previously saved snapshot of an instrument agent instance config.

    @param instrument_device_id  the InstrumentDevice whose agent config is restored
    @param attachment_id         the Attachment holding the JSON config snapshot
    @throws BadRequest if the resource is not an InstrumentDevice, or the
                       attachment is not a JSON config snapshot
    """
    instrument_device_obj = self.RR.read(instrument_device_id)
    resource_type = type(instrument_device_obj).__name__
    if not RT.InstrumentDevice == resource_type:
        raise BadRequest("Can only restore resource states for %s resources, got %s" %
                         (RT.InstrumentDevice, resource_type))
    instrument_agent_instance_obj = self.RR2.find_instrument_agent_instance_of_instrument_device(instrument_device_id)
    attachment = self.RR2.read_attachment(attachment_id, include_content=True)
    if not KeywordFlag.CONFIG_SNAPSHOT in attachment.keywords:
        raise BadRequest("Attachment '%s' does not seem to be a config snapshot" % attachment_id)
    if not 'application/json' == attachment.content_type:
        # bug fix: the original message left the '%s' placeholder unfilled
        raise BadRequest("Attachment '%s' is not labeled as json" % attachment_id)
    snapshot = json.loads(attachment.content)
    driver_config = snapshot["driver_config"]
    # only comms_config and the port agent pid are restored into the live object
    instrument_agent_instance_obj.driver_config["comms_config"] = driver_config["comms_config"]
    instrument_agent_instance_obj.driver_config["pagent_pid"] = driver_config["pagent_pid"]
    self.RR2.update(instrument_agent_instance_obj)
    #todo
    #agent.set_config(snapshot["running_config"])
    #todo
    # re-launch agent?
def save_resource_state(self, instrument_device_id='', name=''):
    """
    Take a snapshot of the instrument's current agent-instance configuration
    and attach it to the device as a JSON document.

    @param instrument_device_id  the InstrumentDevice to snapshot
    @param name  optional attachment name; a timestamped default is used if empty
    @retval the id of the created Attachment
    @throws BadRequest if the resource is not an InstrumentDevice
    """
    device_obj = self.RR.read(instrument_device_id)
    device_type = type(device_obj).__name__
    if not RT.InstrumentDevice == device_type:
        raise BadRequest("Can only save resource states for %s resources, got %s" %
                         (RT.InstrumentDevice, device_type))
    agent_instance_obj = self.RR2.find_instrument_agent_instance_of_instrument_device(instrument_device_id)
    config_builder = InstrumentAgentConfigurationBuilder(self.clients)
    config_builder.set_agent_instance_object(agent_instance_obj)
    agent_config = config_builder.prepare(will_launch=False)
    epoch = time.mktime(datetime.now().timetuple())
    snapshot_name = name or "Running Config Snapshot %s.js" % epoch
    #todo: capture the live running config from the agent; would require a
    # resource agent client to the agent process, e.g.
    # ResourceAgentClient(instrument_device_id,
    #                     to_name=agent_instance_obj.agent_process_id,
    #                     process=FakeProcess())
    snapshot = {"driver_config": agent_config['driver_config'],
                "agent_config": agent_config,
                "running_config": {}}  # agent.get_config()
    # wrap the snapshot in an attachment on the device and return its id
    attachment = IonObject(RT.Attachment,
                           name=snapshot_name,
                           description="Config snapshot at time %s" % epoch,
                           content=json.dumps(snapshot),
                           content_type="application/json",  # RFC 4627
                           keywords=[KeywordFlag.CONFIG_SNAPSHOT],
                           attachment_type=AttachmentType.ASCII)
    return self.RR2.create_attachment(instrument_device_id, attachment)
##########################################################################
#
# INSTRUMENT AGENT INSTANCE
#
##########################################################################
def create_instrument_agent_instance(self, instrument_agent_instance=None, instrument_agent_id="", instrument_device_id=""):
    """
    Create a new InstrumentAgentInstance resource, optionally wiring it to an
    agent and/or a device.

    @param instrument_agent_instance  the object to be created as a resource
    @param instrument_agent_id   if set, associate the new instance with this agent
    @param instrument_device_id  if set, associate the new instance with this device
    @retval the id of the new InstrumentAgentInstance
    @throws BadRequest if the incoming _id field is set or the name already exists
    """
    new_instance_id = self.RR2.create(instrument_agent_instance, RT.InstrumentAgentInstance)
    if instrument_agent_id:
        self.assign_instrument_agent_to_instrument_agent_instance(instrument_agent_id, new_instance_id)
    if instrument_device_id:
        self.assign_instrument_agent_instance_to_instrument_device(new_instance_id, instrument_device_id)
        log.debug("device %s now connected to instrument agent instance %s (L4-CI-SA-RQ-363)",
                  str(instrument_device_id), str(new_instance_id))
    return new_instance_id
def update_instrument_agent_instance(self, instrument_agent_instance=None):
    """
    Persist changes to an existing InstrumentAgentInstance resource.

    @param instrument_agent_instance  the updated resource object (its _id must be set)
    @retval whether the update succeeded
    @throws BadRequest if the incoming _id field is not set or the name already exists
    """
    return self.RR2.update(instrument_agent_instance, RT.InstrumentAgentInstance)
def read_instrument_agent_instance(self, instrument_agent_instance_id=''):
    """
    Fetch an InstrumentAgentInstance resource by id.

    @param instrument_agent_instance_id  id of the resource to fetch
    @retval the InstrumentAgentInstance resource object
    """
    return self.RR2.read(instrument_agent_instance_id, RT.InstrumentAgentInstance)
def delete_instrument_agent_instance(self, instrument_agent_instance_id=''):
    """
    Retire an InstrumentAgentInstance (soft delete; history is kept).

    For a hard delete use force_delete_instrument_agent_instance.

    @param instrument_agent_instance_id  id of the resource to retire
    """
    self.RR2.retire(instrument_agent_instance_id, RT.InstrumentAgentInstance)
def force_delete_instrument_agent_instance(self, instrument_agent_instance_id=''):
    """Hard-delete an InstrumentAgentInstance resource, including its history."""
    self.RR2.pluck_delete(instrument_agent_instance_id, RT.InstrumentAgentInstance)
def record_instrument_producer_activation(self, instrument_device_id, instrument_agent_instance_id):
    """
    Record activation provenance on the device's data-producer context:
    activation time, agent configuration, and (when found) the deployed site.

    @param instrument_device_id          the device whose producer is updated
    @param instrument_agent_instance_id  the agent instance that was just launched
    """
    log.debug("update the producer context for provenance")
    #todo: should get the time from process dispatcher
    producer_obj = self._get_instrument_producer(instrument_device_id)
    if OT.InstrumentProducerContext == producer_obj.producer_context.type_:
        # reload resource as it has been updated by the launch function
        instrument_agent_instance_obj = self.RR2.read(instrument_agent_instance_id)
        producer_obj.producer_context.activation_time = IonTime().to_string()
        producer_obj.producer_context.configuration = instrument_agent_instance_obj.agent_config
        # get the site where this device is currently deployed, if any
        try:
            site_id = self.RR2.find_instrument_site_id_by_instrument_device(instrument_device_id)
            producer_obj.producer_context.deployed_site_id = site_id
        except NotFound:
            # not deployed at any site; leave deployed_site_id unset
            pass
        # fix: dropped the redundant bare 'except: raise' clause — any other
        # exception propagates on its own
        self.RR2.update(producer_obj)
def start_instrument_agent_instance(self, instrument_agent_instance_id=''):
    """
    Launch the instrument agent instance and return the process id.

    The agent instance must first be created and associated with an instrument
    device.

    @param instrument_agent_instance_id  the InstrumentAgentInstance to start
    @retval the process id of the launched agent
    """
    instrument_agent_instance_obj = self.read_instrument_agent_instance(instrument_agent_instance_id)
    # launch the port agent before verifying anything.
    # if agent instance doesn't validate, port agent won't care and will be available for when it does validate
    # if no comms_config specified in the driver config then we need to start a port agent
    if not 'comms_config' in instrument_agent_instance_obj.driver_config:
        log.info("IMS:start_instrument_agent_instance no comms_config specified in the driver_config so call _start_port_agent")
        instrument_agent_instance_obj = self._start_port_agent(instrument_agent_instance_obj) # <-- this updates agent instance obj!
    # if the comms_config host addr in the driver config is localhost
    elif 'addr' in instrument_agent_instance_obj.driver_config.get('comms_config') and\
         instrument_agent_instance_obj.driver_config['comms_config']['addr'] == 'localhost':
        log.info("IMS:start_instrument_agent_instance comms_config host addr in the driver_config is localhost so call _start_port_agent")
        instrument_agent_instance_obj = self._start_port_agent(instrument_agent_instance_obj) # <-- this updates agent instance obj!
    config_builder = InstrumentAgentConfigurationBuilder(self.clients)
    launcher = AgentLauncher(self.clients.process_dispatcher)
    try:
        config_builder.set_agent_instance_object(instrument_agent_instance_obj)
        config = config_builder.prepare()
    except:
        # config preparation failed: tear down the port agent we just started,
        # then re-raise the original error
        self._stop_port_agent(instrument_agent_instance_obj.port_agent_config)
        raise
    process_id = launcher.launch(config, config_builder._get_process_definition()._id)
    config_builder.record_launch_parameters(config, process_id)
    # stamp activation provenance onto the device's data producer
    self.record_instrument_producer_activation(config_builder._get_device()._id, instrument_agent_instance_id)
    # wait for launch completion, leaving some reply-time headroom
    # (see _agent_launch_timeout)
    launcher.await_launch(self._agent_launch_timeout("start_instrument_agent_instance"))
    return process_id
def _agent_launch_timeout(self, fn_name):
    """
    Compute the timeout (seconds) to allow for an agent launch within the
    remaining RPC reply window of fn_name.

    We expect at least 20 seconds of reply time. The agent needs around 6
    seconds to launch currently; pad that out to 16. Reserve a 1-second buffer
    so the launch times out before the RPC call does, letting the buffer grow
    up to 5 seconds when time allows. A minimal buffer triggers a warning.

    @param fn_name  name of the RPC function whose reply window bounds the launch
    @retval number of seconds to allow for the launch
    """
    remaining_time_s = self._remaining_reply_time_s(fn_name)
    min_pad = 1
    max_pad = 5
    launch_time = 16
    # fix: renamed local from 'buffer', which shadows the Python 2 builtin
    pad = max(min_pad, min(max_pad, remaining_time_s - launch_time))
    log.debug("Agent launch buffer time is %s", pad)
    # a minimal pad means we're cutting it close: warn instead of info
    if pad == min_pad:
        log_fn = log.warn
    else:
        log_fn = log.info
    log_fn("Allowing (%s - %s) seconds for agent launch in %s", remaining_time_s, pad, fn_name)
    return remaining_time_s - pad
def _remaining_reply_time_s(self, fn_name):
ret = int(self._remaining_reply_time_ms(fn_name) / 1000)
return ret
def _remaining_reply_time_ms(self, fn_name):
    """
    Inspect the request headers of our process listeners to find how many
    milliseconds remain before the RPC call to fn_name times out.

    @param fn_name  the name of the RPC function being serviced
    @retval milliseconds remaining before the reply deadline
    @throws BadRequest if no listener context carries a reply-by for fn_name
    """
    contexts = {}
    reply_by = None
    # there may be multiple listeners, so examine each one's context
    for idx, listener in enumerate(self._process.listeners):
        ctx = listener._process.get_context()
        contexts[idx] = ctx
        # only contexts whose op matches our function name are relevant
        if "op" in ctx and fn_name == ctx["op"]:
            if "reply-by" in ctx:
                # accept the reply-by value only if it is a nonzero integer
                candidate = int(ctx["reply-by"])
                if 0 < candidate:
                    reply_by = candidate
    if reply_by is None:
        raise BadRequest("Could not find reply-by for %s in these listener contexts: %s" % (fn_name, contexts))
    # deadline minus current ION timestamp = time left
    return reply_by - int(get_ion_ts())
def _start_port_agent(self, instrument_agent_instance_obj=None):
    """
    Construct and start the port agent, ONLY NEEDED FOR INSTRUMENT AGENTS.

    Launches the port agent process, then writes the resulting comms_config
    (address and ports) and the port agent pid back into the agent instance's
    driver_config and persists it.

    @param instrument_agent_instance_obj  the agent instance whose port agent to start
    @retval a freshly re-read copy of the (updated) agent instance resource
    """
    _port_agent_config = instrument_agent_instance_obj.port_agent_config
    #todo: ask bill if this blocks
    # It blocks until the port agent starts up or a timeout
    log.info("IMS:_start_pagent calling PortAgentProcess.launch_process ")
    _pagent = PortAgentProcess.launch_process(_port_agent_config, test_mode = True)
    pid = _pagent.get_pid()
    port = _pagent.get_data_port()
    cmd_port = _pagent.get_command_port()
    log.info("IMS:_start_pagent returned from PortAgentProcess.launch_process pid: %s ", pid)
    # Hack to get ready for DEMO. Further though needs to be put int
    # how we pass this config info around.
    host = 'localhost'
    # if a comms_config already exists, keep its address; otherwise default to localhost
    driver_config = instrument_agent_instance_obj.driver_config
    comms_config = driver_config.get('comms_config')
    if comms_config:
        host = comms_config.get('addr')
    else:
        log.warn("No comms_config specified, using '%s'" % host)
    # Configure driver to use port agent port number.
    instrument_agent_instance_obj.driver_config['comms_config'] = {
        'addr' : host,
        'cmd_port' : cmd_port,
        'port' : port
    }
    # remember the port agent pid so it can be stopped/reset later
    instrument_agent_instance_obj.driver_config['pagent_pid'] = pid
    self.update_instrument_agent_instance(instrument_agent_instance_obj)
    # re-read so the caller gets the persisted version (fresh _rev etc.)
    return self.read_instrument_agent_instance(instrument_agent_instance_obj._id)
def _stop_port_agent(self, port_agent_config):
    """
    Stop the port agent process described by port_agent_config.

    A missing process (NotFound) is tolerated silently; any other error
    propagates to the caller.
    """
    log.debug("Stopping port agent")
    try:
        process = PortAgentProcess.get_process(port_agent_config, test_mode=True)
        process.stop()
    except NotFound:
        log.debug("No port agent process found")
    # fix: removed 'except Exception as e: raise e' — it was redundant and, in
    # Python 2, 'raise e' discards the original traceback
    else:
        log.debug("Success stopping port agent")
def stop_instrument_agent_instance(self, instrument_agent_instance_id=''):
    """
    Deactivate an instrument agent instance: cancel the agent process, stop its
    port agent, and stamp the producer context with the deactivation time.
    """
    instance_obj, device_id = self.stop_agent_instance(instrument_agent_instance_id, RT.InstrumentDevice)
    self._stop_port_agent(instance_obj.port_agent_config)
    # record deactivation in the producer context for provenance
    producer_obj = self._get_instrument_producer(device_id)
    if OT.InstrumentProducerContext == producer_obj.producer_context.type_:
        producer_obj.producer_context.deactivation_time = IonTime().to_string()
        self.RR2.update(producer_obj)
def stop_agent_instance(self, agent_instance_id, device_type):
    """
    Deactivate an agent instance: cancel its process and clear the recorded
    process ids.

    @param agent_instance_id  the agent instance to stop
    @param device_type        resource type of the owning device (e.g. RT.InstrumentDevice)
    @retval (agent_instance_obj, device_id) tuple
    @throws BadRequest if the instance has no agent_process_id (already stopped?)
    """
    agent_instance_obj = self.RR2.read(agent_instance_id)
    device_id = self.RR2.find_subject(subject_type=device_type,
                                      predicate=PRED.hasAgentInstance,
                                      object=agent_instance_id,
                                      id_only=True)
    log.debug("Canceling the execution of agent's process ID")
    if None is agent_instance_obj.agent_process_id:
        raise BadRequest("Agent Instance '%s' does not have an agent_process_id. Stopped already?"
                         % agent_instance_id)
    try:
        self.clients.process_dispatcher.cancel_process(process_id=agent_instance_obj.agent_process_id)
    except NotFound:
        log.debug("No agent process found")
    # fix: removed 'except Exception as e: raise e' — redundant, and in
    # Python 2 'raise e' discards the original traceback
    else:
        log.debug("Success cancelling agent process")
    # reset the process ids so the instance can be started again
    agent_instance_obj.agent_process_id = None
    if "pagent_pid" in agent_instance_obj.driver_config:
        agent_instance_obj.driver_config['pagent_pid'] = None
    self.RR2.update(agent_instance_obj)
    return agent_instance_obj, device_id
def _get_instrument_producer(self, instrument_device_id=""):
    """
    Return the first DataProducer associated (via hasDataProducer) with the device.

    @throws NotFound if the device has no associated DataProducer
    """
    producers, _ = self.clients.resource_registry.find_objects(subject=instrument_device_id,
                                                               predicate=PRED.hasDataProducer,
                                                               object_type=RT.DataProducer,
                                                               id_only=False)
    if not producers:
        raise NotFound("No Producers created for this Instrument Device " + str(instrument_device_id))
    return producers[0]
##########################################################################
#
# INSTRUMENT AGENT
#
##########################################################################
def create_instrument_agent(self, instrument_agent=None):
    """
    Create a new InstrumentAgent resource, along with the process definition
    used to launch it later.

    @param instrument_agent  the object to be created as a resource
    @retval the id of the new InstrumentAgent
    @throws BadRequest if the incoming _id field is set or the name already exists
    """
    instrument_agent_id = self.RR2.create(instrument_agent, RT.InstrumentAgent)
    # build the process definition that will launch this agent
    process_definition = ProcessDefinition()
    process_definition.executable['module'] = 'ion.agents.instrument.instrument_agent'
    process_definition.executable['class'] = 'InstrumentAgent'
    process_definition_id = self.clients.process_dispatcher.create_process_definition(process_definition=process_definition)
    # tie the process definition to the agent resource
    self.RR2.assign_process_definition_to_instrument_agent(process_definition_id, instrument_agent_id)
    return instrument_agent_id
def update_instrument_agent(self, instrument_agent=None):
    """
    Persist changes to an existing InstrumentAgent resource.

    @param instrument_agent  the updated resource object (its _id must be set)
    @retval whether the update succeeded
    @throws BadRequest if the incoming _id field is not set or the name already exists
    """
    return self.RR2.update(instrument_agent, RT.InstrumentAgent)
def read_instrument_agent(self, instrument_agent_id=''):
    """
    Fetch an InstrumentAgent resource by id.

    @param instrument_agent_id  id of the resource to fetch
    @retval the InstrumentAgent resource object
    """
    return self.RR2.read(instrument_agent_id, RT.InstrumentAgent)
def delete_instrument_agent(self, instrument_agent_id=''):
    """
    Retire an InstrumentAgent (soft delete; history is kept).

    For a hard delete use force_delete_instrument_agent, which also cleans up
    the associated process definition.

    @param instrument_agent_id  id of the resource to retire
    """
    self.RR2.retire(instrument_agent_id, RT.InstrumentAgent)
def force_delete_instrument_agent(self, instrument_agent_id=''):
    """
    Hard-delete an InstrumentAgent, first detaching and deleting each process
    definition that was created for it.
    """
    for process_def in self.RR2.find_process_definitions_of_instrument_agent(instrument_agent_id):
        self.RR2.unassign_process_definition_from_instrument_agent(process_def._id, instrument_agent_id)
        self.clients.process_dispatcher.delete_process_definition(process_def._id)
    self.RR2.pluck_delete(instrument_agent_id, RT.InstrumentAgent)
def register_instrument_agent(self, instrument_agent_id='', agent_egg='', qa_documents=''):
    """
    register an instrument driver by putting it in a web-accessible location
    @instrument_agent_id the agent receiving the driver
    @agent_egg a base64-encoded egg file
    @qa_documents a base64-encoded zip file containing a MANIFEST.csv file

    MANIFEST.csv fields:
     - filename
     - name
     - description
     - content_type
     - keywords

    @throws BadRequest if any validation or the upload itself fails

    On success the QA documents and a shortcut to the uploaded egg's URL are
    attached to the agent resource, and the agent advances to INTEGRATED.
    """
    # retrieve the resource (existence check; raises if the id is bad)
    self.read_instrument_agent(instrument_agent_id)
    qa_doc_parser = QADocParser()
    #process the input files (base64-encoded qa documents)
    qa_parse_result, err = qa_doc_parser.prepare(qa_documents)
    if not qa_parse_result:
        raise BadRequest("Processing qa_documents file failed: %s" % err)
    #process the input files (base64-encoded egg)
    uploader_obj, err = self.module_uploader.prepare(agent_egg)
    if None is uploader_obj:
        raise BadRequest("Egg failed validation: %s" % err)
    attachments, err = qa_doc_parser.convert_to_attachments()
    if None is attachments:
        raise BadRequest("QA Docs processing failed: %s" % err)
    # actually upload — all validations passed, so push the egg to the release host
    up_success, err = uploader_obj.upload()
    if not up_success:
        raise BadRequest("Upload failed: %s" % err)
    #now we can do the ION side of things
    #make an attachment for the url (a Windows-style .url internet shortcut)
    attachments.append(IonObject(RT.Attachment,
                                 name=uploader_obj.get_egg_urlfile_name(),
                                 description="url to egg",
                                 content="[InternetShortcut]\nURL=%s" % uploader_obj.get_destination_url(),
                                 content_type="text/url",
                                 keywords=[KeywordFlag.EGG_URL],
                                 attachment_type=AttachmentType.ASCII))
    #insert all attachments (QA docs plus the egg-URL shortcut)
    for att in attachments:
        self.RR2.create_attachment(instrument_agent_id, att)
    #updates the state of this InstAgent to integrated
    self.RR2.advance_lcs(instrument_agent_id, LCE.INTEGRATE)
##########################################################################
#
# INSTRUMENT MODEL
#
##########################################################################
def create_instrument_model(self, instrument_model=None):
    """
    Create a new InstrumentModel resource.

    @param instrument_model  the object to be created as a resource
    @retval the id of the new InstrumentModel
    @throws BadRequest if the incoming _id field is set or the name already exists
    """
    return self.RR2.create(instrument_model, RT.InstrumentModel)
def update_instrument_model(self, instrument_model=None):
    """
    Persist changes to an existing InstrumentModel resource.

    @param instrument_model  the updated resource object (its _id must be set)
    @retval whether the update succeeded
    @throws BadRequest if the incoming _id field is not set or the name already exists
    """
    return self.RR2.update(instrument_model, RT.InstrumentModel)
def read_instrument_model(self, instrument_model_id=''):
    """
    Fetch an InstrumentModel resource by id.

    @param instrument_model_id  id of the resource to fetch
    @retval the InstrumentModel resource object
    """
    return self.RR2.read(instrument_model_id, RT.InstrumentModel)
def delete_instrument_model(self, instrument_model_id=''):
    """
    Retire an InstrumentModel (soft delete; history is kept).

    For a hard delete use force_delete_instrument_model.

    @param instrument_model_id  id of the resource to retire
    """
    self.RR2.retire(instrument_model_id, RT.InstrumentModel)
def force_delete_instrument_model(self, instrument_model_id=''):
    """Hard-delete an InstrumentModel resource, including its history."""
    self.RR2.pluck_delete(instrument_model_id, RT.InstrumentModel)
##########################################################################
#
# PHYSICAL INSTRUMENT
#
##########################################################################
def create_instrument_device(self, instrument_device=None):
    """
    Create a new InstrumentDevice resource and register it as a data producer.

    @param instrument_device  the object to be created as a resource
    @retval the id of the new InstrumentDevice
    @throws BadRequest if the incoming _id field is set or the name already exists
    """
    device_id = self.RR2.create(instrument_device, RT.InstrumentDevice)
    # every physical instrument is also a data producer
    self.DAMS.register_instrument(device_id)
    return device_id
def update_instrument_device(self, instrument_device=None):
    """
    Persist changes to an existing InstrumentDevice resource.

    @param instrument_device  the updated resource object (its _id must be set)
    @retval whether the update succeeded
    @throws BadRequest if the incoming _id field is not set or the name already exists
    """
    return self.RR2.update(instrument_device, RT.InstrumentDevice)
def read_instrument_device(self, instrument_device_id=''):
    """
    Fetch an InstrumentDevice resource by id.

    @param instrument_device_id  id of the resource to fetch
    @retval the InstrumentDevice resource object
    """
    return self.RR2.read(instrument_device_id, RT.InstrumentDevice)
def delete_instrument_device(self, instrument_device_id=''):
    """
    Retire an InstrumentDevice (soft delete; history is kept).

    For a hard delete use force_delete_instrument_device.

    @param instrument_device_id  id of the resource to retire
    """
    self.RR2.retire(instrument_device_id, RT.InstrumentDevice)
def force_delete_instrument_device(self, instrument_device_id=''):
    """Hard-delete an InstrumentDevice resource, including its history."""
    self.RR2.pluck_delete(instrument_device_id, RT.InstrumentDevice)
##
##
## PRECONDITION FUNCTIONS
##
##
def check_direct_access_policy(self, msg, headers):
    """
    Policy precondition: allow direct access only for the system actor, or for
    a user who holds an exclusive commitment on the resource.

    @retval (allowed, reason) tuple; reason is '' when allowed
    """
    try:
        gov_values = GovernanceHeaderValues(headers)
    except Inconsistent as ex:
        # fix: 'except X as ex' instead of the Py2-only 'except X, ex'
        # (the file already uses the 'as' form elsewhere)
        return False, ex.message
    # The system actor can do anything
    if is_system_actor(gov_values.actor_id):
        return True, ''
    #TODO - this shared commitment might not be with the right Org - may have to relook at how this is working.
    if not has_exclusive_resource_commitment(gov_values.actor_id, gov_values.resource_id):
        return False, '%s(%s) has been denied since the user %s has not acquired the resource exclusively' % (self.name, gov_values.op, gov_values.actor_id)
    return True, ''
def check_device_lifecycle_policy(self, msg, headers):
    """
    Policy precondition for device lifecycle transitions.

    INTEGRATE/DEPLOY/RETIRE require an operator/manager role in an Org the
    device is shared with; other transitions are allowed to the resource owner,
    or to a committed user holding an appropriate role.

    @retval (allowed, reason) tuple; reason is '' when allowed
    @throws Inconsistent if the message carries no lifecycle_event
    """
    try:
        gov_values = GovernanceHeaderValues(headers)
    except Inconsistent as ex:
        # fix: 'except X as ex' instead of the Py2-only 'except X, ex'
        return False, ex.message
    # The system actor can do anything
    if is_system_actor(gov_values.actor_id):
        return True, ''
    # fix: 'in' instead of the deprecated dict.has_key()
    if 'lifecycle_event' in msg:
        lifecycle_event = msg['lifecycle_event']
    else:
        raise Inconsistent('%s(%s) has been denied since the lifecycle_event can not be found in the message'% (self.name, gov_values.op))
    orgs,_ = self.clients.resource_registry.find_subjects(RT.Org, PRED.hasResource, gov_values.resource_id)
    if not orgs:
        return False, '%s(%s) has been denied since the resource id %s has not been shared with any Orgs' % (self.name, gov_values.op, gov_values.resource_id)
    #Handle these lifecycle transitions first
    if lifecycle_event == LCE.INTEGRATE or lifecycle_event == LCE.DEPLOY or lifecycle_event == LCE.RETIRE:
        #Check across Orgs which have shared this device for role which as proper level to allow lifecycle transition
        for org in orgs:
            if has_org_role(gov_values.actor_roles, org.org_governance_name, [OBSERVATORY_OPERATOR_ROLE,ORG_MANAGER_ROLE]):
                return True, ''
    else:
        #The owner can do any of these other lifecycle transitions
        is_owner = is_resource_owner(gov_values.actor_id, gov_values.resource_id)
        if is_owner:
            return True, ''
        #TODO - this shared commitment might not be with the right Org - may have to relook at how this is working.
        is_shared = has_shared_resource_commitment(gov_values.actor_id, gov_values.resource_id)
        #Check across Orgs which have shared this device for role which as proper level to allow lifecycle transition
        for org in orgs:
            if has_org_role(gov_values.actor_roles, org.org_governance_name, [INSTRUMENT_OPERATOR_ROLE, OBSERVATORY_OPERATOR_ROLE,ORG_MANAGER_ROLE] ) and is_shared:
                return True, ''
    return False, '%s(%s) has been denied since the user %s has not acquired the resource or is not the proper role for this transition: %s' % (self.name, gov_values.op, gov_values.actor_id, lifecycle_event)
##
##
## DIRECT ACCESS
##
##
def request_direct_access(self, instrument_device_id=''):
    """
    Request a direct-access channel to the given instrument.

    Not implemented yet; always raises NotImplementedError.
    """
    # determine whether id is for physical or logical instrument
    # look up instrument if not
    # Validate request; current instrument state, policy, and other
    # Retrieve and save current instrument settings
    # Request DA channel, save reference
    # Return direct access channel
    # fix: removed the unreachable 'pass' that followed the raise
    raise NotImplementedError()
def stop_direct_access(self, instrument_device_id=''):
    """
    Terminate a direct-access session to the given instrument.

    Not implemented yet; always raises NotImplementedError.
    """
    # Return Value
    # ------------
    # {success: true}
    #
    # fix: removed the unreachable 'pass' that followed the raise
    raise NotImplementedError()
##########################################################################
#
# PLATFORM AGENT INSTANCE
#
##########################################################################
def create_platform_agent_instance(self, platform_agent_instance=None, platform_agent_id="", platform_device_id=""):
    """
    Create a new PlatformAgentInstance resource, optionally wiring it to an
    agent and/or a device.

    @param platform_agent_instance  the object to be created as a resource
    @param platform_agent_id   if set, associate the new instance with this agent
    @param platform_device_id  if set, associate the new instance with this device
    @retval the id of the new PlatformAgentInstance
    @throws BadRequest if the incoming _id field is set or the name already exists
    """
    new_instance_id = self.RR2.create(platform_agent_instance, RT.PlatformAgentInstance)
    if platform_agent_id:
        self.assign_platform_agent_to_platform_agent_instance(platform_agent_id, new_instance_id)
    if platform_device_id:
        self.assign_platform_agent_instance_to_platform_device(new_instance_id, platform_device_id)
    return new_instance_id
def update_platform_agent_instance(self, platform_agent_instance=None):
    """
    Persist changes to an existing PlatformAgentInstance resource.

    @param platform_agent_instance  the updated resource object (its _id must be set)
    @retval whether the update succeeded
    @throws BadRequest if the incoming _id field is not set or the name already exists
    """
    return self.RR2.update(platform_agent_instance, RT.PlatformAgentInstance)
def read_platform_agent_instance(self, platform_agent_instance_id=''):
    """
    Fetch a PlatformAgentInstance resource by id.

    @param platform_agent_instance_id  id of the resource to fetch
    @retval the PlatformAgentInstance resource object
    """
    return self.RR2.read(platform_agent_instance_id, RT.PlatformAgentInstance)
def delete_platform_agent_instance(self, platform_agent_instance_id=''):
    """
    Retire a PlatformAgentInstance (soft delete; history is kept).

    For a hard delete use force_delete_platform_agent_instance.

    @param platform_agent_instance_id  id of the resource to retire
    """
    self.RR2.retire(platform_agent_instance_id, RT.PlatformAgentInstance)
def force_delete_platform_agent_instance(self, platform_agent_instance_id=''):
    """Hard-delete a PlatformAgentInstance resource, including its history."""
    self.RR2.pluck_delete(platform_agent_instance_id, RT.PlatformAgentInstance)
# def _get_child_platforms(self, platform_device_id):
# """ recursively trace hasDevice relationships, return list of all PlatformDevice objects
# TODO: how to get platform ID from platform device?
# """
# children = [] # find by hasDevice relationship
# out = children[:]
# for obj in children:
# descendents = self._get_child_platforms(obj._id)
# out[0:] = descendents
# return out
def start_platform_agent_instance(self, platform_agent_instance_id=''):
    """
    Launch the platform agent instance and return the process id.

    The agent instance must first be created and associated with a platform
    device.

    @param platform_agent_instance_id  the PlatformAgentInstance to start
    @retval the process id of the launched agent
    """
    configuration_builder = PlatformAgentConfigurationBuilder(self.clients)
    launcher = AgentLauncher(self.clients.process_dispatcher)
    platform_agent_instance_obj = self.read_platform_agent_instance(platform_agent_instance_id)
    configuration_builder.set_agent_instance_object(platform_agent_instance_obj)
    config = configuration_builder.prepare()
    platform_device_obj = configuration_builder._get_device()
    log.debug("start_platform_agent_instance: device is %s connected to platform agent instance %s (L4-CI-SA-RQ-363)",
              str(platform_device_obj._id), str(platform_agent_instance_id))
    #retrive the stream info for this model
    #todo: add stream info to the platform model create
    #    streams_dict = platform_model_obj.custom_attributes['streams']
    #    if not streams_dict:
    #        raise BadRequest("Device model does not contain stream configuation used in launching the agent. Model: '%s", str(platform_models_objs[0]) )
    process_id = launcher.launch(config, configuration_builder._get_process_definition()._id)
    configuration_builder.record_launch_parameters(config, process_id)
    # wait for launch completion, leaving some reply-time headroom
    # (see _agent_launch_timeout)
    launcher.await_launch(self._agent_launch_timeout("start_platform_agent_instance"))
    return process_id
def stop_platform_agent_instance(self, platform_agent_instance_id=''):
    """
    Deactivate the platform agent instance by cancelling its agent process and
    clearing its recorded process ids.
    """
    self.stop_agent_instance(platform_agent_instance_id, RT.PlatformDevice)
##########################################################################
#
# PLATFORM AGENT
#
##########################################################################
def create_platform_agent(self, platform_agent=None):
    """
    Create a new PlatformAgent resource, along with the process definition used
    to launch it later.

    @param platform_agent  the object to be created as a resource
    @retval the id of the new PlatformAgent
    @throws BadRequest if the incoming _id field is set or the name already exists
    """
    platform_agent_id = self.RR2.create(platform_agent, RT.PlatformAgent)
    # build the process definition that will launch this agent
    process_definition = ProcessDefinition()
    process_definition.executable['module'] = 'ion.agents.platform.platform_agent'
    process_definition.executable['class'] = 'PlatformAgent'
    process_definition_id = self.clients.process_dispatcher.create_process_definition(process_definition=process_definition)
    # tie the process definition to the agent resource
    self.RR2.assign_process_definition_to_platform_agent(process_definition_id, platform_agent_id)
    return platform_agent_id
def update_platform_agent(self, platform_agent=None):
    """
    Persist changes to an existing PlatformAgent resource.

    @param platform_agent  the updated resource object (its _id must be set)
    @retval whether the update succeeded
    @throws BadRequest if the incoming _id field is not set or the name already exists
    """
    return self.RR2.update(platform_agent, RT.PlatformAgent)
def read_platform_agent(self, platform_agent_id=''):
    """
    Fetch a PlatformAgent resource by id.

    @param platform_agent_id  id of the resource to fetch
    @retval the PlatformAgent resource object
    """
    return self.RR2.read(platform_agent_id, RT.PlatformAgent)
def delete_platform_agent(self, platform_agent_id=''):
    """
    Retire a PlatformAgent (soft delete; history is kept).

    For a hard delete use force_delete_platform_agent.

    @param platform_agent_id  id of the resource to retire
    """
    self.RR2.retire(platform_agent_id, RT.PlatformAgent)
def force_delete_platform_agent(self, platform_agent_id=''):
    """Hard-delete a PlatformAgent resource, including its history."""
    self.RR2.pluck_delete(platform_agent_id, RT.PlatformAgent)
##########################################################################
#
# PLATFORM MODEL
#
##########################################################################
def create_platform_model(self, platform_model=None):
    """
    Create a new PlatformModel resource.

    @param platform_model  the object to be created as a resource
    @retval the id of the new PlatformModel
    @throws BadRequest if the incoming _id field is set or the name already exists
    """
    return self.RR2.create(platform_model, RT.PlatformModel)
def update_platform_model(self, platform_model=None):
    """
    Persist changes to an existing PlatformModel resource.

    @param platform_model  the updated resource object (its _id must be set)
    @retval whether the update succeeded
    @throws BadRequest if the incoming _id field is not set or the name already exists
    """
    return self.RR2.update(platform_model, RT.PlatformModel)
def read_platform_model(self, platform_model_id=''):
    """
    Fetch a PlatformModel resource by id.

    @param platform_model_id  id of the resource to fetch
    @retval the PlatformModel resource object
    """
    return self.RR2.read(platform_model_id, RT.PlatformModel)
def delete_platform_model(self, platform_model_id=''):
    """
    Retire a PlatformModel (soft delete; history is kept).

    For a hard delete use force_delete_platform_model.

    @param platform_model_id  id of the resource to retire
    """
    self.RR2.retire(platform_model_id, RT.PlatformModel)
def force_delete_platform_model(self, platform_model_id=''):
self.RR2.pluck_delete(platform_model_id, RT.PlatformModel)
##########################################################################
#
# PHYSICAL PLATFORM
#
##########################################################################

def create_platform_device(self, platform_device=None):
    """
    Create a new PlatformDevice resource and register it as a data producer.
    @param platform_device the object to be created as a resource
    @retval platform_device_id the id of the new object
    @throws BadRequest if the incoming _id field is set
    @throws BadRequest if the incoming name already exists
    """
    platform_device_id = self.RR2.create(platform_device, RT.PlatformDevice)

    # register the platform as a data producer
    self.DAMS.register_instrument(platform_device_id)

    return platform_device_id

def update_platform_device(self, platform_device=None):
    """
    Update an existing PlatformDevice resource.
    @param platform_device the object to be persisted (must carry _id)
    @throws BadRequest if the incoming _id field is not set
    @throws BadRequest if the incoming name already exists
    """
    return self.RR2.update(platform_device, RT.PlatformDevice)

def read_platform_device(self, platform_device_id=''):
    """
    Fetch a PlatformDevice resource by ID.
    @param platform_device_id the id of the object to be fetched
    @retval PlatformDevice resource
    """
    return self.RR2.read(platform_device_id, RT.PlatformDevice)

def delete_platform_device(self, platform_device_id=''):
    """
    Delete a PlatformDevice by retiring it (history is preserved).
    @param platform_device_id the id of the object to be deleted
    """
    self.RR2.retire(platform_device_id, RT.PlatformDevice)

def force_delete_platform_device(self, platform_device_id=''):
    # Permanent removal, unlike delete_platform_device which only retires.
    self.RR2.pluck_delete(platform_device_id, RT.PlatformDevice)
##########################################################################
#
# SENSOR MODEL
#
##########################################################################

def create_sensor_model(self, sensor_model=None):
    """
    Create a new SensorModel resource.
    @param sensor_model the object to be created as a resource
    @retval sensor_model_id the id of the new object
    @throws BadRequest if the incoming _id field is set
    @throws BadRequest if the incoming name already exists
    """
    return self.RR2.create(sensor_model, RT.SensorModel)

def update_sensor_model(self, sensor_model=None):
    """
    Update an existing SensorModel resource.
    @param sensor_model the object to be persisted (must carry _id)
    @throws BadRequest if the incoming _id field is not set
    @throws BadRequest if the incoming name already exists
    """
    return self.RR2.update(sensor_model, RT.SensorModel)

def read_sensor_model(self, sensor_model_id=''):
    """
    Fetch a SensorModel resource by ID.
    @param sensor_model_id the id of the object to be fetched
    @retval SensorModel resource
    """
    return self.RR2.read(sensor_model_id, RT.SensorModel)

def delete_sensor_model(self, sensor_model_id=''):
    """
    Delete a SensorModel by retiring it (history is preserved).
    @param sensor_model_id the id of the object to be deleted
    """
    self.RR2.retire(sensor_model_id, RT.SensorModel)

def force_delete_sensor_model(self, sensor_model_id=''):
    # Permanent removal, unlike delete_sensor_model which only retires.
    self.RR2.pluck_delete(sensor_model_id, RT.SensorModel)
##########################################################################
#
# PHYSICAL SENSOR
#
##########################################################################

def create_sensor_device(self, sensor_device=None):
    """
    Create a new SensorDevice resource.
    @param sensor_device the object to be created as a resource
    @retval sensor_device_id the id of the new object
    @throws BadRequest if the incoming _id field is set
    @throws BadRequest if the incoming name already exists
    """
    return self.RR2.create(sensor_device, RT.SensorDevice)

def update_sensor_device(self, sensor_device=None):
    """
    Update an existing SensorDevice resource.
    @param sensor_device the object to be persisted (must carry _id)
    @throws BadRequest if the incoming _id field is not set
    @throws BadRequest if the incoming name already exists
    """
    return self.RR2.update(sensor_device, RT.SensorDevice)

def read_sensor_device(self, sensor_device_id=''):
    """
    Fetch a SensorDevice resource by ID.
    @param sensor_device_id the id of the object to be fetched
    @retval SensorDevice resource
    """
    return self.RR2.read(sensor_device_id, RT.SensorDevice)

def delete_sensor_device(self, sensor_device_id=''):
    """
    Delete a SensorDevice by retiring it (history is preserved).
    @param sensor_device_id the id of the object to be deleted
    """
    self.RR2.retire(sensor_device_id, RT.SensorDevice)

def force_delete_sensor_device(self, sensor_device_id=''):
    # Permanent removal, unlike delete_sensor_device which only retires.
    self.RR2.pluck_delete(sensor_device_id, RT.SensorDevice)
##########################################################################
#
# ASSOCIATIONS
#
##########################################################################
def assign_instrument_model_to_instrument_device(self, instrument_model_id='', instrument_device_id=''):
    """
    Associate an InstrumentModel with an InstrumentDevice (at most one model per device).

    Warns (but does not fail) when the device carries a custom attribute the
    model does not define.
    @param instrument_model_id the model resource id
    @param instrument_device_id the device resource id
    """
    instrument_model_obj = self.RR2.read(instrument_model_id)
    instrument_device_obj = self.RR2.read(instrument_device_id)

    # `.items()` instead of py2-only `.iteritems()`: works on both 2 and 3.
    for k, v in instrument_device_obj.custom_attributes.items():
        if k not in instrument_model_obj.custom_attributes:
            err_msg = ("InstrumentDevice '%s' contains custom attribute '%s' (value '%s'), but this attribute"
                       + " is not defined by associated InstrumentModel '%s'") % (instrument_device_id,
                                                                                 k, v,
                                                                                 instrument_model_id)
            #raise BadRequest(err_msg)
            log.warn(err_msg)

    # "one" in the RR2 helper name enforces the single-model constraint
    self.RR2.assign_one_instrument_model_to_instrument_device(instrument_model_id, instrument_device_id)
def unassign_instrument_model_from_instrument_device(self, instrument_model_id='', instrument_device_id=''):
    # Remove the hasModel association between an InstrumentDevice and its model.
    self.RR2.unassign_instrument_model_from_instrument_device(instrument_model_id, instrument_device_id)

def assign_instrument_model_to_instrument_agent(self, instrument_model_id='', instrument_agent_id=''):
    # Associate an InstrumentModel with an InstrumentAgent.
    self.RR2.assign_instrument_model_to_instrument_agent(instrument_model_id, instrument_agent_id)
def unassign_instrument_model_from_instrument_agent(self, instrument_model_id='', instrument_agent_id=''):
    """
    Remove the association between an InstrumentModel and an InstrumentAgent.
    @param instrument_model_id the model resource id
    @param instrument_agent_id the agent resource id
    """
    # BUGFIX: the ids were previously passed swapped (agent, model). The RR2
    # helper takes (model, agent), matching assign_instrument_model_to_instrument_agent
    # and every other assign/unassign pair in this file.
    self.RR2.unassign_instrument_model_from_instrument_agent(instrument_model_id, instrument_agent_id)
def assign_platform_model_to_platform_agent(self, platform_model_id='', platform_agent_id=''):
    # Associate a PlatformModel with a PlatformAgent.
    self.RR2.assign_platform_model_to_platform_agent(platform_model_id, platform_agent_id)

def unassign_platform_model_from_platform_agent(self, platform_model_id='', platform_agent_id=''):
    # Remove the association between a PlatformModel and a PlatformAgent.
    self.RR2.unassign_platform_model_from_platform_agent(platform_model_id, platform_agent_id)

def assign_sensor_model_to_sensor_device(self, sensor_model_id='', sensor_device_id=''):
    # "one" in the RR2 helper name enforces at most one model per sensor device.
    self.RR2.assign_one_sensor_model_to_sensor_device(sensor_model_id, sensor_device_id)
def unassign_sensor_model_from_sensor_device(self, sensor_model_id='', sensor_device_id=''):
    """
    Remove the association between a SensorModel and a SensorDevice.
    @param sensor_model_id the model resource id
    @param sensor_device_id the device resource id
    """
    # BUGFIX: `self` was previously passed as an extra positional argument to
    # the RR2 helper, shifting both real ids into the wrong parameters.
    self.RR2.unassign_sensor_model_from_sensor_device(sensor_model_id, sensor_device_id)
# Each method below delegates to the corresponding RR2 association helper;
# "one" in a helper name means the association is constrained to a single object.

def assign_platform_model_to_platform_device(self, platform_model_id='', platform_device_id=''):
    self.RR2.assign_one_platform_model_to_platform_device(platform_model_id, platform_device_id)

def unassign_platform_model_from_platform_device(self, platform_model_id='', platform_device_id=''):
    self.RR2.unassign_platform_model_from_platform_device(platform_model_id, platform_device_id)

def assign_instrument_device_to_platform_device(self, instrument_device_id='', platform_device_id=''):
    self.RR2.assign_instrument_device_to_one_platform_device(instrument_device_id, platform_device_id)

def unassign_instrument_device_from_platform_device(self, instrument_device_id='', platform_device_id=''):
    self.RR2.unassign_instrument_device_from_platform_device(instrument_device_id, platform_device_id)

def assign_platform_device_to_platform_device(self, child_platform_device_id='', platform_device_id=''):
    # parent/child platform nesting: the child is assigned to one parent
    self.RR2.assign_platform_device_to_one_platform_device(child_platform_device_id, platform_device_id)

def unassign_platform_device_from_platform_device(self, child_platform_device_id='', platform_device_id=''):
    self.RR2.unassign_platform_device_from_platform_device(child_platform_device_id, platform_device_id)

def assign_platform_agent_to_platform_agent_instance(self, platform_agent_id='', platform_agent_instance_id=''):
    self.RR2.assign_one_platform_agent_to_platform_agent_instance(platform_agent_id, platform_agent_instance_id)

def unassign_platform_agent_from_platform_agent_instance(self, platform_agent_id='', platform_agent_instance_id=''):
    self.RR2.unassign_platform_agent_from_platform_agent_instance(platform_agent_id, platform_agent_instance_id)

def assign_instrument_agent_to_instrument_agent_instance(self, instrument_agent_id='', instrument_agent_instance_id=''):
    self.RR2.assign_one_instrument_agent_to_instrument_agent_instance(instrument_agent_id, instrument_agent_instance_id)

def unassign_instrument_agent_from_instrument_agent_instance(self, instrument_agent_id='', instrument_agent_instance_id=''):
    self.RR2.unassign_instrument_agent_from_instrument_agent_instance(instrument_agent_id, instrument_agent_instance_id)

def assign_instrument_agent_instance_to_instrument_device(self, instrument_agent_instance_id='', instrument_device_id=''):
    self.RR2.assign_one_instrument_agent_instance_to_instrument_device(instrument_agent_instance_id, instrument_device_id)

def unassign_instrument_agent_instance_from_instrument_device(self, instrument_agent_instance_id='', instrument_device_id=''):
    self.RR2.unassign_instrument_agent_instance_from_instrument_device(instrument_agent_instance_id, instrument_device_id)

def assign_platform_agent_instance_to_platform_device(self, platform_agent_instance_id='', platform_device_id=''):
    self.RR2.assign_one_platform_agent_instance_to_platform_device(platform_agent_instance_id, platform_device_id)

def unassign_platform_agent_instance_from_platform_device(self, platform_agent_instance_id='', platform_device_id=''):
    self.RR2.unassign_platform_agent_instance_from_platform_device(platform_agent_instance_id, platform_device_id)

def assign_sensor_device_to_instrument_device(self, sensor_device_id='', instrument_device_id=''):
    self.RR2.assign_sensor_device_to_one_instrument_device(sensor_device_id, instrument_device_id)

def unassign_sensor_device_from_instrument_device(self, sensor_device_id='', instrument_device_id=''):
    self.RR2.unassign_sensor_device_from_instrument_device(sensor_device_id, instrument_device_id)
##########################################################################
#
# DEPLOYMENTS
#
##########################################################################
def deploy_instrument_device(self, instrument_device_id='', deployment_id=''):
    """
    Associate a Deployment with an InstrumentDevice (hasDeployment).
    @param instrument_device_id the device being deployed
    @param deployment_id the deployment resource
    """
    # NOTE: an earlier (commented-out) implementation also verified that only
    # one site-device-deployment triangle existed at a time; that consistency
    # check was dropped and only the association assignment remains.
    self.RR2.assign_deployment_to_instrument_device(deployment_id, instrument_device_id)

def undeploy_instrument_device(self, instrument_device_id='', deployment_id=''):
    # Remove the hasDeployment association between device and deployment.
    self.RR2.unassign_deployment_from_instrument_device(deployment_id, instrument_device_id)
def deploy_platform_device(self, platform_device_id='', deployment_id=''):
    """
    Associate a Deployment with a PlatformDevice (hasDeployment).
    @param platform_device_id the platform being deployed
    @param deployment_id the deployment resource
    """
    # NOTE: an earlier (commented-out) implementation also verified the
    # site-device-deployment triangle; only the association assignment remains.
    self.RR2.assign_deployment_to_platform_device(deployment_id, platform_device_id)
def undeploy_platform_device(self, platform_device_id='', deployment_id=''):
    """
    Remove the hasDeployment association between a PlatformDevice and a Deployment.
    @param platform_device_id the platform resource id
    @param deployment_id the deployment resource id
    """
    # BUGFIX: the RR2 helper name was misspelled ("unassign_deployent_..."),
    # which raised AttributeError whenever this method was called.
    self.RR2.unassign_deployment_from_platform_device(deployment_id, platform_device_id)
############################
#
# ASSOCIATION FIND METHODS
#
############################
# Each finder below simply delegates to the corresponding RR2 association query.

def find_instrument_model_by_instrument_device(self, instrument_device_id=''):
    return self.RR2.find_instrument_models_of_instrument_device(instrument_device_id)

def find_instrument_device_by_instrument_model(self, instrument_model_id=''):
    return self.RR2.find_instrument_devices_by_instrument_model(instrument_model_id)

def find_platform_model_by_platform_device(self, platform_device_id=''):
    return self.RR2.find_platform_models_of_platform_device(platform_device_id)

def find_platform_device_by_platform_model(self, platform_model_id=''):
    return self.RR2.find_platform_devices_by_platform_model(platform_model_id)

def find_instrument_model_by_instrument_agent(self, instrument_agent_id=''):
    return self.RR2.find_instrument_models_of_instrument_agent(instrument_agent_id)

def find_instrument_agent_by_instrument_model(self, instrument_model_id=''):
    return self.RR2.find_instrument_agents_by_instrument_model(instrument_model_id)

def find_instrument_device_by_instrument_agent_instance(self, instrument_agent_instance_id=''):
    return self.RR2.find_instrument_devices_by_instrument_agent_instance(instrument_agent_instance_id)
def find_instrument_agent_instance_by_instrument_device(self, instrument_device_id=''):
    """Return the InstrumentAgentInstance objects associated with the given device."""
    agent_instances = self.RR2.find_instrument_agent_instances_of_instrument_device(instrument_device_id)

    # Requirement-traceability logging (L4-CI-SA-RQ-363) for the first match.
    if agent_instances:
        log.debug("L4-CI-SA-RQ-363: device %s is connected to instrument agent instance %s",
                  str(instrument_device_id),
                  str(agent_instances[0]._id))

    return agent_instances
def find_instrument_device_by_platform_device(self, platform_device_id=''):
    return self.RR2.find_instrument_devices_of_platform_device(platform_device_id)

def find_platform_device_by_instrument_device(self, instrument_device_id=''):
    return self.RR2.find_platform_devices_by_instrument_device(instrument_device_id)

# The finders below are deprecated stubs kept only for interface compatibility;
# they unconditionally raise.

def find_instrument_device_by_logical_instrument(self, logical_instrument_id=''):
    raise NotImplementedError("TODO: this function will be removed")

def find_logical_instrument_by_instrument_device(self, instrument_device_id=''):
    raise NotImplementedError("TODO: this function will be removed")

def find_platform_device_by_logical_platform(self, logical_platform_id=''):
    raise NotImplementedError("TODO: this function will be removed")

def find_logical_platform_by_platform_device(self, platform_device_id=''):
    raise NotImplementedError("TODO: this function will be removed")

def find_data_product_by_instrument_device(self, instrument_device_id=''):
    raise NotImplementedError("TODO: this function will be removed")

def find_instrument_device_by_data_product(self, data_product_id=''):
    raise NotImplementedError("TODO: this function will be removed")
############################
#
# SPECIALIZED FIND METHODS
#
############################
def find_data_product_by_platform_device(self, platform_device_id=''):
    """
    Collect the distinct data products of every instrument device attached to
    the given platform, preserving first-seen order.
    @param platform_device_id the platform whose instruments are scanned
    @retval list of unique data products
    NOTE(review): find_data_product_by_instrument_device currently raises
    NotImplementedError, so this fails for any platform that has instrument
    devices -- confirm intended behavior before relying on this method.
    """
    ret = []
    for device_id in self.find_instrument_device_by_platform_device(platform_device_id):
        for data_product in self.find_data_product_by_instrument_device(device_id):
            # equality-based membership: products may be unhashable resource objects
            if data_product not in ret:
                ret.append(data_product)
    return ret
############################
#
# LIFECYCLE TRANSITIONS
#
############################
def execute_instrument_agent_lifecycle(self, instrument_agent_id="", lifecycle_event=""):
    """
    Advance an instrument_agent resource through the given lifecycle transition.
    @param instrument_agent_id the resource id
    @param lifecycle_event the lifecycle transition to apply
    """
    return self.RR2.advance_lcs(instrument_agent_id, lifecycle_event)

def execute_instrument_agent_instance_lifecycle(self, instrument_agent_instance_id="", lifecycle_event=""):
    """
    Advance an instrument_agent_instance resource through the given lifecycle transition.
    @param instrument_agent_instance_id the resource id
    @param lifecycle_event the lifecycle transition to apply
    """
    return self.RR2.advance_lcs(instrument_agent_instance_id, lifecycle_event)

def execute_instrument_model_lifecycle(self, instrument_model_id="", lifecycle_event=""):
    """
    Advance an instrument_model resource through the given lifecycle transition.
    @param instrument_model_id the resource id
    @param lifecycle_event the lifecycle transition to apply
    """
    return self.RR2.advance_lcs(instrument_model_id, lifecycle_event)

def execute_instrument_device_lifecycle(self, instrument_device_id="", lifecycle_event=""):
    """
    Advance an instrument_device resource through the given lifecycle transition.
    @param instrument_device_id the resource id
    @param lifecycle_event the lifecycle transition to apply
    """
    return self.RR2.advance_lcs(instrument_device_id, lifecycle_event)

def execute_platform_agent_lifecycle(self, platform_agent_id="", lifecycle_event=""):
    """
    Advance a platform_agent resource through the given lifecycle transition.
    @param platform_agent_id the resource id
    @param lifecycle_event the lifecycle transition to apply
    """
    return self.RR2.advance_lcs(platform_agent_id, lifecycle_event)

def execute_platform_agent_instance_lifecycle(self, platform_agent_instance_id="", lifecycle_event=""):
    """
    Advance a platform_agent_instance resource through the given lifecycle transition.
    @param platform_agent_instance_id the resource id
    @param lifecycle_event the lifecycle transition to apply
    """
    return self.RR2.advance_lcs(platform_agent_instance_id, lifecycle_event)

def execute_platform_model_lifecycle(self, platform_model_id="", lifecycle_event=""):
    """
    Advance a platform_model resource through the given lifecycle transition.
    @param platform_model_id the resource id
    @param lifecycle_event the lifecycle transition to apply
    """
    return self.RR2.advance_lcs(platform_model_id, lifecycle_event)

def execute_platform_device_lifecycle(self, platform_device_id="", lifecycle_event=""):
    """
    Advance a platform_device resource through the given lifecycle transition.
    @param platform_device_id the resource id
    @param lifecycle_event the lifecycle transition to apply
    """
    return self.RR2.advance_lcs(platform_device_id, lifecycle_event)

def execute_sensor_model_lifecycle(self, sensor_model_id="", lifecycle_event=""):
    """
    Advance a sensor_model resource through the given lifecycle transition.
    @param sensor_model_id the resource id
    @param lifecycle_event the lifecycle transition to apply
    """
    return self.RR2.advance_lcs(sensor_model_id, lifecycle_event)

def execute_sensor_device_lifecycle(self, sensor_device_id="", lifecycle_event=""):
    """
    Advance a sensor_device resource through the given lifecycle transition.
    @param sensor_device_id the resource id
    @param lifecycle_event the lifecycle transition to apply
    """
    return self.RR2.advance_lcs(sensor_device_id, lifecycle_event)
############################
#
# EXTENDED RESOURCES
#
############################
def get_instrument_device_extension(self, instrument_device_id='', ext_associations=None, ext_exclude=None, user_id=''):
    """Returns an InstrumentDeviceExtension object containing additional related information
    @param instrument_device_id str
    @param ext_associations dict
    @param ext_exclude list
    @retval instrument_device InstrumentDeviceExtension
    @throws BadRequest A parameter is missing
    @throws NotFound An object with the specified instrument_device_id does not exist
    """
    if not instrument_device_id:
        raise BadRequest("The instrument_device_id parameter is empty")

    resource_handler = ExtendedResourceContainer(self)

    extended_instrument = resource_handler.create_extended_resource_container(
        OT.InstrumentDeviceExtension,
        instrument_device_id,
        OT.InstrumentDeviceComputedAttributes,
        ext_associations=ext_associations,
        ext_exclude=ext_exclude,
        user_id=user_id)

    # The agent list sometimes includes the device itself; keep only true agents.
    extended_instrument.instrument_agent = [
        agent for agent in extended_instrument.instrument_agent
        if agent.type_ == 'InstrumentAgent']

    # Status computation: roll up per-key statuses for this device.
    status_rollups = self.outil.get_status_roll_ups(instrument_device_id, RT.InstrumentDevice)

    def rollup(key):
        # Wrap one rolled-up status key as a PROVIDED computed int.
        return ComputedIntValue(status=ComputedValueAvailability.PROVIDED,
                                value=status_rollups[instrument_device_id].get(key, StatusType.STATUS_UNKNOWN))

    extended_instrument.computed.communications_status_roll_up = rollup("comms")
    extended_instrument.computed.power_status_roll_up = rollup("power")
    extended_instrument.computed.data_status_roll_up = rollup("data")
    extended_instrument.computed.location_status_roll_up = rollup("loc")
    extended_instrument.computed.aggregated_status = rollup("agg")

    return extended_instrument
# TODO: this causes a problem because an instrument agent must be running in order to look up extended attributes.
def obtain_agent_handle(self, device_id):
    """
    Get a ResourceAgentClient for the agent managing device_id.
    @param device_id the device resource id
    @retval ResourceAgentClient
    @throws NotFound when no agent process is running for the device
            (callers such as obtain_agent_calculation rely on this)
    """
    ia_client = ResourceAgentClient(device_id, process=self)

    log.debug("got the instrument agent client here: %s for the device id: %s and process: %s", ia_client, device_id, self)

    # #todo: any validation?
    # cmd = AgentCommand(command='get_current_state')
    # retval = self._ia_client.execute_agent(cmd)
    # state = retval.result
    # self.assertEqual(state, InstrumentAgentState.UNINITIALIZED)
    #

    return ia_client
def obtain_agent_calculation(self, device_id, result_container):
    """
    Build a computed-value container and try to obtain an agent client for device_id.
    @param device_id the device resource id
    @param result_container the OT type name of the computed value object to create
    @retval (agent_client_or_None, computed_value_object); status is PROVIDED
            when a client was obtained, NOTAVAILABLE when the agent is not running
    """
    ret = IonObject(result_container)
    a_client = None
    try:
        a_client = self.obtain_agent_handle(device_id)
        ret.status = ComputedValueAvailability.PROVIDED
    except NotFound:
        ret.status = ComputedValueAvailability.NOTAVAILABLE
        ret.reason = "Could not connect to instrument agent instance -- may not be running"
    # Other exceptions propagate unchanged. The previous
    # "except Exception as e: raise e" clause was a no-op that additionally
    # truncated the original traceback under Python 2.

    return a_client, ret
#functions for INSTRUMENT computed attributes -- currently bogus values returned
def get_firmware_version(self, instrument_device_id):
    # Placeholder computed attribute: returns 0.0 until the agent query is implemented.
    ia_client, ret = self.obtain_agent_calculation(instrument_device_id, OT.ComputedFloatValue)
    if ia_client:
        ret.value = 0.0  # todo: use ia_client
    return ret

def get_last_data_received_datetime(self, instrument_device_id):
    # Placeholder computed attribute: returns 0.0 until the agent query is implemented.
    ia_client, ret = self.obtain_agent_calculation(instrument_device_id, OT.ComputedFloatValue)
    if ia_client:
        ret.value = 0.0  # todo: use ia_client
    return ret

def get_operational_state(self, taskable_resource_id):  # from Device
    # Placeholder computed attribute: returns "" until the agent query is implemented.
    ia_client, ret = self.obtain_agent_calculation(taskable_resource_id, OT.ComputedStringValue)
    if ia_client:
        ret.value = ""  # todo: use ia_client
    return ret

def get_last_calibration_datetime(self, instrument_device_id):
    # Placeholder computed attribute: returns 0 until the agent query is implemented.
    ia_client, ret = self.obtain_agent_calculation(instrument_device_id, OT.ComputedFloatValue)
    if ia_client:
        ret.value = 0  # todo: use ia_client
    return ret
def get_uptime(self, device_id):
    """
    Compute how long a device has been streaming/monitoring, as a human-readable
    string, by scanning recent ResourceAgentStateEvent events for the device.
    @param device_id instrument or platform device resource id
    @retval ComputedStringValue whose value is "D days, H hours, M minutes"
    """
    ia_client, ret = self.obtain_agent_calculation(device_id, OT.ComputedStringValue)

    if ia_client:
        # The agent publishes ResourceAgentStateEvent on state changes; query the
        # event repository for the most recent ones (descending by time).

        # Determine which event state means "running": streaming for instruments,
        # monitoring for platforms.
        device = self.RR.read(device_id)

        event_state = ''
        # States reached when an instrument/platform is wound down from the UI.
        not_streaming_states = [ResourceAgentState.COMMAND, ResourceAgentState.INACTIVE, ResourceAgentState.UNINITIALIZED]
        if device.type_ == 'InstrumentDevice':
            event_state = ResourceAgentState.STREAMING
        elif device.type_ == 'PlatformDevice':
            event_state = 'PLATFORM_AGENT_STATE_MONITORING'

        log.debug("For uptime, we are checking the device with id: %s, type_: %s, and searching recent events for the following event_state: %s", device_id, device.type_, event_state)

        event_tuples = self.container.event_repository.find_events(origin=device_id, event_type='ResourceAgentStateEvent', descending=True)
        # index [2] of each tuple holds the event object; do not shadow the
        # builtin `tuple` as the loop variable (previous code did)
        recent_events = [event_tuple[2] for event_tuple in event_tuples]

        # Events are sorted most-recent-first; the first match decides.
        for evt in recent_events:
            log.debug("Got a recent event with event_state: %s", evt.state)
            if evt.state == event_state:  # e.g. "RESOURCE_AGENT_STATE_STREAMING"
                current_time = get_ion_ts()  # this is in milliseconds
                log.debug("Got most recent streaming event with ts_created: %s. Got the current time: %s", evt.ts_created, current_time)

                return self._convert_to_string(ret, int(current_time)/1000 - int(evt.ts_created)/1000 )
            elif evt.state in not_streaming_states:
                log.debug("Got a most recent event state that means instrument is not streaming anymore: %s", evt.state)
                # Recently shut down; no need to look further back.
                return self._convert_to_string(ret, 0)

    # No client, no events, or no streaming event found: uptime is zero.
    return self._convert_to_string(ret, 0)
def _convert_to_string(self, ret, value):
    """
    Store a human-readable duration into a ComputedStringValue object.
    @param ret ComputedStringValue object
    @param value int number of seconds
    @retval ret the same object, with .value set to "D days, H hours, M minutes"
    """
    # Let datetime arithmetic split the seconds into day/hour/minute fields.
    anchor = datetime(1, 1, 1) + timedelta(seconds=value)
    ret.value = "%s days, %s hours, %s minutes" % (anchor.day - 1, anchor.hour, anchor.minute)
    log.debug("Returning the computed attribute for uptime with value: %s", ret.value)
    return ret
#functions for PLATFORM computed attributes -- currently bogus values returned
def get_platform_device_extension(self, platform_device_id='', ext_associations=None, ext_exclude=None, user_id=''):
    """Returns a PlatformDeviceExtension object containing additional related information
    @param platform_device_id str
    @param ext_associations dict of extra associations to expand
    @param ext_exclude list of associations to skip
    @retval extended_platform PlatformDeviceExtension
    @throws BadRequest if platform_device_id is empty
    """
    if not platform_device_id:
        raise BadRequest("The platform_device_id parameter is empty")

    extended_resource_handler = ExtendedResourceContainer(self)

    extended_platform = extended_resource_handler.create_extended_resource_container(
        OT.PlatformDeviceExtension,
        platform_device_id,
        OT.PlatformDeviceComputedAttributes,
        ext_associations=ext_associations,
        ext_exclude=ext_exclude,
        user_id=user_id)

    # lookup all hasModel predicates
    # lookup is a 2d associative array of [subject type][subject id] -> object id
    lookup = dict([(rt, {}) for rt in [RT.PlatformDevice, RT.InstrumentDevice]])
    for a in self.RR.find_associations(predicate=PRED.hasModel, id_only=False):
        if a.st in lookup:
            lookup[a.st][a.s] = a.o

    def retrieve_model_objs(rsrc_list, object_type):
        # rsrc_list is devices that need models looked up. object_type is the resource type (a device)
        # not all devices have models (represented as None), which kills read_mult. so, extract the models ids,
        # look up all the model ids, then create the proper output
        model_list = [lookup[object_type].get(r._id) for r in rsrc_list]
        model_uniq = list(set([m for m in model_list if m is not None]))
        model_objs = self.clients.resource_registry.read_mult(model_uniq)
        model_dict = dict(zip(model_uniq, model_objs))
        return [model_dict.get(m) for m in model_list]

    extended_platform.instrument_models = retrieve_model_objs(extended_platform.instrument_devices,
                                                              RT.InstrumentDevice)
    extended_platform.platform_models = retrieve_model_objs(extended_platform.platforms,
                                                            RT.PlatformDevice)

    s_unknown = StatusType.STATUS_UNKNOWN

    # Status computation: default everything to UNKNOWN, then attempt the roll-up.
    extended_platform.computed.instrument_status = [s_unknown] * len(extended_platform.instrument_devices)
    extended_platform.computed.platform_status = [s_unknown] * len(extended_platform.platforms)

    def status_unknown():
        return ComputedIntValue(status=ComputedValueAvailability.PROVIDED, value=StatusType.STATUS_UNKNOWN)

    extended_platform.computed.communications_status_roll_up = status_unknown()
    extended_platform.computed.power_status_roll_up = status_unknown()
    extended_platform.computed.data_status_roll_up = status_unknown()
    extended_platform.computed.location_status_roll_up = status_unknown()
    extended_platform.computed.aggregated_status = status_unknown()

    try:
        status_rollups = self.outil.get_status_roll_ups(platform_device_id, RT.PlatformDevice)
        extended_platform.computed.instrument_status = [status_rollups.get(idev._id, {}).get("agg", s_unknown)
                                                       for idev in extended_platform.instrument_devices]
        # BUGFIX: status_rollups is a dict; it was previously *called* here
        # (status_rollups(pdev._id, {})), raising TypeError inside this try
        # block and silently leaving platform_status at the UNKNOWN defaults.
        extended_platform.computed.platform_status = [status_rollups.get(pdev._id, {}).get("agg", s_unknown)
                                                      for pdev in extended_platform.platforms]

        def short_status_rollup(key):
            return ComputedIntValue(status=ComputedValueAvailability.PROVIDED,
                                    value=status_rollups[platform_device_id].get(key, StatusType.STATUS_UNKNOWN))

        extended_platform.computed.communications_status_roll_up = short_status_rollup("comms")
        extended_platform.computed.power_status_roll_up = short_status_rollup("power")
        extended_platform.computed.data_status_roll_up = short_status_rollup("data")
        extended_platform.computed.location_status_roll_up = short_status_rollup("loc")
        extended_platform.computed.aggregated_status = short_status_rollup("agg")

    except Exception:
        # best-effort: log and return the UNKNOWN defaults set above
        log.exception("Computed attribute failed for %s" % platform_device_id)

    return extended_platform
def get_data_product_parameters_set(self, resource_id=''):
    """
    Return a ComputedDictValue mapping each output DataProduct's
    processing_level_code to a dict of its parameter contexts (serialized via dump()).
    @param resource_id the resource whose hasOutputProduct associations are followed
    @retval ret ComputedDictValue; status is NOTAVAILABLE when there are no output products
    @throws BadRequest if resource_id is empty, or a product lacks a stream or stream definition
    """
    # return the set of data product with the processing_level_code as the key to identify
    ret = IonObject(OT.ComputedDictValue)
    log.debug("get_data_product_parameters_set: resource_id is %s ", str(resource_id))
    if not resource_id:
        raise BadRequest("The resource_id parameter is empty")

    # retrieve the output products
    data_product_ids, _ = self.clients.resource_registry.find_objects(resource_id,
                                                                      PRED.hasOutputProduct,
                                                                      RT.DataProduct,
                                                                      True)
    log.debug("get_data_product_parameters_set: data_product_ids is %s ", str(data_product_ids))
    if not data_product_ids:
        ret.status = ComputedValueAvailability.NOTAVAILABLE
    else:
        for data_product_id in data_product_ids:
            data_product_obj = self.clients.resource_registry.read(data_product_id)

            # retrieve the stream for this data product
            data_product_stream_ids, _ = self.clients.resource_registry.find_objects(data_product_id,
                                                                                     PRED.hasStream,
                                                                                     RT.Stream,
                                                                                     True)
            if not data_product_stream_ids:
                raise BadRequest("The data product has no stream associated")

            # retrieve the stream definitions for this stream
            stream_def_ids, _ = self.clients.resource_registry.find_objects(data_product_stream_ids[0],
                                                                            PRED.hasStreamDefinition,
                                                                            RT.StreamDefinition,
                                                                            True)
            if not stream_def_ids:
                raise BadRequest("The data product stream has no stream definition associated")

            context_dict = {}
            pdict = self.clients.pubsub_management.read_stream_definition(stream_def_ids[0]).parameter_dictionary
            log.debug("get_data_product_parameters_set: pdict %s ", str(pdict) )

            pdict_full = ParameterDictionary.load(pdict)
            for key in pdict_full.keys():
                log.debug("get_data_product_parameters_set: key %s ", str(key))
                # each parameter context is serialized for inclusion in the computed dict
                context = DatasetManagementService.get_parameter_context_by_name(key)
                log.debug("get_data_product_parameters_set: context %s ", str(context))
                context_dict[key] = context.dump()

            ret.value[data_product_obj.processing_level_code] = context_dict
            ret.status = ComputedValueAvailability.PROVIDED

    return ret
|
import re
import sys
import warnings
import hypothesis
import pytest
import diofant
# Exclude setup.py from pytest collection.
collect_ignore = ["setup.py"]
# Pattern for the --split option value: "a/b" where b >= 1.
sp = re.compile(r'([0-9]+)/([1-9][0-9]*)')
hypothesis.settings.register_profile("default",
                                     hypothesis.settings(max_examples=100))
def pytest_report_header(config):
    """Show diofant's cache and ground-type configuration in the run header."""
    parts = ["",
             "cache: %s" % diofant.core.cache.USE_CACHE,
             "ground types: %s" % diofant.core.compatibility.GROUND_TYPES,
             ""]
    return "\n".join(parts)
def pytest_addoption(parser):
    # Register --split so a run can be limited to slice "a/b" of the suite;
    # the value is consumed by pytest_collection_modifyitems.
    parser.addoption("--split", action="store", default="", help="split tests")
def pytest_collection_modifyitems(session, config, items):
    """Implement --split a/b: keep only the a-th of b equal slices of items.

    The items list is mutated in place, as pytest requires.
    """
    spec = config.getoption("--split")
    if not spec:
        return
    match = sp.match(spec)
    if not match:
        raise ValueError("split must be a string of the form a/b "
                         "where a and b are ints.")
    part, total = map(int, match.groups())
    lo = (part - 1) * len(items) // total
    hi = part * len(items) // total
    # drop the tail first so the head indices stay valid
    if part < total:
        del items[hi:]
    del items[:lo]
@pytest.fixture(autouse=True, scope='module')
def file_clear_cache():
    # Reset diofant's global evaluation cache before each test module so
    # results do not depend on previously executed modules.
    diofant.core.cache.clear_cache()
@pytest.fixture(autouse=True, scope='session')
def set_displayhook():
    # Restore the default displayhook for the whole session; works around
    # interference described in the linked CPython issue.
    sys.__displayhook__ = sys.displayhook  # https://bugs.python.org/26092
@pytest.fixture(autouse=True, scope='session')
def enable_mpl_agg_backend():
    # Force matplotlib's non-interactive Agg backend so plotting tests never
    # need a display; silently skip when matplotlib is not installed.
    try:
        with warnings.catch_warnings():
            # mpl.use() can emit DeprecationWarning on some versions
            warnings.simplefilter('ignore', DeprecationWarning)
            import matplotlib as mpl
            mpl.use('Agg')
    except ImportError:
        pass
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
    # Pre-populate the doctest namespace with commonly used symbols and with
    # everything diofant exports, so doctests need no explicit imports.
    for sym in (diofant.symbols('a b c d x y z t') +
                diofant.symbols('k m n', integer=True) +
                diofant.symbols('f g h', cls=diofant.Function)):
        doctest_namespace[str(sym)] = sym
    for name in dir(diofant):
        doctest_namespace[name] = getattr(diofant, name)
Add a hack to skip doctesting of the plotting modules when matplotlib is not present
import re
import sys
import warnings
import hypothesis
import pytest
import diofant
# Exclude setup.py from pytest collection.
collect_ignore = ["setup.py"]
# Also skip the plotting modules (and their doctests) when matplotlib is
# not installed; the import is only a probe, so drop the name afterwards.
try:
    import matplotlib
    del matplotlib
except ImportError:
    collect_ignore.extend(["diofant/plotting/plot.py",
                           "diofant/plotting/plot_implicit.py"])
# Pattern for the --split option value: "a/b" where b >= 1.
sp = re.compile(r'([0-9]+)/([1-9][0-9]*)')
hypothesis.settings.register_profile("default",
                                     hypothesis.settings(max_examples=100))
def pytest_report_header(config):
    # Shown at the top of the pytest output: which cache setting and ground
    # types diofant was configured with.
    return """
cache: %s
ground types: %s
""" % (diofant.core.cache.USE_CACHE, diofant.core.compatibility.GROUND_TYPES)
def pytest_addoption(parser):
    # Register --split so a run can be limited to slice "a/b" of the suite;
    # the value is consumed by pytest_collection_modifyitems.
    parser.addoption("--split", action="store", default="", help="split tests")
def pytest_collection_modifyitems(session, config, items):
    # Implements --split a/b: keep only the a-th of b equal slices of the
    # collected test items (mutated in place, as pytest requires).
    split = config.getoption("--split")
    if not split:
        return
    m = sp.match(split)
    if not m:
        raise ValueError("split must be a string of the form a/b "
                         "where a and b are ints.")
    i, t = map(int, m.groups())
    start, end = (i - 1)*len(items)//t, i*len(items)//t
    if i < t:
        # drop the tail first so the head indices stay valid
        del items[end:]
    del items[:start]
@pytest.fixture(autouse=True, scope='module')
def file_clear_cache():
    # Reset diofant's global evaluation cache before each test module so
    # results do not depend on previously executed modules.
    diofant.core.cache.clear_cache()
@pytest.fixture(autouse=True, scope='session')
def set_displayhook():
    # Restore the default displayhook for the whole session; works around
    # interference described in the linked CPython issue.
    sys.__displayhook__ = sys.displayhook  # https://bugs.python.org/26092
@pytest.fixture(autouse=True, scope='session')
def enable_mpl_agg_backend():
    # Force matplotlib's non-interactive Agg backend so plotting tests never
    # need a display; silently skip when matplotlib is not installed.
    try:
        with warnings.catch_warnings():
            # mpl.use() can emit DeprecationWarning on some versions
            warnings.simplefilter('ignore', DeprecationWarning)
            import matplotlib as mpl
            mpl.use('Agg')
    except ImportError:
        pass
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
    # Pre-populate the doctest namespace with commonly used symbols and with
    # everything diofant exports, so doctests need no explicit imports.
    for sym in (diofant.symbols('a b c d x y z t') +
                diofant.symbols('k m n', integer=True) +
                diofant.symbols('f g h', cls=diofant.Function)):
        doctest_namespace[str(sym)] = sym
    for name in dir(diofant):
        doctest_namespace[name] = getattr(diofant, name)
|
import click
import logging
import time
from dotenv import load_dotenv, find_dotenv
from grazer.config import Config
from grazer.core import crawler
from grazer.util import time_convert, grouper
logger = logging.getLogger("Verata")
@click.command()
@click.option("--env", default=find_dotenv(), help="Environment file")
@click.option("--config", help="Configuration file")
@click.option("--log_level",
              default="INFO",
              help="Defines a log level",
              type=click.Choice(["DEBUG", "INFO", "TRACE"]))
@click.option("--debug",
              default=False,
              is_flag=True,
              help="Shortcut for DEBUG log level")
@click.option("--output", help="All results goes here",
              prompt="Enter output file name")
@click.option("--paginate",
              help="Split results into pages by",
              default=10,
              type=int)
@click.option("--rest_interval",
              help="How long to wait before fetching next page",
              default="0s")
def main(env, config, log_level, debug, output, paginate, rest_interval):
    """Crawl the configured site and append (record, link) pairs to *output*,
    sleeping *rest_interval* between pages of *paginate* results."""
    if output is None:
        logger.error("Please provide output file")
        exit()
    if debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        # BUG FIX: "TRACE" is offered as a choice but is not a stdlib logging
        # level, so getattr(logging, "TRACE") raised AttributeError; fall back
        # to DEBUG for unknown names instead of crashing.
        logging.basicConfig(level=getattr(logging, log_level, logging.DEBUG))
    load_dotenv(env)
    cfg = Config(config)
    rest = time_convert(rest_interval)
    with open(output, "w") as f:
        for chunk in grouper(paginate, crawler.create(cfg)):
            # grouper pads/skips with None for incomplete groups
            if chunk is None:
                continue
            for record, link in chunk:
                # use the module-level logger for consistency with the rest
                # of the module
                logger.debug("Record: {0} Link: {1}".format(record, link))
                f.write("({0}, {1})\n".format(record, link))
            if rest > 0:
                time.sleep(rest)
# Allow running the module directly as a script.
if __name__ == "__main__":
    main()
Separate commands into two parts
import click
import logging
import time
from dotenv import load_dotenv, find_dotenv
from grazer.config import Config
from grazer.core import crawler
from grazer.util import time_convert, grouper
logger = logging.getLogger("Verata")
@click.group()
@click.option("--env", default=find_dotenv(), help="Environment file")
@click.option("--log_level",
              default="INFO",
              help="Defines a log level",
              type=click.Choice(["DEBUG", "INFO", "TRACE"]))
@click.option("--debug",
              default=False,
              is_flag=True,
              help="Shortcut for DEBUG log level")
@click.option("--output", help="All results goes here",
              prompt="Enter output file name")
@click.pass_context
def main(ctx, env, log_level, debug, output):
    # Root command group: validates --output, stashes it in ctx.meta for the
    # subcommands, configures logging and loads the environment file.
    if output is None:
        logger.error("Please provide output file")
        exit()
    else:
        click.echo(ctx)
        ctx.meta["output"] = output
    if debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        # NOTE(review): "TRACE" is not a stdlib logging level, so selecting it
        # makes getattr raise AttributeError -- confirm the intended mapping.
        logging.basicConfig(level=getattr(logging, log_level))
    load_dotenv(env)
@main.command()
@click.pass_context
def scrape(ctx):
    # Placeholder subcommand: currently just echoes the shared output path
    # stored in ctx.meta by the root group.
    click.echo(ctx.meta["output"])
@main.command()
@click.pass_context
@click.option("--config", help="Configuration file")
@click.option("--paginate",
              help="Split results into pages by",
              default=10,
              type=int)
@click.option("--rest_interval",
              help="How long to wait before fetching next page",
              default="0s")
def crawl(ctx, config, paginate, rest_interval):
    """Crawl the configured site and write (record, link) pairs to the output
    file chosen on the root command."""
    # BUG FIX: the original signature declared a bare `output` parameter that
    # click never supplies (no option, no context), so invoking the command
    # failed with a TypeError. The output path is collected by the root group
    # and stored in ctx.meta, so read it from there instead.
    output = ctx.meta["output"]
    rest = time_convert(rest_interval)
    cfg = Config(config)
    with open(output, "w") as f:
        for chunk in grouper(paginate, crawler.create(cfg)):
            # grouper yields None for padding entries
            if chunk is None:
                continue
            for record, link in chunk:
                logging.debug("Record: {0} Link: {1}".format(record, link))
                f.write("({0}, {1})\n".format(record, link))
            if rest > 0:
                time.sleep(rest)
# Allow running the module directly as a script.
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
"""
Testing the accuracy of a top-down clustering method using k-means
clustering, k = 2, for splitting homologous blocks into
reference-orthologous blocks.
Requires:
- scikit-learn
- kmodes
- biopython
- sonLib
"""
from argparse import ArgumentParser
import itertools
from sklearn.cluster import KMeans
from sklearn.preprocessing import OneHotEncoder
from kmodes.kmodes import KModes
from Bio import Phylo
import numpy as np
from sonLib.bioio import fastaRead
def seqs_to_columns(seqs, seq_order):
    """Transform a dict of sequences into a list of columns.

    Each column is represented by a list of entries. The order of each
    sequence within the entries is the same as the order in the
    parameter seq_order.
    """
    assert len(seq_order) == len(seqs.keys()), \
        "'seq_order' has more or fewer entries than 'seqs'"
    assert all([seq_name in seqs for seq_name in seq_order]), \
        "'seq_order' refers to a sequence not present in 'seqs'"
    # BUG FIX: the original asserted only that the set of lengths was
    # non-empty (always true for non-empty input); equal lengths means at
    # most one distinct length.
    assert len(set([len(seq) for seq in seqs.values()])) <= 1, \
        "All sequences must have the same length"
    columns = []
    for seq_name in seq_order:
        seq = seqs[seq_name]
        if len(columns) == 0:
            # allocate one empty column per alignment position
            # (range instead of py2-only xrange, for py2/py3 compatibility)
            columns = [[] for _ in range(len(seq))]
        for i, char in enumerate(seq):
            columns[i].append(char)
    return columns
def columns_to_matrix(cols, one_hot_encode=True):
    """Convert alignment columns to a one-hot-encoded numeric matrix.

    Rows of the result correspond to sequences, columns to positions.
    NOTE(review): the one_hot_encode flag is currently ignored -- encoding
    always happens; confirm whether the k-modes path should get the raw
    numeric matrix instead.
    """
    def nuc_to_number(nucleotide):
        # a/c/g/t -> 0..3; anything else (gaps, ambiguity codes) -> 4
        nucleotide = nucleotide.lower()
        if nucleotide == 'a':
            return 0
        elif nucleotide == 'c':
            return 1
        elif nucleotide == 'g':
            return 2
        elif nucleotide == 't':
            return 3
        else:
            return 4
    # NOTE(review): map() yields a list only on Python 2 (xrange elsewhere
    # suggests py2); wrap in list() if this script is ported to Python 3.
    transformed_cols = [map(nuc_to_number, col) for col in cols]
    raw_matrix = np.matrix(transformed_cols).transpose()
    encoder = OneHotEncoder()
    encoded_matrix = encoder.fit_transform(raw_matrix)
    return encoded_matrix
def parse_args():
    """Parse command-line arguments for the clustering script."""
    parser = ArgumentParser(description=__doc__)
    # BUG FIX: the help string was missing its closing parenthesis
    parser.add_argument('fasta', help='fasta file (all sequences must have the '
                        'same length)')
    parser.add_argument('species_tree', help='species tree (newick format)')
    parser.add_argument('reference', help='reference species')
    parser.add_argument('--cluster-method',
                        choices=['k-means', 'k-modes'],
                        default='k-means',
                        help='Clustering method to use')
    parser.add_argument('--evaluation-method',
                        choices=['split-decomposition'],
                        default='split-decomposition',
                        help='Method to evaluate the splits')
    return parser.parse_args()
def cluster_matrix(matrix, cluster_method):
    """Split the rows of *matrix* into two clusters using the chosen method.

    :param matrix: feature matrix (one-hot sparse for k-means)
    :param cluster_method: 'k-means' or 'k-modes'
    :raises ValueError: for an unrecognized cluster_method. (BUG FIX: the
        original raised a NameError because ArgumentError is undefined.)
    """
    if cluster_method == 'k-means':
        return KMeans(n_clusters=2).fit_predict(matrix)
    elif cluster_method == 'k-modes':
        # KModes needs a dense array rather than a sparse matrix
        return KModes(n_clusters=2).fit_predict(matrix.todense())
    else:
        raise ValueError('Unknown cluster method: %s' % cluster_method)
def satisfies_four_point_criterion(matrix, split1, split2, relaxed=False):
    """Check the four-point condition for a candidate split.

    For every pair from split1 combined with every pair from split2, the
    intra-split distance sum must not exceed both inter-split sums (strict
    mode requires it to exceed neither).
    """
    for i, j in itertools.combinations(split1, 2):
        for k, l in itertools.combinations(split2, 2):
            within = matrix[i, j] + matrix[k, l]
            across_a = matrix[i, k] + matrix[j, l]
            across_b = matrix[i, l] + matrix[j, k]
            beats_a = within > across_a
            beats_b = within > across_b
            violated = (beats_a and beats_b) if relaxed else (beats_a or beats_b)
            if violated:
                return False
    return True
def build_tree_topdown(columns, seq_names, cluster_method):
    """Recursively bisect the alignment with 2-way clustering into a tree.

    Each step clusters the sequences into two groups; a group that is small
    (<= 2) or whose columns are all constant becomes a leaf clade, otherwise
    it is recursed into.
    """
    def is_finished(cluster, columns):
        # stop when 2 or fewer sequences remain, or every column is constant
        return len(cluster) <= 2 or all([len(set(column)) == 1 for column in columns])
    def recurse(columns, seq_names):
        matrix = columns_to_matrix(columns, one_hot_encode=(cluster_method == 'k-means'))
        cluster_assignments = cluster_matrix(matrix, cluster_method)
        cluster0_indices = [i for i, cluster in enumerate(cluster_assignments) if cluster == 0]
        cluster1_indices = [i for i, cluster in enumerate(cluster_assignments) if cluster == 1]
        cluster0 = [seq_names[i] for i in cluster0_indices]
        cluster1 = [seq_names[i] for i in cluster1_indices]
        # restrict each column to the members of cluster 0
        cluster0_columns = [[column[i] for i in cluster0_indices] for column in columns]
        if is_finished(cluster0, cluster0_columns):
            # NOTE(review): map() must yield a list here -- true on Python 2
            # only; wrap in list() if porting to Python 3.
            clade0 = Phylo.BaseTree.Clade(clades=map(lambda x: Phylo.BaseTree.Clade(name=x), cluster0))
        else:
            clade0 = Phylo.BaseTree.Clade(clades=recurse(cluster0_columns, cluster0))
        cluster1_columns = [[column[i] for i in cluster1_indices] for column in columns]
        if is_finished(cluster1, cluster1_columns):
            clade1 = Phylo.BaseTree.Clade(clades=map(lambda x: Phylo.BaseTree.Clade(name=x), cluster1))
        else:
            clade1 = Phylo.BaseTree.Clade(clades=recurse(cluster1_columns, cluster1))
        return (clade0, clade1)
    tree = Phylo.BaseTree.Tree(Phylo.BaseTree.Clade(clades=recurse(columns, seq_names)))
    return tree
def main():
    """Entry point: read the alignment, build the top-down tree, print it."""
    args = parse_args()
    # NOTE(review): species_tree is parsed here but not used afterwards
    species_tree = Phylo.parse(args.species_tree, 'newick')
    seqs = dict(fastaRead(args.fasta))
    seq_names = seqs.keys()
    cols = seqs_to_columns(seqs, seq_names)
    tree = build_tree_topdown(cols, seq_names, args.cluster_method)
    Phylo.draw_ascii(tree)

if __name__ == '__main__':
    main()
run 4-point criterion evaluation
(still needs evaluation of 3-point splits somehow)
#!/usr/bin/env python
"""
Testing the accuracy of a top-down clustering method using k-means
clustering, k = 2, for splitting homologous blocks into
reference-orthologous blocks.
Requires:
- scikit-learn
- kmodes
- biopython
- sonLib
"""
from argparse import ArgumentParser
import itertools
from sklearn.cluster import KMeans
from sklearn.preprocessing import OneHotEncoder
from kmodes.kmodes import KModes
from Bio import Phylo
import numpy as np
from sonLib.bioio import fastaRead
def seqs_to_columns(seqs, seq_order):
    """Transform a dict of sequences into a list of columns.

    Each column is represented by a list of entries. The order of each
    sequence within the entries is the same as the order in the
    parameter seq_order.
    """
    assert len(seq_order) == len(seqs.keys()), \
        "'seq_order' has more or fewer entries than 'seqs'"
    assert all([seq_name in seqs for seq_name in seq_order]), \
        "'seq_order' refers to a sequence not present in 'seqs'"
    # BUG FIX: the original asserted only that the set of lengths was
    # non-empty (always true for non-empty input); equal lengths means at
    # most one distinct length.
    assert len(set([len(seq) for seq in seqs.values()])) <= 1, \
        "All sequences must have the same length"
    columns = []
    for seq_name in seq_order:
        seq = seqs[seq_name]
        if len(columns) == 0:
            # allocate one empty column per alignment position
            # (range instead of py2-only xrange, for py2/py3 compatibility)
            columns = [[] for _ in range(len(seq))]
        for i, char in enumerate(seq):
            columns[i].append(char)
    return columns
def columns_to_matrix(cols, one_hot_encode=True):
    """Convert alignment columns to a one-hot-encoded numeric matrix.

    Rows of the result correspond to sequences, columns to positions.
    NOTE(review): the one_hot_encode flag is currently ignored -- encoding
    always happens; confirm whether the k-modes path should get the raw
    numeric matrix instead.
    """
    def nuc_to_number(nucleotide):
        # a/c/g/t -> 0..3; anything else (gaps, ambiguity codes) -> 4
        nucleotide = nucleotide.lower()
        if nucleotide == 'a':
            return 0
        elif nucleotide == 'c':
            return 1
        elif nucleotide == 'g':
            return 2
        elif nucleotide == 't':
            return 3
        else:
            return 4
    # NOTE(review): map() yields a list only on Python 2 (xrange elsewhere
    # suggests py2); wrap in list() if this script is ported to Python 3.
    transformed_cols = [map(nuc_to_number, col) for col in cols]
    raw_matrix = np.matrix(transformed_cols).transpose()
    encoder = OneHotEncoder()
    encoded_matrix = encoder.fit_transform(raw_matrix)
    return encoded_matrix
def parse_args():
    """Parse command-line arguments for the clustering script."""
    parser = ArgumentParser(description=__doc__)
    # BUG FIX: the help string was missing its closing parenthesis
    parser.add_argument('fasta', help='fasta file (all sequences must have the '
                        'same length)')
    parser.add_argument('species_tree', help='species tree (newick format)')
    parser.add_argument('reference', help='reference species')
    parser.add_argument('--cluster-method',
                        choices=['k-means', 'k-modes'],
                        default='k-means',
                        help='Clustering method to use')
    parser.add_argument('--evaluation-method',
                        choices=['split-decomposition', 'none'],
                        default='none',
                        help='Method to evaluate the splits')
    return parser.parse_args()
def cluster_matrix(matrix, cluster_method):
    """Split the rows of *matrix* into two clusters using the chosen method.

    :param matrix: feature matrix (one-hot sparse for k-means)
    :param cluster_method: 'k-means' or 'k-modes'
    :raises ValueError: for an unrecognized cluster_method. (BUG FIX: the
        original raised a NameError because ArgumentError is undefined.)
    """
    if cluster_method == 'k-means':
        return KMeans(n_clusters=2).fit_predict(matrix)
    elif cluster_method == 'k-modes':
        # KModes needs a dense array rather than a sparse matrix
        return KModes(n_clusters=2).fit_predict(matrix.todense())
    else:
        raise ValueError('Unknown cluster method: %s' % cluster_method)
def distance_matrix_from_columns(columns):
    """Build a symmetric matrix of normalized pairwise mismatch fractions.

    Entry (i, j) is the fraction of columns in which sequences i and j
    disagree (compared case-insensitively).
    """
    num_seqs = len(columns[0])
    matrix = np.zeros([num_seqs, num_seqs], dtype=int)
    for column in columns:
        for i, entry_1 in enumerate(column):
            # py2/py3 compatibility fix: range instead of py2-only xrange
            for j in range(i + 1, len(column)):
                entry_2 = column[j]
                if entry_1.lower() != entry_2.lower():
                    matrix[i, j] += 1
                    matrix[j, i] += 1
    return np.true_divide(matrix, len(columns))
def satisfies_four_point_criterion(matrix, split1, split2, relaxed=False):
    """Tests whether a split satisfies the d-split criterion of Bandelt
    and Dress 1992.

    The "relaxed" version is the same version that is in the paper,
    where the internal distance may be larger than one of the
    inter-split distances. Otherwise, it must be smaller than both.
    """
    for i, j in itertools.combinations(split1, 2):
        for k, l in itertools.combinations(split2, 2):
            within = matrix[i, j] + matrix[k, l]
            across_a = matrix[i, k] + matrix[j, l]
            across_b = matrix[i, l] + matrix[j, k]
            beats_a = within > across_a
            beats_b = within > across_b
            violated = (beats_a and beats_b) if relaxed else (beats_a or beats_b)
            if violated:
                return False
    return True
def is_good_split(cluster_assignments, columns, evaluation_method):
    """Decide whether a proposed 2-way split should be accepted.

    :param cluster_assignments: iterable of 0/1 labels, one per sequence
    :param columns: alignment columns used to derive the distance matrix
    :param evaluation_method: 'none' accepts every split;
        'split-decomposition' applies the four-point criterion
    :raises ValueError: for an unknown evaluation_method. (BUG FIX: the
        original fell off the end and silently returned None.)
    """
    assert all([i == 0 or i == 1 for i in cluster_assignments]), \
        "A valid split should only split into two partitions"
    if evaluation_method == 'none':
        return True
    elif evaluation_method == 'split-decomposition':
        distance_matrix = distance_matrix_from_columns(columns)
        split1 = [i for i, cluster in enumerate(cluster_assignments) if cluster == 0]
        split2 = [i for i, cluster in enumerate(cluster_assignments) if cluster == 1]
        return satisfies_four_point_criterion(distance_matrix, split1, split2)
    else:
        raise ValueError('Unknown evaluation method: %s' % evaluation_method)
def build_tree_topdown(columns, seq_names, cluster_method, evaluation_method):
    """Recursively bisect the alignment with 2-way clustering into a tree.

    Each step clusters the sequences into two groups; if the split fails the
    chosen evaluation, the whole group becomes a flat clade. A group that is
    small (<= 2) or whose columns are all constant becomes a leaf clade,
    otherwise it is recursed into.
    """
    def is_finished(cluster, columns):
        # stop when 2 or fewer sequences remain, or every column is constant
        return len(cluster) <= 2 or all([len(set(column)) == 1 for column in columns])
    def recurse(columns, seq_names):
        matrix = columns_to_matrix(columns, one_hot_encode=(cluster_method == 'k-means'))
        cluster_assignments = cluster_matrix(matrix, cluster_method)
        # reject the split early and keep the group as one flat clade
        if not is_good_split(cluster_assignments, columns, evaluation_method):
            return Phylo.BaseTree.Clade(clades=map(lambda x: Phylo.BaseTree.Clade(name=x), seq_names))
        cluster0_indices = [i for i, cluster in enumerate(cluster_assignments) if cluster == 0]
        cluster1_indices = [i for i, cluster in enumerate(cluster_assignments) if cluster == 1]
        cluster0 = [seq_names[i] for i in cluster0_indices]
        cluster1 = [seq_names[i] for i in cluster1_indices]
        # restrict each column to the members of cluster 0
        cluster0_columns = [[column[i] for i in cluster0_indices] for column in columns]
        if is_finished(cluster0, cluster0_columns):
            # NOTE(review): map() must yield a list here -- true on Python 2
            # only; wrap in list() if porting to Python 3.
            clade0 = Phylo.BaseTree.Clade(clades=map(lambda x: Phylo.BaseTree.Clade(name=x), cluster0))
        else:
            clade0 = Phylo.BaseTree.Clade(clades=recurse(cluster0_columns, cluster0))
        cluster1_columns = [[column[i] for i in cluster1_indices] for column in columns]
        if is_finished(cluster1, cluster1_columns):
            clade1 = Phylo.BaseTree.Clade(clades=map(lambda x: Phylo.BaseTree.Clade(name=x), cluster1))
        else:
            clade1 = Phylo.BaseTree.Clade(clades=recurse(cluster1_columns, cluster1))
        return (clade0, clade1)
    tree = Phylo.BaseTree.Tree(Phylo.BaseTree.Clade(clades=recurse(columns, seq_names)))
    return tree
def main():
    """Entry point: read the alignment, build the top-down tree, print it."""
    args = parse_args()
    # NOTE(review): species_tree is parsed here but not used afterwards
    species_tree = Phylo.parse(args.species_tree, 'newick')
    seqs = dict(fastaRead(args.fasta))
    seq_names = seqs.keys()
    cols = seqs_to_columns(seqs, seq_names)
    tree = build_tree_topdown(cols, seq_names, args.cluster_method, args.evaluation_method)
    Phylo.draw_ascii(tree)

if __name__ == '__main__':
    main()
|
add dry-run option to install-system-deps
Summary: Useful for seeing which packages would be installed without actually installing them, and for comparing the rpm vs deb package lists
Reviewed By: Croohand
Differential Revision: D32693528
fbshipit-source-id: a01257e7192f2f0299d57f6f8f7ee93452a6f3e4
|
"""Define celery tasks for hs_core app."""
import os
import sys
import traceback
import zipfile
import logging
import json
from celery.signals import task_postrun
from datetime import datetime, timedelta, date
from xml.etree import ElementTree
import requests
from celery import shared_task
from celery.schedules import crontab
from celery.task import periodic_task
from celery.exceptions import TaskError
from django.conf import settings
from django.core.mail import send_mail
from django.contrib.sites.models import Site
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from rest_framework import status
from hs_access_control.models import GroupMembershipRequest
from hs_core.hydroshare import utils, create_empty_resource, set_dirty_bag_flag
from hydroshare.hydrocelery import app as celery_app
from hs_core.hydroshare.hs_bagit import create_bag_metadata_files, create_bag, create_bagit_files_by_irods
from hs_core.hydroshare.resource import get_activated_doi, get_crossref_url, deposit_res_metadata_with_crossref
from hs_core.task_utils import get_or_create_task_notification
from hs_odm2.models import ODM2Variable
from django_irods.storage import IrodsStorage
from theme.models import UserQuota, QuotaMessage, User
from django_irods.icommands import SessionException
from celery.result import states
from hs_core.models import BaseResource, TaskNotification
from hs_core.enums import RelationTypes
from theme.utils import get_quota_message
from hs_collection_resource.models import CollectionDeletedResource
from hs_file_types.models import (
FileSetLogicalFile,
GenericLogicalFile,
GeoFeatureLogicalFile,
GeoRasterLogicalFile,
ModelProgramLogicalFile,
ModelInstanceLogicalFile,
NetCDFLogicalFile,
RefTimeseriesLogicalFile,
TimeSeriesLogicalFile
)
# Map logical-file type names (as stored in task arguments/serialized form)
# to their corresponding model classes.
FILE_TYPE_MAP = {"GenericLogicalFile": GenericLogicalFile,
                 "FileSetLogicalFile": FileSetLogicalFile,
                 "GeoRasterLogicalFile": GeoRasterLogicalFile,
                 "NetCDFLogicalFile": NetCDFLogicalFile,
                 "GeoFeatureLogicalFile": GeoFeatureLogicalFile,
                 "RefTimeseriesLogicalFile": RefTimeseriesLogicalFile,
                 "TimeSeriesLogicalFile": TimeSeriesLogicalFile,
                 "ModelProgramLogicalFile": ModelProgramLogicalFile,
                 "ModelInstanceLogicalFile": ModelInstanceLogicalFile
                 }
# Pass 'django' into getLogger instead of __name__
# for celery tasks (as this seems to be the
# only way to successfully log in code executed
# by celery, despite our catch-all handler).
logger = logging.getLogger('django')
class FileOverrideException(Exception):
    """Exception carrying a single error message for file-override failures."""

    def __init__(self, error_message):
        # BUG FIX: the original also passed `self` as a positional argument to
        # Exception.__init__, polluting .args with the exception instance and
        # producing a garbled str() representation.
        super(FileOverrideException, self).__init__(error_message)
@celery_app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    """Register the app's cron-style periodic tasks, unless disabled via
    settings.DISABLE_PERIODIC_TASKS."""
    if (hasattr(settings, 'DISABLE_PERIODIC_TASKS') and settings.DISABLE_PERIODIC_TASKS):
        logger.debug("Periodic tasks are disabled in SETTINGS")
    else:
        # nightly zip cleanup, nightly DOI checks, weekly/monthly quota and
        # request/notification maintenance, daily ODM2 sync
        sender.add_periodic_task(crontab(minute=30, hour=23), nightly_zips_cleanup.s())
        sender.add_periodic_task(crontab(minute=0, hour=0), manage_task_nightly.s())
        sender.add_periodic_task(crontab(minute=15, hour=0, day_of_week=1, day_of_month='1-7'), send_over_quota_emails.s())
        sender.add_periodic_task(crontab(minute=00, hour=12), daily_odm2_sync.s())
        sender.add_periodic_task(crontab(day_of_month=1), monthly_group_membership_requests_cleanup.s())
        sender.add_periodic_task(crontab(minute=30, hour=0), daily_innactive_group_requests_cleanup.s())
        sender.add_periodic_task(crontab(day_of_week=1), task_notification_cleanup.s())
# Currently there are two different cleanups scheduled.
# One is 20 minutes after creation, the other is nightly.
# TODO Clean up zipfiles in remote federated storage as well.
@celery_app.task(ignore_result=True)
def nightly_zips_cleanup():
    """Delete the temporary zips folder from two days ago, in the default
    iRODS zone and in every federated zone prefix."""
    # delete 2 days ago
    date_folder = (date.today() - timedelta(2)).strftime('%Y-%m-%d')
    zips_daily_date = "zips/{daily_date}".format(daily_date=date_folder)
    if __debug__:
        logger.debug("cleaning up {}".format(zips_daily_date))
    istorage = IrodsStorage()
    if istorage.exists(zips_daily_date):
        istorage.delete(zips_daily_date)
    # repeat for each distinct federation prefix used by any resource
    federated_prefixes = BaseResource.objects.all().values_list('resource_federation_path')\
        .distinct()
    for p in federated_prefixes:
        prefix = p[0]  # strip tuple
        if prefix != "":
            zips_daily_date = "{prefix}/zips/{daily_date}"\
                .format(prefix=prefix, daily_date=date_folder)
            if __debug__:
                logger.debug("cleaning up {}".format(zips_daily_date))
            istorage = IrodsStorage("federated")
            if istorage.exists(zips_daily_date):
                istorage.delete(zips_daily_date)
@celery_app.task(ignore_result=True)
def manage_task_nightly():
    # The nightly running task do DOI activation check
    # Check DOI activation on failed and pending resources and send email.
    """Nightly DOI maintenance: retry failed CrossRef metadata depositions,
    poll CrossRef for pending DOI activations, and email support with any
    resources that remain unresolved."""
    msg_lst = []
    # retrieve all published resources with failed metadata deposition with CrossRef if any and
    # retry metadata deposition
    failed_resources = BaseResource.objects.filter(raccess__published=True, doi__contains='failure')
    for res in failed_resources:
        if res.metadata.dates.all().filter(type='published'):
            pub_date = res.metadata.dates.all().filter(type='published')[0]
            pub_date = pub_date.start_date.strftime('%m/%d/%Y')
            act_doi = get_activated_doi(res.doi)
            response = deposit_res_metadata_with_crossref(res)
            if response.status_code == status.HTTP_200_OK:
                # retry of metadata deposition succeeds, change resource flag from failure
                # to pending
                res.doi = act_doi
                res.save()
                # create bag and compute checksum for published resource to meet DataONE requirement
                create_bag_by_irods(res.short_id)
            else:
                # retry of metadata deposition failed again, notify admin
                msg_lst.append("Metadata deposition with CrossRef for the published resource "
                               "DOI {res_doi} failed again after retry with first metadata "
                               "deposition requested since {pub_date}.".format(res_doi=act_doi,
                                                                               pub_date=pub_date))
                logger.debug(response.content)
        else:
            msg_lst.append("{res_id} does not have published date in its metadata.".format(
                res_id=res.short_id))
    # poll CrossRef for depositions still marked pending
    pending_resources = BaseResource.objects.filter(raccess__published=True,
                                                    doi__contains='pending')
    for res in pending_resources:
        if res.metadata.dates.all().filter(type='published'):
            pub_date = res.metadata.dates.all().filter(type='published')[0]
            pub_date = pub_date.start_date.strftime('%m/%d/%Y')
            act_doi = get_activated_doi(res.doi)
            main_url = get_crossref_url()
            req_str = '{MAIN_URL}servlet/submissionDownload?usr={USERNAME}&pwd=' \
                '{PASSWORD}&doi_batch_id={DOI_BATCH_ID}&type={TYPE}'
            # NOTE(review): verify=False disables TLS certificate verification
            # for this CrossRef request -- confirm this is intentional.
            response = requests.get(req_str.format(MAIN_URL=main_url,
                                                   USERNAME=settings.CROSSREF_LOGIN_ID,
                                                   PASSWORD=settings.CROSSREF_LOGIN_PWD,
                                                   DOI_BATCH_ID=res.short_id,
                                                   TYPE='result'),
                                    verify=False)
            root = ElementTree.fromstring(response.content)
            rec_cnt_elem = root.find('.//record_count')
            failure_cnt_elem = root.find('.//failure_count')
            success = False
            # activation succeeded when records exist and none failed
            if rec_cnt_elem is not None and failure_cnt_elem is not None:
                rec_cnt = int(rec_cnt_elem.text)
                failure_cnt = int(failure_cnt_elem.text)
                if rec_cnt > 0 and failure_cnt == 0:
                    res.doi = act_doi
                    res.save()
                    success = True
                    # create bag and compute checksum for published resource to meet DataONE requirement
                    create_bag_by_irods(res.short_id)
            if not success:
                msg_lst.append("Published resource DOI {res_doi} is not yet activated with request "
                               "data deposited since {pub_date}.".format(res_doi=act_doi,
                                                                         pub_date=pub_date))
                logger.debug(response.content)
        else:
            msg_lst.append("{res_id} does not have published date in its metadata.".format(
                res_id=res.short_id))
    if msg_lst:
        email_msg = '\n'.join(msg_lst)
        subject = 'Notification of pending DOI deposition/activation of published resources'
        # send email for people monitoring and follow-up as needed
        send_mail(subject, email_msg, settings.DEFAULT_FROM_EMAIL, [settings.DEFAULT_SUPPORT_EMAIL])
@celery_app.task(ignore_result=True)
def send_over_quota_emails():
    """Check each active user's quota and email a warning when usage exceeds
    the soft limit.

    Also maintains the per-user grace-period counter: starts/decrements it
    between soft and hard limits, zeroes it above the hard limit, and resets
    it to -1 (off) once usage drops back below the soft limit.
    """
    # check over quota cases and send quota warning emails as needed
    hs_internal_zone = "hydroshare"
    if not QuotaMessage.objects.exists():
        QuotaMessage.objects.create()
    qmsg = QuotaMessage.objects.first()
    users = User.objects.filter(is_active=True).filter(is_superuser=False).all()
    for u in users:
        uq = UserQuota.objects.filter(user__username=u.username, zone=hs_internal_zone).first()
        if uq:
            used_percent = uq.used_percent
            if used_percent >= qmsg.soft_limit_percent:
                if used_percent >= 100 and used_percent < qmsg.hard_limit_percent:
                    if uq.remaining_grace_period < 0:
                        # triggers grace period counting
                        uq.remaining_grace_period = qmsg.grace_period
                    elif uq.remaining_grace_period > 0:
                        # reduce remaining_grace_period by one day
                        uq.remaining_grace_period -= 1
                elif used_percent >= qmsg.hard_limit_percent:
                    # set grace period to 0 when user quota exceeds hard limit
                    uq.remaining_grace_period = 0
                uq.save()
                # build the salutation from whatever name parts exist
                if u.first_name and u.last_name:
                    sal_name = '{} {}'.format(u.first_name, u.last_name)
                elif u.first_name:
                    sal_name = u.first_name
                elif u.last_name:
                    sal_name = u.last_name
                else:
                    sal_name = u.username
                msg_str = 'Dear ' + sal_name + ':\n\n'
                ori_qm = get_quota_message(u)
                # make embedded settings.DEFAULT_SUPPORT_EMAIL clickable with subject auto-filled
                replace_substr = "<a href='mailto:{0}?subject=Request more quota'>{0}</a>".format(
                    settings.DEFAULT_SUPPORT_EMAIL)
                new_qm = ori_qm.replace(settings.DEFAULT_SUPPORT_EMAIL, replace_substr)
                msg_str += new_qm
                msg_str += '\n\nHydroShare Support'
                subject = 'Quota warning'
                if settings.DEBUG or "www.hydroshare.org" not in Site.objects.get_current().domain:
                    logger.info("quota warning email not sent out on debug server but logged instead: "
                                "{}".format(msg_str))
                else:
                    try:
                        # send email for people monitoring and follow-up as needed
                        send_mail(subject, '', settings.DEFAULT_FROM_EMAIL,
                                  [u.email, settings.DEFAULT_SUPPORT_EMAIL],
                                  html_message=msg_str)
                    except Exception as ex:
                        # BUG FIX: Python 3 exceptions have no .message
                        # attribute, so the original handler itself raised
                        # AttributeError; use str(ex) instead.
                        logger.debug("Failed to send quota warning email: " + str(ex))
            else:
                if uq.remaining_grace_period >= 0:
                    # turn grace period off now that the user is below quota soft limit
                    uq.remaining_grace_period = -1
                    uq.save()
        else:
            logger.debug('user ' + u.username + ' does not have UserQuota foreign key relation')
@shared_task
def add_zip_file_contents_to_resource(pk, zip_file_path):
    """Add zip file to existing resource and remove tmp zip file.

    Progress is recorded on the resource's file_unpack_status /
    file_unpack_message fields; the temporary zip is always deleted.
    """
    zfile = None
    resource = None
    try:
        resource = utils.get_resource_by_shortkey(pk, or_404=False)
        zfile = zipfile.ZipFile(zip_file_path)
        num_files = len(zfile.infolist())
        zcontents = utils.ZipContents(zfile)
        files = zcontents.get_files()
        resource.file_unpack_status = 'Running'
        resource.save()
        # BUG FIX: count from 1 so the progress message reads 1..N instead
        # of starting at "Imported 0 of ..."
        for i, f in enumerate(files, 1):
            logger.debug("Adding file {0} to resource {1}".format(f.name, pk))
            utils.add_file_to_resource(resource, f)
            resource.file_unpack_message = "Imported {0} of about {1} file(s) ...".format(
                i, num_files)
            resource.save()
        # This might make the resource unsuitable for public consumption
        resource.update_public_and_discoverable()
        # TODO: this is a bit of a lie because a different user requested the bag overwrite
        utils.resource_modified(resource, resource.creator, overwrite_bag=False)
        # Call success callback
        resource.file_unpack_message = None
        resource.file_unpack_status = 'Done'
        resource.save()
    except BaseResource.DoesNotExist:
        msg = "Unable to add zip file contents to non-existent resource {pk}."
        msg = msg.format(pk=pk)
        logger.error(msg)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; record the traceback on the resource instead.
        exc_info = "".join(traceback.format_exception(*sys.exc_info()))
        if resource:
            resource.file_unpack_status = 'Error'
            resource.file_unpack_message = exc_info
            resource.save()
        if zfile:
            zfile.close()
        logger.error(exc_info)
    finally:
        # Delete upload file
        os.unlink(zip_file_path)
@shared_task
def delete_zip(zip_path):
    """Delete the given zip path from default iRODS storage if it exists."""
    istorage = IrodsStorage()
    if istorage.exists(zip_path):
        istorage.delete(zip_path)
@shared_task
def create_temp_zip(resource_id, input_path, output_path, aggregation_name=None, sf_zip=False, download_path='',
                    request_username=None):
    """ Create temporary zip file from input_path and store in output_path
    :param resource_id: the short_id of a resource
    :param input_path: full irods path of input starting with federation path
    :param output_path: full irods path of output starting with federation path
    :param aggregation_name: The name of the aggregation to zip
    :param sf_zip: signals a single file to zip
    :param download_path: download path to return as task payload
    :param request_username: the username of the requesting user
    """
    # local import to avoid a circular import at module load time
    from hs_core.hydroshare.utils import get_resource_by_shortkey
    res = get_resource_by_shortkey(resource_id)
    aggregation = None
    if aggregation_name:
        aggregation = res.get_aggregation_by_aggregation_name(aggregation_name)
    istorage = res.get_irods_storage()  # invoke federated storage as necessary
    # composite resources need their aggregation metadata files regenerated
    # before zipping so the zip contains up-to-date metadata
    if res.resource_type == "CompositeResource":
        if '/data/contents/' in input_path:
            short_path = input_path.split('/data/contents/')[1]  # strip /data/contents/
            res.create_aggregation_meta_files(path=short_path)
        else:  # all metadata included, e.g., /data/*
            res.create_aggregation_meta_files()
    if aggregation or sf_zip:
        # input path points to single file aggregation
        # ensure that foo.zip contains aggregation metadata
        # by copying these into a temp subdirectory foo/foo parallel to where foo.zip is stored
        temp_folder_name, ext = os.path.splitext(output_path)  # strip zip to get scratch dir
        head, tail = os.path.split(temp_folder_name)  # tail is unqualified folder name "foo"
        out_with_folder = os.path.join(temp_folder_name, tail)  # foo/foo is subdir to zip
        # in the case of user provided zip file name, out_with_folder path may not end with
        # aggregation file name
        aggr_filename = os.path.basename(input_path)
        if not out_with_folder.endswith(aggr_filename):
            out_with_folder = os.path.join(os.path.dirname(out_with_folder), aggr_filename)
        istorage.copyFiles(input_path, out_with_folder)
        if not aggregation:
            # try to resolve the aggregation from the (shortened) input path
            if '/data/contents/' in input_path:
                short_path = input_path.split('/data/contents/')[1]  # strip /data/contents/
            else:
                short_path = input_path
            try:
                aggregation = res.get_aggregation_by_name(short_path)
            except ObjectDoesNotExist:
                pass
        if aggregation:
            # copy the aggregation's map/metadata (and, for model types,
            # schema) files into the scratch folder; each copy is best-effort
            try:
                istorage.copyFiles(aggregation.map_file_path, temp_folder_name)
            except SessionException:
                logger.error("cannot copy {}".format(aggregation.map_file_path))
            try:
                istorage.copyFiles(aggregation.metadata_file_path, temp_folder_name)
            except SessionException:
                logger.error("cannot copy {}".format(aggregation.metadata_file_path))
            if aggregation.is_model_program or aggregation.is_model_instance:
                try:
                    istorage.copyFiles(aggregation.schema_file_path, temp_folder_name)
                except SessionException:
                    logger.error("cannot copy {}".format(aggregation.schema_file_path))
                if aggregation.is_model_instance:
                    try:
                        istorage.copyFiles(aggregation.schema_values_file_path, temp_folder_name)
                    except SessionException:
                        logger.error("cannot copy {}".format(aggregation.schema_values_file_path))
            for file in aggregation.files.all():
                try:
                    istorage.copyFiles(file.storage_path, temp_folder_name)
                except SessionException:
                    logger.error("cannot copy {}".format(file.storage_path))
        istorage.zipup(temp_folder_name, output_path)
        istorage.delete(temp_folder_name)  # delete working directory; this isn't the zipfile
    else:  # regular folder to zip
        istorage.zipup(input_path, output_path)
    return download_path
@shared_task
def create_bag_by_irods(resource_id, create_zip=True):
    """Build bagit files (and optionally the zipped bag) for a resource in iRODS.

    Runs asynchronously as a celery task so that bagging very large resources
    does not block the web request thread.

    :param resource_id: uuid of the resource to create the bag for
    :param create_zip: when False, only the bagit files are (re)generated and
        no zip is produced
    :return: the bag URL when the zip is created, otherwise None
    :raises ObjectDoesNotExist: if the resource collection is gone from iRODS
        (e.g. deleted by a concurrent request)
    :raises SessionException: if the iRODS zip operation fails
    """
    res = utils.get_resource_by_shortkey(resource_id)
    istorage = res.get_irods_storage()
    bag_path = res.bag_path
    # a missing AVU is treated the same as a "dirty" flag
    meta_flag = res.getAVU('metadata_dirty')
    metadata_dirty = meta_flag is None or meta_flag
    if metadata_dirty:
        # metadata changed since the last bagging -> regenerate metadata xml files
        create_bag_metadata_files(res)
    mod_flag = res.getAVU("bag_modified")
    bag_modified = mod_flag is None or mod_flag
    if metadata_dirty or bag_modified:
        create_bagit_files_by_irods(res, istorage)
        res.setAVU("bag_modified", False)
    if not create_zip:
        return None
    irods_bagit_input_path = res.get_irods_path(resource_id, prepend_short_id=False)
    # only proceed when the resource was not deleted by another request
    # while this download-triggered task was queued
    if not istorage.exists(irods_bagit_input_path):
        raise ObjectDoesNotExist('Resource {} does not exist.'.format(resource_id))
    try:
        if istorage.exists(bag_path):
            istorage.delete(bag_path)
        istorage.zipup(irods_bagit_input_path, bag_path)
        if res.raccess.published:
            # compute checksum to meet DataONE distribution requirement
            res.bag_checksum = istorage.checksum(bag_path)
        return res.bag_url
    except SessionException as ex:
        raise SessionException(-1, '', ex.stderr)
@shared_task
def copy_resource_task(ori_res_id, new_res_id=None, request_username=None):
    """Create a copy of a resource as a celery task.

    :param ori_res_id: short id of the resource to copy
    :param new_res_id: short id of the target resource; when None an empty
        target resource is created first
    :param request_username: username of the user requesting the copy
    :return: absolute URL of the new (copied) resource
    :raises utils.ResourceCopyException: if any step fails; the partially
        created target resource is deleted before raising
    """
    try:
        new_res = None
        if not new_res_id:
            new_res = create_empty_resource(ori_res_id, request_username, action='copy')
            new_res_id = new_res.short_id
        utils.copy_resource_files_and_AVUs(ori_res_id, new_res_id)
        ori_res = utils.get_resource_by_shortkey(ori_res_id)
        if not new_res:
            new_res = utils.get_resource_by_shortkey(new_res_id)
        utils.copy_and_create_metadata(ori_res, new_res)
        if new_res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).exists():
            # the resource to be copied is a versioned resource, need to delete this isVersionOf
            # relation element to maintain the single versioning obsolescence chain
            new_res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).first().delete()
        # create the relation element for the new_res
        today = date.today().strftime("%m/%d/%Y")
        derived_from = "{}, accessed on: {}".format(ori_res.get_citation(), today)
        # since we are allowing user to add relation of type source, need to check we don't already have it
        if not new_res.metadata.relations.all().filter(type=RelationTypes.source, value=derived_from).exists():
            new_res.metadata.create_element('relation', type=RelationTypes.source, value=derived_from)
        if ori_res.resource_type.lower() == "collectionresource":
            # clone contained_res list of original collection and add to new collection
            # note that new collection will not contain "deleted resources"
            new_res.resources = ori_res.resources.all()
        # create bag for the new resource
        create_bag(new_res)
        return new_res.get_absolute_url()
    except Exception as ex:
        # clean up the partially created target, then re-raise with the original
        # exception chained (``from ex``) so the real cause is not lost
        if new_res:
            new_res.delete()
        raise utils.ResourceCopyException(str(ex)) from ex
@shared_task
def create_new_version_resource_task(ori_res_id, username, new_res_id=None):
    """
    Task for creating a new version of a resource
    Args:
        ori_res_id: the original resource id that is to be versioned.
        new_res_id: the new versioned resource id from the original resource. If None, a
            new resource will be created.
        username: the requesting user's username
    Returns:
        the new versioned resource url as the payload
    """
    # fetch the original resource up front so the ``finally`` block below can
    # always release its lock; previously this lookup happened mid-try, so a
    # failure before it made the finally clause raise NameError on ori_res
    ori_res = utils.get_resource_by_shortkey(ori_res_id)
    try:
        new_res = None
        if not new_res_id:
            new_res = create_empty_resource(ori_res_id, username)
            new_res_id = new_res.short_id
        utils.copy_resource_files_and_AVUs(ori_res_id, new_res_id)
        # copy metadata from source resource to target new-versioned resource except three elements
        if not new_res:
            new_res = utils.get_resource_by_shortkey(new_res_id)
        utils.copy_and_create_metadata(ori_res, new_res)
        # add or update Relation element to link source and target resources
        ori_res.metadata.create_element('relation', type=RelationTypes.isReplacedBy, value=new_res.get_citation())
        if new_res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).exists():
            # the original resource is already a versioned resource, and its isVersionOf relation
            # element is copied over to this new version resource, needs to delete this element so
            # it can be created to link to its original resource correctly
            new_res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).first().delete()
        new_res.metadata.create_element('relation', type=RelationTypes.isVersionOf, value=ori_res.get_citation())
        if ori_res.resource_type.lower() == "collectionresource":
            # clone contained_res list of original collection and add to new collection
            # note that new version collection will not contain "deleted resources"
            new_res.resources = ori_res.resources.all()
        # create bag for the new resource
        create_bag(new_res)
        # since an isReplaceBy relation element is added to original resource, needs to call
        # resource_modified() for original resource
        # if everything goes well up to this point, set original resource to be immutable so that
        # obsoleted resources cannot be modified from REST API
        ori_res.raccess.immutable = True
        ori_res.raccess.save()
        ori_res.save()
        return new_res.get_absolute_url()
    except Exception as ex:
        # delete the partially created new version, then re-raise with the
        # original exception chained for easier debugging
        if new_res:
            new_res.delete()
        raise utils.ResourceVersioningException(str(ex)) from ex
    finally:
        # release the lock regardless
        ori_res.locked_time = None
        ori_res.save()
@shared_task
def replicate_resource_bag_to_user_zone_task(res_id, request_username):
    """Copy a resource bag (creating it on demand) into a user's iRODS user zone.

    Args:
        res_id: id of the resource whose bag is replicated
        request_username: user into whose user-zone home the bag is copied
    Returns:
        None; iRODS or validation errors are raised as exceptions
    """
    res = utils.get_resource_by_shortkey(res_id)
    istorage = res.get_irods_storage()
    if not istorage.exists(res.root_path):
        raise ValidationError("Resource {} does not exist in iRODS".format(res.short_id))
    # (re)create the bag when it is flagged modified, or when the zip is missing
    mod_flag = res.getAVU('bag_modified')
    bag_is_current = mod_flag is None or not mod_flag
    if not bag_is_current or not istorage.exists(res.bag_path):
        create_bag_by_irods(res_id)
    # do replication of the resource bag to irods user zone
    if not res.resource_federation_path:
        istorage.set_fed_zone_session()
    src_file = res.bag_path
    tgt_file = '/{userzone}/home/{username}/{resid}.zip'.format(
        userzone=settings.HS_USER_IRODS_ZONE, username=request_username, resid=res_id)
    fsize = istorage.size(src_file)
    # make sure the copy will not push the user over quota before copying
    utils.validate_user_quota(request_username, fsize)
    istorage.copyFiles(src_file, tgt_file)
    return None
@shared_task
def delete_resource_task(resource_id, request_username=None):
    """
    Deletes a resource managed by HydroShare. The caller must be an owner of the resource or an
    administrator to perform this function.
    :param resource_id: The unique HydroShare identifier of the resource to be deleted
    :param request_username: username of the requesting user; when provided, a
        CollectionDeletedResource record is created for every collection this resource was in
    :return: the page URL ('/my-resources/') to redirect to after the delete completes
    raise an exception if there were errors.
    """
    res = utils.get_resource_by_shortkey(resource_id)
    # capture details needed after res.delete() for the deletion records below
    res_title = res.metadata.title
    res_type = res.resource_type
    resource_related_collections = [col for col in res.collections.all()]
    owners_list = [owner for owner in res.raccess.owners.all()]
    # when the most recent version of a resource in an obsolescence chain is deleted, the previous
    # version in the chain needs to be set as the "active" version by deleting "isReplacedBy"
    # relation element
    if res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).exists():
        is_version_of_res_link = \
            res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).first().value
        # use rfind (returns -1 when '/' is absent) instead of rindex, which raises
        # ValueError and would have crashed the task for a bare resource-id value
        idx = is_version_of_res_link.rfind('/')
        if idx == -1:
            obsolete_res_id = is_version_of_res_link
        else:
            obsolete_res_id = is_version_of_res_link[idx + 1:]
        obsolete_res = utils.get_resource_by_shortkey(obsolete_res_id)
        if obsolete_res.metadata.relations.all().filter(type=RelationTypes.isReplacedBy).exists():
            eid = obsolete_res.metadata.relations.all().filter(type=RelationTypes.isReplacedBy).first().id
            obsolete_res.metadata.delete_element('relation', eid)
            # also make this obsoleted resource editable if not published now that it becomes the latest version
            if not obsolete_res.raccess.published:
                obsolete_res.raccess.immutable = False
                obsolete_res.raccess.save()
    for res_in_col in res.resources.all():
        # res being deleted is a collection resource - delete isPartOf relation of all resources that are part of the
        # collection
        if res_in_col.metadata.relations.filter(type='isPartOf', value__endswith=res.short_id).exists():
            res_in_col.metadata.relations.filter(type='isPartOf', value__endswith=res.short_id).delete()
            set_dirty_bag_flag(res_in_col)
    for collection_res in resource_related_collections:
        # res being deleted is part of one or more collections - delete hasPart relation for all those collections
        collection_res.metadata.relations.filter(type='hasPart', value__endswith=res.short_id).delete()
        set_dirty_bag_flag(collection_res)
    res.delete()
    if request_username:
        # if the deleted resource is part of any collection resource, then for each of those collection
        # create a CollectionDeletedResource object which can then be used to list collection deleted
        # resources on collection resource landing page
        for collection_res in resource_related_collections:
            o = CollectionDeletedResource.objects.create(
                resource_title=res_title,
                deleted_by=User.objects.get(username=request_username),
                resource_id=resource_id,
                resource_type=res_type,
                collection=collection_res
            )
            o.resource_owners.add(*owners_list)
    # return the page URL to redirect to after resource deletion task is complete
    return '/my-resources/'
@shared_task
def update_web_services(services_url, api_token, timeout, publish_urls, res_id):
    """Update web services hosted by GeoServer and HydroServer.

    This function sends a resource id to the HydroShare web services manager
    application, which will check the current status of the resource and register
    or unregister services hosted by GeoServer and HydroServer.

    The HydroShare web services manager will return a list of endpoint URLs
    for both the resource and individual aggregations. If publish_urls is set to
    True, these endpoints will be added to the extra metadata fields of the
    resource and aggregations.

    :param services_url: base URL of the web services manager
    :param api_token: token used for Authorization against the manager
    :param timeout: request timeout in seconds
    :param publish_urls: when True, store returned endpoints in extra metadata
    :param res_id: short id of the resource
    :return: the manager's HTTP response, or the exception that was caught
    """
    session = requests.Session()
    # token-based auth header for the web services manager
    session.headers.update(
        {"Authorization": " ".join(("Token", str(api_token)))}
    )
    rest_url = str(services_url) + "/" + str(res_id) + "/"
    try:
        response = session.post(rest_url, timeout=timeout)
        if publish_urls and response.status_code == status.HTTP_201_CREATED:
            try:
                resource = utils.get_resource_by_shortkey(res_id)
                response_content = json.loads(response.content.decode())
                # resource-level endpoints go into the resource's extra metadata
                for key, value in response_content["resource"].items():
                    resource.extra_metadata[key] = value
                    resource.save()
                # aggregation-level endpoints are matched to logical files by name
                for url in response_content["content"]:
                    logical_files = list(resource.logical_files)
                    # NOTE(review): layer_name is encoded to bytes here while
                    # aggregation_name values are str in Python 3, so this
                    # .index() lookup may never match and would raise a
                    # ValueError that is swallowed below — confirm intent
                    lf = logical_files[[i.aggregation_name for i in
                                        logical_files].index(
                        url["layer_name"].encode()
                    )]
                    lf.metadata.extra_metadata["Web Services URL"] = url["message"]
                    lf.metadata.save()
            except Exception as e:
                # best-effort: publication of endpoint URLs must not break the
                # service registration itself
                logger.error(e)
                return e
        return response
    except (requests.exceptions.RequestException, ValueError) as e:
        logger.error(e)
        return e
@shared_task
def resource_debug(resource_id):
    """Run file-integrity checks between Django and iRODS for a resource.

    Returns the issues found by hs_core.management.utils.check_irods_files
    (errors are collected and returned rather than logged).
    """
    resource = utils.get_resource_by_shortkey(resource_id)
    from hs_core.management.utils import check_irods_files
    return check_irods_files(resource, log_errors=False, return_errors=True)
@shared_task
def unzip_task(user_pk, res_id, zip_with_rel_path, bool_remove_original, overwrite=False, auto_aggregate=False,
               ingest_metadata=False, unzip_to_folder=False):
    """Celery wrapper around hs_core.views.utils.unzip_file, run on behalf of a user."""
    from hs_core.views.utils import unzip_file
    requesting_user = User.objects.get(pk=user_pk)
    unzip_file(requesting_user, res_id, zip_with_rel_path, bool_remove_original,
               overwrite, auto_aggregate, ingest_metadata, unzip_to_folder)
@shared_task
def move_aggregation_task(res_id, file_type_id, file_type, tgt_path):
    """Move all files of an aggregation to tgt_path and flag its metadata for regeneration.

    :param res_id: short id of the resource containing the aggregation
    :param file_type_id: database id of the aggregation (logical file) object
    :param file_type: string key into FILE_TYPE_MAP naming the aggregation class
    :param tgt_path: target folder path, relative to the resource's file path
    :return: absolute URL of the resource
    """
    from hs_core.views.utils import rename_irods_file_or_folder_in_django
    res = utils.get_resource_by_shortkey(res_id)
    istorage = res.get_irods_storage()
    aggregation = FILE_TYPE_MAP[file_type].objects.get(id=file_type_id)
    orig_aggregation_name = aggregation.aggregation_name
    # move each file in iRODS, then mirror the rename in django
    for res_file in list(aggregation.files.all()):
        tgt_full_path = os.path.join(res.file_path, tgt_path, os.path.basename(res_file.storage_path))
        istorage.moveFile(res_file.storage_path, tgt_full_path)
        rename_irods_file_or_folder_in_django(res, res_file.storage_path, tgt_full_path)
    new_aggregation_name = os.path.join(tgt_path, os.path.basename(orig_aggregation_name))
    res.set_flag_to_recreate_aggregation_meta_files(orig_path=orig_aggregation_name,
                                                    new_path=new_aggregation_name)
    return res.get_absolute_url()
@celery_app.task(ignore_result=True)
def daily_odm2_sync():
    """
    ODM2 variables are maintained on an external site; this synchronizes data to HydroShare for local caching.
    """
    ODM2Variable.sync()
@celery_app.task(ignore_result=True)
def monthly_group_membership_requests_cleanup():
    """
    Delete expired and redeemed group membership requests.

    Requests whose ``my_date`` is older than 60 days are considered expired.
    """
    two_months_ago = datetime.today() - timedelta(days=60)
    GroupMembershipRequest.objects.filter(my_date__lte=two_months_ago).delete()
@celery_app.task(ignore_result=True)
def daily_innactive_group_requests_cleanup():
    """
    Redeem group membership requests involving inactive users.

    NOTE: the misspelled function name ("innactive") is kept because celery
    registers and schedules tasks by name; renaming would break the schedule.
    """
    # redeem requests made by, or inviting, users that are no longer active
    GroupMembershipRequest.objects.filter(request_from__is_active=False).update(redeemed=True)
    GroupMembershipRequest.objects.filter(invitation_to__is_active=False).update(redeemed=True)
@task_postrun.connect
def update_task_notification(sender=None, task_id=None, task=None, state=None, retval=None, **kwargs):
    """Sync the TaskNotification record with a finished celery task's outcome.

    Connected to celery's task_postrun signal; only tasks whose names appear
    in settings.TASK_NAME_LIST are tracked.

    :param sender: signal sender (unused)
    :param task_id: id of the finished task
    :param task: the task object
    :param state: final celery state of the task
    :param retval: the task's return value, stored as the notification payload
    """
    if task.name not in settings.TASK_NAME_LIST:
        return
    if state == states.SUCCESS:
        outcome = "completed"
    elif state in states.EXCEPTION_STATES:
        outcome = "failed"
    elif state == states.REVOKED:
        outcome = "aborted"
    else:
        logger.warning("Unhandled task state of {} for {}".format(state, task_id))
        return
    get_or_create_task_notification(task_id, status=outcome, payload=retval)
@celery_app.task(ignore_result=True)
def task_notification_cleanup():
    """
    Delete expired task notifications each week.

    Notifications whose ``created`` timestamp is older than 7 days are removed.
    """
    week_ago = datetime.today() - timedelta(days=7)
    TaskNotification.objects.filter(created__lte=week_ago).delete()
# [#4802] cleanup tasks
"""Define celery tasks for hs_core app."""
import os
import sys
import traceback
import zipfile
import logging
import json
from celery.signals import task_postrun
from datetime import datetime, timedelta, date
from xml.etree import ElementTree
import requests
from celery import shared_task
from celery.schedules import crontab
from django.conf import settings
from django.core.mail import send_mail
from django.contrib.sites.models import Site
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from rest_framework import status
from hs_access_control.models import GroupMembershipRequest
from hs_core.hydroshare import utils, create_empty_resource, set_dirty_bag_flag
from hydroshare.hydrocelery import app as celery_app
from hs_core.hydroshare.hs_bagit import create_bag_metadata_files, create_bag, create_bagit_files_by_irods
from hs_core.hydroshare.resource import get_activated_doi, get_crossref_url, deposit_res_metadata_with_crossref
from hs_core.task_utils import get_or_create_task_notification
from hs_odm2.models import ODM2Variable
from django_irods.storage import IrodsStorage
from theme.models import UserQuota, QuotaMessage, User
from django_irods.icommands import SessionException
from celery.result import states
from hs_core.models import BaseResource, TaskNotification
from hs_core.enums import RelationTypes
from theme.utils import get_quota_message
from hs_collection_resource.models import CollectionDeletedResource
from hs_file_types.models import (
FileSetLogicalFile,
GenericLogicalFile,
GeoFeatureLogicalFile,
GeoRasterLogicalFile,
ModelProgramLogicalFile,
ModelInstanceLogicalFile,
NetCDFLogicalFile,
RefTimeseriesLogicalFile,
TimeSeriesLogicalFile
)
# Maps class-name strings (as passed through celery task signatures, e.g. by
# move_aggregation_task) back to the concrete logical-file model classes.
FILE_TYPE_MAP = {"GenericLogicalFile": GenericLogicalFile,
                 "FileSetLogicalFile": FileSetLogicalFile,
                 "GeoRasterLogicalFile": GeoRasterLogicalFile,
                 "NetCDFLogicalFile": NetCDFLogicalFile,
                 "GeoFeatureLogicalFile": GeoFeatureLogicalFile,
                 "RefTimeseriesLogicalFile": RefTimeseriesLogicalFile,
                 "TimeSeriesLogicalFile": TimeSeriesLogicalFile,
                 "ModelProgramLogicalFile": ModelProgramLogicalFile,
                 "ModelInstanceLogicalFile": ModelInstanceLogicalFile
                 }
# Pass 'django' into getLogger instead of __name__
# for celery tasks (as this seems to be the
# only way to successfully log in code executed
# by celery, despite our catch-all handler).
logger = logging.getLogger('django')
class FileOverrideException(Exception):
    """Raised when a file overwrite is attempted but not permitted."""

    def __init__(self, error_message):
        # pass only the message to Exception.__init__; the previous code also
        # passed ``self``, which polluted ``args`` and the str() representation
        super(FileOverrideException, self).__init__(error_message)
@celery_app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    """Register all periodic celery tasks, unless settings.DISABLE_PERIODIC_TASKS is set."""
    if (hasattr(settings, 'DISABLE_PERIODIC_TASKS') and settings.DISABLE_PERIODIC_TASKS):
        logger.debug("Periodic tasks are disabled in SETTINGS")
    else:
        # nightly bag/zip and DOI maintenance
        sender.add_periodic_task(crontab(minute=30, hour=23), nightly_zips_cleanup.s())
        sender.add_periodic_task(crontab(minute=0, hour=0), manage_task_nightly.s())
        # first Monday of each month (day_of_week=1 within days 1-7)
        sender.add_periodic_task(crontab(minute=15, hour=0, day_of_week=1, day_of_month='1-7'),
                                 send_over_quota_emails.s())
        sender.add_periodic_task(crontab(minute=00, hour=12), daily_odm2_sync.s())
        sender.add_periodic_task(crontab(day_of_month=1), monthly_group_membership_requests_cleanup.s())
        sender.add_periodic_task(crontab(minute=30, hour=0), daily_innactive_group_requests_cleanup.s())
        sender.add_periodic_task(crontab(day_of_week=1), task_notification_cleanup.s())
# Currently there are two different cleanups scheduled.
# One is 20 minutes after creation, the other is nightly.
# TODO Clean up zipfiles in remote federated storage as well.
@celery_app.task(ignore_result=True)
def nightly_zips_cleanup():
    """Delete temporary download-zip folders created two days ago, in local and federated storage."""
    # scratch folders are named zips/<YYYY-MM-DD>; target the one from two days back
    stale_date = (date.today() - timedelta(2)).strftime('%Y-%m-%d')
    local_folder = "zips/{daily_date}".format(daily_date=stale_date)
    if __debug__:
        logger.debug("cleaning up {}".format(local_folder))
    local_storage = IrodsStorage()
    if local_storage.exists(local_folder):
        local_storage.delete(local_folder)
    # repeat the cleanup under every distinct federation prefix in use
    federated_prefixes = BaseResource.objects.all().values_list('resource_federation_path')\
        .distinct()
    for row in federated_prefixes:
        prefix = row[0]  # values_list yields 1-tuples
        if prefix == "":
            continue
        fed_folder = "{prefix}/zips/{daily_date}"\
            .format(prefix=prefix, daily_date=stale_date)
        if __debug__:
            logger.debug("cleaning up {}".format(fed_folder))
        fed_storage = IrodsStorage("federated")
        if fed_storage.exists(fed_folder):
            fed_storage.delete(fed_folder)
@celery_app.task(ignore_result=True)
def manage_task_nightly():
    """Nightly DOI activation check for published resources.

    Retries CrossRef metadata deposition for resources whose doi contains
    'failure', polls CrossRef submission status for those containing
    'pending', and emails support with anything still unresolved.
    """
    # The nightly running task do DOI activation check
    # Check DOI activation on failed and pending resources and send email.
    msg_lst = []
    # retrieve all published resources with failed metadata deposition with CrossRef if any and
    # retry metadata deposition
    failed_resources = BaseResource.objects.filter(raccess__published=True, doi__contains='failure')
    for res in failed_resources:
        if res.metadata.dates.all().filter(type='published'):
            pub_date = res.metadata.dates.all().filter(type='published')[0]
            pub_date = pub_date.start_date.strftime('%m/%d/%Y')
            act_doi = get_activated_doi(res.doi)
            response = deposit_res_metadata_with_crossref(res)
            if response.status_code == status.HTTP_200_OK:
                # retry of metadata deposition succeeds, change resource flag from failure
                # to pending
                res.doi = act_doi
                res.save()
                # create bag and compute checksum for published resource to meet DataONE requirement
                create_bag_by_irods(res.short_id)
            else:
                # retry of metadata deposition failed again, notify admin
                msg_lst.append("Metadata deposition with CrossRef for the published resource "
                               "DOI {res_doi} failed again after retry with first metadata "
                               "deposition requested since {pub_date}.".format(res_doi=act_doi,
                                                                               pub_date=pub_date))
                logger.debug(response.content)
        else:
            msg_lst.append("{res_id} does not have published date in its metadata.".format(
                res_id=res.short_id))
    pending_resources = BaseResource.objects.filter(raccess__published=True,
                                                    doi__contains='pending')
    for res in pending_resources:
        if res.metadata.dates.all().filter(type='published'):
            pub_date = res.metadata.dates.all().filter(type='published')[0]
            pub_date = pub_date.start_date.strftime('%m/%d/%Y')
            act_doi = get_activated_doi(res.doi)
            main_url = get_crossref_url()
            req_str = '{MAIN_URL}servlet/submissionDownload?usr={USERNAME}&pwd=' \
                      '{PASSWORD}&doi_batch_id={DOI_BATCH_ID}&type={TYPE}'
            # NOTE(review): verify=False disables TLS certificate checking on this
            # CrossRef status request — confirm whether this is still required
            response = requests.get(req_str.format(MAIN_URL=main_url,
                                                   USERNAME=settings.CROSSREF_LOGIN_ID,
                                                   PASSWORD=settings.CROSSREF_LOGIN_PWD,
                                                   DOI_BATCH_ID=res.short_id,
                                                   TYPE='result'),
                                    verify=False)
            root = ElementTree.fromstring(response.content)
            rec_cnt_elem = root.find('.//record_count')
            failure_cnt_elem = root.find('.//failure_count')
            success = False
            if rec_cnt_elem is not None and failure_cnt_elem is not None:
                rec_cnt = int(rec_cnt_elem.text)
                failure_cnt = int(failure_cnt_elem.text)
                if rec_cnt > 0 and failure_cnt == 0:
                    # batch fully deposited -> activate the DOI
                    res.doi = act_doi
                    res.save()
                    success = True
                    # create bag and compute checksum for published resource to meet DataONE requirement
                    create_bag_by_irods(res.short_id)
            if not success:
                msg_lst.append("Published resource DOI {res_doi} is not yet activated with request "
                               "data deposited since {pub_date}.".format(res_doi=act_doi,
                                                                         pub_date=pub_date))
                logger.debug(response.content)
        else:
            msg_lst.append("{res_id} does not have published date in its metadata.".format(
                res_id=res.short_id))
    if msg_lst:
        email_msg = '\n'.join(msg_lst)
        subject = 'Notification of pending DOI deposition/activation of published resources'
        # send email for people monitoring and follow-up as needed
        send_mail(subject, email_msg, settings.DEFAULT_FROM_EMAIL, [settings.DEFAULT_SUPPORT_EMAIL])
@celery_app.task(ignore_result=True)
def send_over_quota_emails():
    """Check user quotas and email warnings to users over their soft limit.

    Also starts, advances, or clears each user's grace-period counter depending
    on where their usage falls relative to the soft and hard quota limits.
    """
    # check over quota cases and send quota warning emails as needed
    hs_internal_zone = "hydroshare"
    if not QuotaMessage.objects.exists():
        QuotaMessage.objects.create()
    qmsg = QuotaMessage.objects.first()
    users = User.objects.filter(is_active=True).filter(is_superuser=False).all()
    for u in users:
        uq = UserQuota.objects.filter(user__username=u.username, zone=hs_internal_zone).first()
        if uq:
            used_percent = uq.used_percent
            if used_percent >= qmsg.soft_limit_percent:
                if used_percent >= 100 and used_percent < qmsg.hard_limit_percent:
                    if uq.remaining_grace_period < 0:
                        # triggers grace period counting
                        uq.remaining_grace_period = qmsg.grace_period
                    elif uq.remaining_grace_period > 0:
                        # reduce remaining_grace_period by one day
                        uq.remaining_grace_period -= 1
                elif used_percent >= qmsg.hard_limit_percent:
                    # set grace period to 0 when user quota exceeds hard limit
                    uq.remaining_grace_period = 0
                uq.save()
                if u.first_name and u.last_name:
                    sal_name = '{} {}'.format(u.first_name, u.last_name)
                elif u.first_name:
                    sal_name = u.first_name
                elif u.last_name:
                    sal_name = u.last_name
                else:
                    sal_name = u.username
                msg_str = 'Dear ' + sal_name + ':\n\n'
                ori_qm = get_quota_message(u)
                # make embedded settings.DEFAULT_SUPPORT_EMAIL clickable with subject auto-filled
                replace_substr = "<a href='mailto:{0}?subject=Request more quota'>{0}</a>".format(
                    settings.DEFAULT_SUPPORT_EMAIL)
                new_qm = ori_qm.replace(settings.DEFAULT_SUPPORT_EMAIL, replace_substr)
                msg_str += new_qm
                msg_str += '\n\nHydroShare Support'
                subject = 'Quota warning'
                if settings.DEBUG or "www.hydroshare.org" not in Site.objects.get_current().domain:
                    logger.info("quota warning email not sent out on debug server but logged instead: "
                                "{}".format(msg_str))
                else:
                    try:
                        # send email for people monitoring and follow-up as needed
                        send_mail(subject, '', settings.DEFAULT_FROM_EMAIL,
                                  [u.email, settings.DEFAULT_SUPPORT_EMAIL],
                                  html_message=msg_str)
                    except Exception as ex:
                        # BUGFIX: Python 3 exceptions have no ``.message`` attribute;
                        # the old ``ex.message`` raised AttributeError and hid the real error
                        logger.debug("Failed to send quota warning email: " + str(ex))
            else:
                if uq.remaining_grace_period >= 0:
                    # turn grace period off now that the user is below quota soft limit
                    uq.remaining_grace_period = -1
                    uq.save()
        else:
            logger.debug('user ' + u.username + ' does not have UserQuota foreign key relation')
@shared_task
def add_zip_file_contents_to_resource(pk, zip_file_path):
    """Add zip file contents to an existing resource and remove the tmp zip file.

    :param pk: short id of the target resource
    :param zip_file_path: path of the temporary zip file; always deleted on exit
    """
    zfile = None
    resource = None
    try:
        resource = utils.get_resource_by_shortkey(pk, or_404=False)
        zfile = zipfile.ZipFile(zip_file_path)
        num_files = len(zfile.infolist())
        zcontents = utils.ZipContents(zfile)
        files = zcontents.get_files()
        resource.file_unpack_status = 'Running'
        resource.save()
        # start=1 so the progress message reports files already imported
        # (the previous 0-based index under-reported progress by one)
        for i, f in enumerate(files, start=1):
            logger.debug("Adding file {0} to resource {1}".format(f.name, pk))
            utils.add_file_to_resource(resource, f)
            resource.file_unpack_message = "Imported {0} of about {1} file(s) ...".format(
                i, num_files)
            resource.save()
        # This might make the resource unsuitable for public consumption
        resource.update_public_and_discoverable()
        # TODO: this is a bit of a lie because a different user requested the bag overwrite
        utils.resource_modified(resource, resource.creator, overwrite_bag=False)
        # Call success callback
        resource.file_unpack_message = None
        resource.file_unpack_status = 'Done'
        resource.save()
    except BaseResource.DoesNotExist:
        msg = "Unable to add zip file contents to non-existent resource {pk}."
        msg = msg.format(pk=pk)
        logger.error(msg)
    except Exception:
        # record the failure on the resource instead of silently losing it;
        # narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt propagate
        exc_info = "".join(traceback.format_exception(*sys.exc_info()))
        if resource:
            resource.file_unpack_status = 'Error'
            resource.file_unpack_message = exc_info
            resource.save()
        if zfile:
            zfile.close()
        logger.error(exc_info)
    finally:
        # Delete upload file
        os.unlink(zip_file_path)
@shared_task
def delete_zip(zip_path):
    """Remove a temporary zip file from iRODS if it still exists."""
    storage = IrodsStorage()
    if storage.exists(zip_path):
        storage.delete(zip_path)
@shared_task
def create_temp_zip(resource_id, input_path, output_path, aggregation_name=None, sf_zip=False, download_path='',
                    request_username=None):
    """ Create temporary zip file from input_path and store in output_path
    :param resource_id: the short_id of a resource
    :param input_path: full irods path of input starting with federation path
    :param output_path: full irods path of output starting with federation path
    :param aggregation_name: The name of the aggregation to zip
    :param sf_zip: signals a single file to zip
    :param download_path: download path to return as task payload
    :param request_username: the username of the requesting user
    :return: download_path, unchanged, as the celery task payload
    """
    from hs_core.hydroshare.utils import get_resource_by_shortkey
    res = get_resource_by_shortkey(resource_id)
    aggregation = None
    if aggregation_name:
        aggregation = res.get_aggregation_by_aggregation_name(aggregation_name)
    istorage = res.get_irods_storage()  # invoke federated storage as necessary
    # composite resources keep aggregation metadata files alongside the data;
    # regenerate them before zipping so the zip is self-describing
    if res.resource_type == "CompositeResource":
        if '/data/contents/' in input_path:
            short_path = input_path.split('/data/contents/')[1]  # strip /data/contents/
            res.create_aggregation_meta_files(path=short_path)
        else:  # all metadata included, e.g., /data/*
            res.create_aggregation_meta_files()
    if aggregation or sf_zip:
        # input path points to single file aggregation
        # ensure that foo.zip contains aggregation metadata
        # by copying these into a temp subdirectory foo/foo parallel to where foo.zip is stored
        temp_folder_name, ext = os.path.splitext(output_path)  # strip zip to get scratch dir
        head, tail = os.path.split(temp_folder_name)  # tail is unqualified folder name "foo"
        out_with_folder = os.path.join(temp_folder_name, tail)  # foo/foo is subdir to zip
        # in the case of user provided zip file name, out_with_folder path may not end with
        # aggregation file name
        aggr_filename = os.path.basename(input_path)
        if not out_with_folder.endswith(aggr_filename):
            out_with_folder = os.path.join(os.path.dirname(out_with_folder), aggr_filename)
        istorage.copyFiles(input_path, out_with_folder)
        # sf_zip without an explicit aggregation: try to resolve one from the path
        if not aggregation:
            if '/data/contents/' in input_path:
                short_path = input_path.split('/data/contents/')[1]  # strip /data/contents/
            else:
                short_path = input_path
            try:
                aggregation = res.get_aggregation_by_name(short_path)
            except ObjectDoesNotExist:
                pass
        if aggregation:
            # copy aggregation metadata (map/metadata xml, plus schema files for
            # model aggregations) into the scratch dir; copy failures are logged
            # and skipped so a partial zip is still produced
            try:
                istorage.copyFiles(aggregation.map_file_path, temp_folder_name)
            except SessionException:
                logger.error("cannot copy {}".format(aggregation.map_file_path))
            try:
                istorage.copyFiles(aggregation.metadata_file_path, temp_folder_name)
            except SessionException:
                logger.error("cannot copy {}".format(aggregation.metadata_file_path))
            if aggregation.is_model_program or aggregation.is_model_instance:
                try:
                    istorage.copyFiles(aggregation.schema_file_path, temp_folder_name)
                except SessionException:
                    logger.error("cannot copy {}".format(aggregation.schema_file_path))
                if aggregation.is_model_instance:
                    try:
                        istorage.copyFiles(aggregation.schema_values_file_path, temp_folder_name)
                    except SessionException:
                        logger.error("cannot copy {}".format(aggregation.schema_values_file_path))
            for file in aggregation.files.all():
                try:
                    istorage.copyFiles(file.storage_path, temp_folder_name)
                except SessionException:
                    logger.error("cannot copy {}".format(file.storage_path))
        istorage.zipup(temp_folder_name, output_path)
        istorage.delete(temp_folder_name)  # delete working directory; this isn't the zipfile
    else:  # regular folder to zip
        istorage.zipup(input_path, output_path)
    return download_path
@shared_task
def create_bag_by_irods(resource_id, create_zip=True):
    """Build bagit files (and optionally the zipped bag) for a resource in iRODS.

    Runs asynchronously as a celery task so that bagging very large resources
    does not block the web request thread.

    :param resource_id: uuid of the resource to create the bag for
    :param create_zip: when False, only the bagit files are (re)generated and
        no zip is produced
    :return: the bag URL when the zip is created, otherwise None
    :raises ObjectDoesNotExist: if the resource collection is gone from iRODS
        (e.g. deleted by a concurrent request)
    :raises SessionException: if the iRODS zip operation fails
    """
    res = utils.get_resource_by_shortkey(resource_id)
    istorage = res.get_irods_storage()
    bag_path = res.bag_path
    # a missing AVU is treated the same as a "dirty" flag
    meta_flag = res.getAVU('metadata_dirty')
    metadata_dirty = meta_flag is None or meta_flag
    if metadata_dirty:
        # metadata changed since the last bagging -> regenerate metadata xml files
        create_bag_metadata_files(res)
    mod_flag = res.getAVU("bag_modified")
    bag_modified = mod_flag is None or mod_flag
    if metadata_dirty or bag_modified:
        create_bagit_files_by_irods(res, istorage)
        res.setAVU("bag_modified", False)
    if not create_zip:
        return None
    irods_bagit_input_path = res.get_irods_path(resource_id, prepend_short_id=False)
    # only proceed when the resource was not deleted by another request
    # while this download-triggered task was queued
    if not istorage.exists(irods_bagit_input_path):
        raise ObjectDoesNotExist('Resource {} does not exist.'.format(resource_id))
    try:
        if istorage.exists(bag_path):
            istorage.delete(bag_path)
        istorage.zipup(irods_bagit_input_path, bag_path)
        if res.raccess.published:
            # compute checksum to meet DataONE distribution requirement
            res.bag_checksum = istorage.checksum(bag_path)
        return res.bag_url
    except SessionException as ex:
        raise SessionException(-1, '', ex.stderr)
@shared_task
def copy_resource_task(ori_res_id, new_res_id=None, request_username=None):
    """
    Task for creating a copy of a resource.

    Args:
        ori_res_id: id of the original resource being copied.
        new_res_id: id of the target resource; if None, an empty target
            resource is created first.
        request_username: username of the user requesting the copy.
    Returns:
        the new resource's absolute URL as the payload
    Raises:
        utils.ResourceCopyException: wrapping any failure; the target resource
            handled by this task is deleted when the copy fails.
    """
    try:
        new_res = None
        if not new_res_id:
            new_res = create_empty_resource(ori_res_id, request_username, action='copy')
            new_res_id = new_res.short_id
        utils.copy_resource_files_and_AVUs(ori_res_id, new_res_id)
        ori_res = utils.get_resource_by_shortkey(ori_res_id)
        if not new_res:
            new_res = utils.get_resource_by_shortkey(new_res_id)
        utils.copy_and_create_metadata(ori_res, new_res)

        if new_res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).exists():
            # the resource to be copied is a versioned resource, need to delete this isVersionOf
            # relation element to maintain the single versioning obsolescence chain
            new_res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).first().delete()

        # create the relation element for the new_res
        today = date.today().strftime("%m/%d/%Y")
        derived_from = "{}, accessed on: {}".format(ori_res.get_citation(), today)
        # since we are allowing user to add relation of type source, need to check we don't already have it
        if not new_res.metadata.relations.all().filter(type=RelationTypes.source, value=derived_from).exists():
            new_res.metadata.create_element('relation', type=RelationTypes.source, value=derived_from)

        if ori_res.resource_type.lower() == "collectionresource":
            # clone contained_res list of original collection and add to new collection
            # note that new collection will not contain "deleted resources"
            new_res.resources = ori_res.resources.all()

        # create bag for the new resource
        create_bag(new_res)
        return new_res.get_absolute_url()
    except Exception as ex:
        if new_res:
            new_res.delete()
        raise utils.ResourceCopyException(str(ex))
@shared_task
def create_new_version_resource_task(ori_res_id, username, new_res_id=None):
    """
    Task for creating a new version of a resource.

    Args:
        ori_res_id: the original resource id that is to be versioned.
        new_res_id: the new versioned resource id from the original resource. If None, a
            new resource will be created.
        username: the requesting user's username
    Returns:
        the new versioned resource url as the payload
    Raises:
        utils.ResourceVersioningException: wrapping any failure; the new
            resource is deleted when versioning fails.
    """
    # fix: initialize before the try block so the except/finally clauses can
    # safely reference these names even when an early statement raises; the
    # original hit an UnboundLocalError on ori_res in finally if anything
    # failed before ori_res was fetched, masking the real exception.
    ori_res = None
    new_res = None
    try:
        if not new_res_id:
            new_res = create_empty_resource(ori_res_id, username)
            new_res_id = new_res.short_id
        utils.copy_resource_files_and_AVUs(ori_res_id, new_res_id)

        # copy metadata from source resource to target new-versioned resource except three elements
        ori_res = utils.get_resource_by_shortkey(ori_res_id)
        if not new_res:
            new_res = utils.get_resource_by_shortkey(new_res_id)
        utils.copy_and_create_metadata(ori_res, new_res)

        # add or update Relation element to link source and target resources
        ori_res.metadata.create_element('relation', type=RelationTypes.isReplacedBy, value=new_res.get_citation())

        if new_res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).exists():
            # the original resource is already a versioned resource, and its isVersionOf relation
            # element is copied over to this new version resource, needs to delete this element so
            # it can be created to link to its original resource correctly
            new_res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).first().delete()

        new_res.metadata.create_element('relation', type=RelationTypes.isVersionOf, value=ori_res.get_citation())

        if ori_res.resource_type.lower() == "collectionresource":
            # clone contained_res list of original collection and add to new collection
            # note that new version collection will not contain "deleted resources"
            new_res.resources = ori_res.resources.all()

        # create bag for the new resource
        create_bag(new_res)

        # if everything goes well up to this point, set original resource to be immutable so that
        # obsoleted resources cannot be modified from REST API
        ori_res.raccess.immutable = True
        ori_res.raccess.save()
        ori_res.save()
        return new_res.get_absolute_url()
    except Exception as ex:
        if new_res:
            new_res.delete()
        raise utils.ResourceVersioningException(str(ex))
    finally:
        # release the lock regardless, but only if the resource was fetched
        if ori_res is not None:
            ori_res.locked_time = None
            ori_res.save()
@shared_task
def replicate_resource_bag_to_user_zone_task(res_id, request_username):
    """
    Task for replicating a resource bag (created on demand if not already
    present) to the iRODS user zone.

    Args:
        res_id: the resource id with its bag to be replicated to iRODS user zone
        request_username: the requesting user's username to whose user zone space the bag is copied to
    Returns:
        None, but exceptions will be raised if there is an issue with iRODS operation
    """
    res = utils.get_resource_by_shortkey(res_id)
    res_coll = res.root_path
    istorage = res.get_irods_storage()
    if istorage.exists(res_coll):
        # (Re)create the bag when it is flagged as modified or is missing.
        bag_modified = res.getAVU('bag_modified')
        if bag_modified is None or not bag_modified:
            if not istorage.exists(res.bag_path):
                create_bag_by_irods(res_id)
        else:
            create_bag_by_irods(res_id)

        # do replication of the resource bag to irods user zone
        if not res.resource_federation_path:
            istorage.set_fed_zone_session()
            src_file = res.bag_path
            tgt_file = '/{userzone}/home/{username}/{resid}.zip'.format(
                userzone=settings.HS_USER_IRODS_ZONE, username=request_username, resid=res_id)
            # Enforce the user's quota before copying the bag over.
            fsize = istorage.size(src_file)
            utils.validate_user_quota(request_username, fsize)
            istorage.copyFiles(src_file, tgt_file)
        # NOTE(review): resources with a resource_federation_path are silently
        # skipped (no copy, no error) -- confirm this is intended.
        return None
    else:
        raise ValidationError("Resource {} does not exist in iRODS".format(res.short_id))
@shared_task
def delete_resource_task(resource_id, request_username=None):
    """
    Deletes a resource managed by HydroShare. The caller must be an owner of the resource or an
    administrator to perform this function.

    :param resource_id: The unique HydroShare identifier of the resource to be deleted
    :param request_username: when given, a CollectionDeletedResource record is
        created for each collection that contained the deleted resource
    :return: the page URL to redirect to after the delete operation succeeds
        raise an exception if there were errors.
    """
    res = utils.get_resource_by_shortkey(resource_id)
    res_title = res.metadata.title
    res_type = res.resource_type
    resource_related_collections = [col for col in res.collections.all()]
    owners_list = [owner for owner in res.raccess.owners.all()]

    # when the most recent version of a resource in an obsolescence chain is deleted, the previous
    # version in the chain needs to be set as the "active" version by deleting "isReplacedBy"
    # relation element
    if res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).exists():
        is_version_of_res_link = \
            res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).first().value
        # fix: use rfind(), which returns -1 when '/' is absent; the original
        # rindex() raises ValueError instead, making the -1 branch unreachable
        idx = is_version_of_res_link.rfind('/')
        if idx == -1:
            obsolete_res_id = is_version_of_res_link
        else:
            obsolete_res_id = is_version_of_res_link[idx + 1:]
        obsolete_res = utils.get_resource_by_shortkey(obsolete_res_id)
        if obsolete_res.metadata.relations.all().filter(type=RelationTypes.isReplacedBy).exists():
            eid = obsolete_res.metadata.relations.all().filter(type=RelationTypes.isReplacedBy).first().id
            obsolete_res.metadata.delete_element('relation', eid)
            # also make this obsoleted resource editable if not published now that it becomes the latest version
            if not obsolete_res.raccess.published:
                obsolete_res.raccess.immutable = False
                obsolete_res.raccess.save()

    for res_in_col in res.resources.all():
        # res being deleted is a collection resource - delete isPartOf relation of all resources that are part of the
        # collection
        if res_in_col.metadata.relations.filter(type='isPartOf', value__endswith=res.short_id).exists():
            res_in_col.metadata.relations.filter(type='isPartOf', value__endswith=res.short_id).delete()
            set_dirty_bag_flag(res_in_col)

    for collection_res in resource_related_collections:
        # res being deleted is part of one or more collections - delete hasPart relation for all those collections
        collection_res.metadata.relations.filter(type='hasPart', value__endswith=res.short_id).delete()
        set_dirty_bag_flag(collection_res)

    res.delete()
    if request_username:
        # if the deleted resource is part of any collection resource, then for each of those collection
        # create a CollectionDeletedResource object which can then be used to list collection deleted
        # resources on collection resource landing page
        for collection_res in resource_related_collections:
            o = CollectionDeletedResource.objects.create(
                resource_title=res_title,
                deleted_by=User.objects.get(username=request_username),
                resource_id=resource_id,
                resource_type=res_type,
                collection=collection_res
            )
            o.resource_owners.add(*owners_list)

    # return the page URL to redirect to after resource deletion task is complete
    return '/my-resources/'
@shared_task
def update_web_services(services_url, api_token, timeout, publish_urls, res_id):
    """Update web services hosted by GeoServer and HydroServer.

    This function sends a resource id to the HydroShare web services manager
    application, which will check the current status of the resource and register
    or unregister services hosted by GeoServer and HydroServer.

    The HydroShare web services manager will return a list of endpoint URLs
    for both the resource and individual aggregations. If publish_urls is set to
    True, these endpoints will be added to the extra metadata fields of the
    resource and aggregations.

    :param services_url: base URL of the web services manager REST API
    :param api_token: token sent in the Authorization header
    :param timeout: timeout passed to the POST request
    :param publish_urls: when True, persist returned endpoint URLs as metadata
    :param res_id: short id of the resource to (un)register
    :return: the requests.Response on success, or the caught exception object
        on failure (errors are logged, not raised)
    """
    session = requests.Session()
    session.headers.update(
        {"Authorization": " ".join(("Token", str(api_token)))}
    )
    rest_url = str(services_url) + "/" + str(res_id) + "/"
    try:
        response = session.post(rest_url, timeout=timeout)
        if publish_urls and response.status_code == status.HTTP_201_CREATED:
            try:
                resource = utils.get_resource_by_shortkey(res_id)
                response_content = json.loads(response.content.decode())
                # Copy resource-level endpoint URLs into extra metadata.
                for key, value in response_content["resource"].items():
                    resource.extra_metadata[key] = value
                resource.save()
                # Attach each aggregation-level URL to the matching logical file.
                for url in response_content["content"]:
                    logical_files = list(resource.logical_files)
                    # NOTE(review): layer_name.encode() yields bytes; comparing
                    # against aggregation_name values looks suspicious under
                    # Python 3 -- confirm aggregation_name is bytes here.
                    lf = logical_files[[i.aggregation_name for i in
                                        logical_files].index(
                        url["layer_name"].encode()
                    )]
                    lf.metadata.extra_metadata["Web Services URL"] = url["message"]
                    lf.metadata.save()
            except Exception as e:
                logger.error(e)
                return e
        return response
    except (requests.exceptions.RequestException, ValueError) as e:
        logger.error(e)
        return e
@shared_task
def resource_debug(resource_id):
    """Run an iRODS file-consistency check for the given resource and return
    the collected errors instead of logging them."""
    from hs_core.management.utils import check_irods_files

    res = utils.get_resource_by_shortkey(resource_id)
    return check_irods_files(res, log_errors=False, return_errors=True)
@shared_task
def unzip_task(user_pk, res_id, zip_with_rel_path, bool_remove_original, overwrite=False, auto_aggregate=False,
               ingest_metadata=False, unzip_to_folder=False):
    """Celery wrapper around hs_core.views.utils.unzip_file for the given user."""
    from hs_core.views.utils import unzip_file

    requesting_user = User.objects.get(pk=user_pk)
    unzip_file(requesting_user, res_id, zip_with_rel_path, bool_remove_original,
               overwrite, auto_aggregate, ingest_metadata, unzip_to_folder)
@shared_task
def move_aggregation_task(res_id, file_type_id, file_type, tgt_path):
    """Move an aggregation (logical file) into a new folder within a resource.

    Moves every file of the aggregation in iRODS, mirrors each move in Django,
    and flags the aggregation metadata files for regeneration.

    :param res_id: short id of the resource containing the aggregation
    :param file_type_id: primary key of the aggregation object
    :param file_type: key into FILE_TYPE_MAP naming the aggregation class
    :param tgt_path: resource-relative target folder path
    :return: the resource's absolute URL
    """
    from hs_core.views.utils import rename_irods_file_or_folder_in_django
    res = utils.get_resource_by_shortkey(res_id)
    istorage = res.get_irods_storage()
    res_files = []
    file_type_obj = FILE_TYPE_MAP[file_type]
    aggregation = file_type_obj.objects.get(id=file_type_id)
    res_files.extend(aggregation.files.all())
    # Capture the name before any files move; needed below to compute the new
    # aggregation name and to flag metadata regeneration.
    orig_aggregation_name = aggregation.aggregation_name
    for file in res_files:
        tgt_full_path = os.path.join(res.file_path, tgt_path, os.path.basename(file.storage_path))
        # Move in iRODS first, then mirror the move in Django's records.
        istorage.moveFile(file.storage_path, tgt_full_path)
        rename_irods_file_or_folder_in_django(res, file.storage_path, tgt_full_path)
    new_aggregation_name = os.path.join(tgt_path, os.path.basename(orig_aggregation_name))
    res.set_flag_to_recreate_aggregation_meta_files(orig_path=orig_aggregation_name,
                                                    new_path=new_aggregation_name)
    return res.get_absolute_url()
@celery_app.task(ignore_result=True)
def daily_odm2_sync():
    """Pull the externally maintained ODM2 variables into HydroShare's local cache."""
    ODM2Variable.sync()
@celery_app.task(ignore_result=True)
def monthly_group_membership_requests_cleanup():
    """Purge group membership requests older than roughly two months (60 days)."""
    cutoff = datetime.today() - timedelta(days=60)
    GroupMembershipRequest.objects.filter(my_date__lte=cutoff).delete()
@celery_app.task(ignore_result=True)
def daily_innactive_group_requests_cleanup():
    """Mark membership requests involving inactive users (either side) as redeemed."""
    for user_filter in ('request_from__is_active', 'invitation_to__is_active'):
        GroupMembershipRequest.objects.filter(**{user_filter: False}).update(redeemed=True)
@task_postrun.connect
def update_task_notification(sender=None, task_id=None, task=None, state=None, retval=None, **kwargs):
    """
    Updates the state of TaskNotification model when a celery task completes

    :param sender:
    :param task_id: task id
    :param task: task object
    :param state: task return state
    :param retval: task return value
    :param kwargs:
    :return:
    """
    if task.name not in settings.TASK_NAME_LIST:
        return
    if state == states.SUCCESS:
        notification_status = "completed"
    elif state in states.EXCEPTION_STATES:
        notification_status = "failed"
    elif state == states.REVOKED:
        notification_status = "aborted"
    else:
        logger.warning("Unhandled task state of {} for {}".format(state, task_id))
        return
    get_or_create_task_notification(task_id, status=notification_status, payload=retval)
@celery_app.task(ignore_result=True)
def task_notification_cleanup():
    """Weekly sweep dropping TaskNotification rows older than seven days."""
    cutoff = datetime.today() - timedelta(days=7)
    TaskNotification.objects.filter(created__lte=cutoff).delete()
|
tests: remove metadata clearing from test_runner
Signed-off-by: Alfredo Deza <3f4d00ffa77e6441ab2d23c25a618925d2383b02@redhat.com>
|
import uuid
import time
import threading
import importlib
import sys
import socket
import common
import common.rpc as rpc
import common.resource_status as resource_status
class Base_Resource(object):
    """
    Base class for an addressable resource published over the common RPC
    layer. Tracks identity (UUID, resource ID, group tag), status/error
    state, a per-address exclusive lock and an optionally loaded driver.
    """
    type = "Generic"

    def __init__(self, resID, interface, **kwargs):
        self.config = kwargs.get('config')
        self.logger = kwargs.get('logger')

        self.__uuid = str(uuid.uuid4())
        self.__resID = resID
        self.__interface = interface
        self.__groupTag = kwargs.get('groupTag', '')
        self.__status = 'INIT'
        # fix: initialize so getResourceError() cannot raise AttributeError
        # before setResourceError() was ever called
        self.__error = None
        self.__lock = None

        self.driver = None

        if kwargs.get('enableRpc', True):
            self.start()

    def start(self):
        """
        Start the RPC Server
        """
        self.rpc_server = rpc.RpcServer(name='RPC-%s' % (self.__resID),
                                        logger=self.logger)
        self.rpc_server.registerObject(self)

    def stop(self):
        """
        Stop the RPC Server
        """
        if hasattr(self, 'rpc_server'):
            self.rpc_server.rpc_stop()

    def __del__(self):
        self.stop()

    def getUUID(self):
        return self.__uuid

    def getResourceID(self):
        return self.__resID

    def getGroupTag(self):
        return self.__groupTag

    def getResourceType(self):
        return self.type

    def getResourceStatus(self):
        return self.__status

    def setResourceStatus(self, new_status):
        self.__status = new_status

        if hasattr(self, 'rpc_server'):
            self.rpc_server.notifyClients('event_status_change')

    def getResourceError(self):
        return self.__error

    def setResourceError(self, error):
        self.__error = error

        if hasattr(self, 'rpc_server'):
            self.rpc_server.notifyClients('event_resource_error')

    def getInterfaceName(self):
        """
        Returns the Resource's Controller class name

        :returns: str
        """
        return self.__interface.getInterfaceName()

    def getPort(self):
        # Return the RPC port when the server is up.
        # fix: guard against rpc_server not existing (enableRpc=False);
        # returns None when the server is absent or not running
        if hasattr(self, 'rpc_server') and self.rpc_server.rpc_isRunning():
            return self.rpc_server.rpc_getPort()

    def getProperties(self):
        driver_prop = {}

        # Append Driver properties if a Driver is loaded
        if self.driver is not None:
            driver_prop = self.driver.getProperties()
            driver_prop['driver'] = self.driver.getDriverName()

        res_prop = {
            'uuid': self.getUUID(),
            'interface': self.getInterfaceName(),
            'resourceID': self.getResourceID(),
            'resourceType': self.getResourceType(),
            'groupTag': self.getGroupTag(),
            'status': self.getResourceStatus()
        }

        # fix: only advertise network details when the RPC server exists and
        # is running (original crashed when constructed with enableRpc=False)
        if hasattr(self, 'rpc_server') and self.rpc_server.rpc_isRunning():
            res_prop['address'] = socket.gethostbyname(socket.gethostname())
            res_prop['hostname'] = socket.gethostname()
            res_prop['port'] = self.rpc_server.rpc_getPort()

        driver_prop.update(res_prop)

        return driver_prop

    #===========================================================================
    # Resource State
    #===========================================================================

    def isOpen(self):
        raise NotImplementedError

    def open(self):
        """
        Open the resource

        :returns: True if open was successful, False otherwise
        """
        raise NotImplementedError

    def close(self):
        """
        Close the resource

        :returns: True if close was successful, False otherwise
        """
        raise NotImplementedError

    def checkResourceStatus(self):
        """
        Raise an error if the resource is not ready. Used in resources that
        sub-class Base_Resource to prevent attempted data transfer when the
        resource is in a bad state.

        :raises: common.resource_status.ResourceNotReady()
        """
        if self.getResourceStatus() != resource_status.READY:
            raise resource_status.ResourceNotReady()

    def refresh(self):
        """
        Refresh the resource
        """
        raise NotImplementedError

    #===========================================================================
    # Data Transmission
    #===========================================================================

    def write(self, data):
        raise NotImplementedError

    def read(self):
        raise NotImplementedError

    def query(self, data):
        raise NotImplementedError

    #===========================================================================
    # Resource Locking
    #===========================================================================

    def lock(self):
        """
        Lock the resource for exclusive access to the IP Address of the active
        RPC connection

        :returns: True if successful, False otherwise
        """
        if not hasattr(self, 'rpc_server'):
            return False

        conn = self.rpc_server.getActiveConnection()

        if self.__lock is None:
            try:
                # fix: the original read ``self.conn`` (nonexistent attribute),
                # so acquiring the lock always failed silently
                address, _ = conn.getsockname()
                self.__lock = address
                self.logger.debug("Connection [%s] aquired resource lock", address)
                return True
            except Exception:
                return False
        else:
            return False

    def unlock(self):
        """
        Unlock the resource for general access. Must be called from the IP
        Address of the connection currently holding the lock.

        :returns: True if successful, False otherwise
        """
        if not hasattr(self, 'rpc_server'):
            return False

        conn = self.rpc_server.getActiveConnection()

        try:
            # fix: use the local connection object, not ``self.conn``
            address, _ = conn.getsockname()
            if self.__lock == address:
                self.__lock = None
                self.logger.debug("Connection [%s] released resource lock", address)
                return True
            else:
                return False
        except Exception:
            return False

    def force_unlock(self):
        """
        Force unlocks the resource even if called from an IP Address that does
        not hold the lock

        :returns: None
        """
        self.__lock = None

        if not hasattr(self, 'rpc_server'):
            return

        conn = self.rpc_server.getActiveConnection()
        try:
            # fix: use the local connection object, not ``self.conn``
            address, _ = conn.getsockname()
            self.logger.debug("Connection [%s] force released resource lock", address)
        except Exception:
            pass

    def getLockAddress(self):
        """
        Get the IP Address of the connection currently holding the resource
        lock.

        :returns: str
        """
        return self.__lock

    def hasLock(self):
        """
        Query if the current connection holds the resource lock.

        :returns: bool
        """
        if not hasattr(self, 'rpc_server'):
            return False

        conn = self.rpc_server.getActiveConnection()
        try:
            # fix: use the local connection object, not ``self.conn``
            address, _ = conn.getsockname()
            return (address == self.__lock)
        except Exception:
            return False

    #===========================================================================
    # Driver
    #===========================================================================

    def hasDriver(self):
        return self.driver is not None

    def loadDriver(self, driverName):
        """
        Load a Driver for a resource. A driver name can be specified to load a
        specific module, even if it may not be compatible with this resource.
        Reloads driver when importing, in case an update has occured.

        Example::

            instr.loadDriver('Tektronix.Oscilloscope.m_DigitalPhosphor')

        :param driverName: Module name of the desired Model
        :type driverName: str
        :returns: True if successful, False otherwise
        """
        try:
            # Check if the specified model is valid
            testModule = importlib.import_module(driverName)
            # NOTE: ``reload`` is the Python 2 builtin; use importlib.reload
            # if this code ever moves to Python 3
            reload(testModule)  # Reload the module in case anything has changed

            className = driverName.split('.')[-1]
            testClass = getattr(testModule, className)
            self.driver = testClass(self, logger=self.logger, config=self.config)
            self.driver._onLoad()

            # RPC register object (fix: guard against a missing rpc_server)
            if hasattr(self, 'rpc_server'):
                self.rpc_server.registerObject(self.driver)
                self.rpc_server.notifyClients('event_driver_loaded')

            return True

        except Exception:
            self.logger.exception('Failed to load driver: %s', driverName)
            self.unloadDriver()
            return False

    def unloadDriver(self):
        """
        If a Driver is currently loaded for the resource, unload it.

        :returns: True if successful, False otherwise
        """
        if self.driver is not None:
            try:
                self.driver._onUnload()
            except Exception:
                self.logger.exception('Exception while unloading driver')

            # RPC unregister object (fix: guard against a missing rpc_server)
            if hasattr(self, 'rpc_server'):
                self.rpc_server.unregisterObject(self.driver)
                self.rpc_server.notifyClients('event_driver_unloaded')

            del self.driver
            self.driver = None

            try:
                self.close()
            except Exception:
                pass

            return True
        else:
            return False
class ResourceNotOpen(RuntimeError):
    """Raised when an operation requires an open resource but it is not open."""
    pass
Bug fixes to base resource: guard previously unchecked calls to rpc_server.
import uuid
import time
import threading
import importlib
import sys
import socket
import common
import common.rpc as rpc
import common.resource_status as resource_status
class Base_Resource(object):
    """
    Base class for an addressable resource published over the common RPC
    layer. Tracks identity (UUID, resource ID, group tag), status/error
    state and an optionally loaded driver.
    """
    type = "Generic"

    def __init__(self, resID, interface, **kwargs):
        self.config = kwargs.get('config')
        self.logger = kwargs.get('logger')

        self.__uuid = str(uuid.uuid4())
        self.__resID = resID
        self.__interface = interface
        self.__groupTag = kwargs.get('groupTag', '')
        self.__status = 'INIT'
        # fix: initialize so getResourceError() cannot raise AttributeError
        # before setResourceError() was ever called
        self.__error = None
        self.__lock = None

        self.driver = None

        if kwargs.get('enableRpc', True):
            self.start()

    def start(self):
        """
        Start the RPC Server
        """
        self.rpc_server = rpc.RpcServer(name='RPC-%s' % (self.__resID),
                                        logger=self.logger)
        self.rpc_server.registerObject(self)

    def stop(self):
        """
        Stop the RPC Server
        """
        if hasattr(self, 'rpc_server'):
            self.rpc_server.rpc_stop()

    def __del__(self):
        self.stop()

    def getUUID(self):
        return self.__uuid

    def getResourceID(self):
        return self.__resID

    def getGroupTag(self):
        return self.__groupTag

    def getResourceType(self):
        return self.type

    def getResourceStatus(self):
        return self.__status

    def setResourceStatus(self, new_status):
        self.__status = new_status

        if hasattr(self, 'rpc_server'):
            self.rpc_server.notifyClients('event_status_change')

    def getResourceError(self):
        return self.__error

    def setResourceError(self, error):
        self.__error = error

        if hasattr(self, 'rpc_server'):
            self.rpc_server.notifyClients('event_resource_error')

    def getInterfaceName(self):
        """
        Returns the Resource's Controller class name

        :returns: str
        """
        return self.__interface.getInterfaceName()

    def getPort(self):
        # fix: comment corrected -- this returns the RPC port if the server
        # exists and is running; it does NOT start the server
        if hasattr(self, 'rpc_server') and self.rpc_server.rpc_isRunning():
            return self.rpc_server.rpc_getPort()

    def getProperties(self):
        driver_prop = {}

        # Append Driver properties if a Driver is loaded
        if self.driver is not None:
            driver_prop = self.driver.getProperties()
            driver_prop['driver'] = self.driver.getDriverName()

        res_prop = {
            'uuid': self.getUUID(),
            'interface': self.getInterfaceName(),
            'resourceID': self.getResourceID(),
            'resourceType': self.getResourceType(),
            'groupTag': self.getGroupTag(),
            'status': self.getResourceStatus()
        }

        # Only advertise network details when the RPC server is running.
        if hasattr(self, 'rpc_server') and self.rpc_server.rpc_isRunning():
            res_prop['address'] = socket.gethostbyname(socket.gethostname())
            res_prop['hostname'] = socket.gethostname()
            res_prop['port'] = self.rpc_server.rpc_getPort()

        driver_prop.update(res_prop)

        return driver_prop

    #===========================================================================
    # Resource State
    #===========================================================================

    def isOpen(self):
        raise NotImplementedError

    def open(self):
        """
        Open the resource

        :returns: True if open was successful, False otherwise
        """
        raise NotImplementedError

    def close(self):
        """
        Close the resource

        :returns: True if close was successful, False otherwise
        """
        raise NotImplementedError

    def checkResourceStatus(self):
        """
        Raise an error if the resource is not ready. Used in resources that
        sub-class Base_Resource to prevent attempted data transfer when the
        resource is in a bad state.

        :raises: common.resource_status.ResourceNotReady()
        """
        if self.getResourceStatus() != resource_status.READY:
            raise resource_status.ResourceNotReady()

    def refresh(self):
        """
        Refresh the resource
        """
        raise NotImplementedError

    #===========================================================================
    # Data Transmission
    #===========================================================================

    def write(self, data):
        raise NotImplementedError

    def read(self):
        raise NotImplementedError

    def query(self, data):
        raise NotImplementedError

    #===========================================================================
    # Driver
    #===========================================================================

    def hasDriver(self):
        return self.driver is not None

    def loadDriver(self, driverName):
        """
        Load a Driver for a resource. A driver name can be specified to load a
        specific module, even if it may not be compatible with this resource.
        Reloads driver when importing, in case an update has occured.

        Example::

            instr.loadDriver('Tektronix.Oscilloscope.m_DigitalPhosphor')

        :param driverName: Module name of the desired Model
        :type driverName: str
        :returns: True if successful, False otherwise
        """
        try:
            # Check if the specified model is valid
            testModule = importlib.import_module(driverName)
            # NOTE: ``reload`` is the Python 2 builtin; use importlib.reload
            # if this code ever moves to Python 3
            reload(testModule)  # Reload the module in case anything has changed

            className = driverName.split('.')[-1]
            testClass = getattr(testModule, className)
            self.driver = testClass(self, logger=self.logger, config=self.config)
            self.driver._onLoad()

            # RPC register object
            if hasattr(self, 'rpc_server'):
                self.rpc_server.registerObject(self.driver)
                self.rpc_server.notifyClients('event_driver_loaded')

            return True

        except Exception:
            self.logger.exception('Failed to load driver: %s', driverName)
            self.unloadDriver()
            return False

    def unloadDriver(self):
        """
        If a Driver is currently loaded for the resource, unload it.

        :returns: True if successful, False otherwise
        """
        if self.driver is not None:
            try:
                self.driver._onUnload()
            except Exception:
                self.logger.exception('Exception while unloading driver')

            # RPC unregister object
            if hasattr(self, 'rpc_server'):
                self.rpc_server.unregisterObject(self.driver)
                self.rpc_server.notifyClients('event_driver_unloaded')

            del self.driver
            self.driver = None

            try:
                self.close()
            except Exception:
                pass

            return True
        else:
            return False
class ResourceNotOpen(RuntimeError):
    """Raised when an operation requires an open resource but it is not open."""
    pass
|
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from boto import handler
from boto.resultset import ResultSet
from boto.exception import BotoClientError
from boto.s3.acl import Policy, CannedACLStrings, Grant
from boto.s3.key import Key
from boto.s3.prefix import Prefix
from boto.s3.deletemarker import DeleteMarker
from boto.s3.multipart import MultiPartUpload
from boto.s3.multipart import CompleteMultiPartUpload
from boto.s3.multidelete import MultiDeleteResult
from boto.s3.multidelete import Error
from boto.s3.bucketlistresultset import BucketListResultSet
from boto.s3.bucketlistresultset import VersionedBucketListResultSet
from boto.s3.bucketlistresultset import MultiPartUploadListResultSet
from boto.s3.lifecycle import Lifecycle
from boto.s3.tagging import Tags
from boto.s3.cors import CORSConfiguration
from boto.s3.bucketlogging import BucketLogging
from boto.s3 import website
import boto.jsonresponse
import boto.utils
import xml.sax
import xml.sax.saxutils
import re
import base64
from collections import defaultdict
from boto.compat import BytesIO, six, StringIO, urllib
# as per http://goo.gl/BDuud (02/19/2011)
class S3WebsiteEndpointTranslate(object):
    """Map an AWS region name to its S3 static-website endpoint host.

    Unknown regions fall back to the classic ``s3-website-us-east-1``
    endpoint via the defaultdict factory.
    """

    trans_region = defaultdict(lambda: 's3-website-us-east-1')

    trans_region['eu-west-1'] = 's3-website-eu-west-1'
    trans_region['us-west-1'] = 's3-website-us-west-1'
    trans_region['us-west-2'] = 's3-website-us-west-2'
    trans_region['sa-east-1'] = 's3-website-sa-east-1'
    trans_region['ap-northeast-1'] = 's3-website-ap-northeast-1'
    trans_region['ap-southeast-1'] = 's3-website-ap-southeast-1'
    trans_region['ap-southeast-2'] = 's3-website-ap-southeast-2'
    trans_region['cn-north-1'] = 's3-website.cn-north-1'

    @classmethod
    def translate_region(cls, reg):
        # fix: classmethod's first parameter conventionally named ``cls``
        # (was ``self``); behavior is unchanged for all callers
        return cls.trans_region[reg]
# Permission names accepted in S3 ACL grants (used with the Grant/Policy
# classes imported from boto.s3.acl).
S3Permissions = ['READ', 'WRITE', 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL']
class Bucket(object):
    """Represents an S3 bucket and operations on its keys."""

    # Grantee URI of the S3 log-delivery group, used for bucket-logging ACLs.
    LoggingGroup = 'http://acs.amazonaws.com/groups/s3/LogDelivery'

    # XML payload template for the request-payment configuration API;
    # %s is the Payer value.
    BucketPaymentBody = """<?xml version="1.0" encoding="UTF-8"?>
<RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Payer>%s</Payer>
</RequestPaymentConfiguration>"""

    # XML payload template for the versioning configuration API;
    # the two %s slots are Status and MfaDelete.
    VersioningBody = """<?xml version="1.0" encoding="UTF-8"?>
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>%s</Status>
<MfaDelete>%s</MfaDelete>
</VersioningConfiguration>"""

    # Regexes for extracting fields from GET ?versioning responses.
    VersionRE = '<Status>([A-Za-z]+)</Status>'
    MFADeleteRE = '<MfaDelete>([A-Za-z]+)</MfaDelete>'
def __init__(self, connection=None, name=None, key_class=Key):
    """
    :param connection: connection used for requests against this bucket
    :param name: the bucket name
    :param key_class: class instantiated for keys from lookups/listings;
        see set_key_class()
    """
    self.name = name
    self.connection = connection
    self.key_class = key_class
def __repr__(self):
    # Human-readable form, e.g. "<Bucket: mybucket>".
    return '<Bucket: %s>' % self.name
def __iter__(self):
    # Iterating a bucket pages lazily through all of its keys.
    return iter(BucketListResultSet(self))
def __contains__(self, key_name):
    """Return True if *key_name* exists in the bucket (issues a HEAD via get_key)."""
    return self.get_key(key_name) is not None
def startElement(self, name, attrs, connection):
    # SAX hook: buckets have no nested elements to process, so nothing to do.
    return None
def endElement(self, name, value, connection):
    # SAX hook: 'Name' and 'CreationDate' map to fixed attribute names;
    # any other element is stored verbatim under its own tag name.
    special = {'Name': 'name', 'CreationDate': 'creation_date'}
    setattr(self, special.get(name, name), value)
def set_key_class(self, key_class):
    """
    Set the Key class associated with this bucket. By default, this
    would be the boto.s3.key.Key class but if you want to subclass that
    for some reason this allows you to associate your new class with a
    bucket so that when you call bucket.new_key() or when you get a listing
    of keys in the bucket you will get an instances of your key class
    rather than the default.

    :type key_class: class
    :param key_class: A subclass of Key that can be more specific
    """
    self.key_class = key_class
def lookup(self, key_name, headers=None):
    """
    Deprecated: Please use get_key method.

    :type key_name: string
    :param key_name: The name of the key to retrieve

    :rtype: :class:`boto.s3.key.Key`
    :returns: A Key object from this bucket.
    """
    # Thin deprecated alias; delegates unchanged to get_key().
    return self.get_key(key_name, headers=headers)
def get_key(self, key_name, headers=None, version_id=None,
            response_headers=None, validate=True):
    """
    Check whether a particular key exists within the bucket. This method
    uses a HEAD request to check for the existence of the key.

    Returns a Key instance, or None when the key does not exist.

    :type key_name: string
    :param key_name: The name of the key to retrieve

    :type headers: dict
    :param headers: The headers to send when retrieving the key

    :type version_id: string
    :param version_id: Specific object version to look up

    :type response_headers: dict
    :param response_headers: A dictionary containing HTTP headers/values
        that will override any headers associated with the stored object
        in the response. See http://goo.gl/EWOPb for details.

    :type validate: bool
    :param validate: Verifies whether the key exists. If ``False``, this
        will not hit the service, constructing an in-memory object.
        Default is ``True``.

    :rtype: :class:`boto.s3.key.Key`
    :returns: A Key object from this bucket.
    """
    if validate is False:
        if headers or version_id or response_headers:
            raise BotoClientError(
                "When providing 'validate=False', no other params "
                "are allowed."
            )
        # ``new_key`` builds the object without contacting the service;
        # if that default ever changes, the construction belongs here.
        return self.new_key(key_name)

    params = []
    if version_id:
        params.append('versionId=%s' % version_id)
    if response_headers:
        for hdr_name, hdr_value in six.iteritems(response_headers):
            params.append('%s=%s' % (hdr_name, urllib.parse.quote(hdr_value)))

    found_key, _ = self._get_key_internal(key_name, headers, params)
    return found_key
def _get_key_internal(self, key_name, headers, query_args_l):
    """Perform the HEAD request behind :meth:`get_key`.

    :type key_name: string
    :param key_name: name of the key to probe

    :type headers: dict
    :param headers: extra HTTP headers for the request (may be None)

    :type query_args_l: list
    :param query_args_l: query-string fragments, e.g. ['versionId=...']

    :returns: a tuple ``(key_or_None, response)`` — the Key populated
        from the response headers on 2xx, or None on 404
    :raises: the provider's storage_response_error for any other status
    """
    query_args = '&'.join(query_args_l) or None
    response = self.connection.make_request('HEAD', self.name, key_name,
                                            headers=headers,
                                            query_args=query_args)
    response.read()
    # Allow any success status (2xx) - for example this lets us
    # support Range gets, which return status 206.
    # BUGFIX: use floor division. Under Python 3 (this module uses
    # ``six`` for py2/py3 compat) ``response.status / 100`` is float
    # division, so 206 gave 2.06 != 2 and Range responses incorrectly
    # raised instead of being treated as success.
    if response.status // 100 == 2:
        k = self.key_class(self)
        provider = self.connection.provider
        k.metadata = boto.utils.get_aws_metadata(response.msg, provider)
        # Copy the standard header fields onto snake_case attributes.
        for field in Key.base_fields:
            k.__dict__[field.lower().replace('-', '_')] = \
                response.getheader(field)
        # the following machinations are a workaround to the fact that
        # apache/fastcgi omits the content-length header on HEAD
        # requests when the content-length is zero.
        # See http://goo.gl/0Tdax for more details.
        clen = response.getheader('content-length')
        if clen:
            k.size = int(clen)
        else:
            k.size = 0
        k.name = key_name
        k.handle_version_headers(response)
        k.handle_encryption_headers(response)
        k.handle_restore_headers(response)
        k.handle_addl_headers(response.getheaders())
        return k, response
    else:
        if response.status == 404:
            return None, response
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, '')
def list(self, prefix='', delimiter='', marker='', headers=None,
         encoding_type=None):
    """Iterate over the key objects in this bucket.

    Returns a BucketListResultSet that transparently handles all the
    paging of the S3 List Objects results; simply keep iterating until
    it is exhausted. Called with no arguments it iterates every key in
    the bucket.

    Note that the Key objects yielded are parsed from the bucket
    listing XML, which carries only a subset of each key's metadata
    (no Content-Type or user metadata, for instance); issue a HEAD
    request on the key if you need those extra fields.

    :type prefix: string
    :param prefix: limit the listing to keys beginning with this
        string; e.g. prefix='/foo/' cycles only through keys that
        start with '/foo/'

    :type delimiter: string
    :param delimiter: used together with *prefix* to organize and
        browse keys hierarchically. See http://goo.gl/Xx63h for more
        details.

    :type marker: string
    :param marker: The "marker" of where you are in the result set

    :type encoding_type: string
    :param encoding_type: Requests Amazon S3 to encode the response
        keys using the given method. An object key can contain any
        Unicode character, but XML 1.0 parsers cannot handle some of
        them (e.g. ASCII values 0 to 10), so S3 can encode the keys in
        the response. Valid options: ``url``

    :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
    :return: an instance of a BucketListResultSet that handles paging, etc
    """
    result_set = BucketListResultSet(self, prefix, delimiter, marker,
                                     headers,
                                     encoding_type=encoding_type)
    return result_set
def list_versions(self, prefix='', delimiter='', key_marker='',
                  version_id_marker='', headers=None, encoding_type=None):
    """Iterate over the version objects in this bucket.

    Returns a VersionedBucketListResultSet that transparently handles
    all of the result paging from S3; keep iterating until it is
    exhausted. Called with no arguments it iterates all keys in the
    bucket.

    :type prefix: string
    :param prefix: limit the listing to keys beginning with this
        string; e.g. prefix='/foo/' cycles only through keys that
        start with '/foo/'

    :type delimiter: string
    :param delimiter: used together with *prefix* to organize and
        browse keys hierarchically. See:
        http://aws.amazon.com/releasenotes/Amazon-S3/213
        for more details.

    :type key_marker: string
    :param key_marker: The "marker" of where you are in the result set

    :type version_id_marker: string
    :param version_id_marker: the version-id marker to resume from

    :type encoding_type: string
    :param encoding_type: Requests Amazon S3 to encode the response
        keys using the given method, since XML 1.0 parsers cannot
        handle some characters keys may contain (ASCII 0 to 10).
        Valid options: ``url``

    :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
    :return: an instance of a BucketListResultSet that handles paging, etc
    """
    result_set = VersionedBucketListResultSet(self, prefix, delimiter,
                                              key_marker,
                                              version_id_marker, headers,
                                              encoding_type=encoding_type)
    return result_set
def list_multipart_uploads(self, key_marker='',
                           upload_id_marker='',
                           headers=None, encoding_type=None):
    """Iterate over the in-progress multipart uploads in this bucket.

    Returns a MultiPartUploadListResultSet that transparently handles
    all of the result paging from S3; keep iterating until it is
    exhausted.

    :type key_marker: string
    :param key_marker: The "marker" of where you are in the result set

    :type upload_id_marker: string
    :param upload_id_marker: The upload identifier

    :type encoding_type: string
    :param encoding_type: Requests Amazon S3 to encode the response
        keys using the given method, since XML 1.0 parsers cannot
        handle some characters keys may contain (ASCII 0 to 10).
        Valid options: ``url``

    :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
    :return: an instance of a BucketListResultSet that handles paging, etc
    """
    result_set = MultiPartUploadListResultSet(self, key_marker,
                                              upload_id_marker, headers,
                                              encoding_type=encoding_type)
    return result_set
def _get_all_query_args(self, params, initial_query_string=''):
    """Build the query string for a bucket GET from *params*.

    ``None`` values are dropped, underscores become dashes, the legacy
    ``maxkeys`` spelling is normalized to ``max-keys``, and both names
    and values are URL-quoted. Pairs are emitted in sorted key order,
    optionally preceded by *initial_query_string*.
    """
    pieces = []
    if initial_query_string:
        pieces.append(initial_query_string)
    for name in sorted(params):
        value = params[name]
        if value is None:
            continue
        name = name.replace('_', '-')
        if name == 'maxkeys':
            name = 'max-keys'
        if not isinstance(value, six.string_types + (six.binary_type,)):
            value = six.text_type(value)
        if not isinstance(value, six.binary_type):
            value = value.encode('utf-8')
        if value:
            pieces.append(u'%s=%s' % (
                urllib.parse.quote(name),
                urllib.parse.quote(value)
            ))
    return '&'.join(pieces)
def _get_all(self, element_map, initial_query_string='',
             headers=None, **params):
    """Perform a GET on the bucket and parse the XML into a ResultSet.

    :param element_map: (tag, class) pairs describing which classes to
        instantiate for the parsed XML elements
    :raises: the provider's storage_response_error on non-200 status
    """
    query_args = self._get_all_query_args(
        params,
        initial_query_string=initial_query_string
    )
    response = self.connection.make_request('GET', self.name,
                                            headers=headers,
                                            query_args=query_args)
    body = response.read()
    boto.log.debug(body)
    if response.status != 200:
        raise self.connection.provider.storage_response_error(
            response.status, response.reason, body)
    results = ResultSet(element_map)
    sax_handler = handler.XmlHandler(results, self)
    if not isinstance(body, bytes):
        body = body.encode('utf-8')
    xml.sax.parseString(body, sax_handler)
    return results
def validate_kwarg_names(self, kwargs, names):
    """Ensure every key in *kwargs* appears in *names*.

    :type kwargs: dict
    :param kwargs: keyword arguments to check

    :type names: list
    :param names: the allowed argument names

    :raises TypeError: if any unexpected argument is present
    """
    allowed = set(names)
    for arg_name in kwargs:
        if arg_name not in allowed:
            raise TypeError('Invalid argument "%s"!' % arg_name)
def get_all_keys(self, headers=None, **params):
    """Low-level bucket listing, closely modeling the S3 GET Bucket API.

    Unlike :meth:`list`, paging of results is the caller's
    responsibility.

    :type max_keys: int
    :param max_keys: The maximum number of keys to retrieve

    :type prefix: string
    :param prefix: The prefix of the keys you want to retrieve

    :type marker: string
    :param marker: The "marker" of where you are in the result set

    :type delimiter: string
    :param delimiter: optional Unicode string; keys containing the
        same string between the prefix and the first occurrence of the
        delimiter are rolled up into a single CommonPrefixes element
        and not returned elsewhere in the response

    :type encoding_type: string
    :param encoding_type: Requests Amazon S3 to encode the response
        keys using the given method (XML 1.0 cannot represent some
        characters keys may contain). Valid options: ``url``

    :rtype: ResultSet
    :return: The result from S3 listing the keys requested
    """
    allowed = ['maxkeys', 'max_keys', 'prefix', 'marker', 'delimiter',
               'encoding_type']
    self.validate_kwarg_names(params, allowed)
    element_map = [('Contents', self.key_class),
                   ('CommonPrefixes', Prefix)]
    return self._get_all(element_map, '', headers, **params)
def get_all_versions(self, headers=None, **params):
    """Low-level, version-aware bucket listing.

    Closely models the S3 GET Bucket versions API; paging of results
    is the caller's responsibility (see :meth:`list` for the
    higher-level alternative).

    :type max_keys: int
    :param max_keys: The maximum number of keys to retrieve

    :type prefix: string
    :param prefix: The prefix of the keys you want to retrieve

    :type key_marker: string
    :param key_marker: where you are in the result set with respect to
        keys

    :type version_id_marker: string
    :param version_id_marker: where you are in the result set with
        respect to version-id's

    :type delimiter: string
    :param delimiter: optional Unicode string; keys containing the
        same string between the prefix and the first occurrence of the
        delimiter are rolled up into a single CommonPrefixes element
        and not returned elsewhere in the response

    :type encoding_type: string
    :param encoding_type: Requests Amazon S3 to encode the response
        keys using the given method (XML 1.0 cannot represent some
        characters keys may contain). Valid options: ``url``

    :rtype: ResultSet
    :return: The result from S3 listing the keys requested
    """
    self.validate_get_all_versions_params(params)
    element_map = [('Version', self.key_class),
                   ('CommonPrefixes', Prefix),
                   ('DeleteMarker', DeleteMarker)]
    return self._get_all(element_map, 'versions', headers, **params)
def validate_get_all_versions_params(self, params):
    """Check the kwargs accepted by :meth:`get_all_versions`.

    Subclasses that accept a different parameter set override this.

    :type params: dict
    :param params: Parameters to validate.
    """
    allowed = ['maxkeys', 'max_keys', 'prefix', 'key_marker',
               'version_id_marker', 'delimiter', 'encoding_type']
    self.validate_kwarg_names(params, allowed)
def get_all_multipart_uploads(self, headers=None, **params):
    """Low-level listing of active multipart uploads in this bucket.

    Closely models the S3 List Multipart Uploads API; paging of
    results is the caller's responsibility (see :meth:`list` for the
    higher-level alternative).

    :type max_uploads: int
    :param max_uploads: The maximum number of uploads to retrieve.
        Default value is 1000.

    :type key_marker: string
    :param key_marker: Together with upload_id_marker, specifies the
        multipart upload after which listing should begin. If
        upload_id_marker is not specified, only the keys
        lexicographically greater than the specified key_marker will
        be included in the list. If upload_id_marker is specified, any
        multipart uploads for a key equal to the key_marker might also
        be included, provided those uploads have upload IDs
        lexicographically greater than the specified upload_id_marker.

    :type upload_id_marker: string
    :param upload_id_marker: Together with key-marker, specifies the
        multipart upload after which listing should begin. If
        key_marker is not specified, the upload_id_marker parameter is
        ignored. Otherwise, any multipart uploads for a key equal to
        the key_marker might be included in the list only if they have
        an upload ID lexicographically greater than the specified
        upload_id_marker.

    :type encoding_type: string
    :param encoding_type: Requests Amazon S3 to encode the response
        keys using the given method (XML 1.0 cannot represent some
        characters keys may contain). Valid options: ``url``

    :type delimiter: string
    :param delimiter: Character you use to group keys. All keys that
        contain the same string between the prefix, if specified, and
        the first occurrence of the delimiter after the prefix are
        grouped under a single CommonPrefixes result element and are
        not returned elsewhere in the response. Without a prefix the
        substring starts at the beginning of the key.

    :type prefix: string
    :param prefix: Lists in-progress uploads only for keys beginning
        with this prefix; useful to group keys much like folders in a
        file system.

    :rtype: ResultSet
    :return: The result from S3 listing the uploads requested
    """
    allowed = ['max_uploads', 'key_marker', 'upload_id_marker',
               'encoding_type', 'delimiter', 'prefix']
    self.validate_kwarg_names(params, allowed)
    element_map = [('Upload', MultiPartUpload),
                   ('CommonPrefixes', Prefix)]
    return self._get_all(element_map, 'uploads', headers, **params)
def new_key(self, key_name=None):
    """Create (in memory) a new key object for this bucket.

    No request is made to the service.

    :type key_name: string
    :param key_name: The name of the key to create

    :rtype: :class:`boto.s3.key.Key` or subclass
    :returns: An instance of the newly created key object

    :raises ValueError: if *key_name* is empty or None
    """
    if key_name:
        return self.key_class(self, key_name)
    raise ValueError('Empty key names are not allowed')
def generate_url(self, expires_in, method='GET', headers=None,
                 force_http=False, response_headers=None,
                 expires_in_absolute=False):
    """Delegate signed-URL generation for this bucket to the connection."""
    conn = self.connection
    return conn.generate_url(expires_in, method, self.name,
                             headers=headers,
                             force_http=force_http,
                             response_headers=response_headers,
                             expires_in_absolute=expires_in_absolute)
def delete_keys(self, keys, quiet=False, mfa_token=None, headers=None):
    """
    Deletes a set of keys using S3's Multi-object delete API. If a
    VersionID is specified for that key then that version is removed.
    Returns a MultiDeleteResult Object, which contains Deleted
    and Error elements for each key you ask to delete.

    :type keys: list
    :param keys: A list of either key_names or (key_name, versionid) pairs
        or a list of Key instances.

    :type quiet: boolean
    :param quiet: In quiet mode the response includes only keys
        where the delete operation encountered an error. For a
        successful deletion, the operation does not return any
        information about the delete in the response body.

    :type mfa_token: tuple or list of strings
    :param mfa_token: A tuple or list consisting of the serial
        number from the MFA device and the current value of the
        six-digit token associated with the device. This value is
        required anytime you are deleting versioned objects from a
        bucket that has the MFADelete option on the bucket.

    :returns: An instance of MultiDeleteResult
    """
    # The iterator is shared across batches: each call to delete_keys2
    # consumes up to 1000 entries (the S3 per-request limit).
    ikeys = iter(keys)
    result = MultiDeleteResult(self)
    provider = self.connection.provider
    query_args = 'delete'

    def delete_keys2(hdrs):
        # Build and POST one Multi-object Delete XML document covering
        # up to 1000 keys drawn from ``ikeys``. Returns True when a
        # full batch was sent (more keys may remain), False when the
        # iterator was already exhausted.
        hdrs = hdrs or {}
        data = u"""<?xml version="1.0" encoding="UTF-8"?>"""
        data += u"<Delete>"
        if quiet:
            data += u"<Quiet>true</Quiet>"
        count = 0
        while count < 1000:
            try:
                key = next(ikeys)
            except StopIteration:
                break
            # Accept plain names, (name, version_id) pairs, or
            # Key/DeleteMarker instances with a non-empty name.
            if isinstance(key, six.string_types):
                key_name = key
                version_id = None
            elif isinstance(key, tuple) and len(key) == 2:
                key_name, version_id = key
            elif (isinstance(key, Key) or isinstance(key, DeleteMarker)) and key.name:
                key_name = key.name
                version_id = key.version_id
            else:
                # Unusable entry: record an Error element on the result
                # rather than failing the whole batch, and keep going.
                if isinstance(key, Prefix):
                    key_name = key.name
                    code = 'PrefixSkipped'   # Don't delete Prefix
                else:
                    key_name = repr(key)   # try get a string
                    code = 'InvalidArgument'  # other unknown type
                message = 'Invalid. No delete action taken for this object.'
                error = Error(key_name, code=code, message=message)
                result.errors.append(error)
                continue
            count += 1
            data += u"<Object><Key>%s</Key>" % xml.sax.saxutils.escape(key_name)
            if version_id:
                data += u"<VersionId>%s</VersionId>" % version_id
            data += u"</Object>"
        data += u"</Delete>"
        if count <= 0:
            return False  # no more
        data = data.encode('utf-8')
        # S3 requires a Content-MD5 header on Multi-object Delete.
        fp = BytesIO(data)
        md5 = boto.utils.compute_md5(fp)
        hdrs['Content-MD5'] = md5[1]
        hdrs['Content-Type'] = 'text/xml'
        if mfa_token:
            hdrs[provider.mfa_header] = ' '.join(mfa_token)
        response = self.connection.make_request('POST', self.name,
                                                headers=hdrs,
                                                query_args=query_args,
                                                data=data)
        body = response.read()
        if response.status == 200:
            h = handler.XmlHandler(result, self)
            if not isinstance(body, bytes):
                body = body.encode('utf-8')
            xml.sax.parseString(body, h)
            return count >= 1000  # more?
        else:
            raise provider.storage_response_error(response.status,
                                                  response.reason,
                                                  body)
    # Keep issuing batches until one comes back smaller than 1000 keys.
    while delete_keys2(headers):
        pass
    return result
def delete_key(self, key_name, headers=None, version_id=None,
               mfa_token=None):
    """Delete a key from the bucket.

    When *version_id* is given, only that version of the key is
    removed.

    :type key_name: string
    :param key_name: The key name to delete

    :type version_id: string
    :param version_id: The version ID (optional)

    :type mfa_token: tuple or list of strings
    :param mfa_token: A tuple or list consisting of the serial number
        from the MFA device and the current value of the six-digit
        token associated with the device. Required anytime you are
        deleting versioned objects from a bucket that has the
        MFADelete option on the bucket.

    :rtype: :class:`boto.s3.key.Key` or subclass
    :returns: A key object describing what was deleted: whether a
        delete_marker was created or removed and which version_id the
        delete created or removed.

    :raises ValueError: if *key_name* is empty or None
    """
    if not key_name:
        raise ValueError('Empty key names are not allowed')
    return self._delete_key_internal(
        key_name,
        headers=headers,
        version_id=version_id,
        mfa_token=mfa_token,
        query_args_l=None,
    )
def _delete_key_internal(self, key_name, headers=None, version_id=None,
mfa_token=None, query_args_l=None):
query_args_l = query_args_l or []
provider = self.connection.provider
if version_id:
query_args_l.append('versionId=%s' % version_id)
query_args = '&'.join(query_args_l) or None
if mfa_token:
if not headers:
headers = {}
headers[provider.mfa_header] = ' '.join(mfa_token)
response = self.connection.make_request('DELETE', self.name, key_name,
headers=headers,
query_args=query_args)
body = response.read()
if response.status != 204:
raise provider.storage_response_error(response.status,
response.reason, body)
else:
# return a key object with information on what was deleted.
k = self.key_class(self)
k.name = key_name
k.handle_version_headers(response)
k.handle_addl_headers(response.getheaders())
return k
def copy_key(self, new_key_name, src_bucket_name,
             src_key_name, metadata=None, src_version_id=None,
             storage_class='STANDARD', preserve_acl=False,
             encrypt_key=False, headers=None, query_args=None):
    """
    Create a new key in the bucket by copying another existing key.

    :type new_key_name: string
    :param new_key_name: The name of the new key

    :type src_bucket_name: string
    :param src_bucket_name: The name of the source bucket

    :type src_key_name: string
    :param src_key_name: The name of the source key

    :type src_version_id: string
    :param src_version_id: The version id for the key. This param
        is optional. If not specified, the newest version of the
        key will be copied.

    :type metadata: dict
    :param metadata: Metadata to be associated with new key. If
        metadata is supplied, it will replace the metadata of the
        source key being copied. If no metadata is supplied, the
        source key's metadata will be copied to the new key.

    :type storage_class: string
    :param storage_class: The storage class of the new key. By
        default, the new key will use the standard storage class.
        Possible values are: STANDARD | REDUCED_REDUNDANCY

    :type preserve_acl: bool
    :param preserve_acl: If True, the ACL from the source key will
        be copied to the destination key. If False, the
        destination key will have the default ACL. Note that
        preserving the ACL in the new key object will require two
        additional API calls to S3, one to retrieve the current
        ACL and one to set that ACL on the new object. If you
        don't care about the ACL, a value of False will be
        significantly more efficient.

    :type encrypt_key: bool
    :param encrypt_key: If True, the new copy of the object will
        be encrypted on the server-side by S3 and will be stored
        in an encrypted form while at rest in S3.

    :type headers: dict
    :param headers: A dictionary of header name/value pairs.

    :type query_args: string
    :param query_args: A string of additional querystring arguments
        to append to the request

    :rtype: :class:`boto.s3.key.Key` or subclass
    :returns: An instance of the newly created key object
    """
    # NOTE(review): when a caller supplies *headers*, the dict is
    # mutated in place below.
    headers = headers or {}
    provider = self.connection.provider
    src_key_name = boto.utils.get_utf8_value(src_key_name)
    if preserve_acl:
        # Fetch the source ACL up front so it can be re-applied to the
        # new key after the copy succeeds.
        if self.name == src_bucket_name:
            src_bucket = self
        else:
            src_bucket = self.connection.get_bucket(
                src_bucket_name, validate=False)
        acl = src_bucket.get_xml_acl(src_key_name)
    if encrypt_key:
        headers[provider.server_side_encryption_header] = 'AES256'
    # The copy source is expressed as "bucket/quoted-key[?versionId=...]"
    # in a single request header.
    src = '%s/%s' % (src_bucket_name, urllib.parse.quote(src_key_name))
    if src_version_id:
        src += '?versionId=%s' % src_version_id
    headers[provider.copy_source_header] = str(src)
    # make sure storage_class_header key exists before accessing it
    if provider.storage_class_header and storage_class:
        headers[provider.storage_class_header] = storage_class
    if metadata is not None:
        headers[provider.metadata_directive_header] = 'REPLACE'
        headers = boto.utils.merge_meta(headers, metadata, provider)
    elif not query_args:  # Can't use this header with multi-part copy.
        headers[provider.metadata_directive_header] = 'COPY'
    response = self.connection.make_request('PUT', self.name, new_key_name,
                                            headers=headers,
                                            query_args=query_args)
    body = response.read()
    if response.status == 200:
        key = self.new_key(new_key_name)
        h = handler.XmlHandler(key, self)
        if not isinstance(body, bytes):
            body = body.encode('utf-8')
        xml.sax.parseString(body, h)
        # A 200 response can still carry an error document for copies;
        # detect it via the parsed Error element.
        if hasattr(key, 'Error'):
            raise provider.storage_copy_error(key.Code, key.Message, body)
        key.handle_version_headers(response)
        key.handle_addl_headers(response.getheaders())
        if preserve_acl:
            self.set_xml_acl(acl, new_key_name)
        return key
    else:
        raise provider.storage_response_error(response.status,
                                              response.reason, body)
def set_canned_acl(self, acl_str, key_name='', headers=None,
                   version_id=None):
    """Apply one of the predefined (canned) ACLs.

    :param acl_str: must be a member of ``CannedACLStrings``
    :param key_name: key to operate on; empty string targets the bucket
    :param version_id: optional version to apply the ACL to
    """
    assert acl_str in CannedACLStrings
    acl_header = self.connection.provider.acl_header
    if headers:
        headers[acl_header] = acl_str
    else:
        headers = {acl_header: acl_str}
    query_args = 'acl'
    if version_id:
        query_args += '&versionId=%s' % version_id
    response = self.connection.make_request('PUT', self.name, key_name,
                                            headers=headers,
                                            query_args=query_args)
    body = response.read()
    if response.status != 200:
        raise self.connection.provider.storage_response_error(
            response.status, response.reason, body)
def get_xml_acl(self, key_name='', headers=None, version_id=None):
    """Fetch the raw ACL XML document for the bucket or *key_name*.

    :returns: the response body (the ACL XML)
    :raises: the provider's storage_response_error on non-200 status
    """
    query_args = 'acl'
    if version_id:
        query_args += '&versionId=%s' % version_id
    response = self.connection.make_request('GET', self.name, key_name,
                                            query_args=query_args,
                                            headers=headers)
    body = response.read()
    if response.status == 200:
        return body
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None,
                query_args='acl'):
    """Upload *acl_str* (an ACL XML document) for the bucket or key.

    :param acl_str: the ACL XML, str or bytes (encoded to UTF-8)
    :raises: the provider's storage_response_error on non-200 status
    """
    if version_id:
        query_args += '&versionId=%s' % version_id
    payload = acl_str
    if not isinstance(payload, bytes):
        payload = payload.encode('utf-8')
    response = self.connection.make_request('PUT', self.name, key_name,
                                            data=payload,
                                            query_args=query_args,
                                            headers=headers)
    body = response.read()
    if response.status != 200:
        raise self.connection.provider.storage_response_error(
            response.status, response.reason, body)
def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None):
    """Set the ACL from either a Policy object or a canned ACL string."""
    if isinstance(acl_or_str, Policy):
        setter = self.set_xml_acl
        payload = acl_or_str.to_xml()
    else:
        setter = self.set_canned_acl
        payload = acl_or_str
    setter(payload, key_name, headers, version_id)
def get_acl(self, key_name='', headers=None, version_id=None):
    """Fetch and parse the ACL policy for the bucket or *key_name*.

    :returns: a Policy object built from the ACL XML
    :raises: the provider's storage_response_error on non-200 status
    """
    query_args = 'acl'
    if version_id:
        query_args += '&versionId=%s' % version_id
    response = self.connection.make_request('GET', self.name, key_name,
                                            query_args=query_args,
                                            headers=headers)
    body = response.read()
    if response.status != 200:
        raise self.connection.provider.storage_response_error(
            response.status, response.reason, body)
    policy = Policy(self)
    sax_handler = handler.XmlHandler(policy, self)
    if not isinstance(body, bytes):
        body = body.encode('utf-8')
    xml.sax.parseString(body, sax_handler)
    return policy
def set_subresource(self, subresource, value, key_name='', headers=None,
                    version_id=None):
    """Set a subresource for a bucket or key.

    :type subresource: string
    :param subresource: The subresource to set.

    :type value: string
    :param value: The value of the subresource.

    :type key_name: string
    :param key_name: The key to operate on, or None to operate on the
        bucket.

    :type headers: dict
    :param headers: Additional HTTP headers to include in the request.

    :type version_id: string
    :param version_id: Optional. The version id of the key to operate
        on. If not specified, operate on the newest version.

    :raises TypeError: if *subresource* is falsy
    """
    if not subresource:
        raise TypeError('set_subresource called with subresource=None')
    query_args = subresource
    if version_id:
        query_args += '&versionId=%s' % version_id
    payload = value
    if not isinstance(payload, bytes):
        payload = payload.encode('utf-8')
    response = self.connection.make_request('PUT', self.name, key_name,
                                            data=payload,
                                            query_args=query_args,
                                            headers=headers)
    body = response.read()
    if response.status != 200:
        raise self.connection.provider.storage_response_error(
            response.status, response.reason, body)
def get_subresource(self, subresource, key_name='', headers=None,
                    version_id=None):
    """Get a subresource for a bucket or key.

    :type subresource: string
    :param subresource: The subresource to get.

    :type key_name: string
    :param key_name: The key to operate on, or None to operate on the
        bucket.

    :type headers: dict
    :param headers: Additional HTTP headers to include in the request.

    :type version_id: string
    :param version_id: Optional. The version id of the key to operate
        on. If not specified, operate on the newest version.

    :rtype: string
    :returns: The value of the subresource.

    :raises TypeError: if *subresource* is falsy
    """
    if not subresource:
        raise TypeError('get_subresource called with subresource=None')
    query_args = subresource
    if version_id:
        query_args += '&versionId=%s' % version_id
    response = self.connection.make_request('GET', self.name, key_name,
                                            query_args=query_args,
                                            headers=headers)
    body = response.read()
    if response.status == 200:
        return body
    raise self.connection.provider.storage_response_error(
        response.status, response.reason, body)
def make_public(self, recursive=False, headers=None):
    """Grant 'public-read' on the bucket and, optionally, every key.

    :param recursive: when True, also apply the canned ACL to each
        key in the bucket (one request per key)
    """
    self.set_canned_acl('public-read', headers=headers)
    if not recursive:
        return
    for key in self:
        self.set_canned_acl('public-read', key.name, headers=headers)
def add_email_grant(self, permission, email_address,
                    recursive=False, headers=None):
    """Grant *permission* to the AWS account with *email_address*.

    Fetches the current ACL, appends an email grant and writes the
    updated ACL back to S3.

    :type permission: string
    :param permission: The permission being granted. Should be one of:
        (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).

    :type email_address: string
    :param email_address: The email address associated with the AWS
        account you are granting the permission to.

    :type recursive: boolean
    :param recursive: Whether to also apply the grant to every key in
        the bucket (one pass per key). CAUTION: with many keys this
        could take a long time!
    """
    if permission not in S3Permissions:
        raise self.connection.provider.storage_permissions_error(
            'Unknown Permission: %s' % permission)
    policy = self.get_acl(headers=headers)
    policy.acl.add_email_grant(permission, email_address)
    self.set_acl(policy, headers=headers)
    if not recursive:
        return
    for key in self:
        key.add_email_grant(permission, email_address, headers=headers)
def add_user_grant(self, permission, user_id, recursive=False,
                   headers=None, display_name=None):
    """Grant *permission* to the canonical AWS user *user_id*.

    Fetches the current ACL, appends a user grant and writes the
    updated ACL back to S3.

    :type permission: string
    :param permission: The permission being granted. Should be one of:
        (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).

    :type user_id: string
    :param user_id: The canonical user id associated with the AWS
        account you are granting the permission to.

    :type recursive: boolean
    :param recursive: Whether to also apply the grant to every key in
        the bucket (one pass per key). CAUTION: with many keys this
        could take a long time!

    :type display_name: string
    :param display_name: An option string containing the user's
        Display Name. Only required on Walrus.
    """
    if permission not in S3Permissions:
        raise self.connection.provider.storage_permissions_error(
            'Unknown Permission: %s' % permission)
    policy = self.get_acl(headers=headers)
    policy.acl.add_user_grant(permission, user_id,
                              display_name=display_name)
    self.set_acl(policy, headers=headers)
    if not recursive:
        return
    for key in self:
        key.add_user_grant(permission, user_id, headers=headers,
                           display_name=display_name)
def list_grants(self, headers=None):
    """Return the list of Grant objects from the bucket's current ACL."""
    return self.get_acl(headers=headers).acl.grants
def get_location(self):
    """Return the bucket's LocationConstraint.

    :rtype: str
    :return: The LocationConstraint for the bucket, or the empty
        string if no constraint was specified when the bucket was
        created.
    """
    response = self.connection.make_request('GET', self.name,
                                            query_args='location')
    body = response.read()
    if response.status != 200:
        raise self.connection.provider.storage_response_error(
            response.status, response.reason, body)
    results = ResultSet(self)
    sax_handler = handler.XmlHandler(results, self)
    if not isinstance(body, bytes):
        body = body.encode('utf-8')
    xml.sax.parseString(body, sax_handler)
    return results.LocationConstraint
def set_xml_logging(self, logging_str, headers=None):
"""
Set logging on a bucket directly to the given xml string.
:type logging_str: unicode string
:param logging_str: The XML for the bucketloggingstatus which
will be set. The string will be converted to utf-8 before
it is sent. Usually, you will obtain this XML from the
BucketLogging object.
:rtype: bool
:return: True if ok or raises an exception.
"""
body = logging_str
if not isinstance(body, bytes):
body = body.encode('utf-8')
response = self.connection.make_request('PUT', self.name, data=body,
query_args='logging', headers=headers)
body = response.read()
if response.status == 200:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def enable_logging(self, target_bucket, target_prefix='',
grants=None, headers=None):
"""
Enable logging on a bucket.
:type target_bucket: bucket or string
:param target_bucket: The bucket to log to.
:type target_prefix: string
:param target_prefix: The prefix which should be prepended to the
generated log files written to the target_bucket.
:type grants: list of Grant objects
:param grants: A list of extra permissions which will be granted on
the log files which are created.
:rtype: bool
:return: True if ok or raises an exception.
"""
if isinstance(target_bucket, Bucket):
target_bucket = target_bucket.name
blogging = BucketLogging(target=target_bucket, prefix=target_prefix,
grants=grants)
return self.set_xml_logging(blogging.to_xml(), headers=headers)
def disable_logging(self, headers=None):
"""
Disable logging on a bucket.
:rtype: bool
:return: True if ok or raises an exception.
"""
blogging = BucketLogging()
return self.set_xml_logging(blogging.to_xml(), headers=headers)
def get_logging_status(self, headers=None):
"""
Get the logging status for this bucket.
:rtype: :class:`boto.s3.bucketlogging.BucketLogging`
:return: A BucketLogging object for this bucket.
"""
response = self.connection.make_request('GET', self.name,
query_args='logging', headers=headers)
body = response.read()
if response.status == 200:
blogging = BucketLogging()
h = handler.XmlHandler(blogging, self)
if not isinstance(body, bytes):
body = body.encode('utf-8')
xml.sax.parseString(body, h)
return blogging
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_as_logging_target(self, headers=None):
"""
Setup the current bucket as a logging target by granting the necessary
permissions to the LogDelivery group to write log files to this bucket.
"""
policy = self.get_acl(headers=headers)
g1 = Grant(permission='WRITE', type='Group', uri=self.LoggingGroup)
g2 = Grant(permission='READ_ACP', type='Group', uri=self.LoggingGroup)
policy.acl.add_grant(g1)
policy.acl.add_grant(g2)
self.set_acl(policy, headers=headers)
def get_request_payment(self, headers=None):
response = self.connection.make_request('GET', self.name,
query_args='requestPayment', headers=headers)
body = response.read()
if response.status == 200:
return body
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_request_payment(self, payer='BucketOwner', headers=None):
body = self.BucketPaymentBody % payer
response = self.connection.make_request('PUT', self.name, data=body,
query_args='requestPayment', headers=headers)
body = response.read()
if response.status == 200:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def configure_versioning(self, versioning, mfa_delete=False,
mfa_token=None, headers=None):
"""
Configure versioning for this bucket.
..note:: This feature is currently in beta.
:type versioning: bool
:param versioning: A boolean indicating whether version is
enabled (True) or disabled (False).
:type mfa_delete: bool
:param mfa_delete: A boolean indicating whether the
Multi-Factor Authentication Delete feature is enabled
(True) or disabled (False). If mfa_delete is enabled then
all Delete operations will require the token from your MFA
device to be passed in the request.
:type mfa_token: tuple or list of strings
:param mfa_token: A tuple or list consisting of the serial
number from the MFA device and the current value of the
six-digit token associated with the device. This value is
required when you are changing the status of the MfaDelete
property of the bucket.
"""
if versioning:
ver = 'Enabled'
else:
ver = 'Suspended'
if mfa_delete:
mfa = 'Enabled'
else:
mfa = 'Disabled'
body = self.VersioningBody % (ver, mfa)
if mfa_token:
if not headers:
headers = {}
provider = self.connection.provider
headers[provider.mfa_header] = ' '.join(mfa_token)
response = self.connection.make_request('PUT', self.name, data=body,
query_args='versioning', headers=headers)
body = response.read()
if response.status == 200:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def get_versioning_status(self, headers=None):
"""
Returns the current status of versioning on the bucket.
:rtype: dict
:returns: A dictionary containing a key named 'Versioning'
that can have a value of either Enabled, Disabled, or
Suspended. Also, if MFADelete has ever been enabled on the
bucket, the dictionary will contain a key named
'MFADelete' which will have a value of either Enabled or
Suspended.
"""
response = self.connection.make_request('GET', self.name,
query_args='versioning', headers=headers)
body = response.read()
if not isinstance(body, six.string_types):
body = body.decode('utf-8')
boto.log.debug(body)
if response.status == 200:
d = {}
ver = re.search(self.VersionRE, body)
if ver:
d['Versioning'] = ver.group(1)
mfa = re.search(self.MFADeleteRE, body)
if mfa:
d['MfaDelete'] = mfa.group(1)
return d
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def configure_lifecycle(self, lifecycle_config, headers=None):
"""
Configure lifecycle for this bucket.
:type lifecycle_config: :class:`boto.s3.lifecycle.Lifecycle`
:param lifecycle_config: The lifecycle configuration you want
to configure for this bucket.
"""
xml = lifecycle_config.to_xml()
#xml = xml.encode('utf-8')
fp = StringIO(xml)
md5 = boto.utils.compute_md5(fp)
if headers is None:
headers = {}
headers['Content-MD5'] = md5[1]
headers['Content-Type'] = 'text/xml'
response = self.connection.make_request('PUT', self.name,
data=fp.getvalue(),
query_args='lifecycle',
headers=headers)
body = response.read()
if response.status == 200:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def get_lifecycle_config(self, headers=None):
"""
Returns the current lifecycle configuration on the bucket.
:rtype: :class:`boto.s3.lifecycle.Lifecycle`
:returns: A LifecycleConfig object that describes all current
lifecycle rules in effect for the bucket.
"""
response = self.connection.make_request('GET', self.name,
query_args='lifecycle', headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 200:
lifecycle = Lifecycle()
h = handler.XmlHandler(lifecycle, self)
if not isinstance(body, bytes):
body = body.encode('utf-8')
xml.sax.parseString(body, h)
return lifecycle
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def delete_lifecycle_configuration(self, headers=None):
"""
Removes all lifecycle configuration from the bucket.
"""
response = self.connection.make_request('DELETE', self.name,
query_args='lifecycle',
headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 204:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def configure_website(self, suffix=None, error_key=None,
redirect_all_requests_to=None,
routing_rules=None,
headers=None):
"""
Configure this bucket to act as a website
:type suffix: str
:param suffix: Suffix that is appended to a request that is for a
"directory" on the website endpoint (e.g. if the suffix is
index.html and you make a request to samplebucket/images/
the data that is returned will be for the object with the
key name images/index.html). The suffix must not be empty
and must not include a slash character.
:type error_key: str
:param error_key: The object key name to use when a 4XX class
error occurs. This is optional.
:type redirect_all_requests_to: :class:`boto.s3.website.RedirectLocation`
:param redirect_all_requests_to: Describes the redirect behavior for
every request to this bucket's website endpoint. If this value is
non None, no other values are considered when configuring the
website configuration for the bucket. This is an instance of
``RedirectLocation``.
:type routing_rules: :class:`boto.s3.website.RoutingRules`
:param routing_rules: Object which specifies conditions
and redirects that apply when the conditions are met.
"""
config = website.WebsiteConfiguration(
suffix, error_key, redirect_all_requests_to,
routing_rules)
return self.set_website_configuration(config, headers=headers)
def set_website_configuration(self, config, headers=None):
"""
:type config: boto.s3.website.WebsiteConfiguration
:param config: Configuration data
"""
return self.set_website_configuration_xml(config.to_xml(),
headers=headers)
def set_website_configuration_xml(self, xml, headers=None):
"""Upload xml website configuration"""
response = self.connection.make_request('PUT', self.name, data=xml,
query_args='website',
headers=headers)
body = response.read()
if response.status == 200:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def get_website_configuration(self, headers=None):
"""
Returns the current status of website configuration on the bucket.
:rtype: dict
:returns: A dictionary containing a Python representation
of the XML response from S3. The overall structure is:
* WebsiteConfiguration
* IndexDocument
* Suffix : suffix that is appended to request that
is for a "directory" on the website endpoint
* ErrorDocument
* Key : name of object to serve when an error occurs
"""
return self.get_website_configuration_with_xml(headers)[0]
def get_website_configuration_obj(self, headers=None):
"""Get the website configuration as a
:class:`boto.s3.website.WebsiteConfiguration` object.
"""
config_xml = self.get_website_configuration_xml(headers=headers)
config = website.WebsiteConfiguration()
h = handler.XmlHandler(config, self)
xml.sax.parseString(config_xml, h)
return config
def get_website_configuration_with_xml(self, headers=None):
"""
Returns the current status of website configuration on the bucket as
unparsed XML.
:rtype: 2-Tuple
:returns: 2-tuple containing:
1) A dictionary containing a Python representation \
of the XML response. The overall structure is:
* WebsiteConfiguration
* IndexDocument
* Suffix : suffix that is appended to request that \
is for a "directory" on the website endpoint
* ErrorDocument
* Key : name of object to serve when an error occurs
2) unparsed XML describing the bucket's website configuration
"""
body = self.get_website_configuration_xml(headers=headers)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e, body
def get_website_configuration_xml(self, headers=None):
"""Get raw website configuration xml"""
response = self.connection.make_request('GET', self.name,
query_args='website', headers=headers)
body = response.read().decode('utf-8')
boto.log.debug(body)
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
return body
def delete_website_configuration(self, headers=None):
"""
Removes all website configuration from the bucket.
"""
response = self.connection.make_request('DELETE', self.name,
query_args='website', headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 204:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def get_website_endpoint(self):
"""
Returns the fully qualified hostname to use is you want to access this
bucket as a website. This doesn't validate whether the bucket has
been correctly configured as a website or not.
"""
l = [self.name]
l.append(S3WebsiteEndpointTranslate.translate_region(self.get_location()))
l.append('.'.join(self.connection.host.split('.')[-2:]))
return '.'.join(l)
def get_policy(self, headers=None):
"""
Returns the JSON policy associated with the bucket. The policy
is returned as an uninterpreted JSON string.
"""
response = self.connection.make_request('GET', self.name,
query_args='policy', headers=headers)
body = response.read()
if response.status == 200:
return body
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_policy(self, policy, headers=None):
"""
Add or replace the JSON policy associated with the bucket.
:type policy: str
:param policy: The JSON policy as a string.
"""
response = self.connection.make_request('PUT', self.name,
data=policy,
query_args='policy',
headers=headers)
body = response.read()
if response.status >= 200 and response.status <= 204:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def delete_policy(self, headers=None):
response = self.connection.make_request('DELETE', self.name,
data='/?policy',
query_args='policy',
headers=headers)
body = response.read()
if response.status >= 200 and response.status <= 204:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_cors_xml(self, cors_xml, headers=None):
"""
Set the CORS (Cross-Origin Resource Sharing) for a bucket.
:type cors_xml: str
:param cors_xml: The XML document describing your desired
CORS configuration. See the S3 documentation for details
of the exact syntax required.
"""
fp = StringIO(cors_xml)
md5 = boto.utils.compute_md5(fp)
if headers is None:
headers = {}
headers['Content-MD5'] = md5[1]
headers['Content-Type'] = 'text/xml'
response = self.connection.make_request('PUT', self.name,
data=fp.getvalue(),
query_args='cors',
headers=headers)
body = response.read()
if response.status == 200:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_cors(self, cors_config, headers=None):
"""
Set the CORS for this bucket given a boto CORSConfiguration
object.
:type cors_config: :class:`boto.s3.cors.CORSConfiguration`
:param cors_config: The CORS configuration you want
to configure for this bucket.
"""
return self.set_cors_xml(cors_config.to_xml())
def get_cors_xml(self, headers=None):
"""
Returns the current CORS configuration on the bucket as an
XML document.
"""
response = self.connection.make_request('GET', self.name,
query_args='cors', headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 200:
return body
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def get_cors(self, headers=None):
"""
Returns the current CORS configuration on the bucket.
:rtype: :class:`boto.s3.cors.CORSConfiguration`
:returns: A CORSConfiguration object that describes all current
CORS rules in effect for the bucket.
"""
body = self.get_cors_xml(headers)
cors = CORSConfiguration()
h = handler.XmlHandler(cors, self)
xml.sax.parseString(body, h)
return cors
def delete_cors(self, headers=None):
"""
Removes all CORS configuration from the bucket.
"""
response = self.connection.make_request('DELETE', self.name,
query_args='cors',
headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 204:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
    def initiate_multipart_upload(self, key_name, headers=None,
                                  reduced_redundancy=False,
                                  metadata=None, encrypt_key=False,
                                  policy=None):
        """
        Start a multipart upload operation.

        .. note::

            Note: After you initiate multipart upload and upload one or more
            parts, you must either complete or abort multipart upload in order
            to stop getting charged for storage of the uploaded parts. Only
            after you either complete or abort multipart upload, Amazon S3
            frees up the parts storage and stops charging you for the parts
            storage.

        :type key_name: string
        :param key_name: The name of the key that will ultimately
            result from this multipart upload operation.  This will be
            exactly as the key appears in the bucket after the upload
            process has been completed.

        :type headers: dict
        :param headers: Additional HTTP headers to send and store with the
            resulting key in S3.

        :type reduced_redundancy: boolean
        :param reduced_redundancy: In multipart uploads, the storage
            class is specified when initiating the upload, not when
            uploading individual parts.  So if you want the resulting
            key to use the reduced redundancy storage class set this
            flag when you initiate the upload.

        :type metadata: dict
        :param metadata: Any metadata that you would like to set on the key
            that results from the multipart upload.

        :type encrypt_key: bool
        :param encrypt_key: If True, the new copy of the object will
            be encrypted on the server-side by S3 and will be stored
            in an encrypted form while at rest in S3.

        :type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the
            new key (once completed) in S3.
        """
        query_args = 'uploads'
        provider = self.connection.provider
        headers = headers or {}
        # Canned ACL, storage class and encryption travel as request
        # headers on the initiate call; the provider object supplies the
        # service-specific header names.
        if policy:
            headers[provider.acl_header] = policy
        if reduced_redundancy:
            storage_class_header = provider.storage_class_header
            if storage_class_header:
                headers[storage_class_header] = 'REDUCED_REDUNDANCY'
            # TODO: what if the provider doesn't support reduced redundancy?
            # (see boto.s3.key.Key.set_contents_from_file)
        if encrypt_key:
            headers[provider.server_side_encryption_header] = 'AES256'
        if metadata is None:
            metadata = {}

        # merge_meta folds the user metadata dict into the request headers.
        headers = boto.utils.merge_meta(headers, metadata,
                                        self.connection.provider)
        response = self.connection.make_request('POST', self.name, key_name,
                                                query_args=query_args,
                                                headers=headers)
        body = response.read()
        boto.log.debug(body)
        if response.status == 200:
            # Parse the InitiateMultipartUploadResult XML into a
            # MultiPartUpload tracking object.
            resp = MultiPartUpload(self)
            h = handler.XmlHandler(resp, self)
            if not isinstance(body, bytes):
                body = body.encode('utf-8')
            xml.sax.parseString(body, h)
            return resp
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
    def complete_multipart_upload(self, key_name, upload_id,
                                  xml_body, headers=None):
        """
        Complete a multipart upload operation.

        :param key_name: Name of the key the upload was initiated for.
        :param upload_id: Upload identifier returned when the multipart
            upload was initiated.
        :param xml_body: The CompleteMultipartUpload XML document
            listing the uploaded parts.
        :param headers: Additional HTTP headers, or None.
        """
        query_args = 'uploadId=%s' % upload_id
        if headers is None:
            headers = {}
        headers['Content-Type'] = 'text/xml'
        response = self.connection.make_request('POST', self.name, key_name,
                                                query_args=query_args,
                                                headers=headers, data=xml_body)
        contains_error = False
        body = response.read().decode('utf-8')
        # Some errors will be reported in the body of the response
        # even though the HTTP response code is 200. This check
        # does a quick and dirty peek in the body for an error element.
        if body.find('<Error>') > 0:
            contains_error = True
        boto.log.debug(body)
        if response.status == 200 and not contains_error:
            resp = CompleteMultiPartUpload(self)
            h = handler.XmlHandler(resp, self)
            # xml.sax expects bytes; re-encode the decoded body.
            if not isinstance(body, bytes):
                body = body.encode('utf-8')
            xml.sax.parseString(body, h)
            # Use a dummy key to parse various response headers
            # for versioning, encryption info and then explicitly
            # set the completed MPU object values from key.
            k = self.key_class(self)
            k.handle_version_headers(response)
            k.handle_encryption_headers(response)
            resp.version_id = k.version_id
            resp.encrypted = k.encrypted
            return resp
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
def cancel_multipart_upload(self, key_name, upload_id, headers=None):
"""
To verify that all parts have been removed, so you don't get charged
for the part storage, you should call the List Parts operation and
ensure the parts list is empty.
"""
query_args = 'uploadId=%s' % upload_id
response = self.connection.make_request('DELETE', self.name, key_name,
query_args=query_args,
headers=headers)
body = response.read()
boto.log.debug(body)
if response.status != 204:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def delete(self, headers=None):
return self.connection.delete_bucket(self.name, headers=headers)
def get_tags(self):
response = self.get_xml_tags()
tags = Tags()
h = handler.XmlHandler(tags, self)
if not isinstance(response, bytes):
response = response.encode('utf-8')
xml.sax.parseString(response, h)
return tags
def get_xml_tags(self):
response = self.connection.make_request('GET', self.name,
query_args='tagging',
headers=None)
body = response.read()
if response.status == 200:
return body
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_xml_tags(self, tag_str, headers=None, query_args='tagging'):
if headers is None:
headers = {}
md5 = boto.utils.compute_md5(StringIO(tag_str))
headers['Content-MD5'] = md5[1]
headers['Content-Type'] = 'text/xml'
if not isinstance(tag_str, bytes):
tag_str = tag_str.encode('utf-8')
response = self.connection.make_request('PUT', self.name,
data=tag_str,
query_args=query_args,
headers=headers)
body = response.read()
if response.status != 204:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
return True
def set_tags(self, tags, headers=None):
return self.set_xml_tags(tags.to_xml(), headers=headers)
def delete_tags(self, headers=None):
response = self.connection.make_request('DELETE', self.name,
query_args='tagging',
headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 204:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
# Add missing eu-central-1 region for S3 Website
# URL as per:
# http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from boto import handler
from boto.resultset import ResultSet
from boto.exception import BotoClientError
from boto.s3.acl import Policy, CannedACLStrings, Grant
from boto.s3.key import Key
from boto.s3.prefix import Prefix
from boto.s3.deletemarker import DeleteMarker
from boto.s3.multipart import MultiPartUpload
from boto.s3.multipart import CompleteMultiPartUpload
from boto.s3.multidelete import MultiDeleteResult
from boto.s3.multidelete import Error
from boto.s3.bucketlistresultset import BucketListResultSet
from boto.s3.bucketlistresultset import VersionedBucketListResultSet
from boto.s3.bucketlistresultset import MultiPartUploadListResultSet
from boto.s3.lifecycle import Lifecycle
from boto.s3.tagging import Tags
from boto.s3.cors import CORSConfiguration
from boto.s3.bucketlogging import BucketLogging
from boto.s3 import website
import boto.jsonresponse
import boto.utils
import xml.sax
import xml.sax.saxutils
import re
import base64
from collections import defaultdict
from boto.compat import BytesIO, six, StringIO, urllib
# as per http://goo.gl/BDuud (02/19/2011)
class S3WebsiteEndpointTranslate(object):
    """
    Map an S3 region name to the host prefix of its static-website
    endpoint.  Any region not listed here falls back to the classic
    us-east-1 website endpoint via the defaultdict factory.
    """

    trans_region = defaultdict(lambda: 's3-website-us-east-1')
    trans_region['eu-west-1'] = 's3-website-eu-west-1'
    trans_region['eu-central-1'] = 's3-website.eu-central-1'
    trans_region['us-west-1'] = 's3-website-us-west-1'
    trans_region['us-west-2'] = 's3-website-us-west-2'
    trans_region['sa-east-1'] = 's3-website-sa-east-1'
    trans_region['ap-northeast-1'] = 's3-website-ap-northeast-1'
    trans_region['ap-southeast-1'] = 's3-website-ap-southeast-1'
    trans_region['ap-southeast-2'] = 's3-website-ap-southeast-2'
    trans_region['cn-north-1'] = 's3-website.cn-north-1'

    @classmethod
    def translate_region(cls, reg):
        """Return the website endpoint prefix for region *reg*."""
        # PEP 8 fix: the first argument of a classmethod is ``cls``,
        # not ``self``.
        return cls.trans_region[reg]
S3Permissions = ['READ', 'WRITE', 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL']
class Bucket(object):
LoggingGroup = 'http://acs.amazonaws.com/groups/s3/LogDelivery'
BucketPaymentBody = """<?xml version="1.0" encoding="UTF-8"?>
<RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Payer>%s</Payer>
</RequestPaymentConfiguration>"""
VersioningBody = """<?xml version="1.0" encoding="UTF-8"?>
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>%s</Status>
<MfaDelete>%s</MfaDelete>
</VersioningConfiguration>"""
VersionRE = '<Status>([A-Za-z]+)</Status>'
MFADeleteRE = '<MfaDelete>([A-Za-z]+)</MfaDelete>'
def __init__(self, connection=None, name=None, key_class=Key):
self.name = name
self.connection = connection
self.key_class = key_class
def __repr__(self):
return '<Bucket: %s>' % self.name
def __iter__(self):
return iter(BucketListResultSet(self))
def __contains__(self, key_name):
return not (self.get_key(key_name) is None)
    def startElement(self, name, attrs, connection):
        # SAX parsing hook: bucket elements need no nested handler.
        return None
def endElement(self, name, value, connection):
if name == 'Name':
self.name = value
elif name == 'CreationDate':
self.creation_date = value
else:
setattr(self, name, value)
def set_key_class(self, key_class):
"""
Set the Key class associated with this bucket. By default, this
would be the boto.s3.key.Key class but if you want to subclass that
for some reason this allows you to associate your new class with a
bucket so that when you call bucket.new_key() or when you get a listing
of keys in the bucket you will get an instances of your key class
rather than the default.
:type key_class: class
:param key_class: A subclass of Key that can be more specific
"""
self.key_class = key_class
def lookup(self, key_name, headers=None):
"""
Deprecated: Please use get_key method.
:type key_name: string
:param key_name: The name of the key to retrieve
:rtype: :class:`boto.s3.key.Key`
:returns: A Key object from this bucket.
"""
return self.get_key(key_name, headers=headers)
def get_key(self, key_name, headers=None, version_id=None,
response_headers=None, validate=True):
"""
Check to see if a particular key exists within the bucket. This
method uses a HEAD request to check for the existence of the key.
Returns: An instance of a Key object or None
:param key_name: The name of the key to retrieve
:type key_name: string
:param headers: The headers to send when retrieving the key
:type headers: dict
:param version_id:
:type version_id: string
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type response_headers: dict
:param validate: Verifies whether the key exists. If ``False``, this
will not hit the service, constructing an in-memory object.
Default is ``True``.
:type validate: bool
:rtype: :class:`boto.s3.key.Key`
:returns: A Key object from this bucket.
"""
if validate is False:
if headers or version_id or response_headers:
raise BotoClientError(
"When providing 'validate=False', no other params " + \
"are allowed."
)
# This leans on the default behavior of ``new_key`` (not hitting
# the service). If that changes, that behavior should migrate here.
return self.new_key(key_name)
query_args_l = []
if version_id:
query_args_l.append('versionId=%s' % version_id)
if response_headers:
for rk, rv in six.iteritems(response_headers):
query_args_l.append('%s=%s' % (rk, urllib.parse.quote(rv)))
key, resp = self._get_key_internal(key_name, headers, query_args_l)
return key
def _get_key_internal(self, key_name, headers, query_args_l):
query_args = '&'.join(query_args_l) or None
response = self.connection.make_request('HEAD', self.name, key_name,
headers=headers,
query_args=query_args)
response.read()
# Allow any success status (2xx) - for example this lets us
# support Range gets, which return status 206:
if response.status / 100 == 2:
k = self.key_class(self)
provider = self.connection.provider
k.metadata = boto.utils.get_aws_metadata(response.msg, provider)
for field in Key.base_fields:
k.__dict__[field.lower().replace('-', '_')] = \
response.getheader(field)
# the following machinations are a workaround to the fact that
# apache/fastcgi omits the content-length header on HEAD
# requests when the content-length is zero.
# See http://goo.gl/0Tdax for more details.
clen = response.getheader('content-length')
if clen:
k.size = int(response.getheader('content-length'))
else:
k.size = 0
k.name = key_name
k.handle_version_headers(response)
k.handle_encryption_headers(response)
k.handle_restore_headers(response)
k.handle_addl_headers(response.getheaders())
return k, response
else:
if response.status == 404:
return None, response
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, '')
def list(self, prefix='', delimiter='', marker='', headers=None,
encoding_type=None):
"""
List key objects within a bucket. This returns an instance of an
BucketListResultSet that automatically handles all of the result
paging, etc. from S3. You just need to keep iterating until
there are no more results.
Called with no arguments, this will return an iterator object across
all keys within the bucket.
The Key objects returned by the iterator are obtained by parsing
the results of a GET on the bucket, also known as the List Objects
request. The XML returned by this request contains only a subset
of the information about each key. Certain metadata fields such
as Content-Type and user metadata are not available in the XML.
Therefore, if you want these additional metadata fields you will
have to do a HEAD request on the Key in the bucket.
:type prefix: string
:param prefix: allows you to limit the listing to a particular
prefix. For example, if you call the method with
prefix='/foo/' then the iterator will only cycle through
the keys that begin with the string '/foo/'.
:type delimiter: string
:param delimiter: can be used in conjunction with the prefix
to allow you to organize and browse your keys
hierarchically. See http://goo.gl/Xx63h for more details.
:type marker: string
:param marker: The "marker" of where you are in the result set
:param encoding_type: Requests Amazon S3 to encode the response and
specifies the encoding method to use.
An object key can contain any Unicode character; however, XML 1.0
parser cannot parse some characters, such as characters with an
ASCII value from 0 to 10. For characters that are not supported in
XML 1.0, you can add this parameter to request that Amazon S3
encode the keys in the response.
Valid options: ``url``
:type encoding_type: string
:rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
:return: an instance of a BucketListResultSet that handles paging, etc
"""
return BucketListResultSet(self, prefix, delimiter, marker, headers,
encoding_type=encoding_type)
def list_versions(self, prefix='', delimiter='', key_marker='',
version_id_marker='', headers=None, encoding_type=None):
"""
List version objects within a bucket. This returns an
instance of an VersionedBucketListResultSet that automatically
handles all of the result paging, etc. from S3. You just need
to keep iterating until there are no more results. Called
with no arguments, this will return an iterator object across
all keys within the bucket.
:type prefix: string
:param prefix: allows you to limit the listing to a particular
prefix. For example, if you call the method with
prefix='/foo/' then the iterator will only cycle through
the keys that begin with the string '/foo/'.
:type delimiter: string
:param delimiter: can be used in conjunction with the prefix
to allow you to organize and browse your keys
hierarchically. See:
http://aws.amazon.com/releasenotes/Amazon-S3/213
for more details.
:type key_marker: string
:param key_marker: The "marker" of where you are in the result set
:param encoding_type: Requests Amazon S3 to encode the response and
specifies the encoding method to use.
An object key can contain any Unicode character; however, XML 1.0
parser cannot parse some characters, such as characters with an
ASCII value from 0 to 10. For characters that are not supported in
XML 1.0, you can add this parameter to request that Amazon S3
encode the keys in the response.
Valid options: ``url``
:type encoding_type: string
:rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
:return: an instance of a BucketListResultSet that handles paging, etc
"""
return VersionedBucketListResultSet(self, prefix, delimiter,
key_marker, version_id_marker,
headers,
encoding_type=encoding_type)
def list_multipart_uploads(self, key_marker='',
upload_id_marker='',
headers=None, encoding_type=None):
"""
List multipart upload objects within a bucket. This returns an
instance of an MultiPartUploadListResultSet that automatically
handles all of the result paging, etc. from S3. You just need
to keep iterating until there are no more results.
:type key_marker: string
:param key_marker: The "marker" of where you are in the result set
:type upload_id_marker: string
:param upload_id_marker: The upload identifier
:param encoding_type: Requests Amazon S3 to encode the response and
specifies the encoding method to use.
An object key can contain any Unicode character; however, XML 1.0
parser cannot parse some characters, such as characters with an
ASCII value from 0 to 10. For characters that are not supported in
XML 1.0, you can add this parameter to request that Amazon S3
encode the keys in the response.
Valid options: ``url``
:type encoding_type: string
:rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
:return: an instance of a BucketListResultSet that handles paging, etc
"""
return MultiPartUploadListResultSet(self, key_marker,
upload_id_marker,
headers,
encoding_type=encoding_type)
def _get_all_query_args(self, params, initial_query_string=''):
pairs = []
if initial_query_string:
pairs.append(initial_query_string)
for key, value in sorted(params.items(), key=lambda x: x[0]):
if value is None:
continue
key = key.replace('_', '-')
if key == 'maxkeys':
key = 'max-keys'
if not isinstance(value, six.string_types + (six.binary_type,)):
value = six.text_type(value)
if not isinstance(value, six.binary_type):
value = value.encode('utf-8')
if value:
pairs.append(u'%s=%s' % (
urllib.parse.quote(key),
urllib.parse.quote(value)
))
return '&'.join(pairs)
def _get_all(self, element_map, initial_query_string='',
headers=None, **params):
query_args = self._get_all_query_args(
params,
initial_query_string=initial_query_string
)
response = self.connection.make_request('GET', self.name,
headers=headers,
query_args=query_args)
body = response.read()
boto.log.debug(body)
if response.status == 200:
rs = ResultSet(element_map)
h = handler.XmlHandler(rs, self)
if not isinstance(body, bytes):
body = body.encode('utf-8')
xml.sax.parseString(body, h)
return rs
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def validate_kwarg_names(self, kwargs, names):
"""
Checks that all named arguments are in the specified list of names.
:type kwargs: dict
:param kwargs: Dictionary of kwargs to validate.
:type names: list
:param names: List of possible named arguments.
"""
for kwarg in kwargs:
if kwarg not in names:
raise TypeError('Invalid argument "%s"!' % kwarg)
def get_all_keys(self, headers=None, **params):
"""
A lower-level method for listing contents of a bucket. This
closely models the actual S3 API and requires you to manually
handle the paging of results. For a higher-level method that
handles the details of paging for you, you can use the list
method.
:type max_keys: int
:param max_keys: The maximum number of keys to retrieve
:type prefix: string
:param prefix: The prefix of the keys you want to retrieve
:type marker: string
:param marker: The "marker" of where you are in the result set
:type delimiter: string
:param delimiter: If this optional, Unicode string parameter
is included with your request, then keys that contain the
same string between the prefix and the first occurrence of
the delimiter will be rolled up into a single result
element in the CommonPrefixes collection. These rolled-up
keys are not returned elsewhere in the response.
:param encoding_type: Requests Amazon S3 to encode the response and
specifies the encoding method to use.
An object key can contain any Unicode character; however, XML 1.0
parser cannot parse some characters, such as characters with an
ASCII value from 0 to 10. For characters that are not supported in
XML 1.0, you can add this parameter to request that Amazon S3
encode the keys in the response.
Valid options: ``url``
:type encoding_type: string
:rtype: ResultSet
:return: The result from S3 listing the keys requested
"""
self.validate_kwarg_names(params, ['maxkeys', 'max_keys', 'prefix',
'marker', 'delimiter',
'encoding_type'])
return self._get_all([('Contents', self.key_class),
('CommonPrefixes', Prefix)],
'', headers, **params)
def get_all_versions(self, headers=None, **params):
"""
A lower-level, version-aware method for listing contents of a
bucket. This closely models the actual S3 API and requires
you to manually handle the paging of results. For a
higher-level method that handles the details of paging for
you, you can use the list method.
:type max_keys: int
:param max_keys: The maximum number of keys to retrieve
:type prefix: string
:param prefix: The prefix of the keys you want to retrieve
:type key_marker: string
:param key_marker: The "marker" of where you are in the result set
with respect to keys.
:type version_id_marker: string
:param version_id_marker: The "marker" of where you are in the result
set with respect to version-id's.
:type delimiter: string
:param delimiter: If this optional, Unicode string parameter
is included with your request, then keys that contain the
same string between the prefix and the first occurrence of
the delimiter will be rolled up into a single result
element in the CommonPrefixes collection. These rolled-up
keys are not returned elsewhere in the response.
:param encoding_type: Requests Amazon S3 to encode the response and
specifies the encoding method to use.
An object key can contain any Unicode character; however, XML 1.0
parser cannot parse some characters, such as characters with an
ASCII value from 0 to 10. For characters that are not supported in
XML 1.0, you can add this parameter to request that Amazon S3
encode the keys in the response.
Valid options: ``url``
:type encoding_type: string
:rtype: ResultSet
:return: The result from S3 listing the keys requested
"""
self.validate_get_all_versions_params(params)
return self._get_all([('Version', self.key_class),
('CommonPrefixes', Prefix),
('DeleteMarker', DeleteMarker)],
'versions', headers, **params)
def validate_get_all_versions_params(self, params):
"""
Validate that the parameters passed to get_all_versions are valid.
Overridden by subclasses that allow a different set of parameters.
:type params: dict
:param params: Parameters to validate.
"""
self.validate_kwarg_names(
params, ['maxkeys', 'max_keys', 'prefix', 'key_marker',
'version_id_marker', 'delimiter', 'encoding_type'])
def get_all_multipart_uploads(self, headers=None, **params):
"""
A lower-level, version-aware method for listing active
MultiPart uploads for a bucket. This closely models the
actual S3 API and requires you to manually handle the paging
of results. For a higher-level method that handles the
details of paging for you, you can use the list method.
:type max_uploads: int
:param max_uploads: The maximum number of uploads to retrieve.
Default value is 1000.
:type key_marker: string
:param key_marker: Together with upload_id_marker, this
parameter specifies the multipart upload after which
listing should begin. If upload_id_marker is not
specified, only the keys lexicographically greater than
the specified key_marker will be included in the list.
If upload_id_marker is specified, any multipart uploads
for a key equal to the key_marker might also be included,
provided those multipart uploads have upload IDs
lexicographically greater than the specified
upload_id_marker.
:type upload_id_marker: string
:param upload_id_marker: Together with key-marker, specifies
the multipart upload after which listing should begin. If
key_marker is not specified, the upload_id_marker
parameter is ignored. Otherwise, any multipart uploads
for a key equal to the key_marker might be included in the
list only if they have an upload ID lexicographically
greater than the specified upload_id_marker.
:type encoding_type: string
:param encoding_type: Requests Amazon S3 to encode the response and
specifies the encoding method to use.
An object key can contain any Unicode character; however, XML 1.0
parser cannot parse some characters, such as characters with an
ASCII value from 0 to 10. For characters that are not supported in
XML 1.0, you can add this parameter to request that Amazon S3
encode the keys in the response.
Valid options: ``url``
:type delimiter: string
:param delimiter: Character you use to group keys.
All keys that contain the same string between the prefix, if
specified, and the first occurrence of the delimiter after the
prefix are grouped under a single result element, CommonPrefixes.
If you don't specify the prefix parameter, then the substring
starts at the beginning of the key. The keys that are grouped
under CommonPrefixes result element are not returned elsewhere
in the response.
:type prefix: string
:param prefix: Lists in-progress uploads only for those keys that
begin with the specified prefix. You can use prefixes to separate
a bucket into different grouping of keys. (You can think of using
prefix to make groups in the same way you'd use a folder in a
file system.)
:rtype: ResultSet
:return: The result from S3 listing the uploads requested
"""
self.validate_kwarg_names(params, ['max_uploads', 'key_marker',
'upload_id_marker', 'encoding_type',
'delimiter', 'prefix'])
return self._get_all([('Upload', MultiPartUpload),
('CommonPrefixes', Prefix)],
'uploads', headers, **params)
def new_key(self, key_name=None):
"""
Creates a new key
:type key_name: string
:param key_name: The name of the key to create
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: An instance of the newly created key object
"""
if not key_name:
raise ValueError('Empty key names are not allowed')
return self.key_class(self, key_name)
def generate_url(self, expires_in, method='GET', headers=None,
force_http=False, response_headers=None,
expires_in_absolute=False):
return self.connection.generate_url(expires_in, method, self.name,
headers=headers,
force_http=force_http,
response_headers=response_headers,
expires_in_absolute=expires_in_absolute)
    def delete_keys(self, keys, quiet=False, mfa_token=None, headers=None):
        """
        Deletes a set of keys using S3's Multi-object delete API. If a
        VersionID is specified for that key then that version is removed.
        Returns a MultiDeleteResult Object, which contains Deleted
        and Error elements for each key you ask to delete.

        :type keys: list
        :param keys: A list of either key_names or (key_name, versionid) pairs
            or a list of Key instances.

        :type quiet: boolean
        :param quiet: In quiet mode the response includes only keys
            where the delete operation encountered an error. For a
            successful deletion, the operation does not return any
            information about the delete in the response body.

        :type mfa_token: tuple or list of strings
        :param mfa_token: A tuple or list consisting of the serial
            number from the MFA device and the current value of the
            six-digit token associated with the device. This value is
            required anytime you are deleting versioned objects from a
            bucket that has the MFADelete option on the bucket.

        :returns: An instance of MultiDeleteResult
        """
        # The iterator is shared across batches: each call to
        # delete_keys2 below consumes up to 1000 keys (the S3 API
        # limit per request) and the outer loop repeats until the
        # iterator is exhausted.
        ikeys = iter(keys)
        result = MultiDeleteResult(self)
        provider = self.connection.provider
        query_args = 'delete'
        def delete_keys2(hdrs):
            # Build and POST one multi-object delete request; returns
            # True while another full batch may remain, False when the
            # iterator produced no keys at all.
            hdrs = hdrs or {}
            data = u"""<?xml version="1.0" encoding="UTF-8"?>"""
            data += u"<Delete>"
            if quiet:
                data += u"<Quiet>true</Quiet>"
            count = 0
            while count < 1000:
                try:
                    key = next(ikeys)
                except StopIteration:
                    break
                # Accept plain names, (name, version_id) pairs, and
                # Key/DeleteMarker instances; anything else is recorded
                # as an Error in the result instead of being deleted.
                if isinstance(key, six.string_types):
                    key_name = key
                    version_id = None
                elif isinstance(key, tuple) and len(key) == 2:
                    key_name, version_id = key
                elif (isinstance(key, Key) or isinstance(key, DeleteMarker)) and key.name:
                    key_name = key.name
                    version_id = key.version_id
                else:
                    if isinstance(key, Prefix):
                        key_name = key.name
                        code = 'PrefixSkipped'   # Don't delete Prefix
                    else:
                        key_name = repr(key)   # try get a string
                        code = 'InvalidArgument'   # other unknown type
                    message = 'Invalid. No delete action taken for this object.'
                    error = Error(key_name, code=code, message=message)
                    result.errors.append(error)
                    continue
                count += 1
                # Escape the key name: it is embedded in an XML body.
                data += u"<Object><Key>%s</Key>" % xml.sax.saxutils.escape(key_name)
                if version_id:
                    data += u"<VersionId>%s</VersionId>" % version_id
                data += u"</Object>"
            data += u"</Delete>"
            if count <= 0:
                return False  # no more
            data = data.encode('utf-8')
            # The multi-object delete API requires a Content-MD5 of
            # the request body.
            fp = BytesIO(data)
            md5 = boto.utils.compute_md5(fp)
            hdrs['Content-MD5'] = md5[1]
            hdrs['Content-Type'] = 'text/xml'
            if mfa_token:
                hdrs[provider.mfa_header] = ' '.join(mfa_token)
            response = self.connection.make_request('POST', self.name,
                                                    headers=hdrs,
                                                    query_args=query_args,
                                                    data=data)
            body = response.read()
            if response.status == 200:
                # Parse Deleted/Error elements into the shared result.
                h = handler.XmlHandler(result, self)
                if not isinstance(body, bytes):
                    body = body.encode('utf-8')
                xml.sax.parseString(body, h)
                return count >= 1000  # more?
            else:
                raise provider.storage_response_error(response.status,
                                                      response.reason,
                                                      body)
        while delete_keys2(headers):
            pass
        return result
def delete_key(self, key_name, headers=None, version_id=None,
mfa_token=None):
"""
Deletes a key from the bucket. If a version_id is provided,
only that version of the key will be deleted.
:type key_name: string
:param key_name: The key name to delete
:type version_id: string
:param version_id: The version ID (optional)
:type mfa_token: tuple or list of strings
:param mfa_token: A tuple or list consisting of the serial
number from the MFA device and the current value of the
six-digit token associated with the device. This value is
required anytime you are deleting versioned objects from a
bucket that has the MFADelete option on the bucket.
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: A key object holding information on what was
deleted. The Caller can see if a delete_marker was
created or removed and what version_id the delete created
or removed.
"""
if not key_name:
raise ValueError('Empty key names are not allowed')
return self._delete_key_internal(key_name, headers=headers,
version_id=version_id,
mfa_token=mfa_token,
query_args_l=None)
def _delete_key_internal(self, key_name, headers=None, version_id=None,
mfa_token=None, query_args_l=None):
query_args_l = query_args_l or []
provider = self.connection.provider
if version_id:
query_args_l.append('versionId=%s' % version_id)
query_args = '&'.join(query_args_l) or None
if mfa_token:
if not headers:
headers = {}
headers[provider.mfa_header] = ' '.join(mfa_token)
response = self.connection.make_request('DELETE', self.name, key_name,
headers=headers,
query_args=query_args)
body = response.read()
if response.status != 204:
raise provider.storage_response_error(response.status,
response.reason, body)
else:
# return a key object with information on what was deleted.
k = self.key_class(self)
k.name = key_name
k.handle_version_headers(response)
k.handle_addl_headers(response.getheaders())
return k
    def copy_key(self, new_key_name, src_bucket_name,
                 src_key_name, metadata=None, src_version_id=None,
                 storage_class='STANDARD', preserve_acl=False,
                 encrypt_key=False, headers=None, query_args=None):
        """
        Create a new key in the bucket by copying another existing key.

        :type new_key_name: string
        :param new_key_name: The name of the new key

        :type src_bucket_name: string
        :param src_bucket_name: The name of the source bucket

        :type src_key_name: string
        :param src_key_name: The name of the source key

        :type src_version_id: string
        :param src_version_id: The version id for the key. This param
            is optional. If not specified, the newest version of the
            key will be copied.

        :type metadata: dict
        :param metadata: Metadata to be associated with new key. If
            metadata is supplied, it will replace the metadata of the
            source key being copied. If no metadata is supplied, the
            source key's metadata will be copied to the new key.

        :type storage_class: string
        :param storage_class: The storage class of the new key. By
            default, the new key will use the standard storage class.
            Possible values are: STANDARD | REDUCED_REDUNDANCY

        :type preserve_acl: bool
        :param preserve_acl: If True, the ACL from the source key will
            be copied to the destination key. If False, the
            destination key will have the default ACL. Note that
            preserving the ACL in the new key object will require two
            additional API calls to S3, one to retrieve the current
            ACL and one to set that ACL on the new object. If you
            don't care about the ACL, a value of False will be
            significantly more efficient.

        :type encrypt_key: bool
        :param encrypt_key: If True, the new copy of the object will
            be encrypted on the server-side by S3 and will be stored
            in an encrypted form while at rest in S3.

        :type headers: dict
        :param headers: A dictionary of header name/value pairs.

        :type query_args: string
        :param query_args: A string of additional querystring arguments
            to append to the request

        :rtype: :class:`boto.s3.key.Key` or subclass
        :returns: An instance of the newly created key object
        """
        headers = headers or {}
        provider = self.connection.provider
        src_key_name = boto.utils.get_utf8_value(src_key_name)
        if preserve_acl:
            # Fetch the source ACL *before* the copy so it can be
            # re-applied to the new key after a successful copy.
            if self.name == src_bucket_name:
                src_bucket = self
            else:
                src_bucket = self.connection.get_bucket(
                    src_bucket_name, validate=False)
            acl = src_bucket.get_xml_acl(src_key_name)
        if encrypt_key:
            headers[provider.server_side_encryption_header] = 'AES256'
        # The copy source is sent as a header; quote the key name so
        # special characters survive.
        src = '%s/%s' % (src_bucket_name, urllib.parse.quote(src_key_name))
        if src_version_id:
            src += '?versionId=%s' % src_version_id
        headers[provider.copy_source_header] = str(src)
        # make sure storage_class_header key exists before accessing it
        if provider.storage_class_header and storage_class:
            headers[provider.storage_class_header] = storage_class
        if metadata is not None:
            headers[provider.metadata_directive_header] = 'REPLACE'
            headers = boto.utils.merge_meta(headers, metadata, provider)
        elif not query_args:  # Can't use this header with multi-part copy.
            headers[provider.metadata_directive_header] = 'COPY'
        response = self.connection.make_request('PUT', self.name, new_key_name,
                                                headers=headers,
                                                query_args=query_args)
        body = response.read()
        if response.status == 200:
            key = self.new_key(new_key_name)
            h = handler.XmlHandler(key, self)
            if not isinstance(body, bytes):
                body = body.encode('utf-8')
            xml.sax.parseString(body, h)
            # S3 can return 200 with an Error element in the body, so
            # a parsed Error attribute still means the copy failed.
            if hasattr(key, 'Error'):
                raise provider.storage_copy_error(key.Code, key.Message, body)
            key.handle_version_headers(response)
            key.handle_addl_headers(response.getheaders())
            if preserve_acl:
                self.set_xml_acl(acl, new_key_name)
            return key
        else:
            raise provider.storage_response_error(response.status,
                                                  response.reason, body)
def set_canned_acl(self, acl_str, key_name='', headers=None,
version_id=None):
assert acl_str in CannedACLStrings
if headers:
headers[self.connection.provider.acl_header] = acl_str
else:
headers = {self.connection.provider.acl_header: acl_str}
query_args = 'acl'
if version_id:
query_args += '&versionId=%s' % version_id
response = self.connection.make_request('PUT', self.name, key_name,
headers=headers, query_args=query_args)
body = response.read()
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def get_xml_acl(self, key_name='', headers=None, version_id=None):
query_args = 'acl'
if version_id:
query_args += '&versionId=%s' % version_id
response = self.connection.make_request('GET', self.name, key_name,
query_args=query_args,
headers=headers)
body = response.read()
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
return body
def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None,
query_args='acl'):
if version_id:
query_args += '&versionId=%s' % version_id
if not isinstance(acl_str, bytes):
acl_str = acl_str.encode('utf-8')
response = self.connection.make_request('PUT', self.name, key_name,
data=acl_str,
query_args=query_args,
headers=headers)
body = response.read()
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None):
if isinstance(acl_or_str, Policy):
self.set_xml_acl(acl_or_str.to_xml(), key_name,
headers, version_id)
else:
self.set_canned_acl(acl_or_str, key_name,
headers, version_id)
def get_acl(self, key_name='', headers=None, version_id=None):
query_args = 'acl'
if version_id:
query_args += '&versionId=%s' % version_id
response = self.connection.make_request('GET', self.name, key_name,
query_args=query_args,
headers=headers)
body = response.read()
if response.status == 200:
policy = Policy(self)
h = handler.XmlHandler(policy, self)
if not isinstance(body, bytes):
body = body.encode('utf-8')
xml.sax.parseString(body, h)
return policy
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
    def set_subresource(self, subresource, value, key_name='', headers=None,
                        version_id=None):
        """
        Set a subresource for a bucket or key.

        :type subresource: string
        :param subresource: The subresource to set.

        :type value: string
        :param value: The value of the subresource.

        :type key_name: string
        :param key_name: The key to operate on, or None to operate on the
            bucket.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :type version_id: string
        :param version_id: Optional. The version id of the key to
            operate on. If not specified, operate on the newest
            version.

        :raises TypeError: if *subresource* is None or empty.
        """
        if not subresource:
            raise TypeError('set_subresource called with subresource=None')
        query_args = subresource
        if version_id:
            query_args += '&versionId=%s' % version_id
        # Encode to UTF-8 so the request body is bytes on py3.
        if not isinstance(value, bytes):
            value = value.encode('utf-8')
        response = self.connection.make_request('PUT', self.name, key_name,
                                                data=value,
                                                query_args=query_args,
                                                headers=headers)
        body = response.read()
        if response.status != 200:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
    def get_subresource(self, subresource, key_name='', headers=None,
                        version_id=None):
        """
        Get a subresource for a bucket or key.

        :type subresource: string
        :param subresource: The subresource to get.

        :type key_name: string
        :param key_name: The key to operate on, or None to operate on the
            bucket.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :type version_id: string
        :param version_id: Optional. The version id of the key to
            operate on. If not specified, operate on the newest
            version.

        :rtype: string
        :returns: The value of the subresource.

        :raises TypeError: if *subresource* is None or empty.
        """
        if not subresource:
            raise TypeError('get_subresource called with subresource=None')
        query_args = subresource
        if version_id:
            query_args += '&versionId=%s' % version_id
        response = self.connection.make_request('GET', self.name, key_name,
                                                query_args=query_args,
                                                headers=headers)
        body = response.read()
        if response.status != 200:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
        return body
def make_public(self, recursive=False, headers=None):
self.set_canned_acl('public-read', headers=headers)
if recursive:
for key in self:
self.set_canned_acl('public-read', key.name, headers=headers)
def add_email_grant(self, permission, email_address,
recursive=False, headers=None):
"""
Convenience method that provides a quick way to add an email grant
to a bucket. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type email_address: string
:param email_address: The email address associated with the AWS
account your are granting the permission to.
:type recursive: boolean
:param recursive: A boolean value to controls whether the
command will apply the grant to all keys within the bucket
or not. The default value is False. By passing a True
value, the call will iterate through all keys in the
bucket and apply the same grant to each key. CAUTION: If
you have a lot of keys, this could take a long time!
"""
if permission not in S3Permissions:
raise self.connection.provider.storage_permissions_error(
'Unknown Permission: %s' % permission)
policy = self.get_acl(headers=headers)
policy.acl.add_email_grant(permission, email_address)
self.set_acl(policy, headers=headers)
if recursive:
for key in self:
key.add_email_grant(permission, email_address, headers=headers)
def add_user_grant(self, permission, user_id, recursive=False,
headers=None, display_name=None):
"""
Convenience method that provides a quick way to add a canonical
user grant to a bucket. This method retrieves the current ACL,
creates a new grant based on the parameters passed in, adds that
grant to the ACL and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type user_id: string
:param user_id: The canonical user id associated with the AWS
account your are granting the permission to.
:type recursive: boolean
:param recursive: A boolean value to controls whether the
command will apply the grant to all keys within the bucket
or not. The default value is False. By passing a True
value, the call will iterate through all keys in the
bucket and apply the same grant to each key. CAUTION: If
you have a lot of keys, this could take a long time!
:type display_name: string
:param display_name: An option string containing the user's
Display Name. Only required on Walrus.
"""
if permission not in S3Permissions:
raise self.connection.provider.storage_permissions_error(
'Unknown Permission: %s' % permission)
policy = self.get_acl(headers=headers)
policy.acl.add_user_grant(permission, user_id,
display_name=display_name)
self.set_acl(policy, headers=headers)
if recursive:
for key in self:
key.add_user_grant(permission, user_id, headers=headers,
display_name=display_name)
def list_grants(self, headers=None):
policy = self.get_acl(headers=headers)
return policy.acl.grants
def get_location(self):
"""
Returns the LocationConstraint for the bucket.
:rtype: str
:return: The LocationConstraint for the bucket or the empty
string if no constraint was specified when bucket was created.
"""
response = self.connection.make_request('GET', self.name,
query_args='location')
body = response.read()
if response.status == 200:
rs = ResultSet(self)
h = handler.XmlHandler(rs, self)
if not isinstance(body, bytes):
body = body.encode('utf-8')
xml.sax.parseString(body, h)
return rs.LocationConstraint
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_xml_logging(self, logging_str, headers=None):
"""
Set logging on a bucket directly to the given xml string.
:type logging_str: unicode string
:param logging_str: The XML for the bucketloggingstatus which
will be set. The string will be converted to utf-8 before
it is sent. Usually, you will obtain this XML from the
BucketLogging object.
:rtype: bool
:return: True if ok or raises an exception.
"""
body = logging_str
if not isinstance(body, bytes):
body = body.encode('utf-8')
response = self.connection.make_request('PUT', self.name, data=body,
query_args='logging', headers=headers)
body = response.read()
if response.status == 200:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def enable_logging(self, target_bucket, target_prefix='',
grants=None, headers=None):
"""
Enable logging on a bucket.
:type target_bucket: bucket or string
:param target_bucket: The bucket to log to.
:type target_prefix: string
:param target_prefix: The prefix which should be prepended to the
generated log files written to the target_bucket.
:type grants: list of Grant objects
:param grants: A list of extra permissions which will be granted on
the log files which are created.
:rtype: bool
:return: True if ok or raises an exception.
"""
if isinstance(target_bucket, Bucket):
target_bucket = target_bucket.name
blogging = BucketLogging(target=target_bucket, prefix=target_prefix,
grants=grants)
return self.set_xml_logging(blogging.to_xml(), headers=headers)
def disable_logging(self, headers=None):
"""
Disable logging on a bucket.
:rtype: bool
:return: True if ok or raises an exception.
"""
blogging = BucketLogging()
return self.set_xml_logging(blogging.to_xml(), headers=headers)
def get_logging_status(self, headers=None):
"""
Get the logging status for this bucket.
:rtype: :class:`boto.s3.bucketlogging.BucketLogging`
:return: A BucketLogging object for this bucket.
"""
response = self.connection.make_request('GET', self.name,
query_args='logging', headers=headers)
body = response.read()
if response.status == 200:
blogging = BucketLogging()
h = handler.XmlHandler(blogging, self)
if not isinstance(body, bytes):
body = body.encode('utf-8')
xml.sax.parseString(body, h)
return blogging
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_as_logging_target(self, headers=None):
"""
Setup the current bucket as a logging target by granting the necessary
permissions to the LogDelivery group to write log files to this bucket.
"""
policy = self.get_acl(headers=headers)
g1 = Grant(permission='WRITE', type='Group', uri=self.LoggingGroup)
g2 = Grant(permission='READ_ACP', type='Group', uri=self.LoggingGroup)
policy.acl.add_grant(g1)
policy.acl.add_grant(g2)
self.set_acl(policy, headers=headers)
def get_request_payment(self, headers=None):
response = self.connection.make_request('GET', self.name,
query_args='requestPayment', headers=headers)
body = response.read()
if response.status == 200:
return body
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_request_payment(self, payer='BucketOwner', headers=None):
body = self.BucketPaymentBody % payer
response = self.connection.make_request('PUT', self.name, data=body,
query_args='requestPayment', headers=headers)
body = response.read()
if response.status == 200:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def configure_versioning(self, versioning, mfa_delete=False,
mfa_token=None, headers=None):
"""
Configure versioning for this bucket.
..note:: This feature is currently in beta.
:type versioning: bool
:param versioning: A boolean indicating whether version is
enabled (True) or disabled (False).
:type mfa_delete: bool
:param mfa_delete: A boolean indicating whether the
Multi-Factor Authentication Delete feature is enabled
(True) or disabled (False). If mfa_delete is enabled then
all Delete operations will require the token from your MFA
device to be passed in the request.
:type mfa_token: tuple or list of strings
:param mfa_token: A tuple or list consisting of the serial
number from the MFA device and the current value of the
six-digit token associated with the device. This value is
required when you are changing the status of the MfaDelete
property of the bucket.
"""
if versioning:
ver = 'Enabled'
else:
ver = 'Suspended'
if mfa_delete:
mfa = 'Enabled'
else:
mfa = 'Disabled'
body = self.VersioningBody % (ver, mfa)
if mfa_token:
if not headers:
headers = {}
provider = self.connection.provider
headers[provider.mfa_header] = ' '.join(mfa_token)
response = self.connection.make_request('PUT', self.name, data=body,
query_args='versioning', headers=headers)
body = response.read()
if response.status == 200:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def get_versioning_status(self, headers=None):
"""
Returns the current status of versioning on the bucket.
:rtype: dict
:returns: A dictionary containing a key named 'Versioning'
that can have a value of either Enabled, Disabled, or
Suspended. Also, if MFADelete has ever been enabled on the
bucket, the dictionary will contain a key named
'MFADelete' which will have a value of either Enabled or
Suspended.
"""
response = self.connection.make_request('GET', self.name,
query_args='versioning', headers=headers)
body = response.read()
if not isinstance(body, six.string_types):
body = body.decode('utf-8')
boto.log.debug(body)
if response.status == 200:
d = {}
ver = re.search(self.VersionRE, body)
if ver:
d['Versioning'] = ver.group(1)
mfa = re.search(self.MFADeleteRE, body)
if mfa:
d['MfaDelete'] = mfa.group(1)
return d
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def configure_lifecycle(self, lifecycle_config, headers=None):
"""
Configure lifecycle for this bucket.
:type lifecycle_config: :class:`boto.s3.lifecycle.Lifecycle`
:param lifecycle_config: The lifecycle configuration you want
to configure for this bucket.
"""
xml = lifecycle_config.to_xml()
#xml = xml.encode('utf-8')
fp = StringIO(xml)
md5 = boto.utils.compute_md5(fp)
if headers is None:
headers = {}
headers['Content-MD5'] = md5[1]
headers['Content-Type'] = 'text/xml'
response = self.connection.make_request('PUT', self.name,
data=fp.getvalue(),
query_args='lifecycle',
headers=headers)
body = response.read()
if response.status == 200:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def get_lifecycle_config(self, headers=None):
"""
Returns the current lifecycle configuration on the bucket.
:rtype: :class:`boto.s3.lifecycle.Lifecycle`
:returns: A LifecycleConfig object that describes all current
lifecycle rules in effect for the bucket.
"""
response = self.connection.make_request('GET', self.name,
query_args='lifecycle', headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 200:
lifecycle = Lifecycle()
h = handler.XmlHandler(lifecycle, self)
if not isinstance(body, bytes):
body = body.encode('utf-8')
xml.sax.parseString(body, h)
return lifecycle
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def delete_lifecycle_configuration(self, headers=None):
"""
Removes all lifecycle configuration from the bucket.
"""
response = self.connection.make_request('DELETE', self.name,
query_args='lifecycle',
headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 204:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def configure_website(self, suffix=None, error_key=None,
redirect_all_requests_to=None,
routing_rules=None,
headers=None):
"""
Configure this bucket to act as a website
:type suffix: str
:param suffix: Suffix that is appended to a request that is for a
"directory" on the website endpoint (e.g. if the suffix is
index.html and you make a request to samplebucket/images/
the data that is returned will be for the object with the
key name images/index.html). The suffix must not be empty
and must not include a slash character.
:type error_key: str
:param error_key: The object key name to use when a 4XX class
error occurs. This is optional.
:type redirect_all_requests_to: :class:`boto.s3.website.RedirectLocation`
:param redirect_all_requests_to: Describes the redirect behavior for
every request to this bucket's website endpoint. If this value is
non None, no other values are considered when configuring the
website configuration for the bucket. This is an instance of
``RedirectLocation``.
:type routing_rules: :class:`boto.s3.website.RoutingRules`
:param routing_rules: Object which specifies conditions
and redirects that apply when the conditions are met.
"""
config = website.WebsiteConfiguration(
suffix, error_key, redirect_all_requests_to,
routing_rules)
return self.set_website_configuration(config, headers=headers)
def set_website_configuration(self, config, headers=None):
"""
:type config: boto.s3.website.WebsiteConfiguration
:param config: Configuration data
"""
return self.set_website_configuration_xml(config.to_xml(),
headers=headers)
def set_website_configuration_xml(self, xml, headers=None):
"""Upload xml website configuration"""
response = self.connection.make_request('PUT', self.name, data=xml,
query_args='website',
headers=headers)
body = response.read()
if response.status == 200:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def get_website_configuration(self, headers=None):
"""
Returns the current status of website configuration on the bucket.
:rtype: dict
:returns: A dictionary containing a Python representation
of the XML response from S3. The overall structure is:
* WebsiteConfiguration
* IndexDocument
* Suffix : suffix that is appended to request that
is for a "directory" on the website endpoint
* ErrorDocument
* Key : name of object to serve when an error occurs
"""
return self.get_website_configuration_with_xml(headers)[0]
def get_website_configuration_obj(self, headers=None):
"""Get the website configuration as a
:class:`boto.s3.website.WebsiteConfiguration` object.
"""
config_xml = self.get_website_configuration_xml(headers=headers)
config = website.WebsiteConfiguration()
h = handler.XmlHandler(config, self)
xml.sax.parseString(config_xml, h)
return config
def get_website_configuration_with_xml(self, headers=None):
"""
Returns the current status of website configuration on the bucket as
unparsed XML.
:rtype: 2-Tuple
:returns: 2-tuple containing:
1) A dictionary containing a Python representation \
of the XML response. The overall structure is:
* WebsiteConfiguration
* IndexDocument
* Suffix : suffix that is appended to request that \
is for a "directory" on the website endpoint
* ErrorDocument
* Key : name of object to serve when an error occurs
2) unparsed XML describing the bucket's website configuration
"""
body = self.get_website_configuration_xml(headers=headers)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e, body
def get_website_configuration_xml(self, headers=None):
"""Get raw website configuration xml"""
response = self.connection.make_request('GET', self.name,
query_args='website', headers=headers)
body = response.read().decode('utf-8')
boto.log.debug(body)
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
return body
def delete_website_configuration(self, headers=None):
"""
Removes all website configuration from the bucket.
"""
response = self.connection.make_request('DELETE', self.name,
query_args='website', headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 204:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def get_website_endpoint(self):
"""
Returns the fully qualified hostname to use is you want to access this
bucket as a website. This doesn't validate whether the bucket has
been correctly configured as a website or not.
"""
l = [self.name]
l.append(S3WebsiteEndpointTranslate.translate_region(self.get_location()))
l.append('.'.join(self.connection.host.split('.')[-2:]))
return '.'.join(l)
def get_policy(self, headers=None):
"""
Returns the JSON policy associated with the bucket. The policy
is returned as an uninterpreted JSON string.
"""
response = self.connection.make_request('GET', self.name,
query_args='policy', headers=headers)
body = response.read()
if response.status == 200:
return body
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_policy(self, policy, headers=None):
"""
Add or replace the JSON policy associated with the bucket.
:type policy: str
:param policy: The JSON policy as a string.
"""
response = self.connection.make_request('PUT', self.name,
data=policy,
query_args='policy',
headers=headers)
body = response.read()
if response.status >= 200 and response.status <= 204:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def delete_policy(self, headers=None):
response = self.connection.make_request('DELETE', self.name,
data='/?policy',
query_args='policy',
headers=headers)
body = response.read()
if response.status >= 200 and response.status <= 204:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_cors_xml(self, cors_xml, headers=None):
"""
Set the CORS (Cross-Origin Resource Sharing) for a bucket.
:type cors_xml: str
:param cors_xml: The XML document describing your desired
CORS configuration. See the S3 documentation for details
of the exact syntax required.
"""
fp = StringIO(cors_xml)
md5 = boto.utils.compute_md5(fp)
if headers is None:
headers = {}
headers['Content-MD5'] = md5[1]
headers['Content-Type'] = 'text/xml'
response = self.connection.make_request('PUT', self.name,
data=fp.getvalue(),
query_args='cors',
headers=headers)
body = response.read()
if response.status == 200:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_cors(self, cors_config, headers=None):
"""
Set the CORS for this bucket given a boto CORSConfiguration
object.
:type cors_config: :class:`boto.s3.cors.CORSConfiguration`
:param cors_config: The CORS configuration you want
to configure for this bucket.
"""
return self.set_cors_xml(cors_config.to_xml())
def get_cors_xml(self, headers=None):
"""
Returns the current CORS configuration on the bucket as an
XML document.
"""
response = self.connection.make_request('GET', self.name,
query_args='cors', headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 200:
return body
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def get_cors(self, headers=None):
"""
Returns the current CORS configuration on the bucket.
:rtype: :class:`boto.s3.cors.CORSConfiguration`
:returns: A CORSConfiguration object that describes all current
CORS rules in effect for the bucket.
"""
body = self.get_cors_xml(headers)
cors = CORSConfiguration()
h = handler.XmlHandler(cors, self)
xml.sax.parseString(body, h)
return cors
def delete_cors(self, headers=None):
"""
Removes all CORS configuration from the bucket.
"""
response = self.connection.make_request('DELETE', self.name,
query_args='cors',
headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 204:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def initiate_multipart_upload(self, key_name, headers=None,
reduced_redundancy=False,
metadata=None, encrypt_key=False,
policy=None):
"""
Start a multipart upload operation.
.. note::
Note: After you initiate multipart upload and upload one or more
parts, you must either complete or abort multipart upload in order
to stop getting charged for storage of the uploaded parts. Only
after you either complete or abort multipart upload, Amazon S3
frees up the parts storage and stops charging you for the parts
storage.
:type key_name: string
:param key_name: The name of the key that will ultimately
result from this multipart upload operation. This will be
exactly as the key appears in the bucket after the upload
process has been completed.
:type headers: dict
:param headers: Additional HTTP headers to send and store with the
resulting key in S3.
:type reduced_redundancy: boolean
:param reduced_redundancy: In multipart uploads, the storage
class is specified when initiating the upload, not when
uploading individual parts. So if you want the resulting
key to use the reduced redundancy storage class set this
flag when you initiate the upload.
:type metadata: dict
:param metadata: Any metadata that you would like to set on the key
that results from the multipart upload.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key (once completed) in S3.
"""
query_args = 'uploads'
provider = self.connection.provider
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
storage_class_header = provider.storage_class_header
if storage_class_header:
headers[storage_class_header] = 'REDUCED_REDUNDANCY'
# TODO: what if the provider doesn't support reduced redundancy?
# (see boto.s3.key.Key.set_contents_from_file)
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
if metadata is None:
metadata = {}
headers = boto.utils.merge_meta(headers, metadata,
self.connection.provider)
response = self.connection.make_request('POST', self.name, key_name,
query_args=query_args,
headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 200:
resp = MultiPartUpload(self)
h = handler.XmlHandler(resp, self)
if not isinstance(body, bytes):
body = body.encode('utf-8')
xml.sax.parseString(body, h)
return resp
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def complete_multipart_upload(self, key_name, upload_id,
xml_body, headers=None):
"""
Complete a multipart upload operation.
"""
query_args = 'uploadId=%s' % upload_id
if headers is None:
headers = {}
headers['Content-Type'] = 'text/xml'
response = self.connection.make_request('POST', self.name, key_name,
query_args=query_args,
headers=headers, data=xml_body)
contains_error = False
body = response.read().decode('utf-8')
# Some errors will be reported in the body of the response
# even though the HTTP response code is 200. This check
# does a quick and dirty peek in the body for an error element.
if body.find('<Error>') > 0:
contains_error = True
boto.log.debug(body)
if response.status == 200 and not contains_error:
resp = CompleteMultiPartUpload(self)
h = handler.XmlHandler(resp, self)
if not isinstance(body, bytes):
body = body.encode('utf-8')
xml.sax.parseString(body, h)
# Use a dummy key to parse various response headers
# for versioning, encryption info and then explicitly
# set the completed MPU object values from key.
k = self.key_class(self)
k.handle_version_headers(response)
k.handle_encryption_headers(response)
resp.version_id = k.version_id
resp.encrypted = k.encrypted
return resp
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def cancel_multipart_upload(self, key_name, upload_id, headers=None):
"""
To verify that all parts have been removed, so you don't get charged
for the part storage, you should call the List Parts operation and
ensure the parts list is empty.
"""
query_args = 'uploadId=%s' % upload_id
response = self.connection.make_request('DELETE', self.name, key_name,
query_args=query_args,
headers=headers)
body = response.read()
boto.log.debug(body)
if response.status != 204:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def delete(self, headers=None):
return self.connection.delete_bucket(self.name, headers=headers)
def get_tags(self):
response = self.get_xml_tags()
tags = Tags()
h = handler.XmlHandler(tags, self)
if not isinstance(response, bytes):
response = response.encode('utf-8')
xml.sax.parseString(response, h)
return tags
def get_xml_tags(self):
response = self.connection.make_request('GET', self.name,
query_args='tagging',
headers=None)
body = response.read()
if response.status == 200:
return body
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_xml_tags(self, tag_str, headers=None, query_args='tagging'):
if headers is None:
headers = {}
md5 = boto.utils.compute_md5(StringIO(tag_str))
headers['Content-MD5'] = md5[1]
headers['Content-Type'] = 'text/xml'
if not isinstance(tag_str, bytes):
tag_str = tag_str.encode('utf-8')
response = self.connection.make_request('PUT', self.name,
data=tag_str,
query_args=query_args,
headers=headers)
body = response.read()
if response.status != 204:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
return True
def set_tags(self, tags, headers=None):
return self.set_xml_tags(tags.to_xml(), headers=headers)
def delete_tags(self, headers=None):
response = self.connection.make_request('DELETE', self.name,
query_args='tagging',
headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 204:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
# ----------------------------------------------------------------------
'''
We need to process the DBF files Brian provides, please!
smpl102802010406.dbf
fp10280201
X 26915
Y 26915
ec3m102802 elevation in cm
fpLen10280 flow path length in cm
GenLU10280 land use code
gSSURGO soils code
'''
import dbflib
import glob
import os
import pandas as pd
from pandas import Series
import psycopg2
import datetime
import sys
SCENARIO = sys.argv[1]
PGCONN = psycopg2.connect(database='idep', host='iemdb')
cursor = PGCONN.cursor()
def get_flowpath(huc12, fpath):
    """Return the database fid for this flowpath, creating it if missing.

    Looks up flowpaths by (huc_12, fpath, scenario); when no row exists,
    inserts one and relies on the RETURNING clause so a single fetchone()
    serves both the SELECT and the INSERT path.
    """
    cursor.execute("""
    SELECT fid from flowpaths where huc_12 = %s and fpath = %s
    and scenario = %s
    """, (huc12, fpath, SCENARIO))
    if cursor.rowcount == 0:
        cursor.execute("""
        INSERT into flowpaths(huc_12, fpath, scenario)
        values (%s, %s, %s) RETURNING fid
        """, (huc12, fpath, SCENARIO))
    return cursor.fetchone()[0]
def get_data(fn):
    ''' Load a DBF file into a pandas DF, one dict per DBF record. '''
    dbf = dbflib.open(fn)
    records = [dbf.read_record(idx) for idx in range(dbf.record_count())]
    return pd.DataFrame(records)
def process(fn, df):
    '''Process a given filename into the database.

    Column names in the DBF embed truncated HUC codes: g4<huc8> holds the
    flowpath id, g4Len<huc8[:5]> the flow path length (cm) and
    ep3m<huc8[:6]> the elevation (cm).  For each flowpath the old points
    are deleted, each sampled point is inserted with a locally computed
    slope, and the flowpath linestring geometry is rebuilt.
    '''
    # Filename convention appears to be smpl<huc12>.dbf -- TODO confirm.
    huc12 = fn[4:-4]
    huc8 = huc12[:-4]
    #print df.keys()
    flowpaths = Series(df['g4%s' % (huc8,)]).unique()
    flowpaths.sort()
    for flowpath in flowpaths:
        # All points for this flowpath, ordered by distance along the path.
        # NOTE(review): DataFrame.sort/.irow are legacy pandas APIs.
        df2 = df[df['g4%s' % (huc8,)]==flowpath]
        df2 = df2.sort('g4Len%s' % (huc8[:5],), ascending=True)
        fid = get_flowpath(huc12, flowpath)
        # Replace any previously loaded points for this fid/scenario.
        cursor.execute("""DELETE from flowpath_points WHERE flowpath = %s
            and scenario = %s""",
            (fid, SCENARIO))
        lstring = []
        sz = len(df2.index)
        for segid, row in enumerate(df2.iterrows()):
            row = df2.irow(segid)
            # Slope uses the next point; the last point looks backwards.
            if (segid+1) == sz: # Last row!
                row2 = df2.irow(segid-1)
            else:
                row2 = df2.irow(segid+1)
            dy = row['ep3m%s' % (huc8[:6],)] - row2['ep3m%s' % (huc8[:6],)]
            dx = row2['g4Len%s' % (huc8[:5],)] - row['g4Len%s' % (huc8[:5],)]
            if dx == 0:
                slope = 0
            else:
                slope = dy/dx
            # CropRotatn is indexed [0..5] below; an empty value becomes
            # six NULL landuse columns.
            lu = row['CropRotatn']
            if lu.strip() == "":
                lu = [None, None, None, None, None, None]
            sql = """INSERT into flowpath_points(flowpath, segid,
            elevation, length, surgo, management, slope, geom,
            landuse1, landuse2, landuse3, landuse4, landuse5, landuse6,
            scenario)
            values(%s, %s , %s,
            %s, %s, %s, %s, 'SRID=26915;POINT(%s %s)',
            %s, %s, %s, %s, %s, %s, %s);
            """
            # Elevation and length are converted from cm to m (/100.).
            args = (fid, segid,
                    row['ep3m%s' % (huc8[:6],)]/100.,
                    row['g4Len%s' % (huc8[:5],)]/100.,
                    row['gSSURGO'],
                    row['Management'], slope, row['X'],
                    row['Y'], lu[0], lu[1], lu[2],
                    lu[3], lu[4], lu[5], SCENARIO)
            cursor.execute(sql, args)
            lstring.append("%s %s" % (row['X'], row['Y']))
        if len(lstring) > 1:
            #print '%s %s Save flowpath %s GEOM, length %s' % (huc12, flowpath, fid,
            #    len(lstring))
            sql = """UPDATE flowpaths SET geom = 'SRID=26915;LINESTRING(%s)'
            WHERE fid = %s and scenario = %s""" % (",".join(lstring), fid,
                                                   SCENARIO)
            cursor.execute(sql)
        else:
            # A linestring needs at least two points; drop degenerate paths.
            print '---> ERROR: %s flowpath %s < 2 points, deleting' % (
                fn, flowpath,)
            cursor.execute("""DELETE from flowpath_points
                where flowpath = %s and scenario = %s""", (fid, SCENARIO))
            cursor.execute("""DELETE from flowpaths where fid = %s
                and scenario = %s""", (fid, SCENARIO))
def main():
""" Lets go main go """
os.chdir("../../data/dbfsGo4")
fns = glob.glob("*.dbf")
total = len(fns)
for i, fn in enumerate(fns):
sts = datetime.datetime.now()
if i > 0 and i % 100 == 0:
PGCONN.commit()
cursor = PGCONN.cursor()
df = get_data(fn)
process(fn, df)
ets = datetime.datetime.now()
print '%4i/%4i %7.3fs %s' % (i+1, total,
(ets - sts).microseconds / 100000. + (ets - sts).seconds, fn)
cursor.close()
PGCONN.commit()
if __name__ == '__main__':
main()
# support latest flowpaths
'''
We need to process the DBF files Brian provides, please!
smpl102802010406.dbf
fp10280201
X 26915
Y 26915
ec3m102802 elevation in cm
fpLen10280 flow path length in cm
GenLU10280 land use code
gSSURGO soils code
'''
import dbflib
import glob
import os
import pandas as pd
from pandas import Series
import psycopg2
import datetime
import sys
SCENARIO = sys.argv[1]
PGCONN = psycopg2.connect(database='idep', host='iemdb')
cursor = PGCONN.cursor()
def get_flowpath(huc12, fpath):
    """Return the database fid for this flowpath, creating it if missing.

    Looks up flowpaths by (huc_12, fpath, scenario); when no row exists,
    inserts one and relies on the RETURNING clause so a single fetchone()
    serves both the SELECT and the INSERT path.
    """
    cursor.execute("""
    SELECT fid from flowpaths where huc_12 = %s and fpath = %s
    and scenario = %s
    """, (huc12, fpath, SCENARIO))
    if cursor.rowcount == 0:
        cursor.execute("""
        INSERT into flowpaths(huc_12, fpath, scenario)
        values (%s, %s, %s) RETURNING fid
        """, (huc12, fpath, SCENARIO))
    return cursor.fetchone()[0]
def get_data(fn):
    ''' Load a DBF file into a pandas DF, one dict per DBF record. '''
    dbf = dbflib.open(fn)
    records = [dbf.read_record(idx) for idx in range(dbf.record_count())]
    return pd.DataFrame(records)
def process(fn, df):
    '''Process a given filename into the database.

    Column names in the DBF embed truncated HUC codes: fp<huc8> holds the
    flowpath id, fpLen<huc8[:5]> the flow path length (cm) and
    ep3m<huc8[:6]> the elevation (cm).  For each flowpath the old points
    are deleted, each sampled point is inserted with a locally computed
    slope, and the flowpath linestring geometry is rebuilt.
    '''
    # Filename convention appears to be smpl<huc12>.dbf -- TODO confirm.
    huc12 = fn[4:-4]
    huc8 = huc12[:-4]
    #print df.keys()
    flowpaths = Series(df['fp%s' % (huc8,)]).unique()
    flowpaths.sort()
    for flowpath in flowpaths:
        # All points for this flowpath, ordered by distance along the path.
        # NOTE(review): DataFrame.sort/.irow are legacy pandas APIs.
        df2 = df[df['fp%s' % (huc8,)]==flowpath]
        df2 = df2.sort('fpLen%s' % (huc8[:5],), ascending=True)
        fid = get_flowpath(huc12, flowpath)
        # Replace any previously loaded points for this fid/scenario.
        cursor.execute("""DELETE from flowpath_points WHERE flowpath = %s
            and scenario = %s""",
            (fid, SCENARIO))
        lstring = []
        sz = len(df2.index)
        for segid, row in enumerate(df2.iterrows()):
            row = df2.irow(segid)
            # Slope uses the next point; the last point looks backwards.
            if (segid+1) == sz: # Last row!
                row2 = df2.irow(segid-1)
            else:
                row2 = df2.irow(segid+1)
            dy = row['ep3m%s' % (huc8[:6],)] - row2['ep3m%s' % (huc8[:6],)]
            dx = row2['fpLen%s' % (huc8[:5],)] - row['fpLen%s' % (huc8[:5],)]
            if dx == 0:
                slope = 0
            else:
                slope = dy/dx
            # CropRotatn is indexed [0..5] below; an empty value becomes
            # six NULL landuse columns.
            lu = row['CropRotatn']
            if lu.strip() == "":
                lu = [None, None, None, None, None, None]
            sql = """INSERT into flowpath_points(flowpath, segid,
            elevation, length, surgo, management, slope, geom,
            landuse1, landuse2, landuse3, landuse4, landuse5, landuse6,
            scenario)
            values(%s, %s , %s,
            %s, %s, %s, %s, 'SRID=26915;POINT(%s %s)',
            %s, %s, %s, %s, %s, %s, %s);
            """
            # Elevation and length are converted from cm to m (/100.).
            args = (fid, segid,
                    row['ep3m%s' % (huc8[:6],)]/100.,
                    row['fpLen%s' % (huc8[:5],)]/100.,
                    row['gSSURGO'],
                    row['Management'], slope, row['X'],
                    row['Y'], lu[0], lu[1], lu[2],
                    lu[3], lu[4], lu[5], SCENARIO)
            cursor.execute(sql, args)
            lstring.append("%s %s" % (row['X'], row['Y']))
        if len(lstring) > 1:
            #print '%s %s Save flowpath %s GEOM, length %s' % (huc12, flowpath, fid,
            #    len(lstring))
            sql = """UPDATE flowpaths SET geom = 'SRID=26915;LINESTRING(%s)'
            WHERE fid = %s and scenario = %s""" % (",".join(lstring), fid,
                                                   SCENARIO)
            cursor.execute(sql)
        else:
            # A linestring needs at least two points; drop degenerate paths.
            print '---> ERROR: %s flowpath %s < 2 points, deleting' % (
                fn, flowpath,)
            cursor.execute("""DELETE from flowpath_points
                where flowpath = %s and scenario = %s""", (fid, SCENARIO))
            cursor.execute("""DELETE from flowpaths where fid = %s
                and scenario = %s""", (fid, SCENARIO))
def main():
""" Lets go main go """
os.chdir("../../data/%s" % (sys.argv[2],))
fns = glob.glob("*.dbf")
total = len(fns)
for i, fn in enumerate(fns):
sts = datetime.datetime.now()
if i > 0 and i % 100 == 0:
PGCONN.commit()
cursor = PGCONN.cursor()
df = get_data(fn)
process(fn, df)
ets = datetime.datetime.now()
print '%4i/%4i %7.3fs %s' % (i+1, total,
(ets - sts).microseconds / 100000. + (ets - sts).seconds, fn)
cursor.close()
PGCONN.commit()
if __name__ == '__main__':
main() |
from flask import render_template, flash, request
from app import app
from flask import request
from wtforms import Form, validators, TextField, SelectField, TextAreaField, SelectMultipleField, SubmitField
from wtforms.fields.html5 import DateField
from wtforms_components import DateIntervalField, DateRange
from app import getdata
import json
import datetime
from flask_admin.form.widgets import DatePickerWidget
class ReusableForm(Form):
    """Query form: a name, a free-text date range and one or more languages."""
    # Query term(s); comma-separated values are split downstream in index().
    name = TextField(validators=[validators.required()])
    # Free-text date range; index() splits on '-' and re-slices the digits,
    # so it appears to expect "MM/DD/YYYY - MM/DD/YYYY" -- TODO confirm.
    date = TextField('Start', default='Select date', validators=[validators.required()])
    # Wikipedia language editions the query runs against.
    languages = SelectMultipleField('Languages', choices=[('en', 'English'), ('it', 'Italian'), ('nl','Nederlands'), ('sv','Swedish'),('ceb','Cebuano'),('de','German'),('fr', 'French'),('ru', 'Russian'),('es','Spanish')], validators=[validators.required()])
    dataBtn = SubmitField(label='Get Data')
class TrendsForm(Form):
    """Trends form: pick a single language to fetch the latest trends for."""
    languages = SelectField('Languages', choices=[('en', 'English'), ('it', 'Italian'), ('nl','Nederlands'), ('sv','Swedish'),('ceb','Cebuano'),('de','German'),('fr', 'French'),('ru', 'Russian'),('es','Spanish')], validators=[validators.required()])
    trendBtn = SubmitField(label='Get Last Trends')
@app.route('/', methods=['GET', 'POST'])
@app.route('/index')
def index():
    """Landing page: show trends for the browser language and handle both forms.

    On GET, the Accept-Language header picks the trend language.  On POST,
    either the trends form (language switch) or the data form (named query
    over a date range) replaces the default data before rendering.
    """
    supported_languages = ['en','it','de','nl','sv','ceb','fr','ru','es']
    langs = []
    langs.append(request.accept_languages.best_match(supported_languages))
    # best_match returns None when nothing matches; fall back to Italian.
    if langs == [] or not langs or langs == [None]:
        langs = ['it']
    form = ReusableForm(request.form)
    trendForm = TrendsForm(request.form)
    # Defaults shown on a plain GET (or when neither form validates).
    data, form_input, name = getdata.acquireTrends(langs)
    articles_desc = getdata.enrichArticles(form_input.split(','), langs[0])
    if trendForm.validate() and trendForm.trendBtn.data:
        if trendForm.validate():
            langs = [trendForm.languages.data]
            data, form_input, name = getdata.acquireTrends(langs)
            articles_desc = getdata.enrichArticles(form_input.split(','), langs[0])
        else:
            name = 'All the form fields are required'
            data = []
    if form.validate() and form.dataBtn.data:
        if form.validate():
            # assumes the field looks like "MM/DD/YYYY - MM/DD/YYYY", so the
            # halves keep a stray space around the '-' -- TODO confirm
            start, end = request.form['date'].split("-")
            name = request.form['name']
            langs = form.languages.data
            s = start.replace("/", "")
            e = end.replace("/", "")
            # Re-order the digit runs into YYYYMMDD; the asymmetric slices
            # account for the trailing/leading space left by split("-").
            startDate = s[-5:-1]+s[:2]+s[2:4]
            endDate = e[-4:]+e[1:3]+e[3:5]
            form_input = name
            data, errors = getdata.launchQuery(name, startDate, endDate, langs)
            articles_desc = getdata.enrichArticles(name.split(','), langs[0])
            name = name.replace(",", " - ")
        else:
            name = 'All the form fields are required'
            data = []
    return render_template('index.html', form=form, trendForm=trendForm, data=data, name=name.replace('_',' '), query=form_input, articles_desc=articles_desc)
# removed some non-ascii languages
from flask import render_template, flash, request
from app import app
from flask import request
from wtforms import Form, validators, TextField, SelectField, TextAreaField, SelectMultipleField, SubmitField
from wtforms.fields.html5 import DateField
from wtforms_components import DateIntervalField, DateRange
from app import getdata
import json
import datetime
from flask_admin.form.widgets import DatePickerWidget
class ReusableForm(Form):
    """Query form: a name, a free-text date range and one or more languages."""
    # Query term(s); comma-separated values are split downstream in index().
    name = TextField(validators=[validators.required()])
    # Free-text date range; index() splits on '-' and re-slices the digits,
    # so it appears to expect "MM/DD/YYYY - MM/DD/YYYY" -- TODO confirm.
    date = TextField('Start', default='Select date', validators=[validators.required()])
    # Wikipedia language editions the query runs against.
    languages = SelectMultipleField('Languages', choices=[('en', 'English'), ('it', 'Italian'),('de','German'),('fr', 'French'),('es','Spanish')], validators=[validators.required()])
    dataBtn = SubmitField(label='Get Data')
class TrendsForm(Form):
    """Trends form: pick a single language to fetch the latest trends for."""
    languages = SelectField('Languages', choices=[('en', 'English'), ('it', 'Italian'),('de','German'),('fr', 'French'),('es','Spanish')], validators=[validators.required()])
    trendBtn = SubmitField(label='Get Last Trends')
@app.route('/', methods=['GET', 'POST'])
@app.route('/index')
def index():
    """Landing page: show trends for the browser language and handle both forms.

    On GET, the Accept-Language header picks the trend language.  On POST,
    either the trends form (language switch) or the data form (named query
    over a date range) replaces the default data before rendering.
    """
    supported_languages = ['en','it','de','fr','es']
    langs = []
    langs.append(request.accept_languages.best_match(supported_languages))
    # best_match returns None when nothing matches; fall back to English.
    if langs == [] or not langs or langs == [None]:
        langs = ['en']
    form = ReusableForm(request.form)
    trendForm = TrendsForm(request.form)
    # Defaults shown on a plain GET (or when neither form validates).
    data, form_input, name = getdata.acquireTrends(langs)
    articles_desc = getdata.enrichArticles(form_input.split(','), langs[0])
    if trendForm.validate() and trendForm.trendBtn.data:
        if trendForm.validate():
            langs = [trendForm.languages.data]
            data, form_input, name = getdata.acquireTrends(langs)
            articles_desc = getdata.enrichArticles(form_input.split(','), langs[0])
        else:
            name = 'All the form fields are required'
            data = []
    if form.validate() and form.dataBtn.data:
        if form.validate():
            # assumes the field looks like "MM/DD/YYYY - MM/DD/YYYY", so the
            # halves keep a stray space around the '-' -- TODO confirm
            start, end = request.form['date'].split("-")
            name = request.form['name']
            langs = form.languages.data
            s = start.replace("/", "")
            e = end.replace("/", "")
            # Re-order the digit runs into YYYYMMDD; the asymmetric slices
            # account for the trailing/leading space left by split("-").
            startDate = s[-5:-1]+s[:2]+s[2:4]
            endDate = e[-4:]+e[1:3]+e[3:5]
            form_input = name
            data, errors = getdata.launchQuery(name, startDate, endDate, langs)
            articles_desc = getdata.enrichArticles(name.split(','), langs[0])
            name = name.replace(",", " - ")
        else:
            name = 'All the form fields are required'
            data = []
    return render_template('index.html', form=form, trendForm=trendForm, data=data, name=name.replace('_',' '), query=form_input, articles_desc=articles_desc)
# ----------------------------------------------------------------------
import smc.elements.element
import smc.elements.license
import smc.api.web as web_api
from smc.api.web import SMCOperationFailure
import logging
from pprint import pprint
logger = logging.getLogger(__name__)
def host(name, ip, secondary_ip=[], comment=None):
""" Create host element
Args:
* name: name for object
* ip: ipv4 address
* comment (optional)
Returns:
None
"""
if smc.helpers.is_valid_ipv4(ip): #TODO: Change these to if x is not None:
entry_href = web_api.session.get_entry_href('host')
host = smc.elements.element.Host()
host.name = name
host.ip = ip
host.secondary_ip = secondary_ip
host.comment = comment
try:
r = web_api.session.http_post(entry_href, host.create())
logger.info("Success creating single host: %s, href: %s" % (host.name, r))
except SMCOperationFailure, e:
logger.error("Failed creating single host: %s, %s" % (host.name, e.msg))
else:
logger.error("Failed: Invalid IPv4 address specified: %s, create object: %s failed" % (ip, name))
def iprange(name, ip_range, comment=None):
""" Create iprange object
Args:
* name: name for object
* iprange: ip address range, i.e. 1.1.1.1-1.1.1.10
* comment (optional)
Returns:
None
"""
addr = ip_range.split('-') #just verify each side is valid ip addr
if len(addr) == 2: #has two parts
if not smc.helpers.is_valid_ipv4(addr[0]) or not smc.helpers.is_valid_ipv4(addr[1]):
logger.error("Invalid ip address range provided: %s" % ip_range)
return None
else:
logger.error("Invalid ip address range provided: %s" % ip_range)
return None
iprange = smc.elements.element.IpRange()
iprange.name = name
iprange.iprange = ip_range
entry_href = web_api.session.get_entry_href('address_range')
try:
r = web_api.session.http_post(entry_href, iprange.create())
logger.info("Success creating iprange object: %s, href: %s" % (iprange.name, r))
except SMCOperationFailure, e:
logger.error("Failed creating iprange object: %s, %s" % (iprange.name, e.msg))
def router(name, ip, secondary_ip=None, comment=None):
    """ Create router element
    Args:
        * name: name for object
        * ip: ipv4 address
        * secondary_ip (optional): additional addresses for this router
        * comment (optional)
    Returns:
        None
    """
    if smc.helpers.is_valid_ipv4(ip):
        entry_href = web_api.session.get_entry_href('router')
        router = smc.elements.element.Router() #TODO: Need router comment field
        router.name = name
        router.address = ip
        router.secondary_ip = secondary_ip
        try:
            r = web_api.session.http_post(entry_href, router.create())
            logger.info("Success creating router object: %s, href: %s" % (router.name, r))
        except SMCOperationFailure as e: # 'as' form is valid on py2.6+ and py3
            logger.error("Failed creating router object: %s, %s" % (router.name, e.msg))
    else:
        logger.error("Invalid IPv4 address specified: %s, create object: %s failed" % (ip, name))
def network(name, ip_network, comment=None):
    """ Create network element
    Args:
        * name: name for object
        * ip_network: ipv4 address in cidr or full netmask format (1.1.1.1/24, or 1.1.1.0/255.255.0.0)
        * comment (optional)
    Returns:
        None
    """
    cidr = smc.helpers.ipaddr_as_network(ip_network)
    if cidr:
        # BUGFIX: the entry point was fetched twice here and the second
        # result discarded; one lookup is enough.
        entry_href = web_api.session.get_entry_href('network')
        network = smc.elements.element.Network()
        network.name = name
        network.ip4_network = cidr
        network.comment = comment
        try:
            r = web_api.session.http_post(entry_href, network.create())
            logger.info("Success creating network object: %s, href: %s" % (network.name, r))
        except SMCOperationFailure as e: # 'as' form is valid on py2.6+ and py3
            logger.error("Failed creating network object: %s, %s" % (network.name, e.msg))
    else:
        logger.error("Invalid address specified for network: %s; make sure address specified is in network: %s" % (name, ip_network))
def group(name, members=None, comment=None):
    """ Create group element, optionally with members
    Members must already exist in SMC. Before being added to the group a search will be
    performed for each member specified.
    Args:
        * name: name for object
        * members list; i.e. ['element1', 'element2', etc]. Most elements can be used in a group
        * comment (optional)
    Returns:
        None
    """
    entry_href = web_api.session.get_entry_href('group')
    group = smc.elements.element.Group()
    group.name = name
    group.comment = comment
    # Mutable-default fix: None stands in for an empty member list.
    for m in members or []: #add each member
        found_member = smc.search.get_element(m)
        if found_member:
            logger.debug("Found member: %s, adding to group: %s" % (m, group.name))
            group.members.append(found_member['href'])
        else:
            logger.info("Element: %s could not be found, not adding to group" % m)
    try:
        r = web_api.session.http_post(entry_href, group.create())
        logger.info("Success creating group: %s, href: %s" % (group.name, r))
    except SMCOperationFailure as e: # 'as' form is valid on py2.6+ and py3
        logger.error("Failed creating group record: %s, %s" % (name, e.msg))
#TODO: Not finished implementing;This works if it's applied directly to a single fw, but not globally
'''def blacklist(src, dst, duration="3600"):
if smc.helpers.is_valid_ipv4(src) and smc.helpers.is_valid_ipv4(dst):
entry = smc.web_api.get_entry_href('blacklist')
bl_template = smc.helpers.get_json_template('blacklist.json')
print "Blah"
if bl_template:
bl_template['duration'] = duration
bl_template['end_point1']['ip_network'] = src + '/32'
bl_template['end_point2']['ip_network'] = dst + '/0'
print bl_template
try:
smc.web_api.http_post('http://172.18.1.150:8082/6.0/elements/fw_cluster/116/blacklist', bl_template)
except SMCOperationFailure, e:
print "Error!: %s" % e.msg
else:
#logger.error("Invalid IP address given for blacklist entry, src: %s, dst: %s" % (src,dst))
print "Invalid IP address given for blacklist entry, src: %s, dst: %s" % (src,dst)
'''
#TODO: allow interface number to be configured for initial fw
def single_fw(name, mgmt_ip, mgmt_network, interface_id=None, dns=None, fw_license=False):
    """ Create single firewall with a single management interface
    Args:
        * name: name of fw instance
        * mgmt_ip: ipv4 address of management interface (interface 0)
        * mgmt_network: netmask of mgmt ip
        * interface_id (optional): interface number for the mgmt interface
        * dns (optional): string for DNS server
        * fw_license (optional): After successful creation, try to auto-license
    Returns:
        None
    """
    if not smc.helpers.is_ipaddr_in_network(mgmt_ip, mgmt_network):
        logger.error("Management IP: %s is not in the management network: %s, cannot add single_fw" % (mgmt_ip,mgmt_network))
        return None
    available_log_servers = smc.search.get_element_by_entry_point('log_server')
    if not available_log_servers:
        logger.error("Can't seem to find an available Log Server on specified SMC, cannot add single_fw: %s" % name)
        return None
    single_fw = smc.elements.element.SingleFW()
    single_fw.name = name
    single_fw.mgmt_ip = mgmt_ip
    single_fw.mgmt_network = mgmt_network
    single_fw.interface_id = interface_id
    single_fw.dns = dns
    single_fw.fw_license = fw_license
    for found in available_log_servers:
        #TODO: If multiple log servers are present, how to handle - just get the first one
        single_fw.log_server = found['href']
    entry_href = web_api.session.get_entry_href('single_fw') #get entry point for single_fw
    new_fw = single_fw.create()
    logger.debug("Modified json for single_fw: %s" % new_fw)
    try:
        new_href = web_api.session.http_post(entry_href, new_fw)
        logger.info("Success creating single firewall: %s, new href: %s" % (single_fw.name, new_href))
        if fw_license: #fw license is specified
            logger.debug("Bind license specified, checking for available license")
            fw_from_link = smc.search.get_element_by_href(new_href)
            bind_license_href = fw_from_link['nodes'][0]['firewall_node']['link']
            bind_href = next(item for item in bind_license_href if item['rel'] == 'bind')
            logger.debug("Firewall: %s, bind license href: %s" % (single_fw.name,bind_href['href']))
            smc.elements.license.bind_license(bind_href['href']) #TODO: Check return to make sure it actually bound
            logger.info("Successfully bound license to single_fw: %s" % name)
    except SMCOperationFailure as e: # 'as' form is valid on py2.6+ and py3
        logger.error("Create single_fw: %s failed: %s" % (single_fw.name, e.msg))
def l3interface(l3fw, ip, network, interface_id=None):
    """ Add L3 interface for single FW
    Args:
        * l3fw: name of firewall to add interface to
        * ip: ip of interface
        * network: ip is validated to be in network before sending
        * interface_id: interface_id to use
    Returns:
        None
    """
    if not smc.helpers.is_ipaddr_in_network(ip, network):
        logger.error("IP address: %s is not part of the network provided: %s, cannot add interface" % (ip,network))
        return None
    network = smc.helpers.ipaddr_as_network(network) #convert to cidr in case full mask provided
    entry_href = smc.search.get_element(l3fw)
    if entry_href is not None:
        fw = web_api.session.http_get(entry_href['href'])
        element = smc.elements.element.SMCElement(fw.msg) #store current cfg
        single_fw = smc.elements.element.SingleFW(element)
        single_fw.name = l3fw
        fw_json = single_fw.add_interface(ip, network, interface_id)
        logger.debug("fw href: %s, fw_json: %s, etag: %s" % (entry_href['href'], fw_json, fw.etag))
        try:
            # PUT with etag: optimistic concurrency against the fetched config.
            web_api.session.http_put(entry_href['href'], fw_json, fw.etag)
            logger.info("Successfully added interface to fw: %s as interface_id=%s" % (single_fw.name,single_fw.interface_id))
        except SMCOperationFailure as e: # 'as' form is valid on py2.6+ and py3
            logger.error("Error occurred adding l3 interface: %s to fw: %s" % (single_fw.name, e.msg))
    else:
        logger.error("Can't find layer 3 FW specified: %s, cannot add interface" % l3fw)
def l3route(engine, gw, network, interface_id):
    """ Add route to l3fw
    This could be added to any engine type. Non-routable engine roles (L2/IPS) may
    still require route/s defined on the L3 management interface
    Args:
        * engine: name of engine to add route
        * gw: next hop router object
        * network: next hop network behind gw
        * interface_id: interface to apply route
    Returns:
        None
    """
    href = smc.search.get_element(engine) #ref to engine
    if href is None:
        logger.error("Can't find engine node: %s, cannot process route add" % engine)
        return None
    engine_href = href['href']
    router_element = smc.search.get_element(gw, 'router') #router object
    if router_element is None:
        logger.error("Can't find router object: %s, cannot process route add" % gw)
        return None
    network_element = smc.search.get_element(network, 'network')
    if network_element is None:
        logger.error("Can't find network object: %s, cannot process route add" % network)
        return None
    node = web_api.session.http_get(engine_href) #get node json
    route_link = next(item for item in node.msg['link'] if item['rel'] == 'routing')
    route_href = route_link['href'] #href link to routing
    routing_node = web_api.session.http_get(route_href)
    element = smc.elements.element.SMCElement(routing_node.msg) #save return routing json to element
    route = smc.elements.element.Route(element)
    result = web_api.session.http_get(router_element['href']) #populate router info
    route.gw_href = router_element['href']
    route.gw_ip = result.msg['address']
    route.gw_name = result.msg['name']
    result = web_api.session.http_get(network_element['href']) #dest net info
    route.network_href = network_element['href']
    route.network_ip = result.msg['ipv4_network']
    route.network_name = result.msg['name']
    route.interface_id = interface_id
    routing_json = route.create()
    if routing_json is not None:
        logger.debug("Modified routing node: %s" % routing_json)
        try:
            web_api.session.http_put(route_href, routing_json, routing_node.etag)
            logger.info("Successfully added route: %s to node: %s" % (network, engine))
        except SMCOperationFailure as e: # 'as' form is valid on py2.6+ and py3
            logger.error("Failed adding route: %s, to node: %s, %s" % (network, engine, e.msg))
    else:
        logger.error("Can not find specified interface: %s for route add, double check the interface configuration" % route.interface_id)
# Placeholders for engine types not yet supported by this module.
def cluster_fw(data):
    # TODO: cluster firewall creation not yet implemented
    pass
def single_ips(data):
    # TODO: single IPS engine creation not yet implemented
    pass
def cluster_ips(data):
    # TODO: clustered IPS engine creation not yet implemented
    pass
def master_engine(data):
    # TODO: master engine creation not yet implemented
    pass
def virtual_ips(data):
    # TODO: virtual IPS engine creation not yet implemented
    pass
def virtual_fw(data):
    # TODO: virtual firewall creation not yet implemented
    pass
if __name__ == '__main__':
    # Ad-hoc integration test driven against a live SMC server.
    # NOTE(review): server URL and API key are hard-coded credentials —
    # these should move to configuration before this file is shared.
    web_api.session.login('http://172.18.1.150:8082', 'EiGpKD4QxlLJ25dbBEp20001')
    import time
    start_time = time.time()
    #Test create hosts, networks, group and routers
    smc.create.host('aidan', '23.23.23.23')
    smc.create.group('lepagegroup', comment='test comments - see this')
    smc.create.network('hostbitsnotinnetwork', '1.2.3.0/255.255.252.0')
    smc.create.network('goodnetwork', '1.2.0.0/255.255.252.0')
    smc.create.network('networkwithcidr', '1.3.0.0/24', 'created by api tool')
    smc.create.router('gatewayrouter', '5.5.5.5')
    smc.remove.element('aidan')
    smc.remove.element('lepagegroup')
    # Disabled l3route test cases (kept verbatim):
    '''
#Test l3route creation
smc.create.l3route('myfw7', '192.18.1.80', 'Any network', 0) #Unknown host
smc.create.l3route('myfw4', '192.18.1.100', 'Any network', 0) #Unknown gw
smc.create.l3route('myfw4', '192.18.1.100', 'Any2 network', 0) #Unknown network
smc.create.l3route('myfw4', '172.18.1.80', 'Any network', 0) #Good
'''
    #Test single_fw, add interfaces and routes
    smc.remove.element('myfw')
    time.sleep(10)  # give the SMC time to finish the delete before re-creating
    #Create the objects required for routes
    smc.create.router('172.18.1.250', '172.18.1.250') #name, #ip
    smc.create.router('172.20.1.250', '172.20.1.250') #name, #ip
    smc.create.network('192.168.3.0/24', '192.168.3.0/24') #name, #ip
    smc.create.single_fw('myfw', '172.18.1.254', '172.18.1.0/24', dns='5.5.5.5', fw_license=True)
    smc.create.l3interface('myfw', '10.10.0.1', '10.10.0.0/16', 3)
    smc.create.l3interface('myfw', '172.20.1.254', '172.20.1.0/255.255.255.0', 6)
    smc.create.l3route('myfw', '172.18.1.250', 'Any network', 0) #Next hop, dest network, interface
    smc.create.l3route('myfw', '172.20.1.250', '192.168.3.0/24', 6)
    print("--- %s seconds ---" % (time.time() - start_time))
    web_api.session.logout()
# TODO: remove web api calls and use inheritance
import logging
import smc.elements.element
import smc.elements.license
import smc.api.web as web_api
import smc.api.common as common
logger = logging.getLogger(__name__)
def host(name, ip, secondary_ip=None, comment=None):
    """ Create host object
    Args:
        * name: name, must be unique
        * ip: ip address of host
        * secondary_ip[] (optional): optional additional IP's for host
        * comment (optional)
    Returns:
        None
    """
    # Mutable-default fix: a shared [] default would leak state between calls.
    secondary_ip = [] if secondary_ip is None else secondary_ip
    if smc.helpers.is_valid_ipv4(ip):
        entry_href = web_api.session.get_entry_href('host')
        host = smc.elements.element.Host()
        host.href = entry_href
        host.type = "host"
        host.name = name
        host.ip = ip
        host.secondary_ip = secondary_ip
        host.comment = comment
        common._create(host.create())
    else:
        logger.error("Failed: Invalid IPv4 address specified: %s, create object: %s failed" % (ip, name))
def iprange(name, ip_range, comment=None):
    """ Create iprange object
    Args:
        * name: name for object
        * ip_range: ip address range, i.e. 1.1.1.1-1.1.1.10
        * comment (optional)
    Returns:
        None
    """
    # Both endpoints of "a-b" must be valid IPv4 addresses.
    bounds = ip_range.split('-')
    valid = (len(bounds) == 2
             and smc.helpers.is_valid_ipv4(bounds[0])
             and smc.helpers.is_valid_ipv4(bounds[1]))
    if not valid:
        logger.error("Invalid ip address range provided: %s" % ip_range)
        return None
    element = smc.elements.element.IpRange()
    element.href = web_api.session.get_entry_href('address_range')
    element.type = "address range"
    element.name = name
    element.iprange = ip_range
    common._create(element.create())
def router(name, ip, secondary_ip=None, comment=None):
    """ Create router element
    Args:
        * name: name for object
        * ip: ipv4 address
        * secondary_ip (optional): additional addresses
        * comment (optional)
    Returns:
        None
    """
    # Guard clause: refuse anything that isn't a valid IPv4 address.
    if not smc.helpers.is_valid_ipv4(ip):
        logger.error("Invalid IPv4 address specified: %s, create object: %s failed" % (ip, name))
        return
    element = smc.elements.element.Router() #TODO: Need router comment field
    element.href = web_api.session.get_entry_href('router')
    element.type = "router"
    element.name = name
    element.address = ip
    element.secondary_ip = secondary_ip
    common._create(element.create())
def network(name, ip_network, comment=None):
    """ Create network element
    Args:
        * name: name for object
        * ip_network: ipv4 address in cidr or full netmask format (1.1.1.1/24, or 1.1.1.0/255.255.0.0)
        * comment (optional)
    Returns:
        None
    """
    # Normalizes either "a.b.c.d/nn" or "a.b.c.d/mask" to CIDR; falsy on failure.
    cidr = smc.helpers.ipaddr_as_network(ip_network)
    if not cidr:
        logger.error("Invalid address specified for network: %s; make sure address specified is in network: %s" % (name, ip_network))
        return
    element = smc.elements.element.Network()
    element.href = web_api.session.get_entry_href('network')
    element.type = "network"
    element.name = name
    element.ip4_network = cidr
    element.comment = comment
    common._create(element.create())
def group(name, members=None, comment=None):
    """ Create group element, optionally with members
    Members must already exist in SMC. Before being added to the group a search will be
    performed for each member specified.
    Args:
        * name: name for object
        * members list; i.e. ['element1', 'element2', etc]. Most elements can be used in a group
        * comment (optional)
    Returns:
        None
    """
    entry_href = web_api.session.get_entry_href('group')
    group = smc.elements.element.Group()
    group.href = entry_href
    group.type = "group"
    group.name = name
    group.comment = comment
    # Mutable-default fix: None stands in for an empty member list.
    for m in members or []: #add each member
        found_member = smc.search.get_element(m)
        if found_member:
            logger.debug("Found member: %s, adding to group: %s" % (m, group.name))
            group.members.append(found_member['href'])
        else:
            logger.info("Element: %s could not be found, not adding to group" % m)
    common._create(group.create())
#TODO: Not finished implementing;This works if it's applied directly to a single fw, but not globally
'''def blacklist(src, dst, duration="3600"):
if smc.helpers.is_valid_ipv4(src) and smc.helpers.is_valid_ipv4(dst):
entry = smc.web_api.get_entry_href('blacklist')
bl_template = smc.helpers.get_json_template('blacklist.json')
print "Blah"
if bl_template:
bl_template['duration'] = duration
bl_template['end_point1']['ip_network'] = src + '/32'
bl_template['end_point2']['ip_network'] = dst + '/0'
print bl_template
try:
smc.web_api.http_post('http://172.18.1.150:8082/6.0/elements/fw_cluster/116/blacklist', bl_template)
except SMCOperationFailure, e:
print "Error!: %s" % e.msg
else:
#logger.error("Invalid IP address given for blacklist entry, src: %s, dst: %s" % (src,dst))
print "Invalid IP address given for blacklist entry, src: %s, dst: %s" % (src,dst)
'''
#TODO: allow interface number to be configured for initial fw
def single_fw(name, mgmt_ip, mgmt_network, interface_id=None, dns=None, fw_license=False):
    """ Create single firewall with a single management interface
    Args:
        * name: name of fw instance
        * mgmt_ip: ipv4 address of management interface (interface 0)
        * mgmt_network: netmask of mgmt ip
        * interface_id (optional): interface number for the mgmt interface
        * dns (optional): string for DNS server
        * fw_license (optional): After successful creation, try to auto-license
    Returns:
        None
    """
    if not smc.helpers.is_ipaddr_in_network(mgmt_ip, mgmt_network):
        logger.error("Management IP: %s is not in the management network: %s, cannot add single_fw" % (mgmt_ip,mgmt_network))
        return None
    available_log_servers = smc.search.get_element_by_entry_point('log_server')
    if not available_log_servers:
        logger.error("Can't seem to find an available Log Server on specified SMC, cannot add single_fw: %s" % name)
        return None
    entry_href = web_api.session.get_entry_href('single_fw') #get entry point for single_fw
    single_fw = smc.elements.element.SingleFW()
    single_fw.href = entry_href
    single_fw.type = "single_fw"
    single_fw.dns = dns
    single_fw.name = name
    single_fw.mgmt_ip = mgmt_ip
    single_fw.fw_license = fw_license
    single_fw.mgmt_network = mgmt_network
    single_fw.interface_id = interface_id
    for found in available_log_servers:
        #TODO: If multiple log servers are present, how to handle - just get the first one
        single_fw.log_server = found['href']
    common._create(single_fw.create())
    # BUGFIX: was a py2-only `print` statement (SyntaxError under py3) used
    # as a debug leftover; route it through the module logger instead.
    logger.debug("After update, have href: %s" % single_fw.href)
    #TODO: re-enable license binding once the new create path returns the href:
    # if fw_license: #fw license is specified
    #     logger.debug("Bind license specified, checking for available license")
    #     fw_from_link = smc.search.get_element_by_href(new_href)
    #     bind_license_href = fw_from_link['nodes'][0]['firewall_node']['link']
    #     bind_href = next(item for item in bind_license_href if item['rel'] == 'bind')
    #     logger.debug("Firewall: %s, bind license href: %s" % (single_fw.name,bind_href['href']))
    #     smc.elements.license.bind_license(bind_href['href']) #TODO: Check return to make sure it actually bound
    #     logger.info("Successfully bound license to single_fw: %s" % name)
def l3interface(l3fw, ip, network, interface_id=None):
    """ Add L3 interface for single FW
    Args:
        * l3fw: name of firewall to add interface to
        * ip: ip of interface
        * network: ip is validated to be in network before sending
        * interface_id: interface_id to use
    Returns:
        None
    """
    if not smc.helpers.is_ipaddr_in_network(ip, network):
        logger.error("IP address: %s is not part of the network provided: %s, cannot add interface" % (ip,network))
        return None
    # Accept a full netmask too; normalize to CIDR before sending.
    cidr = smc.helpers.ipaddr_as_network(network)
    found = smc.search.get_element(l3fw)
    if found is None:
        logger.error("Can't find layer 3 FW specified: %s, cannot add interface" % l3fw)
        return
    fw_href = found['href']
    current_cfg = web_api.session.http_get(fw_href)
    fw = smc.elements.element.SingleFW()
    fw.type = "interface"
    fw.href = fw_href
    fw.etag = current_cfg.etag
    fw.name = l3fw
    fw.element = current_cfg.msg
    updated = fw.add_interface(ip, cidr, interface_id)
    logger.debug("href: %s, json: %s, etag: %s" % (fw.href, fw.json, fw.etag))
    common._update(updated)
def l3route(engine, gw, network, interface_id):
    """ Add route to l3fw
    This could be added to any engine type. Non-routable engine roles (L2/IPS) may
    still require route/s defined on the L3 management interface
    Args:
        * engine: name of engine to add route
        * gw: next hop router object
        * network: next hop network behind gw
        * interface_id: interface to apply route
    Returns:
        None
    """
    # Resolve all three names first; bail out with a log on any miss.
    href = smc.search.get_element(engine) #ref to engine
    if href is None:
        logger.error("Can't find engine node: %s, cannot process route add" % engine)
        return None
    engine_href = href['href']
    router_element = smc.search.get_element(gw, 'router') #router object
    if router_element is None:
        logger.error("Can't find router object: %s, cannot process route add" % gw)
        return None
    network_element = smc.search.get_element(network, 'network')
    if network_element is None:
        logger.error("Can't find network object: %s, cannot process route add" % network)
        return None
    # Fetch the engine's routing sub-resource; the PUT goes back to it.
    node = web_api.session.http_get(engine_href) #get node json
    route_link = next(item for item in node.msg['link'] if item['rel'] == 'routing')
    route_href = route_link['href'] #http put back to this node
    routing_node = web_api.session.http_get(route_href)
    route = smc.elements.element.Route()
    route.name = engine
    route.type = "route"
    route.href = route_href
    route.etag = routing_node.etag
    route.element = routing_node.msg
    result = web_api.session.http_get(router_element['href']) #populate router info
    route.gw_href = router_element['href']
    route.gw_ip = result.msg['address']
    route.gw_name = result.msg['name']
    result = web_api.session.http_get(network_element['href']) #dest net info
    route.network_href = network_element['href']
    route.network_ip = result.msg['ipv4_network']
    route.network_name = result.msg['name']
    route.interface_id = interface_id
    # create() returns None when interface_id doesn't match a configured interface.
    routing_json = route.create()
    if routing_json is not None:
        common._update(routing_json)
    else:
        logger.error("Can not find specified interface: %s for route add, double check the interface configuration" % route.interface_id)
# Placeholders for engine types not yet supported by this module.
def cluster_fw(data):
    # TODO: cluster firewall creation not yet implemented
    pass
def single_ips(data):
    # TODO: single IPS engine creation not yet implemented
    pass
def cluster_ips(data):
    # TODO: clustered IPS engine creation not yet implemented
    pass
def master_engine(data):
    # TODO: master engine creation not yet implemented
    pass
def virtual_ips(data):
    # TODO: virtual IPS engine creation not yet implemented
    pass
def virtual_fw(data):
    # TODO: virtual firewall creation not yet implemented
    pass
if __name__ == '__main__':
    # Ad-hoc integration test driven against a live SMC server.
    # NOTE(review): server URL and API key are hard-coded credentials —
    # these should move to configuration before this file is shared.
    web_api.session.login('http://172.18.1.150:8082', 'EiGpKD4QxlLJ25dbBEp20001')
    import time
    start_time = time.time()
    smc.create.host('dlepage', '5.6.7.8')
    #Test create hosts, networks, group and routers
    smc.create.host('aidan', '23.23.23.23')
    smc.create.iprange('myrange', '5.5.5.5-5.5.5.6')
    smc.create.group('lepagegroup', comment='test comments - see this')
    smc.create.network('hostbitsnotinnetwork', '1.2.3.0/255.255.252.0')
    smc.create.network('goodnetwork', '1.2.0.0/255.255.252.0')
    smc.create.network('networkwithcidr', '1.3.0.0/24', 'created by api tool')
    smc.create.router('gatewayrouter', '5.5.5.5')
    smc.create.iprange('myrange', '5.5.5.5-5.5.5.6')
    smc.remove.element('aidan')
    smc.remove.element('lepagegroup')
    # Disabled l3route test cases (kept verbatim):
    '''
#Test l3route creation
smc.create.l3route('myfw7', '192.18.1.80', 'Any network', 0) #Unknown host
smc.create.l3route('myfw4', '192.18.1.100', 'Any network', 0) #Unknown gw
smc.create.l3route('myfw4', '192.18.1.100', 'Any2 network', 0) #Unknown network
smc.create.l3route('myfw4', '172.18.1.80', 'Any network', 0) #Good
'''
    #Test single_fw, add interfaces and routes
    smc.remove.element('myfw')
    time.sleep(10)  # give the SMC time to finish the delete before re-creating
    #Create the objects required for routes
    smc.create.router('172.18.1.250', '172.18.1.250') #name, #ip
    smc.create.router('172.20.1.250', '172.20.1.250') #name, #ip
    smc.create.network('192.168.3.0/24', '192.168.3.0/24') #name, #ip '''
    smc.create.single_fw('myfw', '172.18.1.254', '172.18.1.0/24', dns='5.5.5.5', fw_license=True)
    smc.create.l3interface('myfw', '10.10.0.1', '10.10.0.0/16', 3)
    #time.sleep(10)
    smc.create.l3interface('myfw', '172.20.1.254', '172.20.1.0/255.255.255.0', 6)
    smc.create.l3route('myfw', '172.18.1.250', 'Any network', 0) #Next hop, dest network, interface
    smc.create.l3route('myfw', '172.20.1.250', '192.168.3.0/24', 6)
    print("--- %s seconds ---" % (time.time() - start_time))
    web_api.session.logout()
|
# -*- coding: iso-8859-1 -*-
import socket, base64, codecs, os, hashlib, re
from fractions import Fraction
from abc import ABCMeta, abstractmethod, abstractproperty
# Raised when a server response is malformed or reports an error.
class EResponse(Exception): pass
# Raised when the same post md5 shows up twice in one result.
class EDuplicate(EResponse): pass
def _uni(s):
    """Return *s* as unicode: byte strings are decoded as UTF-8, falling
    back to ISO-8859-1 (which accepts any byte sequence) for legacy data."""
    if type(s) is not unicode:
        try:
            s = s.decode("utf-8")
        except Exception:
            s = s.decode("iso-8859-1")
    return s
def _utf(s, allow_space=False):
    """Normalize *s* to a UTF-8 byte string for the wire.
    Unless allow_space is set, asserts the value contains no space
    (space is the protocol's token separator)."""
    s = _uni(s)
    if not allow_space: assert u" " not in s
    return s.encode("utf-8")
def _tagspec(type, value):
if value[0] in "~!":
type = value[0] + type
value = value[1:]
return type + value
def _enc(str):
    """Encode a string for the wire: UTF-8, NUL-padded to a multiple of
    three bytes (so the base64 form needs no '=' padding), then base64
    with the URL-safe '_-' alternative characters."""
    str = _utf(str, True)
    while len(str) % 3: str += "\x00"
    return base64.b64encode(str, "_-")
def _dec(enc):
    """Inverse of _enc: base64-decode (with '_-' characters) and strip
    the trailing NUL padding; empty input decodes to u""."""
    if not enc: return u""
    enc = _utf(enc)
    str = base64.b64decode(enc, "_-")
    while str[-1] == "\x00": str = str[:-1]
    return str.decode("utf-8")
# The server transmits numeric fields in hex.
_p_hex = lambda x: int(x, 16)
# Per-field parsers applied when *reading* search results from the server;
# fields not listed here are kept as the raw string.
_field_sparser = {
    "created"      : _p_hex,
    "imgdate"      : _p_hex,
    "imgdate_fuzz" : int,
    "width"        : _p_hex,
    "height"       : _p_hex,
    "score"        : int,
    "rotate"       : int,
    "source"       : _dec,
    "title"        : _dec,
}
# Client-side serializers for values sent *to* the server.
_p_int = lambda i: str(int(i))
_p_hexint = lambda i: "%x" % (int(i),)
def _p_str(val):
    """Serialize a string field; spaces are forbidden (token separator)."""
    val = _utf(val)
    assert " " not in val
    return val
def _p_date(val):
    """Serialize a date field: accepts either a numeric timestamp or an
    EXIF-style "YYYY:MM:DD HH:MM:SS" string, returned as hex seconds."""
    if isinstance(val, basestring) and not val.isdigit():
        from time import strptime, mktime
        # BUGFIX: the old code read an undefined name ('date') and threw
        # the parsed timestamp away; parse *val* and keep the result.
        val = int(mktime(strptime(val, "%Y:%m:%d %H:%M:%S")))
    return _p_hexint(val)
# Per-field serializers applied when *sending* values to the server.
_field_cparser = {
    "width"        : _p_hexint,
    "height"       : _p_hexint,
    "score"        : _p_int,
    "rotate"       : _p_int,
    "rating"       : _p_str,
    "created"      : _p_date,
    "imgdate"      : _p_date,
    "imgdate_fuzz" : _p_int,
    "ext"          : _p_str,
    "source"       : _enc,
    "title"        : _enc,
}
class CommentWrapper:
    """Wrap a file so readline/iteration skips comments
    and optionally empty lines"""
    def __init__(self, fh, allow_empty=False):
        self.fh = fh
        self.allow_empty = allow_empty
    def __iter__(self):
        return self
    def next(self):
        # py2 iterator protocol: EOF terminates iteration.
        line = self.readline()
        if not line:
            raise StopIteration()
        return line
    def readline(self):
        while True:
            line = self.fh.readline()
            if not line:
                return line  # EOF: propagate the empty string
            stripped = line.strip()
            if stripped:
                if not stripped.startswith("#"):
                    return line
            elif self.allow_empty:
                return line
class DotDict(dict):
    """A dict whose keys double as attributes; reading a missing key
    yields None instead of raising."""
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
    def __getattr__(self, name):
        # Underscore (incl. dunder) names must fail normally so protocols
        # like copy/pickle don't see phantom None attributes.
        if name.startswith("_"):
            raise AttributeError(name)
        return self.get(name)
    def __repr__(self):
        return "%s%s" % (repr(type(self)), dict.__repr__(self))
class Post(DotDict): pass
class Tag(DotDict):
    """A tag record parsed from the server's space-separated wire format."""
    def populate(self, res):
        """Fill this Tag from one response fragment; each token is a
        one-letter field code followed by its payload."""
        res = res.split()
        alias = []
        flaglist = []
        hexint = lambda s: int(s, 16)
        dummy = lambda s: s
        # Field code -> (attribute name, payload parser). The alias/flags
        # parsers accumulate into the local lists above.
        incl = {u"N": ("name", dummy),
                u"T": ("type", dummy),
                u"A": ("alias", alias.append),
                u"P": ("posts", hexint),
                u"W": ("weak_posts", hexint),
                u"F": ("flags", flaglist.append),
                u"G": ("guid", str),
                u"V": ("valuetype", str),
                }
        for data in res:
            if data[0] in incl:
                name, parser = incl[data[0]]
                self[name] = parser(data[1:])
        self.alias = alias
        if flaglist:
            # Flags become individual True-valued attributes; the raw list
            # placeholder is dropped.
            del self.flags
            for flag in flaglist:
                self[flag] = True
        # "valuetype=value" carries an encoded tag value.
        vt = (self.valuetype or "").split("=", 1)
        if len(vt) == 2:
            self.valuetype = vt[0]
            self.value = _vtparse(_dec, *vt)
class ValueType(object):
    """Represents the value of a tag.
    v.value is the value as an apropriate type (str, float, int).
    v.exact is an exact representation of the value (str, int, Fraction).
    v.fuzz is how inexact the value is.
    v.exact_fuzz is like .exact but for the fuzz.
    v.str (or str(v)) is a string representation of exact value+-fuzz.
    v.format() is this value formated for sending to server."""
    __metaclass__ = ABCMeta
    @abstractmethod
    def __init__(self): pass
    @abstractproperty
    def type(self): pass
    @abstractproperty
    def _cmp_t(self): pass
    # Class-level defaults; subclasses fill per-instance values through
    # __dict__ because __setattr__ is disabled below.
    str = ""
    value = 0
    exact = 0
    fuzz = None
    exact_fuzz = None
    def __setattr__(self, name, value):
        raise AttributeError("ValueTypes are immutable")
    def __delattr__(self, name):
        raise AttributeError("ValueTypes are immutable")
    def __str__(self):
        return self.str
    def __repr__(self):
        c = self.__class__
        return c.__module__ + "." + c.__name__ + "(" + repr(self.str) + ")"
    def __hash__(self):
        return hash(self.exact) ^ hash(self.exact_fuzz) ^ hash(self.type)
    def __cmp(self, other):
        # Guard used by the ordered comparisons: only values of the same
        # comparison class (_cmp_t) are orderable.
        if not isinstance(other, ValueType) or self._cmp_t != other._cmp_t:
            raise TypeError("Can only compare to a " + self._cmp_t)
    def __eq__(self, other):
        return type(self) == type(other) and \
               self.exact == other.exact and \
               self.exact_fuzz == other.exact_fuzz
    def __ne__(self, other):
        # BUGFIX: __ne__ must be the negation of __eq__. Previously a
        # non-ValueType operand compared as neither equal nor unequal
        # (both __eq__ and __ne__ returned False).
        if not isinstance(other, ValueType): return True
        return type(self) != type(other) or \
               self.exact != other.exact or \
               self.exact_fuzz != other.exact_fuzz
    # Ordered comparisons treat a value as the interval exact +- exact_fuzz.
    def __lt__(self, other):
        self.__cmp(other)
        return self.exact - self.exact_fuzz < other.exact + other.exact_fuzz
    def __le__(self, other):
        self.__cmp(other)
        return self.exact - self.exact_fuzz <= other.exact + other.exact_fuzz
    def __gt__(self, other):
        self.__cmp(other)
        return self.exact + self.exact_fuzz > other.exact - other.exact_fuzz
    def __ge__(self, other):
        self.__cmp(other)
        return self.exact + self.exact_fuzz >= other.exact - other.exact_fuzz
    def format(self):
        return self.str
class VTstring(ValueType):
    """Represents the value of a tag with valuetype string.
    v.value, v.exact and v.str are all the same string.
    There is no fuzz for strings."""
    type = "string"
    _cmp_t = "VTstring"
    def __init__(self, val):
        # Write through __dict__: ValueType.__setattr__ forbids assignment.
        for name in ("str", "value", "exact"):
            self.__dict__[name] = val
    # Strings order on the exact value alone, so the fuzz-interval
    # comparisons from the base class are overridden (name-mangled access
    # to the private ValueType.__cmp type guard).
    def __lt__(self, other):
        self._ValueType__cmp(other)
        return self.exact < other.exact
    def __le__(self, other):
        self._ValueType__cmp(other)
        return self.exact <= other.exact
    def __gt__(self, other):
        self._ValueType__cmp(other)
        return self.exact > other.exact
    def __ge__(self, other):
        self._ValueType__cmp(other)
        return self.exact >= other.exact
    def __str__(self):
        # py2: byte-string form; __unicode__ below returns the text form.
        return self.str.encode("utf-8")
    def __unicode__(self):
        return self.str
    def format(self):
        return _enc(self.str)
class VTnumber(ValueType):
    """Shared parsing for the numeric value types ("value[+-fuzz]")."""
    _cmp_t = "VTnumber"
    def _parse(self, v, vp, vp2, fp):
        # vp parses the exact value, vp2 converts exact -> .value,
        # fp parses the fuzz part. Writes go through __dict__ because
        # ValueType.__setattr__ is disabled.
        v = str(v)
        self.__dict__["str"] = v
        a = v.split("+-", 1)
        self.__dict__["exact"] = vp(a[0])
        self.__dict__["value"] = vp2(self.exact)
        if len(a) == 2:
            self.__dict__["exact_fuzz"] = fp(a[1])
            self.__dict__["fuzz"] = vp2(self.exact_fuzz)
        else:
            self.__dict__["fuzz"] = self.__dict__["exact_fuzz"] = 0
class VTint(VTnumber):
    """Signed integer value; decimal exact part, hex fuzz part."""
    __doc__ = ValueType.__doc__
    type = "int"
    def __init__(self, val):
        self._parse(val, int, int, _p_hex)
class VTuint(VTnumber):
    """Unsigned integer value; both parts transmitted in hex."""
    __doc__ = ValueType.__doc__
    type = "uint"
    def __init__(self, val):
        self._parse(val, _p_hex, int, _p_hex)
class VTfloat(VTnumber):
    """Float value; .exact is kept lossless as int or Fraction,
    .value is the plain float."""
    __doc__ = ValueType.__doc__
    type = "float"
    def __init__(self, val):
        def intfrac(v):
            # Prefer exact int; fall back to an exact rational.
            try:
                return int(v)
            except ValueError:
                return Fraction(v)
        self._parse(val, intfrac, float, intfrac)
class VTf_stop(VTfloat):
    """Aperture f-stop value; same representation as VTfloat."""
    __doc__ = ValueType.__doc__
    type = "f-stop"
class VTstop(VTfloat):
    """Exposure stop value; integral exact values keep int (not float)
    for .value/.fuzz."""
    __doc__ = ValueType.__doc__
    type = "stop"
    def __init__(self, val):
        VTfloat.__init__(self, val)
        if isinstance(self.exact, (int, long)):
            self.__dict__["value"] = self.exact
        if isinstance(self.exact_fuzz, (int, long)):
            self.__dict__["fuzz"] = self.exact_fuzz
# Wire valuetype name -> ValueType subclass.
valuetypes = {"string" : VTstring,
              "int"    : VTint,
              "uint"   : VTuint,
              "float"  : VTfloat,
              "f-stop" : VTf_stop,
              "stop"   : VTstop,
              }
def _vtparse(strparse, vtype, val):
    """Build a ValueType for (vtype, val); string payloads are first run
    through *strparse* (e.g. _dec) to undo the wire encoding."""
    if vtype == "string": val = strparse(val)
    return valuetypes[vtype](val)
class dbcfg(DotDict):
    """Configuration assembled from .wellpapprc files: $HOME first, then
    every directory on the path from / down to the CWD, then EXTRA_RCs.
    Later files override earlier ones; each line is "key=value"."""
    def __init__(self, RC_NAME=".wellpapprc", EXTRA_RCs=[]):
        DotDict.__init__(self, dict(tagwindow_width=840, tagwindow_height=600))
        RCs = []
        if RC_NAME:
            path = "/"
            RCs = [os.path.join(os.environ["HOME"], RC_NAME)]
            for dir in os.getcwd().split(os.path.sep):
                path = os.path.join(path, dir)
                RC = os.path.join(path, RC_NAME)
                if os.path.exists(RC): RCs.append(RC)
        for RC in RCs + EXTRA_RCs:
            self._load(RC)
    def _load(self, fn):
        # BUGFIX: use open() (file() is py2-only) and close the handle
        # deterministically instead of leaking it.
        fh = open(fn)
        try:
            for line in CommentWrapper(fh):
                line = line.strip()
                a = line.split("=", 1)
                assert(len(a) == 2)
                self[a[0]] = a[1]
        finally:
            fh.close()
class dbclient:
_prot_max_len = 4096
def __init__(self, cfg = None):
    """Record connection parameters from *cfg* (a fresh dbcfg by default).
    Lazy: no socket is opened until the first request."""
    if not cfg:
        cfg = dbcfg()
    self.cfg = cfg
    self.server = (cfg.server, int(cfg.port))
    self.userpass = None
    self.auth_ok = False
    self.is_connected = False
    self._md5re = re.compile(r"^[0-9a-f]{32}$", re.I)
    base = cfg.image_base
    if base[-1] == "/": base = base[:-1]  # normalize trailing slash
    base = re.escape(base)
    # Matches image paths of the form <base>/<x>/<xx>/<md5>.
    self._destmd5re = re.compile(r"^" + base + r"/[0-9a-f]/[0-9a-f]{2}/([0-9a-f]{32})$")
def _reconnect(self):
    """(Re)open the TCP connection to the server if it is down."""
    if self.is_connected: return
    self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.sock.connect(self.server)
    self.utfdec = codecs.getdecoder("utf8")
    self.fh = self.sock.makefile()
    self.is_connected = True
    self.auth_ok = False
    # Stored credentials survive reconnects; re-authenticate transparently.
    if self.userpass: self._send_auth()
def _writeline(self, line, retry=True):
    """Send one protocol line, reconnecting and retrying once on a
    socket failure (unless *retry* is False)."""
    self._reconnect()
    line = line + "\n"
    try:
        # BUGFIX: sendall() transmits the whole buffer; send() may write
        # only a prefix. Catch socket errors specifically rather than a
        # bare except that would also swallow e.g. KeyboardInterrupt.
        self.sock.sendall(line)
    except socket.error:
        self.is_connected = False
        if retry:
            self._reconnect()
            self.sock.sendall(line)
def _readline(self):
    """Read one line from the server and decode it as UTF-8."""
    return self.utfdec(self.fh.readline())[0]
def _parse_search(self, line, posts, wanted, props):
if line == u"OK\n": return True
if line[0] != u"R": raise EResponse(line)
if line[1] == u"E": raise EResponse(line)
if line[1] == u"R":
if props != None: props["result_count"] = int(line[2:], 16)
return
f = Post()
seen = set()
pieces = line[1:].split(" :")
for token in pieces[0].split():
type = token[0]
data = token[1:]
if type == u"P":
f.md5 = str(data)
elif type == u"F":
field, value = data.split(u"=", 1)
field = str(field)
if field in _field_sparser:
f[field] = _field_sparser[field](value)
else:
f[field] = value
else:
raise EResponse(line)
f.tags = []
f.weaktags = []
f.impltags = []
f.implweaktags = []
for piece in pieces[1:-1]:
flags, data = piece.split(" ", 1)
if flags == "I~" or flags == "~I":
ta = f.implweaktags
elif flags == "I":
ta = f.impltags
elif flags == "~":
ta = f.weaktags
else:
ta = f.tags
t = Tag()
t.populate(data)
ta.append(t)
if not f.md5: raise EResponse(line)
if f.md5 in seen: raise EDuplicate(f.md5)
seen.add(f.md5)
old = lambda n, full, weak: [t[n] for t in full] + [u"~" + t[n] for t in weak]
if not wanted or "tagname" in wanted:
f.tagname = old("name", f.tags, f.weaktags)
if not wanted or "tagguid" in wanted:
f.tagguid = old("guid", f.tags, f.weaktags)
if wanted and "implied" in wanted:
if "tagname" in wanted:
f.impltagname = old("name", f.impltags, f.implweaktags)
if "tagguid" in wanted:
f.impltagguid = old("guid", f.impltags, f.implweaktags)
else:
del f.impltags
del f.implweaktags
posts.append(f)
def _search_post(self, search, wanted = None, props = None):
self._writeline(search)
posts = []
while not self._parse_search(self._readline(), posts, wanted, props): pass
return posts
def get_post(self, md5, separate_implied = False, wanted = None):
md5 = str(md5)
if not wanted:
wanted = ["tagname", "tagguid", "tagdata", "ext", "created", "width", "height"]
if separate_implied and "implied" not in wanted: wanted.append("implied")
search = "SPM" + md5 + " F".join([""] + wanted)
posts = self._search_post(search, wanted)
if not posts or posts[0]["md5"] != md5: return None
return posts[0]
def delete_post(self, md5):
md5 = str(md5)
assert " " not in md5
cmd = "DP" + md5
self._writeline(cmd)
res = self._readline()
if res != u"OK\n": raise EResponse(res)
def _list(self, data, converter = _utf):
if not data: return []
if isinstance(data, basestring): return [converter(data)]
return map(converter, data)
def _shuffle_minus(self, pos, neg, converter):
pos = self._list(pos, converter)
neg = self._list(neg, converter)
pos1 = [t for t in pos if t[0] != "-"]
neg1 = [t[1:] for t in pos if t[0] == "-"]
pos2 = [t[1:] for t in neg if t[0] == "-"]
neg2 = [t for t in neg if t[0] != "-"]
return pos1 + pos2, neg1 + neg2
def _build_search(self, tags=None, guids=None, excl_tags=None,
excl_guids=None , wanted=None, order=None, range=None):
search = ""
tags, excl_tags = self._shuffle_minus(tags, excl_tags, _utf)
guids, excl_guids = self._shuffle_minus(guids, excl_guids, str)
for want in self._list(wanted, str):
search += "F" + want + " "
for tag in tags:
search += "T" + _tagspec("N", tag) + " "
for guid in guids:
search += "T" + _tagspec("G", guid) + " "
for tag in excl_tags:
search += "t" + _tagspec("N", tag) + " "
for guid in excl_guids:
search += "t" + _tagspec("G", guid) + " "
for o in self._list(order, str):
search += "O" + o + " "
if range != None:
assert len(range) == 2
search += "R" + ("%x" % range[0]) + ":" + ("%x" % range[1])
return search
def search_post(self, wanted=None, **kw):
search = "SP" + self._build_search(wanted=wanted, **kw)
props = DotDict()
posts = self._search_post(search, wanted, props)
return posts, props
def _send_auth(self):
self._writeline("a" + self.userpass[0] + " " + self.userpass[1], False)
if self._readline() == "OK\n": self.auth_ok = True
def auth(self, user, password):
self.userpass = (_utf(user), _utf(password))
self._send_auth()
return self.auth_ok
def _fieldspec(self, **kwargs):
f = [_utf(f) + "=" + _field_cparser[_utf(f)](kwargs[f]) for f in kwargs]
if not f: return ""
return " " + " ".join(f)
def modify_post(self, md5, **kwargs):
md5 = str(md5)
assert " " not in md5
fspec = self._fieldspec(**kwargs)
if fspec:
cmd = "MP" + md5 + fspec
self._writeline(cmd)
res = self._readline()
if res != u"OK\n": raise EResponse(res)
def add_post(self, md5, **kwargs):
cmd = "AP" + str(md5)
assert "width" in kwargs
assert "height" in kwargs
assert "ext" in kwargs
cmd += self._fieldspec(**kwargs)
self._writeline(cmd)
res = self._readline()
if res != u"OK\n": raise EResponse(res)
def _rels(self, c, md5, rels):
cmd = "R" + c + str(md5)
for rel in self._list(rels, str):
cmd += " " + rel
self._writeline(cmd)
res = self._readline()
if res != u"OK\n": raise EResponse(res)
def add_rels(self, md5, rels):
self._rels("R", md5, rels)
def remove_rels(self, md5, rels):
self._rels("r", md5, rels)
def _parse_rels(self, line, rels):
if line == u"OK\n": return True
if line[0] != u"R": raise EResponse(line)
if line[1] == u"E": raise EResponse(line)
a = str(line[1:]).split()
p = a[0]
l = []
if p in rels: l = rels[p]
for rel in a[1:]:
l.append(rel)
rels[p] = l
def post_rels(self, md5):
md5 = str(md5)
cmd = "RS" + md5
self._writeline(cmd)
rels = {}
while not self._parse_rels(self._readline(), rels): pass
if not md5 in rels: return None
return rels[md5]
def add_tag(self, name, type=None, guid=None, valuetype=None):
cmd = "ATN" + _utf(name)
if type:
cmd += " T" + _utf(type)
if guid:
cmd += " G" + _utf(guid)
if valuetype:
cmd += " V" + _utf(valuetype)
self._writeline(cmd)
res = self._readline()
if res != u"OK\n": raise EResponse(res)
def add_alias(self, name, origin_guid):
cmd = "AAG" + str(origin_guid) + " N" + _utf(name)
self._writeline(cmd)
res = self._readline()
if res != u"OK\n": raise EResponse(res)
def remove_alias(self, name):
cmd = "DAN" + _utf(name)
self._writeline(cmd)
res = self._readline()
if res != u"OK\n": raise EResponse(res)
def _addrem_implies(self, addrem, set_tag, implied_tag, priostr):
assert " " not in set_tag
assert " " not in implied_tag
implied_tag = str(implied_tag)
if implied_tag[0] == "-":
add = " i" + implied_tag[1:]
else:
add = " I" + implied_tag
cmd = "I" + addrem + str(set_tag) + add + priostr
self._writeline(cmd)
res = self._readline()
if res != u"OK\n": raise EResponse(res)
def add_implies(self, set_tag, implied_tag, priority=0):
self._addrem_implies("I", set_tag, implied_tag, ":" + str(priority))
def remove_implies(self, set_tag, implied_tag):
self._addrem_implies("i", set_tag, implied_tag, "")
def _parse_implies(self, data):
res = self._readline()
if res == u"OK\n": return
set_guid, impl_guid = map(str, res.split(" ", 1))
assert set_guid[:2] == "RI"
set_guid = set_guid[2:]
for impl_guid in impl_guid.split():
impl_guid, prio = impl_guid.split(":")
if impl_guid[0] == "i":
impl_guid = "-" + impl_guid[1:]
else:
assert impl_guid[0] == "I"
impl_guid = impl_guid[1:]
if set_guid not in data: data[set_guid] = []
data[set_guid].append((impl_guid, int(prio)))
return True
def tag_implies(self, tag, reverse=False):
tag = str(tag)
assert " " not in tag
cmd = "IR" if reverse else "IS"
self._writeline(cmd + tag)
data = {}
while self._parse_implies(data): pass
if reverse:
rev = []
for itag in data:
impl = data[itag]
assert len(impl) == 1
impl = impl[0]
assert len(impl) == 2
ttag = impl[0]
if ttag[0] == "-":
assert ttag[1:] == tag
rev.append(("-" + itag, impl[1]))
else:
assert ttag == tag
rev.append((itag, impl[1]))
return rev or None
if tag in data: return data[tag]
def merge_tags(self, into_t, from_t):
assert " " not in into_t
assert " " not in from_t
cmd = "MTG" + str(into_t) + " M" + str(from_t)
self._writeline(cmd)
res = self._readline()
if res != u"OK\n": raise EResponse(res)
def mod_tag(self, guid, name=None, type=None):
guid = _utf(guid)
assert " " not in guid
cmd = "MTG" + guid
if name:
name = _utf(name)
assert " " not in name
cmd += " N" + name
if type:
type = _utf(type)
assert " " not in type
cmd += " T" + type
self._writeline(cmd)
res = self._readline()
if res != u"OK\n": raise EResponse(res)
def _tag2spec(self, t):
if type(t) in (tuple, list):
assert len(t) == 2
g = str(t[0])
if t[1] is None: return g
return g + "=" + t[1].format()
else:
return str(t)
def tag_post(self, md5, full_tags=None, weak_tags=None, remove_tags=None):
tags = map(self._tag2spec, full_tags or []) + map(lambda t: "~" + self._tag2spec(t), weak_tags or [])
remove_tags = map(str, remove_tags or [])
init = "TP" + str(md5)
cmd = init
for tag in map(lambda t: " T" + t, tags) + map(lambda t: " t" + t, remove_tags):
assert " " not in tag[1:]
if len(cmd) + len(tag) > self._prot_max_len:
self._writeline(cmd)
res = self._readline()
if res != u"OK\n": raise EResponse(res)
cmd = init
cmd += tag
if cmd != init:
self._writeline(cmd)
res = self._readline()
if res != u"OK\n": raise EResponse(res)
def _parse_tagres(self, resdata = None):
res = self._readline()
if res == u"OK\n": return
if res[0] != u"R": raise EResponse(res)
if res[1] == u"E": raise EResponse(res)
if res[1] == u"R": return True # ignore count for now
t = Tag()
t.populate(res[1:])
if resdata != None: resdata.append(t)
return t
def find_tags(self, matchtype, name, range=None, order=None, **kw):
if kw:
filter = self._build_search(**kw)
if filter:
filter = " :" + filter
else:
filter = ""
matchtype = str(matchtype)
assert " " not in matchtype
name = _utf(name)
assert " " not in name
cmd = "ST" + matchtype + name
for o in self._list(order, str):
assert " " not in o
cmd += " O" + o
if range != None:
assert len(range) == 2
cmd += " R%x:%x" % range
self._writeline(cmd + filter)
tags = []
while self._parse_tagres(tags): pass
return tags
def _parse_tag(self, prefix, spec, pos):
if pos == -1: return None
tag = self.find_tag(spec[:pos])
ppos = spec.rfind("=", 0, pos)
if not tag: return self._parse_tag(prefix, spec, ppos)
tag = self.get_tag(tag)
if not tag or tag.valuetype in (None, "none"): return self._parse_tag(prefix, spec, ppos)
val = spec[pos + 1:]
if val:
val = _vtparse(_uni, tag.valuetype, val)
else:
val = None
return (prefix + tag.guid, val)
def parse_tag(self, spec):
spec = _utf(spec)
if spec[0] in "~-!":
prefix = spec[0]
spec = spec[1:]
else:
prefix = ""
tag = self.find_tag(spec)
if tag: return (prefix + tag, None)
return self._parse_tag(prefix, spec, spec.rfind("="))
def find_tag(self, name, resdata=None, with_prefix=False):
name = _utf(name)
if with_prefix and name[0] in "~-!":
prefix = str(name[0])
name = name[1:]
else:
prefix = ""
tags = self.find_tags("EAN", name)
if not tags: return None
assert len(tags) == 1
guid = tags[0].guid
if resdata != None: resdata.update(tags[0])
return prefix + guid
def get_tag(self, guid, with_prefix=False):
guid = _utf(guid)
if with_prefix and guid[0] in u"~-!":
prefix = guid[0]
guid = guid[1:]
else:
prefix = u""
tags = self.find_tags("EAG", guid)
if not tags: return None
assert len(tags) == 1
data = tags[0]
assert guid == data.guid
data["name"] = prefix + data["name"]
return data
def begin_transaction(self):
self._writeline("tB")
res = self._readline()
return res == u"OK\n"
def end_transaction(self):
self._writeline("tE")
res = self._readline()
return res == u"OK\n"
def thumb_path(self, md5, size):
md5 = str(md5)
return os.path.join(self.cfg.thumb_base, str(size), md5[0], md5[1:3], md5)
def pngthumb_path(self, md5, ft, size):
fn = str(md5) + "." + str(ft)
md5 = hashlib.md5(fn).hexdigest()
return os.path.join(self.cfg.thumb_base, str(size), md5[0], md5[1:3], md5)
def image_dir(self, md5):
md5 = str(md5)
return os.path.join(self.cfg.image_base, md5[0], md5[1:3])
def image_path(self, md5):
md5 = str(md5)
return os.path.join(self.image_dir(md5), md5)
def postspec2md5(self, spec, default = None):
if os.path.lexists(spec) and not os.path.isdir(spec):
# some extra magic to avoid reading the files if possible
if os.path.islink(spec):
dest = os.readlink(spec)
m = self._destmd5re.match(dest)
if m: return m.group(1)
# Even when the fuse fs returns files, bare IDs are links
aspec = spec.split("/")
afn = aspec[-1].split(".")
if len(afn) == 2 and self._md5re.match(afn[0]):
aspec[-1] = afn[0]
shortspec = "/".join(aspec)
if os.path.islink(shortspec):
dest = os.readlink(shortspec)
m = self._destmd5re.match(dest)
if m: return m.group(1)
# Oh well, hash the file.
return hashlib.md5(file(spec).read()).hexdigest()
if self._md5re.match(spec): return spec
return default
def order(self, tag, posts):
tag = str(tag)
assert " " not in tag
init = "OG" + tag
cmd = init
anything = False
for post in map(str, posts):
cmd += " P" + post
anything = True
if len(cmd) + 64 > self._prot_max_len:
self._writeline(cmd)
res = self._readline()
if res != u"OK\n": raise EResponse(res)
cmd = init + " P" + post
anything = False
if anything:
self._writeline(cmd)
res = self._readline()
if res != u"OK\n": raise EResponse(res)
def metalist(self, name):
cmd = "L" + _utf(name)
self._writeline(cmd)
res = self._readline()
names = []
while res != u"OK\n":
if res[:2] != u"RN": raise EResponse(res)
names.append(res[2:-1])
res = self._readline()
return names
def thumb_fns(self, m, ft):
sizes = self.cfg.thumb_sizes.split()
jpeg_fns = map(lambda z: (self.thumb_path(m, int(z)), int(z)), sizes)
png_fns = map(lambda n, z: (self.pngthumb_path(m, ft, n), z),
("normal", "large"), (128, 256))
return jpeg_fns, png_fns
def save_thumbs(self, m, img, ft, rot, force=False):
import Image
from PIL import PngImagePlugin
from dbutil import make_pdirs
fn = self.image_path(m)
mtime = os.stat(fn).st_mtime
if not img:
from dbutil import raw_wrapper
img = Image.open(raw_wrapper(open(fn, "rb")))
img.load()
# PIL rotates CCW
rotation = {90: Image.ROTATE_270, 180: Image.ROTATE_180, 270: Image.ROTATE_90}
if rot in rotation: img = img.transpose(rotation[rot])
w, h = img.size
if img.mode == "1":
# We want to scale B/W as grayscale.
img = img.convert("L")
if img.mode == "P" and "transparency" in img.info:
# special case for transparent gif
img = img.convert("RGBA")
if img.mode not in ("RGB", "RGBA", "L", "LA"):
# Are there other modes to worry about?
img = img.convert("RGB")
jpeg_fns, png_fns = self.thumb_fns(m, ft)
jpeg_opts = {"format": "JPEG", "quality": 95, "optimize": 1}
meta = PngImagePlugin.PngInfo()
meta.add_text("Thumb::URI", str(m + "." + ft), 0)
meta.add_text("Thumb::MTime", str(int(mtime)), 0)
png_opts = {"format": "PNG", "pnginfo": meta}
jpeg = map(lambda t: (t[0], t[1], jpeg_opts), jpeg_fns)
png = map(lambda t: (t[0], t[1], png_opts), png_fns)
z = max(map(lambda d: d[1], jpeg + png)) * 2
if w > z or h > z:
img.thumbnail((z, z), Image.ANTIALIAS)
if img.mode[-1] == "A":
# Images with transparency tend to have crap in the
# tansparent pixel values. This is not handled well
# when they are saved without transparency (jpeg).
# So we put it on a white background.
if img.mode == "LA":
mode = "LA"
col = 255
else:
mode = "RGBA"
col = (255, 255, 255)
bgfix = Image.new(mode, img.size, col)
alpha = img.split()[-1]
bgfix.paste(img, None, alpha)
# It seems reasonable to assume that not everything
# handles transparency properly in PNG thumbs, so
# we want to use this as the data for them as well.
# Just copy the alpha and call it good.
bgfix.putalpha(alpha)
img = bgfix
for fn, z, opts in jpeg + png:
if force or not os.path.exists(fn):
t = img.copy()
if w > z or h > z:
t.thumbnail((z, z), Image.ANTIALIAS)
make_pdirs(fn)
if t.mode == "LA" and opts["format"] == "JPEG":
# This is probably a PIL bug
t = t.convert("L")
t.save(fn, **opts)
[dbclient] Handle datetime valuetype (badly).
# -*- coding: iso-8859-1 -*-
import socket, base64, codecs, os, hashlib, re
from fractions import Fraction
from abc import ABCMeta, abstractmethod, abstractproperty
# Unexpected or error ("RE...") response line from the server.
class EResponse(Exception): pass
# The same post (md5) appeared more than once in a search reply.
class EDuplicate(EResponse): pass
def _uni(s):
	# Coerce to unicode: try UTF-8, fall back to iso-8859-1 (which
	# accepts any byte sequence, so this never raises).
	if type(s) is not unicode:
		try:
			s = s.decode("utf-8")
		except Exception:
			s = s.decode("iso-8859-1")
	return s
def _utf(s, allow_space=False):
	# Normalize to a UTF-8 byte string.  Space is the protocol's field
	# separator, so it is rejected unless explicitly allowed.
	s = _uni(s)
	if not allow_space: assert u" " not in s
	return s.encode("utf-8")
def _tagspec(type, value):
if value[0] in "~!":
type = value[0] + type
value = value[1:]
return type + value
def _enc(str):
	# Wire-safe text encoding: UTF-8, NUL-padded to a multiple of three
	# bytes (so base64 emits no "=" padding), base64 with "_-" as the
	# two extra alphabet characters.
	str = _utf(str, True)
	while len(str) % 3: str += "\x00"
	return base64.b64encode(str, "_-")
def _dec(enc):
	# Inverse of _enc: base64-decode and strip the NUL padding.
	if not enc: return u""
	enc = _utf(enc)
	str = base64.b64decode(enc, "_-")
	while str[-1] == "\x00": str = str[:-1]
	return str.decode("utf-8")
# Parse a hex number received from the server.
_p_hex = lambda x: int(x, 16)
# Parsers for post fields received from the server; fields not listed
# here are kept as the raw string.
_field_sparser = {
	"created" : _p_hex,
	"imgdate" : _p_hex,
	"imgdate_fuzz" : int,
	"width" : _p_hex,
	"height" : _p_hex,
	"score" : int,
	"rotate" : int,
	"source" : _dec,
	"title" : _dec,
	}
# Serializers for numbers sent to the server: decimal and hex.
_p_int = lambda i: str(int(i))
_p_hexint = lambda i: "%x" % (int(i),)
def _p_str(val):
	# Serialize a plain string field; spaces are not representable.
	val = _utf(val)
	assert " " not in val
	return val
def _p_date(val):
	"""Serialize a date for the server (hex seconds since the epoch).
	Accepts an integer/digit-string timestamp, or an EXIF-style
	"YYYY:MM:DD HH:MM:SS" string interpreted in local time."""
	if isinstance(val, basestring) and not val.isdigit():
		from time import strptime, mktime
		# Bug fix: the parsed timestamp was assigned to the undefined
		# name "date" instead of val, so every string date raised
		# NameError and the parse result was thrown away.
		val = mktime(strptime(val, "%Y:%m:%d %H:%M:%S"))
	return _p_hexint(val)
# Serializers for post fields sent to the server (see _field_sparser
# for the receiving direction).
_field_cparser = {
	"width" : _p_hexint,
	"height" : _p_hexint,
	"score" : _p_int,
	"rotate" : _p_int,
	"rating" : _p_str,
	"created" : _p_date,
	"imgdate" : _p_date,
	"imgdate_fuzz" : _p_int,
	"ext" : _p_str,
	"source" : _enc,
	"title" : _enc,
	}
class CommentWrapper:
	"""Wrap a file so readline/iteration skips comments
	and optionally empty lines.

	A comment line is one whose first non-blank character is "#".
	Returned lines are untouched (trailing newline included); EOF is
	the usual empty string from readline / StopIteration on iteration.
	"""
	def __init__(self, fh, allow_empty=False):
		self.fh = fh
		self.allow_empty = allow_empty
	def __iter__(self):
		return self
	def next(self):
		line = self.readline()
		if not line: raise StopIteration()
		return line
	# Python 3 iterator protocol; harmless, backward-compatible alias.
	__next__ = next
	def readline(self):
		# Loop until a line worth returning, or EOF (empty string).
		while 42:
			line = self.fh.readline()
			if not line: return line
			s = line.strip()
			if s:
				if s[0] != "#": return line
			elif self.allow_empty:
				return line
class DotDict(dict):
	"""dict whose keys are also reachable as attributes.

	Setting or deleting an attribute writes/removes the key.  Reading
	a missing key yields None, except names starting with "_", which
	raise AttributeError so internal/dunder lookups fail normally."""
	def __setattr__(self, name, value):
		self[name] = value
	def __delattr__(self, name):
		del self[name]
	def __getattr__(self, name):
		if name[0] == "_":
			raise AttributeError(name)
		return self.get(name)
	def __repr__(self):
		return "%s%s" % (repr(type(self)), dict.__repr__(self))
# A post (image) record; fields and tag lists are filled in by the
# search-reply parser.
class Post(DotDict): pass
class Tag(DotDict):
	def populate(self, res):
		"""Fill this Tag from a space-separated server reply fragment.

		Each token is a one-letter field code plus payload: N name,
		T type, A alias (repeatable), P post count (hex), W weak post
		count (hex), F flag (repeatable), G guid, V valuetype.  Flags
		become individual True-valued attributes; a valuetype of the
		form "type=value" is split and its value parsed into
		self.value."""
		res = res.split()
		alias = []
		flaglist = []
		hexint = lambda s: int(s, 16)
		dummy = lambda s: s
		incl = {u"N": ("name", dummy),
		        u"T": ("type", dummy),
		        u"A": ("alias", alias.append),
		        u"P": ("posts", hexint),
		        u"W": ("weak_posts", hexint),
		        u"F": ("flags", flaglist.append),
		        u"G": ("guid", str),
		        u"V": ("valuetype", str),
		        }
		# Note: A/F tokens accumulate through .append, which returns
		# None - that None placeholder is cleaned up below.
		for data in res:
			if data[0] in incl:
				name, parser = incl[data[0]]
				self[name] = parser(data[1:])
		self.alias = alias
		if flaglist:
			# Replace the raw flags entry with boolean attributes.
			del self.flags
			for flag in flaglist:
				self[flag] = True
		vt = (self.valuetype or "").split("=", 1)
		if len(vt) == 2:
			self.valuetype = vt[0]
			self.value = _vtparse(_dec, *vt)
class ValueType(object):
	"""Represents the value of a tag.
	v.value is the value as an apropriate type (str, float, int).
	v.exact is an exact representation of the value (str, int, Fraction).
	v.fuzz is how inexact the value is.
	v.exact_fuzz is like .exact but for the fuzz.
	v.str (or str(v)) is a string representation of exact value+-fuzz.
	v.format() is this value formated for sending to server."""
	__metaclass__ = ABCMeta  # Python 2 metaclass syntax
	@abstractmethod
	def __init__(self): pass
	@abstractproperty
	def type(self): pass
	@abstractproperty
	def _cmp_t(self): pass
	# Class-level defaults; subclasses set instance values through
	# __dict__ since direct assignment is blocked below.
	str = ""
	value = 0
	exact = 0
	fuzz = None
	exact_fuzz = None
	def __setattr__(self, name, value):
		raise AttributeError("ValueTypes are immutable")
	def __delattr__(self, name):
		raise AttributeError("ValueTypes are immutable")
	def __str__(self):
		return self.str
	def __repr__(self):
		c = self.__class__
		return c.__module__ + "." + c.__name__ + "(" + repr(self.str) + ")"
	def __hash__(self):
		return hash(self.exact) ^ hash(self.exact_fuzz) ^ hash(self.type)
	def __cmp(self, other):
		# Guard for the ordering operators: comparison against an
		# incompatible kind of value is an error, not False.
		if not isinstance(other, ValueType) or self._cmp_t != other._cmp_t:
			raise TypeError("Can only compare to a " + self._cmp_t)
	def __eq__(self, other):
		return type(self) == type(other) and \
			self.exact == other.exact and \
			self.exact_fuzz == other.exact_fuzz
	def __ne__(self, other):
		if not isinstance(other, ValueType): return False
		return type(self) != type(other) or \
			self.exact != other.exact or \
			self.exact_fuzz != other.exact_fuzz
	# Fuzzy ordering: compares the lower bound of one value against
	# the upper bound of the other, so values with overlapping fuzz
	# ranges can satisfy both < and >.
	def __lt__(self, other):
		self.__cmp(other)
		return self.exact - self.exact_fuzz < other.exact + other.exact_fuzz
	def __le__(self, other):
		self.__cmp(other)
		return self.exact - self.exact_fuzz <= other.exact + other.exact_fuzz
	def __gt__(self, other):
		self.__cmp(other)
		return self.exact + self.exact_fuzz > other.exact - other.exact_fuzz
	def __ge__(self, other):
		self.__cmp(other)
		return self.exact + self.exact_fuzz >= other.exact - other.exact_fuzz
	def format(self):
		return self.str
class VTstring(ValueType):
	"""Represents the value of a tag with valuetype string.
	v.value, v.exact and v.str are all the same string.
	There is no fuzz for strings."""
	type = "string"
	_cmp_t = "VTstring"
	def __init__(self, val):
		# Goes through __dict__ to bypass the immutability guard.
		for name in ("str", "value", "exact"):
			self.__dict__[name] = val
	# Strings have no fuzz, so ordering is plain exact comparison
	# (overrides the fuzzy base-class operators).
	def __lt__(self, other):
		self._ValueType__cmp(other)
		return self.exact < other.exact
	def __le__(self, other):
		self._ValueType__cmp(other)
		return self.exact <= other.exact
	def __gt__(self, other):
		self._ValueType__cmp(other)
		return self.exact > other.exact
	def __ge__(self, other):
		self._ValueType__cmp(other)
		return self.exact >= other.exact
	def __str__(self):
		# Python 2 semantics: str() yields the UTF-8 encoded bytes.
		return self.str.encode("utf-8")
	def __unicode__(self):
		return self.str
	def format(self):
		# Base64-wrapped for the wire, since values may contain spaces.
		return _enc(self.str)
class VTnumber(ValueType):
	_cmp_t = "VTnumber"
	def _parse(self, v, vp, vp2, fp):
		"""Parse "exact[+-fuzz]": vp parses the exact part, fp the fuzz
		part, vp2 converts either into the convenience .value/.fuzz
		representation.  Missing fuzz is stored as 0."""
		v = str(v)
		self.__dict__["str"] = v
		a = v.split("+-", 1)
		self.__dict__["exact"] = vp(a[0])
		self.__dict__["value"] = vp2(self.exact)
		if len(a) == 2:
			self.__dict__["exact_fuzz"] = fp(a[1])
			self.__dict__["fuzz"] = vp2(self.exact_fuzz)
		else:
			self.__dict__["fuzz"] = self.__dict__["exact_fuzz"] = 0
class VTint(VTnumber):
	__doc__ = ValueType.__doc__
	type = "int"
	def __init__(self, val):
		# Exact part is decimal; the fuzz part arrives in hex.
		self._parse(val, int, int, _p_hex)
class VTuint(VTnumber):
	__doc__ = ValueType.__doc__
	type = "uint"
	def __init__(self, val):
		# Both exact part and fuzz are in hex.
		self._parse(val, _p_hex, int, _p_hex)
class VTfloat(VTnumber):
	__doc__ = ValueType.__doc__
	type = "float"
	def __init__(self, val):
		# .exact stays lossless (int or Fraction); .value is a float.
		def intfrac(v):
			try:
				return int(v)
			except ValueError:
				return Fraction(v)
		self._parse(val, intfrac, float, intfrac)
class VTf_stop(VTfloat):
	__doc__ = ValueType.__doc__
	# Same representation as float; only the type name differs.
	type = "f-stop"
class VTstop(VTfloat):
	__doc__ = ValueType.__doc__
	type = "stop"
	def __init__(self, val):
		VTfloat.__init__(self, val)
		# Unlike VTfloat, integral exact/fuzz values are kept as ints
		# in .value/.fuzz instead of being converted to float.
		if isinstance(self.exact, (int, long)):
			self.__dict__["value"] = self.exact
		if isinstance(self.exact_fuzz, (int, long)):
			self.__dict__["fuzz"] = self.exact_fuzz
# @@ TODO: Handle fuzz/reasonable comparisons.
class VTdatetime(VTstring):
	type = "datetime"
	_cmp_t = "VTdatetime"
	def format(self):
		# Sent verbatim, unlike generic strings (base64-encoded).
		return self.str
# Map from valuetype name (as sent by the server) to the value class.
valuetypes = {"string" : VTstring,
              "int" : VTint,
              "uint" : VTuint,
              "float" : VTfloat,
              "f-stop" : VTf_stop,
              "stop" : VTstop,
              "datetime": VTdatetime,
             }
def _vtparse(strparse, vtype, val):
	# Parse a raw tag value; string values are first decoded with
	# strparse (caller supplies e.g. _dec or _uni), others parse
	# themselves in their constructor.
	if vtype == "string": val = strparse(val)
	return valuetypes[vtype](val)
class dbcfg(DotDict):
	"""Configuration loaded from .wellpapprc files.

	Starts from built-in defaults, then loads RC_NAME from $HOME and
	from every directory on the path from / down to the current
	directory (later files override earlier ones), then any EXTRA_RCs.
	Each rc line must be "key=value"; comments and blank lines are
	skipped by CommentWrapper."""
	def __init__(self, RC_NAME=".wellpapprc", EXTRA_RCs=None):
		DotDict.__init__(self, dict(tagwindow_width=840, tagwindow_height=600))
		# None default avoids the shared-mutable-default-argument
		# pitfall; passing an explicit list behaves as before.
		EXTRA_RCs = EXTRA_RCs or []
		RCs = []
		if RC_NAME:
			path = "/"
			RCs = [os.path.join(os.environ["HOME"], RC_NAME)]
			for dir in os.getcwd().split(os.path.sep):
				path = os.path.join(path, dir)
				RC = os.path.join(path, RC_NAME)
				if os.path.exists(RC): RCs.append(RC)
		for RC in RCs + EXTRA_RCs:
			self._load(RC)
	def _load(self, fn):
		# Use open() in a with-block so the rc file handle is closed
		# deterministically (the old file(fn) was never closed).
		with open(fn) as fh:
			for line in CommentWrapper(fh):
				line = line.strip()
				a = line.split("=", 1)
				assert(len(a) == 2)
				self[a[0]] = a[1]
class dbclient:
	# Commands are kept below this length by splitting into batches.
	_prot_max_len = 4096
	def __init__(self, cfg = None):
		# Connects lazily; only records the server address and config.
		if not cfg:
			cfg = dbcfg()
		self.cfg = cfg
		self.server = (cfg.server, int(cfg.port))
		self.userpass = None
		self.auth_ok = False
		self.is_connected = False
		self._md5re = re.compile(r"^[0-9a-f]{32}$", re.I)
		base = cfg.image_base
		if base[-1] == "/": base = base[:-1]
		base = re.escape(base)
		# Matches a symlink target under image_base; group(1) is the md5.
		self._destmd5re = re.compile(r"^" + base + r"/[0-9a-f]/[0-9a-f]{2}/([0-9a-f]{32})$")
	def _reconnect(self):
		# Lazily (re)establish the TCP connection, re-sending auth if
		# credentials were supplied earlier.
		if self.is_connected: return
		self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
		self.sock.connect(self.server)
		self.utfdec = codecs.getdecoder("utf8")
		self.fh = self.sock.makefile()
		self.is_connected = True
		self.auth_ok = False
		if self.userpass: self._send_auth()
	def _writeline(self, line, retry=True):
		# Send one protocol line, reconnecting and retrying once if the
		# send fails (the bare except is deliberate best-effort here).
		self._reconnect()
		line = line + "\n"
		try:
			self.sock.send(line)
		except:
			self.is_connected = False
			if retry:
				self._reconnect()
				self.sock.send(line)
	def _readline(self):
		# One reply line, decoded as UTF-8 (keeps the trailing "\n").
		return self.utfdec(self.fh.readline())[0]
	def _parse_search(self, line, posts, wanted, props):
		"""Parse one search reply line into posts; True when done ("OK").

		An "RR<hex>" line carries the result count (stored in props);
		other "R" lines describe one post: space-separated Pmd5 /
		Ffield=value tokens, then " :"-separated tag groups whose flag
		prefixes (I=implied, ~=weak) pick the tag list to append to."""
		if line == u"OK\n": return True
		if line[0] != u"R": raise EResponse(line)
		if line[1] == u"E": raise EResponse(line)
		if line[1] == u"R":
			if props != None: props["result_count"] = int(line[2:], 16)
			return
		f = Post()
		# NOTE(review): seen is rebuilt for every reply line, so the
		# EDuplicate check can only trigger within a single line -
		# presumably it was meant to span the whole search; confirm
		# before relying on it.
		seen = set()
		pieces = line[1:].split(" :")
		for token in pieces[0].split():
			type = token[0]
			data = token[1:]
			if type == u"P":
				f.md5 = str(data)
			elif type == u"F":
				field, value = data.split(u"=", 1)
				field = str(field)
				if field in _field_sparser:
					f[field] = _field_sparser[field](value)
				else:
					f[field] = value
			else:
				raise EResponse(line)
		f.tags = []
		f.weaktags = []
		f.impltags = []
		f.implweaktags = []
		for piece in pieces[1:-1]:
			flags, data = piece.split(" ", 1)
			if flags == "I~" or flags == "~I":
				ta = f.implweaktags
			elif flags == "I":
				ta = f.impltags
			elif flags == "~":
				ta = f.weaktags
			else:
				ta = f.tags
			t = Tag()
			t.populate(data)
			ta.append(t)
		if not f.md5: raise EResponse(line)
		if f.md5 in seen: raise EDuplicate(f.md5)
		seen.add(f.md5)
		# Legacy-style flat name/guid lists: weak tags get a "~" prefix.
		old = lambda n, full, weak: [t[n] for t in full] + [u"~" + t[n] for t in weak]
		if not wanted or "tagname" in wanted:
			f.tagname = old("name", f.tags, f.weaktags)
		if not wanted or "tagguid" in wanted:
			f.tagguid = old("guid", f.tags, f.weaktags)
		if wanted and "implied" in wanted:
			if "tagname" in wanted:
				f.impltagname = old("name", f.impltags, f.implweaktags)
			if "tagguid" in wanted:
				f.impltagguid = old("guid", f.impltags, f.implweaktags)
		else:
			del f.impltags
			del f.implweaktags
		posts.append(f)
	def _search_post(self, search, wanted = None, props = None):
		# Send a search command and collect all resulting posts.
		self._writeline(search)
		posts = []
		while not self._parse_search(self._readline(), posts, wanted, props): pass
		return posts
	def get_post(self, md5, separate_implied = False, wanted = None):
		"""Fetch a single post by md5; None if it does not exist."""
		md5 = str(md5)
		if not wanted:
			wanted = ["tagname", "tagguid", "tagdata", "ext", "created", "width", "height"]
		if separate_implied and "implied" not in wanted: wanted.append("implied")
		search = "SPM" + md5 + " F".join([""] + wanted)
		posts = self._search_post(search, wanted)
		if not posts or posts[0]["md5"] != md5: return None
		return posts[0]
	def delete_post(self, md5):
		"""Delete the post with this md5."""
		md5 = str(md5)
		assert " " not in md5
		cmd = "DP" + md5
		self._writeline(cmd)
		res = self._readline()
		if res != u"OK\n": raise EResponse(res)
	def _list(self, data, converter = _utf):
		# Normalize None/str/iterable into a list run through converter.
		if not data: return []
		if isinstance(data, basestring): return [converter(data)]
		return map(converter, data)
	def _shuffle_minus(self, pos, neg, converter):
		# "-x" entries switch sides: "-a" among the positives excludes
		# a, "-b" among the negatives includes b.  Returns the
		# rebalanced (include, exclude) pair.
		pos = self._list(pos, converter)
		neg = self._list(neg, converter)
		pos1 = [t for t in pos if t[0] != "-"]
		neg1 = [t[1:] for t in pos if t[0] == "-"]
		pos2 = [t[1:] for t in neg if t[0] == "-"]
		neg2 = [t for t in neg if t[0] != "-"]
		return pos1 + pos2, neg1 + neg2
	def _build_search(self, tags=None, guids=None, excl_tags=None,
	                  excl_guids=None , wanted=None, order=None, range=None):
		# Assemble the common search-expression part of a command:
		# F<field> wanted fields, T/t include/exclude tag specs,
		# O<field> ordering, R<start>:<end> result range (hex).
		search = ""
		tags, excl_tags = self._shuffle_minus(tags, excl_tags, _utf)
		guids, excl_guids = self._shuffle_minus(guids, excl_guids, str)
		for want in self._list(wanted, str):
			search += "F" + want + " "
		for tag in tags:
			search += "T" + _tagspec("N", tag) + " "
		for guid in guids:
			search += "T" + _tagspec("G", guid) + " "
		for tag in excl_tags:
			search += "t" + _tagspec("N", tag) + " "
		for guid in excl_guids:
			search += "t" + _tagspec("G", guid) + " "
		for o in self._list(order, str):
			search += "O" + o + " "
		if range != None:
			assert len(range) == 2
			search += "R" + ("%x" % range[0]) + ":" + ("%x" % range[1])
		return search
	def search_post(self, wanted=None, **kw):
		"""Search for posts; returns (posts, props) where props may
		contain "result_count"."""
		search = "SP" + self._build_search(wanted=wanted, **kw)
		props = DotDict()
		posts = self._search_post(search, wanted, props)
		return posts, props
	def _send_auth(self):
		# No retry on auth: a reconnect would re-send it anyway.
		self._writeline("a" + self.userpass[0] + " " + self.userpass[1], False)
		if self._readline() == "OK\n": self.auth_ok = True
	def auth(self, user, password):
		"""Authenticate; returns True on success."""
		self.userpass = (_utf(user), _utf(password))
		self._send_auth()
		return self.auth_ok
	def _fieldspec(self, **kwargs):
		# Serialize field=value pairs with the per-field cparser; the
		# result starts with a separating space unless empty.
		f = [_utf(f) + "=" + _field_cparser[_utf(f)](kwargs[f]) for f in kwargs]
		if not f: return ""
		return " " + " ".join(f)
	def modify_post(self, md5, **kwargs):
		"""Update fields on an existing post (no-op without fields)."""
		md5 = str(md5)
		assert " " not in md5
		fspec = self._fieldspec(**kwargs)
		if fspec:
			cmd = "MP" + md5 + fspec
			self._writeline(cmd)
			res = self._readline()
			if res != u"OK\n": raise EResponse(res)
	def add_post(self, md5, **kwargs):
		"""Create a post; width, height and ext are mandatory fields."""
		cmd = "AP" + str(md5)
		assert "width" in kwargs
		assert "height" in kwargs
		assert "ext" in kwargs
		cmd += self._fieldspec(**kwargs)
		self._writeline(cmd)
		res = self._readline()
		if res != u"OK\n": raise EResponse(res)
	def _rels(self, c, md5, rels):
		# Shared implementation of add_rels ("R") / remove_rels ("r").
		cmd = "R" + c + str(md5)
		for rel in self._list(rels, str):
			cmd += " " + rel
		self._writeline(cmd)
		res = self._readline()
		if res != u"OK\n": raise EResponse(res)
	def add_rels(self, md5, rels):
		"""Relate the post md5 to the given post(s)."""
		self._rels("R", md5, rels)
	def remove_rels(self, md5, rels):
		"""Remove relation(s) between md5 and the given post(s)."""
		self._rels("r", md5, rels)
	def _parse_rels(self, line, rels):
		# One reply line "R<post> <rel>..."; accumulates into rels.
		if line == u"OK\n": return True
		if line[0] != u"R": raise EResponse(line)
		if line[1] == u"E": raise EResponse(line)
		a = str(line[1:]).split()
		p = a[0]
		l = []
		if p in rels: l = rels[p]
		for rel in a[1:]:
			l.append(rel)
		rels[p] = l
	def post_rels(self, md5):
		"""Return the list of posts related to md5, or None."""
		md5 = str(md5)
		cmd = "RS" + md5
		self._writeline(cmd)
		rels = {}
		while not self._parse_rels(self._readline(), rels): pass
		if not md5 in rels: return None
		return rels[md5]
	def add_tag(self, name, type=None, guid=None, valuetype=None):
		"""Create a tag, optionally with explicit type/guid/valuetype."""
		cmd = "ATN" + _utf(name)
		if type:
			cmd += " T" + _utf(type)
		if guid:
			cmd += " G" + _utf(guid)
		if valuetype:
			cmd += " V" + _utf(valuetype)
		self._writeline(cmd)
		res = self._readline()
		if res != u"OK\n": raise EResponse(res)
	def add_alias(self, name, origin_guid):
		"""Add an alias name for the tag with guid origin_guid."""
		cmd = "AAG" + str(origin_guid) + " N" + _utf(name)
		self._writeline(cmd)
		res = self._readline()
		if res != u"OK\n": raise EResponse(res)
	def remove_alias(self, name):
		"""Remove a tag alias by name."""
		cmd = "DAN" + _utf(name)
		self._writeline(cmd)
		res = self._readline()
		if res != u"OK\n": raise EResponse(res)
	def _addrem_implies(self, addrem, set_tag, implied_tag, priostr):
		# Shared add/remove implication; a leading "-" on implied_tag
		# means an anti-implication (lowercase "i" on the wire).
		assert " " not in set_tag
		assert " " not in implied_tag
		implied_tag = str(implied_tag)
		if implied_tag[0] == "-":
			add = " i" + implied_tag[1:]
		else:
			add = " I" + implied_tag
		cmd = "I" + addrem + str(set_tag) + add + priostr
		self._writeline(cmd)
		res = self._readline()
		if res != u"OK\n": raise EResponse(res)
	def add_implies(self, set_tag, implied_tag, priority=0):
		"""Make set_tag imply implied_tag (with a priority)."""
		self._addrem_implies("I", set_tag, implied_tag, ":" + str(priority))
	def remove_implies(self, set_tag, implied_tag):
		"""Remove an implication from set_tag to implied_tag."""
		self._addrem_implies("i", set_tag, implied_tag, "")
	def _parse_implies(self, data):
		# One "RI<guid> I<guid>:<prio>..." line; anti-implications
		# ("i") are recorded with a "-" prefix.  Returns None on "OK".
		res = self._readline()
		if res == u"OK\n": return
		set_guid, impl_guid = map(str, res.split(" ", 1))
		assert set_guid[:2] == "RI"
		set_guid = set_guid[2:]
		for impl_guid in impl_guid.split():
			impl_guid, prio = impl_guid.split(":")
			if impl_guid[0] == "i":
				impl_guid = "-" + impl_guid[1:]
			else:
				assert impl_guid[0] == "I"
				impl_guid = impl_guid[1:]
			if set_guid not in data: data[set_guid] = []
			data[set_guid].append((impl_guid, int(prio)))
		return True
	def tag_implies(self, tag, reverse=False):
		"""List (guid, prio) implications of tag; with reverse=True the
		tags that imply tag instead.  None if there are none."""
		tag = str(tag)
		assert " " not in tag
		cmd = "IR" if reverse else "IS"
		self._writeline(cmd + tag)
		data = {}
		while self._parse_implies(data): pass
		if reverse:
			# Invert the mapping; each entry implies exactly our tag.
			rev = []
			for itag in data:
				impl = data[itag]
				assert len(impl) == 1
				impl = impl[0]
				assert len(impl) == 2
				ttag = impl[0]
				if ttag[0] == "-":
					assert ttag[1:] == tag
					rev.append(("-" + itag, impl[1]))
				else:
					assert ttag == tag
					rev.append((itag, impl[1]))
			return rev or None
		if tag in data: return data[tag]
	def merge_tags(self, into_t, from_t):
		"""Merge tag from_t into tag into_t (both guids)."""
		assert " " not in into_t
		assert " " not in from_t
		cmd = "MTG" + str(into_t) + " M" + str(from_t)
		self._writeline(cmd)
		res = self._readline()
		if res != u"OK\n": raise EResponse(res)
	def mod_tag(self, guid, name=None, type=None):
		"""Change the name and/or type of the tag with this guid."""
		guid = _utf(guid)
		assert " " not in guid
		cmd = "MTG" + guid
		if name:
			name = _utf(name)
			assert " " not in name
			cmd += " N" + name
		if type:
			type = _utf(type)
			assert " " not in type
			cmd += " T" + type
		self._writeline(cmd)
		res = self._readline()
		if res != u"OK\n": raise EResponse(res)
def _tag2spec(self, t):
if type(t) in (tuple, list):
assert len(t) == 2
g = str(t[0])
if t[1] is None: return g
return g + "=" + t[1].format()
else:
return str(t)
	def tag_post(self, md5, full_tags=None, weak_tags=None, remove_tags=None):
		"""Add (full or weak) and remove tags on post *md5* ("TP" command).

		Long tag lists are split over several protocol lines, each repeating
		the "TP<md5>" prefix.
		"""
		# weak tags are marked with a "~" prefix in the spec
		tags = map(self._tag2spec, full_tags or []) + map(lambda t: "~" + self._tag2spec(t), weak_tags or [])
		remove_tags = map(str, remove_tags or [])
		init = "TP" + str(md5)
		cmd = init
		# "T" adds a tag, "t" removes one
		for tag in map(lambda t: " T" + t, tags) + map(lambda t: " t" + t, remove_tags):
			assert " " not in tag[1:]
			# flush the current line before it would exceed the protocol limit
			if len(cmd) + len(tag) > self._prot_max_len:
				self._writeline(cmd)
				res = self._readline()
				if res != u"OK\n": raise EResponse(res)
				cmd = init
			cmd += tag
		# send the remainder, unless no tags were given at all
		if cmd != init:
			self._writeline(cmd)
			res = self._readline()
			if res != u"OK\n": raise EResponse(res)
def _parse_tagres(self, resdata = None):
res = self._readline()
if res == u"OK\n": return
if res[0] != u"R": raise EResponse(res)
if res[1] == u"E": raise EResponse(res)
if res[1] == u"R": return True # ignore count for now
t = Tag()
t.populate(res[1:])
if resdata != None: resdata.append(t)
return t
def find_tags(self, matchtype, name, range=None, order=None, **kw):
if kw:
filter = self._build_search(**kw)
if filter:
filter = " :" + filter
else:
filter = ""
matchtype = str(matchtype)
assert " " not in matchtype
name = _utf(name)
assert " " not in name
cmd = "ST" + matchtype + name
for o in self._list(order, str):
assert " " not in o
cmd += " O" + o
if range != None:
assert len(range) == 2
cmd += " R%x:%x" % range
self._writeline(cmd + filter)
tags = []
while self._parse_tagres(tags): pass
return tags
	def _parse_tag(self, prefix, spec, pos):
		"""Resolve "name=value" in *spec*, trying successively shorter names.

		*pos* is the index of the "=" currently assumed to split name from
		value; when the name part does not resolve to a tag with a value
		type, recurse with the previous "=". Returns (prefixed_guid, value)
		or None when no split works.
		"""
		if pos == -1: return None
		tag = self.find_tag(spec[:pos])
		ppos = spec.rfind("=", 0, pos)
		if not tag: return self._parse_tag(prefix, spec, ppos)
		tag = self.get_tag(tag)
		if not tag or tag.valuetype in (None, "none"): return self._parse_tag(prefix, spec, ppos)
		val = spec[pos + 1:]
		if val:
			val = _vtparse(_uni, tag.valuetype, val)
		else:
			val = None
		return (prefix + tag.guid, val)
def parse_tag(self, spec):
spec = _utf(spec)
if spec[0] in "~-!":
prefix = spec[0]
spec = spec[1:]
else:
prefix = ""
tag = self.find_tag(spec)
if tag: return (prefix + tag, None)
return self._parse_tag(prefix, spec, spec.rfind("="))
def find_tag(self, name, resdata=None, with_prefix=False):
name = _utf(name)
if with_prefix and name[0] in "~-!":
prefix = str(name[0])
name = name[1:]
else:
prefix = ""
tags = self.find_tags("EAN", name)
if not tags: return None
assert len(tags) == 1
guid = tags[0].guid
if resdata != None: resdata.update(tags[0])
return prefix + guid
def get_tag(self, guid, with_prefix=False):
guid = _utf(guid)
if with_prefix and guid[0] in u"~-!":
prefix = guid[0]
guid = guid[1:]
else:
prefix = u""
tags = self.find_tags("EAG", guid)
if not tags: return None
assert len(tags) == 1
data = tags[0]
assert guid == data.guid
data["name"] = prefix + data["name"]
return data
def begin_transaction(self):
self._writeline("tB")
res = self._readline()
return res == u"OK\n"
def end_transaction(self):
self._writeline("tE")
res = self._readline()
return res == u"OK\n"
def thumb_path(self, md5, size):
md5 = str(md5)
return os.path.join(self.cfg.thumb_base, str(size), md5[0], md5[1:3], md5)
def pngthumb_path(self, md5, ft, size):
fn = str(md5) + "." + str(ft)
md5 = hashlib.md5(fn).hexdigest()
return os.path.join(self.cfg.thumb_base, str(size), md5[0], md5[1:3], md5)
def image_dir(self, md5):
md5 = str(md5)
return os.path.join(self.cfg.image_base, md5[0], md5[1:3])
def image_path(self, md5):
md5 = str(md5)
return os.path.join(self.image_dir(md5), md5)
def postspec2md5(self, spec, default = None):
if os.path.lexists(spec) and not os.path.isdir(spec):
# some extra magic to avoid reading the files if possible
if os.path.islink(spec):
dest = os.readlink(spec)
m = self._destmd5re.match(dest)
if m: return m.group(1)
# Even when the fuse fs returns files, bare IDs are links
aspec = spec.split("/")
afn = aspec[-1].split(".")
if len(afn) == 2 and self._md5re.match(afn[0]):
aspec[-1] = afn[0]
shortspec = "/".join(aspec)
if os.path.islink(shortspec):
dest = os.readlink(shortspec)
m = self._destmd5re.match(dest)
if m: return m.group(1)
# Oh well, hash the file.
return hashlib.md5(file(spec).read()).hexdigest()
if self._md5re.match(spec): return spec
return default
def order(self, tag, posts):
tag = str(tag)
assert " " not in tag
init = "OG" + tag
cmd = init
anything = False
for post in map(str, posts):
cmd += " P" + post
anything = True
if len(cmd) + 64 > self._prot_max_len:
self._writeline(cmd)
res = self._readline()
if res != u"OK\n": raise EResponse(res)
cmd = init + " P" + post
anything = False
if anything:
self._writeline(cmd)
res = self._readline()
if res != u"OK\n": raise EResponse(res)
def metalist(self, name):
cmd = "L" + _utf(name)
self._writeline(cmd)
res = self._readline()
names = []
while res != u"OK\n":
if res[:2] != u"RN": raise EResponse(res)
names.append(res[2:-1])
res = self._readline()
return names
def thumb_fns(self, m, ft):
sizes = self.cfg.thumb_sizes.split()
jpeg_fns = map(lambda z: (self.thumb_path(m, int(z)), int(z)), sizes)
png_fns = map(lambda n, z: (self.pngthumb_path(m, ft, n), z),
("normal", "large"), (128, 256))
return jpeg_fns, png_fns
	def save_thumbs(self, m, img, ft, rot, force=False):
		"""Generate and save all jpeg and png thumbnails for post *m*.

		img is an optional already-open PIL image (reloaded from disk when
		falsy), ft the file type extension, rot a rotation in degrees
		(90/180/270). Existing thumb files are kept unless *force* is set.
		"""
		import Image
		from PIL import PngImagePlugin
		from dbutil import make_pdirs
		fn = self.image_path(m)
		mtime = os.stat(fn).st_mtime
		if not img:
			from dbutil import raw_wrapper
			img = Image.open(raw_wrapper(open(fn, "rb")))
			img.load()
		# PIL rotates CCW
		rotation = {90: Image.ROTATE_270, 180: Image.ROTATE_180, 270: Image.ROTATE_90}
		if rot in rotation: img = img.transpose(rotation[rot])
		w, h = img.size
		if img.mode == "1":
			# We want to scale B/W as grayscale.
			img = img.convert("L")
		if img.mode == "P" and "transparency" in img.info:
			# special case for transparent gif
			img = img.convert("RGBA")
		if img.mode not in ("RGB", "RGBA", "L", "LA"):
			# Are there other modes to worry about?
			img = img.convert("RGB")
		jpeg_fns, png_fns = self.thumb_fns(m, ft)
		jpeg_opts = {"format": "JPEG", "quality": 95, "optimize": 1}
		# freedesktop.org thumbnails embed the source URI and mtime
		meta = PngImagePlugin.PngInfo()
		meta.add_text("Thumb::URI", str(m + "." + ft), 0)
		meta.add_text("Thumb::MTime", str(int(mtime)), 0)
		png_opts = {"format": "PNG", "pnginfo": meta}
		jpeg = map(lambda t: (t[0], t[1], jpeg_opts), jpeg_fns)
		png = map(lambda t: (t[0], t[1], png_opts), png_fns)
		# pre-shrink once to twice the largest requested size; the per-thumb
		# scaling below then starts from this smaller image
		z = max(map(lambda d: d[1], jpeg + png)) * 2
		if w > z or h > z:
			img.thumbnail((z, z), Image.ANTIALIAS)
		if img.mode[-1] == "A":
			# Images with transparency tend to have crap in the
			# tansparent pixel values. This is not handled well
			# when they are saved without transparency (jpeg).
			# So we put it on a white background.
			if img.mode == "LA":
				mode = "LA"
				col = 255
			else:
				mode = "RGBA"
				col = (255, 255, 255)
			bgfix = Image.new(mode, img.size, col)
			alpha = img.split()[-1]
			bgfix.paste(img, None, alpha)
			# It seems reasonable to assume that not everything
			# handles transparency properly in PNG thumbs, so
			# we want to use this as the data for them as well.
			# Just copy the alpha and call it good.
			bgfix.putalpha(alpha)
			img = bgfix
		for fn, z, opts in jpeg + png:
			if force or not os.path.exists(fn):
				t = img.copy()
				if w > z or h > z:
					t.thumbnail((z, z), Image.ANTIALIAS)
				make_pdirs(fn)
				if t.mode == "LA" and opts["format"] == "JPEG":
					# This is probably a PIL bug
					t = t.convert("L")
				t.save(fn, **opts)
|
import logging
log = logging.getLogger(__name__)
import datetime
import threading
import importlib
import botologist.error
import botologist.http
import botologist.protocol
import botologist.plugin
import botologist.util
class CommandMessage:
	"""Representation of an IRC message that is a command.

	When a user sends a message to the bot that is a bot command, an instance
	of this class should be constructed and will be passed to the command
	handler to figure out a response.
	"""
	def __init__(self, message):
		assert isinstance(message, botologist.protocol.Message)
		self.message = message
		# first word is the command itself, the rest are its arguments
		self.command = message.words[0]
		self.args = message.words[1:]

	@property
	def user(self):
		"""The user who sent the underlying message."""
		return self.message.user

	@property
	def target(self):
		"""The channel/nick the underlying message was sent to."""
		return self.message.target
class Bot:
"""IRC bot."""
version = None
# the character commands start with
CMD_PREFIX = '!'
# ticker interval in seconds
TICK_INTERVAL = 120
# spam throttling in seconds
SPAM_THROTTLE = 2
	def __init__(self, config):
		"""Wire up the protocol client, error handling, plugins and channels.

		config is a dict-like object; 'storage_dir' is required, everything
		else falls back to sensible defaults.
		"""
		# the protocol backend (e.g. irc or local) is selected by config
		protocol_module = 'botologist.protocol.{}'.format(config.get('protocol', 'local'))
		self.protocol = importlib.import_module(protocol_module)
		self.client = self.protocol.get_client(config)
		self.config = config
		self.storage_dir = config['storage_dir']
		self.admins = config.get('admins', [])
		self.bans = config.get('bans', [])
		self.global_plugins = config.get('global_plugins', [])
		self.started = None
		self.plugins = {}
		# spam-throttle state: last invocation time per command / per reply
		self._command_log = {}
		self._last_command = None
		self._reply_log = {}
		self.timer = None
		self.http_port = config.get('http_port')
		self.http_host = config.get('http_host')
		self.http_server = None
		self.error_handler = botologist.error.ErrorHandler(self)
		self.client.error_handler = self.error_handler.handle_error
		# lifecycle and message callbacks
		self.client.on_connect.append(self._start)
		self.client.on_disconnect.append(self._stop)
		self.client.on_join.append(self._handle_join)
		self.client.on_privmsg.append(self._handle_privmsg)
		self.client.on_kick.append(self._handle_kick)
		# configure plugins
		for name, plugin_class in config.get('plugins', {}).items():
			# convenience compatibility layer for when plugins module was moved
			plugin_class = plugin_class.replace('botologist.plugin.', 'plugins.')
			# dynamically import the plugin module and pass the class
			parts = plugin_class.split('.')
			module = importlib.import_module('.'.join(parts[:-1]))
			plugin_class = getattr(module, parts[-1])
			self.register_plugin(name, plugin_class)
		# configure channels - either {name: kwargs} or a list of names/dicts
		channels = config.get('channels')
		if isinstance(channels, dict):
			for name, channel in channels.items():
				self.add_channel(name, **channel)
		elif isinstance(channels, list):
			for channel in channels:
				if isinstance(channel, dict):
					name = channel.pop('channel')
				else:
					name = channel
					channel = {}
				self.add_channel(name, **channel)
	@property
	def nick(self):
		"""The bot's own nickname, as reported by the protocol client."""
		return self.client.nick
	@property
	def channels(self):
		"""Mapping of channel name to channel object, from the protocol client."""
		return self.client.channels
def get_admin_nicks(self):
admin_nicks = set()
for channel in self.client.channels.values():
for admin_id in self.admins:
user = channel.find_users(identifier=admin_id)
for user in users:
if nick != self.nick:
admin_nicks.add(user.name)
return admin_nicks
	def run_forever(self):
		"""Record the start time and hand control to the protocol client loop."""
		self.started = datetime.datetime.now()
		self.client.run_forever()
def register_plugin(self, name, plugin):
if isinstance(plugin, str):
parts = plugin.split('.')
try:
module = importlib.import_module('.'.join(parts[:-1]))
plugin = getattr(module, parts[-1])
except (AttributeError, ImportError) as exception:
msg = 'Could not find plugin class: {}'.format(plugin)
raise Exception(msg) from exception
assert issubclass(plugin, botologist.plugin.Plugin)
log.debug('Plugin "%s" registered', name)
self.plugins[name] = plugin
	def add_channel(self, channel, plugins=None, admins=None, allow_colors=True):
		"""Register *channel* with the client, wiring channel and global plugins.

		plugins is a list of short plugin names; unknown ones are resolved
		via the "plugins.<name>.<Name>Plugin" naming convention.
		"""
		def guess_plugin_class(plugin):
			# "my_thing" -> "plugins.my_thing.MyThingPlugin"
			plugin_class = ''.join(part.title() for part in plugin.split('_'))
			return 'plugins.{}.{}Plugin'.format(plugin, plugin_class)
		if not isinstance(channel, botologist.protocol.Channel):
			channel = self.protocol.Channel(channel)
		# channel-specific plugins
		if plugins:
			assert isinstance(plugins, list)
			for plugin in plugins:
				assert isinstance(plugin, str)
				if plugin not in self.plugins:
					plugin_class = guess_plugin_class(plugin)
					self.register_plugin(plugin, plugin_class)
				log.debug('Adding plugin %s to channel %s', plugin, channel.channel)
				channel.register_plugin(self.plugins[plugin](self, channel))
		# global plugins
		for plugin in self.global_plugins:
			assert isinstance(plugin, str)
			if plugin not in self.plugins:
				plugin_class = guess_plugin_class(plugin)
				self.register_plugin(plugin, plugin_class)
			log.debug('Adding plugin %s to channel %s', plugin, channel.channel)
			channel.register_plugin(self.plugins[plugin](self, channel))
		if admins:
			assert isinstance(admins, list)
			channel.admins = admins
		channel.allow_colors = allow_colors
		self.client.add_channel(channel)
def _send_msg(self, msgs, targets):
if targets == '*':
targets = (channel for channel in self.client.channels)
elif not isinstance(targets, list) and not isinstance(targets, set):
targets = set([targets])
if not isinstance(msgs, list) and not isinstance(msgs, set):
msgs = set([msgs])
for msg in msgs:
for target in targets:
self.client.send_msg(target, msg)
def _handle_join(self, channel, user):
assert isinstance(channel, botologist.protocol.Channel)
assert isinstance(user, botologist.protocol.User)
# iterate through join callbacks. the first, if any, to return a
# non-empty value, will be sent back to the channel as a response.
response = None
for join_func in channel.joins:
response = join_func(user, channel)
if response:
self._send_msg(response, channel.channel)
return
def _handle_kick(self, channel, kicked_user, user):
assert isinstance(channel, botologist.protocol.Channel)
assert isinstance(kicked_user, botologist.protocol.User)
assert isinstance(user, botologist.protocol.User)
# iterate through join callbacks. the first, if any, to return a
# non-empty value, will be sent back to the channel as a response.
response = None
for kick_func in channel.kicks:
response = kick_func(kicked_user, channel, user)
if response:
self._send_msg(response, channel.channel)
return
	def _handle_privmsg(self, message):
		"""Dispatch an incoming message to command handling or repliers."""
		assert isinstance(message, botologist.protocol.Message)
		# banned users are ignored outright
		if message.user.identifier in self.bans:
			return
		# self-explanatory...
		if message.is_private:
			log.debug('Message is private, not replying')
			return None
		# check if the user is an admin - add it to the message.user object for
		# later re-use
		message.user.is_admin = (
			message.user.identifier in self.admins or (
				message.channel and
				message.user.identifier in message.channel.admins
			))
		channel = self.client.channels[message.target]
		assert isinstance(channel, botologist.protocol.Channel)
		# messages starting with the prefix are treated as commands
		if message.message.startswith(self.CMD_PREFIX):
			return self._handle_command(message, channel)
		# otherwise, call the channel's repliers
		response = self._call_repliers(channel, message)
		if response:
			self._send_msg(response, message.target)
	def _handle_command(self, message, channel):
		"""Resolve *message* to a channel command and run it.

		Exact command names win; otherwise an unambiguous prefix match is
		accepted. Threaded commands are run on a background thread.
		"""
		# if the message starts with the command prefix, check for matching
		# command and fire its callback
		cmd_string = message.words[0][1:].lower()
		if cmd_string in channel.commands:
			command = CommandMessage(message)
			command_func = channel.commands[cmd_string]
		else:
			# fall back to prefix matching, but only when unambiguous
			matching_commands = [cmd for cmd in channel.commands
				if cmd.startswith(cmd_string)]
			if len(matching_commands) == 0:
				log.debug('"%s" did not match any commands in channel %s',
					cmd_string, channel.channel)
				return
			elif len(matching_commands) != 1:
				log.debug('"%s" matched more than 1 command in channel %s',
					cmd_string, channel.channel)
				return
			command = CommandMessage(message)
			# rewrite the command to the full matched name
			command.command = self.CMD_PREFIX + matching_commands[0]
			command_func = channel.commands[matching_commands[0]]
		if command_func._is_threaded:
			log.debug('Starting thread for command %s', cmd_string)
			thread = botologist.util.ErrorProneThread(
				target=self._maybe_send_cmd_reply,
				args=(command_func, command),
				error_handler=self.error_handler.handle_error)
			thread.start()
		else:
			self._maybe_send_cmd_reply(command_func, command)
	def _maybe_send_cmd_reply(self, command_func, message):
		"""Run *command_func* unless throttled, sending its reply if any.

		Repeating the exact same invocation (same user, command and args)
		gets a 3x longer throttle window than merely reusing the command.
		Admins are never throttled.
		"""
		# check for spam
		now = datetime.datetime.now()
		if message.command in self._command_log and not message.user.is_admin:
			diff = now - self._command_log[message.command]
			if self._last_command == (message.user.identifier, message.command, message.args):
				threshold = self.SPAM_THROTTLE * 3
			else:
				threshold = self.SPAM_THROTTLE
			if diff.seconds < threshold:
				log.info('Command throttled: %s', message.command)
				return
		# log the command call for spam throttling
		self._last_command = (message.user.identifier, message.command, message.args)
		self._command_log[message.command] = now
		response = command_func(message)
		if response:
			self._send_msg(response, message.target)
def _call_repliers(self, channel, message):
now = datetime.datetime.now()
final_replies = []
# iterate through reply callbacks
for reply_func in channel.replies:
replies = reply_func(message)
if not replies:
continue
if isinstance(replies, list):
final_replies = final_replies + replies
else:
final_replies.append(replies)
if not message.user.is_admin:
for reply in final_replies:
# throttle spam - prevents the same reply from being sent
# more than once in a row within the throttle threshold
if channel.channel not in self._reply_log:
self._reply_log[channel.channel] = {}
if reply in self._reply_log[channel.channel]:
diff = now - self._reply_log[channel.channel][reply]
if diff.seconds < self.SPAM_THROTTLE:
log.info('Reply throttled: "%s"', reply)
final_replies.remove(reply)
# log the reply for spam throttling
self._reply_log[channel.channel][reply] = now
return final_replies
	def _start(self):
		"""on_connect callback: start the HTTP server (if configured) and the ticker."""
		if self.http_port and not self.http_server:
			log.info('Running HTTP server on %s:%s', self.http_host, self.http_port)
			thread = botologist.util.ErrorProneThread(
				target=botologist.http.run_http_server,
				args=(self, self.http_host, self.http_port),
				error_handler=self.error_handler.handle_error)
			thread.start()
		self._start_tick_timer()
	def _start_tick_timer(self):
		"""Schedule the next _tick() TICK_INTERVAL seconds from now."""
		self.timer = threading.Timer(self.TICK_INTERVAL, self._tick)
		self.timer.start()
		log.debug('Ticker started')
	def _stop(self):
		"""on_disconnect callback: shut down the HTTP server and cancel the ticker."""
		if self.http_server:
			log.info('Shutting down HTTP server')
			self.http_server.shutdown()
			self.http_server = None
		if self.timer:
			log.info('Ticker stopped')
			self.timer.cancel()
			self.timer = None
	def _tick(self):
		"""Periodic callback: clear throttle logs, run channel tickers, reschedule."""
		log.debug('Tick!')
		# reset the spam throttle to prevent the log dictionaries from becoming
		# too large
		self._command_log = {}
		for channel in self._reply_log:
			self._reply_log[channel] = {}
		try:
			for channel in self.client.channels.values():
				for ticker in channel.tickers:
					result = ticker()
					if result:
						self._send_msg(result, channel.channel)
		finally:
			# always reschedule, even if a ticker raised
			self._start_tick_timer()
Fix: get_admin_nicks used the wrong variable name (`user` instead of `users`) and compared an undefined `nick`.
import logging
log = logging.getLogger(__name__)
import datetime
import threading
import importlib
import botologist.error
import botologist.http
import botologist.protocol
import botologist.plugin
import botologist.util
class CommandMessage:
	"""Representation of an IRC message that is a command.

	When a user sends a message to the bot that is a bot command, an instance
	of this class should be constructed and will be passed to the command
	handler to figure out a response.
	"""
	def __init__(self, message):
		assert isinstance(message, botologist.protocol.Message)
		self.message = message
		# first word is the command itself, the rest are its arguments
		self.command = message.words[0]
		self.args = message.words[1:]

	@property
	def user(self):
		"""The user who sent the underlying message."""
		return self.message.user

	@property
	def target(self):
		"""The channel/nick the underlying message was sent to."""
		return self.message.target
class Bot:
"""IRC bot."""
version = None
# the character commands start with
CMD_PREFIX = '!'
# ticker interval in seconds
TICK_INTERVAL = 120
# spam throttling in seconds
SPAM_THROTTLE = 2
	def __init__(self, config):
		"""Wire up the protocol client, error handling, plugins and channels.

		config is a dict-like object; 'storage_dir' is required, everything
		else falls back to sensible defaults.
		"""
		# the protocol backend (e.g. irc or local) is selected by config
		protocol_module = 'botologist.protocol.{}'.format(config.get('protocol', 'local'))
		self.protocol = importlib.import_module(protocol_module)
		self.client = self.protocol.get_client(config)
		self.config = config
		self.storage_dir = config['storage_dir']
		self.admins = config.get('admins', [])
		self.bans = config.get('bans', [])
		self.global_plugins = config.get('global_plugins', [])
		self.started = None
		self.plugins = {}
		# spam-throttle state: last invocation time per command / per reply
		self._command_log = {}
		self._last_command = None
		self._reply_log = {}
		self.timer = None
		self.http_port = config.get('http_port')
		self.http_host = config.get('http_host')
		self.http_server = None
		self.error_handler = botologist.error.ErrorHandler(self)
		self.client.error_handler = self.error_handler.handle_error
		# lifecycle and message callbacks
		self.client.on_connect.append(self._start)
		self.client.on_disconnect.append(self._stop)
		self.client.on_join.append(self._handle_join)
		self.client.on_privmsg.append(self._handle_privmsg)
		self.client.on_kick.append(self._handle_kick)
		# configure plugins
		for name, plugin_class in config.get('plugins', {}).items():
			# convenience compatibility layer for when plugins module was moved
			plugin_class = plugin_class.replace('botologist.plugin.', 'plugins.')
			# dynamically import the plugin module and pass the class
			parts = plugin_class.split('.')
			module = importlib.import_module('.'.join(parts[:-1]))
			plugin_class = getattr(module, parts[-1])
			self.register_plugin(name, plugin_class)
		# configure channels - either {name: kwargs} or a list of names/dicts
		channels = config.get('channels')
		if isinstance(channels, dict):
			for name, channel in channels.items():
				self.add_channel(name, **channel)
		elif isinstance(channels, list):
			for channel in channels:
				if isinstance(channel, dict):
					name = channel.pop('channel')
				else:
					name = channel
					channel = {}
				self.add_channel(name, **channel)
	@property
	def nick(self):
		"""The bot's own nickname, as reported by the protocol client."""
		return self.client.nick
	@property
	def channels(self):
		"""Mapping of channel name to channel object, from the protocol client."""
		return self.client.channels
def get_admin_nicks(self):
admin_nicks = set()
for channel in self.client.channels.values():
for admin_id in self.admins:
users = channel.find_users(identifier=admin_id)
for user in users:
if nick != self.nick:
admin_nicks.add(user.name)
return admin_nicks
	def run_forever(self):
		"""Record the start time and hand control to the protocol client loop."""
		self.started = datetime.datetime.now()
		self.client.run_forever()
def register_plugin(self, name, plugin):
if isinstance(plugin, str):
parts = plugin.split('.')
try:
module = importlib.import_module('.'.join(parts[:-1]))
plugin = getattr(module, parts[-1])
except (AttributeError, ImportError) as exception:
msg = 'Could not find plugin class: {}'.format(plugin)
raise Exception(msg) from exception
assert issubclass(plugin, botologist.plugin.Plugin)
log.debug('Plugin "%s" registered', name)
self.plugins[name] = plugin
	def add_channel(self, channel, plugins=None, admins=None, allow_colors=True):
		"""Register *channel* with the client, wiring channel and global plugins.

		plugins is a list of short plugin names; unknown ones are resolved
		via the "plugins.<name>.<Name>Plugin" naming convention.
		"""
		def guess_plugin_class(plugin):
			# "my_thing" -> "plugins.my_thing.MyThingPlugin"
			plugin_class = ''.join(part.title() for part in plugin.split('_'))
			return 'plugins.{}.{}Plugin'.format(plugin, plugin_class)
		if not isinstance(channel, botologist.protocol.Channel):
			channel = self.protocol.Channel(channel)
		# channel-specific plugins
		if plugins:
			assert isinstance(plugins, list)
			for plugin in plugins:
				assert isinstance(plugin, str)
				if plugin not in self.plugins:
					plugin_class = guess_plugin_class(plugin)
					self.register_plugin(plugin, plugin_class)
				log.debug('Adding plugin %s to channel %s', plugin, channel.channel)
				channel.register_plugin(self.plugins[plugin](self, channel))
		# global plugins
		for plugin in self.global_plugins:
			assert isinstance(plugin, str)
			if plugin not in self.plugins:
				plugin_class = guess_plugin_class(plugin)
				self.register_plugin(plugin, plugin_class)
			log.debug('Adding plugin %s to channel %s', plugin, channel.channel)
			channel.register_plugin(self.plugins[plugin](self, channel))
		if admins:
			assert isinstance(admins, list)
			channel.admins = admins
		channel.allow_colors = allow_colors
		self.client.add_channel(channel)
def _send_msg(self, msgs, targets):
if targets == '*':
targets = (channel for channel in self.client.channels)
elif not isinstance(targets, list) and not isinstance(targets, set):
targets = set([targets])
if not isinstance(msgs, list) and not isinstance(msgs, set):
msgs = set([msgs])
for msg in msgs:
for target in targets:
self.client.send_msg(target, msg)
def _handle_join(self, channel, user):
assert isinstance(channel, botologist.protocol.Channel)
assert isinstance(user, botologist.protocol.User)
# iterate through join callbacks. the first, if any, to return a
# non-empty value, will be sent back to the channel as a response.
response = None
for join_func in channel.joins:
response = join_func(user, channel)
if response:
self._send_msg(response, channel.channel)
return
def _handle_kick(self, channel, kicked_user, user):
assert isinstance(channel, botologist.protocol.Channel)
assert isinstance(kicked_user, botologist.protocol.User)
assert isinstance(user, botologist.protocol.User)
# iterate through join callbacks. the first, if any, to return a
# non-empty value, will be sent back to the channel as a response.
response = None
for kick_func in channel.kicks:
response = kick_func(kicked_user, channel, user)
if response:
self._send_msg(response, channel.channel)
return
	def _handle_privmsg(self, message):
		"""Dispatch an incoming message to command handling or repliers."""
		assert isinstance(message, botologist.protocol.Message)
		# banned users are ignored outright
		if message.user.identifier in self.bans:
			return
		# self-explanatory...
		if message.is_private:
			log.debug('Message is private, not replying')
			return None
		# check if the user is an admin - add it to the message.user object for
		# later re-use
		message.user.is_admin = (
			message.user.identifier in self.admins or (
				message.channel and
				message.user.identifier in message.channel.admins
			))
		channel = self.client.channels[message.target]
		assert isinstance(channel, botologist.protocol.Channel)
		# messages starting with the prefix are treated as commands
		if message.message.startswith(self.CMD_PREFIX):
			return self._handle_command(message, channel)
		# otherwise, call the channel's repliers
		response = self._call_repliers(channel, message)
		if response:
			self._send_msg(response, message.target)
	def _handle_command(self, message, channel):
		"""Resolve *message* to a channel command and run it.

		Exact command names win; otherwise an unambiguous prefix match is
		accepted. Threaded commands are run on a background thread.
		"""
		# if the message starts with the command prefix, check for matching
		# command and fire its callback
		cmd_string = message.words[0][1:].lower()
		if cmd_string in channel.commands:
			command = CommandMessage(message)
			command_func = channel.commands[cmd_string]
		else:
			# fall back to prefix matching, but only when unambiguous
			matching_commands = [cmd for cmd in channel.commands
				if cmd.startswith(cmd_string)]
			if len(matching_commands) == 0:
				log.debug('"%s" did not match any commands in channel %s',
					cmd_string, channel.channel)
				return
			elif len(matching_commands) != 1:
				log.debug('"%s" matched more than 1 command in channel %s',
					cmd_string, channel.channel)
				return
			command = CommandMessage(message)
			# rewrite the command to the full matched name
			command.command = self.CMD_PREFIX + matching_commands[0]
			command_func = channel.commands[matching_commands[0]]
		if command_func._is_threaded:
			log.debug('Starting thread for command %s', cmd_string)
			thread = botologist.util.ErrorProneThread(
				target=self._maybe_send_cmd_reply,
				args=(command_func, command),
				error_handler=self.error_handler.handle_error)
			thread.start()
		else:
			self._maybe_send_cmd_reply(command_func, command)
	def _maybe_send_cmd_reply(self, command_func, message):
		"""Run *command_func* unless throttled, sending its reply if any.

		Repeating the exact same invocation (same user, command and args)
		gets a 3x longer throttle window than merely reusing the command.
		Admins are never throttled.
		"""
		# check for spam
		now = datetime.datetime.now()
		if message.command in self._command_log and not message.user.is_admin:
			diff = now - self._command_log[message.command]
			if self._last_command == (message.user.identifier, message.command, message.args):
				threshold = self.SPAM_THROTTLE * 3
			else:
				threshold = self.SPAM_THROTTLE
			if diff.seconds < threshold:
				log.info('Command throttled: %s', message.command)
				return
		# log the command call for spam throttling
		self._last_command = (message.user.identifier, message.command, message.args)
		self._command_log[message.command] = now
		response = command_func(message)
		if response:
			self._send_msg(response, message.target)
def _call_repliers(self, channel, message):
now = datetime.datetime.now()
final_replies = []
# iterate through reply callbacks
for reply_func in channel.replies:
replies = reply_func(message)
if not replies:
continue
if isinstance(replies, list):
final_replies = final_replies + replies
else:
final_replies.append(replies)
if not message.user.is_admin:
for reply in final_replies:
# throttle spam - prevents the same reply from being sent
# more than once in a row within the throttle threshold
if channel.channel not in self._reply_log:
self._reply_log[channel.channel] = {}
if reply in self._reply_log[channel.channel]:
diff = now - self._reply_log[channel.channel][reply]
if diff.seconds < self.SPAM_THROTTLE:
log.info('Reply throttled: "%s"', reply)
final_replies.remove(reply)
# log the reply for spam throttling
self._reply_log[channel.channel][reply] = now
return final_replies
	def _start(self):
		"""on_connect callback: start the HTTP server (if configured) and the ticker."""
		if self.http_port and not self.http_server:
			log.info('Running HTTP server on %s:%s', self.http_host, self.http_port)
			thread = botologist.util.ErrorProneThread(
				target=botologist.http.run_http_server,
				args=(self, self.http_host, self.http_port),
				error_handler=self.error_handler.handle_error)
			thread.start()
		self._start_tick_timer()
	def _start_tick_timer(self):
		"""Schedule the next _tick() TICK_INTERVAL seconds from now."""
		self.timer = threading.Timer(self.TICK_INTERVAL, self._tick)
		self.timer.start()
		log.debug('Ticker started')
	def _stop(self):
		"""on_disconnect callback: shut down the HTTP server and cancel the ticker."""
		if self.http_server:
			log.info('Shutting down HTTP server')
			self.http_server.shutdown()
			self.http_server = None
		if self.timer:
			log.info('Ticker stopped')
			self.timer.cancel()
			self.timer = None
	def _tick(self):
		"""Periodic callback: clear throttle logs, run channel tickers, reschedule."""
		log.debug('Tick!')
		# reset the spam throttle to prevent the log dictionaries from becoming
		# too large
		self._command_log = {}
		for channel in self._reply_log:
			self._reply_log[channel] = {}
		try:
			for channel in self.client.channels.values():
				for ticker in channel.tickers:
					result = ticker()
					if result:
						self._send_msg(result, channel.channel)
		finally:
			# always reschedule, even if a ticker raised
			self._start_tick_timer()
|
from absl import flags
from typing import Dict
from typing import List
from termcolor import cprint
from google.cloud import storage
FLAGS = flags.FLAGS  # global absl flag registry
StringList = List[str]  # type alias for lists of error strings
class SimpleFlag(str):
  """A str-derived descriptor of one absl flag.

  Carries the flag's help text, default, defining method and optional
  validation/show/after hooks, plus the value eventually entered by the
  user or parsed from the command line.
  """
  default = None
  help = None
  method: callable = None
  value: str = None
  required: bool = False
  validation: callable = None
  show: callable = None
  after: callable = None

  def __new__(cls, helptext=None, *args, **kwargs):
    # str is immutable, so constructor arguments arrive at str.__new__;
    # without this override, keyword arguments such as method=... or
    # default=... raise TypeError. Keep the help text as the str value,
    # matching the old positional-only behavior.
    return super().__new__(cls, helptext if helptext is not None else '')

  def __init__(self, helptext=None, default=None, method=flags.DEFINE_string,
               required=True, validation=None, show=None, after=None):
    self.__value_set = False
    self.default = default
    self.help = helptext
    self.method = method
    self.required = required
    self.validation = validation
    self.show = show
    self.after = after
    super().__init__()

  @staticmethod
  def dash(v: str) -> str:
    """Prefix *v* with '- ' for display as a list item."""
    return '- ' + v

  def set_value(self, value: str):
    """Store *value*, coercing boolean flags, then run the after-hook."""
    if self.method == flags.DEFINE_boolean:
      if value == '1' or value == 'true':
        value = True
      elif value == '0' or value == 'false':
        value = False
    self.value = value
    self.__value_set = value is not None
    # perform actions
    if self.after is not None:
      self.after(self)

  def value_explicitly_set(self) -> bool:
    """True once a non-None value has been assigned via set_value."""
    return self.__value_set

  def maybe_needs_input(self):
    """True when no value was given and the flag is visible (show-hook)."""
    return not self.value_explicitly_set() and (
        self.show is None or self.show())

  def __str__(self):
    return self.value
class Validator:
  """Validation helpers for SimpleFlag values."""

  @staticmethod
  def check_bool(setting: SimpleFlag, errors: list):
    """Append an error message to *errors* unless setting.value is boolean-like."""
    value = setting.value
    if isinstance(value, bool):
      return
    if value in ('true', '1', 'false', '0'):
      return
    errors.append('Invalid option '
                  + setting.value
                  + '. Expecting true/false or 0/1')

  @staticmethod
  def validate(s: SimpleFlag):
    """Run the flag's validator; print errors in red and return False on failure.

    Boolean flags without an explicit validator get check_bool.
    """
    if s.method == flags.DEFINE_boolean and s.validation is None:
      s.validation = Validator.check_bool
    if s.validation is None:
      return True
    errors: StringList = []
    s.validation(s, errors)
    if len(errors) == 0:
      return True
    cprint('Error{0}:'.format(
        's' if len(errors) > 1 else ''
    ), 'red', attrs=['bold'])
    cprint('\n'.join(map(SimpleFlag.dash, errors)), 'red')
    return False
class Hooks:
  """Side-effect hooks triggered after a flag value is set."""

  @staticmethod
  def create_bucket(setting: SimpleFlag):
    """Create the GCS bucket named by *setting* if it does not already exist.

    The previous version unconditionally called get_bucket() (which raises
    NotFound for a missing bucket) and then create_bucket() (which raises
    Conflict for an existing one), so it failed either way; it also left a
    debug print() behind.
    """
    if not setting.value:
      return
    client = storage.Client()
    # lookup_bucket returns None instead of raising when the bucket is absent
    if client.lookup_bucket(setting.value) is None:
      client.create_bucket(setting.value,
                           project=args['gcp_project_name'].value)
SimpleFlags = Dict[str, SimpleFlag]  # alias: flag name -> SimpleFlag

# Every flag used by the interactive installer, keyed by its absl flag name.
args: SimpleFlags = {
    'gcp_project_name': SimpleFlag('GCP Project Name'),
    'raw_dataset': SimpleFlag(
        'Where all raw BigQuery data is stored',
        default='raw'
    ),
    'view_dataset': SimpleFlag(
        'Where all formatted BigQuery data is stored',
        default='views'
    ),
    'agency_id': SimpleFlag('SA360 Agency ID'),
    'advertiser_id': SimpleFlag(
        'SA360 Advertiser IDs',
        method=flags.DEFINE_list
    ),
    'historical_data': SimpleFlag(
        'Include Historical Data?',
        method=flags.DEFINE_boolean
    ),
    'storage_bucket': SimpleFlag(
        'Storage Bucket Name',
        after=Hooks.create_bucket  # creates the bucket as a side effect
    ),
    'historical_table_name': SimpleFlag(
        'Name of historical table',
        # only prompted for when historical data was requested
        show=lambda: args['historical_data'].value
    ),
}
class Settings(SimpleFlags):
  """Dict of SimpleFlags, populated from the already-parsed absl FLAGS."""
  def __init__(self):
    super().__init__(args)
    # copy each command-line value (possibly None) into its SimpleFlag
    for k in args.keys():
      self[k].set_value(getattr(FLAGS, k))
def assign_flags() -> flags:
  """Register every entry of `args` as an absl flag; return flags.FLAGS."""
  for name, setting in args.items():
    setting.method(name, None, setting.help)
  return flags.FLAGS
def load_settings():
  """Interactively prompt for any flag not already supplied on the CLI.

  Returns the fully-populated Settings mapping.
  """
  settings: Settings = Settings()
  first = True
  for k in settings.keys():
    setting: SimpleFlag = settings[k]
    if setting.maybe_needs_input():
      if first:
        # Print the banner once, before the first prompt.
        cprint('Interactive Setup', attrs=['bold'])
        first = False
      default = ' [{0}]'.format(
        setting.default
      ) if setting.default is not None else ''
      while True:
        setting.set_value(input(
          '{0} ({1}){2}: '.format(k, setting.help, default)
        ))
        # Blank input falls back to the declared default, if any.
        if setting.value == '' and setting.default is not None:
          setting.value = setting.default
        validated = Validator.validate(setting)
        if not validated:
          continue
        # Accept blank values only for optional settings; otherwise re-ask.
        if setting.value != '' or not setting.required:
          break
        cprint('Required Field', 'red')
  return settings
store
from absl import flags
from typing import Dict
from typing import List
from termcolor import cprint
from google.cloud import storage
# Parsed absl flag values (populated once absl parses argv).
FLAGS = flags.FLAGS
# Type alias for a list of validation-error strings.
StringList = List[str]
class SimpleFlag(object):
  """A single configurable setting backed by an absl flag.

  Bug fix: the original declared `class SimpleFlag(object, str)`, which
  raises "Cannot create a consistent method resolution order" at import
  time (object must come last in any MRO); the str base and the matching
  bogus `super(str).__init__()` call are removed.
  """
  default = None             # fallback used when the user enters a blank value
  help = None                # prompt/help text shown to the user
  method: callable = None    # absl DEFINE_* function used to register the flag
  value: str = None          # current value (bool-coerced for boolean flags)
  required: bool = False     # whether a blank value is rejected interactively
  validation: callable = None  # optional callable(setting, errors) validator
  show: callable = None      # optional predicate: prompt only when truthy
  after: callable = None     # optional post-set hook (e.g. bucket creation)

  def __init__(self, helptext=None, default=None, method=flags.DEFINE_string,
               required=True, validation=None, show=None, after=None):
    self.__value_set = False
    self.default = default
    self.help = helptext
    self.method = method
    self.required = required
    self.validation = validation
    self.show = show
    self.after = after

  @staticmethod
  def dash(v: str) -> str:
    """Prefix a string with '- ' for bullet-list display."""
    return '- ' + v

  def set_value(self, value: str):
    """Store a value (coercing boolean flags) and fire the `after` hook."""
    if self.method == flags.DEFINE_boolean:
      if value == '1' or value == 'true':
        value = True
      elif value == '0' or value == 'false':
        value = False
    self.value = value
    self.__value_set = value is not None
    # perform actions
    if self.after is not None:
      self.after(self)

  def value_explicitly_set(self) -> bool:
    """True once set_value() stored a non-None value."""
    return self.__value_set

  def maybe_needs_input(self):
    """True when no value was given and the flag is currently visible."""
    return not self.value_explicitly_set() and (
      self.show is None or self.show())

  def __str__(self):
    # Bug fix: returning None from __str__ raises TypeError; render an
    # unset flag as the empty string instead.
    return self.value if self.value is not None else ''
class Validator:
  """Validation helpers applied to SimpleFlag values."""

  @staticmethod
  def check_bool(setting: SimpleFlag, errors: list):
    """Append an error unless the value is a recognized boolean token."""
    value = setting.value
    if isinstance(value, bool) or value in ('true', '1', 'false', '0'):
      return
    errors.append('Invalid option '
                  + setting.value
                  + '. Expecting true/false or 0/1')

  @staticmethod
  def validate(s: SimpleFlag):
    """Run the flag's validator; print errors and return False on failure."""
    # Boolean flags get the stock boolean check unless one was supplied.
    if s.method == flags.DEFINE_boolean and s.validation is None:
      s.validation = Validator.check_bool
    if s.validation is None:
      return True
    errors: StringList = []
    s.validation(s, errors)
    if not errors:
      return True
    cprint('Error{0}:'.format(
      's' if len(errors) > 1 else ''
    ), 'red', attrs=['bold'])
    cprint('\n'.join(map(SimpleFlag.dash, errors)), 'red')
    return False
class Hooks:
  """Post-set callbacks wired into individual flags via `after=`."""

  @staticmethod
  def create_bucket(setting: SimpleFlag):
    """Ensure the GCS bucket named by `setting.value` exists.

    Bug fix: the original called get_bucket() first, which raises NotFound
    for a missing bucket, so create_bucket() could never run (and would have
    raised Conflict for an existing one). lookup_bucket() returns None
    instead of raising, letting us create only when absent.
    """
    if not setting.value:
      return
    client = storage.Client()
    bucket = client.lookup_bucket(setting.value)
    if bucket is not None:
      print(bucket)
      return
    client.create_bucket(setting.value,
                         project=args['gcp_project_name'].value)
# Type alias: a settings container keyed by absl flag name.
SimpleFlags = Dict[str, SimpleFlag]
# Registry of every flag the installer understands.
args: SimpleFlags = {
  'gcp_project_name': SimpleFlag('GCP Project Name'),
  'raw_dataset': SimpleFlag(
    'Where all raw BigQuery data is stored',
    default='raw'
  ),
  'view_dataset': SimpleFlag(
    'Where all formatted BigQuery data is stored',
    default='views'
  ),
  'agency_id': SimpleFlag('SA360 Agency ID'),
  'advertiser_id': SimpleFlag(
    'SA360 Advertiser IDs',
    method=flags.DEFINE_list
  ),
  'historical_data': SimpleFlag(
    'Include Historical Data?',
    method=flags.DEFINE_boolean
  ),
  # Creating the bucket is a side effect of setting this flag's value.
  'storage_bucket': SimpleFlag(
    'Storage Bucket Name',
    after=Hooks.create_bucket
  ),
  # Only prompted for when historical_data was answered truthy.
  'historical_table_name': SimpleFlag(
    'Name of historical table',
    show=lambda: args['historical_data'].value
  ),
}
class Settings(SimpleFlags):
  """The `args` registry pre-populated with parsed absl FLAGS values."""

  def __init__(self):
    super().__init__(args)
    # Seed each setting with whatever absl parsed from the command line.
    for name in args.keys():
      self[name].set_value(getattr(FLAGS, name))
def assign_flags() -> flags:
  """Register every entry of `args` as an absl flag; return flags.FLAGS."""
  for name, setting in args.items():
    setting.method(name, None, setting.help)
  return flags.FLAGS
def load_settings():
  """Interactively prompt for any flag not already supplied on the CLI.

  Returns the fully-populated Settings mapping.
  """
  settings: Settings = Settings()
  first = True
  for k in settings.keys():
    setting: SimpleFlag = settings[k]
    if setting.maybe_needs_input():
      if first:
        # Print the banner once, before the first prompt.
        cprint('Interactive Setup', attrs=['bold'])
        first = False
      default = ' [{0}]'.format(
        setting.default
      ) if setting.default is not None else ''
      while True:
        setting.set_value(input(
          '{0} ({1}){2}: '.format(k, setting.help, default)
        ))
        # Blank input falls back to the declared default, if any.
        if setting.value == '' and setting.default is not None:
          setting.value = setting.default
        validated = Validator.validate(setting)
        if not validated:
          continue
        # Accept blank values only for optional settings; otherwise re-ask.
        if setting.value != '' or not setting.required:
          break
        cprint('Required Field', 'red')
  return settings
|
import asyncio
import json
import logging
from math import ceil
import aiohttp
import requests
from user_sync.error import AssertionException
class SignClient:
    """Client for the Adobe Sign REST API (v5 or v6)."""
    version = 'v5'
    _endpoint_template = 'api/rest/{}/'
    DEFAULT_GROUP_NAME = 'default group'

    def __init__(self, config):
        """
        Build a Sign client from config. Required keys: host, key,
        admin_email. Optional: console_org and a `connection` sub-dict
        (retry_count, request_concurrency, timeout, batch_size,
        ssl_cert_verify).
        """
        for k in ['host', 'key', 'admin_email']:
            if k not in config:
                raise AssertionException("Key '{}' must be specified for all Sign orgs".format(k))
        self.host = config['host']
        self.key = config['key']
        self.admin_email = config['admin_email']
        self.console_org = config['console_org'] if 'console_org' in config else None
        self.api_url = None
        self.groups = None
        connection_cfg = config.get('connection') or {}
        self.max_sign_retries = connection_cfg.get('retry_count') or 5
        self.concurrency_limit = connection_cfg.get('request_concurrency') or 1
        timeout = connection_cfg.get('timeout') or 120
        self.batch_size = connection_cfg.get('batch_size') or 10000
        # Bug fix: `get('ssl_cert_verify') or True` always evaluated to True,
        # silently discarding an explicit False; only default when unset.
        ssl_cert_verify = connection_cfg.get('ssl_cert_verify')
        self.ssl_cert_verify = True if ssl_cert_verify is None else ssl_cert_verify
        self.logger = logging.getLogger(self.logger_name())
        logging.getLogger("urllib3").setLevel(logging.WARNING)
        self.timeout = aiohttp.ClientTimeout(total=None, sock_connect=timeout, sock_read=timeout)
        self.loop = asyncio.get_event_loop()
        self.users = {}

    def _init(self):
        # Resolve the org-specific API base URL and cache group mappings.
        self.api_url = self.base_uri()
        self.groups = self.get_groups()
        self.reverse_groups = {v: k for k, v in self.groups.items()}

    def sign_groups(self):
        if self.api_url is None or self.groups is None:
            self._init()
        return self.groups

    def logger_name(self):
        return 'sign_client.{}'.format(self.console_org if self.console_org else 'main')

    def header(self):
        """
        Return Sign API auth header
        :return: dict()
        """
        if self.version == 'v6':
            return {
                'Authorization': "Bearer {}".format(self.key),
                'Connection': 'close',
            }
        return {
            'Access-Token': self.key,
        }

    def header_json(self):
        """
        Get auth headers with options to PUT/POST JSON
        :return: dict()
        """
        json_headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
        }
        json_headers.update(self.header())
        return json_headers

    def base_uri(self):
        """
        This function validates that the SIGN integration key is valid.
        :return: dict()
        """
        endpoint = self._endpoint_template.format(self.version)
        url = 'https://' + self.host + '/' + endpoint
        if self.version == 'v6':
            url_path = 'baseUris'
            access_point_key = 'apiAccessPoint'
        else:
            url_path = 'base_uris'
            access_point_key = 'api_access_point'
        # Bug fix: the configured ssl_cert_verify setting was never applied here.
        result = requests.get(url + url_path, headers=self.header(), verify=self.ssl_cert_verify)
        if result.status_code != 200:
            raise AssertionException('Error getting base URI from Sign API, is API key valid?')
        if access_point_key not in result.json():
            raise AssertionException('Error getting base URI for Sign API, result invalid')
        self.logger.debug('base_uri result: {}'.format(result.json()[access_point_key] + endpoint))
        return result.json()[access_point_key] + endpoint

    def get_groups(self):
        """
        API request to get group information
        :return: dict()
        """
        if self.api_url is None:
            self.api_url = self.base_uri()
        url = self.api_url + 'groups'
        header = self.header()
        sign_groups, code = self.call_with_retry_sync('GET', url, header)
        self.logger.info('getting Sign user groups')
        groups = {}
        for group in sign_groups['groupInfoList']:
            groups[group['groupName'].lower()] = group['groupId']
        return groups

    def create_group(self, group):
        """
        Create a new group in Sign
        :param group: str
        :return:
        """
        if self.api_url is None or self.groups is None:
            self._init()
        url = self.api_url + 'groups'
        header = self.header_json()
        data = json.dumps({'groupName': group})
        self.logger.info('Creating Sign group {} '.format(group))
        res, code = self.call_with_retry_sync('POST', url, header, data)
        self.groups[group] = res['groupId']

    def update_users(self, users):
        """
        Passthrough for call handling
        """
        self._handle_calls(self._update_user, self.header_json(), users)

    def get_users(self):
        """
        Gets the full user list, and then extracts the user ID's for making calls
        We return self.users because it will be filled by the _get_user method. This is
        necessary to avoid returning futures of calls which we cannot predict.
        """
        self.logger.info('Getting list of all Sign users')
        user_list, _ = self.call_with_retry_sync('GET', self.api_url + 'users', self.header())
        user_ids = [u['userId'] for u in user_list['userInfoList']]
        self._handle_calls(self._get_user, self.header(), user_ids)
        return self.users

    def _handle_calls(self, handle, headers, objects):
        """
        Batches and executes handle for each of o in objects
        handle: reference to function which will be called
        headers: api headers (common to all requests)
        objects: list of objects, which will be iterated through - and handle called on each
        """
        if self.api_url is None or self.groups is None:
            self._init()
        # Execute calls by batches. This reduces the memory stack, since we do not need to create all
        # coroutines before starting execution. We call run_until_complete for each set until all sets have run
        set_number = 1
        batch_count = ceil(len(objects) / self.batch_size)
        for i in range(0, len(objects), self.batch_size):
            self.logger.info("{}s - batch {}/{}".format(handle.__name__, set_number, batch_count))
            self.loop.run_until_complete(self._await_calls(handle, headers, objects[i:i + self.batch_size]))
            set_number += 1

    async def _await_calls(self, handle, headers, objects):
        """
        Where we actually await the coroutines. Must be own method, in order to be handled by loop
        """
        if not objects:
            return
        # Semaphore specifies number of allowed calls at one time
        sem = asyncio.Semaphore(value=self.concurrency_limit)
        # We must use only 1 session, else will hang
        async with aiohttp.ClientSession(trust_env=True, timeout=self.timeout) as session:
            # prepare a list of calls to make * Note: calls are prepared by using call
            # syntax (eg, func() and not func), but they will not be run until executed by the wait
            # split into batches of self.batch_size to avoid taking too much memory
            calls = [handle(sem, o, headers, session) for o in objects]
            await asyncio.wait(calls)

    async def _get_user(self, semaphore, user_id, header, session):
        # This will block the method from executing until a position opens
        async with semaphore:
            user_url = self.api_url + 'users/' + user_id
            user, code = await self.call_with_retry_async('GET', user_url, header, session=session)
            if code != 200:
                self.logger.error("Error fetching user '{}' with response: {}".format(user_id, user))
                return
            # Skip inactive users and the configured admin account.
            if user['userStatus'] != 'ACTIVE':
                return
            if user['email'] == self.admin_email:
                return
            user['userId'] = user_id
            user['roles'] = self.user_roles(user)
            self.users[user['email']] = user
            self.logger.debug('retrieved user details for Sign user {}'.format(user['email']))

    async def _update_user(self, semaphore, user, headers, session):
        """
        Update Sign user
        """
        # This will block the method from executing until a position opens
        async with semaphore:
            url = self.api_url + 'users/' + user['userId']
            group = self.reverse_groups[user['groupId']]
            body, code = await self.call_with_retry_async('PUT', url, headers, data=json.dumps(user), session=session)
            self.logger.info(
                "Updated Sign user '{}', Group: '{}', Roles: {}".format(user['email'], group, user['roles']))
            if code != 200:
                self.logger.error("Error updating user '{}' with response: {}".format(user['email'], body))

    @staticmethod
    def user_roles(user):
        """
        Resolve user roles
        :return: list[]
        """
        return ['NORMAL_USER'] if 'roles' not in user else user['roles']

    def call_with_retry_sync(self, method, url, header, data=None):
        """
        Need to define this method, so that it can be called outside async context
        loop will execute a single synchronous call, but sharing code with the async retry method
        """
        return self.loop.run_until_complete(self.call_with_retry_async(method, url, header, data=data or {}))

    async def call_with_retry_async(self, method, url, header, data=None, session=None):
        """
        Call manager with exponential retry
        :return: Response <Response> object
        """
        retry_nb = 0
        waiting_time = 10
        close = session is None
        session = session or aiohttp.ClientSession(trust_env=True, timeout=self.timeout)
        session.headers.update(header)
        try:
            while True:
                try:
                    waiting_time *= 3
                    self.logger.debug('Attempt {} to call: {}'.format(retry_nb, url))
                    # Bug fix: the configured ssl_cert_verify setting was never
                    # applied to the aiohttp request.
                    async with session.request(method=method, url=url, data=data or {}, ssl=self.ssl_cert_verify) as r:
                        if r.status >= 500:
                            raise Exception('{}, Headers: {}'.format(r.status, r.headers))
                        elif r.status == 429:
                            raise Exception('{} - too many calls. Headers: {}'.format(r.status, r.headers))
                        elif r.status > 400 and r.status < 500:
                            self.logger.critical(' {} - {}. Headers: {}'.format(r.status, r.reason, r.headers))
                            raise AssertionException('')
                        body = await r.json()
                        return body, r.status
                except AssertionException:
                    # Bug fix: client (4xx) errors were caught by the broad
                    # handler below and pointlessly retried; propagate them.
                    raise
                except Exception as exp:
                    retry_nb += 1
                    self.logger.warning('Failed: {} - {}'.format(type(exp), exp.args))
                    if retry_nb == (self.max_sign_retries + 1):
                        raise AssertionException('Quitting after {} retries'.format(self.max_sign_retries))
                    self.logger.warning('Waiting for {} seconds'.format(waiting_time))
                    await asyncio.sleep(waiting_time)
        finally:
            # Bug fix: the original finally ran on *every* loop iteration, so
            # an owned session was closed before the first retry; close it
            # once, only when actually leaving the retry loop.
            if close:
                await session.close()
Sign connector: thread the `ssl_cert_verify` setting through to both the synchronous `requests` call and the aiohttp session requests.
import asyncio
import json
import logging
from math import ceil
import aiohttp
import requests
from user_sync.error import AssertionException
class SignClient:
    """Client for the Adobe Sign REST API (v5 or v6)."""
    version = 'v5'
    _endpoint_template = 'api/rest/{}/'
    DEFAULT_GROUP_NAME = 'default group'

    def __init__(self, config):
        """
        Build a Sign client from config. Required keys: host, key,
        admin_email. Optional: console_org and a `connection` sub-dict
        (retry_count, request_concurrency, timeout, batch_size,
        ssl_cert_verify).
        """
        for k in ['host', 'key', 'admin_email']:
            if k not in config:
                raise AssertionException("Key '{}' must be specified for all Sign orgs".format(k))
        self.host = config['host']
        self.key = config['key']
        self.admin_email = config['admin_email']
        self.console_org = config['console_org'] if 'console_org' in config else None
        self.api_url = None
        self.groups = None
        connection_cfg = config.get('connection') or {}
        self.max_sign_retries = connection_cfg.get('retry_count') or 5
        self.concurrency_limit = connection_cfg.get('request_concurrency') or 1
        timeout = connection_cfg.get('timeout') or 120
        self.batch_size = connection_cfg.get('batch_size') or 10000
        # Bug fix: `get('ssl_cert_verify') or True` always evaluated to True,
        # silently discarding an explicit False; only default when unset.
        ssl_cert_verify = connection_cfg.get('ssl_cert_verify')
        self.ssl_cert_verify = True if ssl_cert_verify is None else ssl_cert_verify
        self.logger = logging.getLogger(self.logger_name())
        logging.getLogger("urllib3").setLevel(logging.WARNING)
        self.timeout = aiohttp.ClientTimeout(total=None, sock_connect=timeout, sock_read=timeout)
        self.loop = asyncio.get_event_loop()
        self.users = {}

    def _init(self):
        # Resolve the org-specific API base URL and cache group mappings.
        self.api_url = self.base_uri()
        self.groups = self.get_groups()
        self.reverse_groups = {v: k for k, v in self.groups.items()}

    def sign_groups(self):
        if self.api_url is None or self.groups is None:
            self._init()
        return self.groups

    def logger_name(self):
        return 'sign_client.{}'.format(self.console_org if self.console_org else 'main')

    def header(self):
        """
        Return Sign API auth header
        :return: dict()
        """
        if self.version == 'v6':
            return {
                'Authorization': "Bearer {}".format(self.key),
                'Connection': 'close',
            }
        return {
            'Access-Token': self.key,
        }

    def header_json(self):
        """
        Get auth headers with options to PUT/POST JSON
        :return: dict()
        """
        json_headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
        }
        json_headers.update(self.header())
        return json_headers

    def base_uri(self):
        """
        This function validates that the SIGN integration key is valid.
        :return: dict()
        """
        endpoint = self._endpoint_template.format(self.version)
        url = 'https://' + self.host + '/' + endpoint
        if self.version == 'v6':
            url_path = 'baseUris'
            access_point_key = 'apiAccessPoint'
        else:
            url_path = 'base_uris'
            access_point_key = 'api_access_point'
        result = requests.get(url + url_path, headers=self.header(), verify=self.ssl_cert_verify)
        if result.status_code != 200:
            raise AssertionException('Error getting base URI from Sign API, is API key valid?')
        if access_point_key not in result.json():
            raise AssertionException('Error getting base URI for Sign API, result invalid')
        self.logger.debug('base_uri result: {}'.format(result.json()[access_point_key] + endpoint))
        return result.json()[access_point_key] + endpoint

    def get_groups(self):
        """
        API request to get group information
        :return: dict()
        """
        if self.api_url is None:
            self.api_url = self.base_uri()
        url = self.api_url + 'groups'
        header = self.header()
        sign_groups, code = self.call_with_retry_sync('GET', url, header)
        self.logger.info('getting Sign user groups')
        groups = {}
        for group in sign_groups['groupInfoList']:
            groups[group['groupName'].lower()] = group['groupId']
        return groups

    def create_group(self, group):
        """
        Create a new group in Sign
        :param group: str
        :return:
        """
        if self.api_url is None or self.groups is None:
            self._init()
        url = self.api_url + 'groups'
        header = self.header_json()
        data = json.dumps({'groupName': group})
        self.logger.info('Creating Sign group {} '.format(group))
        res, code = self.call_with_retry_sync('POST', url, header, data)
        self.groups[group] = res['groupId']

    def update_users(self, users):
        """
        Passthrough for call handling
        """
        self._handle_calls(self._update_user, self.header_json(), users)

    def get_users(self):
        """
        Gets the full user list, and then extracts the user ID's for making calls
        We return self.users because it will be filled by the _get_user method. This is
        necessary to avoid returning futures of calls which we cannot predict.
        """
        self.logger.info('Getting list of all Sign users')
        user_list, _ = self.call_with_retry_sync('GET', self.api_url + 'users', self.header())
        user_ids = [u['userId'] for u in user_list['userInfoList']]
        self._handle_calls(self._get_user, self.header(), user_ids)
        return self.users

    def _handle_calls(self, handle, headers, objects):
        """
        Batches and executes handle for each of o in objects
        handle: reference to function which will be called
        headers: api headers (common to all requests)
        objects: list of objects, which will be iterated through - and handle called on each
        """
        if self.api_url is None or self.groups is None:
            self._init()
        # Execute calls by batches. This reduces the memory stack, since we do not need to create all
        # coroutines before starting execution. We call run_until_complete for each set until all sets have run
        set_number = 1
        batch_count = ceil(len(objects) / self.batch_size)
        for i in range(0, len(objects), self.batch_size):
            self.logger.info("{}s - batch {}/{}".format(handle.__name__, set_number, batch_count))
            self.loop.run_until_complete(self._await_calls(handle, headers, objects[i:i + self.batch_size]))
            set_number += 1

    async def _await_calls(self, handle, headers, objects):
        """
        Where we actually await the coroutines. Must be own method, in order to be handled by loop
        """
        if not objects:
            return
        # Semaphore specifies number of allowed calls at one time
        sem = asyncio.Semaphore(value=self.concurrency_limit)
        # We must use only 1 session, else will hang
        async with aiohttp.ClientSession(trust_env=True, timeout=self.timeout) as session:
            # prepare a list of calls to make * Note: calls are prepared by using call
            # syntax (eg, func() and not func), but they will not be run until executed by the wait
            # split into batches of self.batch_size to avoid taking too much memory
            calls = [handle(sem, o, headers, session) for o in objects]
            await asyncio.wait(calls)

    async def _get_user(self, semaphore, user_id, header, session):
        # This will block the method from executing until a position opens
        async with semaphore:
            user_url = self.api_url + 'users/' + user_id
            user, code = await self.call_with_retry_async('GET', user_url, header, session=session)
            if code != 200:
                self.logger.error("Error fetching user '{}' with response: {}".format(user_id, user))
                return
            # Skip inactive users and the configured admin account.
            if user['userStatus'] != 'ACTIVE':
                return
            if user['email'] == self.admin_email:
                return
            user['userId'] = user_id
            user['roles'] = self.user_roles(user)
            self.users[user['email']] = user
            self.logger.debug('retrieved user details for Sign user {}'.format(user['email']))

    async def _update_user(self, semaphore, user, headers, session):
        """
        Update Sign user
        """
        # This will block the method from executing until a position opens
        async with semaphore:
            url = self.api_url + 'users/' + user['userId']
            group = self.reverse_groups[user['groupId']]
            body, code = await self.call_with_retry_async('PUT', url, headers, data=json.dumps(user), session=session)
            self.logger.info(
                "Updated Sign user '{}', Group: '{}', Roles: {}".format(user['email'], group, user['roles']))
            if code != 200:
                self.logger.error("Error updating user '{}' with response: {}".format(user['email'], body))

    @staticmethod
    def user_roles(user):
        """
        Resolve user roles
        :return: list[]
        """
        return ['NORMAL_USER'] if 'roles' not in user else user['roles']

    def call_with_retry_sync(self, method, url, header, data=None):
        """
        Need to define this method, so that it can be called outside async context
        loop will execute a single synchronous call, but sharing code with the async retry method
        """
        return self.loop.run_until_complete(self.call_with_retry_async(method, url, header, data=data or {}))

    async def call_with_retry_async(self, method, url, header, data=None, session=None):
        """
        Call manager with exponential retry
        :return: Response <Response> object
        """
        retry_nb = 0
        waiting_time = 10
        close = session is None
        session = session or aiohttp.ClientSession(trust_env=True, timeout=self.timeout)
        session.headers.update(header)
        try:
            while True:
                try:
                    waiting_time *= 3
                    self.logger.debug('Attempt {} to call: {}'.format(retry_nb, url))
                    async with session.request(method=method, url=url, data=data or {}, ssl=self.ssl_cert_verify) as r:
                        if r.status >= 500:
                            raise Exception('{}, Headers: {}'.format(r.status, r.headers))
                        elif r.status == 429:
                            raise Exception('{} - too many calls. Headers: {}'.format(r.status, r.headers))
                        elif r.status > 400 and r.status < 500:
                            self.logger.critical(' {} - {}. Headers: {}'.format(r.status, r.reason, r.headers))
                            raise AssertionException('')
                        body = await r.json()
                        return body, r.status
                except AssertionException:
                    # Bug fix: client (4xx) errors were caught by the broad
                    # handler below and pointlessly retried; propagate them.
                    raise
                except Exception as exp:
                    retry_nb += 1
                    self.logger.warning('Failed: {} - {}'.format(type(exp), exp.args))
                    if retry_nb == (self.max_sign_retries + 1):
                        raise AssertionException('Quitting after {} retries'.format(self.max_sign_retries))
                    self.logger.warning('Waiting for {} seconds'.format(waiting_time))
                    await asyncio.sleep(waiting_time)
        finally:
            # Bug fix: the original finally ran on *every* loop iteration, so
            # an owned session was closed before the first retry; close it
            # once, only when actually leaving the retry loop.
            if close:
                await session.close()
|
import praw
import pdb
import re
import os
import time
reddit = praw.Reddit('bot1')
subreddit = reddit.subreddit("magictcg+edh+pythonforengineers")

# Canned correction posted wherever "Queen Marchesa" appears without
# "long may she reign".
lmsrcomment = """>Queen Marchesa (long may she reign)\n
\nFTFY. I'm a bot. If I've made a mistake, click [here.]
(https://www.reddit.com/message/compose?to=shadowwesley77)
"""

# Load IDs of posts already replied to, so restarts don't double-post.
if not os.path.isfile("posts_replied_to.txt"):
    posts_replied_to = []
else:
    with open("posts_replied_to.txt", "r") as f:
        posts_replied_to = f.read()
        posts_replied_to = posts_replied_to.split("\n")
        posts_replied_to = list(filter(None, posts_replied_to))

# Same bookkeeping for individual comments.
if not os.path.isfile("comments_replied_to.txt"):
    comments_replied_to = []
else:
    with open("comments_replied_to.txt", "r") as t:
        comments_replied_to = t.read()
        comments_replied_to = comments_replied_to.split("\n")
        comments_replied_to = list(filter(None, comments_replied_to))

checked = 0
#Check submission titles
for submission in subreddit.stream.submissions():
    if submission.id not in posts_replied_to:
        checked = checked + 1
        # Bug fix: `checked % 100 == 100` can never be true (a mod-100 result
        # is 0..99); report progress every 100th post instead.
        if checked % 100 == 0:
            print ("Checked ", checked ," posts")
        if re.search("Queen Marchesa", submission.title, re.IGNORECASE) and not re.search("long may she reign", submission.title, re.IGNORECASE):
            submission.reply(lmsrcomment)
            print("Bot replied to: ", submission.title)
            posts_replied_to.append(submission.id)
            with open("posts_replied_to.txt", "w") as f:
                for post_id in posts_replied_to:
                    f.write(post_id + "\n")
        #Check comments of post
        submission.comments.replace_more(limit=0)
        comments = submission.comments[:]
        while comments:
            comment = comments.pop(0)
            # Bug fix: `comment.author is not "MTGCardFetcher"` compared object
            # identity against a str literal (always True — the author is a
            # praw Redditor object); compare the author's name by equality.
            if comment.id not in comments_replied_to and str(comment.author) != "MTGCardFetcher":
                if re.search("Queen Marchesa", comment.body, re.IGNORECASE) and not re.search("long may she reign", comment.body, re.IGNORECASE):
                    comment.reply(lmsrcomment)
                    print("Bot replied to comment under: ", submission.title)
                    comments_replied_to.append(comment.id)
                    with open("comments_replied_to.txt", "w") as t:
                        for post_id in comments_replied_to:
                            t.write(post_id + "\n")
            comments.extend(comment.replies)
Modulo fix
Corrected the progress-counter check: `checked % 100 == 100` can never be true (a modulo-100 result is 0–99), so it now uses `checked % 100 == 0` to report every 100th post as intended.
import praw
import pdb
import re
import os
import time
reddit = praw.Reddit('bot1')
subreddit = reddit.subreddit("magictcg+edh+pythonforengineers")

# Canned correction posted wherever "Queen Marchesa" appears without
# "long may she reign".
lmsrcomment = """>Queen Marchesa (long may she reign)\n
\nFTFY. I'm a bot. If I've made a mistake, click [here.]
(https://www.reddit.com/message/compose?to=shadowwesley77)
"""

# Load IDs of posts already replied to, so restarts don't double-post.
if not os.path.isfile("posts_replied_to.txt"):
    posts_replied_to = []
else:
    with open("posts_replied_to.txt", "r") as f:
        posts_replied_to = f.read()
        posts_replied_to = posts_replied_to.split("\n")
        posts_replied_to = list(filter(None, posts_replied_to))

# Same bookkeeping for individual comments.
if not os.path.isfile("comments_replied_to.txt"):
    comments_replied_to = []
else:
    with open("comments_replied_to.txt", "r") as t:
        comments_replied_to = t.read()
        comments_replied_to = comments_replied_to.split("\n")
        comments_replied_to = list(filter(None, comments_replied_to))

checked = 0
#Check submission titles
for submission in subreddit.stream.submissions():
    if submission.id not in posts_replied_to:
        checked = checked + 1
        if checked % 100 == 0:
            print ("Checked ", checked ," posts")
        if re.search("Queen Marchesa", submission.title, re.IGNORECASE) and not re.search("long may she reign", submission.title, re.IGNORECASE):
            submission.reply(lmsrcomment)
            print("Bot replied to: ", submission.title)
            posts_replied_to.append(submission.id)
            with open("posts_replied_to.txt", "w") as f:
                for post_id in posts_replied_to:
                    f.write(post_id + "\n")
        #Check comments of post
        submission.comments.replace_more(limit=0)
        comments = submission.comments[:]
        while comments:
            comment = comments.pop(0)
            # Bug fix: `comment.author is not "MTGCardFetcher"` compared object
            # identity against a str literal (always True — the author is a
            # praw Redditor object); compare the author's name by equality.
            if comment.id not in comments_replied_to and str(comment.author) != "MTGCardFetcher":
                if re.search("Queen Marchesa", comment.body, re.IGNORECASE) and not re.search("long may she reign", comment.body, re.IGNORECASE):
                    comment.reply(lmsrcomment)
                    print("Bot replied to comment under: ", submission.title)
                    comments_replied_to.append(comment.id)
                    with open("comments_replied_to.txt", "w") as t:
                        for post_id in comments_replied_to:
                            t.write(post_id + "\n")
            comments.extend(comment.replies)
|
#! python3
# coding: utf-8
import json
import logging
import os
import time
from collections import deque
import requests
import schedule
import tweepy
# Bot version string (used for display/diagnostics only in this module).
__version__ = "1.2.4"
# File and directory names
CONFIG_FILE = "config.json"
IMG_DIR = "img"
RECENT_IDS_FILE = "recentids.txt"
DB_DUMP_FILE = "danbooru_dump.txt"
# Templates
DB_URL = "http://danbooru.donmai.us/"
DB_API_URL = DB_URL + "{endpoint}.json{params}"
PIXIV_URL = "http://www.pixiv.net/member_illust.php?mode=medium&illust_id={id}"
DA_URL = "http://{artist}.deviantart.com/gallery/#/{id}"
LOG_FMT = "%(levelname)s (%(name)s): %(message)s"
# Preset tag blacklist, mostly tags that are too explicit
TAG_BLACKLIST = (
    "pregnant",
    "diaper",
    "inflation",
    "panties",
    "guro",
    "scat",
    "peeing",
    "comic",
    "bikini",
    "chastity_belt",
    "trefoil",
    "undressing",
    "spread_legs",
    "pussy",
    "nipples",
    "censored",
    "cum",
    "nude",
    "sex",
    "facial",
    "vaginal",
    "cum_on_body",
    "convenient_censoring",
    "bottomless",
    "covering_breasts",
    "groin",
    "cameltoe",
    "panty_lift",
    "french_kiss",
    "underboob",
    "between_breasts",
    "lingerie",
    "ebola",
    "navel_cutout",
    "partially_visible_vulva",
    "ball_gag",
    "bdsm",
    "bondage",
    "gag",
    "gagged",
    "spoilers",
    "penis",
    "disembodied_penis")
# Usually list of usernames who don't want their art reposted
USER_BLACKLIST = (
    "khee",
    "bakakhee",
    "cactuskhee",
    "junkhee")
# Use for source URLs
# (except Twitter, Pixiv, and deviantart.net, which are processed separately)
SOURCE_DOMAINS = (
    "tumblr.com",
    "deviantart.com",
    "twitpic.com",
    "seiga.nicovideo.jp")
# Use for verifying content type before downloading an image
ALLOWED_CONTENT_TYPES = ("image/jpeg", "image/png", "image/gif",
                         "binary/octet-stream")
# Post ID number of "TrainerTrish" art
TRISH_ID = 2575437
# Module-level logger; configured by the application entry point.
logger = logging.getLogger(__name__)
# Parsed contents of CONFIG_FILE (filled in by parse_config()).
config_dict = {}
# For debugging purposes
db_request_raw = ""
class ImageQueue():
    """FIFO queue of (post_id, image_uri, source) tuples awaiting posting."""

    def __init__(self):
        self._items = []

    def enqueue(self, post_id: str, image_uri: str, source: str=None):
        """Add a post to the back of the queue; post_id is stored as str."""
        self._items.insert(0, (str(post_id), image_uri, source))

    def dequeue(self):
        """Remove and return the oldest queued item."""
        return self._items.pop()

    def __len__(self):
        return len(self._items)

    def __str__(self):
        return str(self._items)

    def is_empty(self):
        return len(self) == 0

    def get_first_item(self):
        """Peek at the oldest item without removing it; None when empty."""
        if not self._items:
            return None
        return self._items[-1]
class TweetPicBot():
    """Thin Twitter wrapper: verifies credentials and posts image tweets."""
    def __init__(self, keys: dict):
        # keys must contain: consumer, consumer_secret, access, access_secret.
        auth = tweepy.OAuthHandler(keys["consumer"], keys["consumer_secret"])
        auth.set_access_token(keys["access"], keys["access_secret"])
        self._api = tweepy.API(auth)
        self._authenticate()
    def _authenticate(self):
        # Fail fast (SystemExit) if the supplied API keys are invalid.
        try:
            user = self._api.verify_credentials().screen_name
            logger.info(
                "Twitter API keys verified successfully, authenticated as @%s",
                user)
        except tweepy.TweepError as t:
            log_tweepy_err(t, "Can't verify Twitter API keys")
            raise SystemExit
    def send_tweet(self, media_path: str, tweet=""):
        # Return True if tweet was sent successfully, otherwise False
        try:
            logger.debug("Uploading %s", media_path)
            media_id = self._api.media_upload(media_path).media_id_string
            logger.debug("Sending tweet")
            self._api.update_status(status=tweet, media_ids=[media_id])
            return True
        except tweepy.TweepError as t:
            log_tweepy_err(t, "Failed to send tweet")
            return False
# Queue of Danbooru posts waiting to be tweeted.
image_queue = ImageQueue()
# Ring buffer of the 25 most recently used post IDs, to avoid quick repeats.
recent_ids = deque([], 25)
def log_tweepy_err(e: tweepy.TweepError, prefix: str=""):
    """Log a TweepError at ERROR level, optionally prefixed with context.

    Tweepy's TweepError exception class is weird, that's why I have this
    set up.  Bug fix: the original only initialized `errmsg` when a prefix
    was given, so calling it without a prefix raised UnboundLocalError.
    """
    errmsg = prefix + ": " if prefix != "" else ""
    if e.api_code:
        code = e.api_code
        msg = e.args[0][0]["message"]
        errmsg += "{0} (error code {1})".format(msg, code)
    else:
        errmsg += str(e)
    logger.error(errmsg)
def parse_config():
    """Load CONFIG_FILE into the module-level config dict, then validate it."""
    global config_dict
    with open(CONFIG_FILE) as conf_file:
        config_dict = json.load(conf_file)
    verify_keys()
def verify_keys():
    """Assert that config_dict matches the expected schema; raises AssertionError."""
    # TODO: clean up this function
    def do_assert(condition, err_msg: str):
        assert condition, err_msg
    def verify_blacklist_keys():
        # The "blacklist" object must hold "tags" and "artists" arrays.
        blacklist = config_dict["blacklist"]
        for k in ("tags", "artists"):
            do_assert(k in blacklist,
                      "Required key \"%s\" not found in blacklist config" % k)
            do_assert(isinstance(blacklist[k], list), "Required blacklist key "
                      "\"%s\" must have value of type array (list)" % k)
    def verify_twitter_keys():
        # All four OAuth credential strings must be present and non-empty.
        twkeys = config_dict["twitter_keys"]
        for k in ("consumer", "consumer_secret", "access", "access_secret"):
            do_assert(k in twkeys,
                      "Required key \"%s\" not found in Twitter keys config" %k)
            do_assert(isinstance(twkeys[k], str) and twkeys[k] != "",
                      "Required key \"%s\" must have value of type string "
                      "and can't be blank" % k)
    for k in ("tags", "blacklist", "twitter_keys", "score", "favorites"):
        do_assert(k in config_dict,
                  "Required key \"%s\" not found in config" % k)
        if k in ("blacklist", "twitter_keys"):
            do_assert(isinstance(config_dict[k], dict), "Required key "
                      "%s must have value of type object (dict)" % k)
        elif k == "tags":
            do_assert(isinstance(config_dict[k], list), "Required key "
                      "\"%s\" must have value of type array (list)" % k)
            # Danbooru's anonymous API limits searches to two tags.
            do_assert(len(config_dict[k]) < 3,
                      "Search queries are limited to 2 tags")
            do_assert(len(config_dict[k]) > 0, "Tags cannot be blank")
        elif k in ("score", "favorites"):
            do_assert(isinstance(config_dict[k], int), "Required key "
                      "\"%s\" must have value of integer" % k)
        else:
            # NOTE(review): unreachable — every key in the tuple above is
            # handled by one of the earlier branches.
            do_assert(isinstance(config_dict[k], str), "Required key "
                      "\"%s\" must have value of type string and can't be blank" % k)
    verify_twitter_keys()
    verify_blacklist_keys()
def get_danbooru_request(endpoint: str, params: dict):
# Convert params dict to URI string
if params:
params_list = []
params_str = "?"
for k in params:
if not (isinstance(k, str) and isinstance(params[k], str)):
continue
params_list.append("{0}={1}".format(k, params[k]))
params_str += "&".join(params_list)
else:
params_str = ""
global db_request_raw
r = requests.get(DB_API_URL.format(endpoint=endpoint, params=params_str))
db_request_raw = r.content.decode()
return r.json()
def populate_queue(limit: int=50, attempts=1):
# Step 1: Assemble URI parameters
tags_str = "+".join(config_dict["tags"])
logger.info("Building post queue for tag(s) \"%s\"", tags_str)
params = {
"tags":tags_str,
"limit":str(limit),
"random":"true"
}
# Step 2: Get request and check if it returned any posts
posts = get_danbooru_request("posts", params)
assert posts, "Provided tag(s) \"%s\" returned no posts" % tags_str
# Step 3: Iterate through and filter posts
# Unfiltered posts are added to image queue
postcount = 0
for post in posts:
# Evaluate post data for filtering
if not eval_post(post):
continue
# Enqueue post info
postid = post["id"]
# Use "large_file_url" just in case post's actual image is too big
url = post["large_file_url"]
source = get_source(post)
image_queue.enqueue(postid, DB_URL + url, source)
logger.debug("Added post ID %s to queue", postid)
postcount += 1
# Step 4: Log queue size when done, otherwise run function again
if postcount > 0:
logger.info("%s/%s images added to queue, current queue size is now %s",
postcount, len(posts), len(image_queue))
return
# Give up after 3 attempts
if attempts >= 3:
raise SystemExit
logger.info("No matching images added to queue, retrying in 5s")
attempts += 1
time.sleep(5)
populate_queue(limit, attempts)
def eval_post(post: dict):
# Returns False if given post is caught by any filters below
postid = post["id"]
# Check if post is banned (no image available)
if post["is_banned"]:
logger.debug("Post ID %s is banned and skipped", postid)
return False
# Check if rating is q(uestionable) or e(xplicit)
if post["rating"] != "s":
logger.debug("Post ID %s skipped due to rating (rated %s)", postid,
post["rating"])
return False
# Evaluate tags, score, favorite count, and filetype
return (eval_tags(post["tag_string"], postid) and
eval_score(post["score"], postid) and
eval_favorites(post["fav_count"], postid) and
eval_filetype(post["large_file_url"], postid)
)
def eval_tags(tag_string: str, postid):
# Return True if no tags are in blacklist, otherwise return False
tags = tag_string.split()
blacklist_config = config_dict["blacklist"]
for t in tags:
if t in TAG_BLACKLIST or t in blacklist_config["tags"]:
logger.debug("Post ID %s contains blacklisted tag: %s", postid, t)
return False
if t in USER_BLACKLIST or t in blacklist_config["artists"]:
logger.debug("Post ID %s is by blacklisted artist: %s", postid, t)
return False
return True
def eval_score(score: int, postid):
# Return True if post's score meets threshold, otherwise return False
if score >= config_dict["score"]:
return True
logger.debug("Post ID %s did not meet score threshold of %s", postid,
config_dict["score"])
return False
def eval_favorites(count: int, postid):
    """Return True when the post's favorite count meets the configured
    "favorites" threshold, otherwise log the miss and return False."""
    if count >= config_dict["favorites"]:
        return True
    # Report the "favorites" threshold -- the previous message logged the
    # unrelated "score" config value.
    logger.debug("Post ID %s did not meet favorite count threshold of %s",
                 postid, config_dict["favorites"])
    return False
def eval_filetype(filename: str, postid):
filetype = filename.split(".")[-1]
if filetype.lower() in ("jpg", "jpeg", "png", "gif"):
return True
logger.debug("Post ID %s has invalid filetype of .%s", postid, filetype)
return False
def get_source(post: dict):
    """Return a short source attribution for a post, or None.

    Priority: pixiv ID, then Twitter handle, then a deviantART permalink,
    then any whitelisted source domain. Posts tagged "bad_id" get no source.
    """
    def get_da_permalink(url: str):
        # deviantART filenames look like "<title>_by_<artist>-<id>.<ext>".
        info = url.split("_by_")[-1].split(".")[0]
        if "-" not in info:
            return url
        # rsplit: artist names may themselves contain dashes; a plain
        # two-way unpack of split("-") raised ValueError on those.
        artist, art_id = info.rsplit("-", 1)
        return DA_URL.format(artist=artist, id=art_id)
    if "bad_id" in post["tag_string_general"]:
        logger.debug("Post ID %s contains tag \"bad_id\"", post["id"])
        return
    if post["pixiv_id"]:
        return PIXIV_URL.format(id=post["pixiv_id"])
    source = post["source"]
    if source.startswith("https://twitter.com/"):
        return "@" + source.split("/")[3]
    if "deviantart.net/" in source:
        return get_da_permalink(source)
    for domain in SOURCE_DOMAINS:
        if domain + "/" in source:
            return source
    return
def dump_db_request(error):
# Dumps response content to file when bot runs into an error
# while building queue
if type(error) in (SystemExit, KeyboardInterrupt, AssertionError):
return
with open(DB_DUMP_FILE, "w", encoding="utf_8") as f:
f.write(db_request_raw)
logger.info("Error occurred while populating queue, "
"response content dumped to %s", DB_DUMP_FILE)
def post_image(bot: TweetPicBot):
    """Pull the next eligible post from the queue, download its image,
    and tweet it through *bot*.

    Refills the queue when it runs low, skips IDs posted in the last 25
    tweets, and leaves the post queued when the tweet fails so it is
    retried at the next scheduled interval.
    """
    # Step 1: Repopulate queue if size is less than 5
    if len(image_queue) < 5:
        try:
            populate_queue()
        except Exception as e:
            dump_db_request(e)
            raise
    # Step 2: Check if post ID was already posted in the last 25 tweets
    postdata = image_queue.get_first_item()
    postid = postdata[0]
    while postid in recent_ids:
        # Discard post in queue
        image_queue.dequeue()
        logger.debug("Post ID %s was uploaded in the last 25 tweets", postid)
        postdata = image_queue.get_first_item()
        postid = postdata[0]
    # Step 3: Download image to file
    url = postdata[1]
    try:
        file_path = download_file(postid, url)
    except TypeError as type_error:
        # If received content type is not in ALLOWED_CONTENT_TYPES list above,
        # then move on to next post in queue
        logger.error("%s, moving on to next post in queue", type_error)
        image_queue.dequeue()
        # Add 1s delay to make sure we're not flooding Danbooru with requests
        time.sleep(1)
        post_image(bot)
        return
    except:
        logger.exception("Failed to download image for post ID %s, "
                         "will retry at next scheduled interval", postid)
        return
    # Step 4: Prepare tweet content
    # Queue entries store post IDs as strings, so compare against
    # str(TRISH_ID); comparing to the bare int never matched.
    if postid == str(TRISH_ID):
        source_str = "Trish-chan (aka @PlayerOneTimmy as a girl)\n"
    else:
        source_str = ""
    source = postdata[2]
    if source:
        source_str += "Source: %s" % source
    # Step 5: Send tweet and add post ID to recent IDs list
    if bot.send_tweet(file_path, source_str):
        logger.info("Tweet sent successfully! "
                    "Post ID of uploaded image was %s", postid)
        # Discard post from queue when done
        image_queue.dequeue()
        logger.debug("%s post(s) remaining in queue", len(image_queue))
        # Save recent IDs file
        recent_ids.append(postid)
        save_recent_ids()
    else:
        logger.info(
            "Tweet for post ID %s will be sent at next scheduled interval",
            postid)
def download_file(postid: str, url: str):
    """Download the image for *postid* from *url* into IMG_DIR.

    Returns the local file path; skips the download when the file is
    already present. Raises TypeError when the response Content-Type is
    not an allowed image type.
    Based on http://stackoverflow.com/a/16696317
    """
    local_filename = "{0}.{1}".format(postid, url.split('.')[-1])
    path = "{0}/{1}".format(IMG_DIR, local_filename)
    if local_filename in os.listdir(IMG_DIR + "/"):
        logger.debug("Image already exists: %s", path)
        return path
    logger.info("Downloading post ID %s to %s", postid, path)
    time_start = time.time()
    r = requests.get(url, stream=True)
    # Check Content-Type header in case Danbooru returns HTML/XML file
    # instead of an image for any reason
    if "Content-Type" in r.headers:
        # Strip any header parameters (e.g. "image/jpeg; charset=utf-8")
        # before comparing -- previously such responses were wrongly
        # rejected even though the media type itself was allowed.
        content_type = r.headers["Content-Type"].split(";")[0].strip()
        if content_type not in ALLOWED_CONTENT_TYPES:
            raise TypeError("Content type '%s' is invalid for media upload"
                            % content_type)
    with open(path, "wb") as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
    time_end = time.time()
    elapsed = round(time_end - time_start, 3)
    logger.info("Completed downloading %s in %ss", local_filename, elapsed)
    return path
def logging_setup():
logging.basicConfig(format=LOG_FMT, level=logging.INFO)
logging.Formatter.converter = time.gmtime
filehandler = logging.FileHandler("events.log")
fmt = logging.Formatter("[%(asctime)s] " + LOG_FMT, "%Y-%m-%dT%H:%M:%SZ")
filehandler.setFormatter(fmt)
logging.getLogger().addHandler(filehandler)
logging.getLogger("oauthlib").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("requests_oauthlib").setLevel(logging.WARNING)
logging.getLogger("schedule").setLevel(logging.WARNING)
logging.getLogger("tweepy").setLevel(logging.WARNING)
def load_recent_ids():
if not RECENT_IDS_FILE in os.listdir():
logger.debug("%s not found in current directory, skipping",
RECENT_IDS_FILE)
return
with open(RECENT_IDS_FILE) as f:
id_count = 0
for line in f:
line = line.strip("\n")
if line.isdigit():
recent_ids.append(line)
id_count += 1
logger.debug("Found %s post ID(s) in file", id_count)
logger.info("Recent post IDs loaded")
def save_recent_ids():
with open(RECENT_IDS_FILE, mode="w") as f:
f.write("\n".join(recent_ids))
logger.debug("Saved last 25 post IDs to %s", RECENT_IDS_FILE)
def main_loop(interval: int=30):
    """Schedule post_image() every *interval* minutes and run forever.

    interval must be between 1 and 59 minutes (AssertionError otherwise,
    which the top-level handler logs).
    """
    # Check interval range
    assert 60 > interval > 0, "Interval must be between 1 and 59 minutes"
    # Make images directory if it doesn't exist
    if not IMG_DIR in os.listdir():
        logger.info("Creating images directory")
        os.mkdir(IMG_DIR)
    # Set up Twitter API client
    bot = TweetPicBot(config_dict["twitter_keys"])
    # Build initial queue, then set up schedule
    try:
        # Post immediately if current UTC minute is divisible by interval
        current_min = time.gmtime().tm_min
        if current_min % interval == 0:
            post_image(bot)
        else:
            populate_queue()
    except Exception as e:
        dump_db_request(e)
        raise
    for m in range(0, 60, interval):
        # schedule interprets an hourly job's at() string as "MM:SS", so
        # the minute goes first and must be zero-padded; the previous
        # "00:%s" form put the minute in the seconds slot and produced
        # invalid strings like "00:0".
        schedule.every().hour.at("%02d:00" % m).do(post_image, bot)
    while True:
        schedule.run_pending()
        time.sleep(1)
if __name__ == "__main__":
logging_setup()
logger.info("Timmy's Danbooru Twitter Bot v%s is starting up",
__version__)
try:
load_recent_ids()
parse_config()
main_loop()
except (KeyboardInterrupt, SystemExit):
# Use Ctrl-C to terminate the bot
logger.info("Now shutting down")
except AssertionError as e:
logger.error(e)
except:
logger.exception("Exception occurred, now shutting down")
schedule.clear()
Bumped to v1.2.5: Fix bug that prevented some images from being downloaded
- Caused by extra metadata at the end of the content type string (e.g. "image/jpeg; charset=utf-8")
#! python3
# coding: utf-8
import json
import logging
import os
import time
from collections import deque
import requests
import schedule
import tweepy
__version__ = "1.2.5"
# File and directory names
CONFIG_FILE = "config.json"
IMG_DIR = "img"
RECENT_IDS_FILE = "recentids.txt"
DB_DUMP_FILE = "danbooru_dump.txt"
# Templates
DB_URL = "http://danbooru.donmai.us/"
DB_API_URL = DB_URL + "{endpoint}.json{params}"
PIXIV_URL = "http://www.pixiv.net/member_illust.php?mode=medium&illust_id={id}"
DA_URL = "http://{artist}.deviantart.com/gallery/#/{id}"
LOG_FMT = "%(levelname)s (%(name)s): %(message)s"
# Preset tag blacklist, mostly tags that are too explicit
TAG_BLACKLIST = (
"pregnant",
"diaper",
"inflation",
"panties",
"guro",
"scat",
"peeing",
"comic",
"bikini",
"chastity_belt",
"trefoil",
"undressing",
"spread_legs",
"pussy",
"nipples",
"censored",
"cum",
"nude",
"sex",
"facial",
"vaginal",
"cum_on_body",
"convenient_censoring",
"bottomless",
"covering_breasts",
"groin",
"cameltoe",
"panty_lift",
"french_kiss",
"underboob",
"between_breasts",
"lingerie",
"ebola",
"navel_cutout",
"partially_visible_vulva",
"ball_gag",
"bdsm",
"bondage",
"gag",
"gagged",
"spoilers",
"penis",
"disembodied_penis")
# Usually list of usernames who don't want their art reposted
USER_BLACKLIST = (
"khee",
"bakakhee",
"cactuskhee",
"junkhee")
# Use for source URLs
# (except Twitter, Pixiv, and deviantart.net, which are processed separately)
SOURCE_DOMAINS = (
"tumblr.com",
"deviantart.com",
"twitpic.com",
"seiga.nicovideo.jp")
# Use for verifying content type before downloading an image
ALLOWED_CONTENT_TYPES = ("image/jpeg", "image/png", "image/gif",
"binary/octet-stream")
# Post ID number of "TrainerTrish" art
TRISH_ID = 2575437
logger = logging.getLogger(__name__)
config_dict = {}
# For debugging purposes
db_request_raw = ""
class ImageQueue():
    """FIFO queue of (post_id, image_uri, source) tuples.

    New entries are inserted at the front of the internal list and
    dequeue() pops from the back, so the oldest entry comes out first.
    """
    def __init__(self):
        self._items = []

    def enqueue(self, post_id: str, image_uri: str, source: str=None):
        # Post IDs are normalized to strings so membership tests against
        # the recent-ID history behave consistently.
        self._items.insert(0, (str(post_id), image_uri, source))

    def dequeue(self):
        return self._items.pop()

    def __len__(self):
        return len(self._items)

    def __str__(self):
        return str(self._items)

    def is_empty(self):
        return len(self) < 1

    def get_first_item(self):
        # Oldest queued item, or None when the queue is empty.
        if not self._items:
            return None
        return self._items[-1]
class TweetPicBot():
    """Thin wrapper around the tweepy API client.

    Verifies credentials at construction time (exiting on failure) and
    exposes a single send_tweet() helper for media tweets.
    """
    def __init__(self, keys: dict):
        # keys must contain "consumer", "consumer_secret", "access", and
        # "access_secret" (validated earlier by verify_keys()).
        auth = tweepy.OAuthHandler(keys["consumer"], keys["consumer_secret"])
        auth.set_access_token(keys["access"], keys["access_secret"])
        self._api = tweepy.API(auth)
        self._authenticate()
    def _authenticate(self):
        """Verify the API keys; raise SystemExit when they are rejected."""
        try:
            user = self._api.verify_credentials().screen_name
            logger.info(
                "Twitter API keys verified successfully, authenticated as @%s",
                user)
        except tweepy.TweepError as t:
            log_tweepy_err(t, "Can't verify Twitter API keys")
            raise SystemExit
    def send_tweet(self, media_path: str, tweet=""):
        # Return True if tweet was sent successfully, otherwise False
        try:
            logger.debug("Uploading %s", media_path)
            media_id = self._api.media_upload(media_path).media_id_string
            logger.debug("Sending tweet")
            self._api.update_status(status=tweet, media_ids=[media_id])
            return True
        except tweepy.TweepError as t:
            log_tweepy_err(t, "Failed to send tweet")
            return False
image_queue = ImageQueue()
recent_ids = deque([], 25)
def log_tweepy_err(e: tweepy.TweepError, prefix: str=""):
    """Log a TweepError, optionally prefixed with "<prefix>: ".

    Tweepy's TweepError exception class is weird, which is why e.args is
    unpacked manually whenever an API error code is present.
    """
    # Always initialize errmsg -- the previous version left it unbound
    # (UnboundLocalError) whenever prefix was empty.
    errmsg = prefix + ": " if prefix else ""
    if e.api_code:
        code = e.api_code
        msg = e.args[0][0]["message"]
        errmsg += "{0} (error code {1})".format(msg, code)
    else:
        errmsg += str(e)
    logger.error(errmsg)
def parse_config():
    """Load CONFIG_FILE into the module-level config_dict and validate it."""
    global config_dict
    with open(CONFIG_FILE) as config_file:
        config_dict = json.load(config_file)
    verify_keys()
def verify_keys():
    """Validate config_dict's structure; raises AssertionError with a
    human-readable message on the first problem found."""
    # TODO: clean up this function
    def do_assert(condition, err_msg: str):
        # Thin wrapper so every validation failure surfaces as an
        # AssertionError with a clear message.
        assert condition, err_msg
    def verify_blacklist_keys():
        # "blacklist" must contain "tags" and "artists", both lists.
        blacklist = config_dict["blacklist"]
        for k in ("tags", "artists"):
            do_assert(k in blacklist,
                      "Required key \"%s\" not found in blacklist config" % k)
            do_assert(isinstance(blacklist[k], list), "Required blacklist key "
                      "\"%s\" must have value of type array (list)" % k)
    def verify_twitter_keys():
        # All four OAuth credentials must be non-empty strings.
        twkeys = config_dict["twitter_keys"]
        for k in ("consumer", "consumer_secret", "access", "access_secret"):
            do_assert(k in twkeys,
                      "Required key \"%s\" not found in Twitter keys config" %k)
            do_assert(isinstance(twkeys[k], str) and twkeys[k] != "",
                      "Required key \"%s\" must have value of type string "
                      "and can't be blank" % k)
    # Top-level keys: presence first, then a per-key type/shape check.
    for k in ("tags", "blacklist", "twitter_keys", "score", "favorites"):
        do_assert(k in config_dict,
                  "Required key \"%s\" not found in config" % k)
        if k in ("blacklist", "twitter_keys"):
            do_assert(isinstance(config_dict[k], dict), "Required key "
                      "%s must have value of type object (dict)" % k)
        elif k == "tags":
            do_assert(isinstance(config_dict[k], list), "Required key "
                      "\"%s\" must have value of type array (list)" % k)
            # Danbooru limits anonymous searches to two tags.
            do_assert(len(config_dict[k]) < 3,
                      "Search queries are limited to 2 tags")
            do_assert(len(config_dict[k]) > 0, "Tags cannot be blank")
        elif k in ("score", "favorites"):
            do_assert(isinstance(config_dict[k], int), "Required key "
                      "\"%s\" must have value of integer" % k)
        else:
            # NOTE(review): unreachable -- every key in the tuple above is
            # handled by an earlier branch; kept for future keys.
            do_assert(isinstance(config_dict[k], str), "Required key "
                      "\"%s\" must have value of type string and can't be blank" % k)
    verify_twitter_keys()
    verify_blacklist_keys()
def get_danbooru_request(endpoint: str, params: dict):
    """GET a Danbooru API endpoint and return the decoded JSON body.

    *params* is rendered into a query string; entries whose key or value
    is not a string are silently skipped. The raw response text is kept
    in the module global db_request_raw for post-mortem dumps.
    """
    if not params:
        query = ""
    else:
        pairs = ["{0}={1}".format(key, value)
                 for key, value in params.items()
                 if isinstance(key, str) and isinstance(value, str)]
        query = "?" + "&".join(pairs)
    global db_request_raw
    response = requests.get(
        DB_API_URL.format(endpoint=endpoint, params=query))
    db_request_raw = response.content.decode()
    return response.json()
def populate_queue(limit: int=50, attempts=1):
    """Fetch up to *limit* random posts for the configured tags and
    enqueue every one that passes the content filters.

    Retries up to 3 times (5s apart) when nothing passes the filters,
    then raises SystemExit. Raises AssertionError when the search itself
    returns no posts at all.
    """
    search_tags = "+".join(config_dict["tags"])
    logger.info("Building post queue for tag(s) \"%s\"", search_tags)
    query = {"tags": search_tags, "limit": str(limit), "random": "true"}
    posts = get_danbooru_request("posts", query)
    assert posts, "Provided tag(s) \"%s\" returned no posts" % search_tags
    added = 0
    for post in posts:
        if not eval_post(post):
            # Filtered out (banned, rating, tags, score, favs, filetype).
            continue
        post_id = post["id"]
        # Use "large_file_url" just in case the post's actual image is
        # too big for a media upload.
        image_queue.enqueue(post_id, DB_URL + post["large_file_url"],
                            get_source(post))
        logger.debug("Added post ID %s to queue", post_id)
        added += 1
    if added:
        logger.info("%s/%s images added to queue, current queue size is now %s",
                    added, len(posts), len(image_queue))
        return
    # Give up after 3 attempts
    if attempts >= 3:
        raise SystemExit
    logger.info("No matching images added to queue, retrying in 5s")
    time.sleep(5)
    populate_queue(limit, attempts + 1)
def eval_post(post: dict):
    """Return True when *post* passes every content filter, else False."""
    post_id = post["id"]
    if post["is_banned"]:
        # Banned posts have no image available to download.
        logger.debug("Post ID %s is banned and skipped", post_id)
        return False
    if post["rating"] != "s":
        # Only "s"(afe) posts are allowed; "q" and "e" are skipped.
        logger.debug("Post ID %s skipped due to rating (rated %s)", post_id,
                     post["rating"])
        return False
    # Remaining filters, short-circuited in the same order as before:
    # tags, score, favorite count, filetype.
    if not eval_tags(post["tag_string"], post_id):
        return False
    if not eval_score(post["score"], post_id):
        return False
    if not eval_favorites(post["fav_count"], post_id):
        return False
    return eval_filetype(post["large_file_url"], post_id)
def eval_tags(tag_string: str, postid):
    """Return True when no tag in *tag_string* is blacklisted (built-in
    or user-configured) and the artist is not blocked."""
    configured = config_dict["blacklist"]
    for tag in tag_string.split():
        if tag in TAG_BLACKLIST or tag in configured["tags"]:
            logger.debug("Post ID %s contains blacklisted tag: %s", postid, tag)
            return False
        if tag in USER_BLACKLIST or tag in configured["artists"]:
            logger.debug("Post ID %s is by blacklisted artist: %s", postid, tag)
            return False
    return True
def eval_score(score: int, postid):
    """Return True when *score* meets the configured score threshold,
    otherwise log the miss and return False."""
    threshold = config_dict["score"]
    if score < threshold:
        logger.debug("Post ID %s did not meet score threshold of %s", postid,
                     threshold)
        return False
    return True
def eval_favorites(count: int, postid):
    """Return True when the post's favorite count meets the configured
    "favorites" threshold, otherwise log the miss and return False."""
    if count >= config_dict["favorites"]:
        return True
    # Report the "favorites" threshold -- the previous message logged the
    # unrelated "score" config value.
    logger.debug("Post ID %s did not meet favorite count threshold of %s",
                 postid, config_dict["favorites"])
    return False
def eval_filetype(filename: str, postid):
    """Return True when *filename* has an allowed image extension
    (jpg/jpeg/png/gif, case-insensitive), otherwise log and return False."""
    extension = filename.split(".")[-1]
    if extension.lower() in ("jpg", "jpeg", "png", "gif"):
        return True
    logger.debug("Post ID %s has invalid filetype of .%s", postid, extension)
    return False
def get_source(post: dict):
    """Return a short source attribution for a post, or None.

    Priority: pixiv ID, then Twitter handle, then a deviantART permalink,
    then any whitelisted source domain. Posts tagged "bad_id" get no source.
    """
    def get_da_permalink(url: str):
        # deviantART filenames look like "<title>_by_<artist>-<id>.<ext>".
        info = url.split("_by_")[-1].split(".")[0]
        if "-" not in info:
            return url
        # rsplit: artist names may themselves contain dashes; a plain
        # two-way unpack of split("-") raised ValueError on those.
        artist, art_id = info.rsplit("-", 1)
        return DA_URL.format(artist=artist, id=art_id)
    if "bad_id" in post["tag_string_general"]:
        logger.debug("Post ID %s contains tag \"bad_id\"", post["id"])
        return
    if post["pixiv_id"]:
        return PIXIV_URL.format(id=post["pixiv_id"])
    source = post["source"]
    if source.startswith("https://twitter.com/"):
        return "@" + source.split("/")[3]
    if "deviantart.net/" in source:
        return get_da_permalink(source)
    for domain in SOURCE_DOMAINS:
        if domain + "/" in source:
            return source
    return
def dump_db_request(error):
    """Write the last raw Danbooru response to DB_DUMP_FILE for
    debugging, unless *error* is a deliberate shutdown/validation type."""
    # Exact-type check (not isinstance): only these precise exception
    # classes skip the dump, matching the original behavior.
    skip_types = (SystemExit, KeyboardInterrupt, AssertionError)
    if type(error) in skip_types:
        return
    with open(DB_DUMP_FILE, "w", encoding="utf_8") as dump_file:
        dump_file.write(db_request_raw)
    logger.info("Error occurred while populating queue, "
                "response content dumped to %s", DB_DUMP_FILE)
def post_image(bot: TweetPicBot):
    """Pull the next eligible post from the queue, download its image,
    and tweet it through *bot*.

    Refills the queue when it runs low, skips IDs posted in the last 25
    tweets, and leaves the post queued when the tweet fails so it is
    retried at the next scheduled interval.
    """
    # Step 1: Repopulate queue if size is less than 5
    if len(image_queue) < 5:
        try:
            populate_queue()
        except Exception as e:
            dump_db_request(e)
            raise
    # Step 2: Check if post ID was already posted in the last 25 tweets
    postdata = image_queue.get_first_item()
    postid = postdata[0]
    while postid in recent_ids:
        # Discard post in queue
        image_queue.dequeue()
        logger.debug("Post ID %s was uploaded in the last 25 tweets", postid)
        postdata = image_queue.get_first_item()
        postid = postdata[0]
    # Step 3: Download image to file
    url = postdata[1]
    try:
        file_path = download_file(postid, url)
    except TypeError as type_error:
        # If received content type is not in ALLOWED_CONTENT_TYPES list above,
        # then move on to next post in queue
        logger.error("%s, moving on to next post in queue", type_error)
        image_queue.dequeue()
        # Add 1s delay to make sure we're not flooding Danbooru with requests
        time.sleep(1)
        post_image(bot)
        return
    except:
        logger.exception("Failed to download image for post ID %s, "
                         "will retry at next scheduled interval", postid)
        return
    # Step 4: Prepare tweet content
    # Queue entries store post IDs as strings, so compare against
    # str(TRISH_ID); comparing to the bare int never matched.
    if postid == str(TRISH_ID):
        source_str = "Trish-chan (aka @PlayerOneTimmy as a girl)\n"
    else:
        source_str = ""
    source = postdata[2]
    if source:
        source_str += "Source: %s" % source
    # Step 5: Send tweet and add post ID to recent IDs list
    if bot.send_tweet(file_path, source_str):
        logger.info("Tweet sent successfully! "
                    "Post ID of uploaded image was %s", postid)
        # Discard post from queue when done
        image_queue.dequeue()
        logger.debug("%s post(s) remaining in queue", len(image_queue))
        # Save recent IDs file
        recent_ids.append(postid)
        save_recent_ids()
    else:
        logger.info(
            "Tweet for post ID %s will be sent at next scheduled interval",
            postid)
def download_file(postid: str, url: str):
    """Download the image for *postid* from *url* into IMG_DIR.

    Returns the local file path; skips the download when the file is
    already present. Raises TypeError when the response Content-Type is
    not an allowed image type.
    Based on http://stackoverflow.com/a/16696317
    """
    local_filename = "{0}.{1}".format(postid, url.split('.')[-1])
    path = "{0}/{1}".format(IMG_DIR, local_filename)
    if local_filename in os.listdir(IMG_DIR + "/"):
        logger.debug("Image already exists: %s", path)
        return path
    logger.info("Downloading post ID %s to %s", postid, path)
    time_start = time.time()
    r = requests.get(url, stream=True)
    # Check Content-Type header in case Danbooru returns HTML/XML file
    # instead of an image for any reason
    if "Content-Type" in r.headers:
        # Strip any header parameters before comparing. Splitting on ";"
        # and stripping (rather than on "; ") also handles headers
        # without a space, e.g. "image/jpeg;charset=utf-8".
        content_type = r.headers["Content-Type"].split(";")[0].strip()
        if content_type not in ALLOWED_CONTENT_TYPES:
            raise TypeError("Content type '%s' is invalid for media upload"
                            % content_type)
    with open(path, "wb") as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
    time_end = time.time()
    elapsed = round(time_end - time_start, 3)
    logger.info("Completed downloading %s in %ss", local_filename, elapsed)
    return path
def logging_setup():
    """Configure console + file logging (UTC timestamps) and quiet down
    chatty third-party loggers."""
    logging.basicConfig(format=LOG_FMT, level=logging.INFO)
    # Log in UTC so file timestamps match the "Z" suffix in the format.
    logging.Formatter.converter = time.gmtime
    file_handler = logging.FileHandler("events.log")
    file_fmt = logging.Formatter("[%(asctime)s] " + LOG_FMT,
                                 "%Y-%m-%dT%H:%M:%SZ")
    file_handler.setFormatter(file_fmt)
    logging.getLogger().addHandler(file_handler)
    for noisy in ("oauthlib", "requests", "requests_oauthlib",
                  "schedule", "tweepy"):
        logging.getLogger(noisy).setLevel(logging.WARNING)
def load_recent_ids():
    """Seed the recent_ids deque from RECENT_IDS_FILE, if present."""
    if RECENT_IDS_FILE not in os.listdir():
        logger.debug("%s not found in current directory, skipping",
                     RECENT_IDS_FILE)
        return
    with open(RECENT_IDS_FILE) as ids_file:
        loaded = 0
        for raw_line in ids_file:
            candidate = raw_line.strip("\n")
            # Ignore anything that is not a bare numeric post ID.
            if candidate.isdigit():
                recent_ids.append(candidate)
                loaded += 1
        logger.debug("Found %s post ID(s) in file", loaded)
        logger.info("Recent post IDs loaded")
def save_recent_ids():
    """Persist the recent_ids deque (newline-separated) to disk."""
    with open(RECENT_IDS_FILE, mode="w") as ids_file:
        ids_file.write("\n".join(recent_ids))
    logger.debug("Saved last 25 post IDs to %s", RECENT_IDS_FILE)
def main_loop(interval: int=30):
    """Schedule post_image() every *interval* minutes and run forever.

    interval must be between 1 and 59 minutes (AssertionError otherwise,
    which the top-level handler logs).
    """
    # Check interval range
    assert 60 > interval > 0, "Interval must be between 1 and 59 minutes"
    # Make images directory if it doesn't exist
    if not IMG_DIR in os.listdir():
        logger.info("Creating images directory")
        os.mkdir(IMG_DIR)
    # Set up Twitter API client
    bot = TweetPicBot(config_dict["twitter_keys"])
    # Build initial queue, then set up schedule
    try:
        # Post immediately if current UTC minute is divisible by interval
        current_min = time.gmtime().tm_min
        if current_min % interval == 0:
            post_image(bot)
        else:
            populate_queue()
    except Exception as e:
        dump_db_request(e)
        raise
    for m in range(0, 60, interval):
        # schedule interprets an hourly job's at() string as "MM:SS", so
        # the minute goes first and must be zero-padded; the previous
        # "00:%s" form put the minute in the seconds slot and produced
        # invalid strings like "00:0".
        schedule.every().hour.at("%02d:00" % m).do(post_image, bot)
    while True:
        schedule.run_pending()
        time.sleep(1)
if __name__ == "__main__":
    # Entry point: configure logging first so startup problems are captured.
    logging_setup()
    logger.info("Timmy's Danbooru Twitter Bot v%s is starting up",
                __version__)
    try:
        load_recent_ids()
        parse_config()
        main_loop()
    except (KeyboardInterrupt, SystemExit):
        # Use Ctrl-C to terminate the bot
        logger.info("Now shutting down")
    except AssertionError as e:
        # Config validation and empty-search failures surface here.
        logger.error(e)
    except:
        logger.exception("Exception occurred, now shutting down")
    # Drop any pending jobs before the interpreter exits.
    schedule.clear()
|
""" Create BOLD models."""
import numpy as np
import simfMRI
def w_double_gamma(impulses):
    """
    Convolves impulses, a 1 or 2d (column-oriented) array,
    with the double gamma hemodynamic response function (hrf).
    """
    hrf = simfMRI.hrf.double_gamma(20)
    try:
        n_col = impulses.shape[1]
    except IndexError:
        # impulses is likely 1d
        n_col = 1
    except AttributeError:
        n_col = 1
        print('Impulses is not an array. Assume it is 1d.')
    if n_col == 1:
        return np.convolve(impulses, hrf)
    # Convolve each column, truncating back to the input length.
    bold = np.zeros_like(impulses)
    for col in range(n_col):
        bold[:, col] = np.convolve(impulses[:, col], hrf)[0:impulses.shape[0]]
    return bold
Added a noisy option to convolve.w_double_gamma
""" Create BOLD models."""
import numpy as np
import simfMRI
def w_double_gamma(impulses,noisy=False):
    """
    Convolves impulses, a 1 or 2d (column-oriented) array,
    with the double gamma hemodynamic response function (hrf).

    When noisy is True a randomized HRF is drawn and its parameters are
    returned alongside the BOLD signal; otherwise the canonical double
    gamma is used and hrf_params is None.
    """
    # Bug fix: the two branches were swapped -- noisy=True must select
    # the noisy HRF, and the default must remain the deterministic one.
    if noisy:
        hrf,hrf_params = simfMRI.noisy_double_gamma(20)
    else:
        hrf = simfMRI.hrf.double_gamma(20)
        hrf_params = None
    n_col = 0
    try:
        n_col = impulses.shape[1]
    except IndexError:
        n_col = 1
        # impulses is likely 1d
    except AttributeError:
        n_col = 1
        print('Impulses is not an array. Assume it is 1d.')
    if n_col == 1:
        # NOTE(review): the 1d branch returns the full convolution while
        # the 2d branch truncates to the input length -- confirm intended.
        return np.convolve(impulses,hrf), hrf_params
    else:
        bold = np.zeros_like(impulses)
        for ii in range(n_col):
            bold[:,ii] = np.convolve(
                    impulses[:,ii],hrf)[0:impulses.shape[0]]
        return bold, hrf_params
|
from base64 import b64encode
from hashlib import sha256
import urllib
import hmac
import time
from exceptions import Exception
SERVICE_DOMAINS = {
'CA': ('ecs.amazonaws.ca', 'xml-ca.amznxslt.com'),
'DE': ('ecs.amazonaws.de', 'xml-de.amznxslt.com'),
'FR': ('ecs.amazonaws.fr', 'xml-fr.amznxslt.com'),
'JP': ('ecs.amazonaws.jp', 'xml-jp.amznxslt.com'),
'US': ('ecs.amazonaws.com', 'xml-us.amznxslt.com'),
'UK': ('ecs.amazonaws.co.uk', 'xml-uk.amznxslt.com'),
}
class AmazonError(Exception):
pass
class AmazonCall(object):
def __init__(self, AWSAccessKeyId = None, AWSSecretAccessKey = None, \
AssociateTag = None, Operation = None, Version = "2009-10-01", Region = "US"):
self.AWSAccessKeyId = AWSAccessKeyId
self.AWSSecretAccessKey = AWSSecretAccessKey
self.Operation = Operation
self.AssociateTag = AssociateTag
self.Version = Version
self.Region = Region
def signed_request(self):
pass
def __getattr__(self, k):
try:
return object.__getattr__(self, k)
except:
return AmazonCall(self.AWSAccessKeyId, self.AWSSecretAccessKey, \
self.AssociateTag, Operation = k, Version = self.Version, Region = self.Region)
def __call__(self, **kwargs):
kwargs['Timestamp'] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
kwargs['Operation'] = self.Operation
kwargs['Version'] = self.Version
kwargs['AWSAccessKeyId'] = self.AWSAccessKeyId
kwargs['Service'] = "AWSECommerceService"
if self.AssociateTag:
kwargs['AssociateTag'] = self.AssociateTag
if 'Style' in kwargs:
service_domain = SERVICE_DOMAINS[self.Region][1]
else:
service_domain = SERVICE_DOMAINS[self.Region][0]
keys = kwargs.keys()
keys.sort()
quoted_strings = "&".join("%s=%s" % (k, urllib.quote(str(kwargs[k]).encode('utf-8'), safe = '~')) for k in keys)
data = "GET\n" + service_domain + "\n/onca/xml\n" + quoted_strings
digest = hmac.new(self.AWSSecretAccessKey, data, sha256).digest()
signature = urllib.quote(b64encode(digest))
response = urllib.urlopen("http://" + service_domain + "/onca/xml?" + quoted_strings + "&Signature=%s" % signature)
return response.read()
class Amazon(AmazonCall):
def __init__(self, AWSAccessKeyId = None, AWSSecretAccessKey = None, \
AssociateTag = None, Operation = None, Version = "2009-10-01", Region = "US"):
AmazonCall.__init__(self, AWSAccessKeyId, AWSSecretAccessKey, \
AssociateTag, Operation, Version = Version, Region = Region)
__all__ = ["Amazon", "AmazonError"]
Updated to use urllib2 (fixes problems on my end)
from base64 import b64encode
from hashlib import sha256
import urllib
import urllib2
import hmac
import time
from exceptions import Exception
SERVICE_DOMAINS = {
'CA': ('ecs.amazonaws.ca', 'xml-ca.amznxslt.com'),
'DE': ('ecs.amazonaws.de', 'xml-de.amznxslt.com'),
'FR': ('ecs.amazonaws.fr', 'xml-fr.amznxslt.com'),
'JP': ('ecs.amazonaws.jp', 'xml-jp.amznxslt.com'),
'US': ('ecs.amazonaws.com', 'xml-us.amznxslt.com'),
'UK': ('ecs.amazonaws.co.uk', 'xml-uk.amznxslt.com'),
}
class AmazonError(Exception):
    """Base error type for this Amazon Product Advertising API wrapper."""
    pass
class AmazonCall(object):
    """Callable proxy for one Amazon Product Advertising API operation.

    Attribute access (e.g. ``amazon.ItemLookup``) returns a new
    AmazonCall bound to that Operation; calling it performs the signed
    GET request and returns the raw XML response body.
    NOTE: this module is Python 2 code (urllib2, list-returning
    dict.keys()).
    """
    def __init__(self, AWSAccessKeyId = None, AWSSecretAccessKey = None, \
        AssociateTag = None, Operation = None, Version = "2009-10-01", Region = "US"):
        self.AWSAccessKeyId = AWSAccessKeyId
        self.AWSSecretAccessKey = AWSSecretAccessKey
        self.Operation = Operation
        self.AssociateTag = AssociateTag
        self.Version = Version
        self.Region = Region
    def signed_request(self):
        # Placeholder; signing happens inline in __call__.
        pass
    def __getattr__(self, k):
        """Return a new AmazonCall with Operation=k for unknown attributes."""
        try:
            # object has no __getattr__, so this always raises -- the
            # except branch below is effectively the whole behavior.
            return object.__getattr__(self, k)
        except:
            return AmazonCall(self.AWSAccessKeyId, self.AWSSecretAccessKey, \
                self.AssociateTag, Operation = k, Version = self.Version, Region = self.Region)
    def __call__(self, **kwargs):
        """Build, sign (HMAC-SHA256), and send the API request."""
        kwargs['Timestamp'] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        kwargs['Operation'] = self.Operation
        kwargs['Version'] = self.Version
        kwargs['AWSAccessKeyId'] = self.AWSAccessKeyId
        kwargs['Service'] = "AWSECommerceService"
        if self.AssociateTag:
            kwargs['AssociateTag'] = self.AssociateTag
        # A "Style" argument routes the call through the XSLT endpoint.
        if 'Style' in kwargs:
            service_domain = SERVICE_DOMAINS[self.Region][1]
        else:
            service_domain = SERVICE_DOMAINS[self.Region][0]
        # AWS signature v2 requires the query parameters sorted by key.
        keys = kwargs.keys()
        keys.sort()
        quoted_strings = "&".join("%s=%s" % (k, urllib.quote(str(kwargs[k]).encode('utf-8'), safe = '~')) for k in keys)
        data = "GET\n" + service_domain + "\n/onca/xml\n" + quoted_strings
        digest = hmac.new(self.AWSSecretAccessKey, data, sha256).digest()
        signature = urllib.quote(b64encode(digest))
        api_string = "http://" + service_domain + "/onca/xml?" + quoted_strings + "&Signature=%s" % signature
        api_request = urllib2.Request(api_string)
        response = urllib2.urlopen(api_request)
        response_text = response.read()
        return response_text
class Amazon(AmazonCall):
    """Public entry point: an AmazonCall with no Operation bound yet;
    attribute access selects the operation to invoke."""
    def __init__(self, AWSAccessKeyId = None, AWSSecretAccessKey = None, \
        AssociateTag = None, Operation = None, Version = "2009-10-01", Region = "US"):
        AmazonCall.__init__(self, AWSAccessKeyId, AWSSecretAccessKey, \
            AssociateTag, Operation, Version = Version, Region = Region)
__all__ = ["Amazon", "AmazonError"]
|
#!/usr/bin/env python
#
# This script will set up tab completion for the Azure CLI.
#
# Calling the script
# e.g. python <filename> <path_to_cli_install> <path_to_config_file>
# <path_to_config_file> is optional as a default will be used. (e.g. ~/.bashrc)
#
# - Optional Environment Variables Available
# AZURE_CLI_DISABLE_PROMPTS - Disable prompts during installation and use the defaults
#
from __future__ import print_function
import os
import sys
import shutil
try:
    # Rename raw_input to input to support Python 2
    input = raw_input
except NameError:
    # Python 3 doesn't have raw_input
    pass

# Truthy when the env var is set: all interactive prompts are skipped.
DISABLE_PROMPTS = os.environ.get('AZURE_CLI_DISABLE_PROMPTS')
# Name of the completion script written into the install directory.
COMPLETION_FILENAME = 'az.completion'
# Bash snippet registering argcomplete-driven completion for the `az` command.
REGISTER_PYTHON_ARGCOMPLETE = """
_python_argcomplete() {
local IFS='\v'
COMPREPLY=( $(IFS="$IFS" COMP_LINE="$COMP_LINE" COMP_POINT="$COMP_POINT" _ARGCOMPLETE_COMP_WORDBREAKS="$COMP_WORDBREAKS" _ARGCOMPLETE=1 "$1" 8>&1 9>&2 1>/dev/null 2>/dev/null) )
if [[ $? != 0 ]]; then
unset COMPREPLY
fi
}
complete -o nospace -o default -F _python_argcomplete "az"
"""
def prompt_input(message):
    '''Ask the user for input, or return None when prompts are disabled.'''
    if DISABLE_PROMPTS:
        return None
    return input(message)
def create_tab_completion_file(filename):
    '''Write the argcomplete registration snippet to *filename*.'''
    with open(filename, 'w') as handle:
        handle.write(REGISTER_PYTHON_ARGCOMPLETE)
def _get_default_rc_file():
user_bash_rc = os.path.expanduser(os.path.join('~', '.bashrc'))
user_bash_profile = os.path.expanduser(os.path.join('~', '.bash_profile'))
if not os.path.isfile(user_bash_rc) and os.path.isfile(user_bash_profile):
return user_bash_profile
return user_bash_rc
def backup_rc(rc_file):
    '''Best-effort copy of *rc_file* to *rc_file*.backup; failures are silent.'''
    backup_path = rc_file + '.backup'
    try:
        shutil.copyfile(rc_file, backup_path)
    except (OSError, IOError):
        return
    print("Backed up '{}' to '{}'".format(rc_file, backup_path))
def find_line_in_file(file_path, search_pattern):
    '''Return True when any line of *file_path* contains *search_pattern*;
    unreadable/missing files count as "not found".'''
    try:
        with open(file_path, 'r') as handle:
            return any(search_pattern in line for line in handle)
    except (OSError, IOError):
        return False
def modify_rc(rc_file_path, line_to_add):
    '''Append *line_to_add* to the rc file unless it is already present.'''
    if find_line_in_file(rc_file_path, line_to_add):
        return
    with open(rc_file_path, 'a') as rc_file:
        rc_file.write('\n' + line_to_add + '\n')
def error_exit(message):
    '''Report *message* on stderr and terminate with exit status 1.'''
    formatted = 'ERROR: {}'.format(message)
    print(formatted, file=sys.stderr)
    sys.exit(1)
def main():
    '''Install the completion file and hook it into the user's rc file.

    argv[1] is the install directory (required); argv[2] is an optional rc
    file path, otherwise the user is prompted (or the default is used).
    '''
    if len(sys.argv) < 2:
        error_exit('Specify install location as argument.')
    completion_file_path = os.path.join(sys.argv[1], COMPLETION_FILENAME)
    create_tab_completion_file(completion_file_path)
    default_rc_file = _get_default_rc_file()
    if len(sys.argv) >= 3:
        rc_file = sys.argv[2]
    else:
        try:
            rc_file = prompt_input('Path to rc file to update (default {}): '.format(default_rc_file)) or default_rc_file
        except EOFError:
            error_exit('Unable to prompt for input. Pass the rc file as an argument to this script.')
    rc_file_path = os.path.realpath(os.path.expanduser(rc_file))
    backup_rc(rc_file_path)
    modify_rc(rc_file_path, "source '{}'".format(completion_file_path))
    print('Tab completion enabled.')
    print('Run `exec -l $SHELL` to restart your shell.')
# Entry-point guard: run the installer only when executed as a script.
if __name__ == '__main__':
    main()
Modify comments
#!/usr/bin/env python
#
# This script will set up tab completion for the Azure CLI.
#
# Calling the script
# e.g. python <filename> <path_to_cli_install> <path_to_rc_file>
# <path_to_rc_file> is optional as a default will be used. (e.g. ~/.bashrc)
#
# - Optional Environment Variables Available
# AZURE_CLI_DISABLE_PROMPTS - Disable prompts during installation and use the defaults
#
from __future__ import print_function
import os
import sys
import shutil
try:
    # Rename raw_input to input to support Python 2
    input = raw_input
except NameError:
    # Python 3 doesn't have raw_input
    pass

# Truthy when the env var is set: all interactive prompts are skipped.
DISABLE_PROMPTS = os.environ.get('AZURE_CLI_DISABLE_PROMPTS')
# Name of the completion script written into the install directory.
COMPLETION_FILENAME = 'az.completion'
# Bash snippet registering argcomplete-driven completion for the `az` command.
REGISTER_PYTHON_ARGCOMPLETE = """
_python_argcomplete() {
local IFS='\v'
COMPREPLY=( $(IFS="$IFS" COMP_LINE="$COMP_LINE" COMP_POINT="$COMP_POINT" _ARGCOMPLETE_COMP_WORDBREAKS="$COMP_WORDBREAKS" _ARGCOMPLETE=1 "$1" 8>&1 9>&2 1>/dev/null 2>/dev/null) )
if [[ $? != 0 ]]; then
unset COMPREPLY
fi
}
complete -o nospace -o default -F _python_argcomplete "az"
"""
def prompt_input(message):
    '''Ask the user for input, or return None when prompts are disabled.'''
    if DISABLE_PROMPTS:
        return None
    return input(message)
def create_tab_completion_file(filename):
    '''Write the argcomplete registration snippet to *filename*.'''
    with open(filename, 'w') as handle:
        handle.write(REGISTER_PYTHON_ARGCOMPLETE)
def _get_default_rc_file():
user_bash_rc = os.path.expanduser(os.path.join('~', '.bashrc'))
user_bash_profile = os.path.expanduser(os.path.join('~', '.bash_profile'))
if not os.path.isfile(user_bash_rc) and os.path.isfile(user_bash_profile):
return user_bash_profile
return user_bash_rc
def backup_rc(rc_file):
    '''Best-effort copy of *rc_file* to *rc_file*.backup; failures are silent.'''
    backup_path = rc_file + '.backup'
    try:
        shutil.copyfile(rc_file, backup_path)
    except (OSError, IOError):
        return
    print("Backed up '{}' to '{}'".format(rc_file, backup_path))
def find_line_in_file(file_path, search_pattern):
    '''Return True when any line of *file_path* contains *search_pattern*;
    unreadable/missing files count as "not found".'''
    try:
        with open(file_path, 'r') as handle:
            return any(search_pattern in line for line in handle)
    except (OSError, IOError):
        return False
def modify_rc(rc_file_path, line_to_add):
    '''Append *line_to_add* to the rc file unless it is already present.'''
    if find_line_in_file(rc_file_path, line_to_add):
        return
    with open(rc_file_path, 'a') as rc_file:
        rc_file.write('\n' + line_to_add + '\n')
def error_exit(message):
    '''Report *message* on stderr and terminate with exit status 1.'''
    formatted = 'ERROR: {}'.format(message)
    print(formatted, file=sys.stderr)
    sys.exit(1)
def main():
    '''Install the completion file and hook it into the user's rc file.

    argv[1] is the install directory (required); argv[2] is an optional rc
    file path, otherwise the user is prompted (or the default is used).
    '''
    if len(sys.argv) < 2:
        error_exit('Specify install location as argument.')
    completion_file_path = os.path.join(sys.argv[1], COMPLETION_FILENAME)
    create_tab_completion_file(completion_file_path)
    default_rc_file = _get_default_rc_file()
    if len(sys.argv) >= 3:
        rc_file = sys.argv[2]
    else:
        try:
            rc_file = prompt_input('Path to rc file to update (default {}): '.format(default_rc_file)) or default_rc_file
        except EOFError:
            error_exit('Unable to prompt for input. Pass the rc file as an argument to this script.')
    rc_file_path = os.path.realpath(os.path.expanduser(rc_file))
    backup_rc(rc_file_path)
    modify_rc(rc_file_path, "source '{}'".format(completion_file_path))
    print('Tab completion enabled.')
    print('Run `exec -l $SHELL` to restart your shell.')
# Entry-point guard: run the installer only when executed as a script.
if __name__ == '__main__':
    main()
|
# graph.py - Some structures to build graphs.
# Author: Franck Michea < franck.michea@gmail.com >
# License: New BSD License (See LICENSE)
import os
import pickle
import sys
import math as m
from collections import Counter
# Change this if you want to use your processor.
# XXX: Nothing smart for now. Useful?
import bracoujl.processor.gb_z80 as proc
# Number of hex digits used when printing addresses.
# NOTE(review): ceil(log2(addr_width)) gives 5 for a 32-bit address space,
# not the 8 hex digits one would expect - confirm the intended formula.
_ADDR_SIZE = m.ceil(m.log2(proc.CPU_CONF.get('addr_width', 32)))
# Zero-padded upper-case hex format spec, e.g. '05X'.
_ADDR_FRMT = '0{}X'.format(_ADDR_SIZE)
# None when the processor config declares no disassembler (type(None)() is None).
_DISASSEMBLER = proc.CPU_CONF.get('disassembler', type(None))()
# These two will not be displayed.
_BEGIN_ADDR = -1
_END_ADDR = -2
def _readlines(f):
'''Avoids loading the whole file (that can become pretty heavy) in memory.'''
line = f.readline()
while line:
yield line[:-1]
line = f.readline()
def _enum(**enums):
return type('Enum', (), enums)
# Edge colors used when rendering links.
LinkType = _enum(NORMAL='black', TAKEN='green', NOT_TAKEN='red')
# Block label prefixes: interrupt handler, local label, subroutine.
BlockType = _enum(INT='int', LOC='loc', SUB='sub')
# Parser states while walking the log.
GraphState = _enum(NORMAL_GRAPH=0, INTERRUPT=1)
class Link:
    '''
    This class represents a link between two blocks.

    :param from_: The block from which the link begins.
    :param to: The block to which the link goes.
    :param link_type: The type of the link.
    '''

    def __init__(self, from_, to):
        self.from_, self.to, self.link_type = from_, to, LinkType.NORMAL

    def do_link(self):
        # Counted on both endpoints so parallel traversals stack.
        self.from_.tos[self] += 1
        self.to.froms[self] += 1

    def do_unlink(self):
        # Decrement both counters, dropping entries that reach zero.
        self.from_.tos[self] -= 1
        if not self.from_.tos[self]:
            del self.from_.tos[self]
        self.to.froms[self] -= 1
        if not self.to.froms[self]:
            del self.to.froms[self]

    def __del__(self):
        # Counter.__delitem__ is a no-op for missing keys, so this is safe
        # even after do_unlink() already removed the entries.
        del self.from_.tos[self]
        del self.to.froms[self]

    def __repr__(self):
        # BUG FIX: the format constant is _ADDR_FRMT (ADDR_FRMT was undefined),
        # blocks expose their address via ['pc'] (no .addr attribute), and
        # '{:x}' cannot format a Block - use id() for the object tags.
        return '[{:x}] {:{addr_frmt}} -> {:{addr_frmt}} [{:x}]'.format(
            id(self.from_),
            self.from_['pc'],
            self.to['pc'],
            id(self.to),
            addr_frmt=_ADDR_FRMT,
        )

    def __eq__(self, other):
        # Equal when both endpoints are the same objects at the same addresses.
        return repr(self) == repr(other)
class Instruction:
    '''
    An instruction consists of an address, an opcode.

    :param inst: The instruction parsed by the cpu configuration.
    '''

    def __init__(self, inst):
        self._inst = inst

    def __str__(self):
        # BUG FIX: the module constant is _ADDR_FRMT, and the address lives
        # under the 'pc' key (self['addr'] was not in the key whitelist).
        res = ' {addr:{addr_frmt}}: {opcode:02X}'.format(
            addr=self['pc'],
            opcode=self['opcode'],
            addr_frmt=_ADDR_FRMT,
        )
        if _DISASSEMBLER is not None:
            # BUG FIX: `opcode` was an unbound name here.
            res += ' - {disassembly}'.format(
                disassembly=_DISASSEMBLER.disassemble(self['opcode'])
            )
        return res

    def __getitem__(self, item):
        if item not in ['pc', 'opcode', 'mem']:
            # BUG FIX: object has no __getitem__, so the old super() call
            # raised AttributeError; raise the conventional KeyError instead.
            raise KeyError(item)
        return self._inst[item]

    def __eq__(self, other):
        f = lambda obj: (obj['pc'], obj['opcode'], obj['mem'])
        return f(self) == f(other)
class Block:
    '''
    A block represents a couple of instructions executed in a row without any
    branchement in it. It possibly ends with a CALL, a JUMP or a RET.

    Links are stored in two attributes, `froms` and `tos`. There is a
    additional attribute that holds the information necessary to know if a
    branchement trigerred.

    :param addr: Start addr of the block.
    :param insts: The instructions contained in the block.
    :param block_type: Type of the block.
    '''

    def __init__(self, inst, inst_class=Instruction):
        self.insts, self.block_type = [inst_class(inst)], BlockType.LOC
        # tlf = "triggering link found" (see generate_graph).
        self.froms, self.tos, self.tlf = Counter(), Counter(), False

    def __str__(self):
        # BUG FIX: the format constant is _ADDR_FRMT (ADDR_FRMT was undefined).
        res = '{block_type}_{pc:{addr_frmt}}:\n'.format(
            pc=self['pc'], block_type=self.block_type, addr_frmt=_ADDR_FRMT,
        )
        res += '\n'.join(str(it) for it in self.insts)
        return res

    def __getitem__(self, item):
        '''
        Until there is multiple instructions in a block, the __getitem__
        functions coresponds to the uniq instruction in it. The property always
        works for 'pc' property.
        '''
        if len(self.insts) == 1 or item == 'pc':
            return self.insts[0][item]
        # BUG FIX: object has no __getitem__; raise KeyError as mappings do.
        raise KeyError(item)

    def accepts_merge_top(self):
        '''
        This function determines if the current block can be merged with the
        preceding block. This is *True* if if we can't reach this block from
        multiple other blocks.
        '''
        return len(self.froms) == 1

    def accepts_merge_bottom(self):
        '''
        This function determines if the current block accepts merge at its
        bottom. It is possible if we are not on the end of a block, meaning
        that we don't go to multiple blocks, or that we are not part of the
        special opcodes (call, jump, ret).
        '''
        # BUG FIX: the attribute is `tos`; `_tos` never existed.
        if len(self.tos) != 1:
            return False
        # NOTE(review): generate_graph uses the 'jmp_opcodes' key; confirm
        # whether proc defines 'jump_opcodes', 'jmp_opcodes', or both.
        for spec_opc in ['ret', 'call', 'jump']:
            if self.insts[-1]['opcode'] in proc.CPU_CONF[spec_opc + '_opcodes']:
                return False
        return True

    def merge(self, other):
        '''
        This function will take a block bellow the current block (*self*) and
        merge them. They must be directly following themselves to avoid
        breaking the graph.
        '''
        self.insts.extend(other.insts)
        self.tos = Counter()
        # BUG FIX: re-link to each of *other*'s successors; the old code
        # created duplicate self->other links even though *other* is being
        # absorbed into *self*.
        for to in list(other.tos):
            Link(self, to.to).do_link()

    def __eq__(self, other):
        # This will also check addresses and the like. Don't forget to change
        # this if it is not the case anymore.
        return self.insts == other.insts
class SpecialBlock(Block):
    '''A pseudo-block rendered with a fixed label (the BEGIN/END markers).'''

    def __init__(self, inst, label, mergeable=True):
        class SpecialInstruction(Instruction):
            # BUG FIX: __str__ takes only self; *label* is captured from the
            # enclosing constructor (the extra parameter broke str()).
            def __str__(self):
                return ' {padding} {label}'.format(
                    padding=''.ljust(_ADDR_SIZE),
                    label=label,
                )
        super().__init__(inst, inst_class=SpecialInstruction)
        self._mergeable = mergeable

    def accepts_merge_top(self):
        return self._mergeable and super().accepts_merge_top()

    def accepts_merge_bottom(self):
        return self._mergeable and super().accepts_merge_bottom()
class Graph:
    '''Builds a block graph (blocks + links) out of an execution log.

    NOTE(review): generate_graph is declared without `self` and without
    @staticmethod - calling it on an instance will misbind `filename`.
    '''
    def generate_graph(filename):
        # Return an existing equivalent link out of last_block, else the fresh one.
        def find_link(last_block, block):
            link = Link(last_block, block)
            for ll in last_block.tos:
                if ll == link:
                    link = ll
                    break
            return link
        # blocks: pc -> list of Block; backtrace: call stack used to match rets.
        blocks, last_block, backtrace = dict(), None, list()
        ########################################################################
        ##### STEP 1: Fetch the graph from the log file. #####
        ########################################################################
        # Create a special block for the begining of the logs.
        last_block = SpecialBlock({'pc': _BEGIN_ADDR}, 'BEGIN')
        blocks[_BEGIN_ADDR] = [last_block]
        with open(filename) as fd:
            for line in _readlines(fd):
                inst = proc.CPU_CONF['parse_line'](line)
                # If line is not recognized, just skip it.
                if inst is None:
                    continue
                # Create the list of blocks for the current PC in the blocks
                # dictionary.
                if inst['pc'] not in blocks:
                    blocks[inst['pc']] = []
                # Check if we already know the current instruction for the
                # current program counter. If we do, we keep the current block
                # and add a link.
                block_found = False
                for block in blocks[inst['pc']]:
                    if block['opcode'] == inst['opcode']:
                        block_found = True
                        break
                if not block_found:
                    block = Block(inst)
                    # NOTE(review): BUG - `blocks` is a dict; this should be
                    # blocks[inst['pc']].append(block).
                    blocks.append(block)
                # Now we need to link this block and the last block.
                link = find_link(last_block, block)
                # Now we need to treat special cases.
                offset = last_block['pc'] - block['pc']
                if block['pc'] in proc.CPU_CONF['interrupts']:
                    # If the block is the beginning of an interrupt, we don't
                    # need it, but we do need to keep the triggering block in
                    # the backtrace.
                    backtrace.append(block)
                    link = None
                # NOTE(review): instructions only expose 'pc'/'opcode'/'mem';
                # last_block['ret'] looks like it should be last_block['opcode'].
                elif (last_block['ret'] in proc.CPU_CONF['ret_opcodes'] and
                        offset != proc.CPU_CONF['ret_opcodes_size']):
                    # We a ret, and triggered it. A ret trigger happens when
                    # we don't fall-through. In that case, we traceback to the
                    # place where we were called.
                    try:
                        last_block = backtrace.pop()
                    except IndexError:
                        msg = 'Could not pop call place from the which we come'
                        msg += ' from.'
                        sys.exit(msg)
                    link = find_link(last_block, block)
                else:
                    for spec_op in ['call', 'jmp']:
                        spec_op += '_opcodes'
                        if last_block['opcode'] in proc.CPU_CONF[spec_op]:
                            # Links are colorized depending on the detection of
                            # if they are taken or not. First we need to know
                            # wether we know the triggering link or not.
                            if offset == proc.CPU_CONF[spec_op + '_size']:
                                link.link_type = LinkType.NOT_TAKEN
                            elif not last_block.tlf:
                                # Offset is not the size of the opcode *and*
                                # this is the first time it happens, we are on
                                # the triggering link.
                                link.link_type = LinkType.TAKEN
                                last_block.tlf = True
                # We finally really link the Link if it still exists and was not
                # known, and add the block to the list of blocks.
                if link is not None:
                    link.do_link()
                # To be used in the next step.
                last_block = block
        # Finally we add a end block, to know were the logs end.
        end_block = SpecialBlock({'pc': _END_ADDR}, 'END')
        Link(last_block, end_block).do_link()
        blocks[_END_ADDR] = [end_block]
        ########################################################################
        ##### STEP 2: Split sub functions when possible, and split #####
        ##### functions that are in different memories. Merge of #####
        ##### the blocks is done at the end of this step too. #####
        ########################################################################
        # NOTE(review): this loop rebinds `blocks` (the dict) to each value;
        # rename the loop variable before implementing step 2.
        for pc, blocks in sorted(blocks.items()):
            pass
More explicit comment.
# graph.py - Some structures to build graphs.
# Author: Franck Michea < franck.michea@gmail.com >
# License: New BSD License (See LICENSE)
import os
import pickle
import sys
import math as m
from collections import Counter
# Change this if you want to use your processor.
# XXX: Nothing smart for now. Useful?
import bracoujl.processor.gb_z80 as proc
# Number of hex digits used when printing addresses.
# NOTE(review): ceil(log2(addr_width)) gives 5 for a 32-bit address space,
# not the 8 hex digits one would expect - confirm the intended formula.
_ADDR_SIZE = m.ceil(m.log2(proc.CPU_CONF.get('addr_width', 32)))
# Zero-padded upper-case hex format spec, e.g. '05X'.
_ADDR_FRMT = '0{}X'.format(_ADDR_SIZE)
# None when the processor config declares no disassembler (type(None)() is None).
_DISASSEMBLER = proc.CPU_CONF.get('disassembler', type(None))()
# These two will not be displayed.
_BEGIN_ADDR = -1
_END_ADDR = -2
def _readlines(f):
'''Avoids loading the whole file (that can become pretty heavy) in memory.'''
line = f.readline()
while line:
yield line[:-1]
line = f.readline()
def _enum(**enums):
return type('Enum', (), enums)
# Edge colors used when rendering links.
LinkType = _enum(NORMAL='black', TAKEN='green', NOT_TAKEN='red')
# Block label prefixes: interrupt handler, local label, subroutine.
BlockType = _enum(INT='int', LOC='loc', SUB='sub')
# Parser states while walking the log.
GraphState = _enum(NORMAL_GRAPH=0, INTERRUPT=1)
class Link:
    '''
    This class represents a link between two blocks.

    :param from_: The block from which the link begins.
    :param to: The block to which the link goes.
    :param link_type: The type of the link.
    '''

    def __init__(self, from_, to):
        self.from_, self.to, self.link_type = from_, to, LinkType.NORMAL

    def do_link(self):
        # Counted on both endpoints so parallel traversals stack.
        self.from_.tos[self] += 1
        self.to.froms[self] += 1

    def do_unlink(self):
        # Decrement both counters, dropping entries that reach zero.
        self.from_.tos[self] -= 1
        if not self.from_.tos[self]:
            del self.from_.tos[self]
        self.to.froms[self] -= 1
        if not self.to.froms[self]:
            del self.to.froms[self]

    def __del__(self):
        # Counter.__delitem__ is a no-op for missing keys, so this is safe
        # even after do_unlink() already removed the entries.
        del self.from_.tos[self]
        del self.to.froms[self]

    def __repr__(self):
        # BUG FIX: the format constant is _ADDR_FRMT (ADDR_FRMT was undefined),
        # blocks expose their address via ['pc'] (no .addr attribute), and
        # '{:x}' cannot format a Block - use id() for the object tags.
        return '[{:x}] {:{addr_frmt}} -> {:{addr_frmt}} [{:x}]'.format(
            id(self.from_),
            self.from_['pc'],
            self.to['pc'],
            id(self.to),
            addr_frmt=_ADDR_FRMT,
        )

    def __eq__(self, other):
        # Equal when both endpoints are the same objects at the same addresses.
        return repr(self) == repr(other)
class Instruction:
    '''
    An instruction consists of an address, an opcode.

    :param inst: The instruction parsed by the cpu configuration.
    '''

    def __init__(self, inst):
        self._inst = inst

    def __str__(self):
        # BUG FIX: the module constant is _ADDR_FRMT, and the address lives
        # under the 'pc' key (self['addr'] was not in the key whitelist).
        res = ' {addr:{addr_frmt}}: {opcode:02X}'.format(
            addr=self['pc'],
            opcode=self['opcode'],
            addr_frmt=_ADDR_FRMT,
        )
        if _DISASSEMBLER is not None:
            # BUG FIX: `opcode` was an unbound name here.
            res += ' - {disassembly}'.format(
                disassembly=_DISASSEMBLER.disassemble(self['opcode'])
            )
        return res

    def __getitem__(self, item):
        if item not in ['pc', 'opcode', 'mem']:
            # BUG FIX: object has no __getitem__, so the old super() call
            # raised AttributeError; raise the conventional KeyError instead.
            raise KeyError(item)
        return self._inst[item]

    def __eq__(self, other):
        f = lambda obj: (obj['pc'], obj['opcode'], obj['mem'])
        return f(self) == f(other)
class Block:
    '''
    A block represents a couple of instructions executed in a row without any
    branchement in it. It possibly ends with a CALL, a JUMP or a RET.

    Links are stored in two attributes, `froms` and `tos`. There is a
    additional attribute that holds the information necessary to know if a
    branchement trigerred.

    :param addr: Start addr of the block.
    :param insts: The instructions contained in the block.
    :param block_type: Type of the block.
    '''

    def __init__(self, inst, inst_class=Instruction):
        self.insts, self.block_type = [inst_class(inst)], BlockType.LOC
        # tlf = "triggering link found" (see generate_graph).
        self.froms, self.tos, self.tlf = Counter(), Counter(), False

    def __str__(self):
        # BUG FIX: the format constant is _ADDR_FRMT (ADDR_FRMT was undefined).
        res = '{block_type}_{pc:{addr_frmt}}:\n'.format(
            pc=self['pc'], block_type=self.block_type, addr_frmt=_ADDR_FRMT,
        )
        res += '\n'.join(str(it) for it in self.insts)
        return res

    def __getitem__(self, item):
        '''
        Until there is multiple instructions in a block, the __getitem__
        functions coresponds to the uniq instruction in it. The property always
        works for 'pc' property.
        '''
        if len(self.insts) == 1 or item == 'pc':
            return self.insts[0][item]
        # BUG FIX: object has no __getitem__; raise KeyError as mappings do.
        raise KeyError(item)

    def accepts_merge_top(self):
        '''
        This function determines if the current block can be merged with the
        preceding block. This is *True* if if we can't reach this block from
        multiple other blocks.
        '''
        return len(self.froms) == 1

    def accepts_merge_bottom(self):
        '''
        This function determines if the current block accepts merge at its
        bottom. It is possible if we are not on the end of a block, meaning
        that we don't go to multiple blocks, or that we are not part of the
        special opcodes (call, jump, ret).
        '''
        # BUG FIX: the attribute is `tos`; `_tos` never existed.
        if len(self.tos) != 1:
            return False
        # NOTE(review): generate_graph uses the 'jmp_opcodes' key; confirm
        # whether proc defines 'jump_opcodes', 'jmp_opcodes', or both.
        for spec_opc in ['ret', 'call', 'jump']:
            if self.insts[-1]['opcode'] in proc.CPU_CONF[spec_opc + '_opcodes']:
                return False
        return True

    def merge(self, other):
        '''
        This function will take a block bellow the current block (*self*) and
        merge them. They must be directly following themselves to avoid
        breaking the graph.
        '''
        self.insts.extend(other.insts)
        self.tos = Counter()
        # BUG FIX: re-link to each of *other*'s successors; the old code
        # created duplicate self->other links even though *other* is being
        # absorbed into *self*.
        for to in list(other.tos):
            Link(self, to.to).do_link()

    def __eq__(self, other):
        # This will also check addresses and the like. Don't forget to change
        # this if it is not the case anymore.
        return self.insts == other.insts
class SpecialBlock(Block):
    '''A pseudo-block rendered with a fixed label (the BEGIN/END markers).'''

    def __init__(self, inst, label, mergeable=True):
        class SpecialInstruction(Instruction):
            # BUG FIX: __str__ takes only self; *label* is captured from the
            # enclosing constructor (the extra parameter broke str()).
            def __str__(self):
                return ' {padding} {label}'.format(
                    padding=''.ljust(_ADDR_SIZE),
                    label=label,
                )
        super().__init__(inst, inst_class=SpecialInstruction)
        self._mergeable = mergeable

    def accepts_merge_top(self):
        return self._mergeable and super().accepts_merge_top()

    def accepts_merge_bottom(self):
        return self._mergeable and super().accepts_merge_bottom()
class Graph:
    '''Builds a block graph (blocks + links) out of an execution log.

    NOTE(review): generate_graph is declared without `self` and without
    @staticmethod - calling it on an instance will misbind `filename`.
    '''
    def generate_graph(filename):
        # Return an existing equivalent link out of last_block, else the fresh one.
        def find_link(last_block, block):
            link = Link(last_block, block)
            for ll in last_block.tos:
                if ll == link:
                    link = ll
                    break
            return link
        # blocks: pc -> list of Block; backtrace: call stack used to match rets.
        blocks, last_block, backtrace = dict(), None, list()
        ########################################################################
        ##### STEP 1: Fetch the graph from the log file. #####
        ########################################################################
        # Create a special block for the begining of the logs.
        last_block = SpecialBlock({'pc': _BEGIN_ADDR}, 'BEGIN')
        blocks[_BEGIN_ADDR] = [last_block]
        with open(filename) as fd:
            for line in _readlines(fd):
                inst = proc.CPU_CONF['parse_line'](line)
                # If line is not recognized, just skip it.
                if inst is None:
                    continue
                # Create the list of blocks for the current PC in the blocks
                # dictionary.
                if inst['pc'] not in blocks:
                    blocks[inst['pc']] = []
                # Check if we already know the current instruction for the
                # current program counter. If we do, we keep the current block
                # and add a link.
                block_found = False
                for block in blocks[inst['pc']]:
                    if block['opcode'] == inst['opcode']:
                        block_found = True
                        break
                if not block_found:
                    block = Block(inst)
                    # NOTE(review): BUG - `blocks` is a dict; this should be
                    # blocks[inst['pc']].append(block).
                    blocks.append(block)
                # Now we need to link this block and the last block.
                link = find_link(last_block, block)
                # Now we need to treat special cases.
                offset = last_block['pc'] - block['pc']
                if block['pc'] in proc.CPU_CONF['interrupts']:
                    # If the block is the beginning of an interrupt, we don't
                    # need the link, but we do need to keep the triggering
                    # block in the backtrace.
                    backtrace.append(block)
                    link = None
                # NOTE(review): instructions only expose 'pc'/'opcode'/'mem';
                # last_block['ret'] looks like it should be last_block['opcode'].
                elif (last_block['ret'] in proc.CPU_CONF['ret_opcodes'] and
                        offset != proc.CPU_CONF['ret_opcodes_size']):
                    # We a ret, and triggered it. A ret trigger happens when
                    # we don't fall-through. In that case, we traceback to the
                    # place where we were called.
                    try:
                        last_block = backtrace.pop()
                    except IndexError:
                        msg = 'Could not pop call place from the which we come'
                        msg += ' from.'
                        sys.exit(msg)
                    link = find_link(last_block, block)
                else:
                    for spec_op in ['call', 'jmp']:
                        spec_op += '_opcodes'
                        if last_block['opcode'] in proc.CPU_CONF[spec_op]:
                            # Links are colorized depending on the detection of
                            # if they are taken or not. First we need to know
                            # wether we know the triggering link or not.
                            if offset == proc.CPU_CONF[spec_op + '_size']:
                                link.link_type = LinkType.NOT_TAKEN
                            elif not last_block.tlf:
                                # Offset is not the size of the opcode *and*
                                # this is the first time it happens, we are on
                                # the triggering link.
                                link.link_type = LinkType.TAKEN
                                last_block.tlf = True
                # We finally really link the Link if it still exists and was not
                # known, and add the block to the list of blocks.
                if link is not None:
                    link.do_link()
                # To be used in the next step.
                last_block = block
        # Finally we add a end block, to know were the logs end.
        end_block = SpecialBlock({'pc': _END_ADDR}, 'END')
        Link(last_block, end_block).do_link()
        blocks[_END_ADDR] = [end_block]
        ########################################################################
        ##### STEP 2: Split sub functions when possible, and split #####
        ##### functions that are in different memories. Merge of #####
        ##### the blocks is done at the end of this step too. #####
        ########################################################################
        # NOTE(review): this loop rebinds `blocks` (the dict) to each value;
        # rename the loop variable before implementing step 2.
        for pc, blocks in sorted(blocks.items()):
            pass
|
import sys
import collections
import itertools
import ana
import logging
l = logging.getLogger("cooldict")
class FinalizedError(Exception):
    '''Raised when a finalized (frozen) dict is mutated.'''
class BranchingDictError(Exception):
    '''Raised for errors specific to BranchingDict operations.'''
# Flatten a dict's ancestry once it exceeds ~20% of the recursion limit.
# NOTE(review): this is a float; it is only ever compared against len().
default_max_depth = sys.getrecursionlimit() * 0.2
# When flattening, keep at least this many most-recent ancestors intact.
default_min_depth = 100
#####################
### Utility stuff ###
#####################
def get_storage(d):
    '''Returns the local storage of the dictionary.'''
    handlers = (
        (FinalizableDict, lambda x: { }),
        (BackedDict, lambda x: x.storage),
        (CachedDict, lambda x: x.cache),
        (BranchingDict, lambda x: { }),
    )
    for klass, extract in handlers:
        if isinstance(d, klass):
            return extract(d)
    # Unknown type: treat the object itself as its own storage.
    return d
def get_backers(d):
    '''Returns the backers of the dictionary.'''
    handlers = (
        (FinalizableDict, lambda x: [ x.storage ]),
        (BackedDict, lambda x: x.backers),
        (CachedDict, lambda x: [ x.backer ]),
        (BranchingDict, lambda x: [ x.cowdict ]),
    )
    for klass, extract in handlers:
        if isinstance(d, klass):
            return extract(d)
    # Unknown type: no backers.
    return [ ]
def get_deleted(d):
    '''Return the deleted-key set tracked by a BackedDict (empty otherwise).'''
    return d.deleted if isinstance(d, BackedDict) else set()
def get_id(d):
    '''Returns the ID of the dictionary (dict_id when present, else id()).'''
    return d.dict_id if hasattr(d, 'dict_id') else id(d)
def ancestry_line(d):
    '''
    Returns the ancestry of this dict, back to the first dict that we don't
    recognize or that has more than one backer.
    '''
    backers = get_backers(d)
    while len(backers) == 1:
        parent = backers[0]
        yield parent
        backers = get_backers(parent)
############################
### The dicts themselves ###
############################
class CachedDict(ana.Storable, collections.MutableMapping):
    ''' Implements a write-through cache around another dict. '''
    def __init__(self, backer):
        # backer: the authoritative mapping; cache: the local read cache.
        self.backer = backer
        self.cache = { }
        self.make_uuid()  # provided by ana.Storable
    def default_cacher(self, k):
        # Cache-miss path: fetch from the backer and remember the value.
        v = self.backer[k]
        self.cache[k] = v
        return v
    def __getitem__(self, k):
        try:
            return self.cache[k]
        except KeyError:
            return self.default_cacher(k)
    def __setitem__(self, k, v):
        # Write-through: update both the cache and the backer.
        self.cache[k] = v
        self.backer[k] = v
    def __delitem__(self, k):
        # NOTE(review): silently ignores missing keys, unlike the normal
        # MutableMapping contract (KeyError) - presumably deliberate.
        self.cache.pop(k, None)
        self.backer.pop(k, None)
    def __iter__(self):
        return self.backer.__iter__()
    def __len__(self):
        # O(n): materializes the backer's key iterator.
        return len(list(self.__iter__()))
    def _ana_getstate(self):
        # Serialization keeps only the backer; the cache is transient.
        return self.backer
    def _ana_setstate(self, state):
        self.backer = state
        self.cache = { }
class BackedDict(ana.Storable, collections.MutableMapping):
    ''' Implements a mapping that's backed by other mappings. '''
    def __init__(self, *backers, **kwargs):
        # storage: local writes; deleted: keys masked out of the backers.
        self.backers = backers
        self.storage = kwargs.get('storage', { })
        self.deleted = kwargs.get('deleted', set())
        self.make_uuid()  # provided by ana.Storable
    def __getitem__(self, a):
        # make sure we haven't deleted it
        if a in self.deleted:
            raise KeyError(a)
        # return it if we have it in storage
        if a in self.storage:
            return self.storage[a]
        # try the backers
        for p in self.backers:
            try:
                return p[a]
            except KeyError:
                pass
        # panic!
        raise KeyError(a)
    def __delitem__(self, a):
        # make sure we can do it
        if a not in self:
            raise KeyError(a)
        # and do it
        self.storage.pop(a, None)
        self.deleted.add(a)
    def __setitem__(self, k, v):
        # A write un-deletes the key.
        self.deleted.discard(k)
        self.storage[k] = v
    def __iter__(self):
        # Yields each visible key once: storage first, then the backers.
        chain = itertools.chain(self.storage, *[ p for p in self.backers ])
        seen = set()
        for k in chain:
            if k not in self.deleted and k not in seen:
                seen.add(k)
                yield k
    def __len__(self):
        # O(n): materializes the key iterator.
        return len(list(self.__iter__()))
    def flatten(self):
        '''Pull all reachable backer entries into local storage and drop the
        backers (except a backer flagged with `cooldict_ignore`).

        NOTE(review): uses dict.iterkeys(), so this path is Python 2 only.
        '''
        l.info("Flattening backers of %s!", self)
        if len(self.backers) > 1:
            l.debug("Slow path")
            s_keys = set(self.storage.keys())
            for b in reversed(self.backers):
                b_keys = set(b.keys())
                for i in b_keys - s_keys:
                    self.storage[i] = b[i]
            self.backers = [ ]
        else:
            a_line = list(ancestry_line(self))
            ancestors = [ get_storage(a) for a in a_line ]
            ancestor_keys = [ set(a.keys()) for a in ancestors ]
            remaining = set()
            new_backers = [ ]
            try:
                #print "Checking for ignores"
                ignored_idx = [ getattr(a, 'cooldict_ignore', False) for a in a_line ].index(True)
                #print "... found at",ignored_idx
                new_backers = [ a_line[ignored_idx] ]
                ancestors = ancestors[:ignored_idx]
                ancestor_keys = ancestor_keys[:ignored_idx]
            except ValueError:
                #print "... not found"
                pass
            #print "ancestors:",ancestors
            #print "new ancestors:",ancestors
            # NOTE(review): `ancestors` already holds storages, so
            # get_storage(a) and isinstance(a, BackedDict) below look
            # redundant/dead - confirm intent.
            for a in reversed(ancestors):
                keys = set(get_storage(a).iterkeys())
                ancestor_keys.append(keys)
                remaining |= keys
                if isinstance(a, BackedDict):
                    remaining -= a.deleted
            remaining -= set(self.storage.iterkeys())
            remaining -= self.deleted
            for a,keys in zip(ancestors, ancestor_keys):
                toadd = keys & remaining
                if len(toadd) == 0:
                    continue
                l.debug("Adding %d keys from %s", len(toadd), a)
                for k in toadd:
                    self.storage[k] = a[k]
                remaining -= keys
            if len(remaining) != 0:
                # NOTE(review): the %d is never interpolated - Exception does
                # not apply printf-style args; should use % formatting.
                raise Exception("%d items remaining after flatten!", len(remaining))
            self.backers = new_backers
    def _ana_getstate(self):
        return self.storage, self.deleted, self.backers
    def _ana_setstate(self, state):
        self.storage, self.deleted, self.backers = state
class FinalizableDict(ana.Storable, collections.MutableMapping):
    ''' Implements a finalizable dict. This is meant to support BranchingDict, and offers no guarantee about the actual immutability of the underlying data. It's quite easy to bypass. You've been warned. '''
    def __init__(self, storage = None):
        # finalized: once True, __setitem__/__delitem__ raise FinalizedError.
        self.finalized = False
        self.storage = { } if storage is None else storage
        self.make_uuid()  # provided by ana.Storable
    def __getitem__(self, a):
        return self.storage[a]
    def __delitem__(self, a):
        if self.finalized:
            raise FinalizedError("dict is finalized")
        del self.storage[a]
    def __setitem__(self, k, v):
        if self.finalized:
            raise FinalizedError("dict is finalized")
        self.storage[k] = v
    def __iter__(self):
        return self.storage.__iter__()
    def __len__(self):
        return self.storage.__len__()
    def finalize(self):
        # One-way switch: there is deliberately no way to un-finalize.
        self.finalized = True
    def _ana_getstate(self):
        # Serializing implies finalization, since branches may share this state.
        self.finalize()
        return self.storage
    def _ana_setstate(self, state):
        self.storage = state
        self.finalized = True
class BranchingDict(collections.MutableMapping):
    ''' This implements a branching dictionary. Basically, a BranchingDict can be branch()ed and the two copies will thereafter share a common backer, but will not write back to that backer. Can probably be reimplemented without FinalizableDict. '''
    def __init__(self, d = None, max_depth = None, min_depth = None):
        max_depth = default_max_depth if max_depth is None else max_depth
        min_depth = default_min_depth if min_depth is None else min_depth
        d = { } if d is None else d
        if not isinstance(d, FinalizableDict):
            d = FinalizableDict(d)
        self.cowdict = d
        # When the ancestry chain gets too deep, flatten the first BackedDict
        # ancestor past min_depth to cap lookup cost.
        ancestors = list(self.ancestry_line())
        if len(ancestors) > max_depth:
            l.debug("BranchingDict got too deep (%d)", len(ancestors))
            new_dictriarch = None
            for k in ancestors[min_depth:]:
                if isinstance(k, BackedDict):
                    new_dictriarch = k
                    break
            if new_dictriarch is not None:
                l.debug("Found ancestor %s", new_dictriarch)
                new_dictriarch.flatten()
        self.max_depth = max_depth
        self.min_depth = min_depth
    # Returns the ancestry of this dict, back to the first dict that we don't recognize
    # or that has more than one backer.
    def ancestry_line(self):
        return ancestry_line(self)
    # Returns the common ancestor between self and other.
    def common_ancestor(self, other):
        our_line = set([ get_id(a) for a in self.ancestry_line() ])
        for d in other.ancestry_line():
            if get_id(d) in our_line:
                return d
        return None
    # Returns the entries created and the entries deleted since the specified ancestor.
    def changes_since(self, ancestor):
        created = set()
        deleted = set()
        for a in self.ancestry_line():
            if a is ancestor:
                break
            elif isinstance(a, FinalizableDict):
                # Transparent wrapper: its storage appears later in the line.
                continue
            elif isinstance(a, BackedDict):
                created.update(set(a.storage.keys()) - deleted)
                deleted.update(a.deleted - created)
            elif isinstance(a, dict):
                created.update(a.keys())
        return created, deleted
    def __getitem__(self, a):
        return self.cowdict[a]
    def __setitem__(self, k, v):
        # Copy-on-write: a finalized cowdict is shared with other branches,
        # so writes go into a fresh child layered on top of it.
        if self.cowdict.finalized:
            l.debug("Got a finalized dict. Making a child.")
            self.cowdict = FinalizableDict(BackedDict(self.cowdict.storage))
        self.cowdict[k] = v
    def __delitem__(self, k):
        if self.cowdict.finalized:
            l.debug("Got a finalized dict. Making a child.")
            self.cowdict = FinalizableDict(BackedDict(self.cowdict.storage))
        del self.cowdict[k]
    def __iter__(self):
        return self.cowdict.__iter__()
    def __len__(self):
        return self.cowdict.__len__()
    def branch(self):
        # Freeze the current layer so both branches share it read-only.
        self.cowdict.finalize()
        return BranchingDict(self.cowdict, max_depth=self.max_depth, min_depth=self.min_depth)
def test():
    '''Self-test exercising BackedDict, BranchingDict, CachedDict and
    ana-based pickling.  Raises AssertionError on any failure.'''
    import pickle
    try:
        import standard_logging # pylint: disable=W0612,
    except ImportError:
        pass
    l.setLevel(logging.DEBUG)
    l.info("Testing basic BackedDict functionality.")
    a = "aa"
    b = "bb"
    c = "cc"
    d = "dd"
    one = 11
    two = 12
    three = 13
    b1 = BackedDict()
    b2 = BackedDict()
    b1[a] = 'a'
    b1[one] = 1
    b2[b] = 'b'
    assert len(b1) == 2
    assert len(b2) == 1
    assert b1[a] == 'a'
    assert b1[one] == 1
    assert b2[b] == 'b'
    b3 = BackedDict(b1, b2)
    b3[c] = c
    assert len(b3) == 4
    assert b3[a] == 'a'
    assert b3[one] == 1
    assert b3[b] == 'b'
    assert b3[c] == c
    assert len(b1) == 2
    assert len(b2) == 1
    assert b1[a] == 'a'
    assert b1[one] == 1
    assert b2[b] == 'b'
    del b3[a]
    assert len(b3) == 3
    l.info("Testing BranchingDict functionality.")
    d1 = BranchingDict(b3)
    d2 = d1.branch()
    d3 = d2.branch()
    d1[d] = d
    assert len(b3) == 3
    assert len(d1) == 4
    assert len(d2) == 3
    assert len(d3) == 3
    assert d1[d] == d
    assert d1[b] == 'b'
    assert d1[one] == 1
    b3.flatten()
    assert len(b3.backers) == 0
    assert len(b3) == 3
    d3[b] = "omg"
    assert d3[b] == "omg"
    assert d2[b] == 'b'
    d4 = d3.branch()
    del d4[b]
    del d4[c]
    d5 = d4.branch()
    d5['hmm'] = 5
    d6 = d5.branch()
    l.info("Testing BranchingDict ancestry and flattening.")
    assert len(list(d5.ancestry_line())) == 5
    dnew = d5.branch()
    dnew['ohsnap'] = 1
    for _ in range(50):
        dnew = dnew.branch()
        dnew['ohsnap'] += 1
    assert len(list(dnew.ancestry_line())) == 56
    for _ in range(2000):
        # BUGFIX: silenced the noisy per-iteration debug print (2000 lines
        # of output); this also matches the later revision of this file.
        #print "Branching dict number", _
        dnew = dnew.branch()
        dnew['ohsnap'] += 1
    assert len(list(dnew.ancestry_line())) == 156
    common = d4.common_ancestor(d2)
    changed, deleted = d4.changes_since(common)
    assert len(changed) == 0
    assert len(deleted) == 2
    changed, deleted = d6.changes_since(common)
    assert len(changed) == 1
    assert len(deleted) == 2
    l.info("Testing CachedDict.")
    b0 = { }
    b4 = BackedDict(storage=b0)
    b4[one] = 'one'
    assert len(b0) == 1
    assert b0[one] == 'one'
    assert len(b4) == 1
    assert b4[one] == 'one'
    b5 = CachedDict(BackedDict(b4))
    assert len(b5) == 1
    assert len(b5.cache) == 0
    assert b5[one] == 'one'
    assert len(b5.cache) == 1
    assert len(b5) == 1
    assert len(b4) == 1
    b5[two] = 2
    assert len(b5) == 2
    b6 = BackedDict({three: 3})
    b6[three] = 3
    assert len(b6) == 1
    l.info("Testing pickling.")
    pb1 = BackedDict({1: '1', 2: '2', 3: '3'})
    pb1_id = pb1.ana_store()
    del pb1
    pb1 = BackedDict.ana_load(pb1_id)
    assert pb1.ana_uuid == pb1_id
    assert len(pb1) == 3
    assert len(pb1.storage) == 0
    assert pb1[2] == '2'
    pb1a = BackedDict.ana_load(pb1_id)
    assert pb1 is pb1a
    del pb1a
    pb2 = BackedDict(pb1, {'a': 1, 'b': 2})
    pb2s = pickle.dumps(pb2, -1)
    del pb2
    pb2 = pickle.loads(pb2s)
    assert pb1 is pb2.backers[0]
    bb1 = BranchingDict(pb2)
    bb2 = bb1.branch()
    bb1[4] = '4'
    assert bb1.common_ancestor(bb2) == pb2
    bb1s = pickle.dumps(bb1, -1)
    del bb1
    bb1 = pickle.loads(bb1s)
    assert bb1.common_ancestor(bb2) == pb2
# Run the module's self-test when executed directly.
if __name__ == "__main__":
    test()
# remove some output
import sys
import collections
import itertools
import ana
import logging
l = logging.getLogger("cooldict")
class FinalizedError(Exception):
    '''Raised when mutating a FinalizableDict after finalize().'''
    pass
class BranchingDictError(Exception):
    '''Error type reserved for BranchingDict failures (not raised in the
    code visible in this module).'''
    pass
# Cap ancestry-chain length at a fraction of the recursion limit; past this,
# BranchingDict.__init__ flattens an old ancestor (see BranchingDict below).
default_max_depth = sys.getrecursionlimit() * 0.2
# How many of the newest layers to keep intact when a chain is collapsed.
default_min_depth = 100
#####################
### Utility stuff ###
#####################
def get_storage(d):
    '''Returns the local (own-layer) storage of the dictionary.

    Wrapper types with no storage of their own yield an empty dict;
    unrecognized objects are assumed to BE their own storage.
    '''
    if isinstance(d, (FinalizableDict, BranchingDict)):
        return { }
    if isinstance(d, BackedDict):
        return d.storage
    if isinstance(d, CachedDict):
        return d.cache
    return d
def get_backers(d):
    '''Returns the list of dicts backing *d* (empty for plain objects).'''
    if isinstance(d, FinalizableDict):
        return [ d.storage ]
    if isinstance(d, BackedDict):
        return d.backers
    if isinstance(d, CachedDict):
        return [ d.backer ]
    if isinstance(d, BranchingDict):
        return [ d.cowdict ]
    return [ ]
def get_deleted(d):
    '''Returns the set of keys deleted from d (only BackedDict tracks this).'''
    return d.deleted if isinstance(d, BackedDict) else set()
def get_id(d):
    '''Returns the ID of the dictionary: its dict_id attribute when present,
    otherwise the plain id() of the object.'''
    return getattr(d, 'dict_id', id(d))
def ancestry_line(d):
    '''
    Yield the ancestry of this dict: follow single-backer links until we
    reach a dict we don't recognize or one with more than one backer.
    '''
    backers = get_backers(d)
    while len(backers) == 1:
        parent = backers[0]
        yield parent
        backers = get_backers(parent)
############################
### The dicts themselves ###
############################
class CachedDict(ana.Storable, collections.MutableMapping):
    ''' A write-through cache layered over another mapping.

    Reads are served from the local cache when possible, falling back to the
    backer (and caching the result).  Writes go to both cache and backer.
    Deletes are deliberately lenient: removing an absent key is a no-op.
    '''
    def __init__(self, backer):
        self.backer = backer
        self.cache = { }
        self.make_uuid()
    def default_cacher(self, k):
        # Pull a missing key from the backer and remember it locally.
        fetched = self.backer[k]
        self.cache[k] = fetched
        return fetched
    def __getitem__(self, k):
        if k in self.cache:
            return self.cache[k]
        return self.default_cacher(k)
    def __setitem__(self, k, v):
        self.cache[k] = v
        self.backer[k] = v
    def __delitem__(self, k):
        self.cache.pop(k, None)
        self.backer.pop(k, None)
    def __iter__(self):
        return iter(self.backer)
    def __len__(self):
        return sum(1 for _ in iter(self.backer))
    def _ana_getstate(self):
        # Only the backer is persisted; the cache is rebuilt empty on load.
        return self.backer
    def _ana_setstate(self, state):
        self.backer = state
        self.cache = { }
class BackedDict(ana.Storable, collections.MutableMapping):
    ''' Implements a mapping that's backed by other mappings.

    Lookups consult a local deletion set, then local storage, then each
    backer in order.  Writes and deletes only ever touch the local layer;
    backers are never modified.
    '''
    def __init__(self, *backers, **kwargs):
        self.backers = backers
        self.storage = kwargs.get('storage', { })
        self.deleted = kwargs.get('deleted', set())
        self.make_uuid()
    def __getitem__(self, a):
        # make sure we haven't deleted it
        if a in self.deleted:
            raise KeyError(a)
        # return it if we have it in storage
        if a in self.storage:
            return self.storage[a]
        # try the backers
        for p in self.backers:
            try:
                return p[a]
            except KeyError:
                pass
        # panic!
        raise KeyError(a)
    def __delitem__(self, a):
        # make sure we can do it
        if a not in self:
            raise KeyError(a)
        # drop any local copy and mask the key; backers stay untouched
        self.storage.pop(a, None)
        self.deleted.add(a)
    def __setitem__(self, k, v):
        # a write un-deletes the key
        self.deleted.discard(k)
        self.storage[k] = v
    def __iter__(self):
        # local storage shadows backers; deleted keys are masked out
        chain = itertools.chain(self.storage, *self.backers)
        seen = set()
        for k in chain:
            if k not in self.deleted and k not in seen:
                seen.add(k)
                yield k
    def __len__(self):
        return len(list(self.__iter__()))
    def flatten(self):
        '''Pull all reachable entries into local storage and drop the backers.'''
        l.info("Flattening backers of %s!", self)
        if len(self.backers) > 1:
            # Slow path: multiple backers; copy every missing key directly.
            l.debug("Slow path")
            s_keys = set(self.storage.keys())
            for b in reversed(self.backers):
                b_keys = set(b.keys())
                for i in b_keys - s_keys:
                    self.storage[i] = b[i]
            self.backers = [ ]
        else:
            # Fast path: walk the single-backer ancestry and copy keys so
            # that newer layers win over older ones.
            a_line = list(ancestry_line(self))
            ancestors = [ get_storage(a) for a in a_line ]
            ancestor_keys = [ set(a.keys()) for a in ancestors ]
            remaining = set()
            new_backers = [ ]
            try:
                # Stop at (and keep as backer) the first ancestor marked
                # with cooldict_ignore; only newer layers get flattened in.
                ignored_idx = [ getattr(a, 'cooldict_ignore', False) for a in a_line ].index(True)
                new_backers = [ a_line[ignored_idx] ]
                ancestors = ancestors[:ignored_idx]
                ancestor_keys = ancestor_keys[:ignored_idx]
            except ValueError:
                # no ignored ancestor found; flatten the whole line
                pass
            # NOTE(review): `ancestors` holds raw storages at this point, so
            # the key-sets appended below duplicate the precomputed ones and
            # the BackedDict.deleted subtraction can never trigger.  Looks
            # redundant/suspect but is preserved as-is — confirm before
            # changing.
            for a in reversed(ancestors):
                keys = set(get_storage(a).iterkeys())
                ancestor_keys.append(keys)
                remaining |= keys
                if isinstance(a, BackedDict):
                    remaining -= a.deleted
            remaining -= set(self.storage.iterkeys())
            remaining -= self.deleted
            for a,keys in zip(ancestors, ancestor_keys):
                toadd = keys & remaining
                if len(toadd) == 0:
                    continue
                l.debug("Adding %d keys from %s", len(toadd), a)
                for k in toadd:
                    self.storage[k] = a[k]
                remaining -= keys
            if len(remaining) != 0:
                # BUGFIX: the message was previously passed as a tuple
                # argument to Exception and never %-formatted.
                raise Exception("%d items remaining after flatten!" % len(remaining))
            self.backers = new_backers
    def _ana_getstate(self):
        return self.storage, self.deleted, self.backers
    def _ana_setstate(self, state):
        self.storage, self.deleted, self.backers = state
class FinalizableDict(ana.Storable, collections.MutableMapping):
    ''' A dict wrapper that can be frozen with finalize().  Meant to support
    BranchingDict; it offers no guarantee about the actual immutability of
    the underlying data and is easy to bypass.  You've been warned. '''
    def __init__(self, storage = None):
        self.finalized = False
        self.storage = storage if storage is not None else { }
        self.make_uuid()
    def _check_mutable(self):
        # Refuse writes once finalize() has been called.
        if self.finalized:
            raise FinalizedError("dict is finalized")
    def __getitem__(self, a):
        return self.storage[a]
    def __delitem__(self, a):
        self._check_mutable()
        del self.storage[a]
    def __setitem__(self, k, v):
        self._check_mutable()
        self.storage[k] = v
    def __iter__(self):
        return iter(self.storage)
    def __len__(self):
        return len(self.storage)
    def finalize(self):
        '''Mark this dict read-only.'''
        self.finalized = True
    def _ana_getstate(self):
        # Persisting implies freezing: branches may still share this layer.
        self.finalize()
        return self.storage
    def _ana_setstate(self, state):
        self.storage = state
        self.finalized = True
class BranchingDict(collections.MutableMapping):
    ''' This implements a branching dictionary. Basically, a BranchingDict can be branch()ed and the two copies will thereafter share a common backer, but will not write back to that backer. Can probably be reimplemented without FinalizableDict. '''
    def __init__(self, d = None, max_depth = None, min_depth = None):
        # Fall back to the module-level depth defaults when not specified.
        max_depth = default_max_depth if max_depth is None else max_depth
        min_depth = default_min_depth if min_depth is None else min_depth
        d = { } if d is None else d
        # The working layer is always a FinalizableDict so branch() can
        # freeze it.
        if not isinstance(d, FinalizableDict):
            d = FinalizableDict(d)
        self.cowdict = d
        ancestors = list(self.ancestry_line())
        # If the ancestry chain grew too long, flatten the first BackedDict
        # older than min_depth to keep lookups and pickling bounded.
        if len(ancestors) > max_depth:
            l.debug("BranchingDict got too deep (%d)", len(ancestors))
            new_dictriarch = None
            for k in ancestors[min_depth:]:
                if isinstance(k, BackedDict):
                    new_dictriarch = k
                    break
            if new_dictriarch is not None:
                l.debug("Found ancestor %s", new_dictriarch)
                new_dictriarch.flatten()
        self.max_depth = max_depth
        self.min_depth = min_depth
    def ancestry_line(self):
        '''
        Returns the ancestry of this dict, back to the first dict that we
        don't recognize or that has more than one backer.
        '''
        return ancestry_line(self)
    def common_ancestor(self, other):
        '''Returns the common ancestor between self and other, or None.'''
        our_line = set([ get_id(a) for a in self.ancestry_line() ])
        for d in other.ancestry_line():
            if get_id(d) in our_line:
                return d
        return None
    def changes_since(self, ancestor):
        '''Returns the entries created and the entries deleted since the specified ancestor.'''
        created = set()
        deleted = set()
        for a in self.ancestry_line():
            if a is ancestor:
                break
            elif isinstance(a, FinalizableDict):
                # transparent wrapper; nothing stored here
                continue
            elif isinstance(a, BackedDict):
                # newer layers win over older ones
                created.update(set(a.storage.keys()) - deleted)
                deleted.update(a.deleted - created)
            elif isinstance(a, dict):
                created.update(a.keys())
        return created, deleted
    def __getitem__(self, a):
        return self.cowdict[a]
    def __setitem__(self, k, v):
        # Copy-on-write: if the current layer was frozen by branch(), start
        # a new writable layer backed by it before writing.
        if self.cowdict.finalized:
            l.debug("Got a finalized dict. Making a child.")
            self.cowdict = FinalizableDict(BackedDict(self.cowdict.storage))
        self.cowdict[k] = v
    def __delitem__(self, k):
        # Same copy-on-write split as __setitem__.
        if self.cowdict.finalized:
            l.debug("Got a finalized dict. Making a child.")
            self.cowdict = FinalizableDict(BackedDict(self.cowdict.storage))
        del self.cowdict[k]
    def __iter__(self):
        return self.cowdict.__iter__()
    def __len__(self):
        return self.cowdict.__len__()
    def branch(self):
        '''Freeze the current layer and return a new dict sharing it.'''
        self.cowdict.finalize()
        return BranchingDict(self.cowdict, max_depth=self.max_depth, min_depth=self.min_depth)
def test():
    '''Self-test exercising BackedDict, BranchingDict, CachedDict and
    ana-based pickling.  Raises AssertionError on any failure.'''
    import pickle
    try:
        import standard_logging # pylint: disable=W0612,
    except ImportError:
        pass
    l.setLevel(logging.DEBUG)
    l.info("Testing basic BackedDict functionality.")
    a = "aa"
    b = "bb"
    c = "cc"
    d = "dd"
    one = 11
    two = 12
    three = 13
    b1 = BackedDict()
    b2 = BackedDict()
    b1[a] = 'a'
    b1[one] = 1
    b2[b] = 'b'
    assert len(b1) == 2
    assert len(b2) == 1
    assert b1[a] == 'a'
    assert b1[one] == 1
    assert b2[b] == 'b'
    b3 = BackedDict(b1, b2)
    b3[c] = c
    assert len(b3) == 4
    assert b3[a] == 'a'
    assert b3[one] == 1
    assert b3[b] == 'b'
    assert b3[c] == c
    # the original backers must be unaffected by writes to b3
    assert len(b1) == 2
    assert len(b2) == 1
    assert b1[a] == 'a'
    assert b1[one] == 1
    assert b2[b] == 'b'
    del b3[a]
    assert len(b3) == 3
    l.info("Testing BranchingDict functionality.")
    d1 = BranchingDict(b3)
    d2 = d1.branch()
    d3 = d2.branch()
    d1[d] = d
    assert len(b3) == 3
    assert len(d1) == 4
    assert len(d2) == 3
    assert len(d3) == 3
    assert d1[d] == d
    assert d1[b] == 'b'
    assert d1[one] == 1
    b3.flatten()
    assert len(b3.backers) == 0
    assert len(b3) == 3
    d3[b] = "omg"
    assert d3[b] == "omg"
    assert d2[b] == 'b'
    d4 = d3.branch()
    del d4[b]
    del d4[c]
    d5 = d4.branch()
    d5['hmm'] = 5
    d6 = d5.branch()
    l.info("Testing BranchingDict ancestry and flattening.")
    assert len(list(d5.ancestry_line())) == 5
    dnew = d5.branch()
    dnew['ohsnap'] = 1
    for _ in range(50):
        dnew = dnew.branch()
        dnew['ohsnap'] += 1
    assert len(list(dnew.ancestry_line())) == 56
    # deep branching should trigger automatic flattening of old ancestors
    for _ in range(2000):
        #print "Branching dict number", _
        dnew = dnew.branch()
        dnew['ohsnap'] += 1
    assert len(list(dnew.ancestry_line())) == 156
    common = d4.common_ancestor(d2)
    changed, deleted = d4.changes_since(common)
    assert len(changed) == 0
    assert len(deleted) == 2
    changed, deleted = d6.changes_since(common)
    assert len(changed) == 1
    assert len(deleted) == 2
    l.info("Testing CachedDict.")
    b0 = { }
    b4 = BackedDict(storage=b0)
    b4[one] = 'one'
    assert len(b0) == 1
    assert b0[one] == 'one'
    assert len(b4) == 1
    assert b4[one] == 'one'
    b5 = CachedDict(BackedDict(b4))
    assert len(b5) == 1
    assert len(b5.cache) == 0
    assert b5[one] == 'one'
    assert len(b5.cache) == 1
    assert len(b5) == 1
    assert len(b4) == 1
    b5[two] = 2
    assert len(b5) == 2
    b6 = BackedDict({three: 3})
    b6[three] = 3
    assert len(b6) == 1
    l.info("Testing pickling.")
    pb1 = BackedDict({1: '1', 2: '2', 3: '3'})
    pb1_id = pb1.ana_store()
    del pb1
    pb1 = BackedDict.ana_load(pb1_id)
    assert pb1.ana_uuid == pb1_id
    assert len(pb1) == 3
    assert len(pb1.storage) == 0
    assert pb1[2] == '2'
    pb1a = BackedDict.ana_load(pb1_id)
    assert pb1 is pb1a
    del pb1a
    pb2 = BackedDict(pb1, {'a': 1, 'b': 2})
    pb2s = pickle.dumps(pb2, -1)
    del pb2
    pb2 = pickle.loads(pb2s)
    assert pb1 is pb2.backers[0]
    bb1 = BranchingDict(pb2)
    bb2 = bb1.branch()
    bb1[4] = '4'
    assert bb1.common_ancestor(bb2) == pb2
    bb1s = pickle.dumps(bb1, -1)
    del bb1
    bb1 = pickle.loads(bb1s)
    assert bb1.common_ancestor(bb2) == pb2
# Run the module's self-test when executed directly.
if __name__ == "__main__":
    test()
|
import time
import weakref
import gtk
from snaked.core.shortcuts import ContextShortcutActivator, register_shortcut
import snaked.core.manager
import snaked.core.editor
class TabbedEditorManager(snaked.core.manager.EditorManager):
    '''Editor manager that presents editors as pages of a gtk.Notebook.'''
    def __init__(self, session):
        super(TabbedEditorManager, self).__init__(session)
        self.last_switch_time = None
        # panel widget -> optional popup callback; weak keys let destroyed
        # widgets drop out automatically
        self.panels = weakref.WeakKeyDictionary()
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.window.connect('delete-event', self.on_delete_event)
        self.window.set_property('default-width', 800)
        self.window.set_property('default-height', 500)
        self.activator = ContextShortcutActivator(self.window, self.get_context)
        self.box = gtk.VBox()
        self.window.add(self.box)
        self.note = gtk.Notebook()
        self.note.set_show_tabs(self.snaked_conf['SHOW_TABS'])
        self.note.set_scrollable(True)
        self.note.set_property('tab-hborder', 10)
        self.note.set_property('homogeneous', False)
        self.note.connect_after('switch-page', self.on_switch_page)
        self.note.connect('page_removed', self.on_page_removed)
        self.note.connect('page_reordered', self.on_page_reordered)
        self.box.pack_start(self.note)
        register_shortcut('toggle-tabs-visibility', '<alt>F11', 'Window', 'Toggles tabs visibility')
        register_shortcut('next-editor', '<alt>Right', 'Window', 'Switches to next editor')
        register_shortcut('prev-editor', '<alt>Left', 'Window', 'Switches to previous editor')
        register_shortcut('next-editor-alt', '<ctrl>Page_Down', 'Window', 'Switches to next editor')
        register_shortcut('prev-editor-alt', '<ctrl>Page_Up', 'Window', 'Switches to previous editor')
        register_shortcut('fullscreen', 'F11', 'Window', 'Toggles fullscreen mode')
        register_shortcut('toggle-console', '<alt>grave', 'Window', 'Toggles console')
        # Restore saved geometry before showing the window.
        if self.snaked_conf['RESTORE_POSITION'] and 'LAST_POSITION' in self.snaked_conf:
            pos, size = self.snaked_conf['LAST_POSITION']
            self.window.move(*pos)
            self.window.resize(*size)
        self.window.show_all()
        if self.snaked_conf['FULLSCREEN']:
            self.window.fullscreen()
    def get_context(self):
        '''Return a 1-tuple holding the editor on the current page, or (None,).'''
        widget = self.note.get_nth_page(self.note.get_current_page())
        for e in self.editors:
            if e.widget is widget:
                return (e,)
        return (None,)
    def manage_editor(self, editor):
        '''Add a new editor as a reorderable notebook page and focus it.'''
        label = gtk.Label('Unknown')
        self.note.insert_page(editor.widget, label, -1)
        self.note.set_tab_reorderable(editor.widget, True)
        self.focus_editor(editor)
        editor.view.grab_focus()
    def focus_editor(self, editor):
        idx = self.note.page_num(editor.widget)
        self.note.set_current_page(idx)
    def update_top_level_title(self):
        '''Mirror the current tab's label into the window title.'''
        idx = self.note.get_current_page()
        if idx < 0:
            return
        title = self.note.get_tab_label_text(self.note.get_nth_page(idx))
        if title is not None:
            self.window.set_title(title)
    def set_editor_title(self, editor, title):
        self.note.set_tab_label_text(editor.widget, title)
        if self.note.get_current_page() == self.note.page_num(editor.widget):
            self.update_top_level_title()
    def on_delete_event(self, *args):
        # BUGFIX: pass the currently focused editor to quit() instead of
        # None, so closing the window via the window manager behaves like
        # the 'quit' shortcut (which receives the active editor).
        self.quit(self.get_context()[0])
    def close_editor(self, editor):
        idx = self.note.page_num(editor.widget)
        self.note.remove_page(idx)
        editor.editor_closed.emit()
    def set_editor_shortcuts(self, editor):
        '''Bind plugin and window-level shortcuts (window-level ones once).'''
        self.plugin_manager.bind_shortcuts(self.activator, editor)
        if hasattr(self, 'editor_shortcuts_binded'):
            return
        self.editor_shortcuts_binded = True
        self.activator.bind_to_name('quit', self.quit)
        self.activator.bind_to_name('close-window', self.close_editor)
        self.activator.bind_to_name('save', self.save)
        self.activator.bind_to_name('next-editor', self.switch_to, 1)
        self.activator.bind_to_name('prev-editor', self.switch_to, -1)
        self.activator.bind_to_name('next-editor-alt', self.switch_to, 1)
        self.activator.bind_to_name('prev-editor-alt', self.switch_to, -1)
        self.activator.bind_to_name('new-file', self.new_file_action)
        self.activator.bind_to_name('show-preferences', self.show_preferences)
        self.activator.bind_to_name('fullscreen', self.fullscreen)
        self.activator.bind_to_name('toggle-tabs-visibility', self.toggle_tabs)
        self.activator.bind_to_name('place-spot', self.add_spot_with_feedback)
        self.activator.bind_to_name('goto-last-spot', self.goto_last_spot)
        self.activator.bind_to_name('goto-next-spot', self.goto_next_prev_spot, True)
        self.activator.bind_to_name('goto-prev-spot', self.goto_next_prev_spot, False)
        self.activator.bind_to_name('toggle-console', self.toggle_console)
        self.activator.bind('Escape', self.process_escape)
    def quit(self, editor):
        # Remember geometry for the next session before tearing down.
        self.snaked_conf['LAST_POSITION'] = self.window.get_position(), self.window.get_size()
        self.window.hide()
        super(TabbedEditorManager, self).quit(editor)
    def save(self, editor):
        editor.save()
    def set_transient_for(self, editor, window):
        window.set_transient_for(self.window)
    def on_switch_page(self, *args):
        self.update_top_level_title()
    def on_page_removed(self, note, child, idx):
        '''After closing a tab, jump to the last spot's page if any,
        otherwise to the previous page.'''
        for e in self.editors:
            if e.widget is child:
                spot = self.get_last_spot(None, e)
                if spot:
                    note.set_current_page(note.page_num(spot.editor().widget))
                    return
        if idx > 0:
            note.set_current_page(idx - 1)
    def switch_to(self, editor, dir):
        '''Cycle to the next/previous tab, dropping a navigation spot at
        most once every 5 seconds.'''
        if self.last_switch_time is None or time.time() - self.last_switch_time > 5:
            self.add_spot(editor)
        self.last_switch_time = time.time()
        idx = ( self.note.get_current_page() + dir ) % self.note.get_n_pages()
        self.note.set_current_page(idx)
    def fullscreen(self, editor):
        self.snaked_conf['FULLSCREEN'] = not self.snaked_conf['FULLSCREEN']
        if self.snaked_conf['FULLSCREEN']:
            self.window.fullscreen()
        else:
            self.window.unfullscreen()
    def toggle_tabs(self, editor):
        self.note.set_show_tabs(not self.note.get_show_tabs())
        self.snaked_conf['SHOW_TABS'] = self.note.get_show_tabs()
    @snaked.core.editor.Editor.stack_add_request
    def on_stack_add_request(self, editor, widget, on_popup):
        self.panels[widget] = on_popup
        self.box.pack_end(widget, False, False)
    @snaked.core.editor.Editor.stack_popup_request
    def on_stack_popup_request(self, editor, widget):
        '''Show the requested panel (hiding any others) at about a third of
        the window height.'''
        if widget in self.panels:
            for w in self.panels:
                if w is not widget:
                    w.hide()
            widget.show()
            _, _, _, wh, _ = self.window.window.get_geometry()
            w, _ = widget.get_size_request()
            h = max(200, wh/3)
            widget.set_size_request(w, h)
            if self.panels[widget]:
                self.panels[widget](widget, editor)
    def toggle_console(self, editor):
        from snaked.core.console import toggle_console
        toggle_console(editor)
    def on_page_reordered(self, note, child, num):
        # Keep self.editors ordered like the notebook pages.
        for i, e in enumerate(self.editors):
            if e.widget is child:
                self.editors[i], self.editors[num] = self.editors[num], self.editors[i]
                break
# store last active tab on window closing by wm facilities
import time
import weakref
import gtk
from snaked.core.shortcuts import ContextShortcutActivator, register_shortcut
import snaked.core.manager
import snaked.core.editor
class TabbedEditorManager(snaked.core.manager.EditorManager):
    '''Editor manager that presents editors as pages of a gtk.Notebook.'''
    def __init__(self, session):
        super(TabbedEditorManager, self).__init__(session)
        self.last_switch_time = None
        # panel widget -> optional popup callback; weak keys let destroyed
        # widgets drop out automatically
        self.panels = weakref.WeakKeyDictionary()
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.window.connect('delete-event', self.on_delete_event)
        self.window.set_property('default-width', 800)
        self.window.set_property('default-height', 500)
        self.activator = ContextShortcutActivator(self.window, self.get_context)
        self.box = gtk.VBox()
        self.window.add(self.box)
        self.note = gtk.Notebook()
        self.note.set_show_tabs(self.snaked_conf['SHOW_TABS'])
        self.note.set_scrollable(True)
        self.note.set_property('tab-hborder', 10)
        self.note.set_property('homogeneous', False)
        self.note.connect_after('switch-page', self.on_switch_page)
        self.note.connect('page_removed', self.on_page_removed)
        self.note.connect('page_reordered', self.on_page_reordered)
        self.box.pack_start(self.note)
        register_shortcut('toggle-tabs-visibility', '<alt>F11', 'Window', 'Toggles tabs visibility')
        register_shortcut('next-editor', '<alt>Right', 'Window', 'Switches to next editor')
        register_shortcut('prev-editor', '<alt>Left', 'Window', 'Switches to previous editor')
        register_shortcut('next-editor-alt', '<ctrl>Page_Down', 'Window', 'Switches to next editor')
        register_shortcut('prev-editor-alt', '<ctrl>Page_Up', 'Window', 'Switches to previous editor')
        register_shortcut('fullscreen', 'F11', 'Window', 'Toggles fullscreen mode')
        register_shortcut('toggle-console', '<alt>grave', 'Window', 'Toggles console')
        # Restore saved geometry before showing the window.
        if self.snaked_conf['RESTORE_POSITION'] and 'LAST_POSITION' in self.snaked_conf:
            pos, size = self.snaked_conf['LAST_POSITION']
            self.window.move(*pos)
            self.window.resize(*size)
        self.window.show_all()
        if self.snaked_conf['FULLSCREEN']:
            self.window.fullscreen()
    def get_context(self):
        '''Return a 1-tuple holding the editor on the current page, or (None,).'''
        widget = self.note.get_nth_page(self.note.get_current_page())
        for e in self.editors:
            if e.widget is widget:
                return (e,)
        return (None,)
    def manage_editor(self, editor):
        '''Add a new editor as a reorderable notebook page and focus it.'''
        label = gtk.Label('Unknown')
        self.note.insert_page(editor.widget, label, -1)
        self.note.set_tab_reorderable(editor.widget, True)
        self.focus_editor(editor)
        editor.view.grab_focus()
    def focus_editor(self, editor):
        idx = self.note.page_num(editor.widget)
        self.note.set_current_page(idx)
    def update_top_level_title(self):
        '''Mirror the current tab's label into the window title.'''
        idx = self.note.get_current_page()
        if idx < 0:
            return
        title = self.note.get_tab_label_text(self.note.get_nth_page(idx))
        if title is not None:
            self.window.set_title(title)
    def set_editor_title(self, editor, title):
        self.note.set_tab_label_text(editor.widget, title)
        if self.note.get_current_page() == self.note.page_num(editor.widget):
            self.update_top_level_title()
    def on_delete_event(self, *args):
        # Closing via the window manager quits with the active editor.
        self.quit(self.get_context()[0])
    def close_editor(self, editor):
        idx = self.note.page_num(editor.widget)
        self.note.remove_page(idx)
        editor.editor_closed.emit()
    def set_editor_shortcuts(self, editor):
        '''Bind plugin and window-level shortcuts (window-level ones once).'''
        self.plugin_manager.bind_shortcuts(self.activator, editor)
        if hasattr(self, 'editor_shortcuts_binded'):
            return
        self.editor_shortcuts_binded = True
        self.activator.bind_to_name('quit', self.quit)
        self.activator.bind_to_name('close-window', self.close_editor)
        self.activator.bind_to_name('save', self.save)
        self.activator.bind_to_name('next-editor', self.switch_to, 1)
        self.activator.bind_to_name('prev-editor', self.switch_to, -1)
        self.activator.bind_to_name('next-editor-alt', self.switch_to, 1)
        self.activator.bind_to_name('prev-editor-alt', self.switch_to, -1)
        self.activator.bind_to_name('new-file', self.new_file_action)
        self.activator.bind_to_name('show-preferences', self.show_preferences)
        self.activator.bind_to_name('fullscreen', self.fullscreen)
        self.activator.bind_to_name('toggle-tabs-visibility', self.toggle_tabs)
        self.activator.bind_to_name('place-spot', self.add_spot_with_feedback)
        self.activator.bind_to_name('goto-last-spot', self.goto_last_spot)
        self.activator.bind_to_name('goto-next-spot', self.goto_next_prev_spot, True)
        self.activator.bind_to_name('goto-prev-spot', self.goto_next_prev_spot, False)
        self.activator.bind_to_name('toggle-console', self.toggle_console)
        self.activator.bind('Escape', self.process_escape)
    def quit(self, editor):
        # Remember geometry for the next session before tearing down.
        self.snaked_conf['LAST_POSITION'] = self.window.get_position(), self.window.get_size()
        self.window.hide()
        super(TabbedEditorManager, self).quit(editor)
    def save(self, editor):
        editor.save()
    def set_transient_for(self, editor, window):
        window.set_transient_for(self.window)
    def on_switch_page(self, *args):
        self.update_top_level_title()
    def on_page_removed(self, note, child, idx):
        '''After closing a tab, jump to the last spot's page if any,
        otherwise to the previous page.'''
        for e in self.editors:
            if e.widget is child:
                spot = self.get_last_spot(None, e)
                if spot:
                    note.set_current_page(note.page_num(spot.editor().widget))
                    return
        if idx > 0:
            note.set_current_page(idx - 1)
    def switch_to(self, editor, dir):
        '''Cycle to the next/previous tab, dropping a navigation spot at
        most once every 5 seconds.'''
        if self.last_switch_time is None or time.time() - self.last_switch_time > 5:
            self.add_spot(editor)
        self.last_switch_time = time.time()
        idx = ( self.note.get_current_page() + dir ) % self.note.get_n_pages()
        self.note.set_current_page(idx)
    def fullscreen(self, editor):
        self.snaked_conf['FULLSCREEN'] = not self.snaked_conf['FULLSCREEN']
        if self.snaked_conf['FULLSCREEN']:
            self.window.fullscreen()
        else:
            self.window.unfullscreen()
    def toggle_tabs(self, editor):
        self.note.set_show_tabs(not self.note.get_show_tabs())
        self.snaked_conf['SHOW_TABS'] = self.note.get_show_tabs()
    @snaked.core.editor.Editor.stack_add_request
    def on_stack_add_request(self, editor, widget, on_popup):
        self.panels[widget] = on_popup
        self.box.pack_end(widget, False, False)
    @snaked.core.editor.Editor.stack_popup_request
    def on_stack_popup_request(self, editor, widget):
        '''Show the requested panel (hiding any others) at about a third of
        the window height.'''
        if widget in self.panels:
            for w in self.panels:
                if w is not widget:
                    w.hide()
            widget.show()
            _, _, _, wh, _ = self.window.window.get_geometry()
            w, _ = widget.get_size_request()
            h = max(200, wh/3)
            widget.set_size_request(w, h)
            if self.panels[widget]:
                self.panels[widget](widget, editor)
    def toggle_console(self, editor):
        from snaked.core.console import toggle_console
        toggle_console(editor)
    def on_page_reordered(self, note, child, num):
        # Keep self.editors ordered like the notebook pages.
        for i, e in enumerate(self.editors):
            if e.widget is child:
                self.editors[i], self.editors[num] = self.editors[num], self.editors[i]
                break
#!/usr/bin/env python
# encoding: utf-8
"""
cmd.py
Created by Geert Dekkers on 2010-03-28.
Copyright (c) 2010 Geert Dekkers Web Studio. All rights reserved.
Class dedicated to subprocess functions
"""
import sys
import os
import unittest
import subprocess
from pyPdf import PdfFileWriter, PdfFileReader
class Command:
    def __init__(self):
        # No state is kept; the class only groups related subprocess helpers.
        pass
def ffmpeg_simple(self, finput, foutput, dimensions=None, verbose=False):
"""A simple version of the ffmpeg wrapper. Takes input & output, optionally the height/width."""
if dimensions:
size = 'x'.join(dimensions)
cmd = [ 'ffmpeg ', '-i', finput, '-s', size, '-y', '-ar', '11025', '-b', '800', foutput]
else:
cmd = [ 'ffmpeg', '-i', finput, '-y', '-ar', '11025', foutput]
print cmd
proc = subprocess.Popen(cmd)
verbose = proc.communicate()[0]
if not verbose: # Return the full path AND filename if verbose is set to True
if dimensions:
return foutput, dimensions
else:
return foutput
else:
return verbose
def ffmpeg(self, finput, foutput, size="other", defaultwidth=917, frame=1, format='png', verbose=False):
""" Converts all sorts of video formats to a clip in .flv format or set of images.
The number of frames can be set in de args.
Just a python wrapper for ffmpeg.
Takes:
1. finput (str), a path
2. foutput (str). This can be:
a) full path including a filename if arg 3 is "large", or "cropped".
b) full path to a directory if arg 3 is "tiny", "small", or "fullsize"
3. size (str), either one of the above, or left blank. In the latter
case, ffmpeg will be instructed to get the first 180 frames of the source
file.
4. defaultwidth (int), a fallback in case a child function fails to return a value
5. Image format is png by default -- only if you want a slice of the vid back.
Needs to be something that both ffmpeg and sips can handle.
6. Return the full cammand list
For sound only files, use "large".
(from man ffmpeg)
*You can extract images from a video:
ffmpeg −i foo.avi −r 1 −s WxH −f image2 foo−%03d.jpeg
This will extract one video frame per second from the video and will output them in files named
foo−001.jpeg,foo−002.jpeg,etc. Images will be rescaled to fit the newWxH values.
The syntax foo−%03d.jpeg specifies to use a decimal number composed of three digits padded with
zeroes to express the sequence number. It is the same syntax supported by the C printf function, but only
formats accepting a normal integer are suitable.
If you want to extract just a limited number of frames, you can use the above command in combination
with the −vframes or −t option, or in combination with −ss to start extracting from a certain point in time.
"""
dimensions = {} # Init a dict to hold dimensions
if size == 'large':
cmd = ["ffmpeg","-i", finput, "-y","-ar","11025", foutput]
elif size == 'cropped':
cmd = ["ffmpeg","-i",finput,"-y","-fs","100000",foutput]
elif size == 'tiny' or size == 'small':
fname = '/'.join([foutput, os.path.splitext(os.path.basename(finput))[0] + ".png"])
cmd = ["ffmpeg", "-i", finput, "-y", "-vframes", "1", "-ss", unicode(frame), fname]
elif size == 'fullsize':
fname = '/'.join([foutput, os.path.splitext(os.path.basename(finput))[0] + ".jpg"])
cmd = ["ffmpeg", "-i", finput, "-y", "-vframes", "1", "-ss", unicode(frame), fname]
else:
cmd = ["ffmpeg","-i",finput,"-y","-vframes","180","-an","-s","qqvga",foutput]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
verbose = proc.communicate()[0]
if size == 'tiny': self.crop_to_center(fname, foutput,29,29)
if size == 'small': self.crop_to_center(fname, foutput,148,148)
if size == 'fullsize':
dimensions = self.sips_get_properties(fname)
if dimensions: width = dimensions['pixelWidth'] if dimensions.has_key('pixelWidth') else defaultwidth
self.sips_resize(fname, foutput, width, format)
if not verbose: # Return the full path AND filename if verbose is set to True
if dimensions:
return foutput, dimensions
else:
return foutput
else:
return verbose
def sips_get_properties(self, source):
""" Gets item propertes (including a PDF) using sips"""
# Get all properties in a dict
properties = {}
optimized = [] # List in which to optimise items (ie strip white space)
prop_cmd = ["sips", "-g", "all", source]
result = subprocess.Popen(prop_cmd, stdout=subprocess.PIPE).stdout.read()
r = result.split('\n')
r[0] = ':'.join(['file', r[0]])
for item in r:
if item:
optimized.append(item.strip())
for kv in optimized:
try:
properties.setdefault(* kv.split(':'))
except Exception, inst:
print inst
if properties:
for k, v in properties.iteritems(): properties[k] = v.strip()
return properties
else:
return None
    def sips_reformat(self, source, target, format):
        """ Reformats images using sips. See args:
        1) path to source file
        2) path to target dir
        3) format (one of string jpeg | tiff| png | gif | jp2 | pict | bmp | qtif | psd | sgi | tga | pdf)

        Returns sips' stdout, or None when the source does not exist.
        """
        if os.path.exists(source):
            cmd = ["sips", "--setProperty", "format", format, source, "--out", target]
            # debug output of the exact sips invocation
            print cmd
            action = subprocess.Popen(cmd,stdout=subprocess.PIPE).stdout.read()
            return action
        else:
            return None
def sips_resize(self, source, target, target_width, format):
"""Scales any source to specified width, respecting aspect using sips.
Takes source (full path), target (can be directory), target_width (string or int), format (one of string jpeg| tiff| png | gif | jp2 | pict | bmp | qtif | psd | sgi | tga )"""
if os.path.exists(source):
resize_cmd = ["sips", "--resampleWidth", str(target_width), "--setProperty", "format", format, source, "--out", target]
action = subprocess.Popen(resize_cmd,stdout=subprocess.PIPE).stdout.read()
return action
else:
return None
def crop_to_center(self, source, target, target_width, target_height):
    """Scale then crop *source* (including a PDF) to its center using sips.

    source: full path; target: file or directory; target_width and
    target_height: int or str pixel sizes. Returns the sips output, or None
    when the source is missing or reports a non-positive size.
    """
    if os.path.exists(source):
        # Best-effort sizing: fall back to a fixed resample height when the
        # source properties cannot be read.
        try:
            properties = self.sips_get_properties(source)
            width, height = int(properties['pixelWidth']), int(properties['pixelHeight'])
            if width > 0 and height > 0:
                # True (float) division is required here: with Python 2
                # integer division these factors truncated to 0 whenever the
                # target was smaller than the source, so the branch below
                # effectively always chose the height-based base size.
                widthfactor = float(target_width) / width
                heightfactor = float(target_height) / height
            else:
                return None
            base_heightwidth = (width // 7) if widthfactor > heightfactor else (height // 8)
        except Exception:
            # Debug `print inst` removed; fall back to a safe constant.
            base_heightwidth = 210
        resize_cmd = ["sips", "--resampleHeight", str(base_heightwidth), "--cropToHeightWidth", str(target_width), str(target_height), "--setProperty", "format", "png", source, "--out", target]
        action = subprocess.Popen(resize_cmd, stdout=subprocess.PIPE).stdout.read()
        return action
    else:
        return None
def get_pdf_dimensions(self, path):
    """Return per-page dimensions of the PDF at *path* using pyPdf.

    Returns a list of {'page', 'width', 'height'} dicts, or None when the
    file cannot be opened/parsed or has no pages.
    """
    try:
        # open() replaces the Python-2-only file() builtin.
        pdf = PdfFileReader(open(path, "rb"))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return None
    page_list = []
    if pdf.getNumPages() > 0:
        for page_number in range(0, pdf.getNumPages()):
            page = pdf.getPage(page_number)
            page_list.append({'page': page_number, 'width': page.mediaBox.getLowerRight_x(), 'height': page.mediaBox.getUpperLeft_y()})
        return page_list
    else:
        return None
def joinpdf(self, input_list, output_file):
    """Join the first page of each PDF in *input_list* into one multipage
    PDF written to *output_file*, using pyPdf.
    """
    output = PdfFileWriter()
    # The inputs must stay open until output.write() has run: pyPdf reads
    # page data lazily from the underlying streams. The original leaked
    # every handle; we now close them all once the output is written.
    streams = []
    for f in input_list:
        stream = open(f, "rb")  # open() replaces the Python-2-only file()
        streams.append(stream)
        output.addPage(PdfFileReader(stream).getPage(0))
    outputStream = open(output_file, "wb")
    try:
        output.write(outputStream)
    finally:
        outputStream.close()
        for stream in streams:
            stream.close()
def pdf_get_no_pages(self, input_file):
    """Return the number of pages in *input_file* using pyPdf, or None on
    any failure (missing file, unparsable PDF)."""
    try:
        # open() replaces the Python-2-only file() builtin; except narrowed
        # from bare `except:` so KeyboardInterrupt is not swallowed.
        pdf_input = PdfFileReader(open(input_file, "rb"))
        return pdf_input.getNumPages()
    except Exception:
        return None
def splitpdf(self, input_file, output_dir):
    """Split *input_file* into single-page PDFs inside *output_dir* (pyPdf).

    Returns a "saved [...]" summary string on success, or the caught
    exception instance when the input cannot be opened (legacy contract,
    preserved for existing callers).
    """
    try:
        # open() replaces the Python-2-only file() builtin.
        input1 = PdfFileReader(open(input_file, "rb"))
    except Exception as inst:
        return inst
    files = []
    for page_number in range(0, input1.getNumPages()):
        page = input1.getPage(page_number)
        fname = os.path.basename(input_file).replace('.pdf', '_%s.pdf' % (page_number + 1))
        fpath = os.path.join(output_dir, fname)
        files.append(fpath)
        output = PdfFileWriter()
        output.addPage(page)
        # Close each page file explicitly -- the original leaked one handle
        # per page.
        output_stream = open(fpath, "wb")
        try:
            output.write(output_stream)
        finally:
            output_stream.close()
    return 'saved %s' % files
class CommandTests(unittest.TestCase):
    """Placeholder test case for Command; no tests are implemented yet."""
    def setUp(self):
        # No fixtures required yet.
        pass
if __name__ == '__main__':
    # Run the (currently empty) unittest suite when executed as a script.
    unittest.main()
Removed debug print statements.
#!/usr/bin/env python
# encoding: utf-8
"""
cmd.py
Created by Geert Dekkers on 2010-03-28.
Copyright (c) 2010 Geert Dekkers Web Studio. All rights reserved.
Class dedicated to subprocess functions
"""
import sys
import os
import unittest
import subprocess
from pyPdf import PdfFileWriter, PdfFileReader
class Command:
    """Collection of subprocess wrappers around ffmpeg, sips and pyPdf."""
    def __init__(self):
        # Stateless helper object; nothing to initialise.
        pass
    def ffmpeg_simple(self, finput, foutput, dimensions=None, verbose=False):
        """A simple version of the ffmpeg wrapper. Takes input & output, optionally the height/width."""
        if dimensions:
            # dimensions is expected to be an iterable of strings, e.g. ('640', '480').
            size = 'x'.join(dimensions)
            # NOTE(review): 'ffmpeg ' below carries a trailing space -- that
            # looks like it would fail to exec; confirm this branch is used.
            cmd = [ 'ffmpeg ', '-i', finput, '-s', size, '-y', '-ar', '11025', '-b', '800', foutput]
        else:
            cmd = [ 'ffmpeg', '-i', finput, '-y', '-ar', '11025', foutput]
        proc = subprocess.Popen(cmd)
        # communicate() returns (stdout, stderr); stdout is None here since no
        # pipe was requested, so `verbose` (parameter is rebound) stays falsy.
        verbose = proc.communicate()[0]
        if not verbose: # Return the full path AND filename if verbose is set to True
            if dimensions:
                return foutput, dimensions
            else:
                return foutput
        else:
            return verbose
    def ffmpeg(self, finput, foutput, size="other", defaultwidth=917, frame=1, format='png', verbose=False):
        """ Converts all sorts of video formats to a clip in .flv format or set of images.
        The number of frames can be set in de args.
        Just a python wrapper for ffmpeg.
        Takes:
        1. finput (str), a path
        2. foutput (str). This can be:
        a) full path including a filename if arg 3 is "large", or "cropped".
        b) full path to a directory if arg 3 is "tiny", "small", or "fullsize"
        3. size (str), either one of the above, or left blank. In the latter
        case, ffmpeg will be instructed to get the first 180 frames of the source
        file.
        4. defaultwidth (int), a fallback in case a child function fails to return a value
        5. Image format is png by default -- only if you want a slice of the vid back.
        Needs to be something that both ffmpeg and sips can handle.
        6. Return the full cammand list
        For sound only files, use "large".
        (from man ffmpeg)
        *You can extract images from a video:
        ffmpeg −i foo.avi −r 1 −s WxH −f image2 foo−%03d.jpeg
        This will extract one video frame per second from the video and will output them in files named
        foo−001.jpeg,foo−002.jpeg,etc. Images will be rescaled to fit the newWxH values.
        The syntax foo−%03d.jpeg specifies to use a decimal number composed of three digits padded with
        zeroes to express the sequence number. It is the same syntax supported by the C printf function, but only
        formats accepting a normal integer are suitable.
        If you want to extract just a limited number of frames, you can use the above command in combination
        with the −vframes or −t option, or in combination with −ss to start extracting from a certain point in time.
        """
        dimensions = {} # Init a dict to hold dimensions
        # Build the ffmpeg command line for the requested output size.
        if size == 'large':
            cmd = ["ffmpeg","-i", finput, "-y","-ar","11025", foutput]
        elif size == 'cropped':
            cmd = ["ffmpeg","-i",finput,"-y","-fs","100000",foutput]
        elif size == 'tiny' or size == 'small':
            fname = '/'.join([foutput, os.path.splitext(os.path.basename(finput))[0] + ".png"])
            cmd = ["ffmpeg", "-i", finput, "-y", "-vframes", "1", "-ss", unicode(frame), fname]
        elif size == 'fullsize':
            fname = '/'.join([foutput, os.path.splitext(os.path.basename(finput))[0] + ".jpg"])
            cmd = ["ffmpeg", "-i", finput, "-y", "-vframes", "1", "-ss", unicode(frame), fname]
        else:
            # Default: grab the first 180 frames, no audio, qqvga size.
            cmd = ["ffmpeg","-i",finput,"-y","-vframes","180","-an","-s","qqvga",foutput]
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        verbose = proc.communicate()[0]
        # Post-process the grabbed frame depending on the requested size.
        if size == 'tiny': self.crop_to_center(fname, foutput,29,29)
        if size == 'small': self.crop_to_center(fname, foutput,148,148)
        if size == 'fullsize':
            dimensions = self.sips_get_properties(fname)
            # NOTE(review): if sips_get_properties returns None, `width` is
            # never bound and the next line raises NameError -- confirm.
            if dimensions: width = dimensions['pixelWidth'] if dimensions.has_key('pixelWidth') else defaultwidth
            self.sips_resize(fname, foutput, width, format)
        if not verbose: # Return the full path AND filename if verbose is set to True
            if dimensions:
                return foutput, dimensions
            else:
                return foutput
        else:
            return verbose
    def sips_get_properties(self, source):
        """ Gets item propertes (including a PDF) using sips"""
        # Get all properties in a dict
        properties = {}
        optimized = [] # List in which to optimise items (ie strip white space)
        prop_cmd = ["sips", "-g", "all", source]
        result = subprocess.Popen(prop_cmd, stdout=subprocess.PIPE).stdout.read()
        r = result.split('\n')
        # The first output line is the bare file path; label it so it splits
        # like the "key: value" lines below.
        r[0] = ':'.join(['file', r[0]])
        for item in r:
            if item:
                optimized.append(item.strip())
        for kv in optimized:
            try:
                properties.setdefault(* kv.split(':'))
            except Exception, inst:
                # Lines that do not split into exactly (key, value) are skipped.
                pass
        if properties:
            for k, v in properties.iteritems(): properties[k] = v.strip()
            return properties
        else:
            # sips produced no parsable output.
            return None
    def sips_reformat(self, source, target, format):
        """ Reformats images using sips. See args:
        1) path to source file
        2) path to target dir
        3) format (one of string jpeg | tiff| png | gif | jp2 | pict | bmp | qtif | psd | sgi | tga | pdf)
        """
        if os.path.exists(source):
            cmd = ["sips", "--setProperty", "format", format, source, "--out", target]
            action = subprocess.Popen(cmd,stdout=subprocess.PIPE).stdout.read()
            return action
        else:
            # Missing source file.
            return None
    def sips_resize(self, source, target, target_width, format):
        """Scales any source to specified width, respecting aspect using sips.
        Takes source (full path), target (can be directory), target_width (string or int), format (one of string jpeg| tiff| png | gif | jp2 | pict | bmp | qtif | psd | sgi | tga )"""
        if os.path.exists(source):
            resize_cmd = ["sips", "--resampleWidth", str(target_width), "--setProperty", "format", format, source, "--out", target]
            action = subprocess.Popen(resize_cmd,stdout=subprocess.PIPE).stdout.read()
            return action
        else:
            # Missing source file.
            return None
    def crop_to_center(self, source, target, target_width, target_height):
        """ Scales and then crops source (including a PDF) to center with sips in relation to size. Uses sips.
        Takes source (full path), target (can be a directory), target_width (string or int), target_height ( string or int)"""
        if os.path.exists(source):
            # We don't want the entire function breaking if we can't get the properties (but if we can't get them what else will break?)
            try:
                properties = self.sips_get_properties(source)
                width, height = int(properties['pixelWidth']), int(properties['pixelHeight'])
                if width > 0 and height > 0:
                    # NOTE(review): these are Python 2 integer divisions, so
                    # the factors truncate to 0 when target < source and the
                    # comparison below nearly always takes the height branch;
                    # `aspect` is also unused. Confirm intended behaviour.
                    aspect = target_width/target_height
                    widthfactor = target_width/width
                    heightfactor = target_height/height
                else:
                    return None
                base_heightwidth = (width/7) if widthfactor > heightfactor else (height/8)
            except Exception, inst:
                # Fall back to a fixed resample height on any failure.
                pass
                base_heightwidth = 210
            resize_cmd = ["sips", "--resampleHeight", str(base_heightwidth), "--cropToHeightWidth", str(target_width), str(target_height), "--setProperty", "format", "png", source, "--out", target]
            action = subprocess.Popen(resize_cmd,stdout=subprocess.PIPE).stdout.read()
            return action
        else:
            # Missing source file.
            return None
    def get_pdf_dimensions(self, path):
        """Get pdf dimensions using pyPdf"""
        # Returns a list of {'page', 'width', 'height'} dicts, or None on
        # open/parse failure or an empty document.
        try:
            pdf = PdfFileReader(file(path, "rb"))
        except:
            return None
        page_list = []
        if pdf.getNumPages() > 0:
            for page_number in range(0, pdf.getNumPages()):
                page = pdf.getPage(page_number)
                page_list.append({'page': page_number, 'width': page.mediaBox.getLowerRight_x(), 'height': page.mediaBox.getUpperLeft_y()})
            return page_list
        else: return None
    def joinpdf(self, input_list, output_file):
        """Join list of pdfs to multipage using pyPdf."""
        # Takes the first page of each input file.
        # NOTE(review): the input file handles are never closed.
        output = PdfFileWriter()
        for f in input_list:
            input_file = PdfFileReader(file(f, "rb"))
            output.addPage(input_file.getPage(0))
        outputStream = file(output_file, "wb")
        output.write(outputStream)
        outputStream.close()
    def pdf_get_no_pages(self, input_file):
        """Return number of pages in a pdf using pyPdf."""
        try:
            pdf_input = PdfFileReader(file(input_file, "rb"))
            return pdf_input.getNumPages()
        except:
            # Any failure (missing file, unparsable PDF) yields None.
            return None
    def splitpdf(self, input_file, output_dir):
        """Split pdf to single-page files using pyPdf"""
        # Returns a "saved [...]" summary string, or the caught exception
        # instance when the input cannot be opened.
        try:
            input1 = PdfFileReader(file(input_file, "rb"))
        except Exception, inst:
            return inst
        files = []
        for page_number in range(0, input1.getNumPages()):
            page = input1.getPage(page_number)
            fname = os.path.basename(input_file).replace('.pdf', '_%s.pdf' % (page_number + 1))
            fpath = os.path.join(output_dir, fname)
            files.append(fpath)
            # One writer (and one output file) per page.
            # NOTE(review): output_stream is never closed.
            output = PdfFileWriter()
            output.addPage(page)
            output_stream = file(fpath, "wb")
            output.write(output_stream)
        return 'saved %s' % files
class CommandTests(unittest.TestCase):
    """Placeholder test case for Command; no tests are implemented yet."""
    def setUp(self):
        # No fixtures required yet.
        pass
if __name__ == '__main__':
    # Run the (currently empty) unittest suite when executed as a script.
    unittest.main()
runtime/deps/libuv: change where it looks for 'gyp'
Instead of 'build/gyp' use '../gyp'. We could have used a symbolic
link but that doesn't work so well on Windows.
|
import itertools, re, sys
from django.conf import urls
from django.core import exceptions
from django.db.models import base as models_base
from django.db.models.sql import constants
from django.utils import datastructures
from tastypie import bundle as tastypie_bundle, exceptions as tastypie_exceptions, fields as tastypie_fields, http, resources, utils
import mongoengine
from mongoengine import queryset
import bson
from tastypie_mongoengine import fields
# When Tastypie accesses query terms used by QuerySet it assumes the interface
# of Django ORM. We use a mock Query object to provide the same interface and
# return query terms by MongoEngine. MongoEngine code might not expose these
# query terms, so we fallback to hard-coded values.
# Fix: the getattr() result was previously discarded, so the hard-coded
# fallback never took effect and the class body below raised AttributeError
# on MongoEngine versions that do not expose QUERY_TERMS_ALL. Assign the
# result back to the module so the fallback actually applies.
queryset.QUERY_TERMS_ALL = getattr(queryset, 'QUERY_TERMS_ALL', ('ne', 'gt', 'gte', 'lt', 'lte', 'in', 'nin', 'mod', 'all', 'size', 'exists', 'not', 'within_distance', 'within_spherical_distance', 'within_box', 'within_polygon', 'near', 'near_sphere','contains', 'icontains', 'startswith', 'istartswith', 'endswith', 'iendswith', 'exact', 'iexact', 'match',))

class Query(object):
    # Mimics just enough of Django ORM's Query interface for Tastypie's
    # filter validation: a mapping of supported query terms.
    query_terms = dict([(query_term, None) for query_term in queryset.QUERY_TERMS_ALL])

queryset.QuerySet.query = Query()
# Extracts the polymorphic subtype name from a Content-Type header such as
# "application/json; type=subresource".
CONTENT_TYPE_RE = re.compile('.*; type=([\w\d-]+);?')

class NOT_HYDRATED:
    # Sentinel used in full_hydrate() to distinguish "not hydrated yet"
    # from a legitimate None value.
    pass
class ListQuerySet(datastructures.SortedDict):
def _process_filter_value(self, value):
# Sometimes value is passed as a list of one value
# (if filter was converted from QueryDict, for example)
if isinstance(value, (list, tuple)):
assert len(value) == 1
return value[0]
else:
return value
def filter(self, **kwargs):
result = self
# pk optimization
if 'pk' in kwargs:
pk = self._process_filter_value(kwargs.pop('pk'))
if pk in result:
result = ListQuerySet([(pk, result[pk])])
# Sometimes None is passed as a pk to not filter by pk
elif pk is not None:
result = ListQuerySet()
for field, value in kwargs.iteritems():
value = self._process_filter_value(value)
if constants.LOOKUP_SEP in field:
raise tastypie_exceptions.InvalidFilterError("Unsupported filter: (%s, %s)" % (field, value))
try:
result = ListQuerySet([(obj.pk, obj) for obj in result.itervalues() if getattr(obj, field) == value])
except AttributeError, e:
raise tastypie_exceptions.InvalidFilterError(e)
return result
def attrgetter(self, attr):
def g(obj):
return self.resolve_attr(obj, attr)
return g
def resolve_attr(self, obj, attr):
for name in attr.split(constants.LOOKUP_SEP):
while isinstance(obj, list):
# Try to be a bit similar to MongoDB
for o in obj:
if hasattr(o, name):
obj = o
break
else:
obj = obj[0]
obj = getattr(obj, name)
return obj
def order_by(self, *field_names):
if not len(field_names):
return self
result = self
for field in reversed(field_names):
if field.startswith('-'):
reverse = True
field = field[1:]
else:
reverse = False
try:
result = [(obj.pk, obj) for obj in sorted(result, key=self.attrgetter(field), reverse=reverse)]
except (AttributeError, IndexError), e:
raise tastypie_exceptions.InvalidSortError(e)
return ListQuerySet(result)
def __iter__(self):
return self.itervalues()
def __getitem__(self, key):
# Tastypie access object_list[0], so we pretend to be
# a list here (order is same as our iteration order)
if isinstance(key, (int, long)):
return itertools.islice(self, key, key+1).next()
# Tastypie also access sliced object_list in paginator
elif isinstance(key, slice):
return itertools.islice(self, key.start, key.stop, key.step)
else:
return super(ListQuerySet, self).__getitem__(key)
# Adapted from PEP 257
def trim(docstring):
    """Dedent *docstring* per PEP 257 and return only its first paragraph."""
    if not docstring:
        return ''
    # Convert tabs to spaces (following the normal Python rules)
    # and split into a list of lines:
    lines = docstring.expandtabs().splitlines()
    # Determine minimum indentation (first line doesn't count).
    # sys.maxsize (available since 2.6) replaces the Python-2-only
    # sys.maxint; it is only used as an "infinite" sentinel here.
    indent = sys.maxsize
    for line in lines[1:]:
        stripped = line.lstrip()
        if stripped:
            indent = min(indent, len(line) - len(stripped))
    # Remove indentation (first line is special):
    trimmed = [lines[0].strip()]
    if indent < sys.maxsize:
        for line in lines[1:]:
            trimmed.append(line[indent:].rstrip())
    # Strip off trailing and leading blank lines:
    while trimmed and not trimmed[-1]:
        trimmed.pop()
    while trimmed and not trimmed[0]:
        trimmed.pop(0)
    # Return the first paragraph as a single string:
    return '\n'.join(trimmed).split('\n\n')[0]
class MongoEngineModelDeclarativeMetaclass(resources.ModelDeclarativeMetaclass):
    """
    This class has the same functionality as its super ``ModelDeclarativeMetaclass``.
    Only thing it does differently is how it sets ``object_class`` and ``queryset`` attributes.
    This is an internal class and is not used by the end user of tastypie_mongoengine.
    """
    def __new__(self, name, bases, attrs):
        meta = attrs.get('Meta')
        # Derive object_class/queryset from each other when only one is given.
        if meta:
            if hasattr(meta, 'queryset') and not hasattr(meta, 'object_class'):
                setattr(meta, 'object_class', meta.queryset._document)
            if hasattr(meta, 'object_class') and not hasattr(meta, 'queryset'):
                if hasattr(meta.object_class, 'objects'):
                    setattr(meta, 'queryset', meta.object_class.objects.all())
        # Skip ModelDeclarativeMetaclass.__new__ (it assumes Django ORM) and
        # call its parent instead.
        new_class = super(resources.ModelDeclarativeMetaclass, self).__new__(self, name, bases, attrs)
        include_fields = getattr(new_class._meta, 'fields', [])
        excludes = getattr(new_class._meta, 'excludes', [])
        field_names = new_class.base_fields.keys()
        for field_name in field_names:
            if field_name == 'resource_uri':
                if hasattr(new_class, '_parent'):
                    if new_class._parent._meta.object_class and issubclass(new_class._parent._meta.object_class, mongoengine.EmbeddedDocument):
                        # TODO: We do not support yet nested resources
                        # If parent is embedded document, then also this one do not have its own resource_uri
                        del(new_class.base_fields[field_name])
                elif new_class._meta.object_class and issubclass(new_class._meta.object_class, mongoengine.EmbeddedDocument):
                    # Embedded documents which are not in lists (do not have _parent) do not have their own resource_uri
                    del(new_class.base_fields[field_name])
            if field_name in new_class.declared_fields:
                continue
            # Apply Meta.fields / Meta.excludes restrictions.
            if len(include_fields) and not field_name in include_fields:
                del(new_class.base_fields[field_name])
            if len(excludes) and field_name in excludes:
                del(new_class.base_fields[field_name])
        # Add in the new fields
        new_class.base_fields.update(new_class.get_fields(include_fields, excludes))
        if getattr(new_class._meta, 'include_absolute_url', True):
            if not 'absolute_url' in new_class.base_fields:
                new_class.base_fields['absolute_url'] = tastypie_fields.CharField(attribute='get_absolute_url', readonly=True)
        elif 'absolute_url' in new_class.base_fields and not 'absolute_url' in attrs:
            del(new_class.base_fields['absolute_url'])
        # Expose a read-only resource_type field for polymorphic resources.
        type_map = getattr(new_class._meta, 'polymorphic', {})
        if type_map and getattr(new_class._meta, 'include_resource_type', True):
            if not 'resource_type' in new_class.base_fields:
                new_class.base_fields['resource_type'] = tastypie_fields.CharField(readonly=True)
        elif 'resource_type' in new_class.base_fields and not 'resource_type' in attrs:
            del(new_class.base_fields['resource_type'])
        seen_types = set()
        for typ, resource in type_map.iteritems():
            if resource == 'self':
                # 'self' is resolved to the class being constructed.
                type_map[typ] = new_class
                break
            # In the code for polymorphic resources we are assuming
            # that document classes are not duplicated among used resources
            # (that each resource is linked to its own document class)
            # So we are checking this assumption here
            if type_map[typ]._meta.object_class in seen_types:
                raise exceptions.ImproperlyConfigured("Used polymorphic resources should each use its own document class.")
            else:
                seen_types.add(type_map[typ]._meta.object_class)
        return new_class
class MongoEngineResource(resources.ModelResource):
    """
    Adaptation of ``ModelResource`` to MongoEngine.
    """
    __metaclass__ = MongoEngineModelDeclarativeMetaclass
    def dispatch_subresource(self, request, subresource_name, **kwargs):
        # Route a request for an embedded list to the subresource declared
        # on the matching EmbeddedListField.
        field = self.fields[subresource_name]
        resource = field.to_class(self._meta.api_name)
        return resource.dispatch(request=request, **kwargs)
    def base_urls(self):
        # Prepend list/detail URL patterns for every EmbeddedListField so
        # they take precedence over the standard resource URLs.
        base = super(MongoEngineResource, self).base_urls()
        embedded_urls = []
        embedded = ((name, obj) for name, obj in self.fields.iteritems() if isinstance(obj, fields.EmbeddedListField))
        for name, obj in embedded:
            embedded_urls.extend((
                urls.url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w-]*)/(?P<subresource_name>%s)%s$" % (self._meta.resource_name, name, utils.trailing_slash()),
                    self.wrap_view('dispatch_subresource'),
                    {'request_type': 'list'},
                    name='api_dispatch_subresource_list',
                ),
                urls.url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w-]*)/(?P<subresource_name>%s)/(?P<index>\d+)%s$" % (self._meta.resource_name, name, utils.trailing_slash()),
                    self.wrap_view('dispatch_subresource'),
                    {'request_type': 'detail'},
                    name='api_dispatch_subresource_detail',
                ),
            ))
        return embedded_urls + base
    def get_object_list(self, request):
        """
        An ORM-specific implementation of ``get_object_list``.
        Returns a queryset that may have been limited by other overrides.
        """
        return self._meta.queryset.clone()
    def _get_object_type(self, request):
        # Read the requested polymorphic type from the Content-Type header
        # ("...; type=<name>") or the ?type= query parameter.
        match = CONTENT_TYPE_RE.match(request.META.get('CONTENT_TYPE', ''))
        if match:
            return match.group(1)
        elif 'type' in request.GET:
            return request.GET.get('type')
        else:
            return None
    def _wrap_polymorphic(self, resource, fun):
        # Temporarily swap this resource's object_class/queryset/fields for
        # those of the concrete polymorphic *resource* while running *fun*,
        # restoring the originals afterwards.
        object_class = self._meta.object_class
        qs = self._meta.queryset
        base_fields = self.base_fields
        fields = self.fields
        try:
            self._meta.object_class = resource._meta.object_class
            self._meta.queryset = resource._meta.queryset
            self.base_fields = resource.base_fields.copy()
            self.fields = resource.fields.copy()
            if getattr(self._meta, 'include_resource_type', True):
                self.base_fields['resource_type'] = base_fields['resource_type']
                self.fields['resource_type'] = fields['resource_type']
            return fun()
        finally:
            self._meta.object_class = object_class
            self._meta.queryset = qs
            self.base_fields = base_fields
            self.fields = fields
    def _wrap_request(self, request, fun):
        # Run *fun* in the context of the polymorphic type requested by
        # *request* (no-op for non-polymorphic resources).
        type_map = getattr(self._meta, 'polymorphic', {})
        if not type_map:
            return fun()
        object_type = self._get_object_type(request)
        if not object_type:
            # Polymorphic resources are enabled, but
            # nothing is passed, so set it to a default
            try:
                object_type = self._get_type_from_class(type_map, self._meta.object_class)
            except KeyError:
                raise tastypie_exceptions.BadRequest("Invalid object type.")
        if object_type not in type_map:
            raise tastypie_exceptions.BadRequest("Invalid object type.")
        resource = type_map[object_type](self._meta.api_name)
        # Optimization
        if resource._meta.object_class is self._meta.object_class:
            return fun()
        return self._wrap_polymorphic(resource, fun)
    def dispatch(self, request_type, request, **kwargs):
        # We process specially only requests with payload
        if not request.body:
            assert request.method.lower() not in ('put', 'post', 'patch'), request.method
            return super(MongoEngineResource, self).dispatch(request_type, request, **kwargs)
        assert request.method.lower() in ('put', 'post', 'patch'), request.method
        return self._wrap_request(request, lambda: super(MongoEngineResource, self).dispatch(request_type, request, **kwargs))
    def get_schema(self, request, **kwargs):
        # Schema is type-specific for polymorphic resources.
        return self._wrap_request(request, lambda: super(MongoEngineResource, self).get_schema(request, **kwargs))
    def _get_resource_from_class(self, type_map, cls):
        # Reverse lookup: resource whose object_class is *cls*.
        for resource in type_map.itervalues():
            if resource._meta.object_class is cls:
                return resource
        raise KeyError(cls)
    def _get_type_from_class(self, type_map, cls):
        # As we are overriding self._meta.object_class we have to make sure
        # that we do not miss real match, so if self._meta.object_class
        # matches, we still check other items, otherwise we return immediately
        res = None
        for typ, resource in type_map.iteritems():
            if resource._meta.object_class is cls:
                if resource._meta.object_class is self._meta.object_class:
                    res = typ
                else:
                    return typ
        if res is not None:
            return res
        else:
            raise KeyError(cls)
    def dehydrate_resource_type(self, bundle):
        # Value of the read-only resource_type field: the polymorphic type
        # name of the bundled object (None when not polymorphic).
        type_map = getattr(self._meta, 'polymorphic', {})
        if not type_map:
            return None
        return self._get_type_from_class(type_map, bundle.obj.__class__)
    def full_dehydrate(self, bundle):
        # Dehydrate using the concrete polymorphic resource for the object's
        # class, when polymorphism is configured.
        type_map = getattr(self._meta, 'polymorphic', {})
        if not type_map:
            return super(MongoEngineResource, self).full_dehydrate(bundle)
        # Optimization
        if self._meta.object_class is bundle.obj.__class__:
            return super(MongoEngineResource, self).full_dehydrate(bundle)
        resource = self._get_resource_from_class(type_map, bundle.obj.__class__)(self._meta.api_name)
        return self._wrap_polymorphic(resource, lambda: super(MongoEngineResource, self).full_dehydrate(bundle))
    def full_hydrate(self, bundle):
        # When updating objects, we want to force only updates of the same type, and object
        # should be completely replaced if type is changed, so we throw and exception here
        # to direct program logic flow (it is cached and replace instead of update is tried)
        if bundle.obj and self._meta.object_class is not bundle.obj.__class__:
            raise tastypie_exceptions.NotFound("A document instance matching the provided arguments could not be found.")
        bundle = super(MongoEngineResource, self).full_hydrate(bundle)
        # We redo check for required fields as Tastypie is not
        # reliable as it does checks in an inconsistent way
        # (https://github.com/toastdriven/django-tastypie/issues/491)
        for field_object in self.fields.itervalues():
            if field_object.readonly:
                continue
            if not field_object.attribute:
                continue
            value = NOT_HYDRATED
            # Tastypie also skips setting value if it is None, but this means
            # updates to None are ignored: this is not good as it hides invalid
            # PUT/PATCH REST requests (setting value to None which should fail
            # validation (field required) is simply ignored and value is left
            # as it is)
            # (https://github.com/toastdriven/django-tastypie/issues/492)
            # We hydrate field again only if existing value is not None
            if getattr(bundle.obj, field_object.attribute, None) is not None:
                # Tastypie also ignores missing fields in PUT,
                # so we check for missing field here
                # (https://github.com/toastdriven/django-tastypie/issues/496)
                if field_object.instance_name not in bundle.data:
                    if field_object._default is not tastypie_fields.NOT_PROVIDED:
                        if callable(field_object.default):
                            value = field_object.default()
                        else:
                            value = field_object.default
                    else:
                        value = None
                else:
                    value = field_object.hydrate(bundle)
                if value is None:
                    # This does not really set None in a way that calling
                    # getattr on bundle.obj would return None later on
                    # This is how MongoEngine is implemented
                    # (https://github.com/hmarr/mongoengine/issues/505)
                    setattr(bundle.obj, field_object.attribute, None)
            if field_object.blank or field_object.null:
                continue
            # We are just trying to fix Tastypie here, for other "null" values
            # like [] and {} we leave to validate bellow to catch them
            if getattr(bundle.obj, field_object.attribute, None) is None or value is None: # We also have to check value, read comment above
                raise tastypie_exceptions.ApiFieldError("The '%s' field has no data and doesn't allow a default or null value." % field_object.instance_name)
        # We validate MongoEngine object here so that possible exception
        # is thrown before going to MongoEngine layer, wrapped in
        # Django exception so that it is handled properly
        # is_valid method is too early as bundle.obj is not yet ready then
        try:
            # Validation fails for unsaved related resources, so
            # we fake pk here temporary, for validation code to
            # assume resource is saved
            pk = getattr(bundle.obj, 'pk', None)
            try:
                if pk is None:
                    bundle.obj.pk = bson.ObjectId()
                bundle.obj.validate()
            finally:
                if pk is None:
                    bundle.obj.pk = pk
        except mongoengine.ValidationError, e:
            raise exceptions.ValidationError(e.message)
        return bundle
    def build_schema(self):
        # Extend the standard schema with ListField content descriptions and
        # (for polymorphic resources) the list of resource types.
        data = super(MongoEngineResource, self).build_schema()
        for field_name, field_object in self.fields.items():
            # We process ListField specially here (and not use field's
            # build_schema) so that Tastypie's ListField can be used
            if isinstance(field_object, tastypie_fields.ListField):
                if field_object.field:
                    data['fields'][field_name]['content'] = {}
                    field_type = field_object.field.__class__.__name__.lower()
                    if field_type.endswith('field'):
                        field_type = field_type[:-5]
                    data['fields'][field_name]['content']['type'] = field_type
                    if field_object.field.__doc__:
                        data['fields'][field_name]['content']['help_text'] = trim(field_object.field.__doc__)
            if hasattr(field_object, 'build_schema'):
                data['fields'][field_name].update(field_object.build_schema())
        type_map = getattr(self._meta, 'polymorphic', {})
        if not type_map:
            return data
        data.update({
            'resource_types': type_map.keys(),
        })
        return data
    def obj_get(self, request=None, **kwargs):
        # MongoEngine exceptions are separate from Django exceptions, we combine them here
        try:
            return super(MongoEngineResource, self).obj_get(request, **kwargs)
        except self._meta.object_class.DoesNotExist, e:
            exp = models_base.subclass_exception('DoesNotExist', (self._meta.object_class.DoesNotExist, exceptions.ObjectDoesNotExist), self._meta.object_class.DoesNotExist.__module__)
            raise exp(*e.args)
        except queryset.DoesNotExist, e:
            exp = models_base.subclass_exception('DoesNotExist', (queryset.DoesNotExist, exceptions.ObjectDoesNotExist), queryset.DoesNotExist.__module__)
            raise exp(*e.args)
        except self._meta.object_class.MultipleObjectsReturned, e:
            exp = models_base.subclass_exception('MultipleObjectsReturned', (self._meta.object_class.MultipleObjectsReturned, exceptions.MultipleObjectsReturned), self._meta.object_class.MultipleObjectsReturned.__module__)
            raise exp(*e.args)
        except queryset.MultipleObjectsReturned, e:
            exp = models_base.subclass_exception('MultipleObjectsReturned', (queryset.MultipleObjectsReturned, exceptions.MultipleObjectsReturned), queryset.MultipleObjectsReturned.__module__)
            raise exp(*e.args)
        except mongoengine.ValidationError, e:
            # An invalid object id is treated as "does not exist" rather than
            # surfacing a validation error to the caller.
            exp = models_base.subclass_exception('DoesNotExist', (queryset.DoesNotExist, exceptions.ObjectDoesNotExist), queryset.DoesNotExist.__module__)
            raise exp(*e.args)
    def obj_update(self, bundle, request=None, **kwargs):
        # Fetch the existing document when the bundle does not carry one yet.
        if not bundle.obj or not getattr(bundle.obj, 'pk', None):
            try:
                bundle.obj = self.obj_get(request, **kwargs)
            except (queryset.DoesNotExist, exceptions.ObjectDoesNotExist):
                raise tastypie_exceptions.NotFound("A document instance matching the provided arguments could not be found.")
        bundle = self.full_hydrate(bundle)
        self.save_related(bundle)
        bundle.obj.save()
        m2m_bundle = self.hydrate_m2m(bundle)
        self.save_m2m(m2m_bundle)
        return bundle
    def obj_delete(self, request=None, **kwargs):
        # MongoEngine exceptions are separate from Django exceptions and Tastypie
        # expects Django exceptions, so we catch it here ourselves and raise NotFound
        try:
            return super(MongoEngineResource, self).obj_delete(request, **kwargs)
        except queryset.DoesNotExist:
            raise tastypie_exceptions.NotFound("A document instance matching the provided arguments could not be found.")
    @classmethod
    def api_field_from_mongo_field(cls, f, default=tastypie_fields.CharField):
        """
        Returns the field type that would likely be associated with each
        MongoEngine type.
        """
        result = default
        if isinstance(f, (mongoengine.ComplexDateTimeField, mongoengine.DateTimeField)):
            result = tastypie_fields.DateTimeField
        elif isinstance(f, mongoengine.BooleanField):
            result = tastypie_fields.BooleanField
        elif isinstance(f, mongoengine.FloatField):
            result = tastypie_fields.FloatField
        elif isinstance(f, mongoengine.DecimalField):
            result = tastypie_fields.DecimalField
        elif isinstance(f, mongoengine.IntField):
            result = tastypie_fields.IntegerField
        elif isinstance(f, (mongoengine.FileField, mongoengine.BinaryField)):
            result = tastypie_fields.FileField
        elif isinstance(f, mongoengine.DictField):
            result = tastypie_fields.DictField
        elif isinstance(f, mongoengine.ListField):
            result = tastypie_fields.ListField
        elif isinstance(f, mongoengine.GeoPointField):
            result = tastypie_fields.ListField
        elif isinstance(f, mongoengine.ObjectIdField):
            result = fields.ObjectId
        return result
    @classmethod
    def get_fields(cls, fields=None, excludes=None):
        """
        Given any explicit fields to include and fields to exclude, add
        additional fields based on the associated document.
        """
        final_fields = {}
        fields = fields or []
        excludes = excludes or []
        if not cls._meta.object_class:
            return final_fields
        for name, f in cls._meta.object_class._fields.iteritems():
            # If the field name is already present, skip
            if name in cls.base_fields:
                continue
            # If field is not present in explicit field listing, skip
            if fields and name not in fields:
                continue
            # If field is in exclude list, skip
            if excludes and name in excludes:
                continue
            # TODO: Might need it in the future
            #if cls.should_skip_field(f):
            #    continue
            api_field_class = cls.api_field_from_mongo_field(f)
            kwargs = {
                'attribute': name,
                'unique': f.unique,
                'null': not f.required,
                'help_text': f.help_text,
            }
            # If field is not required, it does not matter if set default value,
            # so we do
            if not f.required:
                kwargs['default'] = f.default
            else:
                # MongoEngine does not really differ between user-specified default
                # and its default, so we try to guess
                if isinstance(f, mongoengine.ListField):
                    if not callable(f.default) or f.default() != []: # If not MongoEngine's default
                        kwargs['default'] = f.default
                elif isinstance(f, mongoengine.DictField):
                    if not callable(f.default) or f.default() != {}: # If not MongoEngine's default
                        kwargs['default'] = f.default
                else:
                    if f.default is not None: # If not MongoEngine's default
                        kwargs['default'] = f.default
            final_fields[name] = api_field_class(**kwargs)
            final_fields[name].instance_name = name
            # We store MongoEngine field so that schema output can show
            # to which content the list is limited to (if any)
            if isinstance(f, mongoengine.ListField):
                final_fields[name].field = f.field
        return final_fields
class MongoEngineListResource(MongoEngineResource):
    """
    A MongoEngine resource used in conjunction with EmbeddedListField.

    Elements of an embedded list have no primary key of their own, so this
    resource uses the element's index inside the list as its ``pk`` and
    loads/saves everything through the parent document (``self.instance``).
    """

    def __init__(self, api_name=None):
        super(MongoEngineListResource, self).__init__(api_name)
        # Parent document instance; populated per-request in dispatch()
        self.instance = None
        # Resource for the parent document (``_parent`` is attached elsewhere)
        self.parent = self._parent(api_name)

    def _safe_get(self, request, **kwargs):
        """Fetch the parent document or answer with 404 immediately."""
        filters = self.remove_api_resource_names(kwargs)
        try:
            return self.parent.cached_obj_get(request=request, **filters)
        except (queryset.DoesNotExist, exceptions.ObjectDoesNotExist):
            raise tastypie_exceptions.ImmediateHttpResponse(response=http.HttpNotFound())

    def dispatch(self, request_type, request, **kwargs):
        # Resolve the parent document first; the element index captured from
        # the URL (if any) becomes the element's pk for the rest of dispatch
        index = None
        if 'index' in kwargs:
            index = kwargs.pop('index')
        self.instance = self._safe_get(request, **kwargs)
        # We use pk as index from now on
        kwargs['pk'] = index
        return super(MongoEngineListResource, self).dispatch(request_type, request, **kwargs)

    def remove_api_resource_names(self, url_dict):
        # Also strip our subresource URL captures, which are not filters
        kwargs_subset = super(MongoEngineListResource, self).remove_api_resource_names(url_dict)
        for key in ['subresource_name']:
            try:
                del(kwargs_subset[key])
            except KeyError:
                pass
        return kwargs_subset

    def get_object_list(self, request):
        # Expose the embedded list as a queryset-like mapping keyed by the
        # (stringified) element index
        if not self.instance:
            return ListQuerySet()

        def add_index(index, obj):
            # Assign the list index as the element's pk
            obj.pk = unicode(index)
            return obj

        return ListQuerySet([(unicode(index), add_index(index, obj)) for index, obj in enumerate(getattr(self.instance, self.attribute))])

    def obj_create(self, bundle, request=None, **kwargs):
        bundle.obj = self._meta.object_class()
        for key, value in kwargs.items():
            setattr(bundle.obj, key, value)
        bundle = self.full_hydrate(bundle)
        object_list = getattr(self.instance, self.attribute)
        object_list.append(bundle.obj)
        # The new element's pk is its position at the end of the list
        bundle.obj.pk = unicode(len(object_list) - 1)
        self.save_related(bundle)
        # Persist through the parent document (embedded documents cannot be
        # saved on their own)
        self.instance.save()
        m2m_bundle = self.hydrate_m2m(bundle)
        self.save_m2m(m2m_bundle)
        return bundle

    def obj_update(self, bundle, request=None, **kwargs):
        if not bundle.obj or not getattr(bundle.obj, 'pk', None):
            try:
                bundle.obj = self.obj_get(request, **kwargs)
            except (queryset.DoesNotExist, exceptions.ObjectDoesNotExist):
                raise tastypie_exceptions.NotFound("A document instance matching the provided arguments could not be found.")
        bundle = self.full_hydrate(bundle)
        object_list = getattr(self.instance, self.attribute)
        # Replace the element in place at its index (pk)
        object_list[int(bundle.obj.pk)] = bundle.obj
        self.save_related(bundle)
        self.instance.save()
        m2m_bundle = self.hydrate_m2m(bundle)
        self.save_m2m(m2m_bundle)
        return bundle

    def obj_delete(self, request=None, **kwargs):
        obj = kwargs.pop('_obj', None)
        if not getattr(obj, 'pk', None):
            try:
                obj = self.obj_get(request, **kwargs)
            except (queryset.DoesNotExist, exceptions.ObjectDoesNotExist):
                raise exceptions.NotFound("A document instance matching the provided arguments could not be found.")
        # Remove the element at its index and persist the parent document
        getattr(self.instance, self.attribute).pop(int(obj.pk))
        self.instance.save()

    def get_resource_uri(self, bundle_or_obj):
        """Build the subresource detail URI for the given list element."""
        if isinstance(bundle_or_obj, tastypie_bundle.Bundle):
            obj = bundle_or_obj.obj
        else:
            obj = bundle_or_obj
        kwargs = {
            'resource_name': self.parent._meta.resource_name,
            'subresource_name': self.attribute,
            'index': obj.pk,
        }
        if hasattr(obj, 'parent'):
            # pk could not exist in the case of nested resources, but we should not come here in this
            # case as we should remove resource_uri from fields in MongoEngineModelDeclarativeMetaclass
            # TODO: Support nested resources
            kwargs['pk'] = obj.parent.pk
        else:
            kwargs['pk'] = self.instance.pk
        if self._meta.api_name is not None:
            kwargs['api_name'] = self._meta.api_name
        return self._build_reverse_url('api_dispatch_subresource_detail', kwargs=kwargs)
Split the comment into multiple lines and removed the trailing comma.
import itertools, re, sys
from django.conf import urls
from django.core import exceptions
from django.db.models import base as models_base
from django.db.models.sql import constants
from django.utils import datastructures
from tastypie import bundle as tastypie_bundle, exceptions as tastypie_exceptions, fields as tastypie_fields, http, resources, utils
import mongoengine
from mongoengine import queryset
import bson
from tastypie_mongoengine import fields
# When Tastypie accesses query terms used by QuerySet it assumes the interface of Django ORM.
# We use a mock Query object to provide the same interface and return query terms by MongoEngine.
# MongoEngine code might not expose these query terms, so we fallback to hard-coded values.
# BUG FIX: the getattr() result was previously discarded, so the hard-coded fallback never
# took effect and the Query class body below would raise AttributeError on MongoEngine
# versions without QUERY_TERMS_ALL. Store the result back on the module instead.
queryset.QUERY_TERMS_ALL = getattr(queryset, 'QUERY_TERMS_ALL', ('ne', 'gt', 'gte', 'lt', 'lte', 'in', 'nin', 'mod', 'all', 'size', 'exists', 'not', 'within_distance', 'within_spherical_distance', 'within_box', 'within_polygon', 'near', 'near_sphere', 'contains', 'icontains', 'startswith', 'istartswith', 'endswith', 'iendswith', 'exact', 'iexact', 'match'))


class Query(object):
    # Mimics Django ORM's Query.query_terms mapping read by Tastypie
    query_terms = dict([(query_term, None) for query_term in queryset.QUERY_TERMS_ALL])

# Monkey-patch so Tastypie can read QuerySet.query.query_terms
queryset.QuerySet.query = Query()
# Parses an optional polymorphic type name out of a Content-Type header,
# e.g. "application/json; type=person". Use a raw string so that \w and \d
# are regex classes and not (invalid) string escape sequences.
CONTENT_TYPE_RE = re.compile(r'.*; type=([\w\d-]+);?')


class NOT_HYDRATED:
    """Sentinel marking a field value which has not been hydrated yet."""
    pass
class ListQuerySet(datastructures.SortedDict):
    """
    Queryset-like wrapper around an embedded list.

    Keys are the elements' pks (their stringified list indices), values are
    the elements themselves; iteration yields the values, so the object can
    stand in for a queryset in Tastypie code paths.
    """

    def _process_filter_value(self, value):
        # Sometimes value is passed as a list of one value
        # (if filter was converted from QueryDict, for example)
        if isinstance(value, (list, tuple)):
            assert len(value) == 1
            return value[0]
        else:
            return value

    def filter(self, **kwargs):
        """Filter elements by pk and/or exact attribute values."""
        result = self
        # pk optimization
        if 'pk' in kwargs:
            pk = self._process_filter_value(kwargs.pop('pk'))
            if pk in result:
                result = ListQuerySet([(pk, result[pk])])
            # Sometimes None is passed as a pk to not filter by pk
            elif pk is not None:
                result = ListQuerySet()
        for field, value in kwargs.iteritems():
            value = self._process_filter_value(value)
            # Related/nested lookups are not supported on embedded lists
            if constants.LOOKUP_SEP in field:
                raise tastypie_exceptions.InvalidFilterError("Unsupported filter: (%s, %s)" % (field, value))
            try:
                result = ListQuerySet([(obj.pk, obj) for obj in result.itervalues() if getattr(obj, field) == value])
            except AttributeError as e:
                raise tastypie_exceptions.InvalidFilterError(e)
        return result

    def attrgetter(self, attr):
        # Sort key factory resolving (possibly nested) attributes
        def g(obj):
            return self.resolve_attr(obj, attr)
        return g

    def resolve_attr(self, obj, attr):
        for name in attr.split(constants.LOOKUP_SEP):
            while isinstance(obj, list):
                # Try to be a bit similar to MongoDB
                for o in obj:
                    if hasattr(o, name):
                        obj = o
                        break
                else:
                    obj = obj[0]
            obj = getattr(obj, name)
        return obj

    def order_by(self, *field_names):
        """Return a copy sorted by the given fields ('-' prefix reverses)."""
        if not len(field_names):
            return self
        result = self
        for field in reversed(field_names):
            if field.startswith('-'):
                reverse = True
                field = field[1:]
            else:
                reverse = False
            try:
                # BUG FIX: rewrap into a ListQuerySet on every pass; previously
                # the intermediate result was a plain list of (pk, obj) tuples,
                # so sorting by more than one field iterated tuples instead of
                # documents and always raised InvalidSortError.
                result = ListQuerySet([(obj.pk, obj) for obj in sorted(result, key=self.attrgetter(field), reverse=reverse)])
            except (AttributeError, IndexError) as e:
                raise tastypie_exceptions.InvalidSortError(e)
        return result

    def __iter__(self):
        return self.itervalues()

    def __getitem__(self, key):
        # Tastypie access object_list[0], so we pretend to be
        # a list here (order is same as our iteration order)
        if isinstance(key, (int, long)):
            return next(itertools.islice(self, key, key + 1))
        # Tastypie also access sliced object_list in paginator
        elif isinstance(key, slice):
            return itertools.islice(self, key.start, key.stop, key.step)
        else:
            return super(ListQuerySet, self).__getitem__(key)
# Adapted from PEP 257
def trim(docstring):
    """
    Trim leading/trailing blank lines and common indentation from a
    docstring and return only its first paragraph.

    :param docstring: docstring text (may be ``None`` or empty)
    :returns: the cleaned first paragraph, or ``''``
    """
    if not docstring:
        return ''
    # Convert tabs to spaces (following the normal Python rules)
    # and split into a list of lines:
    lines = docstring.expandtabs().splitlines()
    # Determine minimum indentation (first line doesn't count):
    # sys.maxsize (available since Python 2.6) replaces the Python 2-only
    # sys.maxint, so this also runs on Python 3.
    indent = sys.maxsize
    for line in lines[1:]:
        stripped = line.lstrip()
        if stripped:
            indent = min(indent, len(line) - len(stripped))
    # Remove indentation (first line is special):
    trimmed = [lines[0].strip()]
    if indent < sys.maxsize:
        for line in lines[1:]:
            trimmed.append(line[indent:].rstrip())
    # Strip off trailing and leading blank lines:
    while trimmed and not trimmed[-1]:
        trimmed.pop()
    while trimmed and not trimmed[0]:
        trimmed.pop(0)
    # Return the first paragraph as a single string:
    return '\n'.join(trimmed).split('\n\n')[0]
class MongoEngineModelDeclarativeMetaclass(resources.ModelDeclarativeMetaclass):
    """
    This class has the same functionality as its super ``ModelDeclarativeMetaclass``.
    Only thing it does differently is how it sets ``object_class`` and ``queryset`` attributes.

    This is an internal class and is not used by the end user of tastypie_mongoengine.
    """

    def __new__(self, name, bases, attrs):
        meta = attrs.get('Meta')

        # Derive object_class and queryset from each other when only one
        # of them is declared on the resource's Meta
        if meta:
            if hasattr(meta, 'queryset') and not hasattr(meta, 'object_class'):
                setattr(meta, 'object_class', meta.queryset._document)
            if hasattr(meta, 'object_class') and not hasattr(meta, 'queryset'):
                if hasattr(meta.object_class, 'objects'):
                    setattr(meta, 'queryset', meta.object_class.objects.all())

        # Deliberately skips ModelDeclarativeMetaclass.__new__ and calls its
        # parent instead (note the explicit super() target)
        new_class = super(resources.ModelDeclarativeMetaclass, self).__new__(self, name, bases, attrs)
        include_fields = getattr(new_class._meta, 'fields', [])
        excludes = getattr(new_class._meta, 'excludes', [])
        field_names = new_class.base_fields.keys()

        for field_name in field_names:
            if field_name == 'resource_uri':
                if hasattr(new_class, '_parent'):
                    if new_class._parent._meta.object_class and issubclass(new_class._parent._meta.object_class, mongoengine.EmbeddedDocument):
                        # TODO: We do not support yet nested resources
                        # If parent is embedded document, then also this one do not have its own resource_uri
                        del(new_class.base_fields[field_name])
                elif new_class._meta.object_class and issubclass(new_class._meta.object_class, mongoengine.EmbeddedDocument):
                    # Embedded documents which are not in lists (do not have _parent) do not have their own resource_uri
                    del(new_class.base_fields[field_name])
            if field_name in new_class.declared_fields:
                continue
            if len(include_fields) and not field_name in include_fields:
                del(new_class.base_fields[field_name])
            if len(excludes) and field_name in excludes:
                del(new_class.base_fields[field_name])

        # Add in the new fields
        new_class.base_fields.update(new_class.get_fields(include_fields, excludes))

        # Optional absolute_url field, mirroring Tastypie's behaviour
        if getattr(new_class._meta, 'include_absolute_url', True):
            if not 'absolute_url' in new_class.base_fields:
                new_class.base_fields['absolute_url'] = tastypie_fields.CharField(attribute='get_absolute_url', readonly=True)
        elif 'absolute_url' in new_class.base_fields and not 'absolute_url' in attrs:
            del(new_class.base_fields['absolute_url'])

        # Optional resource_type field for polymorphic resources
        type_map = getattr(new_class._meta, 'polymorphic', {})
        if type_map and getattr(new_class._meta, 'include_resource_type', True):
            if not 'resource_type' in new_class.base_fields:
                new_class.base_fields['resource_type'] = tastypie_fields.CharField(readonly=True)
        elif 'resource_type' in new_class.base_fields and not 'resource_type' in attrs:
            del(new_class.base_fields['resource_type'])

        seen_types = set()
        for typ, resource in type_map.iteritems():
            if resource == 'self':
                type_map[typ] = new_class
                break
            # In the code for polymorphic resources we are assuming
            # that document classes are not duplicated among used resources
            # (that each resource is linked to its own document class)
            # So we are checking this assumption here
            if type_map[typ]._meta.object_class in seen_types:
                raise exceptions.ImproperlyConfigured("Used polymorphic resources should each use its own document class.")
            else:
                seen_types.add(type_map[typ]._meta.object_class)

        return new_class
class MongoEngineResource(resources.ModelResource):
    """
    Adaptation of ``ModelResource`` to MongoEngine.
    """

    __metaclass__ = MongoEngineModelDeclarativeMetaclass

    def dispatch_subresource(self, request, subresource_name, **kwargs):
        # Delegate the request to the resource of the embedded list field
        field = self.fields[subresource_name]
        resource = field.to_class(self._meta.api_name)
        return resource.dispatch(request=request, **kwargs)

    def base_urls(self):
        # Prepend list/detail URLs for every embedded list field to the
        # standard Tastypie URL patterns
        base = super(MongoEngineResource, self).base_urls()

        embedded_urls = []
        embedded = ((name, obj) for name, obj in self.fields.iteritems() if isinstance(obj, fields.EmbeddedListField))

        for name, obj in embedded:
            embedded_urls.extend((
                urls.url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w-]*)/(?P<subresource_name>%s)%s$" % (self._meta.resource_name, name, utils.trailing_slash()),
                    self.wrap_view('dispatch_subresource'),
                    {'request_type': 'list'},
                    name='api_dispatch_subresource_list',
                ),
                urls.url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w-]*)/(?P<subresource_name>%s)/(?P<index>\d+)%s$" % (self._meta.resource_name, name, utils.trailing_slash()),
                    self.wrap_view('dispatch_subresource'),
                    {'request_type': 'detail'},
                    name='api_dispatch_subresource_detail',
                ),
            ))

        return embedded_urls + base

    def get_object_list(self, request):
        """
        An ORM-specific implementation of ``get_object_list``.

        Returns a queryset that may have been limited by other overrides.
        """
        return self._meta.queryset.clone()

    def _get_object_type(self, request):
        # Polymorphic type can come from the Content-Type header
        # ("; type=<name>") or from the "type" GET parameter
        match = CONTENT_TYPE_RE.match(request.META.get('CONTENT_TYPE', ''))
        if match:
            return match.group(1)
        elif 'type' in request.GET:
            return request.GET.get('type')
        else:
            return None

    def _wrap_polymorphic(self, resource, fun):
        # Temporarily swap our meta/fields with those of the concrete
        # polymorphic resource while running fun(); always restore
        object_class = self._meta.object_class
        qs = self._meta.queryset
        base_fields = self.base_fields
        fields = self.fields
        try:
            self._meta.object_class = resource._meta.object_class
            self._meta.queryset = resource._meta.queryset
            self.base_fields = resource.base_fields.copy()
            self.fields = resource.fields.copy()
            # Keep our own resource_type field visible under the swap
            if getattr(self._meta, 'include_resource_type', True):
                self.base_fields['resource_type'] = base_fields['resource_type']
                self.fields['resource_type'] = fields['resource_type']
            return fun()
        finally:
            self._meta.object_class = object_class
            self._meta.queryset = qs
            self.base_fields = base_fields
            self.fields = fields

    def _wrap_request(self, request, fun):
        type_map = getattr(self._meta, 'polymorphic', {})
        if not type_map:
            return fun()

        object_type = self._get_object_type(request)

        if not object_type:
            # Polymorphic resources are enabled, but
            # nothing is passed, so set it to a default
            try:
                object_type = self._get_type_from_class(type_map, self._meta.object_class)
            except KeyError:
                raise tastypie_exceptions.BadRequest("Invalid object type.")

        if object_type not in type_map:
            raise tastypie_exceptions.BadRequest("Invalid object type.")

        resource = type_map[object_type](self._meta.api_name)

        # Optimization
        if resource._meta.object_class is self._meta.object_class:
            return fun()

        return self._wrap_polymorphic(resource, fun)

    def dispatch(self, request_type, request, **kwargs):
        # We process specially only requests with payload
        if not request.body:
            assert request.method.lower() not in ('put', 'post', 'patch'), request.method
            return super(MongoEngineResource, self).dispatch(request_type, request, **kwargs)

        assert request.method.lower() in ('put', 'post', 'patch'), request.method

        return self._wrap_request(request, lambda: super(MongoEngineResource, self).dispatch(request_type, request, **kwargs))

    def get_schema(self, request, **kwargs):
        # Schema honours the requested polymorphic type as well
        return self._wrap_request(request, lambda: super(MongoEngineResource, self).get_schema(request, **kwargs))

    def _get_resource_from_class(self, type_map, cls):
        # Reverse lookup: the polymorphic resource registered for cls
        for resource in type_map.itervalues():
            if resource._meta.object_class is cls:
                return resource
        raise KeyError(cls)

    def _get_type_from_class(self, type_map, cls):
        # As we are overriding self._meta.object_class we have to make sure
        # that we do not miss real match, so if self._meta.object_class
        # matches, we still check other items, otherwise we return immediately
        res = None
        for typ, resource in type_map.iteritems():
            if resource._meta.object_class is cls:
                if resource._meta.object_class is self._meta.object_class:
                    res = typ
                else:
                    return typ
        if res is not None:
            return res
        else:
            raise KeyError(cls)

    def dehydrate_resource_type(self, bundle):
        # Value for the resource_type field of polymorphic resources
        type_map = getattr(self._meta, 'polymorphic', {})
        if not type_map:
            return None
        return self._get_type_from_class(type_map, bundle.obj.__class__)

    def full_dehydrate(self, bundle):
        type_map = getattr(self._meta, 'polymorphic', {})
        if not type_map:
            return super(MongoEngineResource, self).full_dehydrate(bundle)

        # Optimization
        if self._meta.object_class is bundle.obj.__class__:
            return super(MongoEngineResource, self).full_dehydrate(bundle)

        # Dehydrate through the resource matching the object's actual class
        resource = self._get_resource_from_class(type_map, bundle.obj.__class__)(self._meta.api_name)
        return self._wrap_polymorphic(resource, lambda: super(MongoEngineResource, self).full_dehydrate(bundle))

    def full_hydrate(self, bundle):
        # When updating objects, we want to force only updates of the same type, and object
        # should be completely replaced if type is changed, so we throw and exception here
        # to direct program logic flow (it is cached and replace instead of update is tried)
        if bundle.obj and self._meta.object_class is not bundle.obj.__class__:
            raise tastypie_exceptions.NotFound("A document instance matching the provided arguments could not be found.")

        bundle = super(MongoEngineResource, self).full_hydrate(bundle)

        # We redo check for required fields as Tastypie is not
        # reliable as it does checks in an inconsistent way
        # (https://github.com/toastdriven/django-tastypie/issues/491)
        for field_object in self.fields.itervalues():
            if field_object.readonly:
                continue
            if not field_object.attribute:
                continue

            value = NOT_HYDRATED

            # Tastypie also skips setting value if it is None, but this means
            # updates to None are ignored: this is not good as it hides invalid
            # PUT/PATCH REST requests (setting value to None which should fail
            # validation (field required) is simply ignored and value is left
            # as it is)
            # (https://github.com/toastdriven/django-tastypie/issues/492)
            # We hydrate field again only if existing value is not None
            if getattr(bundle.obj, field_object.attribute, None) is not None:
                # Tastypie also ignores missing fields in PUT,
                # so we check for missing field here
                # (https://github.com/toastdriven/django-tastypie/issues/496)
                if field_object.instance_name not in bundle.data:
                    if field_object._default is not tastypie_fields.NOT_PROVIDED:
                        if callable(field_object.default):
                            value = field_object.default()
                        else:
                            value = field_object.default
                    else:
                        value = None
                else:
                    value = field_object.hydrate(bundle)
                if value is None:
                    # This does not really set None in a way that calling
                    # getattr on bundle.obj would return None later on
                    # This is how MongoEngine is implemented
                    # (https://github.com/hmarr/mongoengine/issues/505)
                    setattr(bundle.obj, field_object.attribute, None)

            if field_object.blank or field_object.null:
                continue

            # We are just trying to fix Tastypie here, for other "null" values
            # like [] and {} we leave to validate bellow to catch them
            if getattr(bundle.obj, field_object.attribute, None) is None or value is None:  # We also have to check value, read comment above
                raise tastypie_exceptions.ApiFieldError("The '%s' field has no data and doesn't allow a default or null value." % field_object.instance_name)

        # We validate MongoEngine object here so that possible exception
        # is thrown before going to MongoEngine layer, wrapped in
        # Django exception so that it is handled properly
        # is_valid method is too early as bundle.obj is not yet ready then
        try:
            # Validation fails for unsaved related resources, so
            # we fake pk here temporary, for validation code to
            # assume resource is saved
            pk = getattr(bundle.obj, 'pk', None)
            try:
                if pk is None:
                    bundle.obj.pk = bson.ObjectId()
                bundle.obj.validate()
            finally:
                if pk is None:
                    bundle.obj.pk = pk
        except mongoengine.ValidationError, e:
            raise exceptions.ValidationError(e.message)

        return bundle

    def build_schema(self):
        data = super(MongoEngineResource, self).build_schema()

        for field_name, field_object in self.fields.items():
            # We process ListField specially here (and not use field's
            # build_schema) so that Tastypie's ListField can be used
            if isinstance(field_object, tastypie_fields.ListField):
                if field_object.field:
                    data['fields'][field_name]['content'] = {}

                    field_type = field_object.field.__class__.__name__.lower()
                    if field_type.endswith('field'):
                        field_type = field_type[:-5]
                    data['fields'][field_name]['content']['type'] = field_type

                    if field_object.field.__doc__:
                        data['fields'][field_name]['content']['help_text'] = trim(field_object.field.__doc__)
            # Let fields which know how to describe themselves extend the schema
            if hasattr(field_object, 'build_schema'):
                data['fields'][field_name].update(field_object.build_schema())

        type_map = getattr(self._meta, 'polymorphic', {})
        if not type_map:
            return data

        data.update({
            'resource_types': type_map.keys(),
        })

        return data

    def obj_get(self, request=None, **kwargs):
        # MongoEngine exceptions are separate from Django exceptions, we combine them here
        try:
            return super(MongoEngineResource, self).obj_get(request, **kwargs)
        except self._meta.object_class.DoesNotExist, e:
            exp = models_base.subclass_exception('DoesNotExist', (self._meta.object_class.DoesNotExist, exceptions.ObjectDoesNotExist), self._meta.object_class.DoesNotExist.__module__)
            raise exp(*e.args)
        except queryset.DoesNotExist, e:
            exp = models_base.subclass_exception('DoesNotExist', (queryset.DoesNotExist, exceptions.ObjectDoesNotExist), queryset.DoesNotExist.__module__)
            raise exp(*e.args)
        except self._meta.object_class.MultipleObjectsReturned, e:
            exp = models_base.subclass_exception('MultipleObjectsReturned', (self._meta.object_class.MultipleObjectsReturned, exceptions.MultipleObjectsReturned), self._meta.object_class.MultipleObjectsReturned.__module__)
            raise exp(*e.args)
        except queryset.MultipleObjectsReturned, e:
            exp = models_base.subclass_exception('MultipleObjectsReturned', (queryset.MultipleObjectsReturned, exceptions.MultipleObjectsReturned), queryset.MultipleObjectsReturned.__module__)
            raise exp(*e.args)
        except mongoengine.ValidationError, e:
            # A lookup failing MongoEngine validation is surfaced as DoesNotExist
            exp = models_base.subclass_exception('DoesNotExist', (queryset.DoesNotExist, exceptions.ObjectDoesNotExist), queryset.DoesNotExist.__module__)
            raise exp(*e.args)

    def obj_update(self, bundle, request=None, **kwargs):
        if not bundle.obj or not getattr(bundle.obj, 'pk', None):
            try:
                bundle.obj = self.obj_get(request, **kwargs)
            except (queryset.DoesNotExist, exceptions.ObjectDoesNotExist):
                raise tastypie_exceptions.NotFound("A document instance matching the provided arguments could not be found.")

        bundle = self.full_hydrate(bundle)
        self.save_related(bundle)
        bundle.obj.save()

        m2m_bundle = self.hydrate_m2m(bundle)
        self.save_m2m(m2m_bundle)
        return bundle

    def obj_delete(self, request=None, **kwargs):
        # MongoEngine exceptions are separate from Django exceptions and Tastypie
        # expects Django exceptions, so we catch it here ourselves and raise NotFound
        try:
            return super(MongoEngineResource, self).obj_delete(request, **kwargs)
        except queryset.DoesNotExist:
            raise tastypie_exceptions.NotFound("A document instance matching the provided arguments could not be found.")

    @classmethod
    def api_field_from_mongo_field(cls, f, default=tastypie_fields.CharField):
        """
        Returns the field type that would likely be associated with each
        MongoEngine type.
        """
        result = default
        if isinstance(f, (mongoengine.ComplexDateTimeField, mongoengine.DateTimeField)):
            result = tastypie_fields.DateTimeField
        elif isinstance(f, mongoengine.BooleanField):
            result = tastypie_fields.BooleanField
        elif isinstance(f, mongoengine.FloatField):
            result = tastypie_fields.FloatField
        elif isinstance(f, mongoengine.DecimalField):
            result = tastypie_fields.DecimalField
        elif isinstance(f, mongoengine.IntField):
            result = tastypie_fields.IntegerField
        elif isinstance(f, (mongoengine.FileField, mongoengine.BinaryField)):
            result = tastypie_fields.FileField
        elif isinstance(f, mongoengine.DictField):
            result = tastypie_fields.DictField
        elif isinstance(f, mongoengine.ListField):
            result = tastypie_fields.ListField
        elif isinstance(f, mongoengine.GeoPointField):
            result = tastypie_fields.ListField
        elif isinstance(f, mongoengine.ObjectIdField):
            result = fields.ObjectId
        return result

    @classmethod
    def get_fields(cls, fields=None, excludes=None):
        """
        Given any explicit fields to include and fields to exclude, add
        additional fields based on the associated document.

        :param fields: optional whitelist of field names to generate
        :param excludes: optional blacklist of field names to skip
        :returns: dict mapping field name to an instantiated Tastypie field
        """
        final_fields = {}
        fields = fields or []
        excludes = excludes or []
        # Without a document class there is nothing to introspect
        if not cls._meta.object_class:
            return final_fields
        for name, f in cls._meta.object_class._fields.iteritems():
            # If the field name is already present, skip
            if name in cls.base_fields:
                continue
            # If field is not present in explicit field listing, skip
            if fields and name not in fields:
                continue
            # If field is in exclude list, skip
            if excludes and name in excludes:
                continue
            # TODO: Might need it in the future
            #if cls.should_skip_field(f):
            #    continue
            api_field_class = cls.api_field_from_mongo_field(f)
            kwargs = {
                'attribute': name,
                'unique': f.unique,
                'null': not f.required,
                'help_text': f.help_text,
            }
            # If field is not required, it does not matter if set default value,
            # so we do
            if not f.required:
                kwargs['default'] = f.default
            else:
                # MongoEngine does not really differ between user-specified default
                # and its default, so we try to guess
                if isinstance(f, mongoengine.ListField):
                    if not callable(f.default) or f.default() != []:  # If not MongoEngine's default
                        kwargs['default'] = f.default
                elif isinstance(f, mongoengine.DictField):
                    if not callable(f.default) or f.default() != {}:  # If not MongoEngine's default
                        kwargs['default'] = f.default
                else:
                    if f.default is not None:  # If not MongoEngine's default
                        kwargs['default'] = f.default
            final_fields[name] = api_field_class(**kwargs)
            final_fields[name].instance_name = name
            # We store MongoEngine field so that schema output can show
            # to which content the list is limited to (if any)
            if isinstance(f, mongoengine.ListField):
                final_fields[name].field = f.field
        return final_fields
class MongoEngineListResource(MongoEngineResource):
    """
    A MongoEngine resource used in conjunction with EmbeddedListField.

    Elements of an embedded list have no primary key of their own, so this
    resource uses the element's index inside the list as its ``pk`` and
    loads/saves everything through the parent document (``self.instance``).
    """

    def __init__(self, api_name=None):
        super(MongoEngineListResource, self).__init__(api_name)
        # Parent document instance; populated per-request in dispatch()
        self.instance = None
        # Resource for the parent document (``_parent`` is attached elsewhere)
        self.parent = self._parent(api_name)

    def _safe_get(self, request, **kwargs):
        """Fetch the parent document or answer with 404 immediately."""
        filters = self.remove_api_resource_names(kwargs)
        try:
            return self.parent.cached_obj_get(request=request, **filters)
        except (queryset.DoesNotExist, exceptions.ObjectDoesNotExist):
            raise tastypie_exceptions.ImmediateHttpResponse(response=http.HttpNotFound())

    def dispatch(self, request_type, request, **kwargs):
        # Resolve the parent document first; the element index captured from
        # the URL (if any) becomes the element's pk for the rest of dispatch
        index = None
        if 'index' in kwargs:
            index = kwargs.pop('index')
        self.instance = self._safe_get(request, **kwargs)
        # We use pk as index from now on
        kwargs['pk'] = index
        return super(MongoEngineListResource, self).dispatch(request_type, request, **kwargs)

    def remove_api_resource_names(self, url_dict):
        # Also strip our subresource URL captures, which are not filters
        kwargs_subset = super(MongoEngineListResource, self).remove_api_resource_names(url_dict)
        for key in ['subresource_name']:
            try:
                del(kwargs_subset[key])
            except KeyError:
                pass
        return kwargs_subset

    def get_object_list(self, request):
        # Expose the embedded list as a queryset-like mapping keyed by the
        # (stringified) element index
        if not self.instance:
            return ListQuerySet()

        def add_index(index, obj):
            # Assign the list index as the element's pk
            obj.pk = unicode(index)
            return obj

        return ListQuerySet([(unicode(index), add_index(index, obj)) for index, obj in enumerate(getattr(self.instance, self.attribute))])

    def obj_create(self, bundle, request=None, **kwargs):
        bundle.obj = self._meta.object_class()
        for key, value in kwargs.items():
            setattr(bundle.obj, key, value)
        bundle = self.full_hydrate(bundle)
        object_list = getattr(self.instance, self.attribute)
        object_list.append(bundle.obj)
        # The new element's pk is its position at the end of the list
        bundle.obj.pk = unicode(len(object_list) - 1)
        self.save_related(bundle)
        # Persist through the parent document (embedded documents cannot be
        # saved on their own)
        self.instance.save()
        m2m_bundle = self.hydrate_m2m(bundle)
        self.save_m2m(m2m_bundle)
        return bundle

    def obj_update(self, bundle, request=None, **kwargs):
        if not bundle.obj or not getattr(bundle.obj, 'pk', None):
            try:
                bundle.obj = self.obj_get(request, **kwargs)
            except (queryset.DoesNotExist, exceptions.ObjectDoesNotExist):
                raise tastypie_exceptions.NotFound("A document instance matching the provided arguments could not be found.")
        bundle = self.full_hydrate(bundle)
        object_list = getattr(self.instance, self.attribute)
        # Replace the element in place at its index (pk)
        object_list[int(bundle.obj.pk)] = bundle.obj
        self.save_related(bundle)
        self.instance.save()
        m2m_bundle = self.hydrate_m2m(bundle)
        self.save_m2m(m2m_bundle)
        return bundle

    def obj_delete(self, request=None, **kwargs):
        obj = kwargs.pop('_obj', None)
        if not getattr(obj, 'pk', None):
            try:
                obj = self.obj_get(request, **kwargs)
            except (queryset.DoesNotExist, exceptions.ObjectDoesNotExist):
                raise exceptions.NotFound("A document instance matching the provided arguments could not be found.")
        # Remove the element at its index and persist the parent document
        getattr(self.instance, self.attribute).pop(int(obj.pk))
        self.instance.save()

    def get_resource_uri(self, bundle_or_obj):
        """Build the subresource detail URI for the given list element."""
        if isinstance(bundle_or_obj, tastypie_bundle.Bundle):
            obj = bundle_or_obj.obj
        else:
            obj = bundle_or_obj
        kwargs = {
            'resource_name': self.parent._meta.resource_name,
            'subresource_name': self.attribute,
            'index': obj.pk,
        }
        if hasattr(obj, 'parent'):
            # pk could not exist in the case of nested resources, but we should not come here in this
            # case as we should remove resource_uri from fields in MongoEngineModelDeclarativeMetaclass
            # TODO: Support nested resources
            kwargs['pk'] = obj.parent.pk
        else:
            kwargs['pk'] = self.instance.pk
        if self._meta.api_name is not None:
            kwargs['api_name'] = self._meta.api_name
        return self._build_reverse_url('api_dispatch_subresource_detail', kwargs=kwargs)
|
# -*- coding: utf-8 -*-
import os
import re
import tempfile
from anima.dcc.mayaEnv import auxiliary
from anima.utils.progress import ProgressManager
from maya import cmds as cmds, mel as mel
from pymel import core as pm
class Render(object):
    """Tools for render"""

    # Render-settings presets used for GI cache baking. NOTE(review): the
    # attribute names and the "Outputs/rs/" paths suggest Redshift render
    # options — confirm. "bake" holds the overrides for the bake pass;
    # "orig" and "current_frame" are presumably runtime scratch slots for
    # restoring the previous state — verify against the methods using them.
    rso_options = {
        "bake": {
            # motion blur settings
            "motionBlurEnable": 1,
            "motionBlurDeformationEnable": 1,
            "motionBlurNumTransformationSteps": 31,
            "motionBlurFrameDuration": 100,
            "motionBlurShutterStart": 0,
            "motionBlurShutterEnd": 1,
            "motionBlurShutterPosition": 1,
            # set GI Engines
            "primaryGIEngine": 3,
            "secondaryGIEngine": 2,
            # set file paths
            "irradiancePointCloudMode": 2,  # Rebuild (prepass only)
            "irradianceCacheMode": 2,  # Rebuild (prepass only)
            "irradiancePointCloudFilename": "Outputs/rs/ipc_baked.rsmap",
            "irradianceCacheFilename": "Outputs/rs/im_baked.rsmap",
        },
        "orig": {},
        "current_frame": 1,
    }

    # Scratch files in the system temp dir used to pass node attribute info
    # and shader data between tool invocations
    node_attr_info_temp_file_path = os.path.join(tempfile.gettempdir(), "attr_info")
    shader_data_temp_file_path = os.path.join(tempfile.gettempdir(), "shader_data")
@classmethod
def assign_random_material_color(cls):
    """Assign a new lambert shader with a random color to the current selection."""
    import random
    from anima.utils import hsv_to_rgb

    sel = pm.selected()

    # Build the shader network: lambert -> shading engine
    material = pm.shadingNode("lambert", asShader=1)
    sg = pm.nt.ShadingEngine()
    material.outColor >> sg.surfaceShader

    # Pick a random color in HSV space (saturation and value kept in a
    # pleasant range) and convert it to RGB
    hue = random.random()                # 0-1
    sat = random.random() * 0.5 + 0.25   # 0.25-0.75
    val = random.random() * 0.5 + 0.5    # 0.5-1
    material.color.set(*hsv_to_rgb(hue, sat, val))

    # Assign the shading engine and restore the original selection
    pm.sets(sg, fe=sel)
    pm.select(sel)
@classmethod
def randomize_material_color(cls):
    """Randomize the color of the materials driving the selected nodes."""
    import random
    from anima.utils import hsv_to_rgb

    # Gather the materials feeding each selected node's shading engine,
    # deduplicated while keeping first-seen order
    gathered = []
    for node in pm.selected():
        sgs = node.listHistory(f=1, type="shadingEngine")
        if not sgs:
            continue
        for material in sgs[0].surfaceShader.inputs():
            if material not in gathered:
                gathered.append(material)

    # Per material type: the attribute carrying its main color
    attr_lut = {
        "lambert": "color",
    }

    for material in gathered:
        hue = random.random()                # 0-1
        sat = random.random() * 0.5 + 0.25   # 0.25-0.75
        val = random.random() * 0.5 + 0.5    # 0.5-1
        r, g, b = hsv_to_rgb(hue, sat, val)
        material.attr(attr_lut[material.type()]).set(r, g, b)
@classmethod
def vertigo_setup_look_at(cls):
    """Creates the look-at locator of the Vertigo effect for the first
    selected camera.
    """
    from anima.dcc.mayaEnv import vertigo

    camera = pm.ls(sl=1)[0]
    vertigo.setup_look_at(camera)
@classmethod
def vertigo_setup_vertigo(cls):
    """Builds the Vertigo (dolly-zoom) rig for the first selected camera."""
    from anima.dcc.mayaEnv import vertigo

    camera = pm.ls(sl=1)[0]
    vertigo.setup_vertigo(camera)
@classmethod
def vertigo_delete(cls):
    """Removes the Vertigo rig from the first selected camera."""
    from anima.dcc.mayaEnv import vertigo

    camera = pm.ls(sl=1)[0]
    vertigo.delete(camera)
@classmethod
def duplicate_with_connections(cls):
    """Duplicate the selected nodes, keeping their input connections to
    the original network (``duplicate -inputConnections``).

    :return: the list of duplicated nodes
    """
    return pm.duplicate(ic=1, rr=1)
@classmethod
def duplicate_input_graph(cls):
    """Duplicate the selected nodes together with their entire upstream
    graph (``duplicate -upstreamNodes``).

    :return: the list of duplicated nodes
    """
    return pm.duplicate(un=1, rr=1)
@classmethod
def delete_render_and_display_layers(cls):
    """Removes every display layer and every render layer from the scene."""
    cls.delete_display_layers()
    cls.delete_render_layers()
@classmethod
def delete_display_layers(cls):
    """Removes every displayLayer node from the scene."""
    from anima.dcc.mayaEnv import auxiliary

    # deleting layers while a non-default render layer is active can
    # leave them undeletable, so jump to the default render layer first
    auxiliary.switch_to_default_render_layer()
    pm.delete(pm.ls(type=["displayLayer"]))
@classmethod
def delete_render_layers(cls):
    """Removes every renderLayer node from the scene."""
    from anima.dcc.mayaEnv import auxiliary

    # deleting layers while a non-default render layer is active can
    # leave them undeletable, so jump to the default render layer first
    auxiliary.switch_to_default_render_layer()
    pm.delete(pm.ls(type=["renderLayer"]))
@classmethod
def delete_unused_shading_nodes(cls):
    """Removes unused shading nodes from the scene.

    Equivalent to Hypershade's "Delete Unused Nodes" menu item.
    """
    pm.mel.eval("MLdeleteUnused")
@classmethod
def normalize_texture_paths(cls):
    """Expands environment variables in the texture path of every file
    node in the scene.
    """
    import os

    for file_node in pm.ls(type="file"):
        # setting fileTextureName can trigger color space re-detection,
        # so remember the current value and put it back afterwards
        has_color_space = file_node.hasAttr("colorSpace")
        if has_color_space:
            color_space = file_node.colorSpace.get()
        expanded = os.path.expandvars(file_node.fileTextureName.get())
        file_node.fileTextureName.set(expanded)
        if has_color_space:
            file_node.colorSpace.set(color_space)
@classmethod
def unnormalize_texture_paths(cls):
    """Contracts environment variables in texture paths by inserting the
    repository environment variable into the file paths.
    """
    from anima.dcc import mayaEnv

    mayaEnv.Maya().replace_external_paths()
@classmethod
def assign_substance_textures(cls):
    """auto assigns textures to selected materials.

    Supports both Arnold (aiStandardSurface) and Redshift
    (RedshiftMaterial / RedshiftStandardMaterial) materials.

    The user picks a folder; texture files are matched by
    ``{material_name}_{Channel}*`` where Channel is BaseColor/Diffuse,
    Height (and variants), Metalness, Normal, Reflectivity or Roughness.
    For Redshift, channels with more than one matching file are treated
    as UDIM sequences ("1001" is replaced with "<udim>").
    """

    def connect_place2d_to_file(place2d_node, file_node):
        """connects place2dtexture node to file image node"""
        place2d_outputs = ["outUV", "outUvFilterSize"]
        texture_inputs = ["uvCoord", "uvFilterSize"]
        place2d_attrs = [
            "coverage",
            "translateFrame",
            "rotateFrame",
            "mirrorU",
            "mirrorV",
            "stagger",
            "wrapU",
            "wrapV",
            "repeatUV",
            "offset",
            "rotateUV",
            "noiseUV",
            "vertexUvOne",
            "vertexUvTwo",
            "vertexUvThree",
            "vertexCameraOne",
        ]
        for i in range(0, len(place2d_outputs)):
            place2d_node.attr(place2d_outputs[i]).connect(
                file_node.attr(texture_inputs[i])
            )
        for attr in place2d_attrs:
            place2d_node.attr(attr).connect(file_node.attr(attr))

    import glob

    materials = []
    # support both object and material selections
    nodes = pm.selected()
    accepted_materials = [
        "aiStandardSurface", "RedshiftMaterial", "RedshiftStandardMaterial"
    ]
    for node in nodes:
        if node.type() in accepted_materials:
            materials.append(node)
        elif node.type() == "transform":
            try:
                se = node.getShape().listConnections(type="shadingEngine")[0]
                material = se.attr("surfaceShader").inputs()[0]
                if material not in materials:
                    materials.append(material)
            except (AttributeError, IndexError):
                # no shape, no shading engine or no surface shader
                pass

    # ask the texture folder
    texture_path = pm.fileDialog2(cap="Choose Texture Folder", okc="Choose", fm=2)[
        0
    ]

    for material in materials:
        # textures should start with the same name of the material
        material_name = material.name().split(":")[-1]  # strip namespaces
        print("material.name: %s" % material_name)
        pattern = "%s/%s_*" % (texture_path, material_name)
        print("pattern: %s" % pattern)
        files = glob.glob(pattern)
        print(files)

        # TODO: Make it beautiful by using the auxiliary.create_shader()
        # For now do it ugly!
        if material.type() == "aiStandardSurface":
            # create place2dTexture node shared by all channels
            place2d = pm.shadingNode("place2dTexture", asUtility=1)

            # *********************************************
            # BaseColor
            base_color_file_path = glob.glob(
                "%s/%s_BaseColor*" % (texture_path, material_name)
            )
            if base_color_file_path:
                # fix diffuse weight
                material.base.set(1)
                base_color_file_path = base_color_file_path[0]
                base_color_file = pm.shadingNode("file", asTexture=1)
                connect_place2d_to_file(place2d, base_color_file)
                base_color_file.setAttr("ignoreColorSpaceFileRules", 1)
                base_color_file.fileTextureName.set(base_color_file_path)
                base_color_file.colorSpace.set("sRGB")
                base_color_file.outColor >> material.baseColor

            # *********************************************
            # Height
            height_channel_names = [
                "Height",
                "DisplaceHeightField",
                "DisplacementHeight",
            ]
            for height_channel_name in height_channel_names:
                height_file_path = glob.glob(
                    "%s/%s_%s*" % (texture_path, material_name, height_channel_name)
                )
                if height_file_path:
                    height_file_path = height_file_path[0]
                    # create a displacement node
                    shading_node = material.attr("outColor").outputs(
                        type="shadingEngine"
                    )[0]
                    disp_shader = pm.shadingNode("displacementShader", asShader=1)
                    disp_shader.displacement >> shading_node.displacementShader
                    # create texture
                    disp_file = pm.shadingNode("file", asTexture=1)
                    connect_place2d_to_file(place2d, disp_file)
                    disp_file.setAttr("ignoreColorSpaceFileRules", 1)
                    disp_file.fileTextureName.set(height_file_path)
                    disp_file.colorSpace.set("Raw")
                    disp_file.alphaIsLuminance.set(1)
                    disp_file.outAlpha >> disp_shader.displacement
                    # stop after the first matching channel (matches the
                    # Redshift branch below and avoids connecting a second
                    # displacementShader to the same shading engine)
                    break

            # *********************************************
            # Metalness
            metalness_file_path = glob.glob(
                "%s/%s_Metalness*" % (texture_path, material_name)
            )
            if metalness_file_path:
                metalness_file_path = metalness_file_path[0]
                metalness_file = pm.shadingNode("file", asTexture=1)
                connect_place2d_to_file(place2d, metalness_file)
                metalness_file.setAttr("ignoreColorSpaceFileRules", 1)
                metalness_file.fileTextureName.set(metalness_file_path)
                metalness_file.colorSpace.set("Raw")
                metalness_file.alphaIsLuminance.set(1)
                metalness_file.outAlpha >> material.metalness

            # *********************************************
            # Normal
            normal_file_path = glob.glob(
                "%s/%s_Normal*" % (texture_path, material_name)
            )
            if normal_file_path:
                normal_file_path = normal_file_path[0]
                normal_ai_normalmap = pm.shadingNode("aiNormalMap", asUtility=1)
                normal_file = pm.shadingNode("file", asTexture=1)
                connect_place2d_to_file(place2d, normal_file)
                normal_file.setAttr("ignoreColorSpaceFileRules", 1)
                normal_file.fileTextureName.set(normal_file_path)
                normal_file.colorSpace.set("Raw")
                normal_file.outColor >> normal_ai_normalmap.input
                normal_ai_normalmap.outValue >> material.normalCamera

            # *********************************************
            # Roughness
            # specularRoughness
            roughness_file_path = glob.glob(
                "%s/%s_Roughness*" % (texture_path, material_name)
            )
            if roughness_file_path:
                roughness_file_path = roughness_file_path[0]
                roughness_file = pm.shadingNode("file", asTexture=1)
                connect_place2d_to_file(place2d, roughness_file)
                roughness_file.setAttr("ignoreColorSpaceFileRules", 1)
                roughness_file.fileTextureName.set(roughness_file_path)
                roughness_file.colorSpace.set("Raw")
                roughness_file.alphaIsLuminance.set(1)
                roughness_file.outAlpha >> material.specularRoughness

        elif material.type() in ["RedshiftMaterial", "RedshiftStandardMaterial"]:
            # create place2dTexture node shared by all channels
            place2d = pm.shadingNode("place2dTexture", asUtility=1)

            # *********************************************
            # Diffuse
            diffuse_color_file_path = glob.glob(
                "%s/%s_Diffuse*" % (texture_path, material_name)
            )
            if diffuse_color_file_path:
                use_udim = False
                if len(diffuse_color_file_path) > 1:
                    use_udim = True
                diffuse_color_file_path = diffuse_color_file_path[0]
                if use_udim:
                    diffuse_color_file_path = \
                        diffuse_color_file_path.replace("1001", "<udim>")
                diffuse_color_file = pm.shadingNode("file", asTexture=1)
                connect_place2d_to_file(place2d, diffuse_color_file)
                diffuse_color_file.setAttr("ignoreColorSpaceFileRules", 1)
                diffuse_color_file.fileTextureName.set(diffuse_color_file_path)
                diffuse_color_file.colorSpace.set("sRGB")
                diffuse_color_file.outColor >> material.diffuse_color

            # Accept also BaseColor
            base_color_file_path = glob.glob(
                "%s/%s_BaseColor*" % (texture_path, material_name)
            )
            if base_color_file_path:
                use_udim = False
                if len(base_color_file_path) > 1:
                    use_udim = True
                base_color_file_path = base_color_file_path[0]
                if use_udim:
                    base_color_file_path = \
                        base_color_file_path.replace("1001", "<udim>")
                base_color_file = pm.shadingNode("file", asTexture=1)
                connect_place2d_to_file(place2d, base_color_file)
                base_color_file.setAttr("ignoreColorSpaceFileRules", 1)
                base_color_file.fileTextureName.set(base_color_file_path)
                base_color_file.colorSpace.set("sRGB")
                try:
                    base_color_file.outColor >> material.diffuse_color
                except AttributeError:
                    # RedshiftStandardMaterial
                    base_color_file.outColor >> material.base_color

            # *********************************************
            # Height
            height_channel_names = [
                "Height",
                "DisplaceHeightField",
                "DisplacementHeight",
            ]
            for height_channel_name in height_channel_names:
                height_file_path = glob.glob(
                    "%s/%s_%s*" % (texture_path, material_name, height_channel_name)
                )
                if height_file_path:
                    use_udim = False
                    if len(height_file_path) > 1:
                        use_udim = True
                    height_file_path = height_file_path[0]
                    if use_udim:
                        height_file_path = \
                            height_file_path.replace("1001", "<udim>")
                    # create a displacement node
                    shading_node = material.attr("outColor").outputs(
                        type="shadingEngine"
                    )[0]
                    disp_shader = pm.shadingNode(
                        "RedshiftDisplacement", asUtility=1
                    )
                    # if os.path.splitext(height_file_path)[1] == '.exr': # might not be necessary
                    #     disp_shader.setAttr('newrange_min', -1)
                    disp_shader.out >> shading_node.displacementShader
                    # create texture
                    disp_file = pm.shadingNode("file", asTexture=1)
                    connect_place2d_to_file(place2d, disp_file)
                    disp_file.fileTextureName.set(height_file_path)
                    disp_file.colorSpace.set("Raw")
                    disp_file.setAttr("ignoreColorSpaceFileRules", 1)
                    disp_file.alphaIsLuminance.set(1)
                    disp_file.outColor >> disp_shader.texMap
                    break

            # *********************************************
            # Metalness
            # set material BRDF to GGX and set fresnel type to metalness
            try:
                material.refl_brdf.set(1)
                material.refl_fresnel_mode.set(2)
            except AttributeError:
                # RedshiftStandardMaterial
                pass
            metalness_file_path = glob.glob(
                "%s/%s_Metal*" % (texture_path, material_name)
            )
            if metalness_file_path:
                use_udim = False
                if len(metalness_file_path) > 1:
                    use_udim = True
                metalness_file_path = metalness_file_path[0]
                if use_udim:
                    metalness_file_path = \
                        metalness_file_path.replace("1001", "<udim>")
                metalness_file = pm.shadingNode("file", asTexture=1)
                connect_place2d_to_file(place2d, metalness_file)
                metalness_file.fileTextureName.set(metalness_file_path)
                metalness_file.colorSpace.set("Raw")
                metalness_file.setAttr("ignoreColorSpaceFileRules", 1)
                metalness_file.alphaIsLuminance.set(1)
                try:
                    metalness_file.outAlpha >> material.refl_metalness
                except AttributeError:
                    # RedshiftStandardMaterial
                    metalness_file.outAlpha >> material.metalness

            # *********************************************
            # Reflectivity
            reflectivity_file_path = glob.glob(
                "%s/%s_Reflectivity*" % (texture_path, material_name)
            )
            if reflectivity_file_path:
                use_udim = False
                if len(reflectivity_file_path) > 1:
                    use_udim = True
                reflectivity_file_path = reflectivity_file_path[0]
                if use_udim:
                    reflectivity_file_path = \
                        reflectivity_file_path.replace("1001", "<udim>")
                reflectivity_file = pm.shadingNode("file", asTexture=1)
                connect_place2d_to_file(place2d, reflectivity_file)
                reflectivity_file.fileTextureName.set(reflectivity_file_path)
                reflectivity_file.colorSpace.set("sRGB")
                reflectivity_file.setAttr("ignoreColorSpaceFileRules", 1)
                reflectivity_file.alphaIsLuminance.set(1)
                try:
                    reflectivity_file.outColor >> material.refl_reflectivity
                except AttributeError:
                    # RedshiftStandardMaterial
                    reflectivity_file.outColor >> material.refl_weight

            # *********************************************
            # Normal
            normal_file_path = glob.glob(
                "%s/%s_Normal*" % (texture_path, material_name)
            )
            if normal_file_path:
                use_udim = False
                if len(normal_file_path) > 1:
                    use_udim = True
                normal_file_path = normal_file_path[0]
                if use_udim:
                    normal_file_path = normal_file_path.replace("1001", "<udim>")
                # Redshift BumpMap doesn't work properly with Substance normals
                rs_normal_map = pm.shadingNode("RedshiftBumpMap", asUtility=1)
                # rs_normal_map = pm.shadingNode("RedshiftNormalMap", asUtility=1)
                # set to tangent-space normals
                rs_normal_map.inputType.set(1)
                normal_file = pm.shadingNode("file", asTexture=1)
                connect_place2d_to_file(place2d, normal_file)
                normal_file.fileTextureName.set(normal_file_path)
                normal_file.colorSpace.set("Raw")
                normal_file.setAttr("ignoreColorSpaceFileRules", 1)
                normal_file.outColor >> rs_normal_map.input
                # rs_normal_map.tex0.set(normal_file_path)
                rs_normal_map.out >> material.bump_input
                rs_normal_map.scale.set(1)

            # *********************************************
            # Roughness
            roughness_file_path = glob.glob(
                "%s/%s_Roughness*" % (texture_path, material_name)
            )
            if roughness_file_path:
                use_udim = False
                if len(roughness_file_path) > 1:
                    use_udim = True
                roughness_file_path = roughness_file_path[0]
                if use_udim:
                    roughness_file_path = \
                        roughness_file_path.replace("1001", "<udim>")
                roughness_file = pm.shadingNode("file", asTexture=1)
                connect_place2d_to_file(place2d, roughness_file)
                roughness_file.fileTextureName.set(roughness_file_path)
                roughness_file.colorSpace.set("Raw")
                roughness_file.setAttr("ignoreColorSpaceFileRules", 1)
                roughness_file.alphaIsLuminance.set(1)
                roughness_file.outAlpha >> material.refl_roughness
@classmethod
def redshift_ic_ipc_bake(cls):
    """Sets the render settings for IC + IPC bake

    Applies the ``rso_options["bake"]`` preset to the redshiftOptions
    node (storing the current values in ``rso_options["orig"]`` so
    :meth:`redshift_ic_ipc_bake_restore` can revert them), jumps to the
    first frame of the playback range and starts a render.
    """
    # set motion blur
    start_frame = int(pm.playbackOptions(q=True, ast=True))
    end_frame = int(pm.playbackOptions(q=True, aet=True))
    # one "exposure" spanning the whole playback range so the prepass
    # covers every frame in a single render
    cls.rso_options["bake"]["motionBlurFrameDuration"] = end_frame - start_frame + 1
    rso = pm.PyNode("redshiftOptions")
    # store and set attributes
    for attr in cls.rso_options["bake"]:
        cls.rso_options["orig"][attr] = rso.attr(attr).get()
        rso.attr(attr).set(cls.rso_options["bake"][attr])
    # go to the first frame, remembering where the user was
    current_frame = pm.currentTime(q=1)
    cls.rso_options["current_frame"] = current_frame
    pm.currentTime(start_frame)
    # do a render
    pm.mel.eval('rsRender -render -rv -cam "<renderview>";')
@classmethod
def redshift_ic_ipc_bake_restore(cls):
    """restores the previous render settings

    Reverts the redshiftOptions attributes stored by
    :meth:`redshift_ic_ipc_bake`, then switches the irradiance caches to
    "Load" mode pointing at the freshly baked files, and jumps back to
    the frame the user was on before the bake.
    """
    rso = pm.PyNode("redshiftOptions")
    # revert settings back
    for attr in cls.rso_options["orig"]:
        rso.attr(attr).set(cls.rso_options["orig"][attr])
    # set the GI engines (keep the bake preset engines active)
    rso.primaryGIEngine.set(cls.rso_options["bake"]["primaryGIEngine"])
    rso.secondaryGIEngine.set(cls.rso_options["bake"]["secondaryGIEngine"])
    # set the irradiance method to load
    rso.irradiancePointCloudMode.set(1)  # Load
    rso.irradianceCacheMode.set(1)  # Load
    # set the cache paths to the baked files
    rso.irradiancePointCloudFilename.set(
        cls.rso_options["bake"]["irradiancePointCloudFilename"]
    )
    rso.irradianceCacheFilename.set(
        cls.rso_options["bake"]["irradianceCacheFilename"]
    )
    # go back to the frame stored before the bake
    current_frame = cls.rso_options["current_frame"]
    pm.currentTime(current_frame)
@classmethod
def update_render_settings(cls):
    """Updates the render file name settings for the current renderer."""
    from anima.dcc import mayaEnv

    maya_env = mayaEnv.Maya()
    version = maya_env.get_current_version()
    # nothing to update when the scene is not saved as a version
    if not version:
        return
    maya_env.set_render_filename(version=version)
@classmethod
def afanasy_job_submitter(cls):
    """Opens the Afanasy job submitter UI."""
    from anima.dcc.mayaEnv import afanasy

    afanasy.UI().show()
@classmethod
def auto_convert_to_redshift(cls):
    """Converts the whole current scene to Redshift."""
    from anima.dcc.mayaEnv import ai2rs

    ai2rs.ConversionManager().auto_convert()
@classmethod
def convert_nodes_to_redshift(cls):
    """Converts only the selected nodes to Redshift."""
    from anima.dcc.mayaEnv import ai2rs

    manager = ai2rs.ConversionManager()
    for sel_node in pm.selected():
        manager.convert(sel_node)
@classmethod
def rsproxy_to_bounding_box(cls):
    """Displays the selected Redshift proxies as bounding boxes."""
    cls.rsproxy_display_mode_toggle(display_mode=0)
@classmethod
def rsproxy_to_preview_mesh(cls):
    """Displays the selected Redshift proxies as preview meshes."""
    cls.rsproxy_display_mode_toggle(display_mode=1)
@classmethod
def rsproxy_display_mode_toggle(cls, display_mode=0):
    """sets the display mode on selected proxies

    :param display_mode:

      0: Bounding Box
      1: Preview Mesh
      2: Linked Mesh
      3: Hide In Viewport

    :return:
    """
    for node in pm.ls(sl=1):
        hist = node.getShape().listHistory()
        # NOTE(review): assumes the proxy node is always the second entry
        # in the shape's history -- confirm for more complex networks
        proxy = hist[1]
        proxy.displayMode.set(display_mode)
@classmethod
def standin_to_bbox(cls):
    """Sets the display mode of the selected aiStandIn nodes to bounding
    box (mode 0).

    Selected nodes whose shape is not an AiStandIn are skipped.
    """
    # a plain loop instead of a list comprehension used for side effects
    for node in pm.ls(sl=1):
        if isinstance(node.getShape(), pm.nt.AiStandIn):
            node.mode.set(0)
@classmethod
def standin_to_polywire(cls):
    """Sets the display mode of the selected aiStandIn nodes to polywire
    (mode 2).

    Selected nodes whose shape is not an AiStandIn are skipped.  (The
    original docstring incorrectly said "bbox".)
    """
    # a plain loop instead of a list comprehension used for side effects
    for node in pm.ls(sl=1):
        if isinstance(node.getShape(), pm.nt.AiStandIn):
            node.mode.set(2)
@classmethod
def add_miLabel(cls):
    """Adds a keyable ``miLabel`` integer attribute to the selected
    transform nodes.

    Nodes that already carry the attribute are left untouched.
    """
    for node in pm.ls(sl=1):
        # node.type() returns lower-case Maya type names ("transform",
        # see e.g. assign_substance_textures); the original compared
        # against "Transform" and therefore never matched
        if node.type() == "transform" and not node.hasAttr("miLabel"):
            pm.addAttr(node, ln="miLabel", at="long", keyable=True)
@classmethod
def connect_facingRatio_to_vCoord(cls):
    """Connects the first selected node's facingRatio to the vCoord of
    every other selected node.
    """
    nodes = pm.ls(sl=1)
    source = nodes[0]
    for target in nodes[1:]:
        source.facingRatio.connect(target + ".vCoord", force=True)
@classmethod
def set_shape_attribute(
    cls, attr_name, value, apply_to_hierarchy, disable_undo_queue=False
):
    """sets shape attributes

    Sets ``attr_name`` to ``value`` on the selected shapes.  When the
    current render layer is not the default one, a render layer
    adjustment is created first so the change stays layer-local.  A
    ``value`` of -1 means "remove": layer adjustments for the attribute
    (and its override switch, if any) are removed instead.

    :param attr_name: name of the shape attribute to set
    :param value: the value to set, or -1 to remove layer adjustments
    :param apply_to_hierarchy: also process every shape below the
      selection
    :param disable_undo_queue: temporarily disable the undo queue (for
      speed on heavy scenes); the previous undo state is restored at the
      end
    """
    undo_state = pm.undoInfo(q=1, st=1)
    if disable_undo_queue:
        pm.undoInfo(st=False)
    supported_shapes = ["aiStandIn", "mesh", "nurbsCurve"]
    # aiStandIn pairs each attribute with an "override*" switch that must
    # be enabled for the value to take effect
    attr_mapper = {
        "castsShadows": "overrideCastsShadows",
        "receiveShadows": "overrideReceiveShadows",
        "primaryVisibility": "overridePrimaryVisibility",
        "visibleInReflections": "overrideVisibleInReflections",
        "visibleInRefractions": "overrideVisibleInRefractions",
        "doubleSided": "overrideDoubleSided",
        "aiSelfShadows": "overrideSelfShadows",
        "aiOpaque": "overrideOpaque",
        "aiVisibleInDiffuse": "overrideVisibleInDiffuse",
        "aiVisibleInGlossy": "overrideVisibleInGlossy",
        "aiMatte": "overrideMatte",
    }
    # remember the selection so it can be restored at the end
    pre_selection_list = pm.ls(sl=1)
    if apply_to_hierarchy:
        pm.select(hierarchy=1)
    objects = pm.ls(sl=1, type=supported_shapes)
    # get override_attr_name from dictionary
    if attr_name in attr_mapper:
        override_attr_name = attr_mapper[attr_name]
    else:
        override_attr_name = None
    # register a caller
    pdm = ProgressManager()
    caller = pdm.register(len(objects), "Setting Shape Attribute")
    layers = pm.ls(type="renderLayer")
    is_default_layer = layers[0].currentLayer() == layers[0].defaultRenderLayer()
    if value != -1:
        # set mode
        for item in objects:
            attr_full_name = "%s.%s" % (item.name(), attr_name)
            override_attr_full_name = "%s.%s" % (item.name(), override_attr_name)
            caller.step(message=attr_full_name)
            if not is_default_layer:
                pm.editRenderLayerAdjustment(attr_full_name)
            item.setAttr(attr_name, value)
            # if there is an accompanying override attribute like it is
            # found in aiStandIn node
            # then also set override{Attr} to True
            if override_attr_name and cmds.attributeQuery(
                override_attr_name, n=item.name(), ex=1
            ):
                if not is_default_layer:
                    pm.editRenderLayerAdjustment(override_attr_full_name)
                item.setAttr(override_attr_name, True)
    else:
        # remove mode (value == -1)
        for item in objects:
            attr_full_name = "%s.%s" % (item.name(), attr_name)
            override_attr_full_name = "%s.%s" % (item.name(), override_attr_name)
            caller.step(message=attr_full_name)
            # remove any overrides
            if not is_default_layer:
                pm.editRenderLayerAdjustment(attr_full_name, remove=1)
            if (
                override_attr_name
                and cmds.attributeQuery(override_attr_name, n=item.name(), ex=1)
                and not is_default_layer
            ):
                pm.editRenderLayerAdjustment(override_attr_full_name, remove=1)
    # caller.end_progress()
    # restore undo queue state and the user's selection
    pm.undoInfo(st=undo_state)
    pm.select(pre_selection_list)
@classmethod
def set_finalGatherHide(cls, value):
    """Sets the ``miFinalGatherHide`` attribute on the shapes of the
    selected objects, creating the attribute on demand.

    :param value: 0 or 1
    """
    attr_name = "miFinalGatherHide"
    for node in pm.ls(sl=1):
        shape = node.getShape() if isinstance(node, pm.nt.Transform) else node
        # only meshes and NURBS surfaces are supported
        if not isinstance(shape, (pm.nt.Mesh, pm.nt.NurbsSurface)):
            continue
        if not shape.hasAttr(attr_name):
            pm.addAttr(shape, ln=attr_name, at="long", min=0, max=1, k=1)
        node.setAttr(attr_name, value)
@classmethod
def replace_shaders_with_last(cls):
    """Assigns the last selected shader to every object that currently
    uses one of the other selected shaders.
    """
    shaders = pm.ls(sl=1)
    replacement = shaders[-1]
    for old_shader in shaders[:-1]:
        # select the objects using the old shader, then assign the new one
        pm.hyperShade(objects=old_shader)
        pm.hyperShade(assign=replacement)
    pm.select(None)
@classmethod
def create_texture_ref_object(cls):
    """Creates a texture reference object for every selected node."""
    original_selection = pm.ls(sl=1)
    for node in original_selection:
        # the runtime command operates on the current selection
        pm.select(node)
        pm.runtime.CreateTextureReferenceObject()
    pm.select(original_selection)
@classmethod
def use_mib_texture_filter_lookup(cls):
    """Adds texture filter lookup node to the selected file texture nodes for
    better texture filtering.

    The function is smart enough to use the existing nodes, if there is a
    connection from the selected file nodes to a mib_texture_filter_lookup node
    then it will not create any new node and just use the existing ones.

    It will also not create any place2dTexture nodes if the file node doesn't
    have a place2dTexture node but is connected to a filter lookup node which
    already has a connection to a place2dTexture node.
    """
    file_nodes = pm.ls(sl=1, type="file")
    for file_node in file_nodes:
        # set the filter type to none (the lookup node does the filtering)
        file_node.filterType.set(0)
        # check if it is already connected to a mib_texture_filter_lookup node
        message_outputs = file_node.message.outputs(
            type="mib_texture_filter_lookup"
        )
        if len(message_outputs):
            # use the first one
            mib_texture_filter_lookup = message_outputs[0]
        else:
            # create a texture filter lookup node
            mib_texture_filter_lookup = pm.createNode("mib_texture_filter_lookup")
            # do the connection
            file_node.message >> mib_texture_filter_lookup.tex
        # check if the mib_texture_filter_lookup has any connection to a
        # placement node
        mib_t_f_l_to_placement = mib_texture_filter_lookup.inputs(
            type="place2dTexture"
        )
        placement_node = None
        if len(mib_t_f_l_to_placement):
            # do nothing, the lookup is already wired to a placement
            placement_node = mib_t_f_l_to_placement[0].node()
        else:
            # get the texture placement feeding the file node
            placement_connections = file_node.inputs(
                type="place2dTexture", p=1, c=1
            )
            # if there is no placement create one
            placement_node = None
            if len(placement_connections):
                placement_node = placement_connections[0][1].node()
                # disconnect connections from placement to file node
                # (the lookup node takes over the UV input)
                for conn in placement_connections:
                    conn[1] // conn[0]
            else:
                placement_node = pm.createNode("place2dTexture")
            # connect placement to mr_texture_filter_lookup
            placement_node.outU >> mib_texture_filter_lookup.coordX
            placement_node.outV >> mib_texture_filter_lookup.coordY
        # reroute the file node's color consumers through the lookup node
        for output in file_node.outColor.outputs(p=1):
            mib_texture_filter_lookup.outValue >> output
        # same for the alpha consumers
        for output in file_node.outAlpha.outputs(p=1):
            mib_texture_filter_lookup.outValueA >> output
@classmethod
def convert_to_linear(cls):
    """adds a gamma_gain node in between the selected nodes outputs to make the
    result linear

    Each selected node gets one ``mip_gamma_gain`` (gamma 2.2, reverse)
    inserted between its output and all of its former consumers.
    """
    #
    # convert to linear
    #
    selection = pm.ls(sl=1)
    for file_node in selection:
        # get the connections (the current downstream consumers)
        outputs = file_node.outputs(plugs=True)
        if not len(outputs):
            # nothing consumes this node, skip it
            continue
        # and insert a mip_gamma_gain
        gamma_node = pm.createNode("mip_gamma_gain")
        gamma_node.setAttr("gamma", 2.2)
        gamma_node.setAttr("reverse", True)
        # connect the file_node to gamma_node; mib-style nodes expose
        # outValue/outValueA, regular file nodes only outColor
        try:
            file_node.outValue >> gamma_node.input
            file_node.outValueA >> gamma_node.inputA
        except AttributeError:
            file_node.outColor >> gamma_node.input
        # do all the connections from the output of the gamma; fall back
        # to the alpha output when the color output is not compatible
        for output in outputs:
            try:
                gamma_node.outValue >> output
            except RuntimeError:
                gamma_node.outValueA >> output
    pm.select(selection)
@classmethod
def use_image_sequence(cls):
    """creates an expression to make the mentalrayTexture node also able to
    read image sequences

    Select your mentalrayTexture nodes and then run the script.  The
    filename should use the ``file.%nd.ext`` format.
    """
    textures = pm.ls(sl=1, type="mentalrayTexture")
    for texture in textures:
        # get the filename
        filename = texture.getAttr("fileTextureName")
        splits = filename.split(".")
        # only "<base>.<frame>.<ext>" style names can be sequenced
        if len(splits) != 3:
            continue
        base = ".".join(splits[0:-2]) + "."
        pad = len(splits[-2])
        extension = "." + splits[-1]
        # per-frame expression rebuilding the file name with the current
        # frame number zero-padded to the original width
        expr = (
            "string $padded_frame = python(\"'%0"
            + str(pad)
            + "d'%\" + string(frame));\n"
            + 'string $filename = "'
            + base
            # use the file's real extension; the original hard-coded
            # ".tga" here and left `extension` unused
            + '" + $padded_frame + "'
            + extension
            + '";\n'
            + 'setAttr -type "string" '
            + texture.name()
            + ".fileTextureName $filename;\n"
        )
        # create the expression
        pm.expression(s=expr)
@classmethod
def add_to_selected_container(cls):
    """Adds the selected nodes to the selected container.

    Behaviour depends on how many containers are in the selection:

    * none: a new container is created holding the whole selection,
    * one: the non-container nodes are added to it,
    * several: every container is merged into the last selected one,
      which then also receives the non-container nodes.
    """
    selection = pm.ls(sl=1)
    conList = pm.ls(sl=1, con=1)  # containers within the selection
    objList = list(set(selection) - set(conList))  # non-container nodes
    if len(conList) == 0:
        pm.container(addNode=selection)
    elif len(conList) == 1:
        pm.container(conList, edit=True, addNode=objList)
    else:
        # merge all containers into the last selected one
        length = len(conList) - 1
        for i in range(0, length):
            containerList = conList[i]
            pm.container(conList[-1], edit=True, f=True, addNode=containerList)
        pm.container(conList[-1], edit=True, f=True, addNode=objList)
@classmethod
def remove_from_container(cls):
    """Removes every selected node from the container it belongs to."""
    for node in pm.ls(sl=1):
        # look up the container holding this node
        owner = pm.container(q=True, fc=node)
        pm.container(owner, edit=True, removeNode=node)
@classmethod
def reload_file_textures(cls):
    """Forces every file texture node in the scene to reload its image."""
    fileList = pm.ls(type="file")
    for fileNode in fileList:
        # NOTE(review): the attribute name is interpolated into the MEL
        # call without quotes; confirm AEfileTextureReloadCmd accepts an
        # unquoted attribute name here
        mel.eval("AEfileTextureReloadCmd(%s.fileTextureName)" % fileNode)
@classmethod
def transfer_shaders(cls, allow_component_assignments=False):
    """transfer shaders between selected objects. It can search for
    hierarchies both in source and target sides.

    Select the source object first and the target object second.  Along
    with the shader assignments a fixed list of render attributes (and
    their incoming connections) is copied from source to target.

    :param (bool) allow_component_assignments: If True will transfer component level
      shader assignments.
    """
    selection = pm.ls(sl=1)
    pm.select(None)
    source = selection[0]
    target = selection[1]
    # auxiliary.transfer_shaders(source, target)
    # pm.select(selection)

    # render attributes copied alongside the shader assignment
    # (Maya shape, Arnold "ai*" and Redshift "rs*" attributes; light
    # attributes are included so the function also works on lights)
    attr_names = [
        "castsShadows",
        "receiveShadows",
        "motionBlur",
        "primaryVisibility",
        "smoothShading",
        "visibleInReflections",
        "visibleInRefractions",
        "doubleSided",
        "opposite",
        "aiSelfShadows",
        "aiOpaque",
        "aiVisibleInDiffuse",
        "aiVisibleInGlossy",
        "aiExportTangents",
        "aiExportColors",
        "aiExportRefPoints",
        "aiExportRefNormals",
        "aiExportRefTangents",
        "color",
        "interpolation",
        "aiTranslator",
        "intensity",
        "aiExposure",
        "aiColorTemperature",
        "emitDiffuse",
        "emitSpecular",
        "aiDecayType",
        "lightVisible",
        "aiSamples",
        "aiNormalize",
        "aiCastShadows",
        "aiShadowDensity",
        "aiShadowColor",
        "aiAffectVolumetrics",
        "aiCastVolumetricShadows",
        "aiVolumeSamples",
        "aiDiffuse",
        "aiSpecular",
        "aiSss",
        "aiIndirect",
        "aiMaxBounces",
        "aiSubdivType",
        "aiSubdivIterations",
        "aiSubdivAdaptiveMetric",
        "aiSubdivPixelError",
        "aiSubdivUvSmoothing",
        "aiSubdivSmoothDerivs",
        "aiDispHeight",
        "aiDispPadding",
        "aiDispZeroValue",
        "aiDispAutobump",
        "aiStepSize",
        "rsEnableSubdivision",
        "rsSubdivisionRule",
        "rsScreenSpaceAdaptive",
        "rsDoSmoothSubdivision",
        "rsMinTessellationLength",
        "rsMaxTessellationSubdivs",
        "rsOutOfFrustumTessellationFactor",
        "rsLimitOutOfFrustumTessellation",
        "rsMaxOutOfFrustumTessellationSubdivs",
        "rsEnableDisplacement",
        "rsMaxDisplacement",
        "rsDisplacementScale",
        "rsAutoBumpMap",
        "rsObjectId",
    ]
    # check if they are direct parents of mesh or nurbs shapes
    source_shape = source.getShape()
    target_shape = target.getShape()
    if (
        source_shape
        and not isinstance(source_shape, pm.nt.NurbsCurve)
        and target_shape
        and not isinstance(target_shape, pm.nt.NurbsCurve)
    ):
        # do a direct assignment from source to target
        # shading_engines = source_shape.outputs(type=pm.nt.ShadingEngine)
        # pm.sets(shading_engines[0], fe=target)
        # pm.select(selection)
        lut = {"match": [(source_shape, target_shape)], "no_match": []}
    else:
        # match the two hierarchies node by node
        lut = auxiliary.match_hierarchy(source, target)
    for source_node, target_node in lut["match"]:
        auxiliary.transfer_shaders(
            source_node,
            target_node,
            allow_component_assignments=allow_component_assignments
        )
        # also transfer render attributes
        for attr_name in attr_names:
            # copy the static value; missing attributes are skipped
            try:
                target_node.setAttr(attr_name, source_node.getAttr(attr_name))
            except (pm.MayaAttributeError, RuntimeError):
                pass
            # input connections to attributes
            try:
                for plug in source_node.attr(attr_name).inputs(p=1):
                    plug >> target_node.attr(attr_name)
            except pm.MayaAttributeError:
                pass
        # caller.step()
    # caller.end_progress()
    # leave the unmatched target nodes selected so the user can see them
    if len(lut["no_match"]):
        pm.select(lut["no_match"])
        print(
            "The following nodes has no corresponding source:\n%s"
            % ("\n".join([node.name() for node in lut["no_match"]]))
        )
@classmethod
def fit_placement_to_UV(cls):
    """Fits the selected place2dTexture nodes to the bounding box of the
    selected UV components.

    Select mesh UVs together with one or more place2dTexture nodes; each
    placement's coverage/translateFrame is set so it exactly covers the
    UV extents.
    """
    selection = pm.ls(sl=1)
    uvs = [n for n in selection if isinstance(n, pm.general.MeshUV)]
    placements = [p for p in selection if isinstance(p, pm.nt.Place2dTexture)]
    # polyEditUV -q returns a flat [u0, v0, u1, v1, ...] list
    temp_data = pm.polyEditUV(uvs, q=1)
    u_values = temp_data[0::2]
    v_values = temp_data[1::2]
    # min/max instead of sorting the whole lists: O(n) and clearer
    umin = min(u_values)
    umax = max(u_values)
    vmin = min(v_values)
    vmax = max(v_values)
    for p in placements:
        p.setAttr("coverage", (umax - umin, vmax - vmin))
        p.setAttr("translateFrame", (umin, vmin))
@classmethod
def connect_placement2d_to_file(cls):
    """connects the selected placement node to the selected file textures"""
    from anima import __string_types__

    # plain strings connect like-named attributes, tuples map a
    # (source, target) attribute pair
    attr_lut = [
        "coverage",
        "translateFrame",
        "rotateFrame",
        "mirrorU",
        "mirrorV",
        "stagger",
        "wrapU",
        "wrapV",
        "repeatUV",
        "offset",
        "rotateUV",
        "noiseUV",
        "vertexUvOne",
        "vertexUvTwo",
        "vertexUvThree",
        "vertexCameraOne",
        ("outUV", "uvCoord"),
        ("outUvFilterSize", "uvFilterSize"),
    ]
    # the first selected placement drives every selected file node
    placement_node = pm.ls(sl=1, type=pm.nt.Place2dTexture)[0]
    for file_node in pm.ls(sl=1, type=pm.nt.File):
        for attr in attr_lut:
            if isinstance(attr, __string_types__):
                source_attr_name = attr
                target_attr_name = attr
            else:
                source_attr_name, target_attr_name = attr
            placement_node.attr(source_attr_name) >> file_node.attr(
                target_attr_name
            )
@classmethod
def open_node_in_browser(cls):
    """Opens a file browser at the file path of each selected node.

    Supports file, aiImage and aiStandIn nodes; transforms are resolved
    to their shapes first.
    """
    import os

    from anima.utils import open_browser_in_location

    # node type -> name of the attribute holding the file path
    path_attr_by_type = {
        "file": "fileTextureName",
        "aiImage": "filename",
        "aiStandIn": "dso",
    }
    for node in pm.ls(sl=1):
        node_type = pm.objectType(node)
        # special case: if transform use shape
        if node_type == "transform":
            node = node.getShape()
            node_type = pm.objectType(node)
        attr_name = path_attr_by_type.get(node_type)
        if not attr_name:
            continue
        path = node.getAttr(attr_name)
        # sequence paths contain "#": browse the containing folder
        if "#" in path:
            path = os.path.dirname(path)
        open_browser_in_location(path)
@classmethod
def enable_matte(cls, color=0):
    """enables matte on selected objects

    :param color: index into the ``colors`` table below: 0 makes the
      matte invisible, 1/2/3 pick a pure R/G/B matte and 4 alpha-only.
    """
    #
    # Enable Matte on Selected Objects
    #
    colors = [
        [0, 0, 0, 0],  # Not Visible
        [1, 0, 0, 0],  # Red
        [0, 1, 0, 0],  # Green
        [0, 0, 1, 0],  # Blue
        [0, 0, 0, 1],  # Alpha
    ]
    arnold_shaders = (pm.nt.AiStandard, pm.nt.AiHair, pm.nt.AiSkin, pm.nt.AiUtility)
    for node in pm.ls(
        sl=1, dag=1, type=[pm.nt.Mesh, pm.nt.NurbsSurface, "aiStandIn"]
    ):
        obj = node
        # if isinstance(node, pm.nt.Mesh):
        #     obj = node
        # elif isinstance(node, pm.nt.Transform):
        #     obj = node.getShape()
        shading_nodes = pm.listConnections(obj, type="shadingEngine")
        for shadingNode in shading_nodes:
            shader = shadingNode.attr("surfaceShader").connections()[0]
            # only Arnold shaders expose the aiEnableMatte attributes
            if isinstance(shader, arnold_shaders):
                try:
                    # create render layer adjustments first so the matte
                    # stays local to the current render layer
                    pm.editRenderLayerAdjustment(shader.attr("aiEnableMatte"))
                    pm.editRenderLayerAdjustment(shader.attr("aiMatteColor"))
                    pm.editRenderLayerAdjustment(shader.attr("aiMatteColorA"))
                    shader.attr("aiEnableMatte").set(1)
                    shader.attr("aiMatteColor").set(
                        colors[color][0:3], type="double3"
                    )
                    shader.attr("aiMatteColorA").set(colors[color][3])
                except RuntimeError as e:
                    # there is some connections; skip but report
                    print(str(e))
@classmethod
def disable_subdiv(cls, node):
"""Disables the subdiv on the given nodes
:param node:
:return:
"""
if isinstance(node, pm.nt.Transform):
shapes = node.getShapes()
else:
shapes = [node]
for shape in shapes:
try:
shape.aiSubdivType.set(0)
except AttributeError:
pass
try:
shape.rsEnableSubdivision.set(0)
except AttributeError:
pass
@classmethod
def disable_subdiv_on_selected(cls):
"""disables subdiv on selected nodes"""
for node in pm.ls(sl=1):
cls.disable_subdiv(node)
@classmethod
def enable_subdiv_on_selected(cls, fixed_tes=False, max_subdiv=3):
"""enables subdiv on selected objects
:param fixed_tes: Uses fixed tessellation.
:param max_subdiv: The max subdivision iteration. Default 3.
"""
#
# Set SubDiv to CatClark on Selected nodes
#
for node in pm.ls(sl=1):
cls.enable_subdiv(node, fixed_tes=fixed_tes, max_subdiv=max_subdiv)
    @classmethod
    def enable_subdiv(cls, node, fixed_tes=False, max_subdiv=3):
        """enables subdiv on selected objects

        :param node: The node to enable the subdiv too
        :param fixed_tes: Uses fixed tessellation.
        :param max_subdiv: The max subdivision iteration. Default 3.
        """
        if isinstance(node, pm.nt.Transform):
            shapes = node.getShapes()
        else:
            shapes = [node]

        for shape in shapes:
            # Arnold: type 1 is catclark; pixel error 0 forces full iterations
            # (attributes only exist when MtoA is loaded)
            try:
                shape.aiSubdivIterations.set(max_subdiv)
                shape.aiSubdivType.set(1)
                shape.aiSubdivPixelError.set(0)
            except AttributeError:
                pass
            # Redshift: adaptive screen-space tessellation unless fixed_tes
            # (attributes only exist when Redshift is loaded)
            try:
                shape.rsEnableSubdivision.set(1)
                shape.rsMaxTessellationSubdivs.set(max_subdiv)
                if not fixed_tes:
                    shape.rsScreenSpaceAdaptive.set(1)
                    shape.rsLimitOutOfFrustumTessellation.set(1)
                    shape.rsMaxOutOfFrustumTessellationSubdivs.set(1)
                else:
                    shape.rsScreenSpaceAdaptive.set(0)
                    shape.rsMinTessellationLength.set(0)
            except AttributeError:
                pass
@classmethod
def export_shader_attributes(cls):
"""exports the selected shader attributes to a JSON file"""
# get data
data = []
nodes = pm.ls(sl=1)
for node in nodes:
node_attr_data = {}
attrs = node.listAttr()
for attr in attrs:
try:
value = attr.get()
if not isinstance(value, pm.PyNode):
node_attr_data[attr.shortName()] = value
except TypeError:
continue
data.append(node_attr_data)
# write data
import json
with open(cls.node_attr_info_temp_file_path, "w") as f:
json.dump(data, f)
    @classmethod
    def export_shader_assignments_to_houdini(cls):
        """Exports shader assignments to Houdini via a JSON file.

        Use the Houdini counterpart to import the assignment data.
        The JSON maps shader name -> list of assigned shape full paths with
        "/" separators (Houdini-style paths).
        """
        # get the shaders from viewport selection
        shaders = []
        for node in pm.selected():
            shape = node.getShape()
            shading_engines = shape.outputs(type=pm.nt.ShadingEngine)
            for shading_engine in shading_engines:
                inputs = shading_engine.surfaceShader.inputs()
                for shader in inputs:
                    shaders.append(shader)

        # get the shapes for each shader
        shader_assignments = {}
        for shader in shaders:
            shader_name = shader.name()
            shading_engines = shader.outputs(type=pm.nt.ShadingEngine)
            if not shading_engines:
                continue
            # only the first shading engine per shader is considered
            shading_engine = shading_engines[0]
            shader_assignments[shader_name] = []
            # members of the shading engine set are the assigned components
            assigned_nodes = pm.sets(shading_engine, q=1)
            for assigned_node in assigned_nodes:
                shape = assigned_node.node()
                # get the full path of the shape, converted to "/" separators
                shape_full_path = shape.fullPath().replace("|", "/")
                shader_assignments[shader_name].append(shape_full_path)

        # write data and report success/failure in a dialog
        try:
            import json

            with open(cls.shader_data_temp_file_path, "w") as f:
                json.dump(shader_assignments, f, indent=4)
        except BaseException as e:
            pm.confirmDialog(title="Error", message="%s" % e, button="OK")
        else:
            pm.confirmDialog(
                title="Successful",
                message="Shader Data exported successfully!",
                button="OK",
            )
@classmethod
def import_shader_attributes(cls):
"""imports shader attributes from a temp JSON file"""
# read data
import json
with open(cls.node_attr_info_temp_file_path) as f:
data = json.load(f)
# set data
nodes = pm.ls(sl=1)
for i, node in enumerate(nodes):
i = i % len(data)
node_data = data[i]
for key in node_data:
value = node_data[key]
try:
node.setAttr(key, value)
except RuntimeError:
continue
    @classmethod
    def barndoor_simulator_setup(cls):
        """creates a barndoor simulator

        Builds an ``auxiliary.BarnDoorSimulator`` around the first selected
        node (assumed to be a light -- TODO confirm expected selection type).
        """
        bs = auxiliary.BarnDoorSimulator()
        bs.light = pm.ls(sl=1)[0]
        bs.setup()
    @classmethod
    def barndoor_simulator_unsetup(cls):
        """removes the barndoor simulator

        Tears down the simulator for every selected node whose shape is a
        light; non-light selections are silently skipped.
        """
        bs = auxiliary.BarnDoorSimulator()
        for light in pm.ls(sl=1):
            light_shape = light.getShape()
            if isinstance(light_shape, pm.nt.Light):
                bs.light = light
                bs.unsetup()
@classmethod
def fix_barndoors(cls):
"""fixes the barndoors on scene lights created in MtoA 1.0 to match the
new behaviour of barndoors in MtoA 1.1
"""
for light in pm.ls(type="spotLight"):
# calculate scale
cone_angle = light.getAttr("coneAngle")
penumbra_angle = light.getAttr("penumbraAngle")
if penumbra_angle < 0:
light.setAttr("coneAngle", max(cone_angle + penumbra_angle, 0.1))
else:
light.setAttr("coneAngle", max(cone_angle - penumbra_angle, 0.1))
    @classmethod
    def convert_aiSkinSSS_to_aiSkin(cls):
        """converts aiSkinSSS nodes in the current scene to aiSkin + aiStandard
        nodes automatically

        Every aiSkinSss node is replaced with an aiSkin whose output drives an
        aiStandard's emission channel; attribute values (with optional
        multipliers) and incoming texture connections are carried over using
        ``attr_mapper``, the shading-engine connections are rewired to the
        aiStandard, and the new nodes inherit the original name.
        """
        # maps each aiSkinSss attribute to the replacement node kind
        # ("aiSkin"/"aiStandard"), the attribute name there, and an optional
        # value multiplier
        attr_mapper = {
            # diffuse
            "color": {"node": "aiStandard", "attr_name": "color"},
            "diffuseWeight": {
                "node": "aiStandard",
                "attr_name": "Kd",
                "multiplier": 0.7,
            },
            "diffuseRoughness": {"node": "aiStandard", "attr_name": "diffuseRoughness"},
            # sss
            "sssWeight": {"node": "aiSkin", "attr_name": "sssWeight"},
            # shallowScatter
            "shallowScatterColor": {
                "node": "aiSkin",
                "attr_name": "shallowScatterColor",
            },
            "shallowScatterWeight": {
                "node": "aiSkin",
                "attr_name": "shallowScatterWeight",
            },
            "shallowScatterRadius": {
                "node": "aiSkin",
                "attr_name": "shallowScatterRadius",
            },
            # midScatter
            "midScatterColor": {
                "node": "aiSkin",
                "attr_name": "midScatterColor",
            },
            "midScatterWeight": {"node": "aiSkin", "attr_name": "midScatterWeight"},
            "midScatterRadius": {"node": "aiSkin", "attr_name": "midScatterRadius"},
            # deepScatter
            "deepScatterColor": {
                "node": "aiSkin",
                "attr_name": "deepScatterColor",
            },
            "deepScatterWeight": {"node": "aiSkin", "attr_name": "deepScatterWeight"},
            "deepScatterRadius": {"node": "aiSkin", "attr_name": "deepScatterRadius"},
            # primaryReflection
            "primaryReflectionColor": {"node": "aiSkin", "attr_name": "specularColor"},
            "primaryReflectionWeight": {
                "node": "aiSkin",
                "attr_name": "specularWeight",
            },
            "primaryReflectionRoughness": {
                "node": "aiSkin",
                "attr_name": "specularRoughness",
            },
            # secondaryReflection
            "secondaryReflectionColor": {"node": "aiSkin", "attr_name": "sheenColor"},
            "secondaryReflectionWeight": {"node": "aiSkin", "attr_name": "sheenWeight"},
            "secondaryReflectionRoughness": {
                "node": "aiSkin",
                "attr_name": "sheenRoughness",
            },
            # bump
            "normalCamera": {"node": "aiSkin", "attr_name": "normalCamera"},
            # sss multiplier
            "globalSssRadiusMultiplier": {
                "node": "aiSkin",
                "attr_name": "globalSssRadiusMultiplier",
            },
        }

        all_skin_sss = pm.ls(type="aiSkinSss")
        for skin_sss in all_skin_sss:
            skin = pm.shadingNode("aiSkin", asShader=1)
            standard = pm.shadingNode("aiStandard", asShader=1)
            # route the aiSkin result through the aiStandard's emission
            skin.attr("outColor") >> standard.attr("emissionColor")
            standard.setAttr("emission", 1.0)
            skin.setAttr("fresnelAffectSss", 0)  # to match the previous behaviour
            node_mapper = {"aiSkin": skin, "aiStandard": standard}
            for attr in attr_mapper.keys():
                inputs = skin_sss.attr(attr).inputs(p=1, c=1)
                if inputs:
                    # copy inputs (incoming texture/utility connections)
                    destination_attr_name = inputs[0][0].name().split(".")[-1]
                    source = inputs[0][1]
                    if destination_attr_name in attr_mapper:
                        node = attr_mapper[destination_attr_name]["node"]
                        attr_name = attr_mapper[destination_attr_name]["attr_name"]
                        source >> node_mapper[node].attr(attr_name)
                    else:
                        source >> skin.attr(destination_attr_name)
                else:
                    # copy values, scaled by the optional multiplier
                    node = node_mapper[attr_mapper[attr]["node"]]
                    attr_name = attr_mapper[attr]["attr_name"]
                    multiplier = attr_mapper[attr].get("multiplier", 1.0)
                    attr_value = skin_sss.getAttr(attr)
                    if isinstance(attr_value, tuple):
                        attr_value = list(map(lambda x: x * multiplier, attr_value))
                    else:
                        attr_value *= multiplier
                    node.attr(attr_name).set(attr_value)

            # after everything is set up
            # connect the aiStandard to the shadingEngine
            for source, dest in skin_sss.outputs(p=1, c=1):
                standard.attr("outColor") >> dest

            # and rename the materials
            orig_name = skin_sss.name()

            # delete the skinSSS node first so the original name is free
            pm.delete(skin_sss)
            skin_name = orig_name
            standard_name = "%s_aiStandard" % orig_name
            skin.rename(skin_name)
            standard.rename(standard_name)
            print("updated %s" % skin_name)
    @classmethod
    def normalize_sss_weights(cls):
        """normalizes the sss weights so their total weight is 1.0

        if a aiStandard is assigned to the selected object it searches for an
        aiSkin in the emission channel.

        the script considers 0.7 as the highest diffuse value for aiStandard
        """
        # get the shader of the selected object via its shading engine
        assigned_shader = pm.ls(
            pm.ls(sl=1)[0].getShape().outputs(type="shadingEngine")[0].inputs(), mat=1
        )[0]
        if assigned_shader.type() == "aiStandard":
            sss_shader = assigned_shader.attr("emissionColor").inputs()[0]
            diffuse_weight = assigned_shader.attr("Kd").get()
        else:
            sss_shader = assigned_shader
            diffuse_weight = 0

        def get_attr_or_texture(attr):
            # When a texture drives the attribute, normalize through the
            # texture's gain attribute instead of the (connected) weight.
            if attr.inputs():
                # we probably have a texture assigned
                # so use its multiply attribute
                texture = attr.inputs()[0]
                # NOTE(review): this default leaves ``attr`` pointing at
                # "multiply" even for texture types other than AiImage/File
                # -- confirm that is intended
                attr = texture.attr("multiply")
                if isinstance(texture, pm.nt.AiImage):
                    attr = texture.attr("multiply")
                elif isinstance(texture, pm.nt.File):
                    attr = texture.attr("colorGain")
            return attr

        shallow_attr = get_attr_or_texture(sss_shader.attr("shallowScatterWeight"))
        mid_attr = get_attr_or_texture(sss_shader.attr("midScatterWeight"))
        deep_attr = get_attr_or_texture(sss_shader.attr("deepScatterWeight"))

        # color attributes return tuples: average the channels
        shallow_weight = shallow_attr.get()
        if isinstance(shallow_weight, tuple):
            shallow_weight = (
                shallow_weight[0] + shallow_weight[1] + shallow_weight[2]
            ) / 3.0
        mid_weight = mid_attr.get()
        if isinstance(mid_weight, tuple):
            mid_weight = (mid_weight[0] + mid_weight[1] + mid_weight[2]) / 3.0
        deep_weight = deep_attr.get()
        if isinstance(deep_weight, tuple):
            deep_weight = (deep_weight[0] + deep_weight[1] + deep_weight[2]) / 3.0

        # scale the three weights so diffuse/0.7 + sss weights total 1.0
        total_sss_weight = shallow_weight + mid_weight + deep_weight
        mult = (1 - diffuse_weight / 0.7) / total_sss_weight

        # color attributes reject scalar set(); retry with three channels
        try:
            shallow_attr.set(shallow_weight * mult)
        except RuntimeError:
            w = shallow_weight * mult
            shallow_attr.set(w, w, w)

        try:
            mid_attr.set(mid_weight * mult)
        except RuntimeError:
            w = mid_weight * mult
            mid_attr.set(w, w, w)

        try:
            deep_attr.set(deep_weight * mult)
        except RuntimeError:
            w = deep_weight * mult
            deep_attr.set(w, w, w)
    @classmethod
    def create_eye_shader_and_controls(cls):
        """This is pretty much specific to the way we are creating eye shaders
        for characters in KKS project, but it is a useful trick, select the
        inner eye objects before running

        Adds eye control attributes on the character root, wires them to
        shared aiImage textures, and rewires each selected eye's shader so
        the diffuse texture also drives emission, specular and SSS color.
        """
        eyes = pm.ls(sl=1)
        if not eyes:
            return

        # character root node: the topmost parent of the first eye
        char = eyes[0].getAllParents()[-1]
        place = pm.shadingNode("place2dTexture", asUtility=1)
        emission_image = pm.shadingNode("aiImage", asTexture=1)
        ks_image = pm.shadingNode("aiImage", asTexture=1)
        # project-specific texture locations (KKS repository)
        texture_paths = {
            "emission": "$REPO1977/KKS/Assets/Characters/Body_Parts/Textures/"
            "char_eyeInner_light_v001.png",
            "Ks": "$REPO1977/KKS/Assets/Characters/Body_Parts/Textures/"
            "char_eyeInner_spec_v002.png",
        }
        emission_image.setAttr("filename", texture_paths["emission"])
        ks_image.setAttr("filename", texture_paths["Ks"])
        place.outUV >> emission_image.attr("uvcoords")

        # add the control attributes on the character root (idempotent)
        if not char.hasAttr("eyeLightStrength"):
            char.addAttr("eyeLightStrength", at="double", min=0, dv=0.0, k=1)
        else:
            # set the default
            char.attr("eyeLightStrength").set(0)
        if not char.hasAttr("eyeLightAngle"):
            char.addAttr("eyeLightAngle", at="double", dv=0, k=1)
        if not char.hasAttr("eyeDiffuseWeight"):
            char.addAttr("eyeDiffuseWeight", at="double", dv=0.15, k=1, min=0, max=1)
        if not char.hasAttr("eyeSpecularWeight"):
            char.addAttr("eyeSpecularWeight", at="double", dv=1.0, k=1, min=0, max=1)
        if not char.hasAttr("eyeSSSWeight"):
            char.addAttr("eyeSSSWeight", at="double", dv=0.5, k=1, min=0, max=1)

        # connect eye light strength
        char.eyeLightStrength >> emission_image.attr("multiplyR")
        char.eyeLightStrength >> emission_image.attr("multiplyG")
        char.eyeLightStrength >> emission_image.attr("multiplyB")

        # connect eye light angle
        char.eyeLightAngle >> place.attr("rotateFrame")

        # connect specular weight
        char.eyeSpecularWeight >> ks_image.attr("multiplyR")
        char.eyeSpecularWeight >> ks_image.attr("multiplyG")
        char.eyeSpecularWeight >> ks_image.attr("multiplyB")

        for eye in eyes:
            shading_engine = eye.getShape().outputs(type="shadingEngine")[0]
            shader = pm.ls(shading_engine.inputs(), mat=1)[0]

            # connect the diffuse shader input to the emissionColor
            diffuse_texture = shader.attr("color").inputs(p=1, s=1)[0]
            diffuse_texture >> shader.attr("emissionColor")
            emission_image.outColorR >> shader.attr("emission")

            # also connect it to specular color
            diffuse_texture >> shader.attr("KsColor")

            # connect the Ks image to the specular weight
            ks_image.outColorR >> shader.attr("Ks")

            # also connect it to sss color
            diffuse_texture >> shader.attr("KsssColor")

            char.eyeDiffuseWeight >> shader.attr("Kd")
            char.eyeSSSWeight >> shader.attr("Ksss")

            # set some default values
            shader.attr("diffuseRoughness").set(0)
            shader.attr("Kb").set(0)
            shader.attr("directDiffuse").set(1)
            shader.attr("indirectDiffuse").set(1)
            shader.attr("specularRoughness").set(0.4)
            shader.attr("specularAnisotropy").set(0.5)
            shader.attr("specularRotation").set(0)
            shader.attr("specularFresnel").set(0)
            shader.attr("Kr").set(0)
            shader.attr("enableInternalReflections").set(0)
            shader.attr("Kt").set(0)
            shader.attr("transmittance").set([1, 1, 1])
            shader.attr("opacity").set([1, 1, 1])
            shader.attr("sssRadius").set([1, 1, 1])

        # restore the original selection
        pm.select(eyes)
@classmethod
def randomize_attr(cls, nodes, attr, min, max, pre=0.1):
"""Randomizes the given attributes of the given nodes
:param list nodes:
:param str attr:
:param float, int min:
:param float, int max:
:return:
"""
import random
import math
rand = random.random
floor = math.floor
for node in nodes:
r = rand() * float(max - min) + float(min)
r = floor(r / pre) * pre
node.setAttr(attr, r)
@classmethod
def randomize_light_color_temp(cls, min_field, max_field):
"""Randomizes the color temperature of selected lights
:param min:
:param max:
:return:
"""
min = pm.floatField(min_field, q=1, v=1)
max = pm.floatField(max_field, q=1, v=1)
cls.randomize_attr(
[node.getShape() for node in pm.ls(sl=1)], "aiColorTemperature", min, max, 1
)
@classmethod
def randomize_light_intensity(cls, min_field, max_field):
"""Randomizes the intensities of selected lights
:param min:
:param max:
:return:
"""
min = pm.floatField(min_field, q=1, v=1)
max = pm.floatField(max_field, q=1, v=1)
cls.randomize_attr(
[node.getShape() for node in pm.ls(sl=1)], "aiExposure", min, max, 0.1
)
@classmethod
def setup_outer_eye_render_attributes(cls):
"""sets outer eye render attributes for characters, select outer eye
objects and run this
"""
for node in pm.ls(sl=1):
shape = node.getShape()
shape.setAttr("castsShadows", 0)
shape.setAttr("visibleInReflections", 0)
shape.setAttr("visibleInRefractions", 0)
shape.setAttr("aiSelfShadows", 0)
shape.setAttr("aiOpaque", 0)
shape.setAttr("aiVisibleInDiffuse", 0)
shape.setAttr("aiVisibleInGlossy", 0)
@classmethod
def setup_window_glass_render_attributes(cls):
"""sets window glass render attributes for environments, select window
glass objects and run this
"""
shader_name = "toolbox_glass_shader"
shaders = pm.ls("%s*" % shader_name)
selection = pm.ls(sl=1)
if len(shaders) > 0:
shader = shaders[0]
else:
shader = pm.shadingNode("aiStandard", asShader=1, name="%s#" % shader_name)
shader.setAttr("Ks", 1)
shader.setAttr("specularRoughness", 0)
shader.setAttr("Kr", 0)
shader.setAttr("enableInternalReflections", 0)
shader.setAttr("Kt", 0)
shader.setAttr("KtColor", (0, 0, 0))
shape_attributes = [
("castsShadows", 0),
("visibleInReflections", 0),
("visibleInRefractions", 0),
("aiSelfShadows", 0),
("aiOpaque", 1),
("aiVisibleInDiffuse", 0),
("aiVisibleInGlossy", 0),
]
for node in selection:
shape = node.getShape()
map(lambda x: shape.setAttr(*x), shape_attributes)
if isinstance(shape, pm.nt.AiStandIn):
# get the glass shader or create one
shape.overrideShaders.set(1)
# assign it to the stand in
pm.select(node)
pm.hyperShade(assign=shader)
@classmethod
def dummy_window_light_plane(cls):
"""creates or updates the dummy window plane for the given area light"""
area_light_list = pm.selected()
from anima.dcc.mayaEnv import auxiliary
for light in area_light_list:
dwl = auxiliary.DummyWindowLight()
dwl.light = light
dwl.update()
@classmethod
def setup_z_limiter(cls):
"""creates z limiter setup"""
shader_name = "z_limiter_shader#"
shaders = pm.ls("%s*" * shader_name)
if len(shaders) > 0:
shader = shaders[0]
else:
shader = pm.shadingNode(
"surfaceShader", asShader=1, name="%s#" % shader_name
)
    @classmethod
    def convert_file_node_to_ai_image_node(cls):
        """converts the file node to aiImage node

        Every selected ``file`` node is replaced by an ``aiImage`` with the
        same texture path, name and outgoing connections. The place2dTexture
        is kept (and connected) only when its values differ from the defaults
        below; otherwise it is deleted.
        """
        # place2dTexture factory defaults; a placement matching all of these
        # is redundant and can be removed
        default_values = {
            "coverageU": 1,
            "coverageV": 1,
            "translateFrameU": 0,
            "translateFrameV": 0,
            "rotateFrame": 0,
            "repeatU": 1,
            "repeatV": 1,
            "offsetU": 0,
            "offsetV": 0,
            "rotateUV": 0,
            "noiseU": 0,
            "noiseV": 0,
        }
        for node in pm.ls(sl=1, type="file"):
            node_name = node.name()
            path = node.getAttr("fileTextureName")
            ai_image = pm.shadingNode("aiImage", asTexture=1)
            ai_image.setAttr("filename", path)

            # check the placement node
            placements = node.listHistory(type="place2dTexture")
            if len(placements):
                placement = placements[0]
                # check default values
                if any(
                    [
                        placement.getAttr(attr_name) != default_values[attr_name]
                        for attr_name in default_values
                    ]
                ):
                    # connect the placement to the aiImage
                    placement.outUV >> ai_image.uvcoords
                else:
                    # delete it
                    pm.delete(placement)

            # connect the aiImage outputs in place of the file node's
            # (assumes matching attribute names on aiImage -- TODO confirm
            # for non-color outputs)
            for attr_out, attr_in in node.outputs(p=1, c=1):
                attr_name = attr_out.name().split(".")[-1]
                if attr_name == "message":
                    continue
                ai_image.attr(attr_name) >> attr_in

            # delete the File node
            pm.delete(node)

            # rename the aiImage node to take over the old name
            ai_image.rename(node_name)
    @classmethod
    def create_generic_tooth_shader(cls):
        """creates generic tooth shader for selected objects

        Builds (or reuses, via ``auxiliary.create_shader``) an aiStandard
        tooth look with SSS and a noise-driven bump, then assigns it to every
        selected object.
        """
        shader_name = "toolbox_generic_tooth_shader#"
        selection = pm.ls(sl=1)
        # declarative shader network: aiStandard with a bump2d <- aiNoise chain
        # on normalCamera
        shader_tree = {
            "type": "aiStandard",
            "class": "asShader",
            "attr": {
                "color": [1, 0.909, 0.815],
                "Kd": 0.2,
                "KsColor": [1, 1, 1],
                "Ks": 0.5,
                "specularRoughness": 0.10,
                "specularFresnel": 1,
                "Ksn": 0.05,
                "enableInternalReflections": 0,
                "KsssColor": [1, 1, 1],
                "Ksss": 1,
                "sssRadius": [1, 0.853, 0.68],
                "normalCamera": {
                    "output": "outNormal",
                    "type": "bump2d",
                    "class": "asTexture",
                    "attr": {
                        "bumpDepth": 0.05,
                        "bumpValue": {
                            "output": "outValue",
                            "type": "aiNoise",
                            "class": "asUtility",
                            "attr": {
                                "scaleX": 4,
                                "scaleY": 0.250,
                                "scaleZ": 4,
                            },
                        },
                    },
                },
            },
        }
        shader = auxiliary.create_shader(shader_tree, shader_name)
        for node in selection:
            # assign it to the stand in
            pm.select(node)
            pm.hyperShade(assign=shader)
@classmethod
def create_generic_gum_shader(self):
"""set ups generic gum shader for selected objects"""
shader_name = "toolbox_generic_gum_shader#"
selection = pm.ls(sl=1)
shader_tree = {
"type": "aiStandard",
"class": "asShader",
"attr": {
"color": [0.993, 0.596, 0.612],
"Kd": 0.35,
"KsColor": [1, 1, 1],
"Ks": 0.010,
"specularRoughness": 0.2,
"enableInternalReflections": 0,
"KsssColor": [1, 0.6, 0.6],
"Ksss": 0.5,
"sssRadius": [0.5, 0.5, 0.5],
"normalCamera": {
"output": "outNormal",
"type": "bump2d",
"class": "asTexture",
"attr": {
"bumpDepth": 0.1,
"bumpValue": {
"output": "outValue",
"type": "aiNoise",
"class": "asUtility",
"attr": {
"scaleX": 4,
"scaleY": 1,
"scaleZ": 4,
},
},
},
},
},
}
shader = auxiliary.create_shader(shader_tree, shader_name)
for node in selection:
# assign it to the stand in
pm.select(node)
pm.hyperShade(assign=shader)
@classmethod
def create_generic_tongue_shader(self):
"""set ups generic tongue shader for selected objects"""
shader_name = "toolbox_generic_tongue_shader#"
selection = pm.ls(sl=1)
shader_tree = {
"type": "aiStandard",
"class": "asShader",
"attr": {
"color": [0.675, 0.174, 0.194],
"Kd": 0.35,
"KsColor": [1, 1, 1],
"Ks": 0.010,
"specularRoughness": 0.2,
"enableInternalReflections": 0,
"KsssColor": [1, 0.3, 0.3],
"Ksss": 0.5,
"sssRadius": [0.5, 0.5, 0.5],
"normalCamera": {
"output": "outNormal",
"type": "bump2d",
"class": "asTexture",
"attr": {
"bumpDepth": 0.1,
"bumpValue": {
"output": "outValue",
"type": "aiNoise",
"class": "asUtility",
"attr": {
"scaleX": 4,
"scaleY": 1,
"scaleZ": 4,
},
},
},
},
},
}
shader = auxiliary.create_shader(shader_tree, shader_name)
for node in selection:
# assign it to the stand in
pm.select(node)
pm.hyperShade(assign=shader)
    @classmethod
    def create_ea_matte(cls):
        """creates "ebesinin ami" matte shader with opacity for selected
        objects.

        It is called "EA Matte" for one reason, this matte is not necessary in
        normal working conditions. That is you change the color and look of
        some 3D element in 3D application and do an artistic grading at post to
        the whole plate, not to individual elements in the render.

        And because we are forced to create this matte layer, we thought that
        we should give it a proper name.

        .. note:: NOTE(review): despite the comments below, the visible code
           only registers render-layer overrides on the Arnold render options
           (disabling GI/lights/textures and the Z/motion-vector AOVs) -- no
           surface shaders are created here; confirm whether the shader step
           lives elsewhere.
        """
        # get the selected objects
        # for each object create a new surface shader with the opacity
        # channel having the opacity of the original shader

        # create a lut for objects that have the same material not to cause
        # multiple materials to be created
        daro = pm.PyNode("defaultArnoldRenderOptions")
        # per-render-layer overrides: strip lighting/GI so the layer renders
        # flat mattes quickly
        attrs = {
            "AASamples": 4,
            "GIDiffuseSamples": 0,
            "GIGlossySamples": 0,
            "GIRefractionSamples": 0,
            "sssBssrdfSamples": 0,
            "volumeIndirectSamples": 0,
            "GITotalDepth": 0,
            "GIDiffuseDepth": 0,
            "GIGlossyDepth": 0,
            "GIReflectionDepth": 0,
            "GIRefractionDepth": 0,
            "GIVolumeDepth": 0,
            "ignoreTextures": 1,
            "ignoreAtmosphere": 1,
            "ignoreLights": 1,
            "ignoreShadows": 1,
            "ignoreBump": 1,
            "ignoreSss": 1,
        }
        for attr in attrs:
            pm.editRenderLayerAdjustment(daro.attr(attr))
            daro.setAttr(attr, attrs[attr])

        # disable the Z and motion-vector AOVs on this layer if they exist
        try:
            aov_z = pm.PyNode("aiAOV_Z")
            pm.editRenderLayerAdjustment(aov_z.attr("enabled"))
            aov_z.setAttr("enabled", 0)
        except pm.MayaNodeError:
            pass

        try:
            aov_mv = pm.PyNode("aiAOV_motionvector")
            pm.editRenderLayerAdjustment(aov_mv.attr("enabled"))
            aov_mv.setAttr("enabled", 0)
        except pm.MayaNodeError:
            pass

        # mattes must keep the full frame: turn autocrop off for this layer
        dad = pm.PyNode("defaultArnoldDriver")
        pm.editRenderLayerAdjustment(dad.attr("autocrop"))
        dad.setAttr("autocrop", 0)
    @classmethod
    def create_z_layer(cls):
        """creates z layer with arnold render settings

        Registers render-layer overrides on the Arnold render options that
        disable shading/lighting/GI and enable the Z and motion-vector AOVs,
        so the current layer renders depth data only.
        """
        daro = pm.PyNode("defaultArnoldRenderOptions")
        # per-render-layer overrides: strip shading and GI for a pure Z pass
        attrs = {
            "AASamples": 4,
            "GIDiffuseSamples": 0,
            "GIGlossySamples": 0,
            "GIRefractionSamples": 0,
            "sssBssrdfSamples": 0,
            "volumeIndirectSamples": 0,
            "GITotalDepth": 0,
            "GIDiffuseDepth": 0,
            "GIGlossyDepth": 0,
            "GIReflectionDepth": 0,
            "GIRefractionDepth": 0,
            "GIVolumeDepth": 0,
            "ignoreShaders": 1,
            "ignoreAtmosphere": 1,
            "ignoreLights": 1,
            "ignoreShadows": 1,
            "ignoreBump": 1,
            "ignoreNormalSmoothing": 1,
            "ignoreDof": 1,
            "ignoreSss": 1,
        }
        for attr in attrs:
            pm.editRenderLayerAdjustment(daro.attr(attr))
            daro.setAttr(attr, attrs[attr])

        # enable the Z and motion-vector AOVs on this layer if they exist
        try:
            aov_z = pm.PyNode("aiAOV_Z")
            pm.editRenderLayerAdjustment(aov_z.attr("enabled"))
            aov_z.setAttr("enabled", 1)
        except pm.MayaNodeError:
            pass

        try:
            aov_mv = pm.PyNode("aiAOV_motionvector")
            pm.editRenderLayerAdjustment(aov_mv.attr("enabled"))
            aov_mv.setAttr("enabled", 1)
        except pm.MayaNodeError:
            pass

        # autocrop is fine for data passes: turn it on for this layer
        dad = pm.PyNode("defaultArnoldDriver")
        pm.editRenderLayerAdjustment(dad.attr("autocrop"))
        dad.setAttr("autocrop", 1)
@classmethod
def generate_reflection_curve(self):
"""Generates a curve which helps creating specular at the desired point"""
from maya.OpenMaya import MVector
from anima.dcc.mayaEnv import auxiliary
vtx = pm.ls(sl=1)[0]
normal = vtx.getNormal(space="world")
panel = auxiliary.Playblaster.get_active_panel()
camera = pm.PyNode(pm.modelPanel(panel, q=1, cam=1))
camera_axis = MVector(0, 0, -1) * camera.worldMatrix.get()
refl = camera_axis - 2 * normal.dot(camera_axis) * normal
# create a new curve
p1 = vtx.getPosition(space="world")
p2 = p1 + refl
curve = pm.curve(d=1, p=[p1, p2])
# move pivot to the first point
pm.xform(curve, rp=p1, sp=p1)
@classmethod
def import_gpu_content(self):
"""imports the selected GPU content"""
import os
imported_nodes = []
for node in pm.ls(sl=1):
gpu_node = node.getShape()
gpu_path = gpu_node.getAttr("cacheFileName")
new_nodes = pm.mel.eval(
'AbcImport -mode import -reparent "%s" "%s";'
% (node.fullPath(), os.path.expandvars(gpu_path))
)
# get imported nodes
new_nodes = node.getChildren()
new_nodes.remove(gpu_node)
imported_node = None
# filter material node
for n in new_nodes:
if n.name() != "materials":
imported_node = n
else:
pm.delete(n)
if imported_node:
imported_node.t.set(0, 0, 0)
imported_node.r.set(0, 0, 0)
imported_node.s.set(1, 1, 1)
pm.parent(imported_node, world=1)
imported_nodes.append(imported_node)
pm.select(imported_nodes)
@classmethod
def render_slicer(self):
"""A tool for slicing big render scenes
:return:
"""
# TODO: Add UI call for Render Slicer
raise NotImplementedError("This UI is not implemented yet!")
@classmethod
def move_cache_files_wrapper(cls, source_driver_field, target_driver_field):
"""Wrapper for move_cache_files() command
:param source_driver_field: Text field for source driver
:param target_driver_field: Text field for target driver
:return:
"""
source_driver = source_driver_field.text()
target_driver = target_driver_field.text()
Render.move_cache_files(source_driver, target_driver)
    @classmethod
    def move_cache_files(cls, source_driver, target_driver):
        """moves the selected cache files to another location

        Scans the selected aiStandIn/aiVolume nodes, moves every cache file
        (expanding ``#`` frame padding to a glob) from ``source_driver`` to
        ``target_driver``, and repoints the nodes to the new location.

        :param source_driver: Path prefix (drive) the caches currently live on.
        :param target_driver: Replacement path prefix to move them to.
        :return:
        """
        #
        # Move fur caches to new server
        #
        import os
        import shutil
        import glob

        pdm = ProgressManager()

        selected_nodes = pm.ls(sl=1)
        caller = pdm.register(len(selected_nodes), title="Moving Cache Files")
        for node in selected_nodes:
            ass_node = node.getShape()
            if not isinstance(ass_node, (pm.nt.AiStandIn, pm.nt.AiVolume)):
                continue

            # the cache path lives on a different attribute per node type
            if isinstance(ass_node, pm.nt.AiStandIn):
                ass_path = ass_node.dso.get()
            elif isinstance(ass_node, pm.nt.AiVolume):
                ass_path = ass_node.filename.get()

            ass_path = os.path.normpath(os.path.expandvars(ass_path))

            # give info to user
            caller.title = "Moving: %s" % ass_path

            # check if it is in the source location
            if source_driver not in ass_path:
                continue

            # only handle the expected cache formats per node type
            if isinstance(ass_node, pm.nt.AiStandIn):
                if ".ass.gz" not in ass_path:
                    continue
            elif isinstance(ass_node, pm.nt.AiVolume):
                if ".vdb" not in ass_path:
                    continue

            # get the dirname
            ass_source_dir = os.path.dirname(ass_path)
            ass_target_dir = ass_source_dir.replace(source_driver, target_driver)

            # create the intermediate folders at destination
            try:
                os.makedirs(ass_target_dir)
            except OSError:
                # dir already exists
                pass

            # get all files list: turn "#" frame padding into a glob and
            # match any .ass* extension variant
            pattern = re.subn(r"[#]+", "*", ass_path)[0].replace(".ass.gz", ".ass*")
            all_cache_files = glob.glob(pattern)
            inner_caller = pdm.register(len(all_cache_files))
            for source_f in all_cache_files:
                target_f = source_f.replace(source_driver, target_driver)
                # move files to new location
                shutil.move(source_f, target_f)
                inner_caller.step(message="Moving: %s" % source_f)
            inner_caller.end_progress()

            # finally update DSO path
            if isinstance(ass_node, pm.nt.AiStandIn):
                ass_node.dso.set(ass_path.replace(source_driver, target_driver))
            elif isinstance(ass_node, pm.nt.AiVolume):
                ass_node.filename.set(ass_path.replace(source_driver, target_driver))
            caller.step()
        caller.end_progress()
    @classmethod
    def generate_rsproxy_from_selection(cls, per_selection=False):
        """generates a temp rs file from selected nodes and hides the selected
        nodes

        The .rs files are exported next to the current version under
        ``Outputs/rs`` and proxy nodes are parented under a shared
        ``temp_rs_proxies_grp`` transform.

        :param bool per_selection: Generates one rs file per selected objects
          if True. Default is False.
        """
        import os
        import tempfile
        import shutil
        from anima.dcc.mayaEnv import auxiliary
        from anima.dcc import mayaEnv

        m = mayaEnv.Maya()
        v = m.get_current_version()

        nodes = pm.ls(sl=1)

        # reuse the shared proxy group if it already exists
        temp_rs_proxies_grp = None
        if pm.ls("temp_rs_proxies_grp"):
            temp_rs_proxies_grp = pm.ls("temp_rs_proxies_grp")[0]
        else:
            temp_rs_proxies_grp = pm.nt.Transform(name="temp_rs_proxies_grp")

        rs_output_folder_path = os.path.join(v.absolute_path, "Outputs/rs").replace(
            "\\", "/"
        )
        try:
            os.makedirs(rs_output_folder_path)
        except OSError:
            pass

        def _generate_rs():
            # export the current selection to a temp .rs, move it to the
            # version's Outputs/rs folder, hide the originals and create a
            # proxy node pointing at the moved file
            export_command = 'rsProxy -fp "%(path)s" -c -z -sl;'
            temp_rs_full_path = tempfile.mktemp(suffix=".rs")
            rs_full_path = os.path.join(
                rs_output_folder_path, os.path.basename(temp_rs_full_path)
            ).replace("\\", "/")
            pm.mel.eval(export_command % {"path": temp_rs_full_path.replace("\\", "/")})
            shutil.move(temp_rs_full_path, rs_full_path)
            [n.v.set(0) for n in pm.ls(sl=1)]
            rs_proxy_node, rs_proxy_mesh = auxiliary.create_rs_proxy_node(
                path=rs_full_path
            )
            rs_proxy_tra = rs_proxy_mesh.getParent()
            rs_proxy_tra.rename("temp_rs_proxy#")
            pm.parent(rs_proxy_tra, temp_rs_proxies_grp)

        if per_selection:
            for node in nodes:
                pm.select(node)
                _generate_rs()
        else:
            pm.select(nodes)
            _generate_rs()
@classmethod
def import_image_as_plane(cls):
"""The replica of Blender tool"""
# get the image path
image_path = pm.fileDialog2(fileMode=1)
# get the image width and height
image_path = image_path[0] if image_path else ""
from PIL import Image
img = Image.open(image_path)
w, h = img.size
# create a new plane with that ratio
# keep the height 1
transform, poly_plane = pm.polyPlane(
axis=[0, 0, 1], cuv=1, h=1, w=float(w) / float(h), texture=2, sh=1, sw=1
)
shape = transform.getShape()
shape.instObjGroups[0].disconnect()
# assign a surface shader
surface_shader = pm.shadingNode("surfaceShader", asShader=1)
shading_engine = pm.nt.ShadingEngine()
surface_shader.outColor >> shading_engine.surfaceShader
# assign the given file as texture
placement = pm.nt.Place2dTexture()
file_texture = pm.nt.File()
pm.select([placement, file_texture])
cls.connect_placement2d_to_file()
file_texture.fileTextureName.set(image_path)
file_texture.outColor >> surface_shader.outColor
file_texture.outTransparency >> surface_shader.outTransparency
# pm.sets(shading_engine, fe=transform)
pm.select(shape)
pm.hyperShade(assign=surface_shader)
class RenderSlicer(object):
"""A tool to help slice single frame renders in to many little parts which
will help it to be rendered in small parts in a render farm.
"""
    def __init__(self, camera=None):
        """Initialize the slicer for a camera.

        :param camera: A Maya camera node; assigned through the ``camera``
            property so it gets validated and receives the slicer data
            attributes. Note that ``_validate_camera`` raises TypeError for
            ``None``, so a camera is effectively required.
        """
        self._camera = None
        self.camera = camera
    @property
    def slices_in_x(self):
        """getter for _slices_in_x attribute

        The value is stored on the camera's ``slicesInX`` attribute, not on
        the Python instance.
        """
        return self.camera.slicesInX.get()
    @slices_in_x.setter
    def slices_in_x(self, slices_in_x):
        """setter for _slices_in_x attribute

        Validates the value (positive int) and stores it on the camera's
        ``slicesInX`` attribute.
        """
        self.camera.slicesInX.set(self._validate_slices_in_x(slices_in_x))
@classmethod
def _validate_slices_in_x(cls, slices_in_x):
"""validates the slices_in_x value"""
if not isinstance(slices_in_x, int):
raise TypeError(
"%s.slices_in_x should be a non-zero positive integer, not %s"
% (cls.__name__, slices_in_x.__class__.__name__)
)
if slices_in_x <= 0:
raise ValueError(
"%s.slices_in_x should be a non-zero positive integer" % cls.__name__
)
return slices_in_x
    @property
    def slices_in_y(self):
        """getter for _slices_in_y attribute

        The value is stored on the camera's ``slicesInY`` attribute, not on
        the Python instance.
        """
        return self.camera.slicesInY.get()
    @slices_in_y.setter
    def slices_in_y(self, slices_in_y):
        """setter for _slices_in_y attribute

        Validates the value (positive int) and stores it on the camera's
        ``slicesInY`` attribute.
        """
        self.camera.slicesInY.set(self._validate_slices_in_y(slices_in_y))
@classmethod
def _validate_slices_in_y(cls, slices_in_y):
"""validates the slices_in_y value"""
if not isinstance(slices_in_y, int):
raise TypeError(
"%s.slices_in_y should be a non-zero positive integer, not %s"
% (cls.__name__, slices_in_y.__class__.__name__)
)
if slices_in_y <= 0:
raise ValueError(
"%s.slices_in_y should be a non-zero positive integer" % cls.__name__
)
return slices_in_y
@property
def camera(self):
"""getter for the _camera attribute"""
return self._camera
@camera.setter
def camera(self, camera):
"""setter for the _camera attribute
:param camera: A Maya camera
:return: None
"""
camera = self._validate_camera(camera)
self._create_data_attributes(camera)
self._camera = camera
@classmethod
def _validate_camera(cls, camera):
"""validates the given camera"""
if camera is None:
raise TypeError("Please supply a Maya camera")
if not isinstance(camera, pm.nt.Camera):
raise TypeError(
"%s.camera should be a Maya camera, not %s"
% (cls.__name__, camera.__class__.__name__)
)
return camera
@classmethod
def _create_data_attributes(cls, camera):
"""creates slicer data attributes inside the camera
:param pm.nt.Camera camera: A maya camera
"""
# store the original resolution
# slices in x
# slices in y
# is_sliced
# non_sliced_resolution_x
# non_sliced_resolution_y
# slices_in_x
# slices_in_y
if not camera.hasAttr("isSliced"):
camera.addAttr("isSliced", at="bool")
if not camera.hasAttr("nonSlicedResolutionX"):
camera.addAttr("nonSlicedResolutionX", at="short")
if not camera.hasAttr("nonSlicedResolutionY"):
camera.addAttr("nonSlicedResolutionY", at="short")
if not camera.hasAttr("slicesInX"):
camera.addAttr("slicesInX", at="short")
if not camera.hasAttr("slicesInY"):
camera.addAttr("slicesInY", at="short")
def _store_data(self):
"""stores slicer data inside the camera"""
self._create_data_attributes(self.camera)
self.camera.isSliced.set(self.is_sliced)
# get the current render resolution
dres = pm.PyNode("defaultResolution")
width = dres.width.get()
height = dres.height.get()
self.camera.nonSlicedResolutionX.set(width)
self.camera.nonSlicedResolutionY.set(height)
self.camera.slicesInX.set(self.slices_in_x)
self.camera.slicesInY.set(self.slices_in_y)
@property
def is_sliced(self):
"""A shortcut for the camera.isSliced attribute"""
if self.camera.hasAttr("isSliced"):
return self.camera.isSliced.get()
return False
@is_sliced.setter
def is_sliced(self, is_sliced):
"""A shortcut for the camera.isSliced attribute"""
if not self.camera.hasAttr("isSliced"):
self._create_data_attributes(self.camera)
self.camera.isSliced.set(is_sliced)
def unslice(self):
"""resets the camera to original non-sliced state"""
# unslice the camera
dres = pm.PyNode("defaultResolution")
# set the resolution to original
dres.width.set(self.camera.getAttr("nonSlicedResolutionX"))
dres.height.set(self.camera.getAttr("nonSlicedResolutionY"))
dres.pixelAspect.set(1)
self.camera.panZoomEnabled.set(0)
self.camera.isSliced.set(False)
def unslice_scene(self):
"""scans the scene cameras and unslice the scene"""
dres = pm.PyNode("defaultResolution")
dres.aspectLock.set(0)
# TODO: check multi sliced camera
for cam in pm.ls(type=pm.nt.Camera):
if cam.hasAttr("isSliced") and cam.isSliced.get():
dres.width.set(cam.nonSlicedResolutionX.get())
dres.height.set(cam.nonSlicedResolutionY.get())
dres.pixelAspect.set(1)
cam.isSliced.set(False)
def slice(self, slices_in_x, slices_in_y):
"""slices all renderable cameras"""
# set render resolution
self.unslice_scene()
self.is_sliced = True
self._store_data()
sx = self.slices_in_x = slices_in_x
sy = self.slices_in_y = slices_in_y
# set render resolution
d_res = pm.PyNode("defaultResolution")
h_res = d_res.width.get()
v_res = d_res.height.get()
# this system only works when the
d_res.aspectLock.set(0)
d_res.pixelAspect.set(1)
d_res.width.set(h_res / float(sx))
d_res.pixelAspect.set(1)
d_res.height.set(v_res / float(sy))
d_res.pixelAspect.set(1)
# use h_aperture to calculate v_aperture
h_aperture = self.camera.getAttr("horizontalFilmAperture")
# recalculate the other aperture
v_aperture = h_aperture * v_res / h_res
self.camera.setAttr("verticalFilmAperture", v_aperture)
v_aperture = self.camera.getAttr("verticalFilmAperture")
self.camera.setAttr("zoom", 1.0 / float(sx))
t = 0
for i in range(sy):
v_pan = v_aperture / (2.0 * sy) * (1 + 2 * i - sy)
for j in range(sx):
h_pan = h_aperture / (2.0 * sx) * (1 + 2 * j - sx)
pm.currentTime(t)
pm.setKeyframe(self.camera, at="horizontalPan", v=h_pan)
pm.setKeyframe(self.camera, at="verticalPan", v=v_pan)
t += 1
self.camera.panZoomEnabled.set(1)
self.camera.renderPanZoom.set(1)
d_res.pixelAspect.set(1)
class LightingSceneBuilder(object):
    """Build lighting scenes.

    This is a class that helps building lighting scenes by looking at the animation
    scenes, gathering data and then using that data to reference assets and cache files
    to the lighting scene.
    """
    # Names of the top level organizational groups created in the lighting scene.
    ANIMS_GROUP_NAME = "ANIMS"
    CAMERA_GROUP_NAME = "CAMERA"
    LAYOUTS_GROUP_NAME = "LAYOUTS"
    LIGHTS_GROUP_NAME = "LIGHTS"
    LOOK_DEVS_GROUP_NAME = "LOOK_DEVS"
    # File names of the per-project JSON look-up tables (LUTs) cached under
    # <repo>/<project.code>/References/.
    RIG_TO_CACHEABLE_LUT_FILE_NAME = "rig_to_cacheable_lut.json"
    RIG_TO_LOOK_DEV_LUT_FILE_NAME = "rig_to_look_dev_lut.json"
    def __init__(self):
        # Lazily generated file path and content of the rig -> cacheable LUT.
        self.rig_to_cacheable_lut_file_path = None
        self.rig_to_cacheable_lut = {}
        # Lazily generated file path and content of the rig -> look dev LUT.
        self.rig_to_look_dev_lut_file_path = None
        self.rig_to_look_dev_lut = {}
    def generate_rig_to_cacheable_lut_file_path(self, project):
        """Generate rig_to_cacheable_lut_file_path.

        The generated path is cached on the instance; subsequent calls return
        the cached value without validating ``project`` again.

        :param project: A Stalker project.
        :return str: The path of the JSON file.
        """
        if self.rig_to_cacheable_lut_file_path:
            return self.rig_to_cacheable_lut_file_path
        from stalker import Project
        if not isinstance(project, Project):
            raise TypeError("Please supply a stalker Project instance, not {}".format(
                project.__class__.__name__
            ))
        self.rig_to_cacheable_lut_file_path = os.path.join(
            project.repository.path,
            project.code,
            "References",
            self.RIG_TO_CACHEABLE_LUT_FILE_NAME
        )
        return self.rig_to_cacheable_lut_file_path
    def generate_rig_to_look_dev_lut_file_path(self, project):
        """Generate rig_to_look_dev_lut_file_path.

        The generated path is cached on the instance; subsequent calls return
        the cached value without validating ``project`` again.

        :param project: A Stalker project.
        :return str: The path of the JSON file.
        """
        if self.rig_to_look_dev_lut_file_path:
            return self.rig_to_look_dev_lut_file_path
        from stalker import Project
        if not isinstance(project, Project):
            raise TypeError("Please supply a stalker Project instance, not {}".format(
                project.__class__.__name__
            ))
        self.rig_to_look_dev_lut_file_path = os.path.join(
            project.repository.path,
            project.code,
            "References",
            self.RIG_TO_LOOK_DEV_LUT_FILE_NAME
        )
        return self.rig_to_look_dev_lut_file_path
    def update_rig_to_look_dev_lut(self, path):
        """Override the rig -> look dev LUT file path with a custom JSON file.

        Note that this only sets the path; the LUT content itself is loaded
        later by ``read_rig_to_look_dev_lut()``.

        :param str path: The path to the custom json file.
        """
        # override the default paths
        self.rig_to_look_dev_lut_file_path = path
    def read_rig_to_cacheable_lut(self, project):
        """Read the JSON file at the given path.

        Reads the rig -> cacheable attr value from the file for speeding the whole
        process.

        The path is for a JSON file that contains the mapping from rig_to_cacheable and
        the reverse mapping. In theory, this should make things way faster by skipping
        the loading of the reference files.

        If the file does not exist the previously held LUT is kept untouched.

        :param Project project: The Stalker Project instance.
        """
        import json
        path = self.generate_rig_to_cacheable_lut_file_path(project)
        if os.path.isfile(path):
            with open(path, "r") as f:
                self.rig_to_cacheable_lut = json.load(f)
    def write_rig_to_cacheable_lut(self, project):
        """Update the JSON file with new info.

        :param Project project: A Stalker Project instance.
        """
        import json
        path = self.generate_rig_to_cacheable_lut_file_path(project)
        try:
            os.makedirs(os.path.dirname(path))
        except OSError:
            # already exists, skip
            pass
        with open(path, "w") as f:
            json.dump(self.rig_to_cacheable_lut, f, indent=4, sort_keys=True)
    def read_rig_to_look_dev_lut(self, project):
        """Read the JSON file at the given path.

        Reads the rig -> look dev mapping from the file for speeding the whole
        process. In theory, this should make things way faster by skipping
        the loading of the reference files.

        If the file does not exist the previously held LUT is kept untouched.

        :param Project project: The Stalker Project instance.
        """
        import json
        path = self.generate_rig_to_look_dev_lut_file_path(project)
        if os.path.isfile(path):
            with open(path, "r") as f:
                self.rig_to_look_dev_lut = json.load(f)
    def get_cacheable_to_look_dev_version_lut(self, animation_version):
        """Build look dev version lut.

        Opens the given animation version, walks its references, resolves each
        reference's "cacheable" attribute value (from the cached LUT when
        available) and maps it to the latest published Look Dev version of the
        sibling Look Dev task. Re-opens the current (lighting) scene when done.

        :param Version animation_version: The animation Version to open.
        :return dict: mapping of "<cacheable><copy_number>" to a dict with
            keys "look_dev_version" (may be None) and "no_render" (list).
        """
        from stalker import Type, Task, Version
        look_dev_type = Type.query.filter(Type.name == "Look Development").first()
        if not look_dev_type:
            raise RuntimeError(
                "No Look Development task type found, please create one!"
            )
        # open the animation version
        from anima.dcc import mayaEnv
        # get the current version
        m = mayaEnv.Maya()
        # store the current version to open later on
        lighting_version = m.get_current_version()
        m.open(
            animation_version,
            force=True,
            skip_update_check=True,
            prompt=False,
            reference_depth=3
        )
        # this version may uploaded with Stalker Pyramid, so update referenced versions
        # to get a proper version.inputs list
        m.update_version_inputs()
        cacheable_to_look_dev_version_lut = {}
        references_with_no_look_dev_task = []
        references_with_no_look_dev_version = []
        # load the self.rig_to_cacheable_lut
        self.read_rig_to_cacheable_lut(animation_version.task.project)
        self.read_rig_to_look_dev_lut(animation_version.task.project)
        # now load all references
        for ref in pm.listReferences():
            # get all cacheable nodes
            ref_version = ref.version
            rig_task = ref_version.task
            rig_task_id = rig_task.id
            rig_task_id_as_str = str(rig_task_id)
            rig_take_name = ref_version.take_name
            copy_number = auxiliary.get_reference_copy_number(ref)
            cacheable_attr_value = None
            # try to use the cache file
            if rig_task_id_as_str in self.rig_to_cacheable_lut:
                if rig_take_name in self.rig_to_cacheable_lut[rig_task_id_as_str]:
                    cacheable_attr_value = \
                        self.rig_to_cacheable_lut[rig_task_id_as_str][rig_take_name]
            else:
                # LUT miss: load the reference and read the cacheable attribute
                # directly from the nodes (slow path)
                ref.load()
                cacheable_nodes = auxiliary.get_cacheable_nodes(ref)
                if not cacheable_nodes:
                    continue
                cacheable_node = cacheable_nodes[0]
                cacheable_attr_value = cacheable_node.cacheable.get()
                # store the value for the next time
                if rig_task_id_as_str not in self.rig_to_cacheable_lut:
                    self.rig_to_cacheable_lut[rig_task_id_as_str] = {}
                self.rig_to_cacheable_lut[rig_task_id_as_str][rig_take_name] = \
                    cacheable_attr_value
            # copy number disambiguates multiple references of the same rig
            cacheable_attr_value_with_copy_number = "{}{}".format(
                cacheable_attr_value, copy_number
            )
            non_renderable_objects = []
            look_dev_take_name = None
            look_dev_task = None
            if rig_task_id_as_str in self.rig_to_look_dev_lut:
                # there is a custom mapping for this rig use it
                if rig_take_name in self.rig_to_look_dev_lut[rig_task_id_as_str]:
                    lut_data = \
                        self.rig_to_look_dev_lut[rig_task_id_as_str][rig_take_name]
                    look_dev_task_id = lut_data['look_dev_task_id']
                    look_dev_take_name = lut_data['look_dev_take_name']
                    look_dev_task = Task.query.get(look_dev_task_id)
                    if "no_render" in lut_data:
                        # there are object not to be rendered
                        non_renderable_objects = lut_data["no_render"]
            else:
                # try to get the sibling look dev task
                look_dev_take_name = ref_version.take_name
                look_dev_task = Task.query\
                    .filter(Task.parent == rig_task.parent)\
                    .filter(Task.type == look_dev_type)\
                    .first()
            # no look_dev_task, we can't do anything about this asset, report it
            if not look_dev_task:
                references_with_no_look_dev_task.append(ref_version)
                # skip to the next cacheable node
                continue
            # get the latest published look dev version for this cacheable node
            latest_published_look_dev_version = Version.query\
                .filter(Version.task == look_dev_task)\
                .filter(Version.take_name == look_dev_take_name)\
                .filter(Version.is_published == True)\
                .order_by(Version.version_number.desc())\
                .first()
            if not latest_published_look_dev_version:
                references_with_no_look_dev_version.append(ref_version)
            # NOTE: the entry is added even when no published version exists,
            # so "look_dev_version" may be None -- callers (e.g. build())
            # must handle that case.
            cacheable_to_look_dev_version_lut[cacheable_attr_value_with_copy_number] = {
                "look_dev_version": latest_published_look_dev_version,
                "no_render": non_renderable_objects
            }
        # save the self.rig_to_cacheable_lut
        self.write_rig_to_cacheable_lut(animation_version.task.project)
        # re-open the lighting version
        m.open(lighting_version, force=True, skip_update_check=True, prompt=False)
        print("\nReferences With No Look Dev Task")
        print("================================")
        print("\n".join([v.absolute_full_path for v in references_with_no_look_dev_task]))
        print("\nReferences With No Look Dev Version")
        print("===================================")
        print("\n".join([v.absolute_full_path for v in references_with_no_look_dev_version]))
        import pprint
        print("\nCacheable To LookDev Version Lut")
        print("================================")
        pprint.pprint(cacheable_to_look_dev_version_lut)
        return cacheable_to_look_dev_version_lut
    def create_item_group(self, group_name, hidden=False):
        """Create an item group, reusing any existing node with that name.

        :param str group_name: The group name.
        :param bool hidden: If the group should be invisible.
        :return: The (possibly pre-existing) transform node.
        """
        query = pm.ls(group_name)
        if not query:
            group = pm.nt.Transform(name=group_name)
            group.v.set(not hidden)  # hide the freshly created group on request
        else:
            group = query[0]
        return group
    def build(
        self,
        transfer_shaders=True,
        transfer_uvs=False,
        cache_type=auxiliary.ALEMBIC
    ):
        """Build the lighting scene.

        Must be run in a scene saved under a Lighting task. References the
        animation caches and the matching look dev files, transfers shaders
        (and optionally UVs) onto the caches and organizes everything into
        the ANIMS/CAMERA/LAYOUTS/LIGHTS/LOOK_DEVS groups.

        :param bool transfer_shaders: Transfer shaders from look dev to caches.
        :param bool transfer_uvs: Also transfer UVs from look dev to caches.
        :param cache_type: One of the ``auxiliary`` cache format constants.
        :return:
        """
        from anima.dcc import mayaEnv
        # get the current version
        m = mayaEnv.Maya()
        v = m.get_current_version()
        if not v:
            raise RuntimeError(
                "No version found! Please save an empty scene as a version under the "
                "Lighting task"
            )
        # check if this is really a lighting task
        from stalker import Type
        lighting_task = v.task
        lighting_type = Type.query.filter(Type.name == "Lighting").first()
        if not lighting_type:
            raise RuntimeError("No Lighting task type found, please create one!")
        if not lighting_task.type or not lighting_task.type == lighting_type:
            raise RuntimeError(
                "This is not a lighting task, please run this in a scene related to a "
                "Lighting task."
            )
        shot = lighting_task.parent
        if not shot:
            raise RuntimeError(
                "No parent task found! It is not possible to find sibling tasks!"
            )
        # get the animation task
        animation_type = Type.query.filter(Type.name == "Animation").first()
        if not animation_type:
            raise RuntimeError("No Animation task type found, please create one!")
        from stalker import Task
        animation_task = Task.query.filter(Task.parent == shot)\
            .filter(Task.type == animation_type).first()
        if not animation_task:
            raise RuntimeError("No Animation task found!")
        # get latest animation version
        from stalker import Version
        animation_version = Version.query\
            .filter(Version.task == animation_task)\
            .filter(Version.take_name == "Main")\
            .order_by(Version.version_number.desc())\
            .first()
        if not animation_version:
            raise RuntimeError("No Animation Version under Main take is found!")
        # get the cacheable_to_look_dev_lut
        cacheable_to_look_dev_version_lut = \
            self.get_cacheable_to_look_dev_version_lut(animation_version)
        # reference all caches
        # (we are assuming that these are all generated before)
        auxiliary.auto_reference_caches()
        # create the LOOK_DEVS group if it doesn't exist
        look_devs_group = self.create_item_group(self.LOOK_DEVS_GROUP_NAME)
        anims_group = self.create_item_group(self.ANIMS_GROUP_NAME)
        camera_group = self.create_item_group(self.CAMERA_GROUP_NAME)
        # NOTE(review): lights_group is created for scene organization but is
        # never referenced again in this method.
        lights_group = self.create_item_group(self.LIGHTS_GROUP_NAME)
        # get all referenced cache files
        # to prevent referencing the same look dev more than once,
        # store the referenced look dev version in a dictionary
        look_dev_version_to_ref_node_lut = {}
        for cache_ref_node in pm.listReferences():
            if not cache_ref_node.path.endswith(
                auxiliary.CACHE_FORMAT_DATA[cache_type]["file_extension"]
            ):
                continue
            # ref namespace is equal to the cacheable_attr_value
            cacheable_attr_value = cache_ref_node.namespace
            # if this is the shotCam, renderCam or the camera, just skip it
            if any([cam.lower() in cacheable_attr_value.lower() for cam in ("shotCam", "renderCam")]):
                # parent it under CAMERA group
                pm.parent(cache_ref_node.nodes()[0], camera_group)
                # and skip the rest
                continue
            # now use the cacheable_to_look_dev_version_lut to reference the look_dev
            # file
            look_dev_version = \
                cacheable_to_look_dev_version_lut[cacheable_attr_value]['look_dev_version']
            if look_dev_version in look_dev_version_to_ref_node_lut:
                # use the same ref_node
                look_dev_ref_node = look_dev_version_to_ref_node_lut[look_dev_version]
            elif look_dev_version is not None:
                # reference the look dev file
                look_dev_ref_node = m.reference(look_dev_version)
                look_dev_version_to_ref_node_lut[look_dev_version] = look_dev_ref_node
            else:
                # no published look dev
                # skip this cacheable node
                print("Warning: No published Look Dev version found for: {}".format(
                    cacheable_attr_value
                ))
                continue
            # now we should have a reference node for the cache and a reference node for
            # the look dev
            look_dev_root_node = auxiliary.get_root_nodes(look_dev_ref_node)[0]
            cache_root_node = auxiliary.get_root_nodes(cache_ref_node)[0]
            if transfer_shaders:
                # transfer shaders from the look dev to the cache nodes
                pm.select(None)
                # look dev scenes references the model scene and the geometry is in the
                # model scene
                pm.select([look_dev_root_node, cache_root_node])
                Render.transfer_shaders()
                # hide all the transform nodes under the look_dev_root_node
                for node in pm.listRelatives(look_dev_root_node, ad=1, type=pm.nt.Transform):
                    node.v.set(0)
                # and the look dev node itself
                look_dev_root_node.v.set(0)
            if transfer_uvs:
                from anima.dcc.mayaEnv import modeling
                pm.select(None)
                pm.select([look_dev_root_node, cache_root_node])
                modeling.Model.transfer_uvs()
            # hide non renderable objects
            cache_ref_node_nodes = cache_ref_node.nodes()
            for no_render_name in cacheable_to_look_dev_version_lut[cacheable_attr_value]["no_render"]:
                for cached_node in cache_ref_node_nodes:
                    if cached_node.stripNamespace() == no_render_name:
                        cached_node.v.set(0)
                        # NOTE(review): this ``continue`` is a no-op (last
                        # statement of the loop body) -- ``break`` was
                        # possibly intended here; confirm before changing.
                        continue
            # deselect everything to prevent unpredicted errors
            pm.select(None)
            # parent the look_dev_root_node under the LOOK_DEVS group
            pm.parent(look_dev_root_node, look_devs_group)
            # parent the alembic under the ANIMS group
            pm.parent(cache_root_node, anims_group)
        # animation version inputs should have been updated
        # reference any Layouts
        layouts_group = self.create_item_group(self.LAYOUTS_GROUP_NAME)
        layout_type = Type.query.filter(Type.name == "Layout").first()
        for input_version in animation_version.inputs:
            if input_version.task.type and input_version.task.type == layout_type:
                # reference this version here too
                # use the RSProxy repr
                rs_proxy_take_name = "{}@RS".format(
                    input_version.take_name.split("@")[0]
                )
                input_version = Version.query\
                    .filter(Version.task==input_version.task)\
                    .filter(Version.take_name==rs_proxy_take_name)\
                    .filter(Version.is_published==True)\
                    .order_by(Version.version_number.desc())\
                    .first()
                if input_version:
                    ref_node = m.reference(input_version)
                    # parent it to the LAYOUTS group
                    pm.parent(ref_node.nodes()[0], layouts_group)
# * [Maya] Updated ``render.LightingSceneBuilder.get_cacheable_to_look_dev_version_lut()`` to try to fix the dreaded "InitialShader is locked" (sort of) problem.
# -*- coding: utf-8 -*-
import os
import re
import tempfile
from anima.dcc.mayaEnv import auxiliary
from anima.utils.progress import ProgressManager
from maya import cmds as cmds, mel as mel
from pymel import core as pm
class Render(object):
    """Tools for render"""
    # Render option presets used when baking GI caches to disk.
    # "bake" holds the override values to apply, "orig" caches the scene's
    # original values so they can be restored later, and "current_frame"
    # remembers the frame the bake was started at.
    # NOTE(review): the "rso" prefix and the "Outputs/rs/" paths suggest these
    # are Redshift render settings -- confirm against the code that uses them.
    rso_options = {
        "bake": {
            # motion blur settings
            "motionBlurEnable": 1,
            "motionBlurDeformationEnable": 1,
            "motionBlurNumTransformationSteps": 31,
            "motionBlurFrameDuration": 100,
            "motionBlurShutterStart": 0,
            "motionBlurShutterEnd": 1,
            "motionBlurShutterPosition": 1,
            # set GI Engines
            "primaryGIEngine": 3,
            "secondaryGIEngine": 2,
            # set file paths
            "irradiancePointCloudMode": 2,  # Rebuild (prepass only)
            "irradianceCacheMode": 2,  # Rebuild (prepass only)
            "irradiancePointCloudFilename": "Outputs/rs/ipc_baked.rsmap",
            "irradianceCacheFilename": "Outputs/rs/im_baked.rsmap",
        },
        "orig": {},
        "current_frame": 1,
    }
    # Temp file locations used to hand node attribute / shader data between
    # scenes or sessions.
    node_attr_info_temp_file_path = os.path.join(tempfile.gettempdir(), "attr_info")
    shader_data_temp_file_path = os.path.join(tempfile.gettempdir(), "shader_data")
@classmethod
def assign_random_material_color(cls):
"""assigns a lambert with a random color to the selected object"""
selected = pm.selected()
# create the lambert material
lambert = pm.shadingNode("lambert", asShader=1)
# create the shading engine
shading_engine = pm.nt.ShadingEngine()
lambert.outColor >> shading_engine.surfaceShader
# randomize the lambert color
import random
h = random.random() # 0-1
s = random.random() * 0.5 + 0.25 # 0.25-0.75
v = random.random() * 0.5 + 0.5 # 0.5 - 1
from anima.utils import hsv_to_rgb
r, g, b = hsv_to_rgb(h, s, v)
lambert.color.set(r, g, b)
pm.sets(shading_engine, fe=selected)
pm.select(selected)
@classmethod
def randomize_material_color(cls):
"""randomizes material color of selected nodes"""
selected = pm.selected()
all_materials = []
for node in selected:
shading_engines = node.listHistory(f=1, type="shadingEngine")
if not shading_engines:
continue
shading_engine = shading_engines[0]
materials = shading_engine.surfaceShader.inputs()
if not materials:
continue
else:
for material in materials:
if material not in all_materials:
all_materials.append(material)
import random
from anima.utils import hsv_to_rgb
attr_lut = {
"lambert": "color",
}
for material in all_materials:
h = random.random() # 0-1
s = random.random() * 0.5 + 0.25 # 0.25-0.75
v = random.random() * 0.5 + 0.5 # 0.5 - 1
r, g, b = hsv_to_rgb(h, s, v)
attr_name = attr_lut[material.type()]
material.attr(attr_name).set(r, g, b)
@classmethod
def vertigo_setup_look_at(cls):
"""sets up a the necessary locator for teh Vertigo effect for the
selected camera
"""
from anima.dcc.mayaEnv import vertigo
cam = pm.ls(sl=1)[0]
vertigo.setup_look_at(cam)
@classmethod
def vertigo_setup_vertigo(cls):
"""sets up a Vertigo effect for the selected camera"""
from anima.dcc.mayaEnv import vertigo
cam = pm.ls(sl=1)[0]
vertigo.setup_vertigo(cam)
@classmethod
def vertigo_delete(cls):
"""deletes the Vertigo setup for the selected camera"""
from anima.dcc.mayaEnv import vertigo
cam = pm.ls(sl=1)[0]
vertigo.delete(cam)
@classmethod
def duplicate_with_connections(cls):
"""duplicates the selected nodes with connections to the network"""
return pm.duplicate(ic=1, rr=1)
@classmethod
def duplicate_input_graph(cls):
"""duplicates the selected nodes with all their inputs"""
return pm.duplicate(un=1, rr=1)
@classmethod
def delete_render_and_display_layers(cls):
"""Deletes the display and render layers in the current scene"""
cls.delete_display_layers()
cls.delete_render_layers()
@classmethod
def delete_display_layers(cls):
"""Deletes the display layers in the current scene"""
# switch to default render layer before deleting anything
# this will prevent layers to be non-deletable
from anima.dcc.mayaEnv import auxiliary
auxiliary.switch_to_default_render_layer()
pm.delete(pm.ls(type=["displayLayer"]))
@classmethod
def delete_render_layers(cls):
"""Deletes the render layers in the current scene"""
# switch to default render layer before deleting anything
# this will prevent layers to be non-deletable
from anima.dcc.mayaEnv import auxiliary
auxiliary.switch_to_default_render_layer()
pm.delete(pm.ls(type=["renderLayer"]))
@classmethod
def delete_unused_shading_nodes(cls):
"""Deletes unused shading nodes"""
pm.mel.eval("MLdeleteUnused")
@classmethod
def normalize_texture_paths(cls):
"""Expands the environment variables in texture paths"""
import os
for node in pm.ls(type="file"):
if node.hasAttr("colorSpace"):
color_space = node.colorSpace.get()
node.fileTextureName.set(os.path.expandvars(node.fileTextureName.get()))
if node.hasAttr("colorSpace"):
node.colorSpace.set(color_space)
@classmethod
def unnormalize_texture_paths(cls):
"""Contracts the environment variables in texture paths bu adding
the repository environment variable to the file paths
"""
from anima.dcc import mayaEnv
m = mayaEnv.Maya()
m.replace_external_paths()
@classmethod
def assign_substance_textures(cls):
"""auto assigns textures to selected materials.
Supports both Arnold and Redshift materials
"""
#
# Substance Texture Assigner
#
# material_subfixes = {
# "BaseColor": {
# "aiStandardSurface": {
# "attr": "baseColor"
# },
# "RedshiftMaterial": {
# "attr": "diffuse_color"
# },
# },
# "Height": {},
# "Metalness": {
# "aiStandarSurface": {
# "attr": "metalness"
# }
# },
# "Normal": {
# "aiStandardSurface": {
# "tree": {
# "type": "aiBump2D",
# "class": "asUtility",
# "attr": {
# "bumpMap": {
# "output": "outColorR"
# "type": "aiImage",
# "attr": {
# "filename": "%TEXTUREFILE%"
# }
# }
# }
# "target": "normalCamera"
# }
# }
# },
# "Roughness": {
# "aiStandardSurface": {
# "attr": "specularRoughness"
# }
# }
# }
def connect_place2d_to_file(place2d_node, file_node):
"""connects place2dtexture node to file image node"""
place2d_outputs = ["outUV", "outUvFilterSize"]
texture_inputs = ["uvCoord", "uvFilterSize"]
place2d_attrs = [
"coverage",
"translateFrame",
"rotateFrame",
"mirrorU",
"mirrorV",
"stagger",
"wrapU",
"wrapV",
"repeatUV",
"offset",
"rotateUV",
"noiseUV",
"vertexUvOne",
"vertexUvTwo",
"vertexUvThree",
"vertexCameraOne",
]
for i in range(0, len(place2d_outputs)):
place2d_node.attr(place2d_outputs[i]).connect(
file_node.attr(texture_inputs[i])
)
for attr in place2d_attrs:
place2d_node.attr(attr).connect(file_node.attr(attr))
import glob
materials = []
# support both object and material selections
nodes = pm.selected()
accepted_materials = [
"aiStandardSurface", "RedshiftMaterial", "RedshiftStandardMaterial"
]
for node in nodes:
if node.type() in accepted_materials:
materials.append(node)
elif node.type() == "transform":
try:
se = node.getShape().listConnections(type="shadingEngine")[0]
material = se.attr("surfaceShader").inputs()[0]
if material not in materials:
materials.append(material)
except (AttributeError, IndexError):
pass
# ask the texture folder
texture_path = pm.fileDialog2(cap="Choose Texture Folder", okc="Choose", fm=2)[
0
]
for material in materials:
# textures should start with the same name of the material
material_name = material.name().split(":")[-1] # strip namespaces
print("material.name: %s" % material_name)
pattern = "%s/%s_*" % (texture_path, material_name)
print("pattern: %s" % pattern)
files = glob.glob(pattern)
print(files)
# TODO: Make it beautiful by using the auxiliary.create_shader()
# For now do it ugly!
if material.type() == "aiStandardSurface":
# create place2dTexture node
place2d = pm.shadingNode("place2dTexture", asUtility=1)
# *********************************************
# BaseColor
# create a new aiImage
base_color_file_path = glob.glob(
"%s/%s_BaseColor*" % (texture_path, material_name)
)
if base_color_file_path:
# fix diffuse weight
material.base.set(1)
base_color_file_path = base_color_file_path[0]
base_color_file = pm.shadingNode("file", asTexture=1)
connect_place2d_to_file(place2d, base_color_file)
base_color_file.setAttr("ignoreColorSpaceFileRules", 1)
base_color_file.fileTextureName.set(base_color_file_path)
base_color_file.colorSpace.set("sRGB")
base_color_file.outColor >> material.baseColor
# *********************************************
# Height
# height_file_path = glob.glob("%s/%s_Height*" % (texture_path, material_name))
height_channel_names = [
"Height",
"DisplaceHeightField",
"DisplacementHeight",
]
for height_channel_name in height_channel_names:
height_file_path = glob.glob(
"%s/%s_%s*" % (texture_path, material_name, height_channel_name)
)
if height_file_path:
height_file_path = height_file_path[0]
# create a displacement node
shading_node = material.attr("outColor").outputs(
type="shadingEngine"
)[0]
disp_shader = pm.shadingNode("displacementShader", asShader=1)
disp_shader.displacement >> shading_node.displacementShader
# create texture
disp_file = pm.shadingNode("file", asTexture=1)
connect_place2d_to_file(place2d, disp_file)
disp_file.setAttr("ignoreColorSpaceFileRules", 1)
disp_file.fileTextureName.set(height_file_path)
disp_file.colorSpace.set("Raw")
disp_file.alphaIsLuminance.set(1)
disp_file.outAlpha >> disp_shader.displacement
# *********************************************
# Metalness
metalness_file_path = glob.glob(
"%s/%s_Metalness*" % (texture_path, material_name)
)
if metalness_file_path:
metalness_file_path = metalness_file_path[0]
metalness_file = pm.shadingNode("file", asTexture=1)
connect_place2d_to_file(place2d, metalness_file)
metalness_file.setAttr("ignoreColorSpaceFileRules", 1)
metalness_file.fileTextureName.set(metalness_file_path)
metalness_file.colorSpace.set("Raw")
metalness_file.alphaIsLuminance.set(1)
metalness_file.outAlpha >> material.metalness
# *********************************************
# Normal
normal_file_path = glob.glob(
"%s/%s_Normal*" % (texture_path, material_name)
)
if normal_file_path:
normal_file_path = normal_file_path[0]
normal_ai_normalmap = pm.shadingNode("aiNormalMap", asUtility=1)
normal_file = pm.shadingNode("file", asTexture=1)
connect_place2d_to_file(place2d, normal_file)
normal_file.setAttr("ignoreColorSpaceFileRules", 1)
normal_file.fileTextureName.set(normal_file_path)
normal_file.colorSpace.set("Raw")
normal_file.outColor >> normal_ai_normalmap.input
normal_ai_normalmap.outValue >> material.normalCamera
# *********************************************
# Roughness
# specularRoughness
roughness_file_path = glob.glob(
"%s/%s_Roughness*" % (texture_path, material_name)
)
if roughness_file_path:
roughness_file_path = roughness_file_path[0]
roughness_file = pm.shadingNode("file", asTexture=1)
connect_place2d_to_file(place2d, roughness_file)
roughness_file.setAttr("ignoreColorSpaceFileRules", 1)
roughness_file.fileTextureName.set(roughness_file_path)
roughness_file.colorSpace.set("Raw")
roughness_file.alphaIsLuminance.set(1)
roughness_file.outAlpha >> material.specularRoughness
elif material.type() in ["RedshiftMaterial", "RedshiftStandardMaterial"]:
# create place2dTexture node
place2d = pm.shadingNode("place2dTexture", asUtility=1)
# *********************************************
# BaseColor
# create a new aiImage
diffuse_color_file_path = glob.glob(
"%s/%s_Diffuse*" % (texture_path, material_name)
)
if diffuse_color_file_path:
use_udim = False
if len(diffuse_color_file_path) > 1:
use_udim = True
diffuse_color_file_path = diffuse_color_file_path[0]
if use_udim:
diffuse_color_file_path = \
diffuse_color_file_path.replace("1001", "<udim>")
diffuse_color_file = pm.shadingNode("file", asTexture=1)
connect_place2d_to_file(place2d, diffuse_color_file)
diffuse_color_file.setAttr("ignoreColorSpaceFileRules", 1)
diffuse_color_file.fileTextureName.set(diffuse_color_file_path)
diffuse_color_file.colorSpace.set("sRGB")
diffuse_color_file.outColor >> material.diffuse_color
# Accept also BaseColor
# create a new aiImage
base_color_file_path = glob.glob(
"%s/%s_BaseColor*" % (texture_path, material_name)
)
if base_color_file_path:
use_udim = False
if len(base_color_file_path) > 1:
use_udim = True
base_color_file_path = base_color_file_path[0]
if use_udim:
base_color_file_path = \
base_color_file_path.replace("1001", "<udim>")
base_color_file = pm.shadingNode("file", asTexture=1)
connect_place2d_to_file(place2d, base_color_file)
base_color_file.setAttr("ignoreColorSpaceFileRules", 1)
base_color_file.fileTextureName.set(base_color_file_path)
base_color_file.colorSpace.set("sRGB")
try:
base_color_file.outColor >> material.diffuse_color
except AttributeError:
# RedshiftStandardMaterial
base_color_file.outColor >> material.base_color
# *********************************************
# Height
height_channel_names = [
"Height",
"DisplaceHeightField",
"DisplacementHeight",
]
for height_channel_name in height_channel_names:
height_file_path = glob.glob(
"%s/%s_%s*" % (texture_path, material_name, height_channel_name)
)
if height_file_path:
use_udim = False
if len(height_file_path) > 1:
use_udim = True
height_file_path = height_file_path[0]
if use_udim:
height_file_path = \
height_file_path.replace("1001", "<udim>")
# create a displacement node
shading_node = material.attr("outColor").outputs(
type="shadingEngine"
)[0]
disp_shader = pm.shadingNode(
"RedshiftDisplacement", asUtility=1
)
# if os.path.splitext(height_file_path)[1] == '.exr': # might not be necessary
# disp_shader.setAttr('newrange_min', -1)
disp_shader.out >> shading_node.displacementShader
# create texture
disp_file = pm.shadingNode("file", asTexture=1)
connect_place2d_to_file(place2d, disp_file)
disp_file.fileTextureName.set(height_file_path)
disp_file.colorSpace.set("Raw")
disp_file.setAttr("ignoreColorSpaceFileRules", 1)
disp_file.alphaIsLuminance.set(1)
disp_file.outColor >> disp_shader.texMap
break
# *********************************************
# Metalness
# set material BRDF to GGX and set fresnel type to metalness
try:
material.refl_brdf.set(1)
material.refl_fresnel_mode.set(2)
except AttributeError:
# RedshiftStandardMaterial
pass
metalness_file_path = glob.glob(
"%s/%s_Metal*" % (texture_path, material_name)
)
if metalness_file_path:
use_udim = False
if len(metalness_file_path) > 1:
use_udim = True
metalness_file_path = metalness_file_path[0]
if use_udim:
metalness_file_path = \
metalness_file_path.replace("1001", "<udim>")
metalness_file = pm.shadingNode("file", asTexture=1)
connect_place2d_to_file(place2d, metalness_file)
metalness_file.fileTextureName.set(metalness_file_path)
metalness_file.colorSpace.set("Raw")
metalness_file.setAttr("ignoreColorSpaceFileRules", 1)
metalness_file.alphaIsLuminance.set(1)
try:
metalness_file.outAlpha >> material.refl_metalness
except AttributeError:
# RedshiftStandardMaterial
metalness_file.outAlpha >> material.metalness
# *********************************************
# Reflectivity
reflectivity_file_path = glob.glob(
"%s/%s_Reflectivity*" % (texture_path, material_name)
)
if reflectivity_file_path:
use_udim = False
if len(reflectivity_file_path) > 1:
use_udim = True
reflectivity_file_path = reflectivity_file_path[0]
if use_udim:
reflectivity_file_path = \
reflectivity_file_path.replace("1001", "<udim>")
reflectivity_file = pm.shadingNode("file", asTexture=1)
connect_place2d_to_file(place2d, reflectivity_file)
reflectivity_file.fileTextureName.set(reflectivity_file_path)
reflectivity_file.colorSpace.set("sRGB")
reflectivity_file.setAttr("ignoreColorSpaceFileRules", 1)
reflectivity_file.alphaIsLuminance.set(1)
try:
reflectivity_file.outColor >> material.refl_reflectivity
except AttributeError:
# RedshiftStandardMaterial
reflectivity_file.outColor >> material.refl_weight
# *********************************************
# Normal
normal_file_path = glob.glob(
"%s/%s_Normal*" % (texture_path, material_name)
)
if normal_file_path:
use_udim = False
if len(normal_file_path) > 1:
use_udim = True
normal_file_path = normal_file_path[0]
if use_udim:
normal_file_path = normal_file_path.replace("1001", "<udim>")
# Redshift BumpMap doesn't work properly with Substance normals
rs_normal_map = pm.shadingNode("RedshiftBumpMap", asUtility=1)
# rs_normal_map = pm.shadingNode("RedshiftNormalMap", asUtility=1)
# set to tangent-space normals
rs_normal_map.inputType.set(1)
normal_file = pm.shadingNode("file", asTexture=1)
connect_place2d_to_file(place2d, normal_file)
normal_file.fileTextureName.set(normal_file_path)
normal_file.colorSpace.set("Raw")
normal_file.setAttr("ignoreColorSpaceFileRules", 1)
normal_file.outColor >> rs_normal_map.input
# rs_normal_map.tex0.set(normal_file_path)
rs_normal_map.out >> material.bump_input
rs_normal_map.scale.set(1)
# *********************************************
# Roughness
# specularRoughness
roughness_file_path = glob.glob(
"%s/%s_Roughness*" % (texture_path, material_name)
)
if roughness_file_path:
use_udim = False
if len(roughness_file_path) > 1:
use_udim = True
roughness_file_path = roughness_file_path[0]
if use_udim:
roughness_file_path = \
roughness_file_path.replace("1001", "<udim>")
roughness_file = pm.shadingNode("file", asTexture=1)
connect_place2d_to_file(place2d, roughness_file)
roughness_file.fileTextureName.set(roughness_file_path)
roughness_file.colorSpace.set("Raw")
roughness_file.setAttr("ignoreColorSpaceFileRules", 1)
roughness_file.alphaIsLuminance.set(1)
roughness_file.outAlpha >> material.refl_roughness
    @classmethod
    def redshift_ic_ipc_bake(cls):
        """Set the Redshift render settings for an Irradiance Cache +
        Irradiance Point Cloud bake and start a render.

        The attribute values being replaced are stashed in
        ``cls.rso_options["orig"]`` and the current frame in
        ``cls.rso_options["current_frame"]`` so a later restore call can
        revert everything.
        """
        # make the single bake frame's motion blur span the full playback range
        start_frame = int(pm.playbackOptions(q=True, ast=True))
        end_frame = int(pm.playbackOptions(q=True, aet=True))
        cls.rso_options["bake"]["motionBlurFrameDuration"] = end_frame - start_frame + 1
        rso = pm.PyNode("redshiftOptions")
        # store the current values, then apply the bake presets
        for attr in cls.rso_options["bake"]:
            cls.rso_options["orig"][attr] = rso.attr(attr).get()
            rso.attr(attr).set(cls.rso_options["bake"][attr])
        # remember where the timeline was, then go to the first frame
        current_frame = pm.currentTime(q=1)
        cls.rso_options["current_frame"] = current_frame
        pm.currentTime(start_frame)
        # kick off a render in the render view
        pm.mel.eval('rsRender -render -rv -cam "<renderview>";')
    @classmethod
    def redshift_ic_ipc_bake_restore(cls):
        """Restore the render settings stashed by the IC + IPC bake call.

        Reapplies the original attribute values, keeps the bake GI engines,
        switches both caches to *Load* mode and points them at the files
        written during the bake, then returns to the pre-bake frame.
        """
        rso = pm.PyNode("redshiftOptions")
        # revert settings back
        for attr in cls.rso_options["orig"]:
            rso.attr(attr).set(cls.rso_options["orig"][attr])
        # set the GI engines (deliberately the bake engines, not the originals)
        rso.primaryGIEngine.set(cls.rso_options["bake"]["primaryGIEngine"])
        rso.secondaryGIEngine.set(cls.rso_options["bake"]["secondaryGIEngine"])
        # set the irradiance method to load
        rso.irradiancePointCloudMode.set(1)  # Load
        rso.irradianceCacheMode.set(1)  # Load
        # set the cache paths to the files generated by the bake
        rso.irradiancePointCloudFilename.set(
            cls.rso_options["bake"]["irradiancePointCloudFilename"]
        )
        rso.irradianceCacheFilename.set(
            cls.rso_options["bake"]["irradianceCacheFilename"]
        )
        # go back to the frame that was current before the bake
        current_frame = cls.rso_options["current_frame"]
        pm.currentTime(current_frame)
@classmethod
def update_render_settings(cls):
"""updates render settings for current renderer"""
from anima.dcc import mayaEnv
m = mayaEnv.Maya()
v = m.get_current_version()
if v:
m.set_render_filename(version=v)
@classmethod
def afanasy_job_submitter(cls):
"""Opens the Afanasy job sumitter UI"""
from anima.dcc.mayaEnv import afanasy
ui = afanasy.UI()
ui.show()
@classmethod
def auto_convert_to_redshift(cls):
"""converts the current scene to Redshift"""
from anima.dcc.mayaEnv import ai2rs
cm = ai2rs.ConversionManager()
cm.auto_convert()
@classmethod
def convert_nodes_to_redshift(cls):
"""converts the selected nodes to Redshift"""
from anima.dcc.mayaEnv import ai2rs
cm = ai2rs.ConversionManager()
for node in pm.selected():
cm.convert(node)
@classmethod
def rsproxy_to_bounding_box(cls):
"""sets the display mode to bounding box on selected proxy nodes"""
cls.rsproxy_display_mode_toggle(display_mode=0)
@classmethod
def rsproxy_to_preview_mesh(cls):
"""sets the display mode to preview mesh on selected proxy nodes"""
cls.rsproxy_display_mode_toggle(display_mode=1)
@classmethod
def rsproxy_display_mode_toggle(cls, display_mode=0):
"""sets the display mode on selected proxies
:param display_mode:
0: Bounding Box
1: Preview Mesh
2: Linked Mesh
3: Hide In Viewport
:return:
"""
for node in pm.ls(sl=1):
hist = node.getShape().listHistory()
proxy = hist[1]
proxy.displayMode.set(display_mode)
@classmethod
def standin_to_bbox(cls):
"""convert the selected stand-in nodes to bbox"""
[
node.mode.set(0)
for node in pm.ls(sl=1)
if isinstance(node.getShape(), pm.nt.AiStandIn)
]
@classmethod
def standin_to_polywire(cls):
"""convert the selected stand-in nodes to bbox"""
[
node.mode.set(2)
for node in pm.ls(sl=1)
if isinstance(node.getShape(), pm.nt.AiStandIn)
]
@classmethod
def add_miLabel(cls):
selection = pm.ls(sl=1)
for node in selection:
if node.type() == "Transform":
if node.hasAttr("miLabel"):
pass
else:
pm.addAttr(node, ln="miLabel", at="long", keyable=True)
@classmethod
def connect_facingRatio_to_vCoord(cls):
selection = pm.ls(sl=1)
for i in range(1, len(selection)):
selection[0].facingRatio.connect((selection[i] + ".vCoord"), force=True)
    @classmethod
    def set_shape_attribute(
        cls, attr_name, value, apply_to_hierarchy, disable_undo_queue=False
    ):
        """Set an attribute on the selected shapes, recording render layer
        adjustments when not on the default render layer.

        :param attr_name: name of the shape attribute to set.
        :param value: value to apply; the sentinel ``-1`` means "remove the
            render layer adjustments" instead of setting a value.
        :param apply_to_hierarchy: when True the whole hierarchy below the
            selection is processed.
        :param disable_undo_queue: when True the undo queue is switched off
            for the duration of the call (previous state restored at the
            end).
        """
        undo_state = pm.undoInfo(q=1, st=1)
        if disable_undo_queue:
            pm.undoInfo(st=False)
        supported_shapes = ["aiStandIn", "mesh", "nurbsCurve"]
        # some attributes (notably on aiStandIn) have a companion
        # override* attribute that must be enabled for the value to apply
        attr_mapper = {
            "castsShadows": "overrideCastsShadows",
            "receiveShadows": "overrideReceiveShadows",
            "primaryVisibility": "overridePrimaryVisibility",
            "visibleInReflections": "overrideVisibleInReflections",
            "visibleInRefractions": "overrideVisibleInRefractions",
            "doubleSided": "overrideDoubleSided",
            "aiSelfShadows": "overrideSelfShadows",
            "aiOpaque": "overrideOpaque",
            "aiVisibleInDiffuse": "overrideVisibleInDiffuse",
            "aiVisibleInGlossy": "overrideVisibleInGlossy",
            "aiMatte": "overrideMatte",
        }
        pre_selection_list = pm.ls(sl=1)
        if apply_to_hierarchy:
            pm.select(hierarchy=1)
        objects = pm.ls(sl=1, type=supported_shapes)
        # get override_attr_name from dictionary
        if attr_name in attr_mapper:
            override_attr_name = attr_mapper[attr_name]
        else:
            override_attr_name = None
        # register a caller
        pdm = ProgressManager()
        caller = pdm.register(len(objects), "Setting Shape Attribute")
        layers = pm.ls(type="renderLayer")
        is_default_layer = layers[0].currentLayer() == layers[0].defaultRenderLayer()
        if value != -1:
            # set the value, with a render layer adjustment when needed
            for item in objects:
                attr_full_name = "%s.%s" % (item.name(), attr_name)
                override_attr_full_name = "%s.%s" % (item.name(), override_attr_name)
                caller.step(message=attr_full_name)
                if not is_default_layer:
                    pm.editRenderLayerAdjustment(attr_full_name)
                item.setAttr(attr_name, value)
                # if there is an accompanying override attribute like it is
                # found in aiStandIn node
                # then also set override{Attr} to True
                if override_attr_name and cmds.attributeQuery(
                    override_attr_name, n=item.name(), ex=1
                ):
                    if not is_default_layer:
                        pm.editRenderLayerAdjustment(override_attr_full_name)
                    item.setAttr(override_attr_name, True)
        else:
            # value == -1: strip the render layer adjustments instead
            for item in objects:
                attr_full_name = "%s.%s" % (item.name(), attr_name)
                override_attr_full_name = "%s.%s" % (item.name(), override_attr_name)
                caller.step(message=attr_full_name)
                # remove any overrides
                if not is_default_layer:
                    pm.editRenderLayerAdjustment(attr_full_name, remove=1)
                if (
                    override_attr_name
                    and cmds.attributeQuery(override_attr_name, n=item.name(), ex=1)
                    and not is_default_layer
                ):
                    pm.editRenderLayerAdjustment(override_attr_full_name, remove=1)
        # caller.end_progress()
        # restore undo queue state and the original selection
        pm.undoInfo(st=undo_state)
        pm.select(pre_selection_list)
@classmethod
def set_finalGatherHide(cls, value):
"""sets the finalGatherHide to on or off for the given list of objects"""
attr_name = "miFinalGatherHide"
objects = pm.ls(sl=1)
for obj in objects:
shape = obj
if isinstance(obj, pm.nt.Transform):
shape = obj.getShape()
if not isinstance(shape, (pm.nt.Mesh, pm.nt.NurbsSurface)):
continue
# add the attribute if it doesn't already exists
if not shape.hasAttr(attr_name):
pm.addAttr(shape, ln=attr_name, at="long", min=0, max=1, k=1)
obj.setAttr(attr_name, value)
@classmethod
def replace_shaders_with_last(cls):
"""Assigns the last shader selected to all the objects using the shaders
on the list
"""
sel_list = pm.ls(sl=1)
target_node = sel_list[-1]
for node in sel_list[:-1]:
pm.hyperShade(objects=node)
pm.hyperShade(assign=target_node)
pm.select(None)
@classmethod
def create_texture_ref_object(cls):
selection = pm.ls(sl=1)
for obj in selection:
pm.select(obj)
pm.runtime.CreateTextureReferenceObject()
pm.select(selection)
    @classmethod
    def use_mib_texture_filter_lookup(cls):
        """Adds texture filter lookup node to the selected file texture nodes for
        better texture filtering.

        The function is smart enough to use the existing nodes, if there is a
        connection from the selected file nodes to a mib_texture_filter_lookup node
        then it will not create any new node and just use the existing ones.

        It will also not create any place2dTexture nodes if the file node doesn't
        have a place2dTexture node but is connected to a filter lookup node which
        already has a connection to a place2dTexture node.
        """
        file_nodes = pm.ls(sl=1, type="file")
        for file_node in file_nodes:
            # set the filter type to none
            file_node.filterType.set(0)
            # check if it is already connected to a mib_texture_filter_lookup node
            message_outputs = file_node.message.outputs(
                type="mib_texture_filter_lookup"
            )
            if len(message_outputs):
                # use the first one
                mib_texture_filter_lookup = message_outputs[0]
            else:
                # create a texture filter lookup node
                mib_texture_filter_lookup = pm.createNode("mib_texture_filter_lookup")
                # do the connection
                file_node.message >> mib_texture_filter_lookup.tex
            # check if the mib_texture_filter_lookup has any connection to a
            # placement node
            mib_t_f_l_to_placement = mib_texture_filter_lookup.inputs(
                type="place2dTexture"
            )
            placement_node = None
            if len(mib_t_f_l_to_placement):
                # do nothing, reuse the placement already wired to the lookup
                placement_node = mib_t_f_l_to_placement[0].node()
            else:
                # get the texture placement
                placement_connections = file_node.inputs(
                    type="place2dTexture", p=1, c=1
                )
                # if there is no placement create one
                placement_node = None
                if len(placement_connections):
                    placement_node = placement_connections[0][1].node()
                    # disconnect connections from placement to file node
                    # (the lookup node takes over the UV coordinates)
                    for conn in placement_connections:
                        conn[1] // conn[0]
                else:
                    placement_node = pm.createNode("place2dTexture")
                # connect placement to mr_texture_filter_lookup
                placement_node.outU >> mib_texture_filter_lookup.coordX
                placement_node.outV >> mib_texture_filter_lookup.coordY
            # connect color: reroute every consumer of the file's outColor
            # to the lookup node's outValue
            for output in file_node.outColor.outputs(p=1):
                mib_texture_filter_lookup.outValue >> output
            # connect alpha
            for output in file_node.outAlpha.outputs(p=1):
                mib_texture_filter_lookup.outValueA >> output
    @classmethod
    def convert_to_linear(cls):
        """adds a gamma_gain node in between the selected nodes outputs to make the
        result linear

        A ``mip_gamma_gain`` (gamma 2.2, reverse) is inserted between every
        selected node and all of its existing output connections.
        """
        #
        # convert to linear
        #
        selection = pm.ls(sl=1)
        for file_node in selection:
            # get the connections
            outputs = file_node.outputs(plugs=True)
            if not len(outputs):
                continue
            # and insert a mip_gamma_gain
            gamma_node = pm.createNode("mip_gamma_gain")
            gamma_node.setAttr("gamma", 2.2)
            gamma_node.setAttr("reverse", True)
            # connect the file_node to gamma_node
            # mental ray texture nodes expose outValue/outValueA; regular
            # file nodes only have outColor, hence the fallback
            try:
                file_node.outValue >> gamma_node.input
                file_node.outValueA >> gamma_node.inputA
            except AttributeError:
                file_node.outColor >> gamma_node.input
            # do all the connections from the output of the gamma
            # (alpha output is used when the color output is rejected)
            for output in outputs:
                try:
                    gamma_node.outValue >> output
                except RuntimeError:
                    gamma_node.outValueA >> output
        pm.select(selection)
@classmethod
def use_image_sequence(cls):
"""creates an expression to make the mentalrayTexture node also able to read
image sequences
Select your mentalrayTexture nodes and then run the script.
The filename should use the file.%nd.ext format
"""
textures = pm.ls(sl=1, type="mentalrayTexture")
for texture in textures:
# get the filename
filename = texture.getAttr("fileTextureName")
splits = filename.split(".")
if len(splits) == 3:
base = ".".join(splits[0:-2]) + "."
pad = len(splits[-2])
extension = "." + splits[-1]
expr = (
"string $padded_frame = python(\"'%0"
+ str(pad)
+ "d'%\" + string(frame));\n"
+ 'string $filename = "'
+ base
+ '" + \
$padded_frame + ".tga";\n'
+ 'setAttr -type "string" '
+ texture.name()
+ ".fileTextureName $filename;\n"
)
# create the expression
pm.expression(s=expr)
    @classmethod
    def add_to_selected_container(cls):
        """Add the selected nodes to the selected container.

        No container selected: a brand new container is created from the
        selection.  One container: the remaining nodes are added to it.
        Several containers: all other containers and nodes are merged into
        the last container.
        """
        selection = pm.ls(sl=1)
        conList = pm.ls(sl=1, con=1)
        # the non-container part of the selection
        objList = list(set(selection) - set(conList))
        if len(conList) == 0:
            pm.container(addNode=selection)
        elif len(conList) == 1:
            pm.container(conList, edit=True, addNode=objList)
        else:
            length = len(conList) - 1
            # fold every container except the last one into the last one
            for i in range(0, length):
                containerList = conList[i]
                pm.container(conList[-1], edit=True, f=True, addNode=containerList)
            pm.container(conList[-1], edit=True, f=True, addNode=objList)
@classmethod
def remove_from_container(cls):
selection = pm.ls(sl=1)
for i in range(0, len(selection)):
con = pm.container(q=True, fc=selection[i])
pm.container(con, edit=True, removeNode=selection[i])
    @classmethod
    def reload_file_textures(cls):
        """Force every file texture node in the scene to reload its image
        via the Attribute Editor's MEL reload callback.
        """
        fileList = pm.ls(type="file")
        for fileNode in fileList:
            # NOTE(review): the attribute name is interpolated unquoted into
            # the MEL call — presumably AEfileTextureReloadCmd accepts a bare
            # attribute reference; confirm against the Maya AE scripts.
            mel.eval("AEfileTextureReloadCmd(%s.fileTextureName)" % fileNode)
    @classmethod
    def transfer_shaders(cls, allow_component_assignments=False):
        """transfer shaders between selected objects. It can search for
        hierarchies both in source and target sides.

        Select exactly two nodes: the source first, the target second.
        Besides the shader assignments, a fixed list of render attributes
        (Arnold, Redshift and generic shape attributes) and their incoming
        connections are copied from each matched source node to its target.

        :param (bool) allow_component_assignments: If True will transfer component level
          shader assignments.
        """
        selection = pm.ls(sl=1)
        pm.select(None)
        source = selection[0]
        target = selection[1]
        # auxiliary.transfer_shaders(source, target)
        # pm.select(selection)
        # render attributes copied along with the shader assignment
        attr_names = [
            "castsShadows",
            "receiveShadows",
            "motionBlur",
            "primaryVisibility",
            "smoothShading",
            "visibleInReflections",
            "visibleInRefractions",
            "doubleSided",
            "opposite",
            "aiSelfShadows",
            "aiOpaque",
            "aiVisibleInDiffuse",
            "aiVisibleInGlossy",
            "aiExportTangents",
            "aiExportColors",
            "aiExportRefPoints",
            "aiExportRefNormals",
            "aiExportRefTangents",
            "color",
            "interpolation",
            "aiTranslator",
            "intensity",
            "aiExposure",
            "aiColorTemperature",
            "emitDiffuse",
            "emitSpecular",
            "aiDecayType",
            "lightVisible",
            "aiSamples",
            "aiNormalize",
            "aiCastShadows",
            "aiShadowDensity",
            "aiShadowColor",
            "aiAffectVolumetrics",
            "aiCastVolumetricShadows",
            "aiVolumeSamples",
            "aiDiffuse",
            "aiSpecular",
            "aiSss",
            "aiIndirect",
            "aiMaxBounces",
            "aiSubdivType",
            "aiSubdivIterations",
            "aiSubdivAdaptiveMetric",
            "aiSubdivPixelError",
            "aiSubdivUvSmoothing",
            "aiSubdivSmoothDerivs",
            "aiDispHeight",
            "aiDispPadding",
            "aiDispZeroValue",
            "aiDispAutobump",
            "aiStepSize",
            "rsEnableSubdivision",
            "rsSubdivisionRule",
            "rsScreenSpaceAdaptive",
            "rsDoSmoothSubdivision",
            "rsMinTessellationLength",
            "rsMaxTessellationSubdivs",
            "rsOutOfFrustumTessellationFactor",
            "rsLimitOutOfFrustumTessellation",
            "rsMaxOutOfFrustumTessellationSubdivs",
            "rsEnableDisplacement",
            "rsMaxDisplacement",
            "rsDisplacementScale",
            "rsAutoBumpMap",
            "rsObjectId",
        ]
        # check if they are direct parents of mesh or nurbs shapes
        source_shape = source.getShape()
        target_shape = target.getShape()
        if (
            source_shape
            and not isinstance(source_shape, pm.nt.NurbsCurve)
            and target_shape
            and not isinstance(target_shape, pm.nt.NurbsCurve)
        ):
            # do a direct assignment from source to target
            # shading_engines = source_shape.outputs(type=pm.nt.ShadingEngine)
            # pm.sets(shading_engines[0], fe=target)
            # pm.select(selection)
            lut = {"match": [(source_shape, target_shape)], "no_match": []}
        else:
            # search both hierarchies for corresponding node pairs
            lut = auxiliary.match_hierarchy(source, target)
        for source_node, target_node in lut["match"]:
            auxiliary.transfer_shaders(
                source_node,
                target_node,
                allow_component_assignments=allow_component_assignments
            )
            # also transfer render attributes
            for attr_name in attr_names:
                try:
                    target_node.setAttr(attr_name, source_node.getAttr(attr_name))
                except (pm.MayaAttributeError, RuntimeError):
                    # attribute missing on either side, or locked/connected
                    pass
                # input connections to attributes
                try:
                    for plug in source_node.attr(attr_name).inputs(p=1):
                        plug >> target_node.attr(attr_name)
                except pm.MayaAttributeError:
                    pass
            # caller.step()
        # caller.end_progress()
        # leave the unmatched target nodes selected so the user can see them
        if len(lut["no_match"]):
            pm.select(lut["no_match"])
            print(
                "The following nodes has no corresponding source:\n%s"
                % ("\n".join([node.name() for node in lut["no_match"]]))
            )
@classmethod
def fit_placement_to_UV(cls):
selection = pm.ls(sl=1)
uvs = [n for n in selection if isinstance(n, pm.general.MeshUV)]
placements = [p for p in selection if isinstance(p, pm.nt.Place2dTexture)]
# get the uv extends
temp_data = pm.polyEditUV(uvs, q=1)
u = sorted(temp_data[0::2])
v = sorted(temp_data[1::2])
umin = u[0]
umax = u[-1]
vmin = v[0]
vmax = v[-1]
for p in placements:
p.setAttr("coverage", (umax - umin, vmax - vmin))
p.setAttr("translateFrame", (umin, vmin))
@classmethod
def connect_placement2d_to_file(cls):
"""connects the selected placement node to the selected file textures"""
attr_lut = [
"coverage",
"translateFrame",
"rotateFrame",
"mirrorU",
"mirrorV",
"stagger",
"wrapU",
"wrapV",
"repeatUV",
"offset",
"rotateUV",
"noiseUV",
"vertexUvOne",
"vertexUvTwo",
"vertexUvThree",
"vertexCameraOne",
("outUV", "uvCoord"),
("outUvFilterSize", "uvFilterSize"),
]
# get placement and file nodes
placement_node = pm.ls(sl=1, type=pm.nt.Place2dTexture)[0]
file_nodes = pm.ls(sl=1, type=pm.nt.File)
from anima import __string_types__
for file_node in file_nodes:
for attr in attr_lut:
if isinstance(attr, __string_types__):
source_attr_name = attr
target_attr_name = attr
elif isinstance(attr, tuple):
source_attr_name = attr[0]
target_attr_name = attr[1]
placement_node.attr(source_attr_name) >> file_node.attr(
target_attr_name
)
@classmethod
def open_node_in_browser(cls):
# get selected nodes
node_attrs = {
"file": "fileTextureName",
"aiImage": "filename",
"aiStandIn": "dso",
}
import os
from anima.utils import open_browser_in_location
for node in pm.ls(sl=1):
type_ = pm.objectType(node)
# special case: if transform use shape
if type_ == "transform":
node = node.getShape()
type_ = pm.objectType(node)
attr_name = node_attrs.get(type_)
if attr_name:
# if any how it contains a "#" character use the path
path = node.getAttr(attr_name)
if "#" in path:
path = os.path.dirname(path)
open_browser_in_location(path)
    @classmethod
    def enable_matte(cls, color=0):
        """enables matte on selected objects

        :param color: index into the RGBA matte color table below
            (0: not visible, 1: red, 2: green, 3: blue, 4: alpha)
        """
        #
        # Enable Matte on Selected Objects
        #
        colors = [
            [0, 0, 0, 0],  # Not Visible
            [1, 0, 0, 0],  # Red
            [0, 1, 0, 0],  # Green
            [0, 0, 1, 0],  # Blue
            [0, 0, 0, 1],  # Alpha
        ]
        # only these Arnold shader types support the aiEnableMatte attributes
        arnold_shaders = (pm.nt.AiStandard, pm.nt.AiHair, pm.nt.AiSkin, pm.nt.AiUtility)
        for node in pm.ls(
            sl=1, dag=1, type=[pm.nt.Mesh, pm.nt.NurbsSurface, "aiStandIn"]
        ):
            obj = node
            # if isinstance(node, pm.nt.Mesh):
            #     obj = node
            # elif isinstance(node, pm.nt.Transform):
            #     obj = node.getShape()
            shading_nodes = pm.listConnections(obj, type="shadingEngine")
            for shadingNode in shading_nodes:
                shader = shadingNode.attr("surfaceShader").connections()[0]
                if isinstance(shader, arnold_shaders):
                    try:
                        # record render layer adjustments so the matte only
                        # applies to the current layer
                        pm.editRenderLayerAdjustment(shader.attr("aiEnableMatte"))
                        pm.editRenderLayerAdjustment(shader.attr("aiMatteColor"))
                        pm.editRenderLayerAdjustment(shader.attr("aiMatteColorA"))
                        shader.attr("aiEnableMatte").set(1)
                        shader.attr("aiMatteColor").set(
                            colors[color][0:3], type="double3"
                        )
                        shader.attr("aiMatteColorA").set(colors[color][3])
                    except RuntimeError as e:
                        # there is some connections
                        print(str(e))
@classmethod
def disable_subdiv(cls, node):
"""Disables the subdiv on the given nodes
:param node:
:return:
"""
if isinstance(node, pm.nt.Transform):
shapes = node.getShapes()
else:
shapes = [node]
for shape in shapes:
try:
shape.aiSubdivType.set(0)
except AttributeError:
pass
try:
shape.rsEnableSubdivision.set(0)
except AttributeError:
pass
@classmethod
def disable_subdiv_on_selected(cls):
"""disables subdiv on selected nodes"""
for node in pm.ls(sl=1):
cls.disable_subdiv(node)
@classmethod
def enable_subdiv_on_selected(cls, fixed_tes=False, max_subdiv=3):
"""enables subdiv on selected objects
:param fixed_tes: Uses fixed tessellation.
:param max_subdiv: The max subdivision iteration. Default 3.
"""
#
# Set SubDiv to CatClark on Selected nodes
#
for node in pm.ls(sl=1):
cls.enable_subdiv(node, fixed_tes=fixed_tes, max_subdiv=max_subdiv)
    @classmethod
    def enable_subdiv(cls, node, fixed_tes=False, max_subdiv=3):
        """enables subdiv on selected objects

        Configures both Arnold and Redshift subdivision; each renderer's
        attributes are attempted independently, so whichever plug-in is
        loaded gets configured.

        :param node: The node to enable the subdiv too
        :param fixed_tes: Uses fixed tessellation.
        :param max_subdiv: The max subdivision iteration. Default 3.
        """
        if isinstance(node, pm.nt.Transform):
            shapes = node.getShapes()
        else:
            shapes = [node]
        for shape in shapes:
            # Arnold (attributes missing when MtoA is not loaded)
            try:
                shape.aiSubdivIterations.set(max_subdiv)
                shape.aiSubdivType.set(1)
                shape.aiSubdivPixelError.set(0)
            except AttributeError:
                pass
            # Redshift (attributes missing when Redshift is not loaded)
            try:
                shape.rsEnableSubdivision.set(1)
                shape.rsMaxTessellationSubdivs.set(max_subdiv)
                if not fixed_tes:
                    # adaptive: limit out-of-frustum subdivision
                    shape.rsScreenSpaceAdaptive.set(1)
                    shape.rsLimitOutOfFrustumTessellation.set(1)
                    shape.rsMaxOutOfFrustumTessellationSubdivs.set(1)
                else:
                    # fixed tessellation
                    shape.rsScreenSpaceAdaptive.set(0)
                    shape.rsMinTessellationLength.set(0)
            except AttributeError:
                pass
@classmethod
def export_shader_attributes(cls):
"""exports the selected shader attributes to a JSON file"""
# get data
data = []
nodes = pm.ls(sl=1)
for node in nodes:
node_attr_data = {}
attrs = node.listAttr()
for attr in attrs:
try:
value = attr.get()
if not isinstance(value, pm.PyNode):
node_attr_data[attr.shortName()] = value
except TypeError:
continue
data.append(node_attr_data)
# write data
import json
with open(cls.node_attr_info_temp_file_path, "w") as f:
json.dump(data, f)
    @classmethod
    def export_shader_assignments_to_houdini(cls):
        """Exports shader assignments to Houdini via a JSON file.

        Use the Houdini counterpart to import the assignment data

        The JSON maps shader names to the full DAG paths (with ``/``
        separators) of the shapes they are assigned to, written to
        ``cls.shader_data_temp_file_path``.
        """
        # get the shaders from viewport selection
        shaders = []
        for node in pm.selected():
            shape = node.getShape()
            shading_engines = shape.outputs(type=pm.nt.ShadingEngine)
            for shading_engine in shading_engines:
                inputs = shading_engine.surfaceShader.inputs()
                for shader in inputs:
                    shaders.append(shader)
        # get the shapes for each shader
        shader_assignments = {}
        for shader in shaders:
            shader_name = shader.name()
            shading_engines = shader.outputs(type=pm.nt.ShadingEngine)
            if not shading_engines:
                continue
            shading_engine = shading_engines[0]
            shader_assignments[shader_name] = []
            assigned_nodes = pm.sets(shading_engine, q=1)
            for assigned_node in assigned_nodes:
                # NOTE(review): .node() implies set members are components —
                # confirm this also covers whole-shape assignments
                shape = assigned_node.node()
                # get the full path of the shape, Houdini-style separators
                shape_full_path = shape.fullPath().replace("|", "/")
                shader_assignments[shader_name].append(shape_full_path)
        # write data
        try:
            import json
            with open(cls.shader_data_temp_file_path, "w") as f:
                json.dump(shader_assignments, f, indent=4)
        except BaseException as e:
            pm.confirmDialog(title="Error", message="%s" % e, button="OK")
        else:
            pm.confirmDialog(
                title="Successful",
                message="Shader Data exported successfully!",
                button="OK",
            )
@classmethod
def import_shader_attributes(cls):
"""imports shader attributes from a temp JSON file"""
# read data
import json
with open(cls.node_attr_info_temp_file_path) as f:
data = json.load(f)
# set data
nodes = pm.ls(sl=1)
for i, node in enumerate(nodes):
i = i % len(data)
node_data = data[i]
for key in node_data:
value = node_data[key]
try:
node.setAttr(key, value)
except RuntimeError:
continue
@classmethod
def barndoor_simulator_setup(cls):
"""creates a barndoor simulator"""
bs = auxiliary.BarnDoorSimulator()
bs.light = pm.ls(sl=1)[0]
bs.setup()
@classmethod
def barndoor_simulator_unsetup(cls):
"""removes the barndoor simulator"""
bs = auxiliary.BarnDoorSimulator()
for light in pm.ls(sl=1):
light_shape = light.getShape()
if isinstance(light_shape, pm.nt.Light):
bs.light = light
bs.unsetup()
@classmethod
def fix_barndoors(cls):
"""fixes the barndoors on scene lights created in MtoA 1.0 to match the
new behaviour of barndoors in MtoA 1.1
"""
for light in pm.ls(type="spotLight"):
# calculate scale
cone_angle = light.getAttr("coneAngle")
penumbra_angle = light.getAttr("penumbraAngle")
if penumbra_angle < 0:
light.setAttr("coneAngle", max(cone_angle + penumbra_angle, 0.1))
else:
light.setAttr("coneAngle", max(cone_angle - penumbra_angle, 0.1))
    @classmethod
    def convert_aiSkinSSS_to_aiSkin(cls):
        """converts aiSkinSSS nodes in the current scene to aiSkin + aiStandard
        nodes automatically

        Each aiSkinSSS attribute is routed either to the new aiSkin or the
        new aiStandard node according to ``attr_mapper`` below; incoming
        connections are rewired and plain values are copied (optionally
        scaled by a ``multiplier``).  The aiSkin is fed through the
        aiStandard's emission channel, the old node is deleted, and the new
        nodes inherit its name.
        """
        # maps each aiSkinSSS attr to the destination node type, attr name
        # and an optional value multiplier
        attr_mapper = {
            # diffuse
            "color": {"node": "aiStandard", "attr_name": "color"},
            "diffuseWeight": {
                "node": "aiStandard",
                "attr_name": "Kd",
                "multiplier": 0.7,
            },
            "diffuseRoughness": {"node": "aiStandard", "attr_name": "diffuseRoughness"},
            # sss
            "sssWeight": {"node": "aiSkin", "attr_name": "sssWeight"},
            # shallowScatter
            "shallowScatterColor": {
                "node": "aiSkin",
                "attr_name": "shallowScatterColor",
            },
            "shallowScatterWeight": {
                "node": "aiSkin",
                "attr_name": "shallowScatterWeight",
            },
            "shallowScatterRadius": {
                "node": "aiSkin",
                "attr_name": "shallowScatterRadius",
            },
            # midScatter
            "midScatterColor": {
                "node": "aiSkin",
                "attr_name": "midScatterColor",
            },
            "midScatterWeight": {"node": "aiSkin", "attr_name": "midScatterWeight"},
            "midScatterRadius": {"node": "aiSkin", "attr_name": "midScatterRadius"},
            # deepScatter
            "deepScatterColor": {
                "node": "aiSkin",
                "attr_name": "deepScatterColor",
            },
            "deepScatterWeight": {"node": "aiSkin", "attr_name": "deepScatterWeight"},
            "deepScatterRadius": {"node": "aiSkin", "attr_name": "deepScatterRadius"},
            # primaryReflection
            "primaryReflectionColor": {"node": "aiSkin", "attr_name": "specularColor"},
            "primaryReflectionWeight": {
                "node": "aiSkin",
                "attr_name": "specularWeight",
            },
            "primaryReflectionRoughness": {
                "node": "aiSkin",
                "attr_name": "specularRoughness",
            },
            # secondaryReflection
            "secondaryReflectionColor": {"node": "aiSkin", "attr_name": "sheenColor"},
            "secondaryReflectionWeight": {"node": "aiSkin", "attr_name": "sheenWeight"},
            "secondaryReflectionRoughness": {
                "node": "aiSkin",
                "attr_name": "sheenRoughness",
            },
            # bump
            "normalCamera": {"node": "aiSkin", "attr_name": "normalCamera"},
            # sss multiplier
            "globalSssRadiusMultiplier": {
                "node": "aiSkin",
                "attr_name": "globalSssRadiusMultiplier",
            },
        }
        all_skin_sss = pm.ls(type="aiSkinSss")
        for skin_sss in all_skin_sss:
            skin = pm.shadingNode("aiSkin", asShader=1)
            standard = pm.shadingNode("aiStandard", asShader=1)
            # route the skin through the standard's emission channel
            skin.attr("outColor") >> standard.attr("emissionColor")
            standard.setAttr("emission", 1.0)
            skin.setAttr("fresnelAffectSss", 0)  # to match the previous behaviour
            node_mapper = {"aiSkin": skin, "aiStandard": standard}
            for attr in attr_mapper.keys():
                inputs = skin_sss.attr(attr).inputs(p=1, c=1)
                if inputs:
                    # copy inputs: rewire the incoming connection to the
                    # corresponding attribute on the new node
                    destination_attr_name = inputs[0][0].name().split(".")[-1]
                    source = inputs[0][1]
                    if destination_attr_name in attr_mapper:
                        node = attr_mapper[destination_attr_name]["node"]
                        attr_name = attr_mapper[destination_attr_name]["attr_name"]
                        source >> node_mapper[node].attr(attr_name)
                    else:
                        source >> skin.attr(destination_attr_name)
                else:
                    # copy values (applying the optional multiplier)
                    node = node_mapper[attr_mapper[attr]["node"]]
                    attr_name = attr_mapper[attr]["attr_name"]
                    multiplier = attr_mapper[attr].get("multiplier", 1.0)
                    attr_value = skin_sss.getAttr(attr)
                    if isinstance(attr_value, tuple):
                        attr_value = list(map(lambda x: x * multiplier, attr_value))
                    else:
                        attr_value *= multiplier
                    node.attr(attr_name).set(attr_value)
            # after everything is set up
            # connect the aiStandard to the shadingEngine
            for source, dest in skin_sss.outputs(p=1, c=1):
                standard.attr("outColor") >> dest
            # and rename the materials
            orig_name = skin_sss.name()
            # delete the skinSSS node (frees its name for the new aiSkin)
            pm.delete(skin_sss)
            skin_name = orig_name
            standard_name = "%s_aiStandard" % orig_name
            skin.rename(skin_name)
            standard.rename(standard_name)
            print("updated %s" % skin_name)
@classmethod
def normalize_sss_weights(cls):
"""normalizes the sss weights so their total weight is 1.0
if a aiStandard is assigned to the selected object it searches for an
aiSkin in the emission channel.
the script considers 0.7 as the highest diffuse value for aiStandard
"""
# get the shader of the selected object
assigned_shader = pm.ls(
pm.ls(sl=1)[0].getShape().outputs(type="shadingEngine")[0].inputs(), mat=1
)[0]
if assigned_shader.type() == "aiStandard":
sss_shader = assigned_shader.attr("emissionColor").inputs()[0]
diffuse_weight = assigned_shader.attr("Kd").get()
else:
sss_shader = assigned_shader
diffuse_weight = 0
def get_attr_or_texture(attr):
if attr.inputs():
# we probably have a texture assigned
# so use its multiply attribute
texture = attr.inputs()[0]
attr = texture.attr("multiply")
if isinstance(texture, pm.nt.AiImage):
attr = texture.attr("multiply")
elif isinstance(texture, pm.nt.File):
attr = texture.attr("colorGain")
return attr
shallow_attr = get_attr_or_texture(sss_shader.attr("shallowScatterWeight"))
mid_attr = get_attr_or_texture(sss_shader.attr("midScatterWeight"))
deep_attr = get_attr_or_texture(sss_shader.attr("deepScatterWeight"))
shallow_weight = shallow_attr.get()
if isinstance(shallow_weight, tuple):
shallow_weight = (
shallow_weight[0] + shallow_weight[1] + shallow_weight[2]
) / 3.0
mid_weight = mid_attr.get()
if isinstance(mid_weight, tuple):
mid_weight = (mid_weight[0] + mid_weight[1] + mid_weight[2]) / 3.0
deep_weight = deep_attr.get()
if isinstance(deep_weight, tuple):
deep_weight = (deep_weight[0] + deep_weight[1] + deep_weight[2]) / 3.0
total_sss_weight = shallow_weight + mid_weight + deep_weight
mult = (1 - diffuse_weight / 0.7) / total_sss_weight
try:
shallow_attr.set(shallow_weight * mult)
except RuntimeError:
w = shallow_weight * mult
shallow_attr.set(w, w, w)
try:
mid_attr.set(mid_weight * mult)
except RuntimeError:
w = mid_weight * mult
mid_attr.set(w, w, w)
try:
deep_attr.set(deep_weight * mult)
except RuntimeError:
w = deep_weight * mult
deep_attr.set(w, w, w)
    @classmethod
    def create_eye_shader_and_controls(cls):
        """This is pretty much specific to the way we are creating eye shaders
        for characters in KKS project, but it is a useful trick, select the
        inner eye objects before running

        Adds keyable eye control attributes (light strength/angle, diffuse,
        specular and SSS weights) to the character root node and wires them
        into the shaders of the selected inner-eye objects through two shared
        aiImage textures.
        """
        eyes = pm.ls(sl=1)
        if not eyes:
            return
        # the character root is taken as the topmost parent of the first eye
        char = eyes[0].getAllParents()[-1]
        place = pm.shadingNode("place2dTexture", asUtility=1)
        emission_image = pm.shadingNode("aiImage", asTexture=1)
        ks_image = pm.shadingNode("aiImage", asTexture=1)
        # project specific texture paths ($REPO1977 is expanded elsewhere)
        texture_paths = {
            "emission": "$REPO1977/KKS/Assets/Characters/Body_Parts/Textures/"
            "char_eyeInner_light_v001.png",
            "Ks": "$REPO1977/KKS/Assets/Characters/Body_Parts/Textures/"
            "char_eyeInner_spec_v002.png",
        }
        emission_image.setAttr("filename", texture_paths["emission"])
        ks_image.setAttr("filename", texture_paths["Ks"])
        # the light angle control rotates the emission texture via the
        # place2dTexture below
        place.outUV >> emission_image.attr("uvcoords")
        # create the control attributes on the character root if missing
        if not char.hasAttr("eyeLightStrength"):
            char.addAttr("eyeLightStrength", at="double", min=0, dv=0.0, k=1)
        else:
            # set the default
            char.attr("eyeLightStrength").set(0)
        if not char.hasAttr("eyeLightAngle"):
            char.addAttr("eyeLightAngle", at="double", dv=0, k=1)
        if not char.hasAttr("eyeDiffuseWeight"):
            char.addAttr("eyeDiffuseWeight", at="double", dv=0.15, k=1, min=0, max=1)
        if not char.hasAttr("eyeSpecularWeight"):
            char.addAttr("eyeSpecularWeight", at="double", dv=1.0, k=1, min=0, max=1)
        if not char.hasAttr("eyeSSSWeight"):
            char.addAttr("eyeSSSWeight", at="double", dv=0.5, k=1, min=0, max=1)
        # connect eye light strength
        char.eyeLightStrength >> emission_image.attr("multiplyR")
        char.eyeLightStrength >> emission_image.attr("multiplyG")
        char.eyeLightStrength >> emission_image.attr("multiplyB")
        # connect eye light angle
        char.eyeLightAngle >> place.attr("rotateFrame")
        # connect specular weight
        char.eyeSpecularWeight >> ks_image.attr("multiplyR")
        char.eyeSpecularWeight >> ks_image.attr("multiplyG")
        char.eyeSpecularWeight >> ks_image.attr("multiplyB")
        # wire every selected eye's shader to the shared control network
        for eye in eyes:
            shading_engine = eye.getShape().outputs(type="shadingEngine")[0]
            shader = pm.ls(shading_engine.inputs(), mat=1)[0]
            # connect the diffuse shader input to the emissionColor
            diffuse_texture = shader.attr("color").inputs(p=1, s=1)[0]
            diffuse_texture >> shader.attr("emissionColor")
            emission_image.outColorR >> shader.attr("emission")
            # also connect it to specular color
            diffuse_texture >> shader.attr("KsColor")
            # connect the Ks image to the specular weight
            ks_image.outColorR >> shader.attr("Ks")
            # also connect it to sss color
            diffuse_texture >> shader.attr("KsssColor")
            char.eyeDiffuseWeight >> shader.attr("Kd")
            char.eyeSSSWeight >> shader.attr("Ksss")
            # set some default values
            shader.attr("diffuseRoughness").set(0)
            shader.attr("Kb").set(0)
            shader.attr("directDiffuse").set(1)
            shader.attr("indirectDiffuse").set(1)
            shader.attr("specularRoughness").set(0.4)
            shader.attr("specularAnisotropy").set(0.5)
            shader.attr("specularRotation").set(0)
            shader.attr("specularFresnel").set(0)
            shader.attr("Kr").set(0)
            shader.attr("enableInternalReflections").set(0)
            shader.attr("Kt").set(0)
            shader.attr("transmittance").set([1, 1, 1])
            shader.attr("opacity").set([1, 1, 1])
            shader.attr("sssRadius").set([1, 1, 1])
        # restore the original selection
        pm.select(eyes)
@classmethod
def randomize_attr(cls, nodes, attr, min, max, pre=0.1):
"""Randomizes the given attributes of the given nodes
:param list nodes:
:param str attr:
:param float, int min:
:param float, int max:
:return:
"""
import random
import math
rand = random.random
floor = math.floor
for node in nodes:
r = rand() * float(max - min) + float(min)
r = floor(r / pre) * pre
node.setAttr(attr, r)
@classmethod
def randomize_light_color_temp(cls, min_field, max_field):
"""Randomizes the color temperature of selected lights
:param min:
:param max:
:return:
"""
min = pm.floatField(min_field, q=1, v=1)
max = pm.floatField(max_field, q=1, v=1)
cls.randomize_attr(
[node.getShape() for node in pm.ls(sl=1)], "aiColorTemperature", min, max, 1
)
@classmethod
def randomize_light_intensity(cls, min_field, max_field):
"""Randomizes the intensities of selected lights
:param min:
:param max:
:return:
"""
min = pm.floatField(min_field, q=1, v=1)
max = pm.floatField(max_field, q=1, v=1)
cls.randomize_attr(
[node.getShape() for node in pm.ls(sl=1)], "aiExposure", min, max, 0.1
)
@classmethod
def setup_outer_eye_render_attributes(cls):
"""sets outer eye render attributes for characters, select outer eye
objects and run this
"""
for node in pm.ls(sl=1):
shape = node.getShape()
shape.setAttr("castsShadows", 0)
shape.setAttr("visibleInReflections", 0)
shape.setAttr("visibleInRefractions", 0)
shape.setAttr("aiSelfShadows", 0)
shape.setAttr("aiOpaque", 0)
shape.setAttr("aiVisibleInDiffuse", 0)
shape.setAttr("aiVisibleInGlossy", 0)
@classmethod
def setup_window_glass_render_attributes(cls):
"""sets window glass render attributes for environments, select window
glass objects and run this
"""
shader_name = "toolbox_glass_shader"
shaders = pm.ls("%s*" % shader_name)
selection = pm.ls(sl=1)
if len(shaders) > 0:
shader = shaders[0]
else:
shader = pm.shadingNode("aiStandard", asShader=1, name="%s#" % shader_name)
shader.setAttr("Ks", 1)
shader.setAttr("specularRoughness", 0)
shader.setAttr("Kr", 0)
shader.setAttr("enableInternalReflections", 0)
shader.setAttr("Kt", 0)
shader.setAttr("KtColor", (0, 0, 0))
shape_attributes = [
("castsShadows", 0),
("visibleInReflections", 0),
("visibleInRefractions", 0),
("aiSelfShadows", 0),
("aiOpaque", 1),
("aiVisibleInDiffuse", 0),
("aiVisibleInGlossy", 0),
]
for node in selection:
shape = node.getShape()
map(lambda x: shape.setAttr(*x), shape_attributes)
if isinstance(shape, pm.nt.AiStandIn):
# get the glass shader or create one
shape.overrideShaders.set(1)
# assign it to the stand in
pm.select(node)
pm.hyperShade(assign=shader)
@classmethod
def dummy_window_light_plane(cls):
"""creates or updates the dummy window plane for the given area light"""
area_light_list = pm.selected()
from anima.dcc.mayaEnv import auxiliary
for light in area_light_list:
dwl = auxiliary.DummyWindowLight()
dwl.light = light
dwl.update()
@classmethod
def setup_z_limiter(cls):
"""creates z limiter setup"""
shader_name = "z_limiter_shader#"
shaders = pm.ls("%s*" * shader_name)
if len(shaders) > 0:
shader = shaders[0]
else:
shader = pm.shadingNode(
"surfaceShader", asShader=1, name="%s#" % shader_name
)
    @classmethod
    def convert_file_node_to_ai_image_node(cls):
        """converts the file node to aiImage node

        For every selected ``file`` texture an ``aiImage`` node is created
        with the same file path, the outgoing connections are re-created on
        the aiImage, the file node is deleted and the aiImage takes over its
        name. A connected place2dTexture is kept only if it differs from the
        defaults below, otherwise it is deleted.
        """
        # default place2dTexture values; a placement matching all of these
        # carries no extra information and can safely be removed
        default_values = {
            "coverageU": 1,
            "coverageV": 1,
            "translateFrameU": 0,
            "translateFrameV": 0,
            "rotateFrame": 0,
            "repeatU": 1,
            "repeatV": 1,
            "offsetU": 0,
            "offsetV": 0,
            "rotateUV": 0,
            "noiseU": 0,
            "noiseV": 0,
        }
        for node in pm.ls(sl=1, type="file"):
            node_name = node.name()
            path = node.getAttr("fileTextureName")
            ai_image = pm.shadingNode("aiImage", asTexture=1)
            ai_image.setAttr("filename", path)
            # check the placement node
            placements = node.listHistory(type="place2dTexture")
            if len(placements):
                placement = placements[0]
                # check default values
                if any(
                    [
                        placement.getAttr(attr_name) != default_values[attr_name]
                        for attr_name in default_values
                    ]
                ):
                    # connect the placement to the aiImage
                    placement.outUV >> ai_image.uvcoords
                else:
                    # delete it
                    pm.delete(placement)
            # connect the aiImage; mirror every outgoing connection of the
            # file node on the same-named aiImage attribute
            for attr_out, attr_in in node.outputs(p=1, c=1):
                attr_name = attr_out.name().split(".")[-1]
                if attr_name == "message":
                    continue
                ai_image.attr(attr_name) >> attr_in
            # delete the File node
            pm.delete(node)
            # rename the aiImage node (must happen after the delete so the
            # original name is free again)
            ai_image.rename(node_name)
    @classmethod
    def create_generic_tooth_shader(cls):
        """creates generic tooth shader for selected objects

        Builds an aiStandard based tooth shader with a procedural aiNoise
        bump through ``auxiliary.create_shader`` and assigns it to every
        selected object.
        """
        shader_name = "toolbox_generic_tooth_shader#"
        selection = pm.ls(sl=1)
        # declarative description of the shader network, consumed by
        # auxiliary.create_shader()
        shader_tree = {
            "type": "aiStandard",
            "class": "asShader",
            "attr": {
                "color": [1, 0.909, 0.815],
                "Kd": 0.2,
                "KsColor": [1, 1, 1],
                "Ks": 0.5,
                "specularRoughness": 0.10,
                "specularFresnel": 1,
                "Ksn": 0.05,
                "enableInternalReflections": 0,
                "KsssColor": [1, 1, 1],
                "Ksss": 1,
                "sssRadius": [1, 0.853, 0.68],
                "normalCamera": {
                    "output": "outNormal",
                    "type": "bump2d",
                    "class": "asTexture",
                    "attr": {
                        "bumpDepth": 0.05,
                        "bumpValue": {
                            "output": "outValue",
                            "type": "aiNoise",
                            "class": "asUtility",
                            "attr": {
                                "scaleX": 4,
                                "scaleY": 0.250,
                                "scaleZ": 4,
                            },
                        },
                    },
                },
            },
        }
        shader = auxiliary.create_shader(shader_tree, shader_name)
        for node in selection:
            # assign it to the stand in
            pm.select(node)
            pm.hyperShade(assign=shader)
@classmethod
def create_generic_gum_shader(self):
"""set ups generic gum shader for selected objects"""
shader_name = "toolbox_generic_gum_shader#"
selection = pm.ls(sl=1)
shader_tree = {
"type": "aiStandard",
"class": "asShader",
"attr": {
"color": [0.993, 0.596, 0.612],
"Kd": 0.35,
"KsColor": [1, 1, 1],
"Ks": 0.010,
"specularRoughness": 0.2,
"enableInternalReflections": 0,
"KsssColor": [1, 0.6, 0.6],
"Ksss": 0.5,
"sssRadius": [0.5, 0.5, 0.5],
"normalCamera": {
"output": "outNormal",
"type": "bump2d",
"class": "asTexture",
"attr": {
"bumpDepth": 0.1,
"bumpValue": {
"output": "outValue",
"type": "aiNoise",
"class": "asUtility",
"attr": {
"scaleX": 4,
"scaleY": 1,
"scaleZ": 4,
},
},
},
},
},
}
shader = auxiliary.create_shader(shader_tree, shader_name)
for node in selection:
# assign it to the stand in
pm.select(node)
pm.hyperShade(assign=shader)
@classmethod
def create_generic_tongue_shader(self):
"""set ups generic tongue shader for selected objects"""
shader_name = "toolbox_generic_tongue_shader#"
selection = pm.ls(sl=1)
shader_tree = {
"type": "aiStandard",
"class": "asShader",
"attr": {
"color": [0.675, 0.174, 0.194],
"Kd": 0.35,
"KsColor": [1, 1, 1],
"Ks": 0.010,
"specularRoughness": 0.2,
"enableInternalReflections": 0,
"KsssColor": [1, 0.3, 0.3],
"Ksss": 0.5,
"sssRadius": [0.5, 0.5, 0.5],
"normalCamera": {
"output": "outNormal",
"type": "bump2d",
"class": "asTexture",
"attr": {
"bumpDepth": 0.1,
"bumpValue": {
"output": "outValue",
"type": "aiNoise",
"class": "asUtility",
"attr": {
"scaleX": 4,
"scaleY": 1,
"scaleZ": 4,
},
},
},
},
},
}
shader = auxiliary.create_shader(shader_tree, shader_name)
for node in selection:
# assign it to the stand in
pm.select(node)
pm.hyperShade(assign=shader)
    @classmethod
    def create_ea_matte(cls):
        """creates "ebesinin ami" matte shader with opacity for selected
        objects.
        It is called "EA Matte" for one reason, this matte is not necessary in
        normal working conditions. That is you change the color and look of
        some 3D element in 3D application and do an artistic grading at post to
        the whole plate, not to individual elements in the render.
        And because we are forced to create this matte layer, we thought that
        we should give it a proper name.
        """
        # get the selected objects
        # for each object create a new surface shader with the opacity
        # channel having the opacity of the original shader
        # create a lut for objects that have the same material not to cause
        # multiple materials to be created
        # NOTE(review): the per-object surface shader creation described
        # above is not present in this body; only the render-layer
        # overrides below are applied -- confirm whether this is intended
        daro = pm.PyNode("defaultArnoldRenderOptions")
        # Arnold render settings overridden for the matte pass: no GI,
        # no lights/shadows/textures -- a cheap, flat render
        attrs = {
            "AASamples": 4,
            "GIDiffuseSamples": 0,
            "GIGlossySamples": 0,
            "GIRefractionSamples": 0,
            "sssBssrdfSamples": 0,
            "volumeIndirectSamples": 0,
            "GITotalDepth": 0,
            "GIDiffuseDepth": 0,
            "GIGlossyDepth": 0,
            "GIReflectionDepth": 0,
            "GIRefractionDepth": 0,
            "GIVolumeDepth": 0,
            "ignoreTextures": 1,
            "ignoreAtmosphere": 1,
            "ignoreLights": 1,
            "ignoreShadows": 1,
            "ignoreBump": 1,
            "ignoreSss": 1,
        }
        for attr in attrs:
            # register a render layer adjustment before overriding the value
            pm.editRenderLayerAdjustment(daro.attr(attr))
            daro.setAttr(attr, attrs[attr])
        # disable the Z and motion vector AOVs when they exist in the scene
        try:
            aov_z = pm.PyNode("aiAOV_Z")
            pm.editRenderLayerAdjustment(aov_z.attr("enabled"))
            aov_z.setAttr("enabled", 0)
        except pm.MayaNodeError:
            pass
        try:
            aov_mv = pm.PyNode("aiAOV_motionvector")
            pm.editRenderLayerAdjustment(aov_mv.attr("enabled"))
            aov_mv.setAttr("enabled", 0)
        except pm.MayaNodeError:
            pass
        # disable autocrop on the default driver (as a layer adjustment)
        dad = pm.PyNode("defaultArnoldDriver")
        pm.editRenderLayerAdjustment(dad.attr("autocrop"))
        dad.setAttr("autocrop", 0)
    @classmethod
    def create_z_layer(cls):
        """creates z layer with arnold render settings

        Applies (as render layer adjustments) the Arnold settings needed for
        a depth-only pass and enables the Z and motion vector AOVs.
        """
        daro = pm.PyNode("defaultArnoldRenderOptions")
        # Arnold render settings overridden for the Z pass: no GI, no
        # shaders/lights/shadows/DOF -- only depth information is needed
        attrs = {
            "AASamples": 4,
            "GIDiffuseSamples": 0,
            "GIGlossySamples": 0,
            "GIRefractionSamples": 0,
            "sssBssrdfSamples": 0,
            "volumeIndirectSamples": 0,
            "GITotalDepth": 0,
            "GIDiffuseDepth": 0,
            "GIGlossyDepth": 0,
            "GIReflectionDepth": 0,
            "GIRefractionDepth": 0,
            "GIVolumeDepth": 0,
            "ignoreShaders": 1,
            "ignoreAtmosphere": 1,
            "ignoreLights": 1,
            "ignoreShadows": 1,
            "ignoreBump": 1,
            "ignoreNormalSmoothing": 1,
            "ignoreDof": 1,
            "ignoreSss": 1,
        }
        for attr in attrs:
            # register a render layer adjustment before overriding the value
            pm.editRenderLayerAdjustment(daro.attr(attr))
            daro.setAttr(attr, attrs[attr])
        # enable the Z and motion vector AOVs when they exist in the scene
        try:
            aov_z = pm.PyNode("aiAOV_Z")
            pm.editRenderLayerAdjustment(aov_z.attr("enabled"))
            aov_z.setAttr("enabled", 1)
        except pm.MayaNodeError:
            pass
        try:
            aov_mv = pm.PyNode("aiAOV_motionvector")
            pm.editRenderLayerAdjustment(aov_mv.attr("enabled"))
            aov_mv.setAttr("enabled", 1)
        except pm.MayaNodeError:
            pass
        # enable autocrop on the default driver (as a layer adjustment)
        dad = pm.PyNode("defaultArnoldDriver")
        pm.editRenderLayerAdjustment(dad.attr("autocrop"))
        dad.setAttr("autocrop", 1)
@classmethod
def generate_reflection_curve(self):
"""Generates a curve which helps creating specular at the desired point"""
from maya.OpenMaya import MVector
from anima.dcc.mayaEnv import auxiliary
vtx = pm.ls(sl=1)[0]
normal = vtx.getNormal(space="world")
panel = auxiliary.Playblaster.get_active_panel()
camera = pm.PyNode(pm.modelPanel(panel, q=1, cam=1))
camera_axis = MVector(0, 0, -1) * camera.worldMatrix.get()
refl = camera_axis - 2 * normal.dot(camera_axis) * normal
# create a new curve
p1 = vtx.getPosition(space="world")
p2 = p1 + refl
curve = pm.curve(d=1, p=[p1, p2])
# move pivot to the first point
pm.xform(curve, rp=p1, sp=p1)
@classmethod
def import_gpu_content(self):
"""imports the selected GPU content"""
import os
imported_nodes = []
for node in pm.ls(sl=1):
gpu_node = node.getShape()
gpu_path = gpu_node.getAttr("cacheFileName")
new_nodes = pm.mel.eval(
'AbcImport -mode import -reparent "%s" "%s";'
% (node.fullPath(), os.path.expandvars(gpu_path))
)
# get imported nodes
new_nodes = node.getChildren()
new_nodes.remove(gpu_node)
imported_node = None
# filter material node
for n in new_nodes:
if n.name() != "materials":
imported_node = n
else:
pm.delete(n)
if imported_node:
imported_node.t.set(0, 0, 0)
imported_node.r.set(0, 0, 0)
imported_node.s.set(1, 1, 1)
pm.parent(imported_node, world=1)
imported_nodes.append(imported_node)
pm.select(imported_nodes)
@classmethod
def render_slicer(self):
"""A tool for slicing big render scenes
:return:
"""
# TODO: Add UI call for Render Slicer
raise NotImplementedError("This UI is not implemented yet!")
@classmethod
def move_cache_files_wrapper(cls, source_driver_field, target_driver_field):
"""Wrapper for move_cache_files() command
:param source_driver_field: Text field for source driver
:param target_driver_field: Text field for target driver
:return:
"""
source_driver = source_driver_field.text()
target_driver = target_driver_field.text()
Render.move_cache_files(source_driver, target_driver)
    @classmethod
    def move_cache_files(cls, source_driver, target_driver):
        """moves the selected cache files to another location

        For every selected aiStandIn (.ass.gz) or aiVolume (.vdb) node whose
        cache path lives under ``source_driver``, all matching cache files
        (frame padding expanded with a glob) are moved to ``target_driver``
        and the node's path attribute is updated to the new location.

        :param source_driver: path fragment identifying the source location
        :param target_driver: replacement path fragment for the target
        :return:
        """
        #
        # Move fur caches to new server
        #
        import os
        import shutil
        import glob
        pdm = ProgressManager()
        selected_nodes = pm.ls(sl=1)
        caller = pdm.register(len(selected_nodes), title="Moving Cache Files")
        for node in selected_nodes:
            ass_node = node.getShape()
            # only aiStandIn and aiVolume shapes carry movable cache files
            if not isinstance(ass_node, (pm.nt.AiStandIn, pm.nt.AiVolume)):
                continue
            if isinstance(ass_node, pm.nt.AiStandIn):
                ass_path = ass_node.dso.get()
            elif isinstance(ass_node, pm.nt.AiVolume):
                ass_path = ass_node.filename.get()
            ass_path = os.path.normpath(os.path.expandvars(ass_path))
            # give info to user
            caller.title = "Moving: %s" % ass_path
            # check if it is in the source location
            if source_driver not in ass_path:
                continue
            # check if it contains .ass.gz in its path
            if isinstance(ass_node, pm.nt.AiStandIn):
                if ".ass.gz" not in ass_path:
                    continue
            elif isinstance(ass_node, pm.nt.AiVolume):
                if ".vdb" not in ass_path:
                    continue
            # get the dirname
            ass_source_dir = os.path.dirname(ass_path)
            ass_target_dir = ass_source_dir.replace(source_driver, target_driver)
            # create the intermediate folders at destination
            try:
                os.makedirs(ass_target_dir)
            except OSError:
                # dir already exists
                pass
            # get all files list; "#" frame padding is turned into a glob
            # wildcard so every frame of the sequence is picked up
            # NOTE(review): ``re`` is not imported in this method -- it must
            # come from module scope; confirm the file-level imports
            pattern = re.subn(r"[#]+", "*", ass_path)[0].replace(".ass.gz", ".ass*")
            all_cache_files = glob.glob(pattern)
            inner_caller = pdm.register(len(all_cache_files))
            for source_f in all_cache_files:
                target_f = source_f.replace(source_driver, target_driver)
                # move files to new location
                shutil.move(source_f, target_f)
                inner_caller.step(message="Moving: %s" % source_f)
            inner_caller.end_progress()
            # finally update DSO path
            if isinstance(ass_node, pm.nt.AiStandIn):
                ass_node.dso.set(ass_path.replace(source_driver, target_driver))
            elif isinstance(ass_node, pm.nt.AiVolume):
                ass_node.filename.set(ass_path.replace(source_driver, target_driver))
            caller.step()
        caller.end_progress()
    @classmethod
    def generate_rsproxy_from_selection(cls, per_selection=False):
        """generates a temp rs file from selected nodes and hides the selected
        nodes

        The .rs file is first exported to a temp location, then moved under
        the current version's "Outputs/rs" folder, and a proxy node pointing
        at it is created under the "temp_rs_proxies_grp" group.

        :param bool per_selection: Generates one rs file per selected objects
            if True. Default is False.
        """
        import os
        import tempfile
        import shutil
        from anima.dcc.mayaEnv import auxiliary
        from anima.dcc import mayaEnv
        m = mayaEnv.Maya()
        v = m.get_current_version()
        nodes = pm.ls(sl=1)
        # group that collects all generated proxy transforms (reused if it
        # already exists in the scene)
        temp_rs_proxies_grp = None
        if pm.ls("temp_rs_proxies_grp"):
            temp_rs_proxies_grp = pm.ls("temp_rs_proxies_grp")[0]
        else:
            temp_rs_proxies_grp = pm.nt.Transform(name="temp_rs_proxies_grp")
        rs_output_folder_path = os.path.join(v.absolute_path, "Outputs/rs").replace(
            "\\", "/"
        )
        try:
            os.makedirs(rs_output_folder_path)
        except OSError:
            # dir already exists
            pass
        def _generate_rs():
            # exports the current selection to a temp .rs file, moves it to
            # the version's output folder, hides the exported nodes and
            # creates a proxy node pointing at the exported file
            export_command = 'rsProxy -fp "%(path)s" -c -z -sl;'
            # NOTE(review): tempfile.mktemp is race-prone and deprecated;
            # kept as-is to preserve behavior
            temp_rs_full_path = tempfile.mktemp(suffix=".rs")
            rs_full_path = os.path.join(
                rs_output_folder_path, os.path.basename(temp_rs_full_path)
            ).replace("\\", "/")
            pm.mel.eval(export_command % {"path": temp_rs_full_path.replace("\\", "/")})
            shutil.move(temp_rs_full_path, rs_full_path)
            # hide the original (exported) nodes
            [n.v.set(0) for n in pm.ls(sl=1)]
            rs_proxy_node, rs_proxy_mesh = auxiliary.create_rs_proxy_node(
                path=rs_full_path
            )
            rs_proxy_tra = rs_proxy_mesh.getParent()
            rs_proxy_tra.rename("temp_rs_proxy#")
            pm.parent(rs_proxy_tra, temp_rs_proxies_grp)
        if per_selection:
            # one rs file per selected node
            for node in nodes:
                pm.select(node)
                _generate_rs()
        else:
            # a single rs file for the whole selection
            pm.select(nodes)
            _generate_rs()
@classmethod
def import_image_as_plane(cls):
"""The replica of Blender tool"""
# get the image path
image_path = pm.fileDialog2(fileMode=1)
# get the image width and height
image_path = image_path[0] if image_path else ""
from PIL import Image
img = Image.open(image_path)
w, h = img.size
# create a new plane with that ratio
# keep the height 1
transform, poly_plane = pm.polyPlane(
axis=[0, 0, 1], cuv=1, h=1, w=float(w) / float(h), texture=2, sh=1, sw=1
)
shape = transform.getShape()
shape.instObjGroups[0].disconnect()
# assign a surface shader
surface_shader = pm.shadingNode("surfaceShader", asShader=1)
shading_engine = pm.nt.ShadingEngine()
surface_shader.outColor >> shading_engine.surfaceShader
# assign the given file as texture
placement = pm.nt.Place2dTexture()
file_texture = pm.nt.File()
pm.select([placement, file_texture])
cls.connect_placement2d_to_file()
file_texture.fileTextureName.set(image_path)
file_texture.outColor >> surface_shader.outColor
file_texture.outTransparency >> surface_shader.outTransparency
# pm.sets(shading_engine, fe=transform)
pm.select(shape)
pm.hyperShade(assign=surface_shader)
class RenderSlicer(object):
    """A tool to help slice single frame renders in to many little parts which
    will help it to be rendered in small parts in a render farm.

    The slicing state (slice counts and the pre-slice resolution) is stored
    as custom attributes on the camera itself, so a sliced scene can later
    be restored with :meth:`unslice` or :meth:`unslice_scene`.
    """
    def __init__(self, camera=None):
        # backing field for the ``camera`` property
        self._camera = None
        # NOTE(review): the property setter validates the value, so
        # constructing with the default ``camera=None`` raises TypeError
        self.camera = camera
    @property
    def slices_in_x(self):
        """getter for _slices_in_x attribute (stored on the camera)"""
        return self.camera.slicesInX.get()
    @slices_in_x.setter
    def slices_in_x(self, slices_in_x):
        """setter for _slices_in_x attribute (stored on the camera)"""
        self.camera.slicesInX.set(self._validate_slices_in_x(slices_in_x))
    @classmethod
    def _validate_slices_in_x(cls, slices_in_x):
        """validates the slices_in_x value, returns it when valid"""
        if not isinstance(slices_in_x, int):
            raise TypeError(
                "%s.slices_in_x should be a non-zero positive integer, not %s"
                % (cls.__name__, slices_in_x.__class__.__name__)
            )
        if slices_in_x <= 0:
            raise ValueError(
                "%s.slices_in_x should be a non-zero positive integer" % cls.__name__
            )
        return slices_in_x
    @property
    def slices_in_y(self):
        """getter for _slices_in_y attribute (stored on the camera)"""
        return self.camera.slicesInY.get()
    @slices_in_y.setter
    def slices_in_y(self, slices_in_y):
        """setter for _slices_in_y attribute (stored on the camera)"""
        self.camera.slicesInY.set(self._validate_slices_in_y(slices_in_y))
    @classmethod
    def _validate_slices_in_y(cls, slices_in_y):
        """validates the slices_in_y value, returns it when valid"""
        if not isinstance(slices_in_y, int):
            raise TypeError(
                "%s.slices_in_y should be a non-zero positive integer, not %s"
                % (cls.__name__, slices_in_y.__class__.__name__)
            )
        if slices_in_y <= 0:
            raise ValueError(
                "%s.slices_in_y should be a non-zero positive integer" % cls.__name__
            )
        return slices_in_y
    @property
    def camera(self):
        """getter for the _camera attribute"""
        return self._camera
    @camera.setter
    def camera(self, camera):
        """setter for the _camera attribute

        Validates the camera and makes sure the slicer bookkeeping
        attributes exist on it.

        :param camera: A Maya camera
        :return: None
        """
        camera = self._validate_camera(camera)
        self._create_data_attributes(camera)
        self._camera = camera
    @classmethod
    def _validate_camera(cls, camera):
        """validates the given camera, returns it when valid"""
        if camera is None:
            raise TypeError("Please supply a Maya camera")
        if not isinstance(camera, pm.nt.Camera):
            raise TypeError(
                "%s.camera should be a Maya camera, not %s"
                % (cls.__name__, camera.__class__.__name__)
            )
        return camera
    @classmethod
    def _create_data_attributes(cls, camera):
        """creates slicer data attributes inside the camera

        Idempotent: each attribute is only added when missing.

        :param pm.nt.Camera camera: A maya camera
        """
        # store the original resolution
        # slices in x
        # slices in y
        # is_sliced
        # non_sliced_resolution_x
        # non_sliced_resolution_y
        # slices_in_x
        # slices_in_y
        if not camera.hasAttr("isSliced"):
            camera.addAttr("isSliced", at="bool")
        if not camera.hasAttr("nonSlicedResolutionX"):
            camera.addAttr("nonSlicedResolutionX", at="short")
        if not camera.hasAttr("nonSlicedResolutionY"):
            camera.addAttr("nonSlicedResolutionY", at="short")
        if not camera.hasAttr("slicesInX"):
            camera.addAttr("slicesInX", at="short")
        if not camera.hasAttr("slicesInY"):
            camera.addAttr("slicesInY", at="short")
    def _store_data(self):
        """stores slicer data inside the camera"""
        self._create_data_attributes(self.camera)
        self.camera.isSliced.set(self.is_sliced)
        # get the current render resolution
        dres = pm.PyNode("defaultResolution")
        width = dres.width.get()
        height = dres.height.get()
        # remember the pre-slice resolution so unslice() can restore it
        self.camera.nonSlicedResolutionX.set(width)
        self.camera.nonSlicedResolutionY.set(height)
        self.camera.slicesInX.set(self.slices_in_x)
        self.camera.slicesInY.set(self.slices_in_y)
    @property
    def is_sliced(self):
        """A shortcut for the camera.isSliced attribute"""
        if self.camera.hasAttr("isSliced"):
            return self.camera.isSliced.get()
        return False
    @is_sliced.setter
    def is_sliced(self, is_sliced):
        """A shortcut for the camera.isSliced attribute"""
        if not self.camera.hasAttr("isSliced"):
            self._create_data_attributes(self.camera)
        self.camera.isSliced.set(is_sliced)
    def unslice(self):
        """resets the camera to original non-sliced state"""
        # unslice the camera
        dres = pm.PyNode("defaultResolution")
        # set the resolution to original
        dres.width.set(self.camera.getAttr("nonSlicedResolutionX"))
        dres.height.set(self.camera.getAttr("nonSlicedResolutionY"))
        dres.pixelAspect.set(1)
        # turn the pan/zoom based slicing off on the camera
        self.camera.panZoomEnabled.set(0)
        self.camera.isSliced.set(False)
    def unslice_scene(self):
        """scans the scene cameras and unslice the scene"""
        dres = pm.PyNode("defaultResolution")
        dres.aspectLock.set(0)
        # TODO: check multi sliced camera
        for cam in pm.ls(type=pm.nt.Camera):
            if cam.hasAttr("isSliced") and cam.isSliced.get():
                dres.width.set(cam.nonSlicedResolutionX.get())
                dres.height.set(cam.nonSlicedResolutionY.get())
                dres.pixelAspect.set(1)
                cam.isSliced.set(False)
    def slice(self, slices_in_x, slices_in_y):
        """slices all renderable cameras

        The render resolution is divided by the slice counts and the camera
        pans over the film back, one keyframe (= one frame number) per
        slice, row by row.
        """
        # set render resolution
        self.unslice_scene()
        self.is_sliced = True
        self._store_data()
        sx = self.slices_in_x = slices_in_x
        sy = self.slices_in_y = slices_in_y
        # set render resolution
        d_res = pm.PyNode("defaultResolution")
        h_res = d_res.width.get()
        v_res = d_res.height.get()
        # this system only works when the aspect lock is off and the pixel
        # aspect is 1 -- keep forcing them
        d_res.aspectLock.set(0)
        d_res.pixelAspect.set(1)
        d_res.width.set(h_res / float(sx))
        d_res.pixelAspect.set(1)
        d_res.height.set(v_res / float(sy))
        d_res.pixelAspect.set(1)
        # use h_aperture to calculate v_aperture
        h_aperture = self.camera.getAttr("horizontalFilmAperture")
        # recalculate the other aperture
        v_aperture = h_aperture * v_res / h_res
        self.camera.setAttr("verticalFilmAperture", v_aperture)
        v_aperture = self.camera.getAttr("verticalFilmAperture")
        # zoom in so one render covers exactly one slice
        self.camera.setAttr("zoom", 1.0 / float(sx))
        # key the pan values, one frame per slice
        t = 0
        for i in range(sy):
            v_pan = v_aperture / (2.0 * sy) * (1 + 2 * i - sy)
            for j in range(sx):
                h_pan = h_aperture / (2.0 * sx) * (1 + 2 * j - sx)
                pm.currentTime(t)
                pm.setKeyframe(self.camera, at="horizontalPan", v=h_pan)
                pm.setKeyframe(self.camera, at="verticalPan", v=v_pan)
                t += 1
        self.camera.panZoomEnabled.set(1)
        self.camera.renderPanZoom.set(1)
        d_res.pixelAspect.set(1)
class LightingSceneBuilder(object):
"""Build lighting scenes.
This is a class that helps building lighting scenes by looking at the animation
scenes, gathering data and then using that data to reference assets and cache files
to the lighting scene.
"""
ANIMS_GROUP_NAME = "ANIMS"
CAMERA_GROUP_NAME = "CAMERA"
LAYOUTS_GROUP_NAME = "LAYOUTS"
LIGHTS_GROUP_NAME = "LIGHTS"
LOOK_DEVS_GROUP_NAME = "LOOK_DEVS"
RIG_TO_CACHEABLE_LUT_FILE_NAME = "rig_to_cacheable_lut.json"
RIG_TO_LOOK_DEV_LUT_FILE_NAME = "rig_to_look_dev_lut.json"
    def __init__(self):
        """Initialize the instance with empty LUTs and unset LUT file paths."""
        # cached path of the rig -> cacheable LUT JSON file
        self.rig_to_cacheable_lut_file_path = None
        # rig task id (str) -> take name -> cacheable attribute value
        self.rig_to_cacheable_lut = {}
        # cached path of the rig -> look dev LUT JSON file
        self.rig_to_look_dev_lut_file_path = None
        # rig task id (str) -> take name -> look dev task/take mapping
        self.rig_to_look_dev_lut = {}
def generate_rig_to_cacheable_lut_file_path(self, project):
"""Generate rig_to_cacheable_lut_file_path.
:param project: A Stalker project.
:return str: The path of the JSON file path.
"""
if self.rig_to_cacheable_lut_file_path:
return self.rig_to_cacheable_lut_file_path
from stalker import Project
if not isinstance(project, Project):
raise TypeError("Please supply a stalker Project instance, not {}".format(
project.__class__.__name__
))
self.rig_to_cacheable_lut_file_path = os.path.join(
project.repository.path,
project.code,
"References",
self.RIG_TO_CACHEABLE_LUT_FILE_NAME
)
return self.rig_to_cacheable_lut_file_path
def generate_rig_to_look_dev_lut_file_path(self, project):
"""Generate rig_to_look_dev_lut_file_path.
:param project: A Stalker project.
:return str: The path of the JSON file path.
"""
if self.rig_to_look_dev_lut_file_path:
return self.rig_to_look_dev_lut_file_path
from stalker import Project
if not isinstance(project, Project):
raise TypeError("Please supply a stalker Project instance, not {}".format(
project.__class__.__name__
))
self.rig_to_look_dev_lut_file_path = os.path.join(
project.repository.path,
project.code,
"References",
self.RIG_TO_LOOK_DEV_LUT_FILE_NAME
)
return self.rig_to_look_dev_lut_file_path
    def update_rig_to_look_dev_lut(self, path):
        """Update the ``self.rig_to_look_dev_lut``.

        Only overrides the file path the LUT is read from; the LUT itself is
        loaded later by ``read_rig_to_look_dev_lut``.

        :param str path: The path to the custom json file.
        """
        # override the default paths
        self.rig_to_look_dev_lut_file_path = path
def read_rig_to_cacheable_lut(self, project):
"""Read the JSON file at the given path.
Reads the rig -> cacheable attr value from the file for speeding the whole
process.
The path is for a JSON file that contains the mapping from rig_to_cacheable and
the reverse mapping. In theory, this should make things way faster by skipping
the loading of the reference files.
:param Project project: The Stalker Project instance.
"""
import json
path = self.generate_rig_to_cacheable_lut_file_path(project)
if os.path.isfile(path):
with open(path, "r") as f:
self.rig_to_cacheable_lut = json.load(f)
def write_rig_to_cacheable_lut(self, project):
"""Update the JSON file with new info
:param Project project: A Stalker Project instance.
"""
import json
path = self.generate_rig_to_cacheable_lut_file_path(project)
try:
os.makedirs(os.path.dirname(path))
except OSError:
# already exists, skip
pass
with open(path, "w") as f:
json.dump(self.rig_to_cacheable_lut, f, indent=4, sort_keys=True)
def read_rig_to_look_dev_lut(self, project):
"""Read the JSON file at the given path.
Reads the rig -> cacheable attr value from the file for speeding the whole
process.
The path is for a JSON file that contains the mapping from rig_to_cacheable and
the reverse mapping. In theory, this should make things way faster by skipping
the loading of the reference files.
:param Project project: The Stalker Project instance.
"""
import json
path = self.generate_rig_to_look_dev_lut_file_path(project)
if os.path.isfile(path):
with open(path, "r") as f:
self.rig_to_look_dev_lut = json.load(f)
    def get_cacheable_to_look_dev_version_lut(self, animation_version):
        """Build look dev version lut.

        Opens the given animation version, walks its references and maps
        each cacheable name (plus reference copy number) to the latest
        published Look Development version of the matching asset. The
        original (lighting) scene is re-opened before returning.

        :param Version animation_version: The animation Version to open.
        :return: dict mapping "<cacheable><copy_number>" to a dict with
            "look_dev_version" and "no_render" keys.
        """
        from stalker import Type, Task, Version
        look_dev_type = Type.query.filter(Type.name == "Look Development").first()
        if not look_dev_type:
            raise RuntimeError(
                "No Look Development task type found, please create one!"
            )
        # open the animation version
        from anima.dcc import mayaEnv
        # get the current version
        m = mayaEnv.Maya()
        # store the current version to open later on
        lighting_version = m.get_current_version()
        m.open(
            animation_version,
            force=True,
            skip_update_check=True,
            prompt=False,
            reference_depth=3
        )
        # this version may uploaded with Stalker Pyramid, so update referenced versions
        # to get a proper version.inputs list
        m.update_version_inputs()
        # there is a new bug in some animation scene
        # that causes the initialShader to be locked
        pm.lockNode('initialShadingGroup', l=0, lockUnpublished=0)
        cacheable_to_look_dev_version_lut = {}
        references_with_no_look_dev_task = []
        references_with_no_look_dev_version = []
        # load the self.rig_to_cacheable_lut
        self.read_rig_to_cacheable_lut(animation_version.task.project)
        self.read_rig_to_look_dev_lut(animation_version.task.project)
        # now load all references
        for ref in pm.listReferences():
            # get all cacheable nodes
            ref_version = ref.version
            rig_task = ref_version.task
            rig_task_id = rig_task.id
            rig_task_id_as_str = str(rig_task_id)
            rig_take_name = ref_version.take_name
            copy_number = auxiliary.get_reference_copy_number(ref)
            cacheable_attr_value = None
            # try to use the cache file
            if rig_task_id_as_str in self.rig_to_cacheable_lut:
                if rig_take_name in self.rig_to_cacheable_lut[rig_task_id_as_str]:
                    cacheable_attr_value = \
                        self.rig_to_cacheable_lut[rig_task_id_as_str][rig_take_name]
            else:
                # load the reference (slow path: the cacheable attr has to be
                # read from the referenced scene itself)
                ref.load()
                cacheable_nodes = auxiliary.get_cacheable_nodes(ref)
                if not cacheable_nodes:
                    continue
                cacheable_node = cacheable_nodes[0]
                cacheable_attr_value = cacheable_node.cacheable.get()
                # store the value for the next time
                if rig_task_id_as_str not in self.rig_to_cacheable_lut:
                    self.rig_to_cacheable_lut[rig_task_id_as_str] = {}
                self.rig_to_cacheable_lut[rig_task_id_as_str][rig_take_name] = \
                    cacheable_attr_value
            cacheable_attr_value_with_copy_number = "{}{}".format(
                cacheable_attr_value, copy_number
            )
            non_renderable_objects = []
            look_dev_take_name = None
            look_dev_task = None
            if rig_task_id_as_str in self.rig_to_look_dev_lut:
                # there is a custom mapping for this rig use it
                if rig_take_name in self.rig_to_look_dev_lut[rig_task_id_as_str]:
                    lut_data = \
                        self.rig_to_look_dev_lut[rig_task_id_as_str][rig_take_name]
                    look_dev_task_id = lut_data['look_dev_task_id']
                    look_dev_take_name = lut_data['look_dev_take_name']
                    look_dev_task = Task.query.get(look_dev_task_id)
                    if "no_render" in lut_data:
                        # there are object not to be rendered
                        non_renderable_objects = lut_data["no_render"]
            else:
                # try to get the sibling look dev task
                look_dev_take_name = ref_version.take_name
                look_dev_task = Task.query\
                    .filter(Task.parent == rig_task.parent)\
                    .filter(Task.type == look_dev_type)\
                    .first()
            # no look_dev_task, we can't do anything about this asset, report it
            if not look_dev_task:
                references_with_no_look_dev_task.append(ref_version)
                # skip to the next cacheable node
                continue
            # get the latest published look dev version for this cacheable node
            latest_published_look_dev_version = Version.query\
                .filter(Version.task == look_dev_task)\
                .filter(Version.take_name == look_dev_take_name)\
                .filter(Version.is_published == True)\
                .order_by(Version.version_number.desc())\
                .first()
            if not latest_published_look_dev_version:
                references_with_no_look_dev_version.append(ref_version)
            cacheable_to_look_dev_version_lut[cacheable_attr_value_with_copy_number] = {
                "look_dev_version": latest_published_look_dev_version,
                "no_render": non_renderable_objects
            }
        # save the self.rig_to_cacheable_lut
        self.write_rig_to_cacheable_lut(animation_version.task.project)
        # re-open the lighting version
        m.open(lighting_version, force=True, skip_update_check=True, prompt=False)
        # report the problematic references to the console
        print("\nReferences With No Look Dev Task")
        print("================================")
        print("\n".join([v.absolute_full_path for v in references_with_no_look_dev_task]))
        print("\nReferences With No Look Dev Version")
        print("===================================")
        print("\n".join([v.absolute_full_path for v in references_with_no_look_dev_version]))
        import pprint
        print("\nCacheable To LookDev Version Lut")
        print("================================")
        pprint.pprint(cacheable_to_look_dev_version_lut)
        return cacheable_to_look_dev_version_lut
def create_item_group(self, group_name, hidden=False):
"""Crete item group.
:param str group_name: The group name.
:param bool hidden: If the group should be invisible.
"""
query = pm.ls(group_name)
if not query:
group = pm.nt.Transform(name=group_name)
group.v.set(not hidden) # It should be hidden
else:
group = query[0]
return group
    def build(
        self,
        transfer_shaders=True,
        transfer_uvs=False,
        cache_type=auxiliary.ALEMBIC
    ):
        """Build the lighting scene from the sibling Animation task's caches.

        :param bool transfer_shaders: Transfer shading assignments from each
            referenced look dev scene onto the matching cache nodes.
        :param bool transfer_uvs: Also transfer UVs from the look dev geometry
            onto the cache geometry.
        :param cache_type: One of the ``auxiliary`` cache format constants
            (defaults to Alembic); used to recognize cache references by their
            file extension.
        :return: None
        :raises RuntimeError: When the current scene is not saved as a version,
            is not under a Lighting task, has no parent Shot, or no Animation
            task/version can be found.
        """
        from anima.dcc import mayaEnv
        # get the current version
        m = mayaEnv.Maya()
        v = m.get_current_version()
        if not v:
            raise RuntimeError(
                "No version found! Please save an empty scene as a version under the "
                "Lighting task"
            )
        # check if this is really a lighting task
        from stalker import Type
        lighting_task = v.task
        lighting_type = Type.query.filter(Type.name == "Lighting").first()
        if not lighting_type:
            raise RuntimeError("No Lighting task type found, please create one!")
        if not lighting_task.type or not lighting_task.type == lighting_type:
            raise RuntimeError(
                "This is not a lighting task, please run this in a scene related to a "
                "Lighting task."
            )
        shot = lighting_task.parent
        if not shot:
            raise RuntimeError(
                "No parent task found! It is not possible to find sibling tasks!"
            )
        # get the animation task
        animation_type = Type.query.filter(Type.name == "Animation").first()
        if not animation_type:
            raise RuntimeError("No Animation task type found, please create one!")
        from stalker import Task
        animation_task = Task.query.filter(Task.parent == shot)\
            .filter(Task.type == animation_type).first()
        if not animation_task:
            raise RuntimeError("No Animation task found!")
        # get latest animation version under the "Main" take
        from stalker import Version
        animation_version = Version.query\
            .filter(Version.task == animation_task)\
            .filter(Version.take_name == "Main")\
            .order_by(Version.version_number.desc())\
            .first()
        if not animation_version:
            raise RuntimeError("No Animation Version under Main take is found!")
        # get the cacheable_to_look_dev_lut
        cacheable_to_look_dev_version_lut = \
            self.get_cacheable_to_look_dev_version_lut(animation_version)
        # reference all caches
        # (we are assuming that these are all generated before)
        auxiliary.auto_reference_caches()
        # create the LOOK_DEVS group if it doesn't exist
        look_devs_group = self.create_item_group(self.LOOK_DEVS_GROUP_NAME)
        anims_group = self.create_item_group(self.ANIMS_GROUP_NAME)
        camera_group = self.create_item_group(self.CAMERA_GROUP_NAME)
        # NOTE(review): lights_group is created but never used below — confirm
        # whether lights should be parented under it somewhere.
        lights_group = self.create_item_group(self.LIGHTS_GROUP_NAME)
        # get all referenced cache files
        # to prevent referencing the same look dev more than once,
        # store the referenced look dev version in a dictionary
        look_dev_version_to_ref_node_lut = {}
        for cache_ref_node in pm.listReferences():
            # only process references whose path matches the cache format
            if not cache_ref_node.path.endswith(
                auxiliary.CACHE_FORMAT_DATA[cache_type]["file_extension"]
            ):
                continue
            # ref namespace is equal to the cacheable_attr_value
            cacheable_attr_value = cache_ref_node.namespace
            # if this is the shotCam, renderCam or the camera, just skip it
            if any([cam.lower() in cacheable_attr_value.lower() for cam in ("shotCam", "renderCam")]):
                # parent it under CAMERA group
                pm.parent(cache_ref_node.nodes()[0], camera_group)
                # and skip the rest
                continue
            # now use the cacheable_to_look_dev_version_lut to reference the look_dev
            # file
            look_dev_version = \
                cacheable_to_look_dev_version_lut[cacheable_attr_value]['look_dev_version']
            if look_dev_version in look_dev_version_to_ref_node_lut:
                # use the same ref_node
                look_dev_ref_node = look_dev_version_to_ref_node_lut[look_dev_version]
            elif look_dev_version is not None:
                # reference the look dev file
                look_dev_ref_node = m.reference(look_dev_version)
                look_dev_version_to_ref_node_lut[look_dev_version] = look_dev_ref_node
            else:
                # no published look dev
                # skip this cacheable not
                print("Warning: No published Look Dev version found for: {}".format(
                    cacheable_attr_value
                ))
                continue
            # now we should have a reference node for the cache and a reference node for
            # the look dev
            look_dev_root_node = auxiliary.get_root_nodes(look_dev_ref_node)[0]
            cache_root_node = auxiliary.get_root_nodes(cache_ref_node)[0]
            if transfer_shaders:
                # transfer shaders from the look dev to the cache nodes
                pm.select(None)
                # look dev scenes references the model scene and the geometry is in the
                # model scene
                pm.select([look_dev_root_node, cache_root_node])
                Render.transfer_shaders()
            # hide all the transform nodes under the look_dev_root_node
            for node in pm.listRelatives(look_dev_root_node, ad=1, type=pm.nt.Transform):
                node.v.set(0)
            # and the look dev node itself
            look_dev_root_node.v.set(0)
            if transfer_uvs:
                from anima.dcc.mayaEnv import modeling
                pm.select(None)
                pm.select([look_dev_root_node, cache_root_node])
                modeling.Model.transfer_uvs()
            # hide non renderable objects
            cache_ref_node_nodes = cache_ref_node.nodes()
            for no_render_name in cacheable_to_look_dev_version_lut[cacheable_attr_value]["no_render"]:
                for cached_node in cache_ref_node_nodes:
                    if cached_node.stripNamespace() == no_render_name:
                        cached_node.v.set(0)
                        # NOTE(review): this 'continue' is a no-op at the end of
                        # the loop body; 'break' may have been intended — confirm.
                        continue
            # deselect everything to prevent unpredicted errors
            pm.select(None)
            # parent the look_dev_root_node under the LOOK_DEVS group
            pm.parent(look_dev_root_node, look_devs_group)
            # parent the alembic under the ANIMS group
            pm.parent(cache_root_node, anims_group)
        # animation version inputs should have been updated
        # reference any Layouts
        layouts_group = self.create_item_group(self.LAYOUTS_GROUP_NAME)
        layout_type = Type.query.filter(Type.name == "Layout").first()
        for input_version in animation_version.inputs:
            if input_version.task.type and input_version.task.type == layout_type:
                # reference this version here too
                # use the RSProxy repr
                rs_proxy_take_name = "{}@RS".format(
                    input_version.take_name.split("@")[0]
                )
                input_version = Version.query\
                    .filter(Version.task==input_version.task)\
                    .filter(Version.take_name==rs_proxy_take_name)\
                    .filter(Version.is_published==True)\
                    .order_by(Version.version_number.desc())\
                    .first()
                if input_version:
                    ref_node = m.reference(input_version)
                    # parent it to the LAYOUTS group
                    pm.parent(ref_node.nodes()[0], layouts_group)
|
import tellurium as te
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
class parameterScan(object):
    """Time-course simulation and parameter-scan plotting helper for a
    roadrunner/tellurium model instance.

    :param rr: A roadrunner model instance providing ``simulate``,
        ``reset`` and a ``model`` mapping of parameter values.
    """

    def __init__(self, rr):
        # simulation window
        self.startTime = 0
        self.endTime = 20
        self.numberOfPoints = 100
        # independent[1] is the parameter scanned in surfacePlot()
        self.independent = ["Time", "k2"]
        self.selection = ["Time", "S1"]
        self.dependent = ["S1"]
        # scanned parameter range; kept as one-element lists for backward
        # compatibility (scalars are accepted as well, see _scalar()).
        self.startInd = [1]
        self.endInd = [5]
        self.integrator = "cvode"
        self.rr = rr
        # plot appearance options
        self.colorSelect = None
        self.width = 2.5
        self.color = None
        self.xlabel = None
        self.ylabel = None
        self.zlabel = None
        self.colormap = "seismic"
        self.colorbar = True
        self.title = None

    @staticmethod
    def _scalar(value):
        """Return ``value[0]`` when *value* is a list/tuple, else *value*.

        Fixes a TypeError in surfacePlot(): the default ``startInd``/``endInd``
        are one-element lists, and list arithmetic is not defined.
        """
        if isinstance(value, (list, tuple)):
            return value[0]
        return value

    def Sim(self):
        """Run one time-course simulation and return the result array."""
        result = self.rr.simulate(self.startTime, self.endTime, self.numberOfPoints,
                                  self.selection, integrator=self.integrator)
        return result

    def plotArray(self):
        """Plot the simulation result with options for linewidth and line color."""
        result = self.Sim()
        if self.color is None:
            plt.plot(result[:, 0], result[:, 1:], linewidth=self.width)
        else:
            plt.plot(result[:, 0], result[:, 1:], color=self.color,
                     linewidth=self.width)
        if self.ylabel is not None:
            plt.ylabel(self.ylabel)
        if self.xlabel is not None:
            plt.xlabel(self.xlabel)
        if self.title is not None:
            plt.suptitle(self.title)
        plt.show()

    def surfacePlot(self):
        """Plot results of the scan as a colored surface.

        Takes three variables, two independent (time and the scanned
        parameter) and one dependent.  Legal colormap names can be found at
        http://matplotlib.org/examples/color/colormaps_reference.html.
        """
        fig = plt.figure()
        # Figure.gca(projection='3d') was removed in matplotlib >= 3.6;
        # add_subplot with a projection is the supported spelling.
        ax = fig.add_subplot(111, projection='3d')
        # normalize list defaults to scalars before doing arithmetic
        start_ind = self._scalar(self.startInd)
        end_ind = self._scalar(self.endInd)
        interval = (self.endTime - self.startTime) / float(self.numberOfPoints - 1)
        X = np.arange(self.startTime, (self.endTime + (interval - 0.001)), interval)
        interval = (end_ind - start_ind) / float(self.numberOfPoints - 1)
        Y = np.arange(start_ind, (end_ind + (interval - 0.001)), interval)
        X, Y = np.meshgrid(X, Y)
        self.rr.reset()
        self.rr.model[self.independent[1]] = start_ind
        Z = self.rr.simulate(self.startTime, self.endTime, (self.numberOfPoints - 1),
                             self.dependent, integrator=self.integrator)
        Z = Z.T
        for i in range(self.numberOfPoints - 1):
            self.rr.reset()
            self.rr.model[self.independent[1]] = start_ind + ((i + 1) * interval)
            Z1 = self.rr.simulate(self.startTime, self.endTime, (self.numberOfPoints - 1),
                                  self.dependent, integrator=self.integrator)
            Z1 = Z1.T
            Z = np.concatenate((Z, Z1))
        surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=self.colormap,
                               linewidth=0)
        ax.yaxis.set_major_locator(LinearLocator(6))
        ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
        ax.set_xlabel(self.independent[0]) if self.xlabel is None else ax.set_xlabel(self.xlabel)
        ax.set_ylabel(self.independent[1]) if self.ylabel is None else ax.set_ylabel(self.ylabel)
        ax.set_zlabel(self.dependent[0]) if self.zlabel is None else ax.set_zlabel(self.zlabel)
        if self.colorbar is True:
            fig.colorbar(surf, shrink=0.5, aspect=4)
        plt.show()
Removed the ParameterScan2 class.
|
'''
@author: frank
'''
import sys, os, os.path
from zstacklib.utils import log
from zstacklib.utils import linux
import zstacklib.utils.iptables as iptables
pidfile = '/var/run/zstack/kvmagent.pid'
log.configure_log('/var/log/zstack/zstack-kvmagent.log')
logger = log.get_logger(__name__)
import kvmagent
def prepare_pid_dir(path):
    """Ensure the directory that will hold the PID file *path* exists."""
    parent_dir = os.path.dirname(path)
    if not os.path.isdir(parent_dir):
        os.makedirs(parent_dir)
def main():
    """Command-line entry point: start | stop | restart the zstack KVM agent daemon."""
    # NOTE: this module targets Python 2 (print statement below).
    usage = 'usage: python -c "from kvmagent import kdaemon; kdaemon.main()" start|stop|restart'
    if len(sys.argv) != 2 or not sys.argv[1] in ['start', 'stop', 'restart']:
        print usage
        sys.exit(1)

    global pidfile
    prepare_pid_dir(pidfile)

    try:
        # open firewall ports for the agent (7070) and libvirt (16509)
        iptc = iptables.from_iptables_save()
        iptc.add_rule('-A INPUT -p tcp -m tcp --dport 7070 -j ACCEPT')
        iptc.add_rule('-A INPUT -p tcp -m tcp --dport 16509 -j ACCEPT')
        iptc.iptable_restore()

        cmd = sys.argv[1]
        py_process_name = 'from kvmagent import kdaemon'
        agentdaemon = kvmagent.KvmDaemon(pidfile, py_process_name)
        if cmd == 'start':
            logger.debug('zstack-kvmagent starts')
            agentdaemon.start()
        elif cmd == 'stop':
            logger.debug('zstack-kvmagent stops')
            agentdaemon.stop()
        elif cmd == 'restart':
            logger.debug('zstack-kvmagent restarts')
            agentdaemon.restart()
        sys.exit(0)
    except Exception:
        # log the full stacktrace and exit non-zero on any failure
        logger.warning(linux.get_exception_stacktrace())
        sys.exit(1)
if __name__ == '__main__':
main()
Fix: the log file was configured too late; configure logging immediately after importing the log module.
'''
@author: frank
'''
import sys, os, os.path
from zstacklib.utils import log
log.configure_log('/var/log/zstack/zstack-kvmagent.log')
from zstacklib.utils import linux
import zstacklib.utils.iptables as iptables
pidfile = '/var/run/zstack/kvmagent.pid'
logger = log.get_logger(__name__)
import kvmagent
def prepare_pid_dir(path):
    """Create the parent directory of *path* when it is missing."""
    directory = os.path.dirname(path)
    if os.path.isdir(directory):
        return
    os.makedirs(directory)
def main():
    """Start, stop or restart the zstack KVM agent daemon from the command line."""
    # NOTE: Python 2 print statement — this module targets Python 2.
    usage = 'usage: python -c "from kvmagent import kdaemon; kdaemon.main()" start|stop|restart'
    if len(sys.argv) != 2 or not sys.argv[1] in ['start', 'stop', 'restart']:
        print usage
        sys.exit(1)

    global pidfile
    prepare_pid_dir(pidfile)

    try:
        # punch firewall holes: 7070 for the agent, 16509 for libvirt
        iptc = iptables.from_iptables_save()
        iptc.add_rule('-A INPUT -p tcp -m tcp --dport 7070 -j ACCEPT')
        iptc.add_rule('-A INPUT -p tcp -m tcp --dport 16509 -j ACCEPT')
        iptc.iptable_restore()

        cmd = sys.argv[1]
        py_process_name = 'from kvmagent import kdaemon'
        agentdaemon = kvmagent.KvmDaemon(pidfile, py_process_name)
        if cmd == 'start':
            logger.debug('zstack-kvmagent starts')
            agentdaemon.start()
        elif cmd == 'stop':
            logger.debug('zstack-kvmagent stops')
            agentdaemon.stop()
        elif cmd == 'restart':
            logger.debug('zstack-kvmagent restarts')
            agentdaemon.restart()
        sys.exit(0)
    except Exception:
        # any failure is logged with its stacktrace; exit non-zero
        logger.warning(linux.get_exception_stacktrace())
        sys.exit(1)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
from openerp import models, api, fields
import openerp.addons.decimal_precision as dp
class sale_order_line(models.Model):
    """Sale order line with "printed" price fields for Argentina.

    In Argentina taxes other than VAT are not itemized on invoices, so these
    fields ignore only the VAT when printing the values.  The same
    functionality is extended here to sale orders.
    (Docstring translated from the original Spanish.)
    """
    _inherit = "sale.order.line"

    @api.one
    def _printed_prices(self):
        # Compute the "printed" price variants plus the VAT / non-VAT /
        # exempt tax amounts for this line.
        taxes = self.env['account.tax']
        price = self.price_unit * (1 - (self.discount or 0.0) / 100.0)
        price_unit_without_tax = self.tax_id.compute_all(
            self.price_unit, 1, product=self.product_id,
            partner=self.order_id.partner_id)
        # For document that not discriminate we include the prices
        if self.order_id.vat_discriminated:
            printed_price_unit = price_unit_without_tax['total']
            printed_price_net = price_unit_without_tax['total'] * (
                1 - (self.discount or 0.0) / 100.0)
            printed_price_subtotal = printed_price_net * self.product_uom_qty
        else:
            printed_price_unit = price_unit_without_tax['total_included']
            printed_price_net = price_unit_without_tax['total_included'] * (
                1 - (self.discount or 0.0) / 100.0)
            printed_price_subtotal = printed_price_net * self.product_uom_qty
        self.printed_price_unit = printed_price_unit
        self.printed_price_net = printed_price_net
        self.printed_price_subtotal = printed_price_subtotal
        # Not VAT taxes
        not_vat_taxes = self.tax_id.filtered(
            lambda r: r.tax_code_id.parent_id.name != 'IVA').compute_all(
            price, 1,
            product=self.product_id,
            partner=self.order_id.partner_id)
        not_vat_taxes_amount = not_vat_taxes[
            'total_included'] - not_vat_taxes['total']
        # VAT taxes
        vat_taxes = self.tax_id.filtered(
            lambda r: r.tax_code_id.parent_id.name == 'IVA').compute_all(
            price, 1,
            product=self.product_id,
            partner=self.order_id.partner_id)
        vat_taxes_amount = vat_taxes['total_included'] - vat_taxes['total']
        exempt_amount = 0.0
        # NOTE(review): compute_all returns a dict, so ``not vat_taxes`` looks
        # always False, and ``taxes['total_included']`` indexes an empty
        # recordset with a string — confirm the intended behavior here.
        if not vat_taxes:
            exempt_amount = taxes['total_included']
        self.price_unit_with_tax = printed_price_unit + vat_taxes_amount + not_vat_taxes_amount
        self.vat_amount = vat_taxes_amount * self.product_uom_qty
        self.other_taxes_amount = not_vat_taxes_amount * self.product_uom_qty
        self.exempt_amount = exempt_amount * self.product_uom_qty

    # --- Computed "printed" fields (all filled by _printed_prices) ---
    price_unit_with_tax = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Unit Price w/ Taxes'
    )
    printed_price_unit = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Unit Price'
    )
    printed_price_net = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Net Price',
    )
    printed_price_subtotal = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Subtotal',
    )
    vat_amount = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Vat Amount',
    )
    other_taxes_amount = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Other Taxes Amount',
    )
    exempt_amount = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Exempt Amount',
    )
class sale_order(models.Model):
    """Sale order extension adding VAT-discrimination logic and printed totals."""
    _inherit = "sale.order"

    @api.one
    @api.depends(
        'partner_id',
        'partner_id.responsability_id',
        'company_id',
        'company_id.partner_id.responsability_id',)
    def get_vat_discriminated(self):
        # Decide whether VAT is discriminated on this order from the AFIP
        # document letter valid for the partner/company pair.
        vat_discriminated = True
        if self.company_id.sale_allow_vat_no_discrimination:
            letter_ids = self.env['account.invoice'].get_valid_document_letters(
                self.partner_id.id, 'sale', self.company_id.id)
            letters = self.env['afip.document_letter'].browse(letter_ids)
            if letters:
                vat_discriminated = letters[0].vat_discriminated
            elif self.company_id.sale_allow_vat_no_discrimination == 'no_discriminate_default':
                vat_discriminated = False
        self.vat_discriminated = vat_discriminated

    @api.one
    @api.depends(
        'partner_id',
        'partner_id.responsability_id',
        'company_id',
        'company_id.partner_id.responsability_id',)
    def get_taxes(self):
        # Collect the distinct taxes used by the order lines.
        self.tax_line = self.env['account.tax']
        tax_ids = []
        for line in self.order_line:
            tax_ids += line.tax_id.ids
        tax_ids = list(set(tax_ids))
        self.tax_line = tax_ids

    tax_line = fields.Many2many(
        'account.tax',
        compute='get_taxes')
    vat_discriminated = fields.Boolean(
        'Discriminate VAT?',
        compute="get_vat_discriminated",
        store=True,
        readonly=False,
        help="Discriminate VAT on Quotations and Sale Orders?")

    @api.one
    def _printed_prices(self):
        # Aggregate the per-line tax amounts into order-level printed totals.
        vat_amount = sum(
            line.vat_amount for line in self.order_line)
        other_taxes_amount = sum(
            line.other_taxes_amount for line in self.order_line)
        exempt_amount = sum(
            line.exempt_amount for line in self.order_line)
        vat_tax_ids = [
            x.id for x in self.tax_line if x.tax_code_id.parent_id.name == 'IVA']
        if self.vat_discriminated:
            printed_amount_untaxed = self.amount_untaxed
            printed_tax_ids = [x.id for x in self.tax_line]
        else:
            printed_amount_untaxed = self.amount_total
            # printed_amount_untaxed = sum(
            #     line.printed_price_subtotal for line in self.order_line)
            # for now we choose to print none of them
            printed_tax_ids = []
            # printed_tax_ids = [
            #     x.id for x in self.tax_line if x.tax_code_id.parent_id.name != 'IVA']
        self.printed_amount_untaxed = printed_amount_untaxed
        self.printed_tax_ids = printed_tax_ids
        self.printed_amount_tax = self.amount_total - printed_amount_untaxed
        self.vat_tax_ids = vat_tax_ids
        self.vat_amount = vat_amount
        self.other_taxes_amount = other_taxes_amount
        self.exempt_amount = exempt_amount

    # --- Computed "printed" fields (all filled by _printed_prices) ---
    printed_amount_tax = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Tax'
    )
    printed_amount_untaxed = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Subtotal'
    )
    exempt_amount = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Exempt Amount'
    )
    vat_amount = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Vat Amount'
    )
    other_taxes_amount = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Other Taxes Amount'
    )
    printed_tax_ids = fields.One2many(
        compute="_printed_prices",
        comodel_name='account.invoice.tax',
        string='Tax'
    )
    vat_tax_ids = fields.One2many(
        compute="_printed_prices",
        comodel_name='account.invoice.tax',
        string='VAT Taxes'
    )
Fix the "unit price with taxes" computation.
# -*- coding: utf-8 -*-
from openerp import models, api, fields
import openerp.addons.decimal_precision as dp
class sale_order_line(models.Model):
    """Sale order line with "printed" price fields for Argentina.

    In Argentina taxes other than VAT are not itemized on invoices, so these
    fields ignore only the VAT when printing the values.  The same
    functionality is extended here to sale orders.
    (Docstring translated from the original Spanish.)
    """
    _inherit = "sale.order.line"

    @api.one
    def _printed_prices(self):
        # Compute the "printed" price variants plus the VAT / non-VAT /
        # exempt tax amounts for this line.
        taxes = self.env['account.tax']
        price = self.price_unit * (1 - (self.discount or 0.0) / 100.0)
        price_unit_without_tax = self.tax_id.compute_all(
            self.price_unit, 1, product=self.product_id,
            partner=self.order_id.partner_id)
        # For document that not discriminate we include the prices
        if self.order_id.vat_discriminated:
            printed_price_unit = price_unit_without_tax['total']
            printed_price_net = price_unit_without_tax['total'] * (
                1 - (self.discount or 0.0) / 100.0)
            printed_price_subtotal = printed_price_net * self.product_uom_qty
        else:
            printed_price_unit = price_unit_without_tax['total_included']
            printed_price_net = price_unit_without_tax['total_included'] * (
                1 - (self.discount or 0.0) / 100.0)
            printed_price_subtotal = printed_price_net * self.product_uom_qty
        self.printed_price_unit = printed_price_unit
        self.printed_price_net = printed_price_net
        self.printed_price_subtotal = printed_price_subtotal
        # Not VAT taxes
        not_vat_taxes = self.tax_id.filtered(
            lambda r: r.tax_code_id.parent_id.name != 'IVA').compute_all(
            price, 1,
            product=self.product_id,
            partner=self.order_id.partner_id)
        not_vat_taxes_amount = not_vat_taxes[
            'total_included'] - not_vat_taxes['total']
        # VAT taxes
        vat_taxes = self.tax_id.filtered(
            lambda r: r.tax_code_id.parent_id.name == 'IVA').compute_all(
            price, 1,
            product=self.product_id,
            partner=self.order_id.partner_id)
        vat_taxes_amount = vat_taxes['total_included'] - vat_taxes['total']
        exempt_amount = 0.0
        # NOTE(review): compute_all returns a dict, so ``not vat_taxes`` looks
        # always False, and ``taxes['total_included']`` indexes an empty
        # recordset with a string — confirm the intended behavior here.
        if not vat_taxes:
            exempt_amount = taxes['total_included']
        # fixed version: the printed unit price already carries the right
        # tax treatment, so no extra tax amounts are added here
        self.price_unit_with_tax = printed_price_unit
        self.vat_amount = vat_taxes_amount * self.product_uom_qty
        self.other_taxes_amount = not_vat_taxes_amount * self.product_uom_qty
        self.exempt_amount = exempt_amount * self.product_uom_qty

    # --- Computed "printed" fields (all filled by _printed_prices) ---
    price_unit_with_tax = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Unit Price w/ Taxes'
    )
    printed_price_unit = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Unit Price'
    )
    printed_price_net = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Net Price',
    )
    printed_price_subtotal = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Subtotal',
    )
    vat_amount = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Vat Amount',
    )
    other_taxes_amount = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Other Taxes Amount',
    )
    exempt_amount = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Exempt Amount',
    )
class sale_order(models.Model):
    """Sale order extension adding VAT-discrimination logic and printed totals."""
    _inherit = "sale.order"

    @api.one
    @api.depends(
        'partner_id',
        'partner_id.responsability_id',
        'company_id',
        'company_id.partner_id.responsability_id',)
    def get_vat_discriminated(self):
        # Decide whether VAT is discriminated on this order from the AFIP
        # document letter valid for the partner/company pair.
        vat_discriminated = True
        if self.company_id.sale_allow_vat_no_discrimination:
            letter_ids = self.env['account.invoice'].get_valid_document_letters(
                self.partner_id.id, 'sale', self.company_id.id)
            letters = self.env['afip.document_letter'].browse(letter_ids)
            if letters:
                vat_discriminated = letters[0].vat_discriminated
            elif self.company_id.sale_allow_vat_no_discrimination == 'no_discriminate_default':
                vat_discriminated = False
        self.vat_discriminated = vat_discriminated

    @api.one
    @api.depends(
        'partner_id',
        'partner_id.responsability_id',
        'company_id',
        'company_id.partner_id.responsability_id',)
    def get_taxes(self):
        # Collect the distinct taxes used by the order lines.
        self.tax_line = self.env['account.tax']
        tax_ids = []
        for line in self.order_line:
            tax_ids += line.tax_id.ids
        tax_ids = list(set(tax_ids))
        self.tax_line = tax_ids

    tax_line = fields.Many2many(
        'account.tax',
        compute='get_taxes')
    vat_discriminated = fields.Boolean(
        'Discriminate VAT?',
        compute="get_vat_discriminated",
        store=True,
        readonly=False,
        help="Discriminate VAT on Quotations and Sale Orders?")

    @api.one
    def _printed_prices(self):
        # Aggregate the per-line tax amounts into order-level printed totals.
        vat_amount = sum(
            line.vat_amount for line in self.order_line)
        other_taxes_amount = sum(
            line.other_taxes_amount for line in self.order_line)
        exempt_amount = sum(
            line.exempt_amount for line in self.order_line)
        vat_tax_ids = [
            x.id for x in self.tax_line if x.tax_code_id.parent_id.name == 'IVA']
        if self.vat_discriminated:
            printed_amount_untaxed = self.amount_untaxed
            printed_tax_ids = [x.id for x in self.tax_line]
        else:
            printed_amount_untaxed = self.amount_total
            # printed_amount_untaxed = sum(
            #     line.printed_price_subtotal for line in self.order_line)
            # for now we choose to print none of them
            printed_tax_ids = []
            # printed_tax_ids = [
            #     x.id for x in self.tax_line if x.tax_code_id.parent_id.name != 'IVA']
        self.printed_amount_untaxed = printed_amount_untaxed
        self.printed_tax_ids = printed_tax_ids
        self.printed_amount_tax = self.amount_total - printed_amount_untaxed
        self.vat_tax_ids = vat_tax_ids
        self.vat_amount = vat_amount
        self.other_taxes_amount = other_taxes_amount
        self.exempt_amount = exempt_amount

    # --- Computed "printed" fields (all filled by _printed_prices) ---
    printed_amount_tax = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Tax'
    )
    printed_amount_untaxed = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Subtotal'
    )
    exempt_amount = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Exempt Amount'
    )
    vat_amount = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Vat Amount'
    )
    other_taxes_amount = fields.Float(
        compute="_printed_prices",
        digits_compute=dp.get_precision('Account'),
        string='Other Taxes Amount'
    )
    printed_tax_ids = fields.One2many(
        compute="_printed_prices",
        comodel_name='account.invoice.tax',
        string='Tax'
    )
    vat_tax_ids = fields.One2many(
        compute="_printed_prices",
        comodel_name='account.invoice.tax',
        string='VAT Taxes'
    )
|
# Copyright (C) 2013 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import api, fields, models
from odoo.addons import decimal_precision as dp
from odoo.tools import float_is_zero
from ..constants.fiscal import (
FISCAL_IN,
FISCAL_OUT,
TAX_BASE_TYPE,
TAX_BASE_TYPE_PERCENT,
TAX_BASE_TYPE_VALUE,
TAX_DOMAIN,
FINAL_CUSTOMER_YES,
NFE_IND_IE_DEST_1,
NFE_IND_IE_DEST_2,
NFE_IND_IE_DEST_9
)
from ..constants.icms import (
ICMS_BASE_TYPE,
ICMS_BASE_TYPE_DEFAULT,
ICMS_ST_BASE_TYPE,
ICMS_ST_BASE_TYPE_DEFAULT,
ICMS_SN_CST_WITH_CREDIT,
ICMS_DIFAL_PARTITION,
ICMS_DIFAL_UNIQUE_BASE,
ICMS_DIFAL_DOUBLE_BASE,
ICMS_ORIGIN_TAX_IMPORTED,
)
# Template describing a single computed tax entry: the tax-computation
# methods below copy this shape (one dict per tax domain) and fill it in.
TAX_DICT_VALUES = {
    "name": False,
    "fiscal_tax_id": False,
    "tax_include": False,
    "tax_withholding": False,
    "tax_domain": False,
    "cst_id": False,
    "cst_code": False,
    "base_type": "percent",
    "base": 0.00,
    "base_reduction": 0.00,
    "percent_amount": 0.00,
    "percent_reduction": 0.00,
    "value_amount": 0.00,
    "uot_id": False,
    "tax_value": 0.00,
    "compute_reduction": True,
}
class Tax(models.Model):
    """Brazilian fiscal tax definition (ICMS, IPI, ... per tax group/domain)."""
    _name = 'l10n_br_fiscal.tax'
    _order = 'sequence, tax_domain, name'
    _description = 'Fiscal Tax'

    name = fields.Char(
        string="Name",
        size=256,
        required=True)
    sequence = fields.Integer(
        string="Sequence",
        related="tax_group_id.sequence",
        help="The sequence field is used to define the "
             "order in which taxes are displayed.",
    )
    compute_sequence = fields.Integer(
        string="Compute Sequence",
        related="tax_group_id.compute_sequence",
        help="The sequence field is used to define "
             "order in which the tax lines are applied.",
    )
    tax_scope = fields.Selection(
        related="tax_group_id.tax_scope",
        store=True,
    )
    tax_base_type = fields.Selection(
        selection=TAX_BASE_TYPE,
        string="Tax Base Type",
        default=TAX_BASE_TYPE_PERCENT,
        required=True)
    percent_amount = fields.Float(
        string="Percent",
        default=0.00,
        digits=dp.get_precision("Fiscal Tax Percent"),
        required=True)
    percent_reduction = fields.Float(
        string="Percent Reduction",
        default=0.00,
        digits=dp.get_precision("Fiscal Tax Percent"),
        required=True)
    percent_debit_credit = fields.Float(
        string="Percent Debit/Credit",
        default=0.00,
        digits=dp.get_precision("Fiscal Tax Percent"),
        required=True)
    currency_id = fields.Many2one(
        comodel_name="res.currency",
        default=lambda self: self.env.ref("base.BRL"),
        string="Currency")
    value_amount = fields.Float(
        string="Value",
        default=0.00,
        digits=dp.get_precision("Fiscal Tax Value"),
        required=True)
    uot_id = fields.Many2one(
        comodel_name="uom.uom",
        string="Tax UoM")
    tax_group_id = fields.Many2one(
        comodel_name="l10n_br_fiscal.tax.group",
        string="Fiscal Tax Group",
        required=True)
    tax_domain = fields.Selection(
        selection=TAX_DOMAIN,
        related="tax_group_id.tax_domain",
        string="Tax Domain",
        required=True,
        readonly=True)
    cst_in_id = fields.Many2one(
        comodel_name="l10n_br_fiscal.cst",
        string="CST In",
        domain="[('cst_type', 'in', ('in', 'all')), "
               "('tax_domain', '=', tax_domain)]")
    cst_out_id = fields.Many2one(
        comodel_name="l10n_br_fiscal.cst",
        string="CST Out",
        domain="[('cst_type', 'in', ('out', 'all')), "
               "('tax_domain', '=', tax_domain)]")

    # ICMS Fields
    icms_base_type = fields.Selection(
        selection=ICMS_BASE_TYPE,
        string="ICMS Base Type",
        required=True,
        default=ICMS_BASE_TYPE_DEFAULT)
    icmsst_base_type = fields.Selection(
        selection=ICMS_ST_BASE_TYPE,
        string="ICMS ST Base Type",
        required=True,
        default=ICMS_ST_BASE_TYPE_DEFAULT)
    icmsst_mva_percent = fields.Float(
        string="MVA Percent",
        default=0.00,
        digits=dp.get_precision("Fiscal Tax Percent"),
        required=True)
    icmsst_value = fields.Float(
        string="PFC Value",
        default=0.00,
        digits=dp.get_precision("Fiscal Tax Value"),
        required=True)

    _sql_constraints = [(
        "fiscal_tax_code_uniq", "unique (name)",
        "Tax already exists with this name !")]
@api.multi
def get_account_tax(self, fiscal_operation_type=FISCAL_OUT):
account_tax_type = {'out': 'sale', 'in': 'purchase'}
type_tax_use = account_tax_type.get(fiscal_operation_type, 'sale')
account_taxes = self.env["account.tax"].search([
("fiscal_tax_id", "=", self.ids),
('active', '=', True),
('type_tax_use', '=', type_tax_use)])
return account_taxes
def cst_from_tax(self, fiscal_operation_type=FISCAL_OUT):
self.ensure_one()
cst = self.env["l10n_br_fiscal.cst"]
if fiscal_operation_type == FISCAL_IN:
cst = self.cst_in_id
if fiscal_operation_type == FISCAL_OUT:
cst = self.cst_out_id
return cst
def _compute_tax_base(self, tax, tax_dict, **kwargs):
company = kwargs.get("company", tax.env.user.company_id)
currency = kwargs.get("currency", company.currency_id)
precision = currency.decimal_places
fiscal_price = kwargs.get("fiscal_price", 0.00)
fiscal_quantity = kwargs.get("fiscal_quantity", 0.00)
add_to_base = kwargs.get("add_to_base", 0.00)
remove_from_base = kwargs.get("remove_from_base", 0.00)
compute_reduction = kwargs.get("compute_reduction", True)
base = 0.00
if not tax_dict.get("percent_amount") and tax.percent_amount:
tax_dict["percent_amount"] = tax.percent_amount
tax_dict["value_amount"] = tax.value_amount
if tax_dict["base_type"] == "percent":
# Compute initial Tax Base for base_type Percent
base = round(fiscal_price * fiscal_quantity, precision)
if tax_dict["base_type"] == "quantity":
# Compute initial Tax Base for base_type Quantity
base = fiscal_quantity
if tax_dict["base_type"] == "fixed":
# Compute initial Tax Base
base = round(fiscal_price * fiscal_quantity, precision)
# Update Base Value
base_amount = (base + add_to_base) - remove_from_base
# Compute Tax Base Reduction
base_reduction = round(
base_amount * abs(tax.percent_reduction / 100), precision)
# Compute Tax Base Amount
if compute_reduction:
base_amount -= base_reduction
if tax_dict.get("icmsst_mva_percent"):
base_amount *= (1 + (tax_dict["icmsst_mva_percent"] / 100))
if (not tax.percent_amount and not tax.value_amount and
not tax_dict.get('percent_amount') and
not tax_dict.get('value_amount')):
tax_dict["base"] = 0.00
else:
tax_dict["base"] = base_amount
return tax_dict
    def _compute_tax(self, tax, taxes_dict, **kwargs):
        """Compute one tax's value into its entry of ``taxes_dict`` and return it.

        Copies the tax record's metadata into the per-domain dict, computes
        the base via :meth:`_compute_tax_base` when it is still zero, resolves
        the CST for the operation direction, and finally derives
        ``tax_value`` from the base according to the base type.
        """
        tax_dict = taxes_dict.get(tax.tax_domain)
        tax_dict["name"] = tax.name
        tax_dict["base_type"] = tax.tax_base_type
        tax_dict["tax_include"] = tax.tax_group_id.tax_include
        tax_dict["tax_withholding"] = tax.tax_group_id.tax_withholding
        tax_dict["fiscal_tax_id"] = tax.id
        tax_dict["tax_domain"] = tax.tax_domain
        tax_dict["percent_reduction"] = tax.percent_reduction
        tax_dict["percent_amount"] = tax_dict.get('percent_amount',
                                                  tax.percent_amount)

        company = kwargs.get("company", tax.env.user.company_id)
        # partner = kwargs.get("partner")
        currency = kwargs.get("currency", company.currency_id)
        precision = currency.decimal_places
        # product = kwargs.get("product")
        # price_unit = kwargs.get("price_unit", 0.00)
        # quantity = kwargs.get("quantity", 0.00)
        # uom_id = kwargs.get("uom_id")
        # fiscal_price = kwargs.get("fiscal_price", 0.00)
        # fiscal_quantity = kwargs.get("fiscal_quantity", 0.00)
        # uot_id = kwargs.get("uot_id")
        discount_value = kwargs.get("discount_value", 0.00)
        # insurance_value = kwargs.get("insurance_value", 0.00)
        # other_value = kwargs.get("other_value", 0.00)
        # freight_value = kwargs.get("freight_value", 0.00)
        # ncm = kwargs.get("ncm")
        # cest = kwargs.get("cest")
        operation_line = kwargs.get("operation_line")

        # discount is always removed from the base
        remove_from_base = [discount_value]

        if tax.tax_group_id.base_without_icms:
            # Get Computed ICMS Tax and exclude it from this tax's base
            tax_dict_icms = taxes_dict.get("icms", {})
            remove_from_base.append(tax_dict_icms.get('tax_value', 0.00))

        kwargs.update({
            'remove_from_base': sum(remove_from_base),
        })

        # TODO: in the future take other kinds of tax base into account
        if float_is_zero(tax_dict.get("base", 0.00), precision):
            tax_dict = self._compute_tax_base(tax, tax_dict, **kwargs)

        fiscal_operation_type = (operation_line.fiscal_operation_type
                                 or FISCAL_OUT)
        tax_dict['cst_id'] = tax.cst_from_tax(fiscal_operation_type)

        base_amount = tax_dict.get("base", 0.00)

        if tax_dict["base_type"] == "percent":
            # Compute Tax Value
            tax_value = round(
                base_amount * (tax_dict["percent_amount"] / 100),
                precision)
            tax_dict["tax_value"] = tax_value

        if tax_dict["base_type"] in ("quantity", "fixed"):
            tax_dict["tax_value"] = round(
                base_amount * tax_dict["value_amount"],
                precision)

        return tax_dict
def _compute_estimate_taxes(self, **kwargs):
company = kwargs.get("company")
product = kwargs.get("product")
fiscal_price = kwargs.get("fiscal_price")
fiscal_quantity = kwargs.get("fiscal_quantity")
currency = kwargs.get("currency", company.currency_id)
precision = currency.decimal_places
ncm = kwargs.get("ncm") or product.ncm_id
nbs = kwargs.get("nbs") or product.nbs_id
icms_origin = kwargs.get("icms_origin") or product.icms_origin
op_line = kwargs.get("operation_line")
amount_estimate_tax = 0.00
amount_total = round(fiscal_price * fiscal_quantity, precision)
if op_line and (op_line.fiscal_operation_type == FISCAL_OUT
and op_line.fiscal_operation_id.fiscal_type == 'sale'):
if nbs:
amount_estimate_tax = round(amount_total * (
nbs.estimate_tax_national / 100), precision)
elif ncm:
if icms_origin in ICMS_ORIGIN_TAX_IMPORTED:
amount_estimate_tax = round(amount_total * (
ncm.estimate_tax_imported / 100), precision)
else:
amount_estimate_tax = round(amount_total * (
ncm.estimate_tax_national / 100), precision)
return amount_estimate_tax
def _compute_icms(self, tax, taxes_dict, **kwargs):
partner = kwargs.get("partner")
company = kwargs.get("company")
product = kwargs.get("product")
currency = kwargs.get("currency", company.currency_id)
precision = currency.decimal_places
ncm = kwargs.get("ncm")
nbm = kwargs.get("nbm")
cest = kwargs.get("cest")
operation_line = kwargs.get("operation_line")
discount_value = kwargs.get("discount_value", 0.00)
insurance_value = kwargs.get("insurance_value", 0.00)
freight_value = kwargs.get("freight_value", 0.00)
other_value = kwargs.get("other_value", 0.00)
add_to_base = [insurance_value, freight_value, other_value]
remove_from_base = [discount_value]
# Get Computed IPI Tax
tax_dict_ipi = taxes_dict.get("ipi", {})
if partner.ind_ie_dest in (NFE_IND_IE_DEST_2, NFE_IND_IE_DEST_9) or \
(operation_line.fiscal_operation_id.ind_final ==
FINAL_CUSTOMER_YES):
# Add IPI in ICMS Base
add_to_base.append(tax_dict_ipi.get("tax_value", 0.00))
kwargs.update({
'add_to_base': sum(add_to_base),
'remove_from_base': sum(remove_from_base),
'icms_base_type': tax.icms_base_type
})
taxes_dict[tax.tax_domain].update(self._compute_tax_base(
tax, taxes_dict.get(tax.tax_domain), **kwargs))
taxes_dict[tax.tax_domain].update(self._compute_tax(
tax, taxes_dict, **kwargs))
taxes_dict[tax.tax_domain].update({
'icms_base_type': tax.icms_base_type})
# DIFAL
# TODO
# and operation_line.ind_final == FINAL_CUSTOMER_YES):
if (company.state_id != partner.state_id
and operation_line.fiscal_operation_type == FISCAL_OUT
and partner.ind_ie_dest == NFE_IND_IE_DEST_9
and taxes_dict[tax.tax_domain].get('tax_value')):
tax_icms_difal = company.icms_regulation_id.map_tax_icms_difal(
company, partner, product, ncm, nbm, cest, operation_line)
tax_icmsfcp_difal = company.icms_regulation_id.map_tax_icmsfcp(
company, partner, product, ncm, nbm, cest, operation_line)
# Difal - Origin Percent
icms_origin_perc = taxes_dict[tax.tax_domain].get('percent_amount')
# Difal - Origin Value
icms_origin_value = taxes_dict[tax.tax_domain].get('tax_value')
# Difal - Destination Percent
icms_dest_perc = 0.00
if tax_icms_difal:
icms_dest_perc = tax_icms_difal[0].percent_amount
# Difal - FCP Percent
icmsfcp_perc = 0.00
if tax_icmsfcp_difal:
icmsfcp_perc = tax_icmsfcp_difal[0].percent_amount
# Difal - Base
icms_base = taxes_dict[tax.tax_domain].get('base')
difal_icms_base = 0.00
# Difal - ICMS Dest Value
icms_dest_value = round(
icms_base * (icms_dest_perc / 100), precision)
if company.state_id.code in ICMS_DIFAL_UNIQUE_BASE:
difal_icms_base = icms_base
if company.state_id.code in ICMS_DIFAL_DOUBLE_BASE:
difal_icms_base = round(
(icms_base - icms_dest_value) / (1 - (
(icms_dest_perc + icmsfcp_perc) / 100)),
precision)
icms_origin_value = round(
difal_icms_base * (icms_origin_perc / 100), precision)
difal_value = icms_dest_value - icms_origin_value
# Difal - Sharing Percent
date_year = fields.Date.today().year
if date_year >= 2019:
taxes_dict[tax.tax_domain].update(
ICMS_DIFAL_PARTITION[2019])
else:
if date_year == 2018:
taxes_dict[tax.tax_domain].update(
ICMS_DIFAL_PARTITION[2018])
if date_year == 2017:
taxes_dict[tax.tax_domain].update(
ICMS_DIFAL_PARTITION[2017])
else:
taxes_dict[tax.tax_domain].update(
ICMS_DIFAL_PARTITION[2016])
difal_share_origin = taxes_dict[tax.tax_domain].get(
'difal_origin_perc')
difal_share_dest = taxes_dict[tax.tax_domain].get(
'difal_dest_perc')
difal_origin_value = round(
difal_value * difal_share_origin / 100, precision)
difal_dest_value = round(
difal_value * difal_share_dest / 100, precision)
taxes_dict[tax.tax_domain].update({
'icms_origin_perc': icms_origin_perc,
'icms_dest_perc': icms_dest_perc,
'icms_dest_base': difal_icms_base,
'icms_sharing_percent': difal_share_dest,
'icms_origin_value': difal_origin_value,
'icms_dest_value': difal_dest_value,
})
return taxes_dict
def _compute_icmsfcp(self, tax, taxes_dict, **kwargs):
"""Compute ICMS FCP"""
tax_dict_icms = taxes_dict.get('icms')
taxes_dict[tax.tax_domain].update({
'base': tax_dict_icms.get('icms_dest_base', 0.0),
})
return self._compute_tax(tax, taxes_dict, **kwargs)
    def _compute_icmsst(self, tax, taxes_dict, **kwargs):
        """Compute ICMS ST (tax substitution).

        The ST base always includes the IPI amount plus insurance,
        freight and other charges minus the discount; when an MVA
        percentage is configured, the base is marked up by it and the
        regular ICMS value is deducted from the resulting ST amount.
        """
        # partner = kwargs.get("partner")
        # company = kwargs.get("company")
        discount_value = kwargs.get("discount_value", 0.00)
        insurance_value = kwargs.get("insurance_value", 0.00)
        freight_value = kwargs.get("freight_value", 0.00)
        other_value = kwargs.get("other_value", 0.00)
        add_to_base = [insurance_value, freight_value, other_value]
        remove_from_base = [discount_value]
        # Get Computed IPI Tax -- unlike plain ICMS, ST always includes it.
        tax_dict_ipi = taxes_dict.get("ipi", {})
        add_to_base.append(tax_dict_ipi.get("tax_value", 0.00))
        kwargs.update({
            'add_to_base': sum(add_to_base),
            'remove_from_base': sum(remove_from_base),
            'icmsst_base_type': tax.icmsst_base_type
        })
        if taxes_dict.get(tax.tax_domain):
            # Propagate the MVA markup so _compute_tax_base applies it.
            taxes_dict[tax.tax_domain]["icmsst_mva_percent"] = \
                tax.icmsst_mva_percent
            taxes_dict[tax.tax_domain].update(self._compute_tax_base(
                tax, taxes_dict.get(tax.tax_domain), **kwargs))
        tax_dict = self._compute_tax(tax, taxes_dict, **kwargs)
        if tax_dict.get("icmsst_mva_percent"):
            # ST is the amount due on top of the regular ICMS: deduct it.
            tax_dict["tax_value"] -= taxes_dict.get(
                "icms", {}
            ).get("tax_value", 0.0)
        return tax_dict
def _compute_icmssn(self, tax, taxes_dict, **kwargs):
tax_dict = taxes_dict.get(tax.tax_domain)
partner = kwargs.get("partner")
company = kwargs.get("company")
cst = kwargs.get("cst", self.env["l10n_br_fiscal.cst"])
discount_value = kwargs.get("discount_value", 0.00)
insurance_value = kwargs.get("insurance_value", 0.00)
freight_value = kwargs.get("freight_value", 0.00)
other_value = kwargs.get("other_value", 0.00)
icmssn_range = kwargs.get("icmssn_range")
add_to_base = [insurance_value, freight_value, other_value]
remove_from_base = [discount_value]
# Get Computed IPI Tax
tax_dict_ipi = taxes_dict.get("ipi", {})
# Partner not ICMS's Contributor
if partner.ind_ie_dest == NFE_IND_IE_DEST_9:
# Add IPI in ICMS Base
add_to_base.append(tax_dict_ipi.get("tax_value", 0.00))
# Partner ICMS's Contributor
if partner.ind_ie_dest in (NFE_IND_IE_DEST_1, NFE_IND_IE_DEST_2):
if cst.code in ICMS_SN_CST_WITH_CREDIT:
icms_sn_percent = round(
company.simplifed_tax_percent *
(icmssn_range.tax_icms_percent / 100), 2)
tax_dict["percent_amount"] = icms_sn_percent
tax_dict["value_amount"] = icms_sn_percent
kwargs.update({
'add_to_base': sum(add_to_base),
'remove_from_base': sum(remove_from_base),
})
taxes_dict.update(self._compute_tax_base(
tax, taxes_dict.get(tax.tax_domain), **kwargs))
return self._compute_tax(tax, taxes_dict, **kwargs)
    # The taxes below (and their withholding "_wh" variants) have no
    # special base rules: they all delegate to _compute_generic(), which
    # builds the base and then computes the tax value.

    def _compute_issqn(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_issqn_wh(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_csll(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_csll_wh(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_irpj(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_irpj_wh(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_inss(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_inss_wh(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)
def _compute_ipi(self, tax, taxes_dict, **kwargs):
discount_value = kwargs.get("discount_value", 0.00)
insurance_value = kwargs.get("insurance_value", 0.00)
freight_value = kwargs.get("freight_value", 0.00)
other_value = kwargs.get("other_value", 0.00)
add_to_base = [insurance_value, freight_value, other_value]
remove_from_base = [discount_value]
kwargs.update({
'add_to_base': sum(add_to_base),
'remove_from_base': sum(remove_from_base),
})
taxes_dict[tax.tax_domain].update(self._compute_tax_base(
tax, taxes_dict.get(tax.tax_domain), **kwargs))
return self._compute_generic(tax, taxes_dict, **kwargs)
    # II, PIS and COFINS also follow the generic base/value pipeline.

    def _compute_ii(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_pis(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_pis_wh(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_cofins(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_cofins_wh(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)
    def _compute_generic(self, tax, taxes_dict, **kwargs):
        """Default pipeline: compute the tax base, then the tax value."""
        taxes_dict[tax.tax_domain].update(self._compute_tax_base(
            tax, taxes_dict.get(tax.tax_domain), **kwargs))
        return self._compute_tax(tax, taxes_dict, **kwargs)
    @api.multi
    def compute_taxes(self, **kwargs):
        """Compute every tax in the recordset, in compute_sequence order.

        arguments:
            company,
            partner,
            product,
            price_unit,
            quantity,
            uom_id,
            fiscal_price,
            fiscal_quantity,
            uot_id,
            discount_value,
            insurance_value,
            other_value,
            freight_value,
            ncm,
            nbs,
            nbm,
            cest,
            operation_line,
            icmssn_range,
            icms_origin,

        return
            {
                'amount_included': float
                'amount_not_included': float
                'amount_withholding': float
                'taxes': dict
            }
        """
        result_amounts = {
            'amount_included': 0.00,
            'amount_not_included': 0.00,
            'amount_withholding': 0.00,
            'amount_estimate_tax': 0.00,
            'taxes': {},
        }
        taxes = {}
        # Taxes must be computed in dependency order (e.g. IPI before
        # ICMS), hence the sort by the group's compute_sequence.
        for tax in self.sorted(key=lambda t: t.compute_sequence):
            tax_dict = TAX_DICT_VALUES.copy()
            taxes[tax.tax_domain] = tax_dict
            try:
                # Define CST FROM TAX
                operation_line = kwargs.get("operation_line")
                fiscal_operation_type = (operation_line.fiscal_operation_type
                                         or FISCAL_OUT)
                kwargs.update({"cst": tax.cst_from_tax(fiscal_operation_type)})
                # Dispatch to the domain-specific method, e.g.
                # _compute_icms for tax_domain == 'icms'.
                compute_method = getattr(self, "_compute_%s" % tax.tax_domain)
                taxes[tax.tax_domain].update(
                    compute_method(tax, taxes, **kwargs)
                )
                if taxes[tax.tax_domain]['tax_include']:
                    result_amounts['amount_included'] += taxes[
                        tax.tax_domain].get('tax_value', 0.00)
                else:
                    result_amounts['amount_not_included'] += taxes[
                        tax.tax_domain].get('tax_value', 0.00)
                if taxes[tax.tax_domain]['tax_withholding']:
                    result_amounts['amount_withholding'] += taxes[
                        tax.tax_domain].get('tax_value', 0.00)
            except AttributeError:
                # NOTE(review): this also swallows AttributeErrors raised
                # *inside* a domain method, not only a missing
                # _compute_<domain> -- confirm this is intentional.
                taxes[tax.tax_domain].update(
                    tax._compute_generic(tax, taxes, **kwargs))
                # When the fiscal document has no tax-specific fields,
                # the taxes are computed generically.
                continue
        # Estimate taxes
        result_amounts['amount_estimate_tax'] = self._compute_estimate_taxes(
            **kwargs)
        result_amounts['taxes'] = taxes
        return result_amounts
@api.onchange('icmsst_base_type')
def _onchange_icmsst_base_type(self):
if self.icmsst_base_type:
ICMS_ST_BASE_TYPE_REL = {
'0': TAX_BASE_TYPE_VALUE,
'1': TAX_BASE_TYPE_VALUE,
'2': TAX_BASE_TYPE_VALUE,
'3': TAX_BASE_TYPE_VALUE,
'4': TAX_BASE_TYPE_PERCENT,
'5': TAX_BASE_TYPE_VALUE}
self.tax_base_type = ICMS_ST_BASE_TYPE_REL.get(
self.icmsst_base_type)
# [IMP] FCP Base calculation for SN
# Copyright (C) 2013 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import api, fields, models
from odoo.addons import decimal_precision as dp
from odoo.tools import float_is_zero
from ..constants.fiscal import (
FISCAL_IN,
FISCAL_OUT,
TAX_BASE_TYPE,
TAX_BASE_TYPE_PERCENT,
TAX_BASE_TYPE_VALUE,
TAX_DOMAIN,
FINAL_CUSTOMER_YES,
NFE_IND_IE_DEST_1,
NFE_IND_IE_DEST_2,
NFE_IND_IE_DEST_9
)
from ..constants.icms import (
ICMS_BASE_TYPE,
ICMS_BASE_TYPE_DEFAULT,
ICMS_ST_BASE_TYPE,
ICMS_ST_BASE_TYPE_DEFAULT,
ICMS_SN_CST_WITH_CREDIT,
ICMS_DIFAL_PARTITION,
ICMS_DIFAL_UNIQUE_BASE,
ICMS_DIFAL_DOUBLE_BASE,
ICMS_ORIGIN_TAX_IMPORTED,
)
# Template for one per-domain tax result; compute_taxes() copies it for
# every tax so each domain starts from the same neutral values.
TAX_DICT_VALUES = {
    "name": False,
    "fiscal_tax_id": False,
    "tax_include": False,
    "tax_withholding": False,
    "tax_domain": False,
    "cst_id": False,
    "cst_code": False,
    "base_type": "percent",  # one of: percent, quantity, fixed
    "base": 0.00,
    "base_reduction": 0.00,
    "percent_amount": 0.00,
    "percent_reduction": 0.00,
    "value_amount": 0.00,
    "uot_id": False,
    "tax_value": 0.00,
    "compute_reduction": True,
}
class Tax(models.Model):
    """Brazilian fiscal tax (ICMS, IPI, PIS, COFINS, ISSQN, ...).

    Each record holds the rate configuration for one tax domain and is
    computed through the matching ``_compute_<tax_domain>`` method.
    """

    _name = 'l10n_br_fiscal.tax'
    _order = 'sequence, tax_domain, name'
    _description = 'Fiscal Tax'

    name = fields.Char(
        string="Name",
        size=256,
        required=True)

    # Display order, shared with the tax group.
    sequence = fields.Integer(
        string="Sequence",
        related="tax_group_id.sequence",
        help="The sequence field is used to define the "
             "order in which taxes are displayed.",
    )

    # Computation order (e.g. IPI before ICMS), shared with the tax group.
    compute_sequence = fields.Integer(
        string="Compute Sequence",
        related="tax_group_id.compute_sequence",
        help="The sequence field is used to define "
             "order in which the tax lines are applied.",
    )

    tax_scope = fields.Selection(
        related="tax_group_id.tax_scope",
        store=True,
    )

    tax_base_type = fields.Selection(
        selection=TAX_BASE_TYPE,
        string="Tax Base Type",
        default=TAX_BASE_TYPE_PERCENT,
        required=True)

    percent_amount = fields.Float(
        string="Percent",
        default=0.00,
        digits=dp.get_precision("Fiscal Tax Percent"),
        required=True)

    percent_reduction = fields.Float(
        string="Percent Reduction",
        default=0.00,
        digits=dp.get_precision("Fiscal Tax Percent"),
        required=True)

    percent_debit_credit = fields.Float(
        string="Percent Debit/Credit",
        default=0.00,
        digits=dp.get_precision("Fiscal Tax Percent"),
        required=True)

    currency_id = fields.Many2one(
        comodel_name="res.currency",
        default=lambda self: self.env.ref("base.BRL"),
        string="Currency")

    # Fixed amount used when the base type is not percent-based.
    value_amount = fields.Float(
        string="Value",
        default=0.00,
        digits=dp.get_precision("Fiscal Tax Value"),
        required=True)

    uot_id = fields.Many2one(
        comodel_name="uom.uom",
        string="Tax UoM")

    tax_group_id = fields.Many2one(
        comodel_name="l10n_br_fiscal.tax.group",
        string="Fiscal Tax Group",
        required=True)

    tax_domain = fields.Selection(
        selection=TAX_DOMAIN,
        related="tax_group_id.tax_domain",
        string="Tax Domain",
        required=True,
        readonly=True)

    # CST codes applied depending on the operation direction.
    cst_in_id = fields.Many2one(
        comodel_name="l10n_br_fiscal.cst",
        string="CST In",
        domain="[('cst_type', 'in', ('in', 'all')), "
               "('tax_domain', '=', tax_domain)]")

    cst_out_id = fields.Many2one(
        comodel_name="l10n_br_fiscal.cst",
        string="CST Out",
        domain="[('cst_type', 'in', ('out', 'all')), "
               "('tax_domain', '=', tax_domain)]")

    # ICMS Fields
    icms_base_type = fields.Selection(
        selection=ICMS_BASE_TYPE,
        string="ICMS Base Type",
        required=True,
        default=ICMS_BASE_TYPE_DEFAULT)

    icmsst_base_type = fields.Selection(
        selection=ICMS_ST_BASE_TYPE,
        string="ICMS ST Base Type",
        required=True,
        default=ICMS_ST_BASE_TYPE_DEFAULT)

    # MVA: value-added margin applied to the ICMS ST base.
    icmsst_mva_percent = fields.Float(
        string="MVA Percent",
        default=0.00,
        digits=dp.get_precision("Fiscal Tax Percent"),
        required=True)

    icmsst_value = fields.Float(
        string="PFC Value",
        default=0.00,
        digits=dp.get_precision("Fiscal Tax Value"),
        required=True)

    _sql_constraints = [(
        "fiscal_tax_code_uniq", "unique (name)",
        "Tax already exists with this name !")]
@api.multi
def get_account_tax(self, fiscal_operation_type=FISCAL_OUT):
account_tax_type = {'out': 'sale', 'in': 'purchase'}
type_tax_use = account_tax_type.get(fiscal_operation_type, 'sale')
account_taxes = self.env["account.tax"].search([
("fiscal_tax_id", "=", self.ids),
('active', '=', True),
('type_tax_use', '=', type_tax_use)])
return account_taxes
def cst_from_tax(self, fiscal_operation_type=FISCAL_OUT):
self.ensure_one()
cst = self.env["l10n_br_fiscal.cst"]
if fiscal_operation_type == FISCAL_IN:
cst = self.cst_in_id
if fiscal_operation_type == FISCAL_OUT:
cst = self.cst_out_id
return cst
    def _compute_tax_base(self, tax, tax_dict, **kwargs):
        """Build the tax base amount for *tax* into *tax_dict*.

        The base starts from fiscal_price * fiscal_quantity (or just the
        quantity for quantity-based taxes); then the add_to_base /
        remove_from_base adjustments, the percentage reduction and, for
        ICMS ST, the MVA markup are applied.  *tax_dict* is mutated in
        place and returned.
        """
        company = kwargs.get("company", tax.env.user.company_id)
        currency = kwargs.get("currency", company.currency_id)
        precision = currency.decimal_places
        fiscal_price = kwargs.get("fiscal_price", 0.00)
        fiscal_quantity = kwargs.get("fiscal_quantity", 0.00)
        add_to_base = kwargs.get("add_to_base", 0.00)
        remove_from_base = kwargs.get("remove_from_base", 0.00)
        compute_reduction = kwargs.get("compute_reduction", True)
        base = 0.00
        # Seed the dict with the tax's configured rates when not set yet.
        if not tax_dict.get("percent_amount") and tax.percent_amount:
            tax_dict["percent_amount"] = tax.percent_amount
            tax_dict["value_amount"] = tax.value_amount
        if tax_dict["base_type"] == "percent":
            # Compute initial Tax Base for base_type Percent
            base = round(fiscal_price * fiscal_quantity, precision)
        if tax_dict["base_type"] == "quantity":
            # Compute initial Tax Base for base_type Quantity
            base = fiscal_quantity
        if tax_dict["base_type"] == "fixed":
            # Compute initial Tax Base
            base = round(fiscal_price * fiscal_quantity, precision)
        # Update Base Value
        base_amount = (base + add_to_base) - remove_from_base
        # Compute Tax Base Reduction
        base_reduction = round(
            base_amount * abs(tax.percent_reduction / 100), precision)
        # Compute Tax Base Amount
        if compute_reduction:
            base_amount -= base_reduction
        if tax_dict.get("icmsst_mva_percent"):
            # ICMS ST: mark the base up by the MVA percentage.
            base_amount *= (1 + (tax_dict["icmsst_mva_percent"] / 100))
        # A tax with no rate configured anywhere yields an empty base.
        if (not tax.percent_amount and not tax.value_amount and
                not tax_dict.get('percent_amount') and
                not tax_dict.get('value_amount')):
            tax_dict["base"] = 0.00
        else:
            tax_dict["base"] = base_amount
        return tax_dict
    def _compute_tax(self, tax, taxes_dict, **kwargs):
        """Compute the tax value for *tax* into its per-domain dict.

        Fills the identification fields (name, CST, domain, rates),
        makes sure a base exists (computing one when it is still zero)
        and derives tax_value from the base according to the base type.
        Returns the mutated per-domain dict.
        """
        tax_dict = taxes_dict.get(tax.tax_domain)
        tax_dict["name"] = tax.name
        tax_dict["base_type"] = tax.tax_base_type
        tax_dict["tax_include"] = tax.tax_group_id.tax_include
        tax_dict["tax_withholding"] = tax.tax_group_id.tax_withholding
        tax_dict["fiscal_tax_id"] = tax.id
        tax_dict["tax_domain"] = tax.tax_domain
        tax_dict["percent_reduction"] = tax.percent_reduction
        # Keep a percentage already set by a caller (e.g. Simples
        # Nacional) and fall back to the tax's configured rate.
        tax_dict["percent_amount"] = tax_dict.get('percent_amount',
                                                  tax.percent_amount)
        company = kwargs.get("company", tax.env.user.company_id)
        currency = kwargs.get("currency", company.currency_id)
        precision = currency.decimal_places
        discount_value = kwargs.get("discount_value", 0.00)
        operation_line = kwargs.get("operation_line")
        remove_from_base = [discount_value]
        if tax.tax_group_id.base_without_icms:
            # Get Computed ICMS Tax: groups flagged "base without ICMS"
            # deduct the ICMS amount from their own base.
            tax_dict_icms = taxes_dict.get("icms", {})
            remove_from_base.append(tax_dict_icms.get('tax_value', 0.00))
        kwargs.update({
            'remove_from_base': sum(remove_from_base),
        })
        # TODO: eventually take other base computation types into account.
        if float_is_zero(tax_dict.get("base", 0.00), precision):
            tax_dict = self._compute_tax_base(tax, tax_dict, **kwargs)
        fiscal_operation_type = (operation_line.fiscal_operation_type
                                 or FISCAL_OUT)
        tax_dict['cst_id'] = tax.cst_from_tax(fiscal_operation_type)
        base_amount = tax_dict.get("base", 0.00)
        if tax_dict["base_type"] == "percent":
            # Compute Tax Value
            tax_value = round(
                base_amount * (tax_dict["percent_amount"] / 100),
                precision)
            tax_dict["tax_value"] = tax_value
        if tax_dict["base_type"] in ("quantity", "fixed"):
            tax_dict["tax_value"] = round(
                base_amount * tax_dict["value_amount"],
                precision)
        return tax_dict
def _compute_estimate_taxes(self, **kwargs):
company = kwargs.get("company")
product = kwargs.get("product")
fiscal_price = kwargs.get("fiscal_price")
fiscal_quantity = kwargs.get("fiscal_quantity")
currency = kwargs.get("currency", company.currency_id)
precision = currency.decimal_places
ncm = kwargs.get("ncm") or product.ncm_id
nbs = kwargs.get("nbs") or product.nbs_id
icms_origin = kwargs.get("icms_origin") or product.icms_origin
op_line = kwargs.get("operation_line")
amount_estimate_tax = 0.00
amount_total = round(fiscal_price * fiscal_quantity, precision)
if op_line and (op_line.fiscal_operation_type == FISCAL_OUT
and op_line.fiscal_operation_id.fiscal_type == 'sale'):
if nbs:
amount_estimate_tax = round(amount_total * (
nbs.estimate_tax_national / 100), precision)
elif ncm:
if icms_origin in ICMS_ORIGIN_TAX_IMPORTED:
amount_estimate_tax = round(amount_total * (
ncm.estimate_tax_imported / 100), precision)
else:
amount_estimate_tax = round(amount_total * (
ncm.estimate_tax_national / 100), precision)
return amount_estimate_tax
def _compute_icms(self, tax, taxes_dict, **kwargs):
partner = kwargs.get("partner")
company = kwargs.get("company")
product = kwargs.get("product")
currency = kwargs.get("currency", company.currency_id)
precision = currency.decimal_places
ncm = kwargs.get("ncm")
nbm = kwargs.get("nbm")
cest = kwargs.get("cest")
operation_line = kwargs.get("operation_line")
discount_value = kwargs.get("discount_value", 0.00)
insurance_value = kwargs.get("insurance_value", 0.00)
freight_value = kwargs.get("freight_value", 0.00)
other_value = kwargs.get("other_value", 0.00)
add_to_base = [insurance_value, freight_value, other_value]
remove_from_base = [discount_value]
# Get Computed IPI Tax
tax_dict_ipi = taxes_dict.get("ipi", {})
if partner.ind_ie_dest in (NFE_IND_IE_DEST_2, NFE_IND_IE_DEST_9) or \
(operation_line.fiscal_operation_id.ind_final ==
FINAL_CUSTOMER_YES):
# Add IPI in ICMS Base
add_to_base.append(tax_dict_ipi.get("tax_value", 0.00))
kwargs.update({
'add_to_base': sum(add_to_base),
'remove_from_base': sum(remove_from_base),
'icms_base_type': tax.icms_base_type
})
taxes_dict[tax.tax_domain].update(self._compute_tax_base(
tax, taxes_dict.get(tax.tax_domain), **kwargs))
taxes_dict[tax.tax_domain].update(self._compute_tax(
tax, taxes_dict, **kwargs))
taxes_dict[tax.tax_domain].update({
'icms_base_type': tax.icms_base_type})
# DIFAL
# TODO
# and operation_line.ind_final == FINAL_CUSTOMER_YES):
if (company.state_id != partner.state_id
and operation_line.fiscal_operation_type == FISCAL_OUT
and partner.ind_ie_dest == NFE_IND_IE_DEST_9
and taxes_dict[tax.tax_domain].get('tax_value')):
tax_icms_difal = company.icms_regulation_id.map_tax_icms_difal(
company, partner, product, ncm, nbm, cest, operation_line)
tax_icmsfcp_difal = company.icms_regulation_id.map_tax_icmsfcp(
company, partner, product, ncm, nbm, cest, operation_line)
# Difal - Origin Percent
icms_origin_perc = taxes_dict[tax.tax_domain].get('percent_amount')
# Difal - Origin Value
icms_origin_value = taxes_dict[tax.tax_domain].get('tax_value')
# Difal - Destination Percent
icms_dest_perc = 0.00
if tax_icms_difal:
icms_dest_perc = tax_icms_difal[0].percent_amount
# Difal - FCP Percent
icmsfcp_perc = 0.00
if tax_icmsfcp_difal:
icmsfcp_perc = tax_icmsfcp_difal[0].percent_amount
# Difal - Base
icms_base = taxes_dict[tax.tax_domain].get('base')
difal_icms_base = 0.00
# Difal - ICMS Dest Value
icms_dest_value = round(
icms_base * (icms_dest_perc / 100), precision)
if company.state_id.code in ICMS_DIFAL_UNIQUE_BASE:
difal_icms_base = icms_base
if company.state_id.code in ICMS_DIFAL_DOUBLE_BASE:
difal_icms_base = round(
(icms_base - icms_dest_value) / (1 - (
(icms_dest_perc + icmsfcp_perc) / 100)),
precision)
icms_origin_value = round(
difal_icms_base * (icms_origin_perc / 100), precision)
difal_value = icms_dest_value - icms_origin_value
# Difal - Sharing Percent
date_year = fields.Date.today().year
if date_year >= 2019:
taxes_dict[tax.tax_domain].update(
ICMS_DIFAL_PARTITION[2019])
else:
if date_year == 2018:
taxes_dict[tax.tax_domain].update(
ICMS_DIFAL_PARTITION[2018])
if date_year == 2017:
taxes_dict[tax.tax_domain].update(
ICMS_DIFAL_PARTITION[2017])
else:
taxes_dict[tax.tax_domain].update(
ICMS_DIFAL_PARTITION[2016])
difal_share_origin = taxes_dict[tax.tax_domain].get(
'difal_origin_perc')
difal_share_dest = taxes_dict[tax.tax_domain].get(
'difal_dest_perc')
difal_origin_value = round(
difal_value * difal_share_origin / 100, precision)
difal_dest_value = round(
difal_value * difal_share_dest / 100, precision)
taxes_dict[tax.tax_domain].update({
'icms_origin_perc': icms_origin_perc,
'icms_dest_perc': icms_dest_perc,
'icms_dest_base': difal_icms_base,
'icms_sharing_percent': difal_share_dest,
'icms_origin_value': difal_origin_value,
'icms_dest_value': difal_dest_value,
})
return taxes_dict
    def _compute_icmsfcp(self, tax, taxes_dict, **kwargs):
        """Compute the ICMS FCP tax.

        For Simples Nacional companies (tax_framework == '1') the FCP
        base is built like a regular ICMS base; otherwise it reuses the
        DIFAL destination base already produced by _compute_icms().
        """
        company_id = kwargs['company']
        if company_id.tax_framework == '1':
            operation_line = kwargs.get("operation_line")
            discount_value = kwargs.get("discount_value", 0.00)
            insurance_value = kwargs.get("insurance_value", 0.00)
            freight_value = kwargs.get("freight_value", 0.00)
            other_value = kwargs.get("other_value", 0.00)
            add_to_base = [insurance_value, freight_value, other_value]
            remove_from_base = [discount_value]
            kwargs.update({
                'add_to_base': sum(add_to_base),
                'remove_from_base': sum(remove_from_base),
                'icms_base_type': tax.icms_base_type
            })
            taxes_dict[tax.tax_domain].update(self._compute_tax_base(
                tax, taxes_dict.get(tax.tax_domain), **kwargs))
            # NOTE(review): _compute_tax runs here *and* again in the
            # final return below, so this branch computes the tax twice
            # -- confirm the second call is intentional.
            taxes_dict[tax.tax_domain].update(self._compute_tax(
                tax, taxes_dict, **kwargs))
            taxes_dict[tax.tax_domain].update({
                'icms_base_type': tax.icms_base_type})
        else:
            tax_dict_icms = taxes_dict.get('icms')
            taxes_dict[tax.tax_domain].update({
                'base': tax_dict_icms.get('icms_dest_base', 0.0),
            })
        return self._compute_tax(tax, taxes_dict, **kwargs)
    def _compute_icmsst(self, tax, taxes_dict, **kwargs):
        """Compute ICMS ST (tax substitution).

        The ST base always includes the IPI amount plus insurance,
        freight and other charges minus the discount; when an MVA
        percentage is configured, the base is marked up by it and the
        regular ICMS value is deducted from the resulting ST amount.
        """
        # partner = kwargs.get("partner")
        # company = kwargs.get("company")
        discount_value = kwargs.get("discount_value", 0.00)
        insurance_value = kwargs.get("insurance_value", 0.00)
        freight_value = kwargs.get("freight_value", 0.00)
        other_value = kwargs.get("other_value", 0.00)
        add_to_base = [insurance_value, freight_value, other_value]
        remove_from_base = [discount_value]
        # Get Computed IPI Tax -- unlike plain ICMS, ST always includes it.
        tax_dict_ipi = taxes_dict.get("ipi", {})
        add_to_base.append(tax_dict_ipi.get("tax_value", 0.00))
        kwargs.update({
            'add_to_base': sum(add_to_base),
            'remove_from_base': sum(remove_from_base),
            'icmsst_base_type': tax.icmsst_base_type
        })
        if taxes_dict.get(tax.tax_domain):
            # Propagate the MVA markup so _compute_tax_base applies it.
            taxes_dict[tax.tax_domain]["icmsst_mva_percent"] = \
                tax.icmsst_mva_percent
            taxes_dict[tax.tax_domain].update(self._compute_tax_base(
                tax, taxes_dict.get(tax.tax_domain), **kwargs))
        tax_dict = self._compute_tax(tax, taxes_dict, **kwargs)
        if tax_dict.get("icmsst_mva_percent"):
            # ST is the amount due on top of the regular ICMS: deduct it.
            tax_dict["tax_value"] -= taxes_dict.get(
                "icms", {}
            ).get("tax_value", 0.0)
        return tax_dict
def _compute_icmssn(self, tax, taxes_dict, **kwargs):
tax_dict = taxes_dict.get(tax.tax_domain)
partner = kwargs.get("partner")
company = kwargs.get("company")
cst = kwargs.get("cst", self.env["l10n_br_fiscal.cst"])
discount_value = kwargs.get("discount_value", 0.00)
insurance_value = kwargs.get("insurance_value", 0.00)
freight_value = kwargs.get("freight_value", 0.00)
other_value = kwargs.get("other_value", 0.00)
icmssn_range = kwargs.get("icmssn_range")
add_to_base = [insurance_value, freight_value, other_value]
remove_from_base = [discount_value]
# Get Computed IPI Tax
tax_dict_ipi = taxes_dict.get("ipi", {})
# Partner not ICMS's Contributor
if partner.ind_ie_dest == NFE_IND_IE_DEST_9:
# Add IPI in ICMS Base
add_to_base.append(tax_dict_ipi.get("tax_value", 0.00))
# Partner ICMS's Contributor
if partner.ind_ie_dest in (NFE_IND_IE_DEST_1, NFE_IND_IE_DEST_2):
if cst.code in ICMS_SN_CST_WITH_CREDIT:
icms_sn_percent = round(
company.simplifed_tax_percent *
(icmssn_range.tax_icms_percent / 100), 2)
tax_dict["percent_amount"] = icms_sn_percent
tax_dict["value_amount"] = icms_sn_percent
kwargs.update({
'add_to_base': sum(add_to_base),
'remove_from_base': sum(remove_from_base),
})
taxes_dict.update(self._compute_tax_base(
tax, taxes_dict.get(tax.tax_domain), **kwargs))
return self._compute_tax(tax, taxes_dict, **kwargs)
    # The taxes below (and their withholding "_wh" variants) have no
    # special base rules: they all delegate to _compute_generic(), which
    # builds the base and then computes the tax value.

    def _compute_issqn(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_issqn_wh(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_csll(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_csll_wh(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_irpj(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_irpj_wh(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_inss(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_inss_wh(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)
def _compute_ipi(self, tax, taxes_dict, **kwargs):
discount_value = kwargs.get("discount_value", 0.00)
insurance_value = kwargs.get("insurance_value", 0.00)
freight_value = kwargs.get("freight_value", 0.00)
other_value = kwargs.get("other_value", 0.00)
add_to_base = [insurance_value, freight_value, other_value]
remove_from_base = [discount_value]
kwargs.update({
'add_to_base': sum(add_to_base),
'remove_from_base': sum(remove_from_base),
})
taxes_dict[tax.tax_domain].update(self._compute_tax_base(
tax, taxes_dict.get(tax.tax_domain), **kwargs))
return self._compute_generic(tax, taxes_dict, **kwargs)
    # II, PIS and COFINS also follow the generic base/value pipeline.

    def _compute_ii(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_pis(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_pis_wh(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_cofins(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)

    def _compute_cofins_wh(self, tax, taxes_dict, **kwargs):
        return self._compute_generic(tax, taxes_dict, **kwargs)
    def _compute_generic(self, tax, taxes_dict, **kwargs):
        """Default pipeline: compute the tax base, then the tax value."""
        taxes_dict[tax.tax_domain].update(self._compute_tax_base(
            tax, taxes_dict.get(tax.tax_domain), **kwargs))
        return self._compute_tax(tax, taxes_dict, **kwargs)
    @api.multi
    def compute_taxes(self, **kwargs):
        """Compute every tax in the recordset, in compute_sequence order.

        arguments:
            company,
            partner,
            product,
            price_unit,
            quantity,
            uom_id,
            fiscal_price,
            fiscal_quantity,
            uot_id,
            discount_value,
            insurance_value,
            other_value,
            freight_value,
            ncm,
            nbs,
            nbm,
            cest,
            operation_line,
            icmssn_range,
            icms_origin,

        return
            {
                'amount_included': float
                'amount_not_included': float
                'amount_withholding': float
                'taxes': dict
            }
        """
        result_amounts = {
            'amount_included': 0.00,
            'amount_not_included': 0.00,
            'amount_withholding': 0.00,
            'amount_estimate_tax': 0.00,
            'taxes': {},
        }
        taxes = {}
        # Taxes must be computed in dependency order (e.g. IPI before
        # ICMS), hence the sort by the group's compute_sequence.
        for tax in self.sorted(key=lambda t: t.compute_sequence):
            tax_dict = TAX_DICT_VALUES.copy()
            taxes[tax.tax_domain] = tax_dict
            try:
                # Define CST FROM TAX
                operation_line = kwargs.get("operation_line")
                fiscal_operation_type = (operation_line.fiscal_operation_type
                                         or FISCAL_OUT)
                kwargs.update({"cst": tax.cst_from_tax(fiscal_operation_type)})
                # Dispatch to the domain-specific method, e.g.
                # _compute_icms for tax_domain == 'icms'.
                compute_method = getattr(self, "_compute_%s" % tax.tax_domain)
                taxes[tax.tax_domain].update(
                    compute_method(tax, taxes, **kwargs)
                )
                if taxes[tax.tax_domain]['tax_include']:
                    result_amounts['amount_included'] += taxes[
                        tax.tax_domain].get('tax_value', 0.00)
                else:
                    result_amounts['amount_not_included'] += taxes[
                        tax.tax_domain].get('tax_value', 0.00)
                if taxes[tax.tax_domain]['tax_withholding']:
                    result_amounts['amount_withholding'] += taxes[
                        tax.tax_domain].get('tax_value', 0.00)
            except AttributeError:
                # NOTE(review): this also swallows AttributeErrors raised
                # *inside* a domain method, not only a missing
                # _compute_<domain> -- confirm this is intentional.
                taxes[tax.tax_domain].update(
                    tax._compute_generic(tax, taxes, **kwargs))
                # When the fiscal document has no tax-specific fields,
                # the taxes are computed generically.
                continue
        # Estimate taxes
        result_amounts['amount_estimate_tax'] = self._compute_estimate_taxes(
            **kwargs)
        result_amounts['taxes'] = taxes
        return result_amounts
@api.onchange('icmsst_base_type')
def _onchange_icmsst_base_type(self):
if self.icmsst_base_type:
ICMS_ST_BASE_TYPE_REL = {
'0': TAX_BASE_TYPE_VALUE,
'1': TAX_BASE_TYPE_VALUE,
'2': TAX_BASE_TYPE_VALUE,
'3': TAX_BASE_TYPE_VALUE,
'4': TAX_BASE_TYPE_PERCENT,
'5': TAX_BASE_TYPE_VALUE}
self.tax_base_type = ICMS_ST_BASE_TYPE_REL.get(
self.icmsst_base_type)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2019, Erkan Ozgur Yilmaz
#
# This module is part of anima and is released under the MIT
# License: http://www.opensource.org/licenses/MIT
import re
from anima.env.mayaEnv import auxiliary
from anima.ui.progress_dialog import ProgressDialogManager
from maya import cmds as cmds, mel as mel
from pymel import core as pm
class Render(object):
"""Tools for render
"""
rso_options = {
'bake': {
# motion blur settings
'motionBlurEnable': 1,
'motionBlurDeformationEnable': 1,
'motionBlurNumTransformationSteps': 31,
'motionBlurFrameDuration': 100,
'motionBlurShutterStart': 0,
'motionBlurShutterEnd': 1,
'motionBlurShutterPosition': 1,
# set GI Engines
'primaryGIEngine': 3,
'secondaryGIEngine': 2,
# set file paths
'irradiancePointCloudMode': 2, # Rebuild (prepass only)
'irradianceCacheMode': 2, # Rebuild (prepass only)
'irradiancePointCloudFilename': 'Outputs/rs/ipc_baked.rsmap',
'irradianceCacheFilename': 'Outputs/rs/im_baked.rsmap'
},
'orig': {},
'current_frame': 1
}
@classmethod
def assign_random_material_color(cls):
"""assigns a lambert with a random color to the selected object
"""
selected = pm.selected()
# create the lambert material
lambert = pm.shadingNode('lambert', asShader=1)
# create the shading engine
shading_engine = pm.nt.ShadingEngine()
lambert.outColor >> shading_engine.surfaceShader
# randomize the lambert color
import random
h = random.random() # 0-1
s = random.random() * 0.5 + 0.25 # 0.25-0.75
v = random.random() * 0.5 + 0.5 # 0.5 - 1
from anima.utils import hsv_to_rgb
r, g, b = hsv_to_rgb(h, s, v)
lambert.color.set(r, g, b)
pm.sets(shading_engine, fe=selected)
pm.select(selected)
@classmethod
def randomize_material_color(cls):
"""randomizes material color of selected nodes
"""
selected = pm.selected()
all_materials = []
for node in selected:
shading_engines = node.listHistory(f=1, type='shadingEngine')
if not shading_engines:
continue
shading_engine = shading_engines[0]
materials = shading_engine.surfaceShader.inputs()
if not materials:
continue
else:
for material in materials:
if material not in all_materials:
all_materials.append(material)
import random
from anima.utils import hsv_to_rgb
attr_lut = {
'lambert': 'color',
}
for material in all_materials:
h = random.random() # 0-1
s = random.random() * 0.5 + 0.25 # 0.25-0.75
v = random.random() * 0.5 + 0.5 # 0.5 - 1
r, g, b = hsv_to_rgb(h, s, v)
attr_name = attr_lut[material.type()]
material.attr(attr_name).set(r, g, b)
@classmethod
def vertigo_setup_look_at(cls):
"""sets up a the necessary locator for teh Vertigo effect for the
selected camera
"""
from anima.env.mayaEnv import vertigo
cam = pm.ls(sl=1)[0]
vertigo.setup_look_at(cam)
@classmethod
def vertigo_setup_vertigo(cls):
"""sets up a Vertigo effect for the selected camera
"""
from anima.env.mayaEnv import vertigo
cam = pm.ls(sl=1)[0]
vertigo.setup_vertigo(cam)
@classmethod
def vertigo_delete(cls):
"""deletes the Vertigo setup for the selected camera
"""
from anima.env.mayaEnv import vertigo
cam = pm.ls(sl=1)[0]
vertigo.delete(cam)
@classmethod
def duplicate_with_connections(cls):
"""duplicates the selected nodes with connections to the network
"""
return pm.duplicate(ic=1, rr=1)
@classmethod
def duplicate_input_graph(cls):
"""duplicates the selected nodes with all their inputs
"""
return pm.duplicate(un=1, rr=1)
@classmethod
def delete_render_and_display_layers(cls):
"""Deletes the display and render layers in the current scene
"""
cls.delete_display_layers()
cls.delete_render_layers()
@classmethod
def delete_display_layers(cls):
"""Deletes the display layers in the current scene
"""
# switch to default render layer before deleting anything
# this will prevent layers to be non-deletable
from anima.env.mayaEnv import auxiliary
auxiliary.switch_to_default_render_layer()
pm.delete(pm.ls(type=['displayLayer']))
@classmethod
def delete_render_layers(cls):
"""Deletes the render layers in the current scene
"""
# switch to default render layer before deleting anything
# this will prevent layers to be non-deletable
from anima.env.mayaEnv import auxiliary
auxiliary.switch_to_default_render_layer()
pm.delete(pm.ls(type=['renderLayer']))
@classmethod
def delete_unused_shading_nodes(cls):
"""Deletes unused shading nodes
"""
pm.mel.eval('MLdeleteUnused')
@classmethod
def normalize_texture_paths(cls):
"""Expands the environment variables in texture paths
"""
import os
for node in pm.ls(type='file'):
if node.hasAttr('colorSpace'):
color_space = node.colorSpace.get()
node.fileTextureName.set(
os.path.expandvars(node.fileTextureName.get())
)
if node.hasAttr('colorSpace'):
node.colorSpace.set(color_space)
@classmethod
def unnormalize_texture_paths(cls):
"""Contracts the environment variables in texture paths bu adding
the repository environment variable to the file paths
"""
from anima.env import mayaEnv
m = mayaEnv.Maya()
m.replace_external_paths()
@classmethod
def assign_substance_textures(cls):
"""auto assigns textures to selected materials.
Supports both Arnold and Redshift materials
"""
#
# Substance Texture Assigner
#
# material_subfixes = {
# "BaseColor": {
# "aiStandardSurface": {
# "attr": "baseColor"
# },
# "RedshiftMaterial": {
# "attr": "diffuse_color"
# },
# },
# "Height": {},
# "Metalness": {
# "aiStandarSurface": {
# "attr": "metalness"
# }
# },
# "Normal": {
# "aiStandardSurface": {
# "tree": {
# "type": "aiBump2D",
# "class": "asUtility",
# "attr": {
# "bumpMap": {
# "output": "outColorR"
# "type": "aiImage",
# "attr": {
# "filename": "%TEXTUREFILE%"
# }
# }
# }
# "target": "normalCamera"
# }
# }
# },
# "Roughness": {
# "aiStandardSurface": {
# "attr": "specularRoughness"
# }
# }
# }
import glob
materials = pm.selected()
# ask the texture folder
texture_path = pm.fileDialog2(cap="Choose Texture Folder", okc="Choose", fm=2)[0]
for material in materials:
# textures should start with the same name of the material
material_name = material.name().split(':')[-1] # strip namespaces
print("material.name: %s" % material_name)
pattern = "%s/%s_*" % (texture_path, material_name)
print("pattern: %s" % pattern)
files = glob.glob(pattern)
print files
# TODO: Make it beautiful by using the auxiliary.create_shader()
# For now do it ugly!
if material.type() == "AiStandardSurface":
# *********************************************
# BaseColor
# create a new aiImage
base_color_file_path = glob.glob(
"%s/%s_BaseColor*" % (texture_path, material_name)
)
if base_color_file_path:
# fix diffuse weight
material.base.set(1)
base_color_file_path = base_color_file_path[0]
base_color_file = pm.shadingNode('file', asTexture=1)
base_color_file.fileTextureName.set(base_color_file_path)
base_color_file.colorSpace.set('sRGB')
base_color_file.outColor >> material.baseColor
# *********************************************
# Height
height_file_path = glob.glob("%s/%s_Height*" % (texture_path, material_name))
if height_file_path:
height_file_path = height_file_path[0]
# create a displacement node
shading_node = material.attr("outColor").outputs(type="shadingEngine")[0]
disp_shader = pm.shadingNode('displacementShader', asShader=1)
disp_shader.displacement >> shading_node.displacementShader
# create texture
disp_file = pm.shadingNode('file', asTexture=1)
disp_file.fileTextureName.set(height_file_path)
disp_file.colorSpace.set('Raw')
disp_file.alphaIsLuminance.set(1)
disp_file.outAlpha >> disp_shader.displacement
# *********************************************
# Metalness
metalness_file_path = glob.glob("%s/%s_Metalness*" % (texture_path, material_name))
if metalness_file_path:
metalness_file_path = metalness_file_path[0]
metalness_file = pm.shadingNode("file", asTexture=1)
metalness_file.fileTextureName.set(metalness_file_path)
metalness_file.colorSpace.set('Raw')
metalness_file.alphaIsLuminance.set(1)
metalness_file.outAlpha >> material.metalness
# *********************************************
# Normal
normal_file_path = glob.glob("%s/%s_Normal*" % (texture_path, material_name))
if normal_file_path:
normal_file_path = normal_file_path[0]
# normal_ai_bump2d = pm.nt.AiBump2d()
normal_ai_normalmap = pm.shadingNode("aiNormalMap", asUtility=1)
normal_file = pm.shadingNode("file", asTexture=1)
normal_file.fileTextureName.set(normal_file_path)
normal_file.colorSpace.set('Raw')
normal_file.outColor >> normal_ai_normalmap.input
normal_ai_normalmap.outValue >> material.normalCamera
# *********************************************
# Roughness
# specularRoughness
roughness_file_path = glob.glob("%s/%s_Roughness*" % (texture_path, material_name))
if roughness_file_path:
roughness_file_path = roughness_file_path[0]
roughness_file = pm.shadingNode("file", asTexture=1)
roughness_file.fileTextureName.set(roughness_file_path)
roughness_file.colorSpace.set('Raw')
roughness_file.alphaIsLuminance.set(1)
roughness_file.outAlpha >> material.specularRoughness
elif material.type() == "RedshiftMaterial":
# *********************************************
# BaseColor
# create a new aiImage
diffuse_color_file_path = glob.glob(
"%s/%s_Diffuse*" % (texture_path, material_name))
if diffuse_color_file_path:
diffuse_color_file_path = diffuse_color_file_path[0]
diffuse_color_file = pm.shadingNode('file', asTexture=1)
diffuse_color_file.fileTextureName.set(
diffuse_color_file_path
)
diffuse_color_file.colorSpace.set('sRGB')
diffuse_color_file.outColor >> material.diffuse_color
# *********************************************
# Height
height_file_path = glob.glob(
"%s/%s_Height*" % (texture_path, material_name))
if height_file_path:
height_file_path = height_file_path[0]
# create a displacement node
shading_node = \
material.attr("outColor").outputs(type="shadingEngine")[0]
disp_shader = \
pm.shadingNode('displacementShader', asShader=1)
disp_shader.displacement >> shading_node.displacementShader
# create texture
disp_file = pm.shadingNode('file', asTexture=1)
disp_file.fileTextureName.set(height_file_path)
disp_file.colorSpace.set('Raw')
disp_file.alphaIsLuminance.set(1)
disp_file.outAlpha >> disp_shader.displacement
# *********************************************
# Metalness
# set material BRDF to GGX and set fresnel type to metalness
material.refl_brdf.set(1)
material.refl_fresnel_mode.set(2)
metalness_file_path = glob.glob(
"%s/%s_Metal*" % (texture_path, material_name))
if metalness_file_path:
metalness_file_path = metalness_file_path[0]
metalness_file = pm.shadingNode("file", asTexture=1)
metalness_file.fileTextureName.set(metalness_file_path)
metalness_file.colorSpace.set('Raw')
metalness_file.alphaIsLuminance.set(1)
metalness_file.outAlpha >> material.refl_metalness
# *********************************************
# Reflectivity
reflectivity_file_path = glob.glob(
"%s/%s_Reflectivity*" % (texture_path, material_name))
if reflectivity_file_path:
reflectivity_file_path = reflectivity_file_path[0]
reflectivity_file = pm.shadingNode("file", asTexture=1)
reflectivity_file.fileTextureName.set(
reflectivity_file_path
)
reflectivity_file.colorSpace.set('sRGB')
reflectivity_file.alphaIsLuminance.set(1)
reflectivity_file.outColor >> material.refl_reflectivity
# *********************************************
# Normal
normal_file_path = glob.glob(
"%s/%s_Normal*" % (texture_path, material_name))
if normal_file_path:
normal_file_path = normal_file_path[0]
rs_bump_map = \
pm.shadingNode("RedshiftBumpMap", asUtility=1)
# set to tangent-space normals
rs_bump_map.inputType.set(1)
normal_file = pm.shadingNode("file", asTexture=1)
normal_file.fileTextureName.set(normal_file_path)
normal_file.colorSpace.set('Raw')
normal_file.outColor >> rs_bump_map.input
rs_bump_map.out >> material.bump_input
rs_bump_map.scale.set(1)
# *********************************************
# Roughness
# specularRoughness
roughness_file_path = glob.glob(
"%s/%s_Roughness*" % (texture_path, material_name))
if roughness_file_path:
roughness_file_path = roughness_file_path[0]
roughness_file = pm.shadingNode("file", asTexture=1)
roughness_file.fileTextureName.set(roughness_file_path)
roughness_file.colorSpace.set('Raw')
roughness_file.alphaIsLuminance.set(1)
roughness_file.outAlpha >> material.refl_roughness
@classmethod
def redshift_ic_ipc_bake(cls):
"""Sets the render settings for IC + IPC bake
"""
# set motion blur
start_frame = int(pm.playbackOptions(q=True, ast=True))
end_frame = int(pm.playbackOptions(q=True, aet=True))
cls.rso_options['bake']['motionBlurFrameDuration'] = end_frame - start_frame + 1
rso = pm.PyNode('redshiftOptions')
# store and set attributes
for attr in cls.rso_options['bake']:
cls.rso_options['orig'][attr] = rso.attr(attr).get()
rso.attr(attr).set(cls.rso_options['bake'][attr])
# go to the first frame
current_frame = pm.currentTime(q=1)
cls.rso_options['current_frame'] = current_frame
pm.currentTime(start_frame)
# do a render
pm.mel.eval('rsRender -render -rv -cam "<renderview>";')
@classmethod
def redshift_ic_ipc_bake_restore(cls):
"""restores the previous render settings
"""
rso = pm.PyNode('redshiftOptions')
# revert settings back
for attr in cls.rso_options['orig']:
rso.attr(attr).set(cls.rso_options['orig'][attr])
# set the GI engines
rso.primaryGIEngine.set(cls.rso_options['bake']['primaryGIEngine'])
rso.secondaryGIEngine.set(cls.rso_options['bake']['secondaryGIEngine'])
# set the irradiance method to load
rso.irradiancePointCloudMode.set(1) # Load
rso.irradianceCacheMode.set(1) # Load
# set the cache paths
rso.irradiancePointCloudFilename.set(cls.rso_options['bake']['irradiancePointCloudFilename'])
rso.irradianceCacheFilename.set(cls.rso_options['bake']['irradianceCacheFilename'])
# go to current frame
current_frame = cls.rso_options['current_frame']
pm.currentTime(current_frame)
@classmethod
def update_render_settings(cls):
"""updates render settings for current renderer
"""
from anima.env import mayaEnv
m = mayaEnv.Maya()
v = m.get_current_version()
if v:
m.set_render_filename(version=v)
@classmethod
def afanasy_job_submitter(cls):
"""Opens the Afanasy job sumitter UI
"""
from anima.env.mayaEnv import afanasy
ui = afanasy.UI()
ui.show()
@classmethod
def auto_convert_to_redshift(cls):
"""converts the current scene to Redshift
"""
from anima.env.mayaEnv import ai2rs
cm = ai2rs.ConversionManager()
cm.auto_convert()
@classmethod
def convert_nodes_to_redshift(cls):
"""converts the selected nodes to Redshift
"""
from anima.env.mayaEnv import ai2rs
cm = ai2rs.ConversionManager()
for node in pm.selected():
cm.convert(node)
@classmethod
def standin_to_bbox(cls):
"""convert the selected stand-in nodes to bbox
"""
[node.mode.set(0) for node in pm.ls(sl=1) if isinstance(node.getShape(), pm.nt.AiStandIn)]
@classmethod
def standin_to_polywire(cls):
"""convert the selected stand-in nodes to bbox
"""
[node.mode.set(2) for node in pm.ls(sl=1) if isinstance(node.getShape(), pm.nt.AiStandIn)]
@classmethod
def add_miLabel(cls):
selection = pm.ls(sl=1)
for node in selection:
if node.type() == 'Transform':
if node.hasAttr('miLabel'):
pass
else:
pm.addAttr(node, ln='miLabel', at='long', keyable=True)
@classmethod
def connect_facingRatio_to_vCoord(cls):
selection = pm.ls(sl=1)
for i in range(1, len(selection)):
selection[0].facingRatio.connect((selection[i] + '.vCoord'),
force=True)
    @classmethod
    def set_shape_attribute(cls, attr_name, value, apply_to_hierarchy,
                            disable_undo_queue=False):
        """sets shape attributes

        Sets ``attr_name`` to ``value`` on the selected shapes, optionally on
        the whole hierarchy below the selection. When the current render
        layer is not the default one, a render layer adjustment is created
        first; passing ``value`` of -1 removes the adjustments instead.

        :param attr_name: the shape attribute to set
        :param value: the value to set; -1 means "remove the adjustments"
        :param apply_to_hierarchy: also process every shape below the
            selection
        :param disable_undo_queue: temporarily disable the undo queue for
            speed; the previous undo state is restored at the end
        """
        undo_state = pm.undoInfo(q=1, st=1)
        if disable_undo_queue:
            pm.undoInfo(st=False)

        supported_shapes = [
            'aiStandIn',
            'mesh',
            'nurbsCurve'
        ]

        # maps attributes to their "override*" companions as found on
        # aiStandIn nodes
        attr_mapper = {
            'castsShadows': 'overrideCastsShadows',
            'receiveShadows': 'overrideReceiveShadows',
            'primaryVisibility': 'overridePrimaryVisibility',
            'visibleInReflections': 'overrideVisibleInReflections',
            'visibleInRefractions': 'overrideVisibleInRefractions',
            'doubleSided': 'overrideDoubleSided',
            'aiSelfShadows': 'overrideSelfShadows',
            'aiOpaque': 'overrideOpaque',
            'aiVisibleInDiffuse': 'overrideVisibleInDiffuse',
            'aiVisibleInGlossy': 'overrideVisibleInGlossy',
            'aiMatte': 'overrideMatte',
        }

        pre_selection_list = pm.ls(sl=1)
        if apply_to_hierarchy:
            pm.select(hierarchy=1)
        objects = pm.ls(sl=1, type=supported_shapes)

        # get override_attr_name from dictionary
        if attr_name in attr_mapper:
            override_attr_name = attr_mapper[attr_name]
        else:
            override_attr_name = None

        # register a caller
        from anima.env.mayaEnv import MayaMainProgressBarWrapper
        wrp = MayaMainProgressBarWrapper()
        pdm = ProgressDialogManager(dialog=wrp)
        # only pop the UI when there is a meaningful amount of work
        pdm.use_ui = True if len(objects) > 3 else False
        caller = pdm.register(len(objects), 'Setting Shape Attribute')

        layers = pm.ls(type='renderLayer')
        is_default_layer = \
            layers[0].currentLayer() == layers[0].defaultRenderLayer()

        if value != -1:
            for item in objects:
                attr_full_name = '%s.%s' % (item.name(), attr_name)
                override_attr_full_name = '%s.%s' % (item.name(), override_attr_name)
                caller.step(message=attr_full_name)
                # on a non-default layer record the change as a layer
                # adjustment first
                if not is_default_layer:
                    pm.editRenderLayerAdjustment(attr_full_name)
                item.setAttr(attr_name, value)
                # if there is an accompanying override attribute like it is
                # found in aiStandIn node
                # then also set override{Attr} to True
                if override_attr_name \
                   and cmds.attributeQuery(override_attr_name, n=item.name(), ex=1):
                    if not is_default_layer:
                        pm.editRenderLayerAdjustment(
                            override_attr_full_name
                        )
                    item.setAttr(override_attr_name, True)
        else:
            for item in objects:
                attr_full_name = '%s.%s' % (item.name(), attr_name)
                override_attr_full_name = '%s.%s' % (item.name(), override_attr_name)
                caller.step(message=attr_full_name)
                # remove any overrides
                if not is_default_layer:
                    pm.editRenderLayerAdjustment(
                        attr_full_name,
                        remove=1
                    )
                if override_attr_name \
                   and cmds.attributeQuery(override_attr_name, n=item.name(), ex=1) \
                   and not is_default_layer:
                    pm.editRenderLayerAdjustment(
                        override_attr_full_name,
                        remove=1
                    )
        # caller.end_progress()
        pm.undoInfo(st=undo_state)
        pm.select(pre_selection_list)
@classmethod
def set_finalGatherHide(cls, value):
"""sets the finalGatherHide to on or off for the given list of objects
"""
attr_name = "miFinalGatherHide"
objects = pm.ls(sl=1)
for obj in objects:
shape = obj
if isinstance(obj, pm.nt.Transform):
shape = obj.getShape()
if not isinstance(shape, (pm.nt.Mesh, pm.nt.NurbsSurface)):
continue
# add the attribute if it doesn't already exists
if not shape.hasAttr(attr_name):
pm.addAttr(shape, ln=attr_name, at="long", min=0, max=1, k=1)
obj.setAttr(attr_name, value)
@classmethod
def replace_shaders_with_last(cls):
"""Assigns the last shader selected to all the objects using the shaders
on the list
"""
sel_list = pm.ls(sl=1)
target_node = sel_list[-1]
for node in sel_list[:-1]:
pm.hyperShade(objects=node)
pm.hyperShade(assign=target_node)
pm.select(None)
@classmethod
def create_texture_ref_object(cls):
selection = pm.ls(sl=1)
for obj in selection:
pm.select(obj)
pm.runtime.CreateTextureReferenceObject()
pm.select(selection)
    @classmethod
    def use_mib_texture_filter_lookup(cls):
        """Adds texture filter lookup node to the selected file texture nodes for
        better texture filtering.

        The function is smart enough to use the existing nodes, if there is a
        connection from the selected file nodes to a mib_texture_filter_lookup node
        then it will not create any new node and just use the existing ones.

        It will also not create any place2dTexture nodes if the file node doesn't
        have a place2dTexture node but is connected to a filter lookup node which
        already has a connection to a place2dTexture node.
        """
        file_nodes = pm.ls(sl=1, type="file")
        for file_node in file_nodes:
            # set the filter type to none
            file_node.filterType.set(0)

            # check if it is already connected to a mib_texture_filter_lookup node
            message_outputs = \
                file_node.message.outputs(type="mib_texture_filter_lookup")
            if len(message_outputs):
                # use the first one
                mib_texture_filter_lookup = message_outputs[0]
            else:
                # create a texture filter lookup node
                mib_texture_filter_lookup = \
                    pm.createNode("mib_texture_filter_lookup")
                # do the connection
                file_node.message >> mib_texture_filter_lookup.tex

            # check if the mib_texture_filter_lookup has any connection to a
            # placement node
            mib_t_f_l_to_placement = \
                mib_texture_filter_lookup.inputs(type="place2dTexture")
            placement_node = None
            if len(mib_t_f_l_to_placement):
                # do nothing
                placement_node = mib_t_f_l_to_placement[0].node()
            else:
                # get the texture placement
                placement_connections = \
                    file_node.inputs(type="place2dTexture", p=1, c=1)
                # if there is no placement create one
                placement_node = None
                if len(placement_connections):
                    placement_node = placement_connections[0][1].node()
                    # disconnect connections from placement to file node
                    # (the ``//`` operator breaks the connection)
                    for conn in placement_connections:
                        conn[1] // conn[0]
                else:
                    placement_node = pm.createNode("place2dTexture")
                # connect placement to mr_texture_filter_lookup
                placement_node.outU >> mib_texture_filter_lookup.coordX
                placement_node.outV >> mib_texture_filter_lookup.coordY

            # re-route the file node's outputs through the lookup node:
            # connect color
            for output in file_node.outColor.outputs(p=1):
                mib_texture_filter_lookup.outValue >> output
            # connect alpha
            for output in file_node.outAlpha.outputs(p=1):
                mib_texture_filter_lookup.outValueA >> output
    @classmethod
    def convert_to_linear(cls):
        """adds a gamma_gain node in between the selected nodes outputs to make the
        result linear

        For every selected node that has outgoing connections a
        ``mip_gamma_gain`` node (gamma 2.2, reversed) is inserted between the
        node and all of its previous outputs.
        """
        selection = pm.ls(sl=1)

        for file_node in selection:
            # get the connections
            outputs = file_node.outputs(plugs=True)

            if not len(outputs):
                continue

            # and insert a mip_gamma_gain
            gamma_node = pm.createNode('mip_gamma_gain')
            gamma_node.setAttr('gamma', 2.2)
            gamma_node.setAttr('reverse', True)

            # connect the file_node to gamma_node
            # (mental ray textures expose outValue/outValueA; plain Maya
            # nodes only have outColor, hence the AttributeError fallback)
            try:
                file_node.outValue >> gamma_node.input
                file_node.outValueA >> gamma_node.inputA
            except AttributeError:
                file_node.outColor >> gamma_node.input

            # do all the connections from the output of the gamma
            for output in outputs:
                try:
                    gamma_node.outValue >> output
                except RuntimeError:
                    # scalar target plugs only accept the alpha output
                    gamma_node.outValueA >> output

        pm.select(selection)
@classmethod
def use_image_sequence(cls):
"""creates an expression to make the mentalrayTexture node also able to read
image sequences
Select your mentalrayTexture nodes and then run the script.
The filename should use the file.%nd.ext format
"""
textures = pm.ls(sl=1, type="mentalrayTexture")
for texture in textures:
# get the filename
filename = texture.getAttr("fileTextureName")
splits = filename.split(".")
if len(splits) == 3:
base = ".".join(splits[0:-2]) + "."
pad = len(splits[-2])
extension = "." + splits[-1]
expr = 'string $padded_frame = python("\'%0' + str(pad) + \
'd\'%" + string(frame));\n' + \
'string $filename = "' + base + '" + \
$padded_frame + ".tga";\n' + \
'setAttr -type "string" ' + texture.name() + \
'.fileTextureName $filename;\n'
# create the expression
pm.expression(s=expr)
@classmethod
def add_to_selected_container(cls):
selection = pm.ls(sl=1)
conList = pm.ls(sl=1, con=1)
objList = list(set(selection) - set(conList))
if len(conList) == 0:
pm.container(addNode=selection)
elif len(conList) == 1:
pm.container(conList, edit=True, addNode=objList)
else:
length = len(conList) - 1
for i in range(0, length):
containerList = conList[i]
pm.container(conList[-1], edit=True, f=True,
addNode=containerList)
pm.container(conList[-1], edit=True, f=True, addNode=objList)
@classmethod
def remove_from_container(cls):
selection = pm.ls(sl=1)
for i in range(0, len(selection)):
con = pm.container(q=True, fc=selection[i])
pm.container(con, edit=True, removeNode=selection[i])
@classmethod
def reload_file_textures(cls):
fileList = pm.ls(type="file")
for fileNode in fileList:
mel.eval('AEfileTextureReloadCmd(%s.fileTextureName)' % fileNode)
    @classmethod
    def transfer_shaders(cls):
        """transfer shaders between selected objects. It can search for
        hierarchies both in source and target sides.

        Select exactly two nodes: first the source, then the target. When
        both are direct parents of a mesh/NURBS shape the shader is assigned
        directly; otherwise their hierarchies are matched by name and both
        shaders and a fixed set of render attributes are transferred per
        matched pair.
        """
        selection = pm.ls(sl=1)
        pm.select(None)

        source = selection[0]
        target = selection[1]
        # auxiliary.transfer_shaders(source, target)
        # pm.select(selection)

        # check if they are direct parents of mesh or nurbs shapes
        source_shape = source.getShape()
        target_shape = target.getShape()

        if source_shape and target_shape:
            # do a direct assignment from source to target
            shading_engines = source_shape.outputs(type=pm.nt.ShadingEngine)
            pm.sets(shading_engines[0], fe=target)
            pm.select(selection)
            return

        # match the two hierarchies by node name
        lut = auxiliary.match_hierarchy(source, target)

        # render attributes copied per matched pair (Maya, Arnold 'ai*'
        # and Redshift 'rs*' attributes; missing ones are skipped)
        attr_names = [
            'castsShadows',
            'receiveShadows',
            'motionBlur',
            'primaryVisibility',
            'smoothShading',
            'visibleInReflections',
            'visibleInRefractions',
            'doubleSided',
            'opposite',
            'aiSelfShadows',
            'aiOpaque',
            'aiVisibleInDiffuse',
            'aiVisibleInGlossy',
            'aiExportTangents',
            'aiExportColors',
            'aiExportRefPoints',
            'aiExportRefNormals',
            'aiExportRefTangents',
            'color',
            'interpolation',
            'aiTranslator',
            'intensity',
            'aiExposure',
            'aiColorTemperature',
            'emitDiffuse',
            'emitSpecular',
            'aiDecayType',
            'lightVisible',
            'aiSamples',
            'aiNormalize',
            'aiCastShadows',
            'aiShadowDensity',
            'aiShadowColor',
            'aiAffectVolumetrics',
            'aiCastVolumetricShadows',
            'aiVolumeSamples',
            'aiDiffuse',
            'aiSpecular',
            'aiSss',
            'aiIndirect',
            'aiMaxBounces',
            'aiSubdivType',
            'aiSubdivIterations',
            'aiSubdivAdaptiveMetric',
            'aiSubdivPixelError',
            'aiSubdivUvSmoothing',
            'aiSubdivSmoothDerivs',
            'aiDispHeight',
            'aiDispPadding',
            'aiDispZeroValue',
            'aiDispAutobump',
            'aiStepSize',
            'rsEnableSubdivision',
            'rsSubdivisionRule',
            'rsScreenSpaceAdaptive',
            'rsDoSmoothSubdivision',
            'rsMinTessellationLength',
            'rsMaxTessellationSubdivs',
            'rsOutOfFrustumTessellationFactor',
            'rsLimitOutOfFrustumTessellation',
            'rsMaxOutOfFrustumTessellationSubdivs',
            'rsEnableDisplacement',
            'rsMaxDisplacement',
            'rsDisplacementScale',
            'rsAutoBumpMap',
            'rsObjectId',
        ]

        # from anima.ui import progress_dialog
        # from anima.env.mayaEnv import MayaMainProgressBarWrapper
        # wrp = MayaMainProgressBarWrapper()
        # pdm = progress_dialog.ProgressDialogManager(dialog=wrp)
        # caller = pdm.register(2, title='Transferring materials')

        for source_node, target_node in lut['match']:
            auxiliary.transfer_shaders(source_node, target_node)

            # also transfer render attributes
            for attr_name in attr_names:
                try:
                    target_node.setAttr(
                        attr_name,
                        source_node.getAttr(attr_name)
                    )
                except (pm.MayaAttributeError, RuntimeError):
                    # attribute missing or locked on either side: skip
                    pass

                # input connections to attributes
                try:
                    for plug in source_node.attr(attr_name).inputs(p=1):
                        plug >> target_node.attr(attr_name)
                except pm.MayaAttributeError:
                    pass
            # caller.step()
        # caller.end_progress()

        # report the target nodes that had no source counterpart
        if len(lut['no_match']):
            pm.select(lut['no_match'])
            print(
                'The following nodes has no corresponding source:\n%s' % (
                    '\n'.join(
                        [node.name() for node in lut['no_match']]
                    )
                )
            )
@classmethod
def fit_placement_to_UV(cls):
selection = pm.ls(sl=1, fl=1)
uvs = [n for n in selection if isinstance(n, pm.general.MeshUV)]
placements = \
[p for p in selection if isinstance(p, pm.nt.Place2dTexture)]
minU = 1000
minV = 1000
maxU = -1000
maxV = -1000
for uv in uvs:
uvCoord = pm.polyEditUV(uv, q=1)
if uvCoord[0] > maxU:
maxU = uvCoord[0]
if uvCoord[0] < minU:
minU = uvCoord[0]
if uvCoord[1] > maxV:
maxV = uvCoord[1]
if uvCoord[1] < minV:
minV = uvCoord[1]
for p in placements:
p.setAttr('coverage', (maxU - minU, maxV - minV))
p.setAttr('translateFrame', (minU, minV))
@classmethod
def connect_placement2d_to_file(cls):
"""connects the selected placement node to the selected file textures
"""
attr_lut = [
'coverage',
'translateFrame',
'rotateFrame',
'mirrorU',
'mirrorV',
'stagger',
'wrapU',
'wrapV',
'repeatUV',
'offset',
'rotateUV',
'noiseUV',
'vertexUvOne',
'vertexUvTwo',
'vertexUvThree',
'vertexCameraOne',
('outUV', 'uvCoord'),
('outUvFilterSize', 'uvFilterSize')
]
# get placement and file nodes
placement_node = pm.ls(sl=1, type=pm.nt.Place2dTexture)[0]
file_nodes = pm.ls(sl=1, type=pm.nt.File)
for file_node in file_nodes:
for attr in attr_lut:
if isinstance(attr, str):
source_attr_name = attr
target_attr_name = attr
elif isinstance(attr, tuple):
source_attr_name = attr[0]
target_attr_name = attr[1]
placement_node.attr(source_attr_name) >> \
file_node.attr(target_attr_name)
@classmethod
def open_node_in_browser(cls):
# get selected nodes
node_attrs = {
'file': 'fileTextureName',
'aiImage': 'filename',
'aiStandIn': 'dso',
}
import os
from anima.utils import open_browser_in_location
for node in pm.ls(sl=1):
type_ = pm.objectType(node)
# special case: if transform use shape
if type_ == 'transform':
node = node.getShape()
type_ = pm.objectType(node)
attr_name = node_attrs.get(type_)
if attr_name:
# if any how it contains a "#" character use the path
path = node.getAttr(attr_name)
if "#" in path:
path = os.path.dirname(path)
open_browser_in_location(path)
    @classmethod
    def enable_matte(cls, color=0):
        """enables matte on selected objects

        Enables the Arnold matte option on the shaders assigned to the
        selected meshes, NURBS surfaces and aiStandIns, and sets the matte
        color/alpha. Render layer adjustments are created so the change is
        per-layer.

        :param color: index into the color table below, default 0
            (not visible)
        """
        # matte RGBA presets, indexed by the ``color`` argument
        colors = [
            [0, 0, 0, 0],  # Not Visible
            [1, 0, 0, 0],  # Red
            [0, 1, 0, 0],  # Green
            [0, 0, 1, 0],  # Blue
            [0, 0, 0, 1],  # Alpha
        ]

        # only these shader types expose the aiEnableMatte/aiMatteColor attrs
        arnold_shaders = (
            pm.nt.AiStandard, pm.nt.AiHair, pm.nt.AiSkin, pm.nt.AiUtility
        )

        for node in pm.ls(sl=1, dag=1, type=[pm.nt.Mesh, pm.nt.NurbsSurface,
                                             'aiStandIn']):
            obj = node
            #if isinstance(node, pm.nt.Mesh):
            #    obj = node
            #elif isinstance(node, pm.nt.Transform):
            #    obj = node.getShape()

            shading_nodes = pm.listConnections(obj, type='shadingEngine')
            for shadingNode in shading_nodes:
                shader = shadingNode.attr('surfaceShader').connections()[0]
                if isinstance(shader, arnold_shaders):
                    try:
                        pm.editRenderLayerAdjustment(shader.attr("aiEnableMatte"))
                        pm.editRenderLayerAdjustment(shader.attr("aiMatteColor"))
                        pm.editRenderLayerAdjustment(shader.attr("aiMatteColorA"))
                        shader.attr("aiEnableMatte").set(1)
                        shader.attr("aiMatteColor").set(colors[color][0:3], type='double3')
                        shader.attr("aiMatteColorA").set(colors[color][3])
                    except RuntimeError as e:
                        # there is some connections
                        print(str(e))
@classmethod
def enable_subdiv(cls, fixed_tes=False, max_subdiv=3):
"""enables subdiv on selected objects
:param fixed_tes: Uses fixed tessellation.
:param max_subdiv: The max subdivision iteration. Default 3.
"""
#
# Set SubDiv to CatClark on Selected nodes
#
for node in pm.ls(sl=1):
shape = node.getShape()
try:
shape.aiSubdivIterations.set(max_subdiv)
shape.aiSubdivType.set(1)
shape.aiSubdivPixelError.set(0)
except AttributeError:
pass
try:
shape.rsEnableSubdivision.set(1)
shape.rsMaxTessellationSubdivs.set(max_subdiv)
if not fixed_tes:
shape.rsLimitOutOfFrustumTessellation.set(1)
shape.rsMaxOutOfFrustumTessellationSubdivs.set(1)
else:
shape.rsScreenSpaceAdaptive.set(0)
shape.rsMinTessellationLength.set(0)
except AttributeError:
pass
@classmethod
def barndoor_simulator_setup(cls):
"""creates a barndoor simulator
"""
bs = auxiliary.BarnDoorSimulator()
bs.light = pm.ls(sl=1)[0]
bs.setup()
@classmethod
def barndoor_simulator_unsetup(cls):
"""removes the barndoor simulator
"""
bs = auxiliary.BarnDoorSimulator()
for light in pm.ls(sl=1):
light_shape = light.getShape()
if isinstance(light_shape, pm.nt.Light):
bs.light = light
bs.unsetup()
@classmethod
def fix_barndoors(cls):
"""fixes the barndoors on scene lights created in MtoA 1.0 to match the
new behaviour of barndoors in MtoA 1.1
"""
for light in pm.ls(type='spotLight'):
# calculate scale
cone_angle = light.getAttr('coneAngle')
penumbra_angle = light.getAttr('penumbraAngle')
if penumbra_angle < 0:
light.setAttr(
'coneAngle',
max(cone_angle + penumbra_angle, 0.1)
)
else:
light.setAttr(
'coneAngle',
max(cone_angle - penumbra_angle, 0.1)
)
@classmethod
def convert_aiSkinSSS_to_aiSkin(cls):
    """converts aiSkinSSS nodes in the current scene to aiSkin + aiStandard
    nodes automatically

    For every ``aiSkinSss`` node found, an ``aiSkin`` and an ``aiStandard``
    node are created, the old node's attribute values and incoming
    connections are copied over (routed to the right new node through the
    lookup table below), outgoing connections are rewired to the new
    ``aiStandard`` and the old node is deleted.
    """
    # maps each aiSkinSss attribute to the new node ('aiSkin' or
    # 'aiStandard') and attribute name it should be copied to; the
    # optional 'multiplier' scales the copied value
    attr_mapper = {
        # diffuse
        'color': {
            'node': 'aiStandard',
            'attr_name': 'color'
        },
        'diffuseWeight': {
            'node': 'aiStandard',
            'attr_name': 'Kd',
            'multiplier': 0.7
        },
        'diffuseRoughness': {
            'node': 'aiStandard',
            'attr_name': 'diffuseRoughness'
        },
        # sss
        'sssWeight': {
            'node': 'aiSkin',
            'attr_name': 'sssWeight'
        },
        # shallowScatter
        'shallowScatterColor': {
            'node': 'aiSkin',
            'attr_name': 'shallowScatterColor',
        },
        'shallowScatterWeight': {
            'node': 'aiSkin',
            'attr_name': 'shallowScatterWeight'
        },
        'shallowScatterRadius': {
            'node': 'aiSkin',
            'attr_name': 'shallowScatterRadius'
        },
        # midScatter
        'midScatterColor': {
            'node': 'aiSkin',
            'attr_name': 'midScatterColor',
        },
        'midScatterWeight': {
            'node': 'aiSkin',
            'attr_name': 'midScatterWeight'
        },
        'midScatterRadius': {
            'node': 'aiSkin',
            'attr_name': 'midScatterRadius'
        },
        # deepScatter
        'deepScatterColor': {
            'node': 'aiSkin',
            'attr_name': 'deepScatterColor',
        },
        'deepScatterWeight': {
            'node': 'aiSkin',
            'attr_name': 'deepScatterWeight'
        },
        'deepScatterRadius': {
            'node': 'aiSkin',
            'attr_name': 'deepScatterRadius'
        },
        # primaryReflection
        'primaryReflectionColor': {
            'node': 'aiSkin',
            'attr_name': 'specularColor'
        },
        'primaryReflectionWeight': {
            'node': 'aiSkin',
            'attr_name': 'specularWeight'
        },
        'primaryReflectionRoughness': {
            'node': 'aiSkin',
            'attr_name': 'specularRoughness'
        },
        # secondaryReflection
        'secondaryReflectionColor': {
            'node': 'aiSkin',
            'attr_name': 'sheenColor'
        },
        'secondaryReflectionWeight': {
            'node': 'aiSkin',
            'attr_name': 'sheenWeight'
        },
        'secondaryReflectionRoughness': {
            'node': 'aiSkin',
            'attr_name': 'sheenRoughness'
        },
        # bump
        'normalCamera': {
            'node': 'aiSkin',
            'attr_name': 'normalCamera'
        },
        # sss multiplier
        'globalSssRadiusMultiplier': {
            'node': 'aiSkin',
            'attr_name': 'globalSssRadiusMultiplier'
        },
    }

    all_skin_sss = pm.ls(type='aiSkinSss')
    for skin_sss in all_skin_sss:
        skin = pm.shadingNode('aiSkin', asShader=1)
        standard = pm.shadingNode('aiStandard', asShader=1)

        # the aiSkin result is fed through the aiStandard emission channel
        skin.attr('outColor') >> standard.attr('emissionColor')
        standard.setAttr('emission', 1.0)
        skin.setAttr('fresnelAffectSss',
                     0)  # to match the previous behaviour

        node_mapper = {
            'aiSkin': skin,
            'aiStandard': standard
        }

        for attr in attr_mapper.keys():
            inputs = skin_sss.attr(attr).inputs(p=1, c=1)
            if inputs:
                # copy inputs
                destination_attr_name = inputs[0][0].name().split('.')[-1]
                source = inputs[0][1]
                if destination_attr_name in attr_mapper:
                    node = attr_mapper[destination_attr_name]['node']
                    attr_name = attr_mapper[destination_attr_name][
                        'attr_name']
                    source >> node_mapper[node].attr(attr_name)
                else:
                    # attribute not in the table: connect to the aiSkin
                    # under the same name
                    source >> skin.attr(destination_attr_name)
            else:
                # copy values, scaled by the optional multiplier
                node = node_mapper[attr_mapper[attr]['node']]
                attr_name = attr_mapper[attr]['attr_name']
                multiplier = attr_mapper[attr].get('multiplier', 1.0)
                attr_value = skin_sss.getAttr(attr)
                if isinstance(attr_value, tuple):
                    # NOTE(review): relies on py2 map() returning a list;
                    # under py3 set() would receive an iterator -- confirm
                    # before porting
                    attr_value = map(lambda x: x * multiplier, attr_value)
                else:
                    attr_value *= multiplier
                node.attr(attr_name).set(attr_value)

        # after everything is set up
        # connect the aiStandard to the shadingEngine
        for source, dest in skin_sss.outputs(p=1, c=1):
            standard.attr('outColor') >> dest

        # and rename the materials
        orig_name = skin_sss.name()

        # delete the skinSSS node first so its name is free for the aiSkin
        pm.delete(skin_sss)

        skin_name = orig_name
        standard_name = '%s_aiStandard' % orig_name

        skin.rename(skin_name)
        standard.rename(standard_name)
        print('updated %s' % skin_name)
@classmethod
def normalize_sss_weights(cls):
    """normalizes the sss weights so their total weight is 1.0

    if a aiStandard is assigned to the selected object it searches for an
    aiSkin in the emission channel.

    the script considers 0.7 as the highest diffuse value for aiStandard
    """
    # get the shader of the selected object
    assigned_shader = pm.ls(
        pm.ls(sl=1)[0].getShape().outputs(type='shadingEngine')[0].inputs(),
        mat=1
    )[0]
    if assigned_shader.type() == 'aiStandard':
        # the aiSkin is expected to drive the aiStandard emission channel
        sss_shader = assigned_shader.attr('emissionColor').inputs()[0]
        diffuse_weight = assigned_shader.attr('Kd').get()
    else:
        sss_shader = assigned_shader
        diffuse_weight = 0

    def get_attr_or_texture(attr):
        # When the attribute is textured, return the texture's gain
        # attribute instead, so normalization is applied there.
        if attr.inputs():
            # we probably have a texture assigned
            # so use its multiply attribute
            texture = attr.inputs()[0]
            # NOTE(review): the unconditional assignment below assumes any
            # texture type has a 'multiply' attribute; only AiImage and
            # File are handled explicitly -- confirm no other texture
            # types reach this code
            attr = texture.attr('multiply')
            if isinstance(texture, pm.nt.AiImage):
                attr = texture.attr('multiply')
            elif isinstance(texture, pm.nt.File):
                attr = texture.attr('colorGain')
        return attr

    shallow_attr = get_attr_or_texture(
        sss_shader.attr('shallowScatterWeight')
    )
    mid_attr = get_attr_or_texture(sss_shader.attr('midScatterWeight'))
    deep_attr = get_attr_or_texture(sss_shader.attr('deepScatterWeight'))

    # color attributes return a 3-tuple; average it to a single scalar
    shallow_weight = shallow_attr.get()
    if isinstance(shallow_weight, tuple):
        shallow_weight = (
            shallow_weight[0] + shallow_weight[1] + shallow_weight[2]
        ) / 3.0

    mid_weight = mid_attr.get()
    if isinstance(mid_weight, tuple):
        mid_weight = (
            mid_weight[0] + mid_weight[1] + mid_weight[2]
        ) / 3.0

    deep_weight = deep_attr.get()
    if isinstance(deep_weight, tuple):
        deep_weight = (
            deep_weight[0] + deep_weight[1] + deep_weight[2]
        ) / 3.0

    total_sss_weight = shallow_weight + mid_weight + deep_weight
    # scale factor so that diffuse (relative to its 0.7 maximum) plus the
    # three sss weights sum to 1.0
    mult = (1 - diffuse_weight / 0.7) / total_sss_weight

    # a scalar set() raises RuntimeError on color attributes; retry with
    # the value spread over the three channels
    try:
        shallow_attr.set(shallow_weight * mult)
    except RuntimeError:
        w = shallow_weight * mult
        shallow_attr.set(w, w, w)

    try:
        mid_attr.set(mid_weight * mult)
    except RuntimeError:
        w = mid_weight * mult
        mid_attr.set(w, w, w)

    try:
        deep_attr.set(deep_weight * mult)
    except RuntimeError:
        w = deep_weight * mult
        deep_attr.set(w, w, w)
@classmethod
def create_eye_shader_and_controls(cls):
    """This is pretty much specific to the way we are creating eye shaders
    for characters in KKS project, but it is a useful trick, select the
    inner eye objects before running
    """
    eyes = pm.ls(sl=1)
    if not eyes:
        # nothing selected, nothing to do
        return

    # the character root node; the control attributes are added to it
    char = eyes[0].getAllParents()[-1]
    place = pm.shadingNode('place2dTexture', asUtility=1)
    emission_image = pm.shadingNode('aiImage', asTexture=1)
    ks_image = pm.shadingNode('aiImage', asTexture=1)

    # project textures driving the eye light (emission) and specular masks
    texture_paths = {
        'emission': '$REPO1977/KKS/Assets/Characters/Body_Parts/Textures/'
                    'char_eyeInner_light_v001.png',
        'Ks': '$REPO1977/KKS/Assets/Characters/Body_Parts/Textures/'
              'char_eyeInner_spec_v002.png',
    }
    emission_image.setAttr('filename', texture_paths['emission'])
    ks_image.setAttr('filename', texture_paths['Ks'])

    place.outUV >> emission_image.attr('uvcoords')

    # create the control attributes on the character root (idempotent:
    # existing attributes are kept, only the default is reset)
    if not char.hasAttr('eyeLightStrength'):
        char.addAttr('eyeLightStrength', at='double', min=0, dv=0.0, k=1)
    else:
        # set the default
        char.attr('eyeLightStrength').set(0)

    if not char.hasAttr('eyeLightAngle'):
        char.addAttr("eyeLightAngle", at='double', dv=0, k=1)

    if not char.hasAttr('eyeDiffuseWeight'):
        char.addAttr(
            "eyeDiffuseWeight", at='double', dv=0.15, k=1, min=0, max=1
        )

    if not char.hasAttr('eyeSpecularWeight'):
        char.addAttr(
            'eyeSpecularWeight', at='double', dv=1.0, k=1, min=0, max=1
        )

    if not char.hasAttr('eyeSSSWeight'):
        char.addAttr(
            'eyeSSSWeight', at='double', dv=0.5, k=1, min=0, max=1
        )

    # connect eye light strength
    char.eyeLightStrength >> emission_image.attr('multiplyR')
    char.eyeLightStrength >> emission_image.attr('multiplyG')
    char.eyeLightStrength >> emission_image.attr('multiplyB')

    # connect eye light angle
    char.eyeLightAngle >> place.attr('rotateFrame')

    # connect specular weight
    char.eyeSpecularWeight >> ks_image.attr('multiplyR')
    char.eyeSpecularWeight >> ks_image.attr('multiplyG')
    char.eyeSpecularWeight >> ks_image.attr('multiplyB')

    for eye in eyes:
        shading_engine = eye.getShape().outputs(type='shadingEngine')[0]
        shader = pm.ls(shading_engine.inputs(), mat=1)[0]

        # connect the diffuse shader input to the emissionColor
        diffuse_texture = shader.attr('color').inputs(p=1, s=1)[0]
        diffuse_texture >> shader.attr('emissionColor')
        emission_image.outColorR >> shader.attr('emission')

        # also connect it to specular color
        diffuse_texture >> shader.attr('KsColor')

        # connect the Ks image to the specular weight
        ks_image.outColorR >> shader.attr('Ks')

        # also connect it to sss color
        diffuse_texture >> shader.attr('KsssColor')

        char.eyeDiffuseWeight >> shader.attr('Kd')
        char.eyeSSSWeight >> shader.attr('Ksss')

        # set some default values
        shader.attr('diffuseRoughness').set(0)
        shader.attr('Kb').set(0)
        shader.attr('directDiffuse').set(1)
        shader.attr('indirectDiffuse').set(1)
        shader.attr('specularRoughness').set(0.4)
        shader.attr('specularAnisotropy').set(0.5)
        shader.attr('specularRotation').set(0)
        shader.attr('specularFresnel').set(0)
        shader.attr('Kr').set(0)
        shader.attr('enableInternalReflections').set(0)
        shader.attr('Kt').set(0)
        shader.attr('transmittance').set([1, 1, 1])
        shader.attr('opacity').set([1, 1, 1])
        shader.attr('sssRadius').set([1, 1, 1])

    # restore the original selection
    pm.select(eyes)
@classmethod
def randomize_attr(cls, nodes, attr, min, max, pre=0.1):
    """Set *attr* on every node to a random value picked from
    ``[min, max)`` and floored to the nearest multiple of *pre*.

    :param list nodes: nodes carrying the attribute
    :param str attr: the attribute name to set
    :param float, int min: lower bound of the random range
    :param float, int max: upper bound of the random range
    :param float pre: precision/step the value is quantized to
    :return:
    """
    import math
    import random
    span = float(max - min)
    for node in nodes:
        value = random.random() * span + float(min)
        # quantize down to the requested precision
        node.setAttr(attr, math.floor(value / pre) * pre)
@classmethod
def randomize_light_color_temp(cls, min_field, max_field):
    """Randomize ``aiColorTemperature`` on the selected light shapes.

    :param min_field: maya floatField holding the minimum temperature
    :param max_field: maya floatField holding the maximum temperature
    :return:
    """
    lower = pm.floatField(min_field, q=1, v=1)
    upper = pm.floatField(max_field, q=1, v=1)
    shapes = [node.getShape() for node in pm.ls(sl=1)]
    # quantize to whole Kelvin steps
    cls.randomize_attr(shapes, 'aiColorTemperature', lower, upper, 1)
@classmethod
def randomize_light_intensity(cls, min_field, max_field):
    """Randomize ``aiExposure`` on the selected light shapes.

    :param min_field: maya floatField holding the minimum exposure
    :param max_field: maya floatField holding the maximum exposure
    :return:
    """
    lower = pm.floatField(min_field, q=1, v=1)
    upper = pm.floatField(max_field, q=1, v=1)
    shapes = [node.getShape() for node in pm.ls(sl=1)]
    # quantize to 0.1 exposure steps
    cls.randomize_attr(shapes, 'aiExposure', lower, upper, 0.1)
@classmethod
def setup_outer_eye_render_attributes(cls):
    """sets outer eye render attributes for characters, select outer eye
    objects and run this
    """
    # every one of these flags is switched off on the shape
    disabled_attrs = (
        'castsShadows',
        'visibleInReflections',
        'visibleInRefractions',
        'aiSelfShadows',
        'aiOpaque',
        'aiVisibleInDiffuse',
        'aiVisibleInGlossy',
    )
    for node in pm.ls(sl=1):
        shape = node.getShape()
        for attr_name in disabled_attrs:
            shape.setAttr(attr_name, 0)
@classmethod
def setup_window_glass_render_attributes(cls):
    """sets window glass render attributes for environments, select window
    glass objects and run this
    """
    # get the glass shader or create one (a fully specular,
    # non-refractive aiStandard)
    shader_name = 'toolbox_glass_shader'
    shaders = pm.ls('%s*' % shader_name)
    selection = pm.ls(sl=1)
    if len(shaders) > 0:
        shader = shaders[0]
    else:
        shader = pm.shadingNode(
            'aiStandard',
            asShader=1,
            name='%s#' % shader_name
        )
        shader.setAttr('Ks', 1)
        shader.setAttr('specularRoughness', 0)
        shader.setAttr('Kr', 0)
        shader.setAttr('enableInternalReflections', 0)
        shader.setAttr('Kt', 0)
        shader.setAttr('KtColor', (0, 0, 0))

    # per-shape render flags applied to every selected object
    shape_attributes = [
        ('castsShadows', 0),
        ('visibleInReflections', 0),
        ('visibleInRefractions', 0),
        ('aiSelfShadows', 0),
        ('aiOpaque', 1),
        ('aiVisibleInDiffuse', 0),
        ('aiVisibleInGlossy', 0),
    ]
    for node in selection:
        shape = node.getShape()
        # NOTE(review): relies on py2 map() being eager; under py3 this
        # line would be a no-op -- confirm before porting
        map(lambda x: shape.setAttr(*x), shape_attributes)

        if isinstance(shape, pm.nt.AiStandIn):
            # let the assigned shader override the stand-in's own shaders
            shape.overrideShaders.set(1)

        # assign it to the stand in
        pm.select(node)
        pm.hyperShade(assign=shader)
@classmethod
def dummy_window_light_plane(cls):
    """creates or updates the dummy window plane for the given area light
    """
    from anima.env.mayaEnv import auxiliary
    # pick up live edits to the auxiliary module
    reload(auxiliary)
    for area_light in pm.selected():
        dummy = auxiliary.DummyWindowLight()
        dummy.light = area_light
        dummy.update()
@classmethod
def setup_z_limiter(cls):
    """Create (or reuse) the surface shader used by the z-limiter setup.

    Mirrors the lookup/creation pattern of
    ``setup_window_glass_render_attributes``: if a shader whose name
    starts with ``z_limiter_shader`` already exists it is reused,
    otherwise a fresh ``surfaceShader`` is created with an auto-numbered
    name.

    :return: the found or newly created shader node
    """
    # BUG FIX: the original built the search pattern with
    # ``'%s*' * shader_name`` which raises ``TypeError`` (str * str);
    # the ``%`` format operator is intended.  The trailing ``#`` was also
    # dropped from the base name so the created node is not named with a
    # double ``##`` (consistent with the glass shader helper above).
    shader_name = 'z_limiter_shader'
    shaders = pm.ls('%s*' % shader_name)
    if len(shaders) > 0:
        shader = shaders[0]
    else:
        shader = pm.shadingNode(
            'surfaceShader',
            asShader=1,
            name='%s#' % shader_name
        )
    # return the shader so callers can use it (previously it was a dead
    # local); returning a value is backward compatible
    return shader
@classmethod
def convert_file_node_to_ai_image_node(cls):
    """converts the file node to aiImage node

    Selected ``file`` nodes are replaced with ``aiImage`` nodes carrying
    the same texture path, outgoing connections and name.  The attached
    place2dTexture is kept (and wired into the aiImage) only when any of
    its values differ from the defaults below; otherwise it is deleted.
    """
    # place2dTexture values that mean "no UV transformation applied"
    default_values = {
        'coverageU': 1,
        'coverageV': 1,
        'translateFrameU': 0,
        'translateFrameV': 0,
        'rotateFrame': 0,
        'repeatU': 1,
        'repeatV': 1,
        'offsetU': 0,
        'offsetV': 0,
        'rotateUV': 0,
        'noiseU': 0,
        'noiseV': 0
    }
    for node in pm.ls(sl=1, type='file'):
        node_name = node.name()
        path = node.getAttr('fileTextureName')
        ai_image = pm.shadingNode('aiImage', asTexture=1)
        ai_image.setAttr('filename', path)

        # check the placement node
        placements = node.listHistory(type='place2dTexture')
        if len(placements):
            placement = placements[0]
            # check default values; keep the placement only if it does
            # something
            if any([placement.getAttr(attr_name) != default_values[attr_name] for attr_name in default_values]):
                # connect the placement to the aiImage
                placement.outUV >> ai_image.uvcoords
            else:
                # delete it
                pm.delete(placement)

        # connect the aiImage outputs in place of the file node's
        # (aiImage attribute names match the file node's output names)
        for attr_out, attr_in in node.outputs(p=1, c=1):
            attr_name = attr_out.name().split('.')[-1]
            if attr_name == 'message':
                continue
            ai_image.attr(attr_name) >> attr_in

        # delete the File node
        pm.delete(node)

        # rename the aiImage node so it takes the file node's name
        ai_image.rename(node_name)
@classmethod
def create_generic_tooth_shader(cls):
    """creates generic tooth shader for selected objects

    Builds an ``aiStandard`` based shader network (with an aiNoise driven
    bump) through ``auxiliary.create_shader`` and assigns it to every
    selected node.
    """
    shader_name = 'toolbox_generic_tooth_shader#'
    selection = pm.ls(sl=1)

    # declarative shader network description consumed by
    # auxiliary.create_shader(); nested dicts become connected nodes
    shader_tree = {
        'type': 'aiStandard',
        'class': 'asShader',
        'attr': {
            'color': [1, 0.909, 0.815],
            'Kd': 0.2,
            'KsColor': [1, 1, 1],
            'Ks': 0.5,
            'specularRoughness': 0.10,
            'specularFresnel': 1,
            'Ksn': 0.05,
            'enableInternalReflections': 0,
            'KsssColor': [1, 1, 1],
            'Ksss': 1,
            'sssRadius': [1, 0.853, 0.68],
            'normalCamera': {
                'output': 'outNormal',
                'type': 'bump2d',
                'class': 'asTexture',
                'attr': {
                    'bumpDepth': 0.05,
                    'bumpValue': {
                        'output': 'outValue',
                        'type': 'aiNoise',
                        'class': 'asUtility',
                        'attr': {
                            'scaleX': 4,
                            'scaleY': 0.250,
                            'scaleZ': 4,
                        }
                    }
                }
            }
        }
    }
    shader = auxiliary.create_shader(shader_tree, shader_name)
    for node in selection:
        # assign it to the stand in
        pm.select(node)
        pm.hyperShade(assign=shader)
@classmethod
def create_generic_gum_shader(cls):
    """Set up a generic gum shader on the selected objects.

    Builds an ``aiStandard`` based shader network (with an aiNoise driven
    bump) through ``auxiliary.create_shader`` and assigns it to every
    selected node.
    """
    # fixed: the first parameter of this @classmethod was named ``self``;
    # renamed to ``cls`` for consistency with the sibling shader helpers
    shader_name = 'toolbox_generic_gum_shader#'
    selection = pm.ls(sl=1)

    # declarative shader network description consumed by
    # auxiliary.create_shader(); nested dicts become connected nodes
    shader_tree = {
        'type': 'aiStandard',
        'class': 'asShader',
        'attr': {
            'color': [0.993, 0.596, 0.612],
            'Kd': 0.35,
            'KsColor': [1, 1, 1],
            'Ks': 0.010,
            'specularRoughness': 0.2,
            'enableInternalReflections': 0,
            'KsssColor': [1, 0.6, 0.6],
            'Ksss': 0.5,
            'sssRadius': [0.5, 0.5, 0.5],
            'normalCamera': {
                'output': 'outNormal',
                'type': 'bump2d',
                'class': 'asTexture',
                'attr': {
                    'bumpDepth': 0.1,
                    'bumpValue': {
                        'output': 'outValue',
                        'type': 'aiNoise',
                        'class': 'asUtility',
                        'attr': {
                            'scaleX': 4,
                            'scaleY': 1,
                            'scaleZ': 4,
                        }
                    }
                }
            }
        }
    }
    shader = auxiliary.create_shader(shader_tree, shader_name)
    for node in selection:
        # assign it to the stand in
        pm.select(node)
        pm.hyperShade(assign=shader)
@classmethod
def create_generic_tongue_shader(cls):
    """Set up a generic tongue shader on the selected objects.

    Builds an ``aiStandard`` based shader network (with an aiNoise driven
    bump) through ``auxiliary.create_shader`` and assigns it to every
    selected node.
    """
    # fixed: the first parameter of this @classmethod was named ``self``;
    # renamed to ``cls`` for consistency with the sibling shader helpers
    shader_name = 'toolbox_generic_tongue_shader#'
    selection = pm.ls(sl=1)

    # declarative shader network description consumed by
    # auxiliary.create_shader(); nested dicts become connected nodes
    shader_tree = {
        'type': 'aiStandard',
        'class': 'asShader',
        'attr': {
            'color': [0.675, 0.174, 0.194],
            'Kd': 0.35,
            'KsColor': [1, 1, 1],
            'Ks': 0.010,
            'specularRoughness': 0.2,
            'enableInternalReflections': 0,
            'KsssColor': [1, 0.3, 0.3],
            'Ksss': 0.5,
            'sssRadius': [0.5, 0.5, 0.5],
            'normalCamera': {
                'output': 'outNormal',
                'type': 'bump2d',
                'class': 'asTexture',
                'attr': {
                    'bumpDepth': 0.1,
                    'bumpValue': {
                        'output': 'outValue',
                        'type': 'aiNoise',
                        'class': 'asUtility',
                        'attr': {
                            'scaleX': 4,
                            'scaleY': 1,
                            'scaleZ': 4,
                        }
                    }
                }
            }
        }
    }
    shader = auxiliary.create_shader(shader_tree, shader_name)
    for node in selection:
        # assign it to the stand in
        pm.select(node)
        pm.hyperShade(assign=shader)
@classmethod
def create_ea_matte(cls):
    """creates "ebesinin ami" matte shader with opacity for selected
    objects.

    It is called "EA Matte" for one reason, this matte is not necessary in
    normal working conditions. That is you change the color and look of
    some 3D element in 3D application and do an artistic grading at post to
    the whole plate, not to individual elements in the render.

    And because we are forced to create this matte layer, we thought that
    we should give it a proper name.
    """
    # get the selected objects
    # for each object create a new surface shader with the opacity
    # channel having the opacity of the original shader
    # create a lut for objects that have the same material not to cause
    # multiple materials to be created
    #
    # NOTE(review): the per-object surface-shader creation described in
    # the docstring and the comment above is not implemented here; the
    # body only applies render-layer overrides -- confirm whether the
    # shader part lives elsewhere or was never finished.

    # override the Arnold render globals on the current render layer:
    # keep AA but strip GI, textures, lights and shadows
    daro = pm.PyNode('defaultArnoldRenderOptions')
    attrs = {
        'AASamples': 4,

        'GIDiffuseSamples': 0,
        'GIGlossySamples': 0,
        'GIRefractionSamples': 0,
        'sssBssrdfSamples': 0,
        'volumeIndirectSamples': 0,

        'GITotalDepth': 0,
        'GIDiffuseDepth': 0,
        'GIGlossyDepth': 0,
        'GIReflectionDepth': 0,
        'GIRefractionDepth': 0,
        'GIVolumeDepth': 0,

        'ignoreTextures': 1,
        'ignoreAtmosphere': 1,
        'ignoreLights': 1,
        'ignoreShadows': 1,
        'ignoreBump': 1,
        'ignoreSss': 1,
    }
    for attr in attrs:
        # register a render-layer adjustment so the override is
        # local to the current layer
        pm.editRenderLayerAdjustment(daro.attr(attr))
        daro.setAttr(attr, attrs[attr])

    # disable the Z and motion-vector AOVs on this layer when they exist
    try:
        aov_z = pm.PyNode('aiAOV_Z')
        pm.editRenderLayerAdjustment(aov_z.attr('enabled'))
        aov_z.setAttr('enabled', 0)
    except pm.MayaNodeError:
        pass

    try:
        aov_mv = pm.PyNode('aiAOV_motionvector')
        pm.editRenderLayerAdjustment(aov_mv.attr('enabled'))
        aov_mv.setAttr('enabled', 0)
    except pm.MayaNodeError:
        pass

    # keep the full frame (no autocrop) for matte output
    dad = pm.PyNode('defaultArnoldDriver')
    pm.editRenderLayerAdjustment(dad.attr('autocrop'))
    dad.setAttr('autocrop', 0)
@classmethod
def create_z_layer(cls):
    """creates z layer with arnold render settings

    Overrides the Arnold render globals on the current render layer so
    that only depth/motion information is rendered: shaders, lights and
    all GI are ignored, the Z and motion-vector AOVs are enabled and
    autocrop is turned on.
    """
    daro = pm.PyNode('defaultArnoldRenderOptions')
    # layer overrides: keep AA, strip everything that does not affect
    # the depth output
    attrs = {
        'AASamples': 4,

        'GIDiffuseSamples': 0,
        'GIGlossySamples': 0,
        'GIRefractionSamples': 0,
        'sssBssrdfSamples': 0,
        'volumeIndirectSamples': 0,

        'GITotalDepth': 0,
        'GIDiffuseDepth': 0,
        'GIGlossyDepth': 0,
        'GIReflectionDepth': 0,
        'GIRefractionDepth': 0,
        'GIVolumeDepth': 0,

        'ignoreShaders': 1,
        'ignoreAtmosphere': 1,
        'ignoreLights': 1,
        'ignoreShadows': 1,
        'ignoreBump': 1,
        'ignoreNormalSmoothing': 1,
        'ignoreDof': 1,
        'ignoreSss': 1,
    }
    for attr in attrs:
        # register a render-layer adjustment so the override is
        # local to the current layer
        pm.editRenderLayerAdjustment(daro.attr(attr))
        daro.setAttr(attr, attrs[attr])

    # make sure the Z and motion-vector AOVs are enabled on this layer
    try:
        aov_z = pm.PyNode('aiAOV_Z')
        pm.editRenderLayerAdjustment(aov_z.attr('enabled'))
        aov_z.setAttr('enabled', 1)
    except pm.MayaNodeError:
        pass

    try:
        aov_mv = pm.PyNode('aiAOV_motionvector')
        pm.editRenderLayerAdjustment(aov_mv.attr('enabled'))
        aov_mv.setAttr('enabled', 1)
    except pm.MayaNodeError:
        pass

    dad = pm.PyNode('defaultArnoldDriver')
    pm.editRenderLayerAdjustment(dad.attr('autocrop'))
    dad.setAttr('autocrop', 1)
@classmethod
def generate_reflection_curve(cls):
    """Generates a curve which helps creating specular at the desired point

    Select a vertex and run: a degree-1 curve is drawn from the vertex
    along the reflection of the active camera's view axis around the
    vertex normal, with its pivot at the vertex.
    """
    # fixed: the first parameter of this @classmethod was named ``self``;
    # renamed to ``cls`` for consistency with the other class methods
    from maya.OpenMaya import MVector
    from anima.env.mayaEnv import auxiliary
    vtx = pm.ls(sl=1)[0]
    normal = vtx.getNormal(space='world')
    # view axis of the camera in the active model panel, in world space
    panel = auxiliary.Playblaster.get_active_panel()
    camera = pm.PyNode(pm.modelPanel(panel, q=1, cam=1))
    camera_axis = MVector(0, 0, -1) * camera.worldMatrix.get()

    # reflection of d around n: r = d - 2 (n . d) n
    refl = camera_axis - 2 * normal.dot(camera_axis) * normal

    # create a new curve from the vertex along the reflection vector
    p1 = vtx.getPosition(space='world')
    p2 = p1 + refl

    curve = pm.curve(d=1, p=[p1, p2])

    # move pivot to the first point
    pm.xform(curve, rp=p1, sp=p1)
@classmethod
def import_gpu_content(cls):
    """imports the selected GPU content

    For every selected gpuCache transform the backing Alembic file is
    imported under the same transform; the Alembic "materials" group is
    deleted, the imported geometry is zeroed out, re-parented to world
    and finally selected.
    """
    # fixed: the first parameter of this @classmethod was named ``self``
    # (renamed to ``cls``); the unused assignment of the AbcImport return
    # value was also removed -- it was immediately overwritten below
    import os
    imported_nodes = []
    for node in pm.ls(sl=1):
        gpu_node = node.getShape()
        gpu_path = gpu_node.getAttr('cacheFileName')
        # import the alembic file under the gpuCache transform
        pm.mel.eval(
            'AbcImport -mode import -reparent "%s" "%s";' % (
                node.fullPath(), os.path.expandvars(gpu_path)
            )
        )
        # the imported content is now among the transform's children
        new_nodes = node.getChildren()
        new_nodes.remove(gpu_node)
        imported_node = None
        # filter material node
        for n in new_nodes:
            if n.name() != 'materials':
                imported_node = n
            else:
                # drop the Alembic "materials" group
                pm.delete(n)

        if imported_node:
            # reset transforms and detach from the gpuCache transform
            imported_node.t.set(0, 0, 0)
            imported_node.r.set(0, 0, 0)
            imported_node.s.set(1, 1, 1)
            pm.parent(imported_node, world=1)
            imported_nodes.append(imported_node)
    pm.select(imported_nodes)
@classmethod
def render_slicer(cls):
    """Open the Render Slicer UI, a tool for slicing big render scenes.

    :return: the created UI instance; returned so the caller holds a
        reference and Python does not garbage collect the window
        (previously the instance was a dead local)
    """
    # fixed: the first parameter of this @classmethod was named ``self``;
    # renamed to ``cls`` for consistency with the other class methods
    from anima.env.mayaEnv import render_slicer
    return render_slicer.UI()
@classmethod
def move_cache_files_wrapper(cls, source_driver_field, target_driver_field):
    """UI wrapper around :meth:`.move_cache_files`.

    Reads the source and target drivers from the given text fields and
    forwards them.

    :param source_driver_field: Text field for source driver
    :param target_driver_field: Text field for target driver
    :return:
    """
    Render.move_cache_files(
        source_driver_field.text(),
        target_driver_field.text()
    )
@classmethod
def move_cache_files(cls, source_driver, target_driver):
    """moves the selected cache files to another location

    For every selected AiStandIn (``.ass.gz``) or AiVolume (``.vdb``)
    node whose cache path lives on ``source_driver``, every matching
    cache file (frame expansions of the ``#``-padded path) is moved to
    the same relative path on ``target_driver`` and the node's path
    attribute is updated accordingly.  Progress is reported through the
    Maya main progress bar.

    :param source_driver: path prefix the caches currently live under
    :param target_driver: path prefix the caches should be moved to
    :return:
    """
    #
    # Move fur caches to new server
    #
    import os
    import shutil
    import glob

    from anima.env.mayaEnv import MayaMainProgressBarWrapper
    wrp = MayaMainProgressBarWrapper()
    pdm = ProgressDialogManager(dialog=wrp)

    selected_nodes = pm.ls(sl=1)
    caller = pdm.register(len(selected_nodes), title='Moving Cache Files')
    for node in selected_nodes:
        ass_node = node.getShape()
        # only stand-in and volume shapes carry movable caches
        if not isinstance(ass_node, (pm.nt.AiStandIn, pm.nt.AiVolume)):
            continue

        if isinstance(ass_node, pm.nt.AiStandIn):
            ass_path = ass_node.dso.get()
        elif isinstance(ass_node, pm.nt.AiVolume):
            ass_path = ass_node.filename.get()

        ass_path = os.path.normpath(
            os.path.expandvars(ass_path)
        )

        # give info to user
        caller.title = 'Moving: %s' % ass_path

        # check if it is in the source location
        if source_driver not in ass_path:
            continue

        # check if it contains .ass.gz in its path
        if isinstance(ass_node, pm.nt.AiStandIn):
            if '.ass.gz' not in ass_path:
                continue
        elif isinstance(ass_node, pm.nt.AiVolume):
            if '.vdb' not in ass_path:
                continue

        # get the dirname
        ass_source_dir = os.path.dirname(ass_path)
        ass_target_dir = ass_source_dir.replace(source_driver, target_driver)

        # create the intermediate folders at destination
        try:
            os.makedirs(
                ass_target_dir
            )
        except OSError:
            # dir already exists
            pass

        # get all files list: runs of '#' are frame padding, turn them
        # into a glob wildcard and match any .ass variant
        pattern = re.subn(r'[#]+', '*', ass_path)[0].replace('.ass.gz', '.ass*')
        all_cache_files = glob.glob(pattern)

        inner_caller = pdm.register(len(all_cache_files))
        for source_f in all_cache_files:
            target_f = source_f.replace(source_driver, target_driver)
            # move files to new location
            shutil.move(source_f, target_f)
            inner_caller.step(message='Moving: %s' % source_f)
        inner_caller.end_progress()

        # finally update DSO path on the node itself
        if isinstance(ass_node, pm.nt.AiStandIn):
            ass_node.dso.set(ass_path.replace(source_driver, target_driver))
        elif isinstance(ass_node, pm.nt.AiVolume):
            ass_node.filename.set(
                ass_path.replace(source_driver, target_driver)
            )

        caller.step()
    caller.end_progress()
@classmethod
def generate_rs_from_selection(cls, per_selection=False):
    """generates a temp rs file from selected nodes and hides the selected
    nodes

    The selection is exported to a Redshift proxy (.rs) file under the
    current version's ``Outputs/rs`` folder, the exported nodes are
    hidden and a proxy placeholder is parented under the
    ``temp_rs_proxies_grp`` group.

    :param bool per_selection: Generates one rs file per selected objects
        if True. Default is False.
    """
    import os
    import tempfile
    import shutil
    from anima.env.mayaEnv import auxiliary
    from anima.env import mayaEnv
    m = mayaEnv.Maya()
    v = m.get_current_version()

    nodes = pm.ls(sl=1)

    # find or create the group holding the proxy placeholders
    temp_rs_proxies_grp = None
    if pm.ls('temp_rs_proxies_grp'):
        temp_rs_proxies_grp = pm.ls('temp_rs_proxies_grp')[0]
    else:
        temp_rs_proxies_grp = pm.nt.Transform(name='temp_rs_proxies_grp')

    rs_output_folder_path = os.path.join(
        v.absolute_path,
        'Outputs/rs'
    ).replace('\\', '/')
    try:
        os.makedirs(rs_output_folder_path)
    except OSError:
        # folder already exists
        pass

    def _generate_rs():
        # Export the current selection to a temp .rs file, then move it
        # into the version's Outputs/rs folder, hide the exported nodes
        # and create the proxy placeholder pointing at the file.
        export_command = 'rsProxy -fp "%(path)s" -c -z -sl;'
        temp_rs_full_path = tempfile.mktemp(suffix='.rs')
        rs_full_path = os.path.join(
            rs_output_folder_path,
            os.path.basename(temp_rs_full_path)
        ).replace('\\', '/')
        pm.mel.eval(
            export_command % {
                'path': temp_rs_full_path.replace('\\', '/')
            }
        )
        shutil.move(
            temp_rs_full_path,
            rs_full_path
        )
        # hide the exported nodes
        [n.v.set(0) for n in pm.ls(sl=1)]
        rs_proxy_node, rs_proxy_mesh = auxiliary.create_rs_proxy_node(
            path=rs_full_path)
        rs_proxy_tra = rs_proxy_mesh.getParent()
        rs_proxy_tra.rename('temp_rs_proxy#')
        pm.parent(rs_proxy_tra, temp_rs_proxies_grp)

    if per_selection:
        # one proxy file per selected node
        for node in nodes:
            pm.select(node)
            _generate_rs()
    else:
        # one proxy file for the whole selection
        pm.select(nodes)
        _generate_rs()
* [Maya] Updated ``toolbox`` and ``render.Render.assign_substance_textures`` utility to also support ``BaseColor`` for Diffuse textures.
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2019, Erkan Ozgur Yilmaz
#
# This module is part of anima and is released under the MIT
# License: http://www.opensource.org/licenses/MIT
import re
from anima.env.mayaEnv import auxiliary
from anima.ui.progress_dialog import ProgressDialogManager
from maya import cmds as cmds, mel as mel
from pymel import core as pm
class Render(object):
    """Tools for render
    """

    # Redshift render-option presets used by the GI bake helpers:
    #   'bake' - attribute overrides applied while baking irradiance maps
    #   'orig' - presumably filled at runtime with the values being
    #            replaced so they can be restored; confirm against the
    #            bake/restore methods (not visible here)
    #   'current_frame' - timeline frame to come back to after baking
    rso_options = {
        'bake': {
            # motion blur settings
            'motionBlurEnable': 1,
            'motionBlurDeformationEnable': 1,
            'motionBlurNumTransformationSteps': 31,
            'motionBlurFrameDuration': 100,
            'motionBlurShutterStart': 0,
            'motionBlurShutterEnd': 1,
            'motionBlurShutterPosition': 1,

            # set GI Engines
            'primaryGIEngine': 3,
            'secondaryGIEngine': 2,

            # set file paths
            'irradiancePointCloudMode': 2,  # Rebuild (prepass only)
            'irradianceCacheMode': 2,  # Rebuild (prepass only)
            'irradiancePointCloudFilename': 'Outputs/rs/ipc_baked.rsmap',
            'irradianceCacheFilename': 'Outputs/rs/im_baked.rsmap'
        },
        'orig': {},
        'current_frame': 1
    }
@classmethod
def assign_random_material_color(cls):
    """assigns a lambert with a random color to the selected object
    """
    import random
    from anima.utils import hsv_to_rgb

    selection = pm.selected()

    # build a lambert + shading engine pair
    material = pm.shadingNode('lambert', asShader=1)
    sg = pm.nt.ShadingEngine()
    material.outColor >> sg.surfaceShader

    # pick a random, moderately saturated, not-too-dark color
    hue = random.random()                # 0-1
    sat = random.random() * 0.5 + 0.25   # 0.25-0.75
    val = random.random() * 0.5 + 0.5    # 0.5 - 1
    material.color.set(*hsv_to_rgb(hue, sat, val))

    # assign and restore the selection
    pm.sets(sg, fe=selection)
    pm.select(selection)
@classmethod
def randomize_material_color(cls):
    """randomizes material color of selected nodes

    Materials whose type has no entry in the attribute lookup table
    (currently only ``lambert`` is known) are skipped instead of raising
    a ``KeyError`` as the original code did.
    """
    import random
    from anima.utils import hsv_to_rgb

    selected = pm.selected()

    # gather the unique materials driving the selected nodes
    all_materials = []
    for node in selected:
        shading_engines = node.listHistory(f=1, type='shadingEngine')
        if not shading_engines:
            continue
        shading_engine = shading_engines[0]
        for material in shading_engine.surfaceShader.inputs():
            if material not in all_materials:
                all_materials.append(material)

    # which attribute holds the base color, per material type
    attr_lut = {
        'lambert': 'color',
    }

    for material in all_materials:
        attr_name = attr_lut.get(material.type())
        if attr_name is None:
            # unsupported material type -- previously raised KeyError
            continue
        h = random.random()               # 0-1
        s = random.random() * 0.5 + 0.25  # 0.25-0.75
        v = random.random() * 0.5 + 0.5   # 0.5 - 1
        r, g, b = hsv_to_rgb(h, s, v)
        material.attr(attr_name).set(r, g, b)
@classmethod
def vertigo_setup_look_at(cls):
    """Create the look-at locator required by the Vertigo effect for the
    selected camera.
    """
    from anima.env.mayaEnv import vertigo
    camera = pm.ls(sl=1)[0]
    vertigo.setup_look_at(camera)
@classmethod
def vertigo_setup_vertigo(cls):
    """Set up the Vertigo (dolly-zoom) effect on the selected camera."""
    from anima.env.mayaEnv import vertigo
    camera = pm.ls(sl=1)[0]
    vertigo.setup_vertigo(camera)
@classmethod
def vertigo_delete(cls):
    """Tear down the Vertigo setup of the selected camera."""
    from anima.env.mayaEnv import vertigo
    camera = pm.ls(sl=1)[0]
    vertigo.delete(camera)
@classmethod
def duplicate_with_connections(cls):
    """Duplicate the selected nodes keeping their input connections to
    the rest of the network.

    :return: the list of duplicated root nodes
    """
    # long flag names for readability (ic/rr in the original)
    return pm.duplicate(inputConnections=True, returnRootsOnly=True)
@classmethod
def duplicate_input_graph(cls):
    """Duplicate the selected nodes together with their entire upstream
    (input) graph.

    :return: the list of duplicated root nodes
    """
    # long flag names for readability (un/rr in the original)
    return pm.duplicate(upstreamNodes=True, returnRootsOnly=True)
@classmethod
def delete_render_and_display_layers(cls):
    """Delete every display layer and every render layer in the current
    scene.
    """
    cls.delete_display_layers()
    cls.delete_render_layers()
@classmethod
def delete_display_layers(cls):
    """Remove all display layers from the current scene."""
    from anima.env.mayaEnv import auxiliary
    # deleting layers while a non-default render layer is active can
    # leave them non-deletable, so switch to the default layer first
    auxiliary.switch_to_default_render_layer()
    display_layers = pm.ls(type=['displayLayer'])
    pm.delete(display_layers)
@classmethod
def delete_render_layers(cls):
    """Remove all render layers from the current scene."""
    from anima.env.mayaEnv import auxiliary
    # deleting layers while a non-default render layer is active can
    # leave them non-deletable, so switch to the default layer first
    auxiliary.switch_to_default_render_layer()
    render_layers = pm.ls(type=['renderLayer'])
    pm.delete(render_layers)
@classmethod
def delete_unused_shading_nodes(cls):
    """Deletes unused shading nodes

    Runs Maya's own Hypershade "Delete Unused Nodes" MEL command.
    """
    pm.mel.eval('MLdeleteUnused')
@classmethod
def normalize_texture_paths(cls):
    """Expand environment variables in all file-texture paths.

    The node's color space is captured before the path is rewritten and
    restored afterwards (presumably because setting ``fileTextureName``
    can reset it).
    """
    import os
    for file_node in pm.ls(type='file'):
        has_color_space = file_node.hasAttr('colorSpace')
        color_space = file_node.colorSpace.get() if has_color_space else None
        expanded = os.path.expandvars(file_node.fileTextureName.get())
        file_node.fileTextureName.set(expanded)
        if has_color_space:
            file_node.colorSpace.set(color_space)
@classmethod
def unnormalize_texture_paths(cls):
    """Contracts the environment variables in texture paths by adding
    the repository environment variable to the file paths
    """
    from anima.env import mayaEnv
    mayaEnv.Maya().replace_external_paths()
@classmethod
def assign_substance_textures(cls):
    """Auto assigns Substance textures to the selected materials.

    The user picks a texture folder, then for every selected material the
    textures matching the ``{material_name}_{channel}*`` naming convention
    are connected to the corresponding material channels.

    Supports both Arnold (aiStandardSurface) and Redshift
    (RedshiftMaterial) materials.
    """
    import glob

    materials = pm.selected()

    # ask the texture folder
    dialog_result = pm.fileDialog2(
        cap="Choose Texture Folder", okc="Choose", fm=2
    )
    if not dialog_result:
        # dialog cancelled; previously this raised TypeError on None[0]
        return
    texture_path = dialog_result[0]

    for material in materials:
        # textures should start with the same name of the material
        material_name = material.name().split(':')[-1]  # strip namespaces
        print("material.name: %s" % material_name)
        pattern = "%s/%s_*" % (texture_path, material_name)
        print("pattern: %s" % pattern)
        files = glob.glob(pattern)
        print(files)
        # TODO: Make it beautiful by using the auxiliary.create_shader()
        # For now do it ugly!
        # NOTE(review): Maya node type names carry a lowercase "ai"
        # prefix (cf. the 'aiStandard' comparison elsewhere in this
        # class); the previous "AiStandardSurface" spelling never matched.
        if material.type() == "aiStandardSurface":
            # *********************************************
            # BaseColor
            base_color_file_path = glob.glob(
                "%s/%s_BaseColor*" % (texture_path, material_name)
            )
            if base_color_file_path:
                # fix diffuse weight
                material.base.set(1)
                base_color_file_path = base_color_file_path[0]
                base_color_file = pm.shadingNode('file', asTexture=1)
                base_color_file.fileTextureName.set(base_color_file_path)
                base_color_file.colorSpace.set('sRGB')
                base_color_file.outColor >> material.baseColor

            # *********************************************
            # Height
            height_file_path = glob.glob(
                "%s/%s_Height*" % (texture_path, material_name)
            )
            if height_file_path:
                height_file_path = height_file_path[0]
                # create a displacement node on the shading engine
                shading_node = \
                    material.attr("outColor").outputs(type="shadingEngine")[0]
                disp_shader = pm.shadingNode('displacementShader', asShader=1)
                disp_shader.displacement >> shading_node.displacementShader
                # create texture
                disp_file = pm.shadingNode('file', asTexture=1)
                disp_file.fileTextureName.set(height_file_path)
                disp_file.colorSpace.set('Raw')
                disp_file.alphaIsLuminance.set(1)
                disp_file.outAlpha >> disp_shader.displacement

            # *********************************************
            # Metalness
            metalness_file_path = glob.glob(
                "%s/%s_Metalness*" % (texture_path, material_name)
            )
            if metalness_file_path:
                metalness_file_path = metalness_file_path[0]
                metalness_file = pm.shadingNode("file", asTexture=1)
                metalness_file.fileTextureName.set(metalness_file_path)
                metalness_file.colorSpace.set('Raw')
                metalness_file.alphaIsLuminance.set(1)
                metalness_file.outAlpha >> material.metalness

            # *********************************************
            # Normal (through an aiNormalMap utility)
            normal_file_path = glob.glob(
                "%s/%s_Normal*" % (texture_path, material_name)
            )
            if normal_file_path:
                normal_file_path = normal_file_path[0]
                normal_ai_normalmap = pm.shadingNode("aiNormalMap", asUtility=1)
                normal_file = pm.shadingNode("file", asTexture=1)
                normal_file.fileTextureName.set(normal_file_path)
                normal_file.colorSpace.set('Raw')
                normal_file.outColor >> normal_ai_normalmap.input
                normal_ai_normalmap.outValue >> material.normalCamera

            # *********************************************
            # Roughness (specularRoughness)
            roughness_file_path = glob.glob(
                "%s/%s_Roughness*" % (texture_path, material_name)
            )
            if roughness_file_path:
                roughness_file_path = roughness_file_path[0]
                roughness_file = pm.shadingNode("file", asTexture=1)
                roughness_file.fileTextureName.set(roughness_file_path)
                roughness_file.colorSpace.set('Raw')
                roughness_file.alphaIsLuminance.set(1)
                roughness_file.outAlpha >> material.specularRoughness

        elif material.type() == "RedshiftMaterial":
            # *********************************************
            # Diffuse
            diffuse_color_file_path = glob.glob(
                "%s/%s_Diffuse*" % (texture_path, material_name))
            if diffuse_color_file_path:
                diffuse_color_file_path = diffuse_color_file_path[0]
                diffuse_color_file = pm.shadingNode('file', asTexture=1)
                diffuse_color_file.fileTextureName.set(
                    diffuse_color_file_path
                )
                diffuse_color_file.colorSpace.set('sRGB')
                diffuse_color_file.outColor >> material.diffuse_color

            # Accept also BaseColor for the diffuse channel
            base_color_file_path = glob.glob(
                "%s/%s_BaseColor*" % (texture_path, material_name))
            if base_color_file_path:
                base_color_file_path = base_color_file_path[0]
                base_color_file = pm.shadingNode('file', asTexture=1)
                base_color_file.fileTextureName.set(
                    base_color_file_path
                )
                base_color_file.colorSpace.set('sRGB')
                base_color_file.outColor >> material.diffuse_color

            # *********************************************
            # Height
            height_file_path = glob.glob(
                "%s/%s_Height*" % (texture_path, material_name))
            if height_file_path:
                height_file_path = height_file_path[0]
                # create a displacement node on the shading engine
                shading_node = \
                    material.attr("outColor").outputs(type="shadingEngine")[0]
                disp_shader = \
                    pm.shadingNode('displacementShader', asShader=1)
                disp_shader.displacement >> shading_node.displacementShader
                # create texture
                disp_file = pm.shadingNode('file', asTexture=1)
                disp_file.fileTextureName.set(height_file_path)
                disp_file.colorSpace.set('Raw')
                disp_file.alphaIsLuminance.set(1)
                disp_file.outAlpha >> disp_shader.displacement

            # *********************************************
            # Metalness
            # set material BRDF to GGX and set fresnel type to metalness
            material.refl_brdf.set(1)
            material.refl_fresnel_mode.set(2)
            metalness_file_path = glob.glob(
                "%s/%s_Metal*" % (texture_path, material_name))
            if metalness_file_path:
                metalness_file_path = metalness_file_path[0]
                metalness_file = pm.shadingNode("file", asTexture=1)
                metalness_file.fileTextureName.set(metalness_file_path)
                metalness_file.colorSpace.set('Raw')
                metalness_file.alphaIsLuminance.set(1)
                metalness_file.outAlpha >> material.refl_metalness

            # *********************************************
            # Reflectivity
            reflectivity_file_path = glob.glob(
                "%s/%s_Reflectivity*" % (texture_path, material_name))
            if reflectivity_file_path:
                reflectivity_file_path = reflectivity_file_path[0]
                reflectivity_file = pm.shadingNode("file", asTexture=1)
                reflectivity_file.fileTextureName.set(
                    reflectivity_file_path
                )
                reflectivity_file.colorSpace.set('sRGB')
                reflectivity_file.alphaIsLuminance.set(1)
                reflectivity_file.outColor >> material.refl_reflectivity

            # *********************************************
            # Normal (through a RedshiftBumpMap utility)
            normal_file_path = glob.glob(
                "%s/%s_Normal*" % (texture_path, material_name))
            if normal_file_path:
                normal_file_path = normal_file_path[0]
                rs_bump_map = pm.shadingNode("RedshiftBumpMap", asUtility=1)
                # set to tangent-space normals
                rs_bump_map.inputType.set(1)
                normal_file = pm.shadingNode("file", asTexture=1)
                normal_file.fileTextureName.set(normal_file_path)
                normal_file.colorSpace.set('Raw')
                normal_file.outColor >> rs_bump_map.input
                rs_bump_map.out >> material.bump_input
                rs_bump_map.scale.set(1)

            # *********************************************
            # Roughness
            roughness_file_path = glob.glob(
                "%s/%s_Roughness*" % (texture_path, material_name))
            if roughness_file_path:
                roughness_file_path = roughness_file_path[0]
                roughness_file = pm.shadingNode("file", asTexture=1)
                roughness_file.fileTextureName.set(roughness_file_path)
                roughness_file.colorSpace.set('Raw')
                roughness_file.alphaIsLuminance.set(1)
                roughness_file.outAlpha >> material.refl_roughness
@classmethod
def redshift_ic_ipc_bake(cls):
    """Configures the Redshift render settings for an IC + IPC bake and
    fires a render at the first frame of the playback range.
    """
    # derive the motion blur duration from the playback range
    first_frame = int(pm.playbackOptions(q=True, ast=True))
    last_frame = int(pm.playbackOptions(q=True, aet=True))
    cls.rso_options['bake']['motionBlurFrameDuration'] = \
        last_frame - first_frame + 1
    rso = pm.PyNode('redshiftOptions')
    # remember the current values before applying the bake settings so
    # they can be restored later by redshift_ic_ipc_bake_restore()
    bake_settings = cls.rso_options['bake']
    original_settings = cls.rso_options['orig']
    for attr_name in bake_settings:
        original_settings[attr_name] = rso.attr(attr_name).get()
        rso.attr(attr_name).set(bake_settings[attr_name])
    # remember the time slider position and jump to the first frame
    cls.rso_options['current_frame'] = pm.currentTime(q=1)
    pm.currentTime(first_frame)
    # kick off the render
    pm.mel.eval('rsRender -render -rv -cam "<renderview>";')
@classmethod
def redshift_ic_ipc_bake_restore(cls):
    """Restores the render settings stored by redshift_ic_ipc_bake and
    switches the GI caches into "Load" mode.
    """
    rso = pm.PyNode('redshiftOptions')
    # revert every stored attribute
    original_settings = cls.rso_options['orig']
    for attr_name in original_settings:
        rso.attr(attr_name).set(original_settings[attr_name])
    bake_settings = cls.rso_options['bake']
    # keep the GI engines as they were during the bake
    rso.primaryGIEngine.set(bake_settings['primaryGIEngine'])
    rso.secondaryGIEngine.set(bake_settings['secondaryGIEngine'])
    # switch both caches to "Load" mode
    rso.irradiancePointCloudMode.set(1)  # Load
    rso.irradianceCacheMode.set(1)  # Load
    # point the caches to the files written during the bake
    rso.irradiancePointCloudFilename.set(
        bake_settings['irradiancePointCloudFilename']
    )
    rso.irradianceCacheFilename.set(bake_settings['irradianceCacheFilename'])
    # jump back to where the time slider was
    pm.currentTime(cls.rso_options['current_frame'])
@classmethod
def update_render_settings(cls):
    """Refreshes the render output filename for the current renderer
    from the current version.
    """
    from anima.env import mayaEnv
    maya_env = mayaEnv.Maya()
    current_version = maya_env.get_current_version()
    if not current_version:
        # no version info available, nothing to update
        return
    maya_env.set_render_filename(version=current_version)
@classmethod
def afanasy_job_submitter(cls):
    """Opens the Afanasy job submitter UI."""
    from anima.env.mayaEnv import afanasy
    submitter_ui = afanasy.UI()
    submitter_ui.show()
@classmethod
def auto_convert_to_redshift(cls):
    """Converts the whole current scene to Redshift."""
    from anima.env.mayaEnv import ai2rs
    conversion_manager = ai2rs.ConversionManager()
    conversion_manager.auto_convert()
@classmethod
def convert_nodes_to_redshift(cls):
    """Converts only the currently selected nodes to Redshift."""
    from anima.env.mayaEnv import ai2rs
    conversion_manager = ai2rs.ConversionManager()
    for selected_node in pm.selected():
        conversion_manager.convert(selected_node)
@classmethod
def standin_to_bbox(cls):
    """Sets the display mode of the selected aiStandIn nodes to
    Bounding Box (mode 0).
    """
    # plain loop instead of a side-effect list comprehension
    for node in pm.ls(sl=1):
        if isinstance(node.getShape(), pm.nt.AiStandIn):
            node.mode.set(0)
@classmethod
def standin_to_polywire(cls):
    """Sets the display mode of the selected aiStandIn nodes to
    Polywire (mode 2).

    The old docstring wrongly said "bbox"; mode 2 is polywire.
    """
    # plain loop instead of a side-effect list comprehension
    for node in pm.ls(sl=1):
        if isinstance(node.getShape(), pm.nt.AiStandIn):
            node.mode.set(2)
@classmethod
def add_miLabel(cls):
    """Adds a keyable integer ``miLabel`` attribute to the selected
    transform nodes, skipping nodes that already have it.
    """
    for node in pm.ls(sl=1):
        # Maya node type names are lower camel case (cf. the
        # 'transform' comparison in open_node_in_browser); the previous
        # comparison against 'Transform' never matched anything
        if node.type() == 'transform':
            if not node.hasAttr('miLabel'):
                pm.addAttr(node, ln='miLabel', at='long', keyable=True)
@classmethod
def connect_facingRatio_to_vCoord(cls):
    """Connects the facingRatio of the first selected node to the
    vCoord of every other selected node.
    """
    nodes = pm.ls(sl=1)
    for target in nodes[1:]:
        nodes[0].facingRatio.connect((target + '.vCoord'), force=True)
@classmethod
def set_shape_attribute(cls, attr_name, value, apply_to_hierarchy,
                        disable_undo_queue=False):
    """sets shape attributes

    :param attr_name: the name of the render attribute to set
    :param value: the value to set; the special value -1 removes any
      render layer adjustments instead of setting a value
    :param apply_to_hierarchy: when True the whole hierarchy below the
      selection is processed
    :param disable_undo_queue: temporarily turns the undo queue off
      while working; the previous undo state is restored at the end
    """
    undo_state = pm.undoInfo(q=1, st=1)
    if disable_undo_queue:
        pm.undoInfo(st=False)
    # only these shape types are processed
    supported_shapes = [
        'aiStandIn',
        'mesh',
        'nurbsCurve'
    ]
    # maps a render attribute to its "override" companion attribute as
    # found on aiStandIn nodes
    attr_mapper = {
        'castsShadows': 'overrideCastsShadows',
        'receiveShadows': 'overrideReceiveShadows',
        'primaryVisibility': 'overridePrimaryVisibility',
        'visibleInReflections': 'overrideVisibleInReflections',
        'visibleInRefractions': 'overrideVisibleInRefractions',
        'doubleSided': 'overrideDoubleSided',
        'aiSelfShadows': 'overrideSelfShadows',
        'aiOpaque': 'overrideOpaque',
        'aiVisibleInDiffuse': 'overrideVisibleInDiffuse',
        'aiVisibleInGlossy': 'overrideVisibleInGlossy',
        'aiMatte': 'overrideMatte',
    }
    pre_selection_list = pm.ls(sl=1)
    if apply_to_hierarchy:
        pm.select(hierarchy=1)
    objects = pm.ls(sl=1, type=supported_shapes)
    # get override_attr_name from dictionary
    if attr_name in attr_mapper:
        override_attr_name = attr_mapper[attr_name]
    else:
        override_attr_name = None
    # register a caller
    from anima.env.mayaEnv import MayaMainProgressBarWrapper
    wrp = MayaMainProgressBarWrapper()
    pdm = ProgressDialogManager(dialog=wrp)
    # only show the progress UI when there are more than a few objects
    pdm.use_ui = True if len(objects) > 3 else False
    caller = pdm.register(len(objects), 'Setting Shape Attribute')
    layers = pm.ls(type='renderLayer')
    is_default_layer = \
        layers[0].currentLayer() == layers[0].defaultRenderLayer()
    if value != -1:
        for item in objects:
            attr_full_name = '%s.%s' % (item.name(), attr_name)
            override_attr_full_name = '%s.%s' % (item.name(), override_attr_name)
            caller.step(message=attr_full_name)
            # create a render layer adjustment when not on the default
            # render layer so the change is per-layer
            if not is_default_layer:
                pm.editRenderLayerAdjustment(attr_full_name)
            item.setAttr(attr_name, value)
            # if there is an accompanying override attribute like it is
            # found in aiStandIn node
            # then also set override{Attr} to True
            if override_attr_name \
               and cmds.attributeQuery(override_attr_name, n=item.name(), ex=1):
                if not is_default_layer:
                    pm.editRenderLayerAdjustment(
                        override_attr_full_name
                    )
                item.setAttr(override_attr_name, True)
    else:
        # value == -1: strip the adjustments instead of setting a value
        for item in objects:
            attr_full_name = '%s.%s' % (item.name(), attr_name)
            override_attr_full_name = '%s.%s' % (item.name(), override_attr_name)
            caller.step(message=attr_full_name)
            # remove any overrides
            if not is_default_layer:
                pm.editRenderLayerAdjustment(
                    attr_full_name,
                    remove=1
                )
            if override_attr_name \
               and cmds.attributeQuery(override_attr_name, n=item.name(), ex=1) \
               and not is_default_layer:
                pm.editRenderLayerAdjustment(
                    override_attr_full_name,
                    remove=1
                )
    # caller.end_progress()
    pm.undoInfo(st=undo_state)
    pm.select(pre_selection_list)
@classmethod
def set_finalGatherHide(cls, value):
    """Sets the ``miFinalGatherHide`` attribute on the selected mesh and
    nurbsSurface shapes, adding the attribute when it is missing.

    :param value: the value to set ``miFinalGatherHide`` to
    """
    attr_name = "miFinalGatherHide"
    for selected in pm.ls(sl=1):
        # resolve transforms down to their shapes
        shape = selected.getShape() \
            if isinstance(selected, pm.nt.Transform) else selected
        if not isinstance(shape, (pm.nt.Mesh, pm.nt.NurbsSurface)):
            continue
        # create the attribute on demand
        if not shape.hasAttr(attr_name):
            pm.addAttr(shape, ln=attr_name, at="long", min=0, max=1, k=1)
        selected.setAttr(attr_name, value)
@classmethod
def replace_shaders_with_last(cls):
    """Assigns the last selected shader to all the objects using the
    other selected shaders.
    """
    sel_list = pm.ls(sl=1)
    # with less than two shaders there is nothing to replace; this also
    # prevents an IndexError on an empty selection
    if len(sel_list) < 2:
        return
    target_node = sel_list[-1]
    for node in sel_list[:-1]:
        # select everything using this shader and assign the target
        pm.hyperShade(objects=node)
        pm.hyperShade(assign=target_node)
    pm.select(None)
@classmethod
def create_texture_ref_object(cls):
    """Creates a texture reference object for every selected node and
    restores the original selection afterwards.
    """
    original_selection = pm.ls(sl=1)
    for node in original_selection:
        # the runtime command works on the current selection
        pm.select(node)
        pm.runtime.CreateTextureReferenceObject()
    pm.select(original_selection)
@classmethod
def use_mib_texture_filter_lookup(cls):
    """Adds texture filter lookup node to the selected file texture nodes for
    better texture filtering.

    The function is smart enough to use the existing nodes, if there is a
    connection from the selected file nodes to a mib_texture_filter_lookup node
    then it will not create any new node and just use the existing ones.

    It will also not create any place2dTexture nodes if the file node doesn't
    have a place2dTexture node but is connected to a filter lookup node which
    already has a connection to a place2dTexture node.
    """
    file_nodes = pm.ls(sl=1, type="file")
    for file_node in file_nodes:
        # set the filter type to none
        file_node.filterType.set(0)
        # check if it is already connected to a mib_texture_filter_lookup node
        message_outputs = \
            file_node.message.outputs(type="mib_texture_filter_lookup")
        if len(message_outputs):
            # use the first one
            mib_texture_filter_lookup = message_outputs[0]
        else:
            # create a texture filter lookup node
            mib_texture_filter_lookup = \
                pm.createNode("mib_texture_filter_lookup")
            # do the connection
            file_node.message >> mib_texture_filter_lookup.tex
        # check if the mib_texture_filter_lookup has any connection to a
        # placement node
        mib_t_f_l_to_placement = \
            mib_texture_filter_lookup.inputs(type="place2dTexture")
        placement_node = None
        if len(mib_t_f_l_to_placement):
            # do nothing, reuse the placement already feeding the lookup
            placement_node = mib_t_f_l_to_placement[0].node()
        else:
            # get the texture placement
            placement_connections = \
                file_node.inputs(type="place2dTexture", p=1, c=1)
            # if there is no placement create one
            placement_node = None
            if len(placement_connections):
                placement_node = placement_connections[0][1].node()
                # disconnect connections from placement to file node
                for conn in placement_connections:
                    conn[1] // conn[0]
            else:
                placement_node = pm.createNode("place2dTexture")
            # connect placement to mr_texture_filter_lookup
            placement_node.outU >> mib_texture_filter_lookup.coordX
            placement_node.outV >> mib_texture_filter_lookup.coordY
        # connect color: re-route existing consumers through the lookup
        for output in file_node.outColor.outputs(p=1):
            mib_texture_filter_lookup.outValue >> output
        # connect alpha
        for output in file_node.outAlpha.outputs(p=1):
            mib_texture_filter_lookup.outValueA >> output
@classmethod
def convert_to_linear(cls):
    """Inserts a reversed mip_gamma_gain (gamma 2.2) after each selected
    node so that its output becomes linear.
    """
    nodes = pm.ls(sl=1)
    for texture_node in nodes:
        # remember the downstream plugs before rewiring
        downstream_plugs = texture_node.outputs(plugs=True)
        if not len(downstream_plugs):
            continue
        # build the de-gamma node
        gamma_node = pm.createNode('mip_gamma_gain')
        gamma_node.setAttr('gamma', 2.2)
        gamma_node.setAttr('reverse', True)
        # wire the node into the gamma node; mental ray textures expose
        # outValue/outValueA, everything else falls back to outColor
        try:
            texture_node.outValue >> gamma_node.input
            texture_node.outValueA >> gamma_node.inputA
        except AttributeError:
            texture_node.outColor >> gamma_node.input
        # re-route the original outputs through the gamma node
        for plug in downstream_plugs:
            try:
                gamma_node.outValue >> plug
            except RuntimeError:
                gamma_node.outValueA >> plug
    pm.select(nodes)
@classmethod
def use_image_sequence(cls):
    """creates an expression to make the mentalrayTexture node also able
    to read image sequences

    Select your mentalrayTexture nodes and then run the script. The
    filename should use the file.%nd.ext format
    """
    textures = pm.ls(sl=1, type="mentalrayTexture")
    for texture in textures:
        # get the filename
        filename = texture.getAttr("fileTextureName")
        splits = filename.split(".")
        if len(splits) == 3:
            base = ".".join(splits[0:-2]) + "."
            pad = len(splits[-2])
            # use the real extension of the file; previously the
            # extension was computed but a hard-coded ".tga" was used
            extension = "." + splits[-1]
            expr = (
                'string $padded_frame = python("\'%0' + str(pad) +
                'd\'%" + string(frame));\n'
                'string $filename = "' + base + '" + $padded_frame + "' +
                extension + '";\n'
                'setAttr -type "string" ' + texture.name() +
                '.fileTextureName $filename;\n'
            )
            # create the expression
            pm.expression(s=expr)
@classmethod
def add_to_selected_container(cls):
    """Adds the selected nodes to the selected container.

    With no container selected a brand new container is created; with
    several containers selected everything is merged into the last one.
    """
    selection = pm.ls(sl=1)
    containers = pm.ls(sl=1, con=1)
    non_containers = list(set(selection) - set(containers))
    container_count = len(containers)
    if container_count == 0:
        # no container in the selection, wrap everything in a new one
        pm.container(addNode=selection)
    elif container_count == 1:
        # add the plain nodes to the single selected container
        pm.container(containers, edit=True, addNode=non_containers)
    else:
        # nest every other container inside the last selected one ...
        for nested_container in containers[:-1]:
            pm.container(containers[-1], edit=True, f=True,
                         addNode=nested_container)
        # ... and finally add the plain nodes as well
        pm.container(containers[-1], edit=True, f=True,
                     addNode=non_containers)
@classmethod
def remove_from_container(cls):
    """Removes each selected node from the container it belongs to."""
    for node in pm.ls(sl=1):
        parent_container = pm.container(q=True, fc=node)
        pm.container(parent_container, edit=True, removeNode=node)
@classmethod
def reload_file_textures(cls):
    """Forces every file texture node in the scene to reload its image."""
    for file_node in pm.ls(type="file"):
        # same MEL command the Attribute Editor "Reload" button runs
        mel.eval('AEfileTextureReloadCmd(%s.fileTextureName)' % file_node)
@classmethod
def transfer_shaders(cls):
    """transfer shaders between selected objects. It can search for
    hierarchies both in source and target sides.

    Select the source node first and the target node second. When both
    nodes directly carry a shape the shader is assigned directly,
    otherwise the two hierarchies are matched node by node.
    """
    selection = pm.ls(sl=1)
    pm.select(None)
    # first selected is the source, second is the target
    source = selection[0]
    target = selection[1]
    # auxiliary.transfer_shaders(source, target)
    # pm.select(selection)
    # check if they are direct parents of mesh or nurbs shapes
    source_shape = source.getShape()
    target_shape = target.getShape()
    if source_shape and target_shape:
        # do a direct assignment from source to target
        shading_engines = source_shape.outputs(type=pm.nt.ShadingEngine)
        pm.sets(shading_engines[0], fe=target)
        pm.select(selection)
        return
    # otherwise match the two hierarchies node by node
    lut = auxiliary.match_hierarchy(source, target)
    # render related attributes copied along with the shader assignment
    attr_names = [
        'castsShadows',
        'receiveShadows',
        'motionBlur',
        'primaryVisibility',
        'smoothShading',
        'visibleInReflections',
        'visibleInRefractions',
        'doubleSided',
        'opposite',
        'aiSelfShadows',
        'aiOpaque',
        'aiVisibleInDiffuse',
        'aiVisibleInGlossy',
        'aiExportTangents',
        'aiExportColors',
        'aiExportRefPoints',
        'aiExportRefNormals',
        'aiExportRefTangents',
        'color',
        'interpolation',
        'aiTranslator',
        'intensity',
        'aiExposure',
        'aiColorTemperature',
        'emitDiffuse',
        'emitSpecular',
        'aiDecayType',
        'lightVisible',
        'aiSamples',
        'aiNormalize',
        'aiCastShadows',
        'aiShadowDensity',
        'aiShadowColor',
        'aiAffectVolumetrics',
        'aiCastVolumetricShadows',
        'aiVolumeSamples',
        'aiDiffuse',
        'aiSpecular',
        'aiSss',
        'aiIndirect',
        'aiMaxBounces',
        'aiSubdivType',
        'aiSubdivIterations',
        'aiSubdivAdaptiveMetric',
        'aiSubdivPixelError',
        'aiSubdivUvSmoothing',
        'aiSubdivSmoothDerivs',
        'aiDispHeight',
        'aiDispPadding',
        'aiDispZeroValue',
        'aiDispAutobump',
        'aiStepSize',
        'rsEnableSubdivision',
        'rsSubdivisionRule',
        'rsScreenSpaceAdaptive',
        'rsDoSmoothSubdivision',
        'rsMinTessellationLength',
        'rsMaxTessellationSubdivs',
        'rsOutOfFrustumTessellationFactor',
        'rsLimitOutOfFrustumTessellation',
        'rsMaxOutOfFrustumTessellationSubdivs',
        'rsEnableDisplacement',
        'rsMaxDisplacement',
        'rsDisplacementScale',
        'rsAutoBumpMap',
        'rsObjectId',
    ]
    # from anima.ui import progress_dialog
    # from anima.env.mayaEnv import MayaMainProgressBarWrapper
    # wrp = MayaMainProgressBarWrapper()
    # pdm = progress_dialog.ProgressDialogManager(dialog=wrp)
    # caller = pdm.register(2, title='Transferring materials')
    for source_node, target_node in lut['match']:
        auxiliary.transfer_shaders(source_node, target_node)
        # also transfer render attributes
        for attr_name in attr_names:
            # nodes that lack the attribute are silently skipped
            try:
                target_node.setAttr(
                    attr_name,
                    source_node.getAttr(attr_name)
                )
            except (pm.MayaAttributeError, RuntimeError):
                pass
            # input connections to attributes
            try:
                for plug in source_node.attr(attr_name).inputs(p=1):
                    plug >> target_node.attr(attr_name)
            except pm.MayaAttributeError:
                pass
        # caller.step()
    # caller.end_progress()
    if len(lut['no_match']):
        # leave the unmatched nodes selected so the user can inspect them
        pm.select(lut['no_match'])
        print(
            'The following nodes has no corresponding source:\n%s' % (
                '\n'.join(
                    [node.name() for node in lut['no_match']]
                )
            )
        )
@classmethod
def fit_placement_to_UV(cls):
    """Fits the selected place2dTexture nodes to the bounding box of the
    selected UVs.
    """
    selection = pm.ls(sl=1, fl=1)
    uvs = [n for n in selection if isinstance(n, pm.general.MeshUV)]
    placements = \
        [p for p in selection if isinstance(p, pm.nt.Place2dTexture)]
    # start from sentinel extremes so the min/max comparisons work
    min_u = 1000
    min_v = 1000
    max_u = -1000
    max_v = -1000
    for uv in uvs:
        u, v = pm.polyEditUV(uv, q=1)
        min_u = min(min_u, u)
        max_u = max(max_u, u)
        min_v = min(min_v, v)
        max_v = max(max_v, v)
    for placement in placements:
        placement.setAttr('coverage', (max_u - min_u, max_v - min_v))
        placement.setAttr('translateFrame', (min_u, min_v))
@classmethod
def connect_placement2d_to_file(cls):
    """Connects the selected place2dTexture node to every selected file
    texture node, wiring up all the standard placement attributes.
    """
    # plain strings connect same-named attributes, tuples map a source
    # attribute to a differently named target attribute
    attr_lut = [
        'coverage',
        'translateFrame',
        'rotateFrame',
        'mirrorU',
        'mirrorV',
        'stagger',
        'wrapU',
        'wrapV',
        'repeatUV',
        'offset',
        'rotateUV',
        'noiseUV',
        'vertexUvOne',
        'vertexUvTwo',
        'vertexUvThree',
        'vertexCameraOne',
        ('outUV', 'uvCoord'),
        ('outUvFilterSize', 'uvFilterSize')
    ]
    # get placement and file nodes
    placement_node = pm.ls(sl=1, type=pm.nt.Place2dTexture)[0]
    for file_node in pm.ls(sl=1, type=pm.nt.File):
        for attr in attr_lut:
            if isinstance(attr, tuple):
                source_attr_name, target_attr_name = attr
            else:
                source_attr_name = target_attr_name = attr
            placement_node.attr(source_attr_name) >> \
                file_node.attr(target_attr_name)
@classmethod
def open_node_in_browser(cls):
    """Opens a file browser at the file path of each selected texture or
    stand-in node.
    """
    import os
    from anima.utils import open_browser_in_location
    # which attribute holds the file path for each supported node type
    node_attrs = {
        'file': 'fileTextureName',
        'aiImage': 'filename',
        'aiStandIn': 'dso',
    }
    for node in pm.ls(sl=1):
        node_type = pm.objectType(node)
        # special case: for transforms work on the shape instead
        if node_type == 'transform':
            node = node.getShape()
            node_type = pm.objectType(node)
        attr_name = node_attrs.get(node_type)
        if not attr_name:
            continue
        path = node.getAttr(attr_name)
        if "#" in path:
            # looks like a sequence, browse the folder instead
            path = os.path.dirname(path)
        open_browser_in_location(path)
@classmethod
def enable_matte(cls, color=0):
    """Enables the Arnold matte attributes on the selected objects.

    :param color: index into the matte color table; 0 means not
      visible, 1-3 select the R/G/B channels and 4 selects alpha
    """
    colors = [
        [0, 0, 0, 0],  # Not Visible
        [1, 0, 0, 0],  # Red
        [0, 1, 0, 0],  # Green
        [0, 0, 1, 0],  # Blue
        [0, 0, 0, 1],  # Alpha
    ]
    arnold_shaders = (
        pm.nt.AiStandard, pm.nt.AiHair, pm.nt.AiSkin, pm.nt.AiUtility
    )
    matte_color = colors[color]
    for shape in pm.ls(sl=1, dag=1,
                       type=[pm.nt.Mesh, pm.nt.NurbsSurface, 'aiStandIn']):
        for shading_engine in pm.listConnections(shape,
                                                 type='shadingEngine'):
            shader = shading_engine.attr('surfaceShader').connections()[0]
            if not isinstance(shader, arnold_shaders):
                continue
            try:
                # make the matte attributes render-layer overridable
                pm.editRenderLayerAdjustment(shader.attr("aiEnableMatte"))
                pm.editRenderLayerAdjustment(shader.attr("aiMatteColor"))
                pm.editRenderLayerAdjustment(shader.attr("aiMatteColorA"))
                shader.attr("aiEnableMatte").set(1)
                shader.attr("aiMatteColor").set(
                    matte_color[0:3], type='double3'
                )
                shader.attr("aiMatteColorA").set(matte_color[3])
            except RuntimeError as e:
                # there is some connections
                print(str(e))
@classmethod
def enable_subdiv(cls, fixed_tes=False, max_subdiv=3):
    """Enables render-time (CatClark) subdivision on the selected nodes
    for both Arnold and Redshift.

    :param fixed_tes: Uses fixed tessellation.
    :param max_subdiv: The max subdivision iteration. Default 3.
    """
    for node in pm.ls(sl=1):
        shape = node.getShape()
        # Arnold attributes; silently skipped when not present
        try:
            shape.aiSubdivIterations.set(max_subdiv)
            shape.aiSubdivType.set(1)
            shape.aiSubdivPixelError.set(0)
        except AttributeError:
            pass
        # Redshift attributes; silently skipped when not present
        try:
            shape.rsEnableSubdivision.set(1)
            shape.rsMaxTessellationSubdivs.set(max_subdiv)
            if fixed_tes:
                shape.rsScreenSpaceAdaptive.set(0)
                shape.rsMinTessellationLength.set(0)
            else:
                shape.rsLimitOutOfFrustumTessellation.set(1)
                shape.rsMaxOutOfFrustumTessellationSubdivs.set(1)
        except AttributeError:
            pass
@classmethod
def barndoor_simulator_setup(cls):
    """Creates a barndoor simulator on the first selected light."""
    simulator = auxiliary.BarnDoorSimulator()
    simulator.light = pm.ls(sl=1)[0]
    simulator.setup()
@classmethod
def barndoor_simulator_unsetup(cls):
    """Removes the barndoor simulator from every selected light."""
    simulator = auxiliary.BarnDoorSimulator()
    for node in pm.ls(sl=1):
        # only operate on real light shapes
        if isinstance(node.getShape(), pm.nt.Light):
            simulator.light = node
            simulator.unsetup()
@classmethod
def fix_barndoors(cls):
    """fixes the barndoors on scene lights created in MtoA 1.0 to match
    the new behaviour of barndoors in MtoA 1.1
    """
    for light in pm.ls(type='spotLight'):
        cone_angle = light.getAttr('coneAngle')
        penumbra_angle = light.getAttr('penumbraAngle')
        # shrinking the cone by |penumbra| covers both the negative
        # (added) and positive (subtracted) penumbra branches of the
        # old code in one expression
        light.setAttr(
            'coneAngle',
            max(cone_angle - abs(penumbra_angle), 0.1)
        )
@classmethod
def convert_aiSkinSSS_to_aiSkin(cls):
    """converts aiSkinSSS nodes in the current scene to aiSkin + aiStandard
    nodes automatically

    Each aiSkinSss is replaced by an aiSkin wired into the emission
    channel of a new aiStandard; attribute values and incoming
    connections are mapped through ``attr_mapper`` below.
    """
    # maps each aiSkinSss attribute to the node ('aiSkin'/'aiStandard')
    # and attribute name it should end up on; an optional 'multiplier'
    # scales the copied value
    attr_mapper = {
        # diffuse
        'color': {
            'node': 'aiStandard',
            'attr_name': 'color'
        },
        'diffuseWeight': {
            'node': 'aiStandard',
            'attr_name': 'Kd',
            'multiplier': 0.7
        },
        'diffuseRoughness': {
            'node': 'aiStandard',
            'attr_name': 'diffuseRoughness'
        },
        # sss
        'sssWeight': {
            'node': 'aiSkin',
            'attr_name': 'sssWeight'
        },
        # shallowScatter
        'shallowScatterColor': {
            'node': 'aiSkin',
            'attr_name': 'shallowScatterColor',
        },
        'shallowScatterWeight': {
            'node': 'aiSkin',
            'attr_name': 'shallowScatterWeight'
        },
        'shallowScatterRadius': {
            'node': 'aiSkin',
            'attr_name': 'shallowScatterRadius'
        },
        # midScatter
        'midScatterColor': {
            'node': 'aiSkin',
            'attr_name': 'midScatterColor',
        },
        'midScatterWeight': {
            'node': 'aiSkin',
            'attr_name': 'midScatterWeight'
        },
        'midScatterRadius': {
            'node': 'aiSkin',
            'attr_name': 'midScatterRadius'
        },
        # deepScatter
        'deepScatterColor': {
            'node': 'aiSkin',
            'attr_name': 'deepScatterColor',
        },
        'deepScatterWeight': {
            'node': 'aiSkin',
            'attr_name': 'deepScatterWeight'
        },
        'deepScatterRadius': {
            'node': 'aiSkin',
            'attr_name': 'deepScatterRadius'
        },
        # primaryReflection
        'primaryReflectionColor': {
            'node': 'aiSkin',
            'attr_name': 'specularColor'
        },
        'primaryReflectionWeight': {
            'node': 'aiSkin',
            'attr_name': 'specularWeight'
        },
        'primaryReflectionRoughness': {
            'node': 'aiSkin',
            'attr_name': 'specularRoughness'
        },
        # secondaryReflection
        'secondaryReflectionColor': {
            'node': 'aiSkin',
            'attr_name': 'sheenColor'
        },
        'secondaryReflectionWeight': {
            'node': 'aiSkin',
            'attr_name': 'sheenWeight'
        },
        'secondaryReflectionRoughness': {
            'node': 'aiSkin',
            'attr_name': 'sheenRoughness'
        },
        # bump
        'normalCamera': {
            'node': 'aiSkin',
            'attr_name': 'normalCamera'
        },
        # sss multiplier
        'globalSssRadiusMultiplier': {
            'node': 'aiSkin',
            'attr_name': 'globalSssRadiusMultiplier'
        },
    }
    all_skin_sss = pm.ls(type='aiSkinSss')
    for skin_sss in all_skin_sss:
        # build the replacement pair: aiSkin feeding the emission of an
        # aiStandard
        skin = pm.shadingNode('aiSkin', asShader=1)
        standard = pm.shadingNode('aiStandard', asShader=1)
        skin.attr('outColor') >> standard.attr('emissionColor')
        standard.setAttr('emission', 1.0)
        skin.setAttr('fresnelAffectSss',
                     0)  # to match the previous behaviour
        node_mapper = {
            'aiSkin': skin,
            'aiStandard': standard
        }
        for attr in attr_mapper.keys():
            inputs = skin_sss.attr(attr).inputs(p=1, c=1)
            if inputs:
                # copy inputs
                destination_attr_name = inputs[0][0].name().split('.')[-1]
                source = inputs[0][1]
                if destination_attr_name in attr_mapper:
                    node = attr_mapper[destination_attr_name]['node']
                    attr_name = attr_mapper[destination_attr_name][
                        'attr_name']
                    source >> node_mapper[node].attr(attr_name)
                else:
                    source >> skin.attr(destination_attr_name)
            else:
                # copy values
                node = node_mapper[attr_mapper[attr]['node']]
                attr_name = attr_mapper[attr]['attr_name']
                multiplier = attr_mapper[attr].get('multiplier', 1.0)
                attr_value = skin_sss.getAttr(attr)
                if isinstance(attr_value, tuple):
                    # NOTE(review): map() returns an iterator on
                    # Python 3 -- this relies on Python 2 list semantics
                    attr_value = map(lambda x: x * multiplier, attr_value)
                else:
                    attr_value *= multiplier
                node.attr(attr_name).set(attr_value)
        # after everything is set up
        # connect the aiStandard to the shadingEngine
        for source, dest in skin_sss.outputs(p=1, c=1):
            standard.attr('outColor') >> dest
        # and rename the materials
        orig_name = skin_sss.name()
        # delete the skinSSS node
        pm.delete(skin_sss)
        skin_name = orig_name
        standard_name = '%s_aiStandard' % orig_name
        skin.rename(skin_name)
        standard.rename(standard_name)
        print('updated %s' % skin_name)
@classmethod
def normalize_sss_weights(cls):
    """normalizes the sss weights so their total weight is 1.0

    If an aiStandard is assigned to the selected object it searches for
    an aiSkin in the emission channel.

    The script considers 0.7 as the highest diffuse value for aiStandard.
    """
    # get the shader of the selected object
    assigned_shader = pm.ls(
        pm.ls(sl=1)[0].getShape().outputs(type='shadingEngine')[0].inputs(),
        mat=1
    )[0]
    if assigned_shader.type() == 'aiStandard':
        # the aiSkin is expected to be connected to the emission channel
        sss_shader = assigned_shader.attr('emissionColor').inputs()[0]
        diffuse_weight = assigned_shader.attr('Kd').get()
    else:
        sss_shader = assigned_shader
        diffuse_weight = 0

    def get_attr_or_texture(attr):
        """returns the attribute itself or, when a texture is connected,
        the texture attribute that scales its output
        """
        if attr.inputs():
            # we probably have a texture assigned,
            # so use its multiplying attribute instead.
            # branch on the texture type first: querying 'multiply' on a
            # File node raises MayaAttributeError (File nodes use
            # 'colorGain'), which the previous unconditional lookup did
            texture = attr.inputs()[0]
            if isinstance(texture, pm.nt.File):
                attr = texture.attr('colorGain')
            else:
                attr = texture.attr('multiply')
        return attr

    def as_scalar(value):
        # color weights come back as 3-tuples; average them to a scalar
        if isinstance(value, tuple):
            return (value[0] + value[1] + value[2]) / 3.0
        return value

    shallow_attr = get_attr_or_texture(
        sss_shader.attr('shallowScatterWeight')
    )
    mid_attr = get_attr_or_texture(sss_shader.attr('midScatterWeight'))
    deep_attr = get_attr_or_texture(sss_shader.attr('deepScatterWeight'))

    shallow_weight = as_scalar(shallow_attr.get())
    mid_weight = as_scalar(mid_attr.get())
    deep_weight = as_scalar(deep_attr.get())

    # NOTE(review): raises ZeroDivisionError when all three weights are
    # zero, same as the original implementation
    total_sss_weight = shallow_weight + mid_weight + deep_weight
    mult = (1 - diffuse_weight / 0.7) / total_sss_weight

    for weight_attr, weight in [(shallow_attr, shallow_weight),
                                (mid_attr, mid_weight),
                                (deep_attr, deep_weight)]:
        try:
            weight_attr.set(weight * mult)
        except RuntimeError:
            # color attributes need three components
            w = weight * mult
            weight_attr.set(w, w, w)
@classmethod
def create_eye_shader_and_controls(cls):
    """This is pretty much specific to the way we are creating eye shaders
    for characters in KKS project, but it is a useful trick, select the
    inner eye objects before running.

    Creates shared aiImage textures (emission + specular maps) and adds
    keyable control attributes on the character root node, then wires those
    controls into the shader of every selected eye object.
    """
    eyes = pm.ls(sl=1)
    if not eyes:
        # nothing selected, nothing to do
        return

    # the character root: topmost DAG parent of the first selected eye
    char = eyes[0].getAllParents()[-1]

    # shared texture nodes used by all selected eyes
    place = pm.shadingNode('place2dTexture', asUtility=1)
    emission_image = pm.shadingNode('aiImage', asTexture=1)
    ks_image = pm.shadingNode('aiImage', asTexture=1)

    # project-specific texture locations ($REPO1977 is expanded by Maya/Arnold)
    texture_paths = {
        'emission': '$REPO1977/KKS/Assets/Characters/Body_Parts/Textures/'
                    'char_eyeInner_light_v001.png',
        'Ks': '$REPO1977/KKS/Assets/Characters/Body_Parts/Textures/'
              'char_eyeInner_spec_v002.png',
    }
    emission_image.setAttr('filename', texture_paths['emission'])
    ks_image.setAttr('filename', texture_paths['Ks'])

    # only the emission image gets the rotatable placement (for eyeLightAngle)
    place.outUV >> emission_image.attr('uvcoords')

    # add (or reset) the keyable control attributes on the character root
    if not char.hasAttr('eyeLightStrength'):
        char.addAttr('eyeLightStrength', at='double', min=0, dv=0.0, k=1)
    else:
        # set the default
        char.attr('eyeLightStrength').set(0)

    if not char.hasAttr('eyeLightAngle'):
        char.addAttr("eyeLightAngle", at='double', dv=0, k=1)

    if not char.hasAttr('eyeDiffuseWeight'):
        char.addAttr(
            "eyeDiffuseWeight", at='double', dv=0.15, k=1, min=0, max=1
        )

    if not char.hasAttr('eyeSpecularWeight'):
        char.addAttr(
            'eyeSpecularWeight', at='double', dv=1.0, k=1, min=0, max=1
        )

    if not char.hasAttr('eyeSSSWeight'):
        char.addAttr(
            'eyeSSSWeight', at='double', dv=0.5, k=1, min=0, max=1
        )

    # connect eye light strength (drives the emission image multiplier)
    char.eyeLightStrength >> emission_image.attr('multiplyR')
    char.eyeLightStrength >> emission_image.attr('multiplyG')
    char.eyeLightStrength >> emission_image.attr('multiplyB')

    # connect eye light angle
    char.eyeLightAngle >> place.attr('rotateFrame')

    # connect specular weight
    char.eyeSpecularWeight >> ks_image.attr('multiplyR')
    char.eyeSpecularWeight >> ks_image.attr('multiplyG')
    char.eyeSpecularWeight >> ks_image.attr('multiplyB')

    # wire each eye's assigned shader (assumed aiStandard-like; it exposes
    # emissionColor/Ks/Ksss attributes -- TODO confirm for other shader types)
    for eye in eyes:
        shading_engine = eye.getShape().outputs(type='shadingEngine')[0]
        shader = pm.ls(shading_engine.inputs(), mat=1)[0]
        # connect the diffuse shader input to the emissionColor
        diffuse_texture = shader.attr('color').inputs(p=1, s=1)[0]
        diffuse_texture >> shader.attr('emissionColor')
        emission_image.outColorR >> shader.attr('emission')
        # also connect it to specular color
        diffuse_texture >> shader.attr('KsColor')
        # connect the Ks image to the specular weight
        ks_image.outColorR >> shader.attr('Ks')
        # also connect it to sss color
        diffuse_texture >> shader.attr('KsssColor')
        char.eyeDiffuseWeight >> shader.attr('Kd')
        char.eyeSSSWeight >> shader.attr('Ksss')
        # set some default values
        shader.attr('diffuseRoughness').set(0)
        shader.attr('Kb').set(0)
        shader.attr('directDiffuse').set(1)
        shader.attr('indirectDiffuse').set(1)
        shader.attr('specularRoughness').set(0.4)
        shader.attr('specularAnisotropy').set(0.5)
        shader.attr('specularRotation').set(0)
        shader.attr('specularFresnel').set(0)
        shader.attr('Kr').set(0)
        shader.attr('enableInternalReflections').set(0)
        shader.attr('Kt').set(0)
        shader.attr('transmittance').set([1, 1, 1])
        shader.attr('opacity').set([1, 1, 1])
        shader.attr('sssRadius').set([1, 1, 1])

    # restore the original selection
    pm.select(eyes)
@classmethod
def randomize_attr(cls, nodes, attr, min, max, pre=0.1):
    """Assign a random, step-quantized value to ``attr`` on every node.

    Each node receives an independent uniform sample from ``[min, max)``
    which is then rounded down to the nearest multiple of ``pre``.

    :param list nodes: nodes exposing a ``setAttr(name, value)`` method
    :param str attr: name of the attribute to set
    :param float, int min: lower bound of the random range
    :param float, int max: upper bound of the random range
    :param float, int pre: quantization step (precision), defaults to 0.1
    :return: None
    """
    import random
    import math

    span = float(max - min)
    offset = float(min)
    for node in nodes:
        sample = random.random() * span + offset
        # snap down to the nearest multiple of the precision step
        quantized = math.floor(sample / pre) * pre
        node.setAttr(attr, quantized)
@classmethod
def randomize_light_color_temp(cls, min_field, max_field):
    """Randomizes the color temperature of selected lights.

    Reads the min/max range from two UI float fields, then quantizes the
    random ``aiColorTemperature`` of each selected light's shape to whole
    kelvins (precision 1).

    :param min_field: ``pm.floatField`` UI control holding the minimum value
    :param max_field: ``pm.floatField`` UI control holding the maximum value
    :return: None
    """
    # query the UI fields; renamed locals so the ``min``/``max`` builtins
    # are not shadowed
    min_value = pm.floatField(min_field, q=1, v=1)
    max_value = pm.floatField(max_field, q=1, v=1)
    cls.randomize_attr(
        [node.getShape() for node in pm.ls(sl=1)],
        'aiColorTemperature',
        min_value,
        max_value,
        1
    )
@classmethod
def randomize_light_intensity(cls, min_field, max_field):
    """Randomizes the intensities (``aiExposure``) of selected lights.

    Reads the min/max range from two UI float fields, then quantizes the
    random exposure of each selected light's shape to steps of 0.1.

    :param min_field: ``pm.floatField`` UI control holding the minimum value
    :param max_field: ``pm.floatField`` UI control holding the maximum value
    :return: None
    """
    # query the UI fields; renamed locals so the ``min``/``max`` builtins
    # are not shadowed
    min_value = pm.floatField(min_field, q=1, v=1)
    max_value = pm.floatField(max_field, q=1, v=1)
    cls.randomize_attr(
        [node.getShape() for node in pm.ls(sl=1)],
        'aiExposure',
        min_value,
        max_value,
        0.1
    )
@classmethod
def setup_outer_eye_render_attributes(cls):
    """sets outer eye render attributes for characters, select outer eye
    objects and run this

    Disables shadow casting, reflection/refraction visibility and the
    Arnold self-shadow/opacity/diffuse/glossy flags on the shape of every
    selected node.
    """
    # attribute name / value pairs applied to every selected shape
    render_flags = [
        ('castsShadows', 0),
        ('visibleInReflections', 0),
        ('visibleInRefractions', 0),
        ('aiSelfShadows', 0),
        ('aiOpaque', 0),
        ('aiVisibleInDiffuse', 0),
        ('aiVisibleInGlossy', 0),
    ]
    for transform_node in pm.ls(sl=1):
        shape_node = transform_node.getShape()
        for flag_name, flag_value in render_flags:
            shape_node.setAttr(flag_name, flag_value)
@classmethod
def setup_window_glass_render_attributes(cls):
    """sets window glass render attributes for environments, select window
    glass objects and run this

    Reuses an existing ``toolbox_glass_shader`` if one is in the scene,
    otherwise creates and configures one, then applies glass-friendly
    render flags to the shapes of the selected nodes. Stand-in shapes
    additionally get ``overrideShaders`` enabled and the glass shader
    assigned.
    """
    shader_name = 'toolbox_glass_shader'
    shaders = pm.ls('%s*' % shader_name)
    selection = pm.ls(sl=1)
    if len(shaders) > 0:
        shader = shaders[0]
    else:
        # create and configure the shared glass shader once
        shader = pm.shadingNode(
            'aiStandard',
            asShader=1,
            name='%s#' % shader_name
        )
        shader.setAttr('Ks', 1)
        shader.setAttr('specularRoughness', 0)
        shader.setAttr('Kr', 0)
        shader.setAttr('enableInternalReflections', 0)
        shader.setAttr('Kt', 0)
        shader.setAttr('KtColor', (0, 0, 0))

    shape_attributes = [
        ('castsShadows', 0),
        ('visibleInReflections', 0),
        ('visibleInRefractions', 0),
        ('aiSelfShadows', 0),
        ('aiOpaque', 1),
        ('aiVisibleInDiffuse', 0),
        ('aiVisibleInGlossy', 0),
    ]
    for node in selection:
        shape = node.getShape()
        # BUG FIX: this used ``map(lambda x: shape.setAttr(*x), ...)`` for
        # its side effect; under Python 3 ``map`` is lazy, so the attributes
        # would silently never be set -- use an explicit loop instead
        for attr_name, attr_value in shape_attributes:
            shape.setAttr(attr_name, attr_value)
        if isinstance(shape, pm.nt.AiStandIn):
            # let the stand-in use the shader assigned in Maya
            shape.overrideShaders.set(1)
            # assign it to the stand in
            pm.select(node)
            pm.hyperShade(assign=shader)
@classmethod
def dummy_window_light_plane(cls):
    """creates or updates the dummy window plane for the given area light

    Works on the current selection: each selected light gets its own
    ``DummyWindowLight`` helper which is then refreshed.
    """
    selected_lights = pm.selected()
    from anima.env.mayaEnv import auxiliary
    # pick up any in-flight edits to the auxiliary module
    reload(auxiliary)
    for selected_light in selected_lights:
        dummy = auxiliary.DummyWindowLight()
        dummy.light = selected_light
        dummy.update()
@classmethod
def setup_z_limiter(cls):
    """creates z limiter setup

    Looks up an existing ``z_limiter_shader`` surfaceShader in the scene
    and creates one when none is found.

    :return: the ``surfaceShader`` node that was found or created
    """
    # NOTE: the '#' placeholder belongs only in the creation name (Maya
    # replaces it with a running number); the ls() pattern must not
    # contain it or existing shaders would never be matched
    shader_name = 'z_limiter_shader'
    # BUG FIX: original used ``'%s*' * shader_name`` (str * str raises
    # TypeError); the string formatting operator ``%`` was intended
    shaders = pm.ls('%s*' % shader_name)
    if len(shaders) > 0:
        shader = shaders[0]
    else:
        shader = pm.shadingNode(
            'surfaceShader',
            asShader=1,
            name='%s#' % shader_name
        )
    # return the shader so callers can continue the (currently partial)
    # z-limiter setup with it
    return shader
@classmethod
def convert_file_node_to_ai_image_node(cls):
    """converts the file node to aiImage node

    For every selected Maya ``file`` texture node: creates an ``aiImage``
    with the same texture path, preserves (or discards) the place2dTexture,
    re-creates all outgoing connections on the aiImage, deletes the file
    node and finally renames the aiImage to the old node's name.

    NOTE(review): assumes the ``file`` node's output attribute names exist
    with identical names on ``aiImage`` -- TODO confirm for all outputs.
    """
    # neutral place2dTexture values; a placement carrying only these
    # defaults can safely be deleted instead of reconnected
    default_values = {
        'coverageU': 1,
        'coverageV': 1,
        'translateFrameU': 0,
        'translateFrameV': 0,
        'rotateFrame': 0,
        'repeatU': 1,
        'repeatV': 1,
        'offsetU': 0,
        'offsetV': 0,
        'rotateUV': 0,
        'noiseU': 0,
        'noiseV': 0
    }
    for node in pm.ls(sl=1, type='file'):
        node_name = node.name()
        path = node.getAttr('fileTextureName')
        ai_image = pm.shadingNode('aiImage', asTexture=1)
        ai_image.setAttr('filename', path)

        # check the placement node
        placements = node.listHistory(type='place2dTexture')
        if len(placements):
            placement = placements[0]
            # check default values: keep the placement only if the artist
            # actually changed something on it
            if any([placement.getAttr(attr_name) != default_values[attr_name] for attr_name in default_values]):
                # connect the placement to the aiImage
                placement.outUV >> ai_image.uvcoords
            else:
                # delete it
                pm.delete(placement)

        # re-create every outgoing connection on the aiImage, assuming a
        # same-named source attribute exists there
        for attr_out, attr_in in node.outputs(p=1, c=1):
            attr_name = attr_out.name().split('.')[-1]
            if attr_name == 'message':
                # message connections are bookkeeping, not shading data
                continue
            ai_image.attr(attr_name) >> attr_in

        # delete the File node
        pm.delete(node)

        # rename the aiImage node so downstream references stay readable
        ai_image.rename(node_name)
@classmethod
def create_generic_tooth_shader(cls):
    """Builds the generic tooth look and assigns it to the selection.

    An ``aiStandard`` with warm diffuse, strong SSS and a subtle
    noise-driven bump is created through ``auxiliary.create_shader`` and
    assigned to every currently selected node.
    """
    shader_name = 'toolbox_generic_tooth_shader#'
    selected_nodes = pm.ls(sl=1)
    # declarative shader description consumed by auxiliary.create_shader
    tooth_look = {
        'type': 'aiStandard',
        'class': 'asShader',
        'attr': {
            'color': [1, 0.909, 0.815],
            'Kd': 0.2,
            'KsColor': [1, 1, 1],
            'Ks': 0.5,
            'specularRoughness': 0.10,
            'specularFresnel': 1,
            'Ksn': 0.05,
            'enableInternalReflections': 0,
            'KsssColor': [1, 1, 1],
            'Ksss': 1,
            'sssRadius': [1, 0.853, 0.68],
            'normalCamera': {
                'output': 'outNormal',
                'type': 'bump2d',
                'class': 'asTexture',
                'attr': {
                    'bumpDepth': 0.05,
                    'bumpValue': {
                        'output': 'outValue',
                        'type': 'aiNoise',
                        'class': 'asUtility',
                        'attr': {
                            'scaleX': 4,
                            'scaleY': 0.250,
                            'scaleZ': 4,
                        }
                    }
                }
            }
        }
    }
    shader = auxiliary.create_shader(tooth_look, shader_name)
    for selected_node in selected_nodes:
        # hyperShade assigns to the current selection, so select one by one
        pm.select(selected_node)
        pm.hyperShade(assign=shader)
@classmethod
def create_generic_gum_shader(cls):
    """set ups generic gum shader for selected objects

    Creates an ``aiStandard`` gum look (pink diffuse, soft SSS, gentle
    noise bump) via ``auxiliary.create_shader`` and assigns it to every
    selected node.
    """
    # CONSISTENCY FIX: first parameter renamed ``self`` -> ``cls`` to match
    # the @classmethod decorator and the sibling shader creators
    shader_name = 'toolbox_generic_gum_shader#'
    selection = pm.ls(sl=1)
    shader_tree = {
        'type': 'aiStandard',
        'class': 'asShader',
        'attr': {
            'color': [0.993, 0.596, 0.612],
            'Kd': 0.35,
            'KsColor': [1, 1, 1],
            'Ks': 0.010,
            'specularRoughness': 0.2,
            'enableInternalReflections': 0,
            'KsssColor': [1, 0.6, 0.6],
            'Ksss': 0.5,
            'sssRadius': [0.5, 0.5, 0.5],
            'normalCamera': {
                'output': 'outNormal',
                'type': 'bump2d',
                'class': 'asTexture',
                'attr': {
                    'bumpDepth': 0.1,
                    'bumpValue': {
                        'output': 'outValue',
                        'type': 'aiNoise',
                        'class': 'asUtility',
                        'attr': {
                            'scaleX': 4,
                            'scaleY': 1,
                            'scaleZ': 4,
                        }
                    }
                }
            }
        }
    }
    shader = auxiliary.create_shader(shader_tree, shader_name)
    for node in selection:
        # hyperShade assigns to the current selection
        pm.select(node)
        pm.hyperShade(assign=shader)
@classmethod
def create_generic_tongue_shader(cls):
    """set ups generic tongue shader for selected objects

    Creates an ``aiStandard`` tongue look (deep red diffuse, soft SSS,
    gentle noise bump) via ``auxiliary.create_shader`` and assigns it to
    every selected node.
    """
    # CONSISTENCY FIX: first parameter renamed ``self`` -> ``cls`` to match
    # the @classmethod decorator and the sibling shader creators
    shader_name = 'toolbox_generic_tongue_shader#'
    selection = pm.ls(sl=1)
    shader_tree = {
        'type': 'aiStandard',
        'class': 'asShader',
        'attr': {
            'color': [0.675, 0.174, 0.194],
            'Kd': 0.35,
            'KsColor': [1, 1, 1],
            'Ks': 0.010,
            'specularRoughness': 0.2,
            'enableInternalReflections': 0,
            'KsssColor': [1, 0.3, 0.3],
            'Ksss': 0.5,
            'sssRadius': [0.5, 0.5, 0.5],
            'normalCamera': {
                'output': 'outNormal',
                'type': 'bump2d',
                'class': 'asTexture',
                'attr': {
                    'bumpDepth': 0.1,
                    'bumpValue': {
                        'output': 'outValue',
                        'type': 'aiNoise',
                        'class': 'asUtility',
                        'attr': {
                            'scaleX': 4,
                            'scaleY': 1,
                            'scaleZ': 4,
                        }
                    }
                }
            }
        }
    }
    shader = auxiliary.create_shader(shader_tree, shader_name)
    for node in selection:
        # hyperShade assigns to the current selection
        pm.select(node)
        pm.hyperShade(assign=shader)
@classmethod
def create_ea_matte(cls):
    """creates "ebesinin ami" matte shader with opacity for selected
    objects.

    It is called "EA Matte" for one reason, this matte is not necessary in
    normal working conditions. That is, you change the color and look of
    some 3D element in the 3D application and do an artistic grading at
    post to the whole plate, not to individual elements in the render.
    And because we are forced to create this matte layer, we thought that
    we should give it a proper name.
    """
    # NOTE(review): the comments below describe a per-object matte-shader
    # workflow; the visible code only configures render-layer overrides on
    # the Arnold globals (samples/GI off, ignore* on, Z and motion-vector
    # AOVs disabled, autocrop off) -- the shader/lut part appears to be
    # unimplemented here. TODO confirm.
    # get the selected objects
    # for each object create a new surface shader with the opacity
    # channel having the opacity of the original shader
    # create a lut for objects that have the same material not to cause
    # multiple materials to be created
    daro = pm.PyNode('defaultArnoldRenderOptions')
    # values applied as render-layer adjustments on the Arnold globals
    attrs = {
        'AASamples': 4,
        'GIDiffuseSamples': 0,
        'GIGlossySamples': 0,
        'GIRefractionSamples': 0,
        'sssBssrdfSamples': 0,
        'volumeIndirectSamples': 0,
        'GITotalDepth': 0,
        'GIDiffuseDepth': 0,
        'GIGlossyDepth': 0,
        'GIReflectionDepth': 0,
        'GIRefractionDepth': 0,
        'GIVolumeDepth': 0,
        'ignoreTextures': 1,
        'ignoreAtmosphere': 1,
        'ignoreLights': 1,
        'ignoreShadows': 1,
        'ignoreBump': 1,
        'ignoreSss': 1,
    }
    for attr in attrs:
        # register the attribute as a per-layer override before setting it
        pm.editRenderLayerAdjustment(daro.attr(attr))
        daro.setAttr(attr, attrs[attr])

    # disable the Z AOV on this layer, if it exists in the scene
    try:
        aov_z = pm.PyNode('aiAOV_Z')
        pm.editRenderLayerAdjustment(aov_z.attr('enabled'))
        aov_z.setAttr('enabled', 0)
    except pm.MayaNodeError:
        pass

    # disable the motion-vector AOV on this layer, if it exists
    try:
        aov_mv = pm.PyNode('aiAOV_motionvector')
        pm.editRenderLayerAdjustment(aov_mv.attr('enabled'))
        aov_mv.setAttr('enabled', 0)
    except pm.MayaNodeError:
        pass

    # mattes must keep the full frame: turn off EXR autocrop on this layer
    dad = pm.PyNode('defaultArnoldDriver')
    pm.editRenderLayerAdjustment(dad.attr('autocrop'))
    dad.setAttr('autocrop', 0)
@classmethod
def create_z_layer(cls):
    """creates z layer with arnold render settings

    Registers render-layer adjustments that turn off sampling/GI and all
    shading-related features, enables the Z and motion-vector AOVs (when
    present) and switches EXR autocrop on.
    """
    arnold_globals = pm.PyNode('defaultArnoldRenderOptions')
    # per-layer overrides for a pure depth pass
    layer_settings = {
        'AASamples': 4,
        'GIDiffuseSamples': 0,
        'GIGlossySamples': 0,
        'GIRefractionSamples': 0,
        'sssBssrdfSamples': 0,
        'volumeIndirectSamples': 0,
        'GITotalDepth': 0,
        'GIDiffuseDepth': 0,
        'GIGlossyDepth': 0,
        'GIReflectionDepth': 0,
        'GIRefractionDepth': 0,
        'GIVolumeDepth': 0,
        'ignoreShaders': 1,
        'ignoreAtmosphere': 1,
        'ignoreLights': 1,
        'ignoreShadows': 1,
        'ignoreBump': 1,
        'ignoreNormalSmoothing': 1,
        'ignoreDof': 1,
        'ignoreSss': 1,
    }
    for setting_name in layer_settings:
        # mark as a render-layer override first, then apply the value
        pm.editRenderLayerAdjustment(arnold_globals.attr(setting_name))
        arnold_globals.setAttr(setting_name, layer_settings[setting_name])

    # enable the depth related AOVs on this layer when they exist
    for aov_name in ('aiAOV_Z', 'aiAOV_motionvector'):
        try:
            aov_node = pm.PyNode(aov_name)
            pm.editRenderLayerAdjustment(aov_node.attr('enabled'))
            aov_node.setAttr('enabled', 1)
        except pm.MayaNodeError:
            # AOV not present in this scene; nothing to enable
            pass

    driver = pm.PyNode('defaultArnoldDriver')
    pm.editRenderLayerAdjustment(driver.attr('autocrop'))
    driver.setAttr('autocrop', 1)
@classmethod
def generate_reflection_curve(cls):
    """Generates a curve which helps creating specular at the desired point

    Select a vertex and run: the view direction of the active panel's
    camera is reflected about the vertex normal (R = D - 2*(D.N)*N) and a
    one-unit linear curve is drawn from the vertex along that reflection,
    with its pivot on the vertex.

    :return: the created curve transform
    """
    # CONSISTENCY FIX: first parameter renamed ``self`` -> ``cls`` to match
    # the @classmethod decorator
    from maya.OpenMaya import MVector
    from anima.env.mayaEnv import auxiliary

    vtx = pm.ls(sl=1)[0]
    normal = vtx.getNormal(space='world')

    # camera of the panel the artist is currently looking through
    panel = auxiliary.Playblaster.get_active_panel()
    camera = pm.PyNode(pm.modelPanel(panel, q=1, cam=1))
    # camera view axis (-Z) transformed to world space
    camera_axis = MVector(0, 0, -1) * camera.worldMatrix.get()

    # standard reflection of the view direction about the surface normal
    refl = camera_axis - 2 * normal.dot(camera_axis) * normal

    # create a new curve from the vertex along the reflection vector
    p1 = vtx.getPosition(space='world')
    p2 = p1 + refl
    curve = pm.curve(d=1, p=[p1, p2])

    # move pivot to the first point
    pm.xform(curve, rp=p1, sp=p1)
    return curve
@classmethod
def import_gpu_content(cls):
    """imports the selected GPU content

    For every selected GPU-cache node the Alembic file behind it is
    imported and reparented under the node; the imported content is then
    zeroed, moved to world, and the cache's ``materials`` group discarded.
    All imported top nodes end up selected.
    """
    # CONSISTENCY FIX: first parameter renamed ``self`` -> ``cls`` to match
    # the @classmethod decorator
    import os
    imported_nodes = []
    for node in pm.ls(sl=1):
        gpu_node = node.getShape()
        gpu_path = gpu_node.getAttr('cacheFileName')
        # AbcImport works by side effect (reparents under ``node``); the
        # original bound its return value only to overwrite it immediately
        pm.mel.eval(
            'AbcImport -mode import -reparent "%s" "%s";' % (
                node.fullPath(), os.path.expandvars(gpu_path)
            )
        )
        # the imported content is now among the node's children
        new_nodes = node.getChildren()
        new_nodes.remove(gpu_node)
        imported_node = None
        # keep the geometry, drop the cache's "materials" group
        for n in new_nodes:
            if n.name() != 'materials':
                imported_node = n
            else:
                pm.delete(n)
        if imported_node:
            # zero transforms and detach from the GPU-cache transform
            imported_node.t.set(0, 0, 0)
            imported_node.r.set(0, 0, 0)
            imported_node.s.set(1, 1, 1)
            pm.parent(imported_node, world=1)
            imported_nodes.append(imported_node)
    pm.select(imported_nodes)
@classmethod
def render_slicer(cls):
    """A tool for slicing big render scenes

    Opens the Render Slicer UI.

    :return: the ``render_slicer.UI`` instance. Returning it (instead of
        dropping it in a local, as before) lets the caller keep a
        reference so the window is not garbage collected.
    """
    # CONSISTENCY FIX: first parameter renamed ``self`` -> ``cls`` to match
    # the @classmethod decorator
    from anima.env.mayaEnv import render_slicer
    return render_slicer.UI()
@classmethod
def move_cache_files_wrapper(cls, source_driver_field, target_driver_field):
    """UI wrapper around :meth:`Render.move_cache_files`.

    :param source_driver_field: text field widget holding the source driver
    :param target_driver_field: text field widget holding the target driver
    :return: None
    """
    # read both UI fields and forward them straight to the worker
    Render.move_cache_files(
        source_driver_field.text(),
        target_driver_field.text()
    )
@classmethod
def move_cache_files(cls, source_driver, target_driver):
    """moves the selected cache files to another location

    For every selected stand-in (``aiStandIn``, ``*.ass.gz``) or volume
    (``aiVolume``, ``*.vdb``) node whose path lives on ``source_driver``,
    all matching cache files on disk are moved to the same path on
    ``target_driver`` and the node's path attribute is updated to match.
    Progress is reported through the Maya progress bar.

    :param source_driver: drive/prefix the caches currently live on
    :param target_driver: drive/prefix the caches should be moved to
    :return: None
    """
    #
    # Move fur caches to new server
    #
    import os
    import shutil
    import glob
    # from maya import OpenMayaUI
    #
    # try:
    #     from shiboken import wrapInstance
    # except ImportError:
    #     from shiboken2 import wrapInstance
    #
    # from anima.ui import progress_dialog
    #
    # maya_main_window = wrapInstance(
    #     long(OpenMayaUI.MQtUtil.mainWindow()),
    #     progress_dialog.QtWidgets.QWidget
    # )
    #
    from anima.env.mayaEnv import MayaMainProgressBarWrapper
    wrp = MayaMainProgressBarWrapper()
    # NOTE(review): ProgressDialogManager and ``re`` (used below) come from
    # module scope, not from this method's imports -- verify at module top
    pdm = ProgressDialogManager(dialog=wrp)

    selected_nodes = pm.ls(sl=1)
    caller = pdm.register(len(selected_nodes), title='Moving Cache Files')
    for node in selected_nodes:
        ass_node = node.getShape()
        # only stand-ins and volumes carry movable cache paths
        if not isinstance(ass_node, (pm.nt.AiStandIn, pm.nt.AiVolume)):
            continue
        if isinstance(ass_node, pm.nt.AiStandIn):
            ass_path = ass_node.dso.get()
        elif isinstance(ass_node, pm.nt.AiVolume):
            ass_path = ass_node.filename.get()
        # resolve env vars and normalize separators before comparing paths
        ass_path = os.path.normpath(
            os.path.expandvars(ass_path)
        )
        # give info to user
        caller.title = 'Moving: %s' % ass_path

        # check if it is in the source location
        if source_driver not in ass_path:
            continue

        # check if it contains .ass.gz in its path
        if isinstance(ass_node, pm.nt.AiStandIn):
            if '.ass.gz' not in ass_path:
                continue
        elif isinstance(ass_node, pm.nt.AiVolume):
            if '.vdb' not in ass_path:
                continue

        # get the dirname
        ass_source_dir = os.path.dirname(ass_path)
        ass_target_dir = ass_source_dir.replace(source_driver, target_driver)

        # create the intermediate folders at destination
        try:
            os.makedirs(
                ass_target_dir
            )
        except OSError:
            # dir already exists
            pass

        # get all files list: turn frame-number '#' runs into a glob and
        # widen '.ass.gz' to '.ass*' to also catch uncompressed siblings
        pattern = re.subn(r'[#]+', '*', ass_path)[0].replace('.ass.gz', '.ass*')
        all_cache_files = glob.glob(pattern)

        inner_caller = pdm.register(len(all_cache_files))
        for source_f in all_cache_files:
            target_f = source_f.replace(source_driver, target_driver)
            # move files to new location
            shutil.move(source_f, target_f)
            inner_caller.step(message='Moving: %s' % source_f)
        inner_caller.end_progress()

        # finally update DSO path
        if isinstance(ass_node, pm.nt.AiStandIn):
            ass_node.dso.set(ass_path.replace(source_driver, target_driver))
        elif isinstance(ass_node, pm.nt.AiVolume):
            ass_node.filename.set(
                ass_path.replace(source_driver, target_driver)
            )
        caller.step()
    caller.end_progress()
@classmethod
def generate_rs_from_selection(cls, per_selection=False):
    """generates a temp rs file from selected nodes and hides the selected
    nodes

    Exports the selection as a Redshift proxy into the current version's
    ``Outputs/rs`` folder, hides the exported nodes, and creates proxy
    placeholder nodes grouped under ``temp_rs_proxies_grp``.

    :param bool per_selection: Generates one rs file per selected object
        if True. Default is False (one rs file for the whole selection).
    """
    import os
    import tempfile
    import shutil
    from anima.env.mayaEnv import auxiliary
    from anima.env import mayaEnv
    m = mayaEnv.Maya()
    # current Stalker version; its path decides where the rs file lands
    v = m.get_current_version()

    nodes = pm.ls(sl=1)

    # reuse the proxy group if a previous run created it
    temp_rs_proxies_grp = None
    if pm.ls('temp_rs_proxies_grp'):
        temp_rs_proxies_grp = pm.ls('temp_rs_proxies_grp')[0]
    else:
        temp_rs_proxies_grp = pm.nt.Transform(name='temp_rs_proxies_grp')

    rs_output_folder_path = os.path.join(
        v.absolute_path,
        'Outputs/rs'
    ).replace('\\', '/')

    try:
        os.makedirs(rs_output_folder_path)
    except OSError:
        # output folder already exists
        pass

    def _generate_rs():
        # Export the CURRENT selection to a temp rs file, move it next to
        # the version outputs, hide the exported nodes and create a proxy
        # placeholder parented under temp_rs_proxies_grp.
        export_command = 'rsProxy -fp "%(path)s" -c -z -sl;'
        # export to a temp path first; rsProxy writes are atomic-ish that way
        temp_rs_full_path = tempfile.mktemp(suffix='.rs')
        rs_full_path = os.path.join(
            rs_output_folder_path,
            os.path.basename(temp_rs_full_path)
        ).replace('\\', '/')
        pm.mel.eval(
            export_command % {
                'path': temp_rs_full_path.replace('\\', '/')
            }
        )
        shutil.move(
            temp_rs_full_path,
            rs_full_path
        )
        # hide the nodes that were just exported
        [n.v.set(0) for n in pm.ls(sl=1)]
        rs_proxy_node, rs_proxy_mesh = auxiliary.create_rs_proxy_node(
            path=rs_full_path)
        rs_proxy_tra = rs_proxy_mesh.getParent()
        rs_proxy_tra.rename('temp_rs_proxy#')
        pm.parent(rs_proxy_tra, temp_rs_proxies_grp)

    if per_selection:
        # one rs file per selected node
        for node in nodes:
            pm.select(node)
            _generate_rs()
    else:
        # one rs file for the whole selection
        pm.select(nodes)
        _generate_rs()
##
# Copyright (c) 2005-2007 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DRI: David Reid, dreid@apple.com
##
try:
from hashlib import sha1
except ImportError:
import sha
sha1 = sha.new
import random
import os
import stat
import sys
from zope.interface import implements
from twisted.python import log
from twisted.python.usage import Options, UsageError
from twisted.python.reflect import namedClass
from twisted.application import internet, service
from twisted.plugin import IPlugin
from twisted.scripts.mktap import getid
from twisted.cred.portal import Portal
from twisted.web2.dav import auth
from twisted.web2.dav import davxml
from twisted.web2.dav.resource import TwistedACLInheritable
from twisted.web2.auth.basic import BasicCredentialFactory
from twisted.web2.channel import http
from twisted.web2.log import LogWrapperResource
from twisted.web2.server import Site
from twistedcaldav import logging
from twistedcaldav.cluster import makeService_Combined, makeService_Master
from twistedcaldav.config import config, parseConfig, defaultConfig, ConfigurationError
from twistedcaldav.logging import RotatingFileAccessLoggingObserver
from twistedcaldav.root import RootResource
from twistedcaldav.resource import CalDAVResource
from twistedcaldav.directory.digest import QopDigestCredentialFactory
from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
from twistedcaldav.directory.aggregate import AggregateDirectoryService
from twistedcaldav.directory.sudo import SudoDirectoryService
from twistedcaldav.static import CalendarHomeProvisioningFile
from twistedcaldav import pdmonster
try:
from twistedcaldav.authkerb import NegotiateCredentialFactory
except ImportError:
NegotiateCredentialFactory = None
class CalDAVService(service.MultiService):
    """MultiService that ties an access-log observer's lifetime to the
    service lifecycle: the observer starts during privileged startup and
    stops when the service stops.
    """
    def __init__(self, logObserver):
        service.MultiService.__init__(self)
        self.logObserver = logObserver

    def privilegedStartService(self):
        """Start child services first, then bring up the log observer."""
        service.MultiService.privilegedStartService(self)
        self.logObserver.start()

    def stopService(self):
        """Stop child services first, then tear down the log observer."""
        service.MultiService.stopService(self)
        self.logObserver.stop()
class CalDAVOptions(Options):
    """twistd command line options for the CalDAV server plugin.

    Parses ``--config`` plus any number of ``-o``/``--option key=value``
    overrides; ``postOptions`` then loads the configuration file, applies
    the overrides and validates the runtime environment.
    """
    optParameters = [[
        "config", "f", "/etc/caldavd/caldavd.plist", "Path to configuration file."
    ]]

    # zsh shell-completion hint for the --config parameter
    zsh_actions = {"config" : "_files -g '*.plist'"}

    def __init__(self, *args, **kwargs):
        super(CalDAVOptions, self).__init__(*args, **kwargs)
        # config-file overrides collected from --option flags; applied in
        # postOptions() via config.updateDefaults()
        self.overrides = {}

    def opt_option(self, option):
        """
        Set an option to override a value in the config file. True, False, int,
        and float options are supported, as well as comma separated lists. Only
        one option may be given for each --option flag, however multiple
        --option flags may be specified.
        """
        if '=' in option:
            key, value = option.split('=')
            if key in defaultConfig:
                # coerce the command line string to the type of the
                # corresponding default config value
                if isinstance(defaultConfig[key], bool):
                    value = value == "True"
                elif isinstance(defaultConfig[key], (int, float, long)):
                    value = type(defaultConfig[key])(value)
                elif isinstance(defaultConfig[key], (list, tuple)):
                    value = value.split(',')
                elif isinstance(defaultConfig[key], dict):
                    raise UsageError("Dict options not supported on the command line")
                elif value == 'None':
                    value = None
            self.overrides[key] = value
        else:
            # bare "--option key" is shorthand for "--option key=True"
            self.opt_option('%s=True' % (option,))

    opt_o = opt_option

    def postOptions(self):
        """Load the config file, apply overrides, and validate privileges,
        paths, SSL files, umask and the slave shared secret.
        """
        if not os.path.exists(self['config']):
            log.msg("Config file %s not found, using defaults" % (
                self['config'],))

        parseConfig(self['config'])
        config.updateDefaults(self.overrides)

        uid, gid = None, None
        if self.parent['uid'] or self.parent['gid']:
            uid, gid = getid(self.parent['uid'],
                self.parent['gid'])

        # only root may switch to another uid/gid
        if uid:
            if uid != os.getuid() and os.getuid() != 0:
                import pwd
                username = pwd.getpwuid(os.getuid())[0]
                raise UsageError("Only root can drop privileges you are: %r"
                                 % (username,))

        if gid:
            if gid != os.getgid() and os.getgid() != 0:
                import grp
                groupname = grp.getgrgid(os.getgid())[0]
                raise UsageError("Only root can drop privileges, you are: %s"
                                 % (groupname,))

        # Ignore the logfile parameter if not daemonized and log to stdout.
        if self.parent['nodaemon']:
            self.parent['logfile'] = None
        else:
            self.parent['logfile'] = config.ErrorLogFile

        self.parent['pidfile'] = config.PIDFile

        # Verify that document root actually exists
        self.checkDirectory(
            config.DocumentRoot,
            "Document root",
            access=os.W_OK,
            #permissions=0750,
            #uname=config.UserName,
            #gname=config.GroupName
        )

        # Verify that ssl certs exist if needed
        if config.SSLPort:
            self.checkFile(
                config.SSLPrivateKey,
                "SSL Private key",
                access=os.R_OK,
                #permissions=0640
            )
            self.checkFile(
                config.SSLCertificate,
                "SSL Public key",
                access=os.R_OK,
                #permissions=0644
            )

        #
        # Nuke the file log observer's time format.
        #
        if not config.ErrorLogFile and config.ProcessType == 'Slave':
            log.FileLogObserver.timeFormat = ''

        # Check current umask and warn if changed
        oldmask = os.umask(config.umask)
        if oldmask != config.umask:
            log.msg("WARNING: changing umask from: 0%03o to 0%03o" % (
                oldmask, config.umask,))

        # Generate a shared secret that will be passed to any slave processes
        if not config.SharedSecret:
            c = tuple([random.randrange(sys.maxint) for _ in range(3)])
            config.SharedSecret = sha1('%d%d%d' % c).hexdigest()

    def checkDirectory(self, dirpath, description, access=None, fail=False, permissions=None, uname=None, gname=None):
        """Ensure dirpath exists, is a directory and is accessible, then
        run the ownership/permission security check on it.
        """
        if not os.path.exists(dirpath):
            raise ConfigurationError("%s does not exist: %s" % (description, dirpath,))
        elif not os.path.isdir(dirpath):
            raise ConfigurationError("%s is not a directory: %s" % (description, dirpath,))
        elif access and not os.access(dirpath, access):
            raise ConfigurationError("Insufficient permissions for server on %s directory: %s" % (description, dirpath,))
        self.securityCheck(dirpath, description, fail=fail, permissions=permissions, uname=uname, gname=gname)

    def checkFile(self, filepath, description, access=None, fail=False, permissions=None, uname=None, gname=None):
        """Ensure filepath exists, is a regular file and is accessible,
        then run the ownership/permission security check on it.
        """
        if not os.path.exists(filepath):
            raise ConfigurationError("%s does not exist: %s" % (description, filepath,))
        elif not os.path.isfile(filepath):
            raise ConfigurationError("%s is not a file: %s" % (description, filepath,))
        elif access and not os.access(filepath, access):
            raise ConfigurationError("Insufficient permissions for server on %s directory: %s" % (description, filepath,))
        self.securityCheck(filepath, description, fail=fail, permissions=permissions, uname=uname, gname=gname)

    def securityCheck(self, path, description, fail=False, permissions=None, uname=None, gname=None):
        """Compare path's mode/owner/group against the expected values.

        Raises ConfigurationError when ``fail`` is set; otherwise only logs
        a warning for each mismatch.
        """
        def raiseOrPrint(txt):
            if fail:
                raise ConfigurationError(txt)
            else:
                log.msg("WARNING: %s" % (txt,))

        pathstat = os.stat(path)
        if permissions:
            if stat.S_IMODE(pathstat[stat.ST_MODE]) != permissions:
                raiseOrPrint("The permisions on %s directory %s are 0%03o and do not match expected permissions: 0%03o"
                             % (description, path, stat.S_IMODE(pathstat[stat.ST_MODE]), permissions))
        if uname:
            import pwd
            try:
                pathuname = pwd.getpwuid(pathstat[stat.ST_UID])[0]
                # macOS service accounts may carry a leading underscore
                if pathuname not in (uname, "_" + uname):
                    raiseOrPrint("The owner of %s directory %s is %s and does not match the expected owner: %s"
                                 % (description, path, pathuname, uname))
            except KeyError:
                raiseOrPrint("The owner of %s directory %s is unknown (%s) and does not match the expected owner: %s"
                             % (description, path, pathstat[stat.ST_UID], uname))
        if gname:
            import grp
            try:
                pathgname = grp.getgrgid(pathstat[stat.ST_GID])[0]
                if pathgname != gname:
                    raiseOrPrint("The group of %s directory %s is %s and does not match the expected group: %s"
                                 % (description, path, pathgname, gname))
            except KeyError:
                raiseOrPrint("The group of %s directory %s is unknown (%s) and does not match the expected group: %s"
                             % (description, path, pathstat[stat.ST_GID], gname))
class CalDAVServiceMaker(object):
    """twistd plugin (IServiceMaker) that builds the CalDAV service tree."""
    implements(IPlugin, service.IServiceMaker)

    tapname = "caldav"
    description = "The Darwin Calendar Server"
    options = CalDAVOptions

    #
    # default resource classes
    #
    rootResourceClass = RootResource
    principalResourceClass = DirectoryPrincipalProvisioningResource
    calendarResourceClass = CalendarHomeProvisioningFile

    def makeService_Slave(self, options):
        """Build the service for a single server process.

        Used both for 'Slave' processes (behind the PythonDirector master)
        and, via the ``makeService_Single`` alias below, for standalone
        'Single' operation. Wires together: directory services, the WebDAV
        resource hierarchy with root ACLs, authentication (kerberos /
        digest / basic), logging wrappers and the HTTP/HTTPS listeners.
        """
        #
        # Setup the Directory
        #
        directories = []

        directoryClass = namedClass(config.DirectoryService['type'])
        log.msg("Configuring directory service of type: %s"
                % (config.DirectoryService['type'],))
        baseDirectory = directoryClass(**config.DirectoryService['params'])
        directories.append(baseDirectory)

        # optional sudoers directory layered on top of the base directory
        sudoDirectory = None
        if config.SudoersFile and os.path.exists(config.SudoersFile):
            log.msg("Configuring SudoDirectoryService with file: %s"
                    % (config.SudoersFile,))
            sudoDirectory = SudoDirectoryService(config.SudoersFile)
            sudoDirectory.realmName = baseDirectory.realmName

            CalDAVResource.sudoDirectory = sudoDirectory
            directories.append(sudoDirectory)
        else:
            log.msg("Not using SudoDirectoryService; file doesn't exist: %s"
                    % (config.SudoersFile,))

        directory = AggregateDirectoryService(directories)

        if sudoDirectory:
            directory.userRecordTypes.append(
                SudoDirectoryService.recordType_sudoers)

        #
        # Setup Resource hierarchy
        #
        log.msg("Setting up document root at: %s" % (config.DocumentRoot,))

        log.msg("Setting up principal collection: %r" % (self.principalResourceClass,))
        principalCollection = self.principalResourceClass(
            os.path.join(config.DocumentRoot, 'principals'),
            '/principals/',
            directory
        )

        log.msg("Setting up calendar collection: %r" % (self.calendarResourceClass,))
        calendarCollection = self.calendarResourceClass(
            os.path.join(config.DocumentRoot, 'calendars'),
            directory,
            '/calendars/'
        )

        log.msg("Setting up root resource: %r" % (self.rootResourceClass,))
        root = self.rootResourceClass(
            config.DocumentRoot,
            principalCollections=(principalCollection,)
        )
        root.putChild('principals', principalCollection)
        root.putChild('calendars', calendarCollection)

        # Configure default ACLs on the root resource
        log.msg("Setting up default ACEs on root resource")
        # everyone may read the root
        rootACEs = [
            davxml.ACE(
                davxml.Principal(davxml.All()),
                davxml.Grant(davxml.Privilege(davxml.Read())),
            ),
        ]

        log.msg("Setting up AdminPrincipals")
        # admins get full, protected, inheritable access
        for principal in config.AdminPrincipals:
            log.msg("Added %s as admin principal" % (principal,))
            rootACEs.append(
                davxml.ACE(
                    davxml.Principal(davxml.HRef(principal)),
                    davxml.Grant(davxml.Privilege(davxml.All())),
                    davxml.Protected(),
                    TwistedACLInheritable(),
                )
            )

        log.msg("Setting root ACL")
        root.setAccessControlList(davxml.ACL(*rootACEs))

        #
        # Configure the Site and Wrappers
        #
        credentialFactories = []

        portal = Portal(auth.DavRealm())
        portal.registerChecker(directory)

        realm = directory.realmName or ""
        log.msg("Configuring authentication for realm: %s" % (realm,))

        for scheme, schemeConfig in config.Authentication.iteritems():
            scheme = scheme.lower()

            credFactory = None

            if schemeConfig['Enabled']:
                log.msg("Setting up scheme: %s" % (scheme,))

                if scheme == 'kerberos':
                    if not NegotiateCredentialFactory:
                        log.msg("Kerberos support not available")
                        continue

                    # NOTE(review): this local rebinds the module-level
                    # ``twisted.application.service`` name inside this
                    # method -- historical quirk, left untouched
                    service = schemeConfig['ServicePrincipal']
                    if '@' in service:
                        rest, kerbRealm = service.split('@', 1)
                    else:
                        kerbRealm = config.ServerHostName

                    credFactory = NegotiateCredentialFactory(
                        service,
                        kerbRealm
                    )
                elif scheme == 'digest':
                    secret = schemeConfig['Secret']
                    if not secret and config.SharedSecret:
                        log.msg("Using master process shared secret for Digest authentication")
                        secret = config.SharedSecret
                    else:
                        log.msg("No shared secret for Digest authentication")

                    credFactory = QopDigestCredentialFactory(
                        schemeConfig['Algorithm'],
                        schemeConfig['Qop'],
                        secret,
                        realm
                    )
                elif scheme == 'basic':
                    credFactory = BasicCredentialFactory(realm)
                else:
                    log.err("Unknown scheme: %s" % (scheme,))

            if credFactory:
                credentialFactories.append(credFactory)

        log.msg("Configuring authentication wrapper")
        authWrapper = auth.AuthenticationWrapper(
            root,
            portal,
            credentialFactories,
            (auth.IPrincipal,)
        )

        logWrapper = LogWrapperResource(authWrapper)

        #
        # Configure the service
        #
        log.msg("Setting up service")

        # NOTE(review): only 'Slave' and 'Single' are handled here; other
        # process types are dispatched elsewhere by makeService()
        if config.ProcessType == 'Slave':
            # slaves report access logs to the master over the control socket
            realRoot = pdmonster.PDClientAddressWrapper(
                logWrapper,
                config.PythonDirector['ControlSocket'])

            logObserver = logging.AMPCommonAccessLoggingObserver(
                config.ControlSocket)
        elif config.ProcessType == 'Single':
            # single process logs directly to a rotating file
            realRoot = logWrapper

            logObserver = logging.RotatingFileAccessLoggingObserver(
                config.AccessLogFile)

        log.msg("Configuring log observer: %s" % (
            logObserver,))

        service = CalDAVService(logObserver)

        site = Site(realRoot)
        channel = http.HTTPFactory(site)

        if not config.BindAddresses:
            # empty interface string means "listen on all addresses"
            config.BindAddresses = [""]

        for bindAddress in config.BindAddresses:
            # fall back to the single HTTPPort/SSLPort settings when no
            # explicit port lists were configured
            if config.BindHTTPPorts:
                if config.HTTPPort == -1:
                    raise UsageError("HTTPPort required if BindHTTPPorts is not empty")
            elif config.HTTPPort != -1:
                config.BindHTTPPorts = [config.HTTPPort]

            if config.BindSSLPorts:
                if config.SSLPort == -1:
                    raise UsageError("SSLPort required if BindSSLPorts is not empty")
            elif config.SSLPort != -1:
                config.BindSSLPorts = [config.SSLPort]

            if config.BindSSLPorts:
                # import lazily: OpenSSL is only needed when SSL is enabled
                from twisted.internet.ssl import DefaultOpenSSLContextFactory

            for port in config.BindHTTPPorts:
                log.msg("Adding server at %s:%s" % (bindAddress, port))

                httpService = internet.TCPServer(int(port), channel, interface=bindAddress)
                httpService.setServiceParent(service)

            for port in config.BindSSLPorts:
                log.msg("Adding SSL server at %s:%s" % (bindAddress, port))

                httpsService = internet.SSLServer(
                    int(port), channel,
                    DefaultOpenSSLContextFactory(config.SSLPrivateKey, config.SSLCertificate),
                    interface=bindAddress
                )
                httpsService.setServiceParent(service)

        return service

    # cluster variants are provided by twistedcaldav.cluster; 'Single'
    # reuses the slave builder above
    makeService_Combined = makeService_Combined
    makeService_Master = makeService_Master
    makeService_Single = makeService_Slave

    def makeService(self, options):
        """Dispatch to ``makeService_<ProcessType>`` based on the config."""
        serverType = config.ProcessType

        serviceMethod = getattr(self, "makeService_%s" % (serverType,), None)

        if not serviceMethod:
            raise UsageError("Unknown server type %s. Please choose: Master, Slave or Combined"
                             % (serverType,))
        else:
            service = serviceMethod(options)

            # Temporary hack to work around SIGHUP problem
            # If there is a stopped process in the same session as the calendar server
            # and the calendar server is the group leader then when twistd forks to drop
            # privileges a SIGHUP may be sent by the kernel. This SIGHUP should be ignored.
            # Note that this handler is not unset, so any further SIGHUPs are also ignored.
            import signal
            def sighup_handler(num, frame):
                if frame is None:
                    location = "Unknown"
                else:
                    location = str(frame.f_code.co_name) + ": " + str(frame.f_lineno)
                log.msg("SIGHUP recieved at " + location)
            signal.signal(signal.SIGHUP, sighup_handler)

            return service
cosmetic
git-svn-id: 81e381228600e5752b80483efd2b45b26c451ea2@1492 e27351fd-9f3e-4f54-a53b-843176b1656c
##
# Copyright (c) 2005-2007 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DRI: David Reid, dreid@apple.com
##
try:
from hashlib import sha1
except ImportError:
import sha
sha1 = sha.new
import random
import os
import stat
import sys
from zope.interface import implements
from twisted.python import log
from twisted.python.usage import Options, UsageError
from twisted.python.reflect import namedClass
from twisted.application import internet, service
from twisted.plugin import IPlugin
from twisted.scripts.mktap import getid
from twisted.cred.portal import Portal
from twisted.web2.dav import auth
from twisted.web2.dav import davxml
from twisted.web2.dav.resource import TwistedACLInheritable
from twisted.web2.auth.basic import BasicCredentialFactory
from twisted.web2.channel import http
from twisted.web2.log import LogWrapperResource
from twisted.web2.server import Site
from twistedcaldav import logging
from twistedcaldav.cluster import makeService_Combined, makeService_Master
from twistedcaldav.config import config, parseConfig, defaultConfig, ConfigurationError
from twistedcaldav.logging import RotatingFileAccessLoggingObserver
from twistedcaldav.root import RootResource
from twistedcaldav.resource import CalDAVResource
from twistedcaldav.directory.digest import QopDigestCredentialFactory
from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
from twistedcaldav.directory.aggregate import AggregateDirectoryService
from twistedcaldav.directory.sudo import SudoDirectoryService
from twistedcaldav.static import CalendarHomeProvisioningFile
from twistedcaldav import pdmonster
try:
from twistedcaldav.authkerb import NegotiateCredentialFactory
except ImportError:
NegotiateCredentialFactory = None
class CalDAVService(service.MultiService):
def __init__(self, logObserver):
self.logObserver = logObserver
service.MultiService.__init__(self)
def privilegedStartService(self):
service.MultiService.privilegedStartService(self)
self.logObserver.start()
def stopService(self):
service.MultiService.stopService(self)
self.logObserver.stop()
class CalDAVOptions(Options):
    """twistd command-line options for the caldav tap.

    Provides --config/-f for the plist configuration file and a repeatable
    --option/-o flag to override individual configuration keys.
    """
    optParameters = [[
        "config", "f", "/etc/caldavd/caldavd.plist", "Path to configuration file."
    ]]

    zsh_actions = {"config" : "_files -g '*.plist'"}

    def __init__(self, *args, **kwargs):
        super(CalDAVOptions, self).__init__(*args, **kwargs)
        # Config keys overridden on the command line; applied in postOptions.
        self.overrides = {}

    def opt_option(self, option):
        """
        Set an option to override a value in the config file. True, False, int,
        and float options are supported, as well as comma separated lists. Only
        one option may be given for each --option flag, however multiple
        --option flags may be specified.
        """
        if '=' in option:
            # Split on the first '=' only so that values may themselves
            # contain '=' (a plain split() raised ValueError for those).
            key, value = option.split('=', 1)

            if key in defaultConfig:
                if isinstance(defaultConfig[key], bool):
                    # Anything other than the literal string "True" is False.
                    value = value == "True"
                elif isinstance(defaultConfig[key], (int, float, long)):
                    value = type(defaultConfig[key])(value)
                elif isinstance(defaultConfig[key], (list, tuple)):
                    value = value.split(',')
                elif isinstance(defaultConfig[key], dict):
                    raise UsageError("Dict options not supported on the command line")
                elif value == 'None':
                    value = None

            self.overrides[key] = value
        else:
            # A bare key is shorthand for key=True.
            self.opt_option('%s=True' % (option,))

    opt_o = opt_option

    def postOptions(self):
        """Load the config file, apply overrides, validate uid/gid dropping,
        wire logfile/pidfile into the parent options and sanity-check the
        document root and SSL files."""
        if not os.path.exists(self['config']):
            log.msg("Config file %s not found, using defaults" % (
                self['config'],))

        # parseConfig is still called for a missing file -- presumably it
        # tolerates that and leaves the defaults in place; TODO confirm.
        parseConfig(self['config'])
        config.updateDefaults(self.overrides)

        uid, gid = None, None
        if self.parent['uid'] or self.parent['gid']:
            uid, gid = getid(self.parent['uid'],
                             self.parent['gid'])

        # Only root may drop privileges to a different uid/gid.
        if uid:
            if uid != os.getuid() and os.getuid() != 0:
                import pwd
                username = pwd.getpwuid(os.getuid())[0]
                raise UsageError("Only root can drop privileges you are: %r"
                                 % (username,))

        if gid:
            if gid != os.getgid() and os.getgid() != 0:
                import grp
                groupname = grp.getgrgid(os.getgid())[0]
                raise UsageError("Only root can drop privileges, you are: %s"
                                 % (groupname,))

        # Ignore the logfile parameter if not daemonized and log to stdout.
        if self.parent['nodaemon']:
            self.parent['logfile'] = None
        else:
            self.parent['logfile'] = config.ErrorLogFile

        self.parent['pidfile'] = config.PIDFile

        # Verify that document root actually exists
        self.checkDirectory(
            config.DocumentRoot,
            "Document root",
            access=os.W_OK,
            #permissions=0750,
            #uname=config.UserName,
            #gname=config.GroupName
        )

        # Verify that ssl certs exist if needed
        if config.SSLPort:
            self.checkFile(
                config.SSLPrivateKey,
                "SSL Private key",
                access=os.R_OK,
                #permissions=0640
            )
            self.checkFile(
                config.SSLCertificate,
                "SSL Public key",
                access=os.R_OK,
                #permissions=0644
            )

        #
        # Nuke the file log observer's time format.
        #
        if not config.ErrorLogFile and config.ProcessType == 'Slave':
            log.FileLogObserver.timeFormat = ''

        # Check current umask and warn if changed
        oldmask = os.umask(config.umask)
        if oldmask != config.umask:
            log.msg("WARNING: changing umask from: 0%03o to 0%03o" % (
                oldmask, config.umask,))

        # Generate a shared secret that will be passed to any slave processes.
        # NOTE(review): random is not a CSPRNG; consider os.urandom here.
        if not config.SharedSecret:
            c = tuple([random.randrange(sys.maxint) for _ in range(3)])
            config.SharedSecret = sha1('%d%d%d' % c).hexdigest()

    def checkDirectory(self, dirpath, description, access=None, fail=False, permissions=None, uname=None, gname=None):
        """Ensure dirpath exists, is a directory and (optionally) is
        accessible with the given os.access mode; then run securityCheck.

        Raises ConfigurationError on a hard failure.
        """
        if not os.path.exists(dirpath):
            raise ConfigurationError("%s does not exist: %s" % (description, dirpath,))
        elif not os.path.isdir(dirpath):
            raise ConfigurationError("%s is not a directory: %s" % (description, dirpath,))
        elif access and not os.access(dirpath, access):
            raise ConfigurationError("Insufficient permissions for server on %s directory: %s" % (description, dirpath,))
        self.securityCheck(dirpath, description, fail=fail, permissions=permissions, uname=uname, gname=gname)

    def checkFile(self, filepath, description, access=None, fail=False, permissions=None, uname=None, gname=None):
        """Like checkDirectory but for a regular file."""
        if not os.path.exists(filepath):
            raise ConfigurationError("%s does not exist: %s" % (description, filepath,))
        elif not os.path.isfile(filepath):
            raise ConfigurationError("%s is not a file: %s" % (description, filepath,))
        elif access and not os.access(filepath, access):
            raise ConfigurationError("Insufficient permissions for server on %s directory: %s" % (description, filepath,))
        self.securityCheck(filepath, description, fail=fail, permissions=permissions, uname=uname, gname=gname)

    def securityCheck(self, path, description, fail=False, permissions=None, uname=None, gname=None):
        """Verify mode/owner/group of path against the expected values.

        With fail=True a mismatch raises ConfigurationError; otherwise it is
        only logged as a warning.
        """
        def raiseOrPrint(txt):
            if fail:
                raise ConfigurationError(txt)
            else:
                log.msg("WARNING: %s" % (txt,))

        pathstat = os.stat(path)

        if permissions:
            if stat.S_IMODE(pathstat[stat.ST_MODE]) != permissions:
                # (message typo "permisions" fixed)
                raiseOrPrint("The permissions on %s directory %s are 0%03o and do not match expected permissions: 0%03o"
                             % (description, path, stat.S_IMODE(pathstat[stat.ST_MODE]), permissions))

        if uname:
            import pwd
            try:
                pathuname = pwd.getpwuid(pathstat[stat.ST_UID])[0]
                # Accept the "_"-prefixed daemon-account variant of the name.
                if pathuname not in (uname, "_" + uname):
                    raiseOrPrint("The owner of %s directory %s is %s and does not match the expected owner: %s"
                                 % (description, path, pathuname, uname))
            except KeyError:
                raiseOrPrint("The owner of %s directory %s is unknown (%s) and does not match the expected owner: %s"
                             % (description, path, pathstat[stat.ST_UID], uname))

        if gname:
            import grp
            try:
                pathgname = grp.getgrgid(pathstat[stat.ST_GID])[0]
                if pathgname != gname:
                    raiseOrPrint("The group of %s directory %s is %s and does not match the expected group: %s"
                                 % (description, path, pathgname, gname))
            except KeyError:
                raiseOrPrint("The group of %s directory %s is unknown (%s) and does not match the expected group: %s"
                             % (description, path, pathstat[stat.ST_GID], gname))
class CalDAVServiceMaker(object):
    """twistd service maker (the "caldav" tap) for the Darwin Calendar
    Server; builds the directory services, resource tree, authentication
    wrappers and listening ports from the global config."""
    implements(IPlugin, service.IServiceMaker)

    tapname = "caldav"
    description = "The Darwin Calendar Server"
    options = CalDAVOptions

    #
    # default resource classes
    #
    rootResourceClass = RootResource
    principalResourceClass = DirectoryPrincipalProvisioningResource
    calendarResourceClass = CalendarHomeProvisioningFile

    def makeService_Slave(self, options):
        """Build the slave (or single-process) CalDAV service and return it."""
        #
        # Setup the Directory
        #
        directories = []

        directoryClass = namedClass(config.DirectoryService['type'])
        log.msg("Configuring directory service of type: %s"
                % (config.DirectoryService['type'],))
        baseDirectory = directoryClass(**config.DirectoryService['params'])
        directories.append(baseDirectory)

        sudoDirectory = None
        if config.SudoersFile and os.path.exists(config.SudoersFile):
            log.msg("Configuring SudoDirectoryService with file: %s"
                    % (config.SudoersFile,))
            sudoDirectory = SudoDirectoryService(config.SudoersFile)
            sudoDirectory.realmName = baseDirectory.realmName
            # Make the sudoers directory reachable from every CalDAVResource
            # (class-level attribute: shared by all resources).
            CalDAVResource.sudoDirectory = sudoDirectory
            directories.append(sudoDirectory)
        else:
            log.msg("Not using SudoDirectoryService; file doesn't exist: %s"
                    % (config.SudoersFile,))

        directory = AggregateDirectoryService(directories)

        if sudoDirectory:
            directory.userRecordTypes.append(
                SudoDirectoryService.recordType_sudoers)

        #
        # Setup Resource hierarchy
        #
        log.msg("Setting up document root at: %s" % (config.DocumentRoot,))
        log.msg("Setting up principal collection: %r" % (self.principalResourceClass,))
        principalCollection = self.principalResourceClass(
            os.path.join(config.DocumentRoot, 'principals'),
            '/principals/',
            directory
        )

        log.msg("Setting up calendar collection: %r" % (self.calendarResourceClass,))
        calendarCollection = self.calendarResourceClass(
            os.path.join(config.DocumentRoot, 'calendars'),
            directory,
            '/calendars/'
        )

        log.msg("Setting up root resource: %r" % (self.rootResourceClass,))
        root = self.rootResourceClass(
            config.DocumentRoot,
            principalCollections=(principalCollection,)
        )

        root.putChild('principals', principalCollection)
        root.putChild('calendars', calendarCollection)

        # Configure default ACLs on the root resource: everyone may read;
        # each admin principal (below) gets full, protected, inheritable
        # access.
        log.msg("Setting up default ACEs on root resource")
        rootACEs = [
            davxml.ACE(
                davxml.Principal(davxml.All()),
                davxml.Grant(davxml.Privilege(davxml.Read())),
            ),
        ]

        log.msg("Setting up AdminPrincipals")
        for principal in config.AdminPrincipals:
            log.msg("Added %s as admin principal" % (principal,))
            rootACEs.append(
                davxml.ACE(
                    davxml.Principal(davxml.HRef(principal)),
                    davxml.Grant(davxml.Privilege(davxml.All())),
                    davxml.Protected(),
                    TwistedACLInheritable(),
                )
            )

        log.msg("Setting root ACL")
        root.setAccessControlList(davxml.ACL(*rootACEs))

        #
        # Configure the Site and Wrappers
        #
        credentialFactories = []

        portal = Portal(auth.DavRealm())
        portal.registerChecker(directory)

        realm = directory.realmName or ""
        log.msg("Configuring authentication for realm: %s" % (realm,))

        # Build one credential factory per enabled authentication scheme.
        for scheme, schemeConfig in config.Authentication.iteritems():
            scheme = scheme.lower()
            credFactory = None

            if schemeConfig['Enabled']:
                log.msg("Setting up scheme: %s" % (scheme,))

                if scheme == 'kerberos':
                    if not NegotiateCredentialFactory:
                        log.msg("Kerberos support not available")
                        continue
                    # NOTE(review): this rebinds the module name 'service'
                    # (twisted.application.service); harmless only because it
                    # is reassigned below -- confusing all the same.
                    service = schemeConfig['ServicePrincipal']
                    if '@' in service:
                        rest, kerbRealm = service.split('@', 1)
                    else:
                        kerbRealm = config.ServerHostName
                    credFactory = NegotiateCredentialFactory(
                        service,
                        kerbRealm
                    )
                elif scheme == 'digest':
                    secret = schemeConfig['Secret']
                    if not secret and config.SharedSecret:
                        log.msg("Using master process shared secret for Digest authentication")
                        secret = config.SharedSecret
                    else:
                        # NOTE(review): this message is also emitted when a
                        # per-scheme secret IS configured; the wording only
                        # fits the "no secret anywhere" case -- confirm intent.
                        log.msg("No shared secret for Digest authentication")
                    credFactory = QopDigestCredentialFactory(
                        schemeConfig['Algorithm'],
                        schemeConfig['Qop'],
                        secret,
                        realm
                    )
                elif scheme == 'basic':
                    credFactory = BasicCredentialFactory(realm)
                else:
                    log.err("Unknown scheme: %s" % (scheme,))

            if credFactory:
                credentialFactories.append(credFactory)

        log.msg("Configuring authentication wrapper")
        authWrapper = auth.AuthenticationWrapper(
            root,
            portal,
            credentialFactories,
            (auth.IPrincipal,)
        )

        logWrapper = LogWrapperResource(authWrapper)

        #
        # Configure the service
        #
        log.msg("Setting up service")

        # NOTE(review): if ProcessType is anything other than 'Slave' or
        # 'Single', realRoot/logObserver are never bound and the code below
        # raises NameError.
        if config.ProcessType == 'Slave':
            realRoot = pdmonster.PDClientAddressWrapper(
                logWrapper,
                config.PythonDirector['ControlSocket']
            )
            logObserver = logging.AMPCommonAccessLoggingObserver(config.ControlSocket)
        elif config.ProcessType == 'Single':
            realRoot = logWrapper
            logObserver = logging.RotatingFileAccessLoggingObserver(config.AccessLogFile)

        log.msg("Configuring log observer: %s" % (
            logObserver,))

        service = CalDAVService(logObserver)

        site = Site(realRoot)
        channel = http.HTTPFactory(site)

        if not config.BindAddresses:
            config.BindAddresses = [""]

        for bindAddress in config.BindAddresses:
            # The port-list defaulting below is re-run for every bind address;
            # it is idempotent after the first pass.
            if config.BindHTTPPorts:
                if config.HTTPPort == -1:
                    raise UsageError("HTTPPort required if BindHTTPPorts is not empty")
            elif config.HTTPPort != -1:
                config.BindHTTPPorts = [config.HTTPPort]

            if config.BindSSLPorts:
                if config.SSLPort == -1:
                    raise UsageError("SSLPort required if BindSSLPorts is not empty")
            elif config.SSLPort != -1:
                config.BindSSLPorts = [config.SSLPort]

            if config.BindSSLPorts:
                # Imported lazily so a non-SSL deployment needs no OpenSSL.
                from twisted.internet.ssl import DefaultOpenSSLContextFactory

            for port in config.BindHTTPPorts:
                log.msg("Adding server at %s:%s" % (bindAddress, port))
                httpService = internet.TCPServer(int(port), channel, interface=bindAddress)
                httpService.setServiceParent(service)

            for port in config.BindSSLPorts:
                log.msg("Adding SSL server at %s:%s" % (bindAddress, port))
                contextFactory = DefaultOpenSSLContextFactory(config.SSLPrivateKey, config.SSLCertificate)
                httpsService = internet.SSLServer(int(port), channel, contextFactory, interface=bindAddress)
                httpsService.setServiceParent(service)

        return service

    # Cluster-level factories (imported from twistedcaldav.cluster) exposed
    # under the names makeService() resolves; 'Single' reuses the slave setup.
    makeService_Combined = makeService_Combined
    makeService_Master = makeService_Master
    makeService_Single = makeService_Slave

    def makeService(self, options):
        """Dispatch to makeService_<ProcessType>; installs a SIGHUP handler
        (see comment below) before returning the service."""
        serverType = config.ProcessType

        serviceMethod = getattr(self, "makeService_%s" % (serverType,), None)

        if not serviceMethod:
            raise UsageError("Unknown server type %s. Please choose: Master, Slave or Combined"
                             % (serverType,))
        else:
            service = serviceMethod(options)

            # Temporary hack to work around SIGHUP problem
            # If there is a stopped process in the same session as the calendar server
            # and the calendar server is the group leader then when twistd forks to drop
            # privileges a SIGHUP may be sent by the kernel. This SIGHUP should be ignored.
            # Note that this handler is not unset, so any further SIGHUPs are also ignored.
            import signal
            def sighup_handler(num, frame):
                if frame is None:
                    location = "Unknown"
                else:
                    location = str(frame.f_code.co_name) + ": " + str(frame.f_lineno)
                log.msg("SIGHUP recieved at " + location)
            signal.signal(signal.SIGHUP, sighup_handler)

            return service
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
    'name': 'Brazilian Localization Sale',
    'category': 'Localisation',
    'license': 'AGPL-3',
    'author': 'Akretion, Odoo Community Association (OCA)',
    'website': 'http://odoo-brasil.org',
    'version': '10.0.1.0.0',
    'depends': [
        'l10n_br_account',
        'account_fiscal_position_rule_sale',
    ],
    'data': [
        'data/l10n_br_sale_data.xml',
        'views/sale_view.xml',
        'views/res_config_view.xml',
        'security/ir.model.access.csv',
        'security/l10n_br_sale_security.xml',
        'report/sale_report_view.xml',
    ],
    'test': [],
    'demo': [],
    # Fix: the module was flagged non-installable, which contradicts
    # auto_install=True (Odoo skips non-installable modules entirely, so the
    # auto-install could never trigger).
    'installable': True,
    'auto_install': True,
}
[FIX] Set installable
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
    # Module identity
    'name': 'Brazilian Localization Sale',
    'version': '10.0.1.0.0',
    'category': 'Localisation',
    'license': 'AGPL-3',
    'author': 'Akretion, Odoo Community Association (OCA)',
    'website': 'http://odoo-brasil.org',
    # Modules that must be installed first
    'depends': [
        'l10n_br_account',
        'account_fiscal_position_rule_sale',
    ],
    # Data files loaded on install/update
    'data': [
        'data/l10n_br_sale_data.xml',
        'views/sale_view.xml',
        'views/res_config_view.xml',
        'security/ir.model.access.csv',
        'security/l10n_br_sale_security.xml',
        'report/sale_report_view.xml',
    ],
    'test': [],
    'demo': [],
    # Installed automatically once all dependencies are present
    'installable': True,
    'auto_install': True,
}
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class ResCompany(orm.Model):
    """ Model name: ResCompany
        One-shot maintenance procedures: CSV export of C01 inventory status
        and migration of legacy cost fields.
    """
    _inherit = 'res.company'

    # Procedure:
    def export_product_status_for_inventory(self, cr, uid, ids, context=None):
        ''' Export inventory data from order and picking
            Writes one pipe-separated CSV row per C01 product with its
            purchase-order price history and incoming stock moves.
        '''
        # Output file:
        # NOTE(review): hard-coded absolute path; breaks on any other host.
        log_file = '/home/administrator/photo/output/C01_inventory.csv'
        f_log = open(log_file, 'w')

        # Pool used:
        product_pool = self.pool.get('product.product')
        line_pool = self.pool.get('purchase.order.line')
        move_pool = self.pool.get('stock.move')

        # Database:
        move_db = {}  # product_id -> list of done stock.move records
        line_db = {}  # product_id -> list of confirmed purchase lines

        # Search only product in cat. stat: C01:
        _logger.warning('Start select product')
        product_ids = product_pool.search(cr, uid, [
            ('statistic_category', '=', 'C01'),
            ], context=context)
        _logger.warning('End select, total: %s' % len(product_ids))

        # Check purchase line for price:
        _logger.warning('Start select product')
        line_ids = line_pool.search(cr, uid, [
            ('product_id', 'in', product_ids),
            ('order_id.state', 'not in', ('draft', 'cancel')),
            ], context=context)
        _logger.warning('End select purchase, total: %s' % len(line_ids))
        for line in line_pool.browse(cr, uid, line_ids, context=context):
            if line.product_id.id not in line_db:
                line_db[line.product_id.id] = []
            line_db[line.product_id.id].append(line)

        # Check stock move for sold in year
        _logger.warning('Start select product')
        move_ids = move_pool.search(cr, uid, [
            ('product_id', 'in', product_ids),
            ('state', '=', 'done'),
            ], context=context)
        _logger.warning('End select move, total: %s' % len(move_ids))
        for move in move_pool.browse(cr, uid, move_ids, context=context):
            if move.product_id.id not in move_db:
                move_db[move.product_id.id] = []
            move_db[move.product_id.id].append(move)

        # Create database list for product:
        _logger.warning('Start export product')
        f_log.write('Codice|INV|Costo azienda|Fatt.|Prezzo diff.|Movimentato 2016|OF|MM\n')
        for product in product_pool.browse(
                cr, uid, product_ids, context=context):
            # OF status: collect purchase-order names and per-order prices;
            # flag when the same product was bought at different prices.
            of_status = ''
            price_unit = 0.0
            price_difference = False
            of_name = ''
            for line in line_db.get(product.id, []):
                # Skip inventory-adjustment orders:
                if line.order_id.name.startswith('INV'):
                    continue
                if not price_unit:
                    price_unit = line.price_unit
                elif price_unit != line.price_unit:
                    price_difference = True
                of_name += '%s ' % line.order_id.name
                of_status += '[ %s doc. %s ]' % (
                    line.price_unit,
                    line.order_id.date_order[:10],
                    )

            # MM status: incoming (WH/IN) moves; flag products moved in 2016.
            mm_status = ''
            move_date = ''  # TODO
            moved = False
            for line in move_db.get(product.id, []):
                # WH/IN/00005 excluded (presumably the opening inventory
                # picking -- confirm).
                if line.picking_id.name.startswith('WH/IN/00005'):
                    continue
                if not line.picking_id.name.startswith('WH/IN'):
                    continue
                if line.create_date > '2016-01-01':
                    moved = True
                mm_status += '[ %s doc. %s ]' % (
                    line.create_date[:10],
                    line.picking_id.name,  # date_done
                    )

            f_log.write('%s|%s|%s|%s|%s|%s|%s|%s\n' % (
                product.default_code,
                product.mx_start_qty,
                product.company_cost,
                of_name,
                'X' if price_difference else '',
                'X' if moved else '',
                of_status,
                mm_status,
                ))
        f_log.close()
        _logger.warning('End export product')
        return True

    def save_cost_in_cost_method(self, cr, uid, ids, context=None):
        ''' Migrate 3 cost from old part in new cost management
            Copies cost_in_stock -> company_cost and
            cost_for_sale -> customer_cost for all I01..I06 products,
            logging previous values to CSV first.
        '''
        # Log operation
        log_file = '/home/administrator/photo/output/indoor_cost_migration.csv'
        f_log = open(log_file, 'w')
        _logger.warning('Start migrate cost log on: %s' % log_file)

        product_pool = self.pool.get('product.product')
        product_ids = product_pool.search(cr, uid, [
            ('statistic_category', 'in', (
                'I01', 'I02', 'I03', 'I04', 'I05', 'I06')),
            ], context=context)

        # NOTE(review): the header lists 8 columns but each row has only 7
        # values, so columns after "Codice" are shifted -- confirm which
        # field is missing.
        f_log.write(
            'Codice|INV|Cat. Stat.|Costo fornitore|Azienda Da|A|Cliente Da|A\n')
        res = {}
        for product in product_pool.browse(
                cr, uid, product_ids, context=context):
            f_log.write(
                '%s|%s|%s|%s|%s|%s|%s\n' % (
                    product.default_code,
                    product.statistic_category,
                    product.standard_price,
                    product.cost_in_stock,
                    product.company_cost,
                    product.cost_for_sale,
                    product.customer_cost,
                    ))
            res[product.id] = {
                'company_cost': product.cost_in_stock,
                'customer_cost': product.cost_for_sale,
                }

        for product_id, data in res.iteritems():
            product_pool.write(cr, uid, product_id, data, context=context)
        _logger.info('Migrated data!')
        f_log.close()
        return True
Add "Pz. camion" (truck-pieces) column to the C01 inventory export
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class ResCompany(orm.Model):
    """ Model name: ResCompany
        One-shot maintenance procedures: CSV export of C01 inventory status
        (with truck-pieces column) and migration of legacy cost fields.
    """
    _inherit = 'res.company'

    # Procedure:
    def export_product_status_for_inventory(self, cr, uid, ids, context=None):
        ''' Export inventory data from order and picking
            Writes one pipe-separated CSV row per C01 product with its
            purchase-order price history, truck pieces and incoming moves.
        '''
        # Output file:
        # NOTE(review): hard-coded absolute path; breaks on any other host.
        log_file = '/home/administrator/photo/output/C01_inventory.csv'
        f_log = open(log_file, 'w')

        # Pool used:
        product_pool = self.pool.get('product.product')
        line_pool = self.pool.get('purchase.order.line')
        move_pool = self.pool.get('stock.move')

        # Database:
        move_db = {}  # product_id -> list of done stock.move records
        line_db = {}  # product_id -> list of confirmed purchase lines

        # Search only product in cat. stat: C01:
        _logger.warning('Start select product')
        product_ids = product_pool.search(cr, uid, [
            ('statistic_category', '=', 'C01'),
            ], context=context)
        _logger.warning('End select, total: %s' % len(product_ids))

        # Check purchase line for price:
        _logger.warning('Start select product')
        line_ids = line_pool.search(cr, uid, [
            ('product_id', 'in', product_ids),
            ('order_id.state', 'not in', ('draft', 'cancel')),
            ], context=context)
        _logger.warning('End select purchase, total: %s' % len(line_ids))
        for line in line_pool.browse(cr, uid, line_ids, context=context):
            if line.product_id.id not in line_db:
                line_db[line.product_id.id] = []
            line_db[line.product_id.id].append(line)

        # Check stock move for sold in year
        _logger.warning('Start select product')
        move_ids = move_pool.search(cr, uid, [
            ('product_id', 'in', product_ids),
            ('state', '=', 'done'),
            ], context=context)
        _logger.warning('End select move, total: %s' % len(move_ids))
        for move in move_pool.browse(cr, uid, move_ids, context=context):
            if move.product_id.id not in move_db:
                move_db[move.product_id.id] = []
            move_db[move.product_id.id].append(move)

        # Create database list for product:
        _logger.warning('Start export product')
        f_log.write('Codice|INV|Costo azienda|Pz. camion|Prezzo diff.|Movimentato 2016|Fatt.|OF|MM\n')
        for product in product_pool.browse(
                cr, uid, product_ids, context=context):
            # OF status: purchase-order names and per-order prices; flag when
            # the same product was bought at different prices.
            of_status = ''
            price_unit = 0.0
            price_difference = False
            of_name = ''
            for line in line_db.get(product.id, []):
                # Skip inventory-adjustment orders:
                if line.order_id.name.startswith('INV'):
                    continue
                if not price_unit:
                    price_unit = line.price_unit
                elif price_unit != line.price_unit:
                    price_difference = True
                of_name += '%s ' % line.order_id.name
                of_status += '[ %s doc. %s ]' % (
                    line.price_unit,
                    line.order_id.date_order[:10],
                    )

            # MM status: incoming (WH/IN) moves; flag products moved in 2016.
            mm_status = ''
            move_date = ''  # TODO
            moved = False
            for line in move_db.get(product.id, []):
                # WH/IN/00005 excluded (presumably the opening inventory
                # picking -- confirm).
                if line.picking_id.name.startswith('WH/IN/00005'):
                    continue
                if not line.picking_id.name.startswith('WH/IN'):
                    continue
                if line.create_date > '2016-01-01':
                    moved = True
                mm_status += '[ %s doc. %s ]' % (
                    line.create_date[:10],
                    line.picking_id.name,  # date_done
                    )

            # Fixed: the tuple was missing the comma after the truck-pieces
            # element (the '%s' % (...) string ended up being *called*,
            # a TypeError at runtime) and the format had only 8 placeholders
            # for the 9 values / 9 header columns.
            f_log.write('%s|%s|%s|%s|%s|%s|%s|%s|%s\n' % (
                product.default_code,
                product.mx_start_qty,
                product.company_cost,
                '%s' % ([item.quantity for item in product.transport_ids], ),
                'X' if price_difference else '',
                'X' if moved else '',
                of_name,
                of_status,
                mm_status,
                ))
        f_log.close()
        _logger.warning('End export product')
        return True

    def save_cost_in_cost_method(self, cr, uid, ids, context=None):
        ''' Migrate 3 cost from old part in new cost management
            Copies cost_in_stock -> company_cost and
            cost_for_sale -> customer_cost for all I01..I06 products,
            logging previous values to CSV first.
        '''
        # Log operation
        log_file = '/home/administrator/photo/output/indoor_cost_migration.csv'
        f_log = open(log_file, 'w')
        _logger.warning('Start migrate cost log on: %s' % log_file)

        product_pool = self.pool.get('product.product')
        product_ids = product_pool.search(cr, uid, [
            ('statistic_category', 'in', (
                'I01', 'I02', 'I03', 'I04', 'I05', 'I06')),
            ], context=context)

        # NOTE(review): the header lists 8 columns but each row has only 7
        # values, so columns after "Codice" are shifted -- confirm which
        # field is missing.
        f_log.write(
            'Codice|INV|Cat. Stat.|Costo fornitore|Azienda Da|A|Cliente Da|A\n')
        res = {}
        for product in product_pool.browse(
                cr, uid, product_ids, context=context):
            f_log.write(
                '%s|%s|%s|%s|%s|%s|%s\n' % (
                    product.default_code,
                    product.statistic_category,
                    product.standard_price,
                    product.cost_in_stock,
                    product.company_cost,
                    product.cost_for_sale,
                    product.customer_cost,
                    ))
            res[product.id] = {
                'company_cost': product.cost_in_stock,
                'customer_cost': product.cost_for_sale,
                }

        for product_id, data in res.iteritems():
            product_pool.write(cr, uid, product_id, data, context=context)
        _logger.info('Migrated data!')
        f_log.close()
        return True
|
from django.test import TestCase
from django.contrib.auth.models import User
from account.models import SignupCode
from kaleo.models import JoinInvitation
class TestsJoinInvitation(TestCase):
    """Tests for JoinInvitation acceptance and independent-join processing."""

    def setUp(self):
        # Importing kaleo.receivers connects the signal receivers that keep
        # the sender's InvitationStat up to date; without it the accept()
        # bookkeeping asserted in test_accept is never performed.
        import kaleo.receivers  # noqa
        self.to_user = User.objects.create(username='foo1')
        self.from_user = User.objects.create(username='foo2')
        self.signup_code = SignupCode.create(email="me@you.com")
        self.signup_code.save()
        self.status = JoinInvitation.STATUS_ACCEPTED
        self.invitation = JoinInvitation.objects.create(
            from_user=self.from_user,
            status=self.status,
            signup_code=self.signup_code,
        )

    def test_to_user_email(self):
        """The signup code keeps the address it was created with."""
        self.assertEqual(self.signup_code.email, "me@you.com")

    def test_accept(self):
        """Accepting an invitation increments the sender's accepted count."""
        self.invitation.accept(self.to_user)
        self.assertEqual(self.from_user.invitationstat.invites_accepted, 1)

    def test_process_independent_joins(self):
        """A user joining on their own flips matching invites to
        STATUS_JOINED_INDEPENDENTLY."""
        JoinInvitation.process_independent_joins(self.to_user, "me@you.com")
        invite = JoinInvitation.objects.get(pk=self.invitation.pk)
        self.assertEqual(invite.status, JoinInvitation.STATUS_JOINED_INDEPENDENTLY)
Make sure receivers are activated for tests
from django.test import TestCase
from django.contrib.auth.models import User
import kaleo.receivers # noqa
from account.models import SignupCode
from kaleo.models import JoinInvitation
class TestsJoinInvitation(TestCase):
    """Exercise JoinInvitation: recipient email, acceptance bookkeeping and
    independent-join processing."""

    def setUp(self):
        code = SignupCode.create(email="me@you.com")
        code.save()
        self.signup_code = code
        self.to_user = User.objects.create(username='foo1')
        self.from_user = User.objects.create(username='foo2')
        self.status = JoinInvitation.STATUS_ACCEPTED
        self.invitation = JoinInvitation.objects.create(
            signup_code=self.signup_code,
            from_user=self.from_user,
            status=self.status,
        )

    def test_to_user_email(self):
        """The signup code keeps the address it was created with."""
        self.assertEqual(self.signup_code.email, "me@you.com")

    def test_accept(self):
        """Accepting an invitation increments the sender's accepted count."""
        self.invitation.accept(self.to_user)
        stats = self.from_user.invitationstat
        self.assertEqual(stats.invites_accepted, 1)

    def test_process_independent_joins(self):
        """A user joining on their own flips matching invites to
        STATUS_JOINED_INDEPENDENTLY."""
        JoinInvitation.process_independent_joins(self.to_user, "me@you.com")
        refreshed = JoinInvitation.objects.get(pk=self.invitation.pk)
        self.assertEqual(
            refreshed.status, JoinInvitation.STATUS_JOINED_INDEPENDENTLY)
|
# -*- coding: utf-8 -*-
"""
VK.com OpenAPI, OAuth2 and Iframe application OAuth2 backends, docs at:
http://psa.matiasaguirre.net/docs/backends/vk.html
"""
from time import time
from hashlib import md5
from social.utils import parse_qs
from social.backends.base import BaseAuth
from social.backends.oauth import BaseOAuth2
from social.exceptions import AuthTokenRevoked, AuthException
class VKontakteOpenAPI(BaseAuth):
    """VK.COM OpenAPI authentication backend"""
    name = 'vk-openapi'
    ID_KEY = 'id'

    def get_user_details(self, response):
        """Return user details from VK.com request"""
        nickname = response.get('nickname') or ''
        fullname, first_name, last_name = self.get_user_names(
            first_name=response.get('first_name', [''])[0],
            last_name=response.get('last_name', [''])[0]
        )
        details = {
            'email': '',
            'fullname': fullname,
            'first_name': first_name,
            'last_name': last_name,
        }
        # Fall back to the numeric VK id when no nickname was supplied.
        details['username'] = nickname if nickname else response['id']
        return details

    def user_data(self, access_token, *args, **kwargs):
        """The OpenAPI flow delivers the profile with the request itself."""
        return self.data

    def auth_html(self):
        """Returns local VK authentication page, not necessary for
        VK to authenticate.
        """
        context = {
            'VK_APP_ID': self.setting('APP_ID'),
            'VK_COMPLETE_URL': self.redirect_uri,
        }
        template = self.setting('LOCAL_HTML', 'vkontakte.html')
        return self.strategy.render_html(tpl=template, context=context)

    def auth_complete(self, *args, **kwargs):
        """Performs check of authentication in VKontakte, returns User if
        succeeded"""
        session_value = self.strategy.session_get(
            'vk_app_' + self.setting('APP_ID')
        )
        if 'id' not in self.data or not session_value:
            raise ValueError('VK.com authentication is not completed')

        mapping = parse_qs(session_value)
        check_str = ''.join('%s=%s' % (field, mapping[field])
                            for field in ('expire', 'mid', 'secret', 'sid'))
        key, secret = self.get_key_and_secret()
        signature = md5((check_str + secret).encode('utf-8')).hexdigest()
        # Reject a forged signature or an expired session.
        if signature != mapping['sig'] or int(mapping['expire']) < time():
            raise ValueError('VK.com authentication failed: Invalid Hash')

        kwargs.update({'backend': self,
                       'response': self.user_data(mapping['mid'])})
        return self.strategy.authenticate(*args, **kwargs)

    def uses_redirect(self):
        """VK.com does not require visiting server url in order
        to do authentication, so auth_xxx methods are not needed to be called.
        Their current implementation is just an example"""
        return False
class VKOAuth2(BaseOAuth2):
    """VKOAuth2 authentication backend"""
    name = 'vk-oauth2'
    ID_KEY = 'user_id'
    AUTHORIZATION_URL = 'http://oauth.vk.com/authorize'
    ACCESS_TOKEN_URL = 'https://oauth.vk.com/access_token'
    ACCESS_TOKEN_METHOD = 'POST'
    EXTRA_DATA = [
        ('id', 'id'),
        ('expires_in', 'expires')
    ]

    def get_user_details(self, response):
        """Return user details from VK.com account"""
        fullname, first_name, last_name = self.get_user_names(
            first_name=response.get('first_name'),
            last_name=response.get('last_name')
        )
        return {'username': response.get('screen_name'),
                'email': response.get('email', ''),
                'fullname': fullname,
                'first_name': first_name,
                'last_name': last_name}

    def user_data(self, access_token, response, *args, **kwargs):
        """Loads user data from service.

        Raises AuthTokenRevoked when VK reports error code 5,
        AuthException for any other API or transport error.
        """
        request_data = ['first_name', 'last_name', 'screen_name', 'nickname',
                        'photo'] + self.setting('EXTRA_DATA', [])
        fields = ','.join(set(request_data))
        data = vk_api(self, 'users.get', {
            'access_token': access_token,
            'fields': fields,
            'uids': response.get('user_id')
        })
        # vk_api() returns None on transport/parsing failures; previously
        # that crashed below with AttributeError on data.get().
        if data is None:
            raise AuthException(self, 'Connection error')
        if data.get('error'):
            error = data['error']
            msg = error.get('error_msg', 'Unknown error')
            if error.get('error_code') == 5:
                raise AuthTokenRevoked(self, msg)
            else:
                raise AuthException(self, msg)
        if data:
            data = data.get('response')[0]
            data['user_photo'] = data.get('photo')  # Backward compatibility
        return data
class VKAppOAuth2(VKOAuth2):
    """VK.com Application Authentication support"""
    name = 'vk-app'

    def user_profile(self, user_id, access_token=None):
        """Return the VK profile dict for user_id, or None if unavailable."""
        request_data = ['first_name', 'last_name', 'screen_name', 'nickname',
                        'photo'] + self.setting('EXTRA_DATA', [])
        fields = ','.join(set(request_data))
        data = {'uids': user_id, 'fields': fields}
        if access_token:
            data['access_token'] = access_token
        profiles = vk_api(self, 'getProfiles', data).get('response')
        if profiles:
            return profiles[0]

    def auth_complete(self, *args, **kwargs):
        """Authenticate a user of the VK Iframe application.

        Returns None when the request lacks the iframe launch parameters or
        the (optional) USERMODE check rejects the viewer.
        """
        required_params = ('is_app_user', 'viewer_id', 'access_token',
                           'api_id')
        if not all(param in self.data for param in required_params):
            return None
        auth_key = self.data.get('auth_key')
        # Verify signature, if present
        key, secret = self.get_key_and_secret()
        if auth_key:
            check_key = md5('_'.join([key,
                                      self.data.get('viewer_id'),
                                      secret]).encode('utf-8')).hexdigest()
            if check_key != auth_key:
                raise ValueError('VK.com authentication failed: invalid '
                                 'auth key')
        user_check = self.setting('USERMODE')
        user_id = self.data.get('viewer_id')
        if user_check is not None:
            user_check = int(user_check)
            if user_check == 1:
                is_user = self.data.get('is_app_user')
            elif user_check == 2:
                is_user = vk_api(self, 'isAppUser',
                                 {'uid': user_id}).get('response', 0)
            else:
                # Previously any other USERMODE crashed with NameError
                # (is_user unbound); treat unknown modes as "is a user".
                is_user = 1
            if not int(is_user):
                return None
        auth_data = {
            'auth': self,
            'backend': self,
            'request': self.strategy.request_data(),
            'response': {
                'user_id': user_id,
            }
        }
        auth_data['response'].update(self.user_profile(user_id))
        return self.strategy.authenticate(*args, **auth_data)
def vk_api(backend, method, data):
    """
    Calls VK.com OpenAPI method, check:
        https://vk.com/apiclub
        http://goo.gl/yLcaa

    backend -- a backend instance providing setting(), get_key_and_secret()
               and get_json()
    method  -- VK API method name, e.g. 'users.get'
    data    -- dict of request parameters (string values); mutated in place
    Returns the decoded JSON response, or None on transport/decoding errors.
    """
    # Let the API version be configured instead of being pinned to 3.0
    # (previously it was impossible to change the VK API version).
    data['v'] = backend.setting('API_VERSION', '3.0')
    # We need to perform a signed server-side call if no access_token
    if 'access_token' not in data:
        key, secret = backend.get_key_and_secret()
        if 'api_id' not in data:
            data['api_id'] = key
        data['method'] = method
        data['format'] = 'json'
        url = 'http://api.vk.com/api.php'
        # Signature: MD5 of sorted key=value pairs plus the app secret
        param_list = sorted(list(item + '=' + data[item] for item in data))
        data['sig'] = md5(
            (''.join(param_list) + secret).encode('utf-8')
        ).hexdigest()
    else:
        url = 'https://api.vk.com/method/' + method
    try:
        return backend.get_json(url, params=data)
    except (TypeError, KeyError, IOError, ValueError, IndexError):
        return None
It was previously impossible to change the VKontakte API version.
# -*- coding: utf-8 -*-
"""
VK.com OpenAPI, OAuth2 and Iframe application OAuth2 backends, docs at:
http://psa.matiasaguirre.net/docs/backends/vk.html
"""
from time import time
from hashlib import md5
from social.utils import parse_qs
from social.backends.base import BaseAuth
from social.backends.oauth import BaseOAuth2
from social.exceptions import AuthTokenRevoked, AuthException
class VKontakteOpenAPI(BaseAuth):
    """VK.COM OpenAPI authentication backend."""
    name = 'vk-openapi'
    ID_KEY = 'id'

    def get_user_details(self, response):
        """Return user details from VK.com request"""
        nickname = response.get('nickname') or ''
        fullname, first_name, last_name = self.get_user_names(
            first_name=response.get('first_name', [''])[0],
            last_name=response.get('last_name', [''])[0]
        )
        return {
            # Fall back to the numeric VK id when the user has no nickname
            'username': response['id'] if len(nickname) == 0 else nickname,
            'email': '',
            'fullname': fullname,
            'first_name': first_name,
            'last_name': last_name
        }

    def user_data(self, access_token, *args, **kwargs):
        """OpenAPI delivers the profile with the request itself."""
        return self.data

    def auth_html(self):
        """Returns local VK authentication page, not necessary for
        VK to authenticate.
        """
        ctx = {'VK_APP_ID': self.setting('APP_ID'),
               'VK_COMPLETE_URL': self.redirect_uri}
        local_html = self.setting('LOCAL_HTML', 'vkontakte.html')
        return self.strategy.render_html(tpl=local_html, context=ctx)

    def auth_complete(self, *args, **kwargs):
        """Performs check of authentication in VKontakte, returns User if
        succeeded.

        Raises ValueError when the VK session cookie is missing, its MD5
        signature does not match, or the session has expired.
        """
        session_value = self.strategy.session_get(
            'vk_app_' + self.setting('APP_ID')
        )
        if 'id' not in self.data or not session_value:
            raise ValueError('VK.com authentication is not completed')
        mapping = parse_qs(session_value)
        # Signature is MD5 over concatenated key=value pairs plus the app
        # secret (VK OpenAPI cookie format).
        check_str = ''.join(item + '=' + mapping[item]
                            for item in ['expire', 'mid', 'secret', 'sid'])
        key, secret = self.get_key_and_secret()
        # Renamed from `hash`, which shadowed the builtin of the same name.
        check_hash = md5((check_str + secret).encode('utf-8')).hexdigest()
        if check_hash != mapping['sig'] or int(mapping['expire']) < time():
            raise ValueError('VK.com authentication failed: Invalid Hash')
        kwargs.update({'backend': self,
                       'response': self.user_data(mapping['mid'])})
        return self.strategy.authenticate(*args, **kwargs)

    def uses_redirect(self):
        """VK.com does not require visiting server url in order
        to do authentication, so auth_xxx methods are not needed to be called.
        Their current implementation is just an example"""
        return False
class VKOAuth2(BaseOAuth2):
    """VKOAuth2 authentication backend"""
    name = 'vk-oauth2'
    ID_KEY = 'user_id'
    AUTHORIZATION_URL = 'http://oauth.vk.com/authorize'
    ACCESS_TOKEN_URL = 'https://oauth.vk.com/access_token'
    ACCESS_TOKEN_METHOD = 'POST'
    EXTRA_DATA = [
        ('id', 'id'),
        ('expires_in', 'expires')
    ]

    def get_user_details(self, response):
        """Return user details from VK.com account"""
        fullname, first_name, last_name = self.get_user_names(
            first_name=response.get('first_name'),
            last_name=response.get('last_name')
        )
        return {'username': response.get('screen_name'),
                'email': response.get('email', ''),
                'fullname': fullname,
                'first_name': first_name,
                'last_name': last_name}

    def user_data(self, access_token, response, *args, **kwargs):
        """Loads user data from service.

        Raises AuthTokenRevoked when VK reports error code 5,
        AuthException for any other API or transport error.
        """
        request_data = ['first_name', 'last_name', 'screen_name', 'nickname',
                        'photo'] + self.setting('EXTRA_DATA', [])
        fields = ','.join(set(request_data))
        data = vk_api(self, 'users.get', {
            'access_token': access_token,
            'fields': fields,
            'uids': response.get('user_id')
        })
        # vk_api() returns None on transport/parsing failures; previously
        # that crashed below with AttributeError on data.get().
        if data is None:
            raise AuthException(self, 'Connection error')
        if data.get('error'):
            error = data['error']
            msg = error.get('error_msg', 'Unknown error')
            if error.get('error_code') == 5:
                raise AuthTokenRevoked(self, msg)
            else:
                raise AuthException(self, msg)
        if data:
            data = data.get('response')[0]
            data['user_photo'] = data.get('photo')  # Backward compatibility
        return data
class VKAppOAuth2(VKOAuth2):
    """VK.com Application Authentication support"""
    name = 'vk-app'

    def user_profile(self, user_id, access_token=None):
        """Return the VK profile dict for user_id, or None if unavailable."""
        request_data = ['first_name', 'last_name', 'screen_name', 'nickname',
                        'photo'] + self.setting('EXTRA_DATA', [])
        fields = ','.join(set(request_data))
        data = {'uids': user_id, 'fields': fields}
        if access_token:
            data['access_token'] = access_token
        profiles = vk_api(self, 'getProfiles', data).get('response')
        if profiles:
            return profiles[0]

    def auth_complete(self, *args, **kwargs):
        """Authenticate a user of the VK Iframe application.

        Returns None when the request lacks the iframe launch parameters or
        the (optional) USERMODE check rejects the viewer.
        """
        required_params = ('is_app_user', 'viewer_id', 'access_token',
                           'api_id')
        if not all(param in self.data for param in required_params):
            return None
        auth_key = self.data.get('auth_key')
        # Verify signature, if present
        key, secret = self.get_key_and_secret()
        if auth_key:
            check_key = md5('_'.join([key,
                                      self.data.get('viewer_id'),
                                      secret]).encode('utf-8')).hexdigest()
            if check_key != auth_key:
                raise ValueError('VK.com authentication failed: invalid '
                                 'auth key')
        user_check = self.setting('USERMODE')
        user_id = self.data.get('viewer_id')
        if user_check is not None:
            user_check = int(user_check)
            if user_check == 1:
                is_user = self.data.get('is_app_user')
            elif user_check == 2:
                is_user = vk_api(self, 'isAppUser',
                                 {'uid': user_id}).get('response', 0)
            else:
                # Previously any other USERMODE crashed with NameError
                # (is_user unbound); treat unknown modes as "is a user".
                is_user = 1
            if not int(is_user):
                return None
        auth_data = {
            'auth': self,
            'backend': self,
            'request': self.strategy.request_data(),
            'response': {
                'user_id': user_id,
            }
        }
        auth_data['response'].update(self.user_profile(user_id))
        return self.strategy.authenticate(*args, **auth_data)
def vk_api(backend, method, data):
    """
    Calls VK.com OpenAPI method, check:
        https://vk.com/apiclub
        http://goo.gl/yLcaa

    Returns the decoded JSON response, or None on transport/decoding errors.
    """
    # API version is configurable via the API_VERSION setting.
    data['v'] = backend.setting('API_VERSION', '3.0')
    if 'access_token' in data:
        # Client-side call: the access token authenticates the request.
        url = 'https://api.vk.com/method/' + method
    else:
        # Server-side call: sign the request with the application secret.
        key, secret = backend.get_key_and_secret()
        data.setdefault('api_id', key)
        data['method'] = method
        data['format'] = 'json'
        url = 'http://api.vk.com/api.php'
        signed_params = sorted(name + '=' + data[name] for name in data)
        data['sig'] = md5(
            (''.join(signed_params) + secret).encode('utf-8')
        ).hexdigest()
    try:
        return backend.get_json(url, params=data)
    except (TypeError, KeyError, IOError, ValueError, IndexError):
        return None
|
"""
Custom tags to use in templates or code to render file lists etc.
History
03/09/2012 - Sjoerd - Created this file
"""
import pdb
import csv, numpy
import datetime
import ntpath
import os
import random
import re
import string
import StringIO
import sys
import traceback
import logging
from collections import Counter
from exceptions import Exception
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist,ImproperlyConfigured
from django.core.urlresolvers import NoReverseMatch
from django.contrib.auth.models import Group, User, Permission
from django.core.files.storage import DefaultStorage
from django.template import RequestContext, defaulttags
from django.utils.html import escape
from profiles.forms import SignupFormExtra
from profiles.models import UserProfile
from comicmodels.models import FileSystemDataset, UploadModel, DropboxFolder,RegistrationRequest # FIXME: abstract Dataset should be imported here, not explicit filesystemdataset. the template tag should not care about the type of dataset.
from comicmodels.models import ComicSite, Page
import comicsite.views
from comicsite.utils.html import escape_for_html_id
from dropbox.rest import ErrorResponse
from dataproviders import FileSystemDataProvider
from dataproviders.DropboxDataProvider import DropboxDataProvider, HtmlLinkReplacer # TODO: move HtmlLinkReplacer to better location..
from dataproviders.ProjectExcelReader import ProjectExcelReader
#---------#---------#---------#---------#---------#---------#---------#---------
# This is needed to use the @register.tag decorator
#register = template.Library()
from comicsite.templatetags import library_plus
# LibraryPlus extends Library; it also records per-tag usage strings,
# which get_usagestr() and the taglist tag read back.
register = library_plus.LibraryPlus()
logger = logging.getLogger("django")
def parseKeyValueToken(token):
    """
    Parse a templatetag token of the form

        tagname key1:value1 key2:value2 ...

    and return the key/value arguments as a dict.

    token -- the token object handed to a template tag function; only
             token.split_contents() is used.
    Raises ValueError when an argument does not contain exactly one ':'.
    """
    # First element is the tag name itself; the rest are key:value args.
    # (Dropped the unused `tag` local of the original.)
    args = token.split_contents()[1:]
    return dict(param.split(":") for param in args)
def get_usagestr(function_name):
    """
    Return usage string for a registered template tag function. For displaying
    this info in errors or tag overviews.

    Returns "" when the function has no registered usage string.
    """
    # dict.get replaces the deprecated (Python-2-only) has_key() call.
    usagestr = register.usagestrings.get(function_name, "")
    return sanitize_django_items(usagestr)
@register.tag(name="taglist",
              usagestr="""
              <% taglist %> :
              show all available tags
              """
              )
def get_taglist(parser, token):
    """Template tag {% taglist %}: render an overview of all registered tags."""
    return TagListNode()
#=========#=========#=========#=========#=========#=========#=========#=========#=========
def subdomain_is_projectname():
    """ Check whether this setting is true in settings. Return false if not found
    """
    if hasattr(settings, "SUBDOMAIN_IS_PROJECTNAME"):
        subdomain_is_projectname = settings.SUBDOMAIN_IS_PROJECTNAME
        # The two settings only make sense together: links rewritten to a
        # subdomain need MAIN_HOST_NAME to build absolute urls back to the
        # main site.
        if subdomain_is_projectname and not hasattr(settings, "MAIN_HOST_NAME"):
            msg = """Key 'SUBDOMAIN_IS_PROJECTNAME' was defined in settings,
            but 'MAIN_HOST_NAME' was not. These belong together. Please
            add 'MAIN_HOST_NAME' and set it to the hostname of your site."""
            raise ImproperlyConfigured(msg)
    else:
        subdomain_is_projectname = False
    return subdomain_is_projectname
@register.tag
def url(parser, token):
    """ Overwrite built in url tag. It works identically, except that where possible
    it will use subdomains to refer to a project instead of a full url path.

    For example, if the subdomain is vessel12.domain.com it will refer to a page
    'details' as /details/ instead of /site/vessel12/details/

    REQUIREMENTS:
    * MIDDLEWARE_CLASSES in settings should contain
      'comicsite.middleware.subdomain.SubdomainMiddleware'

    * These keys should be in the django settings file:
      SUBDOMAIN_IS_PROJECTNAME = True
      MAIN_HOST_NAME = <your site's hostname>

    * APACHE url rewriting should be in effect to rewrite subdomain to
      site/project/. To get you started: the following apache config does this
      for the domain 'devcomicframework.org'
      (put this in your apache config file)

        RewriteEngine   on
        RewriteCond $1 .*/$
        RewriteCond $1 !^/site/.*
        RewriteCond %{HTTP_HOST} !^devcomicframework\.org$
        RewriteCond %{HTTP_HOST} !^www.devcomicframework\.org$
        RewriteCond %{HTTP_HOST} ^([^.]+)\.devcomicframework\.org$
        RewriteRule (.*) /site/%1$1 [PT]

    TODO: turn on and off this behaviour in settings, maybe explicitly define
    base domain to also make it possible to use dots in the base domain.
    """
    # Parse with the stock url tag, then swap in our subdomain-aware node.
    orgnode = defaulttags.url(parser, token)
    return comic_URLNode(orgnode.view_name, orgnode.args, orgnode.kwargs, orgnode.asvar)
class comic_URLNode(defaulttags.URLNode):
    """URLNode that rewrites project-page urls to their subdomain form when
    SUBDOMAIN_IS_PROJECTNAME is enabled."""

    def render(self, context):
        # TODO: How to refer to method in this file nicely? This seems a bit cumbersome
        subdomain_is_projectname = comicsite.templatetags.comic_templatetags.subdomain_is_projectname()
        # get the url the default django method would give.
        url = super(comic_URLNode, self).render(context)
        url = url.lower()
        if subdomain_is_projectname:
            # subdomain attribute is set by SubdomainMiddleware (see the url
            # tag docstring); absent means we are on the main domain.
            if hasattr(context['request'], "subdomain"):
                subdomain = context['request'].subdomain
            else:
                subdomain = ""
            if subdomain == "":
                # we are on the regular domain, do not change any links
                return url
            else:
                # Interpret subdomain as a comicsite. What would normally be the
                # path to this comicsite?
                path_to_site = reverse("comicsite.views.site", args=[subdomain]).lower()
                if url.startswith(path_to_site):
                    # inside the current project: strip the /site/<project>/ prefix
                    return url.replace(path_to_site, "/")
                else:
                    # this url cannot use the domain name shortcut, so it is
                    # probably meant as a link the main comicframework site.
                    # in that case hardcode the domain to make sure the sub-
                    # domain is gone after following this link
                    return settings.MAIN_HOST_NAME + url
        else:
            return url
class TagListNode(template.Node):
    """ Print available tags as text
    """

    def __init__(self):
        pass

    def render(self, context):
        # Two-column table: tag name and its (sanitized) usage string.
        html_out = "<table class =\"comictable taglist\">"
        html_out = html_out + "<tr><th>tagname</th><th>description</th></tr>"
        rowclass = "odd"
        for key, val in register.usagestrings.iteritems():
            html_out = html_out + "<tr class=\"%s\"><td>%s</td><td>%s</td></tr>\
            " % (rowclass, key, sanitize_django_items(val))
            # alternate row classes for striped styling
            if rowclass == "odd":
                rowclass = "even"
            else:
                rowclass = "odd"
        html_out = html_out + "</table>"
        return html_out
def sanitize_django_items(string):
    """
    Replace {{, {% and other items which would be rendered as tags by django
    with HTML entities, so usage strings can be displayed verbatim in a page.

    NOTE(review): the replacement targets had collapsed into no-ops
    (replace("{{","{{") etc.) — the entity text was evidently lost when this
    file was transcoded. Restored the entity escaping.
    """
    out = string
    out = out.replace("{{", "&#123;&#123;")
    out = out.replace("}}", "&#125;&#125;")
    out = out.replace("{%", "&#123;%")
    out = out.replace("%}", "%&#125;")
    out = out.replace(">", "&gt;")
    out = out.replace("<", "&lt;")
    out = out.replace("\n", "<br/>")
    return out
@register.simple_tag
def metafooterpages():
    """ Get html for links to general pages like 'contact' """
    html_string = "<div class='text'><span>COMIC:</span></div>"
    for page in comicsite.views.getPages(settings.MAIN_PROJECT_NAME):
        # hidden pages are not linked in the footer
        if page.hidden:
            continue
        url = reverse('comicsite.views.comicmain',
                      kwargs={'page_title': page.title})
        if comicsite.templatetags.comic_templatetags.subdomain_is_projectname():
            # make the link absolute so it escapes any project subdomain
            url = settings.MAIN_HOST_NAME + url
        html_string += "<a class='metaFooterMenuItem' href='%s'>" % url
        html_string += page.title if page.display_title == "" else page.display_title
        html_string += "</a>"
    return html_string
@register.tag(name="filelist")
def do_get_files(parser, token):
    """Template tag {% filelist "folder" %}: show files in the given folder."""
    # Hard-coded date format, kept from the original implementation.
    format_string = "\"%Y-%m-%d %I:%M %p\""
    try:
        # split_contents() knows not to split quoted strings.
        tag_name, filefolder = token.split_contents()
    except ValueError:
        raise template.TemplateSyntaxError(
            "%r tag requires a single argument" % token.contents.split()[0])
    if not (format_string[0] == format_string[-1] and
            format_string[0] in ('"', "'")):
        raise template.TemplateSyntaxError(
            "%r tag's argument should be in quotes" % tag_name)
    return FileListNode(format_string[1:-1], filefolder[1:-1])
class FileListNode(template.Node):
    """ Show list of files in given dir
    """

    def __init__(self, format_string, filefolder):
        # format_string: strftime-style pattern (currently unused in render)
        self.format_string = format_string
        # filefolder: path handed to FileSystemDataProvider
        self.filefolder = filefolder

    def render(self, context):
        dp = FileSystemDataProvider.FileSystemDataProvider(self.filefolder)
        # getImages() lists the image files found in the folder
        images = dp.getImages()
        htmlOut = "available files:" + ", ".join(images)
        return htmlOut
#========#========#========#========#========#========#========#========
@register.tag(name="dataset",
              usagestr="""Tag usage: {% dataset <datasetname>,<comicsitename> %}. <comicsitename> can be\
 omitted, defaults to current site"""
              )
def render_dataset(parser, token):
    """ Given a challenge and a dataset name, show all files in this dataset as list"""
    #usagestr = DatasetNode.usagestr
    usagestr = get_usagestr("render_dataset")
    # check some basic stuff
    try:
        tag_name, args = token.split_contents()
    except ValueError:
        errormsg = "Error rendering {% " + token.contents + " %}: tag requires at least one \
argument. " + usagestr
        # errors are rendered into the page rather than raised:
        # raise template.TemplateSyntaxError(errormsg)
        return TemplateErrorNode(errormsg)
    # args is "<datasetname>" or "<datasetname>,<comicsitename>"
    if args.count(",") == 0:
        dataset_title = args
        project_name = ""
    elif args.count(",") == 1:
        dataset_title, project_name = args.split(",")
    else:
        errormsg = "Error rendering {% " + token.contents + " %}: found " + str(args.count(",")) + \
            " comma's, expected at most 1." + usagestr
        return TemplateErrorNode(errormsg)
    return DatasetNode(dataset_title, project_name)
class DatasetNode(template.Node):
    """ Show list of linked files for given dataset
    """

    usagestr = """{% dataset <datasetname>,<comicsitename> %}
    Tag usage: {% dataset <datasetname>,<comicsitename> %}. <comicsitename> can be\
    omitted, defaults to current site"""

    def __init__(self, dataset_title, project_name):
        self.dataset_title = dataset_title
        self.project_name = project_name

    def make_dataset_error_msg(self, msg):
        """Return an html error block naming the dataset and project."""
        errormsg = "Error rendering DataSet '" + self.dataset_title + "' for project '" + self.project_name + "': " + msg
        return makeErrorMsgHtml(errormsg)

    def render(self, context):
        # empty project name means "the site this page belongs to"
        if self.project_name == "":
            self.project_name = context.page.comicsite.short_name
        try:
            dataset = FileSystemDataset.objects.get(comicsite__short_name=self.project_name, title=self.dataset_title)
        except ObjectDoesNotExist as e:
            return self.make_dataset_error_msg("could not find object in database")
        else:
            self.filefolder = dataset.get_full_folder_path()
        dp = FileSystemDataProvider.FileSystemDataProvider(self.filefolder)
        try:
            filenames = dp.getAllFileNames()
        except (OSError) as e:
            return self.make_dataset_error_msg(str(e))
        filenames.sort()
        # build one download link per file in the dataset
        links = []
        for filename in filenames:
            downloadlink = reverse('filetransfers.views.download_handler_dataset_file', kwargs={'project_name': dataset.comicsite.short_name,
                                                                                               'dataset_title': dataset.title,
                                                                                               'filename': filename})
            # <a href="{% url filetransfers.views.download_handler_dataset_file project_name='VESSEL12' dataset_title='vessel12' filename='test.png' %}">test </a>
            links.append("<li><a href=\"" + downloadlink + "\">" + filename + " </a></li>")
        description = dataset.description
        htmlOut = description + "<ul class=\"dataset\">" + "".join(links) + "</ul>"
        return htmlOut
@register.tag(name="listdir",
              usagestr="""Tag usage: {% listdir path:string extensionFilter:ext1,ext2,ext3 %}
              path: directory relative to this projects dropbox folder to list files from. Do not use leading slash.
              extensionFilter: An include filter to specify the file types which should be displayd in the filebrowser.
              """
              )
def listdir(parser, token):
    """ show all files in dir as a downloadable list"""
    usagestr = get_usagestr("listdir")
    try:
        args = parseKeyValueToken(token)
    except ValueError:
        errormsg = "Error rendering {% " + token.contents + " %}: Error parsing token. " + usagestr
        return TemplateErrorNode(errormsg)
    # NOTE(review): the message below says "dataset argument" but the required
    # key here is "path" — looks copy-pasted from render_dataset; confirm.
    if "path" not in args.keys():
        errormsg = "Error rendering {% " + token.contents + " %}: dataset argument is missing." + usagestr
        return TemplateErrorNode(errormsg)
    return ListDirNode(args)
class ListDirNode(template.Node):
    """ Show list of linked files for given directory
    """

    usagestr = get_usagestr("listdir")

    def __init__(self, args):
        # args: dict from parseKeyValueToken; "path" is required,
        # "extensionFilter" is optional
        self.path = args['path']
        self.args = args

    def make_dataset_error_msg(self, msg):
        """Return an html error block naming the folder."""
        errormsg = "Error listing folder '" + self.path + "': " + msg
        return makeErrorMsgHtml(errormsg)

    def render(self, context):
        # paths are relative to the project folder of the current page's site
        project_name = context.page.comicsite.short_name
        projectpath = project_name + "/" + self.path
        storage = DefaultStorage()
        try:
            # listdir() returns (dirs, files); only files are shown
            filenames = storage.listdir(projectpath)[1]
        except OSError as e:
            return self.make_dataset_error_msg(str(e))
        filenames.sort()
        # if extensionsFilter is given, show only filenames with those extensions
        if 'extensionFilter' in self.args.keys():
            extensions = self.args['extensionFilter'].split(",")
            filtered = []
            for extension in extensions:
                filtered = filtered + [f for f in filenames if f.endswith(extension)]
            filenames = filtered
        links = []
        for filename in filenames:
            downloadlink = reverse('project_serve_file',
                                   kwargs={'project_name': project_name,
                                           'path': self.path + "/" + filename})
            links.append("<li><a href=\"" + downloadlink + "\">" + filename + " </a></li>")
        htmlOut = "<ul class=\"dataset\">" + "".join(links) + "</ul>"
        return htmlOut
@register.tag(name = "visualization")
def render_visualization(parser, token):
    """ Given a dataset name, show a 2D visualization for that """
    usagestr = """Tag usage: {% visualization dataset:string
                  width:number
                  height:number
                  deferredLoad:0|1
                  extensionFilter:ext1,ext2,ext3%}
                  The only mandatory argument is dataset.
                  width/heigth: Size of the 2D view area.
                  defferedLoad: If active, user has to click on the area to load the viewer.
                  extensionFilter: An include filter to specify the file types which should be displayd in the filebrowser.
                  """
    try:
        args = parseKeyValueToken(token)
    except ValueError:
        errormsg = "Error rendering {% " + token.contents + " %}: Error parsing token. " + usagestr
        return TemplateErrorNode(errormsg)
    # dataset is the only mandatory argument; the rest have defaults in
    # VisualizationNode.render
    if "dataset" not in args.keys():
        errormsg = "Error rendering {% " + token.contents + " %}: dataset argument is missing." + usagestr
        return TemplateErrorNode(errormsg)
    return VisualizationNode(args)
class VisualizationNode(template.Node):
    """
    Renders the ComicWebWorkstation using MeVisLab
    """

    def __init__(self, args):
        # args: dict from parseKeyValueToken; "dataset" is required, the
        # remaining keys have defaults applied in render()
        self.args = args

    def make_dataset_error_msg(self, msg):
        """Return an html error block naming this visualization's args."""
        errormsg = "Error rendering Visualization '" + str(self.args) + ":" + msg
        return makeErrorMsgHtml(errormsg)

    def render(self, context):
        # id(self) is used to generate unique element ids so several viewers
        # can coexist on one page.
        htmlOut = """
        <div class="COMICWebWorkstationButtons">
            <button id="comicViewerSetSmallSize%(id)d"> small </button>
            <button id="comicViewerSetLargeSize%(id)d"> large </button>
            <button id="comicViewerFullscreenToggle%(id)d"> fullscreen </button>
        </div>
        <div id="comicViewer%(id)d" style="width: %(width)spx; height:%(height)spx"></div>
        <script type="text/javascript">
            var fmeViewer%(id)d = null;
            //$(document).ready(function() {
                console.log('fmeviewee')
                fmeViewer%(id)d = new COMICWebWorkstationWrapper("comicViewer%(id)d");
                var options = {'path':'%(path)s',
                               'deferredLoad':%(deferredLoad)s,
                               'extensionFilter':'%(extensionFilter)s',
                               'width':%(width)s,
                               'height':%(height)s,
                               'application': 'COMICWebWorkstation_1.2',
                               'webSocketHostName':%(webSocketHostName)s,
                               'webSocketPort':%(webSocketPort)s,
                               'urlToMLABRoot': "/static/js" };
                fmeViewer%(id)d.init(options);
            //});
            $("#comicViewerSetSmallSize%(id)d").click(function(){
                fmeViewer%(id)d.setSmallSize()
            })
            $("#comicViewerSetLargeSize%(id)d").click(function(){
                fmeViewer%(id)d.setLargeSize()
            })
            $("#comicViewerFullscreenToggle%(id)d").click(function(){
                fmeViewer%(id)d.gotoFullscreen()
            })
        </script>
        """ % ({"id": id(self),
                "width": self.args.get("width", "600"),
                "height": self.args.get("height", "400"),
                "path": self.args.get("dataset"),
                "extensionFilter": self.args.get("extensionFilter", ""),
                "deferredLoad": self.args.get("deferredLoad", "0"),
                "webSocketHostName": self.args.get("webSocketHostName",
                                                   "undefined"),
                "webSocketPort": self.args.get("webSocketPort", "undefined")})
        return htmlOut
@register.tag(name="dropbox")
def render_dropbox(parser, token):
    """ Given a django_dropbox item title, render a file from this dropbox """
    usagestr = """Tag usage: {% dropbox title:string file:filepath %}
                  title: the title of an autorized django_dropbox item
                  file: path to a file in your dropbox /apps/COMIC folder
                  """
    try:
        args = parseKeyValueToken(token)
    except ValueError:
        errormsg = "Error rendering {% " + token.contents + " %}: Error parsing token. " + usagestr
        return TemplateErrorNode(errormsg)
    if "title" not in args.keys():
        errormsg = "Error rendering {% " + token.contents + " %}: title argument is missing." + usagestr
        return TemplateErrorNode(errormsg)
    if "file" not in args.keys():
        errormsg = "Error rendering {% " + token.contents + " %}: file argument is missing." + usagestr
        return TemplateErrorNode(errormsg)
    # NOTE(review): this hits the database at template-parse time, not render
    # time — every parse of a page using this tag does a DropboxFolder query.
    try:
        df = DropboxFolder.objects.get(title=args['title'])
    except ObjectDoesNotExist as e:
        return TemplateErrorNode("could not find dropbox titled '" + args['title'] + "' in database")
    provider = df.get_dropbox_data_provider()
    replacer = HtmlLinkReplacer()
    return DropboxNode(args, df, provider, replacer)
class DropboxNode(template.Node):
    """Render the contents of a dropbox file, rewriting relative links so
    they resolve within the COMIC site."""

    def __init__(self, args, df, provider, replacer):
        # args: dict with "title" (dropbox item) and "file" (path inside it)
        self.args = args
        self.df = df
        # provider: DropboxDataProvider used to read the file contents
        self.provider = provider
        # replacer: HtmlLinkReplacer used to rewrite relative links
        self.replacer = replacer

    def make_dropbox_error_msg(self, msg):
        """Return an html error block naming this dropbox and its args."""
        errormsg = "Error rendering dropbox '" + str(self.args) + ": " + msg
        return makeErrorMsgHtml(errormsg)

    def render(self, context):
        try:
            contents = self.provider.read(self.args["file"])
        except ErrorResponse as e:
            return self.make_dropbox_error_msg(str(e))
        # any relative link inside included file has to be replaced to make it work within the COMIC
        # context.
        baseURL = reverse('comicsite.views.dropboxpage', kwargs={'site_short_name': context.page.comicsite.short_name,
                                                                 'page_title': context.page.title,
                                                                 'dropboxname': self.args['title'],
                                                                 'dropboxpath': "remove"})
        # for some reason reverse matching does not work for emtpy dropboxpath (maybe views.dropboxpage
        # throws an error?. Workaround is to add 'remove' as path and chop this off the returned link
        # nice.
        baseURL = baseURL[:-7]  # remove "remove/" from baseURL
        currentpath = ntpath.dirname(self.args['file']) + "/"  # path of currently rendered dropbox file
        replaced = self.replacer.replace_links(contents, baseURL, currentpath)
        htmlOut = replaced
        return htmlOut
def add_quotes(string):
    """ Add single quotes around string if it is not already quoted.

    NOTE(review): the original tested startswith("'") twice; the second
    check was evidently meant for double quotes and is restored here.
    """
    if string.startswith("'") or string.startswith('"'):
        return string
    else:
        return "'" + string + "'"
def strip_quotes(string):
    """ Strip outermost quotes from string if present.

    Leading and trailing quotes are stripped independently, as in the
    original. NOTE(review): the original tested "'" twice in each condition;
    the second test was evidently meant for double quotes and is restored.
    """
    stripped = string
    if string.startswith("'") or string.startswith('"'):
        stripped = stripped[1:]
    if string.endswith("'") or string.endswith('"'):
        stripped = stripped[:-1]
    return stripped
def in_list(needles, haystack):
    """ Return True if any of the strings in needles occurs in haystack. """
    return any(needle in haystack for needle in needles)
def inlist(needles, haystack):
    """ Return True if any of the items in needles occurs in haystack.

    (Functional duplicate of in_list, kept for existing callers.)
    """
    found = False
    for item in needles:
        if item in haystack:
            found = True
            break
    return found
# {% insertfile results/test.txt %}
@register.tag(name="insert_file")
def insert_file(parser, token):
    """ Render a file from the local dropbox folder of the current project"""
    # NOTE: usagestr below is informational only; the error message further
    # down builds its own text and does not include it.
    usagestr = """Tag usage: {% insertfile <file> %}
    <file>: filepath relative to project dropboxfolder.
    Example: {% insertfile results/test.txt %}
    You can use url parameters in <file> by using {{curly braces}}.
    Example: {% insterfile {{id}}/result.txt %} called with ?id=1234
    appended to the url will show the contents of "1234/result.txt".
    """
    split = token.split_contents()
    tag = split[0]  # tag name itself; not used further
    all_args = split[1:]
    if len(all_args) != 1:
        error_message = "Expected 1 argument, found " + str(len(all_args))
        return TemplateErrorNode(error_message)
    else:
        args = {}
        filename = all_args[0]
        # quoted so InsertFileNode.render can treat it like a template token
        args["file"] = add_quotes(filename)
    replacer = HtmlLinkReplacer()
    return InsertFileNode(args, replacer, parser)
class InsertFileNode(template.Node):
    """Renders the contents of a project dropbox file into the page,
    rewriting relative links so they resolve within the COMIC site."""

    def __init__(self, args, replacer, parser):
        # args: dict with key "file" (quoted filepath, may contain {{vars}})
        self.args = args
        # replacer: HtmlLinkReplacer used by replace_links()
        self.replacer = replacer
        # parser: template parser, used in render() to resolve variables
        self.parser = parser
def make_error_msg(self, msg):
    """Return an html error block naming the file that failed to include."""
    errormsg = "Error including file '" + "," + self.args["file"] + "': " + msg
    return makeErrorMsgHtml(errormsg)
def is_inside_project_data_folder(self, folder, project):
    """ Return True when folder lies inside project's data folder.

    For making sure nosey people do not use too many ../../../ in paths
    to snoop around in the filesystem.

    folder: string containing a filepath
    project: a comicsite object
    """
    data_folder = project.get_project_data_folder()
    return folder.startswith(data_folder)
def make_canonical_path(self, path):
    """ Normalize a filepath to forward-slash separators. """
    # collapse doubled backslashes first, then any remaining single ones
    return path.replace("\\\\", "/").replace("\\", "/")
def substitute(self, string, substitutions):
    """
    Replace each {{key}} occurrence in string with its value, for every
    (key, value) pair in substitutions.

    Example: substitute("my name is {{name}}.", [("name", "John")])
    returns "my name is John."
    """
    result = string
    for key, value in substitutions:
        result = re.sub("{{" + key + "}}", value, result)
    return result
def replace_links(self, filename, contents, currentpage):
"""Relative urls which work on disk might not
work properly when used in included file. Make sure any links in contents
still point to the right place
"""
# any relative link inside included file has to be replaced to make it work within the COMIC
# context.
base_url = reverse('comicsite.views.insertedpage', kwargs={'site_short_name':currentpage.comicsite.short_name,
'page_title':currentpage.title,
'dropboxpath':"remove"})
# for some reason reverse matching does not work for emtpy dropboxpath (maybe views.dropboxpage
# throws an error?. Workaround is to add 'remove' as path and chop this off the returned link.
# nice.
base_url = base_url[:-7] # remove "remove/" from baseURL
current_path = ntpath.dirname(filename) + "/" # path of currently inserted file
replaced = self.replacer.replace_links(contents,
base_url,
current_path)
html_out = replaced
return html_out
def render(self, context):
#text typed in the tag
token = self.args['file']
# the token (parameter) given to this tag can be one of three types:
# * a raw filename like "stuff.html" or "results/table1.txt"
# * a filname containing a variable like "results/{{teamid}}/table1.txt"
# * a django template variable like "site.short_name"
# Find out what type it is:
# If it contains any / or {{ resolving as django var
# is going to throw an error. Prevent unneeded exception, just skip
# rendering as var in that case.
filename_resolved = ""
if not in_list(["{","}","\\","/"],token):
filter = self.parser.compile_filter(strip_quotes(token))
filename_resolved = filter.resolve(context)
# if resolved filename is empty, resolution failed, just treat this
# param as a filepath
if filename_resolved == "":
filename = strip_quotes(token)
else:
filename = filename_resolved
# if there are {{}}'s in there, try to substitute this with url
# parameter given in the url
filename = self.substitute(filename, context["request"].GET.items())
# If any {{parameters}} are still in filename they were not replaced.
# This filename is missing information, show this as error text.
if re.search("{{\w+}}", filename):
missed_parameters = re.findall("{{\w+}}", filename)
found_parameters = context["request"].GET.items()
if found_parameters == []:
found_parameters = "None"
error_msg = "I am missing required url parameter(s) %s, url parameter(s) found: %s "\
"" % (missed_parameters, found_parameters)
return self.make_error_msg(error_msg)
project_name = context["site"].short_name
filepath = os.path.join(settings.DROPBOX_ROOT, project_name, filename)
filepath = os.path.abspath(filepath)
filepath = self.make_canonical_path(filepath)
# when all rendering is done, check if the final path is still not getting
# into places it should not go.
if not self.is_inside_project_data_folder(filepath,context["site"]):
error_msg = "'{}' cannot be opened because it is outside the current project.".format(filepath)
return self.make_error_msg(error_msg)
storage = DefaultStorage()
try:
contents = storage.open(filepath, "r").read()
except Exception as e:
return self.make_error_msg("error opening file:" + str(e))
# TODO check content safety
# For some special pages like login and signup, there is no current page
# In that case just don't try any link rewriting
# TODO: here confused coding comes to light: I need to have the page
# object that this template tag is on in order to process it properly.
# I use both the element .page, added by
# ComicSiteRequestContext, and a key 'currentpage' added by the view.
# I think both are not ideal, and should be rewritten so all template
# tags are implicitly passed page (and project) by default. It think
# this needs custom template context processors or custom middleware.
# As a workaround, just checking for both conditions.
if context.has_key("currentpage"):
currentpage = context["currentpage"]
elif hasattr(context,"page"):
currentpage = context.page
else:
currentpage = None
if currentpage and os.path.splitext(filename)[1] != ".css":
html_out = self.replace_links(filename, contents, currentpage)
# rewrite relative links
else:
html_out = contents
return html_out
@register.tag(name="insert_graph")
def insert_graph(parser, token):
    """ Render a csv file from the local dropbox to a graph.

    Returns an InsertGraphNode, or a TemplateErrorNode when the tag was
    called with the wrong number of arguments.
    """
    usagestr = """Tag usage: {% insert_graph <file> type:<type>%}
                  <file>: filepath relative to project dropboxfolder.
                  <type>: how should the file be parsed and rendered? default
                          is to render an FROC curve for a csv with first column
                          for x and subsequent columns for y, first row for short
                          var names, second row for verbose names.
                  Example: {% insert_graph results/test.txt %}
                  You can use url parameters in <file> by using {{curly braces}}.
                  Example: {% insert_graph {{id}}/result.txt %} called with ?id=1234
                  appended to the url will show the contents of "1234/result.txt".
                  """
    split = token.split_contents()
    tag = split[0]
    all_args = split[1:]
    # Guard both ends of the argument count: the original check only rejected
    # more than 2 args, so calling the tag without any argument crashed with
    # an IndexError at all_args[0] below.
    if len(all_args) < 1 or len(all_args) > 2:
        error_message = "Expected 1 or 2 arguments, found " + str(len(all_args))
        return TemplateErrorNode(error_message + "usage: \n" + usagestr)
    else:
        args = {}
        args["file"] = all_args[0]
        if len(all_args) == 2:
            # second argument has the form type:<type>
            args["type"] = all_args[1].split(":")[1]
        else:
            args["type"] = "csv"  # default
    replacer = HtmlLinkReplacer()
    return InsertGraphNode(args, replacer)
class InsertGraphNode(template.Node):
    """Node for {% insert_graph %}: renders a data file from the project
    dropbox to an html/svg graph using a renderer from getrenderer().
    """

    def __init__(self, args, replacer):
        # args: {"file": <filepath token>, "type": <renderer key, e.g. "csv">}
        # replacer: HtmlLinkReplacer (kept for link rewriting)
        self.args = args
        self.replacer = replacer

    def make_error_msg(self, msg):
        # Wrap msg in the standard inline error html for this tag.
        errormsg = "Error rendering graph from file '" + "," + self.args["file"] + "': " + msg
        return makeErrorMsgHtml(errormsg)

    def substitute(self, string, substitutions):
        """
        Take each key in the substitutions dict. See if this key exists
        between double curly braces in string. If so replace with value.

        Example:
        substitute("my name is {{name}}.",{version:1,name=John})
        > "my name is John"
        """
        # substitutions is an iterable of (key, value) pairs, e.g. GET.items()
        for key, value in substitutions:
            string = re.sub("{{" + key + "}}", value, string)
        return string

    def render(self, context):
        filename_raw = self.args['file']
        # fill in any {{parameter}} placeholders from the request GET params
        filename_clean = self.substitute(filename_raw, context["request"].GET.items())
        # If any url parameters are still in filename they were not replaced. This filename
        # is missing information..
        if re.search("{{\w+}}", filename_clean):
            missed_parameters = re.findall("{{\w+}}", filename_clean)
            found_parameters = context["request"].GET.items()
            if found_parameters == []:
                found_parameters = "None"
            error_msg = "I am missing required url parameter(s) %s, url parameter(s) found: %s "\
                        "" % (missed_parameters, found_parameters)
            return self.make_error_msg(error_msg)
        project_name = context.page.comicsite.short_name
        filename = os.path.join(settings.DROPBOX_ROOT, project_name, filename_clean)
        storage = DefaultStorage()
        try:
            contents = storage.open(filename, "r").read()
        except Exception as e:
            return self.make_error_msg(str(e))
        # TODO check content safety
        # any relative link inside included file has to be replaced to make it work within the COMIC
        # context.
        base_url = reverse('comicsite.views.insertedpage', kwargs={'site_short_name':context.page.comicsite.short_name,
                                                                   'page_title':context.page.title,
                                                                   'dropboxpath':"remove"})
        # for some reason reverse matching does not work for empty dropboxpath (maybe views.dropboxpage
        # throws an error?. Workaround is to add 'remove' as path and chop this off the returned link
        # nice.
        base_url = base_url[:-7]  # remove "remove/" from baseURL
        current_path = ntpath.dirname(filename_clean) + "/"  # path of currently inserted file
        # NOTE(review): contents, base_url and current_path are computed above
        # but not used below -- the render function re-reads the file itself.
        try:
            render_function = getrenderer(self.args["type"])
            # (table,headers) = read_function(filename)
        except Exception as e:
            # e.message is python-2-only exception attribute
            return self.make_error_msg(str("getrenderer:" + e.message))
        try:
            svg_data = render_function(filename)
        # except Exception as e:
        except:
            # deliberately re-raise so rendering errors surface during debugging
            raise
            # return self.make_error_msg(str("Error calling render funtion '%s()' : %s" %(render_function.__name__,
            #                                traceback.format_exc(0))))
        # self.get_graph_svg(table,headers)
        # html_out = "A graph rendered! source: '%s' <br/><br/> %s" %(filename_clean,svg_data)
        html_out = svg_data
        # rewrite relative links
        return html_out
def getrenderer(format):
    """Holds list of functions which can take in a filepath and return html to show a graph.
    By using this function we can easily list all available renderers and provide some safety:
    only functions listed here can be called from the template tag render_graph.

    Raises an Exception when no renderer is registered for the given format.
    """
    renderers = {"csv":render_FROC,
                 "anode09":render_anode09_result,
                 "anode09_table":render_anode09_table, }
    # membership test via 'in' instead of dict.has_key(): has_key is
    # deprecated in python 2 and removed in python 3; 'in' works in both
    if format not in renderers:
        raise Exception("reader for format '%s' not found. Available formats: %s" % (format, \
                        ",".join(renderers.keys())))
    return renderers[format]
def get_graph_svg(table, headers):
    """ return svg instructions as string to plot a froc curve of csvfile
    """
    # del table[-1]
    # transpose rows to columns: series[0] is x, the rest are y series
    series = zip(*table)
    figure = Figure(facecolor='white')
    figure_canvas = FigureCanvas(figure)
    axes = figure.gca()
    for idx in range(1, len(series)):
        axes.plot(series[0], series[idx], label=headers[idx], gid=headers[idx])
    axes.set_xlim([10 ** -2, 10 ** 2])
    axes.set_ylim([0, 1])
    axes.legend(loc='best', prop={'size':10})
    axes.grid()
    axes.grid(which='minor')
    axes.set_xlabel('False positives/scan')
    axes.set_ylabel('Sensitivity')
    axes.set_xscale("log")
    figure.set_size_inches(8, 6)
    return canvas_to_svg(figure_canvas)
def canvas_to_svg(canvas):
    """ Render matplotlib canvas as string containing html/svg instructions. These instructions can be
    pasted into any html page and will be rendered as graph by any modern browser.
    """
    # python 2 StringIO module: in-memory file for print_svg to write into
    imgdata = StringIO.StringIO()
    # NOTE(review): seeking to the end of a freshly created, empty buffer is a
    # no-op -- presumably a leftover; confirm and remove.
    imgdata.seek(0, os.SEEK_END)
    canvas.print_svg(imgdata, format='svg')
    svg_data = imgdata.getvalue()
    imgdata.close()
    return svg_data
# Renderers for graph data: each takes a filepath and returns html/svg.
def render_FROC(filename):
    """ Read in csv file with the following format:
        x_value, all nodules, peri-fissural nodules, ...N
        0.02, 0.31401, 0.0169492, ...N

        First column must be x values, subsequent columns can be any number of y
        values, one for each line to plot.
        First column should be header names to return with each column.

        Returns: string containing html/svg instruction to render an FROC curve
        of all the variables found in file
    """
    has_header = True
    table = []
    storage = DefaultStorage()
    f = storage.open(filename, 'r')
    csvreader = csv.reader(f)
    i = 0
    headers = []
    for row in csvreader:
        if not has_header or i > 0:
            # data row: convert every cell to float before plotting
            for j, cell in enumerate(row):
                row[j] = float(cell)
            table.append(row)
        elif has_header:
            # first row holds the column names
            headers = row
            # nonFloatColumns = [x % len(headers) for x in nonFloatColumns]
            # print nonFloatColumns
        i = i + 1
    f.close()
    # transpose: columns[0] is x, subsequent entries are the y series
    columns = zip(*table)
    # headers double as svg element ids, so they must be escaped
    escaped_headers = [escape_for_html_id(x) for x in headers]
    fig = Figure(facecolor='white')
    canvas = FigureCanvas(fig)
    for i in range(1, len(columns)):
        fig.gca().plot(columns[0], columns[i], label=headers[i], gid=escaped_headers[i])
    fig.gca().set_xlim([10 ** -2, 10 ** 2])
    fig.gca().set_ylim([0, 1])
    fig.gca().legend(loc='best', prop={'size':10})
    fig.gca().grid()
    fig.gca().grid(which='minor')
    fig.gca().set_xlabel('False positives/image')
    fig.gca().set_ylabel('Sensitivity')
    fig.gca().set_xscale("log")
    fig.set_size_inches(8, 6)
    return canvas_to_svg(canvas)
def render_anode09_result(filename):
    """ Read in a file with the anode09 result format, to be able to read this without
        changing the evaluation executable. anode09 results have the following format:

    <?php
        $x=array(1e-39,1e-39,1e-39,1e-39,1e-39,1e-39,1e-39,1e-39,1e-39,0.02,0.02,0.04,0.06,0.06,0.08,0.08,0.0 etc..
        $frocy=array(0,0.00483092,0.00966184,0.0144928,0.0144928,0.0144928,0.0193237,0.0241546,0.0289855,0.02 etc..
        $frocscore=array(0.135266,0.149758,0.193237,0.236715,0.246377,0.26087,0.26087,0.21187);
        $pleuraly=array(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0.0169492,0.0169492,0.0169492,0.016 etc..
        $pleuralscore=array(0.0508475,0.0508475,0.0677966,0.118644,0.135593,0.152542,0.152542,0.104116);
        $fissurey=array(0,0,0,0.0285714,0.0285714,0.0285714,0.0571429,0.0571429,0.0571429,0.0571429,0.0571429 etc..
        $fissurescore=array(0.171429,0.171429,0.285714,0.314286,0.314286,0.314286,0.314286,0.269388);
        $vasculary=array(0,0.0116279,0.0116279,0.0116279,0.0116279,0.0116279,0.0116279,0.0116279,0.0116279,0. etc..
        $vascularscore=array(0.116279,0.139535,0.186047,0.209302,0.22093,0.244186,0.244186,0.194352);
        $isolatedy=array(0,0,0.0238095,0.0238095,0.0238095,0.0238095,0.0238095,0.047619,0.0714286,0.0714286,0 etc..
        $isolatedscore=array(0.238095,0.261905,0.309524,0.380952,0.380952,0.380952,0.380952,0.333333);
        $largey=array(0,0.0111111,0.0111111,0.0111111,0.0111111,0.0111111,0.0111111,0.0222222,0.0222222,0.022 etc..
        $largescore=array(0.111111,0.122222,0.144444,0.177778,0.177778,0.188889,0.188889,0.15873);
        $smally=array(0,0,0.00854701,0.017094,0.017094,0.017094,0.025641,0.025641,0.034188,0.034188,0.034188, etc..
        $smallscore=array(0.153846,0.17094,0.230769,0.282051,0.299145,0.316239,0.316239,0.252747);
    ?>

        First row are x values, followed by alternating rows of FROC scores for each x value and
        xxxscore variables which contain FROC scores at
        [1/8 1/4 1/2 1 2 4 8 average] respectively and are meant to be
        plotted in a table

        Returns: string containing html/svg instruction to render an anode09 FROC curve
        of all the variables found in file
    """
    # small nodules,large nodules, isolated nodules,vascular nodules,pleural nodules,peri-fissural nodules,all nodules
    vars = parse_php_arrays(filename)
    assert vars != {}, "parsed result of '%s' was emtpy. I cannot plot anything" % filename
    fig = Figure(facecolor='white')
    canvas = FigureCanvas(fig)
    # one curve per nodule category, plus one for all nodules combined
    fig.gca().plot(vars["x"], vars["smally"], label="nodules < 5mm", gid="small")
    fig.gca().plot(vars["x"], vars["largey"], label="nodules > 5mm", gid="large")
    fig.gca().plot(vars["x"], vars["isolatedy"], label="isolated nodules", gid="isolated")
    fig.gca().plot(vars["x"], vars["vasculary"], label="vascular nodules", gid="vascular")
    fig.gca().plot(vars["x"], vars["pleuraly"], label="pleural nodules", gid="pleural")
    fig.gca().plot(vars["x"], vars["fissurey"], label="peri-fissural nodules", gid="fissure")
    fig.gca().plot(vars["x"], vars["frocy"], label="all nodules", gid="frocy")
    fig.gca().set_xlim([10 ** -2, 10 ** 2])
    fig.gca().set_ylim([0, 1])
    fig.gca().legend(loc='best', prop={'size':10})
    fig.gca().grid()
    fig.gca().grid(which='minor')
    fig.gca().set_xlabel('Average FPs per scan')
    fig.gca().set_ylabel('Sensitivity')
    fig.gca().set_xscale("log")
    fig.set_size_inches(8, 6)
    return canvas_to_svg(canvas)
def render_anode09_table(filename):
    """ Read in a file with the anode09 result format and output html for an anode09 table
        anode09 results have the following format:

    <?php
        $x=array(1e-39,1e-39,1e-39,1e-39,1e-39,1e-39,1e-39,1e-39,1e-39,0.02,0.02,0.04,0.06,0.06,0.08,0.08,0.0 etc..
        $frocy=array(0,0.00483092,0.00966184,0.0144928,0.0144928,0.0144928,0.0193237,0.0241546,0.0289855,0.02 etc..
        $frocscore=array(0.135266,0.149758,0.193237,0.236715,0.246377,0.26087,0.26087,0.21187);
        $pleuraly=array(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0.0169492,0.0169492,0.0169492,0.016 etc..
        $pleuralscore=array(0.0508475,0.0508475,0.0677966,0.118644,0.135593,0.152542,0.152542,0.104116);
        $fissurey=array(0,0,0,0.0285714,0.0285714,0.0285714,0.0571429,0.0571429,0.0571429,0.0571429,0.0571429 etc..
        $fissurescore=array(0.171429,0.171429,0.285714,0.314286,0.314286,0.314286,0.314286,0.269388);
        $vasculary=array(0,0.0116279,0.0116279,0.0116279,0.0116279,0.0116279,0.0116279,0.0116279,0.0116279,0. etc..
        $vascularscore=array(0.116279,0.139535,0.186047,0.209302,0.22093,0.244186,0.244186,0.194352);
        $isolatedy=array(0,0,0.0238095,0.0238095,0.0238095,0.0238095,0.0238095,0.047619,0.0714286,0.0714286,0 etc..
        $isolatedscore=array(0.238095,0.261905,0.309524,0.380952,0.380952,0.380952,0.380952,0.333333);
        $largey=array(0,0.0111111,0.0111111,0.0111111,0.0111111,0.0111111,0.0111111,0.0222222,0.0222222,0.022 etc..
        $largescore=array(0.111111,0.122222,0.144444,0.177778,0.177778,0.188889,0.188889,0.15873);
        $smally=array(0,0,0.00854701,0.017094,0.017094,0.017094,0.025641,0.025641,0.034188,0.034188,0.034188, etc..
        $smallscore=array(0.153846,0.17094,0.230769,0.282051,0.299145,0.316239,0.316239,0.252747);
    ?>

        First row are x values, followed by alternating rows of FROC scores for each x value and
        xxxscore variables which contain FROC scores at
        [1/8 1/4 1/2 1 2 4 8 average] respectively and are meant to be
        plotted in a table

        Returns: string containing html/svg instruction to render an anode09 FROC curve
        of all the variables found in file
    """
    # small nodules,large nodules, isolated nodules,vascular nodules,pleural nodules,peri-fissural nodules,all nodules
    vars = parse_php_arrays(filename)
    assert vars != {}, "parsed result of '%s' was emtpy. I cannot create table" % filename
    # random id so several tables can coexist on one page
    table_id = id_generator()
    tableHTML = """<table border=1 class = "comictable csvtable sortable" id="%s">
        <thead><tr>
            <td class ="firstcol">FPs/scan</td><td align=center width='54'>1/8</td>
            <td align=center width='54'>1/4</td>
            <td align=center width='54'>1/2</td><td align=center width='54'>1</td>
            <td align=center width='54'>2</td><td align=center width='54'>4</td>
            <td align=center width='54'>8</td><td align=center width='54'>average</td>
        </tr></thead>""" % table_id
    tableHTML = tableHTML + "<tbody>"
    tableHTML = tableHTML + array_to_table_row(["small nodules"] + vars["smallscore"])
    tableHTML = tableHTML + array_to_table_row(["large nodules"] + vars["largescore"])
    tableHTML = tableHTML + array_to_table_row(["isolated nodules"] + vars["isolatedscore"])
    tableHTML = tableHTML + array_to_table_row(["vascular nodules"] + vars["vascularscore"])
    tableHTML = tableHTML + array_to_table_row(["pleural nodules"] + vars["pleuralscore"])
    tableHTML = tableHTML + array_to_table_row(["peri-fissural nodules"] + vars["fissurescore"])
    tableHTML = tableHTML + array_to_table_row(["all nodules"] + vars["frocscore"])
    tableHTML = tableHTML + "</tbody>"
    tableHTML = tableHTML + "</table>"
    # FIXME: create a temporary solution to including javascript and css with template tags
    script = """<script type="text/javascript">
        $('#%s').dataTable({
            "bJQueryUI": true,
            "sPaginationType": "full_numbers",
            "bPaginate": false,
            "bLengthChange": false,
            "bFilter": false,
            "bInfo": false,
            "bAutoWidth": false
        });
    </script>""" % table_id
    # NOTE(review): 'script' is built above but never appended to the returned
    # html, so the dataTable initialisation is currently discarded -- confirm
    # whether this is intentional (see FIXME above).
    return "<div class=\"comictablecontainer\">" + tableHTML + "</div>"
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """ Build a random identifier of the given length from the given alphabet.

    thanks to Ignacio Vazquez-Abrams on stackoverflow
    """
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
def array_to_table_row(rowvalues, trclass=""):
    """ Format a list of values as a single html table row.

    Floats are shown with three decimals; anything else is str()'d.
    trclass is written into the row's class attribute.
    """
    cells = []
    for cell in rowvalues:
        if type(cell) is float:
            cells.append("<td>%.3f</td>" % (cell))
        else:
            cells.append("<td>%s</td>" % (str(cell)))
    return "<tr class = \"%s\">" % trclass + "".join(cells) + "</tr>"
def parse_php_arrays(filename):
    """ Parse a php page containing only php arrays like $x=(1,2,3). Created to parse anode09 eval results.

    Returns: dict{"varname1",array1,....},
    array1 is a float array
    """
    verbose = False
    output = {}
    storage = DefaultStorage()
    with storage.open(filename, 'r') as f:
        content = f.read()
        # flatten to one line so the DOTALL regexes below match across rows
        content = content.replace("\n", "")
    php = re.compile("\<\?php(.*?)\?\>",re.DOTALL)
    phpcontent = php.search(content).group(1)
    assert phpcontent != "" , "could not find anything like <?php ?> in '%s'" % filename
    # each php variable assignment starts with a '$'
    phpvars = phpcontent.split("$")
    phpvars = [x for x in phpvars if x != ''] # remove empty
    if verbose:
        # python 2 print statements; only active when verbose is flipped above
        print "found %d php variables in %s. " % (len(phpvars), filename)
        print "parsing %s into int arrays.. " % (filename)
    # check whether this looks like a php var
    phpvar = re.compile("([a-zA-Z]+[a-zA-Z0-9]*?)=array\((.*?)\);",re.DOTALL)
    for var in phpvars:
        result = phpvar.search(var)
        #TODO Log these messages as info
        # NOTE(review): 'msg' is assigned in both failure branches below but
        # never logged or raised -- see the TODO above.
        if result == None :
            msg = "Could not match regex pattern '%s' to '%s'\
            " % (phpvar.pattern, var)
            continue
        if len(result.groups()) != 2:
            msg = "Expected to find varname and content,\
            but regex '%s' found %d items:%s " % (phpvar.pattern, len(result.groups()),
            "[" + ",".join(result.groups()) + "]")
            continue
        (varname, varcontent) = result.groups()
        output[varname] = [float(x) for x in varcontent.split(",")]
    return output
@register.tag(name="url_parameter")
def url_parameter(parser, token):
    """ Try to read given variable from given url. """
    usagestr = """Tag usage: {% url_parameter <param_name> %}
                  <param_name>: The parameter to read from the requested url.
                  Example: {% url_parameter name %} will write "John" when the
                  requested url included ?name=John.
                  """
    pieces = token.split_contents()
    tag = pieces[0]
    tag_args = pieces[1:]
    # exactly one parameter name is expected
    if len(tag_args) != 1:
        return TemplateErrorNode("Expected 1 argument, found " + str(len(tag_args)))
    node_args = {"url_parameter": tag_args[0], "token": token}
    return UrlParameterNode(node_args)
class UrlParameterNode(template.Node):
    """Node for {% url_parameter %}: writes the value of a GET parameter of
    the current request, or an inline error message when it is absent.
    """

    def __init__(self, args):
        # args: {"url_parameter": <parameter name>, "token": <original token>}
        self.args = args

    def make_error_msg(self, msg):
        # joins the keys of self.args into the error context string
        errormsg = "Error in url_parameter tag: '" + ",".join(self.args) + "': " + msg
        return makeErrorMsgHtml(errormsg)

    def render(self, context):
        # request= context["request"].GET[]
        # membership test via 'in' instead of QueryDict.has_key(): has_key is
        # deprecated in python 2 and removed in python 3 / modern django
        if self.args['url_parameter'] in context['request'].GET:
            return context['request'].GET[self.args['url_parameter']]
        else:
            error_message = "Error rendering %s: Parameter '%s' not found in request URL" % ("{% " + self.args['token'].contents + "%}",
                                                                                             self.args['url_parameter'])
            return makeErrorMsgHtml(error_message)
@register.tag(name="all_projects")
def render_all_projects(parser, token):
    """ Render an overview of all projects """
    try:
        visible_projects = ComicSite.objects.non_hidden()
    except ObjectDoesNotExist as e:
        errormsg = "Error rendering {% " + token.contents + " %}: Could not find any comicSite object.."
        return TemplateErrorNode(errormsg)
    return AllProjectsNode(visible_projects)
class AllProjectsNode(template.Node):
    """ Renders an html list of all projects known to COMIC.
    """

    def __init__(self, projects):
        self.projects = projects

    def render(self, context):
        # concatenate one summary snippet per project
        return "".join(self.project_summary_html(p) for p in self.projects)

    def project_summary_html(self, project):
        """ Build the html summary link for one project. """
        if comicsite.templatetags.comic_templatetags.subdomain_is_projectname():
            protocol, domainname = settings.MAIN_HOST_NAME.split("//")
            project_url = protocol + "//" + project.short_name + "." + domainname
            return comicsite.views.comic_site_to_html(project, project_url)
        return comicsite.views.comic_site_to_html(project)
@register.tag(name="all_projectlinks")
def render_all_projectlinks(parser, token):
    """ Render an overview of all projects including all links to external
    projects and challenges
    """
    usagestr = """Tag usage: {% all_projectlinks max_projects:int,comic_only=1|0}
                  max_projects is an optional parameter.
                  max_projects: show at most this number of projects.
                                if set, do not group projects per year but show all
                                also, show only projects hosted on comic, not
                                external links
                  """
    try:
        args = parseKeyValueToken(token)
    except ValueError:
        errormsg = "Error rendering {% " + token.contents + " %}: Error parsing token. " + usagestr
        return TemplateErrorNode(errormsg)
    if len(args) > 1:
        errormsg = "Error rendering {% {0} %}: expected at most one argument, but found [{1}]".format(token.contents,
                                                                                                      ",".join(args.keys()))
        return TemplateErrorNode(errormsg)
    if len(args) == 1:
        # args.keys()[0] relies on python 2 dict.keys() returning a list
        if args.keys()[0] != "max_projects":
            errormsg = "Error rendering {% {0} %}: expected argument 'max_projects' but found '{1}' instead".format(token.contents,
                                                                                                                    args.keys()[0])
            return TemplateErrorNode(errormsg)
        else:
            # value arrives as a string parsed from the template token
            args["max_projects"] = int(args["max_projects"])
    try:
        projects = ComicSite.objects.non_hidden()
    except ObjectDoesNotExist as e:
        errormsg = "Error rendering {% " + token.contents + " %}: Could not find any comicSite object.."
        return TemplateErrorNode(errormsg)
    return AllProjectLinksNode(projects,args)
class AllProjectLinksNode(template.Node):
    """ return html list listing all projects in COMIC
    """

    def __init__(self, projects,args):
        # projects: queryset of ComicSite objects
        # args: {} or {"max_projects": int}; presence switches the render mode
        self.projects = projects
        self.args = args

    def render(self, context):
        projectlinks = []
        for project in self.projects:
            projectlinks.append(project.to_projectlink())
        if self.args:
            html = self.render_project_links(projectlinks,self.args["max_projects"])
        else:
            # no limit set: also merge in external challenges read from the
            # excel sheet and group everything per year
            projectlinks += self.read_grand_challenge_projectlinks()
            html = self.render_project_links_per_year(projectlinks)
        #html = ""
        #for projectlink in projectlinks:
        #    html += projectlink.render_to_html()
        html = """<div id='projectlinks'>
                  <ul>{0}
                  <div style='clear:both'></div>
                  </ul>
                  </div> """.format(html)
        return html

    def render_project_links(self,projectlinks,max_projects):
        """ Show all projectlinks in one big list, sorted by date, most recent first
        @param max_projects: int show only this number
        """
        projectlinks = sorted(projectlinks,key=lambda x: x.date,reverse=True)
        if max_projects:
            projectlinks = projectlinks[0:max_projects]
        html = "\n".join([self.render_to_html(p) for p in projectlinks])
        return html

    def render_project_links_per_year(self,projectlinks):
        """ Create html to show each projectlink with subheadings per year sorted
        by diminishing year
        """
        #go throught all projectlinks and bin per year
        years = {}
        for projectlink in projectlinks:
            year = projectlink.date.year
            # dict.has_key is python-2-only
            if years.has_key(year):
                years[year].append(projectlink)
            else:
                years[year] = [projectlink]
        # python 2: dict.items() returns a list, sorted newest year first
        years = years.items()
        years = sorted(years,key=lambda x: x[0],reverse=True)
        html = ""
        for year in years:
            yearheader = "<div class ='yearHeader' id ='{0}'><a class ='yearHeaderAnchor'>{0}</a></div>".format(year[0])
            #html += yearheader
            #html += "\n".join([link.render_to_html() for link in year[1]])
            projectlinks = "\n".join([self.render_to_html(link) for link in year[1]])
            html += "<div class=projectlinksyearcontainer \
style='background-color:{0}'>{1}{2} <div style='clear:both;'>\
</div></div>".format("none",
                     yearheader,
                     projectlinks)
        return html

    def get_background_color(self,idx=-1):
        """ Each year has a different background returns color of css format
        rgb(xxx,xxx,xxx) """
        # NOTE: (003,...) is a python 2 octal literal (== 3); invalid in python 3
        colors = [(207,229,222),
                  (240,100,100),
                  (208,153,131),
                  (138,148,175),
                  (186,217,226),
                  (138,148,175),
                  (208,153,131),
                  (200,210,230),
                  (003,100,104),
                  (100,160,100)
                  ]
        #random.seed(int(seed))
        #idx = random.randint(0,9)
        if idx == -1:
            # NOTE(review): randint(0,len(colors)) is inclusive on both ends so
            # idx can equal len(colors); the modulo below hides that off-by-one
            idx = idx = random.randint(0,len(colors))
        idx = idx % len(colors);
        css_color = "rgb({},{},{})".format(*colors[idx])
        return css_color

    def render_to_html(self,projectlink):
        """ return html representation of projectlink """
        #html = '<div class = "projectlink"></div>'
        html = """
            <div class = "projectlink {link_class} {year} {comiclabel}">
            <div class ="top">
            <a href="{url}">
            <img alt="" src="{thumb_image_url}" height="100" border="0" width="100">
            </a>
            <div class="stats">{stats} </div>
            </div>
            <div class ="bottom">
            <div class="projectname"> {projectname} </div>
            <div class="description"> {description} </div>
            </div>
            </div>
            """.format(link_class = projectlink.find_link_class(),
                       comiclabel = self.get_comic_label(projectlink),
                       year = str(projectlink.params["year"]),
                       url=projectlink.params["URL"],
                       thumb_image_url=self.get_thumb_url(projectlink),
                       projectname=projectlink.params["abreviation"],
                       description = projectlink.params["description"],
                       stats = self.get_stats_html(projectlink)
                       )
        return html

    def capitalize(self,string):
        # NOTE: parameter shadows the stdlib 'string' module inside this method
        return string[0].upper()+string[1:]

    def get_comic_label(self,projectlink):
        """ For add this as id, for jquery filtering later on
        """
        if projectlink.params["hosted on comic"]:
            return "comic"
        else:
            return ""

    def get_stats_html(self,projectlink):
        """ Returns html to render number of downloads, participants etc..
        if a value is not found it is ommitted from the html so there will
        be no 'participants: <empty>' strings shown """
        stats = []
        stats.append("" + projectlink.get_short_project_type())
        #if projectlink.params["registered teams"]:
        #    stats.append("registered: " + str(projectlink.params["registered teams"]))
        if projectlink.params["dataset downloads"]:
            stats.append("downloads: " + str(projectlink.params["dataset downloads"]))
        if projectlink.params["submitted results"]:
            stats.append("submissions: " + str(projectlink.params["submitted results"]))
        # workshop date only shown for upcoming projects
        if projectlink.params["workshop date"] and projectlink.UPCOMING in projectlink.find_link_class():
            stats.append("workshop: " + self.format_date(projectlink.params["workshop date"]))
        if projectlink.params["last submission date"]:
            stats.append("last subm.: " + self.format_date(projectlink.params["last submission date"]))
        if projectlink.params["event name"]:
            stats.append("event: " + self.make_event_link(projectlink))
        stats_caps = []
        for string in stats:
            stats_caps.append(self.capitalize(string))
        #put divs around each statistic in the stats list
        stats_html = "".join(["<div>{}</div>".format(stat) for stat in stats_caps])
        return stats_html

    def make_event_link(self,projectlink):
        """ To link to event, like ISBI 2013 in overviews
        """
        return "<a href='{0}' class='eventlink'>{1}</a>".format(projectlink.params["event URL"],
                                                                projectlink.params["event name"])

    def get_thumb_url(self,projectlink):
        """ For displaying a little thumbnail image for each project, in
        project overviews
        """
        if projectlink.is_hosted_on_comic():
            #thumb_image_url = "https://i.duckduckgo.com/i/764237a0.jpg"
            thumb_image_url = projectlink.params["thumb_image_url"]
        else:
            thumb_image_url = "http://shared.runmc-radiology.nl/mediawiki/challenges/localImage.php?file="+projectlink.params["abreviation"]+".png"
        return thumb_image_url

    def project_summary_html(self,project):
        """ get a link to this project """
        if comicsite.templatetags.comic_templatetags.subdomain_is_projectname():
            protocol,domainname = settings.MAIN_HOST_NAME.split("//")
            url = protocol + "//" +project.short_name +"."+ domainname
            html = comicsite.views.comic_site_to_grand_challenge_html(project,url)
        else:
            html = comicsite.views.comic_site_to_grand_challenge_html(project)
        return html

    def read_grand_challenge_projectlinks(self):
        # Read projectlinks for external challenges from an excel sheet in the
        # main project's dropbox folder; returns [] on read/encoding errors.
        filename = "challengestats.xls"
        project_name = settings.MAIN_PROJECT_NAME
        filepath = os.path.join(settings.DROPBOX_ROOT, project_name, filename)
        reader = ProjectExcelReader(filepath,'Challenges')
        try:
            projectlinks = reader.get_project_links()
        except IOError as e:
            logger.warning("Could not read any projectlink information from"
                           " '%s' returning empty list. trace: %s " %(filepath,traceback.format_exc()))
            projectlinks = []
        except UnicodeEncodeError as e:
            logger.warning("Encoding error in reading excel from "
                           " '%s' returning empty list. trace: %s " %(filepath,traceback.format_exc()))
            projectlinks = []
        projectlinks_clean = []
        for projectlink in projectlinks:
            projectlinks_clean.append(self.clean_grand_challenge_projectlink(projectlink))
        return projectlinks_clean

    def clean_grand_challenge_projectlink(self,projectlink):
        """ Specifically for the grand challenges excel file, make everything strings,
        change weird values, like having more downloads than registered users
        """
        # cast all to int as there are no float values in the excel file, I'd
        # rather do this here than change the way excelreader reads them in
        for key in projectlink.params.keys():
            param = projectlink.params[key]
            if type(param) == float:
                projectlink.params[key] = int(param)
        if projectlink.params["last submission date"]:
            projectlink.params["last submission date"] = self.determine_project_date(projectlink.params["last submission date"])
        if projectlink.params["workshop date"]:
            projectlink.params["workshop date"] = self.determine_project_date(projectlink.params["workshop date"])
        return projectlink

    def determine_project_date(self,datefloat):
        """ Parse float (e.g. 20130425.0) read by excelreader into python date
        """
        date = str(datefloat)
        # string slicing assumes a yyyymmdd prefix, e.g. '20130425.0'
        parsed = datetime.datetime(year=int(date[0:4]),
                                   month=int(date[4:6]),
                                   day=int(date[6:8]))
        return parsed

    def format_date(self,date):
        # e.g. 'Apr 25, 2013'
        return date.strftime('%b %d, %Y')
@register.tag(name="image_url")
def render_image_url(parser, token):
    """ render image based on image title """
    # split_contents() knows not to split quoted strings.
    # Fix: the tuple unpacking raises ValueError when the tag is called with
    # anything other than exactly one argument. Previously that unpacking
    # happened outside the try block, so the 'except ValueError' clause around
    # the database lookup could never catch it and the template crashed.
    try:
        tag_name, args = token.split_contents()
    except ValueError:
        raise template.TemplateSyntaxError("%r tag requires a single argument" % token.contents.split()[0])
    imagetitle = args
    try:
        image = UploadModel.objects.get(title=imagetitle)
    except ObjectDoesNotExist as e:
        errormsg = "Error rendering {% " + token.contents + " %}: Could not find any images named '" + imagetitle + "' in database."
        # raise template.TemplateSyntaxError(errormsg)
        return TemplateErrorNode(errormsg)
    [isImage, errorMessage] = hasImgExtension(str(image.file))
    if not isImage:
        errormsg = "Error rendering {% " + token.contents + " %}:" + errorMessage
        # raise template.TemplateSyntaxError(errormsg)
        return TemplateErrorNode(errormsg)
    return imagePathNode(image)
class imagePathNode(template.Node):
    """ Resolve an UploadModel instance to the local media path it is
    served from.
    """
    def __init__(self, image):
        self.image = image

    def render(self, context):
        # UploadModel.file stringifies to its path relative to media root.
        return "/static/media/%s" % self.image.file
@register.tag(name="registration")
def render_registration_form(parser, token):
    """ Render a registration form for the current site """
    try:
        projects = ComicSite.objects.all()
    except ObjectDoesNotExist:
        # Show the problem inline on the page instead of failing the render.
        return TemplateErrorNode("Error rendering {% " + token.contents + " %}: Could not find any comicSite object..")
    return RegistrationFormNode(projects)
class RegistrationFormNode(template.Node):
    """ return HTML form of registration, which links to main registration
    Currently just links to registration
    """
    def __init__(self, projects):
        # projects: queryset of all ComicSite objects (not used in render yet).
        self.projects = projects

    def render(self, context):
        # The page being rendered determines which project to register for.
        project = context.page.comicsite
        pagetitle = context.page.title
        # After signing in, send the user back to the page they came from.
        signup_url = reverse('comicsite_signin',args=[project.short_name]) + "?next=" \
                     + reverse('comicsite.views.page', kwargs={'site_short_name':project.short_name, 'page_title':pagetitle})
        signuplink = makeHTMLLink(signup_url, "sign in")
        registerlink = makeHTMLLink(reverse('comicsite_signup',args=[project.short_name]), "register")
        # NOTE: is_authenticated() called as a method is the pre-Django-1.10 API.
        if not context['user'].is_authenticated():
            return "To register for " + project.short_name + ", you need be logged in to COMIC.\
            please " + signuplink + " or " + registerlink
        else:
            if project.is_participant(context['user']):
                msg = "You have already registered for " + project.short_name
            else:
                register_url = reverse('comicsite.views._register', kwargs={'site_short_name':project.short_name})
                # nested if loops through the roof. What would uncle Bob say?
                # "nested if loops are a missed chance for inheritance."
                # TODO: possible way out: create some kind of registration request
                # manager which can be asked these things
                if project.require_participant_review:
                    # Admin approval required: show the status of an existing
                    # request, or offer to file a new one.
                    pending = RegistrationRequest.objects.get_pending_registration_requests(context['user'],project)
                    if pending:
                        msg = pending[0].status_to_string()
                    else:
                        msg = makeHTMLLink(register_url, "Request registration for " + project.short_name)
                else:
                    # Open registration: link straight to the register view.
                    msg = makeHTMLLink(register_url, "Register for " + project.short_name)
            return msg
class TemplateErrorNode(template.Node):
    """ Stand-in node that renders a highlighted error message exactly where
    the failing template tag appeared, so authors can spot mistakes at once.
    """
    def __init__(self, errormsg):
        # Encode braces/percent signs up front so the stored message can
        # never itself be interpreted as template syntax.
        self.msg = HTML_encode_django_chars(errormsg)

    def render(self, context):
        return makeErrorMsgHtml(self.msg)
def HTML_encode_django_chars(string):
    """replace curly braces and percent signs by their html encoded equivalents

    This prevents Django from re-interpreting the message itself as template
    syntax when it is rendered back into a page.
    """
    # BUGFIX: these replacements had degraded into no-ops ("{" -> "{");
    # restore the actual entity encoding the docstring promises.
    string = string.replace("{", "&#123;")
    string = string.replace("}", "&#125;")
    string = string.replace("%", "&#37;")
    return string
def makeHTMLLink(url, linktext):
    """ Return an HTML anchor element pointing at url with text linktext. """
    return '<a href="%s">%s</a>' % (url, linktext)
def hasImgExtension(filename):
    """ Check by extension whether filename looks like an image.

    Returns a [bool, message] pair: [True, ""] for image files, otherwise
    [False, <explanation listing the allowed extensions>].
    """
    allowedextensions = [".jpg", ".jpeg", ".gif", ".png", ".bmp"]
    # BUGFIX: 'path' is not a name imported by this module; use os.path
    # explicitly. Also compare case-insensitively so 'photo.JPG' passes.
    ext = os.path.splitext(filename)[1].lower()
    if ext in allowedextensions:
        return [True, ""]
    else:
        return [False, "file \"" + filename + "\" does not look like an image. Allowed extensions: [" + ",".join(allowedextensions) + "]"]
def makeErrorMsgHtml(text):
    """ Wrap text in the standard page-error markup, encoding template
    characters so the message renders literally.
    """
    encoded = HTML_encode_django_chars(text)
    return '<p><span class="pageError"> ' + encoded + ' </span></p>'
@register.tag(name="project_statistics")
def display_project_statistics(parser, token):
    """ Parser for the project statistics tag.
    """
    # The tag takes no arguments; everything is read from the render context.
    return ProjectStatisticsNode()
class ProjectStatisticsNode(template.Node):
    """ Render participant statistics (user count plus a Google geochart of
    participants per country) for the project of the current page.
    """
    def __init__(self):
        pass

    def render(self, context):
        project_name = context.page.comicsite.short_name
        snippet_header = "<div class='statistics'>"
        snippet_footer = "</div>"
        # Get the users belonging to this project
        # Participants are modelled as members of the '<project>_participants' group.
        perm = Group.objects.get(name='{}_participants'.format(project_name))
        users = User.objects.filter(groups=perm).distinct()
        # Histogram of participants per (display-name) country.
        countries = [u.get_profile().get_country_display() for u in users]
        hist_countries = Counter(countries)
        chart_data = [['Country', '#Participants']]
        # NOTE: iteritems() is Python-2 only.
        for key, val in hist_countries.iteritems():
            chart_data.append([str(key), val])
        # chart_data is interpolated via its Python repr, which happens to be
        # valid JavaScript for a list of strings/ints. The doubled {{ }} are
        # str.format escapes for literal braces in the JS.
        snippet_geochart = """
        <script type='text/javascript' src='https://www.google.com/jsapi'></script>
        <script type='text/javascript'>
        google.load('visualization', '1', {{'packages': ['geochart']}});
        google.setOnLoadCallback(drawRegionsMap);
        function drawRegionsMap() {{
            var data = google.visualization.arrayToDataTable(
                {data}
            );
            var options = {{}};
            var chart = new google.visualization.GeoChart(document.getElementById('chart_div'));
            chart.draw(data, options);
        }};
        </script>
        <div id="chart_div" style="width: 100%; height: 170px;"></div>
        """.format(data=chart_data)
        snippet = """
        <h1>Statistics</h1><br>

        <p># of users: {num_users}</p>
        {geochart}
        """.format(num_users=len(users), geochart=snippet_geochart)
        return snippet_header + snippet + snippet_footer
Short debug commit.
"""
Custom tags to use in templates or code to render file lists etc.
History
03/09/2012 - Sjoerd - Created this file
"""
import pdb
import csv, numpy
import datetime
import ntpath
import os
import random
import re
import string
import StringIO
import sys
import traceback
import logging
from collections import Counter
from exceptions import Exception
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist,ImproperlyConfigured
from django.core.urlresolvers import NoReverseMatch
from django.contrib.auth.models import Group, User, Permission
from django.core.files.storage import DefaultStorage
from django.template import RequestContext, defaulttags
from django.utils.html import escape
from profiles.forms import SignupFormExtra
from profiles.models import UserProfile
from comicmodels.models import FileSystemDataset, UploadModel, DropboxFolder,RegistrationRequest # FIXME: abstract Dataset should be imported here, not explicit filesystemdataset. the template tag should not care about the type of dataset.
from comicmodels.models import ComicSite, Page
import comicsite.views
from comicsite.utils.html import escape_for_html_id
from dropbox.rest import ErrorResponse
from dataproviders import FileSystemDataProvider
from dataproviders.DropboxDataProvider import DropboxDataProvider, HtmlLinkReplacer # TODO: move HtmlLinkReplacer to better location..
from dataproviders.ProjectExcelReader import ProjectExcelReader
#---------#---------#---------#---------#---------#---------#---------#---------
# This is needed to use the @register.tag decorator
#register = template.Library()
# LibraryPlus extends Django's template Library so each tag can register an
# accompanying usage string (surfaced by get_usagestr and the taglist tag).
from comicsite.templatetags import library_plus
register = library_plus.LibraryPlus()

# Module-wide logger using Django's standard logger name.
logger = logging.getLogger("django")
def parseKeyValueToken(token):
    """
    Parse the arguments of a templatetag token into a parameter dictionary.

    The token is assumed to look like this:
        visualization key1:value1 key2:value2 ...

    Returns a dict mapping each key to its (string) value.
    Raises ValueError when an argument contains no colon at all.
    """
    split = token.split_contents()
    tag = split[0]
    args = split[1:]
    # BUGFIX: split on the FIRST colon only, so values may themselves
    # contain colons (host:port pairs, URLs). Previously such arguments
    # made the dict() call raise ValueError.
    return dict([param.split(":", 1) for param in args])
def get_usagestr(function_name):
    """
    Return usage string for a registered template tag function. For displaying
    this info in errors or tag overviews. Returns "" when the tag registered
    no usage string.
    """
    # dict.get replaces the deprecated, Python-2-only has_key() call;
    # behavior is identical.
    usagestr = register.usagestrings.get(function_name, "")
    return sanitize_django_items(usagestr)
@register.tag(name="taglist",
              usagestr="""
              <% taglist %> :
              show all available tags
              """
              )
def get_taglist(parser, token):
    """ Render a table listing every registered tag with its usage string. """
    return TagListNode()
#=========#=========#=========#=========#=========#=========#=========#=========#=========
def subdomain_is_projectname():
    """ Check whether this setting is true in settings. Return false if not found
    """
    # getattr with a default collapses the original hasattr/else branch.
    enabled = getattr(settings, "SUBDOMAIN_IS_PROJECTNAME", False)
    # The two settings only make sense together; fail loudly otherwise.
    if enabled and not hasattr(settings, "MAIN_HOST_NAME"):
        msg = """Key 'SUBDOMAIN_IS_PROJECTNAME' was defined in settings,
                 but 'MAIN_HOST_NAME' was not. These belong together. Please
                 add 'MAIN_HOST_NAME' and set it to the hostname of your site."""
        raise ImproperlyConfigured(msg)
    return enabled
@register.tag
def url(parser, token):
    """ Overwrite built in url tag. It works identicaly, except that where possible
    it will use subdomains to refer to a project instead of a full url path.

    For example, if the subdomain is vessel12.domain.com it will refer to a page
    'details' as /details/ instead of /site/vessel12/details/

    REQUIREMENTS:
    * MIDDLEWARE_CLASSES in settings should contain
      'comicsite.middleware.subdomain.SubdomainMiddleware'

    * These keys should be in the django settings file:
      SUBDOMAIN_IS_PROJECTNAME = True
      MAIN_HOST_NAME = <your site's hostname>

    * APACHE url rewriting should be in effect to rewrite subdomain to
      site/project/. To get you started: the following apache config does this
      for the domain 'devcomicframework.org'
      (put this in your apache config file)

        RewriteEngine   on
        RewriteCond $1 .*/$
        RewriteCond $1 !^/site/.*
        RewriteCond %{HTTP_HOST} !^devcomicframework\.org$
        RewriteCond %{HTTP_HOST} !^www.devcomicframework\.org$
        RewriteCond %{HTTP_HOST} ^([^.]+)\.devcomicframework\.org$
        RewriteRule (.*) /site/%1$1 [PT]

    TODO: turn on and off this behaviour in settings, maybe explicitly define
    base domain to also make it possible to use dots in the base domain.
    """
    # Let Django's stock url tag do all the parsing, then wrap its node in
    # our subdomain-aware subclass, reusing everything it extracted.
    orgnode = defaulttags.url(parser,token)
    return comic_URLNode(orgnode.view_name,orgnode.args, orgnode.kwargs, orgnode.asvar)
class comic_URLNode(defaulttags.URLNode):
    """ URLNode subclass that rewrites project-internal urls to their
    subdomain-relative form when SUBDOMAIN_IS_PROJECTNAME is enabled.
    """
    def render(self, context):
        # TODO: How to refer to method in this file nicely? This seems a bit cumbersome
        subdomain_is_projectname = comicsite.templatetags.comic_templatetags.subdomain_is_projectname()
        #get the url the default django method would give.
        url = super(comic_URLNode, self).render(context)
        # NOTE(review): this lowercases the ENTIRE url, including any
        # case-sensitive path segments -- confirm this is intended.
        url = url.lower()
        if subdomain_is_projectname:
            # SubdomainMiddleware sets request.subdomain when one is present.
            if hasattr(context['request'],"subdomain"):
                subdomain = context['request'].subdomain
            else:
                subdomain = ""
            if subdomain == "":
                #we are on the regular domain, do not change any links
                return url
            else:
                # Interpret subdomain as a comicsite. What would normally be the
                # path to this comicsite?
                path_to_site = reverse("comicsite.views.site",args=[subdomain]).lower()
                if url.startswith(path_to_site):
                    # Inside the current project: strip the /site/<project>/ prefix.
                    return url.replace(path_to_site,"/")
                else:
                    # this url cannot use the domain name shortcut, so it is
                    # probably meant as a link the main comicframework site.
                    # in that case hardcode the domain to make sure the sub-
                    # domain is gone after following this link
                    return settings.MAIN_HOST_NAME + url
        else:
            return url
class TagListNode(template.Node):
    """ Print available tags as text
    """
    def __init__(self):
        pass

    def render(self, context):
        # Two-column table: tag name and its sanitized usage string.
        html_out = "<table class =\"comictable taglist\">"
        html_out = html_out + "<tr><th>tagname</th><th>description</th></tr>"
        # Alternate row classes for striped styling.
        rowclass = "odd"
        # NOTE: iteritems() is Python-2 only.
        for key,val in register.usagestrings.iteritems():
            html_out = html_out + "<tr class=\"%s\"><td>%s</td><td>%s</td></tr>\
            " %(rowclass, key, sanitize_django_items(val))
            if rowclass == "odd":
                rowclass = "even"
            else:
                rowclass = "odd"
        html_out = html_out + "</table>"
        return html_out
def sanitize_django_items(string):
    """
    Replace {{, }}, {%, %} and angle brackets with HTML entities so that a
    usage string can be shown literally on a page without being executed as
    django template syntax. Newlines become <br/> for HTML display.
    """
    # BUGFIX: these replacements had degraded into no-ops ("{{" -> "{{");
    # restore the real entity encoding. Order matters: encode template
    # markers first, then bare angle brackets, then newlines (which insert
    # the intentional <br/> markup).
    out = string
    out = out.replace("{{", "&#123;&#123;")
    out = out.replace("}}", "&#125;&#125;")
    out = out.replace("{%", "&#123;&#37;")
    out = out.replace("%}", "&#37;&#125;")
    out = out.replace(">", "&gt;")
    out = out.replace("<", "&lt;")
    out = out.replace("\n", "<br/>")
    return out
@register.simple_tag
def metafooterpages():
    """ Get html for links to general pages like 'contact' """
    html_string = "<div class='text'><span>COMIC:</span></div>"
    for page in comicsite.views.getPages(settings.MAIN_PROJECT_NAME):
        if page.hidden:
            continue
        url = reverse('comicsite.views.comicmain', kwargs={'page_title': page.title})
        if comicsite.templatetags.comic_templatetags.subdomain_is_projectname():
            # Links from a project subdomain must point at the main host.
            url = settings.MAIN_HOST_NAME + url
        # Fall back to the page title when no display title is set.
        label = (page.display_title == "" and page.title) or page.display_title
        html_string += "<a class='metaFooterMenuItem' href='%s'>" % url
        html_string += label
        html_string += "</a>"
    return html_string
@register.tag(name="filelist")
def do_get_files(parser, token):
    """ Parse {% filelist "<folder>" %}: list image files in <folder>. """
    try:
        # split_contents() knows not to split quoted strings.
        tag_name, filefolder = token.split_contents()
        # Hard-coded date format (left over from Django's docs example).
        format_string = "\"%Y-%m-%d %I:%M %p\""
    except ValueError:
        raise template.TemplateSyntaxError("%r tag requires a single argument" % token.contents.split()[0])
    # format_string is hard-coded with quotes above, so this check always
    # passes; retained for parity with the documentation example.
    if not (format_string[0] == format_string[-1] and format_string[0] in ('"', "'")):
        raise template.TemplateSyntaxError("%r tag's argument should be in quotes" % tag_name)
    # Strip the surrounding quotes from both stored values.
    return FileListNode(format_string[1:-1], filefolder[1:-1])
class FileListNode(template.Node):
    """ Render a comma-separated listing of the image files in a folder. """
    def __init__(self, format_string, filefolder):
        self.format_string = format_string
        self.filefolder = filefolder

    def render(self, context):
        provider = FileSystemDataProvider.FileSystemDataProvider(self.filefolder)
        names = provider.getImages()
        return "available files:" + ", ".join(names)
#========#========#========#========#========#========#========#========
@register.tag(name="dataset",
              usagestr= """Tag usage: {% dataset <datasetname>,<comicsitename> %}. <comicsitename> can be\
              omitted, defaults to current site"""
              )
def render_dataset(parser, token):
    """ Given a challenge and a dataset name, show all files in this dataset as list"""
    #usagestr = DatasetNode.usagestr
    usagestr = get_usagestr("render_dataset")
    # check some basic stuff
    try:
        tag_name, args = token.split_contents()
    except ValueError:
        errormsg = "Error rendering {% " + token.contents + " %}: tag requires at least one \
        argument. " + usagestr
        # raise template.TemplateSyntaxError(errormsg)
        return TemplateErrorNode(errormsg)
    # args is either "<dataset>" or "<dataset>,<project>"; count the commas
    # to tell the two forms apart.
    if args.count(",") == 0:
        dataset_title = args
        project_name = ""
    elif args.count(",") == 1 :
        dataset_title, project_name = args.split(",")
    else:
        errormsg = "Error rendering {% " + token.contents + " %}: found " + str(args.count(",")) + \
                   " comma's, expected at most 1." + usagestr
        return TemplateErrorNode(errormsg)
    return DatasetNode(dataset_title, project_name)
class DatasetNode(template.Node):
    """ Show list of linked files for given dataset
    """
    usagestr = """{% dataset <datasetname>,<comicsitename> %}
    Tag usage: {% dataset <datasetname>,<comicsitename> %}. <comicsitename> can be\
    omitted, defaults to current site"""

    def __init__(self, dataset_title, project_name):
        self.dataset_title = dataset_title
        self.project_name = project_name

    def make_dataset_error_msg(self, msg):
        # Render the error inline on the page instead of raising.
        errormsg = "Error rendering DataSet '" + self.dataset_title + "' for project '" + self.project_name + "': " + msg
        return makeErrorMsgHtml(errormsg)

    def render(self, context):
        # Default to the project of the page currently being rendered.
        if self.project_name == "":
            self.project_name = context.page.comicsite.short_name
        try:
            dataset = FileSystemDataset.objects.get(comicsite__short_name=self.project_name, title=self.dataset_title)
        except ObjectDoesNotExist as e:
            return self.make_dataset_error_msg("could not find object in database")
        else:
            self.filefolder = dataset.get_full_folder_path()
        dp = FileSystemDataProvider.FileSystemDataProvider(self.filefolder)
        try:
            filenames = dp.getAllFileNames()
        except (OSError) as e:
            return self.make_dataset_error_msg(str(e))
        filenames.sort()
        # Build one download link per file, served via the filetransfers app.
        links = []
        for filename in filenames:
            downloadlink = reverse('filetransfers.views.download_handler_dataset_file', kwargs={'project_name':dataset.comicsite.short_name,
                                                                                                'dataset_title':dataset.title,
                                                                                                'filename':filename})
            # <a href="{% url filetransfers.views.download_handler_dataset_file project_name='VESSEL12' dataset_title='vessel12' filename='test.png' %}">test </a>
            links.append("<li><a href=\"" + downloadlink + "\">" + filename + " </a></li>")
        description = dataset.description
        htmlOut = description + "<ul class=\"dataset\">" + "".join(links) + "</ul>"
        return htmlOut
@register.tag(name="listdir",
              usagestr= """Tag usage: {% listdir path:string extensionFilter:ext1,ext2,ext3 %}
              path: directory relative to this projects dropbox folder to list files from. Do not use leading slash.
              extensionFilter: An include filter to specify the file types which should be displayd in the filebrowser.
              """
              )
def listdir(parser, token):
    """ show all files in dir as a downloadable list"""
    usagestr = get_usagestr("listdir")
    try:
        arguments = parseKeyValueToken(token)
    except ValueError:
        return TemplateErrorNode("Error rendering {% " + token.contents + " %}: Error parsing token. " + usagestr)
    # 'path' is the only mandatory parameter.
    if "path" not in arguments:
        return TemplateErrorNode("Error rendering {% " + token.contents + " %}: dataset argument is missing." + usagestr)
    return ListDirNode(arguments)
class ListDirNode(template.Node):
    """ Render the files in a directory (relative to the project's dropbox
    folder) as a list of download links.
    """
    usagestr = get_usagestr("listdir")

    def __init__(self, args):
        self.path = args['path']
        self.args = args

    def make_dataset_error_msg(self, msg):
        # Render the problem inline on the page instead of raising.
        return makeErrorMsgHtml("Error listing folder '" + self.path + "': " + msg)

    def render(self, context):
        project_name = context.page.comicsite.short_name
        projectpath = project_name + "/" + self.path
        storage = DefaultStorage()
        try:
            filenames = storage.listdir(projectpath)[1]
        except OSError as e:
            return self.make_dataset_error_msg(str(e))
        filenames.sort()
        # Optional include-filter. Results stay grouped per extension, in
        # the order the extensions were given (matching the original loop).
        if 'extensionFilter' in self.args:
            wanted = self.args['extensionFilter'].split(",")
            filenames = [name for ext in wanted for name in filenames if name.endswith(ext)]
        items = []
        for name in filenames:
            link = reverse('project_serve_file',
                           kwargs={'project_name': project_name,
                                   'path': self.path + "/" + name})
            items.append('<li><a href="' + link + '">' + name + ' </a></li>')
        return '<ul class="dataset">' + "".join(items) + '</ul>'
@register.tag(name = "visualization")
def render_visualization(parser, token):
    """ Given a dataset name, show a 2D visualization for that """
    usagestr = """Tag usage: {% visualization dataset:string
                                              width:number
                                              height:number
                                              deferredLoad:0|1
                                              extensionFilter:ext1,ext2,ext3%}
                  The only mandatory argument is dataset.
                  width/heigth: Size of the 2D view area.
                  defferedLoad: If active, user has to click on the area to load the viewer.
                  extensionFilter: An include filter to specify the file types which should be displayd in the filebrowser.
                  """
    try:
        arguments = parseKeyValueToken(token)
    except ValueError:
        return TemplateErrorNode("Error rendering {% " + token.contents + " %}: Error parsing token. " + usagestr)
    # 'dataset' is the only mandatory parameter.
    if "dataset" not in arguments:
        return TemplateErrorNode("Error rendering {% " + token.contents + " %}: dataset argument is missing." + usagestr)
    return VisualizationNode(arguments)
class VisualizationNode(template.Node):
    """
    Renders the ComicWebWorkstation using MeVisLab
    """
    def __init__(self, args):
        # args: parsed key:value tag parameters; only "dataset" is mandatory.
        self.args = args

    def make_dataset_error_msg(self, msg):
        # Render the error inline on the page instead of raising.
        errormsg = "Error rendering Visualization '" + str(self.args) + ":" + msg
        return makeErrorMsgHtml(errormsg)

    def render(self, context):
        # id(self) gives each embedded viewer unique DOM element ids so
        # several visualizations can coexist on one page.
        htmlOut = """
          <div class="COMICWebWorkstationButtons">
            <button id="comicViewerSetSmallSize%(id)d"> small </button>
            <button id="comicViewerSetLargeSize%(id)d"> large </button>
            <button id="comicViewerFullscreenToggle%(id)d"> fullscreen </button>
          </div>
          <div id="comicViewer%(id)d" style="width: %(width)spx; height:%(height)spx"></div>
          <script type="text/javascript">
            var fmeViewer%(id)d = null;
            //$(document).ready(function() {
              console.log('fmeviewee')
              fmeViewer%(id)d = new COMICWebWorkstationWrapper("comicViewer%(id)d");
              var options = {'path':'%(path)s',
                             'deferredLoad':%(deferredLoad)s,
                             'extensionFilter':'%(extensionFilter)s',
                             'width':%(width)s,
                             'height':%(height)s,
                             'application': 'COMICWebWorkstation_1.2',
                             'webSocketHostName':%(webSocketHostName)s,
                             'webSocketPort':%(webSocketPort)s,
                             'urlToMLABRoot': "/static/js" };
              fmeViewer%(id)d.init(options);
            //});

            $("#comicViewerSetSmallSize%(id)d").click(function(){
              fmeViewer%(id)d.setSmallSize()
            })
            $("#comicViewerSetLargeSize%(id)d").click(function(){
              fmeViewer%(id)d.setLargeSize()
            })
            $("#comicViewerFullscreenToggle%(id)d").click(function(){
              fmeViewer%(id)d.gotoFullscreen()
            })

          </script>
        """ % ({"id": id(self),
                "width": self.args.get("width", "600"),
                "height": self.args.get("height", "400"),
                "path": self.args.get("dataset"),
                "extensionFilter": self.args.get("extensionFilter", ""),
                "deferredLoad": self.args.get("deferredLoad", "0"),
                # Host/port default to the JS literal 'undefined' so the JS
                # wrapper falls back to its own defaults.
                "webSocketHostName": self.args.get("webSocketHostName",
                                                   "undefined"),
                "webSocketPort": self.args.get("webSocketPort", "undefined")})
        return htmlOut
@register.tag(name="dropbox")
def render_dropbox(parser, token):
    """ Given a django_dropbox item title, render a file from this dropbox """
    usagestr = """Tag usage: {% dropbox title:string file:filepath %}
                  title: the title of an autorized django_dropbox item
                  file: path to a file in your dropbox /apps/COMIC folder
                  """
    try:
        arguments = parseKeyValueToken(token)
    except ValueError:
        return TemplateErrorNode("Error rendering {% " + token.contents + " %}: Error parsing token. " + usagestr)
    # Both parameters are mandatory; report whichever is missing first.
    for required in ("title", "file"):
        if required not in arguments:
            return TemplateErrorNode("Error rendering {% " + token.contents + " %}: " + required + " argument is missing." + usagestr)
    try:
        df = DropboxFolder.objects.get(title=arguments['title'])
    except ObjectDoesNotExist:
        return TemplateErrorNode("could not find dropbox titled '" + arguments['title'] + "' in database")
    provider = df.get_dropbox_data_provider()
    replacer = HtmlLinkReplacer()
    return DropboxNode(arguments, df, provider, replacer)
class DropboxNode(template.Node):
    """ Render the contents of a file stored in a linked dropbox folder,
    rewriting relative links so they keep working inside COMIC.
    """
    def __init__(self, args, df, provider, replacer):
        # args: {"title": <dropbox folder title>, "file": <path inside it>}
        self.args = args
        self.df = df
        # provider reads file contents from dropbox; replacer rewrites links.
        self.provider = provider
        self.replacer = replacer

    def make_dropbox_error_msg(self, msg):
        # Render the error inline on the page instead of raising.
        errormsg = "Error rendering dropbox '" + str(self.args) + ": " + msg
        return makeErrorMsgHtml(errormsg)

    def render(self, context):
        try:
            contents = self.provider.read(self.args["file"])
        except ErrorResponse as e:
            return self.make_dropbox_error_msg(str(e))
        # any relative link inside included file has to be replaced to make it work within the COMIC
        # context.
        baseURL = reverse('comicsite.views.dropboxpage', kwargs={'site_short_name':context.page.comicsite.short_name,
                                                                 'page_title':context.page.title,
                                                                 'dropboxname':self.args['title'],
                                                                 'dropboxpath':"remove"})
        # for some reason reverse matching does not work for emtpy dropboxpath (maybe views.dropboxpage
        # throws an error?. Workaround is to add 'remove' as path and chop this off the returned link
        # nice.
        baseURL = baseURL[:-7]  # remove "remove/" from baseURL
        currentpath = ntpath.dirname(self.args['file']) + "/"  # path of currently rendered dropbox file
        replaced = self.replacer.replace_links(contents, baseURL, currentpath)
        htmlOut = replaced
        return htmlOut
def add_quotes(string):
    """ Return string wrapped in single quotes, unless it already starts
    with a quote (single or double), in which case it is returned as-is.
    """
    # BUGFIX: the second startswith() check duplicated the single-quote
    # test; it was clearly intended to cover double quotes as well.
    if string.startswith("'") or string.startswith('"'):
        return string
    else:
        return "'" + string + "'"
def strip_quotes(string):
    """ Strip one leading and one trailing quote (single or double) from
    string, if present. Each end is checked independently.
    """
    # BUGFIX: both conditions tested only for single quotes; double quotes
    # were clearly intended as well (cf. add_quotes above).
    stripped = string
    if string.startswith("'") or string.startswith('"'):
        stripped = stripped[1:]
    if string.endswith("'") or string.endswith('"'):
        stripped = stripped[:-1]
    return stripped
def in_list(needles, haystack):
    """ return True if any of the strings in string array needles is in haystack
    """
    return any(needle in haystack for needle in needles)
def inlist(needles, haystack):
    """ Return true if any of the items in list needles is in haystack.

    NOTE: duplicate of in_list above; kept for backwards compatibility.
    """
    return any(item in haystack for item in needles)
# {% insertfile results/test.txt %}
@register.tag(name="insert_file")
def insert_file(parser, token):
    """ Render a file from the local dropbox folder of the current project"""
    usagestr = """Tag usage: {% insertfile <file> %}
                  <file>: filepath relative to project dropboxfolder.
                  Example: {% insertfile results/test.txt %}
                  You can use url parameters in <file> by using {{curly braces}}.
                  Example: {% insterfile {{id}}/result.txt %} called with ?id=1234
                  appended to the url will show the contents of "1234/result.txt".
                  """
    parts = token.split_contents()
    arguments = parts[1:]
    # Exactly one argument (the file path) is expected.
    if len(arguments) != 1:
        return TemplateErrorNode("Expected 1 argument, found " + str(len(arguments)))
    args = {"file": add_quotes(arguments[0])}
    replacer = HtmlLinkReplacer()
    return InsertFileNode(args, replacer, parser)
class InsertFileNode(template.Node):
    """ Render the contents of a file from the current project's data
    (dropbox) folder directly into the page, with relative links rewritten
    so they keep working in the COMIC context.
    """
    def __init__(self, args, replacer,parser):
        # args: {"file": <quoted filepath, may contain {{params}} or a var>}
        self.args = args
        # HtmlLinkReplacer rewrites relative links in the included contents.
        self.replacer = replacer
        # parser is kept so the file token can be resolved as a template var.
        self.parser = parser

    def make_error_msg(self, msg):
        # Render the error inline on the page instead of raising.
        errormsg = "Error including file '" + "," + self.args["file"] + "': " + msg
        return makeErrorMsgHtml(errormsg)

    def is_inside_project_data_folder(self,folder,project):
        """ For making sure nosey people do not use too many ../../../ in paths
        to snoop around in the filesystem.

        folder: string containing a filepath
        project: a comicsite object
        """
        data_folder = project.get_project_data_folder()
        if folder.startswith(data_folder):
            return True
        else:
            return False

    def make_canonical_path(self,path):
        """ Make this a nice path, with / separators
        """
        # Collapse escaped backslashes first, then single ones.
        path = path.replace("\\\\","/")
        return path.replace("\\","/")

    def substitute(self, string, substitutions):
        """
        Take each key in the substitutions dict. See if this key exists
        between double curly braces in string. If so replace with value.

        Example:
        substitute("my name is {{name}}.",{version:1,name=John})
        > "my name is John"
        """
        # NOTE(review): key/value are interpolated into the regex unescaped;
        # keys or values containing regex metacharacters could misbehave.
        for key, value in substitutions:
            string = re.sub("{{" + key + "}}", value, string)
        return string

    def replace_links(self, filename, contents, currentpage):
        """Relative urls which work on disk might not
        work properly when used in included file. Make sure any links in contents
        still point to the right place
        """
        # any relative link inside included file has to be replaced to make it work within the COMIC
        # context.
        base_url = reverse('comicsite.views.insertedpage', kwargs={'site_short_name':currentpage.comicsite.short_name,
                                                                   'page_title':currentpage.title,
                                                                   'dropboxpath':"remove"})
        # for some reason reverse matching does not work for emtpy dropboxpath (maybe views.dropboxpage
        # throws an error?. Workaround is to add 'remove' as path and chop this off the returned link.
        # nice.
        base_url = base_url[:-7]  # remove "remove/" from baseURL
        current_path = ntpath.dirname(filename) + "/"  # path of currently inserted file
        replaced = self.replacer.replace_links(contents,
                                               base_url,
                                               current_path)
        html_out = replaced
        return html_out

    def render(self, context):
        #text typed in the tag
        token = self.args['file']
        # the token (parameter) given to this tag can be one of three types:
        # * a raw filename like "stuff.html" or "results/table1.txt"
        # * a filname containing a variable like "results/{{teamid}}/table1.txt"
        # * a django template variable like "site.short_name"
        # Find out what type it is:
        # If it contains any / or {{ resolving as django var
        # is going to throw an error. Prevent unneeded exception, just skip
        # rendering as var in that case.
        filename_resolved = ""
        if not in_list(["{","}","\\","/"],token):
            filter = self.parser.compile_filter(strip_quotes(token))
            filename_resolved = filter.resolve(context)
        # if resolved filename is empty, resolution failed, just treat this
        # param as a filepath
        if filename_resolved == "":
            filename = strip_quotes(token)
        else:
            filename = filename_resolved
        # if there are {{}}'s in there, try to substitute this with url
        # parameter given in the url
        filename = self.substitute(filename, context["request"].GET.items())
        # If any {{parameters}} are still in filename they were not replaced.
        # This filename is missing information, show this as error text.
        if re.search("{{\w+}}", filename):
            missed_parameters = re.findall("{{\w+}}", filename)
            found_parameters = context["request"].GET.items()
            if found_parameters == []:
                found_parameters = "None"
            error_msg = "I am missing required url parameter(s) %s, url parameter(s) found: %s "\
                        "" % (missed_parameters, found_parameters)
            return self.make_error_msg(error_msg)
        project_name = context["site"].short_name
        filepath = os.path.join(settings.DROPBOX_ROOT, project_name, filename)
        filepath = os.path.abspath(filepath)
        filepath = self.make_canonical_path(filepath)
        # when all rendering is done, check if the final path is still not getting
        # into places it should not go.
        if not self.is_inside_project_data_folder(filepath,context["site"]):
            error_msg = "'{}' cannot be opened because it is outside the current project.".format(filepath)
            return self.make_error_msg(error_msg)
        storage = DefaultStorage()
        try:
            contents = storage.open(filepath, "r").read()
        except Exception as e:
            return self.make_error_msg("error opening file:" + str(e))
        # TODO check content safety
        # For some special pages like login and signup, there is no current page
        # In that case just don't try any link rewriting
        # TODO: here confused coding comes to light: I need to have the page
        # object that this template tag is on in order to process it properly.
        # I use both the element .page, added by
        # ComicSiteRequestContext, and a key 'currentpage' added by the view.
        # I think both are not ideal, and should be rewritten so all template
        # tags are implicitly passed page (and project) by default. It think
        # this needs custom template context processors or custom middleware.
        # As a workaround, just checking for both conditions.
        # NOTE: has_key() is Python-2 only.
        if context.has_key("currentpage"):
            currentpage = context["currentpage"]
        elif hasattr(context,"page"):
            currentpage = context.page
        else:
            currentpage = None
        # css is inserted verbatim; anything else gets its links rewritten.
        if currentpage and os.path.splitext(filename)[1] != ".css":
            html_out = self.replace_links(filename, contents, currentpage)
            # rewrite relative links
        else:
            html_out = contents
        return html_out
@register.tag(name="insert_graph")
def insert_graph(parser, token):
    """ Render a csv file from the local dropbox to a graph """
    usagestr = """Tag usage: {% insert_graph <file> type:<type>%}
                  <file>: filepath relative to project dropboxfolder.
                  <type>: how should the file be parsed and rendered? default
                  is to render an FROC curve for a an csv with first column
                  for x and subsequent columns for y, first row for short
                  var names, second row for verbose names.
                  Example: {% insert_graph results/test.txt %}
                  You can use url parameters in <file> by using {{curly braces}}.
                  Example: {% inster_graphfile {{id}}/result.txt %} called with ?id=1234
                  appended to the url will show the contents of "1234/result.txt".
                  """
    parts = token.split_contents()
    arguments = parts[1:]
    if len(arguments) > 2:
        error_message = "Expected no more than 2 arguments, found " + str(len(arguments))
        return TemplateErrorNode(error_message + "usage: \n" + usagestr)
    # First argument is the file; optional second is "type:<renderer>".
    args = {"file": arguments[0]}
    if len(arguments) == 2:
        args["type"] = arguments[1].split(":")[1]
    else:
        args["type"] = "csv"  # default
    replacer = HtmlLinkReplacer()
    return InsertGraphNode(args, replacer)
class InsertGraphNode(template.Node):
    """ Render a csv file from the project data folder as a graph (svg). """
    def __init__(self, args, replacer):
        # args: {"file": <path, may contain {{params}}>, "type": <renderer key>}
        self.args = args
        # HtmlLinkReplacer used to rewrite relative links in rendered output.
        self.replacer = replacer

    def make_error_msg(self, msg):
        # Render the error inline on the page instead of raising.
        errormsg = "Error rendering graph from file '" + "," + self.args["file"] + "': " + msg
        return makeErrorMsgHtml(errormsg)
def substitute(self, string, substitutions):
"""
Take each key in the substitutions dict. See if this key exists
between double curly braces in string. If so replace with value.
Example:
substitute("my name is {{name}}.",{version:1,name=John})
> "my name is John"
"""
for key, value in substitutions:
string = re.sub("{{" + key + "}}", value, string)
return string
def render(self, context):
filename_raw = self.args['file']
filename_clean = self.substitute(filename_raw, context["request"].GET.items())
# If any url parameters are still in filename they were not replaced. This filename
# is missing information..
if re.search("{{\w+}}", filename_clean):
missed_parameters = re.findall("{{\w+}}", filename_clean)
found_parameters = context["request"].GET.items()
if found_parameters == []:
found_parameters = "None"
error_msg = "I am missing required url parameter(s) %s, url parameter(s) found: %s "\
"" % (missed_parameters, found_parameters)
return self.make_error_msg(error_msg)
project_name = context.page.comicsite.short_name
filename = os.path.join(settings.DROPBOX_ROOT, project_name, filename_clean)
storage = DefaultStorage()
try:
contents = storage.open(filename, "r").read()
except Exception as e:
return self.make_error_msg(str(e))
# TODO check content safety
# any relative link inside included file has to be replaced to make it work within the COMIC
# context.
base_url = reverse('comicsite.views.insertedpage', kwargs={'site_short_name':context.page.comicsite.short_name,
'page_title':context.page.title,
'dropboxpath':"remove"})
# for some reason reverse matching does not work for emtpy dropboxpath (maybe views.dropboxpage
# throws an error?. Workaround is to add 'remove' as path and chop this off the returned link
# nice.
base_url = base_url[:-7] # remove "remove/" from baseURL
current_path = ntpath.dirname(filename_clean) + "/" # path of currently inserted file
try:
render_function = getrenderer(self.args["type"])
# (table,headers) = read_function(filename)
except Exception as e:
return self.make_error_msg(str("getrenderer:" + e.message))
try:
svg_data = render_function(filename)
# except Exception as e:
except:
raise
# return self.make_error_msg(str("Error calling render funtion '%s()' : %s" %(render_function.__name__,
# traceback.format_exc(0))))
# self.get_graph_svg(table,headers)
# html_out = "A graph rendered! source: '%s' <br/><br/> %s" %(filename_clean,svg_data)
html_out = svg_data
# rewrite relative links
return html_out
def getrenderer(format):
    """Return the rendering function registered for *format*.

    Holds the list of functions which can take in a filepath and return html
    to show a graph. By using this function we can easily list all available
    renderers and provide some safety: only functions listed here can be
    called from the template tag render_graph.

    Raises:
        Exception: when no renderer is registered for *format*.
    """
    renderers = {"csv": render_FROC,
                 "anode09": render_anode09_result,
                 "anode09_table": render_anode09_table}
    # 'in' instead of the Python-2-only dict.has_key()
    if format not in renderers:
        raise Exception("reader for format '%s' not found. Available formats: %s" % (format,
                        ",".join(renderers.keys())))
    return renderers[format]
def get_graph_svg(table, headers):
    """ Return svg instructions as string to plot a froc curve of csvfile.

    table:   list of rows; column 0 holds x values, the remaining columns
             hold one y series each.
    headers: one label per column (headers[0] belongs to the x column).
    """
    # list() so len() and indexing also work on Python 3, where zip is lazy
    columns = list(zip(*table))
    fig = Figure(facecolor='white')
    canvas = FigureCanvas(fig)
    # plot every y column against the shared x column
    for i in range(1, len(columns)):
        fig.gca().plot(columns[0], columns[i], label=headers[i], gid=headers[i])
    fig.gca().set_xlim([10 ** -2, 10 ** 2])
    fig.gca().set_ylim([0, 1])
    fig.gca().legend(loc='best', prop={'size': 10})
    fig.gca().grid()
    fig.gca().grid(which='minor')
    fig.gca().set_xlabel('False positives/scan')
    fig.gca().set_ylabel('Sensitivity')
    fig.gca().set_xscale("log")
    fig.set_size_inches(8, 6)
    return canvas_to_svg(canvas)
def canvas_to_svg(canvas):
    """ Render matplotlib canvas as string containing html/svg instructions. These instructions can be
    pasted into any html page and will be rendered as graph by any modern browser.
    """
    buf = StringIO.StringIO()
    buf.seek(0, os.SEEK_END)  # position at buffer end (no-op on a fresh buffer)
    canvas.print_svg(buf, format='svg')
    markup = buf.getvalue()
    buf.close()
    return markup
# readers for graph data.
def render_FROC(filename):
    """ Read in csv file with the following format:
        x_value, all nodules, peri-fissural nodules, ...N
        0.02, 0.31401, 0.0169492, ...N

    First column must be x values, subsequent columns can be any number of y
    values, one for each line to plot. First row should be the header names
    to label each column with.

    Returns: string containing html/svg instruction to render an FROC curve
    of all the variables found in file
    """
    has_header = True
    table = []
    headers = []
    storage = DefaultStorage()
    f = storage.open(filename, 'r')
    try:
        for i, row in enumerate(csv.reader(f)):
            if has_header and i == 0:
                headers = row
            else:
                # everything after the (optional) header row is numeric data
                table.append([float(cell) for cell in row])
    finally:
        # close the handle even when a malformed cell makes float() raise
        f.close()
    # list() so len() and indexing also work on Python 3, where zip is lazy
    columns = list(zip(*table))
    escaped_headers = [escape_for_html_id(x) for x in headers]
    fig = Figure(facecolor='white')
    canvas = FigureCanvas(fig)
    for i in range(1, len(columns)):
        fig.gca().plot(columns[0], columns[i], label=headers[i], gid=escaped_headers[i])
    fig.gca().set_xlim([10 ** -2, 10 ** 2])
    fig.gca().set_ylim([0, 1])
    fig.gca().legend(loc='best', prop={'size': 10})
    fig.gca().grid()
    fig.gca().grid(which='minor')
    fig.gca().set_xlabel('False positives/image')
    fig.gca().set_ylabel('Sensitivity')
    fig.gca().set_xscale("log")
    fig.set_size_inches(8, 6)
    return canvas_to_svg(canvas)
def render_anode09_result(filename):
    """ Render a file in the anode09 result format as an FROC curve, without
    changing the evaluation executable. Such files are php pages assigning
    arrays, e.g.:

        <?php
        $x=array(1e-39,1e-39,...,0.02,0.02,0.04,...);
        $frocy=array(0,0.00483092,0.00966184,...);
        $frocscore=array(0.135266,0.149758,...,0.21187);
        ...same pattern for pleural/fissure/vascular/isolated/large/small...
        ?>

    $x holds the x values; the $...y arrays hold FROC sensitivities per x
    value; the $...score arrays hold FROC scores at
    [1/8 1/4 1/2 1 2 4 8 average] and are meant for the table renderer.

    Returns: string containing html/svg instruction to render an anode09
    FROC curve of all the variables found in file
    """
    # small nodules,large nodules, isolated nodules,vascular nodules,pleural nodules,peri-fissural nodules,all nodules
    vars = parse_php_arrays(filename)
    assert vars != {}, "parsed result of '%s' was emtpy. I cannot plot anything" % filename
    fig = Figure(facecolor='white')
    canvas = FigureCanvas(fig)
    # one curve per nodule category, in fixed legend order
    series = [("smally", "nodules < 5mm", "small"),
              ("largey", "nodules > 5mm", "large"),
              ("isolatedy", "isolated nodules", "isolated"),
              ("vasculary", "vascular nodules", "vascular"),
              ("pleuraly", "pleural nodules", "pleural"),
              ("fissurey", "peri-fissural nodules", "fissure"),
              ("frocy", "all nodules", "frocy")]
    axes = fig.gca()
    for key, label, gid in series:
        axes.plot(vars["x"], vars[key], label=label, gid=gid)
    axes.set_xlim([10 ** -2, 10 ** 2])
    axes.set_ylim([0, 1])
    axes.legend(loc='best', prop={'size': 10})
    axes.grid()
    axes.grid(which='minor')
    axes.set_xlabel('Average FPs per scan')
    axes.set_ylabel('Sensitivity')
    axes.set_xscale("log")
    fig.set_size_inches(8, 6)
    return canvas_to_svg(canvas)
def render_anode09_table(filename):
    """ Read in a file with the anode09 result format and output html for an
    anode09 score table.

    Expects the same php-array file format as render_anode09_result (see its
    docstring); this renderer uses the $...score arrays, which hold FROC
    scores at [1/8 1/4 1/2 1 2 4 8 average] FPs/scan.

    Returns: html string for the score table.
    """
    # small nodules,large nodules, isolated nodules,vascular nodules,pleural nodules,peri-fissural nodules,all nodules
    vars = parse_php_arrays(filename)
    assert vars != {}, "parsed result of '%s' was emtpy. I cannot create table" % filename
    table_id = id_generator()
    tableHTML = """<table border=1 class = "comictable csvtable sortable" id="%s">
        <thead><tr>
            <td class ="firstcol">FPs/scan</td><td align=center width='54'>1/8</td>
            <td align=center width='54'>1/4</td>
            <td align=center width='54'>1/2</td><td align=center width='54'>1</td>
            <td align=center width='54'>2</td><td align=center width='54'>4</td>
            <td align=center width='54'>8</td><td align=center width='54'>average</td>
        </tr></thead>""" % table_id
    # one row per nodule category, in fixed display order
    rows = [("small nodules", "smallscore"),
            ("large nodules", "largescore"),
            ("isolated nodules", "isolatedscore"),
            ("vascular nodules", "vascularscore"),
            ("pleural nodules", "pleuralscore"),
            ("peri-fissural nodules", "fissurescore"),
            ("all nodules", "frocscore")]
    body = "".join([array_to_table_row([label] + vars[key]) for label, key in rows])
    tableHTML = tableHTML + "<tbody>" + body + "</tbody>" + "</table>"
    # FIXME: create a temporary solution to including javascript and css with template tags
    # NOTE(review): `script` is built but never included in the returned html;
    # kept as-is to preserve the current output
    script = """<script type="text/javascript">
        $('#%s').dataTable({
            "bJQueryUI": true,
            "sPaginationType": "full_numbers",
            "bPaginate": false,
            "bLengthChange": false,
            "bFilter": false,
            "bInfo": false,
            "bAutoWidth": false
        });
        </script>""" % table_id
    return "<div class=\"comictablecontainer\">" + tableHTML + "</div>"
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random id of *size* characters drawn from *chars*.

    Thanks to Ignacio Vazquez-Abrams on stackoverflow.
    """
    picks = (random.choice(chars) for _ in range(size))
    return ''.join(picks)
def array_to_table_row(rowvalues, trclass=""):
    """Render *rowvalues* as one html table row; floats are shown with
    three decimals, everything else via str()."""
    cells = []
    for value in rowvalues:
        if type(value) is float:
            cells.append("<td>%.3f</td>" % value)
        else:
            cells.append("<td>%s</td>" % str(value))
    return "<tr class = \"%s\">" % trclass + "".join(cells) + "</tr>"
def parse_php_arrays(filename):
    """ Parse a php page containing only php arrays like $x=array(1,2,3).
    Created to parse anode09 eval results.

    Returns: dict {"varname1": array1, ...} where each array is a list of
    floats. Variables whose declaration cannot be matched are skipped.
    """
    verbose = False
    output = {}
    storage = DefaultStorage()
    with storage.open(filename, 'r') as f:
        content = f.read()
        content = content.replace("\n", "")
        php = re.compile(r"\<\?php(.*?)\?\>", re.DOTALL)
        phpcontent = php.search(content).group(1)
        assert phpcontent != "", "could not find anything like <?php ?> in '%s'" % filename
        phpvars = phpcontent.split("$")
        phpvars = [x for x in phpvars if x != '']  # remove empty
        if verbose:
            # parenthesized print works identically on Python 2 and 3
            print("found %d php variables in %s. " % (len(phpvars), filename))
            print("parsing %s into int arrays.. " % (filename))
        # check whether this looks like a php var
        phpvar = re.compile(r"([a-zA-Z]+[a-zA-Z0-9]*?)=array\((.*?)\);", re.DOTALL)
        for var in phpvars:
            result = phpvar.search(var)
            # TODO Log these messages as info
            if result is None:
                msg = "Could not match regex pattern '%s' to '%s'" % (phpvar.pattern, var)
                continue
            if len(result.groups()) != 2:
                msg = "Expected to find varname and content, but regex '%s' found %d items:%s " % (
                    phpvar.pattern, len(result.groups()),
                    "[" + ",".join(result.groups()) + "]")
                continue
            (varname, varcontent) = result.groups()
            output[varname] = [float(x) for x in varcontent.split(",")]
    return output
@register.tag(name="url_parameter")
def url_parameter(parser, token):
    """ Try to read given variable from given url. """
    usagestr = """Tag usage: {% url_parameter <param_name> %}
                  <param_name>: The parameter to read from the requested url.
                  Example: {% url_parameter name %} will write "John" when the
                  requested url included ?name=John.
                  """
    split = token.split_contents()
    all_args = split[1:]
    # exactly one parameter name is expected
    if len(all_args) != 1:
        return TemplateErrorNode("Expected 1 argument, found " + str(len(all_args)))
    return UrlParameterNode({"url_parameter": all_args[0], "token": token})
class UrlParameterNode(template.Node):
    """Writes the value of a GET parameter of the current request url."""

    def __init__(self, args):
        # args: {"url_parameter": <param name>, "token": <parsed template token>}
        self.args = args

    def make_error_msg(self, msg):
        errormsg = "Error in url_parameter tag: '" + ",".join(self.args) + "': " + msg
        return makeErrorMsgHtml(errormsg)

    def render(self, context):
        parameter = self.args['url_parameter']
        # 'in' instead of the Python-2-only QueryDict.has_key()
        if parameter in context['request'].GET:
            return context['request'].GET[parameter]
        error_message = "Error rendering %s: Parameter '%s' not found in request URL" % (
            "{% " + self.args['token'].contents + "%}", parameter)
        return makeErrorMsgHtml(error_message)
@register.tag(name="all_projects")
def render_all_projects(parser, token):
    """ Render an overview of all projects """
    try:
        projects = ComicSite.objects.non_hidden()
    except ObjectDoesNotExist:
        return TemplateErrorNode("Error rendering {% " + token.contents +
                                 " %}: Could not find any comicSite object..")
    return AllProjectsNode(projects)
class AllProjectsNode(template.Node):
    """ return html list listing all projects in COMIC
    """

    def __init__(self, projects):
        self.projects = projects

    def render(self, context):
        # concatenate one summary snippet per project
        return "".join(self.project_summary_html(p) for p in self.projects)

    def project_summary_html(self, project):
        # when served per-project subdomains are active, build an absolute url
        if comicsite.templatetags.comic_templatetags.subdomain_is_projectname():
            protocol, domainname = settings.MAIN_HOST_NAME.split("//")
            url = protocol + "//" + project.short_name + "." + domainname
            return comicsite.views.comic_site_to_html(project, url)
        return comicsite.views.comic_site_to_html(project)
@register.tag(name="all_projectlinks")
def render_all_projectlinks(parser, token):
    """ Render an overview of all projects including all links to external
    projects and challenges
    """
    usagestr = """Tag usage: {% all_projectlinks max_projects:<int> %}
                  max_projects is an optional parameter.
                  max_projects: show at most this number of projects.
                                if set, do not group projects per year but show all
                                also, show only projects hosted on comic, not
                                external links
                  """
    try:
        args = parseKeyValueToken(token)
    except ValueError:
        errormsg = "Error rendering {% " + token.contents + " %}: Error parsing token. " + usagestr
        return TemplateErrorNode(errormsg)
    if len(args) > 1:
        errormsg = "Error rendering {% {0} %}: expected at most one argument, but found [{1}]".format(
            token.contents, ",".join(args.keys()))
        return TemplateErrorNode(errormsg)
    if len(args) == 1:
        # dict.keys()[0] only works on Python 2; next(iter(...)) works on both
        key = next(iter(args))
        if key != "max_projects":
            errormsg = "Error rendering {% {0} %}: expected argument 'max_projects' but found '{1}' instead".format(
                token.contents, key)
            return TemplateErrorNode(errormsg)
        args["max_projects"] = int(args["max_projects"])
    try:
        projects = ComicSite.objects.non_hidden()
    except ObjectDoesNotExist:
        errormsg = "Error rendering {% " + token.contents + " %}: Could not find any comicSite object.."
        return TemplateErrorNode(errormsg)
    return AllProjectLinksNode(projects, args)
class AllProjectLinksNode(template.Node):
    """ return html list listing all projects in COMIC, plus external
    challenges read from the grand-challenge overview excel file
    """

    def __init__(self, projects, args):
        self.projects = projects
        # args is either {} or {"max_projects": int}
        self.args = args

    def render(self, context):
        projectlinks = [project.to_projectlink() for project in self.projects]
        if self.args:
            # flat list capped at max_projects, comic-hosted projects only
            html = self.render_project_links(projectlinks, self.args["max_projects"])
        else:
            # full overview: include external links and group per year
            projectlinks += self.read_grand_challenge_projectlinks()
            html = self.render_project_links_per_year(projectlinks)
        html = """<div id='projectlinks'>
                    <ul>{0}
                    <div style='clear:both'></div>
                    </ul>
                  </div> """.format(html)
        return html

    def render_project_links(self, projectlinks, max_projects):
        """ Show all projectlinks in one big list, sorted by date, most recent first

        @param max_projects: int show only this number
        """
        projectlinks = sorted(projectlinks, key=lambda x: x.date, reverse=True)
        if max_projects:
            projectlinks = projectlinks[0:max_projects]
        return "\n".join([self.render_to_html(p) for p in projectlinks])

    def render_project_links_per_year(self, projectlinks):
        """ Create html to show each projectlink with subheadings per year sorted
        by diminishing year
        """
        # go through all projectlinks and bin per year;
        # setdefault replaces the Python-2-only dict.has_key() check
        years = {}
        for projectlink in projectlinks:
            years.setdefault(projectlink.date.year, []).append(projectlink)
        html = ""
        for year, links in sorted(years.items(), key=lambda x: x[0], reverse=True):
            yearheader = "<div class ='yearHeader' id ='{0}'><a class ='yearHeaderAnchor'>{0}</a></div>".format(year)
            rendered = "\n".join([self.render_to_html(link) for link in links])
            html += "<div class=projectlinksyearcontainer \
style='background-color:{0}'>{1}{2} <div style='clear:both;'>\
</div></div>".format("none", yearheader, rendered)
        return html

    def get_background_color(self, idx=-1):
        """ Each year has a different background returns color of css format
        rgb(xxx,xxx,xxx) """
        colors = [(207, 229, 222),
                  (240, 100, 100),
                  (208, 153, 131),
                  (138, 148, 175),
                  (186, 217, 226),
                  (138, 148, 175),
                  (208, 153, 131),
                  (200, 210, 230),
                  (3, 100, 104),  # was 003, a Python-2-only octal literal (value 3)
                  (100, 160, 100)]
        if idx == -1:
            # randint's upper bound is inclusive; the modulo below wraps
            # len(colors) back into range
            idx = random.randint(0, len(colors))
        idx = idx % len(colors)
        return "rgb({},{},{})".format(*colors[idx])

    def render_to_html(self, projectlink):
        """ return html representation of projectlink """
        html = """
               <div class = "projectlink {link_class} {year} {comiclabel}">
                 <div class ="top">
                   <a href="{url}">
                     <img alt="" src="{thumb_image_url}" height="100" border="0" width="100">
                   </a>
                   <div class="stats">{stats} </div>
                 </div>
                 <div class ="bottom">
                   <div class="projectname"> {projectname} </div>
                   <div class="description"> {description} </div>
                 </div>
               </div>
               """.format(link_class=projectlink.find_link_class(),
                          comiclabel=self.get_comic_label(projectlink),
                          year=str(projectlink.params["year"]),
                          url=projectlink.params["URL"],
                          thumb_image_url=self.get_thumb_url(projectlink),
                          projectname=projectlink.params["abreviation"],
                          description=projectlink.params["description"],
                          stats=self.get_stats_html(projectlink))
        return html

    def capitalize(self, string):
        # uppercase only the first letter; str.capitalize() would also
        # lowercase the remainder
        return string[0].upper() + string[1:]

    def get_comic_label(self, projectlink):
        """ For add this as id, for jquery filtering later on
        """
        if projectlink.params["hosted on comic"]:
            return "comic"
        else:
            return ""

    def get_stats_html(self, projectlink):
        """ Returns html to render number of downloads, participants etc..
        if a value is not found it is ommitted from the html so there will
        be no 'participants: <empty>' strings shown """
        stats = []
        stats.append("" + projectlink.get_short_project_type())
        if projectlink.params["dataset downloads"]:
            stats.append("downloads: " + str(projectlink.params["dataset downloads"]))
        if projectlink.params["submitted results"]:
            stats.append("submissions: " + str(projectlink.params["submitted results"]))
        # only show the workshop date for events that are still upcoming
        if projectlink.params["workshop date"] and projectlink.UPCOMING in projectlink.find_link_class():
            stats.append("workshop: " + self.format_date(projectlink.params["workshop date"]))
        if projectlink.params["last submission date"]:
            stats.append("last subm.: " + self.format_date(projectlink.params["last submission date"]))
        if projectlink.params["event name"]:
            stats.append("event: " + self.make_event_link(projectlink))
        stats_caps = [self.capitalize(s) for s in stats]
        # put divs around each statistic in the stats list
        return "".join(["<div>{}</div>".format(stat) for stat in stats_caps])

    def make_event_link(self, projectlink):
        """ To link to event, like ISBI 2013 in overviews
        """
        return "<a href='{0}' class='eventlink'>{1}</a>".format(projectlink.params["event URL"],
                                                                projectlink.params["event name"])

    def get_thumb_url(self, projectlink):
        """ For displaying a little thumbnail image for each project, in
        project overviews
        """
        if projectlink.is_hosted_on_comic():
            thumb_image_url = projectlink.params["thumb_image_url"]
        else:
            thumb_image_url = "http://shared.runmc-radiology.nl/mediawiki/challenges/localImage.php?file=" + projectlink.params["abreviation"] + ".png"
        return thumb_image_url

    def project_summary_html(self, project):
        """ get a link to this project """
        if comicsite.templatetags.comic_templatetags.subdomain_is_projectname():
            protocol, domainname = settings.MAIN_HOST_NAME.split("//")
            url = protocol + "//" + project.short_name + "." + domainname
            html = comicsite.views.comic_site_to_grand_challenge_html(project, url)
        else:
            html = comicsite.views.comic_site_to_grand_challenge_html(project)
        return html

    def read_grand_challenge_projectlinks(self):
        """Read external challenge links from the overview excel file in the
        main project dropbox; returns [] when the file cannot be read."""
        filename = "challengestats.xls"
        project_name = settings.MAIN_PROJECT_NAME
        filepath = os.path.join(settings.DROPBOX_ROOT, project_name, filename)
        reader = ProjectExcelReader(filepath, 'Challenges')
        logger.info("Reading projects excel from '%s'" % (filepath))
        try:
            projectlinks = reader.get_project_links()
        except IOError:
            # best effort: an unreadable stats file should not break the page
            logger.warning("Could not read any projectlink information from"
                           " '%s' returning empty list. trace: %s " % (filepath, traceback.format_exc()))
            projectlinks = []
        return [self.clean_grand_challenge_projectlink(p) for p in projectlinks]

    def clean_grand_challenge_projectlink(self, projectlink):
        """ Specifically for the grand challenges excel file, make everything strings,
        change weird values, like having more downloads than registered users
        """
        # cast all to int as there are no float values in the excel file, I'd
        # rather do this here than change the way excelreader reads them in
        for key in projectlink.params.keys():
            param = projectlink.params[key]
            if type(param) == float:
                projectlink.params[key] = int(param)
        if projectlink.params["last submission date"]:
            projectlink.params["last submission date"] = self.determine_project_date(
                projectlink.params["last submission date"])
        if projectlink.params["workshop date"]:
            projectlink.params["workshop date"] = self.determine_project_date(
                projectlink.params["workshop date"])
        return projectlink

    def determine_project_date(self, datefloat):
        """ Parse float (e.g. 20130425.0) read by excelreader into python date
        """
        date = str(datefloat)
        return datetime.datetime(year=int(date[0:4]),
                                 month=int(date[4:6]),
                                 day=int(date[6:8]))

    def format_date(self, date):
        return date.strftime('%b %d, %Y')
@register.tag(name="image_url")
def render_image_url(parser, token):
    """ render image based on image title """
    try:
        # split_contents() knows not to split quoted strings.
        tag_name, imagetitle = token.split_contents()
    except ValueError:
        # unpacking fails when the tag was not given exactly one argument;
        # the original placed this handler where it could never catch it
        raise template.TemplateSyntaxError("%r tag requires a single argument" % token.contents.split()[0])
    try:
        image = UploadModel.objects.get(title=imagetitle)
    except ObjectDoesNotExist:
        errormsg = "Error rendering {% " + token.contents + " %}: Could not find any images named '" + imagetitle + "' in database."
        return TemplateErrorNode(errormsg)
    isImage, errorMessage = hasImgExtension(str(image.file))
    if not isImage:
        errormsg = "Error rendering {% " + token.contents + " %}:" + errorMessage
        return TemplateErrorNode(errormsg)
    return imagePathNode(image)
class imagePathNode(template.Node):
    """ return local path to the given UploadModel
    """

    def __init__(self, image):
        self.image = image

    def render(self, context):
        # serve the uploaded file from the static media root
        return "/static/media/" + str(self.image.file)
@register.tag(name="registration")
def render_registration_form(parser, token):
    """ Render a registration form for the current site """
    try:
        projects = ComicSite.objects.all()
    except ObjectDoesNotExist:
        return TemplateErrorNode("Error rendering {% " + token.contents +
                                 " %}: Could not find any comicSite object..")
    return RegistrationFormNode(projects)
class RegistrationFormNode(template.Node):
    """ return HTML form of registration, which links to main registration
    Currently just links to registration
    """

    def __init__(self, projects):
        # projects: queryset of all ComicSite objects (not read in render)
        self.projects = projects

    def render(self, context):
        # project/page this tag is being rendered on
        project = context.page.comicsite
        pagetitle = context.page.title
        # sign-in url that redirects back to the current page afterwards
        signup_url = reverse('comicsite_signin', args=[project.short_name]) + "?next=" \
            + reverse('comicsite.views.page', kwargs={'site_short_name': project.short_name, 'page_title': pagetitle})
        signuplink = makeHTMLLink(signup_url, "sign in")
        registerlink = makeHTMLLink(reverse('comicsite_signup', args=[project.short_name]), "register")
        # anonymous visitors must sign in or create a COMIC account first
        if not context['user'].is_authenticated():
            return "To register for " + project.short_name + ", you need be logged in to COMIC.\
            please " + signuplink + " or " + registerlink
        else:
            if project.is_participant(context['user']):
                msg = "You have already registered for " + project.short_name
            else:
                register_url = reverse('comicsite.views._register', kwargs={'site_short_name': project.short_name})
                # nested if loops through the roof. What would uncle Bob say?
                # "nested if loops are a missed chance for inheritance."
                # TODO: possible way out: create some kind of registration request
                # manager which can be asked these things
                if project.require_participant_review:
                    # participation needs admin approval: show the pending
                    # request's status, or a link to file a new request
                    pending = RegistrationRequest.objects.get_pending_registration_requests(context['user'], project)
                    if pending:
                        msg = pending[0].status_to_string()
                    else:
                        msg = makeHTMLLink(register_url, "Request registration for " + project.short_name)
                else:
                    # open registration: following the link registers directly
                    msg = makeHTMLLink(register_url, "Register for " + project.short_name)
            return msg
class TemplateErrorNode(template.Node):
    """Render error message in place of this template tag. This makes it
    directly obvious where the error occured
    """

    def __init__(self, errormsg):
        # encode braces/percent so django's template engine leaves them alone
        self.msg = HTML_encode_django_chars(errormsg)

    def render(self, context):
        return makeErrorMsgHtml(self.msg)
def HTML_encode_django_chars(string):
    """replace curly braces and percent signs by their html encoded
    equivalents, so django's template engine will not interpret them

    NOTE(review): the replacement targets had collapsed into no-op
    self-replacements (e.g. replace("{", "{")), presumably through an html
    un-escaping of the source file; restored to numeric character references.
    """
    string = string.replace("{", "&#123;")
    string = string.replace("}", "&#125;")
    string = string.replace("%", "&#37;")
    return string
def makeHTMLLink(url, linktext):
    """Return an html anchor pointing at *url* with text *linktext*."""
    return '<a href="{0}">{1}</a>'.format(url, linktext)
def hasImgExtension(filename):
    """Return [True, ""] when *filename* has an allowed image extension,
    otherwise [False, <error message>]. The check is case-sensitive."""
    allowedextensions = [".jpg", ".jpeg", ".gif", ".png", ".bmp"]
    ext = path.splitext(filename)[1]
    if ext not in allowedextensions:
        return [False, "file \"" + filename + "\" does not look like an image. Allowed extensions: [" + ",".join(allowedextensions) + "]"]
    return [True, ""]
def makeErrorMsgHtml(text):
    """Wrap *text* in the standard pageError span/paragraph markup."""
    encoded = HTML_encode_django_chars(text)
    return "<p><span class=\"pageError\"> " + encoded + " </span></p>"
@register.tag(name="project_statistics")
def display_project_statistics(parser, token):
    """ Parser for the project statistics tag. Takes no arguments. """
    return ProjectStatisticsNode()
class ProjectStatisticsNode(template.Node):
    """Shows the number of participants of the current project and a google
    geochart of their countries."""

    def __init__(self):
        pass

    def render(self, context):
        project_name = context.page.comicsite.short_name
        snippet_header = "<div class='statistics'>"
        snippet_footer = "</div>"
        # Get the users belonging to this project
        perm = Group.objects.get(name='{}_participants'.format(project_name))
        users = User.objects.filter(groups=perm).distinct()
        # histogram of participant countries feeds the geochart
        countries = [u.get_profile().get_country_display() for u in users]
        hist_countries = Counter(countries)
        chart_data = [['Country', '#Participants']]
        # .items() instead of the Python-2-only .iteritems()
        for key, val in hist_countries.items():
            chart_data.append([str(key), val])
        # the Python list repr of chart_data is valid javascript array syntax
        snippet_geochart = """
        <script type='text/javascript' src='https://www.google.com/jsapi'></script>
        <script type='text/javascript'>
            google.load('visualization', '1', {{'packages': ['geochart']}});
            google.setOnLoadCallback(drawRegionsMap);
            function drawRegionsMap() {{
                var data = google.visualization.arrayToDataTable(
                    {data}
                );
                var options = {{}};
                var chart = new google.visualization.GeoChart(document.getElementById('chart_div'));
                chart.draw(data, options);
            }};
        </script>
        <div id="chart_div" style="width: 100%; height: 170px;"></div>
        """.format(data=chart_data)
        snippet = """
        <h1>Statistics</h1><br>
        <p># of users: {num_users}</p>
        {geochart}
        """.format(num_users=len(users), geochart=snippet_geochart)
        return snippet_header + snippet + snippet_footer
#!/usr/bin/env python3
# Eduardo Frazão
# 2014/11/24
# yeah, it's an ugly code, but it works
from google import search
from sys import argv
import urllib.request
def writer(x, y):
    """Append a line containing *y* to the file at path *x*."""
    # 'with' guarantees the handle is closed even if the write fails
    with open(x, "a") as log:
        log.write("%s\n" % y)
def chk(x):
    """Download URL *x* and harvest e-mail-looking tokens from the page.

    New addresses are appended to the global ``mails`` list, echoed to
    stdout, and persisted via writer() into the global ``file_name``.
    Download/decode failures are swallowed on purpose (best-effort crawl).
    """
    try:
        cntd = str(urllib.request.urlopen(x).read().decode('utf8'))
        # Turn punctuation/markup into whitespace so that addresses become
        # stand-alone tokens.  Order mirrors the original replace chain.
        replacements = [
            ("\\r", " "), (":", " "), ("\\t", "\t"), ("\\n", "\n"),
            ("=", " "), (")", " "), ("(", " "), ("!", " "),
            ("\\", " "), ("..", " "),
            # NOTE(review): this pattern was garbled in the source;
            # presumably the HTML entity for a double quote.
            ("&quot;", " "),
            ("|", " "),
            ("[", " "), ("]", " "),  # fix: also strip brackets (see TODO note)
            (">", " "), ("<", " "), (";", " "), (",", " "),
        ]
        for old, new in replacements:
            cntd = cntd.replace(old, new)
        cntd = cntd.lower().replace("'", " ").replace('"', " ")
        for blk in cntd.split():
            if ("@" in blk and ".com" in blk
                    and "http" not in blk and "/" not in blk):
                blk = blk.strip(".")
                # fix: the original tested the bare constant "seuemail"
                # (always truthy) instead of its membership in blk.
                if (blk not in mails and "@xx" not in blk
                        and "meudominio" not in blk
                        and "seuemail" not in blk
                        and "seublog" not in blk):
                    # Require at least 3 characters before the '@'.
                    if len(blk[:blk.find("@")]) >= 3:
                        mails.append(blk)
                        print("*\t%s" % blk)
                        writer(file_name, blk)
    except Exception:
        pass  # best-effort: skip URLs that fail to download or decode
# --- command-line entry point -------------------------------------------
# Usage: d_f.py <output_file> "<search terms>"
if len(argv) != 3:
    print('\n\td_f.py file_name_output.ext "keys to find"\n\tor...\n\td_f.py file_name_output.ext %s\n' % (''' '"keys to find"' '''))
    quit()
else:
    file_name = argv[1]  # output file appended to by writer()
    keys = argv[2]       # Google search query
    mails = []           # global de-duplication list read/written by chk()
    print("\noutput file: %s\nSearching Key(s): %s\n" % (argv[1], argv[2]))
    # stop=None: keep paging through Google results indefinitely.
    for url in search(keys, stop=None):
        print("URL: %s" % url)
        # NOTE(review): resetting mails for every URL defeats cross-page
        # de-duplication; addresses on several pages are written repeatedly.
        mails = []
        chk(url)
Add .replace() calls for "[" and "]".
#!/usr/bin/env python3
# Eduardo Frazão
# 2014/11/24
# yeah, it's an ugly code, but it works
from google import search
from sys import argv
import urllib.request
def writer(x, y):
    """Append *y* followed by a newline to the file at path *x*.

    :param x: path of the output file (opened in append mode).
    :param y: value to write; formatted with ``%s``.
    """
    # 'with' guarantees the handle is closed even if the write raises,
    # fixing the original's unclosed-handle-on-error leak.
    with open(x, "a") as log:
        log.write("%s\n" % y)
def chk(x):
    """Download URL *x* and harvest e-mail-looking tokens from the page.

    New addresses are appended to the global ``mails`` list, echoed to
    stdout, and persisted via writer() into the global ``file_name``.
    Download/decode failures are swallowed on purpose (best-effort crawl).
    """
    try:
        cntd = str(urllib.request.urlopen(x).read().decode('utf8'))
        # Turn punctuation/markup into whitespace so that addresses become
        # stand-alone tokens.  Order mirrors the original replace chain.
        replacements = [
            ("\\r", " "), (":", " "), ("\\t", "\t"), ("\\n", "\n"),
            ("=", " "), (")", " "), ("(", " "), ("!", " "),
            ("\\", " "), ("..", " "),
            # NOTE(review): this pattern was garbled in the source;
            # presumably the HTML entity for a double quote.
            ("&quot;", " "),
            ("|", " "),
            ("[", " "), ("]", " "),
            (">", " "), ("<", " "), (";", " "), (",", " "),
        ]
        for old, new in replacements:
            cntd = cntd.replace(old, new)
        cntd = cntd.lower().replace("'", " ").replace('"', " ")
        for blk in cntd.split():
            if ("@" in blk and ".com" in blk
                    and "http" not in blk and "/" not in blk):
                blk = blk.strip(".")
                # fix: the original tested the bare constant "seuemail"
                # (always truthy) instead of its membership in blk.
                if (blk not in mails and "@xx" not in blk
                        and "meudominio" not in blk
                        and "seuemail" not in blk
                        and "seublog" not in blk):
                    # Require at least 3 characters before the '@'.
                    if len(blk[:blk.find("@")]) >= 3:
                        mails.append(blk)
                        print("*\t%s" % blk)
                        writer(file_name, blk)
    except Exception:
        pass  # best-effort: skip URLs that fail to download or decode
# --- command-line entry point -------------------------------------------
# Usage: d_f.py <output_file> "<search terms>"
if len(argv) != 3:
    print('\n\td_f.py file_name_output.ext "keys to find"\n\tor...\n\td_f.py file_name_output.ext %s\n' % (''' '"keys to find"' '''))
    quit()
else:
    file_name = argv[1]  # output file appended to by writer()
    keys = argv[2]       # Google search query
    mails = []           # global de-duplication list read/written by chk()
    print("\noutput file: %s\nSearching Key(s): %s\n" % (argv[1], argv[2]))
    # stop=None: keep paging through Google results indefinitely.
    for url in search(keys, stop=None):
        print("URL: %s" % url)
        # NOTE(review): resetting mails for every URL defeats cross-page
        # de-duplication; addresses on several pages are written repeatedly.
        mails = []
        chk(url)
|
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
# To see the list of xml schema builtins recognized by this parser, run defn.py
# in this package.
from collections import defaultdict
import logging
logger = logging.getLogger(__name__)
import os
from itertools import chain
from pprint import pformat
from copy import copy
from os.path import dirname
from os.path import abspath
from os.path import join
from lxml import etree
from spyne.util import memoize
from spyne.util.odict import odict
from spyne.model import Null
from spyne.model import XmlData
from spyne.model import XmlAttribute
from spyne.model import Array
from spyne.model import ComplexModelBase
from spyne.model import ComplexModelMeta
from spyne.model.complex import XmlModifier
from spyne.protocol.xml import XmlDocument
from spyne.interface.xml_schema.defn import TYPE_MAP
from spyne.interface.xml_schema.defn import SchemaBase
from spyne.interface.xml_schema.defn import XmlSchema10
from spyne.util.color import R, G, B, M, Y
PARSER = etree.XMLParser(remove_comments=True)
_prot = XmlDocument()
class _Schema(object):
    """Per-target-namespace container for parsed schema artifacts."""

    def __init__(self):
        self.imports = set()  # namespaces pulled in via <import>
        self.elements = {}    # top-level element declarations, by name
        self.types = {}       # resolved spyne types, by name
# FIXME: Needs to emit delayed assignment of recursive structures instead of
# lousy ellipses.
@memoize
def Thier_repr(with_ns=False):
    """Template for ``hier_repr``, a ``repr`` variant that shows spyne
    ``ComplexModel``s in a hierarchical format.

    :param with_ns: either bool or a callable that returns the class name
        as string
    """
    if with_ns is False:
        def get_class_name(c):
            return c.get_type_name()

    # fix: ``with_ns is 1`` identity-compared an int literal, which relies
    # on CPython small-int interning and warns on modern interpreters;
    # use equality instead.
    elif with_ns is True or with_ns == 1:
        def get_class_name(c):
            return "{%s}%s" % (c.get_namespace(), c.get_type_name())

    else:
        def get_class_name(c):
            return with_ns(c.get_namespace(), c.get_type_name())

    def hier_repr(inst, i0=0, I=' ', tags=None):
        # *tags* tracks visited object ids so cycles print as a bare
        # class name instead of recursing forever.
        if tags is None:
            tags = set()

        cls = inst.__class__
        if not hasattr(cls, '_type_info'):
            return repr(inst)

        clsid = "%s" % (get_class_name(cls))
        if id(inst) in tags:
            return clsid

        tags.add(id(inst))

        i1 = i0 + 1
        i2 = i1 + 1

        retval = [clsid, '(']

        # assumes _xml_tag_body_as yields (key, type) pairs — TODO confirm
        xtba = cls.Attributes._xml_tag_body_as
        if xtba is not None:
            xtba_key, xtba_type = next(xtba)
            if xtba_key is not None:
                value = getattr(inst, xtba_key, None)
                retval.append("%s,\n" % hier_repr(value, i1, I, tags))
            else:
                retval.append('\n')

        for k, v in inst.get_flat_type_info(cls).items():
            value = getattr(inst, k, None)
            if (issubclass(v, Array) or v.Attributes.max_occurs > 1) and \
                    value is not None:
                # Array-like member: render each element on its own line.
                retval.append("%s%s=[\n" % (I * i1, k))
                for subval in value:
                    retval.append("%s%s,\n" % (I * i2,
                                               hier_repr(subval, i2, I, tags)))
                retval.append('%s],\n' % (I * i1))

            elif issubclass(v, XmlData):
                pass  # already rendered via the tag-body branch above

            else:
                retval.append("%s%s=%s,\n" % (I * i1, k,
                                              hier_repr(value, i1, I, tags)))

        retval.append('%s)' % (I * i0))
        return ''.join(retval)

    return hier_repr
# Install the hierarchical repr as the default for all parsed schema
# objects, and expose ready-made variants with and without namespaces.
SchemaBase.__repr__ = Thier_repr()
hier_repr = Thier_repr()
hier_repr_ns = Thier_repr(with_ns=True)
class XmlSchemaParser(object):
    """Parses a set of XML Schema documents into spyne type definitions.

    ``retval`` maps target namespace -> _Schema and is shared with the
    child parsers spawned (via clone()) to handle <include>/<import>.
    """

    def __init__(self, files, base_dir=None, repr_=Thier_repr(with_ns=False),
                 skip_errors=False):
        # files: maps namespace -> schema file path, used to resolve <import>.
        self.retval = {}
        self.indent = 0
        self.files = files
        self.base_dir = base_dir
        self.repr = repr_
        if self.base_dir is None:
            self.base_dir = os.getcwd()
        self.parent = None
        self.children = None
        self.nsmap = None
        self.schema = None
        self.prefmap = None
        self.tns = None
        self.pending_elements = None
        self.pending_types = None
        self.pending_type_tree = None
        self.skip_errors = skip_errors

    def clone(self, indent=0, base_dir=None):
        """Return a shallow copy linked into the parent/children tree, with
        its debug indentation bumped by *indent*."""
        retval = copy(self)

        if retval.parent is None:
            retval.parent = self
            if self.children is None:
                self.children = [retval]
            else:
                self.children.append(retval)
        else:
            retval.parent.children.append(retval)

        retval.indent = self.indent + indent

        if base_dir is not None:
            retval.base_dir = base_dir

        return retval

    def debug0(self, s, *args, **kwargs):
        """Log at this parser's own nesting level."""
        logger.debug("%s%s" % (" " * self.indent, s), *args, **kwargs)

    def debug1(self, s, *args, **kwargs):
        """Log one level deeper than debug0."""
        logger.debug("%s%s" % (" " * (self.indent + 1), s), *args, **kwargs)

    def debug2(self, s, *args, **kwargs):
        """Log two levels deeper than debug0."""
        logger.debug("%s%s" % (" " * (self.indent + 2), s), *args, **kwargs)

    def parse_schema_file(self, file_name):
        """Parse the schema document at *file_name* (comments stripped)."""
        elt = etree.fromstring(open(file_name, 'rb').read(), parser=PARSER)
        return self.parse_schema(elt)

    def process_includes(self, include):
        """Inline an <include>d schema: merge its nsmap, imports, types and
        elements into the current schema object (recursively)."""
        file_name = include.schema_location
        if file_name is None:
            return

        self.debug1("including %s %s", self.base_dir, file_name)

        file_name = abspath(join(self.base_dir, file_name))
        data = open(file_name, 'rb').read()
        elt = etree.fromstring(data, parser=PARSER)
        self.nsmap.update(elt.nsmap)
        self.prefmap = dict([(v, k) for k, v in self.nsmap.items()])

        sub_schema = _prot.from_element(None, XmlSchema10, elt)
        if sub_schema.includes:
            for inc in sub_schema.includes:
                base_dir = dirname(file_name)
                child_ctx = self.clone(base_dir=base_dir)
                self.process_includes(inc)
                self.nsmap.update(child_ctx.nsmap)
                self.prefmap = dict([(v, k) for k, v in self.nsmap.items()])

        # Merge the included schema's collections into our own.
        for attr in ('imports', 'simple_types', 'complex_types', 'elements'):
            sub = getattr(sub_schema, attr)
            if sub is None:
                sub = []

            own = getattr(self.schema, attr)
            if own is None:
                own = []

            own.extend(sub)

            setattr(self.schema, attr, own)

    def process_simple_type_list(self, s, name=None):
        """Handle <simpleType><list>: build an Array of the item type, or
        defer via pending_type_tree when the item type is not yet known."""
        item_type = s.list.item_type
        if item_type is None:
            self.debug1("skipping simple type: %s because its list itemType "
                        "could not be found", name)
            return

        base = self.get_type(item_type)
        if base is None:
            self.pending_type_tree[self.get_name(item_type)].add((s, name))
            self.debug1("pending simple type list: %s "
                        "because of unseen base %s", name, item_type)
            return

        self.debug1("adding simple type list: %s", name)
        retval = Array(base, serialize_as='sd-list')  # FIXME: to be implemented
        retval.__type_name__ = name
        retval.__namespace__ = self.tns

        assert not retval.get_type_name() is retval.Empty
        return retval

    def process_simple_type_restriction(self, s, name=None):
        """Handle <simpleType><restriction>: customize the base type with
        enumeration/length/pattern facets, or defer when the base is
        unseen."""
        base_name = s.restriction.base
        if base_name is None:
            self.debug1("skipping simple type: %s because its restriction base "
                        "could not be found", name)
            return

        base = self.get_type(base_name)
        if base is None:
            self.pending_type_tree[self.get_name(base_name)].add((s, name))
            self.debug1("pending simple type: %s because of unseen base %s",
                        name, base_name)
            return

        self.debug1("adding simple type: %s", name)

        kwargs = {}
        restriction = s.restriction
        if restriction.enumeration:
            kwargs['values'] = [e.value for e in restriction.enumeration]

        if restriction.max_length:
            if restriction.max_length.value:
                kwargs['max_len'] = int(restriction.max_length.value)

        if restriction.min_length:
            if restriction.min_length.value:
                kwargs['min_len'] = int(restriction.min_length.value)

        if restriction.pattern:
            if restriction.pattern.value:
                kwargs['pattern'] = restriction.pattern.value

        retval = base.customize(**kwargs)
        retval.__type_name__ = name
        retval.__namespace__ = self.tns

        if retval.__orig__ is None:
            retval.__orig__ = base

        if retval.__extends__ is None:
            retval.__extends__ = base

        assert not retval.get_type_name() is retval.Empty
        return retval

    def process_simple_type_union(self, s, name=None):
        """<simpleType><union> is not supported; log and return None."""
        self.debug1("skipping simple type: %s because its union is not "
                    "implemented", name)

    def process_simple_type(self, s, name=None):
        """Returns the simple Spyne type from `<simpleType>` tag."""
        if name is None:
            name = s.name

        if s.list is not None:
            return self.process_simple_type_list(s, name)

        if s.union is not None:
            return self.process_simple_type_union(s, name)

        if s.restriction is not None:
            return self.process_simple_type_restriction(s, name)

        self.debug1("skipping simple type: %s", name)

    def process_schema_element(self, e):
        """Register top-level <element> *e*, or park it in pending_elements
        until its type becomes resolvable."""
        if e.name is None:
            return

        self.debug1("adding element: %s", e.name)

        t = self.get_type(e.type)
        if t:
            if e.name in self.pending_elements:
                del self.pending_elements[e.name]

            self.retval[self.tns].elements[e.name] = e
        else:
            self.pending_elements[e.name] = e

    def process_attribute(self, a):
        """Return a ``(name, XmlAttribute)`` pair for <attribute> *a*.

        Raises ValueError when the referenced or declared type cannot be
        resolved, Exception when the attribute declares no type at all.
        """
        if a.ref is not None:
            t = self.get_type(a.ref)
            return t.type.get_type_name(), t

        if a.type is not None and a.simple_type is not None:
            raise ValueError(a, "Both type and simple_type are defined.")

        elif a.type is not None:
            t = self.get_type(a.type)
            if t is None:
                raise ValueError(a, 'type %r not found' % a.type)

        elif a.simple_type is not None:
            t = self.process_simple_type(a.simple_type, a.name)
            if t is None:
                raise ValueError(a, 'simple type %r not found' % a.simple_type)
        else:
            raise Exception("dunno attr")

        kwargs = {}
        if a.default is not None:
            kwargs['default'] = _prot.from_string(t, a.default)

        if len(kwargs) > 0:
            t = t.customize(**kwargs)
            self.debug2("t = t.customize(**%r)" % kwargs)

        return a.name, XmlAttribute(t)

    def process_complex_type(self, c):
        """Build (or extend) a ComplexModel subclass for <complexType> *c*."""
        def process_type(tn, name, wrapper=None, element=None, attribute=None):
            # Resolve *tn* and append ``(name, wrapped type)`` to ``ti``;
            # defer the whole complex type via pending_types on failure.
            if wrapper is None:
                wrapper = lambda x: x
            else:
                assert issubclass(wrapper, XmlModifier), wrapper

            t = self.get_type(tn)
            key = (c.name, name)
            if t is None:
                self.pending_types[key] = c
                self.debug2("not found: %r(%s)", key, tn)
                return

            if key in self.pending_types:
                del self.pending_types[key]

            # NOTE(review): ``e`` below is the loop variable of the
            # enclosing element loops (captured by closure), not a
            # parameter of this function.
            assert name is not None, (key, e)

            kwargs = {}
            if element is not None:
                if e.min_occurs != "0":  # spyne default
                    kwargs['min_occurs'] = int(e.min_occurs)

                if e.max_occurs == "unbounded":
                    kwargs['max_occurs'] = e.max_occurs
                elif e.max_occurs != "1":
                    kwargs['max_occurs'] = int(e.max_occurs)

                if e.nillable != True:  # spyne default
                    kwargs['nillable'] = e.nillable

                if e.default is not None:
                    kwargs['default'] = _prot.from_string(t, e.default)

                if len(kwargs) > 0:
                    t = t.customize(**kwargs)

            if attribute is not None:
                if attribute.default is not None:
                    # NOTE(review): the guard checks attribute.default but
                    # the value read is a.default (enclosing loop variable);
                    # they normally alias, yet this looks like a latent bug
                    # — confirm.
                    kwargs['default'] = _prot.from_string(t, a.default)

                if len(kwargs) > 0:
                    t = t.customize(**kwargs)

            ti.append( (name, wrapper(t)) )
            self.debug2(" found: %r(%s), c: %r", key, tn, kwargs)

        def process_element(e):
            # Sort out ref= vs name= elements, then delegate to process_type.
            if e.ref is not None:
                tn = e.ref
                name = e.ref.split(":", 1)[-1]
            elif e.name is not None:
                tn = e.type
                name = e.name
            else:
                raise Exception("dunno")

            process_type(tn, name, element=e)

        ti = []
        base = ComplexModelBase
        if c.name in self.retval[self.tns].types:
            self.debug1("modifying existing %r", c.name)
        else:
            self.debug1("adding complex type: %s", c.name)

        if c.sequence is not None:
            if c.sequence.elements is not None:
                for e in c.sequence.elements:
                    process_element(e)

            if c.sequence.choices is not None:
                for ch in c.sequence.choices:
                    if ch.elements is not None:
                        for e in ch.elements:
                            process_element(e)

        if c.choice is not None:
            if c.choice.elements is not None:
                for e in c.choice.elements:
                    process_element(e)

        if c.attributes is not None:
            for a in c.attributes:
                if a.name is None:
                    continue
                if a.type is None:
                    continue
                process_type(a.type, a.name, XmlAttribute, attribute=a)

        if c.simple_content is not None:
            sc = c.simple_content
            ext = sc.extension
            restr = sc.restriction

            if ext is not None:
                base_name = ext.base
                b = self.get_type(ext.base)

                if ext.attributes is not None:
                    for a in ext.attributes:
                        ti.append(self.process_attribute(a))

            elif restr is not None:
                base_name = restr.base
                b = self.get_type(restr.base)

                if restr.attributes is not None:
                    for a in restr.attributes:
                        ti.append(self.process_attribute(a))

            else:
                raise Exception("Invalid simpleContent tag: %r", sc)

            if issubclass(b, ComplexModelBase):
                base = b
            else:
                # Non-complex base: expose the tag's text content as _data.
                process_type(base_name, "_data", XmlData)

        if c.name in self.retval[self.tns].types:
            # Type seen before (e.g. a pending retry): merge new members in.
            r = self.retval[self.tns].types[c.name]
            r._type_info.update(ti)
        else:
            cls_dict = odict({
                '__type_name__': c.name,
                '__namespace__': self.tns,
                '_type_info': ti,
            })
            if self.repr is not None:
                cls_dict['__repr__'] = self.repr

            r = ComplexModelMeta(str(c.name), (base,), cls_dict)
            self.retval[self.tns].types[c.name] = r

        return r

    def get_name(self, tn):
        """Split type name *tn* into a ``(namespace, localname)`` pair,
        resolving prefixes through nsmap and defaulting to tns."""
        if tn.startswith("{"):
            ns, qn = tn[1:].split('}', 1)
        elif ":" in tn:
            ns, qn = tn.split(":", 1)
            ns = self.nsmap[ns]
        else:
            if None in self.nsmap:
                ns, qn = self.nsmap[None], tn
            else:
                ns, qn = self.tns, tn

        return ns, qn

    def get_type(self, tn):
        """Resolve qualified type name *tn* to a spyne type.

        Looks in already-parsed schemas first (types, then elements),
        falling back to the xsd-builtin TYPE_MAP.  Returns Null for None
        input and None when the name cannot be resolved yet.
        """
        if tn is None:
            return Null

        ns, qn = self.get_name(tn)

        ti = self.retval.get(ns)
        if ti is not None:
            t = ti.types.get(qn)
            if t:
                return t

            e = ti.elements.get(qn)
            if e:
                if ":" in e.type:
                    return self.get_type(e.type)
                else:
                    retval = self.get_type("{%s}%s" % (ns, e.type))
                    if retval is None and None in self.nsmap:
                        retval = self.get_type("{%s}%s" %
                                               (self.nsmap[None], e.type))
                    return retval

        return TYPE_MAP.get("{%s}%s" % (ns, qn))

    def process_pending(self):
        """Retry every parked complex type and element now that more types
        may be resolvable."""
        # process pending
        self.debug0("6 %s processing pending complex_types", B(self.tns))
        for (c_name, e_name), _v in list(self.pending_types.items()):
            self.process_complex_type(_v)

        self.debug0("7 %s processing pending elements", Y(self.tns))
        # NOTE(review): process_schema_element can delete entries from
        # pending_elements while it is being iterated; a list() copy (as
        # used for pending_types above) would be safer.
        for _k, _v in self.pending_elements.items():
            self.process_schema_element(_v)

    def print_pending(self, fail=False):
        """Dump any still-unresolved elements/types; when *fail* is True
        and anything is pending, raise."""
        ptt_pending = sum((len(v) for v in self.pending_type_tree.values())) > 0
        if len(self.pending_elements) > 0 or len(self.pending_types) > 0 or \
                ptt_pending:
            if fail:
                logging.basicConfig(level=logging.DEBUG)
            self.debug0("%" * 50)
            self.debug0(self.tns)
            self.debug0("")
            self.debug0("elements")
            self.debug0(pformat(self.pending_elements))
            self.debug0("")
            self.debug0("types")
            self.debug0(pformat(self.pending_types))
            self.debug0("%" * 50)
            self.debug0("type tree")
            self.debug0(pformat(self.pending_type_tree))
            self.debug0("%" * 50)
            if fail:
                raise Exception("there are still unresolved elements")

    def parse_schema(self, elt):
        """Parse schema element *elt* into self.retval, recursing through
        includes and imports, then retry everything left pending."""
        self.nsmap = dict(elt.nsmap.items())
        self.prefmap = dict([(v, k) for k, v in self.nsmap.items()])

        self.schema = schema = _prot.from_element(self, XmlSchema10, elt)

        self.pending_types = {}
        self.pending_elements = {}
        self.pending_type_tree = defaultdict(set)

        self.tns = tns = schema.target_namespace
        if self.tns is None:
            self.tns = tns = '__no_ns__'
        if tns in self.retval:
            # Already parsed (circular import): nothing to do.
            return
        self.retval[tns] = _Schema()

        self.debug0("1 %s processing includes", M(tns))
        if schema.includes:
            for include in schema.includes:
                self.process_includes(include)

        # Re-key the raw lists as ordered name -> node dicts.
        if schema.elements:
            schema.elements = odict([(e.name, e) for e in schema.elements])
        if schema.complex_types:
            schema.complex_types = odict([(c.name, c)
                                          for c in schema.complex_types])
        if schema.simple_types:
            schema.simple_types = odict([(s.name, s)
                                         for s in schema.simple_types])
        if schema.attributes:
            schema.attributes = odict([(a.name, a) for a in schema.attributes])

        self.debug0("2 %s processing imports", R(tns))
        if schema.imports:
            for imp in schema.imports:
                if not imp.namespace in self.retval:
                    self.debug1("%s importing %s", tns, imp.namespace)
                    fname = self.files[imp.namespace]
                    self.clone(2, dirname(fname)).parse_schema_file(fname)
                    self.retval[tns].imports.add(imp.namespace)

        self.debug0("3 %s processing simple_types", G(tns))
        if schema.simple_types:
            for s in schema.simple_types.values():
                st = self.process_simple_type(s)
                if st is not None:
                    self.retval[self.tns].types[s.name] = st

                    # Now that this type exists, retry the simple types
                    # that were waiting for it.
                    key = self.get_name(s.name)
                    dependents = self.pending_type_tree[key]
                    for s, name in set(dependents):
                        st = self.process_simple_type(s, name)
                        if st is not None:
                            self.retval[self.tns].types[s.name] = st
                            self.debug2("added back simple type: %s", s.name)
                        dependents.remove((s, name))
                    if len(dependents) == 0:
                        del self.pending_type_tree[key]

        # check no simple types are left behind.
        assert sum((len(v) for v in self.pending_type_tree.values())) == 0, \
            self.pending_type_tree.values()

        self.debug0("4 %s processing attributes", G(tns))
        if schema.attributes:
            for s in schema.attributes.values():
                n, t = self.process_attribute(s)
                self.retval[self.tns].types[n] = t

        self.debug0("5 %s processing complex_types", B(tns))
        if schema.complex_types:
            for c in schema.complex_types.values():
                self.process_complex_type(c)

        self.debug0("6 %s processing elements", Y(tns))
        if schema.elements:
            for e in schema.elements.values():
                self.process_schema_element(e)

        self.process_pending()

        if self.parent is None:  # for the top-most schema
            if self.children is not None:  # if it uses <include> or <import>
                # This is needed for schemas with circular imports
                for c in chain([self], self.children):
                    c.print_pending()
                self.debug0('')

                # FIXME: should put this in a while loop that loops until no
                # changes occur
                for c in chain([self], self.children):
                    c.process_pending()
                for c in chain([self], self.children):
                    c.process_pending()
                self.debug0('')

                for c in chain([self], self.children):
                    c.print_pending(fail=(not self.skip_errors))

        return self.retval
Refactor simple-type parsing: resolve pending dependent simple types inside process_simple_type().
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
# To see the list of xml schema builtins recognized by this parser, run defn.py
# in this package.
from collections import defaultdict
import logging
logger = logging.getLogger(__name__)
import os
from itertools import chain
from pprint import pformat
from copy import copy
from os.path import dirname
from os.path import abspath
from os.path import join
from lxml import etree
from spyne.util import memoize
from spyne.util.odict import odict
from spyne.model import Null
from spyne.model import XmlData
from spyne.model import XmlAttribute
from spyne.model import Array
from spyne.model import ComplexModelBase
from spyne.model import ComplexModelMeta
from spyne.model.complex import XmlModifier
from spyne.protocol.xml import XmlDocument
from spyne.interface.xml_schema.defn import TYPE_MAP
from spyne.interface.xml_schema.defn import SchemaBase
from spyne.interface.xml_schema.defn import XmlSchema10
from spyne.util.color import R, G, B, M, Y
PARSER = etree.XMLParser(remove_comments=True)
_prot = XmlDocument()
class _Schema(object):
    """Per-target-namespace container for parsed schema artifacts."""

    def __init__(self):
        self.imports = set()  # namespaces pulled in via <import>
        self.elements = {}    # top-level element declarations, by name
        self.types = {}       # resolved spyne types, by name
# FIXME: Needs to emit delayed assignment of recursive structures instead of
# lousy ellipses.
@memoize
def Thier_repr(with_ns=False):
    """Template for ``hier_repr``, a ``repr`` variant that shows spyne
    ``ComplexModel``s in a hierarchical format.

    :param with_ns: either bool or a callable that returns the class name
        as string
    """
    if with_ns is False:
        def get_class_name(c):
            return c.get_type_name()

    # fix: ``with_ns is 1`` identity-compared an int literal, which relies
    # on CPython small-int interning and warns on modern interpreters;
    # use equality instead.
    elif with_ns is True or with_ns == 1:
        def get_class_name(c):
            return "{%s}%s" % (c.get_namespace(), c.get_type_name())

    else:
        def get_class_name(c):
            return with_ns(c.get_namespace(), c.get_type_name())

    def hier_repr(inst, i0=0, I=' ', tags=None):
        # *tags* tracks visited object ids so cycles print as a bare
        # class name instead of recursing forever.
        if tags is None:
            tags = set()

        cls = inst.__class__
        if not hasattr(cls, '_type_info'):
            return repr(inst)

        clsid = "%s" % (get_class_name(cls))
        if id(inst) in tags:
            return clsid

        tags.add(id(inst))

        i1 = i0 + 1
        i2 = i1 + 1

        retval = [clsid, '(']

        # assumes _xml_tag_body_as yields (key, type) pairs — TODO confirm
        xtba = cls.Attributes._xml_tag_body_as
        if xtba is not None:
            xtba_key, xtba_type = next(xtba)
            if xtba_key is not None:
                value = getattr(inst, xtba_key, None)
                retval.append("%s,\n" % hier_repr(value, i1, I, tags))
            else:
                retval.append('\n')

        for k, v in inst.get_flat_type_info(cls).items():
            value = getattr(inst, k, None)
            if (issubclass(v, Array) or v.Attributes.max_occurs > 1) and \
                    value is not None:
                # Array-like member: render each element on its own line.
                retval.append("%s%s=[\n" % (I * i1, k))
                for subval in value:
                    retval.append("%s%s,\n" % (I * i2,
                                               hier_repr(subval, i2, I, tags)))
                retval.append('%s],\n' % (I * i1))

            elif issubclass(v, XmlData):
                pass  # already rendered via the tag-body branch above

            else:
                retval.append("%s%s=%s,\n" % (I * i1, k,
                                              hier_repr(value, i1, I, tags)))

        retval.append('%s)' % (I * i0))
        return ''.join(retval)

    return hier_repr
# Install the hierarchical repr as the default for all parsed schema
# objects, and expose ready-made variants with and without namespaces.
SchemaBase.__repr__ = Thier_repr()
hier_repr = Thier_repr()
hier_repr_ns = Thier_repr(with_ns=True)
class XmlSchemaParser(object):
def __init__(self, files, base_dir=None, repr_=Thier_repr(with_ns=False),
skip_errors=False):
self.retval = {}
self.indent = 0
self.files = files
self.base_dir = base_dir
self.repr = repr_
if self.base_dir is None:
self.base_dir = os.getcwd()
self.parent = None
self.children = None
self.nsmap = None
self.schema = None
self.prefmap = None
self.tns = None
self.pending_elements = None
self.pending_types = None
self.skip_errors = skip_errors
self.pending_simple_types = defaultdict(set)
def clone(self, indent=0, base_dir=None):
retval = copy(self)
if retval.parent is None:
retval.parent = self
if self.children is None:
self.children = [retval]
else:
self.children.append(retval)
else:
retval.parent.children.append(retval)
retval.indent = self.indent + indent
if base_dir is not None:
retval.base_dir = base_dir
return retval
def debug0(self, s, *args, **kwargs):
logger.debug("%s%s" % (" " * self.indent, s), *args, **kwargs)
def debug1(self, s, *args, **kwargs):
logger.debug("%s%s" % (" " * (self.indent + 1), s), *args, **kwargs)
def debug2(self, s, *args, **kwargs):
logger.debug("%s%s" % (" " * (self.indent + 2), s), *args, **kwargs)
def parse_schema_file(self, file_name):
elt = etree.fromstring(open(file_name, 'rb').read(), parser=PARSER)
return self.parse_schema(elt)
def process_includes(self, include):
file_name = include.schema_location
if file_name is None:
return
self.debug1("including %s %s", self.base_dir, file_name)
file_name = abspath(join(self.base_dir, file_name))
data = open(file_name, 'rb').read()
elt = etree.fromstring(data, parser=PARSER)
self.nsmap.update(elt.nsmap)
self.prefmap = dict([(v, k) for k, v in self.nsmap.items()])
sub_schema = _prot.from_element(None, XmlSchema10, elt)
if sub_schema.includes:
for inc in sub_schema.includes:
base_dir = dirname(file_name)
child_ctx = self.clone(base_dir=base_dir)
self.process_includes(inc)
self.nsmap.update(child_ctx.nsmap)
self.prefmap = dict([(v, k) for k, v in self.nsmap.items()])
for attr in ('imports', 'simple_types', 'complex_types', 'elements'):
sub = getattr(sub_schema, attr)
if sub is None:
sub = []
own = getattr(self.schema, attr)
if own is None:
own = []
own.extend(sub)
setattr(self.schema, attr, own)
def process_simple_type_list(self, s, name=None):
item_type = s.list.item_type
if item_type is None:
self.debug1("skipping simple type: %s because its list itemType "
"could not be found", name)
return
base = self.get_type(item_type)
if base is None:
self.pending_simple_types[self.get_name(item_type)].add((s, name))
self.debug1("pending simple type list: %s "
"because of unseen base %s", name, item_type)
return
self.debug1("adding simple type list: %s", name)
retval = Array(base, serialize_as='sd-list') # FIXME: to be implemented
retval.__type_name__ = name
retval.__namespace__ = self.tns
assert not retval.get_type_name() is retval.Empty
return retval
def process_simple_type_restriction(self, s, name=None):
base_name = s.restriction.base
if base_name is None:
self.debug1("skipping simple type: %s because its restriction base "
"could not be found", name)
return
base = self.get_type(base_name)
if base is None:
self.pending_simple_types[self.get_name(base_name)].add((s, name))
self.debug1("pending simple type: %s because of unseen base %s",
name, base_name)
return
self.debug1("adding simple type: %s", name)
kwargs = {}
restriction = s.restriction
if restriction.enumeration:
kwargs['values'] = [e.value for e in restriction.enumeration]
if restriction.max_length:
if restriction.max_length.value:
kwargs['max_len'] = int(restriction.max_length.value)
if restriction.min_length:
if restriction.min_length.value:
kwargs['min_len'] = int(restriction.min_length.value)
if restriction.pattern:
if restriction.pattern.value:
kwargs['pattern'] = restriction.pattern.value
retval = base.customize(**kwargs)
retval.__type_name__ = name
retval.__namespace__ = self.tns
if retval.__orig__ is None:
retval.__orig__ = base
if retval.__extends__ is None:
retval.__extends__ = base
assert not retval.get_type_name() is retval.Empty
return retval
def process_simple_type_union(self, s, name=None):
self.debug1("skipping simple type: %s because its union is not "
"implemented", name)
def process_simple_type(self, s, name=None):
"""Returns the simple Spyne type from `<simpleType>` tag."""
retval = None
if name is None:
name = s.name
if s.list is not None:
retval = self.process_simple_type_list(s, name)
elif s.union is not None:
retval = self.process_simple_type_union(s, name)
elif s.restriction is not None:
retval = self.process_simple_type_restriction(s, name)
if retval is None:
self.debug1("skipping simple type: %s", name)
return
self.retval[self.tns].types[s.name] = retval
key = self.get_name(name)
dependents = self.pending_simple_types[key]
for s, name in set(dependents):
st = self.process_simple_type(s, name)
if st is not None:
self.retval[self.tns].types[s.name] = st
self.debug2("added back simple type: %s", s.name)
dependents.remove((s, name))
if len(dependents) == 0:
del self.pending_simple_types[key]
return retval
def process_schema_element(self, e):
if e.name is None:
return
self.debug1("adding element: %s", e.name)
t = self.get_type(e.type)
if t:
if e.name in self.pending_elements:
del self.pending_elements[e.name]
self.retval[self.tns].elements[e.name] = e
else:
self.pending_elements[e.name] = e
def process_attribute(self, a):
if a.ref is not None:
t = self.get_type(a.ref)
return t.type.get_type_name(), t
if a.type is not None and a.simple_type is not None:
raise ValueError(a, "Both type and simple_type are defined.")
elif a.type is not None:
t = self.get_type(a.type)
if t is None:
raise ValueError(a, 'type %r not found' % a.type)
elif a.simple_type is not None:
t = self.process_simple_type(a.simple_type, a.name)
if t is None:
raise ValueError(a, 'simple type %r not found' % a.simple_type)
else:
raise Exception("dunno attr")
kwargs = {}
if a.default is not None:
kwargs['default'] = _prot.from_string(t, a.default)
if len(kwargs) > 0:
t = t.customize(**kwargs)
self.debug2("t = t.customize(**%r)" % kwargs)
return a.name, XmlAttribute(t)
def process_complex_type(self, c):
def process_type(tn, name, wrapper=None, element=None, attribute=None):
if wrapper is None:
wrapper = lambda x: x
else:
assert issubclass(wrapper, XmlModifier), wrapper
t = self.get_type(tn)
key = (c.name, name)
if t is None:
self.pending_types[key] = c
self.debug2("not found: %r(%s)", key, tn)
return
if key in self.pending_types:
del self.pending_types[key]
assert name is not None, (key, e)
kwargs = {}
if element is not None:
if e.min_occurs != "0": # spyne default
kwargs['min_occurs'] = int(e.min_occurs)
if e.max_occurs == "unbounded":
kwargs['max_occurs'] = e.max_occurs
elif e.max_occurs != "1":
kwargs['max_occurs'] = int(e.max_occurs)
if e.nillable != True: # spyne default
kwargs['nillable'] = e.nillable
if e.default is not None:
kwargs['default'] = _prot.from_string(t, e.default)
if len(kwargs) > 0:
t = t.customize(**kwargs)
if attribute is not None:
if attribute.default is not None:
kwargs['default'] = _prot.from_string(t, a.default)
if len(kwargs) > 0:
t = t.customize(**kwargs)
ti.append( (name, wrapper(t)) )
self.debug2(" found: %r(%s), c: %r", key, tn, kwargs)
def process_element(e):
if e.ref is not None:
tn = e.ref
name = e.ref.split(":", 1)[-1]
elif e.name is not None:
tn = e.type
name = e.name
else:
raise Exception("dunno")
process_type(tn, name, element=e)
ti = []
base = ComplexModelBase
if c.name in self.retval[self.tns].types:
self.debug1("modifying existing %r", c.name)
else:
self.debug1("adding complex type: %s", c.name)
if c.sequence is not None:
if c.sequence.elements is not None:
for e in c.sequence.elements:
process_element(e)
if c.sequence.choices is not None:
for ch in c.sequence.choices:
if ch.elements is not None:
for e in ch.elements:
process_element(e)
if c.choice is not None:
if c.choice.elements is not None:
for e in c.choice.elements:
process_element(e)
if c.attributes is not None:
for a in c.attributes:
if a.name is None:
continue
if a.type is None:
continue
process_type(a.type, a.name, XmlAttribute, attribute=a)
if c.simple_content is not None:
sc = c.simple_content
ext = sc.extension
restr = sc.restriction
if ext is not None:
base_name = ext.base
b = self.get_type(ext.base)
if ext.attributes is not None:
for a in ext.attributes:
ti.append(self.process_attribute(a))
elif restr is not None:
base_name = restr.base
b = self.get_type(restr.base)
if restr.attributes is not None:
for a in restr.attributes:
ti.append(self.process_attribute(a))
else:
raise Exception("Invalid simpleContent tag: %r", sc)
if issubclass(b, ComplexModelBase):
base = b
else:
process_type(base_name, "_data", XmlData)
if c.name in self.retval[self.tns].types:
r = self.retval[self.tns].types[c.name]
r._type_info.update(ti)
else:
cls_dict = odict({
'__type_name__': c.name,
'__namespace__': self.tns,
'_type_info': ti,
})
if self.repr is not None:
cls_dict['__repr__'] = self.repr
r = ComplexModelMeta(str(c.name), (base,), cls_dict)
self.retval[self.tns].types[c.name] = r
return r
def get_name(self, tn):
    """Split a type name into a ``(namespace, local_name)`` pair.

    Accepts Clark notation (``{ns}local``), a prefixed name
    (``pfx:local``, resolved through ``self.nsmap``), or a bare name,
    which falls back to the default namespace when one is declared and
    to ``self.tns`` otherwise.
    """
    if tn.startswith("{"):
        namespace, local_name = tn[1:].split('}', 1)
        return namespace, local_name

    if ":" in tn:
        prefix, local_name = tn.split(":", 1)
        return self.nsmap[prefix], local_name

    # Bare name: prefer the default (un-prefixed) namespace if declared.
    if None in self.nsmap:
        return self.nsmap[None], tn
    return self.tns, tn
def get_type(self, tn):
    """Resolve a type name to a model class, or None when unknown.

    ``None`` input maps to ``Null``.  Otherwise the name is split into
    (namespace, local name); a type registered for that schema wins,
    then a same-named top-level element's type is chased (recursively,
    also trying the default namespace), and finally the builtin
    ``TYPE_MAP`` is consulted.
    """
    if tn is None:
        return Null

    ns, qn = self.get_name(tn)

    schema_info = self.retval.get(ns)
    if schema_info is not None:
        known_type = schema_info.types.get(qn)
        if known_type:
            return known_type

        elt = schema_info.elements.get(qn)
        if elt:
            if ":" in elt.type:
                # Prefixed element type: resolve it directly.
                return self.get_type(elt.type)

            # Unprefixed: try the element's own namespace first, then
            # the default namespace if one is declared.
            resolved = self.get_type("{%s}%s" % (ns, elt.type))
            if resolved is None and None in self.nsmap:
                resolved = self.get_type("{%s}%s" %
                                            (self.nsmap[None], elt.type))
            return resolved

    return TYPE_MAP.get("{%s}%s" % (ns, qn))
def process_pending(self):
    """Retry complex types and elements whose dependencies were not yet
    resolvable when they were first encountered."""
    self.debug0("6 %s processing pending complex_types", B(self.tns))
    # Snapshot the mapping: processing a type may mutate pending_types.
    for ctype in list(self.pending_types.values()):
        self.process_complex_type(ctype)

    self.debug0("7 %s processing pending elements", Y(self.tns))
    for pending_elt in self.pending_elements.values():
        self.process_schema_element(pending_elt)
def print_pending(self, fail=False):
    """Dump any still-unresolved elements/types to the debug log.

    When *fail* is True and anything is pending, debug logging is
    forced on and an Exception is raised after the dump.  No-op when
    nothing is pending.
    """
    simple_types_pending = any(
                len(v) > 0 for v in self.pending_simple_types.values())
    if not (self.pending_elements or self.pending_types or
                                                    simple_types_pending):
        return

    if fail:
        logging.basicConfig(level=logging.DEBUG)

    self.debug0("%" * 50)
    self.debug0(self.tns)
    self.debug0("")

    self.debug0("elements")
    self.debug0(pformat(self.pending_elements))
    self.debug0("")

    self.debug0("simple types")
    self.debug0(pformat(self.pending_simple_types))
    self.debug0("%" * 50)

    self.debug0("complex types")
    self.debug0(pformat(self.pending_types))
    self.debug0("%" * 50)

    if fail:
        raise Exception("there are still unresolved elements")
def parse_schema(self, elt):
    """Parse one ``<xs:schema>`` element into ``self.retval[tns]``.

    Processing order matters: includes, then imports, then simple
    types, attributes, complex types and finally elements.  Anything
    that could not be resolved on the first pass is retried through
    process_pending(); the top-most parser additionally re-runs the
    retry loop over itself and every child parser to cope with
    circular imports, then reports (or raises on) leftovers.
    """
    # Prefix -> namespace map taken from the element, plus its inverse.
    self.nsmap = dict(elt.nsmap.items())
    self.prefmap = dict([(v, k) for k, v in self.nsmap.items()])

    self.schema = schema = _prot.from_element(self, XmlSchema10, elt)

    # Per-schema retry queues, reset for this parse run.
    self.pending_types = {}
    self.pending_elements = {}

    self.tns = tns = schema.target_namespace
    if self.tns is None:
        # Schemas without a targetNamespace are keyed under a sentinel.
        self.tns = tns = '__no_ns__'
    if tns in self.retval:
        # Already parsed (e.g. reached through another import).
        return
    self.retval[tns] = _Schema()

    self.debug0("1 %s processing includes", M(tns))
    if schema.includes:
        for include in schema.includes:
            self.process_includes(include)

    # Re-key the schema's child collections by name for direct lookup.
    if schema.elements:
        schema.elements = odict([(e.name, e) for e in schema.elements])
    if schema.complex_types:
        schema.complex_types = odict([(c.name, c)
                                      for c in schema.complex_types])
    if schema.simple_types:
        schema.simple_types = odict([(s.name, s)
                                     for s in schema.simple_types])
    if schema.attributes:
        schema.attributes = odict([(a.name, a) for a in schema.attributes])

    self.debug0("2 %s processing imports", R(tns))
    if schema.imports:
        for imp in schema.imports:
            if not imp.namespace in self.retval:
                self.debug1("%s importing %s", tns, imp.namespace)
                fname = self.files[imp.namespace]
                # Parse the imported file with a child parser rooted at
                # that file's directory (so its relative paths resolve).
                self.clone(2, dirname(fname)).parse_schema_file(fname)
                self.retval[tns].imports.add(imp.namespace)

    self.debug0("3 %s processing simple_types", G(tns))
    if schema.simple_types:
        for s in schema.simple_types.values():
            self.process_simple_type(s)

        # no simple types should have been left behind.
        assert sum((len(v) for v in self.pending_simple_types.values())) == 0, \
            self.pending_simple_types.values()

    self.debug0("4 %s processing attributes", G(tns))
    if schema.attributes:
        for s in schema.attributes.values():
            n, t = self.process_attribute(s)
            self.retval[self.tns].types[n] = t

    self.debug0("5 %s processing complex_types", B(tns))
    if schema.complex_types:
        for c in schema.complex_types.values():
            self.process_complex_type(c)

    self.debug0("6 %s processing elements", Y(tns))
    if schema.elements:
        for e in schema.elements.values():
            self.process_schema_element(e)

    self.process_pending()

    if self.parent is None:  # for the top-most schema
        if self.children is not None:  # if it uses <include> or <import>
            # This is needed for schemas with circular imports
            for c in chain([self], self.children):
                c.print_pending()
            self.debug0('')

            # FIXME: should put this in a while loop that loops until no
            # changes occur
            for c in chain([self], self.children):
                c.process_pending()
            for c in chain([self], self.children):
                c.process_pending()
            self.debug0('')

            for c in chain([self], self.children):
                c.print_pending(fail=(not self.skip_errors))

    return self.retval
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# l10n_cr_base.py
# l10n_cr_base
# First author: Carlos Vásquez <carlos.vasquez@clearcorp.co.cr> (ClearCorp S.A.)
# Copyright (c) 2010-TODAY ClearCorp S.A. (http://clearcorp.co.cr). All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of ClearCorp S.A..
#
##############################################################################
from osv import osv,fields
class res_partner_function(osv.osv):
    '''
    Inherits res.partner.function to add translation to code and name fields
    '''
    _inherit = 'res.partner.function'

    # Redeclare both columns with translate=True so their values can be
    # localized through the OpenERP translation framework.
    _columns = {
        'name': fields.char('Function Name', size=64, required=True, translate=True),
        'code': fields.char('Code', size=8, required=True, translate=True),
    }
# Instantiate to register the model with the ORM (OpenERP 6.x pattern).
res_partner_function()
class res_partner_title(osv.osv):
    '''
    Inherits res.partner.title to add translation to shortcut field
    '''
    _inherit = 'res.partner.title'

    # Redeclare the column with translate=True so the abbreviation can be
    # localized through the OpenERP translation framework.
    _columns = {
        'shortcut': fields.char('Shortcut', required=True, size=16, translate=True),
    }
# Instantiate to register the model with the ORM (OpenERP 6.x pattern).
res_partner_title()
class res_partner(osv.osv):
    '''
    Inherits res.partner to add id_number field and a language selection
    whose options are built from the languages installed in the system.
    '''
    _inherit = 'res.partner'

    def _lang_get(self, cr, uid, context=None):
        """Return [(code, name)] selection tuples for installed languages.

        Fix: the original code passed a module-level ``_lang_get`` to
        fields.selection, but no such name was ever defined or imported,
        so the class raised NameError at definition time.  Defining the
        standard res.partner helper here resolves the reference.
        """
        lang_obj = self.pool.get('res.lang')
        ids = lang_obj.search(cr, uid, [], context=context)
        langs = lang_obj.read(cr, uid, ids, ['code', 'name'], context=context)
        return [(lang['code'], lang['name']) for lang in langs]

    _columns = {
        'id_number': fields.char('ID Number', size=30, required=False, select=1),
        'lang': fields.selection(_lang_get, 'Language', size=5, required=True, help="If the selected language is loaded in the system, all documents related to this partner will be printed in this language. If not, it will be english."),
    }
    _defaults = {
        # Spanish is the sensible default for this Costa Rica localization.
        'lang': 'es_ES',
    }
# Instantiate to register the model with the ORM (OpenERP 6.x pattern).
res_partner()
[FIX] l10n_cr_base.py: partner lang function call
# -*- encoding: utf-8 -*-
##############################################################################
#
# l10n_cr_base.py
# l10n_cr_base
# First author: Carlos Vásquez <carlos.vasquez@clearcorp.co.cr> (ClearCorp S.A.)
# Copyright (c) 2010-TODAY ClearCorp S.A. (http://clearcorp.co.cr). All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of ClearCorp S.A..
#
##############################################################################
from osv import osv,fields
class res_partner_function(osv.osv):
    '''
    Inherits res.partner.function to add translation to code and name fields
    '''
    _inherit = 'res.partner.function'

    # Redeclare both columns with translate=True so their values can be
    # localized through the OpenERP translation framework.
    _columns = {
        'name': fields.char('Function Name', size=64, required=True, translate=True),
        'code': fields.char('Code', size=8, required=True, translate=True),
    }
# Instantiate to register the model with the ORM (OpenERP 6.x pattern).
res_partner_function()
class res_partner_title(osv.osv):
    '''
    Inherits res.partner.title to add translation to shortcut field
    '''
    _inherit = 'res.partner.title'

    # Redeclare the column with translate=True so the abbreviation can be
    # localized through the OpenERP translation framework.
    _columns = {
        'shortcut': fields.char('Shortcut', required=True, size=16, translate=True),
    }
# Instantiate to register the model with the ORM (OpenERP 6.x pattern).
res_partner_title()
class res_partner(osv.osv):
    '''
    Inherits res.partner to add id_number field and a language selection
    whose options are built from the languages installed in the system.
    '''
    _inherit = 'res.partner'

    def _lang_get(self, cr, uid, context=None):
        """Return [(code, name)] selection tuples for installed languages.

        Fix: the original referenced ``base.res.partner.partner._lang_get``,
        but no ``base`` name is imported in this module, so the class
        raised NameError at definition time.  Defining the standard
        res.partner helper locally resolves the reference without the
        fragile cross-module attribute path.
        """
        lang_obj = self.pool.get('res.lang')
        ids = lang_obj.search(cr, uid, [], context=context)
        langs = lang_obj.read(cr, uid, ids, ['code', 'name'], context=context)
        return [(lang['code'], lang['name']) for lang in langs]

    _columns = {
        'id_number': fields.char('ID Number', size=30, required=False, select=1),
        'lang': fields.selection(_lang_get, 'Language', size=5, required=True, help="If the selected language is loaded in the system, all documents related to this partner will be printed in this language. If not, it will be english."),
    }
    _defaults = {
        # Spanish is the sensible default for this Costa Rica localization.
        'lang': 'es_ES',
    }
# Instantiate to register the model with the ORM (OpenERP 6.x pattern).
res_partner()
|
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
import netaddr
from tempest_lib.common.utils import data_utils
from tempest.api.network import base
from tempest.api.network import base_security_groups as sec_base
from tempest.common import custom_matchers
from tempest import config
from tempest import test
CONF = config.CONF
class PortsTestJSON(sec_base.BaseSecGroupTest):
    """
    Test the following operations for ports:

        port create
        port delete
        port list
        port show
        port update
    """

    @classmethod
    def resource_setup(cls):
        # Shared fixtures: one network and one port, reused by the
        # read-only tests (show / list / fields).
        super(PortsTestJSON, cls).resource_setup()
        cls.network = cls.create_network()
        cls.port = cls.create_port(cls.network)

    def _delete_port(self, port_id):
        """Delete a port and verify it no longer appears in the list."""
        self.client.delete_port(port_id)
        body = self.client.list_ports()
        ports_list = body['ports']
        self.assertFalse(port_id in [n['id'] for n in ports_list])

    @test.attr(type='smoke')
    @test.idempotent_id('c72c1c0c-2193-4aca-aaa4-b1442640f51c')
    def test_create_update_delete_port(self):
        # Verify port creation
        body = self.client.create_port(network_id=self.network['id'])
        port = body['port']
        # Schedule port deletion with verification upon test completion
        self.addCleanup(self._delete_port, port['id'])
        self.assertTrue(port['admin_state_up'])
        # Verify port update
        new_name = "New_Port"
        body = self.client.update_port(port['id'],
                                       name=new_name,
                                       admin_state_up=False)
        updated_port = body['port']
        self.assertEqual(updated_port['name'], new_name)
        self.assertFalse(updated_port['admin_state_up'])

    @test.idempotent_id('67f1b811-f8db-43e2-86bd-72c074d4a42c')
    def test_create_bulk_port(self):
        # Create one port on each of two networks via a single bulk call
        # and verify each port landed on the intended network.
        network1 = self.network
        name = data_utils.rand_name('network-')
        network2 = self.create_network(network_name=name)
        network_list = [network1['id'], network2['id']]
        port_list = [{'network_id': net_id} for net_id in network_list]
        body = self.client.create_bulk_port(port_list)
        created_ports = body['ports']
        port1 = created_ports[0]
        port2 = created_ports[1]
        self.addCleanup(self._delete_port, port1['id'])
        self.addCleanup(self._delete_port, port2['id'])
        self.assertEqual(port1['network_id'], network1['id'])
        self.assertEqual(port2['network_id'], network2['id'])
        self.assertTrue(port1['admin_state_up'])
        self.assertTrue(port2['admin_state_up'])

    @classmethod
    def _get_ipaddress_from_tempest_conf(cls):
        """Return first subnet gateway for configured CIDR """
        # NOTE(review): despite the docstring, this returns the base
        # (network) address of the configured CIDR; callers offset it
        # (+3, +4, ...) to derive usable host addresses.
        # NOTE(review): assumes _ip_version is 4 or 6 -- any other value
        # would leave `cidr` unbound; confirm against the base class.
        if cls._ip_version == 4:
            cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
        elif cls._ip_version == 6:
            cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
        return netaddr.IPAddress(cidr)

    @test.attr(type='smoke')
    @test.idempotent_id('0435f278-40ae-48cb-a404-b8a087bc09b1')
    def test_create_port_in_allowed_allocation_pools(self):
        # Restrict the subnet's allocation pool to a 3-address slice and
        # verify a new port's fixed IP falls inside it.
        network = self.create_network()
        net_id = network['id']
        address = self._get_ipaddress_from_tempest_conf()
        allocation_pools = {'allocation_pools': [{'start': str(address + 4),
                                                  'end': str(address + 6)}]}
        subnet = self.create_subnet(network, **allocation_pools)
        self.addCleanup(self.client.delete_subnet, subnet['id'])
        body = self.client.create_port(network_id=net_id)
        self.addCleanup(self.client.delete_port, body['port']['id'])
        port = body['port']
        ip_address = port['fixed_ips'][0]['ip_address']
        start_ip_address = allocation_pools['allocation_pools'][0]['start']
        end_ip_address = allocation_pools['allocation_pools'][0]['end']
        ip_range = netaddr.IPRange(start_ip_address, end_ip_address)
        self.assertIn(ip_address, ip_range)

    @test.attr(type='smoke')
    @test.idempotent_id('c9a685bd-e83f-499c-939f-9f7863ca259f')
    def test_show_port(self):
        # Verify the details of port
        body = self.client.show_port(self.port['id'])
        port = body['port']
        self.assertIn('id', port)
        # TODO(Santosh)- This is a temporary workaround to compare create_port
        # and show_port dict elements.Remove this once extra_dhcp_opts issue
        # gets fixed in neutron.( bug - 1365341.)
        self.assertThat(self.port,
                        custom_matchers.MatchesDictExceptForKeys
                        (port, excluded_keys=['extra_dhcp_opts']))

    @test.idempotent_id('45fcdaf2-dab0-4c13-ac6c-fcddfb579dbd')
    def test_show_port_fields(self):
        # Verify specific fields of a port
        fields = ['id', 'mac_address']
        body = self.client.show_port(self.port['id'],
                                     fields=fields)
        port = body['port']
        # Only the requested fields should come back, with correct values.
        self.assertEqual(sorted(port.keys()), sorted(fields))
        for field_name in fields:
            self.assertEqual(port[field_name], self.port[field_name])

    @test.attr(type='smoke')
    @test.idempotent_id('cf95b358-3e92-4a29-a148-52445e1ac50e')
    def test_list_ports(self):
        # Verify the port exists in the list of all ports
        body = self.client.list_ports()
        ports = [port['id'] for port in body['ports']
                 if port['id'] == self.port['id']]
        self.assertNotEmpty(ports, "Created port not found in the list")

    @test.idempotent_id('e7fe260b-1e79-4dd3-86d9-bec6a7959fc5')
    def test_port_list_filter_by_ip(self):
        # Create network and subnet
        network = self.create_network()
        subnet = self.create_subnet(network)
        self.addCleanup(self.client.delete_subnet, subnet['id'])
        # Create two ports specifying a fixed_ips
        address = self._get_ipaddress_from_tempest_conf()
        _fixed_ip_1 = str(address + 3)
        _fixed_ip_2 = str(address + 4)
        fixed_ips_1 = [{'ip_address': _fixed_ip_1}]
        port_1 = self.client.create_port(network_id=network['id'],
                                         fixed_ips=fixed_ips_1)
        self.addCleanup(self.client.delete_port, port_1['port']['id'])
        fixed_ips_2 = [{'ip_address': _fixed_ip_2}]
        port_2 = self.client.create_port(network_id=network['id'],
                                         fixed_ips=fixed_ips_2)
        self.addCleanup(self.client.delete_port, port_2['port']['id'])
        # List ports filtered by fixed_ips
        fixed_ips = 'ip_address=' + _fixed_ip_1
        port_list = self.client.list_ports(fixed_ips=fixed_ips)
        ports = port_list['ports']
        # Only the first port should match the fixed-IP filter.
        self.assertEqual(len(ports), 1)
        self.assertEqual(ports[0]['id'], port_1['port']['id'])
        self.assertEqual(ports[0]['fixed_ips'][0]['ip_address'],
                         _fixed_ip_1)
        self.assertEqual(ports[0]['network_id'], network['id'])

    @test.idempotent_id('5ad01ed0-0e6e-4c5d-8194-232801b15c72')
    def test_port_list_filter_by_router_id(self):
        # Create a router
        network = self.create_network()
        self.addCleanup(self.client.delete_network, network['id'])
        subnet = self.create_subnet(network)
        self.addCleanup(self.client.delete_subnet, subnet['id'])
        router = self.create_router(data_utils.rand_name('router-'))
        self.addCleanup(self.client.delete_router, router['id'])
        port = self.client.create_port(network_id=network['id'])
        # Add router interface to port created above
        self.client.add_router_interface_with_port_id(
            router['id'], port['port']['id'])
        self.addCleanup(self.client.remove_router_interface_with_port_id,
                        router['id'], port['port']['id'])
        # List ports filtered by router_id
        port_list = self.client.list_ports(device_id=router['id'])
        ports = port_list['ports']
        # Exactly the one attached port should be owned by the router.
        self.assertEqual(len(ports), 1)
        self.assertEqual(ports[0]['id'], port['port']['id'])
        self.assertEqual(ports[0]['device_id'], router['id'])

    @test.idempotent_id('ff7f117f-f034-4e0e-abff-ccef05c454b4')
    def test_list_ports_fields(self):
        # Verify specific fields of ports
        fields = ['id', 'mac_address']
        body = self.client.list_ports(fields=fields)
        ports = body['ports']
        self.assertNotEmpty(ports, "Port list returned is empty")
        # Asserting the fields returned are correct
        for port in ports:
            self.assertEqual(sorted(fields), sorted(port.keys()))

    @test.idempotent_id('63aeadd4-3b49-427f-a3b1-19ca81f06270')
    def test_create_update_port_with_second_ip(self):
        # Create a network with two subnets
        network = self.create_network()
        self.addCleanup(self.client.delete_network, network['id'])
        subnet_1 = self.create_subnet(network)
        self.addCleanup(self.client.delete_subnet, subnet_1['id'])
        subnet_2 = self.create_subnet(network)
        self.addCleanup(self.client.delete_subnet, subnet_2['id'])
        fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
        fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
        fixed_ips = fixed_ip_1 + fixed_ip_2
        # Create a port with multiple IP addresses
        port = self.create_port(network,
                                fixed_ips=fixed_ips)
        self.addCleanup(self.client.delete_port, port['id'])
        self.assertEqual(2, len(port['fixed_ips']))
        check_fixed_ips = [subnet_1['id'], subnet_2['id']]
        for item in port['fixed_ips']:
            self.assertIn(item['subnet_id'], check_fixed_ips)
        # Update the port to return to a single IP address
        port = self.update_port(port, fixed_ips=fixed_ip_1)
        self.assertEqual(1, len(port['fixed_ips']))
        # Update the port with a second IP address from second subnet
        port = self.update_port(port, fixed_ips=fixed_ips)
        self.assertEqual(2, len(port['fixed_ips']))

    def _update_port_with_security_groups(self, security_groups_names):
        """Create a port, then update its security groups together with
        name/admin_state_up/fixed_ips and verify every change stuck.

        :param security_groups_names: names of the security groups to
            create and attach via the update.
        """
        subnet_1 = self.create_subnet(self.network)
        self.addCleanup(self.client.delete_subnet, subnet_1['id'])
        fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
        security_groups_list = list()
        for name in security_groups_names:
            group_create_body = self.client.create_security_group(
                name=name)
            self.addCleanup(self.client.delete_security_group,
                            group_create_body['security_group']['id'])
            security_groups_list.append(group_create_body['security_group']
                                        ['id'])
        # Create a port
        sec_grp_name = data_utils.rand_name('secgroup')
        security_group = self.client.create_security_group(name=sec_grp_name)
        self.addCleanup(self.client.delete_security_group,
                        security_group['security_group']['id'])
        post_body = {
            "name": data_utils.rand_name('port-'),
            "security_groups": [security_group['security_group']['id']],
            "network_id": self.network['id'],
            "admin_state_up": True,
            "fixed_ips": fixed_ip_1}
        body = self.client.create_port(**post_body)
        self.addCleanup(self.client.delete_port, body['port']['id'])
        port = body['port']
        # Update the port with security groups
        subnet_2 = self.create_subnet(self.network)
        fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
        update_body = {"name": data_utils.rand_name('port-'),
                       "admin_state_up": False,
                       "fixed_ips": fixed_ip_2,
                       "security_groups": security_groups_list}
        body = self.client.update_port(port['id'], **update_body)
        port_show = body['port']
        # Verify the security groups and other attributes updated to port
        exclude_keys = set(port_show).symmetric_difference(update_body)
        exclude_keys.add('fixed_ips')
        exclude_keys.add('security_groups')
        self.assertThat(port_show, custom_matchers.MatchesDictExceptForKeys(
            update_body, exclude_keys))
        self.assertEqual(fixed_ip_2[0]['subnet_id'],
                         port_show['fixed_ips'][0]['subnet_id'])
        for security_group in security_groups_list:
            self.assertIn(security_group, port_show['security_groups'])

    @test.idempotent_id('58091b66-4ff4-4cc1-a549-05d60c7acd1a')
    def test_update_port_with_security_group_and_extra_attributes(self):
        # Single security group plus extra attribute updates.
        self._update_port_with_security_groups(
            [data_utils.rand_name('secgroup')])

    @test.idempotent_id('edf6766d-3d40-4621-bc6e-2521a44c257d')
    def test_update_port_with_two_security_groups_and_extra_attributes(self):
        # Two security groups plus extra attribute updates.
        self._update_port_with_security_groups(
            [data_utils.rand_name('secgroup'),
             data_utils.rand_name('secgroup')])

    @test.idempotent_id('13e95171-6cbd-489c-9d7c-3f9c58215c18')
    def test_create_show_delete_port_user_defined_mac(self):
        # Create a port for a legal mac
        body = self.client.create_port(network_id=self.network['id'])
        old_port = body['port']
        # Free the MAC by deleting the port, then reuse it explicitly.
        free_mac_address = old_port['mac_address']
        self.client.delete_port(old_port['id'])
        # Create a new port with user defined mac
        body = self.client.create_port(network_id=self.network['id'],
                                       mac_address=free_mac_address)
        self.addCleanup(self.client.delete_port, body['port']['id'])
        port = body['port']
        body = self.client.show_port(port['id'])
        show_port = body['port']
        self.assertEqual(free_mac_address,
                         show_port['mac_address'])

    @test.attr(type='smoke')
    @test.idempotent_id('4179dcb9-1382-4ced-84fe-1b91c54f5735')
    def test_create_port_with_no_securitygroups(self):
        # A port created with security_groups=[] must report an empty
        # (but present) security_groups list.
        network = self.create_network()
        self.addCleanup(self.client.delete_network, network['id'])
        subnet = self.create_subnet(network)
        self.addCleanup(self.client.delete_subnet, subnet['id'])
        port = self.create_port(network, security_groups=[])
        self.addCleanup(self.client.delete_port, port['id'])
        self.assertIsNotNone(port['security_groups'])
        self.assertEmpty(port['security_groups'])
class PortsAdminExtendedAttrsTestJSON(base.BaseAdminNetworkTest):
    """Tests for the admin-only ``binding:*`` extended port attributes."""

    @classmethod
    def resource_setup(cls):
        super(PortsAdminExtendedAttrsTestJSON, cls).resource_setup()
        cls.network = cls.create_network()
        # The tests only need a network and a host id to bind ports to.
        # Fix (bug 1471689): the previous identity-client/tenant lookup
        # here was unused by every test in this class and could fail the
        # whole class during setup, so it has been removed.
        cls.host_id = socket.gethostname()

    @test.idempotent_id('8e8569c1-9ac7-44db-8bc1-f5fb2814f29b')
    def test_create_port_binding_ext_attr(self):
        # A port created with binding:host_id must report it back.
        post_body = {"network_id": self.network['id'],
                     "binding:host_id": self.host_id}
        body = self.admin_client.create_port(**post_body)
        port = body['port']
        self.addCleanup(self.admin_client.delete_port, port['id'])
        host_id = port['binding:host_id']
        self.assertIsNotNone(host_id)
        self.assertEqual(self.host_id, host_id)

    @test.idempotent_id('6f6c412c-711f-444d-8502-0ac30fbf5dd5')
    def test_update_port_binding_ext_attr(self):
        # binding:host_id can be set after creation via update.
        post_body = {"network_id": self.network['id']}
        body = self.admin_client.create_port(**post_body)
        port = body['port']
        self.addCleanup(self.admin_client.delete_port, port['id'])
        update_body = {"binding:host_id": self.host_id}
        body = self.admin_client.update_port(port['id'], **update_body)
        updated_port = body['port']
        host_id = updated_port['binding:host_id']
        self.assertIsNotNone(host_id)
        self.assertEqual(self.host_id, host_id)

    @test.idempotent_id('1c82a44a-6c6e-48ff-89e1-abe7eaf8f9f8')
    def test_list_ports_binding_ext_attr(self):
        # Create a new port
        post_body = {"network_id": self.network['id']}
        body = self.admin_client.create_port(**post_body)
        port = body['port']
        self.addCleanup(self.admin_client.delete_port, port['id'])
        # Update the port's binding attributes so that is now 'bound'
        # to a host
        update_body = {"binding:host_id": self.host_id}
        self.admin_client.update_port(port['id'], **update_body)
        # List all ports, ensure new port is part of list and its binding
        # attributes are set and accurate
        body = self.admin_client.list_ports()
        ports_list = body['ports']
        pids_list = [p['id'] for p in ports_list]
        self.assertIn(port['id'], pids_list)
        listed_port = [p for p in ports_list if p['id'] == port['id']]
        self.assertEqual(1, len(listed_port),
                         'Multiple ports listed with id %s in ports listing: '
                         '%s' % (port['id'], ports_list))
        self.assertEqual(self.host_id, listed_port[0]['binding:host_id'])

    @test.idempotent_id('b54ac0ff-35fc-4c79-9ca3-c7dbd4ea4f13')
    def test_show_port_binding_ext_attr(self):
        # show_port must return the same binding attributes as create.
        body = self.admin_client.create_port(network_id=self.network['id'])
        port = body['port']
        self.addCleanup(self.admin_client.delete_port, port['id'])
        body = self.admin_client.show_port(port['id'])
        show_port = body['port']
        self.assertEqual(port['binding:host_id'],
                         show_port['binding:host_id'])
        self.assertEqual(port['binding:vif_type'],
                         show_port['binding:vif_type'])
        self.assertEqual(port['binding:vif_details'],
                         show_port['binding:vif_details'])
class PortsIpV6TestJSON(PortsTestJSON):
    # Re-run the whole port test suite over IPv6 by switching the
    # version and the tenant network CIDR/mask configuration.
    _ip_version = 6
    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
    _tenant_network_mask_bits = CONF.network.tenant_network_v6_mask_bits
class PortsAdminExtendedAttrsIpV6TestJSON(PortsAdminExtendedAttrsTestJSON):
    # Re-run the admin binding-attribute tests over IPv6 by switching
    # the version and the tenant network CIDR/mask configuration.
    _ip_version = 6
    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
    _tenant_network_mask_bits = CONF.network.tenant_network_v6_mask_bits
Remove unnecessary code in PortsAdminExtendedAttrsTest

Unnecessary code in this test caused a needless failure.
This change fixes it.

Change-Id: Ia60a8460ac30546b22f8e18d685746201e028325
Closes-Bug: 1471689
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
import netaddr
from tempest_lib.common.utils import data_utils
from tempest.api.network import base
from tempest.api.network import base_security_groups as sec_base
from tempest.common import custom_matchers
from tempest import config
from tempest import test
CONF = config.CONF
class PortsTestJSON(sec_base.BaseSecGroupTest):
"""
Test the following operations for ports:
port create
port delete
port list
port show
port update
"""
@classmethod
def resource_setup(cls):
super(PortsTestJSON, cls).resource_setup()
cls.network = cls.create_network()
cls.port = cls.create_port(cls.network)
def _delete_port(self, port_id):
self.client.delete_port(port_id)
body = self.client.list_ports()
ports_list = body['ports']
self.assertFalse(port_id in [n['id'] for n in ports_list])
@test.attr(type='smoke')
@test.idempotent_id('c72c1c0c-2193-4aca-aaa4-b1442640f51c')
def test_create_update_delete_port(self):
# Verify port creation
body = self.client.create_port(network_id=self.network['id'])
port = body['port']
# Schedule port deletion with verification upon test completion
self.addCleanup(self._delete_port, port['id'])
self.assertTrue(port['admin_state_up'])
# Verify port update
new_name = "New_Port"
body = self.client.update_port(port['id'],
name=new_name,
admin_state_up=False)
updated_port = body['port']
self.assertEqual(updated_port['name'], new_name)
self.assertFalse(updated_port['admin_state_up'])
@test.idempotent_id('67f1b811-f8db-43e2-86bd-72c074d4a42c')
def test_create_bulk_port(self):
network1 = self.network
name = data_utils.rand_name('network-')
network2 = self.create_network(network_name=name)
network_list = [network1['id'], network2['id']]
port_list = [{'network_id': net_id} for net_id in network_list]
body = self.client.create_bulk_port(port_list)
created_ports = body['ports']
port1 = created_ports[0]
port2 = created_ports[1]
self.addCleanup(self._delete_port, port1['id'])
self.addCleanup(self._delete_port, port2['id'])
self.assertEqual(port1['network_id'], network1['id'])
self.assertEqual(port2['network_id'], network2['id'])
self.assertTrue(port1['admin_state_up'])
self.assertTrue(port2['admin_state_up'])
@classmethod
def _get_ipaddress_from_tempest_conf(cls):
"""Return first subnet gateway for configured CIDR """
if cls._ip_version == 4:
cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
elif cls._ip_version == 6:
cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
return netaddr.IPAddress(cidr)
@test.attr(type='smoke')
@test.idempotent_id('0435f278-40ae-48cb-a404-b8a087bc09b1')
def test_create_port_in_allowed_allocation_pools(self):
network = self.create_network()
net_id = network['id']
address = self._get_ipaddress_from_tempest_conf()
allocation_pools = {'allocation_pools': [{'start': str(address + 4),
'end': str(address + 6)}]}
subnet = self.create_subnet(network, **allocation_pools)
self.addCleanup(self.client.delete_subnet, subnet['id'])
body = self.client.create_port(network_id=net_id)
self.addCleanup(self.client.delete_port, body['port']['id'])
port = body['port']
ip_address = port['fixed_ips'][0]['ip_address']
start_ip_address = allocation_pools['allocation_pools'][0]['start']
end_ip_address = allocation_pools['allocation_pools'][0]['end']
ip_range = netaddr.IPRange(start_ip_address, end_ip_address)
self.assertIn(ip_address, ip_range)
@test.attr(type='smoke')
@test.idempotent_id('c9a685bd-e83f-499c-939f-9f7863ca259f')
def test_show_port(self):
# Verify the details of port
body = self.client.show_port(self.port['id'])
port = body['port']
self.assertIn('id', port)
# TODO(Santosh)- This is a temporary workaround to compare create_port
# and show_port dict elements.Remove this once extra_dhcp_opts issue
# gets fixed in neutron.( bug - 1365341.)
self.assertThat(self.port,
custom_matchers.MatchesDictExceptForKeys
(port, excluded_keys=['extra_dhcp_opts']))
@test.idempotent_id('45fcdaf2-dab0-4c13-ac6c-fcddfb579dbd')
def test_show_port_fields(self):
# Verify specific fields of a port
fields = ['id', 'mac_address']
body = self.client.show_port(self.port['id'],
fields=fields)
port = body['port']
self.assertEqual(sorted(port.keys()), sorted(fields))
for field_name in fields:
self.assertEqual(port[field_name], self.port[field_name])
@test.attr(type='smoke')
@test.idempotent_id('cf95b358-3e92-4a29-a148-52445e1ac50e')
def test_list_ports(self):
# Verify the port exists in the list of all ports
body = self.client.list_ports()
ports = [port['id'] for port in body['ports']
if port['id'] == self.port['id']]
self.assertNotEmpty(ports, "Created port not found in the list")
@test.idempotent_id('e7fe260b-1e79-4dd3-86d9-bec6a7959fc5')
def test_port_list_filter_by_ip(self):
# Create network and subnet
network = self.create_network()
subnet = self.create_subnet(network)
self.addCleanup(self.client.delete_subnet, subnet['id'])
# Create two ports specifying a fixed_ips
address = self._get_ipaddress_from_tempest_conf()
_fixed_ip_1 = str(address + 3)
_fixed_ip_2 = str(address + 4)
fixed_ips_1 = [{'ip_address': _fixed_ip_1}]
port_1 = self.client.create_port(network_id=network['id'],
fixed_ips=fixed_ips_1)
self.addCleanup(self.client.delete_port, port_1['port']['id'])
fixed_ips_2 = [{'ip_address': _fixed_ip_2}]
port_2 = self.client.create_port(network_id=network['id'],
fixed_ips=fixed_ips_2)
self.addCleanup(self.client.delete_port, port_2['port']['id'])
# List ports filtered by fixed_ips
fixed_ips = 'ip_address=' + _fixed_ip_1
port_list = self.client.list_ports(fixed_ips=fixed_ips)
ports = port_list['ports']
self.assertEqual(len(ports), 1)
self.assertEqual(ports[0]['id'], port_1['port']['id'])
self.assertEqual(ports[0]['fixed_ips'][0]['ip_address'],
_fixed_ip_1)
self.assertEqual(ports[0]['network_id'], network['id'])
@test.idempotent_id('5ad01ed0-0e6e-4c5d-8194-232801b15c72')
def test_port_list_filter_by_router_id(self):
    """Filter the port listing by device_id (the router) and verify it."""
    # Create a router
    network = self.create_network()
    self.addCleanup(self.client.delete_network, network['id'])
    subnet = self.create_subnet(network)
    self.addCleanup(self.client.delete_subnet, subnet['id'])
    router = self.create_router(data_utils.rand_name('router-'))
    self.addCleanup(self.client.delete_router, router['id'])
    port = self.client.create_port(network_id=network['id'])
    # Add router interface to port created above
    self.client.add_router_interface_with_port_id(
        router['id'], port['port']['id'])
    # NOTE(review): no delete_port cleanup is registered for this port;
    # presumably removing the router interface releases it -- confirm.
    self.addCleanup(self.client.remove_router_interface_with_port_id,
                    router['id'], port['port']['id'])
    # List ports filtered by router_id
    port_list = self.client.list_ports(device_id=router['id'])
    ports = port_list['ports']
    # Exactly one port should be bound to the router.
    self.assertEqual(len(ports), 1)
    self.assertEqual(ports[0]['id'], port['port']['id'])
    self.assertEqual(ports[0]['device_id'], router['id'])
@test.idempotent_id('ff7f117f-f034-4e0e-abff-ccef05c454b4')
def test_list_ports_fields(self):
    """List ports restricted to selected fields; only those may appear."""
    requested = ['id', 'mac_address']
    body = self.client.list_ports(fields=requested)
    listed = body['ports']
    self.assertNotEmpty(listed, "Port list returned is empty")
    # Every returned port must expose exactly the requested fields.
    for entry in listed:
        self.assertEqual(sorted(requested), sorted(entry.keys()))
@test.idempotent_id('63aeadd4-3b49-427f-a3b1-19ca81f06270')
def test_create_update_port_with_second_ip(self):
    """Create a port with two fixed IPs, shrink to one, grow back to two."""
    # Create a network with two subnets
    network = self.create_network()
    self.addCleanup(self.client.delete_network, network['id'])
    subnet_1 = self.create_subnet(network)
    self.addCleanup(self.client.delete_subnet, subnet_1['id'])
    subnet_2 = self.create_subnet(network)
    self.addCleanup(self.client.delete_subnet, subnet_2['id'])
    fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
    fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
    fixed_ips = fixed_ip_1 + fixed_ip_2
    # Create a port with multiple IP addresses
    port = self.create_port(network,
                            fixed_ips=fixed_ips)
    self.addCleanup(self.client.delete_port, port['id'])
    self.assertEqual(2, len(port['fixed_ips']))
    # Each assigned fixed IP must come from one of the two subnets.
    check_fixed_ips = [subnet_1['id'], subnet_2['id']]
    for item in port['fixed_ips']:
        self.assertIn(item['subnet_id'], check_fixed_ips)
    # Update the port to return to a single IP address
    port = self.update_port(port, fixed_ips=fixed_ip_1)
    self.assertEqual(1, len(port['fixed_ips']))
    # Update the port with a second IP address from second subnet
    port = self.update_port(port, fixed_ips=fixed_ips)
    self.assertEqual(2, len(port['fixed_ips']))
def _update_port_with_security_groups(self, security_groups_names):
    """Create a port, then update it with the named security groups.

    Creates one security group per entry in *security_groups_names*,
    creates a port on a fresh subnet with its own temporary group, then
    updates the port's name, admin state, fixed IP and security groups
    and verifies each updated attribute on the returned port.
    """
    subnet_1 = self.create_subnet(self.network)
    self.addCleanup(self.client.delete_subnet, subnet_1['id'])
    fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
    # Security groups that will later be attached via the update call.
    security_groups_list = list()
    for name in security_groups_names:
        group_create_body = self.client.create_security_group(
            name=name)
        self.addCleanup(self.client.delete_security_group,
                        group_create_body['security_group']['id'])
        security_groups_list.append(group_create_body['security_group']
                                    ['id'])
    # Create a port
    sec_grp_name = data_utils.rand_name('secgroup')
    security_group = self.client.create_security_group(name=sec_grp_name)
    self.addCleanup(self.client.delete_security_group,
                    security_group['security_group']['id'])
    post_body = {
        "name": data_utils.rand_name('port-'),
        "security_groups": [security_group['security_group']['id']],
        "network_id": self.network['id'],
        "admin_state_up": True,
        "fixed_ips": fixed_ip_1}
    body = self.client.create_port(**post_body)
    self.addCleanup(self.client.delete_port, body['port']['id'])
    port = body['port']
    # Update the port with security groups
    subnet_2 = self.create_subnet(self.network)
    fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
    update_body = {"name": data_utils.rand_name('port-'),
                   "admin_state_up": False,
                   "fixed_ips": fixed_ip_2,
                   "security_groups": security_groups_list}
    body = self.client.update_port(port['id'], **update_body)
    port_show = body['port']
    # Verify the security groups and other attributes updated to port.
    # fixed_ips and security_groups are checked element-wise below, so
    # exclude them (plus keys present on only one side) from the match.
    exclude_keys = set(port_show).symmetric_difference(update_body)
    exclude_keys.add('fixed_ips')
    exclude_keys.add('security_groups')
    self.assertThat(port_show, custom_matchers.MatchesDictExceptForKeys(
        update_body, exclude_keys))
    self.assertEqual(fixed_ip_2[0]['subnet_id'],
                     port_show['fixed_ips'][0]['subnet_id'])
    for security_group in security_groups_list:
        self.assertIn(security_group, port_show['security_groups'])
@test.idempotent_id('58091b66-4ff4-4cc1-a549-05d60c7acd1a')
def test_update_port_with_security_group_and_extra_attributes(self):
    """Exercise the helper with a single randomly named security group."""
    group_names = [data_utils.rand_name('secgroup')]
    self._update_port_with_security_groups(group_names)
@test.idempotent_id('edf6766d-3d40-4621-bc6e-2521a44c257d')
def test_update_port_with_two_security_groups_and_extra_attributes(self):
    """Exercise the helper with two randomly named security groups."""
    group_names = [data_utils.rand_name('secgroup'),
                   data_utils.rand_name('secgroup')]
    self._update_port_with_security_groups(group_names)
@test.idempotent_id('13e95171-6cbd-489c-9d7c-3f9c58215c18')
def test_create_show_delete_port_user_defined_mac(self):
    """Create a port with a user-supplied MAC and verify it via show."""
    # Create a port for a legal mac: let the backend allocate a MAC,
    # then free it again by deleting the port so it can be reused below.
    body = self.client.create_port(network_id=self.network['id'])
    old_port = body['port']
    free_mac_address = old_port['mac_address']
    self.client.delete_port(old_port['id'])
    # Create a new port with user defined mac
    body = self.client.create_port(network_id=self.network['id'],
                                   mac_address=free_mac_address)
    self.addCleanup(self.client.delete_port, body['port']['id'])
    port = body['port']
    body = self.client.show_port(port['id'])
    show_port = body['port']
    self.assertEqual(free_mac_address,
                     show_port['mac_address'])
@test.attr(type='smoke')
@test.idempotent_id('4179dcb9-1382-4ced-84fe-1b91c54f5735')
def test_create_port_with_no_securitygroups(self):
    """A port created with security_groups=[] must keep an empty list."""
    network = self.create_network()
    self.addCleanup(self.client.delete_network, network['id'])
    subnet = self.create_subnet(network)
    self.addCleanup(self.client.delete_subnet, subnet['id'])
    port = self.create_port(network, security_groups=[])
    self.addCleanup(self.client.delete_port, port['id'])
    # The attribute must be present AND exactly empty.
    self.assertIsNotNone(port['security_groups'])
    self.assertEmpty(port['security_groups'])
class PortsAdminExtendedAttrsTestJSON(base.BaseAdminNetworkTest):
    """Tests for the admin-only port binding (binding:*) extended attributes."""

    @classmethod
    def setup_clients(cls):
        super(PortsAdminExtendedAttrsTestJSON, cls).setup_clients()
        cls.identity_client = cls.os_adm.identity_client

    @classmethod
    def resource_setup(cls):
        super(PortsAdminExtendedAttrsTestJSON, cls).resource_setup()
        cls.network = cls.create_network()
        # Ports are bound to this test host's name.
        cls.host_id = socket.gethostname()

    @test.idempotent_id('8e8569c1-9ac7-44db-8bc1-f5fb2814f29b')
    def test_create_port_binding_ext_attr(self):
        """Create a port with binding:host_id set and verify it sticks."""
        post_body = {"network_id": self.network['id'],
                     "binding:host_id": self.host_id}
        body = self.admin_client.create_port(**post_body)
        port = body['port']
        self.addCleanup(self.admin_client.delete_port, port['id'])
        host_id = port['binding:host_id']
        self.assertIsNotNone(host_id)
        self.assertEqual(self.host_id, host_id)

    @test.idempotent_id('6f6c412c-711f-444d-8502-0ac30fbf5dd5')
    def test_update_port_binding_ext_attr(self):
        """Update an existing port's binding:host_id and verify it."""
        post_body = {"network_id": self.network['id']}
        body = self.admin_client.create_port(**post_body)
        port = body['port']
        self.addCleanup(self.admin_client.delete_port, port['id'])
        update_body = {"binding:host_id": self.host_id}
        body = self.admin_client.update_port(port['id'], **update_body)
        updated_port = body['port']
        host_id = updated_port['binding:host_id']
        self.assertIsNotNone(host_id)
        self.assertEqual(self.host_id, host_id)

    @test.idempotent_id('1c82a44a-6c6e-48ff-89e1-abe7eaf8f9f8')
    def test_list_ports_binding_ext_attr(self):
        """A bound port must be listed once with accurate binding attrs."""
        # Create a new port
        post_body = {"network_id": self.network['id']}
        body = self.admin_client.create_port(**post_body)
        port = body['port']
        self.addCleanup(self.admin_client.delete_port, port['id'])
        # Update the port's binding attributes so that is now 'bound'
        # to a host
        update_body = {"binding:host_id": self.host_id}
        self.admin_client.update_port(port['id'], **update_body)
        # List all ports, ensure new port is part of list and its binding
        # attributes are set and accurate
        body = self.admin_client.list_ports()
        ports_list = body['ports']
        pids_list = [p['id'] for p in ports_list]
        self.assertIn(port['id'], pids_list)
        listed_port = [p for p in ports_list if p['id'] == port['id']]
        self.assertEqual(1, len(listed_port),
                         'Multiple ports listed with id %s in ports listing: '
                         '%s' % (port['id'], ports_list))
        self.assertEqual(self.host_id, listed_port[0]['binding:host_id'])

    @test.idempotent_id('b54ac0ff-35fc-4c79-9ca3-c7dbd4ea4f13')
    def test_show_port_binding_ext_attr(self):
        """show_port must return the same binding:* values as create."""
        body = self.admin_client.create_port(network_id=self.network['id'])
        port = body['port']
        self.addCleanup(self.admin_client.delete_port, port['id'])
        body = self.admin_client.show_port(port['id'])
        show_port = body['port']
        self.assertEqual(port['binding:host_id'],
                         show_port['binding:host_id'])
        self.assertEqual(port['binding:vif_type'],
                         show_port['binding:vif_type'])
        self.assertEqual(port['binding:vif_details'],
                         show_port['binding:vif_details'])
class PortsIpV6TestJSON(PortsTestJSON):
    """Re-run the port tests against an IPv6 tenant network."""
    _ip_version = 6
    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
    _tenant_network_mask_bits = CONF.network.tenant_network_v6_mask_bits
class PortsAdminExtendedAttrsIpV6TestJSON(PortsAdminExtendedAttrsTestJSON):
    """Re-run the admin binding-attribute tests on an IPv6 tenant network."""
    _ip_version = 6
    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
    _tenant_network_mask_bits = CONF.network.tenant_network_v6_mask_bits
|
# -*- coding: utf-8 -*-
"""
VK.com OpenAPI, OAuth2 and Iframe application OAuth2 backends, docs at:
http://psa.matiasaguirre.net/docs/backends/vk.html
"""
from time import time
from hashlib import md5
from social.backends.base import BaseAuth
from social.backends.oauth import BaseOAuth2
from social.exceptions import AuthTokenRevoked, AuthException
class VKontakteOpenAPI(BaseAuth):
    """VK.COM OpenAPI authentication backend"""
    name = 'vk-openapi'
    ID_KEY = 'id'

    def get_user_details(self, response):
        """Return user details from VK.com request"""
        nickname = response.get('nickname') or ''
        return {
            # Fall back to the numeric id when the user has no nickname.
            'username': response['id'] if len(nickname) == 0 else nickname,
            'email': '',
            'fullname': '',
            # VK's OpenAPI sends names as one-element lists; unwrap them.
            'first_name': response.get('first_name')[0]
                                if 'first_name' in response else '',
            'last_name': response.get('last_name')[0]
                                if 'last_name' in response else ''
        }

    def user_data(self, access_token, *args, **kwargs):
        """The request payload already carries the user data."""
        return self.data

    def auth_html(self):
        """Returns local VK authentication page, not necessary for
        VK to authenticate.
        """
        ctx = {'VK_APP_ID': self.setting('APP_ID'),
               'VK_COMPLETE_URL': self.redirect_uri}
        local_html = self.setting('LOCAL_HTML', 'vkontakte.html')
        return self.strategy.render_html(tpl=local_html, context=ctx)

    def auth_complete(self, *args, **kwargs):
        """Performs check of authentication in VKontakte, returns User if
        succeeded.

        Validates the md5 signature and expiry of the cookie set by VK's
        client-side widget before authenticating.
        """
        app_cookie = 'vk_app_' + self.setting('APP_ID')
        if 'id' not in self.data or not self.strategy.cookie_get(app_cookie):
            raise ValueError('VK.com authentication is not completed')
        key, secret = self.get_key_and_secret()
        cookie_dict = dict(item.split('=') for item in
                           self.strategy.cookie_get(app_cookie).split('&'))
        check_str = ''.join(item + '=' + cookie_dict[item]
                            for item in ['expire', 'mid', 'secret', 'sid'])
        # Named `digest` (was `hash`) to avoid shadowing the builtin.
        digest = md5((check_str + secret).encode('utf-8')).hexdigest()
        if digest != cookie_dict['sig'] or int(cookie_dict['expire']) < time():
            raise ValueError('VK.com authentication failed: invalid hash')
        else:
            kwargs.update({'backend': self,
                           'response': self.user_data(cookie_dict['mid'])})
            return self.strategy.authenticate(*args, **kwargs)

    def uses_redirect(self):
        """VK.com does not require visiting server url in order
        to do authentication, so auth_xxx methods are not needed to be called.
        Their current implementation is just an example"""
        return False
class VKOAuth2(BaseOAuth2):
    """VKOAuth2 authentication backend"""
    name = 'vk-oauth2'
    ID_KEY = 'user_id'
    AUTHORIZATION_URL = 'http://oauth.vk.com/authorize'
    ACCESS_TOKEN_URL = 'https://oauth.vk.com/access_token'
    ACCESS_TOKEN_METHOD = 'POST'
    EXTRA_DATA = [
        ('id', 'id'),
        ('expires_in', 'expires')
    ]

    def get_user_details(self, response):
        """Return user details from VK.com account"""
        return {'username': response.get('screen_name'),
                'email': '',
                'first_name': response.get('first_name'),
                'last_name': response.get('last_name')}

    def user_data(self, access_token, response, *args, **kwargs):
        """Loads user data from service.

        Raises AuthTokenRevoked for VK error code 5 (revoked token) and
        AuthException for any other API error; returns None when the API
        call itself failed.
        """
        request_data = ['first_name', 'last_name', 'screen_name', 'nickname',
                        'photo'] + self.setting('EXTRA_DATA', [])
        fields = ','.join(set(request_data))
        data = vk_api(self, 'users.get', {
            'access_token': access_token,
            'fields': fields,
            'uids': response.get('user_id')
        })
        # vk_api returns None on transport/parsing failures; guard before
        # .get() -- the previous code crashed with AttributeError here.
        if data and data.get('error'):
            error = data['error']
            msg = error.get('error_msg', 'Unknown error')
            if error.get('error_code') == 5:
                raise AuthTokenRevoked(self, msg)
            else:
                raise AuthException(self, msg)
        if data:
            data = data.get('response')[0]
            data['user_photo'] = data.get('photo')  # Backward compatibility
        return data
class VKAppOAuth2(VKOAuth2):
    """VK.com Application Authentication support"""
    name = 'vk-app'

    def user_profile(self, user_id, access_token=None):
        """Fetch the profile for *user_id* via the getProfiles API call."""
        request_data = ['first_name', 'last_name', 'screen_name', 'nickname',
                        'photo'] + self.setting('EXTRA_DATA', [])
        fields = ','.join(set(request_data))
        data = {'uids': user_id, 'fields': fields}
        if access_token:
            data['access_token'] = access_token
        profiles = vk_api(self, 'getProfiles', data).get('response')
        if profiles:
            return profiles[0]

    def auth_complete(self, *args, **kwargs):
        """Authenticate an iframe application request.

        Returns None when required iframe parameters are missing or the
        user check fails; raises ValueError on a bad auth_key signature.
        """
        required_params = ('is_app_user', 'viewer_id', 'access_token',
                           'api_id')
        if not all(param in self.data for param in required_params):
            return None
        auth_key = self.data.get('auth_key')
        # Verify signature, if present
        key, secret = self.get_key_and_secret()
        if auth_key:
            check_key = md5('_'.join([key,
                                      self.data.get('viewer_id'),
                                      secret]).encode('utf-8')).hexdigest()
            if check_key != auth_key:
                raise ValueError('VK.com authentication failed: invalid '
                                 'auth key')
        user_check = self.setting('USERMODE')
        user_id = self.data.get('viewer_id')
        if user_check is not None:
            user_check = int(user_check)
            # Truthy default keeps unexpected USERMODE values from raising
            # NameError at the int(is_user) check below.
            is_user = '1'
            if user_check == 1:
                is_user = self.data.get('is_app_user')
            elif user_check == 2:
                is_user = vk_api(self, 'isAppUser',
                                 {'uid': user_id}).get('response', 0)
            if not int(is_user):
                return None
        auth_data = {
            'auth': self,
            'backend': self,
            'request': self.strategy.request,
            'response': {
                'user_id': user_id,
            }
        }
        auth_data['response'].update(self.user_profile(user_id))
        return self.strategy.authenticate(*args, **auth_data)
def vk_api(backend, method, data):
    """Call the VK.com API *method* with *data* and return parsed JSON.

    Docs: https://vk.com/apiclub and http://goo.gl/yLcaa
    When *data* carries no ``access_token`` the request is made as a
    server-side (signed) call. Returns None when the request or response
    parsing fails.
    """
    if 'access_token' not in data:
        # Server-side call: fill in protocol fields and sign the sorted
        # parameter string with the application secret.
        data.setdefault('v', '3.0')
        key, secret = backend.get_key_and_secret()
        data.setdefault('api_id', key)
        data['method'] = method
        data['format'] = 'json'
        url = 'http://api.vk.com/api.php'
        param_list = sorted(item + '=' + data[item] for item in data)
        data['sig'] = md5(
            (''.join(param_list) + secret).encode('utf-8')
        ).hexdigest()
    else:
        url = 'https://api.vk.com/method/' + method
    try:
        return backend.get_json(url, params=data)
    except (TypeError, KeyError, IOError, ValueError, IndexError):
        return None
Switch VK OpenAPI to session instead of cookies.
Refs #250
# -*- coding: utf-8 -*-
"""
VK.com OpenAPI, OAuth2 and Iframe application OAuth2 backends, docs at:
http://psa.matiasaguirre.net/docs/backends/vk.html
"""
from time import time
from hashlib import md5
from social.utils import parse_qs
from social.backends.base import BaseAuth
from social.backends.oauth import BaseOAuth2
from social.exceptions import AuthTokenRevoked, AuthException
class VKontakteOpenAPI(BaseAuth):
    """VK.COM OpenAPI authentication backend"""
    name = 'vk-openapi'
    ID_KEY = 'id'

    def get_user_details(self, response):
        """Return user details from VK.com request"""
        nickname = response.get('nickname') or ''
        return {
            # Fall back to the numeric id when the user has no nickname.
            'username': response['id'] if len(nickname) == 0 else nickname,
            'email': '',
            'fullname': '',
            # VK's OpenAPI sends names as one-element lists; unwrap them.
            'first_name': response.get('first_name')[0]
                                if 'first_name' in response else '',
            'last_name': response.get('last_name')[0]
                                if 'last_name' in response else ''
        }

    def user_data(self, access_token, *args, **kwargs):
        """The request payload already carries the user data."""
        return self.data

    def auth_html(self):
        """Returns local VK authentication page, not necessary for
        VK to authenticate.
        """
        ctx = {'VK_APP_ID': self.setting('APP_ID'),
               'VK_COMPLETE_URL': self.redirect_uri}
        local_html = self.setting('LOCAL_HTML', 'vkontakte.html')
        return self.strategy.render_html(tpl=local_html, context=ctx)

    def auth_complete(self, *args, **kwargs):
        """Performs check of authentication in VKontakte, returns User if
        succeeded.

        Validates the md5 signature and expiry of the session value set
        during the OpenAPI handshake before authenticating.
        """
        session_value = self.strategy.session_get(
            'vk_app_' + self.setting('APP_ID')
        )
        if 'id' not in self.data or not session_value:
            raise ValueError('VK.com authentication is not completed')
        mapping = parse_qs(session_value)
        check_str = ''.join(item + '=' + mapping[item]
                            for item in ['expire', 'mid', 'secret', 'sid'])
        key, secret = self.get_key_and_secret()
        # Named `digest` (was `hash`) to avoid shadowing the builtin.
        digest = md5((check_str + secret).encode('utf-8')).hexdigest()
        if digest != mapping['sig'] or int(mapping['expire']) < time():
            raise ValueError('VK.com authentication failed: Invalid Hash')
        kwargs.update({'backend': self,
                       'response': self.user_data(mapping['mid'])})
        return self.strategy.authenticate(*args, **kwargs)

    def uses_redirect(self):
        """VK.com does not require visiting server url in order
        to do authentication, so auth_xxx methods are not needed to be called.
        Their current implementation is just an example"""
        return False
class VKOAuth2(BaseOAuth2):
    """VKOAuth2 authentication backend"""
    name = 'vk-oauth2'
    ID_KEY = 'user_id'
    AUTHORIZATION_URL = 'http://oauth.vk.com/authorize'
    ACCESS_TOKEN_URL = 'https://oauth.vk.com/access_token'
    ACCESS_TOKEN_METHOD = 'POST'
    EXTRA_DATA = [
        ('id', 'id'),
        ('expires_in', 'expires')
    ]

    def get_user_details(self, response):
        """Return user details from VK.com account"""
        return {'username': response.get('screen_name'),
                'email': '',
                'first_name': response.get('first_name'),
                'last_name': response.get('last_name')}

    def user_data(self, access_token, response, *args, **kwargs):
        """Loads user data from service.

        Raises AuthTokenRevoked for VK error code 5 (revoked token) and
        AuthException for any other API error; returns None when the API
        call itself failed.
        """
        request_data = ['first_name', 'last_name', 'screen_name', 'nickname',
                        'photo'] + self.setting('EXTRA_DATA', [])
        fields = ','.join(set(request_data))
        data = vk_api(self, 'users.get', {
            'access_token': access_token,
            'fields': fields,
            'uids': response.get('user_id')
        })
        # vk_api returns None on transport/parsing failures; guard before
        # .get() -- the previous code crashed with AttributeError here.
        if data and data.get('error'):
            error = data['error']
            msg = error.get('error_msg', 'Unknown error')
            if error.get('error_code') == 5:
                raise AuthTokenRevoked(self, msg)
            else:
                raise AuthException(self, msg)
        if data:
            data = data.get('response')[0]
            data['user_photo'] = data.get('photo')  # Backward compatibility
        return data
class VKAppOAuth2(VKOAuth2):
    """VK.com Application Authentication support"""
    name = 'vk-app'

    def user_profile(self, user_id, access_token=None):
        """Fetch the profile for *user_id* via the getProfiles API call."""
        request_data = ['first_name', 'last_name', 'screen_name', 'nickname',
                        'photo'] + self.setting('EXTRA_DATA', [])
        fields = ','.join(set(request_data))
        data = {'uids': user_id, 'fields': fields}
        if access_token:
            data['access_token'] = access_token
        profiles = vk_api(self, 'getProfiles', data).get('response')
        if profiles:
            return profiles[0]

    def auth_complete(self, *args, **kwargs):
        """Authenticate an iframe application request.

        Returns None when required iframe parameters are missing or the
        user check fails; raises ValueError on a bad auth_key signature.
        """
        required_params = ('is_app_user', 'viewer_id', 'access_token',
                           'api_id')
        if not all(param in self.data for param in required_params):
            return None
        auth_key = self.data.get('auth_key')
        # Verify signature, if present
        key, secret = self.get_key_and_secret()
        if auth_key:
            check_key = md5('_'.join([key,
                                      self.data.get('viewer_id'),
                                      secret]).encode('utf-8')).hexdigest()
            if check_key != auth_key:
                raise ValueError('VK.com authentication failed: invalid '
                                 'auth key')
        user_check = self.setting('USERMODE')
        user_id = self.data.get('viewer_id')
        if user_check is not None:
            user_check = int(user_check)
            # Truthy default keeps unexpected USERMODE values from raising
            # NameError at the int(is_user) check below.
            is_user = '1'
            if user_check == 1:
                is_user = self.data.get('is_app_user')
            elif user_check == 2:
                is_user = vk_api(self, 'isAppUser',
                                 {'uid': user_id}).get('response', 0)
            if not int(is_user):
                return None
        auth_data = {
            'auth': self,
            'backend': self,
            'request': self.strategy.request,
            'response': {
                'user_id': user_id,
            }
        }
        auth_data['response'].update(self.user_profile(user_id))
        return self.strategy.authenticate(*args, **auth_data)
def vk_api(backend, method, data):
    """Invoke the VK.com OpenAPI *method*, see:
        https://vk.com/apiclub
        http://goo.gl/yLcaa
    """
    server_side = 'access_token' not in data
    if server_side:
        # No user token: sign the request with the application secret.
        if 'v' not in data:
            data['v'] = '3.0'
        key, secret = backend.get_key_and_secret()
        if 'api_id' not in data:
            data['api_id'] = key
        data['method'] = method
        data['format'] = 'json'
        url = 'http://api.vk.com/api.php'
        signed = ''.join(sorted(name + '=' + data[name] for name in data))
        data['sig'] = md5((signed + secret).encode('utf-8')).hexdigest()
    else:
        url = 'https://api.vk.com/method/' + method
    try:
        return backend.get_json(url, params=data)
    except (TypeError, KeyError, IOError, ValueError, IndexError):
        return None
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
from txtorcon._metadata import __version__, __author__, __contact__
from txtorcon._metadata import __license__, __copyright__, __url__
from txtorcon.router import Router
from txtorcon.circuit import Circuit
from txtorcon.stream import Stream
from txtorcon.torcontrolprotocol import TorControlProtocol
from txtorcon.torcontrolprotocol import TorProtocolError
from txtorcon.torcontrolprotocol import TorProtocolFactory
from txtorcon.torcontrolprotocol import DEFAULT_VALUE
from txtorcon.torstate import TorState
from txtorcon.torstate import build_tor_connection
from txtorcon.torstate import build_local_tor_connection
from txtorcon.torconfig import TorConfig
from txtorcon.torconfig import HiddenService
from txtorcon.torconfig import EphemeralHiddenService
from txtorcon.torconfig import TorProcessProtocol
from txtorcon.torconfig import launch_tor
from txtorcon.torconfig import TorNotFound
from txtorcon.torinfo import TorInfo
from txtorcon.addrmap import AddrMap
from txtorcon.endpoints import TorOnionAddress
from txtorcon.endpoints import TorOnionListeningPort
from txtorcon.endpoints import TCPHiddenServiceEndpoint
from txtorcon.endpoints import TCPHiddenServiceEndpointParser
from txtorcon.endpoints import TorClientEndpoint
from txtorcon.endpoints import TorClientEndpointStringParser
from txtorcon.endpoints import IHiddenService, IProgressProvider
from txtorcon.endpoints import get_global_tor
from . import util
from . import interface
# CircuitListenerMixin is re-exported by __all__ below, so it must be
# imported here as well (otherwise `from txtorcon import *` fails with
# AttributeError).
from txtorcon.interface import (
    ITorControlProtocol,
    IStreamListener, IStreamAttacher, StreamListenerMixin,
    ICircuitContainer, ICircuitListener, CircuitListenerMixin,
    IRouterContainer, IAddrListener,
)
# Public API of the package; duplicate "TorInfo" and "IProgressProvider"
# entries removed.
__all__ = [
    "Router",
    "Circuit",
    "Stream",
    "TorControlProtocol", "TorProtocolError", "TorProtocolFactory",
    "TorState", "DEFAULT_VALUE",
    "TorInfo",
    "build_tor_connection", "build_local_tor_connection", "launch_tor",
    "TorNotFound", "TorConfig", "HiddenService", "EphemeralHiddenService",
    "TorProcessProtocol",
    "TCPHiddenServiceEndpoint", "TCPHiddenServiceEndpointParser",
    "TorClientEndpoint", "TorClientEndpointStringParser",
    "IHiddenService", "IProgressProvider",
    "TorOnionAddress", "TorOnionListeningPort",
    "get_global_tor",
    "AddrMap",
    "util", "interface",
    "ITorControlProtocol",
    "IStreamListener", "IStreamAttacher", "StreamListenerMixin",
    "ICircuitContainer", "ICircuitListener", "CircuitListenerMixin",
    "IRouterContainer", "IAddrListener",
    "__version__", "__author__", "__contact__",
    "__license__", "__copyright__", "__url__",
]
Add build_timeout_circuit to module scope
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
from txtorcon._metadata import __version__, __author__, __contact__
from txtorcon._metadata import __license__, __copyright__, __url__
from txtorcon.router import Router
from txtorcon.circuit import Circuit
from txtorcon.circuit import build_timeout_circuit
from txtorcon.stream import Stream
from txtorcon.torcontrolprotocol import TorControlProtocol
from txtorcon.torcontrolprotocol import TorProtocolError
from txtorcon.torcontrolprotocol import TorProtocolFactory
from txtorcon.torcontrolprotocol import DEFAULT_VALUE
from txtorcon.torstate import TorState
from txtorcon.torstate import build_tor_connection
from txtorcon.torstate import build_local_tor_connection
from txtorcon.torconfig import TorConfig
from txtorcon.torconfig import HiddenService
from txtorcon.torconfig import EphemeralHiddenService
from txtorcon.torconfig import TorProcessProtocol
from txtorcon.torconfig import launch_tor
from txtorcon.torconfig import TorNotFound
from txtorcon.torinfo import TorInfo
from txtorcon.addrmap import AddrMap
from txtorcon.endpoints import TorOnionAddress
from txtorcon.endpoints import TorOnionListeningPort
from txtorcon.endpoints import TCPHiddenServiceEndpoint
from txtorcon.endpoints import TCPHiddenServiceEndpointParser
from txtorcon.endpoints import TorClientEndpoint
from txtorcon.endpoints import TorClientEndpointStringParser
from txtorcon.endpoints import IHiddenService, IProgressProvider
from txtorcon.endpoints import get_global_tor
from . import util
from . import interface
# CircuitListenerMixin is re-exported by __all__ below, so it must be
# imported here as well (otherwise `from txtorcon import *` fails with
# AttributeError).
from txtorcon.interface import (
    ITorControlProtocol,
    IStreamListener, IStreamAttacher, StreamListenerMixin,
    ICircuitContainer, ICircuitListener, CircuitListenerMixin,
    IRouterContainer, IAddrListener,
)
# Public API of the package; duplicate "TorInfo" and "IProgressProvider"
# entries removed.
__all__ = [
    "Router",
    "Circuit",
    "Stream",
    "TorControlProtocol", "TorProtocolError", "TorProtocolFactory",
    "TorState", "DEFAULT_VALUE",
    "TorInfo",
    "build_tor_connection", "build_local_tor_connection", "launch_tor",
    "TorNotFound", "TorConfig", "HiddenService", "EphemeralHiddenService",
    "TorProcessProtocol",
    "TCPHiddenServiceEndpoint", "TCPHiddenServiceEndpointParser",
    "TorClientEndpoint", "TorClientEndpointStringParser",
    "IHiddenService", "IProgressProvider",
    "TorOnionAddress", "TorOnionListeningPort",
    "get_global_tor",
    "build_timeout_circuit",
    "AddrMap",
    "util", "interface",
    "ITorControlProtocol",
    "IStreamListener", "IStreamAttacher", "StreamListenerMixin",
    "ICircuitContainer", "ICircuitListener", "CircuitListenerMixin",
    "IRouterContainer", "IAddrListener",
    "__version__", "__author__", "__contact__",
    "__license__", "__copyright__", "__url__",
]
|
from django import template
from django_bitcoin import currency
import json
from decimal import Decimal
import urllib
from django.core.urlresolvers import reverse, NoReverseMatch
register = template.Library()
# currency conversion functions
@register.filter
def bitcoinformat(value):
    """Format a BTC amount to 8 decimals with trailing zeros stripped.

    None and non-numeric values are passed through unchanged.
    """
    if value is None:
        return None
    if not isinstance(value, (float, Decimal)):
        return value
    return ("%.8f" % value).rstrip('0').rstrip('.')
@register.filter
def currencyformat(value):
    """Format a fiat amount with two decimal places.

    None and non-numeric values are passed through unchanged.
    """
    if value is None:
        return None
    if not isinstance(value, (float, Decimal)):
        return value
    return ("%.2f" % value)
@register.filter
def btc2usd(value):
    """Convert a BTC amount to USD at the current exchange rate."""
    rate = currency.exchange.get_rate('USD')
    return (Decimal(value) * rate).quantize(Decimal("0.01"))
@register.filter
def usd2btc(value):
    """Convert a USD amount to BTC at the current exchange rate."""
    rate = currency.exchange.get_rate('USD')
    return (Decimal(value) / rate).quantize(Decimal("0.00000001"))
@register.filter
def btc2eur(value):
    """Convert a BTC amount to EUR at the current exchange rate."""
    rate = currency.exchange.get_rate('EUR')
    return (Decimal(value) * rate).quantize(Decimal("0.01"))
@register.filter
def eur2btc(value):
    """Convert a EUR amount to BTC at the current exchange rate."""
    rate = currency.exchange.get_rate('EUR')
    return (Decimal(value) / rate).quantize(Decimal("0.00000001"))
@register.filter
def btc2currency(value, other_currency="USD", rate_period="24h"):
    """Convert a BTC amount into *other_currency* and format the result."""
    if other_currency == "BTC":
        # Nothing to convert; just apply bitcoin formatting.
        return bitcoinformat(value)
    converted = currency.btc2currency(value, other_currency, rate_period)
    return currencyformat(converted)
@register.filter
def currency2btc(value, other_currency="USD", rate_period="24h"):
    """Convert an *other_currency* amount into BTC and format the result."""
    if other_currency == "BTC":
        # Nothing to convert; just apply currency formatting.
        return currencyformat(value)
    converted = currency.currency2btc(value, other_currency, rate_period)
    return bitcoinformat(converted)
@register.simple_tag
def exchangerates_json():
    """Emit the current exchange-rate table as a JSON string."""
    rate_table = currency.get_rate_table()
    return json.dumps(rate_table)
@register.inclusion_tag('wallet_history.html')
def wallet_history(wallet):
    """Provide the context for the wallet history inclusion template."""
    context = {'wallet': wallet}
    return context
@register.filter
def show_addr(address, arg):
    """Render *address* as a link to its blockexplorer.com page.

    ``arg == 'long'`` shows the full address; anything else shows only
    the first eight characters.
    """
    # note: i disapprove including somewhat unnecessary depencies such as this, especially since blockexplorer is unreliable service
    link = "<a href='http://blockexplorer.com/%s/'>%s</a>"
    label = address if arg == 'long' else address[:8]
    return link % (address, label)
@register.inclusion_tag('wallet_tagline.html')
def wallet_tagline(wallet):
    """Context for the wallet tagline: the wallet plus its USD balance."""
    balance_usd = btc2usd(wallet.total_balance())
    return {'wallet': wallet, 'balance_usd': balance_usd}
@register.inclusion_tag('bitcoin_payment_qr.html')
def bitcoin_payment_qr(address, amount=Decimal("0"), description='', display_currency=''):
    """Build the template context for a bitcoin payment QR code.

    When *display_currency* is set, also converts *amount* with the
    current exchange rate (two decimal places).
    """
    currency_amount = Decimal(0)
    if display_currency:
        currency_amount = (Decimal(amount) *
                           currency.exchange.get_rate(display_currency)).quantize(Decimal("0.01"))
    try:
        # Probe that the qrcode view is wired up before building links.
        reverse('qrcode', args=('dummy',))
    except NoReverseMatch:
        # `except X, e` was Python-2-only syntax and ImproperlyConfigured
        # was never imported (NameError); import it where it is needed.
        from django.core.exceptions import ImproperlyConfigured
        raise ImproperlyConfigured('Make sure you\'ve included django_bitcoin.urls')
    # Append the amount parameter only when the amount is positive.
    qr = "bitcoin:" + address + ("", "?amount=" + str(amount))[amount > 0]
    qr = urllib.quote(qr)
    address_qrcode = reverse('qrcode', args=(qr,))
    return {'address': address,
            'address_qrcode': address_qrcode,
            'amount': amount,
            'description': description,
            'display_currency': display_currency,
            'currency_amount': currency_amount,
            }
Removed comment.
from django import template
from django_bitcoin import currency
import json
from decimal import Decimal
import urllib
from django.core.urlresolvers import reverse, NoReverseMatch
register = template.Library()
# currency conversion functions
@register.filter
def bitcoinformat(value):
    """Format a BTC amount to 8 decimals with trailing zeros stripped.

    None passes through; other non-numeric values are stringified with
    trailing zeros/dots stripped.
    """
    if value is None:
        return None
    if not isinstance(value, (float, Decimal)):
        # NOTE(review): this also turns an int like 100 into "1" --
        # confirm only decimal-string inputs reach this branch.
        return str(value).rstrip('0').rstrip('.')
    return ("%.8f" % value).rstrip('0').rstrip('.')
@register.filter
def currencyformat(value):
    """Format a fiat amount with two decimal places.

    None passes through; other non-numeric values are stringified with
    trailing zeros/dots stripped.
    """
    if value is None:
        return None
    if not isinstance(value, (float, Decimal)):
        # NOTE(review): this also turns an int like 100 into "1" --
        # confirm only decimal-string inputs reach this branch.
        return str(value).rstrip('0').rstrip('.')
    return ("%.2f" % value)
@register.filter
def btc2usd(value):
    """Convert a BTC amount to USD at the current exchange rate."""
    rate = currency.exchange.get_rate('USD')
    return (Decimal(value) * rate).quantize(Decimal("0.01"))
@register.filter
def usd2btc(value):
    """Convert a USD amount to BTC at the current exchange rate."""
    rate = currency.exchange.get_rate('USD')
    return (Decimal(value) / rate).quantize(Decimal("0.00000001"))
@register.filter
def btc2eur(value):
    """Convert a BTC amount to EUR at the current exchange rate."""
    rate = currency.exchange.get_rate('EUR')
    return (Decimal(value) * rate).quantize(Decimal("0.01"))
@register.filter
def eur2btc(value):
    """Convert a EUR amount to BTC at the current exchange rate."""
    rate = currency.exchange.get_rate('EUR')
    return (Decimal(value) / rate).quantize(Decimal("0.00000001"))
@register.filter
def btc2currency(value, other_currency="USD", rate_period="24h"):
    """Convert a BTC amount into *other_currency* and format the result."""
    if other_currency == "BTC":
        # Nothing to convert; just apply bitcoin formatting.
        return bitcoinformat(value)
    converted = currency.btc2currency(value, other_currency, rate_period)
    return currencyformat(converted)
@register.filter
def currency2btc(value, other_currency="USD", rate_period="24h"):
    """Convert an *other_currency* amount into BTC and format the result."""
    if other_currency == "BTC":
        # Nothing to convert; just apply currency formatting.
        return currencyformat(value)
    converted = currency.currency2btc(value, other_currency, rate_period)
    return bitcoinformat(converted)
@register.simple_tag
def exchangerates_json():
    """Emit the current exchange-rate table as a JSON string."""
    rate_table = currency.get_rate_table()
    return json.dumps(rate_table)
@register.inclusion_tag('wallet_history.html')
def wallet_history(wallet):
    """Provide the context for the wallet history inclusion template."""
    context = {'wallet': wallet}
    return context
@register.filter
def show_addr(address, arg):
    """Render *address* as a link to its blockexplorer.com page.

    ``arg == 'long'`` shows the full address; anything else shows only
    the first eight characters.
    """
    # note: i disapprove including somewhat unnecessary depencies such as this, especially since blockexplorer is unreliable service
    link = "<a href='http://blockexplorer.com/%s/'>%s</a>"
    label = address if arg == 'long' else address[:8]
    return link % (address, label)
@register.inclusion_tag('wallet_tagline.html')
def wallet_tagline(wallet):
    """Context for the wallet tagline: the wallet plus its USD balance."""
    balance_usd = btc2usd(wallet.total_balance())
    return {'wallet': wallet, 'balance_usd': balance_usd}
@register.inclusion_tag('bitcoin_payment_qr.html')
def bitcoin_payment_qr(address, amount=Decimal("0"), description='', display_currency=''):
    """Build the template context for a bitcoin payment QR code.

    When *display_currency* is set, also converts *amount* with the
    current exchange rate (two decimal places).
    """
    currency_amount = Decimal(0)
    if display_currency:
        currency_amount = (Decimal(amount) *
                           currency.exchange.get_rate(display_currency)).quantize(Decimal("0.01"))
    try:
        # Probe that the qrcode view is wired up before building links.
        reverse('qrcode', args=('dummy',))
    except NoReverseMatch:
        # `except X, e` was Python-2-only syntax and ImproperlyConfigured
        # was never imported (NameError); import it where it is needed.
        from django.core.exceptions import ImproperlyConfigured
        raise ImproperlyConfigured('Make sure you\'ve included django_bitcoin.urls')
    # Append the amount parameter only when the amount is positive.
    qr = "bitcoin:" + address + ("", "?amount=" + str(amount))[amount > 0]
    qr = urllib.quote(qr)
    address_qrcode = reverse('qrcode', args=(qr,))
    return {'address': address,
            'address_qrcode': address_qrcode,
            'amount': amount,
            'description': description,
            'display_currency': display_currency,
            'currency_amount': currency_amount,
            }
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2014, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import unittest
import mock
from openquake.hazardlib import geo as hazardlib_geo
from openquake.commonlib import readini
from openquake.engine import engine
from openquake.engine.calculators.hazard import general
from openquake.engine.utils import get_calculator_class
from openquake.engine.db import models
from openquake.engine.tests.utils import helpers
class StoreSiteModelTestCase(unittest.TestCase):
def test_store_site_model(self):
# Setup
site_model = helpers.get_data_path('site_model.xml')
exp_site_model = [
dict(lon=-122.5, lat=37.5, vs30=800.0, vs30_type="measured",
z1pt0=100.0, z2pt5=5.0),
dict(lon=-122.6, lat=37.6, vs30=801.0, vs30_type="measured",
z1pt0=101.0, z2pt5=5.1),
dict(lon=-122.7, lat=37.7, vs30=802.0, vs30_type="measured",
z1pt0=102.0, z2pt5=5.2),
dict(lon=-122.8, lat=37.8, vs30=803.0, vs30_type="measured",
z1pt0=103.0, z2pt5=5.3),
dict(lon=-122.9, lat=37.9, vs30=804.0, vs30_type="measured",
z1pt0=104.0, z2pt5=5.4),
]
job = models.OqJob.objects.create(user_name="openquake")
ids = general.store_site_model(job, site_model)
actual_site_model = models.SiteModel.objects.filter(
job=job).order_by('id')
for i, exp in enumerate(exp_site_model):
act = actual_site_model[i]
self.assertAlmostEqual(exp['lon'], act.location.x)
self.assertAlmostEqual(exp['lat'], act.location.y)
self.assertAlmostEqual(exp['vs30'], act.vs30)
self.assertEqual(exp['vs30_type'], act.vs30_type)
self.assertAlmostEqual(exp['z1pt0'], act.z1pt0)
self.assertAlmostEqual(exp['z2pt5'], act.z2pt5)
# last, check that the `store_site_model` function returns all of the
# newly-inserted records
for i, s in enumerate(ids):
self.assertEqual(s, actual_site_model[i].id)
class ClosestSiteModelTestCase(unittest.TestCase):
def setUp(self):
self.hc = models.HazardCalculation.objects.create(
maximum_distance=200,
calculation_mode="classical",
inputs={'site_model': ['fake']})
self.job = models.OqJob.objects.create(
user_name="openquake", hazard_calculation=self.hc)
def test_get_closest_site_model_data(self):
# This test scenario is the following:
# Site model data nodes arranged 2 degrees apart (longitudinally) along
# the same parallel (indicated below by 'd' characters).
#
# The sites of interest are located at (-0.0000001, 0) and
# (0.0000001, 0) (from left to right).
# Sites of interest are indicated by 's' characters.
#
# To illustrate, a super high-tech nethack-style diagram:
#
# -1.........0.........1 V ← oh no, a vampire!
# d s s d
sm1 = models.SiteModel(
job=self.job, vs30_type='measured', vs30=0.0000001,
z1pt0=0.0000001, z2pt5=0.0000001, location='POINT(-1 0)'
)
sm1.save()
sm2 = models.SiteModel(
job=self.job, vs30_type='inferred', vs30=0.0000002,
z1pt0=0.0000002, z2pt5=0.0000002, location='POINT(1 0)'
)
sm2.save()
# NOTE(larsbutler): I tried testing the site (0, 0), but the result
# actually alternated between the the two site model nodes on each test
# run. It's very strange indeed. It must be a PostGIS thing.
# (Or we can blame the vampire.)
#
# Thus, I decided to not include this in my test case, since it caused
# the test to intermittently fail.
point1 = hazardlib_geo.Point(-0.0000001, 0)
point2 = hazardlib_geo.Point(0.0000001, 0)
res1 = self.hc.get_closest_site_model_data(point1)
res2 = self.hc.get_closest_site_model_data(point2)
self.assertEqual(sm1, res1)
self.assertEqual(sm2, res2)
class ParseRiskModelsTestCase(unittest.TestCase):
def test(self):
# check that if risk models are provided, then the ``points to
# compute`` and the imls are got from there
username = helpers.default_user()
job = engine.prepare_job(username)
cfg = helpers.get_data_path('classical_job-sd-imt.ini')
params = readini.parse_config(open(cfg, 'r'))
haz_calc = engine.create_calculation(models.HazardCalculation, params)
haz_calc = models.HazardCalculation.objects.get(id=haz_calc.id)
job.hazard_calculation = haz_calc
job.is_running = True
job.save()
calc = get_calculator_class(
'hazard',
job.hazard_calculation.calculation_mode)(job)
calc.parse_risk_models()
self.assertEqual([(1.0, -1.0), (0.0, 0.0)],
[(point.latitude, point.longitude)
for point in haz_calc.points_to_compute()])
self.assertEqual(['PGA'], haz_calc.get_imts())
self.assertEqual(
3, haz_calc.oqjob.exposuremodel.exposuredata_set.count())
return job
class InitializeSourcesTestCase(unittest.TestCase):
# this is a based on a demo with 3 realizations, 2 sources and 2 sites
@classmethod
def setUpClass(cls):
cfg = helpers.get_data_path(
'calculators/hazard/classical/haz_map_test_job.ini')
job = helpers.get_job(cfg)
models.JobStats.objects.create(oq_job=job)
hc = job.hazard_calculation
cls.calc = get_calculator_class('hazard', hc.calculation_mode)(job)
cls.calc.initialize_site_model()
assert len(hc.site_collection) == 2, len(hc.site_collection)
def test_filtering_sources(self):
self.calc.initialize_sources()
m1, m2, m3 = models.LtSourceModel.objects.filter(
hazard_calculation=self.calc.hc)
self.assertEqual(
[m1.get_num_sources(), m2.get_num_sources(), m3.get_num_sources()],
[2, 2, 2])
self.calc.process_sources()
self.assertEqual(
[m1.get_num_sources(), m2.get_num_sources(), m3.get_num_sources()],
[1, 1, 1])
class CalculationLimitsTestCase(unittest.TestCase):
def test_check_limits_classical(self):
# this is a based on a demo with 3 realizations, 2 sites and 4 rlzs
cfg = helpers.get_data_path(
'calculators/hazard/classical/haz_map_test_job.ini')
job = helpers.get_job(cfg)
models.JobStats.objects.create(oq_job=job)
hc = job.hazard_calculation
calc = get_calculator_class('hazard', hc.calculation_mode)(job)
input_weight, output_weight = calc.pre_execute()
self.assertEqual(input_weight, 225)
self.assertEqual(output_weight, 24)
calc.max_input_weight = 1
with self.assertRaises(general.InputWeightLimit):
calc.check_limits(input_weight, output_weight)
calc.max_input_weight = 1000
calc.max_output_weight = 1
with self.assertRaises(general.OutputWeightLimit):
calc.check_limits(input_weight, output_weight)
def test_check_limits_event_based(self):
# this is a based on a demo with 2 realizations, 5 ses,
# 2 imt and 121 sites
cfg = helpers.get_data_path(
'event_based_hazard/job.ini')
job = helpers.get_job(cfg)
models.JobStats.objects.create(oq_job=job)
hc = job.hazard_calculation
calc = get_calculator_class('hazard', hc.calculation_mode)(job)
input_weight, output_weight = calc.pre_execute()
self.assertEqual(input_weight, 1352.75)
self.assertEqual(output_weight, 12.1)
# NB: 12.1 = 121 sites * 2 IMT * 2 rlzs * 5 SES * 50/10000 years
class NonEmptyQuantileTestCase(unittest.TestCase):
# you cannot compute the quantiles if there is only 1 realization
def test(self):
cfg = helpers.get_data_path('simple_fault_demo_hazard/job.ini')
with mock.patch('openquake.engine.logs.LOG.warn') as warn:
helpers.run_job(cfg, number_of_logic_tree_samples=1,
quantile_hazard_curves='0.1 0.2',
hazard_maps=None, uniform_hazard_spectra=None)
msg = warn.call_args[0][0]
self.assertEqual(
msg, 'There is only one realization, the configuration'
' parameter quantile_hazard_curves should not be set')
Fixed a test broken by the change in the output_weight
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2014, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import unittest
import mock
from openquake.hazardlib import geo as hazardlib_geo
from openquake.commonlib import readini
from openquake.engine import engine
from openquake.engine.calculators.hazard import general
from openquake.engine.utils import get_calculator_class
from openquake.engine.db import models
from openquake.engine.tests.utils import helpers
class StoreSiteModelTestCase(unittest.TestCase):
def test_store_site_model(self):
# Setup
site_model = helpers.get_data_path('site_model.xml')
exp_site_model = [
dict(lon=-122.5, lat=37.5, vs30=800.0, vs30_type="measured",
z1pt0=100.0, z2pt5=5.0),
dict(lon=-122.6, lat=37.6, vs30=801.0, vs30_type="measured",
z1pt0=101.0, z2pt5=5.1),
dict(lon=-122.7, lat=37.7, vs30=802.0, vs30_type="measured",
z1pt0=102.0, z2pt5=5.2),
dict(lon=-122.8, lat=37.8, vs30=803.0, vs30_type="measured",
z1pt0=103.0, z2pt5=5.3),
dict(lon=-122.9, lat=37.9, vs30=804.0, vs30_type="measured",
z1pt0=104.0, z2pt5=5.4),
]
job = models.OqJob.objects.create(user_name="openquake")
ids = general.store_site_model(job, site_model)
actual_site_model = models.SiteModel.objects.filter(
job=job).order_by('id')
for i, exp in enumerate(exp_site_model):
act = actual_site_model[i]
self.assertAlmostEqual(exp['lon'], act.location.x)
self.assertAlmostEqual(exp['lat'], act.location.y)
self.assertAlmostEqual(exp['vs30'], act.vs30)
self.assertEqual(exp['vs30_type'], act.vs30_type)
self.assertAlmostEqual(exp['z1pt0'], act.z1pt0)
self.assertAlmostEqual(exp['z2pt5'], act.z2pt5)
# last, check that the `store_site_model` function returns all of the
# newly-inserted records
for i, s in enumerate(ids):
self.assertEqual(s, actual_site_model[i].id)
class ClosestSiteModelTestCase(unittest.TestCase):
def setUp(self):
self.hc = models.HazardCalculation.objects.create(
maximum_distance=200,
calculation_mode="classical",
inputs={'site_model': ['fake']})
self.job = models.OqJob.objects.create(
user_name="openquake", hazard_calculation=self.hc)
def test_get_closest_site_model_data(self):
# This test scenario is the following:
# Site model data nodes arranged 2 degrees apart (longitudinally) along
# the same parallel (indicated below by 'd' characters).
#
# The sites of interest are located at (-0.0000001, 0) and
# (0.0000001, 0) (from left to right).
# Sites of interest are indicated by 's' characters.
#
# To illustrate, a super high-tech nethack-style diagram:
#
# -1.........0.........1 V ← oh no, a vampire!
# d s s d
sm1 = models.SiteModel(
job=self.job, vs30_type='measured', vs30=0.0000001,
z1pt0=0.0000001, z2pt5=0.0000001, location='POINT(-1 0)'
)
sm1.save()
sm2 = models.SiteModel(
job=self.job, vs30_type='inferred', vs30=0.0000002,
z1pt0=0.0000002, z2pt5=0.0000002, location='POINT(1 0)'
)
sm2.save()
# NOTE(larsbutler): I tried testing the site (0, 0), but the result
# actually alternated between the the two site model nodes on each test
# run. It's very strange indeed. It must be a PostGIS thing.
# (Or we can blame the vampire.)
#
# Thus, I decided to not include this in my test case, since it caused
# the test to intermittently fail.
point1 = hazardlib_geo.Point(-0.0000001, 0)
point2 = hazardlib_geo.Point(0.0000001, 0)
res1 = self.hc.get_closest_site_model_data(point1)
res2 = self.hc.get_closest_site_model_data(point2)
self.assertEqual(sm1, res1)
self.assertEqual(sm2, res2)
class ParseRiskModelsTestCase(unittest.TestCase):
def test(self):
# check that if risk models are provided, then the ``points to
# compute`` and the imls are got from there
username = helpers.default_user()
job = engine.prepare_job(username)
cfg = helpers.get_data_path('classical_job-sd-imt.ini')
params = readini.parse_config(open(cfg, 'r'))
haz_calc = engine.create_calculation(models.HazardCalculation, params)
haz_calc = models.HazardCalculation.objects.get(id=haz_calc.id)
job.hazard_calculation = haz_calc
job.is_running = True
job.save()
calc = get_calculator_class(
'hazard',
job.hazard_calculation.calculation_mode)(job)
calc.parse_risk_models()
self.assertEqual([(1.0, -1.0), (0.0, 0.0)],
[(point.latitude, point.longitude)
for point in haz_calc.points_to_compute()])
self.assertEqual(['PGA'], haz_calc.get_imts())
self.assertEqual(
3, haz_calc.oqjob.exposuremodel.exposuredata_set.count())
return job
class InitializeSourcesTestCase(unittest.TestCase):
# this is a based on a demo with 3 realizations, 2 sources and 2 sites
@classmethod
def setUpClass(cls):
cfg = helpers.get_data_path(
'calculators/hazard/classical/haz_map_test_job.ini')
job = helpers.get_job(cfg)
models.JobStats.objects.create(oq_job=job)
hc = job.hazard_calculation
cls.calc = get_calculator_class('hazard', hc.calculation_mode)(job)
cls.calc.initialize_site_model()
assert len(hc.site_collection) == 2, len(hc.site_collection)
def test_filtering_sources(self):
self.calc.initialize_sources()
m1, m2, m3 = models.LtSourceModel.objects.filter(
hazard_calculation=self.calc.hc)
self.assertEqual(
[m1.get_num_sources(), m2.get_num_sources(), m3.get_num_sources()],
[2, 2, 2])
self.calc.process_sources()
self.assertEqual(
[m1.get_num_sources(), m2.get_num_sources(), m3.get_num_sources()],
[1, 1, 1])
class CalculationLimitsTestCase(unittest.TestCase):
def test_check_limits_classical(self):
# this is a based on a demo with 3 realizations, 2 sites and 4 rlzs
cfg = helpers.get_data_path(
'calculators/hazard/classical/haz_map_test_job.ini')
job = helpers.get_job(cfg)
models.JobStats.objects.create(oq_job=job)
hc = job.hazard_calculation
calc = get_calculator_class('hazard', hc.calculation_mode)(job)
input_weight, output_weight = calc.pre_execute()
self.assertEqual(input_weight, 225)
self.assertEqual(output_weight, 24)
calc.max_input_weight = 1
with self.assertRaises(general.InputWeightLimit):
calc.check_limits(input_weight, output_weight)
calc.max_input_weight = 1000
calc.max_output_weight = 1
with self.assertRaises(general.OutputWeightLimit):
calc.check_limits(input_weight, output_weight)
def test_check_limits_event_based(self):
# this is a based on a demo with 2 realizations, 5 ses,
# 2 imt and 121 sites
cfg = helpers.get_data_path(
'event_based_hazard/job.ini')
job = helpers.get_job(cfg)
models.JobStats.objects.create(oq_job=job)
hc = job.hazard_calculation
calc = get_calculator_class('hazard', hc.calculation_mode)(job)
input_weight, output_weight = calc.pre_execute()
self.assertEqual(input_weight, 1352.75)
self.assertAlmostEqual(output_weight, 12.1)
# NB: 12.1 = 121 sites * 2 IMT * 2 rlzs * 5 SES * 50/10000 years
class NonEmptyQuantileTestCase(unittest.TestCase):
# you cannot compute the quantiles if there is only 1 realization
def test(self):
cfg = helpers.get_data_path('simple_fault_demo_hazard/job.ini')
with mock.patch('openquake.engine.logs.LOG.warn') as warn:
helpers.run_job(cfg, number_of_logic_tree_samples=1,
quantile_hazard_curves='0.1 0.2',
hazard_maps=None, uniform_hazard_spectra=None)
msg = warn.call_args[0][0]
self.assertEqual(
msg, 'There is only one realization, the configuration'
' parameter quantile_hazard_curves should not be set')
|
from time import time
from util import elapsed
from util import safe_commit
import argparse
from models import emailer
from collections import defaultdict
"""
Call from command line to add ORCID profiles based on IDs in a local CSV.
"""
def email_everyone(filename):
with open(filename, "r") as f:
lines = f.read().split("\n")
print "found {} lines".format(len(lines))
total_start = time()
row_num = 0
people_to_email = defaultdict(dict)
# skip header row
for line in lines[1:]:
row_num += 1
try:
(url_slug,orcid_id,twitter_id,email,stripe_id,is_advisor,given_name,surname,created,last_viewed_profile) = line.split(",")
is_subscribed = len(stripe_id)>0 or is_advisor=="t"
people_to_email[email] = {
"orcid_id": orcid_id,
"is_subscribed": is_subscribed,
"given_name": given_name,
"surname": surname,
"refunded": False
}
print u"added person {} {} {}".format(row_num, email, people_to_email[email])
except ValueError:
print u"couldn't parse", line
with open("data/impactstory_refunds.csv", "r") as f:
lines = f.read().split("\r")
print "found {} lines".format(len(lines))
for line in lines[1:]:
try:
(stripe_created,full_name,email) = line.split(",")
if email in people_to_email:
people_to_email[email]["refunded"] = True
print "added refunded true to dict for", email
else:
people_to_email[email] = {
"orcid_id": None,
"is_subscribed": False,
"refunded": False
}
print "added new emailee true to dict for", email
except ValueError:
print "couldn't parse"
# email = "heather@impactstory.org"
# send_tng_email("heather@impactstory.org", people_to_email[email])
for email, addressee_dict in people_to_email.iteritems():
# print ",",
send_tng_email(email, addressee_dict)
pass
def send_tng_email(email, addressee_dict, now=None):
# if os.getenv("ENVIRONMENT", "testing") == "production":
# email = profile.email
# else:
# email = "heather@impactstory.org"
report_dict = {"profile": addressee_dict}
#### KEEEP THIS HERE FOR NOW, so that don't spam other people
# email = 'hpiwowar@gmail.com'
msg = emailer.send(email, "The new Impactstory: Better. Freer.", "welcome", report_dict)
print "SENT EMAIL to ", email
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run stuff.")
# just for updating lots
parser.add_argument('filename', type=str, help="filename to import")
parsed = parser.parse_args()
start = time()
email_everyone(parsed.filename)
print "finished update in {}sec".format(elapsed(start))
another go at emailing
from time import time
from util import elapsed
from util import safe_commit
import argparse
from models import emailer
from collections import defaultdict
emails_sent = """
peter.cumpson@ncl.ac.uk
tsp@ces.uc.pt
ukronman@kth.se
metwaly_eldakar@yahoo.com
simon.cobbold@hotmail.com
wtb1859@gmail.com
sshurtz@library.tamu.edu
muellerf.research@gmail.com
clari.gosling@open.ac.uk
jnfrdvsn@uw.edu
brommelstroet@gmail.com
guido.fanelli@unipr.it
m.van.selm@xs4all.nl
fintel@mit.edu
michael.thompson@complete-hv.com
me@prashanthvarma.com
tdietz@msu.edu
t.haastrup@kent.ac.uk
rturne15@uthsc.edu
showell3@nd.edu
bronwen.whitney@northumbria.ac.uk
ap113@cam.ac.uk
weixia@smu.edu.sg
stuart.a.lawson@gmail.com
jodi.a.schneider@gmail.com
wgsawyer@ufl.edu
lfurlong@imim.es
dr.mohammadhasan@gmail.com
arunkumar.962@rediffmail.com
edurne.zabaleta@gmail.com
david_osterbur@hms.harvard.edu
anlloyd@csu.edu.au
eng.franco@gmail.com
dinglei@sdu.edu.cn
vivoni@asu.edu
matthias.berth@gmail.com
aliciat@schoolph.umass.edu
cristina.tomas@uv.es
marchitelli@gmail.com
bcampb02@calpoly.edu
thoerns@gwdg.de
jacob.moorad
nicolas@limare.net
ruth.inigo@upc.edu
elarich@umich.edu
hinefuku@iastate.edu
armand.seguin@nrcan.gc.ca
ajith.abraham@ieee.org
raorben@gmail.com
erik.mullers@ki.se
jean-christophe.renauld@uclouvain.be
s.guerzoni@fondazioneimc.it
frodeman@unt.edu
jennielevineknies@gmail.com
koolaie@gmail.com
jmairal@gmail.com
hsinha@gmail.com
emma.yearwood@unimelb.edu.au
alira@sedici.unlp.edu.ar
laurie.wright@solent.ac.uk
w.sheate@imperial.ac.uk
i.lynch@bham.ac.uk
leonard.kemper@student.unisg.ch
costas.bouyioukos@issb.genopole.fr
angus.ferraro@googlemail.com
bunck@caltech.edu
scorullon@gmail.com
mhensle1@illinois.edu
romahane@indiana.edu
fite.abdelaziz.alrihawi@gmail.com
diriano@gmail.com
daniamann@gmail.com
pgkmohan@yahoo.com
ccloutie@ucalgary.ca
rec@secardiologia.es
carly.milliren@childrens.harvard.edu
caetanods1@gmail.com
bschmidt@sub.uni-goettingen.de
rajen.water@gmail.com
lynley.pound@gmail.com
m.patterson@elifesciences.org
klblock40@yahoo.com
yongweon.yi@gmail.com
juliacar@ceu.es
veeerendra@gmail.com
minohu@gmx.net
ppsimoes@gmail.com
memcvg@gmail.com
g.abt@hull.ac.uk
gokulkesavan@gmail.com
champa72@gmail.com
loveneet@dietkart.com
fergua@yahoo.ca
jackie.edwards@uts.edu.au
torressalinas@gmail.com
erinmaochu@gmail.com
fdebaste@ulb.ac.be
marschaefer@gmail.com
solbritt.andersson@lnu.se
jenny.c.dunn@gmail.com
mozfest2013a@hotmail.com
asa.matsuda@ieee.org
philip.machanick@gmail.com
omeally@gmail.com
nahid_lotfian@yahoo.com
mark@theaberdours.co.uk
jennie.burroughs@gmail.com
christine.chambers@dal.ca
bwolski@bond.edu.au
marta.lagom@udc.es
indla.pharmacology@gmail.com
ecsalomon@gmail.com
h.marshall@exeter.ac.uk
albertoclaro@albertoclaro.pro.br
aramirez@ramirezlab.net
dominika@work.swmed.edu
anders.lanzen@gmail.com
tomas.lagunas@udc.es
patrice.chalon@gmail.com
matt_andrews@hks.harvard.edu
grhyasen@gmail.com
c.inwood@griffith.edu.au
marc.deconchat@toulouse.inra.fr
jneubert@zbw.eu
vilhelmina.ullemar@ki.se
m
lanninj@missouri.edu
arunbiotechnologist@gmail.com
lkappos@uhbs.ch
vfridmacher@hotmail.com
ann.ewbank@asu.edu
lm.ogorman@qut.edu.au
erinrobinson@esipfed.org
b.drinkwater@bristol.ac.uk
verrico@bcm.edu
charlotte.brown@gmail.com
ketterso@indiana.edu
thwillia@gmail.com
kallarahul@gmail.com
kavubob@gmail.com
exergamelab@gmail.com
imre.vida@charite.de
buckleyy@tcd.ie
juan@uniovi.es
djrozell@gmail.com
monirehgharibeniazi@yahoo.com
oliver.jones@rmit.edu.au
kenny048@umn.edu
erik.stattin@gmail.com
sglegg@cw.bc.ca
ohalloranc@ecu.edu
zmj@zmjones.com
daniysusbromas@yahoo.ees
danbrowne@gmail.com
sgantayat67@rediffmail.com
jacob.leachman@wsu.edu
iftachn@post.tau.ac.il
r.fagan@sheffield.ac.uk
dagmara.chojecki@ualberta.ca
geraldine.laloux@yale.edu
still.pestill@gmail.com
rolando.milian@yale.edu
mail_me_on_line@yahoo.com
deloubens@irphe.univ-mrs.fr
dirk.haehnel@phys.uni-goettingen.de
chayanikabarman123@gmail.com
oroszgy@gmail.com
anahi.balbi@frontiersin.org
ea866311@ohio.edu
marco.tullney@fu-berlin.de
dkolah@rice.edu
g.louppe@gmail.com
pfafard@uottawa.ca
veronica.salido@um.es
wolass@gmail.com
apai@spelman.edu
dalloliogm
admin@fisica.udea.edu.co
andre.goergens@uk-essen.de
vblago@gmail.com
richarow@usc.edu
joaoantonio@yahoo.com
pultz@ku.edu
kaltrina.nuredini@yahoo.com
sandraklatt@gmx.net
stefan.Mueller@fu-berlin.de
noha@cybrarians.info
kristoffer.l.karlsson@chalmers.se
turner@usf.edu
jordipapsgmail.com
mikeknee@gmail.com
paola.villani@polimi.it
rociomorcillo@gmail.com
ganley@psy.fsu.edu
klaas.vandepoele@gmail.com
grosd@musc.edu
negaresma@gmail.com
ab@ab.com
christina.godfrey@articuatescience.com
lewis.mitchell@adelaide.edu.au
emily.alvino@qut.edu.au
boes@pitt.edu
gustavosmotta@gmail.com
amy_brand@harvard.edu
f.murphy@latrobe.edu.au
m.p.morgenstern@gmail.com
paulatraver@gmail.com
tomofumi.okuda@jp.sony.com
hankin@ufl.edu
sadegh.hatamkhani@yahoo.con
peter.doorn@dans.knaw.nl
biserkov@gmail.com
lisa.lodwick@googlemail.com
da.frost@qut.edu.au
josekarvalho@gmail.com
gbb@ipac.caltech.edu
a.gaskett@auckland.ac.nz
uchikawa214@gmail.com
takeoutweight@hotmail.com
mal.ross@nonlinear.com
gfr.abdul@yahoo.co.in
c.oyler@orcid.org
contact@jfstich.com
piotrsmolnicki@gmail.com
shenmeng@live.unc.edu
acabrera@universidadecotec.edu.ec
ahchen@ua.edu
gabriel.wallau@gmail.com
dfreelon@gmail.com
ca.mclean@auckland.ac.nz
shelleyminteer@gmail.com
aizenman@cox.net
patternizer@gmail.com
s.hu@awmc.uq.edu.au
iremaseda@gmail.com
andree@uri.edu
joan.moranta@ba.ieo.es
luisdiazdelrio@hotmail.com
cb1914a@student.american.edu
awasom.afuh@ttu.edu
cpachecoiii@gmail.com
jpo@ebi.ac.uk
ashikasjayanthy@gmail.com
qaziuzaina@gmail.com
andrea.burattin@gmail.com
bussonniermatthias@gmail.com
a.alabdali@warwick.ac.uk
erich.brenner@i-med.ac.at
evelien.vandeperre@ugent.be
marialisa.scata@gmail.com
kelleydwhitten@gmail.com
garnier@wehi.edu.au
kganeshp@gmail.com
carlcold.cc@gmail.com
dr.jahuja@gmail.com
challa.anilkumar@gmail.com
marc.robinsonrechavi@gmail.com
rnpcp942@yahoo.co.jp
maria.jenmalm@me.com
jenserikmai@gmail.com
mathijs_van_leeuwen@hotmail.com
rmcw@st-andrews.ac.uk
donovan.maryk@gmail.com
kate.weatherall@meditechmedia.com
fernan@iib.unsam.edu.ar
kevin.drees@okstate.edu
artemij.keidan@uniroma1.it
bjshops@yahoo.com
violeta_gh@usal.es
cmarsh12@gmail.com
christopher.hodge@visioneyeinstitute.com.au
contato@skrol.com
rhilliker@columbia.edu
administrator@palaeontologyonline.com
will.whiteley@gmail.com
ejimenez@gmail.com
consort@ohri.ca
breiter@usf.edu
caterina.viglianisi@unifi.it
analauda@gmail.com
mikhail.spivakov@babraham.ac.uk
elizabeth.kingdom@gmail.com
matt.hall@unibas.ch
julia@turningforward.org
mforeman@msm.edu
dfboehni@utmb.edu
delgado@gmail.com
briganti@ifo.it
gianluca@dellavedova.org
andrew2153@gmail.com
simon@simula.no
weaverj@unimelb.edu.au
alimulyohadidr@gmail.com
kevin.mansfield@ucl.ac.uk
mark.skilton@wbs.ac.uk
iveta.simera@csm.ox.ac.uk
svetal.shukla@nirmauni.ac.in
a.dempsey@murdoch.edu.au
murdiea@missouri.edu
moussa.benhamed@u-psud.fr
maxima.bolanos@uv.es
nicholas.badcock@mq.edu.au
isyelueze@gmail.com
dhocking@unh.edu
schacht@geneseo.edu
nkannankutty@yahoo.com
lskalla@michaeldbaker.com
cwilhite@salud.unm.edu
hct194@gmail.com
susanleemburg@gmail.com
amanda.cooper@queensu.ca
mardomidepaz@gmail.com
jake@jakebowers.org
am187k@nih.gov
eschenk@usgs.gov
hlapp+impst1@drycafe.net
melanie.bertrand@asu.edu
bgagee@vt.edu
kelly.elizabeth.miller@gmail.com
leonardo.trasande@nyumc.org
carola.tilgmann@med.lu.se
dargan@atmos.washington.edu
kirby.shannon@gmail.com
nick.gardner@gmail.com
blwetze@terpmail.umd.edu
manusfonseca@gmail.com
mahbubadilruba@gmail.com
alexander.pisarchik@ctb.upm.es
paul.thirion@ulg.ac.be
ajw51@le.ac.uk
jim.witschey@gmail.com
daniel_von_schiller@hotmail.com
margarida.rego@fd.unl.pt
manuel.durand-barthez@enc.sorbonne.fr
jimbowen1979@gmail.com
leonderczynski@gmail.com
nicholasjameshudson@yahoo.com
dave.number8@gmail.com
m.calver@murdoch.edu.au
harriet.barker@surrey.ac.uk
phil.levin@netzero.net
gemma.masdeu@ub.edu
kkoray87@gmail.com
salhandivya@gmail.com
titus@idyll.org
nglazer@fas.harvard.edu
billy.meinke@gmail.com
mmichalak@gmail.com
pittmixer@gmail.com
kamakshi.rajagopal@gmail.com
dritoshi@gmail.com
ramsyagha@gmail.com
mrtz.milani@gmail.com
susanne.manz@luks.ch
jacqueline.arciniega@nyumc.org
m-allkhamis@hotmail.com
lorna.peterson2401@gmail.com
joe.mirza@uclh.nhs.uk
ggruere@gmail.com
e.largy@gmail.com
shibbyin@gmail.com
rosieusedv@gmail.com
barwil@gmail.com
nikdholakia@gmail.com
ddecarv@uhnresearch.ca
vegapchirinos@gmail.com
danielrandles@gmail.com
matt.holland@nwas.nhs.uk
ikeuchi.ui@gmail.com
ssiyahhan@gmail.com
gupta59@illinois.edu
simon.elliott@tyndall.ie
alicia.franco@udc.es
terinthanas@gmail.com
t.espinosa.s@gmail.com
omidalighasem49@gmail.com
prateek.mahalwar@tuebingen.mpg.de
marc.neumann@bc3research.org
jburkhardt@uri.edu
tmartins@bcs.uc.pt
adela.feldru@gmail.com
mikko.ojanen@helsinki.fi
berridge@umich.edu
jbhogen@yahoo.com
jennifer_costanza@ncsu.edu
yildiraykeskin@yahoo.com
dan.lawson@bristol.ac.uk
axfelix@gmail.com
1920wr@gmail.com
amparocosta71@gmail.com
toshifum@ualberta.ca
thhaverk@gmail.com
mrassafiani@gmail.com
keith.collier@rubriq.com
ghre
miika.tapio@gmail.com
digitalbio@gmail.com
phillip.white@duke.edu
soiland-reyes@cs.manchester.ac.uk
beatrice.marselli@epfl.ch
simon.sherry@dal.ca
cyc3700@gmail.com
m.salamattalab@gmail.com
tricia.mccabe@sydney.edu.au
matthewomeagher@gmail.com
bsul@nih.gov
baeza.antonio@gmail.com
chris.carswell@springer.com
rhonda.allard.ctr@usuhs.edu
samantha.stehbens@gmail.com
ahmedbassi@gmail.com
deveshkumarjoshi@gmail.com
a.n.scott@ids.ac.uk
mihai.podgoreanu@duke.edu
lemosbioinfo@gmail.com
sanzce@gmail.com
muliasulistiyono@hotmail.com
jeramia.ory@gmail.com
patshine@gmail.com
steve.p.lee@gmail.com
anders.wandahl@ki.se
walter.finsinger@univ-montp2.fr
cynthia.parr@ars.usda.gov
test23@e.com
cawein@live.unc.edu
scgooch@uwaterloo.ca
ngomez@udc.es
nicoleca@stanford.edu
altmetrics.ifado@gmx.de
pbeile@mail.ucf.edu
contact@ryanlfoster.com
juanmaldonado.ortiz@gmail.com
david.w.carter@noaa.gov
a.algra@umcutrecht.nl
raymond.white@uwa.edu.au
makman@ucdavis.edu
nethmin999@gmail.com
barbara.prainsack@gmail.com
linder.bastian@googlemail.com
dgrapov@gmail.com
ucfagls@gmail.com
foster@uchicago.edu
barbro.hellquist@onkologi.umu.se
colditzjb@gmail.com
shoaibsufi@gmail.com
amdrauch@ucdavis.edu
pkiprof@d.umn.edu
iserra73@gmail.com
manubue@yahoo.es
kljensen@alaska.edu
t.gruber@lboro.ac.uk
cesareni@uniroma2.it
claire-stewart@northwestern.edu
sportart@gmail.com
f.correia.profissional@gmail.com
andy@mydocumate.com
mjvs8822@gmail.com
quackenbushs@missouri.edu
sanand@nichq.org
bouche.fred@gmail.com
pierre-michel.forget@mnhn.fr
freaner@unam.mx
i.munfer@gmeil.com
ciaran.quinn@nuim.ie
jan.havlicek@ruk.cuni.cz
julia_sollenberger@urmc.rochester.edu
mokhtari21@hotmail.com
fatima.raja@ucl.ac.uk
gormleya@landcareresearch.co.nz
rosarie.coughlan@queensu.ca
psm_bu@india.com
farhadshokraneh@gmail.com
hsenior@aracnet.com
drsaraserag@aucegypt.edu
sally.a.keith@gmail.com
b.hall@bangor.ac.uk
mbjones.89@gmail.com
pierrich.plusquellec@umontreal.ca
mzs227@gmail.com
nakul777@gmail.com
quinn.jamiem@gmail.com
rafael.calsaverini@gmail.com
ccbenner@ucdavis.edu
kbranch@uri.edu
sandra_destradi@yahoo.de
kdough03@gmail.com
a.scott@ids.ac.uk
ir46@le.ac.uk
l.kenny@ioe.ac.uk
jsoutter@uwindsor.ca
michaela.saisana@jrc.ec.europa.eu
canthony@jcu.edu
djacobs@rider.edu
kat.bussey@gmail.com
kumbharrajendra@yahoo.co.in
lmtd@sun.ac.za
b.yousefi@tum.de
adamt@uow.edu.au
kate.parr@liverpool.ac.uk
alfonso.infante@uhu.es
d.dunlap@neu.edu
xosearegos@gmail.com
dwanecateslaw@yahoo.com
sadaf.ashfaque@gmail.com
fjmanza@ugr.es
david.kalfert@email.cz
matthew.parker@uky.edu
pjbh1@stir.ac.uk
totalimpact@jcachat.com
meri.raggi@unibo.it
mickic20@yahoo.com
markel.vigo@manchester.ac.uk
rdaniel@ohri.ca
m.boyle@griffith.edu.au
jessica.breiman@gmail.com
asa.langefors@biol.lu.se
jsmith@sympatico.ca
kzborzynska@gmail.com
mark.farrar@manchester.ac.uk
alebisson@gmail.com
ekarakaya@gmail.com
eguacimara@gmail.com
bgoodridge@bren.ucsb.edu
bruno.bellisario@gmail.com
amir.sariaslan@psych.ox.ac.uk
stacy.konkiel+nachos@gmail.com
cchan3330@gmail.com
ulrich.schroeders@uni-bamberg.de
j.bosman@uu.nl
dtpalmer@hku.hk
majkaweber@aol.de
n03er953@gmail.com
j.kazbekov@cgiar.org
trevor.johnowens@gmail.com
dieter.lukas@gmail.com
spergam@fhcrc.org
mitchell.thompson@berkeley.edu
erlingj@rki.de
stacy.konkiel+buttons@gmail.com
hcrogman@yahoo.com
mdfrade@gmail.com
jjotto@rutgers.edu
goldman@med.unc.edu
leonardo.candela@isti.cnr.it
twheatland@assumption.edu
gilles.frison@polytechnique.edu
kn11284@seeu.edu.mk
pontika.nancy@gmail.com
jon.hill@imperial.ac.uk
trujillo.valentina@gmail.com
a.teacher@exeter.ac.uk
barry@barold.com
david.bailey@glasgow.ac.uk
onkenj@mail.nih.gov
abreiter@informatik.uni-bremen.de
regan.early@gmail.com
sadaf.ashfaque@yahoo.com
davidwright37@aol.com
marc.c-scott@vu.edu.au
kaveh@bazargan.org
gianluigi.filippelli@gmail.com
h.talebiyan@gmail.com
degoss@gmail.com
r.a.higman@reading.ac.uk
bruno.danis@ulb.ac.be
aakella@aip.org
ekuru@indiana.edu
loet@leydesdorff.net
rachel.nowak@monash.edu
fatemeh.nadimi5@gmail.com
sumanta.patro@yahoo.com
naoto.kojima@gmail.com
thabash@apa.org
adam.byron@gmail.com
r.bryant@orcid.org
apanigab@gmail.com
annelewis40th@gmail.com
rams.aguilar@gmail.com
bct3@psu.edu
assafzar@gmail.com
david.ross@sagepub.co.uk
danielclark@bpp.com
ericaburl@gmail.com
cng_kng@yahoo.com
eirik.sovik@gmail.com
mpace01s@illinois.edu
bflammang@post.harvard.edu
gattuso2@obs-vlfr.fr
john.parker@asu.edu
egil@du.edu
rchampieux@gmail.com
johannes.hoja@gmail.com
aalfonso@unav.es
mmalves@fe.up.pt
gigi@biocomp.unibo.it
nicola.misani@unibocconi.it
waterhlz@gmail.com
andrew.treloar@gmail.com
nathaliasavila@gmail.com
jens.malmkvist@anis.au.dk
afbailey@vt.edu
nardello@unica.it
tkind@ucdavis.edu
maren@tamu.edu
adavis-alteri@albany.edu
tjacobson@albany.edu
peter.bower@manchester.ac.uk
samuel.bolton77@mail.com
dr.jonte@gmail.com
siouxsie.wiles@gmail.com
villa@lcc.uma.es
wkeithcampbell@gmail.com
g.lozano@csic.es
katinatoufexis@hotmail.com
keith@uri.edu
gatien.lokossou@gmail.com
d.mcelroy@uel.ac.uk
herrie.schalekamp@uct.ac.za
gss1@cornell.edu
prabhakar.marepalli@gmail.com
tritemio@gmail.com
mirdelpal@gmail.com
martin.kamler@gmail.com
barbara@bbneves.com
sjones@sc.edu
mdriscoll@library.ucsb.edu
pennyb@gmail.com
karen.vella@qut.edu.au
paul.frankland@gmail.com
barchas@austin.utexas.edu
j.beggs@auckland.ac.nz
bgallagher@mail.uri.edu
paul.maharg@anu.edu.au
renytyson@hotmail.com
wangfeng.w@gmail.com
krother@academis.eu
cgcamero@gmail.com
paolo.righi@unibo.it
schang72@umd.edu
lherzberg@ku.edu
gary.motteram@manchester.ac.uk
mullain@fas.harvard.edu
karen.gutzman@northwestern.edu
michelle.carnegie@gmail.com
dzwinel@agh.edu.pl
torsten.seemann@gmail.com
renata.freitas@ibmc.up.pt
amir.aryani@gmail.com
rmasmuss@gmail.com
warrenkoch@gmail.com
mpop@umd.edu
ykondo@kumamoto-u.ac.jp
lettner.chr@gmail.com
fmylonas@image.ntua.gr
p.loria@uws.edu.au
juliema@illinois.edu
krantisinha@rediffmail.com
sandy.campbell@ualberta.ca
robert_campbell@cbu.ca
ashley.cnchen@gmail.com
gandipalem@gmail.com
idf2@cornell.edu
phillip.melton@uwa.edu.au
akinlo@gmail.com
rogersm@pitt.edu
lalba@flog.uned.es
manuelbehmel@gmail.com
p.crook@latrobe.edu.au
girish-bathla@uiowa.edu
ssampson@sun.ac.za
curttalkthai@gmail.com
florian.duclot@med.fsu.edu
manuela.degregori@unipv.it
h.webb
brookss1@kgh.kari.net
science@vort.org
sa.kornilov@gmail.com
machtmes@ohio.edu
erin.braswell@gmail.com
siul.shl@gmail.com
mlitton@utk.edu
h2izadi@uwaterloo.ca
jshepard@library.berkeley.edu
meihalt@gmail.com
phaedra@surgery.org
pnvaughn@uh.edu
wilcoxcl@hawaii.edu
ridavide@gmail.com
vanesa.loureiro@gmail.com
deborah.fitchett@gmail.com
bleveck@ucmerced.edu
gerben@gerbenzaagsma.org
bette.rathe@unco.edu
kelly.bogh@nrcresearchpress.com
amogh.ambardekar@gmail.com
liefeld@broadinstitute.org
julia.leong@rmit.edu.au
dgilton@mail.uri.edu
belazzbelazz@gmail.com
demariasn@mail.nih.gov
tarja.kokkola@uef.fi
claire.cobley@gmail.com
mehdi.golari@gmail.com
elizabeth.farrell@yahoo.com
thomas.kastner@aau.at
j.ewart@griffith.edu.au
john.cronin@eui.eu
mdgroover@bsu.edu
carawong@illinois.edu
wjavac@hotmail.com
william.gunn@mendeley.com
sophiebuigues@gmail.com
shop@brianmcgill.org
jmoses@primaryresearch.com
nicstah@hotmail.com
C.Rowan@warwick.ac.uk
david.michels@dal.ca
apding@gmail.com
julian.garcia@pobox.com
westonplatter@gmail.com
a.nuriddinov94@gmail.com
abeisler@unr.edu
vtsiligiris@gmail.com
jennifer.fishman@mcgill.ca
cristina.blancoandujar.09@ucl.ac.uk
mattecologist@gmail.com
angelica.risquez@ul.ie
david.gatfield@unil.ch
sfm@mail.nih.gov
beaufrer@uoguelph.ca
dkbaldr@gmail.com
angelamaria.rizzo@unimi.it
tom.finger@ucdenver.edu""".split()
def email_everyone(filename):
    """Send the "new Impactstory" announcement to subscribed or refunded users.

    Reads a CSV export of profiles from `filename`, merges in a refunds CSV,
    then sends one email per qualifying address via send_tng_email().
    Python 2 script; relies on module-level `emails_sent` (already-sent set),
    `time`, `defaultdict`, and `send_tng_email`.
    """
    with open(filename, "r") as f:
        lines = f.read().split("\n")
    print "found {} lines".format(len(lines))
    total_start = time()
    row_num = 0
    # email -> {orcid_id, is_subscribed, given_name, surname, refunded}
    people_to_email = defaultdict(dict)
    # skip header row
    for line in lines[1:]:
        row_num += 1
        try:
            # NOTE(review): naive split(",") — breaks on quoted fields; the
            # ValueError handler below silently drops any such row.
            (url_slug,orcid_id,twitter_id,email,stripe_id,is_advisor,given_name,surname,created,last_viewed_profile) = line.split(",")
            # subscribed if they have a Stripe customer ID or are an advisor
            is_subscribed = len(stripe_id)>0 or is_advisor=="t"
            people_to_email[email] = {
                "orcid_id": orcid_id,
                "is_subscribed": is_subscribed,
                "given_name": given_name,
                "surname": surname,
                "refunded": False
            }
            print u"added person {} {} {}".format(row_num, email, people_to_email[email])
        except ValueError:
            print u"couldn't parse", line
    # merge the refunds export; refunded users also get the email
    # NOTE(review): split("\r") suggests an old-Mac-style line-ending export — confirm
    with open("data/impactstory_refunds.csv", "r") as f:
        lines = f.read().split("\r")
    print "found {} lines".format(len(lines))
    for line in lines[1:]:
        try:
            (stripe_created,full_name,email) = line.split(",")
            if email in people_to_email:
                people_to_email[email]["refunded"] = True
                print "added refunded true to dict for", email
            else:
                # refund for someone not in the profiles export; added with
                # refunded=False, so they will NOT be emailed — presumably
                # intentional, but worth confirming
                people_to_email[email] = {
                    "orcid_id": None,
                    "is_subscribed": False,
                    "refunded": False
                }
                print "added new emailee true to dict for", email
        except ValueError:
            print "couldn't parse"
    # email = "heather@impactstory.org"
    # send_tng_email("heather@impactstory.org", people_to_email[email])
    num_sending = 0
    num_not_sending = 0
    for email, addressee_dict in people_to_email.iteritems():
        if addressee_dict["is_subscribed"] or addressee_dict["refunded"]:
            if email in emails_sent:
                # already contacted in a previous run; skip
                num_not_sending += 1
                print "not sending email to", email, "because already sent"
            else:
                print "WOULD send email to", email
                num_sending += 1
                send_tng_email(email, addressee_dict)
    print "num_not_sending", num_not_sending
    print "num_sending", num_sending
def send_tng_email(email, addressee_dict, now=None):
    """Send the "welcome" template email to a single address.

    `addressee_dict` is passed to the template as the `profile` value.
    `now` is accepted but unused in the visible code.
    Relies on a module-level `emailer` with a send(to, subject, template, context) API.
    """
    # if os.getenv("ENVIRONMENT", "testing") == "production":
    #     email = profile.email
    # else:
    #     email = "heather@impactstory.org"
    report_dict = {"profile": addressee_dict}
    #### KEEP THIS HERE FOR NOW, so that we don't spam other people
    # email = 'hpiwowar@gmail.com'
    msg = emailer.send(email, "The new Impactstory: Better. Freer.", "welcome", report_dict)
    print "SENT EMAIL to ", email
if __name__ == "__main__":
    # CLI entry point: takes one positional argument, the profiles CSV path.
    parser = argparse.ArgumentParser(description="Run stuff.")
    # just for updating lots
    parser.add_argument('filename', type=str, help="filename to import")
    parsed = parser.parse_args()
    start = time()
    email_everyone(parsed.filename)
    # `elapsed` is a module-level helper (not visible here) — presumably time() - start
    print "finished update in {}sec".format(elapsed(start))
|
# Copyright 2004-2011 Luis Manuel Angueira Blanco - Pexego
# Copyright 2013-2019 Ignacio Ibeas - Acysos S.L. (http://acysos.com)
# Copyright 2015 Ainara Galdona <agaldona@avanzosc.com>
# Copyright 2016 Antonio Espinosa <antonio.espinosa@tecnativa.com>
# Copyright 2018 Juan Vicente Pascual <jvpascual@puntsistemes.es>
# Copyright 2019 Tecnativa - Carlos Dauden
# Copyright 2013-2021 Tecnativa - Pedro M. Baeza
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# Odoo module manifest for l10n_es_aeat (base module for Spanish AEAT
# tax declarations).
{
    "name": "AEAT Base",
    "summary": "Modulo base para declaraciones de la AEAT",
    "version": "15.0.2.0.4",
    "author": "Pexego, "
    "Acysos S.L., "
    "AvanzOSC, "
    "Tecnativa, "
    "Odoo Community Association (OCA)",
    "license": "AGPL-3",
    "website": "https://github.com/OCA/l10n-spain",
    "category": "Accounting & Finance",
    "development_status": "Mature",
    "depends": ["l10n_es", "account_tax_balance"],
    # NOTE(review): this comment mentions odoo_test_helper, but the listed
    # Python dependencies are unidecode and cryptography — confirm which is right
    # odoo_test_helper is needed for the tests
    "external_dependencies": {"python": ["unidecode", "cryptography"]},
    "data": [
        "security/aeat_security.xml",
        "security/ir.model.access.csv",
        "data/aeat_partner.xml",
        "data/ir_config_parameter.xml",
        "data/aeat_tax_agency_data.xml",
        "wizard/export_to_boe_wizard.xml",
        "wizard/compare_boe_file_views.xml",
        "wizard/aeat_certificate_password_view.xml",
        "views/aeat_menuitem.xml",
        "views/aeat_report_view.xml",
        "views/aeat_tax_agency_view.xml",
        "views/aeat_tax_line_view.xml",
        "views/aeat_export_configuration_view.xml",
        "views/aeat_tax_code_mapping_view.xml",
        "views/account_move_line_view.xml",
        "views/res_company_view.xml",
        "views/res_partner_view.xml",
        "views/aeat_certificate_view.xml",
    ],
    "installable": True,
    "maintainers": ["pedrobaeza"],
}
l10n_es_aeat 15.0.2.1.0
# Copyright 2004-2011 Luis Manuel Angueira Blanco - Pexego
# Copyright 2013-2019 Ignacio Ibeas - Acysos S.L. (http://acysos.com)
# Copyright 2015 Ainara Galdona <agaldona@avanzosc.com>
# Copyright 2016 Antonio Espinosa <antonio.espinosa@tecnativa.com>
# Copyright 2018 Juan Vicente Pascual <jvpascual@puntsistemes.es>
# Copyright 2019 Tecnativa - Carlos Dauden
# Copyright 2013-2021 Tecnativa - Pedro M. Baeza
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# Odoo module manifest for l10n_es_aeat, version 15.0.2.1.0.
{
    "name": "AEAT Base",
    "summary": "Modulo base para declaraciones de la AEAT",
    "version": "15.0.2.1.0",
    "author": "Pexego, "
    "Acysos S.L., "
    "AvanzOSC, "
    "Tecnativa, "
    "Odoo Community Association (OCA)",
    "license": "AGPL-3",
    "website": "https://github.com/OCA/l10n-spain",
    "category": "Accounting & Finance",
    "development_status": "Mature",
    "depends": ["l10n_es", "account_tax_balance"],
    # NOTE(review): comment mentions odoo_test_helper, but the listed Python
    # dependencies are unidecode and cryptography — confirm
    # odoo_test_helper is needed for the tests
    "external_dependencies": {"python": ["unidecode", "cryptography"]},
    "data": [
        "security/aeat_security.xml",
        "security/ir.model.access.csv",
        "data/aeat_partner.xml",
        "data/ir_config_parameter.xml",
        "data/aeat_tax_agency_data.xml",
        "wizard/export_to_boe_wizard.xml",
        "wizard/compare_boe_file_views.xml",
        "wizard/aeat_certificate_password_view.xml",
        "views/aeat_menuitem.xml",
        "views/aeat_report_view.xml",
        "views/aeat_tax_agency_view.xml",
        "views/aeat_tax_line_view.xml",
        "views/aeat_export_configuration_view.xml",
        "views/aeat_tax_code_mapping_view.xml",
        "views/account_move_line_view.xml",
        "views/res_company_view.xml",
        "views/res_partner_view.xml",
        "views/aeat_certificate_view.xml",
    ],
    "installable": True,
    "maintainers": ["pedrobaeza"],
}
|
class NullLogger(object):
    """No-op drop-in for a loguru-style logger: every method accepts the
    loguru call shape and does nothing.

    Fix: ``add`` previously took only ``**kwargs``, so the standard loguru
    call ``logger.add(sink, ...)`` with a positional sink raised TypeError
    (the updated copy of this class elsewhere in the file confirms the
    intended signature). ``sink=None`` keeps keyword-only callers working.
    """

    # Mirrors the attribute loguru exposes; never set here.
    level_name = None

    def remove(self, handler_id=None):  # pragma: no cover
        pass

    def add(self, sink=None, **kwargs):  # pragma: no cover
        # sink may be passed positionally (file path, stream, callable) or omitted
        pass

    def disable(self, name):  # pragma: no cover
        pass

    def enable(self, name):  # pragma: no cover
        pass

    def critical(self, __message, *args, **kwargs):  # pragma: no cover
        pass

    def debug(self, __message, *args, **kwargs):  # pragma: no cover
        pass

    def error(self, __message, *args, **kwargs):  # pragma: no cover
        pass

    def exception(self, __message, *args, **kwargs):  # pragma: no cover
        pass

    def info(self, __message, *args, **kwargs):  # pragma: no cover
        pass

    def log(self, __level, __message, *args, **kwargs):  # pragma: no cover
        pass

    def success(self, __message, *args, **kwargs):  # pragma: no cover
        pass

    def trace(self, __message, *args, **kwargs):  # pragma: no cover
        pass

    def warning(self, __message, *args, **kwargs):  # pragma: no cover
        pass
Fix NullLogger
class NullLogger(object):
    """Silently discard every logging call.

    Implements the subset of the loguru ``Logger`` surface this project
    touches; each method accepts loguru's call shape and returns None.
    """

    level_name = None

    def remove(self, handler_id=None):  # pragma: no cover
        """Discard a handler removal request."""

    def add(self, sink, **kwargs):  # pragma: no cover
        """Discard a sink registration."""

    def disable(self, name):  # pragma: no cover
        """Discard a logger-disable request."""

    def enable(self, name):  # pragma: no cover
        """Discard a logger-enable request."""

    def critical(self, __message, *args, **kwargs):  # pragma: no cover
        """Discard a CRITICAL-level message."""

    def debug(self, __message, *args, **kwargs):  # pragma: no cover
        """Discard a DEBUG-level message."""

    def error(self, __message, *args, **kwargs):  # pragma: no cover
        """Discard an ERROR-level message."""

    def exception(self, __message, *args, **kwargs):  # pragma: no cover
        """Discard an exception report."""

    def info(self, __message, *args, **kwargs):  # pragma: no cover
        """Discard an INFO-level message."""

    def log(self, __level, __message, *args, **kwargs):  # pragma: no cover
        """Discard a message at an explicit level."""

    def success(self, __message, *args, **kwargs):  # pragma: no cover
        """Discard a SUCCESS-level message."""

    def trace(self, __message, *args, **kwargs):  # pragma: no cover
        """Discard a TRACE-level message."""

    def warning(self, __message, *args, **kwargs):  # pragma: no cover
        """Discard a WARNING-level message."""
|
import re
import xbmc
import xbmcgui
from devhelper import pykodi
from devhelper import quickjson
from devhelper.pykodi import log
import mediatypes
from artworkselection import prompt_for_artwork
from gatherer import Gatherer, list_missing_arttypes, NOAUTO_IMAGE
from utils import SortedDisplay, natural_sort, localize as L
addon = pykodi.get_main_addon()
# Processing modes: fully automatic vs interactive GUI selection
MODE_AUTO = 'auto'
MODE_GUI = 'gui'
# Seconds to wait between items when web services were hit (see process_medialist)
THROTTLE_TIME = 0.15
DEFAULT_IMAGESIZE = '1920x1080'
# size setting -> (preferred width, preferred height, minimum size used by _auto_filter for fanart)
imagesizes = {'1920x1080': (1920, 1080, 700), '1280x720': (1280, 720, 520)}
# JSON-RPC properties requested for each media type
tvshow_properties = ['art', 'imdbnumber', 'season', 'file']
movie_properties = ['art', 'imdbnumber', 'file']
episode_properties = ['art', 'uniqueid', 'tvshowid', 'season', 'file']
# Localized string IDs (resolved through `L`)
SOMETHING_MISSING = 32001
FINAL_MESSAGE = 32019
ADDING_ARTWORK_MESSAGE = 32020
NOT_AVAILABLE_MESSAGE = 32021
ARTWORK_ADDED_MESSAGE = 32022
NO_ARTWORK_ADDED_MESSAGE = 32023
PROVIDER_ERROR_MESSAGE = 32024
NOT_SUPPORTED_MESSAGE = 32025
CURRENT_ART = 13512
class ArtworkProcessor(object):
    """Gathers and applies artwork for Kodi library items (TV shows, movies,
    episodes), either interactively (GUI selection) or automatically.

    Python 2 code (uses dict.iteritems). Talks to Kodi via `quickjson`
    (JSON-RPC) and to artwork providers via `Gatherer`.
    """

    def __init__(self, monitor=None):
        # monitor: optional xbmc.Monitor used for abort/throttle checks
        self.monitor = monitor or xbmc.Monitor()
        self.language = None
        self.autolanguages = None
        self.progress = xbmcgui.DialogProgressBG()
        self.visible = False
        self.update_settings()

    def update_settings(self):
        """Re-read addon settings; fall back to DEFAULT_IMAGESIZE (and write
        it back) when the configured size is not recognized."""
        self.titlefree_fanart = addon.get_setting('titlefree_fanart')
        self.only_filesystem = addon.get_setting('only_filesystem')
        sizesetting = addon.get_setting('preferredsize')
        if sizesetting in imagesizes:
            self.preferredsize = imagesizes[sizesetting][0:2]
            self.minimum_size = imagesizes[sizesetting][2]
        else:
            self.preferredsize = imagesizes[DEFAULT_IMAGESIZE][0:2]
            self.minimum_size = imagesizes[DEFAULT_IMAGESIZE][2]
            addon.set_setting('preferredsize', DEFAULT_IMAGESIZE)

    def create_progress(self):
        # Show the background progress dialog once
        if not self.visible:
            self.progress.create(L(ADDING_ARTWORK_MESSAGE), "")
            self.visible = True

    def close_progress(self):
        if self.visible:
            self.progress.close()
            self.visible = False

    def init_run(self, show_progress=False):
        """Prepare a processing run: resolve languages, optionally show progress."""
        self.setlanguages()
        if show_progress:
            self.create_progress()

    def finish_run(self):
        if self.visible:
            self.close_progress()

    @property
    def processor_busy(self):
        # True while another run has set the window property to anything but 'idle'
        return pykodi.get_conditional('!StringCompare(Window(Home).Property(ArtworkBeef.Status),idle)')

    def process_item(self, mediatype, dbid, mode):
        """Process a single library item.

        mode == MODE_GUI prompts the user to pick artwork; otherwise the item
        (plus its episodes, for configured TV shows) goes through the
        automatic path in process_medialist.
        """
        if self.processor_busy:
            return
        if mode == MODE_GUI:
            xbmc.executebuiltin('ActivateWindow(busydialog)')
        if mediatype == mediatypes.TVSHOW:
            mediaitem = quickjson.get_tvshow_details(dbid, tvshow_properties)
        elif mediatype == mediatypes.MOVIE:
            mediaitem = quickjson.get_movie_details(dbid, movie_properties)
        elif mediatype == mediatypes.EPISODE:
            mediaitem = quickjson.get_episode_details(dbid, episode_properties)
        else:
            xbmc.executebuiltin('Dialog.Close(busydialog)')
            xbmcgui.Dialog().notification("Artwork Beef", L(NOT_SUPPORTED_MESSAGE).format(mediatype), '-', 6500)
            return
        if mode == MODE_GUI:
            self.init_run()
            self.add_additional_iteminfo(mediaitem)
            gatherer = Gatherer(self.monitor, self.titlefree_fanart, self.only_filesystem)
            forcedart, availableart, _, error = gatherer.getartwork(mediaitem, False)
            if error:
                header = L(PROVIDER_ERROR_MESSAGE).format(error['providername'])
                xbmcgui.Dialog().notification(header, error['message'], xbmcgui.NOTIFICATION_ERROR)
                log('{0}\n{1}'.format(header, error['message']))
            for arttype, imagelist in availableart.iteritems():
                self.sort_images(arttype, imagelist, mediaitem['file'])
            xbmc.executebuiltin('Dialog.Close(busydialog)')
            if availableart:
                tag_forcedandexisting_art(availableart, forcedart, mediaitem['art'])
                selectedarttype, selectedart = prompt_for_artwork(mediaitem['mediatype'],
                    mediaitem['label'], availableart, self.monitor)
                if selectedarttype:
                    if mediatypes.artinfo[mediaitem['mediatype']][selectedarttype]['multiselect']:
                        # multiselect: selectedart is (urls to add, urls to drop);
                        # rebuild the numbered art slots (fanart, fanart1, ...)
                        existingurls = [url for exacttype, url in mediaitem['art'].iteritems() if exacttype.startswith(selectedarttype)]
                        urls_toset = [url for url in existingurls if url not in selectedart[1]]
                        newurls = [url for url in selectedart[0] if url not in urls_toset]
                        count = len(newurls)
                        urls_toset.extend(newurls)
                        selectedart = {}
                        i = 0
                        for url in urls_toset:
                            selectedart[selectedarttype + (str(i) if i else '')] = url
                            i += 1
                        # clear any leftover numbered slots beyond the new set
                        selectedart.update(dict((arttype, None) for arttype in mediaitem['art'].keys() if arttype.startswith(selectedarttype) and arttype not in selectedart.keys()))
                    else:
                        selectedart = {selectedarttype: selectedart}
                        count = 1
                    add_art_to_library(mediaitem['mediatype'], mediaitem.get('seasons'), mediaitem['dbid'], selectedart)
                    notifycount(count)
            else:
                xbmcgui.Dialog().notification(L(NOT_AVAILABLE_MESSAGE),
                    L(SOMETHING_MISSING) + ' ' + L(FINAL_MESSAGE), '-', 8000)
            self.finish_run()
        else:
            medialist = [mediaitem]
            # optionally fan out to episodes for shows opted in to auto episode fanart
            autoaddepisodes = addon.get_setting('autoaddepisodes_list') if addon.get_setting('episode.fanart') else ()
            if mediatype == mediatypes.TVSHOW and mediaitem['imdbnumber'] in autoaddepisodes:
                medialist.extend(quickjson.get_episodes(dbid, properties=episode_properties))
            self.process_medialist(medialist, True)

    def process_medialist(self, medialist, alwaysnotify=False, stop_on_error=False):
        """Automatically fill in missing artwork for each item in `medialist`.

        Returns a dict of processed ids: {'tvshow': {dbid: season}, 'movie': [...],
        'episode': [...]}. Shows a progress dialog for lists longer than 10.
        """
        self.init_run(len(medialist) > 10)
        processed = {'tvshow': {}, 'movie': [], 'episode': []}
        artcount = 0
        currentitem = 0
        gatherer = Gatherer(self.monitor, self.titlefree_fanart, self.only_filesystem)
        for mediaitem in medialist:
            if self.visible:
                self.progress.update(currentitem * 100 // len(medialist), message=mediaitem['label'])
            currentitem += 1
            self.add_additional_iteminfo(mediaitem)
            forcedart, availableart, services_hit, error = gatherer.getartwork(mediaitem)
            if error:
                header = L(PROVIDER_ERROR_MESSAGE).format(error['providername'])
                xbmcgui.Dialog().notification(header, error['message'], xbmcgui.NOTIFICATION_ERROR)
                log('{0}\n{1}'.format(header, error['message']))
                if stop_on_error:
                    break
            add_processeditem(processed, mediaitem)
            for arttype, imagelist in availableart.iteritems():
                self.sort_images(arttype, imagelist, mediaitem['file'])
            # NOTE(review): this mutates mediaitem['art'] in place via update() below
            existingart = mediaitem['art']
            # forced art is always applied
            selectedart = dict((key, artlist['url']) for key, artlist in forcedart.iteritems())
            existingart.update(selectedart)
            # then fill remaining gaps with the best auto-filtered candidates
            selectedart.update(self.get_top_missing_art(mediaitem['mediatype'], existingart, availableart, mediaitem.get('seasons')))
            if selectedart:
                add_art_to_library(mediaitem['mediatype'], mediaitem.get('seasons'), mediaitem['dbid'], selectedart)
                artcount += len(selectedart)
            # throttle only when remote services were actually queried
            if not services_hit:
                if self.monitor.abortRequested():
                    break
            elif self.monitor.waitForAbort(THROTTLE_TIME):
                break
        self.finish_run()
        if alwaysnotify or artcount:
            notifycount(artcount)
        return processed

    def setlanguages(self):
        """Resolve the UI language and the tuple of languages acceptable for
        automatic selection (None = language-neutral images)."""
        self.language = pykodi.get_language(xbmc.ISO_639_1)
        if self.language == 'en':
            self.autolanguages = (self.language, None)
        else:
            self.autolanguages = (self.language, 'en', None)

    def add_additional_iteminfo(self, mediaitem):
        """Normalize a JSON-RPC media item in place: set 'mediatype' and 'dbid',
        pull in season artwork for TV shows, resolve an episode's unique id,
        and unquote art URLs. Returns False for unrecognized items."""
        log('Processing {0}'.format(mediaitem['label']), xbmc.LOGINFO)
        if 'episodeid' in mediaitem:
            mediaitem['mediatype'] = mediatypes.EPISODE
            mediaitem['dbid'] = mediaitem['episodeid']
        elif 'tvshowid' in mediaitem:
            mediaitem['mediatype'] = mediatypes.TVSHOW
            mediaitem['dbid'] = mediaitem['tvshowid']
        elif 'movieid' in mediaitem:
            mediaitem['mediatype'] = mediatypes.MOVIE
            mediaitem['dbid'] = mediaitem['movieid']
        else:
            log('Not sure what mediatype this is.', xbmc.LOGWARNING)
            log(mediaitem, xbmc.LOGWARNING)
            return False
        if mediaitem['mediatype'] == mediatypes.TVSHOW:
            mediaitem['seasons'], art = self._get_seasons_artwork(quickjson.get_seasons(mediaitem['dbid']))
            mediaitem['art'].update(art)
        elif mediaitem['mediatype'] == mediatypes.EPISODE:
            mediaitem['imdbnumber'] = self._get_episodeid(mediaitem)
        mediaitem['art'] = dict((arttype, pykodi.unquoteimage(url)) for arttype, url in mediaitem['art'].iteritems())
        return True

    def sort_images(self, arttype, imagelist, mediapath):
        """Sort `imagelist` in place, best candidate first. Sort passes are
        applied lowest-priority first; the final pass dominates:
        1. Language, preferring fanart with no language/title if configured
        2a. Match discart to media source
        2. Separate on status, like goofy images
        3. Size (in 200px groups), up to preferredsize
        4. Rating
        """
        imagelist.sort(key=lambda image: image['rating'].sort, reverse=True)
        imagelist.sort(key=self.size_sort, reverse=True)
        imagelist.sort(key=lambda image: image['status'].sort)
        if arttype == 'discart':
            mediasubtype = get_media_source(mediapath)
            if mediasubtype != 'unknown':
                imagelist.sort(key=lambda image: 0 if image.get('subtype', SortedDisplay(None, '')).sort == mediasubtype else 1)
        imagelist.sort(key=lambda image: self._imagelanguage_sort(image, arttype))

    def size_sort(self, image):
        """Sort key: image size bucketed into 200px groups, capped at the
        preferred size; falls back to the provider's sort value when the
        display size isn't 'WxH'."""
        imagesplit = image['size'].display.split('x')
        if len(imagesplit) != 2:
            return image['size'].sort
        try:
            imagesize = int(imagesplit[0]), int(imagesplit[1])
        except ValueError:
            return image['size'].sort
        # scale down proportionally so oversized images don't outrank the cap
        if imagesize[0] > self.preferredsize[0]:
            shrink = self.preferredsize[0] / float(imagesize[0])
            imagesize = self.preferredsize[0], imagesize[1] * shrink
        if imagesize[1] > self.preferredsize[1]:
            shrink = self.preferredsize[1] / float(imagesize[1])
            imagesize = imagesize[0] * shrink, self.preferredsize[1]
        return max(imagesize) // 200

    def _imagelanguage_sort(self, image, arttype):
        """Sort key for language preference (lower sorts first).

        NOTE(review): non-matching languages return the language string itself,
        so the key mixes ints and strings — legal in Python 2 sorting only.
        """
        if arttype.endswith('fanart'):
            if not image.get('language'):
                return 0
            elif image['language'] == self.language:
                return 1 if self.titlefree_fanart else 0
            else:
                return image['language']
        else:
            return 0 if image['language'] == self.language else image['language']

    def get_top_missing_art(self, mediatype, existingart, availableart, seasons):
        """Pick the best auto-filtered candidate URL for each missing art type.

        Returns {exact art type: url}. Multiselect types (e.g. fanart) are
        filled up to the configured 'autolimit', skipping slots already set.
        """
        if not availableart:
            return {}
        newartwork = {}
        for missingart in list_missing_arttypes(mediatype, seasons, existingart.keys()):
            if missingart.startswith(mediatypes.SEASON):
                itemtype = mediatypes.SEASON
                artkey = missingart.rsplit('.', 1)[1]
            else:
                itemtype = mediatype
                artkey = missingart
            if missingart not in availableart:
                continue
            if mediatypes.artinfo[itemtype][artkey]['multiselect']:
                existingurls = []
                existingartnames = []
                for art, url in existingart.iteritems():
                    if art.startswith(missingart) and url:
                        existingurls.append(url)
                        existingartnames.append(art)
                newart = [art for art in availableart[missingart] if self._auto_filter(missingart, art, existingurls)]
                if not newart:
                    continue
                newartcount = 0
                for i in range(0, mediatypes.artinfo[itemtype][artkey]['autolimit']):
                    exacttype = '%s%s' % (artkey, i if i else '')
                    if exacttype not in existingartnames:
                        if newartcount >= len(newart):
                            break
                        if exacttype not in newartwork:
                            newartwork[exacttype] = []
                        newartwork[exacttype] = newart[newartcount]['url']
                        newartcount += 1
            else:
                # single-slot art type: take the first candidate passing the filter
                newart = next((art for art in availableart[missingart] if self._auto_filter(missingart, art)), None)
                if newart:
                    newartwork[missingart] = newart['url']
        return newartwork

    def _get_seasons_artwork(self, seasons):
        """From JSON-RPC season objects build ({season number: seasonid},
        {'season.N.arttype': url}) for season-specific artwork."""
        resultseasons = {}
        resultart = {}
        for season in seasons:
            resultseasons[season['season']] = season['seasonid']
            for arttype, url in season['art'].iteritems():
                if not arttype.startswith(('tvshow.', 'season.')):
                    resultart['%s.%s.%s' % (mediatypes.SEASON, season['season'], arttype)] = url
        return resultseasons, resultart

    def _get_episodeid(self, episode):
        """Return the episode's unique id (expected under the 'unknown' key,
        which is how the TheTVDB scraper stores it)."""
        if 'unknown' in episode['uniqueid']:
            return episode['uniqueid']['unknown']
        else:
            # NOTE(review): this looks broken — the conditional expression binds as
            # ((iteritems()[0] if ...) , ''), and iteritems() isn't subscriptable,
            # so `result` is always '' and a non-empty uniqueid would raise. Confirm.
            idsource, result = episode['uniqueid'].iteritems()[0] if episode['uniqueid'] else '', ''
            if result:
                # I don't know what this might be, I'm not even sure Kodi can do anything else at the moment, but just in case
                log("Didn't find 'unknown' uniqueid for episode, just picked the first, from '%s'." % idsource, xbmc.LOGINFO)
            else:
                log("Didn't find a uniqueid for episode '%s', can't look it up. I expect the ID from TheTVDB, which generally comes from the scraper." % episode['label'], xbmc.LOGNOTICE)
            return result

    def _auto_filter(self, arttype, art, ignoreurls=()):
        """True if `art` qualifies for automatic selection: rating >= 5,
        fanart large enough, acceptable language, not flagged no-auto,
        and not already used."""
        if art['rating'].sort < 5:
            return False
        if arttype.endswith('fanart') and art['size'].sort < self.minimum_size:
            return False
        return (art['language'] in self.autolanguages or arttype.endswith('fanart')) and art['status'] != NOAUTO_IMAGE and art['url'] not in ignoreurls
def add_art_to_library(mediatype, seasons, dbid, selectedart):
    """Persist selected artwork to the Kodi library via JSON-RPC.

    selectedart maps exact art types to URLs (None clears a slot). For TV
    shows, 'season.N.arttype' entries are routed to the matching season id
    from `seasons` ({season number: seasonid}); the rest go to the show.
    """
    if not selectedart:
        return
    if mediatype == mediatypes.TVSHOW:
        seriesart = {}
        allseasonart = {}
        for arttype, url in selectedart.iteritems():
            if arttype.startswith(mediatypes.SEASON + '.'):
                # split 'season.N.arttype' -> season number, bare art type
                season, arttype = arttype.rsplit('.', 2)[1:3]
                season = seasons[int(season)]
                if season not in allseasonart:
                    allseasonart[season] = {}
                allseasonart[season][arttype] = url
            else:
                seriesart[arttype] = url
        if seriesart:
            quickjson.set_tvshow_details(dbid, art=seriesart)
        for seasonid, seasonart in allseasonart.iteritems():
            quickjson.set_season_details(seasonid, art=seasonart)
    elif mediatype == mediatypes.MOVIE:
        quickjson.set_movie_details(dbid, art=selectedart)
    elif mediatype == mediatypes.EPISODE:
        quickjson.set_episode_details(dbid, art=selectedart)
def notifycount(count):
    """Log how many artworks were added and show the matching notification."""
    log(L(ARTWORK_ADDED_MESSAGE).format(count), xbmc.LOGINFO)
    if not count:
        # nothing was added: point the user at configuration hints
        xbmcgui.Dialog().notification(L(NO_ARTWORK_ADDED_MESSAGE),
            L(SOMETHING_MISSING) + ' ' + L(FINAL_MESSAGE), '-', 8000)
    else:
        xbmcgui.Dialog().notification(L(ARTWORK_ADDED_MESSAGE).format(count), L(FINAL_MESSAGE), '-', 7500)
def add_processeditem(processed, mediaitem):
    """Record `mediaitem` in the `processed` tracking structure.

    TV shows are stored as {dbid: season}; movies and episodes append
    their dbid to the matching list.
    """
    mediatype = mediaitem['mediatype']
    if mediatype == 'tvshow':
        processed['tvshow'][mediaitem['dbid']] = mediaitem['season']
    else:
        processed[mediatype].append(mediaitem['dbid'])
def get_media_source(mediapath):
    """Guess the media source from a file path: '3d', 'bluray', 'dvd',
    or 'unknown'. Matching is case-insensitive; 3D takes precedence."""
    path = mediapath.lower()
    if re.search(r'\b3d\b', path):
        return '3d'
    if path.endswith('.bdmv') or re.search(r'blu-?ray|b[rd]-?rip', path):
        return 'bluray'
    if path.endswith('.ifo') or re.search(r'\bdvd', path):
        return 'dvd'
    return 'unknown'
def tag_forcedandexisting_art(availableart, forcedart, existingart):
    """Merge forced and currently-set artwork into `availableart` in place.

    Forced images are inserted at the front of each art type's list (or
    merged with a matching URL, tagging 'second provider'); existing library
    art is then marked 'existing' on matches or inserted with a synthetic
    'current' provider entry.
    """
    # insertion cursor per art type, so forced images keep their relative order
    typeinsert = {}
    for exacttype, artlist in sorted(forcedart.iteritems(), key=lambda arttype: natural_sort(arttype[0])):
        # strip the trailing slot number: 'fanart2' -> 'fanart'
        arttype = exacttype.rstrip('0123456789')
        if arttype not in availableart:
            availableart[arttype] = artlist
        else:
            for image in artlist:
                match = next((available for available in availableart[arttype] if available['url'] == image['url']), None)
                if match:
                    if 'title' in image and 'title' not in match:
                        match['title'] = image['title']
                    match['second provider'] = image['provider'].display
                else:
                    typeinsert[arttype] = typeinsert[arttype] + 1 if arttype in typeinsert else 0
                    availableart[arttype].insert(typeinsert[arttype], image)
    typeinsert = {}
    for exacttype, existingurl in existingart.iteritems():
        arttype = exacttype.rstrip('0123456789')
        if arttype not in availableart:
            image = {'url': existingurl, 'preview': existingurl, 'title': exacttype,
                'existing': True, 'provider': SortedDisplay('current', L(CURRENT_ART))}
            availableart[arttype] = [image]
        else:
            match = next((available for available in availableart[arttype] if available['url'] == existingurl), None)
            if match:
                match['preview'] = existingurl
                match['existing'] = True
            else:
                typeinsert[arttype] = typeinsert[arttype] + 1 if arttype in typeinsert else 0
                image = {'url': existingurl, 'preview': existingurl, 'title': exacttype,
                    'existing': True, 'provider': SortedDisplay('current', L(CURRENT_ART))}
                availableart[arttype].insert(typeinsert[arttype], image)
Remove existing local artwork if no longer available
import re
import xbmc
import xbmcgui
from devhelper import pykodi
from devhelper import quickjson
from devhelper.pykodi import log
import mediatypes
from artworkselection import prompt_for_artwork
from gatherer import Gatherer, list_missing_arttypes, NOAUTO_IMAGE
from utils import SortedDisplay, natural_sort, localize as L
addon = pykodi.get_main_addon()
# Processing modes: fully automatic vs interactive GUI selection
MODE_AUTO = 'auto'
MODE_GUI = 'gui'
# Seconds to wait between items when web services were hit
THROTTLE_TIME = 0.15
DEFAULT_IMAGESIZE = '1920x1080'
# size setting -> (preferred width, preferred height, minimum auto-select fanart size)
imagesizes = {'1920x1080': (1920, 1080, 700), '1280x720': (1280, 720, 520)}
# JSON-RPC properties requested for each media type
tvshow_properties = ['art', 'imdbnumber', 'season', 'file']
movie_properties = ['art', 'imdbnumber', 'file']
episode_properties = ['art', 'uniqueid', 'tvshowid', 'season', 'file']
# Localized string IDs (resolved through `L`)
SOMETHING_MISSING = 32001
FINAL_MESSAGE = 32019
ADDING_ARTWORK_MESSAGE = 32020
NOT_AVAILABLE_MESSAGE = 32021
ARTWORK_ADDED_MESSAGE = 32022
NO_ARTWORK_ADDED_MESSAGE = 32023
PROVIDER_ERROR_MESSAGE = 32024
NOT_SUPPORTED_MESSAGE = 32025
CURRENT_ART = 13512
class ArtworkProcessor(object):
def __init__(self, monitor=None):
self.monitor = monitor or xbmc.Monitor()
self.language = None
self.autolanguages = None
self.progress = xbmcgui.DialogProgressBG()
self.visible = False
self.update_settings()
def update_settings(self):
self.titlefree_fanart = addon.get_setting('titlefree_fanart')
self.only_filesystem = addon.get_setting('only_filesystem')
sizesetting = addon.get_setting('preferredsize')
if sizesetting in imagesizes:
self.preferredsize = imagesizes[sizesetting][0:2]
self.minimum_size = imagesizes[sizesetting][2]
else:
self.preferredsize = imagesizes[DEFAULT_IMAGESIZE][0:2]
self.minimum_size = imagesizes[DEFAULT_IMAGESIZE][2]
addon.set_setting('preferredsize', DEFAULT_IMAGESIZE)
def create_progress(self):
if not self.visible:
self.progress.create(L(ADDING_ARTWORK_MESSAGE), "")
self.visible = True
def close_progress(self):
if self.visible:
self.progress.close()
self.visible = False
def init_run(self, show_progress=False):
self.setlanguages()
if show_progress:
self.create_progress()
def finish_run(self):
if self.visible:
self.close_progress()
@property
def processor_busy(self):
return pykodi.get_conditional('!StringCompare(Window(Home).Property(ArtworkBeef.Status),idle)')
def process_item(self, mediatype, dbid, mode):
if self.processor_busy:
return
if mode == MODE_GUI:
xbmc.executebuiltin('ActivateWindow(busydialog)')
if mediatype == mediatypes.TVSHOW:
mediaitem = quickjson.get_tvshow_details(dbid, tvshow_properties)
elif mediatype == mediatypes.MOVIE:
mediaitem = quickjson.get_movie_details(dbid, movie_properties)
elif mediatype == mediatypes.EPISODE:
mediaitem = quickjson.get_episode_details(dbid, episode_properties)
else:
xbmc.executebuiltin('Dialog.Close(busydialog)')
xbmcgui.Dialog().notification("Artwork Beef", L(NOT_SUPPORTED_MESSAGE).format(mediatype), '-', 6500)
return
if mode == MODE_GUI:
self.init_run()
self.add_additional_iteminfo(mediaitem)
gatherer = Gatherer(self.monitor, self.titlefree_fanart, self.only_filesystem)
forcedart, availableart, _, error = gatherer.getartwork(mediaitem, False)
if error:
header = L(PROVIDER_ERROR_MESSAGE).format(error['providername'])
xbmcgui.Dialog().notification(header, error['message'], xbmcgui.NOTIFICATION_ERROR)
log('{0}\n{1}'.format(header, error['message']))
for arttype, imagelist in availableart.iteritems():
self.sort_images(arttype, imagelist, mediaitem['file'])
xbmc.executebuiltin('Dialog.Close(busydialog)')
if availableart:
tag_forcedandexisting_art(availableart, forcedart, mediaitem['art'])
selectedarttype, selectedart = prompt_for_artwork(mediaitem['mediatype'],
mediaitem['label'], availableart, self.monitor)
if selectedarttype:
if mediatypes.artinfo[mediaitem['mediatype']][selectedarttype]['multiselect']:
existingurls = [url for exacttype, url in mediaitem['art'].iteritems() if exacttype.startswith(selectedarttype)]
urls_toset = [url for url in existingurls if url not in selectedart[1]]
newurls = [url for url in selectedart[0] if url not in urls_toset]
count = len(newurls)
urls_toset.extend(newurls)
selectedart = {}
i = 0
for url in urls_toset:
selectedart[selectedarttype + (str(i) if i else '')] = url
i += 1
selectedart.update(dict((arttype, None) for arttype in mediaitem['art'].keys() if arttype.startswith(selectedarttype) and arttype not in selectedart.keys()))
else:
selectedart = {selectedarttype: selectedart}
count = 1
add_art_to_library(mediaitem['mediatype'], mediaitem.get('seasons'), mediaitem['dbid'], selectedart)
notifycount(count)
else:
xbmcgui.Dialog().notification(L(NOT_AVAILABLE_MESSAGE),
L(SOMETHING_MISSING) + ' ' + L(FINAL_MESSAGE), '-', 8000)
self.finish_run()
else:
medialist = [mediaitem]
autoaddepisodes = addon.get_setting('autoaddepisodes_list') if addon.get_setting('episode.fanart') else ()
if mediatype == mediatypes.TVSHOW and mediaitem['imdbnumber'] in autoaddepisodes:
medialist.extend(quickjson.get_episodes(dbid, properties=episode_properties))
self.process_medialist(medialist, True)
def process_medialist(self, medialist, alwaysnotify=False, stop_on_error=False):
self.init_run(len(medialist) > 10)
processed = {'tvshow': {}, 'movie': [], 'episode': []}
artcount = 0
currentitem = 0
gatherer = Gatherer(self.monitor, self.titlefree_fanart, self.only_filesystem)
for mediaitem in medialist:
if self.visible:
self.progress.update(currentitem * 100 // len(medialist), message=mediaitem['label'])
currentitem += 1
self.add_additional_iteminfo(mediaitem)
forcedart, availableart, services_hit, error = gatherer.getartwork(mediaitem)
if error:
header = L(PROVIDER_ERROR_MESSAGE).format(error['providername'])
xbmcgui.Dialog().notification(header, error['message'], xbmcgui.NOTIFICATION_ERROR)
log('{0}\n{1}'.format(header, error['message']))
if stop_on_error:
break
add_processeditem(processed, mediaitem)
for arttype, imagelist in availableart.iteritems():
self.sort_images(arttype, imagelist, mediaitem['file'])
existingart = dict(mediaitem['art'])
selectedart = dict((key, image['url']) for key, image in forcedart.iteritems())
# Remove existing local artwork if it is no longer available
localurls = [(arttype, image['url']) for arttype, image in forcedart.iteritems() if not image['url'].startswith(('http', 'image://video'))]
for arttype, url in existingart.iteritems():
if not url.startswith(('http', 'image://video')) and (arttype, url) not in localurls and (
mediaitem['mediatype'] != mediatypes.EPISODE or '.' not in arttype):
selectedart[arttype] = None
for arttype, url in selectedart.iteritems():
if url:
existingart[arttype] = url
elif arttype in existingart:
del existingart[arttype]
selectedart.update(self.get_top_missing_art(mediaitem['mediatype'], existingart, availableart, mediaitem.get('seasons')))
if selectedart:
add_art_to_library(mediaitem['mediatype'], mediaitem.get('seasons'), mediaitem['dbid'], selectedart)
artcount += len(selectedart)
if not services_hit:
if self.monitor.abortRequested():
break
elif self.monitor.waitForAbort(THROTTLE_TIME):
break
self.finish_run()
if alwaysnotify or artcount:
notifycount(artcount)
return processed
def setlanguages(self):
self.language = pykodi.get_language(xbmc.ISO_639_1)
if self.language == 'en':
self.autolanguages = (self.language, None)
else:
self.autolanguages = (self.language, 'en', None)
def add_additional_iteminfo(self, mediaitem):
log('Processing {0}'.format(mediaitem['label']), xbmc.LOGINFO)
if 'episodeid' in mediaitem:
mediaitem['mediatype'] = mediatypes.EPISODE
mediaitem['dbid'] = mediaitem['episodeid']
elif 'tvshowid' in mediaitem:
mediaitem['mediatype'] = mediatypes.TVSHOW
mediaitem['dbid'] = mediaitem['tvshowid']
elif 'movieid' in mediaitem:
mediaitem['mediatype'] = mediatypes.MOVIE
mediaitem['dbid'] = mediaitem['movieid']
else:
log('Not sure what mediatype this is.', xbmc.LOGWARNING)
log(mediaitem, xbmc.LOGWARNING)
return False
if mediaitem['mediatype'] == mediatypes.TVSHOW:
mediaitem['seasons'], art = self._get_seasons_artwork(quickjson.get_seasons(mediaitem['dbid']))
mediaitem['art'].update(art)
elif mediaitem['mediatype'] == mediatypes.EPISODE:
mediaitem['imdbnumber'] = self._get_episodeid(mediaitem)
mediaitem['art'] = dict((arttype, pykodi.unquoteimage(url)) for arttype, url in mediaitem['art'].iteritems())
return True
def sort_images(self, arttype, imagelist, mediapath):
# 1. Language, preferring fanart with no language/title if configured
# 2a. Match discart to media source
# 2. Separate on status, like goofy images
# 3. Size (in 200px groups), up to preferredsize
# 4. Rating
imagelist.sort(key=lambda image: image['rating'].sort, reverse=True)
imagelist.sort(key=self.size_sort, reverse=True)
imagelist.sort(key=lambda image: image['status'].sort)
if arttype == 'discart':
mediasubtype = get_media_source(mediapath)
if mediasubtype != 'unknown':
imagelist.sort(key=lambda image: 0 if image.get('subtype', SortedDisplay(None, '')).sort == mediasubtype else 1)
imagelist.sort(key=lambda image: self._imagelanguage_sort(image, arttype))
def size_sort(self, image):
imagesplit = image['size'].display.split('x')
if len(imagesplit) != 2:
return image['size'].sort
try:
imagesize = int(imagesplit[0]), int(imagesplit[1])
except ValueError:
return image['size'].sort
if imagesize[0] > self.preferredsize[0]:
shrink = self.preferredsize[0] / float(imagesize[0])
imagesize = self.preferredsize[0], imagesize[1] * shrink
if imagesize[1] > self.preferredsize[1]:
shrink = self.preferredsize[1] / float(imagesize[1])
imagesize = imagesize[0] * shrink, self.preferredsize[1]
return max(imagesize) // 200
    def _imagelanguage_sort(self, image, arttype):
        # Sort key for image language; lower sorts first. Returns mixed
        # int/str keys, which compare consistently under Python 2 sorting.
        if arttype.endswith('fanart'):
            # fanart: textless images (no language) sort first, unless the
            # user prefers title-free fanart disabled for their own language
            if not image.get('language'):
                return 0
            elif image['language'] == self.language:
                return 1 if self.titlefree_fanart else 0
            else:
                # other languages sort after, ordered by language code
                return image['language']
        else:
            # NOTE(review): uses image['language'] directly here but
            # image.get('language') in the fanart branch — raises KeyError for
            # non-fanart images lacking the key; confirm all providers set it.
            return 0 if image['language'] == self.language else image['language']
def get_top_missing_art(self, mediatype, existingart, availableart, seasons):
if not availableart:
return {}
newartwork = {}
for missingart in list_missing_arttypes(mediatype, seasons, existingart.keys()):
if missingart.startswith(mediatypes.SEASON):
itemtype = mediatypes.SEASON
artkey = missingart.rsplit('.', 1)[1]
else:
itemtype = mediatype
artkey = missingart
if missingart not in availableart:
continue
if mediatypes.artinfo[itemtype][artkey]['multiselect']:
existingurls = []
existingartnames = []
for art, url in existingart.iteritems():
if art.startswith(missingart) and url:
existingurls.append(url)
existingartnames.append(art)
newart = [art for art in availableart[missingart] if self._auto_filter(missingart, art, existingurls)]
if not newart:
continue
newartcount = 0
for i in range(0, mediatypes.artinfo[itemtype][artkey]['autolimit']):
exacttype = '%s%s' % (artkey, i if i else '')
if exacttype not in existingartnames:
if newartcount >= len(newart):
break
if exacttype not in newartwork:
newartwork[exacttype] = []
newartwork[exacttype] = newart[newartcount]['url']
newartcount += 1
else:
newart = next((art for art in availableart[missingart] if self._auto_filter(missingart, art)), None)
if newart:
newartwork[missingart] = newart['url']
return newartwork
def _get_seasons_artwork(self, seasons):
resultseasons = {}
resultart = {}
for season in seasons:
resultseasons[season['season']] = season['seasonid']
for arttype, url in season['art'].iteritems():
if not arttype.startswith(('tvshow.', 'season.')):
resultart['%s.%s.%s' % (mediatypes.SEASON, season['season'], arttype)] = url
return resultseasons, resultart
def _get_episodeid(self, episode):
if 'unknown' in episode['uniqueid']:
return episode['uniqueid']['unknown']
else:
idsource, result = episode['uniqueid'].iteritems()[0] if episode['uniqueid'] else '', ''
if result:
# I don't know what this might be, I'm not even sure Kodi can do anything else at the moment, but just in case
log("Didn't find 'unknown' uniqueid for episode, just picked the first, from '%s'." % idsource, xbmc.LOGINFO)
else:
log("Didn't find a uniqueid for episode '%s', can't look it up. I expect the ID from TheTVDB, which generally comes from the scraper." % episode['label'], xbmc.LOGNOTICE)
return result
def _auto_filter(self, arttype, art, ignoreurls=()):
if art['rating'].sort < 5:
return False
if arttype.endswith('fanart') and art['size'].sort < self.minimum_size:
return False
return (art['language'] in self.autolanguages or arttype.endswith('fanart')) and art['status'] != NOAUTO_IMAGE and art['url'] not in ignoreurls
def add_art_to_library(mediatype, seasons, dbid, selectedart):
    """Persist selected artwork to the Kodi library for one media item.

    :param seasons: mapping of season number -> season DB ID (TV shows only)
    :param selectedart: mapping of exact art type -> URL
    """
    if not selectedart:
        return
    if mediatype == mediatypes.TVSHOW:
        # split art between the series itself and its individual seasons
        seriesart = {}
        artbyseason = {}
        for arttype, url in selectedart.items():
            if not arttype.startswith(mediatypes.SEASON + '.'):
                seriesart[arttype] = url
                continue
            seasonnum, shorttype = arttype.rsplit('.', 2)[1:3]
            seasonid = seasons[int(seasonnum)]
            artbyseason.setdefault(seasonid, {})[shorttype] = url
        if seriesart:
            quickjson.set_tvshow_details(dbid, art=seriesart)
        for seasonid, seasonart in artbyseason.items():
            quickjson.set_season_details(seasonid, art=seasonart)
    elif mediatype == mediatypes.MOVIE:
        quickjson.set_movie_details(dbid, art=selectedart)
    elif mediatype == mediatypes.EPISODE:
        quickjson.set_episode_details(dbid, art=selectedart)
def notifycount(count):
    """Log how many artworks were added and show a matching GUI notification."""
    log(L(ARTWORK_ADDED_MESSAGE).format(count), xbmc.LOGINFO)
    dialog = xbmcgui.Dialog()
    if count:
        dialog.notification(L(ARTWORK_ADDED_MESSAGE).format(count), L(FINAL_MESSAGE), '-', 7500)
    else:
        dialog.notification(L(NO_ARTWORK_ADDED_MESSAGE),
            L(SOMETHING_MISSING) + ' ' + L(FINAL_MESSAGE), '-', 8000)
def add_processeditem(processed, mediaitem):
    """Record a handled media item in the 'processed' bookkeeping structure."""
    mediatype = mediaitem['mediatype']
    if mediatype == 'tvshow':
        # TV shows track the season count rather than a plain done-flag
        processed['tvshow'][mediaitem['dbid']] = mediaitem['season']
    else:
        processed[mediatype].append(mediaitem['dbid'])
def get_media_source(mediapath):
    """Guess the media source type from a file path.

    :return: one of '3d', 'bluray', 'dvd' or 'unknown'
    """
    path = mediapath.lower()
    if re.search(r'\b3d\b', path):
        return '3d'
    if path.endswith('.bdmv') or re.search(r'blu-?ray|b[rd]-?rip', path):
        return 'bluray'
    if path.endswith('.ifo') or re.search(r'\bdvd', path):
        return 'dvd'
    return 'unknown'
def tag_forcedandexisting_art(availableart, forcedart, existingart):
    """Merge forced and library-existing artwork into availableart (in place).

    forcedart: images that must be offered, keyed on exact art type (e.g. 'fanart2')
    existingart: art currently set in the library, keyed on exact art type
    Images already present in availableart are tagged on the existing entry;
    new ones are inserted at the front, preserving their relative order.
    """
    # typeinsert tracks, per base art type, the next front-insert index so
    # multiple new images for one type stay in their original order
    typeinsert = {}
    for exacttype, artlist in sorted(forcedart.iteritems(), key=lambda arttype: natural_sort(arttype[0])):
        # strip the trailing slot number to get the base art type
        arttype = exacttype.rstrip('0123456789')
        if arttype not in availableart:
            availableart[arttype] = artlist
        else:
            for image in artlist:
                match = next((available for available in availableart[arttype] if available['url'] == image['url']), None)
                if match:
                    # same image offered by another provider: merge metadata
                    if 'title' in image and 'title' not in match:
                        match['title'] = image['title']
                    match['second provider'] = image['provider'].display
                else:
                    typeinsert[arttype] = typeinsert[arttype] + 1 if arttype in typeinsert else 0
                    availableart[arttype].insert(typeinsert[arttype], image)
    typeinsert = {}
    for exacttype, existingurl in existingart.iteritems():
        arttype = exacttype.rstrip('0123456789')
        if arttype not in availableart:
            image = {'url': existingurl, 'preview': existingurl, 'title': exacttype,
                'existing': True, 'provider': SortedDisplay('current', L(CURRENT_ART))}
            availableart[arttype] = [image]
        else:
            match = next((available for available in availableart[arttype] if available['url'] == existingurl), None)
            if match:
                # already offered by a provider: just mark it as current art
                match['preview'] = existingurl
                match['existing'] = True
            else:
                typeinsert[arttype] = typeinsert[arttype] + 1 if arttype in typeinsert else 0
                image = {'url': existingurl, 'preview': existingurl, 'title': exacttype,
                    'existing': True, 'provider': SortedDisplay('current', L(CURRENT_ART))}
                availableart[arttype].insert(typeinsert[arttype], image)
|
import logging
import time
import django_rq
import os
import requests
import shutil
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from osmaxx.conversion import models as conversion_models
from osmaxx.conversion._settings import CONVERSION_SETTINGS
# Module-level logging setup: default handler to stderr, module-named logger.
logging.basicConfig()
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Polls rq forever, syncing job state back to conversion Job models.

    Each cycle updates running and failed rq jobs: mirrors their status onto
    the model, notifies the job's callback URL and harvests the result file
    of finished jobs.
    """
    help = 'updates currently active jobs - runs until interrupted'

    def handle(self, *args, **options):
        while True:
            logger.info('handling running jobs')
            self._handle_running_jobs()
            logger.info('handling failed jobs')
            self._handle_failed_jobs()
            time.sleep(CONVERSION_SETTINGS['result_harvest_interval_seconds'])

    def _handle_failed_jobs(self):
        failed_queue = django_rq.get_failed_queue()
        for job_id in failed_queue.job_ids:
            self._update_job(job_id=job_id, queue=failed_queue)

    def _handle_running_jobs(self):
        queue = django_rq.get_queue()
        active_jobs = conversion_models.Job.objects.exclude(status__in=conversion_models.Job.STATUSES_FINAL)\
            .values_list('rq_job_id', flat=True)
        for job_id in active_jobs:
            self._update_job(job_id=job_id, queue=queue)

    def _update_job(self, job_id, queue):
        """Sync one rq job's state to its Job model; harvest results when finished."""
        job = queue.fetch_job(job_id)
        if job is None:  # already processed by someone else
            return
        # BUGFIX: rq job ids are strings, so '%d' broke the log formatting
        logger.info('updating job %s', job_id)
        try:
            conversion_job = conversion_models.Job.objects.get(rq_job_id=job_id)
            conversion_job.status = job.status
            self._notify(conversion_job)
            # conversion_models.Job is the same class the old inline
            # `from osmaxx.conversion.models import Job` re-imported
            if job.status == conversion_models.Job.FINISHED:
                add_file_to_job(conversion_job=conversion_job, result_zip_file=job.kwargs['output_zip_file_path'])
            conversion_job.save()
        except ObjectDoesNotExist as e:
            # no matching model (e.g. job enqueued elsewhere): log and move on
            logger.exception(e)
        if job.status in conversion_models.Job.STATUSES_FINAL:
            job.delete()

    def _notify(self, conversion_job):
        """Best-effort GET to the job's callback URL; failures are only logged."""
        data = {'status': conversion_job.status, 'job': conversion_job.get_absolute_url()}
        try:
            requests.get(conversion_job.callback_url, params=data)
        except Exception:
            # narrowed from a bare except: (which also swallowed KeyboardInterrupt);
            # notification stays deliberately best-effort
            logger.error('failed to send notification for job {}'.format(conversion_job.id))
def add_file_to_job(*, conversion_job, result_zip_file):
    """Move a finished conversion's zip into MEDIA_ROOT and attach it to the job.

    Mutates conversion_job.resulting_file; the caller is expected to save the
    model. Returns the absolute path the file was moved to.
    """
    conversion_job.resulting_file.name = conversion_job.zip_file_relative_path()
    new_path = os.path.join(settings.MEDIA_ROOT, conversion_job.resulting_file.name)
    # exist_ok=True already covers the pre-existing directory case, so the
    # old os.path.exists() pre-check was redundant (and racy)
    os.makedirs(os.path.dirname(new_path), exist_ok=True)
    shutil.move(result_zip_file, new_path)
    return new_path
extract variable
import logging
import time
import django_rq
import os
import requests
import shutil
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from osmaxx.conversion import models as conversion_models
from osmaxx.conversion._settings import CONVERSION_SETTINGS
# Module-level logging setup: default handler to stderr, module-named logger.
logging.basicConfig()
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Polls rq forever, syncing job state back to conversion Job models.

    Each cycle updates running and failed rq jobs: mirrors their status onto
    the model, notifies the job's callback URL and harvests the result file
    of finished jobs.
    """
    help = 'updates currently active jobs - runs until interrupted'

    def handle(self, *args, **options):
        while True:
            logger.info('handling running jobs')
            self._handle_running_jobs()
            logger.info('handling failed jobs')
            self._handle_failed_jobs()
            time.sleep(CONVERSION_SETTINGS['result_harvest_interval_seconds'])

    def _handle_failed_jobs(self):
        failed_queue = django_rq.get_failed_queue()
        for job_id in failed_queue.job_ids:
            self._update_job(job_id=job_id, queue=failed_queue)

    def _handle_running_jobs(self):
        queue = django_rq.get_queue()
        active_jobs = conversion_models.Job.objects.exclude(status__in=conversion_models.Job.STATUSES_FINAL)\
            .values_list('rq_job_id', flat=True)
        for job_id in active_jobs:
            self._update_job(job_id=job_id, queue=queue)

    def _update_job(self, job_id, queue):
        """Sync one rq job's state to its Job model; harvest results when finished."""
        job = queue.fetch_job(job_id)
        if job is None:  # already processed by someone else
            return
        # BUGFIX: rq job ids are strings, so '%d' broke the log formatting
        logger.info('updating job %s', job_id)
        try:
            conversion_job = conversion_models.Job.objects.get(rq_job_id=job_id)
            conversion_job.status = job.status
            self._notify(conversion_job)
            # conversion_models.Job is the same class the old inline
            # `from osmaxx.conversion.models import Job` re-imported
            if job.status == conversion_models.Job.FINISHED:
                add_file_to_job(conversion_job=conversion_job, result_zip_file=job.kwargs['output_zip_file_path'])
            conversion_job.save()
        except ObjectDoesNotExist as e:
            # no matching model (e.g. job enqueued elsewhere): log and move on
            logger.exception(e)
        if job.status in conversion_models.Job.STATUSES_FINAL:
            job.delete()

    def _notify(self, conversion_job):
        """Best-effort GET to the job's callback URL; failures are only logged."""
        data = {'status': conversion_job.status, 'job': conversion_job.get_absolute_url()}
        try:
            requests.get(conversion_job.callback_url, params=data)
        except Exception:
            # narrowed from a bare except: (which also swallowed KeyboardInterrupt);
            # notification stays deliberately best-effort
            logger.error('failed to send notification for job {}'.format(conversion_job.id))
def add_file_to_job(*, conversion_job, result_zip_file):
    """Move a finished conversion's zip into MEDIA_ROOT and attach it to the job.

    Mutates conversion_job.resulting_file; the caller is expected to save the
    model. Returns the absolute path the file was moved to.
    """
    conversion_job.resulting_file.name = conversion_job.zip_file_relative_path()
    new_path = os.path.join(settings.MEDIA_ROOT, conversion_job.resulting_file.name)
    new_directory_path = os.path.dirname(new_path)
    # exist_ok=True already covers the pre-existing directory case, so the
    # old os.path.exists() pre-check was redundant (and racy)
    os.makedirs(new_directory_path, exist_ok=True)
    shutil.move(result_zip_file, new_path)
    return new_path
|
import types
from rx.concurrency import current_thread_scheduler
from rx.disposables import Disposable
from .autodetachobserver import AutoDetachObserver
from .observable import Observable
class AnonymousObservable(Observable):
    """Class to create an Observable instance from a delegate-based
    implementation of the Subscribe method."""

    def __init__(self, subscribe):
        """Creates an observable sequence object from the specified subscription
        function.

        Keyword arguments:
        subscribe -- Subscribe method implementation."""

        def _subscribe(observer):
            """Decorator for subscribe. It wraps the observer in an
            AutoDetachObserver and fixes the returned disposable"""

            def fix_subscriber(subscriber):
                """Fixes subscriber to make sure it returns a Disposable instead
                of None or a dispose function"""

                if subscriber is None:
                    subscriber = Disposable.empty()
                elif isinstance(subscriber, types.FunctionType):
                    # isinstance instead of `type(x) == types.FunctionType`:
                    # idiomatic type test, same outcome for plain functions
                    subscriber = Disposable(subscriber)

                return subscriber

            def set_disposable(scheduler=None, value=None):
                try:
                    auto_detach_observer.disposable = fix_subscriber(subscribe(auto_detach_observer))
                except Exception as ex:
                    # let the observer handle the failure; re-raise if it didn't
                    if not auto_detach_observer.fail(ex):
                        raise ex

            auto_detach_observer = AutoDetachObserver(observer)

            # Subscribe needs to set up the trampoline before for subscribing.
            # Actually, the first call to Subscribe creates the trampoline so
            # that it may assign its disposable before any observer executes
            # OnNext over the CurrentThreadScheduler. This enables single-
            # threaded cancellation
            # https://social.msdn.microsoft.com/Forums/en-US/eb82f593-9684-4e27-
            # 97b9-8b8886da5c33/whats-the-rationale-behind-how-currentthreadsche
            # dulerschedulerequired-behaves?forum=rx
            if current_thread_scheduler.schedule_required():
                current_thread_scheduler.schedule(set_disposable)
            else:
                set_disposable()

            return auto_detach_observer

        super(AnonymousObservable, self).__init__(_subscribe)
Simpler check for non-disposable
from rx.concurrency import current_thread_scheduler
from rx.disposables import Disposable
from .autodetachobserver import AutoDetachObserver
from .observable import Observable
class AnonymousObservable(Observable):
    """Class to create an Observable instance from a delegate-based
    implementation of the Subscribe method."""

    def __init__(self, subscribe):
        """Creates an observable sequence object from the specified subscription
        function.

        Keyword arguments:
        subscribe -- Subscribe method implementation."""

        def _subscribe(observer):
            """Decorator for subscribe. It wraps the observer in an
            AutoDetachObserver and fixes the returned disposable"""

            def fix_subscriber(subscriber):
                """Fixes subscriber to make sure it returns a Disposable instead
                of None or a dispose function"""

                # anything exposing .dispose is treated as already disposable;
                # None and bare dispose-callables get wrapped by Disposable
                if not hasattr(subscriber, "dispose"):
                    subscriber = Disposable(subscriber)

                return subscriber

            def set_disposable(scheduler=None, value=None):
                try:
                    # run the user's subscribe and capture its cleanup handle
                    auto_detach_observer.disposable = fix_subscriber(subscribe(auto_detach_observer))
                except Exception as ex:
                    # let the observer handle the failure; re-raise if it didn't
                    if not auto_detach_observer.fail(ex):
                        raise ex

            auto_detach_observer = AutoDetachObserver(observer)

            # Subscribe needs to set up the trampoline before for subscribing.
            # Actually, the first call to Subscribe creates the trampoline so
            # that it may assign its disposable before any observer executes
            # OnNext over the CurrentThreadScheduler. This enables single-
            # threaded cancellation
            # https://social.msdn.microsoft.com/Forums/en-US/eb82f593-9684-4e27-
            # 97b9-8b8886da5c33/whats-the-rationale-behind-how-currentthreadsche
            # dulerschedulerequired-behaves?forum=rx
            if current_thread_scheduler.schedule_required():
                current_thread_scheduler.schedule(set_disposable)
            else:
                set_disposable()

            return auto_detach_observer

        super(AnonymousObservable, self).__init__(_subscribe)
|
import datetime
import os
import stat
import types
import warnings
from twisted.python import log
from twisted.internet import defer
from twisted.internet.endpoints import TCP4ClientEndpoint, UNIXClientEndpoint
from twisted.internet.interfaces import IReactorCore, IStreamClientEndpoint
from zope.interface import implements
from txtorcon import TorProtocolFactory
from txtorcon.stream import Stream
from txtorcon.circuit import Circuit
from txtorcon.router import Router, hashFromHexId
from txtorcon.addrmap import AddrMap
from txtorcon.torcontrolprotocol import parse_keywords
from txtorcon.log import txtorlog
from txtorcon.torcontrolprotocol import TorProtocolError
from txtorcon.interface import ITorControlProtocol, IRouterContainer, ICircuitListener
from txtorcon.interface import ICircuitContainer, IStreamListener, IStreamAttacher
from spaghetti import FSM, State, Transition
def _build_state(proto):
    """Wrap a protocol in a TorState and return its post_bootstrap Deferred."""
    return TorState(proto).post_bootstrap
def _wait_for_proto(proto):
return proto.post_bootstrap
def build_tor_connection(connection, build_state=True, wait_for_proto=True,
                         password_function=lambda: None):
    """
    This is used to build a valid TorState (which has .protocol for
    the TorControlProtocol). For example::

        from twisted.internet import reactor
        from twisted.internet.endpoints import TCP4ClientEndpoint
        import txtorcon

        def example(state):
            print "Fully bootstrapped state:",state
            print "   with bootstrapped protocol:",state.protocol

        d = txtorcon.build_tor_connection(TCP4ClientEndpoint(reactor,
                                                             "localhost",
                                                             9051))
        d.addCallback(example)
        reactor.run()

    :param connection:
        An IStreamClientEndpoint, a (reactor, unix-socket-path) tuple
        or a (reactor, host, port) tuple describing where to reach Tor.

    :param password_function:
        See :class:`txtorcon.TorControlProtocol`

    :param build_state:
        If True (the default) a TorState object will be
        built as well. If False, just a TorControlProtocol will be
        returned via the Deferred. May also be a callable, which then
        replaces the default state-building callback.

    :return:
        a Deferred that fires with a TorControlProtocol or, if you
        specified build_state=True, a TorState. In both cases, the
        object has finished bootstrapping
        (i.e. TorControlProtocol.post_bootstrap or
        TorState.post_bootstap has fired, as needed)
    """

    if IStreamClientEndpoint.providedBy(connection):
        # caller supplied a ready-made endpoint; use it as-is
        endpoint = connection
    elif isinstance(connection, tuple):
        if len(connection) == 2:
            # (reactor, unix-socket-path): only usable if we can read the socket
            reactor, socket = connection
            if (os.path.exists(socket) and
                os.stat(socket).st_mode & (stat.S_IRGRP | stat.S_IRUSR |
                                           stat.S_IROTH)):
                endpoint = UNIXClientEndpoint(reactor, socket)
            else:
                raise ValueError('Can\'t use "%s" as a socket' % (socket, ))
        elif len(connection) == 3:
            # (reactor, host, port)
            endpoint = TCP4ClientEndpoint(*connection)
        else:
            raise TypeError('Expected either a (reactor, socket)- or a '
                            '(reactor, host, port)-tuple for argument '
                            '"connection", got %s' % (connection, ))
    else:
        raise TypeError('Expected a (reactor, socket)- or a (reactor, host, '
                        'port)-tuple or an object implementing IStreamClient'
                        'Endpoint for argument "connection", got %s' %
                        (connection, ))
    d = endpoint.connect(TorProtocolFactory(password_function=password_function))
    # chain the appropriate bootstrap-waiting callback; both kwargs may be
    # callables supplied by the caller to override the defaults
    if build_state:
        d.addCallback(build_state if callable(build_state) else _build_state)
    elif wait_for_proto:
        d.addCallback(wait_for_proto if callable(wait_for_proto) else
                      _wait_for_proto)
    return d
def build_local_tor_connection(reactor, host='127.0.0.1', port=9051,
                               socket='/var/run/tor/control', *args, **kwargs):
    """
    This builds a connection to a local Tor: the control socket
    (/var/run/tor/control by default) is tried first, falling back to
    host:port (127.0.0.1:9051 by default) if the socket is unusable.
    (The docstring previously claimed TCP was tried first, contradicting
    the code.) See also :meth:`build_tor_connection
    <txtorcon.torstate.build_tor_connection>` for other key-word
    arguments that are accepted here also.

    :param host:
        An IP address to find Tor at. Corresponds to the
        ControlListenAddress torrc option.

    :param port:
        The port to use with the address when trying to contact
        Tor. This corresponds to the ControlPort option in torrc
        (default is 9051).
    """

    try:
        return build_tor_connection((reactor, socket), *args, **kwargs)
    except Exception:
        # narrowed from a bare except:. Only synchronous failures (e.g. an
        # unusable socket path raising ValueError) land here; connection
        # errors after this point travel down the returned Deferred instead.
        return build_tor_connection((reactor, host, port), *args, **kwargs)
def flags_from_dict(kw):
    """
    This turns a dict with keys that are flags (e.g. for CLOSECIRCUIT,
    CLOSESTREAM) only if the values are true.
    """
    # each emitted flag carries a leading space so the result can be appended
    # directly to a command string; an empty/falsy dict yields ''
    return ''.join(' ' + str(flag) for flag, value in kw.items() if value)
class TorState(object):
"""
This tracks the current state of Tor using a TorControlProtocol.
On setup it first queries the initial state of streams and
circuits. It then asks for updates via the listeners. It requires
an ITorControlProtocol instance. The control protocol doesn't need
to be bootstrapped yet. The Deferred .post_boostrap is driggered
when the TorState instance is fully ready to go. The easiest way
is to use the helper method
:func:`txtorcon.build_tor_connection`. For details, see the
implementation of that.
You may add an :class:`txtorcon.interface.IStreamAttacher` to
provide a custom mapping for Strams to Circuits (by default Tor
picks by itself).
This is also a good example of the various listeners, and acts as
an :class:`txtorcon.interface.ICircuitContainer` and
:class:`txtorcon.interface.IRouterContainer`.
"""
implements(ICircuitListener, ICircuitContainer, IRouterContainer,
IStreamListener)
    def __init__(self, protocol, bootstrap=True, write_state_diagram=False):
        """Set up state containers and the ns/all router parser; optionally
        chain _bootstrap onto the protocol's post_bootstrap Deferred."""
        self.protocol = ITorControlProtocol(protocol)
        ## fixme could use protocol.on_disconnect to re-connect; see issue #3
        ## could override these to get your own Circuit/Stream subclasses
        ## to track these things
        self.circuit_factory = Circuit
        self.stream_factory = Stream
        self.attacher = None
        """If set, provides
        :class:`txtorcon.interface.IStreamAttacher` to attach new
        streams we hear about."""
        self.tor_binary = 'tor'
        self.circuit_listeners = []
        self.stream_listeners = []
        self.addrmap = AddrMap()
        self.circuits = {}  # keys on id (integer)
        self.streams = {}  # keys on id (integer)
        self.routers = {}  # keys by hexid (string) and by unique names
        self.routers_by_name = {}  # keys on name, value always list (many duplicate "Unnamed" routers, for example)
        self.routers_by_hash = {}  # keys by hexid (string)
        self.guards = {}  # potentially-usable as entry guards, I think? (any router with 'Guard' flag)
        self.entry_guards = {}  # from GETINFO entry-guards, our current entry guards
        self.unusable_entry_guards = []  # list of entry guards we didn't parse out
        self.authorities = {}  # keys by name
        self.cleanup = None  # see set_attacher
        # helper that raises RuntimeError with a formatted message; used as the
        # FSM's error transition handler below
        class die(object):
            __name__ = 'die'  # FIXME? just to ease spagetti.py:82's pain
            def __init__(self, msg):
                self.msg = msg
            def __call__(self, *args):
                raise RuntimeError(self.msg % tuple(args))
        def nothing(*args):
            pass
        # State machine for parsing "ns/all" output line-by-line: router
        # entries arrive as "r", then optional "a", then "s"/"w"/"p" lines.
        waiting_r = State("waiting_r")
        waiting_w = State("waiting_w")
        waiting_p = State("waiting_p")
        waiting_s = State("waiting_s")
        def ignorable_line(x):
            return x.strip() == '.' or x.strip() == 'OK' or x[:3] == 'ns/' or x.strip() == ''
        waiting_r.add_transition(Transition(waiting_r, ignorable_line, nothing))
        waiting_r.add_transition(Transition(waiting_s, lambda x: x[:2] == 'r ', self._router_begin))
        ## FIXME use better method/func than die!!
        waiting_r.add_transition(Transition(waiting_r, lambda x: x[:2] != 'r ', die('Expected "r " while parsing routers not "%s"')))
        waiting_s.add_transition(Transition(waiting_w, lambda x: x[:2] == 's ', self._router_flags))
        waiting_s.add_transition(Transition(waiting_s, lambda x: x[:2] == 'a ', self._router_address))
        waiting_s.add_transition(Transition(waiting_r, ignorable_line, nothing))
        waiting_s.add_transition(Transition(waiting_r, lambda x: x[:2] != 's ' and x[:2] != 'a ', die('Expected "s " while parsing routers not "%s"')))
        waiting_s.add_transition(Transition(waiting_r, lambda x: x.strip() == '.', nothing))
        waiting_w.add_transition(Transition(waiting_p, lambda x: x[:2] == 'w ', self._router_bandwidth))
        waiting_w.add_transition(Transition(waiting_r, ignorable_line, nothing))
        waiting_w.add_transition(Transition(waiting_s, lambda x: x[:2] == 'r ', self._router_begin))  # "w" lines are optional
        waiting_w.add_transition(Transition(waiting_r, lambda x: x[:2] != 'w ', die('Expected "w " while parsing routers not "%s"')))
        waiting_w.add_transition(Transition(waiting_r, lambda x: x.strip() == '.', nothing))
        waiting_p.add_transition(Transition(waiting_r, lambda x: x[:2] == 'p ', self._router_policy))
        waiting_p.add_transition(Transition(waiting_r, ignorable_line, nothing))
        waiting_p.add_transition(Transition(waiting_s, lambda x: x[:2] == 'r ', self._router_begin))  # "p" lines are optional
        waiting_p.add_transition(Transition(waiting_r, lambda x: x[:2] != 'p ', die('Expected "p " while parsing routers not "%s"')))
        waiting_p.add_transition(Transition(waiting_r, lambda x: x.strip() == '.', nothing))
        self._network_status_parser = FSM([waiting_r, waiting_s, waiting_w, waiting_p])
        if write_state_diagram:
            # debugging aid: dump the parser FSM as graphviz
            with open('routerfsm.dot', 'w') as fsmfile:
                fsmfile.write(self._network_status_parser.dotty())
        self.post_bootstrap = defer.Deferred()
        if bootstrap:
            self.protocol.post_bootstrap.addCallback(self._bootstrap).addErrback(self.post_bootstrap.errback)
def _router_begin(self, data):
args = data.split()
self._router = Router(self.protocol)
self._router.from_consensus = True
self._router.update(args[1], # nickname
args[2], # idhash
args[3], # orhash
datetime.datetime.strptime(args[4] + args[5], '%Y-%m-%f%H:%M:%S'),
args[6], # ip address
args[7], # ORPort
args[8]) # DirPort
if self._router.id_hex in self.routers:
## FIXME should I do an update() on this one??
self._router = self.routers[self._router.id_hex]
return
if self._router.name in self.routers_by_name:
self.routers_by_name[self._router.name].append(self._router)
else:
self.routers_by_name[self._router.name] = [self._router]
if self._router.name in self.routers:
self.routers[self._router.name] = None
else:
self.routers[self._router.name] = self._router
self.routers[self._router.id_hex] = self._router
self.routers_by_hash[self._router.id_hex] = self._router
    def _router_flags(self, data):
        """Handle an "s " line: store flags and index guards/authorities."""
        args = data.split()
        self._router.flags = args[1:]
        # NOTE(review): consensus flags are capitalized ('Guard', 'Authority');
        # this relies on Router.flags normalizing them to lowercase — confirm.
        if 'guard' in self._router.flags:
            self.guards[self._router.id_hex] = self._router
        if 'authority' in self._router.flags:
            self.authorities[self._router.name] = self._router
def _router_address(self, data):
"""only for IPv6 addresses"""
self._router.ip_v6.append(data.split()[1].strip())
def _router_bandwidth(self, data):
args = data.split()
self._router.bandwidth = int(args[1].split('=')[1])
def _router_policy(self, data):
args = data.split()
self._router.policy = args[1:]
self._router = None
    @defer.inlineCallbacks
    def _bootstrap(self, arg=None):
        "This takes an arg so we can use it as a callback (see __init__)."
        ## update list of routers (must be before we do the
        ## circuit-status) note that we're feeding each line
        ## incrementally to a state-machine called
        ## _network_status_parser, set up in constructor. "ns" should
        ## be the empty string, but we call _update_network_status for
        ## the de-duplication of named routers
        ns = yield self.protocol.get_info_incremental('ns/all',
                                                      self._network_status_parser.process)
        self._update_network_status(ns)
        ## update list of existing circuits
        cs = yield self.protocol.get_info_raw('circuit-status')
        self._circuit_status(cs)
        ## update list of streams
        ss = yield self.protocol.get_info_raw('stream-status')
        self._stream_status(ss)
        ## update list of existing address-maps
        key = 'address-mappings/all'
        am = yield self.protocol.get_info_raw(key)
        ## strip addressmappsings/all= and OK\n from raw data
        am = am[len(key) + 1:]
        for line in am.split('\n'):
            if len(line.strip()) == 0:
                continue  # FIXME
            self.addrmap.update(line)
        # start listening for events only after the initial snapshot is loaded
        self._add_events()
        entries = yield self.protocol.get_info_raw("entry-guards")
        for line in entries.split('\n')[1:]:
            if len(line.strip()) == 0 or line.strip() == 'OK':
                continue
            args = line.split()
            (name, status) = args[:2]
            # truncate to '$' + 40-char hex id, dropping any ~nickname suffix
            name = name[:41]
            ## this is sometimes redundant, as a missing entry guard
            ## usually means it won't be in our list of routers right
            ## now, but just being on the safe side
            if status.lower() != 'up':
                self.unusable_entry_guards.append(line)
                continue
            try:
                self.entry_guards[name] = self.router_from_id(name)
            except KeyError:
                self.unusable_entry_guards.append(line)
        ## in case process/pid doesn't exist and we don't know the PID
        ## because we own it, we just leave it as 0 (previously
        ## guessed using psutil, but that only works if there's
        ## exactly one tor running anyway)
        try:
            pid = yield self.protocol.get_info_raw("process/pid")
        except TorProtocolError:
            pid = None
        self.tor_pid = 0
        if pid:
            try:
                pid = parse_keywords(pid)['process/pid']
                self.tor_pid = int(pid)
            except KeyError:
                self.tor_pid = 0
        elif self.protocol.is_owned:
            self.tor_pid = self.protocol.is_owned
        self.post_bootstrap.callback(self)
        # NOTE(review): 'post_boostrap' is misspelled — this creates a new
        # attribute instead of clearing self.post_bootstrap; confirm intent.
        self.post_boostrap = None
    def undo_attacher(self):
        """
        Clear __LeaveStreamsUnattached (set by set_attacher) so Tor resumes
        attaching streams by itself; returns the SETCONF Deferred.

        Shouldn't Tor handle this by turning this back to 0 if the
        controller that twiddled it disconnects?
        """
        return self.protocol.set_conf("__LeaveStreamsUnattached", 0)
def set_attacher(self, attacher, myreactor):
"""
Provide an :class:`txtorcon.interface.IStreamAttacher` to
associate streams to circuits. This won't get turned on until
after bootstrapping is completed. ('__LeaveStreamsUnattached'
needs to be set to '1' and the existing circuits list needs to
be populated).
"""
react = IReactorCore(myreactor)
if attacher:
self.attacher = IStreamAttacher(attacher)
else:
self.attacher = None
if self.attacher is None:
self.undo_attacher()
if self.cleanup:
react.removeSystemEventTrigger(self.cleanup)
self.cleanup = None
else:
self.protocol.set_conf("__LeaveStreamsUnattached", "1")
self.cleanup = react.addSystemEventTrigger('before', 'shutdown',
self.undo_attacher)
return None
stream_close_reasons = {
'REASON_MISC': 1, # (catch-all for unlisted reasons)
'REASON_RESOLVEFAILED': 2, # (couldn't look up hostname)
'REASON_CONNECTREFUSED': 3, # (remote host refused connection) [*]
'REASON_EXITPOLICY': 4, # (OR refuses to connect to host or port)
'REASON_DESTROY': 5, # (Circuit is being destroyed)
'REASON_DONE': 6, # (Anonymized TCP connection was closed)
'REASON_TIMEOUT': 7, # (Connection timed out, or OR timed out while connecting)
'REASON_NOROUTE': 8, # (Routing error while attempting to contact destination)
'REASON_HIBERNATING': 9, # (OR is temporarily hibernating)
'REASON_INTERNAL': 10, # (Internal error at the OR)
'REASON_RESOURCELIMIT': 11, # (OR has no resources to fulfill request)
'REASON_CONNRESET': 12, # (Connection was unexpectedly reset)
'REASON_TORPROTOCOL': 13, # (Sent when closing connection because of Tor protocol violations.)
'REASON_NOTDIRECTORY': 14} # (Client sent RELAY_BEGIN_DIR to a non-directory relay.)
def close_stream(self, stream, reason='REASON_MISC', **kwargs):
"""
This sends a STREAMCLOSE command, using the specified reason
(either an int or one of the 14 strings in section 6.3 of
tor-spec.txt if the argument is a string). Any kwards are
passed through as flags if they evaluated to true
(e.g. "SomeFlag=True"). Currently there are none that Tor accepts.
"""
if type(stream) != int:
## assume it's a Stream instance
stream = stream.id
try:
reason = int(reason)
except ValueError:
try:
reason = TorState.stream_close_reasons[reason]
except KeyError:
raise ValueError('Unknown stream close reason "%s"' % str(reason))
flags = flags_from_dict(kwargs)
## stream is now an ID no matter what we passed in
cmd = 'CLOSESTREAM %d %d%s' % (stream, reason, flags)
return self.protocol.queue_command(cmd)
def close_circuit(self, circid, **kwargs):
"""
This sends a CLOSECIRCUIT command, using any keyword arguments
passed as the Flags (currently, that is just 'IfUnused' which
means to only close the circuit when it is no longer used by
any streams).
:param circid:
Either a circuit-id (int) or a Circuit instance
:return:
a Deferred which callbacks with the result of queuing the
command to Tor (usually "OK"). If you want to instead know
when the circuit is actually-gone, see
:meth:`Circuit.close <txtorcon.circuit.Circuit.close>`
"""
if type(circid) != int:
## assume it's a Circuit instance
circid = circid.id
flags = flags_from_dict(kwargs)
return self.protocol.queue_command('CLOSECIRCUIT %s%s' % (circid, flags))
def add_circuit_listener(self, icircuitlistener):
listen = ICircuitListener(icircuitlistener)
for circ in self.circuits.values():
circ.listen(listen)
self.circuit_listeners.append(listen)
def add_stream_listener(self, istreamlistener):
listen = IStreamListener(istreamlistener)
for stream in self.streams.values():
stream.listen(listen)
self.stream_listeners.append(listen)
def _find_circuit_after_extend(self, x):
ex, circ_id = x.split()
if ex != 'EXTENDED':
raise RuntimeError('Expected EXTENDED, got "%s"' % x)
circ_id = int(circ_id)
circ = self._maybe_create_circuit(circ_id)
circ.update([str(circ_id), 'EXTENDED'])
return circ
def build_circuit(self, routers=None, using_guards=True):
"""
Builds a circuit consisting of exactly the routers specified,
in order. This issues an EXTENDCIRCUIT call to Tor with all
the routers specified.
:param routers: a list of Router instances which is the path
desired. To allow Tor to choose the routers itself, pass
None (the default) for routers.
:param using_guards: A warning is issued if the first router
isn't in self.entry_guards.
:return:
A Deferred that will callback with a Circuit instance
(with the .id member being valid, and probably nothing
else).
"""
if routers is None or routers == []:
cmd = "EXTENDCIRCUIT 0"
else:
if using_guards and routers[0] not in self.entry_guards.values():
warnings.warn("Building a circuit not starting with a guard: %s" % (str(routers),), RuntimeWarning)
cmd = "EXTENDCIRCUIT 0 "
first = True
for router in routers:
if first:
first = False
else:
cmd += ','
if isinstance(router, types.StringType) and len(router) == 40 and hashFromHexId(router):
cmd += router
else:
cmd += router.id_hex[1:]
d = self.protocol.queue_command(cmd)
d.addCallback(self._find_circuit_after_extend)
return d
# sentinel an IStreamAttacher may return to mean "leave this stream alone"
DO_NOT_ATTACH = object()

def _maybe_attach(self, stream):
    """
    If we've got a custom stream-attachment instance (see
    set_attacher) this will ask it for the appropriate
    circuit. Note that we ignore .exit URIs and let Tor deal with
    those (by passing circuit ID 0).

    The stream attacher is allowed to return a Deferred which will
    callback with the desired circuit.

    You may return the special object DO_NOT_ATTACH which will
    cause the circuit attacher to simply ignore the stream
    (neither attaching it, nor telling Tor to attach it).
    """
    if self.attacher:
        if stream.target_host is not None and '.exit' in stream.target_host:
            ## we want to totally ignore .exit URIs as these are
            ## used to specify a particular exit node, and trying
            ## to do STREAMATTACH on them will fail with an error
            ## from Tor anyway.
            txtorlog.msg("ignore attacher:", stream)
            return

        circ = IStreamAttacher(self.attacher).attach_stream(stream, self.circuits)
        if circ is self.DO_NOT_ATTACH:
            # attacher explicitly wants this stream left untouched
            return

        if circ is None:
            # circuit-id 0 tells Tor to attach the stream itself
            self.protocol.queue_command("ATTACHSTREAM %d 0" % stream.id)
        else:
            if isinstance(circ, defer.Deferred):
                # defer the ATTACHSTREAM until the attacher's Deferred
                # fires with an actual Circuit instance
                class IssueStreamAttach:
                    def __init__(self, state, streamid):
                        self.stream_id = streamid
                        self.state = state

                    def __call__(self, arg):
                        circid = arg.id
                        self.state.protocol.queue_command("ATTACHSTREAM %d %d" % (self.stream_id, circid))

                circ.addCallback(IssueStreamAttach(self, stream.id)).addErrback(log.err)
            else:
                # attacher returned a Circuit directly; sanity-check it
                if circ.id not in self.circuits:
                    raise RuntimeError("Attacher returned a circuit unknown to me.")
                if circ.state != 'BUILT':
                    raise RuntimeError("Can only attach to BUILT circuits; %d is in %s." % (circ.id, circ.state))
                self.protocol.queue_command("ATTACHSTREAM %d %d" % (stream.id, circ.id))
def _circuit_status(self, data):
    """Used internally as a callback for updating Circuit information"""
    lines = data[len('circuit-status='):].split('\n')
    # Tor sometimes emits a newline right after "circuit-status=" and
    # sometimes not; drop the resulting empty first entry if present.
    if lines and not lines[0].strip():
        lines = lines[1:]
    for entry in lines:
        self._circuit_update(entry)
def _stream_status(self, data):
    "Used internally as a callback for updating Stream information"
    # there's a slight issue with a single-stream vs >= 2 streams,
    # in that in the latter case we have a line by itself with
    # "stream-status=" on it followed by the streams EXCEPT in the
    # single-stream case which has "stream-status=123 blahblah"
    # (i.e. the key + value on one line)
    lines = data.split('\n')
    if len(lines) == 1:
        d = lines[0][len('stream-status='):]
        # if there are actually 0 streams, then there's nothing
        # left to parse
        if len(d):
            self._stream_update(d)
    else:
        # multi-line form: first line is the bare key, skip it
        [self._stream_update(line) for line in lines[1:]]

def _update_network_status(self, data):
    """
    Used internally as a callback for updating Router information
    from NS and NEWCONSENSUS events.
    """
    # each line is fed to the router-parsing FSM set up in __init__
    for line in data.split('\n'):
        self._network_status_parser.process(line)

    txtorlog.msg(len(self.routers_by_name), "named routers found.")
    ## remove any names we added that turned out to have dups
    # NOTE(review): deleting from self.routers while iterating items()
    # is only safe on Python 2, where items() returns a list snapshot.
    for (k, v) in self.routers.items():
        if v is None:
            txtorlog.msg(len(self.routers_by_name[k]), "dups:", k)
            del self.routers[k]

    txtorlog.msg(len(self.guards), "GUARDs")

def _maybe_create_circuit(self, circ_id):
    # Return the known Circuit for circ_id, or create a fresh one wired
    # to us and to all registered circuit-listeners. A new circuit is
    # NOT inserted into self.circuits here; it announces itself through
    # the circuit_new listener callback once it receives its first update.
    if circ_id not in self.circuits:
        c = self.circuit_factory(self)
        c.listen(self)
        [c.listen(x) for x in self.circuit_listeners]
    else:
        c = self.circuits[circ_id]
    return c
def _circuit_update(self, line):
    """
    Used internally as a callback to update Circuit information
    from CIRC events.
    """
    # line starts with the circuit-id, then the status and args
    args = line.split()
    circ_id = int(args[0])

    c = self._maybe_create_circuit(circ_id)
    c.update(args)

def _stream_update(self, line):
    """
    Used internally as a callback to update Stream information
    from STREAM events.
    """
    if line.strip() == 'stream-status=':
        ## this happens if there are no active streams
        return

    args = line.split()
    assert len(args) >= 3

    stream_id = int(args[0])
    wasnew = False
    if stream_id not in self.streams:
        # first time we hear of this stream: create, register, and
        # hook up ourselves plus all registered stream-listeners
        stream = self.stream_factory(self)
        self.streams[stream_id] = stream
        stream.listen(self)
        [stream.listen(x) for x in self.stream_listeners]
        wasnew = True
    self.streams[stream_id].update(args)

    ## if the update closed the stream, it won't be in our list
    ## anymore. FIXME: how can we ever hit such a case as the
    ## first update being a CLOSE?
    if wasnew and stream_id in self.streams:
        self._maybe_attach(self.streams[stream_id])

def _addr_map(self, addr):
    "Internal callback to update DNS cache. Listens to ADDRMAP."
    txtorlog.msg(" --> addr_map", addr)
    self.addrmap.update(addr)

# Tor event name -> unbound handler method, bound to self in _add_events
event_map = {'STREAM': _stream_update,
             'CIRC': _circuit_update,
             'NS': _update_network_status,
             'NEWCONSENSUS': _update_network_status,
             'ADDRMAP': _addr_map}
"""event_map used by add_events to map event_name -> unbound method"""

@defer.inlineCallbacks
def _add_events(self):
    """
    Add listeners for all the events the controller is interested in.
    """
    for (event, func) in self.event_map.items():
        ## the map contains unbound methods, so we bind them
        ## to self so they call the right thing
        # NOTE(review): the three-argument form of types.MethodType is
        # Python 2 only.
        yield self.protocol.add_event_listener(event, types.MethodType(func, self, TorState))
## ICircuitContainer

def find_circuit(self, circid):
    "ICircuitContainer API"
    # raises KeyError for unknown circuit-ids
    return self.circuits[circid]

## IRouterContainer

def router_from_id(self, routerid):
    """IRouterContainer API

    Accepts a key already present in self.routers, or a '$'-prefixed
    40-char hex id (optionally followed by a separator and nickname,
    '=' marking a canonically-named router) for which a placeholder
    Router is created and cached.
    """
    try:
        # self.routers is keyed on '$' + 40 hex chars (41 chars total)
        return self.routers[routerid[:41]]

    except KeyError:
        if routerid[0] != '$':
            raise  # just re-raise the KeyError
        router = Router(self.protocol)
        idhash = routerid[1:41]
        nick = ''
        is_named = False
        if len(routerid) > 41:
            nick = routerid[42:]
            is_named = routerid[41] == '='
        # placeholder values: we know nothing beyond the id and nick
        router.update(nick, hashFromHexId(idhash), '0' * 27, 'unknown',
                      'unknown', '0', '0')
        router.name_is_unique = is_named
        self.routers[router.id_hex] = router
        return router
## implement IStreamListener

def stream_new(self, stream):
    "IStreamListener: a new stream has been created"
    txtorlog.msg("stream_new", stream)

def stream_succeeded(self, stream):
    "IStreamListener: stream has succeeded"
    txtorlog.msg("stream_succeeded", stream)

def stream_attach(self, stream, circuit):
    """
    IStreamListener: the stream has been attached to a circuit. It
    seems you get an attach to None followed by an attach to real
    circuit fairly frequently. Perhaps related to __LeaveStreamsUnattached?
    """
    txtorlog.msg("stream_attach", stream.id,
                 stream.target_host, " -> ", circuit)

def stream_detach(self, stream, **kw):
    """
    IStreamListener
    """
    txtorlog.msg("stream_detach", stream.id)

def stream_closed(self, stream, **kw):
    """
    IStreamListener: stream has been closed (won't be in
    controller's list anymore)
    """
    txtorlog.msg("stream_closed", stream.id)
    # forget the stream entirely once it is closed
    del self.streams[stream.id]

def stream_failed(self, stream, **kw):
    """
    IStreamListener: stream failed for some reason (won't be in
    controller's list anymore)
    """
    txtorlog.msg("stream_failed", stream.id)
    del self.streams[stream.id]

## implement ICircuitListener

def circuit_launched(self, circuit):
    "ICircuitListener API"
    txtorlog.msg("circuit_launched", circuit)
    self.circuits[circuit.id] = circuit

def circuit_extend(self, circuit, router):
    "ICircuitListener API"
    txtorlog.msg("circuit_extend:", circuit.id, router)

def circuit_built(self, circuit):
    "ICircuitListener API"
    txtorlog.msg("circuit_built:", circuit.id,
                 "->".join("%s.%s" % (x.name, x.location.countrycode) for x in circuit.path),
                 circuit.streams)

def circuit_new(self, circuit):
    "ICircuitListener API"
    txtorlog.msg("circuit_new:", circuit.id)
    self.circuits[circuit.id] = circuit

def circuit_destroy(self, circuit):
    "Used by circuit_closed and circuit_failed (below)"
    txtorlog.msg("circuit_destroy:", circuit.id)
    del self.circuits[circuit.id]

def circuit_closed(self, circuit, **kw):
    "ICircuitListener API"
    txtorlog.msg("circuit_closed", circuit)
    self.circuit_destroy(circuit)

def circuit_failed(self, circuit, **kw):
    "ICircuitListener API"
    txtorlog.msg("circuit_failed", circuit, str(kw))
    self.circuit_destroy(circuit)
add all_routers set() to TorState
import datetime
import os
import stat
import types
import warnings
from twisted.python import log
from twisted.internet import defer
from twisted.internet.endpoints import TCP4ClientEndpoint, UNIXClientEndpoint
from twisted.internet.interfaces import IReactorCore, IStreamClientEndpoint
from zope.interface import implements
from txtorcon import TorProtocolFactory
from txtorcon.stream import Stream
from txtorcon.circuit import Circuit
from txtorcon.router import Router, hashFromHexId
from txtorcon.addrmap import AddrMap
from txtorcon.torcontrolprotocol import parse_keywords
from txtorcon.log import txtorlog
from txtorcon.torcontrolprotocol import TorProtocolError
from txtorcon.interface import ITorControlProtocol, IRouterContainer, ICircuitListener
from txtorcon.interface import ICircuitContainer, IStreamListener, IStreamAttacher
from spaghetti import FSM, State, Transition
def _build_state(proto):
    """Internal helper: wrap a bootstrapped protocol in a TorState and
    return the state's post_bootstrap Deferred."""
    state = TorState(proto)
    return state.post_bootstrap

def _wait_for_proto(proto):
    """Internal helper: return the protocol's own post_bootstrap Deferred."""
    return proto.post_bootstrap
def build_tor_connection(connection, build_state=True, wait_for_proto=True,
                         password_function=lambda: None):
    """
    This is used to build a valid TorState (which has .protocol for
    the TorControlProtocol). For example::

        from twisted.internet import reactor
        from twisted.internet.endpoints import TCP4ClientEndpoint
        import txtorcon

        def example(state):
            print "Fully bootstrapped state:",state
            print "   with bootstrapped protocol:",state.protocol

        d = txtorcon.build_tor_connection(TCP4ClientEndpoint(reactor,
                                                             "localhost",
                                                             9051))
        d.addCallback(example)
        reactor.run()

    :param connection:
        An IStreamClientEndpoint, a (reactor, unix-socket-path)
        2-tuple, or a (reactor, host, port) 3-tuple.

    :param password_function:
        See :class:`txtorcon.TorControlProtocol`

    :param build_state:
        If True (the default) a TorState object will be
        built as well. If False, just a TorControlProtocol will be
        returned via the Deferred.

    :return:
        a Deferred that fires with a TorControlProtocol or, if you
        specified build_state=True, a TorState. In both cases, the
        object has finished bootstrapping
        (i.e. TorControlProtocol.post_bootstrap or
        TorState.post_bootstrap has fired, as needed)
    """

    if IStreamClientEndpoint.providedBy(connection):
        endpoint = connection

    elif isinstance(connection, tuple):
        if len(connection) == 2:
            # (reactor, unix-socket-path)
            reactor, socket = connection
            # we need at least read permission on the control socket
            if (os.path.exists(socket) and
                os.stat(socket).st_mode & (stat.S_IRGRP | stat.S_IRUSR |
                                           stat.S_IROTH)):
                endpoint = UNIXClientEndpoint(reactor, socket)
            else:
                raise ValueError('Can\'t use "%s" as a socket' % (socket, ))
        elif len(connection) == 3:
            # (reactor, host, port)
            endpoint = TCP4ClientEndpoint(*connection)
        else:
            raise TypeError('Expected either a (reactor, socket)- or a '
                            '(reactor, host, port)-tuple for argument '
                            '"connection", got %s' % (connection, ))
    else:
        raise TypeError('Expected a (reactor, socket)- or a (reactor, host, '
                        'port)-tuple or an object implementing IStreamClient'
                        'Endpoint for argument "connection", got %s' %
                        (connection, ))
    d = endpoint.connect(TorProtocolFactory(password_function=password_function))
    if build_state:
        # build_state may itself be a callable to use in place of _build_state
        d.addCallback(build_state if callable(build_state) else _build_state)
    elif wait_for_proto:
        d.addCallback(wait_for_proto if callable(wait_for_proto) else
                      _wait_for_proto)
    return d
def build_local_tor_connection(reactor, host='127.0.0.1', port=9051,
                               socket='/var/run/tor/control', *args, **kwargs):
    """
    This builds a connection to a local Tor, either via the unix
    control socket /var/run/tor/control (which is tried first, by
    default) or via 127.0.0.1:9051. See also
    :meth:`build_tor_connection
    <txtorcon.torstate.build_tor_connection>` for other key-word
    arguments that are accepted here also.

    :param host:
        An IP address to find Tor at. Corresponds to the
        ControlListenAddress torrc option.

    :param port:
        The port to use with the address when trying to contact
        Tor. This corresponds to the ControlPort option in torrc
        (default is 9051).

    :param socket:
        Path of the unix control socket tried first.
    """
    try:
        return build_tor_connection((reactor, socket), *args, **kwargs)
    except Exception:
        # An unusable socket path raises synchronously from
        # build_tor_connection (ValueError); fall back to TCP. Actual
        # connection failures are asynchronous and NOT caught here.
        # (was a bare "except:", which also swallowed SystemExit etc.)
        return build_tor_connection((reactor, host, port), *args, **kwargs)
def flags_from_dict(kw):
    """
    Turn a dict whose keys are flag names (e.g. for CLOSECIRCUIT,
    CLOSESTREAM) into a string of space-separated flags, including a
    key only when its value is truthy. The result carries a leading
    space when at least one flag is present (so it can be appended
    directly to a command string); otherwise it is the empty string.
    """
    if len(kw) == 0:
        return ''

    flags = ''
    # .items() behaves identically on Python 2 here and also works on
    # Python 3; the original .iteritems() is Python 2 only.
    for (k, v) in kw.items():
        if v:
            flags += ' ' + str(k)
    # note that we want the leading space if there's at least one
    # flag.
    return flags
class TorState(object):
    """
    This tracks the current state of Tor using a TorControlProtocol.

    On setup it first queries the initial state of streams and
    circuits. It then asks for updates via the listeners. It requires
    an ITorControlProtocol instance. The control protocol doesn't need
    to be bootstrapped yet. The Deferred .post_bootstrap is triggered
    when the TorState instance is fully ready to go. The easiest way
    is to use the helper method
    :func:`txtorcon.build_tor_connection`. For details, see the
    implementation of that.

    You may add an :class:`txtorcon.interface.IStreamAttacher` to
    provide a custom mapping for Streams to Circuits (by default Tor
    picks by itself).

    This is also a good example of the various listeners, and acts as
    an :class:`txtorcon.interface.ICircuitContainer` and
    :class:`txtorcon.interface.IRouterContainer`.
    """

    implements(ICircuitListener, ICircuitContainer, IRouterContainer,
               IStreamListener)

    def __init__(self, protocol, bootstrap=True, write_state_diagram=False):
        self.protocol = ITorControlProtocol(protocol)
        ## fixme could use protocol.on_disconnect to re-connect; see issue #3

        ## could override these to get your own Circuit/Stream subclasses
        ## to track these things
        self.circuit_factory = Circuit
        self.stream_factory = Stream

        self.attacher = None
        """If set, provides
        :class:`txtorcon.interface.IStreamAttacher` to attach new
        streams we hear about."""

        self.tor_binary = 'tor'

        self.circuit_listeners = []
        self.stream_listeners = []

        self.addrmap = AddrMap()
        self.circuits = {}  # keys on id (integer)
        self.streams = {}  # keys on id (integer)

        self.all_routers = set()  # set of unique Router instances
        self.routers = {}  # keys by hexid (string) and by unique names
        self.routers_by_name = {}  # keys on name, value always list (many duplicate "Unnamed" routers, for example)
        self.routers_by_hash = {}  # keys by hexid (string)
        self.guards = {}  # potentially-usable as entry guards, I think? (any router with 'Guard' flag)
        self.entry_guards = {}  # from GETINFO entry-guards, our current entry guards
        self.unusable_entry_guards = []  # list of entry guards we didn't parse out
        self.authorities = {}  # keys by name
        self.cleanup = None  # see set_attacher

        # helpers local to the router-parsing FSM built below
        class die(object):
            __name__ = 'die'  # FIXME? just to ease spaghetti.py:82's pain

            def __init__(self, msg):
                self.msg = msg

            def __call__(self, *args):
                raise RuntimeError(self.msg % tuple(args))

        def nothing(*args):
            pass

        # FSM states for parsing one consensus entry: an "r " line,
        # optional "a " lines, an "s " line, then optional "w " and
        # "p " lines
        waiting_r = State("waiting_r")
        waiting_w = State("waiting_w")
        waiting_p = State("waiting_p")
        waiting_s = State("waiting_s")

        def ignorable_line(x):
            # blank lines, '.', 'OK' and the echoed "ns/" key carry no data
            return x.strip() == '.' or x.strip() == 'OK' or x[:3] == 'ns/' or x.strip() == ''

        waiting_r.add_transition(Transition(waiting_r, ignorable_line, nothing))
        waiting_r.add_transition(Transition(waiting_s, lambda x: x[:2] == 'r ', self._router_begin))
        ## FIXME use better method/func than die!!
        waiting_r.add_transition(Transition(waiting_r, lambda x: x[:2] != 'r ', die('Expected "r " while parsing routers not "%s"')))

        waiting_s.add_transition(Transition(waiting_w, lambda x: x[:2] == 's ', self._router_flags))
        waiting_s.add_transition(Transition(waiting_s, lambda x: x[:2] == 'a ', self._router_address))
        waiting_s.add_transition(Transition(waiting_r, ignorable_line, nothing))
        waiting_s.add_transition(Transition(waiting_r, lambda x: x[:2] != 's ' and x[:2] != 'a ', die('Expected "s " while parsing routers not "%s"')))
        waiting_s.add_transition(Transition(waiting_r, lambda x: x.strip() == '.', nothing))

        waiting_w.add_transition(Transition(waiting_p, lambda x: x[:2] == 'w ', self._router_bandwidth))
        waiting_w.add_transition(Transition(waiting_r, ignorable_line, nothing))
        waiting_w.add_transition(Transition(waiting_s, lambda x: x[:2] == 'r ', self._router_begin))  # "w" lines are optional
        waiting_w.add_transition(Transition(waiting_r, lambda x: x[:2] != 'w ', die('Expected "w " while parsing routers not "%s"')))
        waiting_w.add_transition(Transition(waiting_r, lambda x: x.strip() == '.', nothing))

        waiting_p.add_transition(Transition(waiting_r, lambda x: x[:2] == 'p ', self._router_policy))
        waiting_p.add_transition(Transition(waiting_r, ignorable_line, nothing))
        waiting_p.add_transition(Transition(waiting_s, lambda x: x[:2] == 'r ', self._router_begin))  # "p" lines are optional
        waiting_p.add_transition(Transition(waiting_r, lambda x: x[:2] != 'p ', die('Expected "p " while parsing routers not "%s"')))
        waiting_p.add_transition(Transition(waiting_r, lambda x: x.strip() == '.', nothing))

        self._network_status_parser = FSM([waiting_r, waiting_s, waiting_w, waiting_p])
        if write_state_diagram:
            with open('routerfsm.dot', 'w') as fsmfile:
                fsmfile.write(self._network_status_parser.dotty())

        self.post_bootstrap = defer.Deferred()
        if bootstrap:
            self.protocol.post_bootstrap.addCallback(self._bootstrap).addErrback(self.post_bootstrap.errback)
def _router_begin(self, data):
    """
    FSM handler for an "r " consensus line: starts a new Router,
    parses its fields, and registers it in the various lookup maps
    (routers, routers_by_name, routers_by_hash, all_routers).
    Duplicate nicknames get a None marker in self.routers so they
    can be culled in _update_network_status.
    """
    args = data.split()
    self._router = Router(self.protocol)
    self._router.from_consensus = True
    self._router.update(args[1],  # nickname
                        args[2],  # idhash
                        args[3],  # orhash
                        # BUG FIX: the format string used '%f'
                        # (microseconds) where the day-of-month
                        # directive '%d' is required for the
                        # "YYYY-MM-DD" + "HH:MM:SS" published date.
                        datetime.datetime.strptime(args[4] + args[5], '%Y-%m-%d%H:%M:%S'),
                        args[6],  # ip address
                        args[7],  # ORPort
                        args[8])  # DirPort

    if self._router.id_hex in self.routers:
        ## FIXME should I do an update() on this one??
        self._router = self.routers[self._router.id_hex]
        return

    if self._router.name in self.routers_by_name:
        self.routers_by_name[self._router.name].append(self._router)
    else:
        self.routers_by_name[self._router.name] = [self._router]

    # a second router with the same nickname makes that name ambiguous:
    # mark it None so _update_network_status removes the name key
    if self._router.name in self.routers:
        self.routers[self._router.name] = None
    else:
        self.routers[self._router.name] = self._router

    self.routers[self._router.id_hex] = self._router
    self.routers_by_hash[self._router.id_hex] = self._router
    self.all_routers.add(self._router)
def _router_flags(self, data):
    # FSM handler for an "s " line: flags of the router being parsed.
    args = data.split()
    self._router.flags = args[1:]
    # NOTE(review): lowercase membership tests assume Router.flags
    # normalizes flag case -- confirm in txtorcon.router
    if 'guard' in self._router.flags:
        self.guards[self._router.id_hex] = self._router
    if 'authority' in self._router.flags:
        self.authorities[self._router.name] = self._router

def _router_address(self, data):
    """only for IPv6 addresses"""
    self._router.ip_v6.append(data.split()[1].strip())

def _router_bandwidth(self, data):
    # FSM handler for a "w Bandwidth=<N>" line.
    args = data.split()
    self._router.bandwidth = int(args[1].split('=')[1])

def _router_policy(self, data):
    # FSM handler for a "p " line (exit-policy summary); this is the
    # last line of a router entry, so drop the parsing cursor.
    args = data.split()
    self._router.policy = args[1:]
    self._router = None
@defer.inlineCallbacks
def _bootstrap(self, arg=None):
    "This takes an arg so we can use it as a callback (see __init__)."

    ## update list of routers (must be before we do the
    ## circuit-status) note that we're feeding each line
    ## incrementally to a state-machine called
    ## _network_status_parser, set up in constructor. "ns" should
    ## be the empty string, but we call _update_network_status for
    ## the de-duplication of named routers

    ns = yield self.protocol.get_info_incremental('ns/all',
                                                  self._network_status_parser.process)
    self._update_network_status(ns)

    ## update list of existing circuits
    cs = yield self.protocol.get_info_raw('circuit-status')
    self._circuit_status(cs)

    ## update list of streams
    ss = yield self.protocol.get_info_raw('stream-status')
    self._stream_status(ss)

    ## update list of existing address-maps
    key = 'address-mappings/all'
    am = yield self.protocol.get_info_raw(key)
    ## strip "address-mappings/all=" and OK\n from raw data
    am = am[len(key) + 1:]
    for line in am.split('\n'):
        if len(line.strip()) == 0:
            continue  # FIXME
        self.addrmap.update(line)

    # subscribe to STREAM/CIRC/NS/NEWCONSENSUS/ADDRMAP events
    self._add_events()

    entries = yield self.protocol.get_info_raw("entry-guards")
    for line in entries.split('\n')[1:]:
        if len(line.strip()) == 0 or line.strip() == 'OK':
            continue
        args = line.split()
        (name, status) = args[:2]
        name = name[:41]

        ## this is sometimes redundant, as a missing entry guard
        ## usually means it won't be in our list of routers right
        ## now, but just being on the safe side
        if status.lower() != 'up':
            self.unusable_entry_guards.append(line)
            continue
        try:
            self.entry_guards[name] = self.router_from_id(name)
        except KeyError:
            self.unusable_entry_guards.append(line)

    ## in case process/pid doesn't exist and we don't know the PID
    ## because we own it, we just leave it as 0 (previously
    ## guessed using psutil, but that only works if there's
    ## exactly one tor running anyway)
    try:
        pid = yield self.protocol.get_info_raw("process/pid")
    except TorProtocolError:
        pid = None
    self.tor_pid = 0
    if pid:
        try:
            pid = parse_keywords(pid)['process/pid']
            self.tor_pid = int(pid)
        except KeyError:
            self.tor_pid = 0
    elif self.protocol.is_owned:
        self.tor_pid = self.protocol.is_owned

    self.post_bootstrap.callback(self)
    # NOTE(review): 'post_boostrap' (missing 't') looks like a typo for
    # 'post_bootstrap'; as written it only creates an unused attribute
    # and leaves the real Deferred in place -- confirm intent before fixing.
    self.post_boostrap = None
def undo_attacher(self):
    """
    Shouldn't Tor handle this by turning this back to 0 if the
    controller that twiddled it disconnects?
    """
    # hand stream-attachment responsibility back to Tor
    return self.protocol.set_conf("__LeaveStreamsUnattached", 0)

def set_attacher(self, attacher, myreactor):
    """
    Provide an :class:`txtorcon.interface.IStreamAttacher` to
    associate streams to circuits. This won't get turned on until
    after bootstrapping is completed. ('__LeaveStreamsUnattached'
    needs to be set to '1' and the existing circuits list needs to
    be populated).

    Pass None as *attacher* to remove a previously-set one and
    restore Tor's own stream attachment.
    """
    react = IReactorCore(myreactor)
    if attacher:
        self.attacher = IStreamAttacher(attacher)
    else:
        self.attacher = None

    if self.attacher is None:
        self.undo_attacher()
        if self.cleanup:
            react.removeSystemEventTrigger(self.cleanup)
            self.cleanup = None
    else:
        self.protocol.set_conf("__LeaveStreamsUnattached", "1")
        # ensure Tor's config is restored even on reactor shutdown
        self.cleanup = react.addSystemEventTrigger('before', 'shutdown',
                                                   self.undo_attacher)
    return None
# STREAMCLOSE reason codes, from section 6.3 of tor-spec.txt
stream_close_reasons = {
    'REASON_MISC': 1,  # (catch-all for unlisted reasons)
    'REASON_RESOLVEFAILED': 2,  # (couldn't look up hostname)
    'REASON_CONNECTREFUSED': 3,  # (remote host refused connection) [*]
    'REASON_EXITPOLICY': 4,  # (OR refuses to connect to host or port)
    'REASON_DESTROY': 5,  # (Circuit is being destroyed)
    'REASON_DONE': 6,  # (Anonymized TCP connection was closed)
    'REASON_TIMEOUT': 7,  # (Connection timed out, or OR timed out while connecting)
    'REASON_NOROUTE': 8,  # (Routing error while attempting to contact destination)
    'REASON_HIBERNATING': 9,  # (OR is temporarily hibernating)
    'REASON_INTERNAL': 10,  # (Internal error at the OR)
    'REASON_RESOURCELIMIT': 11,  # (OR has no resources to fulfill request)
    'REASON_CONNRESET': 12,  # (Connection was unexpectedly reset)
    'REASON_TORPROTOCOL': 13,  # (Sent when closing connection because of Tor protocol violations.)
    'REASON_NOTDIRECTORY': 14}  # (Client sent RELAY_BEGIN_DIR to a non-directory relay.)

def close_stream(self, stream, reason='REASON_MISC', **kwargs):
    """
    This sends a STREAMCLOSE command, using the specified reason
    (either an int or one of the 14 strings in section 6.3 of
    tor-spec.txt if the argument is a string). Any kwargs are
    passed through as flags if they evaluated to true
    (e.g. "SomeFlag=True"). Currently there are none that Tor accepts.

    :param stream: a Stream instance or a stream-id (int)
    :raises ValueError: if a string reason isn't one of the known names
    """
    if type(stream) != int:
        ## assume it's a Stream instance
        stream = stream.id
    try:
        reason = int(reason)
    except ValueError:
        try:
            reason = TorState.stream_close_reasons[reason]
        except KeyError:
            raise ValueError('Unknown stream close reason "%s"' % str(reason))

    flags = flags_from_dict(kwargs)

    ## stream is now an ID no matter what we passed in
    cmd = 'CLOSESTREAM %d %d%s' % (stream, reason, flags)
    return self.protocol.queue_command(cmd)
def close_circuit(self, circid, **kwargs):
    """
    This sends a CLOSECIRCUIT command, using any keyword arguments
    passed as the Flags (currently, that is just 'IfUnused' which
    means to only close the circuit when it is no longer used by
    any streams).

    :param circid:
        Either a circuit-id (int) or a Circuit instance

    :return:
        a Deferred which callbacks with the result of queuing the
        command to Tor (usually "OK"). If you want to instead know
        when the circuit is actually-gone, see
        :meth:`Circuit.close <txtorcon.circuit.Circuit.close>`
    """
    if type(circid) != int:
        ## assume it's a Circuit instance
        circid = circid.id
    flags = flags_from_dict(kwargs)
    return self.protocol.queue_command('CLOSECIRCUIT %s%s' % (circid, flags))
def add_circuit_listener(self, icircuitlistener):
    """
    Register an ICircuitListener: it is attached to every circuit we
    currently know about and remembered (in circuit_listeners) so it
    also hears about circuits created later.
    """
    listen = ICircuitListener(icircuitlistener)
    for circ in self.circuits.values():
        circ.listen(listen)
    self.circuit_listeners.append(listen)

def add_stream_listener(self, istreamlistener):
    """
    Register an IStreamListener: it is attached to every stream we
    currently know about and remembered (in stream_listeners) so it
    also hears about streams created later.
    """
    listen = IStreamListener(istreamlistener)
    for stream in self.streams.values():
        stream.listen(listen)
    self.stream_listeners.append(listen)
def _find_circuit_after_extend(self, x):
    """
    Callback for the EXTENDCIRCUIT reply: expects "EXTENDED <id>",
    looks up (or creates) the corresponding Circuit, feeds it an
    EXTENDED update and returns it.
    """
    keyword, ident = x.split()
    if keyword != 'EXTENDED':
        raise RuntimeError('Expected EXTENDED, got "%s"' % x)
    ident = int(ident)
    circuit = self._maybe_create_circuit(ident)
    circuit.update([str(ident), 'EXTENDED'])
    return circuit
def build_circuit(self, routers=None, using_guards=True):
    """
    Builds a circuit consisting of exactly the routers specified,
    in order. This issues an EXTENDCIRCUIT call to Tor with all
    the routers specified.

    :param routers: a list of Router instances which is the path
        desired. To allow Tor to choose the routers itself, pass
        None (the default) for routers.

    :param using_guards: A warning is issued if the first router
        isn't in self.entry_guards.

    :return:
        A Deferred that will callback with a Circuit instance
        (with the .id member being valid, and probably nothing
        else).
    """
    if routers is None or routers == []:
        # circuit-id 0 with no path asks Tor to pick the route itself
        cmd = "EXTENDCIRCUIT 0"
    else:
        if using_guards and routers[0] not in self.entry_guards.values():
            warnings.warn("Building a circuit not starting with a guard: %s" % (str(routers),), RuntimeWarning)
        cmd = "EXTENDCIRCUIT 0 "
        first = True
        for router in routers:
            if first:
                first = False
            else:
                cmd += ','
            # accept either a bare 40-character hex fingerprint string
            # (types.StringType: this module is Python 2) or a Router
            # instance, whose id_hex carries a leading '$' we strip
            if isinstance(router, types.StringType) and len(router) == 40 and hashFromHexId(router):
                cmd += router
            else:
                cmd += router.id_hex[1:]
    d = self.protocol.queue_command(cmd)
    d.addCallback(self._find_circuit_after_extend)
    return d
# sentinel an IStreamAttacher may return to mean "leave this stream alone"
DO_NOT_ATTACH = object()

def _maybe_attach(self, stream):
    """
    If we've got a custom stream-attachment instance (see
    set_attacher) this will ask it for the appropriate
    circuit. Note that we ignore .exit URIs and let Tor deal with
    those (by passing circuit ID 0).

    The stream attacher is allowed to return a Deferred which will
    callback with the desired circuit.

    You may return the special object DO_NOT_ATTACH which will
    cause the circuit attacher to simply ignore the stream
    (neither attaching it, nor telling Tor to attach it).
    """
    if self.attacher:
        if stream.target_host is not None and '.exit' in stream.target_host:
            ## we want to totally ignore .exit URIs as these are
            ## used to specify a particular exit node, and trying
            ## to do STREAMATTACH on them will fail with an error
            ## from Tor anyway.
            txtorlog.msg("ignore attacher:", stream)
            return

        circ = IStreamAttacher(self.attacher).attach_stream(stream, self.circuits)
        if circ is self.DO_NOT_ATTACH:
            # attacher explicitly wants this stream left untouched
            return

        if circ is None:
            # circuit-id 0 tells Tor to attach the stream itself
            self.protocol.queue_command("ATTACHSTREAM %d 0" % stream.id)
        else:
            if isinstance(circ, defer.Deferred):
                # defer the ATTACHSTREAM until the attacher's Deferred
                # fires with an actual Circuit instance
                class IssueStreamAttach:
                    def __init__(self, state, streamid):
                        self.stream_id = streamid
                        self.state = state

                    def __call__(self, arg):
                        circid = arg.id
                        self.state.protocol.queue_command("ATTACHSTREAM %d %d" % (self.stream_id, circid))

                circ.addCallback(IssueStreamAttach(self, stream.id)).addErrback(log.err)
            else:
                # attacher returned a Circuit directly; sanity-check it
                if circ.id not in self.circuits:
                    raise RuntimeError("Attacher returned a circuit unknown to me.")
                if circ.state != 'BUILT':
                    raise RuntimeError("Can only attach to BUILT circuits; %d is in %s." % (circ.id, circ.state))
                self.protocol.queue_command("ATTACHSTREAM %d %d" % (stream.id, circ.id))
def _circuit_status(self, data):
    """Used internally as a callback for updating Circuit information"""
    lines = data[len('circuit-status='):].split('\n')
    # Tor sometimes emits a newline right after "circuit-status=" and
    # sometimes not; drop the resulting empty first entry if present.
    if lines and not lines[0].strip():
        lines = lines[1:]
    for entry in lines:
        self._circuit_update(entry)
def _stream_status(self, data):
    "Used internally as a callback for updating Stream information"
    # there's a slight issue with a single-stream vs >= 2 streams,
    # in that in the latter case we have a line by itself with
    # "stream-status=" on it followed by the streams EXCEPT in the
    # single-stream case which has "stream-status=123 blahblah"
    # (i.e. the key + value on one line)
    lines = data.split('\n')
    if len(lines) == 1:
        d = lines[0][len('stream-status='):]
        # if there are actually 0 streams, then there's nothing
        # left to parse
        if len(d):
            self._stream_update(d)
    else:
        # multi-line form: first line is the bare key, skip it
        [self._stream_update(line) for line in lines[1:]]

def _update_network_status(self, data):
    """
    Used internally as a callback for updating Router information
    from NS and NEWCONSENSUS events.
    """
    # start from a fresh set; routers present in this consensus re-add
    # themselves via _router_begin's all_routers.add()
    self.all_routers = set()
    # each line is fed to the router-parsing FSM set up in __init__
    for line in data.split('\n'):
        self._network_status_parser.process(line)

    txtorlog.msg(len(self.routers_by_name), "named routers found.")
    ## remove any names we added that turned out to have dups
    # NOTE(review): deleting from self.routers while iterating items()
    # is only safe on Python 2, where items() returns a list snapshot.
    for (k, v) in self.routers.items():
        if v is None:
            txtorlog.msg(len(self.routers_by_name[k]), "dups:", k)
            del self.routers[k]

    txtorlog.msg(len(self.guards), "GUARDs")

def _maybe_create_circuit(self, circ_id):
    # Return the known Circuit for circ_id, or create a fresh one wired
    # to us and to all registered circuit-listeners. A new circuit is
    # NOT inserted into self.circuits here; it announces itself through
    # the circuit_new listener callback once it receives its first update.
    if circ_id not in self.circuits:
        c = self.circuit_factory(self)
        c.listen(self)
        [c.listen(x) for x in self.circuit_listeners]
    else:
        c = self.circuits[circ_id]
    return c
def _circuit_update(self, line):
    """
    Used internally as a callback to update Circuit information
    from CIRC events.
    """
    # line starts with the circuit-id, then the status and args
    args = line.split()
    circ_id = int(args[0])

    c = self._maybe_create_circuit(circ_id)
    c.update(args)

def _stream_update(self, line):
    """
    Used internally as a callback to update Stream information
    from STREAM events.
    """
    if line.strip() == 'stream-status=':
        ## this happens if there are no active streams
        return

    args = line.split()
    assert len(args) >= 3

    stream_id = int(args[0])
    wasnew = False
    if stream_id not in self.streams:
        # first time we hear of this stream: create, register, and
        # hook up ourselves plus all registered stream-listeners
        stream = self.stream_factory(self)
        self.streams[stream_id] = stream
        stream.listen(self)
        [stream.listen(x) for x in self.stream_listeners]
        wasnew = True
    self.streams[stream_id].update(args)

    ## if the update closed the stream, it won't be in our list
    ## anymore. FIXME: how can we ever hit such a case as the
    ## first update being a CLOSE?
    if wasnew and stream_id in self.streams:
        self._maybe_attach(self.streams[stream_id])

def _addr_map(self, addr):
    "Internal callback to update DNS cache. Listens to ADDRMAP."
    txtorlog.msg(" --> addr_map", addr)
    self.addrmap.update(addr)

# Tor event name -> unbound handler method, bound to self in _add_events
event_map = {'STREAM': _stream_update,
             'CIRC': _circuit_update,
             'NS': _update_network_status,
             'NEWCONSENSUS': _update_network_status,
             'ADDRMAP': _addr_map}
"""event_map used by add_events to map event_name -> unbound method"""

@defer.inlineCallbacks
def _add_events(self):
    """
    Add listeners for all the events the controller is interested in.
    """
    for (event, func) in self.event_map.items():
        ## the map contains unbound methods, so we bind them
        ## to self so they call the right thing
        # NOTE(review): the three-argument form of types.MethodType is
        # Python 2 only.
        yield self.protocol.add_event_listener(event, types.MethodType(func, self, TorState))
## ICircuitContainer

def find_circuit(self, circid):
    "ICircuitContainer API"
    # raises KeyError for unknown circuit-ids
    return self.circuits[circid]

## IRouterContainer

def router_from_id(self, routerid):
    """IRouterContainer API

    Accepts a key already present in self.routers, or a '$'-prefixed
    40-char hex id (optionally followed by a separator and nickname,
    '=' marking a canonically-named router) for which a placeholder
    Router is created and cached.
    """
    try:
        # self.routers is keyed on '$' + 40 hex chars (41 chars total)
        return self.routers[routerid[:41]]

    except KeyError:
        if routerid[0] != '$':
            raise  # just re-raise the KeyError
        router = Router(self.protocol)
        idhash = routerid[1:41]
        nick = ''
        is_named = False
        if len(routerid) > 41:
            nick = routerid[42:]
            is_named = routerid[41] == '='
        # placeholder values: we know nothing beyond the id and nick
        router.update(nick, hashFromHexId(idhash), '0' * 27, 'unknown',
                      'unknown', '0', '0')
        router.name_is_unique = is_named
        self.routers[router.id_hex] = router
        return router
## implement IStreamListener
def stream_new(self, stream):
"IStreamListener: a new stream has been created"
txtorlog.msg("stream_new", stream)
def stream_succeeded(self, stream):
"IStreamListener: stream has succeeded"
txtorlog.msg("stream_succeeded", stream)
def stream_attach(self, stream, circuit):
"""
IStreamListener: the stream has been attached to a circuit. It
seems you get an attach to None followed by an attach to real
circuit fairly frequently. Perhaps related to __LeaveStreamsUnattached?
"""
txtorlog.msg("stream_attach", stream.id,
stream.target_host, " -> ", circuit)
def stream_detach(self, stream, **kw):
    """
    IStreamListener: the stream was detached from its circuit (logging only).
    """
    txtorlog.msg("stream_detach", stream.id)
def stream_closed(self, stream, **kw):
    """
    IStreamListener: stream has been closed (won't be in
    controller's list anymore)
    """
    txtorlog.msg("stream_closed", stream.id)
    # stop tracking; raises KeyError if the stream was never tracked
    del self.streams[stream.id]
def stream_failed(self, stream, **kw):
    """
    IStreamListener: stream failed for some reason (won't be in
    controller's list anymore)
    """
    txtorlog.msg("stream_failed", stream.id)
    # stop tracking; raises KeyError if the stream was never tracked
    del self.streams[stream.id]
## implement ICircuitListener
def circuit_launched(self, circuit):
    """ICircuitListener API: begin tracking a newly launched circuit."""
    txtorlog.msg("circuit_launched", circuit)
    self.circuits[circuit.id] = circuit
def circuit_extend(self, circuit, router):
    """ICircuitListener API: the circuit gained a hop (logging only)."""
    txtorlog.msg("circuit_extend:", circuit.id, router)
def circuit_built(self, circuit):
    """ICircuitListener API: log the full path of a completed circuit."""
    txtorlog.msg("circuit_built:", circuit.id,
                 "->".join("%s.%s" % (x.name, x.location.countrycode) for x in circuit.path),
                 circuit.streams)
def circuit_new(self, circuit):
    """ICircuitListener API: begin tracking a circuit we learned about."""
    txtorlog.msg("circuit_new:", circuit.id)
    self.circuits[circuit.id] = circuit
def circuit_destroy(self, circuit):
    """Used by circuit_closed and circuit_failed (below) to stop tracking."""
    txtorlog.msg("circuit_destroy:", circuit.id)
    # raises KeyError if the circuit was never tracked
    del self.circuits[circuit.id]
def circuit_closed(self, circuit, **kw):
    """ICircuitListener API: circuit closed cleanly; forget it."""
    txtorlog.msg("circuit_closed", circuit)
    self.circuit_destroy(circuit)
def circuit_failed(self, circuit, **kw):
    """ICircuitListener API: circuit failed; forget it."""
    txtorlog.msg("circuit_failed", circuit, str(kw))
    self.circuit_destroy(circuit)
|
from logging import getLogger
from django.http import QueryDict
from django.http import HttpResponseNotAllowed
from django.utils.datastructures import MultiValueDict
from django.core.files.uploadedfile import TemporaryUploadedFile
from .distutils_views import ACTION_VIEWS
log = getLogger(__name__)
def _get_distutils_action(request):
if request.method == 'POST':
parse_distutils_request(request)
action = request.POST.get(':action', None)
else:
action = request.GET.get(':action', None)
return action
def is_distutils_request(request):
    """True when the request carries a distutils ':action' parameter."""
    action = _get_distutils_action(request)
    return action is not None
def handle_distutils_request(request):
    """Dispatch a distutils request to the view registered for its action.

    Unknown actions get a 405 response listing the supported ones.
    """
    action = _get_distutils_action(request)
    if action in ACTION_VIEWS:
        return ACTION_VIEWS[action](request)
    log.error('Invalid action encountered: %r', action)
    return HttpResponseNotAllowed(ACTION_VIEWS.keys())
def _parse_header(header):
headers = {}
for kvpair in filter(lambda p: p,
map(lambda p: p.strip(),
header.split(';'))):
try:
key, value = kvpair.split("=",1)
except ValueError:
continue
headers[key.strip()] = value.strip('"')
return headers
def parse_distutils_request(request):
    """ This is being used because the built in request parser that Django uses,
    django.http.multipartparser.MultiPartParser is interperting the POST data
    incorrectly and/or the post data coming from distutils is invalid.
    One portion of this is the end marker: \r\n\r\n (what Django expects)
    versus \n\n (what distutils is sending).

    Populates request.POST and request.FILES in place.
    """
    # BUGFIX: request.raw_post_data was renamed request.body and removed in
    # Django 1.6 -- see https://code.djangoproject.com/ticket/17323
    body = getattr(request, 'body', None)
    if body is None:
        body = request.raw_post_data
    try:
        # the second line of the body is the multipart boundary separator
        sep = body.splitlines()[1]
    except IndexError:
        raise ValueError('Invalid post data')
    request.POST = QueryDict('', mutable=True)
    try:
        request._files = MultiValueDict()
    except Exception:
        # some Django versions may refuse the assignment; POST-only then
        pass
    for part in filter(lambda e: e.strip(), body.split(sep)):
        # BUGFIX: normalize line endings -- newer distutils can submit \r\n
        # end-of-line marks, which broke the '\n'-based splitting below
        part = part.replace('\r\n', '\n')
        try:
            header, content = part.lstrip().split('\n', 1)
        except ValueError:
            # part without a header/content separator -- ignore it
            continue
        if content.startswith('\n'):
            content = content[1:]
        if content.endswith('\n'):
            content = content[:-1]
        headers = _parse_header(header)
        if "name" not in headers:
            continue
        if "filename" in headers:
            # file payload: wrap in an uploaded-file object for request.FILES
            dist = TemporaryUploadedFile(name=headers["filename"],
                                         size=len(content),
                                         content_type="application/gzip",
                                         charset='utf-8')
            dist.write(content)
            dist.seek(0)
            request.FILES.appendlist(headers['name'], dist)
        else:
            request.POST.appendlist(headers["name"], content)
    return
Minimal changes for Django 1.6+ compatibility.
from logging import getLogger
from django.http import QueryDict
from django.http import HttpResponseNotAllowed
from django.utils.datastructures import MultiValueDict
from django.core.files.uploadedfile import TemporaryUploadedFile
from .distutils_views import ACTION_VIEWS
log = getLogger(__name__)
def _get_distutils_action(request):
if request.method == 'POST':
parse_distutils_request(request)
action = request.POST.get(':action', None)
else:
action = request.GET.get(':action', None)
return action
def is_distutils_request(request):
    # True when the request carries a distutils ':action' parameter.
    return _get_distutils_action(request) is not None
def handle_distutils_request(request):
    """Dispatch to the view registered for the request's ':action'.

    Returns 405 (listing the valid actions) for unknown actions.
    """
    action = _get_distutils_action(request)
    if action not in ACTION_VIEWS:
        log.error('Invalid action encountered: %r', action)
        return HttpResponseNotAllowed(ACTION_VIEWS.keys())
    return ACTION_VIEWS[action](request)
def _parse_header(header):
headers = {}
for kvpair in filter(lambda p: p,
map(lambda p: p.strip(),
header.split(';'))):
try:
key, value = kvpair.split("=",1)
except ValueError:
continue
headers[key.strip()] = value.strip('"')
return headers
def parse_distutils_request(request):
    """ This is being used because the built in request parser that Django uses,
    django.http.multipartparser.MultiPartParser is interperting the POST data
    incorrectly and/or the post data coming from distutils is invalid.
    One portion of this is the end marker: \r\n\r\n (what Django expects)
    versus \n\n (what distutils is sending).

    Populates request.POST and request.FILES in place.
    """
    # request.raw_post_data has been renamed request.body
    # see: https://code.djangoproject.com/ticket/17323
    try:
        # the second line of the body is the multipart boundary separator
        sep = request.body.splitlines()[1]
    except:
        raise ValueError('Invalid post data')
    request.POST = QueryDict('',mutable=True)
    try:
        # some Django versions may refuse the assignment; POST-only then
        request._files = MultiValueDict()
    except Exception, e:
        pass
    for part in filter(lambda e: e.strip(), request.body.split(sep)):
        # normalize line endings:
        # newer distutils can submit \r\n end-of-line marks.
        part = part.replace('\r\n', '\n')
        try:
            header, content = part.lstrip().split('\n',1)
        except Exception, e:
            # part without a header/content separator -- ignore it
            continue
        if content.startswith('\n'):
            content = content[1:]
        if content.endswith('\n'):
            content = content[:-1]
        headers = _parse_header(header)
        if "name" not in headers:
            continue
        if "filename" in headers:
            # file payload: wrap in an uploaded-file object for request.FILES
            dist = TemporaryUploadedFile(name=headers["filename"],
                                         size=len(content),
                                         content_type="application/gzip",
                                         charset='utf-8')
            dist.write(content)
            dist.seek(0)
            request.FILES.appendlist(headers['name'], dist)
        else:
            request.POST.appendlist(headers["name"],content)
    return
|
# Create your views here.
# -*- coding: utf-8 -*-
import os
import time
import json
import subprocess
import datetime
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.core.exceptions import ObjectDoesNotExist
from urqa.models import Session
from urqa.models import Sessionevent
from urqa.models import Projects
from urqa.models import Errors
from urqa.models import Instances
from urqa.models import Eventpaths
from urqa.models import Tags
from urqa.models import Appruncount
from urqa.models import Sofiles
from urqa.models import Appstatistics
from urqa.models import Osstatistics
from urqa.models import Devicestatistics
from urqa.models import Countrystatistics
from urqa.models import Activitystatistics
from urqa.models import Proguardmap
from utility import naive2aware
from utility import getUTCDatetime
from utility import getUTCawaredate
from utility import RANK
from config import get_config
#삭제요망
from common import validUserPjtError
@csrf_exempt
def connect(request):
    """Session handshake: look up the project by apikey, issue an idsession
    (epoch milliseconds) and bump the per-app-version run counter."""
    jsonData = json.loads(request.body,encoding='utf-8')
    #print jsonData
    # step 1: find the project via its apikey
    try:
        apikey = jsonData['apikey']
        projectElement = Projects.objects.get(apikey=apikey)
    except ObjectDoesNotExist:
        print 'Invalid from client(connect)'
        return HttpResponse(json.dumps({'idsession':'0'}), 'application/json');
    # step 2: issue an idsession (millisecond timestamp; Python 2 `long`)
    appversion = jsonData['appversion']
    idsession = long(time.time() * 1000)
    Session.objects.create(idsession=idsession,pid=projectElement,appversion=appversion)
    print 'Project: %s, Ver: %s, new idsession: %d' % (projectElement.name,appversion,idsession)
    # step 3: increment the cumulative run count per app version,
    # keyed by UTC date (date= is part of the lookup, not the defaults)
    appruncountElement, created = Appruncount.objects.get_or_create(pid=projectElement,appversion=appversion,defaults={'runcount':1},date=getUTCawaredate())
    if created == False:
        appruncountElement.runcount += 1
        appruncountElement.save()
    else:
        print 'project: %s, new version: %s' % (projectElement.name,appruncountElement.appversion)
    return HttpResponse(json.dumps({'idsession':idsession}), 'application/json');
def proguard_retrace_oneline(text, linenum, map_path, mapElement):
    """De-obfuscate a single 'class.method' frame via ProGuard retrace.

    Writes the frame to a temp file under map_path, runs the retrace jar
    against the project's mapping file, and returns the de-obfuscated
    frame. Returns ``text`` unchanged when there is no mapping
    (mapElement is None) or retrace output is not in the expected shape.
    """
    if mapElement is None:
        return text
    # NOTE(review): the fixed temp.txt name is racy under concurrent requests
    temp_path = os.path.join(map_path, 'temp.txt')
    # FIX: parameter renamed from `str` (shadowed the builtin); temp file is
    # now cleaned up even when retrace fails (was leaked on exception)
    fp = open(temp_path, 'wb')
    try:
        # retrace expects stacktrace-shaped lines: "at <frame> (:<line>)"
        fp.write('at\t' + text + '\t(:%s)' % linenum)
    finally:
        fp.close()
    try:
        arg = ['java', '-jar', get_config('proguard_retrace_path'), '-verbose',
               os.path.join(map_path, mapElement.filename), temp_path]
        fd_popen = subprocess.Popen(arg, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = fd_popen.communicate()
        # output mirrors the input: "at\t<frame>\t(:line)" -- take the frame;
        # fall back to the original text on unexpected output (was IndexError)
        stdout_split = stdout.split('\t')
        if len(stdout_split) > 1:
            text = stdout_split[1]
    finally:
        os.remove(temp_path)
    return text
def proguard_retrace_callstack(text, map_path, mapElement):
    """De-obfuscate a whole callstack via ProGuard retrace.

    Same mechanism as proguard_retrace_oneline, but the callstack is
    passed through verbatim and retrace's full stdout is returned.
    Returns ``text`` unchanged when mapElement is None.
    """
    if mapElement is None:
        return text
    # NOTE(review): the fixed temp.txt name is racy under concurrent requests
    temp_path = os.path.join(map_path, 'temp.txt')
    # FIX: parameter renamed from `str` (shadowed the builtin); temp file is
    # now cleaned up even when retrace fails (was leaked on exception)
    fp = open(temp_path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
    try:
        arg = ['java', '-jar', get_config('proguard_retrace_path'), '-verbose',
               os.path.join(map_path, mapElement.filename), temp_path]
        fd_popen = subprocess.Popen(arg, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = fd_popen.communicate()
        text = stdout
    finally:
        os.remove(temp_path)
    return text
@csrf_exempt
def receive_exception(request):
jsonData = json.loads(request.body,encoding='utf-8')
print 'receive_exception requested'
#step1: apikey를 이용하여 project찾기
#apikey가 validate한지 확인하기.
try:
apikey = jsonData['apikey']
projectElement = Projects.objects.get(apikey=apikey)
except ObjectDoesNotExist:
print 'Invalid apikey'
return HttpResponse('Invalid apikey')
#step2: errorname, errorclassname, linenum을 이용하여 동일한 에러가 있는지 찾기
errorname = jsonData['errorname']
errorclassname = jsonData['errorclassname']
linenum = jsonData['linenum']
print '%s %s %s' % (errorname,errorclassname,linenum)
#step2-0: Proguard 적용 확인
appversion = jsonData['appversion']
map_path = get_config('proguard_map_path')
map_path = os.path.join(map_path,projectElement.apikey)
map_path = os.path.join(map_path,appversion)
try:
mapElement = Proguardmap.objects.get(pid=projectElement,appversion=appversion)
errorname = proguard_retrace_oneline(errorname,linenum,map_path,mapElement)
errorclassname = proguard_retrace_oneline(errorclassname,linenum,map_path,mapElement)
callstack = proguard_retrace_callstack(jsonData['callstack'],map_path,mapElement)
except ObjectDoesNotExist:
mapElement = None
callstack = jsonData['callstack']
print 'no proguard mapfile'
try:
errorElement = Errors.objects.get(pid=projectElement,errorname=errorname,errorclassname=errorclassname,linenum=linenum)
#새로온 인스턴스 정보로 시간 갱신
errorElement.lastdate = naive2aware(jsonData['datetime'])
errorElement.numofinstances += 1
errorElement.totalmemusage += jsonData['appmemtotal']
errorElement.wifion += int(jsonData['wifion'])
errorElement.gpson += int(jsonData['gpson'])
errorElement.mobileon += int(jsonData['mobileon'])
errorElement.totalmemusage += int(jsonData['appmemtotal'])
errorElement.save()
e, created = Appstatistics.objects.get_or_create(iderror=errorElement,appversion=jsonData['appversion'],defaults={'count':1})
if not created:
e.count += 1
e.save()
e, created = Osstatistics.objects.get_or_create(iderror=errorElement,osversion=jsonData['osversion'],defaults={'count':1})
if not created:
e.count += 1
e.save()
e, created = Devicestatistics.objects.get_or_create(iderror=errorElement,devicename=jsonData['device'],defaults={'count':1})
if not created:
e.count += 1
e.save()
e, created = Countrystatistics.objects.get_or_create(iderror=errorElement,countryname=jsonData['country'],defaults={'count':1})
if not created:
e.count += 1
e.save()
e, created = Activitystatistics.objects.get_or_create(iderror=errorElement,activityname=jsonData['lastactivity'],defaults={'count':1})
if not created:
e.count += 1
e.save()
#에러 스코어 계산
calc_errorScore(errorElement)
except ObjectDoesNotExist:
#새로 들어온 에러라면 새로운 에러 생성
if int(jsonData['rank']) == -1:
autodetermine = 1 #True
else:
autodetermine = 0 #False
errorElement = Errors(
pid = projectElement,
errorname = errorname,
errorclassname = errorclassname,
linenum = linenum,
autodetermine = autodetermine,
rank = int(jsonData['rank']), # Undesided = -1, unhandled = 0, critical = 1, major = 2, minor = 3, native = 4
status = 0, # 0 = new, 1 = open, 2 = fixed, 3 = ignore
createdate = naive2aware(jsonData['datetime']),
lastdate = naive2aware(jsonData['datetime']),
numofinstances = 1,
callstack = callstack,
wifion = jsonData['wifion'],
gpson = jsonData['gpson'],
mobileon = jsonData['mobileon'],
totalmemusage = jsonData['appmemtotal'],
errorweight = 10,
recur = 0,
)
errorElement.save()
Appstatistics.objects.create(iderror=errorElement,appversion=jsonData['appversion'],count=1)
Osstatistics.objects.create(iderror=errorElement,osversion=jsonData['osversion'],count=1)
Devicestatistics.objects.create(iderror=errorElement,devicename=jsonData['device'],count=1)
Countrystatistics.objects.create(iderror=errorElement,countryname=jsonData['country'],count=1)
Activitystatistics.objects.create(iderror=errorElement,activityname=jsonData['lastactivity'],count=1)
#error score 계산
calc_errorScore(errorElement)
#step3: 테그 저장
if jsonData['tag']:
tagstr = jsonData['tag']
tagElement, created = Tags.objects.get_or_create(iderror=errorElement,pid=projectElement,tag=tagstr)
#step4: 인스턴스 생성하기
instanceElement = Instances(
iderror = errorElement,
ins_count = errorElement.numofinstances,
sdkversion = jsonData['sdkversion'],
appversion = jsonData['appversion'],
osversion = jsonData['osversion'],
kernelversion = jsonData['kernelversion'],
appmemmax = jsonData['appmemmax'],
appmemfree = jsonData['appmemfree'],
appmemtotal = jsonData['appmemtotal'],
country = jsonData['country'],
datetime = naive2aware(jsonData['datetime']),
locale = jsonData['locale'],
mobileon = jsonData['mobileon'],
gpson = jsonData['gpson'],
wifion = jsonData['wifion'],
device = jsonData['device'],
rooted = jsonData['rooted'],
scrheight = jsonData['scrheight'],
scrwidth = jsonData['scrwidth'],
scrorientation = jsonData['scrorientation'],
sysmemlow = jsonData['sysmemlow'],
log_path = '',
batterylevel = jsonData['batterylevel'],
availsdcard = jsonData['availsdcard'],
xdpi = jsonData['xdpi'],
ydpi = jsonData['ydpi'],
lastactivity = jsonData['lastactivity'],
callstack = callstack,
)
# primary key가 Auto-incrementing이기 때문에 save한 후 primary key를 읽을 수 있다.
instanceElement.save()
#step5: 이벤트패스 생성
#print 'here! ' + instanceElement.idinstance
#instanceElement.update()
print 'instanceElement.idinstance',instanceElement.idinstance
eventpath = jsonData['eventpaths']
depth = 10
for event in reversed(eventpath):
temp_str = event['classname'] + '.' + event['methodname']
temp_str = proguard_retrace_oneline(temp_str,event['linenum'],map_path,mapElement)
flag = temp_str.rfind('.')
classname = temp_str[0:flag]
methodname = temp_str[flag+1:]
if not 'label' in event: #event path에 label적용, 기존버전과 호환성을 확보하기위해 'label'초기화를 해줌 client ver 0.91 ->
event['label'] = ""
Eventpaths.objects.create(
idinstance = instanceElement,
iderror = errorElement,
ins_count = errorElement.numofinstances,
datetime = naive2aware(event['datetime']),
classname = classname,
methodname = methodname,
linenum = event['linenum'],
label = event['label'],
depth = depth
)
depth -= 1
#calc_eventpath(errorElement)
return HttpResponse(json.dumps({'idinstance':instanceElement.idinstance}), 'application/json');
@csrf_exempt
def receive_exception_log(request, idinstance):
#step1: idinstance에 해당하는 인스턴스 구하기
try:
instanceElement = Instances.objects.get(idinstance=int(idinstance))
#이미 로그가 저장되었다면 다음으로 들어오는 로그는 버그? 또는 외부공격으로 생각하고 차단
if instanceElement.log_path:
return HttpResponse('Already exists')
except ObjectDoesNotExist:
print 'Invalid idinstance %d' % int(idinstance)
return HttpResponse('Fail')
#step2: log파일 저장하기
log_path = os.path.join(get_config('log_pool_path'), '%s.txt' % str(idinstance))
f = file(log_path,'w')
f.write(request.body)
f.close()
print 'log received : %s' % log_path
#step3: 저장한 로그파일을 db에 명시하기
instanceElement.log_path = log_path
instanceElement.save()
return HttpResponse('success')
@csrf_exempt
def receive_eventpath(request):
    """Store breadcrumb events for a session (no crash involved)."""
    #print request.body
    jsonData = json.loads(request.body,encoding='utf-8')
    #print jsonData
    idsession = jsonData['idsession']
    eventpath = jsonData['eventpaths']
    # raises DoesNotExist (uncaught -> server error) for an unknown session id
    session_key = Session.objects.get(idsession=idsession)
    for event in eventpath:
        Sessionevent.objects.create(idsession=session_key,
                                    datetime=naive2aware(event['datetime']),
                                    classname=event['classname'],
                                    methodname=event['methodname'],
                                    linenum=event['linenum'])
    return HttpResponse('success')
@csrf_exempt
def receive_native(request):
print 'receive_native requested'
#print request.body
jsonData = json.loads(request.body,encoding='utf-8')
#print jsonData
#step1: apikey를 이용하여 project찾기
#apikey가 validate한지 확인하기.
try:
apikey = jsonData['apikey']
projectElement = Projects.objects.get(apikey=apikey)
except ObjectDoesNotExist:
print 'Invalid apikey'
return HttpResponse('Invalid apikey')
#step2: dummy errorElement생성
#새로 들어온 에러라면 새로운 에러 생성
if int(jsonData['rank']) == -1:
autodetermine = 1 #True
else:
autodetermine = 0 #False
errorElement = Errors(
pid = projectElement,
errorname = 'dummy',
errorclassname = 'native',
linenum = 0,
autodetermine = autodetermine,
rank = int(jsonData['rank']), # Undesided = -1, unhandled = 0, critical = 1, major = 2, minor = 3, native = 4
status = 0, # 0 = new, 1 = open, 2 = ignore, 3 = renew
createdate = naive2aware(jsonData['datetime']),
lastdate = naive2aware(jsonData['datetime']),
numofinstances = 1,
callstack = '',#jsonData['callstack'],
wifion = jsonData['wifion'],
gpson = jsonData['gpson'],
mobileon = jsonData['mobileon'],
totalmemusage = jsonData['appmemtotal'],
errorweight = 10,
recur = 0,
)
errorElement.save()
#step3: 테그 저장
tagstr = jsonData['tag']
if tagstr:
tagElement, created = Tags.objects.get_or_create(iderror=errorElement,pid=projectElement,tag=tagstr)
#step4: 인스턴스 생성하기
instanceElement = Instances(
iderror = errorElement,
ins_count = errorElement.numofinstances,
sdkversion = jsonData['sdkversion'],
appversion = jsonData['appversion'],
osversion = jsonData['osversion'],
kernelversion = jsonData['kernelversion'],
appmemmax = jsonData['appmemmax'],
appmemfree = jsonData['appmemfree'],
appmemtotal = jsonData['appmemtotal'],
country = jsonData['country'],
datetime = naive2aware(jsonData['datetime']),
locale = jsonData['locale'],
mobileon = jsonData['mobileon'],
gpson = jsonData['gpson'],
wifion = jsonData['wifion'],
device = jsonData['device'],
rooted = jsonData['rooted'],
scrheight = jsonData['scrheight'],
scrwidth = jsonData['scrwidth'],
scrorientation = jsonData['scrorientation'],
sysmemlow = jsonData['sysmemlow'],
log_path = '',
batterylevel = jsonData['batterylevel'],
availsdcard = jsonData['availsdcard'],
xdpi = jsonData['xdpi'],
ydpi = jsonData['ydpi'],
lastactivity = jsonData['lastactivity'],
)
# primary key가 Auto-incrementing이기 때문에 save한 후 primary key를 읽을 수 있다.
instanceElement.save()
#step5: 이벤트패스 생성
#print 'here! ' + instanceElement.idinstance
#instanceElement.update()
appversion = jsonData['appversion']
map_path = get_config('proguard_map_path')
map_path = os.path.join(map_path,projectElement.apikey)
map_path = os.path.join(map_path,appversion)
mapElement = Proguardmap.objects.get(pid=projectElement,appversion=appversion)
print 'instanceElement.idinstance',instanceElement.idinstance
eventpath = jsonData['eventpaths']
depth = 10
for event in reversed(eventpath):
temp_str = event['classname'] + '.' + event['methodname']
temp_str = proguard_retrace_oneline(temp_str,event['linenum'],map_path,mapElement,projectElement,appversion)
flag = temp_str.rfind('.')
classname = temp_str[0:flag]
methodname = temp_str[flag+1:]
if not 'label' in event: #event path에 label적용, 기존버전과 호환성을 확보하기위해 'label'초기화를 해줌 client ver 0.91 ->
event['label'] = ""
Eventpaths.objects.create(
idinstance = instanceElement,
iderror = errorElement,
ins_count = errorElement.numofinstances,
datetime = naive2aware(event['datetime']),
classname = classname,
methodname = methodname,
linenum = event['linenum'],
label = event['label'],
depth = depth,
)
depth -= 1
return HttpResponse(json.dumps({'idinstance':instanceElement.idinstance}), 'application/json');
class Ignore_clib:
    """Well-known system libraries whose missing-symbol warnings from
    minidump_stackwalk are expected and must not be registered as
    project .so files (see receive_native_dump).

    The attribute keeps its historical name `list` for callers doing
    `lib[0] in Ignore_clib.list`; it is now a frozenset for O(1)
    membership tests.
    """
    list = frozenset([
        'libdvm.so',
        'libc.so',
        'libcutils.so',
        'app_process',
        'libandroid_runtime.so',
        'libutils.so',
        'libbinder.so',
        'libjavacore.so',
        'librs_jni.so',
        'linker',
    ])
@csrf_exempt
def receive_native_dump(request, idinstance):
#step1: idinstance에 해당하는 인스턴스 구하기
try:
instanceElement = Instances.objects.get(idinstance=int(idinstance))
errorElement = instanceElement.iderror
projectElement = errorElement.pid
#이미 로그가 저장되었다면 다음으로 들어오는 로그는 버그 또는 외부공격으로 생각하고 차단
if instanceElement.dump_path:
return HttpResponse('Already exists')
except ObjectDoesNotExist:
print 'Invalid idinstance %d' % int(idinstance)
return HttpResponse('Fail')
#step2: dump파일 저장하기
dump_path = os.path.join(get_config('dmp_pool_path'), '%s.dmp' % str(idinstance))
f = file(dump_path,'w')
f.write(request.body)
f.close()
print 'log received : %s' % dump_path
#step3: 저장한 로그파일을 db에 명시하기
instanceElement.dump_path = dump_path
instanceElement.save()
#step4: dmp파일 분석(with nosym)
#sym_pool_path = os.path.join(get_config('sym_pool_path'),str(projectElement.apikey))
#sym_pool_path = os.path.join(sym_pool_path, instanceElement.appversion)
#arg = [get_config('minidump_stackwalk_path') , dump_path, sym_pool_path]
arg = [get_config('minidump_stackwalk_path') , dump_path]
fd_popen = subprocess.Popen(arg, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = fd_popen.communicate()
#so library 추출
libs = []
stderr_split = stderr.splitlines()
for line in stderr_split:
if line.find('Couldn\'t load symbols') == -1: #magic keyword
continue
lib = line[line.find('for: ')+5:].split('|')
if lib[1] == '000000000000000000000000000000000' or lib[0] in Ignore_clib.list:
continue
#print lib[1] + ' ' + lib[0]
libs.append(lib)
#DB저장하기
for lib in libs:
sofileElement, created = Sofiles.objects.get_or_create(pid=projectElement, appversion=instanceElement.appversion, versionkey=lib[1], filename=lib[0],defaults={'uploaded':'X'})
if created:
print 'new version key : ', lib[1], lib[0]
else:
print 'version key:', lib[1], lib[0], 'already exists'
#ErrorName, ErrorClassname, linenum 추출하기
cs_flag = 0
errorname = ''
errorclassname = ''
linenum = ''
stdout_split = stdout.splitlines()
for line in stdout_split:
if line.find('Crash reason:') != -1:
errorname = line.split()[2]
if cs_flag:
if line.find('Thread') != -1 or errorclassname:
break
#errorclassname 찾기
for lib in libs:
flag = line.find(lib[0])
if flag == -1:
continue
separator = line.find(' + ')
if separator != -1:
errorclassname = line[flag:separator]
linenum = line[separator+3:]
else:
errorclassname = line[flag:]
linenum = 0
break
if line.find('(crashed)') != -1:
cs_flag = 1
#dmp파일 분석(with sym)
sym_pool_path = os.path.join(get_config('sym_pool_path'),str(projectElement.apikey))
sym_pool_path = os.path.join(sym_pool_path, instanceElement.appversion)
arg = [get_config('minidump_stackwalk_path') , dump_path, sym_pool_path]
fd_popen = subprocess.Popen(arg, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = fd_popen.communicate()
cs_count = 0
callstack = ''
stdout_split = stdout.splitlines()
for line in stdout_split:
if line.find('(crashed)') != -1:
callstack = line
cs_count = cs_count + 1
elif cs_count:
if line.find('Thread') != -1 or cs_count > 40:
break;
callstack += '\n'
callstack += line
cs_count = cs_count + 1
#print callstack
try:
errorElement_exist = Errors.objects.get(pid=projectElement, errorname=errorname, errorclassname=errorclassname, linenum=linenum)
errorElement_exist.lastdate = errorElement.lastdate
errorElement_exist.numofinstances += 1
errorElement_exist.wifion += errorElement.wifion
errorElement_exist.gpson += errorElement.gpson
errorElement_exist.mobileon += errorElement.mobileon
errorElement_exist.totalmemusage += errorElement.totalmemusage
errorElement_exist.save()
instanceElement.iderror = errorElement_exist
instanceElement.save()
e, created = Appstatistics.objects.get_or_create(iderror=errorElement,appversion=instanceElement.appversion,defaults={'count':1})
if not created:
e.count += 1
e.save()
e, created = Osstatistics.objects.get_or_create(iderror=errorElement,osversion=instanceElement.osversion,defaults={'count':1})
if not created:
e.count += 1
e.save()
e, created = Devicestatistics.objects.get_or_create(iderror=errorElement,devicename=instanceElement.device,defaults={'count':1})
if not created:
e.count += 1
e.save()
e, created = Countrystatistics.objects.get_or_create(iderror=errorElement,countryname=instanceElement.country,defaults={'count':1})
if not created:
e.count += 1
e.save()
e, created = Activitystatistics.objects.get_or_create(iderror=errorElement,activityname=instanceElement.lastactivity,defaults={'count':1})
if not created:
e.count += 1
e.save()
errorElement.delete()
#errorscore 계산
calc_errorScore(errorElement_exist)
print 'native error %s:%s already exist' % (errorname, errorclassname)
except ObjectDoesNotExist:
errorElement.errorname = errorname
errorElement.errorclassname = errorclassname
errorElement.callstack = callstack
errorElement.linenum = linenum
errorElement.save()
Appstatistics.objects.create(iderror=errorElement,appversion=instanceElement.appversion,count=1)
Osstatistics.objects.create(iderror=errorElement,osversion=instanceElement.osversion,count=1)
Devicestatistics.objects.create(iderror=errorElement,devicename=instanceElement.device,count=1)
Countrystatistics.objects.create(iderror=errorElement,countryname=instanceElement.country,count=1)
Activitystatistics.objects.create(iderror=errorElement,activityname=instanceElement.lastactivity,count=1)
#errorscore 계산
calc_errorScore(errorElement)
return HttpResponse('Success')
def calc_errorScore(errorElement):
    """Recompute errorElement's composite score and persist it.

    score = (date_component + quantity_component + rank_component) * constant,
    with all weights coming from the 'error_score_parameter' config JSON.
    Saves errorweight / gain1 / gain2 on the Errors row.
    """
    erscore_parameter = json.loads(get_config('error_score_parameter'))
    # k1: date component -- decays with the number of days since last seen
    d = naive2aware(getUTCDatetime()) - errorElement.lastdate
    date_er_score = ((erscore_parameter['retention'])/ (erscore_parameter['retention'] + float(d.days) )) * erscore_parameter['date_constant']
    # k2: quantity component -- this error's occurrences relative to app runs,
    # summed over the app versions present in both tables
    runcounts = Appruncount.objects.filter(pid=errorElement.pid)      # runs per app version
    errorcounts = Appstatistics.objects.filter(iderror=errorElement)  # this error per app version
    tlb = []
    for r in runcounts:
        for e in errorcounts:
            if r.appversion == e.appversion:
                tlb.append({'appversion':r.appversion,'runcount':r.runcount,'errcount':e.count})
                break
    wholeErrorCounter = 0.0
    errorCounter = 0.0
    for version_statics in tlb:
        wholeErrorCounter += version_statics['runcount']
        errorCounter += version_statics['errcount']
    # BUGFIX: guard against ZeroDivisionError when no version overlaps
    # (previously crashed whenever tlb came out empty)
    if wholeErrorCounter:
        quantity_er_score = errorCounter/wholeErrorCounter
    else:
        quantity_er_score = 0.0
    # k3: rank component
    # NOTE(review): rank_to_constant may return the string 'fail' for an
    # unknown rank, which would raise TypeError here -- confirm rank values
    rank_er_score = rank_to_constant(errorElement.rank) * erscore_parameter['rank_ratio_constant']
    # final composite score
    error_Score = (date_er_score + quantity_er_score + rank_er_score) * erscore_parameter['constant']
    # persist the score and its components
    errorElement.errorweight = error_Score
    errorElement.gain1 = float(date_er_score)
    errorElement.gain2 = float(quantity_er_score)
    errorElement.save()
def rank_to_constant(rank):
    """Map an error rank (see utility.RANK) to its configured score weight.

    Returns the string 'fail' for unknown ranks. Parameter renamed from
    `int`, which shadowed the builtin; all callers pass it positionally.
    """
    erscore_parameter = json.loads(get_config('error_score_parameter'))
    # config key per rank value
    key_by_rank = {
        RANK.Native: 'na',
        RANK.Unhandle: 'un',
        RANK.Critical: 'cr',
        RANK.Major: 'ma',
        RANK.Minor: 'mi',
    }
    key = key_by_rank.get(rank)
    if key is None:
        return 'fail'
    return erscore_parameter[key]
Total memory: remove duplicate accumulation of appmemtotal
# Create your views here.
# -*- coding: utf-8 -*-
import os
import time
import json
import subprocess
import datetime
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.core.exceptions import ObjectDoesNotExist
from urqa.models import Session
from urqa.models import Sessionevent
from urqa.models import Projects
from urqa.models import Errors
from urqa.models import Instances
from urqa.models import Eventpaths
from urqa.models import Tags
from urqa.models import Appruncount
from urqa.models import Sofiles
from urqa.models import Appstatistics
from urqa.models import Osstatistics
from urqa.models import Devicestatistics
from urqa.models import Countrystatistics
from urqa.models import Activitystatistics
from urqa.models import Proguardmap
from utility import naive2aware
from utility import getUTCDatetime
from utility import getUTCawaredate
from utility import RANK
from config import get_config
#삭제요망
from common import validUserPjtError
@csrf_exempt
def connect(request):
    """Session handshake: look up the project by apikey, issue an idsession
    (epoch milliseconds) and bump the per-app-version run counter."""
    jsonData = json.loads(request.body,encoding='utf-8')
    #print jsonData
    # step 1: find the project via its apikey
    try:
        apikey = jsonData['apikey']
        projectElement = Projects.objects.get(apikey=apikey)
    except ObjectDoesNotExist:
        print 'Invalid from client(connect)'
        return HttpResponse(json.dumps({'idsession':'0'}), 'application/json');
    # step 2: issue an idsession (millisecond timestamp; Python 2 `long`)
    appversion = jsonData['appversion']
    idsession = long(time.time() * 1000)
    Session.objects.create(idsession=idsession,pid=projectElement,appversion=appversion)
    print 'Project: %s, Ver: %s, new idsession: %d' % (projectElement.name,appversion,idsession)
    # step 3: increment the cumulative run count per app version,
    # keyed by UTC date (date= is part of the lookup, not the defaults)
    appruncountElement, created = Appruncount.objects.get_or_create(pid=projectElement,appversion=appversion,defaults={'runcount':1},date=getUTCawaredate())
    if created == False:
        appruncountElement.runcount += 1
        appruncountElement.save()
    else:
        print 'project: %s, new version: %s' % (projectElement.name,appruncountElement.appversion)
    return HttpResponse(json.dumps({'idsession':idsession}), 'application/json');
def proguard_retrace_oneline(str,linenum,map_path,mapElement):
    """De-obfuscate one 'class.method' frame with ProGuard retrace; returns
    the input unchanged when no mapping exists (mapElement is None).

    NOTE(review): the parameter shadows the builtin `str`; the fixed
    temp.txt name is racy under concurrent requests, and the temp file is
    leaked if retrace raises before os.remove().
    """
    if mapElement == None:
        return str
    fp = open(os.path.join(map_path,'temp.txt') , 'wb')
    # retrace expects stacktrace-shaped lines: "at <frame> (:<line>)"
    fp.write('at\t'+str+'\t(:%s)' % linenum)
    fp.close()
    arg = ['java','-jar',get_config('proguard_retrace_path'),'-verbose',os.path.join(map_path,mapElement.filename),os.path.join(map_path,'temp.txt')]
    #print arg
    fd_popen = subprocess.Popen(arg, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = fd_popen.communicate()
    # output mirrors the input: "at\t<frame>\t(:line)" -- take the frame
    # (IndexError if retrace output lacks a tab)
    stdout_split = stdout.split('\t')
    str = stdout_split[1]
    os.remove(os.path.join(map_path,'temp.txt'))
    return str
def proguard_retrace_callstack(str,map_path,mapElement):
    """De-obfuscate a whole callstack with ProGuard retrace; returns the
    input unchanged when no mapping exists (mapElement is None).

    NOTE(review): same caveats as proguard_retrace_oneline -- builtin `str`
    shadowed, racy shared temp.txt, temp file leaked on exception.
    """
    if mapElement == None:
        return str
    fp = open(os.path.join(map_path,'temp.txt') , 'wb')
    fp.write(str)
    fp.close()
    arg = ['java','-jar',get_config('proguard_retrace_path'),'-verbose',os.path.join(map_path,mapElement.filename),os.path.join(map_path,'temp.txt')]
    #print arg
    fd_popen = subprocess.Popen(arg, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = fd_popen.communicate()
    # the whole of retrace's stdout becomes the de-obfuscated callstack
    str = stdout
    os.remove(os.path.join(map_path,'temp.txt'))
    return str
@csrf_exempt
def receive_exception(request):
    """Endpoint receiving a Java/managed crash report from the client SDK.

    Flow: validate the API key, de-obfuscate names via ProGuard retrace when
    a mapping file exists, create or update the aggregated Errors row plus
    its per-dimension statistics, then store the concrete Instances row and
    its event path. Returns {'idinstance': ...} as JSON.
    """
    jsonData = json.loads(request.body,encoding='utf-8')
    print 'receive_exception requested'
    #step1: find the project via its apikey
    #and verify that the apikey is valid
    try:
        apikey = jsonData['apikey']
        projectElement = Projects.objects.get(apikey=apikey)
    except ObjectDoesNotExist:
        print 'Invalid apikey'
        return HttpResponse('Invalid apikey')
    #step2: look for an identical existing error, keyed by
    #(errorname, errorclassname, linenum)
    errorname = jsonData['errorname']
    errorclassname = jsonData['errorclassname']
    linenum = jsonData['linenum']
    print '%s %s %s' % (errorname,errorclassname,linenum)
    #step2-0: check whether a ProGuard mapping applies to this app version
    appversion = jsonData['appversion']
    map_path = get_config('proguard_map_path')
    map_path = os.path.join(map_path,projectElement.apikey)
    map_path = os.path.join(map_path,appversion)
    try:
        mapElement = Proguardmap.objects.get(pid=projectElement,appversion=appversion)
        # de-obfuscate the error location and the callstack with retrace
        errorname = proguard_retrace_oneline(errorname,linenum,map_path,mapElement)
        errorclassname = proguard_retrace_oneline(errorclassname,linenum,map_path,mapElement)
        callstack = proguard_retrace_callstack(jsonData['callstack'],map_path,mapElement)
    except ObjectDoesNotExist:
        # no mapping file uploaded for this version: keep the raw names
        mapElement = None
        callstack = jsonData['callstack']
        print 'no proguard mapfile'
    try:
        errorElement = Errors.objects.get(pid=projectElement,errorname=errorname,errorclassname=errorclassname,linenum=linenum)
        # known error: refresh the aggregate with this new occurrence
        errorElement.lastdate = naive2aware(jsonData['datetime'])
        errorElement.numofinstances += 1
        #errorElement.totalmemusage += jsonData['appmemtotal']
        errorElement.wifion += int(jsonData['wifion'])
        errorElement.gpson += int(jsonData['gpson'])
        errorElement.mobileon += int(jsonData['mobileon'])
        errorElement.totalmemusage += int(jsonData['appmemtotal'])
        errorElement.save()
        # bump the per-dimension counters (app/os/device/country/activity)
        e, created = Appstatistics.objects.get_or_create(iderror=errorElement,appversion=jsonData['appversion'],defaults={'count':1})
        if not created:
            e.count += 1
            e.save()
        e, created = Osstatistics.objects.get_or_create(iderror=errorElement,osversion=jsonData['osversion'],defaults={'count':1})
        if not created:
            e.count += 1
            e.save()
        e, created = Devicestatistics.objects.get_or_create(iderror=errorElement,devicename=jsonData['device'],defaults={'count':1})
        if not created:
            e.count += 1
            e.save()
        e, created = Countrystatistics.objects.get_or_create(iderror=errorElement,countryname=jsonData['country'],defaults={'count':1})
        if not created:
            e.count += 1
            e.save()
        e, created = Activitystatistics.objects.get_or_create(iderror=errorElement,activityname=jsonData['lastactivity'],defaults={'count':1})
        if not created:
            e.count += 1
            e.save()
        # recompute the error score
        calc_errorScore(errorElement)
    except ObjectDoesNotExist:
        # first occurrence: create a brand new error
        if int(jsonData['rank']) == -1:
            autodetermine = 1 #True
        else:
            autodetermine = 0 #False
        errorElement = Errors(
            pid = projectElement,
            errorname = errorname,
            errorclassname = errorclassname,
            linenum = linenum,
            autodetermine = autodetermine,
            rank = int(jsonData['rank']), # Undesided = -1, unhandled = 0, critical = 1, major = 2, minor = 3, native = 4
            status = 0, # 0 = new, 1 = open, 2 = fixed, 3 = ignore
            createdate = naive2aware(jsonData['datetime']),
            lastdate = naive2aware(jsonData['datetime']),
            numofinstances = 1,
            callstack = callstack,
            wifion = jsonData['wifion'],
            gpson = jsonData['gpson'],
            mobileon = jsonData['mobileon'],
            totalmemusage = jsonData['appmemtotal'],
            errorweight = 10,
            recur = 0,
            )
        errorElement.save()
        # seed the per-dimension counters at 1
        Appstatistics.objects.create(iderror=errorElement,appversion=jsonData['appversion'],count=1)
        Osstatistics.objects.create(iderror=errorElement,osversion=jsonData['osversion'],count=1)
        Devicestatistics.objects.create(iderror=errorElement,devicename=jsonData['device'],count=1)
        Countrystatistics.objects.create(iderror=errorElement,countryname=jsonData['country'],count=1)
        Activitystatistics.objects.create(iderror=errorElement,activityname=jsonData['lastactivity'],count=1)
        # compute the error score
        calc_errorScore(errorElement)
    #step3: store the tag, if any
    if jsonData['tag']:
        tagstr = jsonData['tag']
        tagElement, created = Tags.objects.get_or_create(iderror=errorElement,pid=projectElement,tag=tagstr)
    #step4: create the instance row for this concrete occurrence
    instanceElement = Instances(
        iderror = errorElement,
        ins_count = errorElement.numofinstances,
        sdkversion = jsonData['sdkversion'],
        appversion = jsonData['appversion'],
        osversion = jsonData['osversion'],
        kernelversion = jsonData['kernelversion'],
        appmemmax = jsonData['appmemmax'],
        appmemfree = jsonData['appmemfree'],
        appmemtotal = jsonData['appmemtotal'],
        country = jsonData['country'],
        datetime = naive2aware(jsonData['datetime']),
        locale = jsonData['locale'],
        mobileon = jsonData['mobileon'],
        gpson = jsonData['gpson'],
        wifion = jsonData['wifion'],
        device = jsonData['device'],
        rooted = jsonData['rooted'],
        scrheight = jsonData['scrheight'],
        scrwidth = jsonData['scrwidth'],
        scrorientation = jsonData['scrorientation'],
        sysmemlow = jsonData['sysmemlow'],
        log_path = '',
        batterylevel = jsonData['batterylevel'],
        availsdcard = jsonData['availsdcard'],
        xdpi = jsonData['xdpi'],
        ydpi = jsonData['ydpi'],
        lastactivity = jsonData['lastactivity'],
        callstack = callstack,
        )
    # the primary key is auto-incrementing, so it can only be read after save()
    instanceElement.save()
    #step5: create the event path entries
    #print 'here! ' + instanceElement.idinstance
    #instanceElement.update()
    print 'instanceElement.idinstance',instanceElement.idinstance
    eventpath = jsonData['eventpaths']
    depth = 10
    for event in reversed(eventpath):
        temp_str = event['classname'] + '.' + event['methodname']
        temp_str = proguard_retrace_oneline(temp_str,event['linenum'],map_path,mapElement)
        # split "pkg.Class.method" at the last dot into class / method
        flag = temp_str.rfind('.')
        classname = temp_str[0:flag]
        methodname = temp_str[flag+1:]
        if not 'label' in event: # initialise 'label' for pre-0.91 clients that do not send it
            event['label'] = ""
        Eventpaths.objects.create(
            idinstance = instanceElement,
            iderror = errorElement,
            ins_count = errorElement.numofinstances,
            datetime = naive2aware(event['datetime']),
            classname = classname,
            methodname = methodname,
            linenum = event['linenum'],
            label = event['label'],
            depth = depth
            )
        depth -= 1
    #calc_eventpath(errorElement)
    return HttpResponse(json.dumps({'idinstance':instanceElement.idinstance}), 'application/json');
@csrf_exempt
def receive_exception_log(request, idinstance):
#step1: idinstance에 해당하는 인스턴스 구하기
try:
instanceElement = Instances.objects.get(idinstance=int(idinstance))
#이미 로그가 저장되었다면 다음으로 들어오는 로그는 버그? 또는 외부공격으로 생각하고 차단
if instanceElement.log_path:
return HttpResponse('Already exists')
except ObjectDoesNotExist:
print 'Invalid idinstance %d' % int(idinstance)
return HttpResponse('Fail')
#step2: log파일 저장하기
log_path = os.path.join(get_config('log_pool_path'), '%s.txt' % str(idinstance))
f = file(log_path,'w')
f.write(request.body)
f.close()
print 'log received : %s' % log_path
#step3: 저장한 로그파일을 db에 명시하기
instanceElement.log_path = log_path
instanceElement.save()
return HttpResponse('success')
@csrf_exempt
def receive_eventpath(request):
    """Store the event path (navigation trace) reported for a session.

    The JSON body carries 'idsession' plus a list of 'eventpaths' entries;
    each entry becomes one Sessionevent row.
    """
    payload = json.loads(request.body,encoding='utf-8')
    session_key = Session.objects.get(idsession=payload['idsession'])
    for step in payload['eventpaths']:
        Sessionevent.objects.create(
            idsession=session_key,
            datetime=naive2aware(step['datetime']),
            classname=step['classname'],
            methodname=step['methodname'],
            linenum=step['linenum'],
        )
    return HttpResponse('success')
@csrf_exempt
def receive_native(request):
print 'receive_native requested'
#print request.body
jsonData = json.loads(request.body,encoding='utf-8')
#print jsonData
#step1: apikey를 이용하여 project찾기
#apikey가 validate한지 확인하기.
try:
apikey = jsonData['apikey']
projectElement = Projects.objects.get(apikey=apikey)
except ObjectDoesNotExist:
print 'Invalid apikey'
return HttpResponse('Invalid apikey')
#step2: dummy errorElement생성
#새로 들어온 에러라면 새로운 에러 생성
if int(jsonData['rank']) == -1:
autodetermine = 1 #True
else:
autodetermine = 0 #False
errorElement = Errors(
pid = projectElement,
errorname = 'dummy',
errorclassname = 'native',
linenum = 0,
autodetermine = autodetermine,
rank = int(jsonData['rank']), # Undesided = -1, unhandled = 0, critical = 1, major = 2, minor = 3, native = 4
status = 0, # 0 = new, 1 = open, 2 = ignore, 3 = renew
createdate = naive2aware(jsonData['datetime']),
lastdate = naive2aware(jsonData['datetime']),
numofinstances = 1,
callstack = '',#jsonData['callstack'],
wifion = jsonData['wifion'],
gpson = jsonData['gpson'],
mobileon = jsonData['mobileon'],
totalmemusage = jsonData['appmemtotal'],
errorweight = 10,
recur = 0,
)
errorElement.save()
#step3: 테그 저장
tagstr = jsonData['tag']
if tagstr:
tagElement, created = Tags.objects.get_or_create(iderror=errorElement,pid=projectElement,tag=tagstr)
#step4: 인스턴스 생성하기
instanceElement = Instances(
iderror = errorElement,
ins_count = errorElement.numofinstances,
sdkversion = jsonData['sdkversion'],
appversion = jsonData['appversion'],
osversion = jsonData['osversion'],
kernelversion = jsonData['kernelversion'],
appmemmax = jsonData['appmemmax'],
appmemfree = jsonData['appmemfree'],
appmemtotal = jsonData['appmemtotal'],
country = jsonData['country'],
datetime = naive2aware(jsonData['datetime']),
locale = jsonData['locale'],
mobileon = jsonData['mobileon'],
gpson = jsonData['gpson'],
wifion = jsonData['wifion'],
device = jsonData['device'],
rooted = jsonData['rooted'],
scrheight = jsonData['scrheight'],
scrwidth = jsonData['scrwidth'],
scrorientation = jsonData['scrorientation'],
sysmemlow = jsonData['sysmemlow'],
log_path = '',
batterylevel = jsonData['batterylevel'],
availsdcard = jsonData['availsdcard'],
xdpi = jsonData['xdpi'],
ydpi = jsonData['ydpi'],
lastactivity = jsonData['lastactivity'],
)
# primary key가 Auto-incrementing이기 때문에 save한 후 primary key를 읽을 수 있다.
instanceElement.save()
#step5: 이벤트패스 생성
#print 'here! ' + instanceElement.idinstance
#instanceElement.update()
appversion = jsonData['appversion']
map_path = get_config('proguard_map_path')
map_path = os.path.join(map_path,projectElement.apikey)
map_path = os.path.join(map_path,appversion)
mapElement = Proguardmap.objects.get(pid=projectElement,appversion=appversion)
print 'instanceElement.idinstance',instanceElement.idinstance
eventpath = jsonData['eventpaths']
depth = 10
for event in reversed(eventpath):
temp_str = event['classname'] + '.' + event['methodname']
temp_str = proguard_retrace_oneline(temp_str,event['linenum'],map_path,mapElement,projectElement,appversion)
flag = temp_str.rfind('.')
classname = temp_str[0:flag]
methodname = temp_str[flag+1:]
if not 'label' in event: #event path에 label적용, 기존버전과 호환성을 확보하기위해 'label'초기화를 해줌 client ver 0.91 ->
event['label'] = ""
Eventpaths.objects.create(
idinstance = instanceElement,
iderror = errorElement,
ins_count = errorElement.numofinstances,
datetime = naive2aware(event['datetime']),
classname = classname,
methodname = methodname,
linenum = event['linenum'],
label = event['label'],
depth = depth,
)
depth -= 1
return HttpResponse(json.dumps({'idinstance':instanceElement.idinstance}), 'application/json');
class Ignore_clib:
    """System libraries whose frames are skipped when scanning a native
    minidump for missing symbols (the app developer never uploads symbols
    for these)."""
    list = ('libdvm.so libc.so libcutils.so app_process '
            'libandroid_runtime.so libutils.so libbinder.so '
            'libjavacore.so librs_jni.so linker').split()
@csrf_exempt
def receive_native_dump(request, idinstance):
#step1: idinstance에 해당하는 인스턴스 구하기
try:
instanceElement = Instances.objects.get(idinstance=int(idinstance))
errorElement = instanceElement.iderror
projectElement = errorElement.pid
#이미 로그가 저장되었다면 다음으로 들어오는 로그는 버그 또는 외부공격으로 생각하고 차단
if instanceElement.dump_path:
return HttpResponse('Already exists')
except ObjectDoesNotExist:
print 'Invalid idinstance %d' % int(idinstance)
return HttpResponse('Fail')
#step2: dump파일 저장하기
dump_path = os.path.join(get_config('dmp_pool_path'), '%s.dmp' % str(idinstance))
f = file(dump_path,'w')
f.write(request.body)
f.close()
print 'log received : %s' % dump_path
#step3: 저장한 로그파일을 db에 명시하기
instanceElement.dump_path = dump_path
instanceElement.save()
#step4: dmp파일 분석(with nosym)
#sym_pool_path = os.path.join(get_config('sym_pool_path'),str(projectElement.apikey))
#sym_pool_path = os.path.join(sym_pool_path, instanceElement.appversion)
#arg = [get_config('minidump_stackwalk_path') , dump_path, sym_pool_path]
arg = [get_config('minidump_stackwalk_path') , dump_path]
fd_popen = subprocess.Popen(arg, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = fd_popen.communicate()
#so library 추출
libs = []
stderr_split = stderr.splitlines()
for line in stderr_split:
if line.find('Couldn\'t load symbols') == -1: #magic keyword
continue
lib = line[line.find('for: ')+5:].split('|')
if lib[1] == '000000000000000000000000000000000' or lib[0] in Ignore_clib.list:
continue
#print lib[1] + ' ' + lib[0]
libs.append(lib)
#DB저장하기
for lib in libs:
sofileElement, created = Sofiles.objects.get_or_create(pid=projectElement, appversion=instanceElement.appversion, versionkey=lib[1], filename=lib[0],defaults={'uploaded':'X'})
if created:
print 'new version key : ', lib[1], lib[0]
else:
print 'version key:', lib[1], lib[0], 'already exists'
#ErrorName, ErrorClassname, linenum 추출하기
cs_flag = 0
errorname = ''
errorclassname = ''
linenum = ''
stdout_split = stdout.splitlines()
for line in stdout_split:
if line.find('Crash reason:') != -1:
errorname = line.split()[2]
if cs_flag:
if line.find('Thread') != -1 or errorclassname:
break
#errorclassname 찾기
for lib in libs:
flag = line.find(lib[0])
if flag == -1:
continue
separator = line.find(' + ')
if separator != -1:
errorclassname = line[flag:separator]
linenum = line[separator+3:]
else:
errorclassname = line[flag:]
linenum = 0
break
if line.find('(crashed)') != -1:
cs_flag = 1
#dmp파일 분석(with sym)
sym_pool_path = os.path.join(get_config('sym_pool_path'),str(projectElement.apikey))
sym_pool_path = os.path.join(sym_pool_path, instanceElement.appversion)
arg = [get_config('minidump_stackwalk_path') , dump_path, sym_pool_path]
fd_popen = subprocess.Popen(arg, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = fd_popen.communicate()
cs_count = 0
callstack = ''
stdout_split = stdout.splitlines()
for line in stdout_split:
if line.find('(crashed)') != -1:
callstack = line
cs_count = cs_count + 1
elif cs_count:
if line.find('Thread') != -1 or cs_count > 40:
break;
callstack += '\n'
callstack += line
cs_count = cs_count + 1
#print callstack
try:
errorElement_exist = Errors.objects.get(pid=projectElement, errorname=errorname, errorclassname=errorclassname, linenum=linenum)
errorElement_exist.lastdate = errorElement.lastdate
errorElement_exist.numofinstances += 1
errorElement_exist.wifion += errorElement.wifion
errorElement_exist.gpson += errorElement.gpson
errorElement_exist.mobileon += errorElement.mobileon
errorElement_exist.totalmemusage += errorElement.totalmemusage
errorElement_exist.save()
instanceElement.iderror = errorElement_exist
instanceElement.save()
e, created = Appstatistics.objects.get_or_create(iderror=errorElement,appversion=instanceElement.appversion,defaults={'count':1})
if not created:
e.count += 1
e.save()
e, created = Osstatistics.objects.get_or_create(iderror=errorElement,osversion=instanceElement.osversion,defaults={'count':1})
if not created:
e.count += 1
e.save()
e, created = Devicestatistics.objects.get_or_create(iderror=errorElement,devicename=instanceElement.device,defaults={'count':1})
if not created:
e.count += 1
e.save()
e, created = Countrystatistics.objects.get_or_create(iderror=errorElement,countryname=instanceElement.country,defaults={'count':1})
if not created:
e.count += 1
e.save()
e, created = Activitystatistics.objects.get_or_create(iderror=errorElement,activityname=instanceElement.lastactivity,defaults={'count':1})
if not created:
e.count += 1
e.save()
errorElement.delete()
#errorscore 계산
calc_errorScore(errorElement_exist)
print 'native error %s:%s already exist' % (errorname, errorclassname)
except ObjectDoesNotExist:
errorElement.errorname = errorname
errorElement.errorclassname = errorclassname
errorElement.callstack = callstack
errorElement.linenum = linenum
errorElement.save()
Appstatistics.objects.create(iderror=errorElement,appversion=instanceElement.appversion,count=1)
Osstatistics.objects.create(iderror=errorElement,osversion=instanceElement.osversion,count=1)
Devicestatistics.objects.create(iderror=errorElement,devicename=instanceElement.device,count=1)
Countrystatistics.objects.create(iderror=errorElement,countryname=instanceElement.country,count=1)
Activitystatistics.objects.create(iderror=errorElement,activityname=instanceElement.lastactivity,count=1)
#errorscore 계산
calc_errorScore(errorElement)
return HttpResponse('Success')
def calc_errorScore(errorElement):
    """Recompute and persist the weighted score of an error.

    score = (k1_recency + k2_frequency + k3_rank) * constant, with all
    parameters read from the 'error_score_parameter' JSON config.
    """
    erscore_parameter = json.loads(get_config('error_score_parameter'))
    #k1: recency -- decays with the number of days since the last occurrence
    d = naive2aware(getUTCDatetime()) - errorElement.lastdate
    date_er_score = ((erscore_parameter['retention'])/ (erscore_parameter['retention'] + float(d.days) )) * erscore_parameter['date_constant']
    #k2: frequency -- this error's occurrences relative to total app runs,
    # summed over the app versions it was seen in
    runcounts = Appruncount.objects.filter(pid=errorElement.pid) # runs per app version
    errorcounts = Appstatistics.objects.filter(iderror=errorElement) # this error's count per app version
    tlb = []
    for r in runcounts:
        for e in errorcounts:
            if r.appversion == e.appversion:
                tlb.append({'appversion':r.appversion,'runcount':r.runcount,'errcount':e.count})
                break
    wholeErrorCounter = 0.0
    errorCounter = 0.0
    for version_statics in tlb:
        wholeErrorCounter += version_statics['runcount']
        errorCounter += version_statics['errcount']
    # BUGFIX: guard against ZeroDivisionError when no run counts match any
    # version this error was seen in (tlb empty, or all run counts zero)
    quantity_er_score = errorCounter/wholeErrorCounter if wholeErrorCounter else 0.0
    #k3: rank -- configured weight of the error's severity rank
    rank_er_score = rank_to_constant(errorElement.rank) * erscore_parameter['rank_ratio_constant']
    # final error score
    error_Score = (date_er_score + quantity_er_score + rank_er_score) * erscore_parameter['constant']
    # persist score and its components
    errorElement.errorweight = error_Score
    errorElement.gain1 = float(date_er_score)
    errorElement.gain2 = float(quantity_er_score)
    errorElement.save()
def rank_to_constant(int):
    """Map an error rank (see RANK) to its configured score constant.

    The parameter keeps its historical name 'int' for backward
    compatibility; it is aliased immediately to avoid shadowing the builtin.
    Returns the configured float for known ranks, or the historical string
    'fail' for unknown ones.
    """
    rank = int
    erscore_parameter = json.loads(get_config('error_score_parameter'))
    key_by_rank = {
        RANK.Native: 'na',
        RANK.Unhandle: 'un',
        RANK.Critical: 'cr',
        RANK.Major: 'ma',
        RANK.Minor: 'mi',
    }
    if rank in key_by_rank:
        return erscore_parameter[key_by_rank[rank]]
    # NOTE(review): callers (calc_errorScore) multiply this result by a
    # number; the historical 'fail' string is kept for compatibility but
    # will misbehave if an unknown rank ever reaches it.
    return 'fail'
|
import logging; logger = logging.getLogger("minimalKB."+__name__);
DEBUG_LEVEL=logging.DEBUG
from minimalkb.exceptions import KbServerError
def query(db, vars, patterns, models):
    """
    'vars' is the list of unbound variables that are expected to be returned.
    Each of them must start with a '?'.

    'patterns' is a list/set of 3-tuples (s,p,o). Each tuple may contain
    unbound variables, that MUST start with a '?'.

    Strategy: resolve single-variable ("independent") patterns first to build
    candidate sets per variable, then narrow them with the remaining
    ("dependent") patterns. Currently only queries requesting a single
    variable are fully supported.
    """
    vars = set(vars)

    allvars = set()
    for p in patterns:
        allvars |= set(get_vars(p))

    if not allvars >= vars:
        logger.warn("Some requested vars are not present in the patterns. Returning []")
        return []

    # a single pattern can be answered directly, without candidate filtering
    if len(patterns) == 1:
        return singlepattern(db, patterns[0], models)

    # patterns with exactly one variable can be resolved on their own
    independentpatterns = {p for p in patterns if nb_variables(p) == 1}
    dependentpatterns = set(patterns) - independentpatterns

    directpatterns = {}  # var -> patterns that mention it
    candidates = {}      # var -> set of possible values found so far
    for v in allvars:
        directpatterns[v] = {p for p in patterns if v in p}

        # first, execute simple queries to determine potential candidates:
        # resolve patterns that contain *only* the desired output variable
        for p in (independentpatterns & directpatterns[v]):
            if v not in candidates:
                candidates[v] = simplequery(db, p, models)
            else:
                # intersection with previous candidates
                candidates[v] = candidates[v] & simplequery(db, p, models)

    # if any of the requested var appears in an independant pattern but has no match for
    # this pattern, return []
    for var in allvars:
        if var in candidates and not candidates[var]:
            return []

    if len(vars) == 1:
        var = vars.pop()

        # no dependent pattern? no need to filter!
        if not dependentpatterns:
            return list(candidates[var])

        candidate = set()
        for pattern in dependentpatterns:
            if var not in pattern:
                raise NotImplementedError("Can not handle pattern %s with requested variable %s." % (pattern, var))

            def prepare(tok):
                # the requested variable becomes the unknown (None); other
                # tokens are replaced by their candidate sets (or themselves)
                if tok==var:
                    return None
                return candidates.get(tok, [tok])

            s, p, o = [prepare(tok) for tok in pattern]
            if not candidate:
                candidate = selectfromset(db, s, p, o, models)
            else:
                # each further dependent pattern narrows the result set
                candidate &= selectfromset(db, s, p, o, models)

        return list(candidate)

    else:
        if not dependentpatterns:
            raise NotImplementedError("Multiple variable in independent patterns not yet supported.")
        raise NotImplementedError("Only a single variable in queries can be currently requested.")
        ### TODO !!! ###
        # NOTE: everything below is unreachable (the raise above always
        # fires); left in place as work-in-progress for multi-variable support
        while dependentpatterns:
            pattern = dependentpatterns.pop()
            s, p, o = pattern

            stmts = [(r[1], r[2], r[3]) for r in matchingstmt(db, pattern, models)]
            if is_variable(s):
                pass
def singlepattern(db, pattern, models):
    """Return the statements matching one pattern (like "* likes ?toto").

    With exactly one unbound variable, the possible values for that variable
    are returned; with 2 or 3 unbound tokens, full [s, p, o] statements are
    returned instead.
    """
    if nb_variables(pattern) != 1:
        # several unknowns: return complete statements
        rows = matchingstmt(db, pattern, models)
        return [[row[1], row[2], row[3]] for row in rows]
    # one unknown: return its possible values
    return list(simplequery(db, pattern, models))
def get_vars(s):
    """Return the tokens of statement *s* that are unbound variables
    (i.e. start with '?'), preserving their order."""
    found = []
    for tok in s:
        if tok.startswith('?'):
            found.append(tok)
    return found
def nb_variables(s):
    """Return how many unbound variables statement *s* contains."""
    return sum(1 for _ in get_vars(s))
def is_variable(tok):
    """True if *tok* is an unbound variable ('?'-prefixed).

    Falsy tokens ('' or None) are passed through unchanged, matching the
    historical `tok and tok.startswith('?')` behaviour.
    """
    if not tok:
        return tok
    return tok.startswith('?')
def matchingstmt(db, pattern, models = [], assertedonly = False):
    """Return every triple row matching *pattern*, a (s, p, o) tuple whose
    '?'-prefixed tokens act as wildcards; optionally restricted to *models*
    and/or to asserted (non-inferred) statements.
    """
    s, p, o = pattern
    params = {'s': s, 'p': p, 'o': o}

    # workaround to feed a variable number of models into the query
    models = list(models)
    for idx, model in enumerate(models):
        params["m%s" % idx] = model

    where = []
    for tok, clause in ((s, "subject=:s"), (p, "predicate=:p"), (o, "object=:o")):
        if not is_variable(tok):
            where.append(clause)
    if assertedonly:
        where.append("inferred=0")
    if models:
        placeholders = ",".join(":m%s" % i for i in range(len(models)))
        where.append("model IN (%s)" % placeholders)

    sql = "SELECT * FROM triples "
    if where:
        sql += "WHERE (" + " AND ".join(where) + ")"
    return list(db.execute(sql, params))
def selectfromset(db, subject = None, predicate = None, object = None, models = [], assertedonly = False):
    """Return the set of possible values for the single unspecified position.

    Exactly one of subject/predicate/object must be None (the unknown); the
    two others are iterables of allowed values. Raises KbServerError when the
    one-unknown precondition is violated.
    """
    if (not subject and not predicate) or \
       (not subject and not object) or \
       (not predicate and not object) or \
       (subject and predicate and object):
        # BUGFIX: removed the leftover 'import pdb;pdb.set_trace()' debugger
        # breakpoint that hung the server whenever this branch was reached
        raise KbServerError("Exactly one of subject, predicate or object must be None")

    params = {}
    # workaround to feed a variable number of models
    models = list(models)
    for i in range(len(models)):
        params["m%s"%i] = models[i]

    selectedcolumn = "subject" if not subject else ("predicate" if not predicate else "object")
    query = "SELECT %s FROM triples " % selectedcolumn

    conditions = []
    # NOTE(review): the IN (...) lists below are built by string
    # concatenation; a value containing a single quote breaks the query
    # (SQL injection risk) -- TODO: switch to bound parameters.
    if subject:
        conditions += ["subject IN ('" + "','".join(subject) + "')"]
    if predicate:
        conditions += ["predicate IN ('" + "','".join(predicate) + "')"]
    if object:
        conditions += ["object IN ('" + "','".join(object) + "')"]
    if assertedonly:
        conditions += ["inferred=0"]
    if models:
        conditions += ["model IN (%s)" % (",".join([":m%s" % i for i in range(len(models))]))]
    if conditions:
        query += "WHERE (" + " AND ".join(conditions) + ")"

    return {row[0] for row in db.execute(query, params)}
def simplequery(db, pattern, models = [], assertedonly = False):
    """A 'simple query' has exactly one unbound variable; return the set of
    possible values for it. When the pattern is fully bound, the set of
    matching statement hashes is returned instead.
    """
    s, p, o = pattern
    params = {'s': s, 'p': p, 'o': o}

    # workaround to feed a variable number of models into the query
    models = list(models)
    for idx, model in enumerate(models):
        params["m%s" % idx] = model

    if is_variable(s):
        sql = "SELECT subject FROM triples WHERE (predicate=:p AND object=:o)"
    elif is_variable(p):
        sql = "SELECT predicate FROM triples WHERE (subject=:s AND object=:o)"
    elif is_variable(o):
        sql = "SELECT object FROM triples WHERE (subject=:s AND predicate=:p)"
    else:
        sql = "SELECT hash FROM triples WHERE (subject=:s AND predicate=:p AND object=:o)"

    if assertedonly:
        sql += " AND inferred=0"
    if models:
        sql += " AND model IN (%s)" % (",".join(":m%s" % i for i in range(len(models))))
    return {row[0] for row in db.execute(sql, params)}
# [doc] docstring added to matchingstmt in sql_queries (revised copy follows)
import logging; logger = logging.getLogger("minimalKB."+__name__);
DEBUG_LEVEL=logging.DEBUG
from minimalkb.exceptions import KbServerError
def query(db, vars, patterns, models):
    """
    'vars' is the list of unbound variables that are expected to be returned.
    Each of them must start with a '?'.

    'patterns' is a list/set of 3-tuples (s,p,o). Each tuple may contain
    unbound variables, that MUST start with a '?'.

    Strategy: resolve single-variable ("independent") patterns first to build
    candidate sets per variable, then narrow them with the remaining
    ("dependent") patterns. Currently only queries requesting a single
    variable are fully supported.
    """
    vars = set(vars)

    allvars = set()
    for p in patterns:
        allvars |= set(get_vars(p))

    if not allvars >= vars:
        logger.warn("Some requested vars are not present in the patterns. Returning []")
        return []

    # a single pattern can be answered directly, without candidate filtering
    if len(patterns) == 1:
        return singlepattern(db, patterns[0], models)

    # patterns with exactly one variable can be resolved on their own
    independentpatterns = {p for p in patterns if nb_variables(p) == 1}
    dependentpatterns = set(patterns) - independentpatterns

    directpatterns = {}  # var -> patterns that mention it
    candidates = {}      # var -> set of possible values found so far
    for v in allvars:
        directpatterns[v] = {p for p in patterns if v in p}

        # first, execute simple queries to determine potential candidates:
        # resolve patterns that contain *only* the desired output variable
        for p in (independentpatterns & directpatterns[v]):
            if v not in candidates:
                candidates[v] = simplequery(db, p, models)
            else:
                # intersection with previous candidates
                candidates[v] = candidates[v] & simplequery(db, p, models)

    # if any of the requested var appears in an independant pattern but has no match for
    # this pattern, return []
    for var in allvars:
        if var in candidates and not candidates[var]:
            return []

    if len(vars) == 1:
        var = vars.pop()

        # no dependent pattern? no need to filter!
        if not dependentpatterns:
            return list(candidates[var])

        candidate = set()
        for pattern in dependentpatterns:
            if var not in pattern:
                raise NotImplementedError("Can not handle pattern %s with requested variable %s." % (pattern, var))

            def prepare(tok):
                # the requested variable becomes the unknown (None); other
                # tokens are replaced by their candidate sets (or themselves)
                if tok==var:
                    return None
                return candidates.get(tok, [tok])

            s, p, o = [prepare(tok) for tok in pattern]
            if not candidate:
                candidate = selectfromset(db, s, p, o, models)
            else:
                # each further dependent pattern narrows the result set
                candidate &= selectfromset(db, s, p, o, models)

        return list(candidate)

    else:
        if not dependentpatterns:
            raise NotImplementedError("Multiple variable in independent patterns not yet supported.")
        raise NotImplementedError("Only a single variable in queries can be currently requested.")
        ### TODO !!! ###
        # NOTE: everything below is unreachable (the raise above always
        # fires); left in place as work-in-progress for multi-variable support
        while dependentpatterns:
            pattern = dependentpatterns.pop()
            s, p, o = pattern

            stmts = [(r[1], r[2], r[3]) for r in matchingstmt(db, pattern, models)]
            if is_variable(s):
                pass
def singlepattern(db, pattern, models):
    """ Returns the list of statements that match
    a single pattern (like "* likes ?toto").

    If only one unbound variable is present, it returns
    the list of possible values for this variable.
    If 2 or 3 tokens are unbound, it returns a list of
    complete statments (s,p,o).
    """
    if nb_variables(pattern) == 1:
        # one unknown: return its possible values
        return list(simplequery(db, pattern, models))
    else:
        # several unknowns: return full [s, p, o] statements
        results = matchingstmt(db, pattern, models)
        return [[res[1], res[2], res[3]] for res in results]
def get_vars(s):
    """Return the tokens of statement *s* that are unbound variables
    (i.e. start with '?'), preserving their order."""
    return [x for x in s if x.startswith('?')]
def nb_variables(s):
    """Return how many unbound variables statement *s* contains."""
    return len(get_vars(s))
def is_variable(tok):
    """True if *tok* is an unbound variable ('?'-prefixed); falsy tokens
    ('' or None) are passed through unchanged."""
    return tok and tok.startswith('?')
def matchingstmt(db, pattern, models = [], assertedonly = False):
    """Returns the list of statements matching a given pattern.

    If assertedonly is True, statements infered by reasoning are
    excluded.

    *pattern* is a (s, p, o) tuple whose '?'-prefixed tokens act as
    wildcards; *models* optionally restricts the search to those models.
    """
    s,p,o = pattern
    params = {'s':s,
              'p':p,
              'o':o,
             }
    # workaround to feed a variable number of models into the query
    models = list(models)
    for i in range(len(models)):
        params["m%s"%i] = models[i]

    query = "SELECT * FROM triples "
    conditions = []
    # only bound (non-variable) tokens constrain the query
    if not is_variable(s):
        conditions += ["subject=:s"]
    if not is_variable(p):
        conditions += ["predicate=:p"]
    if not is_variable(o):
        conditions += ["object=:o"]
    if assertedonly:
        conditions += ["inferred=0"]
    if models:
        conditions += ["model IN (%s)" % (",".join([":m%s" % i for i in range(len(models))]))]
    if conditions:
        query += "WHERE (" + " AND ".join(conditions) + ")"

    return [row for row in db.execute(query, params)]
def selectfromset(db, subject = None, predicate = None, object = None, models = [], assertedonly = False):
    """Return the set of possible values for the single unspecified position.

    Exactly one of subject/predicate/object must be None (the unknown); the
    two others are iterables of allowed values. Raises KbServerError when the
    one-unknown precondition is violated.
    """
    if (not subject and not predicate) or \
       (not subject and not object) or \
       (not predicate and not object) or \
       (subject and predicate and object):
        # BUGFIX: removed the leftover 'import pdb;pdb.set_trace()' debugger
        # breakpoint that hung the server whenever this branch was reached
        raise KbServerError("Exactly one of subject, predicate or object must be None")

    params = {}
    # workaround to feed a variable number of models
    models = list(models)
    for i in range(len(models)):
        params["m%s"%i] = models[i]

    selectedcolumn = "subject" if not subject else ("predicate" if not predicate else "object")
    query = "SELECT %s FROM triples " % selectedcolumn

    conditions = []
    # NOTE(review): the IN (...) lists below are built by string
    # concatenation; a value containing a single quote breaks the query
    # (SQL injection risk) -- TODO: switch to bound parameters.
    if subject:
        conditions += ["subject IN ('" + "','".join(subject) + "')"]
    if predicate:
        conditions += ["predicate IN ('" + "','".join(predicate) + "')"]
    if object:
        conditions += ["object IN ('" + "','".join(object) + "')"]
    if assertedonly:
        conditions += ["inferred=0"]
    if models:
        conditions += ["model IN (%s)" % (",".join([":m%s" % i for i in range(len(models))]))]
    if conditions:
        query += "WHERE (" + " AND ".join(conditions) + ")"

    return {row[0] for row in db.execute(query, params)}
def simplequery(db, pattern, models = [], assertedonly = False):
    """ A 'simple query' is a query with only *one* unbound variable.

    Return the list of possible values for this variable. When the pattern
    is fully bound, the set of matching statement hashes is returned instead.
    """
    s,p,o = pattern
    params = {'s':s,
              'p':p,
              'o':o,
             }
    # workaround to feed a variable number of models into the query
    models = list(models)
    for i in range(len(models)):
        params["m%s"%i] = models[i]

    # select the column corresponding to the (single) unbound token
    query = "SELECT "
    if is_variable(s):
        query += "subject FROM triples WHERE (predicate=:p AND object=:o)"
    elif is_variable(p):
        query += "predicate FROM triples WHERE (subject=:s AND object=:o)"
    elif is_variable(o):
        query += "object FROM triples WHERE (subject=:s AND predicate=:p)"
    else:
        query += "hash FROM triples WHERE (subject=:s AND predicate=:p AND object=:o)"

    if assertedonly:
        query += " AND inferred=0"
    if models:
        query += " AND model IN (%s)" % (",".join([":m%s" % i for i in range(len(models))]))

    return {row[0] for row in db.execute(query, params)}
|
# -*- coding: utf-8 -*-
from . import db
from .mail import MailQueue
import logging
from Queue import Queue
import subprocess
from threading import Thread
import time
from core.util import add_to_tags_str
from core.util import del_from_tags_str
from core.util import EscapedDict
from core.util import tags_contain
from urllib import urlencode
import urllib2
from core.settings import Settings
class GitCommand(subprocess.Popen):
    """A subprocess.Popen specialised for git: prepends 'git' to the given
    arguments and captures stdout/stderr by default (overridable via
    keyword arguments)."""

    def __init__(self, *args, **kwargs):
        command = ['git'] + list(args)
        options = {
            'stdout': subprocess.PIPE,
            'stderr': subprocess.PIPE,
        }
        options.update(kwargs)
        subprocess.Popen.__init__(self, command, **options)

    def run(self):
        """Wait for the command to finish; return (returncode, stdout, stderr)."""
        out, err = self.communicate()
        return self.returncode, out, err
class GitQueue(object):
request_queue = Queue()
worker_thread = None
EXCLUDE_FROM_GIT_VERIFICATION = Settings['git']['exclude_from_verification']
    @classmethod
    def request_is_excluded_from_git_verification(cls, request):
        """Some tags modify the workflow and are excluded from repository
        verification.

        *request* is a push-request mapping; its 'tags' field is checked
        against the configured EXCLUDE_FROM_GIT_VERIFICATION list.
        """
        return tags_contain(request['tags'], cls.EXCLUDE_FROM_GIT_VERIFICATION)
    @classmethod
    def start_worker(cls):
        """Start the single background thread that processes the git queue.

        Idempotent: once the worker exists, further calls are no-ops.
        """
        if cls.worker_thread is not None:
            return
        cls.worker_thread = Thread(target=cls.process_queue, name='git-queue')
        # daemon thread: do not block interpreter shutdown
        cls.worker_thread.daemon = True
        cls.worker_thread.start()
@classmethod
def _get_repository_uri(cls, repository):
scheme = Settings['git']['scheme']
netloc = Settings['git']['servername']
if Settings['git']['auth']:
netloc = '%s@%s' % (Settings['git']['auth'], netloc)
if Settings['git']['port']:
netloc = '%s:%s' % (netloc, Settings['git']['port'])
if repository == Settings['git']['main_repository']:
repository = '%s://%s/%s' % (Settings['git']['scheme'], netloc, Settings['git']['main_repository'])
else:
repository = '%s://%s/%s/%s' % (Settings['git']['scheme'], netloc, Settings['git']['dev_repositories_dir'], repository)
return repository
@classmethod
def _get_branch_sha_from_repo(cls, req):
user_to_notify = req['user']
repository = cls._get_repository_uri(req['repo'])
ls_remote = GitCommand('ls-remote', '-h', repository, req['branch'])
rc, stdout, stderr = ls_remote.run()
stdout = stdout.strip()
query_details = {
'user': req['user'],
'title': req['title'],
'repo': req['repo'],
'branch': req['branch'],
'repository': repository,
'stderr': stderr,
}
if rc:
msg = (
"""
<p>
There was an error verifying your push request in Git:
</p>
<p>
<strong>%(user)s - %(title)s</strong><br />
<em>%(repo)s/%(branch)s</em>
</p>
<p>
Attempting to query the specified repository (%(repository)s) failed with
the following error(s):
</p>
<pre>
%(stderr)s
</pre>
<p>
Regards,<br/>
PushManager
</p>
""")
msg %= EscapedDict(query_details)
subject = '[push error] %s - %s' % (req['user'], req['title'])
#MailQueue.enqueue_user_email([request_info['user']], msg, subject)
MailQueue.enqueue_user_email([user_to_notify], msg, subject)
return None
# successful ls-remote, build up the refs list
tokens = (tok for tok in stdout.split())
refs = zip(tokens,tokens)
for sha, ref in refs:
if ref == ('refs/heads/%s' % req['branch']):
return sha
else:
msg = (
"""
<p>
There was an error verifying your push request in Git:
</p>
<p>
<strong>%(user)s - %(title)s</strong><br />
<em>%(repo)s/%(branch)s</em>
</p>
<p>
The specified branch (%(branch)s) was not found in the repository (%(repository)s).
</p>
<p>
Regards,<br/>
PushManager
</p>
""")
msg %= EscapedDict(query_details)
subject = '[push error] %s - %s' % (req['user'], req['title'])
#MailQueue.enqueue_user_email([request_info['user']], msg, subject)
MailQueue.enqueue_user_email([user_to_notify], msg, subject)
return None
@classmethod
def _get_request(cls, request_id):
result = [None]
def on_db_return(success, db_results):
assert success, "Database error."
result[0] = db_results.first()
request_info_query = db.push_requests.select().where(
db.push_requests.c.id == request_id
)
db.execute_cb(request_info_query, on_db_return)
req = result[0]
if req:
req = dict(req.items())
return req
@classmethod
def _get_request_with_sha(cls, sha):
result = [None]
def on_db_return(success, db_results):
assert success, "Database error."
result[0] = db_results.first()
request_info_query = db.push_requests.select().where(
db.push_requests.c.revision == sha
)
db.execute_cb(request_info_query, on_db_return)
req = result[0]
if req:
req = dict(req.items())
return req
@classmethod
def _update_request(cls, req, updated_values):
result = [None]
def on_db_return(success, db_results):
result[0] = db_results[1].first()
assert success, "Database error."
update_query = db.push_requests.update().where(
db.push_requests.c.id == req['id']
).values(updated_values)
select_query = db.push_requests.select().where(
db.push_requests.c.id == req['id']
)
db.execute_transaction_cb([update_query, select_query], on_db_return)
updated_request = result[0]
if updated_request:
updated_request = dict(updated_request.items())
if not updated_request:
logging.error("Git-queue worker failed to update the request (id %s)." % req['id'])
logging.error("Updated Request values were: %s" % repr(updated_values))
return updated_request
@classmethod
def update_request(cls, request_id):
req = cls._get_request(request_id)
if not req:
# Just log this and return. We won't be able to get more
# data out of the request.
error_msg = "Git queue worker received a job for non-existent request id %s" % request_id
logging.error(error_msg)
return
if cls.request_is_excluded_from_git_verification(req):
return
if not req['branch']:
error_msg = "Git queue worker received a job for request with no branch (id %s)" % request_id
return cls.update_request_failure(req, error_msg)
sha = cls._get_branch_sha_from_repo(req)
if sha is None:
error_msg = "Git queue worker could not get the revision from request branch (id %s)" % request_id
return cls.update_request_failure(req, error_msg)
duplicate_req = cls._get_request_with_sha(sha)
if duplicate_req and duplicate_req.has_key('state') and not duplicate_req['state'] == "discarded":
error_msg = "Git queue worker found another request with the same revision sha (ids %s and %s)" % (
duplicate_req['id'],
request_id
)
return cls.update_request_failure(req, error_msg)
updated_tags = add_to_tags_str(req['tags'], 'git-ok')
updated_tags = del_from_tags_str(updated_tags, 'git-error')
updated_values = {'revision': sha, 'tags': updated_tags}
updated_request = cls._update_request(req, updated_values)
if updated_request:
cls.update_request_successful(updated_request)
@classmethod
def update_request_successful(cls, updated_request):
msg = (
"""
<p>
PushManager has verified the branch for your request.
</p>
<p>
<strong>%(user)s - %(title)s</strong><br />
<em>%(repo)s/%(branch)s</em><br />
<a href="https://%(pushmanager_servername)s/request?id=%(id)s">https://%(pushmanager_servername)s/request?id=%(id)s</a>
</p>
<p>
Review # (if specified): <a href="https://%(reviewboard_servername)s/r/%(reviewid)s">%(reviewid)s</a>
</p>
<p>
Verified revision: <code>%(revision)s</code><br/>
<em>(If this is <strong>not</strong> the revision you expected,
make sure you've pushed your latest version to the correct repo!)</em>
</p>
<p>
Regards,<br/>
PushManager
</p>
""")
updated_request.update({
'pushmanager_servername': Settings['main_app']['servername'],
'reviewboard_servername': Settings['reviewboard']['servername']
})
msg %= EscapedDict(updated_request)
subject = '[push] %s - %s' % (updated_request['user'], updated_request['title'])
user_to_notify = updated_request['user']
MailQueue.enqueue_user_email([user_to_notify], msg, subject)
webhook_req(
'pushrequest',
updated_request['id'],
'ref',
updated_request['branch'],
)
webhook_req(
'pushrequest',
updated_request['id'],
'commit',
updated_request['revision'],
)
if updated_request['reviewid']:
webhook_req(
'pushrequest',
updated_request['id'],
'review',
updated_request['reviewid'],
)
@classmethod
def update_request_failure(cls, request, failure_msg):
logging.error(failure_msg)
updated_tags = add_to_tags_str(request['tags'], 'git-error')
updated_tags = del_from_tags_str(updated_tags, 'git-ok')
updated_values = {'tags': updated_tags}
cls._update_request(request, updated_values)
msg = (
"""
<p>
<em>PushManager could <strong>not</strong> verify the branch for your request.</em>
</p>
<p>
<strong>%(user)s - %(title)s</strong><br />
<em>%(repo)s/%(branch)s</em><br />
<a href="https://%(pushmanager_servername)s/request?id=%(id)s">https://%(pushmanager_servername)s/request?id=%(id)s</a>
</p>
<p>
<strong>Error message</strong>:<br />
%(failure_msg)s
</p>
<p>
Review # (if specified): <a href="https://%(reviewboard_servername)s/r/%(reviewid)s">%(reviewid)s</a>
</p>
<p>
Verified revision: <code>%(revision)s</code><br/>
<em>(If this is <strong>not</strong> the revision you expected,
make sure you've pushed your latest version to the correct repo!)</em>
</p>
<p>
Regards,<br/>
PushManager
</p>
""")
request.update({
'failure_msg': failure_msg,
'pushmanager_servername': Settings['main_app']['servername'],
'reviewboard_servername': Settings['reviewboard']['servername']
})
msg %= EscapedDict(request)
subject = '[push] %s - %s' % (request['user'], request['title'])
user_to_notify = request['user']
MailQueue.enqueue_user_email([user_to_notify], msg, subject)
@classmethod
def process_queue(cls):
while True:
# Throttle
time.sleep(1)
request_id = cls.request_queue.get()
try:
cls.update_request(request_id)
except Exception:
logging.error('THREAD ERROR:', exc_info=True)
finally:
cls.request_queue.task_done()
@classmethod
def enqueue_request(cls, request_id):
cls.request_queue.put(request_id)
def webhook_req(left_type, left_token, right_type, right_token):
    """POST a relation between two entities to the configured web hook.

    Failures are logged and swallowed so callers are never interrupted.
    """
    webhook_url = Settings['web_hooks']['post_url']
    payload = urlencode({
        'reason': 'pushmanager',
        'left_type': left_type,
        'left_token': left_token,
        'right_type': right_type,
        'right_token': right_token,
    })
    try:
        response = urllib2.urlopen(webhook_url, payload, timeout=3)
        response.close()
    except urllib2.URLError:
        logging.error("Web hook POST failed:", exc_info=True)


__all__ = ['GitQueue']
Removed repository information from the verbose logging message. This prevents accidental exposure of sensitive information should, for example, the complete repository URI contain a password.
# -*- coding: utf-8 -*-
from . import db
from .mail import MailQueue
import logging
from Queue import Queue
import subprocess
from threading import Thread
import time
from core.util import add_to_tags_str
from core.util import del_from_tags_str
from core.util import EscapedDict
from core.util import tags_contain
from urllib import urlencode
import urllib2
from core.settings import Settings
class GitCommand(subprocess.Popen):
    """Runs a git sub-command as a subprocess, capturing its output.

    Positional arguments are passed through to git; keyword arguments go
    to Popen and may replace the default stdout/stderr pipes.
    """

    def __init__(self, *args, **kwargs):
        command = ['git'] + list(args)
        # Pipe both streams by default; callers may override via kwargs.
        options = {
            'stdout': subprocess.PIPE,
            'stderr': subprocess.PIPE,
        }
        options.update(kwargs)
        subprocess.Popen.__init__(self, command, **options)

    def run(self):
        """Block until done; return (returncode, stdout, stderr)."""
        captured_out, captured_err = self.communicate()
        return self.returncode, captured_out, captured_err
class GitQueue(object):
    """Asynchronous verifier for push requests against their Git branches.

    Request ids are queued via enqueue_request() and consumed by a single
    daemon worker thread, which resolves the request's branch to a SHA with
    `git ls-remote`, guards against duplicate revisions, stores the result
    back on the request row, and emails the requester about the outcome.

    Error emails deliberately omit the repository URI, which may embed
    credentials.
    """

    request_queue = Queue()
    worker_thread = None

    # Requests carrying any of these tags skip Git verification entirely.
    EXCLUDE_FROM_GIT_VERIFICATION = Settings['git']['exclude_from_verification']

    @classmethod
    def request_is_excluded_from_git_verification(cls, request):
        """Some tags modify the workflow and are excluded from repository
        verification.
        """
        return tags_contain(request['tags'], cls.EXCLUDE_FROM_GIT_VERIFICATION)

    @classmethod
    def start_worker(cls):
        """Start the single queue-processing daemon thread (idempotent)."""
        if cls.worker_thread is not None:
            return
        cls.worker_thread = Thread(target=cls.process_queue, name='git-queue')
        cls.worker_thread.daemon = True
        cls.worker_thread.start()

    @classmethod
    def _get_repository_uri(cls, repository):
        """Return the full URI for *repository*, built from the git settings.

        The main repository lives at the server root; any other repository
        is assumed to live under the configured dev repositories directory.
        """
        netloc = Settings['git']['servername']
        if Settings['git']['auth']:
            netloc = '%s@%s' % (Settings['git']['auth'], netloc)
        if Settings['git']['port']:
            netloc = '%s:%s' % (netloc, Settings['git']['port'])
        if repository == Settings['git']['main_repository']:
            repository = '%s://%s/%s' % (Settings['git']['scheme'], netloc, Settings['git']['main_repository'])
        else:
            repository = '%s://%s/%s/%s' % (Settings['git']['scheme'], netloc, Settings['git']['dev_repositories_dir'], repository)
        return repository

    @classmethod
    def _get_branch_sha_from_repo(cls, req):
        """Resolve req['branch'] to a SHA via `git ls-remote`.

        Returns the SHA string on success. On any failure (ls-remote error,
        or the branch missing from the repository) the requester is emailed
        and None is returned. The repository URI is intentionally kept out
        of the emails and of query_details.
        """
        user_to_notify = req['user']
        repository = cls._get_repository_uri(req['repo'])
        ls_remote = GitCommand('ls-remote', '-h', repository, req['branch'])
        rc, stdout, stderr = ls_remote.run()
        stdout = stdout.strip()
        query_details = {
            'user': req['user'],
            'title': req['title'],
            'repo': req['repo'],
            'branch': req['branch'],
            'stderr': stderr,
        }
        if rc:
            msg = (
            """
            <p>
            There was an error verifying your push request in Git:
            </p>
            <p>
            <strong>%(user)s - %(title)s</strong><br />
            <em>%(repo)s/%(branch)s</em>
            </p>
            <p>
            Attempting to query the specified repository failed with
            the following error(s):
            </p>
            <pre>
%(stderr)s
            </pre>
            <p>
            Regards,<br/>
            PushManager
            </p>
            """)
            msg %= EscapedDict(query_details)
            subject = '[push error] %s - %s' % (req['user'], req['title'])
            MailQueue.enqueue_user_email([user_to_notify], msg, subject)
            return None
        # successful ls-remote, build up the refs list.
        # zip()-ing one generator with itself pairs the whitespace-separated
        # ls-remote output into consecutive (sha, ref) tuples.
        tokens = (tok for tok in stdout.split())
        refs = zip(tokens, tokens)
        for sha, ref in refs:
            if ref == ('refs/heads/%s' % req['branch']):
                return sha
        else:
            # for/else: the loop finished without finding the branch ref.
            # BUGFIX: the template previously interpolated %(repository)s,
            # but that key was removed from query_details, so formatting
            # raised KeyError and the user never got this email.
            msg = (
            """
            <p>
            There was an error verifying your push request in Git:
            </p>
            <p>
            <strong>%(user)s - %(title)s</strong><br />
            <em>%(repo)s/%(branch)s</em>
            </p>
            <p>
            The specified branch (%(branch)s) was not found in the repository.
            </p>
            <p>
            Regards,<br/>
            PushManager
            </p>
            """)
            msg %= EscapedDict(query_details)
            subject = '[push error] %s - %s' % (req['user'], req['title'])
            MailQueue.enqueue_user_email([user_to_notify], msg, subject)
            return None

    @classmethod
    def _get_request(cls, request_id):
        """Fetch one push request row by id; return it as a dict or None."""
        result = [None]
        def on_db_return(success, db_results):
            assert success, "Database error."
            result[0] = db_results.first()
        request_info_query = db.push_requests.select().where(
            db.push_requests.c.id == request_id
        )
        db.execute_cb(request_info_query, on_db_return)
        req = result[0]
        if req:
            req = dict(req.items())
        return req

    @classmethod
    def _get_request_with_sha(cls, sha):
        """Fetch one push request row by revision sha; return a dict or None."""
        result = [None]
        def on_db_return(success, db_results):
            assert success, "Database error."
            result[0] = db_results.first()
        request_info_query = db.push_requests.select().where(
            db.push_requests.c.revision == sha
        )
        db.execute_cb(request_info_query, on_db_return)
        req = result[0]
        if req:
            req = dict(req.items())
        return req

    @classmethod
    def _update_request(cls, req, updated_values):
        """Apply *updated_values* to the request row and return the fresh row.

        Returns the updated request as a dict, or None (after logging) if
        the transaction did not yield an updated row.
        """
        result = [None]
        def on_db_return(success, db_results):
            # Check success before touching db_results: on failure the
            # results may not be populated, and indexing them first would
            # raise an unrelated error that masks the database failure.
            assert success, "Database error."
            result[0] = db_results[1].first()
        update_query = db.push_requests.update().where(
            db.push_requests.c.id == req['id']
        ).values(updated_values)
        select_query = db.push_requests.select().where(
            db.push_requests.c.id == req['id']
        )
        db.execute_transaction_cb([update_query, select_query], on_db_return)
        updated_request = result[0]
        if updated_request:
            updated_request = dict(updated_request.items())
        if not updated_request:
            logging.error("Git-queue worker failed to update the request (id %s)." % req['id'])
            logging.error("Updated Request values were: %s" % repr(updated_values))
        return updated_request

    @classmethod
    def update_request(cls, request_id):
        """Verify one request end to end and record the outcome.

        Skips excluded requests; marks the request 'git-error' (and emails
        the user) when the branch is missing, unresolvable, or its sha is
        already claimed by another live request; otherwise stores the sha,
        tags the request 'git-ok' and sends the success notifications.
        """
        req = cls._get_request(request_id)
        if not req:
            # Just log this and return. We won't be able to get more
            # data out of the request.
            error_msg = "Git queue worker received a job for non-existent request id %s" % request_id
            logging.error(error_msg)
            return
        if cls.request_is_excluded_from_git_verification(req):
            return
        if not req['branch']:
            error_msg = "Git queue worker received a job for request with no branch (id %s)" % request_id
            return cls.update_request_failure(req, error_msg)
        sha = cls._get_branch_sha_from_repo(req)
        if sha is None:
            error_msg = "Git queue worker could not get the revision from request branch (id %s)" % request_id
            return cls.update_request_failure(req, error_msg)
        # A non-discarded request already holding this sha means a duplicate.
        duplicate_req = cls._get_request_with_sha(sha)
        if duplicate_req and 'state' in duplicate_req and duplicate_req['state'] != "discarded":
            error_msg = "Git queue worker found another request with the same revision sha (ids %s and %s)" % (
                duplicate_req['id'],
                request_id
            )
            return cls.update_request_failure(req, error_msg)
        updated_tags = add_to_tags_str(req['tags'], 'git-ok')
        updated_tags = del_from_tags_str(updated_tags, 'git-error')
        updated_values = {'revision': sha, 'tags': updated_tags}
        updated_request = cls._update_request(req, updated_values)
        if updated_request:
            cls.update_request_successful(updated_request)

    @classmethod
    def update_request_successful(cls, updated_request):
        """Email the requester and fire web hooks for a verified request."""
        msg = (
            """
            <p>
            PushManager has verified the branch for your request.
            </p>
            <p>
            <strong>%(user)s - %(title)s</strong><br />
            <em>%(repo)s/%(branch)s</em><br />
            <a href="https://%(pushmanager_servername)s/request?id=%(id)s">https://%(pushmanager_servername)s/request?id=%(id)s</a>
            </p>
            <p>
            Review # (if specified): <a href="https://%(reviewboard_servername)s/r/%(reviewid)s">%(reviewid)s</a>
            </p>
            <p>
            Verified revision: <code>%(revision)s</code><br/>
            <em>(If this is <strong>not</strong> the revision you expected,
            make sure you've pushed your latest version to the correct repo!)</em>
            </p>
            <p>
            Regards,<br/>
            PushManager
            </p>
            """)
        updated_request.update({
            'pushmanager_servername': Settings['main_app']['servername'],
            'reviewboard_servername': Settings['reviewboard']['servername']
        })
        msg %= EscapedDict(updated_request)
        subject = '[push] %s - %s' % (updated_request['user'], updated_request['title'])
        user_to_notify = updated_request['user']
        MailQueue.enqueue_user_email([user_to_notify], msg, subject)
        webhook_req(
            'pushrequest',
            updated_request['id'],
            'ref',
            updated_request['branch'],
        )
        webhook_req(
            'pushrequest',
            updated_request['id'],
            'commit',
            updated_request['revision'],
        )
        if updated_request['reviewid']:
            webhook_req(
                'pushrequest',
                updated_request['id'],
                'review',
                updated_request['reviewid'],
            )

    @classmethod
    def update_request_failure(cls, request, failure_msg):
        """Tag the request 'git-error' and email the requester the reason."""
        logging.error(failure_msg)
        updated_tags = add_to_tags_str(request['tags'], 'git-error')
        updated_tags = del_from_tags_str(updated_tags, 'git-ok')
        updated_values = {'tags': updated_tags}
        cls._update_request(request, updated_values)
        msg = (
            """
            <p>
            <em>PushManager could <strong>not</strong> verify the branch for your request.</em>
            </p>
            <p>
            <strong>%(user)s - %(title)s</strong><br />
            <em>%(repo)s/%(branch)s</em><br />
            <a href="https://%(pushmanager_servername)s/request?id=%(id)s">https://%(pushmanager_servername)s/request?id=%(id)s</a>
            </p>
            <p>
            <strong>Error message</strong>:<br />
            %(failure_msg)s
            </p>
            <p>
            Review # (if specified): <a href="https://%(reviewboard_servername)s/r/%(reviewid)s">%(reviewid)s</a>
            </p>
            <p>
            Verified revision: <code>%(revision)s</code><br/>
            <em>(If this is <strong>not</strong> the revision you expected,
            make sure you've pushed your latest version to the correct repo!)</em>
            </p>
            <p>
            Regards,<br/>
            PushManager
            </p>
            """)
        request.update({
            'failure_msg': failure_msg,
            'pushmanager_servername': Settings['main_app']['servername'],
            'reviewboard_servername': Settings['reviewboard']['servername']
        })
        msg %= EscapedDict(request)
        subject = '[push] %s - %s' % (request['user'], request['title'])
        user_to_notify = request['user']
        MailQueue.enqueue_user_email([user_to_notify], msg, subject)

    @classmethod
    def process_queue(cls):
        """Worker loop: pop request ids forever, verifying one per second."""
        while True:
            # Throttle
            time.sleep(1)
            request_id = cls.request_queue.get()
            try:
                cls.update_request(request_id)
            except Exception:
                # Never let one bad request kill the worker thread.
                logging.error('THREAD ERROR:', exc_info=True)
            finally:
                cls.request_queue.task_done()

    @classmethod
    def enqueue_request(cls, request_id):
        """Queue *request_id* for verification by the worker thread."""
        cls.request_queue.put(request_id)
def webhook_req(left_type, left_token, right_type, right_token):
    """POST a relation between two entities to the configured web hook.

    Failures are logged and swallowed so callers are never interrupted.
    """
    webhook_url = Settings['web_hooks']['post_url']
    payload = urlencode({
        'reason': 'pushmanager',
        'left_type': left_type,
        'left_token': left_token,
        'right_type': right_type,
        'right_token': right_token,
    })
    try:
        response = urllib2.urlopen(webhook_url, payload, timeout=3)
        response.close()
    except urllib2.URLError:
        logging.error("Web hook POST failed:", exc_info=True)


__all__ = ['GitQueue']
|
import argparse
import boto3
import subprocess
import getpass
import time
import os
import zipfile
def _get_client(aws_region):
    """Return a boto3 EMR client bound to *aws_region*."""
    emr_client = boto3.client('emr', region_name=aws_region)
    return emr_client
def add_step_to_job_flow(job_flow_id=None,
python_path=None,
spark_main=None,
py_files=None,
num_of_steps=1,
use_mysql=False,
spark_main_args=None,
s3_work_bucket=None,
aws_region=None,
send_success_email_to=None):
assert(job_flow_id)
assert(aws_region)
job_flow_name = _create_job_flow_name(spark_main)
steps = _create_steps(job_flow_name=job_flow_name,
python_path=python_path,
spark_main=spark_main,
py_files=py_files,
num_of_steps=num_of_steps,
use_mysql=use_mysql,
spark_main_args=spark_main_args,
s3_work_bucket=s3_work_bucket,
send_success_email_to=send_success_email_to)
client = _get_client(aws_region)
step_response = client.add_job_flow_steps(
JobFlowId=job_flow_id,
Steps=steps
)
step_ids = step_response['StepIds']
print "Created steps: {}".format(step_ids)
print "job_flow_id: {}".format(job_flow_id)
def _create_job_flow_name(spark_main):
return '{}.{}.{}'.format(getpass.getuser(),
spark_main,
time.strftime("%H%M%S", time.gmtime()))
def _ls_recursive(dir, suffix=None):
files = [os.path.join(dp, f) for dp, dn, fn in os.walk(os.path.expanduser(dir)) for f in fn]
if suffix:
files = filter(lambda f: f.endswith(suffix), files)
return files
def _create_steps(job_flow_name=None,
python_path=None,
spark_main=None,
py_files=[],
num_of_steps=1,
spark_main_args=None,
s3_work_bucket=None,
use_mysql=False,
send_success_email_to=None):
assert(python_path)
assert(spark_main)
assert(s3_work_bucket)
zip_file = 'spark_zip.zip'
sources_rel_path = job_flow_name
sources_on_host = '/home/hadoop/{}'.format(sources_rel_path)
local_zip_file = '/tmp/{}'.format(zip_file)
python_path_files = _ls_recursive(python_path, '.py')
with zipfile.ZipFile(local_zip_file, 'w') as myzip:
for f in python_path_files:
myzip.write(f)
if py_files:
for py_file in py_files:
if py_file.endswith('.zip'): # Currently only support sip files
with zipfile.ZipFile(py_file, 'r') as openzip:
[myzip.writestr(t[0], t[1].read())
for t in ((n, openzip.open(n))
for n in openzip.namelist())]
s3sources = 's3://{}/sources/{}'.format(s3_work_bucket, sources_rel_path)
zip_file_on_s3 = '{}/{}'.format(s3sources, zip_file)
print 'Storing python sources on {}'.format(s3sources)
# TODO: Change these subprocess calls to use python native API instead of shell
subprocess.check_call('aws s3 cp {} {}'.format(local_zip_file, zip_file_on_s3), shell=True)
zip_file_on_host = '{}/{}'.format(sources_on_host, zip_file)
spark_main_on_host = '{}/{}'.format(sources_on_host, spark_main)
# spark_main_args = spark_main_args.split() if spark_main_args else ['']
packages_to_add = []
if use_mysql:
packages_to_add.append('mysql:mysql-connector-java:5.1.39')
packages = ['--packages'] + packages_to_add if packages_to_add else []
steps = []
steps.append({
'Name': 'setup - copy files',
'ActionOnFailure': 'CANCEL_AND_WAIT',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': ['aws', 's3', 'cp', zip_file_on_s3, sources_on_host + '/']
}
})
steps.append({
'Name': 'setup - extract files',
'ActionOnFailure': 'CANCEL_AND_WAIT',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': ['unzip', zip_file_on_host, '-d', sources_on_host]
}
})
for i in range(num_of_steps):
steps.append({
'Name': 'run spark {}'.format(spark_main),
'ActionOnFailure': 'CANCEL_AND_WAIT',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': (['spark-submit'] +
packages +
['--py-files', zip_file_on_host, spark_main_on_host] +
spark_main_args.format(i).split())
}
})
if send_success_email_to is not None:
steps.append({
'Name': 'Send success email to {}'.format(send_success_email_to),
'ActionOnFailure': 'CONTINUE',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': ['aws', 'ses', 'send-email', '--from', 'ops@yodas.com',
'--to', send_success_email_to, '--subject',
'EMR COMPLETED SUCCESSFULY', '--text', 'Life is good']
}
})
return steps
def _create_debug_steps(setup_debug):
if setup_debug:
return [
{
'Name': 'Setup Debugging',
'ActionOnFailure': 'TERMINATE_CLUSTER',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': ['state-pusher-script']
}
},
]
else:
return []
def create_cluster_and_run_job_flow(create_cluster_master_type=None,
                                    create_cluster_slave_type=None,
                                    create_cluster_num_hosts=1,
                                    create_cluster_ec2_key_name=None,
                                    create_cluster_ec2_subnet_id=None,
                                    create_cluster_setup_debug=None,
                                    create_cluster_keep_alive_when_done=None,
                                    bid_price=None,
                                    python_path=None,
                                    num_of_steps=1,
                                    spark_main=None,
                                    py_files=None,
                                    spark_main_args=None,
                                    s3_work_bucket=None,
                                    use_mysql=False,
                                    aws_region=None,
                                    send_success_email_to=None):
    """Create a new EMR cluster and submit the spark steps to it.

    Builds the steps (uploading zipped sources to S3 as a side effect),
    then launches an emr-4.6.0 cluster — spot-priced instance groups when
    *bid_price* is given, on-demand otherwise — and returns the new
    job flow id (also echoed to stdout along with the console/log URLs).
    """
    assert(create_cluster_master_type)
    assert(create_cluster_slave_type)
    assert(aws_region)
    # Per-user log prefix inside the shared work bucket.
    s3_logs_uri = 's3n://{}/logs/{}/'.format(s3_work_bucket, getpass.getuser())
    job_flow_name = _create_job_flow_name(spark_main)
    steps = _create_steps(job_flow_name=job_flow_name,
                          python_path=python_path,
                          spark_main=spark_main,
                          py_files=py_files,
                          num_of_steps=num_of_steps,
                          spark_main_args=spark_main_args,
                          s3_work_bucket=s3_work_bucket,
                          use_mysql=use_mysql,
                          send_success_email_to=send_success_email_to)
    client = _get_client(aws_region)
    debug_steps = _create_debug_steps(create_cluster_setup_debug)
    if bid_price:
        # Spot market: the same bid price is used for master and core groups.
        instances = {
            'InstanceGroups': [
                {
                    'Name': 'EmrMaster',
                    'Market': 'SPOT',
                    'InstanceRole': 'MASTER',
                    'BidPrice': bid_price,
                    'InstanceType': create_cluster_master_type,
                    'InstanceCount': 1,
                },
                {
                    'Name': 'EmrCore',
                    'Market': 'SPOT',
                    'InstanceRole': 'CORE',
                    'BidPrice': bid_price,
                    'InstanceType': create_cluster_slave_type,
                    'InstanceCount': create_cluster_num_hosts,
                },
            ],
            'Ec2KeyName': create_cluster_ec2_key_name,
            'KeepJobFlowAliveWhenNoSteps': create_cluster_keep_alive_when_done,
            'TerminationProtected': False,
            'Ec2SubnetId': create_cluster_ec2_subnet_id
        }
    else:
        # On-demand instances.
        instances = {
            'MasterInstanceType': create_cluster_master_type,
            'SlaveInstanceType': create_cluster_slave_type,
            'InstanceCount': create_cluster_num_hosts,
            'Ec2KeyName': create_cluster_ec2_key_name,
            'KeepJobFlowAliveWhenNoSteps': create_cluster_keep_alive_when_done,
            'TerminationProtected': False,
            'Ec2SubnetId': create_cluster_ec2_subnet_id
        }
    response = client.run_job_flow(
        Name=job_flow_name,
        LogUri=s3_logs_uri,
        ReleaseLabel='emr-4.6.0',
        Instances=instances,
        Steps=debug_steps + steps,
        Applications=[{'Name': 'Ganglia'}, {'Name': 'Spark'}],
        Configurations=[
            {
                'Classification': 'spark',
                'Properties': {
                    'maximizeResourceAllocation': 'true'
                }
            },
            {
                "Classification": "spark-defaults",
                "Properties": {
                    "spark.dynamicAllocation.enabled": "true",
                    "spark.executor.instances": "0"
                }
            }
        ],
        VisibleToAllUsers=True,
        JobFlowRole='EMR_EC2_DefaultRole',
        ServiceRole='EMR_DefaultRole',
        Tags=[{'Key': 'Name', 'Value': spark_main}]
    )
    job_flow_id = response['JobFlowId']
    print 'Created Job Flow: {}'.format(job_flow_id)
    step_ids = _get_step_ids_for_job_flow(job_flow_id, client)
    print 'Created Job steps: {}'.format(step_ids)
    print '''Waiting for steps to finish. Visit on aws portal:
 https://{0}.console.aws.amazon.com/elasticmapreduce/home?region={0}#cluster-details:{1}'''.format(aws_region, job_flow_id)
    print "Find logs here: {0}{1}/".format(s3_logs_uri, job_flow_id)
    return job_flow_id
def _get_step_ids_for_job_flow(job_flow_id, client):
steps = client.list_steps(ClusterId=job_flow_id)
step_ids = map(lambda s: s['Id'], steps['Steps'])
return step_ids
def _wait_for_job_flow(aws_region, job_flow_id, step_ids=[]):
    """Poll the cluster and the given steps every 30s until all steps finish.

    Prints a one-line status per poll, flags failed steps, and breaks once
    every tracked step is COMPLETED/FAILED/CANCELLED.

    NOTE(review): step_ids has a mutable default ([]); it is only read here,
    but a None default would be safer.
    NOTE(review): when the cluster reaches TERMINATED_WITH_ERRORS the failure
    banner is printed but the loop only exits once all steps report done —
    confirm the steps always reach a terminal state in that case.
    """
    while True:
        time.sleep(30)
        client = _get_client(aws_region)
        cluster = client.describe_cluster(ClusterId=job_flow_id)
        state = cluster['Cluster']['Status']['State']
        state_failed = state in ['TERMINATED_WITH_ERRORS']
        # Accumulate one status fragment per entity for a single-line report.
        p = []
        p.append('Cluster: {}'.format(state))
        all_done = True
        for step_id in step_ids:
            step = client.describe_step(ClusterId=job_flow_id, StepId=step_id)
            step_state = step['Step']['Status']['State']
            step_failed = step_state in ['FAILED', 'CANCELLED']
            step_success = step_state in ['COMPLETED']
            step_done = step_success or step_failed
            step_name = step['Step']['Name']
            # Completed steps are omitted from the status line to reduce noise.
            if not step_success:
                p.append('{} ({}) - {}'.format(step_name, step_id, step_state))
            all_done = all_done and step_done
            if step_failed:
                print '!!! STEP FAILED: {} ({})'.format(step_name, step_id)
        print '\t'.join(p)
        if all_done:
            print "All done"
            break
        if state_failed:
            print ">>>>>>>>>>>>>>>> FAILED <<<<<<<<<<<<<<<<<<"
            print "Error message: {}".format(cluster['Cluster']
                                             ['Status']['Message'])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--create_cluster',
help='Create a new cluster (and destroy it when it ' +
'is done',
action='store_true')
parser.add_argument('--create_cluster_master_type', help='Number of hosts',
default='m1.medium')
parser.add_argument('--create_cluster_slave_type', help='Number of hosts',
default='m3.xlarge')
parser.add_argument('--create_cluster_num_hosts', help='Number of hosts',
type=int, default=1)
parser.add_argument('--create_cluster_ec2_key_name', help='Keyfile when ' +
'you want to create a new cluster and connect to it')
parser.add_argument('--create_cluster_ec2_subnet_id', help='')
parser.add_argument('--create_cluster_keep_alive_when_done', default=False,
action='store_true',
help='Terminate the cluster when execution is done')
parser.add_argument('--create_cluster_setup_debug', default=True,
help='Whether to setup the cluster for debugging',
action='store_true')
parser.add_argument('--aws_region', help='AWS region', required=True)
parser.add_argument('--job_flow_id',
help='Job flow ID (EMR cluster) to submit to')
parser.add_argument('--python_path', required=True,
help='Path to python files to zip and upload to the' +
' server and add to the python path. This should ' +
'include the python_main file`')
parser.add_argument('--spark_main', required=True,
help='Main python file for spark')
parser.add_argument('--spark_main_args',
help='Arguments passed to your spark script')
parser.add_argument('--s3_work_bucket', required=True,
help='Name of s3 bucket where sources and logs are ' +
'uploaded')
parser.add_argument('--py-files', nargs='*', dest='py_files',
help='A list of py or zip or egg files to pass over ' +
'to spark-submit')
parser.add_argument('--use_mysql', default=False,
help='Whether to setup mysql dataframes jar',
action='store_true')
parser.add_argument('--send_success_email_to', default=None,
help='Email address to send on success')
parser.add_argument('--num_of_steps', default=1, type=int)
parser.add_argument('--bid_price', default=None)
args = parser.parse_args()
if args.job_flow_id:
add_step_to_job_flow(job_flow_id=args.job_flow_id,
python_path=args.python_path,
spark_main=args.spark_main,
spark_main_args=args.spark_main_args,
num_of_steps=args.num_of_steps,
py_files=args.py_files,
use_mysql=args.use_mysql,
s3_work_bucket=args.s3_work_bucket,
aws_region=args.aws_region,
send_success_email_to=args.send_success_email_to)
elif args.create_cluster:
job_flow_id = create_cluster_and_run_job_flow(
create_cluster_master_type=args.create_cluster_master_type,
create_cluster_slave_type=args.create_cluster_slave_type,
create_cluster_num_hosts=args.create_cluster_num_hosts,
create_cluster_ec2_key_name=args.create_cluster_ec2_key_name,
create_cluster_ec2_subnet_id=args.create_cluster_ec2_subnet_id,
create_cluster_setup_debug=args.create_cluster_setup_debug,
create_cluster_keep_alive_when_done=args.create_cluster_keep_alive_when_done,
bid_price=args.bid_price,
python_path=args.python_path,
spark_main=args.spark_main,
py_files=args.py_files,
use_mysql=args.use_mysql,
spark_main_args=args.spark_main_args,
num_of_steps=args.num_of_steps,
s3_work_bucket=args.s3_work_bucket,
aws_region=args.aws_region,
send_success_email_to=args.send_success_email_to)
with open('.job_flow_id.txt', 'w') as f:
f.write(job_flow_id)
else:
print "Nothing to do"
parser.print_help()
Exclude the master.
import argparse
import boto3
import subprocess
import getpass
import time
import os
import zipfile
def _get_client(aws_region):
    """Return a boto3 EMR client bound to *aws_region*."""
    emr_client = boto3.client('emr', region_name=aws_region)
    return emr_client
def add_step_to_job_flow(job_flow_id=None,
python_path=None,
spark_main=None,
py_files=None,
num_of_steps=1,
use_mysql=False,
spark_main_args=None,
s3_work_bucket=None,
aws_region=None,
send_success_email_to=None):
assert(job_flow_id)
assert(aws_region)
job_flow_name = _create_job_flow_name(spark_main)
steps = _create_steps(job_flow_name=job_flow_name,
python_path=python_path,
spark_main=spark_main,
py_files=py_files,
num_of_steps=num_of_steps,
use_mysql=use_mysql,
spark_main_args=spark_main_args,
s3_work_bucket=s3_work_bucket,
send_success_email_to=send_success_email_to)
client = _get_client(aws_region)
step_response = client.add_job_flow_steps(
JobFlowId=job_flow_id,
Steps=steps
)
step_ids = step_response['StepIds']
print "Created steps: {}".format(step_ids)
print "job_flow_id: {}".format(job_flow_id)
def _create_job_flow_name(spark_main):
return '{}.{}.{}'.format(getpass.getuser(),
spark_main,
time.strftime("%H%M%S", time.gmtime()))
def _ls_recursive(dir, suffix=None):
files = [os.path.join(dp, f) for dp, dn, fn in os.walk(os.path.expanduser(dir)) for f in fn]
if suffix:
files = filter(lambda f: f.endswith(suffix), files)
return files
def _create_steps(job_flow_name=None,
                  python_path=None,
                  spark_main=None,
                  py_files=None,
                  num_of_steps=1,
                  spark_main_args=None,
                  s3_work_bucket=None,
                  use_mysql=False,
                  send_success_email_to=None):
    """Zip local python sources, upload them to S3, and return the EMR
    step definitions that copy/extract the sources and run spark-submit.

    Arguments
    ---------
    job_flow_name: str
        Unique name, also used as the sources subdirectory on S3 and host
    python_path: str
        Local directory whose .py files are zipped and shipped
    spark_main: str
        Main python file for spark, relative to the shipped sources
    py_files: list of str, optional
        Extra .zip archives whose members are merged into the shipped zip
    num_of_steps: int
        Number of spark-submit steps; the step index is interpolated into
        spark_main_args via str.format
    spark_main_args: str, optional
        Argument string passed to the spark job
    s3_work_bucket: str
        S3 bucket for the uploaded sources
    use_mysql: bool
        If True, add the mysql-connector jar via --packages
    send_success_email_to: str, optional
        If set, append a step that sends a success email via SES
    """
    assert(python_path)
    assert(spark_main)
    assert(s3_work_bucket)
    # Fix: avoid the mutable default argument py_files=[]
    if py_files is None:
        py_files = []
    # Fix: spark_main_args defaults to None, which crashed on
    # .format() below (the original guard was commented out).
    if spark_main_args is None:
        spark_main_args = ''
    zip_file = 'spark_zip.zip'
    sources_rel_path = job_flow_name
    sources_on_host = '/home/hadoop/{}'.format(sources_rel_path)
    local_zip_file = '/tmp/{}'.format(zip_file)
    python_path_files = _ls_recursive(python_path, '.py')
    with zipfile.ZipFile(local_zip_file, 'w') as myzip:
        for f in python_path_files:
            myzip.write(f)
        for py_file in py_files:
            if py_file.endswith('.zip'):  # Currently only support zip files
                # Merge the archive's members into the shipped zip
                with zipfile.ZipFile(py_file, 'r') as openzip:
                    for member in openzip.namelist():
                        myzip.writestr(member, openzip.open(member).read())
    s3sources = 's3://{}/sources/{}'.format(s3_work_bucket, sources_rel_path)
    zip_file_on_s3 = '{}/{}'.format(s3sources, zip_file)
    print('Storing python sources on {}'.format(s3sources))
    # TODO: Change these subprocess calls to use python native API instead of shell
    subprocess.check_call('aws s3 cp {} {}'.format(local_zip_file, zip_file_on_s3), shell=True)
    zip_file_on_host = '{}/{}'.format(sources_on_host, zip_file)
    spark_main_on_host = '{}/{}'.format(sources_on_host, spark_main)
    packages_to_add = []
    if use_mysql:
        packages_to_add.append('mysql:mysql-connector-java:5.1.39')
    packages = ['--packages'] + packages_to_add if packages_to_add else []
    steps = []
    # Step 1: copy the zipped sources from S3 to the master node
    steps.append({
        'Name': 'setup - copy files',
        'ActionOnFailure': 'CANCEL_AND_WAIT',
        'HadoopJarStep': {
            'Jar': 'command-runner.jar',
            'Args': ['aws', 's3', 'cp', zip_file_on_s3, sources_on_host + '/']
        }
    })
    # Step 2: extract them in place
    steps.append({
        'Name': 'setup - extract files',
        'ActionOnFailure': 'CANCEL_AND_WAIT',
        'HadoopJarStep': {
            'Jar': 'command-runner.jar',
            'Args': ['unzip', zip_file_on_host, '-d', sources_on_host]
        }
    })
    # One spark-submit step per requested run; {} in spark_main_args is
    # replaced with the step index
    for i in range(num_of_steps):
        steps.append({
            'Name': 'run spark {}'.format(spark_main),
            'ActionOnFailure': 'CANCEL_AND_WAIT',
            'HadoopJarStep': {
                'Jar': 'command-runner.jar',
                'Args': (['spark-submit'] +
                         packages +
                         ['--py-files', zip_file_on_host, spark_main_on_host] +
                         spark_main_args.format(i).split())
            }
        })
    if send_success_email_to is not None:
        steps.append({
            'Name': 'Send success email to {}'.format(send_success_email_to),
            'ActionOnFailure': 'CONTINUE',
            'HadoopJarStep': {
                'Jar': 'command-runner.jar',
                'Args': ['aws', 'ses', 'send-email', '--from', 'ops@yodas.com',
                         '--to', send_success_email_to, '--subject',
                         'EMR COMPLETED SUCCESSFULY', '--text', 'Life is good']
            }
        })
    return steps
def _create_debug_steps(setup_debug):
if setup_debug:
return [
{
'Name': 'Setup Debugging',
'ActionOnFailure': 'TERMINATE_CLUSTER',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': ['state-pusher-script']
}
},
]
else:
return []
def create_cluster_and_run_job_flow(create_cluster_master_type=None,
                                    create_cluster_slave_type=None,
                                    create_cluster_num_hosts=1,
                                    create_cluster_ec2_key_name=None,
                                    create_cluster_ec2_subnet_id=None,
                                    create_cluster_setup_debug=None,
                                    create_cluster_keep_alive_when_done=None,
                                    bid_price=None,
                                    python_path=None,
                                    num_of_steps=1,
                                    spark_main=None,
                                    py_files=None,
                                    spark_main_args=None,
                                    s3_work_bucket=None,
                                    use_mysql=False,
                                    aws_region=None,
                                    send_success_email_to=None):
    """Create a new EMR cluster and run the spark job on it.

    When ``bid_price`` is given, core nodes are requested as SPOT
    instances (the master stays on-demand); otherwise a plain
    on-demand instance configuration is used.

    Returns
    -------
    str
        The new cluster's job flow id.
    """
    assert(create_cluster_master_type)
    assert(create_cluster_slave_type)
    assert(aws_region)
    s3_logs_uri = 's3n://{}/logs/{}/'.format(s3_work_bucket, getpass.getuser())
    job_flow_name = _create_job_flow_name(spark_main)
    steps = _create_steps(job_flow_name=job_flow_name,
                          python_path=python_path,
                          spark_main=spark_main,
                          py_files=py_files,
                          num_of_steps=num_of_steps,
                          spark_main_args=spark_main_args,
                          s3_work_bucket=s3_work_bucket,
                          use_mysql=use_mysql,
                          send_success_email_to=send_success_email_to)
    client = _get_client(aws_region)
    debug_steps = _create_debug_steps(create_cluster_setup_debug)
    if bid_price:
        # SPOT pricing: bid for the core nodes, keep the master on-demand
        instances = {
            'InstanceGroups': [
                {
                    'Name': 'EmrMaster',
                    'InstanceRole': 'MASTER',
                    'InstanceType': create_cluster_master_type,
                    'InstanceCount': 1
                },
                {
                    'Name': 'EmrCore',
                    'Market': 'SPOT',
                    'InstanceRole': 'CORE',
                    'BidPrice': bid_price,
                    'InstanceType': create_cluster_slave_type,
                    'InstanceCount': create_cluster_num_hosts
                },
            ],
            'Ec2KeyName': create_cluster_ec2_key_name,
            'KeepJobFlowAliveWhenNoSteps': create_cluster_keep_alive_when_done,
            'TerminationProtected': False,
            'Ec2SubnetId': create_cluster_ec2_subnet_id
        }
    else:
        instances = {
            'MasterInstanceType': create_cluster_master_type,
            'SlaveInstanceType': create_cluster_slave_type,
            'InstanceCount': create_cluster_num_hosts,
            'Ec2KeyName': create_cluster_ec2_key_name,
            'KeepJobFlowAliveWhenNoSteps': create_cluster_keep_alive_when_done,
            'TerminationProtected': False,
            'Ec2SubnetId': create_cluster_ec2_subnet_id
        }
    response = client.run_job_flow(
        Name=job_flow_name,
        LogUri=s3_logs_uri,
        ReleaseLabel='emr-4.6.0',
        Instances=instances,
        Steps=debug_steps + steps,
        Applications=[{'Name': 'Ganglia'}, {'Name': 'Spark'}],
        Configurations=[
            {
                'Classification': 'spark',
                'Properties': {
                    'maximizeResourceAllocation': 'true'
                }
            },
            {
                "Classification": "spark-defaults",
                "Properties": {
                    "spark.dynamicAllocation.enabled": "true",
                    "spark.executor.instances": "0"
                }
            }
        ],
        VisibleToAllUsers=True,
        JobFlowRole='EMR_EC2_DefaultRole',
        ServiceRole='EMR_DefaultRole',
        Tags=[{'Key': 'Name', 'Value': spark_main}]
    )
    job_flow_id = response['JobFlowId']
    # Fix: use print() calls — py2 print statements break under Python 3
    print('Created Job Flow: {}'.format(job_flow_id))
    step_ids = _get_step_ids_for_job_flow(job_flow_id, client)
    print('Created Job steps: {}'.format(step_ids))
    print('''Waiting for steps to finish. Visit on aws portal:
https://{0}.console.aws.amazon.com/elasticmapreduce/home?region={0}#cluster-details:{1}'''.format(aws_region, job_flow_id))
    print("Find logs here: {0}{1}/".format(s3_logs_uri, job_flow_id))
    return job_flow_id
def _get_step_ids_for_job_flow(job_flow_id, client):
steps = client.list_steps(ClusterId=job_flow_id)
step_ids = map(lambda s: s['Id'], steps['Steps'])
return step_ids
def _wait_for_job_flow(aws_region, job_flow_id, step_ids=None):
    """Poll the EMR cluster every 30s, printing progress, until every
    step in *step_ids* has completed, failed or been cancelled.

    Arguments
    ---------
    aws_region: str
    job_flow_id: str
    step_ids: list of str, optional
        Step IDs to monitor; with no steps the loop exits after one poll.
    """
    # Fix: step_ids used a mutable default argument ([])
    if step_ids is None:
        step_ids = []
    while True:
        time.sleep(30)
        client = _get_client(aws_region)
        cluster = client.describe_cluster(ClusterId=job_flow_id)
        state = cluster['Cluster']['Status']['State']
        state_failed = state in ['TERMINATED_WITH_ERRORS']
        p = []
        p.append('Cluster: {}'.format(state))
        all_done = True
        for step_id in step_ids:
            step = client.describe_step(ClusterId=job_flow_id, StepId=step_id)
            step_state = step['Step']['Status']['State']
            step_failed = step_state in ['FAILED', 'CANCELLED']
            step_success = step_state in ['COMPLETED']
            step_done = step_success or step_failed
            step_name = step['Step']['Name']
            if not step_success:
                p.append('{} ({}) - {}'.format(step_name, step_id, step_state))
            all_done = all_done and step_done
            if step_failed:
                # Fix: print() calls instead of py2 print statements
                print('!!! STEP FAILED: {} ({})'.format(step_name, step_id))
        print('\t'.join(p))
        if all_done:
            print("All done")
            break
        if state_failed:
            print(">>>>>>>>>>>>>>>> FAILED <<<<<<<<<<<<<<<<<<")
            print("Error message: {}".format(cluster['Cluster']
                                             ['Status']['Message']))
            # Fix: once the cluster has terminated with errors no further
            # progress is possible — stop polling instead of looping forever.
            break
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--create_cluster',
                        # Fix: help string was missing its closing parenthesis
                        help='Create a new cluster (and destroy it when it ' +
                        'is done)',
                        action='store_true')
    parser.add_argument('--create_cluster_master_type',
                        # Fix: help said 'Number of hosts' (copy-paste error)
                        help='EC2 instance type of the master node',
                        default='m1.medium')
    parser.add_argument('--create_cluster_slave_type',
                        help='EC2 instance type of the slave (core) nodes',
                        default='m3.xlarge')
    parser.add_argument('--create_cluster_num_hosts', help='Number of hosts',
                        type=int, default=1)
    parser.add_argument('--create_cluster_ec2_key_name', help='Keyfile when ' +
                        'you want to create a new cluster and connect to it')
    parser.add_argument('--create_cluster_ec2_subnet_id', help='')
    parser.add_argument('--create_cluster_keep_alive_when_done', default=False,
                        action='store_true',
                        # Fix: help text said the opposite of the flag's effect
                        help='Keep the cluster alive when execution is done')
    # NOTE(review): default=True combined with action='store_true' means this
    # option is always True and the flag is a no-op; confirm whether a
    # store_false / --no_... variant was intended before changing behavior.
    parser.add_argument('--create_cluster_setup_debug', default=True,
                        help='Whether to setup the cluster for debugging',
                        action='store_true')
    parser.add_argument('--aws_region', help='AWS region', required=True)
    parser.add_argument('--job_flow_id',
                        help='Job flow ID (EMR cluster) to submit to')
    parser.add_argument('--python_path', required=True,
                        help='Path to python files to zip and upload to the' +
                        ' server and add to the python path. This should ' +
                        'include the python_main file`')
    parser.add_argument('--spark_main', required=True,
                        help='Main python file for spark')
    parser.add_argument('--spark_main_args',
                        help='Arguments passed to your spark script')
    parser.add_argument('--s3_work_bucket', required=True,
                        help='Name of s3 bucket where sources and logs are ' +
                        'uploaded')
    parser.add_argument('--py-files', nargs='*', dest='py_files',
                        help='A list of py or zip or egg files to pass over ' +
                        'to spark-submit')
    parser.add_argument('--use_mysql', default=False,
                        help='Whether to setup mysql dataframes jar',
                        action='store_true')
    parser.add_argument('--send_success_email_to', default=None,
                        help='Email address to send on success')
    parser.add_argument('--num_of_steps', default=1, type=int)
    parser.add_argument('--bid_price', default=None)
    args = parser.parse_args()
    if args.job_flow_id:
        # Submit steps to an already-running cluster
        add_step_to_job_flow(job_flow_id=args.job_flow_id,
                             python_path=args.python_path,
                             spark_main=args.spark_main,
                             spark_main_args=args.spark_main_args,
                             num_of_steps=args.num_of_steps,
                             py_files=args.py_files,
                             use_mysql=args.use_mysql,
                             s3_work_bucket=args.s3_work_bucket,
                             aws_region=args.aws_region,
                             send_success_email_to=args.send_success_email_to)
    elif args.create_cluster:
        # Spin up a fresh cluster and remember its id for follow-up runs
        job_flow_id = create_cluster_and_run_job_flow(
            create_cluster_master_type=args.create_cluster_master_type,
            create_cluster_slave_type=args.create_cluster_slave_type,
            create_cluster_num_hosts=args.create_cluster_num_hosts,
            create_cluster_ec2_key_name=args.create_cluster_ec2_key_name,
            create_cluster_ec2_subnet_id=args.create_cluster_ec2_subnet_id,
            create_cluster_setup_debug=args.create_cluster_setup_debug,
            create_cluster_keep_alive_when_done=args.create_cluster_keep_alive_when_done,
            bid_price=args.bid_price,
            python_path=args.python_path,
            spark_main=args.spark_main,
            py_files=args.py_files,
            use_mysql=args.use_mysql,
            spark_main_args=args.spark_main_args,
            num_of_steps=args.num_of_steps,
            s3_work_bucket=args.s3_work_bucket,
            aws_region=args.aws_region,
            send_success_email_to=args.send_success_email_to)
        with open('.job_flow_id.txt', 'w') as f:
            f.write(job_flow_id)
    else:
        # Fix: print() call instead of a py2 print statement
        print("Nothing to do")
        parser.print_help()
|
import time
import operator
import theano
import theano.tensor as T
import numpy as np
from theano.gradient import grad_clip
class GRUTheano:
    """Two-layer GRU (gated recurrent unit) language model in Theano.

    Six gate parameter slices are stacked along axis 0 of U, W and b:
    indices 0-2 are layer 1's update/reset/candidate gates, indices 3-5
    are layer 2's. E embeds word indices; V and c project the final
    hidden state to vocabulary logits.
    """

    def __init__(self, word_dim, hidden_dim=128, bptt_truncate=-1):
        """word_dim: vocabulary size; hidden_dim: GRU state size;
        bptt_truncate: steps of backprop through time (-1 = full)."""
        self.word_dim = word_dim
        self.hidden_dim = hidden_dim
        self.bptt_truncate = bptt_truncate
        # Initialize parameters with 1/sqrt(fan-in) uniform scaling
        E = np.random.uniform(-np.sqrt(1./word_dim), np.sqrt(1./word_dim), (hidden_dim, word_dim))
        U = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (6, hidden_dim, hidden_dim))
        W = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (6, hidden_dim, hidden_dim))
        V = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (word_dim, hidden_dim))
        b = np.zeros((6, hidden_dim))
        c = np.zeros(word_dim)
        # Create Theano shared variables.
        # Fix: U was seeded from the W array and W from the U array
        # (swapped); each shared variable now gets its own initializer.
        self.E = theano.shared(name='E', value=E.astype(theano.config.floatX))
        self.U = theano.shared(name='U', value=U.astype(theano.config.floatX))
        self.W = theano.shared(name='W', value=W.astype(theano.config.floatX))
        self.V = theano.shared(name='V', value=V.astype(theano.config.floatX))
        self.b = theano.shared(name='b', value=b.astype(theano.config.floatX))
        self.c = theano.shared(name='c', value=c.astype(theano.config.floatX))
        # rmsprop squared-gradient caches, one per parameter
        self.mE = theano.shared(name='mE', value=np.zeros(E.shape).astype(theano.config.floatX))
        self.mU = theano.shared(name='mU', value=np.zeros(U.shape).astype(theano.config.floatX))
        self.mW = theano.shared(name='mW', value=np.zeros(W.shape).astype(theano.config.floatX))
        self.mV = theano.shared(name='mV', value=np.zeros(V.shape).astype(theano.config.floatX))
        self.mb = theano.shared(name='mb', value=np.zeros(b.shape).astype(theano.config.floatX))
        self.mc = theano.shared(name='mc', value=np.zeros(c.shape).astype(theano.config.floatX))
        # Build the symbolic graph and compiled functions
        self.theano = {}
        self.__theano_build__()

    def __theano_build__(self):
        """Compile predict/predictClass/ceError/bptt/sgdStep functions."""
        E, U, W, V, b, c = self.E, self.U, self.W, self.V, self.b, self.c
        x = T.ivector('x')  # input word indices
        y = T.ivector('y')  # target word indices

        def forwardPropStep(x_t, s_t1_prev, s_t2_prev):
            # Word embedding lookup (column of E)
            x_e = E[:, x_t]
            # Fix: the previous "optimization" built np.diag over symbolic
            # Theano vectors, which numpy cannot evaluate; compute each
            # gate with its own dot products instead.
            # GRU layer 1
            z_t1 = T.nnet.hard_sigmoid(U[0].dot(x_e) + W[0].dot(s_t1_prev) + b[0])
            r_t1 = T.nnet.hard_sigmoid(U[1].dot(x_e) + W[1].dot(s_t1_prev) + b[1])
            c_t1 = T.tanh(U[2].dot(x_e) + W[2].dot(s_t1_prev * r_t1) + b[2])
            s_t1 = (T.ones_like(z_t1) - z_t1) * c_t1 + z_t1 * s_t1_prev
            # GRU layer 2, fed by layer 1's new state
            z_t2 = T.nnet.hard_sigmoid(U[3].dot(s_t1) + W[3].dot(s_t2_prev) + b[3])
            r_t2 = T.nnet.hard_sigmoid(U[4].dot(s_t1) + W[4].dot(s_t2_prev) + b[4])
            c_t2 = T.tanh(U[5].dot(s_t1) + W[5].dot(s_t2_prev * r_t2) + b[5])
            s_t2 = (T.ones_like(z_t2) - z_t2) * c_t2 + z_t2 * s_t2_prev
            # Softmax over the vocabulary
            o_t = T.nnet.softmax(V.dot(s_t2) + c)[0]
            return [o_t, s_t1, s_t2]

        # Scan over the sequence, threading both layers' hidden states
        [o, s, s2], updates = theano.scan(
            forwardPropStep,
            sequences=x,
            truncate_gradient=self.bptt_truncate,
            outputs_info=[None,
                          dict(initial=T.zeros(self.hidden_dim)),
                          dict(initial=T.zeros(self.hidden_dim))])
        prediction = T.argmax(o, axis=1)
        o_error = T.sum(T.nnet.categorical_crossentropy(o, y))
        # Total cost (regularization could be added here)
        cost = o_error
        # Gradients of the cost w.r.t. every parameter
        dE = T.grad(cost, E)
        dU = T.grad(cost, U)
        dW = T.grad(cost, W)
        dV = T.grad(cost, V)
        db = T.grad(cost, b)
        dc = T.grad(cost, c)
        # Compiled functions
        self.predict = theano.function([x], o)
        self.predictClass = theano.function([x], prediction)
        self.ceError = theano.function([x, y], cost)
        self.bptt = theano.function([x, y], [dE, dU, dW, dV, db, dc])
        # rmsprop: decayed squared-gradient caches
        learning_rate = T.scalar('learning_rate')
        decay = T.scalar('decay')
        mE = decay * self.mE + (1 - decay) * dE ** 2
        mU = decay * self.mU + (1 - decay) * dU ** 2
        mW = decay * self.mW + (1 - decay) * dW ** 2
        mV = decay * self.mV + (1 - decay) * dV ** 2
        mb = decay * self.mb + (1 - decay) * db ** 2
        mc = decay * self.mc + (1 - decay) * dc ** 2
        # One rmsprop training step: updates parameters and caches in place
        self.sgdStep = theano.function(
            [x, y, learning_rate, theano.Param(decay, default=0.9)],
            [],
            updates=[(E, E - learning_rate * dE / T.sqrt(mE + 1e-6)),
                     (U, U - learning_rate * dU / T.sqrt(mU + 1e-6)),
                     (W, W - learning_rate * dW / T.sqrt(mW + 1e-6)),
                     (V, V - learning_rate * dV / T.sqrt(mV + 1e-6)),
                     (b, b - learning_rate * db / T.sqrt(mb + 1e-6)),
                     (c, c - learning_rate * dc / T.sqrt(mc + 1e-6)),
                     (self.mE, mE),
                     (self.mU, mU),
                     (self.mW, mW),
                     (self.mV, mV),
                     (self.mb, mb),
                     (self.mc, mc)])

    def calculateTotalLoss(self, X, Y):
        """Summed cross-entropy loss over all (x, y) sequence pairs."""
        return np.sum([self.ceError(x, y) for x, y in zip(X, Y)])

    def calculateLoss(self, X, Y):
        """Average cross-entropy loss per word."""
        num_words = np.sum([len(y) for y in Y])
        return self.calculateTotalLoss(X, Y) / float(num_words)
Fixed GRUTheano class. Had to remove the batched matrix multiplications as they did not do what I thought.
import time
import operator
import theano
import theano.tensor as T
import numpy as np
from theano.gradient import grad_clip
class GRUTheano:
    """Two-layer GRU (gated recurrent unit) language model in Theano.

    Six gate parameter slices are stacked along axis 0 of U, W and b:
    indices 0-2 are layer 1's update/reset/candidate gates, indices 3-5
    are layer 2's. E embeds word indices; V and c project the final
    hidden state to vocabulary logits.
    """

    def __init__(self, word_dim, hidden_dim=128, bptt_truncate=-1):
        """word_dim: vocabulary size; hidden_dim: GRU state size;
        bptt_truncate: steps of backprop through time (-1 = full)."""
        self.word_dim = word_dim
        self.hidden_dim = hidden_dim
        self.bptt_truncate = bptt_truncate
        # Initialize parameters with 1/sqrt(fan-in) uniform scaling
        E = np.random.uniform(-np.sqrt(1./word_dim), np.sqrt(1./word_dim), (hidden_dim, word_dim))
        U = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (6, hidden_dim, hidden_dim))
        W = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (6, hidden_dim, hidden_dim))
        V = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (word_dim, hidden_dim))
        b = np.zeros((6, hidden_dim))
        c = np.zeros(word_dim)
        # Create Theano shared variables.
        # Fix: the 'U' shared variable was seeded from the W array and
        # 'W' from the U array (swapped); same distribution, so it was
        # numerically harmless, but misleading — now each gets its own.
        self.E = theano.shared(name='E', value=E.astype(theano.config.floatX))
        self.U = theano.shared(name='U', value=U.astype(theano.config.floatX))
        self.W = theano.shared(name='W', value=W.astype(theano.config.floatX))
        self.V = theano.shared(name='V', value=V.astype(theano.config.floatX))
        self.b = theano.shared(name='b', value=b.astype(theano.config.floatX))
        self.c = theano.shared(name='c', value=c.astype(theano.config.floatX))
        # rmsprop squared-gradient caches, one per parameter
        self.mE = theano.shared(name='mE', value=np.zeros(E.shape).astype(theano.config.floatX))
        self.mU = theano.shared(name='mU', value=np.zeros(U.shape).astype(theano.config.floatX))
        self.mW = theano.shared(name='mW', value=np.zeros(W.shape).astype(theano.config.floatX))
        self.mV = theano.shared(name='mV', value=np.zeros(V.shape).astype(theano.config.floatX))
        self.mb = theano.shared(name='mb', value=np.zeros(b.shape).astype(theano.config.floatX))
        self.mc = theano.shared(name='mc', value=np.zeros(c.shape).astype(theano.config.floatX))
        # Build the symbolic graph and compiled functions
        self.theano = {}
        self.__theano_build__()

    def __theano_build__(self):
        """Compile predict/predictClass/ceError/bptt/sgdStep functions."""
        E, U, W, V, b, c = self.E, self.U, self.W, self.V, self.b, self.c
        x = T.ivector('x')  # input word indices
        y = T.ivector('y')  # target word indices

        def forwardPropStep(x_t, s_t1_prev, s_t2_prev):
            # Word embedding lookup (column of E)
            x_e = E[:, x_t]
            # GRU layer 1: update gate z, reset gate r, candidate c
            z_t1 = T.nnet.hard_sigmoid(U[0].dot(x_e) + W[0].dot(s_t1_prev) + b[0])
            r_t1 = T.nnet.hard_sigmoid(U[1].dot(x_e) + W[1].dot(s_t1_prev) + b[1])
            c_t1 = T.tanh(U[2].dot(x_e) + W[2].dot(s_t1_prev * r_t1) + b[2])
            s_t1 = (T.ones_like(z_t1) - z_t1) * c_t1 + z_t1 * s_t1_prev
            # GRU layer 2, fed by layer 1's new state
            z_t2 = T.nnet.hard_sigmoid(U[3].dot(s_t1) + W[3].dot(s_t2_prev) + b[3])
            r_t2 = T.nnet.hard_sigmoid(U[4].dot(s_t1) + W[4].dot(s_t2_prev) + b[4])
            c_t2 = T.tanh(U[5].dot(s_t1) + W[5].dot(s_t2_prev * r_t2) + b[5])
            s_t2 = (T.ones_like(z_t2) - z_t2) * c_t2 + z_t2 * s_t2_prev
            # Softmax over the vocabulary
            o_t = T.nnet.softmax(V.dot(s_t2) + c)[0]
            return [o_t, s_t1, s_t2]

        # Scan over the sequence, threading both layers' hidden states
        [o, s, s2], updates = theano.scan(
            forwardPropStep,
            sequences=x,
            truncate_gradient=self.bptt_truncate,
            outputs_info=[None,
                          dict(initial=T.zeros(self.hidden_dim)),
                          dict(initial=T.zeros(self.hidden_dim))])
        prediction = T.argmax(o, axis=1)
        o_error = T.sum(T.nnet.categorical_crossentropy(o, y))
        # Total cost (regularization could be added here)
        cost = o_error
        # Gradients of the cost w.r.t. every parameter
        dE = T.grad(cost, E)
        dU = T.grad(cost, U)
        dW = T.grad(cost, W)
        dV = T.grad(cost, V)
        db = T.grad(cost, b)
        dc = T.grad(cost, c)
        # Compiled functions
        self.predict = theano.function([x], o)
        self.predictClass = theano.function([x], prediction)
        self.ceError = theano.function([x, y], cost)
        self.bptt = theano.function([x, y], [dE, dU, dW, dV, db, dc])
        # rmsprop: decayed squared-gradient caches
        learning_rate = T.scalar('learning_rate')
        decay = T.scalar('decay')
        mE = decay * self.mE + (1 - decay) * dE ** 2
        mU = decay * self.mU + (1 - decay) * dU ** 2
        mW = decay * self.mW + (1 - decay) * dW ** 2
        mV = decay * self.mV + (1 - decay) * dV ** 2
        mb = decay * self.mb + (1 - decay) * db ** 2
        mc = decay * self.mc + (1 - decay) * dc ** 2
        # One rmsprop training step: updates parameters and caches in place
        self.sgdStep = theano.function(
            [x, y, learning_rate, theano.Param(decay, default=0.9)],
            [],
            updates=[(E, E - learning_rate * dE / T.sqrt(mE + 1e-6)),
                     (U, U - learning_rate * dU / T.sqrt(mU + 1e-6)),
                     (W, W - learning_rate * dW / T.sqrt(mW + 1e-6)),
                     (V, V - learning_rate * dV / T.sqrt(mV + 1e-6)),
                     (b, b - learning_rate * db / T.sqrt(mb + 1e-6)),
                     (c, c - learning_rate * dc / T.sqrt(mc + 1e-6)),
                     (self.mE, mE),
                     (self.mU, mU),
                     (self.mW, mW),
                     (self.mV, mV),
                     (self.mb, mb),
                     (self.mc, mc)])

    def calculateTotalLoss(self, X, Y):
        """Summed cross-entropy loss over all (x, y) sequence pairs."""
        return np.sum([self.ceError(x, y) for x, y in zip(X, Y)])

    def calculateLoss(self, X, Y):
        """Average cross-entropy loss per word."""
        num_words = np.sum([len(y) for y in Y])
        return self.calculateTotalLoss(X, Y) / float(num_words)
|
"""File-backed data interface
"""
import copy
import csv
import glob
import os
import re
from functools import lru_cache, wraps
import pyarrow as pa
from smif.data_layer.data_interface import (DataExistsError, DataInterface,
DataMismatchError,
DataNotFoundError, DataReadError)
from smif.data_layer.load import dump, load
from smif.metadata import Spec
# Import fiona if available (optional dependency)
try:
import fiona
except ImportError:
pass
# Note: these decorators must be defined before being used below
def check_exists(dtype):
    """Decorator creator: verify an item of ``dtype`` exists before
    delegating to the wrapped read/update/delete method.
    """
    def wrapper(func):
        """Decorator specialised by dtype (class/item type)."""
        @wraps(func)
        def wrapped(self, name, primary=None, secondary=None, *func_args, **func_kwargs):
            """Run the existence checks, then call ``func`` with only the
            arguments the caller actually supplied."""
            _assert_no_mismatch(dtype, name, primary, secondary)
            if dtype in _file_dtypes():
                _assert_file_exists(self.file_dir, dtype, name)
            if dtype in _config_dtypes():
                _assert_config_item_exists(self.read_project_config(), dtype, name)
            if dtype in _nested_config_dtypes():
                _assert_nested_config_item_exists(
                    self.read_project_config(), dtype, name, primary)
            passed = [name]
            if primary is not None:
                passed.append(primary)
                if secondary is not None:
                    passed.append(secondary)
            return func(self, *(passed + list(func_args)), **func_kwargs)
        return wrapped
    return wrapper
def check_not_exists(dtype):
    """Decorator creator: verify an item of ``dtype`` does NOT already
    exist before delegating to the wrapped write method.
    """
    def wrapper(func):
        """Decorator specialised by dtype (class/item type)."""
        @wraps(func)
        def wrapped(self, primary, secondary=None, *func_args, **func_kwargs):
            """Run the non-existence checks, then call ``func``."""
            if dtype in _file_dtypes():
                _assert_file_not_exists(self.file_dir, dtype, primary['name'])
            if dtype in _config_dtypes():
                _assert_config_item_not_exists(
                    self.read_project_config(), dtype, primary['name'])
            if dtype in _nested_config_dtypes():
                _assert_nested_config_item_not_exists(
                    self.read_project_config(), dtype, primary, secondary)
            passed = (primary,) if secondary is None else (primary, secondary)
            return func(self, *(passed + func_args), **func_kwargs)
        return wrapped
    return wrapper
class DatafileInterface(DataInterface):
"""Read and write interface to YAML / CSV configuration files
and intermediate CSV / native-binary data storage.
Project.yml
Arguments
---------
base_folder: str
The path to the configuration and data files
storage_format: str
The format used to store intermediate data (local_csv, local_binary)
"""
def __init__(self, base_folder, storage_format='local_binary'):
super().__init__()
self.base_folder = base_folder
self.storage_format = storage_format
self.file_dir = {}
self.file_dir['project'] = os.path.join(base_folder, 'config')
self.file_dir['results'] = os.path.join(base_folder, 'results')
# cache results of reading project_config (invalidate on write)
self._project_config_cache_invalid = True
# MUST ONLY access through self._read_project_config()
self._project_config_cache = None
config_folders = {
'model_runs': 'config',
'sos_models': 'config',
'sector_models': 'config',
'strategies': 'data',
'interventions': 'data',
'initial_conditions': 'data',
'dimensions': 'data',
'coefficients': 'data',
'scenarios': 'data',
'narratives': 'data',
}
for category, folder in config_folders.items():
dirname = os.path.join(base_folder, folder, category)
# ensure each directory exists
os.makedirs(dirname, exist_ok=True)
# store dirname
self.file_dir[category] = dirname
# region Model runs
def read_model_runs(self):
names = self._read_filenames_in_dir(self.file_dir['model_runs'], '.yml')
model_runs = [self.read_model_run(name) for name in names]
return model_runs
@check_exists(dtype='model_run')
def read_model_run(self, model_run_name):
return self._read_yaml_file(self.file_dir['model_runs'], model_run_name)
@check_not_exists(dtype='model_run')
def write_model_run(self, model_run):
self._write_yaml_file(self.file_dir['model_runs'], model_run['name'], model_run)
@check_exists(dtype='model_run')
def update_model_run(self, model_run_name, model_run):
self._write_yaml_file(self.file_dir['model_runs'], model_run['name'], model_run)
@check_exists(dtype='model_run')
def delete_model_run(self, model_run_name):
os.remove(os.path.join(self.file_dir['model_runs'], model_run_name + '.yml'))
# endregion
# region System-of-system models
def read_sos_models(self):
names = self._read_filenames_in_dir(self.file_dir['sos_models'], '.yml')
sos_models = [self.read_sos_model(name) for name in names]
return sos_models
@check_exists(dtype='sos_model')
def read_sos_model(self, sos_model_name):
data = self._read_yaml_file(self.file_dir['sos_models'], sos_model_name)
return data
@check_not_exists(dtype='sos_model')
def write_sos_model(self, sos_model):
self._write_yaml_file(self.file_dir['sos_models'], sos_model['name'], sos_model)
@check_exists(dtype='sos_model')
def update_sos_model(self, sos_model_name, sos_model):
self._write_yaml_file(self.file_dir['sos_models'], sos_model['name'], sos_model)
@check_exists(dtype='sos_model')
def delete_sos_model(self, sos_model_name):
os.remove(os.path.join(self.file_dir['sos_models'], sos_model_name + '.yml'))
# endregion
# region Sector models
def read_sector_models(self):
names = self._read_filenames_in_dir(self.file_dir['sector_models'], '.yml')
sector_models = [self.read_sector_model(name) for name in names]
return sector_models
@check_exists(dtype='sector_model')
def read_sector_model(self, sector_model_name):
sector_model = self._read_yaml_file(self.file_dir['sector_models'], sector_model_name)
self._set_list_coords(sector_model['inputs'])
self._set_list_coords(sector_model['outputs'])
self._set_list_coords(sector_model['parameters'])
return sector_model
@check_not_exists(dtype='sector_model')
def write_sector_model(self, sector_model):
if sector_model['interventions']:
self.logger.warning("Ignoring interventions")
sector_model['interventions'] = []
self._write_yaml_file(
self.file_dir['sector_models'], sector_model['name'], sector_model)
@check_exists(dtype='sector_model')
def update_sector_model(self, sector_model_name, sector_model):
# ignore interventions and initial conditions which the app doesn't handle
if sector_model['interventions'] or sector_model['initial_conditions']:
old_sector_model = self._read_yaml_file(
self.file_dir['sector_models'], sector_model['name'])
if sector_model['interventions']:
self.logger.warning("Ignoring interventions write")
sector_model['interventions'] = old_sector_model['interventions']
if sector_model['initial_conditions']:
self.logger.warning("Ignoring initial conditions write")
sector_model['initial_conditions'] = old_sector_model['initial_conditions']
self._write_yaml_file(
self.file_dir['sector_models'], sector_model['name'], sector_model)
@check_exists(dtype='sector_model')
def delete_sector_model(self, sector_model_name):
os.remove(os.path.join(self.file_dir['sector_models'], sector_model_name + '.yml'))
@check_exists(dtype='sector_model')
def read_interventions(self, sector_model_name):
all_interventions = {}
sector_model = self._read_yaml_file(self.file_dir['sector_models'], sector_model_name)
interventions = self._read_interventions_files(
sector_model['interventions'], 'interventions')
for entry in interventions:
name = entry.pop('name')
if name in all_interventions:
msg = "An entry for intervention {} already exists"
raise ValueError(msg.format(name))
else:
all_interventions[name] = entry
return all_interventions
@check_exists(dtype='sector_model')
def read_initial_conditions(self, sector_model_name):
sector_model = self._read_yaml_file(self.file_dir['sector_models'], sector_model_name)
return self._read_interventions_files(
sector_model['initial_conditions'], 'initial_conditions')
def _read_interventions_files(self, filenames, dirname):
intervention_list = []
for filename in filenames:
interventions = self._read_interventions_file(filename, dirname)
intervention_list.extend(interventions)
return intervention_list
def _read_interventions_file(self, filename, dirname):
"""Read the planned intervention data from a file
Planned interventions are stored either a csv or yaml file. In the case
of the former, the file should look like this::
name,build_year
asset_a,2010
asset_b,2015
In the case of a yaml, file, the format is as follows::
- name: asset_a
build_year: 2010
- name: asset_b
build_year: 2015
Arguments
---------
filename: str
The name of the strategy yml or csv file to read in
dirname: str
The key of the dirname e.g. ``strategies`` or ``initial_conditions``
Returns
-------
dict of dict
Dict of intervention attribute dicts, keyed by intervention name
"""
filepath = self.file_dir[dirname]
_, ext = os.path.splitext(filename)
if ext == '.csv':
data = self._read_state_file(os.path.join(filepath, filename))
try:
data = self._reshape_csv_interventions(data)
except ValueError:
raise ValueError("Error reshaping data for {}".format(filename))
else:
data = self._read_yaml_file(filepath, filename, extension='')
return data
def _reshape_csv_interventions(self, data):
"""
Arguments
---------
data : list of dict
A list of dicts containing intervention data
Returns
-------
dict of dicts
"""
new_data = []
for element in data:
reshaped_data = {}
for key, value in element.items():
if key.endswith(('_value', '_unit')):
new_key, sub_key = key.rsplit(sep="_", maxsplit=1)
if new_key in reshaped_data:
if not isinstance(reshaped_data[new_key], dict):
msg = "Duplicate heading in csv data: {}"
raise ValueError(msg.format(new_key))
else:
reshaped_data[new_key].update({sub_key: value})
else:
reshaped_data[new_key] = {sub_key: value}
else:
if key in reshaped_data:
msg = "Duplicate heading in csv data: {}"
raise ValueError(msg.format(new_key))
else:
reshaped_data[key] = value
new_data.append(reshaped_data)
return new_data
# endregion
# region Strategies
def read_strategies(self, model_run_name):
strategies = []
model_run_config = self.read_model_run(model_run_name)
for strategy in model_run_config['strategies']:
if strategy['strategy'] == 'pre-specified-planning':
decisions = self._read_interventions_file(strategy['filename'], 'strategies')
if decisions is None:
decisions = []
del strategy['filename']
strategy['interventions'] = decisions
self.logger.info("Added %s pre-specified planning interventions to %s",
len(decisions), strategy['model_name'])
strategies.append(strategy)
return strategies
# endregion
# region State
def read_state(self, modelrun_name, timestep, decision_iteration=None):
fname = self._get_state_filename(modelrun_name, timestep, decision_iteration)
if not os.path.exists(fname):
msg = "State file does not exist for timestep {} and iteration {}"
raise DataNotFoundError(msg.format(timestep, decision_iteration))
state = self._read_state_file(fname)
return state
def write_state(self, state, modelrun_name, timestep=None, decision_iteration=None):
fname = self._get_state_filename(modelrun_name, timestep, decision_iteration)
os.makedirs(os.path.dirname(fname), exist_ok=True)
with open(fname, 'w+') as file_handle:
writer = csv.DictWriter(file_handle, fieldnames=('name', 'build_year'))
writer.writeheader()
for row in state:
writer.writerow(row)
def _get_state_filename(self, modelrun_name, timestep=None, decision_iteration=None):
"""Compose a unique filename for state file:
state_{timestep|0000}[_decision_{iteration}].csv
"""
results_dir = self.file_dir['results']
if timestep is None:
timestep = '0000'
if decision_iteration is None:
separator = ''
decision_iteration = ''
else:
separator = '_decision_'
fmt = 'state_{}{}{}.csv'
fname = os.path.join(
results_dir, modelrun_name, fmt.format(timestep, separator, decision_iteration))
return fname
@staticmethod
def _read_state_file(fname):
"""Read list of {name, build_year} dicts from state file
"""
with open(fname, 'r') as file_handle:
reader = csv.DictReader(file_handle)
state = list(reader)
return state
# endregion
# region Units
def read_unit_definitions(self):
    """Read user-defined unit definitions, one definition per line.

    Returns
    -------
    list of str
        Stripped lines from the units file, or [] if no file configured

    Raises
    ------
    DataNotFoundError
        If the configured units file does not exist
    """
    project_config = self.read_project_config()
    # robustness: tolerate a project config with no 'units' key at all
    filename = project_config.get('units')
    if filename is None:
        return []
    path = os.path.join(self.base_folder, 'data', filename)
    try:
        with open(path, 'r') as units_fh:
            return [line.strip() for line in units_fh]
    except FileNotFoundError as ex:
        raise DataNotFoundError('Units file not found:' + str(ex)) from ex
# endregion
# region Dimensions
def read_dimensions(self):
    """Read all dimension definitions, with elements loaded from file."""
    dimensions = self.read_project_config()['dimensions']
    for dimension in dimensions:
        dimension['elements'] = self._read_dimension_file(dimension['elements'])
    return dimensions
@check_exists(dtype='dimension')
def read_dimension(self, dimension_name):
    """Read a single dimension definition, with elements loaded from file."""
    config = self.read_project_config()
    dimension = _pick_from_list(config['dimensions'], dimension_name)
    dimension['elements'] = self._read_dimension_file(dimension['elements'])
    return dimension
def _set_list_coords(self, list_):
    """Populate 'coords' on each spec dict in the list (in place)."""
    for entry in list_:
        self._set_item_coords(entry)
def _set_item_coords(self, item):
    """Populate item['coords'] with dimension elements, if 'dims' is set."""
    if 'dims' not in item:
        return
    coords = {}
    for dim in item['dims']:
        coords[dim] = self.read_dimension(dim)['elements']
    item['coords'] = coords
@lru_cache(maxsize=32)
def _read_dimension_file(self, filename):
    """Read dimension elements from a yaml, csv or spatial data file.

    Bug fix: os.path.splitext returns the extension *with* its leading
    dot ('.csv'), so the comparisons must use dotted extensions — the
    previous undotted values ('csv', 'yml', ...) could never match and
    every file fell through to the error branch.

    NOTE(review): lru_cache on an instance method keeps instances alive
    for the cache lifetime; left unchanged to preserve behavior.
    """
    filepath = os.path.join(self.file_dir['dimensions'], filename)
    _, ext = os.path.splitext(filename)
    if ext in ('.yml', '.yaml'):
        data = self._read_yaml_file(filepath)
    elif ext == '.csv':
        data = self._get_data_from_csv(filepath)
    elif ext in ('.geojson', '.shp'):
        data = self._read_spatial_file(filepath)
    else:
        msg = "Extension {} not recognised, expected one of ('csv', 'yml', 'yaml', "
        msg += "'geojson', 'shp') when reading {}"
        raise DataReadError(msg.format(ext, filepath))
    return data
def _delete_dimension_file(self, filename):
os.remove(os.path.join(self.file_dir['dimensions'], filename))
@check_not_exists(dtype='dimension')
def write_dimension(self, dimension):
    """Add a dimension to the project configuration."""
    config = self.read_project_config()
    config['dimensions'].append(dimension)
    # TODO write elements file
    self._write_project_config(config)
@check_exists(dtype='dimension')
def update_dimension(self, dimension_name, dimension):
    """Replace an existing dimension in the project configuration."""
    config = self.read_project_config()
    position = _idx_in_list(config['dimensions'], dimension_name)
    config['dimensions'][position] = dimension
    # TODO update elements file
    self._write_project_config(config)
@check_exists(dtype='dimension')
def delete_dimension(self, dimension_name, dimension=None):
    """Remove a dimension from the project configuration.

    Consistency fix: sibling delete_scenario/delete_narrative take only
    the name; `dimension` was never used, so it is now optional (kept
    for backward compatibility with existing callers).
    """
    config = self.read_project_config()
    idx = _idx_in_list(config['dimensions'], dimension_name)
    del config['dimensions'][idx]
    # TODO delete elements file
    self._write_project_config(config)
# endregion
# region Conversion coefficients
def read_coefficients(self, source_spec, destination_spec):
    """Read conversion coefficients, or None if not cached on disk."""
    path = self._get_coefficients_path(source_spec, destination_spec)
    if not os.path.isfile(path):
        msg = "Could not find the coefficients file for %s to %s"
        self.logger.warning(msg, source_spec, destination_spec)
        return None
    return self._get_data_from_native_file(path)
def write_coefficients(self, source_spec, destination_spec, data):
    """Write conversion coefficients to intermediate (binary) storage."""
    path = self._get_coefficients_path(source_spec, destination_spec)
    self._write_data_to_native_file(path, data)
def _get_coefficients_path(self, source_spec, destination_spec):
results_dir = self.file_dir['coefficients']
path = os.path.join(
results_dir,
"{}_{}.{}_{}.dat".format(
source_spec.name, "-".join(source_spec.dims),
destination_spec.name, "-".join(destination_spec.dims)
)
)
return path
# endregion
# region Scenarios
def read_scenarios(self):
    """Read all scenario definitions from the project configuration."""
    return self.read_project_config()['scenarios']
@check_exists(dtype='scenario')
def read_scenario(self, scenario_name):
    """Read a scenario definition, with coords populated on its outputs."""
    config = self.read_project_config()
    scenario = _pick_from_list(config['scenarios'], scenario_name)
    self._set_list_coords(scenario['provides'])
    return scenario
@check_not_exists(dtype='scenario')
def write_scenario(self, scenario):
    """Add a scenario to the project configuration."""
    config = self.read_project_config()
    config['scenarios'].append(scenario)
    self._write_project_config(config)
@check_exists(dtype='scenario')
def update_scenario(self, scenario_name, scenario):
    """Replace an existing scenario in the project configuration."""
    config = self.read_project_config()
    position = _idx_in_list(config['scenarios'], scenario_name)
    config['scenarios'][position] = scenario
    self._write_project_config(config)
@check_exists(dtype='scenario')
def delete_scenario(self, scenario_name):
    """Remove a scenario from the project configuration."""
    config = self.read_project_config()
    position = _idx_in_list(config['scenarios'], scenario_name)
    del config['scenarios'][position]
    self._write_project_config(config)
@check_exists(dtype='scenario')
def read_scenario_variants(self, scenario_name):
    """Read the variants of a scenario."""
    config = self.read_project_config()
    return _pick_from_list(config['scenarios'], scenario_name)['variants']
@check_exists(dtype='scenario_variant')
def read_scenario_variant(self, scenario_name, variant_name):
    """Read a single scenario variant by name."""
    return _pick_from_list(self.read_scenario_variants(scenario_name), variant_name)
@check_not_exists(dtype='scenario_variant')
def write_scenario_variant(self, scenario_name, variant):
    """Add a variant to an existing scenario."""
    config = self.read_project_config()
    s_idx = _idx_in_list(config['scenarios'], scenario_name)
    config['scenarios'][s_idx]['variants'].append(variant)
    self._write_project_config(config)
@check_exists(dtype='scenario_variant')
def update_scenario_variant(self, scenario_name, variant_name, variant):
    """Replace an existing scenario variant."""
    config = self.read_project_config()
    s_idx = _idx_in_list(config['scenarios'], scenario_name)
    variants = config['scenarios'][s_idx]['variants']
    variants[_idx_in_list(variants, variant_name)] = variant
    self._write_project_config(config)
@check_exists(dtype='scenario_variant')
def delete_scenario_variant(self, scenario_name, variant_name):
    """Remove a variant from a scenario."""
    config = self.read_project_config()
    s_idx = _idx_in_list(config['scenarios'], scenario_name)
    variants = config['scenarios'][s_idx]['variants']
    del variants[_idx_in_list(variants, variant_name)]
    self._write_project_config(config)
@check_exists(dtype='scenario_variant')
def read_scenario_variant_data(self, scenario_name, variant_name, variable, timestep=None):
    """Read scenario variant data as an ndarray, optionally filtered by timestep."""
    spec = self._read_scenario_variable_spec(scenario_name, variable)
    filepath = self._get_scenario_variant_filepath(scenario_name, variant_name, variable)
    rows = self._get_data_from_csv(filepath)
    if timestep is not None:
        rows = [row for row in rows if int(row['timestep']) == timestep]
    try:
        return self.data_list_to_ndarray(rows, spec)
    except DataMismatchError as ex:
        msg = "DataMismatch in scenario: {}:{}.{}, from {}"
        raise DataMismatchError(
            msg.format(scenario_name, variant_name, variable, str(ex))
        ) from ex
@check_exists(dtype='scenario_variant')
def write_scenario_variant_data(self, data, scenario_name, variant_name, variable,
                                timestep=None):
    """Write scenario variant data (ndarray) out to its csv file."""
    spec = self._read_scenario_variable_spec(scenario_name, variable)
    rows = self.ndarray_to_data_list(data, spec)
    filepath = self._get_scenario_variant_filepath(scenario_name, variant_name, variable)
    self._write_data_to_csv(filepath, rows, spec)
def _get_scenario_variant_filepath(self, scenario_name, variant_name, variable):
    """Locate the data file for a scenario variant variable.

    Raises DataNotFoundError when no file is configured for the variable.
    """
    variant = self.read_scenario_variant(scenario_name, variant_name)
    data_files = variant.get('data', {})
    if variable not in data_files:
        raise DataNotFoundError(
            "Scenario data file not defined for {}:{}, {}".format(
                scenario_name, variant_name, variable)
        )
    return os.path.join(self.file_dir['scenarios'], data_files[variable])
def _read_scenario_variable_spec(self, scenario_name, variable):
    """Build a Spec for a variable from scenario->provides->variable."""
    scenario = self.read_scenario(scenario_name)
    spec_dict = _pick_from_list(scenario['provides'], variable)
    self._set_item_coords(spec_dict)
    return Spec.from_dict(spec_dict)
# endregion
# region Narratives
def read_narratives(self):
    """Read all narrative definitions from the project configuration."""
    return self.read_project_config()['narratives']
@check_exists(dtype='narrative')
def read_narrative(self, narrative_name):
    """Read a single narrative definition by name."""
    config = self.read_project_config()
    return _pick_from_list(config['narratives'], narrative_name)
@check_not_exists(dtype='narrative')
def write_narrative(self, narrative):
    """Add a narrative to the project configuration."""
    config = self.read_project_config()
    config['narratives'].append(narrative)
    self._write_project_config(config)
@check_exists(dtype='narrative')
def update_narrative(self, narrative_name, narrative):
    """Replace an existing narrative in the project configuration."""
    config = self.read_project_config()
    position = _idx_in_list(config['narratives'], narrative_name)
    config['narratives'][position] = narrative
    self._write_project_config(config)
@check_exists(dtype='narrative')
def delete_narrative(self, narrative_name):
    """Remove a narrative from the project configuration."""
    config = self.read_project_config()
    position = _idx_in_list(config['narratives'], narrative_name)
    del config['narratives'][position]
    self._write_project_config(config)
@check_exists(dtype='narrative')
def read_narrative_variants(self, narrative_name):
    """Read the variants of a narrative.

    Bug fix: 'narratives' is a *list* of dicts and cannot be indexed by
    a name string (the previous code raised TypeError); look up the
    narrative by name, as read_narrative_variant already does.
    """
    project_config = self.read_project_config()
    narrative = _pick_from_list(project_config['narratives'], narrative_name)
    return narrative['variants']
@check_exists(dtype='narrative_variant')
def read_narrative_variant(self, narrative_name, variant_name):
    """Read a single narrative variant by name."""
    config = self.read_project_config()
    n_idx = _idx_in_list(config['narratives'], narrative_name)
    all_variants = config['narratives'][n_idx]['variants']
    return _pick_from_list(all_variants, variant_name)
@check_not_exists(dtype='narrative_variant')
def write_narrative_variant(self, narrative_name, variant):
    """Add a variant to an existing narrative.

    Bug fix: 'narratives' is a *list* of dicts and cannot be indexed by
    a name string (the previous code raised TypeError); look up the
    index by name, as write_scenario_variant does.
    """
    project_config = self.read_project_config()
    n_idx = _idx_in_list(project_config['narratives'], narrative_name)
    project_config['narratives'][n_idx]['variants'].append(variant)
    self._write_project_config(project_config)
@check_exists(dtype='narrative_variant')
def update_narrative_variant(self, narrative_name, variant_name, variant):
    """Replace an existing narrative variant."""
    config = self.read_project_config()
    n_idx = _idx_in_list(config['narratives'], narrative_name)
    variants = config['narratives'][n_idx]['variants']
    variants[_idx_in_list(variants, variant_name)] = variant
    self._write_project_config(config)
@check_exists(dtype='narrative_variant')
def delete_narrative_variant(self, narrative_name, variant_name):
    """Remove a variant from a narrative."""
    config = self.read_project_config()
    n_idx = _idx_in_list(config['narratives'], narrative_name)
    variants = config['narratives'][n_idx]['variants']
    del variants[_idx_in_list(variants, variant_name)]
    self._write_project_config(config)
@check_exists(dtype='narrative_variant')
def read_narrative_variant_data(self, narrative_name, variant_name, variable,
                                timestep=None):
    """Read narrative variant data as an ndarray, optionally filtered by timestep."""
    spec = self._read_narrative_variable_spec(narrative_name, variable)
    filepath = self._get_narrative_variant_filepath(narrative_name, variant_name, variable)
    rows = self._get_data_from_csv(filepath)
    if timestep is not None:
        rows = [row for row in rows if int(row['timestep']) == timestep]
    try:
        return self.data_list_to_ndarray(rows, spec)
    except DataMismatchError as ex:
        msg = "DataMismatch in narrative: {}:{}, {}, from {}"
        raise DataMismatchError(
            msg.format(narrative_name, variant_name, variable, str(ex))
        ) from ex
@check_exists(dtype='narrative_variant')
def write_narrative_variant_data(self, data, narrative_name, variant_name, variable,
                                 timestep=None):
    """Write narrative variant data (ndarray) out to its csv file."""
    spec = self._read_narrative_variable_spec(narrative_name, variable)
    rows = self.ndarray_to_data_list(data, spec)
    filepath = self._get_narrative_variant_filepath(narrative_name, variant_name, variable)
    self._write_data_to_csv(filepath, rows, spec)
def _get_narrative_variant_filepath(self, narrative_name, variant_name, variable):
    """Locate the data file for a narrative variant variable.

    Raises DataNotFoundError when no file is configured for the variable.
    """
    variant = self.read_narrative_variant(narrative_name, variant_name)
    data_files = variant.get('data', {})
    if variable not in data_files:
        raise DataNotFoundError(
            "narrative data file not defined for {}:{}, {}".format(
                narrative_name, variant_name, variable)
        )
    return os.path.join(self.file_dir['narratives'], data_files[variable])
def _read_narrative_variable_spec(self, narrative_name, variable):
    """Build a Spec for a variable from narrative->provides->variable."""
    narrative = self.read_narrative(narrative_name)
    spec_dict = _pick_from_list(narrative['provides'], variable)
    self._set_item_coords(spec_dict)
    return Spec.from_dict(spec_dict)
# endregion
# region Results
def read_results(self, modelrun_id, model_name, output_spec, timestep=None,
                 modelset_iteration=None, decision_iteration=None):
    """Read results for a single model output from intermediate storage.

    Raises NotImplementedError when timestep is None — only per-timestep
    reads are supported.

    NOTE(review): if self.storage_format is neither 'local_csv' nor
    'local_binary' this falls through and returns None implicitly —
    confirm whether it should raise instead.
    """
    if timestep is None:
        raise NotImplementedError()
    results_path = self._get_results_path(
        modelrun_id, model_name, output_spec.name,
        timestep, modelset_iteration, decision_iteration
    )
    if self.storage_format == 'local_csv':
        data = self._get_data_from_csv(results_path)
        return self.data_list_to_ndarray(data, output_spec)
    if self.storage_format == 'local_binary':
        return self._get_data_from_native_file(results_path)
def write_results(self, data, modelrun_id, model_name, output_spec, timestep=None,
                  modelset_iteration=None, decision_iteration=None):
    """Write results for a single model output to intermediate storage.

    Raises NotImplementedError when timestep is None — only per-timestep
    writes are supported.
    """
    if timestep is None:
        raise NotImplementedError()
    results_path = self._get_results_path(
        modelrun_id, model_name, output_spec.name,
        timestep, modelset_iteration, decision_iteration
    )
    os.makedirs(os.path.dirname(results_path), exist_ok=True)
    if self.storage_format == 'local_csv':
        rows = self.ndarray_to_data_list(data, output_spec)
        self._write_data_to_csv(results_path, rows, output_spec)
    if self.storage_format == 'local_binary':
        self._write_data_to_native_file(results_path, data)
def _results_exist(self, modelrun_name):
"""Checks whether modelrun results exists on the filesystem
for a particular modelrun_name
Parameters
----------
modelrun_name: str
Returns
-------
bool: True when results exist for this modelrun_name
"""
previous_results_dir = os.path.join(self.file_dir['results'], modelrun_name)
return list(
glob.iglob(os.path.join(previous_results_dir, '**/*.*'), recursive=True))
def prepare_warm_start(self, modelrun_id):
    """Prepare a previous model run for warm start.

    Deletes results for the latest (possibly incomplete) timestep and
    returns that timestep so the run can resume there. Returns None when
    a warm start is not possible.

    Parameters
    ----------
    modelrun_id : str

    Returns
    -------
    int or None
        The timestep at which the run should resume
    """
    results_dir = os.path.join(self.file_dir['results'], modelrun_id)
    # Return if path to previous modelruns does not exist
    if not os.path.isdir(results_dir):
        self.logger.info("Warm start not possible because modelrun has "
                         "no previous results (path does not exist)")
        return None
    # Return if no results exist in last modelrun
    if not self._results_exist(modelrun_id):
        self.logger.info("Warm start not possible because the "
                         "modelrun does not have any results")
        return None
    # Return if previous results were stored in a different format
    previous_results_dir = os.path.join(self.file_dir['results'], modelrun_id)
    results = list(glob.iglob(os.path.join(previous_results_dir, '**/*.*'),
                              recursive=True))
    for filename in results:
        warn = (self.storage_format == 'local_csv' and not filename.endswith(".csv")) or \
            (self.storage_format == 'local_binary' and not filename.endswith(".dat"))
        if warn:
            self.logger.info("Warm start not possible because a different "
                             "storage mode was used in the previous run")
            return None
    # Perform warm start
    self.logger.info("Warm start %s", modelrun_id)
    # Get metadata for all results
    result_metadata = []
    for filename in glob.iglob(os.path.join(results_dir, '**/*.*'), recursive=True):
        # strip the results root directory (and leading path separator)
        # to get a path relative to the results dir for parsing
        result_metadata.append(self._parse_results_path(
            filename.replace(self.file_dir['results'], '')[1:]))
    # Find latest timestep
    result_metadata = sorted(result_metadata, key=lambda k: k['timestep'], reverse=True)
    latest_timestep = result_metadata[0]['timestep']
    # Remove all results with this timestep (they may be incomplete)
    results_to_remove = [
        result for result in result_metadata
        if result['timestep'] == latest_timestep
    ]
    for result in results_to_remove:
        os.remove(
            self._get_results_path(
                result['modelrun_id'],
                result['model_name'],
                result['output_name'],
                result['timestep'],
                result['modelset_iteration'],
                result['decision_iteration']))
    self.logger.info("Warm start will resume at timestep %s", latest_timestep)
    return latest_timestep
def _get_results_path(self, modelrun_id, model_name, output_name, timestep,
modelset_iteration=None, decision_iteration=None):
"""Return path to filename for a given output without file extension
On the pattern of:
results/<modelrun_name>/<model_name>/
decision_<id>_modelset_<id>/
output_<output_name>_timestep_<timestep>.csv
Parameters
----------
modelrun_id : str
model_name : str
output_name : str
timestep : str or int
modelset_iteration : int, optional
decision_iteration : int, optional
Returns
-------
path : strs
"""
results_dir = self.file_dir['results']
if modelset_iteration is None:
modelset_iteration = 'none'
if decision_iteration is None:
decision_iteration = 'none'
if self.storage_format == 'local_csv':
ext = 'csv'
elif self.storage_format == 'local_binary':
ext = 'dat'
else:
ext = 'unknown'
path = os.path.join(
results_dir, modelrun_id, model_name,
"decision_{}_modelset_{}".format(decision_iteration, modelset_iteration),
"output_{}_timestep_{}.{}".format(output_name, timestep, ext)
)
return path
def _parse_results_path(self, path):
    """Return result metadata for a given result path

    On the pattern of:
        results/<modelrun_name>/<model_name>/
        decision_<id>_modelset_<id>/
        output_<output_name>_timestep_<timestep>.csv

    Parameters
    ----------
    path : str

    Returns
    -------
    dict : A dict containing all of the metadata
    """
    modelset_iteration = None
    decision_iteration = None
    # defensive defaults: a malformed path would otherwise leave these
    # names unbound at the return statement
    results = {}
    storage_format = None
    data = re.findall(r"[\w']+", path)
    for section in data[2:]:
        if 'modelset' in section or 'decision' in section:
            regex_decision = re.findall(r"decision_(\d+)", section)
            regex_modelset = re.findall(r"modelset_(\d+)", section)
            if regex_decision:
                decision_iteration = int(regex_decision[0])
            # bug fix: previously re-tested regex_decision here, so the
            # modelset iteration was never parsed (and an IndexError was
            # raised whenever decision matched but modelset did not)
            if regex_modelset:
                modelset_iteration = int(regex_modelset[0])
        elif section.startswith('output'):
            results = self._parse_output_section(section)
        elif section == 'csv':
            storage_format = 'local_csv'
        elif section == 'dat':
            storage_format = 'local_binary'
    return {
        'modelrun_id': data[0],
        'model_name': data[1],
        'output_name': '_'.join(results['output']),
        'timestep': results['timestep'],
        'modelset_iteration': modelset_iteration,
        'decision_iteration': decision_iteration,
        'storage_format': storage_format
    }
def _parse_output_section(self, section):
result_elements = re.findall(r"[^_]+", section)
results = {}
parse_element = ""
for element in result_elements:
if element in ('output', 'timestep', 'regions', 'intervals') and \
parse_element != element:
parse_element = element
elif parse_element == 'output':
results.setdefault('output', []).append(element)
elif parse_element == 'timestep':
results['timestep'] = int(element)
elif parse_element == 'regions':
results.setdefault('regions', []).append(element)
elif parse_element == 'intervals':
results.setdefault('intervals', []).append(element)
return results
# endregion
# region Common methods
def read_project_config(self):
    """Read the project configuration

    Returns
    -------
    dict
        The project configuration
    """
    # lazy read-through cache: only hit the filesystem after a write
    # has invalidated the cached copy
    if self._project_config_cache_invalid:
        self._project_config_cache = self._read_yaml_file(
            self.file_dir['project'], 'project')
        self._project_config_cache_invalid = False
    # deepcopy so callers cannot mutate the cached configuration
    return copy.deepcopy(self._project_config_cache)
def _write_project_config(self, data):
    """Write the project configuration

    Argument
    --------
    data: dict
        The project configuration
    """
    # invalidate the cache *before* writing, so a failed write cannot
    # leave a stale cached copy in use
    self._project_config_cache_invalid = True
    self._project_config_cache = None
    self._write_yaml_file(self.file_dir['project'], 'project', data)
@staticmethod
def _read_filenames_in_dir(path, extension):
"""Returns the name of the Yaml files in a certain directory
Arguments
---------
path: str
Path to directory
extension: str
Extension of files (such as: '.yml' or '.csv')
Returns
-------
list
The list of files in `path` with extension
"""
files = []
for filename in os.listdir(path):
if filename.endswith(extension):
files.append(os.path.splitext(filename)[0])
return files
@staticmethod
def _get_data_from_csv(filepath):
with open(filepath, 'r') as csvfile:
reader = csv.DictReader(csvfile)
scenario_data = list(reader)
return scenario_data
@staticmethod
def _write_data_to_csv(filepath, data, spec):
with open(filepath, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=tuple(spec.dims) + (spec.name, ))
writer.writeheader()
for row in data:
writer.writerow(row)
@staticmethod
def _get_data_from_native_file(filepath):
    """Read data from a pyarrow-serialized binary file.

    NOTE(review): pa.deserialize is deprecated/removed in recent pyarrow
    releases — confirm the pinned pyarrow version supports it.
    """
    # memory_map avoids copying the file contents into Python memory
    with pa.memory_map(filepath, 'rb') as native_file:
        native_file.seek(0)
        buf = native_file.read_buffer()
        data = pa.deserialize(buf)
    return data
@staticmethod
def _write_data_to_native_file(filepath, data):
    """Write data to a pyarrow-serialized binary file.

    NOTE(review): pa.serialize is deprecated/removed in recent pyarrow
    releases — confirm the pinned pyarrow version supports it.
    """
    with pa.OSFile(filepath, 'wb') as native_file:
        native_file.write(pa.serialize(data).to_buffer())
@staticmethod
def _read_yaml_file(path, filename=None, extension='.yml'):
    """Read a Data dict from a Yaml file

    Arguments
    ---------
    path: str
        Path to directory (or full file path, when filename is None)
    filename: str, optional
        Name of file, without extension
    extension: str, default='.yml'
        The file extension

    Returns
    -------
    dict
    """
    # when filename is None, `path` is treated as the full file path
    if filename is not None:
        filename = filename + extension
        filepath = os.path.join(path, filename)
    else:
        filepath = path
    return load(filepath)
@staticmethod
def _write_yaml_file(path, filename=None, data=None, extension='.yml'):
    """Write a data dict to a Yaml file

    Arguments
    ---------
    path: str
        Path to directory (or full file path, when filename is None)
    filename: str, optional
        Name of file, without extension
    data: dict
        Data to be written to the file
    extension: str, default='.yml'
        The file extension
    """
    # when filename is None, `path` is treated as the full file path
    if filename is not None:
        filename = filename + extension
        filepath = os.path.join(path, filename)
    else:
        filepath = path
    dump(data, filepath)
@staticmethod
def _read_spatial_file(filepath):
    """Read spatial dimension elements from a geojson/shapefile.

    Raises DataReadError when fiona (an optional dependency) is not
    installed.

    NOTE(review): fiona.drivers() is deprecated in favour of fiona.Env()
    in fiona >= 1.8 — confirm the pinned fiona version before changing.
    """
    try:
        with fiona.drivers():
            with fiona.open(filepath) as src:
                data = []
                for f in src:
                    element = {
                        'name': f['properties']['name'],
                        'feature': f
                    }
                    data.append(element)
                return data
    except NameError as ex:
        # message fix: the two concatenated lines previously joined as
        # "to readgeographic" — a space was missing
        msg = "Could not read spatial dimension definition. Please install fiona to read"
        msg += " geographic data files. Try running: \n"
        msg += "  pip install smif[spatial]\n"
        msg += "or:\n"
        msg += "  conda install fiona shapely rtree\n"
        raise DataReadError(msg) from ex
# endregion
def _file_dtypes():
return ('model_run', 'sos_model', 'sector_model')
def _config_dtypes():
return ('dimension', 'narrative', 'scenario')
def _nested_config_dtypes():
return ('narrative_variant', 'scenario_variant')
def _assert_no_mismatch(dtype, name, obj, secondary=None):
if obj is not None and 'name' in obj and name != obj['name']:
raise DataMismatchError("%s name '%s' must match '%s'" % (dtype, name, obj['name']))
def _file_exists(file_dir, dtype, name):
dir_key = "%ss" % dtype
return os.path.exists(os.path.join(file_dir[dir_key], name + '.yml'))
def _assert_file_exists(file_dir, dtype, name):
    """Raise DataNotFoundError when the item's YAML file does not exist."""
    if not _file_exists(file_dir, dtype, name):
        raise DataNotFoundError("%s '%s' not found" % (dtype, name))
def _assert_file_not_exists(file_dir, dtype, name):
    """Raise DataExistsError when the item's YAML file already exists."""
    if _file_exists(file_dir, dtype, name):
        raise DataExistsError("%s '%s' already exists" % (dtype, name))
def _config_item_exists(config, dtype, name):
    """Return True if an item of the given name exists under the dtype key."""
    key = "%ss" % dtype
    if key not in config:
        return False
    return _name_in_list(config[key], name)
def _nested_config_item_exists(config, dtype, parent_name, child_name):
    """Return True if a nested config item exists.

    dtype is of the form '<parent>_<child>' (e.g. 'scenario_variant');
    the corresponding config keys are the pluralised forms ('scenarios',
    'variants').
    """
    keys = dtype.split("_")
    parent_key = "%ss" % keys[0]
    child_key = "%ss" % keys[1]
    if parent_key not in config:
        return False
    parent_idx = _idx_in_list(config[parent_key], parent_name)
    if parent_idx is None:
        return False
    if child_key not in config[parent_key][parent_idx]:
        return False
    return _name_in_list(config[parent_key][parent_idx][child_key], child_name)
def _name_in_list(list_of_dicts, name):
for item in list_of_dicts:
if 'name' in item and item['name'] == name:
return True
return False
def _pick_from_list(list_of_dicts, name):
for item in list_of_dicts:
if 'name' in item and item['name'] == name:
return item
return None
def _idx_in_list(list_of_dicts, name):
for i, item in enumerate(list_of_dicts):
if 'name' in item and item['name'] == name:
return i
return None
def _assert_config_item_exists(config, dtype, name):
    """Raise DataNotFoundError unless the named config item exists."""
    if not _config_item_exists(config, dtype, name):
        raise DataNotFoundError("%s '%s' not found" % (dtype, name))
def _assert_config_item_not_exists(config, dtype, name):
    """Raise DataExistsError if the named config item already exists."""
    if _config_item_exists(config, dtype, name):
        raise DataExistsError("%s '%s' already exists" % (dtype, name))
def _assert_nested_config_item_exists(config, dtype, primary, secondary):
    """Raise DataNotFoundError unless the nested config item exists."""
    if not _nested_config_item_exists(config, dtype, primary, secondary):
        raise DataNotFoundError("%s '%s:%s' not found" % (dtype, primary, secondary))
def _assert_nested_config_item_not_exists(config, dtype, primary, secondary):
    """Raise DataExistsError if the nested config item already exists."""
    if _nested_config_item_exists(config, dtype, primary, secondary):
        raise DataExistsError("%s '%s:%s' already exists" % (dtype, primary, secondary))
# TODO: Write/update/delete dimension element files
"""File-backed data interface
"""
import copy
import csv
import glob
import os
import re
from functools import lru_cache, wraps
import pyarrow as pa
from smif.data_layer.data_interface import (DataExistsError, DataInterface,
DataMismatchError,
DataNotFoundError, DataReadError)
from smif.data_layer.load import dump, load
from smif.metadata import Spec
# Import fiona if available (optional dependency)
try:
import fiona
except ImportError:
pass
# Note: these decorators must be defined before being used below
def check_exists(dtype):
    """Decorator to check an item of dtype exists

    Raises DataNotFoundError (via the _assert_* helpers) before the
    wrapped method runs, and DataMismatchError when the passed object's
    'name' does not match the name argument.
    """
    def wrapper(func):
        """Decorator specialised by dtype (class/item type)
        """
        @wraps(func)
        def wrapped(self, name, primary=None, secondary=None, *func_args, **func_kwargs):
            """Wrapper to implement error checking
            """
            _assert_no_mismatch(dtype, name, primary, secondary)
            if dtype in _file_dtypes():
                _assert_file_exists(self.file_dir, dtype, name)
            if dtype in _config_dtypes():
                config = self.read_project_config()
                _assert_config_item_exists(config, dtype, name)
            if dtype in _nested_config_dtypes():
                config = self.read_project_config()
                # for nested dtypes, `name` is the parent name and
                # `primary` the child (variant) name
                _assert_nested_config_item_exists(config, dtype, name, primary)
            # forward only the arguments that were actually supplied, so
            # the wrapped function's own defaults still apply
            if primary is None:
                return func(self, name, *func_args, **func_kwargs)
            elif secondary is None:
                return func(self, name, primary, *func_args, **func_kwargs)
            return func(self, name, primary, secondary, *func_args, **func_kwargs)
        return wrapped
    return wrapper
def check_not_exists(dtype):
    """Decorator creator to check an item of dtype does not exist

    Raises DataExistsError (via the _assert_* helpers) before the
    wrapped method runs.
    """
    def wrapper(func):
        """Decorator specialised by dtype (class/item type)
        """
        @wraps(func)
        def wrapped(self, primary, secondary=None, *func_args, **func_kwargs):
            """Wrapper to implement error checking
            """
            # for file/config dtypes, `primary` is the new item dict
            if dtype in _file_dtypes():
                _assert_file_not_exists(self.file_dir, dtype, primary['name'])
            if dtype in _config_dtypes():
                config = self.read_project_config()
                _assert_config_item_not_exists(config, dtype, primary['name'])
            if dtype in _nested_config_dtypes():
                config = self.read_project_config()
                # NOTE(review): here `primary` is the parent name and
                # `secondary` the new child *dict* — _name_in_list then
                # compares a dict against item['name'], which can never
                # match; confirm whether secondary['name'] was intended
                _assert_nested_config_item_not_exists(config, dtype, primary, secondary)
            if secondary is None:
                return func(self, primary, *func_args, **func_kwargs)
            return func(self, primary, secondary, *func_args, **func_kwargs)
        return wrapped
    return wrapper
class DatafileInterface(DataInterface):
"""Read and write interface to YAML / CSV configuration files
and intermediate CSV / native-binary data storage.
Project.yml
Arguments
---------
base_folder: str
The path to the configuration and data files
storage_format: str
The format used to store intermediate data (local_csv, local_binary)
"""
def __init__(self, base_folder, storage_format='local_binary'):
    """Set up file paths and caches; creates data directories on disk.

    Arguments
    ---------
    base_folder : str
        The path to the configuration and data files
    storage_format : str
        The format used to store intermediate data
        ('local_csv' or 'local_binary')
    """
    super().__init__()
    self.base_folder = base_folder
    self.storage_format = storage_format
    # map of category name -> absolute directory (or project file) path
    self.file_dir = {}
    self.file_dir['project'] = os.path.join(base_folder, 'config')
    self.file_dir['results'] = os.path.join(base_folder, 'results')
    # cache results of reading project_config (invalidate on write)
    self._project_config_cache_invalid = True
    # MUST ONLY access through self._read_project_config()
    self._project_config_cache = None
    config_folders = {
        'model_runs': 'config',
        'sos_models': 'config',
        'sector_models': 'config',
        'strategies': 'data',
        'interventions': 'data',
        'initial_conditions': 'data',
        'dimensions': 'data',
        'coefficients': 'data',
        'scenarios': 'data',
        'narratives': 'data',
    }
    for category, folder in config_folders.items():
        dirname = os.path.join(base_folder, folder, category)
        # ensure each directory exists
        os.makedirs(dirname, exist_ok=True)
        # store dirname
        self.file_dir[category] = dirname
# region Model runs
def read_model_runs(self):
    """Read all model run configurations."""
    names = self._read_filenames_in_dir(self.file_dir['model_runs'], '.yml')
    return [self.read_model_run(name) for name in names]
@check_exists(dtype='model_run')
def read_model_run(self, model_run_name):
    """Read a single model run configuration by name."""
    directory = self.file_dir['model_runs']
    return self._read_yaml_file(directory, model_run_name)
@check_not_exists(dtype='model_run')
def write_model_run(self, model_run):
    """Write a new model run configuration file."""
    directory = self.file_dir['model_runs']
    self._write_yaml_file(directory, model_run['name'], model_run)
@check_exists(dtype='model_run')
def update_model_run(self, model_run_name, model_run):
    """Overwrite an existing model run configuration file."""
    directory = self.file_dir['model_runs']
    self._write_yaml_file(directory, model_run['name'], model_run)
@check_exists(dtype='model_run')
def delete_model_run(self, model_run_name):
    """Delete a model run configuration file."""
    target = os.path.join(self.file_dir['model_runs'], model_run_name + '.yml')
    os.remove(target)
# endregion
# region System-of-system models
def read_sos_models(self):
    """Read all system-of-systems model configurations."""
    names = self._read_filenames_in_dir(self.file_dir['sos_models'], '.yml')
    return [self.read_sos_model(name) for name in names]
@check_exists(dtype='sos_model')
def read_sos_model(self, sos_model_name):
    """Read a single system-of-systems model configuration by name."""
    directory = self.file_dir['sos_models']
    return self._read_yaml_file(directory, sos_model_name)
@check_not_exists(dtype='sos_model')
def write_sos_model(self, sos_model):
    """Write a new system-of-systems model configuration file."""
    directory = self.file_dir['sos_models']
    self._write_yaml_file(directory, sos_model['name'], sos_model)
@check_exists(dtype='sos_model')
def update_sos_model(self, sos_model_name, sos_model):
    """Overwrite an existing system-of-systems model configuration file."""
    directory = self.file_dir['sos_models']
    self._write_yaml_file(directory, sos_model['name'], sos_model)
@check_exists(dtype='sos_model')
def delete_sos_model(self, sos_model_name):
    """Delete a system-of-systems model configuration file."""
    target = os.path.join(self.file_dir['sos_models'], sos_model_name + '.yml')
    os.remove(target)
# endregion
# region Sector models
def read_sector_models(self):
    """Read all sector model configurations."""
    names = self._read_filenames_in_dir(self.file_dir['sector_models'], '.yml')
    return [self.read_sector_model(name) for name in names]
@check_exists(dtype='sector_model')
def read_sector_model(self, sector_model_name):
    """Read a sector model config, populating coords on specs."""
    sector_model = self._read_yaml_file(self.file_dir['sector_models'], sector_model_name)
    for key in ('inputs', 'outputs', 'parameters'):
        self._set_list_coords(sector_model[key])
    return sector_model
@check_not_exists(dtype='sector_model')
def write_sector_model(self, sector_model):
    """Write a sector model config; interventions are not persisted here."""
    directory = self.file_dir['sector_models']
    if sector_model['interventions']:
        self.logger.warning("Ignoring interventions")
        sector_model['interventions'] = []
    self._write_yaml_file(directory, sector_model['name'], sector_model)
@check_exists(dtype='sector_model')
def update_sector_model(self, sector_model_name, sector_model):
    """Overwrite a sector model config, preserving interventions and
    initial conditions already on disk (the app does not handle them).
    """
    # ignore interventions and initial conditions which the app doesn't handle
    if sector_model['interventions'] or sector_model['initial_conditions']:
        # read the stored config so the existing values can be kept
        old_sector_model = self._read_yaml_file(
            self.file_dir['sector_models'], sector_model['name'])
        if sector_model['interventions']:
            self.logger.warning("Ignoring interventions write")
            sector_model['interventions'] = old_sector_model['interventions']
        if sector_model['initial_conditions']:
            self.logger.warning("Ignoring initial conditions write")
            sector_model['initial_conditions'] = old_sector_model['initial_conditions']
    self._write_yaml_file(
        self.file_dir['sector_models'], sector_model['name'], sector_model)
@check_exists(dtype='sector_model')
def delete_sector_model(self, sector_model_name):
    """Delete a sector model configuration file."""
    target = os.path.join(self.file_dir['sector_models'], sector_model_name + '.yml')
    os.remove(target)
@check_exists(dtype='sector_model')
def read_interventions(self, sector_model_name):
    """Read interventions for a sector model, keyed by intervention name.

    Raises ValueError on a duplicate intervention name.
    """
    sector_model = self._read_yaml_file(self.file_dir['sector_models'], sector_model_name)
    entries = self._read_interventions_files(
        sector_model['interventions'], 'interventions')
    all_interventions = {}
    for entry in entries:
        name = entry.pop('name')
        if name in all_interventions:
            msg = "An entry for intervention {} already exists"
            raise ValueError(msg.format(name))
        all_interventions[name] = entry
    return all_interventions
@check_exists(dtype='sector_model')
def read_initial_conditions(self, sector_model_name):
sector_model = self._read_yaml_file(self.file_dir['sector_models'], sector_model_name)
return self._read_interventions_files(
sector_model['initial_conditions'], 'initial_conditions')
def _read_interventions_files(self, filenames, dirname):
intervention_list = []
for filename in filenames:
interventions = self._read_interventions_file(filename, dirname)
intervention_list.extend(interventions)
return intervention_list
    def _read_interventions_file(self, filename, dirname):
        """Read the planned intervention data from a file

        Planned interventions are stored either a csv or yaml file. In the case
        of the former, the file should look like this::

            name,build_year
            asset_a,2010
            asset_b,2015

        In the case of a yaml, file, the format is as follows::

            - name: asset_a
              build_year: 2010
            - name: asset_b
              build_year: 2015

        Arguments
        ---------
        filename: str
            The name of the strategy yml or csv file to read in
        dirname: str
            The key of the dirname e.g. ``strategies`` or ``initial_conditions``

        Returns
        -------
        dict of dict
            Dict of intervention attribute dicts, keyed by intervention name
        """
        # Resolve the directory configured for this kind of file
        filepath = self.file_dir[dirname]
        _, ext = os.path.splitext(filename)
        if ext == '.csv':
            # csv rows come back as flat dicts; fold *_value/*_unit column
            # pairs into nested dicts
            data = self._read_state_file(os.path.join(filepath, filename))
            try:
                data = self._reshape_csv_interventions(data)
            except ValueError:
                raise ValueError("Error reshaping data for {}".format(filename))
        else:
            # Anything else is treated as yaml; `filename` already carries
            # its extension, hence extension=''
            data = self._read_yaml_file(filepath, filename, extension='')
        return data
def _reshape_csv_interventions(self, data):
"""
Arguments
---------
data : list of dict
A list of dicts containing intervention data
Returns
-------
dict of dicts
"""
new_data = []
for element in data:
reshaped_data = {}
for key, value in element.items():
if key.endswith(('_value', '_unit')):
new_key, sub_key = key.rsplit(sep="_", maxsplit=1)
if new_key in reshaped_data:
if not isinstance(reshaped_data[new_key], dict):
msg = "Duplicate heading in csv data: {}"
raise ValueError(msg.format(new_key))
else:
reshaped_data[new_key].update({sub_key: value})
else:
reshaped_data[new_key] = {sub_key: value}
else:
if key in reshaped_data:
msg = "Duplicate heading in csv data: {}"
raise ValueError(msg.format(new_key))
else:
reshaped_data[key] = value
new_data.append(reshaped_data)
return new_data
# endregion
# region Strategies
def read_strategies(self, model_run_name):
strategies = []
model_run_config = self.read_model_run(model_run_name)
for strategy in model_run_config['strategies']:
if strategy['strategy'] == 'pre-specified-planning':
decisions = self._read_interventions_file(strategy['filename'], 'strategies')
if decisions is None:
decisions = []
del strategy['filename']
strategy['interventions'] = decisions
self.logger.info("Added %s pre-specified planning interventions to %s",
len(decisions), strategy['model_name'])
strategies.append(strategy)
return strategies
# endregion
# region State
def read_state(self, modelrun_name, timestep, decision_iteration=None):
fname = self._get_state_filename(modelrun_name, timestep, decision_iteration)
if not os.path.exists(fname):
msg = "State file does not exist for timestep {} and iteration {}"
raise DataNotFoundError(msg.format(timestep, decision_iteration))
state = self._read_state_file(fname)
return state
def write_state(self, state, modelrun_name, timestep=None, decision_iteration=None):
fname = self._get_state_filename(modelrun_name, timestep, decision_iteration)
os.makedirs(os.path.dirname(fname), exist_ok=True)
with open(fname, 'w+') as file_handle:
writer = csv.DictWriter(file_handle, fieldnames=('name', 'build_year'))
writer.writeheader()
for row in state:
writer.writerow(row)
def _get_state_filename(self, modelrun_name, timestep=None, decision_iteration=None):
"""Compose a unique filename for state file:
state_{timestep|0000}[_decision_{iteration}].csv
"""
results_dir = self.file_dir['results']
if timestep is None:
timestep = '0000'
if decision_iteration is None:
separator = ''
decision_iteration = ''
else:
separator = '_decision_'
fmt = 'state_{}{}{}.csv'
fname = os.path.join(
results_dir, modelrun_name, fmt.format(timestep, separator, decision_iteration))
return fname
@staticmethod
def _read_state_file(fname):
"""Read list of {name, build_year} dicts from state file
"""
with open(fname, 'r') as file_handle:
reader = csv.DictReader(file_handle)
state = list(reader)
return state
# endregion
# region Units
def read_unit_definitions(self):
project_config = self.read_project_config()
filename = project_config['units']
if filename is not None:
path = os.path.join(self.base_folder, 'data', filename)
try:
with open(path, 'r') as units_fh:
return [line.strip() for line in units_fh]
except FileNotFoundError as ex:
raise DataNotFoundError('Units file not found:' + str(ex)) from ex
else:
return []
# endregion
# region Dimensions
def read_dimensions(self):
project_config = self.read_project_config()
for dim in project_config['dimensions']:
dim['elements'] = self._read_dimension_file(dim['elements'])
return project_config['dimensions']
@check_exists(dtype='dimension')
def read_dimension(self, dimension_name):
project_config = self.read_project_config()
dim = _pick_from_list(project_config['dimensions'], dimension_name)
dim['elements'] = self._read_dimension_file(dim['elements'])
return dim
def _set_list_coords(self, list_):
for item in list_:
self._set_item_coords(item)
def _set_item_coords(self, item):
if 'dims' in item:
item['coords'] = {
dim: self.read_dimension(dim)['elements']
for dim in item['dims']
}
    @lru_cache(maxsize=32)
    def _read_dimension_file(self, filename):
        """Read dimension elements from a yaml, csv or spatial file.

        Results are memoized; `_write_dimension_file` clears the cache.
        NOTE(review): lru_cache on an instance method keys on `self` and
        keeps each instance alive for the cache's lifetime — acceptable if
        this store is a long-lived singleton; confirm.

        Raises DataReadError for an unrecognised file extension.
        """
        filepath = os.path.join(self.file_dir['dimensions'], filename)
        _, ext = os.path.splitext(filename)
        if ext in ('.yml', '.yaml'):
            data = self._read_yaml_file(filepath)
        elif ext == '.csv':
            data = self._get_data_from_csv(filepath)
        elif ext in ('.geojson', '.shp'):
            data = self._read_spatial_file(filepath)
        else:
            msg = "Extension '{}' not recognised, expected one of ('.csv', '.yml', '.yaml', "
            msg += "'.geojson', '.shp') when reading {}"
            raise DataReadError(msg.format(ext, filepath))
        return data
    def _write_dimension_file(self, filename, data):
        """Write dimension elements to a yaml or csv file and return `data`.

        NOTE(review): raises DataReadError (not a write-specific error) for
        unknown extensions, mirroring _read_dimension_file — confirm this
        is intentional before changing.
        """
        # lru_cache may now be invalid, so clear it
        self._read_dimension_file.cache_clear()
        filepath = os.path.join(self.file_dir['dimensions'], filename)
        _, ext = os.path.splitext(filename)
        if ext in ('.yml', '.yaml'):
            self._write_yaml_file(filepath, data=data)
        elif ext == '.csv':
            self._write_data_to_csv(filepath, data)
        elif ext in ('.geojson', '.shp'):
            raise NotImplementedError("Writing spatial dimensions not yet supported")
            # self._write_spatial_file(filepath)
        else:
            msg = "Extension '{}' not recognised, expected one of ('.csv', '.yml', '.yaml', "
            msg += "'.geojson', '.shp') when writing {}"
            raise DataReadError(msg.format(ext, filepath))
        return data
def _delete_dimension_file(self, filename):
os.remove(os.path.join(self.file_dir['dimensions'], filename))
@check_not_exists(dtype='dimension')
def write_dimension(self, dimension):
project_config = self.read_project_config()
# write elements to yml file (by default, can handle any nested data)
filename = "{}.yml".format(dimension['name'])
elements = dimension['elements']
self._write_dimension_file(filename, elements)
# refer to elements by filename and add to config
dimension['elements'] = filename
project_config['dimensions'].append(dimension)
self._write_project_config(project_config)
@check_exists(dtype='dimension')
def update_dimension(self, dimension_name, dimension):
project_config = self.read_project_config()
idx = _idx_in_list(project_config['dimensions'], dimension_name)
# look up project-config filename and write elements
filename = project_config['dimensions'][idx]['elements']
elements = dimension['elements']
self._write_dimension_file(filename, elements)
# refer to elements by filename and update config
dimension['elements'] = filename
project_config['dimensions'][idx] = dimension
self._write_project_config(project_config)
@check_exists(dtype='dimension')
def delete_dimension(self, dimension_name):
project_config = self.read_project_config()
idx = _idx_in_list(project_config['dimensions'], dimension_name)
# look up project-config filename and delete file
filename = project_config['dimensions'][idx]['elements']
self._delete_dimension_file(filename)
# delete from config
del project_config['dimensions'][idx]
self._write_project_config(project_config)
# endregion
# region Conversion coefficients
def read_coefficients(self, source_spec, destination_spec):
results_path = self._get_coefficients_path(source_spec, destination_spec)
if os.path.isfile(results_path):
return self._get_data_from_native_file(results_path)
msg = "Could not find the coefficients file for %s to %s"
self.logger.warning(msg, source_spec, destination_spec)
return None
def write_coefficients(self, source_spec, destination_spec, data):
results_path = self._get_coefficients_path(source_spec, destination_spec)
self._write_data_to_native_file(results_path, data)
def _get_coefficients_path(self, source_spec, destination_spec):
results_dir = self.file_dir['coefficients']
path = os.path.join(
results_dir,
"{}_{}.{}_{}.dat".format(
source_spec.name, "-".join(source_spec.dims),
destination_spec.name, "-".join(destination_spec.dims)
)
)
return path
# endregion
# region Scenarios
def read_scenarios(self):
project_config = self.read_project_config()
return project_config['scenarios']
@check_exists(dtype='scenario')
def read_scenario(self, scenario_name):
project_config = self.read_project_config()
scenario = _pick_from_list(project_config['scenarios'], scenario_name)
self._set_list_coords(scenario['provides'])
return scenario
@check_not_exists(dtype='scenario')
def write_scenario(self, scenario):
project_config = self.read_project_config()
project_config['scenarios'].append(scenario)
self._write_project_config(project_config)
@check_exists(dtype='scenario')
def update_scenario(self, scenario_name, scenario):
project_config = self.read_project_config()
idx = _idx_in_list(project_config['scenarios'], scenario_name)
project_config['scenarios'][idx] = scenario
self._write_project_config(project_config)
@check_exists(dtype='scenario')
def delete_scenario(self, scenario_name):
project_config = self.read_project_config()
idx = _idx_in_list(project_config['scenarios'], scenario_name)
del project_config['scenarios'][idx]
self._write_project_config(project_config)
@check_exists(dtype='scenario')
def read_scenario_variants(self, scenario_name):
project_config = self.read_project_config()
scenario = _pick_from_list(project_config['scenarios'], scenario_name)
return scenario['variants']
@check_exists(dtype='scenario_variant')
def read_scenario_variant(self, scenario_name, variant_name):
variants = self.read_scenario_variants(scenario_name)
return _pick_from_list(variants, variant_name)
@check_not_exists(dtype='scenario_variant')
def write_scenario_variant(self, scenario_name, variant):
project_config = self.read_project_config()
s_idx = _idx_in_list(project_config['scenarios'], scenario_name)
project_config['scenarios'][s_idx]['variants'].append(variant)
self._write_project_config(project_config)
@check_exists(dtype='scenario_variant')
def update_scenario_variant(self, scenario_name, variant_name, variant):
project_config = self.read_project_config()
s_idx = _idx_in_list(project_config['scenarios'], scenario_name)
v_idx = _idx_in_list(project_config['scenarios'][s_idx]['variants'], variant_name)
project_config['scenarios'][s_idx]['variants'][v_idx] = variant
self._write_project_config(project_config)
@check_exists(dtype='scenario_variant')
def delete_scenario_variant(self, scenario_name, variant_name):
project_config = self.read_project_config()
s_idx = _idx_in_list(project_config['scenarios'], scenario_name)
v_idx = _idx_in_list(project_config['scenarios'][s_idx]['variants'], variant_name)
del project_config['scenarios'][s_idx]['variants'][v_idx]
self._write_project_config(project_config)
@check_exists(dtype='scenario_variant')
def read_scenario_variant_data(self, scenario_name, variant_name, variable, timestep=None):
spec = self._read_scenario_variable_spec(scenario_name, variable)
filepath = self._get_scenario_variant_filepath(scenario_name, variant_name, variable)
data = self._get_data_from_csv(filepath)
if timestep is not None:
data = [datum for datum in data if int(datum['timestep']) == timestep]
try:
array = self.data_list_to_ndarray(data, spec)
except DataMismatchError as ex:
msg = "DataMismatch in scenario: {}:{}.{}, from {}"
raise DataMismatchError(
msg.format(scenario_name, variant_name, variable, str(ex))
) from ex
return array
@check_exists(dtype='scenario_variant')
def write_scenario_variant_data(self, data, scenario_name, variant_name, variable,
timestep=None):
spec = self._read_scenario_variable_spec(scenario_name, variable)
data = self.ndarray_to_data_list(data, spec)
filepath = self._get_scenario_variant_filepath(scenario_name, variant_name, variable)
self._write_data_to_csv(filepath, data, spec=spec)
def _get_scenario_variant_filepath(self, scenario_name, variant_name, variable):
variant = self.read_scenario_variant(scenario_name, variant_name)
if 'data' not in variant or variable not in variant['data']:
raise DataNotFoundError(
"Scenario data file not defined for {}:{}, {}".format(
scenario_name, variant_name, variable)
)
filename = variant['data'][variable]
return os.path.join(self.file_dir['scenarios'], filename)
def _read_scenario_variable_spec(self, scenario_name, variable):
# Read spec from scenario->provides->variable
scenario = self.read_scenario(scenario_name)
spec = _pick_from_list(scenario['provides'], variable)
self._set_item_coords(spec)
return Spec.from_dict(spec)
# endregion
# region Narratives
def read_narratives(self):
# Find filename for this narrative
project_config = self.read_project_config()
return project_config['narratives']
@check_exists(dtype='narrative')
def read_narrative(self, narrative_name):
project_config = self.read_project_config()
return _pick_from_list(project_config['narratives'], narrative_name)
@check_not_exists(dtype='narrative')
def write_narrative(self, narrative):
project_config = self.read_project_config()
project_config['narratives'].append(narrative)
self._write_project_config(project_config)
@check_exists(dtype='narrative')
def update_narrative(self, narrative_name, narrative):
project_config = self.read_project_config()
idx = _idx_in_list(project_config['narratives'], narrative_name)
project_config['narratives'][idx] = narrative
self._write_project_config(project_config)
@check_exists(dtype='narrative')
def delete_narrative(self, narrative_name):
project_config = self.read_project_config()
idx = _idx_in_list(project_config['narratives'], narrative_name)
del project_config['narratives'][idx]
self._write_project_config(project_config)
@check_exists(dtype='narrative')
def read_narrative_variants(self, narrative_name):
project_config = self.read_project_config()
return project_config['narratives'][narrative_name]['variants']
@check_exists(dtype='narrative_variant')
def read_narrative_variant(self, narrative_name, variant_name):
project_config = self.read_project_config()
n_idx = _idx_in_list(project_config['narratives'], narrative_name)
variants = project_config['narratives'][n_idx]['variants']
return _pick_from_list(variants, variant_name)
@check_not_exists(dtype='narrative_variant')
def write_narrative_variant(self, narrative_name, variant):
project_config = self.read_project_config()
project_config['narratives'][narrative_name]['variants'].append(variant)
self._write_project_config(project_config)
@check_exists(dtype='narrative_variant')
def update_narrative_variant(self, narrative_name, variant_name, variant):
project_config = self.read_project_config()
n_idx = _idx_in_list(project_config['narratives'], narrative_name)
v_idx = _idx_in_list(project_config['narratives'][n_idx]['variants'], variant_name)
project_config['narratives'][n_idx]['variants'][v_idx] = variant
self._write_project_config(project_config)
@check_exists(dtype='narrative_variant')
def delete_narrative_variant(self, narrative_name, variant_name):
project_config = self.read_project_config()
n_idx = _idx_in_list(project_config['narratives'], narrative_name)
v_idx = _idx_in_list(project_config['narratives'][n_idx]['variants'], variant_name)
del project_config['narratives'][n_idx]['variants'][v_idx]
self._write_project_config(project_config)
@check_exists(dtype='narrative_variant')
def read_narrative_variant_data(self, narrative_name, variant_name, variable,
timestep=None):
spec = self._read_narrative_variable_spec(narrative_name, variable)
filepath = self._get_narrative_variant_filepath(narrative_name, variant_name, variable)
data = self._get_data_from_csv(filepath)
if timestep is not None:
data = [datum for datum in data if int(datum['timestep']) == timestep]
try:
array = self.data_list_to_ndarray(data, spec)
except DataMismatchError as ex:
msg = "DataMismatch in narrative: {}:{}, {}, from {}"
raise DataMismatchError(
msg.format(narrative_name, variant_name, variable, str(ex))
) from ex
return array
@check_exists(dtype='narrative_variant')
def write_narrative_variant_data(self, data, narrative_name, variant_name, variable,
timestep=None):
spec = self._read_narrative_variable_spec(narrative_name, variable)
data = self.ndarray_to_data_list(data, spec)
filepath = self._get_narrative_variant_filepath(narrative_name, variant_name, variable)
self._write_data_to_csv(filepath, data, spec=spec)
def _get_narrative_variant_filepath(self, narrative_name, variant_name, variable):
variant = self.read_narrative_variant(narrative_name, variant_name)
if 'data' not in variant or variable not in variant['data']:
raise DataNotFoundError(
"narrative data file not defined for {}:{}, {}".format(
narrative_name, variant_name, variable)
)
filename = variant['data'][variable]
return os.path.join(self.file_dir['narratives'], filename)
def _read_narrative_variable_spec(self, narrative_name, variable):
# Read spec from narrative->provides->variable
narrative = self.read_narrative(narrative_name)
spec = _pick_from_list(narrative['provides'], variable)
self._set_item_coords(spec)
return Spec.from_dict(spec)
# endregion
# region Results
def read_results(self, modelrun_id, model_name, output_spec, timestep=None,
modelset_iteration=None, decision_iteration=None):
if timestep is None:
raise NotImplementedError()
results_path = self._get_results_path(
modelrun_id, model_name, output_spec.name,
timestep, modelset_iteration, decision_iteration
)
if self.storage_format == 'local_csv':
data = self._get_data_from_csv(results_path)
return self.data_list_to_ndarray(data, output_spec)
if self.storage_format == 'local_binary':
return self._get_data_from_native_file(results_path)
def write_results(self, data, modelrun_id, model_name, output_spec, timestep=None,
modelset_iteration=None, decision_iteration=None):
if timestep is None:
raise NotImplementedError()
results_path = self._get_results_path(
modelrun_id, model_name, output_spec.name,
timestep, modelset_iteration, decision_iteration
)
os.makedirs(os.path.dirname(results_path), exist_ok=True)
if self.storage_format == 'local_csv':
data = self.ndarray_to_data_list(data, output_spec)
self._write_data_to_csv(results_path, data, spec=output_spec)
if self.storage_format == 'local_binary':
self._write_data_to_native_file(results_path, data)
def _results_exist(self, modelrun_name):
"""Checks whether modelrun results exists on the filesystem
for a particular modelrun_name
Parameters
----------
modelrun_name: str
Returns
-------
bool: True when results exist for this modelrun_name
"""
previous_results_dir = os.path.join(self.file_dir['results'], modelrun_name)
return list(
glob.iglob(os.path.join(previous_results_dir, '**/*.*'), recursive=True))
    def prepare_warm_start(self, modelrun_id):
        """Remove results for the latest timestep of a previous run so the
        modelrun can resume there, and return that timestep.

        Returns None when a warm start is not possible: no previous
        results directory, no result files, or result files whose
        extensions do not match the current storage format.
        """
        results_dir = os.path.join(self.file_dir['results'], modelrun_id)
        # Return if path to previous modelruns does not exist
        if not os.path.isdir(results_dir):
            self.logger.info("Warm start not possible because modelrun has "
                             "no previous results (path does not exist)")
            return None
        # Return if no results exist in last modelrun
        if not self._results_exist(modelrun_id):
            self.logger.info("Warm start not possible because the "
                             "modelrun does not have any results")
            return None
        # Return if previous results were stored in a different format
        previous_results_dir = os.path.join(self.file_dir['results'], modelrun_id)
        results = list(glob.iglob(os.path.join(previous_results_dir, '**/*.*'),
                                  recursive=True))
        for filename in results:
            warn = (self.storage_format == 'local_csv' and not filename.endswith(".csv")) or \
                   (self.storage_format == 'local_binary' and not filename.endswith(".dat"))
            if warn:
                self.logger.info("Warm start not possible because a different "
                                 "storage mode was used in the previous run")
                return None
        # Perform warm start
        self.logger.info("Warm start %s", modelrun_id)
        # Get metadata for all results
        result_metadata = []
        for filename in glob.iglob(os.path.join(results_dir, '**/*.*'), recursive=True):
            # strip the results root (and its trailing separator) to get a
            # path relative to the results directory
            result_metadata.append(self._parse_results_path(
                filename.replace(self.file_dir['results'], '')[1:]))
        # Find latest timestep
        result_metadata = sorted(result_metadata, key=lambda k: k['timestep'], reverse=True)
        latest_timestep = result_metadata[0]['timestep']
        # Remove all results with this timestep
        results_to_remove = [
            result for result in result_metadata
            if result['timestep'] == latest_timestep
        ]
        for result in results_to_remove:
            os.remove(
                self._get_results_path(
                    result['modelrun_id'],
                    result['model_name'],
                    result['output_name'],
                    result['timestep'],
                    result['modelset_iteration'],
                    result['decision_iteration']))
        self.logger.info("Warm start will resume at timestep %s", latest_timestep)
        return latest_timestep
    def _get_results_path(self, modelrun_id, model_name, output_name, timestep,
                          modelset_iteration=None, decision_iteration=None):
        """Return path to filename for a given output without file extension

        On the pattern of:
            results/<modelrun_name>/<model_name>/
            decision_<id>_modelset_<id>/
            output_<output_name>_timestep_<timestep>.csv

        Parameters
        ----------
        modelrun_id : str
        model_name : str
        output_name : str
        timestep : str or int
        modelset_iteration : int, optional
            Rendered as the literal 'none' in the path when omitted
        decision_iteration : int, optional
            Rendered as the literal 'none' in the path when omitted

        Returns
        -------
        path : str
        """
        results_dir = self.file_dir['results']
        if modelset_iteration is None:
            modelset_iteration = 'none'
        if decision_iteration is None:
            decision_iteration = 'none'
        # extension follows the configured storage format
        if self.storage_format == 'local_csv':
            ext = 'csv'
        elif self.storage_format == 'local_binary':
            ext = 'dat'
        else:
            ext = 'unknown'
        path = os.path.join(
            results_dir, modelrun_id, model_name,
            "decision_{}_modelset_{}".format(decision_iteration, modelset_iteration),
            "output_{}_timestep_{}.{}".format(output_name, timestep, ext)
        )
        return path
def _parse_results_path(self, path):
"""Return result metadata for a given result path
On the pattern of:
results/<modelrun_name>/<model_name>/
decision_<id>_modelset_<id>/
output_<output_name>_timestep_<timestep>.csv
Parameters
----------
path : str
Returns
-------
dict : A dict containing all of the metadata
"""
modelset_iteration = None
decision_iteration = None
data = re.findall(r"[\w']+", path)
for section in data[2:len(data)]:
if 'modelset' in section or 'decision' in section:
regex_decision = re.findall(r"decision_(\d{1,})", section)
regex_modelset = re.findall(r"modelset_(\d{1,})", section)
if regex_decision:
decision_iteration = int(regex_decision[0])
if regex_decision:
modelset_iteration = int(regex_modelset[0])
elif section.startswith('output'):
results = self._parse_output_section(section)
elif section == 'csv':
storage_format = 'local_csv'
elif section == 'dat':
storage_format = 'local_binary'
return {
'modelrun_id': data[0],
'model_name': data[1],
'output_name': '_'.join(results['output']),
'timestep': results['timestep'],
'modelset_iteration': modelset_iteration,
'decision_iteration': decision_iteration,
'storage_format': storage_format
}
def _parse_output_section(self, section):
result_elements = re.findall(r"[^_]+", section)
results = {}
parse_element = ""
for element in result_elements:
if element in ('output', 'timestep', 'regions', 'intervals') and \
parse_element != element:
parse_element = element
elif parse_element == 'output':
results.setdefault('output', []).append(element)
elif parse_element == 'timestep':
results['timestep'] = int(element)
elif parse_element == 'regions':
results.setdefault('regions', []).append(element)
elif parse_element == 'intervals':
results.setdefault('intervals', []).append(element)
return results
# endregion
# region Common methods
    def read_project_config(self):
        """Read the project configuration

        Returns
        -------
        dict
            The project configuration
        """
        # re-read from disk only after a write invalidated the cache
        if self._project_config_cache_invalid:
            self._project_config_cache = self._read_yaml_file(
                self.file_dir['project'], 'project')
            self._project_config_cache_invalid = False
        # deep copy so callers cannot mutate the cached config in place
        return copy.deepcopy(self._project_config_cache)
    def _write_project_config(self, data):
        """Write the project configuration

        Arguments
        ---------
        data: dict
            The project configuration
        """
        # invalidate and drop the read cache so the next read_project_config
        # reloads from disk
        self._project_config_cache_invalid = True
        self._project_config_cache = None
        self._write_yaml_file(self.file_dir['project'], 'project', data)
@staticmethod
def _read_filenames_in_dir(path, extension):
"""Returns the name of the Yaml files in a certain directory
Arguments
---------
path: str
Path to directory
extension: str
Extension of files (such as: '.yml' or '.csv')
Returns
-------
list
The list of files in `path` with extension
"""
files = []
for filename in os.listdir(path):
if filename.endswith(extension):
files.append(os.path.splitext(filename)[0])
return files
@staticmethod
def _get_data_from_csv(filepath):
with open(filepath, 'r') as csvfile:
reader = csv.DictReader(csvfile)
scenario_data = list(reader)
return scenario_data
@staticmethod
def _write_data_to_csv(filepath, data, spec=None, fieldnames=None):
if fieldnames is not None:
pass
elif spec is not None:
fieldnames = tuple(spec.dims) + (spec.name, )
else:
fieldnames = tuple(data[0].keys())
with open(filepath, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in data:
writer.writerow(row)
@staticmethod
def _get_data_from_native_file(filepath):
with pa.memory_map(filepath, 'rb') as native_file:
native_file.seek(0)
buf = native_file.read_buffer()
data = pa.deserialize(buf)
return data
@staticmethod
def _write_data_to_native_file(filepath, data):
with pa.OSFile(filepath, 'wb') as native_file:
native_file.write(pa.serialize(data).to_buffer())
@staticmethod
def _read_yaml_file(path, filename=None, extension='.yml'):
"""Read a Data dict from a Yaml file
Arguments
---------
path: str
Path to directory
name: str
Name of file
extension: str, default='.yml'
The file extension
Returns
-------
dict
"""
if filename is not None:
filename = filename + extension
filepath = os.path.join(path, filename)
else:
filepath = path
return load(filepath)
@staticmethod
def _write_yaml_file(path, filename=None, data=None, extension='.yml'):
"""Write a data dict to a Yaml file
Arguments
---------
path: str
Path to directory
name: str
Name of file
data: dict
Data to be written to the file
extension: str, default='.yml'
The file extension
"""
if filename is not None:
filename = filename + extension
filepath = os.path.join(path, filename)
else:
filepath = path
dump(data, filepath)
@staticmethod
def _read_spatial_file(filepath):
try:
with fiona.drivers():
with fiona.open(filepath) as src:
data = []
for f in src:
element = {
'name': f['properties']['name'],
'feature': f
}
data.append(element)
return data
except NameError as ex:
msg = "Could not read spatial dimension definition. Please install fiona to read"
msg += "geographic data files. Try running: \n"
msg += " pip install smif[spatial]\n"
msg += "or:\n"
msg += " conda install fiona shapely rtree\n"
raise DataReadError(msg) from ex
# endregion
def _file_dtypes():
return ('model_run', 'sos_model', 'sector_model')
def _config_dtypes():
return ('dimension', 'narrative', 'scenario')
def _nested_config_dtypes():
return ('narrative_variant', 'scenario_variant')
def _assert_no_mismatch(dtype, name, obj, secondary=None):
if obj is not None and 'name' in obj and name != obj['name']:
raise DataMismatchError("%s name '%s' must match '%s'" % (dtype, name, obj['name']))
def _file_exists(file_dir, dtype, name):
dir_key = "%ss" % dtype
return os.path.exists(os.path.join(file_dir[dir_key], name + '.yml'))
def _assert_file_exists(file_dir, dtype, name):
    """Raise DataNotFoundError unless the item's yml file exists."""
    if not _file_exists(file_dir, dtype, name):
        raise DataNotFoundError("%s '%s' not found" % (dtype, name))
def _assert_file_not_exists(file_dir, dtype, name):
    """Raise DataExistsError if the item's yml file already exists."""
    if _file_exists(file_dir, dtype, name):
        raise DataExistsError("%s '%s' already exists" % (dtype, name))
def _config_item_exists(config, dtype, name):
    """True if a named item exists under the pluralised dtype key."""
    key = "%ss" % dtype
    if key not in config:
        return False
    return _name_in_list(config[key], name)
def _nested_config_item_exists(config, dtype, parent_name, child_name):
    """True if `child_name` exists under `parent_name` for a nested dtype
    such as 'scenario_variant' (parent key 'scenarios', child key
    'variants')."""
    parent_dtype, child_dtype = dtype.split("_")
    parent_key = "%ss" % parent_dtype
    child_key = "%ss" % child_dtype
    if parent_key not in config:
        return False
    parent_idx = _idx_in_list(config[parent_key], parent_name)
    if parent_idx is None:
        return False
    parent = config[parent_key][parent_idx]
    if child_key not in parent:
        return False
    return _name_in_list(parent[child_key], child_name)
def _name_in_list(list_of_dicts, name):
for item in list_of_dicts:
if 'name' in item and item['name'] == name:
return True
return False
def _pick_from_list(list_of_dicts, name):
for item in list_of_dicts:
if 'name' in item and item['name'] == name:
return item
return None
def _idx_in_list(list_of_dicts, name):
for i, item in enumerate(list_of_dicts):
if 'name' in item and item['name'] == name:
return i
return None
def _assert_config_item_exists(config, dtype, name):
    """Raise DataNotFoundError unless the named config item exists."""
    if not _config_item_exists(config, dtype, name):
        raise DataNotFoundError("%s '%s' not found" % (dtype, name))
def _assert_config_item_not_exists(config, dtype, name):
    """Raise DataExistsError if the named config item already exists."""
    if _config_item_exists(config, dtype, name):
        raise DataExistsError("%s '%s' already exists" % (dtype, name))
def _assert_nested_config_item_exists(config, dtype, primary, secondary):
    """Raise DataNotFoundError unless the nested config item exists."""
    if not _nested_config_item_exists(config, dtype, primary, secondary):
        raise DataNotFoundError("%s '%s:%s' not found" % (dtype, primary, secondary))
def _assert_nested_config_item_not_exists(config, dtype, primary, secondary):
    """Raise DataExistsError if the nested config item already exists."""
    if _nested_config_item_exists(config, dtype, primary, secondary):
        raise DataExistsError("%s '%s:%s' already exists" % (dtype, primary, secondary))
|
"""scons.Node.FS
File system nodes.
These Nodes represent the canonical external objects that people think
of when they think of building software: files and directories.
This holds a "default_fs" variable that should be initialized with an FS
that can be used by scripts or modules looking for the canonical default.
"""
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
__revision__ = "src/engine/SCons/Node/FS.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import fnmatch
import os
import re
import shutil
import stat
import sys
import time
import codecs
from itertools import chain
import SCons.Action
import SCons.Debug
from SCons.Debug import logInstanceCreation
import SCons.Errors
import SCons.Memoize
import SCons.Node
import SCons.Node.Alias
import SCons.Subst
import SCons.Util
import SCons.Warnings
from SCons.Debug import Trace
# Debug flag: presumably enables reporting of duplicate-file handling
# when nonzero — usage not visible in this chunk, confirm before relying on it.
print_duplicate = 0

# Debug flag for the md5-timestamp machinery elsewhere in this module.
MD5_TIMESTAMP_DEBUG = False
def sconsign_none(node):
    # Placeholder .sconsign accessor for node types that have no signature
    # database (selected via _sconsign_map index 0).
    raise NotImplementedError
def sconsign_dir(node):
    """Return the .sconsign file info for this directory,
    creating it first if necessary."""
    if node._sconsign:
        return node._sconsign
    # Lazily create the signature database on first access; the import is
    # local so merely loading this module does not pull in SCons.SConsign.
    import SCons.SConsign
    node._sconsign = SCons.SConsign.ForDirectory(node)
    return node._sconsign
# Dispatch table: a node's _func_sconsign index selects how its .sconsign
# info is obtained (0 = unsupported, 1 = per-directory database).
_sconsign_map = {0 : sconsign_none,
                 1 : sconsign_dir}
class FileBuildInfoFileToCsigMappingError(Exception):
    """Raised when a FileBuildInfo's dependency-to-csig mapping cannot be built."""
    pass
class EntryProxyAttributeError(AttributeError):
    """
    An AttributeError subclass for recording and displaying the name
    of the underlying Entry involved in an AttributeError exception.
    """
    def __init__(self, entry_proxy, attribute):
        # Store the proxy and the missing attribute name so __str__ can
        # render a precise message lazily, only if/when it is displayed.
        AttributeError.__init__(self)
        self.entry_proxy = entry_proxy
        self.attribute = attribute

    def __str__(self):
        entry = self.entry_proxy.get()
        return "%s instance %s has no attribute %s" % (
            entry.__class__.__name__,
            repr(entry.name),
            repr(self.attribute),
        )
# The max_drift value: by default, use a cached signature value for
# any file that's been untouched for more than two days.
default_max_drift = 2*24*60*60  # seconds (2 days)
#
# We stringify these file system Nodes a lot. Turning a file system Node
# into a string is non-trivial, because the final string representation
# can depend on a lot of factors: whether it's a derived target or not,
# whether it's linked to a repository or source directory, and whether
# there's duplication going on. The normal technique for optimizing
# calculations like this is to memoize (cache) the string value, so you
# only have to do the calculation once.
#
# A number of the above factors, however, can be set after we've already
# been asked to return a string for a Node, because a Repository() or
# VariantDir() call or the like may not occur until later in SConscript
# files. So this variable controls whether we bother trying to save
# string values for Nodes. The wrapper interface can set this whenever
# they're done mucking with Repository and VariantDir and the other stuff,
# to let this module know it can start returning saved string values
# for Nodes.
#
# Whether __str__()/stat() results of Nodes may be memoized; see the long
# comment above.  None/false until save_strings() is called.
Save_Strings = None

def save_strings(val):
    """Enable (truthy val) or disable caching of Node string values.

    Called by the wrapper interface once Repository()/VariantDir()
    processing is done and stringified paths can no longer change.
    """
    global Save_Strings
    Save_Strings = val
#
# Avoid unnecessary function calls by recording a Boolean value that
# tells us whether or not os.path.splitdrive() actually does anything
# on this system, and therefore whether we need to bother calling it
# when looking up path names in various methods below.
#
do_splitdrive = None
_my_splitdrive =None
def initialize_do_splitdrive():
    """Probe os.path.splitdrive() behavior once and cache the results in
    module globals (do_splitdrive, has_unc, _my_splitdrive, OS_SEP,
    UNC_PREFIX, os_sep_is_slash) so later path lookups can avoid
    repeated module/function lookups."""
    global do_splitdrive
    global has_unc
    drive, path = os.path.splitdrive('X:/foo')
    # splitunc is removed from python 3.7 and newer
    # so we can also just test if splitdrive works with UNC
    has_unc = (hasattr(os.path, 'splitunc')
               or os.path.splitdrive(r'\\split\drive\test')[0] == r'\\split\drive')

    # True when drive letters and/or UNC prefixes are meaningful here.
    do_splitdrive = not not drive or has_unc

    global _my_splitdrive
    if has_unc:
        def splitdrive(p):
            if p[1:2] == ':':
                return p[:2], p[2:]
            if p[0:2] == '//':
                # Note that we leave a leading slash in the path
                # because UNC paths are always absolute.
                return '//', p[1:]
            return '', p
    else:
        def splitdrive(p):
            if p[1:2] == ':':
                return p[:2], p[2:]
            return '', p
    _my_splitdrive = splitdrive

    # Keep some commonly used values in global variables to skip to
    # module look-up costs.
    global OS_SEP
    global UNC_PREFIX
    global os_sep_is_slash

    OS_SEP = os.sep
    UNC_PREFIX = OS_SEP + OS_SEP
    os_sep_is_slash = OS_SEP == '/'

# Compute the platform-dependent path globals once at import time.
initialize_do_splitdrive()
# Used to avoid invoking os.path.normpath if not necessary.
needs_normpath_check = re.compile(
r'''
# We need to renormalize the path if it contains any consecutive
# '/' characters.
.*// |
# We need to renormalize the path if it contains a '..' directory.
# Note that we check for all the following cases:
#
# a) The path is a single '..'
# b) The path starts with '..'. E.g. '../' or '../moredirs'
# but we not match '..abc/'.
# c) The path ends with '..'. E.g. '/..' or 'dirs/..'
# d) The path contains a '..' in the middle.
# E.g. dirs/../moredirs
(.*/)?\.\.(?:/|$) |
# We need to renormalize the path if it contains a '.'
# directory, but NOT if it is a single '.' '/' characters. We
# do not want to match a single '.' because this case is checked
# for explicitly since this is common enough case.
#
# Note that we check for all the following cases:
#
# a) We don't match a single '.'
# b) We match if the path starts with '.'. E.g. './' or
# './moredirs' but we not match '.abc/'.
# c) We match if the path ends with '.'. E.g. '/.' or
# 'dirs/.'
# d) We match if the path contains a '.' in the middle.
# E.g. dirs/./moredirs
\./|.*/\.(?:/|$)
''',
re.VERBOSE
)
needs_normpath_match = needs_normpath_check.match
#
# SCons.Action objects for interacting with the outside world.
#
# The Node.FS methods in this module should use these actions to
# create and/or remove files and directories; they should *not* use
# os.{link,symlink,unlink,mkdir}(), etc., directly.
#
# Using these SCons.Action objects ensures that descriptions of these
# external activities are properly displayed, that the displays are
# suppressed when the -s (silent) option is used, and (most importantly)
# the actions are disabled when the -n option is used, in which case
# there should be *no* changes to the external file system(s)...
#
# For Now disable hard & softlinks for win32
# PY3 supports them, but the rest of SCons is not ready for this
# in some cases user permissions may be required.
# TODO: See if theres a reasonable way to enable using links on win32/64
if hasattr(os, 'link') and sys.platform != 'win32':
def _hardlink_func(fs, src, dst):
# If the source is a symlink, we can't just hard-link to it
# because a relative symlink may point somewhere completely
# different. We must disambiguate the symlink and then
# hard-link the final destination file.
while fs.islink(src):
link = fs.readlink(src)
if not os.path.isabs(link):
src = link
else:
src = os.path.join(os.path.dirname(src), link)
fs.link(src, dst)
else:
_hardlink_func = None
# Symlink duplication; disabled on win32 (see the comment block above) and
# on platforms without os.symlink.
if hasattr(os, 'symlink') and sys.platform != 'win32':
    def _softlink_func(fs, src, dst):
        # Delegate to the LocalFS wrapper so it can be remapped in tests.
        fs.symlink(src, dst)
else:
    _softlink_func = None
def _copy_func(fs, src, dest):
shutil.copy2(src, dest)
st = fs.stat(src)
fs.chmod(dest, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
# The duplication styles accepted by set_duplicate(): dash-separated
# sequences of 'hard' (hard link), 'soft' (symlink) and 'copy', tried
# in order.
Valid_Duplicates = ['hard-soft-copy', 'soft-hard-copy',
                    'hard-copy', 'soft-copy', 'copy']

Link_Funcs = [] # contains the callables of the specified duplication style
def set_duplicate(duplicate):
    """Configure the global Link_Funcs list for the given duplication style.

    ``duplicate`` must be one of Valid_Duplicates (e.g. 'hard-soft-copy');
    each dash-separated component selects an implementation, and components
    unavailable on this platform are silently dropped.

    Raises SCons.Errors.InternalError for an unrecognized style.
    """
    if duplicate not in Valid_Duplicates:
        raise SCons.Errors.InternalError("The argument of set_duplicate "
                                         "should be in Valid_Duplicates")
    # Map the component names to the underlying implementations.  We do
    # this inside this function, not in the top-level module code, so
    # that we can remap os.link and os.symlink for testing purposes.
    link_dict = {
        'hard': _hardlink_func,
        'soft': _softlink_func,
        'copy': _copy_func,
    }
    global Link_Funcs
    Link_Funcs = [link_dict[name]
                  for name in duplicate.split('-')
                  if link_dict[name]]
def LinkFunc(target, source, env):
    """Action function: duplicate source[0] at target[0] using the
    configured Link_Funcs chain (hard link / symlink / copy).

    Relative paths cause problems with symbolic links, so
    we use absolute paths, which may be a problem for people
    who want to move their soft-linked src-trees around. Those
    people should use the 'hard-copy' mode, softlinks cannot be
    used for that; at least I have no idea how ...
    """
    src = source[0].get_abspath()
    dest = target[0].get_abspath()
    dir, file = os.path.split(dest)
    # Create the destination's parent directory if it is missing.
    if dir and not target[0].fs.isdir(dir):
        os.makedirs(dir)
    if not Link_Funcs:
        # Set a default order of link functions.
        set_duplicate('hard-soft-copy')
    fs = source[0].fs
    # Now link the files with the previously specified order.
    for func in Link_Funcs:
        try:
            func(fs, src, dest)
            break
        except (IOError, OSError):
            # An OSError indicates something happened like a permissions
            # problem or an attempt to symlink across file-system
            # boundaries. An IOError indicates something like the file
            # not existing. In either case, keeping trying additional
            # functions in the list and only raise an error if the last
            # one failed.
            if func == Link_Funcs[-1]:
                # exception of the last link method (copy) are fatal
                raise
    return 0

# Action used to duplicate files into variant dirs (no display string).
Link = SCons.Action.Action(LinkFunc, None)
def LocalString(target, source, env):
    """Return the display string for a LocalCopy action."""
    return 'Local copy of {0} from {1}'.format(target[0], source[0])
# Same duplication behavior as Link, but with a human-readable description.
LocalCopy = SCons.Action.Action(LinkFunc, LocalString)
def UnlinkFunc(target, source, env):
    """Action function: remove the first target file from disk."""
    node = target[0]
    node.fs.unlink(node.get_abspath())
    return 0
# Action used to remove target files before rebuilding (no display string).
Unlink = SCons.Action.Action(UnlinkFunc, None)
def MkdirFunc(target, source, env):
    """Action function: create the directory for the first target node.

    The extra os.path.exists() check looks redundant, but it's possible
    when using Install() to install multiple dirs outside the source
    tree to get a case where t.exists() is true but the path does
    already exist, so it prevents spurious build failures in that case.
    See test/Install/multi-dir.
    """
    t = target[0]
    if not t.exists():
        path = t.get_abspath()
        if not os.path.exists(path):
            t.fs.mkdir(path)
    return 0
# Action used to create target directories; presub=None suppresses
# pre-substitution display.
Mkdir = SCons.Action.Action(MkdirFunc, None, presub=None)

# Lazily-created singleton Builder; see get_MkdirBuilder() below.
MkdirBuilder = None
def get_MkdirBuilder():
    """Return the shared singleton Builder for creating directories,
    constructing it on first use."""
    global MkdirBuilder
    if MkdirBuilder is not None:
        return MkdirBuilder
    # Imported lazily so merely loading Node.FS stays cheap.
    import SCons.Builder
    import SCons.Defaults
    # "env" will get filled in by Executor.get_build_env()
    # calling SCons.Defaults.DefaultEnvironment() when necessary.
    MkdirBuilder = SCons.Builder.Builder(action=Mkdir,
                                         env=None,
                                         explain=None,
                                         is_explicit=None,
                                         target_scanner=SCons.Defaults.DirEntryScanner,
                                         name="MkdirBuilder")
    return MkdirBuilder
class _Null(object):
    """Sentinel type used where None is itself a meaningful value."""
    pass

# Shared sentinel instance.
_null = _Null()
# Cygwin's os.path.normcase pretends it's on a case-sensitive filesystem.
_is_cygwin = sys.platform == "cygwin"
if os.path.normcase("TeSt") == os.path.normpath("TeSt") and not _is_cygwin:
def _my_normcase(x):
return x
else:
def _my_normcase(x):
return x.upper()
class DiskChecker(object):
    """A toggleable on-disk consistency check.

    Calling the instance runs either the ``do`` callable (check enabled,
    the default) or the ``ignore`` callable (disabled); set() flips
    between them based on whether this checker's type name appears in
    the supplied list.
    """
    def __init__(self, type, do, ignore):
        self.type = type        # name this checker is selected by
        self.do = do            # behavior when the check is enabled
        self.ignore = ignore    # no-op behavior when disabled
        self.func = do          # currently active behavior

    def __call__(self, *args, **kw):
        return self.func(*args, **kw)

    def set(self, list):
        # NOTE: the parameter shadows the builtin 'list'; the name is kept
        # for interface compatibility with existing callers.
        if self.type in list:
            self.func = self.do
        else:
            self.func = self.ignore
def do_diskcheck_match(node, predicate, errorfmt):
    """Raise TypeError(errorfmt % abspath) when predicate() reports a
    conflicting on-disk entry for node (e.g. a directory found where a
    file was expected)."""
    result = predicate()
    try:
        # If calling the predicate() cached a None value from stat(),
        # remove it so it doesn't interfere with later attempts to
        # build this Node as we walk the DAG.  (This isn't a great way
        # to do this, we're reaching into an interface that doesn't
        # really belong to us, but it's all about performance, so
        # for now we'll just document the dependency...)
        if node._memo['stat'] is None:
            del node._memo['stat']
    except (AttributeError, KeyError):
        pass
    if result:
        raise TypeError(errorfmt % node.get_abspath())
def ignore_diskcheck_match(node, predicate, errorfmt):
    # No-op counterpart of do_diskcheck_match, used when the 'match'
    # disk check is disabled.
    pass
# The single registered checker: verifies that an on-disk entry matches
# the Node type it is being looked up as.
diskcheck_match = DiskChecker('match', do_diskcheck_match, ignore_diskcheck_match)

# All checkers known to set_diskcheck()/diskcheck_types().
diskcheckers = [
    diskcheck_match,
]
def set_diskcheck(list):
    """Enable/disable every registered disk checker according to *list*."""
    for checker in diskcheckers:
        checker.set(list)
def diskcheck_types():
    """Return the type names of all registered disk checkers."""
    return [checker.type for checker in diskcheckers]
class EntryProxy(SCons.Util.Proxy):
    """Proxy around a Node.FS entry that adds the "special" attributes
    (.abspath, .posix, .windows, .srcdir, ...) used in command-line
    variable substitution, e.g. ${TARGET.posix}."""

    __str__ = SCons.Util.Delegate('__str__')

    # In PY3 if a class defines __eq__, then it must explicitly provide
    # __hash__.  Since SCons.Util.Proxy provides __eq__ we need the following
    # see: https://docs.python.org/3.1/reference/datamodel.html#object.__hash__
    __hash__ = SCons.Util.Delegate('__hash__')

    # The following double-underscore methods are name-mangled; they are
    # reachable only through the dictSpecialAttrs table below.

    def __get_abspath(self):
        entry = self.get()
        return SCons.Subst.SpecialAttrWrapper(entry.get_abspath(),
                                              entry.name + "_abspath")

    def __get_filebase(self):
        name = self.get().name
        return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(name)[0],
                                              name + "_filebase")

    def __get_suffix(self):
        name = self.get().name
        return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(name)[1],
                                              name + "_suffix")

    def __get_file(self):
        name = self.get().name
        return SCons.Subst.SpecialAttrWrapper(name, name + "_file")

    def __get_base_path(self):
        """Return the file's directory and file name, with the
        suffix stripped."""
        entry = self.get()
        return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(entry.get_path())[0],
                                              entry.name + "_base")

    def __get_posix_path(self):
        """Return the path with / as the path separator,
        regardless of platform."""
        if os_sep_is_slash:
            return self
        else:
            entry = self.get()
            r = entry.get_path().replace(OS_SEP, '/')
            return SCons.Subst.SpecialAttrWrapper(r, entry.name + "_posix")

    def __get_windows_path(self):
        r"""Return the path with \ as the path separator,
        regardless of platform."""
        if OS_SEP == '\\':
            return self
        else:
            entry = self.get()
            r = entry.get_path().replace(OS_SEP, '\\')
            return SCons.Subst.SpecialAttrWrapper(r, entry.name + "_windows")

    def __get_srcnode(self):
        return EntryProxy(self.get().srcnode())

    def __get_srcdir(self):
        """Returns the directory containing the source node linked to this
        node via VariantDir(), or the directory of this node if not linked."""
        return EntryProxy(self.get().srcnode().dir)

    def __get_rsrcnode(self):
        return EntryProxy(self.get().srcnode().rfile())

    def __get_rsrcdir(self):
        """Returns the directory containing the source node linked to this
        node via VariantDir(), or the directory of this node if not linked."""
        return EntryProxy(self.get().srcnode().rfile().dir)

    def __get_dir(self):
        return EntryProxy(self.get().dir)

    # Maps the public special-attribute name to its (name-mangled) getter.
    dictSpecialAttrs = { "base"     : __get_base_path,
                         "posix"    : __get_posix_path,
                         "windows"  : __get_windows_path,
                         "win32"    : __get_windows_path,
                         "srcpath"  : __get_srcnode,
                         "srcdir"   : __get_srcdir,
                         "dir"      : __get_dir,
                         "abspath"  : __get_abspath,
                         "filebase" : __get_filebase,
                         "suffix"   : __get_suffix,
                         "file"     : __get_file,
                         "rsrcpath" : __get_rsrcnode,
                         "rsrcdir"  : __get_rsrcdir,
                       }

    def __getattr__(self, name):
        # This is how we implement the "special" attributes
        # such as base, posix, srcdir, etc.
        try:
            attr_function = self.dictSpecialAttrs[name]
        except KeyError:
            try:
                attr = SCons.Util.Proxy.__getattr__(self, name)
            except AttributeError:
                # Raise our own AttributeError subclass with an
                # overridden __str__() method that identifies the
                # name of the entry that caused the exception.
                raise EntryProxyAttributeError(self, name)
            return attr
        else:
            return attr_function(self)
class Base(SCons.Node.Node):
    """A generic class for file system entries.  This class is for
    when we don't know yet whether the entry being looked up is a file
    or a directory.  Instances of this class can morph into either
    Dir or File objects by a later, more precise lookup.

    Note: this class does not define __cmp__ and __hash__ for
    efficiency reasons.  SCons does a lot of comparing of
    Node.FS.{Base,Entry,File,Dir} objects, so those operations must be
    as fast as possible, which means we want to use Python's built-in
    object identity comparisons.
    """

    __slots__ = ['name',
                 'fs',
                 '_abspath',
                 '_labspath',
                 '_path',
                 '_tpath',
                 '_path_elements',
                 'dir',
                 'cwd',
                 'duplicate',
                 '_local',
                 'sbuilder',
                 '_proxy',
                 '_func_sconsign']

    def __init__(self, name, directory, fs):
        """Initialize a generic Node.FS.Base object.

        Call the superclass initialization, take care of setting up
        our relative and absolute paths, identify our parent
        directory, and indicate that this node should use
        signatures."""

        if SCons.Debug.track_instances: logInstanceCreation(self, 'Node.FS.Base')
        SCons.Node.Node.__init__(self)

        # Filenames and paths are probably reused and are intern'ed to save some memory.
        # Filename with extension as it was specified when the object was
        # created; to obtain filesystem path, use Python str() function
        self.name = SCons.Util.silent_intern(name)
        self.fs = fs #: Reference to parent Node.FS object

        assert directory, "A directory must be provided"

        # Lazily-computed path caches; filled in by the get_* accessors.
        self._abspath = None
        self._labspath = None
        self._path = None
        self._tpath = None
        self._path_elements = None

        self.dir = directory
        self.cwd = None # will hold the SConscript directory for target nodes
        self.duplicate = directory.duplicate
        self.changed_since_last_build = 2
        # Indices into the SCons.Node dispatch maps selecting this node's
        # sconsign/exists/rexists/contents/target_from_source behavior.
        self._func_sconsign = 0
        self._func_exists = 2
        self._func_rexists = 2
        self._func_get_contents = 0
        self._func_target_from_source = 1
        self.store_info = 1

    def str_for_display(self):
        # Quoted form used in user-facing messages.
        return '"' + self.__str__() + '"'

    def must_be_same(self, klass):
        """
        This node, which already existed, is being looked up as the
        specified klass.  Raise an exception if it isn't.
        """
        if isinstance(self, klass) or klass is Entry:
            return
        raise TypeError("Tried to lookup %s '%s' as a %s." %
                        (self.__class__.__name__, self.get_internal_path(), klass.__name__))

    def get_dir(self):
        """Return the parent directory Node."""
        return self.dir

    def get_suffix(self):
        """Return the file-name suffix, including the dot."""
        return SCons.Util.splitext(self.name)[1]

    def rfile(self):
        # Base nodes have no repository counterpart; subclasses override.
        return self

    def __getattr__(self, attr):
        """ Together with the node_bwcomp dict defined below,
        this method provides a simple backward compatibility
        layer for the Node attributes 'abspath', 'labspath',
        'path', 'tpath', 'suffix' and 'path_elements'. These Node
        attributes used to be directly available in v2.3 and earlier, but
        have been replaced by getter methods that initialize the
        single variables lazily when required, in order to save memory.
        The redirection to the getters lets older Tools and
        SConstruct continue to work without any additional changes,
        fully transparent to the user.
        Note, that __getattr__ is only called as fallback when the
        requested attribute can't be found, so there should be no
        speed performance penalty involved for standard builds.
        """
        if attr in node_bwcomp:
            return node_bwcomp[attr](self)

        raise AttributeError("%r object has no attribute %r" %
                             (self.__class__, attr))

    def __str__(self):
        """A Node.FS.Base object's string representation is its path
        name."""
        global Save_Strings
        if Save_Strings:
            return self._save_str()
        return self._get_str()

    def __lt__(self, other):
        """ less than operator used by sorting on py3"""
        return str(self) < str(other)

    @SCons.Memoize.CountMethodCall
    def _save_str(self):
        # Memoized variant of _get_str(); only used once save_strings()
        # has enabled caching.
        try:
            return self._memo['_save_str']
        except KeyError:
            pass
        result = SCons.Util.silent_intern(self._get_str())
        self._memo['_save_str'] = result
        return result

    def _get_str(self):
        global Save_Strings
        if self.duplicate or self.is_derived():
            return self.get_path()
        srcnode = self.srcnode()
        if srcnode.stat() is None and self.stat() is not None:
            result = self.get_path()
        else:
            result = srcnode.get_path()
        if not Save_Strings:
            # We're not at the point where we're saving the string
            # representations of FS Nodes (because we haven't finished
            # reading the SConscript files and need to have str() return
            # things relative to them).  That also means we can't yet
            # cache values returned (or not returned) by stat(), since
            # Python code in the SConscript files might still create
            # or otherwise affect the on-disk file.  So get rid of the
            # values that the underlying stat() method saved.
            try: del self._memo['stat']
            except KeyError: pass
            if self is not srcnode:
                try: del srcnode._memo['stat']
                except KeyError: pass
        return result

    # Repository-aware str() is the same as plain str() for Base nodes.
    rstr = __str__

    @SCons.Memoize.CountMethodCall
    def stat(self):
        # os.stat() result for this node, memoized; None when stat fails.
        try:
            return self._memo['stat']
        except KeyError:
            pass
        try:
            result = self.fs.stat(self.get_abspath())
        except os.error:
            result = None

        self._memo['stat'] = result
        return result

    def exists(self):
        # Dispatch through SCons.Node's map so subclasses can vary behavior
        # by index rather than method override.
        return SCons.Node._exists_map[self._func_exists](self)

    def rexists(self):
        # Existence check including repository copies.
        return SCons.Node._rexists_map[self._func_rexists](self)

    def getmtime(self):
        # Modification time from stat(), or None if the node doesn't exist.
        st = self.stat()
        if st:
            return st[stat.ST_MTIME]
        else:
            return None

    def getsize(self):
        # File size from stat(), or None if the node doesn't exist.
        st = self.stat()
        if st:
            return st[stat.ST_SIZE]
        else:
            return None

    def isdir(self):
        st = self.stat()
        return st is not None and stat.S_ISDIR(st[stat.ST_MODE])

    def isfile(self):
        st = self.stat()
        return st is not None and stat.S_ISREG(st[stat.ST_MODE])

    if hasattr(os, 'symlink'):
        def islink(self):
            # Use lstat so the link itself, not its target, is examined.
            try: st = self.fs.lstat(self.get_abspath())
            except os.error: return 0
            return stat.S_ISLNK(st[stat.ST_MODE])
    else:
        def islink(self):
            return 0                    # no symlinks

    def is_under(self, dir):
        # True when this node is dir itself or (recursively) inside it.
        if self is dir:
            return 1
        else:
            return self.dir.is_under(dir)

    def set_local(self):
        self._local = 1

    def srcnode(self):
        """If this node is in a build path, return the node
        corresponding to its source file.  Otherwise, return
        ourself.
        """
        srcdir_list = self.dir.srcdir_list()
        if srcdir_list:
            srcnode = srcdir_list[0].Entry(self.name)
            srcnode.must_be_same(self.__class__)
            return srcnode
        return self

    def get_path(self, dir=None):
        """Return path relative to the current working directory of the
        Node.FS.Base object that owns us."""
        if not dir:
            dir = self.fs.getcwd()
        if self == dir:
            return '.'
        path_elems = self.get_path_elements()
        pathname = ''
        # If dir is one of our ancestors, emit only the elements below it;
        # otherwise fall back to the full element chain.
        try: i = path_elems.index(dir)
        except ValueError:
            for p in path_elems[:-1]:
                pathname += p.dirname
        else:
            for p in path_elems[i+1:-1]:
                pathname += p.dirname
        return pathname + path_elems[-1].name

    def set_src_builder(self, builder):
        """Set the source code builder for this node."""
        self.sbuilder = builder
        if not self.has_builder():
            self.builder_set(builder)

    def src_builder(self):
        """Fetch the source code builder for this node.

        If there isn't one, we cache the source code builder specified
        for the directory (which in turn will cache the value from its
        parent directory, and so on up to the file system root).
        """
        try:
            scb = self.sbuilder
        except AttributeError:
            scb = self.dir.src_builder()
            self.sbuilder = scb
        return scb

    def get_abspath(self):
        """Get the absolute path of the file."""
        return self.dir.entry_abspath(self.name)

    def get_labspath(self):
        """Get the "logical" absolute path of the file, as computed by the
        parent directory's entry_labspath()."""
        return self.dir.entry_labspath(self.name)

    def get_internal_path(self):
        if self.dir._path == '.':
            return self.name
        else:
            return self.dir.entry_path(self.name)

    def get_tpath(self):
        if self.dir._tpath == '.':
            return self.name
        else:
            return self.dir.entry_tpath(self.name)

    def get_path_elements(self):
        # Chain of directory Nodes from the root down to (and including) self.
        return self.dir._path_elements + [self]

    def for_signature(self):
        # Return just our name.  Even an absolute path would not work,
        # because that can change thanks to symlinks or remapped network
        # paths.
        return self.name

    def get_subst_proxy(self):
        # Cache one EntryProxy per node for use in variable substitution.
        try:
            return self._proxy
        except AttributeError:
            ret = EntryProxy(self)
            self._proxy = ret
            return ret

    def target_from_source(self, prefix, suffix, splitext=SCons.Util.splitext):
        """

        Generates a target entry that corresponds to this entry (usually
        a source file) with the specified prefix and suffix.

        Note that this method can be overridden dynamically for generated
        files that need different behavior.  See Tool/swig.py for
        an example.
        """
        return SCons.Node._target_from_source_map[self._func_target_from_source](self, prefix, suffix, splitext)

    def _Rfindalldirs_key(self, pathlist):
        return pathlist

    @SCons.Memoize.CountDictCall(_Rfindalldirs_key)
    def Rfindalldirs(self, pathlist):
        """
        Return all of the directories for a given path list, including
        corresponding "backing" directories in any repositories.

        The Node lookups are relative to this Node (typically a
        directory), so memoizing result saves cycles from looking
        up the same path for each target in a given directory.
        """
        try:
            memo_dict = self._memo['Rfindalldirs']
        except KeyError:
            memo_dict = {}
            self._memo['Rfindalldirs'] = memo_dict
        else:
            try:
                return memo_dict[pathlist]
            except KeyError:
                pass

        create_dir_relative_to_self = self.Dir
        result = []
        for path in pathlist:
            if isinstance(path, SCons.Node.Node):
                result.append(path)
            else:
                dir = create_dir_relative_to_self(path)
                result.extend(dir.get_all_rdirs())

        memo_dict[pathlist] = result

        return result

    def RDirs(self, pathlist):
        """Search for a list of directories in the Repository list."""
        cwd = self.cwd or self.fs._cwd
        return cwd.Rfindalldirs(pathlist)

    @SCons.Memoize.CountMethodCall
    def rentry(self):
        # Repository counterpart of this entry: ourself if we exist
        # locally, otherwise the first repository dir that has us on disk.
        try:
            return self._memo['rentry']
        except KeyError:
            pass
        result = self
        if not self.exists():
            norm_name = _my_normcase(self.name)
            for dir in self.dir.get_all_rdirs():
                try:
                    node = dir.entries[norm_name]
                except KeyError:
                    if dir.entry_exists_on_disk(self.name):
                        result = dir.Entry(self.name)
                        break
        self._memo['rentry'] = result
        return result

    def _glob1(self, pattern, ondisk=True, source=False, strings=False):
        # Generic entries match nothing; Dir overrides this.
        return []
# Dict that provides a simple backward compatibility
# layer for the Node attributes 'abspath', 'labspath',
# 'path', 'tpath' and 'path_elements'.
# @see Base.__getattr__ above
node_bwcomp = {'abspath' : Base.get_abspath,
'labspath' : Base.get_labspath,
'path' : Base.get_internal_path,
'tpath' : Base.get_tpath,
'path_elements' : Base.get_path_elements,
'suffix' : Base.get_suffix}
class Entry(Base):
    """This is the class for generic Node.FS entries--that is, things
    that could be a File or a Dir, but we're just not sure yet.
    Consequently, the methods in this class really exist just to
    transform their associated object into the right class when the
    time comes, and then call the same-named method in the transformed
    class."""

    __slots__ = ['scanner_paths',
                 'cachedir_csig',
                 'cachesig',
                 'repositories',
                 'srcdir',
                 'entries',
                 'searched',
                 '_sconsign',
                 'variant_dirs',
                 'root',
                 'dirname',
                 'on_disk_entries',
                 'released_target_info',
                 'contentsig']

    def __init__(self, name, directory, fs):
        Base.__init__(self, name, directory, fs)
        # Dispatch indices specific to undisambiguated entries (see the
        # SCons.Node._exists_map / _get_contents_map tables).
        self._func_exists = 3
        self._func_get_contents = 1

    def diskcheck_match(self):
        # An Entry has no committed type yet, so there is nothing to check.
        pass

    def disambiguate(self, must_exist=None):
        """Morph this Entry into a File or a Dir.

        The decision is based on what exists on disk at this location
        (falling back to the linked source directory); when nothing is
        found, raises SCons.Errors.UserError if must_exist is set,
        otherwise defaults to File.
        """
        if self.isfile():
            self.__class__ = File
            self._morph()
            self.clear()
        elif self.isdir():
            self.__class__ = Dir
            self._morph()
        else:
            # There was nothing on-disk at this location, so look in
            # the src directory.
            #
            # We can't just use self.srcnode() straight away because
            # that would create an actual Node for this file in the src
            # directory, and there might not be one.  Instead, use the
            # dir_on_disk() method to see if there's something on-disk
            # with that name, in which case we can go ahead and call
            # self.srcnode() to create the right type of entry.
            srcdir = self.dir.srcnode()
            if srcdir != self.dir and \
               srcdir.entry_exists_on_disk(self.name) and \
               self.srcnode().isdir():
                self.__class__ = Dir
                self._morph()
            elif must_exist:
                msg = "No such file or directory: '%s'" % self.get_abspath()
                raise SCons.Errors.UserError(msg)
            else:
                self.__class__ = File
                self._morph()
                self.clear()
        return self

    def rfile(self):
        """We're a generic Entry, but the caller is actually looking for
        a File at this point, so morph into one."""
        self.__class__ = File
        self._morph()
        self.clear()
        return File.rfile(self)

    def scanner_key(self):
        return self.get_suffix()

    def get_contents(self):
        """Fetch the contents of the entry.  Returns the exact binary
        contents of the file."""
        return SCons.Node._get_contents_map[self._func_get_contents](self)

    def get_text_contents(self):
        """Fetch the decoded text contents of a Unicode encoded Entry.

        Since this should return the text contents from the file
        system, we check to see into what sort of subclass we should
        morph this Entry."""
        try:
            self = self.disambiguate(must_exist=1)
        except SCons.Errors.UserError:
            # There was nothing on disk with which to disambiguate
            # this entry.  Leave it as an Entry, but return a null
            # string so calls to get_text_contents() in emitters and
            # the like (e.g. in qt.py) don't have to disambiguate by
            # hand or catch the exception.
            return ''
        else:
            # disambiguate() changed self.__class__, so this dispatches
            # to the File/Dir implementation, not back here.
            return self.get_text_contents()

    def must_be_same(self, klass):
        """Called to make sure a Node is a Dir.  Since we're an
        Entry, we can morph into one."""
        if self.__class__ is not klass:
            self.__class__ = klass
            self._morph()
            self.clear()

    # The following methods can get called before the Taskmaster has
    # had a chance to call disambiguate() directly to see if this Entry
    # should really be a Dir or a File.  We therefore use these to call
    # disambiguate() transparently (from our caller's point of view).
    #
    # Right now, this minimal set of methods has been derived by just
    # looking at some of the methods that will obviously be called early
    # in any of the various Taskmasters' calling sequences, and then
    # empirically figuring out which additional methods are necessary
    # to make various tests pass.

    def exists(self):
        return SCons.Node._exists_map[self._func_exists](self)

    def rel_path(self, other):
        d = self.disambiguate()
        if d.__class__ is Entry:
            raise Exception("rel_path() could not disambiguate File/Dir")
        return d.rel_path(other)

    def new_ninfo(self):
        return self.disambiguate().new_ninfo()

    def _glob1(self, pattern, ondisk=True, source=False, strings=False):
        return self.disambiguate()._glob1(pattern, ondisk, source, strings)

    def get_subst_proxy(self):
        return self.disambiguate().get_subst_proxy()
# This is for later so we can differentiate between Entry the class and Entry
# the method of the FS class.
_classEntry = Entry
class LocalFS(object):
    """
    This class implements an abstraction layer for operations involving
    a local file system.  Essentially, this wraps any function in
    the os, os.path or shutil modules that we use to actually go do
    anything with or to the local file system.

    Note that there's a very good chance we'll refactor this part of
    the architecture in some way as we really implement the interface(s)
    for remote file system Nodes.  For example, the right architecture
    might be to have this be a subclass instead of a base class.
    Nevertheless, we're using this as a first step in that direction.

    We're not using chdir() yet because the calling subclass method
    needs to use os.chdir() directly to avoid recursion.  Will we
    really need this one?
    """
    #def chdir(self, path):
    #    return os.chdir(path)

    # Each method below is a thin, same-signature delegate to the
    # corresponding os/os.path/shutil function.

    def chmod(self, path, mode):
        return os.chmod(path, mode)

    def copy(self, src, dst):
        return shutil.copy(src, dst)

    def copy2(self, src, dst):
        return shutil.copy2(src, dst)

    def exists(self, path):
        return os.path.exists(path)

    def getmtime(self, path):
        return os.path.getmtime(path)

    def getsize(self, path):
        return os.path.getsize(path)

    def isdir(self, path):
        return os.path.isdir(path)

    def isfile(self, path):
        return os.path.isfile(path)

    def link(self, src, dst):
        return os.link(src, dst)

    def lstat(self, path):
        return os.lstat(path)

    def listdir(self, path):
        return os.listdir(path)

    def makedirs(self, path):
        return os.makedirs(path)

    def mkdir(self, path):
        return os.mkdir(path)

    def rename(self, old, new):
        return os.rename(old, new)

    def stat(self, path):
        return os.stat(path)

    def symlink(self, src, dst):
        return os.symlink(src, dst)

    def open(self, path):
        # Refers to the builtin open(), not this method; caller closes.
        return open(path)

    def unlink(self, path):
        return os.unlink(path)

    # Symlink support degrades gracefully on platforms without it.
    if hasattr(os, 'symlink'):
        def islink(self, path):
            return os.path.islink(path)
    else:
        def islink(self, path):
            return 0                    # no symlinks

    if hasattr(os, 'readlink'):
        def readlink(self, file):
            return os.readlink(file)
    else:
        def readlink(self, file):
            return ''
class FS(LocalFS):
def __init__(self, path = None):
"""Initialize the Node.FS subsystem.
The supplied path is the top of the source tree, where we
expect to find the top-level build file. If no path is
supplied, the current directory is the default.
The path argument must be a valid absolute path.
"""
if SCons.Debug.track_instances: logInstanceCreation(self, 'Node.FS')
self._memo = {}
self.Root = {}
self.SConstruct_dir = None
self.max_drift = default_max_drift
self.Top = None
if path is None:
self.pathTop = os.getcwd()
else:
self.pathTop = path
self.defaultDrive = _my_normcase(_my_splitdrive(self.pathTop)[0])
self.Top = self.Dir(self.pathTop)
self.Top._path = '.'
self.Top._tpath = '.'
self._cwd = self.Top
DirNodeInfo.fs = self
FileNodeInfo.fs = self
def set_SConstruct_dir(self, dir):
self.SConstruct_dir = dir
def get_max_drift(self):
return self.max_drift
def set_max_drift(self, max_drift):
self.max_drift = max_drift
def getcwd(self):
if hasattr(self, "_cwd"):
return self._cwd
else:
return "<no cwd>"
def chdir(self, dir, change_os_dir=0):
"""Change the current working directory for lookups.
If change_os_dir is true, we will also change the "real" cwd
to match.
"""
curr=self._cwd
try:
if dir is not None:
self._cwd = dir
if change_os_dir:
os.chdir(dir.get_abspath())
except OSError:
self._cwd = curr
raise
def get_root(self, drive):
    """Return the root directory Node for the specified drive,
    creating it on first use."""
    key = _my_normcase(drive)
    if key in self.Root:
        return self.Root[key]
    root = RootDir(key, self)
    self.Root[key] = root
    # Keep the no-drive alias and the default drive pointing at the
    # same RootDir object.
    if not key:
        self.Root[self.defaultDrive] = root
    elif key == self.defaultDrive:
        self.Root[''] = root
    return root
def _lookup(self, p, directory, fsclass, create=1):
    """
    The generic entry point for Node lookup with user-supplied data.

    This translates arbitrary input into a canonical Node.FS object
    of the specified fsclass.  The general approach for strings is
    to turn it into a fully normalized absolute path and then call
    the root directory's lookup_abs() method for the heavy lifting.

    If the path name begins with '#', it is unconditionally
    interpreted relative to the top-level directory of this FS.  '#'
    is treated as a synonym for the top-level SConstruct directory,
    much like '~' is treated as a synonym for the user's home
    directory in a UNIX shell.  So both '#foo' and '#/foo' refer
    to the 'foo' subdirectory underneath the top-level SConstruct
    directory.

    If the path name is relative, then the path is looked up relative
    to the specified directory, or the current directory (self._cwd,
    typically the SConscript directory) if the specified directory
    is None.
    """
    if isinstance(p, Base):
        # It's already a Node.FS object.  Make sure it's the right
        # class and return.
        p.must_be_same(fsclass)
        return p
    # str(p) in case it's something like a proxy object
    p = str(p)

    # Internally we work with '/' as the separator everywhere.
    if not os_sep_is_slash:
        p = p.replace(OS_SEP, '/')

    if p[0:1] == '#':
        # There was an initial '#', so we strip it and override
        # whatever directory they may have specified with the
        # top-level SConstruct directory.
        p = p[1:]
        directory = self.Top

        # There might be a drive letter following the
        # '#'. Although it is not described in the SCons man page,
        # the regression test suite explicitly tests for that
        # syntax. It seems to mean the following thing:
        #
        #   Assuming the SCons top dir is in C:/xxx/yyy,
        #   '#X:/toto' means X:/xxx/yyy/toto.
        #
        # i.e. it assumes that the X: drive has a directory
        # structure similar to the one found on drive C:.
        if do_splitdrive:
            drive, p = _my_splitdrive(p)
            if drive:
                root = self.get_root(drive)
            else:
                root = directory.root
        else:
            root = directory.root

        # We can only strip trailing '/' after splitting the drive
        # since the drive might be the UNC '//' prefix.
        p = p.strip('/')

        needs_normpath = needs_normpath_match(p)

        # The path is relative to the top-level SCons directory.
        if p in ('', '.'):
            p = directory.get_labspath()
        else:
            p = directory.get_labspath() + '/' + p
    else:
        if do_splitdrive:
            drive, p = _my_splitdrive(p)
            if drive and not p:
                # This causes a naked drive letter to be treated
                # as a synonym for the root directory on that
                # drive.
                p = '/'
        else:
            drive = ''

        # We can only strip trailing '/' since the drive might be the
        # UNC '//' prefix.
        if p != '/':
            p = p.rstrip('/')

        needs_normpath = needs_normpath_match(p)

        if p[0:1] == '/':
            # Absolute path
            root = self.get_root(drive)
        else:
            # This is a relative lookup or to the current directory
            # (the path name is not absolute).  Add the string to the
            # appropriate directory lookup path, after which the whole
            # thing gets normalized.
            if directory:
                if not isinstance(directory, Dir):
                    directory = self.Dir(directory)
            else:
                directory = self._cwd

            if p in ('', '.'):
                p = directory.get_labspath()
            else:
                p = directory.get_labspath() + '/' + p

            if drive:
                root = self.get_root(drive)
            else:
                root = directory.root

    if needs_normpath is not None:
        # Normalize a pathname. Will return the same result for
        # equivalent paths.
        #
        # We take advantage of the fact that we have an absolute
        # path here for sure. In addition, we know that the
        # components of lookup path are separated by slashes at
        # this point. Because of this, this code is about 2X
        # faster than calling os.path.normpath() followed by
        # replacing os.sep with '/' again.
        ins = p.split('/')[1:]
        outs = []
        for d in ins:
            if d == '..':
                try:
                    outs.pop()
                except IndexError:
                    pass
            elif d not in ('', '.'):
                outs.append(d)
        p = '/' + '/'.join(outs)

    return root._lookup_abs(p, fsclass, create)
def Entry(self, name, directory = None, create = 1):
    """Look up or create a generic Entry node with the specified name.

    If the name is a relative path (begins with ./, ../, or a file
    name), then it is looked up relative to the supplied directory
    node, or to the top level directory of the FS (supplied at
    construction time) if no directory is supplied.
    """
    return self._lookup(name, directory, Entry, create)
def File(self, name, directory = None, create = 1):
    """Look up or create a File node with the specified name.

    Relative names are resolved against the supplied directory node,
    or the top-level directory of the FS when none is supplied.

    Raises TypeError if a directory is found at the specified path.
    """
    return self._lookup(name, directory, File, create)
def Dir(self, name, directory = None, create = True):
    """Look up or create a Dir node with the specified name.

    Relative names are resolved against the supplied directory node,
    or the top-level directory of the FS when none is supplied.

    Raises TypeError if a normal file is found at the specified path.
    """
    return self._lookup(name, directory, Dir, create)
def VariantDir(self, variant_dir, src_dir, duplicate=1):
    """Link the supplied variant directory to the source directory
    for purposes of building files.

    Raises SCons.Errors.UserError if src_dir is inside variant_dir,
    or if variant_dir is already linked to a different source dir.
    """
    if not isinstance(src_dir, SCons.Node.Node):
        src_dir = self.Dir(src_dir)
    if not isinstance(variant_dir, SCons.Node.Node):
        variant_dir = self.Dir(variant_dir)
    if src_dir.is_under(variant_dir):
        raise SCons.Errors.UserError("Source directory cannot be under variant directory.")
    if variant_dir.srcdir:
        # Linking twice to the same source dir is a harmless no-op.
        if variant_dir.srcdir == src_dir:
            return # We already did this.
        raise SCons.Errors.UserError("'%s' already has a source directory: '%s'."%(variant_dir, variant_dir.srcdir))
    variant_dir.link(src_dir, duplicate)
def Repository(self, *dirs):
    """Specify Repository directories to search."""
    for d in dirs:
        node = d if isinstance(d, SCons.Node.Node) else self.Dir(d)
        self.Top.addRepository(node)
def PyPackageDir(self, modulename):
    r"""Locate the directory of a given python module name

    For example scons might resolve to
    Windows: C:\Python27\Lib\site-packages\scons-2.5.1
    Linux: /usr/lib/scons

    This can be useful when we want to determine a toolpath based on
    a python module name.

    Returns the Dir node for the located directory.
    """
    dirpath = ''
    if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and sys.version_info[1] in (0,1,2,3,4)):
        # Python2 (and very early Python3) code path.
        import imp
        splitname = modulename.split('.')
        srchpths = sys.path
        for item in splitname:
            # imp.find_module() returns an OPEN file object for plain
            # module files; the caller is responsible for closing it.
            # The original code leaked that handle - close it here.
            mod_file, path, desc = imp.find_module(item, srchpths)
            if mod_file is not None:
                mod_file.close()
                path = os.path.dirname(path)
            srchpths = [path]
        dirpath = path
    else:
        # Python3 Code
        import importlib.util
        modspec = importlib.util.find_spec(modulename)
        dirpath = os.path.dirname(modspec.origin)
    return self._lookup(dirpath, None, Dir, True)
def variant_dir_target_climb(self, orig, dir, tail):
    """Create targets in corresponding variant directories

    Climb the directory tree, and look up path names
    relative to any linked variant directories we find.

    Even though this loops and walks up the tree, we don't memoize
    the return value because this is really only used to process
    the command-line targets.
    """
    targets = []
    message = None
    fmt = "building associated VariantDir targets: %s"
    start_dir = dir
    while dir:
        for build_dir in dir.variant_dirs:
            if start_dir.is_under(build_dir):
                # If already in the build-dir location, don't reflect
                return [orig], fmt % str(orig)
            rel = os.path.join(build_dir._path, *tail)
            targets.append(self.Entry(rel))
        tail = [dir.name] + tail
        dir = dir.up()
    if targets:
        message = fmt % ' '.join(map(str, targets))
    return targets, message
def Glob(self, pathname, ondisk=True, source=True, strings=False, exclude=None, cwd=None):
    """Glob for *pathname* relative to *cwd* (default: the current
    working directory Node).

    Thin shim that forwards all work to Dir.glob().
    """
    target_dir = self.getcwd() if cwd is None else cwd
    return target_dir.glob(pathname, ondisk, source, strings, exclude)
class DirNodeInfo(SCons.Node.NodeInfoBase):
    """Stored signature/state info for Dir nodes."""
    __slots__ = ()
    # This should get reset by the FS initialization.
    current_version_id = 2

    fs = None

    def str_to_node(self, s):
        # Resolve a stored path string back into a Node, relative to
        # the top-level directory (and the right drive root, if any).
        top = self.fs.Top
        root = top.root
        if do_splitdrive:
            drive, s = _my_splitdrive(s)
            if drive:
                root = self.fs.get_root(drive)
        if not os.path.isabs(s):
            s = top.get_labspath() + '/' + s
        return root._lookup_abs(s, Entry)
class DirBuildInfo(SCons.Node.BuildInfoBase):
    """Stored build info for Dir nodes (no extra fields)."""
    __slots__ = ()
    current_version_id = 2
glob_magic_check = re.compile('[*?[]')

def has_glob_magic(s):
    """Return True when *s* contains a glob wildcard (*, ? or [)."""
    return bool(glob_magic_check.search(s))
class Dir(Base):
    """A class for directories in a file system.
    """

    # Per-instance attributes; __slots__ keeps memory down since a
    # large build may create many Dir nodes.
    __slots__ = ['scanner_paths',
                 'cachedir_csig',
                 'cachesig',
                 'repositories',
                 'srcdir',
                 'entries',
                 'searched',
                 '_sconsign',
                 'variant_dirs',
                 'root',
                 'dirname',
                 'on_disk_entries',
                 'released_target_info',
                 'contentsig']

    NodeInfo = DirNodeInfo
    BuildInfo = DirBuildInfo

    def __init__(self, name, directory, fs):
        if SCons.Debug.track_instances: logInstanceCreation(self, 'Node.FS.Dir')
        Base.__init__(self, name, directory, fs)
        # Finish turning the freshly created Base node into a Dir.
        self._morph()
def _morph(self):
    """Turn a file system Node (either a freshly initialized directory
    object or a separate Entry object) into a proper directory object.

    Set up this directory's entries and hook it into the file
    system tree.  Specify that directories (this Node) don't use
    signatures for calculating whether they're current.
    """
    self.repositories = []
    self.srcdir = None

    self.entries = {}
    self.entries['.'] = self
    self.entries['..'] = self.dir
    self.cwd = self
    self.searched = 0
    self._sconsign = None
    self.variant_dirs = []
    self.root = self.dir.root
    self.changed_since_last_build = 3
    self._func_sconsign = 1
    self._func_exists = 2
    self._func_get_contents = 2

    # Interned path strings derived from the parent directory.
    self._abspath = SCons.Util.silent_intern(self.dir.entry_abspath(self.name))
    self._labspath = SCons.Util.silent_intern(self.dir.entry_labspath(self.name))
    if self.dir._path == '.':
        self._path = SCons.Util.silent_intern(self.name)
    else:
        self._path = SCons.Util.silent_intern(self.dir.entry_path(self.name))
    if self.dir._tpath == '.':
        self._tpath = SCons.Util.silent_intern(self.name)
    else:
        self._tpath = SCons.Util.silent_intern(self.dir.entry_tpath(self.name))
    self._path_elements = self.dir._path_elements + [self]

    # For directories, we make a difference between the directory
    # 'name' and the directory 'dirname'. The 'name' attribute is
    # used when we need to print the 'name' of the directory or
    # when it is used as the last part of a path. The 'dirname'
    # is used when the directory is not the last element of the
    # path. The main reason for making that distinction is that
    # for RootDirs the dirname can not be easily inferred from
    # the name. For example, we have to add a '/' after a drive
    # letter but not after a UNC path prefix ('//').
    self.dirname = self.name + OS_SEP

    # Don't just reset the executor, replace its action list,
    # because it might have some pre-or post-actions that need to
    # be preserved.
    #
    # But don't reset the executor if there is a non-null executor
    # attached already. The existing executor might have other
    # targets, in which case replacing the action list with a
    # Mkdir action is a big mistake.
    if not hasattr(self, 'executor'):
        self.builder = get_MkdirBuilder()
        self.get_executor().set_action_list(self.builder.action)
    else:
        # Prepend MkdirBuilder action to existing action list
        l = self.get_executor().action_list
        a = get_MkdirBuilder().action
        l.insert(0, a)
        self.get_executor().set_action_list(l)
def diskcheck_match(self):
    # Calls the MODULE-LEVEL diskcheck_match() helper (not this
    # method): complain if a plain file exists where we expect a dir.
    diskcheck_match(self, self.isfile,
                    "File %s found where directory expected.")
def __clearRepositoryCache(self, duplicate=None):
    """Called when we change the repository(ies) for a directory.
    This clears any cached information that is invalidated by changing
    the repository."""
    for node in list(self.entries.values()):
        if node != self.dir:
            if node != self and isinstance(node, Dir):
                # Recurse into child directories.
                node.__clearRepositoryCache(duplicate)
            else:
                node.clear()
                try:
                    del node._srcreps
                except AttributeError:
                    pass
                if duplicate is not None:
                    node.duplicate=duplicate
def __resetDuplicate(self, node):
    """Re-derive *node*'s duplicate setting from its containing directory."""
    if node != self:
        node.duplicate = node.get_dir().duplicate
def Entry(self, name):
    """Look up or create an entry node named *name* relative to
    this directory."""
    return self.fs.Entry(name, self)
def Dir(self, name, create=True):
    """Look up or create a directory node named *name* relative to
    this directory."""
    return self.fs.Dir(name, self, create)
def File(self, name):
    """Look up or create a file node named *name* relative to
    this directory."""
    return self.fs.File(name, self)
def link(self, srcdir, duplicate):
    """Set this directory as the variant directory for the
    supplied source directory."""
    self.srcdir = srcdir
    self.duplicate = duplicate
    # Repository-derived caches are no longer valid once linked.
    self.__clearRepositoryCache(duplicate)
    srcdir.variant_dirs.append(self)
def getRepositories(self):
    """Return the list of repositories for this directory, including
    the source directory's rdirs for non-duplicating variant dirs."""
    if self.srcdir and not self.duplicate:
        return self.srcdir.get_all_rdirs() + self.repositories
    return self.repositories
@SCons.Memoize.CountMethodCall
def get_all_rdirs(self):
    """Return (memoized) this directory plus the corresponding
    directory in every repository, walking up the tree."""
    try:
        return list(self._memo['get_all_rdirs'])
    except KeyError:
        pass

    result = [self]
    rel_name = '.'
    node = self
    while node:
        for rep in node.getRepositories():
            result.append(rep.Dir(rel_name))
        # Extend the relative name as we climb toward the root.
        if rel_name == '.':
            rel_name = node.name
        else:
            rel_name = node.name + OS_SEP + rel_name
        node = node.up()

    self._memo['get_all_rdirs'] = list(result)
    return result
def addRepository(self, dir):
    # Register *dir* as a repository for this directory (once).
    if dir != self and dir not in self.repositories:
        self.repositories.append(dir)
        # NOTE(review): the repository's target path is forced to
        # '.' here - presumably so it prints relative; confirm.
        dir._tpath = '.'
        self.__clearRepositoryCache()
def up(self):
    """Return this directory's parent directory Node."""
    return self.dir
def _rel_path_key(self, other):
    """Memoization key for rel_path(): the other node's string form."""
    return str(other)
@SCons.Memoize.CountDictCall(_rel_path_key)
def rel_path(self, other):
    """Return a path to "other" relative to this directory.
    """

    # This complicated and expensive method, which constructs relative
    # paths between arbitrary Node.FS objects, is no longer used
    # by SCons itself.  It was introduced to store dependency paths
    # in .sconsign files relative to the target, but that ended up
    # being significantly inefficient.
    #
    # We're continuing to support the method because some SConstruct
    # files out there started using it when it was available, and
    # we're all about backwards compatibility..
    try:
        memo_dict = self._memo['rel_path']
    except KeyError:
        memo_dict = {}
        self._memo['rel_path'] = memo_dict
    else:
        try:
            return memo_dict[other]
        except KeyError:
            pass

    if self is other:
        result = '.'

    elif other not in self._path_elements:
        # "other" is not on the path from the root to us: express it
        # relative to its own directory, recursively.
        try:
            other_dir = other.get_dir()
        except AttributeError:
            result = str(other)
        else:
            if other_dir is None:
                result = other.name
            else:
                dir_rel_path = self.rel_path(other_dir)
                if dir_rel_path == '.':
                    result = other.name
                else:
                    result = dir_rel_path + OS_SEP + other.name
    else:
        # "other" is an ancestor: climb with '..' components.
        i = self._path_elements.index(other) + 1

        path_elems = ['..'] * (len(self._path_elements) - i) \
                     + [n.name for n in other._path_elements[i:]]

        result = OS_SEP.join(path_elems)

    memo_dict[other] = result

    return result
def get_env_scanner(self, env, kw=None):
    """Return the scanner used for this directory's entries.

    *env* and *kw* are accepted for interface compatibility but are
    never read.  The mutable default argument ``kw={}`` was replaced
    with ``None`` (shared-mutable-default pitfall); since the value
    is unused, behavior is unchanged for all callers.
    """
    import SCons.Defaults
    return SCons.Defaults.DirEntryScanner
def get_target_scanner(self):
    """Return the scanner used when this directory is a target."""
    import SCons.Defaults
    return SCons.Defaults.DirEntryScanner
def get_found_includes(self, env, scanner, path):
    """Return this directory's implicit dependencies.

    We don't bother caching the results because the scan typically
    shouldn't be requested more than once (as opposed to scanning
    .h file contents, which can be requested as many times as the
    files is #included by other files).
    """
    if not scanner:
        return []
    # Clear cached info for this Dir.  If we already visited this
    # directory on our walk down the tree (because we didn't know at
    # that point it was being used as the source for another Node)
    # then we may have calculated build signature before realizing
    # we had to scan the disk.  Now that we have to, though, we need
    # to invalidate the old calculated signature so that any node
    # dependent on our directory structure gets one that includes
    # info about everything on disk.
    self.clear()
    return scanner(self, env, path)
#
# Taskmaster interface subsystem
#
def prepare(self):
    """Directories need no preparation before building; no-op."""
    pass
def build(self, **kw):
    """A null "builder" for directories.

    Only delegates to the real Node build when a user-supplied
    builder (i.e. not the default MkdirBuilder) is attached.
    """
    global MkdirBuilder
    if self.builder is MkdirBuilder:
        return
    SCons.Node.Node.build(self, **kw)
#
#
#
def _create(self):
    """Create this directory, silently and without worrying about
    whether the builder is the default or not."""
    # Collect the chain of not-yet-existing ancestors, deepest last.
    listDirs = []
    parent = self
    while parent:
        if parent.exists():
            break
        listDirs.append(parent)
        p = parent.up()
        if p is None:
            # Don't use while: - else: for this condition because
            # if so, then parent is None and has no .path attribute.
            raise SCons.Errors.StopError(parent._path)
        parent = p
    listDirs.reverse()
    for dirnode in listDirs:
        try:
            # Don't call dirnode.build(), call the base Node method
            # directly because we definitely *must* create this
            # directory.  The dirnode.build() method will suppress
            # the build if it's the default builder.
            SCons.Node.Node.build(dirnode)
            dirnode.get_executor().nullify()
            # The build() action may or may not have actually
            # created the directory, depending on whether the -n
            # option was used or not.  Delete the _exists and
            # _rexists attributes so they can be reevaluated.
            dirnode.clear()
        except OSError:
            # Best-effort: a racing process may have created it.
            pass
def multiple_side_effect_has_builder(self):
    """True only when a non-default (non-Mkdir) builder is attached."""
    global MkdirBuilder
    return self.builder is not MkdirBuilder and self.has_builder()
def alter_targets(self):
    """Return any corresponding targets in a variant directory.
    """
    return self.fs.variant_dir_target_climb(self, self, [])
def scanner_key(self):
    """A directory does not get scanned."""
    return None
def get_text_contents(self):
    """We already emit things in text, so just return the binary
    version."""
    return self.get_contents()
def get_contents(self):
    """Return content signatures and names of all our children
    separated by new-lines.  Ensure that the nodes are sorted."""
    return SCons.Node._get_contents_map[self._func_get_contents](self)
def get_csig(self):
    """Compute the content signature for Directory nodes.

    In general this is not needed and the signature is not stored in
    the DirNodeInfo; but when get_contents() is called on a Dir that
    has a child directory, that child must hash its own contents.
    """
    return SCons.Util.MD5signature(self.get_contents())
def do_duplicate(self, src):
    """Duplicating a directory is a no-op."""
    pass
def is_up_to_date(self):
    """If any child is not up-to-date, then this directory isn't,
    either."""
    if self.builder is not MkdirBuilder and not self.exists():
        return 0
    up_to_date = SCons.Node.up_to_date
    if any(kid.get_state() > up_to_date for kid in self.children()):
        return 0
    return 1
def rdir(self):
    # Return the repository-equivalent directory when this one does
    # not exist on disk; otherwise return self.
    if not self.exists():
        norm_name = _my_normcase(self.name)
        for dir in self.dir.get_all_rdirs():
            try: node = dir.entries[norm_name]
            except KeyError: node = dir.dir_on_disk(self.name)
            if node and node.exists() and \
               (isinstance(dir, Dir) or isinstance(dir, Entry)):
                return node
    return self
def sconsign(self):
    """Return the .sconsign file info for this directory. """
    return _sconsign_map[self._func_sconsign](self)
def srcnode(self):
    """Dir has a special need for srcnode()...if we
    have a srcdir attribute set, then that *is* our srcnode."""
    return self.srcdir if self.srcdir else Base.srcnode(self)
def get_timestamp(self):
    """Return the latest timestamp from among our children"""
    latest = 0
    for kid in self.children():
        latest = max(latest, kid.get_timestamp())
    return latest
def get_abspath(self):
    """Return this directory's cached absolute path."""
    return self._abspath
def get_labspath(self):
    """Return the cached lookup-absolute path (no drive letter)."""
    return self._labspath
def get_internal_path(self):
    """Return the cached internal (relative) path string."""
    return self._path
def get_tpath(self):
    """Return the cached target-path string."""
    return self._tpath
def get_path_elements(self):
    """Return the list of Nodes from the root down to this directory."""
    return self._path_elements
def entry_abspath(self, name):
    """Absolute path of the child entry *name*."""
    return self._abspath + OS_SEP + name
def entry_labspath(self, name):
    """Lookup-absolute path of the child entry *name* (always '/')."""
    return self._labspath + '/' + name
def entry_path(self, name):
    """Internal (relative) path of the child entry *name*."""
    return self._path + OS_SEP + name
def entry_tpath(self, name):
    """Target path of the child entry *name*."""
    return self._tpath + OS_SEP + name
def entry_exists_on_disk(self, name):
    """ Searches through the file/dir entries of the current
    directory, and returns True if a physical entry with the given
    name could be found.

    @see rentry_exists_on_disk
    """
    try:
        d = self.on_disk_entries
    except AttributeError:
        # First call: build and cache the set of on-disk entry
        # names (normalized for case-insensitive file systems).
        d = {}
        try:
            entries = os.listdir(self._abspath)
        except OSError:
            pass
        else:
            for entry in map(_my_normcase, entries):
                d[entry] = True
        self.on_disk_entries = d
    if sys.platform == 'win32' or sys.platform == 'cygwin':
        name = _my_normcase(name)
        result = d.get(name)
        if result is None:
            # Belt-and-suspenders for Windows:  check directly for
            # 8.3 file names that don't show up in os.listdir().
            result = os.path.exists(self._abspath + OS_SEP + name)
            d[name] = result
        return result
    else:
        return name in d
def rentry_exists_on_disk(self, name):
    """ Searches through the file/dir entries of the current
    *and* all its remote directories (repos), and returns
    True if a physical entry with the given name could be found.
    The local directory (self) gets searched first, so
    repositories take a lower precedence regarding the
    searching order.

    @see entry_exists_on_disk
    """
    rentry_exists = self.entry_exists_on_disk(name)
    if not rentry_exists:
        # Search through the repository folders
        norm_name = _my_normcase(name)
        for rdir in self.get_all_rdirs():
            try:
                node = rdir.entries[norm_name]
                if node:
                    rentry_exists = True
                    break
            except KeyError:
                if rdir.entry_exists_on_disk(name):
                    rentry_exists = True
                    break
    return rentry_exists
@SCons.Memoize.CountMethodCall
def srcdir_list(self):
    """Return (memoized) the source directories linked to this
    variant directory, walking up the tree."""
    try:
        return self._memo['srcdir_list']
    except KeyError:
        pass

    result = []
    relpath = '.'
    node = self
    while node:
        if node.srcdir:
            result.append(node.srcdir.Dir(relpath))
        relpath = node.name + OS_SEP + relpath
        node = node.up()

    self._memo['srcdir_list'] = result
    return result
def srcdir_duplicate(self, name):
    # Look for *name* in our source directories; if found, either
    # duplicate it into this variant dir or return the source node.
    for dir in self.srcdir_list():
        if self.is_under(dir):
            # We shouldn't source from something in the build path;
            # variant_dir is probably under src_dir, in which case
            # we are reflecting.
            break
        if dir.entry_exists_on_disk(name):
            srcnode = dir.Entry(name).disambiguate()
            if self.duplicate:
                node = self.Entry(name).disambiguate()
                node.do_duplicate(srcnode)
                return node
            else:
                return srcnode
    return None
def _srcdir_find_file_key(self, filename):
    """Memoization key for srcdir_find_file(): the filename itself."""
    return filename
@SCons.Memoize.CountDictCall(_srcdir_find_file_key)
def srcdir_find_file(self, filename):
    # Find *filename* in this directory's rdirs, then in the rdirs
    # of every linked source directory.  Returns (node, dir) or
    # (None, None).  Memoized per filename.
    try:
        memo_dict = self._memo['srcdir_find_file']
    except KeyError:
        memo_dict = {}
        self._memo['srcdir_find_file'] = memo_dict
    else:
        try:
            return memo_dict[filename]
        except KeyError:
            pass

    def func(node):
        # Accept only File/Entry nodes that are derived or exist.
        if (isinstance(node, File) or isinstance(node, Entry)) and \
           (node.is_derived() or node.exists()):
            return node
        return None

    norm_name = _my_normcase(filename)

    for rdir in self.get_all_rdirs():
        try: node = rdir.entries[norm_name]
        except KeyError: node = rdir.file_on_disk(filename)
        else: node = func(node)
        if node:
            result = (node, self)
            memo_dict[filename] = result
            return result

    for srcdir in self.srcdir_list():
        for rdir in srcdir.get_all_rdirs():
            try: node = rdir.entries[norm_name]
            except KeyError: node = rdir.file_on_disk(filename)
            else: node = func(node)
            if node:
                result = (File(filename, self, self.fs), srcdir)
                memo_dict[filename] = result
                return result

    result = (None, None)
    memo_dict[filename] = result
    return result
def dir_on_disk(self, name):
    """Return the Dir node for *name* if a directory can be found
    there (locally or via srcdir duplication); None for files."""
    if self.entry_exists_on_disk(name):
        try:
            return self.Dir(name)
        except TypeError:
            pass
    node = self.srcdir_duplicate(name)
    return None if isinstance(node, File) else node
def file_on_disk(self, name):
    """Return the File node for *name* if a file can be found
    there (locally or via srcdir duplication); None for dirs."""
    if self.entry_exists_on_disk(name):
        try:
            return self.File(name)
        except TypeError:
            pass
    node = self.srcdir_duplicate(name)
    return None if isinstance(node, Dir) else node
def walk(self, func, arg):
    """
    Walk this directory tree by calling the specified function
    for each directory in the tree.

    This behaves like the os.path.walk() function, but for in-memory
    Node.FS.Dir objects.  The function takes the same arguments as
    the functions passed to os.path.walk():

            func(arg, dirname, fnames)

    Except that "dirname" will actually be the directory *Node*,
    not the string.  The '.' and '..' entries are excluded from
    fnames.  The fnames list may be modified in-place to filter the
    subdirectories visited or otherwise impose a specific order.
    The "arg" argument is always passed to func() and may be used
    in any way (or ignored, passing None is common).
    """
    entries = self.entries
    names = [n for n in entries.keys() if n not in ('.', '..')]
    func(arg, self, names)
    # Recurse into whatever subdirectories survived func()'s
    # possible in-place filtering of names.
    for dirname in [n for n in names if isinstance(entries[n], Dir)]:
        entries[dirname].walk(func, arg)
def glob(self, pathname, ondisk=True, source=False, strings=False, exclude=None):
    """
    Returns a list of Nodes (or strings) matching a specified
    pathname pattern.

    Pathname patterns follow UNIX shell semantics:  * matches
    any-length strings of any characters, ? matches any character,
    and [] can enclose lists or ranges of characters.  Matches do
    not span directory separators.

    The matches take into account Repositories, returning local
    Nodes if a corresponding entry exists in a Repository (either
    an in-memory Node or something on disk).

    By default, the glob() function matches entries that exist
    on-disk, in addition to in-memory Nodes.  Setting the "ondisk"
    argument to False (or some other non-true value) causes the glob()
    function to only match in-memory Nodes.  The default behavior is
    to return both the on-disk and in-memory Nodes.

    The "source" argument, when true, specifies that corresponding
    source Nodes must be returned if you're globbing in a build
    directory (initialized with VariantDir()).  The default behavior
    is to return Nodes local to the VariantDir().

    The "strings" argument, when true, returns the matches as strings,
    not Nodes.  The strings are path names relative to this directory.

    The "exclude" argument, if not None, must be a pattern or a list
    of patterns following the same UNIX shell semantics.
    Elements matching at least one pattern of this list will be
    excluded from the result.

    The underlying algorithm is adapted from the glob.glob() function
    in the Python library (but heavily modified), and uses fnmatch()
    under the covers.
    """
    dirname, basename = os.path.split(pathname)
    if not dirname:
        result = self._glob1(basename, ondisk, source, strings)
    else:
        # Renamed from 'list' to avoid shadowing the builtin.
        if has_glob_magic(dirname):
            dirs = self.glob(dirname, ondisk, source, False, exclude)
        else:
            dirs = [self.Dir(dirname, create=True)]
        result = []
        for dir in dirs:
            r = dir._glob1(basename, ondisk, source, strings)
            if strings:
                r = [os.path.join(str(dir), x) for x in r]
            result.extend(r)
    if exclude:
        excludes = []
        excludeList = SCons.Util.flatten(exclude)
        for x in excludeList:
            r = self.glob(x, ondisk, source, strings)
            excludes.extend(r)
        result = [x for x in result if not any(fnmatch.fnmatch(str(x), str(e)) for e in SCons.Util.flatten(excludes))]
    return sorted(result, key=lambda a: str(a))
def _glob1(self, pattern, ondisk=True, source=False, strings=False):
    """
    Globs for and returns a list of entry names matching a single
    pattern in this directory.

    This searches any repositories and source directories for
    corresponding entries and returns a Node (or string) relative
    to the current directory if an entry is found anywhere.

    TODO: handle pattern with no wildcard
    """
    search_dir_list = self.get_all_rdirs()
    for srcdir in self.srcdir_list():
        search_dir_list.extend(srcdir.get_all_rdirs())

    selfEntry = self.Entry
    names = []
    for dir in search_dir_list:
        # We use the .name attribute from the Node because the keys of
        # the dir.entries dictionary are normalized (that is, all upper
        # case) on case-insensitive systems like Windows.
        node_names = [ v.name for k, v in dir.entries.items()
                       if k not in ('.', '..') ]
        names.extend(node_names)
        if not strings:
            # Make sure the working directory (self) actually has
            # entries for all Nodes in repositories or variant dirs.
            for name in node_names: selfEntry(name)
        if ondisk:
            try:
                disk_names = os.listdir(dir._abspath)
            except os.error:
                # Unreadable/missing directory: skip it.
                continue
            names.extend(disk_names)
            if not strings:
                # We're going to return corresponding Nodes in
                # the local directory, so we need to make sure
                # those Nodes exist.  We only want to create
                # Nodes for the entries that will match the
                # specified pattern, though, which means we
                # need to filter the list here, even though
                # the overall list will also be filtered later,
                # after we exit this loop.
                if pattern[0] != '.':
                    disk_names = [x for x in disk_names if x[0] != '.']
                disk_names = fnmatch.filter(disk_names, pattern)
                dirEntry = dir.Entry
                for name in disk_names:
                    # Add './' before disk filename so that '#' at
                    # beginning of filename isn't interpreted.
                    name = './' + name
                    node = dirEntry(name).disambiguate()
                    n = selfEntry(name)
                    if n.__class__ != node.__class__:
                        n.__class__ = node.__class__
                        n._morph()

    names = set(names)
    if pattern[0] != '.':
        names = [x for x in names if x[0] != '.']
    names = fnmatch.filter(names, pattern)

    if strings:
        return names

    return [self.entries[_my_normcase(n)] for n in names]
class RootDir(Dir):
    """A class for the root directory of a file system.

    This is the same as a Dir class, except that the path separator
    ('/' or '\\') is actually part of the name, so we don't need to
    add a separator when creating the path names of entries within
    this directory.
    """

    __slots__ = ('_lookupDict', )

    def __init__(self, drive, fs):
        if SCons.Debug.track_instances: logInstanceCreation(self, 'Node.FS.RootDir')
        SCons.Node.Node.__init__(self)

        # Handle all the types of drives:
        if drive == '':
            # No drive, regular UNIX root or Windows default drive.
            name = OS_SEP
            dirname = OS_SEP
        elif drive == '//':
            # UNC path
            name = UNC_PREFIX
            dirname = UNC_PREFIX
        else:
            # Windows drive letter
            name = drive
            dirname = drive + OS_SEP

        # Filename with extension as it was specified when the object was
        # created; to obtain filesystem path, use Python str() function
        self.name = SCons.Util.silent_intern(name)
        self.fs = fs #: Reference to parent Node.FS object

        self._path_elements = [self]
        self.dir = self
        self._func_rexists = 2
        self._func_target_from_source = 1
        self.store_info = 1

        # Now set our paths to what we really want them to be.  The
        # name should already contain any necessary separators, such
        # as the initial drive letter (the name) plus the directory
        # separator, except for the "lookup abspath," which does not
        # have the drive letter.
        self._abspath = dirname
        self._labspath = ''
        self._path = dirname
        self._tpath = dirname
        self.dirname = dirname

        self._morph()

        self.duplicate = 0
        self._lookupDict = {}

        self._lookupDict[''] = self
        self._lookupDict['/'] = self
        self.root = self
        # The // entry is necessary because os.path.normpath()
        # preserves double slashes at the beginning of a path on Posix
        # platforms.
        if not has_unc:
            self._lookupDict['//'] = self
def _morph(self):
    """Turn a file system Node (either a freshly initialized directory
    object or a separate Entry object) into a proper directory object.

    Set up this directory's entries and hook it into the file
    system tree.  Specify that directories (this Node) don't use
    signatures for calculating whether they're current.

    NOTE(review): this largely mirrors Dir._morph but skips the
    path-attribute setup, which RootDir.__init__ does itself.
    """
    self.repositories = []
    self.srcdir = None

    self.entries = {}
    self.entries['.'] = self
    self.entries['..'] = self.dir
    self.cwd = self
    self.searched = 0
    self._sconsign = None
    self.variant_dirs = []
    self.changed_since_last_build = 3
    self._func_sconsign = 1
    self._func_exists = 2
    self._func_get_contents = 2

    # Don't just reset the executor, replace its action list,
    # because it might have some pre-or post-actions that need to
    # be preserved.
    #
    # But don't reset the executor if there is a non-null executor
    # attached already. The existing executor might have other
    # targets, in which case replacing the action list with a
    # Mkdir action is a big mistake.
    if not hasattr(self, 'executor'):
        self.builder = get_MkdirBuilder()
        self.get_executor().set_action_list(self.builder.action)
    else:
        # Prepend MkdirBuilder action to existing action list
        l = self.get_executor().action_list
        a = get_MkdirBuilder().action
        l.insert(0, a)
        self.get_executor().set_action_list(l)
def must_be_same(self, klass):
if klass is Dir:
return
Base.must_be_same(self, klass)
    def _lookup_abs(self, p, klass, create=1):
        """
        Fast (?) lookup of a *normalized* absolute path.

        This method is intended for use by internal lookups with
        already-normalized path data.  For general-purpose lookups,
        use the FS.Entry(), FS.Dir() or FS.File() methods.

        The caller is responsible for making sure we're passed a
        normalized absolute path; we merely let Python's dictionary look
        up and return the One True Node.FS object for the path.

        If a Node for the specified "p" doesn't already exist, and
        "create" is specified, the Node may be created after recursive
        invocation to find or create the parent directory or directories.

        Raises SCons.Errors.UserError when the path has no Node and
        create is false.
        """
        k = _my_normcase(p)
        try:
            result = self._lookupDict[k]
        except KeyError:
            if not create:
                msg = "No such file or directory: '%s' in '%s' (and create is False)" % (p, str(self))
                raise SCons.Errors.UserError(msg)
            # There is no Node for this path name, and we're allowed
            # to create it.
            dir_name, file_name = p.rsplit('/',1)
            # Recurse to find or create the parent directory Node first.
            dir_node = self._lookup_abs(dir_name, Dir)
            result = klass(file_name, dir_node, self.fs)
            # Double-check on disk (as configured) that the Node we
            # created matches whatever is out there in the real world.
            result.diskcheck_match()
            self._lookupDict[k] = result
            dir_node.entries[_my_normcase(file_name)] = result
            # The parent gained an entry; drop its cached implicits.
            dir_node.implicit = None
        else:
            # There is already a Node for this path name.  Allow it to
            # complain if we were looking for an inappropriate type.
            result.must_be_same(klass)
        return result
def __str__(self):
return self._abspath
def entry_abspath(self, name):
return self._abspath + name
def entry_labspath(self, name):
return '/' + name
def entry_path(self, name):
return self._path + name
def entry_tpath(self, name):
return self._tpath + name
def is_under(self, dir):
if self is dir:
return 1
else:
return 0
def up(self):
return None
def get_dir(self):
return None
def src_builder(self):
return _null
class FileNodeInfo(SCons.Node.NodeInfoBase):
    """Signature information recorded for a File node.

    Holds the content signature (csig), modification timestamp and size
    describing one observed state of a file.  Instances are pickled into
    .sconsign files, hence the explicit __getstate__/__setstate__ below.
    """
    __slots__ = ('csig', 'timestamp', 'size')
    current_version_id = 2
    field_list = ['csig', 'timestamp', 'size']
    # This should get reset by the FS initialization.
    fs = None
    def str_to_node(self, s):
        """Convert a stored path string back into an Entry Node.

        Relative paths are resolved against the top-level directory;
        on split-drive platforms a leading drive selects its root.
        """
        top = self.fs.Top
        root = top.root
        if do_splitdrive:
            drive, s = _my_splitdrive(s)
            if drive:
                root = self.fs.get_root(drive)
        if not os.path.isabs(s):
            s = top.get_labspath() + '/' + s
        return root._lookup_abs(s, Entry)
    def __getstate__(self):
        """
        Return all fields that shall be pickled. Walk the slots in the class
        hierarchy and add those to the state dictionary. If a '__dict__' slot is
        available, copy all entries to the dictionary. Also include the version
        id, which is fixed for all instances of a class.
        """
        state = getattr(self, '__dict__', {}).copy()
        for obj in type(self).mro():
            for name in getattr(obj, '__slots__', ()):
                if hasattr(self, name):
                    state[name] = getattr(self, name)
        state['_version_id'] = self.current_version_id
        try:
            # A __weakref__ slot must never be pickled.
            del state['__weakref__']
        except KeyError:
            pass
        return state
    def __setstate__(self, state):
        """
        Restore the attributes from a pickled state.
        """
        # TODO check or discard version
        del state['_version_id']
        for key, value in state.items():
            if key not in ('__weakref__',):
                setattr(self, key, value)
    def __eq__(self, other):
        # NOTE: attribute access raises AttributeError if 'other' lacks
        # csig/timestamp/size; callers compare like-typed infos.
        return self.csig == other.csig and self.timestamp == other.timestamp and self.size == other.size
    def __ne__(self, other):
        return not self.__eq__(other)
class FileBuildInfo(SCons.Node.BuildInfoBase):
    """
    This is info loaded from sconsign.

    Attributes unique to FileBuildInfo:
        dependency_map : Caches file->csig mapping
            for all dependencies.  Currently this is only used when using
            MD5-timestamp decider.
            It's used to ensure that we copy the correct
            csig from previous build to be written to .sconsign when current build
            is done.  Previously the matching of csig to file was strictly by order
            they appeared in bdepends, bsources, or bimplicit, and so a change in order
            or count of any of these could yield writing wrong csig, and then false
            positive rebuilds.
    """
    __slots__ = ['dependency_map', ]
    current_version_id = 2
    def __setattr__(self, key, value):
        # If any attributes are changed in FileBuildInfo, we need to
        # invalidate the cached map of file name to content signature
        # held in dependency_map.  Currently only used with
        # MD5-timestamp decider.
        if key != 'dependency_map' and hasattr(self, 'dependency_map'):
            del self.dependency_map
        return super(FileBuildInfo, self).__setattr__(key, value)
    def convert_to_sconsign(self):
        """
        Converts this FileBuildInfo object for writing to a .sconsign file

        This replaces each Node in our various dependency lists with its
        usual string representation: relative to the top-level SConstruct
        directory, or an absolute path if it's outside.
        """
        if os_sep_is_slash:
            node_to_str = str
        else:
            # Non-POSIX separator: normalize stored paths to '/' form.
            def node_to_str(n):
                try:
                    s = n.get_internal_path()
                except AttributeError:
                    s = str(n)
                else:
                    s = s.replace(OS_SEP, '/')
                return s
        for attr in ['bsources', 'bdepends', 'bimplicit']:
            try:
                val = getattr(self, attr)
            except AttributeError:
                pass
            else:
                setattr(self, attr, list(map(node_to_str, val)))
    def convert_from_sconsign(self, dir, name):
        """
        Converts a newly-read FileBuildInfo object for in-SCons use

        For normal up-to-date checking, we don't have any conversion to
        perform--but we're leaving this method here to make that clear.
        """
        pass
    def prepare_dependencies(self):
        """
        Prepares a FileBuildInfo object for explaining what changed

        The bsources, bdepends and bimplicit lists have all been
        stored on disk as paths relative to the top-level SConstruct
        directory.  Convert the strings to actual Nodes (for use by the
        --debug=explain code and --implicit-cache).
        """
        attrs = [
            ('bsources', 'bsourcesigs'),
            ('bdepends', 'bdependsigs'),
            ('bimplicit', 'bimplicitsigs'),
        ]
        for (nattr, sattr) in attrs:
            try:
                strings = getattr(self, nattr)
                nodeinfos = getattr(self, sattr)
            except AttributeError:
                continue
            if strings is None or nodeinfos is None:
                continue
            nodes = []
            for s, ni in zip(strings, nodeinfos):
                if not isinstance(s, SCons.Node.Node):
                    # Let the paired NodeInfo resolve the string to a Node.
                    s = ni.str_to_node(s)
                nodes.append(s)
            setattr(self, nattr, nodes)
    def format(self, names=0):
        """Return a human-readable summary: one 'child: signature' line
        per dependency, then the action signature and action string."""
        result = []
        bkids = self.bsources + self.bdepends + self.bimplicit
        bkidsigs = self.bsourcesigs + self.bdependsigs + self.bimplicitsigs
        for bkid, bkidsig in zip(bkids, bkidsigs):
            result.append(str(bkid) + ': ' +
                          ' '.join(bkidsig.format(names=names)))
        if not hasattr(self,'bact'):
            self.bact = "none"
        result.append('%s [%s]' % (self.bactsig, self.bact))
        return '\n'.join(result)
class File(Base):
    """A class for files in a file system.
    """
    # Slots cover File-specific state plus attributes shared with Dir,
    # since a Node may start life as a generic Entry and be morphed later.
    __slots__ = ['scanner_paths',
                 'cachedir_csig',
                 'cachesig',
                 'repositories',
                 'srcdir',
                 'entries',
                 'searched',
                 '_sconsign',
                 'variant_dirs',
                 'root',
                 'dirname',
                 'on_disk_entries',
                 'released_target_info',
                 'contentsig']
    NodeInfo = FileNodeInfo
    BuildInfo = FileBuildInfo
    # Chunk size for hashing, in KiB (multiplied by 1024 at the call site).
    md5_chunksize = 64
def diskcheck_match(self):
diskcheck_match(self, self.isdir,
"Directory %s found where file expected.")
    def __init__(self, name, directory, fs):
        """Create a File node 'name' in Dir 'directory' of file system 'fs'."""
        if SCons.Debug.track_instances: logInstanceCreation(self, 'Node.FS.File')
        Base.__init__(self, name, directory, fs)
        self._morph()
def Entry(self, name):
"""Create an entry node named 'name' relative to
the directory of this file."""
return self.dir.Entry(name)
def Dir(self, name, create=True):
"""Create a directory node named 'name' relative to
the directory of this file."""
return self.dir.Dir(name, create=create)
def Dirs(self, pathlist):
"""Create a list of directories relative to the SConscript
directory of this file."""
return [self.Dir(p) for p in pathlist]
def File(self, name):
"""Create a file node named 'name' relative to
the directory of this file."""
return self.dir.File(name)
    def _morph(self):
        """Turn a file system node into a File object."""
        self.scanner_paths = {}
        if not hasattr(self, '_local'):
            self._local = 0
        if not hasattr(self, 'released_target_info'):
            self.released_target_info = False
        self.store_info = 1
        # Indices selecting the File-specific implementations in the
        # SCons.Node dispatch maps (see SCons.Node._exists_map and
        # SCons.Node._get_contents_map usage elsewhere in this class).
        self._func_exists = 4
        self._func_get_contents = 3
        # Initialize this Node's decider function to decide_source() because
        # every file is a source file until it has a Builder attached...
        self.changed_since_last_build = 4
        # If there was already a Builder set on this entry, then
        # we need to make sure we call the target-decider function,
        # not the source-decider.  Reaching in and doing this by hand
        # is a little bogus.  We'd prefer to handle this by adding
        # an Entry.builder_set() method that disambiguates like the
        # other methods, but that starts running into problems with the
        # fragile way we initialize Dir Nodes with their Mkdir builders,
        # yet still allow them to be overridden by the user.  Since it's
        # not clear right now how to fix that, stick with what works
        # until it becomes clear...
        if self.has_builder():
            self.changed_since_last_build = 5
def scanner_key(self):
return self.get_suffix()
def get_contents(self):
return SCons.Node._get_contents_map[self._func_get_contents](self)
def get_text_contents(self):
"""
This attempts to figure out what the encoding of the text is
based upon the BOM bytes, and then decodes the contents so that
it's a valid python string.
"""
contents = self.get_contents()
# The behavior of various decode() methods and functions
# w.r.t. the initial BOM bytes is different for different
# encodings and/or Python versions. ('utf-8' does not strip
# them, but has a 'utf-8-sig' which does; 'utf-16' seems to
# strip them; etc.) Just sidestep all the complication by
# explicitly stripping the BOM before we decode().
if contents[:len(codecs.BOM_UTF8)] == codecs.BOM_UTF8:
return contents[len(codecs.BOM_UTF8):].decode('utf-8')
if contents[:len(codecs.BOM_UTF16_LE)] == codecs.BOM_UTF16_LE:
return contents[len(codecs.BOM_UTF16_LE):].decode('utf-16-le')
if contents[:len(codecs.BOM_UTF16_BE)] == codecs.BOM_UTF16_BE:
return contents[len(codecs.BOM_UTF16_BE):].decode('utf-16-be')
try:
return contents.decode('utf-8')
except UnicodeDecodeError as e:
try:
return contents.decode('latin-1')
except UnicodeDecodeError as e:
return contents.decode('utf-8', error='backslashreplace')
    def get_content_hash(self):
        """
        Compute and return the MD5 hash for this file.

        Nonexistent files hash as the signature of the empty string.
        The file is read via its repository file in md5_chunksize-KiB
        chunks so large files are never held in memory whole.
        """
        if not self.rexists():
            return SCons.Util.MD5signature('')
        fname = self.rfile().get_abspath()
        try:
            cs = SCons.Util.MD5filesignature(fname,
                chunksize=SCons.Node.FS.File.md5_chunksize*1024)
        except EnvironmentError as e:
            if not e.filename:
                # Make sure the propagated error names the file we read.
                e.filename = fname
            raise
        return cs
    @SCons.Memoize.CountMethodCall
    def get_size(self):
        """Return the size of the (repository) file, memoized.

        Nonexistent files report size 0.
        """
        try:
            return self._memo['get_size']
        except KeyError:
            pass
        if self.rexists():
            size = self.rfile().getsize()
        else:
            size = 0
        self._memo['get_size'] = size
        return size
    @SCons.Memoize.CountMethodCall
    def get_timestamp(self):
        """Return the modification time of the (repository) file, memoized.

        Nonexistent files report timestamp 0.
        """
        try:
            return self._memo['get_timestamp']
        except KeyError:
            pass
        if self.rexists():
            timestamp = self.rfile().getmtime()
        else:
            timestamp = 0
        self._memo['get_timestamp'] = timestamp
        return timestamp
convert_copy_attrs = [
'bsources',
'bimplicit',
'bdepends',
'bact',
'bactsig',
'ninfo',
]
convert_sig_attrs = [
'bsourcesigs',
'bimplicitsigs',
'bdependsigs',
]
    def convert_old_entry(self, old_entry):
        """Convert a pre-refactoring .sconsign entry to the current format.

        Returns a new SCons.SConsign.SConsignEntry populated from
        'old_entry'; the converted attributes are removed from 'old_entry'
        as they are migrated.
        """
        # Convert a .sconsign entry from before the Big Signature
        # Refactoring, doing what we can to convert its information
        # to the new .sconsign entry format.
        #
        # The old format looked essentially like this:
        #
        #   BuildInfo
        #       .ninfo (NodeInfo)
        #           .bsig
        #           .csig
        #           .timestamp
        #           .size
        #       .bsources
        #       .bsourcesigs ("signature" list)
        #       .bdepends
        #       .bdependsigs ("signature" list)
        #       .bimplicit
        #       .bimplicitsigs ("signature" list)
        #       .bact
        #       .bactsig
        #
        # The new format looks like this:
        #
        #   .ninfo (NodeInfo)
        #       .bsig
        #       .csig
        #       .timestamp
        #       .size
        #   .binfo (BuildInfo)
        #       .bsources
        #       .bsourcesigs (NodeInfo list)
        #           .bsig
        #           .csig
        #           .timestamp
        #           .size
        #       .bdepends
        #       .bdependsigs (NodeInfo list)
        #           .bsig
        #           .csig
        #           .timestamp
        #           .size
        #       .bimplicit
        #       .bimplicitsigs (NodeInfo list)
        #           .bsig
        #           .csig
        #           .timestamp
        #           .size
        #       .bact
        #       .bactsig
        #
        # The basic idea of the new structure is that a NodeInfo always
        # holds all available information about the state of a given Node
        # at a certain point in time.  The various .b*sigs lists can just
        # be a list of pointers to the .ninfo attributes of the different
        # dependent nodes, without any copying of information until it's
        # time to pickle it for writing out to a .sconsign file.
        #
        # The complicating issue is that the *old* format only stored one
        # "signature" per dependency, based on however the *last* build
        # was configured.  We don't know from just looking at it whether
        # it was a build signature, a content signature, or a timestamp
        # "signature".  Since we no longer use build signatures, the
        # best we can do is look at the length and if it's thirty two,
        # assume that it was (or might have been) a content signature.
        # If it was actually a build signature, then it will cause a
        # rebuild anyway when it doesn't match the new content signature,
        # but that's probably the best we can do.
        import SCons.SConsign
        new_entry = SCons.SConsign.SConsignEntry()
        new_entry.binfo = self.new_binfo()
        binfo = new_entry.binfo
        for attr in self.convert_copy_attrs:
            try:
                value = getattr(old_entry, attr)
            except AttributeError:
                continue
            setattr(binfo, attr, value)
            delattr(old_entry, attr)
        for attr in self.convert_sig_attrs:
            try:
                sig_list = getattr(old_entry, attr)
            except AttributeError:
                continue
            value = []
            for sig in sig_list:
                ninfo = self.new_ninfo()
                if len(sig) == 32:
                    # Length 32 == MD5 hex digest: treat as a csig.
                    ninfo.csig = sig
                else:
                    ninfo.timestamp = sig
                value.append(ninfo)
            setattr(binfo, attr, value)
            delattr(old_entry, attr)
        return new_entry
    @SCons.Memoize.CountMethodCall
    def get_stored_info(self):
        """Return the SConsignEntry recorded for this file, memoized.

        Missing or unreadable .sconsign data yields a fresh, empty entry;
        entries written before the Big Signature Refactoring are converted
        to the current format first.
        """
        try:
            return self._memo['get_stored_info']
        except KeyError:
            pass
        try:
            sconsign_entry = self.dir.sconsign().get_entry(self.name)
        except (KeyError, EnvironmentError):
            import SCons.SConsign
            sconsign_entry = SCons.SConsign.SConsignEntry()
            sconsign_entry.binfo = self.new_binfo()
            sconsign_entry.ninfo = self.new_ninfo()
        else:
            if isinstance(sconsign_entry, FileBuildInfo):
                # This is a .sconsign file from before the Big Signature
                # Refactoring; convert it as best we can.
                sconsign_entry = self.convert_old_entry(sconsign_entry)
            try:
                # Build signatures are obsolete; drop any stored one.
                delattr(sconsign_entry.ninfo, 'bsig')
            except AttributeError:
                pass
        self._memo['get_stored_info'] = sconsign_entry
        return sconsign_entry
def get_stored_implicit(self):
binfo = self.get_stored_info().binfo
binfo.prepare_dependencies()
try: return binfo.bimplicit
except AttributeError: return None
def rel_path(self, other):
return self.dir.rel_path(other)
def _get_found_includes_key(self, env, scanner, path):
return (id(env), id(scanner), path)
    @SCons.Memoize.CountDictCall(_get_found_includes_key)
    def get_found_includes(self, env, scanner, path):
        """Return the included implicit dependencies in this file.

        Cache results so we only scan the file once per path
        regardless of how many times this information is requested.
        """
        # Must match _get_found_includes_key() above.
        memo_key = (id(env), id(scanner), path)
        try:
            memo_dict = self._memo['get_found_includes']
        except KeyError:
            memo_dict = {}
            self._memo['get_found_includes'] = memo_dict
        else:
            try:
                return memo_dict[memo_key]
            except KeyError:
                pass
        if scanner:
            # Each scanned node is disambiguated (Entry -> Dir/File).
            result = [n.disambiguate() for n in scanner(self, env, path)]
        else:
            result = []
        memo_dict[memo_key] = result
        return result
def _createDir(self):
# ensure that the directories for this node are
# created.
self.dir._create()
    def push_to_cache(self):
        """Try to push the node into a cache
        """
        # This should get called before the Nodes' .built() method is
        # called, which would clear the build signature if the file has
        # a source scanner.
        #
        # We have to clear the local memoized values *before* we push
        # the node to cache so that the memoization of the self.exists()
        # return value doesn't interfere.
        if self.nocache:
            return
        self.clear_memoized_values()
        if self.exists():
            self.get_build_env().get_CacheDir().push(self)
def retrieve_from_cache(self):
"""Try to retrieve the node's content from a cache
This method is called from multiple threads in a parallel build,
so only do thread safe stuff here. Do thread unsafe stuff in
built().
Returns true if the node was successfully retrieved.
"""
if self.nocache:
return None
if not self.is_derived():
return None
return self.get_build_env().get_CacheDir().retrieve(self)
    def visited(self):
        """Called when the Taskmaster visits this node: push to the cache
        if forced, refresh our NodeInfo (csig/timestamp/size), and store
        the build info via the configured store_info dispatch entry."""
        if self.exists() and self.executor is not None:
            self.get_build_env().get_CacheDir().push_if_forced(self)
        ninfo = self.get_ninfo()
        csig = self.get_max_drift_csig()
        if csig:
            ninfo.csig = csig
        ninfo.timestamp = self.get_timestamp()
        ninfo.size      = self.get_size()
        if not self.has_builder():
            # This is a source file, but it might have been a target file
            # in another build that included more of the DAG.  Copy
            # any build information that's stored in the .sconsign file
            # into our binfo object so it doesn't get lost.
            old = self.get_stored_info()
            self.get_binfo().merge(old.binfo)
        SCons.Node.store_info_map[self.store_info](self)
    def release_target_info(self):
        """Called just after this node has been marked
        up-to-date or was built completely.

        This is where we try to release as many target node infos
        as possible for clean builds and update runs, in order
        to minimize the overall memory consumption.

        We'd like to remove a lot more attributes like self.sources
        and self.sources_set, but they might get used
        in a next build step.  For example, during configuration
        the source files for a built E{*}.o file are used to figure out
        which linker to use for the resulting Program (gcc vs. g++)!
        That's why we check for the 'keep_targetinfo' attribute,
        config Nodes and the Interactive mode just don't allow
        an early release of most variables.

        In the same manner, we can't simply remove the self.attributes
        here.  The smart linking relies on the shared flag, and some
        parts of the java Tool use it to transport information
        about nodes...

        @see: built() and Node.release_target_info()
        """
        if (self.released_target_info or SCons.Node.interactive):
            return
        if not hasattr(self.attributes, 'keep_targetinfo'):
            # Cache some required values, before releasing
            # stuff like env, executor and builder...
            self.changed(allowcache=True)
            self.get_contents_sig()
            self.get_build_env()
            # Now purge unneeded stuff to free memory...
            self.executor = None
            self._memo.pop('rfile', None)
            self.prerequisites = None
            # Cleanup lists, but only if they're empty
            if not len(self.ignore_set):
                self.ignore_set = None
            if not len(self.implicit_set):
                self.implicit_set = None
            if not len(self.depends_set):
                self.depends_set = None
            if not len(self.ignore):
                self.ignore = None
            if not len(self.depends):
                self.depends = None
            # Mark this node as done, we only have to release
            # the memory once...
            self.released_target_info = True
def find_src_builder(self):
if self.rexists():
return None
scb = self.dir.src_builder()
if scb is _null:
scb = None
if scb is not None:
try:
b = self.builder
except AttributeError:
b = None
if b is None:
self.builder_set(scb)
return scb
def has_src_builder(self):
"""Return whether this Node has a source builder or not.
If this Node doesn't have an explicit source code builder, this
is where we figure out, on the fly, if there's a transparent
source code builder for it.
Note that if we found a source builder, we also set the
self.builder attribute, so that all of the methods that actually
*build* this file don't have to do anything different.
"""
try:
scb = self.sbuilder
except AttributeError:
scb = self.sbuilder = self.find_src_builder()
return scb is not None
def alter_targets(self):
"""Return any corresponding targets in a variant directory.
"""
if self.is_derived():
return [], None
return self.fs.variant_dir_target_climb(self, self.dir, [self.name])
    def _rmv_existing(self):
        """Remove the existing on-disk file (via the Unlink action) so a
        fresh target can be built/duplicated in its place."""
        self.clear_memoized_values()
        if SCons.Node.print_duplicate:
            print("dup: removing existing target {}".format(self))
        e = Unlink(self, [], None)
        if isinstance(e, SCons.Errors.BuildError):
            raise e
#
# Taskmaster interface subsystem
#
    def make_ready(self):
        """Taskmaster interface: resolve any transparent source builder
        first (it may attach self.builder), then cache the build info."""
        self.has_src_builder()
        self.get_binfo()
    def prepare(self):
        """Prepare for this file to be created."""
        SCons.Node.Node.prepare(self)
        if self.get_state() != SCons.Node.up_to_date:
            if self.exists():
                if self.is_derived() and not self.precious:
                    # Stale derived output: remove before rebuilding.
                    self._rmv_existing()
            else:
                try:
                    self._createDir()
                except SCons.Errors.StopError as drive:
                    # NOTE(review): the caught exception is bound as
                    # 'drive' and formatted directly into the message.
                    raise SCons.Errors.StopError("No drive `{}' for target `{}'.".format(drive, self))
#
#
#
def remove(self):
"""Remove this file."""
if self.exists() or self.islink():
self.fs.unlink(self.get_internal_path())
return 1
return None
    def do_duplicate(self, src):
        """Duplicate (link or copy) 'src' to this node's path in the
        variant directory, replacing anything already there."""
        self._createDir()
        if SCons.Node.print_duplicate:
            print("dup: relinking variant '{}' from '{}'".format(self, src))
        Unlink(self, None, None)
        e = Link(self, src, None)
        if isinstance(e, SCons.Errors.BuildError):
            raise SCons.Errors.StopError("Cannot duplicate `{}' in `{}': {}.".format(src.get_internal_path(), self.dir._path, e.errstr))
        self.linked = 1
        # The Link() action may or may not have actually
        # created the file, depending on whether the -n
        # option was used or not.  Delete the _exists and
        # _rexists attributes so they can be reevaluated.
        self.clear()
    @SCons.Memoize.CountMethodCall
    def exists(self):
        """Return whether this file exists, via the dispatch-map entry
        selected by self._func_exists.  Memoized."""
        try:
            return self._memo['exists']
        except KeyError:
            pass
        result = SCons.Node._exists_map[self._func_exists](self)
        self._memo['exists'] = result
        return result
#
# SIGNATURE SUBSYSTEM
#
    def get_max_drift_csig(self):
        """
        Returns the content signature currently stored for this node
        if it's been unmodified longer than the max_drift value, or the
        max_drift value is 0.  Returns None otherwise.
        """
        old = self.get_stored_info()
        mtime = self.get_timestamp()
        max_drift = self.fs.max_drift
        if max_drift > 0:
            if (time.time() - mtime) > max_drift:
                try:
                    n = old.ninfo
                    # Only trust the stored csig if it was recorded for
                    # exactly this modification time.
                    if n.timestamp and n.csig and n.timestamp == mtime:
                        return n.csig
                except AttributeError:
                    pass
        elif max_drift == 0:
            try:
                return old.ninfo.csig
            except AttributeError:
                pass
        return None
    def get_csig(self):
        """
        Generate a node's content signature, the digested signature
        of its content.

        node - the node
        cache - alternate node to use for the signature cache
        returns - the content signature
        """
        ninfo = self.get_ninfo()
        try:
            return ninfo.csig
        except AttributeError:
            pass
        csig = self.get_max_drift_csig()
        if csig is None:
            try:
                # NOTE(review): this compares against md5_chunksize (64),
                # i.e. 64 *bytes*, not 64 KiB — presumably '*1024' was
                # intended; either path yields the same MD5, so behavior
                # is unaffected.
                if self.get_size() < SCons.Node.FS.File.md5_chunksize:
                    contents = self.get_contents()
                else:
                    csig = self.get_content_hash()
            except IOError:
                # This can happen if there's actually a directory on-disk,
                # which can be the case if they've disabled disk checks,
                # or if an action with a File target actually happens to
                # create a same-named directory by mistake.
                csig = ''
            else:
                if not csig:
                    # csig is falsy only on the small-file branch, where
                    # 'contents' was assigned above.
                    csig = SCons.Util.MD5signature(contents)
        ninfo.csig = csig
        return csig
#
# DECISION SUBSYSTEM
#
    def builder_set(self, builder):
        """Attach 'builder' and switch to decider index 5, the variant
        used for nodes with builders (see _morph)."""
        SCons.Node.Node.builder_set(self, builder)
        self.changed_since_last_build = 5
    def built(self):
        """Called just after this File node is successfully built.

        Just like for 'release_target_info' we try to release
        some more target node attributes in order to minimize the
        overall memory consumption.

        @see: release_target_info
        """
        SCons.Node.Node.built(self)
        if (not SCons.Node.interactive and
            not hasattr(self.attributes, 'keep_targetinfo')):
            # Ensure that the build infos get computed and cached...
            SCons.Node.store_info_map[self.store_info](self)
            # ... then release some more variables.
            self._specific_sources = False
            self._labspath = None
            self._save_str()
            self.cwd = None
            self.scanner_paths = None
    def changed(self, node=None, allowcache=False):
        """
        Returns if the node is up-to-date with respect to the BuildInfo
        stored last time it was built.

        For File nodes this is basically a wrapper around Node.changed(),
        but we allow the return value to get cached after the reference
        to the Executor got released in release_target_info().

        @see: Node.changed()
        """
        if node is None:
            try:
                return self._memo['changed']
            except KeyError:
                pass
        has_changed = SCons.Node.Node.changed(self, node)
        if allowcache:
            # Cache only on request (release_target_info passes True).
            self._memo['changed'] = has_changed
        return has_changed
def changed_content(self, target, prev_ni, repo_node=None):
cur_csig = self.get_csig()
try:
return cur_csig != prev_ni.csig
except AttributeError:
return 1
def changed_state(self, target, prev_ni, repo_node=None):
return self.state != SCons.Node.up_to_date
# Caching node -> string mapping for the below method
__dmap_cache = {}
__dmap_sig_cache = {}
def _build_dependency_map(self, binfo):
"""
Build mapping from file -> signature
Args:
self - self
binfo - buildinfo from node being considered
Returns:
dictionary of file->signature mappings
"""
# For an "empty" binfo properties like bsources
# do not exist: check this to avoid exception.
if (len(binfo.bsourcesigs) + len(binfo.bdependsigs) + \
len(binfo.bimplicitsigs)) == 0:
return {}
binfo.dependency_map = { child:signature for child, signature in zip(chain(binfo.bsources, binfo.bdepends, binfo.bimplicit),
chain(binfo.bsourcesigs, binfo.bdependsigs, binfo.bimplicitsigs))}
return binfo.dependency_map
# @profile
def _add_strings_to_dependency_map(self, dmap):
"""
In the case comparing node objects isn't sufficient, we'll add the strings for the nodes to the dependency map
:return:
"""
first_string = str(next(iter(dmap)))
# print("DMAP:%s"%id(dmap))
if first_string not in dmap:
string_dict = {str(child): signature for child, signature in dmap.items()}
dmap.update(string_dict)
return dmap
def _get_previous_signatures(self, dmap):
"""
Return a list of corresponding csigs from previous
build in order of the node/files in children.
Args:
self - self
dmap - Dictionary of file -> csig
Returns:
List of csigs for provided list of children
"""
prev = []
# MD5_TIMESTAMP_DEBUG = False
if len(dmap) == 0:
if MD5_TIMESTAMP_DEBUG: print("Nothing dmap shortcutting")
return None
elif MD5_TIMESTAMP_DEBUG: print("len(dmap):%d"%len(dmap))
# First try retrieving via Node
if MD5_TIMESTAMP_DEBUG: print("Checking if self is in map:%s id:%s type:%s"%(str(self), id(self), type(self)))
df = dmap.get(self, False)
if df:
return df
# Now check if self's repository file is in map.
rf = self.rfile()
if MD5_TIMESTAMP_DEBUG: print("Checking if self.rfile is in map:%s id:%s type:%s"%(str(rf), id(rf), type(rf)))
rfm = dmap.get(rf, False)
if rfm:
return rfm
# get default string for node and then also string swapping os.altsep for os.sep (/ for \)
c_strs = [str(self)]
if os.altsep:
c_strs.append(c_strs[0].replace(os.sep, os.altsep))
# In some cases the dependency_maps' keys are already strings check.
# Check if either string is now in dmap.
for s in c_strs:
if MD5_TIMESTAMP_DEBUG: print("Checking if str(self) is in map :%s" % s)
df = dmap.get(s, False)
if df:
return df
# Strings don't exist in map, add them and try again
# If there are no strings in this dmap, then add them.
# This may not be necessary, we could walk the nodes in the dmap and check each string
# rather than adding ALL the strings to dmap. In theory that would be n/2 vs 2n str() calls on node
# if not dmap.has_strings:
dmap = self._add_strings_to_dependency_map(dmap)
# In some cases the dependency_maps' keys are already strings check.
# Check if either string is now in dmap.
for s in c_strs:
if MD5_TIMESTAMP_DEBUG: print("Checking if str(self) is in map (now with strings) :%s" % s)
df = dmap.get(s, False)
if df:
return df
# Lastly use nodes get_path() to generate string and see if that's in dmap
if not df:
try:
# this should yield a path which matches what's in the sconsign
c_str = self.get_path()
if os.altsep:
c_str = c_str.replace(os.sep, os.altsep)
if MD5_TIMESTAMP_DEBUG: print("Checking if self.get_path is in map (now with strings) :%s" % s)
df = dmap.get(c_str, None)
except AttributeError as e:
raise FileBuildInfoFileToCsigMappingError("No mapping from file name to content signature for :%s"%c_str)
return df
    def changed_timestamp_then_content(self, target, prev_ni, node=None):
        """
        Used when decider for file is Timestamp-MD5

        NOTE: If the timestamp hasn't changed this will skip md5'ing the
              file and just copy the prev_ni provided.  If the prev_ni
              is wrong.  It will propagate it.
              See: https://github.com/SCons/scons/issues/2980

        Args:
            self - dependency
            target - target
            prev_ni - The NodeInfo object loaded from previous builds .sconsign
            node - Node instance.  Check this node for file existence/timestamp
                   if specified.

        Returns:
            Boolean - Indicates if node(File) has changed.
        """
        if node is None:
            node = self
        # Now get sconsign name -> csig map and then get proper prev_ni if possible
        bi = node.get_stored_info().binfo
        rebuilt = False
        try:
            dependency_map = bi.dependency_map
        except AttributeError as e:
            dependency_map = self._build_dependency_map(bi)
            rebuilt = True
        if len(dependency_map) == 0:
            # If there's no dependency map, there's no need to find the
            # prev_ni as there aren't any
            # shortcut the rest of the logic
            if MD5_TIMESTAMP_DEBUG: print("Skipping checks len(dmap)=0")
            # We still need to get the current file's csig
            # This should be slightly faster than calling self.changed_content(target, new_prev_ni)
            self.get_csig()
            return True
        new_prev_ni = self._get_previous_signatures(dependency_map)
        new = self.changed_timestamp_match(target, new_prev_ni)
        if MD5_TIMESTAMP_DEBUG:
            # Debug-only cross-check against the positional prev_ni.
            old = self.changed_timestamp_match(target, prev_ni)
            if old != new:
                print("Mismatch self.changed_timestamp_match(%s, prev_ni) old:%s new:%s"%(str(target), old, new))
                new_prev_ni = self._get_previous_signatures(dependency_map)
        if not new:
            try:
                # NOTE: We're modifying the current node's csig in a query.
                self.get_ninfo().csig = new_prev_ni.csig
            except AttributeError:
                pass
            return False
        return self.changed_content(target, new_prev_ni)
def changed_timestamp_newer(self, target, prev_ni, repo_node=None):
try:
return self.get_timestamp() > target.get_timestamp()
except AttributeError:
return 1
def changed_timestamp_match(self, target, prev_ni, repo_node=None):
"""
Return True if the timestamps don't match or if there is no previous timestamp
:param target:
:param prev_ni: Information about the node from the previous build
:return:
"""
try:
return self.get_timestamp() != prev_ni.timestamp
except AttributeError:
return 1
    def is_up_to_date(self):
        """Check for whether the Node is current

        In all cases self is the target we're checking to see if it's up to date
        """
        T = 0
        if T: Trace('is_up_to_date(%s):' % self)
        if not self.exists():
            if T: Trace(' not self.exists():')
            # The file (always a target) doesn't exist locally...
            r = self.rfile()
            if r != self:
                # ...but there is one (always a target) in a Repository...
                if not self.changed(r):
                    if T: Trace(' changed(%s):' % r)
                    # ...and it's even up-to-date...
                    if self._local:
                        # ...and they'd like a local copy.
                        e = LocalCopy(self, r, None)
                        if isinstance(e, SCons.Errors.BuildError):
                            # Likely this should be re-raising exception e
                            # (which would be BuildError)
                            raise e
                        SCons.Node.store_info_map[self.store_info](self)
                    if T: Trace(' 1\n')
                    return 1
            # Missing everywhere (or repository copy changed): not current.
            # Calling changed() primes its cache for later queries.
            self.changed()
            if T: Trace(' None\n')
            return None
        else:
            r = self.changed()
            if T: Trace(' self.exists():  %s\n' % r)
            return not r
    @SCons.Memoize.CountMethodCall
    def rfile(self):
        """Return this file, or its first existing equivalent found in a
        repository when the file doesn't exist locally.  Memoized."""
        try:
            return self._memo['rfile']
        except KeyError:
            pass
        result = self
        if not self.exists():
            norm_name = _my_normcase(self.name)
            for repo_dir in self.dir.get_all_rdirs():
                try:
                    node = repo_dir.entries[norm_name]
                except KeyError:
                    node = repo_dir.file_on_disk(self.name)
                if node and node.exists() and \
                   (isinstance(node, File) or isinstance(node, Entry)
                    or not node.is_derived()):
                        result = node
                        # Copy over our local attributes to the repository
                        # Node so we identify shared object files in the
                        # repository and don't assume they're static.
                        #
                        # This isn't perfect; the attribute would ideally
                        # be attached to the object in the repository in
                        # case it was built statically in the repository
                        # and we changed it to shared locally, but that's
                        # rarely the case and would only occur if you
                        # intentionally used the same suffix for both
                        # shared and static objects anyway.  So this
                        # should work well in practice.
                        result.attributes = self.attributes
                        break
        self._memo['rfile'] = result
        return result
def find_repo_file(self):
"""
For this node, find if there exists a corresponding file in one or more repositories
:return: list of corresponding files in repositories
"""
retvals = []
norm_name = _my_normcase(self.name)
for repo_dir in self.dir.get_all_rdirs():
try:
node = repo_dir.entries[norm_name]
except KeyError:
node = repo_dir.file_on_disk(self.name)
if node and node.exists() and \
(isinstance(node, File) or isinstance(node, Entry) \
or not node.is_derived()):
retvals.append(node)
return retvals
    def rstr(self):
        # String form of the repository-backed node (self when no repo copy).
        return str(self.rfile())
    def get_cachedir_csig(self):
        """
        Fetch a Node's content signature for purposes of computing
        another Node's cachesig.
        This is a wrapper around the normal get_csig() method that handles
        the somewhat obscure case of using CacheDir with the -n option.
        Any files that don't exist would normally be "built" by fetching
        them from the cache, but the normal get_csig() method will try
        to open up the local file, which doesn't exist because the -n
        option meant we didn't actually pull the file from cachedir.
        But since the file *does* actually exist in the cachedir, we
        can use its contents for the csig.
        """
        # Memoized on the instance as self.cachedir_csig.
        try:
            return self.cachedir_csig
        except AttributeError:
            pass
        cachedir, cachefile = self.get_build_env().get_CacheDir().cachepath(self)
        if not self.exists() and cachefile and os.path.exists(cachefile):
            # Missing locally but present in the cache: sign the cached copy.
            self.cachedir_csig = SCons.Util.MD5filesignature(cachefile, \
                SCons.Node.FS.File.md5_chunksize * 1024)
        else:
            self.cachedir_csig = self.get_csig()
        return self.cachedir_csig
def get_contents_sig(self):
"""
A helper method for get_cachedir_bsig.
It computes and returns the signature for this
node's contents.
"""
try:
return self.contentsig
except AttributeError:
pass
executor = self.get_executor()
result = self.contentsig = SCons.Util.MD5signature(executor.get_contents())
return result
def get_cachedir_bsig(self):
"""
Return the signature for a cached file, including
its children.
It adds the path of the cached file to the cache signature,
because multiple targets built by the same action will all
have the same build signature, and we have to differentiate
them somehow.
Signature should normally be string of hex digits.
"""
try:
return self.cachesig
except AttributeError:
pass
# Collect signatures for all children
children = self.children()
sigs = [n.get_cachedir_csig() for n in children]
# Append this node's signature...
sigs.append(self.get_contents_sig())
# ...and it's path
sigs.append(self.get_internal_path())
# Merge this all into a single signature
result = self.cachesig = SCons.Util.MD5collect(sigs)
return result
# Shared default FS object; created lazily by get_default_fs().
default_fs = None
def get_default_fs():
    """Return the module-wide default FS, creating it on first use."""
    global default_fs
    if not default_fs:
        default_fs = FS()
    return default_fs
class FileFinder(object):
    """Memoizing file finder: caches find_file() results per
    (filename, paths) key."""
    def __init__(self):
        self._memo = {}
    def filedir_lookup(self, p, fd=None):
        """
        A helper method for find_file() that looks up a directory for
        a file we're trying to find. This only creates the Dir Node if
        it exists on-disk, since if the directory doesn't exist we know
        we won't find any files in it... :-)
        It would be more compact to just use this as a nested function
        with a default keyword argument (see the commented-out version
        below), but that doesn't work unless you have nested scopes,
        so we define it here just so this work under Python 1.5.2.
        """
        if fd is None:
            fd = self.default_filedir
        dir, name = os.path.split(fd)
        drive, d = _my_splitdrive(dir)
        if not name and d[:1] in ('/', OS_SEP):
            #return p.fs.get_root(drive).dir_on_disk(name)
            return p.fs.get_root(drive)
        if dir:
            # Recursively resolve the parent directory first.
            p = self.filedir_lookup(p, dir)
            if not p:
                return None
        norm_name = _my_normcase(name)
        try:
            node = p.entries[norm_name]
        except KeyError:
            # Unknown entry: create the Dir node only if it is on disk.
            return p.dir_on_disk(name)
        if isinstance(node, Dir):
            return node
        if isinstance(node, Entry):
            node.must_be_same(Dir)
            return node
        return None
    def _find_file_key(self, filename, paths, verbose=None):
        # Memoization key; 'verbose' only affects logging, so it is omitted.
        return (filename, paths)
    @SCons.Memoize.CountDictCall(_find_file_key)
    def find_file(self, filename, paths, verbose=None):
        """
        Find a node corresponding to either a derived file or a file that exists already.
        Only the first file found is returned, and none is returned if no file is found.
        filename: A filename to find
        paths: A list of directory path *nodes* to search in. Can be represented as a list, a tuple, or a callable that is called with no arguments and returns the list or tuple.
        returns The node created from the found file.
        """
        memo_key = self._find_file_key(filename, paths)
        try:
            memo_dict = self._memo['find_file']
        except KeyError:
            memo_dict = {}
            self._memo['find_file'] = memo_dict
        else:
            try:
                return memo_dict[memo_key]
            except KeyError:
                pass
        # Normalize 'verbose' into a writer callable (or leave it falsy).
        if verbose and not callable(verbose):
            if not SCons.Util.is_String(verbose):
                verbose = "find_file"
            _verbose = u' %s: ' % verbose
            verbose = lambda s: sys.stdout.write(_verbose + s)
        filedir, filename = os.path.split(filename)
        if filedir:
            # The filename embeds a directory: re-anchor every search dir
            # at that sub-directory, dropping dirs where it doesn't exist.
            self.default_filedir = filedir
            paths = [_f for _f in map(self.filedir_lookup, paths) if _f]
        result = None
        for dir in paths:
            if verbose:
                verbose("looking for '%s' in '%s' ...\n" % (filename, dir))
            node, d = dir.srcdir_find_file(filename)
            if node:
                if verbose:
                    verbose("... FOUND '%s' in '%s'\n" % (filename, d))
                result = node
                break
        memo_dict[memo_key] = result
        return result
# Module-level convenience bound to a shared FileFinder instance.
find_file = FileFinder().find_file
def invalidate_node_memos(targets):
    """
    Invalidate the memoized values of all Nodes (files or directories)
    that are associated with the given entries. Has been added to
    clear the cache of nodes affected by a direct execution of an
    action (e.g. Delete/Copy/Chmod). Existing Node caches become
    inconsistent if the action is run through Execute(). The argument
    `targets` can be a single Node object or filename, or a sequence
    of Nodes/filenames.
    """
    from traceback import extract_stack
    # First check if the cache really needs to be flushed. Only
    # actions run in the SConscript with Execute() seem to be
    # affected. XXX The way to check if Execute() is in the stacktrace
    # is a very dirty hack and should be replaced by a more sensible
    # solution.
    for f in extract_stack():
        # f is (filename, lineno, funcname, text); endswith() is clearer
        # and more robust than the old fixed-width slice f[0][-14:].
        if f[2] == 'Execute' and f[0].endswith('Environment.py'):
            break
    else:
        # Don't have to invalidate, so return
        return
    if not SCons.Util.is_List(targets):
        targets = [targets]
    for entry in targets:
        # If the target is a Node object, clear the cache. If it is a
        # filename, look up potentially existing Node object first.
        try:
            entry.clear_memoized_values()
        except AttributeError:
            # Not a Node object, try to look up Node by filename. XXX
            # This creates Node objects even for those filenames which
            # do not correspond to an existing Node object.
            node = get_default_fs().Entry(entry)
            if node:
                node.clear_memoized_values()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
SCons: Allow symlinks for Python 2 on Windows prefixes too
"""scons.Node.FS
File system nodes.
These Nodes represent the canonical external objects that people think
of when they think of building software: files and directories.
This holds a "default_fs" variable that should be initialized with an FS
that can be used by scripts or modules looking for the canonical default.
"""
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
__revision__ = "src/engine/SCons/Node/FS.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import fnmatch
import os
import re
import shutil
import stat
import sys
import time
import codecs
from itertools import chain
import SCons.Action
import SCons.Debug
from SCons.Debug import logInstanceCreation
import SCons.Errors
import SCons.Memoize
import SCons.Node
import SCons.Node.Alias
import SCons.Subst
import SCons.Util
import SCons.Warnings
from SCons.Debug import Trace
# Debug flag — presumably enables diagnostics about duplicate handling;
# confirm at its use sites elsewhere in this module.
print_duplicate = 0
# Debug flag for the MD5/timestamp content-signature decider.
MD5_TIMESTAMP_DEBUG = False
def sconsign_none(node):
    """Stub sconsign accessor for node kinds that carry no .sconsign info."""
    raise NotImplementedError
def sconsign_dir(node):
    """Return the .sconsign file info for this directory,
    creating it first if necessary."""
    if node._sconsign:
        return node._sconsign
    # Imported lazily so SConsign machinery is only loaded when needed.
    import SCons.SConsign
    node._sconsign = SCons.SConsign.ForDirectory(node)
    return node._sconsign
# Maps a node's _func_sconsign code to the accessor implementation above.
_sconsign_map = {0 : sconsign_none,
                 1 : sconsign_dir}
class FileBuildInfoFileToCsigMappingError(Exception):
    # Raised for failures mapping a FileBuildInfo file entry to its csig;
    # see the raise sites elsewhere in this module.
    pass
class EntryProxyAttributeError(AttributeError):
    """
    An AttributeError subclass that remembers the underlying Entry and
    the attribute name involved, so the error message can identify them.
    """
    def __init__(self, entry_proxy, attribute):
        AttributeError.__init__(self)
        self.entry_proxy = entry_proxy
        self.attribute = attribute
    def __str__(self):
        entry = self.entry_proxy.get()
        return "%s instance %s has no attribute %s" % (
            entry.__class__.__name__,
            repr(entry.name),
            repr(self.attribute),
        )
# The max_drift value: by default, use a cached signature value for
# any file that's been untouched for more than two days.
default_max_drift = 2*24*60*60
#
# We stringify these file system Nodes a lot. Turning a file system Node
# into a string is non-trivial, because the final string representation
# can depend on a lot of factors: whether it's a derived target or not,
# whether it's linked to a repository or source directory, and whether
# there's duplication going on. The normal technique for optimizing
# calculations like this is to memoize (cache) the string value, so you
# only have to do the calculation once.
#
# A number of the above factors, however, can be set after we've already
# been asked to return a string for a Node, because a Repository() or
# VariantDir() call or the like may not occur until later in SConscript
# files. So this variable controls whether we bother trying to save
# string values for Nodes. The wrapper interface can set this whenever
# they're done mucking with Repository and VariantDir and the other stuff,
# to let this module know it can start returning saved string values
# for Nodes.
#
# Controls Node string-value memoization; see the explanation above.
Save_Strings = None
def save_strings(val):
    """Enable (or disable) saving of Node string representations."""
    global Save_Strings
    Save_Strings = val
#
# Avoid unnecessary function calls by recording a Boolean value that
# tells us whether or not os.path.splitdrive() actually does anything
# on this system, and therefore whether we need to bother calling it
# when looking up path names in various methods below.
#
# Whether os.path.splitdrive() does anything useful on this platform;
# initialized (with _my_splitdrive and the OS_SEP globals) just below.
do_splitdrive = None
_my_splitdrive =None
def initialize_do_splitdrive():
    """Initialize the module-global drive-splitting helpers and path
    separator constants for the current platform."""
    global do_splitdrive
    global has_unc
    drive, path = os.path.splitdrive('X:/foo')
    # splitunc is removed from python 3.7 and newer
    # so we can also just test if splitdrive works with UNC
    has_unc = (hasattr(os.path, 'splitunc')
               or os.path.splitdrive(r'\\split\drive\test')[0] == r'\\split\drive')
    do_splitdrive = not not drive or has_unc
    global _my_splitdrive
    if has_unc:
        def splitdrive(p):
            if p[1:2] == ':':
                return p[:2], p[2:]
            if p[0:2] == '//':
                # Note that we leave a leading slash in the path
                # because UNC paths are always absolute.
                return '//', p[1:]
            return '', p
    else:
        def splitdrive(p):
            if p[1:2] == ':':
                return p[:2], p[2:]
            return '', p
    _my_splitdrive = splitdrive
    # Keep some commonly used values in global variables to skip to
    # module look-up costs.
    global OS_SEP
    global UNC_PREFIX
    global os_sep_is_slash
    OS_SEP = os.sep
    UNC_PREFIX = OS_SEP + OS_SEP
    os_sep_is_slash = OS_SEP == '/'
initialize_do_splitdrive()
# Used to avoid invoking os.path.normpath if not necessary.
needs_normpath_check = re.compile(
r'''
# We need to renormalize the path if it contains any consecutive
# '/' characters.
.*// |
# We need to renormalize the path if it contains a '..' directory.
# Note that we check for all the following cases:
#
# a) The path is a single '..'
# b) The path starts with '..'. E.g. '../' or '../moredirs'
# but we not match '..abc/'.
# c) The path ends with '..'. E.g. '/..' or 'dirs/..'
# d) The path contains a '..' in the middle.
# E.g. dirs/../moredirs
(.*/)?\.\.(?:/|$) |
# We need to renormalize the path if it contains a '.'
# directory, but NOT if it is a single '.' '/' characters. We
# do not want to match a single '.' because this case is checked
# for explicitly since this is common enough case.
#
# Note that we check for all the following cases:
#
# a) We don't match a single '.'
# b) We match if the path starts with '.'. E.g. './' or
# './moredirs' but we not match '.abc/'.
# c) We match if the path ends with '.'. E.g. '/.' or
# 'dirs/.'
# d) We match if the path contains a '.' in the middle.
# E.g. dirs/./moredirs
\./|.*/\.(?:/|$)
''',
re.VERBOSE
)
needs_normpath_match = needs_normpath_check.match
#
# SCons.Action objects for interacting with the outside world.
#
# The Node.FS methods in this module should use these actions to
# create and/or remove files and directories; they should *not* use
# os.{link,symlink,unlink,mkdir}(), etc., directly.
#
# Using these SCons.Action objects ensures that descriptions of these
# external activities are properly displayed, that the displays are
# suppressed when the -s (silent) option is used, and (most importantly)
# the actions are disabled when the the -n option is used, in which case
# there should be *no* changes to the external file system(s)...
#
# For Now disable hard & softlinks for win32
# PY3 supports them, but the rest of SCons is not ready for this
# in some cases user permissions may be required.
# TODO: See if theres a reasonable way to enable using links on win32/64
if hasattr(os, 'link') and sys.platform != 'win32':
def _hardlink_func(fs, src, dst):
# If the source is a symlink, we can't just hard-link to it
# because a relative symlink may point somewhere completely
# different. We must disambiguate the symlink and then
# hard-link the final destination file.
while fs.islink(src):
link = fs.readlink(src)
if not os.path.isabs(link):
src = link
else:
src = os.path.join(os.path.dirname(src), link)
fs.link(src, dst)
else:
_hardlink_func = None
# Symlink-based duplication; unavailable when the platform lacks
# os.symlink, and disabled on win32 (see the note above).
if hasattr(os, 'symlink') and sys.platform != 'win32':
    def _softlink_func(fs, src, dst):
        fs.symlink(src, dst)
else:
    _softlink_func = None
def _copy_func(fs, src, dest):
shutil.copy2(src, dest)
st = fs.stat(src)
fs.chmod(dest, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
# Legal arguments to set_duplicate(): a '-'-separated preference order
# over the 'hard', 'soft' and 'copy' mechanisms.
Valid_Duplicates = ['hard-soft-copy', 'soft-hard-copy',
                    'hard-copy', 'soft-copy', 'copy']
Link_Funcs = [] # contains the callables of the specified duplication style
def set_duplicate(duplicate):
    """Fill in the Link_Funcs list for the requested duplication style.

    The style name is split on '-' and each component is mapped to its
    implementation, discarding those not available on this platform.
    The name-to-implementation mapping is built inside this function,
    not at module level, so that tests can remap os.link and os.symlink.
    """
    if duplicate not in Valid_Duplicates:
        raise SCons.Errors.InternalError("The argument of set_duplicate "
                                         "should be in Valid_Duplicates")
    link_dict = {
        'hard' : _hardlink_func,
        'soft' : _softlink_func,
        'copy' : _copy_func
    }
    global Link_Funcs
    Link_Funcs = [link_dict[style] for style in duplicate.split('-')
                  if link_dict[style]]
def LinkFunc(target, source, env):
    """
    Duplicate source[0] at target[0] using the configured Link_Funcs order.
    Relative paths cause problems with symbolic links, so
    we use absolute paths, which may be a problem for people
    who want to move their soft-linked src-trees around. Those
    people should use the 'hard-copy' mode, softlinks cannot be
    used for that; at least I have no idea how ...
    Returns 0 (success) per the SCons action-function convention.
    """
    src = source[0].get_abspath()
    dest = target[0].get_abspath()
    dir, file = os.path.split(dest)
    if dir and not target[0].fs.isdir(dir):
        os.makedirs(dir)
    if not Link_Funcs:
        # Set a default order of link functions.
        set_duplicate('hard-soft-copy')
    fs = source[0].fs
    # Now link the files with the previously specified order.
    for func in Link_Funcs:
        try:
            func(fs, src, dest)
            break
        except (IOError, OSError):
            # An OSError indicates something happened like a permissions
            # problem or an attempt to symlink across file-system
            # boundaries. An IOError indicates something like the file
            # not existing. In either case, keeping trying additional
            # functions in the list and only raise an error if the last
            # one failed.
            if func == Link_Funcs[-1]:
                # exception of the last link method (copy) are fatal
                raise
    return 0
# Action wrapper for LinkFunc with no display string.
Link = SCons.Action.Action(LinkFunc, None)
def LocalString(target, source, env):
    """Build the display string announcing a local-copy action."""
    dest_node, src_node = target[0], source[0]
    return 'Local copy of %s from %s' % (dest_node, src_node)
# Same linking logic as Link, but announced with LocalString.
LocalCopy = SCons.Action.Action(LinkFunc, LocalString)
def UnlinkFunc(target, source, env):
    """Remove the target file via its filesystem object; returns 0."""
    node = target[0]
    node.fs.unlink(node.get_abspath())
    return 0
# Action wrapper for UnlinkFunc with no display string.
Unlink = SCons.Action.Action(UnlinkFunc, None)
def MkdirFunc(target, source, env):
    """Create the directory for target[0] unless it already exists.

    The extra os.path.exists test looks redundant, but when Install()
    installs multiple dirs outside the source tree, t.exists() can be
    false while the path is already on disk; checking both prevents
    spurious build failures. See test/Install/multi-dir.
    """
    node = target[0]
    path = node.get_abspath()
    if not node.exists() and not os.path.exists(path):
        node.fs.mkdir(path)
    return 0
# Action wrapper for MkdirFunc; presub=None suppresses pre-substitution output.
Mkdir = SCons.Action.Action(MkdirFunc, None, presub=None)
# Singleton builder, created lazily by get_MkdirBuilder() below.
MkdirBuilder = None
def get_MkdirBuilder():
    """Return the singleton directory-creating Builder, making it lazily."""
    global MkdirBuilder
    if MkdirBuilder is None:
        import SCons.Builder
        import SCons.Defaults
        # "env" will get filled in by Executor.get_build_env()
        # calling SCons.Defaults.DefaultEnvironment() when necessary.
        MkdirBuilder = SCons.Builder.Builder(action = Mkdir,
                                             env = None,
                                             explain = None,
                                             is_explicit = None,
                                             target_scanner = SCons.Defaults.DirEntryScanner,
                                             name = "MkdirBuilder")
    return MkdirBuilder
class _Null(object):
    # Distinct sentinel type — _null below marks "no value supplied" in
    # places where None is itself a legitimate value.
    pass
_null = _Null()
# Cygwin's os.path.normcase pretends it's on a case-sensitive filesystem.
_is_cygwin = sys.platform == "cygwin"
if os.path.normcase("TeSt") == os.path.normpath("TeSt") and not _is_cygwin:
def _my_normcase(x):
return x
else:
def _my_normcase(x):
return x.upper()
class DiskChecker(object):
    """A switchable on-disk consistency check.

    Calling the instance runs either the real check (`do`) or the no-op
    (`ignore`), depending on whether this checker's type was named in
    the most recent set() call.
    """
    def __init__(self, type, do, ignore):
        self.type = type
        self.do = do
        self.ignore = ignore
        self.func = do
    def __call__(self, *args, **kw):
        return self.func(*args, **kw)
    def set(self, list):
        # Enable the real check iff our type appears in the given list.
        self.func = self.do if self.type in list else self.ignore
def do_diskcheck_match(node, predicate, errorfmt):
    """Raise TypeError(errorfmt % node's abspath) when predicate() reports
    that this node clashes with what is actually on disk."""
    result = predicate()
    try:
        # If calling the predicate() cached a None value from stat(),
        # remove it so it doesn't interfere with later attempts to
        # build this Node as we walk the DAG. (This isn't a great way
        # to do this, we're reaching into an interface that doesn't
        # really belong to us, but it's all about performance, so
        # for now we'll just document the dependency...)
        if node._memo['stat'] is None:
            del node._memo['stat']
    except (AttributeError, KeyError):
        pass
    if result:
        raise TypeError(errorfmt % node.get_abspath())
def ignore_diskcheck_match(node, predicate, errorfmt):
    """No-op stand-in for do_diskcheck_match when 'match' checking is off."""
    pass
diskcheck_match = DiskChecker('match', do_diskcheck_match, ignore_diskcheck_match)
# All known checkers; set_diskcheck()/diskcheck_types() operate on this list.
diskcheckers = [
    diskcheck_match,
]
def set_diskcheck(list):
    """Enable exactly those registered checkers whose types appear in list."""
    for checker in diskcheckers:
        checker.set(list)
def diskcheck_types():
    """Return the type names of every registered disk checker."""
    return [checker.type for checker in diskcheckers]
class EntryProxy(SCons.Util.Proxy):
    """Proxy around an Entry that adds the "special" substitution
    attributes (base, posix, windows, srcdir, abspath, ...)."""
    __str__ = SCons.Util.Delegate('__str__')
    # In PY3 if a class defines __eq__, then it must explicitly provide
    # __hash__. Since SCons.Util.Proxy provides __eq__ we need the following
    # see: https://docs.python.org/3.1/reference/datamodel.html#object.__hash__
    __hash__ = SCons.Util.Delegate('__hash__')
    def __get_abspath(self):
        entry = self.get()
        return SCons.Subst.SpecialAttrWrapper(entry.get_abspath(),
                                              entry.name + "_abspath")
    def __get_filebase(self):
        # Name with its suffix stripped.
        name = self.get().name
        return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(name)[0],
                                              name + "_filebase")
    def __get_suffix(self):
        name = self.get().name
        return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(name)[1],
                                              name + "_suffix")
    def __get_file(self):
        name = self.get().name
        return SCons.Subst.SpecialAttrWrapper(name, name + "_file")
    def __get_base_path(self):
        """Return the file's directory and file name, with the
        suffix stripped."""
        entry = self.get()
        return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(entry.get_path())[0],
                                              entry.name + "_base")
    def __get_posix_path(self):
        """Return the path with / as the path separator,
        regardless of platform."""
        if os_sep_is_slash:
            return self
        else:
            entry = self.get()
            r = entry.get_path().replace(OS_SEP, '/')
            return SCons.Subst.SpecialAttrWrapper(r, entry.name + "_posix")
    def __get_windows_path(self):
        r"""Return the path with \ as the path separator,
        regardless of platform."""
        if OS_SEP == '\\':
            return self
        else:
            entry = self.get()
            r = entry.get_path().replace(OS_SEP, '\\')
            return SCons.Subst.SpecialAttrWrapper(r, entry.name + "_windows")
    def __get_srcnode(self):
        return EntryProxy(self.get().srcnode())
    def __get_srcdir(self):
        """Returns the directory containing the source node linked to this
        node via VariantDir(), or the directory of this node if not linked."""
        return EntryProxy(self.get().srcnode().dir)
    def __get_rsrcnode(self):
        # Repository-backed source node.
        return EntryProxy(self.get().srcnode().rfile())
    def __get_rsrcdir(self):
        """Returns the directory containing the source node linked to this
        node via VariantDir(), or the directory of this node if not linked."""
        return EntryProxy(self.get().srcnode().rfile().dir)
    def __get_dir(self):
        return EntryProxy(self.get().dir)
    # Maps special attribute names to the private getters above.
    dictSpecialAttrs = { "base" : __get_base_path,
                         "posix" : __get_posix_path,
                         "windows" : __get_windows_path,
                         "win32" : __get_windows_path,
                         "srcpath" : __get_srcnode,
                         "srcdir" : __get_srcdir,
                         "dir" : __get_dir,
                         "abspath" : __get_abspath,
                         "filebase" : __get_filebase,
                         "suffix" : __get_suffix,
                         "file" : __get_file,
                         "rsrcpath" : __get_rsrcnode,
                         "rsrcdir" : __get_rsrcdir,
    }
    def __getattr__(self, name):
        # This is how we implement the "special" attributes
        # such as base, posix, srcdir, etc.
        try:
            attr_function = self.dictSpecialAttrs[name]
        except KeyError:
            try:
                attr = SCons.Util.Proxy.__getattr__(self, name)
            except AttributeError:
                # Raise our own AttributeError subclass with an
                # overridden __str__() method that identifies the
                # name of the entry that caused the exception.
                raise EntryProxyAttributeError(self, name)
            return attr
        else:
            return attr_function(self)
class Base(SCons.Node.Node):
"""A generic class for file system entries. This class is for
when we don't know yet whether the entry being looked up is a file
or a directory. Instances of this class can morph into either
Dir or File objects by a later, more precise lookup.
Note: this class does not define __cmp__ and __hash__ for
efficiency reasons. SCons does a lot of comparing of
Node.FS.{Base,Entry,File,Dir} objects, so those operations must be
as fast as possible, which means we want to use Python's built-in
object identity comparisons.
"""
__slots__ = ['name',
'fs',
'_abspath',
'_labspath',
'_path',
'_tpath',
'_path_elements',
'dir',
'cwd',
'duplicate',
'_local',
'sbuilder',
'_proxy',
'_func_sconsign']
def __init__(self, name, directory, fs):
"""Initialize a generic Node.FS.Base object.
Call the superclass initialization, take care of setting up
our relative and absolute paths, identify our parent
directory, and indicate that this node should use
signatures."""
if SCons.Debug.track_instances: logInstanceCreation(self, 'Node.FS.Base')
SCons.Node.Node.__init__(self)
# Filenames and paths are probably reused and are intern'ed to save some memory.
# Filename with extension as it was specified when the object was
# created; to obtain filesystem path, use Python str() function
self.name = SCons.Util.silent_intern(name)
self.fs = fs #: Reference to parent Node.FS object
assert directory, "A directory must be provided"
self._abspath = None
self._labspath = None
self._path = None
self._tpath = None
self._path_elements = None
self.dir = directory
self.cwd = None # will hold the SConscript directory for target nodes
self.duplicate = directory.duplicate
self.changed_since_last_build = 2
self._func_sconsign = 0
self._func_exists = 2
self._func_rexists = 2
self._func_get_contents = 0
self._func_target_from_source = 1
self.store_info = 1
def str_for_display(self):
return '"' + self.__str__() + '"'
def must_be_same(self, klass):
"""
This node, which already existed, is being looked up as the
specified klass. Raise an exception if it isn't.
"""
if isinstance(self, klass) or klass is Entry:
return
raise TypeError("Tried to lookup %s '%s' as a %s." %\
(self.__class__.__name__, self.get_internal_path(), klass.__name__))
def get_dir(self):
return self.dir
def get_suffix(self):
return SCons.Util.splitext(self.name)[1]
def rfile(self):
return self
def __getattr__(self, attr):
""" Together with the node_bwcomp dict defined below,
this method provides a simple backward compatibility
layer for the Node attributes 'abspath', 'labspath',
'path', 'tpath', 'suffix' and 'path_elements'. These Node
attributes used to be directly available in v2.3 and earlier, but
have been replaced by getter methods that initialize the
single variables lazily when required, in order to save memory.
The redirection to the getters lets older Tools and
SConstruct continue to work without any additional changes,
fully transparent to the user.
Note, that __getattr__ is only called as fallback when the
requested attribute can't be found, so there should be no
speed performance penalty involved for standard builds.
"""
if attr in node_bwcomp:
return node_bwcomp[attr](self)
raise AttributeError("%r object has no attribute %r" %
(self.__class__, attr))
def __str__(self):
"""A Node.FS.Base object's string representation is its path
name."""
global Save_Strings
if Save_Strings:
return self._save_str()
return self._get_str()
def __lt__(self, other):
""" less than operator used by sorting on py3"""
return str(self) < str(other)
@SCons.Memoize.CountMethodCall
def _save_str(self):
try:
return self._memo['_save_str']
except KeyError:
pass
result = SCons.Util.silent_intern(self._get_str())
self._memo['_save_str'] = result
return result
def _get_str(self):
global Save_Strings
if self.duplicate or self.is_derived():
return self.get_path()
srcnode = self.srcnode()
if srcnode.stat() is None and self.stat() is not None:
result = self.get_path()
else:
result = srcnode.get_path()
if not Save_Strings:
# We're not at the point where we're saving the string
# representations of FS Nodes (because we haven't finished
# reading the SConscript files and need to have str() return
# things relative to them). That also means we can't yet
# cache values returned (or not returned) by stat(), since
# Python code in the SConscript files might still create
# or otherwise affect the on-disk file. So get rid of the
# values that the underlying stat() method saved.
try: del self._memo['stat']
except KeyError: pass
if self is not srcnode:
try: del srcnode._memo['stat']
except KeyError: pass
return result
rstr = __str__
@SCons.Memoize.CountMethodCall
def stat(self):
try:
return self._memo['stat']
except KeyError:
pass
try:
result = self.fs.stat(self.get_abspath())
except os.error:
result = None
self._memo['stat'] = result
return result
def exists(self):
return SCons.Node._exists_map[self._func_exists](self)
def rexists(self):
return SCons.Node._rexists_map[self._func_rexists](self)
def getmtime(self):
st = self.stat()
if st:
return st[stat.ST_MTIME]
else:
return None
def getsize(self):
st = self.stat()
if st:
return st[stat.ST_SIZE]
else:
return None
def isdir(self):
st = self.stat()
return st is not None and stat.S_ISDIR(st[stat.ST_MODE])
def isfile(self):
st = self.stat()
return st is not None and stat.S_ISREG(st[stat.ST_MODE])
if hasattr(os, 'symlink'):
def islink(self):
try: st = self.fs.lstat(self.get_abspath())
except os.error: return 0
return stat.S_ISLNK(st[stat.ST_MODE])
else:
def islink(self):
return 0 # no symlinks
def is_under(self, dir):
if self is dir:
return 1
else:
return self.dir.is_under(dir)
def set_local(self):
self._local = 1
def srcnode(self):
"""If this node is in a build path, return the node
corresponding to its source file. Otherwise, return
ourself.
"""
srcdir_list = self.dir.srcdir_list()
if srcdir_list:
srcnode = srcdir_list[0].Entry(self.name)
srcnode.must_be_same(self.__class__)
return srcnode
return self
def get_path(self, dir=None):
"""Return path relative to the current working directory of the
Node.FS.Base object that owns us."""
if not dir:
dir = self.fs.getcwd()
if self == dir:
return '.'
path_elems = self.get_path_elements()
pathname = ''
try: i = path_elems.index(dir)
except ValueError:
for p in path_elems[:-1]:
pathname += p.dirname
else:
for p in path_elems[i+1:-1]:
pathname += p.dirname
return pathname + path_elems[-1].name
def set_src_builder(self, builder):
"""Set the source code builder for this node."""
self.sbuilder = builder
if not self.has_builder():
self.builder_set(builder)
def src_builder(self):
"""Fetch the source code builder for this node.
If there isn't one, we cache the source code builder specified
for the directory (which in turn will cache the value from its
parent directory, and so on up to the file system root).
"""
try:
scb = self.sbuilder
except AttributeError:
scb = self.dir.src_builder()
self.sbuilder = scb
return scb
def get_abspath(self):
"""Get the absolute path of the file."""
return self.dir.entry_abspath(self.name)
def get_labspath(self):
"""Get the absolute path of the file."""
return self.dir.entry_labspath(self.name)
def get_internal_path(self):
if self.dir._path == '.':
return self.name
else:
return self.dir.entry_path(self.name)
def get_tpath(self):
if self.dir._tpath == '.':
return self.name
else:
return self.dir.entry_tpath(self.name)
def get_path_elements(self):
return self.dir._path_elements + [self]
def for_signature(self):
# Return just our name. Even an absolute path would not work,
# because that can change thanks to symlinks or remapped network
# paths.
return self.name
def get_subst_proxy(self):
try:
return self._proxy
except AttributeError:
ret = EntryProxy(self)
self._proxy = ret
return ret
def target_from_source(self, prefix, suffix, splitext=SCons.Util.splitext):
"""
Generates a target entry that corresponds to this entry (usually
a source file) with the specified prefix and suffix.
Note that this method can be overridden dynamically for generated
files that need different behavior. See Tool/swig.py for
an example.
"""
return SCons.Node._target_from_source_map[self._func_target_from_source](self, prefix, suffix, splitext)
def _Rfindalldirs_key(self, pathlist):
return pathlist
@SCons.Memoize.CountDictCall(_Rfindalldirs_key)
def Rfindalldirs(self, pathlist):
"""
Return all of the directories for a given path list, including
corresponding "backing" directories in any repositories.
The Node lookups are relative to this Node (typically a
directory), so memoizing result saves cycles from looking
up the same path for each target in a given directory.
"""
try:
memo_dict = self._memo['Rfindalldirs']
except KeyError:
memo_dict = {}
self._memo['Rfindalldirs'] = memo_dict
else:
try:
return memo_dict[pathlist]
except KeyError:
pass
create_dir_relative_to_self = self.Dir
result = []
for path in pathlist:
if isinstance(path, SCons.Node.Node):
result.append(path)
else:
dir = create_dir_relative_to_self(path)
result.extend(dir.get_all_rdirs())
memo_dict[pathlist] = result
return result
def RDirs(self, pathlist):
"""Search for a list of directories in the Repository list."""
cwd = self.cwd or self.fs._cwd
return cwd.Rfindalldirs(pathlist)
    @SCons.Memoize.CountMethodCall
    def rentry(self):
        """Return this entry, or the equivalent entry found in a
        Repository directory if this one does not exist locally.
        The result is memoized in self._memo['rentry']."""
        try:
            return self._memo['rentry']
        except KeyError:
            pass
        result = self
        if not self.exists():
            norm_name = _my_normcase(self.name)
            for dir in self.dir.get_all_rdirs():
                try:
                    node = dir.entries[norm_name]
                except KeyError:
                    # Not an in-memory entry of the repository directory;
                    # fall back to checking the disk.
                    if dir.entry_exists_on_disk(self.name):
                        result = dir.Entry(self.name)
                        break
                # NOTE(review): a Node found in dir.entries is bound to
                # 'node' but otherwise ignored here (only the on-disk
                # fallback changes 'result') -- appears intentional,
                # confirm against upstream SCons.
        self._memo['rentry'] = result
        return result
def _glob1(self, pattern, ondisk=True, source=False, strings=False):
return []
# Dict that provides a simple backward compatibility
# layer for the Node attributes 'abspath', 'labspath',
# 'path', 'tpath' and 'path_elements'.
# Maps the old attribute name to the accessor method that
# now computes the value on demand.
# @see Base.__getattr__ above
node_bwcomp = {'abspath' : Base.get_abspath,
               'labspath' : Base.get_labspath,
               'path' : Base.get_internal_path,
               'tpath' : Base.get_tpath,
               'path_elements' : Base.get_path_elements,
               'suffix' : Base.get_suffix}
class Entry(Base):
    """This is the class for generic Node.FS entries--that is, things
    that could be a File or a Dir, but we're just not sure yet.
    Consequently, the methods in this class really exist just to
    transform their associated object into the right class when the
    time comes, and then call the same-named method in the transformed
    class."""
    __slots__ = ['scanner_paths',
                 'cachedir_csig',
                 'cachesig',
                 'repositories',
                 'srcdir',
                 'entries',
                 'searched',
                 '_sconsign',
                 'variant_dirs',
                 'root',
                 'dirname',
                 'on_disk_entries',
                 'released_target_info',
                 'contentsig']
    def __init__(self, name, directory, fs):
        """Initialize the entry; the _func_* values are indices into the
        SCons.Node dispatch maps (_exists_map / _get_contents_map)."""
        Base.__init__(self, name, directory, fs)
        self._func_exists = 3
        self._func_get_contents = 1
    def diskcheck_match(self):
        # Generic entries have no file/dir expectation to check yet.
        pass
    def disambiguate(self, must_exist=None):
        """
        Morph this generic Entry into a File or a Dir, based on what
        exists on disk (or in the source directory for a variant dir).
        If nothing can be found and must_exist is true, raise UserError;
        otherwise default to File.  Returns self (now re-classed).
        """
        if self.isfile():
            self.__class__ = File
            self._morph()
            self.clear()
        elif self.isdir():
            self.__class__ = Dir
            self._morph()
        else:
            # There was nothing on-disk at this location, so look in
            # the src directory.
            #
            # We can't just use self.srcnode() straight away because
            # that would create an actual Node for this file in the src
            # directory, and there might not be one.  Instead, use the
            # dir_on_disk() method to see if there's something on-disk
            # with that name, in which case we can go ahead and call
            # self.srcnode() to create the right type of entry.
            srcdir = self.dir.srcnode()
            if srcdir != self.dir and \
               srcdir.entry_exists_on_disk(self.name) and \
               self.srcnode().isdir():
                self.__class__ = Dir
                self._morph()
            elif must_exist:
                msg = "No such file or directory: '%s'" % self.get_abspath()
                raise SCons.Errors.UserError(msg)
            else:
                self.__class__ = File
                self._morph()
                self.clear()
        return self
    def rfile(self):
        """We're a generic Entry, but the caller is actually looking for
        a File at this point, so morph into one."""
        self.__class__ = File
        self._morph()
        self.clear()
        return File.rfile(self)
    def scanner_key(self):
        # Scanners are selected by file suffix.
        return self.get_suffix()
    def get_contents(self):
        """Fetch the contents of the entry.  Returns the exact binary
        contents of the file."""
        return SCons.Node._get_contents_map[self._func_get_contents](self)
    def get_text_contents(self):
        """Fetch the decoded text contents of a Unicode encoded Entry.
        Since this should return the text contents from the file
        system, we check to see into what sort of subclass we should
        morph this Entry."""
        try:
            self = self.disambiguate(must_exist=1)
        except SCons.Errors.UserError:
            # There was nothing on disk with which to disambiguate
            # this entry.  Leave it as an Entry, but return a null
            # string so calls to get_text_contents() in emitters and
            # the like (e.g. in qt.py) don't have to disambiguate by
            # hand or catch the exception.
            return ''
        else:
            # After disambiguate(), self.__class__ is File or Dir, so
            # this re-dispatches to the subclass implementation (it is
            # not an infinite recursion).
            return self.get_text_contents()
    def must_be_same(self, klass):
        """Called to make sure a Node is a Dir.  Since we're an
        Entry, we can morph into one."""
        if self.__class__ is not klass:
            self.__class__ = klass
            self._morph()
            self.clear()
    # The following methods can get called before the Taskmaster has
    # had a chance to call disambiguate() directly to see if this Entry
    # should really be a Dir or a File.  We therefore use these to call
    # disambiguate() transparently (from our caller's point of view).
    #
    # Right now, this minimal set of methods has been derived by just
    # looking at some of the methods that will obviously be called early
    # in any of the various Taskmasters' calling sequences, and then
    # empirically figuring out which additional methods are necessary
    # to make various tests pass.
    def exists(self):
        # Dispatch through the _exists_map set up in __init__.
        return SCons.Node._exists_map[self._func_exists](self)
    def rel_path(self, other):
        d = self.disambiguate()
        if d.__class__ is Entry:
            raise Exception("rel_path() could not disambiguate File/Dir")
        return d.rel_path(other)
    def new_ninfo(self):
        return self.disambiguate().new_ninfo()
    def _glob1(self, pattern, ondisk=True, source=False, strings=False):
        return self.disambiguate()._glob1(pattern, ondisk, source, strings)
    def get_subst_proxy(self):
        return self.disambiguate().get_subst_proxy()
# This is for later so we can differentiate between Entry the class and Entry
# the method of the FS class.
_classEntry = Entry
class LocalFS(object):
    """
    This class implements an abstraction layer for operations involving
    a local file system. Essentially, this wraps any function in
    the os, os.path or shutil modules that we use to actually go do
    anything with or to the local file system.
    Note that there's a very good chance we'll refactor this part of
    the architecture in some way as we really implement the interface(s)
    for remote file system Nodes. For example, the right architecture
    might be to have this be a subclass instead of a base class.
    Nevertheless, we're using this as a first step in that direction.
    We're not using chdir() yet because the calling subclass method
    needs to use os.chdir() directly to avoid recursion. Will we
    really need this one?
    """
    #def chdir(self, path):
    #    return os.chdir(path)
    def chmod(self, path, mode):
        """Change the mode bits of path (see os.chmod)."""
        return os.chmod(path, mode)
    def copy(self, src, dst):
        """Copy file data and mode bits (see shutil.copy)."""
        return shutil.copy(src, dst)
    def copy2(self, src, dst):
        """Copy file data and metadata, timestamps included (see shutil.copy2)."""
        return shutil.copy2(src, dst)
    def exists(self, path):
        """Return True if path exists on disk."""
        return os.path.exists(path)
    def getmtime(self, path):
        """Return the modification time of path."""
        return os.path.getmtime(path)
    def getsize(self, path):
        """Return the size of path in bytes."""
        return os.path.getsize(path)
    def isdir(self, path):
        """Return True if path is an existing directory."""
        return os.path.isdir(path)
    def isfile(self, path):
        """Return True if path is an existing regular file."""
        return os.path.isfile(path)
    def link(self, src, dst):
        """Create a hard link dst pointing to src."""
        return os.link(src, dst)
    def lstat(self, path):
        """Stat path without following symlinks."""
        return os.lstat(path)
    def listdir(self, path):
        """Return the names of the entries in directory path."""
        return os.listdir(path)
    def makedirs(self, path):
        """Create path, including any missing intermediate directories."""
        return os.makedirs(path)
    def mkdir(self, path):
        """Create the single directory path."""
        return os.mkdir(path)
    def rename(self, old, new):
        """Rename old to new."""
        return os.rename(old, new)
    def stat(self, path):
        """Stat path, following symlinks."""
        return os.stat(path)
    def symlink(self, src, dst):
        """Create a symbolic link dst pointing to src."""
        return os.symlink(src, dst)
    def open(self, path):
        """Open path for reading in text mode."""
        return open(path)
    def unlink(self, path):
        """Remove the file path."""
        return os.unlink(path)
    # Platforms without symlink support get stub implementations so
    # callers can use islink()/readlink() unconditionally.
    if hasattr(os, 'symlink'):
        def islink(self, path):
            """Return True if path is a symbolic link."""
            return os.path.islink(path)
    else:
        def islink(self, path):
            """Symlinks are unsupported on this platform."""
            # Fixed: return False (bool) rather than 0, for consistency
            # with os.path.islink().
            return False # no symlinks
    if hasattr(os, 'readlink'):
        def readlink(self, file):
            """Return the target of the symbolic link file."""
            return os.readlink(file)
    else:
        def readlink(self, file):
            """Symlinks are unsupported on this platform."""
            return ''
class FS(LocalFS):
    def __init__(self, path = None):
        """Initialize the Node.FS subsystem.
        The supplied path is the top of the source tree, where we
        expect to find the top-level build file. If no path is
        supplied, the current directory is the default.
        The path argument must be a valid absolute path.
        """
        if SCons.Debug.track_instances: logInstanceCreation(self, 'Node.FS')
        self._memo = {}
        self.Root = {}
        self.SConstruct_dir = None
        self.max_drift = default_max_drift
        self.Top = None
        if path is None:
            self.pathTop = os.getcwd()
        else:
            self.pathTop = path
        self.defaultDrive = _my_normcase(_my_splitdrive(self.pathTop)[0])
        self.Top = self.Dir(self.pathTop)
        self.Top._path = '.'
        self.Top._tpath = '.'
        self._cwd = self.Top
        # Give the NodeInfo classes a back-reference to this FS so their
        # str_to_node() methods can resolve stored strings into Nodes.
        DirNodeInfo.fs = self
        FileNodeInfo.fs = self
    def set_SConstruct_dir(self, dir):
        # Record the directory containing the top-level SConstruct file.
        self.SConstruct_dir = dir
    def get_max_drift(self):
        # Maximum clock drift (in seconds) tolerated by timestamp checks.
        return self.max_drift
    def set_max_drift(self, max_drift):
        self.max_drift = max_drift
    def getcwd(self):
        # _cwd may not exist yet if this is called during construction.
        if hasattr(self, "_cwd"):
            return self._cwd
        else:
            return "<no cwd>"
    def chdir(self, dir, change_os_dir=0):
        """Change the current working directory for lookups.
        If change_os_dir is true, we will also change the "real" cwd
        to match.
        """
        curr=self._cwd
        try:
            if dir is not None:
                self._cwd = dir
                if change_os_dir:
                    os.chdir(dir.get_abspath())
        except OSError:
            # The OS-level chdir failed; restore the previous lookup cwd
            # before re-raising.
            self._cwd = curr
            raise
    def get_root(self, drive):
        """
        Returns the root directory for the specified drive, creating
        it if necessary.
        """
        drive = _my_normcase(drive)
        try:
            return self.Root[drive]
        except KeyError:
            root = RootDir(drive, self)
            self.Root[drive] = root
            # Register '' and the default drive as synonyms so lookups
            # with either spelling find the same root.
            if not drive:
                self.Root[self.defaultDrive] = root
            elif drive == self.defaultDrive:
                self.Root[''] = root
            return root
    def _lookup(self, p, directory, fsclass, create=1):
        """
        The generic entry point for Node lookup with user-supplied data.
        This translates arbitrary input into a canonical Node.FS object
        of the specified fsclass. The general approach for strings is
        to turn it into a fully normalized absolute path and then call
        the root directory's lookup_abs() method for the heavy lifting.
        If the path name begins with '#', it is unconditionally
        interpreted relative to the top-level directory of this FS. '#'
        is treated as a synonym for the top-level SConstruct directory,
        much like '~' is treated as a synonym for the user's home
        directory in a UNIX shell. So both '#foo' and '#/foo' refer
        to the 'foo' subdirectory underneath the top-level SConstruct
        directory.
        If the path name is relative, then the path is looked up relative
        to the specified directory, or the current directory (self._cwd,
        typically the SConscript directory) if the specified directory
        is None.
        """
        if isinstance(p, Base):
            # It's already a Node.FS object. Make sure it's the right
            # class and return.
            p.must_be_same(fsclass)
            return p
        # str(p) in case it's something like a proxy object
        p = str(p)
        if not os_sep_is_slash:
            p = p.replace(OS_SEP, '/')
        if p[0:1] == '#':
            # There was an initial '#', so we strip it and override
            # whatever directory they may have specified with the
            # top-level SConstruct directory.
            p = p[1:]
            directory = self.Top
            # There might be a drive letter following the
            # '#'. Although it is not described in the SCons man page,
            # the regression test suite explicitly tests for that
            # syntax. It seems to mean the following thing:
            #
            #   Assuming the SCons top dir is in C:/xxx/yyy,
            #   '#X:/toto' means X:/xxx/yyy/toto.
            #
            # i.e. it assumes that the X: drive has a directory
            # structure similar to the one found on drive C:.
            if do_splitdrive:
                drive, p = _my_splitdrive(p)
                if drive:
                    root = self.get_root(drive)
                else:
                    root = directory.root
            else:
                root = directory.root
            # We can only strip trailing '/' after splitting the drive
            # since the drive might be the UNC '//' prefix.
            p = p.strip('/')
            needs_normpath = needs_normpath_match(p)
            # The path is relative to the top-level SCons directory.
            if p in ('', '.'):
                p = directory.get_labspath()
            else:
                p = directory.get_labspath() + '/' + p
        else:
            if do_splitdrive:
                drive, p = _my_splitdrive(p)
                if drive and not p:
                    # This causes a naked drive letter to be treated
                    # as a synonym for the root directory on that
                    # drive.
                    p = '/'
            else:
                drive = ''
            # We can only strip trailing '/' since the drive might be the
            # UNC '//' prefix.
            if p != '/':
                p = p.rstrip('/')
            needs_normpath = needs_normpath_match(p)
            if p[0:1] == '/':
                # Absolute path
                root = self.get_root(drive)
            else:
                # This is a relative lookup or to the current directory
                # (the path name is not absolute). Add the string to the
                # appropriate directory lookup path, after which the whole
                # thing gets normalized.
                if directory:
                    if not isinstance(directory, Dir):
                        directory = self.Dir(directory)
                else:
                    directory = self._cwd
                if p in ('', '.'):
                    p = directory.get_labspath()
                else:
                    p = directory.get_labspath() + '/' + p
                if drive:
                    root = self.get_root(drive)
                else:
                    root = directory.root
        if needs_normpath is not None:
            # Normalize a pathname. Will return the same result for
            # equivalent paths.
            #
            # We take advantage of the fact that we have an absolute
            # path here for sure. In addition, we know that the
            # components of lookup path are separated by slashes at
            # this point. Because of this, this code is about 2X
            # faster than calling os.path.normpath() followed by
            # replacing os.sep with '/' again.
            ins = p.split('/')[1:]
            outs = []
            for d in ins:
                if d == '..':
                    try:
                        outs.pop()
                    except IndexError:
                        pass
                elif d not in ('', '.'):
                    outs.append(d)
            p = '/' + '/'.join(outs)
        return root._lookup_abs(p, fsclass, create)
    def Entry(self, name, directory = None, create = 1):
        """Look up or create a generic Entry node with the specified name.
        If the name is a relative path (begins with ./, ../, or a file
        name), then it is looked up relative to the supplied directory
        node, or to the top level directory of the FS (supplied at
        construction time) if no directory is supplied.
        """
        return self._lookup(name, directory, Entry, create)
    def File(self, name, directory = None, create = 1):
        """Look up or create a File node with the specified name.  If
        the name is a relative path (begins with ./, ../, or a file name),
        then it is looked up relative to the supplied directory node,
        or to the top level directory of the FS (supplied at construction
        time) if no directory is supplied.
        This method will raise TypeError if a directory is found at the
        specified path.
        """
        return self._lookup(name, directory, File, create)
    def Dir(self, name, directory = None, create = True):
        """Look up or create a Dir node with the specified name.  If
        the name is a relative path (begins with ./, ../, or a file name),
        then it is looked up relative to the supplied directory node,
        or to the top level directory of the FS (supplied at construction
        time) if no directory is supplied.
        This method will raise TypeError if a normal file is found at the
        specified path.
        """
        return self._lookup(name, directory, Dir, create)
    def VariantDir(self, variant_dir, src_dir, duplicate=1):
        """Link the supplied variant directory to the source directory
        for purposes of building files."""
        if not isinstance(src_dir, SCons.Node.Node):
            src_dir = self.Dir(src_dir)
        if not isinstance(variant_dir, SCons.Node.Node):
            variant_dir = self.Dir(variant_dir)
        if src_dir.is_under(variant_dir):
            raise SCons.Errors.UserError("Source directory cannot be under variant directory.")
        if variant_dir.srcdir:
            if variant_dir.srcdir == src_dir:
                return # We already did this.
            raise SCons.Errors.UserError("'%s' already has a source directory: '%s'."%(variant_dir, variant_dir.srcdir))
        variant_dir.link(src_dir, duplicate)
    def Repository(self, *dirs):
        """Specify Repository directories to search."""
        for d in dirs:
            if not isinstance(d, SCons.Node.Node):
                d = self.Dir(d)
            self.Top.addRepository(d)
    def PyPackageDir(self, modulename):
        r"""Locate the directory of a given python module name
        For example scons might resolve to
        Windows: C:\Python27\Lib\site-packages\scons-2.5.1
        Linux: /usr/lib/scons
        This can be useful when we want to determine a toolpath based on a python module name"""
        dirpath = ''
        if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and sys.version_info[1] in (0,1,2,3,4)):
            # Python2 Code
            import imp
            splitname = modulename.split('.')
            srchpths = sys.path
            for item in splitname:
                file, path, desc = imp.find_module(item, srchpths)
                if file is not None:
                    path = os.path.dirname(path)
                srchpths = [path]
            dirpath = path
        else:
            # Python3 Code
            import importlib.util
            modspec = importlib.util.find_spec(modulename)
            dirpath = os.path.dirname(modspec.origin)
        return self._lookup(dirpath, None, Dir, True)
    def variant_dir_target_climb(self, orig, dir, tail):
        """Create targets in corresponding variant directories
        Climb the directory tree, and look up path names
        relative to any linked variant directories we find.
        Even though this loops and walks up the tree, we don't memoize
        the return value because this is really only used to process
        the command-line targets.
        """
        targets = []
        message = None
        fmt = "building associated VariantDir targets: %s"
        start_dir = dir
        while dir:
            for bd in dir.variant_dirs:
                if start_dir.is_under(bd):
                    # If already in the build-dir location, don't reflect
                    return [orig], fmt % str(orig)
                p = os.path.join(bd._path, *tail)
                targets.append(self.Entry(p))
            tail = [dir.name] + tail
            dir = dir.up()
        if targets:
            message = fmt % ' '.join(map(str, targets))
        return targets, message
    def Glob(self, pathname, ondisk=True, source=True, strings=False, exclude=None, cwd=None):
        """
        Globs
        This is mainly a shim layer that delegates to the glob() method
        of the cwd directory Node (defaulting to the current lookup
        directory).
        """
        if cwd is None:
            cwd = self.getcwd()
        return cwd.glob(pathname, ondisk, source, strings, exclude)
class DirNodeInfo(SCons.Node.NodeInfoBase):
    """NodeInfo specialization for directories; resolves stored path
    strings back into Nodes via the owning FS."""
    __slots__ = ()
    # This should get reset by the FS initialization.
    current_version_id = 2
    fs = None
    def str_to_node(self, s):
        """Look up the Node for the path string s, interpreted relative
        to the top-level directory when it is not absolute."""
        fs = self.fs
        top_dir = fs.Top
        lookup_root = top_dir.root
        if do_splitdrive:
            drive, s = _my_splitdrive(s)
            if drive:
                lookup_root = fs.get_root(drive)
        if not os.path.isabs(s):
            s = top_dir.get_labspath() + '/' + s
        return lookup_root._lookup_abs(s, Entry)
class DirBuildInfo(SCons.Node.BuildInfoBase):
    # BuildInfo specialization for directories; adds no fields of its own.
    __slots__ = ()
    current_version_id = 2
# Pattern matching any glob wildcard character: *, ? or [.
glob_magic_check = re.compile('[*?[]')
def has_glob_magic(s):
    """Return True if s contains any glob wildcard characters."""
    return bool(glob_magic_check.search(s))
class Dir(Base):
    """A class for directories in a file system.
    """
    __slots__ = ['scanner_paths',
                 'cachedir_csig',
                 'cachesig',
                 'repositories',
                 'srcdir',
                 'entries',
                 'searched',
                 '_sconsign',
                 'variant_dirs',
                 'root',
                 'dirname',
                 'on_disk_entries',
                 'released_target_info',
                 'contentsig']
    # Dir-specific NodeInfo/BuildInfo classes (see DirNodeInfo and
    # DirBuildInfo above).
    NodeInfo = DirNodeInfo
    BuildInfo = DirBuildInfo
    def __init__(self, name, directory, fs):
        """Initialize the base Node state, then morph into a full Dir."""
        if SCons.Debug.track_instances: logInstanceCreation(self, 'Node.FS.Dir')
        Base.__init__(self, name, directory, fs)
        self._morph()
    def _morph(self):
        """Turn a file system Node (either a freshly initialized directory
        object or a separate Entry object) into a proper directory object.
        Set up this directory's entries and hook it into the file
        system tree.  Specify that directories (this Node) don't use
        signatures for calculating whether they're current.
        """
        self.repositories = []
        self.srcdir = None
        self.entries = {}
        self.entries['.'] = self
        self.entries['..'] = self.dir
        self.cwd = self
        self.searched = 0
        self._sconsign = None
        self.variant_dirs = []
        self.root = self.dir.root
        # Dispatch indices into the SCons.Node maps used by this class.
        self.changed_since_last_build = 3
        self._func_sconsign = 1
        self._func_exists = 2
        self._func_get_contents = 2
        # Intern the path strings: many nodes share identical prefixes.
        self._abspath = SCons.Util.silent_intern(self.dir.entry_abspath(self.name))
        self._labspath = SCons.Util.silent_intern(self.dir.entry_labspath(self.name))
        if self.dir._path == '.':
            self._path = SCons.Util.silent_intern(self.name)
        else:
            self._path = SCons.Util.silent_intern(self.dir.entry_path(self.name))
        if self.dir._tpath == '.':
            self._tpath = SCons.Util.silent_intern(self.name)
        else:
            self._tpath = SCons.Util.silent_intern(self.dir.entry_tpath(self.name))
        self._path_elements = self.dir._path_elements + [self]
        # For directories, we make a difference between the directory
        # 'name' and the directory 'dirname'. The 'name' attribute is
        # used when we need to print the 'name' of the directory or
        # when it is used as the last part of a path. The 'dirname'
        # is used when the directory is not the last element of the
        # path. The main reason for making that distinction is that
        # for RootDir's the dirname can not be easily inferred from
        # the name. For example, we have to add a '/' after a drive
        # letter but not after a UNC path prefix ('//').
        self.dirname = self.name + OS_SEP
        # Don't just reset the executor, replace its action list,
        # because it might have some pre-or post-actions that need to
        # be preserved.
        #
        # But don't reset the executor if there is a non-null executor
        # attached already. The existing executor might have other
        # targets, in which case replacing the action list with a
        # Mkdir action is a big mistake.
        if not hasattr(self, 'executor'):
            self.builder = get_MkdirBuilder()
            self.get_executor().set_action_list(self.builder.action)
        else:
            # Prepend MkdirBuilder action to existing action list
            l = self.get_executor().action_list
            a = get_MkdirBuilder().action
            l.insert(0, a)
            self.get_executor().set_action_list(l)
    def diskcheck_match(self):
        """Warn if a regular file exists where this directory is expected."""
        # Nuitka: This check breaks with symlinks on Windows and Python2
        if os.name == "nt" and str is bytes:
            return
        diskcheck_match(self, self.isfile,
                        "File %s found where directory expected.")
    def __clearRepositoryCache(self, duplicate=None):
        """Called when we change the repository(ies) for a directory.
        This clears any cached information that is invalidated by changing
        the repository."""
        for node in list(self.entries.values()):
            if node != self.dir:
                if node != self and isinstance(node, Dir):
                    # Recurse into child directories.
                    node.__clearRepositoryCache(duplicate)
                else:
                    node.clear()
                    try:
                        del node._srcreps
                    except AttributeError:
                        pass
                    if duplicate is not None:
                        node.duplicate=duplicate
    def __resetDuplicate(self, node):
        # Restore a child node's duplicate flag from its containing
        # directory (no-op for self).
        if node != self:
            node.duplicate = node.get_dir().duplicate
def Entry(self, name):
"""
Looks up or creates an entry node named 'name' relative to
this directory.
"""
return self.fs.Entry(name, self)
def Dir(self, name, create=True):
"""
Looks up or creates a directory node named 'name' relative to
this directory.
"""
return self.fs.Dir(name, self, create)
def File(self, name):
"""
Looks up or creates a file node named 'name' relative to
this directory.
"""
return self.fs.File(name, self)
    def link(self, srcdir, duplicate):
        """Set this directory as the variant directory for the
        supplied source directory."""
        self.srcdir = srcdir
        self.duplicate = duplicate
        # Repository information cached before the link is now stale.
        self.__clearRepositoryCache(duplicate)
        srcdir.variant_dirs.append(self)
def getRepositories(self):
"""Returns a list of repositories for this directory.
"""
if self.srcdir and not self.duplicate:
return self.srcdir.get_all_rdirs() + self.repositories
return self.repositories
    @SCons.Memoize.CountMethodCall
    def get_all_rdirs(self):
        """Return this directory plus the corresponding directories in
        all repositories, walking up the tree.  Memoized (a copy of the
        list is stored and returned so callers can't mutate the cache)."""
        try:
            return list(self._memo['get_all_rdirs'])
        except KeyError:
            pass
        result = [self]
        fname = '.'
        dir = self
        while dir:
            for rep in dir.getRepositories():
                result.append(rep.Dir(fname))
            # Build up the relative path from the starting directory as
            # we climb, so repository lookups stay relative.
            if fname == '.':
                fname = dir.name
            else:
                fname = dir.name + OS_SEP + fname
            dir = dir.up()
        self._memo['get_all_rdirs'] = list(result)
        return result
    def addRepository(self, dir):
        """Register a repository directory for this directory, ignoring
        self-references and duplicates."""
        if dir != self and dir not in self.repositories:
            self.repositories.append(dir)
            # Repository directories display as '.' in printed paths.
            dir._tpath = '.'
            self.__clearRepositoryCache()
def up(self):
return self.dir
    def _rel_path_key(self, other):
        # Memoization key for rel_path(): the string form of the other node.
        return str(other)
    @SCons.Memoize.CountDictCall(_rel_path_key)
    def rel_path(self, other):
        """Return a path to "other" relative to this directory.
        """
        # This complicated and expensive method, which constructs relative
        # paths between arbitrary Node.FS objects, is no longer used
        # by SCons itself.  It was introduced to store dependency paths
        # in .sconsign files relative to the target, but that ended up
        # being significantly inefficient.
        #
        # We're continuing to support the method because some SConstruct
        # files out there started using it when it was available, and
        # we're all about backwards compatibility..
        try:
            memo_dict = self._memo['rel_path']
        except KeyError:
            memo_dict = {}
            self._memo['rel_path'] = memo_dict
        else:
            try:
                return memo_dict[other]
            except KeyError:
                pass
        if self is other:
            result = '.'
        elif other not in self._path_elements:
            # 'other' is not on the path from the root to self, so
            # express it relative to its own directory, recursively.
            try:
                other_dir = other.get_dir()
            except AttributeError:
                result = str(other)
            else:
                if other_dir is None:
                    result = other.name
                else:
                    dir_rel_path = self.rel_path(other_dir)
                    if dir_rel_path == '.':
                        result = other.name
                    else:
                        result = dir_rel_path + OS_SEP + other.name
        else:
            # 'other' is an ancestor (or self is below it): climb up with
            # '..' entries, then descend along other's remaining elements.
            i = self._path_elements.index(other) + 1
            path_elems = ['..'] * (len(self._path_elements) - i) \
                         + [n.name for n in other._path_elements[i:]]
            result = OS_SEP.join(path_elems)
        memo_dict[other] = result
        return result
def get_env_scanner(self, env, kw={}):
import SCons.Defaults
return SCons.Defaults.DirEntryScanner
def get_target_scanner(self):
import SCons.Defaults
return SCons.Defaults.DirEntryScanner
    def get_found_includes(self, env, scanner, path):
        """Return this directory's implicit dependencies.
        We don't bother caching the results because the scan typically
        shouldn't be requested more than once (as opposed to scanning
        .h file contents, which can be requested as many times as the
        files is #included by other files).
        """
        if not scanner:
            return []
        # Clear cached info for this Dir.  If we already visited this
        # directory on our walk down the tree (because we didn't know at
        # that point it was being used as the source for another Node)
        # then we may have calculated build signature before realizing
        # we had to scan the disk.  Now that we have to, though, we need
        # to invalidate the old calculated signature so that any node
        # dependent on our directory structure gets one that includes
        # info about everything on disk.
        self.clear()
        return scanner(self, env, path)
    #
    # Taskmaster interface subsystem
    #
    def prepare(self):
        # Directories need no preparation before being "built".
        pass
    def build(self, **kw):
        """A null "builder" for directories."""
        global MkdirBuilder
        # Only delegate to the real build machinery when a non-default
        # builder has been attached to this directory.
        if self.builder is not MkdirBuilder:
            SCons.Node.Node.build(self, **kw)
    #
    #
    #
    def _create(self):
        """Create this directory, silently and without worrying about
        whether the builder is the default or not."""
        listDirs = []
        parent = self
        # Collect the chain of not-yet-existing parents, topmost last.
        while parent:
            if parent.exists():
                break
            listDirs.append(parent)
            p = parent.up()
            if p is None:
                # Don't use while: - else: for this condition because
                # if so, then parent is None and has no .path attribute.
                raise SCons.Errors.StopError(parent._path)
            parent = p
        listDirs.reverse()
        for dirnode in listDirs:
            try:
                # Don't call dirnode.build(), call the base Node method
                # directly because we definitely *must* create this
                # directory.  The dirnode.build() method will suppress
                # the build if it's the default builder.
                SCons.Node.Node.build(dirnode)
                dirnode.get_executor().nullify()
                # The build() action may or may not have actually
                # created the directory, depending on whether the -n
                # option was used or not.  Delete the _exists and
                # _rexists attributes so they can be reevaluated.
                dirnode.clear()
            except OSError:
                pass
    def multiple_side_effect_has_builder(self):
        # The implicit MkdirBuilder doesn't count as a "real" builder.
        global MkdirBuilder
        return self.builder is not MkdirBuilder and self.has_builder()
def alter_targets(self):
"""Return any corresponding targets in a variant directory.
"""
return self.fs.variant_dir_target_climb(self, self, [])
    def scanner_key(self):
        """A directory does not get scanned."""
        return None
    def get_text_contents(self):
        """We already emit things in text, so just return the binary
        version."""
        return self.get_contents()
    def get_contents(self):
        """Return content signatures and names of all our children
        separated by new-lines. Ensure that the nodes are sorted."""
        # Dispatch through the map selected by _func_get_contents (set
        # to 2 for directories in _morph()).
        return SCons.Node._get_contents_map[self._func_get_contents](self)
    def get_csig(self):
        """Compute the content signature for Directory nodes. In
        general, this is not needed and the content signature is not
        stored in the DirNodeInfo. However, if get_contents on a Dir
        node is called which has a child directory, the child
        directory should return the hash of its contents."""
        contents = self.get_contents()
        return SCons.Util.MD5signature(contents)
    def do_duplicate(self, src):
        # Directories are never physically duplicated into a variant dir.
        pass
def is_up_to_date(self):
"""If any child is not up-to-date, then this directory isn't,
either."""
if self.builder is not MkdirBuilder and not self.exists():
return 0
up_to_date = SCons.Node.up_to_date
for kid in self.children():
if kid.get_state() > up_to_date:
return 0
return 1
    def rdir(self):
        """Return the equivalent directory found in a Repository when
        this directory does not exist locally; otherwise return self."""
        if not self.exists():
            norm_name = _my_normcase(self.name)
            for dir in self.dir.get_all_rdirs():
                try: node = dir.entries[norm_name]
                except KeyError: node = dir.dir_on_disk(self.name)
                if node and node.exists() and \
                    (isinstance(dir, Dir) or isinstance(dir, Entry)):
                        return node
        return self
    def sconsign(self):
        """Return the .sconsign file info for this directory. """
        # Dispatch through the map selected by _func_sconsign (set in
        # _morph()).
        return _sconsign_map[self._func_sconsign](self)
def srcnode(self):
"""Dir has a special need for srcnode()...if we
have a srcdir attribute set, then that *is* our srcnode."""
if self.srcdir:
return self.srcdir
return Base.srcnode(self)
def get_timestamp(self):
"""Return the latest timestamp from among our children"""
stamp = 0
for kid in self.children():
if kid.get_timestamp() > stamp:
stamp = kid.get_timestamp()
return stamp
    def get_abspath(self):
        """Get the absolute path of the directory."""
        return self._abspath
    def get_labspath(self):
        """Get the absolute "lookup" path of the directory."""
        return self._labspath
    def get_internal_path(self):
        # The path relative to the top-level directory.
        return self._path
    def get_tpath(self):
        # The path used for printing in command lines.
        return self._tpath
    def get_path_elements(self):
        # The chain of Nodes from the root down to this directory.
        return self._path_elements
    # The entry_*() helpers build the corresponding path string for a
    # named entry inside this directory without creating a Node for it.
    def entry_abspath(self, name):
        return self._abspath + OS_SEP + name
    def entry_labspath(self, name):
        return self._labspath + '/' + name
    def entry_path(self, name):
        return self._path + OS_SEP + name
    def entry_tpath(self, name):
        return self._tpath + OS_SEP + name
    def entry_exists_on_disk(self, name):
        """ Searches through the file/dir entries of the current
        directory, and returns True if a physical entry with the given
        name could be found.
        @see rentry_exists_on_disk
        """
        try:
            d = self.on_disk_entries
        except AttributeError:
            # First call: snapshot the on-disk entries of this directory
            # into a normalized-case lookup dict.
            d = {}
            try:
                entries = os.listdir(self._abspath)
            except OSError:
                pass
            else:
                for entry in map(_my_normcase, entries):
                    d[entry] = True
            self.on_disk_entries = d
        if sys.platform == 'win32' or sys.platform == 'cygwin':
            name = _my_normcase(name)
            result = d.get(name)
            if result is None:
                # Belt-and-suspenders for Windows:  check directly for
                # 8.3 file names that don't show up in os.listdir().
                result = os.path.exists(self._abspath + OS_SEP + name)
                d[name] = result
            return result
        else:
            return name in d
    def rentry_exists_on_disk(self, name):
        """ Searches through the file/dir entries of the current
        *and* all its remote directories (repos), and returns
        True if a physical entry with the given name could be found.
        The local directory (self) gets searched first, so
        repositories take a lower precedence regarding the
        searching order.
        @see entry_exists_on_disk
        """
        rentry_exists = self.entry_exists_on_disk(name)
        if not rentry_exists:
            # Search through the repository folders
            norm_name = _my_normcase(name)
            for rdir in self.get_all_rdirs():
                try:
                    node = rdir.entries[norm_name]
                    if node:
                        rentry_exists = True
                        break
                except KeyError:
                    # No in-memory entry; check the repository dir's disk.
                    if rdir.entry_exists_on_disk(name):
                        rentry_exists = True
                        break
        return rentry_exists
    @SCons.Memoize.CountMethodCall
    def srcdir_list(self):
        """Return (memoized) the source directories mapped to this
        directory via VariantDir links, walking up the tree."""
        try:
            return self._memo['srcdir_list']
        except KeyError:
            pass
        result = []
        dirname = '.'
        dir = self
        while dir:
            if dir.srcdir:
                result.append(dir.srcdir.Dir(dirname))
            # Extend the relative path as we climb toward the root.
            dirname = dir.name + OS_SEP + dirname
            dir = dir.up()
        self._memo['srcdir_list'] = result
        return result
    def srcdir_duplicate(self, name):
        """Look for 'name' in our source directories; if found, either
        duplicate it into this (variant) directory or return the source
        node directly, depending on the duplicate setting.  Returns None
        if not found."""
        for dir in self.srcdir_list():
            if self.is_under(dir):
                # We shouldn't source from something in the build path;
                # variant_dir is probably under src_dir, in which case
                # we are reflecting.
                break
            if dir.entry_exists_on_disk(name):
                srcnode = dir.Entry(name).disambiguate()
                if self.duplicate:
                    node = self.Entry(name).disambiguate()
                    node.do_duplicate(srcnode)
                    return node
                else:
                    return srcnode
        return None
    def _srcdir_find_file_key(self, filename):
        # Memoization key for srcdir_find_file(): the filename itself.
        return filename
    @SCons.Memoize.CountDictCall(_srcdir_find_file_key)
    def srcdir_find_file(self, filename):
        """Find 'filename' in this directory (and its repositories) or in
        the corresponding source directories (and theirs).  Returns a
        (node, directory) tuple, or (None, None) if not found.  Memoized."""
        try:
            memo_dict = self._memo['srcdir_find_file']
        except KeyError:
            memo_dict = {}
            self._memo['srcdir_find_file'] = memo_dict
        else:
            try:
                return memo_dict[filename]
            except KeyError:
                pass
        def func(node):
            # Accept only File/Entry nodes that are derived or exist.
            if (isinstance(node, File) or isinstance(node, Entry)) and \
               (node.is_derived() or node.exists()):
                return node
            return None
        norm_name = _my_normcase(filename)
        # First pass: this directory and its repositories.
        for rdir in self.get_all_rdirs():
            try: node = rdir.entries[norm_name]
            except KeyError: node = rdir.file_on_disk(filename)
            else: node = func(node)
            if node:
                result = (node, self)
                memo_dict[filename] = result
                return result
        # Second pass: source directories and their repositories.
        for srcdir in self.srcdir_list():
            for rdir in srcdir.get_all_rdirs():
                try: node = rdir.entries[norm_name]
                except KeyError: node = rdir.file_on_disk(filename)
                else: node = func(node)
                if node:
                    result = (File(filename, self, self.fs), srcdir)
                    memo_dict[filename] = result
                    return result
        result = (None, None)
        memo_dict[filename] = result
        return result
def dir_on_disk(self, name):
if self.entry_exists_on_disk(name):
try: return self.Dir(name)
except TypeError: pass
node = self.srcdir_duplicate(name)
if isinstance(node, File):
return None
return node
def file_on_disk(self, name):
if self.entry_exists_on_disk(name):
try: return self.File(name)
except TypeError: pass
node = self.srcdir_duplicate(name)
if isinstance(node, Dir):
return None
return node
def walk(self, func, arg):
"""
Walk this directory tree by calling the specified function
for each directory in the tree.
This behaves like the os.path.walk() function, but for in-memory
Node.FS.Dir objects. The function takes the same arguments as
the functions passed to os.path.walk():
func(arg, dirname, fnames)
Except that "dirname" will actually be the directory *Node*,
not the string. The '.' and '..' entries are excluded from
fnames. The fnames list may be modified in-place to filter the
subdirectories visited or otherwise impose a specific order.
The "arg" argument is always passed to func() and may be used
in any way (or ignored, passing None is common).
"""
entries = self.entries
names = list(entries.keys())
names.remove('.')
names.remove('..')
func(arg, self, names)
for dirname in [n for n in names if isinstance(entries[n], Dir)]:
entries[dirname].walk(func, arg)
def glob(self, pathname, ondisk=True, source=False, strings=False, exclude=None):
"""
Returns a list of Nodes (or strings) matching a specified
pathname pattern.
Pathname patterns follow UNIX shell semantics: * matches
any-length strings of any characters, ? matches any character,
and [] can enclose lists or ranges of characters. Matches do
not span directory separators.
The matches take into account Repositories, returning local
Nodes if a corresponding entry exists in a Repository (either
an in-memory Node or something on disk).
By defafult, the glob() function matches entries that exist
on-disk, in addition to in-memory Nodes. Setting the "ondisk"
argument to False (or some other non-true value) causes the glob()
function to only match in-memory Nodes. The default behavior is
to return both the on-disk and in-memory Nodes.
The "source" argument, when true, specifies that corresponding
source Nodes must be returned if you're globbing in a build
directory (initialized with VariantDir()). The default behavior
is to return Nodes local to the VariantDir().
The "strings" argument, when true, returns the matches as strings,
not Nodes. The strings are path names relative to this directory.
The "exclude" argument, if not None, must be a pattern or a list
of patterns following the same UNIX shell semantics.
Elements matching a least one pattern of this list will be excluded
from the result.
The underlying algorithm is adapted from the glob.glob() function
in the Python library (but heavily modified), and uses fnmatch()
under the covers.
"""
dirname, basename = os.path.split(pathname)
if not dirname:
result = self._glob1(basename, ondisk, source, strings)
else:
if has_glob_magic(dirname):
list = self.glob(dirname, ondisk, source, False, exclude)
else:
list = [self.Dir(dirname, create=True)]
result = []
for dir in list:
r = dir._glob1(basename, ondisk, source, strings)
if strings:
r = [os.path.join(str(dir), x) for x in r]
result.extend(r)
if exclude:
excludes = []
excludeList = SCons.Util.flatten(exclude)
for x in excludeList:
r = self.glob(x, ondisk, source, strings)
excludes.extend(r)
result = [x for x in result if not any(fnmatch.fnmatch(str(x), str(e)) for e in SCons.Util.flatten(excludes))]
return sorted(result, key=lambda a: str(a))
    def _glob1(self, pattern, ondisk=True, source=False, strings=False):
        """
        Globs for and returns a list of entry names matching a single
        pattern in this directory.
        This searches any repositories and source directories for
        corresponding entries and returns a Node (or string) relative
        to the current directory if an entry is found anywhere.
        TODO: handle pattern with no wildcard
        """
        # Search this directory's repository chain plus every source
        # directory's repository chain.
        search_dir_list = self.get_all_rdirs()
        for srcdir in self.srcdir_list():
            search_dir_list.extend(srcdir.get_all_rdirs())
        selfEntry = self.Entry
        names = []
        for dir in search_dir_list:
            # We use the .name attribute from the Node because the keys of
            # the dir.entries dictionary are normalized (that is, all upper
            # case) on case-insensitive systems like Windows.
            node_names = [ v.name for k, v in dir.entries.items()
                           if k not in ('.', '..') ]
            names.extend(node_names)
            if not strings:
                # Make sure the working directory (self) actually has
                # entries for all Nodes in repositories or variant dirs.
                for name in node_names: selfEntry(name)
            if ondisk:
                try:
                    disk_names = os.listdir(dir._abspath)
                except os.error:
                    # Directory doesn't exist on disk; nothing to add.
                    continue
                names.extend(disk_names)
                if not strings:
                    # We're going to return corresponding Nodes in
                    # the local directory, so we need to make sure
                    # those Nodes exist.  We only want to create
                    # Nodes for the entries that will match the
                    # specified pattern, though, which means we
                    # need to filter the list here, even though
                    # the overall list will also be filtered later,
                    # after we exit this loop.
                    if pattern[0] != '.':
                        disk_names = [x for x in disk_names if x[0] != '.']
                    disk_names = fnmatch.filter(disk_names, pattern)
                    dirEntry = dir.Entry
                    for name in disk_names:
                        # Add './' before disk filename so that '#' at
                        # beginning of filename isn't interpreted.
                        name = './' + name
                        node = dirEntry(name).disambiguate()
                        n = selfEntry(name)
                        if n.__class__ != node.__class__:
                            # Morph the local node to the same type as the
                            # node found on disk (File vs. Dir).
                            n.__class__ = node.__class__
                            n._morph()
        # De-duplicate, drop dotfiles unless explicitly requested, and
        # apply the pattern to the combined name list.
        names = set(names)
        if pattern[0] != '.':
            names = [x for x in names if x[0] != '.']
        names = fnmatch.filter(names, pattern)
        if strings:
            return names
        return [self.entries[_my_normcase(n)] for n in names]
class RootDir(Dir):
    """A class for the root directory of a file system.
    This is the same as a Dir class, except that the path separator
    ('/' or '\\') is actually part of the name, so we don't need to
    add a separator when creating the path names of entries within
    this directory.
    """
    # Per-root cache mapping normalized absolute paths to Nodes.
    __slots__ = ('_lookupDict', )
    def __init__(self, drive, fs):
        """Initialize the root for *drive* ('' for the POSIX/default root,
        '//' for a UNC root, or a Windows drive letter) in filesystem *fs*."""
        if SCons.Debug.track_instances: logInstanceCreation(self, 'Node.FS.RootDir')
        SCons.Node.Node.__init__(self)
        # Handle all the types of drives:
        if drive == '':
            # No drive, regular UNIX root or Windows default drive.
            name = OS_SEP
            dirname = OS_SEP
        elif drive == '//':
            # UNC path
            name = UNC_PREFIX
            dirname = UNC_PREFIX
        else:
            # Windows drive letter
            name = drive
            dirname = drive + OS_SEP
        # Filename with extension as it was specified when the object was
        # created; to obtain filesystem path, use Python str() function
        self.name = SCons.Util.silent_intern(name)
        self.fs = fs #: Reference to parent Node.FS object
        self._path_elements = [self]
        # A root is its own parent directory.
        self.dir = self
        self._func_rexists = 2
        self._func_target_from_source = 1
        self.store_info = 1
        # Now set our paths to what we really want them to be. The
        # name should already contain any necessary separators, such
        # as the initial drive letter (the name) plus the directory
        # separator, except for the "lookup abspath," which does not
        # have the drive letter.
        self._abspath = dirname
        self._labspath = ''
        self._path = dirname
        self._tpath = dirname
        self.dirname = dirname
        self._morph()
        self.duplicate = 0
        # Seed the lookup cache so '' and '/' both resolve to this root.
        self._lookupDict = {}
        self._lookupDict[''] = self
        self._lookupDict['/'] = self
        self.root = self
        # The // entry is necessary because os.path.normpath()
        # preserves double slashes at the beginning of a path on Posix
        # platforms.
        if not has_unc:
            self._lookupDict['//'] = self
    def _morph(self):
        """Turn a file system Node (either a freshly initialized directory
        object or a separate Entry object) into a proper directory object.
        Set up this directory's entries and hook it into the file
        system tree.  Specify that directories (this Node) don't use
        signatures for calculating whether they're current.
        """
        self.repositories = []
        self.srcdir = None
        self.entries = {}
        self.entries['.'] = self
        self.entries['..'] = self.dir
        self.cwd = self
        self.searched = 0
        self._sconsign = None
        self.variant_dirs = []
        # Indices into module-level dispatch maps in SCons.Node.
        self.changed_since_last_build = 3
        self._func_sconsign = 1
        self._func_exists = 2
        self._func_get_contents = 2
        # Don't just reset the executor, replace its action list,
        # because it might have some pre-or post-actions that need to
        # be preserved.
        #
        # But don't reset the executor if there is a non-null executor
        # attached already. The existing executor might have other
        # targets, in which case replacing the action list with a
        # Mkdir action is a big mistake.
        if not hasattr(self, 'executor'):
            self.builder = get_MkdirBuilder()
            self.get_executor().set_action_list(self.builder.action)
        else:
            # Prepend MkdirBuilder action to existing action list
            l = self.get_executor().action_list
            a = get_MkdirBuilder().action
            l.insert(0, a)
            self.get_executor().set_action_list(l)
    def must_be_same(self, klass):
        # A root may stand in for a plain Dir; defer everything else to Base.
        if klass is Dir:
            return
        Base.must_be_same(self, klass)
    def _lookup_abs(self, p, klass, create=1):
        """
        Fast (?) lookup of a *normalized* absolute path.
        This method is intended for use by internal lookups with
        already-normalized path data. For general-purpose lookups,
        use the FS.Entry(), FS.Dir() or FS.File() methods.
        The caller is responsible for making sure we're passed a
        normalized absolute path; we merely let Python's dictionary look
        up and return the One True Node.FS object for the path.
        If a Node for the specified "p" doesn't already exist, and
        "create" is specified, the Node may be created after recursive
        invocation to find or create the parent directory or directories.
        """
        k = _my_normcase(p)
        try:
            result = self._lookupDict[k]
        except KeyError:
            if not create:
                msg = "No such file or directory: '%s' in '%s' (and create is False)" % (p, str(self))
                raise SCons.Errors.UserError(msg)
            # There is no Node for this path name, and we're allowed
            # to create it.
            dir_name, file_name = p.rsplit('/',1)
            # Recursively find/create the parent, then hang the new
            # Node off it and register it in both lookup structures.
            dir_node = self._lookup_abs(dir_name, Dir)
            result = klass(file_name, dir_node, self.fs)
            # Double-check on disk (as configured) that the Node we
            # created matches whatever is out there in the real world.
            result.diskcheck_match()
            self._lookupDict[k] = result
            dir_node.entries[_my_normcase(file_name)] = result
            dir_node.implicit = None
        else:
            # There is already a Node for this path name. Allow it to
            # complain if we were looking for an inappropriate type.
            result.must_be_same(klass)
        return result
    def __str__(self):
        return self._abspath
    def entry_abspath(self, name):
        # The root name already ends with the separator; plain concat.
        return self._abspath + name
    def entry_labspath(self, name):
        return '/' + name
    def entry_path(self, name):
        return self._path + name
    def entry_tpath(self, name):
        return self._tpath + name
    def is_under(self, dir):
        # Only the root itself is "under" the root by this definition.
        if self is dir:
            return 1
        else:
            return 0
    def up(self):
        # The root has no parent.
        return None
    def get_dir(self):
        return None
    def src_builder(self):
        return _null
class FileNodeInfo(SCons.Node.NodeInfoBase):
    """Signature/state information stored per File node: content
    signature, modification time and size."""
    __slots__ = ('csig', 'timestamp', 'size')
    current_version_id = 2
    field_list = ['csig', 'timestamp', 'size']
    # This should get reset by the FS initialization.
    fs = None
    def str_to_node(self, s):
        """Convert a stored path string *s* back into an Entry Node,
        resolving drive-relative and top-relative paths."""
        top = self.fs.Top
        root = top.root
        if do_splitdrive:
            drive, s = _my_splitdrive(s)
            if drive:
                root = self.fs.get_root(drive)
        if not os.path.isabs(s):
            # Relative paths were stored relative to the top directory.
            s = top.get_labspath() + '/' + s
        return root._lookup_abs(s, Entry)
    def __getstate__(self):
        """
        Return all fields that shall be pickled. Walk the slots in the class
        hierarchy and add those to the state dictionary. If a '__dict__' slot is
        available, copy all entries to the dictionary. Also include the version
        id, which is fixed for all instances of a class.
        """
        state = getattr(self, '__dict__', {}).copy()
        for obj in type(self).mro():
            for name in getattr(obj, '__slots__', ()):
                if hasattr(self, name):
                    state[name] = getattr(self, name)
        state['_version_id'] = self.current_version_id
        try:
            del state['__weakref__']
        except KeyError:
            pass
        return state
    def __setstate__(self, state):
        """
        Restore the attributes from a pickled state.
        """
        # TODO check or discard version
        del state['_version_id']
        for key, value in state.items():
            if key not in ('__weakref__',):
                setattr(self, key, value)
    def __eq__(self, other):
        # Equal iff all three stored fields match.
        return self.csig == other.csig and self.timestamp == other.timestamp and self.size == other.size
    def __ne__(self, other):
        return not self.__eq__(other)
class FileBuildInfo(SCons.Node.BuildInfoBase):
    """
    This is info loaded from sconsign.
    Attributes unique to FileBuildInfo:
    dependency_map : Caches file->csig mapping
    for all dependencies. Currently this is only used when using
    MD5-timestamp decider.
    It's used to ensure that we copy the correct
    csig from previous build to be written to .sconsign when current build
    is done. Previously the matching of csig to file was strictly by order
    they appeared in bdepends, bsources, or bimplicit, and so a change in order
    or count of any of these could yield writing wrong csig, and then false positive
    rebuilds
    """
    __slots__ = ['dependency_map', ]
    current_version_id = 2
    def __setattr__(self, key, value):
        # If any attributes are changed in FileBuildInfo, we need to
        # invalidate the cached map of file name to content signature
        # held in dependency_map. Currently only used with
        # MD5-timestamp decider
        if key != 'dependency_map' and hasattr(self, 'dependency_map'):
            del self.dependency_map
        return super(FileBuildInfo, self).__setattr__(key, value)
    def convert_to_sconsign(self):
        """
        Converts this FileBuildInfo object for writing to a .sconsign file
        This replaces each Node in our various dependency lists with its
        usual string representation: relative to the top-level SConstruct
        directory, or an absolute path if it's outside.
        """
        if os_sep_is_slash:
            node_to_str = str
        else:
            # Normalize OS-specific separators to '/' for portability of
            # the .sconsign file.
            def node_to_str(n):
                try:
                    s = n.get_internal_path()
                except AttributeError:
                    s = str(n)
                else:
                    s = s.replace(OS_SEP, '/')
                return s
        for attr in ['bsources', 'bdepends', 'bimplicit']:
            try:
                val = getattr(self, attr)
            except AttributeError:
                pass
            else:
                setattr(self, attr, list(map(node_to_str, val)))
    def convert_from_sconsign(self, dir, name):
        """
        Converts a newly-read FileBuildInfo object for in-SCons use
        For normal up-to-date checking, we don't have any conversion to
        perform--but we're leaving this method here to make that clear.
        """
        pass
    def prepare_dependencies(self):
        """
        Prepares a FileBuildInfo object for explaining what changed
        The bsources, bdepends and bimplicit lists have all been
        stored on disk as paths relative to the top-level SConstruct
        directory. Convert the strings to actual Nodes (for use by the
        --debug=explain code and --implicit-cache).
        """
        attrs = [
            ('bsources', 'bsourcesigs'),
            ('bdepends', 'bdependsigs'),
            ('bimplicit', 'bimplicitsigs'),
        ]
        for (nattr, sattr) in attrs:
            try:
                strings = getattr(self, nattr)
                nodeinfos = getattr(self, sattr)
            except AttributeError:
                continue
            if strings is None or nodeinfos is None:
                continue
            nodes = []
            for s, ni in zip(strings, nodeinfos):
                if not isinstance(s, SCons.Node.Node):
                    # Stored as a string: let the NodeInfo resolve it.
                    s = ni.str_to_node(s)
                nodes.append(s)
            setattr(self, nattr, nodes)
    def format(self, names=0):
        """Return a human-readable multi-line summary of all dependency
        signatures plus the action signature."""
        result = []
        bkids = self.bsources + self.bdepends + self.bimplicit
        bkidsigs = self.bsourcesigs + self.bdependsigs + self.bimplicitsigs
        for bkid, bkidsig in zip(bkids, bkidsigs):
            result.append(str(bkid) + ': ' +
                          ' '.join(bkidsig.format(names=names)))
        if not hasattr(self,'bact'):
            self.bact = "none"
        result.append('%s [%s]' % (self.bactsig, self.bact))
        return '\n'.join(result)
class File(Base):
    """A class for files in a file system.
    """
    # Several slots mirror Dir attributes because an Entry node may be
    # morphed into either a File or a Dir after creation.
    __slots__ = ['scanner_paths',
                 'cachedir_csig',
                 'cachesig',
                 'repositories',
                 'srcdir',
                 'entries',
                 'searched',
                 '_sconsign',
                 'variant_dirs',
                 'root',
                 'dirname',
                 'on_disk_entries',
                 'released_target_info',
                 'contentsig']
    NodeInfo = FileNodeInfo
    BuildInfo = FileBuildInfo
    # Chunk size multiplier for hashing; get_content_hash() uses
    # md5_chunksize * 1024 bytes per read.
    md5_chunksize = 64
    def diskcheck_match(self):
        # Delegates to the module-level diskcheck_match() helper (which
        # this method name shadows): complain if a *directory* exists on
        # disk where this File node is expected.
        diskcheck_match(self, self.isdir,
                        "Directory %s found where file expected.")
    def __init__(self, name, directory, fs):
        """Initialize a File node named *name* under Dir *directory* in
        filesystem *fs*."""
        if SCons.Debug.track_instances: logInstanceCreation(self, 'Node.FS.File')
        Base.__init__(self, name, directory, fs)
        self._morph()
def Entry(self, name):
"""Create an entry node named 'name' relative to
the directory of this file."""
return self.dir.Entry(name)
def Dir(self, name, create=True):
"""Create a directory node named 'name' relative to
the directory of this file."""
return self.dir.Dir(name, create=create)
def Dirs(self, pathlist):
"""Create a list of directories relative to the SConscript
directory of this file."""
return [self.Dir(p) for p in pathlist]
def File(self, name):
"""Create a file node named 'name' relative to
the directory of this file."""
return self.dir.File(name)
    def _morph(self):
        """Turn a file system node into a File object."""
        self.scanner_paths = {}
        if not hasattr(self, '_local'):
            self._local = 0
        if not hasattr(self, 'released_target_info'):
            self.released_target_info = False
        self.store_info = 1
        # Indices into the module-level dispatch maps in SCons.Node.
        self._func_exists = 4
        self._func_get_contents = 3
        # Initialize this Node's decider function to decide_source() because
        # every file is a source file until it has a Builder attached...
        self.changed_since_last_build = 4
        # If there was already a Builder set on this entry, then
        # we need to make sure we call the target-decider function,
        # not the source-decider.  Reaching in and doing this by hand
        # is a little bogus.  We'd prefer to handle this by adding
        # an Entry.builder_set() method that disambiguates like the
        # other methods, but that starts running into problems with the
        # fragile way we initialize Dir Nodes with their Mkdir builders,
        # yet still allow them to be overridden by the user.  Since it's
        # not clear right now how to fix that, stick with what works
        # until it becomes clear...
        if self.has_builder():
            self.changed_since_last_build = 5
    def scanner_key(self):
        # Scanners are keyed by file suffix (e.g. '.c', '.d').
        return self.get_suffix()
    def get_contents(self):
        # Dispatch through the module-level contents map, selected by the
        # _func_get_contents index assigned in _morph().
        return SCons.Node._get_contents_map[self._func_get_contents](self)
def get_text_contents(self):
"""
This attempts to figure out what the encoding of the text is
based upon the BOM bytes, and then decodes the contents so that
it's a valid python string.
"""
contents = self.get_contents()
# The behavior of various decode() methods and functions
# w.r.t. the initial BOM bytes is different for different
# encodings and/or Python versions. ('utf-8' does not strip
# them, but has a 'utf-8-sig' which does; 'utf-16' seems to
# strip them; etc.) Just sidestep all the complication by
# explicitly stripping the BOM before we decode().
if contents[:len(codecs.BOM_UTF8)] == codecs.BOM_UTF8:
return contents[len(codecs.BOM_UTF8):].decode('utf-8')
if contents[:len(codecs.BOM_UTF16_LE)] == codecs.BOM_UTF16_LE:
return contents[len(codecs.BOM_UTF16_LE):].decode('utf-16-le')
if contents[:len(codecs.BOM_UTF16_BE)] == codecs.BOM_UTF16_BE:
return contents[len(codecs.BOM_UTF16_BE):].decode('utf-16-be')
try:
return contents.decode('utf-8')
except UnicodeDecodeError as e:
try:
return contents.decode('latin-1')
except UnicodeDecodeError as e:
return contents.decode('utf-8', error='backslashreplace')
    def get_content_hash(self):
        """
        Compute and return the MD5 hash for this file.
        """
        if not self.rexists():
            # Non-existent (even via repositories): hash of empty content.
            return SCons.Util.MD5signature('')
        fname = self.rfile().get_abspath()
        try:
            cs = SCons.Util.MD5filesignature(fname,
                chunksize=SCons.Node.FS.File.md5_chunksize*1024)
        except EnvironmentError as e:
            # Make sure the error carries the file name for the caller.
            if not e.filename:
                e.filename = fname
            raise
        return cs
@SCons.Memoize.CountMethodCall
def get_size(self):
try:
return self._memo['get_size']
except KeyError:
pass
if self.rexists():
size = self.rfile().getsize()
else:
size = 0
self._memo['get_size'] = size
return size
@SCons.Memoize.CountMethodCall
def get_timestamp(self):
try:
return self._memo['get_timestamp']
except KeyError:
pass
if self.rexists():
timestamp = self.rfile().getmtime()
else:
timestamp = 0
self._memo['get_timestamp'] = timestamp
return timestamp
convert_copy_attrs = [
'bsources',
'bimplicit',
'bdepends',
'bact',
'bactsig',
'ninfo',
]
convert_sig_attrs = [
'bsourcesigs',
'bimplicitsigs',
'bdependsigs',
]
    def convert_old_entry(self, old_entry):
        """Convert a pre-"Big Signature Refactoring" .sconsign entry
        (*old_entry*) into a new-style SConsignEntry and return it."""
        # Convert a .sconsign entry from before the Big Signature
        # Refactoring, doing what we can to convert its information
        # to the new .sconsign entry format.
        #
        # The old format looked essentially like this:
        #
        #   BuildInfo
        #       .ninfo (NodeInfo)
        #           .bsig
        #           .csig
        #           .timestamp
        #           .size
        #       .bsources
        #       .bsourcesigs ("signature" list)
        #       .bdepends
        #       .bdependsigs ("signature" list)
        #       .bimplicit
        #       .bimplicitsigs ("signature" list)
        #       .bact
        #       .bactsig
        #
        # The new format looks like this:
        #
        #   .ninfo (NodeInfo)
        #       .bsig
        #       .csig
        #       .timestamp
        #       .size
        #   .binfo (BuildInfo)
        #       .bsources
        #       .bsourcesigs (NodeInfo list)
        #           .bsig
        #           .csig
        #           .timestamp
        #           .size
        #       .bdepends
        #       .bdependsigs (NodeInfo list)
        #           .bsig
        #           .csig
        #           .timestamp
        #           .size
        #       .bimplicit
        #       .bimplicitsigs (NodeInfo list)
        #           .bsig
        #           .csig
        #           .timestamp
        #           .size
        #       .bact
        #       .bactsig
        #
        # The basic idea of the new structure is that a NodeInfo always
        # holds all available information about the state of a given Node
        # at a certain point in time.  The various .b*sigs lists can just
        # be a list of pointers to the .ninfo attributes of the different
        # dependent nodes, without any copying of information until it's
        # time to pickle it for writing out to a .sconsign file.
        #
        # The complicating issue is that the *old* format only stored one
        # "signature" per dependency, based on however the *last* build
        # was configured.  We don't know from just looking at it whether
        # it was a build signature, a content signature, or a timestamp
        # "signature".  Since we no longer use build signatures, the
        # best we can do is look at the length and if it's thirty two,
        # assume that it was (or might have been) a content signature.
        # If it was actually a build signature, then it will cause a
        # rebuild anyway when it doesn't match the new content signature,
        # but that's probably the best we can do.
        import SCons.SConsign
        new_entry = SCons.SConsign.SConsignEntry()
        new_entry.binfo = self.new_binfo()
        binfo = new_entry.binfo
        # Copy simple attributes straight across, removing them from the
        # old entry as we go.
        for attr in self.convert_copy_attrs:
            try:
                value = getattr(old_entry, attr)
            except AttributeError:
                continue
            setattr(binfo, attr, value)
            delattr(old_entry, attr)
        # Re-wrap bare "signature" strings as NodeInfo objects, guessing
        # csig vs. timestamp from the string length (32 == MD5 hex digest).
        for attr in self.convert_sig_attrs:
            try:
                sig_list = getattr(old_entry, attr)
            except AttributeError:
                continue
            value = []
            for sig in sig_list:
                ninfo = self.new_ninfo()
                if len(sig) == 32:
                    ninfo.csig = sig
                else:
                    ninfo.timestamp = sig
                value.append(ninfo)
            setattr(binfo, attr, value)
            delattr(old_entry, attr)
        return new_entry
    @SCons.Memoize.CountMethodCall
    def get_stored_info(self):
        """Return the memoized SConsignEntry stored for this file,
        creating an empty one if no usable entry exists."""
        try:
            return self._memo['get_stored_info']
        except KeyError:
            pass
        try:
            sconsign_entry = self.dir.sconsign().get_entry(self.name)
        except (KeyError, EnvironmentError):
            # No entry (or unreadable .sconsign): fabricate an empty one.
            import SCons.SConsign
            sconsign_entry = SCons.SConsign.SConsignEntry()
            sconsign_entry.binfo = self.new_binfo()
            sconsign_entry.ninfo = self.new_ninfo()
        else:
            if isinstance(sconsign_entry, FileBuildInfo):
                # This is a .sconsign file from before the Big Signature
                # Refactoring; convert it as best we can.
                sconsign_entry = self.convert_old_entry(sconsign_entry)
            try:
                # Build signatures are no longer used; drop any stored one.
                delattr(sconsign_entry.ninfo, 'bsig')
            except AttributeError:
                pass
        self._memo['get_stored_info'] = sconsign_entry
        return sconsign_entry
def get_stored_implicit(self):
binfo = self.get_stored_info().binfo
binfo.prepare_dependencies()
try: return binfo.bimplicit
except AttributeError: return None
def rel_path(self, other):
return self.dir.rel_path(other)
    def _get_found_includes_key(self, env, scanner, path):
        # Memoization key for get_found_includes(): identity of env and
        # scanner plus the (hashable) path tuple.
        return (id(env), id(scanner), path)
    @SCons.Memoize.CountDictCall(_get_found_includes_key)
    def get_found_includes(self, env, scanner, path):
        """Return the included implicit dependencies in this file.
        Cache results so we only scan the file once per path
        regardless of how many times this information is requested.
        """
        memo_key = (id(env), id(scanner), path)
        try:
            memo_dict = self._memo['get_found_includes']
        except KeyError:
            memo_dict = {}
            self._memo['get_found_includes'] = memo_dict
        else:
            try:
                return memo_dict[memo_key]
            except KeyError:
                pass
        if scanner:
            # Each scanned node may still be an Entry; resolve it to a
            # concrete File/Dir before recording it.
            result = [n.disambiguate() for n in scanner(self, env, path)]
        else:
            result = []
        memo_dict[memo_key] = result
        return result
    def _createDir(self):
        # ensure that the directories for this node are
        # created.
        self.dir._create()
    def push_to_cache(self):
        """Try to push the node into a cache
        """
        # This should get called before the Nodes' .built() method is
        # called, which would clear the build signature if the file has
        # a source scanner.
        #
        # We have to clear the local memoized values *before* we push
        # the node to cache so that the memoization of the self.exists()
        # return value doesn't interfere.
        if self.nocache:
            return
        self.clear_memoized_values()
        if self.exists():
            self.get_build_env().get_CacheDir().push(self)
def retrieve_from_cache(self):
"""Try to retrieve the node's content from a cache
This method is called from multiple threads in a parallel build,
so only do thread safe stuff here. Do thread unsafe stuff in
built().
Returns true if the node was successfully retrieved.
"""
if self.nocache:
return None
if not self.is_derived():
return None
return self.get_build_env().get_CacheDir().retrieve(self)
    def visited(self):
        """Called when this node is visited (whether built or not):
        refresh its NodeInfo and store it via the store_info map."""
        if self.exists() and self.executor is not None:
            self.get_build_env().get_CacheDir().push_if_forced(self)
        ninfo = self.get_ninfo()
        csig = self.get_max_drift_csig()
        if csig:
            ninfo.csig = csig
        ninfo.timestamp = self.get_timestamp()
        ninfo.size      = self.get_size()
        if not self.has_builder():
            # This is a source file, but it might have been a target file
            # in another build that included more of the DAG.  Copy
            # any build information that's stored in the .sconsign file
            # into our binfo object so it doesn't get lost.
            old = self.get_stored_info()
            self.get_binfo().merge(old.binfo)
        SCons.Node.store_info_map[self.store_info](self)
    def release_target_info(self):
        """Called just after this node has been marked
         up-to-date or was built completely.
         This is where we try to release as many target node infos
         as possible for clean builds and update runs, in order
         to minimize the overall memory consumption.
         We'd like to remove a lot more attributes like self.sources
         and self.sources_set, but they might get used
         in a next build step. For example, during configuration
         the source files for a built E{*}.o file are used to figure out
         which linker to use for the resulting Program (gcc vs. g++)!
         That's why we check for the 'keep_targetinfo' attribute,
         config Nodes and the Interactive mode just don't allow
         an early release of most variables.
         In the same manner, we can't simply remove the self.attributes
         here. The smart linking relies on the shared flag, and some
         parts of the java Tool use it to transport information
         about nodes...
         @see: built() and Node.release_target_info()
         """
        if (self.released_target_info or SCons.Node.interactive):
            return
        if not hasattr(self.attributes, 'keep_targetinfo'):
            # Cache some required values, before releasing
            # stuff like env, executor and builder...
            self.changed(allowcache=True)
            self.get_contents_sig()
            self.get_build_env()
            # Now purge unneeded stuff to free memory...
            self.executor = None
            self._memo.pop('rfile', None)
            self.prerequisites = None
            # Cleanup lists, but only if they're empty
            if not len(self.ignore_set):
                self.ignore_set = None
            if not len(self.implicit_set):
                self.implicit_set = None
            if not len(self.depends_set):
                self.depends_set = None
            if not len(self.ignore):
                self.ignore = None
            if not len(self.depends):
                self.depends = None
            # Mark this node as done, we only have to release
            # the memory once...
            self.released_target_info = True
def find_src_builder(self):
if self.rexists():
return None
scb = self.dir.src_builder()
if scb is _null:
scb = None
if scb is not None:
try:
b = self.builder
except AttributeError:
b = None
if b is None:
self.builder_set(scb)
return scb
def has_src_builder(self):
"""Return whether this Node has a source builder or not.
If this Node doesn't have an explicit source code builder, this
is where we figure out, on the fly, if there's a transparent
source code builder for it.
Note that if we found a source builder, we also set the
self.builder attribute, so that all of the methods that actually
*build* this file don't have to do anything different.
"""
try:
scb = self.sbuilder
except AttributeError:
scb = self.sbuilder = self.find_src_builder()
return scb is not None
    def alter_targets(self):
        """Return any corresponding targets in a variant directory.
        """
        if self.is_derived():
            # Derived files already live in their final (variant) location.
            return [], None
        return self.fs.variant_dir_target_climb(self, self.dir, [self.name])
    def _rmv_existing(self):
        """Unlink an existing copy of this target before rebuilding it,
        clearing memoized state first so exists() is re-evaluated."""
        self.clear_memoized_values()
        if SCons.Node.print_duplicate:
            print("dup: removing existing target {}".format(self))
        e = Unlink(self, [], None)
        if isinstance(e, SCons.Errors.BuildError):
            raise e
#
# Taskmaster interface subsystem
#
    def make_ready(self):
        # Resolve any transparent source builder and compute build info
        # before the Taskmaster executes this node.
        self.has_src_builder()
        self.get_binfo()
    def prepare(self):
        """Prepare for this file to be created."""
        SCons.Node.Node.prepare(self)
        if self.get_state() != SCons.Node.up_to_date:
            if self.exists():
                if self.is_derived() and not self.precious:
                    # Remove the stale target so old content can't leak
                    # into the new build.
                    self._rmv_existing()
            else:
                try:
                    self._createDir()
                except SCons.Errors.StopError as drive:
                    # NOTE(review): the caught exception object itself is
                    # formatted as the "drive" in the message below.
                    raise SCons.Errors.StopError("No drive `{}' for target `{}'.".format(drive, self))
#
#
#
def remove(self):
"""Remove this file."""
if self.exists() or self.islink():
self.fs.unlink(self.get_internal_path())
return 1
return None
    def do_duplicate(self, src):
        """Duplicate (link/copy) source node *src* into this variant-dir
        location, raising StopError on failure."""
        self._createDir()
        if SCons.Node.print_duplicate:
            print("dup: relinking variant '{}' from '{}'".format(self, src))
        Unlink(self, None, None)
        e = Link(self, src, None)
        if isinstance(e, SCons.Errors.BuildError):
            raise SCons.Errors.StopError("Cannot duplicate `{}' in `{}': {}.".format(src.get_internal_path(), self.dir._path, e.errstr))
        self.linked = 1
        # The Link() action may or may not have actually
        # created the file, depending on whether the -n
        # option was used or not.  Delete the _exists and
        # _rexists attributes so they can be reevaluated.
        self.clear()
@SCons.Memoize.CountMethodCall
def exists(self):
try:
return self._memo['exists']
except KeyError:
pass
result = SCons.Node._exists_map[self._func_exists](self)
self._memo['exists'] = result
return result
#
# SIGNATURE SUBSYSTEM
#
    def get_max_drift_csig(self):
        """
        Returns the content signature currently stored for this node
        if it's been unmodified longer than the max_drift value, or the
        max_drift value is 0.  Returns None otherwise.
        """
        old = self.get_stored_info()
        mtime = self.get_timestamp()
        max_drift = self.fs.max_drift
        if max_drift > 0:
            if (time.time() - mtime) > max_drift:
                # The file hasn't changed recently; trust the stored csig
                # if its recorded timestamp still matches.
                try:
                    n = old.ninfo
                    if n.timestamp and n.csig and n.timestamp == mtime:
                        return n.csig
                except AttributeError:
                    pass
        elif max_drift == 0:
            # max_drift 0 means "always trust the stored signature."
            try:
                return old.ninfo.csig
            except AttributeError:
                pass
        return None
    def get_csig(self):
        """
        Generate a node's content signature, the digested signature
        of its content.
        node - the node
        cache - alternate node to use for the signature cache
        returns - the content signature
        """
        ninfo = self.get_ninfo()
        try:
            # Already computed for this run.
            return ninfo.csig
        except AttributeError:
            pass
        csig = self.get_max_drift_csig()
        if csig is None:
            try:
                # NOTE(review): md5_chunksize here is 64 (not *1024 as in
                # get_content_hash), so only files under 64 bytes take the
                # read-whole-contents path -- both paths yield the same MD5.
                if self.get_size() < SCons.Node.FS.File.md5_chunksize:
                    contents = self.get_contents()
                else:
                    csig = self.get_content_hash()
            except IOError:
                # This can happen if there's actually a directory on-disk,
                # which can be the case if they've disabled disk checks,
                # or if an action with a File target actually happens to
                # create a same-named directory by mistake.
                csig = ''
            else:
                if not csig:
                    csig = SCons.Util.MD5signature(contents)
        ninfo.csig = csig
        return csig
#
# DECISION SUBSYSTEM
#
    def builder_set(self, builder):
        # Attaching a builder makes this a target file: switch the decider
        # index to the target variant (5), as in _morph().
        SCons.Node.Node.builder_set(self, builder)
        self.changed_since_last_build = 5
    def built(self):
        """Called just after this File node is successfully built.
         Just like for 'release_target_info' we try to release
         some more target node attributes in order to minimize the
         overall memory consumption.
         @see: release_target_info
        """
        SCons.Node.Node.built(self)
        if (not SCons.Node.interactive and
            not hasattr(self.attributes, 'keep_targetinfo')):
            # Ensure that the build infos get computed and cached...
            SCons.Node.store_info_map[self.store_info](self)
            # ... then release some more variables.
            self._specific_sources = False
            self._labspath = None
            self._save_str()
            self.cwd = None
            self.scanner_paths = None
    def changed(self, node=None, allowcache=False):
        """
        Returns if the node is up-to-date with respect to the BuildInfo
        stored last time it was built.
        For File nodes this is basically a wrapper around Node.changed(),
        but we allow the return value to get cached after the reference
        to the Executor got released in release_target_info().
        @see: Node.changed()
        """
        if node is None:
            # Only the default comparison (against our own stored info)
            # may be served from / written to the memo cache.
            try:
                return self._memo['changed']
            except KeyError:
                pass
        has_changed = SCons.Node.Node.changed(self, node)
        if allowcache:
            self._memo['changed'] = has_changed
        return has_changed
def changed_content(self, target, prev_ni, repo_node=None):
cur_csig = self.get_csig()
try:
return cur_csig != prev_ni.csig
except AttributeError:
return 1
    def changed_state(self, target, prev_ni, repo_node=None):
        """State decider: the node counts as changed unless its build
        state is already up_to_date."""
        return self.state != SCons.Node.up_to_date
# Caching node -> string mapping for the below method
__dmap_cache = {}
__dmap_sig_cache = {}
def _build_dependency_map(self, binfo):
"""
Build mapping from file -> signature
Args:
self - self
binfo - buildinfo from node being considered
Returns:
dictionary of file->signature mappings
"""
# For an "empty" binfo properties like bsources
# do not exist: check this to avoid exception.
if (len(binfo.bsourcesigs) + len(binfo.bdependsigs) + \
len(binfo.bimplicitsigs)) == 0:
return {}
binfo.dependency_map = { child:signature for child, signature in zip(chain(binfo.bsources, binfo.bdepends, binfo.bimplicit),
chain(binfo.bsourcesigs, binfo.bdependsigs, binfo.bimplicitsigs))}
return binfo.dependency_map
# @profile
def _add_strings_to_dependency_map(self, dmap):
"""
In the case comparing node objects isn't sufficient, we'll add the strings for the nodes to the dependency map
:return:
"""
first_string = str(next(iter(dmap)))
# print("DMAP:%s"%id(dmap))
if first_string not in dmap:
string_dict = {str(child): signature for child, signature in dmap.items()}
dmap.update(string_dict)
return dmap
def _get_previous_signatures(self, dmap):
"""
Return a list of corresponding csigs from previous
build in order of the node/files in children.
Args:
self - self
dmap - Dictionary of file -> csig
Returns:
List of csigs for provided list of children
"""
prev = []
# MD5_TIMESTAMP_DEBUG = False
if len(dmap) == 0:
if MD5_TIMESTAMP_DEBUG: print("Nothing dmap shortcutting")
return None
elif MD5_TIMESTAMP_DEBUG: print("len(dmap):%d"%len(dmap))
# First try retrieving via Node
if MD5_TIMESTAMP_DEBUG: print("Checking if self is in map:%s id:%s type:%s"%(str(self), id(self), type(self)))
df = dmap.get(self, False)
if df:
return df
# Now check if self's repository file is in map.
rf = self.rfile()
if MD5_TIMESTAMP_DEBUG: print("Checking if self.rfile is in map:%s id:%s type:%s"%(str(rf), id(rf), type(rf)))
rfm = dmap.get(rf, False)
if rfm:
return rfm
# get default string for node and then also string swapping os.altsep for os.sep (/ for \)
c_strs = [str(self)]
if os.altsep:
c_strs.append(c_strs[0].replace(os.sep, os.altsep))
# In some cases the dependency_maps' keys are already strings check.
# Check if either string is now in dmap.
for s in c_strs:
if MD5_TIMESTAMP_DEBUG: print("Checking if str(self) is in map :%s" % s)
df = dmap.get(s, False)
if df:
return df
# Strings don't exist in map, add them and try again
# If there are no strings in this dmap, then add them.
# This may not be necessary, we could walk the nodes in the dmap and check each string
# rather than adding ALL the strings to dmap. In theory that would be n/2 vs 2n str() calls on node
# if not dmap.has_strings:
dmap = self._add_strings_to_dependency_map(dmap)
# In some cases the dependency_maps' keys are already strings check.
# Check if either string is now in dmap.
for s in c_strs:
if MD5_TIMESTAMP_DEBUG: print("Checking if str(self) is in map (now with strings) :%s" % s)
df = dmap.get(s, False)
if df:
return df
# Lastly use nodes get_path() to generate string and see if that's in dmap
if not df:
try:
# this should yield a path which matches what's in the sconsign
c_str = self.get_path()
if os.altsep:
c_str = c_str.replace(os.sep, os.altsep)
if MD5_TIMESTAMP_DEBUG: print("Checking if self.get_path is in map (now with strings) :%s" % s)
df = dmap.get(c_str, None)
except AttributeError as e:
raise FileBuildInfoFileToCsigMappingError("No mapping from file name to content signature for :%s"%c_str)
return df
    def changed_timestamp_then_content(self, target, prev_ni, node=None):
        """
        Used when decider for file is Timestamp-MD5

        NOTE: If the timestamp hasn't changed this will skip md5'ing the
              file and just copy the prev_ni provided.  If the prev_ni
              is wrong. It will propagate it.
              See: https://github.com/SCons/scons/issues/2980

        Args:
            self - dependency
            target - target
            prev_ni - The NodeInfo object loaded from previous builds .sconsign
            node - Node instance.  Check this node for file existence/timestamp
                   if specified.

        Returns:
            Boolean - Indicates if node(File) has changed.
        """
        if node is None:
            node = self
        # Now get sconsign name -> csig map and then get proper prev_ni if possible
        bi = node.get_stored_info().binfo
        rebuilt = False
        try:
            dependency_map = bi.dependency_map
        except AttributeError as e:
            # First time through: derive the map from the stored binfo.
            dependency_map = self._build_dependency_map(bi)
            rebuilt = True
        if len(dependency_map) == 0:
            # If there's no dependency map, there's no need to find the
            # prev_ni as there aren't any
            # shortcut the rest of the logic
            if MD5_TIMESTAMP_DEBUG: print("Skipping checks len(dmap)=0")
            # We still need to get the current file's csig
            # This should be slightly faster than calling self.changed_content(target, new_prev_ni)
            self.get_csig()
            return True
        new_prev_ni = self._get_previous_signatures(dependency_map)
        new = self.changed_timestamp_match(target, new_prev_ni)
        if MD5_TIMESTAMP_DEBUG:
            # Debug cross-check: compare against the decision the
            # caller-supplied prev_ni would have produced.
            old = self.changed_timestamp_match(target, prev_ni)
            if old != new:
                print("Mismatch self.changed_timestamp_match(%s, prev_ni) old:%s new:%s"%(str(target), old, new))
                new_prev_ni = self._get_previous_signatures(dependency_map)
        if not new:
            try:
                # NOTE: We're modifying the current node's csig in a query.
                self.get_ninfo().csig = new_prev_ni.csig
            except AttributeError:
                pass
            return False
        # Timestamp changed: fall back to a (more expensive) content check.
        return self.changed_content(target, new_prev_ni)
def changed_timestamp_newer(self, target, prev_ni, repo_node=None):
try:
return self.get_timestamp() > target.get_timestamp()
except AttributeError:
return 1
def changed_timestamp_match(self, target, prev_ni, repo_node=None):
"""
Return True if the timestamps don't match or if there is no previous timestamp
:param target:
:param prev_ni: Information about the node from the previous build
:return:
"""
try:
return self.get_timestamp() != prev_ni.timestamp
except AttributeError:
return 1
    def is_up_to_date(self):
        """Check for whether the Node is current
        In all cases self is the target we're checking to see if it's up to date
        """
        T = 0
        if T: Trace('is_up_to_date(%s):' % self)
        if not self.exists():
            if T: Trace(' not self.exists():')
            # The file (always a target) doesn't exist locally...
            r = self.rfile()
            if r != self:
                # ...but there is one (always a target) in a Repository...
                if not self.changed(r):
                    if T: Trace(' changed(%s):' % r)
                    # ...and it's even up-to-date...
                    if self._local:
                        # ...and they'd like a local copy.
                        e = LocalCopy(self, r, None)
                        if isinstance(e, SCons.Errors.BuildError):
                            # Likely this should be re-raising exception e
                            # (which would be BuildError)
                            raise e
                        SCons.Node.store_info_map[self.store_info](self)
                    if T: Trace(' 1\n')
                    return 1
            # Not available in a repository (or it changed): record the
            # changed() result for caching purposes and report not current.
            self.changed()
            if T: Trace(' None\n')
            return None
        else:
            # File exists locally: current iff changed() says otherwise.
            r = self.changed()
            if T: Trace(' self.exists():  %s\n' % r)
            return not r
    @SCons.Memoize.CountMethodCall
    def rfile(self):
        """Return this file, or its first usable counterpart found in a
        Repository directory when the file does not exist locally.
        The result is memoized."""
        try:
            return self._memo['rfile']
        except KeyError:
            pass
        result = self
        if not self.exists():
            norm_name = _my_normcase(self.name)
            for repo_dir in self.dir.get_all_rdirs():
                try:
                    node = repo_dir.entries[norm_name]
                except KeyError:
                    node = repo_dir.file_on_disk(self.name)
                if node and node.exists() and \
                   (isinstance(node, File) or isinstance(node, Entry)
                    or not node.is_derived()):
                    result = node
                    # Copy over our local attributes to the repository
                    # Node so we identify shared object files in the
                    # repository and don't assume they're static.
                    #
                    # This isn't perfect; the attribute would ideally
                    # be attached to the object in the repository in
                    # case it was built statically in the repository
                    # and we changed it to shared locally, but that's
                    # rarely the case and would only occur if you
                    # intentionally used the same suffix for both
                    # shared and static objects anyway.  So this
                    # should work well in practice.
                    result.attributes = self.attributes
                    break
        self._memo['rfile'] = result
        return result
def find_repo_file(self):
"""
For this node, find if there exists a corresponding file in one or more repositories
:return: list of corresponding files in repositories
"""
retvals = []
norm_name = _my_normcase(self.name)
for repo_dir in self.dir.get_all_rdirs():
try:
node = repo_dir.entries[norm_name]
except KeyError:
node = repo_dir.file_on_disk(self.name)
if node and node.exists() and \
(isinstance(node, File) or isinstance(node, Entry) \
or not node.is_derived()):
retvals.append(node)
return retvals
    def rstr(self):
        """Return the string path of this node's repository file (or of
        this node itself when no repository copy applies)."""
        return str(self.rfile())
    def get_cachedir_csig(self):
        """
        Fetch a Node's content signature for purposes of computing
        another Node's cachesig.

        This is a wrapper around the normal get_csig() method that handles
        the somewhat obscure case of using CacheDir with the -n option.
        Any files that don't exist would normally be "built" by fetching
        them from the cache, but the normal get_csig() method will try
        to open up the local file, which doesn't exist because the -n
        option meant we didn't actually pull the file from cachedir.
        But since the file *does* actually exist in the cachedir, we
        can use its contents for the csig.
        """
        try:
            # Memoized on the node as .cachedir_csig.
            return self.cachedir_csig
        except AttributeError:
            pass
        cachedir, cachefile = self.get_build_env().get_CacheDir().cachepath(self)
        if not self.exists() and cachefile and os.path.exists(cachefile):
            # Missing locally but present in the cache: hash the cached
            # copy directly, reading in md5_chunksize-KB blocks.
            self.cachedir_csig = SCons.Util.MD5filesignature(cachefile, \
                SCons.Node.FS.File.md5_chunksize * 1024)
        else:
            self.cachedir_csig = self.get_csig()
        return self.cachedir_csig
def get_contents_sig(self):
"""
A helper method for get_cachedir_bsig.
It computes and returns the signature for this
node's contents.
"""
try:
return self.contentsig
except AttributeError:
pass
executor = self.get_executor()
result = self.contentsig = SCons.Util.MD5signature(executor.get_contents())
return result
    def get_cachedir_bsig(self):
        """
        Return the signature for a cached file, including
        its children.

        It adds the path of the cached file to the cache signature,
        because multiple targets built by the same action will all
        have the same build signature, and we have to differentiate
        them somehow.

        Signature should normally be string of hex digits.
        """
        try:
            # Memoized on the node as .cachesig.
            return self.cachesig
        except AttributeError:
            pass
        # Collect signatures for all children
        children = self.children()
        sigs = [n.get_cachedir_csig() for n in children]
        # Append this node's signature...
        sigs.append(self.get_contents_sig())
        # ...and it's path
        sigs.append(self.get_internal_path())
        # Merge this all into a single signature
        result = self.cachesig = SCons.Util.MD5collect(sigs)
        return result
default_fs = None
def get_default_fs():
    """Return the process-wide default FS object, creating it lazily
    on first use."""
    global default_fs
    default_fs = default_fs or FS()
    return default_fs
class FileFinder(object):
    """
    A memoizing object for finding a file along a list of directory
    nodes.  Results of find_file() are cached per (filename, paths)
    pair in self._memo.
    """
    def __init__(self):
        # Memoization dictionary used by the CountDictCall-decorated
        # find_file() below.
        self._memo = {}
    def filedir_lookup(self, p, fd=None):
        """
        A helper method for find_file() that looks up a directory for
        a file we're trying to find.  This only creates the Dir Node if
        it exists on-disk, since if the directory doesn't exist we know
        we won't find any files in it...  :-)

        It would be more compact to just use this as a nested function
        with a default keyword argument (see the commented-out version
        below), but that doesn't work unless you have nested scopes,
        so we define it here just so this work under Python 1.5.2
        (a historical constraint; kept as-is).
        """
        if fd is None:
            fd = self.default_filedir
        dir, name = os.path.split(fd)
        drive, d = _my_splitdrive(dir)
        if not name and d[:1] in ('/', OS_SEP):
            #return p.fs.get_root(drive).dir_on_disk(name)
            return p.fs.get_root(drive)
        if dir:
            # Recurse to resolve the parent directory first.
            p = self.filedir_lookup(p, dir)
            if not p:
                return None
        norm_name = _my_normcase(name)
        try:
            node = p.entries[norm_name]
        except KeyError:
            # Not already a known entry: only materialize it if it
            # exists on disk.
            return p.dir_on_disk(name)
        if isinstance(node, Dir):
            return node
        if isinstance(node, Entry):
            node.must_be_same(Dir)
            return node
        return None
    def _find_file_key(self, filename, paths, verbose=None):
        # Memoization key; `verbose` doesn't affect the result, so it
        # is deliberately excluded.
        return (filename, paths)
    @SCons.Memoize.CountDictCall(_find_file_key)
    def find_file(self, filename, paths, verbose=None):
        """
        Find a node corresponding to either a derived file or a file that exists already.

        Only the first file found is returned, and none is returned if no file is found.

        filename: A filename to find
        paths: A list of directory path *nodes* to search in.  Can be represented as a list, a tuple, or a callable that is called with no arguments and returns the list or tuple.

        returns The node created from the found file.
        """
        memo_key = self._find_file_key(filename, paths)
        try:
            memo_dict = self._memo['find_file']
        except KeyError:
            memo_dict = {}
            self._memo['find_file'] = memo_dict
        else:
            try:
                return memo_dict[memo_key]
            except KeyError:
                pass
        # Normalize `verbose` into a callable that writes progress
        # messages (or leave it falsy to stay silent).
        if verbose and not callable(verbose):
            if not SCons.Util.is_String(verbose):
                verbose = "find_file"
            _verbose = u' %s: ' % verbose
            verbose = lambda s: sys.stdout.write(_verbose + s)
        filedir, filename = os.path.split(filename)
        if filedir:
            # The filename carried a directory component: narrow each
            # search path down to that subdirectory (dropping paths
            # where it doesn't exist).
            self.default_filedir = filedir
            paths = [_f for _f in map(self.filedir_lookup, paths) if _f]
        result = None
        for dir in paths:
            if verbose:
                verbose("looking for '%s' in '%s' ...\n" % (filename, dir))
            node, d = dir.srcdir_find_file(filename)
            if node:
                if verbose:
                    verbose("... FOUND '%s' in '%s'\n" % (filename, d))
                result = node
                break
        memo_dict[memo_key] = result
        return result
# Module-level convenience alias: bound find_file() of a shared
# FileFinder instance, so all callers share one memoization cache.
find_file = FileFinder().find_file
def invalidate_node_memos(targets):
    """
    Invalidate the memoized values of all Nodes (files or directories)
    that are associated with the given entries.

    Added to clear the cache of nodes affected by a direct execution of
    an action (e.g. Delete/Copy/Chmod), since existing Node caches
    become inconsistent if the action is run through Execute().

    The argument `targets` can be a single Node object or filename, or
    a sequence of Nodes/filenames.
    """
    from traceback import extract_stack
    # Only actions run in the SConscript via Execute() appear to be
    # affected, so first look for Execute() on the call stack.
    # XXX Sniffing the stacktrace like this is a very dirty hack and
    # should be replaced by a more sensible solution.
    called_from_execute = False
    for frame in extract_stack():
        if frame[2] == 'Execute' and frame[0][-14:] == 'Environment.py':
            called_from_execute = True
            break
    if not called_from_execute:
        # Dont have to invalidate, so return
        return
    if not SCons.Util.is_List(targets):
        targets = [targets]
    for entry in targets:
        # A Node object can be cleared directly; a filename must be
        # resolved to a Node first.  XXX This creates Node objects even
        # for those filenames which do not correspond to an existing
        # Node object.
        try:
            entry.clear_memoized_values()
        except AttributeError:
            node = get_default_fs().Entry(entry)
            if node:
                node.clear_memoized_values()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
# -*- coding: utf-8 -*-
from hashlib import md5
from werkzeug import check_password_hash, cached_property
import bcrypt
from sqlalchemy import or_
from sqlalchemy.orm import defer
from sqlalchemy.ext.hybrid import hybrid_property
from coaster import newid, newsecret, newpin, valid_username
from . import db, TimestampMixin, BaseMixin
__all__ = ['User', 'UserEmail', 'UserEmailClaim', 'PasswordResetRequest', 'UserExternalId',
'UserPhone', 'UserPhoneClaim', 'Team', 'Organization', 'UserOldId', 'USER_STATUS']
class USER_STATUS:
    """Enumeration of possible values of the User.status column."""
    # Regular, active user account
    ACTIVE = 0
    # Suspended account; lookups via User.get will not return it
    SUSPENDED = 1
    # Merged into another account; resolve via UserOldId/merged_user()
    MERGED = 2
class User(BaseMixin, db.Model):
__tablename__ = 'user'
__bind_key__ = 'lastuser'
userid = db.Column(db.String(22), unique=True, nullable=False, default=newid)
fullname = db.Column(db.Unicode(80), default=u'', nullable=False)
_username = db.Column('username', db.Unicode(80), unique=True, nullable=True)
pw_hash = db.Column(db.String(80), nullable=True)
timezone = db.Column(db.Unicode(40), nullable=True)
description = db.Column(db.UnicodeText, default=u'', nullable=False)
status = db.Column(db.SmallInteger, nullable=False, default=USER_STATUS.ACTIVE)
_defercols = [
defer('created_at'),
defer('updated_at'),
defer('pw_hash'),
defer('timezone'),
defer('description'),
]
def __init__(self, password=None, **kwargs):
self.userid = newid()
self.password = password
super(User, self).__init__(**kwargs)
@property
def is_active(self):
return self.status == USER_STATUS.ACTIVE
def merged_user(self):
if self.status == USER_STATUS.MERGED:
return UserOldId.get(self.userid).user
else:
return self
def _set_password(self, password):
if password is None:
self.pw_hash = None
else:
self.pw_hash = bcrypt.hashpw(
password.encode('utf-8') if isinstance(password, unicode) else password,
bcrypt.gensalt())
#: Write-only property (passwords cannot be read back in plain text)
password = property(fset=_set_password)
#: Username (may be null)
@hybrid_property
def username(self):
return self._username
@username.setter
def username(self, value):
if not value:
self._username = None
elif self.is_valid_username(value):
self._username = value
def is_valid_username(self, value):
if not valid_username(value):
return False
existing = User.query.filter(db.or_(
User.username == value,
User.userid == value)).first() # Avoid User.get to skip status check
if existing and existing.id != self.id:
return False
existing = Organization.get(name=value)
if existing:
return False
return True
def password_is(self, password):
if self.pw_hash is None:
return False
if self.pw_hash.startswith('sha1$'):
return check_password_hash(self.pw_hash, password)
else:
return bcrypt.hashpw(
password.encode('utf-8') if isinstance(password, unicode) else password,
self.pw_hash) == self.pw_hash
def __repr__(self):
return u'<User {username} "{fullname}">'.format(username=self.username or self.userid,
fullname=self.fullname)
def profileid(self):
if self.username:
return self.username
else:
return self.userid
def displayname(self):
return self.fullname or self.username or self.userid
@property
def pickername(self):
if self.username:
return u'{fullname} (~{username})'.format(fullname=self.fullname, username=self.username)
else:
return self.fullname
def add_email(self, email, primary=False):
if primary:
for emailob in self.emails:
if emailob.primary:
emailob.primary = False
useremail = UserEmail(user=self, email=email, primary=primary)
db.session.add(useremail)
return useremail
def del_email(self, email):
setprimary = False
useremail = UserEmail.query.filter_by(user=self, email=email).first()
if useremail:
if useremail.primary:
setprimary = True
db.session.delete(useremail)
if setprimary:
for emailob in UserEmail.query.filter_by(user=self).all():
if emailob is not useremail:
emailob.primary = True
break
@cached_property
def email(self):
"""
Returns primary email address for user.
"""
# Look for a primary address
useremail = UserEmail.query.filter_by(user_id=self.id, primary=True).first()
if useremail:
return useremail
# No primary? Maybe there's one that's not set as primary?
useremail = UserEmail.query.filter_by(user_id=self.id).first()
if useremail:
# XXX: Mark at primary. This may or may not be saved depending on
# whether the request ended in a database commit.
useremail.primary = True
return useremail
# This user has no email address. Return a blank string instead of None
# to support the common use case, where the caller will use unicode(user.email)
# to get the email address as a string.
return u''
@cached_property
def phone(self):
"""
Returns primary phone number for user.
"""
# Look for a primary address
userphone = UserPhone.query.filter_by(user=self, primary=True).first()
if userphone:
return userphone
# No primary? Maybe there's one that's not set as primary?
userphone = UserPhone.query.filter_by(user=self).first()
if userphone:
# XXX: Mark at primary. This may or may not be saved depending on
# whether the request ended in a database commit.
userphone.primary = True
return userphone
# This user has no phone number. Return a blank string instead of None
# to support the common use case, where the caller will use unicode(user.phone)
# to get the phone number as a string.
return u''
def organizations(self):
"""
Return the organizations this user is a member of.
"""
return sorted(set([team.org for team in self.teams]), key=lambda o: o.title)
def organizations_owned(self):
"""
Return the organizations this user is an owner of.
"""
return sorted(set([team.org for team in self.teams if team.org.owners == team]),
key=lambda o: o.title)
def organizations_owned_ids(self):
"""
Return the database ids of the organizations this user is an owner of. This is used
for database queries.
"""
return list(set([team.org.id for team in self.teams if team.org.owners == team]))
def is_profile_complete(self):
"""
Return True if profile is complete (fullname, username and email are present), False
otherwise.
"""
return bool(self.fullname and self.username and self.email)
def available_permissions(self):
"""
Return all permission objects available to this user
(either owned by user or available to all users).
"""
from .client import Permission
return Permission.query.filter(
db.or_(Permission.allusers == True, Permission.user == self)
).order_by(Permission.name).all()
@classmethod
def get(cls, username=None, userid=None, defercols=False):
"""
Return a User with the given username or userid.
:param str username: Username to lookup
:param str userid: Userid to lookup
:param bool defercols: Defer loading non-critical columns
"""
if not bool(username) ^ bool(userid):
raise TypeError("Either username or userid should be specified")
if userid:
query = cls.query.filter_by(userid=userid)
else:
query = cls.query.filter_by(username=username)
if defercols:
query = query.options(*cls._defercols)
user = query.one_or_none()
if user and user.status == USER_STATUS.MERGED:
user = user.merged_user()
if user.is_active:
return user
@classmethod
def all(cls, userids=None, usernames=None, defercols=False):
"""
Return all matching users.
:param list userids: Userids to look up
:param list usernames: Usernames to look up
:param bool defercols: Defer loading non-critical columns
"""
users = set()
if userids:
query = cls.query.filter(cls.userid.in_(userids))
if defercols:
query = query.options(*cls._defercols)
for user in query.all():
user = user.merged_user()
if user.is_active:
users.add(user)
return list(users)
@classmethod
def autocomplete(cls, query):
"""
Return users whose names begin with the query, for autocomplete widgets.
Looks up users by fullname, username, external ids and email addresses.
:param str query: Letters to start matching with
"""
# Escape the '%' and '_' wildcards in SQL LIKE clauses.
# Some SQL dialects respond to '[' and ']', so remove them.
query = query.replace(u'%', ur'\%').replace(u'_', ur'\_').replace(u'[', u'').replace(u']', u'') + u'%'
# Use User._username since 'username' is a hybrid property that checks for validity
# before passing on to _username, the actual column name on the model.
# We convert to lowercase and use the LIKE operator since ILIKE isn't standard.
if not query:
return []
users = cls.query.filter(cls.status == USER_STATUS.ACTIVE,
or_( # Match against userid (exact value only), fullname or username, case insensitive
cls.userid == query[:-1],
db.func.lower(cls.fullname).like(db.func.lower(query)),
db.func.lower(cls._username).like(db.func.lower(query))
)
).options(*cls._defercols).limit(100).all() # Limit to 100 results
if query.startswith('@'):
# Add Twitter/GitHub accounts to the head of results
# TODO: Move this query to a login provider class method
users = cls.query.filter(cls.status == USER_STATUS.ACTIVE, cls.id.in_(
db.session.query(UserExternalId.user_id).filter(
UserExternalId.service.in_([u'twitter', u'github']),
db.func.lower(UserExternalId.username).like(db.func.lower(query[1:]))
).subquery())).options(*cls._defercols).limit(100).all() + users
elif '@' in query:
users = cls.query.filter(cls.status == USER_STATUS.ACTIVE, cls.id.in_(
db.session.query(UserEmail.user_id).filter(
db.func.lower(UserEmail.email).like(db.func.lower(query))
).subquery())).options(*cls._defercols).limit(100).all() + users
return users
class UserOldId(TimestampMixin, db.Model):
    """Maps the userid of a merged (retired) account to the user it was
    merged into."""
    __tablename__ = 'useroldid'
    __bind_key__ = 'lastuser'
    # The retired account's public userid (primary key)
    userid = db.Column(db.String(22), nullable=False, primary_key=True)
    # The surviving account
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    user = db.relationship(User, primaryjoin=user_id == User.id,
        backref=db.backref('oldids', cascade="all, delete-orphan"))
    def __repr__(self):
        return u'<UserOldId {userid} of {user}'.format(
            userid=self.userid, user=repr(self.user)[1:-1])
    @classmethod
    def get(cls, userid):
        """Return the UserOldId record for `userid`, or None."""
        return cls.query.filter_by(userid=userid).one_or_none()
class UserEmail(BaseMixin, db.Model):
    """A verified email address belonging to a user.  The address is
    immutable once created; its md5sum is stored for Gravatar-style
    lookups."""
    __tablename__ = 'useremail'
    __bind_key__ = 'lastuser'
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    user = db.relationship(User, primaryjoin=user_id == User.id,
        backref=db.backref('emails', cascade="all, delete-orphan"))
    _email = db.Column('email', db.Unicode(254), unique=True, nullable=False)
    md5sum = db.Column(db.String(32), unique=True, nullable=False)
    primary = db.Column(db.Boolean, nullable=False, default=False)
    def __init__(self, email, **kwargs):
        super(UserEmail, self).__init__(**kwargs)
        self._email = email
        # Derived once from the address; used for avatar lookups
        self.md5sum = md5(self._email).hexdigest()
    @hybrid_property
    def email(self):
        return self._email
    #: Make email immutable. There is no setter for email.
    email = db.synonym('_email', descriptor=email)
    def __repr__(self):
        return u'<UserEmail {email} of {user}>'.format(
            email=self.email, user=repr(self.user)[1:-1])
    def __unicode__(self):
        return unicode(self.email)
    def __str__(self):
        return str(self.__unicode__())
    @classmethod
    def get(cls, email=None, md5sum=None):
        """
        Return a UserEmail with matching email or md5sum.

        :param str email: Email address to lookup
        :param str md5sum: md5sum of email address to lookup
        """
        if not bool(email) ^ bool(md5sum):
            raise TypeError("Either email or md5sum should be specified")
        if email:
            # Match either the given case or lowercase variant
            return cls.query.filter(cls.email.in_([email, email.lower()])).one_or_none()
        else:
            return cls.query.filter_by(md5sum=md5sum).one_or_none()
class UserEmailClaim(BaseMixin, db.Model):
    """An email address claimed by a user but not yet verified; holds
    the verification code sent to the address."""
    __tablename__ = 'useremailclaim'
    __bind_key__ = 'lastuser'
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    user = db.relationship(User, primaryjoin=user_id == User.id,
        backref=db.backref('emailclaims', cascade="all, delete-orphan"))
    _email = db.Column('email', db.Unicode(254), nullable=True)
    # Secret emailed to the address; proves ownership when echoed back
    verification_code = db.Column(db.String(44), nullable=False, default=newsecret)
    md5sum = db.Column(db.String(32), nullable=False)
    # A user may claim a given address only once
    __table_args__ = (db.UniqueConstraint('user_id', 'email'),)
    def __init__(self, email, **kwargs):
        super(UserEmailClaim, self).__init__(**kwargs)
        self.verification_code = newsecret()
        self._email = email
        self.md5sum = md5(self._email).hexdigest()
    @hybrid_property
    def email(self):
        return self._email
    #: Make email immutable. There is no setter for email.
    email = db.synonym('_email', descriptor=email)
    def __repr__(self):
        return u'<UserEmailClaim {email} of {user}>'.format(
            email=self.email, user=repr(self.user)[1:-1])
    def __unicode__(self):
        return unicode(self.email)
    def __str__(self):
        return str(self.__unicode__())
    def permissions(self, user, inherited=None):
        """Grant the 'verify' permission to the claiming user only."""
        perms = super(UserEmailClaim, self).permissions(user, inherited)
        if user and user == self.user:
            perms.add('verify')
        return perms
    @classmethod
    def get(cls, email, user):
        """
        Return a UserEmailClaim with matching email address for the given user.

        :param str email: Email address to lookup
        :param User user: User who claimed this email address
        """
        return cls.query.filter(UserEmailClaim.email.in_([email, email.lower()])).filter_by(user=user).one_or_none()
    @classmethod
    def all(cls, email):
        """
        Return all UserEmailClaim instances with matching email address.

        :param str email: Email address to lookup
        """
        return cls.query.filter(UserEmailClaim.email.in_([email, email.lower()])).order_by(cls.user_id).all()
class UserPhone(BaseMixin, db.Model):
    """A verified phone number belonging to a user.  The number is
    immutable once created."""
    __tablename__ = 'userphone'
    __bind_key__ = 'lastuser'
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    user = db.relationship(User, primaryjoin=user_id == User.id,
        backref=db.backref('phones', cascade="all, delete-orphan"))
    primary = db.Column(db.Boolean, nullable=False, default=False)
    _phone = db.Column('phone', db.Unicode(80), unique=True, nullable=False)
    # Whether this number can receive text messages
    gets_text = db.Column(db.Boolean, nullable=False, default=True)
    def __init__(self, phone, **kwargs):
        super(UserPhone, self).__init__(**kwargs)
        self._phone = phone
    @hybrid_property
    def phone(self):
        return self._phone
    #: Make phone immutable. There is no setter for phone.
    phone = db.synonym('_phone', descriptor=phone)
    def __repr__(self):
        return u'<UserPhone {phone} of {user}>'.format(
            phone=self.phone, user=repr(self.user)[1:-1])
    def __unicode__(self):
        return unicode(self.phone)
    def __str__(self):
        return str(self.__unicode__())
    @classmethod
    def get(cls, phone):
        """
        Return a UserPhone with matching phone number.

        :param str phone: Phone number to lookup (must be an exact match)
        """
        return cls.query.filter_by(phone=phone).one_or_none()
class UserPhoneClaim(BaseMixin, db.Model):
    """A phone number claimed by a user but not yet verified; holds the
    4-digit PIN sent to the number."""
    __tablename__ = 'userphoneclaim'
    __bind_key__ = 'lastuser'
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    user = db.relationship(User, primaryjoin=user_id == User.id,
        backref=db.backref('phoneclaims', cascade="all, delete-orphan"))
    _phone = db.Column('phone', db.Unicode(80), nullable=False)
    gets_text = db.Column(db.Boolean, nullable=False, default=True)
    # 4-digit PIN sent by SMS; proves ownership when echoed back
    verification_code = db.Column(db.Unicode(4), nullable=False, default=newpin)
    # A user may claim a given number only once
    __table_args__ = (db.UniqueConstraint('user_id', 'phone'),)
    def __init__(self, phone, **kwargs):
        super(UserPhoneClaim, self).__init__(**kwargs)
        self.verification_code = newpin()
        self._phone = phone
    @hybrid_property
    def phone(self):
        return self._phone
    #: Make phone immutable. There is no setter for phone.
    phone = db.synonym('_phone', descriptor=phone)
    def __repr__(self):
        return u'<UserPhoneClaim {phone} of {user}>'.format(
            phone=self.phone, user=repr(self.user)[1:-1])
    def __unicode__(self):
        return unicode(self.phone)
    def __str__(self):
        return str(self.__unicode__())
    def permissions(self, user, inherited=None):
        """Grant the 'verify' permission to the claiming user only."""
        perms = super(UserPhoneClaim, self).permissions(user, inherited)
        if user and user == self.user:
            perms.add('verify')
        return perms
    @classmethod
    def get(cls, phone, user):
        """
        Return a UserPhoneClaim with matching phone number for the given user.

        :param str phone: Phone number to lookup (must be an exact match)
        :param User user: User who claimed this phone number
        """
        return cls.query.filter_by(phone=phone, user=user).one_or_none()
    @classmethod
    def all(cls, phone):
        """
        Return all UserPhoneClaim instances with matching phone number.

        :param str phone: Phone number to lookup (must be an exact match)
        """
        return cls.query.filter_by(phone=phone).all()
class PasswordResetRequest(BaseMixin, db.Model):
    """A pending password-reset request, identified by a single-use
    secret reset code."""
    __tablename__ = 'passwordresetrequest'
    __bind_key__ = 'lastuser'
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    user = db.relationship(User, primaryjoin=user_id == User.id)
    # Secret sent to the user; proves the request when echoed back
    reset_code = db.Column(db.String(44), nullable=False, default=newsecret)
    def __init__(self, **kwargs):
        super(PasswordResetRequest, self).__init__(**kwargs)
        self.reset_code = newsecret()
class UserExternalId(BaseMixin, db.Model):
    """A user's identity on an external login service (Twitter, GitHub,
    Google/OpenID, ...), including any OAuth tokens obtained."""
    __tablename__ = 'userexternalid'
    __bind_key__ = 'lastuser'
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    user = db.relationship(User, primaryjoin=user_id == User.id,
        backref=db.backref('externalids', cascade="all, delete-orphan"))
    # Name of the external login service
    service = db.Column(db.String(20), nullable=False)
    userid = db.Column(db.String(250), nullable=False)  # Unique id (or OpenID)
    username = db.Column(db.Unicode(80), nullable=True)
    oauth_token = db.Column(db.String(250), nullable=True)
    oauth_token_secret = db.Column(db.String(250), nullable=True)
    oauth_token_type = db.Column(db.String(250), nullable=True)
    # A given external identity can be linked only once
    __table_args__ = (db.UniqueConstraint("service", "userid"), {})
    def __repr__(self):
        return u'<UserExternalId {service}:{username} of {user}'.format(
            service=self.service, username=self.username, user=repr(self.user)[1:-1])
    @classmethod
    def get(cls, service, userid=None, username=None):
        """
        Return a UserExternalId with the given service and userid or username.

        :param str service: Service to lookup
        :param str userid: Userid to lookup
        :param str username: Username to lookup (may be non-unique)

        Usernames are not guaranteed to be unique within a service. An example is with Google,
        where the userid is a directed OpenID URL, unique but subject to change if the Lastuser
        site URL changes. The username is the email address, which will be the same despite
        different userids.
        """
        if not bool(userid) ^ bool(username):
            raise TypeError("Either userid or username should be specified")
        if userid:
            return cls.query.filter_by(service=service, userid=userid).one_or_none()
        else:
            return cls.query.filter_by(service=service, username=username).one_or_none()
# --- Organizations and teams -------------------------------------------------
# Association table linking users to teams (backs Team.users / User.teams)
team_membership = db.Table(
    'team_membership', db.Model.metadata,
    db.Column('user_id', db.Integer, db.ForeignKey('user.id'), nullable=False),
    db.Column('team_id', db.Integer, db.ForeignKey('team.id'), nullable=False),
    info={'bind_key': 'lastuser'}
    )
class Organization(BaseMixin, db.Model):
    """An organization of users, with a special 'owners' team created automatically."""
    __tablename__ = 'organization'
    __bind_key__ = 'lastuser'
    # owners_id cannot be null, but must be declared with nullable=True since there is
    # a circular dependency. The post_update flag on the relationship tackles the circular
    # dependency within SQLAlchemy.
    owners_id = db.Column(db.Integer, db.ForeignKey('team.id',
        use_alter=True, name='fk_organization_owners_id'), nullable=True)
    owners = db.relationship('Team', primaryjoin='Organization.owners_id == Team.id',
        uselist=False, cascade='all', post_update=True)
    #: Unique and non-changing public id
    userid = db.Column(db.String(22), unique=True, nullable=False, default=newid)
    #: URL name; validated through valid_name() before assignment
    _name = db.Column('name', db.Unicode(80), unique=True, nullable=True)
    title = db.Column(db.Unicode(80), default=u'', nullable=False)
    description = db.Column(db.UnicodeText, default=u'', nullable=False)
    # Columns deferred from loading unless explicitly requested
    _defercols = [
        defer('created_at'),
        defer('updated_at'),
        defer('description'),
        ]

    def __init__(self, *args, **kwargs):
        super(Organization, self).__init__(*args, **kwargs)
        # Every organization gets an owners team on creation
        if self.owners is None:
            self.owners = Team(title=u"Owners", org=self)

    @hybrid_property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        # Invalid names are silently ignored; callers can check valid_name() first
        if self.valid_name(value):
            self._name = value

    def valid_name(self, value):
        """Check that *value* is well-formed and not taken by another org or a user."""
        if not valid_username(value):
            return False
        existing = Organization.get(name=value)
        if existing and existing.id != self.id:
            return False
        existing = User.query.filter_by(username=value).first()  # Avoid User.get to skip status check
        if existing:
            return False
        return True

    def __repr__(self):
        return u'<Organization {name} "{title}">'.format(
            name=self.name or self.userid, title=self.title)

    @property
    def pickername(self):
        """Display label for picker/autocomplete widgets."""
        if self.name:
            return u'{title} (~{name})'.format(title=self.title, name=self.name)
        else:
            return self.title

    def clients_with_team_access(self):
        """
        Return a list of clients with access to the organization's teams.
        """
        from lastuser_core.models.client import CLIENT_TEAM_ACCESS
        return [cta.client for cta in self.client_team_access if cta.access_level == CLIENT_TEAM_ACCESS.ALL]

    def permissions(self, user, inherited=None):
        """Grant management permissions to owners; strip them from everyone else."""
        perms = super(Organization, self).permissions(user, inherited)
        if user and user in self.owners.users:
            perms.add('view')
            perms.add('edit')
            perms.add('delete')
            perms.add('view-teams')
            perms.add('new-team')
        else:
            # Non-owners lose even inherited view/edit/delete
            if 'view' in perms:
                perms.remove('view')
            if 'edit' in perms:
                perms.remove('edit')
            if 'delete' in perms:
                perms.remove('delete')
        return perms

    def available_permissions(self):
        """
        Return all permission objects available to this organization
        (either owned by this organization or available to all users).
        """
        from .client import Permission
        return Permission.query.filter(
            db.or_(Permission.allusers == True, Permission.org == self)
            ).order_by(Permission.name).all()

    @classmethod
    def get(cls, name=None, userid=None, defercols=False):
        """
        Return an Organization with matching name or userid. Note that ``name`` is the username, not the title.

        :param str name: Name of the organization
        :param str userid: Userid of the organization
        :param bool defercols: Defer loading non-critical columns
        """
        # Exactly one of name/userid must be given
        if not bool(name) ^ bool(userid):
            raise TypeError("Either name or userid should be specified")
        if userid:
            query = cls.query.filter_by(userid=userid)
        else:
            query = cls.query.filter_by(name=name)
        if defercols:
            query = query.options(*cls._defercols)
        return query.one_or_none()

    @classmethod
    def all(cls, userids=None, names=None, defercols=False):
        """Return all organizations matching any of the given userids and/or names."""
        orgs = []
        if userids:
            query = cls.query.filter(cls.userid.in_(userids))
            if defercols:
                query = query.options(*cls._defercols)
            orgs.extend(query.all())
        if names:
            query = cls.query.filter(cls.name.in_(names))
            if defercols:
                query = query.options(*cls._defercols)
            orgs.extend(query.all())
        return orgs
class Team(BaseMixin, db.Model):
    """A team of users within an organization."""
    __tablename__ = 'team'
    __bind_key__ = 'lastuser'
    #: Unique and non-changing id
    userid = db.Column(db.String(22), unique=True, nullable=False, default=newid)
    #: Displayed name
    title = db.Column(db.Unicode(250), nullable=False)
    #: Organization
    org_id = db.Column(db.Integer, db.ForeignKey('organization.id'), nullable=False)
    org = db.relationship(Organization, primaryjoin=org_id == Organization.id,
        backref=db.backref('teams', order_by=title, cascade='all, delete-orphan'))
    users = db.relationship(User, secondary='team_membership',
        backref='teams')  # No cascades here! Cascades will delete users

    def __repr__(self):
        return u'<Team {team} of {org}>'.format(
            team=self.title, org=repr(self.org)[1:-1])

    @property
    def pickername(self):
        """Display label for picker/autocomplete widgets."""
        return self.title

    def permissions(self, user, inherited=None):
        """Owners of the parent organization may edit or delete the team."""
        perms = super(Team, self).permissions(user, inherited)
        if user and user in self.org.owners.users:
            perms.add('edit')
            perms.add('delete')
        return perms

    @classmethod
    def migrate_user(cls, olduser, newuser):
        """Move *olduser*'s team memberships onto *newuser*, emptying olduser's list."""
        for team in olduser.teams:
            if team not in newuser.teams:
                newuser.teams.append(team)
        olduser.teams = []

    @classmethod
    def get(cls, userid=None):
        """
        Return a Team with matching userid.

        :param str userid: Userid of the organization
        """
        return cls.query.filter_by(userid=userid).one_or_none()
Fix for ``User.get``: resolve merged accounts and return a user only when the account is active.
# -*- coding: utf-8 -*-
from hashlib import md5
from werkzeug import check_password_hash, cached_property
import bcrypt
from sqlalchemy import or_
from sqlalchemy.orm import defer
from sqlalchemy.ext.hybrid import hybrid_property
from coaster import newid, newsecret, newpin, valid_username
from . import db, TimestampMixin, BaseMixin
__all__ = ['User', 'UserEmail', 'UserEmailClaim', 'PasswordResetRequest', 'UserExternalId',
'UserPhone', 'UserPhoneClaim', 'Team', 'Organization', 'UserOldId', 'USER_STATUS']
class USER_STATUS:
    """Account status values for ``User.status``."""
    ACTIVE = 0     # Normal, usable account
    SUSPENDED = 1  # Account suspended
    MERGED = 2     # Account merged into another user (see UserOldId)
class User(BaseMixin, db.Model):
__tablename__ = 'user'
__bind_key__ = 'lastuser'
userid = db.Column(db.String(22), unique=True, nullable=False, default=newid)
fullname = db.Column(db.Unicode(80), default=u'', nullable=False)
_username = db.Column('username', db.Unicode(80), unique=True, nullable=True)
pw_hash = db.Column(db.String(80), nullable=True)
timezone = db.Column(db.Unicode(40), nullable=True)
description = db.Column(db.UnicodeText, default=u'', nullable=False)
status = db.Column(db.SmallInteger, nullable=False, default=USER_STATUS.ACTIVE)
_defercols = [
defer('created_at'),
defer('updated_at'),
defer('pw_hash'),
defer('timezone'),
defer('description'),
]
def __init__(self, password=None, **kwargs):
self.userid = newid()
self.password = password
super(User, self).__init__(**kwargs)
@property
def is_active(self):
return self.status == USER_STATUS.ACTIVE
def merged_user(self):
if self.status == USER_STATUS.MERGED:
return UserOldId.get(self.userid).user
else:
return self
def _set_password(self, password):
if password is None:
self.pw_hash = None
else:
self.pw_hash = bcrypt.hashpw(
password.encode('utf-8') if isinstance(password, unicode) else password,
bcrypt.gensalt())
#: Write-only property (passwords cannot be read back in plain text)
password = property(fset=_set_password)
#: Username (may be null)
@hybrid_property
def username(self):
return self._username
@username.setter
def username(self, value):
if not value:
self._username = None
elif self.is_valid_username(value):
self._username = value
def is_valid_username(self, value):
if not valid_username(value):
return False
existing = User.query.filter(db.or_(
User.username == value,
User.userid == value)).first() # Avoid User.get to skip status check
if existing and existing.id != self.id:
return False
existing = Organization.get(name=value)
if existing:
return False
return True
def password_is(self, password):
if self.pw_hash is None:
return False
if self.pw_hash.startswith('sha1$'):
return check_password_hash(self.pw_hash, password)
else:
return bcrypt.hashpw(
password.encode('utf-8') if isinstance(password, unicode) else password,
self.pw_hash) == self.pw_hash
def __repr__(self):
return u'<User {username} "{fullname}">'.format(username=self.username or self.userid,
fullname=self.fullname)
def profileid(self):
if self.username:
return self.username
else:
return self.userid
def displayname(self):
return self.fullname or self.username or self.userid
@property
def pickername(self):
if self.username:
return u'{fullname} (~{username})'.format(fullname=self.fullname, username=self.username)
else:
return self.fullname
def add_email(self, email, primary=False):
if primary:
for emailob in self.emails:
if emailob.primary:
emailob.primary = False
useremail = UserEmail(user=self, email=email, primary=primary)
db.session.add(useremail)
return useremail
def del_email(self, email):
setprimary = False
useremail = UserEmail.query.filter_by(user=self, email=email).first()
if useremail:
if useremail.primary:
setprimary = True
db.session.delete(useremail)
if setprimary:
for emailob in UserEmail.query.filter_by(user=self).all():
if emailob is not useremail:
emailob.primary = True
break
@cached_property
def email(self):
"""
Returns primary email address for user.
"""
# Look for a primary address
useremail = UserEmail.query.filter_by(user_id=self.id, primary=True).first()
if useremail:
return useremail
# No primary? Maybe there's one that's not set as primary?
useremail = UserEmail.query.filter_by(user_id=self.id).first()
if useremail:
# XXX: Mark at primary. This may or may not be saved depending on
# whether the request ended in a database commit.
useremail.primary = True
return useremail
# This user has no email address. Return a blank string instead of None
# to support the common use case, where the caller will use unicode(user.email)
# to get the email address as a string.
return u''
@cached_property
def phone(self):
"""
Returns primary phone number for user.
"""
# Look for a primary address
userphone = UserPhone.query.filter_by(user=self, primary=True).first()
if userphone:
return userphone
# No primary? Maybe there's one that's not set as primary?
userphone = UserPhone.query.filter_by(user=self).first()
if userphone:
# XXX: Mark at primary. This may or may not be saved depending on
# whether the request ended in a database commit.
userphone.primary = True
return userphone
# This user has no phone number. Return a blank string instead of None
# to support the common use case, where the caller will use unicode(user.phone)
# to get the phone number as a string.
return u''
def organizations(self):
"""
Return the organizations this user is a member of.
"""
return sorted(set([team.org for team in self.teams]), key=lambda o: o.title)
def organizations_owned(self):
"""
Return the organizations this user is an owner of.
"""
return sorted(set([team.org for team in self.teams if team.org.owners == team]),
key=lambda o: o.title)
def organizations_owned_ids(self):
"""
Return the database ids of the organizations this user is an owner of. This is used
for database queries.
"""
return list(set([team.org.id for team in self.teams if team.org.owners == team]))
def is_profile_complete(self):
"""
Return True if profile is complete (fullname, username and email are present), False
otherwise.
"""
return bool(self.fullname and self.username and self.email)
def available_permissions(self):
"""
Return all permission objects available to this user
(either owned by user or available to all users).
"""
from .client import Permission
return Permission.query.filter(
db.or_(Permission.allusers == True, Permission.user == self)
).order_by(Permission.name).all()
@classmethod
def get(cls, username=None, userid=None, defercols=False):
"""
Return a User with the given username or userid.
:param str username: Username to lookup
:param str userid: Userid to lookup
:param bool defercols: Defer loading non-critical columns
"""
if not bool(username) ^ bool(userid):
raise TypeError("Either username or userid should be specified")
if userid:
query = cls.query.filter_by(userid=userid)
else:
query = cls.query.filter_by(username=username)
if defercols:
query = query.options(*cls._defercols)
user = query.one_or_none()
if user and user.status == USER_STATUS.MERGED:
user = user.merged_user()
if user and user.is_active:
return user
@classmethod
def all(cls, userids=None, usernames=None, defercols=False):
"""
Return all matching users.
:param list userids: Userids to look up
:param list usernames: Usernames to look up
:param bool defercols: Defer loading non-critical columns
"""
users = set()
if userids:
query = cls.query.filter(cls.userid.in_(userids))
if defercols:
query = query.options(*cls._defercols)
for user in query.all():
user = user.merged_user()
if user.is_active:
users.add(user)
return list(users)
@classmethod
def autocomplete(cls, query):
"""
Return users whose names begin with the query, for autocomplete widgets.
Looks up users by fullname, username, external ids and email addresses.
:param str query: Letters to start matching with
"""
# Escape the '%' and '_' wildcards in SQL LIKE clauses.
# Some SQL dialects respond to '[' and ']', so remove them.
query = query.replace(u'%', ur'\%').replace(u'_', ur'\_').replace(u'[', u'').replace(u']', u'') + u'%'
# Use User._username since 'username' is a hybrid property that checks for validity
# before passing on to _username, the actual column name on the model.
# We convert to lowercase and use the LIKE operator since ILIKE isn't standard.
if not query:
return []
users = cls.query.filter(cls.status == USER_STATUS.ACTIVE,
or_( # Match against userid (exact value only), fullname or username, case insensitive
cls.userid == query[:-1],
db.func.lower(cls.fullname).like(db.func.lower(query)),
db.func.lower(cls._username).like(db.func.lower(query))
)
).options(*cls._defercols).limit(100).all() # Limit to 100 results
if query.startswith('@'):
# Add Twitter/GitHub accounts to the head of results
# TODO: Move this query to a login provider class method
users = cls.query.filter(cls.status == USER_STATUS.ACTIVE, cls.id.in_(
db.session.query(UserExternalId.user_id).filter(
UserExternalId.service.in_([u'twitter', u'github']),
db.func.lower(UserExternalId.username).like(db.func.lower(query[1:]))
).subquery())).options(*cls._defercols).limit(100).all() + users
elif '@' in query:
users = cls.query.filter(cls.status == USER_STATUS.ACTIVE, cls.id.in_(
db.session.query(UserEmail.user_id).filter(
db.func.lower(UserEmail.email).like(db.func.lower(query))
).subquery())).options(*cls._defercols).limit(100).all() + users
return users
class UserOldId(TimestampMixin, db.Model):
    """Maps a retired userid (from a merged account) to the surviving user."""
    __tablename__ = 'useroldid'
    __bind_key__ = 'lastuser'
    userid = db.Column(db.String(22), nullable=False, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    user = db.relationship(User, primaryjoin=user_id == User.id,
        backref=db.backref('oldids', cascade="all, delete-orphan"))

    def __repr__(self):
        # NOTE(review): the repr string is missing a closing '>'
        return u'<UserOldId {userid} of {user}'.format(
            userid=self.userid, user=repr(self.user)[1:-1])

    @classmethod
    def get(cls, userid):
        """Return the UserOldId record for *userid*, or None."""
        return cls.query.filter_by(userid=userid).one_or_none()
class UserEmail(BaseMixin, db.Model):
    """A verified email address belonging to a user. The address is immutable."""
    __tablename__ = 'useremail'
    __bind_key__ = 'lastuser'
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    user = db.relationship(User, primaryjoin=user_id == User.id,
        backref=db.backref('emails', cascade="all, delete-orphan"))
    _email = db.Column('email', db.Unicode(254), unique=True, nullable=False)
    # MD5 of the address — presumably for Gravatar-style lookups; confirm with callers
    md5sum = db.Column(db.String(32), unique=True, nullable=False)
    primary = db.Column(db.Boolean, nullable=False, default=False)

    def __init__(self, email, **kwargs):
        super(UserEmail, self).__init__(**kwargs)
        self._email = email
        self.md5sum = md5(self._email).hexdigest()

    @hybrid_property
    def email(self):
        return self._email

    #: Make email immutable. There is no setter for email.
    email = db.synonym('_email', descriptor=email)

    def __repr__(self):
        return u'<UserEmail {email} of {user}>'.format(
            email=self.email, user=repr(self.user)[1:-1])

    def __unicode__(self):
        return unicode(self.email)

    def __str__(self):
        return str(self.__unicode__())

    @classmethod
    def get(cls, email=None, md5sum=None):
        """
        Return a UserEmail with matching email or md5sum.

        :param str email: Email address to lookup
        :param str md5sum: md5sum of email address to lookup
        """
        # Exactly one of email/md5sum must be given
        if not bool(email) ^ bool(md5sum):
            raise TypeError("Either email or md5sum should be specified")
        if email:
            # Match the address as given or lowercased
            return cls.query.filter(cls.email.in_([email, email.lower()])).one_or_none()
        else:
            return cls.query.filter_by(md5sum=md5sum).one_or_none()
class UserEmailClaim(BaseMixin, db.Model):
    """An unverified claim on an email address, carrying a verification code."""
    __tablename__ = 'useremailclaim'
    __bind_key__ = 'lastuser'
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    user = db.relationship(User, primaryjoin=user_id == User.id,
        backref=db.backref('emailclaims', cascade="all, delete-orphan"))
    _email = db.Column('email', db.Unicode(254), nullable=True)
    # Random secret (from newsecret); regenerated in __init__ for every claim
    verification_code = db.Column(db.String(44), nullable=False, default=newsecret)
    md5sum = db.Column(db.String(32), nullable=False)
    # One claim per (user, email); multiple users may claim the same address
    __table_args__ = (db.UniqueConstraint('user_id', 'email'),)

    def __init__(self, email, **kwargs):
        super(UserEmailClaim, self).__init__(**kwargs)
        self.verification_code = newsecret()
        self._email = email
        self.md5sum = md5(self._email).hexdigest()

    @hybrid_property
    def email(self):
        return self._email

    #: Make email immutable. There is no setter for email.
    email = db.synonym('_email', descriptor=email)

    def __repr__(self):
        return u'<UserEmailClaim {email} of {user}>'.format(
            email=self.email, user=repr(self.user)[1:-1])

    def __unicode__(self):
        return unicode(self.email)

    def __str__(self):
        return str(self.__unicode__())

    def permissions(self, user, inherited=None):
        """Only the claiming user gets 'verify' permission."""
        perms = super(UserEmailClaim, self).permissions(user, inherited)
        if user and user == self.user:
            perms.add('verify')
        return perms

    @classmethod
    def get(cls, email, user):
        """
        Return a UserEmailClaim with matching email address for the given user.

        :param str email: Email address to lookup
        :param User user: User who claimed this email address
        """
        return cls.query.filter(UserEmailClaim.email.in_([email, email.lower()])).filter_by(user=user).one_or_none()

    @classmethod
    def all(cls, email):
        """
        Return all UserEmailClaim instances with matching email address.

        :param str email: Email address to lookup
        """
        return cls.query.filter(UserEmailClaim.email.in_([email, email.lower()])).order_by(cls.user_id).all()
class UserPhone(BaseMixin, db.Model):
    """A verified phone number belonging to a user. The number is immutable."""
    __tablename__ = 'userphone'
    __bind_key__ = 'lastuser'
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    user = db.relationship(User, primaryjoin=user_id == User.id,
        backref=db.backref('phones', cascade="all, delete-orphan"))
    primary = db.Column(db.Boolean, nullable=False, default=False)
    _phone = db.Column('phone', db.Unicode(80), unique=True, nullable=False)
    # Whether this number can receive text messages
    gets_text = db.Column(db.Boolean, nullable=False, default=True)

    def __init__(self, phone, **kwargs):
        super(UserPhone, self).__init__(**kwargs)
        self._phone = phone

    @hybrid_property
    def phone(self):
        return self._phone

    #: Make phone immutable. There is no setter for phone.
    phone = db.synonym('_phone', descriptor=phone)

    def __repr__(self):
        return u'<UserPhone {phone} of {user}>'.format(
            phone=self.phone, user=repr(self.user)[1:-1])

    def __unicode__(self):
        return unicode(self.phone)

    def __str__(self):
        return str(self.__unicode__())

    @classmethod
    def get(cls, phone):
        """
        Return a UserPhone with matching phone number.

        :param str phone: Phone number to lookup (must be an exact match)
        """
        return cls.query.filter_by(phone=phone).one_or_none()
class UserPhoneClaim(BaseMixin, db.Model):
    """An unverified claim on a phone number, carrying a short verification code."""
    __tablename__ = 'userphoneclaim'
    __bind_key__ = 'lastuser'
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    user = db.relationship(User, primaryjoin=user_id == User.id,
        backref=db.backref('phoneclaims', cascade="all, delete-orphan"))
    _phone = db.Column('phone', db.Unicode(80), nullable=False)
    # Whether this number can receive text messages
    gets_text = db.Column(db.Boolean, nullable=False, default=True)
    # Short numeric code (from newpin); regenerated in __init__ for every claim
    verification_code = db.Column(db.Unicode(4), nullable=False, default=newpin)
    # One claim per (user, phone); multiple users may claim the same number
    __table_args__ = (db.UniqueConstraint('user_id', 'phone'),)

    def __init__(self, phone, **kwargs):
        super(UserPhoneClaim, self).__init__(**kwargs)
        self.verification_code = newpin()
        self._phone = phone

    @hybrid_property
    def phone(self):
        return self._phone

    #: Make phone immutable. There is no setter for phone.
    phone = db.synonym('_phone', descriptor=phone)

    def __repr__(self):
        return u'<UserPhoneClaim {phone} of {user}>'.format(
            phone=self.phone, user=repr(self.user)[1:-1])

    def __unicode__(self):
        return unicode(self.phone)

    def __str__(self):
        return str(self.__unicode__())

    def permissions(self, user, inherited=None):
        """Only the claiming user gets 'verify' permission."""
        perms = super(UserPhoneClaim, self).permissions(user, inherited)
        if user and user == self.user:
            perms.add('verify')
        return perms

    @classmethod
    def get(cls, phone, user):
        """
        Return a UserPhoneClaim with matching phone number for the given user.

        :param str phone: Phone number to lookup (must be an exact match)
        :param User user: User who claimed this phone number
        """
        return cls.query.filter_by(phone=phone, user=user).one_or_none()

    @classmethod
    def all(cls, phone):
        """
        Return all UserPhoneClaim instances with matching phone number.

        :param str phone: Phone number to lookup (must be an exact match)
        """
        return cls.query.filter_by(phone=phone).all()
class PasswordResetRequest(BaseMixin, db.Model):
    """A password reset request issued for a user, carrying a random reset code."""
    __tablename__ = 'passwordresetrequest'
    __bind_key__ = 'lastuser'
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    user = db.relationship(User, primaryjoin=user_id == User.id)
    # Random secret (from newsecret); regenerated in __init__ for every request
    reset_code = db.Column(db.String(44), nullable=False, default=newsecret)

    def __init__(self, **kwargs):
        super(PasswordResetRequest, self).__init__(**kwargs)
        self.reset_code = newsecret()
class UserExternalId(BaseMixin, db.Model):
    """An external (OAuth/OpenID) login identity linked to a user."""
    __tablename__ = 'userexternalid'
    __bind_key__ = 'lastuser'
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    user = db.relationship(User, primaryjoin=user_id == User.id,
        backref=db.backref('externalids', cascade="all, delete-orphan"))
    service = db.Column(db.String(20), nullable=False)
    userid = db.Column(db.String(250), nullable=False)  # Unique id (or OpenID)
    username = db.Column(db.Unicode(80), nullable=True)
    oauth_token = db.Column(db.String(250), nullable=True)
    oauth_token_secret = db.Column(db.String(250), nullable=True)
    oauth_token_type = db.Column(db.String(250), nullable=True)
    # Each (service, userid) pair may be linked only once
    __table_args__ = (db.UniqueConstraint("service", "userid"), {})

    def __repr__(self):
        # NOTE(review): the repr string is missing a closing '>'
        return u'<UserExternalId {service}:{username} of {user}'.format(
            service=self.service, username=self.username, user=repr(self.user)[1:-1])

    @classmethod
    def get(cls, service, userid=None, username=None):
        """
        Return a UserExternalId with the given service and userid or username.

        :param str service: Service to lookup
        :param str userid: Userid to lookup
        :param str username: Username to lookup (may be non-unique)

        Usernames are not guaranteed to be unique within a service. An example is with Google,
        where the userid is a directed OpenID URL, unique but subject to change if the Lastuser
        site URL changes. The username is the email address, which will be the same despite
        different userids.
        """
        # Exactly one of userid/username must be given
        if not bool(userid) ^ bool(username):
            raise TypeError("Either userid or username should be specified")
        if userid:
            return cls.query.filter_by(service=service, userid=userid).one_or_none()
        else:
            return cls.query.filter_by(service=service, username=username).one_or_none()
# --- Organizations and teams -------------------------------------------------
# Association table linking users to teams (backs Team.users / User.teams)
team_membership = db.Table(
    'team_membership', db.Model.metadata,
    db.Column('user_id', db.Integer, db.ForeignKey('user.id'), nullable=False),
    db.Column('team_id', db.Integer, db.ForeignKey('team.id'), nullable=False),
    info={'bind_key': 'lastuser'}
    )
class Organization(BaseMixin, db.Model):
    """An organization of users, with a special 'owners' team created automatically."""
    __tablename__ = 'organization'
    __bind_key__ = 'lastuser'
    # owners_id cannot be null, but must be declared with nullable=True since there is
    # a circular dependency. The post_update flag on the relationship tackles the circular
    # dependency within SQLAlchemy.
    owners_id = db.Column(db.Integer, db.ForeignKey('team.id',
        use_alter=True, name='fk_organization_owners_id'), nullable=True)
    owners = db.relationship('Team', primaryjoin='Organization.owners_id == Team.id',
        uselist=False, cascade='all', post_update=True)
    #: Unique and non-changing public id
    userid = db.Column(db.String(22), unique=True, nullable=False, default=newid)
    #: URL name; validated through valid_name() before assignment
    _name = db.Column('name', db.Unicode(80), unique=True, nullable=True)
    title = db.Column(db.Unicode(80), default=u'', nullable=False)
    description = db.Column(db.UnicodeText, default=u'', nullable=False)
    # Columns deferred from loading unless explicitly requested
    _defercols = [
        defer('created_at'),
        defer('updated_at'),
        defer('description'),
        ]

    def __init__(self, *args, **kwargs):
        super(Organization, self).__init__(*args, **kwargs)
        # Every organization gets an owners team on creation
        if self.owners is None:
            self.owners = Team(title=u"Owners", org=self)

    @hybrid_property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        # Invalid names are silently ignored; callers can check valid_name() first
        if self.valid_name(value):
            self._name = value

    def valid_name(self, value):
        """Check that *value* is well-formed and not taken by another org or a user."""
        if not valid_username(value):
            return False
        existing = Organization.get(name=value)
        if existing and existing.id != self.id:
            return False
        existing = User.query.filter_by(username=value).first()  # Avoid User.get to skip status check
        if existing:
            return False
        return True

    def __repr__(self):
        return u'<Organization {name} "{title}">'.format(
            name=self.name or self.userid, title=self.title)

    @property
    def pickername(self):
        """Display label for picker/autocomplete widgets."""
        if self.name:
            return u'{title} (~{name})'.format(title=self.title, name=self.name)
        else:
            return self.title

    def clients_with_team_access(self):
        """
        Return a list of clients with access to the organization's teams.
        """
        from lastuser_core.models.client import CLIENT_TEAM_ACCESS
        return [cta.client for cta in self.client_team_access if cta.access_level == CLIENT_TEAM_ACCESS.ALL]

    def permissions(self, user, inherited=None):
        """Grant management permissions to owners; strip them from everyone else."""
        perms = super(Organization, self).permissions(user, inherited)
        if user and user in self.owners.users:
            perms.add('view')
            perms.add('edit')
            perms.add('delete')
            perms.add('view-teams')
            perms.add('new-team')
        else:
            # Non-owners lose even inherited view/edit/delete
            if 'view' in perms:
                perms.remove('view')
            if 'edit' in perms:
                perms.remove('edit')
            if 'delete' in perms:
                perms.remove('delete')
        return perms

    def available_permissions(self):
        """
        Return all permission objects available to this organization
        (either owned by this organization or available to all users).
        """
        from .client import Permission
        return Permission.query.filter(
            db.or_(Permission.allusers == True, Permission.org == self)
            ).order_by(Permission.name).all()

    @classmethod
    def get(cls, name=None, userid=None, defercols=False):
        """
        Return an Organization with matching name or userid. Note that ``name`` is the username, not the title.

        :param str name: Name of the organization
        :param str userid: Userid of the organization
        :param bool defercols: Defer loading non-critical columns
        """
        # Exactly one of name/userid must be given
        if not bool(name) ^ bool(userid):
            raise TypeError("Either name or userid should be specified")
        if userid:
            query = cls.query.filter_by(userid=userid)
        else:
            query = cls.query.filter_by(name=name)
        if defercols:
            query = query.options(*cls._defercols)
        return query.one_or_none()

    @classmethod
    def all(cls, userids=None, names=None, defercols=False):
        """Return all organizations matching any of the given userids and/or names."""
        orgs = []
        if userids:
            query = cls.query.filter(cls.userid.in_(userids))
            if defercols:
                query = query.options(*cls._defercols)
            orgs.extend(query.all())
        if names:
            query = cls.query.filter(cls.name.in_(names))
            if defercols:
                query = query.options(*cls._defercols)
            orgs.extend(query.all())
        return orgs
class Team(BaseMixin, db.Model):
    """A team of users within an organization."""
    __tablename__ = 'team'
    __bind_key__ = 'lastuser'
    #: Unique and non-changing id
    userid = db.Column(db.String(22), unique=True, nullable=False, default=newid)
    #: Displayed name
    title = db.Column(db.Unicode(250), nullable=False)
    #: Organization
    org_id = db.Column(db.Integer, db.ForeignKey('organization.id'), nullable=False)
    org = db.relationship(Organization, primaryjoin=org_id == Organization.id,
        backref=db.backref('teams', order_by=title, cascade='all, delete-orphan'))
    users = db.relationship(User, secondary='team_membership',
        backref='teams')  # No cascades here! Cascades will delete users

    def __repr__(self):
        return u'<Team {team} of {org}>'.format(
            team=self.title, org=repr(self.org)[1:-1])

    @property
    def pickername(self):
        """Display label for picker/autocomplete widgets."""
        return self.title

    def permissions(self, user, inherited=None):
        """Owners of the parent organization may edit or delete the team."""
        perms = super(Team, self).permissions(user, inherited)
        if user and user in self.org.owners.users:
            perms.add('edit')
            perms.add('delete')
        return perms

    @classmethod
    def migrate_user(cls, olduser, newuser):
        """Move *olduser*'s team memberships onto *newuser*, emptying olduser's list."""
        for team in olduser.teams:
            if team not in newuser.teams:
                newuser.teams.append(team)
        olduser.teams = []

    @classmethod
    def get(cls, userid=None):
        """
        Return a Team with matching userid.

        :param str userid: Userid of the organization
        """
        return cls.query.filter_by(userid=userid).one_or_none()
|
from django.test import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class AdminTest(LiveServerTestCase):
    """Browser-level (Selenium) tests for the Django admin site."""
    # Fixture presumably creates the 'admin'/'asdf' account used below — verify
    fixtures = ['admin_user.json']

    def setUp(self):
        # Fresh browser per test; implicit wait lets elements appear asynchronously
        self.browser = webdriver.Firefox()
        self.browser.implicitly_wait(2)

    def tearDown(self):
        self.browser.quit()

    def test_admin_page_reachable(self):
        """The admin login page renders at /admin/."""
        self.browser.get(self.live_server_url + '/admin/')
        body = self.browser.find_element_by_tag_name('body')
        self.assertIn('Django administration', body.text)

    def test_admin_page_login(self):
        """Submitting valid credentials reaches the site administration page."""
        self.browser.get(self.live_server_url + '/admin/')
        username_field = self.browser.find_element_by_name('username')
        username_field.send_keys('admin')
        password_field = self.browser.find_element_by_name('password')
        password_field.send_keys('asdf')
        password_field.send_keys(Keys.RETURN)
        # Validate in Site Administration page
        body = self.browser.find_element_by_tag_name('body')
        self.assertIn('Site administration', body.text)
Compacted Selenium feature tests.
Selenium testing is really slow; since these are feature
tests, I can live with them being bigger and less
modular for the sake of faster test runs.
from django.test import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class AdminTest(LiveServerTestCase):
    """Compacted browser-level (Selenium) test for the Django admin site."""
    # Fixture presumably creates the 'admin'/'asdf' account used below — verify
    fixtures = ['admin_user.json']

    def setUp(self):
        # Fresh browser per test; implicit wait lets elements appear asynchronously
        self.browser = webdriver.Firefox()
        self.browser.implicitly_wait(2)

    def tearDown(self):
        self.browser.quit()

    def test_admin_page_login(self):
        """Single feature test: page reachable, then log in and verify landing page."""
        self.admin_page_reachable()
        self.admin_login()
        body = self.browser.find_element_by_tag_name('body')
        self.assertIn('Site administration', body.text)

    def admin_page_reachable(self):
        # Helper (not a test): open /admin/ and confirm the login page rendered
        self.browser.get(self.live_server_url + '/admin/')
        body = self.browser.find_element_by_tag_name('body')
        self.assertIn('Django administration', body.text)

    def admin_login(self):
        # Helper (not a test): fill in the login form and submit with RETURN
        username_field = self.browser.find_element_by_name('username')
        username_field.send_keys('admin')
        password_field = self.browser.find_element_by_name('password')
        password_field.send_keys('asdf')
        password_field.send_keys(Keys.RETURN)
|
"""
A Particle contains information about a physical particle.
"""
import math
from .pdg import PDG
class MissingValueError(Exception):
    """Raised when a Particle is constructed without required values. For display purposes."""
    pass
class Particle:
    """
    Stores standard particle information.

    Usage
    -----
    >>> Particle(ID=ID,px=px,py=py,pz=pz)
    >>> Particle(ID=ID,pT=pT,phi=phi,eta=eta)

    In the former case, pT,phi,eta are calculated from px,py,pz via the methods
    _calc_<var>. In both cases, pT,phi,eta become attributes of the instance.

    The __str__ method has been manually defined to return standard particle
    information. This is very convenient for printing to stdout, e.g.

    >>> p = Particle(ID=211,pT=0.5,phi=1.0,eta=2.0)
    >>> print(p)
    211 0.5 1.0 2.0
    """
    def __init__(self,ID=None,px=None,py=None,pz=None,pT=None,phi=None,eta=None):
        # Validate with explicit `is None` checks: a legitimate zero
        # component (e.g. px == 0.0 or pT == 0.0) is falsy, so the previous
        # truthiness tests wrongly rejected valid input and silently
        # discarded explicitly supplied zero values.
        if ID is None:
            raise MissingValueError('Must provide a particle ID.')
        have_cartesian = px is not None and py is not None and pz is not None
        have_collider = pT is not None and phi is not None and eta is not None
        if not (have_cartesian or have_collider):
            raise MissingValueError('Must provide pT,phi,eta or px,py,pz.')
        self.ID = ID
        # Prefer explicitly supplied collider coordinates; otherwise derive
        # each one from the Cartesian momentum components.
        self.pT = pT if pT is not None else self._calc_pT(px,py)
        self.phi = phi if phi is not None else self._calc_phi(px,py)
        self.eta = eta if eta is not None else self._calc_eta(px,py,pz)

    def __str__(self):
        # fastest way I have found to create the string
        # _much_ faster than ' '.join(str(i) for i in (...))
        # also faster than returning a tuple and print(*tuple)
        return '{} {} {} {}'.format(self.ID, self.pT, self.phi, self.eta)

    def _calc_pT(self,px,py,sqrt=math.sqrt):
        # Transverse momentum. sqrt is bound as a default arg for speed.
        return sqrt(px*px + py*py)

    def _calc_phi(self,px,py,atan2=math.atan2):
        # Azimuthal angle in (-pi, pi].
        return atan2(py,px)

    def _calc_eta(self,px,py,pz,sqrt=math.sqrt,log=math.log):
        # Pseudorapidity: 0.5*ln((|p|+pz)/(|p|-pz)).
        pmag = sqrt(px*px + py*py + pz*pz)
        return 0.5*log((pmag+pz)/max(pmag-pz,1e-10)) # avoid division by zero
class ParticleFilter:
    """
    Creates a function to filter Particles with specified criteria. The
    function returns True if all criteria are met, else False.

    Usage
    -----
    >>> pf = ParticleFilter(criterion1,criterion2,...)

    Allowed criteria are:
    ID -- a list of particle IDs
    charged -- boolean, shortcut to select all charged particles
    pTmin,pTmax -- range of pT; can omit either min or max
    etamin,etamax -- range of eta; if only etamax, assume |eta| < etamax;
    if only etamin, assume etamin < |eta|

    After creating a ParticleFilter, it is then applied to Particle objects,
    e.g.

    >>> pf = ParticleFilter(ID=[211],pTmin=0.5,etamax=2.5)
    >>> p1 = Particle(ID=211,pT=1.0,phi=1.0,eta=1.0)
    >>> pf(p1)
    True
    >>> p2 = Particle(ID=211,pT=1.0,phi=1.0,eta=3.0)
    >>> pf(p2)
    False
    """
    # Default for ID is an immutable tuple instead of the previous mutable
    # [] so the default object can never be shared or mutated across calls.
    def __init__(self,ID=(),charged=False,pTmin=None,pTmax=None,etamin=None,etamax=None):
        # Each criterion becomes a boolean function of a particle object;
        # __call__ requires all of them to pass.
        self._filters = []
        # match particle ID (by magnitude, so antiparticles match too)
        if ID:
            self._filters.append(lambda p: abs(p.ID) in ID)
        # match charged particles
        if charged:
            # retrieve ID list from PDG class
            pdg = PDG()
            self._charged = pdg.charged()
            del pdg
            self._filters.append(lambda p: abs(p.ID) in self._charged)
        # match pT range; compare against None so an explicit bound of 0.0
        # is honored instead of being silently dropped (the previous
        # truthiness tests ignored zero-valued bounds)
        if pTmin is not None and pTmax is not None:
            self._filters.append(lambda p: pTmin < p.pT < pTmax)
        elif pTmin is not None:
            self._filters.append(lambda p: pTmin < p.pT)
        elif pTmax is not None:
            self._filters.append(lambda p: p.pT < pTmax)
        # match eta range: signed window when both bounds are given, |eta|
        # comparison when only one is given (per the docstring)
        if etamin is not None and etamax is not None:
            self._filters.append(lambda p: etamin < p.eta < etamax)
        elif etamin is not None:
            self._filters.append(lambda p: etamin < abs(p.eta))
        elif etamax is not None:
            self._filters.append(lambda p: abs(p.eta) < etamax)

    def __call__(self,Particle):
        # match all filter functions
        return all(f(Particle) for f in self._filters)
Remove manual exception in Particle.__init__() [provides a slight speed boost].
"""
A Particle contains information about a physical particle.
"""
import math
from .pdg import PDG
class Particle:
    """
    Stores standard particle information.

    Usage
    -----
    >>> Particle(ID=ID,px=px,py=py,pz=pz)
    >>> Particle(ID=ID,pT=pT,phi=phi,eta=eta)

    In the former case, pT,phi,eta are calculated from px,py,pz via the methods
    _calc_<var>. In both cases, pT,phi,eta become attributes of the instance.

    The __str__ method has been manually defined to return standard particle
    information. This is very convenient for printing to stdout, e.g.

    >>> p = Particle(ID=211,pT=0.5,phi=1.0,eta=2.0)
    >>> print(p)
    211 0.5 1.0 2.0
    """
    def __init__(self,ID=None,px=None,py=None,pz=None,pT=None,phi=None,eta=None):
        # No argument validation here by design (it was removed for speed);
        # missing momentum components surface as a TypeError in _calc_*.
        self.ID = ID
        # Compare against None instead of relying on truthiness: a supplied
        # value of 0.0 is falsy and the previous `pT or self._calc_pT(...)`
        # form discarded it and recomputed from (possibly absent) px,py,pz.
        self.pT = pT if pT is not None else self._calc_pT(px,py)
        self.phi = phi if phi is not None else self._calc_phi(px,py)
        self.eta = eta if eta is not None else self._calc_eta(px,py,pz)

    def __str__(self):
        # fastest way I have found to create the string
        # _much_ faster than ' '.join(str(i) for i in (...))
        # also faster than returning a tuple and print(*tuple)
        return '{} {} {} {}'.format(self.ID, self.pT, self.phi, self.eta)

    def _calc_pT(self,px,py,sqrt=math.sqrt):
        # Transverse momentum. sqrt is bound as a default arg for speed.
        return sqrt(px*px + py*py)

    def _calc_phi(self,px,py,atan2=math.atan2):
        # Azimuthal angle in (-pi, pi].
        return atan2(py,px)

    def _calc_eta(self,px,py,pz,sqrt=math.sqrt,log=math.log):
        # Pseudorapidity: 0.5*ln((|p|+pz)/(|p|-pz)).
        pmag = sqrt(px*px + py*py + pz*pz)
        return 0.5*log((pmag+pz)/max(pmag-pz,1e-10)) # avoid division by zero
class ParticleFilter:
    """
    Creates a function to filter Particles with specified criteria. The
    function returns True if all criteria are met, else False.

    Usage
    -----
    >>> pf = ParticleFilter(criterion1,criterion2,...)

    Allowed criteria are:
    ID -- a list of particle IDs
    charged -- boolean, shortcut to select all charged particles
    pTmin,pTmax -- range of pT; can omit either min or max
    etamin,etamax -- range of eta; if only etamax, assume |eta| < etamax;
    if only etamin, assume etamin < |eta|

    After creating a ParticleFilter, it is then applied to Particle objects,
    e.g.

    >>> pf = ParticleFilter(ID=[211],pTmin=0.5,etamax=2.5)
    >>> p1 = Particle(ID=211,pT=1.0,phi=1.0,eta=1.0)
    >>> pf(p1)
    True
    >>> p2 = Particle(ID=211,pT=1.0,phi=1.0,eta=3.0)
    >>> pf(p2)
    False
    """
    # NOTE(review): the mutable default ID=[] is only ever read, never
    # mutated, so it is harmless here -- but a tuple would be safer.
    def __init__(self,ID=[],charged=False,pTmin=None,pTmax=None,etamin=None,etamax=None):
        # init. empty list of filters
        self._filters = []
        ### create a lambda function for all specified criteria
        ### each lambda is a boolean function of Particle objects
        # match particle ID (compared by magnitude, so antiparticles match)
        if ID:
            self._filters.append(lambda Particle: abs(Particle.ID) in ID)
        # match charged particles
        if charged:
            # retrieve ID list from PDG class
            pdg = PDG()
            self._charged = pdg.charged()
            del pdg
            self._filters.append(lambda Particle: abs(Particle.ID) in self._charged)
        # match pT range
        # NOTE(review): truthiness tests mean an explicit bound of 0.0 is
        # treated as "not given" -- confirm that is acceptable.
        if pTmin and pTmax:
            self._filters.append(lambda Particle: pTmin < Particle.pT < pTmax)
        elif pTmin and not pTmax:
            self._filters.append(lambda Particle: pTmin < Particle.pT)
        elif pTmax and not pTmin:
            self._filters.append(lambda Particle: Particle.pT < pTmax)
        # match eta range: signed window when both bounds are given, |eta|
        # comparison when only one bound is given (see docstring)
        if etamax and etamin:
            self._filters.append(lambda Particle: etamin < Particle.eta < etamax)
        elif etamin and not etamax:
            self._filters.append(lambda Particle: etamin < abs(Particle.eta))
        elif etamax and not etamin:
            self._filters.append(lambda Particle: abs(Particle.eta) < etamax)

    def __call__(self,Particle):
        # match all filter functions
        return all(f(Particle) for f in self._filters)
|
"""
Drudge, a symbolic system for non-commutative and tensor algebra
================================================================
"""
from .canonpy import Perm, Group, canon_eldag
from .vec import Vec
__all__ = [
# Canonpy.
'Perm',
'Group',
'canon_eldag',
# Vec.
'Vec'
]
Forward range definition to base drudge module
"""
Drudge, a symbolic system for non-commutative and tensor algebra
================================================================
"""
from .canonpy import Perm, Group, canon_eldag
from .vec import Vec
from .term import Range
__all__ = [
# Canonpy.
'Perm',
'Group',
'canon_eldag',
# Vec.
'Vec',
# Term.
'Range'
]
|
""" Convenience reader functions """
import os
import shapefile
from . import guppy
from . import geojson
from . import xyfile
from .. import crs
from .metadata import Metadata
def _parsegeojsoncrs(crstup):
    """ From a tuple representing a GeoJSON (name,None) or (href,type) pair,
    return an appropriate karta crs instance. """
    ref = crstup[0]
    linktype = crstup[1]
    if ref is None:
        # No CRS at all: the GeoJSON spec default applies
        return crs.LONLAT_WGS84
    if linktype is None:
        # Named CRS: look it up by URN among the known systems, falling
        # back to a placeholder that records the unrecognized name
        match = next((c for c in crs.crslist if c.urn == ref), None)
        if match is not None:
            return match
        return crs.CRS("unknown", "unknown", ref)
    # Linked CRS: keep the link type and href
    return crs.CRS("unknown", linktype, ref)
def read_geojson(f):
    """ Read a GeoJSON object file and return a list of geometries """
    reader = geojson.GeoJSONReader(f)
    geometries = []
    for geom in reader.iter_geometries():
        # each geometry may carry its own CRS
        csys = _parsegeojsoncrs(geom.crs)
        if isinstance(geom, geojson.Point):
            converted = guppy.Point(geom.coordinates, crs=csys)
        elif isinstance(geom, geojson.MultiPoint):
            converted = guppy.Multipoint(geom.coordinates, crs=csys)
        elif isinstance(geom, geojson.LineString):
            converted = guppy.Line(geom.coordinates, crs=csys)
        elif isinstance(geom, geojson.Polygon):
            # first ring is the exterior; remaining rings are holes
            converted = guppy.Polygon(geom.coordinates[0],
                                      subs=geom.coordinates[1:],
                                      crs=csys)
        else:
            # unrecognized geometry types are skipped, as before
            continue
        geometries.append(converted)
    return geometries
def read_geojson_features(f):
    """ Read a GeoJSON object file and return a list of features.

    Each feature's properties are split into singleton `properties` and
    per-vertex `data` via _geojson_properties2guppy.
    """
    R = geojson.GeoJSONReader(f)
    features = R.pull_features()
    # (removed a stray debug print() that emitted a blank line per call)
    geoms = []
    # The CRS is defined once at the file level and shared by all features
    coordsys = _parsegeojsoncrs(R.getcrs())
    for (geom, properties, fid) in features:   # fid (feature id) is unused
        if isinstance(geom, geojson.Point):
            # a single vertex, so properties have degree 1
            (p, d) = _geojson_properties2guppy(properties, 1)
            geoms.append(guppy.Point(geom.coordinates, properties=p, data=d,
                                     crs=coordsys))
        elif isinstance(geom, geojson.MultiPoint):
            (p, d) = _geojson_properties2guppy(properties, len(geom.coordinates))
            geoms.append(guppy.Multipoint(geom.coordinates, properties=p, data=d,
                                          crs=coordsys))
        elif isinstance(geom, geojson.LineString):
            (p, d) = _geojson_properties2guppy(properties, len(geom.coordinates))
            geoms.append(guppy.Line(geom.coordinates, properties=p, data=d,
                                    crs=coordsys))
        elif isinstance(geom, geojson.Polygon):
            # NOTE(review): for polygons len(coordinates) is the ring
            # count, not the vertex count -- confirm this degree is intended
            (p, d) = _geojson_properties2guppy(properties, len(geom.coordinates))
            geoms.append(guppy.Polygon(geom.coordinates[0], properties=p, data=d,
                                       subs=geom.coordinates[1:],
                                       crs=coordsys))
    return geoms
def _geojson_properties2guppy(properties, n):
""" Takes a dictionary (derived from a GeoJSON properties object) and
divides it into singleton properties and *n*-degree data. """
props = {}
data = {}
for (key, value) in properties.items():
if isinstance(value, list) or isinstance(value, tuple):
if len(value) == n:
data[key] = value
else:
raise ValueError("properties must be singleton or per-vertex")
else:
props[key] = value
return props, data
def read_xyfile(f, delimiter='', header_rows=0, astype=guppy.Multipoint, coordrank=2):
    """ Read an ASCII delimited table and return a guppy object given by *astype*.

    The first *coordrank* columns are treated as coordinates; any remaining
    columns become the object's data payload. Raises IOError when the table
    has fewer than *coordrank* columns.
    """
    table = xyfile.load_xy(f, delimiter=delimiter, header_rows=header_rows)
    ncols = table.shape[1]
    if ncols < coordrank:
        raise IOError('data table has insufficient number of columns')
    coords = table[:, :coordrank]
    data = table[:, coordrank:] if ncols > coordrank else None
    return astype(coords, data=data)
### Shapefile functions ###
def shape2point(shape):
    """ Convert a shapefile._Shape `shape` to a guppy.Point. """
    # A point shape carries a single vertex; unpack it as the positional
    # argument to Point.
    return guppy.Point(*shape.points)
def shape2line(shape):
    """ Convert a shapefile._Shape `shape` to a guppy.Line. """
    verts = shape.points
    return guppy.Line(verts)
def shape2poly(shape):
    """ Converts a shapefile._Shape `shape` to a guppy.Polygon. """
    # NOTE(review): interior rings (holes) are not separated out here.
    verts = shape.points
    return guppy.Polygon(verts)
def shape2multipoint(shape):
    """ Converts a shapefile._Shape `shape` to a guppy.Multipoint. """
    verts = shape.points
    return guppy.Multipoint(verts)
def get_filenames(stem, check=False):
    """ Given a filename basename, return the associated shapefile paths. If
    `check` is True, ensure that the files exist.

    Returns a dict with keys 'shp', 'shx', 'dbf'. When `check` is True and a
    component is missing, raises FileNotFoundError naming the first missing
    file (FileNotFoundError subclasses Exception, so existing
    `except Exception` handlers still catch it).
    """
    fnms = {'shp': stem + '.shp', 'shx': stem + '.shx', 'dbf': stem + '.dbf'}
    if check:
        # Check in the conventional shp, shx, dbf order
        for fnm in (fnms['shp'], fnms['shx'], fnms['dbf']):
            if not os.path.isfile(fnm):
                raise FileNotFoundError('missing {0}'.format(fnm))
    return fnms
def open_file_dict(fdict):
    """ Open each file in a dictionary of filenames and return a matching
    dictionary of the file objects.

    Files are opened in binary mode; the caller is responsible for closing
    them (see read_shapefile).
    """
    return {ext: open(fnm, 'rb') for ext, fnm in fdict.items()}
def recordsasdata(reader):
    """ Interpret shapefile records as a Metadata object.

    Builds one column per DBF field: d[field_name] = list of that field's
    value across every record.
    """
    d = {}
    records = [rec for rec in reader.records()]
    # fields[0] is presumably pyshp's DeletionFlag pseudo-field, which has
    # no counterpart in the record values -- hence the [1:] slice.
    for (i,k) in enumerate(reader.fields[1:]):
        # k is a field descriptor tuple; k[0] is the field name
        d[k[0]] = [rec[i] for rec in records]
    return Metadata(d)
def recordsasproperties(reader):
    """ Interpret shapefile records as a list of properties dictionaries.

    Returns one dict per record, keyed by field name.
    """
    proplist = []
    # Skip the leading DeletionFlag entry so field names align with record
    # values, and key each property by the field *name* rather than the
    # whole field descriptor tuple (consistent with recordsasdata above).
    keys = [k[0] for k in reader.fields[1:]]
    for rec in reader.records():
        proplist.append(dict(zip(keys, rec)))
    return proplist
def read_shapefile(stem):
    """ Read a shapefile given `stem`, which is the name without an extension.

    Returns a list of guppy geometries: point layers collapse into a single
    Multipoint carrying the records as Metadata; line and polygon layers
    become one geometry per shape with per-shape properties.
    """
    fnms = get_filenames(stem, check=True)
    # Open the files *before* entering the try block: if opening itself
    # fails, `files` would otherwise be unbound and the finally clause
    # would raise a NameError that masks the real error.
    files = open_file_dict(fnms)
    try:
        reader = shapefile.Reader(shp=files['shp'], shx=files['shx'],
                                  dbf=files['dbf'])
        if reader.shapeType == 1:       # Points
            verts = [shp.points[0] for shp in reader.shapes()]
            d = recordsasdata(reader)
            geoms = [guppy.Multipoint(verts, data=d)]
        elif reader.shapeType == 3:     # Lines
            plist = recordsasproperties(reader)
            geoms = []
            for (shp, prop) in zip(reader.shapes(), plist):
                geoms.append(guppy.Line(shp.points, properties=prop))
        elif reader.shapeType == 5:     # Polygon
            plist = recordsasproperties(reader)
            geoms = []
            for (shp, prop) in zip(reader.shapes(), plist):
                geoms.append(guppy.Polygon(shp.points, properties=prop))
        else:
            raise NotImplementedError("Shapefile shape type {0} not "
                                      "implemented".format(reader.shapeType))
    finally:
        for f in files.values():
            f.close()
    return geoms
Cast shapefile records to the appropriate type
""" Convenience reader functions """
import os
import shapefile
import dateutil.parser
from . import guppy
from . import geojson
from . import xyfile
from .. import crs
from .metadata import Metadata
def _parsegeojsoncrs(crstup):
    """ From a tuple representing a GeoJSON (name,None) or (href,type) pair,
    return an appropriate karta crs instance. """
    if crstup[0] is None: # use default as defined by spec
        return crs.LONLAT_WGS84
    elif crstup[1] is None: # named CRS
        # linear scan of the known CRS registry by URN; fall back to a
        # placeholder CRS that records the unrecognized name
        for c in crs.crslist:
            if c.urn == crstup[0]:
                return c
        return crs.CRS("unknown", "unknown", crstup[0])
    else: # linked CRS
        return crs.CRS("unknown", crstup[1], crstup[0])
def read_geojson(f):
    """ Read a GeoJSON object file and return a list of geometries """
    R = geojson.GeoJSONReader(f)
    geoms = R.iter_geometries()
    gplist = []
    for geom in geoms:
        # each geometry may carry its own CRS
        coordsys = _parsegeojsoncrs(geom.crs)
        if isinstance(geom, geojson.Point):
            gplist.append(guppy.Point(geom.coordinates, crs=coordsys))
        elif isinstance(geom, geojson.MultiPoint):
            gplist.append(guppy.Multipoint(geom.coordinates, crs=coordsys))
        elif isinstance(geom, geojson.LineString):
            gplist.append(guppy.Line(geom.coordinates, crs=coordsys))
        elif isinstance(geom, geojson.Polygon):
            # first ring is the exterior; remaining rings are holes
            gplist.append(guppy.Polygon(geom.coordinates[0],
                                        subs=geom.coordinates[1:],
                                        crs=coordsys))
        # NOTE(review): unrecognized geometry types are silently skipped
    return gplist
def read_geojson_features(f):
    """ Read a GeoJSON object file and return a list of features.

    Each feature's properties are split into singleton `properties` and
    per-vertex `data` via _geojson_properties2guppy.
    """
    R = geojson.GeoJSONReader(f)
    features = R.pull_features()
    # (removed a stray debug print() that emitted a blank line per call)
    geoms = []
    # The CRS is defined once at the file level and shared by all features
    coordsys = _parsegeojsoncrs(R.getcrs())
    for (geom, properties, fid) in features:   # fid (feature id) is unused
        if isinstance(geom, geojson.Point):
            # a single vertex, so properties have degree 1
            (p, d) = _geojson_properties2guppy(properties, 1)
            geoms.append(guppy.Point(geom.coordinates, properties=p, data=d,
                                     crs=coordsys))
        elif isinstance(geom, geojson.MultiPoint):
            (p, d) = _geojson_properties2guppy(properties, len(geom.coordinates))
            geoms.append(guppy.Multipoint(geom.coordinates, properties=p, data=d,
                                          crs=coordsys))
        elif isinstance(geom, geojson.LineString):
            (p, d) = _geojson_properties2guppy(properties, len(geom.coordinates))
            geoms.append(guppy.Line(geom.coordinates, properties=p, data=d,
                                    crs=coordsys))
        elif isinstance(geom, geojson.Polygon):
            # NOTE(review): for polygons len(coordinates) is the ring
            # count, not the vertex count -- confirm this degree is intended
            (p, d) = _geojson_properties2guppy(properties, len(geom.coordinates))
            geoms.append(guppy.Polygon(geom.coordinates[0], properties=p, data=d,
                                       subs=geom.coordinates[1:],
                                       crs=coordsys))
    return geoms
def _geojson_properties2guppy(properties, n):
""" Takes a dictionary (derived from a GeoJSON properties object) and
divides it into singleton properties and *n*-degree data. """
props = {}
data = {}
for (key, value) in properties.items():
if isinstance(value, list) or isinstance(value, tuple):
if len(value) == n:
data[key] = value
else:
raise ValueError("properties must be singleton or per-vertex")
else:
props[key] = value
return props, data
def read_xyfile(f, delimiter='', header_rows=0, astype=guppy.Multipoint, coordrank=2):
    """ Read an ASCII delimited table and return a guppy object given by *astype*.

    The first *coordrank* columns are treated as coordinates; any extra
    columns become the object's data payload. Raises IOError when the
    table has fewer than *coordrank* columns.
    """
    dat = xyfile.load_xy(f, delimiter=delimiter, header_rows=header_rows)
    ncols = dat.shape[1]
    if ncols >= coordrank:
        coords = dat[:,:coordrank]
        if ncols > coordrank:
            data = dat[:,coordrank:]
        else:
            data = None
        return astype(coords, data=data)
    else:
        raise IOError('data table has insufficient number of columns')
### Shapefile functions ###
def get_filenames(stem, check=False):
    """ Given a filename basename, return the associated shapefile paths. If
    `check` is True, ensure that the files exist.

    Returns a dict with keys 'shp', 'shx', 'dbf'.
    """
    paths = {ext: stem + '.' + ext for ext in ('shp', 'shx', 'dbf')}
    if check:
        # report the first missing component, checking shp, shx, dbf in order
        missing = [p for p in (paths['shp'], paths['shx'], paths['dbf'])
                   if not os.path.isfile(p)]
        if missing:
            raise Exception('missing {0}'.format(missing[0]))
    return paths
def open_file_dict(fdict):
    """ Open each file in a dictionary of filenames and return a matching
    dictionary of the file objects.

    Files are opened in binary mode; the caller is responsible for closing
    them. NOTE(review): if one open() fails, files opened earlier in the
    loop are leaked.
    """
    files = {}
    for ext in fdict.keys():
        files[ext] = open(fdict[ext], 'rb')
    return files
dBase_type_dict = {"I": int,
"O": float,
"C": str,
"@": dateutil.parser.parse,
"L": bool}
def recordsasdata(reader):
    """ Interpret shapefile records as a Metadata object.

    Builds one column per DBF field, casting each value to the Python type
    implied by the field's dBase type code.
    """
    d = {}
    # identity fallback for unrecognized dBase type codes
    idfunc = lambda a: a
    records = [rec for rec in reader.records()]
    # fields[0] is presumably pyshp's DeletionFlag pseudo-field, which has
    # no counterpart in the record values -- hence the [1:] slice.
    for (i,k) in enumerate(reader.fields[1:]):
        # k is a field descriptor tuple: k[0] name, k[1] dBase type code
        f = dBase_type_dict.get(k[1], idfunc)
        d[k[0]] = [f(rec[i]) for rec in records]
    return Metadata(d)
def recordsasproperties(reader):
    """ Interpret shapefile records as a list of properties dictionaries.

    Returns one dict per record, keyed by field name, with each value cast
    to the Python type implied by the field's dBase type code.
    """
    proplist = []
    # Skip the leading DeletionFlag pseudo-field so the field descriptors
    # align with the record values -- recordsasdata above already applies
    # this [1:] slice; without it every property was keyed/cast by the
    # previous field's descriptor.
    keys = reader.fields[1:]
    # identity fallback for unrecognized dBase type codes
    idfunc = lambda a: a
    for rec in reader.records():
        properties = {}
        for (k, v) in zip(keys, rec):
            # k is a field descriptor tuple: k[0] name, k[1] type code
            f = dBase_type_dict.get(k[1], idfunc)
            properties[k[0]] = f(v)
        proplist.append(properties)
    return proplist
def read_shapefile(stem):
    """ Read a shapefile given `stem`, which is the name without an extension.

    Returns a list of guppy geometries: point layers collapse into a single
    Multipoint carrying the records as Metadata; line and polygon layers
    become one geometry per shape with per-shape properties.
    """
    fnms = get_filenames(stem, check=True)
    # Open the files *before* entering the try block: if opening itself
    # fails, `files` would otherwise be unbound and the finally clause
    # would raise a NameError that masks the real error.
    files = open_file_dict(fnms)
    try:
        reader = shapefile.Reader(shp=files['shp'], shx=files['shx'],
                                  dbf=files['dbf'])
        if reader.shapeType == 1:       # Points
            verts = [shp.points[0] for shp in reader.shapes()]
            d = recordsasdata(reader)
            geoms = [guppy.Multipoint(verts, data=d)]
        elif reader.shapeType == 3:     # Lines
            plist = recordsasproperties(reader)
            geoms = []
            for (shp, prop) in zip(reader.shapes(), plist):
                geoms.append(guppy.Line(shp.points, properties=prop))
        elif reader.shapeType == 5:     # Polygon
            plist = recordsasproperties(reader)
            geoms = []
            for (shp, prop) in zip(reader.shapes(), plist):
                geoms.append(guppy.Polygon(shp.points, properties=prop))
        else:
            raise NotImplementedError("Shapefile shape type {0} not "
                                      "implemented".format(reader.shapeType))
    finally:
        for f in files.values():
            f.close()
    return geoms
|
from django import forms
from django.forms import ModelForm
from django.forms.models import BaseModelFormSet
from django.core.validators import RegexValidator
from django.contrib.auth.models import User
from django.contrib.auth.forms import AuthenticationForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm
from django.templatetags.static import static
from .models import MonsterInstance, Summoner, TeamGroup, Team, RuneInstance
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Div, Layout, Field, Button, HTML, Hidden, Reset
from crispy_forms.bootstrap import FormActions, PrependedText, FieldWithButtons, StrictButton, InlineField
from captcha.fields import ReCaptchaField
import autocomplete_light
# Base URL for the image assets embedded in the form layouts below,
# resolved through Django's staticfiles so it honors STATIC_URL.
STATIC_URL_PREFIX = static('herders/images/')
# User stuff
class CrispyAuthenticationForm(AuthenticationForm):
    """Stock Django login form with a crispy-forms layout attached."""

    def __init__(self, *args, **kwargs):
        super(CrispyAuthenticationForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_method = 'post'
        self.helper.form_action = 'login'
        self.helper.layout = Layout(
            Field('username'),
            Field('password'),
            # Preserves the post-login redirect target; the '{{ next }}'
            # placeholder is presumably substituted when the template
            # renders the form -- TODO confirm.
            Hidden('next', value='{{ next }}'),
            FormActions(Submit('login', 'Log In', css_class='btn-lg btn-primary btn-block')),
        )
class CrispyPasswordChangeForm(PasswordChangeForm):
    """Django password-change form rendered with a crispy-forms layout,
    posting to the 'password_change' URL."""

    def __init__(self, *args, **kwargs):
        super(CrispyPasswordChangeForm, self).__init__(*args, **kwargs)
        helper = FormHelper()
        helper.form_method = 'post'
        helper.form_action = 'password_change'
        # Old password first, then the new password entered twice.
        field_rows = [Field(name) for name in
                      ('old_password', 'new_password1', 'new_password2')]
        actions = FormActions(
            Submit('submit', 'Submit', css_class='btn-lg btn-primary btn-block'))
        helper.layout = Layout(*(field_rows + [actions]))
        self.helper = helper
class CrispyPasswordResetForm(PasswordResetForm):
    """Django password-reset request form (email entry) with a crispy
    layout, posting to the 'password_reset' URL."""

    def __init__(self, *args, **kwargs):
        super(CrispyPasswordResetForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_method = 'post'
        self.helper.form_action = 'password_reset'
        self.helper.layout = Layout(
            Field('email'),
            FormActions(Submit('submit', 'Submit', css_class='btn-lg btn-primary btn-block')),
        )
class CrispySetPasswordForm(SetPasswordForm):
    """Django set-password form (used from the tokenized reset link) with a
    crispy layout. No form_action is set, so it posts back to the current
    URL, keeping the reset token in the path."""

    def __init__(self, *args, **kwargs):
        super(CrispySetPasswordForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_method = 'post'
        self.helper.layout = Layout(
            Field('new_password1'),
            Field('new_password2'),
            FormActions(Submit('submit', 'Submit', css_class='btn-lg btn-primary btn-block')),
        )
class CrispyChangeUsernameForm(forms.Form):
    """Lets a logged-in user pick a new username (alphanumeric and
    underscore only)."""

    username = forms.CharField(
        label='New Username',
        required=True,
        help_text='This will change the username used to log in and the URL used to access your profile.',
        validators=[
            RegexValidator(
                regex='^[a-zA-Z0-9_]+$',
                message='Username must contain only alphanumeric characters and underscore.',
                code='invalid_username'
            ),
        ]
    )

    # Class-level helper shared by all instances; safe here because the
    # layout is static and never mutated per-instance.
    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_action = 'username_change'
    helper.layout = Layout(
        Field('username', css_class='input-sm'),
        FormActions(Submit('change', 'Change', css_class='btn-lg btn-primary btn-block'))
    )
class RegisterUserForm(forms.Form):
    """Account sign-up form: credentials plus optional Summoner's War
    profile details, protected by a reCAPTCHA."""

    username = forms.CharField(
        label='Username',
        required=True,
        help_text='Used to link to your profile to others: http://swarfarm.com/profile/<username>/',
        validators=[
            RegexValidator(
                regex='^[a-zA-Z0-9_]+$',
                message='Username must contain only alphanumeric characters and underscore.',
                code='invalid_username'
            ),
        ]
    )
    email = forms.EmailField(required=True, help_text='Your email address will only be used for password resets and account expiration notices.')
    password = forms.CharField(label="Password", required=True, widget=forms.PasswordInput)
    summoner_name = forms.CharField(label="Summoner's War Account Name", required=False, help_text='Not required. Visible to others if you make your SWARFARM account public.')
    is_public = forms.BooleanField(label='Make my SWARFARM account visible to others', required=False)
    # Spam protection on account creation.
    captcha = ReCaptchaField()

    # Class-level helper shared by all instances; safe because the layout
    # is static and never mutated per-instance.
    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_action = 'herders:register'
    helper.layout = Layout(
        Field('username', css_class='input-sm'),
        Field('password', css_class='input-sm'),
        Field('email', css_class='input-sm'),
        Field('summoner_name', css_class='input-sm'),
        Field('is_public'),
        Field('captcha'),
        FormActions(Submit('register', 'Register', css_class='btn-lg btn-primary btn-block'))
    )
class EditUserForm(ModelForm):
    """Edit the Django auth User record; only the email field is exposed."""

    def __init__(self, *args, **kwargs):
        super(EditUserForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        # Rendered inside a larger page form, so emit no <form> tag here.
        self.helper.form_tag = False
        self.helper.layout = Layout(
            Div(
                Field('email'),
            )
        )

    class Meta:
        model = User
        fields = (
            'email',
        )
class EditSummonerForm(ModelForm):
    """Edit the Summoner profile: in-game name, visibility, and timezone."""

    def __init__(self, *args, **kwargs):
        super(EditSummonerForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        # Rendered inside a larger page form, so emit no <form> tag here.
        self.helper.form_tag = False
        self.helper.layout = Layout(
            Div(
                Field('summoner_name'),
                Field('public'),
                Field('timezone'),
            ),
        )

    class Meta:
        model = Summoner
        fields = (
            'summoner_name',
            'public',
            'timezone',
        )
        labels = {
            'summoner_name': "Summoner's War Account Name",
            'public': 'Make my SWARFARM account visible to others',
        }
class DeleteProfileForm(forms.Form):
    """Account-deletion confirmation: a checkbox, a typed acknowledgement
    phrase, and a reCAPTCHA must all pass before the profile is removed."""

    confirmbox = forms.BooleanField(label="I seriously do want to delete my account and all associated data", required=True)
    passcode = forms.CharField(
        label='Acknowledgement:',
        required=True,
        help_text='Enter the following text: I acknowledge everything will be permanently deleted',
        validators=[
            # The acknowledgement sentence must be typed exactly.
            RegexValidator(
                regex='^I acknowledge everything will be permanently deleted$',
                message="You didn't enter the correct text.",
                code='invalid_acknowledgement'
            )
        ]
    )
    captcha = ReCaptchaField()

    # Class-level helper shared by all instances; safe because the layout
    # is static and never mutated per-instance.
    helper = FormHelper()
    helper.form_method = 'post'
    helper.layout = Layout(
        Div(
            Field('confirmbox', css_class='checkbox'),
            Field('passcode', css_class='input-sm'),
            Field('captcha'),
            FormActions(
                Submit('delete', 'Delete', css_class='btn-lg btn-danger btn-block'),
            ),
            css_class='col-md-6 col-md-offset-3',
        ),
    )
# SWARFARM forms
class EditEssenceStorageForm(ModelForm):
    """Edit the summoner's essence inventory.

    The layout is a 6x3 grid -- one row per element, one cell per quality
    level -- where each quantity field is prefixed with its essence icon.
    The rows were previously 18 hand-copied Div blocks; they are now
    generated from the element/level grid below, producing an identical
    layout.
    """

    # Grid mirrored by the Meta.fields list below (order matters: it
    # determines row and cell order in the rendered form).
    _ELEMENTS = ('magic', 'fire', 'water', 'wind', 'light', 'dark')
    _LEVELS = ('low', 'mid', 'high')

    def __init__(self, *args, **kwargs):
        super(EditEssenceStorageForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_method = 'post'
        self.helper.form_show_labels = True

        rows = []
        for element in self._ELEMENTS:
            cells = []
            for level in self._LEVELS:
                # e.g. field 'storage_magic_low' with icon magic_low.png
                cells.append(Div(
                    PrependedText(
                        'storage_{0}_{1}'.format(element, level),
                        '<img src="' + STATIC_URL_PREFIX + 'essences/{0}_{1}.png" class="prepended-image"/>'.format(element, level),
                        min=0,
                    ),
                    css_class='col-lg-1 storage_group prepended-image-group',
                ))
            rows.append(Div(*cells, css_class='row'))

        rows.append(Div(
            FormActions(
                Submit('save', 'Save and Go Back'),
                Submit('saveandcontinue', 'Save and Continue Editing'),
            ),
            css_class='row',
        ))
        self.helper.layout = Layout(*rows)

    class Meta:
        model = Summoner
        fields = (
            'storage_magic_low',
            'storage_magic_mid',
            'storage_magic_high',
            'storage_fire_low',
            'storage_fire_mid',
            'storage_fire_high',
            'storage_water_low',
            'storage_water_mid',
            'storage_water_high',
            'storage_wind_low',
            'storage_wind_mid',
            'storage_wind_high',
            'storage_light_low',
            'storage_light_mid',
            'storage_light_high',
            'storage_dark_low',
            'storage_dark_mid',
            'storage_dark_high',
        )
        labels = {
            'storage_magic_low': 'Magic Low',
            'storage_magic_mid': 'Magic Mid',
            'storage_magic_high': 'Magic High',
            'storage_fire_low': 'Fire Low',
            'storage_fire_mid': 'Fire Mid',
            'storage_fire_high': 'Fire High',
            'storage_water_low': 'Water Low',
            'storage_water_mid': 'Water Mid',
            'storage_water_high': 'Water High',
            'storage_wind_low': 'Wind Low',
            'storage_wind_mid': 'Wind Mid',
            'storage_wind_high': 'Wind High',
            'storage_light_low': 'Light Low',
            'storage_light_mid': 'Light Mid',
            'storage_light_high': 'Light High',
            'storage_dark_low': 'Dark Low',
            'storage_dark_mid': 'Dark Mid',
            'storage_dark_high': 'Dark High',
        }
class AddMonsterInstanceForm(autocomplete_light.ModelForm):
    """Add a MonsterInstance to the user's collection, with an autocompleted
    monster picker and a star-rating widget; submitted via AJAX."""

    monster = autocomplete_light.ModelChoiceField('MonsterAutocomplete')

    def __init__(self, *args, **kwargs):
        super(AddMonsterInstanceForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_class = 'ajax-form'
        self.helper.layout = Layout(
            # data_* kwargs render as HTML data attributes -- presumably
            # hooks for front-end JS that syncs stars/fodder/priority when
            # a monster is chosen; confirm against the page scripts.
            Field(
                'monster',
                data_toggle='popover',
                data_trigger='focus',
                data_container='body',
                title='Autocomplete Tips',
                data_content="Enter the monster's awakened or unawakened name (either will work). To further narrow results, type the element too. Example: \"Raksha water\" will list water Rakshasa and Su",
                data_stars_field=self['stars'].auto_id,
                data_fodder_field=self['fodder'].auto_id,
                data_priority_field=self['priority'].auto_id,
                data_set_stars='',
            ),
            # Star rating widget constrained to 0-6 stars.
            Field('stars', css_class='rating hidden', value=1, data_start=0, data_stop=6, data_stars=6),
            FieldWithButtons(
                Field('level', value=1, min=1, max=40),
                StrictButton("Max", name="Set_Max_Level", data_stars_field=self['stars'].auto_id, data_level_field=self['level'].auto_id, data_set_max_level=''),
            ),
            Field('fodder', css_class='checkbox'),
            Field('in_storage', css_class='checkbox'),
            Field('ignore_for_fusion', css_class='checkbox'),
            Field('priority',),
            Field('notes'),
            FormActions(
                Submit('save', 'Save', css_class='btn btn-primary'),
                Button('cancel', 'Cancel', css_class='btn btn-link', data_dismiss='modal')
            ),
        )

    class Meta:
        model = MonsterInstance
        fields = ('monster', 'stars', 'level', 'fodder', 'in_storage', 'ignore_for_fusion', 'priority', 'notes')
class BulkAddMonsterInstanceFormset(BaseModelFormSet):
    """Formset for adding many MonsterInstances at once."""

    def __init__(self, *args, **kwargs):
        super(BulkAddMonsterInstanceFormset, self).__init__(*args, **kwargs)
        # Empty queryset so the formset renders blank rows only, instead of
        # listing every existing instance.
        self.queryset = MonsterInstance.objects.none()
class BulkAddMonsterInstanceForm(autocomplete_light.ModelForm):
    """One table row of the bulk-add formset; raw <td> HTML is interleaved
    so crispy renders the fields into an existing table."""

    monster = autocomplete_light.ModelChoiceField('MonsterAutocomplete')

    def __init__(self, *args, **kwargs):
        super(BulkAddMonsterInstanceForm, self).__init__(*args, **kwargs)
        # Rows may be left blank, so the picker is not required per-form.
        self.fields['monster'].required = False
        self.helper = FormHelper(self)
        self.helper.form_tag = False
        self.helper.form_show_labels = False
        # presumably the CSRF token is emitted once by the enclosing
        # formset template -- confirm.
        self.helper.disable_csrf = True
        self.helper.layout = Layout(
            HTML('<td>'),
            InlineField(
                'monster',
                data_stars_field=self['stars'].auto_id,
                data_fodder_field=self['fodder'].auto_id,
                data_set_stars=''
            ),
            HTML('</td><td>'),
            InlineField('stars', css_class='rating hidden', value=1, data_start=0, data_stop=6, data_stars=6),
            HTML('</td><td>'),
            FieldWithButtons(
                Field('level', value=1, min=1, max=40),
                StrictButton("Max", name="Set_Max_Level", data_stars_field=self['stars'].auto_id, data_level_field=self['level'].auto_id, data_set_max_level=''),
            ),
            HTML('</td><td>'),
            Field('in_storage'),
            HTML('</td><td>'),
            Field('fodder'),
            HTML('</td>'),
        )

    class Meta:
        model = MonsterInstance
        fields = ('monster', 'stars', 'level', 'in_storage', 'fodder')
class EditMonsterInstanceForm(ModelForm):
    """Modal form for editing an existing MonsterInstance (stars, level,
    flags, skill levels and notes); owner and monster are not editable."""

    def __init__(self, *args, **kwargs):
        super(EditMonsterInstanceForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_method = 'post'
        self.helper.form_class = 'ajax-form'
        self.helper.layout = Layout(
            Div(
                Field('stars', css_class='rating hidden', value=1, data_start=0, data_stop=6, data_stars=6),
                FieldWithButtons(
                    Field('level', value=1, min=1, max=40),
                    StrictButton("Max", name="Set_Max_Level", data_stars_field=self['stars'].auto_id, data_level_field=self['level'].auto_id, data_set_max_level=''),
                ),
                Field('fodder', css_class='checkbox'),
                Field('in_storage', css_class='checkbox'),
                Field('ignore_for_fusion', css_class='checkbox'),
                'priority',
                'skill_1_level',
                'skill_2_level',
                'skill_3_level',
                'skill_4_level',
                Field('notes'),
            ),
            Div(
                FormActions(
                    Submit('save', 'Save', css_class='btn btn-primary'),
                    HTML("""<button class="btn btn-link" data-dismiss="modal">Cancel</button>"""),
                ),
            )
        )

    class Meta:
        model = MonsterInstance
        exclude = ('owner', 'monster')
class PowerUpMonsterInstanceForm(forms.Form):
    """Form for powering up or evolving a monster using other owned
    monsters as material."""

    monster = autocomplete_light.ModelMultipleChoiceField('MonsterInstanceAutocomplete')
    monster.label = 'Material Monsters'
    monster.required = False
    ignore_evolution = forms.BooleanField(
        label='Ignore evolution error checking',
        required=False,
    )

    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_class = 'ajax-form'
    helper.layout = Layout(
        Field('monster'),
        Field('ignore_evolution'),
        FormActions(
            # Two submit buttons; presumably the view dispatches on which
            # name ('power_up' vs 'evolve') is present in POST — verify.
            Submit('power_up', 'Power Up', css_class='btn btn-primary'),
            Submit('evolve', 'Evolve', css_class='btn btn-primary'),
        )
    )
class AwakenMonsterInstanceForm(forms.Form):
    """Confirmation form for awakening a monster, optionally deducting
    awakening materials from essence storage."""

    subtract_materials = forms.BooleanField(
        label='Subtract Materials from stock (Insufficient quantities will be reduced to 0)',
        required=False
    )

    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_class = 'ajax-form'
    helper.layout = Layout(
        Div(
            # checked='' renders the checkbox pre-checked.
            Field('subtract_materials', css_class='checkbox', checked=''),
        ),
        Div(
            FormActions(
                Submit('awaken', 'Awaken', css_class='btn btn-primary'),
                HTML("""<a href="{{ return_path }}" class="btn btn-link">Cancel</a>"""),
            ),
        )
    )
class AddTeamGroupForm(ModelForm):
    """Modal form for creating a TeamGroup (name only)."""

    def __init__(self, *args, **kwargs):
        super(AddTeamGroupForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_method = 'post'
        # helper.form_action must be set in view
        self.helper.layout = Layout(
            Div(
                Field('name'),
                css_class='modal-body',
            ),
            Div(
                FormActions(
                    Submit('save', 'Save', css_class='btn btn-primary'),
                    Button('cancel', 'Cancel', css_class='btn btn-link', data_dismiss='modal')
                ),
                css_class='modal-footer',
            )
        )

    class Meta:
        model = TeamGroup
        exclude = ('id', 'owner')
class EditTeamGroupForm(ModelForm):
    """Form for renaming a TeamGroup, with cancel and delete links.

    Template context must provide ``return_path``, ``profile_name`` and
    ``group_id`` for the raw HTML links below.
    """

    def __init__(self, *args, **kwargs):
        super(EditTeamGroupForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_method = 'post'
        self.helper.layout = Layout(
            Field('name'),
            FormActions(
                Submit('save', 'Save', css_class='btn btn-primary'),
                HTML("""<a href="{{ return_path }}" class="btn btn-link">Cancel</a>"""),
                HTML("""<a href="{% url 'herders:team_group_delete' profile_name=profile_name group_id=group_id%}" class="btn btn-danger pull-right">Delete</a>"""),
            ),
        )

    class Meta:
        model = TeamGroup
        exclude = ('id', 'owner')
class DeleteTeamGroupForm(forms.Form):
    """Confirmation form for deleting a TeamGroup; its teams can either be
    reassigned to another group or deleted along with it."""

    # NOTE(review): queryset spans ALL TeamGroups, not just the current
    # owner's — presumably the view narrows this before rendering; verify,
    # otherwise other users' group names are exposed in the dropdown.
    reassign_group = forms.ModelChoiceField(
        queryset=TeamGroup.objects.all(),
        required=False,
        label="Reassign teams in this group to:"
    )

    helper = FormHelper()
    helper.form_method = 'post'
    # helper.form_action must be set in view
    helper.layout = Layout(
        Field('reassign_group', css_class='input-sm'),
        FormActions(
            Submit('apply', 'Apply', css_class='btn btn-primary'),
            Submit('delete', 'Delete all teams', css_class='btn btn-danger'),
        )
    )
class EditTeamForm(ModelForm):
    """Form for editing a Team: group/name/favorite, description, leader
    and roster (leader/roster use monster-instance autocomplete widgets)."""

    def __init__(self, *args, **kwargs):
        super(EditTeamForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_method = 'post'
        self.helper.form_id = 'EditTeamForm'
        self.helper.layout = Layout(
            Div(
                Field('group'),
                Field('name'),
                Field('favorite'),
            ),
            Field('description'),
            Field('leader'),
            Field('roster'),
            FormActions(
                Submit('save', 'Save', css_class='btn btn-primary'),
            ),
        )

    class Meta:
        model = Team
        exclude = ('id',)
        widgets = {
            'roster': autocomplete_light.MultipleChoiceWidget('MonsterInstanceAutocomplete'),
            'leader': autocomplete_light.ChoiceWidget('MonsterInstanceAutocomplete'),
        }

    def clean(self):
        """Cross-field validation: the leader must not also appear in the roster.

        Fixes two defects in the original:
        - ``super().clean()`` was called last and its result discarded; it now
          runs first, per Django convention, and its result is returned.
        - ``leader in roster`` raised TypeError when the roster field itself
          failed validation (``roster`` is None in that case); both values are
          now guarded before the membership test.
        """
        from django.core.exceptions import ValidationError

        cleaned_data = super(EditTeamForm, self).clean()
        leader = self.cleaned_data.get('leader')
        roster = self.cleaned_data.get('roster')

        # Check that leader is not also in the roster
        if leader is not None and roster is not None and leader in roster:
            raise ValidationError(
                'Leader cannot be included in the roster as well',
                code='leader_in_roster'
            )
        return cleaned_data
class AddRuneInstanceForm(ModelForm):
    """Form for adding a RuneInstance.

    The layout renders its own <label> markup in a grid, so the automatic
    labels for the stat fields are suppressed. The fourteen copy-pasted
    ``label = False`` assignments are replaced with a single loop.
    """

    def __init__(self, *args, **kwargs):
        super(AddRuneInstanceForm, self).__init__(*args, **kwargs)

        # The layout below supplies its own labels for these fields.
        for field_name in (
                'stars',
                'main_stat', 'main_stat_value',
                'innate_stat', 'innate_stat_value',
                'substat_1', 'substat_1_value',
                'substat_2', 'substat_2_value',
                'substat_3', 'substat_3_value',
                'substat_4', 'substat_4_value',
                'assigned_to'):
            self.fields[field_name].label = False

        self.helper = FormHelper(self)
        self.helper.form_method = 'post'
        self.helper.form_id = 'addRuneForm'
        self.helper.form_class = 'ajax-form'
        self.helper.layout = Layout(
            # Left column: rune type picker rendered as image radio buttons.
            Div(
                Field('type', template="crispy/rune_button_radio_select.html"),
                css_class='col-lg-3',
            ),
            # Right column: slot/level/stars, then one row per stat.
            Div(
                Div(
                    Div(Field('slot', placeholder='1-6'), css_class='col-lg-4 col-lg-offset-3'),
                    Div(Field('level', placeholder='0-15'), css_class='col-lg-5'),
                    css_class='row'
                ),
                Div(
                    Div(HTML('<label>Stars</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div(Field('stars', css_class='rating hidden', value=1, data_start=0, data_stop=6, data_stars=6), css_class='col-lg-9'),
                    css_class='row'
                ),
                Div(
                    Div(HTML('<label>Stat Type</label>'), css_class='col-lg-4 col-lg-offset-3'),
                    Div(HTML('<label>Stat Value</label>'), css_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Main Stat</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Field('main_stat', wrapper_class='col-lg-4'),
                    Field('main_stat_value', wrapper_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Innate Stat</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div('innate_stat', css_class='col-lg-4'),
                    Div('innate_stat_value', css_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Substat 1</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div('substat_1', css_class='col-lg-4'),
                    Div('substat_1_value', css_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Substat 2</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div('substat_2', css_class='col-lg-4'),
                    Div('substat_2_value', css_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Substat 3</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div('substat_3', css_class='col-lg-4'),
                    Div('substat_3_value', css_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Substat 4</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div('substat_4', css_class='col-lg-4'),
                    Div('substat_4_value', css_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Assign To</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div(
                        Field('assigned_to'),
                        css_class='col-lg-9',
                    ),
                    css_class='row',
                ),
                css_class='col-lg-9',
            ),
            Div(css_class='clearfix'),
            FormActions(
                Submit('save', 'Save'),
            ),
        )

    class Meta:
        model = RuneInstance
        fields = (
            'type', 'stars', 'level', 'slot',
            'main_stat', 'main_stat_value',
            'innate_stat', 'innate_stat_value',
            'substat_1', 'substat_1_value',
            'substat_2', 'substat_2_value',
            'substat_3', 'substat_3_value',
            'substat_4', 'substat_4_value',
            'assigned_to',
        )
        widgets = {
            'assigned_to': autocomplete_light.ChoiceWidget('MonsterInstanceAutocomplete'),
        }
class AssignRuneForm(forms.Form):
    """Filter form shown when assigning a rune to a monster; inputs carry
    the 'auto-submit' class so changes refresh the rune list.

    Field names use Django lookup syntax (e.g. ``level__gte``) — presumably
    passed straight to a queryset filter by the view; verify there.
    """

    type = forms.MultipleChoiceField(
        choices=RuneInstance.TYPE_CHOICES,
        widget=forms.CheckboxSelectMultiple,
        required=False
    )
    level__gte = forms.IntegerField(
        label="Minimum Level",
        min_value=0,
        max_value=15,
        required=False,
    )
    stars__gte = forms.IntegerField(
        label="Minimum Stars",
        required=False
    )
    slot = forms.IntegerField(
        min_value=1,
        max_value=6,
        required=False
    )

    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_id = 'AssignRuneForm'
    helper.layout = Layout(
        StrictButton('Create New', id='addNewRune', css_class='btn btn-primary btn-block'),
        Reset('Reset Form', 'Reset Filters', css_class='btn btn-danger btn-block'),
        Field('type', css_class='auto-submit', template='crispy/rune_button_checkbox_select.html'),
        # Slot is hidden — presumably fixed by the rune position being filled; verify in view.
        Field('slot', type='hidden', css_class='auto-submit'),
        Field('level__gte', css_class='auto-submit'),
        Field('stars__gte', css_class='rating hidden auto-submit', value=1, data_start=0, data_stop=6, data_stars=6),
    )
class FilterRuneForm(forms.Form):
    """Filter form for the rune inventory page; inputs auto-submit on change.

    Field names use Django lookup syntax (e.g. ``level__gte``) — presumably
    passed straight to a queryset filter by the view; verify there.
    """

    type = forms.MultipleChoiceField(
        choices=RuneInstance.TYPE_CHOICES,
        widget=forms.CheckboxSelectMultiple,
        required=False
    )
    # Fix: removed `initial=1` — the filter should default to no minimum
    # level so an unfiltered inventory also shows level-0 runes.
    level__gte = forms.IntegerField(
        label="Minimum Level",
        min_value=0,
        max_value=15,
        required=False,
    )
    stars__gte = forms.IntegerField(
        label="Minimum Stars",
        required=False
    )
    slot = forms.IntegerField(
        min_value=1,
        max_value=6,
        required=False
    )

    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_id = 'FilterInventoryForm'
    helper.layout = Layout(
        Reset('Reset Form', 'Reset Filters', css_class='btn btn-danger btn-block'),
        Field('type', css_class='auto-submit', template='crispy/rune_button_checkbox_select.html'),
        Field('slot', css_class='auto-submit'),
        Field('level__gte', css_class='auto-submit'),
        Field('stars__gte', css_class='rating hidden auto-submit', value=1, data_start=0, data_stop=6, data_stars=6),
    )
Remove default filter level of 1
from django import forms
from django.forms import ModelForm
from django.forms.models import BaseModelFormSet
from django.core.validators import RegexValidator
from django.contrib.auth.models import User
from django.contrib.auth.forms import AuthenticationForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm
from django.templatetags.static import static
from .models import MonsterInstance, Summoner, TeamGroup, Team, RuneInstance
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Div, Layout, Field, Button, HTML, Hidden, Reset
from crispy_forms.bootstrap import FormActions, PrependedText, FieldWithButtons, StrictButton, InlineField
from captcha.fields import ReCaptchaField
import autocomplete_light
# Base static URL for herders images (used to build <img> tags in form layouts).
STATIC_URL_PREFIX = static('herders/images/')

# User stuff
class CrispyAuthenticationForm(AuthenticationForm):
    """Django's AuthenticationForm with a crispy-forms layout; posts to the
    'login' URL and carries the post-login redirect in a hidden 'next' field."""

    def __init__(self, *args, **kwargs):
        super(CrispyAuthenticationForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_method = 'post'
        self.helper.form_action = 'login'
        self.helper.layout = Layout(
            Field('username'),
            Field('password'),
            # The template renders the actual redirect target into {{ next }}.
            Hidden('next', value='{{ next }}'),
            FormActions(Submit('login', 'Log In', css_class='btn-lg btn-primary btn-block')),
        )
class CrispyPasswordChangeForm(PasswordChangeForm):
    """Django's PasswordChangeForm with a crispy-forms layout."""

    def __init__(self, *args, **kwargs):
        super(CrispyPasswordChangeForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_method = 'post'
        self.helper.form_action = 'password_change'
        self.helper.layout = Layout(
            Field('old_password'),
            Field('new_password1'),
            Field('new_password2'),
            FormActions(Submit('submit', 'Submit', css_class='btn-lg btn-primary btn-block')),
        )
class CrispyPasswordResetForm(PasswordResetForm):
    """Django's PasswordResetForm (email entry) with a crispy-forms layout."""

    def __init__(self, *args, **kwargs):
        super(CrispyPasswordResetForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_method = 'post'
        self.helper.form_action = 'password_reset'
        self.helper.layout = Layout(
            Field('email'),
            FormActions(Submit('submit', 'Submit', css_class='btn-lg btn-primary btn-block')),
        )
class CrispySetPasswordForm(SetPasswordForm):
    """Django's SetPasswordForm (reset-link flow) with a crispy-forms layout;
    no form_action is set, so it posts back to the current URL."""

    def __init__(self, *args, **kwargs):
        super(CrispySetPasswordForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_method = 'post'
        self.helper.layout = Layout(
            Field('new_password1'),
            Field('new_password2'),
            FormActions(Submit('submit', 'Submit', css_class='btn-lg btn-primary btn-block')),
        )
class CrispyChangeUsernameForm(forms.Form):
    """Form to change the account username (alphanumeric + underscore only,
    enforced by regex validator)."""

    username = forms.CharField(
        label='New Username',
        required=True,
        help_text='This will change the username used to log in and the URL used to access your profile.',
        validators=[
            RegexValidator(
                regex='^[a-zA-Z0-9_]+$',
                message='Username must contain only alphanumeric characters and underscore.',
                code='invalid_username'
            ),
        ]
    )

    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_action = 'username_change'
    helper.layout = Layout(
        Field('username', css_class='input-sm'),
        FormActions(Submit('change', 'Change', css_class='btn-lg btn-primary btn-block'))
    )
class RegisterUserForm(forms.Form):
    """Account registration form: username/email/password plus optional
    Summoner's War account details and a reCAPTCHA challenge."""

    username = forms.CharField(
        label='Username',
        required=True,
        help_text='Used to link to your profile to others: http://swarfarm.com/profile/<username>/',
        validators=[
            RegexValidator(
                regex='^[a-zA-Z0-9_]+$',
                message='Username must contain only alphanumeric characters and underscore.',
                code='invalid_username'
            ),
        ]
    )
    email = forms.EmailField(required=True, help_text='Your email address will only be used for password resets and account expiration notices.')
    password = forms.CharField(label="Password", required=True, widget=forms.PasswordInput)
    summoner_name = forms.CharField(label="Summoner's War Account Name", required=False, help_text='Not required. Visible to others if you make your SWARFARM account public.')
    is_public = forms.BooleanField(label='Make my SWARFARM account visible to others', required=False)
    captcha = ReCaptchaField()

    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_action = 'herders:register'
    helper.layout = Layout(
        Field('username', css_class='input-sm'),
        Field('password', css_class='input-sm'),
        Field('email', css_class='input-sm'),
        Field('summoner_name', css_class='input-sm'),
        Field('is_public'),
        Field('captcha'),
        FormActions(Submit('register', 'Register', css_class='btn-lg btn-primary btn-block'))
    )
class EditUserForm(ModelForm):
    """Edits the Django User's email address; rendered without its own
    <form> tag (form_tag=False), so the template supplies one."""

    def __init__(self, *args, **kwargs):
        super(EditUserForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_tag = False
        self.helper.layout = Layout(
            Div(
                Field('email'),
            )
        )

    class Meta:
        model = User
        fields = (
            'email',
        )
class EditSummonerForm(ModelForm):
    """Edits Summoner profile settings (in-game name, visibility, timezone);
    rendered without its own <form> tag (form_tag=False)."""

    def __init__(self, *args, **kwargs):
        super(EditSummonerForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_tag = False
        self.helper.layout = Layout(
            Div(
                Field('summoner_name'),
                Field('public'),
                Field('timezone'),
            ),
        )

    class Meta:
        model = Summoner
        fields = (
            'summoner_name',
            'public',
            'timezone',
        )
        labels = {
            'summoner_name': "Summoner's War Account Name",
            'public': 'Make my SWARFARM account visible to others',
        }
class DeleteProfileForm(forms.Form):
    """Account deletion confirmation: a required checkbox, a typed
    acknowledgement phrase (validated by exact-match regex), and a
    reCAPTCHA."""

    confirmbox = forms.BooleanField(label="I seriously do want to delete my account and all associated data", required=True)
    passcode = forms.CharField(
        label='Acknowledgement:',
        required=True,
        help_text='Enter the following text: I acknowledge everything will be permanently deleted',
        validators=[
            RegexValidator(
                # Must match the help_text phrase exactly.
                regex='^I acknowledge everything will be permanently deleted$',
                message="You didn't enter the correct text.",
                code='invalid_acknowledgement'
            )
        ]
    )
    captcha = ReCaptchaField()

    helper = FormHelper()
    helper.form_method = 'post'
    helper.layout = Layout(
        Div(
            Field('confirmbox', css_class='checkbox'),
            Field('passcode', css_class='input-sm'),
            Field('captcha'),
            FormActions(
                Submit('delete', 'Delete', css_class='btn-lg btn-danger btn-block'),
            ),
            css_class='col-md-6 col-md-offset-3',
        ),
    )
# SWARFARM forms
class EditEssenceStorageForm(ModelForm):
    """Form for editing a Summoner's essence storage quantities.

    Each of the six elements gets one row of low/mid/high inputs with the
    matching essence icon prepended. The original wrote all 18 layout
    cells, 18 Meta fields and 18 Meta labels by hand; they are now
    generated from the element/tier combinations, producing identical
    field names, markup and labels.
    """

    # Order matters: it determines both row order and Meta.fields order.
    ELEMENTS = ('magic', 'fire', 'water', 'wind', 'light', 'dark')
    TIERS = ('low', 'mid', 'high')

    def __init__(self, *args, **kwargs):
        super(EditEssenceStorageForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_method = 'post'
        self.helper.form_show_labels = True

        # One row per element, one column per tier.
        rows = []
        for element in self.ELEMENTS:
            columns = []
            for tier in self.TIERS:
                field_name = 'storage_%s_%s' % (element, tier)
                icon = '<img src="' + STATIC_URL_PREFIX + 'essences/' + element + '_' + tier + '.png" class="prepended-image"/>'
                columns.append(Div(
                    PrependedText(field_name, icon, min=0),
                    css_class='col-lg-1 storage_group prepended-image-group',
                ))
            rows.append(Div(*columns, css_class='row'))

        # Final row: submit buttons.
        rows.append(Div(
            FormActions(
                Submit('save', 'Save and Go Back'),
                Submit('saveandcontinue', 'Save and Continue Editing'),
            ),
            css_class='row',
        ))
        self.helper.layout = Layout(*rows)

    class Meta:
        model = Summoner
        # Generated in the same element-major, low/mid/high order as the
        # original hand-written tuple. (Meta cannot see the outer class's
        # attributes, so the literals are repeated here.)
        fields = tuple(
            'storage_%s_%s' % (element, tier)
            for element in ('magic', 'fire', 'water', 'wind', 'light', 'dark')
            for tier in ('low', 'mid', 'high')
        )
        # e.g. 'storage_magic_low' -> 'Magic Low'
        labels = dict(
            ('storage_%s_%s' % (element, tier), '%s %s' % (element.capitalize(), tier.capitalize()))
            for element in ('magic', 'fire', 'water', 'wind', 'light', 'dark')
            for tier in ('low', 'mid', 'high')
        )
class AddMonsterInstanceForm(autocomplete_light.ModelForm):
    """Modal form for adding a single MonsterInstance via autocomplete."""

    # Autocomplete-backed monster selector.
    monster = autocomplete_light.ModelChoiceField('MonsterAutocomplete')

    def __init__(self, *args, **kwargs):
        super(AddMonsterInstanceForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_class = 'ajax-form'
        self.helper.layout = Layout(
            Field(
                'monster',
                # Bootstrap popover with usage tips, shown while the field has focus.
                data_toggle='popover',
                data_trigger='focus',
                data_container='body',
                title='Autocomplete Tips',
                data_content="Enter the monster's awakened or unawakened name (either will work). To further narrow results, type the element too. Example: \"Raksha water\" will list water Rakshasa and Su",
                # data-* hooks: presumably consumed by client-side JS to auto-set
                # stars/fodder/priority from the selected monster — verify in templates.
                data_stars_field=self['stars'].auto_id,
                data_fodder_field=self['fodder'].auto_id,
                data_priority_field=self['priority'].auto_id,
                data_set_stars='',
            ),
            Field('stars', css_class='rating hidden', value=1, data_start=0, data_stop=6, data_stars=6),
            FieldWithButtons(
                Field('level', value=1, min=1, max=40),
                StrictButton("Max", name="Set_Max_Level", data_stars_field=self['stars'].auto_id, data_level_field=self['level'].auto_id, data_set_max_level=''),
            ),
            Field('fodder', css_class='checkbox'),
            Field('in_storage', css_class='checkbox'),
            Field('ignore_for_fusion', css_class='checkbox'),
            Field('priority',),
            Field('notes'),
            FormActions(
                Submit('save', 'Save', css_class='btn btn-primary'),
                Button('cancel', 'Cancel', css_class='btn btn-link', data_dismiss='modal')
            ),
        )

    class Meta:
        model = MonsterInstance
        fields = ('monster', 'stars', 'level', 'fodder', 'in_storage', 'ignore_for_fusion', 'priority', 'notes')
class BulkAddMonsterInstanceFormset(BaseModelFormSet):
    """Formset for bulk-adding MonsterInstances.

    Starts from an empty queryset so the formset only creates new rows
    instead of listing existing instances for editing.
    """

    def __init__(self, *args, **kwargs):
        super(BulkAddMonsterInstanceFormset, self).__init__(*args, **kwargs)
        self.queryset = MonsterInstance.objects.none()
class BulkAddMonsterInstanceForm(autocomplete_light.ModelForm):
    """One table row of the bulk-add formset; fields are wrapped in raw
    <td> markup so the formset renders as an HTML table."""

    monster = autocomplete_light.ModelChoiceField('MonsterAutocomplete')

    def __init__(self, *args, **kwargs):
        super(BulkAddMonsterInstanceForm, self).__init__(*args, **kwargs)
        # Optional so blank rows in the formset still validate.
        self.fields['monster'].required = False
        self.helper = FormHelper(self)
        self.helper.form_tag = False  # the enclosing formset/page supplies the <form> tag
        self.helper.form_show_labels = False
        self.helper.disable_csrf = True  # presumably emitted once by the parent form — verify in template
        self.helper.layout = Layout(
            HTML('<td>'),
            InlineField(
                'monster',
                data_stars_field=self['stars'].auto_id,
                data_fodder_field=self['fodder'].auto_id,
                data_set_stars=''
            ),
            HTML('</td><td>'),
            InlineField('stars', css_class='rating hidden', value=1, data_start=0, data_stop=6, data_stars=6),
            HTML('</td><td>'),
            FieldWithButtons(
                Field('level', value=1, min=1, max=40),
                StrictButton("Max", name="Set_Max_Level", data_stars_field=self['stars'].auto_id, data_level_field=self['level'].auto_id, data_set_max_level=''),
            ),
            HTML('</td><td>'),
            Field('in_storage'),
            HTML('</td><td>'),
            Field('fodder'),
            HTML('</td>'),
        )

    class Meta:
        model = MonsterInstance
        fields = ('monster', 'stars', 'level', 'in_storage', 'fodder')
class EditMonsterInstanceForm(ModelForm):
    """Modal form for editing an existing MonsterInstance (stars, level,
    flags, skill levels and notes); owner and monster are not editable."""

    def __init__(self, *args, **kwargs):
        super(EditMonsterInstanceForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_method = 'post'
        self.helper.form_class = 'ajax-form'
        self.helper.layout = Layout(
            Div(
                Field('stars', css_class='rating hidden', value=1, data_start=0, data_stop=6, data_stars=6),
                FieldWithButtons(
                    Field('level', value=1, min=1, max=40),
                    StrictButton("Max", name="Set_Max_Level", data_stars_field=self['stars'].auto_id, data_level_field=self['level'].auto_id, data_set_max_level=''),
                ),
                Field('fodder', css_class='checkbox'),
                Field('in_storage', css_class='checkbox'),
                Field('ignore_for_fusion', css_class='checkbox'),
                'priority',
                'skill_1_level',
                'skill_2_level',
                'skill_3_level',
                'skill_4_level',
                Field('notes'),
            ),
            Div(
                FormActions(
                    Submit('save', 'Save', css_class='btn btn-primary'),
                    HTML("""<button class="btn btn-link" data-dismiss="modal">Cancel</button>"""),
                ),
            )
        )

    class Meta:
        model = MonsterInstance
        exclude = ('owner', 'monster')
class PowerUpMonsterInstanceForm(forms.Form):
    """Form for powering up or evolving a monster using other owned
    monsters as material."""

    monster = autocomplete_light.ModelMultipleChoiceField('MonsterInstanceAutocomplete')
    monster.label = 'Material Monsters'
    monster.required = False
    ignore_evolution = forms.BooleanField(
        label='Ignore evolution error checking',
        required=False,
    )

    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_class = 'ajax-form'
    helper.layout = Layout(
        Field('monster'),
        Field('ignore_evolution'),
        FormActions(
            # Two submit buttons; presumably the view dispatches on which
            # name ('power_up' vs 'evolve') is present in POST — verify.
            Submit('power_up', 'Power Up', css_class='btn btn-primary'),
            Submit('evolve', 'Evolve', css_class='btn btn-primary'),
        )
    )
class AwakenMonsterInstanceForm(forms.Form):
    """Confirmation form for awakening a monster, optionally deducting
    awakening materials from essence storage."""

    subtract_materials = forms.BooleanField(
        label='Subtract Materials from stock (Insufficient quantities will be reduced to 0)',
        required=False
    )

    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_class = 'ajax-form'
    helper.layout = Layout(
        Div(
            # checked='' renders the checkbox pre-checked.
            Field('subtract_materials', css_class='checkbox', checked=''),
        ),
        Div(
            FormActions(
                Submit('awaken', 'Awaken', css_class='btn btn-primary'),
                HTML("""<a href="{{ return_path }}" class="btn btn-link">Cancel</a>"""),
            ),
        )
    )
class AddTeamGroupForm(ModelForm):
    """Modal form for creating a TeamGroup (name only)."""

    def __init__(self, *args, **kwargs):
        super(AddTeamGroupForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_method = 'post'
        # helper.form_action must be set in view
        self.helper.layout = Layout(
            Div(
                Field('name'),
                css_class='modal-body',
            ),
            Div(
                FormActions(
                    Submit('save', 'Save', css_class='btn btn-primary'),
                    Button('cancel', 'Cancel', css_class='btn btn-link', data_dismiss='modal')
                ),
                css_class='modal-footer',
            )
        )

    class Meta:
        model = TeamGroup
        exclude = ('id', 'owner')
class EditTeamGroupForm(ModelForm):
    """Form for renaming a TeamGroup, with cancel and delete links.

    Template context must provide ``return_path``, ``profile_name`` and
    ``group_id`` for the raw HTML links below.
    """

    def __init__(self, *args, **kwargs):
        super(EditTeamGroupForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_method = 'post'
        self.helper.layout = Layout(
            Field('name'),
            FormActions(
                Submit('save', 'Save', css_class='btn btn-primary'),
                HTML("""<a href="{{ return_path }}" class="btn btn-link">Cancel</a>"""),
                HTML("""<a href="{% url 'herders:team_group_delete' profile_name=profile_name group_id=group_id%}" class="btn btn-danger pull-right">Delete</a>"""),
            ),
        )

    class Meta:
        model = TeamGroup
        exclude = ('id', 'owner')
class DeleteTeamGroupForm(forms.Form):
    """Confirmation form for deleting a TeamGroup; its teams can either be
    reassigned to another group or deleted along with it."""

    # NOTE(review): queryset spans ALL TeamGroups, not just the current
    # owner's — presumably the view narrows this before rendering; verify,
    # otherwise other users' group names are exposed in the dropdown.
    reassign_group = forms.ModelChoiceField(
        queryset=TeamGroup.objects.all(),
        required=False,
        label="Reassign teams in this group to:"
    )

    helper = FormHelper()
    helper.form_method = 'post'
    # helper.form_action must be set in view
    helper.layout = Layout(
        Field('reassign_group', css_class='input-sm'),
        FormActions(
            Submit('apply', 'Apply', css_class='btn btn-primary'),
            Submit('delete', 'Delete all teams', css_class='btn btn-danger'),
        )
    )
class EditTeamForm(ModelForm):
    """Form for editing a Team: group/name/favorite, description, leader
    and roster (leader/roster use monster-instance autocomplete widgets)."""

    def __init__(self, *args, **kwargs):
        super(EditTeamForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_method = 'post'
        self.helper.form_id = 'EditTeamForm'
        self.helper.layout = Layout(
            Div(
                Field('group'),
                Field('name'),
                Field('favorite'),
            ),
            Field('description'),
            Field('leader'),
            Field('roster'),
            FormActions(
                Submit('save', 'Save', css_class='btn btn-primary'),
            ),
        )

    class Meta:
        model = Team
        exclude = ('id',)
        widgets = {
            'roster': autocomplete_light.MultipleChoiceWidget('MonsterInstanceAutocomplete'),
            'leader': autocomplete_light.ChoiceWidget('MonsterInstanceAutocomplete'),
        }

    def clean(self):
        """Cross-field validation: the leader must not also appear in the roster.

        Fixes two defects in the original:
        - ``super().clean()`` was called last and its result discarded; it now
          runs first, per Django convention, and its result is returned.
        - ``leader in roster`` raised TypeError when the roster field itself
          failed validation (``roster`` is None in that case); both values are
          now guarded before the membership test.
        """
        from django.core.exceptions import ValidationError

        cleaned_data = super(EditTeamForm, self).clean()
        leader = self.cleaned_data.get('leader')
        roster = self.cleaned_data.get('roster')

        # Check that leader is not also in the roster
        if leader is not None and roster is not None and leader in roster:
            raise ValidationError(
                'Leader cannot be included in the roster as well',
                code='leader_in_roster'
            )
        return cleaned_data
class AddRuneInstanceForm(ModelForm):
    """Form for adding a RuneInstance.

    The layout renders its own <label> markup in a grid, so the automatic
    labels for the stat fields are suppressed. The fourteen copy-pasted
    ``label = False`` assignments are replaced with a single loop.
    """

    def __init__(self, *args, **kwargs):
        super(AddRuneInstanceForm, self).__init__(*args, **kwargs)

        # The layout below supplies its own labels for these fields.
        for field_name in (
                'stars',
                'main_stat', 'main_stat_value',
                'innate_stat', 'innate_stat_value',
                'substat_1', 'substat_1_value',
                'substat_2', 'substat_2_value',
                'substat_3', 'substat_3_value',
                'substat_4', 'substat_4_value',
                'assigned_to'):
            self.fields[field_name].label = False

        self.helper = FormHelper(self)
        self.helper.form_method = 'post'
        self.helper.form_id = 'addRuneForm'
        self.helper.form_class = 'ajax-form'
        self.helper.layout = Layout(
            # Left column: rune type picker rendered as image radio buttons.
            Div(
                Field('type', template="crispy/rune_button_radio_select.html"),
                css_class='col-lg-3',
            ),
            # Right column: slot/level/stars, then one row per stat.
            Div(
                Div(
                    Div(Field('slot', placeholder='1-6'), css_class='col-lg-4 col-lg-offset-3'),
                    Div(Field('level', placeholder='0-15'), css_class='col-lg-5'),
                    css_class='row'
                ),
                Div(
                    Div(HTML('<label>Stars</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div(Field('stars', css_class='rating hidden', value=1, data_start=0, data_stop=6, data_stars=6), css_class='col-lg-9'),
                    css_class='row'
                ),
                Div(
                    Div(HTML('<label>Stat Type</label>'), css_class='col-lg-4 col-lg-offset-3'),
                    Div(HTML('<label>Stat Value</label>'), css_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Main Stat</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Field('main_stat', wrapper_class='col-lg-4'),
                    Field('main_stat_value', wrapper_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Innate Stat</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div('innate_stat', css_class='col-lg-4'),
                    Div('innate_stat_value', css_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Substat 1</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div('substat_1', css_class='col-lg-4'),
                    Div('substat_1_value', css_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Substat 2</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div('substat_2', css_class='col-lg-4'),
                    Div('substat_2_value', css_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Substat 3</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div('substat_3', css_class='col-lg-4'),
                    Div('substat_3_value', css_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Substat 4</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div('substat_4', css_class='col-lg-4'),
                    Div('substat_4_value', css_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Assign To</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div(
                        Field('assigned_to'),
                        css_class='col-lg-9',
                    ),
                    css_class='row',
                ),
                css_class='col-lg-9',
            ),
            Div(css_class='clearfix'),
            FormActions(
                Submit('save', 'Save'),
            ),
        )

    class Meta:
        model = RuneInstance
        fields = (
            'type', 'stars', 'level', 'slot',
            'main_stat', 'main_stat_value',
            'innate_stat', 'innate_stat_value',
            'substat_1', 'substat_1_value',
            'substat_2', 'substat_2_value',
            'substat_3', 'substat_3_value',
            'substat_4', 'substat_4_value',
            'assigned_to',
        )
        widgets = {
            'assigned_to': autocomplete_light.ChoiceWidget('MonsterInstanceAutocomplete'),
        }
class AssignRuneForm(forms.Form):
    """Filter form used when picking a rune to assign to a monster instance.

    All fields are optional; double-underscore field names mirror the ORM
    lookups they are applied to (e.g. ``level__gte``).
    """
    type = forms.MultipleChoiceField(
        choices=RuneInstance.TYPE_CHOICES,
        widget=forms.CheckboxSelectMultiple,
        required=False,
    )
    level__gte = forms.IntegerField(
        label="Minimum Level",
        min_value=0,
        max_value=15,
        required=False,
    )
    # Bounded 0-6 to match the star rating widget below (data_start=0,
    # data_stop=6); previously unbounded, which accepted nonsensical values.
    stars__gte = forms.IntegerField(
        label="Minimum Stars",
        min_value=0,
        max_value=6,
        required=False,
    )
    slot = forms.IntegerField(
        min_value=1,
        max_value=6,
        required=False,
    )

    # crispy-forms rendering configuration; fields auto-submit on change.
    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_id = 'AssignRuneForm'
    helper.layout = Layout(
        StrictButton('Create New', id='addNewRune', css_class='btn btn-primary btn-block'),
        Reset('Reset Form', 'Reset Filters', css_class='btn btn-danger btn-block'),
        Field('type', css_class='auto-submit', template='crispy/rune_button_checkbox_select.html'),
        Field('slot', type='hidden', css_class='auto-submit'),
        Field('level__gte', css_class='auto-submit'),
        Field('stars__gte', css_class='rating hidden auto-submit', value=1, data_start=0, data_stop=6, data_stars=6),
    )
class FilterRuneForm(forms.Form):
    """Filter form for the rune inventory list.

    All fields are optional; double-underscore field names mirror the ORM
    lookups they are applied to (e.g. ``level__gte``).
    """
    type = forms.MultipleChoiceField(
        choices=RuneInstance.TYPE_CHOICES,
        widget=forms.CheckboxSelectMultiple,
        required=False,
    )
    level__gte = forms.IntegerField(
        label="Minimum Level",
        min_value=0,
        max_value=15,
        required=False,
    )
    # Bounded 0-6 to match the star rating widget below (data_start=0,
    # data_stop=6); previously unbounded, which accepted nonsensical values.
    stars__gte = forms.IntegerField(
        label="Minimum Stars",
        min_value=0,
        max_value=6,
        required=False,
    )
    slot = forms.IntegerField(
        min_value=1,
        max_value=6,
        required=False,
    )

    # crispy-forms rendering configuration; fields auto-submit on change.
    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_id = 'FilterInventoryForm'
    helper.layout = Layout(
        Reset('Reset Form', 'Reset Filters', css_class='btn btn-danger btn-block'),
        Field('type', css_class='auto-submit', template='crispy/rune_button_checkbox_select.html'),
        Field('slot', css_class='auto-submit'),
        Field('level__gte', css_class='auto-submit'),
        Field('stars__gte', css_class='rating hidden auto-submit', value=1, data_start=0, data_stop=6, data_stars=6),
    )
|
# -*- coding: utf-8 -*-
import re
from flask import g
import flaskext.wtf as wtf
from lastuserapp.utils import valid_username
from lastuserapp.models import User, UserEmail, UserEmailClaim, getuser
class PasswordResetRequestForm(wtf.Form):
    """Ask for the account (by username or email) whose password must be reset."""
    username = wtf.TextField('Username or Email', validators=[wtf.Required()])

    def validate_username(self, field):
        """Resolve the given id to a user, storing it in self.user for the view."""
        user = getuser(field.data)
        if user is None:
            # Call-style raise is valid on both Python 2 and 3; the old
            # "raise E, msg" comma form is a syntax error on Python 3.
            raise wtf.ValidationError("Could not find a user with that id")
        self.user = user
class PasswordResetForm(wtf.Form):
    """Set a new password after a reset request has been verified."""
    password = wtf.PasswordField('New password', validators=[wtf.Required()])
    # EqualTo makes validation fail unless both entries match.
    confirm_password = wtf.PasswordField('Confirm password',
        validators=[wtf.Required(), wtf.EqualTo('password')])
class PasswordChangeForm(wtf.Form):
    """Change the logged-in user's password after verifying the current one."""
    old_password = wtf.PasswordField('Current password', validators=[wtf.Required()])
    password = wtf.PasswordField('New password', validators=[wtf.Required()])
    # EqualTo makes validation fail unless both entries match.
    confirm_password = wtf.PasswordField('Confirm password',
        validators=[wtf.Required(), wtf.EqualTo('password')])

    def validate_old_password(self, field):
        """Reject the change when nobody is logged in or the current password is wrong."""
        # Call-style raises are Python 3 compatible (the comma form is py2-only).
        if g.user is None:
            raise wtf.ValidationError("Not logged in")
        if not g.user.password_is(field.data):
            raise wtf.ValidationError("Incorrect password")
class ProfileForm(wtf.Form):
    """Edit the logged-in user's profile data."""
    fullname = wtf.TextField('Full name', validators=[wtf.Required()])
    username = wtf.TextField('Username (optional)', validators=[wtf.Optional()])
    description = wtf.TextAreaField('Bio')

    def validate_username(self, field):
        """Ensure the requested username is well-formed and not taken by someone else."""
        if not valid_username(field.data):
            # BUG FIX: this error was previously *returned*, which silently
            # accepted invalid usernames; validators must raise to reject.
            raise wtf.ValidationError("Invalid characters in username")
        if field.data == g.user.username:
            # Unchanged username: no uniqueness check needed.
            return
        existing = User.query.filter_by(username=field.data).first()
        if existing is not None:
            raise wtf.ValidationError("That username is taken")
class NewEmailAddressForm(wtf.Form):
    """Add a new email address to the logged-in user's account."""
    email = wtf.html5.EmailField('Email address', validators=[wtf.Required(), wtf.Email()])

    def validate_email(self, field):
        """Reject addresses that are already registered, claimed, or pending."""
        # Call-style raises for Python 3 compatibility (comma form is py2-only).
        existing = UserEmail.query.filter_by(email=field.data).first()
        if existing is not None:
            if existing.user == g.user:
                raise wtf.ValidationError("You have already registered this email address.")
            else:
                raise wtf.ValidationError("That email address has already been claimed.")
        existing = UserEmailClaim.query.filter_by(email=field.data, user=g.user).first()
        if existing is not None:
            raise wtf.ValidationError("That email address is pending verification.")
Bugfix: raise ValidationError instead of returning it.
<div class=""></div>
# -*- coding: utf-8 -*-
import re
from flask import g
import flaskext.wtf as wtf
from lastuserapp.utils import valid_username
from lastuserapp.models import User, UserEmail, UserEmailClaim, getuser
class PasswordResetRequestForm(wtf.Form):
    """Ask for the account (by username or email) whose password must be reset."""
    username = wtf.TextField('Username or Email', validators=[wtf.Required()])

    def validate_username(self, field):
        """Resolve the given id to a user, storing it in self.user for the view."""
        user = getuser(field.data)
        if user is None:
            # Call-style raise is valid on both Python 2 and 3; the old
            # "raise E, msg" comma form is a syntax error on Python 3.
            raise wtf.ValidationError("Could not find a user with that id")
        self.user = user
class PasswordResetForm(wtf.Form):
    """Set a new password after a reset request has been verified."""
    password = wtf.PasswordField('New password', validators=[wtf.Required()])
    # EqualTo makes validation fail unless both entries match.
    confirm_password = wtf.PasswordField('Confirm password',
        validators=[wtf.Required(), wtf.EqualTo('password')])
class PasswordChangeForm(wtf.Form):
    """Change the logged-in user's password after verifying the current one."""
    old_password = wtf.PasswordField('Current password', validators=[wtf.Required()])
    password = wtf.PasswordField('New password', validators=[wtf.Required()])
    # EqualTo makes validation fail unless both entries match.
    confirm_password = wtf.PasswordField('Confirm password',
        validators=[wtf.Required(), wtf.EqualTo('password')])

    def validate_old_password(self, field):
        """Reject the change when nobody is logged in or the current password is wrong."""
        # Call-style raises are Python 3 compatible (the comma form is py2-only).
        if g.user is None:
            raise wtf.ValidationError("Not logged in")
        if not g.user.password_is(field.data):
            raise wtf.ValidationError("Incorrect password")
class ProfileForm(wtf.Form):
    """Edit the logged-in user's profile data."""
    fullname = wtf.TextField('Full name', validators=[wtf.Required()])
    username = wtf.TextField('Username (optional)', validators=[wtf.Optional()])
    description = wtf.TextAreaField('Bio')

    def validate_username(self, field):
        """Ensure the requested username is well-formed and not taken by someone else."""
        # Call-style raises for Python 3 compatibility (comma form is py2-only).
        if not valid_username(field.data):
            raise wtf.ValidationError("Invalid characters in username")
        if field.data == g.user.username:
            # Unchanged username: no uniqueness check needed.
            return
        existing = User.query.filter_by(username=field.data).first()
        if existing is not None:
            raise wtf.ValidationError("That username is taken")
class NewEmailAddressForm(wtf.Form):
    """Add a new email address to the logged-in user's account."""
    email = wtf.html5.EmailField('Email address', validators=[wtf.Required(), wtf.Email()])

    def validate_email(self, field):
        """Reject addresses that are already registered, claimed, or pending."""
        # Call-style raises for Python 3 compatibility (comma form is py2-only).
        existing = UserEmail.query.filter_by(email=field.data).first()
        if existing is not None:
            if existing.user == g.user:
                raise wtf.ValidationError("You have already registered this email address.")
            else:
                raise wtf.ValidationError("That email address has already been claimed.")
        existing = UserEmailClaim.query.filter_by(email=field.data, user=g.user).first()
        if existing is not None:
            raise wtf.ValidationError("That email address is pending verification.")
|
from coverage.misc import join_regex, NoSource
from coverage.parser import CodeParser
from coverage.results import Analysis, Numbers
import os
import sys
from django.template import Lexer, Token
from django.template.base import TOKEN_BLOCK, TOKEN_VAR
from dtcov.dt_django import read_file
class DjangoTemplateAnalysis(Analysis):
    """Coverage analysis of a single Django template file.

    Reads the template source, parses it with DjangoTemplateCodeParser and
    computes the statement/branch Numbers the reporting machinery expects.
    """

    def __init__(self, cov, code_unit):
        """
        Args:
            cov: The coverage object owning the collected execution data.
            code_unit: The code unit (template file) to analyze.

        Raises:
            NoSource: If the template file is missing or unreadable.
        """
        self.coverage = cov
        self.code_unit = code_unit
        self.filename = self.code_unit.filename
        # BUG FIX: the source was stored on self.source while a stale local
        # variable (always None) was being passed to the parser below; the
        # attribute is now initialized and used consistently.
        self.source = None
        if os.path.exists(self.filename):
            try:
                self.source = read_file(self.filename)
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt/SystemExit
                # are not converted into NoSource.
                _, err, _ = sys.exc_info()
                raise NoSource(
                    "No source for code: %r: %s" % (self.filename, err)
                )
        if self.source is None:
            raise NoSource("No source for code: %r" % self.filename)
        self.parser = DjangoTemplateCodeParser(
            text=self.source, filename=self.filename,
            exclude=self.coverage._exclude_regex('exclude')
        )
        self.statements, self.excluded = self.parser.parse_source()
        # Identify missing statements.
        executed = self.coverage.data.executed_lines(self.filename)
        self.missing = sorted(set(self.statements) - set(executed))
        if self.coverage.data.has_arcs():
            self.no_branch = self.parser.lines_matching(
                join_regex(self.coverage.config.partial_list),
                join_regex(self.coverage.config.partial_always_list)
            )
            n_branches = self.total_branches()
            mba = self.missing_branch_arcs()
            # Branches on lines that are missing entirely are already counted
            # as missing statements, so they are excluded here.
            n_missing_branches = sum(
                [len(v) for k, v in mba.items() if k not in self.missing]
            )
        else:
            n_branches = n_missing_branches = 0
            self.no_branch = set()
        self.numbers = Numbers(
            n_files=1,
            n_statements=len(self.statements),
            n_excluded=len(self.excluded),
            n_missing=len(self.missing),
            n_branches=n_branches,
            n_missing_branches=n_missing_branches,
        )
class DjangoTemplateCodeParser(CodeParser):
    """Find the 'executable' lines of a Django template."""

    def __init__(self, text=None, filename=None, exclude=None):
        super(DjangoTemplateCodeParser, self).__init__(text, filename, exclude)

    def _get_byte_parser(self):
        # Templates have no compiled Python counterpart.
        return None

    def parse_source(self):
        """Return (statement lines, excluded lines) for the template.

        A line counts as a statement when it holds a {% block %} or
        {{ var }} token, except tokens inside {% comment %}...{% endcomment %}
        and pure closing tags ({% endif %}, {% endfor %}, ...), which do not
        execute anything on their own.
        """
        source_lines = set()
        lexer = Lexer(self.text, "<string>")
        tokens = lexer.tokenize()
        comment = False
        for token in tokens:
            assert isinstance(token, Token)
            if token.token_type == TOKEN_BLOCK:
                if token.contents == 'comment':
                    comment = True
                    continue
                elif token.contents == 'endcomment':
                    comment = False
                    continue
            if comment:
                continue
            if token.token_type == TOKEN_BLOCK or token.token_type == TOKEN_VAR:
                # FIX: do not count closing tags — they are structural, not
                # executable, and inflated the statement total.
                if token.token_type == TOKEN_BLOCK and token.contents.startswith('end'):
                    continue
                source_lines.add(token.lineno)
        return tuple(sorted(source_lines)), ()

    def _raw_parse(self):
        pass

    def first_line(self, line):
        return line

    def arcs(self):
        return []
Don't count template end tags as executable lines.
from coverage.misc import join_regex, NoSource
from coverage.parser import CodeParser
from coverage.results import Analysis, Numbers
import os
import sys
from django.template import Lexer, Token
from django.template.base import TOKEN_BLOCK, TOKEN_VAR
from dtcov.dt_django import read_file
class DjangoTemplateAnalysis(Analysis):
    """Coverage analysis of a single Django template file.

    Reads the template source, parses it with DjangoTemplateCodeParser and
    computes the statement/branch Numbers the reporting machinery expects.
    """

    def __init__(self, cov, code_unit):
        """
        Args:
            cov: The coverage object owning the collected execution data.
            code_unit: The code unit (template file) to analyze.

        Raises:
            NoSource: If the template file is missing or unreadable.
        """
        self.coverage = cov
        self.code_unit = code_unit
        self.filename = self.code_unit.filename
        # BUG FIX: the source was stored on self.source while a stale local
        # variable (always None) was being passed to the parser below; the
        # attribute is now initialized and used consistently.
        self.source = None
        if os.path.exists(self.filename):
            try:
                self.source = read_file(self.filename)
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt/SystemExit
                # are not converted into NoSource.
                _, err, _ = sys.exc_info()
                raise NoSource(
                    "No source for code: %r: %s" % (self.filename, err)
                )
        if self.source is None:
            raise NoSource("No source for code: %r" % self.filename)
        self.parser = DjangoTemplateCodeParser(
            text=self.source, filename=self.filename,
            exclude=self.coverage._exclude_regex('exclude')
        )
        self.statements, self.excluded = self.parser.parse_source()
        # Identify missing statements.
        executed = self.coverage.data.executed_lines(self.filename)
        self.missing = sorted(set(self.statements) - set(executed))
        if self.coverage.data.has_arcs():
            self.no_branch = self.parser.lines_matching(
                join_regex(self.coverage.config.partial_list),
                join_regex(self.coverage.config.partial_always_list)
            )
            n_branches = self.total_branches()
            mba = self.missing_branch_arcs()
            # Branches on lines that are missing entirely are already counted
            # as missing statements, so they are excluded here.
            n_missing_branches = sum(
                [len(v) for k, v in mba.items() if k not in self.missing]
            )
        else:
            n_branches = n_missing_branches = 0
            self.no_branch = set()
        self.numbers = Numbers(
            n_files=1,
            n_statements=len(self.statements),
            n_excluded=len(self.excluded),
            n_missing=len(self.missing),
            n_branches=n_branches,
            n_missing_branches=n_missing_branches,
        )
class DjangoTemplateCodeParser(CodeParser):
    """Determine which lines of a Django template count as executable."""

    def __init__(self, text=None, filename=None, exclude=None):
        super(DjangoTemplateCodeParser, self).__init__(text, filename, exclude)

    def _get_byte_parser(self):
        # Templates are not compiled to Python bytecode.
        return None

    def parse_source(self):
        """Return (executable lines, excluded lines) for the template.

        Lines holding a {% block %} or {{ var }} token are executable, except
        tokens inside {% comment %} sections and closing {% end... %} tags.
        """
        executable = set()
        in_comment = False
        for tok in Lexer(self.text, "<string>").tokenize():
            assert isinstance(tok, Token)
            is_block = tok.token_type == TOKEN_BLOCK
            # Toggle comment-skipping on the comment delimiters themselves.
            if is_block and tok.contents == 'comment':
                in_comment = True
                continue
            if is_block and tok.contents == 'endcomment':
                in_comment = False
                continue
            if in_comment:
                continue
            if tok.token_type not in (TOKEN_BLOCK, TOKEN_VAR):
                continue
            # Closing tags are structural only — never counted.
            if is_block and tok.contents.startswith('end'):
                continue
            executable.add(tok.lineno)
        return tuple(sorted(executable)), ()

    def _raw_parse(self):
        pass

    def first_line(self, line):
        return line

    def arcs(self):
        return []
|
# Created by JHJ on 2016. 10. 5.
# URL configuration for the board app.
from django.conf.urls import url

from . import views

# Namespace used for reversing, e.g. reverse('board:post_list').
app_name = 'board'

urlpatterns = [
    url(r'^$', views.board_list, name='board_list'),
    url(r'^(?P<board_slug>[-\w]+)/$', views.post_list, name='post_list'),
    url(r'^(?P<board_slug>[-\w]+)/new/$', views.new_post, name='new_post'),
    url(r'^(?P<board_slug>[-\w]+)/(?P<post_id>\d+)/delete/$', views.delete_post, name='delete_post'),
    url(r'^(?P<board_slug>[-\w]+)/(?P<post_id>\d+)/$', views.view_post, name='view_post'),
    url(r'^(?P<board_slug>[-\w]+)/(?P<post_id>\d+)/comment/new/$', views.new_comment, name='new_comment'),
    # NOTE(review): delete_comment is addressed by post_id only, unlike
    # delete_post which also takes board_slug — confirm the asymmetry is intended.
    url(r'^(?P<post_id>\d+)/comment/delete/$', views.delete_comment, name='delete_comment'),
]
Remove the board_slug parameter from the 'delete_post' URL.
# Created by JHJ on 2016. 10. 5.
# URL configuration for the board app.
from django.conf.urls import url

from . import views

# Namespace used for reversing, e.g. reverse('board:post_list').
app_name = 'board'

urlpatterns = [
    url(r'^$', views.board_list, name='board_list'),
    url(r'^(?P<board_slug>[-\w]+)/$', views.post_list, name='post_list'),
    url(r'^(?P<board_slug>[-\w]+)/new/$', views.new_post, name='new_post'),
    # Posts and comments are deleted by id alone; no board_slug needed.
    url(r'^(?P<post_id>\d+)/delete/$', views.delete_post, name='delete_post'),
    url(r'^(?P<board_slug>[-\w]+)/(?P<post_id>\d+)/$', views.view_post, name='view_post'),
    url(r'^(?P<board_slug>[-\w]+)/(?P<post_id>\d+)/comment/new/$', views.new_comment, name='new_comment'),
    url(r'^(?P<post_id>\d+)/comment/delete/$', views.delete_comment, name='delete_comment'),
]
|
# -*- coding: UTF-8 -*-
"""Run configuration files with OSIRIS."""
from os import path, remove, walk, listdir
from shutil import copyfile
import subprocess
from time import sleep, time
import re
from glob import glob
from ..common import ensure_dir_exists, ensure_executable, ifd, tail, logger, get_dir_size, human_order_key, MPCaller, \
Call
import psutil
# Path to osiris executables - guessed later in the code
osiris_1d = ""
"""str: Path to the osiris-1D.e file"""
osiris_2d = ""
"""str: Path to the osiris-2D.e file"""
osiris_3d = ""
"""str: Path to the osiris-3D.e file"""
def set_osiris_path(folder, warn=True):
    """Point the module-level osiris executable paths at the given folder.

    Each of osiris-1D.e/2D.e/3D.e found in *folder* updates the matching
    module global; missing ones are left untouched (and warned about).
    """
    global osiris_1d, osiris_2d, osiris_3d
    if not path.isdir(folder):
        if warn:
            logger.warning("%s is not an existing folder." % folder)
        return
    found = {}
    for dim in ("1D", "2D", "3D"):
        exe = path.join(folder, "osiris-%s.e" % dim)
        if path.isfile(exe):
            found[dim] = exe
        elif warn:
            logger.warning("osiris-%s not found in %s" % (dim, folder))
    if "1D" in found:
        osiris_1d = found["1D"]
    if "2D" in found:
        osiris_2d = found["2D"]
    if "3D" in found:
        osiris_3d = found["3D"]
def _find_running_exe(exe):
    """List the pids of all running processes whose executable path equals *exe*."""
    pids = []
    for proc in psutil.process_iter():
        try:
            info = proc.as_dict(attrs=['pid', 'exe'])
        except psutil.NoSuchProcess:
            # The process vanished while being inspected; skip it.
            continue
        if info["exe"] and info["exe"] == exe:
            pids.append(info["pid"])
    return pids
class Run:
    """
    An osiris run.

    Attributes:
        run_dir (str): Directory where the run takes place.
        total_steps (int): Amount of time steps in the simulation.
        process (psutil.Process): Representation of the process running the simulation. If no process is found it will
                                  be None. Only in that case, methods that update the state of simulation will check if
                                  a process has spawned when called.

    Notes:
        Only single-process runs are supported at the moment. Resuming runs are neither supported yet.
    """

    def __init__(self, run_dir):
        """
        Create a Run instance.

        Args:
            run_dir (str): Path where the OSIRIS run takes place. An os-stdin file must exist there.

        Raises:
            ValueError: If no os-stdin is found.
        """
        self.run_dir = run_dir
        try:
            with open(path.join(run_dir, "os-stdin"), "r") as f:
                text = f.read()
                # In these patterns group(4) captures the value between '=' and the following ','.
                r = re.match(r".*time_step(.*?){(.*?)dt(.*?)=(.*?),(.*?)}", text, re.DOTALL + re.MULTILINE)
                if not r:
                    raise ValueError("No dt found in os-stdin.")
                dt = float(r.group(4))
                r = re.match(r".*time(.*?){(.*?)tmin(.*?)=(.*?),(.*?)}", text, re.DOTALL + re.MULTILINE)
                # tmin is optional; defaults to 0.0 when absent.
                t_min = float(r.group(4)) if r else 0.0
                r = re.match(r".*time(.*?){(.*?)tmax(.*?)=(.*?),(.*?)}", text, re.DOTALL + re.MULTILINE)
                if not r:
                    raise ValueError("No tmax found in os-stdin. Default value 0.0 is trivial.")
                t_max = float(r.group(4))
                # Number of dt-sized steps from t_min to t_max (plus the initial step).
                self.total_steps = int((t_max - t_min) // dt) + 1
        except FileNotFoundError:
            raise ValueError("No os-stdin file in %s" % run_dir)
        self.update()

    def __repr__(self):
        if self.is_running():
            return "Run<%s (%s/%d)>" % (self.run_dir, self.current_step(), self.total_steps)
        elif self.has_error():
            # The run started but failed
            return "Run<%s [FAILED]>" % (self.run_dir,)
        elif self.is_finished():
            # The run was finished
            return "Run<%s> [FINISHED]" % (self.run_dir,)
        else:
            # No process was detected. Perhaps it is set to start later, but this should remain unkown for the object
            return "Run<%s> [NOT STARTED]" % (self.run_dir,)

    def update(self):
        """Update the process info using what is found at the moment."""
        candidates = _find_running_exe(path.join(self.run_dir, "osiris"))
        try:
            if not candidates:  # No process running found
                self.process = None
            elif len(candidates) > 1:
                logger.warning("More than one pid was found for the run.\n"
                               "Multiple processes are not really handled by duat yet, do not trust what you see.")
                self.process = psutil.Process(candidates[0])
            else:
                self.process = psutil.Process(candidates[0])
        except psutil.NoSuchProcess:
            # If the process have died before processing was completed.
            self.process = None

    def current_step(self):
        """
        Find the current simulation step.

        Returns:
            int: The simulation step or -1 if it could not be found.
        """
        # tail returns the last lines of out.txt as a list of strings.
        last_line = tail(path.join(self.run_dir, "out.txt"), 8)
        if not last_line:  # Empty file
            return -1
        if re.search("now at t", last_line[-1]):
            # Extract the step counter from a line like "... n = 123".
            return int(re.match(r".* n = *(.*?)$", last_line[-1]).group(1))
        elif " Osiris run completed normally\n" in last_line:
            return self.total_steps
        else:
            return -1

    def is_running(self):
        """Return True if the simulation is known to be running, or False otherwise."""
        if self.process is None:
            # Try to find a process only if none was found when the instance was created
            self.update()
            if self.process is None:
                return False
            else:
                return self.process.is_running()
        return self.process.is_running()

    def is_finished(self):
        # The timings file is only present once a run has completed.
        if self.is_running():
            return False
        else:
            if path.isfile(path.join(self.run_dir, "TIMINGS", "timings.001")):
                return True
            else:
                return False

    def terminate(self):
        """Terminate the OSIRIS process (if running)."""
        if self.process is not None:
            if self.process.is_running():
                try:
                    self.process.terminate()
                except psutil.NoSuchProcess:
                    # The process has just terminated
                    pass
            else:
                logger.warning("The process had already stopped")
        else:
            logger.warning("Asked for termination of a Run with no known process")

    def kill(self):
        """
        Abruptly terminate the OSIRIS process (if running).

        The :func:`~duat.osiris.run.Run.terminate` method should be used instead to perform a cleaner exit.
        """
        if self.process is not None:
            if self.process.is_running():
                try:
                    self.process.kill()
                except psutil.NoSuchProcess:
                    # The process has just terminated
                    pass
            else:
                logger.warning("The process had already stopped")
        else:
            logger.warning("Asked for termination of a Run with no known process")

    def estimated_time(self):
        """
        Estimated time to end the simulation in seconds.

        The estimation uses a linear model and considers initialization negligible.
        The modification time of the os-stdin file is used in the calculation. If altered, estimation will be
        meaningless.

        Returns:
            float: The estimation of the time to end the simulation or NaN if no estimation could be done.
        """
        if not self.is_running():  # Already finished
            return 0
        else:
            current = self.current_step()
            if current <= 0:  # If not started or error
                return float('nan')
            else:
                # Wall time since launch, assuming os-stdin was written at launch time.
                elapsed = time() - path.getmtime(path.join(self.run_dir, "os-stdin"))
                # Linear extrapolation over the remaining fraction of steps.
                return elapsed * (self.total_steps / current - 1)

    def real_time(self):
        """Find the total time in seconds taken by the simulation if it has finished, otherwise returning nan."""
        try:
            # TODO: Update for resuming runs
            with open(path.join(self.run_dir, "TIMINGS", "timings.001"), "r") as f:
                text = f.read()
            r = re.match(r" Total time for loop was(?: *)(.*?)(?: *)seconds", text, re.DOTALL + re.MULTILINE)
            if not r:
                logger.warning("Bad format in timings file. The real time could not be read.")
                return float("nan")
            else:
                return float(r.group(1))
        except FileNotFoundError:
            return float("nan")

    def get_size(self):
        """Get the size of all run data in bytes."""
        return get_dir_size(self.run_dir)

    def has_error(self):
        """Search for common error messages in the output file."""
        # TODO: Cache result if reached execution with no error
        try:
            with open(path.join(self.run_dir, "out.txt"), "r") as f:
                text = f.read()
            # TODO: Optimize this search
            if "(*error*)" in text or re.search("Error reading .* parameters", text) or re.search(
                    "MPI_ABORT was invoked",
                    text):
                return True
            else:
                return False
        except FileNotFoundError:
            return False
def open_run_list(base_path, filter=None):
    """
    Create a Run instance for each of the subdirectories in the given path.

    Args:
        base_path (str): Path where the runs are found.
        filter (str): Filter the directories using a UNIX-like pattern.

    Returns:
        list of `Run`: A list with the Run instances, ordered so their paths are in human order.
    """
    subdirs = listdir(base_path)
    if filter is not None and subdirs:
        # Keep only the directory names matched by the glob pattern.
        matched = glob(path.join(base_path, filter))
        allowed = set(path.basename(m) for m in matched)
        subdirs = [d for d in subdirs if d in allowed]
    if not subdirs:
        return []
    subdirs.sort(key=human_order_key)
    return [Run(path.join(base_path, d)) for d in subdirs]
def _execute_run(prefix, osiris_path, run_dir, run_object=None):
"""Execute and wait for a run to finish, optionally updating a Run instance when the call is made."""
# Cf. run_config
p = subprocess.Popen(prefix + osiris_path + " > out.txt 2> err.txt", shell=True, cwd=path.abspath(run_dir))
if run_object is not None:
sleep(0.2)
print("Updating")
run_object.update()
p.wait()
def run_config(config, run_dir, prefix=None, clean_dir=True, blocking=None, force=None, mpcaller=None):
    """
    Initiate a OSIRIS run from a config instance.

    Args:
        config (`ConfigFile`): The instance describing the configuration file.
        run_dir (str): Folder where the run is carried.
        prefix (str): A prefix to run the command (e.g., "qsub", ...).
        clean_dir (bool): Whether to remove the files in the directory before execution.
        blocking (bool): Whether to wait for the run to finish.
        force (str): Set what to do if a running executable is found in the directory. Set to "ignore" to launch anyway,
                     possibly resulting in multiple instances running simultaneously; set to "kill" to terminate the
                     existing processes.
        mpcaller (MPCaller): An instance controlling multithreaded calls. If supplied, all calls will be handled by this
                             instance and the blocking parameter will be ignored.

    Returns:
        tuple: A Run instance describing the execution.
    """
    # Refuse (or handle per `force`) a directory where an osiris exe is already running.
    candidates = _find_running_exe(path.join(run_dir, "osiris"))
    if candidates:
        if force == "ignore":
            logger.warning("Ignored %d running exe found in %s" % (len(candidates), run_dir))
        elif force == "kill":
            logger.warning("Killing %d running exe found in %s" % (len(candidates), run_dir))
            for c in candidates:
                try:
                    psutil.Process(c).terminate()
                except psutil.NoSuchProcess:
                    pass  # If just ended
        else:
            logger.warning("Running exe found in %s. Aborting launch." % run_dir)
            return
    if clean_dir:
        # First pass: remove every file under run_dir (directories are kept).
        for root, dirs, files in walk(run_dir):
            for f in files:
                remove(path.join(root, f))
        # Second pass: anything still present could not be removed; warn about it.
        for root, dirs, files in walk(run_dir):
            for f in files:
                logger.warning("Could not remove file %s" % f)
    ensure_dir_exists(run_dir)
    config.write(path.join(run_dir, "os-stdin"))
    osiris_path = path.abspath(path.join(run_dir, "osiris"))
    # Pick the executable matching the dimensionality of the configuration.
    osiris = ifd(config.get_d(), osiris_1d, osiris_2d, osiris_3d)
    copyfile(osiris, osiris_path)
    ensure_executable(osiris_path)
    if not prefix:  # None or ""
        prefix = ""
    elif prefix[-1] != " ":
        prefix += " "
    if mpcaller is not None:
        run = Run(run_dir)
        # Set the run instance to update the process info when the call is made.
        mpcaller.add_call(Call(_execute_run, prefix, osiris_path, run_dir, run_object=run))
        return run
    else:
        proc = subprocess.Popen(prefix + osiris_path + " > out.txt 2> err.txt", shell=True, cwd=path.abspath(run_dir))
        if blocking:
            proc.wait()
        else:  # Sleep a little to check for quickly appearing errors and to allow the shell to start osiris
            sleep(0.2)
        # BEWARE: Perhaps under extreme circumstances, OSIRIS might have not started despite sleeping.
        # This could be solved reinstantiating RUN. Consider it a feature instead of a bug :P
        run = Run(run_dir)
        # Try to detect errors checking the output
        if run.has_error():
            logger.warning(
                "Error detected while launching %s.\nCheck out.txt there for more information or re-run in console." % run_dir)
        return run
def run_variation(config, variation, run_base, caller=None, **kwargs):
    """
    Make consecutive calls to :func:`~duat.osiris.run.run_config` with ConfigFiles generated from a variation.

    Args:
        config (`ConfigFile`): Base configuration file.
        variation (`Variation`): Description of the variations to apply.
        run_base (str): Path to the directory where the runs will take place, each in a folder named var_number.
        caller (int or `MPCaller`): If supplied, the calls will be managed by a MPCaller instance. If an int is provided
                                    an MPCaller with such a number of threads will be created. Provide an instance if
                                    interested in further controlling.
        **kwargs: Keyword arguments to pass to :func:`~duat.osiris.run.run_config`

    Returns:
        list: List with the return values of each call.
    """
    if caller is None:
        # Sequential launch: one run_config call per generated configuration.
        return [run_config(cfg, path.join(run_base, "var_" + str(idx)), **kwargs)
                for idx, cfg in enumerate(variation.get_generator(config))]
    # Parallel launch through an MPCaller (created here when an int is given).
    _caller = MPCaller(caller) if isinstance(caller, int) else caller
    results = []
    for idx, cfg in enumerate(variation.get_generator(config)):
        results.append(run_config(cfg, path.join(run_base, "var_" + str(idx)), mpcaller=_caller, **kwargs))
    if isinstance(caller, int):
        # If the MPCaller was created in this method, threads should die after execution.
        # Nevertheless, processes seem not to be discarded until a new call to this method is made.
        _caller.wait_calls(blocking=False)
    return results
# Try to guess the OSIRIS location:
# BUG FIX: the system-wide candidate previously used path.join("usr", ...),
# producing the *relative* path "usr/local/osiris/bin", which never matched;
# it must be the absolute /usr/local/osiris/bin.
for t in [path.join(path.expanduser("~"), "osiris", "bin"),
          path.join("/usr", "local", "osiris", "bin")]:
    set_osiris_path(t, warn=False)
    if osiris_1d and osiris_2d and osiris_3d:
        break

if not (osiris_1d and osiris_2d and osiris_3d):
    if not (osiris_1d or osiris_2d or osiris_3d):
        logger.warning("Warning: no OSIRIS executables were found.")
    else:
        if not osiris_1d:
            logger.warning("Warning: osiris-1D.e not found.")
        if not osiris_2d:
            logger.warning("Warning: osiris-2D.e not found.")
        if not osiris_3d:
            logger.warning("Warning: osiris-3D.e not found.")
    logger.warning(
        "Use the function set_osiris_path or set the variables run.osiris_1d and so to allow the run module to work.")
Add environment variable for the OSIRIS path
# -*- coding: UTF-8 -*-
"""Run configuration files with OSIRIS."""
from os import path, remove, walk, listdir, environ
from shutil import copyfile
import subprocess
from time import sleep, time
import re
from glob import glob
from ..common import ensure_dir_exists, ensure_executable, ifd, tail, logger, get_dir_size, human_order_key, MPCaller, \
Call
import psutil
# Path to osiris executables - guessed later in the code
osiris_1d = ""
"""str: Path to the osiris-1D.e file"""
osiris_2d = ""
"""str: Path to the osiris-2D.e file"""
osiris_3d = ""
"""str: Path to the osiris-3D.e file"""
def set_osiris_path(folder, warn=True):
    """Point the module-level osiris executable paths at the given folder.

    Each of osiris-1D.e/2D.e/3D.e found in *folder* updates the matching
    module global; missing ones are left untouched (and warned about).
    """
    global osiris_1d, osiris_2d, osiris_3d
    if not path.isdir(folder):
        if warn:
            logger.warning("%s is not an existing folder." % folder)
        return
    found = {}
    for dim in ("1D", "2D", "3D"):
        exe = path.join(folder, "osiris-%s.e" % dim)
        if path.isfile(exe):
            found[dim] = exe
        elif warn:
            logger.warning("osiris-%s not found in %s" % (dim, folder))
    if "1D" in found:
        osiris_1d = found["1D"]
    if "2D" in found:
        osiris_2d = found["2D"]
    if "3D" in found:
        osiris_3d = found["3D"]
def _find_running_exe(exe):
    """List the pids of all running processes whose executable path equals *exe*."""
    pids = []
    for proc in psutil.process_iter():
        try:
            info = proc.as_dict(attrs=['pid', 'exe'])
        except psutil.NoSuchProcess:
            # The process vanished while being inspected; skip it.
            continue
        if info["exe"] and info["exe"] == exe:
            pids.append(info["pid"])
    return pids
class Run:
"""
An osiris run.
Attributes:
run_dir (str): Directory where the run takes place.
total_steps (int): Amount of time steps in the simulation.
process (psutil.Process): Representation of the process running the simulation. If no process is found it will
be None. Only in that case, methods that update the state of simulation will check if
a process has spawned when called.
Notes:
Only single-process runs are supported at the moment. Resuming runs are neither supported yet.
"""
def __init__(self, run_dir):
"""
Create a Run instance.
Args:
run_dir (str): Path where the OSIRIS run takes place. An os-stdin file must exist there.
Raises:
ValueError: If no os-stdin is found.
"""
self.run_dir = run_dir
try:
with open(path.join(run_dir, "os-stdin"), "r") as f:
text = f.read()
r = re.match(r".*time_step(.*?){(.*?)dt(.*?)=(.*?),(.*?)}", text, re.DOTALL + re.MULTILINE)
if not r:
raise ValueError("No dt found in os-stdin.")
dt = float(r.group(4))
r = re.match(r".*time(.*?){(.*?)tmin(.*?)=(.*?),(.*?)}", text, re.DOTALL + re.MULTILINE)
t_min = float(r.group(4)) if r else 0.0
r = re.match(r".*time(.*?){(.*?)tmax(.*?)=(.*?),(.*?)}", text, re.DOTALL + re.MULTILINE)
if not r:
raise ValueError("No tmax found in os-stdin. Default value 0.0 is trivial.")
t_max = float(r.group(4))
self.total_steps = int((t_max - t_min) // dt) + 1
except FileNotFoundError:
raise ValueError("No os-stdin file in %s" % run_dir)
self.update()
def __repr__(self):
if self.is_running():
return "Run<%s (%s/%d)>" % (self.run_dir, self.current_step(), self.total_steps)
elif self.has_error():
# The run started but failed
return "Run<%s [FAILED]>" % (self.run_dir,)
elif self.is_finished():
# The run was finished
return "Run<%s> [FINISHED]" % (self.run_dir,)
else:
# No process was detected. Perhaps it is set to start later, but this should remain unkown for the object
return "Run<%s> [NOT STARTED]" % (self.run_dir,)
def update(self):
"""Update the process info using what is found at the moment."""
candidates = _find_running_exe(path.join(self.run_dir, "osiris"))
try:
if not candidates: # No process running found
self.process = None
elif len(candidates) > 1:
logger.warning("More than one pid was found for the run.\n"
"Multiple processes are not really handled by duat yet, do not trust what you see.")
self.process = psutil.Process(candidates[0])
else:
self.process = psutil.Process(candidates[0])
except psutil.NoSuchProcess:
# If the process have died before processing was completed.
self.process = None
def current_step(self):
"""
Find the current simulation step.
Returns:
int: The simulation step or -1 if it could not be found.
"""
last_line = tail(path.join(self.run_dir, "out.txt"), 8)
if not last_line: # Empty file
return -1
if re.search("now at t", last_line[-1]):
return int(re.match(r".* n = *(.*?)$", last_line[-1]).group(1))
elif " Osiris run completed normally\n" in last_line:
return self.total_steps
else:
return -1
def is_running(self):
"""Return True if the simulation is known to be running, or False otherwise."""
if self.process is None:
# Try to find a process only if none was found when the instance was created
self.update()
if self.process is None:
return False
else:
return self.process.is_running()
return self.process.is_running()
def is_finished(self):
if self.is_running():
return False
else:
if path.isfile(path.join(self.run_dir, "TIMINGS", "timings.001")):
return True
else:
return False
def terminate(self):
"""Terminate the OSIRIS process (if running)."""
if self.process is not None:
if self.process.is_running():
try:
self.process.terminate()
except psutil.NoSuchProcess:
# The process has just terminated
pass
else:
logger.warning("The process had already stopped")
else:
logger.warning("Asked for termination of a Run with no known process")
def kill(self):
"""
Abruptly terminate the OSIRIS process (if running).
The :func:`~duat.osiris.run.Run.terminate` method should be used instead to perform a cleaner exit.
"""
if self.process is not None:
if self.process.is_running():
try:
self.process.kill()
except psutil.NoSuchProcess:
# The process has just terminated
pass
else:
logger.warning("The process had already stopped")
else:
logger.warning("Asked for termination of a Run with no known process")
def estimated_time(self):
"""
Estimated time to end the simulation in seconds.
The estimation uses a linear model and considers initialization negligible.
The modification time of the os-stdin file is used in the calculation. If altered, estimation will be meaningless.
Returns:
float: The estimation of the time to end the simulation or NaN if no estimation could be done.
"""
if not self.is_running(): # Already finished
return 0
else:
current = self.current_step()
if current <= 0: # If not started or error
return float('nan')
else:
elapsed = time() - path.getmtime(path.join(self.run_dir, "os-stdin"))
return elapsed * (self.total_steps / current - 1)
    def real_time(self):
        """Find the total time in seconds taken by the simulation if it has finished, otherwise returning nan."""
        try:
            # TODO: Update for resuming runs
            with open(path.join(self.run_dir, "TIMINGS", "timings.001"), "r") as f:
                text = f.read()
            # The timings file is expected to contain a line of the form
            # " Total time for loop was  <seconds> seconds".
            r = re.match(r" Total time for loop was(?: *)(.*?)(?: *)seconds", text, re.DOTALL + re.MULTILINE)
            if not r:
                logger.warning("Bad format in timings file. The real time could not be read.")
                return float("nan")
            else:
                return float(r.group(1))
        except FileNotFoundError:
            # No timings file: the run has not finished (or never ran).
            return float("nan")
    def get_size(self):
        """Get the size of all run data in bytes."""
        # Size of the run directory as reported by the get_dir_size helper.
        return get_dir_size(self.run_dir)
def has_error(self):
"""Search for common error messages in the output file."""
# TODO: Cache result if reached execution with no error
try:
with open(path.join(self.run_dir, "out.txt"), "r") as f:
text = f.read()
# TODO: Optimize this search
if "(*error*)" in text or re.search("Error reading .* parameters", text) or re.search(
"MPI_ABORT was invoked",
text):
return True
else:
return False
except FileNotFoundError:
return False
def open_run_list(base_path, filter=None):
    """
    Create a Run instance for each of the subdirectories in the given path.

    Args:
        base_path (str): Path where the runs are found.
        filter (str): Filter the directories using a UNIX-like pattern.

    Returns:
        list of `Run`: A list with the Run instances, ordered so their paths are in human order.
    """
    subdirs = listdir(base_path)
    if filter is not None and subdirs:
        # Keep only entries whose basename matches the glob pattern.
        allowed = set(path.basename(p) for p in glob(path.join(base_path, filter)))
        subdirs = [d for d in subdirs if d in allowed]
    if not subdirs:
        return []
    subdirs.sort(key=human_order_key)
    return [Run(path.join(base_path, d)) for d in subdirs]
def _execute_run(prefix, osiris_path, run_dir, run_object=None):
"""Execute and wait for a run to finish, optionally updating a Run instance when the call is made."""
# Cf. run_config
p = subprocess.Popen(prefix + osiris_path + " > out.txt 2> err.txt", shell=True, cwd=path.abspath(run_dir))
if run_object is not None:
sleep(0.2)
print("Updating")
run_object.update()
p.wait()
def run_config(config, run_dir, prefix=None, clean_dir=True, blocking=None, force=None, mpcaller=None):
    """
    Initiate a OSIRIS run from a config instance.

    Args:
        config (`ConfigFile`): The instance describing the configuration file.
        run_dir (str): Folder where the run is carried.
        prefix (str): A prefix to run the command (e.g., "qsub", ...).
        clean_dir (bool): Whether to remove the files in the directory before execution.
        blocking (bool): Whether to wait for the run to finish.
        force (str): Set what to do if a running executable is found in the directory. Set to "ignore" to launch anyway,
                     possibly resulting in multiple instances running simultaneously; set to "kill" to terminate the
                     existing processes.
        mpcaller (MPCaller): An instance controlling multithreaded calls. If supplied, all calls will be handled by this
                             instance and the blocking parameter will be ignored.

    Returns:
        Run: A Run instance describing the execution, or None if the launch was aborted
            because a running executable was found and no force option was given.
    """
    # Refuse, ignore or kill pre-existing processes, depending on `force`.
    candidates = _find_running_exe(path.join(run_dir, "osiris"))
    if candidates:
        if force == "ignore":
            logger.warning("Ignored %d running exe found in %s" % (len(candidates), run_dir))
        elif force == "kill":
            logger.warning("Killing %d running exe found in %s" % (len(candidates), run_dir))
            for c in candidates:
                try:
                    psutil.Process(c).terminate()
                except psutil.NoSuchProcess:
                    pass  # If just ended
        else:
            logger.warning("Running exe found in %s. Aborting launch." % run_dir)
            return
    if clean_dir:
        for root, dirs, files in walk(run_dir):
            for f in files:
                remove(path.join(root, f))
        # Second pass: any file still present could not be removed above.
        for root, dirs, files in walk(run_dir):
            for f in files:
                logger.warning("Could not remove file %s" % f)
    # Write the configuration and copy a dimension-matched executable in place.
    ensure_dir_exists(run_dir)
    config.write(path.join(run_dir, "os-stdin"))
    osiris_path = path.abspath(path.join(run_dir, "osiris"))
    osiris = ifd(config.get_d(), osiris_1d, osiris_2d, osiris_3d)
    copyfile(osiris, osiris_path)
    ensure_executable(osiris_path)
    # Normalize the prefix so it either is empty or ends with a space.
    if not prefix:  # None or ""
        prefix = ""
    elif prefix[-1] != " ":
        prefix += " "
    if mpcaller is not None:
        run = Run(run_dir)
        # Set the run instance to update the process info when the call is made.
        mpcaller.add_call(Call(_execute_run, prefix, osiris_path, run_dir, run_object=run))
        return run
    else:
        proc = subprocess.Popen(prefix + osiris_path + " > out.txt 2> err.txt", shell=True, cwd=path.abspath(run_dir))
        if blocking:
            proc.wait()
        else:  # Sleep a little to check for quickly appearing errors and to allow the shell to start osiris
            sleep(0.2)
        # BEWARE: Perhaps under extreme circumstances, OSIRIS might have not started despite sleeping.
        # This could be solved reinstantiating RUN. Consider it a feature instead of a bug :P
        run = Run(run_dir)
        # Try to detect errors checking the output
        if run.has_error():
            logger.warning(
                "Error detected while launching %s.\nCheck out.txt there for more information or re-run in console." % run_dir)
        return run
def run_variation(config, variation, run_base, caller=None, **kwargs):
    """
    Make consecutive calls to :func:`~duat.osiris.run.run_config` with ConfigFiles generated from a variation.

    Args:
        config (`ConfigFile`): Base configuration file.
        variation (`Variation`): Description of the variations to apply.
        run_base (str): Path to the directory where the runs will take place, each in a folder named var_number.
        caller (int or `MPCaller`): If supplied, the calls will be managed by a MPCaller instance. If an int is provided
                                    an MPCaller with such a number of threads will be created. Provide an instance if
                                    interested in further controlling.
        **kwargs: Keyword arguments to pass to :func:`~duat.osiris.run.run_config`

    Returns:
        list: List with the return values of each call.
    """
    results = []
    if caller is None:
        # Sequential launches, one run_config call per generated config.
        for index, cfg in enumerate(variation.get_generator(config)):
            results.append(run_config(cfg, path.join(run_base, "var_" + str(index)), **kwargs))
        return results
    # Multithreaded launches through an MPCaller.
    _caller = MPCaller(caller) if isinstance(caller, int) else caller
    for index, cfg in enumerate(variation.get_generator(config)):
        results.append(run_config(cfg, path.join(run_base, "var_" + str(index)), mpcaller=_caller, **kwargs))
    if isinstance(caller, int):
        # If the MPCaller was created in this method, threads should die after execution.
        # Nevertheless, processes seems not to be discarded until a new call to this method is made.
        _caller.wait_calls(blocking=False)
    return results
# Try to guess the OSIRIS location:
_candidates = []
if "OSIRIS_PATH" in environ:
    _candidates.append(environ["OSIRIS_PATH"])
_candidates.append(path.join(path.expanduser("~"), "osiris", "bin"))
# BUG FIX: the previous path.join("usr", ...) produced the RELATIVE path
# "usr/local/osiris/bin"; the leading separator makes it the intended
# system-wide location.
_candidates.append(path.join("/usr", "local", "osiris", "bin"))
for t in _candidates:
    set_osiris_path(t, warn=False)
    if osiris_1d and osiris_2d and osiris_3d:
        break
if not (osiris_1d and osiris_2d and osiris_3d):
    if not (osiris_1d or osiris_2d or osiris_3d):
        logger.warning("Warning: no OSIRIS executables were found.")
    else:
        if not osiris_1d:
            logger.warning("Warning: osiris-1D.e not found.")
        if not osiris_2d:
            logger.warning("Warning: osiris-2D.e not found.")
        if not osiris_3d:
            logger.warning("Warning: osiris-3D.e not found.")
    logger.warning("Set the environment variable OSIRIS_PATH to a folder where the OSIRIS executables with names "
                   "osiris-1D.e and so on are found.\n"
                   "You can also use run.set_osiris_path or set the variables run.osiris_1d (and so on).")
|
# Copyright 2018, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Attribute related codes.
Attribute lookup, setting.
"""
from nuitka import Options
from .CodeHelpers import generateChildExpressionsCode, generateExpressionCode
from .ErrorCodes import getErrorExitBoolCode, getErrorExitCode, getReleaseCode
from .PythonAPICodes import generateCAPIObjectCode, generateCAPIObjectCode0
def generateAssignmentAttributeCode(statement, emit, context):
    """ Generate code for an attribute assignment, i.e. "source.attr = value".

        The special attributes "__dict__" and "__class__" are dispatched to
        dedicated slot helpers; every other name uses the generic assignment.
    """
    lookup_source = statement.getLookupSource()
    attribute_name = statement.getAttributeName()
    value = statement.getAssignSource()
    # Evaluate the assigned value first, then the assignment target.
    value_name = context.allocateTempName("assattr_name")
    generateExpressionCode(
        to_name = value_name,
        expression = value,
        emit = emit,
        context = context
    )
    target_name = context.allocateTempName("assattr_target")
    generateExpressionCode(
        to_name = target_name,
        expression = lookup_source,
        emit = emit,
        context = context
    )
    # In full compat mode the value expression's source position is reported,
    # otherwise the statement's own.
    old_source_ref = context.setCurrentSourceCodeReference(
        value.getSourceReference()
        if Options.isFullCompat() else
        statement.getSourceReference()
    )
    if attribute_name == "__dict__":
        getAttributeAssignmentDictSlotCode(
            target_name = target_name,
            value_name = value_name,
            emit = emit,
            context = context
        )
    elif attribute_name == "__class__":
        getAttributeAssignmentClassSlotCode(
            target_name = target_name,
            value_name = value_name,
            emit = emit,
            context = context
        )
    else:
        getAttributeAssignmentCode(
            target_name = target_name,
            value_name = value_name,
            attribute_name = context.getConstantCode(
                constant = attribute_name
            ),
            emit = emit,
            context = context
        )
    context.setCurrentSourceCodeReference(old_source_ref)
def generateDelAttributeCode(statement, emit, context):
    """ Generate code for an attribute deletion, i.e. "del source.attr". """
    target_name = context.allocateTempName("attrdel_target")
    generateExpressionCode(
        to_name = target_name,
        expression = statement.getLookupSource(),
        emit = emit,
        context = context
    )
    # In full compat mode the lookup source's position is reported, otherwise
    # the statement's own.
    old_source_ref = context.setCurrentSourceCodeReference(
        statement.getLookupSource().getSourceReference()
        if Options.isFullCompat() else
        statement.getSourceReference()
    )
    getAttributeDelCode(
        target_name = target_name,
        attribute_name = context.getConstantCode(
            constant = statement.getAttributeName()
        ),
        emit = emit,
        context = context
    )
    context.setCurrentSourceCodeReference(old_source_ref)
def generateAttributeLookupCode(to_name, expression, emit, context):
    """ Generate code for an attribute lookup expression "source.attr". """
    source_name, = generateChildExpressionsCode(
        expression = expression,
        emit = emit,
        context = context
    )
    attribute_name = expression.getAttributeName()
    getAttributeLookupCode(
        to_name = to_name,
        source_name = source_name,
        attribute_name = attribute_name,
        # Error checking is skipped when the node can prove no exception.
        needs_check = expression.getLookupSource().mayRaiseExceptionAttributeLookup(
            exception_type = BaseException,
            attribute_name = attribute_name
        ),
        emit = emit,
        context = context
    )
def getAttributeLookupCode(to_name, source_name, attribute_name, needs_check,
                           emit, context):
    """ Emit the C attribute fetch, with "__dict__"/"__class__" slot fast paths. """
    if attribute_name == "__dict__":
        emit(
            "%s = LOOKUP_ATTRIBUTE_DICT_SLOT( %s );" % (
                to_name,
                source_name
            )
        )
    elif attribute_name == "__class__":
        emit(
            "%s = LOOKUP_ATTRIBUTE_CLASS_SLOT( %s );" % (
                to_name,
                source_name
            )
        )
    else:
        emit(
            "%s = LOOKUP_ATTRIBUTE( %s, %s );" % (
                to_name,
                source_name,
                context.getConstantCode(attribute_name)
            )
        )
    # Error exit also releases the source reference.
    getErrorExitCode(
        check_name = to_name,
        release_name = source_name,
        needs_check = needs_check,
        emit = emit,
        context = context
    )
    # The result is a new reference that needs cleanup registration.
    context.addCleanupTempName(to_name)
def getAttributeAssignmentCode(target_name, attribute_name, value_name, emit,
                               context):
    """ Emit a generic SET_ATTRIBUTE call plus its error handling. """
    res_name = context.getBoolResName()
    emit(
        "%s = SET_ATTRIBUTE( %s, %s, %s );" % (
            res_name,
            target_name,
            attribute_name,
            value_name
        )
    )
    # On failure, all three operand references are released by the exit code.
    getErrorExitBoolCode(
        condition = "%s == false" % res_name,
        release_names = (value_name, target_name, attribute_name),
        emit = emit,
        context = context
    )
def getAttributeAssignmentDictSlotCode(target_name, value_name, emit, context):
    """ Code for special case target.__dict__ = value """
    res_name = context.getBoolResName()
    emit(
        "%s = SET_ATTRIBUTE_DICT_SLOT( %s, %s );" % (
            res_name,
            target_name,
            value_name
        )
    )
    getErrorExitBoolCode(
        condition = "%s == false" % res_name,
        release_names = (value_name, target_name),
        emit = emit,
        context = context
    )
def getAttributeAssignmentClassSlotCode(target_name, value_name, emit, context):
    """ Get code for special case target.__class__ = value """
    res_name = context.getBoolResName()
    emit(
        "%s = SET_ATTRIBUTE_CLASS_SLOT( %s, %s );" % (
            res_name,
            target_name,
            value_name
        )
    )
    getErrorExitBoolCode(
        condition = "%s == false" % res_name,
        release_names = (value_name, target_name),
        emit = emit,
        context = context
    )
def getAttributeDelCode(target_name, attribute_name, emit, context):
    """ Emit a PyObject_DelAttr call plus its error handling. """
    res_name = context.getIntResName()
    emit(
        "%s = PyObject_DelAttr( %s, %s );" % (
            res_name,
            target_name,
            attribute_name
        )
    )
    # PyObject_DelAttr signals failure with -1.
    getErrorExitBoolCode(
        condition = "%s == -1" % res_name,
        release_names = (target_name, attribute_name),
        emit = emit,
        context = context
    )
def generateAttributeLookupSpecialCode(to_name, expression, emit, context):
    """ Generate code for a type-level "special" attribute lookup. """
    source_name, = generateChildExpressionsCode(
        expression = expression,
        emit = emit,
        context = context
    )
    attribute_name = expression.getAttributeName()
    getAttributeLookupSpecialCode(
        to_name = to_name,
        source_name = source_name,
        attr_name = context.getConstantCode(
            constant = attribute_name
        ),
        needs_check = expression.getLookupSource().mayRaiseExceptionAttributeLookupSpecial(
            exception_type = BaseException,
            attribute_name = attribute_name
        ),
        emit = emit,
        context = context
    )
def getAttributeLookupSpecialCode(to_name, source_name, attr_name, needs_check,
                                  emit, context):
    """ Emit the LOOKUP_SPECIAL call plus its error handling. """
    emit(
        "%s = LOOKUP_SPECIAL( %s, %s );" % (
            to_name,
            source_name,
            attr_name,
        )
    )
    getErrorExitCode(
        check_name = to_name,
        release_names = (source_name, attr_name),
        emit = emit,
        needs_check = needs_check,
        context = context
    )
    # The result is a new reference that needs cleanup registration.
    context.addCleanupTempName(to_name)
def generateBuiltinHasattrCode(to_name, expression, emit, context):
    """ Generate code for the hasattr() built-in. """
    source_name, attr_name = generateChildExpressionsCode(
        expression = expression,
        emit = emit,
        context = context
    )
    res_name = context.getIntResName()
    emit(
        "%s = BUILTIN_HASATTR_BOOL( %s, %s );" % (
            res_name,
            source_name,
            attr_name
        )
    )
    # The helper signals an error with -1, otherwise 0/1 truth value.
    getErrorExitBoolCode(
        condition = "%s == -1" % res_name,
        release_names = (source_name, attr_name),
        needs_check = expression.mayRaiseException(BaseException),
        emit = emit,
        context = context
    )
    # Convert the int result to the target's boolean representation.
    emit(
        to_name.getCType().getAssignmentCodeFromBoolCondition(
            to_name = to_name,
            condition = "%s != 0" % res_name
        )
    )
def generateAttributeCheckCode(to_name, expression, emit, context):
    """ Generate code for an attribute presence check node. """
    source_name, = generateChildExpressionsCode(
        expression = expression,
        emit = emit,
        context = context
    )
    res_name = context.getIntResName()
    emit(
        "%s = PyObject_HasAttr( %s, %s );" % (
            res_name,
            source_name,
            context.getConstantCode(
                constant = expression.getAttributeName()
            )
        )
    )
    # No error exit is emitted here; only the source reference is released
    # and the int result is turned into a boolean assignment.
    getReleaseCode(
        release_name = source_name,
        emit = emit,
        context = context
    )
    emit(
        to_name.getCType().getAssignmentCodeFromBoolCondition(
            to_name = to_name,
            condition = "%s != 0" % res_name
        )
    )
def generateBuiltinGetattrCode(to_name, expression, emit, context):
    """ Generate code for the getattr() built-in via the C API helper. """
    generateCAPIObjectCode(
        to_name = to_name,
        capi = "BUILTIN_GETATTR",
        arg_desc = (
            ("getattr_target", expression.getLookupSource()),
            ("getattr_attr", expression.getAttribute()),
            ("getattr_default", expression.getDefault()),
        ),
        may_raise = expression.mayRaiseException(BaseException),
        source_ref = expression.getCompatibleSourceReference(),
        # An absent default is passed as NULL rather than None.
        none_null = True,
        emit = emit,
        context = context
    )
def generateBuiltinSetattrCode(to_name, expression, emit, context):
    """ Generate code for the setattr() built-in via the C API helper. """
    generateCAPIObjectCode0(
        to_name = to_name,
        capi = "BUILTIN_SETATTR",
        arg_desc = (
            ("setattr_target", expression.getLookupSource()),
            ("setattr_attr", expression.getAttribute()),
            ("setattr_value", expression.getValue()),
        ),
        may_raise = expression.mayRaiseException(BaseException),
        source_ref = expression.getCompatibleSourceReference(),
        emit = emit,
        context = context,
    )
# Ctypes: Make attribute lookups C target type aware.
# Copyright 2018, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Attribute related codes.
Attribute lookup, setting.
"""
from nuitka import Options
from .CodeHelpers import generateChildExpressionsCode, generateExpressionCode
from .ErrorCodes import getErrorExitBoolCode, getErrorExitCode, getReleaseCode
from .PythonAPICodes import generateCAPIObjectCode, generateCAPIObjectCode0
def generateAssignmentAttributeCode(statement, emit, context):
    """ Generate code for an attribute assignment, i.e. "source.attr = value".

        The special attributes "__dict__" and "__class__" are dispatched to
        dedicated slot helpers; every other name uses the generic assignment.
    """
    lookup_source = statement.getLookupSource()
    attribute_name = statement.getAttributeName()
    value = statement.getAssignSource()
    # Evaluate the assigned value first, then the assignment target.
    value_name = context.allocateTempName("assattr_name")
    generateExpressionCode(
        to_name = value_name,
        expression = value,
        emit = emit,
        context = context
    )
    target_name = context.allocateTempName("assattr_target")
    generateExpressionCode(
        to_name = target_name,
        expression = lookup_source,
        emit = emit,
        context = context
    )
    # In full compat mode the value expression's source position is reported,
    # otherwise the statement's own.
    old_source_ref = context.setCurrentSourceCodeReference(
        value.getSourceReference()
        if Options.isFullCompat() else
        statement.getSourceReference()
    )
    if attribute_name == "__dict__":
        getAttributeAssignmentDictSlotCode(
            target_name = target_name,
            value_name = value_name,
            emit = emit,
            context = context
        )
    elif attribute_name == "__class__":
        getAttributeAssignmentClassSlotCode(
            target_name = target_name,
            value_name = value_name,
            emit = emit,
            context = context
        )
    else:
        getAttributeAssignmentCode(
            target_name = target_name,
            value_name = value_name,
            attribute_name = context.getConstantCode(
                constant = attribute_name
            ),
            emit = emit,
            context = context
        )
    context.setCurrentSourceCodeReference(old_source_ref)
def generateDelAttributeCode(statement, emit, context):
    """ Generate code for an attribute deletion, i.e. "del source.attr". """
    target_name = context.allocateTempName("attrdel_target")
    generateExpressionCode(
        to_name = target_name,
        expression = statement.getLookupSource(),
        emit = emit,
        context = context
    )
    # In full compat mode the lookup source's position is reported, otherwise
    # the statement's own.
    old_source_ref = context.setCurrentSourceCodeReference(
        statement.getLookupSource().getSourceReference()
        if Options.isFullCompat() else
        statement.getSourceReference()
    )
    getAttributeDelCode(
        target_name = target_name,
        attribute_name = context.getConstantCode(
            constant = statement.getAttributeName()
        ),
        emit = emit,
        context = context
    )
    context.setCurrentSourceCodeReference(old_source_ref)
def generateAttributeLookupCode(to_name, expression, emit, context):
    """ Generate code for an attribute lookup, aware of the C target type.

        The lookup result is produced as a "PyObject *" value; when the
        target variable has a different C type, a conversion is appended.
    """
    source_name, = generateChildExpressionsCode(
        expression = expression,
        emit = emit,
        context = context
    )
    attribute_name = expression.getAttributeName()
    needs_check = expression.getLookupSource().mayRaiseExceptionAttributeLookup(
        exception_type = BaseException,
        attribute_name = attribute_name
    )
    # Write directly into the target when it is already "PyObject *";
    # otherwise use an intermediate temp and convert at the end.
    if to_name.c_type == "PyObject *":
        value_name = to_name
    else:
        value_name = context.allocateTempName("attribute_value")
    if attribute_name == "__dict__":
        emit(
            "%s = LOOKUP_ATTRIBUTE_DICT_SLOT( %s );" % (
                value_name,
                source_name
            )
        )
    elif attribute_name == "__class__":
        emit(
            "%s = LOOKUP_ATTRIBUTE_CLASS_SLOT( %s );" % (
                value_name,
                source_name
            )
        )
    else:
        emit(
            "%s = LOOKUP_ATTRIBUTE( %s, %s );" % (
                value_name,
                source_name,
                context.getConstantCode(attribute_name)
            )
        )
    getErrorExitCode(
        check_name = value_name,
        release_name = source_name,
        needs_check = needs_check,
        emit = emit,
        context = context
    )
    context.addCleanupTempName(value_name)
    # Conversion step for non-"PyObject *" targets; the intermediate value
    # is released afterwards.
    if to_name is not value_name:
        to_name.getCType().emitAssignConversionCode(
            to_name = to_name,
            value_name = value_name,
            emit = emit,
            context = context
        )
        getReleaseCode(value_name, emit, context)
def getAttributeAssignmentCode(target_name, attribute_name, value_name, emit,
                               context):
    """ Emit a generic SET_ATTRIBUTE call plus its error handling. """
    res_name = context.getBoolResName()
    emit(
        "%s = SET_ATTRIBUTE( %s, %s, %s );" % (
            res_name,
            target_name,
            attribute_name,
            value_name
        )
    )
    # On failure, all three operand references are released by the exit code.
    getErrorExitBoolCode(
        condition = "%s == false" % res_name,
        release_names = (value_name, target_name, attribute_name),
        emit = emit,
        context = context
    )
def getAttributeAssignmentDictSlotCode(target_name, value_name, emit, context):
    """ Code for special case target.__dict__ = value """
    res_name = context.getBoolResName()
    emit(
        "%s = SET_ATTRIBUTE_DICT_SLOT( %s, %s );" % (
            res_name,
            target_name,
            value_name
        )
    )
    getErrorExitBoolCode(
        condition = "%s == false" % res_name,
        release_names = (value_name, target_name),
        emit = emit,
        context = context
    )
def getAttributeAssignmentClassSlotCode(target_name, value_name, emit, context):
    """ Get code for special case target.__class__ = value """
    res_name = context.getBoolResName()
    emit(
        "%s = SET_ATTRIBUTE_CLASS_SLOT( %s, %s );" % (
            res_name,
            target_name,
            value_name
        )
    )
    getErrorExitBoolCode(
        condition = "%s == false" % res_name,
        release_names = (value_name, target_name),
        emit = emit,
        context = context
    )
def getAttributeDelCode(target_name, attribute_name, emit, context):
    """ Emit a PyObject_DelAttr call plus its error handling. """
    res_name = context.getIntResName()
    emit(
        "%s = PyObject_DelAttr( %s, %s );" % (
            res_name,
            target_name,
            attribute_name
        )
    )
    # PyObject_DelAttr signals failure with -1.
    getErrorExitBoolCode(
        condition = "%s == -1" % res_name,
        release_names = (target_name, attribute_name),
        emit = emit,
        context = context
    )
def generateAttributeLookupSpecialCode(to_name, expression, emit, context):
    """ Generate code for a type-level "special" attribute lookup. """
    source_name, = generateChildExpressionsCode(
        expression = expression,
        emit = emit,
        context = context
    )
    attribute_name = expression.getAttributeName()
    getAttributeLookupSpecialCode(
        to_name = to_name,
        source_name = source_name,
        attr_name = context.getConstantCode(
            constant = attribute_name
        ),
        needs_check = expression.getLookupSource().mayRaiseExceptionAttributeLookupSpecial(
            exception_type = BaseException,
            attribute_name = attribute_name
        ),
        emit = emit,
        context = context
    )
def getAttributeLookupSpecialCode(to_name, source_name, attr_name, needs_check,
                                  emit, context):
    """ Emit the LOOKUP_SPECIAL call plus its error handling. """
    emit(
        "%s = LOOKUP_SPECIAL( %s, %s );" % (
            to_name,
            source_name,
            attr_name,
        )
    )
    getErrorExitCode(
        check_name = to_name,
        release_names = (source_name, attr_name),
        emit = emit,
        needs_check = needs_check,
        context = context
    )
    # The result is a new reference that needs cleanup registration.
    context.addCleanupTempName(to_name)
def generateBuiltinHasattrCode(to_name, expression, emit, context):
    """ Generate code for the hasattr() built-in. """
    source_name, attr_name = generateChildExpressionsCode(
        expression = expression,
        emit = emit,
        context = context
    )
    res_name = context.getIntResName()
    emit(
        "%s = BUILTIN_HASATTR_BOOL( %s, %s );" % (
            res_name,
            source_name,
            attr_name
        )
    )
    # The helper signals an error with -1, otherwise 0/1 truth value.
    getErrorExitBoolCode(
        condition = "%s == -1" % res_name,
        release_names = (source_name, attr_name),
        needs_check = expression.mayRaiseException(BaseException),
        emit = emit,
        context = context
    )
    # Convert the int result to the target's boolean representation.
    emit(
        to_name.getCType().getAssignmentCodeFromBoolCondition(
            to_name = to_name,
            condition = "%s != 0" % res_name
        )
    )
def generateAttributeCheckCode(to_name, expression, emit, context):
    """ Generate code for an attribute presence check node. """
    source_name, = generateChildExpressionsCode(
        expression = expression,
        emit = emit,
        context = context
    )
    res_name = context.getIntResName()
    emit(
        "%s = PyObject_HasAttr( %s, %s );" % (
            res_name,
            source_name,
            context.getConstantCode(
                constant = expression.getAttributeName()
            )
        )
    )
    # No error exit is emitted here; only the source reference is released
    # and the int result is turned into a boolean assignment.
    getReleaseCode(
        release_name = source_name,
        emit = emit,
        context = context
    )
    emit(
        to_name.getCType().getAssignmentCodeFromBoolCondition(
            to_name = to_name,
            condition = "%s != 0" % res_name
        )
    )
def generateBuiltinGetattrCode(to_name, expression, emit, context):
    """ Generate code for the getattr() built-in via the C API helper. """
    generateCAPIObjectCode(
        to_name = to_name,
        capi = "BUILTIN_GETATTR",
        arg_desc = (
            ("getattr_target", expression.getLookupSource()),
            ("getattr_attr", expression.getAttribute()),
            ("getattr_default", expression.getDefault()),
        ),
        may_raise = expression.mayRaiseException(BaseException),
        source_ref = expression.getCompatibleSourceReference(),
        # An absent default is passed as NULL rather than None.
        none_null = True,
        emit = emit,
        context = context
    )
def generateBuiltinSetattrCode(to_name, expression, emit, context):
    """ Generate code for the setattr() built-in via the C API helper. """
    generateCAPIObjectCode0(
        to_name = to_name,
        capi = "BUILTIN_SETATTR",
        arg_desc = (
            ("setattr_target", expression.getLookupSource()),
            ("setattr_attr", expression.getAttribute()),
            ("setattr_value", expression.getValue()),
        ),
        may_raise = expression.mayRaiseException(BaseException),
        source_ref = expression.getCompatibleSourceReference(),
        emit = emit,
        context = context,
    )
|
import copy
import operator
import types as pytypes
import operator
import warnings
from dataclasses import make_dataclass
import llvmlite.ir
import numpy as np
import numba
from numba.parfors import parfor
from numba.core import types, ir, config, compiler, sigutils, cgutils
from numba.core.ir_utils import (
add_offset_to_labels,
replace_var_names,
remove_dels,
legalize_names,
mk_unique_var,
rename_labels,
get_name_var_table,
visit_vars_inner,
get_definition,
guard,
get_call_table,
is_pure,
get_np_ufunc_typ,
get_unused_var_name,
is_const_call,
fixup_var_define_in_scope,
transfer_scope,
find_max_label,
get_global_func_typ,
)
from numba.core.typing import signature
from numba.parfors.parfor import ensure_parallel_support
from numba.core.errors import (
NumbaParallelSafetyWarning, NotDefinedError, CompilerError, InternalError,
)
from numba.parfors.parfor_lowering_utils import ParforLoweringBuilder
def _lower_parfor_parallel(lowerer, parfor):
"""Lowerer that handles LLVM code generation for parfor.
This function lowers a parfor IR node to LLVM.
The general approach is as follows:
1) The code from the parfor's init block is lowered normally
in the context of the current function.
2) The body of the parfor is transformed into a gufunc function.
3) Code is inserted into the main function that calls do_scheduling
to divide the iteration space for each thread, allocates
reduction arrays, calls the gufunc function, and then invokes
the reduction function across the reduction arrays to produce
the final reduction values.
"""
from numba.np.ufunc.parallel import get_thread_count
ensure_parallel_support()
typingctx = lowerer.context.typing_context
targetctx = lowerer.context
builder = lowerer.builder
# We copy the typemap here because for race condition variable we'll
# update their type to array so they can be updated by the gufunc.
orig_typemap = lowerer.fndesc.typemap
# replace original typemap with copy and restore the original at the end.
lowerer.fndesc.typemap = copy.copy(orig_typemap)
if config.DEBUG_ARRAY_OPT:
print("lowerer.fndesc", lowerer.fndesc, type(lowerer.fndesc))
typemap = lowerer.fndesc.typemap
varmap = lowerer.varmap
if config.DEBUG_ARRAY_OPT:
print("_lower_parfor_parallel")
parfor.dump()
loc = parfor.init_block.loc
scope = parfor.init_block.scope
# produce instructions for init_block
if config.DEBUG_ARRAY_OPT:
print("init_block = ", parfor.init_block, " ", type(parfor.init_block))
for instr in parfor.init_block.body:
if config.DEBUG_ARRAY_OPT:
print("lower init_block instr = ", instr)
lowerer.lower_inst(instr)
for racevar in parfor.races:
if racevar not in varmap:
rvtyp = typemap[racevar]
rv = ir.Var(scope, racevar, loc)
lowerer._alloca_var(rv.name, rvtyp)
alias_map = {}
arg_aliases = {}
numba.parfors.parfor.find_potential_aliases_parfor(parfor, parfor.params, typemap,
lowerer.func_ir, alias_map, arg_aliases)
if config.DEBUG_ARRAY_OPT:
print("alias_map", alias_map)
print("arg_aliases", arg_aliases)
# run get_parfor_outputs() and get_parfor_reductions() before gufunc creation
# since Jumps are modified so CFG of loop_body dict will become invalid
assert parfor.params is not None
parfor_output_arrays = numba.parfors.parfor.get_parfor_outputs(
parfor, parfor.params)
parfor_redvars, parfor_reddict = parfor.redvars, parfor.reddict
if config.DEBUG_ARRAY_OPT:
print("parfor_redvars:", parfor_redvars)
print("parfor_reddict:", parfor_reddict)
# init reduction array allocation here.
nredvars = len(parfor_redvars)
redarrs = {}
if nredvars > 0:
# reduction arrays outer dimension equal to thread count
scope = parfor.init_block.scope
loc = parfor.init_block.loc
pfbdr = ParforLoweringBuilder(lowerer=lowerer, scope=scope, loc=loc)
# Get the Numba internal function to call to get the thread count.
get_num_threads = pfbdr.bind_global_function(
fobj=numba.np.ufunc.parallel._iget_num_threads,
ftype=get_global_func_typ(numba.np.ufunc.parallel._iget_num_threads),
args=()
)
# Insert the call to assign the thread count to a variable.
num_threads_var = pfbdr.assign(
rhs=pfbdr.call(get_num_threads, args=[]),
typ=types.intp,
name="num_threads_var")
# For each reduction variable...
for i in range(nredvars):
red_name = parfor_redvars[i]
# Get the type of the reduction variable.
redvar_typ = lowerer.fndesc.typemap[red_name]
# Get the ir.Var for the reduction variable.
redvar = ir.Var(scope, red_name, loc)
# Get the type of the array that holds the per-thread
# reduction variables.
redarrvar_typ = redtyp_to_redarraytype(redvar_typ)
reddtype = redarrvar_typ.dtype
if config.DEBUG_ARRAY_OPT:
print(
"reduction_info",
red_name,
redvar_typ,
redarrvar_typ,
reddtype,
types.DType(reddtype),
num_threads_var,
type(num_threads_var)
)
# If this is reduction over an array,
# the reduction array has just one added per-worker dimension.
if isinstance(redvar_typ, types.npytypes.Array):
redarrdim = redvar_typ.ndim + 1
else:
redarrdim = 1
# Reduction array is created and initialized to the initial reduction value.
# First create a var for the numpy empty ufunc.
glbl_np_empty = pfbdr.bind_global_function(
fobj=np.empty,
ftype=get_np_ufunc_typ(np.empty),
args=(
types.UniTuple(types.intp, redarrdim),
),
kws={'dtype': types.DType(reddtype)}
)
# Create var for outer dimension size of reduction array equal to number of threads.
#num_threads_var = pfbdr.make_const_variable(
# cval=thread_count,
# typ=types.intp,
# name='num_threads',
#)
size_var_list = [num_threads_var]
# If this is a reduction over an array...
if isinstance(redvar_typ, types.npytypes.Array):
# Add code to get the shape of the array being reduced over.
redshape_var = pfbdr.assign(
rhs=ir.Expr.getattr(redvar, "shape", loc),
typ=types.UniTuple(types.intp, redvar_typ.ndim),
name="redarr_shape",
)
# Add the dimension sizes of the array being reduced over to the tuple of sizes pass to empty.
for j in range(redvar_typ.ndim):
onedimvar = pfbdr.assign(
rhs=ir.Expr.static_getitem(redshape_var, j, None, loc),
typ=types.intp,
name="redshapeonedim",
)
size_var_list.append(onedimvar)
# Empty call takes tuple of sizes. Create here and fill in outer dimension (num threads).
size_var = pfbdr.make_tuple_variable(
size_var_list, name='tuple_size_var',
)
# Resolve dtype
cval = pfbdr._typingctx.resolve_value_type(reddtype)
dt = pfbdr.make_const_variable(cval=cval, typ=types.DType(reddtype))
# Add call to empty passing the size var tuple.
empty_call = pfbdr.call(glbl_np_empty, args=[size_var, dt])
redarr_var = pfbdr.assign(
rhs=empty_call, typ=redarrvar_typ, name="redarr",
)
# Remember mapping of original reduction array to the newly created per-worker reduction array.
redarrs[redvar.name] = redarr_var
init_val = parfor_reddict[red_name].init_val
if init_val is not None:
if isinstance(redvar_typ, types.npytypes.Array):
# Create an array of identity values for the reduction.
# First, create a variable for np.full.
full_func_node = pfbdr.bind_global_function(
fobj=np.full,
ftype=get_np_ufunc_typ(np.full),
args=(
types.UniTuple(types.intp, redvar_typ.ndim),
reddtype,
),
kws={'dtype': types.DType(reddtype)},
)
# Then create a var with the identify value.
init_val_var = pfbdr.make_const_variable(
cval=init_val,
typ=reddtype,
name="init_val",
)
# Then, call np.full with the shape of the reduction array and the identity value.
full_call = pfbdr.call(
full_func_node, args=[redshape_var, init_val_var, dt],
)
redtoset = pfbdr.assign(
rhs=full_call,
typ=redvar_typ,
name="redtoset",
)
else:
redtoset = pfbdr.make_const_variable(
cval=init_val,
typ=reddtype,
name="redtoset",
)
else:
redtoset = redvar
if config.DEBUG_ARRAY_OPT_RUNTIME:
res_print_str = "res_print1 for redvar " + str(redvar) + ":"
strconsttyp = types.StringLiteral(res_print_str)
lhs = pfbdr.make_const_variable(
cval=res_print_str,
typ=strconsttyp,
name="str_const",
)
res_print = ir.Print(args=[lhs, redvar],
vararg=None, loc=loc)
lowerer.fndesc.calltypes[res_print] = signature(types.none,
typemap[lhs.name],
typemap[redvar.name])
print("res_print_redvar", res_print)
lowerer.lower_inst(res_print)
# For each thread, initialize the per-worker reduction array to
# the current reduction array value.
# Get the Numba type of the variable that holds the thread count.
num_thread_type = typemap[num_threads_var.name]
# Get the LLVM type of the thread count variable.
ntllvm_type = targetctx.get_value_type(num_thread_type)
# Create a LLVM variable to hold the loop index.
alloc_loop_var = cgutils.alloca_once(builder, ntllvm_type)
# Associate this LLVM variable to a Numba IR variable so that
# we can use setitem IR builder.
# Create a Numba IR variable.
numba_ir_loop_index_var = ir.Var(scope,
mk_unique_var("loop_index"), loc)
# Give that variable the right type.
typemap[numba_ir_loop_index_var.name] = num_thread_type
# Associate this Numba variable to the LLVm variable in the
# lowerer's varmap.
lowerer.varmap[numba_ir_loop_index_var.name] = alloc_loop_var
# Insert a loop into the outputed LLVM that goes from 0 to
# the current thread count.
with cgutils.for_range(builder, lowerer.loadvar(num_threads_var.name), intp=ntllvm_type) as loop:
# Store the loop index into the alloca'd LLVM loop index variable.
builder.store(loop.index, alloc_loop_var)
# Initialize one element of the reduction array using the Numba
# IR variable associated with this loop's index.
pfbdr.setitem(obj=redarr_var, index=numba_ir_loop_index_var, val=redtoset)
# compile parfor body as a separate function to be used with GUFuncWrapper
flags = parfor.flags.copy()
flags.error_model = "numpy"
# Can't get here unless flags.auto_parallel == ParallelOptions(True)
index_var_typ = typemap[parfor.loop_nests[0].index_variable.name]
# index variables should have the same type, check rest of indices
for l in parfor.loop_nests[1:]:
assert typemap[l.index_variable.name] == index_var_typ
numba.parfors.parfor.sequential_parfor_lowering = True
try:
(func,
func_args,
func_sig,
func_arg_types,
exp_name_to_tuple_var) = _create_gufunc_for_parfor_body(
lowerer, parfor, typemap, typingctx, targetctx, flags, {},
bool(alias_map), index_var_typ, parfor.races)
finally:
numba.parfors.parfor.sequential_parfor_lowering = False
# get the shape signature
func_args = ['sched'] + func_args
num_reductions = len(parfor_redvars)
num_inputs = len(func_args) - len(parfor_output_arrays) - num_reductions
if config.DEBUG_ARRAY_OPT:
print("func_args = ", func_args)
print("num_inputs = ", num_inputs)
print("parfor_outputs = ", parfor_output_arrays)
print("parfor_redvars = ", parfor_redvars)
print("num_reductions = ", num_reductions)
gu_signature = _create_shape_signature(
parfor.get_shape_classes,
num_inputs,
num_reductions,
func_args,
func_sig,
parfor.races,
typemap)
if config.DEBUG_ARRAY_OPT:
print("gu_signature = ", gu_signature)
# call the func in parallel by wrapping it with ParallelGUFuncBuilder
loop_ranges = [(l.start, l.stop, l.step) for l in parfor.loop_nests]
if config.DEBUG_ARRAY_OPT:
print("loop_nests = ", parfor.loop_nests)
print("loop_ranges = ", loop_ranges)
call_parallel_gufunc(
lowerer,
func,
gu_signature,
func_sig,
func_args,
func_arg_types,
loop_ranges,
parfor_redvars,
parfor_reddict,
redarrs,
parfor.init_block,
index_var_typ,
parfor.races,
exp_name_to_tuple_var)
_parfor_lowering_finalize_reduction(
parfor, redarrs, lowerer, parfor_reddict,
)
# Cleanup reduction variable
for v in redarrs.values():
lowerer.lower_inst(ir.Del(v.name, loc=loc))
# Restore the original typemap of the function that was replaced temporarily at the
# Beginning of this function.
lowerer.fndesc.typemap = orig_typemap
if config.DEBUG_ARRAY_OPT:
print("_lower_parfor_parallel done")
_ReductionInfo = make_dataclass(
"_ReductionInfo",
[
"redvar_info",
"redvar_name",
"redvar_typ",
"redarr_var",
"redarr_typ",
"init_val",
],
frozen=True,
)
def _parfor_lowering_finalize_reduction(
    parfor,
    redarrs,
    lowerer,
    parfor_reddict,
):
    """Emit code to finalize the reduction from the intermediate values of
    each thread.
    """
    from numba.np.ufunc.parallel import get_thread_count

    thread_count = get_thread_count()
    typemap = lowerer.fndesc.typemap
    # For every reduction variable, combine the per-thread partials.
    # Pseudo-code for each iteration:
    #   tmp = redarr[0]
    #   for i in range(1, thread_count):
    #       tmp = reduce_op(redarr[i], tmp)
    #   reduction_result = tmp
    for name, arr_var in redarrs.items():
        info = _ReductionInfo(
            redvar_info=parfor_reddict[name],
            redvar_name=name,
            redvar_typ=typemap[name],
            redarr_var=arr_var,
            redarr_typ=typemap[arr_var.name],
            init_val=lowerer.loadvar(name),
        )
        # Trivial inplace-binop reductions take the specialized path;
        # everything else goes through the generic lowering.
        if info.redvar_info.redop is not None:
            _lower_trivial_inplace_binops(parfor, lowerer, thread_count, info)
        else:
            _lower_non_trivial_reduce(parfor, lowerer, thread_count, info)
class ParforsUnexpectedReduceNodeError(InternalError):
    """Raised when a parfor reduction lowerer encounters an IR instruction
    shape it does not know how to handle."""
    def __init__(self, inst):
        super().__init__(f"Unknown reduce instruction node: {inst}")
def _lower_trivial_inplace_binops(parfor, lowerer, thread_count, reduce_info):
    """Lower trivial inplace-binop reduction.

    Walks the recorded reduce_nodes for this reduction variable and emits
    either a plain Var->Var copy, or a call to the specialized combining
    kernel (``_emit_binop_reduce_call``) that folds the per-thread partial
    results into the final value.  Any other node shape raises
    ``ParforsUnexpectedReduceNodeError``.
    """
    for inst in reduce_info.redvar_info.reduce_nodes:
        # Var assigns to Var?
        if _lower_var_to_var_assign(lowerer, inst):
            pass
        # Is inplace-binop for the reduction?
        elif _is_inplace_binop_and_rhs_is_init(inst, reduce_info.redvar_name):
            fn = inst.value.fn
            redvar_result = _emit_binop_reduce_call(
                fn, lowerer, thread_count, reduce_info,
            )
            lowerer.storevar(redvar_result, name=inst.target.name)
        # Otherwise?
        else:
            raise ParforsUnexpectedReduceNodeError(inst)
        # XXX: This seems like a hack to stop the loop with this condition.
        if _fix_redvar_name_ssa_mismatch(parfor, lowerer, inst,
                                         reduce_info.redvar_name):
            break
    if config.DEBUG_ARRAY_OPT_RUNTIME:
        # NOTE(review): `fn` is only bound inside the inplace-binop branch
        # above; this debug print assumes that branch ran at least once
        # before the loop ended -- confirm, otherwise this raises NameError.
        varname = reduce_info.redvar_name
        lowerer.print_variable(
            f"{parfor.loc}: parfor {fn.__name__} reduction {varname} =",
            varname,
        )
def _lower_non_trivial_reduce(parfor, lowerer, thread_count, reduce_info):
    """Lower non-trivial reduction such as call to `functools.reduce()`.

    Unlike the trivial path, the combining expression is replayed once per
    thread: each iteration loads one thread's partial result into the
    ``<redvar>#init`` variable and re-lowers the recorded reduce_nodes.
    """
    ctx = lowerer.context
    init_name = f"{reduce_info.redvar_name}#init"
    # The init_name variable is not defined at this point.
    lowerer.fndesc.typemap.setdefault(init_name, reduce_info.redvar_typ)
    # Emit a sequence of the reduction operation for each intermediate result
    # of each thread.
    for tid in range(thread_count):
        for inst in reduce_info.redvar_info.reduce_nodes:
            # Var assigns to Var?
            if _lower_var_to_var_assign(lowerer, inst):
                pass
            # The reduction operation?
            elif (isinstance(inst, ir.Assign)
                    and any(var.name == init_name for var in inst.list_vars())):
                # Load this thread's partial result into the init variable,
                # then lower the combining instruction itself.
                elem = _emit_getitem_call(
                    ctx.get_constant(types.intp, tid), lowerer, reduce_info,
                )
                lowerer.storevar(elem, init_name)
                lowerer.lower_inst(inst)
            # Otherwise?
            else:
                raise ParforsUnexpectedReduceNodeError(inst)
            # XXX: This seems like a hack to stop the loop with this condition.
            if _fix_redvar_name_ssa_mismatch(parfor, lowerer, inst,
                                             reduce_info.redvar_name):
                break
    if config.DEBUG_ARRAY_OPT_RUNTIME:
        varname = reduce_info.redvar_name
        lowerer.print_variable(
            f"{parfor.loc}: parfor non-trivial reduction {varname} =",
            varname,
        )
def _lower_var_to_var_assign(lowerer, inst):
    """Lower Var->Var assignment.

    Returns True if-and-only-if `inst` is a Var->Var assignment.
    """
    if not isinstance(inst, ir.Assign):
        return False
    if not isinstance(inst.value, ir.Var):
        return False
    # Copy the source variable's current value into the target variable.
    lowerer.storevar(lowerer.loadvar(inst.value.name), name=inst.target.name)
    return True
def _emit_getitem_call(idx, lowerer, reduce_info):
    """Emit call to ``redarr_var[idx]``
    """
    def reducer_getitem(redarr, index):
        return redarr[index]

    # Compile a tiny indexing kernel and invoke it on the reduction array
    # with the given (intp) index, yielding one thread's partial result.
    arr_val = lowerer.loadvar(reduce_info.redarr_var.name)
    call_sig = signature(
        reduce_info.redvar_typ, reduce_info.redarr_typ, types.intp,
    )
    return lowerer.context.compile_internal(
        lowerer.builder, reducer_getitem, call_sig, (arr_val, idx),
    )
def _emit_binop_reduce_call(binop, lowerer, thread_count, reduce_info):
    """Emit call to the ``binop`` for the reduction variable.

    Compiles a small sequential kernel that folds the ``thread_count``
    per-thread partial results in the reduction array into the initial
    value, and returns the combined result value.
    """
    def reduction_add(thread_count, redarr, init):
        c = init
        for i in range(thread_count):
            c += redarr[i]
        return c
    def reduction_mul(thread_count, redarr, init):
        c = init
        for i in range(thread_count):
            c *= redarr[i]
        return c
    # NOTE(review): isub/ifloordiv/itruediv reductions are combined with
    # add/mul respectively -- presumably each thread's partial already
    # carries the inverted operation; confirm against the reduction init
    # logic before changing this mapping.
    kernel = {
        operator.iadd: reduction_add,
        operator.isub: reduction_add,
        operator.imul: reduction_mul,
        operator.ifloordiv: reduction_mul,
        operator.itruediv: reduction_mul,
    }[binop]
    ctx = lowerer.context
    builder = lowerer.builder
    redarr_typ = reduce_info.redarr_typ
    arg_arr = lowerer.loadvar(reduce_info.redarr_var.name)
    if config.DEBUG_ARRAY_OPT_RUNTIME:
        # Emit a runtime print of the reduction array and the init value.
        init_var = reduce_info.redarr_var.scope.get(reduce_info.redvar_name)
        res_print = ir.Print(
            args=[reduce_info.redarr_var, init_var], vararg=None,
            loc=lowerer.loc,
        )
        typemap = lowerer.fndesc.typemap
        lowerer.fndesc.calltypes[res_print] = signature(
            types.none, typemap[reduce_info.redarr_var.name],
            typemap[init_var.name],
        )
        lowerer.lower_inst(res_print)
    arg_thread_count = ctx.get_constant_generic(
        builder, types.uintp, thread_count,
    )
    args = (arg_thread_count, arg_arr, reduce_info.init_val)
    sig = signature(
        reduce_info.redvar_typ, types.uintp, redarr_typ, reduce_info.redvar_typ,
    )
    redvar_result = ctx.compile_internal(builder, kernel, sig, args)
    return redvar_result
def _is_inplace_binop_and_rhs_is_init(inst, redvar_name):
    """Is ``inst`` an inplace-binop and the RHS is the reduction init?
    """
    expected_rhs = f"{redvar_name}#init"
    # All conditions folded into one short-circuiting expression: the
    # instruction must be an assignment of an inplace-binop expression whose
    # right operand is the reduction's init variable.
    return (
        isinstance(inst, ir.Assign)
        and isinstance(inst.value, ir.Expr)
        and inst.value.op == "inplace_binop"
        and inst.value.rhs.name == expected_rhs
    )
def _fix_redvar_name_ssa_mismatch(parfor, lowerer, inst, redvar_name):
    """Fix reduction variable name mismatch due to SSA.

    Returns True when ``inst`` assigns (possibly under an SSA-versioned
    name) to the source-level reduction variable -- the caller then stops
    processing further reduction nodes.  As a side effect, copies the
    assigned value into ``redvar_name`` when the two names differ.
    """
    # Only process reduction statements post-gufunc execution
    # until we see an assignment with a left-hand side to the
    # reduction variable's name. This fixes problems with
    # cases where there are multiple assignments to the
    # reduction variable in the parfor.
    scope = parfor.init_block.scope
    if isinstance(inst, ir.Assign):
        try:
            reduction_var = scope.get_exact(redvar_name)
        except NotDefinedError:
            # Ideally, this shouldn't happen. The redvar name
            # missing from scope indicates an error from
            # other rewrite passes.
            is_same_source_var = redvar_name == inst.target.name
        else:
            # Because of SSA, the redvar and target var of
            # the current assignment would be different even
            # though they refer to the same source-level var.
            redvar_unver_name = reduction_var.unversioned_name
            target_unver_name = inst.target.unversioned_name
            is_same_source_var = redvar_unver_name == target_unver_name
        if is_same_source_var:
            # If redvar is different from target var, add an
            # assignment to put target var into redvar.
            if redvar_name != inst.target.name:
                val = lowerer.loadvar(inst.target.name)
                lowerer.storevar(val, name=redvar_name)
            return True
    return False
def _create_shape_signature(
        get_shape_classes,
        num_inputs,
        num_reductions,
        args,
        func_sig,
        races,
        typemap):
    '''Create shape signature for GUFunc

    Maps each argument's shape-equivalence classes to single-letter symbolic
    dimension names (as used in gufunc layout strings).  Racy variables get
    a fresh unique symbol per dimension; reduction arguments get an extra
    leading symbol representing the thread count.
    '''
    if config.DEBUG_ARRAY_OPT:
        print("_create_shape_signature", num_inputs, num_reductions, args, races)
        for i in args[1:]:
            print("argument", i, type(i), get_shape_classes(i, typemap=typemap))
    num_inouts = len(args) - num_reductions
    # maximum class number for array shapes
    # Racy vars get the sentinel class (-1,) so they never share a symbol.
    classes = [get_shape_classes(var, typemap=typemap) if var not in races else (-1,) for var in args[1:]]
    class_set = set()
    for _class in classes:
        if _class:
            for i in _class:
                class_set.add(i)
    max_class = max(class_set) + 1 if class_set else 0
    classes.insert(0, (max_class,))  # force set the class of 'sched' argument
    class_set.add(max_class)
    # Reserve one more class number to stand for the thread count dimension.
    thread_num_class = max_class + 1
    class_set.add(thread_num_class)
    class_map = {}
    # TODO: use prefix + class number instead of single char
    alphabet = ord('a')
    for n in class_set:
        if n >= 0:
            class_map[n] = chr(alphabet)
            alphabet += 1
    # `alphabet` is now one past the last assigned letter; use it for the
    # thread-count ordinal symbol.
    threadcount_ordinal = chr(alphabet)
    alpha_dict = {'latest_alpha' : alphabet}
    def bump_alpha(c, class_map):
        # Known classes reuse their letter; negative (racy) classes always
        # get a brand new letter so they alias nothing.
        if c >= 0:
            return class_map[c]
        else:
            alpha_dict['latest_alpha'] += 1
            return chr(alpha_dict['latest_alpha'])
    gu_sin = []
    gu_sout = []
    count = 0
    syms_sin = ()
    if config.DEBUG_ARRAY_OPT:
        print("args", args)
        print("classes", classes)
        print("threadcount_ordinal", threadcount_ordinal)
    for cls, arg in zip(classes, args):
        count = count + 1
        if cls:
            dim_syms = tuple(bump_alpha(c, class_map) for c in cls)
        else:
            dim_syms = ()
        if (count > num_inouts):
            # Add the threadcount_ordinal to represent the thread count
            # to the start of the reduction array.
            gu_sin.append(tuple([threadcount_ordinal] + list(dim_syms[1:])))
        else:
            gu_sin.append(dim_syms)
        syms_sin += dim_syms
    # NOTE(review): gu_sout is always returned empty and syms_sin is
    # accumulated but unused -- presumably outputs are treated as inputs
    # by the caller; confirm before relying on the second element.
    return (gu_sin, gu_sout)
def _print_block(block):
for i, inst in enumerate(block.body):
print(" ", i, " ", inst)
def _print_body(body_dict):
    '''Pretty-print a set of IR blocks.
    '''
    for lbl in body_dict:
        print("label: ", lbl)
        _print_block(body_dict[lbl])
def wrap_loop_body(loop_body):
    """Close the loop: append a jump back to the first block onto the last
    block and return a shallow copy of the block map.

    The block objects themselves are shared with the input, so the appended
    jump is visible through *loop_body* too; ``unwrap_loop_body`` undoes it.
    """
    labels = loop_body.keys()
    entry, last = min(labels), max(labels)
    tail = loop_body[last]
    tail.body.append(ir.Jump(entry, tail.loc))
    return dict(loop_body)
def unwrap_loop_body(loop_body):
    """Undo ``wrap_loop_body``: strip the trailing jump from the last block."""
    tail = loop_body[max(loop_body)]
    tail.body = tail.body[:-1]
def add_to_def_once_sets(a_def, def_once, def_more):
    '''Record one more definition of *a_def*.

    A name already known to be multiply-defined stays where it is.  A name
    seen exactly once before migrates from *def_once* to *def_more*.  A
    brand-new name enters *def_once*.
    '''
    if a_def in def_more:
        return
    if a_def in def_once:
        def_once.discard(a_def)
        def_more.add(a_def)
    else:
        def_once.add(a_def)
def compute_def_once_block(block, def_once, def_more, getattr_taken, typemap, module_assigns):
    '''Effect changes to the set of variables defined once or more than once
    for a single block.
    block - the block to process
    def_once - set of variable names known to be defined exactly once
    def_more - set of variable names known to be defined more than once
    getattr_taken - dict mapping variable name to tuple of object and attribute taken
    typemap - mapping of variable name to its Numba type
    module_assigns - dict mapping variable name to the Global that they came from
    '''
    # The only "defs" occur in assignments, so find such instructions.
    assignments = block.find_insts(ir.Assign)
    # For each assignment...
    for one_assign in assignments:
        # Get the LHS/target of the assignment.
        a_def = one_assign.target.name
        # Add variable to def sets.
        add_to_def_once_sets(a_def, def_once, def_more)
        rhs = one_assign.value
        if isinstance(rhs, ir.Global):
            # Remember assignments of the form "a = Global(...)"
            # Is this a module?
            if isinstance(rhs.value, pytypes.ModuleType):
                module_assigns[a_def] = rhs.value.__name__
        if isinstance(rhs, ir.Expr) and rhs.op == 'getattr' and rhs.value.name in def_once:
            # Remember assignments of the form "a = b.c"
            getattr_taken[a_def] = (rhs.value.name, rhs.attr)
        if isinstance(rhs, ir.Expr) and rhs.op == 'call' and rhs.func.name in getattr_taken:
            # If "a" is being called then lookup the getattr definition of "a"
            # as above, getting the module variable "b" (base_obj)
            # and the attribute "c" (base_attr).
            base_obj, base_attr = getattr_taken[rhs.func.name]
            if base_obj in module_assigns:
                # If we know the definition of the module variable then get the module
                # name from module_assigns.
                base_mod_name = module_assigns[base_obj]
                if not is_const_call(base_mod_name, base_attr):
                    # Calling a method on an object could modify the object and is thus
                    # like a def of that object. We call is_const_call to see if this module/attribute
                    # combination is known to not modify the module state. If we don't know that
                    # the combination is safe then we have to assume there could be a modification to
                    # the module and thus add the module variable as defined more than once.
                    add_to_def_once_sets(base_obj, def_once, def_more)
            else:
                # Assume the worst and say that base_obj could be modified by the call.
                add_to_def_once_sets(base_obj, def_once, def_more)
        if isinstance(rhs, ir.Expr) and rhs.op == 'call':
            # If a mutable object is passed to a function, then it may be changed and
            # therefore can't be hoisted.
            # For each argument to the function...
            for argvar in rhs.args:
                # Get the argument's type.
                if isinstance(argvar, ir.Var):
                    argvar = argvar.name
                avtype = typemap[argvar]
                # If that type doesn't have a mutable attribute or it does and it's set to
                # not mutable then this usage is safe for hoisting.
                if getattr(avtype, 'mutable', False):
                    # Here we have a mutable variable passed to a function so add this variable
                    # to the def lists.
                    add_to_def_once_sets(argvar, def_once, def_more)
def compute_def_once_internal(loop_body, def_once, def_more, getattr_taken, typemap, module_assigns):
    '''Compute the set of variables defined exactly once in the given set of blocks
    and use the given sets for storing which variables are defined once, more than
    once and which have had a getattr call on them.
    '''
    # Visit every block, accumulating into the shared def/getattr sets.
    for block in loop_body.values():
        compute_def_once_block(block, def_once, def_more, getattr_taken, typemap, module_assigns)
        # Parfor nodes embed their own blocks; walk them manually since the
        # top-level block iteration cannot see inside them.
        for stmt in block.body:
            if isinstance(stmt, parfor.Parfor):
                # The parfor's init block, then its loop body (recursively).
                compute_def_once_block(stmt.init_block, def_once, def_more, getattr_taken, typemap, module_assigns)
                compute_def_once_internal(stmt.loop_body, def_once, def_more, getattr_taken, typemap, module_assigns)
def compute_def_once(loop_body, typemap):
    '''Compute the set of variables defined exactly once in the given set of blocks.
    '''
    def_once = set()      # names defined exactly once
    def_more = set()      # names defined more than once
    getattr_taken = {}    # name -> (object name, attribute) for "a = b.c"
    module_assigns = {}   # name -> module name for "a = Global(module)"
    compute_def_once_internal(
        loop_body, def_once, def_more, getattr_taken, typemap, module_assigns)
    return def_once, def_more
def find_vars(var, varset):
    # Visitor callback for visit_vars_inner: record the name of every
    # ir.Var encountered into *varset*; return the var unchanged so the
    # traversal leaves the IR untouched.
    assert isinstance(var, ir.Var)
    varset.add(var.name)
    return var
def _hoist_internal(inst, dep_on_param, call_table, hoisted, not_hoisted,
                    typemap, stored_arrays):
    """Try to hoist the assignment *inst* out of the parfor loop body.

    An instruction is hoistable when its result is not stored into another
    array, all the variables it uses are hoist-safe (in *dep_on_param*),
    and its RHS is pure.  Hoisted instructions are appended to *hoisted*;
    rejected ones to *not_hoisted* together with the reason.  Returns True
    iff the instruction was hoisted.
    """
    if inst.target.name in stored_arrays:
        not_hoisted.append((inst, "stored array"))
        if config.DEBUG_ARRAY_OPT >= 1:
            print("Instruction", inst, " could not be hoisted because the created array is stored.")
        return False
    # Collect every variable the RHS reads.
    uses = set()
    visit_vars_inner(inst.value, find_vars, uses)
    diff = uses.difference(dep_on_param)
    if config.DEBUG_ARRAY_OPT >= 1:
        print("_hoist_internal:", inst, "uses:", uses, "diff:", diff)
    if len(diff) == 0 and is_pure(inst.value, None, call_table):
        if config.DEBUG_ARRAY_OPT >= 1:
            print("Will hoist instruction", inst, typemap[inst.target.name])
        hoisted.append(inst)
        # Non-array results become hoist-safe inputs for later candidates.
        if not isinstance(typemap[inst.target.name], types.npytypes.Array):
            dep_on_param += [inst.target.name]
        return True
    else:
        if len(diff) > 0:
            not_hoisted.append((inst, "dependency"))
            if config.DEBUG_ARRAY_OPT >= 1:
                print("Instruction", inst, " could not be hoisted because of a dependency.")
        else:
            not_hoisted.append((inst, "not pure"))
            if config.DEBUG_ARRAY_OPT >= 1:
                print("Instruction", inst, " could not be hoisted because it isn't pure.")
    return False
def find_setitems_block(setitems, itemsset, block, typemap):
    """Scan one block: collect setitem target names into *setitems* and the
    names of mutable stored values into *itemsset*, recursing into any
    nested parfors."""
    for stmt in block.body:
        if isinstance(stmt, (ir.StaticSetItem, ir.SetItem)):
            setitems.add(stmt.target.name)
            # Storing an immutable object into an array is safe to hoist.
            # A mutable stored value is not: hoisting could make several
            # slots of the outer array alias one object, so a write through
            # one index would show up at the others.
            if getattr(typemap[stmt.value.name], "mutable", False):
                itemsset.add(stmt.value.name)
        elif isinstance(stmt, parfor.Parfor):
            find_setitems_block(setitems, itemsset, stmt.init_block, typemap)
            find_setitems_body(setitems, itemsset, stmt.loop_body, typemap)
def find_setitems_body(setitems, itemsset, loop_body, typemap):
    """
    Find the arrays that are written into (goes into setitems) and the
    mutable objects (mostly arrays) that are written into other arrays
    (goes into itemsset).
    """
    for block in loop_body.values():
        find_setitems_block(setitems, itemsset, block, typemap)
def empty_container_allocator_hoist(inst, dep_on_param, call_table, hoisted,
                                    not_hoisted, typemap, stored_arrays):
    """If *inst* is a call to ``np.empty`` (per *call_table*), attempt to
    hoist it via ``_hoist_internal``; return True iff it was hoisted."""
    if not (isinstance(inst, ir.Assign) and isinstance(inst.value, ir.Expr)):
        return False
    expr = inst.value
    if expr.op != 'call' or expr.func.name not in call_table:
        return False
    if call_table[expr.func.name] != ['empty', np]:
        return False
    return _hoist_internal(inst, dep_on_param, call_table, hoisted,
                           not_hoisted, typemap, stored_arrays)
def hoist(parfor_params, loop_body, typemap, wrapped_blocks):
    """Hoist loop-invariant, pure instructions out of a parfor body.

    Removes hoistable instructions from *loop_body* in place and returns
    ``(hoisted, not_hoisted)``: the list of removed instructions and the
    list of ``(instruction, reason)`` pairs that were rejected.
    """
    # Start with the parfor parameters as the hoist-safe dependency set.
    dep_on_param = copy.copy(parfor_params)
    hoisted = []
    not_hoisted = []
    # Compute the set of variable defined exactly once in the loop body.
    def_once, def_more = compute_def_once(loop_body, typemap)
    (call_table, reverse_call_table) = get_call_table(wrapped_blocks)
    setitems = set()
    itemsset = set()
    find_setitems_body(setitems, itemsset, loop_body, typemap)
    # Arrays written inside the loop are not safe dependencies.
    dep_on_param = list(set(dep_on_param).difference(setitems))
    if config.DEBUG_ARRAY_OPT >= 1:
        print("hoist - def_once:", def_once, "setitems:", setitems, "itemsset:", itemsset, "dep_on_param:", dep_on_param, "parfor_params:", parfor_params)
    # Written-into arrays count as additional definitions.
    for si in setitems:
        add_to_def_once_sets(si, def_once, def_more)
    for label, block in loop_body.items():
        new_block = []
        for inst in block.body:
            if empty_container_allocator_hoist(inst, dep_on_param, call_table,
                                hoisted, not_hoisted, typemap, itemsset):
                continue
            elif isinstance(inst, ir.Assign) and inst.target.name in def_once:
                if _hoist_internal(inst, dep_on_param, call_table,
                                   hoisted, not_hoisted, typemap, itemsset):
                    # don't add this instruction to the block since it is
                    # hoisted
                    continue
            elif isinstance(inst, parfor.Parfor):
                # Nested parfor: filter its init block the same way.
                new_init_block = []
                if config.DEBUG_ARRAY_OPT >= 1:
                    print("parfor")
                    inst.dump()
                for ib_inst in inst.init_block.body:
                    if empty_container_allocator_hoist(ib_inst, dep_on_param,
                            call_table, hoisted, not_hoisted, typemap, itemsset):
                        continue
                    elif (isinstance(ib_inst, ir.Assign) and
                          ib_inst.target.name in def_once):
                        if _hoist_internal(ib_inst, dep_on_param, call_table,
                                           hoisted, not_hoisted, typemap, itemsset):
                            # don't add this instruction to the block since it is hoisted
                            continue
                    new_init_block.append(ib_inst)
                inst.init_block.body = new_init_block
            new_block.append(inst)
        block.body = new_block
    return hoisted, not_hoisted
def redtyp_is_scalar(redtype):
    """True when the reduction type is anything other than an array type."""
    if isinstance(redtype, types.npytypes.Array):
        return False
    return True
def redtyp_to_redarraytype(redtyp):
    """Go from a reduction variable type to the C-contiguous array type used
    to hold one partial result per worker.
    """
    if isinstance(redtyp, types.npytypes.Array):
        # Array reduction: rather than an array of arrays, build a single
        # multi-dimensional array of the same dtype with one extra
        # (leading, per-worker) dimension.
        return types.npytypes.Array(redtyp.dtype, redtyp.ndim + 1, "C")
    # Scalar reduction: one-dimensional array, one slot per worker.
    return types.npytypes.Array(redtyp, 1, "C")
def redarraytype_to_sig(redarraytyp):
    """Given a reduction array type, find the type of the reduction argument to the gufunc.
    """
    assert isinstance(redarraytyp, types.npytypes.Array)
    # Same dtype, rank and layout -- a fresh instance of the array type.
    return types.npytypes.Array(
        redarraytyp.dtype, redarraytyp.ndim, redarraytyp.layout,
    )
def legalize_names_with_typemap(names, typemap):
    """ We use ir_utils.legalize_names to replace internal IR variable names
    containing illegal characters (e.g. period) with a legal character
    (underscore) so as to create legal variable names.
    The original variable names are in the typemap so we also
    need to add the legalized name to the typemap as well.
    """
    outdict = legalize_names(names)
    for old_name, new_name in outdict.items():
        if old_name != new_name:
            # Mirror the original entry's type under the legalized name.
            typemap[new_name] = typemap[old_name]
    return outdict
def to_scalar_from_0d(x):
    """Map a 0-d array-compatible type to its scalar dtype; pass every
    other type through unchanged."""
    is_zero_dim = isinstance(x, types.ArrayCompatible) and x.ndim == 0
    return x.dtype if is_zero_dim else x
def _create_gufunc_for_parfor_body(
        lowerer,
        parfor,
        typemap,
        typingctx,
        targetctx,
        flags,
        locals,
        has_aliases,
        index_var_typ,
        races):
    '''
    Takes a parfor and creates a gufunc function for its body.
    There are two parts to this function.
    1) Code to iterate across the iteration space as defined by the schedule.
    2) The parfor body that does the work for a single point in the iteration space.
    Part 1 is created as Python text for simplicity with a sentinel assignment to mark the point
    in the IR where the parfor body should be added.
    This Python text is 'exec'ed into existence and its IR retrieved with run_frontend.
    The IR is scanned for the sentinel assignment where that basic block is split and the IR
    for the parfor body inserted.

    Returns a 5-tuple of (kernel_func, parfor_args, kernel_sig,
    func_arg_types, expanded_name_to_tuple_var).
    '''
    if config.DEBUG_ARRAY_OPT >= 1:
        print("starting _create_gufunc_for_parfor_body")

    loc = parfor.init_block.loc

    # The parfor body and the main function body share ir.Var nodes.
    # We have to do some replacements of Var names in the parfor body to make them
    # legal parameter names. If we don't copy then the Vars in the main function also
    # would incorrectly change their name.
    loop_body = copy.copy(parfor.loop_body)
    remove_dels(loop_body)

    parfor_dim = len(parfor.loop_nests)
    loop_indices = [l.index_variable.name for l in parfor.loop_nests]

    # Get all the parfor params.
    parfor_params = parfor.params
    # Get just the outputs of the parfor.
    parfor_outputs = numba.parfors.parfor.get_parfor_outputs(parfor, parfor_params)
    # Get all parfor reduction vars, and operators.
    # NOTE: this rebinds the `typemap` parameter to the lowerer's descriptor
    # typemap; all subsequent updates go into lowerer.fndesc.typemap.
    typemap = lowerer.fndesc.typemap
    parfor_redvars, parfor_reddict = numba.parfors.parfor.get_parfor_reductions(
        lowerer.func_ir, parfor, parfor_params, lowerer.fndesc.calltypes)
    # Compute just the parfor inputs as a set difference.
    parfor_inputs = sorted(
        list(
            set(parfor_params) -
            set(parfor_outputs) -
            set(parfor_redvars)))

    if config.DEBUG_ARRAY_OPT >= 1:
        print("parfor_params = ", parfor_params, " ", type(parfor_params))
        print("parfor_outputs = ", parfor_outputs, " ", type(parfor_outputs))
        print("parfor_inputs = ", parfor_inputs, " ", type(parfor_inputs))
        print("parfor_redvars = ", parfor_redvars, " ", type(parfor_redvars))

    # -------------------------------------------------------------------------
    # Convert tuples to individual parameters.
    tuple_expanded_parfor_inputs = []
    tuple_var_to_expanded_names = {}
    expanded_name_to_tuple_var = {}
    next_expanded_tuple_var = 0
    parfor_tuple_params = []
    # For each input to the parfor.
    for pi in parfor_inputs:
        # Get the type of the input.
        pi_type = typemap[pi]
        # If it is a UniTuple or Tuple we will do the conversion.
        if isinstance(pi_type, types.UniTuple) or isinstance(pi_type, types.NamedUniTuple):
            # Get the size and dtype of the tuple.
            tuple_count = pi_type.count
            tuple_dtype = pi_type.dtype
            # Only do tuples up to config.PARFOR_MAX_TUPLE_SIZE length.
            assert(tuple_count <= config.PARFOR_MAX_TUPLE_SIZE)
            this_var_expansion = []
            for i in range(tuple_count):
                # Generate a new name for the individual part of the tuple var.
                expanded_name = "expanded_tuple_var_" + str(next_expanded_tuple_var)
                # Add that name to the new list of inputs to the gufunc.
                tuple_expanded_parfor_inputs.append(expanded_name)
                this_var_expansion.append(expanded_name)
                # Remember a mapping from new param name to original tuple
                # var and the index within the tuple.
                expanded_name_to_tuple_var[expanded_name] = (pi, i)
                next_expanded_tuple_var += 1
                # Set the type of the new parameter.
                typemap[expanded_name] = tuple_dtype
            # Remember a mapping from the original tuple var to the
            # individual parts.
            tuple_var_to_expanded_names[pi] = this_var_expansion
            parfor_tuple_params.append(pi)
        elif isinstance(pi_type, types.Tuple) or isinstance(pi_type, types.NamedTuple):
            # This is the same as above for UniTuple except that each part of
            # the tuple can have a different type and we fetch that type with
            # pi_type.types[offset].
            tuple_count = pi_type.count
            tuple_types = pi_type.types
            # Only do tuples up to config.PARFOR_MAX_TUPLE_SIZE length.
            assert(tuple_count <= config.PARFOR_MAX_TUPLE_SIZE)
            this_var_expansion = []
            for i in range(tuple_count):
                expanded_name = "expanded_tuple_var_" + str(next_expanded_tuple_var)
                tuple_expanded_parfor_inputs.append(expanded_name)
                this_var_expansion.append(expanded_name)
                expanded_name_to_tuple_var[expanded_name] = (pi, i)
                next_expanded_tuple_var += 1
                typemap[expanded_name] = tuple_types[i]
            tuple_var_to_expanded_names[pi] = this_var_expansion
            parfor_tuple_params.append(pi)
        else:
            # Not a tuple: pass the input through unchanged.
            tuple_expanded_parfor_inputs.append(pi)
    parfor_inputs = tuple_expanded_parfor_inputs
    if config.DEBUG_ARRAY_OPT >= 1:
        print("parfor_inputs post tuple handling = ", parfor_inputs, " ", type(parfor_inputs))
    # -------------------------------------------------------------------------

    # Reduction variables are not considered racy (handled separately below).
    races = races.difference(set(parfor_redvars))
    for race in races:
        msg = ("Variable %s used in parallel loop may be written "
               "to simultaneously by multiple workers and may result "
               "in non-deterministic or unintended results." % race)
        warnings.warn(NumbaParallelSafetyWarning(msg, loc))
    replace_var_with_array(races, loop_body, typemap, lowerer.fndesc.calltypes)

    # Reduction variables are represented as arrays, so they go under
    # different names.
    parfor_redarrs = []
    parfor_red_arg_types = []
    for var in parfor_redvars:
        arr = var + "_arr"
        parfor_redarrs.append(arr)
        redarraytype = redtyp_to_redarraytype(typemap[var])
        parfor_red_arg_types.append(redarraytype)
        redarrsig = redarraytype_to_sig(redarraytype)
        if arr in typemap:
            assert(typemap[arr] == redarrsig)
        else:
            typemap[arr] = redarrsig

    # Reorder all the params so that inputs go first then outputs.
    parfor_params = parfor_inputs + parfor_outputs + parfor_redarrs

    if config.DEBUG_ARRAY_OPT >= 1:
        print("parfor_params = ", parfor_params, " ", type(parfor_params))
        print("loop_indices = ", loop_indices, " ", type(loop_indices))
        print("loop_body = ", loop_body, " ", type(loop_body))
        _print_body(loop_body)

    # Some Var are not legal parameter names so create a dict of potentially illegal
    # param name to guaranteed legal name.
    param_dict = legalize_names_with_typemap(parfor_params + parfor_redvars + parfor_tuple_params, typemap)
    if config.DEBUG_ARRAY_OPT >= 1:
        print(
            "param_dict = ",
            sorted(
                param_dict.items()),
            " ",
            type(param_dict))

    # Some loop_indices are not legal parameter names so create a dict of potentially illegal
    # loop index to guaranteed legal name.
    ind_dict = legalize_names_with_typemap(loop_indices, typemap)
    # Compute a new list of legal loop index names.
    legal_loop_indices = [ind_dict[v] for v in loop_indices]

    if config.DEBUG_ARRAY_OPT >= 1:
        print("ind_dict = ", sorted(ind_dict.items()), " ", type(ind_dict))
        print(
            "legal_loop_indices = ",
            legal_loop_indices,
            " ",
            type(legal_loop_indices))

        for pd in parfor_params:
            print("pd = ", pd)
            print("pd type = ", typemap[pd], " ", type(typemap[pd]))

    # Get the types of each parameter.
    param_types = [to_scalar_from_0d(typemap[v]) for v in parfor_params]

    # Calculate types of args passed to gufunc.
    func_arg_types = [typemap[v] for v in (parfor_inputs + parfor_outputs)] + parfor_red_arg_types
    if config.DEBUG_ARRAY_OPT >= 1:
        print("new param_types:", param_types)
        print("new func_arg_types:", func_arg_types)

    # Replace illegal parameter names in the loop body with legal ones.
    replace_var_names(loop_body, param_dict)
    # remember the name before legalizing as the actual arguments
    parfor_args = parfor_params
    # Change parfor_params to be legal names.
    parfor_params = [param_dict[v] for v in parfor_params]
    parfor_params_orig = parfor_params

    # ascontig is hard-wired off here, so the "+param" renaming branch below
    # is currently a dead path kept for the ascontiguousarray experiment.
    parfor_params = []
    ascontig = False
    for pindex in range(len(parfor_params_orig)):
        if (ascontig and
            pindex < len(parfor_inputs) and
            isinstance(param_types[pindex], types.npytypes.Array)):
            parfor_params.append(parfor_params_orig[pindex]+"param")
        else:
            parfor_params.append(parfor_params_orig[pindex])

    # Change parfor body to replace illegal loop index vars with legal ones.
    replace_var_names(loop_body, ind_dict)
    loop_body_var_table = get_name_var_table(loop_body)
    sentinel_name = get_unused_var_name("__sentinel__", loop_body_var_table)

    if config.DEBUG_ARRAY_OPT >= 1:
        print(
            "legal parfor_params = ",
            parfor_params,
            " ",
            type(parfor_params))

    # Determine the unique names of the scheduling and gufunc functions.
    # sched_func_name = "__numba_parfor_sched_%s" % (hex(hash(parfor)).replace("-", "_"))
    gufunc_name = "__numba_parfor_gufunc_%s" % (
        hex(hash(parfor)).replace("-", "_"))
    if config.DEBUG_ARRAY_OPT:
        # print("sched_func_name ", type(sched_func_name), " ", sched_func_name)
        print("gufunc_name ", type(gufunc_name), " ", gufunc_name)

    gufunc_txt = ""

    # Create the gufunc function.
    gufunc_txt += "def " + gufunc_name + \
        "(sched, " + (", ".join(parfor_params)) + "):\n"

    globls = {"np": np, "numba": numba}

    # First thing in the gufunc, we reconstruct tuples from their
    # individual parts, e.g., orig_tup_name = (part1, part2,).
    # The rest of the code of the function will use the original tuple name.
    for tup_var, exp_names in tuple_var_to_expanded_names.items():
        tup_type = typemap[tup_var]
        gufunc_txt += "    " + param_dict[tup_var]
        # Determine if the tuple is a named tuple.
        if (isinstance(tup_type, types.NamedTuple) or
            isinstance(tup_type, types.NamedUniTuple)):
            named_tup = True
        else:
            named_tup = False

        if named_tup:
            # It is a named tuple so try to find the global that defines the
            # named tuple.
            func_def = guard(get_definition, lowerer.func_ir, tup_var)
            named_tuple_def = None
            if config.DEBUG_ARRAY_OPT:
                print("func_def:", func_def, type(func_def))
            if func_def is not None:
                if (isinstance(func_def, ir.Expr) and
                    func_def.op == 'call'):
                    named_tuple_def = guard(get_definition, lowerer.func_ir, func_def.func)
                    if config.DEBUG_ARRAY_OPT:
                        print("named_tuple_def:", named_tuple_def, type(named_tuple_def))
                elif isinstance(func_def, ir.Arg):
                    named_tuple_def = typemap[func_def.name]
                    if config.DEBUG_ARRAY_OPT:
                        print("named_tuple_def:", named_tuple_def,
                              type(named_tuple_def), named_tuple_def.name)
            if named_tuple_def is not None:
                if (isinstance(named_tuple_def, ir.Global) or
                    isinstance(named_tuple_def, ir.FreeVar)):
                    # The named tuple class came from a global or freevar;
                    # expose it to the exec'ed gufunc under the same name.
                    gval = named_tuple_def.value
                    if config.DEBUG_ARRAY_OPT:
                        print("gval:", gval, type(gval))
                    globls[named_tuple_def.name] = gval
                elif isinstance(named_tuple_def, types.containers.BaseNamedTuple):
                    # The named tuple came in as an argument type; recover the
                    # instance class and register it under the bare class name.
                    named_tuple_name = named_tuple_def.name.split('(')[0]
                    if config.DEBUG_ARRAY_OPT:
                        print("name:", named_tuple_name,
                              named_tuple_def.instance_class,
                              type(named_tuple_def.instance_class))
                    globls[named_tuple_name] = named_tuple_def.instance_class
            else:
                if config.DEBUG_ARRAY_OPT:
                    print("Didn't find definition of namedtuple for globls.")
                raise CompilerError("Could not find definition of " + str(tup_var),
                                    tup_var.loc)
            gufunc_txt += " = " + tup_type.instance_class.__name__ + "("
            for name, field_name in zip(exp_names, tup_type.fields):
                gufunc_txt += field_name + "=" + param_dict[name] + ","
        else:
            # Just a regular tuple so use (part0, part1, ...)
            gufunc_txt += " = (" + ", ".join([param_dict[x] for x in exp_names])
            if len(exp_names) == 1:
                # Add comma for tuples with singular values.  We can't unilaterally
                # add a comma always because (,) isn't valid.
                gufunc_txt += ","
        gufunc_txt += ")\n"

    for pindex in range(len(parfor_inputs)):
        if ascontig and isinstance(param_types[pindex], types.npytypes.Array):
            gufunc_txt += ("    " + parfor_params_orig[pindex]
                + " = np.ascontiguousarray(" + parfor_params[pindex] + ")\n")

    gufunc_thread_id_var = "ParallelAcceleratorGufuncThreadId"
    if len(parfor_redarrs) > 0:
        # Reductions need the worker's slot in the reduction arrays.
        gufunc_txt += "    " + gufunc_thread_id_var + " = "
        gufunc_txt += "numba.np.ufunc.parallel._iget_thread_id()\n"

    # Add initialization of reduction variables
    for arr, var in zip(parfor_redarrs, parfor_redvars):
        gufunc_txt += "    " + param_dict[var] + \
            "=" + param_dict[arr] + "[" + gufunc_thread_id_var + "]\n"
        if config.DEBUG_ARRAY_OPT_RUNTIME:
            gufunc_txt += "    print(\"thread id =\", ParallelAcceleratorGufuncThreadId)\n"
            gufunc_txt += "    print(\"initial reduction value\",ParallelAcceleratorGufuncThreadId," + param_dict[var] + "," + param_dict[var] + ".shape)\n"
            gufunc_txt += "    print(\"reduction array\",ParallelAcceleratorGufuncThreadId," + param_dict[arr] + "," + param_dict[arr] + ".shape)\n"

    # For each dimension of the parfor, create a for loop in the generated gufunc function.
    # Iterate across the proper values extracted from the schedule.
    # The form of the schedule is start_dim0, start_dim1, ..., start_dimN, end_dim0,
    # end_dim1, ..., end_dimN
    for eachdim in range(parfor_dim):
        for indent in range(eachdim + 1):
            gufunc_txt += "    "
        sched_dim = eachdim
        # Schedule ends are inclusive, hence the "+ np.uint8(1)" on the stop.
        gufunc_txt += ("for " +
                       legal_loop_indices[eachdim] +
                       " in range(sched[" +
                       str(sched_dim) +
                       "], sched[" +
                       str(sched_dim +
                           parfor_dim) +
                       "] + np.uint8(1)):\n")

    if config.DEBUG_ARRAY_OPT_RUNTIME:
        for indent in range(parfor_dim + 1):
            gufunc_txt += "    "
        gufunc_txt += "print("
        for eachdim in range(parfor_dim):
            gufunc_txt += "\"" + legal_loop_indices[eachdim] + "\"," + legal_loop_indices[eachdim] + ","
        gufunc_txt += ")\n"

    # Add the sentinel assignment so that we can find the loop body position
    # in the IR.
    for indent in range(parfor_dim + 1):
        gufunc_txt += "    "
    gufunc_txt += sentinel_name + " = 0\n"

    # Add assignments of reduction variables (for returning the value)
    for arr, var in zip(parfor_redarrs, parfor_redvars):
        if config.DEBUG_ARRAY_OPT_RUNTIME:
            gufunc_txt += "    print(\"final reduction value\",ParallelAcceleratorGufuncThreadId," + param_dict[var] + ")\n"
            gufunc_txt += "    print(\"final reduction array\",ParallelAcceleratorGufuncThreadId," + param_dict[arr] + ")\n"
        # After the gufunc loops, copy the accumulated temp value back to reduction array.
        gufunc_txt += "    " + param_dict[arr] + \
            "[" + gufunc_thread_id_var + "] = " + param_dict[var] + "\n"
    gufunc_txt += "    return None\n"

    if config.DEBUG_ARRAY_OPT:
        print("gufunc_txt = ", type(gufunc_txt), "\n", gufunc_txt)
        print("globls:", globls, type(globls))
    # Force gufunc outline into existence.
    locls = {}
    exec(gufunc_txt, globls, locls)
    gufunc_func = locls[gufunc_name]

    if config.DEBUG_ARRAY_OPT:
        print("gufunc_func = ", type(gufunc_func), "\n", gufunc_func)
    # Get the IR for the gufunc outline.
    gufunc_ir = compiler.run_frontend(gufunc_func)

    if config.DEBUG_ARRAY_OPT:
        print("gufunc_ir dump ", type(gufunc_ir))
        gufunc_ir.dump()
        print("loop_body dump ", type(loop_body))
        _print_body(loop_body)

    # rename all variables in gufunc_ir afresh
    var_table = get_name_var_table(gufunc_ir.blocks)
    new_var_dict = {}
    # Names referenced by the spliced-in parfor body must keep their names.
    reserved_names = [sentinel_name] + \
        list(param_dict.values()) + legal_loop_indices
    for name, var in var_table.items():
        if not (name in reserved_names):
            new_var_dict[name] = mk_unique_var(name)
    replace_var_names(gufunc_ir.blocks, new_var_dict)
    if config.DEBUG_ARRAY_OPT:
        print("gufunc_ir dump after renaming ")
        gufunc_ir.dump()

    # The first gufunc parameter is the schedule array, typed from the index.
    gufunc_param_types = [types.npytypes.Array(
        index_var_typ, 1, "C")] + param_types
    if config.DEBUG_ARRAY_OPT:
        print(
            "gufunc_param_types = ",
            type(gufunc_param_types),
            "\n",
            gufunc_param_types)

    gufunc_stub_last_label = find_max_label(gufunc_ir.blocks) + 1

    # Add gufunc stub last label to each parfor.loop_body label to prevent
    # label conflicts.
    loop_body = add_offset_to_labels(loop_body, gufunc_stub_last_label)
    # new label for splitting sentinel block
    new_label = find_max_label(loop_body) + 1

    # If enabled, add a print statement after every assignment.
    if config.DEBUG_ARRAY_OPT_RUNTIME:
        for label, block in loop_body.items():
            new_block = block.copy()
            new_block.clear()
            loc = block.loc
            scope = block.scope
            for inst in block.body:
                new_block.append(inst)
                # Append print after assignment
                if isinstance(inst, ir.Assign):
                    # Only apply to numbers
                    if typemap[inst.target.name] not in types.number_domain:
                        continue

                    # Make constant string
                    strval = "{} =".format(inst.target.name)
                    strconsttyp = types.StringLiteral(strval)

                    lhs = ir.Var(scope, mk_unique_var("str_const"), loc)
                    assign_lhs = ir.Assign(value=ir.Const(value=strval, loc=loc),
                                           target=lhs, loc=loc)
                    typemap[lhs.name] = strconsttyp
                    new_block.append(assign_lhs)

                    # Make print node
                    print_node = ir.Print(args=[lhs, inst.target], vararg=None, loc=loc)
                    new_block.append(print_node)
                    sig = numba.core.typing.signature(types.none,
                                                      typemap[lhs.name],
                                                      typemap[inst.target.name])
                    lowerer.fndesc.calltypes[print_node] = sig
            loop_body[label] = new_block

    if config.DEBUG_ARRAY_OPT:
        print("parfor loop body")
        _print_body(loop_body)

    # Hoist loop-invariant instructions out of the loop body into the
    # gufunc's start block (before its terminator).
    wrapped_blocks = wrap_loop_body(loop_body)
    hoisted, not_hoisted = hoist(parfor_params, loop_body, typemap, wrapped_blocks)
    start_block = gufunc_ir.blocks[min(gufunc_ir.blocks.keys())]
    start_block.body = start_block.body[:-1] + hoisted + [start_block.body[-1]]
    unwrap_loop_body(loop_body)

    # store hoisted into diagnostics
    diagnostics = lowerer.metadata['parfor_diagnostics']
    diagnostics.hoist_info[parfor.id] = {'hoisted': hoisted,
                                         'not_hoisted': not_hoisted}

    if config.DEBUG_ARRAY_OPT:
        print("After hoisting")
        _print_body(loop_body)

    # Search all the block in the gufunc outline for the sentinel assignment.
    for label, block in gufunc_ir.blocks.items():
        for i, inst in enumerate(block.body):
            if isinstance(
                    inst,
                    ir.Assign) and inst.target.name == sentinel_name:
                # We found the sentinel assignment.
                loc = inst.loc
                scope = block.scope
                # split block across __sentinel__
                # A new block is allocated for the statements prior to the sentinel
                # but the new block maintains the current block label.
                prev_block = ir.Block(scope, loc)
                prev_block.body = block.body[:i]
                # The current block is used for statements after the sentinel.
                block.body = block.body[i + 1:]
                # But the current block gets a new label.
                body_first_label = min(loop_body.keys())

                # The previous block jumps to the minimum labelled block of the
                # parfor body.
                prev_block.append(ir.Jump(body_first_label, loc))

                # Add all the parfor loop body blocks to the gufunc function's
                # IR.
                for (l, b) in loop_body.items():
                    gufunc_ir.blocks[l] = transfer_scope(b, scope)
                body_last_label = max(loop_body.keys())

                gufunc_ir.blocks[new_label] = block
                gufunc_ir.blocks[label] = prev_block
                # Add a jump from the last parfor body block to the block containing
                # statements after the sentinel.
                gufunc_ir.blocks[body_last_label].append(
                    ir.Jump(new_label, loc))
                break
        else:
            # for-else: inner loop finished without break (no sentinel in
            # this block); keep scanning the remaining blocks.
            continue
        # Inner loop broke: the sentinel was found and handled, stop scanning.
        break

    if config.DEBUG_ARRAY_OPT:
        print("gufunc_ir last dump before renaming")
        gufunc_ir.dump()

    gufunc_ir.blocks = rename_labels(gufunc_ir.blocks)
    remove_dels(gufunc_ir.blocks)

    if config.DEBUG_ARRAY_OPT:
        print("gufunc_ir last dump")
        gufunc_ir.dump()
        print("flags", flags)
        print("typemap", typemap)

    # Temporarily set noalias while compiling the gufunc; restored below.
    old_alias = flags.noalias
    if not has_aliases:
        if config.DEBUG_ARRAY_OPT:
            print("No aliases found so adding noalias flag.")
        flags.noalias = True

    fixup_var_define_in_scope(gufunc_ir.blocks)
    kernel_func = compiler.compile_ir(
        typingctx,
        targetctx,
        gufunc_ir,
        gufunc_param_types,
        types.none,
        flags,
        locals)

    flags.noalias = old_alias

    kernel_sig = signature(types.none, *gufunc_param_types)
    if config.DEBUG_ARRAY_OPT:
        print("finished create_gufunc_for_parfor_body. kernel_sig = ", kernel_sig)

    return kernel_func, parfor_args, kernel_sig, func_arg_types, expanded_name_to_tuple_var
def replace_var_with_array_in_block(vars, block, typemap, calltypes):
    """Rewrite assignments to any variable in *vars* as stores into element 0
    of an array of the same name.

    Each ``v = rhs`` with ``v`` in *vars* becomes the three statements
    ``$const_ind_0 = 0``, ``$val = rhs``, ``v[$const_ind_0] = $val``.
    Nested Parfor nodes are recursed into (init block and loop body); all
    other statements are kept unchanged.  Returns the new statement list
    for *block*; *typemap* and *calltypes* are updated in place.
    """
    new_block = []
    for inst in block.body:
        if isinstance(inst, ir.Assign) and inst.target.name in vars:
            # Constant index 0 used for the setitem below.
            const_node = ir.Const(0, inst.loc)
            const_var = ir.Var(inst.target.scope, mk_unique_var("$const_ind_0"), inst.loc)
            typemap[const_var.name] = types.uintp
            const_assign = ir.Assign(const_node, const_var, inst.loc)
            new_block.append(const_assign)
            # Evaluate the original RHS into a fresh temporary.
            val_var = ir.Var(inst.target.scope, mk_unique_var("$val"), inst.loc)
            typemap[val_var.name] = typemap[inst.target.name]
            new_block.append(ir.Assign(inst.value, val_var, inst.loc))
            # Store the temporary into element 0 of the array-ified variable.
            setitem_node = ir.SetItem(inst.target, const_var, val_var, inst.loc)
            calltypes[setitem_node] = signature(
                types.none, types.npytypes.Array(typemap[inst.target.name], 1, "C"), types.intp, typemap[inst.target.name])
            new_block.append(setitem_node)
            # The original assignment is dropped (replaced by the setitem).
            continue
        elif isinstance(inst, parfor.Parfor):
            # Recurse into the nested parfor's init block and loop body.
            replace_var_with_array_internal(vars, {0: inst.init_block}, typemap, calltypes)
            replace_var_with_array_internal(vars, inst.loop_body, typemap, calltypes)
        # All non-replaced statements (including the Parfor itself) are kept.
        new_block.append(inst)
    return new_block
def replace_var_with_array_internal(vars, loop_body, typemap, calltypes):
    """Apply replace_var_with_array_in_block to every block of *loop_body*."""
    for blk in loop_body.values():
        blk.body = replace_var_with_array_in_block(vars, blk, typemap, calltypes)
def replace_var_with_array(vars, loop_body, typemap, calltypes):
    """Rewrite all assignments to *vars* inside *loop_body* as element-0
    stores and retype each such variable as a 1-D C array of its former
    type.  Mutates *loop_body*, *typemap* and *calltypes* in place.
    """
    replace_var_with_array_internal(vars, loop_body, typemap, calltypes)
    for name in vars:
        elem_type = typemap[name]
        # Remove the scalar entry before registering the array type.
        typemap.pop(name, None)
        typemap[name] = types.npytypes.Array(elem_type, 1, "C")
def call_parallel_gufunc(lowerer, cres, gu_signature, outer_sig, expr_args, expr_arg_types,
                         loop_ranges, redvars, reddict, redarrdict, init_block, index_var_typ, races,
                         exp_name_to_tuple_var):
    '''
    Adds the call to the gufunc function from the main function.

    Emits, via the lowerer's LLVM builder: the do_scheduling call that
    partitions the iteration space, the (args, shapes, steps, data)
    buffers expected by the gufunc wrapper, the call to the wrapper
    itself, and the copy-back of race-variable results.
    NOTE(review): mutates expr_args and the sin list of gu_signature in
    place (the sched entry is popped from both).
    '''
    context = lowerer.context
    builder = lowerer.builder

    from numba.np.ufunc.parallel import (build_gufunc_wrapper,
                                         _launch_threads)

    if config.DEBUG_ARRAY_OPT:
        print("make_parallel_loop")
        print("outer_sig = ", outer_sig.args, outer_sig.return_type,
              outer_sig.recvr, outer_sig.pysig)
        print("loop_ranges = ", loop_ranges)
        print("expr_args", expr_args)
        print("expr_arg_types", expr_arg_types)
        print("gu_signature", gu_signature)

    # Build the wrapper for GUFunc
    args, return_type = sigutils.normalize_signature(outer_sig)
    llvm_func = cres.library.get_function(cres.fndesc.llvm_func_name)
    sin, sout = gu_signature

    # These are necessary for build_gufunc_wrapper to find external symbols
    _launch_threads()

    info = build_gufunc_wrapper(llvm_func, cres, sin, sout,
                                cache=False, is_parfors=True)
    wrapper_name = info.name
    cres.library._ensure_finalized()

    if config.DEBUG_ARRAY_OPT:
        print("parallel function = ", wrapper_name, cres)

    # loadvars for loop_ranges
    def load_range(v):
        # An ir.Var is loaded from the lowerer; anything else is treated
        # as a compile-time constant of uintp type.
        if isinstance(v, ir.Var):
            return lowerer.loadvar(v.name)
        else:
            return context.get_constant(types.uintp, v)

    num_dim = len(loop_ranges)
    for i in range(num_dim):
        start, stop, step = loop_ranges[i]
        start = load_range(start)
        stop = load_range(stop)
        assert(step == 1)  # We do not support loop steps other than 1
        step = load_range(step)
        loop_ranges[i] = (start, stop, step)

        if config.DEBUG_ARRAY_OPT:
            print("call_parallel_gufunc loop_ranges[{}] = ".format(i), start,
                  stop, step)
            cgutils.printf(builder, "loop range[{}]: %d %d (%d)\n".format(i),
                           start, stop, step)

    # Commonly used LLVM types and constants
    byte_t = llvmlite.ir.IntType(8)
    byte_ptr_t = llvmlite.ir.PointerType(byte_t)
    byte_ptr_ptr_t = llvmlite.ir.PointerType(byte_ptr_t)
    intp_t = context.get_value_type(types.intp)
    uintp_t = context.get_value_type(types.uintp)
    intp_ptr_t = llvmlite.ir.PointerType(intp_t)
    uintp_ptr_t = llvmlite.ir.PointerType(uintp_t)
    zero = context.get_constant(types.uintp, 0)
    one = context.get_constant(types.uintp, 1)
    one_type = one.type
    sizeof_intp = context.get_abi_sizeof(intp_t)

    # Prepare sched, first pop it out of expr_args, outer_sig, and gu_signature
    expr_args.pop(0)
    sched_sig = sin.pop(0)

    if config.DEBUG_ARRAY_OPT:
        print("Parfor has potentially negative start", index_var_typ.signed)

    # Signed indices need a signed schedule type (negative starts possible).
    if index_var_typ.signed:
        sched_type = intp_t
        sched_ptr_type = intp_ptr_t
    else:
        sched_type = uintp_t
        sched_ptr_type = uintp_ptr_t

    # Call do_scheduling with appropriate arguments
    dim_starts = cgutils.alloca_once(
        builder, sched_type, size=context.get_constant(
            types.uintp, num_dim), name="dim_starts")
    dim_stops = cgutils.alloca_once(
        builder, sched_type, size=context.get_constant(
            types.uintp, num_dim), name="dim_stops")
    for i in range(num_dim):
        start, stop, step = loop_ranges[i]
        # Widen all bounds to the schedule's integer width.
        if start.type != one_type:
            start = builder.sext(start, one_type)
        if stop.type != one_type:
            stop = builder.sext(stop, one_type)
        if step.type != one_type:
            step = builder.sext(step, one_type)
        # subtract 1 because do-scheduling takes inclusive ranges
        stop = builder.sub(stop, one)
        builder.store(
            start, builder.gep(
                dim_starts, [
                    context.get_constant(
                        types.uintp, i)]))
        builder.store(stop, builder.gep(dim_stops,
                                        [context.get_constant(types.uintp, i)]))

    get_chunksize = cgutils.get_or_insert_function(
        builder.module,
        llvmlite.ir.FunctionType(uintp_t, []),
        name="get_parallel_chunksize")

    set_chunksize = cgutils.get_or_insert_function(
        builder.module,
        llvmlite.ir.FunctionType(llvmlite.ir.VoidType(), [uintp_t]),
        name="set_parallel_chunksize")

    get_num_threads = cgutils.get_or_insert_function(
        builder.module,
        llvmlite.ir.FunctionType(llvmlite.ir.IntType(types.intp.bitwidth), []),
        "get_num_threads")

    num_threads = builder.call(get_num_threads, [])
    current_chunksize = builder.call(get_chunksize, [])

    # Guard against a non-positive thread count at runtime.
    with cgutils.if_unlikely(builder, builder.icmp_signed('<=', num_threads,
                                                          num_threads.type(0))):
        cgutils.printf(builder, "num_threads: %d\n", num_threads)
        context.call_conv.return_user_exc(builder, RuntimeError,
                                          ("Invalid number of threads. "
                                           "This likely indicates a bug in Numba.",))

    get_sched_size_fnty = llvmlite.ir.FunctionType(uintp_t, [uintp_t, uintp_t, intp_ptr_t, intp_ptr_t])
    get_sched_size = cgutils.get_or_insert_function(
        builder.module,
        get_sched_size_fnty,
        name="get_sched_size")
    num_divisions = builder.call(get_sched_size, [num_threads,
                                                  context.get_constant(types.uintp, num_dim),
                                                  dim_starts,
                                                  dim_stops])
    # Chunksize 0 while the parallel region runs; restored after the call.
    builder.call(set_chunksize, [zero])

    # The schedule holds a start and an end index per dimension per division.
    multiplier = context.get_constant(types.uintp, num_dim * 2)
    sched_size = builder.mul(num_divisions, multiplier)
    sched = builder.alloca(sched_type, size=sched_size, name="sched")
    debug_flag = 1 if config.DEBUG_ARRAY_OPT else 0
    scheduling_fnty = llvmlite.ir.FunctionType(
        intp_ptr_t, [uintp_t, intp_ptr_t, intp_ptr_t, uintp_t, sched_ptr_type, intp_t])
    if index_var_typ.signed:
        do_scheduling = cgutils.get_or_insert_function(builder.module,
                                                       scheduling_fnty,
                                                       name="do_scheduling_signed")
    else:
        do_scheduling = cgutils.get_or_insert_function(builder.module,
                                                       scheduling_fnty,
                                                       name="do_scheduling_unsigned")

    builder.call(
        do_scheduling, [
            context.get_constant(
                types.uintp, num_dim), dim_starts, dim_stops, num_divisions,
            sched, context.get_constant(
                types.intp, debug_flag)])

    # Get the LLVM vars for the Numba IR reduction array vars.
    redarrs = [lowerer.loadvar(redarrdict[x].name) for x in redvars]

    nredvars = len(redvars)
    ninouts = len(expr_args) - nredvars

    def load_potential_tuple_var(x):
        """Given a variable name, if that variable is not a new name
        introduced as the extracted part of a tuple then just return
        the variable loaded from its name.  However, if the variable
        does represent part of a tuple, as recognized by the name of
        the variable being present in the exp_name_to_tuple_var dict,
        then we load the original tuple var instead that we get from
        the dict and then extract the corresponding element of the
        tuple, also stored and returned to use in the dict (i.e., offset).
        """
        if x in exp_name_to_tuple_var:
            orig_tup, offset = exp_name_to_tuple_var[x]
            tup_var = lowerer.loadvar(orig_tup)
            res = builder.extract_value(tup_var, offset)
            return res
        else:
            return lowerer.loadvar(x)

    # ----------------------------------------------------------------------------
    # Prepare arguments: args, shapes, steps, data
    all_args = [load_potential_tuple_var(x) for x in expr_args[:ninouts]] + redarrs

    num_args = len(all_args)
    num_inps = len(sin) + 1
    args = cgutils.alloca_once(
        builder,
        byte_ptr_t,
        size=context.get_constant(
            types.intp,
            1 + num_args),
        name="pargs")
    array_strides = []
    # sched goes first
    builder.store(builder.bitcast(sched, byte_ptr_t), args)
    array_strides.append(context.get_constant(types.intp, sizeof_intp))
    rv_to_arg_dict = {}
    # followed by other arguments
    for i in range(num_args):
        arg = all_args[i]
        var = expr_args[i]
        aty = expr_arg_types[i]
        dst = builder.gep(args, [context.get_constant(types.intp, i + 1)])
        if i >= ninouts:  # reduction variables
            ary = context.make_array(aty)(context, builder, arg)
            strides = cgutils.unpack_tuple(builder, ary.strides, aty.ndim)
            # Start from 1 because we skip the first dimension of length num_threads just like sched.
            for j in range(len(strides)):
                array_strides.append(strides[j])
            builder.store(builder.bitcast(ary.data, byte_ptr_t), dst)
        elif isinstance(aty, types.ArrayCompatible):
            if var in races:
                # Racy variable: pass a pointer to a single stack slot; the
                # result is copied back out after the kernel call below.
                typ = (context.get_data_type(aty.dtype)
                       if aty.dtype != types.boolean
                       else llvmlite.ir.IntType(1))

                rv_arg = cgutils.alloca_once(builder, typ)
                builder.store(arg, rv_arg)
                builder.store(builder.bitcast(rv_arg, byte_ptr_t), dst)
                rv_to_arg_dict[var] = (arg, rv_arg)

                array_strides.append(context.get_constant(types.intp, context.get_abi_sizeof(typ)))
            else:
                ary = context.make_array(aty)(context, builder, arg)
                strides = cgutils.unpack_tuple(builder, ary.strides, aty.ndim)
                for j in range(len(strides)):
                    array_strides.append(strides[j])
                builder.store(builder.bitcast(ary.data, byte_ptr_t), dst)
        else:
            if i < num_inps:
                # Scalar input, need to store the value in an array of size 1
                typ = (context.get_data_type(aty)
                       if not isinstance(aty, types.Boolean)
                       else llvmlite.ir.IntType(1))
                ptr = cgutils.alloca_once(builder, typ)
                builder.store(arg, ptr)
            else:
                # Scalar output, must allocate
                typ = (context.get_data_type(aty)
                       if not isinstance(aty, types.Boolean)
                       else llvmlite.ir.IntType(1))
                ptr = cgutils.alloca_once(builder, typ)
            builder.store(builder.bitcast(ptr, byte_ptr_t), dst)

    # ----------------------------------------------------------------------------
    # Next, we prepare the individual dimension info recorded in gu_signature
    sig_dim_dict = {}
    # NOTE(review): this empty-list assignment is immediately overwritten
    # by the next line and is effectively dead.
    occurances = []
    occurances = [sched_sig[0]]
    sig_dim_dict[sched_sig[0]] = context.get_constant(types.intp, 2 * num_dim)
    assert len(expr_args) == len(all_args)
    assert len(expr_args) == len(expr_arg_types)
    assert len(expr_args) == len(sin + sout)
    assert len(expr_args) == len(outer_sig.args[1:])
    for var, arg, aty, gu_sig in zip(expr_args, all_args,
                                     expr_arg_types, sin + sout):
        if isinstance(aty, types.npytypes.Array):
            i = aty.ndim - len(gu_sig)
        else:
            i = 0
        if config.DEBUG_ARRAY_OPT:
            print("var =", var, "gu_sig =", gu_sig, "type =", aty, "i =", i)

        for dim_sym in gu_sig:
            if config.DEBUG_ARRAY_OPT:
                print("var = ", var, " type = ", aty)
            if var in races:
                # Racy variables were reduced to a single element above.
                sig_dim_dict[dim_sym] = context.get_constant(types.intp, 1)
            else:
                ary = context.make_array(aty)(context, builder, arg)
                shapes = cgutils.unpack_tuple(builder, ary.shape, aty.ndim)
                sig_dim_dict[dim_sym] = shapes[i]

            if not (dim_sym in occurances):
                if config.DEBUG_ARRAY_OPT:
                    print("dim_sym = ", dim_sym, ", i = ", i)
                    cgutils.printf(builder, dim_sym + " = %d\n", sig_dim_dict[dim_sym])
                occurances.append(dim_sym)
            i = i + 1

    # ----------------------------------------------------------------------------
    # Prepare shapes, which is a single number (outer loop size), followed by
    # the size of individual shape variables.
    nshapes = len(sig_dim_dict) + 1
    shapes = cgutils.alloca_once(builder, intp_t, size=nshapes, name="pshape")
    # For now, outer loop size is the same as number of threads
    builder.store(num_divisions, shapes)
    # Individual shape variables go next
    i = 1
    for dim_sym in occurances:
        if config.DEBUG_ARRAY_OPT:
            cgutils.printf(builder, dim_sym + " = %d\n", sig_dim_dict[dim_sym])
        builder.store(
            sig_dim_dict[dim_sym], builder.gep(
                shapes, [
                    context.get_constant(
                        types.intp, i)]))
        i = i + 1

    # ----------------------------------------------------------------------------
    # Prepare steps for each argument. Note that all steps are counted in
    # bytes.
    num_steps = num_args + 1 + len(array_strides)
    steps = cgutils.alloca_once(
        builder, intp_t, size=context.get_constant(
            types.intp, num_steps), name="psteps")
    # First goes the step size for sched, which is 2 * num_dim
    builder.store(context.get_constant(types.intp, 2 * num_dim * sizeof_intp),
                  steps)
    # The steps for all others are 0, except for reduction results.
    for i in range(num_args):
        # steps are strides from one thread to the next
        stepsize = zero

        dst = builder.gep(steps, [context.get_constant(types.intp, 1 + i)])
        builder.store(stepsize, dst)
    # Per-dimension array strides collected above follow the per-arg steps.
    for j in range(len(array_strides)):
        dst = builder.gep(
            steps, [
                context.get_constant(
                    types.intp, 1 + num_args + j)])
        builder.store(array_strides[j], dst)

    # ----------------------------------------------------------------------------
    # prepare data
    data = cgutils.get_null_value(byte_ptr_t)

    fnty = llvmlite.ir.FunctionType(llvmlite.ir.VoidType(),
                                    [byte_ptr_ptr_t, intp_ptr_t,
                                     intp_ptr_t, byte_ptr_t])

    fn = cgutils.get_or_insert_function(builder.module, fnty, wrapper_name)
    context.active_code_library.add_linking_library(info.library)

    if config.DEBUG_ARRAY_OPT:
        cgutils.printf(builder, "before calling kernel %p\n", fn)
    builder.call(fn, [args, shapes, steps, data])
    if config.DEBUG_ARRAY_OPT:
        cgutils.printf(builder, "after calling kernel %p\n", fn)

    # Restore the chunksize saved before the parallel region.
    builder.call(set_chunksize, [current_chunksize])

    # Copy the single-element race-variable results back into their vars.
    for k, v in rv_to_arg_dict.items():
        arg, rv_arg = v
        only_elem_ptr = builder.gep(rv_arg, [context.get_constant(types.intp, 0)])
        builder.store(builder.load(only_elem_ptr), lowerer.getvar(k))

    context.active_code_library.add_linking_library(cres.library)
# Update numba/parfors/parfor_lowering.py
# Co-authored-by: stuartarchibald <bea6903cc29f240e46cd18ca8cb3cffc8245594f@users.noreply.github.com>
import copy
import operator
import types as pytypes
import operator
import warnings
from dataclasses import make_dataclass
import llvmlite.ir
import numpy as np
import numba
from numba.parfors import parfor
from numba.core import types, ir, config, compiler, sigutils, cgutils
from numba.core.ir_utils import (
add_offset_to_labels,
replace_var_names,
remove_dels,
legalize_names,
mk_unique_var,
rename_labels,
get_name_var_table,
visit_vars_inner,
get_definition,
guard,
get_call_table,
is_pure,
get_np_ufunc_typ,
get_unused_var_name,
is_const_call,
fixup_var_define_in_scope,
transfer_scope,
find_max_label,
get_global_func_typ,
)
from numba.core.typing import signature
from numba.parfors.parfor import ensure_parallel_support
from numba.core.errors import (
NumbaParallelSafetyWarning, NotDefinedError, CompilerError, InternalError,
)
from numba.parfors.parfor_lowering_utils import ParforLoweringBuilder
def _lower_parfor_parallel(lowerer, parfor):
    """Lowerer that handles LLVM code generation for parfor.

    This function lowers a parfor IR node to LLVM.
    The general approach is as follows:

    1) The code from the parfor's init block is lowered normally
       in the context of the current function.

    2) The body of the parfor is transformed into a gufunc function.

    3) Code is inserted into the main function that calls do_scheduling
       to divide the iteration space for each thread, allocates
       reduction arrays, calls the gufunc function, and then invokes
       the reduction function across the reduction arrays to produce
       the final reduction values.

    Parameters:
        lowerer - the Numba Lower object driving LLVM code generation
        parfor - the parfor IR node to lower

    Side effects: temporarily replaces ``lowerer.fndesc.typemap`` with a
    copy (restored before returning) and toggles the module-level
    ``numba.parfors.parfor.sequential_parfor_lowering`` flag around gufunc
    creation.
    """
    from numba.np.ufunc.parallel import get_thread_count

    ensure_parallel_support()
    typingctx = lowerer.context.typing_context
    targetctx = lowerer.context
    builder = lowerer.builder
    # We copy the typemap here because for race condition variable we'll
    # update their type to array so they can be updated by the gufunc.
    orig_typemap = lowerer.fndesc.typemap
    # replace original typemap with copy and restore the original at the end.
    lowerer.fndesc.typemap = copy.copy(orig_typemap)
    if config.DEBUG_ARRAY_OPT:
        print("lowerer.fndesc", lowerer.fndesc, type(lowerer.fndesc))
    typemap = lowerer.fndesc.typemap
    varmap = lowerer.varmap

    if config.DEBUG_ARRAY_OPT:
        print("_lower_parfor_parallel")
        parfor.dump()

    loc = parfor.init_block.loc
    scope = parfor.init_block.scope

    # produce instructions for init_block
    if config.DEBUG_ARRAY_OPT:
        print("init_block = ", parfor.init_block, " ", type(parfor.init_block))
    for instr in parfor.init_block.body:
        if config.DEBUG_ARRAY_OPT:
            print("lower init_block instr = ", instr)
        lowerer.lower_inst(instr)

    # Pre-allocate storage for race-condition variables that do not yet
    # have an LLVM slot, so the gufunc can write to them.
    for racevar in parfor.races:
        if racevar not in varmap:
            rvtyp = typemap[racevar]
            rv = ir.Var(scope, racevar, loc)
            lowerer._alloca_var(rv.name, rvtyp)

    alias_map = {}
    arg_aliases = {}
    numba.parfors.parfor.find_potential_aliases_parfor(parfor, parfor.params, typemap,
                                                       lowerer.func_ir, alias_map, arg_aliases)
    if config.DEBUG_ARRAY_OPT:
        print("alias_map", alias_map)
        print("arg_aliases", arg_aliases)

    # run get_parfor_outputs() and get_parfor_reductions() before gufunc creation
    # since Jumps are modified so CFG of loop_body dict will become invalid
    assert parfor.params is not None

    parfor_output_arrays = numba.parfors.parfor.get_parfor_outputs(
        parfor, parfor.params)

    parfor_redvars, parfor_reddict = parfor.redvars, parfor.reddict
    if config.DEBUG_ARRAY_OPT:
        print("parfor_redvars:", parfor_redvars)
        print("parfor_reddict:", parfor_reddict)

    # init reduction array allocation here.
    nredvars = len(parfor_redvars)
    redarrs = {}
    if nredvars > 0:
        # reduction arrays outer dimension equal to thread count
        scope = parfor.init_block.scope
        loc = parfor.init_block.loc
        pfbdr = ParforLoweringBuilder(lowerer=lowerer, scope=scope, loc=loc)

        # Get the Numba internal function to call to get the thread count.
        get_num_threads = pfbdr.bind_global_function(
            fobj=numba.np.ufunc.parallel._iget_num_threads,
            ftype=get_global_func_typ(numba.np.ufunc.parallel._iget_num_threads),
            args=()
        )

        # Insert the call to assign the thread count to a variable.
        num_threads_var = pfbdr.assign(
            rhs=pfbdr.call(get_num_threads, args=[]),
            typ=types.intp,
            name="num_threads_var")

        # For each reduction variable...
        for i in range(nredvars):
            red_name = parfor_redvars[i]
            # Get the type of the reduction variable.
            redvar_typ = lowerer.fndesc.typemap[red_name]
            # Get the ir.Var for the reduction variable.
            redvar = ir.Var(scope, red_name, loc)
            # Get the type of the array that holds the per-thread
            # reduction variables.
            redarrvar_typ = redtyp_to_redarraytype(redvar_typ)
            reddtype = redarrvar_typ.dtype
            if config.DEBUG_ARRAY_OPT:
                print(
                    "reduction_info",
                    red_name,
                    redvar_typ,
                    redarrvar_typ,
                    reddtype,
                    types.DType(reddtype),
                    num_threads_var,
                    type(num_threads_var)
                )

            # If this is reduction over an array,
            # the reduction array has just one added per-worker dimension.
            if isinstance(redvar_typ, types.npytypes.Array):
                redarrdim = redvar_typ.ndim + 1
            else:
                redarrdim = 1

            # Reduction array is created and initialized to the initial reduction value.

            # First create a var for the numpy empty ufunc.
            glbl_np_empty = pfbdr.bind_global_function(
                fobj=np.empty,
                ftype=get_np_ufunc_typ(np.empty),
                args=(
                    types.UniTuple(types.intp, redarrdim),
                ),
                kws={'dtype': types.DType(reddtype)}
            )

            # Create var for outer dimension size of reduction array equal to number of threads.
            #num_threads_var = pfbdr.make_const_variable(
            #    cval=thread_count,
            #    typ=types.intp,
            #    name='num_threads',
            #)

            size_var_list = [num_threads_var]

            # If this is a reduction over an array...
            if isinstance(redvar_typ, types.npytypes.Array):
                # Add code to get the shape of the array being reduced over.
                redshape_var = pfbdr.assign(
                    rhs=ir.Expr.getattr(redvar, "shape", loc),
                    typ=types.UniTuple(types.intp, redvar_typ.ndim),
                    name="redarr_shape",
                )

                # Add the dimension sizes of the array being reduced over to the tuple of sizes pass to empty.
                for j in range(redvar_typ.ndim):
                    onedimvar = pfbdr.assign(
                        rhs=ir.Expr.static_getitem(redshape_var, j, None, loc),
                        typ=types.intp,
                        name="redshapeonedim",
                    )
                    size_var_list.append(onedimvar)

            # Empty call takes tuple of sizes.  Create here and fill in outer dimension (num threads).
            size_var = pfbdr.make_tuple_variable(
                size_var_list, name='tuple_size_var',
            )

            # Resolve dtype
            cval = pfbdr._typingctx.resolve_value_type(reddtype)
            dt = pfbdr.make_const_variable(cval=cval, typ=types.DType(reddtype))

            # Add call to empty passing the size var tuple.
            empty_call = pfbdr.call(glbl_np_empty, args=[size_var, dt])

            redarr_var = pfbdr.assign(
                rhs=empty_call, typ=redarrvar_typ, name="redarr",
            )

            # Remember mapping of original reduction array to the newly created per-worker reduction array.
            redarrs[redvar.name] = redarr_var

            init_val = parfor_reddict[red_name].init_val

            if init_val is not None:
                if isinstance(redvar_typ, types.npytypes.Array):
                    # Create an array of identity values for the reduction.
                    # First, create a variable for np.full.
                    full_func_node = pfbdr.bind_global_function(
                        fobj=np.full,
                        ftype=get_np_ufunc_typ(np.full),
                        args=(
                            types.UniTuple(types.intp, redvar_typ.ndim),
                            reddtype,
                        ),
                        kws={'dtype': types.DType(reddtype)},
                    )

                    # Then create a var with the identify value.
                    init_val_var = pfbdr.make_const_variable(
                        cval=init_val,
                        typ=reddtype,
                        name="init_val",
                    )

                    # Then, call np.full with the shape of the reduction array and the identity value.
                    full_call = pfbdr.call(
                        full_func_node, args=[redshape_var, init_val_var, dt],
                    )

                    redtoset = pfbdr.assign(
                        rhs=full_call,
                        typ=redvar_typ,
                        name="redtoset",
                    )
                else:
                    redtoset = pfbdr.make_const_variable(
                        cval=init_val,
                        typ=reddtype,
                        name="redtoset",
                    )
            else:
                # No identity value: seed each worker with the current value
                # of the reduction variable itself.
                redtoset = redvar

                if config.DEBUG_ARRAY_OPT_RUNTIME:
                    res_print_str = "res_print1 for redvar " + str(redvar) + ":"
                    strconsttyp = types.StringLiteral(res_print_str)

                    lhs = pfbdr.make_const_variable(
                        cval=res_print_str,
                        typ=strconsttyp,
                        name="str_const",
                    )

                    res_print = ir.Print(args=[lhs, redvar],
                                         vararg=None, loc=loc)
                    lowerer.fndesc.calltypes[res_print] = signature(types.none,
                                                                    typemap[lhs.name],
                                                                    typemap[redvar.name])
                    print("res_print_redvar", res_print)
                    lowerer.lower_inst(res_print)

            # For each thread, initialize the per-worker reduction array to
            # the current reduction array value.

            # Get the Numba type of the variable that holds the thread count.
            num_thread_type = typemap[num_threads_var.name]
            # Get the LLVM type of the thread count variable.
            ntllvm_type = targetctx.get_value_type(num_thread_type)
            # Create a LLVM variable to hold the loop index.
            alloc_loop_var = cgutils.alloca_once(builder, ntllvm_type)
            # Associate this LLVM variable to a Numba IR variable so that
            # we can use setitem IR builder.
            # Create a Numba IR variable.
            numba_ir_loop_index_var = ir.Var(scope,
                                             mk_unique_var("loop_index"), loc)
            # Give that variable the right type.
            typemap[numba_ir_loop_index_var.name] = num_thread_type
            # Associate this Numba variable to the LLVM variable in the
            # lowerer's varmap.
            lowerer.varmap[numba_ir_loop_index_var.name] = alloc_loop_var
            # Insert a loop into the outputed LLVM that goes from 0 to
            # the current thread count.
            with cgutils.for_range(builder, lowerer.loadvar(num_threads_var.name), intp=ntllvm_type) as loop:
                # Store the loop index into the alloca'd LLVM loop index variable.
                builder.store(loop.index, alloc_loop_var)
                # Initialize one element of the reduction array using the Numba
                # IR variable associated with this loop's index.
                pfbdr.setitem(obj=redarr_var, index=numba_ir_loop_index_var, val=redtoset)

    # compile parfor body as a separate function to be used with GUFuncWrapper
    flags = parfor.flags.copy()
    flags.error_model = "numpy"
    # Can't get here unless flags.auto_parallel == ParallelOptions(True)
    index_var_typ = typemap[parfor.loop_nests[0].index_variable.name]
    # index variables should have the same type, check rest of indices
    for l in parfor.loop_nests[1:]:
        assert typemap[l.index_variable.name] == index_var_typ
    numba.parfors.parfor.sequential_parfor_lowering = True
    try:
        (func,
         func_args,
         func_sig,
         func_arg_types,
         exp_name_to_tuple_var) = _create_gufunc_for_parfor_body(
            lowerer, parfor, typemap, typingctx, targetctx, flags, {},
            bool(alias_map), index_var_typ, parfor.races)
    finally:
        # Always clear the flag, even if gufunc creation raised.
        numba.parfors.parfor.sequential_parfor_lowering = False

    # get the shape signature
    func_args = ['sched'] + func_args
    num_reductions = len(parfor_redvars)
    num_inputs = len(func_args) - len(parfor_output_arrays) - num_reductions
    if config.DEBUG_ARRAY_OPT:
        print("func_args = ", func_args)
        print("num_inputs = ", num_inputs)
        print("parfor_outputs = ", parfor_output_arrays)
        print("parfor_redvars = ", parfor_redvars)
        print("num_reductions = ", num_reductions)
    gu_signature = _create_shape_signature(
        parfor.get_shape_classes,
        num_inputs,
        num_reductions,
        func_args,
        func_sig,
        parfor.races,
        typemap)
    if config.DEBUG_ARRAY_OPT:
        print("gu_signature = ", gu_signature)

    # call the func in parallel by wrapping it with ParallelGUFuncBuilder
    loop_ranges = [(l.start, l.stop, l.step) for l in parfor.loop_nests]
    if config.DEBUG_ARRAY_OPT:
        print("loop_nests = ", parfor.loop_nests)
        print("loop_ranges = ", loop_ranges)

    call_parallel_gufunc(
        lowerer,
        func,
        gu_signature,
        func_sig,
        func_args,
        func_arg_types,
        loop_ranges,
        parfor_redvars,
        parfor_reddict,
        redarrs,
        parfor.init_block,
        index_var_typ,
        parfor.races,
        exp_name_to_tuple_var)

    # Combine the per-thread partial results into the final reduction values.
    _parfor_lowering_finalize_reduction(
        parfor, redarrs, lowerer, parfor_reddict,
    )

    # Cleanup reduction variable
    for v in redarrs.values():
        lowerer.lower_inst(ir.Del(v.name, loc=loc))
    # Restore the original typemap of the function that was replaced temporarily at the
    # beginning of this function.
    lowerer.fndesc.typemap = orig_typemap

    if config.DEBUG_ARRAY_OPT:
        print("_lower_parfor_parallel done")
# Immutable bundle of per-reduction lowering state: the parfor reduction
# metadata (redvar_info), the reduction variable's name and Numba type,
# the per-thread intermediate array variable and its type, and the value
# the reduction starts from.
_ReductionInfo = make_dataclass(
    "_ReductionInfo",
    [
        "redvar_info", "redvar_name", "redvar_typ",
        "redarr_var", "redarr_typ", "init_val",
    ],
    frozen=True,
)
def _parfor_lowering_finalize_reduction(
    parfor,
    redarrs,
    lowerer,
    parfor_reddict,
):
    """Emit code to finalize the reduction from the intermediate values of
    each thread.
    """
    from numba.np.ufunc.parallel import get_thread_count

    thread_count = get_thread_count()
    typemap = lowerer.fndesc.typemap
    # Combine the per-thread partial results for every reduction variable.
    for redvar_name, redarr_var in redarrs.items():
        # Pseudo-code for the code emitted below:
        #     tmp = redarr[0]
        #     for i in range(1, thread_count):
        #         tmp = reduce_op(redarr[i], tmp)
        #     reduction_result = tmp
        info = _ReductionInfo(
            redvar_info=parfor_reddict[redvar_name],
            redvar_name=redvar_name,
            redvar_typ=typemap[redvar_name],
            redarr_var=redarr_var,
            redarr_typ=typemap[redarr_var.name],
            init_val=lowerer.loadvar(redvar_name),
        )
        # A trivial inplace-binop reduction gets a specialized lowering;
        # anything else (e.g. functools.reduce style) takes the general path.
        if info.redvar_info.redop is not None:
            emit = _lower_trivial_inplace_binops
        else:
            emit = _lower_non_trivial_reduce
        emit(parfor, lowerer, thread_count, info)
class ParforsUnexpectedReduceNodeError(InternalError):
    """Raised when reduction lowering meets an instruction it cannot handle."""

    def __init__(self, inst):
        msg = "Unknown reduce instruction node: {}".format(inst)
        super().__init__(msg)
def _lower_trivial_inplace_binops(parfor, lowerer, thread_count, reduce_info):
    """Lower trivial inplace-binop reduction.

    Walks the reduction's reduce_nodes, lowering Var->Var copies directly and
    replacing the single inplace-binop on the ``<redvar>#init`` value with a
    compiled combine loop over the per-thread partial results.

    Raises ParforsUnexpectedReduceNodeError for any node it cannot classify.
    """
    for inst in reduce_info.redvar_info.reduce_nodes:
        # Var assigns to Var?
        if _lower_var_to_var_assign(lowerer, inst):
            pass
        # Is inplace-binop for the reduction?
        elif _is_inplace_binop_and_rhs_is_init(inst, reduce_info.redvar_name):
            fn = inst.value.fn
            redvar_result = _emit_binop_reduce_call(
                fn, lowerer, thread_count, reduce_info,
            )
            lowerer.storevar(redvar_result, name=inst.target.name)
        # Otherwise?
        else:
            raise ParforsUnexpectedReduceNodeError(inst)

        # XXX: This seems like a hack to stop the loop with this condition.
        if _fix_redvar_name_ssa_mismatch(parfor, lowerer, inst,
                                         reduce_info.redvar_name):
            break
    if config.DEBUG_ARRAY_OPT_RUNTIME:
        varname = reduce_info.redvar_name
        # NOTE(review): ``fn`` is only bound on the inplace-binop branch above;
        # this debug print assumes at least one binop node was seen -- confirm.
        lowerer.print_variable(
            f"{parfor.loc}: parfor {fn.__name__} reduction {varname} =",
            varname,
        )
def _lower_non_trivial_reduce(parfor, lowerer, thread_count, reduce_info):
    """Lower non-trivial reduction such as call to `functools.reduce()`.

    Unlike the trivial path, this replays the whole reduce_nodes sequence once
    per thread, feeding each thread's partial result in via the synthetic
    ``<redvar>#init`` variable.

    Raises ParforsUnexpectedReduceNodeError for any node it cannot classify.
    """
    ctx = lowerer.context
    init_name = f"{reduce_info.redvar_name}#init"
    # The init_name variable is not defined at this point.
    lowerer.fndesc.typemap.setdefault(init_name, reduce_info.redvar_typ)
    # Emit a sequence of the reduction operation for each intermediate result
    # of each thread.
    for tid in range(thread_count):
        for inst in reduce_info.redvar_info.reduce_nodes:
            # Var assigns to Var?
            if _lower_var_to_var_assign(lowerer, inst):
                pass
            # The reduction operation?
            elif (isinstance(inst, ir.Assign)
                    and any(var.name == init_name for var in inst.list_vars())):
                # Load this thread's partial result into the #init slot and
                # then lower the reduction statement itself.
                elem = _emit_getitem_call(
                    ctx.get_constant(types.intp, tid), lowerer, reduce_info,
                )
                lowerer.storevar(elem, init_name)
                lowerer.lower_inst(inst)
            # Otherwise?
            else:
                raise ParforsUnexpectedReduceNodeError(inst)
            # XXX: This seems like a hack to stop the loop with this condition.
            if _fix_redvar_name_ssa_mismatch(parfor, lowerer, inst,
                                             reduce_info.redvar_name):
                break
    if config.DEBUG_ARRAY_OPT_RUNTIME:
        varname = reduce_info.redvar_name
        lowerer.print_variable(
            f"{parfor.loc}: parfor non-trivial reduction {varname} =",
            varname,
        )
def _lower_var_to_var_assign(lowerer, inst):
    """Lower Var->Var assignment.

    Returns True if-and-only-if `inst` is a Var->Var assignment.
    """
    if not (isinstance(inst, ir.Assign) and isinstance(inst.value, ir.Var)):
        return False
    # Copy the source variable's current value into the target variable.
    current = lowerer.loadvar(inst.value.name)
    lowerer.storevar(current, name=inst.target.name)
    return True
def _emit_getitem_call(idx, lowerer, reduce_info):
    """Emit call to ``redarr_var[idx]``
    """
    def reducer_getitem(redarr, index):
        return redarr[index]

    ctx = lowerer.context
    builder = lowerer.builder
    arr_ty = reduce_info.redarr_typ
    arr_val = lowerer.loadvar(reduce_info.redarr_var.name)
    # Compile the tiny indexing kernel and inline-call it on (array, idx),
    # yielding the element as an LLVM value.
    call_sig = signature(reduce_info.redvar_typ, arr_ty, types.intp)
    return ctx.compile_internal(builder, reducer_getitem, call_sig,
                                (arr_val, idx))
def _emit_binop_reduce_call(binop, lowerer, thread_count, reduce_info):
    """Emit call to the ``binop`` for the reduction variable.

    Compiles a small combine kernel inline and returns the LLVM value of the
    final reduction result.
    """
    def reduction_add(thread_count, redarr, init):
        c = init
        for i in range(thread_count):
            c += redarr[i]
        return c

    def reduction_mul(thread_count, redarr, init):
        c = init
        for i in range(thread_count):
            c *= redarr[i]
        return c

    # Select the combining kernel for the inplace operator.
    # NOTE(review): isub maps to the additive combiner and
    # ifloordiv/itruediv to the multiplicative one -- presumably the
    # per-thread partials already carry the inverse; confirm against the
    # parfor reduction transformation.
    kernel = {
        operator.iadd: reduction_add,
        operator.isub: reduction_add,
        operator.imul: reduction_mul,
        operator.ifloordiv: reduction_mul,
        operator.itruediv: reduction_mul,
    }[binop]
    ctx = lowerer.context
    builder = lowerer.builder
    redarr_typ = reduce_info.redarr_typ
    arg_arr = lowerer.loadvar(reduce_info.redarr_var.name)

    if config.DEBUG_ARRAY_OPT_RUNTIME:
        # Emit a runtime print of the reduction array and its initial value.
        init_var = reduce_info.redarr_var.scope.get(reduce_info.redvar_name)
        res_print = ir.Print(
            args=[reduce_info.redarr_var, init_var], vararg=None,
            loc=lowerer.loc,
        )
        typemap = lowerer.fndesc.typemap
        lowerer.fndesc.calltypes[res_print] = signature(
            types.none, typemap[reduce_info.redarr_var.name],
            typemap[init_var.name],
        )
        lowerer.lower_inst(res_print)

    # Call the kernel inline as kernel(thread_count, redarr, init).
    arg_thread_count = ctx.get_constant_generic(
        builder, types.uintp, thread_count,
    )
    args = (arg_thread_count, arg_arr, reduce_info.init_val)
    sig = signature(
        reduce_info.redvar_typ, types.uintp, redarr_typ, reduce_info.redvar_typ,
    )
    redvar_result = ctx.compile_internal(builder, kernel, sig, args)
    return redvar_result
def _is_inplace_binop_and_rhs_is_init(inst, redvar_name):
    """Is ``inst`` an inplace-binop and the RHS is the reduction init?
    """
    if not isinstance(inst, ir.Assign):
        return False
    expr = inst.value
    # Short-circuits: only read .op / .rhs once the previous check passed.
    return (
        isinstance(expr, ir.Expr)
        and expr.op == "inplace_binop"
        and expr.rhs.name == f"{redvar_name}#init"
    )
def _fix_redvar_name_ssa_mismatch(parfor, lowerer, inst, redvar_name):
    """Fix reduction variable name mismatch due to SSA.

    Returns True when ``inst`` assigns to (an SSA version of) the reduction
    variable, after copying the assigned value back into ``redvar_name`` if
    the names differ; returns False otherwise.
    """
    # Only process reduction statements post-gufunc execution
    # until we see an assignment with a left-hand side to the
    # reduction variable's name. This fixes problems with
    # cases where there are multiple assignments to the
    # reduction variable in the parfor.
    scope = parfor.init_block.scope
    if isinstance(inst, ir.Assign):
        try:
            reduction_var = scope.get_exact(redvar_name)
        except NotDefinedError:
            # Ideally, this shouldn't happen. The redvar name
            # missing from scope indicates an error from
            # other rewrite passes.
            is_same_source_var = redvar_name == inst.target.name
        else:
            # Because of SSA, the redvar and target var of
            # the current assignment would be different even
            # though they refer to the same source-level var.
            redvar_unver_name = reduction_var.unversioned_name
            target_unver_name = inst.target.unversioned_name
            is_same_source_var = redvar_unver_name == target_unver_name

        if is_same_source_var:
            # If redvar is different from target var, add an
            # assignment to put target var into redvar.
            if redvar_name != inst.target.name:
                val = lowerer.loadvar(inst.target.name)
                lowerer.storevar(val, name=redvar_name)
            return True

    return False
def _create_shape_signature(
        get_shape_classes,
        num_inputs,
        num_reductions,
        args,
        func_sig,
        races,
        typemap):
    '''Create shape signature for GUFunc

    Maps each argument's shape-equivalence classes to single-letter dimension
    symbols ('a', 'b', ...) and returns ``(gu_sin, gu_sout)`` where gu_sin
    holds one symbol tuple per argument (including the leading 'sched'
    argument).  Reduction arguments get a dedicated leading symbol standing
    for the thread count.  gu_sout is always empty here.
    '''
    if config.DEBUG_ARRAY_OPT:
        print("_create_shape_signature", num_inputs, num_reductions, args, races)
        for i in args[1:]:
            print("argument", i, type(i), get_shape_classes(i, typemap=typemap))

    num_inouts = len(args) - num_reductions
    # maximum class number for array shapes
    # Race variables get the sentinel class (-1,) so they never share a
    # dimension symbol with anything else.
    classes = [get_shape_classes(var, typemap=typemap) if var not in races else (-1,) for var in args[1:]]
    class_set = set()
    for _class in classes:
        if _class:
            for i in _class:
                class_set.add(i)
    max_class = max(class_set) + 1 if class_set else 0
    classes.insert(0, (max_class,)) # force set the class of 'sched' argument
    class_set.add(max_class)
    thread_num_class = max_class + 1
    class_set.add(thread_num_class)
    class_map = {}
    # TODO: use prefix + class number instead of single char
    alphabet = ord('a')
    for n in class_set:
        if n >= 0:
            class_map[n] = chr(alphabet)
            alphabet += 1

    # The next unused letter stands for the thread-count dimension of
    # reduction arrays.
    threadcount_ordinal = chr(alphabet)

    alpha_dict = {'latest_alpha' : alphabet}

    def bump_alpha(c, class_map):
        # Known classes reuse their letter; negative (sentinel) classes get
        # a fresh letter each time.
        if c >= 0:
            return class_map[c]
        else:
            alpha_dict['latest_alpha'] += 1
            return chr(alpha_dict['latest_alpha'])

    gu_sin = []
    gu_sout = []
    count = 0
    syms_sin = ()
    if config.DEBUG_ARRAY_OPT:
        print("args", args)
        print("classes", classes)
        print("threadcount_ordinal", threadcount_ordinal)

    for cls, arg in zip(classes, args):
        count = count + 1
        if cls:
            dim_syms = tuple(bump_alpha(c, class_map) for c in cls)
        else:
            dim_syms = ()
        if (count > num_inouts):
            # Add the threadcount_ordinal to represent the thread count
            # to the start of the reduction array.
            gu_sin.append(tuple([threadcount_ordinal] + list(dim_syms[1:])))
        else:
            gu_sin.append(dim_syms)
            syms_sin += dim_syms
    return (gu_sin, gu_sout)
def _print_block(block):
    """Print every instruction of *block*, prefixed by its position."""
    for pos, stmt in enumerate(block.body):
        print(" ", pos, " ", stmt)
def _print_body(body_dict):
    '''Pretty-print a set of IR blocks.
    '''
    for label in body_dict:
        print("label: ", label)
        _print_block(body_dict[label])
def wrap_loop_body(loop_body):
    """Return a shallow copy of *loop_body* with a back-edge appended.

    A jump from the highest-labelled block to the lowest-labelled one is
    added so the block dict forms a proper loop CFG; ``unwrap_loop_body``
    removes it again.
    """
    blocks = loop_body.copy()  # shallow copy is enough
    entry_label = min(blocks.keys())
    exit_label = max(blocks.keys())
    exit_block = blocks[exit_label]
    exit_block.body.append(ir.Jump(entry_label, exit_block.loc))
    return blocks
def unwrap_loop_body(loop_body):
    """Drop the trailing back-edge jump appended by ``wrap_loop_body``.

    Only the highest-labelled block is touched; its body loses its last
    statement.
    """
    exit_label = max(loop_body.keys())
    exit_block = loop_body[exit_label]
    exit_block.body = exit_block.body[:-1]
def add_to_def_once_sets(a_def, def_once, def_more):
    '''If the variable is already defined more than once, do nothing.
    Else if defined exactly once previously then transition this
    variable to the defined more than once set (remove it from
    def_once set and add to def_more set).
    Else this must be the first time we've seen this variable defined
    so add to def_once set.
    '''
    if a_def in def_more:
        # Already known to be multiply defined; nothing to record.
        return
    if a_def in def_once:
        # Second definition: promote from "exactly once" to "more than once".
        def_more.add(a_def)
        def_once.remove(a_def)
    else:
        # First sighting of this variable.
        def_once.add(a_def)
def compute_def_once_block(block, def_once, def_more, getattr_taken, typemap, module_assigns):
    '''Effect changes to the set of variables defined once or more than once
    for a single block.
    block - the block to process
    def_once - set of variable names known to be defined exactly once
    def_more - set of variable names known to be defined more than once
    getattr_taken - dict mapping variable name to tuple of object and attribute taken
    typemap - dict mapping variable name to its Numba type
    module_assigns - dict mapping variable name to the Global that they came from
    '''
    # The only "defs" occur in assignments, so find such instructions.
    assignments = block.find_insts(ir.Assign)
    # For each assignment...
    for one_assign in assignments:
        # Get the LHS/target of the assignment.
        a_def = one_assign.target.name
        # Add variable to def sets.
        add_to_def_once_sets(a_def, def_once, def_more)

        rhs = one_assign.value
        if isinstance(rhs, ir.Global):
            # Remember assignments of the form "a = Global(...)"
            # Is this a module?
            if isinstance(rhs.value, pytypes.ModuleType):
                module_assigns[a_def] = rhs.value.__name__
        if isinstance(rhs, ir.Expr) and rhs.op == 'getattr' and rhs.value.name in def_once:
            # Remember assignments of the form "a = b.c"
            getattr_taken[a_def] = (rhs.value.name, rhs.attr)
        if isinstance(rhs, ir.Expr) and rhs.op == 'call' and rhs.func.name in getattr_taken:
            # If "a" is being called then lookup the getattr definition of "a"
            # as above, getting the module variable "b" (base_obj)
            # and the attribute "c" (base_attr).
            base_obj, base_attr = getattr_taken[rhs.func.name]
            if base_obj in module_assigns:
                # If we know the definition of the module variable then get the module
                # name from module_assigns.
                base_mod_name = module_assigns[base_obj]
                if not is_const_call(base_mod_name, base_attr):
                    # Calling a method on an object could modify the object and is thus
                    # like a def of that object.  We call is_const_call to see if this module/attribute
                    # combination is known to not modify the module state.  If we don't know that
                    # the combination is safe then we have to assume there could be a modification to
                    # the module and thus add the module variable as defined more than once.
                    add_to_def_once_sets(base_obj, def_once, def_more)
            else:
                # Assume the worst and say that base_obj could be modified by the call.
                add_to_def_once_sets(base_obj, def_once, def_more)
        if isinstance(rhs, ir.Expr) and rhs.op == 'call':
            # If a mutable object is passed to a function, then it may be changed and
            # therefore can't be hoisted.
            # For each argument to the function...
            for argvar in rhs.args:
                # Get the argument's type.
                if isinstance(argvar, ir.Var):
                    argvar = argvar.name
                avtype = typemap[argvar]
                # If that type doesn't have a mutable attribute or it does and it's set to
                # not mutable then this usage is safe for hoisting.
                if getattr(avtype, 'mutable', False):
                    # Here we have a mutable variable passed to a function so add this variable
                    # to the def lists.
                    add_to_def_once_sets(argvar, def_once, def_more)
def compute_def_once_internal(loop_body, def_once, def_more, getattr_taken, typemap, module_assigns):
    '''Compute the set of variables defined exactly once in the given set of blocks
    and use the given sets for storing which variables are defined once, more than
    once and which have had a getattr call on them.
    '''
    for block in loop_body.values():
        # Update the def sets from the straight-line code of this block.
        compute_def_once_block(block, def_once, def_more, getattr_taken, typemap, module_assigns)
        # Parfors nest whole sub-CFGs inside a single instruction, so
        # recurse into any parfor found in the block body.
        for inst in block.body:
            if isinstance(inst, parfor.Parfor):
                # The parfor's init block first...
                compute_def_once_block(inst.init_block, def_once, def_more, getattr_taken, typemap, module_assigns)
                # ...then its loop body, recursively.
                compute_def_once_internal(inst.loop_body, def_once, def_more, getattr_taken, typemap, module_assigns)
def compute_def_once(loop_body, typemap):
    '''Compute the set of variables defined exactly once in the given set of blocks.

    Returns the pair ``(def_once, def_more)`` of variable-name sets.
    '''
    def_once = set()        # variables defined exactly once
    def_more = set()        # variables defined more than once
    getattr_taken = {}      # var name -> (object name, attribute)
    module_assigns = {}     # var name -> module name it was assigned from
    compute_def_once_internal(loop_body, def_once, def_more,
                              getattr_taken, typemap, module_assigns)
    return def_once, def_more
def find_vars(var, varset):
    """Visitor callback for ``visit_vars_inner``: collect variable names.

    Records ``var.name`` in *varset* and returns *var* unchanged.
    """
    assert isinstance(var, ir.Var)
    varset.add(var.name)
    return var
def _hoist_internal(inst, dep_on_param, call_table, hoisted, not_hoisted,
                    typemap, stored_arrays):
    """Decide whether assignment *inst* can be hoisted out of the loop.

    An instruction is hoistable when its target is not a stored array, all of
    its uses are already loop-invariant (in *dep_on_param*), and its RHS is
    pure.  Hoisted instructions are appended to *hoisted* (and, for
    non-array targets, their names join *dep_on_param*); rejected ones go to
    *not_hoisted* with a reason string.  Returns True when hoisted.
    """
    if inst.target.name in stored_arrays:
        not_hoisted.append((inst, "stored array"))
        if config.DEBUG_ARRAY_OPT >= 1:
            print("Instruction", inst, " could not be hoisted because the created array is stored.")
        return False

    # Collect every variable the RHS reads.
    uses = set()
    visit_vars_inner(inst.value, find_vars, uses)
    # Anything used that isn't known loop-invariant blocks hoisting.
    diff = uses.difference(dep_on_param)
    if config.DEBUG_ARRAY_OPT >= 1:
        print("_hoist_internal:", inst, "uses:", uses, "diff:", diff)
    if len(diff) == 0 and is_pure(inst.value, None, call_table):
        if config.DEBUG_ARRAY_OPT >= 1:
            print("Will hoist instruction", inst, typemap[inst.target.name])
        hoisted.append(inst)
        # Arrays are excluded: aliasing through a hoisted array would not be
        # loop-invariant.
        if not isinstance(typemap[inst.target.name], types.npytypes.Array):
            dep_on_param += [inst.target.name]
        return True
    else:
        if len(diff) > 0:
            not_hoisted.append((inst, "dependency"))
            if config.DEBUG_ARRAY_OPT >= 1:
                print("Instruction", inst, " could not be hoisted because of a dependency.")
        else:
            not_hoisted.append((inst, "not pure"))
            if config.DEBUG_ARRAY_OPT >= 1:
                print("Instruction", inst, " could not be hoisted because it isn't pure.")
    return False
def find_setitems_block(setitems, itemsset, block, typemap):
    """Scan one block for array stores.

    The name of every setitem target is added to *setitems*.  When the value
    being stored is itself mutable, its name is also added to *itemsset*.
    Nested parfors are scanned recursively.
    """
    for inst in block.body:
        if isinstance(inst, parfor.Parfor):
            # Recurse into the nested parfor's init block and loop body.
            find_setitems_block(setitems, itemsset, inst.init_block, typemap)
            find_setitems_body(setitems, itemsset, inst.loop_body, typemap)
        elif isinstance(inst, (ir.StaticSetItem, ir.SetItem)):
            setitems.add(inst.target.name)
            # If we store a non-mutable object into an array then that is safe to hoist.
            # If the stored object is mutable and you hoist then multiple entries in the
            # outer array could reference the same object and changing one index would then
            # change other indices.
            if getattr(typemap[inst.value.name], "mutable", False):
                itemsset.add(inst.value.name)
def find_setitems_body(setitems, itemsset, loop_body, typemap):
    """
    Find the arrays that are written into (goes into setitems) and the
    mutable objects (mostly arrays) that are written into other arrays
    (goes into itemsset).
    """
    # Iterate the block values directly; the labels are irrelevant here
    # (the original unpacked items() and discarded the key).
    for block in loop_body.values():
        find_setitems_block(setitems, itemsset, block, typemap)
def empty_container_allocator_hoist(inst, dep_on_param, call_table, hoisted,
                                    not_hoisted, typemap, stored_arrays):
    """Try to hoist a ``np.empty`` allocation out of the loop.

    Returns True when *inst* is an assignment from a call resolved in
    *call_table* to ``np.empty`` and ``_hoist_internal`` accepted it;
    False in every other case.
    """
    if not isinstance(inst, ir.Assign):
        return False
    expr = inst.value
    if not (isinstance(expr, ir.Expr) and expr.op == 'call'):
        return False
    callee = expr.func.name
    if callee not in call_table:
        return False
    if call_table[callee] != ['empty', np]:
        return False
    return _hoist_internal(inst, dep_on_param, call_table, hoisted,
                           not_hoisted, typemap, stored_arrays)
def hoist(parfor_params, loop_body, typemap, wrapped_blocks):
    """Hoist loop-invariant instructions out of the parfor body.

    Rewrites the blocks in *loop_body* in place, removing hoistable
    instructions, and returns ``(hoisted, not_hoisted)`` where *hoisted* is
    the list of removed instructions and *not_hoisted* is a list of
    ``(instruction, reason)`` pairs.
    """
    dep_on_param = copy.copy(parfor_params)
    hoisted = []
    not_hoisted = []

    # Compute the set of variable defined exactly once in the loop body.
    def_once, def_more = compute_def_once(loop_body, typemap)
    # (reverse_call_table is unused here.)
    (call_table, reverse_call_table) = get_call_table(wrapped_blocks)

    setitems = set()
    itemsset = set()
    find_setitems_body(setitems, itemsset, loop_body, typemap)
    # Arrays written inside the loop are not loop-invariant.
    dep_on_param = list(set(dep_on_param).difference(setitems))
    if config.DEBUG_ARRAY_OPT >= 1:
        print("hoist - def_once:", def_once, "setitems:", setitems, "itemsset:", itemsset, "dep_on_param:", dep_on_param, "parfor_params:", parfor_params)

    # A written-into array counts as another definition of that name.
    for si in setitems:
        add_to_def_once_sets(si, def_once, def_more)

    for label, block in loop_body.items():
        new_block = []
        for inst in block.body:
            if empty_container_allocator_hoist(inst, dep_on_param, call_table,
                                               hoisted, not_hoisted, typemap, itemsset):
                continue
            elif isinstance(inst, ir.Assign) and inst.target.name in def_once:
                if _hoist_internal(inst, dep_on_param, call_table,
                                   hoisted, not_hoisted, typemap, itemsset):
                    # don't add this instruction to the block since it is
                    # hoisted
                    continue
            elif isinstance(inst, parfor.Parfor):
                # Also try to hoist from a nested parfor's init block.
                new_init_block = []
                if config.DEBUG_ARRAY_OPT >= 1:
                    print("parfor")
                    inst.dump()
                for ib_inst in inst.init_block.body:
                    if empty_container_allocator_hoist(ib_inst, dep_on_param,
                                                       call_table, hoisted, not_hoisted, typemap, itemsset):
                        continue
                    elif (isinstance(ib_inst, ir.Assign) and
                          ib_inst.target.name in def_once):
                        if _hoist_internal(ib_inst, dep_on_param, call_table,
                                           hoisted, not_hoisted, typemap, itemsset):
                            # don't add this instruction to the block since it is hoisted
                            continue
                    new_init_block.append(ib_inst)
                inst.init_block.body = new_init_block

            new_block.append(inst)
        block.body = new_block
    return hoisted, not_hoisted
def redtyp_is_scalar(redtype):
    """Return True when *redtype* is not a NumPy array type."""
    if isinstance(redtype, types.npytypes.Array):
        return False
    return True
def redtyp_to_redarraytype(redtyp):
    """Map a reduction variable type to the reduction array type used to
    hold per-worker results.
    """
    if isinstance(redtyp, types.npytypes.Array):
        # Array reduction: keep the element dtype and add one extra
        # (leading, per-worker) dimension rather than nesting arrays.
        return types.npytypes.Array(redtyp.dtype, redtyp.ndim + 1, "C")
    # Scalar reduction: a 1-d array with one slot per worker.
    return types.npytypes.Array(redtyp, 1, "C")
def redarraytype_to_sig(redarraytyp):
    """Given a reduction array type, find the type of the reduction argument to the gufunc.
    """
    assert isinstance(redarraytyp, types.npytypes.Array)
    # Same dtype/ndim/layout; the gufunc argument mirrors the array exactly.
    return types.npytypes.Array(redarraytyp.dtype, redarraytyp.ndim,
                                redarraytyp.layout)
def legalize_names_with_typemap(names, typemap):
    """Wrap ir_utils.legalize_names so that renamed variables keep their
    types.

    legalize_names replaces characters that are illegal in identifiers
    (e.g. '.') with an underscore.  The typemap is keyed on the original
    names, so every name that actually changed has its type mirrored
    under the new, legal name.
    """
    legal = legalize_names(names)
    for original, renamed in legal.items():
        if original == renamed:
            continue
        # The name was legalized; register the type under the new name too.
        typemap[renamed] = typemap[original]
    return legal
def to_scalar_from_0d(x):
    """Collapse a 0-d array-compatible type to its dtype; return any other
    type unchanged."""
    if isinstance(x, types.ArrayCompatible) and x.ndim == 0:
        return x.dtype
    return x
def _create_gufunc_for_parfor_body(
        lowerer,
        parfor,
        typemap,
        typingctx,
        targetctx,
        flags,
        locals,
        has_aliases,
        index_var_typ,
        races):
    '''
    Takes a parfor and creates a gufunc function for its body.
    There are two parts to this function.
    1) Code to iterate across the iteration space as defined by the schedule.
    2) The parfor body that does the work for a single point in the iteration space.
    Part 1 is created as Python text for simplicity with a sentinel assignment to mark the point
    in the IR where the parfor body should be added.
    This Python text is 'exec'ed into existence and its IR retrieved with run_frontend.
    The IR is scanned for the sentinel assignment where that basic block is split and the IR
    for the parfor body inserted.

    Returns a 5-tuple: (kernel_func, parfor_args, kernel_sig, func_arg_types,
    expanded_name_to_tuple_var) where kernel_func is the compiled gufunc body,
    parfor_args are the pre-legalization argument names, kernel_sig is the
    gufunc signature, func_arg_types are the caller-side argument types and
    expanded_name_to_tuple_var maps synthesized tuple-element parameter names
    back to (original tuple var, index).
    '''
    if config.DEBUG_ARRAY_OPT >= 1:
        print("starting _create_gufunc_for_parfor_body")

    loc = parfor.init_block.loc

    # The parfor body and the main function body share ir.Var nodes.
    # We have to do some replacements of Var names in the parfor body to make them
    # legal parameter names. If we don't copy then the Vars in the main function also
    # would incorrectly change their name.
    loop_body = copy.copy(parfor.loop_body)
    remove_dels(loop_body)

    parfor_dim = len(parfor.loop_nests)
    loop_indices = [l.index_variable.name for l in parfor.loop_nests]

    # Get all the parfor params.
    parfor_params = parfor.params
    # Get just the outputs of the parfor.
    parfor_outputs = numba.parfors.parfor.get_parfor_outputs(parfor, parfor_params)
    # Get all parfor reduction vars, and operators.
    typemap = lowerer.fndesc.typemap
    parfor_redvars, parfor_reddict = numba.parfors.parfor.get_parfor_reductions(
        lowerer.func_ir, parfor, parfor_params, lowerer.fndesc.calltypes)
    # Compute just the parfor inputs as a set difference.
    parfor_inputs = sorted(
        list(
            set(parfor_params) -
            set(parfor_outputs) -
            set(parfor_redvars)))

    if config.DEBUG_ARRAY_OPT >= 1:
        print("parfor_params = ", parfor_params, " ", type(parfor_params))
        print("parfor_outputs = ", parfor_outputs, " ", type(parfor_outputs))
        print("parfor_inputs = ", parfor_inputs, " ", type(parfor_inputs))
        print("parfor_redvars = ", parfor_redvars, " ", type(parfor_redvars))

    # -------------------------------------------------------------------------
    # Convert tuples to individual parameters.  gufunc parameters cannot be
    # tuples, so each tuple input is flattened into one synthesized scalar
    # parameter per element; the tuple is rebuilt inside the gufunc text.
    tuple_expanded_parfor_inputs = []
    tuple_var_to_expanded_names = {}
    expanded_name_to_tuple_var = {}
    next_expanded_tuple_var = 0
    parfor_tuple_params = []
    # For each input to the parfor.
    for pi in parfor_inputs:
        # Get the type of the input.
        pi_type = typemap[pi]
        # If it is a UniTuple or Tuple we will do the conversion.
        if isinstance(pi_type, types.UniTuple) or isinstance(pi_type, types.NamedUniTuple):
            # Get the size and dtype of the tuple.
            tuple_count = pi_type.count
            tuple_dtype = pi_type.dtype
            # Only do tuples up to config.PARFOR_MAX_TUPLE_SIZE length.
            assert(tuple_count <= config.PARFOR_MAX_TUPLE_SIZE)
            this_var_expansion = []
            for i in range(tuple_count):
                # Generate a new name for the individual part of the tuple var.
                expanded_name = "expanded_tuple_var_" + str(next_expanded_tuple_var)
                # Add that name to the new list of inputs to the gufunc.
                tuple_expanded_parfor_inputs.append(expanded_name)
                this_var_expansion.append(expanded_name)
                # Remember a mapping from new param name to original tuple
                # var and the index within the tuple.
                expanded_name_to_tuple_var[expanded_name] = (pi, i)
                next_expanded_tuple_var += 1
                # Set the type of the new parameter.
                typemap[expanded_name] = tuple_dtype
            # Remember a mapping from the original tuple var to the
            # individual parts.
            tuple_var_to_expanded_names[pi] = this_var_expansion
            parfor_tuple_params.append(pi)
        elif isinstance(pi_type, types.Tuple) or isinstance(pi_type, types.NamedTuple):
            # This is the same as above for UniTuple except that each part of
            # the tuple can have a different type and we fetch that type with
            # pi_type.types[offset].
            tuple_count = pi_type.count
            tuple_types = pi_type.types
            # Only do tuples up to config.PARFOR_MAX_TUPLE_SIZE length.
            assert(tuple_count <= config.PARFOR_MAX_TUPLE_SIZE)
            this_var_expansion = []
            for i in range(tuple_count):
                expanded_name = "expanded_tuple_var_" + str(next_expanded_tuple_var)
                tuple_expanded_parfor_inputs.append(expanded_name)
                this_var_expansion.append(expanded_name)
                expanded_name_to_tuple_var[expanded_name] = (pi, i)
                next_expanded_tuple_var += 1
                typemap[expanded_name] = tuple_types[i]
            tuple_var_to_expanded_names[pi] = this_var_expansion
            parfor_tuple_params.append(pi)
        else:
            # Non-tuple input: passed through unchanged.
            tuple_expanded_parfor_inputs.append(pi)
    parfor_inputs = tuple_expanded_parfor_inputs
    if config.DEBUG_ARRAY_OPT >= 1:
        print("parfor_inputs post tuple handling = ", parfor_inputs, " ", type(parfor_inputs))
    # -------------------------------------------------------------------------

    # Reduction variables are handled separately, so they are not races.
    races = races.difference(set(parfor_redvars))
    for race in races:
        msg = ("Variable %s used in parallel loop may be written "
               "to simultaneously by multiple workers and may result "
               "in non-deterministic or unintended results." % race)
        warnings.warn(NumbaParallelSafetyWarning(msg, loc))
    replace_var_with_array(races, loop_body, typemap, lowerer.fndesc.calltypes)

    # Reduction variables are represented as arrays, so they go under
    # different names.
    parfor_redarrs = []
    parfor_red_arg_types = []
    for var in parfor_redvars:
        arr = var + "_arr"
        parfor_redarrs.append(arr)
        redarraytype = redtyp_to_redarraytype(typemap[var])
        parfor_red_arg_types.append(redarraytype)
        redarrsig = redarraytype_to_sig(redarraytype)
        if arr in typemap:
            assert(typemap[arr] == redarrsig)
        else:
            typemap[arr] = redarrsig

    # Reorder all the params so that inputs go first then outputs.
    parfor_params = parfor_inputs + parfor_outputs + parfor_redarrs

    if config.DEBUG_ARRAY_OPT >= 1:
        print("parfor_params = ", parfor_params, " ", type(parfor_params))
        print("loop_indices = ", loop_indices, " ", type(loop_indices))
        print("loop_body = ", loop_body, " ", type(loop_body))
        _print_body(loop_body)

    # Some Var are not legal parameter names so create a dict of potentially illegal
    # param name to guaranteed legal name.
    param_dict = legalize_names_with_typemap(parfor_params + parfor_redvars + parfor_tuple_params, typemap)
    if config.DEBUG_ARRAY_OPT >= 1:
        print(
            "param_dict = ",
            sorted(
                param_dict.items()),
            " ",
            type(param_dict))

    # Some loop_indices are not legal parameter names so create a dict of potentially illegal
    # loop index to guaranteed legal name.
    ind_dict = legalize_names_with_typemap(loop_indices, typemap)
    # Compute a new list of legal loop index names.
    legal_loop_indices = [ind_dict[v] for v in loop_indices]
    if config.DEBUG_ARRAY_OPT >= 1:
        print("ind_dict = ", sorted(ind_dict.items()), " ", type(ind_dict))
        print(
            "legal_loop_indices = ",
            legal_loop_indices,
            " ",
            type(legal_loop_indices))

        for pd in parfor_params:
            print("pd = ", pd)
            print("pd type = ", typemap[pd], " ", type(typemap[pd]))

    # Get the types of each parameter.
    param_types = [to_scalar_from_0d(typemap[v]) for v in parfor_params]

    # Calculate types of args passed to gufunc.
    func_arg_types = [typemap[v] for v in (parfor_inputs + parfor_outputs)] + parfor_red_arg_types
    if config.DEBUG_ARRAY_OPT >= 1:
        print("new param_types:", param_types)
        print("new func_arg_types:", func_arg_types)

    # Replace illegal parameter names in the loop body with legal ones.
    replace_var_names(loop_body, param_dict)

    # remember the name before legalizing as the actual arguments
    parfor_args = parfor_params
    # Change parfor_params to be legal names.
    parfor_params = [param_dict[v] for v in parfor_params]
    parfor_params_orig = parfor_params

    # ascontig is disabled: the branch below would rebind array inputs to
    # contiguous copies inside the gufunc if it were ever turned on.
    parfor_params = []
    ascontig = False
    for pindex in range(len(parfor_params_orig)):
        if (ascontig and
            pindex < len(parfor_inputs) and
                isinstance(param_types[pindex], types.npytypes.Array)):
            parfor_params.append(parfor_params_orig[pindex]+"param")
        else:
            parfor_params.append(parfor_params_orig[pindex])

    # Change parfor body to replace illegal loop index vars with legal ones.
    replace_var_names(loop_body, ind_dict)
    loop_body_var_table = get_name_var_table(loop_body)
    sentinel_name = get_unused_var_name("__sentinel__", loop_body_var_table)

    if config.DEBUG_ARRAY_OPT >= 1:
        print(
            "legal parfor_params = ",
            parfor_params,
            " ",
            type(parfor_params))

    # Determine the unique names of the scheduling and gufunc functions.
    # sched_func_name = "__numba_parfor_sched_%s" % (hex(hash(parfor)).replace("-", "_"))
    gufunc_name = "__numba_parfor_gufunc_%s" % (
        hex(hash(parfor)).replace("-", "_"))
    if config.DEBUG_ARRAY_OPT:
        # print("sched_func_name ", type(sched_func_name), " ", sched_func_name)
        print("gufunc_name ", type(gufunc_name), " ", gufunc_name)

    gufunc_txt = ""

    # Create the gufunc function.
    gufunc_txt += "def " + gufunc_name + \
        "(sched, " + (", ".join(parfor_params)) + "):\n"

    globls = {"np": np, "numba": numba}

    # First thing in the gufunc, we reconstruct tuples from their
    # individual parts, e.g., orig_tup_name = (part1, part2,).
    # The rest of the code of the function will use the original tuple name.
    for tup_var, exp_names in tuple_var_to_expanded_names.items():
        tup_type = typemap[tup_var]
        gufunc_txt += "    " + param_dict[tup_var]
        # Determine if the tuple is a named tuple.
        if (isinstance(tup_type, types.NamedTuple) or
                isinstance(tup_type, types.NamedUniTuple)):
            named_tup = True
        else:
            named_tup = False

        if named_tup:
            # It is a named tuple so try to find the global that defines the
            # named tuple.
            func_def = guard(get_definition, lowerer.func_ir, tup_var)
            named_tuple_def = None
            if config.DEBUG_ARRAY_OPT:
                print("func_def:", func_def, type(func_def))
            if func_def is not None:
                if (isinstance(func_def, ir.Expr) and
                        func_def.op == 'call'):
                    named_tuple_def = guard(get_definition, lowerer.func_ir, func_def.func)
                    if config.DEBUG_ARRAY_OPT:
                        print("named_tuple_def:", named_tuple_def, type(named_tuple_def))
                elif isinstance(func_def, ir.Arg):
                    named_tuple_def = typemap[func_def.name]
                    if config.DEBUG_ARRAY_OPT:
                        print("named_tuple_def:", named_tuple_def,
                              type(named_tuple_def), named_tuple_def.name)
            if named_tuple_def is not None:
                if (isinstance(named_tuple_def, ir.Global) or
                        isinstance(named_tuple_def, ir.FreeVar)):
                    gval = named_tuple_def.value
                    if config.DEBUG_ARRAY_OPT:
                        print("gval:", gval, type(gval))
                    # Make the namedtuple class reachable from the exec'ed text.
                    globls[named_tuple_def.name] = gval
                elif isinstance(named_tuple_def, types.containers.BaseNamedTuple):
                    named_tuple_name = named_tuple_def.name.split('(')[0]
                    if config.DEBUG_ARRAY_OPT:
                        print("name:", named_tuple_name,
                              named_tuple_def.instance_class,
                              type(named_tuple_def.instance_class))
                    globls[named_tuple_name] = named_tuple_def.instance_class
            else:
                if config.DEBUG_ARRAY_OPT:
                    print("Didn't find definition of namedtuple for globls.")
                raise CompilerError("Could not find definition of " + str(tup_var),
                                    tup_var.loc)
            gufunc_txt += " = " + tup_type.instance_class.__name__ + "("
            for name, field_name in zip(exp_names, tup_type.fields):
                gufunc_txt += field_name + "=" + param_dict[name] + ","
        else:
            # Just a regular tuple so use (part0, part1, ...)
            gufunc_txt += " = (" + ", ".join([param_dict[x] for x in exp_names])
            if len(exp_names) == 1:
                # Add comma for tuples with singular values.  We can't unilaterally
                # add a comma alway because (,) isn't valid.
                gufunc_txt += ","
        gufunc_txt += ")\n"

    for pindex in range(len(parfor_inputs)):
        if ascontig and isinstance(param_types[pindex], types.npytypes.Array):
            gufunc_txt += ("    " + parfor_params_orig[pindex]
                           + " = np.ascontiguousarray(" + parfor_params[pindex] + ")\n")

    gufunc_thread_id_var = "ParallelAcceleratorGufuncThreadId"
    if len(parfor_redarrs) > 0:
        # The worker's slot in each reduction array is selected by thread id.
        gufunc_txt += "    " + gufunc_thread_id_var + " = "
        gufunc_txt += "numba.np.ufunc.parallel._iget_thread_id()\n"

    # Add initialization of reduction variables
    for arr, var in zip(parfor_redarrs, parfor_redvars):
        gufunc_txt += "    " + param_dict[var] + \
            "=" + param_dict[arr] + "[" + gufunc_thread_id_var + "]\n"
        if config.DEBUG_ARRAY_OPT_RUNTIME:
            gufunc_txt += "    print(\"thread id =\", ParallelAcceleratorGufuncThreadId)\n"
            gufunc_txt += "    print(\"initial reduction value\",ParallelAcceleratorGufuncThreadId," + param_dict[var] + "," + param_dict[var] + ".shape)\n"
            gufunc_txt += "    print(\"reduction array\",ParallelAcceleratorGufuncThreadId," + param_dict[arr] + "," + param_dict[arr] + ".shape)\n"

    # For each dimension of the parfor, create a for loop in the generated gufunc function.
    # Iterate across the proper values extracted from the schedule.
    # The form of the schedule is start_dim0, start_dim1, ..., start_dimN, end_dim0,
    # end_dim1, ..., end_dimN
    for eachdim in range(parfor_dim):
        for indent in range(eachdim + 1):
            gufunc_txt += "    "
        sched_dim = eachdim
        gufunc_txt += ("for " +
                       legal_loop_indices[eachdim] +
                       " in range(sched[" +
                       str(sched_dim) +
                       "], sched[" +
                       str(sched_dim +
                           parfor_dim) +
                       "] + np.uint8(1)):\n")

    if config.DEBUG_ARRAY_OPT_RUNTIME:
        for indent in range(parfor_dim + 1):
            gufunc_txt += "    "
        gufunc_txt += "print("
        for eachdim in range(parfor_dim):
            gufunc_txt += "\"" + legal_loop_indices[eachdim] + "\"," + legal_loop_indices[eachdim] + ","
        gufunc_txt += ")\n"

    # Add the sentinel assignment so that we can find the loop body position
    # in the IR.
    for indent in range(parfor_dim + 1):
        gufunc_txt += "    "
    gufunc_txt += sentinel_name + " = 0\n"

    # Add assignments of reduction variables (for returning the value)
    for arr, var in zip(parfor_redarrs, parfor_redvars):
        if config.DEBUG_ARRAY_OPT_RUNTIME:
            gufunc_txt += "    print(\"final reduction value\",ParallelAcceleratorGufuncThreadId," + param_dict[var] + ")\n"
            gufunc_txt += "    print(\"final reduction array\",ParallelAcceleratorGufuncThreadId," + param_dict[arr] + ")\n"
        # After the gufunc loops, copy the accumulated temp value back to reduction array.
        gufunc_txt += "    " + param_dict[arr] + \
            "[" + gufunc_thread_id_var + "] = " + param_dict[var] + "\n"
    gufunc_txt += "    return None\n"

    if config.DEBUG_ARRAY_OPT:
        print("gufunc_txt = ", type(gufunc_txt), "\n", gufunc_txt)
        print("globls:", globls, type(globls))
    # Force gufunc outline into existence.
    # NOTE: the exec'ed text is compiler-generated, not user input.
    locls = {}
    exec(gufunc_txt, globls, locls)
    gufunc_func = locls[gufunc_name]

    if config.DEBUG_ARRAY_OPT:
        print("gufunc_func = ", type(gufunc_func), "\n", gufunc_func)
    # Get the IR for the gufunc outline.
    gufunc_ir = compiler.run_frontend(gufunc_func)

    if config.DEBUG_ARRAY_OPT:
        print("gufunc_ir dump ", type(gufunc_ir))
        gufunc_ir.dump()
        print("loop_body dump ", type(loop_body))
        _print_body(loop_body)

    # rename all variables in gufunc_ir afresh
    var_table = get_name_var_table(gufunc_ir.blocks)
    new_var_dict = {}
    reserved_names = [sentinel_name] + \
        list(param_dict.values()) + legal_loop_indices
    for name, var in var_table.items():
        if not (name in reserved_names):
            new_var_dict[name] = mk_unique_var(name)
    replace_var_names(gufunc_ir.blocks, new_var_dict)
    if config.DEBUG_ARRAY_OPT:
        print("gufunc_ir dump after renaming ")
        gufunc_ir.dump()

    # The gufunc takes the schedule array first, then the parfor params.
    gufunc_param_types = [types.npytypes.Array(
        index_var_typ, 1, "C")] + param_types
    if config.DEBUG_ARRAY_OPT:
        print(
            "gufunc_param_types = ",
            type(gufunc_param_types),
            "\n",
            gufunc_param_types)

    gufunc_stub_last_label = find_max_label(gufunc_ir.blocks) + 1

    # Add gufunc stub last label to each parfor.loop_body label to prevent
    # label conflicts.
    loop_body = add_offset_to_labels(loop_body, gufunc_stub_last_label)
    # new label for splitting sentinel block
    new_label = find_max_label(loop_body) + 1

    # If enabled, add a print statement after every assignment.
    if config.DEBUG_ARRAY_OPT_RUNTIME:
        for label, block in loop_body.items():
            new_block = block.copy()
            new_block.clear()
            loc = block.loc
            scope = block.scope
            for inst in block.body:
                new_block.append(inst)
                # Append print after assignment
                if isinstance(inst, ir.Assign):
                    # Only apply to numbers
                    if typemap[inst.target.name] not in types.number_domain:
                        continue

                    # Make constant string
                    strval = "{} =".format(inst.target.name)
                    strconsttyp = types.StringLiteral(strval)

                    lhs = ir.Var(scope, mk_unique_var("str_const"), loc)
                    assign_lhs = ir.Assign(value=ir.Const(value=strval, loc=loc),
                                           target=lhs, loc=loc)
                    typemap[lhs.name] = strconsttyp
                    new_block.append(assign_lhs)

                    # Make print node
                    print_node = ir.Print(args=[lhs, inst.target], vararg=None, loc=loc)
                    new_block.append(print_node)
                    sig = numba.core.typing.signature(types.none,
                                                      typemap[lhs.name],
                                                      typemap[inst.target.name])
                    lowerer.fndesc.calltypes[print_node] = sig
            loop_body[label] = new_block

    if config.DEBUG_ARRAY_OPT:
        print("parfor loop body")
        _print_body(loop_body)

    # Hoist loop-invariant, single-definition instructions into the gufunc
    # prologue (just before the final instruction of the start block).
    wrapped_blocks = wrap_loop_body(loop_body)
    hoisted, not_hoisted = hoist(parfor_params, loop_body, typemap, wrapped_blocks)
    start_block = gufunc_ir.blocks[min(gufunc_ir.blocks.keys())]
    start_block.body = start_block.body[:-1] + hoisted + [start_block.body[-1]]
    unwrap_loop_body(loop_body)

    # store hoisted into diagnostics
    diagnostics = lowerer.metadata['parfor_diagnostics']
    diagnostics.hoist_info[parfor.id] = {'hoisted': hoisted,
                                         'not_hoisted': not_hoisted}

    if config.DEBUG_ARRAY_OPT:
        print("After hoisting")
        _print_body(loop_body)

    # Search all the block in the gufunc outline for the sentinel assignment.
    for label, block in gufunc_ir.blocks.items():
        for i, inst in enumerate(block.body):
            if isinstance(
                    inst,
                    ir.Assign) and inst.target.name == sentinel_name:
                # We found the sentinel assignment.
                loc = inst.loc
                scope = block.scope
                # split block across __sentinel__
                # A new block is allocated for the statements prior to the sentinel
                # but the new block maintains the current block label.
                prev_block = ir.Block(scope, loc)
                prev_block.body = block.body[:i]
                # The current block is used for statements after the sentinel.
                block.body = block.body[i + 1:]
                # But the current block gets a new label.
                body_first_label = min(loop_body.keys())

                # The previous block jumps to the minimum labelled block of the
                # parfor body.
                prev_block.append(ir.Jump(body_first_label, loc))

                # Add all the parfor loop body blocks to the gufunc function's
                # IR.
                for (l, b) in loop_body.items():
                    gufunc_ir.blocks[l] = transfer_scope(b, scope)
                body_last_label = max(loop_body.keys())
                gufunc_ir.blocks[new_label] = block
                gufunc_ir.blocks[label] = prev_block
                # Add a jump from the last parfor body block to the block containing
                # statements after the sentinel.
                gufunc_ir.blocks[body_last_label].append(
                    ir.Jump(new_label, loc))
                break
        else:
            # Inner loop finished without finding the sentinel: try next block.
            continue
        # Sentinel found and spliced; stop scanning.
        break

    if config.DEBUG_ARRAY_OPT:
        print("gufunc_ir last dump before renaming")
        gufunc_ir.dump()

    gufunc_ir.blocks = rename_labels(gufunc_ir.blocks)
    remove_dels(gufunc_ir.blocks)

    if config.DEBUG_ARRAY_OPT:
        print("gufunc_ir last dump")
        gufunc_ir.dump()
        print("flags", flags)
        print("typemap", typemap)

    # Temporarily set noalias when safe; restored after compilation below.
    old_alias = flags.noalias
    if not has_aliases:
        if config.DEBUG_ARRAY_OPT:
            print("No aliases found so adding noalias flag.")
        flags.noalias = True

    fixup_var_define_in_scope(gufunc_ir.blocks)
    kernel_func = compiler.compile_ir(
        typingctx,
        targetctx,
        gufunc_ir,
        gufunc_param_types,
        types.none,
        flags,
        locals)

    flags.noalias = old_alias

    kernel_sig = signature(types.none, *gufunc_param_types)
    if config.DEBUG_ARRAY_OPT:
        print("finished create_gufunc_for_parfor_body. kernel_sig = ", kernel_sig)

    return kernel_func, parfor_args, kernel_sig, func_arg_types, expanded_name_to_tuple_var
def replace_var_with_array_in_block(vars, block, typemap, calltypes):
    # Rewrite every assignment whose target is in `vars` as a setitem into a
    # 1-element array (index 0), so racy scalar writes become array stores.
    # Returns the new statement list for the block; recurses into nested
    # parfors.  The target var itself is retyped to an array by the caller
    # (replace_var_with_array).
    new_block = []
    for inst in block.body:
        if isinstance(inst, ir.Assign) and inst.target.name in vars:
            # Build the constant index 0 used for the single-element store.
            const_node = ir.Const(0, inst.loc)
            const_var = ir.Var(inst.target.scope, mk_unique_var("$const_ind_0"), inst.loc)
            typemap[const_var.name] = types.uintp
            const_assign = ir.Assign(const_node, const_var, inst.loc)
            new_block.append(const_assign)

            # Evaluate the RHS into a fresh temp of the original scalar type.
            val_var = ir.Var(inst.target.scope, mk_unique_var("$val"), inst.loc)
            typemap[val_var.name] = typemap[inst.target.name]
            new_block.append(ir.Assign(inst.value, val_var, inst.loc))

            # target[0] = temp, typed against the soon-to-be array type.
            setitem_node = ir.SetItem(inst.target, const_var, val_var, inst.loc)
            calltypes[setitem_node] = signature(
                types.none, types.npytypes.Array(typemap[inst.target.name], 1, "C"), types.intp, typemap[inst.target.name])
            new_block.append(setitem_node)
            # The original assignment is replaced, not kept.
            continue
        elif isinstance(inst, parfor.Parfor):
            # Recurse into nested parfor init block and loop body.
            replace_var_with_array_internal(vars, {0: inst.init_block}, typemap, calltypes)
            replace_var_with_array_internal(vars, inst.loop_body, typemap, calltypes)

        new_block.append(inst)
    return new_block
def replace_var_with_array_internal(vars, loop_body, typemap, calltypes):
    """Apply replace_var_with_array_in_block to every block in loop_body,
    rebinding each block's body to the rewritten statement list."""
    for blk in loop_body.values():
        blk.body = replace_var_with_array_in_block(vars, blk, typemap, calltypes)
def replace_var_with_array(vars, loop_body, typemap, calltypes):
    """Turn each variable in `vars` into a 1-element C array: rewrite all
    assignments in `loop_body` as setitems, then retype the variable from
    its scalar type T to Array(T, 1, 'C') in the typemap."""
    replace_var_with_array_internal(vars, loop_body, typemap, calltypes)
    for name in vars:
        element_type = typemap[name]
        # Drop the scalar entry (if present) before installing the array type.
        typemap.pop(name, None)
        typemap[name] = types.npytypes.Array(element_type, 1, "C")
def call_parallel_gufunc(lowerer, cres, gu_signature, outer_sig, expr_args, expr_arg_types,
                         loop_ranges, redvars, reddict, redarrdict, init_block, index_var_typ, races,
                         exp_name_to_tuple_var):
    '''
    Adds the call to the gufunc function from the main function.

    Emits, via the lowerer's LLVM builder: the scheduling computation
    (do_scheduling_signed/unsigned), the gufunc-wrapper argument packing
    (args / shapes / steps / data pointers in the NumPy gufunc calling
    convention), the call itself, and post-call copy-back of race
    variables.  Mutates expr_args and sin in place (the sched entries are
    popped) and restores the parallel chunksize afterwards.
    '''
    context = lowerer.context
    builder = lowerer.builder

    from numba.np.ufunc.parallel import (build_gufunc_wrapper,
                                         _launch_threads)

    if config.DEBUG_ARRAY_OPT:
        print("make_parallel_loop")
        print("outer_sig = ", outer_sig.args, outer_sig.return_type,
              outer_sig.recvr, outer_sig.pysig)
        print("loop_ranges = ", loop_ranges)
        print("expr_args", expr_args)
        print("expr_arg_types", expr_arg_types)
        print("gu_signature", gu_signature)

    # Build the wrapper for GUFunc
    args, return_type = sigutils.normalize_signature(outer_sig)
    llvm_func = cres.library.get_function(cres.fndesc.llvm_func_name)
    sin, sout = gu_signature

    # These are necessary for build_gufunc_wrapper to find external symbols
    _launch_threads()

    info = build_gufunc_wrapper(llvm_func, cres, sin, sout,
                                cache=False, is_parfors=True)
    wrapper_name = info.name
    cres.library._ensure_finalized()

    if config.DEBUG_ARRAY_OPT:
        print("parallel function = ", wrapper_name, cres)

    # loadvars for loop_ranges
    def load_range(v):
        # An ir.Var is loaded from the lowerer; a plain constant is emitted
        # as an LLVM uintp constant.
        if isinstance(v, ir.Var):
            return lowerer.loadvar(v.name)
        else:
            return context.get_constant(types.uintp, v)

    num_dim = len(loop_ranges)
    for i in range(num_dim):
        start, stop, step = loop_ranges[i]
        start = load_range(start)
        stop = load_range(stop)
        assert(step == 1)  # We do not support loop steps other than 1
        step = load_range(step)
        loop_ranges[i] = (start, stop, step)

        if config.DEBUG_ARRAY_OPT:
            print("call_parallel_gufunc loop_ranges[{}] = ".format(i), start,
                  stop, step)
            cgutils.printf(builder, "loop range[{}]: %d %d (%d)\n".format(i),
                           start, stop, step)

    # Commonly used LLVM types and constants
    byte_t = llvmlite.ir.IntType(8)
    byte_ptr_t = llvmlite.ir.PointerType(byte_t)
    byte_ptr_ptr_t = llvmlite.ir.PointerType(byte_ptr_t)
    intp_t = context.get_value_type(types.intp)
    uintp_t = context.get_value_type(types.uintp)
    intp_ptr_t = llvmlite.ir.PointerType(intp_t)
    uintp_ptr_t = llvmlite.ir.PointerType(uintp_t)
    zero = context.get_constant(types.uintp, 0)
    one = context.get_constant(types.uintp, 1)
    one_type = one.type
    sizeof_intp = context.get_abi_sizeof(intp_t)

    # Prepare sched, first pop it out of expr_args, outer_sig, and gu_signature
    expr_args.pop(0)
    sched_sig = sin.pop(0)

    if config.DEBUG_ARRAY_OPT:
        print("Parfor has potentially negative start", index_var_typ.signed)

    # Pick signed/unsigned schedule element type to match the index type.
    if index_var_typ.signed:
        sched_type = intp_t
        sched_ptr_type = intp_ptr_t
    else:
        sched_type = uintp_t
        sched_ptr_type = uintp_ptr_t

    # Call do_scheduling with appropriate arguments
    dim_starts = cgutils.alloca_once(
        builder, sched_type, size=context.get_constant(
            types.uintp, num_dim), name="dim_starts")
    dim_stops = cgutils.alloca_once(
        builder, sched_type, size=context.get_constant(
            types.uintp, num_dim), name="dim_stops")
    for i in range(num_dim):
        start, stop, step = loop_ranges[i]
        # Widen range values to the schedule element width if necessary.
        if start.type != one_type:
            start = builder.sext(start, one_type)
        if stop.type != one_type:
            stop = builder.sext(stop, one_type)
        if step.type != one_type:
            step = builder.sext(step, one_type)
        # substract 1 because do-scheduling takes inclusive ranges
        stop = builder.sub(stop, one)
        builder.store(
            start, builder.gep(
                dim_starts, [
                    context.get_constant(
                        types.uintp, i)]))
        builder.store(stop, builder.gep(dim_stops,
                                        [context.get_constant(types.uintp, i)]))

    get_chunksize = cgutils.get_or_insert_function(
        builder.module,
        llvmlite.ir.FunctionType(uintp_t, []),
        name="get_parallel_chunksize")
    set_chunksize = cgutils.get_or_insert_function(
        builder.module,
        llvmlite.ir.FunctionType(llvmlite.ir.VoidType(), [uintp_t]),
        name="set_parallel_chunksize")
    get_num_threads = cgutils.get_or_insert_function(
        builder.module,
        llvmlite.ir.FunctionType(llvmlite.ir.IntType(types.intp.bitwidth), []),
        "get_num_threads")
    num_threads = builder.call(get_num_threads, [])
    current_chunksize = builder.call(get_chunksize, [])

    # Guard against a non-positive thread count at runtime.
    with cgutils.if_unlikely(builder, builder.icmp_signed('<=', num_threads,
                                                          num_threads.type(0))):
        cgutils.printf(builder, "num_threads: %d\n", num_threads)
        context.call_conv.return_user_exc(builder, RuntimeError,
                                          ("Invalid number of threads. "
                                           "This likely indicates a bug in Numba.",))

    get_sched_size_fnty = llvmlite.ir.FunctionType(uintp_t, [uintp_t, uintp_t, intp_ptr_t, intp_ptr_t])
    get_sched_size = cgutils.get_or_insert_function(
        builder.module,
        get_sched_size_fnty,
        name="get_sched_size")
    num_divisions = builder.call(get_sched_size, [num_threads,
                                                  context.get_constant(types.uintp, num_dim),
                                                  dim_starts,
                                                  dim_stops])
    # Chunksize 0 while the parfor runs; restored after the kernel call below.
    builder.call(set_chunksize, [zero])

    # The schedule holds 2 entries (start, end) per dimension per division.
    multiplier = context.get_constant(types.uintp, num_dim * 2)
    sched_size = builder.mul(num_divisions, multiplier)
    sched = builder.alloca(sched_type, size=sched_size, name="sched")
    debug_flag = 1 if config.DEBUG_ARRAY_OPT else 0
    scheduling_fnty = llvmlite.ir.FunctionType(
        intp_ptr_t, [uintp_t, intp_ptr_t, intp_ptr_t, uintp_t, sched_ptr_type, intp_t])
    if index_var_typ.signed:
        do_scheduling = cgutils.get_or_insert_function(builder.module,
                                                       scheduling_fnty,
                                                       name="do_scheduling_signed")
    else:
        do_scheduling = cgutils.get_or_insert_function(builder.module,
                                                       scheduling_fnty,
                                                       name="do_scheduling_unsigned")

    builder.call(
        do_scheduling, [
            context.get_constant(
                types.uintp, num_dim), dim_starts, dim_stops, num_divisions,
            sched, context.get_constant(
                types.intp, debug_flag)])

    # Get the LLVM vars for the Numba IR reduction array vars.
    redarrs = [lowerer.loadvar(redarrdict[x].name) for x in redvars]

    nredvars = len(redvars)
    ninouts = len(expr_args) - nredvars

    def load_potential_tuple_var(x):
        """Given a variable name, if that variable is not a new name
        introduced as the extracted part of a tuple then just return
        the variable loaded from its name.  However, if the variable
        does represent part of a tuple, as recognized by the name of
        the variable being present in the exp_name_to_tuple_var dict,
        then we load the original tuple var instead that we get from
        the dict and then extract the corresponding element of the
        tuple, also stored and returned to use in the dict (i.e., offset).
        """
        if x in exp_name_to_tuple_var:
            orig_tup, offset = exp_name_to_tuple_var[x]
            tup_var = lowerer.loadvar(orig_tup)
            res = builder.extract_value(tup_var, offset)
            return res
        else:
            return lowerer.loadvar(x)

    # ----------------------------------------------------------------------------
    # Prepare arguments: args, shapes, steps, data
    all_args = [load_potential_tuple_var(x) for x in expr_args[:ninouts]] + redarrs

    num_args = len(all_args)
    num_inps = len(sin) + 1
    args = cgutils.alloca_once(
        builder,
        byte_ptr_t,
        size=context.get_constant(
            types.intp,
            1 + num_args),
        name="pargs")
    array_strides = []
    # sched goes first
    builder.store(builder.bitcast(sched, byte_ptr_t), args)
    array_strides.append(context.get_constant(types.intp, sizeof_intp))

    rv_to_arg_dict = {}
    # followed by other arguments
    for i in range(num_args):
        arg = all_args[i]
        var = expr_args[i]
        aty = expr_arg_types[i]
        dst = builder.gep(args, [context.get_constant(types.intp, i + 1)])
        if i >= ninouts:  # reduction variables
            ary = context.make_array(aty)(context, builder, arg)
            strides = cgutils.unpack_tuple(builder, ary.strides, aty.ndim)
            # Start from 1 because we skip the first dimension of length num_threads just like sched.
            for j in range(len(strides)):
                array_strides.append(strides[j])
            builder.store(builder.bitcast(ary.data, byte_ptr_t), dst)
        elif isinstance(aty, types.ArrayCompatible):
            if var in races:
                # Race variable: pass a pointer to a private 1-element slot;
                # the value is copied back out after the kernel call.
                typ = (context.get_data_type(aty.dtype)
                       if aty.dtype != types.boolean
                       else llvmlite.ir.IntType(1))

                rv_arg = cgutils.alloca_once(builder, typ)
                builder.store(arg, rv_arg)
                builder.store(builder.bitcast(rv_arg, byte_ptr_t), dst)
                rv_to_arg_dict[var] = (arg, rv_arg)

                array_strides.append(context.get_constant(types.intp, context.get_abi_sizeof(typ)))
            else:
                ary = context.make_array(aty)(context, builder, arg)
                strides = cgutils.unpack_tuple(builder, ary.strides, aty.ndim)
                for j in range(len(strides)):
                    array_strides.append(strides[j])
                builder.store(builder.bitcast(ary.data, byte_ptr_t), dst)
        else:
            if i < num_inps:
                # Scalar input, need to store the value in an array of size 1
                typ = (context.get_data_type(aty)
                       if not isinstance(aty, types.Boolean)
                       else llvmlite.ir.IntType(1))
                ptr = cgutils.alloca_once(builder, typ)
                builder.store(arg, ptr)
            else:
                # Scalar output, must allocate
                typ = (context.get_data_type(aty)
                       if not isinstance(aty, types.Boolean)
                       else llvmlite.ir.IntType(1))
                ptr = cgutils.alloca_once(builder, typ)
            builder.store(builder.bitcast(ptr, byte_ptr_t), dst)

    # ----------------------------------------------------------------------------
    # Next, we prepare the individual dimension info recorded in gu_signature
    sig_dim_dict = {}
    occurances = []
    occurances = [sched_sig[0]]
    sig_dim_dict[sched_sig[0]] = context.get_constant(types.intp, 2 * num_dim)
    assert len(expr_args) == len(all_args)
    assert len(expr_args) == len(expr_arg_types)
    assert len(expr_args) == len(sin + sout)
    assert len(expr_args) == len(outer_sig.args[1:])
    for var, arg, aty, gu_sig in zip(expr_args, all_args,
                                     expr_arg_types, sin + sout):
        # i indexes the first array axis that the gufunc signature covers.
        if isinstance(aty, types.npytypes.Array):
            i = aty.ndim - len(gu_sig)
        else:
            i = 0
        if config.DEBUG_ARRAY_OPT:
            print("var =", var, "gu_sig =", gu_sig, "type =", aty, "i =", i)

        for dim_sym in gu_sig:
            if config.DEBUG_ARRAY_OPT:
                print("var = ", var, " type = ", aty)
            if var in races:
                sig_dim_dict[dim_sym] = context.get_constant(types.intp, 1)
            else:
                ary = context.make_array(aty)(context, builder, arg)
                shapes = cgutils.unpack_tuple(builder, ary.shape, aty.ndim)
                sig_dim_dict[dim_sym] = shapes[i]

            if not (dim_sym in occurances):
                if config.DEBUG_ARRAY_OPT:
                    print("dim_sym = ", dim_sym, ", i = ", i)
                    cgutils.printf(builder, dim_sym + " = %d\n", sig_dim_dict[dim_sym])
                occurances.append(dim_sym)
            i = i + 1

    # ----------------------------------------------------------------------------
    # Prepare shapes, which is a single number (outer loop size), followed by
    # the size of individual shape variables.
    nshapes = len(sig_dim_dict) + 1
    shapes = cgutils.alloca_once(builder, intp_t, size=nshapes, name="pshape")
    # For now, outer loop size is the same as number of threads
    builder.store(num_divisions, shapes)
    # Individual shape variables go next
    i = 1
    for dim_sym in occurances:
        if config.DEBUG_ARRAY_OPT:
            cgutils.printf(builder, dim_sym + " = %d\n", sig_dim_dict[dim_sym])
        builder.store(
            sig_dim_dict[dim_sym], builder.gep(
                shapes, [
                    context.get_constant(
                        types.intp, i)]))
        i = i + 1

    # ----------------------------------------------------------------------------
    # Prepare steps for each argument. Note that all steps are counted in
    # bytes.
    num_steps = num_args + 1 + len(array_strides)
    steps = cgutils.alloca_once(
        builder, intp_t, size=context.get_constant(
            types.intp, num_steps), name="psteps")
    # First goes the step size for sched, which is 2 * num_dim
    builder.store(context.get_constant(types.intp, 2 * num_dim * sizeof_intp),
                  steps)
    # The steps for all others are 0, except for reduction results.
    for i in range(num_args):
        # steps are strides from one thread to the next
        stepsize = zero

        dst = builder.gep(steps, [context.get_constant(types.intp, 1 + i)])
        builder.store(stepsize, dst)
    for j in range(len(array_strides)):
        dst = builder.gep(
            steps, [
                context.get_constant(
                    types.intp, 1 + num_args + j)])
        builder.store(array_strides[j], dst)

    # ----------------------------------------------------------------------------
    # prepare data
    data = cgutils.get_null_value(byte_ptr_t)

    fnty = llvmlite.ir.FunctionType(llvmlite.ir.VoidType(),
                                    [byte_ptr_ptr_t, intp_ptr_t,
                                     intp_ptr_t, byte_ptr_t])

    fn = cgutils.get_or_insert_function(builder.module, fnty, wrapper_name)
    context.active_code_library.add_linking_library(info.library)

    if config.DEBUG_ARRAY_OPT:
        cgutils.printf(builder, "before calling kernel %p\n", fn)
    builder.call(fn, [args, shapes, steps, data])
    if config.DEBUG_ARRAY_OPT:
        cgutils.printf(builder, "after calling kernel %p\n", fn)

    # Restore the chunksize saved before the kernel call.
    builder.call(set_chunksize, [current_chunksize])

    # Copy race-variable results back out of their private slots.
    for k, v in rv_to_arg_dict.items():
        arg, rv_arg = v
        only_elem_ptr = builder.gep(rv_arg, [context.get_constant(types.intp, 0)])
        builder.store(builder.load(only_elem_ptr), lowerer.getvar(k))

    context.active_code_library.add_linking_library(cres.library)
|
from numba import ir, errors, types
from . import register_rewrite, Rewrite
@register_rewrite('before-inference')
class RewriteConstGetitems(Rewrite):
    """
    Replace `getitem(value=arr, index=$constXX)` expressions whose index
    is a known compile-time constant with the equivalent
    `static_getitem(value=arr, index=<constant value>)` expression.
    """

    def match(self, func_ir, block, typemap, calltypes):
        # Map each rewritable getitem expression to its constant index.
        found = {}
        for candidate in block.find_exprs(op='getitem'):
            if candidate.op != 'getitem':
                continue
            try:
                index_const = func_ir.infer_constant(candidate.index)
            except errors.ConstantInferenceError:
                # Index is not a compile-time constant; leave unchanged.
                pass
            else:
                found[candidate] = index_const
        self.getitems = found
        self.block = block
        return bool(found)

    def apply(self):
        """
        Emit a copy of the matched block in which every recorded getitem
        has been rewritten as a static_getitem.
        """
        result = self.block.copy()
        result.clear()
        for stmt in self.block.body:
            if isinstance(stmt, ir.Assign) and stmt.value in self.getitems:
                old = stmt.value
                replacement = ir.Expr.static_getitem(value=old.value,
                                                     index=self.getitems[old],
                                                     index_var=old.index,
                                                     loc=old.loc)
                stmt = ir.Assign(value=replacement, target=stmt.target,
                                 loc=stmt.loc)
            result.append(stmt)
        return result
@register_rewrite('after-inference')
class RewriteConstGetitemsAfterInf(Rewrite):
    """
    Rewrite IR expressions of the kind `getitem(value=arr, index=$XX)`
    where `$XX` is typed as a StringLiteral as
    `static_getitem(value=arr, index=<literal value>)`.
    """

    def match(self, func_ir, block, typemap, calltypes):
        # Maps each rewritable getitem Expr to (index variable, literal value).
        self.getitems = getitems = {}
        self.block = block
        # Detect all getitem expressions and find which ones can be
        # rewritten
        for expr in block.find_exprs(op='getitem'):
            if expr.op == 'getitem':
                # Running after type inference, so the literal index is
                # recovered from the typemap rather than constant inference.
                if isinstance(typemap[expr.index.name],
                              types.StringLiteral):
                    getitems[expr] = (expr.index,
                                      typemap[expr.index.name].literal_value)
        return len(getitems) > 0

    def apply(self):
        """
        Rewrite all matching getitems as static_getitems.
        """
        # Rebuild the block, substituting the recorded getitems in place.
        new_block = self.block.copy()
        new_block.clear()
        for inst in self.block.body:
            if isinstance(inst, ir.Assign):
                expr = inst.value
                if expr in self.getitems:
                    const, lit_val = self.getitems[expr]
                    new_expr = ir.Expr.static_getitem(value=expr.value,
                                                      index=lit_val,
                                                      index_var=expr.index,
                                                      loc=expr.loc)
                    inst = ir.Assign(value=new_expr, target=inst.target,
                                     loc=inst.loc)
            new_block.append(inst)
        return new_block
@register_rewrite('before-inference')
class RewriteConstSetitems(Rewrite):
    """
    Replace `setitem(target=arr, index=$constXX, ...)` statements whose
    index is a known compile-time constant with the equivalent
    `static_setitem(target=arr, index=<constant value>, ...)` statement.
    """

    def match(self, func_ir, block, typemap, calltypes):
        # Map each rewritable SetItem statement to its constant index.
        found = {}
        for stmt in block.find_insts(ir.SetItem):
            try:
                index_const = func_ir.infer_constant(stmt.index)
            except errors.ConstantInferenceError:
                # Index is not a compile-time constant; leave unchanged.
                pass
            else:
                found[stmt] = index_const
        self.setitems = found
        self.block = block
        return bool(found)

    def apply(self):
        """
        Emit a copy of the matched block in which every recorded setitem
        has been rewritten as a static_setitem.
        """
        result = self.block.copy()
        result.clear()
        for stmt in self.block.body:
            if stmt in self.setitems:
                result.append(ir.StaticSetItem(stmt.target,
                                               self.setitems[stmt],
                                               stmt.index, stmt.value,
                                               stmt.loc))
            else:
                result.append(stmt)
        return result
Update the docstring of the after-inference getitem rewrite to describe matching on string-literal index types.
from numba import ir, errors, types
from . import register_rewrite, Rewrite
@register_rewrite('before-inference')
class RewriteConstGetitems(Rewrite):
    """
    Rewrite IR expressions of the kind `getitem(value=arr, index=$constXX)`
    where `$constXX` is a known constant as
    `static_getitem(value=arr, index=<constant value>)`.
    """

    def match(self, func_ir, block, typemap, calltypes):
        # Maps each rewritable getitem Expr to its inferred constant index.
        self.getitems = getitems = {}
        self.block = block
        # Detect all getitem expressions and find which ones can be
        # rewritten
        for expr in block.find_exprs(op='getitem'):
            if expr.op == 'getitem':
                try:
                    const = func_ir.infer_constant(expr.index)
                except errors.ConstantInferenceError:
                    # Index is not a compile-time constant; leave as-is.
                    continue
                getitems[expr] = const
        return len(getitems) > 0

    def apply(self):
        """
        Rewrite all matching getitems as static_getitems.
        """
        # Rebuild the block, substituting the recorded getitems in place.
        new_block = self.block.copy()
        new_block.clear()
        for inst in self.block.body:
            if isinstance(inst, ir.Assign):
                expr = inst.value
                if expr in self.getitems:
                    const = self.getitems[expr]
                    new_expr = ir.Expr.static_getitem(value=expr.value,
                                                      index=const,
                                                      index_var=expr.index,
                                                      loc=expr.loc)
                    inst = ir.Assign(value=new_expr, target=inst.target,
                                     loc=inst.loc)
            new_block.append(inst)
        return new_block
@register_rewrite('after-inference')
class RewriteLiteralGetitems(Rewrite):
    """
    Replace `getitem(value=arr, index=$XX)` expressions whose index is
    typed as a string literal with the equivalent
    `static_getitem(value=arr, index=<literal value>)` expression.
    """

    def match(self, func_ir, block, typemap, calltypes):
        # Map each rewritable getitem to (index variable, literal value).
        found = {}
        for candidate in block.find_exprs(op='getitem'):
            if candidate.op != 'getitem':
                continue
            index_type = typemap[candidate.index.name]
            if isinstance(index_type, types.StringLiteral):
                found[candidate] = (candidate.index, index_type.literal_value)
        self.getitems = found
        self.block = block
        return bool(found)

    def apply(self):
        """
        Emit a copy of the matched block in which every recorded getitem
        has been rewritten as a static_getitem on its literal value.
        """
        result = self.block.copy()
        result.clear()
        for stmt in self.block.body:
            if isinstance(stmt, ir.Assign) and stmt.value in self.getitems:
                old = stmt.value
                _, literal = self.getitems[old]
                replacement = ir.Expr.static_getitem(value=old.value,
                                                     index=literal,
                                                     index_var=old.index,
                                                     loc=old.loc)
                stmt = ir.Assign(value=replacement, target=stmt.target,
                                 loc=stmt.loc)
            result.append(stmt)
        return result
@register_rewrite('before-inference')
class RewriteConstSetitems(Rewrite):
    """
    Rewrite IR statements of the kind `setitem(target=arr, index=$constXX, ...)`
    where `$constXX` is a known constant as
    `static_setitem(target=arr, index=<constant value>, ...)`.
    """

    def match(self, func_ir, block, typemap, calltypes):
        # Maps each rewritable SetItem statement to its constant index.
        self.setitems = setitems = {}
        self.block = block
        # Detect all setitem statements and find which ones can be
        # rewritten
        for inst in block.find_insts(ir.SetItem):
            try:
                const = func_ir.infer_constant(inst.index)
            except errors.ConstantInferenceError:
                # Index is not a compile-time constant; leave as-is.
                continue
            setitems[inst] = const
        return len(setitems) > 0

    def apply(self):
        """
        Rewrite all matching setitems as static_setitems.
        """
        # Rebuild the block, substituting the recorded setitems in place.
        new_block = self.block.copy()
        new_block.clear()
        for inst in self.block.body:
            if inst in self.setitems:
                const = self.setitems[inst]
                new_inst = ir.StaticSetItem(inst.target, const,
                                            inst.index, inst.value, inst.loc)
                new_block.append(new_inst)
            else:
                new_block.append(inst)
        return new_block
|
#!/usr/bin/python
# takes templated file .xxx.src and produces .xxx file where .xxx is .i or .c or .h
# using the following template rules
# /**begin repeat on a line by itself marks the beginning of a segment of code to be repeated
# /**end repeat**/ on a line by itself marks it's end
# after the /**begin repeat and before the */
# all the named templates are placed
# these should all have the same number of replacements
# in the main body, the names are used.
# Each replace will use one entry from the list of named replacements
# Note that all #..# forms in a block must have the same number of
# comma-separated entries.
# Public API of this template-processing module.
__all__ = ['process_str', 'process_file']

import string,os,sys
# Python < 2.3 shipped the old `pre` regex engine; alias it as `re`.
if sys.version[:3]>='2.3':
    import re
else:
    import pre as re
# Pre-2.3 compatibility shim: define the boolean constants when absent.
# NOTE(review): assigning to False/True is a SyntaxError on Python 3.
False = 0
True = 1
def parse_structure(astr):
    """Locate every /**begin repeat ... /**end repeat**/ section in *astr*.

    Returns a sorted list of tuples
    (section_start, body_start, body_end, section_end, body_line)
    where the first four items are offsets into *astr* and body_line is
    the 1-based line number on which the repeated body begins.
    """
    sections = []
    pos = 0
    lineno = 1
    while True:
        beg = astr.find("/**begin repeat", pos)
        if beg == -1:
            break
        # The body starts on the line after the header's closing */.
        hdr_end = astr.find("*/", beg)
        hdr_end = astr.find("\n", hdr_end)
        body_end = astr.find("/**end repeat**/", hdr_end)
        sect_end = astr.find("\n", body_end)
        lineno += astr.count("\n", pos, hdr_end + 1)
        sections.append((beg, hdr_end + 1, body_end, sect_end + 1, lineno))
        lineno += astr.count("\n", hdr_end + 1, sect_end)
        pos = sect_end
    sections.sort()
    return sections
# return n copies of substr with template replacement
# Extra replacement names injected into every expansion (currently none).
_special_names = {}
# @name@ occurrences in the body, substituted per repetition.
template_re = re.compile(r"@([\w]+)@")
# #name=a,b,c# replacement definitions in the repeat header.
named_re = re.compile(r"#([\w]*)=([^#]*?)#")
# '(body)*N' repetition shorthand.
parenrep = re.compile(r"[(]([^)]*?)[)]\*(\d+)")
def paren_repl(obj):
    """Regex-sub callback: expand a '(body)*N' match into N comma-joined
    copies of body."""
    body, count = obj.group(1), obj.group(2)
    return ','.join([body] * int(count))
# 'xxx*N' repetition shorthand (no parentheses).
plainrep = re.compile(r"([^*]+)\*(\d+)")
def conv(astr):
    """Expand repetition shorthand in a comma-separated template list.

    '(a,b,c)*4' becomes 'a,b,c,a,b,c,a,b,c,a,b,c' and a bare 'xxx*3'
    becomes 'xxx,xxx,xxx'.
    """
    expanded = parenrep.sub(paren_repl, astr)
    pieces = [plainrep.sub(paren_repl, piece.strip())
              for piece in expanded.split(',')]
    return ','.join(pieces)
def unique_key(adict):
    """Return a key that does not collide with any existing key of *adict*.

    Builds a candidate by concatenating the first *n* characters of every
    existing key, growing *n* until the candidate is unique.  If growing
    *n* stops changing the candidate (all keys fully consumed), underscores
    are appended instead -- the original looped forever in that case,
    e.g. unique_key({"a": 1}).
    """
    allkeys = list(adict.keys())
    n = 1
    newkey = "".join([k[:n] for k in allkeys])
    prev = newkey
    while newkey in allkeys:
        n += 1
        candidate = "".join([k[:n] for k in allkeys])
        if candidate == prev:
            # Prefixes exhausted; extend with a filler character so the
            # loop always terminates.
            newkey += "_"
        else:
            prev = candidate
            newkey = candidate
    return newkey
def namerepl(match):
    """Regex-sub callback: replace an @name@ match with its current value.

    Reads the module-global replacement table ``_names`` and the index of
    the substitution currently being expanded (``_thissub``), both set up
    by expand_sub().
    """
    global _names, _thissub
    name = match.group(1)
    return _names[name][_thissub]
def expand_sub(substr, namestr, line):
    """Expand one repeat section.

    substr is the template body, namestr the /**begin repeat ... */ header
    holding the #name=list# replacement definitions, and line the 1-based
    line number of the body (used in the emitted #line directives).
    Returns the body repeated once per substitution, with every @name@
    replaced by the corresponding list entry.
    """
    global _names, _thissub
    # find all named replacements
    reps = named_re.findall(namestr)
    _names = {}
    _names.update(_special_names)
    numsubs = None
    for rep in reps:
        name = rep[0].strip()
        thelist = conv(rep[1])
        _names[name] = thelist
    # make lists out of string entries in name dictionary
    for name in _names.keys():
        entry = _names[name]
        entrylist = entry.split(',')
        _names[name] = entrylist
        num = len(entrylist)
        # Every replacement list must supply the same number of entries.
        if numsubs is None:
            numsubs = num
        elif (numsubs != num):
            print namestr
            print substr
            raise ValueError, "Mismatch in number to replace"
    # now replace all keys for each of the lists
    mystr = ''
    for k in range(numsubs):
        _thissub = k
        mystr += ("#line %d\n%s\n\n"
                  % (line, template_re.sub(namerepl, substr)))
    return mystr
# Warning banner written verbatim at the top of every generated file.
_head = \
"""/* This file was autogenerated from a template DO NOT EDIT!!!!
Changes should be made to the original source (.src) file
*/
"""
def get_line_header(str,beg):
    """Return the text on *str*'s line preceding offset *beg*.

    Walks backwards from beg-1 until a newline (exclusive) or offset 0
    is reached (the character at offset 0 itself is never included --
    preserved quirk of the original).
    """
    collected = []
    pos = beg - 1
    cur = str[pos]
    while pos > 0 and cur != '\n':
        collected.append(cur)
        pos -= 1
        cur = str[pos]
    return ''.join(reversed(collected))
def process_str(allstr):
    """Expand every repeat section of *allstr* and return the result,
    prefixed with the autogeneration warning banner."""
    out = [_head]
    prev_end = 0
    # parse_structure yields sorted (start, body_start, body_end, end,
    # line) tuples, one per repeat section.
    for (start, body_start, body_end, end, line) in parse_structure(allstr):
        out.append(allstr[prev_end:start])
        out.append(expand_sub(allstr[body_start:body_end],
                              allstr[start:body_start], line))
        prev_end = end
    out.append(allstr[prev_end:])
    return ''.join(out)
# Matches `#include "path/to/file.src"` directives (case-insensitive).
include_src_re = re.compile(r"(\n|\A)#include\s*['\"]"
                            r"(?P<name>[\w\d./\\]+[.]src)['\"]", re.I)
def resolve_includes(source):
    """Read *source* and recursively inline #include "....src" files.

    Returns the resulting list of lines; includes whose target does not
    exist, and all non-.src lines, are passed through untouched.
    """
    d = os.path.dirname(source)
    fid = open(source)
    lines = []
    for line in fid.readlines():
        m = include_src_re.match(line)
        if m:
            fn = m.group('name')
            if not os.path.isabs(fn):
                fn = os.path.join(d,fn)
            if os.path.isfile(fn):
                print 'Including file',fn
                lines.extend(resolve_includes(fn))
            else:
                # Referenced file missing: keep the #include line as-is.
                lines.append(line)
        else:
            lines.append(line)
    fid.close()
    return lines
def process_file(source):
    """Expand the templated file *source* and return the generated text,
    prefixed with a #line directive pointing back at the original."""
    lines = resolve_includes(source)
    # Escape backslashes so the path survives inside the C string literal.
    sourcefile = os.path.normpath(source).replace("\\","\\\\")
    return ('#line 1 "%s"\n%s'
            % (sourcefile, process_str(''.join(lines))))
if __name__ == "__main__":
    # Filter stdin->stdout when no argument is given; otherwise process
    # the named .xxx.src file into the matching .xxx file.
    try:
        file = sys.argv[1]
    except IndexError:
        fid = sys.stdin
        outfile = sys.stdout
    else:
        fid = open(file,'r')
        (base, ext) = os.path.splitext(file)
        newname = base
        outfile = open(newname,'w')
    allstr = fid.read()
    writestr = process_str(allstr)
    outfile.write(writestr)
Changed normpath to normcase to close #129
git-svn-id: 77a43f9646713b91fea7788fad5dfbf67e151ece@2605 94b884b6-d6fd-0310-90d3-974f1d3f35e1
#!/usr/bin/python
# takes templated file .xxx.src and produces .xxx file where .xxx is .i or .c or .h
# using the following template rules
# /**begin repeat on a line by itself marks the beginning of a segment of code to be repeated
# /**end repeat**/ on a line by itself marks it's end
# after the /**begin repeat and before the */
# all the named templates are placed
# these should all have the same number of replacements
# in the main body, the names are used.
# Each replace will use one entry from the list of named replacements
# Note that all #..# forms in a block must have the same number of
# comma-separated entries.
# Public API of this template-processing module.
__all__ = ['process_str', 'process_file']

import string,os,sys
# Python < 2.3 shipped the old `pre` regex engine; alias it as `re`.
if sys.version[:3]>='2.3':
    import re
else:
    import pre as re
# Pre-2.3 compatibility shim: define the boolean constants when absent.
# NOTE(review): assigning to False/True is a SyntaxError on Python 3.
False = 0
True = 1
def parse_structure(astr):
    """Return a sorted list of (start, body_start, body_end, end, line)
    tuples, one per /**begin repeat ... /**end repeat**/ section; the
    offsets index into *astr* and line is the 1-based line number of the
    repeated body."""
    spanlist = []
    # subroutines
    ind = 0
    line = 1
    while 1:
        start = astr.find("/**begin repeat", ind)
        if start == -1:
            break
        # The body begins on the line after the header's closing */.
        start2 = astr.find("*/",start)
        start2 = astr.find("\n",start2)
        fini1 = astr.find("/**end repeat**/",start2)
        fini2 = astr.find("\n",fini1)
        # Track the 1-based line number where the body starts.
        line += astr.count("\n", ind, start2+1)
        spanlist.append((start, start2+1, fini1, fini2+1, line))
        line += astr.count("\n", start2+1, fini2)
        ind = fini2
    spanlist.sort()
    return spanlist
# return n copies of substr with template replacement
# Extra replacement names injected into every expansion (currently none).
_special_names = {}
# @name@ occurrences in the body, substituted per repetition.
template_re = re.compile(r"@([\w]+)@")
# #name=a,b,c# replacement definitions in the repeat header.
named_re = re.compile(r"#([\w]*)=([^#]*?)#")
# '(body)*N' repetition shorthand.
parenrep = re.compile(r"[(]([^)]*?)[)]\*(\d+)")
def paren_repl(obj):
    """Regex-sub callback: expand a '(body)*N' match into N comma-joined
    copies of body."""
    body, count = obj.group(1), obj.group(2)
    return ','.join([body] * int(count))
# 'xxx*N' repetition shorthand (no parentheses).
plainrep = re.compile(r"([^*]+)\*(\d+)")
def conv(astr):
    """Expand '(a,b,c)*4' and bare 'xxx*3' repetition shorthand within a
    comma-separated replacement list."""
    # replaces all occurrences of '(a,b,c)*4' in astr
    # with 'a,b,c,a,b,c,a,b,c,a,b,c'
    astr = parenrep.sub(paren_repl,astr)
    # replaces occurrences of xxx*3 with xxx, xxx, xxx
    astr = ','.join([plainrep.sub(paren_repl,x.strip()) for x in astr.split(',')])
    return astr
def unique_key(adict):
    """Return a key that does not collide with any existing key of *adict*.

    Builds a candidate by concatenating the first *n* characters of every
    existing key, growing *n* until the candidate is unique.  If growing
    *n* stops changing the candidate (all keys fully consumed), underscores
    are appended instead -- the original looped forever in that case,
    e.g. unique_key({"a": 1}).
    """
    allkeys = list(adict.keys())
    n = 1
    newkey = "".join([k[:n] for k in allkeys])
    prev = newkey
    while newkey in allkeys:
        n += 1
        candidate = "".join([k[:n] for k in allkeys])
        if candidate == prev:
            # Prefixes exhausted; extend with a filler character so the
            # loop always terminates.
            newkey += "_"
        else:
            prev = candidate
            newkey = candidate
    return newkey
def namerepl(match):
    """Regex-sub callback: replace an @name@ match with its current value.

    Reads the module-global replacement table ``_names`` and the index of
    the substitution currently being expanded (``_thissub``), both set up
    by expand_sub().
    """
    global _names, _thissub
    name = match.group(1)
    return _names[name][_thissub]
def expand_sub(substr, namestr, line):
    """Expand one repeat section.

    substr is the template body, namestr the /**begin repeat ... */ header
    holding the #name=list# replacement definitions, and line the 1-based
    line number of the body (used in the emitted #line directives).
    Returns the body repeated once per substitution, with every @name@
    replaced by the corresponding list entry.
    """
    global _names, _thissub
    # find all named replacements
    reps = named_re.findall(namestr)
    _names = {}
    _names.update(_special_names)
    numsubs = None
    for rep in reps:
        name = rep[0].strip()
        thelist = conv(rep[1])
        _names[name] = thelist
    # make lists out of string entries in name dictionary
    for name in _names.keys():
        entry = _names[name]
        entrylist = entry.split(',')
        _names[name] = entrylist
        num = len(entrylist)
        # Every replacement list must supply the same number of entries.
        if numsubs is None:
            numsubs = num
        elif (numsubs != num):
            print namestr
            print substr
            raise ValueError, "Mismatch in number to replace"
    # now replace all keys for each of the lists
    mystr = ''
    for k in range(numsubs):
        _thissub = k
        mystr += ("#line %d\n%s\n\n"
                  % (line, template_re.sub(namerepl, substr)))
    return mystr
# Warning banner written verbatim at the top of every generated file.
_head = \
"""/* This file was autogenerated from a template DO NOT EDIT!!!!
Changes should be made to the original source (.src) file
*/
"""
def get_line_header(str,beg):
    """Return the text on *str*'s line preceding offset *beg*.

    Walks backwards from beg-1 until a newline (exclusive) or offset 0
    is reached (the character at offset 0 itself is never included --
    preserved quirk of the original).
    """
    collected = []
    pos = beg - 1
    cur = str[pos]
    while pos > 0 and cur != '\n':
        collected.append(cur)
        pos -= 1
        cur = str[pos]
    return ''.join(reversed(collected))
def process_str(allstr):
    """Expand every repeat section of *allstr* and return the result,
    prefixed with the autogeneration warning banner."""
    newstr = allstr
    writestr = _head
    struct = parse_structure(newstr)
    # return a (sorted) list of tuples for each begin repeat section
    # each tuple is the start and end of a region to be template repeated
    oldend = 0
    for sub in struct:
        # Copy the literal text before the section, then its expansion.
        writestr += newstr[oldend:sub[0]]
        expanded = expand_sub(newstr[sub[1]:sub[2]],
                              newstr[sub[0]:sub[1]], sub[4])
        writestr += expanded
        oldend = sub[3]
    writestr += newstr[oldend:]
    return writestr
# Matches `#include "path/to/file.src"` directives (case-insensitive).
include_src_re = re.compile(r"(\n|\A)#include\s*['\"]"
                            r"(?P<name>[\w\d./\\]+[.]src)['\"]", re.I)
def resolve_includes(source):
    """Read *source* and recursively inline #include "....src" files.

    Returns the resulting list of lines; includes whose target does not
    exist, and all non-.src lines, are passed through untouched.
    """
    d = os.path.dirname(source)
    fid = open(source)
    lines = []
    for line in fid.readlines():
        m = include_src_re.match(line)
        if m:
            fn = m.group('name')
            if not os.path.isabs(fn):
                fn = os.path.join(d,fn)
            if os.path.isfile(fn):
                print 'Including file',fn
                lines.extend(resolve_includes(fn))
            else:
                # Referenced file missing: keep the #include line as-is.
                lines.append(line)
        else:
            lines.append(line)
    fid.close()
    return lines
def process_file(source):
    """Expand the templated file *source* and return the generated text,
    prefixed with a #line directive pointing back at the original."""
    lines = resolve_includes(source)
    # normcase (see #129) plus backslash escaping so the path survives
    # inside the C string literal.
    sourcefile = os.path.normcase(source).replace("\\","\\\\")
    return ('#line 1 "%s"\n%s'
            % (sourcefile, process_str(''.join(lines))))
if __name__ == "__main__":
    # Filter stdin->stdout when no argument is given; otherwise process
    # the named .xxx.src file into the matching .xxx file.
    try:
        file = sys.argv[1]
    except IndexError:
        fid = sys.stdin
        outfile = sys.stdout
    else:
        fid = open(file,'r')
        (base, ext) = os.path.splitext(file)
        newname = base
        outfile = open(newname,'w')
    allstr = fid.read()
    writestr = process_str(allstr)
    outfile.write(writestr)
|
Use UTC as the default timezone.
|
import os
import sys
from numpy.distutils.cpuinfo import cpu
from numpy.distutils.fcompiler import FCompiler
class NAGFCompiler(FCompiler):
    """numpy.distutils fcompiler definition for the NAGWare Fortran 95
    compiler (f95)."""

    compiler_type = 'nag'
    # Parses "NAGWare Fortran 95 compiler Release X.Y..." from `f95 -V`.
    version_pattern = r'NAGWare Fortran 95 compiler Release (?P<version>[^\s]*)'

    executables = {
        'version_cmd' : ["f95", "-V"],
        'compiler_f77' : ["f95", "-fixed"],
        'compiler_fix' : ["f95", "-fixed"],
        'compiler_f90' : ["f95"],
        'linker_so' : ["f95"],
        'archiver' : ["ar", "-cr"],
        'ranlib' : ["ranlib"]
        }

    def get_flags_linker_so(self):
        # On macOS NAG needs its own unshare flag plus bundle-style linking.
        if sys.platform=='darwin':
            return ['-unsharedf95','-Wl,-bundle,-flat_namespace,-undefined,suppress']
        return ["-Wl,-shared"]

    def get_flags_opt(self):
        return ['-O4']

    def get_flags_arch(self):
        # NOTE(review): assumes the installed release accepts
        # -target=native; confirm for the NAG versions in use.
        return ['-target=native']

    def get_flags_debug(self):
        return ['-g','-gline','-g90','-nan','-C']
if __name__ == '__main__':
    # Smoke test: instantiate the NAG compiler and report its version.
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils.fcompiler import new_fcompiler
    compiler = new_fcompiler(compiler='nag')
    compiler.customize()
    print compiler.get_version()
Try updating version command for NAG compiler.
git-svn-id: 77a43f9646713b91fea7788fad5dfbf67e151ece@3467 94b884b6-d6fd-0310-90d3-974f1d3f35e1
import os
import sys
from numpy.distutils.cpuinfo import cpu
from numpy.distutils.fcompiler import FCompiler
class NAGFCompiler(FCompiler):
    """numpy.distutils fcompiler definition for the NAGWare Fortran 95
    compiler (f95)."""

    compiler_type = 'nag'
    # Parses "NAGWare Fortran 95 compiler Release X.Y..." from `f95 -V`.
    version_pattern = r'NAGWare Fortran 95 compiler Release (?P<version>[^\s]*)'

    executables = {
        'version_cmd' : ["f95", "-V"],
        'compiler_f77' : ["f95", "-fixed"],
        'compiler_fix' : ["f95", "-fixed"],
        'compiler_f90' : ["f95"],
        'linker_so' : ["f95"],
        'archiver' : ["ar", "-cr"],
        'ranlib' : ["ranlib"]
        }

    def get_flags_linker_so(self):
        # On macOS NAG needs its own unshare flag plus bundle-style linking.
        if sys.platform=='darwin':
            return ['-unsharedf95','-Wl,-bundle,-flat_namespace,-undefined,suppress']
        return ["-Wl,-shared"]

    def get_flags_opt(self):
        return ['-O4']

    def get_flags_arch(self):
        """-target=native is only understood by releases before 5.1."""
        version = self.get_version()
        if version is None:
            # Unknown version: keep the historical behaviour (the old
            # `None < '5.1'` comparison also chose -target=native).
            return ['-target=native']
        # BUG fix: plain string comparison misorders multi-digit releases
        # (e.g. '10.0' < '5.1' is True); compare numeric components.
        import re
        numeric = tuple([int(p) for p in re.findall(r'\d+', version)[:2]])
        if numeric < (5, 1):
            return ['-target=native']
        else:
            return ['']

    def get_flags_debug(self):
        return ['-g','-gline','-g90','-nan','-C']
if __name__ == '__main__':
    # Smoke test: instantiate the NAG compiler and report its version.
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils.fcompiler import new_fcompiler
    compiler = new_fcompiler(compiler='nag')
    compiler.customize()
    print compiler.get_version()
|
"""Module to handle compiler configuration. Configuration itself is kept in
.ini like files"""
import sys
from ConfigParser import ConfigParser
from os.path import join as pjoin, dirname as pdirname
from numscons.core.utils import DefaultDict
from numscons.core.misc import _CONFDIR
# Option categories recognized in each compiler section of the .cfg files.
_OPTIONS = ['optim', 'warn', 'debug_sym', 'debug', 'thread', 'extra',
            'link_optim']
class NoCompilerConfig(Exception):
    """Raised when no configuration for a compiler (or for its language)
    was found in the .cfg files."""
    pass
class CompilerConfig(DefaultDict):
    """Place holder for compiler configuration. Behaves like a dictionary
    keyed on the _OPTIONS categories."""
    def __init__(self):
        DefaultDict.__init__(self, _OPTIONS)
        # Every known option category starts out as an empty flag list.
        for k in self.keys():
            self[k] = []
def _get_win32_config_files(filename):
    """Return candidate config file paths for win32 (plus win64 when
    running on an AMD64 machine)."""
    # We import platform here as we only need it for windows and platform
    # import is relatively slow
    import platform
    files = [pjoin(_CONFDIR, 'win32', filename)]
    if platform.machine() == 'AMD64':
        files.append(pjoin(_CONFDIR, 'win64', filename))
    return files
def get_config_files(filename):
    """Return the list of configuration files to consider for perflib
    configuration."""
    if sys.platform != 'win32':
        return [pjoin(_CONFDIR, filename)]
    return _get_win32_config_files(filename)
def get_config(name, language):
    """Returns a CompilerConfig instance for given compiler name and
    language.

    name should be a string (corresponding to the section name in the .cfg
    file), and language should be 'C', 'CXX' or 'F77'.

    Raises NoCompilerConfig for an unknown language or compiler section,
    and IOError when no config file could be read."""
    # XXX: ugly backward compat stuff
    if name == 'intelc':
        name = 'icc'
    # XXX name should be a list
    config = ConfigParser()
    if language == 'C':
        files = get_config_files("compiler.cfg")
    elif language == 'F77':
        files = get_config_files("fcompiler.cfg")
    elif language == 'CXX':
        files = get_config_files("cxxcompiler.cfg")
    else:
        raise NoCompilerConfig("language %s not recognized !" % language)
    st = config.read(files)
    if len(st) < 1:
        raise IOError("config file %s not found" % files)
    if not config.has_section(name):
        # BUG fix: this message used the undefined name `cfgfname`, which
        # raised NameError instead of the intended NoCompilerConfig.
        raise NoCompilerConfig("compiler %s (lang %s) has no configuration "\
                               "in %s" % (name, language, files))
    cfg = CompilerConfig()
    # XXX: fix this at some point
    if name == "ifort" and sys.platform == "win32":
        name = "ifort_win32"
    for o in config.options(name):
        r = config.get(name, o)
        if r:
            cfg[o] = r.split()
    return cfg
BUG: remove obsolete hack for ifort on win32.
"""Module to handle compiler configuration. Configuration itself is kept in
.ini like files"""
import sys
from ConfigParser import ConfigParser
from os.path import join as pjoin, dirname as pdirname
from numscons.core.utils import DefaultDict
from numscons.core.misc import _CONFDIR
# Option categories recognized in each compiler section of the .cfg files.
_OPTIONS = ['optim', 'warn', 'debug_sym', 'debug', 'thread', 'extra',
            'link_optim']
class NoCompilerConfig(Exception):
    """Raised when no configuration for a compiler (or for its language)
    was found in the .cfg files."""
    pass
class CompilerConfig(DefaultDict):
    """Place holder for compiler configuration. Behaves like a dictionary
    keyed on the _OPTIONS categories."""
    def __init__(self):
        DefaultDict.__init__(self, _OPTIONS)
        # Every known option category starts out as an empty flag list.
        for k in self.keys():
            self[k] = []
def _get_win32_config_files(filename):
    """Return candidate config file paths for win32 (plus win64 when
    running on an AMD64 machine)."""
    # We import platform here as we only need it for windows and platform
    # import is relatively slow
    import platform
    files = [pjoin(_CONFDIR, 'win32', filename)]
    if platform.machine() == 'AMD64':
        files.append(pjoin(_CONFDIR, 'win64', filename))
    return files
def get_config_files(filename):
    """Return the list of configuration files to consider for perflib
    configuration."""
    if sys.platform != 'win32':
        return [pjoin(_CONFDIR, filename)]
    return _get_win32_config_files(filename)
def get_config(name, language):
    """Returns a CompilerConfig instance for given compiler name and
    language.

    name should be a string (corresponding to the section name in the .cfg
    file), and language should be 'C', 'CXX' or 'F77'.

    Raises NoCompilerConfig for an unknown language or compiler section,
    and IOError when no config file could be read."""
    # XXX: ugly backward compat stuff
    if name == 'intelc':
        name = 'icc'
    # XXX name should be a list
    config = ConfigParser()
    if language == 'C':
        files = get_config_files("compiler.cfg")
    elif language == 'F77':
        files = get_config_files("fcompiler.cfg")
    elif language == 'CXX':
        files = get_config_files("cxxcompiler.cfg")
    else:
        raise NoCompilerConfig("language %s not recognized !" % language)
    st = config.read(files)
    if len(st) < 1:
        raise IOError("config file %s not found" % files)
    if not config.has_section(name):
        # BUG fix: this message used the undefined name `cfgfname`, which
        # raised NameError instead of the intended NoCompilerConfig.
        raise NoCompilerConfig("compiler %s (lang %s) has no configuration "\
                               "in %s" % (name, language, files))
    cfg = CompilerConfig()
    for o in config.options(name):
        r = config.get(name, o)
        if r:
            cfg[o] = r.split()
    return cfg
|
from django.contrib.auth.models import User
from django.test import TestCase
from molo.core.tests.base import MoloTestCaseMixin
from molo.profiles import models
from nurseconnect import forms
class RegisterFormTestCase(MoloTestCaseMixin, TestCase):
    """Validation tests for the registration forms (MSISDN, security
    questions, clinic code) and the profile password-change form."""

    def setUp(self):
        self.msisdn_form = forms.RegistrationMSISDNForm
        self.security_questions_form = forms.RegistrationSecurityQuestionsForm
        self.clinic_code_form = forms.RegistrationClinicCodeForm
        self.mk_main()
        self.user = User.objects.create_user(
            username="+27791234567",
            password="1234")
        # NOTE(review): this SecurityQuestion is never save()d, unlike in
        # PasswordRecoveryTestCase below -- confirm whether the forms
        # require a persisted question.
        self.question = models.SecurityQuestion(question="What is this?")
        self.clinic_code = "123456"

    def test_register_username_correct(self):
        # An all-digit MSISDN with matching passwords validates.
        form_data = {
            "username": "0820000000",
            "password": "1234",
            "confirm_password": "1234",
            "terms_and_conditions": True
        }
        form = self.msisdn_form(
            data=form_data,
        )
        self.assertEqual(form.is_valid(), True)

    def test_register_username_incorrect(self):
        # A username containing non-digit characters is rejected.
        form_data = {
            "username": "Jeyabal#",
            "password": "1234",
            "terms_and_conditions": True
        }
        form = self.msisdn_form(
            data=form_data
        )
        self.assertEqual(form.is_valid(), False)

    def test_register_security_questions_correct(self):
        form_data = {
            "question_0": "answer"
        }
        form = self.security_questions_form(
            data=form_data,
            questions=[self.question, ]
        )
        self.assertEqual(form.is_valid(), True)

    def test_register_clinic_code_correct(self):
        form_data = {
            "clinic_code": "123456"
        }
        form = self.clinic_code_form(
            data=form_data
        )
        self.assertEqual(form.is_valid(), True)

    def test_register_clinic_code_incorrect(self):
        form_data = {
            "clinic_code": "000000"
        }
        form = self.clinic_code_form(
            data=form_data
        )
        self.assertEqual(form.is_valid(), False)

    def test_register_password_incorrect(self):
        # NOTE(review): the username here is also invalid, so this test
        # may not isolate the password rule -- confirm intent.
        form_data = {
            "username": "Jeyabal#",
            "password": "12345",
            "terms_and_conditions": True
        }
        form = self.msisdn_form(
            data=form_data
        )
        self.assertEqual(form.is_valid(), False)

    def test_password_change_incorrect(self):
        form_data = {
            "old_password": "123",
            "new_password": "jey123",
            "confirm_password": "jey123",
        }
        form = forms.ProfilePasswordChangeForm(
            data=form_data,
        )
        self.assertEqual(form.is_valid(), False)

    def test_password_change_correct(self):
        form_data = {
            "old_password": "1234",
            "new_password": "3456",
            "confirm_password": "3456",
        }
        form = forms.ProfilePasswordChangeForm(
            data=form_data,
        )
        self.assertEqual(form.is_valid(), True)

    def test_terms_and_conditions_is_required(self):
        # Missing terms_and_conditions must invalidate the form.
        form_data = {
            "username": "test",
            "password": "12345",
        }
        form = self.msisdn_form(
            data=form_data
        )
        self.assertEqual(form.is_valid(), False)
class PasswordRecoveryTestCase(MoloTestCaseMixin, TestCase):
    """Tests for the forgot-password form (username + security answer)."""

    def setUp(self):
        self.mk_main()
        self.user = User.objects.create_user(
            username="0831231234",
            email="tester@example.com",
            password="tester")
        # Persist the question so the form can reference it.
        self.question = models.SecurityQuestion(question="What is this?")
        self.question.save()

    def test_username_and_security_answer(self):
        form_data = {
            "username": "0831231234",
            "question_0": "20"
        }
        form = forms.ForgotPasswordForm(
            data=form_data,
            questions=[self.question, ]
        )
        self.assertEqual(form.is_valid(), True)
Test fix
from django.contrib.auth.models import User
from django.test import TestCase
from molo.core.tests.base import MoloTestCaseMixin
from molo.profiles import models
from nurseconnect import forms
class RegisterFormTestCase(MoloTestCaseMixin, TestCase):
    """Validation tests for the registration forms (MSISDN, security
    questions, clinic code) and the profile password-change form."""

    def setUp(self):
        self.msisdn_form = forms.RegistrationMSISDNForm
        self.security_questions_form = forms.RegistrationSecurityQuestionsForm
        self.clinic_code_form = forms.RegistrationClinicCodeForm
        self.mk_main()
        self.user = User.objects.create_user(
            username="+27791234567",
            password="1234")
        # NOTE(review): this SecurityQuestion is never save()d -- confirm
        # whether the forms require a persisted question.
        self.question = models.SecurityQuestion(question="What is this?")
        # NOTE(review): assigned on the in-memory profile without save();
        # confirm persistence is not required by the clinic-code form.
        self.user.profile.for_nurseconnect.clinic_code = "123456"

    def test_register_username_correct(self):
        # An all-digit MSISDN with matching passwords validates.
        form_data = {
            "username": "0820000000",
            "password": "1234",
            "confirm_password": "1234",
            "terms_and_conditions": True
        }
        form = self.msisdn_form(
            data=form_data,
        )
        self.assertEqual(form.is_valid(), True)

    def test_register_username_incorrect(self):
        # A username containing non-digit characters is rejected.
        form_data = {
            "username": "Jeyabal#",
            "password": "1234",
            "terms_and_conditions": True
        }
        form = self.msisdn_form(
            data=form_data
        )
        self.assertEqual(form.is_valid(), False)

    def test_register_security_questions_correct(self):
        form_data = {
            "question_0": "answer"
        }
        form = self.security_questions_form(
            data=form_data,
            questions=[self.question, ]
        )
        self.assertEqual(form.is_valid(), True)

    def test_register_clinic_code_correct(self):
        form_data = {
            "clinic_code": "123456"
        }
        form = self.clinic_code_form(
            data=form_data
        )
        self.assertEqual(form.is_valid(), True)

    def test_register_clinic_code_incorrect(self):
        form_data = {
            "clinic_code": "000000"
        }
        form = self.clinic_code_form(
            data=form_data
        )
        self.assertEqual(form.is_valid(), False)

    def test_register_password_incorrect(self):
        # NOTE(review): the username here is also invalid, so this test
        # may not isolate the password rule -- confirm intent.
        form_data = {
            "username": "Jeyabal#",
            "password": "12345",
            "terms_and_conditions": True
        }
        form = self.msisdn_form(
            data=form_data
        )
        self.assertEqual(form.is_valid(), False)

    def test_password_change_incorrect(self):
        form_data = {
            "old_password": "123",
            "new_password": "jey123",
            "confirm_password": "jey123",
        }
        form = forms.ProfilePasswordChangeForm(
            data=form_data,
        )
        self.assertEqual(form.is_valid(), False)

    def test_password_change_correct(self):
        form_data = {
            "old_password": "1234",
            "new_password": "3456",
            "confirm_password": "3456",
        }
        form = forms.ProfilePasswordChangeForm(
            data=form_data,
        )
        self.assertEqual(form.is_valid(), True)

    def test_terms_and_conditions_is_required(self):
        # Missing terms_and_conditions must invalidate the form.
        form_data = {
            "username": "test",
            "password": "12345",
        }
        form = self.msisdn_form(
            data=form_data
        )
        self.assertEqual(form.is_valid(), False)
class PasswordRecoveryTestCase(MoloTestCaseMixin, TestCase):
    """Tests for the forgot-password form (username + security answer)."""

    def setUp(self):
        self.mk_main()
        self.user = User.objects.create_user(
            username="0831231234",
            email="tester@example.com",
            password="tester")
        # Persist the question so the form can reference it.
        self.question = models.SecurityQuestion(question="What is this?")
        self.question.save()

    def test_username_and_security_answer(self):
        form_data = {
            "username": "0831231234",
            "question_0": "20"
        }
        form = forms.ForgotPasswordForm(
            data=form_data,
            questions=[self.question, ]
        )
        self.assertEqual(form.is_valid(), True)
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Alpaka(CMakePackage, CudaPackage):
    """Abstraction Library for Parallel Kernel Acceleration."""

    homepage = "https://alpaka.readthedocs.io"
    url = "https://github.com/alpaka-group/alpaka/archive/refs/tags/0.6.0.tar.gz"
    git = "https://github.com/alpaka-group/alpaka.git"
    maintainers = ['vvolkl']

    version('develop', branch='develop')
    version('0.6.0', sha256='7424ecaee3af15e587b327e983998410fa379c61d987bfe923c7e95d65db11a3')
    version('0.5.0', sha256='0ba08ea19961dd986160219ba00d6162fe7758980d88a606eff6494d7b3a6cd1')
    version('0.4.0', sha256='ad7905b13c22abcee4344ba225a65078e3f452ad45a9eda907e7d27c08315e46')

    variant("backend", multi=True,
            values=('serial', 'threads', 'fiber', 'tbb', 'omp2_gridblock',
                    'omp2_blockthread', 'omp5', 'oacc', 'cuda', 'cuda_only',
                    'hip', 'hip_only'),
            description="Backends to enable", default='serial')
    variant("examples", default=False, description="Build alpaka examples")

    depends_on('boost')
    depends_on('boost+fiber', when="backend=fiber")

    # make sure no other backend is enabled if using cuda_only or hip_only
    for v in ('serial', 'threads', 'fiber', 'tbb', 'oacc',
              'omp2_gridblock', 'omp2_blockthread', 'omp5', 'cuda', 'hip'):
        conflicts('backend=cuda_only,%s' % v)
        conflicts('backend=hip_only,%s' % v)
    conflicts('backend=cuda_only,hip_only')
    # The OpenACC backend cannot be combined with any OpenMP backend.
    # BUG fix: 'omp2_blockthread' was listed twice and 'omp2_gridblock'
    # was missing, so backend=oacc,omp2_gridblock was never rejected.
    for v in ('omp2_gridblock', 'omp2_blockthread', 'omp5'):
        conflicts('backend=oacc,%s' % v)

    # todo: add conflict between cuda 11.3 and gcc 10.3.0
    # see https://github.com/alpaka-group/alpaka/issues/1297

    def cmake_args(self):
        """Translate each enabled backend variant into the corresponding
        ALPAKA_* CMake toggle."""
        spec = self.spec
        args = []
        if 'backend=serial' in spec:
            args.append(self.define("ALPAKA_ACC_CPU_B_SEQ_T_SEQ_ENABLE", True))
        # Consistency: use the local `spec` alias (this branch previously
        # read self.spec while all its siblings used `spec`).
        if 'backend=threads' in spec:
            args.append(self.define("ALPAKA_ACC_CPU_B_SEQ_T_THREADS_ENABLE", True))
        if 'backend=fiber' in spec:
            args.append(self.define("ALPAKA_ACC_CPU_B_SEQ_T_FIBERS_ENABLE", True))
        if 'backend=tbb' in spec:
            args.append(self.define("ALPAKA_ACC_CPU_B_TBB_T_SEQ_ENABLE", True))
        if 'backend=omp2_gridblock' in spec:
            args.append(self.define("ALPAKA_ACC_CPU_B_OMP2_T_SEQ_ENABLE", True))
        if 'backend=omp2_blockthread' in spec:
            args.append(self.define("ALPAKA_ACC_CPU_B_SEQ_T_OMP2_ENABLE", True))
        if 'backend=omp5' in spec:
            args.append(self.define("ALPAKA_ACC_ANY_BT_OMP5_ENABLE", True))
        if 'backend=oacc' in spec:
            args.append(self.define("ALPAKA_ACC_ANY_BT_OACC_ENABLE", True))
        if 'backend=cuda' in spec:
            args.append(self.define("ALPAKA_ACC_GPU_CUDA_ENABLE", True))
        if 'backend=cuda_only' in spec:
            args.append(self.define("ALPAKA_ACC_GPU_CUDA_ENABLE", True))
            args.append(self.define("ALPAKA_ACC_GPU_CUDA_ONLY_MODE", True))
        if 'backend=hip' in spec:
            args.append(self.define("ALPAKA_ACC_GPU_HIP_ENABLE", True))
        if 'backend=hip_only' in spec:
            args.append(self.define("ALPAKA_ACC_GPU_HIP_ENABLE", True))
            args.append(self.define("ALPAKA_ACC_GPU_HIP_ONLY_MODE", True))
        args.append(self.define_from_variant("alpaka_BUILD_EXAMPLES",
                                             "examples"))
        # need to define, as it is explicitly declared as an option by alpaka:
        args.append(self.define("BUILD_TESTING", self.run_tests))
        return args
Add new version of Alpaka, set minimal CMake version (#25835)
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Alpaka(CMakePackage, CudaPackage):
    """Abstraction Library for Parallel Kernel Acceleration."""

    homepage = "https://alpaka.readthedocs.io"
    url = "https://github.com/alpaka-group/alpaka/archive/refs/tags/0.6.0.tar.gz"
    git = "https://github.com/alpaka-group/alpaka.git"
    maintainers = ['vvolkl']

    version('develop', branch='develop')
    version('0.7.0', sha256='4b61119a7b3b073f281ba15b63430db98b77dbd9420bc290a114f80121fbdd97')
    version('0.6.0', sha256='7424ecaee3af15e587b327e983998410fa379c61d987bfe923c7e95d65db11a3')
    version('0.5.0', sha256='0ba08ea19961dd986160219ba00d6162fe7758980d88a606eff6494d7b3a6cd1')
    version('0.4.0', sha256='ad7905b13c22abcee4344ba225a65078e3f452ad45a9eda907e7d27c08315e46')

    variant("backend", multi=True,
            values=('serial', 'threads', 'fiber', 'tbb', 'omp2_gridblock',
                    'omp2_blockthread', 'omp5', 'oacc', 'cuda', 'cuda_only',
                    'hip', 'hip_only'),
            description="Backends to enable", default='serial')
    variant("examples", default=False, description="Build alpaka examples")

    depends_on('boost')
    depends_on('boost+fiber', when="backend=fiber")
    depends_on('cmake@3.18:', when='@0.7.0:')

    # make sure no other backend is enabled if using cuda_only or hip_only
    for v in ('serial', 'threads', 'fiber', 'tbb', 'oacc',
              'omp2_gridblock', 'omp2_blockthread', 'omp5', 'cuda', 'hip'):
        conflicts('backend=cuda_only,%s' % v)
        conflicts('backend=hip_only,%s' % v)
    conflicts('backend=cuda_only,hip_only')

    # The OpenACC backend cannot be combined with any OpenMP backend.
    # BUG FIX: the tuple previously listed 'omp2_blockthread' twice and
    # omitted 'omp2_gridblock', so backend=oacc,omp2_gridblock was never
    # rejected.
    for v in ('omp2_gridblock', 'omp2_blockthread', 'omp5'):
        conflicts('backend=oacc,%s' % v)

    # todo: add conflict between cuda 11.3 and gcc 10.3.0
    # see https://github.com/alpaka-group/alpaka/issues/1297

    # One CMake toggle per plain backend value, in the order the if-chain
    # used to emit them.
    _backend_flags = (
        ('serial', 'ALPAKA_ACC_CPU_B_SEQ_T_SEQ_ENABLE'),
        ('threads', 'ALPAKA_ACC_CPU_B_SEQ_T_THREADS_ENABLE'),
        ('fiber', 'ALPAKA_ACC_CPU_B_SEQ_T_FIBERS_ENABLE'),
        ('tbb', 'ALPAKA_ACC_CPU_B_TBB_T_SEQ_ENABLE'),
        ('omp2_gridblock', 'ALPAKA_ACC_CPU_B_OMP2_T_SEQ_ENABLE'),
        ('omp2_blockthread', 'ALPAKA_ACC_CPU_B_SEQ_T_OMP2_ENABLE'),
        ('omp5', 'ALPAKA_ACC_ANY_BT_OMP5_ENABLE'),
        ('oacc', 'ALPAKA_ACC_ANY_BT_OACC_ENABLE'),
        ('cuda', 'ALPAKA_ACC_GPU_CUDA_ENABLE'),
        ('hip', 'ALPAKA_ACC_GPU_HIP_ENABLE'),
    )

    def cmake_args(self):
        """Translate the requested 'backend' variant values into the
        corresponding alpaka CMake switches.

        Returns: list of CMake ``-D`` definitions.
        """
        spec = self.spec  # used consistently (was mixed spec/self.spec)
        args = []
        for backend, flag in self._backend_flags:
            if 'backend=%s' % backend in spec:
                args.append(self.define(flag, True))
        # The *_only values enable the GPU backend and additionally switch
        # off every CPU backend via the *_ONLY_MODE flag.
        if 'backend=cuda_only' in spec:
            args.append(self.define("ALPAKA_ACC_GPU_CUDA_ENABLE", True))
            args.append(self.define("ALPAKA_ACC_GPU_CUDA_ONLY_MODE", True))
        if 'backend=hip_only' in spec:
            args.append(self.define("ALPAKA_ACC_GPU_HIP_ENABLE", True))
            args.append(self.define("ALPAKA_ACC_GPU_HIP_ONLY_MODE", True))

        args.append(self.define_from_variant("alpaka_BUILD_EXAMPLES",
                                             "examples"))
        # need to define, as it is explicitly declared as an option by alpaka:
        args.append(self.define("BUILD_TESTING", self.run_tests))
        return args
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Arborx(CMakePackage):
    """ArborX is a performance-portable library for geometric search"""

    homepage = "https://github.com/arborx/arborx"
    url = "https://github.com/arborx/arborx/archive/v0.9-beta.tar.gz"
    git = "https://github.com/arborx/arborx.git"
    maintainers = ['aprokop']

    version('master', branch='master')
    version('1.0', sha256='9b5f45c8180622c907ef0b7cc27cb18ba272ac6558725d9e460c3f3e764f1075')
    version('0.9-beta', sha256='b349b5708d1aa00e8c20c209ac75dc2d164ff9bf1b85adb5437346d194ba6c0d')

    # ArborX relies on Kokkos to provide devices, providing one-to-one matching
    # variants. The only way to disable those devices is to make sure Kokkos
    # does not provide them.
    # BUG FIX: the Kokkos package spells its HIP backend variant '+rocm',
    # not '+hip'; the old 'hip' key generated depends_on('kokkos+hip'),
    # which could never be satisfied.
    kokkos_backends = {
        'serial': (True, "enable Serial backend (default)"),
        'cuda': (False, "enable Cuda backend"),
        'openmp': (False, "enable OpenMP backend"),
        'rocm': (False, "enable HIP backend")
    }

    variant('mpi', default=True, description='enable MPI')
    for backend in kokkos_backends:
        deflt, descr = kokkos_backends[backend]
        variant(backend.lower(), default=deflt, description=descr)
    variant('trilinos', default=False, description='use Kokkos from Trilinos')

    depends_on('cmake@3.12:', type='build')
    depends_on('mpi', when='+mpi')

    # Standalone Kokkos
    depends_on('kokkos@3.1.00:', when='~trilinos')
    for backend in kokkos_backends:
        depends_on('kokkos+%s' % backend.lower(), when='~trilinos+%s' %
                   backend.lower())
    depends_on('kokkos+cuda_lambda', when='~trilinos+cuda')

    # Trilinos/Kokkos
    # Notes:
    # - there is no Trilinos release with Kokkos 3.1 yet
    # - current version of Trilinos package does not allow disabling Serial
    # - current version of Trilinos package does not allow enabling CUDA
    depends_on('trilinos+kokkos@develop', when='+trilinos')
    depends_on('trilinos+openmp', when='+trilinos+openmp')
    conflicts('~serial', when='+trilinos')
    conflicts('+cuda', when='+trilinos')

    def cmake_args(self):
        """Point CMake at Kokkos (standalone or from Trilinos) and toggle
        MPI support."""
        spec = self.spec
        options = [
            '-DKokkos_ROOT=%s' % (spec['kokkos'].prefix if '~trilinos' in spec
                                  else spec['trilinos'].prefix),
            '-DARBORX_ENABLE_MPI=%s' % ('ON' if '+mpi' in spec else 'OFF')
        ]
        if '+cuda' in spec:
            # Only Kokkos allows '+cuda' for now
            options.append(
                '-DCMAKE_CXX_COMPILER=%s' % spec["kokkos"].kokkos_cxx)
        return options

    examples_src_dir = "examples"

    @run_after('install')
    def setup_build_tests(self):
        """Copy the example source files after the package is installed to an
        install test subdirectory for use during `spack test run`."""
        self.cache_extra_test_sources([self.examples_src_dir])

    def build_tests(self):
        """Configure and build the cached examples against the installed
        ArborX."""
        cmake_build_path = join_path(self.install_test_root,
                                     self.examples_src_dir, "build")
        mkdirp(cmake_build_path)
        cmake_prefix_path = "-DCMAKE_PREFIX_PATH={0}".format(self.spec['arborx'].prefix)
        # We don't need to append the path to Kokkos to CMAKE_PREFIX_PATH
        # since a hint is already hardcoded inside the CMake ArborX configuration.
        # Omitting it here allows us to avoid to distinguish between Kokkos
        # being installed as a standalone or as part of Trilinos.
        if '+mpi' in self.spec:
            cmake_prefix_path += ";{0}".format(self.spec['mpi'].prefix)
        with working_dir(cmake_build_path):
            cmake_args = ["..",
                          cmake_prefix_path,
                          "-DCMAKE_CXX_COMPILER={0}".format(self.compiler.cxx)]
            cmake(*cmake_args)
            make()

    def run_tests(self):
        """Run the built examples through ctest."""
        reason = 'Checking ability to execute.'
        run_path = join_path(self.install_test_root, self.examples_src_dir, 'build')
        with working_dir(run_path):
            self.run_test('ctest', ['-V'], [], installed=False, purpose=reason)

    def test(self):
        """Smoke test: build the example programs, then execute them."""
        self.build_tests()
        self.run_tests()
arborx: kokkos hip backend is actually now called rocm (#23390)
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Arborx(CMakePackage):
    """ArborX is a performance-portable library for geometric search"""

    homepage = "https://github.com/arborx/arborx"
    url = "https://github.com/arborx/arborx/archive/v0.9-beta.tar.gz"
    git = "https://github.com/arborx/arborx.git"
    maintainers = ['aprokop']

    version('master', branch='master')
    version('1.0', sha256='9b5f45c8180622c907ef0b7cc27cb18ba272ac6558725d9e460c3f3e764f1075')
    version('0.9-beta', sha256='b349b5708d1aa00e8c20c209ac75dc2d164ff9bf1b85adb5437346d194ba6c0d')

    # Devices are provided by Kokkos, so we mirror its backends one-to-one
    # as variants here; disabling a device simply means not enabling it in
    # Kokkos.
    kokkos_backends = {
        'serial': (True, "enable Serial backend (default)"),
        'cuda': (False, "enable Cuda backend"),
        'openmp': (False, "enable OpenMP backend"),
        'rocm': (False, "enable HIP backend")
    }

    variant('mpi', default=True, description='enable MPI')
    for backend, (deflt, descr) in kokkos_backends.items():
        variant(backend.lower(), default=deflt, description=descr)
    variant('trilinos', default=False, description='use Kokkos from Trilinos')

    depends_on('cmake@3.12:', type='build')
    depends_on('mpi', when='+mpi')

    # Standalone Kokkos
    depends_on('kokkos@3.1.00:', when='~trilinos')
    for backend in kokkos_backends:
        depends_on('kokkos+%s' % backend.lower(),
                   when='~trilinos+%s' % backend.lower())
    depends_on('kokkos+cuda_lambda', when='~trilinos+cuda')

    # Trilinos/Kokkos
    # Notes:
    # - there is no Trilinos release with Kokkos 3.1 yet
    # - current version of Trilinos package does not allow disabling Serial
    # - current version of Trilinos package does not allow enabling CUDA
    depends_on('trilinos+kokkos@develop', when='+trilinos')
    depends_on('trilinos+openmp', when='+trilinos+openmp')
    conflicts('~serial', when='+trilinos')
    conflicts('+cuda', when='+trilinos')

    def cmake_args(self):
        """Assemble the ArborX CMake configure options."""
        spec = self.spec
        # Kokkos may come standalone or bundled inside Trilinos.
        if '~trilinos' in spec:
            kokkos_prefix = spec['kokkos'].prefix
        else:
            kokkos_prefix = spec['trilinos'].prefix
        mpi_flag = 'ON' if '+mpi' in spec else 'OFF'
        options = [
            '-DKokkos_ROOT=%s' % kokkos_prefix,
            '-DARBORX_ENABLE_MPI=%s' % mpi_flag,
        ]
        if '+cuda' in spec:
            # Only Kokkos allows '+cuda' for now
            options.append(
                '-DCMAKE_CXX_COMPILER=%s' % spec["kokkos"].kokkos_cxx)
        return options

    examples_src_dir = "examples"

    @run_after('install')
    def setup_build_tests(self):
        """Cache the example sources into the install test area so they are
        available later for `spack test run`."""
        self.cache_extra_test_sources([self.examples_src_dir])

    def build_tests(self):
        """Configure and compile the cached examples with cmake/make."""
        build_dir = join_path(self.install_test_root,
                              self.examples_src_dir, "build")
        mkdirp(build_dir)
        prefix_arg = "-DCMAKE_PREFIX_PATH={0}".format(self.spec['arborx'].prefix)
        # Kokkos is deliberately left off CMAKE_PREFIX_PATH: ArborX's CMake
        # configuration already carries a hardcoded hint, and skipping it
        # means we need not care whether Kokkos is standalone or part of
        # Trilinos.
        if '+mpi' in self.spec:
            prefix_arg += ";{0}".format(self.spec['mpi'].prefix)
        with working_dir(build_dir):
            cmake("..",
                  prefix_arg,
                  "-DCMAKE_CXX_COMPILER={0}".format(self.compiler.cxx))
            make()

    def run_tests(self):
        """Execute the built examples via ctest."""
        build_dir = join_path(self.install_test_root,
                              self.examples_src_dir, 'build')
        with working_dir(build_dir):
            self.run_test('ctest', ['-V'], [],
                          installed=False,
                          purpose='Checking ability to execute.')

    def test(self):
        """Stand-alone test: build the examples, then run them."""
        self.build_tests()
        self.run_tests()
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class Dealii(CMakePackage, CudaPackage):
    """C++ software library providing well-documented tools to build finite
    element codes for a broad variety of PDEs."""

    homepage = "https://www.dealii.org"
    url = "https://github.com/dealii/dealii/releases/download/v8.4.1/dealii-8.4.1.tar.gz"
    git = "https://github.com/dealii/dealii.git"
    maintainers = ['davydden', 'jppelteret', 'luca-heltai']

    # Don't add RPATHs to this package for the full build DAG.
    # only add for immediate deps.
    transitive_rpaths = False

    # Build with Ninja instead of Unix Makefiles.
    generator = 'Ninja'

    version('master', branch='master')
    version('9.2.0', sha256='d05a82fb40f1f1e24407451814b5a6004e39366a44c81208b1ae9d65f3efa43a')
    version('9.1.1', sha256='fc5b483f7fe58dfeb52d05054011280f115498e337af3e085bf272fd1fd81276')
    version('9.1.0', sha256='5b070112403f8afbb72345c1bb24d2a38d11ce58891217e353aab97957a04600')
    version('9.0.1', sha256='df2f0d666f2224be07e3741c0e8e02132fd67ea4579cd16a2429f7416146ee64')
    version('9.0.0', sha256='c918dc5c1a31d62f6eea7b524dcc81c6d00b3c378d4ed6965a708ab548944f08')
    version('8.5.1', sha256='d33e812c21a51f7e5e3d3e6af86aec343155650b611d61c1891fbc3cabce09ae')
    version('8.5.0', sha256='e6913ff6f184d16bc2598c1ba31f879535b72b6dff043e15aef048043ff1d779')
    version('8.4.2', sha256='ec7c00fadc9d298d1a0d16c08fb26818868410a9622c59ba624096872f3058e4')
    version('8.4.1', sha256='00a0e92d069cdafd216816f1aff460f7dbd48744b0d9e0da193287ebf7d6b3ad')
    version('8.4.0', sha256='36a20e097a03f17b557e11aad1400af8c6252d25f7feca40b611d5fc16d71990')
    version('8.3.0', sha256='4ddf72632eb501e1c814e299f32fc04fd680d6fda9daff58be4209e400e41779')
    version('8.2.1', sha256='d75674e45fe63cd9fa294460fe45228904d51a68f744dbb99cd7b60720f3b2a0')
    version('8.1.0', sha256='d666bbda2a17b41b80221d7029468246f2658051b8c00d9c5907cd6434c4df99')

    # Configuration variants
    variant('build_type', default='DebugRelease',
            description='The build type to build',
            values=('Debug', 'Release', 'DebugRelease'))
    variant('cxxstd', default='default', multi=False,
            description='Compile using the specified C++ standard',
            values=('default', '11', '14', '17'))
    variant('doc', default=False,
            description='Compile with documentation')
    variant('examples', default=True,
            description='Compile tutorial programs')
    variant('int64', default=False,
            description='Compile with 64 bit indices support')
    variant('mpi', default=True,
            description='Compile with MPI')
    variant('optflags', default=False,
            description='Compile using additional optimization flags')
    variant('python', default=False,
            description='Compile with Python bindings')

    # Package variants
    variant('assimp', default=True,
            description='Compile with Assimp')
    variant('arpack', default=True,
            description='Compile with Arpack and PArpack (only with MPI)')
    variant('adol-c', default=True,
            description='Compile with ADOL-C')
    variant('ginkgo', default=True,
            description='Compile with Ginkgo')
    variant('gmsh', default=True,
            description='Compile with GMSH')
    variant('gsl', default=True,
            description='Compile with GSL')
    variant('hdf5', default=True,
            description='Compile with HDF5 (only with MPI)')
    variant('metis', default=True,
            description='Compile with Metis')
    variant('muparser', default=True,
            description='Compile with muParser')
    variant('nanoflann', default=True,
            description='Compile with Nanoflann')
    variant('netcdf', default=False,
            description='Compile with Netcdf (only with MPI)')
    variant('oce', default=True,
            description='Compile with OCE')
    variant('p4est', default=True,
            description='Compile with P4est (only with MPI)')
    variant('petsc', default=True,
            description='Compile with Petsc (only with MPI)')
    variant('scalapack', default=True,
            description='Compile with ScaLAPACK (only with MPI)')
    variant('sundials', default=True,
            description='Compile with Sundials')
    variant('slepc', default=True,
            description='Compile with Slepc (only with Petsc and MPI)')
    variant('symengine', default=True,
            description='Compile with SymEngine')
    variant('threads', default=True,
            description='Compile with multi-threading via TBB')
    variant('trilinos', default=True,
            description='Compile with Trilinos (only with MPI)')

    # Required dependencies: Light version
    depends_on('blas')
    # Boost 1.58 is blacklisted, require at least 1.59, see
    # https://github.com/dealii/dealii/issues/1591
    # There are issues with 1.65.1 and 1.65.0:
    # https://github.com/dealii/dealii/issues/5262
    # we take the patch from https://github.com/boostorg/serialization/pull/79
    # more precisely its variation https://github.com/dealii/dealii/pull/5572#issuecomment-349742019
    # 1.68.0 has issues with serialization https://github.com/dealii/dealii/issues/7074
    # adopt https://github.com/boostorg/serialization/pull/105 as a fix
    depends_on('boost@1.59.0:1.63,1.65.1,1.67.0:+thread+system+serialization+iostreams',
               patches=[patch('boost_1.65.1_singleton.patch',
                              level=1,
                              when='@1.65.1'),
                        patch('boost_1.68.0.patch',
                              level=1,
                              when='@1.68.0'),
                        ],
               when='~python')
    depends_on('boost@1.59.0:1.63,1.65.1,1.67.0:+thread+system+serialization+iostreams+python',
               patches=[patch('boost_1.65.1_singleton.patch',
                              level=1,
                              when='@1.65.1'),
                        patch('boost_1.68.0.patch',
                              level=1,
                              when='@1.68.0'),
                        ],
               when='+python')
    # The std::auto_ptr is removed in the C++ 17 standard.
    # See https://github.com/dealii/dealii/issues/4662
    # and related topics discussed for other software libraries.
    depends_on('boost cxxstd=11', when='cxxstd=11')
    depends_on('boost cxxstd=14', when='cxxstd=14')
    depends_on('boost cxxstd=17', when='cxxstd=17')
    depends_on('bzip2', when='@:8.99')
    depends_on('lapack')
    depends_on('ninja', type='build')
    depends_on('suite-sparse')
    depends_on('zlib')

    # Optional dependencies: Configuration
    depends_on('cuda@8:', when='+cuda')
    depends_on('cmake@3.9:', when='+cuda', type='build')
    # Older version of deal.II do not build with Cmake 3.10, see
    # https://github.com/dealii/dealii/issues/5510
    depends_on('cmake@:3.9.99', when='@:8.99', type='build')
    depends_on('mpi', when='+mpi')
    depends_on('python', when='@8.5.0:+python')

    # Optional dependencies: Packages
    depends_on('adol-c@2.6.4:', when='@9.0:+adol-c')
    depends_on('arpack-ng+mpi', when='+arpack+mpi')
    depends_on('assimp', when='@9.0:+assimp')
    depends_on('doxygen+graphviz', when='+doc')
    depends_on('graphviz', when='+doc')
    depends_on('ginkgo', when='@9.1:+ginkgo')
    depends_on('gmsh+tetgen+netgen+oce', when='@9.0:+gmsh', type=('build', 'run'))
    depends_on('gsl', when='@8.5.0:+gsl')
    # FIXME: next line fixes concretization with petsc
    depends_on('hdf5+mpi+hl+fortran', when='+hdf5+mpi+petsc')
    depends_on('hdf5+mpi+hl', when='+hdf5+mpi~petsc')
    # FIXME: concretizer bug. The two lines mimic what comes from PETSc
    # but we should not need it
    depends_on('metis@5:+int64', when='+metis+int64')
    depends_on('metis@5:~int64', when='+metis~int64')
    depends_on('muparser', when='+muparser')
    # Nanoflann support has been removed after 9.2.0
    depends_on('nanoflann', when='@9.0:9.2+nanoflann')
    depends_on('netcdf-c+mpi', when='+netcdf+mpi')
    depends_on('netcdf-cxx', when='+netcdf+mpi')
    depends_on('oce', when='+oce')
    depends_on('p4est', when='+p4est+mpi')
    depends_on('petsc+mpi~int64', when='+petsc+mpi~int64')
    depends_on('petsc+mpi+int64', when='+petsc+mpi+int64')
    depends_on('petsc@:3.6.4', when='@:8.4.1+petsc+mpi')
    depends_on('scalapack', when='@9.0:+scalapack')
    depends_on('slepc', when='+slepc+petsc+mpi')
    depends_on('slepc@:3.6.3', when='@:8.4.1+slepc+petsc+mpi')
    depends_on('slepc~arpack', when='+slepc+petsc+mpi+int64')
    depends_on('sundials@:3~pthread', when='@9.0:+sundials')
    depends_on('trilinos gotype=int', when='+trilinos@12.18.1:')
    # Both Trilinos and SymEngine bundle the Teuchos RCP library.
    # This leads to conflicts between macros defined in the included
    # headers when they are not compiled in the same mode.
    # See https://github.com/symengine/symengine/issues/1516
    # FIXME: uncomment when the following is fixed
    # https://github.com/spack/spack/issues/11160
    # depends_on("symengine@0.4: build_type=Release", when="@9.1:+symengine+trilinos^trilinos~debug") # NOQA: ignore=E501
    # depends_on("symengine@0.4: build_type=Debug", when="@9.1:+symengine+trilinos^trilinos+debug") # NOQA: ignore=E501
    depends_on('symengine@0.4:', when='@9.1:+symengine')
    depends_on('symengine@0.6:', when='@9.2:+symengine')
    depends_on('tbb', when='+threads')
    # do not require +rol to make concretization of xsdk possible
    depends_on('trilinos+amesos+aztec+epetra+ifpack+ml+muelu+sacado+teuchos', when='+trilinos+mpi~int64~cuda')
    depends_on('trilinos+amesos+aztec+epetra+ifpack+ml+muelu+sacado+teuchos~hypre', when='+trilinos+mpi+int64~cuda')
    # FIXME: temporary disable Tpetra when using CUDA due to
    # namespace "Kokkos::Impl" has no member "cuda_abort"
    depends_on('trilinos@master+amesos+aztec+epetra+ifpack+ml+muelu+rol+sacado+teuchos~amesos2~ifpack2~intrepid2~kokkos~tpetra~zoltan2', when='+trilinos+mpi~int64+cuda')
    depends_on('trilinos@master+amesos+aztec+epetra+ifpack+ml+muelu+rol+sacado+teuchos~hypre~amesos2~ifpack2~intrepid2~kokkos~tpetra~zoltan2', when='+trilinos+mpi+int64+cuda')

    # Explicitly provide a destructor in BlockVector,
    # otherwise deal.II may fail to build with Intel compilers.
    patch('https://github.com/dealii/dealii/commit/a89d90f9993ee9ad39e492af466b3595c06c3e25.patch',
          sha256='4282b32e96f2f5d376eb34f3fddcc4615fcd99b40004cca784eb874288d1b31c',
          when='@9.0.1')
    # https://github.com/dealii/dealii/pull/7935
    patch('https://github.com/dealii/dealii/commit/f8de8c5c28c715717bf8a086e94f071e0fe9deab.patch',
          sha256='61f217744b70f352965be265d2f06e8c1276685e2944ca0a88b7297dd55755da',
          when='@9.0.1 ^boost@1.70.0:')
    # Fix TBB version check
    # https://github.com/dealii/dealii/pull/9208
    patch('https://github.com/dealii/dealii/commit/80b13fe5a2eaefc77fa8c9266566fa8a2de91edf.patch',
          sha256='6f876dc8eadafe2c4ec2a6673864fb451c6627ca80511b6e16f3c401946fdf33',
          when='@9.0.0:9.1.1')

    # Check for sufficiently modern versions
    conflicts('cxxstd=98', when='@9.0:')
    conflicts('cxxstd=11', when='@9.3:')

    # Interfaces added in 8.5.0:
    for p in ['gsl', 'python']:
        conflicts('+{0}'.format(p), when='@:8.4.2',
                  msg='The interface to {0} is supported from version 8.5.0 '
                      'onwards. Please explicitly disable this variant '
                      'via ~{0}'.format(p))

    # Interfaces added in 9.0.0:
    for p in ['assimp', 'gmsh', 'nanoflann', 'scalapack', 'sundials',
              'adol-c']:
        conflicts('+{0}'.format(p), when='@:8.5.1',
                  msg='The interface to {0} is supported from version 9.0.0 '
                      'onwards. Please explicitly disable this variant '
                      'via ~{0}'.format(p))

    # interfaces added in 9.1.0:
    for p in ['ginkgo', 'symengine']:
        conflicts('+{0}'.format(p), when='@:9.0',
                  msg='The interface to {0} is supported from version 9.1.0 '
                      'onwards. Please explicitly disable this variant '
                      'via ~{0}'.format(p))

    # Interfaces removed in 9.3.0:
    conflicts('+nanoflann', when='@9.3.0:',
              msg='The interface to Nanoflann was removed from version 9.3.0. '
                  'Please explicitly disable this variant via ~nanoflann')

    # Check that the combination of variants makes sense
    # 64-bit BLAS:
    for p in ['openblas', 'intel-mkl', 'intel-parallel-studio+mkl']:
        conflicts('^{0}+ilp64'.format(p), when='@:8.5.1',
                  msg='64bit BLAS is only supported from 9.0.0')

    # MPI requirements:
    for p in ['arpack', 'hdf5', 'netcdf', 'p4est', 'petsc', 'scalapack',
              'slepc', 'trilinos']:
        conflicts('+{0}'.format(p), when='~mpi',
                  msg='To enable {0} it is necessary to build deal.II with '
                      'MPI support enabled.'.format(p))

    # Optional dependencies:
    conflicts('+adol-c', when='^trilinos+chaco',
              msg='symbol clash between the ADOL-C library and '
                  'Trilinos SEACAS Chaco.')
    conflicts('+slepc', when='~petsc',
              msg='It is not possible to enable slepc interfaces '
                  'without petsc.')

    def cmake_args(self):
        """Build the list of CMake configure arguments from the selected
        variants.

        Returns: list of ``-D`` definitions passed to CMake.
        """
        spec = self.spec
        options = []
        # Release flags
        cxx_flags_release = []
        # Debug and release flags
        cxx_flags = []

        # Set directory structure:
        if spec.satisfies('@:8.2.1'):
            options.append(
                self.define('DEAL_II_COMPONENT_COMPAT_FILES', False)
            )
        else:
            options.extend([
                self.define(
                    'DEAL_II_EXAMPLES_RELDIR', 'share/deal.II/examples'
                ),
                self.define('DEAL_II_DOCREADME_RELDIR', 'share/deal.II/'),
                self.define('DEAL_II_DOCHTML_RELDIR', 'share/deal.II/doc')
            ])

        # Required dependencies
        lapack_blas_libs = spec['lapack'].libs + spec['blas'].libs
        lapack_blas_headers = spec['lapack'].headers + spec['blas'].headers
        options.extend([
            self.define('BOOST_DIR', spec['boost'].prefix),
            # CMake's FindBlas/Lapack may pickup system's blas/lapack instead
            # of Spack's. Be more specific to avoid this.
            # Note that both lapack and blas are provided in -DLAPACK_XYZ.
            self.define('LAPACK_FOUND', True),
            self.define(
                'LAPACK_INCLUDE_DIRS', lapack_blas_headers.directories
            ),
            self.define('LAPACK_LIBRARIES', lapack_blas_libs),
            self.define('UMFPACK_DIR', spec['suite-sparse'].prefix),
            self.define('ZLIB_DIR', spec['zlib'].prefix),
            self.define('DEAL_II_ALLOW_BUNDLED', False)
        ])

        if spec.satisfies('@:8.99'):
            options.extend([
                # Cmake may still pick up system's bzip2, fix this:
                self.define('BZIP2_FOUND', True),
                self.define(
                    'BZIP2_INCLUDE_DIRS', spec['bzip2'].prefix.include
                ),
                self.define('BZIP2_LIBRARIES', spec['bzip2'].libs)
            ])

        # Doxygen documentation
        options.append(self.define_from_variant(
            'DEAL_II_COMPONENT_DOCUMENTATION', 'doc'
        ))

        # Examples / tutorial programs
        options.append(self.define_from_variant(
            'DEAL_II_COMPONENT_EXAMPLES', 'examples'
        ))

        # Enforce the specified C++ standard
        if spec.variants['cxxstd'].value != 'default':
            cxxstd = spec.variants['cxxstd'].value
            options.append(
                self.define('DEAL_II_WITH_CXX{0}'.format(cxxstd), True)
            )

        # Performance
        # Set recommended flags for maximum (matrix-free) performance, see
        # https://groups.google.com/forum/?fromgroups#!topic/dealii/3Yjy8CBIrgU
        if spec.satisfies('%gcc'):
            cxx_flags_release.extend(['-O3'])
        elif spec.satisfies('%intel'):
            cxx_flags_release.extend(['-O3'])
        elif spec.satisfies('%clang') or spec.satisfies('%apple-clang'):
            cxx_flags_release.extend(['-O3', '-ffp-contract=fast'])

        # 64 bit indices
        options.append(self.define_from_variant(
            'DEAL_II_WITH_64BIT_INDICES', 'int64'
        ))

        if (spec.satisfies('^openblas+ilp64') or
                spec.satisfies('^intel-mkl+ilp64') or
                spec.satisfies('^intel-parallel-studio+mkl+ilp64')):
            options.append(
                self.define('LAPACK_WITH_64BIT_BLAS_INDICES', True)
            )

        # CUDA
        options.append(self.define_from_variant(
            'DEAL_II_WITH_CUDA', 'cuda'
        ))
        if '+cuda' in spec:
            # CUDA older than 9 lacks C++14 support, so drop it.
            if not spec.satisfies('^cuda@9:'):
                options.append('-DDEAL_II_WITH_CXX14=OFF')
            cuda_arch = spec.variants['cuda_arch'].value
            if cuda_arch != 'none':
                if len(cuda_arch) > 1:
                    raise InstallError(
                        'deal.II only supports compilation for a single GPU!'
                    )
                flags = '-arch=sm_{0}'.format(cuda_arch[0])
                # FIXME: there are some compiler errors in dealii
                # with: flags = ' '.join(self.cuda_flags(cuda_arch))
                # Stick with -arch=sm_xy for now.
                options.append(
                    self.define('DEAL_II_CUDA_FLAGS', flags)
                )

        # MPI
        options.append(self.define_from_variant(
            'DEAL_II_WITH_MPI', 'mpi'
        ))
        if '+mpi' in spec:
            options.extend([
                self.define('CMAKE_C_COMPILER', spec['mpi'].mpicc),
                self.define('CMAKE_CXX_COMPILER', spec['mpi'].mpicxx),
                self.define('CMAKE_Fortran_COMPILER', spec['mpi'].mpifc),
                self.define('MPI_C_COMPILER', spec['mpi'].mpicc),
                self.define('MPI_CXX_COMPILER', spec['mpi'].mpicxx),
                self.define('MPI_Fortran_COMPILER', spec['mpi'].mpifc)
            ])

        # Python bindings
        if spec.satisfies('@8.5.0:'):
            options.append(self.define_from_variant(
                'DEAL_II_COMPONENT_PYTHON_BINDINGS', 'python'
            ))
            if '+python' in spec:
                python_exe = spec['python'].command.path
                python_library = spec['python'].libs[0]
                python_include = spec['python'].headers.directories[0]
                options.extend([
                    self.define('PYTHON_EXECUTABLE', python_exe),
                    self.define('PYTHON_INCLUDE_DIR', python_include),
                    self.define('PYTHON_LIBRARY', python_library)
                ])

        # Threading
        options.append(self.define_from_variant(
            'DEAL_II_WITH_THREADS', 'threads'
        ))
        if '+threads' in spec:
            if (spec.satisfies('^intel-parallel-studio+tbb')):
                # deal.II/cmake will have hard time picking up TBB from Intel.
                tbb_ver = '.'.join(('%s' % spec['tbb'].version).split('.')[1:])
                options.extend([
                    self.define('TBB_FOUND', True),
                    self.define('TBB_VERSION', tbb_ver),
                    self.define(
                        'TBB_INCLUDE_DIRS', spec['tbb'].headers.directories
                    ),
                    self.define('TBB_LIBRARIES', spec['tbb'].libs)
                ])
            else:
                options.append(
                    self.define('TBB_DIR', spec['tbb'].prefix)
                )

        # Optional dependencies for which library names are the same as CMake
        # variables:
        for library in (
                'gsl', 'hdf5', 'p4est', 'petsc', 'slepc', 'trilinos', 'metis',
                'sundials', 'nanoflann', 'assimp', 'gmsh', 'muparser',
                'symengine', 'ginkgo'):
            options.append(self.define_from_variant(
                'DEAL_II_WITH_{0}'.format(library.upper()), library
            ))
            if ('+' + library) in spec:
                options.append(self.define(
                    '{0}_DIR'.format(library.upper()), spec[library].prefix
                ))

        # Optional dependencies that do not fit the above pattern:
        # ADOL-C
        options.append(self.define_from_variant(
            'DEAL_II_WITH_ADOLC', 'adol-c'
        ))
        if '+adol-c' in spec:
            options.append(
                self.define('ADOLC_DIR', spec['adol-c'].prefix)
            )

        # ARPACK
        options.append(self.define_from_variant(
            'DEAL_II_WITH_ARPACK', 'arpack'
        ))
        if '+arpack' in spec and '+mpi' in spec:
            options.extend([
                self.define('ARPACK_DIR', spec['arpack-ng'].prefix),
                self.define('DEAL_II_ARPACK_WITH_PARPACK', True)
            ])

        # NetCDF
        # since Netcdf is spread among two, need to do it by hand:
        if '+netcdf' in spec and '+mpi' in spec:
            netcdf_libs = spec['netcdf-cxx'].libs + spec['netcdf-c'].libs
            options.extend([
                self.define('NETCDF_FOUND', True),
                self.define('NETCDF_INCLUDE_DIRS', '{0};{1}'.format(
                    spec['netcdf-cxx'].prefix.include,
                    spec['netcdf-c'].prefix.include
                )),
                self.define('NETCDF_LIBRARIES', netcdf_libs)
            ])
        else:
            options.append(
                self.define('DEAL_II_WITH_NETCDF', False)
            )

        # ScaLAPACK
        options.append(self.define_from_variant(
            'DEAL_II_WITH_SCALAPACK', 'scalapack'
        ))
        if '+scalapack' in spec:
            scalapack_libs = spec['scalapack'].libs
            options.extend([
                self.define('SCALAPACK_FOUND', True),
                self.define(
                    'SCALAPACK_INCLUDE_DIRS', spec['scalapack'].prefix.include
                ),
                self.define('SCALAPACK_LIBRARIES', scalapack_libs)
            ])

        # Open Cascade
        options.append(self.define_from_variant(
            'DEAL_II_WITH_OPENCASCADE', 'oce'
        ))
        if '+oce' in spec:
            options.append(
                self.define('OPENCASCADE_DIR', spec['oce'].prefix)
            )

        # As a final step, collect CXX flags that may have been
        # added anywhere above:
        if len(cxx_flags_release) > 0 and '+optflags' in spec:
            options.extend([
                self.define(
                    'CMAKE_CXX_FLAGS_RELEASE', ' '.join(cxx_flags_release)
                ),
                self.define('CMAKE_CXX_FLAGS', ' '.join(cxx_flags))
            ])

        # Add flags for machine vectorization, used when tutorials
        # and user code is built.
        # See https://github.com/dealii/dealii/issues/9164
        # NOTE(review): raises KeyError if SPACK_TARGET_ARGS is unset in the
        # environment — presumably always provided by Spack's compiler
        # wrapper; confirm before running outside a Spack build.
        options.append(
            self.define('DEAL_II_CXX_FLAGS', os.environ['SPACK_TARGET_ARGS'])
        )
        return options

    def setup_run_environment(self, env):
        """Export DEAL_II_DIR so downstream builds can locate this install."""
        env.set('DEAL_II_DIR', self.prefix)
Added CUDAHOSTCXX variable needed to compile with cuda and mpi. (#19254)
* Added CUDAHOSTCXX variable needed to compile with cuda and mpi.
* Added guard for setting CUDAHOSTCXX with MPI.
* Acceptable working version of dealii+cuda+mpi.
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class Dealii(CMakePackage, CudaPackage):
"""C++ software library providing well-documented tools to build finite
element codes for a broad variety of PDEs."""
homepage = "https://www.dealii.org"
url = "https://github.com/dealii/dealii/releases/download/v8.4.1/dealii-8.4.1.tar.gz"
git = "https://github.com/dealii/dealii.git"
maintainers = ['davydden', 'jppelteret', 'luca-heltai']
# Don't add RPATHs to this package for the full build DAG.
# only add for immediate deps.
transitive_rpaths = False
generator = 'Ninja'
version('master', branch='master')
version('9.2.0', sha256='d05a82fb40f1f1e24407451814b5a6004e39366a44c81208b1ae9d65f3efa43a')
version('9.1.1', sha256='fc5b483f7fe58dfeb52d05054011280f115498e337af3e085bf272fd1fd81276')
version('9.1.0', sha256='5b070112403f8afbb72345c1bb24d2a38d11ce58891217e353aab97957a04600')
version('9.0.1', sha256='df2f0d666f2224be07e3741c0e8e02132fd67ea4579cd16a2429f7416146ee64')
version('9.0.0', sha256='c918dc5c1a31d62f6eea7b524dcc81c6d00b3c378d4ed6965a708ab548944f08')
version('8.5.1', sha256='d33e812c21a51f7e5e3d3e6af86aec343155650b611d61c1891fbc3cabce09ae')
version('8.5.0', sha256='e6913ff6f184d16bc2598c1ba31f879535b72b6dff043e15aef048043ff1d779')
version('8.4.2', sha256='ec7c00fadc9d298d1a0d16c08fb26818868410a9622c59ba624096872f3058e4')
version('8.4.1', sha256='00a0e92d069cdafd216816f1aff460f7dbd48744b0d9e0da193287ebf7d6b3ad')
version('8.4.0', sha256='36a20e097a03f17b557e11aad1400af8c6252d25f7feca40b611d5fc16d71990')
version('8.3.0', sha256='4ddf72632eb501e1c814e299f32fc04fd680d6fda9daff58be4209e400e41779')
version('8.2.1', sha256='d75674e45fe63cd9fa294460fe45228904d51a68f744dbb99cd7b60720f3b2a0')
version('8.1.0', sha256='d666bbda2a17b41b80221d7029468246f2658051b8c00d9c5907cd6434c4df99')
# Configuration variants
variant('build_type', default='DebugRelease',
description='The build type to build',
values=('Debug', 'Release', 'DebugRelease'))
variant('cxxstd', default='default', multi=False,
description='Compile using the specified C++ standard',
values=('default', '11', '14', '17'))
variant('doc', default=False,
description='Compile with documentation')
variant('examples', default=True,
description='Compile tutorial programs')
variant('int64', default=False,
description='Compile with 64 bit indices support')
variant('mpi', default=True,
description='Compile with MPI')
variant('optflags', default=False,
description='Compile using additional optimization flags')
variant('python', default=False,
description='Compile with Python bindings')
# Package variants
variant('assimp', default=True,
description='Compile with Assimp')
variant('arpack', default=True,
description='Compile with Arpack and PArpack (only with MPI)')
variant('adol-c', default=True,
description='Compile with ADOL-C')
variant('ginkgo', default=True,
description='Compile with Ginkgo')
variant('gmsh', default=True,
description='Compile with GMSH')
variant('gsl', default=True,
description='Compile with GSL')
variant('hdf5', default=True,
description='Compile with HDF5 (only with MPI)')
variant('metis', default=True,
description='Compile with Metis')
variant('muparser', default=True,
description='Compile with muParser')
variant('nanoflann', default=True,
description='Compile with Nanoflann')
variant('netcdf', default=False,
description='Compile with Netcdf (only with MPI)')
variant('oce', default=True,
description='Compile with OCE')
variant('p4est', default=True,
description='Compile with P4est (only with MPI)')
variant('petsc', default=True,
description='Compile with Petsc (only with MPI)')
variant('scalapack', default=True,
description='Compile with ScaLAPACK (only with MPI)')
variant('sundials', default=True,
description='Compile with Sundials')
variant('slepc', default=True,
description='Compile with Slepc (only with Petsc and MPI)')
variant('symengine', default=True,
description='Compile with SymEngine')
variant('threads', default=True,
description='Compile with multi-threading via TBB')
variant('trilinos', default=True,
description='Compile with Trilinos (only with MPI)')
# Required dependencies: Light version
depends_on('blas')
# Boost 1.58 is blacklisted, require at least 1.59, see
# https://github.com/dealii/dealii/issues/1591
# There are issues with 1.65.1 and 1.65.0:
# https://github.com/dealii/dealii/issues/5262
# we take the patch from https://github.com/boostorg/serialization/pull/79
# more precisely its variation https://github.com/dealii/dealii/pull/5572#issuecomment-349742019
# 1.68.0 has issues with serialization https://github.com/dealii/dealii/issues/7074
# adopt https://github.com/boostorg/serialization/pull/105 as a fix
depends_on('boost@1.59.0:1.63,1.65.1,1.67.0:+thread+system+serialization+iostreams',
patches=[patch('boost_1.65.1_singleton.patch',
level=1,
when='@1.65.1'),
patch('boost_1.68.0.patch',
level=1,
when='@1.68.0'),
],
when='~python')
depends_on('boost@1.59.0:1.63,1.65.1,1.67.0:+thread+system+serialization+iostreams+python',
patches=[patch('boost_1.65.1_singleton.patch',
level=1,
when='@1.65.1'),
patch('boost_1.68.0.patch',
level=1,
when='@1.68.0'),
],
when='+python')
# The std::auto_ptr is removed in the C++ 17 standard.
# See https://github.com/dealii/dealii/issues/4662
# and related topics discussed for other software libraries.
depends_on('boost cxxstd=11', when='cxxstd=11')
depends_on('boost cxxstd=14', when='cxxstd=14')
depends_on('boost cxxstd=17', when='cxxstd=17')
depends_on('bzip2', when='@:8.99')
depends_on('lapack')
depends_on('ninja', type='build')
depends_on('suite-sparse')
depends_on('zlib')
# Optional dependencies: Configuration
depends_on('cuda@8:', when='+cuda')
depends_on('cmake@3.9:', when='+cuda', type='build')
# Older version of deal.II do not build with Cmake 3.10, see
# https://github.com/dealii/dealii/issues/5510
depends_on('cmake@:3.9.99', when='@:8.99', type='build')
depends_on('mpi', when='+mpi')
depends_on('python', when='@8.5.0:+python')
# Optional dependencies: Packages
depends_on('adol-c@2.6.4:', when='@9.0:+adol-c')
depends_on('arpack-ng+mpi', when='+arpack+mpi')
depends_on('assimp', when='@9.0:+assimp')
depends_on('doxygen+graphviz', when='+doc')
depends_on('graphviz', when='+doc')
depends_on('ginkgo', when='@9.1:+ginkgo')
depends_on('gmsh+tetgen+netgen+oce', when='@9.0:+gmsh', type=('build', 'run'))
depends_on('gsl', when='@8.5.0:+gsl')
# FIXME: next line fixes concretization with petsc
depends_on('hdf5+mpi+hl+fortran', when='+hdf5+mpi+petsc')
depends_on('hdf5+mpi+hl', when='+hdf5+mpi~petsc')
# FIXME: concretizer bug. The two lines mimic what comes from PETSc
# but we should not need it
depends_on('metis@5:+int64', when='+metis+int64')
depends_on('metis@5:~int64', when='+metis~int64')
depends_on('muparser', when='+muparser')
# Nanoflann support has been removed after 9.2.0
depends_on('nanoflann', when='@9.0:9.2+nanoflann')
depends_on('netcdf-c+mpi', when='+netcdf+mpi')
depends_on('netcdf-cxx', when='+netcdf+mpi')
depends_on('oce', when='+oce')
depends_on('p4est', when='+p4est+mpi')
depends_on('petsc+mpi~int64', when='+petsc+mpi~int64')
depends_on('petsc+mpi+int64', when='+petsc+mpi+int64')
depends_on('petsc@:3.6.4', when='@:8.4.1+petsc+mpi')
depends_on('scalapack', when='@9.0:+scalapack')
depends_on('slepc', when='+slepc+petsc+mpi')
depends_on('slepc@:3.6.3', when='@:8.4.1+slepc+petsc+mpi')
depends_on('slepc~arpack', when='+slepc+petsc+mpi+int64')
depends_on('sundials@:3~pthread', when='@9.0:+sundials')
depends_on('trilinos gotype=int', when='+trilinos@12.18.1:')
# Both Trilinos and SymEngine bundle the Teuchos RCP library.
# This leads to conflicts between macros defined in the included
# headers when they are not compiled in the same mode.
# See https://github.com/symengine/symengine/issues/1516
# FIXME: uncomment when the following is fixed
# https://github.com/spack/spack/issues/11160
# depends_on("symengine@0.4: build_type=Release", when="@9.1:+symengine+trilinos^trilinos~debug") # NOQA: ignore=E501
# depends_on("symengine@0.4: build_type=Debug", when="@9.1:+symengine+trilinos^trilinos+debug") # NOQA: ignore=E501
depends_on('symengine@0.4:', when='@9.1:+symengine')
depends_on('symengine@0.6:', when='@9.2:+symengine')
depends_on('tbb', when='+threads')
# do not require +rol to make concretization of xsdk possible
depends_on('trilinos+amesos+aztec+epetra+ifpack+ml+muelu+sacado+teuchos', when='+trilinos+mpi~int64~cuda')
depends_on('trilinos+amesos+aztec+epetra+ifpack+ml+muelu+sacado+teuchos~hypre', when='+trilinos+mpi+int64~cuda')
# FIXME: temporary disable Tpetra when using CUDA due to
# namespace "Kokkos::Impl" has no member "cuda_abort"
depends_on('trilinos@master+amesos+aztec+epetra+ifpack+ml+muelu+rol+sacado+teuchos~amesos2~ifpack2~intrepid2~kokkos~tpetra~zoltan2', when='+trilinos+mpi~int64+cuda')
depends_on('trilinos@master+amesos+aztec+epetra+ifpack+ml+muelu+rol+sacado+teuchos~hypre~amesos2~ifpack2~intrepid2~kokkos~tpetra~zoltan2', when='+trilinos+mpi+int64+cuda')
# Explicitly provide a destructor in BlockVector,
# otherwise deal.II may fail to build with Intel compilers.
patch('https://github.com/dealii/dealii/commit/a89d90f9993ee9ad39e492af466b3595c06c3e25.patch',
sha256='4282b32e96f2f5d376eb34f3fddcc4615fcd99b40004cca784eb874288d1b31c',
when='@9.0.1')
# https://github.com/dealii/dealii/pull/7935
patch('https://github.com/dealii/dealii/commit/f8de8c5c28c715717bf8a086e94f071e0fe9deab.patch',
sha256='61f217744b70f352965be265d2f06e8c1276685e2944ca0a88b7297dd55755da',
when='@9.0.1 ^boost@1.70.0:')
# Fix TBB version check
# https://github.com/dealii/dealii/pull/9208
patch('https://github.com/dealii/dealii/commit/80b13fe5a2eaefc77fa8c9266566fa8a2de91edf.patch',
sha256='6f876dc8eadafe2c4ec2a6673864fb451c6627ca80511b6e16f3c401946fdf33',
when='@9.0.0:9.1.1')
# Check for sufficiently modern versions
conflicts('cxxstd=98', when='@9.0:')
conflicts('cxxstd=11', when='@9.3:')
# Interfaces added in 8.5.0:
for p in ['gsl', 'python']:
conflicts('+{0}'.format(p), when='@:8.4.2',
msg='The interface to {0} is supported from version 8.5.0 '
'onwards. Please explicitly disable this variant '
'via ~{0}'.format(p))
# Interfaces added in 9.0.0:
for p in ['assimp', 'gmsh', 'nanoflann', 'scalapack', 'sundials',
'adol-c']:
conflicts('+{0}'.format(p), when='@:8.5.1',
msg='The interface to {0} is supported from version 9.0.0 '
'onwards. Please explicitly disable this variant '
'via ~{0}'.format(p))
# interfaces added in 9.1.0:
for p in ['ginkgo', 'symengine']:
conflicts('+{0}'.format(p), when='@:9.0',
msg='The interface to {0} is supported from version 9.1.0 '
'onwards. Please explicitly disable this variant '
'via ~{0}'.format(p))
# Interfaces removed in 9.3.0:
conflicts('+nanoflann', when='@9.3.0:',
msg='The interface to Nanoflann was removed from version 9.3.0. '
'Please explicitly disable this variant via ~nanoflann')
# Check that the combination of variants makes sense
# 64-bit BLAS:
for p in ['openblas', 'intel-mkl', 'intel-parallel-studio+mkl']:
conflicts('^{0}+ilp64'.format(p), when='@:8.5.1',
msg='64bit BLAS is only supported from 9.0.0')
# MPI requirements:
for p in ['arpack', 'hdf5', 'netcdf', 'p4est', 'petsc', 'scalapack',
'slepc', 'trilinos']:
conflicts('+{0}'.format(p), when='~mpi',
msg='To enable {0} it is necessary to build deal.II with '
'MPI support enabled.'.format(p))
# Optional dependencies:
conflicts('+adol-c', when='^trilinos+chaco',
msg='symbol clash between the ADOL-C library and '
'Trilinos SEACAS Chaco.')
conflicts('+slepc', when='~petsc',
msg='It is not possible to enable slepc interfaces '
'without petsc.')
def cmake_args(self):
    """Assemble the CMake options for configuring deal.II.

    Options are collected in dependency groups (directory layout,
    required deps, configuration variants, optional packages).  Extra
    compiler flags accumulate in ``cxx_flags_release`` / ``cxx_flags``
    and are appended at the end.

    Fixes vs. previous revision:
    * the CUDA<9 C++14 switch now uses ``self.define`` like every
      other option instead of a hand-written ``-D...=OFF`` string;
    * ``SPACK_TARGET_ARGS`` is read with ``os.environ.get`` so a
      missing variable degrades to no extra flags instead of a
      ``KeyError``.
    """
    spec = self.spec
    options = []
    # Release-only flags
    cxx_flags_release = []
    # Flags used in both debug and release builds
    cxx_flags = []

    # Set directory structure:
    if spec.satisfies('@:8.2.1'):
        options.append(
            self.define('DEAL_II_COMPONENT_COMPAT_FILES', False)
        )
    else:
        options.extend([
            self.define(
                'DEAL_II_EXAMPLES_RELDIR', 'share/deal.II/examples'
            ),
            self.define('DEAL_II_DOCREADME_RELDIR', 'share/deal.II/'),
            self.define('DEAL_II_DOCHTML_RELDIR', 'share/deal.II/doc')
        ])

    # Required dependencies
    lapack_blas_libs = spec['lapack'].libs + spec['blas'].libs
    lapack_blas_headers = spec['lapack'].headers + spec['blas'].headers
    options.extend([
        self.define('BOOST_DIR', spec['boost'].prefix),
        # CMake's FindBlas/Lapack may pickup system's blas/lapack instead
        # of Spack's. Be more specific to avoid this.
        # Note that both lapack and blas are provided in -DLAPACK_XYZ.
        self.define('LAPACK_FOUND', True),
        self.define(
            'LAPACK_INCLUDE_DIRS', lapack_blas_headers.directories
        ),
        self.define('LAPACK_LIBRARIES', lapack_blas_libs),
        self.define('UMFPACK_DIR', spec['suite-sparse'].prefix),
        self.define('ZLIB_DIR', spec['zlib'].prefix),
        self.define('DEAL_II_ALLOW_BUNDLED', False)
    ])
    if spec.satisfies('@:8.99'):
        options.extend([
            # Cmake may still pick up system's bzip2, fix this:
            self.define('BZIP2_FOUND', True),
            self.define(
                'BZIP2_INCLUDE_DIRS', spec['bzip2'].prefix.include
            ),
            self.define('BZIP2_LIBRARIES', spec['bzip2'].libs)
        ])

    # Doxygen documentation
    options.append(self.define_from_variant(
        'DEAL_II_COMPONENT_DOCUMENTATION', 'doc'
    ))
    # Examples / tutorial programs
    options.append(self.define_from_variant(
        'DEAL_II_COMPONENT_EXAMPLES', 'examples'
    ))

    # Enforce the specified C++ standard
    if spec.variants['cxxstd'].value != 'default':
        cxxstd = spec.variants['cxxstd'].value
        options.append(
            self.define('DEAL_II_WITH_CXX{0}'.format(cxxstd), True)
        )

    # Performance
    # Set recommended flags for maximum (matrix-free) performance, see
    # https://groups.google.com/forum/?fromgroups#!topic/dealii/3Yjy8CBIrgU
    if spec.satisfies('%gcc'):
        cxx_flags_release.extend(['-O3'])
    elif spec.satisfies('%intel'):
        cxx_flags_release.extend(['-O3'])
    elif spec.satisfies('%clang') or spec.satisfies('%apple-clang'):
        cxx_flags_release.extend(['-O3', '-ffp-contract=fast'])

    # 64 bit indices
    options.append(self.define_from_variant(
        'DEAL_II_WITH_64BIT_INDICES', 'int64'
    ))
    if (spec.satisfies('^openblas+ilp64') or
        spec.satisfies('^intel-mkl+ilp64') or
        spec.satisfies('^intel-parallel-studio+mkl+ilp64')):
        options.append(
            self.define('LAPACK_WITH_64BIT_BLAS_INDICES', True)
        )

    # CUDA
    options.append(self.define_from_variant(
        'DEAL_II_WITH_CUDA', 'cuda'
    ))
    if '+cuda' in spec:
        if not spec.satisfies('^cuda@9:'):
            # CUDA 8 tooling cannot build the C++14 parts; use the
            # typed self.define() helper for consistency with every
            # other option in this method.
            options.append(self.define('DEAL_II_WITH_CXX14', False))
        cuda_arch = spec.variants['cuda_arch'].value
        if cuda_arch != 'none':
            if len(cuda_arch) > 1:
                raise InstallError(
                    'deal.II only supports compilation for a single GPU!'
                )
            flags = '-arch=sm_{0}'.format(cuda_arch[0])
            # FIXME: there are some compiler errors in dealii
            # with: flags = ' '.join(self.cuda_flags(cuda_arch))
            # Stick with -arch=sm_xy for now.
            options.append(
                self.define('DEAL_II_CUDA_FLAGS', flags)
            )

    # MPI
    options.append(self.define_from_variant(
        'DEAL_II_WITH_MPI', 'mpi'
    ))
    if '+mpi' in spec:
        options.extend([
            self.define('CMAKE_C_COMPILER', spec['mpi'].mpicc),
            self.define('CMAKE_CXX_COMPILER', spec['mpi'].mpicxx),
            self.define('CMAKE_Fortran_COMPILER', spec['mpi'].mpifc),
            self.define('MPI_C_COMPILER', spec['mpi'].mpicc),
            self.define('MPI_CXX_COMPILER', spec['mpi'].mpicxx),
            self.define('MPI_Fortran_COMPILER', spec['mpi'].mpifc)
        ])
        if '+cuda' in spec:
            options.extend([
                self.define('DEAL_II_MPI_WITH_CUDA_SUPPORT', True),
                self.define('CUDA_HOST_COMPILER', spec['mpi'].mpicxx)
            ])

    # Python bindings
    if spec.satisfies('@8.5.0:'):
        options.append(self.define_from_variant(
            'DEAL_II_COMPONENT_PYTHON_BINDINGS', 'python'
        ))
        if '+python' in spec:
            python_exe = spec['python'].command.path
            python_library = spec['python'].libs[0]
            python_include = spec['python'].headers.directories[0]
            options.extend([
                self.define('PYTHON_EXECUTABLE', python_exe),
                self.define('PYTHON_INCLUDE_DIR', python_include),
                self.define('PYTHON_LIBRARY', python_library)
            ])

    # Threading
    options.append(self.define_from_variant(
        'DEAL_II_WITH_THREADS', 'threads'
    ))
    if '+threads' in spec:
        if spec.satisfies('^intel-parallel-studio+tbb'):
            # deal.II/cmake will have hard time picking up TBB from Intel.
            tbb_ver = '.'.join(('%s' % spec['tbb'].version).split('.')[1:])
            options.extend([
                self.define('TBB_FOUND', True),
                self.define('TBB_VERSION', tbb_ver),
                self.define(
                    'TBB_INCLUDE_DIRS', spec['tbb'].headers.directories
                ),
                self.define('TBB_LIBRARIES', spec['tbb'].libs)
            ])
        else:
            options.append(
                self.define('TBB_DIR', spec['tbb'].prefix)
            )

    # Optional dependencies for which library names are the same as CMake
    # variables:
    for library in (
            'gsl', 'hdf5', 'p4est', 'petsc', 'slepc', 'trilinos', 'metis',
            'sundials', 'nanoflann', 'assimp', 'gmsh', 'muparser',
            'symengine', 'ginkgo'):
        options.append(self.define_from_variant(
            'DEAL_II_WITH_{0}'.format(library.upper()), library
        ))
        if ('+' + library) in spec:
            options.append(self.define(
                '{0}_DIR'.format(library.upper()), spec[library].prefix
            ))

    # Optional dependencies that do not fit the above pattern:
    # ADOL-C
    options.append(self.define_from_variant(
        'DEAL_II_WITH_ADOLC', 'adol-c'
    ))
    if '+adol-c' in spec:
        options.append(
            self.define('ADOLC_DIR', spec['adol-c'].prefix)
        )
    # ARPACK
    options.append(self.define_from_variant(
        'DEAL_II_WITH_ARPACK', 'arpack'
    ))
    if '+arpack' in spec and '+mpi' in spec:
        options.extend([
            self.define('ARPACK_DIR', spec['arpack-ng'].prefix),
            self.define('DEAL_II_ARPACK_WITH_PARPACK', True)
        ])
    # NetCDF
    # since Netcdf is spread among two, need to do it by hand:
    if '+netcdf' in spec and '+mpi' in spec:
        netcdf_libs = spec['netcdf-cxx'].libs + spec['netcdf-c'].libs
        options.extend([
            self.define('NETCDF_FOUND', True),
            self.define('NETCDF_INCLUDE_DIRS', '{0};{1}'.format(
                spec['netcdf-cxx'].prefix.include,
                spec['netcdf-c'].prefix.include
            )),
            self.define('NETCDF_LIBRARIES', netcdf_libs)
        ])
    else:
        options.append(
            self.define('DEAL_II_WITH_NETCDF', False)
        )
    # ScaLAPACK
    options.append(self.define_from_variant(
        'DEAL_II_WITH_SCALAPACK', 'scalapack'
    ))
    if '+scalapack' in spec:
        scalapack_libs = spec['scalapack'].libs
        options.extend([
            self.define('SCALAPACK_FOUND', True),
            self.define(
                'SCALAPACK_INCLUDE_DIRS', spec['scalapack'].prefix.include
            ),
            self.define('SCALAPACK_LIBRARIES', scalapack_libs)
        ])
    # Open Cascade
    options.append(self.define_from_variant(
        'DEAL_II_WITH_OPENCASCADE', 'oce'
    ))
    if '+oce' in spec:
        options.append(
            self.define('OPENCASCADE_DIR', spec['oce'].prefix)
        )

    # As a final step, collect CXX flags that may have been
    # added anywhere above:
    if len(cxx_flags_release) > 0 and '+optflags' in spec:
        options.extend([
            self.define(
                'CMAKE_CXX_FLAGS_RELEASE', ' '.join(cxx_flags_release)
            ),
            self.define('CMAKE_CXX_FLAGS', ' '.join(cxx_flags))
        ])

    # Add flags for machine vectorization, used when tutorials
    # and user code is built.
    # See https://github.com/dealii/dealii/issues/9164
    # .get(): outside a full Spack build environment the variable may
    # be unset; an empty flag string is a safe fallback.
    options.append(
        self.define('DEAL_II_CXX_FLAGS',
                    os.environ.get('SPACK_TARGET_ARGS', ''))
    )
    return options
def setup_run_environment(self, env):
    """Point DEAL_II_DIR at this install prefix so dependents and user
    code can locate the deal.II installation at run/build time."""
    env.set('DEAL_II_DIR', self.prefix)
def setup_build_environment(self, env):
    """Export build-time environment variables.

    With both CUDA and MPI enabled, CUDAHOSTCXX is pointed at the MPI
    C++ wrapper so nvcc compiles host code with the same compiler the
    MPI stack was built around.
    """
    spec = self.spec
    if '+cuda' in spec and '+mpi' in spec:
        env.set('CUDAHOSTCXX', spec['mpi'].mpicxx)
|
import os
import numpy as np
import pandas as pd
from pandas.tools.plotting import scatter_matrix
import pylab
import statsmodels.formula.api as smf
import statsmodels.stats.api as sms
"""
USAGE:
python full_1984.py
CREATES:
results/1984/clean.csv
results/1984/scatter_matrix.png
results/1984/summary.txt
"""
PSID_CSV = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'psid', '1984.csv'))
def get_f_path(fname):
    """Return the absolute path of *fname* inside this script's
    results/1984 output directory."""
    results_dir = os.path.join(os.path.dirname(__file__), 'results', '1984')
    return os.path.abspath(os.path.join(results_dir, fname))
CLEAN_CSV = get_f_path('clean.csv')
CORR1_TXT = get_f_path('corr1.txt')
HET_WHITE_TXT = get_f_path('het_white.txt')
OLS1_TXT = get_f_path('ols1.txt')
OLS2_TXT = get_f_path('ols2.txt')
SCAT_MATRIX1_PNG = get_f_path('scatter_matrix1.png')
SCAT_MATRIX2_PNG = get_f_path('scatter_matrix2.png')
SUMMARY_TXT = get_f_path('summary.txt')
f_exists = (lambda file_: os.path.isfile(file_))
def _calc_vacation(key1, key2, bad, scale):
def fn(row):
took, amount = row[key1], row[key2]
if took in [0, 8, 9] or amount == bad:
return np.nan
elif took == 5:
return 0
else:
return scale * amount
return fn
def clean(df):
# make sex into dummy for is_female
df['sex'] -= 1
# figure out total vacation taken
df['vacation'] = df.apply(
_calc_vacation('took_vac', 'weeks_vac', 99, 7), axis=1)
# fix salary to be annual amount
df.salary = df.salary.replace(0.00, np.nan)
df.salary = df.salary.replace(99.99, np.nan)
df.salary *= 2000
# remove unknown age values
df.age = df.age.replace(99, np.nan)
# compute vacation given
df['paid_vacation'] = df.apply(
_calc_vacation('given_vac', 'hrs_paid_vac', 9999, 1. / 40.), axis=1)
# drop old values
for col in ['took_vac', 'weeks_vac', 'given_vac', 'hrs_paid_vac']:
df.drop(col, axis=1, inplace=True)
def do_stats(df):
    """Produce all statistical artifacts for the 1984 analysis.

    Each output file is generated only if absent, so re-runs redo only
    missing pieces.  NOTE: drops the ``salary`` column from *df* as a
    side effect (too collinear to keep in the later regressions).
    """
    # Summary stats
    if not f_exists(SUMMARY_TXT):
        with open(SUMMARY_TXT, 'w') as f:
            f.write(df.describe().to_string())
    # Test for autocorrelation: scatter matrix, correlation, run OLS
    if not f_exists(SCAT_MATRIX1_PNG):
        scatter_matrix(df, alpha=0.2, figsize=(64, 64), diagonal='hist')
        pylab.savefig(SCAT_MATRIX1_PNG, bbox_inches='tight')
    if not f_exists(CORR1_TXT):
        corr = df.corr()
        # blank the upper triangle so each pairwise value is shown once
        for i, k in enumerate(corr):
            row = corr[k]
            for j in range(len(row)):
                if j > i:
                    row[j] = np.nan
        with open(CORR1_TXT, 'w') as f:
            f.write(corr.to_string(na_rep=''))
    if not f_exists(OLS1_TXT):
        ols_results = smf.ols(
            formula='vacation ~ paid_vacation + np.square(paid_vacation) + '
                    'age + fam_size + income83 + sex + salary + '
                    'np.square(salary)',
            data=df).fit()
        with open(OLS1_TXT, 'w') as f:
            f.write(str(ols_results.summary()))
            f.write('\n\nCondition Number: {}'.format(
                np.linalg.cond(ols_results.model.exog)))
    # Need to drop salary, too much autocorrelation
    df.drop('salary', axis=1, inplace=True)
    if not f_exists(HET_WHITE_TXT):
        ols_results = smf.ols(
            formula='vacation ~ paid_vacation + np.square(paid_vacation) + '
                    'age + fam_size + income83 + sex',
            data=df).fit()
        names = ['LM', 'LM P val.', 'F Stat.', 'F Stat. P val.']
        test = sms.het_white(ols_results.resid, ols_results.model.exog)
        f_p = test[3]
        with open(HET_WHITE_TXT, 'w') as f:
            str_ = '\n'.join('{}: {}'.format(n, v)
                             for n, v in zip(names, test))
            f.write(str_ + '\n\n')
            # BUG FIX: White's test has homoskedasticity as the null;
            # a SMALL p-value rejects it, i.e. heteroskedasticity IS
            # present.  The original branches were inverted.
            if f_p < .01:
                f.write('Warning: Heteroskedasticity found!\n')
            else:
                f.write('No Heteroskedasticity found.\n')
    # make a new scatter matrix to use for the paper
    if not f_exists(SCAT_MATRIX2_PNG):
        scatter_matrix(df, alpha=0.2, figsize=(64, 64), diagonal='hist')
        pylab.savefig(SCAT_MATRIX2_PNG, bbox_inches='tight')
    # final OLS results
    if not f_exists(OLS2_TXT):
        ols_results = smf.ols(
            formula='vacation ~ paid_vacation + np.square(paid_vacation) + '
                    'age + fam_size + income83 + sex',
            data=df).fit()
        with open(OLS2_TXT, 'w') as f:
            f.write(str(ols_results.summary()))
            f.write('\n\nCondition Number: {}'.format(
                np.linalg.cond(ols_results.model.exog)))
def main():
df = None
if f_exists(CLEAN_CSV):
df = pd.io.parsers.read_csv(CLEAN_CSV)
df.drop('Unnamed: 0', axis=1, inplace=True)
else:
with open(PSID_CSV) as csv:
df = pd.io.parsers.read_csv(csv)
clean(df)
# write output to a file
with open(CLEAN_CSV, 'w+') as csv:
df.to_csv(path_or_buf=csv)
do_stats(df)
if __name__ == '__main__':
    main()
    # print() is valid under both Python 2 and 3; the bare `print`
    # statement used before is a SyntaxError on Python 3.
    print('Success! :)')
only need correlation once
import os
import numpy as np
import pandas as pd
from pandas.tools.plotting import scatter_matrix
import pylab
import statsmodels.formula.api as smf
import statsmodels.stats.api as sms
"""
USAGE:
python full_1984.py
CREATES:
results/1984/clean.csv
results/1984/scatter_matrix.png
results/1984/summary.txt
"""
PSID_CSV = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'psid', '1984.csv'))
def get_f_path(fname):
    """Return the absolute path of *fname* inside this script's
    results/1984 output directory."""
    results_dir = os.path.join(os.path.dirname(__file__), 'results', '1984')
    return os.path.abspath(os.path.join(results_dir, fname))
CLEAN_CSV = get_f_path('clean.csv')
CORR_TXT = get_f_path('corr.txt')
HET_WHITE_TXT = get_f_path('het_white.txt')
OLS1_TXT = get_f_path('ols1.txt')
OLS2_TXT = get_f_path('ols2.txt')
SCAT_MATRIX1_PNG = get_f_path('scatter_matrix1.png')
SCAT_MATRIX2_PNG = get_f_path('scatter_matrix2.png')
SUMMARY_TXT = get_f_path('summary.txt')
f_exists = (lambda file_: os.path.isfile(file_))
def _calc_vacation(key1, key2, bad, scale):
def fn(row):
took, amount = row[key1], row[key2]
if took in [0, 8, 9] or amount == bad:
return np.nan
elif took == 5:
return 0
else:
return scale * amount
return fn
def clean(df):
    """Normalize the raw 1984 PSID extract, mutating *df* in place.

    Recodes sex to a 0/1 dummy, converts vacation survey codes to
    amounts, annualizes salary, replaces missing-data sentinels with
    NaN, and drops the raw vacation columns.
    """
    # make sex into dummy for is_female
    df['sex'] -= 1
    # figure out total vacation taken
    # (99 is the missing-amount sentinel; the factor 7 presumably
    # converts weeks to days -- confirm against the PSID codebook)
    df['vacation'] = df.apply(
        _calc_vacation('took_vac', 'weeks_vac', 99, 7), axis=1)
    # fix salary to be annual amount
    # (0.00 and 99.99 are sentinel values for missing salary)
    df.salary = df.salary.replace(0.00, np.nan)
    df.salary = df.salary.replace(99.99, np.nan)
    df.salary *= 2000
    # remove unknown age values
    df.age = df.age.replace(99, np.nan)
    # compute vacation given
    # (9999 = missing; 1/40 presumably converts paid hours to weeks)
    df['paid_vacation'] = df.apply(
        _calc_vacation('given_vac', 'hrs_paid_vac', 9999, 1. / 40.), axis=1)
    # drop old values
    for col in ['took_vac', 'weeks_vac', 'given_vac', 'hrs_paid_vac']:
        df.drop(col, axis=1, inplace=True)
def do_stats(df):
    """Produce all statistical artifacts for the 1984 analysis.

    Each output file is generated only if absent, so re-runs redo only
    missing pieces.  NOTE: drops the ``salary`` column from *df* as a
    side effect (too collinear to keep in the later regressions).
    """
    # Summary stats
    if not f_exists(SUMMARY_TXT):
        with open(SUMMARY_TXT, 'w') as f:
            f.write(df.describe().to_string())
    # Test for autocorrelation: scatter matrix, correlation, run OLS
    if not f_exists(SCAT_MATRIX1_PNG):
        scatter_matrix(df, alpha=0.2, figsize=(64, 64), diagonal='hist')
        pylab.savefig(SCAT_MATRIX1_PNG, bbox_inches='tight')
    if not f_exists(CORR_TXT):
        corr = df.corr()
        # blank the upper triangle so each pairwise value is shown once
        for i, k in enumerate(corr):
            row = corr[k]
            for j in range(len(row)):
                if j > i:
                    row[j] = np.nan
        with open(CORR_TXT, 'w') as f:
            f.write(corr.to_string(na_rep=''))
    if not f_exists(OLS1_TXT):
        ols_results = smf.ols(
            formula='vacation ~ paid_vacation + np.square(paid_vacation) + '
                    'age + fam_size + income83 + sex + salary + '
                    'np.square(salary)',
            data=df).fit()
        with open(OLS1_TXT, 'w') as f:
            f.write(str(ols_results.summary()))
            f.write('\n\nCondition Number: {}'.format(
                np.linalg.cond(ols_results.model.exog)))
    # Need to drop salary, too much autocorrelation
    df.drop('salary', axis=1, inplace=True)
    if not f_exists(HET_WHITE_TXT):
        ols_results = smf.ols(
            formula='vacation ~ paid_vacation + np.square(paid_vacation) + '
                    'age + fam_size + income83 + sex',
            data=df).fit()
        names = ['LM', 'LM P val.', 'F Stat.', 'F Stat. P val.']
        test = sms.het_white(ols_results.resid, ols_results.model.exog)
        f_p = test[3]
        with open(HET_WHITE_TXT, 'w') as f:
            str_ = '\n'.join('{}: {}'.format(n, v)
                             for n, v in zip(names, test))
            f.write(str_ + '\n\n')
            # BUG FIX: White's test has homoskedasticity as the null;
            # a SMALL p-value rejects it, i.e. heteroskedasticity IS
            # present.  The original branches were inverted.
            if f_p < .01:
                f.write('Warning: Heteroskedasticity found!\n')
            else:
                f.write('No Heteroskedasticity found.\n')
    # make a new scatter matrix to use for the paper
    if not f_exists(SCAT_MATRIX2_PNG):
        scatter_matrix(df, alpha=0.2, figsize=(64, 64), diagonal='hist')
        pylab.savefig(SCAT_MATRIX2_PNG, bbox_inches='tight')
    # final OLS results
    if not f_exists(OLS2_TXT):
        ols_results = smf.ols(
            formula='vacation ~ paid_vacation + np.square(paid_vacation) + '
                    'age + fam_size + income83 + sex',
            data=df).fit()
        with open(OLS2_TXT, 'w') as f:
            f.write(str(ols_results.summary()))
            f.write('\n\nCondition Number: {}'.format(
                np.linalg.cond(ols_results.model.exog)))
def main():
    """Load the 1984 PSID data and produce all statistical outputs.

    results/1984/clean.csv acts as a cache: when present it is read
    back directly; otherwise the raw psid/1984.csv is cleaned and the
    cleaned frame is written out before running the stats.
    """
    df = None
    if f_exists(CLEAN_CSV):
        df = pd.io.parsers.read_csv(CLEAN_CSV)
        # drop the unnamed index column that df.to_csv wrote earlier
        df.drop('Unnamed: 0', axis=1, inplace=True)
    else:
        with open(PSID_CSV) as csv:
            df = pd.io.parsers.read_csv(csv)
        clean(df)
        # write output to a file
        with open(CLEAN_CSV, 'w+') as csv:
            df.to_csv(path_or_buf=csv)
    do_stats(df)
if __name__ == '__main__':
    main()
    # print() is valid under both Python 2 and 3; the bare `print`
    # statement used before is a SyntaxError on Python 3.
    print('Success! :)')
|
# -*- coding: utf-8 -*-
# import the "show info" tool from utils.py
from aqt.utils import showInfo
import anki.importing as importing
from anki.importing.noteimp import ForeignNote
from anki.importing.noteimp import NoteImporter
import operator
import re
from aqt import mw
from aqt.qt import *
from aqt.importing import ImportDialog
from anki.hooks import wrap
# GUI:
def hideAllowHTML(self):
    """Hide the "allow HTML" checkbox when importing with LatexImporter.

    Wrapped around ImportDialog.setupMappingFrame (see wrap() call
    below): LatexImporter.allowHTML is hard-wired to True, so the
    toggle would be misleading.
    """
    # isinstance instead of an exact type() comparison, so potential
    # subclasses of LatexImporter get the same treatment.
    if isinstance(self.importer, LatexImporter):
        # NOTE(review): setShown() is Qt4-era API (setVisible() in
        # newer Qt) -- confirm the targeted Anki/Qt version.
        self.frm.allowHTML.setShown(False)
ImportDialog.setupMappingFrame = wrap(ImportDialog.setupMappingFrame, hideAllowHTML)
# MEAT:
class LatexImporter(NoteImporter):
needMapper = True
needDelimiter = False
# allowHTML always True (assignments have no effect):
allowHTML = property(lambda self: True, lambda self,value: 0, lambda self: 0, "allowHTML, always returns True")
def __init__(self, *args):
    """Set up parser state; *args are forwarded to NoteImporter."""
    # excerpt from TextImporter (csvfile.py)
    NoteImporter.__init__(self, *args)
    self.fileobj = None
    # preamble and postamble are saved in the following variables,
    # but they are not actually processed by the current version
    # of LatexImporter:
    self.preamble = ""
    self.postamble = ""
    # noteList will contain all ForeignNotes that have been imported:
    self.noteList = []
    # the log will be built from a list of warnings and a list
    # of text passages that have been ignored
    self.log = []
    # presumably filled via ignore() with skipped text -- confirm
    self.rubbishList = []
    # human-readable parser warnings, appended throughout this class
    self.warningList = []
def fields(self):
    # exact copy from TextImporter (csvfile.py)
    "Number of fields."
    # numFields is computed at the end of processFile()
    self.open()
    return self.numFields
def open(self):
    # exact copy from TextImporter (csvfile.py)
    # NOTE(review): docstring inherited from TextImporter; here it
    # only triggers the file read/parse via cacheFile().
    "Parse the top line and determine the pattern and number of fields."
    self.cacheFile()
def cacheFile(self):
    # exact copy from TextImporter (csvfile.py)
    "Read file into self.lines if not already there."
    if not self.fileobj:
        self.openFile()
def openFile(self):
    # modified from TextImporter (csvfile.py)
    self.dialect = None
    # "rbU": Python-2-era binary read with universal newlines
    # (removed in modern Python 3; this add-on targets Python 2 Anki)
    self.fileobj = open(self.file, "rbU")
    # decode the whole file as UTF-8 and hand it to the LaTeX parser
    self.processFile(unicode(self.fileobj.read(), "utf-8"))
def foreignNotes(self):
    # modified from TextImporter (csvfile.py)
    # notes were already built during openFile()/processFile()
    return self.noteList
def textToHtml(self, text):
    "Replace line breaks, <, > and & by HTML equivalents"
    # BUG FIX: the replacement table previously mapped each character
    # to itself (a no-op), so nothing was actually escaped despite the
    # docstring.  '&' must be escaped first, or the '&' inside the
    # entities inserted for '<'/'>' would be escaped a second time.
    htmldict = [[r"&", r"&amp;"],
                [r"<", r"&lt;"],
                [r">", r"&gt;"]]
    for v in htmldict:
        text = text.replace(v[0], v[1])
    # order of replacements matters -- line breaks need to be replaced
    # last, so the '<' in '<br>' is not escaped
    text = text.replace("\n", r"<br>")
    return text
def ignore(self, value, ignored):
    """Record *value* (stripped) in *ignored* unless it is all whitespace."""
    # raw string avoids the invalid "\S" escape-sequence warning on
    # modern Python; `is not None` is the idiomatic identity test.
    if re.search(r"\S", value) is not None:
        ignored.append(value.strip())
# parsing functions for different parts of the latex document
# 1. parsing functions for different field types/tags
def process_plain(self, value, note):
    """HTML-escape *value* and append it to the note's fields."""
    note.fields.append(self.textToHtml(value))
def process_latex(self, value, note):
    """Append *value* as a [latex]-wrapped, HTML-escaped note field.

    Empty (whitespace-only) content is appended as an empty field
    without the [latex] markers.
    """
    trimmed = value.strip()
    if trimmed:
        trimmed = "[latex]" + trimmed + "[/latex]"
    note.fields.append(self.textToHtml(trimmed))
def process_tags(self, value, note):
    """Add each whitespace-separated token of *value* as a note tag."""
    for tag in value.split():
        note.tags.append(tag)
#Klammer-zu-suche:
def findClosingBrace(self, string):
    """Return (p-1, p): the index of the '}' matching an implicit '{'
    just before *string*, and the index one past it.

    The caller passes the text that FOLLOWS an opening brace.
    Backslash-escaped characters are skipped and '%' starts a LaTeX
    comment running to end of line.  Returns None (and records a
    warning) when the brace is never closed.
    """
    l = 1 #parenthization level
    p = 0
    while p < len(string) and l > 0:
        if string[p] == "\\":
            p += 1 #skip a character
        elif string[p] == "{":
            l += 1
        elif string[p] == "}":
            l -= 1
        elif string[p] == "%":
            jump = string[p:].find("\n") #jump to end of line
            if jump == -1: break
            else: p += jump
        p += 1 #loop
    if l == 0: #matching "}" found
        return (p-1,p)
    else:
        self.warningList.append("\nWARNING: } expected at the end of the following string.\n")
        self.warningList.append(string + "\n")
        return None
def findCommand(self, string, command, arg = None, warning = False):
    """Locate the first un-escaped, un-commented occurrence of a LaTeX
    command in *string*.

    arg=None matches a bare \\command (not followed by a letter);
    arg="?" matches "\\command{"; any other arg matches
    "\\command{arg}".  Returns a (start, end) index pair into
    *string*, or None if not found (optionally recording a warning).
    """
    # NOTE(review): *arg* and *command* are interpolated into the
    # regex unescaped; callers must pass regex-safe text.
    if arg == None:
        pattern = ur"\\" + command + ur"(?![a-z])"
    elif arg == "?":
        pattern = ur"\\" + command + ur"\s*{"
    else:
        pattern = ur"\\" + command + ur"\s*{" + arg + ur"}"
    p = 0
    mo = None
    while p < len(string):
        # only try to match at positions reached by the scanner below,
        # which skips escaped characters and '%' comments
        mo = re.match(pattern, string[p:])
        if mo: break
        if string[p] == "\\":
            p += 1 #skip a character
        elif string[p] == "%":
            jump = string[p:].find("\n") #jump to end of line
            if jump == -1: break
            else: p += jump
        p += 1 #loop
    if mo:
        # translate the match back to absolute coordinates in *string*
        return (p + mo.start(), p + mo.end())
    else:
        if warning == True:
            self.warningList.append("\nWARNING: The environment containing the following string seems to be corrupted.\n")
            self.warningList.append(string + "\n")
        return None
def findIter(self, string, findfun): #command, arg = None):
    """Return absolute (start, end) pairs for every match of *findfun*.

    *findfun* maps a string to a relative (start, end) pair or None.
    Matches are collected left to right, each search resuming at the
    end of the previous match.
    """
    poslist = []
    pos = (0,0)
    while True:
        adpos = findfun(string[pos[1]:]) #findCommand(string[pos[1]:], command, arg)
        if adpos == None:
            break
        if adpos[1] == adpos[0]:
            #This really shouldn't happen, I just want to make sure I don't land in an infinite loop
            self.warningList.append("\nERROR: An error occurred while parsing the following string. Import may have failed.\n")
            self.warningList.append(string + "\n")
            break
        # translate the relative match back to absolute coordinates
        pos = (pos[1] + adpos[0], pos[1] + adpos[1])
        poslist.append(pos)
    return poslist
def cutIntoPieces(self, string, cList):
    """Cut *string* into sections marked by the commands in *cList*.

    Each cList entry is a dict with 'beginfun'/'endfun' locator
    callables.  Returns (returnList, rest): one (textBeforeSection,
    sectionContent, commandIndex) triple per section found, plus the
    text remaining after the last section.  A section with no end
    marker extends up to the next section's begin marker.
    """
    # All (beginPos, contentStartPos, commandIndex) triples, in text order.
    triples = [(ao[0],ao[1],cList.index(command))
               for command in cList
               for ao in self.findIter(string, command['beginfun'])]
    triples.sort()
    Begins = [p[0] for p in triples] + [len(string)]  # sentinel: end of string
    intBegins = [p[1] for p in triples]
    commandIndices = [p[2] for p in triples]
    returnList = []
    i, prevEnd = 0, 0
    while i < len(intBegins):
        ci = commandIndices[i]
        preString = string[prevEnd:Begins[i]]          # text before this section
        intString = string[intBegins[i]:Begins[i+1]]   # content up to the next begin
        ends = cList[ci]['endfun'](intString)
        if ends == None:
            # no end marker found: take everything up to the next section
            valueString = intString
            prevEnd = Begins[i+1]
        else:
            valueString = intString[:ends[0]]
            prevEnd = intBegins[i] + ends[1]
        returnList.append((preString, valueString, ci))
        i += 1
    return returnList, string[prevEnd:]
def processFile(self, fileString):
    """Split the file at the document environment, parse its body, then
    pad every note to the same number of fields and append its tags as
    one final extra field."""
    docCommands = [{'beginfun': lambda string: self.findCommand(string, ur"begin", ur"document"),
                    'endfun': lambda string: self.findCommand(string, ur"end", ur"document", warning = True),
                    'process': lambda string: self.processDocument(string)}]
    pieces, post = self.cutIntoPieces(fileString, docCommands)
    # may return several documents if file was written like that,
    # but I'll ignore all except the first, just like any latex interpreter would
    self.preamble, document, ci = pieces[0]
    self.preamble = self.preamble + u"\\begin{document}"
    self.postamble = u"\\end{document}" + post
    self.processDocument(document)
    # make all notes same length and
    # add tags as extra field at the very end
    # (Adding tags as field is necessary for tags to be "update-able":
    # when updating a note via import, the core ANKI importer does not check
    # whether tags have changed unless they are imported as an additional field.)
    self.numFields = max([len(note.fields) for note in self.noteList]) + 1 # +1 for tag-field
    for note in self.noteList:
        note.fields = note.fields + [""]*(self.numFields-1-len(note.fields)) + [" ".join(note.tags)]
        note.tags = []
    # clean up rubbishList: drop blank entries, strip the rest
    self.rubbishList = [s.strip() for s in self.rubbishList if re.search("\S",s) != None]
    self.log = self.warningList + ["\nTHE FOLLOWING TEXT HAS BEEN IGNORED because it occurred in between notes or in between fields:\n"] + self.rubbishList + ["\n"]
def processDocument(self, document):
    """Process every note environment in the document body; tags found
    in the text between notes become the "global" tags inherited by the
    notes that follow."""
    globalTags = []
    noteCommands = [{'beginfun': lambda string: self.findCommand(string, ur"begin", ur"note"),
                    'endfun': lambda string: self.findCommand(string, ur"end", ur"note", warning = True),
                    'process': lambda string: self.processNote(string, globalTags)}]
    pieces, post = self.cutIntoPieces(document, noteCommands)
    for pre, value, ci in pieces:
        # inter-note text may replace the current global tags
        globalTags = self.processInterNoteText(pre, globalTags)
        noteCommands[ci]['process'](value)
def processNote(self, noteString, globalTags):
    """Parse one note environment into a ForeignNote and store it.

    Recognised markers: field/endfield, begin-end field, xfield{...}
    (latex fields); the same three spellings of "plain" (verbatim
    fields); and tags{...}.  Text between fields is recorded as
    rubbish.  The note starts with the current global tags.
    """
    newNote = ForeignNote()
    newNote.tags.extend(globalTags)
    fieldCommands = [{'beginfun': lambda string: self.findCommand(string, ur"field"),
                      'endfun': lambda string: self.findCommand(string, ur"endfield"),
                      'process': lambda string: self.processLatexField(string, newNote)},
                     {'beginfun': lambda string: self.findCommand(string, ur"begin", ur"field"),
                      'endfun': lambda string: self.findCommand(string, ur"end", ur"field", warning = True),
                      'process': lambda string: self.processLatexField(string, newNote)},
                     {'beginfun': lambda string: self.findCommand(string, ur"xfield", ur"?"),
                      'endfun': self.findClosingBrace,
                      'process': lambda string: self.processLatexField(string, newNote)},
                     {'beginfun': lambda string: self.findCommand(string, ur"plain"),
                      'endfun': lambda string: self.findCommand(string, ur"endplain"),
                      'process': lambda string: self.processPlainField(string, newNote)},
                     {'beginfun': lambda string: self.findCommand(string, ur"begin", ur"plain"),
                      'endfun': lambda string: self.findCommand(string, ur"end", ur"plain", warning = True),
                      'process': lambda string: self.processPlainField(string, newNote)},
                     {'beginfun': lambda string: self.findCommand(string, ur"xplain", ur"?"),
                      'endfun': self.findClosingBrace,
                      'process': lambda string: self.processPlainField(string, newNote)},
                     {'beginfun': lambda string: self.findCommand(string, ur"tags", ur"?"),
                      'endfun': self.findClosingBrace,
                      'process': lambda string: self.processTags(string, newNote)}
                     ]
    pieces, post = self.cutIntoPieces(noteString, fieldCommands)
    for pre, value, ci in pieces:
        self.rubbishList.append(pre)
        fieldCommands[ci]['process'](value)
    self.rubbishList.append(post)
    self.noteList.append(newNote)
def processInterNoteText(self, string, globalTags):
    """Scan text between notes for tags{...} sections.

    Returns the tags found there; if none are found the previous
    *globalTags* are returned unchanged.  All non-tag text is recorded
    as rubbish.
    """
    tagCommands = [{'beginfun': lambda string: self.findCommand(string, ur"tags", "?"),
                    'endfun': self.findClosingBrace,
                    'process': None}
                   ]
    pieces, post = self.cutIntoPieces(string, tagCommands)
    self.rubbishList.extend([pre for pre, value, ci in pieces] + [post])
    tags = [tag for pre, value, ci in pieces for tag in value.split() if tag != ""]
    if len(tags) > 0:
        return tags
    else:
        return globalTags
def processTags(self, string, note):
    "Append every whitespace-separated tag in *string* to the note."
    # str.split() already drops empty entries, so no filtering is needed.
    note.tags.extend(string.split())
def processLatexField(self, string, note):
    """HTML-escape a latex field, wrap it in [latex]...[/latex] markers
    and append it to the note's fields."""
    # NOTE(review): indentation of the original source was lost; blank
    # fields are assumed to still be appended (unwrapped) so that the
    # note's field order is preserved -- confirm against upstream.
    if string.strip() != "":
        string = r"[latex]" + self.textToHtml(string) + r"[/latex]"
    note.fields.append(string)
def processPlainField(self, string, note):
    """Strip, de-indent and HTML-escape a verbatim field, then append it
    to the note's fields."""
    # Leading whitespace of every line is removed (historical behaviour;
    # see the note at the end of the module).
    cleaned = re.sub(r"^[ \t]*", "", string.strip(), flags=re.MULTILINE)
    note.fields.append(self.textToHtml(cleaned))
# Register the importer so it appears in Anki's import-file dialog.
importing.Importers = importing.Importers + ((_("Latex Notes (*.tex)"), LatexImporter),)
# note:
# The command
# string = re.sub(r"^[ \t]*","",string,flags = re.MULTILINE)
# removes all whitespace at the beginning of each line of the (possibly-multiline) string.
# However, I no longer understand why I should do this.
# In the normal Anki editor, such leading whitespace does not show up in any case.
# When importing verbatim-environments, deleting whitespace causes problems.
Minor edits:
- delete some whitespaces at end of lines
- insert some whitespaces after commas
- shorten some lines
- etc.
# -*- coding: utf-8 -*-
# import the "show info" tool from utils.py
from aqt.utils import showInfo
import anki.importing as importing
from anki.importing.noteimp import ForeignNote
from anki.importing.noteimp import NoteImporter
import operator
import re
from aqt import mw
from aqt.qt import *
from aqt.importing import ImportDialog
from anki.hooks import wrap
# GUI:
def hideAllowHTML(self):
    """Hide the import dialog's 'allow HTML' checkbox when a
    LatexImporter is active (its allowHTML property always reads True,
    so the checkbox would be meaningless)."""
    if type(self.importer) == LatexImporter:
        self.frm.allowHTML.setShown(False)

# Wrapped to run after the stock setupMappingFrame, once the widget exists.
ImportDialog.setupMappingFrame = wrap(ImportDialog.setupMappingFrame, hideAllowHTML)
# MEAT:
class LatexImporter(NoteImporter):
needMapper = True
needDelimiter = False
# allowHTML always True (assignments have no affect):
allowHTML = property(lambda self: True, lambda self,value: 0, lambda self: 0, "allowHTML, always returns True")
def __init__(self, *args):
# excerpt from TextImporter (csvfile.py)
NoteImporter.__init__(self, *args)
self.fileobj = None
# preamble and postamble are saved in the following variables,
# but they are not actually processed by the current version
# of LatexImporter:
self.preamble = ""
self.postamble = ""
# noteList will contain all ForeignNotes that have been imported:
self.noteList = []
# the log will be built from a list of warnings and a list
# of text passages that have been ignored
self.log = []
self.rubbishList = []
self.warningList = []
def fields(self):
# exact copy from TextImporter (csvfile.py)
"Number of fields."
self.open()
return self.numFields
def open(self):
# exact copy from TextImporter (csvfile.py)
"Parse the top line and determine the pattern and number of fields."
self.cacheFile()
def cacheFile(self):
# exact copy from TextImporter (csvfile.py)
"Read file into self.lines if not already there."
if not self.fileobj:
self.openFile()
def openFile(self):
# modified from TextImporter (csvfile.py)
self.dialect = None
self.fileobj = open(self.file, "rbU")
self.processFile(unicode(self.fileobj.read(), "utf-8"))
def foreignNotes(self):
# modified from TextImporter (csvfile.py)
return self.noteList
def textToHtml(self, text):
"Replace line breaks, <, > and & by HTML equivalents"
htmldict = [[r"&", r"&"],
[r"<", r"<"],
[r">", r">"]]
for v in htmldict:
text = text.replace(v[0], v[1])
# order of replacements matters --
# line breaks need to be replaced last!
text = text.replace("\n", r"<br>")
return text
def ignore(self, value, ignored):
if re.search("\S", value) != None:
ignored.append(value.strip())
# parsing functions for different parts of the latex document
# 1. parsing functions for different field types/tags
def process_plain(self, value, note):
value = self.textToHtml(value)
note.fields.append(value)
def process_latex(self, value, note):
value = value.strip()
if value != "":
value = r"[latex]" + value + r"[/latex]"
value = self.textToHtml(value)
note.fields.append(value)
def process_tags(self, value, note):
note.tags.extend(value.split())
# Klammer-zu-suche:
def findClosingBrace(self, string):
"return position of } matching invisible { at beginning of string"
l = 1 # parenthization level
p = 0
while p < len(string) and l > 0:
if string[p] == "\\":
p += 1 # skip a character
elif string[p] == "{":
l += 1
elif string[p] == "}":
l -= 1
elif string[p] == "%":
jump = string[p:].find("\n") # jump to end of line
if jump == -1: break
else: p += jump
p += 1 # loop
if l == 0: # matching "}" found
return (p-1, p)
else:
self.warningList.append("\nWARNING: } expected at the end of the following string.\n")
self.warningList.append(string + "\n")
return None
def findCommand(self, string, command, arg=None, warning=False):
if arg == None:
pattern = ur"\\" + command + ur"(?![a-z])"
elif arg == "?":
pattern = ur"\\" + command + ur"\s*{"
else:
pattern = ur"\\" + command + ur"\s*{" + arg + ur"}"
p = 0
mo = None
while p < len(string):
mo = re.match(pattern, string[p:])
if mo: break
if string[p] == "\\":
p += 1 # skip a character
elif string[p] == "%":
jump = string[p:].find("\n") # jump to end of line
if jump == -1: break
else: p += jump
p += 1 # loop
if mo:
return (p + mo.start(), p + mo.end())
else:
if warning == True:
self.warningList.append("\nWARNING: The environment containing the following string seems to be corrupted.\n")
self.warningList.append(string + "\n")
return None
def findIter(self, string, findfun):
poslist = []
pos = (0, 0)
while True:
adpos = findfun(string[pos[1]:])
if adpos == None:
break
if adpos[1] == adpos[0]:
# This really shouldn't happen, I just want to make sure
# I don't land in an infinite loop
self.warningList.append("\nERROR: An error occurred while parsing the following string. Import may have failed.\n")
self.warningList.append(string + "\n")
break
pos = (pos[1] + adpos[0], pos[1] + adpos[1])
poslist.append(pos)
return poslist
def cutIntoPieces(self, string, cList):
"""returns a list of the strings before and in between all sections
marked by the commands in cList, and a list of all sections"""
triples = [(ao[0], ao[1], cList.index(command))
for command in cList
for ao in self.findIter(string, command['beginfun'])]
triples.sort()
Begins = [p[0] for p in triples] + [len(string)]
intBegins = [p[1] for p in triples]
commandIndices = [p[2] for p in triples]
returnList = []
i, prevEnd = 0, 0
while i < len(intBegins):
ci = commandIndices[i]
preString = string[prevEnd:Begins[i]]
intString = string[intBegins[i]:Begins[i+1]]
ends = cList[ci]['endfun'](intString)
if ends == None:
valueString = intString
prevEnd = Begins[i+1]
else:
valueString = intString[:ends[0]]
prevEnd = intBegins[i] + ends[1]
returnList.append((preString, valueString, ci))
i += 1
return returnList, string[prevEnd:]
def processFile(self, fileString):
docCommands = [{'beginfun': lambda string: self.findCommand(string, ur"begin", ur"document"),
'endfun': lambda string: self.findCommand(string, ur"end", ur"document", warning=True),
'process': lambda string: self.processDocument(string)}]y
pieces, post = self.cutIntoPieces(fileString, docCommands)
# may return several documents if file was written like that,
# but I'll ignore all except the first,
# just like any latex interpreter would
self.preamble, document, ci = pieces[0]
self.preamble = self.preamble + u"\\begin{document}"
self.postamble = u"\\end{document}" + post
self.processDocument(document)
# make all notes same length and
# add tags as extra field at the very end
# (Adding tags as field is necessary for tags to be "update-able":
# when updating a note via import, the core ANKI importer
# does not check whether tags have changed unless they are
# imported as an additional field.)
self.numFields = max([len(note.fields) for note in self.noteList]) + 1
# (+1 for tag-field)
for note in self.noteList:
note.fields = note.fields + [""]*(self.numFields-1-len(note.fields)) + [" ".join(note.tags)]
note.tags = []
# clean up rubbishList:
self.rubbishList = [s.strip() for s in self.rubbishList if re.search("\S", s) != None]
self.log = self.warningList + ["\nTHE FOLLOWING TEXT HAS BEEN IGNORED because it occurred in between notes or in between fields:\n"] + self.rubbishList + ["\n"]
def processDocument(self, document):
globalTags = []
noteCommands = [{'beginfun': lambda string: self.findCommand(string, ur"begin", ur"note"),
'endfun': lambda string: self.findCommand(string, ur"end", ur"note", warning=True),
'process': lambda string: self.processNote(string, globalTags)}]
pieces, post = self.cutIntoPieces(document, noteCommands)
for pre, value, ci in pieces:
globalTags = self.processInterNoteText(pre, globalTags)
noteCommands[ci]['process'](value)
def processNote(self, noteString, globalTags):
newNote = ForeignNote()
newNote.tags.extend(globalTags)
fieldCommands = [{'beginfun': lambda string: self.findCommand(string, ur"field"),
'endfun': lambda string: self.findCommand(string, ur"endfield"),
'process': lambda string: self.processLatexField(string, newNote)},
{'beginfun': lambda string: self.findCommand(string, ur"begin", ur"field"),
'endfun': lambda string: self.findCommand(string, ur"end", ur"field", warning=True),
'process': lambda string: self.processLatexField(string, newNote)},
{'beginfun': lambda string: self.findCommand(string, ur"xfield", ur"?"),
'endfun': self.findClosingBrace,
'process': lambda string: self.processLatexField(string, newNote)},
{'beginfun': lambda string: self.findCommand(string, ur"plain"),
'endfun': lambda string: self.findCommand(string, ur"endplain"),
'process': lambda string: self.processPlainField(string, newNote)},
{'beginfun': lambda string: self.findCommand(string, ur"begin", ur"plain"),
'endfun': lambda string: self.findCommand(string, ur"end", ur"plain", warning=True),
'process': lambda string: self.processPlainField(string, newNote)},
{'beginfun': lambda string: self.findCommand(string, ur"xplain", ur"?"),
'endfun': self.findClosingBrace,
'process': lambda string: self.processPlainField(string, newNote)},
{'beginfun': lambda string: self.findCommand(string, ur"tags", ur"?"),
'endfun': self.findClosingBrace,
'process': lambda string: self.processTags(string, newNote)}
]
pieces, post = self.cutIntoPieces(noteString, fieldCommands)
for pre, value, ci in pieces:
self.rubbishList.append(pre)
fieldCommands[ci]['process'](value)
self.rubbishList.append(post)
self.noteList.append(newNote)
def processInterNoteText(self, string, globalTags):
tagCommands = [{'beginfun': lambda string: self.findCommand(string, ur"tags", "?"),
'endfun': self.findClosingBrace,
'process': None}
]
pieces, post = self.cutIntoPieces(string, tagCommands)
self.rubbishList.extend([pre for pre, value, ci in pieces] + [post])
tags = [tag for pre, value, ci in pieces for tag in value.split() if tag != ""]
if len(tags) > 0:
return tags
else:
return globalTags
def processTags(self, string, note):
tags = [tag for tag in string.split() if tag != ""]
note.tags.extend(tags)
def processLatexField(self, string, note):
if string.strip() != "":
# string = re.sub(r"^[ \t]*", "", string, flags=re.MULTILINE)
# see note below
string = self.textToHtml(string)
string = r"[latex]" + string + r"[/latex]"
note.fields.append(string)
def processPlainField(self, string, note):
string = string.strip()
string = re.sub(r"^[ \t]*", "", string, flags=re.MULTILINE)
# see note below
string = self.textToHtml(string)
note.fields.append(string)
# Register the importer so it appears in Anki's import-file dialog.
importing.Importers = importing.Importers + ((_("Latex Notes (*.tex)"), LatexImporter),)
# note:
# The command
# string = re.sub(r"^[ \t]*","",string,flags = re.MULTILINE)
# removes all whitespace at the beginning of each line of the
# (possibly-multiline) string. However, I no longer understand
# why I should do this. In the normal Anki editor, such leading
# whitespace does not show up in any case. When importing
# verbatim-environments, deleting whitespace causes problems.
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Eospac(Package):
    """A collection of C routines that can be used to access the Sesame data
    library.
    """

    homepage = "http://laws-green.lanl.gov/projects/data/eos.html"
    list_url = "http://laws-green.lanl.gov/projects/data/eos/eospacReleases.php"

    # Releases are served through a CGI download script, so each version
    # pins its full URL (the filename embeds a per-release hash/timestamp).
    version('6.4.0', sha256='15a953beac735c68431afe86ffe33323d540d0fbbbec03ba79438dd29736051d',
            url="http://laws-green.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0_612ea8c9b8ffa6d9175d9118955571d9107f1e3c.tgz")
    version('6.4.0beta.4', sha256='0ebfd8badff575ea77444aa978629dbdca3135a0b5eb373b8daba058773d4635',
            url="http://laws-green.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.4_aff6429bb6868de31a980278bafa13487c2ce83f.tgz")
    version('6.4.0beta.3', sha256='9f387ca5356519494c6f3f27adb0c165cf9f9e15e3355a67bf940a4a92eebdab',
            url="http://laws-green.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.3_90ff265f62aa1780bfcd0a62dad807b6be6ed461.tgz")
    version('6.4.0beta.2', sha256='f9db46cd6c62a6f83960d802350f3e37675921af102969b293c02eb797558a53',
            url="http://laws-green.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.2_69196eadbc77506561eef711f19d2f03b4ab0ffa.tgz")
    version('6.4.0beta.1', sha256='14c5c804e5f628f41e8ed80bcee5a80adeb6c6f3d130715421ca99a30c7eb7e2',
            url="http://laws-green.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.1_r20171213193219.tgz")
    # 6.3.1 is marked preferred, so it is the default even though 6.4.0 is newer.
    version('6.3.1', sha256='aa1112c4251c9c3c2883a7ab2c7f2abff2c339f29dbbf8421ef88b0c9df904f8', preferred=True,
            url="http://laws-green.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.3.1_r20161202150449.tgz")

    # This patch allows the use of spack's compile wrapper 'flang'
    patch('flang.patch', when='@:6.4.0beta.2%clang')

    def install(self, spec, prefix):
        """Build and install through the bundled Makefile's single
        'install' target, passing Spack's compiler wrappers and the
        install locations as make variables."""
        with working_dir('Source'):
            make('install',
                 'CC={0}'.format(spack_cc),
                 'CXX={0}'.format(spack_cxx),
                 'F77={0}'.format(spack_f77),
                 'F90={0}'.format(spack_fc),
                 'prefix={0}'.format(prefix),
                 'INSTALLED_LIBRARY_DIR={0}'.format(prefix.lib),
                 'INSTALLED_INCLUDE_DIR={0}'.format(prefix.include),
                 'INSTALLED_EXAMPLE_DIR={0}'.format(prefix.example),
                 'INSTALLED_BIN_DIR={0}'.format(prefix.bin))
        # fix conflict with linux's getopt for 6.4.0beta.2
        if spec.satisfies('@6.4.0beta.2'):
            with working_dir(prefix.bin):
                move('getopt', 'driver_getopt')
eospac: fix url (#13998)
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Eospac(Package):
    """A collection of C routines that can be used to access the Sesame data
    library.
    """

    homepage = "http://laws.lanl.gov/projects/data/eos.html"
    list_url = "http://laws.lanl.gov/projects/data/eos/eospacReleases.php"

    # Releases are served through a CGI download script, so each version
    # pins its full URL (the filename embeds a per-release hash/timestamp).
    version('6.4.0', sha256='15a953beac735c68431afe86ffe33323d540d0fbbbec03ba79438dd29736051d',
            url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0_612ea8c9b8ffa6d9175d9118955571d9107f1e3c.tgz")
    version('6.4.0beta.4', sha256='0ebfd8badff575ea77444aa978629dbdca3135a0b5eb373b8daba058773d4635',
            url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.4_aff6429bb6868de31a980278bafa13487c2ce83f.tgz")
    version('6.4.0beta.3', sha256='9f387ca5356519494c6f3f27adb0c165cf9f9e15e3355a67bf940a4a92eebdab',
            url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.3_90ff265f62aa1780bfcd0a62dad807b6be6ed461.tgz")
    version('6.4.0beta.2', sha256='f9db46cd6c62a6f83960d802350f3e37675921af102969b293c02eb797558a53',
            url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.2_69196eadbc77506561eef711f19d2f03b4ab0ffa.tgz")
    version('6.4.0beta.1', sha256='14c5c804e5f628f41e8ed80bcee5a80adeb6c6f3d130715421ca99a30c7eb7e2',
            url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.1_r20171213193219.tgz")
    # 6.3.1 is marked preferred, so it is the default even though 6.4.0 is newer.
    version('6.3.1', sha256='aa1112c4251c9c3c2883a7ab2c7f2abff2c339f29dbbf8421ef88b0c9df904f8', preferred=True,
            url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.3.1_r20161202150449.tgz")

    # This patch allows the use of spack's compile wrapper 'flang'
    patch('flang.patch', when='@:6.4.0beta.2%clang')

    def install(self, spec, prefix):
        """Build and install through the bundled Makefile's single
        'install' target, passing Spack's compiler wrappers and the
        install locations as make variables."""
        with working_dir('Source'):
            make('install',
                 'CC={0}'.format(spack_cc),
                 'CXX={0}'.format(spack_cxx),
                 'F77={0}'.format(spack_f77),
                 'F90={0}'.format(spack_fc),
                 'prefix={0}'.format(prefix),
                 'INSTALLED_LIBRARY_DIR={0}'.format(prefix.lib),
                 'INSTALLED_INCLUDE_DIR={0}'.format(prefix.include),
                 'INSTALLED_EXAMPLE_DIR={0}'.format(prefix.example),
                 'INSTALLED_BIN_DIR={0}'.format(prefix.bin))
        # fix conflict with linux's getopt for 6.4.0beta.2
        if spec.satisfies('@6.4.0beta.2'):
            with working_dir(prefix.bin):
                move('getopt', 'driver_getopt')
|
#!/usr/bin/env pythonw
# -*- coding: utf-8 -*-
"""Latin Search program"""
from __future__ import (print_function, unicode_literals,
with_statement)
import os
import sys
try:
import cPickle as pickle
except ImportError:
import pickle
import string
import collections
from difflib import SequenceMatcher
from multiprocessing import Pool
from multiprocessing import Process, Value, Lock
try:
from prettytable import PrettyTable
except ImportError:
PrettyTable = None
if sys.version[0] == '2':
import Tkinter as tk
import ttk
import tkFileDialog
import ScrolledText as st
elif sys.version[0] == '3':
import tkinter as tk
from tkinter import ttk
import tkinter.scrolledtext as st
from tkinter import filedialog as tkFileDialog
__version__ = "v0.1.0"
__author__ = 'Jin'

# Query history (appears unused in the visible part of this module -- verify).
_history = []

# Raw CSV data file and pickled caches of its keys / lookup dict.
DATA_FILE = os.path.abspath('data/latin_without_sougou.csv')
PICKLE_KEYS_FILE = os.path.abspath('data/latin_60000.keys.pickle')
PICKLE_DICT_FILE = os.path.abspath('data/latin_60000.dict.pickle')
# Base URL of the Flora of China (FRPS) website
FRPS_BASE_URL = 'http://frps.eflora.cn/frps/'
# Limit of results for similarity candidate area
SIMILAR_RESULT_NUM_LIMIT = 30
# Similarity threshold for similarity search
SIMILARITY_THRESHOLD = 0.3
# Special characters that can appear in Latin plant names
SPECIAL_CHARS = ['×', '〔', ')', '【', '】', '', '', '<', '>',
                 '*', '[', '@', ']', '[', '|']
# NOTE(review): placeholder object; presumably rebound to a SpellCheck
# instance before get_spell_check_candidate runs -- confirm, otherwise
# .correct() would raise AttributeError.
TRAINED_OBJECT = object()
USAGE_INFO = """
植物拉丁名搜索(Latin Namer Finer)
[介绍]
根据植物拼音缩写、拼音、中文名称或者拉丁名搜索植物其他信息。
得到候选词后,*双击* 候选词,将得到详细信息。如果没有匹配,将会使用糢糊搜索。
[版本]
%s
[使用方法]
1. 使用拼音首字母搜索。例如搜索 “eqxlm”,将会得到 “二球悬铃木”
及其他悬铃木相关的结果。
2. 使用拼音全称搜索。例如搜索 “erqiuxuanlingmu”,将会得到 “二球悬铃木”
及其他悬铃木相关的结果。
3. 使用中文搜索。例如搜索 “悬铃木”,将会得到 “二球悬铃木”, “三球悬铃木”
等相关搜索结果。
4. 使用拉丁名搜索。例如搜索 “Platanus × acerifolia”,将会得到 “二球悬铃木”
相关的结果。
[候选词介绍]
+---+------------------------+
| 1 | 候选词以查询词开始或结尾
|---+------------------------+
| 2 | 候选词包含查询词
|---+------------------------+
| 3 | 根据相似性进行糢糊搜索
|---+------------------------+
| 4 | 拼写检查(编辑距离为 1)
+---+------------------------+
""" % __version__
def get_lines(file_name):
    """Return every line of *file_name* as a list (newlines included)."""
    with open(file_name, 'r') as handle:
        return list(handle)
def get_key_value_pairs_from_file(file_name):
    """Parse the CSV data file into per-column key lists plus full records.

    Each line is expected to hold at least four comma-separated columns:
    abbreviation, pinyin, Chinese name, Latin name (plus optional extras
    such as the author).

    File:
        +-----+-------+------+----------------------+-------+
        | mg  | mugua | 木瓜 | Chaenomeles sinensis | Lynn. |
        +-----+-------+------+----------------------+-------+

    Returns:
        (column_list_1, column_list_2, column_list_3, column_list_4,
        detailed_info_tuple_list): the stripped values of columns 1-4 as
        four parallel lists, and a list of whole-line tuples.
        (The previous docstring claimed dicts were returned; the dicts
        are actually built later by get_one_to_more_dict.)
    """
    column_list_1, column_list_2 = [], []
    column_list_3, column_list_4 = [], []
    detailed_info_tuple_list = []
    with open(file_name, 'r') as f_in:
        for line in f_in:
            if sys.version[0] == '2':
                line = line.decode('utf-8')  # py2: bytes -> unicode
            elements = [x.strip() for x in line.split(',')]
            column_list_1.append(elements[0])
            column_list_2.append(elements[1])
            column_list_3.append(elements[2])
            column_list_4.append(elements[3])
            detailed_info_tuple_list.append(tuple(elements))
    return (column_list_1, column_list_2, column_list_3,
            column_list_4, detailed_info_tuple_list)
def get_one_to_more_dict(key_list, value_list):
    """Build a key -> list-of-values dict from two parallel lists.

    Duplicate keys accumulate their values in encounter order.  (The
    original if/else had two identical branches and an unused enumerate
    index; setdefault expresses the same logic directly.)

    >>> get_one_to_more_dict(['a', 'b', 'a', 'a'], [1, 2, 3, 4])
    {'a': [1, 3, 4], 'b': [2]}
    """
    _out_dict = {}
    for key, value in zip(key_list, value_list):
        _out_dict.setdefault(key, []).append(value)
    return _out_dict
def get_dict_for_all_columns():
    """Load the data file and build a single lookup keyed by every column.

    Returns (keys_for_all, combined): the deduplicated list of all
    searchable keys from columns 1-4, and one dict mapping each key to
    its matching whole-line records.
    """
    (col_1, col_2, col_3, col_4,
     records) = get_key_value_pairs_from_file(DATA_FILE)
    combined = get_one_to_more_dict(col_1, records)
    # Fold the per-column dicts for columns 2-4 into the first one.
    for keys in (col_2, col_3, col_4):
        per_column = get_one_to_more_dict(keys, records)
        if per_column:
            combined.update(per_column)
    keys_for_all = list(set(col_1 + col_2 + col_3 + col_4))
    return keys_for_all, combined
# ============================================================================
# Utils Part
# ============================================================================
def get_similarity(str_a, str_b):
    """Return the SequenceMatcher similarity ratio of two strings.

    [Example]
    >>> get_similarity('abcde', 'bcdef')
    0.8
    """
    matcher = SequenceMatcher(None, str_a, str_b)
    return matcher.ratio()
# ============================================================================
# Query Part
# ============================================================================
class SpellCheck(object):
    """Spell checker trained on a given list of candidate words.

    [Example]
    >>> s = SpellCheck(['abcd', 'fghi'])
    >>> s.correct('abci')
    'abcd'

    [Reference]
    [1]: Peter Norvig, "How to Write a Spelling Corrector",
         http://norvig.com/spell-correct.html
    """

    def __init__(self, candidate_list):
        self.candidate_list = candidate_list
        self.NWORDS = self.train()
        self.NWORDS_lower = self.train(lower=True)
        self.alphabet = ('abcdefghijklmnopqrstuvwxyz'
                         'ABCDEFGHIJKLMNOPQRSTUVWXYZ_-.:1234567890')

    def train(self, lower=False):
        """Count word occurrences; unseen words default to weight 1."""
        if not self.candidate_list:
            raise ValueError('Blank training list (Choosed blank file?).')
        model = collections.defaultdict(lambda: 1)
        words = self.candidate_list
        if lower:
            words = [word.lower() for word in list(words)]
        for word in words:
            model[word] += 1
        return model

    def edits1(self, word):
        """All words one edit away from *word*:
        deletion, transposition, alteration, insertion."""
        n = len(word)
        deletions = [word[0:i] + word[i + 1:] for i in range(n)]
        transpositions = [word[0:i] + word[i + 1] + word[i] + word[i + 2:]
                          for i in range(n - 1)]
        alterations = [word[0:i] + c + word[i + 1:]
                       for i in range(n) for c in self.alphabet]
        insertions = [word[0:i] + c + word[i:]
                      for i in range(n + 1) for c in self.alphabet]
        return set(deletions + transpositions + alterations + insertions)

    def known_edits2(self, word):
        """Trained words exactly two edits away from *word*."""
        return set(e2 for e1 in self.edits1(word) for e2 in self.edits1(e1)
                   if e2.lower() in self.NWORDS_lower)

    def known(self, words):
        """Subset of *words* present (case-insensitively) in the model."""
        return set(w for w in words if w.lower() in self.NWORDS_lower)

    def correct(self, word):
        """Return the most likely correction for *word*."""
        # Including known_edits2 gives better recall but is far slower,
        # so only edit distance 1 is considered here.
        candidates = (self.known([word]) or self.known(self.edits1(word))
                      or [word])
        return max(candidates, key=lambda w: self.NWORDS_lower[w])
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Query class for easy organizing
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class QueryWord(object):
"""Query with 4 strategy with multi-processing.
[Strategies]
1. Starts with 'abcde'.startswith('abc')
2. Contains 'bcd' in 'abcde'
3. Rank by Similarity
4. Spell check
[turn_on_mode]
Default: (True, True, True, True)
Return all 4 results.
(True, True, False, False)
Return results of strategy_1 and strategy_2, and blank result
of strategy_3 and strategy_4
[Usage]
query_object = QueryWord(keys_for_all, dict_for_all)
# Startswith
# query_object.get_starts_with_candidates(query)
# Contains
# query_object.get_contains_candidates(query)
# Similarity
# query_object.get_similar_candidates(query, limit=30)
# Spell Check
# query_object.get_spell_check_candidate(query)
# All Four
query_object.query_all_four(
query,
turn_on_mode=(True, True, True, True))
"""
def __init__(self, keys_for_all, dict_for_all):
    """Store search keys/records and train the spell checker."""
    # Blank keys can never be matched, so drop them up front.
    self.keys_for_all = [key.strip() for key in keys_for_all
                         if key.strip()]
    self.dict_for_all = dict_for_all
    # Note: the spell checker is trained on the unfiltered key list.
    self.trained_object = SpellCheck(keys_for_all)
    self.query = ''
    self.result_dict = {}
# --------------------------------------------------------
# 1: Startswith or Endswith Check
# --------------------------------------------------------
@staticmethod
def get_starts_with_candidates(query, keys_for_all, dict_for_all,
result_dict, turn_on=True):
"""Check startswith & endswith"""
_tmp_list = []
if turn_on:
# Check totally match. Totally matched result should be on top
if query in keys_for_all:
_tmp_list.append(query)
result_elements = dict_for_all.get(query)[0]
for each in result_elements[:4]:
if each.strip() and each != query:
_tmp_list.append(each)
# Check partially match
for i, candidate in enumerate(keys_for_all):
if candidate.startswith(query) or \
candidate.endswith(query):
if candidate != query:
_tmp_list.append(candidate)
result_dict.update({'0': _tmp_list})
# --------------------------------------------------------
# 2: Contains Check
# --------------------------------------------------------
@staticmethod
def get_contains_candidates(query, keys_for_all, dict_for_all,
result_dict, turn_on=True):
"""Check contains"""
_tmp_list = []
if turn_on:
# Check totally match. Totally matched result should be on top
if query in keys_for_all:
_tmp_list.append(query)
result_elements = dict_for_all.get(query)[0]
for each in result_elements[:4]:
if each.strip() and each != query:
_tmp_list.append(each)
# Check partially match
for i, candidate in enumerate(keys_for_all):
if query in candidate:
if candidate != query:
_tmp_list.append(candidate)
result_dict.update({'1': _tmp_list})
# --------------------------------------------------------
# 3: Similarity Check
# --------------------------------------------------------
@staticmethod
def get_similar_candidates(query, keys_for_all, dict_for_all,
                           result_dict, turn_on=True):
    """Strategy 3: rank candidates by string similarity to *query*.

    Skipped entirely (for performance) when the contains-search of
    strategy 2 already produced results; the top matches are stored
    under result_dict['2'].
    """
    similar_hits = []
    if turn_on and len(result_dict.get('1')) == 0:
        scored = []
        for candidate in keys_for_all:
            score = get_similarity(candidate, query)
            if score > SIMILARITY_THRESHOLD:
                scored.append((score, candidate))
        scored.sort(key=lambda pair: pair[0], reverse=True)
        del scored[SIMILAR_RESULT_NUM_LIMIT:]
        similar_hits = [candidate for _score, candidate in scored]
    result_dict.update({'2': similar_hits})
# --------------------------------------------------------
# 4: Advanced Spell Check
# --------------------------------------------------------
@staticmethod
def get_spell_check_candidate(query, keys_for_all, dict_for_all,
                              result_dict, turn_on=True):
    """Store the single best spell-check candidate under key '3'.

    Skipped (empty string) when the contains search already had hits.
    The correction comes from the module-level TRAINED_OBJECT, which
    gui_main() replaces with a trained SpellCheck instance.
    keys_for_all / dict_for_all are unused but kept for a uniform
    strategy signature.
    """
    candidate = ''
    # BUG FIX: guard against a missing '1' entry — len(None) crashed.
    if turn_on and not result_dict.get('1', []):
        candidate = TRAINED_OBJECT.correct(query)
    result_dict.update({'3': candidate})
def query_all_four(self, query,
                   turn_on_mode=(True, True, True, True)):
    """Run all four strategies; return {'0': .., '1': .., '2': .., '3': ..}.

    turn_on_mode -- per-strategy switches, in the order
    (startswith/endswith, contains, similarity, spell check).

    BUG FIX: the previous code wrapped every call in
    ``Process(target=each_func(...))``, which *calls* the function in
    this process and hands its return value (None) to Process — the
    spawned children were no-ops and all work was done serially in the
    parent anyway (a plain dict could not be shared across processes
    either).  The strategies are now called directly, which has
    identical behavior without spawning useless processes.
    """
    # Reset self.query to the value of parameter query.
    self.query = query
    result_dict = {}
    strategies = (QueryWord.get_starts_with_candidates,
                  QueryWord.get_contains_candidates,
                  # Strategies 3/4 read result_dict['1'] and skip
                  # themselves when the contains search already hit,
                  # so ordering matters here.
                  QueryWord.get_similar_candidates,
                  QueryWord.get_spell_check_candidate)
    for strategy, turn_on in zip(strategies, turn_on_mode):
        strategy(self.query, self.keys_for_all, self.dict_for_all,
                 result_dict, turn_on)
    return result_dict
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Right Menu
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class RightClickMenu(object):
    """
    Simple widget to add basic right click menus to entry widgets.
    usage:
        rclickmenu = RightClickMenu(some_entry_widget)
        some_entry_widget.bind("<3>", rclickmenu)
    If you prefer to import Tkinter over Tix, just replace all Tix
    references with Tkinter and this will still work fine.
    """

    def __init__(self, parent):
        self.parent = parent
        # Bind Control-A to _select_all().  All other accelerators
        # (Ctrl-V, Ctrl-X, Ctrl-C, ...) work without explicit bindings;
        # Ctrl-A was the only problematic one.
        self.parent.bind("<Control-a>", lambda e: self._select_all(), add='+')
        self.parent.bind("<Control-A>", lambda e: self._select_all(), add='+')

    def __call__(self, event):
        # If the entry widget is disabled, do nothing.
        if self.parent.cget('state') == 'disable':
            return
        # Grab focus so the cursor and any marked selection stay
        # visible while the menu is open.
        self.parent.focus_force()
        self.build_menu(event)

    def build_menu(self, event):
        """Build and post the right-click menu at the click position."""
        menu = tk.Menu(self.parent, tearoff=0)
        # Cut/Copy only make sense when text is selected.
        if not self.parent.selection_present():
            menu.add_command(label="Copy", state='disable')
            menu.add_command(label="Cut", state='disable')
        else:
            # Tkinter's virtual events do the real work.
            menu.add_command(label="Copy", command=self._copy)
            menu.add_command(label="Cut", command=self._cut)
        # Paste is enabled only when the clipboard holds string data.
        if self.paste_string_state():
            menu.add_command(label="Paste", command=self._paste)
        else:
            menu.add_command(label="Paste", state='disable')
        # Delete likewise requires a selection.
        if not self.parent.selection_present():
            menu.add_command(label="Delete", state='disable')
        else:
            menu.add_command(label="Delete", command=self._clear)
        menu.add_separator()
        menu.add_command(label="Select All", command=self._select_all)
        menu.post(event.x_root, event.y_root)

    def _cut(self):
        self.parent.event_generate("<<Cut>>")

    def _copy(self):
        self.parent.event_generate("<<Copy>>")

    def _paste(self):
        self.parent.event_generate("<<Paste>>")

    def _clear(self):
        self.parent.event_generate("<<Clear>>")

    def _select_all(self):
        self.parent.selection_range(0, 'end')
        self.parent.icursor('end')
        # Return 'break' to stop the default Control-a binding, which
        # would send the cursor home and drop the selection.
        return 'break'

    def paste_string_state(self):
        """Returns true if a string is in the clipboard"""
        try:
            # selection_get raises TclError when the clipboard holds
            # non-string data (e.g. a picture) or is empty; the unused
            # local binding was dropped.
            self.parent.selection_get(selection='CLIPBOARD')
        except tk.TclError:
            # BUG FIX: was a bare ``except:`` that also swallowed
            # unrelated errors (even KeyboardInterrupt); only the Tk
            # clipboard error should mean "nothing to paste".
            return False
        return True
class RightClickMenuForListBox(object):
    """Attach a minimal right-click (context) menu to a listbox widget.

    usage:
        rclickmenu = RightClickMenuForListBox(listbox_widget)
        listbox_widget.bind("<3>", rclickmenu)
    If you prefer to import Tkinter over Tix, just replace all Tix
    references with Tkinter and this will still work fine.
    """

    def __init__(self, parent):
        self.parent = parent

    def __call__(self, event):
        widget = self.parent
        # Disabled widgets get no context menu at all.
        if widget.cget('state') == 'disable':
            return
        # Take focus so the selection stays visible while the menu
        # is open.
        widget.focus_force()
        self.build_menu(event)

    def build_menu(self, event):
        """Pop up a one-entry (Copy) context menu at the click point."""
        popup = tk.Menu(self.parent, tearoff=0)
        popup.add_command(label="Copy", command=self._copy)
        popup.post(event.x_root, event.y_root)

    def _copy(self):
        self.parent.event_generate("<<Copy>>")
class RightClickMenuForScrolledText(object):
    """Simple widget to add basic right click menus to ScrolledText widgets."""

    def __init__(self, parent):
        self.parent = parent
        # Bind Control-A to _select_all(); other accelerators
        # (Ctrl-V/X/C) already work without explicit bindings.
        self.parent.bind("<Control-a>", lambda e: self._select_all(), add='+')
        self.parent.bind("<Control-A>", lambda e: self._select_all(), add='+')

    def __call__(self, event):
        # If the text widget is disabled, do nothing.
        if self.parent.cget('state') == tk.DISABLED:
            return
        # Grab focus so the cursor and selection stay visible.
        self.parent.focus_force()
        self.build_menu(event)

    def build_menu(self, event):
        """Build and post the context menu at the click position."""
        menu = tk.Menu(self.parent, tearoff=0)
        # Unlike the entry-widget menu, Copy/Cut stay enabled even
        # without a selection (the virtual events are then no-ops).
        menu.add_command(label="Copy", command=self._copy)
        menu.add_command(label="Cut", command=self._cut)
        # Paste only when the clipboard holds string data.
        if self._paste_string_state():
            menu.add_command(label="Paste",
                             command=self._paste_if_string_in_clipboard)
        else:
            menu.add_command(label="Paste", state='disable')
        menu.add_command(label="Delete", command=self._delete)
        menu.add_separator()
        menu.add_command(label="Select All", command=self._select_all)
        menu.add_command(label="Clear All", command=self._clear_all)
        menu.post(event.x_root, event.y_root)

    def _cut(self):
        self.parent.event_generate("<<Cut>>")

    def _copy(self):
        self.parent.event_generate("<<Copy>>")

    def _delete(self):
        self.parent.event_generate("<<Clear>>")

    def _paste_if_string_in_clipboard(self):
        self.parent.event_generate("<<Paste>>")

    def _select_all(self):
        """Select the whole buffer and put the cursor at the top."""
        self.parent.tag_add('sel', "1.0", "end-1c")
        self.parent.mark_set('insert', "1.0")
        self.parent.see('insert')
        return 'break'

    def _paste_string_state(self):
        """Returns true if a string is in the clipboard"""
        try:
            # selection_get raises TclError when the clipboard does not
            # hold string data (e.g. a picture) or is empty.
            self.parent.selection_get(selection='CLIPBOARD')
        except tk.TclError:
            # BUG FIX: was a bare ``except:`` that also swallowed
            # unrelated errors; only the Tk clipboard error means
            # "nothing to paste".
            return False
        return True

    def _clear_all(self):
        """Clear all text in the widget."""
        self.parent.delete('1.0', 'end')
class AutocompleteGUI(tk.Frame):
    """The main GUI for the autocomplete (Latin name finder) program.

    Layout: query combobox + "Do Query" button on top, four candidate
    listboxes (one per search strategy) in the middle, and a scrolled
    text output area at the bottom.
    """

    def __init__(self, master=None, keys_for_all=None, dict_for_all=None):
        """Build the whole window.

        keys_for_all -- searchable keys (defaults to an empty list)
        dict_for_all -- key -> list of detail tuples (defaults to {})
        """
        tk.Frame.__init__(self, master)
        # BUG FIX: the old defaults were mutable ([] and {}), shared by
        # every instance created without arguments; None sentinels keep
        # the same effective defaults without the sharing.
        self.keys_for_all = keys_for_all if keys_for_all is not None else []
        self.dict_for_all = dict_for_all if dict_for_all is not None else {}
        self.history = []
        self.master.grid()
        self.set_style()
        self.create_menu()
        self.create_widgets()
        self.grid_configure()
        self.create_right_menu()
        self.bind_func()
        self.master.geometry('1400x800')
        self.master.title('Latin Finder %s' % __version__)

    def set_style(self):
        """Set ttk styles for widgets in the main window."""
        s = ttk.Style()
        s.configure('TCombobox', padding=(11))
        s.configure('auto.TCombobox', foreground='red')
        s.configure('TButton', padding=(10))
        s.configure('open.TButton', foreground='blue')

    def create_menu(self):
        """Create the File / Help menubar for the main window."""
        self.menubar = tk.Menu(self.master)
        self.file_menu = tk.Menu(self.menubar, tearoff=0)
        self.file_menu.add_command(
            label='Open',
            # command=reload_GUI_with_new_list
            )
        self.file_menu.add_command(
            label='Exit',
            command=self.master.quit)
        self.menubar.add_cascade(label='File', menu=self.file_menu)
        self.help_menu = tk.Menu(self.menubar, tearoff=0)
        # BUG FIX: was ``command=lambda: self.print_help``, which only
        # *returned* the bound method without calling it, so the Help
        # entry silently did nothing.  Bind the method itself.
        self.help_menu.add_command(
            label='Help',
            command=self.print_help)
        self.menubar.add_cascade(label='Help', menu=self.help_menu)
        self.master.config(menu=self.menubar)

    def create_widgets(self):
        """Create and place all widgets of the main GUI window."""
        self.content = ttk.Frame(self.master, padding=(8))
        self.content.grid(row=0, column=0, sticky=(tk.W + tk.E + tk.N + tk.S))
        # Four labelled listboxes, one per search strategy.
        self.label_1 = ttk.Label(self.content,
                                 text='Candidates (Startswith / Endswith)')
        self.listbox1 = tk.Listbox(self.content, font=('Monospace', 10))
        self.scrollbar1 = ttk.Scrollbar(self.content)
        self.label_2 = ttk.Label(
            self.content, text='Candidates (Contains)')
        self.listbox2 = tk.Listbox(self.content, font=('Monospace', 10))
        self.scrollbar2 = ttk.Scrollbar(self.content)
        self.label_3 = ttk.Label(
            self.content, text='Candidates (Rank by similarity)')
        self.listbox3 = tk.Listbox(self.content, font=('Monospace', 10))
        self.scrollbar3 = ttk.Scrollbar(self.content)
        self.label_4 = ttk.Label(
            self.content,
            text='Candidates (Spell check, single edit distance)')
        self.listbox4 = tk.Listbox(self.content, font=('Monospace', 10))
        self.scrollbar4 = ttk.Scrollbar(self.content)
        self.label_5 = ttk.Label(
            self.content,
            text=('Click "Do Query" button and see results. '
                  '** Double Click ** candidate to see detailed result.'))
        self.scrolled_text_5 = st.ScrolledText(self.content,
                                               font=('Monospace', 10))
        self.input_box = ttk.Combobox(
            self.content,
            style='auto.TCombobox')
        self.input_box.grid(row=0, column=0, columnspan=6,
                            sticky=(tk.W + tk.E))
        self.input_box.focus()
        # self.open_file_button = ttk.Button(
        #     self.content,
        #     text='Open Tree File',
        #     # command=reload_GUI_with_new_list,
        #     style='open.TButton')
        # self.open_file_button.grid(
        #     row=0, column=0, columnspan=2, sticky=(tk.W+tk.E))
        # NOTE(review): 'copy.TButton' is never configured in
        # set_style(), so ttk falls back to the default button style.
        self.do_query_button = ttk.Button(
            self.content,
            text='Do Query',
            style='copy.TButton')
        self.do_query_button.grid(
            row=0,
            column=6,
            columnspan=2,
            sticky=(tk.W))
        # Each listbox pairs with a scrollbar in the next grid column.
        self.label_1.grid(row=1, column=0, columnspan=2, sticky=(tk.W))
        self.listbox1.grid(row=2, column=0,
                           sticky=(tk.W + tk.E + tk.N + tk.S))
        self.scrollbar1.grid(row=2, column=1, sticky=(tk.N + tk.S))
        self.listbox1.config(yscrollcommand=self.scrollbar1.set)
        self.scrollbar1.config(command=self.listbox1.yview)
        self.label_2.grid(row=1, column=2, columnspan=2, sticky=(tk.W))
        self.listbox2.grid(row=2, column=2,
                           sticky=(tk.W + tk.E + tk.N + tk.S))
        self.scrollbar2.grid(row=2, column=3, sticky=(tk.N + tk.S))
        self.listbox2.config(yscrollcommand=self.scrollbar2.set)
        self.scrollbar2.config(command=self.listbox2.yview)
        self.label_3.grid(row=1, column=4, columnspan=2, sticky=(tk.W))
        self.listbox3.grid(row=2, column=4,
                           sticky=(tk.W + tk.E + tk.N + tk.S))
        self.scrollbar3.grid(row=2, column=5, sticky=(tk.N + tk.S))
        self.listbox3.config(yscrollcommand=self.scrollbar3.set)
        self.scrollbar3.config(command=self.listbox3.yview)
        self.label_4.grid(row=1, column=6, columnspan=2, sticky=(tk.W))
        self.listbox4.grid(row=2, column=6,
                           sticky=(tk.W + tk.E + tk.N + tk.S))
        self.scrollbar4.grid(row=2, column=7, sticky=(tk.N + tk.S))
        self.listbox4.config(yscrollcommand=self.scrollbar4.set)
        self.scrollbar4.config(command=self.listbox4.yview)
        self.label_5.grid(row=3, column=0, columnspan=7, sticky=(tk.W))
        self.scrolled_text_5.grid(row=4, column=0, columnspan=7,
                                  sticky=(tk.N + tk.S + tk.W + tk.E))
        # BUG FIX: the text index "0.1" was a typo for "1.0" (Tk text
        # lines start at 1); Tk silently clamped it, so behavior is
        # unchanged, but the intent is now explicit.
        self.scrolled_text_5.delete("1.0", "end-1c")
        self.scrolled_text_5.insert('end', USAGE_INFO)

        def bind_command_to_listbox(widget):
            """Bind command to listbox.

            Double click on a candidate in any of the four columns
            shows the detailed result in the output area.
            """
            # Double click left mouse
            widget.bind('<Double-Button-1>',
                        lambda e: self.clean_and_insert_value(widget))
            right_menu_widget = RightClickMenuForListBox(widget)
            widget.bind("<Button-3>", right_menu_widget)

        for listbox in [self.listbox1, self.listbox2,
                        self.listbox3, self.listbox4]:
            bind_command_to_listbox(listbox)

    def clean_and_insert_value(self, widget, is_clean_word=True):
        """Clean the output area and show details of the clicked item.

        is_clean_word is currently unused; kept for backward
        compatibility (see the commented-out handling below).
        """
        # Listbox index must be: active, anchor, end, @x,y, or a number
        selection_value = widget.get('active')
        # if not is_clean_word:
        #     if selection_value:
        #         selection_value = selection_value.split()[1]
        self.input_box.delete(0, tk.END)
        self.input_box.insert(tk.END, selection_value)
        # BUG FIX: "0.1" was a typo for "1.0" (Tk clamps it, so the
        # behavior is unchanged).
        self.scrolled_text_5.delete("1.0", "end-1c")
        result = self.dict_for_all.get(selection_value)
        if result:
            if PrettyTable:
                table = PrettyTable(
                    ["Short Pinyin", "Long Pinyin", 'Chinese',
                     'Latin', 'Namer', 'Data Source', 'Web URL'])
                for column in ('Short Pinyin', 'Long Pinyin', 'Chinese',
                               'Latin', 'Namer', 'Data Source', 'Web URL'):
                    table.align[column] = "l"
                table.padding_width = 1
                for each_result in result:
                    # Build the FRPS web link from the Latin name
                    # (column 3), dropping special characters first.
                    normal_word_list = [x for x in each_result[3].split()
                                        if x not in SPECIAL_CHARS]
                    url = (FRPS_BASE_URL + '%20'.join(normal_word_list))
                    tmp_list = [_ for _ in each_result]
                    tmp_list.append(url)
                    table.add_row(tmp_list)
                self.scrolled_text_5.insert('end', table.get_string())
            else:
                # Fallback when prettytable is not installed: plain
                # rows after a (Chinese) hint telling the user how to
                # install it.
                self.scrolled_text_5.insert(
                    'end',
                    ('请安装 prettytable 以获得更清晰的结果视图。\n'
                     '安装方法: pip install prettytable\n\n'
                     '+--------------+-------------+---------'
                     '+-------+-------+-------------+---------+\n'
                     '| Short Pinyin | Long Pinyin | Chinese '
                     '| Latin | Namer | Data Source | Web URL |\n'
                     '+--------------+-------------+---------+'
                     '-------+-------+-------------+---------+\n'
                     '\n%s\n' % ('=' * 100)))
                for each_result in result:
                    elements = ' | '.join(each_result)
                    self.scrolled_text_5.insert('end', elements)
                    self.scrolled_text_5.insert('end',
                                                ('\n%s\n' % ('-' * 100)))

    def _do_query(self):
        """Run the query strategies for the current input.

        Strategy switches depend on the query type: an exact key hit
        skips the fuzzy strategies, Latin names (contain spaces) skip
        the spell check, and non-ASCII (Chinese) queries skip the
        spell check as well.
        """
        query = self.input_box.get().strip()
        query_word_object = QueryWord(
            self.keys_for_all, self.dict_for_all)
        result_dict = {'0': [], '1': [], '2': [], '3': ''}
        if query:
            # If the name matches a key in the dictionary, just do
            # strategies 1 & 2.
            if query in self.dict_for_all:
                result_dict = query_word_object. \
                    query_all_four(
                        query,
                        turn_on_mode=(True, True, False, False))
            # No exact match.
            else:
                # Latin: a space between words marks a Latin name.
                if ' ' in query:
                    result_dict = query_word_object. \
                        query_all_four(
                            query,
                            turn_on_mode=(True, True, True, False))
                # English/ASCII: similarity search and spell check
                # both apply.
                elif query[0] in string.printable:
                    result_dict = query_word_object. \
                        query_all_four(
                            query,
                            turn_on_mode=(True, True, True, True))
                else:
                    # Chinese: spell check does not work and may hit
                    # Unicode-related errors, so it is turned off.
                    result_dict = query_word_object. \
                        query_all_four(
                            query,
                            turn_on_mode=(True, True, True, False))
        return result_dict

    def show_candidates_for_multi_processing(self):
        """Run the query and refill the four candidate listboxes."""
        result_dict = self._do_query()
        # Display outcome to candidate widget 1
        self.listbox1.delete('0', 'end')
        for item in result_dict['0']:
            self.listbox1.insert('end', item)
        # Display outcome to candidate widget 2
        self.listbox2.delete('0', 'end')
        for item in result_dict['1']:
            self.listbox2.insert('end', item)
        # Display outcome to candidate widget 3
        self.listbox3.delete('0', 'end')
        for item in result_dict['2']:
            self.listbox3.insert('end', item)
        # Strategy 4 yields a single candidate string.
        self.listbox4.delete('0', 'end')
        self.listbox4.insert('end', result_dict['3'])

    def grid_configure(self):
        """Grid weights: listbox/text rows and columns absorb resizes.

        NOTE(review): this shadows Tkinter's own grid_configure()
        method on the frame, as the original code did — confirm no
        caller relies on the inherited behavior.
        """
        self.master.rowconfigure(0, weight=1)
        self.master.columnconfigure(0, weight=1)
        self.content.rowconfigure(0, weight=0)
        self.content.rowconfigure(1, weight=0)
        self.content.rowconfigure(2, weight=1)
        self.content.rowconfigure(3, weight=0)
        self.content.rowconfigure(4, weight=1)
        self.content.columnconfigure(0, weight=1)
        self.content.columnconfigure(1, weight=0)
        self.content.columnconfigure(2, weight=1)
        self.content.columnconfigure(3, weight=0)
        self.content.columnconfigure(4, weight=1)
        self.content.columnconfigure(5, weight=0)
        self.content.columnconfigure(6, weight=1)
        self.content.columnconfigure(7, weight=0)

    def create_right_menu(self):
        """Attach right-click menus to the input box and output area."""
        right_menu_input_box = RightClickMenu(self.input_box)
        self.input_box.bind('<Button-3>', right_menu_input_box)
        right_menu_scrolled_text_5 = RightClickMenuForScrolledText(
            self.scrolled_text_5)
        self.scrolled_text_5.bind('<Button-3>', right_menu_scrolled_text_5)

    def bind_func(self):
        """Wire the Do Query button to the query runner."""
        self.do_query_button['command'] = \
            self.show_candidates_for_multi_processing

    def print_help(self):
        """Show the usage text in the output area."""
        # BUG FIX: "0.1" was a typo for "1.0" (Tk clamps it).
        self.scrolled_text_5.delete("1.0", "end-1c")
        self.scrolled_text_5.insert('end', USAGE_INFO)
def dump_with_pickle(keys_for_all, dict_for_all):
    """Dump the key list and dictionary to the pickle cache files.

    Generally, this function only needs to run once.

    BUG FIX: the previous version ignored both arguments and always
    regenerated the data via get_dict_for_all_columns().  The passed-in
    data is now used; regeneration only happens when nothing (or empty
    data) was supplied, preserving the old no-argument-data behavior.
    """
    if not keys_for_all or not dict_for_all:
        keys_for_all, dict_for_all = get_dict_for_all_columns()
    with open(PICKLE_KEYS_FILE, 'wb') as f_out:
        f_out.write(pickle.dumps(keys_for_all))
    with open(PICKLE_DICT_FILE, 'wb') as f_out:
        f_out.write(pickle.dumps(dict_for_all))
def load_with_pickle(pickle_keys_file, pickle_dict_file):
    """Load keys and dict from pickle raw file"""
    def _unpickle(path):
        # Read the raw bytes and rebuild the original object.
        with open(path, 'rb') as f_in:
            return pickle.loads(f_in.read())
    return _unpickle(pickle_keys_file), _unpickle(pickle_dict_file)
def gui_main():
    """Launch the Tkinter autocomplete application."""
    # Data comes from the pre-built pickle caches; regenerate them via
    # dump_with_pickle() / get_dict_for_all_columns() when needed.
    keys_for_all, dict_for_all = load_with_pickle(PICKLE_KEYS_FILE,
                                                  PICKLE_DICT_FILE)
    # The spell checker is shared through a module-level global because
    # the query strategies are static methods.
    global TRAINED_OBJECT
    TRAINED_OBJECT = SpellCheck(keys_for_all)
    gui = AutocompleteGUI(keys_for_all=keys_for_all,
                          dict_for_all=dict_for_all)
    gui.mainloop()
def main():
    """Command-line entry point: build the lookup data (no GUI)."""
    get_dict_for_all_columns()
if __name__ == '__main__':
    # Start the GUI by default; switch to main() for the CLI data build.
    gui_main()
    # main()
# Refine code structure
#!/usr/bin/env pythonw
# -*- coding: utf-8 -*-
"""Latin Search program"""
from __future__ import (print_function, unicode_literals,
with_statement)
import os
import sys
try:
import cPickle as pickle
except ImportError:
import pickle
import string
import collections
from difflib import SequenceMatcher
from multiprocessing import Pool
from multiprocessing import Process, Value, Lock
try:
from prettytable import PrettyTable
except ImportError:
PrettyTable = None
if sys.version[0] == '2':
import Tkinter as tk
import ttk
import tkFileDialog
import ScrolledText as st
elif sys.version[0] == '3':
import tkinter as tk
from tkinter import ttk
import tkinter.scrolledtext as st
from tkinter import filedialog as tkFileDialog
__version__ = "v0.1.0"
__author__ = 'Jin'
# Search-history placeholder (currently unused).
_history = []
# Input CSV and the pre-built pickle caches of the parsed data.
DATA_FILE = os.path.abspath('data/latin_without_sougou.csv')
PICKLE_KEYS_FILE = os.path.abspath('data/latin_60000.keys.pickle')
PICKLE_DICT_FILE = os.path.abspath('data/latin_60000.dict.pickle')
# Link to the "Flora of China" (FRPS) website
FRPS_BASE_URL = 'http://frps.eflora.cn/frps/'
# Limit of results for similarity candidate area
SIMILAR_RESULT_NUM_LIMIT = 30
# Similarity threshold for similarity search
SIMILARITY_THRESHOLD = 0.3
# Special characters that may appear in Latin names
SPECIAL_CHARS = ['×', '〔', ')', '【', '】', '', '', '<', '>',
                 '*', '[', '@', ']', '[', '|']
# Placeholder; replaced by a trained SpellCheck instance in gui_main().
TRAINED_OBJECT = object()
# User-facing usage text (Chinese) shown in the output area on startup.
USAGE_INFO = """
植物拉丁名搜索(Latin Namer Finer)
[介绍]
根据植物拼音缩写、拼音、中文名称或者拉丁名搜索植物其他信息。
得到候选词后,*双击* 候选词,将得到详细信息。如果没有匹配,将会使用糢糊搜索。
[版本]
%s
[使用方法]
1. 使用拼音首字母搜索。例如搜索 “eqxlm”,将会得到 “二球悬铃木”
及其他悬铃木相关的结果。
2. 使用拼音全称搜索。例如搜索 “erqiuxuanlingmu”,将会得到 “二球悬铃木”
及其他悬铃木相关的结果。
3. 使用中文搜索。例如搜索 “悬铃木”,将会得到 “二球悬铃木”, “三球悬铃木”
等相关搜索结果。
4. 使用拉丁名搜索。例如搜索 “Platanus × acerifolia”,将会得到 “二球悬铃木”
相关的结果。
[候选词介绍]
+---+------------------------+
| 1 | 候选词以查询词开始或结尾
|---+------------------------+
| 2 | 候选词包含查询词
|---+------------------------+
| 3 | 根据相似性进行糢糊搜索
|---+------------------------+
| 4 | 拼写检查(编辑距离为 1)
+---+------------------------+
""" % __version__
def get_lines(file_name):
    """Read *file_name* and return its lines (newlines preserved)."""
    with open(file_name, 'r') as handle:
        lines = handle.readlines()
    return lines
def get_key_value_pairs_from_file(file_name):
    """Parse the comma-separated data file into per-column lists.

    File:
        +-----+-------+------+----------------------+-------+
        | mg  | mugua | 木瓜 | Chaenomeles sinensis | Lynn. |
        +-----+-------+------+----------------------+-------+
    Returns a 5-tuple:
        (column_list_1, column_list_2, column_list_3, column_list_4,
         detailed_info_tuple_list)
    where the first four lists hold one column each and the last holds
    the full line as a tuple of all (stripped) fields.

    BUG FIX: blank or malformed lines (fewer than 4 comma-separated
    fields) used to raise IndexError; they are now skipped.
    """
    column_list_1, column_list_2, column_list_3, column_list_4 = [], [], [], []
    detailed_info_tuple_list = []
    with open(file_name, 'r') as f_in:
        for line in f_in:
            # Python 2 reads bytes; decode to unicode there.
            if sys.version[0] == '2':
                line = line.decode('utf-8')
            elements = [x.strip() for x in line.split(',')]
            if len(elements) < 4:
                continue
            column_list_1.append(elements[0])
            column_list_2.append(elements[1])
            column_list_3.append(elements[2])
            column_list_4.append(elements[3])
            detailed_info_tuple_list.append(tuple(elements))
    return (column_list_1, column_list_2, column_list_3,
            column_list_4, detailed_info_tuple_list)
def get_one_to_more_dict(key_list, value_list):
    """
    Generate a dictionary from two lists. Keys may be duplicated.
    >>> get_one_to_more_dict(['a', 'b', 'a', 'a'], [1, 2, 3, 4])
    {'a': [1, 3, 4], 'b': [2]}
    """
    # Both branches of the old if/else performed the same append;
    # setdefault expresses the grouping directly (and the unused
    # enumerate index is gone).
    _out_dict = {}
    for key, value in zip(key_list, value_list):
        _out_dict.setdefault(key, []).append(value)
    return _out_dict
def get_dict_for_all_columns():
    """Combine dicts, each with one column as key and whole line as value."""
    parsed = get_key_value_pairs_from_file(DATA_FILE)
    column_lists, detail_tuples = parsed[:4], parsed[4]
    # Merge the four per-column dicts into one; later columns win on
    # key collisions, matching the old dict_1.update(dict_2..4) order.
    merged_dict = {}
    for one_column in column_lists:
        merged_dict.update(get_one_to_more_dict(one_column, detail_tuples))
    keys_for_all = list(set(column_lists[0] + column_lists[1] +
                            column_lists[2] + column_lists[3]))
    return keys_for_all, merged_dict
# ============================================================================
# Utils Part
# ============================================================================
def get_similarity(str_a, str_b):
    """Return similarity of two strings.
    [Example]
    >>> get_similarity('abcde', 'bcdef')
    0.8
    """
    matcher = SequenceMatcher(None, str_a, str_b)
    return matcher.ratio()
# ============================================================================
# Query Part
# ============================================================================
class SpellCheck(object):
    """Train on a word list, then spell-check given words against it.

    [Example]
    >>> s = SpellCheck(['abcd', 'fghi'])
    >>> s.correct('abci')
    'abcd'
    [Reference]
    [1]: Title: How to Write a Spelling Corrector
         Author: Peter Norvig
         Webpage: http://norvig.com/spell-correct.html
    """

    def __init__(self, candidate_list):
        """candidate_list -- the words the checker may correct to."""
        self.candidate_list = candidate_list
        # NWORDS keeps original-case counts (kept for backward
        # compatibility); correct() works on the lower-cased table.
        self.NWORDS = self.train()
        self.NWORDS_lower = self.train(lower=True)
        self.alphabet = ('abcdefghijklmnopqrstuvwxyz'
                         'ABCDEFGHIJKLMNOPQRSTUVWXYZ_-.:1234567890')

    def train(self, lower=False):
        """Build a word -> count table (counts start at 1 via defaultdict)."""
        if not self.candidate_list:
            raise ValueError('Blank training list (Choosed blank file?).')
        model = collections.defaultdict(lambda: 1)
        if not lower:
            for f in self.candidate_list:
                model[f] += 1
        else:
            tmp_list = self.candidate_list[:]
            for f in map(lambda _: _.lower(), tmp_list):
                model[f] += 1
        return model

    def edits1(self, word):
        """All words within one edit distance.
        1. deletion
        2. transposition
        3. alteration
        4. insertion
        """
        n = len(word)
        return set(
            [word[0:i] + word[i + 1:] for i in range(n)] +  # deletion
            # transposition
            [word[0:i] + word[i + 1] + word[i] + word[i + 2:]
             for i in range(n - 1)] +
            # alteration
            [word[0:i] + c + word[i + 1:]
             for i in range(n) for c in self.alphabet] +
            # insertion
            [word[0:i] + c + word[i:]
             for i in range(n + 1) for c in self.alphabet])

    def known_edits2(self, word):
        """Known words within two edit distances."""
        return set(e2 for e1 in self.edits1(word) for e2 in self.edits1(e1)
                   if e2.lower() in self.NWORDS_lower)

    def known(self, words):
        """Subset of *words* present (case-insensitively) in the model."""
        return set(w for w in words if w.lower() in self.NWORDS_lower)

    def correct(self, word):
        """Do spell check and correct *word* if it was wrongly spelled."""
        # Edit 1 and Edit 2 (low performance, better accuracy):
        # candidates = (self.known([word]) or self.known(self.edits1(word))
        #               or self.known_edits2(word) or [word])
        # Only Edit 1 (for better performance):
        candidates = (self.known([word]) or self.known(self.edits1(word))
                      or [word])
        # BUG FIX: indexing the defaultdict (self.NWORDS_lower[w])
        # inserted every looked-up candidate with the default count 1,
        # so a previously-unknown queried word became "known" on later
        # calls.  .get(w, 1) keeps the exact same scores (including the
        # deliberate case-sensitive preference for trained words)
        # without mutating the table.
        return max(candidates, key=lambda w: self.NWORDS_lower.get(w, 1))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Query class for easy organizing
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class QueryWord(object):
    """Query with 4 strategies.

    [Strategies]
        1. Starts/ends with   'abcde'.startswith('abc')
        2. Contains           'bcd' in 'abcde'
        3. Rank by similarity
        4. Spell check
    [turn_on_mode]
        Default: (True, True, True, True) — run all 4 strategies.
        (True, True, False, False) — run strategies 1 and 2 only and
        store blank results for strategies 3 and 4.
    [Usage]
        query_object = QueryWord(keys_for_all, dict_for_all)
        query_object.query_all_four(
            query,
            turn_on_mode=(True, True, True, True))
    """

    def __init__(self, keys_for_all, dict_for_all):
        """Store the search data and train the spell checker.

        keys_for_all -- iterable of searchable keys; blanks dropped.
        dict_for_all -- mapping from key to a list of detail tuples.
        """
        self.keys_for_all = [x.strip() for x in keys_for_all
                             if x.strip()]
        self.dict_for_all = dict_for_all
        # NOTE: kept for backward compatibility; the strategies use
        # the module-level TRAINED_OBJECT, not this attribute.
        self.trained_object = SpellCheck(keys_for_all)
        self.query = ''
        self.result_dict = {}

    # --------------------------------------------------------
    # 1: Startswith or Endswith Check
    # --------------------------------------------------------
    @staticmethod
    def get_starts_with_candidates(query, keys_for_all, dict_for_all,
                                   result_dict, turn_on=True):
        """Collect candidates that start or end with *query* (key '0')."""
        _tmp_list = []
        if turn_on:
            # Exact match (plus up to four of its detail fields) on top.
            if query in keys_for_all:
                _tmp_list.append(query)
                # BUG FIX: keys and dict are supplied separately, so
                # the lookup may miss; the old code crashed on None[0].
                entries = dict_for_all.get(query)
                if entries:
                    for each in entries[0][:4]:
                        if each.strip() and each != query:
                            _tmp_list.append(each)
            # Partial matches (unused enumerate index removed).
            for candidate in keys_for_all:
                if candidate != query and (candidate.startswith(query) or
                                           candidate.endswith(query)):
                    _tmp_list.append(candidate)
        result_dict.update({'0': _tmp_list})

    # --------------------------------------------------------
    # 2: Contains Check
    # --------------------------------------------------------
    @staticmethod
    def get_contains_candidates(query, keys_for_all, dict_for_all,
                                result_dict, turn_on=True):
        """Collect candidates that contain *query* (key '1')."""
        _tmp_list = []
        if turn_on:
            # Exact match (plus up to four of its detail fields) on top.
            if query in keys_for_all:
                _tmp_list.append(query)
                # BUG FIX: guard the dict lookup against a miss.
                entries = dict_for_all.get(query)
                if entries:
                    for each in entries[0][:4]:
                        if each.strip() and each != query:
                            _tmp_list.append(each)
            for candidate in keys_for_all:
                if candidate != query and query in candidate:
                    _tmp_list.append(candidate)
        result_dict.update({'1': _tmp_list})

    # --------------------------------------------------------
    # 3: Similarity Check
    # --------------------------------------------------------
    @staticmethod
    def get_similar_candidates(query, keys_for_all, dict_for_all,
                               result_dict, turn_on=True):
        """Rank candidates by similarity to *query* (key '2').

        Skipped for performance when the contains search (key '1')
        already produced hits.  dict_for_all is unused but kept for a
        uniform strategy signature.
        """
        scored = []
        _similar_hits = []
        # BUG FIX: result_dict.get('1') is None when strategy 2 never
        # ran; len(None) raised TypeError.  Use a default instead.
        if turn_on and not result_dict.get('1', []):
            for candidate in keys_for_all:
                _similarity = get_similarity(candidate, query)
                if _similarity > SIMILARITY_THRESHOLD:
                    scored.append((_similarity, candidate))
            scored.sort(key=lambda pair: pair[0], reverse=True)
            _similar_hits = [pair[1]
                             for pair in scored[:SIMILAR_RESULT_NUM_LIMIT]]
        result_dict.update({'2': _similar_hits})

    # --------------------------------------------------------
    # 4: Advanced Spell Check
    # --------------------------------------------------------
    @staticmethod
    def get_spell_check_candidate(query, keys_for_all, dict_for_all,
                                  result_dict, turn_on=True):
        """Store the best spell-check candidate for *query* (key '3').

        Skipped (empty string) when the contains search already hit.
        Uses the module-level TRAINED_OBJECT set up by gui_main().
        """
        candidate = ''
        # BUG FIX: guard against a missing '1' entry (len(None) crashed).
        if turn_on and not result_dict.get('1', []):
            candidate = TRAINED_OBJECT.correct(query)
        result_dict.update({'3': candidate})

    def query_all_four(self, query,
                       turn_on_mode=(True, True, True, True)):
        """Run all four strategies; return {'0': .., '1': .., '2': .., '3': ..}.

        BUG FIX: the previous code wrapped each call in
        ``Process(target=each_func(...))``, which *calls* the function
        in this process and hands its return value (None) to Process —
        the children were no-ops and the work ran serially in the
        parent anyway (a plain dict is not shared across processes).
        The strategies are now called directly: identical behavior,
        no useless process spawns.
        """
        # Reset self.query to the value of parameter query.
        self.query = query
        result_dict = {}
        strategies = (QueryWord.get_starts_with_candidates,
                      QueryWord.get_contains_candidates,
                      # Strategies 3/4 read result_dict['1'] and skip
                      # themselves when the contains search already
                      # found something, so ordering matters.
                      QueryWord.get_similar_candidates,
                      QueryWord.get_spell_check_candidate)
        for strategy, turn_on in zip(strategies, turn_on_mode):
            strategy(self.query, self.keys_for_all, self.dict_for_all,
                     result_dict, turn_on)
        return result_dict
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Right Menu
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class RightClickMenu(object):
    """
    Simple widget to add basic right click menus to entry widgets.
    usage:
        rclickmenu = RightClickMenu(some_entry_widget)
        some_entry_widget.bind("<3>", rclickmenu)
    If you prefer to import Tkinter over Tix, just replace all Tix
    references with Tkinter and this will still work fine.
    """
    def __init__(self, parent):
        self.parent = parent
        # bind Control-A to select_all() to the widget. All other
        # accelerators seem to work fine without binding such as
        # Ctrl-V, Ctrl-X, Ctrl-C etc. Ctrl-A was the only one I had
        # issue with.
        self.parent.bind("<Control-a>", lambda e: self._select_all(), add='+')
        self.parent.bind("<Control-A>", lambda e: self._select_all(), add='+')
    def __call__(self, event):
        # if the entry widget is disabled do nothing.
        if self.parent.cget('state') == 'disable':
            return
        # grab focus of the entry widget. this way you can see
        # the cursor and any marking selections
        self.parent.focus_force()
        self.build_menu(event)
    def build_menu(self, event):
        """Build and post the right click menu at the event position."""
        menu = tk.Menu(self.parent, tearoff=0)
        # check to see if there is any marked text in the entry widget.
        # if not then Cut and Copy are disabled.
        if not self.parent.selection_present():
            menu.add_command(label="Copy", state='disable')
            menu.add_command(label="Cut", state='disable')
        else:
            # use Tkinter's virtual events for brevity. These could
            # be hardcoded with our own functions to imitate the same
            # actions but there's no point except as a novice exercise.
            menu.add_command(label="Copy", command=self._copy)
            menu.add_command(label="Cut", command=self._cut)
        # if there's string data in the clipboard then make the normal
        # Paste command. otherwise disable it.
        if self.paste_string_state():
            menu.add_command(label="Paste", command=self._paste)
        else:
            menu.add_command(label="Paste", state='disable')
        # again, if there's no marked text then the Delete option is disabled.
        if not self.parent.selection_present():
            menu.add_command(label="Delete", state='disable')
        else:
            menu.add_command(label="Delete", command=self._clear)
        # make things pretty with a horizontal separator
        menu.add_separator()
        # There is no virtual event for select-all, but the method
        # itself is trivial.
        menu.add_command(label="Select All", command=self._select_all)
        menu.post(event.x_root, event.y_root)
    def _cut(self):
        self.parent.event_generate("<<Cut>>")
    def _copy(self):
        self.parent.event_generate("<<Copy>>")
    def _paste(self):
        self.parent.event_generate("<<Paste>>")
    def _clear(self):
        self.parent.event_generate("<<Clear>>")
    def _select_all(self):
        self.parent.selection_range(0, 'end')
        self.parent.icursor('end')
        # return 'break' because, for some reason, Control-a (little 'a')
        # doesn't work otherwise. There's some natural binding that
        # Tkinter entry widgets want to do that send the cursor to Home
        # and deselects.
        return 'break'
    def paste_string_state(self):
        """Return True if the clipboard currently holds string data."""
        try:
            # selection_get raises TclError if the data in the clipboard
            # isn't a string (such as a picture), in which case the
            # Paste option must be disabled.
            # Bug fix: previously a bare `except:` swallowed *every*
            # exception (including KeyboardInterrupt); only the Tk error
            # is expected here.
            self.parent.selection_get(selection='CLIPBOARD')
        except tk.TclError:
            return False
        return True
class RightClickMenuForListBox(object):
    """
    Simple widget to add basic right click menus to entry widgets.
    usage:
        rclickmenu = RightClickMenuForListBox(listbox_widget)
        listbox_widget.bind("<3>", rclickmenu)
    If you prefer to import Tkinter over Tix, just replace all Tix
    references with Tkinter and this will still work fine.
    """
    def __init__(self, parent):
        self.parent = parent
    def __call__(self, event):
        # Ignore right clicks on a disabled widget.
        if self.parent.cget('state') == 'disable':
            return
        # Take focus so the cursor and any selection become visible,
        # then show the menu.
        self.parent.focus_force()
        self.build_menu(event)
    def build_menu(self, event):
        """Build and post a minimal right click menu (Copy only)."""
        popup = tk.Menu(self.parent, tearoff=0)
        popup.add_command(label="Copy", command=self._copy)
        popup.post(event.x_root, event.y_root)
    def _copy(self):
        self.parent.event_generate("<<Copy>>")
class RightClickMenuForScrolledText(object):
    """Simple widget to add basic right click menus to scrolled text widgets.

    usage:
        rclickmenu = RightClickMenuForScrolledText(text_widget)
        text_widget.bind("<3>", rclickmenu)
    """
    def __init__(self, parent):
        self.parent = parent
        # bind Control-A to select_all() to the widget. All other
        # accelerators seem to work fine without binding such as
        # Ctrl-V, Ctrl-X, Ctrl-C etc. Ctrl-A was the only one I had
        # issue with.
        self.parent.bind("<Control-a>", lambda e: self._select_all(), add='+')
        self.parent.bind("<Control-A>", lambda e: self._select_all(), add='+')
    def __call__(self, event):
        # if the entry widget is disabled do nothing.
        if self.parent.cget('state') == tk.DISABLED:
            return
        # grab focus of the entry widget. this way you can see
        # the cursor and any marking selections
        self.parent.focus_force()
        self.build_menu(event)
    def build_menu(self, event):
        """Build and post the right click menu at the event position."""
        menu = tk.Menu(self.parent, tearoff=0)
        menu.add_command(label="Copy", command=self._copy)
        menu.add_command(label="Cut", command=self._cut)
        # if there's string data in the clipboard then make the normal
        # Paste command. otherwise disable it.
        if self._paste_string_state():
            menu.add_command(label="Paste",
                             command=self._paste_if_string_in_clipboard)
        else:
            menu.add_command(label="Paste", state='disable')
        menu.add_command(label="Delete", command=self._delete)
        # make things pretty with a horizontal separator
        menu.add_separator()
        menu.add_command(label="Select All", command=self._select_all)
        menu.add_command(label="Clear All", command=self._clear_all)
        menu.post(event.x_root, event.y_root)
    def _cut(self):
        self.parent.event_generate("<<Cut>>")
    def _copy(self):
        self.parent.event_generate("<<Copy>>")
    def _delete(self):
        self.parent.event_generate("<<Clear>>")
    def _paste_if_string_in_clipboard(self):
        self.parent.event_generate("<<Paste>>")
    def _select_all(self):
        """Select the whole text content (minus trailing newline)."""
        self.parent.tag_add('sel', "1.0", "end-1c")
        self.parent.mark_set('insert', "1.0")
        self.parent.see('insert')
        # return 'break' so Tk's default Control-a binding (cursor to
        # Home, deselect) does not fire afterwards.
        return 'break'
    def _paste_string_state(self):
        """Return True if the clipboard currently holds string data."""
        try:
            # selection_get raises TclError when the clipboard data is
            # not a string (such as a picture).
            # Bug fix: previously a bare `except:` swallowed *every*
            # exception; only the Tk error is expected here.
            self.parent.selection_get(selection='CLIPBOARD')
        except tk.TclError:
            return False
        return True
    def _clear_all(self):
        """Delete the entire text content."""
        self.parent.delete('1.0', 'end')
class AutocompleteGUI(tk.Frame):
    """The main GUI for the autocomplete program.

    Shows an input combobox, four candidate listboxes (one per search
    strategy) and a scrolled output area for detailed results.
    """
    def __init__(self, master=None, keys_for_all=None, dict_for_all=None):
        """Build the main window.

        :param master: parent Tk widget (a fresh root if None).
        :param keys_for_all: list of searchable keys.
        :param dict_for_all: mapping of key -> list of result rows.

        Bug fix: the previous signature used mutable default arguments
        (``[]`` and ``{}``), which are shared between all instances;
        None sentinels are used instead (backward compatible).
        """
        tk.Frame.__init__(self, master)
        self.keys_for_all = keys_for_all if keys_for_all is not None else []
        self.dict_for_all = dict_for_all if dict_for_all is not None else {}
        self.history = []
        self.master.grid()
        self.set_style()
        self.create_menu()
        self.create_widgets()
        self.grid_configure()
        self.create_right_menu()
        self.bind_func()
        self.master.geometry('1400x800')
        self.master.title('Latin Finder %s' % __version__)
    def set_style(self):
        """Set style for widgets in the main window."""
        s = ttk.Style()
        s.configure('TCombobox', padding=(11))
        s.configure('auto.TCombobox', foreground='red')
        s.configure('TButton', padding=(10))
        s.configure('open.TButton', foreground='blue')
    def create_menu(self):
        """Create menubar for the main window."""
        self.menubar = tk.Menu(self.master)
        self.file_menu = tk.Menu(self.menubar, tearoff=0)
        self.file_menu.add_command(
            label='Open',
            # command=reload_GUI_with_new_list
        )
        self.file_menu.add_command(
            label='Exit',
            command=self.master.quit)
        self.menubar.add_cascade(label='File', menu=self.file_menu)
        self.help_menu = tk.Menu(self.menubar, tearoff=0)
        # Bug fix: was `command=lambda: self.print_help`, a lambda that
        # merely *returned* the bound method without calling it, so the
        # Help entry did nothing.
        self.help_menu.add_command(
            label='Help',
            command=self.print_help)
        self.menubar.add_cascade(label='Help', menu=self.help_menu)
        self.master.config(menu=self.menubar)
    def create_widgets(self):
        """Create widgets for the main GUI window."""
        self.content = ttk.Frame(self.master, padding=(8))
        self.content.grid(row=0, column=0, sticky=(tk.W + tk.E + tk.N + tk.S))
        self.label_1 = ttk.Label(self.content,
                                 text='Candidates (Startswith / Endswith)')
        self.listbox1 = tk.Listbox(self.content, font=('Monospace', 10))
        self.scrollbar1 = ttk.Scrollbar(self.content)
        self.label_2 = ttk.Label(
            self.content, text='Candidates (Contains)')
        self.listbox2 = tk.Listbox(self.content, font=('Monospace', 10))
        self.scrollbar2 = ttk.Scrollbar(self.content)
        self.label_3 = ttk.Label(
            self.content, text='Candidates (Rank by similarity)')
        self.listbox3 = tk.Listbox(self.content, font=('Monospace', 10))
        self.scrollbar3 = ttk.Scrollbar(self.content)
        self.label_4 = ttk.Label(
            self.content,
            text='Candidates (Spell check, single edit distance)')
        self.listbox4 = tk.Listbox(self.content, font=('Monospace', 10))
        self.scrollbar4 = ttk.Scrollbar(self.content)
        self.label_5 = ttk.Label(
            self.content,
            text=('Click "Do Query" button and see results. '
                  '** Double Click ** candidate to see detailed result.'))
        self.scrolled_text_5 = st.ScrolledText(self.content,
                                               font=('Monospace', 10))
        self.input_box = ttk.Combobox(
            self.content,
            style='auto.TCombobox')
        self.input_box.grid(row=0, column=0, columnspan=6, sticky=(tk.W + tk.E))
        self.input_box.focus()
        # self.open_file_button = ttk.Button(
        #     self.content,
        #     text='Open Tree File',
        #     # command=reload_GUI_with_new_list,
        #     style='open.TButton')
        # self.open_file_button.grid(
        #     row=0,
        #     column=0,
        #     columnspan=2,
        #     sticky=(tk.W+tk.E))
        self.do_query_button = ttk.Button(
            self.content,
            text='Do Query',
            style='copy.TButton')
        self.do_query_button.grid(
            row=0,
            column=6,
            columnspan=2,
            sticky=(tk.W))
        self.label_1.grid(row=1, column=0, columnspan=2, sticky=(tk.W))
        self.listbox1.grid(row=2, column=0, sticky=(tk.W + tk.E + tk.N + tk.S))
        self.scrollbar1.grid(row=2, column=1, sticky=(tk.N + tk.S))
        self.listbox1.config(yscrollcommand=self.scrollbar1.set)
        self.scrollbar1.config(command=self.listbox1.yview)
        self.label_2.grid(row=1, column=2, columnspan=2, sticky=(tk.W))
        self.listbox2.grid(row=2, column=2, sticky=(tk.W + tk.E + tk.N + tk.S))
        self.scrollbar2.grid(row=2, column=3, sticky=(tk.N + tk.S))
        self.listbox2.config(yscrollcommand=self.scrollbar2.set)
        self.scrollbar2.config(command=self.listbox2.yview)
        self.label_3.grid(row=1, column=4, columnspan=2, sticky=(tk.W))
        self.listbox3.grid(row=2, column=4, sticky=(tk.W + tk.E + tk.N + tk.S))
        self.scrollbar3.grid(row=2, column=5, sticky=(tk.N + tk.S))
        self.listbox3.config(yscrollcommand=self.scrollbar3.set)
        self.scrollbar3.config(command=self.listbox3.yview)
        self.label_4.grid(row=1, column=6, columnspan=2, sticky=(tk.W))
        self.listbox4.grid(row=2, column=6, sticky=(tk.W + tk.E + tk.N + tk.S))
        self.scrollbar4.grid(row=2, column=7, sticky=(tk.N + tk.S))
        self.listbox4.config(yscrollcommand=self.scrollbar4.set)
        self.scrollbar4.config(command=self.listbox4.yview)
        self.label_5.grid(row=3, column=0, columnspan=7, sticky=(tk.W))
        self.scrolled_text_5.grid(row=4, column=0, columnspan=7,
                                  sticky=(tk.N + tk.S + tk.W + tk.E))
        # Bug fix: text index was "0.1" (line zero does not exist in Tk
        # text widgets); the intended start-of-text index is "1.0".
        self.scrolled_text_5.delete("1.0", "end-1c")
        self.scrolled_text_5.insert('end', USAGE_INFO)
        def bind_command_to_listbox(widget):
            """Bind command to listbox.

            Double click on candidates from any column from the four,
            then the result will be on the output area.
            """
            # Double click left mouse shows the detailed result.
            widget.bind('<Double-Button-1>',
                        lambda e: self.clean_and_insert_value(widget))
            right_menu_widget = RightClickMenuForListBox(widget)
            widget.bind("<Button-3>", right_menu_widget)
        for listbox in [self.listbox1, self.listbox2,
                        self.listbox3, self.listbox4]:
            bind_command_to_listbox(listbox)
    def grid_configure(self):
        """Grid configuration of window and widgets."""
        self.master.rowconfigure(0, weight=1)
        self.master.columnconfigure(0, weight=1)
        self.content.rowconfigure(0, weight=0)
        self.content.rowconfigure(1, weight=0)
        self.content.rowconfigure(2, weight=1)
        self.content.rowconfigure(3, weight=0)
        self.content.rowconfigure(4, weight=1)
        self.content.columnconfigure(0, weight=1)
        self.content.columnconfigure(1, weight=0)
        self.content.columnconfigure(2, weight=1)
        self.content.columnconfigure(3, weight=0)
        self.content.columnconfigure(4, weight=1)
        self.content.columnconfigure(5, weight=0)
        self.content.columnconfigure(6, weight=1)
        self.content.columnconfigure(7, weight=0)
    def create_right_menu(self):
        """Attach right-click menus to the input box and output area."""
        # Right menu for input combobox
        right_menu_input_box = RightClickMenu(self.input_box)
        self.input_box.bind('<Button-3>', right_menu_input_box)
        # Right menu for output area
        right_menu_scrolled_text_5 = RightClickMenuForScrolledText(
            self.scrolled_text_5)
        self.scrolled_text_5.bind('<Button-3>', right_menu_scrolled_text_5)
    def bind_func(self):
        """Wire the Do Query button to the query handler."""
        self.do_query_button['command'] = self.show_candidates_for_multi_processing
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Functional methods
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def clean_and_insert_value(self, widget, is_clean_word=True):
        """Clean content in Output Area and insert the detailed result
        for the candidate currently active in *widget*."""
        # Listbox index must be: active, anchor, end, @x,y, or a number
        selection_value = widget.get('active')
        self.input_box.delete(0, tk.END)
        self.input_box.insert(tk.END, selection_value)
        # Bug fix: "0.1" -> "1.0" (start-of-text index).
        self.scrolled_text_5.delete("1.0", "end-1c")
        result = self.dict_for_all.get(selection_value)
        if result:
            if PrettyTable:
                table = PrettyTable(
                    ["Short Pinyin", "Long Pinyin", 'Chinese',
                     'Latin', 'Namer', 'Data Source', 'Web URL'])
                for column in ('Short Pinyin', 'Long Pinyin', 'Chinese',
                               'Latin', 'Namer', 'Data Source', 'Web URL'):
                    table.align[column] = "l"
                table.padding_width = 1
                for each_result in result:
                    normal_word_list = [x for x in each_result[3].split()
                                        if x not in SPECIAL_CHARS]
                    url = (FRPS_BASE_URL + '%20'.join(normal_word_list))
                    tmp_list = [_ for _ in each_result]
                    tmp_list.append(url)
                    table.add_row(tmp_list)
                self.scrolled_text_5.insert('end', table.get_string())
            else:
                self.scrolled_text_5.insert(
                    'end',
                    ('请安装 prettytable 以获得更清晰的结果视图。\n'
                     '安装方法: pip install prettytable\n\n'
                     '+--------------+-------------+---------'
                     '+-------+-------+-------------+---------+\n'
                     '| Short Pinyin | Long Pinyin | Chinese '
                     '| Latin | Namer | Data Source | Web URL |\n'
                     '+--------------+-------------+---------+'
                     '-------+-------+-------------+---------+\n'
                     '\n%s\n' % ('=' * 100)))
                for each_result in result:
                    elements = ' | '.join(each_result)
                    self.scrolled_text_5.insert('end', elements)
                    self.scrolled_text_5.insert('end', ('\n%s\n' % ('-' * 100)))
    def _do_query(self):
        """Command of Do Query button.

        Picks a strategy mask based on the query type (exact hit, Latin
        name with spaces, English, or Chinese) and runs the search.
        """
        query = self.input_box.get().strip()
        query_word_object = QueryWord(
            self.keys_for_all, self.dict_for_all)
        result_dict = {'0': [], '1': [], '2': [], '3': ''}
        if query:
            # If name match keys in dictionary, just do strategy 1 & 2
            if query in self.dict_for_all:
                result_dict = query_word_object. \
                    query_all_four(
                        query,
                        turn_on_mode=(True, True, False, False))
            # No exactly match
            else:
                # Latin
                # Dirty trick to check if query is Latin name (space between words)
                if ' ' in query:
                    result_dict = query_word_object. \
                        query_all_four(
                            query,
                            turn_on_mode=(True, True, True, False))
                # English
                # query starts with English letters, not Chinese
                # We can apply similarity search and spell check for only English
                elif query[0] in string.printable:
                    result_dict = query_word_object. \
                        query_all_four(
                            query,
                            turn_on_mode=(True, True, True, True))
                else:
                    # For Chinese, fuzzy search does not work.
                    # No similarity check and spell check.
                    # Chinese name may encounter Unicode related errors
                    result_dict = query_word_object. \
                        query_all_four(
                            query,
                            turn_on_mode=(True, True, True, False))
        return result_dict
    def show_candidates_for_multi_processing(self):
        """Run the query and display each strategy's hits in its listbox."""
        result_dict = self._do_query()
        # Display outcome to candidate widget 1
        self.listbox1.delete('0', 'end')
        for item in result_dict['0']:
            self.listbox1.insert('end', item)
        # Display outcome to candidate widget 2
        self.listbox2.delete('0', 'end')
        for item in result_dict['1']:
            self.listbox2.insert('end', item)
        # Display outcome to candidate widget 3
        self.listbox3.delete('0', 'end')
        for item in result_dict['2']:
            self.listbox3.insert('end', item)
        # Display outcome to candidate widget 4
        self.listbox4.delete('0', 'end')
        self.listbox4.insert('end', result_dict['3'])
    def print_help(self):
        """Replace the output area content with the usage text."""
        # Bug fix: "0.1" -> "1.0" (start-of-text index).
        self.scrolled_text_5.delete("1.0", "end-1c")
        self.scrolled_text_5.insert('end', USAGE_INFO)
def dump_with_pickle(keys_for_all, dict_for_all):
    """Dump the key list and lookup dictionary to raw pickle files.

    Generally this only needs to be done once.

    :param keys_for_all: list of lookup keys to serialize.
    :param dict_for_all: mapping of key -> result rows to serialize.

    Bug fix: the previous version unconditionally overwrote both
    parameters with a fresh get_dict_for_all_columns() call, silently
    ignoring whatever the caller passed in. The caller's data is now
    used when supplied; regeneration only happens as a fallback.
    """
    if not (keys_for_all and dict_for_all):
        keys_for_all, dict_for_all = get_dict_for_all_columns()
    with open(PICKLE_KEYS_FILE, 'wb') as f_out:
        f_out.write(pickle.dumps(keys_for_all))
    with open(PICKLE_DICT_FILE, 'wb') as f_out:
        f_out.write(pickle.dumps(dict_for_all))
def load_with_pickle(pickle_keys_file, pickle_dict_file):
    """Load the key list and lookup dict back from raw pickle files.

    :param pickle_keys_file: path of the pickled key list.
    :param pickle_dict_file: path of the pickled dictionary.
    :return: tuple (keys_for_all, dict_for_all).
    """
    with open(pickle_keys_file, 'rb') as key_file:
        keys_for_all = pickle.loads(key_file.read())
    with open(pickle_dict_file, 'rb') as dict_file:
        dict_for_all = pickle.loads(dict_file.read())
    return keys_for_all, dict_for_all
def gui_main():
    """Entry point for the GUI: load data, train spell check, run the app."""
    # Data comes from the pre-generated pickle files; reading the plain
    # text source via get_dict_for_all_columns() is the slower alternative.
    keys, lookup = load_with_pickle(PICKLE_KEYS_FILE,
                                    PICKLE_DICT_FILE)
    # The spell checker is module-global so the query strategies can use it.
    global TRAINED_OBJECT
    TRAINED_OBJECT = SpellCheck(keys)
    window = AutocompleteGUI(keys_for_all=keys,
                             dict_for_all=lookup)
    window.mainloop()
def main():
    """Command-line entry point: just (re)build the lookup dictionary."""
    get_dict_for_all_columns()
# Launch the Tkinter GUI by default; swap in main() to only rebuild the
# lookup dictionary from the command line.
if __name__ == '__main__':
    gui_main()
    # main()
|
# coding=utf-8
# Intensity scan for the "Hydrogen shield" laser setup: runs the
# simulation for a range of laser powers and renders a particle-density
# animation for each run.
from pythonpic import plotting_parser
from pythonpic.configs.run_laser import laser, impulse_duration, n_macroparticles, plots, number_cells
from pythonpic.visualization.plotting import plots as general_plots
from pythonpic.visualization.animation import ParticleDensityAnimation
args = plotting_parser("Hydrogen shield")
perturbation_amplitude = 0
# range(23, 21, -1) scans intensities 1e23 and 1e22.
powers = range(23, 21, -1)
for power in powers:
    intensity = 10**power
    # Single (particle count, cell count) configuration; the doubled-cell
    # variant is kept commented out for reference.
    for number_particles, n_cells in [
        [75000, int(number_cells)], #
        # [75000, int(number_cells*2)], #
    ]:
        # NOTE(review): lazy_run presumably skips the run if cached
        # results exist -- confirm against pythonpic.
        s = laser(f"{number_particles}_{n_cells}_run_{power}_{perturbation_amplitude}", number_particles, n_cells, impulse_duration,
                  intensity, perturbation_amplitude).lazy_run()
        plots(s, *args, frames="few", animation_type=ParticleDensityAnimation)
        # Drop the (large) simulation object before the next iteration.
        del s
Laser polarization in full laser run
# coding=utf-8
# Polarization scan for the "Hydrogen shield" laser setup: runs the
# simulation for each (polarization, power) combination and renders a
# fast animation for each run.
from pythonpic import plotting_parser
from pythonpic.configs.run_laser import laser, impulse_duration, n_macroparticles, plots, number_cells
from pythonpic.visualization.plotting import plots as general_plots
from pythonpic.visualization.animation import ParticleDensityAnimation, FastAnimation
args = plotting_parser("Hydrogen shield")
perturbation_amplitude = 0
# Intensities 1e21, 1e22, 1e23 crossed with two polarization modes.
powers = [21, 22, 23]
polarizations = ["Ey", "Circular"]
for polarization in polarizations:
    for power in powers:
        intensity = 10**power
        for number_particles, n_cells in [
            [75000, int(number_cells)],
        ]:
            # NOTE(review): lazy_run presumably skips the run if cached
            # results exist -- confirm against pythonpic.
            s = laser(f"{number_particles}_{n_cells}_run_{power}_{polarization}", number_particles, n_cells, impulse_duration,
                      intensity, perturbation_amplitude, laser_polarization=polarization).lazy_run()
            plots(s, *args, frames="few", animation_type=FastAnimation)
            # Drop the (large) simulation object before the next iteration.
            del s
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Erlang(AutotoolsPackage):
    """
    Erlang is a programming language and runtime system for building
    massively scalable soft real-time systems with requirements on
    high availability.
    """
    homepage = "https://erlang.org/"
    url = "https://erlang.org/download/otp_src_22.2.tar.gz"
    version('23.0', sha256='42dcf3c721f4de59fe74ae7b65950c2174c46dc8d1dd4e27c0594d86f606a635')
    version('22.2', sha256='89c2480cdac566065577c82704a48e10f89cf2e6ca5ab99e1cf80027784c678f')
    version('22.1', sha256='cd33a102cbac6dd1c7b1e7a9a0d82d13587771fac4e96e8fff92e403d15e32c8')
    version('22.0', sha256='042e168d74055a501c75911694758a30597446accd8c82ec569552b9e9fcd272')
    version('21.3', sha256='69a743c4f23b2243e06170b1937558122142e47c8ebe652be143199bfafad6e4')
    version('21.2', sha256='f6b07bf8e6705915679a63363ce80faaa6b7c231e7236cde443d6445f7430334')
    # Autotools toolchain is required to (re)generate the build system.
    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('m4', type='build')
    depends_on('libtool', type='build')
    # Missing link-time dependency: ncurses (see spack PR #17761).
    depends_on('ncurses', type='link')
erlang: add depends_on('ncurses', type='link') (#17761)
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Erlang(AutotoolsPackage):
    """
    Erlang is a programming language and runtime system for building
    massively scalable soft real-time systems with requirements on
    high availability.
    """
    homepage = "https://erlang.org/"
    url = "https://erlang.org/download/otp_src_22.2.tar.gz"
    # Checksums are sha256 sums of the upstream source tarballs.
    version('23.0', sha256='42dcf3c721f4de59fe74ae7b65950c2174c46dc8d1dd4e27c0594d86f606a635')
    version('22.2', sha256='89c2480cdac566065577c82704a48e10f89cf2e6ca5ab99e1cf80027784c678f')
    version('22.1', sha256='cd33a102cbac6dd1c7b1e7a9a0d82d13587771fac4e96e8fff92e403d15e32c8')
    version('22.0', sha256='042e168d74055a501c75911694758a30597446accd8c82ec569552b9e9fcd272')
    version('21.3', sha256='69a743c4f23b2243e06170b1937558122142e47c8ebe652be143199bfafad6e4')
    version('21.2', sha256='f6b07bf8e6705915679a63363ce80faaa6b7c231e7236cde443d6445f7430334')
    # Autotools toolchain is required to (re)generate the build system.
    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('m4', type='build')
    depends_on('libtool', type='build')
    # ncurses is needed at link time (added in spack PR #17761).
    depends_on('ncurses', type='link')
|
import re
import os
import sys
import pty
import time
import shlex
import select
import signal
import threading
import saga.utils.logger
import saga.utils.timeout_gc
import saga.exceptions as se
# --------------------------------------------------------------------
#
_CHUNKSIZE = 1024 # default size of each read
_POLLDELAY = 0.01 # seconds in between read attempts
_DEBUG_MAX = 600
# --------------------------------------------------------------------
#
class PTYProcess (object) :
"""
This class spawns a process, providing that child with pty I/O channels --
it will maintain stdin, stdout and stderr channels to the child. All
write-like operations operate on the stdin, all read-like operations operate
on the stdout stream. Data from the stderr stream are at this point
redirected to the stdout channel.
Example::
# run an interactive client process
pty = PTYProcess ("/usr/bin/ssh -t localhost")
# check client's I/O for one of the following patterns (prompts).
# Then search again.
n, match = pty.find (['password\s*:\s*$',
'want to continue connecting.*\(yes/no\)\s*$',
'[\$#>]\s*$'])
while True :
if n == 0 :
# found password prompt - tell the secret
pty.write ("secret\\n")
n, _ = pty.find (['password\s*:\s*$',
'want to continue connecting.*\(yes/no\)\s*$',
'[\$#>]\s*$'])
elif n == 1 :
# found request to accept host key - sure we do... (who checks
# those keys anyways...?). Then search again.
pty.write ("yes\\n")
n, _ = pty.find (['password\s*:\s*$',
'want to continue connecting.*\(yes/no\)\s*$',
'[\$#>]\s*$'])
elif n == 2 :
# found shell prompt! Wohoo!
break
while True :
# go full Dornroeschen (Sleeping Beauty)...
pty.alive (recover=True) or break # check / restart process
pty.find (['[\$#>]\s*$']) # find shell prompt
pty.write ("/bin/sleep "100 years"\\n") # sleep! SLEEEP!
# something bad happened
print pty.autopsy ()
The managed child process is under control of a Timeout Garbage Collector
(:class:`saga.utils.timeout_gc.TimeoutGC`), which will terminate the child
after some inactivity period. The child will be automatically restarted on
the next activity attempts. To support orderly process bootstrapping, users
of the :class:`PTYProcess` class should register hooks for process
initialization and finalization (:func:`set_initialize_hook` and
:func:`set_finalize_hook`). The finalization hook may operate on a dead
child process, and should be written in a way that this does not lead to an
error (which would abort the restart attempt).
If the child process dies on its own, or is terminated by a third party, the
class will also attempt to restart the child. In order to not interfere
with the process state at unexpected points, this will only happen during
explicit :func:`alive` checks, if the `recover` parameter is set to `True`
(`False` by default). This restart mechanism will be used up to
`recover_max` times in a row, any successful activity will reset the recover
counter though. The recover process will invoke both the finalization and
initialization hooks.
"""
    # ----------------------------------------------------------------
    #
    def __init__ (self, command, logger=None) :
        """
        The class constructor, which runs (execvpe) command in a separately
        forked process. The new process will inherit the environment of the
        application process.

        :type command: string or list of strings
        :param command: The given command is what is run as a child, and
        fed/drained via pty pipes. If given as string, command is split into an
        array of strings, using :func:`shlex.split`.

        :type logger: :class:`saga.utils.logger.Logger` instance
        :param logger: logger stream to send status messages to.

        :raises saga.exceptions.BadParameter: if command is empty or not
            a string/list.
        :raises saga.exceptions.NoSuccess: if pty or process creation fails.
        """
        # NOTE: Python 2 code -- `basestring` does not exist in Python 3.
        if isinstance (command, basestring) :
            command = shlex.split (command)
        if not isinstance (command, list) :
            raise se.BadParameter ("PTYProcess expects string or list command")
        if len(command) < 1 :
            raise se.BadParameter ("PTYProcess expects non-empty command")
        self.command = command # list of strings to run()
        self.logger = logger
        self.cache = "" # data cache
        self.child = None # the child pid, as returned by os.fork()
        self.ptyio = None # the process' io channel, from pty.fork()
        self.exit_code = None # child died with code (may be revived)
        self.exit_signal = None # child kill by signal (may be revived)
        self.initialize_hook = None
        self.finalize_hook = None
        self.recover_max = 3 # TODO: make configure option. This does not
        self.recover_attempts = 0 # apply for recovers triggered by gc_timeout!
        if not self.logger :
            self.logger = saga.utils.logger.getLogger ('PTYProcess')
        # register this process instance for timeout garbage collection
        self.gc = saga.utils.timeout_gc.TimeoutGC ()
        self.gc.register (self, self.initialize, self.finalize)
        # fork the child right away; any failure is wrapped as NoSuccess.
        try :
            self.initialize ()
        except Exception as e :
            raise se.NoSuccess ("pty or process creation failed (%s)" % e)
    # --------------------------------------------------------------------
    #
    def __del__ (self) :
        """
        Need to free pty's on destruction, otherwise we might run out of
        them (see cat /proc/sys/kernel/pty/max)
        """
        self.logger.error ("pty __del__")
        # self.logger.trace ()
        # Bare except is deliberate here: __del__ must never raise, and
        # interpreter shutdown may have torn down the gc module already.
        try :
            self.gc.unregister (self)
            self.finalize ()
        except :
            pass
    # ----------------------------------------------------------------------
    #
    def set_initialize_hook (self, initialize_hook) :
        """Register a callable run after each (re)creation of the child."""
        self.initialize_hook = initialize_hook
    # ----------------------------------------------------------------------
    #
    def set_finalize_hook (self, finalize_hook) :
        """Register a callable run before the child is terminated."""
        self.finalize_hook = finalize_hook
    # ----------------------------------------------------------------------
    #
    def initialize (self) :
        """Create the pty pairs, fork, and exec the command in the child.

        The child's stdin/stdout/stderr are redirected onto the pty pipe
        ends (stderr is merged into stdout); the parent keeps the other
        ends. Runs the registered initialize_hook in the parent, if any.
        """
        # NOTE: do we need to lock?
        self.logger.debug ("PTYProcess: '%s'" % ' '.join ((self.command)))
        self.parent_in, self.child_in = pty.openpty ()
        self.parent_out, self.child_out = pty.openpty ()
        # self.parent_err, self.child_err = pty.openpty ()
        self.parent_io, self.child_io = pty.openpty ()
        # create the child
        try :
            self.child = os.fork ()
        except Exception as e:
            raise se.NoSuccess ("Could not run (%s): %s" \
                             % (' '.join (self.command), e))
        if not self.child :
            # this is the child
            try :
                # close parent end of pty pipes
                os.close (self.parent_in)
                os.close (self.parent_out)
                # os.close (self.parent_err)
                # reopen child stdio unbuffered (bufsize=0; Python 2 only
                # supports unbuffered mode for these fdopen calls)
                unbuf_in = os.fdopen (sys.stdin.fileno (), 'r+', 0)
                unbuf_out = os.fdopen (sys.stdout.fileno (), 'w+', 0)
                unbuf_err = os.fdopen (sys.stderr.fileno (), 'w+', 0)
                # redirect our precious stdio; stderr is merged into the
                # stdout channel (child_out is dup'ed onto both).
                os.dup2 (self.child_in, unbuf_in.fileno ())
                os.dup2 (self.child_out, unbuf_out.fileno ())
                os.dup2 (self.child_out, unbuf_err.fileno ())
                # os.dup2 (self.child_err, unbuf_err.fileno ())
                # make a process group leader (should detach the old tty)
                os.setsid ()
                # close tty, in case we still own any:
                try :
                    os.close (os.open ("/dev/tty", os.O_RDWR | os.O_NOCTTY));
                except :
                    # was probably closed earlier, that's all right
                    pass
                # now acquire pty
                try :
                    os.close (os.open (os.ttyname (sys.stdout.fileno ()), os.O_RDWR))
                except :
                    # well, this *may* be bad - or may not, depending on the
                    # type of command one wants to run in this shell. So, we
                    # print a big fat warning, and continue
                    self.logger.error ("Unclean PTY shell setup - proceed anyway")
                    pass
                # all I/O set up, have a pty (*fingers crossed*), lift-off!
                os.execvpe (self.command[0], self.command, os.environ)
            except OSError as e:
                self.logger.error ("Could not execute (%s): %s" \
                                % (' '.join (self.command), e))
                sys.exit (-1)
        else :
            # parent
            os.close (self.child_in)
            os.close (self.child_out)
            # os.close (self.child_err)
        # check if some additional initialization routines are registered
        if self.initialize_hook :
            self.initialize_hook ()
# --------------------------------------------------------------------
#
def finalize (self) :
    """
    Kill the child process and close all of its I/O channels.

    The registered finalize hook (if any) runs first, while the child may
    still be alive, so that higher layers can perform an orderly shutdown.
    Afterwards the child is signalled with SIGTERM and then SIGKILL; errors
    from signalling an already-gone process are deliberately ignored.
    """
    # NOTE: do we need to lock?

    # give higher layers a chance to shut down cleanly first
    if self.finalize_hook :
        self.finalize_hook ()

    # escalate: ask nicely (SIGTERM), then insist (SIGKILL)
    for sig in (signal.SIGTERM, signal.SIGKILL) :
        try :
            if self.child :
                os.kill (self.child, sig)
        except OSError :
            # the child is already gone -- which is what we wanted anyway
            pass

    # mark the child as dead
    self.child = None
# --------------------------------------------------------------------
#
def wait (self) :
    """
    blocks forever until the child finishes on its own, or is getting
    killed

    The returned waitpid status word is interpreted as follows:
    stop/continue notifications are ignored; a normal exit records the
    exit code; a kill records the terminating signal.  In either terminal
    case :func:`finalize` is invoked to close all I/O channels.
    """
    with self.gc.active (self) :
        # yes, for ever and ever...
        while True :

            # hey, kiddo, whats up?  (blocking wait for a state change)
            wpid, wstat = os.waitpid (self.child, 0)

            # did we get a note about child termination?
            if 0 == wpid :
                # nope, all is well - carry on
                continue

            # Yes, we got a note.
            # Well, maybe the child fooled us and is just playing dead?
            if os.WIFSTOPPED (wstat) or \
               os.WIFCONTINUED (wstat) :
                # we don't care if someone stopped/resumed the child -- that is up
                # to higher powers. For our purposes, the child is alive. Ha!
                continue

            # not stopped, poor thing... - soooo, what happened??
            if os.WIFEXITED (wstat) :
                # child died of natural causes - perform autopsy...
                self.exit_code   = os.WEXITSTATUS (wstat)
                self.exit_signal = None

            elif os.WIFSIGNALED (wstat) :
                # murder!! Child got killed by someone! recover evidence...
                self.exit_code   = None
                self.exit_signal = os.WTERMSIG (wstat)

            # either way, its dead -- make sure it stays dead, to avoid zombie
            # apocalypse...
            self.finalize ()
            return
# --------------------------------------------------------------------
#
def alive (self, recover=False) :
    """
    try to determine if the child process is still active. If not, mark
    the child as dead and close all IO descriptors etc (:func:`finalize`).

    If `recover` is `True` and the child is indeed dead, we attempt to
    re-initialize it (:func:`initialize`). We only do that for so many
    times (`self.recover_max`) before giving up -- at that point it seems
    likely that the child exits due to a re-occurring operations condition.

    Note that upstream consumers of the :class:`PTYProcess` should be
    careful to only use `recover=True` when they can indeed handle
    a disconnected/reconnected client at that point, i.e. if there are no
    assumptions on persistent state beyond those in control of the upstream
    consumers themselves.
    """
    with self.gc.active (self) :

        # do we have a child which we can check?
        if self.child :

            # hey, kiddo, whats up?  (non-blocking status probe)
            wpid, wstat = os.waitpid (self.child, os.WNOHANG)

            # did we get a note about child termination?
            if 0 == wpid :
                # nope, all is well - carry on
                return True

            # Yes, we got a note.
            # Well, maybe the child fooled us and is just playing dead?
            if os.WIFSTOPPED (wstat) or \
               os.WIFCONTINUED (wstat) :
                # we don't care if someone stopped/resumed the child -- that is up
                # to higher powers. For our purposes, the child is alive. Ha!
                return True

            # not stopped, poor thing... - soooo, what happened??
            if os.WIFEXITED (wstat) :
                # child died of natural causes - perform autopsy...
                self.exit_code   = os.WEXITSTATUS (wstat)
                self.exit_signal = None

            elif os.WIFSIGNALED (wstat) :
                # murder!! Child got killed by someone! recover evidence...
                self.exit_code   = None
                self.exit_signal = os.WTERMSIG (wstat)

            # either way, its dead -- make sure it stays dead, to avoid zombie
            # apocalypse...
            self.finalize ()

        # check if we can attempt a post-mortem revival though
        if not recover :
            # nope, we are on holy ground - revival not allowed.
            return False

        # we are allowed to revive! So can we try one more time... pleeeease??
        # (for cats, allow up to 9 attempts; for Buddhists, always allow to
        # reincarnate, etc.)
        if self.recover_attempts >= self.recover_max :
            # nope, its gone for good - just report the sad news
            return False

        # MEDIIIIC!!!!
        self.recover_attempts += 1
        self.initialize ()

        # well, now we don't trust the child anymore, of course! So we check
        # again. Yes, this is recursive -- but note that recover_attempts get
        # incremented on every iteration, and this will eventually lead to
        # call termination (tm).
        return self.alive (recover=True)
# --------------------------------------------------------------------
#
def autopsy (self) :
    """
    return diagnostics information string for dead child processes
    """
    with self.gc.active (self) :

        if self.child :
            # Boooh!
            return "false alarm, process %s is alive!" % self.child

        # assemble the post-mortem report line by line
        report  = " exit code : %s\n" % self.exit_code
        report += " exit signal: %s\n" % self.exit_signal
        report += " last output: %s\n" % self.cache[-256:]  # FIXME: smarter selection
        return report
# --------------------------------------------------------------------
#
def read (self, size=0, timeout=0, _force=False) :
    """
    read some data from the child. By default, the method reads whatever is
    available on the next read, up to _CHUNKSIZE, but other read sizes can
    be specified.

    The method will return whatever data it has at timeout::

      timeout == 0 : return the content of the first successful read, with
                     whatever data up to 'size' have been found.
      timeout <  0 : return after first read attempt, even if no data have
                     been available.

    If no data are found, the method returns an empty string (not None).

    Note: the returned lines do *not* get '\\r' stripped.
    """
    with self.gc.active (self) :
        try:
            # start the timeout timer right now. Note that even if timeout is
            # short, and child.poll is slow, we will nevertheless attempt at
            # least one read...
            start = time.time ()
            ret   = ""

            # read until we have enough data, or hit timeout ceiling...
            while True :

                # first, lets see if we can serve the request from the cache
                if len (self.cache) :
                    if not size :
                        ret        = self.cache
                        self.cache = ""
                        return ret
                    elif size <= len (self.cache) :
                        # we don't even need all of the cache
                        ret        = self.cache[:size]
                        self.cache = self.cache[size:]
                        return ret

                # otherwise we need to read some more data, right?
                # idle wait 'til the next data chunk arrives, or 'til _POLLDELAY
                rlist, _, _ = select.select ([self.parent_out], [], [], _POLLDELAY)

                # got some data?
                for f in rlist:
                    # read only as much as we still need.
                    # FIX: readsize was computed but never used -- os.read
                    # always got _CHUNKSIZE, over-reading past 'size'.
                    readsize = _CHUNKSIZE
                    if size:
                        readsize = size - len(ret)
                    buf = os.read (f, readsize)

                    if len(buf) == 0 and sys.platform == 'darwin' :
                        # MacOS reports EOF on a dead pty -- shut down and
                        # return whatever we have.
                        # FIX: this used to call the nonexistent
                        # self.terminate() (AttributeError), and would loop
                        # forever on an empty cache; call finalize() and
                        # always return instead.
                        self.logger.debug ("read : MacOS EOF")
                        self.finalize ()
                        if len (self.cache) :
                            ret        = self.cache
                            self.cache = ""
                        return ret

                    self.cache += buf.replace ('\r', '')
                    log  = buf.replace ('\r', '')
                    log  = log.replace ('\n', '\\n')

                    # log a shortened version of large payloads
                    if len(log) > _DEBUG_MAX :
                        self.logger.debug ("read : [%5d] (%s ... %s)" \
                                        % (len(log), log[:30], log[-30:]))
                    else :
                        self.logger.debug ("read : [%5d] (%s)" \
                                        % (len(log), log))

                # lets see if we now got any data in the cache we can return
                if len (self.cache) :
                    if not size :
                        ret        = self.cache
                        self.cache = ""
                        return ret
                    elif size <= len (self.cache) :
                        # we don't even need all of the cache
                        ret        = self.cache[:size]
                        self.cache = self.cache[size:]
                        return ret

                # at this point, we do not have sufficient data -- only
                # return on timeout
                if timeout == 0 :
                    # only return if we have data
                    if len (self.cache) :
                        ret        = self.cache
                        self.cache = ""
                        return ret
                elif timeout < 0 :
                    # return whether we have data or not
                    ret        = self.cache
                    self.cache = ""
                    return ret
                else : # timeout > 0
                    # return if timeout is reached
                    if (time.time () - start) > timeout :
                        ret        = self.cache
                        self.cache = ""
                        return ret

        except Exception as e :
            raise se.NoSuccess ("read from pty process [%s] failed (%s)" \
                             % (threading.current_thread().name, e))
# ----------------------------------------------------------------
#
def find (self, patterns, timeout=0) :
    """
    This methods reads bytes from the child process until a string matching
    any of the given patterns is found. If that is found, all read data are
    returned as a string, up to (and including) the match. Note that
    pattern can match an empty string, and the call then will return just
    that, an empty string. If all patterns end with matching a newline,
    this method is effectively matching lines -- but note that '$' will also
    match the end of the (currently available) data stream.

    The call actually returns a tuple, containing the index of the matching
    pattern, and the string up to the match as described above.

    If no pattern is found before timeout, the call returns (None, None).
    Negative timeouts will block until a match is found.

    Note that the patterns are interpreted with the re.M (multi-line) and
    re.S (dot matches all) regex flags.

    Performance: the call is doing repeated string regex searches over
    whatever data it finds. On complex regexes, and large data, and small
    read buffers, this method can be expensive.

    Note: the returned data get '\\r' stripped.
    """
    try :
        start = time.time ()            # startup timestamp
        patts = []                      # compiled patterns
        data  = self.cache              # initial data to check
        self.cache = ""

        if not data :                   # empty cache?
            data = self.read (timeout=_POLLDELAY)

        # pre-compile the given patterns, to speed up matching
        for pattern in patterns :
            patts.append (re.compile (pattern, re.MULTILINE | re.DOTALL))

        # we wait forever -- there are two ways out though: data matches
        # a pattern, or timeout passes
        while True :

            # FIX: this used to test 'None == data' and then apply '+=' to
            # None, which could only raise TypeError -- refill on *empty*
            # data instead.
            if not data :
                data += self.read (timeout=_POLLDELAY)

            # check current data for any matching pattern
            for n in range (0, len(patts)) :
                match = patts[n].search (data)
                if match :
                    # a pattern matched the current data: return a tuple of
                    # pattern index and matching data. The remainder of the
                    # data is cached.
                    ret        = data[0:match.end()]
                    self.cache = data[match.end():]
                    return (n, ret.replace('\r', ''))

            # if a timeout is given, and actually passed, return a non-match
            if timeout == 0 :
                return (None, None)

            if timeout > 0 :
                if (time.time () - start) > timeout :
                    self.cache = data
                    return (None, None)

            # no match yet, still time -- read more data
            data += self.read (timeout=_POLLDELAY)

    except Exception as e :
        raise se.NoSuccess ("find from pty process [%s] failed (%s)" \
                         % (threading.current_thread().name, e))
# ----------------------------------------------------------------
#
def write (self, data) :
    """
    This method will repeatedly attempt to push the given data into the
    child's stdin pipe, until it succeeds to write all data.
    """
    try :
        with self.gc.active (self) :

            # log a flattened copy of the payload (newlines escaped,
            # carriage returns dropped); shorten very large payloads
            log = data.replace ('\n', '\\n').replace ('\r', '')
            if len(log) <= _DEBUG_MAX :
                self.logger.debug ("write: [%5d] (%s)" \
                                % (len(data), log))
            else :
                self.logger.debug ("write: [%5d] (%s ... %s)" \
                                % (len(data), log[:30], log[-30:]))

            # keep pushing until the pipe has accepted everything
            while data :

                # wait until the pty pipe is ready for (more) data
                _, wlist, _ = select.select ([], [self.parent_in], [], _POLLDELAY)

                for f in wlist :
                    # os.write reports how many bytes it accepted; drop
                    # those from the front and retry with the remainder
                    written = os.write (f, data)
                    data    = data[written:]
                    if data :
                        self.logger.info ("write: [%5d]" % written)

    except Exception as e :
        raise se.NoSuccess ("write to pty process [%s] failed (%s)" \
                         % (threading.current_thread().name, e))
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
variable clean up, catch special case
import re
import os
import sys
import pty
import time
import shlex
import select
import signal
import threading
import saga.utils.logger
import saga.utils.timeout_gc
import saga.exceptions as se
# --------------------------------------------------------------------
#
# tunables for the I/O loops below
_CHUNKSIZE = 1024  # default size of each read
_POLLDELAY = 0.01  # seconds in between read attempts
_DEBUG_MAX = 600   # max number of payload characters logged verbatim
# --------------------------------------------------------------------
#
class PTYProcess (object) :
"""
This class spawns a process, providing that child with pty I/O channels --
it will maintain stdin, stdout and stderr channels to the child. All
write-like operations operate on the stdin, all read-like operations operate
on the stdout stream. Data from the stderr stream are at this point
redirected to the stdout channel.
Example::
# run an interactive client process
pty = PTYProcess ("/usr/bin/ssh -t localhost")
# check client's I/O for one of the following patterns (prompts).
# Then search again.
n, match = pty.find (['password\s*:\s*$',
'want to continue connecting.*\(yes/no\)\s*$',
'[\$#>]\s*$'])
while True :
if n == 0 :
# found password prompt - tell the secret
pty.write ("secret\\n")
n, _ = pty.find (['password\s*:\s*$',
'want to continue connecting.*\(yes/no\)\s*$',
'[\$#>]\s*$'])
elif n == 1 :
# found request to accept host key - sure we do... (who checks
# those keys anyways...?). Then search again.
pty.write ("yes\\n")
n, _ = pty.find (['password\s*:\s*$',
'want to continue connecting.*\(yes/no\)\s*$',
'[\$#>]\s*$'])
elif n == 2 :
# found shell prompt! Wohoo!
break
while True :
# go full Dornroeschen (Sleeping Beauty)...
            if not pty.alive (recover=True) :      # check / restart process
                break
            pty.find (['[\$#>]\s*$'])              # find shell prompt
            pty.write ('/bin/sleep "100 years"\\n')  # sleep!  SLEEEP!
# something bad happened
print pty.autopsy ()
The managed child process is under control of a Timeout Garbage Collector
(:class:`saga.utils.timeout_gc.TimeoutGC`), which will terminate the child
after some inactivity period. The child will be automatically restarted on
the next activity attempts. To support orderly process bootstrapping, users
of the :class:`PTYProcess` class should register hooks for process
initialization and finalization (:func:`set_initialize_hook` and
:func:`set_finalize_hook`). The finalization hook may operate on a dead
child process, and should be written in a way that this does not lead to an
error (which would abort the restart attempt).
If the child process dies on its own, or is terminated by a third party, the
class will also attempt to restart the child. In order to not interfere
with the process state at unexpected points, this will only happen during
explicit :func:`alive` checks, if the `recover` parameter is set to `True`
(`False` by default). This restart mechanism will be used up to
`recover_max` times in a row, any successful activity will reset the recover
counter though. The recover process will invoke both the finalization and
initialization hooks.
"""
# ----------------------------------------------------------------
#
def __init__ (self, command, logger=None) :
    """
    The class constructor, which runs (execvpe) command in a separately
    forked process. The new process will inherit the environment of the
    application process.

    :type  command: string or list of strings
    :param command: The given command is what is run as a child, and
    fed/drained via pty pipes. If given as string, command is split into an
    array of strings, using :func:`shlex.split`.

    :type  logger: :class:`saga.utils.logger.Logger` instance
    :param logger: logger stream to send status messages to.

    :raises: se.BadParameter for an empty or mis-typed command,
             se.NoSuccess if pty/process creation fails.
    """
    if isinstance (command, basestring) :
        command = shlex.split (command)

    if not isinstance (command, list) :
        raise se.BadParameter ("PTYProcess expects string or list command")

    if len(command) < 1 :
        raise se.BadParameter ("PTYProcess expects non-empty command")

    self.command = command      # list of strings to run()
    self.logger  = logger

    # child-process bookkeeping
    self.cache       = ""       # data cache
    self.child       = None     # pid of the forked child process
    self.ptyio       = None     # the process' io channel, from pty.fork()
    self.exit_code   = None     # child died with code (may be revived)
    self.exit_signal = None     # child killed by signal (may be revived)

    # bootstrap / teardown callbacks (see set_*_hook)
    self.initialize_hook = None
    self.finalize_hook   = None

    self.recover_max      = 3   # TODO: make configure option. This does not
    self.recover_attempts = 0   # apply for recovers triggered by gc_timeout!

    if not self.logger :
        self.logger = saga.utils.logger.getLogger ('PTYProcess')

    # register this process instance for timeout garbage collection --
    # must happen before initialize(), so the gc can manage the restart
    self.gc = saga.utils.timeout_gc.TimeoutGC ()
    self.gc.register (self, self.initialize, self.finalize)

    try :
        self.initialize ()
    except Exception as e :
        raise se.NoSuccess ("pty or process creation failed (%s)" % e)
# --------------------------------------------------------------------
#
def __del__ (self) :
    """
    Need to free pty's on destruction, otherwise we might run out of
    them (see cat /proc/sys/kernel/pty/max)
    """
    # NOTE(review): logged at error level, presumably to make unexpected
    # destruction visible -- confirm intended log level
    self.logger.error ("pty __del__")
  # self.logger.trace ()

    try :
        self.gc.unregister (self)
        self.finalize ()
    except :
        # never raise from a destructor -- the interpreter may already be
        # shutting down
        pass
# ----------------------------------------------------------------------
#
def set_initialize_hook (self, initialize_hook) :
    """Register a callback invoked after each (re)start of the child."""
    self.initialize_hook = initialize_hook
# ----------------------------------------------------------------------
#
def set_finalize_hook (self, finalize_hook) :
    """Register a callback invoked before each child teardown.

    The hook may be called on an already-dead child and must not raise
    in that case (see class docstring).
    """
    self.finalize_hook = finalize_hook
# ----------------------------------------------------------------------
#
def initialize (self) :
    """
    Create the pty pairs, fork, and wire the child's stdio to the pty
    slave ends; the parent keeps the master ends.  stderr is merged into
    the stdout channel.  In the parent, the registered initialize hook
    (if any) runs after the fork.
    """
    # NOTE: do we need to lock?
    self.logger.debug ("PTYProcess: '%s'" % ' '.join ((self.command)))

    # one pty pair per direction (stderr handling is commented out --
    # it is folded into stdout below)
    self.parent_in,  self.child_in  = pty.openpty ()
    self.parent_out, self.child_out = pty.openpty ()
  # self.parent_err, self.child_err = pty.openpty ()
    self.parent_io,  self.child_io  = pty.openpty ()

    # create the child
    try :
        self.child = os.fork ()
    except Exception as e:
        raise se.NoSuccess ("Could not run (%s): %s" \
                         % (' '.join (self.command), e))

    if not self.child :
        # this is the child
        try :
            # close parent end of pty pipes
            os.close (self.parent_in)
            os.close (self.parent_out)
          # os.close (self.parent_err)

            # reopen child stdio unbuffered (buffsize=0)
            unbuf_in  = os.fdopen (sys.stdin.fileno  (), 'r+', 0)
            unbuf_out = os.fdopen (sys.stdout.fileno (), 'w+', 0)
            unbuf_err = os.fdopen (sys.stderr.fileno (), 'w+', 0)

            # redirect our precious stdio -- note that stderr is dup'ed
            # onto the stdout pty, merging both streams
            os.dup2 (self.child_in,  unbuf_in.fileno  ())
            os.dup2 (self.child_out, unbuf_out.fileno ())
            os.dup2 (self.child_out, unbuf_err.fileno ())
          # os.dup2 (self.child_err, unbuf_err.fileno ())

            # make a process group leader (should close tty)
            os.setsid ()

            # close tty, in case we still own any:
            try :
                os.close (os.open ("/dev/tty", os.O_RDWR | os.O_NOCTTY));
            except :
                # was probably closed earlier, that's all right
                pass

            # now acquire pty (opening the tty makes it our controlling tty)
            try :
                os.close (os.open (os.ttyname (sys.stdout.fileno ()), os.O_RDWR))
            except :
                # well, this *may* be bad - or may not, depending on the
                # type of command one wants to run in this shell. So, we
                # print a big fat warning, and continue
                self.logger.error ("Unclean PTY shell setup - proceed anyway")
                pass

            # all I/O set up, have a pty (*fingers crossed*), lift-off!
            os.execvpe (self.command[0], self.command, os.environ)

        except OSError as e:
            self.logger.error ("Could not execute (%s): %s" \
                            % (' '.join (self.command), e))
            sys.exit (-1)

    else :
        # parent: close the child ends of the pipes
        os.close (self.child_in)
        os.close (self.child_out)
      # os.close (self.child_err)

        # check if some additional initialization routines are registered
        if self.initialize_hook :
            self.initialize_hook ()
# --------------------------------------------------------------------
#
def finalize (self) :
""" kill the child, close all I/O channels """
# NOTE: do we need to lock?
# as long as the chiuld lives, run any higher level shutdown routine.
if self.finalize_hook :
self.finalize_hook ()
# now we can safely kill the child process, and close all I/O channels
try :
if self.child :
os.kill (self.child, signal.SIGTERM)
except OSError :
pass
try :
if self.child :
os.kill (self.child, signal.SIGKILL)
except OSError :
pass
self.child = None
## try :
## os.close (self.parent_in)
## except OSError :
## pass
## try :
## os.close (self.parent_out)
## except OSError :
## pass
# try :
# os.close (self.parent_err)
# except OSError :
# pass
# --------------------------------------------------------------------
#
def wait (self) :
    """
    blocks forever until the child finishes on its own, or is getting
    killed
    """
    with self.gc.active (self) :

        while True :

            # blocking wait for any state change of the child
            wpid, wstat = os.waitpid (self.child, 0)

            if wpid == 0 :
                # no termination news yet -- keep waiting
                continue

            # stop/continue events do not count as termination; as far as
            # we are concerned the child is still alive
            if os.WIFSTOPPED (wstat) or os.WIFCONTINUED (wstat) :
                continue

            # the child is gone -- record *how* it went
            if os.WIFEXITED (wstat) :
                # normal exit: remember the exit code
                self.exit_code   = os.WEXITSTATUS (wstat)
                self.exit_signal = None
            elif os.WIFSIGNALED (wstat) :
                # killed: remember the terminating signal
                self.exit_code   = None
                self.exit_signal = os.WTERMSIG (wstat)

            # close I/O channels etc., and avoid zombies
            self.finalize ()
            return
# --------------------------------------------------------------------
#
def alive (self, recover=False) :
    """
    try to determine if the child process is still active. If not, mark
    the child as dead and close all IO descriptors etc (:func:`finalize`).

    If `recover` is `True` and the child is indeed dead, we attempt to
    re-initialize it (:func:`initialize`). We only do that for so many
    times (`self.recover_max`) before giving up -- at that point it seems
    likely that the child exits due to a re-occurring operations condition.

    Note that upstream consumers of the :class:`PTYProcess` should be
    careful to only use `recover=True` when they can indeed handle
    a disconnected/reconnected client at that point, i.e. if there are no
    assumptions on persistent state beyond those in control of the upstream
    consumers themselves.
    """
    with self.gc.active (self) :

        # do we have a child which we can check?
        if self.child :

            # hey, kiddo, whats up?  (non-blocking status probe)
            wpid, wstat = os.waitpid (self.child, os.WNOHANG)

            # did we get a note about child termination?
            if 0 == wpid :
                # nope, all is well - carry on
                return True

            # Yes, we got a note.
            # Well, maybe the child fooled us and is just playing dead?
            if os.WIFSTOPPED (wstat) or \
               os.WIFCONTINUED (wstat) :
                # we don't care if someone stopped/resumed the child -- that is up
                # to higher powers. For our purposes, the child is alive. Ha!
                return True

            # not stopped, poor thing... - soooo, what happened??
            if os.WIFEXITED (wstat) :
                # child died of natural causes - perform autopsy...
                self.exit_code   = os.WEXITSTATUS (wstat)
                self.exit_signal = None

            elif os.WIFSIGNALED (wstat) :
                # murder!! Child got killed by someone! recover evidence...
                self.exit_code   = None
                self.exit_signal = os.WTERMSIG (wstat)

            # either way, its dead -- make sure it stays dead, to avoid zombie
            # apocalypse...
            self.finalize ()

        # check if we can attempt a post-mortem revival though
        if not recover :
            # nope, we are on holy ground - revival not allowed.
            return False

        # we are allowed to revive! So can we try one more time... pleeeease??
        # (for cats, allow up to 9 attempts; for Buddhists, always allow to
        # reincarnate, etc.)
        if self.recover_attempts >= self.recover_max :
            # nope, its gone for good - just report the sad news
            return False

        # MEDIIIIC!!!!
        self.recover_attempts += 1
        self.initialize ()

        # well, now we don't trust the child anymore, of course! So we check
        # again. Yes, this is recursive -- but note that recover_attempts get
        # incremented on every iteration, and this will eventually lead to
        # call termination (tm).
        return self.alive (recover=True)
# --------------------------------------------------------------------
#
def autopsy (self) :
"""
return diagnostics information string for dead child processes
"""
with self.gc.active (self) :
if self.child :
# Boooh!
return "false alarm, process %s is alive!" % self.child
ret = ""
ret += " exit code : %s\n" % self.exit_code
ret += " exit signal: %s\n" % self.exit_signal
ret += " last output: %s\n" % self.cache[-256:] # FIXME: smarter selection
return ret
# --------------------------------------------------------------------
#
def read (self, size=0, timeout=0, _force=False) :
    """
    read some data from the child. By default, the method reads whatever is
    available on the next read, up to _CHUNKSIZE, but other read sizes can
    be specified.

    The method will return whatever data it has at timeout::

      timeout == 0 : return the content of the first successful read, with
                     whatever data up to 'size' have been found.
      timeout <  0 : return after first read attempt, even if no data have
                     been available.

    If no data are found, the method returns an empty string (not None).

    Note: the returned lines do *not* get '\\r' stripped.
    """
    with self.gc.active (self) :
        try:
            # start the timeout timer right now. Note that even if timeout is
            # short, and child.poll is slow, we will nevertheless attempt at
            # least one read...
            start = time.time ()
            # FIX: 'ret' was previously unbound here, so the readsize
            # computation below raised NameError whenever 'size' was given.
            ret   = ""

            # read until we have enough data, or hit timeout ceiling...
            while True :

                # first, lets see if we can serve the request from the cache
                if len (self.cache) :
                    if not size :
                        ret        = self.cache
                        self.cache = ""
                        return ret
                    elif size <= len (self.cache) :
                        # we don't even need all of the cache
                        ret        = self.cache[:size]
                        self.cache = self.cache[size:]
                        return ret

                # otherwise we need to read some more data, right?
                # idle wait 'til the next data chunk arrives, or 'til _POLLDELAY
                rlist, _, _ = select.select ([self.parent_out], [], [], _POLLDELAY)

                # got some data?
                for f in rlist:
                    # read only as much as we still need.
                    # FIX: readsize was computed but never used -- os.read
                    # always got _CHUNKSIZE, over-reading past 'size'.
                    readsize = _CHUNKSIZE
                    if size:
                        readsize = size - len(ret)
                    buf = os.read (f, readsize)

                    if len(buf) == 0 and sys.platform == 'darwin' :
                        # MacOS reports EOF on a dead pty -- shut down and
                        # return whatever we have.
                        # FIX: this used to call the nonexistent
                        # self.terminate() (AttributeError); call finalize()
                        # instead, and always return so we cannot spin on a
                        # dead pty.
                        self.logger.debug ("read : MacOS EOF")
                        self.finalize ()
                        if len (self.cache) :
                            ret        = self.cache
                            self.cache = ""
                        return ret

                    self.cache += buf.replace ('\r', '')
                    log  = buf.replace ('\r', '')
                    log  = log.replace ('\n', '\\n')

                    # log a shortened version of large payloads
                    if len(log) > _DEBUG_MAX :
                        self.logger.debug ("read : [%5d] (%s ... %s)" \
                                        % (len(log), log[:30], log[-30:]))
                    else :
                        self.logger.debug ("read : [%5d] (%s)" \
                                        % (len(log), log))

                # lets see if we now got any data in the cache we can return
                if len (self.cache) :
                    if not size :
                        ret        = self.cache
                        self.cache = ""
                        return ret
                    elif size <= len (self.cache) :
                        # we don't even need all of the cache
                        ret        = self.cache[:size]
                        self.cache = self.cache[size:]
                        return ret

                # at this point, we do not have sufficient data -- only
                # return on timeout
                if timeout == 0 :
                    # only return if we have data
                    if len (self.cache) :
                        ret        = self.cache
                        self.cache = ""
                        return ret
                elif timeout < 0 :
                    # return whether we have data or not
                    ret        = self.cache
                    self.cache = ""
                    return ret
                else : # timeout > 0
                    # return if timeout is reached
                    if (time.time () - start) > timeout :
                        ret        = self.cache
                        self.cache = ""
                        return ret

        except Exception as e :
            raise se.NoSuccess ("read from pty process [%s] failed (%s)" \
                             % (threading.current_thread().name, e))
# ----------------------------------------------------------------
#
def find (self, patterns, timeout=0) :
"""
This methods reads bytes from the child process until a string matching
any of the given patterns is found. If that is found, all read data are
returned as a string, up to (and including) the match. Note that
pattern can match an empty string, and the call then will return just
that, an empty string. If all patterns end with matching a newline,
this method is effectively matching lines -- but note that '$' will also
match the end of the (currently available) data stream.
The call actually returns a tuple, containing the index of the matching
pattern, and the string up to the match as described above.
If no pattern is found before timeout, the call returns (None, None).
Negative timeouts will block until a match is found
Note that the pattern are interpreted with the re.M (multi-line) and
re.S (dot matches all) regex flags.
Performance: the call is doing repeated string regex searches over
whatever data it finds. On complex regexes, and large data, and small
read buffers, this method can be expensive.
Note: the returned data get '\\\\r' stripped.
"""
try :
start = time.time () # startup timestamp
ret = [] # array of read lines
patts = [] # compiled patterns
data = self.cache # initial data to check
self.cache = ""
if not data : # empty cache?
data = self.read (timeout=_POLLDELAY)
# pre-compile the given pattern, to speed up matching
for pattern in patterns :
patts.append (re.compile (pattern, re.MULTILINE | re.DOTALL))
# we wait forever -- there are two ways out though: data matches
# a pattern, or timeout passes
while True :
# time.sleep (0.1)
# skip non-lines
if None == data :
data += self.read (timeout=_POLLDELAY)
# check current data for any matching pattern
# print ">>%s<<" % data
for n in range (0, len(patts)) :
match = patts[n].search (data)
# print "==%s==" % patterns[n]
if match :
# a pattern matched the current data: return a tuple of
# pattern index and matching data. The remainder of the
# data is cached.
ret = data[0:match.end()]
self.cache = data[match.end():]
# print "~~match!~~ %s" % data[match.start():match.end()]
# print "~~match!~~ %s" % (len(data))
# print "~~match!~~ %s" % (str(match.span()))
# print "~~match!~~ %s" % (ret)
return (n, ret.replace('\r', ''))
# if a timeout is given, and actually passed, return a non-match
if timeout == 0 :
return (None, None)
if timeout > 0 :
now = time.time ()
if (now-start) > timeout :
self.cache = data
return (None, None)
# no match yet, still time -- read more data
data += self.read (timeout=_POLLDELAY)
except Exception as e :
raise se.NoSuccess ("find from pty process [%s] failed (%s)" \
% (threading.current_thread().name, e))
# ----------------------------------------------------------------
#
def write (self, data) :
    """
    This method will repeatedly attempt to push the given data into the
    child's stdin pipe, until it succeeds to write all data.
    """
    try :
        with self.gc.active (self) :

            # log a flattened copy of the payload (newlines escaped,
            # carriage returns dropped); shorten very large payloads
            log = data.replace ('\n', '\\n').replace ('\r', '')
            if len(log) <= _DEBUG_MAX :
                self.logger.debug ("write: [%5d] (%s)" \
                                % (len(data), log))
            else :
                self.logger.debug ("write: [%5d] (%s ... %s)" \
                                % (len(data), log[:30], log[-30:]))

            # keep pushing until the pipe has accepted everything
            while data :

                # wait until the pty pipe is ready for (more) data
                _, wlist, _ = select.select ([], [self.parent_in], [], _POLLDELAY)

                for f in wlist :
                    # os.write reports the number of bytes accepted; drop
                    # those from the front and retry with the remainder
                    written = os.write (f, data)
                    data    = data[written:]
                    if data :
                        self.logger.info ("write: [%5d]" % written)

    except Exception as e :
        raise se.NoSuccess ("write to pty process [%s] failed (%s)" \
                         % (threading.current_thread().name, e))
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
from __future__ import unicode_literals
from dvc.ignore import DvcIgnore
from dvc.utils.compat import str, basestring, urlparse
import os
import json
import logging
import tempfile
import itertools
from operator import itemgetter
from multiprocessing import cpu_count
from functools import partial
from concurrent.futures import ThreadPoolExecutor
from copy import copy
from dvc.remote.slow_link_detection import slow_link_guard
import dvc.prompt as prompt
from dvc.config import Config
from dvc.exceptions import (
DvcException,
ConfirmRemoveError,
DvcIgnoreInCollectedDirError,
)
from dvc.progress import Tqdm, TqdmThreadPoolExecutor
from dvc.utils import LARGE_DIR_SIZE, tmp_fname, move, relpath, makedirs
from dvc.state import StateNoop
from dvc.path_info import PathInfo, URLInfo
from dvc.utils.http import open_url
logger = logging.getLogger(__name__)
# status of a cache entry when comparing local and remote presence
STATUS_OK = 1        # exists on both sides
STATUS_MISSING = 2   # exists on neither side
STATUS_NEW = 3       # exists locally only
STATUS_DELETED = 4   # exists remotely only

# maps a (local_exists, remote_exists) pair to one of the STATUS_* codes
STATUS_MAP = {
    # (local_exists, remote_exists)
    (True, True): STATUS_OK,
    (False, False): STATUS_MISSING,
    (True, False): STATUS_NEW,
    (False, True): STATUS_DELETED,
}
class DataCloudError(DvcException):
    """ Data Cloud exception """

    def __init__(self, msg):
        # prefix every message so data-sync failures are recognizable
        message = "Data sync error: {}".format(msg)
        super(DataCloudError, self).__init__(message)
class RemoteCmdError(DvcException):
    """Raised when a command run against a remote exits with a non-zero code.

    Args:
        remote: remote name/scheme the command ran against.
        cmd: the command line that was executed.
        ret: the command's non-zero return code.
        err: captured stderr output.
    """

    def __init__(self, remote, cmd, ret, err):
        # BUGFIX: the message had a stray quote after the return code
        # ("... return code {ret}': ..."); removed.
        super(RemoteCmdError, self).__init__(
            "{remote} command '{cmd}' finished with non-zero return code"
            " {ret}: {err}".format(remote=remote, cmd=cmd, ret=ret, err=err)
        )
class RemoteActionNotImplemented(DvcException):
    """Raised when a remote does not support the requested action."""

    def __init__(self, action, scheme):
        message = "{} is not supported by {} remote".format(action, scheme)
        super(RemoteActionNotImplemented, self).__init__(message)
class RemoteMissingDepsError(DvcException):
    """Raised when optional dependencies required by a remote are absent."""

    pass
class RemoteBASE(object):
    # URL scheme this remote handles; subclasses override (e.g. "s3", "ssh").
    scheme = "base"
    # Class used to represent paths on this remote.
    path_cls = URLInfo
    # Optional dependency name -> imported module (None when import failed).
    REQUIRES = {}
    # Default parallelism for transfer operations.
    JOBS = 4 * cpu_count()
    PARAM_RELPATH = "relpath"
    # Suffix marking a checksum as referring to a directory listing file.
    CHECKSUM_DIR_SUFFIX = ".dir"
    # Default parallelism for checksum computation.
    CHECKSUM_JOBS = max(1, min(4, cpu_count() // 2))
    DEFAULT_CACHE_TYPES = ["copy"]
    # No-op state by default; replaced with a real State where supported.
    state = StateNoop()
    def __init__(self, repo, config):
        """Initialize the remote from *repo* and its *config* section.

        Raises:
            RemoteMissingDepsError: if any optional dependency listed in
                ``REQUIRES`` failed to import (its value is None).
        """
        self.repo = repo
        deps_ok = all(self.REQUIRES.values())
        if not deps_ok:
            missing = [k for k, v in self.REQUIRES.items() if v is None]
            url = config.get(
                Config.SECTION_REMOTE_URL, "{}://".format(self.scheme)
            )
            msg = (
                "URL '{}' is supported but requires these missing "
                "dependencies: {}. If you have installed dvc using pip, "
                "choose one of these options to proceed: \n"
                "\n"
                " 1) Install specific missing dependencies:\n"
                " pip install {}\n"
                " 2) Install dvc package that includes those missing "
                "dependencies: \n"
                " pip install 'dvc[{}]'\n"
                " 3) Install dvc package with all possible "
                "dependencies included: \n"
                " pip install 'dvc[all]'\n"
                "\n"
                "If you have installed dvc from a binary package and you "
                "are still seeing this message, please report it to us "
                "using https://github.com/iterative/dvc/issues. Thank you!"
            ).format(url, missing, " ".join(missing), self.scheme)
            raise RemoteMissingDepsError(msg)
        core = config.get(Config.SECTION_CORE, {})
        self.checksum_jobs = core.get(
            Config.SECTION_CORE_CHECKSUM_JOBS, self.CHECKSUM_JOBS
        )
        self.protected = False
        self.no_traverse = config.get(Config.SECTION_REMOTE_NO_TRAVERSE, True)
        # Memoized checksum -> parsed .dir listing (see get_dir_cache).
        self._dir_info = {}
        types = config.get(Config.SECTION_CACHE_TYPE, None)
        if types:
            # The config value may be a comma-separated string or a list.
            if isinstance(types, str):
                types = [t.strip() for t in types.split(",")]
            self.cache_types = types
        else:
            # Copy so mutations (see _try_links) don't touch the class default.
            self.cache_types = copy(self.DEFAULT_CACHE_TYPES)
def __repr__(self):
return "{class_name}: '{path_info}'".format(
class_name=type(self).__name__,
path_info=self.path_info or "No path",
)
@classmethod
def supported(cls, config):
if isinstance(config, basestring):
url = config
else:
url = config[Config.SECTION_REMOTE_URL]
# NOTE: silently skipping remote, calling code should handle that
parsed = urlparse(url)
return parsed.scheme == cls.scheme
    @property
    def cache(self):
        # The repo-level cache object matching this remote's scheme.
        return getattr(self.repo.cache, self.scheme)
    def get_file_checksum(self, path_info):
        # Must be implemented by subclasses (checksum algorithm is
        # remote-specific).
        raise NotImplementedError
def _calculate_checksums(self, file_infos):
file_infos = list(file_infos)
with TqdmThreadPoolExecutor(
max_workers=self.checksum_jobs
) as executor:
tasks = executor.map(self.get_file_checksum, file_infos)
if len(file_infos) > LARGE_DIR_SIZE:
logger.info(
(
"Computing md5 for a large number of files. "
"This is only done once."
)
)
tasks = Tqdm(tasks, total=len(file_infos), unit="md5")
checksums = dict(zip(file_infos, tasks))
return checksums
    def _collect_dir(self, path_info):
        """Walk *path_info* and build its .dir listing.

        Returns a list of ``{checksum, relpath}`` dicts sorted by relpath
        for reproducibility.  Checksums come from state when known and are
        computed in parallel otherwise.
        """
        file_infos = set()
        for root, _dirs, files in self.walk(path_info):
            if DvcIgnore.DVCIGNORE_FILE in files:
                raise DvcIgnoreInCollectedDirError(root)
            file_infos.update(path_info / root / fname for fname in files)
        checksums = {fi: self.state.get(fi) for fi in file_infos}
        not_in_state = {
            fi for fi, checksum in checksums.items() if checksum is None
        }
        new_checksums = self._calculate_checksums(not_in_state)
        checksums.update(new_checksums)
        result = [
            {
                self.PARAM_CHECKSUM: checksums[fi],
                # NOTE: this is lossy transformation:
                # "hey\there" -> "hey/there"
                # "hey/there" -> "hey/there"
                # The latter is fine filename on Windows, which
                # will transform to dir/file on back transform.
                #
                # Yes, this is a BUG, as long as we permit "/" in
                # filenames on Windows and "\" on Unix
                self.PARAM_RELPATH: fi.relative_to(path_info).as_posix(),
            }
            for fi in file_infos
        ]
        # Sorting the list by path to ensure reproducibility
        return sorted(result, key=itemgetter(self.PARAM_RELPATH))
    def get_dir_checksum(self, path_info):
        """Compute a directory checksum and store its listing in the cache."""
        dir_info = self._collect_dir(path_info)
        checksum, tmp_info = self._get_dir_info_checksum(dir_info)
        new_info = self.cache.checksum_to_path_info(checksum)
        if self.cache.changed_cache_file(checksum):
            self.cache.makedirs(new_info.parent)
            self.cache.move(tmp_info, new_info)
        self.state.save(path_info, checksum)
        self.state.save(new_info, checksum)
        return checksum
def _get_dir_info_checksum(self, dir_info):
tmp = tempfile.NamedTemporaryFile(delete=False).name
with open(tmp, "w+") as fobj:
json.dump(dir_info, fobj, sort_keys=True)
from_info = PathInfo(tmp)
to_info = self.cache.path_info / tmp_fname("")
self.cache.upload(from_info, to_info, no_progress_bar=True)
checksum = self.get_file_checksum(to_info) + self.CHECKSUM_DIR_SUFFIX
return checksum, to_info
def get_dir_cache(self, checksum):
assert checksum
dir_info = self._dir_info.get(checksum)
if dir_info:
return dir_info
dir_info = self.load_dir_cache(checksum)
self._dir_info[checksum] = dir_info
return dir_info
    def load_dir_cache(self, checksum):
        """Download and parse the .dir listing file for *checksum*.

        Returns a list of entry dicts, or ``[]`` when the file cannot be
        parsed or has an unexpected format.
        """
        path_info = self.checksum_to_path_info(checksum)
        # NOTE(review): the NamedTemporaryFile handle stays open while the
        # same path is re-opened below -- presumably fine on POSIX, worth
        # confirming on Windows.
        fobj = tempfile.NamedTemporaryFile(delete=False)
        path = fobj.name
        to_info = PathInfo(path)
        self.cache.download(path_info, to_info, no_progress_bar=True)
        try:
            with open(path, "r") as fobj:
                d = json.load(fobj)
        except ValueError:
            logger.exception("Failed to load dir cache '{}'".format(path_info))
            return []
        finally:
            # The temp file is only needed for parsing; always clean it up.
            os.unlink(path)
        if not isinstance(d, list):
            msg = "dir cache file format error '{}' [skipping the file]"
            logger.error(msg.format(relpath(path)))
            return []
        for info in d:
            # NOTE: here is a BUG, see comment to .as_posix() below
            relative_path = PathInfo.from_posix(info[self.PARAM_RELPATH])
            # Convert stored POSIX-style relpaths to native separators.
            info[self.PARAM_RELPATH] = relative_path.fspath
        return d
@classmethod
def is_dir_checksum(cls, checksum):
return checksum.endswith(cls.CHECKSUM_DIR_SUFFIX)
    def get_checksum(self, path_info):
        """Return the checksum for *path_info*, computing it if needed.

        Returns ``None`` when the path does not exist.  Freshly computed
        checksums are saved back into state.
        """
        if not self.exists(path_info):
            return None
        checksum = self.state.get(path_info)
        # If we have dir checksum in state db, but dir cache file is lost,
        # then we need to recollect the dir via .get_dir_checksum() call below,
        # see https://github.com/iterative/dvc/issues/2219 for context
        if (
            checksum
            and self.is_dir_checksum(checksum)
            and not self.exists(self.cache.checksum_to_path_info(checksum))
        ):
            checksum = None
        if checksum:
            return checksum
        if self.isdir(path_info):
            checksum = self.get_dir_checksum(path_info)
        else:
            checksum = self.get_file_checksum(path_info)
        if checksum:
            self.state.save(path_info, checksum)
        return checksum
def save_info(self, path_info):
assert path_info.scheme == self.scheme
return {self.PARAM_CHECKSUM: self.get_checksum(path_info)}
    def changed(self, path_info, checksum_info):
        """Checks if data has changed.
        A file is considered changed if:
        - It doesn't exist on the working directory (was unlinked)
        - Checksum is not computed (saving a new file)
        - The checksum stored in the State is different from the given one
        - There's no file in the cache
        Args:
            path_info: dict with path information.
            checksum: expected checksum for this data.
        Returns:
            bool: True if data has changed, False otherwise.
        """
        logger.debug(
            "checking if '{}'('{}') has changed.".format(
                path_info, checksum_info
            )
        )
        if not self.exists(path_info):
            logger.debug("'{}' doesn't exist.".format(path_info))
            return True
        checksum = checksum_info.get(self.PARAM_CHECKSUM)
        if checksum is None:
            logger.debug("checksum for '{}' is missing.".format(path_info))
            return True
        if self.changed_cache(checksum):
            logger.debug(
                "cache for '{}'('{}') has changed.".format(path_info, checksum)
            )
            return True
        actual = self.save_info(path_info)[self.PARAM_CHECKSUM]
        if checksum != actual:
            logger.debug(
                "checksum '{}'(actual '{}') for '{}' has changed.".format(
                    checksum, actual, path_info
                )
            )
            return True
        logger.debug("'{}' hasn't changed.".format(path_info))
        return False
    def link(self, from_info, to_info):
        # Link using the currently-configured cache types.
        self._link(from_info, to_info, self.cache_types)
    def _link(self, from_info, to_info, link_types):
        # Only single files are linked; directories are linked file by file.
        assert self.isfile(from_info)
        self.makedirs(to_info.parent)
        self._try_links(from_info, to_info, link_types)
@slow_link_guard
def _try_links(self, from_info, to_info, link_types):
i = len(link_types)
while i > 0:
link_method = getattr(self, link_types[0])
try:
self._do_link(from_info, to_info, link_method)
return
except DvcException as exc:
msg = "Cache type '{}' is not supported: {}"
logger.debug(msg.format(link_types[0], str(exc)))
del link_types[0]
i -= 1
raise DvcException("no possible cache types left to try out.")
def _do_link(self, from_info, to_info, link_method):
# XXX: We are testing if file exists rather than if file is a link
if self.exists(to_info):
raise DvcException("Link '{}' already exists!".format(to_info))
else:
link_method(from_info, to_info)
if self.protected:
self.protect(to_info)
msg = "Created {}'{}': {} -> {}".format(
"protected " if self.protected else "",
self.cache_types[0],
from_info,
to_info,
)
logger.debug(msg)
    def _save_file(self, path_info, checksum, save_link=True):
        """Move *path_info* into the cache and link it back into place."""
        assert checksum
        cache_info = self.checksum_to_path_info(checksum)
        if self.changed_cache(checksum):
            self.move(path_info, cache_info)
        else:
            # Cache already has this content; replace the workspace copy
            # with a link only.
            self.remove(path_info)
        self.link(cache_info, path_info)
        if save_link:
            self.state.save_link(path_info)
        # we need to update path and cache, since in case of reflink,
        # or copy cache type moving original file results in updates on
        # next executed command, which causes md5 recalculation
        self.state.save(path_info, checksum)
        self.state.save(cache_info, checksum)
def _save_dir(self, path_info, checksum):
cache_info = self.checksum_to_path_info(checksum)
dir_info = self.get_dir_cache(checksum)
for entry in dir_info:
entry_info = path_info / entry[self.PARAM_RELPATH]
entry_checksum = entry[self.PARAM_CHECKSUM]
self._save_file(entry_info, entry_checksum, save_link=False)
self.state.save_link(path_info)
self.state.save(cache_info, checksum)
self.state.save(path_info, checksum)
    def is_empty(self, path_info):
        # Optional: remotes that can detect empty files/dirs may override.
        return False
    def isfile(self, path_info):
        """Optional: Overwrite only if the remote has a way to distinguish
        between a directory and a file.
        """
        return True
    def isdir(self, path_info):
        """Optional: Overwrite only if the remote has a way to distinguish
        between a directory and a file.
        """
        return False
    def walk(self, path_info):
        # Must yield (root, dirs, files) tuples, like os.walk.
        raise NotImplementedError
    @staticmethod
    def protect(path_info):
        # Optional: make the given path read-only; no-op by default.
        pass
    def save(self, path_info, checksum_info):
        """Save *path_info* to the cache (or re-checkout if already cached).

        Raises:
            RemoteActionNotImplemented: when *path_info* belongs to a
                different scheme than this remote.
        """
        if path_info.scheme != self.scheme:
            raise RemoteActionNotImplemented(
                "save {} -> {}".format(path_info.scheme, self.scheme),
                self.scheme,
            )
        checksum = checksum_info[self.PARAM_CHECKSUM]
        if not self.changed_cache(checksum):
            # Content already cached -- just relink the workspace copy.
            self._checkout(path_info, checksum)
            return
        self._save(path_info, checksum)
def _save(self, path_info, checksum):
to_info = self.checksum_to_path_info(checksum)
logger.info("Saving '{}' to '{}'.".format(path_info, to_info))
if self.isdir(path_info):
self._save_dir(path_info, checksum)
return
self._save_file(path_info, checksum)
    def upload(self, from_info, to_info, name=None, no_progress_bar=False):
        """Upload a local file to the remote.

        Returns:
            int: 0 on success, 1 on failure (errors are logged, not raised).
        """
        if not hasattr(self, "_upload"):
            raise RemoteActionNotImplemented("upload", self.scheme)
        if to_info.scheme != self.scheme:
            raise NotImplementedError
        if from_info.scheme != "local":
            raise NotImplementedError
        logger.debug("Uploading '{}' to '{}'".format(from_info, to_info))
        name = name or from_info.name
        try:
            self._upload(
                from_info.fspath,
                to_info,
                name=name,
                no_progress_bar=no_progress_bar,
            )
        except Exception:
            msg = "failed to upload '{}' to '{}'"
            logger.exception(msg.format(from_info, to_info))
            return 1 # 1 fail
        return 0
    def download(
        self,
        from_info,
        to_info,
        name=None,
        no_progress_bar=False,
        file_mode=None,
        dir_mode=None,
    ):
        """Download a remote file to the local filesystem.

        Downloads into a temporary file first, then moves it into place, so
        a failed transfer never clobbers *to_info*.

        Returns:
            int: 0 on success, 1 on failure (errors are logged, not raised).
        """
        if not hasattr(self, "_download"):
            raise RemoteActionNotImplemented("download", self.scheme)
        if from_info.scheme != self.scheme:
            raise NotImplementedError
        if to_info.scheme == self.scheme != "local":
            # Both sides share this (non-local) scheme: remote-side copy.
            self.copy(from_info, to_info)
            return 0
        if to_info.scheme != "local":
            raise NotImplementedError
        logger.debug("Downloading '{}' to '{}'".format(from_info, to_info))
        name = name or to_info.name
        makedirs(to_info.parent, exist_ok=True, mode=dir_mode)
        tmp_file = tmp_fname(to_info)
        try:
            self._download(
                from_info, tmp_file, name=name, no_progress_bar=no_progress_bar
            )
        except Exception:
            msg = "failed to download '{}' to '{}'"
            logger.exception(msg.format(from_info, to_info))
            return 1 # 1 fail
        move(tmp_file, to_info, mode=file_mode)
        return 0
    def open(self, path_info, mode="r", encoding=None):
        """Open a remote file for streaming via a generated download URL."""
        if hasattr(self, "_generate_download_url"):
            get_url = partial(self._generate_download_url, path_info)
            return open_url(get_url, mode=mode, encoding=encoding)
        raise RemoteActionNotImplemented("open", self.scheme)
    def remove(self, path_info):
        # Subclasses must implement deletion.
        raise RemoteActionNotImplemented("remove", self.scheme)
    def move(self, from_info, to_info):
        # Generic move: copy then remove the source.
        self.copy(from_info, to_info)
        self.remove(from_info)
    def copy(self, from_info, to_info):
        # Optional: remote-side copy.
        raise RemoteActionNotImplemented("copy", self.scheme)
    def symlink(self, from_info, to_info):
        # Optional link strategies; subclasses override where supported.
        raise RemoteActionNotImplemented("symlink", self.scheme)
    def hardlink(self, from_info, to_info):
        raise RemoteActionNotImplemented("hardlink", self.scheme)
    def reflink(self, from_info, to_info):
        raise RemoteActionNotImplemented("reflink", self.scheme)
    def exists(self, path_info):
        # Subclasses must implement existence checks.
        raise NotImplementedError
def path_to_checksum(self, path):
parts = self.path_cls(path).parts[-2:]
if not (len(parts) == 2 and parts[0] and len(parts[0]) == 2):
raise ValueError("Bad cache file path")
return "".join(parts)
def checksum_to_path_info(self, checksum):
return self.path_info / checksum[0:2] / checksum[2:]
    def list_cache_paths(self):
        # Must yield every path stored under this remote's cache.
        raise NotImplementedError
    def all(self):
        """Yield every checksum present in this remote's cache."""
        # NOTE: The list might be way too big(e.g. 100M entries, md5 for each
        # is 32 bytes, so ~3200Mb list) and we don't really need all of it at
        # the same time, so it makes sense to use a generator to gradually
        # iterate over it, without keeping all of it in memory.
        for path in self.list_cache_paths():
            try:
                yield self.path_to_checksum(path)
            except ValueError:
                # We ignore all the non-cache looking files
                pass
    def gc(self, cinfos):
        """Remove cached entries whose checksums are not referenced in *cinfos*.

        Returns:
            bool: True if at least one entry was removed.
        """
        used = self.extract_used_local_checksums(cinfos)
        if self.scheme != "":
            # Also keep checksums referenced under this remote's own scheme.
            used |= {
                info[self.PARAM_CHECKSUM]
                for info in cinfos.get(self.scheme, [])
            }
        removed = False
        for checksum in self.all():
            if checksum in used:
                continue
            path_info = self.checksum_to_path_info(checksum)
            self.remove(path_info)
            removed = True
        return removed
    def changed_cache_file(self, checksum):
        """Compare the given checksum with the (corresponding) actual one.
        - Use `State` as a cache for computed checksums
        + The entries are invalidated by taking into account the following:
            * mtime
            * inode
            * size
            * checksum
        - Remove the file from cache if it doesn't match the actual checksum
        """
        cache_info = self.checksum_to_path_info(checksum)
        actual = self.get_checksum(cache_info)
        logger.debug(
            "cache '{}' expected '{}' actual '{}'".format(
                str(cache_info), checksum, actual
            )
        )
        if not checksum or not actual:
            return True
        # Compare ignoring any suffix after the first dot (e.g. ".dir").
        if actual.split(".")[0] == checksum.split(".")[0]:
            return False
        if self.exists(cache_info):
            # Entry exists but its content hashes differently: corrupted.
            logger.warning("corrupted cache file '{}'.".format(cache_info))
            self.remove(cache_info)
        return True
    def _changed_dir_cache(self, checksum):
        """True if the dir listing or any file it references has changed."""
        if self.changed_cache_file(checksum):
            return True
        # Fast path: an up-to-date unpacked dir means nothing to re-check.
        if not self._changed_unpacked_dir(checksum):
            return False
        for entry in self.get_dir_cache(checksum):
            entry_checksum = entry[self.PARAM_CHECKSUM]
            if self.changed_cache_file(entry_checksum):
                return True
        self._update_unpacked_dir(checksum)
        return False
def changed_cache(self, checksum):
if self.is_dir_checksum(checksum):
return self._changed_dir_cache(checksum)
return self.changed_cache_file(checksum)
    def cache_exists(self, checksums, jobs=None, name=None):
        """Check if the given checksums are stored in the remote.
        There are two ways of performing this check:
        - Traverse: Get a list of all the files in the remote
        (traversing the cache directory) and compare it with
        the given checksums.
        - No traverse: For each given checksum, run the `exists`
        method and filter the checksums that aren't on the remote.
        This is done in parallel threads.
        It also shows a progress bar when performing the check.
        The reason for such an odd logic is that most of the remotes
        take much shorter time to just retrieve everything they have under
        a certain prefix (e.g. s3, gs, ssh, hdfs). Other remotes that can
        check if particular file exists much quicker, use their own
        implementation of cache_exists (see ssh, local).
        Returns:
            A list with checksums that were found in the remote
        """
        if not self.no_traverse:
            return list(set(checksums) & set(self.all()))
        with Tqdm(
            desc="Querying "
            + ("cache in " + name if name else "remote cache"),
            total=len(checksums),
            unit="file",
        ) as pbar:
            def exists_with_progress(path_info):
                # Check one path and tick the progress bar.
                ret = self.exists(path_info)
                pbar.update_desc(str(path_info))
                return ret
            with ThreadPoolExecutor(max_workers=jobs or self.JOBS) as executor:
                path_infos = map(self.checksum_to_path_info, checksums)
                in_remote = executor.map(exists_with_progress, path_infos)
                ret = list(itertools.compress(checksums, in_remote))
                return ret
def already_cached(self, path_info):
current = self.get_checksum(path_info)
if not current:
return False
return not self.changed_cache(current)
    def safe_remove(self, path_info, force=False):
        """Remove *path_info*, prompting first when its content is not cached.

        Raises:
            ConfirmRemoveError: when the user declines the prompt.
        """
        if not self.exists(path_info):
            return
        if not force and not self.already_cached(path_info):
            msg = (
                "file '{}' is going to be removed."
                " Are you sure you want to proceed?".format(str(path_info))
            )
            if not prompt.confirm(msg):
                raise ConfirmRemoveError(str(path_info))
        self.remove(path_info)
    def _checkout_file(
        self, path_info, checksum, force, progress_callback=None
    ):
        """Replace *path_info* with a link to its cached content."""
        cache_info = self.checksum_to_path_info(checksum)
        if self.exists(path_info):
            msg = "data '{}' exists. Removing before checkout."
            logger.warning(msg.format(str(path_info)))
            self.safe_remove(path_info, force=force)
        self.link(cache_info, path_info)
        self.state.save_link(path_info)
        self.state.save(path_info, checksum)
        if progress_callback:
            progress_callback(str(path_info))
    def makedirs(self, path_info):
        """Optional: Implement only if the remote needs to create
        directories before copying/linking/moving data
        """
        pass
    def _checkout_dir(
        self, path_info, checksum, force, progress_callback=None
    ):
        """Link every file of a cached directory into the workspace."""
        # Create dir separately so that dir is created
        # even if there are no files in it
        if not self.exists(path_info):
            self.makedirs(path_info)
        dir_info = self.get_dir_cache(checksum)
        logger.debug("Linking directory '{}'.".format(path_info))
        for entry in dir_info:
            relative_path = entry[self.PARAM_RELPATH]
            entry_checksum = entry[self.PARAM_CHECKSUM]
            entry_cache_info = self.checksum_to_path_info(entry_checksum)
            entry_info = path_info / relative_path
            entry_checksum_info = {self.PARAM_CHECKSUM: entry_checksum}
            if self.changed(entry_info, entry_checksum_info):
                if self.exists(entry_info):
                    self.safe_remove(entry_info, force=force)
                self.link(entry_cache_info, entry_info)
                self.state.save(entry_info, entry_checksum)
            if progress_callback:
                progress_callback(str(entry_info))
        # Drop workspace files that are not part of this directory anymore.
        self._remove_redundant_files(path_info, dir_info, force)
        self.state.save_link(path_info)
        self.state.save(path_info, checksum)
def _remove_redundant_files(self, path_info, dir_info, force):
existing_files = set(
path_info / root / fname
for root, _, files in self.walk(path_info)
for fname in files
)
needed_files = {
path_info / entry[self.PARAM_RELPATH] for entry in dir_info
}
for path in existing_files - needed_files:
self.safe_remove(path, force)
def checkout(
self, path_info, checksum_info, force=False, progress_callback=None
):
if path_info.scheme not in ["local", self.scheme]:
raise NotImplementedError
checksum = checksum_info.get(self.PARAM_CHECKSUM)
skip = False
if not checksum:
logger.warning(
"No checksum info found for '{}'. "
"It won't be created.".format(str(path_info))
)
self.safe_remove(path_info, force=force)
skip = True
elif not self.changed(path_info, checksum_info):
msg = "Data '{}' didn't change."
logger.debug(msg.format(str(path_info)))
skip = True
elif self.changed_cache(checksum):
msg = "Cache '{}' not found. File '{}' won't be created."
logger.warning(msg.format(checksum, str(path_info)))
self.safe_remove(path_info, force=force)
skip = True
if skip:
progress_callback(str(path_info), self.get_files_number(checksum))
return
msg = "Checking out '{}' with cache '{}'."
logger.debug(msg.format(str(path_info), checksum))
self._checkout(path_info, checksum, force, progress_callback)
def _checkout(
self, path_info, checksum, force=False, progress_callback=None
):
if not self.is_dir_checksum(checksum):
return self._checkout_file(
path_info, checksum, force, progress_callback=progress_callback
)
return self._checkout_dir(
path_info, checksum, force, progress_callback=progress_callback
)
def get_files_number(self, checksum):
if not checksum:
return 0
if self.is_dir_checksum(checksum):
return len(self.get_dir_cache(checksum))
return 1
    @staticmethod
    def unprotect(path_info):
        # Optional: undo protect(); no-op by default.
        pass
    def _get_unpacked_dir_names(self, checksums):
        # Optional: overridden where unpacked dir entries are tracked.
        return set()
    def extract_used_local_checksums(self, cinfos):
        """Return the set of local checksums referenced by *cinfos*."""
        from dvc.remote import RemoteLOCAL
        used = {info[RemoteLOCAL.PARAM_CHECKSUM] for info in cinfos["local"]}
        unpacked = self._get_unpacked_dir_names(used)
        return used | unpacked
    def _changed_unpacked_dir(self, checksum):
        # Optional: overridden with a real check where supported.
        return True
    def _update_unpacked_dir(self, checksum):
        # Optional: overridden to refresh the unpacked dir where supported.
        pass
remote: remove unnecessary else clause
from __future__ import unicode_literals
from dvc.ignore import DvcIgnore
from dvc.utils.compat import str, basestring, urlparse
import os
import json
import logging
import tempfile
import itertools
from operator import itemgetter
from multiprocessing import cpu_count
from functools import partial
from concurrent.futures import ThreadPoolExecutor
from copy import copy
from dvc.remote.slow_link_detection import slow_link_guard
import dvc.prompt as prompt
from dvc.config import Config
from dvc.exceptions import (
DvcException,
ConfirmRemoveError,
DvcIgnoreInCollectedDirError,
)
from dvc.progress import Tqdm, TqdmThreadPoolExecutor
from dvc.utils import LARGE_DIR_SIZE, tmp_fname, move, relpath, makedirs
from dvc.state import StateNoop
from dvc.path_info import PathInfo, URLInfo
from dvc.utils.http import open_url
logger = logging.getLogger(__name__)
STATUS_OK = 1
STATUS_MISSING = 2
STATUS_NEW = 3
STATUS_DELETED = 4
STATUS_MAP = {
# (local_exists, remote_exists)
(True, True): STATUS_OK,
(False, False): STATUS_MISSING,
(True, False): STATUS_NEW,
(False, True): STATUS_DELETED,
}
class DataCloudError(DvcException):
""" Data Cloud exception """
def __init__(self, msg):
super(DataCloudError, self).__init__("Data sync error: {}".format(msg))
class RemoteCmdError(DvcException):
def __init__(self, remote, cmd, ret, err):
super(RemoteCmdError, self).__init__(
"{remote} command '{cmd}' finished with non-zero return code"
" {ret}': {err}".format(remote=remote, cmd=cmd, ret=ret, err=err)
)
class RemoteActionNotImplemented(DvcException):
def __init__(self, action, scheme):
m = "{} is not supported by {} remote".format(action, scheme)
super(RemoteActionNotImplemented, self).__init__(m)
class RemoteMissingDepsError(DvcException):
pass
class RemoteBASE(object):
scheme = "base"
path_cls = URLInfo
REQUIRES = {}
JOBS = 4 * cpu_count()
PARAM_RELPATH = "relpath"
CHECKSUM_DIR_SUFFIX = ".dir"
CHECKSUM_JOBS = max(1, min(4, cpu_count() // 2))
DEFAULT_CACHE_TYPES = ["copy"]
state = StateNoop()
def __init__(self, repo, config):
self.repo = repo
deps_ok = all(self.REQUIRES.values())
if not deps_ok:
missing = [k for k, v in self.REQUIRES.items() if v is None]
url = config.get(
Config.SECTION_REMOTE_URL, "{}://".format(self.scheme)
)
msg = (
"URL '{}' is supported but requires these missing "
"dependencies: {}. If you have installed dvc using pip, "
"choose one of these options to proceed: \n"
"\n"
" 1) Install specific missing dependencies:\n"
" pip install {}\n"
" 2) Install dvc package that includes those missing "
"dependencies: \n"
" pip install 'dvc[{}]'\n"
" 3) Install dvc package with all possible "
"dependencies included: \n"
" pip install 'dvc[all]'\n"
"\n"
"If you have installed dvc from a binary package and you "
"are still seeing this message, please report it to us "
"using https://github.com/iterative/dvc/issues. Thank you!"
).format(url, missing, " ".join(missing), self.scheme)
raise RemoteMissingDepsError(msg)
core = config.get(Config.SECTION_CORE, {})
self.checksum_jobs = core.get(
Config.SECTION_CORE_CHECKSUM_JOBS, self.CHECKSUM_JOBS
)
self.protected = False
self.no_traverse = config.get(Config.SECTION_REMOTE_NO_TRAVERSE, True)
self._dir_info = {}
types = config.get(Config.SECTION_CACHE_TYPE, None)
if types:
if isinstance(types, str):
types = [t.strip() for t in types.split(",")]
self.cache_types = types
else:
self.cache_types = copy(self.DEFAULT_CACHE_TYPES)
def __repr__(self):
return "{class_name}: '{path_info}'".format(
class_name=type(self).__name__,
path_info=self.path_info or "No path",
)
@classmethod
def supported(cls, config):
if isinstance(config, basestring):
url = config
else:
url = config[Config.SECTION_REMOTE_URL]
# NOTE: silently skipping remote, calling code should handle that
parsed = urlparse(url)
return parsed.scheme == cls.scheme
@property
def cache(self):
return getattr(self.repo.cache, self.scheme)
def get_file_checksum(self, path_info):
raise NotImplementedError
def _calculate_checksums(self, file_infos):
file_infos = list(file_infos)
with TqdmThreadPoolExecutor(
max_workers=self.checksum_jobs
) as executor:
tasks = executor.map(self.get_file_checksum, file_infos)
if len(file_infos) > LARGE_DIR_SIZE:
logger.info(
(
"Computing md5 for a large number of files. "
"This is only done once."
)
)
tasks = Tqdm(tasks, total=len(file_infos), unit="md5")
checksums = dict(zip(file_infos, tasks))
return checksums
def _collect_dir(self, path_info):
file_infos = set()
for root, _dirs, files in self.walk(path_info):
if DvcIgnore.DVCIGNORE_FILE in files:
raise DvcIgnoreInCollectedDirError(root)
file_infos.update(path_info / root / fname for fname in files)
checksums = {fi: self.state.get(fi) for fi in file_infos}
not_in_state = {
fi for fi, checksum in checksums.items() if checksum is None
}
new_checksums = self._calculate_checksums(not_in_state)
checksums.update(new_checksums)
result = [
{
self.PARAM_CHECKSUM: checksums[fi],
# NOTE: this is lossy transformation:
# "hey\there" -> "hey/there"
# "hey/there" -> "hey/there"
# The latter is fine filename on Windows, which
# will transform to dir/file on back transform.
#
# Yes, this is a BUG, as long as we permit "/" in
# filenames on Windows and "\" on Unix
self.PARAM_RELPATH: fi.relative_to(path_info).as_posix(),
}
for fi in file_infos
]
# Sorting the list by path to ensure reproducibility
return sorted(result, key=itemgetter(self.PARAM_RELPATH))
def get_dir_checksum(self, path_info):
dir_info = self._collect_dir(path_info)
checksum, tmp_info = self._get_dir_info_checksum(dir_info)
new_info = self.cache.checksum_to_path_info(checksum)
if self.cache.changed_cache_file(checksum):
self.cache.makedirs(new_info.parent)
self.cache.move(tmp_info, new_info)
self.state.save(path_info, checksum)
self.state.save(new_info, checksum)
return checksum
def _get_dir_info_checksum(self, dir_info):
tmp = tempfile.NamedTemporaryFile(delete=False).name
with open(tmp, "w+") as fobj:
json.dump(dir_info, fobj, sort_keys=True)
from_info = PathInfo(tmp)
to_info = self.cache.path_info / tmp_fname("")
self.cache.upload(from_info, to_info, no_progress_bar=True)
checksum = self.get_file_checksum(to_info) + self.CHECKSUM_DIR_SUFFIX
return checksum, to_info
def get_dir_cache(self, checksum):
assert checksum
dir_info = self._dir_info.get(checksum)
if dir_info:
return dir_info
dir_info = self.load_dir_cache(checksum)
self._dir_info[checksum] = dir_info
return dir_info
def load_dir_cache(self, checksum):
path_info = self.checksum_to_path_info(checksum)
fobj = tempfile.NamedTemporaryFile(delete=False)
path = fobj.name
to_info = PathInfo(path)
self.cache.download(path_info, to_info, no_progress_bar=True)
try:
with open(path, "r") as fobj:
d = json.load(fobj)
except ValueError:
logger.exception("Failed to load dir cache '{}'".format(path_info))
return []
finally:
os.unlink(path)
if not isinstance(d, list):
msg = "dir cache file format error '{}' [skipping the file]"
logger.error(msg.format(relpath(path)))
return []
for info in d:
# NOTE: here is a BUG, see comment to .as_posix() below
relative_path = PathInfo.from_posix(info[self.PARAM_RELPATH])
info[self.PARAM_RELPATH] = relative_path.fspath
return d
@classmethod
def is_dir_checksum(cls, checksum):
return checksum.endswith(cls.CHECKSUM_DIR_SUFFIX)
def get_checksum(self, path_info):
if not self.exists(path_info):
return None
checksum = self.state.get(path_info)
# If we have dir checksum in state db, but dir cache file is lost,
# then we need to recollect the dir via .get_dir_checksum() call below,
# see https://github.com/iterative/dvc/issues/2219 for context
if (
checksum
and self.is_dir_checksum(checksum)
and not self.exists(self.cache.checksum_to_path_info(checksum))
):
checksum = None
if checksum:
return checksum
if self.isdir(path_info):
checksum = self.get_dir_checksum(path_info)
else:
checksum = self.get_file_checksum(path_info)
if checksum:
self.state.save(path_info, checksum)
return checksum
def save_info(self, path_info):
assert path_info.scheme == self.scheme
return {self.PARAM_CHECKSUM: self.get_checksum(path_info)}
def changed(self, path_info, checksum_info):
"""Checks if data has changed.
A file is considered changed if:
- It doesn't exist on the working directory (was unlinked)
- Checksum is not computed (saving a new file)
- The checkusm stored in the State is different from the given one
- There's no file in the cache
Args:
path_info: dict with path information.
checksum: expected checksum for this data.
Returns:
bool: True if data has changed, False otherwise.
"""
logger.debug(
"checking if '{}'('{}') has changed.".format(
path_info, checksum_info
)
)
if not self.exists(path_info):
logger.debug("'{}' doesn't exist.".format(path_info))
return True
checksum = checksum_info.get(self.PARAM_CHECKSUM)
if checksum is None:
logger.debug("checksum for '{}' is missing.".format(path_info))
return True
if self.changed_cache(checksum):
logger.debug(
"cache for '{}'('{}') has changed.".format(path_info, checksum)
)
return True
actual = self.save_info(path_info)[self.PARAM_CHECKSUM]
if checksum != actual:
logger.debug(
"checksum '{}'(actual '{}') for '{}' has changed.".format(
checksum, actual, path_info
)
)
return True
logger.debug("'{}' hasn't changed.".format(path_info))
return False
def link(self, from_info, to_info):
self._link(from_info, to_info, self.cache_types)
def _link(self, from_info, to_info, link_types):
assert self.isfile(from_info)
self.makedirs(to_info.parent)
self._try_links(from_info, to_info, link_types)
@slow_link_guard
def _try_links(self, from_info, to_info, link_types):
i = len(link_types)
while i > 0:
link_method = getattr(self, link_types[0])
try:
self._do_link(from_info, to_info, link_method)
return
except DvcException as exc:
msg = "Cache type '{}' is not supported: {}"
logger.debug(msg.format(link_types[0], str(exc)))
del link_types[0]
i -= 1
raise DvcException("no possible cache types left to try out.")
    def _do_link(self, from_info, to_info, link_method):
        """Create a single link and (optionally) protect it.

        Raises:
            DvcException: if *to_info* already exists.
        """
        # NOTE: this only tests existence, not whether the target is a link.
        if self.exists(to_info):
            raise DvcException("Link '{}' already exists!".format(to_info))
        link_method(from_info, to_info)
        if self.protected:
            self.protect(to_info)
        msg = "Created {}'{}': {} -> {}".format(
            "protected " if self.protected else "",
            self.cache_types[0],
            from_info,
            to_info,
        )
        logger.debug(msg)
def _save_file(self, path_info, checksum, save_link=True):
assert checksum
cache_info = self.checksum_to_path_info(checksum)
if self.changed_cache(checksum):
self.move(path_info, cache_info)
else:
self.remove(path_info)
self.link(cache_info, path_info)
if save_link:
self.state.save_link(path_info)
# we need to update path and cache, since in case of reflink,
# or copy cache type moving original file results in updates on
# next executed command, which causes md5 recalculation
self.state.save(path_info, checksum)
self.state.save(cache_info, checksum)
def _save_dir(self, path_info, checksum):
    """Cache every file of a directory, then record the directory itself."""
    cache_info = self.checksum_to_path_info(checksum)
    dir_info = self.get_dir_cache(checksum)
    for entry in dir_info:
        entry_info = path_info / entry[self.PARAM_RELPATH]
        entry_checksum = entry[self.PARAM_CHECKSUM]
        # save_link=False: a single dir-level link is saved below instead.
        self._save_file(entry_info, entry_checksum, save_link=False)
    self.state.save_link(path_info)
    self.state.save(cache_info, checksum)
    self.state.save(path_info, checksum)
def is_empty(self, path_info):
    """Whether *path_info* is empty; the base class cannot tell, so it
    conservatively reports False."""
    return False
def isfile(self, path_info):
    """Report whether *path_info* is a file.

    Optional: overwrite only if the remote has a way to distinguish
    between a directory and a file; the base class assumes a file.
    """
    return True
def isdir(self, path_info):
    """Report whether *path_info* is a directory.

    Optional: overwrite only if the remote has a way to distinguish
    between a directory and a file; the base class assumes not a dir.
    """
    return False
def walk(self, path_info):
    """Walk the tree rooted at *path_info* (os.walk-style); must be
    implemented by remotes that support directories."""
    raise NotImplementedError
@staticmethod
def protect(path_info):
    # Make the file read-only where supported; no-op by default.
    pass
def save(self, path_info, checksum_info):
    """Move *path_info* into the cache (or just relink when already cached).

    Raises:
        RemoteActionNotImplemented: when *path_info* has a foreign scheme.
    """
    if path_info.scheme != self.scheme:
        raise RemoteActionNotImplemented(
            "save {} -> {}".format(path_info.scheme, self.scheme),
            self.scheme,
        )
    checksum = checksum_info[self.PARAM_CHECKSUM]
    if not self.changed_cache(checksum):
        # Content already cached: only restore the workspace link.
        self._checkout(path_info, checksum)
        return
    self._save(path_info, checksum)
def _save(self, path_info, checksum):
    """Dispatch saving to the dir or file implementation."""
    to_info = self.checksum_to_path_info(checksum)
    logger.info("Saving '{}' to '{}'.".format(path_info, to_info))
    if self.isdir(path_info):
        self._save_dir(path_info, checksum)
        return
    self._save_file(path_info, checksum)
def upload(self, from_info, to_info, name=None, no_progress_bar=False):
    """Upload a local file to this remote.

    Returns:
        int: 0 on success, 1 on failure (errors are logged, not raised).
    """
    if not hasattr(self, "_upload"):
        raise RemoteActionNotImplemented("upload", self.scheme)
    if to_info.scheme != self.scheme:
        raise NotImplementedError
    if from_info.scheme != "local":
        raise NotImplementedError
    logger.debug("Uploading '{}' to '{}'".format(from_info, to_info))
    name = name or from_info.name
    try:
        self._upload(
            from_info.fspath,
            to_info,
            name=name,
            no_progress_bar=no_progress_bar,
        )
    except Exception:
        msg = "failed to upload '{}' to '{}'"
        logger.exception(msg.format(from_info, to_info))
        return 1  # 1 fail
    return 0
def download(
    self,
    from_info,
    to_info,
    name=None,
    no_progress_bar=False,
    file_mode=None,
    dir_mode=None,
):
    """Download a remote file to a local path, atomically via a temp file.

    Returns:
        int: 0 on success, 1 on failure (errors are logged, not raised).
    """
    if not hasattr(self, "_download"):
        raise RemoteActionNotImplemented("download", self.scheme)
    if from_info.scheme != self.scheme:
        raise NotImplementedError
    # Same non-local scheme on both sides: a server-side copy suffices.
    if to_info.scheme == self.scheme != "local":
        self.copy(from_info, to_info)
        return 0
    if to_info.scheme != "local":
        raise NotImplementedError
    logger.debug("Downloading '{}' to '{}'".format(from_info, to_info))
    name = name or to_info.name
    makedirs(to_info.parent, exist_ok=True, mode=dir_mode)
    # Download into a temp file first so readers never see partial data.
    tmp_file = tmp_fname(to_info)
    try:
        self._download(
            from_info, tmp_file, name=name, no_progress_bar=no_progress_bar
        )
    except Exception:
        msg = "failed to download '{}' to '{}'"
        logger.exception(msg.format(from_info, to_info))
        return 1  # 1 fail
    move(tmp_file, to_info, mode=file_mode)
    return 0
def open(self, path_info, mode="r", encoding=None):
    """Open a remote file for reading via a lazily generated download URL."""
    if hasattr(self, "_generate_download_url"):
        get_url = partial(self._generate_download_url, path_info)
        return open_url(get_url, mode=mode, encoding=encoding)
    raise RemoteActionNotImplemented("open", self.scheme)
def remove(self, path_info):
    """Delete *path_info* from the remote (must be overridden)."""
    raise RemoteActionNotImplemented("remove", self.scheme)
def move(self, from_info, to_info):
    # Default move: copy then delete the source.
    self.copy(from_info, to_info)
    self.remove(from_info)
def copy(self, from_info, to_info):
    raise RemoteActionNotImplemented("copy", self.scheme)
def symlink(self, from_info, to_info):
    raise RemoteActionNotImplemented("symlink", self.scheme)
def hardlink(self, from_info, to_info):
    raise RemoteActionNotImplemented("hardlink", self.scheme)
def reflink(self, from_info, to_info):
    raise RemoteActionNotImplemented("reflink", self.scheme)
def exists(self, path_info):
    # Every concrete remote must implement existence checks.
    raise NotImplementedError
def path_to_checksum(self, path):
    """Recover the checksum encoded in a cache *path* (".../ab/cdef" -> "abcdef").

    Raises:
        ValueError: when the path does not look like a cache entry.
    """
    tail = self.path_cls(path).parts[-2:]
    if len(tail) != 2 or not tail[0] or len(tail[0]) != 2:
        raise ValueError("Bad cache file path")
    return "".join(tail)
def checksum_to_path_info(self, checksum):
    """Map *checksum* to its cache location: first two chars form a subdir."""
    prefix, rest = checksum[:2], checksum[2:]
    return self.path_info / prefix / rest
def list_cache_paths(self):
    """Enumerate every path under the cache root (must be overridden)."""
    raise NotImplementedError
def all(self):
    """Yield every checksum present in this remote's cache.

    Implemented lazily: the listing can be huge (e.g. 100M entries of
    32-byte md5s, ~3200Mb), so checksums are streamed instead of being
    collected into one list in memory.
    """
    for path in self.list_cache_paths():
        try:
            checksum = self.path_to_checksum(path)
        except ValueError:
            # Skip files that do not look like cache entries.
            continue
        yield checksum
def gc(self, cinfos):
    """Remove cache entries not referenced by *cinfos*.

    Returns:
        bool: True when at least one entry was removed.
    """
    used = self.extract_used_local_checksums(cinfos)
    if self.scheme != "":
        # Also keep checksums tracked for this remote's own scheme.
        used |= {
            info[self.PARAM_CHECKSUM]
            for info in cinfos.get(self.scheme, [])
        }
    removed = False
    for checksum in self.all():
        if checksum in used:
            continue
        path_info = self.checksum_to_path_info(checksum)
        self.remove(path_info)
        removed = True
    return removed
def changed_cache_file(self, checksum):
    """Compare the given checksum with the (corresponding) actual one.

    - Uses `State` as a cache for computed checksums; entries are
      invalidated by mtime, inode, size and checksum.
    - Removes the cache file when it does not match the actual checksum.

    Returns:
        bool: True when the cache entry is missing or corrupted.
    """
    cache_info = self.checksum_to_path_info(checksum)
    actual = self.get_checksum(cache_info)
    logger.debug(
        "cache '{}' expected '{}' actual '{}'".format(
            str(cache_info), checksum, actual
        )
    )
    if not checksum or not actual:
        return True
    # Compare ignoring any suffix after '.' (e.g. the '.dir' marker).
    if actual.split(".")[0] == checksum.split(".")[0]:
        return False
    if self.exists(cache_info):
        logger.warning("corrupted cache file '{}'.".format(cache_info))
        self.remove(cache_info)
    return True
def _changed_dir_cache(self, checksum):
    """True when the dir entry itself or any member file cache is stale."""
    if self.changed_cache_file(checksum):
        return True
    # Skip per-file checks when the unpacked dir is known to be fresh.
    if not self._changed_unpacked_dir(checksum):
        return False
    for entry in self.get_dir_cache(checksum):
        entry_checksum = entry[self.PARAM_CHECKSUM]
        if self.changed_cache_file(entry_checksum):
            return True
    self._update_unpacked_dir(checksum)
    return False
def changed_cache(self, checksum):
    """Check whether the cache entry for *checksum* is stale or corrupted."""
    if not self.is_dir_checksum(checksum):
        return self.changed_cache_file(checksum)
    return self._changed_dir_cache(checksum)
def cache_exists(self, checksums, jobs=None, name=None):
    """Check if the given checksums are stored in the remote.

    There are two ways of performing this check:
    - Traverse: Get a list of all the files in the remote
        (traversing the cache directory) and compare it with
        the given checksums.
    - No traverse: For each given checksum, run the `exists`
        method and filter the checksums that aren't on the remote.
        This is done in parallel threads.
        It also shows a progress bar when performing the check.

    The reason for such an odd logic is that most of the remotes
    take much shorter time to just retrieve everything they have under
    a certain prefix (e.g. s3, gs, ssh, hdfs). Other remotes that can
    check if particular file exists much quicker, use their own
    implementation of cache_exists (see ssh, local).

    Returns:
        A list with checksums that were found in the remote
    """
    if not self.no_traverse:
        return list(set(checksums) & set(self.all()))
    with Tqdm(
        desc="Querying "
        + ("cache in " + name if name else "remote cache"),
        total=len(checksums),
        unit="file",
    ) as pbar:
        def exists_with_progress(path_info):
            # Wrap exists() so the progress bar advances per query.
            ret = self.exists(path_info)
            pbar.update_desc(str(path_info))
            return ret
        with ThreadPoolExecutor(max_workers=jobs or self.JOBS) as executor:
            path_infos = map(self.checksum_to_path_info, checksums)
            in_remote = executor.map(exists_with_progress, path_infos)
            # Keep only the checksums whose path exists on the remote.
            ret = list(itertools.compress(checksums, in_remote))
            return ret
def already_cached(self, path_info):
current = self.get_checksum(path_info)
if not current:
return False
return not self.changed_cache(current)
def safe_remove(self, path_info, force=False):
    """Remove *path_info*, prompting first when it is not backed by cache.

    Raises:
        ConfirmRemoveError: when the user declines the prompt.
    """
    if not self.exists(path_info):
        return
    if not force and not self.already_cached(path_info):
        msg = (
            "file '{}' is going to be removed."
            " Are you sure you want to proceed?".format(str(path_info))
        )
        if not prompt.confirm(msg):
            raise ConfirmRemoveError(str(path_info))
    self.remove(path_info)
def _checkout_file(
    self, path_info, checksum, force, progress_callback=None
):
    """Link a single cached file into the workspace and record its state."""
    cache_info = self.checksum_to_path_info(checksum)
    if self.exists(path_info):
        msg = "data '{}' exists. Removing before checkout."
        logger.warning(msg.format(str(path_info)))
        self.safe_remove(path_info, force=force)
    self.link(cache_info, path_info)
    self.state.save_link(path_info)
    self.state.save(path_info, checksum)
    if progress_callback:
        progress_callback(str(path_info))
def makedirs(self, path_info):
    """Create directories for *path_info* when the remote requires it.

    The base implementation is a no-op; override in remotes that must
    create parent directories before copying/linking/moving data.
    """
    pass
def _checkout_dir(
    self, path_info, checksum, force, progress_callback=None
):
    """Link every file of a cached directory into the workspace."""
    # Create dir separately so that dir is created
    # even if there are no files in it
    if not self.exists(path_info):
        self.makedirs(path_info)
    dir_info = self.get_dir_cache(checksum)
    logger.debug("Linking directory '{}'.".format(path_info))
    for entry in dir_info:
        relative_path = entry[self.PARAM_RELPATH]
        entry_checksum = entry[self.PARAM_CHECKSUM]
        entry_cache_info = self.checksum_to_path_info(entry_checksum)
        entry_info = path_info / relative_path
        entry_checksum_info = {self.PARAM_CHECKSUM: entry_checksum}
        # Only relink entries whose workspace copy is missing or stale.
        if self.changed(entry_info, entry_checksum_info):
            if self.exists(entry_info):
                self.safe_remove(entry_info, force=force)
            self.link(entry_cache_info, entry_info)
            self.state.save(entry_info, entry_checksum)
        if progress_callback:
            progress_callback(str(entry_info))
    # Drop workspace files no longer part of the cached directory.
    self._remove_redundant_files(path_info, dir_info, force)
    self.state.save_link(path_info)
    self.state.save(path_info, checksum)
def _remove_redundant_files(self, path_info, dir_info, force):
    """Delete workspace files that are not in the cached dir listing."""
    existing_files = set(
        path_info / root / fname
        for root, _, files in self.walk(path_info)
        for fname in files
    )
    needed_files = {
        path_info / entry[self.PARAM_RELPATH] for entry in dir_info
    }
    for path in existing_files - needed_files:
        self.safe_remove(path, force)
def checkout(
    self, path_info, checksum_info, force=False, progress_callback=None
):
    """Materialize *path_info* in the workspace from the cache.

    Args:
        path_info: destination path (local or this remote's scheme).
        checksum_info: dict with the expected checksum under
            self.PARAM_CHECKSUM.
        force: remove an existing, uncached file without prompting.
        progress_callback: optional callable reporting progress.

    Raises:
        NotImplementedError: for unsupported schemes.
    """
    if path_info.scheme not in ["local", self.scheme]:
        raise NotImplementedError
    checksum = checksum_info.get(self.PARAM_CHECKSUM)
    skip = False
    if not checksum:
        logger.warning(
            "No checksum info found for '{}'. "
            "It won't be created.".format(str(path_info))
        )
        self.safe_remove(path_info, force=force)
        skip = True
    elif not self.changed(path_info, checksum_info):
        msg = "Data '{}' didn't change."
        logger.debug(msg.format(str(path_info)))
        skip = True
    elif self.changed_cache(checksum):
        msg = "Cache '{}' not found. File '{}' won't be created."
        logger.warning(msg.format(checksum, str(path_info)))
        self.safe_remove(path_info, force=force)
        skip = True
    if skip:
        # BUG FIX: progress_callback defaults to None; calling it
        # unconditionally crashed the skip path with a TypeError.
        if progress_callback:
            progress_callback(
                str(path_info), self.get_files_number(checksum)
            )
        return
    msg = "Checking out '{}' with cache '{}'."
    logger.debug(msg.format(str(path_info), checksum))
    self._checkout(path_info, checksum, force, progress_callback)
def _checkout(
    self, path_info, checksum, force=False, progress_callback=None
):
    """Dispatch checkout to the file or directory implementation."""
    handler = (
        self._checkout_dir
        if self.is_dir_checksum(checksum)
        else self._checkout_file
    )
    return handler(
        path_info, checksum, force, progress_callback=progress_callback
    )
def get_files_number(self, checksum):
    """Number of workspace files that *checksum* expands to.

    Returns 0 for a missing checksum, the directory entry count for a
    dir checksum, and 1 for a plain file checksum.
    """
    if not checksum:
        return 0
    return (
        len(self.get_dir_cache(checksum))
        if self.is_dir_checksum(checksum)
        else 1
    )
@staticmethod
def unprotect(path_info):
    # Inverse of protect(); no-op by default.
    pass
def _get_unpacked_dir_names(self, checksums):
    # Remotes that keep an unpacked-dir cache override this.
    return set()
def extract_used_local_checksums(self, cinfos):
    """Collect local checksums in use, plus their unpacked-dir variants."""
    # Imported here to avoid a circular import at module load time.
    from dvc.remote import RemoteLOCAL
    used = {info[RemoteLOCAL.PARAM_CHECKSUM] for info in cinfos["local"]}
    unpacked = self._get_unpacked_dir_names(used)
    return used | unpacked
def _changed_unpacked_dir(self, checksum):
    # Conservative default: always assume the unpacked dir is stale.
    return True
def _update_unpacked_dir(self, checksum):
    # Hook for remotes that maintain an unpacked-dir cache; no-op here.
    pass
|
# -*- coding: utf-8 -*-
#
# ibus-replace-with-kanji - Replace With Kanji input method for IBus
#
# Using source code derived from
# ibus-tmpl - The Input Bus template project
#
# Copyright (c) 2017 Esrille Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
import time
from gi import require_version
require_version('IBus', '1.0')
from gi.repository import IBus
from gi.repository import GLib
from dictionary import Dictionary
from event import Event
import bits
import roomazi
keysyms = IBus
_hiragana = "あいうえおかきくけこさしすせそたちつてとなにぬねのはひふへほまみむめもやゆよらりるれろわをんゔがぎぐげござじずぜぞだぢづでどばびぶべぼぁぃぅぇぉゃゅょっぱぴぷぺぽゎゐゑ"
_katakana = "アイウエオカキクケコサシスセソタチツテトナニヌネノハヒフヘホマミムメモヤユヨラリルレロワヲンヴガギグゲゴザジズゼゾダヂヅデドバビブベボァィゥェォャュョッパピプペポヮヰヱ"
_non_daku = 'あいうえおかきくけこさしすせそたちつてとはひふへほやゆよアイウエオカキクケコサシスセソタチツテトハヒフヘホヤユヨぁぃぅぇぉがぎぐげござじずぜぞだぢづでどばびぶべぼゃゅょァィゥェォガギグゲゴザジズゼゾダヂヅデドバビブベボャュョゔヴ'
_daku = 'ぁぃぅぇぉがぎぐげござじずぜぞだぢづでどばびぶべぼゃゅょァィゥェォガギグゲゴザジズゼゾダヂヅデドバビブベボャュョあいゔえおかきくけこさしすせそたちつてとはひふへほやゆよアイヴエオカキクケコサシスセソタチツテトハヒフヘホヤユヨうウ'
_non_handaku = 'はひふへほハヒフヘホぱぴぷぺぽパピプペポ'
_handaku = 'ぱぴぷぺぽパピプペポはひふへほハヒフヘホ'
_re_tu = re.compile(r'[kstnhmyrwgzdbpfjv]')
# Hiragana -> katakana code-point table, built once at import time.
_katakana_table = str.maketrans(_hiragana, _katakana)
def to_katakana(kana):
    """Return *kana* with hiragana converted to katakana.

    Characters outside the hiragana table (ASCII, kanji, katakana, ...)
    are kept unchanged. Uses str.translate for a single C-level pass
    instead of a per-character Python loop.
    """
    return kana.translate(_katakana_table)
class EngineReplaceWithKanji(IBus.Engine):
__gtype_name__ = 'EngineReplaceWithKanji'
def __init__(self):
    """Initialize engine state and load layout/delay settings from dconf."""
    super(EngineReplaceWithKanji, self).__init__()
    self.__enabled = False  # True if IME is enabled
    self.__katakana_mode = False  # True to input Katakana
    self.__layout = roomazi.layout
    self.__to_kana = self.__handle_roomazi_layout
    self.__preedit_string = ''
    self.__previous_text = ''
    self.__ignore_surrounding_text = False
    self.__lookup_table = IBus.LookupTable.new(10, 0, True, False)
    self.__lookup_table.set_orientation(IBus.Orientation.VERTICAL)
    self.__prop_list = IBus.PropList()
    config = IBus.Bus().get_config()
    # Load the layout setting
    var = config.get_value('engine/replace-with-kanji-python', 'layout')
    if var is None or var.get_type_string() != 's':
        layout_path = os.path.join(os.getenv('IBUS_REPLACE_WITH_KANJI_LOCATION'), 'layouts')
        layout_path = os.path.join(layout_path, 'roomazi.json')
        print(layout_path, flush=True)
        var = GLib.Variant.new_string(layout_path)
        config.set_value('engine/replace-with-kanji-python', 'layout', var)
    print("layout:", var.get_string(), flush=True)
    try:
        with open(var.get_string()) as f:
            # BUG FIX: json.loads() takes no encoding argument on Python 3;
            # the extra "utf-8" raised TypeError, so the configured layout
            # file silently never loaded.
            self.__layout = json.load(f)
    except ValueError as error:
        print("JSON error:", error)
    except:
        print("Cannot open: ", var.get_string(), flush=True)
        self.__layout = roomazi.layout
    print(json.dumps(self.__layout, ensure_ascii=False), flush=True)
    # Load the delay setting
    delay = 0  # default when unset -- TODO confirm the desired default
    var = config.get_value('engine/replace-with-kanji-python', 'delay')
    if var is None or var.get_type_string() != 'i':
        # BUG FIX: 'delay' was referenced before assignment here
        # (UnboundLocalError on first run with no dconf entry).
        var = GLib.Variant.new_int32(delay)
        config.set_value('engine/replace-with-kanji-python', 'delay', var)
    delay = var.get_int32()
    print("delay:", delay, flush=True)
    self.__event = Event(self, delay, self.__layout)
    self.__dict = Dictionary()
    if 'Type' in self.__layout:
        if self.__layout['Type'] == 'Kana':
            self.__to_kana = self.__handle_kana_layout
        else:
            self.__to_kana = self.__handle_roomazi_layout
def __handle_kana_layout(self, preedit, keyval, state = 0, modifiers = 0):
    """Map one keystroke to kana using the direct-kana layout tables.

    *preedit* holds a pending '\\' prefix selecting the shifted tables.
    Returns (yomi, preedit): the kana produced (may be '') and the
    updated pending string.
    """
    yomi = ''
    if self.__event.is_ascii(keyval):
        c = self.__event.chr(keyval)
        if preedit == '\\':
            preedit = ''
            if 'Shift' in self.__layout and self.__event.is_shift():
                yomi = self.__layout['\\Shift'][c]
            elif modifiers & bits.ShiftL_Bit:
                yomi = self.__layout['\\ShiftL'][c]
            elif modifiers & bits.ShiftR_Bit:
                yomi = self.__layout['\\ShiftR'][c]
            else:
                yomi = self.__layout['\\Normal'][c]
        else:
            if 'Shift' in self.__layout and self.__event.is_shift():
                yomi = self.__layout['Shift'][c]
            elif modifiers & bits.ShiftL_Bit:
                yomi = self.__layout['ShiftL'][c]
            elif modifiers & bits.ShiftR_Bit:
                yomi = self.__layout['ShiftR'][c]
            else:
                yomi = self.__layout['Normal'][c]
        if yomi == '\\':
            # A literal backslash starts a shifted-table prefix.
            preedit += yomi
            yomi = ''
    elif keyval == keysyms.Zenkaku_Hankaku:
        if preedit == '\\':
            yomi = '¥'
            preedit = ''
        else:
            preedit += '\\'
    return yomi, preedit
def __handle_roomazi_layout(self, preedit, keyval, state = 0, modifiers = 0):
    """Convert one roomazi keystroke into kana.

    Returns (yomi, preedit): the kana produced (may be '') and the
    remaining pending roomazi string.
    """
    yomi = ''
    if self.__event.is_ascii(keyval):
        preedit += self.__event.chr(keyval)
        if preedit in self.__layout['Roomazi']:
            yomi = self.__layout['Roomazi'][preedit]
            preedit = ''
        elif 2 <= len(preedit) and preedit[0] == 'n' and preedit[1] != 'y':
            # 'n' followed by a non-'y' letter stands for ん.
            yomi = 'ん'
            preedit = preedit[1:]
        elif 2 <= len(preedit) and preedit[0] == preedit[1] and _re_tu.search(preedit[1]):
            # A doubled consonant produces the sokuon っ.
            yomi = 'っ'
            preedit = preedit[1:]
    return yomi, preedit
def __get_surrounding_text(self):
    """Return the text before the cursor, or our own record when the
    client's surrounding text is unreliable."""
    # Note self.get_surrounding_text() may not work as expected such as in Firefox, Chrome, etc.
    if self.__ignore_surrounding_text:
        return self.__previous_text
    tuple = self.get_surrounding_text()
    text = tuple[0].get_text()
    pos = tuple[1]
    print("surrounding text: '", text, "', ", pos, ", [", self.__previous_text, "]", sep='', flush=True)
    # Once the client's report disagrees with what we committed, stop
    # trusting the surrounding-text API for this client.
    if self.__previous_text and pos < len(self.__previous_text) or text[pos - len(self.__previous_text):pos] != self.__previous_text:
        self.__ignore_surrounding_text = True
        return self.__previous_text
    return text[:pos]
def __delete_surrounding_text(self, size):
    """Delete *size* characters before the cursor.

    Uses the surrounding-text API when trusted; otherwise forwards
    BackSpace key events (keycode 14) with short sleeps so the client
    can keep up.
    """
    self.__previous_text = self.__previous_text[:-size]
    if not self.__ignore_surrounding_text:
        self.delete_surrounding_text(-size, size)
    else:
        for i in range(size):
            self.forward_key_event(IBus.BackSpace, 14, 0)
            time.sleep(0.01)
            self.forward_key_event(IBus.BackSpace, 14, IBus.ModifierType.RELEASE_MASK)
            time.sleep(0.02)
def is_enabled(self):
    """Whether the IME (Japanese input) is currently on."""
    return self.__enabled
def enable_ime(self):
    """Switch to Japanese input. Returns True when the state changed."""
    if not self.is_enabled():
        print("enable_ime", flush=True);
        self.__preedit_string = ''
        self.__enabled = True
        self.__dict.confirm()
        self.__dict.reset()
        self.__update()
        return True
    return False
def disable_ime(self):
    """Switch back to direct input. Returns True when the state changed."""
    if self.is_enabled():
        print("disable_ime", flush=True);
        self.__dict.confirm()
        self.__reset()
        self.__enabled = False
        self.__update()
        return True
    return False
def __is_roomazi_mode(self):
    # True when the active layout handler is the roomazi one.
    return self.__to_kana == self.__handle_roomazi_layout
def set_katakana_mode(self, enable):
    """Toggle katakana output for subsequently typed kana."""
    print("set_katakana_mode:", enable, flush=True)
    self.__katakana_mode = enable
def do_process_key_event(self, keyval, keycode, state):
    # IBus entry point; Event decides whether/how we consume the key.
    return self.__event.process_key_event(keyval, keycode, state)
def handle_key_event(self, keyval, keycode, state, modifiers):
    """Main key dispatch: candidate navigation, conversion, kana input.

    Returns True when the event was consumed.
    """
    print("handle_key_event(%04x, %04x, %04x, %04x)" % (keyval, keycode, state, modifiers), flush=True)
    # Handle Candidate window
    if 0 < self.__lookup_table.get_number_of_candidates():
        if keyval == keysyms.Page_Up or keyval == keysyms.KP_Page_Up:
            return self.do_page_up()
        elif keyval == keysyms.Page_Down or keyval == keysyms.KP_Page_Down:
            return self.do_page_down()
        elif keyval == keysyms.Up or self.__event.is_muhenkan():
            return self.do_cursor_up()
        elif keyval == keysyms.Down or self.__event.is_henkan():
            return self.do_cursor_down()
        elif keyval == keysyms.Escape:
            print("escape", flush=True)
            self.__previous_text = self.handle_escape(state)
            return True
        elif keyval == keysyms.Return:
            self.__commit()
            return True
    # Ignore modifier keys
    if self.__event.is_modifier():
        return False
    # Let Ctrl/Alt shortcuts through after committing pending input.
    if (state & (IBus.ModifierType.CONTROL_MASK | IBus.ModifierType.MOD1_MASK)) != 0:
        self.__commit()
        return False
    # Handle Japanese text
    if self.__event.is_henkan():
        self.set_katakana_mode(False)
        return self.handle_replace(keyval, state)
    if self.__event.is_shrink():
        self.set_katakana_mode(False)
        return self.handle_shrink(keyval, state)
    self.__commit()
    if self.__event.is_backspace():
        if 1 <= len(self.__preedit_string):
            self.__preedit_string = self.__preedit_string[:-1]
            self.__update()
            return True
        elif 0 < len(self.__previous_text):
            self.__previous_text = self.__previous_text[:-1]
    elif self.__event.is_ascii(keyval) or keyval == keysyms.Zenkaku_Hankaku:
        yomi, self.__preedit_string = self.__to_kana(self.__preedit_string, keyval, state, modifiers)
        if yomi:
            if self.__katakana_mode:
                yomi = to_katakana(yomi)
            self.__commit_string(yomi)
            self.__update()
            return True
        self.__update()
        return True
    else:
        self.__previous_text = ''
    return False
def lookup_dictionary(self, yomi):
    """Look up *yomi* and fill the candidate table.

    Returns (cand, size): the first candidate (or falsy) and the length
    of the matched reading within the surrounding text.
    """
    # Handle dangling 'n' for 'ん' here to minimize the access to the surrounding text API,
    # which could cause an unexpected behaviour occasionally at race conditions.
    if self.__preedit_string == 'n':
        yomi += 'ん'
    cand = self.__dict.lookup(yomi)
    size = len(self.__dict.reading())
    if 0 < size and self.__preedit_string == 'n':
        # The trailing ん came from the preedit, not the surrounding text.
        size -= 1
    self.__lookup_table.clear()
    if cand and 1 < len(self.__dict.cand()):
        for c in self.__dict.cand():
            self.__lookup_table.append_candidate(IBus.Text.new_from_string(c))
    return (cand, size)
def handle_replace(self, keyval, state):
    """Replace the reading before the cursor with the next/previous candidate."""
    if not self.__dict.current():
        # Start a new conversion from the surrounding text.
        text = self.__get_surrounding_text()
        (cand, size) = self.lookup_dictionary(text)
    else:
        # Cycle through candidates; Shift moves backwards.
        size = len(self.__dict.current())
        if not (state & IBus.ModifierType.SHIFT_MASK):
            cand = self.__dict.next()
        else:
            cand = self.__dict.previous()
    if self.__dict.current():
        self.__delete_surrounding_text(size)
        self.__commit_string(cand)
        self.__preedit_string = ''
    self.__update()
    return True
def handle_shrink(self, keyval, state):
    """Shrink the matched reading by one character and convert again."""
    if not self.__dict.current():
        return False
    text = self.handle_escape(state)
    if 1 < len(text):
        (cand, size) = self.lookup_dictionary(text[1:])
        # Note a nap is needed here especially for applications that do not support surrounding text.
        time.sleep(0.1)
    else:
        # Nothing left to convert; just clear the conversion state.
        self.__dict.reset()
        return True
    if self.__dict.current():
        self.__delete_surrounding_text(size)
        self.__commit_string(cand)
        self.__preedit_string = ''
    self.__update()
    return True
def handle_escape(self, state):
    """Cancel the current conversion, restoring the original reading.

    Returns the restored reading (yomi), or None when there was no
    active conversion.
    """
    if not self.__dict.current():
        return
    size = len(self.__dict.current())
    self.__delete_surrounding_text(size)
    yomi = self.__dict.reading()
    self.__commit_string(yomi)
    self.__reset()
    self.__update()
    return yomi
def __commit(self):
    """Finalize the current conversion and hide the candidate window."""
    if self.__dict.current():
        self.__dict.confirm()
        self.__dict.reset()
        self.__lookup_table.clear()
        visible = 0 < self.__lookup_table.get_number_of_candidates()
        self.update_lookup_table(self.__lookup_table, visible)
    self.__previous_text = ''
def __commit_string(self, text):
    """Commit *text*, merging a combining (han)dakuten with the previous kana."""
    if text == '゛':
        prev = self.__get_surrounding_text()
        if 0 < len(prev):
            pos = _non_daku.find(prev[-1])
            if 0 <= pos:
                # Replace e.g. か + ゛ with が.
                self.__delete_surrounding_text(1)
                text = _daku[pos]
    elif text == '゜':
        prev = self.__get_surrounding_text()
        if 0 < len(prev):
            pos = _non_handaku.find(prev[-1])
            if 0 <= pos:
                # Replace e.g. は + ゜ with ぱ.
                self.__delete_surrounding_text(1)
                text = _handaku[pos]
    self.commit_text(IBus.Text.new_from_string(text))
    self.__previous_text += text
def __update_candidate(self):
    """Replace the committed text with the currently selected candidate."""
    index = self.__lookup_table.get_cursor_pos()
    candidate = self.__lookup_table.get_candidate(index)
    size = len(self.__dict.current())
    self.__dict.set_current(index)
    self.__delete_surrounding_text(size)
    # NOTE(review): uses the '.text' GObject property rather than
    # get_text() -- confirm it yields a plain string here.
    self.__commit_string(candidate.text);
def do_page_up(self):
    """Move the candidate selection one page up; always consumes the key."""
    if self.__lookup_table.page_up():
        self.__update_lookup_table()
        self.__update_candidate()
    return True
def do_page_down(self):
    """Move the candidate selection one page down; always consumes the key."""
    if self.__lookup_table.page_down():
        self.__update_lookup_table()
        self.__update_candidate()
    return True
def do_cursor_up(self):
    """Move the candidate selection up by one; always consumes the key."""
    if self.__lookup_table.cursor_up():
        self.__update_lookup_table()
        self.__update_candidate()
    return True
def do_cursor_down(self):
    """Move the candidate selection down by one; always consumes the key."""
    if self.__lookup_table.cursor_down():
        self.__update_lookup_table()
        self.__update_candidate()
    return True
def __update(self):
    """Refresh the underlined preedit text and the candidate window."""
    preedit_len = len(self.__preedit_string)
    attrs = IBus.AttrList()
    attrs.append(IBus.Attribute.new(IBus.AttrType.UNDERLINE,
                 IBus.AttrUnderline.SINGLE, 0, preedit_len))
    text = IBus.Text.new_from_string(self.__preedit_string)
    text.set_attributes(attrs)
    # Visible only while there is pending preedit text.
    self.update_preedit_text(text, preedit_len, preedit_len > 0)
    self.__update_lookup_table()
def __update_lookup_table(self):
    """Show the candidate window only while the IME is enabled."""
    if self.is_enabled():
        visible = 0 < self.__lookup_table.get_number_of_candidates()
        self.update_lookup_table(self.__lookup_table, visible)
    else:
        self.hide_lookup_table()
def __reset(self):
    """Drop all transient input state (preedit, candidates, history)."""
    self.__dict.reset()
    self.__preedit_string = ''
    self.__lookup_table.clear()
    self.__update_lookup_table()
    self.__previous_text = ''
    self.__ignore_surrounding_text = False
def do_focus_in(self):
    """IBus callback: a client gained focus."""
    print("focus_in", flush=True)
    self.register_properties(self.__prop_list)
    # Request the initial surrounding-text in addition to the "enable" handler.
    self.get_surrounding_text()
def do_focus_out(self):
    """IBus callback: the client lost focus."""
    print("focus_out", flush=True)
    self.__reset()
    self.__dict.save_orders()
def do_enable(self):
    """IBus callback: the engine was enabled."""
    print("enable", flush=True)
    # Request the initial surrounding-text when enabled as documented.
    self.get_surrounding_text()
def do_disable(self):
    """IBus callback: the engine was disabled."""
    print("disable", flush=True)
    self.__reset()
    self.__enabled = False
    self.__dict.save_orders()
def do_reset(self):
    """IBus callback: the client asked for a reset."""
    print("reset", flush=True)
    self.__reset()
    # 'reset' seems to be sent due to an internal error, and
    # we don't switch back to the Alphabet mode here.
    # NG: self.__enabled = False
    self.__dict.save_orders()
def do_property_activate(self, prop_name):
    """IBus callback: a panel property was activated (currently log-only)."""
    print("PropertyActivate(%s)" % prop_name, flush=True)
Commit message: Dynamically update the dconf settings. Closes #12.
File: engine/engine.py
# -*- coding: utf-8 -*-
#
# ibus-replace-with-kanji - Replace With Kanji input method for IBus
#
# Using source code derived from
# ibus-tmpl - The Input Bus template project
#
# Copyright (c) 2017 Esrille Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
import time
from gi import require_version
require_version('IBus', '1.0')
from gi.repository import IBus
from gi.repository import GLib
from dictionary import Dictionary
from event import Event
import bits
import roomazi
keysyms = IBus
_hiragana = "あいうえおかきくけこさしすせそたちつてとなにぬねのはひふへほまみむめもやゆよらりるれろわをんゔがぎぐげござじずぜぞだぢづでどばびぶべぼぁぃぅぇぉゃゅょっぱぴぷぺぽゎゐゑ"
_katakana = "アイウエオカキクケコサシスセソタチツテトナニヌネノハヒフヘホマミムメモヤユヨラリルレロワヲンヴガギグゲゴザジズゼゾダヂヅデドバビブベボァィゥェォャュョッパピプペポヮヰヱ"
_non_daku = 'あいうえおかきくけこさしすせそたちつてとはひふへほやゆよアイウエオカキクケコサシスセソタチツテトハヒフヘホヤユヨぁぃぅぇぉがぎぐげござじずぜぞだぢづでどばびぶべぼゃゅょァィゥェォガギグゲゴザジズゼゾダヂヅデドバビブベボャュョゔヴ'
_daku = 'ぁぃぅぇぉがぎぐげござじずぜぞだぢづでどばびぶべぼゃゅょァィゥェォガギグゲゴザジズゼゾダヂヅデドバビブベボャュョあいゔえおかきくけこさしすせそたちつてとはひふへほやゆよアイヴエオカキクケコサシスセソタチツテトハヒフヘホヤユヨうウ'
_non_handaku = 'はひふへほハヒフヘホぱぴぷぺぽパピプペポ'
_handaku = 'ぱぴぷぺぽパピプペポはひふへほハヒフヘホ'
_re_tu = re.compile(r'[kstnhmyrwgzdbpfjv]')
# Hiragana -> katakana code-point table, built once at import time.
_katakana_table = str.maketrans(_hiragana, _katakana)
def to_katakana(kana):
    """Return *kana* with hiragana converted to katakana.

    Characters outside the hiragana table (ASCII, kanji, katakana, ...)
    are kept unchanged. Uses str.translate for a single C-level pass
    instead of a per-character Python loop.
    """
    return kana.translate(_katakana_table)
class EngineReplaceWithKanji(IBus.Engine):
__gtype_name__ = 'EngineReplaceWithKanji'
def __init__(self):
    """Initialize engine state and load settings from dconf."""
    super(EngineReplaceWithKanji, self).__init__()
    self.__enabled = False  # True if IME is enabled
    self.__katakana_mode = False  # True to input Katakana
    self.__layout = roomazi.layout
    self.__to_kana = self.__handle_roomazi_layout
    self.__preedit_string = ''
    self.__previous_text = ''
    self.__ignore_surrounding_text = False
    self.__lookup_table = IBus.LookupTable.new(10, 0, True, False)
    self.__lookup_table.set_orientation(IBus.Orientation.VERTICAL)
    self.__prop_list = IBus.PropList()
    config = IBus.Bus().get_config()
    # Reload settings dynamically when they change in dconf.
    config.connect('value-changed', self.__config_value_changed_cb)
    self.__layout = self.__load_layout(config)
    self.__delay = self.__load_delay(config)
    self.__event = Event(self, self.__delay, self.__layout)
    self.__dict = Dictionary()
def __load_layout(self, config):
    """Load the keyboard layout from dconf, falling back to roomazi.

    Also points self.__to_kana at the handler matching the layout's
    'Type'. Returns the layout dict.
    """
    var = config.get_value('engine/replace-with-kanji-python', 'layout')
    if var is None or var.get_type_string() != 's':
        layout_path = os.path.join(os.getenv('IBUS_REPLACE_WITH_KANJI_LOCATION'), 'layouts')
        layout_path = os.path.join(layout_path, 'roomazi.json')
        print(layout_path, flush=True)
        var = GLib.Variant.new_string(layout_path)
        config.set_value('engine/replace-with-kanji-python', 'layout', var)
    print("layout:", var.get_string(), flush=True)
    # BUG FIX: default up front so a JSON error cannot leave 'layout'
    # unbound (the ValueError branch below never assigned it).
    layout = roomazi.layout
    try:
        with open(var.get_string()) as f:
            # BUG FIX: json.loads() takes no encoding argument on
            # Python 3; the extra "utf-8" raised TypeError, so the
            # configured layout file silently never loaded.
            layout = json.load(f)
    except ValueError as error:
        print("JSON error:", error)
    except:
        print("Cannot open: ", var.get_string(), flush=True)
        layout = roomazi.layout
    self.__to_kana = self.__handle_roomazi_layout
    if 'Type' in layout:
        if layout['Type'] == 'Kana':
            self.__to_kana = self.__handle_kana_layout
    print(json.dumps(layout, ensure_ascii=False), flush=True)
    return layout
def __load_delay(self, config):
    """Load the conversion delay from dconf, writing back a default when unset.

    Returns the delay as an int.
    """
    delay = 0  # default when unset -- TODO confirm the desired default
    var = config.get_value('engine/replace-with-kanji-python', 'delay')
    if var is None or var.get_type_string() != 'i':
        # BUG FIX: 'delay' was referenced before assignment here
        # (UnboundLocalError on first run with no dconf entry).
        var = GLib.Variant.new_int32(delay)
        config.set_value('engine/replace-with-kanji-python', 'delay', var)
    delay = var.get_int32()
    print("delay:", delay, flush=True)
    return delay
def __config_value_changed_cb(self, config, section, name, value):
    """React to dconf changes by reloading the affected setting live."""
    print("config value changed:", name, flush=True)
    if name == "delay":
        self.__reset()
        # BUG FIX: this branch reloaded the layout instead of the delay.
        self.__delay = self.__load_delay(config)
        self.__event = Event(self, self.__delay, self.__layout)
    elif name == "layout":
        self.__reset()
        self.__layout = self.__load_layout(config)
        self.__event = Event(self, self.__delay, self.__layout)
def __handle_kana_layout(self, preedit, keyval, state = 0, modifiers = 0):
    """Map one keystroke to kana using the direct-kana layout tables.

    *preedit* holds a pending '\\' prefix selecting the shifted tables.
    Returns (yomi, preedit).
    """
    yomi = ''
    if self.__event.is_ascii(keyval):
        c = self.__event.chr(keyval)
        if preedit == '\\':
            preedit = ''
            if 'Shift' in self.__layout and self.__event.is_shift():
                yomi = self.__layout['\\Shift'][c]
            elif modifiers & bits.ShiftL_Bit:
                yomi = self.__layout['\\ShiftL'][c]
            elif modifiers & bits.ShiftR_Bit:
                yomi = self.__layout['\\ShiftR'][c]
            else:
                yomi = self.__layout['\\Normal'][c]
        else:
            if 'Shift' in self.__layout and self.__event.is_shift():
                yomi = self.__layout['Shift'][c]
            elif modifiers & bits.ShiftL_Bit:
                yomi = self.__layout['ShiftL'][c]
            elif modifiers & bits.ShiftR_Bit:
                yomi = self.__layout['ShiftR'][c]
            else:
                yomi = self.__layout['Normal'][c]
        if yomi == '\\':
            # A literal backslash starts a shifted-table prefix.
            preedit += yomi
            yomi = ''
    elif keyval == keysyms.Zenkaku_Hankaku:
        if preedit == '\\':
            yomi = '¥'
            preedit = ''
        else:
            preedit += '\\'
    return yomi, preedit
def __handle_roomazi_layout(self, preedit, keyval, state = 0, modifiers = 0):
    """Convert one roomazi keystroke into kana.

    Returns (yomi, preedit): the kana produced (may be '') and the
    remaining pending roomazi string.
    """
    yomi = ''
    if self.__event.is_ascii(keyval):
        preedit += self.__event.chr(keyval)
        if preedit in self.__layout['Roomazi']:
            yomi = self.__layout['Roomazi'][preedit]
            preedit = ''
        elif 2 <= len(preedit) and preedit[0] == 'n' and preedit[1] != 'y':
            # 'n' followed by a non-'y' letter stands for ん.
            yomi = 'ん'
            preedit = preedit[1:]
        elif 2 <= len(preedit) and preedit[0] == preedit[1] and _re_tu.search(preedit[1]):
            # A doubled consonant produces the sokuon っ.
            yomi = 'っ'
            preedit = preedit[1:]
    return yomi, preedit
def __get_surrounding_text(self):
    """Return the text before the cursor, or our own record when the
    client's surrounding text is unreliable."""
    # Note self.get_surrounding_text() may not work as expected such as in Firefox, Chrome, etc.
    if self.__ignore_surrounding_text:
        return self.__previous_text
    tuple = self.get_surrounding_text()
    text = tuple[0].get_text()
    pos = tuple[1]
    print("surrounding text: '", text, "', ", pos, ", [", self.__previous_text, "]", sep='', flush=True)
    # Once the client's report disagrees with what we committed, stop
    # trusting the surrounding-text API for this client.
    if self.__previous_text and pos < len(self.__previous_text) or text[pos - len(self.__previous_text):pos] != self.__previous_text:
        self.__ignore_surrounding_text = True
        return self.__previous_text
    return text[:pos]
def __delete_surrounding_text(self, size):
    """Delete *size* characters before the cursor.

    Uses the surrounding-text API when trusted; otherwise forwards
    BackSpace key events (keycode 14) with short sleeps so the client
    can keep up.
    """
    self.__previous_text = self.__previous_text[:-size]
    if not self.__ignore_surrounding_text:
        self.delete_surrounding_text(-size, size)
    else:
        for i in range(size):
            self.forward_key_event(IBus.BackSpace, 14, 0)
            time.sleep(0.01)
            self.forward_key_event(IBus.BackSpace, 14, IBus.ModifierType.RELEASE_MASK)
            time.sleep(0.02)
def is_enabled(self):
return self.__enabled
def enable_ime(self):
if not self.is_enabled():
print("enable_ime", flush=True);
self.__preedit_string = ''
self.__enabled = True
self.__dict.confirm()
self.__dict.reset()
self.__update()
return True
return False
def disable_ime(self):
    """Leave Japanese input mode.

    Returns True when the mode actually changed, False when it was
    already disabled.
    """
    if not self.is_enabled():
        return False
    print("disable_ime", flush=True)
    # Confirm any pending conversion before dropping IME state.
    self.__dict.confirm()
    self.__reset()
    self.__enabled = False
    self.__update()
    return True
def __is_roomazi_mode(self):
    """Return True when keystrokes are currently converted via the romaji handler."""
    return self.__to_kana == self.__handle_roomazi_layout
def set_katakana_mode(self, enable):
    """Toggle katakana output: when *enable* is truthy, produced kana are katakana."""
    print("set_katakana_mode:", enable, flush=True)
    self.__katakana_mode = enable
def do_process_key_event(self, keyval, keycode, state):
    """IBus entry point for key events; delegates to the event helper."""
    return self.__event.process_key_event(keyval, keycode, state)
def handle_key_event(self, keyval, keycode, state, modifiers):
    """Dispatch a key event.

    Order matters: candidate-window navigation first, then modifier
    filtering, then conversion keys (henkan/shrink), and finally plain
    typing (backspace / ASCII / Zenkaku_Hankaku).

    Returns True when the event was consumed by the IME, False to let
    the application handle it.
    """
    print("handle_key_event(%04x, %04x, %04x, %04x)" % (keyval, keycode, state, modifiers), flush=True)
    # Handle Candidate window
    if 0 < self.__lookup_table.get_number_of_candidates():
        if keyval == keysyms.Page_Up or keyval == keysyms.KP_Page_Up:
            return self.do_page_up()
        elif keyval == keysyms.Page_Down or keyval == keysyms.KP_Page_Down:
            return self.do_page_down()
        elif keyval == keysyms.Up or self.__event.is_muhenkan():
            return self.do_cursor_up()
        elif keyval == keysyms.Down or self.__event.is_henkan():
            return self.do_cursor_down()
        elif keyval == keysyms.Escape:
            # Escape cancels the conversion and restores the reading.
            print("escape", flush=True)
            self.__previous_text = self.handle_escape(state)
            return True
        elif keyval == keysyms.Return:
            self.__commit()
            return True
    # Ignore modifier keys
    if self.__event.is_modifier():
        return False
    # Ctrl/Alt shortcuts belong to the application; commit and pass through.
    if (state & (IBus.ModifierType.CONTROL_MASK | IBus.ModifierType.MOD1_MASK)) != 0:
        self.__commit()
        return False
    # Handle Japanese text
    if self.__event.is_henkan():
        self.set_katakana_mode(False)
        return self.handle_replace(keyval, state)
    if self.__event.is_shrink():
        self.set_katakana_mode(False)
        return self.handle_shrink(keyval, state)
    self.__commit()
    if self.__event.is_backspace():
        # Eat a preedit character first; otherwise shrink our local copy
        # of the committed text and let the app see the backspace.
        if 1 <= len(self.__preedit_string):
            self.__preedit_string = self.__preedit_string[:-1]
            self.__update()
            return True
        elif 0 < len(self.__previous_text):
            self.__previous_text = self.__previous_text[:-1]
    elif self.__event.is_ascii(keyval) or keyval == keysyms.Zenkaku_Hankaku:
        yomi, self.__preedit_string = self.__to_kana(self.__preedit_string, keyval, state, modifiers)
        if yomi:
            if self.__katakana_mode:
                yomi = to_katakana(yomi)
            self.__commit_string(yomi)
            self.__update()
            return True
        self.__update()
        return True
    else:
        # Any other key invalidates our record of the committed text.
        self.__previous_text = ''
    return False
def lookup_dictionary(self, yomi):
    """Look up *yomi* in the dictionary and fill the candidate table.

    Returns (cand, size) where *cand* is the first candidate (falsy when
    there is no match) and *size* is the length of the matched reading in
    the already-committed text.
    """
    # Handle dangling 'n' for 'ん' here to minimize the access to the surrounding text API,
    # which could cause an unexpected behaviour occasionally at race conditions.
    if self.__preedit_string == 'n':
        yomi += 'ん'
    cand = self.__dict.lookup(yomi)
    size = len(self.__dict.reading())
    # The synthetic 'ん' is not part of the committed text, so do not
    # count it when deleting the reading.
    if 0 < size and self.__preedit_string == 'n':
        size -= 1
    self.__lookup_table.clear()
    if cand and 1 < len(self.__dict.cand()):
        for c in self.__dict.cand():
            self.__lookup_table.append_candidate(IBus.Text.new_from_string(c))
    return (cand, size)
def handle_replace(self, keyval, state):
    """Replace the reading before the cursor with a dictionary candidate.

    The first press starts a conversion from the surrounding text;
    subsequent presses cycle candidates (Shift cycles backwards).
    Always returns True (the event is consumed).
    """
    if not self.__dict.current():
        text = self.__get_surrounding_text()
        (cand, size) = self.lookup_dictionary(text)
    else:
        size = len(self.__dict.current())
        if not (state & IBus.ModifierType.SHIFT_MASK):
            cand = self.__dict.next()
        else:
            cand = self.__dict.previous()
    if self.__dict.current():
        self.__delete_surrounding_text(size)
        self.__commit_string(cand)
        self.__preedit_string = ''
    self.__update()
    return True
def handle_shrink(self, keyval, state):
    """Shrink the current conversion by one leading character and re-convert.

    Only meaningful while a conversion is active; returns False otherwise
    so the key can be handled elsewhere.
    """
    if not self.__dict.current():
        return False
    # Cancel the current candidate and get the plain reading back.
    text = self.handle_escape(state)
    if 1 < len(text):
        (cand, size) = self.lookup_dictionary(text[1:])
        # Note a nap is needed here especially for applications that do not support surrounding text.
        time.sleep(0.1)
    else:
        # Nothing left to shrink; drop the conversion entirely.
        self.__dict.reset()
        return True
    if self.__dict.current():
        self.__delete_surrounding_text(size)
        self.__commit_string(cand)
        self.__preedit_string = ''
    self.__update()
    return True
def handle_escape(self, state):
    """Cancel the active conversion, restoring the original reading.

    Returns the restored reading, or None when no conversion is active.
    """
    if not self.__dict.current():
        return
    size = len(self.__dict.current())
    self.__delete_surrounding_text(size)
    yomi = self.__dict.reading()
    self.__commit_string(yomi)
    self.__reset()
    self.__update()
    return yomi
def __commit(self):
    """Finalize the active conversion (if any) and hide the candidate window."""
    if self.__dict.current():
        # Record the chosen candidate so its ranking improves.
        self.__dict.confirm()
        self.__dict.reset()
        self.__lookup_table.clear()
        visible = 0 < self.__lookup_table.get_number_of_candidates()
        self.update_lookup_table(self.__lookup_table, visible)
    self.__previous_text = ''
def __commit_string(self, text):
    """Commit *text* to the application.

    A lone voicing mark ('゛' or '゜') is merged with the preceding
    character when that character has a (semi-)voiced form, replacing it
    in place (e.g. 'は' followed by '゛' becomes 'ば').
    """
    sound_marks = {
        '゛': (_non_daku, _daku),
        '゜': (_non_handaku, _handaku),
    }
    if text in sound_marks:
        plain, combined = sound_marks[text]
        prev = self.__get_surrounding_text()
        if prev:
            index = plain.find(prev[-1])
            if index >= 0:
                self.__delete_surrounding_text(1)
                text = combined[index]
    self.commit_text(IBus.Text.new_from_string(text))
    self.__previous_text += text
def __update_candidate(self):
    """Replace the committed text with the currently highlighted candidate."""
    cursor = self.__lookup_table.get_cursor_pos()
    selected = self.__lookup_table.get_candidate(cursor)
    current_len = len(self.__dict.current())
    self.__dict.set_current(cursor)
    self.__delete_surrounding_text(current_len)
    # NOTE(review): 'selected' is an IBus.Text; '.text' reads its text
    # field directly — confirm this matches get_text() on this binding.
    self.__commit_string(selected.text)
def do_page_up(self):
    """Page the candidate window up and apply the new selection."""
    moved = self.__lookup_table.page_up()
    if moved:
        self.__update_lookup_table()
        self.__update_candidate()
    return True
def do_page_down(self):
    """Page the candidate window down and apply the new selection."""
    moved = self.__lookup_table.page_down()
    if moved:
        self.__update_lookup_table()
        self.__update_candidate()
    return True
def do_cursor_up(self):
    """Move the candidate cursor up one entry and apply the new selection."""
    moved = self.__lookup_table.cursor_up()
    if moved:
        self.__update_lookup_table()
        self.__update_candidate()
    return True
def do_cursor_down(self):
    """Move the candidate cursor down one entry and apply the new selection."""
    moved = self.__lookup_table.cursor_down()
    if moved:
        self.__update_lookup_table()
        self.__update_candidate()
    return True
def __update(self):
    """Refresh the preedit display (underlined) and the candidate window."""
    preedit_len = len(self.__preedit_string)
    attrs = IBus.AttrList()
    # Underline the whole preedit so the user can see what is unconverted.
    attrs.append(IBus.Attribute.new(IBus.AttrType.UNDERLINE,
                                    IBus.AttrUnderline.SINGLE, 0, preedit_len))
    text = IBus.Text.new_from_string(self.__preedit_string)
    text.set_attributes(attrs)
    self.update_preedit_text(text, preedit_len, preedit_len > 0)
    self.__update_lookup_table()
def __update_lookup_table(self):
    """Show the candidate window while enabled, hide it otherwise."""
    if not self.is_enabled():
        self.hide_lookup_table()
        return
    has_candidates = self.__lookup_table.get_number_of_candidates() > 0
    self.update_lookup_table(self.__lookup_table, has_candidates)
def __reset(self):
    """Clear all transient IME state: dictionary, preedit, candidates, history."""
    self.__dict.reset()
    self.__preedit_string = ''
    self.__lookup_table.clear()
    self.__update_lookup_table()
    self.__previous_text = ''
    # Give the surrounding-text API another chance with the next client.
    self.__ignore_surrounding_text = False
def do_focus_in(self):
    """IBus focus-in handler: publish properties and prime surrounding text."""
    print("focus_in", flush=True)
    self.register_properties(self.__prop_list)
    # Request the initial surrounding-text in addition to the "enable" handler.
    self.get_surrounding_text()
def do_focus_out(self):
    """IBus focus-out handler: drop transient state and persist rankings."""
    print("focus_out", flush=True)
    self.__reset()
    self.__dict.save_orders()
def do_enable(self):
    """IBus enable handler."""
    print("enable", flush=True)
    # Request the initial surrounding-text when enabled as documented.
    self.get_surrounding_text()
def do_disable(self):
    """IBus disable handler: reset, leave kana mode, persist rankings."""
    print("disable", flush=True)
    self.__reset()
    self.__enabled = False
    self.__dict.save_orders()
def do_reset(self):
    """IBus reset handler: clear transient state but keep the input mode."""
    print("reset", flush=True)
    self.__reset()
    # 'reset' seems to be sent due to an internal error, and
    # we don't switch back to the Alphabet mode here.
    # NG: self.__enabled = False
    self.__dict.save_orders()
def do_property_activate(self, prop_name):
    """IBus property handler; currently only logs the activated property name."""
    print("PropertyActivate(%s)" % prop_name, flush=True)
|
import logging
from discord import Embed, Color
from datetime import datetime
logger = logging.getLogger('gradiusbot')
logger.info("[Public Plugin] <image_collector.py> Collects images from specified channel on a server.")
async def action(**kwargs):
    """Admin-channel command handler for the image collector plugin.

    Supported command (admin channel only):
      !ic collect <channel_id> -- download every attachment posted in the
      target channel into the local 'images/' directory, posting progress
      into the admin channel every 50 messages.

    Expected kwargs: 'message', 'config', 'client'.
    """
    message = kwargs['message']
    config = kwargs['config']
    client = kwargs['client']
    split_message = message.content.split()
    admin_channel_id = config.getint('image_collector', 'admin_channel_id')
    guild = message.channel.guild
    if message.channel.id == admin_channel_id:
        # Guard against empty messages before indexing into the split words.
        if split_message and split_message[0] == '!ic':
            if len(split_message) == 3 and split_message[1] == 'collect':
                target_channel = guild.get_channel(int(split_message[2]))
                status_embed = Embed(title="Image Collection Script", color=Color.orange(), description="Starting image collection script...")
                message_count = 0
                file_count = 0
                if target_channel:
                    status_message = await message.channel.send(embed=status_embed)
                    async for msg in target_channel.history(limit=None):
                        message_count += 1
                        # collect message attachments
                        if len(msg.attachments) > 0:
                            # Bug fix: append a per-message counter to the file
                            # name; otherwise multiple attachments on the same
                            # message (same author + timestamp) overwrite each
                            # other and only the last one is kept.
                            attachment_count = 1
                            timestamp = msg.created_at
                            author = msg.author.id
                            for attachment in msg.attachments:
                                await attachment.save(f"images/{author}-{int(timestamp.timestamp())}-{attachment_count}")
                                file_count += 1
                                attachment_count += 1
                        if message_count % 50 == 0:
                            status_embed = Embed(title="Image Collection Script", color=Color.orange(),
                                                 description="Starting image collection script...")
                            status_embed.add_field(name="Messages processed", value=str(message_count))
                            status_embed.add_field(name="Files Saved", value=str(file_count))
                            await status_message.edit(embed=status_embed)
                    # Send final update
                    status_embed = Embed(title="Image Collection Script", color=Color.green(),
                                         description="Image collection script complete.")
                    status_embed.add_field(name="Messages processed", value=str(message_count))
                    status_embed.add_field(name="Files Saved", value=str(file_count))
                    await status_message.edit(embed=status_embed)
updating image collector
import json
import logging
from discord import Embed, Color
from datetime import datetime
logger = logging.getLogger('gradiusbot')
logger.info("[Public Plugin] <image_collector.py> Collects images from specified channel on a server.")
async def action(**kwargs):
    """Admin-channel command handler for the image collector plugin.

    Supported commands (admin channel only):
      !ic collect <channel_id> -- download every attachment posted in the
          target channel into the local 'images/' directory, posting
          progress every 50 messages.
      !ic userjson -- dump a {member_id: profile} mapping of all guild
          members to 'users.json'.

    Expected kwargs: 'message', 'config', 'client'.
    """
    message = kwargs['message']
    config = kwargs['config']
    client = kwargs['client']
    split_message = message.content.split()
    admin_channel_id = config.getint('image_collector', 'admin_channel_id')
    guild = message.channel.guild
    if message.channel.id == admin_channel_id:
        # Guard against empty messages before indexing into the split words.
        if split_message and split_message[0] == '!ic':
            if len(split_message) == 3 and split_message[1] == 'collect':
                target_channel = guild.get_channel(int(split_message[2]))
                status_embed = Embed(title="Image Collection Script", color=Color.orange(), description="Starting image collection script...")
                message_count = 0
                file_count = 0
                if target_channel:
                    status_message = await message.channel.send(embed=status_embed)
                    async for msg in target_channel.history(limit=None):
                        message_count += 1
                        # collect message attachments
                        if len(msg.attachments) > 0:
                            # Per-message counter keeps multiple attachments on
                            # one message from overwriting each other.
                            attachment_count = 1
                            timestamp = msg.created_at
                            author = msg.author.id
                            for attachment in msg.attachments:
                                await attachment.save(f"images/{author}-{int(timestamp.timestamp())}-{attachment_count}")
                                file_count += 1
                                attachment_count += 1
                        if message_count % 50 == 0:
                            status_embed = Embed(title="Image Collection Script", color=Color.orange(),
                                                 description="Starting image collection script...")
                            status_embed.add_field(name="Messages processed", value=str(message_count))
                            status_embed.add_field(name="Files Saved", value=str(file_count))
                            await status_message.edit(embed=status_embed)
                    # Send final update
                    status_embed = Embed(title="Image Collection Script", color=Color.green(),
                                         description="Image collection script complete.")
                    status_embed.add_field(name="Messages processed", value=str(message_count))
                    status_embed.add_field(name="Files Saved", value=str(file_count))
                    await status_message.edit(embed=status_embed)
            # Bug fix: check the word count before reading split_message[1];
            # a bare '!ic' used to raise IndexError here.
            elif len(split_message) >= 2 and split_message[1] == 'userjson':
                status_embed = Embed(title="Image Collection Script - User JSON", color=Color.green(),
                                     description="User JSON collection script complete.")
                member_dict = {}
                for member in guild.members:
                    member_dict[member.id] = {
                        'name': member.name,
                        'discriminator': member.discriminator,
                        'nick': member.nick,
                        # joined_at can be None when Discord does not provide it.
                        'joined': member.joined_at.strftime('%Y-%m-%d %H:%M:%S') if member.joined_at else None
                    }
                with open('users.json', 'w') as user_json:
                    user_json.write(json.dumps(member_dict))
                await message.channel.send(embed=status_embed)
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Laghos(MakefilePackage):
    """Laghos (LAGrangian High-Order Solver) is a CEED miniapp that solves the
    time-dependent Euler equations of compressible gas dynamics in a moving
    Lagrangian frame using unstructured high-order finite element spatial
    discretization and explicit high-order time-stepping.
    """
    tags = ['proxy-app', 'ecp-proxy-app']
    homepage = "https://computing.llnl.gov/projects/co-design/laghos"
    url = "https://github.com/CEED/Laghos/archive/v1.0.tar.gz"
    git = "https://github.com/CEED/Laghos.git"
    maintainers = ['v-dobrev', 'tzanio', 'vladotomov']
    version('develop', branch='master')
    version('3.1', sha256='49b65edcbf9732c7f6c228958620e18980c43ad8381315a8ba9957ecb7534cd5')
    version('3.0', sha256='4db56286e15b42ecdc8d540c4888a7dec698b019df9c7ccb8319b7ea1f92d8b4')
    version('2.0', sha256='dd3632d5558889beec2cd3c49eb60f633f99e6d886ac868731610dd006c44c14')
    version('1.1', sha256='53b9bfe2af263c63eb4544ca1731dd26f40b73a0d2775a9883db51821bf23b7f')
    version('1.0', sha256='af50a126355a41c758fcda335a43fdb0a3cd97e608ba51c485afda3dd84a5b34')
    variant('metis', default=True, description='Enable/disable METIS support')
    depends_on('mfem+mpi+metis', when='+metis')
    depends_on('mfem+mpi~metis', when='~metis')
    # Each Laghos release is pinned to the mfem version it was developed against.
    depends_on('mfem@develop', when='@develop')
    depends_on('mfem@4.2.0', when='@3.1')
    depends_on('mfem@4.1.0:4.1.99', when='@3.0')
    # Recommended mfem version for laghos v2.0 is: ^mfem@3.4.1-laghos-v2.0
    depends_on('mfem@3.4.0:', when='@2.0')
    # Recommended mfem version for laghos v1.x is: ^mfem@3.3.1-laghos-v1.0
    depends_on('mfem@3.3.1-laghos-v1.0:', when='@1.0,1.1')
    @property
    def build_targets(self):
        """Make variables passed on the command line of every `make` invocation."""
        targets = []
        spec = self.spec
        targets.append('MFEM_DIR=%s' % spec['mfem'].prefix)
        targets.append('CONFIG_MK=%s' % spec['mfem'].package.config_mk)
        targets.append('TEST_MK=%s' % spec['mfem'].package.test_mk)
        # Laghos <= 2.0 does not pick the MPI compiler up from mfem's config.
        if spec.satisfies('@:2.0'):
            targets.append('CXX=%s' % spec['mpi'].mpicxx)
        return targets
    # See lib/spack/spack/build_systems/makefile.py
    def check(self):
        """Run the upstream `make test` target in the build directory."""
        with working_dir(self.build_directory):
            make('test', *self.build_targets)
    def install(self, spec, prefix):
        """Install the single `laghos` binary into the package prefix."""
        mkdirp(prefix.bin)
        install('laghos', prefix.bin)
    # No standalone install-time tests for this package.
    install_time_test_callbacks = []
laghos: add variant with compiler optimization (#24910)
* add variant with compiler optimization
Update package.py to include a variant with compiler optimization, benchmarked at the A-HUG hackathon to improve major kernel time by roughly 3%.
* fix style
* Update var/spack/repos/builtin/packages/laghos/package.py
Co-authored-by: Adam J. Stewart <023e75a89b1f5ca1e59909f8756c185885836ad9@gmail.com>
Co-authored-by: Adam J. Stewart <023e75a89b1f5ca1e59909f8756c185885836ad9@gmail.com>
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Laghos(MakefilePackage):
    """Laghos (LAGrangian High-Order Solver) is a CEED miniapp that solves the
    time-dependent Euler equations of compressible gas dynamics in a moving
    Lagrangian frame using unstructured high-order finite element spatial
    discretization and explicit high-order time-stepping.
    """
    tags = ['proxy-app', 'ecp-proxy-app']
    homepage = "https://computing.llnl.gov/projects/co-design/laghos"
    url = "https://github.com/CEED/Laghos/archive/v1.0.tar.gz"
    git = "https://github.com/CEED/Laghos.git"
    maintainers = ['v-dobrev', 'tzanio', 'vladotomov']
    version('develop', branch='master')
    version('3.1', sha256='49b65edcbf9732c7f6c228958620e18980c43ad8381315a8ba9957ecb7534cd5')
    version('3.0', sha256='4db56286e15b42ecdc8d540c4888a7dec698b019df9c7ccb8319b7ea1f92d8b4')
    version('2.0', sha256='dd3632d5558889beec2cd3c49eb60f633f99e6d886ac868731610dd006c44c14')
    version('1.1', sha256='53b9bfe2af263c63eb4544ca1731dd26f40b73a0d2775a9883db51821bf23b7f')
    version('1.0', sha256='af50a126355a41c758fcda335a43fdb0a3cd97e608ba51c485afda3dd84a5b34')
    variant('metis', default=True, description='Enable/disable METIS support')
    variant('ofast', default=False, description="Enable gcc optimization flags")
    depends_on('mfem+mpi+metis', when='+metis')
    depends_on('mfem+mpi~metis', when='~metis')
    # Each Laghos release is pinned to the mfem version it was developed against.
    depends_on('mfem@develop', when='@develop')
    depends_on('mfem@4.2.0', when='@3.1')
    depends_on('mfem@4.1.0:4.1.99', when='@3.0')
    # Recommended mfem version for laghos v2.0 is: ^mfem@3.4.1-laghos-v2.0
    depends_on('mfem@3.4.0:', when='@2.0')
    # Recommended mfem version for laghos v1.x is: ^mfem@3.3.1-laghos-v1.0
    depends_on('mfem@3.3.1-laghos-v1.0:', when='@1.0,1.1')
    @property
    def build_targets(self):
        """Make variables passed on the command line of every `make` invocation."""
        targets = []
        spec = self.spec
        targets.append('MFEM_DIR=%s' % spec['mfem'].prefix)
        targets.append('CONFIG_MK=%s' % spec['mfem'].package.config_mk)
        targets.append('TEST_MK=%s' % spec['mfem'].package.test_mk)
        # Laghos <= 2.0 does not pick the MPI compiler up from mfem's config.
        if spec.satisfies('@:2.0'):
            targets.append('CXX=%s' % spec['mpi'].mpicxx)
        # +ofast is gcc-specific, hence the compiler check as well.
        # NOTE(review): the spaces around '=' rely on GNU make parsing
        # command-line definitions like makefile lines — confirm this
        # override actually takes effect.
        if '+ofast %gcc' in self.spec:
            targets.append('CXXFLAGS = -Ofast -finline-functions')
        return targets
    # See lib/spack/spack/build_systems/makefile.py
    def check(self):
        """Run the upstream `make test` target in the build directory."""
        with working_dir(self.build_directory):
            make('test', *self.build_targets)
    def install(self, spec, prefix):
        """Install the single `laghos` binary into the package prefix."""
        mkdirp(prefix.bin)
        install('laghos', prefix.bin)
    # No standalone install-time tests for this package.
    install_time_test_callbacks = []
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class StockDdtVolume(orm.Model):
    """ Model name: StockDdtVolume

    One pallet/box line attached to a DDT (transport document): a count
    of identical packages plus their dimensions in centimetres.
    """
    _name = 'stock.ddt.volume'
    _description = 'Volume box'
    _rec_name = 'total'
    _columns = {
        # Number of identical packages described by this line.
        'total': fields.integer('Total', required=True),
        # Dimensions in centimetres (length / height / depth).
        'dimension_l': fields.integer('L (cm.)'),#, digits=(16, 2)),
        'dimension_h': fields.integer('H (cm.)'),
        'dimension_s': fields.integer('S (cm.)'),
        'ddt_id': fields.many2one('stock.ddt', 'DDT'),
        }
    _defaults = {
        'total': lambda *x: 1,
        }
class StockDdt(orm.Model):
    """ Model name: StockDdt

    Extends the DDT (transport document) with pallet/volume totals and
    the text block printed on the DDT report.
    """
    _inherit = 'stock.ddt'
    # Button events:
    def compute_volume_total(self, cr, uid, ids, context=None):
        ''' Compute pallet count, total volume (CBM) and report text,
            then store them on the record.
        '''
        assert len(ids) == 1, 'Once a time!'
        ddt_proxy = self.browse(cr, uid, ids, context=context)[0]
        total = 0  # integer, matches the 'pallet_total' integer field
        volume = 0.0
        pallet_text = ''  # dimension in print text
        for pack in ddt_proxy.volume_ids:
            total += pack.total
            volume += pack.dimension_l * pack.dimension_h * pack.dimension_s
            pallet_text += 'N.: %s pallet %s x %s x %s\n' % (
                pack.total,
                pack.dimension_l,
                pack.dimension_h,
                pack.dimension_s,
                )
        volume /= 1000000.0  # cm^3 -> m^3 (CBM)
        # Create print text:
        print_volume = ddt_proxy.print_volume
        print_volume_text = ''
        if print_volume in ('number', 'all'):
            print_volume_text += _('Number of pallet: %s\n') % total
        if print_volume in ('pallet', 'all'):
            print_volume_text += pallet_text
        # NOTE(review): total volume is always appended, even when
        # print_volume == 'none' — confirm that is intended.
        print_volume_text += _('Volume %5.2f CBM') % volume
        return self.write(cr, uid, ids, {
            'volume_total': volume,
            'pallet_total': total,
            'print_volume_text': print_volume_text,
            }, context=context)
    _columns = {
        'volume_ids': fields.one2many(
            'stock.ddt.volume', 'ddt_id', 'Volume (m3)'),
        'volume_total': fields.float('Tot. volume', digits=(16, 2)),
        'pallet_total': fields.integer('Tot. pallet'),
        'only_pallet': fields.boolean('Only pallet'),
        'print_volume': fields.selection([
            ('none', 'Nothing'),
            ('number', 'Number of pallet'),
            ('pallet', 'Pallet info'),
            ('all', 'All (number and pallet info)'),
            ], 'Print info', help='Print report info on DDT'),
        'print_volume_text': fields.text('Print volume text'),
        }
    # Bug fix: the ORM reads defaults from '_defaults' (see StockDdtVolume
    # above); the previous '_default' attribute was silently ignored, so
    # 'print_volume' never received its 'all' default.
    _defaults = {
        'print_volume': lambda *x: 'all',
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
pallet integer
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class StockDdtVolume(orm.Model):
    """ Model name: StockDdtVolume

    One pallet/box line attached to a DDT (transport document): a count
    of identical packages plus their dimensions in centimetres.
    """
    _name = 'stock.ddt.volume'
    _description = 'Volume box'
    _rec_name = 'total'
    _columns = {
        # Number of identical packages described by this line.
        'total': fields.integer('Total', required=True),
        # Dimensions in centimetres (length / height / depth).
        'dimension_l': fields.integer('L (cm.)'),#, digits=(16, 2)),
        'dimension_h': fields.integer('H (cm.)'),
        'dimension_s': fields.integer('S (cm.)'),
        'ddt_id': fields.many2one('stock.ddt', 'DDT'),
        }
    _defaults = {
        'total': lambda *x: 1,
        }
class StockDdt(orm.Model):
    """ Model name: StockDdt

    Extends the DDT (transport document) with pallet/volume totals and
    the text block printed on the DDT report.
    """
    _inherit = 'stock.ddt'
    # Button events:
    def compute_volume_total(self, cr, uid, ids, context=None):
        ''' Compute pallet count, total volume (CBM) and report text,
            then store them on the record.
        '''
        assert len(ids) == 1, 'Once a time!'
        ddt_proxy = self.browse(cr, uid, ids, context=context)[0]
        total = 0  # integer, matches the 'pallet_total' integer field
        volume = 0.0
        pallet_text = ''  # dimension in print text
        for pack in ddt_proxy.volume_ids:
            total += pack.total
            volume += pack.dimension_l * pack.dimension_h * pack.dimension_s
            pallet_text += 'N.: %s pallet %s x %s x %s\n' % (
                pack.total,
                pack.dimension_l,
                pack.dimension_h,
                pack.dimension_s,
                )
        volume /= 1000000.0  # cm^3 -> m^3 (CBM)
        # Create print text:
        print_volume = ddt_proxy.print_volume
        print_volume_text = ''
        if print_volume in ('number', 'all'):
            print_volume_text += _('Number of pallet: %s\n') % total
        if print_volume in ('pallet', 'all'):
            print_volume_text += pallet_text
        # NOTE(review): total volume is always appended, even when
        # print_volume == 'none' — confirm that is intended.
        print_volume_text += _('Volume %5.2f CBM') % volume
        return self.write(cr, uid, ids, {
            'volume_total': volume,
            'pallet_total': total,
            'print_volume_text': print_volume_text,
            }, context=context)
    _columns = {
        'volume_ids': fields.one2many(
            'stock.ddt.volume', 'ddt_id', 'Volume (m3)'),
        'volume_total': fields.float('Tot. volume', digits=(16, 2)),
        'pallet_total': fields.integer('Tot. pallet'),
        'only_pallet': fields.boolean('Only pallet'),
        'print_volume': fields.selection([
            ('none', 'Nothing'),
            ('number', 'Number of pallet'),
            ('pallet', 'Pallet info'),
            ('all', 'All (number and pallet info)'),
            ], 'Print info', help='Print report info on DDT'),
        'print_volume_text': fields.text('Print volume text'),
        }
    # Bug fix: the ORM reads defaults from '_defaults' (see StockDdtVolume
    # above); the previous '_default' attribute was silently ignored, so
    # 'print_volume' never received its 'all' default.
    _defaults = {
        'print_volume': lambda *x: 'all',
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from HTMLParser import HTMLParser
from twisted.web import server
from twisted.web.resource import Resource
from twisted.web.client import Agent
from twisted.internet.defer import Deferred
from twisted.internet import reactor
from twisted.internet.protocol import Protocol
from database_api import DatabaseAPI
import urllib, codecs
import json
import re
import sys
class IndexService(Resource):
    """
    Index microservice class.

    Twisted web resource (Python 2) that maintains a word -> article index
    in a database and answers JSON POST requests from sibling services.
    """
    isLeaf = True
    def __init__(self, kwargs):
        # *kwargs* is passed as a plain dict (not **kwargs) and is shared
        # with DatabaseAPI and Indexer.
        Resource.__init__(self)
        self.content_module_name = kwargs['content_module_name']
        self.index = DatabaseAPI(**kwargs)
        self.indexer = Indexer(**kwargs)
        self.startup_routine()
    # Asks the user for some questions at startup.
    def startup_routine(self):
        """Interactive console loop: reset/init/start/exit before serving."""
        indexContent = False
        yes = set(['', 'Y', 'y', 'Yes', 'yes', 'YES'])
        no = set(['N', 'n', 'No', 'no', 'NO'])
        index_on_startup = False
        print("Type 'help' for help.")
        while True:
            print(">> ", end="")
            user_input = str(raw_input())
            if user_input == 'help': # Print available commands to user.
                print()
                print(" <command> - <description>")
                print(" help - Help.")
                print(" reset - Reset index database.")
                print(" init - Index all articles from content service on startup.")
                print(" start - Start service.")
                print(" exit - Quit.")
                print()
            elif user_input == 'reset': # Clearing tables in the index database.
                print("This will delete any existing data and reset the database.")
                print("Are you sure you want to continue? [Y/n] ", end="")
                while True:
                    user_input = str(raw_input())
                    if user_input in yes:
                        self.index.make_tables()
                        print("Reset.")
                        break
                    else:
                        print("Abort.")
                        break
            elif user_input == 'init': # Toggle on/off indexing on startup.
                while True:
                    print("Do you want to index all the articles on startup? [Y/n] ", end="")
                    user_input = str(raw_input())
                    if user_input in yes:
                        index_on_startup = True
                        print("Indexing will begin on start.")
                        break
                    elif user_input in no:
                        print("Indexing will not begin on start.")
                        index_on_startup = False
                        break
                    else:
                        print("Abort.")
                        break
            elif user_input == 'start': # Start indexing service.
                print("Starting index service. Use Ctrl + c to quit.")
                if index_on_startup:
                    self.index_all_articles()
                reactor.listenTCP(8001, server.Site(self))
                reactor.run()
                break
            elif user_input == 'exit': # End program.
                break
            elif user_input == '': # Yes is default on return.
                continue
            else:
                print(user_input + ": command not found")
                continue
    # Indexes all articles from the content microservice.
    def index_all_articles(self):
        # **** TODO: dont index already indexed articles ****
        #host = 'http://127.0.0.1:8002' # content host - **** TODO: fetched from dht node network ****
        # NOTE(review): get_service_ip() has no return statement (the lookup
        # is asynchronous), so this concatenation raises TypeError at
        # runtime — needs a Deferred-based redesign; confirm with callers.
        host = self.get_service_ip(self.content_module_name) + "/list"
        agent = Agent(reactor)
        d = agent.request("GET", host)
        d.addCallback(self._cbRequest)
    # Callback request.
    def _cbRequest(self, response):
        """Stream the response body into _index_content via RequestClient."""
        finished = Deferred()
        finished.addCallback(self._index_content)
        response.deliverBody(RequestClient(finished))
    # Callback for _cbRequest. Indexes the articles in the GET response.
    def _index_content(self, response):
        """Index every article id listed in the JSON body *response*."""
        article_id_list = json.loads(response)['list']
        total = len(article_id_list)
        for i in range(total):
            # Progress indicator rewrites the same console line.
            sys.stdout.write('\r')
            sys.stdout.write("Indexing article {i} of {total}.".format(i=i+1, total=total))
            sys.stdout.flush()
            article_id = article_id_list[i]['id']
            self.index_article(article_id)
        print("\nIndexing completed.")
    # Indexes page.
    def index_article(self, article_id):
        """Fetch one article from the content service and upsert its index."""
        host = self.get_service_ip(self.content_module_name)
        url = host+'/article/'+article_id # Articles are found at: http://<publish_module_ip>:<port_num>/article/<article_id>
        values = self.indexer.make_index(url)
        self.index.upsert(article_id, values)
    # Handles POST requests from the other microservices.
    def render_POST(self, request):
        """Dispatch on the JSON 'task' field and return a JSON/text reply."""
        d = json.load(request.content)
        if d['task'] == 'getSuggestions': # JSON format: {'task' : 'getSuggestions', 'word' : str}
            word_root = d['word']
            data = self.index.query("SELECT DISTINCT word FROM wordfreq WHERE word LIKE %s", (word_root+'%',))
            response = {"suggestions" : [t[0] for t in data]}
            return json.dumps(response)
        elif d['task'] == 'getArticles': # JSON format: {'task' : 'getArticles', 'word' : str}
            word = d['word']
            data = self.index.query("SELECT articleid FROM wordfreq WHERE word = %s", (word,))
            response = {"articleID" : [t[0] for t in data]}
            return json.dumps(response)
        elif d['task'] == 'getFrequencyList': # JSON format: {'task' : 'getFrequencyList'}
            data = self.index.query("SELECT word, sum(frequency) FROM wordfreq GROUP BY word")
            response = {}
            for value in data:
                response[value[0]] = value[1]
            return json.dumps(response)
        elif d['task'] == 'updatedArticle':
            article_id = d['articleID']
            self.index.remove(article_id)
            self.index_article(article_id)
            return 'thanks!'
        elif d['task'] == 'publishedArticle':
            article_id = d['articleID']
            self.index.upsert(article_id)
            return 'thanks!'
        elif d['task'] == 'removedArticle':
            article_id = d['articleID']
            self.index.remove(article_id)
            return('ok!')
        else:
            return('404')
    # Temporary function to fetch a service's address. Should connect with the dht node somehow.
    def get_service_ip(self, service_name):
        # NOTE(review): no value is returned — the address arrives
        # asynchronously in _fetch_ip_from_DHT, so callers receive None.
        host = "http://127.0.0.1/" + service_name
        agent = Agent(reactor)
        d = agent.request("GET", host)
        d.addCallback(self._fetch_ip_from_DHT)
    def _fetch_ip_from_DHT(self, response):
        """Parse the DHT lookup response body as JSON (result is discarded)."""
        finished = Deferred()
        finished.addCallback(lambda response: json.loads(response))
        response.deliverBody(RequestClient(finished))
class RequestClient(Protocol):
    """
    Request Client

    Collects an HTTP response body and fires the *finished* Deferred with
    the complete body once the connection closes.
    """
    def __init__(self, finished):
        self.finished = finished
        # Bug fix: initialize the buffer so connectionLost does not raise
        # AttributeError when the body is empty.
        self.data = ''
    def dataReceived(self, data):
        # Bug fix: bodies may arrive in several chunks; accumulate them
        # instead of keeping only the last chunk received.
        self.data += data
    def connectionLost(self, reason):
        self.finished.callback(self.data)
class Indexer(object):
    """
    Basic indexer of HTML pages.

    Fetches a page, extracts its words via Parser, removes stopwords and
    returns (word, frequency) tuples.
    """
    stopwords = None
    def __init__(self, stopword_file_path, **kwargs):
        # Extra kwargs are forwarded verbatim to Parser in make_index().
        self.kwargs = kwargs
        self.stopwords = set([''])
        # Reading in the stopword file:
        with codecs.open(stopword_file_path, encoding='utf-8') as f:
            for word in f:
                self.stopwords.add(unicode(word.strip()))
    # Takes an url as arguments and indexes the article at that url. Returns a list of tuple values.
    def make_index(self, url):
        """Return a list of (word, frequency) tuples for the page at *url*."""
        # Retrieving the HTML source from the url:
        page = urllib.urlopen(url).read().decode('utf-8')
        # Parsing the HTML:
        parser = Parser(**self.kwargs)
        parser.feed(page)
        content = parser.get_content()
        parser.close()
        # Removing stopwords:
        unique_words = set(content).difference(self.stopwords)
        # Making a list of tuples: (word, wordfreq):
        values = []
        for word in unique_words:
            values.append((word, content.count(word)))
        return values
class Parser(HTMLParser):
    """
    Basic parser for parsing of html data.

    Collects lower-cased words from text nodes, skipping the contents of
    any tag listed in *tags_to_ignore* (e.g. script/style).
    """
    tags_to_ignore = set() # Add HTML tags to the set to ignore the data from that tag.
    def __init__(self, tags_to_ignore, **kwargs):
        HTMLParser.__init__(self)
        self.content = []
        self.tags_to_ignore = set(tags_to_ignore)
        self.ignore_tag = False
    # Keeps track of which tags to ignore data from.
    def handle_starttag(self, tag, attrs):
        self.ignore_tag = tag in self.tags_to_ignore
    def handle_endtag(self, tag):
        # Bug fix: text that followed an ignored element's closing tag (but
        # preceded the next start tag) used to be dropped; stop ignoring as
        # soon as the ignored element closes.
        if tag in self.tags_to_ignore:
            self.ignore_tag = False
    # Handles data from tags.
    def handle_data(self, data):
        if self.ignore_tag:
            return
        words = re.split("[ .,:;()!#¤%&=?+`´*_@£$<>^~/\[\]\{\}\-\"\']+", data)
        for word in words:
            if len(word) > 1:
                self.content.append(word.lower().strip())
    # Get method for content.
    def get_content(self):
        return self.content
reverted back to hardcoding the publish address
# -*- coding: utf-8 -*-
from __future__ import print_function
from HTMLParser import HTMLParser
from twisted.web import server
from twisted.web.resource import Resource
from twisted.web.client import Agent
from twisted.internet.defer import Deferred
from twisted.internet import reactor
from twisted.internet.protocol import Protocol
from database_api import DatabaseAPI
import urllib, codecs
import json
import re
import sys
class IndexService(Resource):
    """
    Twisted web resource implementing the index microservice.

    Builds and serves a word index of articles fetched from the content
    service. Construction runs an interactive console (startup_routine)
    before the reactor starts; POST requests are dispatched on the 'task'
    field of their JSON body.
    """
    # Serve every URL path from this single resource (no child lookup).
    isLeaf = True
    def __init__(self, kwargs):
        # NOTE(review): kwargs is received as a plain dict argument, not **kwargs.
        Resource.__init__(self)
        self.content_module_name = kwargs['content_module_name']
        self.index = DatabaseAPI(**kwargs)   # index database wrapper
        self.indexer = Indexer(**kwargs)     # HTML fetch/parse/count helper
        self.startup_routine()               # blocks until 'start' or 'exit'
    # Asks the user for some questions at startup.
    def startup_routine(self):
        """Interactive console loop; returns when the user starts the service or exits."""
        indexContent = False  # NOTE(review): unused
        # '' is in the yes-set so a bare Return defaults to yes.
        yes = set(['', 'Y', 'y', 'Yes', 'yes', 'YES'])
        no = set(['N', 'n', 'No', 'no', 'NO'])
        index_on_startup = False
        print("Type 'help' for help.")
        while True:
            print(">> ", end="")
            user_input = str(raw_input())  # Python 2 console input
            if user_input == 'help': # Print available commands to user.
                print()
                print(" <command> - <description>")
                print(" help - Help.")
                print(" reset - Reset index database.")
                print(" init - Index all articles from content service on startup.")
                print(" start - Start service.")
                print(" exit - Quit.")
                print()
            elif user_input == 'reset': # Clearing tables in the index database.
                print("This will delete any existing data and reset the database.")
                print("Are you sure you want to continue? [Y/n] ", end="")
                while True:
                    user_input = str(raw_input())
                    if user_input in yes:
                        self.index.make_tables()
                        print("Reset.")
                        break
                    else:
                        # NOTE(review): any non-yes answer aborts here, including
                        # unrecognized input (the 'no' set is not consulted).
                        print("Abort.")
                        break
            elif user_input == 'init': # Toggle on/off indexing on startup.
                while True:
                    print("Do you want to index all the articles on startup? [Y/n] ", end="")
                    user_input = str(raw_input())
                    if user_input in yes:
                        index_on_startup = True
                        print("Indexing will begin on start.")
                        break
                    elif user_input in no:
                        print("Indexing will not begin on start.")
                        index_on_startup = False
                        break
                    else:
                        print("Abort.")
                        break
            elif user_input == 'start': # Start indexing service.
                print("Starting index service. Use Ctrl + c to quit.")
                if index_on_startup:
                    self.index_all_articles()
                reactor.listenTCP(8001, server.Site(self))
                reactor.run()  # blocks until the reactor stops
                break
            elif user_input == 'exit': # End program.
                break
            elif user_input == '': # Yes is default on return.
                continue
            else:
                print(user_input + ": command not found")
                continue
    # Indexes all articles from the content microservice.
    def index_all_articles(self):
        # **** TODO: dont index already indexed articles ****
        #host = 'http://127.0.0.1:8002' # content host - **** TODO: fetched from dht node network ****
        publish_host = self.get_service_ip(self.content_module_name)
        agent = Agent(reactor)
        # Fire-and-forget GET; the article list is handled asynchronously.
        d = agent.request("GET", publish_host)
        d.addCallback(self._cbRequestIndex)
    # Callback for the article-list request: collect the response body.
    def _cbRequestIndex(self, response):
        finished = Deferred()
        finished.addCallback(self._index_content)
        response.deliverBody(RequestClient(finished))
    # Callback for _cbRequestIndex. Indexes the articles in the GET response.
    def _index_content(self, response):
        # response is a JSON string shaped like {'list': [{'id': ...}, ...]}.
        article_id_list = json.loads(response)['list']
        total = len(article_id_list)
        for i in range(total):
            # One-line console progress indicator ('\r' rewinds the cursor).
            sys.stdout.write('\r')
            sys.stdout.write("Indexing article {i} of {total}.".format(i=i+1, total=total))
            sys.stdout.flush()
            article_id = article_id_list[i]['id']
            self.index_article(article_id)
        print("\nIndexing completed.")
    # Temporary function to fetch a service's address. Should connect with the dht node somehow.
    def get_service_ip(self, service_name):
        return "http://despina.128.no/" + service_name
    # Fetches, parses and stores the index for a single article.
    def index_article(self, article_id):
        host = self.get_service_ip(self.content_module_name)
        url = host+'/article/'+article_id # Articles are found at: http://<publish_module_ip>:<port_num>/article/<article_id>
        print(url)
        values = self.indexer.make_index(url)
        self.index.upsert(article_id, values)
    # Handles POST requests from the other microservices.
    def render_POST(self, request):
        """Dispatch a JSON POST body on its 'task' field; returns JSON or a plain string."""
        d = json.load(request.content)
        if d['task'] == 'getSuggestions': # JSON format: {'task' : 'getSuggestions', 'word' : str}
            word_root = d['word']
            # Prefix match for autocomplete suggestions.
            data = self.index.query("SELECT DISTINCT word FROM wordfreq WHERE word LIKE %s", (word_root+'%',))
            response = {"suggestions" : [t[0] for t in data]}
            return json.dumps(response)
        elif d['task'] == 'getArticles': # JSON format: {'task' : 'getArticles', 'word' : str}
            word = d['word']
            data = self.index.query("SELECT articleid FROM wordfreq WHERE word = %s", (word,))
            response = {"articleID" : [t[0] for t in data]}
            return json.dumps(response)
        elif d['task'] == 'getFrequencyList': # JSON format: {'task' : 'getFrequencyList'}
            data = self.index.query("SELECT word, sum(frequency) FROM wordfreq GROUP BY word")
            response = {}
            for value in data:
                response[value[0]] = value[1]
            return json.dumps(response)
        elif d['task'] == 'updatedArticle':
            # Re-index an article that changed: drop the old rows, index anew.
            article_id = d['articleID']
            self.index.remove(article_id)
            self.index_article(article_id)
            return 'thanks!'
        elif d['task'] == 'publishedArticle':
            article_id = d['articleID']
            # NOTE(review): upsert is called here without word values, unlike
            # index_article -- confirm DatabaseAPI.upsert supports that, or
            # whether self.index_article(article_id) was intended.
            self.index.upsert(article_id)
            return 'thanks!'
        elif d['task'] == 'removedArticle':
            article_id = d['articleID']
            self.index.remove(article_id)
            return('ok!')
        else:
            return('404')
class RequestClient(Protocol):
    """
    Twisted protocol that accumulates a response body and fires the given
    Deferred with the complete data once the connection closes.
    """
    def __init__(self, finished):
        # Deferred to fire with the full response body.
        self.finished = finished
        # Buffer of received chunks: the body may arrive in several
        # dataReceived calls, so it must be accumulated. The original kept
        # only the last chunk (self.data = data) and raised AttributeError
        # when no data arrived at all.
        self.chunks = []
    def dataReceived(self, data):
        self.chunks.append(data)
    def connectionLost(self, reason):
        # Deliver everything that was received, not just the final chunk.
        self.finished.callback(''.join(self.chunks))
class Indexer(object):
    """
    Basic indexer of HTML pages.

    Reads a stopword file on construction; make_index(url) fetches a page
    and returns (word, frequency) pairs for every non-stopword on it.
    """
    stopwords = None  # set of words excluded from the index (always contains '')
    def __init__(self, stopword_file_path, **kwargs):
        # Extra kwargs are forwarded to Parser (e.g. tags_to_ignore).
        self.kwargs = kwargs
        self.stopwords = set([''])
        # Reading in the stopword file, one word per line. codecs.open with
        # an encoding already yields unicode strings, so the explicit
        # unicode() conversion was redundant (and broke Python 3).
        with codecs.open(stopword_file_path, encoding='utf-8') as f:
            for word in f:
                self.stopwords.add(word.strip())
    # Takes an url as argument and indexes the article at that url. Returns a list of tuple values.
    def make_index(self, url):
        """Fetch *url*, parse it, and return [(word, frequency), ...] excluding stopwords."""
        from collections import Counter  # local import: counting is only needed here
        # Retrieving the HTML source from the url:
        page = urllib.urlopen(url).read().decode('utf-8')
        # Parsing the HTML:
        parser = Parser(**self.kwargs)
        parser.feed(page)
        content = parser.get_content()
        parser.close()
        # Count every word in one O(n) pass instead of calling content.count()
        # once per unique word (O(n*m)), then drop the stopwords.
        counts = Counter(content)
        return [(word, freq) for word, freq in counts.items() if word not in self.stopwords]
class Parser(HTMLParser):
    """
    Minimal HTML parser that collects lower-cased words from text nodes,
    skipping the contents of any tag listed in tags_to_ignore.
    """
    tags_to_ignore = set()  # HTML tags whose data should not be collected

    def __init__(self, tags_to_ignore, **kwargs):
        HTMLParser.__init__(self)
        self.content = []
        self.tags_to_ignore = set(tags_to_ignore)
        self.ignore_tag = False

    def handle_starttag(self, tag, attrs):
        # Remember whether data following the most recent start tag is wanted.
        self.ignore_tag = tag in self.tags_to_ignore

    def handle_data(self, data):
        # Drop data belonging to an ignored tag; otherwise split it into
        # words on punctuation/whitespace and keep words longer than one char.
        if self.ignore_tag:
            return
        for token in re.split("[ .,:;()!#¤%&=?+`´*_@£$<>^~/\[\]\{\}\-\"\']+", data):
            if len(token) > 1:
                self.content.append(token.lower().strip())

    def get_content(self):
        """Return the list of collected words, in document order."""
        return self.content
|
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CommonVariables:
    """Shared constants for the Azure disk-encryption extension (test build)."""

    # --- extension identity ---
    utils_path_name = 'Utils'
    extension_name = 'AzureDiskEncryptionForLinuxTest'
    extension_version = '0.1.0.999302'
    extension_type = extension_name
    extension_media_link = 'https://amextpaas.blob.core.windows.net/prod/' + extension_name + '-' + str(extension_version) + '.zip'
    extension_label = 'Windows Azure VMEncryption Extension for Linux IaaS'
    extension_description = extension_label

    # --- disk / file system related ---
    sector_size = 512
    luks_header_size = 4096 * 512
    default_block_size = 52428800
    min_filesystem_size_support = 52428800 * 3
    #TODO for the sles 11, we should use the ext3
    default_file_system = 'ext4'
    default_mount_name = 'encrypted_disk'
    dev_mapper_root = '/dev/mapper/'
    disk_by_id_root = '/dev/disk/by-id'
    disk_by_uuid_root = '/dev/disk/by-uuid'

    # --- parameter key names ---
    PassphraseFileNameKey = 'BekFileName'
    KeyEncryptionKeyURLKey = 'KeyEncryptionKeyURL'
    KeyVaultURLKey = 'KeyVaultURL'
    AADClientIDKey = 'AADClientID'
    AADClientCertThumbprintKey = 'AADClientCertThumbprint'
    KeyEncryptionAlgorithmKey = 'KeyEncryptionAlgorithm'
    DiskFormatQuerykey = "DiskFormatQuery"
    PassphraseKey = 'Passphrase'

    # --- volume type: value may be OS, Data or All ---
    VolumeTypeKey = 'VolumeType'
    AADClientSecretKey = 'AADClientSecret'
    SecretUriKey = 'SecretUri'
    SecretSeqNum = 'SecretSeqNum'
    VolumeTypeOS = 'OS'
    VolumeTypeData = 'Data'
    VolumeTypeAll = 'All'
    SupportedVolumeTypes = [ VolumeTypeOS, VolumeTypeData, VolumeTypeAll ]

    # --- command types ---
    EnableEncryption = 'EnableEncryption'
    EnableEncryptionFormat = 'EnableEncryptionFormat'
    UpdateEncryptionSettings = 'UpdateEncryptionSettings'
    DisableEncryption = 'DisableEncryption'
    QueryEncryptionStatus = 'QueryEncryptionStatus'

    # --- encryption config keys ---
    EncryptionEncryptionOperationKey = 'EncryptionOperation'
    EncryptionDecryptionOperationKey = 'DecryptionOperation'
    EncryptionVolumeTypeKey = 'VolumeType'
    EncryptionDiskFormatQueryKey = 'DiskFormatQuery'

    # --- crypt ongoing-item config keys ---
    OngoingItemMapperNameKey = 'MapperName'
    OngoingItemHeaderFilePathKey = 'HeaderFilePath'
    OngoingItemOriginalDevNamePathKey = 'DevNamePath'
    OngoingItemOriginalDevPathKey = 'DevicePath'
    OngoingItemPhaseKey = 'Phase'
    OngoingItemHeaderSliceFilePathKey = 'HeaderSliceFilePath'
    OngoingItemFileSystemKey = 'FileSystem'
    OngoingItemMountPointKey = 'MountPoint'
    OngoingItemDeviceSizeKey = 'Size'
    OngoingItemCurrentSliceIndexKey = 'CurrentSliceIndex'
    OngoingItemFromEndKey = 'FromEnd'
    OngoingItemCurrentDestinationKey = 'CurrentDestination'
    OngoingItemCurrentTotalCopySizeKey = 'CurrentTotalCopySize'
    OngoingItemCurrentLuksHeaderFilePathKey = 'CurrentLuksHeaderFilePath'
    OngoingItemCurrentSourcePathKey = 'CurrentSourcePath'
    OngoingItemCurrentBlockSizeKey = 'CurrentBlockSize'

    # --- encryption phase definitions ---
    EncryptionPhaseBackupHeader = 'BackupHeader'
    EncryptionPhaseCopyData = 'EncryptingData'
    EncryptionPhaseRecoverHeader = 'RecoverHeader'
    EncryptionPhaseEncryptDevice = 'EncryptDevice'
    EncryptionPhaseDone = 'Done'

    # --- decryption phase constants ---
    DecryptionPhaseCopyData = 'DecryptingData'
    DecryptionPhaseDone = 'Done'

    # --- log levels ---
    InfoLevel = 'Info'
    WarningLevel = 'Warning'
    ErrorLevel = 'Error'

    # --- status strings and error codes ---
    extension_success_status = 'success'
    extension_error_status = 'error'
    process_success = 0
    success = 0
    os_not_supported = 1
    luks_format_error = 2
    scsi_number_not_found = 3
    device_not_blank = 4
    environment_error = 5
    luks_open_error = 6
    mkfs_error = 7
    folder_conflict_error = 8
    mount_error = 9
    mount_point_not_exists = 10
    passphrase_too_long_or_none = 11
    parameter_error = 12
    create_encryption_secret_failed = 13
    encrypttion_already_enabled = 14  # (sic) misspelled name kept for compatibility
    passphrase_file_not_found = 15
    command_not_support = 16
    volue_type_not_support = 17  # (sic) misspelled name kept for compatibility
    copy_data_error = 18
    encryption_failed = 19
    tmpfs_error = 20
    backup_slice_file_error = 21
    unmount_oldroot_error = 22
    operation_lookback_failed = 23
    unknown_error = 100
class TestHooks:
    """Switches that alter extension behavior during testing; all off by default."""
    # presumably widens device scanning beyond IDE devices -- TODO confirm at use site
    search_not_only_ide = False
    # when True, hard_code_passphrase is presumably used instead of the supplied
    # passphrase -- TODO confirm at use site
    use_hard_code_passphrase = False
    hard_code_passphrase = "Quattro!"
class DeviceItem(object):
    """
    Plain record describing one block device
    (NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL,SIZE,MAJ:MIN).
    """
    def __init__(self):
        self.name = None
        self.type = None
        self.file_system = None
        self.mount_point = None
        self.label = None
        self.uuid = None
        self.model = None
        self.size = None
        self.majmin = None
        self.device_id = None
        self.azure_name = None

    def __str__(self):
        # Note: uuid is deliberately not part of the string form.
        fields = [
            ("name", self.name),
            ("type", self.type),
            ("fstype", self.file_system),
            ("mountpoint", self.mount_point),
            ("label", self.label),
            ("model", self.model),
            ("size", self.size),
            ("majmin", self.majmin),
            ("device_id", self.device_id),
            ("azure_name", self.azure_name),
        ]
        return " ".join(key + ":" + str(value) for key, value in fields)
class LvmItem(object):
    """
    Plain record for one LVM logical volume
    (lv_name,vg_name,lv_kernel_major,lv_kernel_minor).
    """
    def __init__(self):
        self.lv_name = None
        self.vg_name = None
        self.lv_kernel_major = None
        self.lv_kernel_minor = None

    def __str__(self):
        return " ".join([
            "lv_name:" + str(self.lv_name),
            "vg_name:" + str(self.vg_name),
            "lv_kernel_major:" + str(self.lv_kernel_major),
            "lv_kernel_minor:" + str(self.lv_kernel_minor),
        ])
class CryptItem(object):
    """
    Plain record describing one encrypted (dm-crypt) volume mapping.
    """
    def __init__(self):
        self.mapper_name = None
        self.dev_path = None
        self.mount_point = None
        self.file_system = None
        self.luks_header_path = None
        self.uses_cleartext_key = None
        self.current_luks_slot = None

    def __str__(self):
        # "name: " keeps the extra space the original format used.
        return " ".join([
            "name: " + str(self.mapper_name),
            "dev_path:" + str(self.dev_path),
            "mount_point:" + str(self.mount_point),
            "file_system:" + str(self.file_system),
            "luks_header_path:" + str(self.luks_header_path),
            "uses_cleartext_key:" + str(self.uses_cleartext_key),
            "current_luks_slot:" + str(self.current_luks_slot),
        ])
remove test suffix from extension name
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CommonVariables:
    """Shared constants for the Azure disk-encryption extension."""

    # --- extension identity ---
    utils_path_name = 'Utils'
    extension_name = 'AzureDiskEncryptionForLinux'
    extension_version = '0.1.0.999302'
    extension_type = extension_name
    extension_media_link = 'https://amextpaas.blob.core.windows.net/prod/' + extension_name + '-' + str(extension_version) + '.zip'
    extension_label = 'Windows Azure VMEncryption Extension for Linux IaaS'
    extension_description = extension_label

    # --- disk / file system related ---
    sector_size = 512
    luks_header_size = 4096 * 512
    default_block_size = 52428800
    min_filesystem_size_support = 52428800 * 3
    #TODO for the sles 11, we should use the ext3
    default_file_system = 'ext4'
    default_mount_name = 'encrypted_disk'
    dev_mapper_root = '/dev/mapper/'
    disk_by_id_root = '/dev/disk/by-id'
    disk_by_uuid_root = '/dev/disk/by-uuid'

    # --- parameter key names ---
    PassphraseFileNameKey = 'BekFileName'
    KeyEncryptionKeyURLKey = 'KeyEncryptionKeyURL'
    KeyVaultURLKey = 'KeyVaultURL'
    AADClientIDKey = 'AADClientID'
    AADClientCertThumbprintKey = 'AADClientCertThumbprint'
    KeyEncryptionAlgorithmKey = 'KeyEncryptionAlgorithm'
    DiskFormatQuerykey = "DiskFormatQuery"
    PassphraseKey = 'Passphrase'

    # --- volume type: value may be OS, Data or All ---
    VolumeTypeKey = 'VolumeType'
    AADClientSecretKey = 'AADClientSecret'
    SecretUriKey = 'SecretUri'
    SecretSeqNum = 'SecretSeqNum'
    VolumeTypeOS = 'OS'
    VolumeTypeData = 'Data'
    VolumeTypeAll = 'All'
    SupportedVolumeTypes = [ VolumeTypeOS, VolumeTypeData, VolumeTypeAll ]

    # --- command types ---
    EnableEncryption = 'EnableEncryption'
    EnableEncryptionFormat = 'EnableEncryptionFormat'
    UpdateEncryptionSettings = 'UpdateEncryptionSettings'
    DisableEncryption = 'DisableEncryption'
    QueryEncryptionStatus = 'QueryEncryptionStatus'

    # --- encryption config keys ---
    EncryptionEncryptionOperationKey = 'EncryptionOperation'
    EncryptionDecryptionOperationKey = 'DecryptionOperation'
    EncryptionVolumeTypeKey = 'VolumeType'
    EncryptionDiskFormatQueryKey = 'DiskFormatQuery'

    # --- crypt ongoing-item config keys ---
    OngoingItemMapperNameKey = 'MapperName'
    OngoingItemHeaderFilePathKey = 'HeaderFilePath'
    OngoingItemOriginalDevNamePathKey = 'DevNamePath'
    OngoingItemOriginalDevPathKey = 'DevicePath'
    OngoingItemPhaseKey = 'Phase'
    OngoingItemHeaderSliceFilePathKey = 'HeaderSliceFilePath'
    OngoingItemFileSystemKey = 'FileSystem'
    OngoingItemMountPointKey = 'MountPoint'
    OngoingItemDeviceSizeKey = 'Size'
    OngoingItemCurrentSliceIndexKey = 'CurrentSliceIndex'
    OngoingItemFromEndKey = 'FromEnd'
    OngoingItemCurrentDestinationKey = 'CurrentDestination'
    OngoingItemCurrentTotalCopySizeKey = 'CurrentTotalCopySize'
    OngoingItemCurrentLuksHeaderFilePathKey = 'CurrentLuksHeaderFilePath'
    OngoingItemCurrentSourcePathKey = 'CurrentSourcePath'
    OngoingItemCurrentBlockSizeKey = 'CurrentBlockSize'

    # --- encryption phase definitions ---
    EncryptionPhaseBackupHeader = 'BackupHeader'
    EncryptionPhaseCopyData = 'EncryptingData'
    EncryptionPhaseRecoverHeader = 'RecoverHeader'
    EncryptionPhaseEncryptDevice = 'EncryptDevice'
    EncryptionPhaseDone = 'Done'

    # --- decryption phase constants ---
    DecryptionPhaseCopyData = 'DecryptingData'
    DecryptionPhaseDone = 'Done'

    # --- log levels ---
    InfoLevel = 'Info'
    WarningLevel = 'Warning'
    ErrorLevel = 'Error'

    # --- status strings and error codes ---
    extension_success_status = 'success'
    extension_error_status = 'error'
    process_success = 0
    success = 0
    os_not_supported = 1
    luks_format_error = 2
    scsi_number_not_found = 3
    device_not_blank = 4
    environment_error = 5
    luks_open_error = 6
    mkfs_error = 7
    folder_conflict_error = 8
    mount_error = 9
    mount_point_not_exists = 10
    passphrase_too_long_or_none = 11
    parameter_error = 12
    create_encryption_secret_failed = 13
    encrypttion_already_enabled = 14  # (sic) misspelled name kept for compatibility
    passphrase_file_not_found = 15
    command_not_support = 16
    volue_type_not_support = 17  # (sic) misspelled name kept for compatibility
    copy_data_error = 18
    encryption_failed = 19
    tmpfs_error = 20
    backup_slice_file_error = 21
    unmount_oldroot_error = 22
    operation_lookback_failed = 23
    unknown_error = 100
class TestHooks:
    """Switches that alter extension behavior during testing; all off by default."""
    # presumably widens device scanning beyond IDE devices -- TODO confirm at use site
    search_not_only_ide = False
    # when True, hard_code_passphrase is presumably used instead of the supplied
    # passphrase -- TODO confirm at use site
    use_hard_code_passphrase = False
    hard_code_passphrase = "Quattro!"
class DeviceItem(object):
    """
    Plain record describing one block device
    (NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL,SIZE,MAJ:MIN).
    """
    def __init__(self):
        self.name = None
        self.type = None
        self.file_system = None
        self.mount_point = None
        self.label = None
        self.uuid = None
        self.model = None
        self.size = None
        self.majmin = None
        self.device_id = None
        self.azure_name = None

    def __str__(self):
        # Note: uuid is deliberately not part of the string form.
        fields = [
            ("name", self.name),
            ("type", self.type),
            ("fstype", self.file_system),
            ("mountpoint", self.mount_point),
            ("label", self.label),
            ("model", self.model),
            ("size", self.size),
            ("majmin", self.majmin),
            ("device_id", self.device_id),
            ("azure_name", self.azure_name),
        ]
        return " ".join(key + ":" + str(value) for key, value in fields)
class LvmItem(object):
    """
    Plain record for one LVM logical volume
    (lv_name,vg_name,lv_kernel_major,lv_kernel_minor).
    """
    def __init__(self):
        self.lv_name = None
        self.vg_name = None
        self.lv_kernel_major = None
        self.lv_kernel_minor = None

    def __str__(self):
        return " ".join([
            "lv_name:" + str(self.lv_name),
            "vg_name:" + str(self.vg_name),
            "lv_kernel_major:" + str(self.lv_kernel_major),
            "lv_kernel_minor:" + str(self.lv_kernel_minor),
        ])
class CryptItem(object):
    """
    Plain record describing one encrypted (dm-crypt) volume mapping.
    """
    def __init__(self):
        self.mapper_name = None
        self.dev_path = None
        self.mount_point = None
        self.file_system = None
        self.luks_header_path = None
        self.uses_cleartext_key = None
        self.current_luks_slot = None

    def __str__(self):
        # "name: " keeps the extra space the original format used.
        return " ".join([
            "name: " + str(self.mapper_name),
            "dev_path:" + str(self.dev_path),
            "mount_point:" + str(self.mount_point),
            "file_system:" + str(self.file_system),
            "luks_header_path:" + str(self.luks_header_path),
            "uses_cleartext_key:" + str(self.uses_cleartext_key),
            "current_luks_slot:" + str(self.current_luks_slot),
        ])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.